diff --git a/demo/demo_3d.py b/demo/demo_3d.py index 50b7954..b6134f0 100644 --- a/demo/demo_3d.py +++ b/demo/demo_3d.py @@ -1,12 +1,22 @@ -import taichi as ti +import sys +import os + +project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +sys.path.insert(0, project_root) + import numpy as np import utils + +local_packages = os.path.join(project_root, 'local_packages') +sys.path.insert(0, local_packages) + +import taichi as ti from engine.mpm_solver import MPMSolver write_to_disk = False -# Try to run on GPU -ti.init(arch=ti.cuda, device_memory_GB=4.0) +# Try to run on CPU (for testing) +ti.init(arch=ti.cpu) gui = ti.GUI("Taichi Elements", res=512, background_color=0x112F41) @@ -25,6 +35,12 @@ mpm.set_gravity((0, -50, 0)) +mpm.add_wind_field( + lower_bound=[0, 0, 0], + upper_bound=[3, 10, 10], + force=[200, 0, 0] +) + for frame in range(1500): mpm.step(4e-3) colors = np.array([0x068587, 0xED553B, 0xEEEEF0, 0xFFFF00], diff --git a/demo/test_wind_field.py b/demo/test_wind_field.py new file mode 100644 index 0000000..783cf4b --- /dev/null +++ b/demo/test_wind_field.py @@ -0,0 +1,63 @@ +import sys +import os +import warnings + +warnings.filterwarnings("ignore") +os.environ["TI_LOG_LEVEL"] = "error" + +project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +sys.path.insert(0, project_root) + +import numpy as np + +local_packages = os.path.join(project_root, 'local_packages') +sys.path.insert(0, local_packages) + +import taichi as ti +from engine.mpm_solver import MPMSolver + +ti.init(arch=ti.cpu, log_level=ti.ERROR) + +print("=== Testing Wind Field Feature ===") +print() + +mpm = MPMSolver(res=(32, 32, 32), size=10) + +mpm.add_cube(lower_corner=[1, 4, 4], + cube_size=[1, 1, 1], + material=MPMSolver.material_water, + velocity=[0, 0, 0]) + +mpm.set_gravity((0, -20, 0)) + +mpm.add_wind_field( + lower_bound=[0, 0, 0], + upper_bound=[3, 10, 10], + force=[100, 0, 0] +) + +print("Initial particles:") +particles = mpm.particle_info() +print(f" Number of particles: {len(particles['position'])}") +print(f" Initial X positions range: [{particles['position'][:, 0].min():.3f}, {particles['position'][:, 0].max():.3f}]") +print(f" Initial mean X position: {particles['position'][:, 0].mean():.3f}") +print() + +print("Running simulation for 100 steps...") +for frame in range(100): + mpm.step(4e-3) + if frame % 20 == 0: + particles = mpm.particle_info() + mean_x = particles['position'][:, 0].mean() + mean_vx = particles['velocity'][:, 0].mean() + print(f" Step {frame:3d}: mean X = {mean_x:.3f}, mean Vx = {mean_vx:.3f}") + +print() +print("Final state:") +particles = mpm.particle_info() +print(f" Final X positions range: [{particles['position'][:, 0].min():.3f}, {particles['position'][:, 0].max():.3f}]") +print(f" Final mean X position: {particles['position'][:, 0].mean():.3f}") +print(f" Final mean X velocity: {particles['velocity'][:, 0].mean():.3f}") +print() +print("=== Wind Field Test Passed! ===") +print("Particles should have moved to the right due to the wind field.") diff --git a/demo/wind_field_visual_test.py b/demo/wind_field_visual_test.py new file mode 100644 index 0000000..392b444 --- /dev/null +++ b/demo/wind_field_visual_test.py @@ -0,0 +1,100 @@ +import sys +import os +import warnings + +warnings.filterwarnings("ignore") +os.environ["TI_LOG_LEVEL"] = "error" + +project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +sys.path.insert(0, project_root) + +import numpy as np + +local_packages = os.path.join(project_root, 'local_packages') +sys.path.insert(0, local_packages) + +import taichi as ti +from engine.mpm_solver import MPMSolver + +ti.init(arch=ti.cpu) + +print("=" * 60) +print("Wind Field Visual Test") +print("=" * 60) +print() +print("Scene setup:") +print(" - Simulation size: 10 units, 64x64x64 resolution") +print(" - Wind field: X from 0 to 3, Y from 0 to 10, Z from 0 to 10") +print(" - Wind force: (150, 0, 0) - pushing particles to the right") +print(" - Particles: water cube at (1, 4, 4) with size 1x1x1") +print(" - Gravity: (0, -30, 0)") +print() + +mpm = MPMSolver(res=(64, 64, 64), size=10) + +mpm.add_cube(lower_corner=[1, 4, 4], + cube_size=[1, 1, 1], + material=MPMSolver.material_water, + velocity=[0, 0, 0]) + +mpm.set_gravity((0, -30, 0)) + +mpm.add_wind_field( + lower_bound=[0, 0, 0], + upper_bound=[3, 10, 10], + force=[150, 0, 0] +) + +gui = ti.GUI("Wind Field Test", res=800, background_color=0x112F41) + +colors = np.array([0x068587, 0xED553B, 0xEEEEF0, 0xFFFF00], dtype=np.uint32) + +print("Press ESC to exit. Press SPACE to pause/resume.") +print() + +paused = False +frame = 0 + +while gui.running: + for e in gui.get_events(ti.GUI.PRESS): + if e.key == ti.GUI.ESCAPE: + gui.running = False + elif e.key == ti.GUI.SPACE: + paused = not paused + print(f"Paused: {paused}") + + if not paused: + mpm.step(4e-3) + frame += 1 + + particles = mpm.particle_info() + np_x = particles['position'] / 10.0 + + screen_x = ((np_x[:, 0] + np_x[:, 2]) / 2**0.5) - 0.2 + screen_y = np_x[:, 1] + screen_pos = np.stack([screen_x, screen_y], axis=-1) + + gui.circles(screen_pos, radius=2.0, color=colors[particles['material']]) + + gui.line((0.0, 0.0), (0.3, 0.0), color=0xFF0000, radius=3) + gui.line((0.3, 0.0), (0.3, 1.0), color=0xFF0000, radius=3) + + mean_x = particles['position'][:, 0].mean() + mean_vx = particles['velocity'][:, 0].mean() + + gui.text(f"Frame: {frame}", pos=(0.02, 0.98), color=0xFFFFFF, font_size=20) + gui.text(f"Mean X: {mean_x:.2f}", pos=(0.02, 0.93), color=0xFFFFFF, font_size=20) + gui.text(f"Mean Vx: {mean_vx:.2f}", pos=(0.02, 0.88), color=0xFFFFFF, font_size=20) + + wind_zone_text = "Wind Zone (X < 3)" + gui.text(wind_zone_text, pos=(0.02, 0.83), color=0xFF6666, font_size=16) + + if frame % 50 == 0 and not paused: + print(f"Frame {frame:4d}: Mean X = {mean_x:6.3f}, Mean Vx = {mean_vx:6.3f}") + + gui.show() + +print() +print("=" * 60) +print("Test completed!") +print("=" * 60) diff --git a/engine/mpm_solver.py b/engine/mpm_solver.py index b898c37..3098ec4 100644 --- a/engine/mpm_solver.py +++ b/engine/mpm_solver.py @@ -318,6 +318,26 @@ def set_gravity(self, g): assert len(g) == self.dim self.gravity[None] = g + def add_wind_field(self, lower_bound, upper_bound, force): + lower_bound = list(lower_bound) + upper_bound = list(upper_bound) + force = list(force) + assert len(lower_bound) == self.dim + assert len(upper_bound) == self.dim + assert len(force) == self.dim + + @ti.kernel + def apply_wind(t: ti.f32, dt: ti.f32, grid_v: ti.template()): + for I in ti.grouped(grid_v): + grid_pos = I * self.dx + inside = True + for d in ti.static(range(self.dim)): + inside = inside and (lower_bound[d] <= grid_pos[d] < upper_bound[d]) + if inside: + grid_v[I] += dt * ti.Vector(force) + + self.grid_postprocess.append(apply_wind) + @ti.func def sand_projection(self, sigma, p): sigma_out = ti.Matrix.zero(ti.f32, self.dim, self.dim) diff --git a/local_packages/bin/SPIRV-Tools-shared.dll b/local_packages/bin/SPIRV-Tools-shared.dll new file mode 100644 index 0000000..c1eda7f Binary files /dev/null and b/local_packages/bin/SPIRV-Tools-shared.dll differ diff --git a/local_packages/bin/f2py.exe b/local_packages/bin/f2py.exe new file mode 100644 index 0000000..957435c Binary files /dev/null and b/local_packages/bin/f2py.exe differ diff --git a/local_packages/bin/get_gprof b/local_packages/bin/get_gprof new file mode 100644 index 0000000..67fa079 --- /dev/null +++ b/local_packages/bin/get_gprof @@ -0,0 +1,75 @@ +#!E:\ProgramFiles\Anaconda\python.exe +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +''' +build profile graph for the given instance + +running: + $ get_gprof + +executes: + gprof2dot -f pstats .prof | dot -Tpng -o .call.png + +where: + are arguments for gprof2dot, such as "-n 5 -e 5" + is code to create the instance to profile + is the class of the instance (i.e. type(instance)) + +For example: + $ get_gprof -n 5 -e 1 "import numpy; numpy.array([1,2])" + +will create 'ndarray.call.png' with the profile graph for numpy.array([1,2]), +where '-n 5' eliminates nodes below 5% threshold, similarly '-e 1' eliminates +edges below 1% threshold +''' + +if __name__ == "__main__": + import sys + if len(sys.argv) < 2: + print ("Please provide an object instance (e.g. 'import math; math.pi')") + sys.exit() + # grab args for gprof2dot + args = sys.argv[1:-1] + args = ' '.join(args) + # last arg builds the object + obj = sys.argv[-1] + obj = obj.split(';') + # multi-line prep for generating an instance + for line in obj[:-1]: + exec(line) + # one-line generation of an instance + try: + obj = eval(obj[-1]) + except Exception: + print ("Error processing object instance") + sys.exit() + + # get object 'name' + objtype = type(obj) + name = getattr(objtype, '__name__', getattr(objtype, '__class__', objtype)) + + # profile dumping an object + import dill + import os + import cProfile + #name = os.path.splitext(os.path.basename(__file__))[0] + cProfile.run("dill.dumps(obj)", filename="%s.prof" % name) + msg = "gprof2dot -f pstats %s %s.prof | dot -Tpng -o %s.call.png" % (args, name, name) + try: + res = os.system(msg) + except Exception: + print ("Please verify install of 'gprof2dot' to view profile graphs") + if res: + print ("Please verify install of 'gprof2dot' to view profile graphs") + + # get stats + f_prof = "%s.prof" % name + import pstats + stats = pstats.Stats(f_prof, stream=sys.stdout) + stats.strip_dirs().sort_stats('cumtime') + stats.print_stats(20) #XXX: save to file instead of print top 20? + os.remove(f_prof) diff --git a/local_packages/bin/get_objgraph b/local_packages/bin/get_objgraph new file mode 100644 index 0000000..3170d0a --- /dev/null +++ b/local_packages/bin/get_objgraph @@ -0,0 +1,54 @@ +#!E:\ProgramFiles\Anaconda\python.exe +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +display the reference paths for objects in ``dill.types`` or a .pkl file + +Notes: + the generated image is useful in showing the pointer references in + objects that are or can be pickled. Any object in ``dill.objects`` + listed in ``dill.load_types(picklable=True, unpicklable=True)`` works. + +Examples:: + + $ get_objgraph ArrayType + Image generated as ArrayType.png +""" + +import dill as pickle +#pickle.debug.trace(True) +#import pickle + +# get all objects for testing +from dill import load_types +load_types(pickleable=True,unpickleable=True) +from dill import objects + +if __name__ == "__main__": + import sys + if len(sys.argv) != 2: + print ("Please provide exactly one file or type name (e.g. 'IntType')") + msg = "\n" + for objtype in list(objects.keys())[:40]: + msg += objtype + ', ' + print (msg + "...") + else: + objtype = str(sys.argv[-1]) + try: + obj = objects[objtype] + except KeyError: + obj = pickle.load(open(objtype,'rb')) + import os + objtype = os.path.splitext(objtype)[0] + try: + import objgraph + objgraph.show_refs(obj, filename=objtype+'.png') + except ImportError: + print ("Please install 'objgraph' to view object graphs") + + +# EOF diff --git a/local_packages/bin/markdown-it.exe b/local_packages/bin/markdown-it.exe new file mode 100644 index 0000000..5e6b7e1 Binary files /dev/null and b/local_packages/bin/markdown-it.exe differ diff --git a/local_packages/bin/numpy-config.exe b/local_packages/bin/numpy-config.exe new file mode 100644 index 0000000..584950a Binary files /dev/null and b/local_packages/bin/numpy-config.exe differ diff --git a/local_packages/bin/pygmentize.exe b/local_packages/bin/pygmentize.exe new file mode 100644 index 0000000..22d27ef Binary files /dev/null and b/local_packages/bin/pygmentize.exe differ diff --git a/local_packages/bin/ti.exe b/local_packages/bin/ti.exe new file mode 100644 index 0000000..af1fa5e Binary files /dev/null and b/local_packages/bin/ti.exe differ diff --git a/local_packages/bin/undill b/local_packages/bin/undill new file mode 100644 index 0000000..e9f0ae4 --- /dev/null +++ b/local_packages/bin/undill @@ -0,0 +1,22 @@ +#!E:\ProgramFiles\Anaconda\python.exe +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +unpickle the contents of a pickled object file + +Examples:: + + $ undill hello.pkl + ['hello', 'world'] +""" + +if __name__ == '__main__': + import sys + import dill + for file in sys.argv[1:]: + print (dill.load(open(file,'rb'))) + diff --git a/local_packages/colorama-0.4.6.dist-info/INSTALLER b/local_packages/colorama-0.4.6.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/local_packages/colorama-0.4.6.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/local_packages/colorama-0.4.6.dist-info/METADATA b/local_packages/colorama-0.4.6.dist-info/METADATA new file mode 100644 index 0000000..a1b5c57 --- /dev/null +++ b/local_packages/colorama-0.4.6.dist-info/METADATA @@ -0,0 +1,441 @@ +Metadata-Version: 2.1 +Name: colorama +Version: 0.4.6 +Summary: Cross-platform colored terminal text. +Project-URL: Homepage, https://github.com/tartley/colorama +Author-email: Jonathan Hartley +License-File: LICENSE.txt +Keywords: ansi,color,colour,crossplatform,terminal,text,windows,xplatform +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Terminals +Requires-Python: !=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7 +Description-Content-Type: text/x-rst + +.. image:: https://img.shields.io/pypi/v/colorama.svg + :target: https://pypi.org/project/colorama/ + :alt: Latest Version + +.. image:: https://img.shields.io/pypi/pyversions/colorama.svg + :target: https://pypi.org/project/colorama/ + :alt: Supported Python versions + +.. image:: https://github.com/tartley/colorama/actions/workflows/test.yml/badge.svg + :target: https://github.com/tartley/colorama/actions/workflows/test.yml + :alt: Build Status + +Colorama +======== + +Makes ANSI escape character sequences (for producing colored terminal text and +cursor positioning) work under MS Windows. + +.. |donate| image:: https://www.paypalobjects.com/en_US/i/btn/btn_donate_SM.gif + :target: https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=2MZ9D2GMLYCUJ&item_name=Colorama¤cy_code=USD + :alt: Donate with Paypal + +`PyPI for releases `_ | +`Github for source `_ | +`Colorama for enterprise on Tidelift `_ + +If you find Colorama useful, please |donate| to the authors. Thank you! + +Installation +------------ + +Tested on CPython 2.7, 3.7, 3.8, 3.9 and 3.10 and Pypy 2.7 and 3.8. + +No requirements other than the standard library. + +.. code-block:: bash + + pip install colorama + # or + conda install -c anaconda colorama + +Description +----------- + +ANSI escape character sequences have long been used to produce colored terminal +text and cursor positioning on Unix and Macs. Colorama makes this work on +Windows, too, by wrapping ``stdout``, stripping ANSI sequences it finds (which +would appear as gobbledygook in the output), and converting them into the +appropriate win32 calls to modify the state of the terminal. On other platforms, +Colorama does nothing. + +This has the upshot of providing a simple cross-platform API for printing +colored terminal text from Python, and has the happy side-effect that existing +applications or libraries which use ANSI sequences to produce colored output on +Linux or Macs can now also work on Windows, simply by calling +``colorama.just_fix_windows_console()`` (since v0.4.6) or ``colorama.init()`` +(all versions, but may have other side-effects – see below). + +An alternative approach is to install ``ansi.sys`` on Windows machines, which +provides the same behaviour for all applications running in terminals. Colorama +is intended for situations where that isn't easy (e.g., maybe your app doesn't +have an installer.) + +Demo scripts in the source code repository print some colored text using +ANSI sequences. Compare their output under Gnome-terminal's built in ANSI +handling, versus on Windows Command-Prompt using Colorama: + +.. image:: https://github.com/tartley/colorama/raw/master/screenshots/ubuntu-demo.png + :width: 661 + :height: 357 + :alt: ANSI sequences on Ubuntu under gnome-terminal. + +.. image:: https://github.com/tartley/colorama/raw/master/screenshots/windows-demo.png + :width: 668 + :height: 325 + :alt: Same ANSI sequences on Windows, using Colorama. + +These screenshots show that, on Windows, Colorama does not support ANSI 'dim +text'; it looks the same as 'normal text'. + +Usage +----- + +Initialisation +.............. + +If the only thing you want from Colorama is to get ANSI escapes to work on +Windows, then run: + +.. code-block:: python + + from colorama import just_fix_windows_console + just_fix_windows_console() + +If you're on a recent version of Windows 10 or better, and your stdout/stderr +are pointing to a Windows console, then this will flip the magic configuration +switch to enable Windows' built-in ANSI support. + +If you're on an older version of Windows, and your stdout/stderr are pointing to +a Windows console, then this will wrap ``sys.stdout`` and/or ``sys.stderr`` in a +magic file object that intercepts ANSI escape sequences and issues the +appropriate Win32 calls to emulate them. + +In all other circumstances, it does nothing whatsoever. Basically the idea is +that this makes Windows act like Unix with respect to ANSI escape handling. + +It's safe to call this function multiple times. It's safe to call this function +on non-Windows platforms, but it won't do anything. It's safe to call this +function when one or both of your stdout/stderr are redirected to a file – it +won't do anything to those streams. + +Alternatively, you can use the older interface with more features (but also more +potential footguns): + +.. code-block:: python + + from colorama import init + init() + +This does the same thing as ``just_fix_windows_console``, except for the +following differences: + +- It's not safe to call ``init`` multiple times; you can end up with multiple + layers of wrapping and broken ANSI support. + +- Colorama will apply a heuristic to guess whether stdout/stderr support ANSI, + and if it thinks they don't, then it will wrap ``sys.stdout`` and + ``sys.stderr`` in a magic file object that strips out ANSI escape sequences + before printing them. This happens on all platforms, and can be convenient if + you want to write your code to emit ANSI escape sequences unconditionally, and + let Colorama decide whether they should actually be output. But note that + Colorama's heuristic is not particularly clever. + +- ``init`` also accepts explicit keyword args to enable/disable various + functionality – see below. + +To stop using Colorama before your program exits, simply call ``deinit()``. +This will restore ``stdout`` and ``stderr`` to their original values, so that +Colorama is disabled. To resume using Colorama again, call ``reinit()``; it is +cheaper than calling ``init()`` again (but does the same thing). + +Most users should depend on ``colorama >= 0.4.6``, and use +``just_fix_windows_console``. The old ``init`` interface will be supported +indefinitely for backwards compatibility, but we don't plan to fix any issues +with it, also for backwards compatibility. + +Colored Output +.............. + +Cross-platform printing of colored text can then be done using Colorama's +constant shorthand for ANSI escape sequences. These are deliberately +rudimentary, see below. + +.. code-block:: python + + from colorama import Fore, Back, Style + print(Fore.RED + 'some red text') + print(Back.GREEN + 'and with a green background') + print(Style.DIM + 'and in dim text') + print(Style.RESET_ALL) + print('back to normal now') + +...or simply by manually printing ANSI sequences from your own code: + +.. code-block:: python + + print('\033[31m' + 'some red text') + print('\033[39m') # and reset to default color + +...or, Colorama can be used in conjunction with existing ANSI libraries +such as the venerable `Termcolor `_ +the fabulous `Blessings `_, +or the incredible `_Rich `_. + +If you wish Colorama's Fore, Back and Style constants were more capable, +then consider using one of the above highly capable libraries to generate +colors, etc, and use Colorama just for its primary purpose: to convert +those ANSI sequences to also work on Windows: + +SIMILARLY, do not send PRs adding the generation of new ANSI types to Colorama. +We are only interested in converting ANSI codes to win32 API calls, not +shortcuts like the above to generate ANSI characters. + +.. code-block:: python + + from colorama import just_fix_windows_console + from termcolor import colored + + # use Colorama to make Termcolor work on Windows too + just_fix_windows_console() + + # then use Termcolor for all colored text output + print(colored('Hello, World!', 'green', 'on_red')) + +Available formatting constants are:: + + Fore: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET. + Back: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET. + Style: DIM, NORMAL, BRIGHT, RESET_ALL + +``Style.RESET_ALL`` resets foreground, background, and brightness. Colorama will +perform this reset automatically on program exit. + +These are fairly well supported, but not part of the standard:: + + Fore: LIGHTBLACK_EX, LIGHTRED_EX, LIGHTGREEN_EX, LIGHTYELLOW_EX, LIGHTBLUE_EX, LIGHTMAGENTA_EX, LIGHTCYAN_EX, LIGHTWHITE_EX + Back: LIGHTBLACK_EX, LIGHTRED_EX, LIGHTGREEN_EX, LIGHTYELLOW_EX, LIGHTBLUE_EX, LIGHTMAGENTA_EX, LIGHTCYAN_EX, LIGHTWHITE_EX + +Cursor Positioning +.................. + +ANSI codes to reposition the cursor are supported. See ``demos/demo06.py`` for +an example of how to generate them. + +Init Keyword Args +................. + +``init()`` accepts some ``**kwargs`` to override default behaviour. + +init(autoreset=False): + If you find yourself repeatedly sending reset sequences to turn off color + changes at the end of every print, then ``init(autoreset=True)`` will + automate that: + + .. code-block:: python + + from colorama import init + init(autoreset=True) + print(Fore.RED + 'some red text') + print('automatically back to default color again') + +init(strip=None): + Pass ``True`` or ``False`` to override whether ANSI codes should be + stripped from the output. The default behaviour is to strip if on Windows + or if output is redirected (not a tty). + +init(convert=None): + Pass ``True`` or ``False`` to override whether to convert ANSI codes in the + output into win32 calls. The default behaviour is to convert if on Windows + and output is to a tty (terminal). + +init(wrap=True): + On Windows, Colorama works by replacing ``sys.stdout`` and ``sys.stderr`` + with proxy objects, which override the ``.write()`` method to do their work. + If this wrapping causes you problems, then this can be disabled by passing + ``init(wrap=False)``. The default behaviour is to wrap if ``autoreset`` or + ``strip`` or ``convert`` are True. + + When wrapping is disabled, colored printing on non-Windows platforms will + continue to work as normal. To do cross-platform colored output, you can + use Colorama's ``AnsiToWin32`` proxy directly: + + .. code-block:: python + + import sys + from colorama import init, AnsiToWin32 + init(wrap=False) + stream = AnsiToWin32(sys.stderr).stream + + # Python 2 + print >>stream, Fore.BLUE + 'blue text on stderr' + + # Python 3 + print(Fore.BLUE + 'blue text on stderr', file=stream) + +Recognised ANSI Sequences +......................... + +ANSI sequences generally take the form:: + + ESC [ ; ... + +Where ```` is an integer, and ```` is a single letter. Zero or +more params are passed to a ````. If no params are passed, it is +generally synonymous with passing a single zero. No spaces exist in the +sequence; they have been inserted here simply to read more easily. + +The only ANSI sequences that Colorama converts into win32 calls are:: + + ESC [ 0 m # reset all (colors and brightness) + ESC [ 1 m # bright + ESC [ 2 m # dim (looks same as normal brightness) + ESC [ 22 m # normal brightness + + # FOREGROUND: + ESC [ 30 m # black + ESC [ 31 m # red + ESC [ 32 m # green + ESC [ 33 m # yellow + ESC [ 34 m # blue + ESC [ 35 m # magenta + ESC [ 36 m # cyan + ESC [ 37 m # white + ESC [ 39 m # reset + + # BACKGROUND + ESC [ 40 m # black + ESC [ 41 m # red + ESC [ 42 m # green + ESC [ 43 m # yellow + ESC [ 44 m # blue + ESC [ 45 m # magenta + ESC [ 46 m # cyan + ESC [ 47 m # white + ESC [ 49 m # reset + + # cursor positioning + ESC [ y;x H # position cursor at x across, y down + ESC [ y;x f # position cursor at x across, y down + ESC [ n A # move cursor n lines up + ESC [ n B # move cursor n lines down + ESC [ n C # move cursor n characters forward + ESC [ n D # move cursor n characters backward + + # clear the screen + ESC [ mode J # clear the screen + + # clear the line + ESC [ mode K # clear the line + +Multiple numeric params to the ``'m'`` command can be combined into a single +sequence:: + + ESC [ 36 ; 45 ; 1 m # bright cyan text on magenta background + +All other ANSI sequences of the form ``ESC [ ; ... `` +are silently stripped from the output on Windows. + +Any other form of ANSI sequence, such as single-character codes or alternative +initial characters, are not recognised or stripped. It would be cool to add +them though. Let me know if it would be useful for you, via the Issues on +GitHub. + +Status & Known Problems +----------------------- + +I've personally only tested it on Windows XP (CMD, Console2), Ubuntu +(gnome-terminal, xterm), and OS X. + +Some valid ANSI sequences aren't recognised. + +If you're hacking on the code, see `README-hacking.md`_. ESPECIALLY, see the +explanation there of why we do not want PRs that allow Colorama to generate new +types of ANSI codes. + +See outstanding issues and wish-list: +https://github.com/tartley/colorama/issues + +If anything doesn't work for you, or doesn't do what you expected or hoped for, +I'd love to hear about it on that issues list, would be delighted by patches, +and would be happy to grant commit access to anyone who submits a working patch +or two. + +.. _README-hacking.md: README-hacking.md + +License +------- + +Copyright Jonathan Hartley & Arnon Yaari, 2013-2020. BSD 3-Clause license; see +LICENSE file. + +Professional support +-------------------- + +.. |tideliftlogo| image:: https://cdn2.hubspot.net/hubfs/4008838/website/logos/logos_for_download/Tidelift_primary-shorthand-logo.png + :alt: Tidelift + :target: https://tidelift.com/subscription/pkg/pypi-colorama?utm_source=pypi-colorama&utm_medium=referral&utm_campaign=readme + +.. list-table:: + :widths: 10 100 + + * - |tideliftlogo| + - Professional support for colorama is available as part of the + `Tidelift Subscription`_. + Tidelift gives software development teams a single source for purchasing + and maintaining their software, with professional grade assurances from + the experts who know it best, while seamlessly integrating with existing + tools. + +.. _Tidelift Subscription: https://tidelift.com/subscription/pkg/pypi-colorama?utm_source=pypi-colorama&utm_medium=referral&utm_campaign=readme + +Thanks +------ + +See the CHANGELOG for more thanks! + +* Marc Schlaich (schlamar) for a ``setup.py`` fix for Python2.5. +* Marc Abramowitz, reported & fixed a crash on exit with closed ``stdout``, + providing a solution to issue #7's setuptools/distutils debate, + and other fixes. +* User 'eryksun', for guidance on correctly instantiating ``ctypes.windll``. +* Matthew McCormick for politely pointing out a longstanding crash on non-Win. +* Ben Hoyt, for a magnificent fix under 64-bit Windows. +* Jesse at Empty Square for submitting a fix for examples in the README. +* User 'jamessp', an observant documentation fix for cursor positioning. +* User 'vaal1239', Dave Mckee & Lackner Kristof for a tiny but much-needed Win7 + fix. +* Julien Stuyck, for wisely suggesting Python3 compatible updates to README. +* Daniel Griffith for multiple fabulous patches. +* Oscar Lesta for a valuable fix to stop ANSI chars being sent to non-tty + output. +* Roger Binns, for many suggestions, valuable feedback, & bug reports. +* Tim Golden for thought and much appreciated feedback on the initial idea. +* User 'Zearin' for updates to the README file. +* John Szakmeister for adding support for light colors +* Charles Merriam for adding documentation to demos +* Jurko for a fix on 64-bit Windows CPython2.5 w/o ctypes +* Florian Bruhin for a fix when stdout or stderr are None +* Thomas Weininger for fixing ValueError on Windows +* Remi Rampin for better Github integration and fixes to the README file +* Simeon Visser for closing a file handle using 'with' and updating classifiers + to include Python 3.3 and 3.4 +* Andy Neff for fixing RESET of LIGHT_EX colors. +* Jonathan Hartley for the initial idea and implementation. diff --git a/local_packages/colorama-0.4.6.dist-info/RECORD b/local_packages/colorama-0.4.6.dist-info/RECORD new file mode 100644 index 0000000..cd6b130 --- /dev/null +++ b/local_packages/colorama-0.4.6.dist-info/RECORD @@ -0,0 +1,31 @@ +colorama-0.4.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +colorama-0.4.6.dist-info/METADATA,sha256=e67SnrUMOym9sz_4TjF3vxvAV4T3aF7NyqRHHH3YEMw,17158 +colorama-0.4.6.dist-info/RECORD,, +colorama-0.4.6.dist-info/WHEEL,sha256=cdcF4Fbd0FPtw2EMIOwH-3rSOTUdTCeOSXRMD1iLUb8,105 +colorama-0.4.6.dist-info/licenses/LICENSE.txt,sha256=ysNcAmhuXQSlpxQL-zs25zrtSWZW6JEQLkKIhteTAxg,1491 +colorama/__init__.py,sha256=wePQA4U20tKgYARySLEC047ucNX-g8pRLpYBuiHlLb8,266 +colorama/__pycache__/__init__.cpython-312.pyc,, +colorama/__pycache__/ansi.cpython-312.pyc,, +colorama/__pycache__/ansitowin32.cpython-312.pyc,, +colorama/__pycache__/initialise.cpython-312.pyc,, +colorama/__pycache__/win32.cpython-312.pyc,, +colorama/__pycache__/winterm.cpython-312.pyc,, +colorama/ansi.py,sha256=Top4EeEuaQdBWdteKMEcGOTeKeF19Q-Wo_6_Cj5kOzQ,2522 +colorama/ansitowin32.py,sha256=vPNYa3OZbxjbuFyaVo0Tmhmy1FZ1lKMWCnT7odXpItk,11128 +colorama/initialise.py,sha256=-hIny86ClXo39ixh5iSCfUIa2f_h_bgKRDW7gqs-KLU,3325 +colorama/tests/__init__.py,sha256=MkgPAEzGQd-Rq0w0PZXSX2LadRWhUECcisJY8lSrm4Q,75 +colorama/tests/__pycache__/__init__.cpython-312.pyc,, +colorama/tests/__pycache__/ansi_test.cpython-312.pyc,, +colorama/tests/__pycache__/ansitowin32_test.cpython-312.pyc,, +colorama/tests/__pycache__/initialise_test.cpython-312.pyc,, +colorama/tests/__pycache__/isatty_test.cpython-312.pyc,, +colorama/tests/__pycache__/utils.cpython-312.pyc,, +colorama/tests/__pycache__/winterm_test.cpython-312.pyc,, +colorama/tests/ansi_test.py,sha256=FeViDrUINIZcr505PAxvU4AjXz1asEiALs9GXMhwRaE,2839 +colorama/tests/ansitowin32_test.py,sha256=RN7AIhMJ5EqDsYaCjVo-o4u8JzDD4ukJbmevWKS70rY,10678 +colorama/tests/initialise_test.py,sha256=BbPy-XfyHwJ6zKozuQOvNvQZzsx9vdb_0bYXn7hsBTc,6741 +colorama/tests/isatty_test.py,sha256=Pg26LRpv0yQDB5Ac-sxgVXG7hsA1NYvapFgApZfYzZg,1866 +colorama/tests/utils.py,sha256=1IIRylG39z5-dzq09R_ngufxyPZxgldNbrxKxUGwGKE,1079 +colorama/tests/winterm_test.py,sha256=qoWFPEjym5gm2RuMwpf3pOis3a5r_PJZFCzK254JL8A,3709 +colorama/win32.py,sha256=YQOKwMTwtGBbsY4dL5HYTvwTeP9wIQra5MvPNddpxZs,6181 +colorama/winterm.py,sha256=XCQFDHjPi6AHYNdZwy0tA02H-Jh48Jp-HvCjeLeLp3U,7134 diff --git a/local_packages/colorama-0.4.6.dist-info/WHEEL b/local_packages/colorama-0.4.6.dist-info/WHEEL new file mode 100644 index 0000000..d79189f --- /dev/null +++ b/local_packages/colorama-0.4.6.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.11.1 +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any diff --git a/local_packages/colorama-0.4.6.dist-info/licenses/LICENSE.txt b/local_packages/colorama-0.4.6.dist-info/licenses/LICENSE.txt new file mode 100644 index 0000000..3105888 --- /dev/null +++ b/local_packages/colorama-0.4.6.dist-info/licenses/LICENSE.txt @@ -0,0 +1,27 @@ +Copyright (c) 2010 Jonathan Hartley +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holders, nor those of its contributors + may be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/local_packages/colorama/__init__.py b/local_packages/colorama/__init__.py new file mode 100644 index 0000000..383101c --- /dev/null +++ b/local_packages/colorama/__init__.py @@ -0,0 +1,7 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +from .initialise import init, deinit, reinit, colorama_text, just_fix_windows_console +from .ansi import Fore, Back, Style, Cursor +from .ansitowin32 import AnsiToWin32 + +__version__ = '0.4.6' + diff --git a/local_packages/colorama/ansi.py b/local_packages/colorama/ansi.py new file mode 100644 index 0000000..11ec695 --- /dev/null +++ b/local_packages/colorama/ansi.py @@ -0,0 +1,102 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +''' +This module generates ANSI character codes to printing colors to terminals. +See: http://en.wikipedia.org/wiki/ANSI_escape_code +''' + +CSI = '\033[' +OSC = '\033]' +BEL = '\a' + + +def code_to_chars(code): + return CSI + str(code) + 'm' + +def set_title(title): + return OSC + '2;' + title + BEL + +def clear_screen(mode=2): + return CSI + str(mode) + 'J' + +def clear_line(mode=2): + return CSI + str(mode) + 'K' + + +class AnsiCodes(object): + def __init__(self): + # the subclasses declare class attributes which are numbers. + # Upon instantiation we define instance attributes, which are the same + # as the class attributes but wrapped with the ANSI escape sequence + for name in dir(self): + if not name.startswith('_'): + value = getattr(self, name) + setattr(self, name, code_to_chars(value)) + + +class AnsiCursor(object): + def UP(self, n=1): + return CSI + str(n) + 'A' + def DOWN(self, n=1): + return CSI + str(n) + 'B' + def FORWARD(self, n=1): + return CSI + str(n) + 'C' + def BACK(self, n=1): + return CSI + str(n) + 'D' + def POS(self, x=1, y=1): + return CSI + str(y) + ';' + str(x) + 'H' + + +class AnsiFore(AnsiCodes): + BLACK = 30 + RED = 31 + GREEN = 32 + YELLOW = 33 + BLUE = 34 + MAGENTA = 35 + CYAN = 36 + WHITE = 37 + RESET = 39 + + # These are fairly well supported, but not part of the standard. + LIGHTBLACK_EX = 90 + LIGHTRED_EX = 91 + LIGHTGREEN_EX = 92 + LIGHTYELLOW_EX = 93 + LIGHTBLUE_EX = 94 + LIGHTMAGENTA_EX = 95 + LIGHTCYAN_EX = 96 + LIGHTWHITE_EX = 97 + + +class AnsiBack(AnsiCodes): + BLACK = 40 + RED = 41 + GREEN = 42 + YELLOW = 43 + BLUE = 44 + MAGENTA = 45 + CYAN = 46 + WHITE = 47 + RESET = 49 + + # These are fairly well supported, but not part of the standard. + LIGHTBLACK_EX = 100 + LIGHTRED_EX = 101 + LIGHTGREEN_EX = 102 + LIGHTYELLOW_EX = 103 + LIGHTBLUE_EX = 104 + LIGHTMAGENTA_EX = 105 + LIGHTCYAN_EX = 106 + LIGHTWHITE_EX = 107 + + +class AnsiStyle(AnsiCodes): + BRIGHT = 1 + DIM = 2 + NORMAL = 22 + RESET_ALL = 0 + +Fore = AnsiFore() +Back = AnsiBack() +Style = AnsiStyle() +Cursor = AnsiCursor() diff --git a/local_packages/colorama/ansitowin32.py b/local_packages/colorama/ansitowin32.py new file mode 100644 index 0000000..abf209e --- /dev/null +++ b/local_packages/colorama/ansitowin32.py @@ -0,0 +1,277 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +import re +import sys +import os + +from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style, BEL +from .winterm import enable_vt_processing, WinTerm, WinColor, WinStyle +from .win32 import windll, winapi_test + + +winterm = None +if windll is not None: + winterm = WinTerm() + + +class StreamWrapper(object): + ''' + Wraps a stream (such as stdout), acting as a transparent proxy for all + attribute access apart from method 'write()', which is delegated to our + Converter instance. + ''' + def __init__(self, wrapped, converter): + # double-underscore everything to prevent clashes with names of + # attributes on the wrapped stream object. + self.__wrapped = wrapped + self.__convertor = converter + + def __getattr__(self, name): + return getattr(self.__wrapped, name) + + def __enter__(self, *args, **kwargs): + # special method lookup bypasses __getattr__/__getattribute__, see + # https://stackoverflow.com/questions/12632894/why-doesnt-getattr-work-with-exit + # thus, contextlib magic methods are not proxied via __getattr__ + return self.__wrapped.__enter__(*args, **kwargs) + + def __exit__(self, *args, **kwargs): + return self.__wrapped.__exit__(*args, **kwargs) + + def __setstate__(self, state): + self.__dict__ = state + + def __getstate__(self): + return self.__dict__ + + def write(self, text): + self.__convertor.write(text) + + def isatty(self): + stream = self.__wrapped + if 'PYCHARM_HOSTED' in os.environ: + if stream is not None and (stream is sys.__stdout__ or stream is sys.__stderr__): + return True + try: + stream_isatty = stream.isatty + except AttributeError: + return False + else: + return stream_isatty() + + @property + def closed(self): + stream = self.__wrapped + try: + return stream.closed + # AttributeError in the case that the stream doesn't support being closed + # ValueError for the case that the stream has already been detached when atexit runs + except (AttributeError, ValueError): + return True + + +class AnsiToWin32(object): + ''' + Implements a 'write()' method which, on Windows, will strip ANSI character + sequences from the text, and if outputting to a tty, will convert them into + win32 function calls. + ''' + ANSI_CSI_RE = re.compile('\001?\033\\[((?:\\d|;)*)([a-zA-Z])\002?') # Control Sequence Introducer + ANSI_OSC_RE = re.compile('\001?\033\\]([^\a]*)(\a)\002?') # Operating System Command + + def __init__(self, wrapped, convert=None, strip=None, autoreset=False): + # The wrapped stream (normally sys.stdout or sys.stderr) + self.wrapped = wrapped + + # should we reset colors to defaults after every .write() + self.autoreset = autoreset + + # create the proxy wrapping our output stream + self.stream = StreamWrapper(wrapped, self) + + on_windows = os.name == 'nt' + # We test if the WinAPI works, because even if we are on Windows + # we may be using a terminal that doesn't support the WinAPI + # (e.g. Cygwin Terminal). In this case it's up to the terminal + # to support the ANSI codes. + conversion_supported = on_windows and winapi_test() + try: + fd = wrapped.fileno() + except Exception: + fd = -1 + system_has_native_ansi = not on_windows or enable_vt_processing(fd) + have_tty = not self.stream.closed and self.stream.isatty() + need_conversion = conversion_supported and not system_has_native_ansi + + # should we strip ANSI sequences from our output? + if strip is None: + strip = need_conversion or not have_tty + self.strip = strip + + # should we should convert ANSI sequences into win32 calls? + if convert is None: + convert = need_conversion and have_tty + self.convert = convert + + # dict of ansi codes to win32 functions and parameters + self.win32_calls = self.get_win32_calls() + + # are we wrapping stderr? + self.on_stderr = self.wrapped is sys.stderr + + def should_wrap(self): + ''' + True if this class is actually needed. If false, then the output + stream will not be affected, nor will win32 calls be issued, so + wrapping stdout is not actually required. This will generally be + False on non-Windows platforms, unless optional functionality like + autoreset has been requested using kwargs to init() + ''' + return self.convert or self.strip or self.autoreset + + def get_win32_calls(self): + if self.convert and winterm: + return { + AnsiStyle.RESET_ALL: (winterm.reset_all, ), + AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT), + AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL), + AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL), + AnsiFore.BLACK: (winterm.fore, WinColor.BLACK), + AnsiFore.RED: (winterm.fore, WinColor.RED), + AnsiFore.GREEN: (winterm.fore, WinColor.GREEN), + AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW), + AnsiFore.BLUE: (winterm.fore, WinColor.BLUE), + AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA), + AnsiFore.CYAN: (winterm.fore, WinColor.CYAN), + AnsiFore.WHITE: (winterm.fore, WinColor.GREY), + AnsiFore.RESET: (winterm.fore, ), + AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True), + AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True), + AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True), + AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True), + AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True), + AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True), + AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True), + AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True), + AnsiBack.BLACK: (winterm.back, WinColor.BLACK), + AnsiBack.RED: (winterm.back, WinColor.RED), + AnsiBack.GREEN: (winterm.back, WinColor.GREEN), + AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW), + AnsiBack.BLUE: (winterm.back, WinColor.BLUE), + AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA), + AnsiBack.CYAN: (winterm.back, WinColor.CYAN), + AnsiBack.WHITE: (winterm.back, WinColor.GREY), + AnsiBack.RESET: (winterm.back, ), + AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True), + AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True), + AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True), + AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True), + AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True), + AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True), + AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True), + AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True), + } + return dict() + + def write(self, text): + if self.strip or self.convert: + self.write_and_convert(text) + else: + self.wrapped.write(text) + self.wrapped.flush() + if self.autoreset: + self.reset_all() + + + def reset_all(self): + if self.convert: + self.call_win32('m', (0,)) + elif not self.strip and not self.stream.closed: + self.wrapped.write(Style.RESET_ALL) + + + def write_and_convert(self, text): + ''' + Write the given text to our wrapped stream, stripping any ANSI + sequences from the text, and optionally converting them into win32 + calls. + ''' + cursor = 0 + text = self.convert_osc(text) + for match in self.ANSI_CSI_RE.finditer(text): + start, end = match.span() + self.write_plain_text(text, cursor, start) + self.convert_ansi(*match.groups()) + cursor = end + self.write_plain_text(text, cursor, len(text)) + + + def write_plain_text(self, text, start, end): + if start < end: + self.wrapped.write(text[start:end]) + self.wrapped.flush() + + + def convert_ansi(self, paramstring, command): + if self.convert: + params = self.extract_params(command, paramstring) + self.call_win32(command, params) + + + def extract_params(self, command, paramstring): + if command in 'Hf': + params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';')) + while len(params) < 2: + # defaults: + params = params + (1,) + else: + params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0) + if len(params) == 0: + # defaults: + if command in 'JKm': + params = (0,) + elif command in 'ABCD': + params = (1,) + + return params + + + def call_win32(self, command, params): + if command == 'm': + for param in params: + if param in self.win32_calls: + func_args = self.win32_calls[param] + func = func_args[0] + args = func_args[1:] + kwargs = dict(on_stderr=self.on_stderr) + func(*args, **kwargs) + elif command in 'J': + winterm.erase_screen(params[0], on_stderr=self.on_stderr) + elif command in 'K': + winterm.erase_line(params[0], on_stderr=self.on_stderr) + elif command in 'Hf': # cursor position - absolute + winterm.set_cursor_position(params, on_stderr=self.on_stderr) + elif command in 'ABCD': # cursor position - relative + n = params[0] + # A - up, B - down, C - forward, D - back + x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command] + winterm.cursor_adjust(x, y, on_stderr=self.on_stderr) + + + def convert_osc(self, text): + for match in self.ANSI_OSC_RE.finditer(text): + start, end = match.span() + text = text[:start] + text[end:] + paramstring, command = match.groups() + if command == BEL: + if paramstring.count(";") == 1: + params = paramstring.split(";") + # 0 - change title and icon (we will only change title) + # 1 - change icon (we don't support this) + # 2 - change title + if params[0] in '02': + winterm.set_title(params[1]) + return text + + + def flush(self): + self.wrapped.flush() diff --git a/local_packages/colorama/initialise.py b/local_packages/colorama/initialise.py new file mode 100644 index 0000000..d5fd4b7 --- /dev/null +++ b/local_packages/colorama/initialise.py @@ -0,0 +1,121 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +import atexit +import contextlib +import sys + +from .ansitowin32 import AnsiToWin32 + + +def _wipe_internal_state_for_tests(): + global orig_stdout, orig_stderr + orig_stdout = None + orig_stderr = None + + global wrapped_stdout, wrapped_stderr + wrapped_stdout = None + wrapped_stderr = None + + global atexit_done + atexit_done = False + + global fixed_windows_console + fixed_windows_console = False + + try: + # no-op if it wasn't registered + atexit.unregister(reset_all) + except AttributeError: + # python 2: no atexit.unregister. Oh well, we did our best. + pass + + +def reset_all(): + if AnsiToWin32 is not None: # Issue #74: objects might become None at exit + AnsiToWin32(orig_stdout).reset_all() + + +def init(autoreset=False, convert=None, strip=None, wrap=True): + + if not wrap and any([autoreset, convert, strip]): + raise ValueError('wrap=False conflicts with any other arg=True') + + global wrapped_stdout, wrapped_stderr + global orig_stdout, orig_stderr + + orig_stdout = sys.stdout + orig_stderr = sys.stderr + + if sys.stdout is None: + wrapped_stdout = None + else: + sys.stdout = wrapped_stdout = \ + wrap_stream(orig_stdout, convert, strip, autoreset, wrap) + if sys.stderr is None: + wrapped_stderr = None + else: + sys.stderr = wrapped_stderr = \ + wrap_stream(orig_stderr, convert, strip, autoreset, wrap) + + global atexit_done + if not atexit_done: + atexit.register(reset_all) + atexit_done = True + + +def deinit(): + if orig_stdout is not None: + sys.stdout = orig_stdout + if orig_stderr is not None: + sys.stderr = orig_stderr + + +def just_fix_windows_console(): + global fixed_windows_console + + if sys.platform != "win32": + return + if fixed_windows_console: + return + if wrapped_stdout is not None or wrapped_stderr is not None: + # Someone already ran init() and it did stuff, so we won't second-guess them + return + + # On newer versions of Windows, AnsiToWin32.__init__ will implicitly enable the + # native ANSI support in the console as a side-effect. We only need to actually + # replace sys.stdout/stderr if we're in the old-style conversion mode. + new_stdout = AnsiToWin32(sys.stdout, convert=None, strip=None, autoreset=False) + if new_stdout.convert: + sys.stdout = new_stdout + new_stderr = AnsiToWin32(sys.stderr, convert=None, strip=None, autoreset=False) + if new_stderr.convert: + sys.stderr = new_stderr + + fixed_windows_console = True + +@contextlib.contextmanager +def colorama_text(*args, **kwargs): + init(*args, **kwargs) + try: + yield + finally: + deinit() + + +def reinit(): + if wrapped_stdout is not None: + sys.stdout = wrapped_stdout + if wrapped_stderr is not None: + sys.stderr = wrapped_stderr + + +def wrap_stream(stream, convert, strip, autoreset, wrap): + if wrap: + wrapper = AnsiToWin32(stream, + convert=convert, strip=strip, autoreset=autoreset) + if wrapper.should_wrap(): + stream = wrapper.stream + return stream + + +# Use this for initial setup as well, to reduce code duplication +_wipe_internal_state_for_tests() diff --git a/local_packages/colorama/tests/__init__.py b/local_packages/colorama/tests/__init__.py new file mode 100644 index 0000000..8c5661e --- /dev/null +++ b/local_packages/colorama/tests/__init__.py @@ -0,0 +1 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. diff --git a/local_packages/colorama/tests/ansi_test.py b/local_packages/colorama/tests/ansi_test.py new file mode 100644 index 0000000..0a20c80 --- /dev/null +++ b/local_packages/colorama/tests/ansi_test.py @@ -0,0 +1,76 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +import sys +from unittest import TestCase, main + +from ..ansi import Back, Fore, Style +from ..ansitowin32 import AnsiToWin32 + +stdout_orig = sys.stdout +stderr_orig = sys.stderr + + +class AnsiTest(TestCase): + + def setUp(self): + # sanity check: stdout should be a file or StringIO object. + # It will only be AnsiToWin32 if init() has previously wrapped it + self.assertNotEqual(type(sys.stdout), AnsiToWin32) + self.assertNotEqual(type(sys.stderr), AnsiToWin32) + + def tearDown(self): + sys.stdout = stdout_orig + sys.stderr = stderr_orig + + + def testForeAttributes(self): + self.assertEqual(Fore.BLACK, '\033[30m') + self.assertEqual(Fore.RED, '\033[31m') + self.assertEqual(Fore.GREEN, '\033[32m') + self.assertEqual(Fore.YELLOW, '\033[33m') + self.assertEqual(Fore.BLUE, '\033[34m') + self.assertEqual(Fore.MAGENTA, '\033[35m') + self.assertEqual(Fore.CYAN, '\033[36m') + self.assertEqual(Fore.WHITE, '\033[37m') + self.assertEqual(Fore.RESET, '\033[39m') + + # Check the light, extended versions. + self.assertEqual(Fore.LIGHTBLACK_EX, '\033[90m') + self.assertEqual(Fore.LIGHTRED_EX, '\033[91m') + self.assertEqual(Fore.LIGHTGREEN_EX, '\033[92m') + self.assertEqual(Fore.LIGHTYELLOW_EX, '\033[93m') + self.assertEqual(Fore.LIGHTBLUE_EX, '\033[94m') + self.assertEqual(Fore.LIGHTMAGENTA_EX, '\033[95m') + self.assertEqual(Fore.LIGHTCYAN_EX, '\033[96m') + self.assertEqual(Fore.LIGHTWHITE_EX, '\033[97m') + + + def testBackAttributes(self): + self.assertEqual(Back.BLACK, '\033[40m') + self.assertEqual(Back.RED, '\033[41m') + self.assertEqual(Back.GREEN, '\033[42m') + self.assertEqual(Back.YELLOW, '\033[43m') + self.assertEqual(Back.BLUE, '\033[44m') + self.assertEqual(Back.MAGENTA, '\033[45m') + self.assertEqual(Back.CYAN, '\033[46m') + self.assertEqual(Back.WHITE, '\033[47m') + self.assertEqual(Back.RESET, '\033[49m') + + # Check the light, extended versions. + self.assertEqual(Back.LIGHTBLACK_EX, '\033[100m') + self.assertEqual(Back.LIGHTRED_EX, '\033[101m') + self.assertEqual(Back.LIGHTGREEN_EX, '\033[102m') + self.assertEqual(Back.LIGHTYELLOW_EX, '\033[103m') + self.assertEqual(Back.LIGHTBLUE_EX, '\033[104m') + self.assertEqual(Back.LIGHTMAGENTA_EX, '\033[105m') + self.assertEqual(Back.LIGHTCYAN_EX, '\033[106m') + self.assertEqual(Back.LIGHTWHITE_EX, '\033[107m') + + + def testStyleAttributes(self): + self.assertEqual(Style.DIM, '\033[2m') + self.assertEqual(Style.NORMAL, '\033[22m') + self.assertEqual(Style.BRIGHT, '\033[1m') + + +if __name__ == '__main__': + main() diff --git a/local_packages/colorama/tests/ansitowin32_test.py b/local_packages/colorama/tests/ansitowin32_test.py new file mode 100644 index 0000000..91ca551 --- /dev/null +++ b/local_packages/colorama/tests/ansitowin32_test.py @@ -0,0 +1,294 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +from io import StringIO, TextIOWrapper +from unittest import TestCase, main +try: + from contextlib import ExitStack +except ImportError: + # python 2 + from contextlib2 import ExitStack + +try: + from unittest.mock import MagicMock, Mock, patch +except ImportError: + from mock import MagicMock, Mock, patch + +from ..ansitowin32 import AnsiToWin32, StreamWrapper +from ..win32 import ENABLE_VIRTUAL_TERMINAL_PROCESSING +from .utils import osname + + +class StreamWrapperTest(TestCase): + + def testIsAProxy(self): + mockStream = Mock() + wrapper = StreamWrapper(mockStream, None) + self.assertTrue( wrapper.random_attr is mockStream.random_attr ) + + def testDelegatesWrite(self): + mockStream = Mock() + mockConverter = Mock() + wrapper = StreamWrapper(mockStream, mockConverter) + wrapper.write('hello') + self.assertTrue(mockConverter.write.call_args, (('hello',), {})) + + def testDelegatesContext(self): + mockConverter = Mock() + s = StringIO() + with StreamWrapper(s, mockConverter) as fp: + fp.write(u'hello') + self.assertTrue(s.closed) + + def testProxyNoContextManager(self): + mockStream = MagicMock() + mockStream.__enter__.side_effect = AttributeError() + mockConverter = Mock() + with self.assertRaises(AttributeError) as excinfo: + with StreamWrapper(mockStream, mockConverter) as wrapper: + wrapper.write('hello') + + def test_closed_shouldnt_raise_on_closed_stream(self): + stream = StringIO() + stream.close() + wrapper = StreamWrapper(stream, None) + self.assertEqual(wrapper.closed, True) + + def test_closed_shouldnt_raise_on_detached_stream(self): + stream = TextIOWrapper(StringIO()) + stream.detach() + wrapper = StreamWrapper(stream, None) + self.assertEqual(wrapper.closed, True) + +class AnsiToWin32Test(TestCase): + + def testInit(self): + mockStdout = Mock() + auto = Mock() + stream = AnsiToWin32(mockStdout, autoreset=auto) + self.assertEqual(stream.wrapped, mockStdout) + self.assertEqual(stream.autoreset, auto) + + @patch('colorama.ansitowin32.winterm', None) + @patch('colorama.ansitowin32.winapi_test', lambda *_: True) + def testStripIsTrueOnWindows(self): + with osname('nt'): + mockStdout = Mock() + stream = AnsiToWin32(mockStdout) + self.assertTrue(stream.strip) + + def testStripIsFalseOffWindows(self): + with osname('posix'): + mockStdout = Mock(closed=False) + stream = AnsiToWin32(mockStdout) + self.assertFalse(stream.strip) + + def testWriteStripsAnsi(self): + mockStdout = Mock() + stream = AnsiToWin32(mockStdout) + stream.wrapped = Mock() + stream.write_and_convert = Mock() + stream.strip = True + + stream.write('abc') + + self.assertFalse(stream.wrapped.write.called) + self.assertEqual(stream.write_and_convert.call_args, (('abc',), {})) + + def testWriteDoesNotStripAnsi(self): + mockStdout = Mock() + stream = AnsiToWin32(mockStdout) + stream.wrapped = Mock() + stream.write_and_convert = Mock() + stream.strip = False + stream.convert = False + + stream.write('abc') + + self.assertFalse(stream.write_and_convert.called) + self.assertEqual(stream.wrapped.write.call_args, (('abc',), {})) + + def assert_autoresets(self, convert, autoreset=True): + stream = AnsiToWin32(Mock()) + stream.convert = convert + stream.reset_all = Mock() + stream.autoreset = autoreset + stream.winterm = Mock() + + stream.write('abc') + + self.assertEqual(stream.reset_all.called, autoreset) + + def testWriteAutoresets(self): + self.assert_autoresets(convert=True) + self.assert_autoresets(convert=False) + self.assert_autoresets(convert=True, autoreset=False) + self.assert_autoresets(convert=False, autoreset=False) + + def testWriteAndConvertWritesPlainText(self): + stream = AnsiToWin32(Mock()) + stream.write_and_convert( 'abc' ) + self.assertEqual( stream.wrapped.write.call_args, (('abc',), {}) ) + + def testWriteAndConvertStripsAllValidAnsi(self): + stream = AnsiToWin32(Mock()) + stream.call_win32 = Mock() + data = [ + 'abc\033[mdef', + 'abc\033[0mdef', + 'abc\033[2mdef', + 'abc\033[02mdef', + 'abc\033[002mdef', + 'abc\033[40mdef', + 'abc\033[040mdef', + 'abc\033[0;1mdef', + 'abc\033[40;50mdef', + 'abc\033[50;30;40mdef', + 'abc\033[Adef', + 'abc\033[0Gdef', + 'abc\033[1;20;128Hdef', + ] + for datum in data: + stream.wrapped.write.reset_mock() + stream.write_and_convert( datum ) + self.assertEqual( + [args[0] for args in stream.wrapped.write.call_args_list], + [ ('abc',), ('def',) ] + ) + + def testWriteAndConvertSkipsEmptySnippets(self): + stream = AnsiToWin32(Mock()) + stream.call_win32 = Mock() + stream.write_and_convert( '\033[40m\033[41m' ) + self.assertFalse( stream.wrapped.write.called ) + + def testWriteAndConvertCallsWin32WithParamsAndCommand(self): + stream = AnsiToWin32(Mock()) + stream.convert = True + stream.call_win32 = Mock() + stream.extract_params = Mock(return_value='params') + data = { + 'abc\033[adef': ('a', 'params'), + 'abc\033[;;bdef': ('b', 'params'), + 'abc\033[0cdef': ('c', 'params'), + 'abc\033[;;0;;Gdef': ('G', 'params'), + 'abc\033[1;20;128Hdef': ('H', 'params'), + } + for datum, expected in data.items(): + stream.call_win32.reset_mock() + stream.write_and_convert( datum ) + self.assertEqual( stream.call_win32.call_args[0], expected ) + + def test_reset_all_shouldnt_raise_on_closed_orig_stdout(self): + stream = StringIO() + converter = AnsiToWin32(stream) + stream.close() + + converter.reset_all() + + def test_wrap_shouldnt_raise_on_closed_orig_stdout(self): + stream = StringIO() + stream.close() + with \ + patch("colorama.ansitowin32.os.name", "nt"), \ + patch("colorama.ansitowin32.winapi_test", lambda: True): + converter = AnsiToWin32(stream) + self.assertTrue(converter.strip) + self.assertFalse(converter.convert) + + def test_wrap_shouldnt_raise_on_missing_closed_attr(self): + with \ + patch("colorama.ansitowin32.os.name", "nt"), \ + patch("colorama.ansitowin32.winapi_test", lambda: True): + converter = AnsiToWin32(object()) + self.assertTrue(converter.strip) + self.assertFalse(converter.convert) + + def testExtractParams(self): + stream = AnsiToWin32(Mock()) + data = { + '': (0,), + ';;': (0,), + '2': (2,), + ';;002;;': (2,), + '0;1': (0, 1), + ';;003;;456;;': (3, 456), + '11;22;33;44;55': (11, 22, 33, 44, 55), + } + for datum, expected in data.items(): + self.assertEqual(stream.extract_params('m', datum), expected) + + def testCallWin32UsesLookup(self): + listener = Mock() + stream = AnsiToWin32(listener) + stream.win32_calls = { + 1: (lambda *_, **__: listener(11),), + 2: (lambda *_, **__: listener(22),), + 3: (lambda *_, **__: listener(33),), + } + stream.call_win32('m', (3, 1, 99, 2)) + self.assertEqual( + [a[0][0] for a in listener.call_args_list], + [33, 11, 22] ) + + def test_osc_codes(self): + mockStdout = Mock() + stream = AnsiToWin32(mockStdout, convert=True) + with patch('colorama.ansitowin32.winterm') as winterm: + data = [ + '\033]0\x07', # missing arguments + '\033]0;foo\x08', # wrong OSC command + '\033]0;colorama_test_title\x07', # should work + '\033]1;colorama_test_title\x07', # wrong set command + '\033]2;colorama_test_title\x07', # should work + '\033]' + ';' * 64 + '\x08', # see issue #247 + ] + for code in data: + stream.write(code) + self.assertEqual(winterm.set_title.call_count, 2) + + def test_native_windows_ansi(self): + with ExitStack() as stack: + def p(a, b): + stack.enter_context(patch(a, b, create=True)) + # Pretend to be on Windows + p("colorama.ansitowin32.os.name", "nt") + p("colorama.ansitowin32.winapi_test", lambda: True) + p("colorama.win32.winapi_test", lambda: True) + p("colorama.winterm.win32.windll", "non-None") + p("colorama.winterm.get_osfhandle", lambda _: 1234) + + # Pretend that our mock stream has native ANSI support + p( + "colorama.winterm.win32.GetConsoleMode", + lambda _: ENABLE_VIRTUAL_TERMINAL_PROCESSING, + ) + SetConsoleMode = Mock() + p("colorama.winterm.win32.SetConsoleMode", SetConsoleMode) + + stdout = Mock() + stdout.closed = False + stdout.isatty.return_value = True + stdout.fileno.return_value = 1 + + # Our fake console says it has native vt support, so AnsiToWin32 should + # enable that support and do nothing else. + stream = AnsiToWin32(stdout) + SetConsoleMode.assert_called_with(1234, ENABLE_VIRTUAL_TERMINAL_PROCESSING) + self.assertFalse(stream.strip) + self.assertFalse(stream.convert) + self.assertFalse(stream.should_wrap()) + + # Now let's pretend we're on an old Windows console, that doesn't have + # native ANSI support. + p("colorama.winterm.win32.GetConsoleMode", lambda _: 0) + SetConsoleMode = Mock() + p("colorama.winterm.win32.SetConsoleMode", SetConsoleMode) + + stream = AnsiToWin32(stdout) + SetConsoleMode.assert_called_with(1234, ENABLE_VIRTUAL_TERMINAL_PROCESSING) + self.assertTrue(stream.strip) + self.assertTrue(stream.convert) + self.assertTrue(stream.should_wrap()) + + +if __name__ == '__main__': + main() diff --git a/local_packages/colorama/tests/initialise_test.py b/local_packages/colorama/tests/initialise_test.py new file mode 100644 index 0000000..89f9b07 --- /dev/null +++ b/local_packages/colorama/tests/initialise_test.py @@ -0,0 +1,189 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +import sys +from unittest import TestCase, main, skipUnless + +try: + from unittest.mock import patch, Mock +except ImportError: + from mock import patch, Mock + +from ..ansitowin32 import StreamWrapper +from ..initialise import init, just_fix_windows_console, _wipe_internal_state_for_tests +from .utils import osname, replace_by + +orig_stdout = sys.stdout +orig_stderr = sys.stderr + + +class InitTest(TestCase): + + @skipUnless(sys.stdout.isatty(), "sys.stdout is not a tty") + def setUp(self): + # sanity check + self.assertNotWrapped() + + def tearDown(self): + _wipe_internal_state_for_tests() + sys.stdout = orig_stdout + sys.stderr = orig_stderr + + def assertWrapped(self): + self.assertIsNot(sys.stdout, orig_stdout, 'stdout should be wrapped') + self.assertIsNot(sys.stderr, orig_stderr, 'stderr should be wrapped') + self.assertTrue(isinstance(sys.stdout, StreamWrapper), + 'bad stdout wrapper') + self.assertTrue(isinstance(sys.stderr, StreamWrapper), + 'bad stderr wrapper') + + def assertNotWrapped(self): + self.assertIs(sys.stdout, orig_stdout, 'stdout should not be wrapped') + self.assertIs(sys.stderr, orig_stderr, 'stderr should not be wrapped') + + @patch('colorama.initialise.reset_all') + @patch('colorama.ansitowin32.winapi_test', lambda *_: True) + @patch('colorama.ansitowin32.enable_vt_processing', lambda *_: False) + def testInitWrapsOnWindows(self, _): + with osname("nt"): + init() + self.assertWrapped() + + @patch('colorama.initialise.reset_all') + @patch('colorama.ansitowin32.winapi_test', lambda *_: False) + def testInitDoesntWrapOnEmulatedWindows(self, _): + with osname("nt"): + init() + self.assertNotWrapped() + + def testInitDoesntWrapOnNonWindows(self): + with osname("posix"): + init() + self.assertNotWrapped() + + def testInitDoesntWrapIfNone(self): + with replace_by(None): + init() + # We can't use assertNotWrapped here because replace_by(None) + # changes stdout/stderr already. + self.assertIsNone(sys.stdout) + self.assertIsNone(sys.stderr) + + def testInitAutoresetOnWrapsOnAllPlatforms(self): + with osname("posix"): + init(autoreset=True) + self.assertWrapped() + + def testInitWrapOffDoesntWrapOnWindows(self): + with osname("nt"): + init(wrap=False) + self.assertNotWrapped() + + def testInitWrapOffIncompatibleWithAutoresetOn(self): + self.assertRaises(ValueError, lambda: init(autoreset=True, wrap=False)) + + @patch('colorama.win32.SetConsoleTextAttribute') + @patch('colorama.initialise.AnsiToWin32') + def testAutoResetPassedOn(self, mockATW32, _): + with osname("nt"): + init(autoreset=True) + self.assertEqual(len(mockATW32.call_args_list), 2) + self.assertEqual(mockATW32.call_args_list[1][1]['autoreset'], True) + self.assertEqual(mockATW32.call_args_list[0][1]['autoreset'], True) + + @patch('colorama.initialise.AnsiToWin32') + def testAutoResetChangeable(self, mockATW32): + with osname("nt"): + init() + + init(autoreset=True) + self.assertEqual(len(mockATW32.call_args_list), 4) + self.assertEqual(mockATW32.call_args_list[2][1]['autoreset'], True) + self.assertEqual(mockATW32.call_args_list[3][1]['autoreset'], True) + + init() + self.assertEqual(len(mockATW32.call_args_list), 6) + self.assertEqual( + mockATW32.call_args_list[4][1]['autoreset'], False) + self.assertEqual( + mockATW32.call_args_list[5][1]['autoreset'], False) + + + @patch('colorama.initialise.atexit.register') + def testAtexitRegisteredOnlyOnce(self, mockRegister): + init() + self.assertTrue(mockRegister.called) + mockRegister.reset_mock() + init() + self.assertFalse(mockRegister.called) + + +class JustFixWindowsConsoleTest(TestCase): + def _reset(self): + _wipe_internal_state_for_tests() + sys.stdout = orig_stdout + sys.stderr = orig_stderr + + def tearDown(self): + self._reset() + + @patch("colorama.ansitowin32.winapi_test", lambda: True) + def testJustFixWindowsConsole(self): + if sys.platform != "win32": + # just_fix_windows_console should be a no-op + just_fix_windows_console() + self.assertIs(sys.stdout, orig_stdout) + self.assertIs(sys.stderr, orig_stderr) + else: + def fake_std(): + # Emulate stdout=not a tty, stderr=tty + # to check that we handle both cases correctly + stdout = Mock() + stdout.closed = False + stdout.isatty.return_value = False + stdout.fileno.return_value = 1 + sys.stdout = stdout + + stderr = Mock() + stderr.closed = False + stderr.isatty.return_value = True + stderr.fileno.return_value = 2 + sys.stderr = stderr + + for native_ansi in [False, True]: + with patch( + 'colorama.ansitowin32.enable_vt_processing', + lambda *_: native_ansi + ): + self._reset() + fake_std() + + # Regular single-call test + prev_stdout = sys.stdout + prev_stderr = sys.stderr + just_fix_windows_console() + self.assertIs(sys.stdout, prev_stdout) + if native_ansi: + self.assertIs(sys.stderr, prev_stderr) + else: + self.assertIsNot(sys.stderr, prev_stderr) + + # second call without resetting is always a no-op + prev_stdout = sys.stdout + prev_stderr = sys.stderr + just_fix_windows_console() + self.assertIs(sys.stdout, prev_stdout) + self.assertIs(sys.stderr, prev_stderr) + + self._reset() + fake_std() + + # If init() runs first, just_fix_windows_console should be a no-op + init() + prev_stdout = sys.stdout + prev_stderr = sys.stderr + just_fix_windows_console() + self.assertIs(prev_stdout, sys.stdout) + self.assertIs(prev_stderr, sys.stderr) + + +if __name__ == '__main__': + main() diff --git a/local_packages/colorama/tests/isatty_test.py b/local_packages/colorama/tests/isatty_test.py new file mode 100644 index 0000000..0f84e4b --- /dev/null +++ b/local_packages/colorama/tests/isatty_test.py @@ -0,0 +1,57 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +import sys +from unittest import TestCase, main + +from ..ansitowin32 import StreamWrapper, AnsiToWin32 +from .utils import pycharm, replace_by, replace_original_by, StreamTTY, StreamNonTTY + + +def is_a_tty(stream): + return StreamWrapper(stream, None).isatty() + +class IsattyTest(TestCase): + + def test_TTY(self): + tty = StreamTTY() + self.assertTrue(is_a_tty(tty)) + with pycharm(): + self.assertTrue(is_a_tty(tty)) + + def test_nonTTY(self): + non_tty = StreamNonTTY() + self.assertFalse(is_a_tty(non_tty)) + with pycharm(): + self.assertFalse(is_a_tty(non_tty)) + + def test_withPycharm(self): + with pycharm(): + self.assertTrue(is_a_tty(sys.stderr)) + self.assertTrue(is_a_tty(sys.stdout)) + + def test_withPycharmTTYOverride(self): + tty = StreamTTY() + with pycharm(), replace_by(tty): + self.assertTrue(is_a_tty(tty)) + + def test_withPycharmNonTTYOverride(self): + non_tty = StreamNonTTY() + with pycharm(), replace_by(non_tty): + self.assertFalse(is_a_tty(non_tty)) + + def test_withPycharmNoneOverride(self): + with pycharm(): + with replace_by(None), replace_original_by(None): + self.assertFalse(is_a_tty(None)) + self.assertFalse(is_a_tty(StreamNonTTY())) + self.assertTrue(is_a_tty(StreamTTY())) + + def test_withPycharmStreamWrapped(self): + with pycharm(): + self.assertTrue(AnsiToWin32(StreamTTY()).stream.isatty()) + self.assertFalse(AnsiToWin32(StreamNonTTY()).stream.isatty()) + self.assertTrue(AnsiToWin32(sys.stdout).stream.isatty()) + self.assertTrue(AnsiToWin32(sys.stderr).stream.isatty()) + + +if __name__ == '__main__': + main() diff --git a/local_packages/colorama/tests/utils.py b/local_packages/colorama/tests/utils.py new file mode 100644 index 0000000..472fafb --- /dev/null +++ b/local_packages/colorama/tests/utils.py @@ -0,0 +1,49 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +from contextlib import contextmanager +from io import StringIO +import sys +import os + + +class StreamTTY(StringIO): + def isatty(self): + return True + +class StreamNonTTY(StringIO): + def isatty(self): + return False + +@contextmanager +def osname(name): + orig = os.name + os.name = name + yield + os.name = orig + +@contextmanager +def replace_by(stream): + orig_stdout = sys.stdout + orig_stderr = sys.stderr + sys.stdout = stream + sys.stderr = stream + yield + sys.stdout = orig_stdout + sys.stderr = orig_stderr + +@contextmanager +def replace_original_by(stream): + orig_stdout = sys.__stdout__ + orig_stderr = sys.__stderr__ + sys.__stdout__ = stream + sys.__stderr__ = stream + yield + sys.__stdout__ = orig_stdout + sys.__stderr__ = orig_stderr + +@contextmanager +def pycharm(): + os.environ["PYCHARM_HOSTED"] = "1" + non_tty = StreamNonTTY() + with replace_by(non_tty), replace_original_by(non_tty): + yield + del os.environ["PYCHARM_HOSTED"] diff --git a/local_packages/colorama/tests/winterm_test.py b/local_packages/colorama/tests/winterm_test.py new file mode 100644 index 0000000..d0955f9 --- /dev/null +++ b/local_packages/colorama/tests/winterm_test.py @@ -0,0 +1,131 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +import sys +from unittest import TestCase, main, skipUnless + +try: + from unittest.mock import Mock, patch +except ImportError: + from mock import Mock, patch + +from ..winterm import WinColor, WinStyle, WinTerm + + +class WinTermTest(TestCase): + + @patch('colorama.winterm.win32') + def testInit(self, mockWin32): + mockAttr = Mock() + mockAttr.wAttributes = 7 + 6 * 16 + 8 + mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr + term = WinTerm() + self.assertEqual(term._fore, 7) + self.assertEqual(term._back, 6) + self.assertEqual(term._style, 8) + + @skipUnless(sys.platform.startswith("win"), "requires Windows") + def testGetAttrs(self): + term = WinTerm() + + term._fore = 0 + term._back = 0 + term._style = 0 + self.assertEqual(term.get_attrs(), 0) + + term._fore = WinColor.YELLOW + self.assertEqual(term.get_attrs(), WinColor.YELLOW) + + term._back = WinColor.MAGENTA + self.assertEqual( + term.get_attrs(), + WinColor.YELLOW + WinColor.MAGENTA * 16) + + term._style = WinStyle.BRIGHT + self.assertEqual( + term.get_attrs(), + WinColor.YELLOW + WinColor.MAGENTA * 16 + WinStyle.BRIGHT) + + @patch('colorama.winterm.win32') + def testResetAll(self, mockWin32): + mockAttr = Mock() + mockAttr.wAttributes = 1 + 2 * 16 + 8 + mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr + term = WinTerm() + + term.set_console = Mock() + term._fore = -1 + term._back = -1 + term._style = -1 + + term.reset_all() + + self.assertEqual(term._fore, 1) + self.assertEqual(term._back, 2) + self.assertEqual(term._style, 8) + self.assertEqual(term.set_console.called, True) + + @skipUnless(sys.platform.startswith("win"), "requires Windows") + def testFore(self): + term = WinTerm() + term.set_console = Mock() + term._fore = 0 + + term.fore(5) + + self.assertEqual(term._fore, 5) + self.assertEqual(term.set_console.called, True) + + @skipUnless(sys.platform.startswith("win"), "requires Windows") + def testBack(self): + term = WinTerm() + term.set_console = Mock() + term._back = 0 + + term.back(5) + + self.assertEqual(term._back, 5) + self.assertEqual(term.set_console.called, True) + + @skipUnless(sys.platform.startswith("win"), "requires Windows") + def testStyle(self): + term = WinTerm() + term.set_console = Mock() + term._style = 0 + + term.style(22) + + self.assertEqual(term._style, 22) + self.assertEqual(term.set_console.called, True) + + @patch('colorama.winterm.win32') + def testSetConsole(self, mockWin32): + mockAttr = Mock() + mockAttr.wAttributes = 0 + mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr + term = WinTerm() + term.windll = Mock() + + term.set_console() + + self.assertEqual( + mockWin32.SetConsoleTextAttribute.call_args, + ((mockWin32.STDOUT, term.get_attrs()), {}) + ) + + @patch('colorama.winterm.win32') + def testSetConsoleOnStderr(self, mockWin32): + mockAttr = Mock() + mockAttr.wAttributes = 0 + mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr + term = WinTerm() + term.windll = Mock() + + term.set_console(on_stderr=True) + + self.assertEqual( + mockWin32.SetConsoleTextAttribute.call_args, + ((mockWin32.STDERR, term.get_attrs()), {}) + ) + + +if __name__ == '__main__': + main() diff --git a/local_packages/colorama/win32.py b/local_packages/colorama/win32.py new file mode 100644 index 0000000..841b0e2 --- /dev/null +++ b/local_packages/colorama/win32.py @@ -0,0 +1,180 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. + +# from winbase.h +STDOUT = -11 +STDERR = -12 + +ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 + +try: + import ctypes + from ctypes import LibraryLoader + windll = LibraryLoader(ctypes.WinDLL) + from ctypes import wintypes +except (AttributeError, ImportError): + windll = None + SetConsoleTextAttribute = lambda *_: None + winapi_test = lambda *_: None +else: + from ctypes import byref, Structure, c_char, POINTER + + COORD = wintypes._COORD + + class CONSOLE_SCREEN_BUFFER_INFO(Structure): + """struct in wincon.h.""" + _fields_ = [ + ("dwSize", COORD), + ("dwCursorPosition", COORD), + ("wAttributes", wintypes.WORD), + ("srWindow", wintypes.SMALL_RECT), + ("dwMaximumWindowSize", COORD), + ] + def __str__(self): + return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % ( + self.dwSize.Y, self.dwSize.X + , self.dwCursorPosition.Y, self.dwCursorPosition.X + , self.wAttributes + , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right + , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X + ) + + _GetStdHandle = windll.kernel32.GetStdHandle + _GetStdHandle.argtypes = [ + wintypes.DWORD, + ] + _GetStdHandle.restype = wintypes.HANDLE + + _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo + _GetConsoleScreenBufferInfo.argtypes = [ + wintypes.HANDLE, + POINTER(CONSOLE_SCREEN_BUFFER_INFO), + ] + _GetConsoleScreenBufferInfo.restype = wintypes.BOOL + + _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute + _SetConsoleTextAttribute.argtypes = [ + wintypes.HANDLE, + wintypes.WORD, + ] + _SetConsoleTextAttribute.restype = wintypes.BOOL + + _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition + _SetConsoleCursorPosition.argtypes = [ + wintypes.HANDLE, + COORD, + ] + _SetConsoleCursorPosition.restype = wintypes.BOOL + + _FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA + _FillConsoleOutputCharacterA.argtypes = [ + wintypes.HANDLE, + c_char, + wintypes.DWORD, + COORD, + POINTER(wintypes.DWORD), + ] + _FillConsoleOutputCharacterA.restype = wintypes.BOOL + + _FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute + _FillConsoleOutputAttribute.argtypes = [ + wintypes.HANDLE, + wintypes.WORD, + wintypes.DWORD, + COORD, + POINTER(wintypes.DWORD), + ] + _FillConsoleOutputAttribute.restype = wintypes.BOOL + + _SetConsoleTitleW = windll.kernel32.SetConsoleTitleW + _SetConsoleTitleW.argtypes = [ + wintypes.LPCWSTR + ] + _SetConsoleTitleW.restype = wintypes.BOOL + + _GetConsoleMode = windll.kernel32.GetConsoleMode + _GetConsoleMode.argtypes = [ + wintypes.HANDLE, + POINTER(wintypes.DWORD) + ] + _GetConsoleMode.restype = wintypes.BOOL + + _SetConsoleMode = windll.kernel32.SetConsoleMode + _SetConsoleMode.argtypes = [ + wintypes.HANDLE, + wintypes.DWORD + ] + _SetConsoleMode.restype = wintypes.BOOL + + def _winapi_test(handle): + csbi = CONSOLE_SCREEN_BUFFER_INFO() + success = _GetConsoleScreenBufferInfo( + handle, byref(csbi)) + return bool(success) + + def winapi_test(): + return any(_winapi_test(h) for h in + (_GetStdHandle(STDOUT), _GetStdHandle(STDERR))) + + def GetConsoleScreenBufferInfo(stream_id=STDOUT): + handle = _GetStdHandle(stream_id) + csbi = CONSOLE_SCREEN_BUFFER_INFO() + success = _GetConsoleScreenBufferInfo( + handle, byref(csbi)) + return csbi + + def SetConsoleTextAttribute(stream_id, attrs): + handle = _GetStdHandle(stream_id) + return _SetConsoleTextAttribute(handle, attrs) + + def SetConsoleCursorPosition(stream_id, position, adjust=True): + position = COORD(*position) + # If the position is out of range, do nothing. + if position.Y <= 0 or position.X <= 0: + return + # Adjust for Windows' SetConsoleCursorPosition: + # 1. being 0-based, while ANSI is 1-based. + # 2. expecting (x,y), while ANSI uses (y,x). + adjusted_position = COORD(position.Y - 1, position.X - 1) + if adjust: + # Adjust for viewport's scroll position + sr = GetConsoleScreenBufferInfo(STDOUT).srWindow + adjusted_position.Y += sr.Top + adjusted_position.X += sr.Left + # Resume normal processing + handle = _GetStdHandle(stream_id) + return _SetConsoleCursorPosition(handle, adjusted_position) + + def FillConsoleOutputCharacter(stream_id, char, length, start): + handle = _GetStdHandle(stream_id) + char = c_char(char.encode()) + length = wintypes.DWORD(length) + num_written = wintypes.DWORD(0) + # Note that this is hard-coded for ANSI (vs wide) bytes. + success = _FillConsoleOutputCharacterA( + handle, char, length, start, byref(num_written)) + return num_written.value + + def FillConsoleOutputAttribute(stream_id, attr, length, start): + ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )''' + handle = _GetStdHandle(stream_id) + attribute = wintypes.WORD(attr) + length = wintypes.DWORD(length) + num_written = wintypes.DWORD(0) + # Note that this is hard-coded for ANSI (vs wide) bytes. + return _FillConsoleOutputAttribute( + handle, attribute, length, start, byref(num_written)) + + def SetConsoleTitle(title): + return _SetConsoleTitleW(title) + + def GetConsoleMode(handle): + mode = wintypes.DWORD() + success = _GetConsoleMode(handle, byref(mode)) + if not success: + raise ctypes.WinError() + return mode.value + + def SetConsoleMode(handle, mode): + success = _SetConsoleMode(handle, mode) + if not success: + raise ctypes.WinError() diff --git a/local_packages/colorama/winterm.py b/local_packages/colorama/winterm.py new file mode 100644 index 0000000..aad867e --- /dev/null +++ b/local_packages/colorama/winterm.py @@ -0,0 +1,195 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +try: + from msvcrt import get_osfhandle +except ImportError: + def get_osfhandle(_): + raise OSError("This isn't windows!") + + +from . import win32 + +# from wincon.h +class WinColor(object): + BLACK = 0 + BLUE = 1 + GREEN = 2 + CYAN = 3 + RED = 4 + MAGENTA = 5 + YELLOW = 6 + GREY = 7 + +# from wincon.h +class WinStyle(object): + NORMAL = 0x00 # dim text, dim background + BRIGHT = 0x08 # bright text, dim background + BRIGHT_BACKGROUND = 0x80 # dim text, bright background + +class WinTerm(object): + + def __init__(self): + self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes + self.set_attrs(self._default) + self._default_fore = self._fore + self._default_back = self._back + self._default_style = self._style + # In order to emulate LIGHT_EX in windows, we borrow the BRIGHT style. + # So that LIGHT_EX colors and BRIGHT style do not clobber each other, + # we track them separately, since LIGHT_EX is overwritten by Fore/Back + # and BRIGHT is overwritten by Style codes. + self._light = 0 + + def get_attrs(self): + return self._fore + self._back * 16 + (self._style | self._light) + + def set_attrs(self, value): + self._fore = value & 7 + self._back = (value >> 4) & 7 + self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND) + + def reset_all(self, on_stderr=None): + self.set_attrs(self._default) + self.set_console(attrs=self._default) + self._light = 0 + + def fore(self, fore=None, light=False, on_stderr=False): + if fore is None: + fore = self._default_fore + self._fore = fore + # Emulate LIGHT_EX with BRIGHT Style + if light: + self._light |= WinStyle.BRIGHT + else: + self._light &= ~WinStyle.BRIGHT + self.set_console(on_stderr=on_stderr) + + def back(self, back=None, light=False, on_stderr=False): + if back is None: + back = self._default_back + self._back = back + # Emulate LIGHT_EX with BRIGHT_BACKGROUND Style + if light: + self._light |= WinStyle.BRIGHT_BACKGROUND + else: + self._light &= ~WinStyle.BRIGHT_BACKGROUND + self.set_console(on_stderr=on_stderr) + + def style(self, style=None, on_stderr=False): + if style is None: + style = self._default_style + self._style = style + self.set_console(on_stderr=on_stderr) + + def set_console(self, attrs=None, on_stderr=False): + if attrs is None: + attrs = self.get_attrs() + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + win32.SetConsoleTextAttribute(handle, attrs) + + def get_position(self, handle): + position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition + # Because Windows coordinates are 0-based, + # and win32.SetConsoleCursorPosition expects 1-based. + position.X += 1 + position.Y += 1 + return position + + def set_cursor_position(self, position=None, on_stderr=False): + if position is None: + # I'm not currently tracking the position, so there is no default. + # position = self.get_position() + return + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + win32.SetConsoleCursorPosition(handle, position) + + def cursor_adjust(self, x, y, on_stderr=False): + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + position = self.get_position(handle) + adjusted_position = (position.Y + y, position.X + x) + win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False) + + def erase_screen(self, mode=0, on_stderr=False): + # 0 should clear from the cursor to the end of the screen. + # 1 should clear from the cursor to the beginning of the screen. + # 2 should clear the entire screen, and move cursor to (1,1) + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + csbi = win32.GetConsoleScreenBufferInfo(handle) + # get the number of character cells in the current buffer + cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y + # get number of character cells before current cursor position + cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X + if mode == 0: + from_coord = csbi.dwCursorPosition + cells_to_erase = cells_in_screen - cells_before_cursor + elif mode == 1: + from_coord = win32.COORD(0, 0) + cells_to_erase = cells_before_cursor + elif mode == 2: + from_coord = win32.COORD(0, 0) + cells_to_erase = cells_in_screen + else: + # invalid mode + return + # fill the entire screen with blanks + win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord) + # now set the buffer's attributes accordingly + win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord) + if mode == 2: + # put the cursor where needed + win32.SetConsoleCursorPosition(handle, (1, 1)) + + def erase_line(self, mode=0, on_stderr=False): + # 0 should clear from the cursor to the end of the line. + # 1 should clear from the cursor to the beginning of the line. + # 2 should clear the entire line. + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + csbi = win32.GetConsoleScreenBufferInfo(handle) + if mode == 0: + from_coord = csbi.dwCursorPosition + cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X + elif mode == 1: + from_coord = win32.COORD(0, csbi.dwCursorPosition.Y) + cells_to_erase = csbi.dwCursorPosition.X + elif mode == 2: + from_coord = win32.COORD(0, csbi.dwCursorPosition.Y) + cells_to_erase = csbi.dwSize.X + else: + # invalid mode + return + # fill the entire screen with blanks + win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord) + # now set the buffer's attributes accordingly + win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord) + + def set_title(self, title): + win32.SetConsoleTitle(title) + + +def enable_vt_processing(fd): + if win32.windll is None or not win32.winapi_test(): + return False + + try: + handle = get_osfhandle(fd) + mode = win32.GetConsoleMode(handle) + win32.SetConsoleMode( + handle, + mode | win32.ENABLE_VIRTUAL_TERMINAL_PROCESSING, + ) + + mode = win32.GetConsoleMode(handle) + if mode & win32.ENABLE_VIRTUAL_TERMINAL_PROCESSING: + return True + # Can get TypeError in testsuite where 'fd' is a Mock() + except (OSError, TypeError): + return False diff --git a/local_packages/dill-0.4.1.dist-info/INSTALLER b/local_packages/dill-0.4.1.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/local_packages/dill-0.4.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/local_packages/dill-0.4.1.dist-info/LICENSE b/local_packages/dill-0.4.1.dist-info/LICENSE new file mode 100644 index 0000000..483e245 --- /dev/null +++ b/local_packages/dill-0.4.1.dist-info/LICENSE @@ -0,0 +1,35 @@ +Copyright (c) 2004-2016 California Institute of Technology. +Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +All rights reserved. + +This software is available subject to the conditions and terms laid +out below. By downloading and using this software you are agreeing +to the following conditions. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + - Neither the names of the copyright holders nor the names of any of + the contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/local_packages/dill-0.4.1.dist-info/METADATA b/local_packages/dill-0.4.1.dist-info/METADATA new file mode 100644 index 0000000..934a527 --- /dev/null +++ b/local_packages/dill-0.4.1.dist-info/METADATA @@ -0,0 +1,281 @@ +Metadata-Version: 2.1 +Name: dill +Version: 0.4.1 +Summary: serialize all of Python +Home-page: https://github.com/uqfoundation/dill +Download-URL: https://pypi.org/project/dill/#files +Author: Mike McKerns +Author-email: mmckerns@uqfoundation.org +Maintainer: Mike McKerns +Maintainer-email: mmckerns@uqfoundation.org +License: BSD-3-Clause +Project-URL: Documentation, http://dill.rtfd.io +Project-URL: Source Code, https://github.com/uqfoundation/dill +Project-URL: Bug Tracker, https://github.com/uqfoundation/dill/issues +Platform: Linux +Platform: Windows +Platform: Mac +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: BSD License +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Software Development +Requires-Python: >=3.9 +License-File: LICENSE +Provides-Extra: readline +Provides-Extra: graph +Requires-Dist: objgraph>=1.7.2; extra == "graph" +Provides-Extra: profile +Requires-Dist: gprof2dot>=2022.7.29; extra == "profile" + +----------------------------- +dill: serialize all of Python +----------------------------- + +About Dill +========== + +``dill`` extends Python's ``pickle`` module for serializing and de-serializing +Python objects to the majority of the built-in Python types. Serialization +is the process of converting an object to a byte stream, and the inverse +of which is converting a byte stream back to a Python object hierarchy. + +``dill`` provides the user the same interface as the ``pickle`` module, and +also includes some additional features. In addition to pickling Python +objects, ``dill`` provides the ability to save the state of an interpreter +session in a single command. Hence, it would be feasible to save an +interpreter session, close the interpreter, ship the pickled file to +another computer, open a new interpreter, unpickle the session and +thus continue from the 'saved' state of the original interpreter +session. + +``dill`` can be used to store Python objects to a file, but the primary +usage is to send Python objects across the network as a byte stream. +``dill`` is quite flexible, and allows arbitrary user defined classes +and functions to be serialized. Thus ``dill`` is not intended to be +secure against erroneously or maliciously constructed data. It is +left to the user to decide whether the data they unpickle is from +a trustworthy source. + +``dill`` is part of ``pathos``, a Python framework for heterogeneous computing. +``dill`` is in active development, so any user feedback, bug reports, comments, +or suggestions are highly appreciated. A list of issues is located at +https://github.com/uqfoundation/dill/issues, with a legacy list maintained at +https://uqfoundation.github.io/project/pathos/query. + + +Major Features +============== + +``dill`` can pickle the following standard types: + + - none, type, bool, int, float, complex, bytes, str, + - tuple, list, dict, file, buffer, builtin, + - Python classes, namedtuples, dataclasses, metaclasses, + - instances of classes, + - set, frozenset, array, functions, exceptions + +``dill`` can also pickle more 'exotic' standard types: + + - functions with yields, nested functions, lambdas, + - cell, method, unboundmethod, module, code, methodwrapper, + - methoddescriptor, getsetdescriptor, memberdescriptor, wrapperdescriptor, + - dictproxy, slice, notimplemented, ellipsis, quit + +``dill`` cannot yet pickle these standard types: + + - frame, generator, traceback + +``dill`` also provides the capability to: + + - save and load Python interpreter sessions + - save and extract the source code from functions and classes + - interactively diagnose pickling errors + + +Current Release +=============== + +The latest released version of ``dill`` is available from: + + https://pypi.org/project/dill + +``dill`` is distributed under a 3-clause BSD license. + + +Development Version +=================== + +You can get the latest development version with all the shiny new features at: + + https://github.com/uqfoundation + +If you have a new contribution, please submit a pull request. + + +Installation +============ + +``dill`` can be installed with ``pip``:: + + $ pip install dill + +To optionally include the ``objgraph`` diagnostic tool in the install:: + + $ pip install dill[graph] + +To optionally include the ``gprof2dot`` diagnostic tool in the install:: + + $ pip install dill[profile] + +For windows users, to optionally install session history tools:: + + $ pip install dill[readline] + + +Requirements +============ + +``dill`` requires: + + - ``python`` (or ``pypy``), **>=3.9** + - ``setuptools``, **>=42** + +Optional requirements: + + - ``objgraph``, **>=1.7.2** + - ``gprof2dot``, **>=2022.7.29** + - ``pyreadline``, **>=1.7.1** (on windows) + + +Basic Usage +=========== + +``dill`` is a drop-in replacement for ``pickle``. Existing code can be +updated to allow complete pickling using:: + + >>> import dill as pickle + +or:: + + >>> from dill import dumps, loads + +``dumps`` converts the object to a unique byte string, and ``loads`` performs +the inverse operation:: + + >>> squared = lambda x: x**2 + >>> loads(dumps(squared))(3) + 9 + +There are a number of options to control serialization which are provided +as keyword arguments to several ``dill`` functions: + +* with *protocol*, the pickle protocol level can be set. This uses the + same value as the ``pickle`` module, *DEFAULT_PROTOCOL*. +* with *byref=True*, ``dill`` to behave a lot more like pickle with + certain objects (like modules) pickled by reference as opposed to + attempting to pickle the object itself. +* with *recurse=True*, objects referred to in the global dictionary are + recursively traced and pickled, instead of the default behavior of + attempting to store the entire global dictionary. +* with *fmode*, the contents of the file can be pickled along with the file + handle, which is useful if the object is being sent over the wire to a + remote system which does not have the original file on disk. Options are + *HANDLE_FMODE* for just the handle, *CONTENTS_FMODE* for the file content + and *FILE_FMODE* for content and handle. +* with *ignore=False*, objects reconstructed with types defined in the + top-level script environment use the existing type in the environment + rather than a possibly different reconstructed type. + +The default serialization can also be set globally in *dill.settings*. +Thus, we can modify how ``dill`` handles references to the global dictionary +locally or globally:: + + >>> import dill.settings + >>> dumps(absolute) == dumps(absolute, recurse=True) + False + >>> dill.settings['recurse'] = True + >>> dumps(absolute) == dumps(absolute, recurse=True) + True + +``dill`` also includes source code inspection, as an alternate to pickling:: + + >>> import dill.source + >>> print(dill.source.getsource(squared)) + squared = lambda x:x**2 + +To aid in debugging pickling issues, use *dill.detect* which provides +tools like pickle tracing:: + + >>> import dill.detect + >>> with dill.detect.trace(): + >>> dumps(squared) + ┬ F1: at 0x7fe074f8c280> + ├┬ F2: + │└ # F2 [34 B] + ├┬ Co: at 0x7fe07501eb30, file "", line 1> + │├┬ F2: + ││└ # F2 [19 B] + │└ # Co [87 B] + ├┬ D1: + │└ # D1 [22 B] + ├┬ D2: + │└ # D2 [2 B] + ├┬ D2: + │├┬ D2: + ││└ # D2 [2 B] + │└ # D2 [23 B] + └ # F1 [180 B] + +With trace, we see how ``dill`` stored the lambda (``F1``) by first storing +``_create_function``, the underlying code object (``Co``) and ``_create_code`` +(which is used to handle code objects), then we handle the reference to +the global dict (``D2``) plus other dictionaries (``D1`` and ``D2``) that +save the lambda object's state. A ``#`` marks when the object is actually stored. + + +More Information +================ + +Probably the best way to get started is to look at the documentation at +http://dill.rtfd.io. Also see ``dill.tests`` for a set of scripts that +demonstrate how ``dill`` can serialize different Python objects. You can +run the test suite with ``python -m dill.tests``. The contents of any +pickle file can be examined with ``undill``. As ``dill`` conforms to +the ``pickle`` interface, the examples and documentation found at +http://docs.python.org/library/pickle.html also apply to ``dill`` +if one will ``import dill as pickle``. The source code is also generally +well documented, so further questions may be resolved by inspecting the +code itself. Please feel free to submit a ticket on github, or ask a +question on stackoverflow (**@Mike McKerns**). +If you would like to share how you use ``dill`` in your work, please send +an email (to **mmckerns at uqfoundation dot org**). + + +Citation +======== + +If you use ``dill`` to do research that leads to publication, we ask that you +acknowledge use of ``dill`` by citing the following in your publication:: + + M.M. McKerns, L. Strand, T. Sullivan, A. Fang, M.A.G. Aivazis, + "Building a framework for predictive science", Proceedings of + the 10th Python in Science Conference, 2011; + http://arxiv.org/pdf/1202.1056 + + Michael McKerns and Michael Aivazis, + "pathos: a framework for heterogeneous computing", 2010- ; + https://uqfoundation.github.io/project/pathos + +Please see https://uqfoundation.github.io/project/pathos or +http://arxiv.org/pdf/1202.1056 for further information. diff --git a/local_packages/dill-0.4.1.dist-info/RECORD b/local_packages/dill-0.4.1.dist-info/RECORD new file mode 100644 index 0000000..dea1d83 --- /dev/null +++ b/local_packages/dill-0.4.1.dist-info/RECORD @@ -0,0 +1,101 @@ +../../bin/get_gprof,sha256=qz_EtWetP-du21ooUp5b0xKqCqS3_idMmInNrTrcVTw,2477 +../../bin/get_objgraph,sha256=4LhRdw1jazVqOYaP4coEAyZ8dFCjGmzoR_sohYKgk5g,1671 +../../bin/undill,sha256=Wvwjh4KxR2WOZ1IUJFfMRmsCVt6slrvvhHSRJjXTC-c,607 +dill-0.4.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +dill-0.4.1.dist-info/LICENSE,sha256=9mnWO-v3ZbqqCyDYsMEZMpan4OoMIsUKMIt012qzk5w,1790 +dill-0.4.1.dist-info/METADATA,sha256=BA1shO3fKcjBTcKu8hSWKpGYA_VXJDgk3U-Ko7SeOGg,10171 +dill-0.4.1.dist-info/RECORD,, +dill-0.4.1.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91 +dill-0.4.1.dist-info/top_level.txt,sha256=HLSIyYIjQzJiBvs3_-16ntezE3j6mWGTW0DT1xDd7X0,5 +dill/__diff.py,sha256=7XCqWIelTWSLjieOIu3Xtr1mUui9YBmltP7-tvtrG-Q,7146 +dill/__info__.py,sha256=EGg2Nh84NhPnYq1J2j6EAPixBb3psVxY-5rAWzr8prk,10756 +dill/__init__.py,sha256=Y8usnzpfsclJxT6TvRYGarEXkWEXprv4lC2ZJ0bGM2Y,3798 +dill/__pycache__/__diff.cpython-312.pyc,, +dill/__pycache__/__info__.cpython-312.pyc,, +dill/__pycache__/__init__.cpython-312.pyc,, +dill/__pycache__/_dill.cpython-312.pyc,, +dill/__pycache__/_objects.cpython-312.pyc,, +dill/__pycache__/_shims.cpython-312.pyc,, +dill/__pycache__/detect.cpython-312.pyc,, +dill/__pycache__/logger.cpython-312.pyc,, +dill/__pycache__/objtypes.cpython-312.pyc,, +dill/__pycache__/pointers.cpython-312.pyc,, +dill/__pycache__/session.cpython-312.pyc,, +dill/__pycache__/settings.cpython-312.pyc,, +dill/__pycache__/source.cpython-312.pyc,, +dill/__pycache__/temp.cpython-312.pyc,, +dill/_dill.py,sha256=NdaVyGzOb5S57abYN0eFXcogocHQrTBkUru7cqw2fwM,91730 +dill/_objects.py,sha256=_Xc7OqnZesDmKBmtkbkrBBMDzH3TIFy_vtMRtc7lsSI,20015 +dill/_shims.py,sha256=kkIa7muWHHp9uItpNffqmbhBO4MEKPIdeyHOKxx2VF4,6635 +dill/detect.py,sha256=VbCyNMaIVudiqM1nLmGwS-yXW6M9CkdpbkVarAmnJqU,11207 +dill/logger.py,sha256=Z1wgU8BbSAEWBKoucYRHYrFhVLqqqTTi-BHcfsebi88,11143 +dill/objtypes.py,sha256=3hDEnRS040mVyA3nMJp0seIiAw9SafxbuvG5tFvthfE,736 +dill/pointers.py,sha256=zeX8XqlLBIW9vL-XH_er9HUWmInlbfWjYtva56eTmww,4467 +dill/session.py,sha256=88DJFIQrA8733P-VKkbCq0d1lyJzckQYCrGxcPeSKeE,23541 +dill/settings.py,sha256=DRFp2QC8xspPZ5Sb3eIB6abPp1wNuBNtCEjZ8DLY-x4,630 +dill/source.py,sha256=GsmYf6bEPLM9rivz1Ec7G2E-tEgAoXI95CisqdcQGGA,45710 +dill/temp.py,sha256=eFyMgZIJRUVgdRi2a4I2tZd5g3HSUFnfm5K3LV2Z93k,8027 +dill/tests/__init__.py,sha256=8OmXwJHyWK1axs4XwCD4TimAE1U_oF6S1ZIPJ8tI8Dg,479 +dill/tests/__main__.py,sha256=6jkJVIcrIHqHjyKOoDr95npJz4eASssTGKbuhWEqMow,899 +dill/tests/__pycache__/__init__.cpython-312.pyc,, +dill/tests/__pycache__/__main__.cpython-312.pyc,, +dill/tests/__pycache__/test_abc.cpython-312.pyc,, +dill/tests/__pycache__/test_check.cpython-312.pyc,, +dill/tests/__pycache__/test_classdef.cpython-312.pyc,, +dill/tests/__pycache__/test_dataclasses.cpython-312.pyc,, +dill/tests/__pycache__/test_detect.cpython-312.pyc,, +dill/tests/__pycache__/test_dictviews.cpython-312.pyc,, +dill/tests/__pycache__/test_diff.cpython-312.pyc,, +dill/tests/__pycache__/test_extendpickle.cpython-312.pyc,, +dill/tests/__pycache__/test_fglobals.cpython-312.pyc,, +dill/tests/__pycache__/test_file.cpython-312.pyc,, +dill/tests/__pycache__/test_functions.cpython-312.pyc,, +dill/tests/__pycache__/test_functors.cpython-312.pyc,, +dill/tests/__pycache__/test_logger.cpython-312.pyc,, +dill/tests/__pycache__/test_mixins.cpython-312.pyc,, +dill/tests/__pycache__/test_module.cpython-312.pyc,, +dill/tests/__pycache__/test_moduledict.cpython-312.pyc,, +dill/tests/__pycache__/test_nested.cpython-312.pyc,, +dill/tests/__pycache__/test_objects.cpython-312.pyc,, +dill/tests/__pycache__/test_properties.cpython-312.pyc,, +dill/tests/__pycache__/test_pycapsule.cpython-312.pyc,, +dill/tests/__pycache__/test_recursive.cpython-312.pyc,, +dill/tests/__pycache__/test_registered.cpython-312.pyc,, +dill/tests/__pycache__/test_restricted.cpython-312.pyc,, +dill/tests/__pycache__/test_selected.cpython-312.pyc,, +dill/tests/__pycache__/test_session.cpython-312.pyc,, +dill/tests/__pycache__/test_source.cpython-312.pyc,, +dill/tests/__pycache__/test_sources.cpython-312.pyc,, +dill/tests/__pycache__/test_temp.cpython-312.pyc,, +dill/tests/__pycache__/test_threads.cpython-312.pyc,, +dill/tests/__pycache__/test_weakref.cpython-312.pyc,, +dill/tests/test_abc.py,sha256=QJ5rW4-HPG0mj-K-96zwASOC-LqGa0XnzDTNW43Cnr4,4227 +dill/tests/test_check.py,sha256=h567c9d3L-O5lc4H8lBsN5St4NkQr5Em9eAU-0l8xYA,1396 +dill/tests/test_classdef.py,sha256=UTctS9spat_KwiubR2-05hVbMvmT5_BqZJWReDC34lM,8600 +dill/tests/test_dataclasses.py,sha256=btxL2-lqjFOLSDf8imLZAAQ4bkm9xQwvS2VlPiIrlQY,890 +dill/tests/test_detect.py,sha256=pm55LV1oi3xr6zmawc2pBlzSMSMkh4TwV7IuoAh3r8M,4329 +dill/tests/test_dictviews.py,sha256=aRqVTK5DOuIoKzxWQcPenSRnvCqLyYeM2jIQsn32Tqo,1337 +dill/tests/test_diff.py,sha256=cvfUtLcJOqlH0VcEmuIHQf69194Jy7vWoEk8pO-GLKM,2667 +dill/tests/test_extendpickle.py,sha256=kYtoA2gr_CLo7NRdyu2fwOdPMt8yp4kVzd9hki7pZBk,1315 +dill/tests/test_fglobals.py,sha256=dSyPOYarpytrnb723m3nAJqGp3zeTFovA7wfoZr6Z_o,1676 +dill/tests/test_file.py,sha256=OsM189KoskuN84bag6GbYJ4OYcTy5WlHSlMWj0j0BhQ,13578 +dill/tests/test_functions.py,sha256=40QuhQIqV-jaDvuR5pq0_Haf7Pmd9M3DYJDs2oSxjiU,4267 +dill/tests/test_functors.py,sha256=iLv95xcaTasuqZh5LQBLa6AWrPeCxFRb-TLzhNzrMB4,930 +dill/tests/test_logger.py,sha256=W0QJMVMQOUOizGb1sNsKInNVS76XqrM7UFyEX-Ei_bs,2385 +dill/tests/test_mixins.py,sha256=tlCp5-NYObO7VIiK0YkciB7yRqepRJPd_RYAc79Zcvw,4007 +dill/tests/test_module.py,sha256=LVdwwdkdbRofoEHhOfszi7oVEUoE__YZyyJzRyza_Ws,1943 +dill/tests/test_moduledict.py,sha256=s5siniJxn78G83lPiQnx4eYV6Z6isCU3m2phgRTtlVA,1182 +dill/tests/test_nested.py,sha256=EuyAWkXGuERTaUaPQDOxhO1cvDhCDWPML3vYOQ0HpFg,3146 +dill/tests/test_objects.py,sha256=6Gy-YuVDuX1efV_pQEbCen7ZpMxTazY9Ihq7y1otesQ,1931 +dill/tests/test_properties.py,sha256=ex9gCfebSawz6n5RGLS4NK5tyC7zjULJBYqPSl33Yqs,1346 +dill/tests/test_pycapsule.py,sha256=EJFsKZ0kTWi17YOQ0wwaoglXP9XqDS83EYIr1-ZsNUk,1417 +dill/tests/test_recursive.py,sha256=5zwJ62LHCtd3lFsFX7__XqflV78QKwuv-hnvBISoiVs,4182 +dill/tests/test_registered.py,sha256=fO2J8t1VlNFd6ecUtd8akgjl4nRy5TldZbnBEzymtWE,1573 +dill/tests/test_restricted.py,sha256=MG6gWcyn1X2kMNL7hX40iTZNbtre8-6QwneTt6EW7eE,783 +dill/tests/test_selected.py,sha256=1lr05NaRqy27XV-xj5A93Mh8vuOEVXbZZ_ELgjyBPl0,3069 +dill/tests/test_session.py,sha256=fILaOGfRHdMd7wED2fqkRHkHlRLY13TaTnvWIcbP8ic,10161 +dill/tests/test_source.py,sha256=3QnPpuQovYz0wAxzH7U4LWaGWI0sejOra8qPRgGj8W0,7311 +dill/tests/test_sources.py,sha256=QWX_Oc3NjpiMZqLLIJsXpK7kGGOYFDv47rCt7HIX_7s,8672 +dill/tests/test_temp.py,sha256=81i8qP5zFbX7bBlXBiYMDlm_p5GTvze6zVtAPifaD_g,2619 +dill/tests/test_threads.py,sha256=a8GRhk81_gE-TsLX21BG8Jb85vRaDd8All2AafqKzHA,1257 +dill/tests/test_weakref.py,sha256=vKT130ADMeqzhD3vx8KZnF5zvvkhTAaq8WD1tyACX2Y,1602 diff --git a/local_packages/dill-0.4.1.dist-info/WHEEL b/local_packages/dill-0.4.1.dist-info/WHEEL new file mode 100644 index 0000000..ae527e7 --- /dev/null +++ b/local_packages/dill-0.4.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (75.6.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/local_packages/dill-0.4.1.dist-info/top_level.txt b/local_packages/dill-0.4.1.dist-info/top_level.txt new file mode 100644 index 0000000..85eea70 --- /dev/null +++ b/local_packages/dill-0.4.1.dist-info/top_level.txt @@ -0,0 +1 @@ +dill diff --git a/local_packages/dill/__diff.py b/local_packages/dill/__diff.py new file mode 100644 index 0000000..6fa8dd5 --- /dev/null +++ b/local_packages/dill/__diff.py @@ -0,0 +1,234 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +""" +Module to show if an object has changed since it was memorised +""" + +import builtins +import os +import sys +import types +try: + import numpy.ma + HAS_NUMPY = True +except ImportError: + HAS_NUMPY = False + +# pypy doesn't use reference counting +getrefcount = getattr(sys, 'getrefcount', lambda x:0) + +# memo of objects indexed by id to a tuple (attributes, sequence items) +# attributes is a dict indexed by attribute name to attribute id +# sequence items is either a list of ids, of a dictionary of keys to ids +memo = {} +id_to_obj = {} +# types that cannot have changing attributes +builtins_types = set((str, list, dict, set, frozenset, int)) +dont_memo = set(id(i) for i in (memo, sys.modules, sys.path_importer_cache, + os.environ, id_to_obj)) + + +def get_attrs(obj): + """ + Gets all the attributes of an object though its __dict__ or return None + """ + if type(obj) in builtins_types \ + or type(obj) is type and obj in builtins_types: + return + return getattr(obj, '__dict__', None) + + +def get_seq(obj, cache={str: False, frozenset: False, list: True, set: True, + dict: True, tuple: True, type: False, + types.ModuleType: False, types.FunctionType: False, + types.BuiltinFunctionType: False}): + """ + Gets all the items in a sequence or return None + """ + try: + o_type = obj.__class__ + except AttributeError: + o_type = type(obj) + hsattr = hasattr + if o_type in cache: + if cache[o_type]: + if hsattr(obj, "copy"): + return obj.copy() + return obj + elif HAS_NUMPY and o_type in (numpy.ndarray, numpy.ma.core.MaskedConstant): + if obj.shape and obj.size: + return obj + else: + return [] + elif hsattr(obj, "__contains__") and hsattr(obj, "__iter__") \ + and hsattr(obj, "__len__") and hsattr(o_type, "__contains__") \ + and hsattr(o_type, "__iter__") and hsattr(o_type, "__len__"): + cache[o_type] = True + if hsattr(obj, "copy"): + return obj.copy() + return obj + else: + cache[o_type] = False + return None + + +def memorise(obj, force=False): + """ + Adds an object to the memo, and recursively adds all the objects + attributes, and if it is a container, its items. Use force=True to update + an object already in the memo. Updating is not recursively done. + """ + obj_id = id(obj) + if obj_id in memo and not force or obj_id in dont_memo: + return + id_ = id + g = get_attrs(obj) + if g is None: + attrs_id = None + else: + attrs_id = dict((key,id_(value)) for key, value in g.items()) + + s = get_seq(obj) + if s is None: + seq_id = None + elif hasattr(s, "items"): + seq_id = dict((id_(key),id_(value)) for key, value in s.items()) + elif not hasattr(s, "__len__"): #XXX: avoid TypeError from unexpected case + seq_id = None + else: + seq_id = [id_(i) for i in s] + + memo[obj_id] = attrs_id, seq_id + id_to_obj[obj_id] = obj + mem = memorise + if g is not None: + [mem(value) for key, value in g.items()] + + if s is not None: + if hasattr(s, "items"): + [(mem(key), mem(item)) + for key, item in s.items()] + else: + if hasattr(s, '__len__'): + [mem(item) for item in s] + else: mem(s) + + +def release_gone(): + itop, mp, src = id_to_obj.pop, memo.pop, getrefcount + [(itop(id_), mp(id_)) for id_, obj in list(id_to_obj.items()) + if src(obj) < 4] #XXX: correct for pypy? + + +def whats_changed(obj, seen=None, simple=False, first=True): + """ + Check an object against the memo. Returns a list in the form + (attribute changes, container changed). Attribute changes is a dict of + attribute name to attribute value. container changed is a boolean. + If simple is true, just returns a boolean. None for either item means + that it has not been checked yet + """ + # Special cases + if first: + # ignore the _ variable, which only appears in interactive sessions + if "_" in builtins.__dict__: + del builtins._ + if seen is None: + seen = {} + + obj_id = id(obj) + + if obj_id in seen: + if simple: + return any(seen[obj_id]) + return seen[obj_id] + + # Safety checks + if obj_id in dont_memo: + seen[obj_id] = [{}, False] + if simple: + return False + return seen[obj_id] + elif obj_id not in memo: + if simple: + return True + else: + raise RuntimeError("Object not memorised " + str(obj)) + + seen[obj_id] = ({}, False) + + chngd = whats_changed + id_ = id + + # compare attributes + attrs = get_attrs(obj) + if attrs is None: + changed = {} + else: + obj_attrs = memo[obj_id][0] + obj_get = obj_attrs.get + changed = dict((key,None) for key in obj_attrs if key not in attrs) + for key, o in attrs.items(): + if id_(o) != obj_get(key, None) or chngd(o, seen, True, False): + changed[key] = o + + # compare sequence + items = get_seq(obj) + seq_diff = False + if (items is not None) and (hasattr(items, '__len__')): + obj_seq = memo[obj_id][1] + if (len(items) != len(obj_seq)): + seq_diff = True + elif hasattr(obj, "items"): # dict type obj + obj_get = obj_seq.get + for key, item in items.items(): + if id_(item) != obj_get(id_(key)) \ + or chngd(key, seen, True, False) \ + or chngd(item, seen, True, False): + seq_diff = True + break + else: + for i, j in zip(items, obj_seq): # list type obj + if id_(i) != j or chngd(i, seen, True, False): + seq_diff = True + break + seen[obj_id] = changed, seq_diff + if simple: + return changed or seq_diff + return changed, seq_diff + + +def has_changed(*args, **kwds): + kwds['simple'] = True # ignore simple if passed in + return whats_changed(*args, **kwds) + +__import__ = __import__ + + +def _imp(*args, **kwds): + """ + Replaces the default __import__, to allow a module to be memorised + before the user can change it + """ + before = set(sys.modules.keys()) + mod = __import__(*args, **kwds) + after = set(sys.modules.keys()).difference(before) + for m in after: + memorise(sys.modules[m]) + return mod + +builtins.__import__ = _imp +if hasattr(builtins, "_"): + del builtins._ + +# memorise all already imported modules. This implies that this must be +# imported first for any changes to be recorded +for mod in list(sys.modules.values()): + memorise(mod) +release_gone() diff --git a/local_packages/dill/__info__.py b/local_packages/dill/__info__.py new file mode 100644 index 0000000..87b181b --- /dev/null +++ b/local_packages/dill/__info__.py @@ -0,0 +1,291 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +''' +----------------------------- +dill: serialize all of Python +----------------------------- + +About Dill +========== + +``dill`` extends Python's ``pickle`` module for serializing and de-serializing +Python objects to the majority of the built-in Python types. Serialization +is the process of converting an object to a byte stream, and the inverse +of which is converting a byte stream back to a Python object hierarchy. + +``dill`` provides the user the same interface as the ``pickle`` module, and +also includes some additional features. In addition to pickling Python +objects, ``dill`` provides the ability to save the state of an interpreter +session in a single command. Hence, it would be feasible to save an +interpreter session, close the interpreter, ship the pickled file to +another computer, open a new interpreter, unpickle the session and +thus continue from the 'saved' state of the original interpreter +session. + +``dill`` can be used to store Python objects to a file, but the primary +usage is to send Python objects across the network as a byte stream. +``dill`` is quite flexible, and allows arbitrary user defined classes +and functions to be serialized. Thus ``dill`` is not intended to be +secure against erroneously or maliciously constructed data. It is +left to the user to decide whether the data they unpickle is from +a trustworthy source. + +``dill`` is part of ``pathos``, a Python framework for heterogeneous computing. +``dill`` is in active development, so any user feedback, bug reports, comments, +or suggestions are highly appreciated. A list of issues is located at +https://github.com/uqfoundation/dill/issues, with a legacy list maintained at +https://uqfoundation.github.io/project/pathos/query. + + +Major Features +============== + +``dill`` can pickle the following standard types: + + - none, type, bool, int, float, complex, bytes, str, + - tuple, list, dict, file, buffer, builtin, + - Python classes, namedtuples, dataclasses, metaclasses, + - instances of classes, + - set, frozenset, array, functions, exceptions + +``dill`` can also pickle more 'exotic' standard types: + + - functions with yields, nested functions, lambdas, + - cell, method, unboundmethod, module, code, methodwrapper, + - methoddescriptor, getsetdescriptor, memberdescriptor, wrapperdescriptor, + - dictproxy, slice, notimplemented, ellipsis, quit + +``dill`` cannot yet pickle these standard types: + + - frame, generator, traceback + +``dill`` also provides the capability to: + + - save and load Python interpreter sessions + - save and extract the source code from functions and classes + - interactively diagnose pickling errors + + +Current Release +=============== + +The latest released version of ``dill`` is available from: + + https://pypi.org/project/dill + +``dill`` is distributed under a 3-clause BSD license. + + +Development Version +=================== + +You can get the latest development version with all the shiny new features at: + + https://github.com/uqfoundation + +If you have a new contribution, please submit a pull request. + + +Installation +============ + +``dill`` can be installed with ``pip``:: + + $ pip install dill + +To optionally include the ``objgraph`` diagnostic tool in the install:: + + $ pip install dill[graph] + +To optionally include the ``gprof2dot`` diagnostic tool in the install:: + + $ pip install dill[profile] + +For windows users, to optionally install session history tools:: + + $ pip install dill[readline] + + +Requirements +============ + +``dill`` requires: + + - ``python`` (or ``pypy``), **>=3.9** + - ``setuptools``, **>=42** + +Optional requirements: + + - ``objgraph``, **>=1.7.2** + - ``gprof2dot``, **>=2022.7.29** + - ``pyreadline``, **>=1.7.1** (on windows) + + +Basic Usage +=========== + +``dill`` is a drop-in replacement for ``pickle``. Existing code can be +updated to allow complete pickling using:: + + >>> import dill as pickle + +or:: + + >>> from dill import dumps, loads + +``dumps`` converts the object to a unique byte string, and ``loads`` performs +the inverse operation:: + + >>> squared = lambda x: x**2 + >>> loads(dumps(squared))(3) + 9 + +There are a number of options to control serialization which are provided +as keyword arguments to several ``dill`` functions: + +* with *protocol*, the pickle protocol level can be set. This uses the + same value as the ``pickle`` module, *DEFAULT_PROTOCOL*. +* with *byref=True*, ``dill`` to behave a lot more like pickle with + certain objects (like modules) pickled by reference as opposed to + attempting to pickle the object itself. +* with *recurse=True*, objects referred to in the global dictionary are + recursively traced and pickled, instead of the default behavior of + attempting to store the entire global dictionary. +* with *fmode*, the contents of the file can be pickled along with the file + handle, which is useful if the object is being sent over the wire to a + remote system which does not have the original file on disk. Options are + *HANDLE_FMODE* for just the handle, *CONTENTS_FMODE* for the file content + and *FILE_FMODE* for content and handle. +* with *ignore=False*, objects reconstructed with types defined in the + top-level script environment use the existing type in the environment + rather than a possibly different reconstructed type. + +The default serialization can also be set globally in *dill.settings*. +Thus, we can modify how ``dill`` handles references to the global dictionary +locally or globally:: + + >>> import dill.settings + >>> dumps(absolute) == dumps(absolute, recurse=True) + False + >>> dill.settings['recurse'] = True + >>> dumps(absolute) == dumps(absolute, recurse=True) + True + +``dill`` also includes source code inspection, as an alternate to pickling:: + + >>> import dill.source + >>> print(dill.source.getsource(squared)) + squared = lambda x:x**2 + +To aid in debugging pickling issues, use *dill.detect* which provides +tools like pickle tracing:: + + >>> import dill.detect + >>> with dill.detect.trace(): + >>> dumps(squared) + ┬ F1: at 0x7fe074f8c280> + ├┬ F2: + │└ # F2 [34 B] + ├┬ Co: at 0x7fe07501eb30, file "", line 1> + │├┬ F2: + ││└ # F2 [19 B] + │└ # Co [87 B] + ├┬ D1: + │└ # D1 [22 B] + ├┬ D2: + │└ # D2 [2 B] + ├┬ D2: + │├┬ D2: + ││└ # D2 [2 B] + │└ # D2 [23 B] + └ # F1 [180 B] + +With trace, we see how ``dill`` stored the lambda (``F1``) by first storing +``_create_function``, the underlying code object (``Co``) and ``_create_code`` +(which is used to handle code objects), then we handle the reference to +the global dict (``D2``) plus other dictionaries (``D1`` and ``D2``) that +save the lambda object's state. A ``#`` marks when the object is actually stored. + + +More Information +================ + +Probably the best way to get started is to look at the documentation at +http://dill.rtfd.io. Also see ``dill.tests`` for a set of scripts that +demonstrate how ``dill`` can serialize different Python objects. You can +run the test suite with ``python -m dill.tests``. The contents of any +pickle file can be examined with ``undill``. As ``dill`` conforms to +the ``pickle`` interface, the examples and documentation found at +http://docs.python.org/library/pickle.html also apply to ``dill`` +if one will ``import dill as pickle``. The source code is also generally +well documented, so further questions may be resolved by inspecting the +code itself. Please feel free to submit a ticket on github, or ask a +question on stackoverflow (**@Mike McKerns**). +If you would like to share how you use ``dill`` in your work, please send +an email (to **mmckerns at uqfoundation dot org**). + + +Citation +======== + +If you use ``dill`` to do research that leads to publication, we ask that you +acknowledge use of ``dill`` by citing the following in your publication:: + + M.M. McKerns, L. Strand, T. Sullivan, A. Fang, M.A.G. Aivazis, + "Building a framework for predictive science", Proceedings of + the 10th Python in Science Conference, 2011; + http://arxiv.org/pdf/1202.1056 + + Michael McKerns and Michael Aivazis, + "pathos: a framework for heterogeneous computing", 2010- ; + https://uqfoundation.github.io/project/pathos + +Please see https://uqfoundation.github.io/project/pathos or +http://arxiv.org/pdf/1202.1056 for further information. + +''' + +__version__ = '0.4.1' +__author__ = 'Mike McKerns' + +__license__ = ''' +Copyright (c) 2004-2016 California Institute of Technology. +Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +All rights reserved. + +This software is available subject to the conditions and terms laid +out below. By downloading and using this software you are agreeing +to the following conditions. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + - Neither the names of the copyright holders nor the names of any of + the contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +''' diff --git a/local_packages/dill/__init__.py b/local_packages/dill/__init__.py new file mode 100644 index 0000000..8c45188 --- /dev/null +++ b/local_packages/dill/__init__.py @@ -0,0 +1,119 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +# author, version, license, and long description +try: # the package is installed + from .__info__ import __version__, __author__, __doc__, __license__ +except: # pragma: no cover + import os + import sys + parent = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) + sys.path.append(parent) + # get distribution meta info + from version import (__version__, __author__, + get_license_text, get_readme_as_rst) + __license__ = get_license_text(os.path.join(parent, 'LICENSE')) + __license__ = "\n%s" % __license__ + __doc__ = get_readme_as_rst(os.path.join(parent, 'README.md')) + del os, sys, parent, get_license_text, get_readme_as_rst + + +from ._dill import ( + dump, dumps, load, loads, copy, + Pickler, Unpickler, register, pickle, pickles, check, + DEFAULT_PROTOCOL, HIGHEST_PROTOCOL, HANDLE_FMODE, CONTENTS_FMODE, FILE_FMODE, + PickleError, PickleWarning, PicklingError, PicklingWarning, UnpicklingError, + UnpicklingWarning, +) +from .session import ( + dump_module, load_module, load_module_asdict, + dump_session, load_session # backward compatibility +) +from . import detect, logger, session, source, temp + +# get global settings +from .settings import settings + +# make sure "trace" is turned off +logger.trace(False) + +objects = {} +# local import of dill._objects +#from . import _objects +#objects.update(_objects.succeeds) +#del _objects + +# local import of dill.objtypes +from . import objtypes as types + +def load_types(pickleable=True, unpickleable=True): + """load pickleable and/or unpickleable types to ``dill.types`` + + ``dill.types`` is meant to mimic the ``types`` module, providing a + registry of object types. By default, the module is empty (for import + speed purposes). Use the ``load_types`` function to load selected object + types to the ``dill.types`` module. + + Args: + pickleable (bool, default=True): if True, load pickleable types. + unpickleable (bool, default=True): if True, load unpickleable types. + + Returns: + None + """ + from importlib import reload + # local import of dill.objects + from . import _objects + if pickleable: + objects.update(_objects.succeeds) + else: + [objects.pop(obj,None) for obj in _objects.succeeds] + if unpickleable: + objects.update(_objects.failures) + else: + [objects.pop(obj,None) for obj in _objects.failures] + objects.update(_objects.registered) + del _objects + # reset contents of types to 'empty' + [types.__dict__.pop(obj) for obj in list(types.__dict__.keys()) \ + if obj.find('Type') != -1] + # add corresponding types from objects to types + reload(types) + +def extend(use_dill=True): + '''add (or remove) dill types to/from the pickle registry + + by default, ``dill`` populates its types to ``pickle.Pickler.dispatch``. + Thus, all ``dill`` types are available upon calling ``'import pickle'``. + To drop all ``dill`` types from the ``pickle`` dispatch, *use_dill=False*. + + Args: + use_dill (bool, default=True): if True, extend the dispatch table. + + Returns: + None + ''' + from ._dill import _revert_extension, _extend + if use_dill: _extend() + else: _revert_extension() + return + +extend() + + +def license(): + """print license""" + print (__license__) + return + +def citation(): + """print citation""" + print (__doc__[-491:-118]) + return + +# end of file diff --git a/local_packages/dill/_dill.py b/local_packages/dill/_dill.py new file mode 100644 index 0000000..27ab56c --- /dev/null +++ b/local_packages/dill/_dill.py @@ -0,0 +1,2265 @@ +# -*- coding: utf-8 -*- +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2015 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +dill: a utility for serialization of python objects + +The primary functions in `dill` are :func:`dump` and +:func:`dumps` for serialization ("pickling") to a +file or to a string, respectively, and :func:`load` +and :func:`loads` for deserialization ("unpickling"), +similarly, from a file or from a string. Other notable +functions are :func:`~dill.dump_module` and +:func:`~dill.load_module`, which are used to save and +restore module objects, including an intepreter session. + +Based on code written by Oren Tirosh and Armin Ronacher. +Extended to a (near) full set of the builtin types (in types module), +and coded to the pickle interface, by . +Initial port to python3 by Jonathan Dobson, continued by mmckerns. +Tested against "all" python types (Std. Lib. CH 1-15 @ 2.7) by mmckerns. +Tested against CH16+ Std. Lib. ... TBD. +""" + +from __future__ import annotations + +__all__ = [ + 'dump','dumps','load','loads','copy', + 'Pickler','Unpickler','register','pickle','pickles','check', + 'DEFAULT_PROTOCOL','HIGHEST_PROTOCOL','HANDLE_FMODE','CONTENTS_FMODE','FILE_FMODE', + 'PickleError','PickleWarning','PicklingError','PicklingWarning','UnpicklingError', + 'UnpicklingWarning', +] + +__module__ = 'dill' + +import warnings +from .logger import adapter as logger +from .logger import trace as _trace +log = logger # backward compatibility (see issue #582) + +import os +import sys +diff = None +_use_diff = False +OLD38 = (sys.hexversion < 0x3080000) +OLD39 = (sys.hexversion < 0x3090000) +OLD310 = (sys.hexversion < 0x30a0000) +OLD312a7 = (sys.hexversion < 0x30c00a7) +#XXX: get types from .objtypes ? +import builtins as __builtin__ +from pickle import _Pickler as StockPickler, Unpickler as StockUnpickler +from pickle import GLOBAL, POP +from _contextvars import Context as ContextType +from _thread import LockType +from _thread import RLock as RLockType +try: + from _thread import _ExceptHookArgs as ExceptHookArgsType +except ImportError: + ExceptHookArgsType = None +try: + from _thread import _ThreadHandle as ThreadHandleType +except ImportError: + ThreadHandleType = None +#from io import IOBase +from types import CodeType, FunctionType, MethodType, GeneratorType, \ + TracebackType, FrameType, ModuleType, BuiltinMethodType +BufferType = memoryview #XXX: unregistered +ClassType = type # no 'old-style' classes +EllipsisType = type(Ellipsis) +#FileType = IOBase +NotImplementedType = type(NotImplemented) +SliceType = slice +TypeType = type # 'new-style' classes #XXX: unregistered +XRangeType = range +from types import MappingProxyType as DictProxyType, new_class +from pickle import DEFAULT_PROTOCOL, HIGHEST_PROTOCOL, PickleError, PicklingError, UnpicklingError +import __main__ as _main_module +import marshal +import gc +# import zlib +import abc +import dataclasses +from weakref import ReferenceType, ProxyType, CallableProxyType +from collections import OrderedDict +from enum import Enum, EnumMeta +from functools import partial +from operator import itemgetter, attrgetter +GENERATOR_FAIL = False +import importlib.machinery +EXTENSION_SUFFIXES = tuple(importlib.machinery.EXTENSION_SUFFIXES) +try: + import ctypes + HAS_CTYPES = True + # if using `pypy`, pythonapi is not found + IS_PYPY = not hasattr(ctypes, 'pythonapi') +except ImportError: + HAS_CTYPES = False + IS_PYPY = False +NumpyUfuncType = None +NumpyDType = None +NumpyArrayType = None +try: + if not importlib.machinery.PathFinder().find_spec('numpy'): + raise ImportError("No module named 'numpy'") + NumpyUfuncType = True + NumpyDType = True + NumpyArrayType = True +except ImportError: + pass +def __hook__(): + global NumpyArrayType, NumpyDType, NumpyUfuncType + from numpy import ufunc as NumpyUfuncType + from numpy import ndarray as NumpyArrayType + from numpy import dtype as NumpyDType + return True +if NumpyArrayType: # then has numpy + def ndarraysubclassinstance(obj_type): + if all((c.__module__, c.__name__) != ('numpy', 'ndarray') for c in obj_type.__mro__): + return False + # anything below here is a numpy array (or subclass) instance + __hook__() # import numpy (so the following works!!!) + # verify that __reduce__ has not been overridden + if obj_type.__reduce_ex__ is not NumpyArrayType.__reduce_ex__ \ + or obj_type.__reduce__ is not NumpyArrayType.__reduce__: + return False + return True + def numpyufunc(obj_type): + return any((c.__module__, c.__name__) == ('numpy', 'ufunc') for c in obj_type.__mro__) + def numpydtype(obj_type): + if all((c.__module__, c.__name__) != ('numpy', 'dtype') for c in obj_type.__mro__): + return False + # anything below here is a numpy dtype + __hook__() # import numpy (so the following works!!!) + return obj_type is type(NumpyDType) # handles subclasses +else: + def ndarraysubclassinstance(obj): return False + def numpyufunc(obj): return False + def numpydtype(obj): return False + +from types import GetSetDescriptorType, ClassMethodDescriptorType, \ + WrapperDescriptorType, MethodDescriptorType, MemberDescriptorType, \ + MethodWrapperType #XXX: unused + +# make sure to add these 'hand-built' types to _typemap +CellType = type((lambda x: lambda y: x)(0).__closure__[0]) +PartialType = type(partial(int, base=2)) +SuperType = type(super(Exception, TypeError())) +ItemGetterType = type(itemgetter(0)) +AttrGetterType = type(attrgetter('__repr__')) + +try: + from functools import _lru_cache_wrapper as LRUCacheType +except ImportError: + LRUCacheType = None + +if not isinstance(LRUCacheType, type): + LRUCacheType = None + +def get_file_type(*args, **kwargs): + open = kwargs.pop("open", __builtin__.open) + f = open(os.devnull, *args, **kwargs) + t = type(f) + f.close() + return t + +IS_PYODIDE = sys.platform == 'emscripten' + +FileType = get_file_type('rb', buffering=0) +TextWrapperType = get_file_type('r', buffering=-1) +BufferedRandomType = None if IS_PYODIDE else get_file_type('r+b', buffering=-1) +BufferedReaderType = get_file_type('rb', buffering=-1) +BufferedWriterType = get_file_type('wb', buffering=-1) +try: + from _pyio import open as _open + PyTextWrapperType = get_file_type('r', buffering=-1, open=_open) + PyBufferedRandomType = None if IS_PYODIDE else get_file_type('r+b', buffering=-1, open=_open) + PyBufferedReaderType = get_file_type('rb', buffering=-1, open=_open) + PyBufferedWriterType = get_file_type('wb', buffering=-1, open=_open) +except ImportError: + PyTextWrapperType = PyBufferedRandomType = PyBufferedReaderType = PyBufferedWriterType = None +from io import BytesIO as StringIO +InputType = OutputType = None +from socket import socket as SocketType +#FIXME: additionally calls ForkingPickler.register several times +from multiprocessing.reduction import _reduce_socket as reduce_socket +try: #pragma: no cover + IS_IPYTHON = __IPYTHON__ # is True + ExitType = None # IPython.core.autocall.ExitAutocall + IPYTHON_SINGLETONS = ('exit', 'quit', 'get_ipython') +except NameError: + IS_IPYTHON = False + try: ExitType = type(exit) # apparently 'exit' can be removed + except NameError: ExitType = None + IPYTHON_SINGLETONS = () + +import inspect +import typing + + +### Shims for different versions of Python and dill +class Sentinel(object): + """ + Create a unique sentinel object that is pickled as a constant. + """ + def __init__(self, name, module_name=None): + self.name = name + if module_name is None: + # Use the calling frame's module + self.__module__ = inspect.currentframe().f_back.f_globals['__name__'] + else: + self.__module__ = module_name # pragma: no cover + def __repr__(self): + return self.__module__ + '.' + self.name # pragma: no cover + def __copy__(self): + return self # pragma: no cover + def __deepcopy__(self, memo): + return self # pragma: no cover + def __reduce__(self): + return self.name + def __reduce_ex__(self, protocol): + return self.name + +from . import _shims +from ._shims import Reduce, Getattr + +### File modes +#: Pickles the file handle, preserving mode. The position of the unpickled +#: object is as for a new file handle. +HANDLE_FMODE = 0 +#: Pickles the file contents, creating a new file if on load the file does +#: not exist. The position = min(pickled position, EOF) and mode is chosen +#: as such that "best" preserves behavior of the original file. +CONTENTS_FMODE = 1 +#: Pickles the entire file (handle and contents), preserving mode and position. +FILE_FMODE = 2 + +### Shorthands (modified from python2.5/lib/pickle.py) +def copy(obj, *args, **kwds): + """ + Use pickling to 'copy' an object (i.e. `loads(dumps(obj))`). + + See :func:`dumps` and :func:`loads` for keyword arguments. + """ + ignore = kwds.pop('ignore', Unpickler.settings['ignore']) + return loads(dumps(obj, *args, **kwds), ignore=ignore) + +def dump(obj, file, protocol=None, byref=None, fmode=None, recurse=None, **kwds):#, strictio=None): + """ + Pickle an object to a file. + + See :func:`dumps` for keyword arguments. + """ + from .settings import settings + protocol = settings['protocol'] if protocol is None else int(protocol) + _kwds = kwds.copy() + _kwds.update(dict(byref=byref, fmode=fmode, recurse=recurse)) + Pickler(file, protocol, **_kwds).dump(obj) + return + +def dumps(obj, protocol=None, byref=None, fmode=None, recurse=None, **kwds):#, strictio=None): + """ + Pickle an object to a string. + + *protocol* is the pickler protocol, as defined for Python *pickle*. + + If *byref=True*, then dill behaves a lot more like pickle as certain + objects (like modules) are pickled by reference as opposed to attempting + to pickle the object itself. + + If *recurse=True*, then objects referred to in the global dictionary + are recursively traced and pickled, instead of the default behavior + of attempting to store the entire global dictionary. This is needed for + functions defined via *exec()*. + + *fmode* (:const:`HANDLE_FMODE`, :const:`CONTENTS_FMODE`, + or :const:`FILE_FMODE`) indicates how file handles will be pickled. + For example, when pickling a data file handle for transfer to a remote + compute service, *FILE_FMODE* will include the file contents in the + pickle and cursor position so that a remote method can operate + transparently on an object with an open file handle. + + Default values for keyword arguments can be set in :mod:`dill.settings`. + """ + file = StringIO() + dump(obj, file, protocol, byref, fmode, recurse, **kwds)#, strictio) + return file.getvalue() + +def load(file, ignore=None, **kwds): + """ + Unpickle an object from a file. + + See :func:`loads` for keyword arguments. + """ + return Unpickler(file, ignore=ignore, **kwds).load() + +def loads(str, ignore=None, **kwds): + """ + Unpickle an object from a string. + + If *ignore=False* then objects whose class is defined in the module + *__main__* are updated to reference the existing class in *__main__*, + otherwise they are left to refer to the reconstructed type, which may + be different. + + Default values for keyword arguments can be set in :mod:`dill.settings`. + """ + file = StringIO(str) + return load(file, ignore, **kwds) + +# def dumpzs(obj, protocol=None): +# """pickle an object to a compressed string""" +# return zlib.compress(dumps(obj, protocol)) + +# def loadzs(str): +# """unpickle an object from a compressed string""" +# return loads(zlib.decompress(str)) + +### End: Shorthands ### + +class MetaCatchingDict(dict): + def get(self, key, default=None): + try: + return self[key] + except KeyError: + return default + + def __missing__(self, key): + if issubclass(key, type): + return save_type + else: + raise KeyError() + +class PickleWarning(Warning, PickleError): + pass + +class PicklingWarning(PickleWarning, PicklingError): + pass + +class UnpicklingWarning(PickleWarning, UnpicklingError): + pass + +### Extend the Picklers +class Pickler(StockPickler): + """python's Pickler extended to interpreter sessions""" + dispatch: typing.Dict[type, typing.Callable[[Pickler, typing.Any], None]] \ + = MetaCatchingDict(StockPickler.dispatch.copy()) + """The dispatch table, a dictionary of serializing functions used + by Pickler to save objects of specific types. Use :func:`pickle` + or :func:`register` to associate types to custom functions. + + :meta hide-value: + """ + _session = False + from .settings import settings + + def __init__(self, file, *args, **kwds): + settings = Pickler.settings + _byref = kwds.pop('byref', None) + #_strictio = kwds.pop('strictio', None) + _fmode = kwds.pop('fmode', None) + _recurse = kwds.pop('recurse', None) + StockPickler.__init__(self, file, *args, **kwds) + self._main = _main_module + self._diff_cache = {} + self._byref = settings['byref'] if _byref is None else _byref + self._strictio = False #_strictio + self._fmode = settings['fmode'] if _fmode is None else _fmode + self._recurse = settings['recurse'] if _recurse is None else _recurse + self._postproc = OrderedDict() + self._file = file + + def save(self, obj, save_persistent_id=True): + # numpy hack + obj_type = type(obj) + if NumpyArrayType and not (obj_type is type or obj_type in Pickler.dispatch): + # register if the object is a numpy ufunc + # thanks to Paul Kienzle for pointing out ufuncs didn't pickle + if numpyufunc(obj_type): + @register(obj_type) + def save_numpy_ufunc(pickler, obj): + logger.trace(pickler, "Nu: %s", obj) + name = getattr(obj, '__qualname__', getattr(obj, '__name__', None)) + StockPickler.save_global(pickler, obj, name=name) + logger.trace(pickler, "# Nu") + return + # NOTE: the above 'save' performs like: + # import copy_reg + # def udump(f): return f.__name__ + # def uload(name): return getattr(numpy, name) + # copy_reg.pickle(NumpyUfuncType, udump, uload) + # register if the object is a numpy dtype + if numpydtype(obj_type): + @register(obj_type) + def save_numpy_dtype(pickler, obj): + logger.trace(pickler, "Dt: %s", obj) + pickler.save_reduce(_create_dtypemeta, (obj.type,), obj=obj) + logger.trace(pickler, "# Dt") + return + # NOTE: the above 'save' performs like: + # import copy_reg + # def uload(name): return type(NumpyDType(name)) + # def udump(f): return uload, (f.type,) + # copy_reg.pickle(NumpyDTypeType, udump, uload) + # register if the object is a subclassed numpy array instance + if ndarraysubclassinstance(obj_type): + @register(obj_type) + def save_numpy_array(pickler, obj): + logger.trace(pickler, "Nu: (%s, %s)", obj.shape, obj.dtype) + npdict = getattr(obj, '__dict__', None) + f, args, state = obj.__reduce__() + pickler.save_reduce(_create_array, (f,args,state,npdict), obj=obj) + logger.trace(pickler, "# Nu") + return + # end numpy hack + + if GENERATOR_FAIL and obj_type is GeneratorType: + msg = "Can't pickle %s: attribute lookup builtins.generator failed" % GeneratorType + raise PicklingError(msg) + StockPickler.save(self, obj, save_persistent_id) + + save.__doc__ = StockPickler.save.__doc__ + + def dump(self, obj): #NOTE: if settings change, need to update attributes + logger.trace_setup(self) + StockPickler.dump(self, obj) + dump.__doc__ = StockPickler.dump.__doc__ + +class Unpickler(StockUnpickler): + """python's Unpickler extended to interpreter sessions and more types""" + from .settings import settings + _session = False + + def find_class(self, module, name): + if (module, name) == ('__builtin__', '__main__'): + return self._main.__dict__ #XXX: above set w/save_module_dict + elif (module, name) == ('__builtin__', 'NoneType'): + return type(None) #XXX: special case: NoneType missing + if module == 'dill.dill': module = 'dill._dill' + return StockUnpickler.find_class(self, module, name) + + def __init__(self, *args, **kwds): + settings = Pickler.settings + _ignore = kwds.pop('ignore', None) + StockUnpickler.__init__(self, *args, **kwds) + self._main = _main_module + self._ignore = settings['ignore'] if _ignore is None else _ignore + + def load(self): #NOTE: if settings change, need to update attributes + obj = StockUnpickler.load(self) + if type(obj).__module__ == getattr(_main_module, '__name__', '__main__'): + if not self._ignore: + # point obj class to main + try: obj.__class__ = getattr(self._main, type(obj).__name__) + except (AttributeError,TypeError): pass # defined in a file + #_main_module.__dict__.update(obj.__dict__) #XXX: should update globals ? + return obj + load.__doc__ = StockUnpickler.load.__doc__ + pass + +''' +def dispatch_table(): + """get the dispatch table of registered types""" + return Pickler.dispatch +''' + +pickle_dispatch_copy = StockPickler.dispatch.copy() + +def pickle(t, func): + """expose :attr:`~Pickler.dispatch` table for user-created extensions""" + Pickler.dispatch[t] = func + return + +def register(t): + """decorator to register types to Pickler's :attr:`~Pickler.dispatch` table""" + def proxy(func): + Pickler.dispatch[t] = func + return func + return proxy + +def _revert_extension(): + """drop dill-registered types from pickle's dispatch table""" + for type, func in list(StockPickler.dispatch.items()): + if func.__module__ == __name__: + del StockPickler.dispatch[type] + if type in pickle_dispatch_copy: + StockPickler.dispatch[type] = pickle_dispatch_copy[type] + +def use_diff(on=True): + """ + Reduces size of pickles by only including object which have changed. + + Decreases pickle size but increases CPU time needed. + Also helps avoid some unpickleable objects. + MUST be called at start of script, otherwise changes will not be recorded. + """ + global _use_diff, diff + _use_diff = on + if _use_diff and diff is None: + try: + from . import diff as d + except ImportError: + import diff as d + diff = d + +def _create_typemap(): + import types + d = dict(list(__builtin__.__dict__.items()) + \ + list(types.__dict__.items())).items() + for key, value in d: + if getattr(value, '__module__', None) == 'builtins' \ + and type(value) is type: + yield key, value + return +_reverse_typemap = dict(_create_typemap()) +_reverse_typemap.update({ + 'PartialType': PartialType, + 'SuperType': SuperType, + 'ItemGetterType': ItemGetterType, + 'AttrGetterType': AttrGetterType, +}) +if sys.hexversion < 0x30800a2: + _reverse_typemap.update({ + 'CellType': CellType, + }) + +# "Incidental" implementation specific types. Unpickling these types in another +# implementation of Python (PyPy -> CPython) is not guaranteed to work + +# This dictionary should contain all types that appear in Python implementations +# but are not defined in https://docs.python.org/3/library/types.html#standard-interpreter-types +x=OrderedDict() +_incedental_reverse_typemap = { + 'FileType': FileType, + 'BufferedRandomType': BufferedRandomType, + 'BufferedReaderType': BufferedReaderType, + 'BufferedWriterType': BufferedWriterType, + 'TextWrapperType': TextWrapperType, + 'PyBufferedRandomType': PyBufferedRandomType, + 'PyBufferedReaderType': PyBufferedReaderType, + 'PyBufferedWriterType': PyBufferedWriterType, + 'PyTextWrapperType': PyTextWrapperType, +} + +_incedental_reverse_typemap.update({ + "DictKeysType": type({}.keys()), + "DictValuesType": type({}.values()), + "DictItemsType": type({}.items()), + + "OdictKeysType": type(x.keys()), + "OdictValuesType": type(x.values()), + "OdictItemsType": type(x.items()), +}) + +if ExitType: + _incedental_reverse_typemap['ExitType'] = ExitType +if InputType: + _incedental_reverse_typemap['InputType'] = InputType + _incedental_reverse_typemap['OutputType'] = OutputType + +''' +try: + import symtable + _incedental_reverse_typemap["SymtableEntryType"] = type(symtable.symtable("", "string", "exec")._table) +except: #FIXME: fails to pickle + pass + +if sys.hexversion >= 0x30a00a0: + _incedental_reverse_typemap['LineIteratorType'] = type(compile('3', '', 'eval').co_lines()) +''' + +if sys.hexversion >= 0x30b00b0 and not IS_PYPY: + from types import GenericAlias + _incedental_reverse_typemap["GenericAliasIteratorType"] = type(iter(GenericAlias(list, (int,)))) + ''' + _incedental_reverse_typemap['PositionsIteratorType'] = type(compile('3', '', 'eval').co_positions()) + ''' + +try: + import winreg + _incedental_reverse_typemap["HKEYType"] = winreg.HKEYType +except ImportError: + pass + +_reverse_typemap.update(_incedental_reverse_typemap) +_incedental_types = set(_incedental_reverse_typemap.values()) + +del x + +_typemap = dict((v, k) for k, v in _reverse_typemap.items()) + +def _unmarshal(string): + return marshal.loads(string) + +def _load_type(name): + return _reverse_typemap[name] + +def _create_type(typeobj, *args): + return typeobj(*args) + +def _create_function(fcode, fglobals, fname=None, fdefaults=None, + fclosure=None, fdict=None, fkwdefaults=None): + # same as FunctionType, but enable passing __dict__ to new function, + # __dict__ is the storehouse for attributes added after function creation + func = FunctionType(fcode, fglobals or dict(), fname, fdefaults, fclosure) + if fdict is not None: + func.__dict__.update(fdict) #XXX: better copy? option to copy? + if fkwdefaults is not None: + func.__kwdefaults__ = fkwdefaults + # 'recurse' only stores referenced modules/objects in fglobals, + # thus we need to make sure that we have __builtins__ as well + if "__builtins__" not in func.__globals__: + func.__globals__["__builtins__"] = globals()["__builtins__"] + # assert id(fglobals) == id(func.__globals__) + return func + +class match: + """ + Make avaialable a limited structural pattern matching-like syntax for Python < 3.10 + + Patterns can be only tuples (without types) currently. + Inspired by the package pattern-matching-PEP634. + + Usage: + >>> with match(args) as m: + >>> if m.case(('x', 'y')): + >>> # use m.x and m.y + >>> elif m.case(('x', 'y', 'z')): + >>> # use m.x, m.y and m.z + + Equivalent native code for Python >= 3.10: + >>> match args: + >>> case (x, y): + >>> # use x and y + >>> case (x, y, z): + >>> # use x, y and z + """ + def __init__(self, value): + self.value = value + self._fields = None + def __enter__(self): + return self + def __exit__(self, *exc_info): + return False + def case(self, args): # *args, **kwargs): + """just handles tuple patterns""" + if len(self.value) != len(args): # + len(kwargs): + return False + #if not all(isinstance(arg, pat) for arg, pat in zip(self.value[len(args):], kwargs.values())): + # return False + self.args = args # (*args, *kwargs) + return True + @property + def fields(self): + # Only bind names to values if necessary. + if self._fields is None: + self._fields = dict(zip(self.args, self.value)) + return self._fields + def __getattr__(self, item): + return self.fields[item] + +ALL_CODE_PARAMS = [ + # Version New attribute CodeType parameters + ((3,11,'a'), 'co_endlinetable', 'argcount posonlyargcount kwonlyargcount nlocals stacksize flags code consts names varnames filename name qualname firstlineno linetable endlinetable columntable exceptiontable freevars cellvars'), + ((3,11), 'co_exceptiontable', 'argcount posonlyargcount kwonlyargcount nlocals stacksize flags code consts names varnames filename name qualname firstlineno linetable exceptiontable freevars cellvars'), + ((3,11,'p'), 'co_qualname', 'argcount posonlyargcount kwonlyargcount nlocals stacksize flags code consts names varnames filename name qualname firstlineno linetable freevars cellvars'), + ((3,10), 'co_linetable', 'argcount posonlyargcount kwonlyargcount nlocals stacksize flags code consts names varnames filename name firstlineno linetable freevars cellvars'), + ((3,8), 'co_posonlyargcount', 'argcount posonlyargcount kwonlyargcount nlocals stacksize flags code consts names varnames filename name firstlineno lnotab freevars cellvars'), + ((3,7), 'co_kwonlyargcount', 'argcount kwonlyargcount nlocals stacksize flags code consts names varnames filename name firstlineno lnotab freevars cellvars'), + ] +for version, new_attr, params in ALL_CODE_PARAMS: + if hasattr(CodeType, new_attr): + CODE_VERSION = version + CODE_PARAMS = params.split() + break +ENCODE_PARAMS = set(CODE_PARAMS).intersection( + ['code', 'lnotab', 'linetable', 'endlinetable', 'columntable', 'exceptiontable']) + +def _create_code(*args): + if not isinstance(args[0], int): # co_lnotab stored from >= 3.10 + LNOTAB, *args = args + else: # from < 3.10 (or pre-LNOTAB storage) + LNOTAB = b'' + + with match(args) as m: + # Python 3.11/3.12a (18 members) + if m.case(( + 'argcount', 'posonlyargcount', 'kwonlyargcount', 'nlocals', 'stacksize', 'flags', # args[0:6] + 'code', 'consts', 'names', 'varnames', 'filename', 'name', 'qualname', 'firstlineno', # args[6:14] + 'linetable', 'exceptiontable', 'freevars', 'cellvars' # args[14:] + )): + if CODE_VERSION == (3,11): + return CodeType( + *args[:6], + args[6].encode() if hasattr(args[6], 'encode') else args[6], # code + *args[7:14], + args[14].encode() if hasattr(args[14], 'encode') else args[14], # linetable + args[15].encode() if hasattr(args[15], 'encode') else args[15], # exceptiontable + args[16], + args[17], + ) + fields = m.fields + # PyPy 3.11 7.3.19+ (17 members) + elif m.case(( + 'argcount', 'posonlyargcount', 'kwonlyargcount', 'nlocals', 'stacksize', 'flags', # args[0:6] + 'code', 'consts', 'names', 'varnames', 'filename', 'name', 'qualname', # args[6:13] + 'firstlineno', 'linetable', 'freevars', 'cellvars' # args[13:] + )): + if CODE_VERSION == (3,11,'p'): + return CodeType( + *args[:6], + args[6].encode() if hasattr(args[6], 'encode') else args[6], # code + *args[7:14], + args[14].encode() if hasattr(args[14], 'encode') else args[14], # linetable + args[15], + args[16], + ) + fields = m.fields + # Python 3.10 or 3.8/3.9 (16 members) + elif m.case(( + 'argcount', 'posonlyargcount', 'kwonlyargcount', 'nlocals', 'stacksize', 'flags', # args[0:6] + 'code', 'consts', 'names', 'varnames', 'filename', 'name', 'firstlineno', # args[6:13] + 'LNOTAB_OR_LINETABLE', 'freevars', 'cellvars' # args[13:] + )): + if CODE_VERSION == (3,10) or CODE_VERSION == (3,8): + return CodeType( + *args[:6], + args[6].encode() if hasattr(args[6], 'encode') else args[6], # code + *args[7:13], + args[13].encode() if hasattr(args[13], 'encode') else args[13], # lnotab/linetable + args[14], + args[15], + ) + fields = m.fields + if CODE_VERSION >= (3,10): + fields['linetable'] = m.LNOTAB_OR_LINETABLE + else: + fields['lnotab'] = LNOTAB if LNOTAB else m.LNOTAB_OR_LINETABLE + # Python 3.7 (15 args) + elif m.case(( + 'argcount', 'kwonlyargcount', 'nlocals', 'stacksize', 'flags', # args[0:5] + 'code', 'consts', 'names', 'varnames', 'filename', 'name', 'firstlineno', # args[5:12] + 'lnotab', 'freevars', 'cellvars' # args[12:] + )): + if CODE_VERSION == (3,7): + return CodeType( + *args[:5], + args[5].encode() if hasattr(args[5], 'encode') else args[5], # code + *args[6:12], + args[12].encode() if hasattr(args[12], 'encode') else args[12], # lnotab + args[13], + args[14], + ) + fields = m.fields + # Python 3.11a (20 members) + elif m.case(( + 'argcount', 'posonlyargcount', 'kwonlyargcount', 'nlocals', 'stacksize', 'flags', # args[0:6] + 'code', 'consts', 'names', 'varnames', 'filename', 'name', 'qualname', 'firstlineno', # args[6:14] + 'linetable', 'endlinetable', 'columntable', 'exceptiontable', 'freevars', 'cellvars' # args[14:] + )): + if CODE_VERSION == (3,11,'a'): + return CodeType( + *args[:6], + args[6].encode() if hasattr(args[6], 'encode') else args[6], # code + *args[7:14], + *(a.encode() if hasattr(a, 'encode') else a for a in args[14:18]), # linetable-exceptiontable + args[18], + args[19], + ) + fields = m.fields + else: + raise UnpicklingError("pattern match for code object failed") + + # The args format doesn't match this version. + fields.setdefault('posonlyargcount', 0) # from python <= 3.7 + fields.setdefault('lnotab', LNOTAB) # from python >= 3.10 + fields.setdefault('linetable', b'') # from python <= 3.9 + fields.setdefault('qualname', fields['name']) # from python <= 3.10 + fields.setdefault('exceptiontable', b'') # from python <= 3.10 + fields.setdefault('endlinetable', None) # from python != 3.11a + fields.setdefault('columntable', None) # from python != 3.11a + + args = (fields[k].encode() if k in ENCODE_PARAMS and hasattr(fields[k], 'encode') else fields[k] + for k in CODE_PARAMS) + return CodeType(*args) + +def _create_ftype(ftypeobj, func, args, kwds): + if kwds is None: + kwds = {} + if args is None: + args = () + return ftypeobj(func, *args, **kwds) + +def _create_typing_tuple(argz, *args): #NOTE: workaround python/cpython#94245 + if not argz: + return typing.Tuple[()].copy_with(()) + if argz == ((),): + return typing.Tuple[()] + return typing.Tuple[argz] + +if ThreadHandleType: + def _create_thread_handle(ident, done, *args): #XXX: ignores 'blocking' + from threading import _make_thread_handle + handle = _make_thread_handle(ident) + if done: + handle._set_done() + return handle + +def _create_lock(locked, *args): #XXX: ignores 'blocking' + from threading import Lock + lock = Lock() + if locked: + if not lock.acquire(False): + raise UnpicklingError("Cannot acquire lock") + return lock + +def _create_rlock(count, owner, *args): #XXX: ignores 'blocking' + lock = RLockType() + if owner is not None: + lock._acquire_restore((count, owner)) + if owner and not lock._is_owned(): + raise UnpicklingError("Cannot acquire lock") + return lock + +# thanks to matsjoyce for adding all the different file modes +def _create_filehandle(name, mode, position, closed, open, strictio, fmode, fdata): # buffering=0 + # only pickles the handle, not the file contents... good? or StringIO(data)? + # (for file contents see: http://effbot.org/librarybook/copy-reg.htm) + # NOTE: handle special cases first (are there more special cases?) + names = {'':sys.__stdin__, '':sys.__stdout__, + '':sys.__stderr__} #XXX: better fileno=(0,1,2) ? + if name in list(names.keys()): + f = names[name] #XXX: safer "f=sys.stdin" + elif name == '': + f = os.tmpfile() + elif name == '': + import tempfile + f = tempfile.TemporaryFile(mode) + else: + try: + exists = os.path.exists(name) + except Exception: + exists = False + if not exists: + if strictio: + raise FileNotFoundError("[Errno 2] No such file or directory: '%s'" % name) + elif "r" in mode and fmode != FILE_FMODE: + name = '' # or os.devnull? + current_size = 0 # or maintain position? + else: + current_size = os.path.getsize(name) + + if position > current_size: + if strictio: + raise ValueError("invalid buffer size") + elif fmode == CONTENTS_FMODE: + position = current_size + # try to open the file by name + # NOTE: has different fileno + try: + #FIXME: missing: *buffering*, encoding, softspace + if fmode == FILE_FMODE: + f = open(name, mode if "w" in mode else "w") + f.write(fdata) + if "w" not in mode: + f.close() + f = open(name, mode) + elif name == '': # file did not exist + import tempfile + f = tempfile.TemporaryFile(mode) + # treat x mode as w mode + elif fmode == CONTENTS_FMODE \ + and ("w" in mode or "x" in mode): + # stop truncation when opening + flags = os.O_CREAT + if "+" in mode: + flags |= os.O_RDWR + else: + flags |= os.O_WRONLY + f = os.fdopen(os.open(name, flags), mode) + # set name to the correct value + r = getattr(f, "buffer", f) + r = getattr(r, "raw", r) + r.name = name + assert f.name == name + else: + f = open(name, mode) + except (IOError, FileNotFoundError): + err = sys.exc_info()[1] + raise UnpicklingError(err) + if closed: + f.close() + elif position >= 0 and fmode != HANDLE_FMODE: + f.seek(position) + return f + +def _create_stringi(value, position, closed): + f = StringIO(value) + if closed: f.close() + else: f.seek(position) + return f + +def _create_stringo(value, position, closed): + f = StringIO() + if closed: f.close() + else: + f.write(value) + f.seek(position) + return f + +class _itemgetter_helper(object): + def __init__(self): + self.items = [] + def __getitem__(self, item): + self.items.append(item) + return + +class _attrgetter_helper(object): + def __init__(self, attrs, index=None): + self.attrs = attrs + self.index = index + def __getattribute__(self, attr): + attrs = object.__getattribute__(self, "attrs") + index = object.__getattribute__(self, "index") + if index is None: + index = len(attrs) + attrs.append(attr) + else: + attrs[index] = ".".join([attrs[index], attr]) + return type(self)(attrs, index) + +class _dictproxy_helper(dict): + def __ror__(self, a): + return a + +_dictproxy_helper_instance = _dictproxy_helper() + +__d = {} +try: + # In CPython 3.9 and later, this trick can be used to exploit the + # implementation of the __or__ function of MappingProxyType to get the true + # mapping referenced by the proxy. It may work for other implementations, + # but is not guaranteed. + MAPPING_PROXY_TRICK = __d is (DictProxyType(__d) | _dictproxy_helper_instance) +except Exception: + MAPPING_PROXY_TRICK = False +del __d + +# _CELL_REF and _CELL_EMPTY are used to stay compatible with versions of dill +# whose _create_cell functions do not have a default value. +# _CELL_REF can be safely removed entirely (replaced by empty tuples for calls +# to _create_cell) once breaking changes are allowed. +_CELL_REF = None +_CELL_EMPTY = Sentinel('_CELL_EMPTY') + +def _create_cell(contents=None): + if contents is not _CELL_EMPTY: + value = contents + return (lambda: value).__closure__[0] + +def _create_weakref(obj, *args): + from weakref import ref + if obj is None: # it's dead + from collections import UserDict + return ref(UserDict(), *args) + return ref(obj, *args) + +def _create_weakproxy(obj, callable=False, *args): + from weakref import proxy + if obj is None: # it's dead + if callable: return proxy(lambda x:x, *args) + from collections import UserDict + return proxy(UserDict(), *args) + return proxy(obj, *args) + +def _eval_repr(repr_str): + return eval(repr_str) + +def _create_array(f, args, state, npdict=None): + #array = numpy.core.multiarray._reconstruct(*args) + array = f(*args) + array.__setstate__(state) + if npdict is not None: # we also have saved state in __dict__ + array.__dict__.update(npdict) + return array + +def _create_dtypemeta(scalar_type): + if NumpyDType is True: __hook__() # a bit hacky I think + if scalar_type is None: + return NumpyDType + return type(NumpyDType(scalar_type)) + +def _create_namedtuple(name, fieldnames, modulename, defaults=None): + class_ = _import_module(modulename + '.' + name, safe=True) + if class_ is not None: + return class_ + import collections + t = collections.namedtuple(name, fieldnames, defaults=defaults, module=modulename) + return t + +def _create_capsule(pointer, name, context, destructor): + attr_found = False + try: + # based on https://github.com/python/cpython/blob/f4095e53ab708d95e019c909d5928502775ba68f/Objects/capsule.c#L209-L231 + uname = name.decode('utf8') + for i in range(1, uname.count('.')+1): + names = uname.rsplit('.', i) + try: + module = __import__(names[0]) + except ImportError: + pass + obj = module + for attr in names[1:]: + obj = getattr(obj, attr) + capsule = obj + attr_found = True + break + except Exception: + pass + + if attr_found: + if _PyCapsule_IsValid(capsule, name): + return capsule + raise UnpicklingError("%s object exists at %s but a PyCapsule object was expected." % (type(capsule), name)) + else: + #warnings.warn('Creating a new PyCapsule %s for a C data structure that may not be present in memory. Segmentation faults or other memory errors are possible.' % (name,), UnpicklingWarning) + capsule = _PyCapsule_New(pointer, name, destructor) + _PyCapsule_SetContext(capsule, context) + return capsule + +def _getattr(objclass, name, repr_str): + # hack to grab the reference directly + try: #XXX: works only for __builtin__ ? + attr = repr_str.split("'")[3] + return eval(attr+'.__dict__["'+name+'"]') + except Exception: + try: + attr = objclass.__dict__ + if type(attr) is DictProxyType: + if sys.hexversion > 0x30f00a0 and name in ('__weakref__','__dict__'): + attr = _dictproxy_helper.__dict__[name] + else: + attr = attr[name] + else: + attr = getattr(objclass,name) + except (AttributeError, KeyError): + attr = getattr(objclass,name) + return attr + +def _get_attr(self, name): + # stop recursive pickling + return getattr(self, name, None) or getattr(__builtin__, name) + +def _import_module(import_name, safe=False): + try: + if import_name.startswith('__runtime__.'): + return sys.modules[import_name] + elif '.' in import_name: + items = import_name.split('.') + module = '.'.join(items[:-1]) + obj = items[-1] + submodule = getattr(__import__(module, None, None, [obj]), obj) + if isinstance(submodule, (ModuleType, type)): + return submodule + return __import__(import_name, None, None, [obj]) + else: + return __import__(import_name) + except (ImportError, AttributeError, KeyError): + if safe: + return None + raise + +# https://github.com/python/cpython/blob/a8912a0f8d9eba6d502c37d522221f9933e976db/Lib/pickle.py#L322-L333 +def _getattribute(obj, name): + for subpath in name.split('.'): + if subpath == '': + raise AttributeError("Can't get local attribute {!r} on {!r}" + .format(name, obj)) + try: + parent = obj + obj = getattr(obj, subpath) + except AttributeError: + raise AttributeError("Can't get attribute {!r} on {!r}" + .format(name, obj)) + return obj, parent + +def _locate_function(obj, pickler=None): + module_name = getattr(obj, '__module__', None) + if module_name in ['__main__', None] or \ + pickler and is_dill(pickler, child=False) and pickler._session and module_name == pickler._main.__name__: + return False + if hasattr(obj, '__qualname__'): + module = _import_module(module_name, safe=True) + try: + found, _ = _getattribute(module, obj.__qualname__) + return found is obj + except AttributeError: + return False + else: + found = _import_module(module_name + '.' + obj.__name__, safe=True) + return found is obj + + +def _setitems(dest, source): + for k, v in source.items(): + dest[k] = v + + +def _save_with_postproc(pickler, reduction, is_pickler_dill=None, obj=Getattr.NO_DEFAULT, postproc_list=None): + if obj is Getattr.NO_DEFAULT: + obj = Reduce(reduction) # pragma: no cover + + if is_pickler_dill is None: + is_pickler_dill = is_dill(pickler, child=True) + if is_pickler_dill: + # assert id(obj) not in pickler._postproc, str(obj) + ' already pushed on stack!' + # if not hasattr(pickler, 'x'): pickler.x = 0 + # print(pickler.x*' ', 'push', obj, id(obj), pickler._recurse) + # pickler.x += 1 + if postproc_list is None: + postproc_list = [] + + # Recursive object not supported. Default to a global instead. + if id(obj) in pickler._postproc: + name = '%s.%s ' % (obj.__module__, getattr(obj, '__qualname__', obj.__name__)) if hasattr(obj, '__module__') else '' + warnings.warn('Cannot pickle %r: %shas recursive self-references that trigger a RecursionError.' % (obj, name), PicklingWarning) + pickler.save_global(obj) + return + pickler._postproc[id(obj)] = postproc_list + + # TODO: Use state_setter in Python 3.8 to allow for faster cPickle implementations + pickler.save_reduce(*reduction, obj=obj) + + if is_pickler_dill: + # pickler.x -= 1 + # print(pickler.x*' ', 'pop', obj, id(obj)) + postproc = pickler._postproc.pop(id(obj)) + # assert postproc_list == postproc, 'Stack tampered!' + for reduction in reversed(postproc): + if reduction[0] is _setitems: + # use the internal machinery of pickle.py to speedup when + # updating a dictionary in postproc + dest, source = reduction[1] + if source: + pickler.write(pickler.get(pickler.memo[id(dest)][0])) + if sys.hexversion < 0x30e00a1: + pickler._batch_setitems(iter(source.items())) + else: + pickler._batch_setitems(iter(source.items()), obj=obj) + else: + # Updating with an empty dictionary. Same as doing nothing. + continue + else: + pickler.save_reduce(*reduction) + # pop None created by calling preprocessing step off stack + pickler.write(POP) + +#@register(CodeType) +#def save_code(pickler, obj): +# logger.trace(pickler, "Co: %s", obj) +# pickler.save_reduce(_unmarshal, (marshal.dumps(obj),), obj=obj) +# logger.trace(pickler, "# Co") +# return + +# The following function is based on 'save_codeobject' from 'cloudpickle' +# Copyright (c) 2012, Regents of the University of California. +# Copyright (c) 2009 `PiCloud, Inc. `_. +# License: https://github.com/cloudpipe/cloudpickle/blob/master/LICENSE +@register(CodeType) +def save_code(pickler, obj): + logger.trace(pickler, "Co: %s", obj) + if hasattr(obj, "co_endlinetable"): # python 3.11a (20 args) + args = ( + obj.co_lnotab, # for < python 3.10 [not counted in args] + obj.co_argcount, obj.co_posonlyargcount, + obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, + obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, + obj.co_varnames, obj.co_filename, obj.co_name, obj.co_qualname, + obj.co_firstlineno, obj.co_linetable, obj.co_endlinetable, + obj.co_columntable, obj.co_exceptiontable, obj.co_freevars, + obj.co_cellvars + ) + elif hasattr(obj, "co_exceptiontable"): # python 3.11 (18 args) + with warnings.catch_warnings(): + if not OLD312a7: # issue 597 + warnings.filterwarnings('ignore', category=DeprecationWarning) + args = ( + obj.co_lnotab, # for < python 3.10 [not counted in args] + obj.co_argcount, obj.co_posonlyargcount, + obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, + obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, + obj.co_varnames, obj.co_filename, obj.co_name, obj.co_qualname, + obj.co_firstlineno, obj.co_linetable, obj.co_exceptiontable, + obj.co_freevars, obj.co_cellvars + ) + elif hasattr(obj, "co_qualname"): # pypy 3.11 7.3.19+ (17 args) + args = ( + obj.co_lnotab, obj.co_argcount, obj.co_posonlyargcount, + obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, + obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, + obj.co_varnames, obj.co_filename, obj.co_name, obj.co_qualname, + obj.co_firstlineno, obj.co_linetable, obj.co_freevars, + obj.co_cellvars + ) + elif hasattr(obj, "co_linetable"): # python 3.10 (16 args) + args = ( + obj.co_lnotab, # for < python 3.10 [not counted in args] + obj.co_argcount, obj.co_posonlyargcount, + obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, + obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, + obj.co_varnames, obj.co_filename, obj.co_name, + obj.co_firstlineno, obj.co_linetable, obj.co_freevars, + obj.co_cellvars + ) + elif hasattr(obj, "co_posonlyargcount"): # python 3.8 (16 args) + args = ( + obj.co_argcount, obj.co_posonlyargcount, + obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, + obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, + obj.co_varnames, obj.co_filename, obj.co_name, + obj.co_firstlineno, obj.co_lnotab, obj.co_freevars, + obj.co_cellvars + ) + else: # python 3.7 (15 args) + args = ( + obj.co_argcount, obj.co_kwonlyargcount, obj.co_nlocals, + obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, + obj.co_names, obj.co_varnames, obj.co_filename, + obj.co_name, obj.co_firstlineno, obj.co_lnotab, + obj.co_freevars, obj.co_cellvars + ) + + pickler.save_reduce(_create_code, args, obj=obj) + logger.trace(pickler, "# Co") + return + +def _repr_dict(obj): + """Make a short string representation of a dictionary.""" + return "<%s object at %#012x>" % (type(obj).__name__, id(obj)) + +@register(dict) +def save_module_dict(pickler, obj): + if is_dill(pickler, child=False) and obj == pickler._main.__dict__ and \ + not (pickler._session and pickler._first_pass): + logger.trace(pickler, "D1: %s", _repr_dict(obj)) # obj + pickler.write(bytes('c__builtin__\n__main__\n', 'UTF-8')) + logger.trace(pickler, "# D1") + elif (not is_dill(pickler, child=False)) and (obj == _main_module.__dict__): + logger.trace(pickler, "D3: %s", _repr_dict(obj)) # obj + pickler.write(bytes('c__main__\n__dict__\n', 'UTF-8')) #XXX: works in general? + logger.trace(pickler, "# D3") + elif '__name__' in obj and obj != _main_module.__dict__ \ + and type(obj['__name__']) is str \ + and obj is getattr(_import_module(obj['__name__'],True), '__dict__', None): + logger.trace(pickler, "D4: %s", _repr_dict(obj)) # obj + pickler.write(bytes('c%s\n__dict__\n' % obj['__name__'], 'UTF-8')) + logger.trace(pickler, "# D4") + else: + logger.trace(pickler, "D2: %s", _repr_dict(obj)) # obj + if is_dill(pickler, child=False) and pickler._session: + # we only care about session the first pass thru + pickler._first_pass = False + StockPickler.save_dict(pickler, obj) + logger.trace(pickler, "# D2") + return + + +if not OLD310 and MAPPING_PROXY_TRICK: + def save_dict_view(dicttype): + def save_dict_view_for_function(func): + def _save_dict_view(pickler, obj): + logger.trace(pickler, "Dkvi: <%s>", obj) + mapping = obj.mapping | _dictproxy_helper_instance + pickler.save_reduce(func, (mapping,), obj=obj) + logger.trace(pickler, "# Dkvi") + return _save_dict_view + return [ + (funcname, save_dict_view_for_function(getattr(dicttype, funcname))) + for funcname in ('keys', 'values', 'items') + ] +else: + # The following functions are based on 'cloudpickle' + # https://github.com/cloudpipe/cloudpickle/blob/5d89947288a18029672596a4d719093cc6d5a412/cloudpickle/cloudpickle.py#L922-L940 + # Copyright (c) 2012, Regents of the University of California. + # Copyright (c) 2009 `PiCloud, Inc. `_. + # License: https://github.com/cloudpipe/cloudpickle/blob/master/LICENSE + def save_dict_view(dicttype): + def save_dict_keys(pickler, obj): + logger.trace(pickler, "Dk: <%s>", obj) + dict_constructor = _shims.Reduce(dicttype.fromkeys, (list(obj),)) + pickler.save_reduce(dicttype.keys, (dict_constructor,), obj=obj) + logger.trace(pickler, "# Dk") + + def save_dict_values(pickler, obj): + logger.trace(pickler, "Dv: <%s>", obj) + dict_constructor = _shims.Reduce(dicttype, (enumerate(obj),)) + pickler.save_reduce(dicttype.values, (dict_constructor,), obj=obj) + logger.trace(pickler, "# Dv") + + def save_dict_items(pickler, obj): + logger.trace(pickler, "Di: <%s>", obj) + pickler.save_reduce(dicttype.items, (dicttype(obj),), obj=obj) + logger.trace(pickler, "# Di") + + return ( + ('keys', save_dict_keys), + ('values', save_dict_values), + ('items', save_dict_items) + ) + +for __dicttype in ( + dict, + OrderedDict +): + __obj = __dicttype() + for __funcname, __savefunc in save_dict_view(__dicttype): + __tview = type(getattr(__obj, __funcname)()) + if __tview not in Pickler.dispatch: + Pickler.dispatch[__tview] = __savefunc +del __dicttype, __obj, __funcname, __tview, __savefunc + + +@register(ClassType) +def save_classobj(pickler, obj): #FIXME: enable pickler._byref + if not _locate_function(obj, pickler): + logger.trace(pickler, "C1: %s", obj) + pickler.save_reduce(ClassType, (obj.__name__, obj.__bases__, + obj.__dict__), obj=obj) + #XXX: or obj.__dict__.copy()), obj=obj) ? + logger.trace(pickler, "# C1") + else: + logger.trace(pickler, "C2: %s", obj) + name = getattr(obj, '__qualname__', getattr(obj, '__name__', None)) + StockPickler.save_global(pickler, obj, name=name) + logger.trace(pickler, "# C2") + return + +@register(typing._GenericAlias) +def save_generic_alias(pickler, obj): + args = obj.__args__ + if type(obj.__reduce__()) is str: + logger.trace(pickler, "Ga0: %s", obj) + StockPickler.save_global(pickler, obj, name=obj.__reduce__()) + logger.trace(pickler, "# Ga0") + elif obj.__origin__ is tuple and (not args or args == ((),)): + logger.trace(pickler, "Ga1: %s", obj) + pickler.save_reduce(_create_typing_tuple, (args,), obj=obj) + logger.trace(pickler, "# Ga1") + else: + logger.trace(pickler, "Ga2: %s", obj) + StockPickler.save_reduce(pickler, *obj.__reduce__(), obj=obj) + logger.trace(pickler, "# Ga2") + return + +if ThreadHandleType: + @register(ThreadHandleType) + def save_thread_handle(pickler, obj): + logger.trace(pickler, "Th: %s", obj) + pickler.save_reduce(_create_thread_handle, (obj.ident, obj.is_done()), obj=obj) + logger.trace(pickler, "# Th") + return + +@register(LockType) #XXX: copied Thread will have new Event (due to new Lock) +def save_lock(pickler, obj): + logger.trace(pickler, "Lo: %s", obj) + pickler.save_reduce(_create_lock, (obj.locked(),), obj=obj) + logger.trace(pickler, "# Lo") + return + +@register(RLockType) +def save_rlock(pickler, obj): + logger.trace(pickler, "RL: %s", obj) + r = obj.__repr__() # don't use _release_save as it unlocks the lock + count = int(r.split('count=')[1].split()[0].rstrip('>')) + owner = int(r.split('owner=')[1].split()[0]) + pickler.save_reduce(_create_rlock, (count,owner,), obj=obj) + logger.trace(pickler, "# RL") + return + +#@register(SocketType) #FIXME: causes multiprocess test_pickling FAIL +def save_socket(pickler, obj): + logger.trace(pickler, "So: %s", obj) + pickler.save_reduce(*reduce_socket(obj)) + logger.trace(pickler, "# So") + return + +def _save_file(pickler, obj, open_): + if obj.closed: + position = 0 + else: + obj.flush() + if obj in (sys.__stdout__, sys.__stderr__, sys.__stdin__): + position = -1 + else: + position = obj.tell() + if is_dill(pickler, child=True) and pickler._fmode == FILE_FMODE: + f = open_(obj.name, "r") + fdata = f.read() + f.close() + else: + fdata = "" + if is_dill(pickler, child=True): + strictio = pickler._strictio + fmode = pickler._fmode + else: + strictio = False + fmode = 0 # HANDLE_FMODE + pickler.save_reduce(_create_filehandle, (obj.name, obj.mode, position, + obj.closed, open_, strictio, + fmode, fdata), obj=obj) + return + + +@register(FileType) #XXX: in 3.x has buffer=0, needs different _create? +@register(BufferedReaderType) +@register(BufferedWriterType) +@register(TextWrapperType) +def save_file(pickler, obj): + logger.trace(pickler, "Fi: %s", obj) + f = _save_file(pickler, obj, open) + logger.trace(pickler, "# Fi") + return f + +if BufferedRandomType: + @register(BufferedRandomType) + def save_file(pickler, obj): + logger.trace(pickler, "Fi: %s", obj) + f = _save_file(pickler, obj, open) + logger.trace(pickler, "# Fi") + return f + +if PyTextWrapperType: + @register(PyBufferedReaderType) + @register(PyBufferedWriterType) + @register(PyTextWrapperType) + def save_file(pickler, obj): + logger.trace(pickler, "Fi: %s", obj) + f = _save_file(pickler, obj, _open) + logger.trace(pickler, "# Fi") + return f + + if PyBufferedRandomType: + @register(PyBufferedRandomType) + def save_file(pickler, obj): + logger.trace(pickler, "Fi: %s", obj) + f = _save_file(pickler, obj, _open) + logger.trace(pickler, "# Fi") + return f + + +# The following two functions are based on 'saveCStringIoInput' +# and 'saveCStringIoOutput' from spickle +# Copyright (c) 2011 by science+computing ag +# License: http://www.apache.org/licenses/LICENSE-2.0 +if InputType: + @register(InputType) + def save_stringi(pickler, obj): + logger.trace(pickler, "Io: %s", obj) + if obj.closed: + value = ''; position = 0 + else: + value = obj.getvalue(); position = obj.tell() + pickler.save_reduce(_create_stringi, (value, position, \ + obj.closed), obj=obj) + logger.trace(pickler, "# Io") + return + + @register(OutputType) + def save_stringo(pickler, obj): + logger.trace(pickler, "Io: %s", obj) + if obj.closed: + value = ''; position = 0 + else: + value = obj.getvalue(); position = obj.tell() + pickler.save_reduce(_create_stringo, (value, position, \ + obj.closed), obj=obj) + logger.trace(pickler, "# Io") + return + +if LRUCacheType is not None: + from functools import lru_cache + @register(LRUCacheType) + def save_lru_cache(pickler, obj): + logger.trace(pickler, "LRU: %s", obj) + if OLD39: + kwargs = obj.cache_info() + args = (kwargs.maxsize,) + else: + kwargs = obj.cache_parameters() + args = (kwargs['maxsize'], kwargs['typed']) + if args != lru_cache.__defaults__: + wrapper = Reduce(lru_cache, args, is_callable=True) + else: + wrapper = lru_cache + pickler.save_reduce(wrapper, (obj.__wrapped__,), obj=obj) + logger.trace(pickler, "# LRU") + return + +@register(SuperType) +def save_super(pickler, obj): + logger.trace(pickler, "Su: %s", obj) + pickler.save_reduce(super, (obj.__thisclass__, obj.__self__), obj=obj) + logger.trace(pickler, "# Su") + return + +if IS_PYPY: + @register(MethodType) + def save_instancemethod0(pickler, obj): + code = getattr(obj.__func__, '__code__', None) + if code is not None and type(code) is not CodeType \ + and getattr(obj.__self__, obj.__name__) == obj: + # Some PyPy builtin functions have no module name + logger.trace(pickler, "Me2: %s", obj) + # TODO: verify that this works for all PyPy builtin methods + pickler.save_reduce(getattr, (obj.__self__, obj.__name__), obj=obj) + logger.trace(pickler, "# Me2") + return + + logger.trace(pickler, "Me1: %s", obj) + pickler.save_reduce(MethodType, (obj.__func__, obj.__self__), obj=obj) + logger.trace(pickler, "# Me1") + return +else: + @register(MethodType) + def save_instancemethod0(pickler, obj): + logger.trace(pickler, "Me1: %s", obj) + pickler.save_reduce(MethodType, (obj.__func__, obj.__self__), obj=obj) + logger.trace(pickler, "# Me1") + return + +if not IS_PYPY: + @register(MemberDescriptorType) + @register(GetSetDescriptorType) + @register(MethodDescriptorType) + @register(WrapperDescriptorType) + @register(ClassMethodDescriptorType) + def save_wrapper_descriptor(pickler, obj): + logger.trace(pickler, "Wr: %s", obj) + pickler.save_reduce(_getattr, (obj.__objclass__, obj.__name__, + obj.__repr__()), obj=obj) + logger.trace(pickler, "# Wr") + return +else: + @register(MemberDescriptorType) + @register(GetSetDescriptorType) + def save_wrapper_descriptor(pickler, obj): + logger.trace(pickler, "Wr: %s", obj) + pickler.save_reduce(_getattr, (obj.__objclass__, obj.__name__, + obj.__repr__()), obj=obj) + logger.trace(pickler, "# Wr") + return + +@register(CellType) +def save_cell(pickler, obj): + try: + f = obj.cell_contents + except ValueError: # cell is empty + logger.trace(pickler, "Ce3: %s", obj) + # _shims._CELL_EMPTY is defined in _shims.py to support PyPy 2.7. + # It unpickles to a sentinel object _dill._CELL_EMPTY, also created in + # _shims.py. This object is not present in Python 3 because the cell's + # contents can be deleted in newer versions of Python. The reduce object + # will instead unpickle to None if unpickled in Python 3. + + # When breaking changes are made to dill, (_shims._CELL_EMPTY,) can + # be replaced by () OR the delattr function can be removed repending on + # whichever is more convienient. + pickler.save_reduce(_create_cell, (_shims._CELL_EMPTY,), obj=obj) + # Call the function _delattr on the cell's cell_contents attribute + # The result of this function call will be None + pickler.save_reduce(_shims._delattr, (obj, 'cell_contents')) + # pop None created by calling _delattr off stack + pickler.write(POP) + logger.trace(pickler, "# Ce3") + return + if is_dill(pickler, child=True): + if id(f) in pickler._postproc: + # Already seen. Add to its postprocessing. + postproc = pickler._postproc[id(f)] + else: + # Haven't seen it. Add to the highest possible object and set its + # value as late as possible to prevent cycle. + postproc = next(iter(pickler._postproc.values()), None) + if postproc is not None: + logger.trace(pickler, "Ce2: %s", obj) + # _CELL_REF is defined in _shims.py to support older versions of + # dill. When breaking changes are made to dill, (_CELL_REF,) can + # be replaced by () + pickler.save_reduce(_create_cell, (_CELL_REF,), obj=obj) + postproc.append((_shims._setattr, (obj, 'cell_contents', f))) + logger.trace(pickler, "# Ce2") + return + logger.trace(pickler, "Ce1: %s", obj) + pickler.save_reduce(_create_cell, (f,), obj=obj) + logger.trace(pickler, "# Ce1") + return + +if MAPPING_PROXY_TRICK: + @register(DictProxyType) + def save_dictproxy(pickler, obj): + logger.trace(pickler, "Mp: %s", _repr_dict(obj)) # obj + mapping = obj | _dictproxy_helper_instance + pickler.save_reduce(DictProxyType, (mapping,), obj=obj) + logger.trace(pickler, "# Mp") + return +else: + @register(DictProxyType) + def save_dictproxy(pickler, obj): + logger.trace(pickler, "Mp: %s", _repr_dict(obj)) # obj + pickler.save_reduce(DictProxyType, (obj.copy(),), obj=obj) + logger.trace(pickler, "# Mp") + return + +@register(SliceType) +def save_slice(pickler, obj): + logger.trace(pickler, "Sl: %s", obj) + pickler.save_reduce(slice, (obj.start, obj.stop, obj.step), obj=obj) + logger.trace(pickler, "# Sl") + return + +@register(XRangeType) +@register(EllipsisType) +@register(NotImplementedType) +def save_singleton(pickler, obj): + logger.trace(pickler, "Si: %s", obj) + pickler.save_reduce(_eval_repr, (obj.__repr__(),), obj=obj) + logger.trace(pickler, "# Si") + return + +def _proxy_helper(obj): # a dead proxy returns a reference to None + """get memory address of proxy's reference object""" + _repr = repr(obj) + try: _str = str(obj) + except ReferenceError: # it's a dead proxy + return id(None) + if _str == _repr: return id(obj) # it's a repr + try: # either way, it's a proxy from here + address = int(_str.rstrip('>').split(' at ')[-1], base=16) + except ValueError: # special case: proxy of a 'type' + if not IS_PYPY: + address = int(_repr.rstrip('>').split(' at ')[-1], base=16) + else: + objects = iter(gc.get_objects()) + for _obj in objects: + if repr(_obj) == _str: return id(_obj) + # all bad below... nothing found so throw ReferenceError + msg = "Cannot reference object for proxy at '%s'" % id(obj) + raise ReferenceError(msg) + return address + +def _locate_object(address, module=None): + """get object located at the given memory address (inverse of id(obj))""" + special = [None, True, False] #XXX: more...? + for obj in special: + if address == id(obj): return obj + if module: + objects = iter(module.__dict__.values()) + else: objects = iter(gc.get_objects()) + for obj in objects: + if address == id(obj): return obj + # all bad below... nothing found so throw ReferenceError or TypeError + try: address = hex(address) + except TypeError: + raise TypeError("'%s' is not a valid memory address" % str(address)) + raise ReferenceError("Cannot reference object at '%s'" % address) + +@register(ReferenceType) +def save_weakref(pickler, obj): + refobj = obj() + logger.trace(pickler, "R1: %s", obj) + #refobj = ctypes.pythonapi.PyWeakref_GetObject(obj) # dead returns "None" + pickler.save_reduce(_create_weakref, (refobj,), obj=obj) + logger.trace(pickler, "# R1") + return + +@register(ProxyType) +@register(CallableProxyType) +def save_weakproxy(pickler, obj): + # Must do string substitution here and use %r to avoid ReferenceError. + logger.trace(pickler, "R2: %r" % obj) + refobj = _locate_object(_proxy_helper(obj)) + pickler.save_reduce(_create_weakproxy, (refobj, callable(obj)), obj=obj) + logger.trace(pickler, "# R2") + return + +def _is_builtin_module(module): + if not hasattr(module, "__file__"): return True + if module.__file__ is None: return False + # If a module file name starts with prefix, it should be a builtin + # module, so should always be pickled as a reference. + names = ["base_prefix", "base_exec_prefix", "exec_prefix", "prefix", "real_prefix"] + rp = os.path.realpath + # See https://github.com/uqfoundation/dill/issues/566 + return ( + any( + module.__file__.startswith(getattr(sys, name)) + or rp(module.__file__).startswith(rp(getattr(sys, name))) + for name in names + if hasattr(sys, name) + ) + or module.__file__.endswith(EXTENSION_SUFFIXES) + or 'site-packages' in module.__file__ + ) + +def _is_imported_module(module): + return getattr(module, '__loader__', None) is not None or module in sys.modules.values() + +@register(ModuleType) +def save_module(pickler, obj): + if False: #_use_diff: + if obj.__name__.split('.', 1)[0] != "dill": + try: + changed = diff.whats_changed(obj, seen=pickler._diff_cache)[0] + except RuntimeError: # not memorised module, probably part of dill + pass + else: + logger.trace(pickler, "M2: %s with diff", obj) + logger.info("Diff: %s", changed.keys()) + pickler.save_reduce(_import_module, (obj.__name__,), obj=obj, + state=changed) + logger.trace(pickler, "# M2") + return + + logger.trace(pickler, "M1: %s", obj) + pickler.save_reduce(_import_module, (obj.__name__,), obj=obj) + logger.trace(pickler, "# M1") + else: + builtin_mod = _is_builtin_module(obj) + is_session_main = is_dill(pickler, child=True) and obj is pickler._main + if (obj.__name__ not in ("builtins", "dill", "dill._dill") and not builtin_mod + or is_session_main): + logger.trace(pickler, "M1: %s", obj) + # Hack for handling module-type objects in load_module(). + mod_name = obj.__name__ if _is_imported_module(obj) else '__runtime__.%s' % obj.__name__ + # Second references are saved as __builtin__.__main__ in save_module_dict(). + main_dict = obj.__dict__.copy() + for item in ('__builtins__', '__loader__'): + main_dict.pop(item, None) + for item in IPYTHON_SINGLETONS: #pragma: no cover + if getattr(main_dict.get(item), '__module__', '').startswith('IPython'): + del main_dict[item] + pickler.save_reduce(_import_module, (mod_name,), obj=obj, state=main_dict) + logger.trace(pickler, "# M1") + elif obj.__name__ == "dill._dill": + logger.trace(pickler, "M2: %s", obj) + pickler.save_global(obj, name="_dill") + logger.trace(pickler, "# M2") + else: + logger.trace(pickler, "M2: %s", obj) + pickler.save_reduce(_import_module, (obj.__name__,), obj=obj) + logger.trace(pickler, "# M2") + return + +# The following function is based on '_extract_class_dict' from 'cloudpickle' +# Copyright (c) 2012, Regents of the University of California. +# Copyright (c) 2009 `PiCloud, Inc. `_. +# License: https://github.com/cloudpipe/cloudpickle/blob/master/LICENSE +def _get_typedict_type(cls, clsdict, attrs, postproc_list): + """Retrieve a copy of the dict of a class without the inherited methods""" + if len(cls.__bases__) == 1: + inherited_dict = cls.__bases__[0].__dict__ + else: + inherited_dict = {} + for base in reversed(cls.__bases__): + inherited_dict.update(base.__dict__) + to_remove = [] + for name, value in dict.items(clsdict): + try: + base_value = inherited_dict[name] + if value is base_value and hasattr(value, '__qualname__'): + to_remove.append(name) + except KeyError: + pass + for name in to_remove: + dict.pop(clsdict, name) + + if issubclass(type(cls), type): + clsdict.pop('__dict__', None) + clsdict.pop('__weakref__', None) + # clsdict.pop('__prepare__', None) + return clsdict, attrs + +def _get_typedict_abc(obj, _dict, attrs, postproc_list): + if hasattr(abc, '_get_dump'): + (registry, _, _, _) = abc._get_dump(obj) + register = obj.register + postproc_list.extend((register, (reg(),)) for reg in registry) + elif hasattr(obj, '_abc_registry'): + registry = obj._abc_registry + register = obj.register + postproc_list.extend((register, (reg,)) for reg in registry) + else: + raise PicklingError("Cannot find registry of ABC %s", obj) + + if '_abc_registry' in _dict: + _dict.pop('_abc_registry', None) + _dict.pop('_abc_cache', None) + _dict.pop('_abc_negative_cache', None) + # _dict.pop('_abc_negative_cache_version', None) + else: + _dict.pop('_abc_impl', None) + return _dict, attrs + +@register(TypeType) +def save_type(pickler, obj, postproc_list=None): + if obj in _typemap: + logger.trace(pickler, "T1: %s", obj) + # if obj in _incedental_types: + # warnings.warn('Type %r may only exist on this implementation of Python and cannot be unpickled in other implementations.' % (obj,), PicklingWarning) + pickler.save_reduce(_load_type, (_typemap[obj],), obj=obj) + logger.trace(pickler, "# T1") + elif obj.__bases__ == (tuple,) and all([hasattr(obj, attr) for attr in ('_fields','_asdict','_make','_replace')]): + # special case: namedtuples + logger.trace(pickler, "T6: %s", obj) + + obj_name = getattr(obj, '__qualname__', getattr(obj, '__name__', None)) + if obj.__name__ != obj_name: + if postproc_list is None: + postproc_list = [] + postproc_list.append((setattr, (obj, '__qualname__', obj_name))) + + if not obj._field_defaults: + _save_with_postproc(pickler, (_create_namedtuple, (obj.__name__, obj._fields, obj.__module__)), obj=obj, postproc_list=postproc_list) + else: + defaults = [obj._field_defaults[field] for field in obj._fields if field in obj._field_defaults] + _save_with_postproc(pickler, (_create_namedtuple, (obj.__name__, obj._fields, obj.__module__, defaults)), obj=obj, postproc_list=postproc_list) + logger.trace(pickler, "# T6") + return + + # special caes: NoneType, NotImplementedType, EllipsisType, EnumMeta, etc + elif obj is type(None): + logger.trace(pickler, "T7: %s", obj) + #XXX: pickler.save_reduce(type, (None,), obj=obj) + pickler.write(GLOBAL + b'__builtin__\nNoneType\n') + logger.trace(pickler, "# T7") + elif obj is NotImplementedType: + logger.trace(pickler, "T7: %s", obj) + pickler.save_reduce(type, (NotImplemented,), obj=obj) + logger.trace(pickler, "# T7") + elif obj is EllipsisType: + logger.trace(pickler, "T7: %s", obj) + pickler.save_reduce(type, (Ellipsis,), obj=obj) + logger.trace(pickler, "# T7") + elif obj is EnumMeta: + logger.trace(pickler, "T7: %s", obj) + pickler.write(GLOBAL + b'enum\nEnumMeta\n') + logger.trace(pickler, "# T7") + elif obj is ExceptHookArgsType: #NOTE: must be after NoneType for pypy + logger.trace(pickler, "T7: %s", obj) + pickler.write(GLOBAL + b'threading\nExceptHookArgs\n') + logger.trace(pickler, "# T7") + + else: + _byref = getattr(pickler, '_byref', None) + obj_recursive = id(obj) in getattr(pickler, '_postproc', ()) + incorrectly_named = not _locate_function(obj, pickler) + if not _byref and not obj_recursive and incorrectly_named: # not a function, but the name was held over + if postproc_list is None: + postproc_list = [] + + # thanks to Tom Stepleton pointing out pickler._session unneeded + logger.trace(pickler, "T2: %s", obj) + _dict, attrs = _get_typedict_type(obj, obj.__dict__.copy(), None, postproc_list) # copy dict proxy to a dict + + #print (_dict) + #print ("%s\n%s" % (type(obj), obj.__name__)) + #print ("%s\n%s" % (obj.__bases__, obj.__dict__)) + slots = _dict.get('__slots__', ()) + if type(slots) == str: + # __slots__ accepts a single string + slots = (slots,) + + for name in slots: + _dict.pop(name, None) + + if isinstance(obj, abc.ABCMeta): + logger.trace(pickler, "ABC: %s", obj) + _dict, attrs = _get_typedict_abc(obj, _dict, attrs, postproc_list) + logger.trace(pickler, "# ABC") + + qualname = getattr(obj, '__qualname__', None) + if attrs is not None: + for k, v in attrs.items(): + postproc_list.append((setattr, (obj, k, v))) + # TODO: Consider using the state argument to save_reduce? + if qualname is not None: + postproc_list.append((setattr, (obj, '__qualname__', qualname))) + + if not hasattr(obj, '__orig_bases__'): + _save_with_postproc(pickler, (_create_type, ( + type(obj), obj.__name__, obj.__bases__, _dict + )), obj=obj, postproc_list=postproc_list) + else: + # This case will always work, but might be overkill. + _metadict = { + 'metaclass': type(obj) + } + + if _dict: + _dict_update = PartialType(_setitems, source=_dict) + else: + _dict_update = None + + _save_with_postproc(pickler, (new_class, ( + obj.__name__, obj.__orig_bases__, _metadict, _dict_update + )), obj=obj, postproc_list=postproc_list) + logger.trace(pickler, "# T2") + else: + obj_name = getattr(obj, '__qualname__', getattr(obj, '__name__', None)) + logger.trace(pickler, "T4: %s", obj) + if incorrectly_named: + warnings.warn( + "Cannot locate reference to %r." % (obj,), + PicklingWarning, + stacklevel=3, + ) + if obj_recursive: + warnings.warn( + "Cannot pickle %r: %s.%s has recursive self-references that " + "trigger a RecursionError." % (obj, obj.__module__, obj_name), + PicklingWarning, + stacklevel=3, + ) + #print (obj.__dict__) + #print ("%s\n%s" % (type(obj), obj.__name__)) + #print ("%s\n%s" % (obj.__bases__, obj.__dict__)) + StockPickler.save_global(pickler, obj, name=obj_name) + logger.trace(pickler, "# T4") + return + +@register(property) +@register(abc.abstractproperty) +def save_property(pickler, obj): + logger.trace(pickler, "Pr: %s", obj) + pickler.save_reduce(type(obj), (obj.fget, obj.fset, obj.fdel, obj.__doc__), + obj=obj) + logger.trace(pickler, "# Pr") + +@register(staticmethod) +@register(classmethod) +@register(abc.abstractstaticmethod) +@register(abc.abstractclassmethod) +def save_classmethod(pickler, obj): + logger.trace(pickler, "Cm: %s", obj) + orig_func = obj.__func__ + + # if type(obj.__dict__) is dict: + # if obj.__dict__: + # state = obj.__dict__ + # else: + # state = None + # else: + # state = (None, {'__dict__', obj.__dict__}) + + pickler.save_reduce(type(obj), (orig_func,), obj=obj) + logger.trace(pickler, "# Cm") + +@register(FunctionType) +def save_function(pickler, obj): + if not _locate_function(obj, pickler): + if type(obj.__code__) is not CodeType: + # Some PyPy builtin functions have no module name, and thus are not + # able to be located + module_name = getattr(obj, '__module__', None) + if module_name is None: + module_name = __builtin__.__name__ + module = _import_module(module_name, safe=True) + _pypy_builtin = False + try: + found, _ = _getattribute(module, obj.__qualname__) + if getattr(found, '__func__', None) is obj: + _pypy_builtin = True + except AttributeError: + pass + + if _pypy_builtin: + logger.trace(pickler, "F3: %s", obj) + pickler.save_reduce(getattr, (found, '__func__'), obj=obj) + logger.trace(pickler, "# F3") + return + + logger.trace(pickler, "F1: %s", obj) + _recurse = getattr(pickler, '_recurse', None) + _postproc = getattr(pickler, '_postproc', None) + _main_modified = getattr(pickler, '_main_modified', None) + _original_main = getattr(pickler, '_original_main', __builtin__)#'None' + postproc_list = [] + if _recurse: + # recurse to get all globals referred to by obj + from .detect import globalvars + globs_copy = globalvars(obj, recurse=True, builtin=True) + + # Add the name of the module to the globs dictionary to prevent + # the duplication of the dictionary. Pickle the unpopulated + # globals dictionary and set the remaining items after the function + # is created to correctly handle recursion. + globs = {'__name__': obj.__module__} + else: + globs_copy = obj.__globals__ + + # If the globals is the __dict__ from the module being saved as a + # session, substitute it by the dictionary being actually saved. + if _main_modified and globs_copy is _original_main.__dict__: + globs_copy = getattr(pickler, '_main', _original_main).__dict__ + globs = globs_copy + # If the globals is a module __dict__, do not save it in the pickle. + elif globs_copy is not None and obj.__module__ is not None and \ + getattr(_import_module(obj.__module__, True), '__dict__', None) is globs_copy: + globs = globs_copy + else: + globs = {'__name__': obj.__module__} + + if globs_copy is not None and globs is not globs_copy: + # In the case that the globals are copied, we need to ensure that + # the globals dictionary is updated when all objects in the + # dictionary are already created. + glob_ids = {id(g) for g in globs_copy.values()} + for stack_element in _postproc: + if stack_element in glob_ids: + _postproc[stack_element].append((_setitems, (globs, globs_copy))) + break + else: + postproc_list.append((_setitems, (globs, globs_copy))) + + closure = obj.__closure__ + state_dict = {} + for fattrname in ('__doc__', '__kwdefaults__', '__annotations__'): + fattr = getattr(obj, fattrname, None) + if fattr is not None: + state_dict[fattrname] = fattr + if obj.__qualname__ != obj.__name__: + state_dict['__qualname__'] = obj.__qualname__ + if '__name__' not in globs or obj.__module__ != globs['__name__']: + state_dict['__module__'] = obj.__module__ + + state = obj.__dict__ + if type(state) is not dict: + state_dict['__dict__'] = state + state = None + if state_dict: + state = state, state_dict + + _save_with_postproc(pickler, (_create_function, ( + obj.__code__, globs, obj.__name__, obj.__defaults__, + closure + ), state), obj=obj, postproc_list=postproc_list) + + # Lift closure cell update to earliest function (#458) + if _postproc: + topmost_postproc = next(iter(_postproc.values()), None) + if closure and topmost_postproc: + for cell in closure: + possible_postproc = (setattr, (cell, 'cell_contents', obj)) + try: + topmost_postproc.remove(possible_postproc) + except ValueError: + continue + + # Change the value of the cell + pickler.save_reduce(*possible_postproc) + # pop None created by calling preprocessing step off stack + pickler.write(POP) + + logger.trace(pickler, "# F1") + else: + logger.trace(pickler, "F2: %s", obj) + name = getattr(obj, '__qualname__', getattr(obj, '__name__', None)) + StockPickler.save_global(pickler, obj, name=name) + logger.trace(pickler, "# F2") + return + +if HAS_CTYPES and hasattr(ctypes, 'pythonapi'): + _PyCapsule_New = ctypes.pythonapi.PyCapsule_New + _PyCapsule_New.argtypes = (ctypes.c_void_p, ctypes.c_char_p, ctypes.c_void_p) + _PyCapsule_New.restype = ctypes.py_object + _PyCapsule_GetPointer = ctypes.pythonapi.PyCapsule_GetPointer + _PyCapsule_GetPointer.argtypes = (ctypes.py_object, ctypes.c_char_p) + _PyCapsule_GetPointer.restype = ctypes.c_void_p + _PyCapsule_GetDestructor = ctypes.pythonapi.PyCapsule_GetDestructor + _PyCapsule_GetDestructor.argtypes = (ctypes.py_object,) + _PyCapsule_GetDestructor.restype = ctypes.c_void_p + _PyCapsule_GetContext = ctypes.pythonapi.PyCapsule_GetContext + _PyCapsule_GetContext.argtypes = (ctypes.py_object,) + _PyCapsule_GetContext.restype = ctypes.c_void_p + _PyCapsule_GetName = ctypes.pythonapi.PyCapsule_GetName + _PyCapsule_GetName.argtypes = (ctypes.py_object,) + _PyCapsule_GetName.restype = ctypes.c_char_p + _PyCapsule_IsValid = ctypes.pythonapi.PyCapsule_IsValid + _PyCapsule_IsValid.argtypes = (ctypes.py_object, ctypes.c_char_p) + _PyCapsule_IsValid.restype = ctypes.c_bool + _PyCapsule_SetContext = ctypes.pythonapi.PyCapsule_SetContext + _PyCapsule_SetContext.argtypes = (ctypes.py_object, ctypes.c_void_p) + _PyCapsule_SetDestructor = ctypes.pythonapi.PyCapsule_SetDestructor + _PyCapsule_SetDestructor.argtypes = (ctypes.py_object, ctypes.c_void_p) + _PyCapsule_SetName = ctypes.pythonapi.PyCapsule_SetName + _PyCapsule_SetName.argtypes = (ctypes.py_object, ctypes.c_char_p) + _PyCapsule_SetPointer = ctypes.pythonapi.PyCapsule_SetPointer + _PyCapsule_SetPointer.argtypes = (ctypes.py_object, ctypes.c_void_p) + #from _socket import CAPI as _testcapsule + _testcapsule_name = b'dill._dill._testcapsule' + _testcapsule = _PyCapsule_New( + ctypes.cast(_PyCapsule_New, ctypes.c_void_p), + ctypes.c_char_p(_testcapsule_name), + None + ) + PyCapsuleType = type(_testcapsule) + @register(PyCapsuleType) + def save_capsule(pickler, obj): + logger.trace(pickler, "Cap: %s", obj) + name = _PyCapsule_GetName(obj) + #warnings.warn('Pickling a PyCapsule (%s) does not pickle any C data structures and could cause segmentation faults or other memory errors when unpickling.' % (name,), PicklingWarning) + pointer = _PyCapsule_GetPointer(obj, name) + context = _PyCapsule_GetContext(obj) + destructor = _PyCapsule_GetDestructor(obj) + pickler.save_reduce(_create_capsule, (pointer, name, context, destructor), obj=obj) + logger.trace(pickler, "# Cap") + _incedental_reverse_typemap['PyCapsuleType'] = PyCapsuleType + _reverse_typemap['PyCapsuleType'] = PyCapsuleType + _incedental_types.add(PyCapsuleType) +else: + _testcapsule = None + +@register(ContextType) +def save_context(pickler, obj): + logger.trace(pickler, "Cx: %s", obj) + pickler.save_reduce(ContextType, tuple(obj.items()), obj=obj) + logger.trace(pickler, "# Cx") + + +############################# +# A quick fix for issue #500 +# This should be removed when a better solution is found. + +if hasattr(dataclasses, "_HAS_DEFAULT_FACTORY_CLASS"): + @register(dataclasses._HAS_DEFAULT_FACTORY_CLASS) + def save_dataclasses_HAS_DEFAULT_FACTORY_CLASS(pickler, obj): + logger.trace(pickler, "DcHDF: %s", obj) + pickler.write(GLOBAL + b"dataclasses\n_HAS_DEFAULT_FACTORY\n") + logger.trace(pickler, "# DcHDF") + +if hasattr(dataclasses, "MISSING"): + @register(type(dataclasses.MISSING)) + def save_dataclasses_MISSING_TYPE(pickler, obj): + logger.trace(pickler, "DcM: %s", obj) + pickler.write(GLOBAL + b"dataclasses\nMISSING\n") + logger.trace(pickler, "# DcM") + +if hasattr(dataclasses, "KW_ONLY"): + @register(type(dataclasses.KW_ONLY)) + def save_dataclasses_KW_ONLY_TYPE(pickler, obj): + logger.trace(pickler, "DcKWO: %s", obj) + pickler.write(GLOBAL + b"dataclasses\nKW_ONLY\n") + logger.trace(pickler, "# DcKWO") + +if hasattr(dataclasses, "_FIELD_BASE"): + @register(dataclasses._FIELD_BASE) + def save_dataclasses_FIELD_BASE(pickler, obj): + logger.trace(pickler, "DcFB: %s", obj) + pickler.write(GLOBAL + b"dataclasses\n" + obj.name.encode() + b"\n") + logger.trace(pickler, "# DcFB") + +############################# + +# quick sanity checking +def pickles(obj,exact=False,safe=False,**kwds): + """ + Quick check if object pickles with dill. + + If *exact=True* then an equality test is done to check if the reconstructed + object matches the original object. + + If *safe=True* then any exception will raised in copy signal that the + object is not picklable, otherwise only pickling errors will be trapped. + + Additional keyword arguments are as :func:`dumps` and :func:`loads`. + """ + if safe: exceptions = (Exception,) # RuntimeError, ValueError + else: + exceptions = (TypeError, AssertionError, NotImplementedError, PicklingError, UnpicklingError) + try: + pik = copy(obj, **kwds) + #FIXME: should check types match first, then check content if "exact" + try: + #FIXME: should be "(pik == obj).all()" for numpy comparison, though that'll fail if shapes differ + result = bool(pik.all() == obj.all()) + except (AttributeError, TypeError): + warnings.filterwarnings('ignore') #FIXME: be specific + result = pik == obj + if warnings.filters: del warnings.filters[0] + if hasattr(result, 'toarray'): # for unusual types like sparse matrix + result = result.toarray().all() + if result: return True + if not exact: + result = type(pik) == type(obj) + if result: return result + # class instances might have been dumped with byref=False + return repr(type(pik)) == repr(type(obj)) #XXX: InstanceType? + return False + except exceptions: + return False + +def check(obj, *args, **kwds): + """ + Check pickling of an object across another process. + + *python* is the path to the python interpreter (defaults to sys.executable) + + Set *verbose=True* to print the unpickled object in the other process. + + Additional keyword arguments are as :func:`dumps` and :func:`loads`. + """ + # == undocumented == + # python -- the string path or executable name of the selected python + # verbose -- if True, be verbose about printing warning messages + # all other args and kwds are passed to dill.dumps #FIXME: ignore on load + verbose = kwds.pop('verbose', False) + python = kwds.pop('python', None) + if python is None: + import sys + python = sys.executable + # type check + isinstance(python, str) + import subprocess + fail = True + try: + _obj = dumps(obj, *args, **kwds) + fail = False + finally: + if fail and verbose: + print("DUMP FAILED") + #FIXME: fails if python interpreter path contains spaces + # Use the following instead (which also processes the 'ignore' keyword): + # ignore = kwds.pop('ignore', None) + # unpickle = "dill.loads(%s, ignore=%s)"%(repr(_obj), repr(ignore)) + # cmd = [python, "-c", "import dill; print(%s)"%unpickle] + # msg = "SUCCESS" if not subprocess.call(cmd) else "LOAD FAILED" + msg = "%s -c import dill; print(dill.loads(%s))" % (python, repr(_obj)) + msg = "SUCCESS" if not subprocess.call(msg.split(None,2)) else "LOAD FAILED" + if verbose: + print(msg) + return + +# use to protect against missing attributes +def is_dill(pickler, child=None): + "check the dill-ness of your pickler" + if child is False or not hasattr(pickler.__class__, 'mro'): + return 'dill' in pickler.__module__ + return Pickler in pickler.__class__.mro() + +def _extend(): + """extend pickle with all of dill's registered types""" + # need to have pickle not choke on _main_module? use is_dill(pickler) + for t,func in Pickler.dispatch.items(): + try: + StockPickler.dispatch[t] = func + except Exception: #TypeError, PicklingError, UnpicklingError + logger.trace(pickler, "skip: %s", t) + return + +del diff, _use_diff, use_diff + +# EOF diff --git a/local_packages/dill/_objects.py b/local_packages/dill/_objects.py new file mode 100644 index 0000000..573fae8 --- /dev/null +++ b/local_packages/dill/_objects.py @@ -0,0 +1,545 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +all Python Standard Library objects (currently: CH 1-15 @ 2.7) +and some other common objects (i.e. numpy.ndarray) +""" + +__all__ = ['registered','failures','succeeds'] + +# helper imports +import warnings; warnings.filterwarnings("ignore", category=DeprecationWarning) +import sys +import queue as Queue +#import dbm as anydbm #XXX: delete foo +from io import BytesIO as StringIO +import re +import array +import collections +import codecs +import struct +import dataclasses +import datetime +import calendar +import weakref +import pprint +import decimal +import numbers +import functools +import itertools +import operator +import tempfile +import shelve +import zlib +import gzip +import zipfile +import tarfile +import csv +import hashlib +import hmac +import os +import logging +import logging.handlers +import optparse +#import __hello__ +import threading +import socket +import contextlib +import contextvars +try: + import bz2 + import sqlite3 + import dbm.ndbm as dbm + HAS_ALL = True +except ImportError: # Ubuntu + HAS_ALL = False +try: + #import curses + #from curses import textpad, panel + HAS_CURSES = True +except ImportError: # Windows + HAS_CURSES = False +try: + import ctypes + HAS_CTYPES = True + # if using `pypy`, pythonapi is not found + IS_PYPY = not hasattr(ctypes, 'pythonapi') +except ImportError: # MacPorts + HAS_CTYPES = False + IS_PYPY = False + +IS_PYODIDE = sys.platform == 'emscripten' + +# helper objects +class _class: + def _method(self): + pass +# @classmethod +# def _clsmethod(cls): #XXX: test me +# pass +# @staticmethod +# def _static(self): #XXX: test me +# pass +class _class2: + def __call__(self): + pass +_instance2 = _class2() +class _newclass(object): + def _method(self): + pass +# @classmethod +# def _clsmethod(cls): #XXX: test me +# pass +# @staticmethod +# def _static(self): #XXX: test me +# pass +class _newclass2(object): + __slots__ = ['descriptor'] +def _function(x): yield x +def _function2(): + try: raise + except Exception: + from sys import exc_info + e, er, tb = exc_info() + return er, tb +if HAS_CTYPES: + class _Struct(ctypes.Structure): + pass + _Struct._fields_ = [("_field", ctypes.c_int),("next", ctypes.POINTER(_Struct))] +_filedescrip, _tempfile = tempfile.mkstemp('r') # deleted in cleanup +if sys.hexversion < 0x30d00a1: + _tmpf = tempfile.TemporaryFile('w') # emits OSError 9 in python 3.13 +else: + _tmpf = tempfile.NamedTemporaryFile('w').file # for > python 3.9 + +# objects used by dill for type declaration +registered = d = {} +# objects dill fails to pickle +failures = x = {} +# all other type objects +succeeds = a = {} + +# types module (part of CH 8) +a['BooleanType'] = bool(1) +a['BuiltinFunctionType'] = len +a['BuiltinMethodType'] = a['BuiltinFunctionType'] +a['BytesType'] = _bytes = codecs.latin_1_encode('\x00')[0] # bytes(1) +a['ClassType'] = _class +a['ComplexType'] = complex(1) +a['DictType'] = _dict = {} +a['DictionaryType'] = a['DictType'] +a['FloatType'] = float(1) +a['FunctionType'] = _function +a['InstanceType'] = _instance = _class() +a['IntType'] = _int = int(1) +a['ListType'] = _list = [] +a['NoneType'] = None +a['ObjectType'] = object() +a['StringType'] = _str = str(1) +a['TupleType'] = _tuple = () +a['TypeType'] = type +a['LongType'] = _int +a['UnicodeType'] = _str +# built-in constants (CH 4) +a['CopyrightType'] = copyright +# built-in types (CH 5) +a['ClassObjectType'] = _newclass # +a['ClassInstanceType'] = _newclass() # +a['SetType'] = _set = set() +a['FrozenSetType'] = frozenset() +# built-in exceptions (CH 6) +a['ExceptionType'] = _exception = _function2()[0] +# string services (CH 7) +a['SREPatternType'] = _srepattern = re.compile('') +# data types (CH 8) +a['ArrayType'] = array.array("f") +a['DequeType'] = collections.deque([0]) +a['DefaultDictType'] = collections.defaultdict(_function, _dict) +a['TZInfoType'] = datetime.tzinfo() +a['DateTimeType'] = datetime.datetime.today() +a['CalendarType'] = calendar.Calendar() +# numeric and mathematical types (CH 9) +a['DecimalType'] = decimal.Decimal(1) +# data compression and archiving (CH 12) +a['TarInfoType'] = tarfile.TarInfo() +# generic operating system services (CH 15) +a['LoggerType'] = _logger = logging.getLogger() +a['FormatterType'] = logging.Formatter() # pickle ok +a['FilterType'] = logging.Filter() # pickle ok +a['LogRecordType'] = logging.makeLogRecord(_dict) # pickle ok +a['OptionParserType'] = _oparser = optparse.OptionParser() # pickle ok +a['OptionGroupType'] = optparse.OptionGroup(_oparser,"foo") # pickle ok +a['OptionType'] = optparse.Option('--foo') # pickle ok +if HAS_CTYPES: + z = x if (IS_PYPY and sys.hexversion < 0x30b0df0) else a + z['CCharType'] = _cchar = ctypes.c_char() + z['CWCharType'] = ctypes.c_wchar() # fail == 2.6 + z['CByteType'] = ctypes.c_byte() + z['CUByteType'] = ctypes.c_ubyte() + z['CShortType'] = ctypes.c_short() + z['CUShortType'] = ctypes.c_ushort() + z['CIntType'] = ctypes.c_int() + z['CUIntType'] = ctypes.c_uint() + z['CLongType'] = ctypes.c_long() + z['CULongType'] = ctypes.c_ulong() + z['CLongLongType'] = ctypes.c_longlong() + z['CULongLongType'] = ctypes.c_ulonglong() + z['CFloatType'] = ctypes.c_float() + z['CDoubleType'] = ctypes.c_double() + z['CSizeTType'] = ctypes.c_size_t() + del z + a['CLibraryLoaderType'] = ctypes.cdll + a['StructureType'] = _Struct + # if not IS_PYPY: + # a['BigEndianStructureType'] = ctypes.BigEndianStructure() +#NOTE: also LittleEndianStructureType and UnionType... abstract classes +#NOTE: remember for ctypesobj.contents creates a new python object +#NOTE: ctypes.c_int._objects is memberdescriptor for object's __dict__ +#NOTE: base class of all ctypes data types is non-public _CData + +import fractions +import io +from io import StringIO as TextIO +# built-in functions (CH 2) +a['ByteArrayType'] = bytearray([1]) +# numeric and mathematical types (CH 9) +a['FractionType'] = fractions.Fraction() +a['NumberType'] = numbers.Number() +# generic operating system services (CH 15) +a['IOBaseType'] = io.IOBase() +a['RawIOBaseType'] = io.RawIOBase() +a['TextIOBaseType'] = io.TextIOBase() +a['BufferedIOBaseType'] = io.BufferedIOBase() +a['UnicodeIOType'] = TextIO() # the new StringIO +a['LoggerAdapterType'] = logging.LoggerAdapter(_logger,_dict) # pickle ok +if HAS_CTYPES: + z = x if (IS_PYPY and sys.hexversion < 0x30b0df0) else a + z['CBoolType'] = ctypes.c_bool(1) + z['CLongDoubleType'] = ctypes.c_longdouble() + del z +import argparse +# data types (CH 8) +a['OrderedDictType'] = collections.OrderedDict(_dict) +a['CounterType'] = collections.Counter(_dict) +if HAS_CTYPES: + z = x if (IS_PYPY and sys.hexversion < 0x30b0df0) else a + z['CSSizeTType'] = ctypes.c_ssize_t() + del z +# generic operating system services (CH 15) +a['NullHandlerType'] = logging.NullHandler() # pickle ok # new 2.7 +a['ArgParseFileType'] = argparse.FileType() # pickle ok + +# -- pickle fails on all below here ----------------------------------------- +# types module (part of CH 8) +a['CodeType'] = compile('','','exec') +a['DictProxyType'] = type.__dict__ +a['DictProxyType2'] = _newclass.__dict__ +a['EllipsisType'] = Ellipsis +a['ClosedFileType'] = open(os.devnull, 'wb', buffering=0).close() +a['GetSetDescriptorType'] = array.array.typecode +a['LambdaType'] = _lambda = lambda x: lambda y: x #XXX: works when not imported! +a['MemberDescriptorType'] = _newclass2.descriptor +if not IS_PYPY: + a['MemberDescriptorType2'] = datetime.timedelta.days +a['MethodType'] = _method = _class()._method #XXX: works when not imported! +a['ModuleType'] = datetime +a['NotImplementedType'] = NotImplemented +a['SliceType'] = slice(1) +a['UnboundMethodType'] = _class._method #XXX: works when not imported! +from dill._dill import get_file_type as openfile +d['TextWrapperType'] = openfile('r', buffering=-1) # same as mode='w','w+','r+' +if not IS_PYODIDE: + d['BufferedRandomType'] = openfile('r+b', buffering=-1) # same as mode='w+b' +d['BufferedReaderType'] = openfile('rb', buffering=-1) # (default: buffering=-1) +d['BufferedWriterType'] = openfile('wb', buffering=-1) +try: # oddities: deprecated + from _pyio import open as _open + d['PyTextWrapperType'] = openfile('r', buffering=-1, open=_open) + if not IS_PYODIDE: + d['PyBufferedRandomType'] = openfile('r+b', buffering=-1, open=_open) + d['PyBufferedReaderType'] = openfile('rb', buffering=-1, open=_open) + d['PyBufferedWriterType'] = openfile('wb', buffering=-1, open=_open) +except ImportError: + pass +del openfile +# other (concrete) object types +z = d if sys.hexversion < 0x30800a2 else a +z['CellType'] = (_lambda)(0).__closure__[0] +del z +a['XRangeType'] = _xrange = range(1) +a['MethodDescriptorType'] = type.__dict__['mro'] +a['WrapperDescriptorType'] = type.__repr__ +#a['WrapperDescriptorType2'] = type.__dict__['__module__']#XXX: GetSetDescriptor +a['ClassMethodDescriptorType'] = type.__dict__['__prepare__'] +# built-in functions (CH 2) +_methodwrap = (1).__lt__ +a['MethodWrapperType'] = _methodwrap +a['StaticMethodType'] = staticmethod(_method) +a['ClassMethodType'] = classmethod(_method) +a['PropertyType'] = property() +d['SuperType'] = super(Exception, _exception) +# string services (CH 7) +_in = _bytes +a['InputType'] = _cstrI = StringIO(_in) +a['OutputType'] = _cstrO = StringIO() +# data types (CH 8) +a['WeakKeyDictionaryType'] = weakref.WeakKeyDictionary() +a['WeakValueDictionaryType'] = weakref.WeakValueDictionary() +a['ReferenceType'] = weakref.ref(_instance) +a['DeadReferenceType'] = weakref.ref(_class()) +a['ProxyType'] = weakref.proxy(_instance) +a['DeadProxyType'] = weakref.proxy(_class()) +a['CallableProxyType'] = weakref.proxy(_instance2) +a['DeadCallableProxyType'] = weakref.proxy(_class2()) +a['QueueType'] = Queue.Queue() +# numeric and mathematical types (CH 9) +d['PartialType'] = functools.partial(int,base=2) +a['IzipType'] = zip('0','1') +d['ItemGetterType'] = operator.itemgetter(0) +d['AttrGetterType'] = operator.attrgetter('__repr__') +# file and directory access (CH 10) +_fileW = _cstrO +# data persistence (CH 11) +if HAS_ALL: + x['ConnectionType'] = _conn = sqlite3.connect(':memory:') + x['CursorType'] = _conn.cursor() +a['ShelveType'] = shelve.Shelf({}) +# data compression and archiving (CH 12) +if HAS_ALL: + x['BZ2FileType'] = bz2.BZ2File(os.devnull) + x['BZ2CompressorType'] = bz2.BZ2Compressor() + x['BZ2DecompressorType'] = bz2.BZ2Decompressor() +#x['ZipFileType'] = _zip = zipfile.ZipFile(os.devnull,'w') +#_zip.write(_tempfile,'x') [causes annoying warning/error printed on import] +#a['ZipInfoType'] = _zip.getinfo('x') +a['TarFileType'] = tarfile.open(fileobj=_fileW,mode='w') +# file formats (CH 13) +x['DialectType'] = csv.get_dialect('excel') +if sys.hexversion < 0x30d00a1: + import xdrlib + a['PackerType'] = xdrlib.Packer() +# optional operating system services (CH 16) +a['LockType'] = threading.Lock() +a['RLockType'] = threading.RLock() +# generic operating system services (CH 15) # also closed/open and r/w/etc... +a['NamedLoggerType'] = _logger = logging.getLogger(__name__) +#a['FrozenModuleType'] = __hello__ #FIXME: prints "Hello world..." +# interprocess communication (CH 17) +x['SocketType'] = _socket = socket.socket() +x['SocketPairType'] = socket.socketpair()[0] +# python runtime services (CH 27) +a['GeneratorContextManagerType'] = contextlib.contextmanager(max)([1]) +#a['ContextType'] = contextvars.Context() #XXX: ContextVar + +try: # ipython + __IPYTHON__ is True # is ipython +except NameError: + # built-in constants (CH 4) + a['QuitterType'] = quit + d['ExitType'] = a['QuitterType'] +try: # numpy #FIXME: slow... 0.05 to 0.1 sec to import numpy + from numpy import ufunc as _numpy_ufunc + from numpy import array as _numpy_array + from numpy import int32 as _numpy_int32 + a['NumpyUfuncType'] = _numpy_ufunc + a['NumpyArrayType'] = _numpy_array + a['NumpyInt32Type'] = _numpy_int32 +except ImportError: + pass +# generic operating system services (CH 15) +a['FileHandlerType'] = logging.FileHandler(os.devnull) +a['RotatingFileHandlerType'] = logging.handlers.RotatingFileHandler(os.devnull) +a['SocketHandlerType'] = logging.handlers.SocketHandler('localhost',514) +a['MemoryHandlerType'] = logging.handlers.MemoryHandler(1) +# data types (CH 8) +a['WeakSetType'] = weakref.WeakSet() # 2.7 +# generic operating system services (CH 15) [errors when dill is imported] +#a['ArgumentParserType'] = _parser = argparse.ArgumentParser('PROG') +#a['NamespaceType'] = _parser.parse_args() # pickle ok +#a['SubParsersActionType'] = _parser.add_subparsers() +#a['MutuallyExclusiveGroupType'] = _parser.add_mutually_exclusive_group() +#a['ArgumentGroupType'] = _parser.add_argument_group() + +# -- dill fails in some versions below here --------------------------------- +# types module (part of CH 8) +d['FileType'] = open(os.devnull, 'rb', buffering=0) # same 'wb','wb+','rb+' +# built-in functions (CH 2) +# Iterators: +a['ListIteratorType'] = iter(_list) # empty vs non-empty +a['SetIteratorType'] = iter(_set) #XXX: empty vs non-empty #FIXME: list_iterator +a['TupleIteratorType']= iter(_tuple) # empty vs non-empty +a['XRangeIteratorType'] = iter(_xrange) # empty vs non-empty +a["BytesIteratorType"] = iter(b'') +a["BytearrayIteratorType"] = iter(bytearray(b'')) +z = x if IS_PYPY else a +z["CallableIteratorType"] = iter(iter, None) +del z +x["MemoryIteratorType"] = iter(memoryview(b'')) +a["ListReverseiteratorType"] = reversed([]) +X = a['OrderedDictType'] +d["OdictKeysType"] = X.keys() +d["OdictValuesType"] = X.values() +d["OdictItemsType"] = X.items() +a["OdictIteratorType"] = iter(X.keys()) #FIXME: list_iterator +del X +#FIXME: list_iterator +a['DictionaryItemIteratorType'] = iter(type.__dict__.items()) +a['DictionaryKeyIteratorType'] = iter(type.__dict__.keys()) +a['DictionaryValueIteratorType'] = iter(type.__dict__.values()) +if sys.hexversion >= 0x30800a0: + a["DictReversekeyiteratorType"] = reversed({}.keys()) + a["DictReversevalueiteratorType"] = reversed({}.values()) + a["DictReverseitemiteratorType"] = reversed({}.items()) + +try: + import symtable + #FIXME: fails to pickle + x["SymtableEntryType"] = symtable.symtable("", "string", "exec")._table +except ImportError: + pass + +if sys.hexversion >= 0x30a00a0 and not IS_PYPY: + x['LineIteratorType'] = compile('3', '', 'eval').co_lines() + +if sys.hexversion >= 0x30b00b0 and not IS_PYPY: + from types import GenericAlias + d["GenericAliasIteratorType"] = iter(GenericAlias(list, (int,))) + x['PositionsIteratorType'] = compile('3', '', 'eval').co_positions() + +# data types (CH 8) +a['PrettyPrinterType'] = pprint.PrettyPrinter() +# file and directory access (CH 10) +a['TemporaryFileType'] = _tmpf +# data compression and archiving (CH 12) +x['GzipFileType'] = gzip.GzipFile(fileobj=_fileW) +# generic operating system services (CH 15) +a['StreamHandlerType'] = logging.StreamHandler() +# numeric and mathematical types (CH 9) +z = a if sys.hexversion < 0x30e00a1 else x +z['CountType'] = itertools.count(0) #FIXME: __reduce__ removed in 3.14.0a1 +z['ChainType'] = itertools.chain('0','1') +z['ProductType'] = itertools.product('0','1') +z['CycleType'] = itertools.cycle('0') +z['PermutationsType'] = itertools.permutations('0') +z['CombinationsType'] = itertools.combinations('0',1) +z['RepeatType'] = itertools.repeat(0) +z['CompressType'] = itertools.compress('0',[1]) +del z +#XXX: ...and etc + +# -- dill fails on all below here ------------------------------------------- +# types module (part of CH 8) +x['GeneratorType'] = _generator = _function(1) #XXX: priority +x['FrameType'] = _generator.gi_frame #XXX: inspect.currentframe() +x['TracebackType'] = _function2()[1] #(see: inspect.getouterframes,getframeinfo) +# other (concrete) object types +# (also: Capsule / CObject ?) +# built-in functions (CH 2) +# built-in types (CH 5) +# string services (CH 7) +x['StructType'] = struct.Struct('c') +x['CallableIteratorType'] = _srepattern.finditer('') +x['SREMatchType'] = _srepattern.match('') +x['SREScannerType'] = _srepattern.scanner('') +x['StreamReader'] = codecs.StreamReader(_cstrI) #XXX: ... and etc +# python object persistence (CH 11) +# x['DbShelveType'] = shelve.open('foo','n')#,protocol=2) #XXX: delete foo +if HAS_ALL: + z = a if IS_PYPY else x + z['DbmType'] = dbm.open(_tempfile,'n') + del z +# x['DbCursorType'] = _dbcursor = anydbm.open('foo','n') #XXX: delete foo +# x['DbType'] = _dbcursor.db +# data compression and archiving (CH 12) +x['ZlibCompressType'] = zlib.compressobj() +x['ZlibDecompressType'] = zlib.decompressobj() +# file formats (CH 13) +x['CSVReaderType'] = csv.reader(_cstrI) +x['CSVWriterType'] = csv.writer(_cstrO) +x['CSVDictReaderType'] = csv.DictReader(_cstrI) +x['CSVDictWriterType'] = csv.DictWriter(_cstrO,{}) +# cryptographic services (CH 14) +x['HashType'] = hashlib.md5() +if (sys.hexversion < 0x30800a1): + x['HMACType'] = hmac.new(_in) +else: + x['HMACType'] = hmac.new(_in, digestmod='md5') +# generic operating system services (CH 15) +if HAS_CURSES: pass + #x['CursesWindowType'] = _curwin = curses.initscr() #FIXME: messes up tty + #x['CursesTextPadType'] = textpad.Textbox(_curwin) + #x['CursesPanelType'] = panel.new_panel(_curwin) +if HAS_CTYPES: + x['CCharPType'] = ctypes.c_char_p() + x['CWCharPType'] = ctypes.c_wchar_p() + x['CVoidPType'] = ctypes.c_void_p() + if sys.platform[:3] == 'win': + x['CDLLType'] = _cdll = ctypes.cdll.msvcrt + else: + x['CDLLType'] = _cdll = ctypes.CDLL(None) + if not IS_PYPY: + x['PyDLLType'] = _pydll = ctypes.pythonapi + x['FuncPtrType'] = _cdll._FuncPtr() + x['CCharArrayType'] = ctypes.create_string_buffer(1) + x['CWCharArrayType'] = ctypes.create_unicode_buffer(1) + x['CParamType'] = ctypes.byref(_cchar) + x['LPCCharType'] = ctypes.pointer(_cchar) + x['LPCCharObjType'] = _lpchar = ctypes.POINTER(ctypes.c_char) + x['NullPtrType'] = _lpchar() + x['NullPyObjectType'] = ctypes.py_object() + x['PyObjectType'] = ctypes.py_object(lambda :None) + z = a if IS_PYPY else x + z['FieldType'] = _field = _Struct._field + z['CFUNCTYPEType'] = _cfunc = ctypes.CFUNCTYPE(ctypes.c_char) + if sys.hexversion < 0x30c00b3: + x['CFunctionType'] = _cfunc(str) + del z +# numeric and mathematical types (CH 9) +a['MethodCallerType'] = operator.methodcaller('mro') # 2.6 +# built-in types (CH 5) +x['MemoryType'] = memoryview(_in) # 2.7 +x['MemoryType2'] = memoryview(bytearray(_in)) # 2.7 +d['DictItemsType'] = _dict.items() # 2.7 +d['DictKeysType'] = _dict.keys() # 2.7 +d['DictValuesType'] = _dict.values() # 2.7 +# generic operating system services (CH 15) +a['RawTextHelpFormatterType'] = argparse.RawTextHelpFormatter('PROG') +a['RawDescriptionHelpFormatterType'] = argparse.RawDescriptionHelpFormatter('PROG') +a['ArgDefaultsHelpFormatterType'] = argparse.ArgumentDefaultsHelpFormatter('PROG') +z = a if IS_PYPY else x +z['CmpKeyType'] = _cmpkey = functools.cmp_to_key(_methodwrap) # 2.7, >=3.2 +z['CmpKeyObjType'] = _cmpkey('0') #2.7, >=3.2 +del z +# oddities: removed, etc +x['BufferType'] = x['MemoryType'] + +from dill._dill import _testcapsule +if _testcapsule is not None: + d['PyCapsuleType'] = _testcapsule +del _testcapsule + +if hasattr(dataclasses, '_HAS_DEFAULT_FACTORY'): + a['DataclassesHasDefaultFactoryType'] = dataclasses._HAS_DEFAULT_FACTORY + +if hasattr(dataclasses, 'MISSING'): + a['DataclassesMissingType'] = dataclasses.MISSING + +if hasattr(dataclasses, 'KW_ONLY'): + a['DataclassesKWOnlyType'] = dataclasses.KW_ONLY + +if hasattr(dataclasses, '_FIELD_BASE'): + a['DataclassesFieldBaseType'] = dataclasses._FIELD + +# -- cleanup ---------------------------------------------------------------- +a.update(d) # registered also succeed +if sys.platform[:3] == 'win': + os.close(_filedescrip) # required on win32 +os.remove(_tempfile) + + +# EOF diff --git a/local_packages/dill/_shims.py b/local_packages/dill/_shims.py new file mode 100644 index 0000000..a7a5bdf --- /dev/null +++ b/local_packages/dill/_shims.py @@ -0,0 +1,193 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Author: Anirudh Vegesana (avegesan@cs.stanford.edu) +# Copyright (c) 2021-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +Provides shims for compatibility between versions of dill and Python. + +Compatibility shims should be provided in this file. Here are two simple example +use cases. + +Deprecation of constructor function: +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Assume that we were transitioning _import_module in _dill.py to +the builtin function importlib.import_module when present. + +@move_to(_dill) +def _import_module(import_name): + ... # code already in _dill.py + +_import_module = Getattr(importlib, 'import_module', Getattr(_dill, '_import_module', None)) + +The code will attempt to find import_module in the importlib module. If not +present, it will use the _import_module function in _dill. + +Emulate new Python behavior in older Python versions: +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +CellType.cell_contents behaves differently in Python 3.6 and 3.7. It is +read-only in Python 3.6 and writable and deletable in 3.7. + +if _dill.OLD37 and _dill.HAS_CTYPES and ...: + @move_to(_dill) + def _setattr(object, name, value): + if type(object) is _dill.CellType and name == 'cell_contents': + _PyCell_Set.argtypes = (ctypes.py_object, ctypes.py_object) + _PyCell_Set(object, value) + else: + setattr(object, name, value) +... # more cases below + +_setattr = Getattr(_dill, '_setattr', setattr) + +_dill._setattr will be used when present to emulate Python 3.7 functionality in +older versions of Python while defaulting to the standard setattr in 3.7+. + +See this PR for the discussion that lead to this system: +https://github.com/uqfoundation/dill/pull/443 +""" + +import inspect +import sys + +_dill = sys.modules['dill._dill'] + + +class Reduce(object): + """ + Reduce objects are wrappers used for compatibility enforcement during + unpickle-time. They should only be used in calls to pickler.save and + other Reduce objects. They are only evaluated within unpickler.load. + + Pickling a Reduce object makes the two implementations equivalent: + + pickler.save(Reduce(*reduction)) + + pickler.save_reduce(*reduction, obj=reduction) + """ + __slots__ = ['reduction'] + def __new__(cls, *reduction, **kwargs): + """ + Args: + *reduction: a tuple that matches the format given here: + https://docs.python.org/3/library/pickle.html#object.__reduce__ + is_callable: a bool to indicate that the object created by + unpickling `reduction` is callable. If true, the current Reduce + is allowed to be used as the function in further save_reduce calls + or Reduce objects. + """ + is_callable = kwargs.get('is_callable', False) # Pleases Py2. Can be removed later + if is_callable: + self = object.__new__(_CallableReduce) + else: + self = object.__new__(Reduce) + self.reduction = reduction + return self + def __repr__(self): + return 'Reduce%s' % (self.reduction,) + def __copy__(self): + return self # pragma: no cover + def __deepcopy__(self, memo): + return self # pragma: no cover + def __reduce__(self): + return self.reduction + def __reduce_ex__(self, protocol): + return self.__reduce__() + +class _CallableReduce(Reduce): + # A version of Reduce for functions. Used to trick pickler.save_reduce into + # thinking that Reduce objects of functions are themselves meaningful functions. + def __call__(self, *args, **kwargs): + reduction = self.__reduce__() + func = reduction[0] + f_args = reduction[1] + obj = func(*f_args) + return obj(*args, **kwargs) + +__NO_DEFAULT = _dill.Sentinel('Getattr.NO_DEFAULT') + +def Getattr(object, name, default=__NO_DEFAULT): + """ + A Reduce object that represents the getattr operation. When unpickled, the + Getattr will access an attribute 'name' of 'object' and return the value + stored there. If the attribute doesn't exist, the default value will be + returned if present. + + The following statements are equivalent: + + Getattr(collections, 'OrderedDict') + Getattr(collections, 'spam', None) + Getattr(*args) + + Reduce(getattr, (collections, 'OrderedDict')) + Reduce(getattr, (collections, 'spam', None)) + Reduce(getattr, args) + + During unpickling, the first two will result in collections.OrderedDict and + None respectively because the first attribute exists and the second one does + not, forcing it to use the default value given in the third argument. + """ + + if default is Getattr.NO_DEFAULT: + reduction = (getattr, (object, name)) + else: + reduction = (getattr, (object, name, default)) + + return Reduce(*reduction, is_callable=callable(default)) + +Getattr.NO_DEFAULT = __NO_DEFAULT +del __NO_DEFAULT + +def move_to(module, name=None): + def decorator(func): + if name is None: + fname = func.__name__ + else: + fname = name + module.__dict__[fname] = func + func.__module__ = module.__name__ + return func + return decorator + +def register_shim(name, default): + """ + A easier to understand and more compact way of "softly" defining a function. + These two pieces of code are equivalent: + + if _dill.OLD3X: + def _create_class(): + ... + _create_class = register_shim('_create_class', types.new_class) + + if _dill.OLD3X: + @move_to(_dill) + def _create_class(): + ... + _create_class = Getattr(_dill, '_create_class', types.new_class) + + Intuitively, it creates a function or object in the versions of dill/python + that require special reimplementations, and use a core library or default + implementation if that function or object does not exist. + """ + func = globals().get(name) + if func is not None: + _dill.__dict__[name] = func + func.__module__ = _dill.__name__ + + if default is Getattr.NO_DEFAULT: + reduction = (getattr, (_dill, name)) + else: + reduction = (getattr, (_dill, name, default)) + + return Reduce(*reduction, is_callable=callable(default)) + +###################### +## Compatibility Shims are defined below +###################### + +_CELL_EMPTY = register_shim('_CELL_EMPTY', None) + +_setattr = register_shim('_setattr', setattr) +_delattr = register_shim('_delattr', delattr) diff --git a/local_packages/dill/detect.py b/local_packages/dill/detect.py new file mode 100644 index 0000000..cdd4287 --- /dev/null +++ b/local_packages/dill/detect.py @@ -0,0 +1,287 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +Methods for detecting objects leading to pickling failures. +""" + +import dis +from inspect import ismethod, isfunction, istraceback, isframe, iscode + +from .pointers import parent, reference, at, parents, children +from .logger import trace + +__all__ = ['baditems','badobjects','badtypes','code','errors','freevars', + 'getmodule','globalvars','nestedcode','nestedglobals','outermost', + 'referredglobals','referrednested','trace','varnames'] + +def getmodule(object, _filename=None, force=False): + """get the module of the object""" + from inspect import getmodule as getmod + module = getmod(object, _filename) + if module or not force: return module + import builtins + from .source import getname + name = getname(object, force=True) + return builtins if name in vars(builtins).keys() else None + +def outermost(func): # is analogous to getsource(func,enclosing=True) + """get outermost enclosing object (i.e. the outer function in a closure) + + NOTE: this is the object-equivalent of getsource(func, enclosing=True) + """ + if ismethod(func): + _globals = func.__func__.__globals__ or {} + elif isfunction(func): + _globals = func.__globals__ or {} + else: + return #XXX: or raise? no matches + _globals = _globals.items() + # get the enclosing source + from .source import getsourcelines + try: lines,lnum = getsourcelines(func, enclosing=True) + except Exception: #TypeError, IOError + lines,lnum = [],None + code = ''.join(lines) + # get all possible names,objects that are named in the enclosing source + _locals = ((name,obj) for (name,obj) in _globals if name in code) + # now only save the objects that generate the enclosing block + for name,obj in _locals: #XXX: don't really need 'name' + try: + if getsourcelines(obj) == (lines,lnum): return obj + except Exception: #TypeError, IOError + pass + return #XXX: or raise? no matches + +def nestedcode(func, recurse=True): #XXX: or return dict of {co_name: co} ? + """get the code objects for any nested functions (e.g. in a closure)""" + func = code(func) + if not iscode(func): return [] #XXX: or raise? no matches + nested = set() + for co in func.co_consts: + if co is None: continue + co = code(co) + if co: + nested.add(co) + if recurse: nested |= set(nestedcode(co, recurse=True)) + return list(nested) + +def code(func): + """get the code object for the given function or method + + NOTE: use dill.source.getsource(CODEOBJ) to get the source code + """ + if ismethod(func): func = func.__func__ + if isfunction(func): func = func.__code__ + if istraceback(func): func = func.tb_frame + if isframe(func): func = func.f_code + if iscode(func): return func + return + +#XXX: ugly: parse dis.dis for name after " len(referrednested(func)), try calling func(). + If possible, python builds code objects, but delays building functions + until func() is called. + """ + import gc + funcs = set() + # get the code objects, and try to track down by referrence + for co in nestedcode(func, recurse): + # look for function objects that refer to the code object + for obj in gc.get_referrers(co): + # get methods + _ = getattr(obj, '__func__', None) # ismethod + if getattr(_, '__code__', None) is co: funcs.add(obj) + # get functions + elif getattr(obj, '__code__', None) is co: funcs.add(obj) + # get frame objects + elif getattr(obj, 'f_code', None) is co: funcs.add(obj) + # get code objects + elif hasattr(obj, 'co_code') and obj is co: funcs.add(obj) +# frameobjs => func.__code__.co_varnames not in func.__code__.co_cellvars +# funcobjs => func.__code__.co_cellvars not in func.__code__.co_varnames +# frameobjs are not found, however funcobjs are... +# (see: test_mixins.quad ... and test_mixins.wtf) +# after execution, code objects get compiled, and then may be found by gc + return list(funcs) + + +def freevars(func): + """get objects defined in enclosing code that are referred to by func + + returns a dict of {name:object}""" + if ismethod(func): func = func.__func__ + if isfunction(func): + closures = func.__closure__ or () + func = func.__code__.co_freevars # get freevars + else: + return {} + + def get_cell_contents(): + for name, c in zip(func, closures): + try: + cell_contents = c.cell_contents + except ValueError: # cell is empty + continue + yield name, c.cell_contents + + return dict(get_cell_contents()) + +# thanks to Davies Liu for recursion of globals +def nestedglobals(func, recurse=True): + """get the names of any globals found within func""" + func = code(func) + if func is None: return list() + import sys + from .temp import capture + CAN_NULL = sys.hexversion >= 0x30b00a7 # NULL may be prepended >= 3.11a7 + names = set() + with capture('stdout') as out: + try: + dis.dis(func) #XXX: dis.dis(None) disassembles last traceback + except IndexError: + pass #FIXME: HACK for IS_PYPY (3.11) + for line in out.getvalue().splitlines(): + if '_GLOBAL' in line: + name = line.split('(')[-1].split(')')[0] + if CAN_NULL: + names.add(name.replace('NULL + ', '').replace(' + NULL', '')) + else: + names.add(name) + for co in getattr(func, 'co_consts', tuple()): + if co and recurse and iscode(co): + names.update(nestedglobals(co, recurse=True)) + return list(names) + +def referredglobals(func, recurse=True, builtin=False): + """get the names of objects in the global scope referred to by func""" + return globalvars(func, recurse, builtin).keys() + +def globalvars(func, recurse=True, builtin=False): + """get objects defined in global scope that are referred to by func + + return a dict of {name:object}""" + if ismethod(func): func = func.__func__ + if isfunction(func): + globs = vars(getmodule(sum)).copy() if builtin else {} + # get references from within closure + orig_func, func = func, set() + for obj in orig_func.__closure__ or {}: + try: + cell_contents = obj.cell_contents + except ValueError: # cell is empty + pass + else: + _vars = globalvars(cell_contents, recurse, builtin) or {} + func.update(_vars) #XXX: (above) be wary of infinte recursion? + globs.update(_vars) + # get globals + globs.update(orig_func.__globals__ or {}) + # get names of references + if not recurse: + func.update(orig_func.__code__.co_names) + else: + func.update(nestedglobals(orig_func.__code__)) + # find globals for all entries of func + for key in func.copy(): #XXX: unnecessary...? + nested_func = globs.get(key) + if nested_func is orig_func: + #func.remove(key) if key in func else None + continue #XXX: globalvars(func, False)? + func.update(globalvars(nested_func, True, builtin)) + elif iscode(func): + globs = vars(getmodule(sum)).copy() if builtin else {} + #globs.update(globals()) + if not recurse: + func = func.co_names # get names + else: + orig_func = func.co_name # to stop infinite recursion + func = set(nestedglobals(func)) + # find globals for all entries of func + for key in func.copy(): #XXX: unnecessary...? + if key is orig_func: + #func.remove(key) if key in func else None + continue #XXX: globalvars(func, False)? + nested_func = globs.get(key) + func.update(globalvars(nested_func, True, builtin)) + else: + return {} + #NOTE: if name not in __globals__, then we skip it... + return dict((name,globs[name]) for name in func if name in globs) + + +def varnames(func): + """get names of variables defined by func + + returns a tuple (local vars, local vars referrenced by nested functions)""" + func = code(func) + if not iscode(func): + return () #XXX: better ((),())? or None? + return func.co_varnames, func.co_cellvars + + +def baditems(obj, exact=False, safe=False): #XXX: obj=globals() ? + """get items in object that fail to pickle""" + if not hasattr(obj,'__iter__'): # is not iterable + return [j for j in (badobjects(obj,0,exact,safe),) if j is not None] + obj = obj.values() if getattr(obj,'values',None) else obj + _obj = [] # can't use a set, as items may be unhashable + [_obj.append(badobjects(i,0,exact,safe)) for i in obj if i not in _obj] + return [j for j in _obj if j is not None] + + +def badobjects(obj, depth=0, exact=False, safe=False): + """get objects that fail to pickle""" + from dill import pickles + if not depth: + if pickles(obj,exact,safe): return None + return obj + return dict(((attr, badobjects(getattr(obj,attr),depth-1,exact,safe)) \ + for attr in dir(obj) if not pickles(getattr(obj,attr),exact,safe))) + +def badtypes(obj, depth=0, exact=False, safe=False): + """get types for objects that fail to pickle""" + from dill import pickles + if not depth: + if pickles(obj,exact,safe): return None + return type(obj) + return dict(((attr, badtypes(getattr(obj,attr),depth-1,exact,safe)) \ + for attr in dir(obj) if not pickles(getattr(obj,attr),exact,safe))) + +def errors(obj, depth=0, exact=False, safe=False): + """get errors for objects that fail to pickle""" + from dill import pickles, copy + if not depth: + try: + pik = copy(obj) + if exact: + assert pik == obj, \ + "Unpickling produces %s instead of %s" % (pik,obj) + assert type(pik) == type(obj), \ + "Unpickling produces %s instead of %s" % (type(pik),type(obj)) + return None + except Exception: + import sys + return sys.exc_info()[1] + _dict = {} + for attr in dir(obj): + try: + _attr = getattr(obj,attr) + except Exception: + import sys + _dict[attr] = sys.exc_info()[1] + continue + if not pickles(_attr,exact,safe): + _dict[attr] = errors(_attr,depth-1,exact,safe) + return _dict + + +# EOF diff --git a/local_packages/dill/logger.py b/local_packages/dill/logger.py new file mode 100644 index 0000000..a330112 --- /dev/null +++ b/local_packages/dill/logger.py @@ -0,0 +1,285 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Author: Leonardo Gama (@leogama) +# Copyright (c) 2022-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +Logging utilities for dill. + +The 'logger' object is dill's top-level logger. + +The 'adapter' object wraps the logger and implements a 'trace()' method that +generates a detailed tree-style trace for the pickling call at log level INFO. + +The 'trace()' function sets and resets dill's logger log level, enabling and +disabling the pickling trace. + +The trace shows a tree structure depicting the depth of each object serialized +*with dill save functions*, but not the ones that use save functions from +'pickle._Pickler.dispatch'. If the information is available, it also displays +the size in bytes that the object contributed to the pickle stream (including +its child objects). Sample trace output: + + >>> import dill, dill.tests + >>> dill.detect.trace(True) + >>> dill.dump_session(main=dill.tests) + ┬ M1: + ├┬ F2: + │└ # F2 [32 B] + ├┬ D2: + │├┬ T4: + ││└ # T4 [35 B] + │├┬ D2: + ││├┬ T4: + │││└ # T4 [50 B] + ││├┬ D2: + │││└ # D2 [84 B] + ││└ # D2 [413 B] + │└ # D2 [763 B] + └ # M1 [813 B] +""" + +__all__ = ['adapter', 'logger', 'trace'] + +import codecs +import contextlib +import locale +import logging +import math +import os +from functools import partial +from typing import TextIO, Union + +import dill + +# Tree drawing characters: Unicode to ASCII map. +ASCII_MAP = str.maketrans({"│": "|", "├": "|", "┬": "+", "└": "`"}) + +## Notes about the design choices ## + +# Here is some domumentation of the Standard Library's logging internals that +# can't be found completely in the official documentation. dill's logger is +# obtained by calling logging.getLogger('dill') and therefore is an instance of +# logging.getLoggerClass() at the call time. As this is controlled by the user, +# in order to add some functionality to it it's necessary to use a LoggerAdapter +# to wrap it, overriding some of the adapter's methods and creating new ones. +# +# Basic calling sequence +# ====================== +# +# Python's logging functionality can be conceptually divided into five steps: +# 0. Check logging level -> abort if call level is greater than logger level +# 1. Gather information -> construct a LogRecord from passed arguments and context +# 2. Filter (optional) -> discard message if the record matches a filter +# 3. Format -> format message with args, then format output string with message plus record +# 4. Handle -> write the formatted string to output as defined in the handler +# +# dill.logging.logger.log -> # or logger.info, etc. +# Logger.log -> \ +# Logger._log -> }- accept 'extra' parameter for custom record entries +# Logger.makeRecord -> / +# LogRecord.__init__ +# Logger.handle -> +# Logger.callHandlers -> +# Handler.handle -> +# Filterer.filter -> +# Filter.filter +# StreamHandler.emit -> +# Handler.format -> +# Formatter.format -> +# LogRecord.getMessage # does: record.message = msg % args +# Formatter.formatMessage -> +# PercentStyle.format # does: self._fmt % vars(record) +# +# NOTE: All methods from the second line on are from logging.__init__.py + +class TraceAdapter(logging.LoggerAdapter): + """ + Tracks object tree depth and calculates pickled object size. + + A single instance of this wraps the module's logger, as the logging API + doesn't allow setting it directly with a custom Logger subclass. The added + 'trace()' method receives a pickle instance as the first argument and + creates extra values to be added in the LogRecord from it, then calls + 'info()'. + + Usage of logger with 'trace()' method: + + >>> from dill.logger import adapter as logger #NOTE: not dill.logger.logger + >>> ... + >>> def save_atype(pickler, obj): + >>> logger.trace(pickler, "Message with %s and %r etc. placeholders", 'text', obj) + >>> ... + """ + def __init__(self, logger): + self.logger = logger + def addHandler(self, handler): + formatter = TraceFormatter("%(prefix)s%(message)s%(suffix)s", handler=handler) + handler.setFormatter(formatter) + self.logger.addHandler(handler) + def removeHandler(self, handler): + self.logger.removeHandler(handler) + def process(self, msg, kwargs): + # A no-op override, as we don't have self.extra. + return msg, kwargs + def trace_setup(self, pickler): + # Called by Pickler.dump(). + if not dill._dill.is_dill(pickler, child=False): + return + if self.isEnabledFor(logging.INFO): + pickler._trace_depth = 1 + pickler._size_stack = [] + else: + pickler._trace_depth = None + def trace(self, pickler, msg, *args, **kwargs): + if not hasattr(pickler, '_trace_depth'): + logger.info(msg, *args, **kwargs) + return + if pickler._trace_depth is None: + return + extra = kwargs.get('extra', {}) + pushed_obj = msg.startswith('#') + size = None + try: + # Streams are not required to be tellable. + size = pickler._file.tell() + frame = pickler.framer.current_frame + try: + size += frame.tell() + except AttributeError: + # PyPy may use a BytesBuilder as frame + size += len(frame) + except (AttributeError, TypeError): + pass + if size is not None: + if not pushed_obj: + pickler._size_stack.append(size) + else: + size -= pickler._size_stack.pop() + extra['size'] = size + if pushed_obj: + pickler._trace_depth -= 1 + extra['depth'] = pickler._trace_depth + kwargs['extra'] = extra + self.info(msg, *args, **kwargs) + if not pushed_obj: + pickler._trace_depth += 1 + +class TraceFormatter(logging.Formatter): + """ + Generates message prefix and suffix from record. + + This Formatter adds prefix and suffix strings to the log message in trace + mode (an also provides empty string defaults for normal logs). + """ + def __init__(self, *args, handler=None, **kwargs): + super().__init__(*args, **kwargs) + try: + encoding = handler.stream.encoding + if encoding is None: + raise AttributeError + except AttributeError: + encoding = locale.getpreferredencoding() + try: + encoding = codecs.lookup(encoding).name + except LookupError: + self.is_utf8 = False + else: + self.is_utf8 = (encoding == codecs.lookup('utf-8').name) + def format(self, record): + fields = {'prefix': "", 'suffix': ""} + if getattr(record, 'depth', 0) > 0: + if record.msg.startswith("#"): + prefix = (record.depth - 1)*"│" + "└" + elif record.depth == 1: + prefix = "┬" + else: + prefix = (record.depth - 2)*"│" + "├┬" + if not self.is_utf8: + prefix = prefix.translate(ASCII_MAP) + "-" + fields['prefix'] = prefix + " " + if hasattr(record, 'size') and record.size is not None and record.size >= 1: + # Show object size in human-readable form. + power = int(math.log(record.size, 2)) // 10 + size = record.size >> power*10 + fields['suffix'] = " [%d %sB]" % (size, "KMGTP"[power] + "i" if power else "") + vars(record).update(fields) + return super().format(record) + +logger = logging.getLogger('dill') +logger.propagate = False +adapter = TraceAdapter(logger) +stderr_handler = logging._StderrHandler() +adapter.addHandler(stderr_handler) + +def trace(arg: Union[bool, TextIO, str, os.PathLike] = None, *, mode: str = 'a') -> None: + """print a trace through the stack when pickling; useful for debugging + + With a single boolean argument, enable or disable the tracing. + + Example usage: + + >>> import dill + >>> dill.detect.trace(True) + >>> dill.dump_session() + + Alternatively, ``trace()`` can be used as a context manager. With no + arguments, it just takes care of restoring the tracing state on exit. + Either a file handle, or a file name and (optionally) a file mode may be + specitfied to redirect the tracing output in the ``with`` block context. A + log function is yielded by the manager so the user can write extra + information to the file. + + Example usage: + + >>> from dill import detect + >>> D = {'a': 42, 'b': {'x': None}} + >>> with detect.trace(): + >>> dumps(D) + ┬ D2: + ├┬ D2: + │└ # D2 [8 B] + └ # D2 [22 B] + >>> squared = lambda x: x**2 + >>> with detect.trace('output.txt', mode='w') as log: + >>> log("> D = %r", D) + >>> dumps(D) + >>> log("> squared = %r", squared) + >>> dumps(squared) + + Arguments: + arg: a boolean value, or an optional file-like or path-like object for the context manager + mode: mode string for ``open()`` if a file name is passed as the first argument + """ + if repr(arg) not in ('False', 'True'): + return TraceManager(file=arg, mode=mode) + logger.setLevel(logging.INFO if arg else logging.WARNING) + +class TraceManager(contextlib.AbstractContextManager): + """context manager version of trace(); can redirect the trace to a file""" + def __init__(self, file, mode): + self.file = file + self.mode = mode + self.redirect = file is not None + self.file_is_stream = hasattr(file, 'write') + def __enter__(self): + if self.redirect: + stderr_handler.flush() + if self.file_is_stream: + self.handler = logging.StreamHandler(self.file) + else: + self.handler = logging.FileHandler(self.file, self.mode) + adapter.removeHandler(stderr_handler) + adapter.addHandler(self.handler) + self.old_level = adapter.getEffectiveLevel() + adapter.setLevel(logging.INFO) + return adapter.info + def __exit__(self, *exc_info): + adapter.setLevel(self.old_level) + if self.redirect: + adapter.removeHandler(self.handler) + adapter.addHandler(stderr_handler) + if not self.file_is_stream: + self.handler.close() diff --git a/local_packages/dill/objtypes.py b/local_packages/dill/objtypes.py new file mode 100644 index 0000000..bf88a47 --- /dev/null +++ b/local_packages/dill/objtypes.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +all Python Standard Library object types (currently: CH 1-15 @ 2.7) +and some other common object types (i.e. numpy.ndarray) + +to load more objects and types, use dill.load_types() +""" + +# non-local import of dill.objects +from dill import objects +for _type in objects.keys(): + exec("%s = type(objects['%s'])" % (_type,_type)) + +del objects +try: + del _type +except NameError: + pass diff --git a/local_packages/dill/pointers.py b/local_packages/dill/pointers.py new file mode 100644 index 0000000..d27745f --- /dev/null +++ b/local_packages/dill/pointers.py @@ -0,0 +1,122 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +__all__ = ['parent', 'reference', 'at', 'parents', 'children'] + +import gc +import sys + +from ._dill import _proxy_helper as reference +from ._dill import _locate_object as at + +def parent(obj, objtype, ignore=()): + """ +>>> listiter = iter([4,5,6,7]) +>>> obj = parent(listiter, list) +>>> obj == [4,5,6,7] # actually 'is', but don't have handle any longer +True + +NOTE: objtype can be a single type (e.g. int or list) or a tuple of types. + +WARNING: if obj is a sequence (e.g. list), may produce unexpected results. +Parent finds *one* parent (e.g. the last member of the sequence). + """ + depth = 1 #XXX: always looking for the parent (only, right?) + chain = parents(obj, objtype, depth, ignore) + parent = chain.pop() + if parent is obj: + return None + return parent + + +def parents(obj, objtype, depth=1, ignore=()): #XXX: objtype=object ? + """Find the chain of referents for obj. Chain will end with obj. + + objtype: an object type or tuple of types to search for + depth: search depth (e.g. depth=2 is 'grandparents') + ignore: an object or tuple of objects to ignore in the search + """ + edge_func = gc.get_referents # looking for refs, not back_refs + predicate = lambda x: isinstance(x, objtype) # looking for parent type + #if objtype is None: predicate = lambda x: True #XXX: in obj.mro() ? + ignore = (ignore,) if not hasattr(ignore, '__len__') else ignore + ignore = (id(obj) for obj in ignore) + chain = find_chain(obj, predicate, edge_func, depth)[::-1] + #XXX: should pop off obj... ? + return chain + + +def children(obj, objtype, depth=1, ignore=()): #XXX: objtype=object ? + """Find the chain of referrers for obj. Chain will start with obj. + + objtype: an object type or tuple of types to search for + depth: search depth (e.g. depth=2 is 'grandchildren') + ignore: an object or tuple of objects to ignore in the search + + NOTE: a common thing to ignore is all globals, 'ignore=(globals(),)' + + NOTE: repeated calls may yield different results, as python stores + the last value in the special variable '_'; thus, it is often good + to execute something to replace '_' (e.g. >>> 1+1). + """ + edge_func = gc.get_referrers # looking for back_refs, not refs + predicate = lambda x: isinstance(x, objtype) # looking for child type + #if objtype is None: predicate = lambda x: True #XXX: in obj.mro() ? + ignore = (ignore,) if not hasattr(ignore, '__len__') else ignore + ignore = (id(obj) for obj in ignore) + chain = find_chain(obj, predicate, edge_func, depth, ignore) + #XXX: should pop off obj... ? + return chain + + +# more generic helper function (cut-n-paste from objgraph) +# Source at http://mg.pov.lt/objgraph/ +# Copyright (c) 2008-2010 Marius Gedminas +# Copyright (c) 2010 Stefano Rivera +# Released under the MIT licence (see objgraph/objgrah.py) + +def find_chain(obj, predicate, edge_func, max_depth=20, extra_ignore=()): + queue = [obj] + depth = {id(obj): 0} + parent = {id(obj): None} + ignore = set(extra_ignore) + ignore.add(id(extra_ignore)) + ignore.add(id(queue)) + ignore.add(id(depth)) + ignore.add(id(parent)) + ignore.add(id(ignore)) + ignore.add(id(sys._getframe())) # this function + ignore.add(id(sys._getframe(1))) # find_chain/find_backref_chain, likely + gc.collect() + while queue: + target = queue.pop(0) + if predicate(target): + chain = [target] + while parent[id(target)] is not None: + target = parent[id(target)] + chain.append(target) + return chain + tdepth = depth[id(target)] + if tdepth < max_depth: + referrers = edge_func(target) + ignore.add(id(referrers)) + for source in referrers: + if id(source) in ignore: + continue + if id(source) not in depth: + depth[id(source)] = tdepth + 1 + parent[id(source)] = target + queue.append(source) + return [obj] # not found + + +# backward compatibility +refobject = at + + +# EOF diff --git a/local_packages/dill/session.py b/local_packages/dill/session.py new file mode 100644 index 0000000..b5dd2ff --- /dev/null +++ b/local_packages/dill/session.py @@ -0,0 +1,612 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Author: Leonardo Gama (@leogama) +# Copyright (c) 2008-2015 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +Pickle and restore the intepreter session. +""" + +__all__ = [ + 'dump_module', 'load_module', 'load_module_asdict', + 'dump_session', 'load_session' # backward compatibility +] + +import re +import os +import sys +import warnings +import pathlib +import tempfile + +TEMPDIR = pathlib.PurePath(tempfile.gettempdir()) + +# Type hints. +from typing import Optional, Union + +from dill import _dill, Pickler, Unpickler +from ._dill import ( + BuiltinMethodType, FunctionType, MethodType, ModuleType, TypeType, + _import_module, _is_builtin_module, _is_imported_module, _main_module, + _reverse_typemap, __builtin__, UnpicklingError, +) + +def _module_map(): + """get map of imported modules""" + from collections import defaultdict + from types import SimpleNamespace + modmap = SimpleNamespace( + by_name=defaultdict(list), + by_id=defaultdict(list), + top_level={}, + ) + for modname, module in sys.modules.items(): + if modname in ('__main__', '__mp_main__') or not isinstance(module, ModuleType): + continue + if '.' not in modname: + modmap.top_level[id(module)] = modname + for objname, modobj in module.__dict__.items(): + modmap.by_name[objname].append((modobj, modname)) + modmap.by_id[id(modobj)].append((modobj, objname, modname)) + return modmap + +IMPORTED_AS_TYPES = (ModuleType, TypeType, FunctionType, MethodType, BuiltinMethodType) +if 'PyCapsuleType' in _reverse_typemap: + IMPORTED_AS_TYPES += (_reverse_typemap['PyCapsuleType'],) +IMPORTED_AS_MODULES = ('ctypes', 'typing', 'subprocess', 'threading', + r'concurrent\.futures(\.\w+)?', r'multiprocessing(\.\w+)?') +IMPORTED_AS_MODULES = tuple(re.compile(x) for x in IMPORTED_AS_MODULES) + +def _lookup_module(modmap, name, obj, main_module): + """lookup name or id of obj if module is imported""" + for modobj, modname in modmap.by_name[name]: + if modobj is obj and sys.modules[modname] is not main_module: + return modname, name + __module__ = getattr(obj, '__module__', None) + if isinstance(obj, IMPORTED_AS_TYPES) or (__module__ is not None + and any(regex.fullmatch(__module__) for regex in IMPORTED_AS_MODULES)): + for modobj, objname, modname in modmap.by_id[id(obj)]: + if sys.modules[modname] is not main_module: + return modname, objname + return None, None + +def _stash_modules(main_module): + modmap = _module_map() + newmod = ModuleType(main_module.__name__) + + imported = [] + imported_as = [] + imported_top_level = [] # keep separated for backward compatibility + original = {} + for name, obj in main_module.__dict__.items(): + if obj is main_module: + original[name] = newmod # self-reference + elif obj is main_module.__dict__: + original[name] = newmod.__dict__ + # Avoid incorrectly matching a singleton value in another package (ex.: __doc__). + elif any(obj is singleton for singleton in (None, False, True)) \ + or isinstance(obj, ModuleType) and _is_builtin_module(obj): # always saved by ref + original[name] = obj + else: + source_module, objname = _lookup_module(modmap, name, obj, main_module) + if source_module is not None: + if objname == name: + imported.append((source_module, name)) + else: + imported_as.append((source_module, objname, name)) + else: + try: + imported_top_level.append((modmap.top_level[id(obj)], name)) + except KeyError: + original[name] = obj + + if len(original) < len(main_module.__dict__): + newmod.__dict__.update(original) + newmod.__dill_imported = imported + newmod.__dill_imported_as = imported_as + newmod.__dill_imported_top_level = imported_top_level + if getattr(newmod, '__loader__', None) is None and _is_imported_module(main_module): + # Trick _is_imported_module() to force saving as an imported module. + newmod.__loader__ = True # will be discarded by save_module() + return newmod + else: + return main_module + +def _restore_modules(unpickler, main_module): + try: + for modname, name in main_module.__dict__.pop('__dill_imported'): + main_module.__dict__[name] = unpickler.find_class(modname, name) + for modname, objname, name in main_module.__dict__.pop('__dill_imported_as'): + main_module.__dict__[name] = unpickler.find_class(modname, objname) + for modname, name in main_module.__dict__.pop('__dill_imported_top_level'): + main_module.__dict__[name] = __import__(modname) + except KeyError: + pass + +#NOTE: 06/03/15 renamed main_module to main +def dump_module( + filename: Union[str, os.PathLike] = None, + module: Optional[Union[ModuleType, str]] = None, + refimported: bool = False, + **kwds +) -> None: + """Pickle the current state of :py:mod:`__main__` or another module to a file. + + Save the contents of :py:mod:`__main__` (e.g. from an interactive + interpreter session), an imported module, or a module-type object (e.g. + built with :py:class:`~types.ModuleType`), to a file. The pickled + module can then be restored with the function :py:func:`load_module`. + + Args: + filename: a path-like object or a writable stream. If `None` + (the default), write to a named file in a temporary directory. + module: a module object or the name of an importable module. If `None` + (the default), :py:mod:`__main__` is saved. + refimported: if `True`, all objects identified as having been imported + into the module's namespace are saved by reference. *Note:* this is + similar but independent from ``dill.settings[`byref`]``, as + ``refimported`` refers to virtually all imported objects, while + ``byref`` only affects select objects. + **kwds: extra keyword arguments passed to :py:class:`Pickler()`. + + Raises: + :py:exc:`PicklingError`: if pickling fails. + + Examples: + + - Save current interpreter session state: + + >>> import dill + >>> squared = lambda x: x*x + >>> dill.dump_module() # save state of __main__ to /tmp/session.pkl + + - Save the state of an imported/importable module: + + >>> import dill + >>> import pox + >>> pox.plus_one = lambda x: x+1 + >>> dill.dump_module('pox_session.pkl', module=pox) + + - Save the state of a non-importable, module-type object: + + >>> import dill + >>> from types import ModuleType + >>> foo = ModuleType('foo') + >>> foo.values = [1,2,3] + >>> import math + >>> foo.sin = math.sin + >>> dill.dump_module('foo_session.pkl', module=foo, refimported=True) + + - Restore the state of the saved modules: + + >>> import dill + >>> dill.load_module() + >>> squared(2) + 4 + >>> pox = dill.load_module('pox_session.pkl') + >>> pox.plus_one(1) + 2 + >>> foo = dill.load_module('foo_session.pkl') + >>> [foo.sin(x) for x in foo.values] + [0.8414709848078965, 0.9092974268256817, 0.1411200080598672] + + - Use `refimported` to save imported objects by reference: + + >>> import dill + >>> from html.entities import html5 + >>> type(html5), len(html5) + (dict, 2231) + >>> import io + >>> buf = io.BytesIO() + >>> dill.dump_module(buf) # saves __main__, with html5 saved by value + >>> len(buf.getvalue()) # pickle size in bytes + 71665 + >>> buf = io.BytesIO() + >>> dill.dump_module(buf, refimported=True) # html5 saved by reference + >>> len(buf.getvalue()) + 438 + + *Changed in version 0.3.6:* Function ``dump_session()`` was renamed to + ``dump_module()``. Parameters ``main`` and ``byref`` were renamed to + ``module`` and ``refimported``, respectively. + + Note: + Currently, ``dill.settings['byref']`` and ``dill.settings['recurse']`` + don't apply to this function. + """ + for old_par, par in [('main', 'module'), ('byref', 'refimported')]: + if old_par in kwds: + message = "The argument %r has been renamed %r" % (old_par, par) + if old_par == 'byref': + message += " to distinguish it from dill.settings['byref']" + warnings.warn(message + ".", PendingDeprecationWarning) + if locals()[par]: # the defaults are None and False + raise TypeError("both %r and %r arguments were used" % (par, old_par)) + refimported = kwds.pop('byref', refimported) + module = kwds.pop('main', module) + + from .settings import settings + protocol = settings['protocol'] + main = module + if main is None: + main = _main_module + elif isinstance(main, str): + main = _import_module(main) + if not isinstance(main, ModuleType): + raise TypeError("%r is not a module" % main) + if hasattr(filename, 'write'): + file = filename + else: + if filename is None: + filename = str(TEMPDIR/'session.pkl') + file = open(filename, 'wb') + try: + pickler = Pickler(file, protocol, **kwds) + pickler._original_main = main + if refimported: + main = _stash_modules(main) + pickler._main = main #FIXME: dill.settings are disabled + pickler._byref = False # disable pickling by name reference + pickler._recurse = False # disable pickling recursion for globals + pickler._session = True # is best indicator of when pickling a session + pickler._first_pass = True + pickler._main_modified = main is not pickler._original_main + pickler.dump(main) + finally: + if file is not filename: # if newly opened file + file.close() + return + +# Backward compatibility. +def dump_session(filename=None, main=None, byref=False, **kwds): + warnings.warn("dump_session() has been renamed dump_module()", PendingDeprecationWarning) + dump_module(filename, module=main, refimported=byref, **kwds) +dump_session.__doc__ = dump_module.__doc__ + +class _PeekableReader: + """lightweight stream wrapper that implements peek()""" + def __init__(self, stream): + self.stream = stream + def read(self, n): + return self.stream.read(n) + def readline(self): + return self.stream.readline() + def tell(self): + return self.stream.tell() + def close(self): + return self.stream.close() + def peek(self, n): + stream = self.stream + try: + if hasattr(stream, 'flush'): stream.flush() + position = stream.tell() + stream.seek(position) # assert seek() works before reading + chunk = stream.read(n) + stream.seek(position) + return chunk + except (AttributeError, OSError): + raise NotImplementedError("stream is not peekable: %r", stream) from None + +def _make_peekable(stream): + """return stream as an object with a peek() method""" + import io + if hasattr(stream, 'peek'): + return stream + if not (hasattr(stream, 'tell') and hasattr(stream, 'seek')): + try: + return io.BufferedReader(stream) + except Exception: + pass + return _PeekableReader(stream) + +def _identify_module(file, main=None): + """identify the name of the module stored in the given file-type object""" + from pickletools import genops + UNICODE = {'UNICODE', 'BINUNICODE', 'SHORT_BINUNICODE'} + found_import = False + try: + for opcode, arg, pos in genops(file.peek(256)): + if not found_import: + if opcode.name in ('GLOBAL', 'SHORT_BINUNICODE') and \ + arg.endswith('_import_module'): + found_import = True + else: + if opcode.name in UNICODE: + return arg + else: + raise UnpicklingError("reached STOP without finding main module") + except (NotImplementedError, ValueError) as error: + # ValueError occours when the end of the chunk is reached (without a STOP). + if isinstance(error, NotImplementedError) and main is not None: + # file is not peekable, but we have main. + return None + raise UnpicklingError("unable to identify main module") from error + +def load_module( + filename: Union[str, os.PathLike] = None, + module: Optional[Union[ModuleType, str]] = None, + **kwds +) -> Optional[ModuleType]: + """Update the selected module (default is :py:mod:`__main__`) with + the state saved at ``filename``. + + Restore a module to the state saved with :py:func:`dump_module`. The + saved module can be :py:mod:`__main__` (e.g. an interpreter session), + an imported module, or a module-type object (e.g. created with + :py:class:`~types.ModuleType`). + + When restoring the state of a non-importable module-type object, the + current instance of this module may be passed as the argument ``main``. + Otherwise, a new instance is created with :py:class:`~types.ModuleType` + and returned. + + Args: + filename: a path-like object or a readable stream. If `None` + (the default), read from a named file in a temporary directory. + module: a module object or the name of an importable module; + the module name and kind (i.e. imported or non-imported) must + match the name and kind of the module stored at ``filename``. + **kwds: extra keyword arguments passed to :py:class:`Unpickler()`. + + Raises: + :py:exc:`UnpicklingError`: if unpickling fails. + :py:exc:`ValueError`: if the argument ``main`` and module saved + at ``filename`` are incompatible. + + Returns: + A module object, if the saved module is not :py:mod:`__main__` or + a module instance wasn't provided with the argument ``main``. + + Examples: + + - Save the state of some modules: + + >>> import dill + >>> squared = lambda x: x*x + >>> dill.dump_module() # save state of __main__ to /tmp/session.pkl + >>> + >>> import pox # an imported module + >>> pox.plus_one = lambda x: x+1 + >>> dill.dump_module('pox_session.pkl', module=pox) + >>> + >>> from types import ModuleType + >>> foo = ModuleType('foo') # a module-type object + >>> foo.values = [1,2,3] + >>> import math + >>> foo.sin = math.sin + >>> dill.dump_module('foo_session.pkl', module=foo, refimported=True) + + - Restore the state of the interpreter: + + >>> import dill + >>> dill.load_module() # updates __main__ from /tmp/session.pkl + >>> squared(2) + 4 + + - Load the saved state of an importable module: + + >>> import dill + >>> pox = dill.load_module('pox_session.pkl') + >>> pox.plus_one(1) + 2 + >>> import sys + >>> pox in sys.modules.values() + True + + - Load the saved state of a non-importable module-type object: + + >>> import dill + >>> foo = dill.load_module('foo_session.pkl') + >>> [foo.sin(x) for x in foo.values] + [0.8414709848078965, 0.9092974268256817, 0.1411200080598672] + >>> import math + >>> foo.sin is math.sin # foo.sin was saved by reference + True + >>> import sys + >>> foo in sys.modules.values() + False + + - Update the state of a non-importable module-type object: + + >>> import dill + >>> from types import ModuleType + >>> foo = ModuleType('foo') + >>> foo.values = ['a','b'] + >>> foo.sin = lambda x: x*x + >>> dill.load_module('foo_session.pkl', module=foo) + >>> [foo.sin(x) for x in foo.values] + [0.8414709848078965, 0.9092974268256817, 0.1411200080598672] + + *Changed in version 0.3.6:* Function ``load_session()`` was renamed to + ``load_module()``. Parameter ``main`` was renamed to ``module``. + + See also: + :py:func:`load_module_asdict` to load the contents of module saved + with :py:func:`dump_module` into a dictionary. + """ + if 'main' in kwds: + warnings.warn( + "The argument 'main' has been renamed 'module'.", + PendingDeprecationWarning + ) + if module is not None: + raise TypeError("both 'module' and 'main' arguments were used") + module = kwds.pop('main') + main = module + if hasattr(filename, 'read'): + file = filename + else: + if filename is None: + filename = str(TEMPDIR/'session.pkl') + file = open(filename, 'rb') + try: + file = _make_peekable(file) + #FIXME: dill.settings are disabled + unpickler = Unpickler(file, **kwds) + unpickler._session = True + + # Resolve unpickler._main + pickle_main = _identify_module(file, main) + if main is None and pickle_main is not None: + main = pickle_main + if isinstance(main, str): + if main.startswith('__runtime__.'): + # Create runtime module to load the session into. + main = ModuleType(main.partition('.')[-1]) + else: + main = _import_module(main) + if main is not None: + if not isinstance(main, ModuleType): + raise TypeError("%r is not a module" % main) + unpickler._main = main + else: + main = unpickler._main + + # Check against the pickle's main. + is_main_imported = _is_imported_module(main) + if pickle_main is not None: + is_runtime_mod = pickle_main.startswith('__runtime__.') + if is_runtime_mod: + pickle_main = pickle_main.partition('.')[-1] + error_msg = "can't update{} module{} %r with the saved state of{} module{} %r" + if is_runtime_mod and is_main_imported: + raise ValueError( + error_msg.format(" imported", "", "", "-type object") + % (main.__name__, pickle_main) + ) + if not is_runtime_mod and not is_main_imported: + raise ValueError( + error_msg.format("", "-type object", " imported", "") + % (pickle_main, main.__name__) + ) + if main.__name__ != pickle_main: + raise ValueError(error_msg.format("", "", "", "") % (main.__name__, pickle_main)) + + # This is for find_class() to be able to locate it. + if not is_main_imported: + runtime_main = '__runtime__.%s' % main.__name__ + sys.modules[runtime_main] = main + + loaded = unpickler.load() + finally: + if not hasattr(filename, 'read'): # if newly opened file + file.close() + try: + del sys.modules[runtime_main] + except (KeyError, NameError): + pass + assert loaded is main + _restore_modules(unpickler, main) + if main is _main_module or main is module: + return None + else: + return main + +# Backward compatibility. +def load_session(filename=None, main=None, **kwds): + warnings.warn("load_session() has been renamed load_module().", PendingDeprecationWarning) + load_module(filename, module=main, **kwds) +load_session.__doc__ = load_module.__doc__ + +def load_module_asdict( + filename: Union[str, os.PathLike] = None, + update: bool = False, + **kwds +) -> dict: + """ + Load the contents of a saved module into a dictionary. + + ``load_module_asdict()`` is the near-equivalent of:: + + lambda filename: vars(dill.load_module(filename)).copy() + + however, does not alter the original module. Also, the path of + the loaded module is stored in the ``__session__`` attribute. + + Args: + filename: a path-like object or a readable stream. If `None` + (the default), read from a named file in a temporary directory. + update: if `True`, initialize the dictionary with the current state + of the module prior to loading the state stored at filename. + **kwds: extra keyword arguments passed to :py:class:`Unpickler()` + + Raises: + :py:exc:`UnpicklingError`: if unpickling fails + + Returns: + A copy of the restored module's dictionary. + + Note: + If ``update`` is True, the corresponding module may first be imported + into the current namespace before the saved state is loaded from + filename to the dictionary. Note that any module that is imported into + the current namespace as a side-effect of using ``update`` will not be + modified by loading the saved module in filename to a dictionary. + + Example: + >>> import dill + >>> alist = [1, 2, 3] + >>> anum = 42 + >>> dill.dump_module() + >>> anum = 0 + >>> new_var = 'spam' + >>> main = dill.load_module_asdict() + >>> main['__name__'], main['__session__'] + ('__main__', '/tmp/session.pkl') + >>> main is globals() # loaded objects don't reference globals + False + >>> main['alist'] == alist + True + >>> main['alist'] is alist # was saved by value + False + >>> main['anum'] == anum # changed after the session was saved + False + >>> new_var in main # would be True if the option 'update' was set + False + """ + if 'module' in kwds: + raise TypeError("'module' is an invalid keyword argument for load_module_asdict()") + if hasattr(filename, 'read'): + file = filename + else: + if filename is None: + filename = str(TEMPDIR/'session.pkl') + file = open(filename, 'rb') + try: + file = _make_peekable(file) + main_name = _identify_module(file) + old_main = sys.modules.get(main_name) + main = ModuleType(main_name) + if update: + if old_main is None: + old_main = _import_module(main_name) + main.__dict__.update(old_main.__dict__) + else: + main.__builtins__ = __builtin__ + sys.modules[main_name] = main + load_module(file, **kwds) + finally: + if not hasattr(filename, 'read'): # if newly opened file + file.close() + try: + if old_main is None: + del sys.modules[main_name] + else: + sys.modules[main_name] = old_main + except NameError: # failed before setting old_main + pass + main.__session__ = str(filename) + return main.__dict__ + + +# Internal exports for backward compatibility with dill v0.3.5.1 +# Can't be placed in dill._dill because of circular import problems. +for name in ( + '_lookup_module', '_module_map', '_restore_modules', '_stash_modules', + 'dump_session', 'load_session' # backward compatibility functions +): + setattr(_dill, name, globals()[name]) +del name diff --git a/local_packages/dill/settings.py b/local_packages/dill/settings.py new file mode 100644 index 0000000..eb07fef --- /dev/null +++ b/local_packages/dill/settings.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +global settings for Pickler +""" + +from pickle import DEFAULT_PROTOCOL + +settings = { + #'main' : None, + 'protocol' : DEFAULT_PROTOCOL, + 'byref' : False, + #'strictio' : False, + 'fmode' : 0, #HANDLE_FMODE + 'recurse' : False, + 'ignore' : False, +} + +del DEFAULT_PROTOCOL + diff --git a/local_packages/dill/source.py b/local_packages/dill/source.py new file mode 100644 index 0000000..b77d9fb --- /dev/null +++ b/local_packages/dill/source.py @@ -0,0 +1,1025 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +# +# inspired by inspect.py from Python-2.7.6 +# inspect.py author: 'Ka-Ping Yee ' +# inspect.py merged into original dill.source by Mike McKerns 4/13/14 +""" +Extensions to python's 'inspect' module, which can be used +to retrieve information from live python objects. The methods +defined in this module are augmented to facilitate access to +source code of interactively defined functions and classes, +as well as provide access to source code for objects defined +in a file. +""" + +__all__ = ['findsource', 'getsourcelines', 'getsource', 'indent', 'outdent', \ + '_wrap', 'dumpsource', 'getname', '_namespace', 'getimport', \ + '_importable', 'importable','isdynamic', 'isfrommain'] + +import linecache +import re +from inspect import (getblock, getfile, getmodule, getsourcefile, indentsize, + isbuiltin, isclass, iscode, isframe, isfunction, ismethod, + ismodule, istraceback) +from tokenize import TokenError + +from ._dill import IS_IPYTHON + + +def isfrommain(obj): + "check if object was built in __main__" + module = getmodule(obj) + if module and module.__name__ == '__main__': + return True + return False + + +def isdynamic(obj): + "check if object was built in the interpreter" + try: file = getfile(obj) + except TypeError: file = None + if file == '' and isfrommain(obj): + return True + return False + + +def _matchlambda(func, line): + """check if lambda object 'func' matches raw line of code 'line'""" + from .detect import code as getcode + from .detect import freevars, globalvars, varnames + dummy = lambda : '__this_is_a_big_dummy_function__' + # process the line (removing leading whitespace, etc) + lhs,rhs = line.split('lambda ',1)[-1].split(":", 1) #FIXME: if !1 inputs + try: #FIXME: unsafe + _ = eval("lambda %s : %s" % (lhs,rhs), globals(),locals()) + except Exception: _ = dummy + # get code objects, for comparison + _, code = getcode(_).co_code, getcode(func).co_code + # check if func is in closure + _f = [line.count(i) for i in freevars(func).keys()] + if not _f: # not in closure + # check if code matches + if _ == code: return True + return False + # weak check on freevars + if not all(_f): return False #XXX: VERY WEAK + # weak check on varnames and globalvars + _f = varnames(func) + _f = [line.count(i) for i in _f[0]+_f[1]] + if _f and not all(_f): return False #XXX: VERY WEAK + _f = [line.count(i) for i in globalvars(func).keys()] + if _f and not all(_f): return False #XXX: VERY WEAK + # check if func is a double lambda + if (line.count('lambda ') > 1) and (lhs in freevars(func).keys()): + _lhs,_rhs = rhs.split('lambda ',1)[-1].split(":",1) #FIXME: if !1 inputs + try: #FIXME: unsafe + _f = eval("lambda %s : %s" % (_lhs,_rhs), globals(),locals()) + except Exception: _f = dummy + # get code objects, for comparison + _, code = getcode(_f).co_code, getcode(func).co_code + if len(_) != len(code): return False + #NOTE: should be same code same order, but except for 't' and '\x88' + _ = set((i,j) for (i,j) in zip(_,code) if i != j) + if len(_) != 1: return False #('t','\x88') + return True + # check indentsize + if not indentsize(line): return False #FIXME: is this a good check??? + # check if code 'pattern' matches + #XXX: or pattern match against dis.dis(code)? (or use uncompyle2?) + _ = _.split(_[0]) # 't' #XXX: remove matching values if starts the same? + _f = code.split(code[0]) # '\x88' + #NOTE: should be same code different order, with different first element + _ = dict(re.match(r'([\W\D\S])(.*)', _[i]).groups() for i in range(1,len(_))) + _f = dict(re.match(r'([\W\D\S])(.*)', _f[i]).groups() for i in range(1,len(_f))) + if (_.keys() == _f.keys()) and (sorted(_.values()) == sorted(_f.values())): + return True + return False + + +def findsource(object): + """Return the entire source file and starting line number for an object. + For interactively-defined objects, the 'file' is the interpreter's history. + + The argument may be a module, class, method, function, traceback, frame, + or code object. The source code is returned as a list of all the lines + in the file and the line number indexes a line in that list. An IOError + is raised if the source code cannot be retrieved, while a TypeError is + raised for objects where the source code is unavailable (e.g. builtins).""" + + module = getmodule(object) + try: file = getfile(module) + except TypeError: file = None + is_module_main = (module and module.__name__ == '__main__' and not file) + if IS_IPYTHON and is_module_main: + #FIXME: quick fix for functions and classes in IPython interpreter + try: + file = getfile(object) + sourcefile = getsourcefile(object) + except TypeError: + if isclass(object): + for object_method in filter(isfunction, object.__dict__.values()): + # look for a method of the class + file_candidate = getfile(object_method) + if not file_candidate.startswith('': pat1 = r'(.*(?': + pat1 = r'(.*(?' + if stdin: + lnum = len(lines) - 1 # can't get lnum easily, so leverage pat + if not pat1: pat1 = r'^(\s*def\s)|(.*(? 0: #XXX: won't find decorators in ? + line = lines[lnum] + if pat1.match(line): + if not stdin: break # co_firstlineno does the job + if name == '': # hackery needed to confirm a match + if _matchlambda(obj, line): break + else: # not a lambda, just look for the name + if name in line: # need to check for decorator... + hats = 0 + for _lnum in range(lnum-1,-1,-1): + if pat2.match(lines[_lnum]): hats += 1 + else: break + lnum = lnum - hats + break + lnum = lnum - 1 + return lines, lnum + + try: # turn instances into classes + if not isclass(object) and isclass(type(object)): # __class__ + object = object.__class__ #XXX: sometimes type(class) is better? + #XXX: we don't find how the instance was built + except AttributeError: pass + if isclass(object): + name = object.__name__ + pat = re.compile(r'^(\s*)class\s*' + name + r'\b') + # make some effort to find the best matching class definition: + # use the one with the least indentation, which is the one + # that's most probably not inside a function definition. + candidates = [] + for i in range(len(lines)-1,-1,-1): + match = pat.match(lines[i]) + if match: + # if it's at toplevel, it's already the best one + if lines[i][0] == 'c': + return lines, i + # else add whitespace to candidate list + candidates.append((match.group(1), i)) + if candidates: + # this will sort by whitespace, and by line number, + # less whitespace first #XXX: should sort high lnum before low + candidates.sort() + return lines, candidates[0][1] + else: + raise IOError('could not find class definition') + raise IOError('could not find code object') + + +def getblocks(object, lstrip=False, enclosing=False, locate=False): + """Return a list of source lines and starting line number for an object. + Interactively-defined objects refer to lines in the interpreter's history. + + If enclosing=True, then also return any enclosing code. + If lstrip=True, ensure there is no indentation in the first line of code. + If locate=True, then also return the line number for the block of code. + + DEPRECATED: use 'getsourcelines' instead + """ + lines, lnum = findsource(object) + + if ismodule(object): + if lstrip: lines = _outdent(lines) + return ([lines], [0]) if locate is True else [lines] + + #XXX: 'enclosing' means: closures only? or classes and files? + indent = indentsize(lines[lnum]) + block = getblock(lines[lnum:]) #XXX: catch any TokenError here? + + if not enclosing or not indent: + if lstrip: block = _outdent(block) + return ([block], [lnum]) if locate is True else [block] + + pat1 = r'^(\s*def\s)|(.*(? indent: #XXX: should be >= ? + line += len(code) - skip + elif target in ''.join(code): + blocks.append(code) # save code block as the potential winner + _lnum.append(line - skip) # save the line number for the match + line += len(code) - skip + else: + line += 1 + skip = 0 + # find skip: the number of consecutive decorators + elif pat2.match(lines[line]): + try: code = getblock(lines[line:]) + except TokenError: code = [lines[line]] + skip = 1 + for _line in code[1:]: # skip lines that are decorators + if not pat2.match(_line): break + skip += 1 + line += skip + # no match: reset skip and go to the next line + else: + line +=1 + skip = 0 + + if not blocks: + blocks = [block] + _lnum = [lnum] + if lstrip: blocks = [_outdent(block) for block in blocks] + # return last match + return (blocks, _lnum) if locate is True else blocks + + +def getsourcelines(object, lstrip=False, enclosing=False): + """Return a list of source lines and starting line number for an object. + Interactively-defined objects refer to lines in the interpreter's history. + + The argument may be a module, class, method, function, traceback, frame, + or code object. The source code is returned as a list of the lines + corresponding to the object and the line number indicates where in the + original source file the first line of code was found. An IOError is + raised if the source code cannot be retrieved, while a TypeError is + raised for objects where the source code is unavailable (e.g. builtins). + + If lstrip=True, ensure there is no indentation in the first line of code. + If enclosing=True, then also return any enclosing code.""" + code, n = getblocks(object, lstrip=lstrip, enclosing=enclosing, locate=True) + return code[-1], n[-1] + + +#NOTE: broke backward compatibility 4/16/14 (was lstrip=True, force=True) +def getsource(object, alias='', lstrip=False, enclosing=False, \ + force=False, builtin=False): + """Return the text of the source code for an object. The source code for + interactively-defined objects are extracted from the interpreter's history. + + The argument may be a module, class, method, function, traceback, frame, + or code object. The source code is returned as a single string. An + IOError is raised if the source code cannot be retrieved, while a + TypeError is raised for objects where the source code is unavailable + (e.g. builtins). + + If alias is provided, then add a line of code that renames the object. + If lstrip=True, ensure there is no indentation in the first line of code. + If enclosing=True, then also return any enclosing code. + If force=True, catch (TypeError,IOError) and try to use import hooks. + If builtin=True, force an import for any builtins + """ + # hascode denotes a callable + hascode = _hascode(object) + # is a class instance type (and not in builtins) + instance = _isinstance(object) + + # get source lines; if fail, try to 'force' an import + try: # fails for builtins, and other assorted object types + lines, lnum = getsourcelines(object, enclosing=enclosing) + except (TypeError, IOError): # failed to get source, resort to import hooks + if not force: # don't try to get types that findsource can't get + raise + if not getmodule(object): # get things like 'None' and '1' + if not instance: return getimport(object, alias, builtin=builtin) + # special handling (numpy arrays, ...) + _import = getimport(object, builtin=builtin) + name = getname(object, force=True) + _alias = "%s = " % alias if alias else "" + if alias == name: _alias = "" + return _import+_alias+"%s\n" % name + else: #FIXME: could use a good bit of cleanup, since using getimport... + if not instance: return getimport(object, alias, builtin=builtin) + # now we are dealing with an instance... + name = object.__class__.__name__ + module = object.__module__ + if not name.isidentifier() or not all(i.isidentifier() for i in module.split('.')): #XXX: does exclusing malicious code exclude valid use? + raise SyntaxError('invalid syntax') + if module in ['builtins','__builtin__']: + return getimport(object, alias, builtin=builtin) + else: #FIXME: leverage getimport? use 'from module import name'? + lines, lnum = ["%s = __import__('%s', fromlist=['%s']).%s\n" % (name,module,name,name)], 0 + obj = eval(lines[0].lstrip(name + ' = ')) + lines, lnum = getsourcelines(obj, enclosing=enclosing) + + # strip leading indent (helps ensure can be imported) + if lstrip or alias: + lines = _outdent(lines) + + # instantiate, if there's a nice repr #XXX: BAD IDEA??? + if instance: #and force: #XXX: move into findsource or getsourcelines ? + if '(' in repr(object): lines.append('%r\n' % object) + #else: #XXX: better to somehow to leverage __reduce__ ? + # reconstructor,args = object.__reduce__() + # _ = reconstructor(*args) + else: # fall back to serialization #XXX: bad idea? + #XXX: better not duplicate work? #XXX: better new/enclose=True? + lines = dumpsource(object, alias='', new=force, enclose=False) + lines, lnum = [line+'\n' for line in lines.split('\n')][:-1], 0 + #else: object.__code__ # raise AttributeError + + # add an alias to the source code + if alias: + if hascode: + skip = 0 + for line in lines: # skip lines that are decorators + if not line.startswith('@'): break + skip += 1 + #XXX: use regex from findsource / getsourcelines ? + if lines[skip].lstrip().startswith('def '): # we have a function + if alias != object.__name__: + lines.append('\n%s = %s\n' % (alias, object.__name__)) + elif 'lambda ' in lines[skip]: # we have a lambda + if alias != lines[skip].split('=')[0].strip(): + lines[skip] = '%s = %s' % (alias, lines[skip]) + else: # ...try to use the object's name + if alias != object.__name__: + lines.append('\n%s = %s\n' % (alias, object.__name__)) + else: # class or class instance + if instance: + if alias != lines[-1].split('=')[0].strip(): + lines[-1] = ('%s = ' % alias) + lines[-1] + else: + name = getname(object, force=True) or object.__name__ + if alias != name: + lines.append('\n%s = %s\n' % (alias, name)) + return ''.join(lines) + + +def _hascode(object): + '''True if object has an attribute that stores it's __code__''' + return getattr(object,'__code__',None) or getattr(object,'func_code',None) + +def _isinstance(object): + '''True if object is a class instance type (and is not a builtin)''' + if _hascode(object) or isclass(object) or ismodule(object): + return False + if istraceback(object) or isframe(object) or iscode(object): + return False + # special handling (numpy arrays, ...) + if not getmodule(object) and getmodule(type(object)).__name__ in ['numpy']: + return True +# # check if is instance of a builtin +# if not getmodule(object) and getmodule(type(object)).__name__ in ['__builtin__','builtins']: +# return False + _types = ('") + if not repr(type(object)).startswith(_types): #FIXME: weak hack + return False + if not getmodule(object) or object.__module__ in ['builtins','__builtin__'] or getname(object, force=True) in ['array']: + return False + return True # by process of elimination... it's what we want + + +def _intypes(object): + '''check if object is in the 'types' module''' + import types + # allow user to pass in object or object.__name__ + if type(object) is not type(''): + object = getname(object, force=True) + if object == 'ellipsis': object = 'EllipsisType' + return True if hasattr(types, object) else False + + +def _isstring(object): #XXX: isstringlike better? + '''check if object is a string-like type''' + return isinstance(object, (str, bytes)) + + +def indent(code, spaces=4): + '''indent a block of code with whitespace (default is 4 spaces)''' + indent = indentsize(code) + from numbers import Integral + if isinstance(spaces, Integral): spaces = ' '*spaces + # if '\t' is provided, will indent with a tab + nspaces = indentsize(spaces) + # blank lines (etc) need to be ignored + lines = code.split('\n') +## stq = "'''"; dtq = '"""' +## in_stq = in_dtq = False + for i in range(len(lines)): + #FIXME: works... but shouldn't indent 2nd+ lines of multiline doc + _indent = indentsize(lines[i]) + if indent > _indent: continue + lines[i] = spaces+lines[i] +## #FIXME: may fail when stq and dtq in same line (depends on ordering) +## nstq, ndtq = lines[i].count(stq), lines[i].count(dtq) +## if not in_dtq and not in_stq: +## lines[i] = spaces+lines[i] # we indent +## # entering a comment block +## if nstq%2: in_stq = not in_stq +## if ndtq%2: in_dtq = not in_dtq +## # leaving a comment block +## elif in_dtq and ndtq%2: in_dtq = not in_dtq +## elif in_stq and nstq%2: in_stq = not in_stq +## else: pass + if lines[-1].strip() == '': lines[-1] = '' + return '\n'.join(lines) + + +def _outdent(lines, spaces=None, all=True): + '''outdent lines of code, accounting for docs and line continuations''' + indent = indentsize(lines[0]) + if spaces is None or spaces > indent or spaces < 0: spaces = indent + for i in range(len(lines) if all else 1): + #FIXME: works... but shouldn't outdent 2nd+ lines of multiline doc + _indent = indentsize(lines[i]) + if spaces > _indent: _spaces = _indent + else: _spaces = spaces + lines[i] = lines[i][_spaces:] + return lines + +def outdent(code, spaces=None, all=True): + '''outdent a block of code (default is to strip all leading whitespace)''' + indent = indentsize(code) + if spaces is None or spaces > indent or spaces < 0: spaces = indent + #XXX: will this delete '\n' in some cases? + if not all: return code[spaces:] + return '\n'.join(_outdent(code.split('\n'), spaces=spaces, all=all)) + + +# _wrap provides an wrapper to correctly exec and load into locals +__globals__ = globals() +__locals__ = locals() +def _wrap(f): + """ encapsulate a function and it's __import__ """ + def func(*args, **kwds): + try: + # _ = eval(getsource(f, force=True)) #XXX: safer but less robust + exec(getimportable(f, alias='_'), __globals__, __locals__) + except Exception: + raise ImportError('cannot import name ' + f.__name__) + return _(*args, **kwds) + func.__name__ = f.__name__ + func.__doc__ = f.__doc__ + return func + + +def _enclose(object, alias=''): #FIXME: needs alias to hold returned object + """create a function enclosure around the source of some object""" + #XXX: dummy and stub should append a random string + dummy = '__this_is_a_big_dummy_enclosing_function__' + stub = '__this_is_a_stub_variable__' + code = 'def %s():\n' % dummy + code += indent(getsource(object, alias=stub, lstrip=True, force=True)) + code += indent('return %s\n' % stub) + if alias: code += '%s = ' % alias + code += '%s(); del %s\n' % (dummy, dummy) + #code += "globals().pop('%s',lambda :None)()\n" % dummy + return code + + +def dumpsource(object, alias='', new=False, enclose=True): + """'dump to source', where the code includes a pickled object. + + If new=True and object is a class instance, then create a new + instance using the unpacked class source code. If enclose, then + create the object inside a function enclosure (thus minimizing + any global namespace pollution). + """ + from dill import dumps + pik = repr(dumps(object)) + code = 'import dill\n' + if enclose: + stub = '__this_is_a_stub_variable__' #XXX: *must* be same _enclose.stub + pre = '%s = ' % stub + new = False #FIXME: new=True doesn't work with enclose=True + else: + stub = alias + pre = '%s = ' % stub if alias else alias + + # if a 'new' instance is not needed, then just dump and load + if not new or not _isinstance(object): + code += pre + 'dill.loads(%s)\n' % pik + else: #XXX: other cases where source code is needed??? + code += getsource(object.__class__, alias='', lstrip=True, force=True) + mod = repr(object.__module__) # should have a module (no builtins here) + code += pre + 'dill.loads(%s.replace(b%s,bytes(__name__,"UTF-8")))\n' % (pik,mod) + #code += 'del %s' % object.__class__.__name__ #NOTE: kills any existing! + + if enclose: + # generation of the 'enclosure' + dummy = '__this_is_a_big_dummy_object__' + dummy = _enclose(dummy, alias=alias) + # hack to replace the 'dummy' with the 'real' code + dummy = dummy.split('\n') + code = dummy[0]+'\n' + indent(code) + '\n'.join(dummy[-3:]) + + return code #XXX: better 'dumpsourcelines', returning list of lines? + + +def getname(obj, force=False, fqn=False): #XXX: throw(?) to raise error on fail? + """get the name of the object. for lambdas, get the name of the pointer """ + if fqn: return '.'.join(_namespace(obj)) #NOTE: returns 'type' + module = getmodule(obj) + if not module: # things like "None" and "1" + if not force: return None #NOTE: returns 'instance' NOT 'type' #FIXME? + # handle some special cases + if hasattr(obj, 'dtype') and not obj.shape: + return getname(obj.__class__) + "(" + repr(obj.tolist()) + ")" + return repr(obj) + try: + #XXX: 'wrong' for decorators and curried functions ? + # if obj.func_closure: ...use logic from getimportable, etc ? + name = obj.__name__ + if name == '': + return getsource(obj).split('=',1)[0].strip() + # handle some special cases + if module.__name__ in ['builtins','__builtin__']: + if name == 'ellipsis': name = 'EllipsisType' + return name + except AttributeError: #XXX: better to just throw AttributeError ? + if not force: return None + name = repr(obj) + if name.startswith('<'): # or name.split('('): + return None + return name + + +def _namespace(obj): + """_namespace(obj); return namespace hierarchy (as a list of names) + for the given object. For an instance, find the class hierarchy. + + For example: + + >>> from functools import partial + >>> p = partial(int, base=2) + >>> _namespace(p) + [\'functools\', \'partial\'] + """ + # mostly for functions and modules and such + #FIXME: 'wrong' for decorators and curried functions + try: #XXX: needs some work and testing on different types + module = qual = str(getmodule(obj)).split()[1].strip('>').strip('"').strip("'") + qual = qual.split('.') + if ismodule(obj): + return qual + # get name of a lambda, function, etc + name = getname(obj) or obj.__name__ # failing, raise AttributeError + # check special cases (NoneType, ...) + if module in ['builtins','__builtin__']: # BuiltinFunctionType + if _intypes(name): return ['types'] + [name] + return qual + [name] #XXX: can be wrong for some aliased objects + except Exception: pass + # special case: numpy.inf and numpy.nan (we don't want them as floats) + if str(obj) in ['inf','nan','Inf','NaN']: # is more, but are they needed? + return ['numpy'] + [str(obj)] + # mostly for classes and class instances and such + module = getattr(obj.__class__, '__module__', None) + qual = str(obj.__class__) + try: qual = qual[qual.index("'")+1:-2] + except ValueError: pass # str(obj.__class__) made the 'try' unnecessary + qual = qual.split(".") + if module in ['builtins','__builtin__']: + # check special cases (NoneType, Ellipsis, ...) + if qual[-1] == 'ellipsis': qual[-1] = 'EllipsisType' + if _intypes(qual[-1]): module = 'types' #XXX: BuiltinFunctionType + qual = [module] + qual + return qual + + +#NOTE: 05/25/14 broke backward compatibility: added 'alias' as 3rd argument +def _getimport(head, tail, alias='', verify=True, builtin=False): + """helper to build a likely import string from head and tail of namespace. + ('head','tail') are used in the following context: "from head import tail" + + If verify=True, then test the import string before returning it. + If builtin=True, then force an import for builtins where possible. + If alias is provided, then rename the object on import. + """ + # special handling for a few common types + if tail in ['Ellipsis', 'NotImplemented'] and head in ['types']: + head = len.__module__ + elif tail in ['None'] and head in ['types']: + _alias = '%s = ' % alias if alias else '' + if alias == tail: _alias = '' + return _alias+'%s\n' % tail + # we don't need to import from builtins, so return '' +# elif tail in ['NoneType','int','float','long','complex']: return '' #XXX: ? + if head in ['builtins','__builtin__']: + # special cases (NoneType, Ellipsis, ...) #XXX: BuiltinFunctionType + if tail == 'ellipsis': tail = 'EllipsisType' + if _intypes(tail): head = 'types' + elif not builtin: + _alias = '%s = ' % alias if alias else '' + if alias == tail: _alias = '' + return _alias+'%s\n' % tail + else: pass # handle builtins below + # get likely import string + if not head: _str = "import %s" % tail + else: _str = "from %s import %s" % (head, tail) + _alias = " as %s\n" % alias if alias else "\n" + if alias == tail: _alias = "\n" + _str += _alias + # FIXME: fails on most decorators, currying, and such... + # (could look for magic __wrapped__ or __func__ attr) + # (could fix in 'namespace' to check obj for closure) + if verify and not head.startswith('dill.'):# weird behavior for dill + #print(_str) + try: exec(_str) #XXX: check if == obj? (name collision) + except ImportError: #XXX: better top-down or bottom-up recursion? + _head = head.rsplit(".",1)[0] #(or get all, then compare == obj?) + if not _head: raise + if _head != head: + _str = _getimport(_head, tail, alias, verify) + return _str + + +#XXX: rename builtin to force? vice versa? verify to force? (as in getsource) +#NOTE: 05/25/14 broke backward compatibility: added 'alias' as 2nd argument +def getimport(obj, alias='', verify=True, builtin=False, enclosing=False): + """get the likely import string for the given object + + obj is the object to inspect + If verify=True, then test the import string before returning it. + If builtin=True, then force an import for builtins where possible. + If enclosing=True, get the import for the outermost enclosing callable. + If alias is provided, then rename the object on import. + """ + if enclosing: + from .detect import outermost + _obj = outermost(obj) + obj = _obj if _obj else obj + # get the namespace + qual = _namespace(obj) + head = '.'.join(qual[:-1]) + tail = qual[-1] + # for named things... with a nice repr #XXX: move into _namespace? + try: # look for '<...>' and be mindful it might be in lists, dicts, etc... + name = repr(obj).split('<',1)[1].split('>',1)[1] + name = None # we have a 'object'-style repr + except Exception: # it's probably something 'importable' + if head in ['builtins','__builtin__']: + name = repr(obj) #XXX: catch [1,2], (1,2), set([1,2])... others? + elif _isinstance(obj): + name = getname(obj, force=True).split('(')[0] + else: + name = repr(obj).split('(')[0] + #if not repr(obj).startswith('<'): name = repr(obj).split('(')[0] + #else: name = None + if name: # try using name instead of tail + try: return _getimport(head, name, alias, verify, builtin) + except ImportError: pass + except SyntaxError: + if head in ['builtins','__builtin__']: + _alias = '%s = ' % alias if alias else '' + if alias == name: _alias = '' + return _alias+'%s\n' % name + else: pass + try: + #if type(obj) is type(abs): _builtin = builtin # BuiltinFunctionType + #else: _builtin = False + return _getimport(head, tail, alias, verify, builtin) + except ImportError: + raise # could do some checking against obj + except SyntaxError: + if head in ['builtins','__builtin__']: + _alias = '%s = ' % alias if alias else '' + if alias == tail: _alias = '' + return _alias+'%s\n' % tail + raise # could do some checking against obj + + +def _importable(obj, alias='', source=None, enclosing=False, force=True, \ + builtin=True, lstrip=True): + """get an import string (or the source code) for the given object + + This function will attempt to discover the name of the object, or the repr + of the object, or the source code for the object. To attempt to force + discovery of the source code, use source=True, to attempt to force the + use of an import, use source=False; otherwise an import will be sought + for objects not defined in __main__. The intent is to build a string + that can be imported from a python file. obj is the object to inspect. + If alias is provided, then rename the object with the given alias. + + If source=True, use these options: + If enclosing=True, then also return any enclosing code. + If force=True, catch (TypeError,IOError) and try to use import hooks. + If lstrip=True, ensure there is no indentation in the first line of code. + + If source=False, use these options: + If enclosing=True, get the import for the outermost enclosing callable. + If force=True, then don't test the import string before returning it. + If builtin=True, then force an import for builtins where possible. + """ + if source is None: + source = True if isfrommain(obj) else False + if source: # first try to get the source + try: + return getsource(obj, alias, enclosing=enclosing, \ + force=force, lstrip=lstrip, builtin=builtin) + except Exception: pass + try: + if not _isinstance(obj): + return getimport(obj, alias, enclosing=enclosing, \ + verify=(not force), builtin=builtin) + # first 'get the import', then 'get the instance' + _import = getimport(obj, enclosing=enclosing, \ + verify=(not force), builtin=builtin) + name = getname(obj, force=True) + if not name: + raise AttributeError("object has no atribute '__name__'") + _alias = "%s = " % alias if alias else "" + if alias == name: _alias = "" + return _import+_alias+"%s\n" % name + + except Exception: pass + if not source: # try getsource, only if it hasn't been tried yet + try: + return getsource(obj, alias, enclosing=enclosing, \ + force=force, lstrip=lstrip, builtin=builtin) + except Exception: pass + # get the name (of functions, lambdas, and classes) + # or hope that obj can be built from the __repr__ + #XXX: what to do about class instances and such? + obj = getname(obj, force=force) + # we either have __repr__ or __name__ (or None) + if not obj or obj.startswith('<'): + raise AttributeError("object has no atribute '__name__'") + _alias = '%s = ' % alias if alias else '' + if alias == obj: _alias = '' + return _alias+'%s\n' % obj + #XXX: possible failsafe... (for example, for instances when source=False) + # "import dill; result = dill.loads(); # repr()" + +def _closuredimport(func, alias='', builtin=False): + """get import for closured objects; return a dict of 'name' and 'import'""" + import re + from .detect import freevars, outermost + free_vars = freevars(func) + func_vars = {} + # split into 'funcs' and 'non-funcs' + for name,obj in list(free_vars.items()): + if not isfunction(obj): continue + # get import for 'funcs' + fobj = free_vars.pop(name) + src = getsource(fobj) + if src.lstrip().startswith('@'): # we have a decorator + src = getimport(fobj, alias=alias, builtin=builtin) + else: # we have to "hack" a bit... and maybe be lucky + encl = outermost(func) + # pattern: 'func = enclosing(fobj' + pat = r'.*[\w\s]=\s*'+getname(encl)+r'\('+getname(fobj) + mod = getname(getmodule(encl)) + #HACK: get file containing 'outer' function; is func there? + lines,_ = findsource(encl) + candidate = [line for line in lines if getname(encl) in line and \ + re.match(pat, line)] + if not candidate: + mod = getname(getmodule(fobj)) + #HACK: get file containing 'inner' function; is func there? + lines,_ = findsource(fobj) + candidate = [line for line in lines \ + if getname(fobj) in line and re.match(pat, line)] + if not len(candidate): raise TypeError('import could not be found') + candidate = candidate[-1] + name = candidate.split('=',1)[0].split()[-1].strip() + src = _getimport(mod, name, alias=alias, builtin=builtin) + func_vars[name] = src + if not func_vars: + name = outermost(func) + mod = getname(getmodule(name)) + if not mod or name is func: # then it can be handled by getimport + name = getname(func, force=True) #XXX: better key? + src = getimport(func, alias=alias, builtin=builtin) + else: + lines,_ = findsource(name) + # pattern: 'func = enclosing(' + candidate = [line for line in lines if getname(name) in line and \ + re.match(r'.*[\w\s]=\s*'+getname(name)+r'\(', line)] + if not len(candidate): raise TypeError('import could not be found') + candidate = candidate[-1] + name = candidate.split('=',1)[0].split()[-1].strip() + src = _getimport(mod, name, alias=alias, builtin=builtin) + func_vars[name] = src + return func_vars + +#XXX: should be able to use __qualname__ +def _closuredsource(func, alias=''): + """get source code for closured objects; return a dict of 'name' + and 'code blocks'""" + #FIXME: this entire function is a messy messy HACK + # - pollutes global namespace + # - fails if name of freevars are reused + # - can unnecessarily duplicate function code + from .detect import freevars + free_vars = freevars(func) + func_vars = {} + # split into 'funcs' and 'non-funcs' + for name,obj in list(free_vars.items()): + if not isfunction(obj): + # get source for 'non-funcs' + free_vars[name] = getsource(obj, force=True, alias=name) + continue + # get source for 'funcs' + fobj = free_vars.pop(name) + src = getsource(fobj, alias) # DO NOT include dependencies + # if source doesn't start with '@', use name as the alias + if not src.lstrip().startswith('@'): #FIXME: 'enclose' in dummy; + src = importable(fobj,alias=name)# wrong ref 'name' + org = getsource(func, alias, enclosing=False, lstrip=True) + src = (src, org) # undecorated first, then target + else: #NOTE: reproduces the code! + org = getsource(func, enclosing=True, lstrip=False) + src = importable(fobj, alias, source=True) # include dependencies + src = (org, src) # target first, then decorated + func_vars[name] = src + src = ''.join(free_vars.values()) + if not func_vars: #FIXME: 'enclose' in dummy; wrong ref 'name' + org = getsource(func, alias, force=True, enclosing=False, lstrip=True) + src = (src, org) # variables first, then target + else: + src = (src, None) # just variables (better '' instead of None?) + func_vars[None] = src + # FIXME: remove duplicates (however, order is important...) + return func_vars + +def importable(obj, alias='', source=None, builtin=True): + """get an importable string (i.e. source code or the import string) + for the given object, including any required objects from the enclosing + and global scope + + This function will attempt to discover the name of the object, or the repr + of the object, or the source code for the object. To attempt to force + discovery of the source code, use source=True, to attempt to force the + use of an import, use source=False; otherwise an import will be sought + for objects not defined in __main__. The intent is to build a string + that can be imported from a python file. + + obj is the object to inspect. If alias is provided, then rename the + object with the given alias. If builtin=True, then force an import for + builtins where possible. + """ + #NOTE: we always 'force', and 'lstrip' as necessary + #NOTE: for 'enclosing', use importable(outermost(obj)) + if source is None: + source = True if isfrommain(obj) else False + elif builtin and isbuiltin(obj): + source = False + tried_source = tried_import = False + while True: + if not source: # we want an import + try: + if _isinstance(obj): # for instances, punt to _importable + return _importable(obj, alias, source=False, builtin=builtin) + src = _closuredimport(obj, alias=alias, builtin=builtin) + if len(src) == 0: + raise NotImplementedError('not implemented') + if len(src) > 1: + raise NotImplementedError('not implemented') + return list(src.values())[0] + except Exception: + if tried_source: raise + tried_import = True + # we want the source + try: + src = _closuredsource(obj, alias=alias) + if len(src) == 0: + raise NotImplementedError('not implemented') + # groan... an inline code stitcher + def _code_stitcher(block): + "stitch together the strings in tuple 'block'" + if block[0] and block[-1]: block = '\n'.join(block) + elif block[0]: block = block[0] + elif block[-1]: block = block[-1] + else: block = '' + return block + # get free_vars first + _src = _code_stitcher(src.pop(None)) + _src = [_src] if _src else [] + # get func_vars + for xxx in src.values(): + xxx = _code_stitcher(xxx) + if xxx: _src.append(xxx) + # make a single source string + if not len(_src): + src = '' + elif len(_src) == 1: + src = _src[0] + else: + src = '\n'.join(_src) + # get source code of objects referred to by obj in global scope + from .detect import globalvars + obj = globalvars(obj) #XXX: don't worry about alias? recurse? etc? + obj = list(getsource(_obj,name,force=True) for (name,_obj) in obj.items() if not isbuiltin(_obj)) + obj = '\n'.join(obj) if obj else '' + # combine all referred-to source (global then enclosing) + if not obj: return src + if not src: return obj + return obj + src + except Exception: + if tried_import: raise + tried_source = True + source = not source + # should never get here + return + + +# backward compatibility +def getimportable(obj, alias='', byname=True, explicit=False): + return importable(obj,alias,source=(not byname),builtin=explicit) + #return outdent(_importable(obj,alias,source=(not byname),builtin=explicit)) +def likely_import(obj, passive=False, explicit=False): + return getimport(obj, verify=(not passive), builtin=explicit) +def _likely_import(first, last, passive=False, explicit=True): + return _getimport(first, last, verify=(not passive), builtin=explicit) +_get_name = getname +getblocks_from_history = getblocks + + + +# EOF diff --git a/local_packages/dill/temp.py b/local_packages/dill/temp.py new file mode 100644 index 0000000..4b9f4e0 --- /dev/null +++ b/local_packages/dill/temp.py @@ -0,0 +1,252 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +Methods for serialized objects (or source code) stored in temporary files +and file-like objects. +""" +#XXX: better instead to have functions write to any given file-like object ? +#XXX: currently, all file-like objects are created by the function... + +__all__ = ['dump_source', 'dump', 'dumpIO_source', 'dumpIO',\ + 'load_source', 'load', 'loadIO_source', 'loadIO',\ + 'capture'] + +import contextlib + + +@contextlib.contextmanager +def capture(stream='stdout'): + """builds a context that temporarily replaces the given stream name + + >>> with capture('stdout') as out: + ... print ("foo!") + ... + >>> print (out.getvalue()) + foo! + + """ + import sys + from io import StringIO + orig = getattr(sys, stream) + setattr(sys, stream, StringIO()) + try: + yield getattr(sys, stream) + finally: + setattr(sys, stream, orig) + + +def b(x): # deal with b'foo' versus 'foo' + import codecs + return codecs.latin_1_encode(x)[0] + +def load_source(file, **kwds): + """load an object that was stored with dill.temp.dump_source + + file: filehandle + alias: string name of stored object + mode: mode to open the file, one of: {'r', 'rb'} + + >>> f = lambda x: x**2 + >>> pyfile = dill.temp.dump_source(f, alias='_f') + >>> _f = dill.temp.load_source(pyfile) + >>> _f(4) + 16 + """ + alias = kwds.pop('alias', None) + mode = kwds.pop('mode', 'r') + fname = getattr(file, 'name', file) # fname=file.name or fname=file (if str) + source = open(fname, mode=mode, **kwds).read() + if not alias: + tag = source.strip().splitlines()[-1].split() + if tag[0] != '#NAME:': + stub = source.splitlines()[0] + raise IOError("unknown name for code: %s" % stub) + alias = tag[-1] + local = {} + exec(source, local) + _ = eval("%s" % alias, local) + return _ + +def dump_source(object, **kwds): + """write object source to a NamedTemporaryFile (instead of dill.dump) +Loads with "import" or "dill.temp.load_source". Returns the filehandle. + + >>> f = lambda x: x**2 + >>> pyfile = dill.temp.dump_source(f, alias='_f') + >>> _f = dill.temp.load_source(pyfile) + >>> _f(4) + 16 + + >>> f = lambda x: x**2 + >>> pyfile = dill.temp.dump_source(f, dir='.') + >>> modulename = os.path.basename(pyfile.name).split('.py')[0] + >>> exec('from %s import f as _f' % modulename) + >>> _f(4) + 16 + +Optional kwds: + If 'alias' is specified, the object will be renamed to the given string. + + If 'prefix' is specified, the file name will begin with that prefix, + otherwise a default prefix is used. + + If 'dir' is specified, the file will be created in that directory, + otherwise a default directory is used. + + If 'text' is specified and true, the file is opened in text + mode. Else (the default) the file is opened in binary mode. On + some operating systems, this makes no difference. + +NOTE: Keep the return value for as long as you want your file to exist ! + """ #XXX: write a "load_source"? + from .source import importable, getname + import tempfile + kwds.setdefault('delete', True) + kwds.pop('suffix', '') # this is *always* '.py' + alias = kwds.pop('alias', '') #XXX: include an alias so a name is known + name = str(alias) or getname(object) + name = "\n#NAME: %s\n" % name + #XXX: assumes kwds['dir'] is writable and on $PYTHONPATH + file = tempfile.NamedTemporaryFile(suffix='.py', **kwds) + file.write(b(''.join([importable(object, alias=alias),name]))) + file.flush() + return file + +def load(file, **kwds): + """load an object that was stored with dill.temp.dump + + file: filehandle + mode: mode to open the file, one of: {'r', 'rb'} + + >>> dumpfile = dill.temp.dump([1, 2, 3, 4, 5]) + >>> dill.temp.load(dumpfile) + [1, 2, 3, 4, 5] + """ + import dill as pickle + mode = kwds.pop('mode', 'rb') + name = getattr(file, 'name', file) # name=file.name or name=file (if str) + return pickle.load(open(name, mode=mode, **kwds)) + +def dump(object, **kwds): + """dill.dump of object to a NamedTemporaryFile. +Loads with "dill.temp.load". Returns the filehandle. + + >>> dumpfile = dill.temp.dump([1, 2, 3, 4, 5]) + >>> dill.temp.load(dumpfile) + [1, 2, 3, 4, 5] + +Optional kwds: + If 'suffix' is specified, the file name will end with that suffix, + otherwise there will be no suffix. + + If 'prefix' is specified, the file name will begin with that prefix, + otherwise a default prefix is used. + + If 'dir' is specified, the file will be created in that directory, + otherwise a default directory is used. + + If 'text' is specified and true, the file is opened in text + mode. Else (the default) the file is opened in binary mode. On + some operating systems, this makes no difference. + +NOTE: Keep the return value for as long as you want your file to exist ! + """ + import dill as pickle + import tempfile + kwds.setdefault('delete', True) + file = tempfile.NamedTemporaryFile(**kwds) + pickle.dump(object, file) + file.flush() + return file + +def loadIO(buffer, **kwds): + """load an object that was stored with dill.temp.dumpIO + + buffer: buffer object + + >>> dumpfile = dill.temp.dumpIO([1, 2, 3, 4, 5]) + >>> dill.temp.loadIO(dumpfile) + [1, 2, 3, 4, 5] + """ + import dill as pickle + from io import BytesIO as StringIO + value = getattr(buffer, 'getvalue', buffer) # value or buffer.getvalue + if value != buffer: value = value() # buffer.getvalue() + return pickle.load(StringIO(value)) + +def dumpIO(object, **kwds): + """dill.dump of object to a buffer. +Loads with "dill.temp.loadIO". Returns the buffer object. + + >>> dumpfile = dill.temp.dumpIO([1, 2, 3, 4, 5]) + >>> dill.temp.loadIO(dumpfile) + [1, 2, 3, 4, 5] + """ + import dill as pickle + from io import BytesIO as StringIO + file = StringIO() + pickle.dump(object, file) + file.flush() + return file + +def loadIO_source(buffer, **kwds): + """load an object that was stored with dill.temp.dumpIO_source + + buffer: buffer object + alias: string name of stored object + + >>> f = lambda x:x**2 + >>> pyfile = dill.temp.dumpIO_source(f, alias='_f') + >>> _f = dill.temp.loadIO_source(pyfile) + >>> _f(4) + 16 + """ + alias = kwds.pop('alias', None) + source = getattr(buffer, 'getvalue', buffer) # source or buffer.getvalue + if source != buffer: source = source() # buffer.getvalue() + source = source.decode() # buffer to string + if not alias: + tag = source.strip().splitlines()[-1].split() + if tag[0] != '#NAME:': + stub = source.splitlines()[0] + raise IOError("unknown name for code: %s" % stub) + alias = tag[-1] + local = {} + exec(source, local) + _ = eval("%s" % alias, local) + return _ + +def dumpIO_source(object, **kwds): + """write object source to a buffer (instead of dill.dump) +Loads by with dill.temp.loadIO_source. Returns the buffer object. + + >>> f = lambda x:x**2 + >>> pyfile = dill.temp.dumpIO_source(f, alias='_f') + >>> _f = dill.temp.loadIO_source(pyfile) + >>> _f(4) + 16 + +Optional kwds: + If 'alias' is specified, the object will be renamed to the given string. + """ + from .source import importable, getname + from io import BytesIO as StringIO + alias = kwds.pop('alias', '') #XXX: include an alias so a name is known + name = str(alias) or getname(object) + name = "\n#NAME: %s\n" % name + #XXX: assumes kwds['dir'] is writable and on $PYTHONPATH + file = StringIO() + file.write(b(''.join([importable(object, alias=alias),name]))) + file.flush() + return file + + +del contextlib + + +# EOF diff --git a/local_packages/dill/tests/__init__.py b/local_packages/dill/tests/__init__.py new file mode 100644 index 0000000..f97361a --- /dev/null +++ b/local_packages/dill/tests/__init__.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2018-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +to run this test suite, first build and install `dill`. + + $ python -m pip install ../.. + + +then run the tests with: + + $ python -m dill.tests + + +or, if `nose` is installed: + + $ nosetests + +""" diff --git a/local_packages/dill/tests/__main__.py b/local_packages/dill/tests/__main__.py new file mode 100644 index 0000000..0c0f968 --- /dev/null +++ b/local_packages/dill/tests/__main__.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2018-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import glob +import os +import sys +import subprocess as sp +python = sys.executable +try: + import pox + python = pox.which_python(version=True) or python +except ImportError: + pass +shell = sys.platform[:3] == 'win' + +suite = os.path.dirname(__file__) or os.path.curdir +tests = glob.glob(suite + os.path.sep + 'test_*.py') + + +if __name__ == '__main__': + + failed = 0 + for test in tests: + p = sp.Popen([python, test], shell=shell).wait() + if p: + print('F', end='', flush=True) + failed = 1 + else: + print('.', end='', flush=True) + print('') + exit(failed) diff --git a/local_packages/dill/tests/test_abc.py b/local_packages/dill/tests/test_abc.py new file mode 100644 index 0000000..70678fe --- /dev/null +++ b/local_packages/dill/tests/test_abc.py @@ -0,0 +1,169 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2023-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +test dill's ability to pickle abstract base class objects +""" +import dill +import abc +from abc import ABC +import warnings + +from types import FunctionType + +dill.settings['recurse'] = True + +class OneTwoThree(ABC): + @abc.abstractmethod + def foo(self): + """A method""" + pass + + @property + @abc.abstractmethod + def bar(self): + """Property getter""" + pass + + @bar.setter + @abc.abstractmethod + def bar(self, value): + """Property setter""" + pass + + @classmethod + @abc.abstractmethod + def cfoo(cls): + """Class method""" + pass + + @staticmethod + @abc.abstractmethod + def sfoo(): + """Static method""" + pass + +class EasyAsAbc(OneTwoThree): + def __init__(self): + self._bar = None + + def foo(self): + return "Instance Method FOO" + + @property + def bar(self): + return self._bar + + @bar.setter + def bar(self, value): + self._bar = value + + @classmethod + def cfoo(cls): + return "Class Method CFOO" + + @staticmethod + def sfoo(): + return "Static Method SFOO" + +def test_abc_non_local(): + assert dill.copy(OneTwoThree) is not OneTwoThree + assert dill.copy(EasyAsAbc) is not EasyAsAbc + + with warnings.catch_warnings(): + warnings.simplefilter("ignore", dill.PicklingWarning) + assert dill.copy(OneTwoThree, byref=True) is OneTwoThree + assert dill.copy(EasyAsAbc, byref=True) is EasyAsAbc + + instance = EasyAsAbc() + # Set a property that StockPickle can't preserve + instance.bar = lambda x: x**2 + depickled = dill.copy(instance) + assert type(depickled) is type(instance) #NOTE: issue #612, test_abc_local + #NOTE: dill.copy of local (or non-local) classes should (not) be the same? + assert type(depickled.bar) is FunctionType + assert depickled.bar(3) == 9 + assert depickled.sfoo() == "Static Method SFOO" + assert depickled.cfoo() == "Class Method CFOO" + assert depickled.foo() == "Instance Method FOO" + +def test_abc_local(): + """ + Test using locally scoped ABC class + """ + class LocalABC(ABC): + @abc.abstractmethod + def foo(self): + pass + + def baz(self): + return repr(self) + + labc = dill.copy(LocalABC) + assert labc is not LocalABC + assert type(labc) is type(LocalABC) + #NOTE: dill.copy of local (or non-local) classes should (not) be the same? + # + # .LocalABC'> + + class Real(labc): + def foo(self): + return "True!" + + def baz(self): + return "My " + super(Real, self).baz() + + real = Real() + assert real.foo() == "True!" + + try: + labc() + except TypeError as e: + # Expected error + pass + else: + print('Failed to raise type error') + assert False + + labc2, pik = dill.copy((labc, Real())) + assert 'Real' == type(pik).__name__ + assert '.Real' in type(pik).__qualname__ + assert type(pik) is not Real + assert labc2 is not LocalABC + assert labc2 is not labc + assert isinstance(pik, labc2) + assert not isinstance(pik, labc) + assert not isinstance(pik, LocalABC) + assert pik.baz() == "My " + repr(pik) + +def test_meta_local_no_cache(): + """ + Test calling metaclass and cache registration + """ + LocalMetaABC = abc.ABCMeta('LocalMetaABC', (), {}) + + class ClassyClass: + pass + + class KlassyClass: + pass + + LocalMetaABC.register(ClassyClass) + + assert not issubclass(KlassyClass, LocalMetaABC) + assert issubclass(ClassyClass, LocalMetaABC) + + res = dill.dumps((LocalMetaABC, ClassyClass, KlassyClass)) + + lmabc, cc, kc = dill.loads(res) + assert type(lmabc) == type(LocalMetaABC) + assert not issubclass(kc, lmabc) + assert issubclass(cc, lmabc) + +if __name__ == '__main__': + test_abc_non_local() + test_abc_local() + test_meta_local_no_cache() diff --git a/local_packages/dill/tests/test_check.py b/local_packages/dill/tests/test_check.py new file mode 100644 index 0000000..f052f83 --- /dev/null +++ b/local_packages/dill/tests/test_check.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +from dill import check +import sys + +from dill.temp import capture + + +#FIXME: this doesn't catch output... it's from the internal call +def raise_check(func, **kwds): + try: + with capture('stdout') as out: + check(func, **kwds) + except Exception: + e = sys.exc_info()[1] + raise AssertionError(str(e)) + else: + assert 'Traceback' not in out.getvalue() + finally: + out.close() + + +f = lambda x:x**2 + + +def test_simple(verbose=None): + raise_check(f, verbose=verbose) + + +def test_recurse(verbose=None): + raise_check(f, recurse=True, verbose=verbose) + + +def test_byref(verbose=None): + raise_check(f, byref=True, verbose=verbose) + + +def test_protocol(verbose=None): + raise_check(f, protocol=True, verbose=verbose) + + +def test_python(verbose=None): + raise_check(f, python=None, verbose=verbose) + + +#TODO: test incompatible versions +#TODO: test dump failure +#TODO: test load failure + + +if __name__ == '__main__': + test_simple() + test_recurse() + test_byref() + test_protocol() + test_python() diff --git a/local_packages/dill/tests/test_classdef.py b/local_packages/dill/tests/test_classdef.py new file mode 100644 index 0000000..ec7f268 --- /dev/null +++ b/local_packages/dill/tests/test_classdef.py @@ -0,0 +1,340 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import dill +from enum import EnumMeta +import sys +dill.settings['recurse'] = True + +# test classdefs +class _class: + def _method(self): + pass + def ok(self): + return True + +class _class2: + def __call__(self): + pass + def ok(self): + return True + +class _newclass(object): + def _method(self): + pass + def ok(self): + return True + +class _newclass2(object): + def __call__(self): + pass + def ok(self): + return True + +class _meta(type): + pass + +def __call__(self): + pass +def ok(self): + return True + +_mclass = _meta("_mclass", (object,), {"__call__": __call__, "ok": ok}) + +del __call__ +del ok + +o = _class() +oc = _class2() +n = _newclass() +nc = _newclass2() +m = _mclass() + +if sys.hexversion < 0x03090000: + import typing + class customIntList(typing.List[int]): + pass +else: + class customIntList(list[int]): + pass + +# test pickles for class instances +def test_class_instances(): + assert dill.pickles(o) + assert dill.pickles(oc) + assert dill.pickles(n) + assert dill.pickles(nc) + assert dill.pickles(m) + +def test_class_objects(): + clslist = [_class,_class2,_newclass,_newclass2,_mclass] + objlist = [o,oc,n,nc,m] + _clslist = [dill.dumps(obj) for obj in clslist] + _objlist = [dill.dumps(obj) for obj in objlist] + + for obj in clslist: + globals().pop(obj.__name__) + del clslist + for obj in ['o','oc','n','nc']: + globals().pop(obj) + del objlist + del obj + + for obj,cls in zip(_objlist,_clslist): + _cls = dill.loads(cls) + _obj = dill.loads(obj) + assert _obj.ok() + assert _cls.ok(_cls()) + if _cls.__name__ == "_mclass": + assert type(_cls).__name__ == "_meta" + +# test NoneType +def test_specialtypes(): + assert dill.pickles(type(None)) + assert dill.pickles(type(NotImplemented)) + assert dill.pickles(type(Ellipsis)) + assert dill.pickles(type(EnumMeta)) + +from collections import namedtuple +Z = namedtuple("Z", ['a','b']) +Zi = Z(0,1) +X = namedtuple("Y", ['a','b']) +X.__name__ = "X" +X.__qualname__ = "X" #XXX: name must 'match' or fails to pickle +Xi = X(0,1) +Bad = namedtuple("FakeName", ['a','b']) +Badi = Bad(0,1) +Defaults = namedtuple('Defaults', ['x', 'y'], defaults=[1]) +Defaultsi = Defaults(2) + +# test namedtuple +def test_namedtuple(): + assert Z is dill.loads(dill.dumps(Z)) + assert Zi == dill.loads(dill.dumps(Zi)) + assert X is dill.loads(dill.dumps(X)) + assert Xi == dill.loads(dill.dumps(Xi)) + assert Defaults is dill.loads(dill.dumps(Defaults)) + assert Defaultsi == dill.loads(dill.dumps(Defaultsi)) + assert Bad is not dill.loads(dill.dumps(Bad)) + assert Bad._fields == dill.loads(dill.dumps(Bad))._fields + assert tuple(Badi) == tuple(dill.loads(dill.dumps(Badi))) + + class A: + class B(namedtuple("C", ["one", "two"])): + '''docstring''' + B.__module__ = 'testing' + + a = A() + assert dill.copy(a) + + assert dill.copy(A.B).__name__ == 'B' + assert dill.copy(A.B).__qualname__.endswith('..A.B') + assert dill.copy(A.B).__doc__ == 'docstring' + assert dill.copy(A.B).__module__ == 'testing' + + from typing import NamedTuple + + def A(): + class B(NamedTuple): + x: int + return B + + assert type(dill.copy(A()(8))).__qualname__ == type(A()(8)).__qualname__ + +def test_dtype(): + try: + import numpy as np + + dti = np.dtype('int') + assert np.dtype == dill.copy(np.dtype) + assert dti == dill.copy(dti) + except ImportError: pass + + +def test_array_nested(): + try: + import numpy as np + + x = np.array([1]) + y = (x,) + assert y == dill.copy(y) + + except ImportError: pass + + +def test_array_subclass(): + try: + import numpy as np + + class TestArray(np.ndarray): + def __new__(cls, input_array, color): + obj = np.asarray(input_array).view(cls) + obj.color = color + return obj + def __array_finalize__(self, obj): + if obj is None: + return + if isinstance(obj, type(self)): + self.color = obj.color + def __getnewargs__(self): + return np.asarray(self), self.color + + a1 = TestArray(np.zeros(100), color='green') + if not dill._dill.IS_PYPY: + assert dill.pickles(a1) + assert a1.__dict__ == dill.copy(a1).__dict__ + + a2 = a1[0:9] + if not dill._dill.IS_PYPY: + assert dill.pickles(a2) + assert a2.__dict__ == dill.copy(a2).__dict__ + + class TestArray2(np.ndarray): + color = 'blue' + + a3 = TestArray2([1,2,3,4,5]) + a3.color = 'green' + if not dill._dill.IS_PYPY: + assert dill.pickles(a3) + assert a3.__dict__ == dill.copy(a3).__dict__ + + except ImportError: pass + + +def test_method_decorator(): + class A(object): + @classmethod + def test(cls): + pass + + a = A() + + res = dill.dumps(a) + new_obj = dill.loads(res) + new_obj.__class__.test() + +# test slots +class Y(object): + __slots__ = ('y', '__weakref__') + def __init__(self, y): + self.y = y + +value = 123 +y = Y(value) + +class Y2(object): + __slots__ = 'y' + def __init__(self, y): + self.y = y + +def test_slots(): + assert dill.pickles(Y) + assert dill.pickles(y) + assert dill.pickles(Y.y) + assert dill.copy(y).y == value + assert dill.copy(Y2(value)).y == value + +def test_origbases(): + assert dill.copy(customIntList).__orig_bases__ == customIntList.__orig_bases__ + +def test_attr(): + import attr + @attr.s + class A: + a = attr.ib() + + v = A(1) + assert dill.copy(v) == v + +def test_metaclass(): + class metaclass_with_new(type): + def __new__(mcls, name, bases, ns, **kwds): + cls = super().__new__(mcls, name, bases, ns, **kwds) + assert mcls is not None + assert cls.method(mcls) + return cls + def method(cls, mcls): + return isinstance(cls, mcls) + + l = locals() + exec("""class subclass_with_new(metaclass=metaclass_with_new): + def __new__(cls): + self = super().__new__(cls) + return self""", None, l) + subclass_with_new = l['subclass_with_new'] + + assert dill.copy(subclass_with_new()) + +def test_enummeta(): + from http import HTTPStatus + import enum + assert dill.copy(HTTPStatus.OK) is HTTPStatus.OK + assert dill.copy(enum.EnumMeta) is enum.EnumMeta + +def test_inherit(): #NOTE: see issue #612 + class Foo: + w = 0 + x = 1 + y = 1.1 + a = () + b = (1,) + n = None + + class Bar(Foo): + w = 2 + x = 1 + y = 1.1 + z = 0.2 + a = () + b = (1,) + c = (2,) + n = None + + Baz = dill.copy(Bar) + + import platform + is_pypy = platform.python_implementation() == 'PyPy' + assert Bar.__dict__ == Baz.__dict__ + # ints + assert 'w' in Bar.__dict__ and 'w' in Baz.__dict__ + assert Bar.__dict__['w'] is Baz.__dict__['w'] + assert 'x' in Bar.__dict__ and 'x' in Baz.__dict__ + assert Bar.__dict__['x'] is Baz.__dict__['x'] + # floats + assert 'y' in Bar.__dict__ and 'y' in Baz.__dict__ + same = Bar.__dict__['y'] is Baz.__dict__['y'] + assert same if is_pypy else not same + assert 'z' in Bar.__dict__ and 'z' in Baz.__dict__ + same = Bar.__dict__['z'] is Baz.__dict__['z'] + assert same if is_pypy else not same + # tuples + assert 'a' in Bar.__dict__ and 'a' in Baz.__dict__ + assert Bar.__dict__['a'] is Baz.__dict__['a'] + assert 'b' in Bar.__dict__ and 'b' in Baz.__dict__ + assert Bar.__dict__['b'] is not Baz.__dict__['b'] + assert 'c' in Bar.__dict__ and 'c' in Baz.__dict__ + assert Bar.__dict__['c'] is not Baz.__dict__['c'] + # None + assert 'n' in Bar.__dict__ and 'n' in Baz.__dict__ + assert Bar.__dict__['n'] is Baz.__dict__['n'] + + +if __name__ == '__main__': + test_class_instances() + test_class_objects() + test_specialtypes() + test_namedtuple() + test_dtype() + test_array_nested() + test_array_subclass() + test_method_decorator() + test_slots() + test_origbases() + test_metaclass() + test_enummeta() + test_inherit() diff --git a/local_packages/dill/tests/test_dataclasses.py b/local_packages/dill/tests/test_dataclasses.py new file mode 100644 index 0000000..5b2af2d --- /dev/null +++ b/local_packages/dill/tests/test_dataclasses.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Author: Anirudh Vegesana (avegesan@cs.stanford.edu) +# Copyright (c) 2022-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +test pickling a dataclass +""" + +import dill +import dataclasses + +def test_dataclasses(): + # Issue #500 + @dataclasses.dataclass + class A: + x: int + y: str + + @dataclasses.dataclass + class B: + a: A + + a = A(1, "test") + before = B(a) + save = dill.dumps(before) + after = dill.loads(save) + assert before != after # classes don't match + assert before == B(A(**dataclasses.asdict(after.a))) + assert dataclasses.asdict(before) == dataclasses.asdict(after) + +if __name__ == '__main__': + test_dataclasses() diff --git a/local_packages/dill/tests/test_detect.py b/local_packages/dill/tests/test_detect.py new file mode 100644 index 0000000..b232b6a --- /dev/null +++ b/local_packages/dill/tests/test_detect.py @@ -0,0 +1,163 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +from dill.detect import baditems, badobjects, badtypes, errors, parent, at, globalvars +from dill import settings +from dill._dill import IS_PYPY +from pickle import PicklingError + +import inspect +import sys +import os + +def test_bad_things(): + f = inspect.currentframe() + assert baditems(f) == [f] + #assert baditems(globals()) == [f] #XXX + assert badobjects(f) is f + assert badtypes(f) == type(f) + assert type(errors(f)) is TypeError + d = badtypes(f, 1) + assert isinstance(d, dict) + assert list(badobjects(f, 1).keys()) == list(d.keys()) + assert list(errors(f, 1).keys()) == list(d.keys()) + s = set([(err.__class__.__name__,err.args[0]) for err in list(errors(f, 1).values())]) + a = dict(s) + if not os.environ.get('COVERAGE'): #XXX: travis-ci + proxy = 0 if type(f.f_locals) is dict else 1 + assert len(s) == len(a) + proxy # TypeError (and possibly PicklingError) + n = 2 + assert len(a) is n if 'PicklingError' in a.keys() else n-1 + +def test_parent(): + x = [4,5,6,7] + listiter = iter(x) + obj = parent(listiter, list) + assert obj is x + + if IS_PYPY: assert parent(obj, int) is None + else: assert parent(obj, int) is x[-1] # python oddly? finds last int + assert at(id(at)) is at + +a, b, c = 1, 2, 3 + +def squared(x): + return a+x**2 + +def foo(x): + def bar(y): + return squared(x)+y + return bar + +class _class: + def _method(self): + pass + def ok(self): + return True + +def test_globals(): + def f(): + a + def g(): + b + def h(): + c + assert globalvars(f) == dict(a=1, b=2, c=3) + + res = globalvars(foo, recurse=True) + assert set(res) == set(['squared', 'a']) + res = globalvars(foo, recurse=False) + assert res == {} + zap = foo(2) + res = globalvars(zap, recurse=True) + assert set(res) == set(['squared', 'a']) + res = globalvars(zap, recurse=False) + assert set(res) == set(['squared']) + del zap + res = globalvars(squared) + assert set(res) == set(['a']) + # FIXME: should find referenced __builtins__ + #res = globalvars(_class, recurse=True) + #assert set(res) == set(['True']) + #res = globalvars(_class, recurse=False) + #assert res == {} + #res = globalvars(_class.ok, recurse=True) + #assert set(res) == set(['True']) + #res = globalvars(_class.ok, recurse=False) + #assert set(res) == set(['True']) + + +#98 dill ignores __getstate__ in interactive lambdas +bar = [0] + +class Foo(object): + def __init__(self): + pass + def __getstate__(self): + bar[0] = bar[0]+1 + return {} + def __setstate__(self, data): + pass + +f = Foo() + +def test_getstate(): + from dill import dumps, loads + dumps(f) + b = bar[0] + dumps(lambda: f, recurse=False) # doesn't call __getstate__ + assert bar[0] == b + dumps(lambda: f, recurse=True) # calls __getstate__ + assert bar[0] == b + 1 + +#97 serialize lambdas in test files +def test_deleted(): + global sin + from dill import dumps, loads + from math import sin, pi + + def sinc(x): + return sin(x)/x + + settings['recurse'] = True + _sinc = dumps(sinc) + sin = globals().pop('sin') + sin = 1 + del sin + sinc_ = loads(_sinc) # no NameError... pickling preserves 'sin' + res = sinc_(1) + from math import sin + assert sinc(1) == res + + +def test_lambdify(): + try: + from sympy import symbols, lambdify + from numpy import __version__ as numversion + if numversion < '2.4.0' and sys.hexversion == 0x30f00a3: + return #NOTE: numpy Segfaults for the above combination + except ImportError: + return + settings['recurse'] = True + x = symbols("x") + y = x**2 + f = lambdify([x], y) + z = min + d = globals() + globalvars(f, recurse=True, builtin=True) + assert z is min + assert d is globals() + + +if __name__ == '__main__': + test_bad_things() + test_parent() + test_globals() + test_getstate() + test_deleted() + test_lambdify() diff --git a/local_packages/dill/tests/test_dictviews.py b/local_packages/dill/tests/test_dictviews.py new file mode 100644 index 0000000..d544415 --- /dev/null +++ b/local_packages/dill/tests/test_dictviews.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Author: Anirudh Vegesana (avegesan@cs.stanford.edu) +# Copyright (c) 2021-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import dill +from dill._dill import OLD310, MAPPING_PROXY_TRICK, DictProxyType + +def test_dictproxy(): + assert dill.copy(DictProxyType({'a': 2})) + +def test_dictviews(): + x = {'a': 1} + assert dill.copy(x.keys()) + assert dill.copy(x.values()) + assert dill.copy(x.items()) + +def test_dictproxy_trick(): + if not OLD310 and MAPPING_PROXY_TRICK: + x = {'a': 1} + all_views = (x.values(), x.items(), x.keys(), x) + seperate_views = dill.copy(all_views) + new_x = seperate_views[-1] + new_x['b'] = 2 + new_x['c'] = 1 + assert len(new_x) == 3 and len(x) == 1 + assert len(seperate_views[0]) == 3 and len(all_views[0]) == 1 + assert len(seperate_views[1]) == 3 and len(all_views[1]) == 1 + assert len(seperate_views[2]) == 3 and len(all_views[2]) == 1 + assert dict(all_views[1]) == x + assert dict(seperate_views[1]) == new_x + +if __name__ == '__main__': + test_dictproxy() + test_dictviews() + test_dictproxy_trick() diff --git a/local_packages/dill/tests/test_diff.py b/local_packages/dill/tests/test_diff.py new file mode 100644 index 0000000..bb8874c --- /dev/null +++ b/local_packages/dill/tests/test_diff.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +from dill import __diff as diff + +import sys +IS_PYPY = not hasattr(sys, 'getrefcount') + +class A: + pass + +def test_diff(): + a = A() + b = A() + c = A() + a.a = b + b.a = c + diff.memorise(a) + assert not diff.has_changed(a) + c.a = 1 + assert diff.has_changed(a) + diff.memorise(c, force=True) + assert not diff.has_changed(a) + c.a = 2 + assert diff.has_changed(a) + changed = diff.whats_changed(a) + assert list(changed[0].keys()) == ["a"] + assert not changed[1] + + a2 = [] + b2 = [a2] + c2 = [b2] + diff.memorise(c2) + assert not diff.has_changed(c2) + a2.append(1) + assert diff.has_changed(c2) + changed = diff.whats_changed(c2) + assert changed[0] == {} + assert changed[1] + + a3 = {} + b3 = {1: a3} + c3 = {1: b3} + diff.memorise(c3) + assert not diff.has_changed(c3) + a3[1] = 1 + assert diff.has_changed(c3) + changed = diff.whats_changed(c3) + assert changed[0] == {} + assert changed[1] + + if not IS_PYPY: + import abc + # make sure the "_abc_invaldation_counter" doesn't make test fail + diff.memorise(abc.ABCMeta, force=True) + assert not diff.has_changed(abc) + abc.ABCMeta.zzz = 1 + assert diff.has_changed(abc) + changed = diff.whats_changed(abc) + assert list(changed[0].keys()) == ["ABCMeta"] + assert not changed[1] + + ''' + import Queue + diff.memorise(Queue, force=True) + assert not diff.has_changed(Queue) + Queue.Queue.zzz = 1 + assert diff.has_changed(Queue) + changed = diff.whats_changed(Queue) + assert list(changed[0].keys()) == ["Queue"] + assert not changed[1] + + import math + diff.memorise(math, force=True) + assert not diff.has_changed(math) + math.zzz = 1 + assert diff.has_changed(math) + changed = diff.whats_changed(math) + assert list(changed[0].keys()) == ["zzz"] + assert not changed[1] + ''' + + a = A() + b = A() + c = A() + a.a = b + b.a = c + diff.memorise(a) + assert not diff.has_changed(a) + c.a = 1 + assert diff.has_changed(a) + diff.memorise(c, force=True) + assert not diff.has_changed(a) + del c.a + assert diff.has_changed(a) + changed = diff.whats_changed(a) + assert list(changed[0].keys()) == ["a"] + assert not changed[1] + + +if __name__ == '__main__': + test_diff() diff --git a/local_packages/dill/tests/test_extendpickle.py b/local_packages/dill/tests/test_extendpickle.py new file mode 100644 index 0000000..f86e44a --- /dev/null +++ b/local_packages/dill/tests/test_extendpickle.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import dill as pickle +from io import BytesIO as StringIO + + +def my_fn(x): + return x * 17 + + +def test_extend(): + obj = lambda : my_fn(34) + assert obj() == 578 + + obj_io = StringIO() + pickler = pickle.Pickler(obj_io) + pickler.dump(obj) + + obj_str = obj_io.getvalue() + + obj2_io = StringIO(obj_str) + unpickler = pickle.Unpickler(obj2_io) + obj2 = unpickler.load() + + assert obj2() == 578 + + +def test_isdill(): + obj_io = StringIO() + pickler = pickle.Pickler(obj_io) + assert pickle._dill.is_dill(pickler) is True + + pickler = pickle._dill.StockPickler(obj_io) + assert pickle._dill.is_dill(pickler) is False + + try: + import multiprocess as mp + pickler = mp.reduction.ForkingPickler(obj_io) + assert pickle._dill.is_dill(pickler, child=True) is True + assert pickle._dill.is_dill(pickler, child=False) is False + except Exception: + pass + + +if __name__ == '__main__': + test_extend() + test_isdill() diff --git a/local_packages/dill/tests/test_fglobals.py b/local_packages/dill/tests/test_fglobals.py new file mode 100644 index 0000000..aa93850 --- /dev/null +++ b/local_packages/dill/tests/test_fglobals.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2021-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import dill +dill.settings['recurse'] = True + +def get_fun_with_strftime(): + def fun_with_strftime(): + import datetime + return datetime.datetime.strptime("04-01-1943", "%d-%m-%Y").strftime( + "%Y-%m-%d %H:%M:%S" + ) + return fun_with_strftime + + +def get_fun_with_strftime2(): + import datetime + return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + + +def test_doc_dill_issue_219(): + back_fn = dill.loads(dill.dumps(get_fun_with_strftime())) + assert back_fn() == "1943-01-04 00:00:00" + dupl = dill.loads(dill.dumps(get_fun_with_strftime2)) + assert dupl() == get_fun_with_strftime2() + + +def get_fun_with_internal_import(): + def fun_with_import(): + import re + return re.compile("$") + return fun_with_import + + +def test_method_with_internal_import_should_work(): + import re + back_fn = dill.loads(dill.dumps(get_fun_with_internal_import())) + import inspect + if hasattr(inspect, 'getclosurevars'): + vars = inspect.getclosurevars(back_fn) + assert vars.globals == {} + assert vars.nonlocals == {} + assert back_fn() == re.compile("$") + assert "__builtins__" in back_fn.__globals__ + + +if __name__ == "__main__": + import sys + if (sys.version_info[:3] != (3,10,0) or sys.version_info[3] != 'alpha'): + test_doc_dill_issue_219() + test_method_with_internal_import_should_work() diff --git a/local_packages/dill/tests/test_file.py b/local_packages/dill/tests/test_file.py new file mode 100644 index 0000000..6d2b256 --- /dev/null +++ b/local_packages/dill/tests/test_file.py @@ -0,0 +1,500 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import os +import sys +import string +import random + +import dill + + +dill.settings['recurse'] = True + +fname = "_test_file.txt" +rand_chars = list(string.ascii_letters) + ["\n"] * 40 # bias newline + +buffer_error = ValueError("invalid buffer size") +dne_error = FileNotFoundError("[Errno 2] No such file or directory: '%s'" % fname) + + +def write_randomness(number=200): + f = open(fname, "w") + for i in range(number): + f.write(random.choice(rand_chars)) + f.close() + f = open(fname, "r") + contents = f.read() + f.close() + return contents + + +def trunc_file(): + open(fname, "w").close() + + +def throws(op, args, exc): + try: + op(*args) + except type(exc): + return sys.exc_info()[1].args == exc.args + else: + return False + + +def teardown_module(): + if os.path.exists(fname): + os.remove(fname) + + +def bench(strictio, fmode, skippypy): + import platform + if skippypy and platform.python_implementation() == 'PyPy': + # Skip for PyPy... + return + + # file exists, with same contents + # read + + write_randomness() + + f = open(fname, "r") + _f = dill.loads(dill.dumps(f, fmode=fmode))#, strictio=strictio)) + assert _f.mode == f.mode + assert _f.tell() == f.tell() + assert _f.read() == f.read() + f.close() + _f.close() + + # write + + f = open(fname, "w") + f.write("hello") + f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio) + f1mode = f.mode + ftell = f.tell() + f.close() + f2 = dill.loads(f_dumped) #FIXME: fails due to pypy/issues/1233 + # TypeError: expected py_object instance instead of str + f2mode = f2.mode + f2tell = f2.tell() + f2name = f2.name + f2.write(" world!") + f2.close() + + if fmode == dill.HANDLE_FMODE: + assert open(fname).read() == " world!" + assert f2mode == f1mode + assert f2tell == 0 + elif fmode == dill.CONTENTS_FMODE: + assert open(fname).read() == "hello world!" + assert f2mode == f1mode + assert f2tell == ftell + assert f2name == fname + elif fmode == dill.FILE_FMODE: + assert open(fname).read() == "hello world!" + assert f2mode == f1mode + assert f2tell == ftell + else: + raise RuntimeError("Unknown file mode '%s'" % fmode) + + # append + + trunc_file() + + f = open(fname, "a") + f.write("hello") + f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio) + f1mode = f.mode + ftell = f.tell() + f.close() + f2 = dill.loads(f_dumped) + f2mode = f2.mode + f2tell = f2.tell() + f2.write(" world!") + f2.close() + + assert f2mode == f1mode + if fmode == dill.CONTENTS_FMODE: + assert open(fname).read() == "hello world!" + assert f2tell == ftell + elif fmode == dill.HANDLE_FMODE: + assert open(fname).read() == "hello world!" + assert f2tell == ftell + elif fmode == dill.FILE_FMODE: + assert open(fname).read() == "hello world!" + assert f2tell == ftell + else: + raise RuntimeError("Unknown file mode '%s'" % fmode) + + # file exists, with different contents (smaller size) + # read + + write_randomness() + + f = open(fname, "r") + fstr = f.read() + f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio) + f1mode = f.mode + ftell = f.tell() + f.close() + _flen = 150 + _fstr = write_randomness(number=_flen) + + if strictio: # throw error if ftell > EOF + assert throws(dill.loads, (f_dumped,), buffer_error) + else: + f2 = dill.loads(f_dumped) + assert f2.mode == f1mode + if fmode == dill.CONTENTS_FMODE: + assert f2.tell() == _flen + assert f2.read() == "" + f2.seek(0) + assert f2.read() == _fstr + assert f2.tell() == _flen # 150 + elif fmode == dill.HANDLE_FMODE: + assert f2.tell() == 0 + assert f2.read() == _fstr + assert f2.tell() == _flen # 150 + elif fmode == dill.FILE_FMODE: + assert f2.tell() == ftell # 200 + assert f2.read() == "" + f2.seek(0) + assert f2.read() == fstr + assert f2.tell() == ftell # 200 + else: + raise RuntimeError("Unknown file mode '%s'" % fmode) + f2.close() + + # write + + write_randomness() + + f = open(fname, "w") + f.write("hello") + f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio) + f1mode = f.mode + ftell = f.tell() + f.close() + fstr = open(fname).read() + + f = open(fname, "w") + f.write("h") + _ftell = f.tell() + f.close() + + if strictio: # throw error if ftell > EOF + assert throws(dill.loads, (f_dumped,), buffer_error) + else: + f2 = dill.loads(f_dumped) + f2mode = f2.mode + f2tell = f2.tell() + f2.write(" world!") + f2.close() + if fmode == dill.CONTENTS_FMODE: + assert open(fname).read() == "h world!" + assert f2mode == f1mode + assert f2tell == _ftell + elif fmode == dill.HANDLE_FMODE: + assert open(fname).read() == " world!" + assert f2mode == f1mode + assert f2tell == 0 + elif fmode == dill.FILE_FMODE: + assert open(fname).read() == "hello world!" + assert f2mode == f1mode + assert f2tell == ftell + else: + raise RuntimeError("Unknown file mode '%s'" % fmode) + f2.close() + + # append + + trunc_file() + + f = open(fname, "a") + f.write("hello") + f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio) + f1mode = f.mode + ftell = f.tell() + f.close() + fstr = open(fname).read() + + f = open(fname, "w") + f.write("h") + _ftell = f.tell() + f.close() + + if strictio: # throw error if ftell > EOF + assert throws(dill.loads, (f_dumped,), buffer_error) + else: + f2 = dill.loads(f_dumped) + f2mode = f2.mode + f2tell = f2.tell() + f2.write(" world!") + f2.close() + assert f2mode == f1mode + if fmode == dill.CONTENTS_FMODE: + # position of writes cannot be changed on some OSs + assert open(fname).read() == "h world!" + assert f2tell == _ftell + elif fmode == dill.HANDLE_FMODE: + assert open(fname).read() == "h world!" + assert f2tell == _ftell + elif fmode == dill.FILE_FMODE: + assert open(fname).read() == "hello world!" + assert f2tell == ftell + else: + raise RuntimeError("Unknown file mode '%s'" % fmode) + f2.close() + + # file does not exist + # read + + write_randomness() + + f = open(fname, "r") + fstr = f.read() + f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio) + f1mode = f.mode + ftell = f.tell() + f.close() + + os.remove(fname) + + if strictio: # throw error if file DNE + assert throws(dill.loads, (f_dumped,), dne_error) + else: + f2 = dill.loads(f_dumped) + assert f2.mode == f1mode + if fmode == dill.CONTENTS_FMODE: + # FIXME: this fails on systems where f2.tell() always returns 0 + # assert f2.tell() == ftell # 200 + assert f2.read() == "" + f2.seek(0) + assert f2.read() == "" + assert f2.tell() == 0 + elif fmode == dill.FILE_FMODE: + assert f2.tell() == ftell # 200 + assert f2.read() == "" + f2.seek(0) + assert f2.read() == fstr + assert f2.tell() == ftell # 200 + elif fmode == dill.HANDLE_FMODE: + assert f2.tell() == 0 + assert f2.read() == "" + assert f2.tell() == 0 + else: + raise RuntimeError("Unknown file mode '%s'" % fmode) + f2.close() + + # write + + write_randomness() + + f = open(fname, "w+") + f.write("hello") + f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio) + ftell = f.tell() + f1mode = f.mode + f.close() + + os.remove(fname) + + if strictio: # throw error if file DNE + assert throws(dill.loads, (f_dumped,), dne_error) + else: + f2 = dill.loads(f_dumped) + f2mode = f2.mode + f2tell = f2.tell() + f2.write(" world!") + f2.close() + if fmode == dill.CONTENTS_FMODE: + assert open(fname).read() == " world!" + assert f2mode == 'w+' + assert f2tell == 0 + elif fmode == dill.HANDLE_FMODE: + assert open(fname).read() == " world!" + assert f2mode == f1mode + assert f2tell == 0 + elif fmode == dill.FILE_FMODE: + assert open(fname).read() == "hello world!" + assert f2mode == f1mode + assert f2tell == ftell + else: + raise RuntimeError("Unknown file mode '%s'" % fmode) + + # append + + trunc_file() + + f = open(fname, "a") + f.write("hello") + f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio) + ftell = f.tell() + f1mode = f.mode + f.close() + + os.remove(fname) + + if strictio: # throw error if file DNE + assert throws(dill.loads, (f_dumped,), dne_error) + else: + f2 = dill.loads(f_dumped) + f2mode = f2.mode + f2tell = f2.tell() + f2.write(" world!") + f2.close() + assert f2mode == f1mode + if fmode == dill.CONTENTS_FMODE: + assert open(fname).read() == " world!" + assert f2tell == 0 + elif fmode == dill.HANDLE_FMODE: + assert open(fname).read() == " world!" + assert f2tell == 0 + elif fmode == dill.FILE_FMODE: + assert open(fname).read() == "hello world!" + assert f2tell == ftell + else: + raise RuntimeError("Unknown file mode '%s'" % fmode) + + # file exists, with different contents (larger size) + # read + + write_randomness() + + f = open(fname, "r") + fstr = f.read() + f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio) + f1mode = f.mode + ftell = f.tell() + f.close() + _flen = 250 + _fstr = write_randomness(number=_flen) + + # XXX: no safe_file: no way to be 'safe'? + + f2 = dill.loads(f_dumped) + assert f2.mode == f1mode + if fmode == dill.CONTENTS_FMODE: + assert f2.tell() == ftell # 200 + assert f2.read() == _fstr[ftell:] + f2.seek(0) + assert f2.read() == _fstr + assert f2.tell() == _flen # 250 + elif fmode == dill.HANDLE_FMODE: + assert f2.tell() == 0 + assert f2.read() == _fstr + assert f2.tell() == _flen # 250 + elif fmode == dill.FILE_FMODE: + assert f2.tell() == ftell # 200 + assert f2.read() == "" + f2.seek(0) + assert f2.read() == fstr + assert f2.tell() == ftell # 200 + else: + raise RuntimeError("Unknown file mode '%s'" % fmode) + f2.close() # XXX: other alternatives? + + # write + + f = open(fname, "w") + f.write("hello") + f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio) + f1mode = f.mode + ftell = f.tell() + + fstr = open(fname).read() + + f.write(" and goodbye!") + _ftell = f.tell() + f.close() + + # XXX: no safe_file: no way to be 'safe'? + + f2 = dill.loads(f_dumped) + f2mode = f2.mode + f2tell = f2.tell() + f2.write(" world!") + f2.close() + if fmode == dill.CONTENTS_FMODE: + assert open(fname).read() == "hello world!odbye!" + assert f2mode == f1mode + assert f2tell == ftell + elif fmode == dill.HANDLE_FMODE: + assert open(fname).read() == " world!" + assert f2mode == f1mode + assert f2tell == 0 + elif fmode == dill.FILE_FMODE: + assert open(fname).read() == "hello world!" + assert f2mode == f1mode + assert f2tell == ftell + else: + raise RuntimeError("Unknown file mode '%s'" % fmode) + f2.close() + + # append + + trunc_file() + + f = open(fname, "a") + f.write("hello") + f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio) + f1mode = f.mode + ftell = f.tell() + fstr = open(fname).read() + + f.write(" and goodbye!") + _ftell = f.tell() + f.close() + + # XXX: no safe_file: no way to be 'safe'? + + f2 = dill.loads(f_dumped) + f2mode = f2.mode + f2tell = f2.tell() + f2.write(" world!") + f2.close() + assert f2mode == f1mode + if fmode == dill.CONTENTS_FMODE: + assert open(fname).read() == "hello and goodbye! world!" + assert f2tell == ftell + elif fmode == dill.HANDLE_FMODE: + assert open(fname).read() == "hello and goodbye! world!" + assert f2tell == _ftell + elif fmode == dill.FILE_FMODE: + assert open(fname).read() == "hello world!" + assert f2tell == ftell + else: + raise RuntimeError("Unknown file mode '%s'" % fmode) + f2.close() + + +def test_nostrictio_handlefmode(): + bench(False, dill.HANDLE_FMODE, False) + teardown_module() + + +def test_nostrictio_filefmode(): + bench(False, dill.FILE_FMODE, False) + teardown_module() + + +def test_nostrictio_contentsfmode(): + bench(False, dill.CONTENTS_FMODE, True) + teardown_module() + + +#bench(True, dill.HANDLE_FMODE, False) +#bench(True, dill.FILE_FMODE, False) +#bench(True, dill.CONTENTS_FMODE, True) + + +if __name__ == '__main__': + test_nostrictio_handlefmode() + test_nostrictio_filefmode() + test_nostrictio_contentsfmode() diff --git a/local_packages/dill/tests/test_functions.py b/local_packages/dill/tests/test_functions.py new file mode 100644 index 0000000..f3db1a4 --- /dev/null +++ b/local_packages/dill/tests/test_functions.py @@ -0,0 +1,141 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2019-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import functools +import dill +import sys +dill.settings['recurse'] = True + + +def function_a(a): + return a + + +def function_b(b, b1): + return b + b1 + + +def function_c(c, c1=1): + return c + c1 + + +def function_d(d, d1, d2=1): + """doc string""" + return d + d1 + d2 + +function_d.__module__ = 'a module' + + +exec(''' +def function_e(e, *e1, e2=1, e3=2): + return e + sum(e1) + e2 + e3''') + +globalvar = 0 + +@functools.lru_cache(None) +def function_with_cache(x): + global globalvar + globalvar += x + return globalvar + + +def function_with_unassigned_variable(): + if False: + value = None + return (lambda: value) + + +def test_issue_510(): + # A very bizzare use of functions and methods that pickle doesn't get + # correctly for odd reasons. + class Foo: + def __init__(self): + def f2(self): + return self + self.f2 = f2.__get__(self) + + import dill, pickletools + f = Foo() + f1 = dill.copy(f) + assert f1.f2() is f1 + + +def test_functions(): + dumped_func_a = dill.dumps(function_a) + assert dill.loads(dumped_func_a)(0) == 0 + + dumped_func_b = dill.dumps(function_b) + assert dill.loads(dumped_func_b)(1,2) == 3 + + dumped_func_c = dill.dumps(function_c) + assert dill.loads(dumped_func_c)(1) == 2 + assert dill.loads(dumped_func_c)(1, 2) == 3 + + dumped_func_d = dill.dumps(function_d) + assert dill.loads(dumped_func_d).__doc__ == function_d.__doc__ + assert dill.loads(dumped_func_d).__module__ == function_d.__module__ + assert dill.loads(dumped_func_d)(1, 2) == 4 + assert dill.loads(dumped_func_d)(1, 2, 3) == 6 + assert dill.loads(dumped_func_d)(1, 2, d2=3) == 6 + + function_with_cache(1) + globalvar = 0 + dumped_func_cache = dill.dumps(function_with_cache) + assert function_with_cache(2) == 3 + assert function_with_cache(1) == 1 + assert function_with_cache(3) == 6 + assert function_with_cache(2) == 3 + + empty_cell = function_with_unassigned_variable() + cell_copy = dill.loads(dill.dumps(empty_cell)) + assert 'empty' in str(cell_copy.__closure__[0]) + try: + cell_copy() + except Exception: + # this is good + pass + else: + raise AssertionError('cell_copy() did not read an empty cell') + + exec(''' +dumped_func_e = dill.dumps(function_e) +assert dill.loads(dumped_func_e)(1, 2) == 6 +assert dill.loads(dumped_func_e)(1, 2, 3) == 9 +assert dill.loads(dumped_func_e)(1, 2, e2=3) == 8 +assert dill.loads(dumped_func_e)(1, 2, e2=3, e3=4) == 10 +assert dill.loads(dumped_func_e)(1, 2, 3, e2=4) == 12 +assert dill.loads(dumped_func_e)(1, 2, 3, e2=4, e3=5) == 15''') + +def test_code_object(): + import warnings + from dill._dill import ALL_CODE_PARAMS, CODE_PARAMS, CODE_VERSION, _create_code + code = function_c.__code__ + warnings.filterwarnings('ignore', category=DeprecationWarning) # issue 597 + LNOTAB = getattr(code, 'co_lnotab', b'') + if warnings.filters: del warnings.filters[0] + fields = {f: getattr(code, 'co_'+f) for f in CODE_PARAMS} + fields.setdefault('posonlyargcount', 0) # python >= 3.8 + fields.setdefault('lnotab', LNOTAB) # python <= 3.9 + fields.setdefault('linetable', b'') # python >= 3.10 + fields.setdefault('qualname', fields['name']) # python >= 3.11 + fields.setdefault('exceptiontable', b'') # python >= 3.11 + fields.setdefault('endlinetable', None) # python == 3.11a + fields.setdefault('columntable', None) # python == 3.11a + + for version, _, params in ALL_CODE_PARAMS: + args = tuple(fields[p] for p in params.split()) + try: + _create_code(*args) + if version >= (3,10): + _create_code(fields['lnotab'], *args) + except Exception as error: + raise Exception("failed to construct code object with format version {}".format(version)) from error + +if __name__ == '__main__': + test_functions() + test_issue_510() + test_code_object() diff --git a/local_packages/dill/tests/test_functors.py b/local_packages/dill/tests/test_functors.py new file mode 100644 index 0000000..882d24d --- /dev/null +++ b/local_packages/dill/tests/test_functors.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import functools +import dill +dill.settings['recurse'] = True + + +def f(a, b, c): # without keywords + pass + + +def g(a, b, c=2): # with keywords + pass + + +def h(a=1, b=2, c=3): # without args + pass + + +def test_functools(): + fp = functools.partial(f, 1, 2) + gp = functools.partial(g, 1, c=2) + hp = functools.partial(h, 1, c=2) + bp = functools.partial(int, base=2) + + assert dill.pickles(fp, safe=True) + assert dill.pickles(gp, safe=True) + assert dill.pickles(hp, safe=True) + assert dill.pickles(bp, safe=True) + + +if __name__ == '__main__': + test_functools() diff --git a/local_packages/dill/tests/test_logger.py b/local_packages/dill/tests/test_logger.py new file mode 100644 index 0000000..ec1d5e6 --- /dev/null +++ b/local_packages/dill/tests/test_logger.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python + +# Author: Leonardo Gama (@leogama) +# Copyright (c) 2022-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import logging +import re +import tempfile + +import dill +from dill import detect +from dill.logger import stderr_handler, adapter as logger + +try: + from StringIO import StringIO +except ImportError: + from io import StringIO + +test_obj = {'a': (1, 2), 'b': object(), 'f': lambda x: x**2, 'big': list(range(10))} + +def test_logging(should_trace): + buffer = StringIO() + handler = logging.StreamHandler(buffer) + logger.addHandler(handler) + try: + dill.dumps(test_obj) + if should_trace: + regex = re.compile(r'(\S*┬ \w.*[^)]' # begin pickling object + r'|│*└ # \w.* \[\d+ (\wi)?B])' # object written (with size) + ) + for line in buffer.getvalue().splitlines(): + assert regex.fullmatch(line) + return buffer.getvalue() + else: + assert buffer.getvalue() == "" + finally: + logger.removeHandler(handler) + buffer.close() + +def test_trace_to_file(stream_trace): + file = tempfile.NamedTemporaryFile(mode='r') + with detect.trace(file.name, mode='w'): + dill.dumps(test_obj) + file_trace = file.read() + file.close() + # Apparently, objects can change location in memory... + reghex = re.compile(r'0x[0-9A-Za-z]+') + file_trace, stream_trace = reghex.sub('0x', file_trace), reghex.sub('0x', stream_trace) + # PyPy prints dictionary contents with repr(dict)... + regdict = re.compile(r'(dict\.__repr__ of ).*') + file_trace, stream_trace = regdict.sub(r'\1{}>', file_trace), regdict.sub(r'\1{}>', stream_trace) + assert file_trace == stream_trace + +if __name__ == '__main__': + logger.removeHandler(stderr_handler) + test_logging(should_trace=False) + detect.trace(True) + test_logging(should_trace=True) + detect.trace(False) + test_logging(should_trace=False) + + loglevel = logging.ERROR + logger.setLevel(loglevel) + with detect.trace(): + stream_trace = test_logging(should_trace=True) + test_logging(should_trace=False) + assert logger.getEffectiveLevel() == loglevel + test_trace_to_file(stream_trace) diff --git a/local_packages/dill/tests/test_mixins.py b/local_packages/dill/tests/test_mixins.py new file mode 100644 index 0000000..447811d --- /dev/null +++ b/local_packages/dill/tests/test_mixins.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import dill +dill.settings['recurse'] = True + + +def wtf(x,y,z): + def zzz(): + return x + def yyy(): + return y + def xxx(): + return z + return zzz,yyy + + +def quad(a=1, b=1, c=0): + inverted = [False] + def invert(): + inverted[0] = not inverted[0] + def dec(f): + def func(*args, **kwds): + x = f(*args, **kwds) + if inverted[0]: x = -x + return a*x**2 + b*x + c + func.__wrapped__ = f + func.invert = invert + func.inverted = inverted + return func + return dec + + +@quad(a=0,b=2) +def double_add(*args): + return sum(args) + + +fx = sum([1,2,3]) + + +### to make it interesting... +def quad_factory(a=1,b=1,c=0): + def dec(f): + def func(*args,**kwds): + fx = f(*args,**kwds) + return a*fx**2 + b*fx + c + return func + return dec + + +@quad_factory(a=0,b=4,c=0) +def quadish(x): + return x+1 + + +quadratic = quad_factory() + + +def doubler(f): + def inner(*args, **kwds): + fx = f(*args, **kwds) + return 2*fx + return inner + + +@doubler +def quadruple(x): + return 2*x + + +def test_mixins(): + # test mixins + assert double_add(1,2,3) == 2*fx + double_add.invert() + assert double_add(1,2,3) == -2*fx + + _d = dill.copy(double_add) + assert _d(1,2,3) == -2*fx + #_d.invert() #FIXME: fails seemingly randomly + #assert _d(1,2,3) == 2*fx + + assert _d.__wrapped__(1,2,3) == fx + + # XXX: issue or feature? in python3.4, inverted is linked through copy + if not double_add.inverted[0]: + double_add.invert() + + # test some stuff from source and pointers + ds = dill.source + dd = dill.detect + assert ds.getsource(dd.freevars(quadish)['f']) == '@quad_factory(a=0,b=4,c=0)\ndef quadish(x):\n return x+1\n' + assert ds.getsource(dd.freevars(quadruple)['f']) == '@doubler\ndef quadruple(x):\n return 2*x\n' + assert ds.importable(quadish, source=False) == 'from %s import quadish\n' % __name__ + assert ds.importable(quadruple, source=False) == 'from %s import quadruple\n' % __name__ + assert ds.importable(quadratic, source=False) == 'from %s import quadratic\n' % __name__ + assert ds.importable(double_add, source=False) == 'from %s import double_add\n' % __name__ + assert ds.importable(quadruple, source=True) == 'def doubler(f):\n def inner(*args, **kwds):\n fx = f(*args, **kwds)\n return 2*fx\n return inner\n\n@doubler\ndef quadruple(x):\n return 2*x\n' + #***** #FIXME: this needs work + result = ds.importable(quadish, source=True) + a,b,c,_,result = result.split('\n',4) + assert result == 'def quad_factory(a=1,b=1,c=0):\n def dec(f):\n def func(*args,**kwds):\n fx = f(*args,**kwds)\n return a*fx**2 + b*fx + c\n return func\n return dec\n\n@quad_factory(a=0,b=4,c=0)\ndef quadish(x):\n return x+1\n' + assert set([a,b,c]) == set(['a = 0', 'c = 0', 'b = 4']) + result = ds.importable(quadratic, source=True) + a,b,c,result = result.split('\n',3) + assert result == '\ndef dec(f):\n def func(*args,**kwds):\n fx = f(*args,**kwds)\n return a*fx**2 + b*fx + c\n return func\n' + assert set([a,b,c]) == set(['a = 1', 'c = 0', 'b = 1']) + result = ds.importable(double_add, source=True) + a,b,c,d,_,result = result.split('\n',5) + assert result == 'def quad(a=1, b=1, c=0):\n inverted = [False]\n def invert():\n inverted[0] = not inverted[0]\n def dec(f):\n def func(*args, **kwds):\n x = f(*args, **kwds)\n if inverted[0]: x = -x\n return a*x**2 + b*x + c\n func.__wrapped__ = f\n func.invert = invert\n func.inverted = inverted\n return func\n return dec\n\n@quad(a=0,b=2)\ndef double_add(*args):\n return sum(args)\n' + assert set([a,b,c,d]) == set(['a = 0', 'c = 0', 'b = 2', 'inverted = [True]']) + #***** + + +if __name__ == '__main__': + test_mixins() diff --git a/local_packages/dill/tests/test_module.py b/local_packages/dill/tests/test_module.py new file mode 100644 index 0000000..bc0e18e --- /dev/null +++ b/local_packages/dill/tests/test_module.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import sys +import dill +import test_mixins as module +from importlib import reload +dill.settings['recurse'] = True + +cached = (module.__cached__ if hasattr(module, "__cached__") + else module.__file__.split(".", 1)[0] + ".pyc") + +module.a = 1234 + +pik_mod = dill.dumps(module) + +module.a = 0 + +# remove module +del sys.modules[module.__name__] +del module + +module = dill.loads(pik_mod) +def test_attributes(): + #assert hasattr(module, "a") and module.a == 1234 #FIXME: -m dill.tests + assert module.double_add(1, 2, 3) == 2 * module.fx + +# Restart, and test use_diff + +reload(module) + +try: + dill.use_diff() + + module.a = 1234 + + pik_mod = dill.dumps(module) + + module.a = 0 + + # remove module + del sys.modules[module.__name__] + del module + + module = dill.loads(pik_mod) + def test_diff_attributes(): + assert hasattr(module, "a") and module.a == 1234 + assert module.double_add(1, 2, 3) == 2 * module.fx + +except AttributeError: + def test_diff_attributes(): + pass + +# clean up +import os +if os.path.exists(cached): + os.remove(cached) +pycache = os.path.join(os.path.dirname(module.__file__), "__pycache__") +if os.path.exists(pycache) and not os.listdir(pycache): + os.removedirs(pycache) + + +# test when module is None +import math + +def get_lambda(str, **kwarg): + return eval(str, kwarg, None) + +obj = get_lambda('lambda x: math.exp(x)', math=math) + +def test_module_is_none(): + assert obj.__module__ is None + assert dill.copy(obj)(3) == obj(3) + + +if __name__ == '__main__': + test_attributes() + test_diff_attributes() + test_module_is_none() diff --git a/local_packages/dill/tests/test_moduledict.py b/local_packages/dill/tests/test_moduledict.py new file mode 100644 index 0000000..7c93921 --- /dev/null +++ b/local_packages/dill/tests/test_moduledict.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import dill +dill.settings['recurse'] = True + +def f(func): + def w(*args): + return f(*args) + return w + +@f +def f2(): pass + +# check when __main__ and on import +def test_decorated(): + assert dill.pickles(f2) + + +import doctest +import logging +logging.basicConfig(level=logging.DEBUG) + +class SomeUnreferencedUnpicklableClass(object): + def __reduce__(self): + raise Exception + +unpicklable = SomeUnreferencedUnpicklableClass() + +# This works fine outside of Doctest: +def test_normal(): + serialized = dill.dumps(lambda x: x) + +# should not try to pickle unpicklable object in __globals__ +def tests(): + """ + >>> serialized = dill.dumps(lambda x: x) + """ + return + +#print("\n\nRunning Doctest:") +def test_doctest(): + doctest.testmod() + + +if __name__ == '__main__': + test_decorated() + test_normal() + test_doctest() diff --git a/local_packages/dill/tests/test_nested.py b/local_packages/dill/tests/test_nested.py new file mode 100644 index 0000000..0e863ab --- /dev/null +++ b/local_packages/dill/tests/test_nested.py @@ -0,0 +1,135 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +test dill's ability to handle nested functions +""" + +import os +import math + +import dill as pickle +pickle.settings['recurse'] = True + + +# the nested function: pickle should fail here, but dill is ok. +def adder(augend): + zero = [0] + + def inner(addend): + return addend + augend + zero[0] + return inner + + +# rewrite the nested function using a class: standard pickle should work here. +class cadder(object): + def __init__(self, augend): + self.augend = augend + self.zero = [0] + + def __call__(self, addend): + return addend + self.augend + self.zero[0] + + +# rewrite again, but as an old-style class +class c2adder: + def __init__(self, augend): + self.augend = augend + self.zero = [0] + + def __call__(self, addend): + return addend + self.augend + self.zero[0] + + +# some basic class stuff +class basic(object): + pass + + +class basic2: + pass + + +x = 5 +y = 1 + + +def test_basic(): + a = [0, 1, 2] + pa = pickle.dumps(a) + pmath = pickle.dumps(math) #XXX: FAILS in pickle + pmap = pickle.dumps(map) + # ... + la = pickle.loads(pa) + lmath = pickle.loads(pmath) + lmap = pickle.loads(pmap) + assert list(map(math.sin, a)) == list(lmap(lmath.sin, la)) + + +def test_basic_class(): + pbasic2 = pickle.dumps(basic2) + _pbasic2 = pickle.loads(pbasic2)() + pbasic = pickle.dumps(basic) + _pbasic = pickle.loads(pbasic)() + + +def test_c2adder(): + pc2adder = pickle.dumps(c2adder) + pc2add5 = pickle.loads(pc2adder)(x) + assert pc2add5(y) == x+y + + +def test_pickled_cadder(): + pcadder = pickle.dumps(cadder) + pcadd5 = pickle.loads(pcadder)(x) + assert pcadd5(y) == x+y + + +def test_raw_adder_and_inner(): + add5 = adder(x) + assert add5(y) == x+y + + +def test_pickled_adder(): + padder = pickle.dumps(adder) + padd5 = pickle.loads(padder)(x) + assert padd5(y) == x+y + + +def test_pickled_inner(): + add5 = adder(x) + pinner = pickle.dumps(add5) #XXX: FAILS in pickle + p5add = pickle.loads(pinner) + assert p5add(y) == x+y + + +def test_moduledict_where_not_main(): + try: + from . import test_moduledict + except ImportError: + import test_moduledict + name = 'test_moduledict.py' + if os.path.exists(name) and os.path.exists(name+'c'): + os.remove(name+'c') + + if os.path.exists(name) and hasattr(test_moduledict, "__cached__") \ + and os.path.exists(test_moduledict.__cached__): + os.remove(getattr(test_moduledict, "__cached__")) + + if os.path.exists("__pycache__") and not os.listdir("__pycache__"): + os.removedirs("__pycache__") + + +if __name__ == '__main__': + test_basic() + test_basic_class() + test_c2adder() + test_pickled_cadder() + test_raw_adder_and_inner() + test_pickled_adder() + test_pickled_inner() + test_moduledict_where_not_main() diff --git a/local_packages/dill/tests/test_objects.py b/local_packages/dill/tests/test_objects.py new file mode 100644 index 0000000..0828cf9 --- /dev/null +++ b/local_packages/dill/tests/test_objects.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +demonstrate dill's ability to pickle different python types +test pickling of all Python Standard Library objects (currently: CH 1-14 @ 2.7) +""" + +import dill as pickle +pickle.settings['recurse'] = True +#pickle.detect.trace(True) +#import pickle + +# get all objects for testing +from dill import load_types, objects, extend +load_types(pickleable=True,unpickleable=False) + +# uncomment the next two lines to test cloudpickle +#extend(False) +#import cloudpickle as pickle + +# helper objects +class _class: + def _method(self): + pass + +# objects that *fail* if imported +special = {} +special['LambdaType'] = _lambda = lambda x: lambda y: x +special['MethodType'] = _method = _class()._method +special['UnboundMethodType'] = _class._method +objects.update(special) + +def pickles(name, exact=False, verbose=True): + """quick check if object pickles with dill""" + obj = objects[name] + try: + pik = pickle.loads(pickle.dumps(obj)) + if exact: + try: + assert pik == obj + except AssertionError: + assert type(obj) == type(pik) + if verbose: print ("weak: %s %s" % (name, type(obj))) + else: + assert type(obj) == type(pik) + except Exception: + if verbose: print ("fails: %s %s" % (name, type(obj))) + + +def test_objects(verbose=True): + for member in objects.keys(): + #pickles(member, exact=True, verbose=verbose) + pickles(member, exact=False, verbose=verbose) + +if __name__ == '__main__': + import warnings + warnings.simplefilter('ignore') + test_objects(verbose=False) diff --git a/local_packages/dill/tests/test_properties.py b/local_packages/dill/tests/test_properties.py new file mode 100644 index 0000000..2b55b39 --- /dev/null +++ b/local_packages/dill/tests/test_properties.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import sys + +import dill +dill.settings['recurse'] = True + + +class Foo(object): + def __init__(self): + self._data = 1 + + def _get_data(self): + return self._data + + def _set_data(self, x): + self._data = x + + data = property(_get_data, _set_data) + + +def test_data_not_none(): + FooS = dill.copy(Foo) + assert FooS.data.fget is not None + assert FooS.data.fset is not None + assert FooS.data.fdel is None + + +def test_data_unchanged(): + FooS = dill.copy(Foo) + try: + res = FooS().data + except Exception: + e = sys.exc_info()[1] + raise AssertionError(str(e)) + else: + assert res == 1 + + +def test_data_changed(): + FooS = dill.copy(Foo) + try: + f = FooS() + f.data = 1024 + res = f.data + except Exception: + e = sys.exc_info()[1] + raise AssertionError(str(e)) + else: + assert res == 1024 + + +if __name__ == '__main__': + test_data_not_none() + test_data_unchanged() + test_data_changed() diff --git a/local_packages/dill/tests/test_pycapsule.py b/local_packages/dill/tests/test_pycapsule.py new file mode 100644 index 0000000..735801a --- /dev/null +++ b/local_packages/dill/tests/test_pycapsule.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Author: Anirudh Vegesana (avegesan@cs.stanford.edu) +# Copyright (c) 2022-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +test pickling a PyCapsule object +""" + +import dill +import warnings + +test_pycapsule = None + +if dill._dill._testcapsule is not None: + import ctypes + def test_pycapsule(): + name = ctypes.create_string_buffer(b'dill._testcapsule') + capsule = dill._dill._PyCapsule_New( + ctypes.cast(dill._dill._PyCapsule_New, ctypes.c_void_p), + name, + None + ) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + dill.copy(capsule) + dill._testcapsule = capsule + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + dill.copy(capsule) + dill._testcapsule = None + try: + with warnings.catch_warnings(): + warnings.simplefilter("ignore", dill.PicklingWarning) + dill.copy(capsule) + except dill.UnpicklingError: + pass + else: + raise AssertionError("Expected a different error") + +if __name__ == '__main__': + if test_pycapsule is not None: + test_pycapsule() diff --git a/local_packages/dill/tests/test_recursive.py b/local_packages/dill/tests/test_recursive.py new file mode 100644 index 0000000..5237f68 --- /dev/null +++ b/local_packages/dill/tests/test_recursive.py @@ -0,0 +1,177 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2019-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import dill +from functools import partial +import warnings + + +def copy(obj, byref=False, recurse=False): + if byref: + try: + return dill.copy(obj, byref=byref, recurse=recurse) + except Exception: + pass + else: + raise AssertionError('Copy of %s with byref=True should have given a warning!' % (obj,)) + + warnings.simplefilter('ignore') + val = dill.copy(obj, byref=byref, recurse=recurse) + warnings.simplefilter('error') + return val + else: + return dill.copy(obj, byref=byref, recurse=recurse) + + +class obj1(object): + def __init__(self): + super(obj1, self).__init__() + +class obj2(object): + def __init__(self): + super(obj2, self).__init__() + +class obj3(object): + super_ = super + def __init__(self): + obj3.super_(obj3, self).__init__() + + +def test_super(): + assert copy(obj1(), byref=True) + assert copy(obj1(), byref=True, recurse=True) + assert copy(obj1(), recurse=True) + assert copy(obj1()) + + assert copy(obj2(), byref=True) + assert copy(obj2(), byref=True, recurse=True) + assert copy(obj2(), recurse=True) + assert copy(obj2()) + + assert copy(obj3(), byref=True) + assert copy(obj3(), byref=True, recurse=True) + assert copy(obj3(), recurse=True) + assert copy(obj3()) + + +def get_trigger(model): + pass + +class Machine(object): + def __init__(self): + self.child = Model() + self.trigger = partial(get_trigger, self) + self.child.trigger = partial(get_trigger, self.child) + +class Model(object): + pass + + + +def test_partial(): + assert copy(Machine(), byref=True) + assert copy(Machine(), byref=True, recurse=True) + assert copy(Machine(), recurse=True) + assert copy(Machine()) + + +class Machine2(object): + def __init__(self): + self.go = partial(self.member, self) + def member(self, model): + pass + + +class SubMachine(Machine2): + def __init__(self): + super(SubMachine, self).__init__() + + +def test_partials(): + assert copy(SubMachine(), byref=True) + assert copy(SubMachine(), byref=True, recurse=True) + assert copy(SubMachine(), recurse=True) + assert copy(SubMachine()) + + +class obj4(object): + def __init__(self): + super(obj4, self).__init__() + a = self + class obj5(object): + def __init__(self): + super(obj5, self).__init__() + self.a = a + self.b = obj5() + + +def test_circular_reference(): + assert copy(obj4()) + obj4_copy = dill.loads(dill.dumps(obj4())) + assert type(obj4_copy) is type(obj4_copy).__init__.__closure__[0].cell_contents + assert type(obj4_copy.b) is type(obj4_copy.b).__init__.__closure__[0].cell_contents + + +def f(): + def g(): + return g + return g + + +def test_function_cells(): + assert copy(f()) + + +def fib(n): + assert n >= 0 + if n <= 1: + return n + else: + return fib(n-1) + fib(n-2) + + +def test_recursive_function(): + global fib + fib2 = copy(fib, recurse=True) + fib3 = copy(fib) + fib4 = fib + del fib + assert fib2(5) == 5 + for _fib in (fib3, fib4): + try: + _fib(5) + except Exception: + # This is expected to fail because fib no longer exists + pass + else: + raise AssertionError("Function fib shouldn't have been found") + fib = fib4 + + +def collection_function_recursion(): + d = {} + def g(): + return d + d['g'] = g + return g + + +def test_collection_function_recursion(): + g = copy(collection_function_recursion()) + assert g()['g'] is g + + +if __name__ == '__main__': + with warnings.catch_warnings(): + warnings.simplefilter('error') + test_super() + test_partial() + test_partials() + test_circular_reference() + test_function_cells() + test_recursive_function() + test_collection_function_recursion() diff --git a/local_packages/dill/tests/test_registered.py b/local_packages/dill/tests/test_registered.py new file mode 100644 index 0000000..5c57d8a --- /dev/null +++ b/local_packages/dill/tests/test_registered.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2022-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +test pickling registered objects +""" + +import dill +from dill._objects import failures, registered, succeeds +import warnings +warnings.filterwarnings('ignore') + +def check(d, ok=True): + res = [] + for k,v in d.items(): + try: + z = dill.copy(v) + if ok: res.append(k) + except: + if not ok: res.append(k) + return res + +fails = check(failures) +try: + assert not bool(fails) +except AssertionError as e: + print("FAILS: %s" % fails) + raise e from None + +register = check(registered, ok=False) +try: + assert not bool(register) +except AssertionError as e: + print("REGISTER: %s" % register) + raise e from None + +success = check(succeeds, ok=False) +try: + assert not bool(success) +except AssertionError as e: + print("SUCCESS: %s" % success) + raise e from None + +import builtins +import types +q = dill._dill._reverse_typemap +p = {k:v for k,v in q.items() if k not in vars(builtins) and k not in vars(types)} + +diff = set(p.keys()).difference(registered.keys()) +try: + assert not bool(diff) +except AssertionError as e: + print("DIFF: %s" % diff) + raise e from None + +miss = set(registered.keys()).difference(p.keys()) +try: + assert not bool(miss) +except AssertionError as e: + print("MISS: %s" % miss) + raise e from None diff --git a/local_packages/dill/tests/test_restricted.py b/local_packages/dill/tests/test_restricted.py new file mode 100644 index 0000000..45bba91 --- /dev/null +++ b/local_packages/dill/tests/test_restricted.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python +# +# Author: Kirill Makhonin (@kirillmakhonin) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import dill + +class RestrictedType: + def __bool__(*args, **kwargs): + raise Exception('Restricted function') + + __eq__ = __lt__ = __le__ = __ne__ = __gt__ = __ge__ = __hash__ = __bool__ + +glob_obj = RestrictedType() + +def restricted_func(): + a = glob_obj + +def test_function_with_restricted_object(): + deserialized = dill.loads(dill.dumps(restricted_func, recurse=True)) + + +if __name__ == '__main__': + test_function_with_restricted_object() diff --git a/local_packages/dill/tests/test_selected.py b/local_packages/dill/tests/test_selected.py new file mode 100644 index 0000000..b4edb34 --- /dev/null +++ b/local_packages/dill/tests/test_selected.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +testing some selected object types +""" + +import dill +dill.settings['recurse'] = True + +verbose = False + +def test_dict_contents(): + c = type.__dict__ + for i,j in c.items(): + #try: + ok = dill.pickles(j) + #except Exception: + # print ("FAIL: %s with %s" % (i, dill.detect.errors(j))) + if verbose: print ("%s: %s, %s" % (ok, type(j), j)) + assert ok + if verbose: print ("") + +def _g(x): yield x; + +def _f(): + try: raise + except Exception: + from sys import exc_info + e, er, tb = exc_info() + return er, tb + +class _d(object): + def _method(self): + pass + +from dill import objects +from dill import load_types +load_types(pickleable=True,unpickleable=False) +_newclass = objects['ClassObjectType'] +# some clean-up #FIXME: should happen internal to dill +objects['TemporaryFileType'].close() +objects['FileType'].close() +del objects + +# getset_descriptor for new-style classes (fails on '_method', if not __main__) +def test_class_descriptors(): + d = _d.__dict__ + for i in d.values(): + ok = dill.pickles(i) + if verbose: print ("%s: %s, %s" % (ok, type(i), i)) + assert ok + if verbose: print ("") + od = _newclass.__dict__ + for i in od.values(): + ok = dill.pickles(i) + if verbose: print ("%s: %s, %s" % (ok, type(i), i)) + assert ok + if verbose: print ("") + +# (__main__) class instance for new-style classes +def test_class(): + o = _d() + oo = _newclass() + ok = dill.pickles(o) + if verbose: print ("%s: %s, %s" % (ok, type(o), o)) + assert ok + ok = dill.pickles(oo) + if verbose: print ("%s: %s, %s" % (ok, type(oo), oo)) + assert ok + if verbose: print ("") + +# frames, generators, and tracebacks (all depend on frame) +def test_frame_related(): + g = _g(1) + f = g.gi_frame + e,t = _f() + _is = lambda ok: ok + ok = dill.pickles(f) + if verbose: print ("%s: %s, %s" % (ok, type(f), f)) + assert not ok + ok = dill.pickles(g) + if verbose: print ("%s: %s, %s" % (ok, type(g), g)) + assert _is(not ok) #XXX: dill fails + ok = dill.pickles(t) + if verbose: print ("%s: %s, %s" % (ok, type(t), t)) + assert not ok #XXX: dill fails + ok = dill.pickles(e) + if verbose: print ("%s: %s, %s" % (ok, type(e), e)) + assert ok + if verbose: print ("") + +def test_typing(): + import typing + x = typing.Any + assert x == dill.copy(x) + x = typing.Dict[int, str] + assert x == dill.copy(x) + x = typing.List[int] + assert x == dill.copy(x) + x = typing.Tuple[int, str] + assert x == dill.copy(x) + x = typing.Tuple[int] + assert x == dill.copy(x) + x = typing.Tuple[()] + assert x == dill.copy(x) + x = typing.Tuple[()].copy_with(()) + assert x == dill.copy(x) + return + + +if __name__ == '__main__': + test_frame_related() + test_dict_contents() + test_class() + test_class_descriptors() + test_typing() diff --git a/local_packages/dill/tests/test_session.py b/local_packages/dill/tests/test_session.py new file mode 100644 index 0000000..0ec6bb5 --- /dev/null +++ b/local_packages/dill/tests/test_session.py @@ -0,0 +1,280 @@ +#!/usr/bin/env python + +# Author: Leonardo Gama (@leogama) +# Copyright (c) 2022-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import atexit +import os +import sys +import __main__ +from contextlib import suppress +from io import BytesIO + +import dill + +session_file = os.path.join(os.path.dirname(__file__), 'session-refimported-%s.pkl') + +################### +# Child process # +################### + +def _error_line(error, obj, refimported): + import traceback + line = traceback.format_exc().splitlines()[-2].replace('[obj]', '['+repr(obj)+']') + return "while testing (with refimported=%s): %s" % (refimported, line.lstrip()) + +if __name__ == '__main__' and len(sys.argv) >= 3 and sys.argv[1] == '--child': + # Test session loading in a fresh interpreter session. + refimported = (sys.argv[2] == 'True') + dill.load_module(session_file % refimported, module='__main__') + + def test_modules(refimported): + # FIXME: In this test setting with CPython 3.7, 'calendar' is not included + # in sys.modules, independent of the value of refimported. Tried to + # run garbage collection just before loading the session with no luck. It + # fails even when preceding them with 'import calendar'. Needed to run + # these kinds of tests in a supbrocess. Failing test sample: + # assert globals()['day_name'] is sys.modules['calendar'].__dict__['day_name'] + try: + for obj in ('json', 'url', 'local_mod', 'sax', 'dom'): + assert globals()[obj].__name__ in sys.modules + assert 'calendar' in sys.modules and 'cmath' in sys.modules + import calendar, cmath + + for obj in ('Calendar', 'isleap'): + assert globals()[obj] is sys.modules['calendar'].__dict__[obj] + assert __main__.day_name.__module__ == 'calendar' + if refimported: + assert __main__.day_name is calendar.day_name + + assert __main__.complex_log is cmath.log + + except AssertionError as error: + error.args = (_error_line(error, obj, refimported),) + raise + + test_modules(refimported) + sys.exit() + +#################### +# Parent process # +#################### + +# Create various kinds of objects to test different internal logics. + +## Modules. +import json # top-level module +import urllib as url # top-level module under alias +from xml import sax # submodule +import xml.dom.minidom as dom # submodule under alias +import test_dictviews as local_mod # non-builtin top-level module + +## Imported objects. +from calendar import Calendar, isleap, day_name # class, function, other object +from cmath import log as complex_log # imported with alias + +## Local objects. +x = 17 +empty = None +names = ['Alice', 'Bob', 'Carol'] +def squared(x): return x**2 +cubed = lambda x: x**3 +class Person: + def __init__(self, name, age): + self.name = name + self.age = age +person = Person(names[0], x) +class CalendarSubclass(Calendar): + def weekdays(self): + return [day_name[i] for i in self.iterweekdays()] +cal = CalendarSubclass() +selfref = __main__ + +# Setup global namespace for session saving tests. +class TestNamespace: + test_globals = globals().copy() + def __init__(self, **extra): + self.extra = extra + def __enter__(self): + self.backup = globals().copy() + globals().clear() + globals().update(self.test_globals) + globals().update(self.extra) + return self + def __exit__(self, *exc_info): + globals().clear() + globals().update(self.backup) + +def _clean_up_cache(module): + cached = module.__file__.split('.', 1)[0] + '.pyc' + cached = module.__cached__ if hasattr(module, '__cached__') else cached + pycache = os.path.join(os.path.dirname(module.__file__), '__pycache__') + for remove, file in [(os.remove, cached), (os.removedirs, pycache)]: + with suppress(OSError): + remove(file) + +atexit.register(_clean_up_cache, local_mod) + +def _test_objects(main, globals_copy, refimported): + try: + main_dict = __main__.__dict__ + global Person, person, Calendar, CalendarSubclass, cal, selfref + + for obj in ('json', 'url', 'local_mod', 'sax', 'dom'): + assert globals()[obj].__name__ == globals_copy[obj].__name__ + + for obj in ('x', 'empty', 'names'): + assert main_dict[obj] == globals_copy[obj] + + for obj in ['squared', 'cubed']: + assert main_dict[obj].__globals__ is main_dict + assert main_dict[obj](3) == globals_copy[obj](3) + + assert Person.__module__ == __main__.__name__ + assert isinstance(person, Person) + assert person.age == globals_copy['person'].age + + assert issubclass(CalendarSubclass, Calendar) + assert isinstance(cal, CalendarSubclass) + assert cal.weekdays() == globals_copy['cal'].weekdays() + + assert selfref is __main__ + + except AssertionError as error: + error.args = (_error_line(error, obj, refimported),) + raise + +def test_session_main(refimported): + """test dump/load_module() for __main__, both in this process and in a subprocess""" + extra_objects = {} + if refimported: + # Test unpickleable imported object in main. + from sys import flags + extra_objects['flags'] = flags + + with TestNamespace(**extra_objects) as ns: + try: + # Test session loading in a new session. + dill.dump_module(session_file % refimported, refimported=refimported) + from dill.tests.__main__ import python, shell, sp + error = sp.call([python, __file__, '--child', str(refimported)], shell=shell) + if error: sys.exit(error) + finally: + with suppress(OSError): + os.remove(session_file % refimported) + + # Test session loading in the same session. + session_buffer = BytesIO() + dill.dump_module(session_buffer, refimported=refimported) + session_buffer.seek(0) + dill.load_module(session_buffer, module='__main__') + ns.backup['_test_objects'](__main__, ns.backup, refimported) + +def test_session_other(): + """test dump/load_module() for a module other than __main__""" + import test_classdef as module + atexit.register(_clean_up_cache, module) + module.selfref = module + dict_objects = [obj for obj in module.__dict__.keys() if not obj.startswith('__')] + + session_buffer = BytesIO() + dill.dump_module(session_buffer, module) + + for obj in dict_objects: + del module.__dict__[obj] + + session_buffer.seek(0) + dill.load_module(session_buffer, module) + + assert all(obj in module.__dict__ for obj in dict_objects) + assert module.selfref is module + +def test_runtime_module(): + from types import ModuleType + modname = '__runtime__' + runtime = ModuleType(modname) + runtime.x = 42 + + mod = dill.session._stash_modules(runtime) + if mod is not runtime: + print("There are objects to save by referenece that shouldn't be:", + mod.__dill_imported, mod.__dill_imported_as, mod.__dill_imported_top_level, + file=sys.stderr) + + # This is also for code coverage, tests the use case of dump_module(refimported=True) + # without imported objects in the namespace. It's a contrived example because + # even dill can't be in it. This should work after fixing #462. + session_buffer = BytesIO() + dill.dump_module(session_buffer, module=runtime, refimported=True) + session_dump = session_buffer.getvalue() + + # Pass a new runtime created module with the same name. + runtime = ModuleType(modname) # empty + return_val = dill.load_module(BytesIO(session_dump), module=runtime) + assert return_val is None + assert runtime.__name__ == modname + assert runtime.x == 42 + assert runtime not in sys.modules.values() + + # Pass nothing as main. load_module() must create it. + session_buffer.seek(0) + runtime = dill.load_module(BytesIO(session_dump)) + assert runtime.__name__ == modname + assert runtime.x == 42 + assert runtime not in sys.modules.values() + +def test_refimported_imported_as(): + import collections + import concurrent.futures + import types + import typing + mod = sys.modules['__test__'] = types.ModuleType('__test__') + dill.executor = concurrent.futures.ThreadPoolExecutor(max_workers=1) + mod.Dict = collections.UserDict # select by type + mod.AsyncCM = typing.AsyncContextManager # select by __module__ + mod.thread_exec = dill.executor # select by __module__ with regex + + session_buffer = BytesIO() + dill.dump_module(session_buffer, mod, refimported=True) + session_buffer.seek(0) + mod = dill.load(session_buffer) + del sys.modules['__test__'] + + assert set(mod.__dill_imported_as) == { + ('collections', 'UserDict', 'Dict'), + ('typing', 'AsyncContextManager', 'AsyncCM'), + ('dill', 'executor', 'thread_exec'), + } + +def test_load_module_asdict(): + with TestNamespace(): + session_buffer = BytesIO() + dill.dump_module(session_buffer) + + global empty, names, x, y + x = y = 0 # change x and create y + del empty + globals_state = globals().copy() + + session_buffer.seek(0) + main_vars = dill.load_module_asdict(session_buffer) + + assert main_vars is not globals() + assert globals() == globals_state + + assert main_vars['__name__'] == '__main__' + assert main_vars['names'] == names + assert main_vars['names'] is not names + assert main_vars['x'] != x + assert 'y' not in main_vars + assert 'empty' in main_vars + +if __name__ == '__main__': + test_session_main(refimported=False) + test_session_main(refimported=True) + test_session_other() + test_runtime_module() + test_refimported_imported_as() + test_load_module_asdict() diff --git a/local_packages/dill/tests/test_source.py b/local_packages/dill/tests/test_source.py new file mode 100644 index 0000000..260ce6a --- /dev/null +++ b/local_packages/dill/tests/test_source.py @@ -0,0 +1,186 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +from dill.source import getsource, getname, _wrap, getimport +from dill.source import importable +from dill._dill import IS_PYPY + +import sys +PY310b = 0x30a00b1 + +f = lambda x: x**2 +def g(x): return f(x) - x + +def h(x): + def g(x): return x + return g(x) - x + +class Foo(object): + def bar(self, x): + return x*x+x +_foo = Foo() + +def add(x,y): + return x+y + +# yes, same as 'f', but things are tricky when it comes to pointers +squared = lambda x:x**2 + +class Bar: + pass +_bar = Bar() + + # inspect.getsourcelines # dill.source.getblocks +def test_getsource(): + assert getsource(f) == 'f = lambda x: x**2\n' + assert getsource(g) == 'def g(x): return f(x) - x\n' + assert getsource(h) == 'def h(x):\n def g(x): return x\n return g(x) - x\n' + assert getname(f) == 'f' + assert getname(g) == 'g' + assert getname(h) == 'h' + assert _wrap(f)(4) == 16 + assert _wrap(g)(4) == 12 + assert _wrap(h)(4) == 0 + + assert getname(Foo) == 'Foo' + assert getname(Bar) == 'Bar' + assert getsource(Bar) == 'class Bar:\n pass\n' + assert getsource(Foo) == 'class Foo(object):\n def bar(self, x):\n return x*x+x\n' + #XXX: add getsource for _foo, _bar + +# test itself +def test_itself(): + assert getimport(getimport)=='from dill.source import getimport\n' + +# builtin functions and objects +def test_builtin(): + assert getimport(pow) == 'pow\n' + assert getimport(100) == '100\n' + assert getimport(True) == 'True\n' + assert getimport(pow, builtin=True) == 'from builtins import pow\n' + assert getimport(100, builtin=True) == '100\n' + assert getimport(True, builtin=True) == 'True\n' + # this is kinda BS... you can't import a None + assert getimport(None) == 'None\n' + assert getimport(None, builtin=True) == 'None\n' + + +# other imported functions +def test_imported(): + from math import sin + assert getimport(sin) == 'from math import sin\n' + +# interactively defined functions +def test_dynamic(): + assert getimport(add) == 'from %s import add\n' % __name__ + # interactive lambdas + assert getimport(squared) == 'from %s import squared\n' % __name__ + +# classes and class instances +def test_classes(): + from io import BytesIO as StringIO + y = "from _io import BytesIO\n" + x = y if (IS_PYPY or sys.hexversion >= PY310b) else "from io import BytesIO\n" + s = StringIO() + + assert getimport(StringIO) == x + assert getimport(s) == y + # interactively defined classes and class instances + assert getimport(Foo) == 'from %s import Foo\n' % __name__ + assert getimport(_foo) == 'from %s import Foo\n' % __name__ + + +# test importable +def test_importable(): + assert importable(add, source=False) == 'from %s import add\n' % __name__ + assert importable(squared, source=False) == 'from %s import squared\n' % __name__ + assert importable(Foo, source=False) == 'from %s import Foo\n' % __name__ + assert importable(Foo.bar, source=False) == 'from %s import bar\n' % __name__ + assert importable(_foo.bar, source=False) == 'from %s import bar\n' % __name__ + assert importable(None, source=False) == 'None\n' + assert importable(100, source=False) == '100\n' + + assert importable(add, source=True) == 'def add(x,y):\n return x+y\n' + assert importable(squared, source=True) == 'squared = lambda x:x**2\n' + assert importable(None, source=True) == 'None\n' + assert importable(Bar, source=True) == 'class Bar:\n pass\n' + assert importable(Foo, source=True) == 'class Foo(object):\n def bar(self, x):\n return x*x+x\n' + assert importable(Foo.bar, source=True) == 'def bar(self, x):\n return x*x+x\n' + assert importable(Foo.bar, source=False) == 'from %s import bar\n' % __name__ + assert importable(Foo.bar, alias='memo', source=False) == 'from %s import bar as memo\n' % __name__ + assert importable(Foo, alias='memo', source=False) == 'from %s import Foo as memo\n' % __name__ + assert importable(squared, alias='memo', source=False) == 'from %s import squared as memo\n' % __name__ + assert importable(squared, alias='memo', source=True) == 'memo = squared = lambda x:x**2\n' + assert importable(add, alias='memo', source=True) == 'def add(x,y):\n return x+y\n\nmemo = add\n' + assert importable(None, alias='memo', source=True) == 'memo = None\n' + assert importable(100, alias='memo', source=True) == 'memo = 100\n' + assert importable(add, builtin=True, source=False) == 'from %s import add\n' % __name__ + assert importable(squared, builtin=True, source=False) == 'from %s import squared\n' % __name__ + assert importable(Foo, builtin=True, source=False) == 'from %s import Foo\n' % __name__ + assert importable(Foo.bar, builtin=True, source=False) == 'from %s import bar\n' % __name__ + assert importable(_foo.bar, builtin=True, source=False) == 'from %s import bar\n' % __name__ + assert importable(None, builtin=True, source=False) == 'None\n' + assert importable(100, builtin=True, source=False) == '100\n' + + +def test_numpy(): + try: + import numpy as np + y = np.array + x = y([1,2,3]) + assert importable(x, source=False) == 'from numpy import array\narray([1, 2, 3])\n' + assert importable(y, source=False) == 'from %s import array\n' % y.__module__ + assert importable(x, source=True) == 'from numpy import array\narray([1, 2, 3])\n' + assert importable(y, source=True) == 'from %s import array\n' % y.__module__ + y = np.int64 + x = y(0) + assert importable(x, source=False) == 'from numpy import int64\nint64(0)\n' + assert importable(y, source=False) == 'from %s import int64\n' % y.__module__ + assert importable(x, source=True) == 'from numpy import int64\nint64(0)\n' + assert importable(y, source=True) == 'from %s import int64\n' % y.__module__ + y = np.bool_ + x = y(0) + import warnings + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', category=FutureWarning) + warnings.filterwarnings('ignore', category=DeprecationWarning) + if hasattr(np, 'bool'): b = 'bool_' if np.bool is bool else 'bool' + else: b = 'bool_' + assert importable(x, source=False) == 'from numpy import %s\n%s(False)\n' % (b,b) + assert importable(y, source=False) == 'from %s import %s\n' % (y.__module__,b) + assert importable(x, source=True) == 'from numpy import %s\n%s(False)\n' % (b,b) + assert importable(y, source=True) == 'from %s import %s\n' % (y.__module__,b) + except ImportError: pass + +#NOTE: if before getimport(pow), will cause pow to throw AssertionError +def test_foo(): + assert importable(_foo, source=True).startswith("import dill\nclass Foo(object):\n def bar(self, x):\n return x*x+x\ndill.loads(") + +def test_safe(): + import abc + obj = abc.ABC() + obj.__class__.__name__ = "(abc' + print('foo')]) # " + try: + source = getsource(obj, force=True) + assert False + except TypeError: + assert False + except SyntaxError: + pass + +if __name__ == '__main__': + test_getsource() + test_itself() + test_builtin() + test_imported() + test_dynamic() + test_classes() + test_importable() + test_numpy() + test_foo() + test_safe() diff --git a/local_packages/dill/tests/test_sources.py b/local_packages/dill/tests/test_sources.py new file mode 100644 index 0000000..2e978dc --- /dev/null +++ b/local_packages/dill/tests/test_sources.py @@ -0,0 +1,190 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @uqfoundation) +# Copyright (c) 2024-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +check that dill.source performs as expected with changes to locals in 3.13.0b1 +see: https://github.com/python/cpython/issues/118888 +""" +# repeat functions from test_source.py +f = lambda x: x**2 +def g(x): return f(x) - x + +def h(x): + def g(x): return x + return g(x) - x + +class Foo(object): + def bar(self, x): + return x*x+x +_foo = Foo() + +def add(x,y): + return x+y + +squared = lambda x:x**2 + +class Bar: + pass +_bar = Bar() + +# repeat, but from test_source.py +import test_source as ts + +# test objects created in other test modules +import test_mixins as tm + +import dill.source as ds + + +def test_isfrommain(): + assert ds.isfrommain(add) == True + assert ds.isfrommain(squared) == True + assert ds.isfrommain(Bar) == True + assert ds.isfrommain(_bar) == True + assert ds.isfrommain(ts.add) == False + assert ds.isfrommain(ts.squared) == False + assert ds.isfrommain(ts.Bar) == False + assert ds.isfrommain(ts._bar) == False + assert ds.isfrommain(tm.quad) == False + assert ds.isfrommain(tm.double_add) == False + assert ds.isfrommain(tm.quadratic) == False + assert ds.isdynamic(add) == False + assert ds.isdynamic(squared) == False + assert ds.isdynamic(ts.add) == False + assert ds.isdynamic(ts.squared) == False + assert ds.isdynamic(tm.double_add) == False + assert ds.isdynamic(tm.quadratic) == False + + +def test_matchlambda(): + assert ds._matchlambda(f, 'f = lambda x: x**2\n') + assert ds._matchlambda(squared, 'squared = lambda x:x**2\n') + assert ds._matchlambda(ts.f, 'f = lambda x: x**2\n') + assert ds._matchlambda(ts.squared, 'squared = lambda x:x**2\n') + + +def test_findsource(): + lines, lineno = ds.findsource(add) + assert lines[lineno] == 'def add(x,y):\n' + lines, lineno = ds.findsource(ts.add) + assert lines[lineno] == 'def add(x,y):\n' + lines, lineno = ds.findsource(squared) + assert lines[lineno] == 'squared = lambda x:x**2\n' + lines, lineno = ds.findsource(ts.squared) + assert lines[lineno] == 'squared = lambda x:x**2\n' + lines, lineno = ds.findsource(Bar) + assert lines[lineno] == 'class Bar:\n' + lines, lineno = ds.findsource(ts.Bar) + assert lines[lineno] == 'class Bar:\n' + lines, lineno = ds.findsource(_bar) + assert lines[lineno] == 'class Bar:\n' + lines, lineno = ds.findsource(ts._bar) + assert lines[lineno] == 'class Bar:\n' + lines, lineno = ds.findsource(tm.quad) + assert lines[lineno] == 'def quad(a=1, b=1, c=0):\n' + lines, lineno = ds.findsource(tm.double_add) + assert lines[lineno] == ' def func(*args, **kwds):\n' + lines, lineno = ds.findsource(tm.quadratic) + assert lines[lineno] == ' def dec(f):\n' + + +def test_getsourcelines(): + assert ''.join(ds.getsourcelines(add)[0]) == 'def add(x,y):\n return x+y\n' + assert ''.join(ds.getsourcelines(ts.add)[0]) == 'def add(x,y):\n return x+y\n' + assert ''.join(ds.getsourcelines(squared)[0]) == 'squared = lambda x:x**2\n' + assert ''.join(ds.getsourcelines(ts.squared)[0]) == 'squared = lambda x:x**2\n' + assert ''.join(ds.getsourcelines(Bar)[0]) == 'class Bar:\n pass\n' + assert ''.join(ds.getsourcelines(ts.Bar)[0]) == 'class Bar:\n pass\n' + assert ''.join(ds.getsourcelines(_bar)[0]) == 'class Bar:\n pass\n' #XXX: ? + assert ''.join(ds.getsourcelines(ts._bar)[0]) == 'class Bar:\n pass\n' #XXX: ? + assert ''.join(ds.getsourcelines(tm.quad)[0]) == 'def quad(a=1, b=1, c=0):\n inverted = [False]\n def invert():\n inverted[0] = not inverted[0]\n def dec(f):\n def func(*args, **kwds):\n x = f(*args, **kwds)\n if inverted[0]: x = -x\n return a*x**2 + b*x + c\n func.__wrapped__ = f\n func.invert = invert\n func.inverted = inverted\n return func\n return dec\n' + assert ''.join(ds.getsourcelines(tm.quadratic)[0]) == ' def dec(f):\n def func(*args,**kwds):\n fx = f(*args,**kwds)\n return a*fx**2 + b*fx + c\n return func\n' + assert ''.join(ds.getsourcelines(tm.quadratic, lstrip=True)[0]) == 'def dec(f):\n def func(*args,**kwds):\n fx = f(*args,**kwds)\n return a*fx**2 + b*fx + c\n return func\n' + assert ''.join(ds.getsourcelines(tm.quadratic, enclosing=True)[0]) == 'def quad_factory(a=1,b=1,c=0):\n def dec(f):\n def func(*args,**kwds):\n fx = f(*args,**kwds)\n return a*fx**2 + b*fx + c\n return func\n return dec\n' + assert ''.join(ds.getsourcelines(tm.double_add)[0]) == ' def func(*args, **kwds):\n x = f(*args, **kwds)\n if inverted[0]: x = -x\n return a*x**2 + b*x + c\n' + assert ''.join(ds.getsourcelines(tm.double_add, enclosing=True)[0]) == 'def quad(a=1, b=1, c=0):\n inverted = [False]\n def invert():\n inverted[0] = not inverted[0]\n def dec(f):\n def func(*args, **kwds):\n x = f(*args, **kwds)\n if inverted[0]: x = -x\n return a*x**2 + b*x + c\n func.__wrapped__ = f\n func.invert = invert\n func.inverted = inverted\n return func\n return dec\n' + + +def test_indent(): + assert ds.outdent(''.join(ds.getsourcelines(tm.quadratic)[0])) == ''.join(ds.getsourcelines(tm.quadratic, lstrip=True)[0]) + assert ds.indent(''.join(ds.getsourcelines(tm.quadratic, lstrip=True)[0]), 2) == ''.join(ds.getsourcelines(tm.quadratic)[0]) + + +def test_dumpsource(): + local = {} + exec(ds.dumpsource(add, alias='raw'), {}, local) + exec(ds.dumpsource(ts.add, alias='mod'), {}, local) + assert local['raw'](1,2) == local['mod'](1,2) + exec(ds.dumpsource(squared, alias='raw'), {}, local) + exec(ds.dumpsource(ts.squared, alias='mod'), {}, local) + assert local['raw'](3) == local['mod'](3) + assert ds._wrap(add)(1,2) == ds._wrap(ts.add)(1,2) + assert ds._wrap(squared)(3) == ds._wrap(ts.squared)(3) + + +def test_name(): + assert ds._namespace(add) == ds.getname(add, fqn=True).split('.') + assert ds._namespace(ts.add) == ds.getname(ts.add, fqn=True).split('.') + assert ds._namespace(squared) == ds.getname(squared, fqn=True).split('.') + assert ds._namespace(ts.squared) == ds.getname(ts.squared, fqn=True).split('.') + assert ds._namespace(Bar) == ds.getname(Bar, fqn=True).split('.') + assert ds._namespace(ts.Bar) == ds.getname(ts.Bar, fqn=True).split('.') + assert ds._namespace(tm.quad) == ds.getname(tm.quad, fqn=True).split('.') + #XXX: the following also works, however behavior may be wrong for nested functions + #assert ds._namespace(tm.double_add) == ds.getname(tm.double_add, fqn=True).split('.') + #assert ds._namespace(tm.quadratic) == ds.getname(tm.quadratic, fqn=True).split('.') + assert ds.getname(add) == 'add' + assert ds.getname(ts.add) == 'add' + assert ds.getname(squared) == 'squared' + assert ds.getname(ts.squared) == 'squared' + assert ds.getname(Bar) == 'Bar' + assert ds.getname(ts.Bar) == 'Bar' + assert ds.getname(tm.quad) == 'quad' + assert ds.getname(tm.double_add) == 'func' #XXX: ? + assert ds.getname(tm.quadratic) == 'dec' #XXX: ? + + +def test_getimport(): + local = {} + exec(ds.getimport(add, alias='raw'), {}, local) + exec(ds.getimport(ts.add, alias='mod'), {}, local) + assert local['raw'](1,2) == local['mod'](1,2) + exec(ds.getimport(squared, alias='raw'), {}, local) + exec(ds.getimport(ts.squared, alias='mod'), {}, local) + assert local['raw'](3) == local['mod'](3) + exec(ds.getimport(Bar, alias='raw'), {}, local) + exec(ds.getimport(ts.Bar, alias='mod'), {}, local) + assert ds.getname(local['raw']) == ds.getname(local['mod']) + exec(ds.getimport(tm.quad, alias='mod'), {}, local) + assert local['mod']()(sum)([1,2,3]) == tm.quad()(sum)([1,2,3]) + #FIXME: wrong results for nested functions (e.g. tm.double_add, tm.quadratic) + + +def test_importable(): + assert ds.importable(add, source=False) == ds.getimport(add) + assert ds.importable(add) == ds.getsource(add) + assert ds.importable(squared, source=False) == ds.getimport(squared) + assert ds.importable(squared) == ds.getsource(squared) + assert ds.importable(Bar, source=False) == ds.getimport(Bar) + assert ds.importable(Bar) == ds.getsource(Bar) + assert ds.importable(ts.add) == ds.getimport(ts.add) + assert ds.importable(ts.add, source=True) == ds.getsource(ts.add) + assert ds.importable(ts.squared) == ds.getimport(ts.squared) + assert ds.importable(ts.squared, source=True) == ds.getsource(ts.squared) + assert ds.importable(ts.Bar) == ds.getimport(ts.Bar) + assert ds.importable(ts.Bar, source=True) == ds.getsource(ts.Bar) + + +if __name__ == '__main__': + test_isfrommain() + test_matchlambda() + test_findsource() + test_getsourcelines() + test_indent() + test_dumpsource() + test_name() + test_getimport() + test_importable() diff --git a/local_packages/dill/tests/test_temp.py b/local_packages/dill/tests/test_temp.py new file mode 100644 index 0000000..6de1995 --- /dev/null +++ b/local_packages/dill/tests/test_temp.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import sys +from dill.temp import dump, dump_source, dumpIO, dumpIO_source +from dill.temp import load, load_source, loadIO, loadIO_source +WINDOWS = sys.platform[:3] == 'win' + + +f = lambda x: x**2 +x = [1,2,3,4,5] + +# source code to tempfile +def test_code_to_tempfile(): + if not WINDOWS: #see: https://bugs.python.org/issue14243 + pyfile = dump_source(f, alias='_f') + _f = load_source(pyfile) + assert _f(4) == f(4) + +# source code to stream +def test_code_to_stream(): + pyfile = dumpIO_source(f, alias='_f') + _f = loadIO_source(pyfile) + assert _f(4) == f(4) + +# pickle to tempfile +def test_pickle_to_tempfile(): + if not WINDOWS: #see: https://bugs.python.org/issue14243 + dumpfile = dump(x) + _x = load(dumpfile) + assert _x == x + +# pickle to stream +def test_pickle_to_stream(): + dumpfile = dumpIO(x) + _x = loadIO(dumpfile) + assert _x == x + +### now testing the objects ### +f = lambda x: x**2 +def g(x): return f(x) - x + +def h(x): + def g(x): return x + return g(x) - x + +class Foo(object): + def bar(self, x): + return x*x+x +_foo = Foo() + +def add(x,y): + return x+y + +# yes, same as 'f', but things are tricky when it comes to pointers +squared = lambda x:x**2 + +class Bar: + pass +_bar = Bar() + + +# test function-type objects that take 2 args +def test_two_arg_functions(): + for obj in [add]: + pyfile = dumpIO_source(obj, alias='_obj') + _obj = loadIO_source(pyfile) + assert _obj(4,2) == obj(4,2) + +# test function-type objects that take 1 arg +def test_one_arg_functions(): + for obj in [g, h, squared]: + pyfile = dumpIO_source(obj, alias='_obj') + _obj = loadIO_source(pyfile) + assert _obj(4) == obj(4) + +# test instance-type objects +#for obj in [_bar, _foo]: +# pyfile = dumpIO_source(obj, alias='_obj') +# _obj = loadIO_source(pyfile) +# assert type(_obj) == type(obj) + +# test the rest of the objects +def test_the_rest(): + for obj in [Bar, Foo, Foo.bar, _foo.bar]: + pyfile = dumpIO_source(obj, alias='_obj') + _obj = loadIO_source(pyfile) + assert _obj.__name__ == obj.__name__ + + +if __name__ == '__main__': + test_code_to_tempfile() + test_code_to_stream() + test_pickle_to_tempfile() + test_pickle_to_stream() + test_two_arg_functions() + test_one_arg_functions() + test_the_rest() diff --git a/local_packages/dill/tests/test_threads.py b/local_packages/dill/tests/test_threads.py new file mode 100644 index 0000000..c8843d9 --- /dev/null +++ b/local_packages/dill/tests/test_threads.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2024-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import dill +dill.settings['recurse'] = True + + +def test_new_thread(): + import threading + t = threading.Thread() + t_ = dill.copy(t) + assert t.is_alive() == t_.is_alive() + for i in ['daemon','name','ident','native_id']: + if hasattr(t, i): + assert getattr(t, i) == getattr(t_, i) + +def test_run_thread(): + import threading + t = threading.Thread() + t.start() + t_ = dill.copy(t) + assert t.is_alive() == t_.is_alive() + for i in ['daemon','name','ident','native_id']: + if hasattr(t, i): + assert getattr(t, i) == getattr(t_, i) + +def test_join_thread(): + import threading + t = threading.Thread() + t.start() + t.join() + t_ = dill.copy(t) + assert t.is_alive() == t_.is_alive() + for i in ['daemon','name','ident','native_id']: + if hasattr(t, i): + assert getattr(t, i) == getattr(t_, i) + + +if __name__ == '__main__': + test_new_thread() + test_run_thread() + test_join_thread() diff --git a/local_packages/dill/tests/test_weakref.py b/local_packages/dill/tests/test_weakref.py new file mode 100644 index 0000000..5962de5 --- /dev/null +++ b/local_packages/dill/tests/test_weakref.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2026 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE + +import dill +dill.settings['recurse'] = True +import weakref + +class _class: + def _method(self): + pass + +class _callable_class: + def __call__(self): + pass + +def _function(): + pass + + +def test_weakref(): + o = _class() + oc = _callable_class() + f = _function + x = _class + + # ReferenceType + r = weakref.ref(o) + d_r = weakref.ref(_class()) + fr = weakref.ref(f) + xr = weakref.ref(x) + + # ProxyType + p = weakref.proxy(o) + d_p = weakref.proxy(_class()) + + # CallableProxyType + cp = weakref.proxy(oc) + d_cp = weakref.proxy(_callable_class()) + fp = weakref.proxy(f) + xp = weakref.proxy(x) + + objlist = [r,d_r,fr,xr, p,d_p, cp,d_cp,fp,xp] + #dill.detect.trace(True) + + for obj in objlist: + res = dill.detect.errors(obj) + if res: + print ("%r:\n %s" % (obj, res)) + # else: + # print ("PASS: %s" % obj) + assert not res + +def test_dictproxy(): + from dill._dill import DictProxyType + try: + m = DictProxyType({"foo": "bar"}) + except Exception: + m = type.__dict__ + mp = dill.copy(m) + assert mp.items() == m.items() + + +if __name__ == '__main__': + test_weakref() + from dill._dill import IS_PYPY + if not IS_PYPY: + test_dictproxy() diff --git a/local_packages/include/GLFW/glfw3.h b/local_packages/include/GLFW/glfw3.h new file mode 100644 index 0000000..751615e --- /dev/null +++ b/local_packages/include/GLFW/glfw3.h @@ -0,0 +1,6389 @@ +/************************************************************************* + * GLFW 3.4 - www.glfw.org + * A library for OpenGL, window and input + *------------------------------------------------------------------------ + * Copyright (c) 2002-2006 Marcus Geelnard + * Copyright (c) 2006-2019 Camilla Löwy + * + * This software is provided 'as-is', without any express or implied + * warranty. In no event will the authors be held liable for any damages + * arising from the use of this software. + * + * Permission is granted to anyone to use this software for any purpose, + * including commercial applications, and to alter it and redistribute it + * freely, subject to the following restrictions: + * + * 1. The origin of this software must not be misrepresented; you must not + * claim that you wrote the original software. If you use this software + * in a product, an acknowledgment in the product documentation would + * be appreciated but is not required. + * + * 2. Altered source versions must be plainly marked as such, and must not + * be misrepresented as being the original software. + * + * 3. This notice may not be removed or altered from any source + * distribution. + * + *************************************************************************/ + +#ifndef _glfw3_h_ +#define _glfw3_h_ + +#ifdef __cplusplus +extern "C" { +#endif + + +/************************************************************************* + * Doxygen documentation + *************************************************************************/ + +/*! @file glfw3.h + * @brief The header of the GLFW 3 API. + * + * This is the header file of the GLFW 3 API. It defines all its types and + * declares all its functions. + * + * For more information about how to use this file, see @ref build_include. + */ +/*! @defgroup context Context reference + * @brief Functions and types related to OpenGL and OpenGL ES contexts. + * + * This is the reference documentation for OpenGL and OpenGL ES context related + * functions. For more task-oriented information, see the @ref context_guide. + */ +/*! @defgroup vulkan Vulkan support reference + * @brief Functions and types related to Vulkan. + * + * This is the reference documentation for Vulkan related functions and types. + * For more task-oriented information, see the @ref vulkan_guide. + */ +/*! @defgroup init Initialization, version and error reference + * @brief Functions and types related to initialization and error handling. + * + * This is the reference documentation for initialization and termination of + * the library, version management and error handling. For more task-oriented + * information, see the @ref intro_guide. + */ +/*! @defgroup input Input reference + * @brief Functions and types related to input handling. + * + * This is the reference documentation for input related functions and types. + * For more task-oriented information, see the @ref input_guide. + */ +/*! @defgroup monitor Monitor reference + * @brief Functions and types related to monitors. + * + * This is the reference documentation for monitor related functions and types. + * For more task-oriented information, see the @ref monitor_guide. + */ +/*! @defgroup window Window reference + * @brief Functions and types related to windows. + * + * This is the reference documentation for window related functions and types, + * including creation, deletion and event polling. For more task-oriented + * information, see the @ref window_guide. + */ + + +/************************************************************************* + * Compiler- and platform-specific preprocessor work + *************************************************************************/ + +/* If we are we on Windows, we want a single define for it. + */ +#if !defined(_WIN32) && (defined(__WIN32__) || defined(WIN32) || defined(__MINGW32__)) + #define _WIN32 +#endif /* _WIN32 */ + +/* Include because most Windows GLU headers need wchar_t and + * the macOS OpenGL header blocks the definition of ptrdiff_t by glext.h. + * Include it unconditionally to avoid surprising side-effects. + */ +#include + +/* Include because it is needed by Vulkan and related functions. + * Include it unconditionally to avoid surprising side-effects. + */ +#include + +#if defined(GLFW_INCLUDE_VULKAN) + #include +#endif /* Vulkan header */ + +/* The Vulkan header may have indirectly included windows.h (because of + * VK_USE_PLATFORM_WIN32_KHR) so we offer our replacement symbols after it. + */ + +/* It is customary to use APIENTRY for OpenGL function pointer declarations on + * all platforms. Additionally, the Windows OpenGL header needs APIENTRY. + */ +#if !defined(APIENTRY) + #if defined(_WIN32) + #define APIENTRY __stdcall + #else + #define APIENTRY + #endif + #define GLFW_APIENTRY_DEFINED +#endif /* APIENTRY */ + +/* Some Windows OpenGL headers need this. + */ +#if !defined(WINGDIAPI) && defined(_WIN32) + #define WINGDIAPI __declspec(dllimport) + #define GLFW_WINGDIAPI_DEFINED +#endif /* WINGDIAPI */ + +/* Some Windows GLU headers need this. + */ +#if !defined(CALLBACK) && defined(_WIN32) + #define CALLBACK __stdcall + #define GLFW_CALLBACK_DEFINED +#endif /* CALLBACK */ + +/* Include the chosen OpenGL or OpenGL ES headers. + */ +#if defined(GLFW_INCLUDE_ES1) + + #include + #if defined(GLFW_INCLUDE_GLEXT) + #include + #endif + +#elif defined(GLFW_INCLUDE_ES2) + + #include + #if defined(GLFW_INCLUDE_GLEXT) + #include + #endif + +#elif defined(GLFW_INCLUDE_ES3) + + #include + #if defined(GLFW_INCLUDE_GLEXT) + #include + #endif + +#elif defined(GLFW_INCLUDE_ES31) + + #include + #if defined(GLFW_INCLUDE_GLEXT) + #include + #endif + +#elif defined(GLFW_INCLUDE_ES32) + + #include + #if defined(GLFW_INCLUDE_GLEXT) + #include + #endif + +#elif defined(GLFW_INCLUDE_GLCOREARB) + + #if defined(__APPLE__) + + #include + #if defined(GLFW_INCLUDE_GLEXT) + #include + #endif /*GLFW_INCLUDE_GLEXT*/ + + #else /*__APPLE__*/ + + #include + #if defined(GLFW_INCLUDE_GLEXT) + #include + #endif + + #endif /*__APPLE__*/ + +#elif defined(GLFW_INCLUDE_GLU) + + #if defined(__APPLE__) + + #if defined(GLFW_INCLUDE_GLU) + #include + #endif + + #else /*__APPLE__*/ + + #if defined(GLFW_INCLUDE_GLU) + #include + #endif + + #endif /*__APPLE__*/ + +#elif !defined(GLFW_INCLUDE_NONE) && \ + !defined(__gl_h_) && \ + !defined(__gles1_gl_h_) && \ + !defined(__gles2_gl2_h_) && \ + !defined(__gles2_gl3_h_) && \ + !defined(__gles2_gl31_h_) && \ + !defined(__gles2_gl32_h_) && \ + !defined(__gl_glcorearb_h_) && \ + !defined(__gl2_h_) /*legacy*/ && \ + !defined(__gl3_h_) /*legacy*/ && \ + !defined(__gl31_h_) /*legacy*/ && \ + !defined(__gl32_h_) /*legacy*/ && \ + !defined(__glcorearb_h_) /*legacy*/ && \ + !defined(__GL_H__) /*non-standard*/ && \ + !defined(__gltypes_h_) /*non-standard*/ && \ + !defined(__glee_h_) /*non-standard*/ + + #if defined(__APPLE__) + + #if !defined(GLFW_INCLUDE_GLEXT) + #define GL_GLEXT_LEGACY + #endif + #include + + #else /*__APPLE__*/ + + #include + #if defined(GLFW_INCLUDE_GLEXT) + #include + #endif + + #endif /*__APPLE__*/ + +#endif /* OpenGL and OpenGL ES headers */ + +#if defined(GLFW_DLL) && defined(_GLFW_BUILD_DLL) + /* GLFW_DLL must be defined by applications that are linking against the DLL + * version of the GLFW library. _GLFW_BUILD_DLL is defined by the GLFW + * configuration header when compiling the DLL version of the library. + */ + #error "You must not have both GLFW_DLL and _GLFW_BUILD_DLL defined" +#endif + +/* GLFWAPI is used to declare public API functions for export + * from the DLL / shared library / dynamic library. + */ +#if defined(_WIN32) && defined(_GLFW_BUILD_DLL) + /* We are building GLFW as a Win32 DLL */ + #define GLFWAPI __declspec(dllexport) +#elif defined(_WIN32) && defined(GLFW_DLL) + /* We are calling GLFW as a Win32 DLL */ + #define GLFWAPI __declspec(dllimport) +#elif defined(__GNUC__) && defined(_GLFW_BUILD_DLL) + /* We are building GLFW as a shared / dynamic library */ + #define GLFWAPI __attribute__((visibility("default"))) +#else + /* We are building or calling GLFW as a static library */ + #define GLFWAPI +#endif + + +/************************************************************************* + * GLFW API tokens + *************************************************************************/ + +/*! @name GLFW version macros + * @{ */ +/*! @brief The major version number of the GLFW header. + * + * The major version number of the GLFW header. This is incremented when the + * API is changed in non-compatible ways. + * @ingroup init + */ +#define GLFW_VERSION_MAJOR 3 +/*! @brief The minor version number of the GLFW header. + * + * The minor version number of the GLFW header. This is incremented when + * features are added to the API but it remains backward-compatible. + * @ingroup init + */ +#define GLFW_VERSION_MINOR 4 +/*! @brief The revision number of the GLFW header. + * + * The revision number of the GLFW header. This is incremented when a bug fix + * release is made that does not contain any API changes. + * @ingroup init + */ +#define GLFW_VERSION_REVISION 0 +/*! @} */ + +/*! @brief One. + * + * This is only semantic sugar for the number 1. You can instead use `1` or + * `true` or `_True` or `GL_TRUE` or `VK_TRUE` or anything else that is equal + * to one. + * + * @ingroup init + */ +#define GLFW_TRUE 1 +/*! @brief Zero. + * + * This is only semantic sugar for the number 0. You can instead use `0` or + * `false` or `_False` or `GL_FALSE` or `VK_FALSE` or anything else that is + * equal to zero. + * + * @ingroup init + */ +#define GLFW_FALSE 0 + +/*! @name Key and button actions + * @{ */ +/*! @brief The key or mouse button was released. + * + * The key or mouse button was released. + * + * @ingroup input + */ +#define GLFW_RELEASE 0 +/*! @brief The key or mouse button was pressed. + * + * The key or mouse button was pressed. + * + * @ingroup input + */ +#define GLFW_PRESS 1 +/*! @brief The key was held down until it repeated. + * + * The key was held down until it repeated. + * + * @ingroup input + */ +#define GLFW_REPEAT 2 +/*! @} */ + +/*! @defgroup hat_state Joystick hat states + * @brief Joystick hat states. + * + * See [joystick hat input](@ref joystick_hat) for how these are used. + * + * @ingroup input + * @{ */ +#define GLFW_HAT_CENTERED 0 +#define GLFW_HAT_UP 1 +#define GLFW_HAT_RIGHT 2 +#define GLFW_HAT_DOWN 4 +#define GLFW_HAT_LEFT 8 +#define GLFW_HAT_RIGHT_UP (GLFW_HAT_RIGHT | GLFW_HAT_UP) +#define GLFW_HAT_RIGHT_DOWN (GLFW_HAT_RIGHT | GLFW_HAT_DOWN) +#define GLFW_HAT_LEFT_UP (GLFW_HAT_LEFT | GLFW_HAT_UP) +#define GLFW_HAT_LEFT_DOWN (GLFW_HAT_LEFT | GLFW_HAT_DOWN) +/*! @} */ + +/*! @defgroup keys Keyboard keys + * @brief Keyboard key IDs. + * + * See [key input](@ref input_key) for how these are used. + * + * These key codes are inspired by the _USB HID Usage Tables v1.12_ (p. 53-60), + * but re-arranged to map to 7-bit ASCII for printable keys (function keys are + * put in the 256+ range). + * + * The naming of the key codes follow these rules: + * - The US keyboard layout is used + * - Names of printable alphanumeric characters are used (e.g. "A", "R", + * "3", etc.) + * - For non-alphanumeric characters, Unicode:ish names are used (e.g. + * "COMMA", "LEFT_SQUARE_BRACKET", etc.). Note that some names do not + * correspond to the Unicode standard (usually for brevity) + * - Keys that lack a clear US mapping are named "WORLD_x" + * - For non-printable keys, custom names are used (e.g. "F4", + * "BACKSPACE", etc.) + * + * @ingroup input + * @{ + */ + +/* The unknown key */ +#define GLFW_KEY_UNKNOWN -1 + +/* Printable keys */ +#define GLFW_KEY_SPACE 32 +#define GLFW_KEY_APOSTROPHE 39 /* ' */ +#define GLFW_KEY_COMMA 44 /* , */ +#define GLFW_KEY_MINUS 45 /* - */ +#define GLFW_KEY_PERIOD 46 /* . */ +#define GLFW_KEY_SLASH 47 /* / */ +#define GLFW_KEY_0 48 +#define GLFW_KEY_1 49 +#define GLFW_KEY_2 50 +#define GLFW_KEY_3 51 +#define GLFW_KEY_4 52 +#define GLFW_KEY_5 53 +#define GLFW_KEY_6 54 +#define GLFW_KEY_7 55 +#define GLFW_KEY_8 56 +#define GLFW_KEY_9 57 +#define GLFW_KEY_SEMICOLON 59 /* ; */ +#define GLFW_KEY_EQUAL 61 /* = */ +#define GLFW_KEY_A 65 +#define GLFW_KEY_B 66 +#define GLFW_KEY_C 67 +#define GLFW_KEY_D 68 +#define GLFW_KEY_E 69 +#define GLFW_KEY_F 70 +#define GLFW_KEY_G 71 +#define GLFW_KEY_H 72 +#define GLFW_KEY_I 73 +#define GLFW_KEY_J 74 +#define GLFW_KEY_K 75 +#define GLFW_KEY_L 76 +#define GLFW_KEY_M 77 +#define GLFW_KEY_N 78 +#define GLFW_KEY_O 79 +#define GLFW_KEY_P 80 +#define GLFW_KEY_Q 81 +#define GLFW_KEY_R 82 +#define GLFW_KEY_S 83 +#define GLFW_KEY_T 84 +#define GLFW_KEY_U 85 +#define GLFW_KEY_V 86 +#define GLFW_KEY_W 87 +#define GLFW_KEY_X 88 +#define GLFW_KEY_Y 89 +#define GLFW_KEY_Z 90 +#define GLFW_KEY_LEFT_BRACKET 91 /* [ */ +#define GLFW_KEY_BACKSLASH 92 /* \ */ +#define GLFW_KEY_RIGHT_BRACKET 93 /* ] */ +#define GLFW_KEY_GRAVE_ACCENT 96 /* ` */ +#define GLFW_KEY_WORLD_1 161 /* non-US #1 */ +#define GLFW_KEY_WORLD_2 162 /* non-US #2 */ + +/* Function keys */ +#define GLFW_KEY_ESCAPE 256 +#define GLFW_KEY_ENTER 257 +#define GLFW_KEY_TAB 258 +#define GLFW_KEY_BACKSPACE 259 +#define GLFW_KEY_INSERT 260 +#define GLFW_KEY_DELETE 261 +#define GLFW_KEY_RIGHT 262 +#define GLFW_KEY_LEFT 263 +#define GLFW_KEY_DOWN 264 +#define GLFW_KEY_UP 265 +#define GLFW_KEY_PAGE_UP 266 +#define GLFW_KEY_PAGE_DOWN 267 +#define GLFW_KEY_HOME 268 +#define GLFW_KEY_END 269 +#define GLFW_KEY_CAPS_LOCK 280 +#define GLFW_KEY_SCROLL_LOCK 281 +#define GLFW_KEY_NUM_LOCK 282 +#define GLFW_KEY_PRINT_SCREEN 283 +#define GLFW_KEY_PAUSE 284 +#define GLFW_KEY_F1 290 +#define GLFW_KEY_F2 291 +#define GLFW_KEY_F3 292 +#define GLFW_KEY_F4 293 +#define GLFW_KEY_F5 294 +#define GLFW_KEY_F6 295 +#define GLFW_KEY_F7 296 +#define GLFW_KEY_F8 297 +#define GLFW_KEY_F9 298 +#define GLFW_KEY_F10 299 +#define GLFW_KEY_F11 300 +#define GLFW_KEY_F12 301 +#define GLFW_KEY_F13 302 +#define GLFW_KEY_F14 303 +#define GLFW_KEY_F15 304 +#define GLFW_KEY_F16 305 +#define GLFW_KEY_F17 306 +#define GLFW_KEY_F18 307 +#define GLFW_KEY_F19 308 +#define GLFW_KEY_F20 309 +#define GLFW_KEY_F21 310 +#define GLFW_KEY_F22 311 +#define GLFW_KEY_F23 312 +#define GLFW_KEY_F24 313 +#define GLFW_KEY_F25 314 +#define GLFW_KEY_KP_0 320 +#define GLFW_KEY_KP_1 321 +#define GLFW_KEY_KP_2 322 +#define GLFW_KEY_KP_3 323 +#define GLFW_KEY_KP_4 324 +#define GLFW_KEY_KP_5 325 +#define GLFW_KEY_KP_6 326 +#define GLFW_KEY_KP_7 327 +#define GLFW_KEY_KP_8 328 +#define GLFW_KEY_KP_9 329 +#define GLFW_KEY_KP_DECIMAL 330 +#define GLFW_KEY_KP_DIVIDE 331 +#define GLFW_KEY_KP_MULTIPLY 332 +#define GLFW_KEY_KP_SUBTRACT 333 +#define GLFW_KEY_KP_ADD 334 +#define GLFW_KEY_KP_ENTER 335 +#define GLFW_KEY_KP_EQUAL 336 +#define GLFW_KEY_LEFT_SHIFT 340 +#define GLFW_KEY_LEFT_CONTROL 341 +#define GLFW_KEY_LEFT_ALT 342 +#define GLFW_KEY_LEFT_SUPER 343 +#define GLFW_KEY_RIGHT_SHIFT 344 +#define GLFW_KEY_RIGHT_CONTROL 345 +#define GLFW_KEY_RIGHT_ALT 346 +#define GLFW_KEY_RIGHT_SUPER 347 +#define GLFW_KEY_MENU 348 + +#define GLFW_KEY_LAST GLFW_KEY_MENU + +/*! @} */ + +/*! @defgroup mods Modifier key flags + * @brief Modifier key flags. + * + * See [key input](@ref input_key) for how these are used. + * + * @ingroup input + * @{ */ + +/*! @brief If this bit is set one or more Shift keys were held down. + * + * If this bit is set one or more Shift keys were held down. + */ +#define GLFW_MOD_SHIFT 0x0001 +/*! @brief If this bit is set one or more Control keys were held down. + * + * If this bit is set one or more Control keys were held down. + */ +#define GLFW_MOD_CONTROL 0x0002 +/*! @brief If this bit is set one or more Alt keys were held down. + * + * If this bit is set one or more Alt keys were held down. + */ +#define GLFW_MOD_ALT 0x0004 +/*! @brief If this bit is set one or more Super keys were held down. + * + * If this bit is set one or more Super keys were held down. + */ +#define GLFW_MOD_SUPER 0x0008 +/*! @brief If this bit is set the Caps Lock key is enabled. + * + * If this bit is set the Caps Lock key is enabled and the @ref + * GLFW_LOCK_KEY_MODS input mode is set. + */ +#define GLFW_MOD_CAPS_LOCK 0x0010 +/*! @brief If this bit is set the Num Lock key is enabled. + * + * If this bit is set the Num Lock key is enabled and the @ref + * GLFW_LOCK_KEY_MODS input mode is set. + */ +#define GLFW_MOD_NUM_LOCK 0x0020 + +/*! @} */ + +/*! @defgroup buttons Mouse buttons + * @brief Mouse button IDs. + * + * See [mouse button input](@ref input_mouse_button) for how these are used. + * + * @ingroup input + * @{ */ +#define GLFW_MOUSE_BUTTON_1 0 +#define GLFW_MOUSE_BUTTON_2 1 +#define GLFW_MOUSE_BUTTON_3 2 +#define GLFW_MOUSE_BUTTON_4 3 +#define GLFW_MOUSE_BUTTON_5 4 +#define GLFW_MOUSE_BUTTON_6 5 +#define GLFW_MOUSE_BUTTON_7 6 +#define GLFW_MOUSE_BUTTON_8 7 +#define GLFW_MOUSE_BUTTON_LAST GLFW_MOUSE_BUTTON_8 +#define GLFW_MOUSE_BUTTON_LEFT GLFW_MOUSE_BUTTON_1 +#define GLFW_MOUSE_BUTTON_RIGHT GLFW_MOUSE_BUTTON_2 +#define GLFW_MOUSE_BUTTON_MIDDLE GLFW_MOUSE_BUTTON_3 +/*! @} */ + +/*! @defgroup joysticks Joysticks + * @brief Joystick IDs. + * + * See [joystick input](@ref joystick) for how these are used. + * + * @ingroup input + * @{ */ +#define GLFW_JOYSTICK_1 0 +#define GLFW_JOYSTICK_2 1 +#define GLFW_JOYSTICK_3 2 +#define GLFW_JOYSTICK_4 3 +#define GLFW_JOYSTICK_5 4 +#define GLFW_JOYSTICK_6 5 +#define GLFW_JOYSTICK_7 6 +#define GLFW_JOYSTICK_8 7 +#define GLFW_JOYSTICK_9 8 +#define GLFW_JOYSTICK_10 9 +#define GLFW_JOYSTICK_11 10 +#define GLFW_JOYSTICK_12 11 +#define GLFW_JOYSTICK_13 12 +#define GLFW_JOYSTICK_14 13 +#define GLFW_JOYSTICK_15 14 +#define GLFW_JOYSTICK_16 15 +#define GLFW_JOYSTICK_LAST GLFW_JOYSTICK_16 +/*! @} */ + +/*! @defgroup gamepad_buttons Gamepad buttons + * @brief Gamepad buttons. + * + * See @ref gamepad for how these are used. + * + * @ingroup input + * @{ */ +#define GLFW_GAMEPAD_BUTTON_A 0 +#define GLFW_GAMEPAD_BUTTON_B 1 +#define GLFW_GAMEPAD_BUTTON_X 2 +#define GLFW_GAMEPAD_BUTTON_Y 3 +#define GLFW_GAMEPAD_BUTTON_LEFT_BUMPER 4 +#define GLFW_GAMEPAD_BUTTON_RIGHT_BUMPER 5 +#define GLFW_GAMEPAD_BUTTON_BACK 6 +#define GLFW_GAMEPAD_BUTTON_START 7 +#define GLFW_GAMEPAD_BUTTON_GUIDE 8 +#define GLFW_GAMEPAD_BUTTON_LEFT_THUMB 9 +#define GLFW_GAMEPAD_BUTTON_RIGHT_THUMB 10 +#define GLFW_GAMEPAD_BUTTON_DPAD_UP 11 +#define GLFW_GAMEPAD_BUTTON_DPAD_RIGHT 12 +#define GLFW_GAMEPAD_BUTTON_DPAD_DOWN 13 +#define GLFW_GAMEPAD_BUTTON_DPAD_LEFT 14 +#define GLFW_GAMEPAD_BUTTON_LAST GLFW_GAMEPAD_BUTTON_DPAD_LEFT + +#define GLFW_GAMEPAD_BUTTON_CROSS GLFW_GAMEPAD_BUTTON_A +#define GLFW_GAMEPAD_BUTTON_CIRCLE GLFW_GAMEPAD_BUTTON_B +#define GLFW_GAMEPAD_BUTTON_SQUARE GLFW_GAMEPAD_BUTTON_X +#define GLFW_GAMEPAD_BUTTON_TRIANGLE GLFW_GAMEPAD_BUTTON_Y +/*! @} */ + +/*! @defgroup gamepad_axes Gamepad axes + * @brief Gamepad axes. + * + * See @ref gamepad for how these are used. + * + * @ingroup input + * @{ */ +#define GLFW_GAMEPAD_AXIS_LEFT_X 0 +#define GLFW_GAMEPAD_AXIS_LEFT_Y 1 +#define GLFW_GAMEPAD_AXIS_RIGHT_X 2 +#define GLFW_GAMEPAD_AXIS_RIGHT_Y 3 +#define GLFW_GAMEPAD_AXIS_LEFT_TRIGGER 4 +#define GLFW_GAMEPAD_AXIS_RIGHT_TRIGGER 5 +#define GLFW_GAMEPAD_AXIS_LAST GLFW_GAMEPAD_AXIS_RIGHT_TRIGGER +/*! @} */ + +/*! @defgroup errors Error codes + * @brief Error codes. + * + * See [error handling](@ref error_handling) for how these are used. + * + * @ingroup init + * @{ */ +/*! @brief No error has occurred. + * + * No error has occurred. + * + * @analysis Yay. + */ +#define GLFW_NO_ERROR 0 +/*! @brief GLFW has not been initialized. + * + * This occurs if a GLFW function was called that must not be called unless the + * library is [initialized](@ref intro_init). + * + * @analysis Application programmer error. Initialize GLFW before calling any + * function that requires initialization. + */ +#define GLFW_NOT_INITIALIZED 0x00010001 +/*! @brief No context is current for this thread. + * + * This occurs if a GLFW function was called that needs and operates on the + * current OpenGL or OpenGL ES context but no context is current on the calling + * thread. One such function is @ref glfwSwapInterval. + * + * @analysis Application programmer error. Ensure a context is current before + * calling functions that require a current context. + */ +#define GLFW_NO_CURRENT_CONTEXT 0x00010002 +/*! @brief One of the arguments to the function was an invalid enum value. + * + * One of the arguments to the function was an invalid enum value, for example + * requesting @ref GLFW_RED_BITS with @ref glfwGetWindowAttrib. + * + * @analysis Application programmer error. Fix the offending call. + */ +#define GLFW_INVALID_ENUM 0x00010003 +/*! @brief One of the arguments to the function was an invalid value. + * + * One of the arguments to the function was an invalid value, for example + * requesting a non-existent OpenGL or OpenGL ES version like 2.7. + * + * Requesting a valid but unavailable OpenGL or OpenGL ES version will instead + * result in a @ref GLFW_VERSION_UNAVAILABLE error. + * + * @analysis Application programmer error. Fix the offending call. + */ +#define GLFW_INVALID_VALUE 0x00010004 +/*! @brief A memory allocation failed. + * + * A memory allocation failed. + * + * @analysis A bug in GLFW or the underlying operating system. Report the bug + * to our [issue tracker](https://github.com/glfw/glfw/issues). + */ +#define GLFW_OUT_OF_MEMORY 0x00010005 +/*! @brief GLFW could not find support for the requested API on the system. + * + * GLFW could not find support for the requested API on the system. + * + * @analysis The installed graphics driver does not support the requested + * API, or does not support it via the chosen context creation API. + * Below are a few examples. + * + * @par + * Some pre-installed Windows graphics drivers do not support OpenGL. AMD only + * supports OpenGL ES via EGL, while Nvidia and Intel only support it via + * a WGL or GLX extension. macOS does not provide OpenGL ES at all. The Mesa + * EGL, OpenGL and OpenGL ES libraries do not interface with the Nvidia binary + * driver. Older graphics drivers do not support Vulkan. + */ +#define GLFW_API_UNAVAILABLE 0x00010006 +/*! @brief The requested OpenGL or OpenGL ES version is not available. + * + * The requested OpenGL or OpenGL ES version (including any requested context + * or framebuffer hints) is not available on this machine. + * + * @analysis The machine does not support your requirements. If your + * application is sufficiently flexible, downgrade your requirements and try + * again. Otherwise, inform the user that their machine does not match your + * requirements. + * + * @par + * Future invalid OpenGL and OpenGL ES versions, for example OpenGL 4.8 if 5.0 + * comes out before the 4.x series gets that far, also fail with this error and + * not @ref GLFW_INVALID_VALUE, because GLFW cannot know what future versions + * will exist. + */ +#define GLFW_VERSION_UNAVAILABLE 0x00010007 +/*! @brief A platform-specific error occurred that does not match any of the + * more specific categories. + * + * A platform-specific error occurred that does not match any of the more + * specific categories. + * + * @analysis A bug or configuration error in GLFW, the underlying operating + * system or its drivers, or a lack of required resources. Report the issue to + * our [issue tracker](https://github.com/glfw/glfw/issues). + */ +#define GLFW_PLATFORM_ERROR 0x00010008 +/*! @brief The requested format is not supported or available. + * + * If emitted during window creation, the requested pixel format is not + * supported. + * + * If emitted when querying the clipboard, the contents of the clipboard could + * not be converted to the requested format. + * + * @analysis If emitted during window creation, one or more + * [hard constraints](@ref window_hints_hard) did not match any of the + * available pixel formats. If your application is sufficiently flexible, + * downgrade your requirements and try again. Otherwise, inform the user that + * their machine does not match your requirements. + * + * @par + * If emitted when querying the clipboard, ignore the error or report it to + * the user, as appropriate. + */ +#define GLFW_FORMAT_UNAVAILABLE 0x00010009 +/*! @brief The specified window does not have an OpenGL or OpenGL ES context. + * + * A window that does not have an OpenGL or OpenGL ES context was passed to + * a function that requires it to have one. + * + * @analysis Application programmer error. Fix the offending call. + */ +#define GLFW_NO_WINDOW_CONTEXT 0x0001000A +/*! @brief The specified cursor shape is not available. + * + * The specified standard cursor shape is not available, either because the + * current platform cursor theme does not provide it or because it is not + * available on the platform. + * + * @analysis Platform or system settings limitation. Pick another + * [standard cursor shape](@ref shapes) or create a + * [custom cursor](@ref cursor_custom). + */ +#define GLFW_CURSOR_UNAVAILABLE 0x0001000B +/*! @brief The requested feature is not provided by the platform. + * + * The requested feature is not provided by the platform, so GLFW is unable to + * implement it. The documentation for each function notes if it could emit + * this error. + * + * @analysis Platform or platform version limitation. The error can be ignored + * unless the feature is critical to the application. + * + * @par + * A function call that emits this error has no effect other than the error and + * updating any existing out parameters. + */ +#define GLFW_FEATURE_UNAVAILABLE 0x0001000C +/*! @brief The requested feature is not implemented for the platform. + * + * The requested feature has not yet been implemented in GLFW for this platform. + * + * @analysis An incomplete implementation of GLFW for this platform, hopefully + * fixed in a future release. The error can be ignored unless the feature is + * critical to the application. + * + * @par + * A function call that emits this error has no effect other than the error and + * updating any existing out parameters. + */ +#define GLFW_FEATURE_UNIMPLEMENTED 0x0001000D +/*! @brief Platform unavailable or no matching platform was found. + * + * If emitted during initialization, no matching platform was found. If @ref + * GLFW_PLATFORM is set to `GLFW_ANY_PLATFORM`, GLFW could not detect any of the + * platforms supported by this library binary, except for the Null platform. If set to + * a specific platform, it is either not supported by this library binary or GLFW was not + * able to detect it. + * + * If emitted by a native access function, GLFW was initialized for a different platform + * than the function is for. + * + * @analysis Failure to detect any platform usually only happens on non-macOS Unix + * systems, either when no window system is running or the program was run from + * a terminal that does not have the necessary environment variables. Fall back to + * a different platform if possible or notify the user that no usable platform was + * detected. + * + * Failure to detect a specific platform may have the same cause as above or be because + * support for that platform was not compiled in. Call @ref glfwPlatformSupported to + * check whether a specific platform is supported by a library binary. + */ +#define GLFW_PLATFORM_UNAVAILABLE 0x0001000E +/*! @} */ + +/*! @addtogroup window + * @{ */ +/*! @brief Input focus window hint and attribute + * + * Input focus [window hint](@ref GLFW_FOCUSED_hint) or + * [window attribute](@ref GLFW_FOCUSED_attrib). + */ +#define GLFW_FOCUSED 0x00020001 +/*! @brief Window iconification window attribute + * + * Window iconification [window attribute](@ref GLFW_ICONIFIED_attrib). + */ +#define GLFW_ICONIFIED 0x00020002 +/*! @brief Window resize-ability window hint and attribute + * + * Window resize-ability [window hint](@ref GLFW_RESIZABLE_hint) and + * [window attribute](@ref GLFW_RESIZABLE_attrib). + */ +#define GLFW_RESIZABLE 0x00020003 +/*! @brief Window visibility window hint and attribute + * + * Window visibility [window hint](@ref GLFW_VISIBLE_hint) and + * [window attribute](@ref GLFW_VISIBLE_attrib). + */ +#define GLFW_VISIBLE 0x00020004 +/*! @brief Window decoration window hint and attribute + * + * Window decoration [window hint](@ref GLFW_DECORATED_hint) and + * [window attribute](@ref GLFW_DECORATED_attrib). + */ +#define GLFW_DECORATED 0x00020005 +/*! @brief Window auto-iconification window hint and attribute + * + * Window auto-iconification [window hint](@ref GLFW_AUTO_ICONIFY_hint) and + * [window attribute](@ref GLFW_AUTO_ICONIFY_attrib). + */ +#define GLFW_AUTO_ICONIFY 0x00020006 +/*! @brief Window decoration window hint and attribute + * + * Window decoration [window hint](@ref GLFW_FLOATING_hint) and + * [window attribute](@ref GLFW_FLOATING_attrib). + */ +#define GLFW_FLOATING 0x00020007 +/*! @brief Window maximization window hint and attribute + * + * Window maximization [window hint](@ref GLFW_MAXIMIZED_hint) and + * [window attribute](@ref GLFW_MAXIMIZED_attrib). + */ +#define GLFW_MAXIMIZED 0x00020008 +/*! @brief Cursor centering window hint + * + * Cursor centering [window hint](@ref GLFW_CENTER_CURSOR_hint). + */ +#define GLFW_CENTER_CURSOR 0x00020009 +/*! @brief Window framebuffer transparency hint and attribute + * + * Window framebuffer transparency + * [window hint](@ref GLFW_TRANSPARENT_FRAMEBUFFER_hint) and + * [window attribute](@ref GLFW_TRANSPARENT_FRAMEBUFFER_attrib). + */ +#define GLFW_TRANSPARENT_FRAMEBUFFER 0x0002000A +/*! @brief Mouse cursor hover window attribute. + * + * Mouse cursor hover [window attribute](@ref GLFW_HOVERED_attrib). + */ +#define GLFW_HOVERED 0x0002000B +/*! @brief Input focus on calling show window hint and attribute + * + * Input focus [window hint](@ref GLFW_FOCUS_ON_SHOW_hint) or + * [window attribute](@ref GLFW_FOCUS_ON_SHOW_attrib). + */ +#define GLFW_FOCUS_ON_SHOW 0x0002000C + +/*! @brief Mouse input transparency window hint and attribute + * + * Mouse input transparency [window hint](@ref GLFW_MOUSE_PASSTHROUGH_hint) or + * [window attribute](@ref GLFW_MOUSE_PASSTHROUGH_attrib). + */ +#define GLFW_MOUSE_PASSTHROUGH 0x0002000D + +/*! @brief Framebuffer bit depth hint. + * + * Framebuffer bit depth [hint](@ref GLFW_RED_BITS). + */ +#define GLFW_RED_BITS 0x00021001 +/*! @brief Framebuffer bit depth hint. + * + * Framebuffer bit depth [hint](@ref GLFW_GREEN_BITS). + */ +#define GLFW_GREEN_BITS 0x00021002 +/*! @brief Framebuffer bit depth hint. + * + * Framebuffer bit depth [hint](@ref GLFW_BLUE_BITS). + */ +#define GLFW_BLUE_BITS 0x00021003 +/*! @brief Framebuffer bit depth hint. + * + * Framebuffer bit depth [hint](@ref GLFW_ALPHA_BITS). + */ +#define GLFW_ALPHA_BITS 0x00021004 +/*! @brief Framebuffer bit depth hint. + * + * Framebuffer bit depth [hint](@ref GLFW_DEPTH_BITS). + */ +#define GLFW_DEPTH_BITS 0x00021005 +/*! @brief Framebuffer bit depth hint. + * + * Framebuffer bit depth [hint](@ref GLFW_STENCIL_BITS). + */ +#define GLFW_STENCIL_BITS 0x00021006 +/*! @brief Framebuffer bit depth hint. + * + * Framebuffer bit depth [hint](@ref GLFW_ACCUM_RED_BITS). + */ +#define GLFW_ACCUM_RED_BITS 0x00021007 +/*! @brief Framebuffer bit depth hint. + * + * Framebuffer bit depth [hint](@ref GLFW_ACCUM_GREEN_BITS). + */ +#define GLFW_ACCUM_GREEN_BITS 0x00021008 +/*! @brief Framebuffer bit depth hint. + * + * Framebuffer bit depth [hint](@ref GLFW_ACCUM_BLUE_BITS). + */ +#define GLFW_ACCUM_BLUE_BITS 0x00021009 +/*! @brief Framebuffer bit depth hint. + * + * Framebuffer bit depth [hint](@ref GLFW_ACCUM_ALPHA_BITS). + */ +#define GLFW_ACCUM_ALPHA_BITS 0x0002100A +/*! @brief Framebuffer auxiliary buffer hint. + * + * Framebuffer auxiliary buffer [hint](@ref GLFW_AUX_BUFFERS). + */ +#define GLFW_AUX_BUFFERS 0x0002100B +/*! @brief OpenGL stereoscopic rendering hint. + * + * OpenGL stereoscopic rendering [hint](@ref GLFW_STEREO). + */ +#define GLFW_STEREO 0x0002100C +/*! @brief Framebuffer MSAA samples hint. + * + * Framebuffer MSAA samples [hint](@ref GLFW_SAMPLES). + */ +#define GLFW_SAMPLES 0x0002100D +/*! @brief Framebuffer sRGB hint. + * + * Framebuffer sRGB [hint](@ref GLFW_SRGB_CAPABLE). + */ +#define GLFW_SRGB_CAPABLE 0x0002100E +/*! @brief Monitor refresh rate hint. + * + * Monitor refresh rate [hint](@ref GLFW_REFRESH_RATE). + */ +#define GLFW_REFRESH_RATE 0x0002100F +/*! @brief Framebuffer double buffering hint and attribute. + * + * Framebuffer double buffering [hint](@ref GLFW_DOUBLEBUFFER_hint) and + * [attribute](@ref GLFW_DOUBLEBUFFER_attrib). + */ +#define GLFW_DOUBLEBUFFER 0x00021010 + +/*! @brief Context client API hint and attribute. + * + * Context client API [hint](@ref GLFW_CLIENT_API_hint) and + * [attribute](@ref GLFW_CLIENT_API_attrib). + */ +#define GLFW_CLIENT_API 0x00022001 +/*! @brief Context client API major version hint and attribute. + * + * Context client API major version [hint](@ref GLFW_CONTEXT_VERSION_MAJOR_hint) + * and [attribute](@ref GLFW_CONTEXT_VERSION_MAJOR_attrib). + */ +#define GLFW_CONTEXT_VERSION_MAJOR 0x00022002 +/*! @brief Context client API minor version hint and attribute. + * + * Context client API minor version [hint](@ref GLFW_CONTEXT_VERSION_MINOR_hint) + * and [attribute](@ref GLFW_CONTEXT_VERSION_MINOR_attrib). + */ +#define GLFW_CONTEXT_VERSION_MINOR 0x00022003 +/*! @brief Context client API revision number hint and attribute. + * + * Context client API revision number + * [attribute](@ref GLFW_CONTEXT_REVISION_attrib). + */ +#define GLFW_CONTEXT_REVISION 0x00022004 +/*! @brief Context robustness hint and attribute. + * + * Context client API revision number [hint](@ref GLFW_CONTEXT_ROBUSTNESS_hint) + * and [attribute](@ref GLFW_CONTEXT_ROBUSTNESS_attrib). + */ +#define GLFW_CONTEXT_ROBUSTNESS 0x00022005 +/*! @brief OpenGL forward-compatibility hint and attribute. + * + * OpenGL forward-compatibility [hint](@ref GLFW_OPENGL_FORWARD_COMPAT_hint) + * and [attribute](@ref GLFW_OPENGL_FORWARD_COMPAT_attrib). + */ +#define GLFW_OPENGL_FORWARD_COMPAT 0x00022006 +/*! @brief Debug mode context hint and attribute. + * + * Debug mode context [hint](@ref GLFW_CONTEXT_DEBUG_hint) and + * [attribute](@ref GLFW_CONTEXT_DEBUG_attrib). + */ +#define GLFW_CONTEXT_DEBUG 0x00022007 +/*! @brief Legacy name for compatibility. + * + * This is an alias for compatibility with earlier versions. + */ +#define GLFW_OPENGL_DEBUG_CONTEXT GLFW_CONTEXT_DEBUG +/*! @brief OpenGL profile hint and attribute. + * + * OpenGL profile [hint](@ref GLFW_OPENGL_PROFILE_hint) and + * [attribute](@ref GLFW_OPENGL_PROFILE_attrib). + */ +#define GLFW_OPENGL_PROFILE 0x00022008 +/*! @brief Context flush-on-release hint and attribute. + * + * Context flush-on-release [hint](@ref GLFW_CONTEXT_RELEASE_BEHAVIOR_hint) and + * [attribute](@ref GLFW_CONTEXT_RELEASE_BEHAVIOR_attrib). + */ +#define GLFW_CONTEXT_RELEASE_BEHAVIOR 0x00022009 +/*! @brief Context error suppression hint and attribute. + * + * Context error suppression [hint](@ref GLFW_CONTEXT_NO_ERROR_hint) and + * [attribute](@ref GLFW_CONTEXT_NO_ERROR_attrib). + */ +#define GLFW_CONTEXT_NO_ERROR 0x0002200A +/*! @brief Context creation API hint and attribute. + * + * Context creation API [hint](@ref GLFW_CONTEXT_CREATION_API_hint) and + * [attribute](@ref GLFW_CONTEXT_CREATION_API_attrib). + */ +#define GLFW_CONTEXT_CREATION_API 0x0002200B +/*! @brief Window content area scaling window + * [window hint](@ref GLFW_SCALE_TO_MONITOR). + */ +#define GLFW_SCALE_TO_MONITOR 0x0002200C +/*! @brief macOS specific + * [window hint](@ref GLFW_COCOA_RETINA_FRAMEBUFFER_hint). + */ +#define GLFW_COCOA_RETINA_FRAMEBUFFER 0x00023001 +/*! @brief macOS specific + * [window hint](@ref GLFW_COCOA_FRAME_NAME_hint). + */ +#define GLFW_COCOA_FRAME_NAME 0x00023002 +/*! @brief macOS specific + * [window hint](@ref GLFW_COCOA_GRAPHICS_SWITCHING_hint). + */ +#define GLFW_COCOA_GRAPHICS_SWITCHING 0x00023003 +/*! @brief X11 specific + * [window hint](@ref GLFW_X11_CLASS_NAME_hint). + */ +#define GLFW_X11_CLASS_NAME 0x00024001 +/*! @brief X11 specific + * [window hint](@ref GLFW_X11_CLASS_NAME_hint). + */ +#define GLFW_X11_INSTANCE_NAME 0x00024002 +#define GLFW_WIN32_KEYBOARD_MENU 0x00025001 +/*! @} */ + +#define GLFW_NO_API 0 +#define GLFW_OPENGL_API 0x00030001 +#define GLFW_OPENGL_ES_API 0x00030002 + +#define GLFW_NO_ROBUSTNESS 0 +#define GLFW_NO_RESET_NOTIFICATION 0x00031001 +#define GLFW_LOSE_CONTEXT_ON_RESET 0x00031002 + +#define GLFW_OPENGL_ANY_PROFILE 0 +#define GLFW_OPENGL_CORE_PROFILE 0x00032001 +#define GLFW_OPENGL_COMPAT_PROFILE 0x00032002 + +#define GLFW_CURSOR 0x00033001 +#define GLFW_STICKY_KEYS 0x00033002 +#define GLFW_STICKY_MOUSE_BUTTONS 0x00033003 +#define GLFW_LOCK_KEY_MODS 0x00033004 +#define GLFW_RAW_MOUSE_MOTION 0x00033005 + +#define GLFW_CURSOR_NORMAL 0x00034001 +#define GLFW_CURSOR_HIDDEN 0x00034002 +#define GLFW_CURSOR_DISABLED 0x00034003 + +#define GLFW_ANY_RELEASE_BEHAVIOR 0 +#define GLFW_RELEASE_BEHAVIOR_FLUSH 0x00035001 +#define GLFW_RELEASE_BEHAVIOR_NONE 0x00035002 + +#define GLFW_NATIVE_CONTEXT_API 0x00036001 +#define GLFW_EGL_CONTEXT_API 0x00036002 +#define GLFW_OSMESA_CONTEXT_API 0x00036003 + +#define GLFW_ANGLE_PLATFORM_TYPE_NONE 0x00037001 +#define GLFW_ANGLE_PLATFORM_TYPE_OPENGL 0x00037002 +#define GLFW_ANGLE_PLATFORM_TYPE_OPENGLES 0x00037003 +#define GLFW_ANGLE_PLATFORM_TYPE_D3D9 0x00037004 +#define GLFW_ANGLE_PLATFORM_TYPE_D3D11 0x00037005 +#define GLFW_ANGLE_PLATFORM_TYPE_VULKAN 0x00037007 +#define GLFW_ANGLE_PLATFORM_TYPE_METAL 0x00037008 + +/*! @defgroup shapes Standard cursor shapes + * @brief Standard system cursor shapes. + * + * These are the [standard cursor shapes](@ref cursor_standard) that can be + * requested from the platform (window system). + * + * @ingroup input + * @{ */ + +/*! @brief The regular arrow cursor shape. + * + * The regular arrow cursor shape. + */ +#define GLFW_ARROW_CURSOR 0x00036001 +/*! @brief The text input I-beam cursor shape. + * + * The text input I-beam cursor shape. + */ +#define GLFW_IBEAM_CURSOR 0x00036002 +/*! @brief The crosshair cursor shape. + * + * The crosshair cursor shape. + */ +#define GLFW_CROSSHAIR_CURSOR 0x00036003 +/*! @brief The pointing hand cursor shape. + * + * The pointing hand cursor shape. + */ +#define GLFW_POINTING_HAND_CURSOR 0x00036004 +/*! @brief The horizontal resize/move arrow shape. + * + * The horizontal resize/move arrow shape. This is usually a horizontal + * double-headed arrow. + */ +#define GLFW_RESIZE_EW_CURSOR 0x00036005 +/*! @brief The vertical resize/move arrow shape. + * + * The vertical resize/move shape. This is usually a vertical double-headed + * arrow. + */ +#define GLFW_RESIZE_NS_CURSOR 0x00036006 +/*! @brief The top-left to bottom-right diagonal resize/move arrow shape. + * + * The top-left to bottom-right diagonal resize/move shape. This is usually + * a diagonal double-headed arrow. + * + * @note @macos This shape is provided by a private system API and may fail + * with @ref GLFW_CURSOR_UNAVAILABLE in the future. + * + * @note @x11 This shape is provided by a newer standard not supported by all + * cursor themes. + * + * @note @wayland This shape is provided by a newer standard not supported by + * all cursor themes. + */ +#define GLFW_RESIZE_NWSE_CURSOR 0x00036007 +/*! @brief The top-right to bottom-left diagonal resize/move arrow shape. + * + * The top-right to bottom-left diagonal resize/move shape. This is usually + * a diagonal double-headed arrow. + * + * @note @macos This shape is provided by a private system API and may fail + * with @ref GLFW_CURSOR_UNAVAILABLE in the future. + * + * @note @x11 This shape is provided by a newer standard not supported by all + * cursor themes. + * + * @note @wayland This shape is provided by a newer standard not supported by + * all cursor themes. + */ +#define GLFW_RESIZE_NESW_CURSOR 0x00036008 +/*! @brief The omni-directional resize/move cursor shape. + * + * The omni-directional resize cursor/move shape. This is usually either + * a combined horizontal and vertical double-headed arrow or a grabbing hand. + */ +#define GLFW_RESIZE_ALL_CURSOR 0x00036009 +/*! @brief The operation-not-allowed shape. + * + * The operation-not-allowed shape. This is usually a circle with a diagonal + * line through it. + * + * @note @x11 This shape is provided by a newer standard not supported by all + * cursor themes. + * + * @note @wayland This shape is provided by a newer standard not supported by + * all cursor themes. + */ +#define GLFW_NOT_ALLOWED_CURSOR 0x0003600A +/*! @brief Legacy name for compatibility. + * + * This is an alias for compatibility with earlier versions. + */ +#define GLFW_HRESIZE_CURSOR GLFW_RESIZE_EW_CURSOR +/*! @brief Legacy name for compatibility. + * + * This is an alias for compatibility with earlier versions. + */ +#define GLFW_VRESIZE_CURSOR GLFW_RESIZE_NS_CURSOR +/*! @brief Legacy name for compatibility. + * + * This is an alias for compatibility with earlier versions. + */ +#define GLFW_HAND_CURSOR GLFW_POINTING_HAND_CURSOR +/*! @} */ + +#define GLFW_CONNECTED 0x00040001 +#define GLFW_DISCONNECTED 0x00040002 + +/*! @addtogroup init + * @{ */ +/*! @brief Joystick hat buttons init hint. + * + * Joystick hat buttons [init hint](@ref GLFW_JOYSTICK_HAT_BUTTONS). + */ +#define GLFW_JOYSTICK_HAT_BUTTONS 0x00050001 +/*! @brief ANGLE rendering backend init hint. + * + * ANGLE rendering backend [init hint](@ref GLFW_ANGLE_PLATFORM_TYPE_hint). + */ +#define GLFW_ANGLE_PLATFORM_TYPE 0x00050002 +/*! @brief Platform selection init hint. + * + * Platform selection [init hint](@ref GLFW_PLATFORM). + */ +#define GLFW_PLATFORM 0x00050003 +/*! @brief macOS specific init hint. + * + * macOS specific [init hint](@ref GLFW_COCOA_CHDIR_RESOURCES_hint). + */ +#define GLFW_COCOA_CHDIR_RESOURCES 0x00051001 +/*! @brief macOS specific init hint. + * + * macOS specific [init hint](@ref GLFW_COCOA_MENUBAR_hint). + */ +#define GLFW_COCOA_MENUBAR 0x00051002 +/*! @brief X11 specific init hint. + * + * X11 specific [init hint](@ref GLFW_X11_XCB_VULKAN_SURFACE_hint). + */ +#define GLFW_X11_XCB_VULKAN_SURFACE 0x00052001 +/*! @} */ + +/*! @addtogroup init + * @{ */ +/*! @brief Hint value that enables automatic platform selection. + * + * Hint value for @ref GLFW_PLATFORM that enables automatic platform selection. + */ +#define GLFW_ANY_PLATFORM 0x00060000 +#define GLFW_PLATFORM_WIN32 0x00060001 +#define GLFW_PLATFORM_COCOA 0x00060002 +#define GLFW_PLATFORM_WAYLAND 0x00060003 +#define GLFW_PLATFORM_X11 0x00060004 +#define GLFW_PLATFORM_NULL 0x00060005 +/*! @} */ + +#define GLFW_DONT_CARE -1 + + +/************************************************************************* + * GLFW API types + *************************************************************************/ + +/*! @brief Client API function pointer type. + * + * Generic function pointer used for returning client API function pointers + * without forcing a cast from a regular pointer. + * + * @sa @ref context_glext + * @sa @ref glfwGetProcAddress + * + * @since Added in version 3.0. + * + * @ingroup context + */ +typedef void (*GLFWglproc)(void); + +/*! @brief Vulkan API function pointer type. + * + * Generic function pointer used for returning Vulkan API function pointers + * without forcing a cast from a regular pointer. + * + * @sa @ref vulkan_proc + * @sa @ref glfwGetInstanceProcAddress + * + * @since Added in version 3.2. + * + * @ingroup vulkan + */ +typedef void (*GLFWvkproc)(void); + +/*! @brief Opaque monitor object. + * + * Opaque monitor object. + * + * @see @ref monitor_object + * + * @since Added in version 3.0. + * + * @ingroup monitor + */ +typedef struct GLFWmonitor GLFWmonitor; + +/*! @brief Opaque window object. + * + * Opaque window object. + * + * @see @ref window_object + * + * @since Added in version 3.0. + * + * @ingroup window + */ +typedef struct GLFWwindow GLFWwindow; + +/*! @brief Opaque cursor object. + * + * Opaque cursor object. + * + * @see @ref cursor_object + * + * @since Added in version 3.1. + * + * @ingroup input + */ +typedef struct GLFWcursor GLFWcursor; + +/*! @brief The function pointer type for memory allocation callbacks. + * + * This is the function pointer type for memory allocation callbacks. A memory + * allocation callback function has the following signature: + * @code + * void* function_name(size_t size, void* user) + * @endcode + * + * This function must return either a memory block at least `size` bytes long, + * or `NULL` if allocation failed. Note that not all parts of GLFW handle allocation + * failures gracefully yet. + * + * This function may be called during @ref glfwInit but before the library is + * flagged as initialized, as well as during @ref glfwTerminate after the + * library is no longer flagged as initialized. + * + * Any memory allocated by this function will be deallocated during library + * termination or earlier. + * + * The size will always be greater than zero. Allocations of size zero are filtered out + * before reaching the custom allocator. + * + * @param[in] size The minimum size, in bytes, of the memory block. + * @param[in] user The user-defined pointer from the allocator. + * @return The address of the newly allocated memory block, or `NULL` if an + * error occurred. + * + * @pointer_lifetime The returned memory block must be valid at least until it + * is deallocated. + * + * @reentrancy This function should not call any GLFW function. + * + * @thread_safety This function may be called from any thread that calls GLFW functions. + * + * @sa @ref init_allocator + * @sa @ref GLFWallocator + * + * @since Added in version 3.4. + * + * @ingroup init + */ +typedef void* (* GLFWallocatefun)(size_t size, void* user); + +/*! @brief The function pointer type for memory reallocation callbacks. + * + * This is the function pointer type for memory reallocation callbacks. + * A memory reallocation callback function has the following signature: + * @code + * void* function_name(void* block, size_t size, void* user) + * @endcode + * + * This function must return a memory block at least `size` bytes long, or + * `NULL` if allocation failed. Note that not all parts of GLFW handle allocation + * failures gracefully yet. + * + * This function may be called during @ref glfwInit but before the library is + * flagged as initialized, as well as during @ref glfwTerminate after the + * library is no longer flagged as initialized. + * + * Any memory allocated by this function will be deallocated during library + * termination or earlier. + * + * The block address will never be `NULL` and the size will always be greater than zero. + * Reallocations of a block to size zero are converted into deallocations. Reallocations + * of `NULL` to a non-zero size are converted into regular allocations. + * + * @param[in] block The address of the memory block to reallocate. + * @param[in] size The new minimum size, in bytes, of the memory block. + * @param[in] user The user-defined pointer from the allocator. + * @return The address of the newly allocated or resized memory block, or + * `NULL` if an error occurred. + * + * @pointer_lifetime The returned memory block must be valid at least until it + * is deallocated. + * + * @reentrancy This function should not call any GLFW function. + * + * @thread_safety This function may be called from any thread that calls GLFW functions. + * + * @sa @ref init_allocator + * @sa @ref GLFWallocator + * + * @since Added in version 3.4. + * + * @ingroup init + */ +typedef void* (* GLFWreallocatefun)(void* block, size_t size, void* user); + +/*! @brief The function pointer type for memory deallocation callbacks. + * + * This is the function pointer type for memory deallocation callbacks. + * A memory deallocation callback function has the following signature: + * @code + * void function_name(void* block, void* user) + * @endcode + * + * This function may deallocate the specified memory block. This memory block + * will have been allocated with the same allocator. + * + * This function may be called during @ref glfwInit but before the library is + * flagged as initialized, as well as during @ref glfwTerminate after the + * library is no longer flagged as initialized. + * + * The block address will never be `NULL`. Deallocations of `NULL` are filtered out + * before reaching the custom allocator. + * + * @param[in] block The address of the memory block to deallocate. + * @param[in] user The user-defined pointer from the allocator. + * + * @pointer_lifetime The specified memory block will not be accessed by GLFW + * after this function is called. + * + * @reentrancy This function should not call any GLFW function. + * + * @thread_safety This function may be called from any thread that calls GLFW functions. + * + * @sa @ref init_allocator + * @sa @ref GLFWallocator + * + * @since Added in version 3.4. + * + * @ingroup init + */ +typedef void (* GLFWdeallocatefun)(void* block, void* user); + +/*! @brief The function pointer type for error callbacks. + * + * This is the function pointer type for error callbacks. An error callback + * function has the following signature: + * @code + * void callback_name(int error_code, const char* description) + * @endcode + * + * @param[in] error_code An [error code](@ref errors). Future releases may add + * more error codes. + * @param[in] description A UTF-8 encoded string describing the error. + * + * @pointer_lifetime The error description string is valid until the callback + * function returns. + * + * @sa @ref error_handling + * @sa @ref glfwSetErrorCallback + * + * @since Added in version 3.0. + * + * @ingroup init + */ +typedef void (* GLFWerrorfun)(int error_code, const char* description); + +/*! @brief The function pointer type for window position callbacks. + * + * This is the function pointer type for window position callbacks. A window + * position callback function has the following signature: + * @code + * void callback_name(GLFWwindow* window, int xpos, int ypos) + * @endcode + * + * @param[in] window The window that was moved. + * @param[in] xpos The new x-coordinate, in screen coordinates, of the + * upper-left corner of the content area of the window. + * @param[in] ypos The new y-coordinate, in screen coordinates, of the + * upper-left corner of the content area of the window. + * + * @sa @ref window_pos + * @sa @ref glfwSetWindowPosCallback + * + * @since Added in version 3.0. + * + * @ingroup window + */ +typedef void (* GLFWwindowposfun)(GLFWwindow* window, int xpos, int ypos); + +/*! @brief The function pointer type for window size callbacks. + * + * This is the function pointer type for window size callbacks. A window size + * callback function has the following signature: + * @code + * void callback_name(GLFWwindow* window, int width, int height) + * @endcode + * + * @param[in] window The window that was resized. + * @param[in] width The new width, in screen coordinates, of the window. + * @param[in] height The new height, in screen coordinates, of the window. + * + * @sa @ref window_size + * @sa @ref glfwSetWindowSizeCallback + * + * @since Added in version 1.0. + * @glfw3 Added window handle parameter. + * + * @ingroup window + */ +typedef void (* GLFWwindowsizefun)(GLFWwindow* window, int width, int height); + +/*! @brief The function pointer type for window close callbacks. + * + * This is the function pointer type for window close callbacks. A window + * close callback function has the following signature: + * @code + * void function_name(GLFWwindow* window) + * @endcode + * + * @param[in] window The window that the user attempted to close. + * + * @sa @ref window_close + * @sa @ref glfwSetWindowCloseCallback + * + * @since Added in version 2.5. + * @glfw3 Added window handle parameter. + * + * @ingroup window + */ +typedef void (* GLFWwindowclosefun)(GLFWwindow* window); + +/*! @brief The function pointer type for window content refresh callbacks. + * + * This is the function pointer type for window content refresh callbacks. + * A window content refresh callback function has the following signature: + * @code + * void function_name(GLFWwindow* window); + * @endcode + * + * @param[in] window The window whose content needs to be refreshed. + * + * @sa @ref window_refresh + * @sa @ref glfwSetWindowRefreshCallback + * + * @since Added in version 2.5. + * @glfw3 Added window handle parameter. + * + * @ingroup window + */ +typedef void (* GLFWwindowrefreshfun)(GLFWwindow* window); + +/*! @brief The function pointer type for window focus callbacks. + * + * This is the function pointer type for window focus callbacks. A window + * focus callback function has the following signature: + * @code + * void function_name(GLFWwindow* window, int focused) + * @endcode + * + * @param[in] window The window that gained or lost input focus. + * @param[in] focused `GLFW_TRUE` if the window was given input focus, or + * `GLFW_FALSE` if it lost it. + * + * @sa @ref window_focus + * @sa @ref glfwSetWindowFocusCallback + * + * @since Added in version 3.0. + * + * @ingroup window + */ +typedef void (* GLFWwindowfocusfun)(GLFWwindow* window, int focused); + +/*! @brief The function pointer type for window iconify callbacks. + * + * This is the function pointer type for window iconify callbacks. A window + * iconify callback function has the following signature: + * @code + * void function_name(GLFWwindow* window, int iconified) + * @endcode + * + * @param[in] window The window that was iconified or restored. + * @param[in] iconified `GLFW_TRUE` if the window was iconified, or + * `GLFW_FALSE` if it was restored. + * + * @sa @ref window_iconify + * @sa @ref glfwSetWindowIconifyCallback + * + * @since Added in version 3.0. + * + * @ingroup window + */ +typedef void (* GLFWwindowiconifyfun)(GLFWwindow* window, int iconified); + +/*! @brief The function pointer type for window maximize callbacks. + * + * This is the function pointer type for window maximize callbacks. A window + * maximize callback function has the following signature: + * @code + * void function_name(GLFWwindow* window, int maximized) + * @endcode + * + * @param[in] window The window that was maximized or restored. + * @param[in] maximized `GLFW_TRUE` if the window was maximized, or + * `GLFW_FALSE` if it was restored. + * + * @sa @ref window_maximize + * @sa glfwSetWindowMaximizeCallback + * + * @since Added in version 3.3. + * + * @ingroup window + */ +typedef void (* GLFWwindowmaximizefun)(GLFWwindow* window, int maximized); + +/*! @brief The function pointer type for framebuffer size callbacks. + * + * This is the function pointer type for framebuffer size callbacks. + * A framebuffer size callback function has the following signature: + * @code + * void function_name(GLFWwindow* window, int width, int height) + * @endcode + * + * @param[in] window The window whose framebuffer was resized. + * @param[in] width The new width, in pixels, of the framebuffer. + * @param[in] height The new height, in pixels, of the framebuffer. + * + * @sa @ref window_fbsize + * @sa @ref glfwSetFramebufferSizeCallback + * + * @since Added in version 3.0. + * + * @ingroup window + */ +typedef void (* GLFWframebuffersizefun)(GLFWwindow* window, int width, int height); + +/*! @brief The function pointer type for window content scale callbacks. + * + * This is the function pointer type for window content scale callbacks. + * A window content scale callback function has the following signature: + * @code + * void function_name(GLFWwindow* window, float xscale, float yscale) + * @endcode + * + * @param[in] window The window whose content scale changed. + * @param[in] xscale The new x-axis content scale of the window. + * @param[in] yscale The new y-axis content scale of the window. + * + * @sa @ref window_scale + * @sa @ref glfwSetWindowContentScaleCallback + * + * @since Added in version 3.3. + * + * @ingroup window + */ +typedef void (* GLFWwindowcontentscalefun)(GLFWwindow* window, float xscale, float yscale); + +/*! @brief The function pointer type for mouse button callbacks. + * + * This is the function pointer type for mouse button callback functions. + * A mouse button callback function has the following signature: + * @code + * void function_name(GLFWwindow* window, int button, int action, int mods) + * @endcode + * + * @param[in] window The window that received the event. + * @param[in] button The [mouse button](@ref buttons) that was pressed or + * released. + * @param[in] action One of `GLFW_PRESS` or `GLFW_RELEASE`. Future releases + * may add more actions. + * @param[in] mods Bit field describing which [modifier keys](@ref mods) were + * held down. + * + * @sa @ref input_mouse_button + * @sa @ref glfwSetMouseButtonCallback + * + * @since Added in version 1.0. + * @glfw3 Added window handle and modifier mask parameters. + * + * @ingroup input + */ +typedef void (* GLFWmousebuttonfun)(GLFWwindow* window, int button, int action, int mods); + +/*! @brief The function pointer type for cursor position callbacks. + * + * This is the function pointer type for cursor position callbacks. A cursor + * position callback function has the following signature: + * @code + * void function_name(GLFWwindow* window, double xpos, double ypos); + * @endcode + * + * @param[in] window The window that received the event. + * @param[in] xpos The new cursor x-coordinate, relative to the left edge of + * the content area. + * @param[in] ypos The new cursor y-coordinate, relative to the top edge of the + * content area. + * + * @sa @ref cursor_pos + * @sa @ref glfwSetCursorPosCallback + * + * @since Added in version 3.0. Replaces `GLFWmouseposfun`. + * + * @ingroup input + */ +typedef void (* GLFWcursorposfun)(GLFWwindow* window, double xpos, double ypos); + +/*! @brief The function pointer type for cursor enter/leave callbacks. + * + * This is the function pointer type for cursor enter/leave callbacks. + * A cursor enter/leave callback function has the following signature: + * @code + * void function_name(GLFWwindow* window, int entered) + * @endcode + * + * @param[in] window The window that received the event. + * @param[in] entered `GLFW_TRUE` if the cursor entered the window's content + * area, or `GLFW_FALSE` if it left it. + * + * @sa @ref cursor_enter + * @sa @ref glfwSetCursorEnterCallback + * + * @since Added in version 3.0. + * + * @ingroup input + */ +typedef void (* GLFWcursorenterfun)(GLFWwindow* window, int entered); + +/*! @brief The function pointer type for scroll callbacks. + * + * This is the function pointer type for scroll callbacks. A scroll callback + * function has the following signature: + * @code + * void function_name(GLFWwindow* window, double xoffset, double yoffset) + * @endcode + * + * @param[in] window The window that received the event. + * @param[in] xoffset The scroll offset along the x-axis. + * @param[in] yoffset The scroll offset along the y-axis. + * + * @sa @ref scrolling + * @sa @ref glfwSetScrollCallback + * + * @since Added in version 3.0. Replaces `GLFWmousewheelfun`. + * + * @ingroup input + */ +typedef void (* GLFWscrollfun)(GLFWwindow* window, double xoffset, double yoffset); + +/*! @brief The function pointer type for keyboard key callbacks. + * + * This is the function pointer type for keyboard key callbacks. A keyboard + * key callback function has the following signature: + * @code + * void function_name(GLFWwindow* window, int key, int scancode, int action, int mods) + * @endcode + * + * @param[in] window The window that received the event. + * @param[in] key The [keyboard key](@ref keys) that was pressed or released. + * @param[in] scancode The platform-specific scancode of the key. + * @param[in] action `GLFW_PRESS`, `GLFW_RELEASE` or `GLFW_REPEAT`. Future + * releases may add more actions. + * @param[in] mods Bit field describing which [modifier keys](@ref mods) were + * held down. + * + * @sa @ref input_key + * @sa @ref glfwSetKeyCallback + * + * @since Added in version 1.0. + * @glfw3 Added window handle, scancode and modifier mask parameters. + * + * @ingroup input + */ +typedef void (* GLFWkeyfun)(GLFWwindow* window, int key, int scancode, int action, int mods); + +/*! @brief The function pointer type for Unicode character callbacks. + * + * This is the function pointer type for Unicode character callbacks. + * A Unicode character callback function has the following signature: + * @code + * void function_name(GLFWwindow* window, unsigned int codepoint) + * @endcode + * + * @param[in] window The window that received the event. + * @param[in] codepoint The Unicode code point of the character. + * + * @sa @ref input_char + * @sa @ref glfwSetCharCallback + * + * @since Added in version 2.4. + * @glfw3 Added window handle parameter. + * + * @ingroup input + */ +typedef void (* GLFWcharfun)(GLFWwindow* window, unsigned int codepoint); + +/*! @brief The function pointer type for Unicode character with modifiers + * callbacks. + * + * This is the function pointer type for Unicode character with modifiers + * callbacks. It is called for each input character, regardless of what + * modifier keys are held down. A Unicode character with modifiers callback + * function has the following signature: + * @code + * void function_name(GLFWwindow* window, unsigned int codepoint, int mods) + * @endcode + * + * @param[in] window The window that received the event. + * @param[in] codepoint The Unicode code point of the character. + * @param[in] mods Bit field describing which [modifier keys](@ref mods) were + * held down. + * + * @sa @ref input_char + * @sa @ref glfwSetCharModsCallback + * + * @deprecated Scheduled for removal in version 4.0. + * + * @since Added in version 3.1. + * + * @ingroup input + */ +typedef void (* GLFWcharmodsfun)(GLFWwindow* window, unsigned int codepoint, int mods); + +/*! @brief The function pointer type for path drop callbacks. + * + * This is the function pointer type for path drop callbacks. A path drop + * callback function has the following signature: + * @code + * void function_name(GLFWwindow* window, int path_count, const char* paths[]) + * @endcode + * + * @param[in] window The window that received the event. + * @param[in] path_count The number of dropped paths. + * @param[in] paths The UTF-8 encoded file and/or directory path names. + * + * @pointer_lifetime The path array and its strings are valid until the + * callback function returns. + * + * @sa @ref path_drop + * @sa @ref glfwSetDropCallback + * + * @since Added in version 3.1. + * + * @ingroup input + */ +typedef void (* GLFWdropfun)(GLFWwindow* window, int path_count, const char* paths[]); + +/*! @brief The function pointer type for monitor configuration callbacks. + * + * This is the function pointer type for monitor configuration callbacks. + * A monitor callback function has the following signature: + * @code + * void function_name(GLFWmonitor* monitor, int event) + * @endcode + * + * @param[in] monitor The monitor that was connected or disconnected. + * @param[in] event One of `GLFW_CONNECTED` or `GLFW_DISCONNECTED`. Future + * releases may add more events. + * + * @sa @ref monitor_event + * @sa @ref glfwSetMonitorCallback + * + * @since Added in version 3.0. + * + * @ingroup monitor + */ +typedef void (* GLFWmonitorfun)(GLFWmonitor* monitor, int event); + +/*! @brief The function pointer type for joystick configuration callbacks. + * + * This is the function pointer type for joystick configuration callbacks. + * A joystick configuration callback function has the following signature: + * @code + * void function_name(int jid, int event) + * @endcode + * + * @param[in] jid The joystick that was connected or disconnected. + * @param[in] event One of `GLFW_CONNECTED` or `GLFW_DISCONNECTED`. Future + * releases may add more events. + * + * @sa @ref joystick_event + * @sa @ref glfwSetJoystickCallback + * + * @since Added in version 3.2. + * + * @ingroup input + */ +typedef void (* GLFWjoystickfun)(int jid, int event); + +/*! @brief Video mode type. + * + * This describes a single video mode. + * + * @sa @ref monitor_modes + * @sa @ref glfwGetVideoMode + * @sa @ref glfwGetVideoModes + * + * @since Added in version 1.0. + * @glfw3 Added refresh rate member. + * + * @ingroup monitor + */ +typedef struct GLFWvidmode +{ + /*! The width, in screen coordinates, of the video mode. + */ + int width; + /*! The height, in screen coordinates, of the video mode. + */ + int height; + /*! The bit depth of the red channel of the video mode. + */ + int redBits; + /*! The bit depth of the green channel of the video mode. + */ + int greenBits; + /*! The bit depth of the blue channel of the video mode. + */ + int blueBits; + /*! The refresh rate, in Hz, of the video mode. + */ + int refreshRate; +} GLFWvidmode; + +/*! @brief Gamma ramp. + * + * This describes the gamma ramp for a monitor. + * + * @sa @ref monitor_gamma + * @sa @ref glfwGetGammaRamp + * @sa @ref glfwSetGammaRamp + * + * @since Added in version 3.0. + * + * @ingroup monitor + */ +typedef struct GLFWgammaramp +{ + /*! An array of value describing the response of the red channel. + */ + unsigned short* red; + /*! An array of value describing the response of the green channel. + */ + unsigned short* green; + /*! An array of value describing the response of the blue channel. + */ + unsigned short* blue; + /*! The number of elements in each array. + */ + unsigned int size; +} GLFWgammaramp; + +/*! @brief Image data. + * + * This describes a single 2D image. See the documentation for each related + * function what the expected pixel format is. + * + * @sa @ref cursor_custom + * @sa @ref window_icon + * + * @since Added in version 2.1. + * @glfw3 Removed format and bytes-per-pixel members. + * + * @ingroup window + */ +typedef struct GLFWimage +{ + /*! The width, in pixels, of this image. + */ + int width; + /*! The height, in pixels, of this image. + */ + int height; + /*! The pixel data of this image, arranged left-to-right, top-to-bottom. + */ + unsigned char* pixels; +} GLFWimage; + +/*! @brief Gamepad input state + * + * This describes the input state of a gamepad. + * + * @sa @ref gamepad + * @sa @ref glfwGetGamepadState + * + * @since Added in version 3.3. + * + * @ingroup input + */ +typedef struct GLFWgamepadstate +{ + /*! The states of each [gamepad button](@ref gamepad_buttons), `GLFW_PRESS` + * or `GLFW_RELEASE`. + */ + unsigned char buttons[15]; + /*! The states of each [gamepad axis](@ref gamepad_axes), in the range -1.0 + * to 1.0 inclusive. + */ + float axes[6]; +} GLFWgamepadstate; + +/*! @brief + * + * @sa @ref init_allocator + * @sa @ref glfwInitAllocator + * + * @since Added in version 3.4. + * + * @ingroup init + */ +typedef struct GLFWallocator +{ + GLFWallocatefun allocate; + GLFWreallocatefun reallocate; + GLFWdeallocatefun deallocate; + void* user; +} GLFWallocator; + + +/************************************************************************* + * GLFW API functions + *************************************************************************/ + +/*! @brief Initializes the GLFW library. + * + * This function initializes the GLFW library. Before most GLFW functions can + * be used, GLFW must be initialized, and before an application terminates GLFW + * should be terminated in order to free any resources allocated during or + * after initialization. + * + * If this function fails, it calls @ref glfwTerminate before returning. If it + * succeeds, you should call @ref glfwTerminate before the application exits. + * + * Additional calls to this function after successful initialization but before + * termination will return `GLFW_TRUE` immediately. + * + * The @ref GLFW_PLATFORM init hint controls which platforms are considered during + * initialization. This also depends on which platforms the library was compiled to + * support. + * + * @return `GLFW_TRUE` if successful, or `GLFW_FALSE` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_PLATFORM_UNAVAILABLE and @ref + * GLFW_PLATFORM_ERROR. + * + * @remark @macos This function will change the current directory of the + * application to the `Contents/Resources` subdirectory of the application's + * bundle, if present. This can be disabled with the @ref + * GLFW_COCOA_CHDIR_RESOURCES init hint. + * + * @remark @macos This function will create the main menu and dock icon for the + * application. If GLFW finds a `MainMenu.nib` it is loaded and assumed to + * contain a menu bar. Otherwise a minimal menu bar is created manually with + * common commands like Hide, Quit and About. The About entry opens a minimal + * about dialog with information from the application's bundle. The menu bar + * and dock icon can be disabled entirely with the @ref GLFW_COCOA_MENUBAR init + * hint. + * + * @remark @x11 This function will set the `LC_CTYPE` category of the + * application locale according to the current environment if that category is + * still "C". This is because the "C" locale breaks Unicode text input. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref intro_init + * @sa @ref glfwInitHint + * @sa @ref glfwInitAllocator + * @sa @ref glfwTerminate + * + * @since Added in version 1.0. + * + * @ingroup init + */ +GLFWAPI int glfwInit(void); + +/*! @brief Terminates the GLFW library. + * + * This function destroys all remaining windows and cursors, restores any + * modified gamma ramps and frees any other allocated resources. Once this + * function is called, you must again call @ref glfwInit successfully before + * you will be able to use most GLFW functions. + * + * If GLFW has been successfully initialized, this function should be called + * before the application exits. If initialization fails, there is no need to + * call this function, as it is called by @ref glfwInit before it returns + * failure. + * + * This function has no effect if GLFW is not initialized. + * + * @errors Possible errors include @ref GLFW_PLATFORM_ERROR. + * + * @remark This function may be called before @ref glfwInit. + * + * @warning The contexts of any remaining windows must not be current on any + * other thread when this function is called. + * + * @reentrancy This function must not be called from a callback. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref intro_init + * @sa @ref glfwInit + * + * @since Added in version 1.0. + * + * @ingroup init + */ +GLFWAPI void glfwTerminate(void); + +/*! @brief Sets the specified init hint to the desired value. + * + * This function sets hints for the next initialization of GLFW. + * + * The values you set hints to are never reset by GLFW, but they only take + * effect during initialization. Once GLFW has been initialized, any values + * you set will be ignored until the library is terminated and initialized + * again. + * + * Some hints are platform specific. These may be set on any platform but they + * will only affect their specific platform. Other platforms will ignore them. + * Setting these hints requires no platform specific headers or functions. + * + * @param[in] hint The [init hint](@ref init_hints) to set. + * @param[in] value The new value of the init hint. + * + * @errors Possible errors include @ref GLFW_INVALID_ENUM and @ref + * GLFW_INVALID_VALUE. + * + * @remarks This function may be called before @ref glfwInit. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa init_hints + * @sa glfwInit + * + * @since Added in version 3.3. + * + * @ingroup init + */ +GLFWAPI void glfwInitHint(int hint, int value); + +/*! @brief Sets the init allocator to the desired value. + * + * To use the default allocator, call this function with a `NULL` argument. + * + * If you specify an allocator struct, every member must be a valid function + * pointer. If any member is `NULL`, this function emits @ref + * GLFW_INVALID_VALUE and the init allocator is unchanged. + * + * @param[in] allocator The allocator to use at the next initialization, or + * `NULL` to use the default one. + * + * @errors Possible errors include @ref GLFW_INVALID_VALUE. + * + * @pointer_lifetime The specified allocator is copied before this function + * returns. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref init_allocator + * @sa @ref glfwInit + * + * @since Added in version 3.4. + * + * @ingroup init + */ +GLFWAPI void glfwInitAllocator(const GLFWallocator* allocator); + +#if defined(VK_VERSION_1_0) + +/*! @brief Sets the desired Vulkan `vkGetInstanceProcAddr` function. + * + * This function sets the `vkGetInstanceProcAddr` function that GLFW will use for all + * Vulkan related entry point queries. + * + * This feature is mostly useful on macOS, if your copy of the Vulkan loader is in + * a location where GLFW cannot find it through dynamic loading, or if you are still + * using the static library version of the loader. + * + * If set to `NULL`, GLFW will try to load the Vulkan loader dynamically by its standard + * name and get this function from there. This is the default behavior. + * + * The standard name of the loader is `vulkan-1.dll` on Windows, `libvulkan.so.1` on + * Linux and other Unix-like systems and `libvulkan.1.dylib` on macOS. If your code is + * also loading it via these names then you probably don't need to use this function. + * + * The function address you set is never reset by GLFW, but it only takes effect during + * initialization. Once GLFW has been initialized, any updates will be ignored until the + * library is terminated and initialized again. + * + * @param[in] loader The address of the function to use, or `NULL`. + * + * @par Loader function signature + * @code + * PFN_vkVoidFunction vkGetInstanceProcAddr(VkInstance instance, const char* name) + * @endcode + * For more information about this function, see the + * [Vulkan Registry](https://www.khronos.org/registry/vulkan/). + * + * @errors None. + * + * @remark This function may be called before @ref glfwInit. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref vulkan_loader + * @sa @ref glfwInit + * + * @since Added in version 3.4. + * + * @ingroup init + */ +GLFWAPI void glfwInitVulkanLoader(PFN_vkGetInstanceProcAddr loader); + +#endif /*VK_VERSION_1_0*/ + +/*! @brief Retrieves the version of the GLFW library. + * + * This function retrieves the major, minor and revision numbers of the GLFW + * library. It is intended for when you are using GLFW as a shared library and + * want to ensure that you are using the minimum required version. + * + * Any or all of the version arguments may be `NULL`. + * + * @param[out] major Where to store the major version number, or `NULL`. + * @param[out] minor Where to store the minor version number, or `NULL`. + * @param[out] rev Where to store the revision number, or `NULL`. + * + * @errors None. + * + * @remark This function may be called before @ref glfwInit. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref intro_version + * @sa @ref glfwGetVersionString + * + * @since Added in version 1.0. + * + * @ingroup init + */ +GLFWAPI void glfwGetVersion(int* major, int* minor, int* rev); + +/*! @brief Returns a string describing the compile-time configuration. + * + * This function returns the compile-time generated + * [version string](@ref intro_version_string) of the GLFW library binary. It describes + * the version, platforms, compiler and any platform or operating system specific + * compile-time options. It should not be confused with the OpenGL or OpenGL ES version + * string, queried with `glGetString`. + * + * __Do not use the version string__ to parse the GLFW library version. The + * @ref glfwGetVersion function provides the version of the running library + * binary in numerical format. + * + * __Do not use the version string__ to parse what platforms are supported. The @ref + * glfwPlatformSupported function lets you query platform support. + * + * @return The ASCII encoded GLFW version string. + * + * @errors None. + * + * @remark This function may be called before @ref glfwInit. + * + * @pointer_lifetime The returned string is static and compile-time generated. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref intro_version + * @sa @ref glfwGetVersion + * + * @since Added in version 3.0. + * + * @ingroup init + */ +GLFWAPI const char* glfwGetVersionString(void); + +/*! @brief Returns and clears the last error for the calling thread. + * + * This function returns and clears the [error code](@ref errors) of the last + * error that occurred on the calling thread, and optionally a UTF-8 encoded + * human-readable description of it. If no error has occurred since the last + * call, it returns @ref GLFW_NO_ERROR (zero) and the description pointer is + * set to `NULL`. + * + * @param[in] description Where to store the error description pointer, or `NULL`. + * @return The last error code for the calling thread, or @ref GLFW_NO_ERROR + * (zero). + * + * @errors None. + * + * @pointer_lifetime The returned string is allocated and freed by GLFW. You + * should not free it yourself. It is guaranteed to be valid only until the + * next error occurs or the library is terminated. + * + * @remark This function may be called before @ref glfwInit. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref error_handling + * @sa @ref glfwSetErrorCallback + * + * @since Added in version 3.3. + * + * @ingroup init + */ +GLFWAPI int glfwGetError(const char** description); + +/*! @brief Sets the error callback. + * + * This function sets the error callback, which is called with an error code + * and a human-readable description each time a GLFW error occurs. + * + * The error code is set before the callback is called. Calling @ref + * glfwGetError from the error callback will return the same value as the error + * code argument. + * + * The error callback is called on the thread where the error occurred. If you + * are using GLFW from multiple threads, your error callback needs to be + * written accordingly. + * + * Because the description string may have been generated specifically for that + * error, it is not guaranteed to be valid after the callback has returned. If + * you wish to use it after the callback returns, you need to make a copy. + * + * Once set, the error callback remains set even after the library has been + * terminated. + * + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set. + * + * @callback_signature + * @code + * void callback_name(int error_code, const char* description) + * @endcode + * For more information about the callback parameters, see the + * [callback pointer type](@ref GLFWerrorfun). + * + * @errors None. + * + * @remark This function may be called before @ref glfwInit. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref error_handling + * @sa @ref glfwGetError + * + * @since Added in version 3.0. + * + * @ingroup init + */ +GLFWAPI GLFWerrorfun glfwSetErrorCallback(GLFWerrorfun callback); + +/*! @brief Returns the currently selected platform. + * + * This function returns the platform that was selected during initialization. The + * returned value will be one of `GLFW_PLATFORM_WIN32`, `GLFW_PLATFORM_COCOA`, + * `GLFW_PLATFORM_WAYLAND`, `GLFW_PLATFORM_X11` or `GLFW_PLATFORM_NULL`. + * + * @return The currently selected platform, or zero if an error occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref platform + * @sa @ref glfwPlatformSupported + * + * @since Added in version 3.4. + * + * @ingroup init + */ +GLFWAPI int glfwGetPlatform(void); + +/*! @brief Returns whether the library includes support for the specified platform. + * + * This function returns whether the library was compiled with support for the specified + * platform. The platform must be one of `GLFW_PLATFORM_WIN32`, `GLFW_PLATFORM_COCOA`, + * `GLFW_PLATFORM_WAYLAND`, `GLFW_PLATFORM_X11` or `GLFW_PLATFORM_NULL`. + * + * @param[in] platform The platform to query. + * @return `GLFW_TRUE` if the platform is supported, or `GLFW_FALSE` otherwise. + * + * @errors Possible errors include @ref GLFW_INVALID_ENUM. + * + * @remark This function may be called before @ref glfwInit. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref platform + * @sa @ref glfwGetPlatform + * + * @since Added in version 3.4. + * + * @ingroup init + */ +GLFWAPI int glfwPlatformSupported(int platform); + +/*! @brief Returns the currently connected monitors. + * + * This function returns an array of handles for all currently connected + * monitors. The primary monitor is always first in the returned array. If no + * monitors were found, this function returns `NULL`. + * + * @param[out] count Where to store the number of monitors in the returned + * array. This is set to zero if an error occurred. + * @return An array of monitor handles, or `NULL` if no monitors were found or + * if an [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @pointer_lifetime The returned array is allocated and freed by GLFW. You + * should not free it yourself. It is guaranteed to be valid only until the + * monitor configuration changes or the library is terminated. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref monitor_monitors + * @sa @ref monitor_event + * @sa @ref glfwGetPrimaryMonitor + * + * @since Added in version 3.0. + * + * @ingroup monitor + */ +GLFWAPI GLFWmonitor** glfwGetMonitors(int* count); + +/*! @brief Returns the primary monitor. + * + * This function returns the primary monitor. This is usually the monitor + * where elements like the task bar or global menu bar are located. + * + * @return The primary monitor, or `NULL` if no monitors were found or if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @remark The primary monitor is always first in the array returned by @ref + * glfwGetMonitors. + * + * @sa @ref monitor_monitors + * @sa @ref glfwGetMonitors + * + * @since Added in version 3.0. + * + * @ingroup monitor + */ +GLFWAPI GLFWmonitor* glfwGetPrimaryMonitor(void); + +/*! @brief Returns the position of the monitor's viewport on the virtual screen. + * + * This function returns the position, in screen coordinates, of the upper-left + * corner of the specified monitor. + * + * Any or all of the position arguments may be `NULL`. If an error occurs, all + * non-`NULL` position arguments will be set to zero. + * + * @param[in] monitor The monitor to query. + * @param[out] xpos Where to store the monitor x-coordinate, or `NULL`. + * @param[out] ypos Where to store the monitor y-coordinate, or `NULL`. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref monitor_properties + * + * @since Added in version 3.0. + * + * @ingroup monitor + */ +GLFWAPI void glfwGetMonitorPos(GLFWmonitor* monitor, int* xpos, int* ypos); + +/*! @brief Retrieves the work area of the monitor. + * + * This function returns the position, in screen coordinates, of the upper-left + * corner of the work area of the specified monitor along with the work area + * size in screen coordinates. The work area is defined as the area of the + * monitor not occluded by the window system task bar where present. If no + * task bar exists then the work area is the monitor resolution in screen + * coordinates. + * + * Any or all of the position and size arguments may be `NULL`. If an error + * occurs, all non-`NULL` position and size arguments will be set to zero. + * + * @param[in] monitor The monitor to query. + * @param[out] xpos Where to store the monitor x-coordinate, or `NULL`. + * @param[out] ypos Where to store the monitor y-coordinate, or `NULL`. + * @param[out] width Where to store the monitor width, or `NULL`. + * @param[out] height Where to store the monitor height, or `NULL`. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref monitor_workarea + * + * @since Added in version 3.3. + * + * @ingroup monitor + */ +GLFWAPI void glfwGetMonitorWorkarea(GLFWmonitor* monitor, int* xpos, int* ypos, int* width, int* height); + +/*! @brief Returns the physical size of the monitor. + * + * This function returns the size, in millimetres, of the display area of the + * specified monitor. + * + * Some platforms do not provide accurate monitor size information, either + * because the monitor + * [EDID](https://en.wikipedia.org/wiki/Extended_display_identification_data) + * data is incorrect or because the driver does not report it accurately. + * + * Any or all of the size arguments may be `NULL`. If an error occurs, all + * non-`NULL` size arguments will be set to zero. + * + * @param[in] monitor The monitor to query. + * @param[out] widthMM Where to store the width, in millimetres, of the + * monitor's display area, or `NULL`. + * @param[out] heightMM Where to store the height, in millimetres, of the + * monitor's display area, or `NULL`. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @remark @win32 On Windows 8 and earlier the physical size is calculated from + * the current resolution and system DPI instead of querying the monitor EDID data. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref monitor_properties + * + * @since Added in version 3.0. + * + * @ingroup monitor + */ +GLFWAPI void glfwGetMonitorPhysicalSize(GLFWmonitor* monitor, int* widthMM, int* heightMM); + +/*! @brief Retrieves the content scale for the specified monitor. + * + * This function retrieves the content scale for the specified monitor. The + * content scale is the ratio between the current DPI and the platform's + * default DPI. This is especially important for text and any UI elements. If + * the pixel dimensions of your UI scaled by this look appropriate on your + * machine then it should appear at a reasonable size on other machines + * regardless of their DPI and scaling settings. This relies on the system DPI + * and scaling settings being somewhat correct. + * + * The content scale may depend on both the monitor resolution and pixel + * density and on user settings. It may be very different from the raw DPI + * calculated from the physical size and current resolution. + * + * @param[in] monitor The monitor to query. + * @param[out] xscale Where to store the x-axis content scale, or `NULL`. + * @param[out] yscale Where to store the y-axis content scale, or `NULL`. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref monitor_scale + * @sa @ref glfwGetWindowContentScale + * + * @since Added in version 3.3. + * + * @ingroup monitor + */ +GLFWAPI void glfwGetMonitorContentScale(GLFWmonitor* monitor, float* xscale, float* yscale); + +/*! @brief Returns the name of the specified monitor. + * + * This function returns a human-readable name, encoded as UTF-8, of the + * specified monitor. The name typically reflects the make and model of the + * monitor and is not guaranteed to be unique among the connected monitors. + * + * @param[in] monitor The monitor to query. + * @return The UTF-8 encoded name of the monitor, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @pointer_lifetime The returned string is allocated and freed by GLFW. You + * should not free it yourself. It is valid until the specified monitor is + * disconnected or the library is terminated. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref monitor_properties + * + * @since Added in version 3.0. + * + * @ingroup monitor + */ +GLFWAPI const char* glfwGetMonitorName(GLFWmonitor* monitor); + +/*! @brief Sets the user pointer of the specified monitor. + * + * This function sets the user-defined pointer of the specified monitor. The + * current value is retained until the monitor is disconnected. The initial + * value is `NULL`. + * + * This function may be called from the monitor callback, even for a monitor + * that is being disconnected. + * + * @param[in] monitor The monitor whose pointer to set. + * @param[in] pointer The new value. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @sa @ref monitor_userptr + * @sa @ref glfwGetMonitorUserPointer + * + * @since Added in version 3.3. + * + * @ingroup monitor + */ +GLFWAPI void glfwSetMonitorUserPointer(GLFWmonitor* monitor, void* pointer); + +/*! @brief Returns the user pointer of the specified monitor. + * + * This function returns the current value of the user-defined pointer of the + * specified monitor. The initial value is `NULL`. + * + * This function may be called from the monitor callback, even for a monitor + * that is being disconnected. + * + * @param[in] monitor The monitor whose pointer to return. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @sa @ref monitor_userptr + * @sa @ref glfwSetMonitorUserPointer + * + * @since Added in version 3.3. + * + * @ingroup monitor + */ +GLFWAPI void* glfwGetMonitorUserPointer(GLFWmonitor* monitor); + +/*! @brief Sets the monitor configuration callback. + * + * This function sets the monitor configuration callback, or removes the + * currently set callback. This is called when a monitor is connected to or + * disconnected from the system. + * + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWmonitor* monitor, int event) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWmonitorfun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref monitor_event + * + * @since Added in version 3.0. + * + * @ingroup monitor + */ +GLFWAPI GLFWmonitorfun glfwSetMonitorCallback(GLFWmonitorfun callback); + +/*! @brief Returns the available video modes for the specified monitor. + * + * This function returns an array of all video modes supported by the specified + * monitor. The returned array is sorted in ascending order, first by color + * bit depth (the sum of all channel depths), then by resolution area (the + * product of width and height), then resolution width and finally by refresh + * rate. + * + * @param[in] monitor The monitor to query. + * @param[out] count Where to store the number of video modes in the returned + * array. This is set to zero if an error occurred. + * @return An array of video modes, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @pointer_lifetime The returned array is allocated and freed by GLFW. You + * should not free it yourself. It is valid until the specified monitor is + * disconnected, this function is called again for that monitor or the library + * is terminated. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref monitor_modes + * @sa @ref glfwGetVideoMode + * + * @since Added in version 1.0. + * @glfw3 Changed to return an array of modes for a specific monitor. + * + * @ingroup monitor + */ +GLFWAPI const GLFWvidmode* glfwGetVideoModes(GLFWmonitor* monitor, int* count); + +/*! @brief Returns the current mode of the specified monitor. + * + * This function returns the current video mode of the specified monitor. If + * you have created a full screen window for that monitor, the return value + * will depend on whether that window is iconified. + * + * @param[in] monitor The monitor to query. + * @return The current mode of the monitor, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @pointer_lifetime The returned array is allocated and freed by GLFW. You + * should not free it yourself. It is valid until the specified monitor is + * disconnected or the library is terminated. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref monitor_modes + * @sa @ref glfwGetVideoModes + * + * @since Added in version 3.0. Replaces `glfwGetDesktopMode`. + * + * @ingroup monitor + */ +GLFWAPI const GLFWvidmode* glfwGetVideoMode(GLFWmonitor* monitor); + +/*! @brief Generates a gamma ramp and sets it for the specified monitor. + * + * This function generates an appropriately sized gamma ramp from the specified + * exponent and then calls @ref glfwSetGammaRamp with it. The value must be + * a finite number greater than zero. + * + * The software controlled gamma ramp is applied _in addition_ to the hardware + * gamma correction, which today is usually an approximation of sRGB gamma. + * This means that setting a perfectly linear ramp, or gamma 1.0, will produce + * the default (usually sRGB-like) behavior. + * + * For gamma correct rendering with OpenGL or OpenGL ES, see the @ref + * GLFW_SRGB_CAPABLE hint. + * + * @param[in] monitor The monitor whose gamma ramp to set. + * @param[in] gamma The desired exponent. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_VALUE and @ref GLFW_PLATFORM_ERROR. + * + * @remark @wayland Gamma handling is a privileged protocol, this function + * will thus never be implemented and emits @ref GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref monitor_gamma + * + * @since Added in version 3.0. + * + * @ingroup monitor + */ +GLFWAPI void glfwSetGamma(GLFWmonitor* monitor, float gamma); + +/*! @brief Returns the current gamma ramp for the specified monitor. + * + * This function returns the current gamma ramp of the specified monitor. + * + * @param[in] monitor The monitor to query. + * @return The current gamma ramp, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @remark @wayland Gamma handling is a privileged protocol, this function + * will thus never be implemented and emits @ref GLFW_PLATFORM_ERROR while + * returning `NULL`. + * + * @pointer_lifetime The returned structure and its arrays are allocated and + * freed by GLFW. You should not free them yourself. They are valid until the + * specified monitor is disconnected, this function is called again for that + * monitor or the library is terminated. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref monitor_gamma + * + * @since Added in version 3.0. + * + * @ingroup monitor + */ +GLFWAPI const GLFWgammaramp* glfwGetGammaRamp(GLFWmonitor* monitor); + +/*! @brief Sets the current gamma ramp for the specified monitor. + * + * This function sets the current gamma ramp for the specified monitor. The + * original gamma ramp for that monitor is saved by GLFW the first time this + * function is called and is restored by @ref glfwTerminate. + * + * The software controlled gamma ramp is applied _in addition_ to the hardware + * gamma correction, which today is usually an approximation of sRGB gamma. + * This means that setting a perfectly linear ramp, or gamma 1.0, will produce + * the default (usually sRGB-like) behavior. + * + * For gamma correct rendering with OpenGL or OpenGL ES, see the @ref + * GLFW_SRGB_CAPABLE hint. + * + * @param[in] monitor The monitor whose gamma ramp to set. + * @param[in] ramp The gamma ramp to use. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @remark The size of the specified gamma ramp should match the size of the + * current ramp for that monitor. + * + * @remark @win32 The gamma ramp size must be 256. + * + * @remark @wayland Gamma handling is a privileged protocol, this function + * will thus never be implemented and emits @ref GLFW_PLATFORM_ERROR. + * + * @pointer_lifetime The specified gamma ramp is copied before this function + * returns. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref monitor_gamma + * + * @since Added in version 3.0. + * + * @ingroup monitor + */ +GLFWAPI void glfwSetGammaRamp(GLFWmonitor* monitor, const GLFWgammaramp* ramp); + +/*! @brief Resets all window hints to their default values. + * + * This function resets all window hints to their + * [default values](@ref window_hints_values). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_hints + * @sa @ref glfwWindowHint + * @sa @ref glfwWindowHintString + * + * @since Added in version 3.0. + * + * @ingroup window + */ +GLFWAPI void glfwDefaultWindowHints(void); + +/*! @brief Sets the specified window hint to the desired value. + * + * This function sets hints for the next call to @ref glfwCreateWindow. The + * hints, once set, retain their values until changed by a call to this + * function or @ref glfwDefaultWindowHints, or until the library is terminated. + * + * Only integer value hints can be set with this function. String value hints + * are set with @ref glfwWindowHintString. + * + * This function does not check whether the specified hint values are valid. + * If you set hints to invalid values this will instead be reported by the next + * call to @ref glfwCreateWindow. + * + * Some hints are platform specific. These may be set on any platform but they + * will only affect their specific platform. Other platforms will ignore them. + * Setting these hints requires no platform specific headers or functions. + * + * @param[in] hint The [window hint](@ref window_hints) to set. + * @param[in] value The new value of the window hint. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_INVALID_ENUM. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_hints + * @sa @ref glfwWindowHintString + * @sa @ref glfwDefaultWindowHints + * + * @since Added in version 3.0. Replaces `glfwOpenWindowHint`. + * + * @ingroup window + */ +GLFWAPI void glfwWindowHint(int hint, int value); + +/*! @brief Sets the specified window hint to the desired value. + * + * This function sets hints for the next call to @ref glfwCreateWindow. The + * hints, once set, retain their values until changed by a call to this + * function or @ref glfwDefaultWindowHints, or until the library is terminated. + * + * Only string type hints can be set with this function. Integer value hints + * are set with @ref glfwWindowHint. + * + * This function does not check whether the specified hint values are valid. + * If you set hints to invalid values this will instead be reported by the next + * call to @ref glfwCreateWindow. + * + * Some hints are platform specific. These may be set on any platform but they + * will only affect their specific platform. Other platforms will ignore them. + * Setting these hints requires no platform specific headers or functions. + * + * @param[in] hint The [window hint](@ref window_hints) to set. + * @param[in] value The new value of the window hint. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_INVALID_ENUM. + * + * @pointer_lifetime The specified string is copied before this function + * returns. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_hints + * @sa @ref glfwWindowHint + * @sa @ref glfwDefaultWindowHints + * + * @since Added in version 3.3. + * + * @ingroup window + */ +GLFWAPI void glfwWindowHintString(int hint, const char* value); + +/*! @brief Creates a window and its associated context. + * + * This function creates a window and its associated OpenGL or OpenGL ES + * context. Most of the options controlling how the window and its context + * should be created are specified with [window hints](@ref window_hints). + * + * Successful creation does not change which context is current. Before you + * can use the newly created context, you need to + * [make it current](@ref context_current). For information about the `share` + * parameter, see @ref context_sharing. + * + * The created window, framebuffer and context may differ from what you + * requested, as not all parameters and hints are + * [hard constraints](@ref window_hints_hard). This includes the size of the + * window, especially for full screen windows. To query the actual attributes + * of the created window, framebuffer and context, see @ref + * glfwGetWindowAttrib, @ref glfwGetWindowSize and @ref glfwGetFramebufferSize. + * + * To create a full screen window, you need to specify the monitor the window + * will cover. If no monitor is specified, the window will be windowed mode. + * Unless you have a way for the user to choose a specific monitor, it is + * recommended that you pick the primary monitor. For more information on how + * to query connected monitors, see @ref monitor_monitors. + * + * For full screen windows, the specified size becomes the resolution of the + * window's _desired video mode_. As long as a full screen window is not + * iconified, the supported video mode most closely matching the desired video + * mode is set for the specified monitor. For more information about full + * screen windows, including the creation of so called _windowed full screen_ + * or _borderless full screen_ windows, see @ref window_windowed_full_screen. + * + * Once you have created the window, you can switch it between windowed and + * full screen mode with @ref glfwSetWindowMonitor. This will not affect its + * OpenGL or OpenGL ES context. + * + * By default, newly created windows use the placement recommended by the + * window system. To create the window at a specific position, make it + * initially invisible using the [GLFW_VISIBLE](@ref GLFW_VISIBLE_hint) window + * hint, set its [position](@ref window_pos) and then [show](@ref window_hide) + * it. + * + * As long as at least one full screen window is not iconified, the screensaver + * is prohibited from starting. + * + * Window systems put limits on window sizes. Very large or very small window + * dimensions may be overridden by the window system on creation. Check the + * actual [size](@ref window_size) after creation. + * + * The [swap interval](@ref buffer_swap) is not set during window creation and + * the initial value may vary depending on driver settings and defaults. + * + * @param[in] width The desired width, in screen coordinates, of the window. + * This must be greater than zero. + * @param[in] height The desired height, in screen coordinates, of the window. + * This must be greater than zero. + * @param[in] title The initial, UTF-8 encoded window title. + * @param[in] monitor The monitor to use for full screen mode, or `NULL` for + * windowed mode. + * @param[in] share The window whose context to share resources with, or `NULL` + * to not share resources. + * @return The handle of the created window, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_ENUM, @ref GLFW_INVALID_VALUE, @ref GLFW_API_UNAVAILABLE, @ref + * GLFW_VERSION_UNAVAILABLE, @ref GLFW_FORMAT_UNAVAILABLE and @ref + * GLFW_PLATFORM_ERROR. + * + * @remark @win32 Window creation will fail if the Microsoft GDI software + * OpenGL implementation is the only one available. + * + * @remark @win32 If the executable has an icon resource named `GLFW_ICON,` it + * will be set as the initial icon for the window. If no such icon is present, + * the `IDI_APPLICATION` icon will be used instead. To set a different icon, + * see @ref glfwSetWindowIcon. + * + * @remark @win32 The context to share resources with must not be current on + * any other thread. + * + * @remark @macos The OS only supports core profile contexts for OpenGL + * versions 3.2 and later. Before creating an OpenGL context of version 3.2 or + * later you must set the [GLFW_OPENGL_PROFILE](@ref GLFW_OPENGL_PROFILE_hint) + * hint accordingly. OpenGL 3.0 and 3.1 contexts are not supported at all + * on macOS. + * + * @remark @macos The GLFW window has no icon, as it is not a document + * window, but the dock icon will be the same as the application bundle's icon. + * For more information on bundles, see the + * [Bundle Programming Guide](https://developer.apple.com/library/mac/documentation/CoreFoundation/Conceptual/CFBundles/) + * in the Mac Developer Library. + * + * @remark @macos On OS X 10.10 and later the window frame will not be rendered + * at full resolution on Retina displays unless the + * [GLFW_COCOA_RETINA_FRAMEBUFFER](@ref GLFW_COCOA_RETINA_FRAMEBUFFER_hint) + * hint is `GLFW_TRUE` and the `NSHighResolutionCapable` key is enabled in the + * application bundle's `Info.plist`. For more information, see + * [High Resolution Guidelines for OS X](https://developer.apple.com/library/mac/documentation/GraphicsAnimation/Conceptual/HighResolutionOSX/Explained/Explained.html) + * in the Mac Developer Library. The GLFW test and example programs use + * a custom `Info.plist` template for this, which can be found as + * `CMake/Info.plist.in` in the source tree. + * + * @remark @macos When activating frame autosaving with + * [GLFW_COCOA_FRAME_NAME](@ref GLFW_COCOA_FRAME_NAME_hint), the specified + * window size and position may be overridden by previously saved values. + * + * @remark @x11 Some window managers will not respect the placement of + * initially hidden windows. + * + * @remark @x11 Due to the asynchronous nature of X11, it may take a moment for + * a window to reach its requested state. This means you may not be able to + * query the final size, position or other attributes directly after window + * creation. + * + * @remark @x11 The class part of the `WM_CLASS` window property will by + * default be set to the window title passed to this function. The instance + * part will use the contents of the `RESOURCE_NAME` environment variable, if + * present and not empty, or fall back to the window title. Set the + * [GLFW_X11_CLASS_NAME](@ref GLFW_X11_CLASS_NAME_hint) and + * [GLFW_X11_INSTANCE_NAME](@ref GLFW_X11_INSTANCE_NAME_hint) window hints to + * override this. + * + * @remark @wayland Compositors should implement the xdg-decoration protocol + * for GLFW to decorate the window properly. If this protocol isn't + * supported, or if the compositor prefers client-side decorations, a very + * simple fallback frame will be drawn using the wp_viewporter protocol. A + * compositor can still emit close, maximize or fullscreen events, using for + * instance a keybind mechanism. If neither of these protocols is supported, + * the window won't be decorated. + * + * @remark @wayland A full screen window will not attempt to change the mode, + * no matter what the requested size or refresh rate. + * + * @remark @wayland Screensaver inhibition requires the idle-inhibit protocol + * to be implemented in the user's compositor. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_creation + * @sa @ref glfwDestroyWindow + * + * @since Added in version 3.0. Replaces `glfwOpenWindow`. + * + * @ingroup window + */ +GLFWAPI GLFWwindow* glfwCreateWindow(int width, int height, const char* title, GLFWmonitor* monitor, GLFWwindow* share); + +/*! @brief Destroys the specified window and its context. + * + * This function destroys the specified window and its context. On calling + * this function, no further callbacks will be called for that window. + * + * If the context of the specified window is current on the main thread, it is + * detached before being destroyed. + * + * @param[in] window The window to destroy. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @note The context of the specified window must not be current on any other + * thread when this function is called. + * + * @reentrancy This function must not be called from a callback. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_creation + * @sa @ref glfwCreateWindow + * + * @since Added in version 3.0. Replaces `glfwCloseWindow`. + * + * @ingroup window + */ +GLFWAPI void glfwDestroyWindow(GLFWwindow* window); + +/*! @brief Checks the close flag of the specified window. + * + * This function returns the value of the close flag of the specified window. + * + * @param[in] window The window to query. + * @return The value of the close flag. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @sa @ref window_close + * + * @since Added in version 3.0. + * + * @ingroup window + */ +GLFWAPI int glfwWindowShouldClose(GLFWwindow* window); + +/*! @brief Sets the close flag of the specified window. + * + * This function sets the value of the close flag of the specified window. + * This can be used to override the user's attempt to close the window, or + * to signal that it should be closed. + * + * @param[in] window The window whose flag to change. + * @param[in] value The new value. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @sa @ref window_close + * + * @since Added in version 3.0. + * + * @ingroup window + */ +GLFWAPI void glfwSetWindowShouldClose(GLFWwindow* window, int value); + +/*! @brief Sets the title of the specified window. + * + * This function sets the window title, encoded as UTF-8, of the specified + * window. + * + * @param[in] window The window whose title to change. + * @param[in] title The UTF-8 encoded window title. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @remark @macos The window title will not be updated until the next time you + * process events. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_title + * + * @since Added in version 1.0. + * @glfw3 Added window handle parameter. + * + * @ingroup window + */ +GLFWAPI void glfwSetWindowTitle(GLFWwindow* window, const char* title); + +/*! @brief Sets the icon for the specified window. + * + * This function sets the icon of the specified window. If passed an array of + * candidate images, those of or closest to the sizes desired by the system are + * selected. If no images are specified, the window reverts to its default + * icon. + * + * The pixels are 32-bit, little-endian, non-premultiplied RGBA, i.e. eight + * bits per channel with the red channel first. They are arranged canonically + * as packed sequential rows, starting from the top-left corner. + * + * The desired image sizes varies depending on platform and system settings. + * The selected images will be rescaled as needed. Good sizes include 16x16, + * 32x32 and 48x48. + * + * @param[in] window The window whose icon to set. + * @param[in] count The number of images in the specified array, or zero to + * revert to the default window icon. + * @param[in] images The images to create the icon from. This is ignored if + * count is zero. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_PLATFORM_ERROR and @ref GLFW_FEATURE_UNAVAILABLE (see remarks). + * + * @pointer_lifetime The specified image data is copied before this function + * returns. + * + * @remark @macos Regular windows do not have icons on macOS. This function + * will emit @ref GLFW_FEATURE_UNAVAILABLE. The dock icon will be the same as + * the application bundle's icon. For more information on bundles, see the + * [Bundle Programming Guide](https://developer.apple.com/library/mac/documentation/CoreFoundation/Conceptual/CFBundles/) + * in the Mac Developer Library. + * + * @remark @wayland There is no existing protocol to change an icon, the + * window will thus inherit the one defined in the application's desktop file. + * This function will emit @ref GLFW_FEATURE_UNAVAILABLE. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_icon + * + * @since Added in version 3.2. + * + * @ingroup window + */ +GLFWAPI void glfwSetWindowIcon(GLFWwindow* window, int count, const GLFWimage* images); + +/*! @brief Retrieves the position of the content area of the specified window. + * + * This function retrieves the position, in screen coordinates, of the + * upper-left corner of the content area of the specified window. + * + * Any or all of the position arguments may be `NULL`. If an error occurs, all + * non-`NULL` position arguments will be set to zero. + * + * @param[in] window The window to query. + * @param[out] xpos Where to store the x-coordinate of the upper-left corner of + * the content area, or `NULL`. + * @param[out] ypos Where to store the y-coordinate of the upper-left corner of + * the content area, or `NULL`. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_PLATFORM_ERROR and @ref GLFW_FEATURE_UNAVAILABLE (see remarks). + * + * @remark @wayland There is no way for an application to retrieve the global + * position of its windows. This function will emit @ref + * GLFW_FEATURE_UNAVAILABLE. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_pos + * @sa @ref glfwSetWindowPos + * + * @since Added in version 3.0. + * + * @ingroup window + */ +GLFWAPI void glfwGetWindowPos(GLFWwindow* window, int* xpos, int* ypos); + +/*! @brief Sets the position of the content area of the specified window. + * + * This function sets the position, in screen coordinates, of the upper-left + * corner of the content area of the specified windowed mode window. If the + * window is a full screen window, this function does nothing. + * + * __Do not use this function__ to move an already visible window unless you + * have very good reasons for doing so, as it will confuse and annoy the user. + * + * The window manager may put limits on what positions are allowed. GLFW + * cannot and should not override these limits. + * + * @param[in] window The window to query. + * @param[in] xpos The x-coordinate of the upper-left corner of the content area. + * @param[in] ypos The y-coordinate of the upper-left corner of the content area. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_PLATFORM_ERROR and @ref GLFW_FEATURE_UNAVAILABLE (see remarks). + * + * @remark @wayland There is no way for an application to set the global + * position of its windows. This function will emit @ref + * GLFW_FEATURE_UNAVAILABLE. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_pos + * @sa @ref glfwGetWindowPos + * + * @since Added in version 1.0. + * @glfw3 Added window handle parameter. + * + * @ingroup window + */ +GLFWAPI void glfwSetWindowPos(GLFWwindow* window, int xpos, int ypos); + +/*! @brief Retrieves the size of the content area of the specified window. + * + * This function retrieves the size, in screen coordinates, of the content area + * of the specified window. If you wish to retrieve the size of the + * framebuffer of the window in pixels, see @ref glfwGetFramebufferSize. + * + * Any or all of the size arguments may be `NULL`. If an error occurs, all + * non-`NULL` size arguments will be set to zero. + * + * @param[in] window The window whose size to retrieve. + * @param[out] width Where to store the width, in screen coordinates, of the + * content area, or `NULL`. + * @param[out] height Where to store the height, in screen coordinates, of the + * content area, or `NULL`. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_size + * @sa @ref glfwSetWindowSize + * + * @since Added in version 1.0. + * @glfw3 Added window handle parameter. + * + * @ingroup window + */ +GLFWAPI void glfwGetWindowSize(GLFWwindow* window, int* width, int* height); + +/*! @brief Sets the size limits of the specified window. + * + * This function sets the size limits of the content area of the specified + * window. If the window is full screen, the size limits only take effect + * once it is made windowed. If the window is not resizable, this function + * does nothing. + * + * The size limits are applied immediately to a windowed mode window and may + * cause it to be resized. + * + * The maximum dimensions must be greater than or equal to the minimum + * dimensions and all must be greater than or equal to zero. + * + * @param[in] window The window to set limits for. + * @param[in] minwidth The minimum width, in screen coordinates, of the content + * area, or `GLFW_DONT_CARE`. + * @param[in] minheight The minimum height, in screen coordinates, of the + * content area, or `GLFW_DONT_CARE`. + * @param[in] maxwidth The maximum width, in screen coordinates, of the content + * area, or `GLFW_DONT_CARE`. + * @param[in] maxheight The maximum height, in screen coordinates, of the + * content area, or `GLFW_DONT_CARE`. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_VALUE and @ref GLFW_PLATFORM_ERROR. + * + * @remark If you set size limits and an aspect ratio that conflict, the + * results are undefined. + * + * @remark @wayland The size limits will not be applied until the window is + * actually resized, either by the user or by the compositor. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_sizelimits + * @sa @ref glfwSetWindowAspectRatio + * + * @since Added in version 3.2. + * + * @ingroup window + */ +GLFWAPI void glfwSetWindowSizeLimits(GLFWwindow* window, int minwidth, int minheight, int maxwidth, int maxheight); + +/*! @brief Sets the aspect ratio of the specified window. + * + * This function sets the required aspect ratio of the content area of the + * specified window. If the window is full screen, the aspect ratio only takes + * effect once it is made windowed. If the window is not resizable, this + * function does nothing. + * + * The aspect ratio is specified as a numerator and a denominator and both + * values must be greater than zero. For example, the common 16:9 aspect ratio + * is specified as 16 and 9, respectively. + * + * If the numerator and denominator is set to `GLFW_DONT_CARE` then the aspect + * ratio limit is disabled. + * + * The aspect ratio is applied immediately to a windowed mode window and may + * cause it to be resized. + * + * @param[in] window The window to set limits for. + * @param[in] numer The numerator of the desired aspect ratio, or + * `GLFW_DONT_CARE`. + * @param[in] denom The denominator of the desired aspect ratio, or + * `GLFW_DONT_CARE`. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_VALUE and @ref GLFW_PLATFORM_ERROR. + * + * @remark If you set size limits and an aspect ratio that conflict, the + * results are undefined. + * + * @remark @wayland The aspect ratio will not be applied until the window is + * actually resized, either by the user or by the compositor. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_sizelimits + * @sa @ref glfwSetWindowSizeLimits + * + * @since Added in version 3.2. + * + * @ingroup window + */ +GLFWAPI void glfwSetWindowAspectRatio(GLFWwindow* window, int numer, int denom); + +/*! @brief Sets the size of the content area of the specified window. + * + * This function sets the size, in screen coordinates, of the content area of + * the specified window. + * + * For full screen windows, this function updates the resolution of its desired + * video mode and switches to the video mode closest to it, without affecting + * the window's context. As the context is unaffected, the bit depths of the + * framebuffer remain unchanged. + * + * If you wish to update the refresh rate of the desired video mode in addition + * to its resolution, see @ref glfwSetWindowMonitor. + * + * The window manager may put limits on what sizes are allowed. GLFW cannot + * and should not override these limits. + * + * @param[in] window The window to resize. + * @param[in] width The desired width, in screen coordinates, of the window + * content area. + * @param[in] height The desired height, in screen coordinates, of the window + * content area. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @remark @wayland A full screen window will not attempt to change the mode, + * no matter what the requested size. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_size + * @sa @ref glfwGetWindowSize + * @sa @ref glfwSetWindowMonitor + * + * @since Added in version 1.0. + * @glfw3 Added window handle parameter. + * + * @ingroup window + */ +GLFWAPI void glfwSetWindowSize(GLFWwindow* window, int width, int height); + +/*! @brief Retrieves the size of the framebuffer of the specified window. + * + * This function retrieves the size, in pixels, of the framebuffer of the + * specified window. If you wish to retrieve the size of the window in screen + * coordinates, see @ref glfwGetWindowSize. + * + * Any or all of the size arguments may be `NULL`. If an error occurs, all + * non-`NULL` size arguments will be set to zero. + * + * @param[in] window The window whose framebuffer to query. + * @param[out] width Where to store the width, in pixels, of the framebuffer, + * or `NULL`. + * @param[out] height Where to store the height, in pixels, of the framebuffer, + * or `NULL`. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_fbsize + * @sa @ref glfwSetFramebufferSizeCallback + * + * @since Added in version 3.0. + * + * @ingroup window + */ +GLFWAPI void glfwGetFramebufferSize(GLFWwindow* window, int* width, int* height); + +/*! @brief Retrieves the size of the frame of the window. + * + * This function retrieves the size, in screen coordinates, of each edge of the + * frame of the specified window. This size includes the title bar, if the + * window has one. The size of the frame may vary depending on the + * [window-related hints](@ref window_hints_wnd) used to create it. + * + * Because this function retrieves the size of each window frame edge and not + * the offset along a particular coordinate axis, the retrieved values will + * always be zero or positive. + * + * Any or all of the size arguments may be `NULL`. If an error occurs, all + * non-`NULL` size arguments will be set to zero. + * + * @param[in] window The window whose frame size to query. + * @param[out] left Where to store the size, in screen coordinates, of the left + * edge of the window frame, or `NULL`. + * @param[out] top Where to store the size, in screen coordinates, of the top + * edge of the window frame, or `NULL`. + * @param[out] right Where to store the size, in screen coordinates, of the + * right edge of the window frame, or `NULL`. + * @param[out] bottom Where to store the size, in screen coordinates, of the + * bottom edge of the window frame, or `NULL`. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_size + * + * @since Added in version 3.1. + * + * @ingroup window + */ +GLFWAPI void glfwGetWindowFrameSize(GLFWwindow* window, int* left, int* top, int* right, int* bottom); + +/*! @brief Retrieves the content scale for the specified window. + * + * This function retrieves the content scale for the specified window. The + * content scale is the ratio between the current DPI and the platform's + * default DPI. This is especially important for text and any UI elements. If + * the pixel dimensions of your UI scaled by this look appropriate on your + * machine then it should appear at a reasonable size on other machines + * regardless of their DPI and scaling settings. This relies on the system DPI + * and scaling settings being somewhat correct. + * + * On platforms where each monitors can have its own content scale, the window + * content scale will depend on which monitor the system considers the window + * to be on. + * + * @param[in] window The window to query. + * @param[out] xscale Where to store the x-axis content scale, or `NULL`. + * @param[out] yscale Where to store the y-axis content scale, or `NULL`. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_scale + * @sa @ref glfwSetWindowContentScaleCallback + * @sa @ref glfwGetMonitorContentScale + * + * @since Added in version 3.3. + * + * @ingroup window + */ +GLFWAPI void glfwGetWindowContentScale(GLFWwindow* window, float* xscale, float* yscale); + +/*! @brief Returns the opacity of the whole window. + * + * This function returns the opacity of the window, including any decorations. + * + * The opacity (or alpha) value is a positive finite number between zero and + * one, where zero is fully transparent and one is fully opaque. If the system + * does not support whole window transparency, this function always returns one. + * + * The initial opacity value for newly created windows is one. + * + * @param[in] window The window to query. + * @return The opacity value of the specified window. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_transparency + * @sa @ref glfwSetWindowOpacity + * + * @since Added in version 3.3. + * + * @ingroup window + */ +GLFWAPI float glfwGetWindowOpacity(GLFWwindow* window); + +/*! @brief Sets the opacity of the whole window. + * + * This function sets the opacity of the window, including any decorations. + * + * The opacity (or alpha) value is a positive finite number between zero and + * one, where zero is fully transparent and one is fully opaque. + * + * The initial opacity value for newly created windows is one. + * + * A window created with framebuffer transparency may not use whole window + * transparency. The results of doing this are undefined. + * + * @param[in] window The window to set the opacity for. + * @param[in] opacity The desired opacity of the specified window. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_PLATFORM_ERROR and @ref GLFW_FEATURE_UNAVAILABLE (see remarks). + * + * @remark @wayland There is no way to set an opacity factor for a window. + * This function will emit @ref GLFW_FEATURE_UNAVAILABLE. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_transparency + * @sa @ref glfwGetWindowOpacity + * + * @since Added in version 3.3. + * + * @ingroup window + */ +GLFWAPI void glfwSetWindowOpacity(GLFWwindow* window, float opacity); + +/*! @brief Iconifies the specified window. + * + * This function iconifies (minimizes) the specified window if it was + * previously restored. If the window is already iconified, this function does + * nothing. + * + * If the specified window is a full screen window, the original monitor + * resolution is restored until the window is restored. + * + * @param[in] window The window to iconify. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @remark @wayland Once a window is iconified, @ref glfwRestoreWindow won’t + * be able to restore it. This is a design decision of the xdg-shell + * protocol. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_iconify + * @sa @ref glfwRestoreWindow + * @sa @ref glfwMaximizeWindow + * + * @since Added in version 2.1. + * @glfw3 Added window handle parameter. + * + * @ingroup window + */ +GLFWAPI void glfwIconifyWindow(GLFWwindow* window); + +/*! @brief Restores the specified window. + * + * This function restores the specified window if it was previously iconified + * (minimized) or maximized. If the window is already restored, this function + * does nothing. + * + * If the specified window is a full screen window, the resolution chosen for + * the window is restored on the selected monitor. + * + * @param[in] window The window to restore. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_iconify + * @sa @ref glfwIconifyWindow + * @sa @ref glfwMaximizeWindow + * + * @since Added in version 2.1. + * @glfw3 Added window handle parameter. + * + * @ingroup window + */ +GLFWAPI void glfwRestoreWindow(GLFWwindow* window); + +/*! @brief Maximizes the specified window. + * + * This function maximizes the specified window if it was previously not + * maximized. If the window is already maximized, this function does nothing. + * + * If the specified window is a full screen window, this function does nothing. + * + * @param[in] window The window to maximize. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @par Thread Safety + * This function may only be called from the main thread. + * + * @sa @ref window_iconify + * @sa @ref glfwIconifyWindow + * @sa @ref glfwRestoreWindow + * + * @since Added in GLFW 3.2. + * + * @ingroup window + */ +GLFWAPI void glfwMaximizeWindow(GLFWwindow* window); + +/*! @brief Makes the specified window visible. + * + * This function makes the specified window visible if it was previously + * hidden. If the window is already visible or is in full screen mode, this + * function does nothing. + * + * By default, windowed mode windows are focused when shown + * Set the [GLFW_FOCUS_ON_SHOW](@ref GLFW_FOCUS_ON_SHOW_hint) window hint + * to change this behavior for all newly created windows, or change the + * behavior for an existing window with @ref glfwSetWindowAttrib. + * + * @param[in] window The window to make visible. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_hide + * @sa @ref glfwHideWindow + * + * @since Added in version 3.0. + * + * @ingroup window + */ +GLFWAPI void glfwShowWindow(GLFWwindow* window); + +/*! @brief Hides the specified window. + * + * This function hides the specified window if it was previously visible. If + * the window is already hidden or is in full screen mode, this function does + * nothing. + * + * @param[in] window The window to hide. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_hide + * @sa @ref glfwShowWindow + * + * @since Added in version 3.0. + * + * @ingroup window + */ +GLFWAPI void glfwHideWindow(GLFWwindow* window); + +/*! @brief Brings the specified window to front and sets input focus. + * + * This function brings the specified window to front and sets input focus. + * The window should already be visible and not iconified. + * + * By default, both windowed and full screen mode windows are focused when + * initially created. Set the [GLFW_FOCUSED](@ref GLFW_FOCUSED_hint) to + * disable this behavior. + * + * Also by default, windowed mode windows are focused when shown + * with @ref glfwShowWindow. Set the + * [GLFW_FOCUS_ON_SHOW](@ref GLFW_FOCUS_ON_SHOW_hint) to disable this behavior. + * + * __Do not use this function__ to steal focus from other applications unless + * you are certain that is what the user wants. Focus stealing can be + * extremely disruptive. + * + * For a less disruptive way of getting the user's attention, see + * [attention requests](@ref window_attention). + * + * @param[in] window The window to give input focus. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_PLATFORM_ERROR and @ref GLFW_FEATURE_UNAVAILABLE (see remarks). + * + * @remark @wayland It is not possible for an application to set the input + * focus. This function will emit @ref GLFW_FEATURE_UNAVAILABLE. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_focus + * @sa @ref window_attention + * + * @since Added in version 3.2. + * + * @ingroup window + */ +GLFWAPI void glfwFocusWindow(GLFWwindow* window); + +/*! @brief Requests user attention to the specified window. + * + * This function requests user attention to the specified window. On + * platforms where this is not supported, attention is requested to the + * application as a whole. + * + * Once the user has given attention, usually by focusing the window or + * application, the system will end the request automatically. + * + * @param[in] window The window to request attention to. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @remark @macos Attention is requested to the application as a whole, not the + * specific window. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_attention + * + * @since Added in version 3.3. + * + * @ingroup window + */ +GLFWAPI void glfwRequestWindowAttention(GLFWwindow* window); + +/*! @brief Returns the monitor that the window uses for full screen mode. + * + * This function returns the handle of the monitor that the specified window is + * in full screen on. + * + * @param[in] window The window to query. + * @return The monitor, or `NULL` if the window is in windowed mode or an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_monitor + * @sa @ref glfwSetWindowMonitor + * + * @since Added in version 3.0. + * + * @ingroup window + */ +GLFWAPI GLFWmonitor* glfwGetWindowMonitor(GLFWwindow* window); + +/*! @brief Sets the mode, monitor, video mode and placement of a window. + * + * This function sets the monitor that the window uses for full screen mode or, + * if the monitor is `NULL`, makes it windowed mode. + * + * When setting a monitor, this function updates the width, height and refresh + * rate of the desired video mode and switches to the video mode closest to it. + * The window position is ignored when setting a monitor. + * + * When the monitor is `NULL`, the position, width and height are used to + * place the window content area. The refresh rate is ignored when no monitor + * is specified. + * + * If you only wish to update the resolution of a full screen window or the + * size of a windowed mode window, see @ref glfwSetWindowSize. + * + * When a window transitions from full screen to windowed mode, this function + * restores any previous window settings such as whether it is decorated, + * floating, resizable, has size or aspect ratio limits, etc. + * + * @param[in] window The window whose monitor, size or video mode to set. + * @param[in] monitor The desired monitor, or `NULL` to set windowed mode. + * @param[in] xpos The desired x-coordinate of the upper-left corner of the + * content area. + * @param[in] ypos The desired y-coordinate of the upper-left corner of the + * content area. + * @param[in] width The desired with, in screen coordinates, of the content + * area or video mode. + * @param[in] height The desired height, in screen coordinates, of the content + * area or video mode. + * @param[in] refreshRate The desired refresh rate, in Hz, of the video mode, + * or `GLFW_DONT_CARE`. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @remark The OpenGL or OpenGL ES context will not be destroyed or otherwise + * affected by any resizing or mode switching, although you may need to update + * your viewport if the framebuffer size has changed. + * + * @remark @wayland The desired window position is ignored, as there is no way + * for an application to set this property. + * + * @remark @wayland Setting the window to full screen will not attempt to + * change the mode, no matter what the requested size or refresh rate. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_monitor + * @sa @ref window_full_screen + * @sa @ref glfwGetWindowMonitor + * @sa @ref glfwSetWindowSize + * + * @since Added in version 3.2. + * + * @ingroup window + */ +GLFWAPI void glfwSetWindowMonitor(GLFWwindow* window, GLFWmonitor* monitor, int xpos, int ypos, int width, int height, int refreshRate); + +/*! @brief Returns an attribute of the specified window. + * + * This function returns the value of an attribute of the specified window or + * its OpenGL or OpenGL ES context. + * + * @param[in] window The window to query. + * @param[in] attrib The [window attribute](@ref window_attribs) whose value to + * return. + * @return The value of the attribute, or zero if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR. + * + * @remark Framebuffer related hints are not window attributes. See @ref + * window_attribs_fb for more information. + * + * @remark Zero is a valid value for many window and context related + * attributes so you cannot use a return value of zero as an indication of + * errors. However, this function should not fail as long as it is passed + * valid arguments and the library has been [initialized](@ref intro_init). + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_attribs + * @sa @ref glfwSetWindowAttrib + * + * @since Added in version 3.0. Replaces `glfwGetWindowParam` and + * `glfwGetGLVersion`. + * + * @ingroup window + */ +GLFWAPI int glfwGetWindowAttrib(GLFWwindow* window, int attrib); + +/*! @brief Sets an attribute of the specified window. + * + * This function sets the value of an attribute of the specified window. + * + * The supported attributes are [GLFW_DECORATED](@ref GLFW_DECORATED_attrib), + * [GLFW_RESIZABLE](@ref GLFW_RESIZABLE_attrib), + * [GLFW_FLOATING](@ref GLFW_FLOATING_attrib), + * [GLFW_AUTO_ICONIFY](@ref GLFW_AUTO_ICONIFY_attrib) and + * [GLFW_FOCUS_ON_SHOW](@ref GLFW_FOCUS_ON_SHOW_attrib). + * [GLFW_MOUSE_PASSTHROUGH](@ref GLFW_MOUSE_PASSTHROUGH_attrib) + * + * Some of these attributes are ignored for full screen windows. The new + * value will take effect if the window is later made windowed. + * + * Some of these attributes are ignored for windowed mode windows. The new + * value will take effect if the window is later made full screen. + * + * @param[in] window The window to set the attribute for. + * @param[in] attrib A supported window attribute. + * @param[in] value `GLFW_TRUE` or `GLFW_FALSE`. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_ENUM, @ref GLFW_INVALID_VALUE and @ref GLFW_PLATFORM_ERROR. + * + * @remark Calling @ref glfwGetWindowAttrib will always return the latest + * value, even if that value is ignored by the current mode of the window. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_attribs + * @sa @ref glfwGetWindowAttrib + * + * @since Added in version 3.3. + * + * @ingroup window + */ +GLFWAPI void glfwSetWindowAttrib(GLFWwindow* window, int attrib, int value); + +/*! @brief Sets the user pointer of the specified window. + * + * This function sets the user-defined pointer of the specified window. The + * current value is retained until the window is destroyed. The initial value + * is `NULL`. + * + * @param[in] window The window whose pointer to set. + * @param[in] pointer The new value. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @sa @ref window_userptr + * @sa @ref glfwGetWindowUserPointer + * + * @since Added in version 3.0. + * + * @ingroup window + */ +GLFWAPI void glfwSetWindowUserPointer(GLFWwindow* window, void* pointer); + +/*! @brief Returns the user pointer of the specified window. + * + * This function returns the current value of the user-defined pointer of the + * specified window. The initial value is `NULL`. + * + * @param[in] window The window whose pointer to return. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @sa @ref window_userptr + * @sa @ref glfwSetWindowUserPointer + * + * @since Added in version 3.0. + * + * @ingroup window + */ +GLFWAPI void* glfwGetWindowUserPointer(GLFWwindow* window); + +/*! @brief Sets the position callback for the specified window. + * + * This function sets the position callback of the specified window, which is + * called when the window is moved. The callback is provided with the + * position, in screen coordinates, of the upper-left corner of the content + * area of the window. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, int xpos, int ypos) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWwindowposfun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @remark @wayland This callback will never be called, as there is no way for + * an application to know its global position. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_pos + * + * @since Added in version 3.0. + * + * @ingroup window + */ +GLFWAPI GLFWwindowposfun glfwSetWindowPosCallback(GLFWwindow* window, GLFWwindowposfun callback); + +/*! @brief Sets the size callback for the specified window. + * + * This function sets the size callback of the specified window, which is + * called when the window is resized. The callback is provided with the size, + * in screen coordinates, of the content area of the window. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, int width, int height) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWwindowsizefun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_size + * + * @since Added in version 1.0. + * @glfw3 Added window handle parameter and return value. + * + * @ingroup window + */ +GLFWAPI GLFWwindowsizefun glfwSetWindowSizeCallback(GLFWwindow* window, GLFWwindowsizefun callback); + +/*! @brief Sets the close callback for the specified window. + * + * This function sets the close callback of the specified window, which is + * called when the user attempts to close the window, for example by clicking + * the close widget in the title bar. + * + * The close flag is set before this callback is called, but you can modify it + * at any time with @ref glfwSetWindowShouldClose. + * + * The close callback is not triggered by @ref glfwDestroyWindow. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWwindowclosefun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @remark @macos Selecting Quit from the application menu will trigger the + * close callback for all windows. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_close + * + * @since Added in version 2.5. + * @glfw3 Added window handle parameter and return value. + * + * @ingroup window + */ +GLFWAPI GLFWwindowclosefun glfwSetWindowCloseCallback(GLFWwindow* window, GLFWwindowclosefun callback); + +/*! @brief Sets the refresh callback for the specified window. + * + * This function sets the refresh callback of the specified window, which is + * called when the content area of the window needs to be redrawn, for example + * if the window has been exposed after having been covered by another window. + * + * On compositing window systems such as Aero, Compiz, Aqua or Wayland, where + * the window contents are saved off-screen, this callback may be called only + * very infrequently or never at all. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window); + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWwindowrefreshfun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_refresh + * + * @since Added in version 2.5. + * @glfw3 Added window handle parameter and return value. + * + * @ingroup window + */ +GLFWAPI GLFWwindowrefreshfun glfwSetWindowRefreshCallback(GLFWwindow* window, GLFWwindowrefreshfun callback); + +/*! @brief Sets the focus callback for the specified window. + * + * This function sets the focus callback of the specified window, which is + * called when the window gains or loses input focus. + * + * After the focus callback is called for a window that lost input focus, + * synthetic key and mouse button release events will be generated for all such + * that had been pressed. For more information, see @ref glfwSetKeyCallback + * and @ref glfwSetMouseButtonCallback. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, int focused) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWwindowfocusfun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_focus + * + * @since Added in version 3.0. + * + * @ingroup window + */ +GLFWAPI GLFWwindowfocusfun glfwSetWindowFocusCallback(GLFWwindow* window, GLFWwindowfocusfun callback); + +/*! @brief Sets the iconify callback for the specified window. + * + * This function sets the iconification callback of the specified window, which + * is called when the window is iconified or restored. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, int iconified) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWwindowiconifyfun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_iconify + * + * @since Added in version 3.0. + * + * @ingroup window + */ +GLFWAPI GLFWwindowiconifyfun glfwSetWindowIconifyCallback(GLFWwindow* window, GLFWwindowiconifyfun callback); + +/*! @brief Sets the maximize callback for the specified window. + * + * This function sets the maximization callback of the specified window, which + * is called when the window is maximized or restored. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, int maximized) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWwindowmaximizefun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_maximize + * + * @since Added in version 3.3. + * + * @ingroup window + */ +GLFWAPI GLFWwindowmaximizefun glfwSetWindowMaximizeCallback(GLFWwindow* window, GLFWwindowmaximizefun callback); + +/*! @brief Sets the framebuffer resize callback for the specified window. + * + * This function sets the framebuffer resize callback of the specified window, + * which is called when the framebuffer of the specified window is resized. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, int width, int height) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWframebuffersizefun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_fbsize + * + * @since Added in version 3.0. + * + * @ingroup window + */ +GLFWAPI GLFWframebuffersizefun glfwSetFramebufferSizeCallback(GLFWwindow* window, GLFWframebuffersizefun callback); + +/*! @brief Sets the window content scale callback for the specified window. + * + * This function sets the window content scale callback of the specified window, + * which is called when the content scale of the specified window changes. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, float xscale, float yscale) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWwindowcontentscalefun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_scale + * @sa @ref glfwGetWindowContentScale + * + * @since Added in version 3.3. + * + * @ingroup window + */ +GLFWAPI GLFWwindowcontentscalefun glfwSetWindowContentScaleCallback(GLFWwindow* window, GLFWwindowcontentscalefun callback); + +/*! @brief Processes all pending events. + * + * This function processes only those events that are already in the event + * queue and then returns immediately. Processing events will cause the window + * and input callbacks associated with those events to be called. + * + * On some platforms, a window move, resize or menu operation will cause event + * processing to block. This is due to how event processing is designed on + * those platforms. You can use the + * [window refresh callback](@ref window_refresh) to redraw the contents of + * your window when necessary during such operations. + * + * Do not assume that callbacks you set will _only_ be called in response to + * event processing functions like this one. While it is necessary to poll for + * events, window systems that require GLFW to register callbacks of its own + * can pass events to GLFW in response to many window system function calls. + * GLFW will pass those events on to the application callbacks before + * returning. + * + * Event processing is not required for joystick input to work. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @reentrancy This function must not be called from a callback. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref events + * @sa @ref glfwWaitEvents + * @sa @ref glfwWaitEventsTimeout + * + * @since Added in version 1.0. + * + * @ingroup window + */ +GLFWAPI void glfwPollEvents(void); + +/*! @brief Waits until events are queued and processes them. + * + * This function puts the calling thread to sleep until at least one event is + * available in the event queue. Once one or more events are available, + * it behaves exactly like @ref glfwPollEvents, i.e. the events in the queue + * are processed and the function then returns immediately. Processing events + * will cause the window and input callbacks associated with those events to be + * called. + * + * Since not all events are associated with callbacks, this function may return + * without a callback having been called even if you are monitoring all + * callbacks. + * + * On some platforms, a window move, resize or menu operation will cause event + * processing to block. This is due to how event processing is designed on + * those platforms. You can use the + * [window refresh callback](@ref window_refresh) to redraw the contents of + * your window when necessary during such operations. + * + * Do not assume that callbacks you set will _only_ be called in response to + * event processing functions like this one. While it is necessary to poll for + * events, window systems that require GLFW to register callbacks of its own + * can pass events to GLFW in response to many window system function calls. + * GLFW will pass those events on to the application callbacks before + * returning. + * + * Event processing is not required for joystick input to work. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @reentrancy This function must not be called from a callback. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref events + * @sa @ref glfwPollEvents + * @sa @ref glfwWaitEventsTimeout + * + * @since Added in version 2.5. + * + * @ingroup window + */ +GLFWAPI void glfwWaitEvents(void); + +/*! @brief Waits with timeout until events are queued and processes them. + * + * This function puts the calling thread to sleep until at least one event is + * available in the event queue, or until the specified timeout is reached. If + * one or more events are available, it behaves exactly like @ref + * glfwPollEvents, i.e. the events in the queue are processed and the function + * then returns immediately. Processing events will cause the window and input + * callbacks associated with those events to be called. + * + * The timeout value must be a positive finite number. + * + * Since not all events are associated with callbacks, this function may return + * without a callback having been called even if you are monitoring all + * callbacks. + * + * On some platforms, a window move, resize or menu operation will cause event + * processing to block. This is due to how event processing is designed on + * those platforms. You can use the + * [window refresh callback](@ref window_refresh) to redraw the contents of + * your window when necessary during such operations. + * + * Do not assume that callbacks you set will _only_ be called in response to + * event processing functions like this one. While it is necessary to poll for + * events, window systems that require GLFW to register callbacks of its own + * can pass events to GLFW in response to many window system function calls. + * GLFW will pass those events on to the application callbacks before + * returning. + * + * Event processing is not required for joystick input to work. + * + * @param[in] timeout The maximum amount of time, in seconds, to wait. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_VALUE and @ref GLFW_PLATFORM_ERROR. + * + * @reentrancy This function must not be called from a callback. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref events + * @sa @ref glfwPollEvents + * @sa @ref glfwWaitEvents + * + * @since Added in version 3.2. + * + * @ingroup window + */ +GLFWAPI void glfwWaitEventsTimeout(double timeout); + +/*! @brief Posts an empty event to the event queue. + * + * This function posts an empty event from the current thread to the event + * queue, causing @ref glfwWaitEvents or @ref glfwWaitEventsTimeout to return. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref events + * @sa @ref glfwWaitEvents + * @sa @ref glfwWaitEventsTimeout + * + * @since Added in version 3.1. + * + * @ingroup window + */ +GLFWAPI void glfwPostEmptyEvent(void); + +/*! @brief Returns the value of an input option for the specified window. + * + * This function returns the value of an input option for the specified window. + * The mode must be one of @ref GLFW_CURSOR, @ref GLFW_STICKY_KEYS, + * @ref GLFW_STICKY_MOUSE_BUTTONS, @ref GLFW_LOCK_KEY_MODS or + * @ref GLFW_RAW_MOUSE_MOTION. + * + * @param[in] window The window to query. + * @param[in] mode One of `GLFW_CURSOR`, `GLFW_STICKY_KEYS`, + * `GLFW_STICKY_MOUSE_BUTTONS`, `GLFW_LOCK_KEY_MODS` or + * `GLFW_RAW_MOUSE_MOTION`. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_INVALID_ENUM. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref glfwSetInputMode + * + * @since Added in version 3.0. + * + * @ingroup input + */ +GLFWAPI int glfwGetInputMode(GLFWwindow* window, int mode); + +/*! @brief Sets an input option for the specified window. + * + * This function sets an input mode option for the specified window. The mode + * must be one of @ref GLFW_CURSOR, @ref GLFW_STICKY_KEYS, + * @ref GLFW_STICKY_MOUSE_BUTTONS, @ref GLFW_LOCK_KEY_MODS or + * @ref GLFW_RAW_MOUSE_MOTION. + * + * If the mode is `GLFW_CURSOR`, the value must be one of the following cursor + * modes: + * - `GLFW_CURSOR_NORMAL` makes the cursor visible and behaving normally. + * - `GLFW_CURSOR_HIDDEN` makes the cursor invisible when it is over the + * content area of the window but does not restrict the cursor from leaving. + * - `GLFW_CURSOR_DISABLED` hides and grabs the cursor, providing virtual + * and unlimited cursor movement. This is useful for implementing for + * example 3D camera controls. + * + * If the mode is `GLFW_STICKY_KEYS`, the value must be either `GLFW_TRUE` to + * enable sticky keys, or `GLFW_FALSE` to disable it. If sticky keys are + * enabled, a key press will ensure that @ref glfwGetKey returns `GLFW_PRESS` + * the next time it is called even if the key had been released before the + * call. This is useful when you are only interested in whether keys have been + * pressed but not when or in which order. + * + * If the mode is `GLFW_STICKY_MOUSE_BUTTONS`, the value must be either + * `GLFW_TRUE` to enable sticky mouse buttons, or `GLFW_FALSE` to disable it. + * If sticky mouse buttons are enabled, a mouse button press will ensure that + * @ref glfwGetMouseButton returns `GLFW_PRESS` the next time it is called even + * if the mouse button had been released before the call. This is useful when + * you are only interested in whether mouse buttons have been pressed but not + * when or in which order. + * + * If the mode is `GLFW_LOCK_KEY_MODS`, the value must be either `GLFW_TRUE` to + * enable lock key modifier bits, or `GLFW_FALSE` to disable them. If enabled, + * callbacks that receive modifier bits will also have the @ref + * GLFW_MOD_CAPS_LOCK bit set when the event was generated with Caps Lock on, + * and the @ref GLFW_MOD_NUM_LOCK bit when Num Lock was on. + * + * If the mode is `GLFW_RAW_MOUSE_MOTION`, the value must be either `GLFW_TRUE` + * to enable raw (unscaled and unaccelerated) mouse motion when the cursor is + * disabled, or `GLFW_FALSE` to disable it. If raw motion is not supported, + * attempting to set this will emit @ref GLFW_FEATURE_UNAVAILABLE. Call @ref + * glfwRawMouseMotionSupported to check for support. + * + * @param[in] window The window whose input mode to set. + * @param[in] mode One of `GLFW_CURSOR`, `GLFW_STICKY_KEYS`, + * `GLFW_STICKY_MOUSE_BUTTONS`, `GLFW_LOCK_KEY_MODS` or + * `GLFW_RAW_MOUSE_MOTION`. + * @param[in] value The new value of the specified input mode. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_ENUM, @ref GLFW_PLATFORM_ERROR and @ref + * GLFW_FEATURE_UNAVAILABLE (see above). + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref glfwGetInputMode + * + * @since Added in version 3.0. Replaces `glfwEnable` and `glfwDisable`. + * + * @ingroup input + */ +GLFWAPI void glfwSetInputMode(GLFWwindow* window, int mode, int value); + +/*! @brief Returns whether raw mouse motion is supported. + * + * This function returns whether raw mouse motion is supported on the current + * system. This status does not change after GLFW has been initialized so you + * only need to check this once. If you attempt to enable raw motion on + * a system that does not support it, @ref GLFW_PLATFORM_ERROR will be emitted. + * + * Raw mouse motion is closer to the actual motion of the mouse across + * a surface. It is not affected by the scaling and acceleration applied to + * the motion of the desktop cursor. That processing is suitable for a cursor + * while raw motion is better for controlling for example a 3D camera. Because + * of this, raw mouse motion is only provided when the cursor is disabled. + * + * @return `GLFW_TRUE` if raw mouse motion is supported on the current machine, + * or `GLFW_FALSE` otherwise. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref raw_mouse_motion + * @sa @ref glfwSetInputMode + * + * @since Added in version 3.3. + * + * @ingroup input + */ +GLFWAPI int glfwRawMouseMotionSupported(void); + +/*! @brief Returns the layout-specific name of the specified printable key. + * + * This function returns the name of the specified printable key, encoded as + * UTF-8. This is typically the character that key would produce without any + * modifier keys, intended for displaying key bindings to the user. For dead + * keys, it is typically the diacritic it would add to a character. + * + * __Do not use this function__ for [text input](@ref input_char). You will + * break text input for many languages even if it happens to work for yours. + * + * If the key is `GLFW_KEY_UNKNOWN`, the scancode is used to identify the key, + * otherwise the scancode is ignored. If you specify a non-printable key, or + * `GLFW_KEY_UNKNOWN` and a scancode that maps to a non-printable key, this + * function returns `NULL` but does not emit an error. + * + * This behavior allows you to always pass in the arguments in the + * [key callback](@ref input_key) without modification. + * + * The printable keys are: + * - `GLFW_KEY_APOSTROPHE` + * - `GLFW_KEY_COMMA` + * - `GLFW_KEY_MINUS` + * - `GLFW_KEY_PERIOD` + * - `GLFW_KEY_SLASH` + * - `GLFW_KEY_SEMICOLON` + * - `GLFW_KEY_EQUAL` + * - `GLFW_KEY_LEFT_BRACKET` + * - `GLFW_KEY_RIGHT_BRACKET` + * - `GLFW_KEY_BACKSLASH` + * - `GLFW_KEY_WORLD_1` + * - `GLFW_KEY_WORLD_2` + * - `GLFW_KEY_0` to `GLFW_KEY_9` + * - `GLFW_KEY_A` to `GLFW_KEY_Z` + * - `GLFW_KEY_KP_0` to `GLFW_KEY_KP_9` + * - `GLFW_KEY_KP_DECIMAL` + * - `GLFW_KEY_KP_DIVIDE` + * - `GLFW_KEY_KP_MULTIPLY` + * - `GLFW_KEY_KP_SUBTRACT` + * - `GLFW_KEY_KP_ADD` + * - `GLFW_KEY_KP_EQUAL` + * + * Names for printable keys depend on keyboard layout, while names for + * non-printable keys are the same across layouts but depend on the application + * language and should be localized along with other user interface text. + * + * @param[in] key The key to query, or `GLFW_KEY_UNKNOWN`. + * @param[in] scancode The scancode of the key to query. + * @return The UTF-8 encoded, layout-specific name of the key, or `NULL`. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @remark The contents of the returned string may change when a keyboard + * layout change event is received. + * + * @pointer_lifetime The returned string is allocated and freed by GLFW. You + * should not free it yourself. It is valid until the library is terminated. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref input_key_name + * + * @since Added in version 3.2. + * + * @ingroup input + */ +GLFWAPI const char* glfwGetKeyName(int key, int scancode); + +/*! @brief Returns the platform-specific scancode of the specified key. + * + * This function returns the platform-specific scancode of the specified key. + * + * If the key is `GLFW_KEY_UNKNOWN` or does not exist on the keyboard this + * method will return `-1`. + * + * @param[in] key Any [named key](@ref keys). + * @return The platform-specific scancode for the key, or `-1` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref input_key + * + * @since Added in version 3.3. + * + * @ingroup input + */ +GLFWAPI int glfwGetKeyScancode(int key); + +/*! @brief Returns the last reported state of a keyboard key for the specified + * window. + * + * This function returns the last state reported for the specified key to the + * specified window. The returned state is one of `GLFW_PRESS` or + * `GLFW_RELEASE`. The higher-level action `GLFW_REPEAT` is only reported to + * the key callback. + * + * If the @ref GLFW_STICKY_KEYS input mode is enabled, this function returns + * `GLFW_PRESS` the first time you call it for a key that was pressed, even if + * that key has already been released. + * + * The key functions deal with physical keys, with [key tokens](@ref keys) + * named after their use on the standard US keyboard layout. If you want to + * input text, use the Unicode character callback instead. + * + * The [modifier key bit masks](@ref mods) are not key tokens and cannot be + * used with this function. + * + * __Do not use this function__ to implement [text input](@ref input_char). + * + * @param[in] window The desired window. + * @param[in] key The desired [keyboard key](@ref keys). `GLFW_KEY_UNKNOWN` is + * not a valid key for this function. + * @return One of `GLFW_PRESS` or `GLFW_RELEASE`. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_INVALID_ENUM. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref input_key + * + * @since Added in version 1.0. + * @glfw3 Added window handle parameter. + * + * @ingroup input + */ +GLFWAPI int glfwGetKey(GLFWwindow* window, int key); + +/*! @brief Returns the last reported state of a mouse button for the specified + * window. + * + * This function returns the last state reported for the specified mouse button + * to the specified window. The returned state is one of `GLFW_PRESS` or + * `GLFW_RELEASE`. + * + * If the @ref GLFW_STICKY_MOUSE_BUTTONS input mode is enabled, this function + * returns `GLFW_PRESS` the first time you call it for a mouse button that was + * pressed, even if that mouse button has already been released. + * + * @param[in] window The desired window. + * @param[in] button The desired [mouse button](@ref buttons). + * @return One of `GLFW_PRESS` or `GLFW_RELEASE`. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_INVALID_ENUM. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref input_mouse_button + * + * @since Added in version 1.0. + * @glfw3 Added window handle parameter. + * + * @ingroup input + */ +GLFWAPI int glfwGetMouseButton(GLFWwindow* window, int button); + +/*! @brief Retrieves the position of the cursor relative to the content area of + * the window. + * + * This function returns the position of the cursor, in screen coordinates, + * relative to the upper-left corner of the content area of the specified + * window. + * + * If the cursor is disabled (with `GLFW_CURSOR_DISABLED`) then the cursor + * position is unbounded and limited only by the minimum and maximum values of + * a `double`. + * + * The coordinate can be converted to their integer equivalents with the + * `floor` function. Casting directly to an integer type works for positive + * coordinates, but fails for negative ones. + * + * Any or all of the position arguments may be `NULL`. If an error occurs, all + * non-`NULL` position arguments will be set to zero. + * + * @param[in] window The desired window. + * @param[out] xpos Where to store the cursor x-coordinate, relative to the + * left edge of the content area, or `NULL`. + * @param[out] ypos Where to store the cursor y-coordinate, relative to the to + * top edge of the content area, or `NULL`. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref cursor_pos + * @sa @ref glfwSetCursorPos + * + * @since Added in version 3.0. Replaces `glfwGetMousePos`. + * + * @ingroup input + */ +GLFWAPI void glfwGetCursorPos(GLFWwindow* window, double* xpos, double* ypos); + +/*! @brief Sets the position of the cursor, relative to the content area of the + * window. + * + * This function sets the position, in screen coordinates, of the cursor + * relative to the upper-left corner of the content area of the specified + * window. The window must have input focus. If the window does not have + * input focus when this function is called, it fails silently. + * + * __Do not use this function__ to implement things like camera controls. GLFW + * already provides the `GLFW_CURSOR_DISABLED` cursor mode that hides the + * cursor, transparently re-centers it and provides unconstrained cursor + * motion. See @ref glfwSetInputMode for more information. + * + * If the cursor mode is `GLFW_CURSOR_DISABLED` then the cursor position is + * unconstrained and limited only by the minimum and maximum values of + * a `double`. + * + * @param[in] window The desired window. + * @param[in] xpos The desired x-coordinate, relative to the left edge of the + * content area. + * @param[in] ypos The desired y-coordinate, relative to the top edge of the + * content area. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @remark @wayland This function will only work when the cursor mode is + * `GLFW_CURSOR_DISABLED`, otherwise it will do nothing. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref cursor_pos + * @sa @ref glfwGetCursorPos + * + * @since Added in version 3.0. Replaces `glfwSetMousePos`. + * + * @ingroup input + */ +GLFWAPI void glfwSetCursorPos(GLFWwindow* window, double xpos, double ypos); + +/*! @brief Creates a custom cursor. + * + * Creates a new custom cursor image that can be set for a window with @ref + * glfwSetCursor. The cursor can be destroyed with @ref glfwDestroyCursor. + * Any remaining cursors are destroyed by @ref glfwTerminate. + * + * The pixels are 32-bit, little-endian, non-premultiplied RGBA, i.e. eight + * bits per channel with the red channel first. They are arranged canonically + * as packed sequential rows, starting from the top-left corner. + * + * The cursor hotspot is specified in pixels, relative to the upper-left corner + * of the cursor image. Like all other coordinate systems in GLFW, the X-axis + * points to the right and the Y-axis points down. + * + * @param[in] image The desired cursor image. + * @param[in] xhot The desired x-coordinate, in pixels, of the cursor hotspot. + * @param[in] yhot The desired y-coordinate, in pixels, of the cursor hotspot. + * @return The handle of the created cursor, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @pointer_lifetime The specified image data is copied before this function + * returns. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref cursor_object + * @sa @ref glfwDestroyCursor + * @sa @ref glfwCreateStandardCursor + * + * @since Added in version 3.1. + * + * @ingroup input + */ +GLFWAPI GLFWcursor* glfwCreateCursor(const GLFWimage* image, int xhot, int yhot); + +/*! @brief Creates a cursor with a standard shape. + * + * Returns a cursor with a standard shape, that can be set for a window with + * @ref glfwSetCursor. The images for these cursors come from the system + * cursor theme and their exact appearance will vary between platforms. + * + * Most of these shapes are guaranteed to exist on every supported platform but + * a few may not be present. See the table below for details. + * + * Cursor shape | Windows | macOS | X11 | Wayland + * ------------------------------ | ------- | ----- | ------ | ------- + * @ref GLFW_ARROW_CURSOR | Yes | Yes | Yes | Yes + * @ref GLFW_IBEAM_CURSOR | Yes | Yes | Yes | Yes + * @ref GLFW_CROSSHAIR_CURSOR | Yes | Yes | Yes | Yes + * @ref GLFW_POINTING_HAND_CURSOR | Yes | Yes | Yes | Yes + * @ref GLFW_RESIZE_EW_CURSOR | Yes | Yes | Yes | Yes + * @ref GLFW_RESIZE_NS_CURSOR | Yes | Yes | Yes | Yes + * @ref GLFW_RESIZE_NWSE_CURSOR | Yes | Yes1 | Maybe2 | Maybe2 + * @ref GLFW_RESIZE_NESW_CURSOR | Yes | Yes1 | Maybe2 | Maybe2 + * @ref GLFW_RESIZE_ALL_CURSOR | Yes | Yes | Yes | Yes + * @ref GLFW_NOT_ALLOWED_CURSOR | Yes | Yes | Maybe2 | Maybe2 + * + * 1) This uses a private system API and may fail in the future. + * + * 2) This uses a newer standard that not all cursor themes support. + * + * If the requested shape is not available, this function emits a @ref + * GLFW_CURSOR_UNAVAILABLE error and returns `NULL`. + * + * @param[in] shape One of the [standard shapes](@ref shapes). + * @return A new cursor ready to use or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_ENUM, @ref GLFW_CURSOR_UNAVAILABLE and @ref + * GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref cursor_standard + * @sa @ref glfwCreateCursor + * + * @since Added in version 3.1. + * + * @ingroup input + */ +GLFWAPI GLFWcursor* glfwCreateStandardCursor(int shape); + +/*! @brief Destroys a cursor. + * + * This function destroys a cursor previously created with @ref + * glfwCreateCursor. Any remaining cursors will be destroyed by @ref + * glfwTerminate. + * + * If the specified cursor is current for any window, that window will be + * reverted to the default cursor. This does not affect the cursor mode. + * + * @param[in] cursor The cursor object to destroy. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @reentrancy This function must not be called from a callback. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref cursor_object + * @sa @ref glfwCreateCursor + * + * @since Added in version 3.1. + * + * @ingroup input + */ +GLFWAPI void glfwDestroyCursor(GLFWcursor* cursor); + +/*! @brief Sets the cursor for the window. + * + * This function sets the cursor image to be used when the cursor is over the + * content area of the specified window. The set cursor will only be visible + * when the [cursor mode](@ref cursor_mode) of the window is + * `GLFW_CURSOR_NORMAL`. + * + * On some platforms, the set cursor may not be visible unless the window also + * has input focus. + * + * @param[in] window The window to set the cursor for. + * @param[in] cursor The cursor to set, or `NULL` to switch back to the default + * arrow cursor. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref cursor_object + * + * @since Added in version 3.1. + * + * @ingroup input + */ +GLFWAPI void glfwSetCursor(GLFWwindow* window, GLFWcursor* cursor); + +/*! @brief Sets the key callback. + * + * This function sets the key callback of the specified window, which is called + * when a key is pressed, repeated or released. + * + * The key functions deal with physical keys, with layout independent + * [key tokens](@ref keys) named after their values in the standard US keyboard + * layout. If you want to input text, use the + * [character callback](@ref glfwSetCharCallback) instead. + * + * When a window loses input focus, it will generate synthetic key release + * events for all pressed keys. You can tell these events from user-generated + * events by the fact that the synthetic ones are generated after the focus + * loss event has been processed, i.e. after the + * [window focus callback](@ref glfwSetWindowFocusCallback) has been called. + * + * The scancode of a key is specific to that platform or sometimes even to that + * machine. Scancodes are intended to allow users to bind keys that don't have + * a GLFW key token. Such keys have `key` set to `GLFW_KEY_UNKNOWN`, their + * state is not saved and so it cannot be queried with @ref glfwGetKey. + * + * Sometimes GLFW needs to generate synthetic key events, in which case the + * scancode may be zero. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new key callback, or `NULL` to remove the currently + * set callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, int key, int scancode, int action, int mods) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWkeyfun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref input_key + * + * @since Added in version 1.0. + * @glfw3 Added window handle parameter and return value. + * + * @ingroup input + */ +GLFWAPI GLFWkeyfun glfwSetKeyCallback(GLFWwindow* window, GLFWkeyfun callback); + +/*! @brief Sets the Unicode character callback. + * + * This function sets the character callback of the specified window, which is + * called when a Unicode character is input. + * + * The character callback is intended for Unicode text input. As it deals with + * characters, it is keyboard layout dependent, whereas the + * [key callback](@ref glfwSetKeyCallback) is not. Characters do not map 1:1 + * to physical keys, as a key may produce zero, one or more characters. If you + * want to know whether a specific physical key was pressed or released, see + * the key callback instead. + * + * The character callback behaves as system text input normally does and will + * not be called if modifier keys are held down that would prevent normal text + * input on that platform, for example a Super (Command) key on macOS or Alt key + * on Windows. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, unsigned int codepoint) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWcharfun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref input_char + * + * @since Added in version 2.4. + * @glfw3 Added window handle parameter and return value. + * + * @ingroup input + */ +GLFWAPI GLFWcharfun glfwSetCharCallback(GLFWwindow* window, GLFWcharfun callback); + +/*! @brief Sets the Unicode character with modifiers callback. + * + * This function sets the character with modifiers callback of the specified + * window, which is called when a Unicode character is input regardless of what + * modifier keys are used. + * + * The character with modifiers callback is intended for implementing custom + * Unicode character input. For regular Unicode text input, see the + * [character callback](@ref glfwSetCharCallback). Like the character + * callback, the character with modifiers callback deals with characters and is + * keyboard layout dependent. Characters do not map 1:1 to physical keys, as + * a key may produce zero, one or more characters. If you want to know whether + * a specific physical key was pressed or released, see the + * [key callback](@ref glfwSetKeyCallback) instead. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or an + * [error](@ref error_handling) occurred. + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, unsigned int codepoint, int mods) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWcharmodsfun). + * + * @deprecated Scheduled for removal in version 4.0. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref input_char + * + * @since Added in version 3.1. + * + * @ingroup input + */ +GLFWAPI GLFWcharmodsfun glfwSetCharModsCallback(GLFWwindow* window, GLFWcharmodsfun callback); + +/*! @brief Sets the mouse button callback. + * + * This function sets the mouse button callback of the specified window, which + * is called when a mouse button is pressed or released. + * + * When a window loses input focus, it will generate synthetic mouse button + * release events for all pressed mouse buttons. You can tell these events + * from user-generated events by the fact that the synthetic ones are generated + * after the focus loss event has been processed, i.e. after the + * [window focus callback](@ref glfwSetWindowFocusCallback) has been called. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, int button, int action, int mods) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWmousebuttonfun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref input_mouse_button + * + * @since Added in version 1.0. + * @glfw3 Added window handle parameter and return value. + * + * @ingroup input + */ +GLFWAPI GLFWmousebuttonfun glfwSetMouseButtonCallback(GLFWwindow* window, GLFWmousebuttonfun callback); + +/*! @brief Sets the cursor position callback. + * + * This function sets the cursor position callback of the specified window, + * which is called when the cursor is moved. The callback is provided with the + * position, in screen coordinates, relative to the upper-left corner of the + * content area of the window. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, double xpos, double ypos); + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWcursorposfun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref cursor_pos + * + * @since Added in version 3.0. Replaces `glfwSetMousePosCallback`. + * + * @ingroup input + */ +GLFWAPI GLFWcursorposfun glfwSetCursorPosCallback(GLFWwindow* window, GLFWcursorposfun callback); + +/*! @brief Sets the cursor enter/leave callback. + * + * This function sets the cursor boundary crossing callback of the specified + * window, which is called when the cursor enters or leaves the content area of + * the window. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, int entered) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWcursorenterfun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref cursor_enter + * + * @since Added in version 3.0. + * + * @ingroup input + */ +GLFWAPI GLFWcursorenterfun glfwSetCursorEnterCallback(GLFWwindow* window, GLFWcursorenterfun callback); + +/*! @brief Sets the scroll callback. + * + * This function sets the scroll callback of the specified window, which is + * called when a scrolling device is used, such as a mouse wheel or scrolling + * area of a touchpad. + * + * The scroll callback receives all scrolling input, like that from a mouse + * wheel or a touchpad scrolling area. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new scroll callback, or `NULL` to remove the + * currently set callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, double xoffset, double yoffset) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWscrollfun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref scrolling + * + * @since Added in version 3.0. Replaces `glfwSetMouseWheelCallback`. + * + * @ingroup input + */ +GLFWAPI GLFWscrollfun glfwSetScrollCallback(GLFWwindow* window, GLFWscrollfun callback); + +/*! @brief Sets the path drop callback. + * + * This function sets the path drop callback of the specified window, which is + * called when one or more dragged paths are dropped on the window. + * + * Because the path array and its strings may have been generated specifically + * for that event, they are not guaranteed to be valid after the callback has + * returned. If you wish to use them after the callback returns, you need to + * make a deep copy. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new file drop callback, or `NULL` to remove the + * currently set callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, int path_count, const char* paths[]) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWdropfun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @remark @wayland File drop is currently unimplemented. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref path_drop + * + * @since Added in version 3.1. + * + * @ingroup input + */ +GLFWAPI GLFWdropfun glfwSetDropCallback(GLFWwindow* window, GLFWdropfun callback); + +/*! @brief Returns whether the specified joystick is present. + * + * This function returns whether the specified joystick is present. + * + * There is no need to call this function before other functions that accept + * a joystick ID, as they all check for presence before performing any other + * work. + * + * @param[in] jid The [joystick](@ref joysticks) to query. + * @return `GLFW_TRUE` if the joystick is present, or `GLFW_FALSE` otherwise. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref joystick + * + * @since Added in version 3.0. Replaces `glfwGetJoystickParam`. + * + * @ingroup input + */ +GLFWAPI int glfwJoystickPresent(int jid); + +/*! @brief Returns the values of all axes of the specified joystick. + * + * This function returns the values of all axes of the specified joystick. + * Each element in the array is a value between -1.0 and 1.0. + * + * If the specified joystick is not present this function will return `NULL` + * but will not generate an error. This can be used instead of first calling + * @ref glfwJoystickPresent. + * + * @param[in] jid The [joystick](@ref joysticks) to query. + * @param[out] count Where to store the number of axis values in the returned + * array. This is set to zero if the joystick is not present or an error + * occurred. + * @return An array of axis values, or `NULL` if the joystick is not present or + * an [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR. + * + * @pointer_lifetime The returned array is allocated and freed by GLFW. You + * should not free it yourself. It is valid until the specified joystick is + * disconnected or the library is terminated. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref joystick_axis + * + * @since Added in version 3.0. Replaces `glfwGetJoystickPos`. + * + * @ingroup input + */ +GLFWAPI const float* glfwGetJoystickAxes(int jid, int* count); + +/*! @brief Returns the state of all buttons of the specified joystick. + * + * This function returns the state of all buttons of the specified joystick. + * Each element in the array is either `GLFW_PRESS` or `GLFW_RELEASE`. + * + * For backward compatibility with earlier versions that did not have @ref + * glfwGetJoystickHats, the button array also includes all hats, each + * represented as four buttons. The hats are in the same order as returned by + * __glfwGetJoystickHats__ and are in the order _up_, _right_, _down_ and + * _left_. To disable these extra buttons, set the @ref + * GLFW_JOYSTICK_HAT_BUTTONS init hint before initialization. + * + * If the specified joystick is not present this function will return `NULL` + * but will not generate an error. This can be used instead of first calling + * @ref glfwJoystickPresent. + * + * @param[in] jid The [joystick](@ref joysticks) to query. + * @param[out] count Where to store the number of button states in the returned + * array. This is set to zero if the joystick is not present or an error + * occurred. + * @return An array of button states, or `NULL` if the joystick is not present + * or an [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR. + * + * @pointer_lifetime The returned array is allocated and freed by GLFW. You + * should not free it yourself. It is valid until the specified joystick is + * disconnected or the library is terminated. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref joystick_button + * + * @since Added in version 2.2. + * @glfw3 Changed to return a dynamic array. + * + * @ingroup input + */ +GLFWAPI const unsigned char* glfwGetJoystickButtons(int jid, int* count); + +/*! @brief Returns the state of all hats of the specified joystick. + * + * This function returns the state of all hats of the specified joystick. + * Each element in the array is one of the following values: + * + * Name | Value + * ---- | ----- + * `GLFW_HAT_CENTERED` | 0 + * `GLFW_HAT_UP` | 1 + * `GLFW_HAT_RIGHT` | 2 + * `GLFW_HAT_DOWN` | 4 + * `GLFW_HAT_LEFT` | 8 + * `GLFW_HAT_RIGHT_UP` | `GLFW_HAT_RIGHT` \| `GLFW_HAT_UP` + * `GLFW_HAT_RIGHT_DOWN` | `GLFW_HAT_RIGHT` \| `GLFW_HAT_DOWN` + * `GLFW_HAT_LEFT_UP` | `GLFW_HAT_LEFT` \| `GLFW_HAT_UP` + * `GLFW_HAT_LEFT_DOWN` | `GLFW_HAT_LEFT` \| `GLFW_HAT_DOWN` + * + * The diagonal directions are bitwise combinations of the primary (up, right, + * down and left) directions and you can test for these individually by ANDing + * it with the corresponding direction. + * + * @code + * if (hats[2] & GLFW_HAT_RIGHT) + * { + * // State of hat 2 could be right-up, right or right-down + * } + * @endcode + * + * If the specified joystick is not present this function will return `NULL` + * but will not generate an error. This can be used instead of first calling + * @ref glfwJoystickPresent. + * + * @param[in] jid The [joystick](@ref joysticks) to query. + * @param[out] count Where to store the number of hat states in the returned + * array. This is set to zero if the joystick is not present or an error + * occurred. + * @return An array of hat states, or `NULL` if the joystick is not present + * or an [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR. + * + * @pointer_lifetime The returned array is allocated and freed by GLFW. You + * should not free it yourself. It is valid until the specified joystick is + * disconnected, this function is called again for that joystick or the library + * is terminated. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref joystick_hat + * + * @since Added in version 3.3. + * + * @ingroup input + */ +GLFWAPI const unsigned char* glfwGetJoystickHats(int jid, int* count); + +/*! @brief Returns the name of the specified joystick. + * + * This function returns the name, encoded as UTF-8, of the specified joystick. + * The returned string is allocated and freed by GLFW. You should not free it + * yourself. + * + * If the specified joystick is not present this function will return `NULL` + * but will not generate an error. This can be used instead of first calling + * @ref glfwJoystickPresent. + * + * @param[in] jid The [joystick](@ref joysticks) to query. + * @return The UTF-8 encoded name of the joystick, or `NULL` if the joystick + * is not present or an [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR. + * + * @pointer_lifetime The returned string is allocated and freed by GLFW. You + * should not free it yourself. It is valid until the specified joystick is + * disconnected or the library is terminated. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref joystick_name + * + * @since Added in version 3.0. + * + * @ingroup input + */ +GLFWAPI const char* glfwGetJoystickName(int jid); + +/*! @brief Returns the SDL compatible GUID of the specified joystick. + * + * This function returns the SDL compatible GUID, as a UTF-8 encoded + * hexadecimal string, of the specified joystick. The returned string is + * allocated and freed by GLFW. You should not free it yourself. + * + * The GUID is what connects a joystick to a gamepad mapping. A connected + * joystick will always have a GUID even if there is no gamepad mapping + * assigned to it. + * + * If the specified joystick is not present this function will return `NULL` + * but will not generate an error. This can be used instead of first calling + * @ref glfwJoystickPresent. + * + * The GUID uses the format introduced in SDL 2.0.5. This GUID tries to + * uniquely identify the make and model of a joystick but does not identify + * a specific unit, e.g. all wired Xbox 360 controllers will have the same + * GUID on that platform. The GUID for a unit may vary between platforms + * depending on what hardware information the platform specific APIs provide. + * + * @param[in] jid The [joystick](@ref joysticks) to query. + * @return The UTF-8 encoded GUID of the joystick, or `NULL` if the joystick + * is not present or an [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR. + * + * @pointer_lifetime The returned string is allocated and freed by GLFW. You + * should not free it yourself. It is valid until the specified joystick is + * disconnected or the library is terminated. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref gamepad + * + * @since Added in version 3.3. + * + * @ingroup input + */ +GLFWAPI const char* glfwGetJoystickGUID(int jid); + +/*! @brief Sets the user pointer of the specified joystick. + * + * This function sets the user-defined pointer of the specified joystick. The + * current value is retained until the joystick is disconnected. The initial + * value is `NULL`. + * + * This function may be called from the joystick callback, even for a joystick + * that is being disconnected. + * + * @param[in] jid The joystick whose pointer to set. + * @param[in] pointer The new value. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @sa @ref joystick_userptr + * @sa @ref glfwGetJoystickUserPointer + * + * @since Added in version 3.3. + * + * @ingroup input + */ +GLFWAPI void glfwSetJoystickUserPointer(int jid, void* pointer); + +/*! @brief Returns the user pointer of the specified joystick. + * + * This function returns the current value of the user-defined pointer of the + * specified joystick. The initial value is `NULL`. + * + * This function may be called from the joystick callback, even for a joystick + * that is being disconnected. + * + * @param[in] jid The joystick whose pointer to return. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @sa @ref joystick_userptr + * @sa @ref glfwSetJoystickUserPointer + * + * @since Added in version 3.3. + * + * @ingroup input + */ +GLFWAPI void* glfwGetJoystickUserPointer(int jid); + +/*! @brief Returns whether the specified joystick has a gamepad mapping. + * + * This function returns whether the specified joystick is both present and has + * a gamepad mapping. + * + * If the specified joystick is present but does not have a gamepad mapping + * this function will return `GLFW_FALSE` but will not generate an error. Call + * @ref glfwJoystickPresent to check if a joystick is present regardless of + * whether it has a mapping. + * + * @param[in] jid The [joystick](@ref joysticks) to query. + * @return `GLFW_TRUE` if a joystick is both present and has a gamepad mapping, + * or `GLFW_FALSE` otherwise. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_INVALID_ENUM. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref gamepad + * @sa @ref glfwGetGamepadState + * + * @since Added in version 3.3. + * + * @ingroup input + */ +GLFWAPI int glfwJoystickIsGamepad(int jid); + +/*! @brief Sets the joystick configuration callback. + * + * This function sets the joystick configuration callback, or removes the + * currently set callback. This is called when a joystick is connected to or + * disconnected from the system. + * + * For joystick connection and disconnection events to be delivered on all + * platforms, you need to call one of the [event processing](@ref events) + * functions. Joystick disconnection may also be detected and the callback + * called by joystick functions. The function will then return whatever it + * returns if the joystick is not present. + * + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(int jid, int event) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWjoystickfun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref joystick_event + * + * @since Added in version 3.2. + * + * @ingroup input + */ +GLFWAPI GLFWjoystickfun glfwSetJoystickCallback(GLFWjoystickfun callback); + +/*! @brief Adds the specified SDL_GameControllerDB gamepad mappings. + * + * This function parses the specified ASCII encoded string and updates the + * internal list with any gamepad mappings it finds. This string may + * contain either a single gamepad mapping or many mappings separated by + * newlines. The parser supports the full format of the `gamecontrollerdb.txt` + * source file including empty lines and comments. + * + * See @ref gamepad_mapping for a description of the format. + * + * If there is already a gamepad mapping for a given GUID in the internal list, + * it will be replaced by the one passed to this function. If the library is + * terminated and re-initialized the internal list will revert to the built-in + * default. + * + * @param[in] string The string containing the gamepad mappings. + * @return `GLFW_TRUE` if successful, or `GLFW_FALSE` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_INVALID_VALUE. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref gamepad + * @sa @ref glfwJoystickIsGamepad + * @sa @ref glfwGetGamepadName + * + * @since Added in version 3.3. + * + * @ingroup input + */ +GLFWAPI int glfwUpdateGamepadMappings(const char* string); + +/*! @brief Returns the human-readable gamepad name for the specified joystick. + * + * This function returns the human-readable name of the gamepad from the + * gamepad mapping assigned to the specified joystick. + * + * If the specified joystick is not present or does not have a gamepad mapping + * this function will return `NULL` but will not generate an error. Call + * @ref glfwJoystickPresent to check whether it is present regardless of + * whether it has a mapping. + * + * @param[in] jid The [joystick](@ref joysticks) to query. + * @return The UTF-8 encoded name of the gamepad, or `NULL` if the + * joystick is not present, does not have a mapping or an + * [error](@ref error_handling) occurred. + * + * @pointer_lifetime The returned string is allocated and freed by GLFW. You + * should not free it yourself. It is valid until the specified joystick is + * disconnected, the gamepad mappings are updated or the library is terminated. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref gamepad + * @sa @ref glfwJoystickIsGamepad + * + * @since Added in version 3.3. + * + * @ingroup input + */ +GLFWAPI const char* glfwGetGamepadName(int jid); + +/*! @brief Retrieves the state of the specified joystick remapped as a gamepad. + * + * This function retrieves the state of the specified joystick remapped to + * an Xbox-like gamepad. + * + * If the specified joystick is not present or does not have a gamepad mapping + * this function will return `GLFW_FALSE` but will not generate an error. Call + * @ref glfwJoystickPresent to check whether it is present regardless of + * whether it has a mapping. + * + * The Guide button may not be available for input as it is often hooked by the + * system or the Steam client. + * + * Not all devices have all the buttons or axes provided by @ref + * GLFWgamepadstate. Unavailable buttons and axes will always report + * `GLFW_RELEASE` and 0.0 respectively. + * + * @param[in] jid The [joystick](@ref joysticks) to query. + * @param[out] state The gamepad input state of the joystick. + * @return `GLFW_TRUE` if successful, or `GLFW_FALSE` if no joystick is + * connected, it has no gamepad mapping or an [error](@ref error_handling) + * occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_INVALID_ENUM. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref gamepad + * @sa @ref glfwUpdateGamepadMappings + * @sa @ref glfwJoystickIsGamepad + * + * @since Added in version 3.3. + * + * @ingroup input + */ +GLFWAPI int glfwGetGamepadState(int jid, GLFWgamepadstate* state); + +/*! @brief Sets the clipboard to the specified string. + * + * This function sets the system clipboard to the specified, UTF-8 encoded + * string. + * + * @param[in] window Deprecated. Any valid window or `NULL`. + * @param[in] string A UTF-8 encoded string. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @pointer_lifetime The specified string is copied before this function + * returns. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref clipboard + * @sa @ref glfwGetClipboardString + * + * @since Added in version 3.0. + * + * @ingroup input + */ +GLFWAPI void glfwSetClipboardString(GLFWwindow* window, const char* string); + +/*! @brief Returns the contents of the clipboard as a string. + * + * This function returns the contents of the system clipboard, if it contains + * or is convertible to a UTF-8 encoded string. If the clipboard is empty or + * if its contents cannot be converted, `NULL` is returned and a @ref + * GLFW_FORMAT_UNAVAILABLE error is generated. + * + * @param[in] window Deprecated. Any valid window or `NULL`. + * @return The contents of the clipboard as a UTF-8 encoded string, or `NULL` + * if an [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @pointer_lifetime The returned string is allocated and freed by GLFW. You + * should not free it yourself. It is valid until the next call to @ref + * glfwGetClipboardString or @ref glfwSetClipboardString, or until the library + * is terminated. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref clipboard + * @sa @ref glfwSetClipboardString + * + * @since Added in version 3.0. + * + * @ingroup input + */ +GLFWAPI const char* glfwGetClipboardString(GLFWwindow* window); + +/*! @brief Returns the GLFW time. + * + * This function returns the current GLFW time, in seconds. Unless the time + * has been set using @ref glfwSetTime it measures time elapsed since GLFW was + * initialized. + * + * This function and @ref glfwSetTime are helper functions on top of @ref + * glfwGetTimerFrequency and @ref glfwGetTimerValue. + * + * The resolution of the timer is system dependent, but is usually on the order + * of a few micro- or nanoseconds. It uses the highest-resolution monotonic + * time source on each operating system. + * + * @return The current time, in seconds, or zero if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Reading and + * writing of the internal base time is not atomic, so it needs to be + * externally synchronized with calls to @ref glfwSetTime. + * + * @sa @ref time + * + * @since Added in version 1.0. + * + * @ingroup input + */ +GLFWAPI double glfwGetTime(void); + +/*! @brief Sets the GLFW time. + * + * This function sets the current GLFW time, in seconds. The value must be + * a positive finite number less than or equal to 18446744073.0, which is + * approximately 584.5 years. + * + * This function and @ref glfwGetTime are helper functions on top of @ref + * glfwGetTimerFrequency and @ref glfwGetTimerValue. + * + * @param[in] time The new value, in seconds. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_INVALID_VALUE. + * + * @remark The upper limit of GLFW time is calculated as + * floor((264 - 1) / 109) and is due to implementations + * storing nanoseconds in 64 bits. The limit may be increased in the future. + * + * @thread_safety This function may be called from any thread. Reading and + * writing of the internal base time is not atomic, so it needs to be + * externally synchronized with calls to @ref glfwGetTime. + * + * @sa @ref time + * + * @since Added in version 2.2. + * + * @ingroup input + */ +GLFWAPI void glfwSetTime(double time); + +/*! @brief Returns the current value of the raw timer. + * + * This function returns the current value of the raw timer, measured in + * 1 / frequency seconds. To get the frequency, call @ref + * glfwGetTimerFrequency. + * + * @return The value of the timer, or zero if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref time + * @sa @ref glfwGetTimerFrequency + * + * @since Added in version 3.2. + * + * @ingroup input + */ +GLFWAPI uint64_t glfwGetTimerValue(void); + +/*! @brief Returns the frequency, in Hz, of the raw timer. + * + * This function returns the frequency, in Hz, of the raw timer. + * + * @return The frequency of the timer, in Hz, or zero if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref time + * @sa @ref glfwGetTimerValue + * + * @since Added in version 3.2. + * + * @ingroup input + */ +GLFWAPI uint64_t glfwGetTimerFrequency(void); + +/*! @brief Makes the context of the specified window current for the calling + * thread. + * + * This function makes the OpenGL or OpenGL ES context of the specified window + * current on the calling thread. A context must only be made current on + * a single thread at a time and each thread can have only a single current + * context at a time. + * + * When moving a context between threads, you must make it non-current on the + * old thread before making it current on the new one. + * + * By default, making a context non-current implicitly forces a pipeline flush. + * On machines that support `GL_KHR_context_flush_control`, you can control + * whether a context performs this flush by setting the + * [GLFW_CONTEXT_RELEASE_BEHAVIOR](@ref GLFW_CONTEXT_RELEASE_BEHAVIOR_hint) + * hint. + * + * The specified window must have an OpenGL or OpenGL ES context. Specifying + * a window without a context will generate a @ref GLFW_NO_WINDOW_CONTEXT + * error. + * + * @param[in] window The window whose context to make current, or `NULL` to + * detach the current context. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_NO_WINDOW_CONTEXT and @ref GLFW_PLATFORM_ERROR. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref context_current + * @sa @ref glfwGetCurrentContext + * + * @since Added in version 3.0. + * + * @ingroup context + */ +GLFWAPI void glfwMakeContextCurrent(GLFWwindow* window); + +/*! @brief Returns the window whose context is current on the calling thread. + * + * This function returns the window whose OpenGL or OpenGL ES context is + * current on the calling thread. + * + * @return The window whose context is current, or `NULL` if no window's + * context is current. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref context_current + * @sa @ref glfwMakeContextCurrent + * + * @since Added in version 3.0. + * + * @ingroup context + */ +GLFWAPI GLFWwindow* glfwGetCurrentContext(void); + +/*! @brief Swaps the front and back buffers of the specified window. + * + * This function swaps the front and back buffers of the specified window when + * rendering with OpenGL or OpenGL ES. If the swap interval is greater than + * zero, the GPU driver waits the specified number of screen updates before + * swapping the buffers. + * + * The specified window must have an OpenGL or OpenGL ES context. Specifying + * a window without a context will generate a @ref GLFW_NO_WINDOW_CONTEXT + * error. + * + * This function does not apply to Vulkan. If you are rendering with Vulkan, + * see `vkQueuePresentKHR` instead. + * + * @param[in] window The window whose buffers to swap. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_NO_WINDOW_CONTEXT and @ref GLFW_PLATFORM_ERROR. + * + * @remark __EGL:__ The context of the specified window must be current on the + * calling thread. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref buffer_swap + * @sa @ref glfwSwapInterval + * + * @since Added in version 1.0. + * @glfw3 Added window handle parameter. + * + * @ingroup window + */ +GLFWAPI void glfwSwapBuffers(GLFWwindow* window); + +/*! @brief Sets the swap interval for the current context. + * + * This function sets the swap interval for the current OpenGL or OpenGL ES + * context, i.e. the number of screen updates to wait from the time @ref + * glfwSwapBuffers was called before swapping the buffers and returning. This + * is sometimes called _vertical synchronization_, _vertical retrace + * synchronization_ or just _vsync_. + * + * A context that supports either of the `WGL_EXT_swap_control_tear` and + * `GLX_EXT_swap_control_tear` extensions also accepts _negative_ swap + * intervals, which allows the driver to swap immediately even if a frame + * arrives a little bit late. You can check for these extensions with @ref + * glfwExtensionSupported. + * + * A context must be current on the calling thread. Calling this function + * without a current context will cause a @ref GLFW_NO_CURRENT_CONTEXT error. + * + * This function does not apply to Vulkan. If you are rendering with Vulkan, + * see the present mode of your swapchain instead. + * + * @param[in] interval The minimum number of screen updates to wait for + * until the buffers are swapped by @ref glfwSwapBuffers. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_NO_CURRENT_CONTEXT and @ref GLFW_PLATFORM_ERROR. + * + * @remark This function is not called during context creation, leaving the + * swap interval set to whatever is the default for that API. This is done + * because some swap interval extensions used by GLFW do not allow the swap + * interval to be reset to zero once it has been set to a non-zero value. + * + * @remark Some GPU drivers do not honor the requested swap interval, either + * because of a user setting that overrides the application's request or due to + * bugs in the driver. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref buffer_swap + * @sa @ref glfwSwapBuffers + * + * @since Added in version 1.0. + * + * @ingroup context + */ +GLFWAPI void glfwSwapInterval(int interval); + +/*! @brief Returns whether the specified extension is available. + * + * This function returns whether the specified + * [API extension](@ref context_glext) is supported by the current OpenGL or + * OpenGL ES context. It searches both for client API extension and context + * creation API extensions. + * + * A context must be current on the calling thread. Calling this function + * without a current context will cause a @ref GLFW_NO_CURRENT_CONTEXT error. + * + * As this functions retrieves and searches one or more extension strings each + * call, it is recommended that you cache its results if it is going to be used + * frequently. The extension strings will not change during the lifetime of + * a context, so there is no danger in doing this. + * + * This function does not apply to Vulkan. If you are using Vulkan, see @ref + * glfwGetRequiredInstanceExtensions, `vkEnumerateInstanceExtensionProperties` + * and `vkEnumerateDeviceExtensionProperties` instead. + * + * @param[in] extension The ASCII encoded name of the extension. + * @return `GLFW_TRUE` if the extension is available, or `GLFW_FALSE` + * otherwise. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_NO_CURRENT_CONTEXT, @ref GLFW_INVALID_VALUE and @ref + * GLFW_PLATFORM_ERROR. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref context_glext + * @sa @ref glfwGetProcAddress + * + * @since Added in version 1.0. + * + * @ingroup context + */ +GLFWAPI int glfwExtensionSupported(const char* extension); + +/*! @brief Returns the address of the specified function for the current + * context. + * + * This function returns the address of the specified OpenGL or OpenGL ES + * [core or extension function](@ref context_glext), if it is supported + * by the current context. + * + * A context must be current on the calling thread. Calling this function + * without a current context will cause a @ref GLFW_NO_CURRENT_CONTEXT error. + * + * This function does not apply to Vulkan. If you are rendering with Vulkan, + * see @ref glfwGetInstanceProcAddress, `vkGetInstanceProcAddr` and + * `vkGetDeviceProcAddr` instead. + * + * @param[in] procname The ASCII encoded name of the function. + * @return The address of the function, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_NO_CURRENT_CONTEXT and @ref GLFW_PLATFORM_ERROR. + * + * @remark The address of a given function is not guaranteed to be the same + * between contexts. + * + * @remark This function may return a non-`NULL` address despite the + * associated version or extension not being available. Always check the + * context version or extension string first. + * + * @pointer_lifetime The returned function pointer is valid until the context + * is destroyed or the library is terminated. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref context_glext + * @sa @ref glfwExtensionSupported + * + * @since Added in version 1.0. + * + * @ingroup context + */ +GLFWAPI GLFWglproc glfwGetProcAddress(const char* procname); + +/*! @brief Returns whether the Vulkan loader and an ICD have been found. + * + * This function returns whether the Vulkan loader and any minimally functional + * ICD have been found. + * + * The availability of a Vulkan loader and even an ICD does not by itself guarantee that + * surface creation or even instance creation is possible. Call @ref + * glfwGetRequiredInstanceExtensions to check whether the extensions necessary for Vulkan + * surface creation are available and @ref glfwGetPhysicalDevicePresentationSupport to + * check whether a queue family of a physical device supports image presentation. + * + * @return `GLFW_TRUE` if Vulkan is minimally available, or `GLFW_FALSE` + * otherwise. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref vulkan_support + * + * @since Added in version 3.2. + * + * @ingroup vulkan + */ +GLFWAPI int glfwVulkanSupported(void); + +/*! @brief Returns the Vulkan instance extensions required by GLFW. + * + * This function returns an array of names of Vulkan instance extensions required + * by GLFW for creating Vulkan surfaces for GLFW windows. If successful, the + * list will always contain `VK_KHR_surface`, so if you don't require any + * additional extensions you can pass this list directly to the + * `VkInstanceCreateInfo` struct. + * + * If Vulkan is not available on the machine, this function returns `NULL` and + * generates a @ref GLFW_API_UNAVAILABLE error. Call @ref glfwVulkanSupported + * to check whether Vulkan is at least minimally available. + * + * If Vulkan is available but no set of extensions allowing window surface + * creation was found, this function returns `NULL`. You may still use Vulkan + * for off-screen rendering and compute work. + * + * @param[out] count Where to store the number of extensions in the returned + * array. This is set to zero if an error occurred. + * @return An array of ASCII encoded extension names, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_API_UNAVAILABLE. + * + * @remark Additional extensions may be required by future versions of GLFW. + * You should check if any extensions you wish to enable are already in the + * returned array, as it is an error to specify an extension more than once in + * the `VkInstanceCreateInfo` struct. + * + * @remark @macos GLFW currently supports both the `VK_MVK_macos_surface` and + * the newer `VK_EXT_metal_surface` extensions. + * + * @pointer_lifetime The returned array is allocated and freed by GLFW. You + * should not free it yourself. It is guaranteed to be valid only until the + * library is terminated. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref vulkan_ext + * @sa @ref glfwCreateWindowSurface + * + * @since Added in version 3.2. + * + * @ingroup vulkan + */ +GLFWAPI const char** glfwGetRequiredInstanceExtensions(uint32_t* count); + +#if defined(VK_VERSION_1_0) + +/*! @brief Returns the address of the specified Vulkan instance function. + * + * This function returns the address of the specified Vulkan core or extension + * function for the specified instance. If instance is set to `NULL` it can + * return any function exported from the Vulkan loader, including at least the + * following functions: + * + * - `vkEnumerateInstanceExtensionProperties` + * - `vkEnumerateInstanceLayerProperties` + * - `vkCreateInstance` + * - `vkGetInstanceProcAddr` + * + * If Vulkan is not available on the machine, this function returns `NULL` and + * generates a @ref GLFW_API_UNAVAILABLE error. Call @ref glfwVulkanSupported + * to check whether Vulkan is at least minimally available. + * + * This function is equivalent to calling `vkGetInstanceProcAddr` with + * a platform-specific query of the Vulkan loader as a fallback. + * + * @param[in] instance The Vulkan instance to query, or `NULL` to retrieve + * functions related to instance creation. + * @param[in] procname The ASCII encoded name of the function. + * @return The address of the function, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_API_UNAVAILABLE. + * + * @pointer_lifetime The returned function pointer is valid until the library + * is terminated. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref vulkan_proc + * + * @since Added in version 3.2. + * + * @ingroup vulkan + */ +GLFWAPI GLFWvkproc glfwGetInstanceProcAddress(VkInstance instance, const char* procname); + +/*! @brief Returns whether the specified queue family can present images. + * + * This function returns whether the specified queue family of the specified + * physical device supports presentation to the platform GLFW was built for. + * + * If Vulkan or the required window surface creation instance extensions are + * not available on the machine, or if the specified instance was not created + * with the required extensions, this function returns `GLFW_FALSE` and + * generates a @ref GLFW_API_UNAVAILABLE error. Call @ref glfwVulkanSupported + * to check whether Vulkan is at least minimally available and @ref + * glfwGetRequiredInstanceExtensions to check what instance extensions are + * required. + * + * @param[in] instance The instance that the physical device belongs to. + * @param[in] device The physical device that the queue family belongs to. + * @param[in] queuefamily The index of the queue family to query. + * @return `GLFW_TRUE` if the queue family supports presentation, or + * `GLFW_FALSE` otherwise. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_API_UNAVAILABLE and @ref GLFW_PLATFORM_ERROR. + * + * @remark @macos This function currently always returns `GLFW_TRUE`, as the + * `VK_MVK_macos_surface` and `VK_EXT_metal_surface` extensions do not provide + * a `vkGetPhysicalDevice*PresentationSupport` type function. + * + * @thread_safety This function may be called from any thread. For + * synchronization details of Vulkan objects, see the Vulkan specification. + * + * @sa @ref vulkan_present + * + * @since Added in version 3.2. + * + * @ingroup vulkan + */ +GLFWAPI int glfwGetPhysicalDevicePresentationSupport(VkInstance instance, VkPhysicalDevice device, uint32_t queuefamily); + +/*! @brief Creates a Vulkan surface for the specified window. + * + * This function creates a Vulkan surface for the specified window. + * + * If the Vulkan loader or at least one minimally functional ICD were not found, + * this function returns `VK_ERROR_INITIALIZATION_FAILED` and generates a @ref + * GLFW_API_UNAVAILABLE error. Call @ref glfwVulkanSupported to check whether + * Vulkan is at least minimally available. + * + * If the required window surface creation instance extensions are not + * available or if the specified instance was not created with these extensions + * enabled, this function returns `VK_ERROR_EXTENSION_NOT_PRESENT` and + * generates a @ref GLFW_API_UNAVAILABLE error. Call @ref + * glfwGetRequiredInstanceExtensions to check what instance extensions are + * required. + * + * The window surface cannot be shared with another API so the window must + * have been created with the [client api hint](@ref GLFW_CLIENT_API_attrib) + * set to `GLFW_NO_API` otherwise it generates a @ref GLFW_INVALID_VALUE error + * and returns `VK_ERROR_NATIVE_WINDOW_IN_USE_KHR`. + * + * The window surface must be destroyed before the specified Vulkan instance. + * It is the responsibility of the caller to destroy the window surface. GLFW + * does not destroy it for you. Call `vkDestroySurfaceKHR` to destroy the + * surface. + * + * @param[in] instance The Vulkan instance to create the surface in. + * @param[in] window The window to create the surface for. + * @param[in] allocator The allocator to use, or `NULL` to use the default + * allocator. + * @param[out] surface Where to store the handle of the surface. This is set + * to `VK_NULL_HANDLE` if an error occurred. + * @return `VK_SUCCESS` if successful, or a Vulkan error code if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_API_UNAVAILABLE, @ref GLFW_PLATFORM_ERROR and @ref GLFW_INVALID_VALUE + * + * @remark If an error occurs before the creation call is made, GLFW returns + * the Vulkan error code most appropriate for the error. Appropriate use of + * @ref glfwVulkanSupported and @ref glfwGetRequiredInstanceExtensions should + * eliminate almost all occurrences of these errors. + * + * @remark @macos This function currently only supports the + * `VK_MVK_macos_surface` extension from MoltenVK. + * + * @remark @macos This function creates and sets a `CAMetalLayer` instance for + * the window content view, which is required for MoltenVK to function. + * + * @remark @x11 GLFW by default attempts to use the `VK_KHR_xcb_surface` + * extension, if available. You can make it prefer the `VK_KHR_xlib_surface` + * extension by setting the + * [GLFW_X11_XCB_VULKAN_SURFACE](@ref GLFW_X11_XCB_VULKAN_SURFACE_hint) init + * hint. + * + * @thread_safety This function may be called from any thread. For + * synchronization details of Vulkan objects, see the Vulkan specification. + * + * @sa @ref vulkan_surface + * @sa @ref glfwGetRequiredInstanceExtensions + * + * @since Added in version 3.2. + * + * @ingroup vulkan + */ +GLFWAPI VkResult glfwCreateWindowSurface(VkInstance instance, GLFWwindow* window, const VkAllocationCallbacks* allocator, VkSurfaceKHR* surface); + +#endif /*VK_VERSION_1_0*/ + + +/************************************************************************* + * Global definition cleanup + *************************************************************************/ + +/* ------------------- BEGIN SYSTEM/COMPILER SPECIFIC -------------------- */ + +#ifdef GLFW_WINGDIAPI_DEFINED + #undef WINGDIAPI + #undef GLFW_WINGDIAPI_DEFINED +#endif + +#ifdef GLFW_CALLBACK_DEFINED + #undef CALLBACK + #undef GLFW_CALLBACK_DEFINED +#endif + +/* Some OpenGL related headers need GLAPIENTRY, but it is unconditionally + * defined by some gl.h variants (OpenBSD) so define it after if needed. + */ +#ifndef GLAPIENTRY + #define GLAPIENTRY APIENTRY +#endif + +/* -------------------- END SYSTEM/COMPILER SPECIFIC --------------------- */ + + +#ifdef __cplusplus +} +#endif + +#endif /* _glfw3_h_ */ + diff --git a/local_packages/include/GLFW/glfw3native.h b/local_packages/include/GLFW/glfw3native.h new file mode 100644 index 0000000..41b2f86 --- /dev/null +++ b/local_packages/include/GLFW/glfw3native.h @@ -0,0 +1,594 @@ +/************************************************************************* + * GLFW 3.4 - www.glfw.org + * A library for OpenGL, window and input + *------------------------------------------------------------------------ + * Copyright (c) 2002-2006 Marcus Geelnard + * Copyright (c) 2006-2018 Camilla Löwy + * + * This software is provided 'as-is', without any express or implied + * warranty. In no event will the authors be held liable for any damages + * arising from the use of this software. + * + * Permission is granted to anyone to use this software for any purpose, + * including commercial applications, and to alter it and redistribute it + * freely, subject to the following restrictions: + * + * 1. The origin of this software must not be misrepresented; you must not + * claim that you wrote the original software. If you use this software + * in a product, an acknowledgment in the product documentation would + * be appreciated but is not required. + * + * 2. Altered source versions must be plainly marked as such, and must not + * be misrepresented as being the original software. + * + * 3. This notice may not be removed or altered from any source + * distribution. + * + *************************************************************************/ + +#ifndef _glfw3_native_h_ +#define _glfw3_native_h_ + +#ifdef __cplusplus +extern "C" { +#endif + + +/************************************************************************* + * Doxygen documentation + *************************************************************************/ + +/*! @file glfw3native.h + * @brief The header of the native access functions. + * + * This is the header file of the native access functions. See @ref native for + * more information. + */ +/*! @defgroup native Native access + * @brief Functions related to accessing native handles. + * + * **By using the native access functions you assert that you know what you're + * doing and how to fix problems caused by using them. If you don't, you + * shouldn't be using them.** + * + * Before the inclusion of @ref glfw3native.h, you may define zero or more + * window system API macro and zero or more context creation API macros. + * + * The chosen backends must match those the library was compiled for. Failure + * to do this will cause a link-time error. + * + * The available window API macros are: + * * `GLFW_EXPOSE_NATIVE_WIN32` + * * `GLFW_EXPOSE_NATIVE_COCOA` + * * `GLFW_EXPOSE_NATIVE_X11` + * * `GLFW_EXPOSE_NATIVE_WAYLAND` + * + * The available context API macros are: + * * `GLFW_EXPOSE_NATIVE_WGL` + * * `GLFW_EXPOSE_NATIVE_NSGL` + * * `GLFW_EXPOSE_NATIVE_GLX` + * * `GLFW_EXPOSE_NATIVE_EGL` + * * `GLFW_EXPOSE_NATIVE_OSMESA` + * + * These macros select which of the native access functions that are declared + * and which platform-specific headers to include. It is then up your (by + * definition platform-specific) code to handle which of these should be + * defined. + */ + + +/************************************************************************* + * System headers and types + *************************************************************************/ + +#if defined(GLFW_EXPOSE_NATIVE_WIN32) || defined(GLFW_EXPOSE_NATIVE_WGL) + // This is a workaround for the fact that glfw3.h needs to export APIENTRY (for + // example to allow applications to correctly declare a GL_KHR_debug callback) + // but windows.h assumes no one will define APIENTRY before it does + #if defined(GLFW_APIENTRY_DEFINED) + #undef APIENTRY + #undef GLFW_APIENTRY_DEFINED + #endif + #include +#elif defined(GLFW_EXPOSE_NATIVE_COCOA) || defined(GLFW_EXPOSE_NATIVE_NSGL) + #if defined(__OBJC__) + #import + #else + #include + typedef void* id; + #endif +#elif defined(GLFW_EXPOSE_NATIVE_X11) || defined(GLFW_EXPOSE_NATIVE_GLX) + #include + #include +#elif defined(GLFW_EXPOSE_NATIVE_WAYLAND) + #include +#endif + +#if defined(GLFW_EXPOSE_NATIVE_WGL) + /* WGL is declared by windows.h */ +#endif +#if defined(GLFW_EXPOSE_NATIVE_NSGL) + /* NSGL is declared by Cocoa.h */ +#endif +#if defined(GLFW_EXPOSE_NATIVE_GLX) + #include +#endif +#if defined(GLFW_EXPOSE_NATIVE_EGL) + #include +#endif +#if defined(GLFW_EXPOSE_NATIVE_OSMESA) + #include +#endif + + +/************************************************************************* + * Functions + *************************************************************************/ + +#if defined(GLFW_EXPOSE_NATIVE_WIN32) +/*! @brief Returns the adapter device name of the specified monitor. + * + * @return The UTF-8 encoded adapter device name (for example `\\.\DISPLAY1`) + * of the specified monitor, or `NULL` if an [error](@ref error_handling) + * occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.1. + * + * @ingroup native + */ +GLFWAPI const char* glfwGetWin32Adapter(GLFWmonitor* monitor); + +/*! @brief Returns the display device name of the specified monitor. + * + * @return The UTF-8 encoded display device name (for example + * `\\.\DISPLAY1\Monitor0`) of the specified monitor, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.1. + * + * @ingroup native + */ +GLFWAPI const char* glfwGetWin32Monitor(GLFWmonitor* monitor); + +/*! @brief Returns the `HWND` of the specified window. + * + * @return The `HWND` of the specified window, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @remark The `HDC` associated with the window can be queried with the + * [GetDC](https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-getdc) + * function. + * @code + * HDC dc = GetDC(glfwGetWin32Window(window)); + * @endcode + * This DC is private and does not need to be released. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.0. + * + * @ingroup native + */ +GLFWAPI HWND glfwGetWin32Window(GLFWwindow* window); +#endif + +#if defined(GLFW_EXPOSE_NATIVE_WGL) +/*! @brief Returns the `HGLRC` of the specified window. + * + * @return The `HGLRC` of the specified window, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NO_WINDOW_CONTEXT and @ref + * GLFW_NOT_INITIALIZED. + * + * @remark The `HDC` associated with the window can be queried with the + * [GetDC](https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-getdc) + * function. + * @code + * HDC dc = GetDC(glfwGetWin32Window(window)); + * @endcode + * This DC is private and does not need to be released. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.0. + * + * @ingroup native + */ +GLFWAPI HGLRC glfwGetWGLContext(GLFWwindow* window); +#endif + +#if defined(GLFW_EXPOSE_NATIVE_COCOA) +/*! @brief Returns the `CGDirectDisplayID` of the specified monitor. + * + * @return The `CGDirectDisplayID` of the specified monitor, or + * `kCGNullDirectDisplay` if an [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.1. + * + * @ingroup native + */ +GLFWAPI CGDirectDisplayID glfwGetCocoaMonitor(GLFWmonitor* monitor); + +/*! @brief Returns the `NSWindow` of the specified window. + * + * @return The `NSWindow` of the specified window, or `nil` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.0. + * + * @ingroup native + */ +GLFWAPI id glfwGetCocoaWindow(GLFWwindow* window); +#endif + +#if defined(GLFW_EXPOSE_NATIVE_NSGL) +/*! @brief Returns the `NSOpenGLContext` of the specified window. + * + * @return The `NSOpenGLContext` of the specified window, or `nil` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NO_WINDOW_CONTEXT and @ref + * GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.0. + * + * @ingroup native + */ +GLFWAPI id glfwGetNSGLContext(GLFWwindow* window); +#endif + +#if defined(GLFW_EXPOSE_NATIVE_X11) +/*! @brief Returns the `Display` used by GLFW. + * + * @return The `Display` used by GLFW, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.0. + * + * @ingroup native + */ +GLFWAPI Display* glfwGetX11Display(void); + +/*! @brief Returns the `RRCrtc` of the specified monitor. + * + * @return The `RRCrtc` of the specified monitor, or `None` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.1. + * + * @ingroup native + */ +GLFWAPI RRCrtc glfwGetX11Adapter(GLFWmonitor* monitor); + +/*! @brief Returns the `RROutput` of the specified monitor. + * + * @return The `RROutput` of the specified monitor, or `None` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.1. + * + * @ingroup native + */ +GLFWAPI RROutput glfwGetX11Monitor(GLFWmonitor* monitor); + +/*! @brief Returns the `Window` of the specified window. + * + * @return The `Window` of the specified window, or `None` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.0. + * + * @ingroup native + */ +GLFWAPI Window glfwGetX11Window(GLFWwindow* window); + +/*! @brief Sets the current primary selection to the specified string. + * + * @param[in] string A UTF-8 encoded string. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @pointer_lifetime The specified string is copied before this function + * returns. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref clipboard + * @sa glfwGetX11SelectionString + * @sa glfwSetClipboardString + * + * @since Added in version 3.3. + * + * @ingroup native + */ +GLFWAPI void glfwSetX11SelectionString(const char* string); + +/*! @brief Returns the contents of the current primary selection as a string. + * + * If the selection is empty or if its contents cannot be converted, `NULL` + * is returned and a @ref GLFW_FORMAT_UNAVAILABLE error is generated. + * + * @return The contents of the selection as a UTF-8 encoded string, or `NULL` + * if an [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @pointer_lifetime The returned string is allocated and freed by GLFW. You + * should not free it yourself. It is valid until the next call to @ref + * glfwGetX11SelectionString or @ref glfwSetX11SelectionString, or until the + * library is terminated. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref clipboard + * @sa glfwSetX11SelectionString + * @sa glfwGetClipboardString + * + * @since Added in version 3.3. + * + * @ingroup native + */ +GLFWAPI const char* glfwGetX11SelectionString(void); +#endif + +#if defined(GLFW_EXPOSE_NATIVE_GLX) +/*! @brief Returns the `GLXContext` of the specified window. + * + * @return The `GLXContext` of the specified window, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NO_WINDOW_CONTEXT and @ref + * GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.0. + * + * @ingroup native + */ +GLFWAPI GLXContext glfwGetGLXContext(GLFWwindow* window); + +/*! @brief Returns the `GLXWindow` of the specified window. + * + * @return The `GLXWindow` of the specified window, or `None` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NO_WINDOW_CONTEXT and @ref + * GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.2. + * + * @ingroup native + */ +GLFWAPI GLXWindow glfwGetGLXWindow(GLFWwindow* window); +#endif + +#if defined(GLFW_EXPOSE_NATIVE_WAYLAND) +/*! @brief Returns the `struct wl_display*` used by GLFW. + * + * @return The `struct wl_display*` used by GLFW, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.2. + * + * @ingroup native + */ +GLFWAPI struct wl_display* glfwGetWaylandDisplay(void); + +/*! @brief Returns the `struct wl_output*` of the specified monitor. + * + * @return The `struct wl_output*` of the specified monitor, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.2. + * + * @ingroup native + */ +GLFWAPI struct wl_output* glfwGetWaylandMonitor(GLFWmonitor* monitor); + +/*! @brief Returns the main `struct wl_surface*` of the specified window. + * + * @return The main `struct wl_surface*` of the specified window, or `NULL` if + * an [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.2. + * + * @ingroup native + */ +GLFWAPI struct wl_surface* glfwGetWaylandWindow(GLFWwindow* window); +#endif + +#if defined(GLFW_EXPOSE_NATIVE_EGL) +/*! @brief Returns the `EGLDisplay` used by GLFW. + * + * @return The `EGLDisplay` used by GLFW, or `EGL_NO_DISPLAY` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.0. + * + * @ingroup native + */ +GLFWAPI EGLDisplay glfwGetEGLDisplay(void); + +/*! @brief Returns the `EGLContext` of the specified window. + * + * @return The `EGLContext` of the specified window, or `EGL_NO_CONTEXT` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NO_WINDOW_CONTEXT and @ref + * GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.0. + * + * @ingroup native + */ +GLFWAPI EGLContext glfwGetEGLContext(GLFWwindow* window); + +/*! @brief Returns the `EGLSurface` of the specified window. + * + * @return The `EGLSurface` of the specified window, or `EGL_NO_SURFACE` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NO_WINDOW_CONTEXT and @ref + * GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.0. + * + * @ingroup native + */ +GLFWAPI EGLSurface glfwGetEGLSurface(GLFWwindow* window); +#endif + +#if defined(GLFW_EXPOSE_NATIVE_OSMESA) +/*! @brief Retrieves the color buffer associated with the specified window. + * + * @param[in] window The window whose color buffer to retrieve. + * @param[out] width Where to store the width of the color buffer, or `NULL`. + * @param[out] height Where to store the height of the color buffer, or `NULL`. + * @param[out] format Where to store the OSMesa pixel format of the color + * buffer, or `NULL`. + * @param[out] buffer Where to store the address of the color buffer, or + * `NULL`. + * @return `GLFW_TRUE` if successful, or `GLFW_FALSE` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NO_WINDOW_CONTEXT and @ref + * GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.3. + * + * @ingroup native + */ +GLFWAPI int glfwGetOSMesaColorBuffer(GLFWwindow* window, int* width, int* height, int* format, void** buffer); + +/*! @brief Retrieves the depth buffer associated with the specified window. + * + * @param[in] window The window whose depth buffer to retrieve. + * @param[out] width Where to store the width of the depth buffer, or `NULL`. + * @param[out] height Where to store the height of the depth buffer, or `NULL`. + * @param[out] bytesPerValue Where to store the number of bytes per depth + * buffer element, or `NULL`. + * @param[out] buffer Where to store the address of the depth buffer, or + * `NULL`. + * @return `GLFW_TRUE` if successful, or `GLFW_FALSE` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NO_WINDOW_CONTEXT and @ref + * GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.3. + * + * @ingroup native + */ +GLFWAPI int glfwGetOSMesaDepthBuffer(GLFWwindow* window, int* width, int* height, int* bytesPerValue, void** buffer); + +/*! @brief Returns the `OSMesaContext` of the specified window. + * + * @return The `OSMesaContext` of the specified window, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NO_WINDOW_CONTEXT and @ref + * GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.3. + * + * @ingroup native + */ +GLFWAPI OSMesaContext glfwGetOSMesaContext(GLFWwindow* window); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* _glfw3_native_h_ */ + diff --git a/local_packages/include/spirv-tools/instrument.hpp b/local_packages/include/spirv-tools/instrument.hpp new file mode 100644 index 0000000..a75561b --- /dev/null +++ b/local_packages/include/spirv-tools/instrument.hpp @@ -0,0 +1,268 @@ +// Copyright (c) 2018 The Khronos Group Inc. +// Copyright (c) 2018 Valve Corporation +// Copyright (c) 2018 LunarG Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef INCLUDE_SPIRV_TOOLS_INSTRUMENT_HPP_ +#define INCLUDE_SPIRV_TOOLS_INSTRUMENT_HPP_ + +// Shader Instrumentation Interface +// +// This file provides an external interface for applications that wish to +// communicate with shaders instrumented by passes created by: +// +// CreateInstBindlessCheckPass +// CreateInstBuffAddrCheckPass +// CreateInstDebugPrintfPass +// +// More detailed documentation of these routines can be found in optimizer.hpp + +namespace spvtools { + +// Stream Output Buffer Offsets +// +// The following values provide offsets into the output buffer struct +// generated by InstrumentPass::GenDebugStreamWrite. This method is utilized +// by InstBindlessCheckPass, InstBuffAddrCheckPass, and InstDebugPrintfPass. +// +// The 1st member of the debug output buffer contains a set of flags +// controlling the behavior of instrumentation code. +static const int kDebugOutputFlagsOffset = 0; + +// Values stored at kDebugOutputFlagsOffset +enum kInstFlags : unsigned int { + kInstBufferOOBEnable = 0x1, +}; + +// The 2nd member of the debug output buffer contains the next available word +// in the data stream to be written. Shaders will atomically read and update +// this value so as not to overwrite each others records. This value must be +// initialized to zero +static const int kDebugOutputSizeOffset = 1; + +// The 3rd member of the output buffer is the start of the stream of records +// written by the instrumented shaders. Each record represents a validation +// error. The format of the records is documented below. +static const int kDebugOutputDataOffset = 2; + +// Common Stream Record Offsets +// +// The following are offsets to fields which are common to all records written +// to the output stream. +// +// Each record first contains the size of the record in 32-bit words, including +// the size word. +static const int kInstCommonOutSize = 0; + +// This is the shader id passed by the layer when the instrumentation pass is +// created. +static const int kInstCommonOutShaderId = 1; + +// This is the ordinal position of the instruction within the SPIR-V shader +// which generated the validation error. +static const int kInstCommonOutInstructionIdx = 2; + +// This is the stage which generated the validation error. This word is used +// to determine the contents of the next two words in the record. +// 0:Vert, 1:TessCtrl, 2:TessEval, 3:Geom, 4:Frag, 5:Compute +static const int kInstCommonOutStageIdx = 3; +static const int kInstCommonOutCnt = 4; + +// Stage-specific Stream Record Offsets +// +// Each stage will contain different values in the next set of words of the +// record used to identify which instantiation of the shader generated the +// validation error. +// +// Vertex Shader Output Record Offsets +static const int kInstVertOutVertexIndex = kInstCommonOutCnt; +static const int kInstVertOutInstanceIndex = kInstCommonOutCnt + 1; +static const int kInstVertOutUnused = kInstCommonOutCnt + 2; + +// Frag Shader Output Record Offsets +static const int kInstFragOutFragCoordX = kInstCommonOutCnt; +static const int kInstFragOutFragCoordY = kInstCommonOutCnt + 1; +static const int kInstFragOutUnused = kInstCommonOutCnt + 2; + +// Compute Shader Output Record Offsets +static const int kInstCompOutGlobalInvocationIdX = kInstCommonOutCnt; +static const int kInstCompOutGlobalInvocationIdY = kInstCommonOutCnt + 1; +static const int kInstCompOutGlobalInvocationIdZ = kInstCommonOutCnt + 2; + +// Tessellation Control Shader Output Record Offsets +static const int kInstTessCtlOutInvocationId = kInstCommonOutCnt; +static const int kInstTessCtlOutPrimitiveId = kInstCommonOutCnt + 1; +static const int kInstTessCtlOutUnused = kInstCommonOutCnt + 2; + +// Tessellation Eval Shader Output Record Offsets +static const int kInstTessEvalOutPrimitiveId = kInstCommonOutCnt; +static const int kInstTessEvalOutTessCoordU = kInstCommonOutCnt + 1; +static const int kInstTessEvalOutTessCoordV = kInstCommonOutCnt + 2; + +// Geometry Shader Output Record Offsets +static const int kInstGeomOutPrimitiveId = kInstCommonOutCnt; +static const int kInstGeomOutInvocationId = kInstCommonOutCnt + 1; +static const int kInstGeomOutUnused = kInstCommonOutCnt + 2; + +// Ray Tracing Shader Output Record Offsets +static const int kInstRayTracingOutLaunchIdX = kInstCommonOutCnt; +static const int kInstRayTracingOutLaunchIdY = kInstCommonOutCnt + 1; +static const int kInstRayTracingOutLaunchIdZ = kInstCommonOutCnt + 2; + +// Mesh Shader Output Record Offsets +static const int kInstMeshOutGlobalInvocationIdX = kInstCommonOutCnt; +static const int kInstMeshOutGlobalInvocationIdY = kInstCommonOutCnt + 1; +static const int kInstMeshOutGlobalInvocationIdZ = kInstCommonOutCnt + 2; + +// Task Shader Output Record Offsets +static const int kInstTaskOutGlobalInvocationIdX = kInstCommonOutCnt; +static const int kInstTaskOutGlobalInvocationIdY = kInstCommonOutCnt + 1; +static const int kInstTaskOutGlobalInvocationIdZ = kInstCommonOutCnt + 2; + +// Size of Common and Stage-specific Members +static const int kInstStageOutCnt = kInstCommonOutCnt + 3; + +// Validation Error Code Offset +// +// This identifies the validation error. It also helps to identify +// how many words follow in the record and their meaning. +static const int kInstValidationOutError = kInstStageOutCnt; + +// Validation-specific Output Record Offsets +// +// Each different validation will generate a potentially different +// number of words at the end of the record giving more specifics +// about the validation error. +// +// A bindless bounds error will output the index and the bound. +static const int kInstBindlessBoundsOutDescIndex = kInstStageOutCnt + 1; +static const int kInstBindlessBoundsOutDescBound = kInstStageOutCnt + 2; +static const int kInstBindlessBoundsOutUnused = kInstStageOutCnt + 3; +static const int kInstBindlessBoundsOutCnt = kInstStageOutCnt + 4; + +// A descriptor uninitialized error will output the index. +static const int kInstBindlessUninitOutDescIndex = kInstStageOutCnt + 1; +static const int kInstBindlessUninitOutUnused = kInstStageOutCnt + 2; +static const int kInstBindlessUninitOutUnused2 = kInstStageOutCnt + 3; +static const int kInstBindlessUninitOutCnt = kInstStageOutCnt + 4; + +// A buffer out-of-bounds error will output the descriptor +// index, the buffer offset and the buffer size +static const int kInstBindlessBuffOOBOutDescIndex = kInstStageOutCnt + 1; +static const int kInstBindlessBuffOOBOutBuffOff = kInstStageOutCnt + 2; +static const int kInstBindlessBuffOOBOutBuffSize = kInstStageOutCnt + 3; +static const int kInstBindlessBuffOOBOutCnt = kInstStageOutCnt + 4; + +// A buffer address unalloc error will output the 64-bit pointer in +// two 32-bit pieces, lower bits first. +static const int kInstBuffAddrUnallocOutDescPtrLo = kInstStageOutCnt + 1; +static const int kInstBuffAddrUnallocOutDescPtrHi = kInstStageOutCnt + 2; +static const int kInstBuffAddrUnallocOutCnt = kInstStageOutCnt + 3; + +// Maximum Output Record Member Count +static const int kInstMaxOutCnt = kInstStageOutCnt + 4; + +// Validation Error Codes +// +// These are the possible validation error codes. +static const int kInstErrorBindlessBounds = 0; +static const int kInstErrorBindlessUninit = 1; +static const int kInstErrorBuffAddrUnallocRef = 2; +// Deleted: static const int kInstErrorBindlessBuffOOB = 3; +// This comment will will remain for 2 releases to allow +// for the transition of all builds. Buffer OOB is +// generating the following four differentiated codes instead: +static const int kInstErrorBuffOOBUniform = 4; +static const int kInstErrorBuffOOBStorage = 5; +static const int kInstErrorBuffOOBUniformTexel = 6; +static const int kInstErrorBuffOOBStorageTexel = 7; +static const int kInstErrorMax = kInstErrorBuffOOBStorageTexel; + +// Direct Input Buffer Offsets +// +// The following values provide member offsets into the input buffers +// consumed by InstrumentPass::GenDebugDirectRead(). This method is utilized +// by InstBindlessCheckPass. +// +// The only object in an input buffer is a runtime array of unsigned +// integers. Each validation will have its own formatting of this array. +static const int kDebugInputDataOffset = 0; + +// Debug Buffer Bindings +// +// These are the bindings for the different buffers which are +// read or written by the instrumentation passes. +// +// This is the output buffer written by InstBindlessCheckPass, +// InstBuffAddrCheckPass, and possibly other future validations. +static const int kDebugOutputBindingStream = 0; + +// The binding for the input buffer read by InstBindlessCheckPass. +static const int kDebugInputBindingBindless = 1; + +// The binding for the input buffer read by InstBuffAddrCheckPass. +static const int kDebugInputBindingBuffAddr = 2; + +// This is the output buffer written by InstDebugPrintfPass. +static const int kDebugOutputPrintfStream = 3; + +// Bindless Validation Input Buffer Format +// +// An input buffer for bindless validation consists of a single array of +// unsigned integers we will call Data[]. This array is formatted as follows. +// +// At offset kDebugInputBindlessInitOffset in Data[] is a single uint which +// gives an offset to the start of the bindless initialization data. More +// specifically, if the following value is zero, we know that the descriptor at +// (set = s, binding = b, index = i) is not initialized; if the value is +// non-zero, and the descriptor points to a buffer, the value is the length of +// the buffer in bytes and can be used to check for out-of-bounds buffer +// references: +// Data[ i + Data[ b + Data[ s + Data[ kDebugInputBindlessInitOffset ] ] ] ] +static const int kDebugInputBindlessInitOffset = 0; + +// At offset kDebugInputBindlessOffsetLengths is some number of uints which +// provide the bindless length data. More specifically, the number of +// descriptors at (set=s, binding=b) is: +// Data[ Data[ s + kDebugInputBindlessOffsetLengths ] + b ] +static const int kDebugInputBindlessOffsetLengths = 1; + +// Buffer Device Address Input Buffer Format +// +// An input buffer for buffer device address validation consists of a single +// array of unsigned 64-bit integers we will call Data[]. This array is +// formatted as follows: +// +// At offset kDebugInputBuffAddrPtrOffset is a list of sorted valid buffer +// addresses. The list is terminated with the address 0xffffffffffffffff. +// If 0x0 is not a valid buffer address, this address is inserted at the +// start of the list. +// +static const int kDebugInputBuffAddrPtrOffset = 1; +// +// At offset kDebugInputBuffAddrLengthOffset in Data[] is a single uint64 which +// gives an offset to the start of the buffer length data. More +// specifically, for a buffer whose pointer is located at input buffer offset +// i, the length is located at: +// +// Data[ i - kDebugInputBuffAddrPtrOffset +// + Data[ kDebugInputBuffAddrLengthOffset ] ] +// +// The length associated with the 0xffffffffffffffff address is zero. If +// not a valid buffer, the length associated with the 0x0 address is zero. +static const int kDebugInputBuffAddrLengthOffset = 0; + +} // namespace spvtools + +#endif // INCLUDE_SPIRV_TOOLS_INSTRUMENT_HPP_ diff --git a/local_packages/include/spirv-tools/libspirv.h b/local_packages/include/spirv-tools/libspirv.h new file mode 100644 index 0000000..b549efb --- /dev/null +++ b/local_packages/include/spirv-tools/libspirv.h @@ -0,0 +1,907 @@ +// Copyright (c) 2015-2020 The Khronos Group Inc. +// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights +// reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef INCLUDE_SPIRV_TOOLS_LIBSPIRV_H_ +#define INCLUDE_SPIRV_TOOLS_LIBSPIRV_H_ + +#ifdef __cplusplus +extern "C" { +#else +#include +#endif + +#include +#include + +#if defined(SPIRV_TOOLS_SHAREDLIB) +#if defined(_WIN32) +#if defined(SPIRV_TOOLS_IMPLEMENTATION) +#define SPIRV_TOOLS_EXPORT __declspec(dllexport) +#else +#define SPIRV_TOOLS_EXPORT __declspec(dllimport) +#endif +#else +#if defined(SPIRV_TOOLS_IMPLEMENTATION) +#define SPIRV_TOOLS_EXPORT __attribute__((visibility("default"))) +#else +#define SPIRV_TOOLS_EXPORT +#endif +#endif +#else +#define SPIRV_TOOLS_EXPORT +#endif + +// Helpers + +#define SPV_BIT(shift) (1 << (shift)) + +#define SPV_FORCE_16_BIT_ENUM(name) SPV_FORCE_16BIT_##name = 0x7fff +#define SPV_FORCE_32_BIT_ENUM(name) SPV_FORCE_32BIT_##name = 0x7fffffff + +// Enumerations + +typedef enum spv_result_t { + SPV_SUCCESS = 0, + SPV_UNSUPPORTED = 1, + SPV_END_OF_STREAM = 2, + SPV_WARNING = 3, + SPV_FAILED_MATCH = 4, + SPV_REQUESTED_TERMINATION = 5, // Success, but signals early termination. + SPV_ERROR_INTERNAL = -1, + SPV_ERROR_OUT_OF_MEMORY = -2, + SPV_ERROR_INVALID_POINTER = -3, + SPV_ERROR_INVALID_BINARY = -4, + SPV_ERROR_INVALID_TEXT = -5, + SPV_ERROR_INVALID_TABLE = -6, + SPV_ERROR_INVALID_VALUE = -7, + SPV_ERROR_INVALID_DIAGNOSTIC = -8, + SPV_ERROR_INVALID_LOOKUP = -9, + SPV_ERROR_INVALID_ID = -10, + SPV_ERROR_INVALID_CFG = -11, + SPV_ERROR_INVALID_LAYOUT = -12, + SPV_ERROR_INVALID_CAPABILITY = -13, + SPV_ERROR_INVALID_DATA = -14, // Indicates data rules validation failure. + SPV_ERROR_MISSING_EXTENSION = -15, + SPV_ERROR_WRONG_VERSION = -16, // Indicates wrong SPIR-V version + SPV_FORCE_32_BIT_ENUM(spv_result_t) +} spv_result_t; + +// Severity levels of messages communicated to the consumer. +typedef enum spv_message_level_t { + SPV_MSG_FATAL, // Unrecoverable error due to environment. + // Will exit the program immediately. E.g., + // out of memory. + SPV_MSG_INTERNAL_ERROR, // Unrecoverable error due to SPIRV-Tools + // internals. + // Will exit the program immediately. E.g., + // unimplemented feature. + SPV_MSG_ERROR, // Normal error due to user input. + SPV_MSG_WARNING, // Warning information. + SPV_MSG_INFO, // General information. + SPV_MSG_DEBUG, // Debug information. +} spv_message_level_t; + +typedef enum spv_endianness_t { + SPV_ENDIANNESS_LITTLE, + SPV_ENDIANNESS_BIG, + SPV_FORCE_32_BIT_ENUM(spv_endianness_t) +} spv_endianness_t; + +// The kinds of operands that an instruction may have. +// +// Some operand types are "concrete". The binary parser uses a concrete +// operand type to describe an operand of a parsed instruction. +// +// The assembler uses all operand types. In addition to determining what +// kind of value an operand may be, non-concrete operand types capture the +// fact that an operand might be optional (may be absent, or present exactly +// once), or might occur zero or more times. +// +// Sometimes we also need to be able to express the fact that an operand +// is a member of an optional tuple of values. In that case the first member +// would be optional, and the subsequent members would be required. +// +// NOTE: Although we don't promise binary compatibility, as a courtesy, please +// add new enum values at the end. +typedef enum spv_operand_type_t { + // A sentinel value. + SPV_OPERAND_TYPE_NONE = 0, + + // Set 1: Operands that are IDs. + SPV_OPERAND_TYPE_ID, + SPV_OPERAND_TYPE_TYPE_ID, + SPV_OPERAND_TYPE_RESULT_ID, + SPV_OPERAND_TYPE_MEMORY_SEMANTICS_ID, // SPIR-V Sec 3.25 + SPV_OPERAND_TYPE_SCOPE_ID, // SPIR-V Sec 3.27 + + // Set 2: Operands that are literal numbers. + SPV_OPERAND_TYPE_LITERAL_INTEGER, // Always unsigned 32-bits. + // The Instruction argument to OpExtInst. It's an unsigned 32-bit literal + // number indicating which instruction to use from an extended instruction + // set. + SPV_OPERAND_TYPE_EXTENSION_INSTRUCTION_NUMBER, + // The Opcode argument to OpSpecConstantOp. It determines the operation + // to be performed on constant operands to compute a specialization constant + // result. + SPV_OPERAND_TYPE_SPEC_CONSTANT_OP_NUMBER, + // A literal number whose format and size are determined by a previous operand + // in the same instruction. It's a signed integer, an unsigned integer, or a + // floating point number. It also has a specified bit width. The width + // may be larger than 32, which would require such a typed literal value to + // occupy multiple SPIR-V words. + SPV_OPERAND_TYPE_TYPED_LITERAL_NUMBER, + + // Set 3: The literal string operand type. + SPV_OPERAND_TYPE_LITERAL_STRING, + + // Set 4: Operands that are a single word enumerated value. + SPV_OPERAND_TYPE_SOURCE_LANGUAGE, // SPIR-V Sec 3.2 + SPV_OPERAND_TYPE_EXECUTION_MODEL, // SPIR-V Sec 3.3 + SPV_OPERAND_TYPE_ADDRESSING_MODEL, // SPIR-V Sec 3.4 + SPV_OPERAND_TYPE_MEMORY_MODEL, // SPIR-V Sec 3.5 + SPV_OPERAND_TYPE_EXECUTION_MODE, // SPIR-V Sec 3.6 + SPV_OPERAND_TYPE_STORAGE_CLASS, // SPIR-V Sec 3.7 + SPV_OPERAND_TYPE_DIMENSIONALITY, // SPIR-V Sec 3.8 + SPV_OPERAND_TYPE_SAMPLER_ADDRESSING_MODE, // SPIR-V Sec 3.9 + SPV_OPERAND_TYPE_SAMPLER_FILTER_MODE, // SPIR-V Sec 3.10 + SPV_OPERAND_TYPE_SAMPLER_IMAGE_FORMAT, // SPIR-V Sec 3.11 + SPV_OPERAND_TYPE_IMAGE_CHANNEL_ORDER, // SPIR-V Sec 3.12 + SPV_OPERAND_TYPE_IMAGE_CHANNEL_DATA_TYPE, // SPIR-V Sec 3.13 + SPV_OPERAND_TYPE_FP_ROUNDING_MODE, // SPIR-V Sec 3.16 + SPV_OPERAND_TYPE_LINKAGE_TYPE, // SPIR-V Sec 3.17 + SPV_OPERAND_TYPE_ACCESS_QUALIFIER, // SPIR-V Sec 3.18 + SPV_OPERAND_TYPE_FUNCTION_PARAMETER_ATTRIBUTE, // SPIR-V Sec 3.19 + SPV_OPERAND_TYPE_DECORATION, // SPIR-V Sec 3.20 + SPV_OPERAND_TYPE_BUILT_IN, // SPIR-V Sec 3.21 + SPV_OPERAND_TYPE_GROUP_OPERATION, // SPIR-V Sec 3.28 + SPV_OPERAND_TYPE_KERNEL_ENQ_FLAGS, // SPIR-V Sec 3.29 + SPV_OPERAND_TYPE_KERNEL_PROFILING_INFO, // SPIR-V Sec 3.30 + SPV_OPERAND_TYPE_CAPABILITY, // SPIR-V Sec 3.31 + + // NOTE: New concrete enum values should be added at the end. + + // Set 5: Operands that are a single word bitmask. + // Sometimes a set bit indicates the instruction requires still more operands. + SPV_OPERAND_TYPE_IMAGE, // SPIR-V Sec 3.14 + SPV_OPERAND_TYPE_FP_FAST_MATH_MODE, // SPIR-V Sec 3.15 + SPV_OPERAND_TYPE_SELECTION_CONTROL, // SPIR-V Sec 3.22 + SPV_OPERAND_TYPE_LOOP_CONTROL, // SPIR-V Sec 3.23 + SPV_OPERAND_TYPE_FUNCTION_CONTROL, // SPIR-V Sec 3.24 + SPV_OPERAND_TYPE_MEMORY_ACCESS, // SPIR-V Sec 3.26 + SPV_OPERAND_TYPE_FRAGMENT_SHADING_RATE, // SPIR-V Sec 3.FSR + +// NOTE: New concrete enum values should be added at the end. + +// The "optional" and "variable" operand types are only used internally by +// the assembler and the binary parser. +// There are two categories: +// Optional : expands to 0 or 1 operand, like ? in regular expressions. +// Variable : expands to 0, 1 or many operands or pairs of operands. +// This is similar to * in regular expressions. + +// NOTE: These FIRST_* and LAST_* enum values are DEPRECATED. +// The concept of "optional" and "variable" operand types are only intended +// for use as an implementation detail of parsing SPIR-V, either in text or +// binary form. Instead of using enum ranges, use characteristic function +// spvOperandIsConcrete. +// The use of enum value ranges in a public API makes it difficult to insert +// new values into a range without also breaking binary compatibility. +// +// Macros for defining bounds on optional and variable operand types. +// Any variable operand type is also optional. +// TODO(dneto): Remove SPV_OPERAND_TYPE_FIRST_* and SPV_OPERAND_TYPE_LAST_* +#define FIRST_OPTIONAL(ENUM) ENUM, SPV_OPERAND_TYPE_FIRST_OPTIONAL_TYPE = ENUM +#define FIRST_VARIABLE(ENUM) ENUM, SPV_OPERAND_TYPE_FIRST_VARIABLE_TYPE = ENUM +#define LAST_VARIABLE(ENUM) \ + ENUM, SPV_OPERAND_TYPE_LAST_VARIABLE_TYPE = ENUM, \ + SPV_OPERAND_TYPE_LAST_OPTIONAL_TYPE = ENUM + + // An optional operand represents zero or one logical operands. + // In an instruction definition, this may only appear at the end of the + // operand types. + FIRST_OPTIONAL(SPV_OPERAND_TYPE_OPTIONAL_ID), + // An optional image operand type. + SPV_OPERAND_TYPE_OPTIONAL_IMAGE, + // An optional memory access type. + SPV_OPERAND_TYPE_OPTIONAL_MEMORY_ACCESS, + // An optional literal integer. + SPV_OPERAND_TYPE_OPTIONAL_LITERAL_INTEGER, + // An optional literal number, which may be either integer or floating point. + SPV_OPERAND_TYPE_OPTIONAL_LITERAL_NUMBER, + // Like SPV_OPERAND_TYPE_TYPED_LITERAL_NUMBER, but optional, and integral. + SPV_OPERAND_TYPE_OPTIONAL_TYPED_LITERAL_INTEGER, + // An optional literal string. + SPV_OPERAND_TYPE_OPTIONAL_LITERAL_STRING, + // An optional access qualifier + SPV_OPERAND_TYPE_OPTIONAL_ACCESS_QUALIFIER, + // An optional context-independent value, or CIV. CIVs are tokens that we can + // assemble regardless of where they occur -- literals, IDs, immediate + // integers, etc. + SPV_OPERAND_TYPE_OPTIONAL_CIV, + + // A variable operand represents zero or more logical operands. + // In an instruction definition, this may only appear at the end of the + // operand types. + FIRST_VARIABLE(SPV_OPERAND_TYPE_VARIABLE_ID), + SPV_OPERAND_TYPE_VARIABLE_LITERAL_INTEGER, + // A sequence of zero or more pairs of (typed literal integer, Id). + // Expands to zero or more: + // (SPV_OPERAND_TYPE_TYPED_LITERAL_INTEGER, SPV_OPERAND_TYPE_ID) + // where the literal number must always be an integer of some sort. + SPV_OPERAND_TYPE_VARIABLE_LITERAL_INTEGER_ID, + // A sequence of zero or more pairs of (Id, Literal integer) + LAST_VARIABLE(SPV_OPERAND_TYPE_VARIABLE_ID_LITERAL_INTEGER), + + // The following are concrete enum types from the DebugInfo extended + // instruction set. + SPV_OPERAND_TYPE_DEBUG_INFO_FLAGS, // DebugInfo Sec 3.2. A mask. + SPV_OPERAND_TYPE_DEBUG_BASE_TYPE_ATTRIBUTE_ENCODING, // DebugInfo Sec 3.3 + SPV_OPERAND_TYPE_DEBUG_COMPOSITE_TYPE, // DebugInfo Sec 3.4 + SPV_OPERAND_TYPE_DEBUG_TYPE_QUALIFIER, // DebugInfo Sec 3.5 + SPV_OPERAND_TYPE_DEBUG_OPERATION, // DebugInfo Sec 3.6 + + // The following are concrete enum types from the OpenCL.DebugInfo.100 + // extended instruction set. + SPV_OPERAND_TYPE_CLDEBUG100_DEBUG_INFO_FLAGS, // Sec 3.2. A Mask + SPV_OPERAND_TYPE_CLDEBUG100_DEBUG_BASE_TYPE_ATTRIBUTE_ENCODING, // Sec 3.3 + SPV_OPERAND_TYPE_CLDEBUG100_DEBUG_COMPOSITE_TYPE, // Sec 3.4 + SPV_OPERAND_TYPE_CLDEBUG100_DEBUG_TYPE_QUALIFIER, // Sec 3.5 + SPV_OPERAND_TYPE_CLDEBUG100_DEBUG_OPERATION, // Sec 3.6 + SPV_OPERAND_TYPE_CLDEBUG100_DEBUG_IMPORTED_ENTITY, // Sec 3.7 + + // The following are concrete enum types from SPV_INTEL_float_controls2 + // https://github.com/intel/llvm/blob/39fa9b0cbfbae88327118990a05c5b387b56d2ef/sycl/doc/extensions/SPIRV/SPV_INTEL_float_controls2.asciidoc + SPV_OPERAND_TYPE_FPDENORM_MODE, // Sec 3.17 FP Denorm Mode + SPV_OPERAND_TYPE_FPOPERATION_MODE, // Sec 3.18 FP Operation Mode + // A value enum from https://github.com/KhronosGroup/SPIRV-Headers/pull/177 + SPV_OPERAND_TYPE_QUANTIZATION_MODES, + // A value enum from https://github.com/KhronosGroup/SPIRV-Headers/pull/177 + SPV_OPERAND_TYPE_OVERFLOW_MODES, + + // Concrete operand types for the provisional Vulkan ray tracing feature. + SPV_OPERAND_TYPE_RAY_FLAGS, // SPIR-V Sec 3.RF + SPV_OPERAND_TYPE_RAY_QUERY_INTERSECTION, // SPIR-V Sec 3.RQIntersection + SPV_OPERAND_TYPE_RAY_QUERY_COMMITTED_INTERSECTION_TYPE, // SPIR-V Sec + // 3.RQCommitted + SPV_OPERAND_TYPE_RAY_QUERY_CANDIDATE_INTERSECTION_TYPE, // SPIR-V Sec + // 3.RQCandidate + + // Concrete operand types for integer dot product. + // Packed vector format + SPV_OPERAND_TYPE_PACKED_VECTOR_FORMAT, // SPIR-V Sec 3.x + // An optional packed vector format + SPV_OPERAND_TYPE_OPTIONAL_PACKED_VECTOR_FORMAT, + + // This is a sentinel value, and does not represent an operand type. + // It should come last. + SPV_OPERAND_TYPE_NUM_OPERAND_TYPES, + + SPV_FORCE_32_BIT_ENUM(spv_operand_type_t) +} spv_operand_type_t; + +// Returns true if the given type is concrete. +bool spvOperandIsConcrete(spv_operand_type_t type); + +// Returns true if the given type is concrete and also a mask. +bool spvOperandIsConcreteMask(spv_operand_type_t type); + +typedef enum spv_ext_inst_type_t { + SPV_EXT_INST_TYPE_NONE = 0, + SPV_EXT_INST_TYPE_GLSL_STD_450, + SPV_EXT_INST_TYPE_OPENCL_STD, + SPV_EXT_INST_TYPE_SPV_AMD_SHADER_EXPLICIT_VERTEX_PARAMETER, + SPV_EXT_INST_TYPE_SPV_AMD_SHADER_TRINARY_MINMAX, + SPV_EXT_INST_TYPE_SPV_AMD_GCN_SHADER, + SPV_EXT_INST_TYPE_SPV_AMD_SHADER_BALLOT, + SPV_EXT_INST_TYPE_DEBUGINFO, + SPV_EXT_INST_TYPE_OPENCL_DEBUGINFO_100, + SPV_EXT_INST_TYPE_NONSEMANTIC_CLSPVREFLECTION, + SPV_EXT_INST_TYPE_NONSEMANTIC_SHADER_DEBUGINFO_100, + + // Multiple distinct extended instruction set types could return this + // value, if they are prefixed with NonSemantic. and are otherwise + // unrecognised + SPV_EXT_INST_TYPE_NONSEMANTIC_UNKNOWN, + + SPV_FORCE_32_BIT_ENUM(spv_ext_inst_type_t) +} spv_ext_inst_type_t; + +// This determines at a high level the kind of a binary-encoded literal +// number, but not the bit width. +// In principle, these could probably be folded into new entries in +// spv_operand_type_t. But then we'd have some special case differences +// between the assembler and disassembler. +typedef enum spv_number_kind_t { + SPV_NUMBER_NONE = 0, // The default for value initialization. + SPV_NUMBER_UNSIGNED_INT, + SPV_NUMBER_SIGNED_INT, + SPV_NUMBER_FLOATING, +} spv_number_kind_t; + +typedef enum spv_text_to_binary_options_t { + SPV_TEXT_TO_BINARY_OPTION_NONE = SPV_BIT(0), + // Numeric IDs in the binary will have the same values as in the source. + // Non-numeric IDs are allocated by filling in the gaps, starting with 1 + // and going up. + SPV_TEXT_TO_BINARY_OPTION_PRESERVE_NUMERIC_IDS = SPV_BIT(1), + SPV_FORCE_32_BIT_ENUM(spv_text_to_binary_options_t) +} spv_text_to_binary_options_t; + +typedef enum spv_binary_to_text_options_t { + SPV_BINARY_TO_TEXT_OPTION_NONE = SPV_BIT(0), + SPV_BINARY_TO_TEXT_OPTION_PRINT = SPV_BIT(1), + SPV_BINARY_TO_TEXT_OPTION_COLOR = SPV_BIT(2), + SPV_BINARY_TO_TEXT_OPTION_INDENT = SPV_BIT(3), + SPV_BINARY_TO_TEXT_OPTION_SHOW_BYTE_OFFSET = SPV_BIT(4), + // Do not output the module header as leading comments in the assembly. + SPV_BINARY_TO_TEXT_OPTION_NO_HEADER = SPV_BIT(5), + // Use friendly names where possible. The heuristic may expand over + // time, but will use common names for scalar types, and debug names from + // OpName instructions. + SPV_BINARY_TO_TEXT_OPTION_FRIENDLY_NAMES = SPV_BIT(6), + // Add some comments to the generated assembly + SPV_BINARY_TO_TEXT_OPTION_COMMENT = SPV_BIT(7), + SPV_FORCE_32_BIT_ENUM(spv_binary_to_text_options_t) +} spv_binary_to_text_options_t; + +// Constants + +// The default id bound is to the minimum value for the id limit +// in the spir-v specification under the section "Universal Limits". +const uint32_t kDefaultMaxIdBound = 0x3FFFFF; + +// Structures + +// Information about an operand parsed from a binary SPIR-V module. +// Note that the values are not included. You still need access to the binary +// to extract the values. +typedef struct spv_parsed_operand_t { + // Location of the operand, in words from the start of the instruction. + uint16_t offset; + // Number of words occupied by this operand. + uint16_t num_words; + // The "concrete" operand type. See the definition of spv_operand_type_t + // for details. + spv_operand_type_t type; + // If type is a literal number type, then number_kind says whether it's + // a signed integer, an unsigned integer, or a floating point number. + spv_number_kind_t number_kind; + // The number of bits for a literal number type. + uint32_t number_bit_width; +} spv_parsed_operand_t; + +// An instruction parsed from a binary SPIR-V module. +typedef struct spv_parsed_instruction_t { + // An array of words for this instruction, in native endianness. + const uint32_t* words; + // The number of words in this instruction. + uint16_t num_words; + uint16_t opcode; + // The extended instruction type, if opcode is OpExtInst. Otherwise + // this is the "none" value. + spv_ext_inst_type_t ext_inst_type; + // The type id, or 0 if this instruction doesn't have one. + uint32_t type_id; + // The result id, or 0 if this instruction doesn't have one. + uint32_t result_id; + // The array of parsed operands. + const spv_parsed_operand_t* operands; + uint16_t num_operands; +} spv_parsed_instruction_t; + +typedef struct spv_const_binary_t { + const uint32_t* code; + const size_t wordCount; +} spv_const_binary_t; + +typedef struct spv_binary_t { + uint32_t* code; + size_t wordCount; +} spv_binary_t; + +typedef struct spv_text_t { + const char* str; + size_t length; +} spv_text_t; + +typedef struct spv_position_t { + size_t line; + size_t column; + size_t index; +} spv_position_t; + +typedef struct spv_diagnostic_t { + spv_position_t position; + char* error; + bool isTextSource; +} spv_diagnostic_t; + +// Opaque struct containing the context used to operate on a SPIR-V module. +// Its object is used by various translation API functions. +typedef struct spv_context_t spv_context_t; + +typedef struct spv_validator_options_t spv_validator_options_t; + +typedef struct spv_optimizer_options_t spv_optimizer_options_t; + +typedef struct spv_reducer_options_t spv_reducer_options_t; + +typedef struct spv_fuzzer_options_t spv_fuzzer_options_t; + +// Type Definitions + +typedef spv_const_binary_t* spv_const_binary; +typedef spv_binary_t* spv_binary; +typedef spv_text_t* spv_text; +typedef spv_position_t* spv_position; +typedef spv_diagnostic_t* spv_diagnostic; +typedef const spv_context_t* spv_const_context; +typedef spv_context_t* spv_context; +typedef spv_validator_options_t* spv_validator_options; +typedef const spv_validator_options_t* spv_const_validator_options; +typedef spv_optimizer_options_t* spv_optimizer_options; +typedef const spv_optimizer_options_t* spv_const_optimizer_options; +typedef spv_reducer_options_t* spv_reducer_options; +typedef const spv_reducer_options_t* spv_const_reducer_options; +typedef spv_fuzzer_options_t* spv_fuzzer_options; +typedef const spv_fuzzer_options_t* spv_const_fuzzer_options; + +// Platform API + +// Returns the SPIRV-Tools software version as a null-terminated string. +// The contents of the underlying storage is valid for the remainder of +// the process. +SPIRV_TOOLS_EXPORT const char* spvSoftwareVersionString(void); +// Returns a null-terminated string containing the name of the project, +// the software version string, and commit details. +// The contents of the underlying storage is valid for the remainder of +// the process. +SPIRV_TOOLS_EXPORT const char* spvSoftwareVersionDetailsString(void); + +// Certain target environments impose additional restrictions on SPIR-V, so it's +// often necessary to specify which one applies. SPV_ENV_UNIVERSAL_* implies an +// environment-agnostic SPIR-V. +// +// When an API method needs to derive a SPIR-V version from a target environment +// (from the spv_context object), the method will choose the highest version of +// SPIR-V supported by the target environment. Examples: +// SPV_ENV_VULKAN_1_0 -> SPIR-V 1.0 +// SPV_ENV_VULKAN_1_1 -> SPIR-V 1.3 +// SPV_ENV_VULKAN_1_1_SPIRV_1_4 -> SPIR-V 1.4 +// SPV_ENV_VULKAN_1_2 -> SPIR-V 1.5 +// SPV_ENV_VULKAN_1_3 -> SPIR-V 1.6 +// Consult the description of API entry points for specific rules. +typedef enum { + SPV_ENV_UNIVERSAL_1_0, // SPIR-V 1.0 latest revision, no other restrictions. + SPV_ENV_VULKAN_1_0, // Vulkan 1.0 latest revision. + SPV_ENV_UNIVERSAL_1_1, // SPIR-V 1.1 latest revision, no other restrictions. + SPV_ENV_OPENCL_2_1, // OpenCL Full Profile 2.1 latest revision. + SPV_ENV_OPENCL_2_2, // OpenCL Full Profile 2.2 latest revision. + SPV_ENV_OPENGL_4_0, // OpenGL 4.0 plus GL_ARB_gl_spirv, latest revisions. + SPV_ENV_OPENGL_4_1, // OpenGL 4.1 plus GL_ARB_gl_spirv, latest revisions. + SPV_ENV_OPENGL_4_2, // OpenGL 4.2 plus GL_ARB_gl_spirv, latest revisions. + SPV_ENV_OPENGL_4_3, // OpenGL 4.3 plus GL_ARB_gl_spirv, latest revisions. + // There is no variant for OpenGL 4.4. + SPV_ENV_OPENGL_4_5, // OpenGL 4.5 plus GL_ARB_gl_spirv, latest revisions. + SPV_ENV_UNIVERSAL_1_2, // SPIR-V 1.2, latest revision, no other restrictions. + SPV_ENV_OPENCL_1_2, // OpenCL Full Profile 1.2 plus cl_khr_il_program, + // latest revision. + SPV_ENV_OPENCL_EMBEDDED_1_2, // OpenCL Embedded Profile 1.2 plus + // cl_khr_il_program, latest revision. + SPV_ENV_OPENCL_2_0, // OpenCL Full Profile 2.0 plus cl_khr_il_program, + // latest revision. + SPV_ENV_OPENCL_EMBEDDED_2_0, // OpenCL Embedded Profile 2.0 plus + // cl_khr_il_program, latest revision. + SPV_ENV_OPENCL_EMBEDDED_2_1, // OpenCL Embedded Profile 2.1 latest revision. + SPV_ENV_OPENCL_EMBEDDED_2_2, // OpenCL Embedded Profile 2.2 latest revision. + SPV_ENV_UNIVERSAL_1_3, // SPIR-V 1.3 latest revision, no other restrictions. + SPV_ENV_VULKAN_1_1, // Vulkan 1.1 latest revision. + SPV_ENV_WEBGPU_0, // DEPRECATED, may be removed in the future. + SPV_ENV_UNIVERSAL_1_4, // SPIR-V 1.4 latest revision, no other restrictions. + + // Vulkan 1.1 with VK_KHR_spirv_1_4, i.e. SPIR-V 1.4 binary. + SPV_ENV_VULKAN_1_1_SPIRV_1_4, + + SPV_ENV_UNIVERSAL_1_5, // SPIR-V 1.5 latest revision, no other restrictions. + SPV_ENV_VULKAN_1_2, // Vulkan 1.2 latest revision. + + SPV_ENV_UNIVERSAL_1_6, // SPIR-V 1.6 latest revision, no other restrictions. + SPV_ENV_VULKAN_1_3, // Vulkan 1.3 latest revision. + + SPV_ENV_MAX // Keep this as the last enum value. +} spv_target_env; + +// SPIR-V Validator can be parameterized with the following Universal Limits. +typedef enum { + spv_validator_limit_max_struct_members, + spv_validator_limit_max_struct_depth, + spv_validator_limit_max_local_variables, + spv_validator_limit_max_global_variables, + spv_validator_limit_max_switch_branches, + spv_validator_limit_max_function_args, + spv_validator_limit_max_control_flow_nesting_depth, + spv_validator_limit_max_access_chain_indexes, + spv_validator_limit_max_id_bound, +} spv_validator_limit; + +// Returns a string describing the given SPIR-V target environment. +SPIRV_TOOLS_EXPORT const char* spvTargetEnvDescription(spv_target_env env); + +// Parses s into *env and returns true if successful. If unparsable, returns +// false and sets *env to SPV_ENV_UNIVERSAL_1_0. +SPIRV_TOOLS_EXPORT bool spvParseTargetEnv(const char* s, spv_target_env* env); + +// Determines the target env value with the least features but which enables +// the given Vulkan and SPIR-V versions. If such a target is supported, returns +// true and writes the value to |env|, otherwise returns false. +// +// The Vulkan version is given as an unsigned 32-bit number as specified in +// Vulkan section "29.2.1 Version Numbers": the major version number appears +// in bits 22 to 21, and the minor version is in bits 12 to 21. The SPIR-V +// version is given in the SPIR-V version header word: major version in bits +// 16 to 23, and minor version in bits 8 to 15. +SPIRV_TOOLS_EXPORT bool spvParseVulkanEnv(uint32_t vulkan_ver, + uint32_t spirv_ver, + spv_target_env* env); + +// Creates a context object for most of the SPIRV-Tools API. +// Returns null if env is invalid. +// +// See specific API calls for how the target environment is interpreted +// (particularly assembly and validation). +SPIRV_TOOLS_EXPORT spv_context spvContextCreate(spv_target_env env); + +// Destroys the given context object. +SPIRV_TOOLS_EXPORT void spvContextDestroy(spv_context context); + +// Creates a Validator options object with default options. Returns a valid +// options object. The object remains valid until it is passed into +// spvValidatorOptionsDestroy. +SPIRV_TOOLS_EXPORT spv_validator_options spvValidatorOptionsCreate(void); + +// Destroys the given Validator options object. +SPIRV_TOOLS_EXPORT void spvValidatorOptionsDestroy( + spv_validator_options options); + +// Records the maximum Universal Limit that is considered valid in the given +// Validator options object. argument must be a valid options object. +SPIRV_TOOLS_EXPORT void spvValidatorOptionsSetUniversalLimit( + spv_validator_options options, spv_validator_limit limit_type, + uint32_t limit); + +// Record whether or not the validator should relax the rules on types for +// stores to structs. When relaxed, it will allow a type mismatch as long as +// the types are structs with the same layout. Two structs have the same layout +// if +// +// 1) the members of the structs are either the same type or are structs with +// same layout, and +// +// 2) the decorations that affect the memory layout are identical for both +// types. Other decorations are not relevant. +SPIRV_TOOLS_EXPORT void spvValidatorOptionsSetRelaxStoreStruct( + spv_validator_options options, bool val); + +// Records whether or not the validator should relax the rules on pointer usage +// in logical addressing mode. +// +// When relaxed, it will allow the following usage cases of pointers: +// 1) OpVariable allocating an object whose type is a pointer type +// 2) OpReturnValue returning a pointer value +SPIRV_TOOLS_EXPORT void spvValidatorOptionsSetRelaxLogicalPointer( + spv_validator_options options, bool val); + +// Records whether or not the validator should relax the rules because it is +// expected that the optimizations will make the code legal. +// +// When relaxed, it will allow the following: +// 1) It will allow relaxed logical pointers. Setting this option will also +// set that option. +// 2) Pointers that are pass as parameters to function calls do not have to +// match the storage class of the formal parameter. +// 3) Pointers that are actual parameters on function calls do not have to point +// to the same type pointed as the formal parameter. The types just need to +// logically match. +// 4) GLSLstd450 Interpolate* instructions can have a load of an interpolant +// for a first argument. +SPIRV_TOOLS_EXPORT void spvValidatorOptionsSetBeforeHlslLegalization( + spv_validator_options options, bool val); + +// Records whether the validator should use "relaxed" block layout rules. +// Relaxed layout rules are described by Vulkan extension +// VK_KHR_relaxed_block_layout, and they affect uniform blocks, storage blocks, +// and push constants. +// +// This is enabled by default when targeting Vulkan 1.1 or later. +// Relaxed layout is more permissive than the default rules in Vulkan 1.0. +SPIRV_TOOLS_EXPORT void spvValidatorOptionsSetRelaxBlockLayout( + spv_validator_options options, bool val); + +// Records whether the validator should use standard block layout rules for +// uniform blocks. +SPIRV_TOOLS_EXPORT void spvValidatorOptionsSetUniformBufferStandardLayout( + spv_validator_options options, bool val); + +// Records whether the validator should use "scalar" block layout rules. +// Scalar layout rules are more permissive than relaxed block layout. +// +// See Vulkan extension VK_EXT_scalar_block_layout. The scalar alignment is +// defined as follows: +// - scalar alignment of a scalar is the scalar size +// - scalar alignment of a vector is the scalar alignment of its component +// - scalar alignment of a matrix is the scalar alignment of its component +// - scalar alignment of an array is the scalar alignment of its element +// - scalar alignment of a struct is the max scalar alignment among its +// members +// +// For a struct in Uniform, StorageClass, or PushConstant: +// - a member Offset must be a multiple of the member's scalar alignment +// - ArrayStride or MatrixStride must be a multiple of the array or matrix +// scalar alignment +SPIRV_TOOLS_EXPORT void spvValidatorOptionsSetScalarBlockLayout( + spv_validator_options options, bool val); + +// Records whether the validator should use "scalar" block layout +// rules (as defined above) for Workgroup blocks. See Vulkan +// extension VK_KHR_workgroup_memory_explicit_layout. +SPIRV_TOOLS_EXPORT void spvValidatorOptionsSetWorkgroupScalarBlockLayout( + spv_validator_options options, bool val); + +// Records whether or not the validator should skip validating standard +// uniform/storage block layout. +SPIRV_TOOLS_EXPORT void spvValidatorOptionsSetSkipBlockLayout( + spv_validator_options options, bool val); + +// Records whether or not the validator should allow the LocalSizeId +// decoration where the environment otherwise would not allow it. +SPIRV_TOOLS_EXPORT void spvValidatorOptionsSetAllowLocalSizeId( + spv_validator_options options, bool val); + +// Whether friendly names should be used in validation error messages. +SPIRV_TOOLS_EXPORT void spvValidatorOptionsSetFriendlyNames( + spv_validator_options options, bool val); + +// Creates an optimizer options object with default options. Returns a valid +// options object. The object remains valid until it is passed into +// |spvOptimizerOptionsDestroy|. +SPIRV_TOOLS_EXPORT spv_optimizer_options spvOptimizerOptionsCreate(void); + +// Destroys the given optimizer options object. +SPIRV_TOOLS_EXPORT void spvOptimizerOptionsDestroy( + spv_optimizer_options options); + +// Records whether or not the optimizer should run the validator before +// optimizing. If |val| is true, the validator will be run. +SPIRV_TOOLS_EXPORT void spvOptimizerOptionsSetRunValidator( + spv_optimizer_options options, bool val); + +// Records the validator options that should be passed to the validator if it is +// run. +SPIRV_TOOLS_EXPORT void spvOptimizerOptionsSetValidatorOptions( + spv_optimizer_options options, spv_validator_options val); + +// Records the maximum possible value for the id bound. +SPIRV_TOOLS_EXPORT void spvOptimizerOptionsSetMaxIdBound( + spv_optimizer_options options, uint32_t val); + +// Records whether all bindings within the module should be preserved. +SPIRV_TOOLS_EXPORT void spvOptimizerOptionsSetPreserveBindings( + spv_optimizer_options options, bool val); + +// Records whether all specialization constants within the module +// should be preserved. +SPIRV_TOOLS_EXPORT void spvOptimizerOptionsSetPreserveSpecConstants( + spv_optimizer_options options, bool val); + +// Creates a reducer options object with default options. Returns a valid +// options object. The object remains valid until it is passed into +// |spvReducerOptionsDestroy|. +SPIRV_TOOLS_EXPORT spv_reducer_options spvReducerOptionsCreate(void); + +// Destroys the given reducer options object. +SPIRV_TOOLS_EXPORT void spvReducerOptionsDestroy(spv_reducer_options options); + +// Sets the maximum number of reduction steps that should run before the reducer +// gives up. +SPIRV_TOOLS_EXPORT void spvReducerOptionsSetStepLimit( + spv_reducer_options options, uint32_t step_limit); + +// Sets the fail-on-validation-error option; if true, the reducer will return +// kStateInvalid if a reduction step yields a state that fails SPIR-V +// validation. Otherwise, an invalid state is treated as uninteresting and the +// reduction backtracks and continues. +SPIRV_TOOLS_EXPORT void spvReducerOptionsSetFailOnValidationError( + spv_reducer_options options, bool fail_on_validation_error); + +// Sets the function that the reducer should target. If set to zero the reducer +// will target all functions as well as parts of the module that lie outside +// functions. Otherwise the reducer will restrict reduction to the function +// with result id |target_function|, which is required to exist. +SPIRV_TOOLS_EXPORT void spvReducerOptionsSetTargetFunction( + spv_reducer_options options, uint32_t target_function); + +// Creates a fuzzer options object with default options. Returns a valid +// options object. The object remains valid until it is passed into +// |spvFuzzerOptionsDestroy|. +SPIRV_TOOLS_EXPORT spv_fuzzer_options spvFuzzerOptionsCreate(void); + +// Destroys the given fuzzer options object. +SPIRV_TOOLS_EXPORT void spvFuzzerOptionsDestroy(spv_fuzzer_options options); + +// Enables running the validator after every transformation is applied during +// a replay. +SPIRV_TOOLS_EXPORT void spvFuzzerOptionsEnableReplayValidation( + spv_fuzzer_options options); + +// Sets the seed with which the random number generator used by the fuzzer +// should be initialized. +SPIRV_TOOLS_EXPORT void spvFuzzerOptionsSetRandomSeed( + spv_fuzzer_options options, uint32_t seed); + +// Sets the range of transformations that should be applied during replay: 0 +// means all transformations, +N means the first N transformations, -N means all +// except the final N transformations. +SPIRV_TOOLS_EXPORT void spvFuzzerOptionsSetReplayRange( + spv_fuzzer_options options, int32_t replay_range); + +// Sets the maximum number of steps that the shrinker should take before giving +// up. +SPIRV_TOOLS_EXPORT void spvFuzzerOptionsSetShrinkerStepLimit( + spv_fuzzer_options options, uint32_t shrinker_step_limit); + +// Enables running the validator after every pass is applied during a fuzzing +// run. +SPIRV_TOOLS_EXPORT void spvFuzzerOptionsEnableFuzzerPassValidation( + spv_fuzzer_options options); + +// Enables all fuzzer passes during a fuzzing run (instead of a random subset +// of passes). +SPIRV_TOOLS_EXPORT void spvFuzzerOptionsEnableAllPasses( + spv_fuzzer_options options); + +// Encodes the given SPIR-V assembly text to its binary representation. The +// length parameter specifies the number of bytes for text. Encoded binary will +// be stored into *binary. Any error will be written into *diagnostic if +// diagnostic is non-null, otherwise the context's message consumer will be +// used. The generated binary is independent of the context and may outlive it. +// The SPIR-V binary version is set to the highest version of SPIR-V supported +// by the context's target environment. +SPIRV_TOOLS_EXPORT spv_result_t spvTextToBinary(const spv_const_context context, + const char* text, + const size_t length, + spv_binary* binary, + spv_diagnostic* diagnostic); + +// Encodes the given SPIR-V assembly text to its binary representation. Same as +// spvTextToBinary but with options. The options parameter is a bit field of +// spv_text_to_binary_options_t. +SPIRV_TOOLS_EXPORT spv_result_t spvTextToBinaryWithOptions( + const spv_const_context context, const char* text, const size_t length, + const uint32_t options, spv_binary* binary, spv_diagnostic* diagnostic); + +// Frees an allocated text stream. This is a no-op if the text parameter +// is a null pointer. +SPIRV_TOOLS_EXPORT void spvTextDestroy(spv_text text); + +// Decodes the given SPIR-V binary representation to its assembly text. The +// word_count parameter specifies the number of words for binary. The options +// parameter is a bit field of spv_binary_to_text_options_t. Decoded text will +// be stored into *text. Any error will be written into *diagnostic if +// diagnostic is non-null, otherwise the context's message consumer will be +// used. +SPIRV_TOOLS_EXPORT spv_result_t spvBinaryToText(const spv_const_context context, + const uint32_t* binary, + const size_t word_count, + const uint32_t options, + spv_text* text, + spv_diagnostic* diagnostic); + +// Frees a binary stream from memory. This is a no-op if binary is a null +// pointer. +SPIRV_TOOLS_EXPORT void spvBinaryDestroy(spv_binary binary); + +// Validates a SPIR-V binary for correctness. Any errors will be written into +// *diagnostic if diagnostic is non-null, otherwise the context's message +// consumer will be used. +// +// Validate for SPIR-V spec rules for the SPIR-V version named in the +// binary's header (at word offset 1). Additionally, if the context target +// environment is a client API (such as Vulkan 1.1), then validate for that +// client API version, to the extent that it is verifiable from data in the +// binary itself. +SPIRV_TOOLS_EXPORT spv_result_t spvValidate(const spv_const_context context, + const spv_const_binary binary, + spv_diagnostic* diagnostic); + +// Validates a SPIR-V binary for correctness. Uses the provided Validator +// options. Any errors will be written into *diagnostic if diagnostic is +// non-null, otherwise the context's message consumer will be used. +// +// Validate for SPIR-V spec rules for the SPIR-V version named in the +// binary's header (at word offset 1). Additionally, if the context target +// environment is a client API (such as Vulkan 1.1), then validate for that +// client API version, to the extent that it is verifiable from data in the +// binary itself, or in the validator options. +SPIRV_TOOLS_EXPORT spv_result_t spvValidateWithOptions( + const spv_const_context context, const spv_const_validator_options options, + const spv_const_binary binary, spv_diagnostic* diagnostic); + +// Validates a raw SPIR-V binary for correctness. Any errors will be written +// into *diagnostic if diagnostic is non-null, otherwise the context's message +// consumer will be used. +SPIRV_TOOLS_EXPORT spv_result_t +spvValidateBinary(const spv_const_context context, const uint32_t* words, + const size_t num_words, spv_diagnostic* diagnostic); + +// Creates a diagnostic object. The position parameter specifies the location in +// the text/binary stream. The message parameter, copied into the diagnostic +// object, contains the error message to display. +SPIRV_TOOLS_EXPORT spv_diagnostic +spvDiagnosticCreate(const spv_position position, const char* message); + +// Destroys a diagnostic object. This is a no-op if diagnostic is a null +// pointer. +SPIRV_TOOLS_EXPORT void spvDiagnosticDestroy(spv_diagnostic diagnostic); + +// Prints the diagnostic to stderr. +SPIRV_TOOLS_EXPORT spv_result_t +spvDiagnosticPrint(const spv_diagnostic diagnostic); + +// Gets the name of an instruction, without the "Op" prefix. +SPIRV_TOOLS_EXPORT const char* spvOpcodeString(const uint32_t opcode); + +// The binary parser interface. + +// A pointer to a function that accepts a parsed SPIR-V header. +// The integer arguments are the 32-bit words from the header, as specified +// in SPIR-V 1.0 Section 2.3 Table 1. +// The function should return SPV_SUCCESS if parsing should continue. +typedef spv_result_t (*spv_parsed_header_fn_t)( + void* user_data, spv_endianness_t endian, uint32_t magic, uint32_t version, + uint32_t generator, uint32_t id_bound, uint32_t reserved); + +// A pointer to a function that accepts a parsed SPIR-V instruction. +// The parsed_instruction value is transient: it may be overwritten +// or released immediately after the function has returned. That also +// applies to the words array member of the parsed instruction. The +// function should return SPV_SUCCESS if and only if parsing should +// continue. +typedef spv_result_t (*spv_parsed_instruction_fn_t)( + void* user_data, const spv_parsed_instruction_t* parsed_instruction); + +// Parses a SPIR-V binary, specified as counted sequence of 32-bit words. +// Parsing feedback is provided via two callbacks provided as function +// pointers. Each callback function pointer can be a null pointer, in +// which case it is never called. Otherwise, in a valid parse the +// parsed-header callback is called once, and then the parsed-instruction +// callback once for each instruction in the stream. The user_data parameter +// is supplied as context to the callbacks. Returns SPV_SUCCESS on successful +// parse where the callbacks always return SPV_SUCCESS. For an invalid parse, +// returns a status code other than SPV_SUCCESS, and if diagnostic is non-null +// also emits a diagnostic. If diagnostic is null the context's message consumer +// will be used to emit any errors. If a callback returns anything other than +// SPV_SUCCESS, then that status code is returned, no further callbacks are +// issued, and no additional diagnostics are emitted. +SPIRV_TOOLS_EXPORT spv_result_t spvBinaryParse( + const spv_const_context context, void* user_data, const uint32_t* words, + const size_t num_words, spv_parsed_header_fn_t parse_header, + spv_parsed_instruction_fn_t parse_instruction, spv_diagnostic* diagnostic); + +#ifdef __cplusplus +} +#endif + +#endif // INCLUDE_SPIRV_TOOLS_LIBSPIRV_H_ diff --git a/local_packages/include/spirv-tools/libspirv.hpp b/local_packages/include/spirv-tools/libspirv.hpp new file mode 100644 index 0000000..408e3eb --- /dev/null +++ b/local_packages/include/spirv-tools/libspirv.hpp @@ -0,0 +1,375 @@ +// Copyright (c) 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef INCLUDE_SPIRV_TOOLS_LIBSPIRV_HPP_ +#define INCLUDE_SPIRV_TOOLS_LIBSPIRV_HPP_ + +#include +#include +#include +#include + +#include "spirv-tools/libspirv.h" + +namespace spvtools { + +// Message consumer. The C strings for source and message are only alive for the +// specific invocation. +using MessageConsumer = std::function; + +// C++ RAII wrapper around the C context object spv_context. +class Context { + public: + // Constructs a context targeting the given environment |env|. + // + // See specific API calls for how the target environment is interpreted + // (particularly assembly and validation). + // + // The constructed instance will have an empty message consumer, which just + // ignores all messages from the library. Use SetMessageConsumer() to supply + // one if messages are of concern. + explicit Context(spv_target_env env); + + // Enables move constructor/assignment operations. + Context(Context&& other); + Context& operator=(Context&& other); + + // Disables copy constructor/assignment operations. + Context(const Context&) = delete; + Context& operator=(const Context&) = delete; + + // Destructs this instance. + ~Context(); + + // Sets the message consumer to the given |consumer|. The |consumer| will be + // invoked once for each message communicated from the library. + void SetMessageConsumer(MessageConsumer consumer); + + // Returns the underlying spv_context. + spv_context& CContext(); + const spv_context& CContext() const; + + private: + spv_context context_; +}; + +// A RAII wrapper around a validator options object. +class ValidatorOptions { + public: + ValidatorOptions() : options_(spvValidatorOptionsCreate()) {} + ~ValidatorOptions() { spvValidatorOptionsDestroy(options_); } + // Allow implicit conversion to the underlying object. + operator spv_validator_options() const { return options_; } + + // Sets a limit. + void SetUniversalLimit(spv_validator_limit limit_type, uint32_t limit) { + spvValidatorOptionsSetUniversalLimit(options_, limit_type, limit); + } + + void SetRelaxStructStore(bool val) { + spvValidatorOptionsSetRelaxStoreStruct(options_, val); + } + + // Enables VK_KHR_relaxed_block_layout when validating standard + // uniform/storage buffer/push-constant layout. If true, disables + // scalar block layout rules. + void SetRelaxBlockLayout(bool val) { + spvValidatorOptionsSetRelaxBlockLayout(options_, val); + } + + // Enables VK_KHR_uniform_buffer_standard_layout when validating standard + // uniform layout. If true, disables scalar block layout rules. + void SetUniformBufferStandardLayout(bool val) { + spvValidatorOptionsSetUniformBufferStandardLayout(options_, val); + } + + // Enables VK_EXT_scalar_block_layout when validating standard + // uniform/storage buffer/push-constant layout. If true, disables + // relaxed block layout rules. + void SetScalarBlockLayout(bool val) { + spvValidatorOptionsSetScalarBlockLayout(options_, val); + } + + // Enables scalar layout when validating Workgroup blocks. See + // VK_KHR_workgroup_memory_explicit_layout. + void SetWorkgroupScalarBlockLayout(bool val) { + spvValidatorOptionsSetWorkgroupScalarBlockLayout(options_, val); + } + + // Skips validating standard uniform/storage buffer/push-constant layout. + void SetSkipBlockLayout(bool val) { + spvValidatorOptionsSetSkipBlockLayout(options_, val); + } + + // Enables LocalSizeId decorations where the environment would not otherwise + // allow them. + void SetAllowLocalSizeId(bool val) { + spvValidatorOptionsSetAllowLocalSizeId(options_, val); + } + + // Records whether or not the validator should relax the rules on pointer + // usage in logical addressing mode. + // + // When relaxed, it will allow the following usage cases of pointers: + // 1) OpVariable allocating an object whose type is a pointer type + // 2) OpReturnValue returning a pointer value + void SetRelaxLogicalPointer(bool val) { + spvValidatorOptionsSetRelaxLogicalPointer(options_, val); + } + + // Records whether or not the validator should relax the rules because it is + // expected that the optimizations will make the code legal. + // + // When relaxed, it will allow the following: + // 1) It will allow relaxed logical pointers. Setting this option will also + // set that option. + // 2) Pointers that are pass as parameters to function calls do not have to + // match the storage class of the formal parameter. + // 3) Pointers that are actual parameters on function calls do not have to + // point to the same type pointed as the formal parameter. The types just + // need to logically match. + // 4) GLSLstd450 Interpolate* instructions can have a load of an interpolant + // for a first argument. + void SetBeforeHlslLegalization(bool val) { + spvValidatorOptionsSetBeforeHlslLegalization(options_, val); + } + + // Whether friendly names should be used in validation error messages. + void SetFriendlyNames(bool val) { + spvValidatorOptionsSetFriendlyNames(options_, val); + } + + private: + spv_validator_options options_; +}; + +// A C++ wrapper around an optimization options object. +class OptimizerOptions { + public: + OptimizerOptions() : options_(spvOptimizerOptionsCreate()) {} + ~OptimizerOptions() { spvOptimizerOptionsDestroy(options_); } + + // Allow implicit conversion to the underlying object. + operator spv_optimizer_options() const { return options_; } + + // Records whether or not the optimizer should run the validator before + // optimizing. If |run| is true, the validator will be run. + void set_run_validator(bool run) { + spvOptimizerOptionsSetRunValidator(options_, run); + } + + // Records the validator options that should be passed to the validator if it + // is run. + void set_validator_options(const ValidatorOptions& val_options) { + spvOptimizerOptionsSetValidatorOptions(options_, val_options); + } + + // Records the maximum possible value for the id bound. + void set_max_id_bound(uint32_t new_bound) { + spvOptimizerOptionsSetMaxIdBound(options_, new_bound); + } + + // Records whether all bindings within the module should be preserved. + void set_preserve_bindings(bool preserve_bindings) { + spvOptimizerOptionsSetPreserveBindings(options_, preserve_bindings); + } + + // Records whether all specialization constants within the module + // should be preserved. + void set_preserve_spec_constants(bool preserve_spec_constants) { + spvOptimizerOptionsSetPreserveSpecConstants(options_, + preserve_spec_constants); + } + + private: + spv_optimizer_options options_; +}; + +// A C++ wrapper around a reducer options object. +class ReducerOptions { + public: + ReducerOptions() : options_(spvReducerOptionsCreate()) {} + ~ReducerOptions() { spvReducerOptionsDestroy(options_); } + + // Allow implicit conversion to the underlying object. + operator spv_reducer_options() const { // NOLINT(google-explicit-constructor) + return options_; + } + + // See spvReducerOptionsSetStepLimit. + void set_step_limit(uint32_t step_limit) { + spvReducerOptionsSetStepLimit(options_, step_limit); + } + + // See spvReducerOptionsSetFailOnValidationError. + void set_fail_on_validation_error(bool fail_on_validation_error) { + spvReducerOptionsSetFailOnValidationError(options_, + fail_on_validation_error); + } + + // See spvReducerOptionsSetTargetFunction. + void set_target_function(uint32_t target_function) { + spvReducerOptionsSetTargetFunction(options_, target_function); + } + + private: + spv_reducer_options options_; +}; + +// A C++ wrapper around a fuzzer options object. +class FuzzerOptions { + public: + FuzzerOptions() : options_(spvFuzzerOptionsCreate()) {} + ~FuzzerOptions() { spvFuzzerOptionsDestroy(options_); } + + // Allow implicit conversion to the underlying object. + operator spv_fuzzer_options() const { // NOLINT(google-explicit-constructor) + return options_; + } + + // See spvFuzzerOptionsEnableReplayValidation. + void enable_replay_validation() { + spvFuzzerOptionsEnableReplayValidation(options_); + } + + // See spvFuzzerOptionsSetRandomSeed. + void set_random_seed(uint32_t seed) { + spvFuzzerOptionsSetRandomSeed(options_, seed); + } + + // See spvFuzzerOptionsSetReplayRange. + void set_replay_range(int32_t replay_range) { + spvFuzzerOptionsSetReplayRange(options_, replay_range); + } + + // See spvFuzzerOptionsSetShrinkerStepLimit. + void set_shrinker_step_limit(uint32_t shrinker_step_limit) { + spvFuzzerOptionsSetShrinkerStepLimit(options_, shrinker_step_limit); + } + + // See spvFuzzerOptionsEnableFuzzerPassValidation. + void enable_fuzzer_pass_validation() { + spvFuzzerOptionsEnableFuzzerPassValidation(options_); + } + + // See spvFuzzerOptionsEnableAllPasses. + void enable_all_passes() { spvFuzzerOptionsEnableAllPasses(options_); } + + private: + spv_fuzzer_options options_; +}; + +// C++ interface for SPIRV-Tools functionalities. It wraps the context +// (including target environment and the corresponding SPIR-V grammar) and +// provides methods for assembling, disassembling, and validating. +// +// Instances of this class provide basic thread-safety guarantee. +class SpirvTools { + public: + enum { + // Default assembling option used by assemble(): + kDefaultAssembleOption = SPV_TEXT_TO_BINARY_OPTION_NONE, + + // Default disassembling option used by Disassemble(): + // * Avoid prefix comments from decoding the SPIR-V module header, and + // * Use friendly names for variables. + kDefaultDisassembleOption = SPV_BINARY_TO_TEXT_OPTION_NO_HEADER | + SPV_BINARY_TO_TEXT_OPTION_FRIENDLY_NAMES + }; + + // Constructs an instance targeting the given environment |env|. + // + // The constructed instance will have an empty message consumer, which just + // ignores all messages from the library. Use SetMessageConsumer() to supply + // one if messages are of concern. + explicit SpirvTools(spv_target_env env); + + // Disables copy/move constructor/assignment operations. + SpirvTools(const SpirvTools&) = delete; + SpirvTools(SpirvTools&&) = delete; + SpirvTools& operator=(const SpirvTools&) = delete; + SpirvTools& operator=(SpirvTools&&) = delete; + + // Destructs this instance. + ~SpirvTools(); + + // Sets the message consumer to the given |consumer|. The |consumer| will be + // invoked once for each message communicated from the library. + void SetMessageConsumer(MessageConsumer consumer); + + // Assembles the given assembly |text| and writes the result to |binary|. + // Returns true on successful assembling. |binary| will be kept untouched if + // assembling is unsuccessful. + // The SPIR-V binary version is set to the highest version of SPIR-V supported + // by the target environment with which this SpirvTools object was created. + bool Assemble(const std::string& text, std::vector* binary, + uint32_t options = kDefaultAssembleOption) const; + // |text_size| specifies the number of bytes in |text|. A terminating null + // character is not required to present in |text| as long as |text| is valid. + // The SPIR-V binary version is set to the highest version of SPIR-V supported + // by the target environment with which this SpirvTools object was created. + bool Assemble(const char* text, size_t text_size, + std::vector* binary, + uint32_t options = kDefaultAssembleOption) const; + + // Disassembles the given SPIR-V |binary| with the given |options| and writes + // the assembly to |text|. Returns true on successful disassembling. |text| + // will be kept untouched if diassembling is unsuccessful. + bool Disassemble(const std::vector& binary, std::string* text, + uint32_t options = kDefaultDisassembleOption) const; + // |binary_size| specifies the number of words in |binary|. + bool Disassemble(const uint32_t* binary, size_t binary_size, + std::string* text, + uint32_t options = kDefaultDisassembleOption) const; + + // Validates the given SPIR-V |binary|. Returns true if no issues are found. + // Otherwise, returns false and communicates issues via the message consumer + // registered. + // Validates for SPIR-V spec rules for the SPIR-V version named in the + // binary's header (at word offset 1). Additionally, if the target + // environment is a client API (such as Vulkan 1.1), then validate for that + // client API version, to the extent that it is verifiable from data in the + // binary itself. + bool Validate(const std::vector& binary) const; + // Like the previous overload, but provides the binary as a pointer and size: + // |binary_size| specifies the number of words in |binary|. + // Validates for SPIR-V spec rules for the SPIR-V version named in the + // binary's header (at word offset 1). Additionally, if the target + // environment is a client API (such as Vulkan 1.1), then validate for that + // client API version, to the extent that it is verifiable from data in the + // binary itself. + bool Validate(const uint32_t* binary, size_t binary_size) const; + // Like the previous overload, but takes an options object. + // Validates for SPIR-V spec rules for the SPIR-V version named in the + // binary's header (at word offset 1). Additionally, if the target + // environment is a client API (such as Vulkan 1.1), then validate for that + // client API version, to the extent that it is verifiable from data in the + // binary itself, or in the validator options. + bool Validate(const uint32_t* binary, size_t binary_size, + spv_validator_options options) const; + + // Was this object successfully constructed. + bool IsValid() const; + + private: + struct Impl; // Opaque struct for holding the data fields used by this class. + std::unique_ptr impl_; // Unique pointer to implementation data. +}; + +} // namespace spvtools + +#endif // INCLUDE_SPIRV_TOOLS_LIBSPIRV_HPP_ diff --git a/local_packages/include/spirv-tools/linker.hpp b/local_packages/include/spirv-tools/linker.hpp new file mode 100644 index 0000000..d2f3e72 --- /dev/null +++ b/local_packages/include/spirv-tools/linker.hpp @@ -0,0 +1,97 @@ +// Copyright (c) 2017 Pierre Moreau +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef INCLUDE_SPIRV_TOOLS_LINKER_HPP_ +#define INCLUDE_SPIRV_TOOLS_LINKER_HPP_ + +#include + +#include +#include + +#include "libspirv.hpp" + +namespace spvtools { + +class LinkerOptions { + public: + LinkerOptions() + : create_library_(false), + verify_ids_(false), + allow_partial_linkage_(false) {} + + // Returns whether a library or an executable should be produced by the + // linking phase. + // + // All exported symbols are kept when creating a library, whereas they will + // be removed when creating an executable. + // The returned value will be true if creating a library, and false if + // creating an executable. + bool GetCreateLibrary() const { return create_library_; } + + // Sets whether a library or an executable should be produced. + void SetCreateLibrary(bool create_library) { + create_library_ = create_library; + } + + // Returns whether to verify the uniqueness of the unique ids in the merged + // context. + bool GetVerifyIds() const { return verify_ids_; } + + // Sets whether to verify the uniqueness of the unique ids in the merged + // context. + void SetVerifyIds(bool verify_ids) { verify_ids_ = verify_ids; } + + // Returns whether to allow for imported symbols to have no corresponding + // exported symbols + bool GetAllowPartialLinkage() const { return allow_partial_linkage_; } + + // Sets whether to allow for imported symbols to have no corresponding + // exported symbols + void SetAllowPartialLinkage(bool allow_partial_linkage) { + allow_partial_linkage_ = allow_partial_linkage; + } + + private: + bool create_library_; + bool verify_ids_; + bool allow_partial_linkage_; +}; + +// Links one or more SPIR-V modules into a new SPIR-V module. That is, combine +// several SPIR-V modules into one, resolving link dependencies between them. +// +// At least one binary has to be provided in |binaries|. Those binaries do not +// have to be valid, but they should be at least parseable. +// The functions can fail due to the following: +// * The given context was not initialised using `spvContextCreate()`; +// * No input modules were given; +// * One or more of those modules were not parseable; +// * The input modules used different addressing or memory models; +// * The ID or global variable number limit were exceeded; +// * Some entry points were defined multiple times; +// * Some imported symbols did not have an exported counterpart; +// * Possibly other reasons. +spv_result_t Link(const Context& context, + const std::vector>& binaries, + std::vector* linked_binary, + const LinkerOptions& options = LinkerOptions()); +spv_result_t Link(const Context& context, const uint32_t* const* binaries, + const size_t* binary_sizes, size_t num_binaries, + std::vector* linked_binary, + const LinkerOptions& options = LinkerOptions()); + +} // namespace spvtools + +#endif // INCLUDE_SPIRV_TOOLS_LINKER_HPP_ diff --git a/local_packages/include/spirv-tools/optimizer.hpp b/local_packages/include/spirv-tools/optimizer.hpp new file mode 100644 index 0000000..41752d6 --- /dev/null +++ b/local_packages/include/spirv-tools/optimizer.hpp @@ -0,0 +1,970 @@ +// Copyright (c) 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef INCLUDE_SPIRV_TOOLS_OPTIMIZER_HPP_ +#define INCLUDE_SPIRV_TOOLS_OPTIMIZER_HPP_ + +#include +#include +#include +#include +#include +#include +#include + +#include "libspirv.hpp" + +namespace spvtools { + +namespace opt { +class Pass; +struct DescriptorSetAndBinding; +} // namespace opt + +// C++ interface for SPIR-V optimization functionalities. It wraps the context +// (including target environment and the corresponding SPIR-V grammar) and +// provides methods for registering optimization passes and optimizing. +// +// Instances of this class provides basic thread-safety guarantee. +class Optimizer { + public: + // The token for an optimization pass. It is returned via one of the + // Create*Pass() standalone functions at the end of this header file and + // consumed by the RegisterPass() method. Tokens are one-time objects that + // only support move; copying is not allowed. + struct PassToken { + struct Impl; // Opaque struct for holding internal data. + + PassToken(std::unique_ptr); + + // Tokens for built-in passes should be created using Create*Pass functions + // below; for out-of-tree passes, use this constructor instead. + // Note that this API isn't guaranteed to be stable and may change without + // preserving source or binary compatibility in the future. + PassToken(std::unique_ptr&& pass); + + // Tokens can only be moved. Copying is disabled. + PassToken(const PassToken&) = delete; + PassToken(PassToken&&); + PassToken& operator=(const PassToken&) = delete; + PassToken& operator=(PassToken&&); + + ~PassToken(); + + std::unique_ptr impl_; // Unique pointer to internal data. + }; + + // Constructs an instance with the given target |env|, which is used to decode + // the binaries to be optimized later. + // + // The instance will have an empty message consumer, which ignores all + // messages from the library. Use SetMessageConsumer() to supply a consumer + // if messages are of concern. + explicit Optimizer(spv_target_env env); + + // Disables copy/move constructor/assignment operations. + Optimizer(const Optimizer&) = delete; + Optimizer(Optimizer&&) = delete; + Optimizer& operator=(const Optimizer&) = delete; + Optimizer& operator=(Optimizer&&) = delete; + + // Destructs this instance. + ~Optimizer(); + + // Sets the message consumer to the given |consumer|. The |consumer| will be + // invoked once for each message communicated from the library. + void SetMessageConsumer(MessageConsumer consumer); + + // Returns a reference to the registered message consumer. + const MessageConsumer& consumer() const; + + // Registers the given |pass| to this optimizer. Passes will be run in the + // exact order of registration. The token passed in will be consumed by this + // method. + Optimizer& RegisterPass(PassToken&& pass); + + // Registers passes that attempt to improve performance of generated code. + // This sequence of passes is subject to constant review and will change + // from time to time. + Optimizer& RegisterPerformancePasses(); + + // Registers passes that attempt to improve the size of generated code. + // This sequence of passes is subject to constant review and will change + // from time to time. + Optimizer& RegisterSizePasses(); + + // Registers passes that attempt to legalize the generated code. + // + // Note: this recipe is specially designed for legalizing SPIR-V. It should be + // used by compilers after translating HLSL source code literally. It should + // *not* be used by general workloads for performance or size improvement. + // + // This sequence of passes is subject to constant review and will change + // from time to time. + Optimizer& RegisterLegalizationPasses(); + + // Register passes specified in the list of |flags|. Each flag must be a + // string of a form accepted by Optimizer::FlagHasValidForm(). + // + // If the list of flags contains an invalid entry, it returns false and an + // error message is emitted to the MessageConsumer object (use + // Optimizer::SetMessageConsumer to define a message consumer, if needed). + // + // If all the passes are registered successfully, it returns true. + bool RegisterPassesFromFlags(const std::vector& flags); + + // Registers the optimization pass associated with |flag|. This only accepts + // |flag| values of the form "--pass_name[=pass_args]". If no such pass + // exists, it returns false. Otherwise, the pass is registered and it returns + // true. + // + // The following flags have special meaning: + // + // -O: Registers all performance optimization passes + // (Optimizer::RegisterPerformancePasses) + // + // -Os: Registers all size optimization passes + // (Optimizer::RegisterSizePasses). + // + // --legalize-hlsl: Registers all passes that legalize SPIR-V generated by an + // HLSL front-end. + bool RegisterPassFromFlag(const std::string& flag); + + // Validates that |flag| has a valid format. Strings accepted: + // + // --pass_name[=pass_args] + // -O + // -Os + // + // If |flag| takes one of the forms above, it returns true. Otherwise, it + // returns false. + bool FlagHasValidForm(const std::string& flag) const; + + // Allows changing, after creation time, the target environment to be + // optimized for and validated. Should be called before calling Run(). + void SetTargetEnv(const spv_target_env env); + + // Optimizes the given SPIR-V module |original_binary| and writes the + // optimized binary into |optimized_binary|. The optimized binary uses + // the same SPIR-V version as the original binary. + // + // Returns true on successful optimization, whether or not the module is + // modified. Returns false if |original_binary| fails to validate or if errors + // occur when processing |original_binary| using any of the registered passes. + // In that case, no further passes are executed and the contents in + // |optimized_binary| may be invalid. + // + // By default, the binary is validated before any transforms are performed, + // and optionally after each transform. Validation uses SPIR-V spec rules + // for the SPIR-V version named in the binary's header (at word offset 1). + // Additionally, if the target environment is a client API (such as + // Vulkan 1.1), then validate for that client API version, to the extent + // that it is verifiable from data in the binary itself. + // + // It's allowed to alias |original_binary| to the start of |optimized_binary|. + bool Run(const uint32_t* original_binary, size_t original_binary_size, + std::vector* optimized_binary) const; + + // DEPRECATED: Same as above, except passes |options| to the validator when + // trying to validate the binary. If |skip_validation| is true, then the + // caller is guaranteeing that |original_binary| is valid, and the validator + // will not be run. The |max_id_bound| is the limit on the max id in the + // module. + bool Run(const uint32_t* original_binary, const size_t original_binary_size, + std::vector* optimized_binary, + const ValidatorOptions& options, bool skip_validation) const; + + // Same as above, except it takes an options object. See the documentation + // for |OptimizerOptions| to see which options can be set. + // + // By default, the binary is validated before any transforms are performed, + // and optionally after each transform. Validation uses SPIR-V spec rules + // for the SPIR-V version named in the binary's header (at word offset 1). + // Additionally, if the target environment is a client API (such as + // Vulkan 1.1), then validate for that client API version, to the extent + // that it is verifiable from data in the binary itself, or from the + // validator options set on the optimizer options. + bool Run(const uint32_t* original_binary, const size_t original_binary_size, + std::vector* optimized_binary, + const spv_optimizer_options opt_options) const; + + // Returns a vector of strings with all the pass names added to this + // optimizer's pass manager. These strings are valid until the associated + // pass manager is destroyed. + std::vector GetPassNames() const; + + // Sets the option to print the disassembly before each pass and after the + // last pass. If |out| is null, then no output is generated. Otherwise, + // output is sent to the |out| output stream. + Optimizer& SetPrintAll(std::ostream* out); + + // Sets the option to print the resource utilization of each pass. If |out| + // is null, then no output is generated. Otherwise, output is sent to the + // |out| output stream. + Optimizer& SetTimeReport(std::ostream* out); + + // Sets the option to validate the module after each pass. + Optimizer& SetValidateAfterAll(bool validate); + + private: + struct Impl; // Opaque struct for holding internal data. + std::unique_ptr impl_; // Unique pointer to internal data. +}; + +// Creates a null pass. +// A null pass does nothing to the SPIR-V module to be optimized. +Optimizer::PassToken CreateNullPass(); + +// Creates a strip-debug-info pass. +// A strip-debug-info pass removes all debug instructions (as documented in +// Section 3.42.2 of the SPIR-V spec) of the SPIR-V module to be optimized. +Optimizer::PassToken CreateStripDebugInfoPass(); + +// [Deprecated] This will create a strip-nonsemantic-info pass. See below. +Optimizer::PassToken CreateStripReflectInfoPass(); + +// Creates a strip-nonsemantic-info pass. +// A strip-nonsemantic-info pass removes all reflections and explicitly +// non-semantic instructions. +Optimizer::PassToken CreateStripNonSemanticInfoPass(); + +// Creates an eliminate-dead-functions pass. +// An eliminate-dead-functions pass will remove all functions that are not in +// the call trees rooted at entry points and exported functions. These +// functions are not needed because they will never be called. +Optimizer::PassToken CreateEliminateDeadFunctionsPass(); + +// Creates an eliminate-dead-members pass. +// An eliminate-dead-members pass will remove all unused members of structures. +// This will not affect the data layout of the remaining members. +Optimizer::PassToken CreateEliminateDeadMembersPass(); + +// Creates a set-spec-constant-default-value pass from a mapping from spec-ids +// to the default values in the form of string. +// A set-spec-constant-default-value pass sets the default values for the +// spec constants that have SpecId decorations (i.e., those defined by +// OpSpecConstant{|True|False} instructions). +Optimizer::PassToken CreateSetSpecConstantDefaultValuePass( + const std::unordered_map& id_value_map); + +// Creates a set-spec-constant-default-value pass from a mapping from spec-ids +// to the default values in the form of bit pattern. +// A set-spec-constant-default-value pass sets the default values for the +// spec constants that have SpecId decorations (i.e., those defined by +// OpSpecConstant{|True|False} instructions). +Optimizer::PassToken CreateSetSpecConstantDefaultValuePass( + const std::unordered_map>& id_value_map); + +// Creates a flatten-decoration pass. +// A flatten-decoration pass replaces grouped decorations with equivalent +// ungrouped decorations. That is, it replaces each OpDecorationGroup +// instruction and associated OpGroupDecorate and OpGroupMemberDecorate +// instructions with equivalent OpDecorate and OpMemberDecorate instructions. +// The pass does not attempt to preserve debug information for instructions +// it removes. +Optimizer::PassToken CreateFlattenDecorationPass(); + +// Creates a freeze-spec-constant-value pass. +// A freeze-spec-constant pass specializes the value of spec constants to +// their default values. This pass only processes the spec constants that have +// SpecId decorations (defined by OpSpecConstant, OpSpecConstantTrue, or +// OpSpecConstantFalse instructions) and replaces them with their normal +// counterparts (OpConstant, OpConstantTrue, or OpConstantFalse). The +// corresponding SpecId annotation instructions will also be removed. This +// pass does not fold the newly added normal constants and does not process +// other spec constants defined by OpSpecConstantComposite or +// OpSpecConstantOp. +Optimizer::PassToken CreateFreezeSpecConstantValuePass(); + +// Creates a fold-spec-constant-op-and-composite pass. +// A fold-spec-constant-op-and-composite pass folds spec constants defined by +// OpSpecConstantOp or OpSpecConstantComposite instruction, to normal Constants +// defined by OpConstantTrue, OpConstantFalse, OpConstant, OpConstantNull, or +// OpConstantComposite instructions. Note that spec constants defined with +// OpSpecConstant, OpSpecConstantTrue, or OpSpecConstantFalse instructions are +// not handled, as these instructions indicate their value are not determined +// and can be changed in future. A spec constant is foldable if all of its +// value(s) can be determined from the module. E.g., an integer spec constant +// defined with OpSpecConstantOp instruction can be folded if its value won't +// change later. This pass will replace the original OpSpecConstantOp +// instruction with an OpConstant instruction. When folding composite spec +// constants, new instructions may be inserted to define the components of the +// composite constant first, then the original spec constants will be replaced +// by OpConstantComposite instructions. +// +// There are some operations not supported yet: +// OpSConvert, OpFConvert, OpQuantizeToF16 and +// all the operations under Kernel capability. +// TODO(qining): Add support for the operations listed above. +Optimizer::PassToken CreateFoldSpecConstantOpAndCompositePass(); + +// Creates a unify-constant pass. +// A unify-constant pass de-duplicates the constants. Constants with the exact +// same value and identical form will be unified and only one constant will +// be kept for each unique pair of type and value. +// There are several cases not handled by this pass: +// 1) Constants defined by OpConstantNull instructions (null constants) and +// constants defined by OpConstantFalse, OpConstant or OpConstantComposite +// with value 0 (zero-valued normal constants) are not considered equivalent. +// So null constants won't be used to replace zero-valued normal constants, +// vice versa. +// 2) Whenever there are decorations to the constant's result id id, the +// constant won't be handled, which means, it won't be used to replace any +// other constants, neither can other constants replace it. +// 3) NaN in float point format with different bit patterns are not unified. +Optimizer::PassToken CreateUnifyConstantPass(); + +// Creates a eliminate-dead-constant pass. +// A eliminate-dead-constant pass removes dead constants, including normal +// constants defined by OpConstant, OpConstantComposite, OpConstantTrue, or +// OpConstantFalse and spec constants defined by OpSpecConstant, +// OpSpecConstantComposite, OpSpecConstantTrue, OpSpecConstantFalse or +// OpSpecConstantOp. +Optimizer::PassToken CreateEliminateDeadConstantPass(); + +// Creates a strength-reduction pass. +// A strength-reduction pass will look for opportunities to replace an +// instruction with an equivalent and less expensive one. For example, +// multiplying by a power of 2 can be replaced by a bit shift. +Optimizer::PassToken CreateStrengthReductionPass(); + +// Creates a block merge pass. +// This pass searches for blocks with a single Branch to a block with no +// other predecessors and merges the blocks into a single block. Continue +// blocks and Merge blocks are not candidates for the second block. +// +// The pass is most useful after Dead Branch Elimination, which can leave +// such sequences of blocks. Merging them makes subsequent passes more +// effective, such as single block local store-load elimination. +// +// While this pass reduces the number of occurrences of this sequence, at +// this time it does not guarantee all such sequences are eliminated. +// +// Presence of phi instructions can inhibit this optimization. Handling +// these is left for future improvements. +Optimizer::PassToken CreateBlockMergePass(); + +// Creates an exhaustive inline pass. +// An exhaustive inline pass attempts to exhaustively inline all function +// calls in all functions in an entry point call tree. The intent is to enable, +// albeit through brute force, analysis and optimization across function +// calls by subsequent optimization passes. As the inlining is exhaustive, +// there is no attempt to optimize for size or runtime performance. Functions +// that are not in the call tree of an entry point are not changed. +Optimizer::PassToken CreateInlineExhaustivePass(); + +// Creates an opaque inline pass. +// An opaque inline pass inlines all function calls in all functions in all +// entry point call trees where the called function contains an opaque type +// in either its parameter types or return type. An opaque type is currently +// defined as Image, Sampler or SampledImage. The intent is to enable, albeit +// through brute force, analysis and optimization across these function calls +// by subsequent passes in order to remove the storing of opaque types which is +// not legal in Vulkan. Functions that are not in the call tree of an entry +// point are not changed. +Optimizer::PassToken CreateInlineOpaquePass(); + +// Creates a single-block local variable load/store elimination pass. +// For every entry point function, do single block memory optimization of +// function variables referenced only with non-access-chain loads and stores. +// For each targeted variable load, if previous store to that variable in the +// block, replace the load's result id with the value id of the store. +// If previous load within the block, replace the current load's result id +// with the previous load's result id. In either case, delete the current +// load. Finally, check if any remaining stores are useless, and delete store +// and variable if possible. +// +// The presence of access chain references and function calls can inhibit +// the above optimization. +// +// Only modules with relaxed logical addressing (see opt/instruction.h) are +// currently processed. +// +// This pass is most effective if preceded by Inlining and +// LocalAccessChainConvert. This pass will reduce the work needed to be done +// by LocalSingleStoreElim and LocalMultiStoreElim. +// +// Only functions in the call tree of an entry point are processed. +Optimizer::PassToken CreateLocalSingleBlockLoadStoreElimPass(); + +// Create dead branch elimination pass. +// For each entry point function, this pass will look for SelectionMerge +// BranchConditionals with constant condition and convert to a Branch to +// the indicated label. It will delete resulting dead blocks. +// +// For all phi functions in merge block, replace all uses with the id +// corresponding to the living predecessor. +// +// Note that some branches and blocks may be left to avoid creating invalid +// control flow. Improving this is left to future work. +// +// This pass is most effective when preceded by passes which eliminate +// local loads and stores, effectively propagating constant values where +// possible. +Optimizer::PassToken CreateDeadBranchElimPass(); + +// Creates an SSA local variable load/store elimination pass. +// For every entry point function, eliminate all loads and stores of function +// scope variables only referenced with non-access-chain loads and stores. +// Eliminate the variables as well. +// +// The presence of access chain references and function calls can inhibit +// the above optimization. +// +// Only shader modules with relaxed logical addressing (see opt/instruction.h) +// are currently processed. Currently modules with any extensions enabled are +// not processed. This is left for future work. +// +// This pass is most effective if preceded by Inlining and +// LocalAccessChainConvert. LocalSingleStoreElim and LocalSingleBlockElim +// will reduce the work that this pass has to do. +Optimizer::PassToken CreateLocalMultiStoreElimPass(); + +// Creates a local access chain conversion pass. +// A local access chain conversion pass identifies all function scope +// variables which are accessed only with loads, stores and access chains +// with constant indices. It then converts all loads and stores of such +// variables into equivalent sequences of loads, stores, extracts and inserts. +// +// This pass only processes entry point functions. It currently only converts +// non-nested, non-ptr access chains. It does not process modules with +// non-32-bit integer types present. Optional memory access options on loads +// and stores are ignored as we are only processing function scope variables. +// +// This pass unifies access to these variables to a single mode and simplifies +// subsequent analysis and elimination of these variables along with their +// loads and stores allowing values to propagate to their points of use where +// possible. +Optimizer::PassToken CreateLocalAccessChainConvertPass(); + +// Creates a local single store elimination pass. +// For each entry point function, this pass eliminates loads and stores for +// function scope variable that are stored to only once, where possible. Only +// whole variable loads and stores are eliminated; access-chain references are +// not optimized. Replace all loads of such variables with the value that is +// stored and eliminate any resulting dead code. +// +// Currently, the presence of access chains and function calls can inhibit this +// pass, however the Inlining and LocalAccessChainConvert passes can make it +// more effective. In additional, many non-load/store memory operations are +// not supported and will prohibit optimization of a function. Support of +// these operations are future work. +// +// Only shader modules with relaxed logical addressing (see opt/instruction.h) +// are currently processed. +// +// This pass will reduce the work needed to be done by LocalSingleBlockElim +// and LocalMultiStoreElim and can improve the effectiveness of other passes +// such as DeadBranchElimination which depend on values for their analysis. +Optimizer::PassToken CreateLocalSingleStoreElimPass(); + +// Creates an insert/extract elimination pass. +// This pass processes each entry point function in the module, searching for +// extracts on a sequence of inserts. It further searches the sequence for an +// insert with indices identical to the extract. If such an insert can be +// found before hitting a conflicting insert, the extract's result id is +// replaced with the id of the values from the insert. +// +// Besides removing extracts this pass enables subsequent dead code elimination +// passes to delete the inserts. This pass performs best after access chains are +// converted to inserts and extracts and local loads and stores are eliminated. +Optimizer::PassToken CreateInsertExtractElimPass(); + +// Creates a dead insert elimination pass. +// This pass processes each entry point function in the module, searching for +// unreferenced inserts into composite types. These are most often unused +// stores to vector components. They are unused because they are never +// referenced, or because there is another insert to the same component between +// the insert and the reference. After removing the inserts, dead code +// elimination is attempted on the inserted values. +// +// This pass performs best after access chains are converted to inserts and +// extracts and local loads and stores are eliminated. While executing this +// pass can be advantageous on its own, it is also advantageous to execute +// this pass after CreateInsertExtractPass() as it will remove any unused +// inserts created by that pass. +Optimizer::PassToken CreateDeadInsertElimPass(); + +// Create aggressive dead code elimination pass +// This pass eliminates unused code from the module. In addition, +// it detects and eliminates code which may have spurious uses but which do +// not contribute to the output of the function. The most common cause of +// such code sequences is summations in loops whose result is no longer used +// due to dead code elimination. This optimization has additional compile +// time cost over standard dead code elimination. +// +// This pass only processes entry point functions. It also only processes +// shaders with relaxed logical addressing (see opt/instruction.h). It +// currently will not process functions with function calls. Unreachable +// functions are deleted. +// +// This pass will be made more effective by first running passes that remove +// dead control flow and inlines function calls. +// +// This pass can be especially useful after running Local Access Chain +// Conversion, which tends to cause cycles of dead code to be left after +// Store/Load elimination passes are completed. These cycles cannot be +// eliminated with standard dead code elimination. +// +// If |preserve_interface| is true, all non-io variables in the entry point +// interface are considered live and are not eliminated. This mode is needed +// by GPU-Assisted validation instrumentation, where a change in the interface +// is not allowed. +Optimizer::PassToken CreateAggressiveDCEPass(); +Optimizer::PassToken CreateAggressiveDCEPass(bool preserve_interface); + +// Creates a remove-unused-interface-variables pass. +// Removes variables referenced on the |OpEntryPoint| instruction that are not +// referenced in the entry point function or any function in its call tree. Note +// that this could cause the shader interface to no longer match other shader +// stages. +Optimizer::PassToken CreateRemoveUnusedInterfaceVariablesPass(); + +// Creates an empty pass. +// This is deprecated and will be removed. +// TODO(jaebaek): remove this pass after handling glslang's broken unit tests. +// https://github.com/KhronosGroup/glslang/pull/2440 +Optimizer::PassToken CreatePropagateLineInfoPass(); + +// Creates an empty pass. +// This is deprecated and will be removed. +// TODO(jaebaek): remove this pass after handling glslang's broken unit tests. +// https://github.com/KhronosGroup/glslang/pull/2440 +Optimizer::PassToken CreateRedundantLineInfoElimPass(); + +// Creates a compact ids pass. +// The pass remaps result ids to a compact and gapless range starting from %1. +Optimizer::PassToken CreateCompactIdsPass(); + +// Creates a remove duplicate pass. +// This pass removes various duplicates: +// * duplicate capabilities; +// * duplicate extended instruction imports; +// * duplicate types; +// * duplicate decorations. +Optimizer::PassToken CreateRemoveDuplicatesPass(); + +// Creates a CFG cleanup pass. +// This pass removes cruft from the control flow graph of functions that are +// reachable from entry points and exported functions. It currently includes the +// following functionality: +// +// - Removal of unreachable basic blocks. +Optimizer::PassToken CreateCFGCleanupPass(); + +// Create dead variable elimination pass. +// This pass will delete module scope variables, along with their decorations, +// that are not referenced. +Optimizer::PassToken CreateDeadVariableEliminationPass(); + +// create merge return pass. +// changes functions that have multiple return statements so they have a single +// return statement. +// +// for structured control flow it is assumed that the only unreachable blocks in +// the function are trivial merge and continue blocks. +// +// a trivial merge block contains the label and an opunreachable instructions, +// nothing else. a trivial continue block contain a label and an opbranch to +// the header, nothing else. +// +// these conditions are guaranteed to be met after running dead-branch +// elimination. +Optimizer::PassToken CreateMergeReturnPass(); + +// Create value numbering pass. +// This pass will look for instructions in the same basic block that compute the +// same value, and remove the redundant ones. +Optimizer::PassToken CreateLocalRedundancyEliminationPass(); + +// Create LICM pass. +// This pass will look for invariant instructions inside loops and hoist them to +// the loops preheader. +Optimizer::PassToken CreateLoopInvariantCodeMotionPass(); + +// Creates a loop fission pass. +// This pass will split all top level loops whose register pressure exceedes the +// given |threshold|. +Optimizer::PassToken CreateLoopFissionPass(size_t threshold); + +// Creates a loop fusion pass. +// This pass will look for adjacent loops that are compatible and legal to be +// fused. The fuse all such loops as long as the register usage for the fused +// loop stays under the threshold defined by |max_registers_per_loop|. +Optimizer::PassToken CreateLoopFusionPass(size_t max_registers_per_loop); + +// Creates a loop peeling pass. +// This pass will look for conditions inside a loop that are true or false only +// for the N first or last iteration. For loop with such condition, those N +// iterations of the loop will be executed outside of the main loop. +// To limit code size explosion, the loop peeling can only happen if the code +// size growth for each loop is under |code_growth_threshold|. +Optimizer::PassToken CreateLoopPeelingPass(); + +// Creates a loop unswitch pass. +// This pass will look for loop independent branch conditions and move the +// condition out of the loop and version the loop based on the taken branch. +// Works best after LICM and local multi store elimination pass. +Optimizer::PassToken CreateLoopUnswitchPass(); + +// Create global value numbering pass. +// This pass will look for instructions where the same value is computed on all +// paths leading to the instruction. Those instructions are deleted. +Optimizer::PassToken CreateRedundancyEliminationPass(); + +// Create scalar replacement pass. +// This pass replaces composite function scope variables with variables for each +// element if those elements are accessed individually. The parameter is a +// limit on the number of members in the composite variable that the pass will +// consider replacing. +Optimizer::PassToken CreateScalarReplacementPass(uint32_t size_limit = 100); + +// Create a private to local pass. +// This pass looks for variables declared in the private storage class that are +// used in only one function. Those variables are moved to the function storage +// class in the function that they are used. +Optimizer::PassToken CreatePrivateToLocalPass(); + +// Creates a conditional constant propagation (CCP) pass. +// This pass implements the SSA-CCP algorithm in +// +// Constant propagation with conditional branches, +// Wegman and Zadeck, ACM TOPLAS 13(2):181-210. +// +// Constant values in expressions and conditional jumps are folded and +// simplified. This may reduce code size by removing never executed jump targets +// and computations with constant operands. +Optimizer::PassToken CreateCCPPass(); + +// Creates a workaround driver bugs pass. This pass attempts to work around +// a known driver bug (issue #1209) by identifying the bad code sequences and +// rewriting them. +// +// Current workaround: Avoid OpUnreachable instructions in loops. +Optimizer::PassToken CreateWorkaround1209Pass(); + +// Creates a pass that converts if-then-else like assignments into OpSelect. +Optimizer::PassToken CreateIfConversionPass(); + +// Creates a pass that will replace instructions that are not valid for the +// current shader stage by constants. Has no effect on non-shader modules. +Optimizer::PassToken CreateReplaceInvalidOpcodePass(); + +// Creates a pass that simplifies instructions using the instruction folder. +Optimizer::PassToken CreateSimplificationPass(); + +// Create loop unroller pass. +// Creates a pass to unroll loops which have the "Unroll" loop control +// mask set. The loops must meet a specific criteria in order to be unrolled +// safely this criteria is checked before doing the unroll by the +// LoopUtils::CanPerformUnroll method. Any loop that does not meet the criteria +// won't be unrolled. See CanPerformUnroll LoopUtils.h for more information. +Optimizer::PassToken CreateLoopUnrollPass(bool fully_unroll, int factor = 0); + +// Create the SSA rewrite pass. +// This pass converts load/store operations on function local variables into +// operations on SSA IDs. This allows SSA optimizers to act on these variables. +// Only variables that are local to the function and of supported types are +// processed (see IsSSATargetVar for details). +Optimizer::PassToken CreateSSARewritePass(); + +// Create pass to convert relaxed precision instructions to half precision. +// This pass converts as many relaxed float32 arithmetic operations to half as +// possible. It converts any float32 operands to half if needed. It converts +// any resulting half precision values back to float32 as needed. No variables +// are changed. No image operations are changed. +// +// Best if run after function scope store/load and composite operation +// eliminations are run. Also best if followed by instruction simplification, +// redundancy elimination and DCE. +Optimizer::PassToken CreateConvertRelaxedToHalfPass(); + +// Create relax float ops pass. +// This pass decorates all float32 result instructions with RelaxedPrecision +// if not already so decorated. +Optimizer::PassToken CreateRelaxFloatOpsPass(); + +// Create copy propagate arrays pass. +// This pass looks to copy propagate memory references for arrays. It looks +// for specific code patterns to recognize array copies. +Optimizer::PassToken CreateCopyPropagateArraysPass(); + +// Create a vector dce pass. +// This pass looks for components of vectors that are unused, and removes them +// from the vector. Note this would still leave around lots of dead code that +// a pass of ADCE will be able to remove. +Optimizer::PassToken CreateVectorDCEPass(); + +// Create a pass to reduce the size of loads. +// This pass looks for loads of structures where only a few of its members are +// used. It replaces the loads feeding an OpExtract with an OpAccessChain and +// a load of the specific elements. The parameter is a threshold to determine +// whether we have to replace the load or not. If the ratio of the used +// components of the load is less than the threshold, we replace the load. +Optimizer::PassToken CreateReduceLoadSizePass( + double load_replacement_threshold = 0.9); + +// Create a pass to combine chained access chains. +// This pass looks for access chains fed by other access chains and combines +// them into a single instruction where possible. +Optimizer::PassToken CreateCombineAccessChainsPass(); + +// Create a pass to instrument bindless descriptor checking +// This pass instruments all bindless references to check that descriptor +// array indices are inbounds, and if the descriptor indexing extension is +// enabled, that the descriptor has been initialized. If the reference is +// invalid, a record is written to the debug output buffer (if space allows) +// and a null value is returned. This pass is designed to support bindless +// validation in the Vulkan validation layers. +// +// TODO(greg-lunarg): Add support for buffer references. Currently only does +// checking for image references. +// +// Dead code elimination should be run after this pass as the original, +// potentially invalid code is not removed and could cause undefined behavior, +// including crashes. It may also be beneficial to run Simplification +// (ie Constant Propagation), DeadBranchElim and BlockMerge after this pass to +// optimize instrument code involving the testing of compile-time constants. +// It is also generally recommended that this pass (and all +// instrumentation passes) be run after any legalization and optimization +// passes. This will give better analysis for the instrumentation and avoid +// potentially de-optimizing the instrument code, for example, inlining +// the debug record output function throughout the module. +// +// The instrumentation will read and write buffers in debug +// descriptor set |desc_set|. It will write |shader_id| in each output record +// to identify the shader module which generated the record. +// |desc_length_enable| controls instrumentation of runtime descriptor array +// references, |desc_init_enable| controls instrumentation of descriptor +// initialization checking, and |buff_oob_enable| controls instrumentation +// of storage and uniform buffer bounds checking, all of which require input +// buffer support. |texbuff_oob_enable| controls instrumentation of texel +// buffers, which does not require input buffer support. +Optimizer::PassToken CreateInstBindlessCheckPass( + uint32_t desc_set, uint32_t shader_id, bool desc_length_enable = false, + bool desc_init_enable = false, bool buff_oob_enable = false, + bool texbuff_oob_enable = false); + +// Create a pass to instrument physical buffer address checking +// This pass instruments all physical buffer address references to check that +// all referenced bytes fall in a valid buffer. If the reference is +// invalid, a record is written to the debug output buffer (if space allows) +// and a null value is returned. This pass is designed to support buffer +// address validation in the Vulkan validation layers. +// +// Dead code elimination should be run after this pass as the original, +// potentially invalid code is not removed and could cause undefined behavior, +// including crashes. Instruction simplification would likely also be +// beneficial. It is also generally recommended that this pass (and all +// instrumentation passes) be run after any legalization and optimization +// passes. This will give better analysis for the instrumentation and avoid +// potentially de-optimizing the instrument code, for example, inlining +// the debug record output function throughout the module. +// +// The instrumentation will read and write buffers in debug +// descriptor set |desc_set|. It will write |shader_id| in each output record +// to identify the shader module which generated the record. +Optimizer::PassToken CreateInstBuffAddrCheckPass(uint32_t desc_set, + uint32_t shader_id); + +// Create a pass to instrument OpDebugPrintf instructions. +// This pass replaces all OpDebugPrintf instructions with instructions to write +// a record containing the string id and the all specified values into a special +// printf output buffer (if space allows). This pass is designed to support +// the printf validation in the Vulkan validation layers. +// +// The instrumentation will write buffers in debug descriptor set |desc_set|. +// It will write |shader_id| in each output record to identify the shader +// module which generated the record. +Optimizer::PassToken CreateInstDebugPrintfPass(uint32_t desc_set, + uint32_t shader_id); + +// Create a pass to upgrade to the VulkanKHR memory model. +// This pass upgrades the Logical GLSL450 memory model to Logical VulkanKHR. +// Additionally, it modifies memory, image, atomic and barrier operations to +// conform to that model's requirements. +Optimizer::PassToken CreateUpgradeMemoryModelPass(); + +// Create a pass to do code sinking. Code sinking is a transformation +// where an instruction is moved into a more deeply nested construct. +Optimizer::PassToken CreateCodeSinkingPass(); + +// Create a pass to fix incorrect storage classes. In order to make code +// generation simpler, DXC may generate code where the storage classes do not +// match up correctly. This pass will fix the errors that it can. +Optimizer::PassToken CreateFixStorageClassPass(); + +// Creates a graphics robust access pass. +// +// This pass injects code to clamp indexed accesses to buffers and internal +// arrays, providing guarantees satisfying Vulkan's robustBufferAccess rules. +// +// TODO(dneto): Clamps coordinates and sample index for pointer calculations +// into storage images (OpImageTexelPointer). For an cube array image, it +// assumes the maximum layer count times 6 is at most 0xffffffff. +// +// NOTE: This pass will fail with a message if: +// - The module is not a Shader module. +// - The module declares VariablePointers, VariablePointersStorageBuffer, or +// RuntimeDescriptorArrayEXT capabilities. +// - The module uses an addressing model other than Logical +// - Access chain indices are wider than 64 bits. +// - Access chain index for a struct is not an OpConstant integer or is out +// of range. (The module is already invalid if that is the case.) +// - TODO(dneto): The OpImageTexelPointer coordinate component is not 32-bits +// wide. +// +// NOTE: Access chain indices are always treated as signed integers. So +// if an array has a fixed size of more than 2^31 elements, then elements +// from 2^31 and above are never accessible with a 32-bit index, +// signed or unsigned. For this case, this pass will clamp the index +// between 0 and at 2^31-1, inclusive. +// Similarly, if an array has more then 2^15 element and is accessed with +// a 16-bit index, then elements from 2^15 and above are not accessible. +// In this case, the pass will clamp the index between 0 and 2^15-1 +// inclusive. +Optimizer::PassToken CreateGraphicsRobustAccessPass(); + +// Create a pass to spread Volatile semantics to variables with SMIDNV, +// WarpIDNV, SubgroupSize, SubgroupLocalInvocationId, SubgroupEqMask, +// SubgroupGeMask, SubgroupGtMask, SubgroupLeMask, or SubgroupLtMask BuiltIn +// decorations or OpLoad for them when the shader model is the ray generation, +// closest hit, miss, intersection, or callable. This pass can be used for +// VUID-StandaloneSpirv-VulkanMemoryModel-04678 and +// VUID-StandaloneSpirv-VulkanMemoryModel-04679 (See "Standalone SPIR-V +// Validation" section of Vulkan spec "Appendix A: Vulkan Environment for +// SPIR-V"). When the SPIR-V version is 1.6 or above, the pass also spreads +// the Volatile semantics to a variable with HelperInvocation BuiltIn decoration +// in the fragement shader. +Optimizer::PassToken CreateSpreadVolatileSemanticsPass(); + +// Create a pass to replace a descriptor access using variable index. +// This pass replaces every access using a variable index to array variable +// |desc| that has a DescriptorSet and Binding decorations with a constant +// element of the array. In order to replace the access using a variable index +// with the constant element, it uses a switch statement. +Optimizer::PassToken CreateReplaceDescArrayAccessUsingVarIndexPass(); + +// Create descriptor scalar replacement pass. +// This pass replaces every array variable |desc| that has a DescriptorSet and +// Binding decorations with a new variable for each element of the array. +// Suppose |desc| was bound at binding |b|. Then the variable corresponding to +// |desc[i]| will have binding |b+i|. The descriptor set will be the same. It +// is assumed that no other variable already has a binding that will used by one +// of the new variables. If not, the pass will generate invalid Spir-V. All +// accesses to |desc| must be OpAccessChain instructions with a literal index +// for the first index. +Optimizer::PassToken CreateDescriptorScalarReplacementPass(); + +// Create a pass to replace each OpKill instruction with a function call to a +// function that has a single OpKill. Also replace each OpTerminateInvocation +// instruction with a function call to a function that has a single +// OpTerminateInvocation. This allows more code to be inlined. +Optimizer::PassToken CreateWrapOpKillPass(); + +// Replaces the extensions VK_AMD_shader_ballot,VK_AMD_gcn_shader, and +// VK_AMD_shader_trinary_minmax with equivalent code using core instructions and +// capabilities. +Optimizer::PassToken CreateAmdExtToKhrPass(); + +// Replaces the internal version of GLSLstd450 InterpolateAt* extended +// instructions with the externally valid version. The internal version allows +// an OpLoad of the interpolant for the first argument. This pass removes the +// OpLoad and replaces it with its pointer. glslang and possibly other +// frontends will create the internal version for HLSL. This pass will be part +// of HLSL legalization and should be called after interpolants have been +// propagated into their final positions. +Optimizer::PassToken CreateInterpolateFixupPass(); + +// Removes unused components from composite input variables. Current +// implementation just removes trailing unused components from input arrays +// and structs. The pass performs best after maximizing dead code removal. +// A subsequent dead code elimination pass would be beneficial in removing +// newly unused component types. +// +// WARNING: This pass can only be safely applied standalone to vertex shaders +// as it can otherwise cause interface incompatibilities with the preceding +// shader in the pipeline. If applied to non-vertex shaders, the user should +// follow by applying EliminateDeadOutputStores and +// EliminateDeadOutputComponents to the preceding shader. +Optimizer::PassToken CreateEliminateDeadInputComponentsPass(); + +// Removes unused components from composite output variables. Current +// implementation just removes trailing unused components from output arrays +// and structs. The pass performs best after eliminating dead output stores. +// A subsequent dead code elimination pass would be beneficial in removing +// newly unused component types. Currently only supports vertex and fragment +// shaders. +// +// WARNING: This pass cannot be safely applied standalone as it can cause +// interface incompatibility with the following shader in the pipeline. The +// user should first apply EliminateDeadInputComponents to the following +// shader, then apply EliminateDeadOutputStores to this shader. +Optimizer::PassToken CreateEliminateDeadOutputComponentsPass(); + +// Removes unused components from composite input variables. This safe +// version will not cause interface incompatibilities since it only changes +// vertex shaders. The current implementation just removes trailing unused +// components from input structs and input arrays. The pass performs best +// after maximizing dead code removal. A subsequent dead code elimination +// pass would be beneficial in removing newly unused component types. +Optimizer::PassToken CreateEliminateDeadInputComponentsSafePass(); + +// Analyzes shader and populates |live_locs| and |live_builtins|. Best results +// will be obtained if shader has all dead code eliminated first. |live_locs| +// and |live_builtins| are subsequently used when calling +// CreateEliminateDeadOutputStoresPass on the preceding shader. Currently only +// supports tesc, tese, geom, and frag shaders. +Optimizer::PassToken CreateAnalyzeLiveInputPass( + std::unordered_set* live_locs, + std::unordered_set* live_builtins); + +// Removes stores to output locations not listed in |live_locs| or +// |live_builtins|. Best results are obtained if constant propagation is +// performed first. A subsequent call to ADCE will eliminate any dead code +// created by the removal of the stores. A subsequent call to +// CreateEliminateDeadOutputComponentsPass will eliminate any dead output +// components created by the elimination of the stores. Currently only supports +// vert, tesc, tese, and geom shaders. +Optimizer::PassToken CreateEliminateDeadOutputStoresPass( + std::unordered_set* live_locs, + std::unordered_set* live_builtins); + +// Creates a convert-to-sampled-image pass to convert images and/or +// samplers with given pairs of descriptor set and binding to sampled image. +// If a pair of an image and a sampler have the same pair of descriptor set and +// binding that is one of the given pairs, they will be converted to a sampled +// image. In addition, if only an image has the descriptor set and binding that +// is one of the given pairs, it will be converted to a sampled image as well. +Optimizer::PassToken CreateConvertToSampledImagePass( + const std::vector& + descriptor_set_binding_pairs); + +// Create an interface-variable-scalar-replacement pass that replaces array or +// matrix interface variables with a series of scalar or vector interface +// variables. For example, it replaces `float3 foo[2]` with `float3 foo0, foo1`. +Optimizer::PassToken CreateInterfaceVariableScalarReplacementPass(); + +// Creates a remove-dont-inline pass to remove the |DontInline| function control +// from every function in the module. This is useful if you want the inliner to +// inline these functions some reason. +Optimizer::PassToken CreateRemoveDontInlinePass(); +// Create a fix-func-call-param pass to fix non memory argument for the function +// call, as spirv-validation requires function parameters to be an memory +// object, currently the pass would remove accesschain pointer argument passed +// to the function +Optimizer::PassToken CreateFixFuncCallArgumentsPass(); +} // namespace spvtools + +#endif // INCLUDE_SPIRV_TOOLS_OPTIMIZER_HPP_ diff --git a/local_packages/include/spirv_cross/GLSL.std.450.h b/local_packages/include/spirv_cross/GLSL.std.450.h new file mode 100644 index 0000000..2686fc4 --- /dev/null +++ b/local_packages/include/spirv_cross/GLSL.std.450.h @@ -0,0 +1,114 @@ +/* + * Copyright 2014-2016,2021 The Khronos Group, Inc. + * SPDX-License-Identifier: MIT + * + * MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS + * STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND + * HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ +*/ + +#ifndef GLSLstd450_H +#define GLSLstd450_H + +static const int GLSLstd450Version = 100; +static const int GLSLstd450Revision = 3; + +enum GLSLstd450 { + GLSLstd450Bad = 0, // Don't use + + GLSLstd450Round = 1, + GLSLstd450RoundEven = 2, + GLSLstd450Trunc = 3, + GLSLstd450FAbs = 4, + GLSLstd450SAbs = 5, + GLSLstd450FSign = 6, + GLSLstd450SSign = 7, + GLSLstd450Floor = 8, + GLSLstd450Ceil = 9, + GLSLstd450Fract = 10, + + GLSLstd450Radians = 11, + GLSLstd450Degrees = 12, + GLSLstd450Sin = 13, + GLSLstd450Cos = 14, + GLSLstd450Tan = 15, + GLSLstd450Asin = 16, + GLSLstd450Acos = 17, + GLSLstd450Atan = 18, + GLSLstd450Sinh = 19, + GLSLstd450Cosh = 20, + GLSLstd450Tanh = 21, + GLSLstd450Asinh = 22, + GLSLstd450Acosh = 23, + GLSLstd450Atanh = 24, + GLSLstd450Atan2 = 25, + + GLSLstd450Pow = 26, + GLSLstd450Exp = 27, + GLSLstd450Log = 28, + GLSLstd450Exp2 = 29, + GLSLstd450Log2 = 30, + GLSLstd450Sqrt = 31, + GLSLstd450InverseSqrt = 32, + + GLSLstd450Determinant = 33, + GLSLstd450MatrixInverse = 34, + + GLSLstd450Modf = 35, // second operand needs an OpVariable to write to + GLSLstd450ModfStruct = 36, // no OpVariable operand + GLSLstd450FMin = 37, + GLSLstd450UMin = 38, + GLSLstd450SMin = 39, + GLSLstd450FMax = 40, + GLSLstd450UMax = 41, + GLSLstd450SMax = 42, + GLSLstd450FClamp = 43, + GLSLstd450UClamp = 44, + GLSLstd450SClamp = 45, + GLSLstd450FMix = 46, + GLSLstd450IMix = 47, // Reserved + GLSLstd450Step = 48, + GLSLstd450SmoothStep = 49, + + GLSLstd450Fma = 50, + GLSLstd450Frexp = 51, // second operand needs an OpVariable to write to + GLSLstd450FrexpStruct = 52, // no OpVariable operand + GLSLstd450Ldexp = 53, + + GLSLstd450PackSnorm4x8 = 54, + GLSLstd450PackUnorm4x8 = 55, + GLSLstd450PackSnorm2x16 = 56, + GLSLstd450PackUnorm2x16 = 57, + GLSLstd450PackHalf2x16 = 58, + GLSLstd450PackDouble2x32 = 59, + GLSLstd450UnpackSnorm2x16 = 60, + GLSLstd450UnpackUnorm2x16 = 61, + GLSLstd450UnpackHalf2x16 = 62, + GLSLstd450UnpackSnorm4x8 = 63, + GLSLstd450UnpackUnorm4x8 = 64, + GLSLstd450UnpackDouble2x32 = 65, + + GLSLstd450Length = 66, + GLSLstd450Distance = 67, + GLSLstd450Cross = 68, + GLSLstd450Normalize = 69, + GLSLstd450FaceForward = 70, + GLSLstd450Reflect = 71, + GLSLstd450Refract = 72, + + GLSLstd450FindILsb = 73, + GLSLstd450FindSMsb = 74, + GLSLstd450FindUMsb = 75, + + GLSLstd450InterpolateAtCentroid = 76, + GLSLstd450InterpolateAtSample = 77, + GLSLstd450InterpolateAtOffset = 78, + + GLSLstd450NMin = 79, + GLSLstd450NMax = 80, + GLSLstd450NClamp = 81, + + GLSLstd450Count +}; + +#endif // #ifndef GLSLstd450_H diff --git a/local_packages/include/spirv_cross/spirv.h b/local_packages/include/spirv_cross/spirv.h new file mode 100644 index 0000000..5b6e8aa --- /dev/null +++ b/local_packages/include/spirv_cross/spirv.h @@ -0,0 +1,2568 @@ +/* +** Copyright (c) 2014-2020 The Khronos Group Inc. +** +** Permission is hereby granted, free of charge, to any person obtaining a copy +** of this software and/or associated documentation files (the "Materials"), +** to deal in the Materials without restriction, including without limitation +** the rights to use, copy, modify, merge, publish, distribute, sublicense, +** and/or sell copies of the Materials, and to permit persons to whom the +** Materials are furnished to do so, subject to the following conditions: +** +** The above copyright notice and this permission notice shall be included in +** all copies or substantial portions of the Materials. +** +** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS +** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND +** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS +** IN THE MATERIALS. +*/ + +/* +** This header is automatically generated by the same tool that creates +** the Binary Section of the SPIR-V specification. +*/ + +/* +** Enumeration tokens for SPIR-V, in various styles: +** C, C++, C++11, JSON, Lua, Python, C#, D, Beef +** +** - C will have tokens with a "Spv" prefix, e.g.: SpvSourceLanguageGLSL +** - C++ will have tokens in the "spv" name space, e.g.: spv::SourceLanguageGLSL +** - C++11 will use enum classes in the spv namespace, e.g.: spv::SourceLanguage::GLSL +** - Lua will use tables, e.g.: spv.SourceLanguage.GLSL +** - Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL'] +** - C# will use enum classes in the Specification class located in the "Spv" namespace, +** e.g.: Spv.Specification.SourceLanguage.GLSL +** - D will have tokens under the "spv" module, e.g: spv.SourceLanguage.GLSL +** - Beef will use enum classes in the Specification class located in the "Spv" namespace, +** e.g.: Spv.Specification.SourceLanguage.GLSL +** +** Some tokens act like mask values, which can be OR'd together, +** while others are mutually exclusive. The mask-like ones have +** "Mask" in their name, and a parallel enum that has the shift +** amount (1 << x) for each corresponding enumerant. +*/ + +#ifndef spirv_H +#define spirv_H + +typedef unsigned int SpvId; + +#define SPV_VERSION 0x10600 +#define SPV_REVISION 1 + +static const unsigned int SpvMagicNumber = 0x07230203; +static const unsigned int SpvVersion = 0x00010600; +static const unsigned int SpvRevision = 1; +static const unsigned int SpvOpCodeMask = 0xffff; +static const unsigned int SpvWordCountShift = 16; + +typedef enum SpvSourceLanguage_ { + SpvSourceLanguageUnknown = 0, + SpvSourceLanguageESSL = 1, + SpvSourceLanguageGLSL = 2, + SpvSourceLanguageOpenCL_C = 3, + SpvSourceLanguageOpenCL_CPP = 4, + SpvSourceLanguageHLSL = 5, + SpvSourceLanguageCPP_for_OpenCL = 6, + SpvSourceLanguageSYCL = 7, + SpvSourceLanguageMax = 0x7fffffff, +} SpvSourceLanguage; + +typedef enum SpvExecutionModel_ { + SpvExecutionModelVertex = 0, + SpvExecutionModelTessellationControl = 1, + SpvExecutionModelTessellationEvaluation = 2, + SpvExecutionModelGeometry = 3, + SpvExecutionModelFragment = 4, + SpvExecutionModelGLCompute = 5, + SpvExecutionModelKernel = 6, + SpvExecutionModelTaskNV = 5267, + SpvExecutionModelMeshNV = 5268, + SpvExecutionModelRayGenerationKHR = 5313, + SpvExecutionModelRayGenerationNV = 5313, + SpvExecutionModelIntersectionKHR = 5314, + SpvExecutionModelIntersectionNV = 5314, + SpvExecutionModelAnyHitKHR = 5315, + SpvExecutionModelAnyHitNV = 5315, + SpvExecutionModelClosestHitKHR = 5316, + SpvExecutionModelClosestHitNV = 5316, + SpvExecutionModelMissKHR = 5317, + SpvExecutionModelMissNV = 5317, + SpvExecutionModelCallableKHR = 5318, + SpvExecutionModelCallableNV = 5318, + SpvExecutionModelTaskEXT = 5364, + SpvExecutionModelMeshEXT = 5365, + SpvExecutionModelMax = 0x7fffffff, +} SpvExecutionModel; + +typedef enum SpvAddressingModel_ { + SpvAddressingModelLogical = 0, + SpvAddressingModelPhysical32 = 1, + SpvAddressingModelPhysical64 = 2, + SpvAddressingModelPhysicalStorageBuffer64 = 5348, + SpvAddressingModelPhysicalStorageBuffer64EXT = 5348, + SpvAddressingModelMax = 0x7fffffff, +} SpvAddressingModel; + +typedef enum SpvMemoryModel_ { + SpvMemoryModelSimple = 0, + SpvMemoryModelGLSL450 = 1, + SpvMemoryModelOpenCL = 2, + SpvMemoryModelVulkan = 3, + SpvMemoryModelVulkanKHR = 3, + SpvMemoryModelMax = 0x7fffffff, +} SpvMemoryModel; + +typedef enum SpvExecutionMode_ { + SpvExecutionModeInvocations = 0, + SpvExecutionModeSpacingEqual = 1, + SpvExecutionModeSpacingFractionalEven = 2, + SpvExecutionModeSpacingFractionalOdd = 3, + SpvExecutionModeVertexOrderCw = 4, + SpvExecutionModeVertexOrderCcw = 5, + SpvExecutionModePixelCenterInteger = 6, + SpvExecutionModeOriginUpperLeft = 7, + SpvExecutionModeOriginLowerLeft = 8, + SpvExecutionModeEarlyFragmentTests = 9, + SpvExecutionModePointMode = 10, + SpvExecutionModeXfb = 11, + SpvExecutionModeDepthReplacing = 12, + SpvExecutionModeDepthGreater = 14, + SpvExecutionModeDepthLess = 15, + SpvExecutionModeDepthUnchanged = 16, + SpvExecutionModeLocalSize = 17, + SpvExecutionModeLocalSizeHint = 18, + SpvExecutionModeInputPoints = 19, + SpvExecutionModeInputLines = 20, + SpvExecutionModeInputLinesAdjacency = 21, + SpvExecutionModeTriangles = 22, + SpvExecutionModeInputTrianglesAdjacency = 23, + SpvExecutionModeQuads = 24, + SpvExecutionModeIsolines = 25, + SpvExecutionModeOutputVertices = 26, + SpvExecutionModeOutputPoints = 27, + SpvExecutionModeOutputLineStrip = 28, + SpvExecutionModeOutputTriangleStrip = 29, + SpvExecutionModeVecTypeHint = 30, + SpvExecutionModeContractionOff = 31, + SpvExecutionModeInitializer = 33, + SpvExecutionModeFinalizer = 34, + SpvExecutionModeSubgroupSize = 35, + SpvExecutionModeSubgroupsPerWorkgroup = 36, + SpvExecutionModeSubgroupsPerWorkgroupId = 37, + SpvExecutionModeLocalSizeId = 38, + SpvExecutionModeLocalSizeHintId = 39, + SpvExecutionModeSubgroupUniformControlFlowKHR = 4421, + SpvExecutionModePostDepthCoverage = 4446, + SpvExecutionModeDenormPreserve = 4459, + SpvExecutionModeDenormFlushToZero = 4460, + SpvExecutionModeSignedZeroInfNanPreserve = 4461, + SpvExecutionModeRoundingModeRTE = 4462, + SpvExecutionModeRoundingModeRTZ = 4463, + SpvExecutionModeEarlyAndLateFragmentTestsAMD = 5017, + SpvExecutionModeStencilRefReplacingEXT = 5027, + SpvExecutionModeStencilRefUnchangedFrontAMD = 5079, + SpvExecutionModeStencilRefGreaterFrontAMD = 5080, + SpvExecutionModeStencilRefLessFrontAMD = 5081, + SpvExecutionModeStencilRefUnchangedBackAMD = 5082, + SpvExecutionModeStencilRefGreaterBackAMD = 5083, + SpvExecutionModeStencilRefLessBackAMD = 5084, + SpvExecutionModeOutputLinesEXT = 5269, + SpvExecutionModeOutputLinesNV = 5269, + SpvExecutionModeOutputPrimitivesEXT = 5270, + SpvExecutionModeOutputPrimitivesNV = 5270, + SpvExecutionModeDerivativeGroupQuadsNV = 5289, + SpvExecutionModeDerivativeGroupLinearNV = 5290, + SpvExecutionModeOutputTrianglesEXT = 5298, + SpvExecutionModeOutputTrianglesNV = 5298, + SpvExecutionModePixelInterlockOrderedEXT = 5366, + SpvExecutionModePixelInterlockUnorderedEXT = 5367, + SpvExecutionModeSampleInterlockOrderedEXT = 5368, + SpvExecutionModeSampleInterlockUnorderedEXT = 5369, + SpvExecutionModeShadingRateInterlockOrderedEXT = 5370, + SpvExecutionModeShadingRateInterlockUnorderedEXT = 5371, + SpvExecutionModeSharedLocalMemorySizeINTEL = 5618, + SpvExecutionModeRoundingModeRTPINTEL = 5620, + SpvExecutionModeRoundingModeRTNINTEL = 5621, + SpvExecutionModeFloatingPointModeALTINTEL = 5622, + SpvExecutionModeFloatingPointModeIEEEINTEL = 5623, + SpvExecutionModeMaxWorkgroupSizeINTEL = 5893, + SpvExecutionModeMaxWorkDimINTEL = 5894, + SpvExecutionModeNoGlobalOffsetINTEL = 5895, + SpvExecutionModeNumSIMDWorkitemsINTEL = 5896, + SpvExecutionModeSchedulerTargetFmaxMhzINTEL = 5903, + SpvExecutionModeNamedBarrierCountINTEL = 6417, + SpvExecutionModeMax = 0x7fffffff, +} SpvExecutionMode; + +typedef enum SpvStorageClass_ { + SpvStorageClassUniformConstant = 0, + SpvStorageClassInput = 1, + SpvStorageClassUniform = 2, + SpvStorageClassOutput = 3, + SpvStorageClassWorkgroup = 4, + SpvStorageClassCrossWorkgroup = 5, + SpvStorageClassPrivate = 6, + SpvStorageClassFunction = 7, + SpvStorageClassGeneric = 8, + SpvStorageClassPushConstant = 9, + SpvStorageClassAtomicCounter = 10, + SpvStorageClassImage = 11, + SpvStorageClassStorageBuffer = 12, + SpvStorageClassCallableDataKHR = 5328, + SpvStorageClassCallableDataNV = 5328, + SpvStorageClassIncomingCallableDataKHR = 5329, + SpvStorageClassIncomingCallableDataNV = 5329, + SpvStorageClassRayPayloadKHR = 5338, + SpvStorageClassRayPayloadNV = 5338, + SpvStorageClassHitAttributeKHR = 5339, + SpvStorageClassHitAttributeNV = 5339, + SpvStorageClassIncomingRayPayloadKHR = 5342, + SpvStorageClassIncomingRayPayloadNV = 5342, + SpvStorageClassShaderRecordBufferKHR = 5343, + SpvStorageClassShaderRecordBufferNV = 5343, + SpvStorageClassPhysicalStorageBuffer = 5349, + SpvStorageClassPhysicalStorageBufferEXT = 5349, + SpvStorageClassTaskPayloadWorkgroupEXT = 5402, + SpvStorageClassCodeSectionINTEL = 5605, + SpvStorageClassDeviceOnlyINTEL = 5936, + SpvStorageClassHostOnlyINTEL = 5937, + SpvStorageClassMax = 0x7fffffff, +} SpvStorageClass; + +typedef enum SpvDim_ { + SpvDim1D = 0, + SpvDim2D = 1, + SpvDim3D = 2, + SpvDimCube = 3, + SpvDimRect = 4, + SpvDimBuffer = 5, + SpvDimSubpassData = 6, + SpvDimMax = 0x7fffffff, +} SpvDim; + +typedef enum SpvSamplerAddressingMode_ { + SpvSamplerAddressingModeNone = 0, + SpvSamplerAddressingModeClampToEdge = 1, + SpvSamplerAddressingModeClamp = 2, + SpvSamplerAddressingModeRepeat = 3, + SpvSamplerAddressingModeRepeatMirrored = 4, + SpvSamplerAddressingModeMax = 0x7fffffff, +} SpvSamplerAddressingMode; + +typedef enum SpvSamplerFilterMode_ { + SpvSamplerFilterModeNearest = 0, + SpvSamplerFilterModeLinear = 1, + SpvSamplerFilterModeMax = 0x7fffffff, +} SpvSamplerFilterMode; + +typedef enum SpvImageFormat_ { + SpvImageFormatUnknown = 0, + SpvImageFormatRgba32f = 1, + SpvImageFormatRgba16f = 2, + SpvImageFormatR32f = 3, + SpvImageFormatRgba8 = 4, + SpvImageFormatRgba8Snorm = 5, + SpvImageFormatRg32f = 6, + SpvImageFormatRg16f = 7, + SpvImageFormatR11fG11fB10f = 8, + SpvImageFormatR16f = 9, + SpvImageFormatRgba16 = 10, + SpvImageFormatRgb10A2 = 11, + SpvImageFormatRg16 = 12, + SpvImageFormatRg8 = 13, + SpvImageFormatR16 = 14, + SpvImageFormatR8 = 15, + SpvImageFormatRgba16Snorm = 16, + SpvImageFormatRg16Snorm = 17, + SpvImageFormatRg8Snorm = 18, + SpvImageFormatR16Snorm = 19, + SpvImageFormatR8Snorm = 20, + SpvImageFormatRgba32i = 21, + SpvImageFormatRgba16i = 22, + SpvImageFormatRgba8i = 23, + SpvImageFormatR32i = 24, + SpvImageFormatRg32i = 25, + SpvImageFormatRg16i = 26, + SpvImageFormatRg8i = 27, + SpvImageFormatR16i = 28, + SpvImageFormatR8i = 29, + SpvImageFormatRgba32ui = 30, + SpvImageFormatRgba16ui = 31, + SpvImageFormatRgba8ui = 32, + SpvImageFormatR32ui = 33, + SpvImageFormatRgb10a2ui = 34, + SpvImageFormatRg32ui = 35, + SpvImageFormatRg16ui = 36, + SpvImageFormatRg8ui = 37, + SpvImageFormatR16ui = 38, + SpvImageFormatR8ui = 39, + SpvImageFormatR64ui = 40, + SpvImageFormatR64i = 41, + SpvImageFormatMax = 0x7fffffff, +} SpvImageFormat; + +typedef enum SpvImageChannelOrder_ { + SpvImageChannelOrderR = 0, + SpvImageChannelOrderA = 1, + SpvImageChannelOrderRG = 2, + SpvImageChannelOrderRA = 3, + SpvImageChannelOrderRGB = 4, + SpvImageChannelOrderRGBA = 5, + SpvImageChannelOrderBGRA = 6, + SpvImageChannelOrderARGB = 7, + SpvImageChannelOrderIntensity = 8, + SpvImageChannelOrderLuminance = 9, + SpvImageChannelOrderRx = 10, + SpvImageChannelOrderRGx = 11, + SpvImageChannelOrderRGBx = 12, + SpvImageChannelOrderDepth = 13, + SpvImageChannelOrderDepthStencil = 14, + SpvImageChannelOrdersRGB = 15, + SpvImageChannelOrdersRGBx = 16, + SpvImageChannelOrdersRGBA = 17, + SpvImageChannelOrdersBGRA = 18, + SpvImageChannelOrderABGR = 19, + SpvImageChannelOrderMax = 0x7fffffff, +} SpvImageChannelOrder; + +typedef enum SpvImageChannelDataType_ { + SpvImageChannelDataTypeSnormInt8 = 0, + SpvImageChannelDataTypeSnormInt16 = 1, + SpvImageChannelDataTypeUnormInt8 = 2, + SpvImageChannelDataTypeUnormInt16 = 3, + SpvImageChannelDataTypeUnormShort565 = 4, + SpvImageChannelDataTypeUnormShort555 = 5, + SpvImageChannelDataTypeUnormInt101010 = 6, + SpvImageChannelDataTypeSignedInt8 = 7, + SpvImageChannelDataTypeSignedInt16 = 8, + SpvImageChannelDataTypeSignedInt32 = 9, + SpvImageChannelDataTypeUnsignedInt8 = 10, + SpvImageChannelDataTypeUnsignedInt16 = 11, + SpvImageChannelDataTypeUnsignedInt32 = 12, + SpvImageChannelDataTypeHalfFloat = 13, + SpvImageChannelDataTypeFloat = 14, + SpvImageChannelDataTypeUnormInt24 = 15, + SpvImageChannelDataTypeUnormInt101010_2 = 16, + SpvImageChannelDataTypeMax = 0x7fffffff, +} SpvImageChannelDataType; + +typedef enum SpvImageOperandsShift_ { + SpvImageOperandsBiasShift = 0, + SpvImageOperandsLodShift = 1, + SpvImageOperandsGradShift = 2, + SpvImageOperandsConstOffsetShift = 3, + SpvImageOperandsOffsetShift = 4, + SpvImageOperandsConstOffsetsShift = 5, + SpvImageOperandsSampleShift = 6, + SpvImageOperandsMinLodShift = 7, + SpvImageOperandsMakeTexelAvailableShift = 8, + SpvImageOperandsMakeTexelAvailableKHRShift = 8, + SpvImageOperandsMakeTexelVisibleShift = 9, + SpvImageOperandsMakeTexelVisibleKHRShift = 9, + SpvImageOperandsNonPrivateTexelShift = 10, + SpvImageOperandsNonPrivateTexelKHRShift = 10, + SpvImageOperandsVolatileTexelShift = 11, + SpvImageOperandsVolatileTexelKHRShift = 11, + SpvImageOperandsSignExtendShift = 12, + SpvImageOperandsZeroExtendShift = 13, + SpvImageOperandsNontemporalShift = 14, + SpvImageOperandsOffsetsShift = 16, + SpvImageOperandsMax = 0x7fffffff, +} SpvImageOperandsShift; + +typedef enum SpvImageOperandsMask_ { + SpvImageOperandsMaskNone = 0, + SpvImageOperandsBiasMask = 0x00000001, + SpvImageOperandsLodMask = 0x00000002, + SpvImageOperandsGradMask = 0x00000004, + SpvImageOperandsConstOffsetMask = 0x00000008, + SpvImageOperandsOffsetMask = 0x00000010, + SpvImageOperandsConstOffsetsMask = 0x00000020, + SpvImageOperandsSampleMask = 0x00000040, + SpvImageOperandsMinLodMask = 0x00000080, + SpvImageOperandsMakeTexelAvailableMask = 0x00000100, + SpvImageOperandsMakeTexelAvailableKHRMask = 0x00000100, + SpvImageOperandsMakeTexelVisibleMask = 0x00000200, + SpvImageOperandsMakeTexelVisibleKHRMask = 0x00000200, + SpvImageOperandsNonPrivateTexelMask = 0x00000400, + SpvImageOperandsNonPrivateTexelKHRMask = 0x00000400, + SpvImageOperandsVolatileTexelMask = 0x00000800, + SpvImageOperandsVolatileTexelKHRMask = 0x00000800, + SpvImageOperandsSignExtendMask = 0x00001000, + SpvImageOperandsZeroExtendMask = 0x00002000, + SpvImageOperandsNontemporalMask = 0x00004000, + SpvImageOperandsOffsetsMask = 0x00010000, +} SpvImageOperandsMask; + +typedef enum SpvFPFastMathModeShift_ { + SpvFPFastMathModeNotNaNShift = 0, + SpvFPFastMathModeNotInfShift = 1, + SpvFPFastMathModeNSZShift = 2, + SpvFPFastMathModeAllowRecipShift = 3, + SpvFPFastMathModeFastShift = 4, + SpvFPFastMathModeAllowContractFastINTELShift = 16, + SpvFPFastMathModeAllowReassocINTELShift = 17, + SpvFPFastMathModeMax = 0x7fffffff, +} SpvFPFastMathModeShift; + +typedef enum SpvFPFastMathModeMask_ { + SpvFPFastMathModeMaskNone = 0, + SpvFPFastMathModeNotNaNMask = 0x00000001, + SpvFPFastMathModeNotInfMask = 0x00000002, + SpvFPFastMathModeNSZMask = 0x00000004, + SpvFPFastMathModeAllowRecipMask = 0x00000008, + SpvFPFastMathModeFastMask = 0x00000010, + SpvFPFastMathModeAllowContractFastINTELMask = 0x00010000, + SpvFPFastMathModeAllowReassocINTELMask = 0x00020000, +} SpvFPFastMathModeMask; + +typedef enum SpvFPRoundingMode_ { + SpvFPRoundingModeRTE = 0, + SpvFPRoundingModeRTZ = 1, + SpvFPRoundingModeRTP = 2, + SpvFPRoundingModeRTN = 3, + SpvFPRoundingModeMax = 0x7fffffff, +} SpvFPRoundingMode; + +typedef enum SpvLinkageType_ { + SpvLinkageTypeExport = 0, + SpvLinkageTypeImport = 1, + SpvLinkageTypeLinkOnceODR = 2, + SpvLinkageTypeMax = 0x7fffffff, +} SpvLinkageType; + +typedef enum SpvAccessQualifier_ { + SpvAccessQualifierReadOnly = 0, + SpvAccessQualifierWriteOnly = 1, + SpvAccessQualifierReadWrite = 2, + SpvAccessQualifierMax = 0x7fffffff, +} SpvAccessQualifier; + +typedef enum SpvFunctionParameterAttribute_ { + SpvFunctionParameterAttributeZext = 0, + SpvFunctionParameterAttributeSext = 1, + SpvFunctionParameterAttributeByVal = 2, + SpvFunctionParameterAttributeSret = 3, + SpvFunctionParameterAttributeNoAlias = 4, + SpvFunctionParameterAttributeNoCapture = 5, + SpvFunctionParameterAttributeNoWrite = 6, + SpvFunctionParameterAttributeNoReadWrite = 7, + SpvFunctionParameterAttributeMax = 0x7fffffff, +} SpvFunctionParameterAttribute; + +typedef enum SpvDecoration_ { + SpvDecorationRelaxedPrecision = 0, + SpvDecorationSpecId = 1, + SpvDecorationBlock = 2, + SpvDecorationBufferBlock = 3, + SpvDecorationRowMajor = 4, + SpvDecorationColMajor = 5, + SpvDecorationArrayStride = 6, + SpvDecorationMatrixStride = 7, + SpvDecorationGLSLShared = 8, + SpvDecorationGLSLPacked = 9, + SpvDecorationCPacked = 10, + SpvDecorationBuiltIn = 11, + SpvDecorationNoPerspective = 13, + SpvDecorationFlat = 14, + SpvDecorationPatch = 15, + SpvDecorationCentroid = 16, + SpvDecorationSample = 17, + SpvDecorationInvariant = 18, + SpvDecorationRestrict = 19, + SpvDecorationAliased = 20, + SpvDecorationVolatile = 21, + SpvDecorationConstant = 22, + SpvDecorationCoherent = 23, + SpvDecorationNonWritable = 24, + SpvDecorationNonReadable = 25, + SpvDecorationUniform = 26, + SpvDecorationUniformId = 27, + SpvDecorationSaturatedConversion = 28, + SpvDecorationStream = 29, + SpvDecorationLocation = 30, + SpvDecorationComponent = 31, + SpvDecorationIndex = 32, + SpvDecorationBinding = 33, + SpvDecorationDescriptorSet = 34, + SpvDecorationOffset = 35, + SpvDecorationXfbBuffer = 36, + SpvDecorationXfbStride = 37, + SpvDecorationFuncParamAttr = 38, + SpvDecorationFPRoundingMode = 39, + SpvDecorationFPFastMathMode = 40, + SpvDecorationLinkageAttributes = 41, + SpvDecorationNoContraction = 42, + SpvDecorationInputAttachmentIndex = 43, + SpvDecorationAlignment = 44, + SpvDecorationMaxByteOffset = 45, + SpvDecorationAlignmentId = 46, + SpvDecorationMaxByteOffsetId = 47, + SpvDecorationNoSignedWrap = 4469, + SpvDecorationNoUnsignedWrap = 4470, + SpvDecorationExplicitInterpAMD = 4999, + SpvDecorationOverrideCoverageNV = 5248, + SpvDecorationPassthroughNV = 5250, + SpvDecorationViewportRelativeNV = 5252, + SpvDecorationSecondaryViewportRelativeNV = 5256, + SpvDecorationPerPrimitiveEXT = 5271, + SpvDecorationPerPrimitiveNV = 5271, + SpvDecorationPerViewNV = 5272, + SpvDecorationPerTaskNV = 5273, + SpvDecorationPerVertexKHR = 5285, + SpvDecorationPerVertexNV = 5285, + SpvDecorationNonUniform = 5300, + SpvDecorationNonUniformEXT = 5300, + SpvDecorationRestrictPointer = 5355, + SpvDecorationRestrictPointerEXT = 5355, + SpvDecorationAliasedPointer = 5356, + SpvDecorationAliasedPointerEXT = 5356, + SpvDecorationBindlessSamplerNV = 5398, + SpvDecorationBindlessImageNV = 5399, + SpvDecorationBoundSamplerNV = 5400, + SpvDecorationBoundImageNV = 5401, + SpvDecorationSIMTCallINTEL = 5599, + SpvDecorationReferencedIndirectlyINTEL = 5602, + SpvDecorationClobberINTEL = 5607, + SpvDecorationSideEffectsINTEL = 5608, + SpvDecorationVectorComputeVariableINTEL = 5624, + SpvDecorationFuncParamIOKindINTEL = 5625, + SpvDecorationVectorComputeFunctionINTEL = 5626, + SpvDecorationStackCallINTEL = 5627, + SpvDecorationGlobalVariableOffsetINTEL = 5628, + SpvDecorationCounterBuffer = 5634, + SpvDecorationHlslCounterBufferGOOGLE = 5634, + SpvDecorationHlslSemanticGOOGLE = 5635, + SpvDecorationUserSemantic = 5635, + SpvDecorationUserTypeGOOGLE = 5636, + SpvDecorationFunctionRoundingModeINTEL = 5822, + SpvDecorationFunctionDenormModeINTEL = 5823, + SpvDecorationRegisterINTEL = 5825, + SpvDecorationMemoryINTEL = 5826, + SpvDecorationNumbanksINTEL = 5827, + SpvDecorationBankwidthINTEL = 5828, + SpvDecorationMaxPrivateCopiesINTEL = 5829, + SpvDecorationSinglepumpINTEL = 5830, + SpvDecorationDoublepumpINTEL = 5831, + SpvDecorationMaxReplicatesINTEL = 5832, + SpvDecorationSimpleDualPortINTEL = 5833, + SpvDecorationMergeINTEL = 5834, + SpvDecorationBankBitsINTEL = 5835, + SpvDecorationForcePow2DepthINTEL = 5836, + SpvDecorationBurstCoalesceINTEL = 5899, + SpvDecorationCacheSizeINTEL = 5900, + SpvDecorationDontStaticallyCoalesceINTEL = 5901, + SpvDecorationPrefetchINTEL = 5902, + SpvDecorationStallEnableINTEL = 5905, + SpvDecorationFuseLoopsInFunctionINTEL = 5907, + SpvDecorationAliasScopeINTEL = 5914, + SpvDecorationNoAliasINTEL = 5915, + SpvDecorationBufferLocationINTEL = 5921, + SpvDecorationIOPipeStorageINTEL = 5944, + SpvDecorationFunctionFloatingPointModeINTEL = 6080, + SpvDecorationSingleElementVectorINTEL = 6085, + SpvDecorationVectorComputeCallableFunctionINTEL = 6087, + SpvDecorationMediaBlockIOINTEL = 6140, + SpvDecorationMax = 0x7fffffff, +} SpvDecoration; + +typedef enum SpvBuiltIn_ { + SpvBuiltInPosition = 0, + SpvBuiltInPointSize = 1, + SpvBuiltInClipDistance = 3, + SpvBuiltInCullDistance = 4, + SpvBuiltInVertexId = 5, + SpvBuiltInInstanceId = 6, + SpvBuiltInPrimitiveId = 7, + SpvBuiltInInvocationId = 8, + SpvBuiltInLayer = 9, + SpvBuiltInViewportIndex = 10, + SpvBuiltInTessLevelOuter = 11, + SpvBuiltInTessLevelInner = 12, + SpvBuiltInTessCoord = 13, + SpvBuiltInPatchVertices = 14, + SpvBuiltInFragCoord = 15, + SpvBuiltInPointCoord = 16, + SpvBuiltInFrontFacing = 17, + SpvBuiltInSampleId = 18, + SpvBuiltInSamplePosition = 19, + SpvBuiltInSampleMask = 20, + SpvBuiltInFragDepth = 22, + SpvBuiltInHelperInvocation = 23, + SpvBuiltInNumWorkgroups = 24, + SpvBuiltInWorkgroupSize = 25, + SpvBuiltInWorkgroupId = 26, + SpvBuiltInLocalInvocationId = 27, + SpvBuiltInGlobalInvocationId = 28, + SpvBuiltInLocalInvocationIndex = 29, + SpvBuiltInWorkDim = 30, + SpvBuiltInGlobalSize = 31, + SpvBuiltInEnqueuedWorkgroupSize = 32, + SpvBuiltInGlobalOffset = 33, + SpvBuiltInGlobalLinearId = 34, + SpvBuiltInSubgroupSize = 36, + SpvBuiltInSubgroupMaxSize = 37, + SpvBuiltInNumSubgroups = 38, + SpvBuiltInNumEnqueuedSubgroups = 39, + SpvBuiltInSubgroupId = 40, + SpvBuiltInSubgroupLocalInvocationId = 41, + SpvBuiltInVertexIndex = 42, + SpvBuiltInInstanceIndex = 43, + SpvBuiltInSubgroupEqMask = 4416, + SpvBuiltInSubgroupEqMaskKHR = 4416, + SpvBuiltInSubgroupGeMask = 4417, + SpvBuiltInSubgroupGeMaskKHR = 4417, + SpvBuiltInSubgroupGtMask = 4418, + SpvBuiltInSubgroupGtMaskKHR = 4418, + SpvBuiltInSubgroupLeMask = 4419, + SpvBuiltInSubgroupLeMaskKHR = 4419, + SpvBuiltInSubgroupLtMask = 4420, + SpvBuiltInSubgroupLtMaskKHR = 4420, + SpvBuiltInBaseVertex = 4424, + SpvBuiltInBaseInstance = 4425, + SpvBuiltInDrawIndex = 4426, + SpvBuiltInPrimitiveShadingRateKHR = 4432, + SpvBuiltInDeviceIndex = 4438, + SpvBuiltInViewIndex = 4440, + SpvBuiltInShadingRateKHR = 4444, + SpvBuiltInBaryCoordNoPerspAMD = 4992, + SpvBuiltInBaryCoordNoPerspCentroidAMD = 4993, + SpvBuiltInBaryCoordNoPerspSampleAMD = 4994, + SpvBuiltInBaryCoordSmoothAMD = 4995, + SpvBuiltInBaryCoordSmoothCentroidAMD = 4996, + SpvBuiltInBaryCoordSmoothSampleAMD = 4997, + SpvBuiltInBaryCoordPullModelAMD = 4998, + SpvBuiltInFragStencilRefEXT = 5014, + SpvBuiltInViewportMaskNV = 5253, + SpvBuiltInSecondaryPositionNV = 5257, + SpvBuiltInSecondaryViewportMaskNV = 5258, + SpvBuiltInPositionPerViewNV = 5261, + SpvBuiltInViewportMaskPerViewNV = 5262, + SpvBuiltInFullyCoveredEXT = 5264, + SpvBuiltInTaskCountNV = 5274, + SpvBuiltInPrimitiveCountNV = 5275, + SpvBuiltInPrimitiveIndicesNV = 5276, + SpvBuiltInClipDistancePerViewNV = 5277, + SpvBuiltInCullDistancePerViewNV = 5278, + SpvBuiltInLayerPerViewNV = 5279, + SpvBuiltInMeshViewCountNV = 5280, + SpvBuiltInMeshViewIndicesNV = 5281, + SpvBuiltInBaryCoordKHR = 5286, + SpvBuiltInBaryCoordNV = 5286, + SpvBuiltInBaryCoordNoPerspKHR = 5287, + SpvBuiltInBaryCoordNoPerspNV = 5287, + SpvBuiltInFragSizeEXT = 5292, + SpvBuiltInFragmentSizeNV = 5292, + SpvBuiltInFragInvocationCountEXT = 5293, + SpvBuiltInInvocationsPerPixelNV = 5293, + SpvBuiltInPrimitivePointIndicesEXT = 5294, + SpvBuiltInPrimitiveLineIndicesEXT = 5295, + SpvBuiltInPrimitiveTriangleIndicesEXT = 5296, + SpvBuiltInCullPrimitiveEXT = 5299, + SpvBuiltInLaunchIdKHR = 5319, + SpvBuiltInLaunchIdNV = 5319, + SpvBuiltInLaunchSizeKHR = 5320, + SpvBuiltInLaunchSizeNV = 5320, + SpvBuiltInWorldRayOriginKHR = 5321, + SpvBuiltInWorldRayOriginNV = 5321, + SpvBuiltInWorldRayDirectionKHR = 5322, + SpvBuiltInWorldRayDirectionNV = 5322, + SpvBuiltInObjectRayOriginKHR = 5323, + SpvBuiltInObjectRayOriginNV = 5323, + SpvBuiltInObjectRayDirectionKHR = 5324, + SpvBuiltInObjectRayDirectionNV = 5324, + SpvBuiltInRayTminKHR = 5325, + SpvBuiltInRayTminNV = 5325, + SpvBuiltInRayTmaxKHR = 5326, + SpvBuiltInRayTmaxNV = 5326, + SpvBuiltInInstanceCustomIndexKHR = 5327, + SpvBuiltInInstanceCustomIndexNV = 5327, + SpvBuiltInObjectToWorldKHR = 5330, + SpvBuiltInObjectToWorldNV = 5330, + SpvBuiltInWorldToObjectKHR = 5331, + SpvBuiltInWorldToObjectNV = 5331, + SpvBuiltInHitTNV = 5332, + SpvBuiltInHitKindKHR = 5333, + SpvBuiltInHitKindNV = 5333, + SpvBuiltInCurrentRayTimeNV = 5334, + SpvBuiltInIncomingRayFlagsKHR = 5351, + SpvBuiltInIncomingRayFlagsNV = 5351, + SpvBuiltInRayGeometryIndexKHR = 5352, + SpvBuiltInWarpsPerSMNV = 5374, + SpvBuiltInSMCountNV = 5375, + SpvBuiltInWarpIDNV = 5376, + SpvBuiltInSMIDNV = 5377, + SpvBuiltInCullMaskKHR = 6021, + SpvBuiltInMax = 0x7fffffff, +} SpvBuiltIn; + +typedef enum SpvSelectionControlShift_ { + SpvSelectionControlFlattenShift = 0, + SpvSelectionControlDontFlattenShift = 1, + SpvSelectionControlMax = 0x7fffffff, +} SpvSelectionControlShift; + +typedef enum SpvSelectionControlMask_ { + SpvSelectionControlMaskNone = 0, + SpvSelectionControlFlattenMask = 0x00000001, + SpvSelectionControlDontFlattenMask = 0x00000002, +} SpvSelectionControlMask; + +typedef enum SpvLoopControlShift_ { + SpvLoopControlUnrollShift = 0, + SpvLoopControlDontUnrollShift = 1, + SpvLoopControlDependencyInfiniteShift = 2, + SpvLoopControlDependencyLengthShift = 3, + SpvLoopControlMinIterationsShift = 4, + SpvLoopControlMaxIterationsShift = 5, + SpvLoopControlIterationMultipleShift = 6, + SpvLoopControlPeelCountShift = 7, + SpvLoopControlPartialCountShift = 8, + SpvLoopControlInitiationIntervalINTELShift = 16, + SpvLoopControlMaxConcurrencyINTELShift = 17, + SpvLoopControlDependencyArrayINTELShift = 18, + SpvLoopControlPipelineEnableINTELShift = 19, + SpvLoopControlLoopCoalesceINTELShift = 20, + SpvLoopControlMaxInterleavingINTELShift = 21, + SpvLoopControlSpeculatedIterationsINTELShift = 22, + SpvLoopControlNoFusionINTELShift = 23, + SpvLoopControlMax = 0x7fffffff, +} SpvLoopControlShift; + +typedef enum SpvLoopControlMask_ { + SpvLoopControlMaskNone = 0, + SpvLoopControlUnrollMask = 0x00000001, + SpvLoopControlDontUnrollMask = 0x00000002, + SpvLoopControlDependencyInfiniteMask = 0x00000004, + SpvLoopControlDependencyLengthMask = 0x00000008, + SpvLoopControlMinIterationsMask = 0x00000010, + SpvLoopControlMaxIterationsMask = 0x00000020, + SpvLoopControlIterationMultipleMask = 0x00000040, + SpvLoopControlPeelCountMask = 0x00000080, + SpvLoopControlPartialCountMask = 0x00000100, + SpvLoopControlInitiationIntervalINTELMask = 0x00010000, + SpvLoopControlMaxConcurrencyINTELMask = 0x00020000, + SpvLoopControlDependencyArrayINTELMask = 0x00040000, + SpvLoopControlPipelineEnableINTELMask = 0x00080000, + SpvLoopControlLoopCoalesceINTELMask = 0x00100000, + SpvLoopControlMaxInterleavingINTELMask = 0x00200000, + SpvLoopControlSpeculatedIterationsINTELMask = 0x00400000, + SpvLoopControlNoFusionINTELMask = 0x00800000, +} SpvLoopControlMask; + +typedef enum SpvFunctionControlShift_ { + SpvFunctionControlInlineShift = 0, + SpvFunctionControlDontInlineShift = 1, + SpvFunctionControlPureShift = 2, + SpvFunctionControlConstShift = 3, + SpvFunctionControlOptNoneINTELShift = 16, + SpvFunctionControlMax = 0x7fffffff, +} SpvFunctionControlShift; + +typedef enum SpvFunctionControlMask_ { + SpvFunctionControlMaskNone = 0, + SpvFunctionControlInlineMask = 0x00000001, + SpvFunctionControlDontInlineMask = 0x00000002, + SpvFunctionControlPureMask = 0x00000004, + SpvFunctionControlConstMask = 0x00000008, + SpvFunctionControlOptNoneINTELMask = 0x00010000, +} SpvFunctionControlMask; + +typedef enum SpvMemorySemanticsShift_ { + SpvMemorySemanticsAcquireShift = 1, + SpvMemorySemanticsReleaseShift = 2, + SpvMemorySemanticsAcquireReleaseShift = 3, + SpvMemorySemanticsSequentiallyConsistentShift = 4, + SpvMemorySemanticsUniformMemoryShift = 6, + SpvMemorySemanticsSubgroupMemoryShift = 7, + SpvMemorySemanticsWorkgroupMemoryShift = 8, + SpvMemorySemanticsCrossWorkgroupMemoryShift = 9, + SpvMemorySemanticsAtomicCounterMemoryShift = 10, + SpvMemorySemanticsImageMemoryShift = 11, + SpvMemorySemanticsOutputMemoryShift = 12, + SpvMemorySemanticsOutputMemoryKHRShift = 12, + SpvMemorySemanticsMakeAvailableShift = 13, + SpvMemorySemanticsMakeAvailableKHRShift = 13, + SpvMemorySemanticsMakeVisibleShift = 14, + SpvMemorySemanticsMakeVisibleKHRShift = 14, + SpvMemorySemanticsVolatileShift = 15, + SpvMemorySemanticsMax = 0x7fffffff, +} SpvMemorySemanticsShift; + +typedef enum SpvMemorySemanticsMask_ { + SpvMemorySemanticsMaskNone = 0, + SpvMemorySemanticsAcquireMask = 0x00000002, + SpvMemorySemanticsReleaseMask = 0x00000004, + SpvMemorySemanticsAcquireReleaseMask = 0x00000008, + SpvMemorySemanticsSequentiallyConsistentMask = 0x00000010, + SpvMemorySemanticsUniformMemoryMask = 0x00000040, + SpvMemorySemanticsSubgroupMemoryMask = 0x00000080, + SpvMemorySemanticsWorkgroupMemoryMask = 0x00000100, + SpvMemorySemanticsCrossWorkgroupMemoryMask = 0x00000200, + SpvMemorySemanticsAtomicCounterMemoryMask = 0x00000400, + SpvMemorySemanticsImageMemoryMask = 0x00000800, + SpvMemorySemanticsOutputMemoryMask = 0x00001000, + SpvMemorySemanticsOutputMemoryKHRMask = 0x00001000, + SpvMemorySemanticsMakeAvailableMask = 0x00002000, + SpvMemorySemanticsMakeAvailableKHRMask = 0x00002000, + SpvMemorySemanticsMakeVisibleMask = 0x00004000, + SpvMemorySemanticsMakeVisibleKHRMask = 0x00004000, + SpvMemorySemanticsVolatileMask = 0x00008000, +} SpvMemorySemanticsMask; + +typedef enum SpvMemoryAccessShift_ { + SpvMemoryAccessVolatileShift = 0, + SpvMemoryAccessAlignedShift = 1, + SpvMemoryAccessNontemporalShift = 2, + SpvMemoryAccessMakePointerAvailableShift = 3, + SpvMemoryAccessMakePointerAvailableKHRShift = 3, + SpvMemoryAccessMakePointerVisibleShift = 4, + SpvMemoryAccessMakePointerVisibleKHRShift = 4, + SpvMemoryAccessNonPrivatePointerShift = 5, + SpvMemoryAccessNonPrivatePointerKHRShift = 5, + SpvMemoryAccessAliasScopeINTELMaskShift = 16, + SpvMemoryAccessNoAliasINTELMaskShift = 17, + SpvMemoryAccessMax = 0x7fffffff, +} SpvMemoryAccessShift; + +typedef enum SpvMemoryAccessMask_ { + SpvMemoryAccessMaskNone = 0, + SpvMemoryAccessVolatileMask = 0x00000001, + SpvMemoryAccessAlignedMask = 0x00000002, + SpvMemoryAccessNontemporalMask = 0x00000004, + SpvMemoryAccessMakePointerAvailableMask = 0x00000008, + SpvMemoryAccessMakePointerAvailableKHRMask = 0x00000008, + SpvMemoryAccessMakePointerVisibleMask = 0x00000010, + SpvMemoryAccessMakePointerVisibleKHRMask = 0x00000010, + SpvMemoryAccessNonPrivatePointerMask = 0x00000020, + SpvMemoryAccessNonPrivatePointerKHRMask = 0x00000020, + SpvMemoryAccessAliasScopeINTELMaskMask = 0x00010000, + SpvMemoryAccessNoAliasINTELMaskMask = 0x00020000, +} SpvMemoryAccessMask; + +typedef enum SpvScope_ { + SpvScopeCrossDevice = 0, + SpvScopeDevice = 1, + SpvScopeWorkgroup = 2, + SpvScopeSubgroup = 3, + SpvScopeInvocation = 4, + SpvScopeQueueFamily = 5, + SpvScopeQueueFamilyKHR = 5, + SpvScopeShaderCallKHR = 6, + SpvScopeMax = 0x7fffffff, +} SpvScope; + +typedef enum SpvGroupOperation_ { + SpvGroupOperationReduce = 0, + SpvGroupOperationInclusiveScan = 1, + SpvGroupOperationExclusiveScan = 2, + SpvGroupOperationClusteredReduce = 3, + SpvGroupOperationPartitionedReduceNV = 6, + SpvGroupOperationPartitionedInclusiveScanNV = 7, + SpvGroupOperationPartitionedExclusiveScanNV = 8, + SpvGroupOperationMax = 0x7fffffff, +} SpvGroupOperation; + +typedef enum SpvKernelEnqueueFlags_ { + SpvKernelEnqueueFlagsNoWait = 0, + SpvKernelEnqueueFlagsWaitKernel = 1, + SpvKernelEnqueueFlagsWaitWorkGroup = 2, + SpvKernelEnqueueFlagsMax = 0x7fffffff, +} SpvKernelEnqueueFlags; + +typedef enum SpvKernelProfilingInfoShift_ { + SpvKernelProfilingInfoCmdExecTimeShift = 0, + SpvKernelProfilingInfoMax = 0x7fffffff, +} SpvKernelProfilingInfoShift; + +typedef enum SpvKernelProfilingInfoMask_ { + SpvKernelProfilingInfoMaskNone = 0, + SpvKernelProfilingInfoCmdExecTimeMask = 0x00000001, +} SpvKernelProfilingInfoMask; + +typedef enum SpvCapability_ { + SpvCapabilityMatrix = 0, + SpvCapabilityShader = 1, + SpvCapabilityGeometry = 2, + SpvCapabilityTessellation = 3, + SpvCapabilityAddresses = 4, + SpvCapabilityLinkage = 5, + SpvCapabilityKernel = 6, + SpvCapabilityVector16 = 7, + SpvCapabilityFloat16Buffer = 8, + SpvCapabilityFloat16 = 9, + SpvCapabilityFloat64 = 10, + SpvCapabilityInt64 = 11, + SpvCapabilityInt64Atomics = 12, + SpvCapabilityImageBasic = 13, + SpvCapabilityImageReadWrite = 14, + SpvCapabilityImageMipmap = 15, + SpvCapabilityPipes = 17, + SpvCapabilityGroups = 18, + SpvCapabilityDeviceEnqueue = 19, + SpvCapabilityLiteralSampler = 20, + SpvCapabilityAtomicStorage = 21, + SpvCapabilityInt16 = 22, + SpvCapabilityTessellationPointSize = 23, + SpvCapabilityGeometryPointSize = 24, + SpvCapabilityImageGatherExtended = 25, + SpvCapabilityStorageImageMultisample = 27, + SpvCapabilityUniformBufferArrayDynamicIndexing = 28, + SpvCapabilitySampledImageArrayDynamicIndexing = 29, + SpvCapabilityStorageBufferArrayDynamicIndexing = 30, + SpvCapabilityStorageImageArrayDynamicIndexing = 31, + SpvCapabilityClipDistance = 32, + SpvCapabilityCullDistance = 33, + SpvCapabilityImageCubeArray = 34, + SpvCapabilitySampleRateShading = 35, + SpvCapabilityImageRect = 36, + SpvCapabilitySampledRect = 37, + SpvCapabilityGenericPointer = 38, + SpvCapabilityInt8 = 39, + SpvCapabilityInputAttachment = 40, + SpvCapabilitySparseResidency = 41, + SpvCapabilityMinLod = 42, + SpvCapabilitySampled1D = 43, + SpvCapabilityImage1D = 44, + SpvCapabilitySampledCubeArray = 45, + SpvCapabilitySampledBuffer = 46, + SpvCapabilityImageBuffer = 47, + SpvCapabilityImageMSArray = 48, + SpvCapabilityStorageImageExtendedFormats = 49, + SpvCapabilityImageQuery = 50, + SpvCapabilityDerivativeControl = 51, + SpvCapabilityInterpolationFunction = 52, + SpvCapabilityTransformFeedback = 53, + SpvCapabilityGeometryStreams = 54, + SpvCapabilityStorageImageReadWithoutFormat = 55, + SpvCapabilityStorageImageWriteWithoutFormat = 56, + SpvCapabilityMultiViewport = 57, + SpvCapabilitySubgroupDispatch = 58, + SpvCapabilityNamedBarrier = 59, + SpvCapabilityPipeStorage = 60, + SpvCapabilityGroupNonUniform = 61, + SpvCapabilityGroupNonUniformVote = 62, + SpvCapabilityGroupNonUniformArithmetic = 63, + SpvCapabilityGroupNonUniformBallot = 64, + SpvCapabilityGroupNonUniformShuffle = 65, + SpvCapabilityGroupNonUniformShuffleRelative = 66, + SpvCapabilityGroupNonUniformClustered = 67, + SpvCapabilityGroupNonUniformQuad = 68, + SpvCapabilityShaderLayer = 69, + SpvCapabilityShaderViewportIndex = 70, + SpvCapabilityUniformDecoration = 71, + SpvCapabilityFragmentShadingRateKHR = 4422, + SpvCapabilitySubgroupBallotKHR = 4423, + SpvCapabilityDrawParameters = 4427, + SpvCapabilityWorkgroupMemoryExplicitLayoutKHR = 4428, + SpvCapabilityWorkgroupMemoryExplicitLayout8BitAccessKHR = 4429, + SpvCapabilityWorkgroupMemoryExplicitLayout16BitAccessKHR = 4430, + SpvCapabilitySubgroupVoteKHR = 4431, + SpvCapabilityStorageBuffer16BitAccess = 4433, + SpvCapabilityStorageUniformBufferBlock16 = 4433, + SpvCapabilityStorageUniform16 = 4434, + SpvCapabilityUniformAndStorageBuffer16BitAccess = 4434, + SpvCapabilityStoragePushConstant16 = 4435, + SpvCapabilityStorageInputOutput16 = 4436, + SpvCapabilityDeviceGroup = 4437, + SpvCapabilityMultiView = 4439, + SpvCapabilityVariablePointersStorageBuffer = 4441, + SpvCapabilityVariablePointers = 4442, + SpvCapabilityAtomicStorageOps = 4445, + SpvCapabilitySampleMaskPostDepthCoverage = 4447, + SpvCapabilityStorageBuffer8BitAccess = 4448, + SpvCapabilityUniformAndStorageBuffer8BitAccess = 4449, + SpvCapabilityStoragePushConstant8 = 4450, + SpvCapabilityDenormPreserve = 4464, + SpvCapabilityDenormFlushToZero = 4465, + SpvCapabilitySignedZeroInfNanPreserve = 4466, + SpvCapabilityRoundingModeRTE = 4467, + SpvCapabilityRoundingModeRTZ = 4468, + SpvCapabilityRayQueryProvisionalKHR = 4471, + SpvCapabilityRayQueryKHR = 4472, + SpvCapabilityRayTraversalPrimitiveCullingKHR = 4478, + SpvCapabilityRayTracingKHR = 4479, + SpvCapabilityFloat16ImageAMD = 5008, + SpvCapabilityImageGatherBiasLodAMD = 5009, + SpvCapabilityFragmentMaskAMD = 5010, + SpvCapabilityStencilExportEXT = 5013, + SpvCapabilityImageReadWriteLodAMD = 5015, + SpvCapabilityInt64ImageEXT = 5016, + SpvCapabilityShaderClockKHR = 5055, + SpvCapabilitySampleMaskOverrideCoverageNV = 5249, + SpvCapabilityGeometryShaderPassthroughNV = 5251, + SpvCapabilityShaderViewportIndexLayerEXT = 5254, + SpvCapabilityShaderViewportIndexLayerNV = 5254, + SpvCapabilityShaderViewportMaskNV = 5255, + SpvCapabilityShaderStereoViewNV = 5259, + SpvCapabilityPerViewAttributesNV = 5260, + SpvCapabilityFragmentFullyCoveredEXT = 5265, + SpvCapabilityMeshShadingNV = 5266, + SpvCapabilityImageFootprintNV = 5282, + SpvCapabilityMeshShadingEXT = 5283, + SpvCapabilityFragmentBarycentricKHR = 5284, + SpvCapabilityFragmentBarycentricNV = 5284, + SpvCapabilityComputeDerivativeGroupQuadsNV = 5288, + SpvCapabilityFragmentDensityEXT = 5291, + SpvCapabilityShadingRateNV = 5291, + SpvCapabilityGroupNonUniformPartitionedNV = 5297, + SpvCapabilityShaderNonUniform = 5301, + SpvCapabilityShaderNonUniformEXT = 5301, + SpvCapabilityRuntimeDescriptorArray = 5302, + SpvCapabilityRuntimeDescriptorArrayEXT = 5302, + SpvCapabilityInputAttachmentArrayDynamicIndexing = 5303, + SpvCapabilityInputAttachmentArrayDynamicIndexingEXT = 5303, + SpvCapabilityUniformTexelBufferArrayDynamicIndexing = 5304, + SpvCapabilityUniformTexelBufferArrayDynamicIndexingEXT = 5304, + SpvCapabilityStorageTexelBufferArrayDynamicIndexing = 5305, + SpvCapabilityStorageTexelBufferArrayDynamicIndexingEXT = 5305, + SpvCapabilityUniformBufferArrayNonUniformIndexing = 5306, + SpvCapabilityUniformBufferArrayNonUniformIndexingEXT = 5306, + SpvCapabilitySampledImageArrayNonUniformIndexing = 5307, + SpvCapabilitySampledImageArrayNonUniformIndexingEXT = 5307, + SpvCapabilityStorageBufferArrayNonUniformIndexing = 5308, + SpvCapabilityStorageBufferArrayNonUniformIndexingEXT = 5308, + SpvCapabilityStorageImageArrayNonUniformIndexing = 5309, + SpvCapabilityStorageImageArrayNonUniformIndexingEXT = 5309, + SpvCapabilityInputAttachmentArrayNonUniformIndexing = 5310, + SpvCapabilityInputAttachmentArrayNonUniformIndexingEXT = 5310, + SpvCapabilityUniformTexelBufferArrayNonUniformIndexing = 5311, + SpvCapabilityUniformTexelBufferArrayNonUniformIndexingEXT = 5311, + SpvCapabilityStorageTexelBufferArrayNonUniformIndexing = 5312, + SpvCapabilityStorageTexelBufferArrayNonUniformIndexingEXT = 5312, + SpvCapabilityRayTracingNV = 5340, + SpvCapabilityRayTracingMotionBlurNV = 5341, + SpvCapabilityVulkanMemoryModel = 5345, + SpvCapabilityVulkanMemoryModelKHR = 5345, + SpvCapabilityVulkanMemoryModelDeviceScope = 5346, + SpvCapabilityVulkanMemoryModelDeviceScopeKHR = 5346, + SpvCapabilityPhysicalStorageBufferAddresses = 5347, + SpvCapabilityPhysicalStorageBufferAddressesEXT = 5347, + SpvCapabilityComputeDerivativeGroupLinearNV = 5350, + SpvCapabilityRayTracingProvisionalKHR = 5353, + SpvCapabilityCooperativeMatrixNV = 5357, + SpvCapabilityFragmentShaderSampleInterlockEXT = 5363, + SpvCapabilityFragmentShaderShadingRateInterlockEXT = 5372, + SpvCapabilityShaderSMBuiltinsNV = 5373, + SpvCapabilityFragmentShaderPixelInterlockEXT = 5378, + SpvCapabilityDemoteToHelperInvocation = 5379, + SpvCapabilityDemoteToHelperInvocationEXT = 5379, + SpvCapabilityBindlessTextureNV = 5390, + SpvCapabilitySubgroupShuffleINTEL = 5568, + SpvCapabilitySubgroupBufferBlockIOINTEL = 5569, + SpvCapabilitySubgroupImageBlockIOINTEL = 5570, + SpvCapabilitySubgroupImageMediaBlockIOINTEL = 5579, + SpvCapabilityRoundToInfinityINTEL = 5582, + SpvCapabilityFloatingPointModeINTEL = 5583, + SpvCapabilityIntegerFunctions2INTEL = 5584, + SpvCapabilityFunctionPointersINTEL = 5603, + SpvCapabilityIndirectReferencesINTEL = 5604, + SpvCapabilityAsmINTEL = 5606, + SpvCapabilityAtomicFloat32MinMaxEXT = 5612, + SpvCapabilityAtomicFloat64MinMaxEXT = 5613, + SpvCapabilityAtomicFloat16MinMaxEXT = 5616, + SpvCapabilityVectorComputeINTEL = 5617, + SpvCapabilityVectorAnyINTEL = 5619, + SpvCapabilityExpectAssumeKHR = 5629, + SpvCapabilitySubgroupAvcMotionEstimationINTEL = 5696, + SpvCapabilitySubgroupAvcMotionEstimationIntraINTEL = 5697, + SpvCapabilitySubgroupAvcMotionEstimationChromaINTEL = 5698, + SpvCapabilityVariableLengthArrayINTEL = 5817, + SpvCapabilityFunctionFloatControlINTEL = 5821, + SpvCapabilityFPGAMemoryAttributesINTEL = 5824, + SpvCapabilityFPFastMathModeINTEL = 5837, + SpvCapabilityArbitraryPrecisionIntegersINTEL = 5844, + SpvCapabilityArbitraryPrecisionFloatingPointINTEL = 5845, + SpvCapabilityUnstructuredLoopControlsINTEL = 5886, + SpvCapabilityFPGALoopControlsINTEL = 5888, + SpvCapabilityKernelAttributesINTEL = 5892, + SpvCapabilityFPGAKernelAttributesINTEL = 5897, + SpvCapabilityFPGAMemoryAccessesINTEL = 5898, + SpvCapabilityFPGAClusterAttributesINTEL = 5904, + SpvCapabilityLoopFuseINTEL = 5906, + SpvCapabilityMemoryAccessAliasingINTEL = 5910, + SpvCapabilityFPGABufferLocationINTEL = 5920, + SpvCapabilityArbitraryPrecisionFixedPointINTEL = 5922, + SpvCapabilityUSMStorageClassesINTEL = 5935, + SpvCapabilityIOPipesINTEL = 5943, + SpvCapabilityBlockingPipesINTEL = 5945, + SpvCapabilityFPGARegINTEL = 5948, + SpvCapabilityDotProductInputAll = 6016, + SpvCapabilityDotProductInputAllKHR = 6016, + SpvCapabilityDotProductInput4x8Bit = 6017, + SpvCapabilityDotProductInput4x8BitKHR = 6017, + SpvCapabilityDotProductInput4x8BitPacked = 6018, + SpvCapabilityDotProductInput4x8BitPackedKHR = 6018, + SpvCapabilityDotProduct = 6019, + SpvCapabilityDotProductKHR = 6019, + SpvCapabilityRayCullMaskKHR = 6020, + SpvCapabilityBitInstructions = 6025, + SpvCapabilityGroupNonUniformRotateKHR = 6026, + SpvCapabilityAtomicFloat32AddEXT = 6033, + SpvCapabilityAtomicFloat64AddEXT = 6034, + SpvCapabilityLongConstantCompositeINTEL = 6089, + SpvCapabilityOptNoneINTEL = 6094, + SpvCapabilityAtomicFloat16AddEXT = 6095, + SpvCapabilityDebugInfoModuleINTEL = 6114, + SpvCapabilitySplitBarrierINTEL = 6141, + SpvCapabilityGroupUniformArithmeticKHR = 6400, + SpvCapabilityMax = 0x7fffffff, +} SpvCapability; + +typedef enum SpvRayFlagsShift_ { + SpvRayFlagsOpaqueKHRShift = 0, + SpvRayFlagsNoOpaqueKHRShift = 1, + SpvRayFlagsTerminateOnFirstHitKHRShift = 2, + SpvRayFlagsSkipClosestHitShaderKHRShift = 3, + SpvRayFlagsCullBackFacingTrianglesKHRShift = 4, + SpvRayFlagsCullFrontFacingTrianglesKHRShift = 5, + SpvRayFlagsCullOpaqueKHRShift = 6, + SpvRayFlagsCullNoOpaqueKHRShift = 7, + SpvRayFlagsSkipTrianglesKHRShift = 8, + SpvRayFlagsSkipAABBsKHRShift = 9, + SpvRayFlagsMax = 0x7fffffff, +} SpvRayFlagsShift; + +typedef enum SpvRayFlagsMask_ { + SpvRayFlagsMaskNone = 0, + SpvRayFlagsOpaqueKHRMask = 0x00000001, + SpvRayFlagsNoOpaqueKHRMask = 0x00000002, + SpvRayFlagsTerminateOnFirstHitKHRMask = 0x00000004, + SpvRayFlagsSkipClosestHitShaderKHRMask = 0x00000008, + SpvRayFlagsCullBackFacingTrianglesKHRMask = 0x00000010, + SpvRayFlagsCullFrontFacingTrianglesKHRMask = 0x00000020, + SpvRayFlagsCullOpaqueKHRMask = 0x00000040, + SpvRayFlagsCullNoOpaqueKHRMask = 0x00000080, + SpvRayFlagsSkipTrianglesKHRMask = 0x00000100, + SpvRayFlagsSkipAABBsKHRMask = 0x00000200, +} SpvRayFlagsMask; + +typedef enum SpvRayQueryIntersection_ { + SpvRayQueryIntersectionRayQueryCandidateIntersectionKHR = 0, + SpvRayQueryIntersectionRayQueryCommittedIntersectionKHR = 1, + SpvRayQueryIntersectionMax = 0x7fffffff, +} SpvRayQueryIntersection; + +typedef enum SpvRayQueryCommittedIntersectionType_ { + SpvRayQueryCommittedIntersectionTypeRayQueryCommittedIntersectionNoneKHR = 0, + SpvRayQueryCommittedIntersectionTypeRayQueryCommittedIntersectionTriangleKHR = 1, + SpvRayQueryCommittedIntersectionTypeRayQueryCommittedIntersectionGeneratedKHR = 2, + SpvRayQueryCommittedIntersectionTypeMax = 0x7fffffff, +} SpvRayQueryCommittedIntersectionType; + +typedef enum SpvRayQueryCandidateIntersectionType_ { + SpvRayQueryCandidateIntersectionTypeRayQueryCandidateIntersectionTriangleKHR = 0, + SpvRayQueryCandidateIntersectionTypeRayQueryCandidateIntersectionAABBKHR = 1, + SpvRayQueryCandidateIntersectionTypeMax = 0x7fffffff, +} SpvRayQueryCandidateIntersectionType; + +typedef enum SpvFragmentShadingRateShift_ { + SpvFragmentShadingRateVertical2PixelsShift = 0, + SpvFragmentShadingRateVertical4PixelsShift = 1, + SpvFragmentShadingRateHorizontal2PixelsShift = 2, + SpvFragmentShadingRateHorizontal4PixelsShift = 3, + SpvFragmentShadingRateMax = 0x7fffffff, +} SpvFragmentShadingRateShift; + +typedef enum SpvFragmentShadingRateMask_ { + SpvFragmentShadingRateMaskNone = 0, + SpvFragmentShadingRateVertical2PixelsMask = 0x00000001, + SpvFragmentShadingRateVertical4PixelsMask = 0x00000002, + SpvFragmentShadingRateHorizontal2PixelsMask = 0x00000004, + SpvFragmentShadingRateHorizontal4PixelsMask = 0x00000008, +} SpvFragmentShadingRateMask; + +typedef enum SpvFPDenormMode_ { + SpvFPDenormModePreserve = 0, + SpvFPDenormModeFlushToZero = 1, + SpvFPDenormModeMax = 0x7fffffff, +} SpvFPDenormMode; + +typedef enum SpvFPOperationMode_ { + SpvFPOperationModeIEEE = 0, + SpvFPOperationModeALT = 1, + SpvFPOperationModeMax = 0x7fffffff, +} SpvFPOperationMode; + +typedef enum SpvQuantizationModes_ { + SpvQuantizationModesTRN = 0, + SpvQuantizationModesTRN_ZERO = 1, + SpvQuantizationModesRND = 2, + SpvQuantizationModesRND_ZERO = 3, + SpvQuantizationModesRND_INF = 4, + SpvQuantizationModesRND_MIN_INF = 5, + SpvQuantizationModesRND_CONV = 6, + SpvQuantizationModesRND_CONV_ODD = 7, + SpvQuantizationModesMax = 0x7fffffff, +} SpvQuantizationModes; + +typedef enum SpvOverflowModes_ { + SpvOverflowModesWRAP = 0, + SpvOverflowModesSAT = 1, + SpvOverflowModesSAT_ZERO = 2, + SpvOverflowModesSAT_SYM = 3, + SpvOverflowModesMax = 0x7fffffff, +} SpvOverflowModes; + +typedef enum SpvPackedVectorFormat_ { + SpvPackedVectorFormatPackedVectorFormat4x8Bit = 0, + SpvPackedVectorFormatPackedVectorFormat4x8BitKHR = 0, + SpvPackedVectorFormatMax = 0x7fffffff, +} SpvPackedVectorFormat; + +typedef enum SpvOp_ { + SpvOpNop = 0, + SpvOpUndef = 1, + SpvOpSourceContinued = 2, + SpvOpSource = 3, + SpvOpSourceExtension = 4, + SpvOpName = 5, + SpvOpMemberName = 6, + SpvOpString = 7, + SpvOpLine = 8, + SpvOpExtension = 10, + SpvOpExtInstImport = 11, + SpvOpExtInst = 12, + SpvOpMemoryModel = 14, + SpvOpEntryPoint = 15, + SpvOpExecutionMode = 16, + SpvOpCapability = 17, + SpvOpTypeVoid = 19, + SpvOpTypeBool = 20, + SpvOpTypeInt = 21, + SpvOpTypeFloat = 22, + SpvOpTypeVector = 23, + SpvOpTypeMatrix = 24, + SpvOpTypeImage = 25, + SpvOpTypeSampler = 26, + SpvOpTypeSampledImage = 27, + SpvOpTypeArray = 28, + SpvOpTypeRuntimeArray = 29, + SpvOpTypeStruct = 30, + SpvOpTypeOpaque = 31, + SpvOpTypePointer = 32, + SpvOpTypeFunction = 33, + SpvOpTypeEvent = 34, + SpvOpTypeDeviceEvent = 35, + SpvOpTypeReserveId = 36, + SpvOpTypeQueue = 37, + SpvOpTypePipe = 38, + SpvOpTypeForwardPointer = 39, + SpvOpConstantTrue = 41, + SpvOpConstantFalse = 42, + SpvOpConstant = 43, + SpvOpConstantComposite = 44, + SpvOpConstantSampler = 45, + SpvOpConstantNull = 46, + SpvOpSpecConstantTrue = 48, + SpvOpSpecConstantFalse = 49, + SpvOpSpecConstant = 50, + SpvOpSpecConstantComposite = 51, + SpvOpSpecConstantOp = 52, + SpvOpFunction = 54, + SpvOpFunctionParameter = 55, + SpvOpFunctionEnd = 56, + SpvOpFunctionCall = 57, + SpvOpVariable = 59, + SpvOpImageTexelPointer = 60, + SpvOpLoad = 61, + SpvOpStore = 62, + SpvOpCopyMemory = 63, + SpvOpCopyMemorySized = 64, + SpvOpAccessChain = 65, + SpvOpInBoundsAccessChain = 66, + SpvOpPtrAccessChain = 67, + SpvOpArrayLength = 68, + SpvOpGenericPtrMemSemantics = 69, + SpvOpInBoundsPtrAccessChain = 70, + SpvOpDecorate = 71, + SpvOpMemberDecorate = 72, + SpvOpDecorationGroup = 73, + SpvOpGroupDecorate = 74, + SpvOpGroupMemberDecorate = 75, + SpvOpVectorExtractDynamic = 77, + SpvOpVectorInsertDynamic = 78, + SpvOpVectorShuffle = 79, + SpvOpCompositeConstruct = 80, + SpvOpCompositeExtract = 81, + SpvOpCompositeInsert = 82, + SpvOpCopyObject = 83, + SpvOpTranspose = 84, + SpvOpSampledImage = 86, + SpvOpImageSampleImplicitLod = 87, + SpvOpImageSampleExplicitLod = 88, + SpvOpImageSampleDrefImplicitLod = 89, + SpvOpImageSampleDrefExplicitLod = 90, + SpvOpImageSampleProjImplicitLod = 91, + SpvOpImageSampleProjExplicitLod = 92, + SpvOpImageSampleProjDrefImplicitLod = 93, + SpvOpImageSampleProjDrefExplicitLod = 94, + SpvOpImageFetch = 95, + SpvOpImageGather = 96, + SpvOpImageDrefGather = 97, + SpvOpImageRead = 98, + SpvOpImageWrite = 99, + SpvOpImage = 100, + SpvOpImageQueryFormat = 101, + SpvOpImageQueryOrder = 102, + SpvOpImageQuerySizeLod = 103, + SpvOpImageQuerySize = 104, + SpvOpImageQueryLod = 105, + SpvOpImageQueryLevels = 106, + SpvOpImageQuerySamples = 107, + SpvOpConvertFToU = 109, + SpvOpConvertFToS = 110, + SpvOpConvertSToF = 111, + SpvOpConvertUToF = 112, + SpvOpUConvert = 113, + SpvOpSConvert = 114, + SpvOpFConvert = 115, + SpvOpQuantizeToF16 = 116, + SpvOpConvertPtrToU = 117, + SpvOpSatConvertSToU = 118, + SpvOpSatConvertUToS = 119, + SpvOpConvertUToPtr = 120, + SpvOpPtrCastToGeneric = 121, + SpvOpGenericCastToPtr = 122, + SpvOpGenericCastToPtrExplicit = 123, + SpvOpBitcast = 124, + SpvOpSNegate = 126, + SpvOpFNegate = 127, + SpvOpIAdd = 128, + SpvOpFAdd = 129, + SpvOpISub = 130, + SpvOpFSub = 131, + SpvOpIMul = 132, + SpvOpFMul = 133, + SpvOpUDiv = 134, + SpvOpSDiv = 135, + SpvOpFDiv = 136, + SpvOpUMod = 137, + SpvOpSRem = 138, + SpvOpSMod = 139, + SpvOpFRem = 140, + SpvOpFMod = 141, + SpvOpVectorTimesScalar = 142, + SpvOpMatrixTimesScalar = 143, + SpvOpVectorTimesMatrix = 144, + SpvOpMatrixTimesVector = 145, + SpvOpMatrixTimesMatrix = 146, + SpvOpOuterProduct = 147, + SpvOpDot = 148, + SpvOpIAddCarry = 149, + SpvOpISubBorrow = 150, + SpvOpUMulExtended = 151, + SpvOpSMulExtended = 152, + SpvOpAny = 154, + SpvOpAll = 155, + SpvOpIsNan = 156, + SpvOpIsInf = 157, + SpvOpIsFinite = 158, + SpvOpIsNormal = 159, + SpvOpSignBitSet = 160, + SpvOpLessOrGreater = 161, + SpvOpOrdered = 162, + SpvOpUnordered = 163, + SpvOpLogicalEqual = 164, + SpvOpLogicalNotEqual = 165, + SpvOpLogicalOr = 166, + SpvOpLogicalAnd = 167, + SpvOpLogicalNot = 168, + SpvOpSelect = 169, + SpvOpIEqual = 170, + SpvOpINotEqual = 171, + SpvOpUGreaterThan = 172, + SpvOpSGreaterThan = 173, + SpvOpUGreaterThanEqual = 174, + SpvOpSGreaterThanEqual = 175, + SpvOpULessThan = 176, + SpvOpSLessThan = 177, + SpvOpULessThanEqual = 178, + SpvOpSLessThanEqual = 179, + SpvOpFOrdEqual = 180, + SpvOpFUnordEqual = 181, + SpvOpFOrdNotEqual = 182, + SpvOpFUnordNotEqual = 183, + SpvOpFOrdLessThan = 184, + SpvOpFUnordLessThan = 185, + SpvOpFOrdGreaterThan = 186, + SpvOpFUnordGreaterThan = 187, + SpvOpFOrdLessThanEqual = 188, + SpvOpFUnordLessThanEqual = 189, + SpvOpFOrdGreaterThanEqual = 190, + SpvOpFUnordGreaterThanEqual = 191, + SpvOpShiftRightLogical = 194, + SpvOpShiftRightArithmetic = 195, + SpvOpShiftLeftLogical = 196, + SpvOpBitwiseOr = 197, + SpvOpBitwiseXor = 198, + SpvOpBitwiseAnd = 199, + SpvOpNot = 200, + SpvOpBitFieldInsert = 201, + SpvOpBitFieldSExtract = 202, + SpvOpBitFieldUExtract = 203, + SpvOpBitReverse = 204, + SpvOpBitCount = 205, + SpvOpDPdx = 207, + SpvOpDPdy = 208, + SpvOpFwidth = 209, + SpvOpDPdxFine = 210, + SpvOpDPdyFine = 211, + SpvOpFwidthFine = 212, + SpvOpDPdxCoarse = 213, + SpvOpDPdyCoarse = 214, + SpvOpFwidthCoarse = 215, + SpvOpEmitVertex = 218, + SpvOpEndPrimitive = 219, + SpvOpEmitStreamVertex = 220, + SpvOpEndStreamPrimitive = 221, + SpvOpControlBarrier = 224, + SpvOpMemoryBarrier = 225, + SpvOpAtomicLoad = 227, + SpvOpAtomicStore = 228, + SpvOpAtomicExchange = 229, + SpvOpAtomicCompareExchange = 230, + SpvOpAtomicCompareExchangeWeak = 231, + SpvOpAtomicIIncrement = 232, + SpvOpAtomicIDecrement = 233, + SpvOpAtomicIAdd = 234, + SpvOpAtomicISub = 235, + SpvOpAtomicSMin = 236, + SpvOpAtomicUMin = 237, + SpvOpAtomicSMax = 238, + SpvOpAtomicUMax = 239, + SpvOpAtomicAnd = 240, + SpvOpAtomicOr = 241, + SpvOpAtomicXor = 242, + SpvOpPhi = 245, + SpvOpLoopMerge = 246, + SpvOpSelectionMerge = 247, + SpvOpLabel = 248, + SpvOpBranch = 249, + SpvOpBranchConditional = 250, + SpvOpSwitch = 251, + SpvOpKill = 252, + SpvOpReturn = 253, + SpvOpReturnValue = 254, + SpvOpUnreachable = 255, + SpvOpLifetimeStart = 256, + SpvOpLifetimeStop = 257, + SpvOpGroupAsyncCopy = 259, + SpvOpGroupWaitEvents = 260, + SpvOpGroupAll = 261, + SpvOpGroupAny = 262, + SpvOpGroupBroadcast = 263, + SpvOpGroupIAdd = 264, + SpvOpGroupFAdd = 265, + SpvOpGroupFMin = 266, + SpvOpGroupUMin = 267, + SpvOpGroupSMin = 268, + SpvOpGroupFMax = 269, + SpvOpGroupUMax = 270, + SpvOpGroupSMax = 271, + SpvOpReadPipe = 274, + SpvOpWritePipe = 275, + SpvOpReservedReadPipe = 276, + SpvOpReservedWritePipe = 277, + SpvOpReserveReadPipePackets = 278, + SpvOpReserveWritePipePackets = 279, + SpvOpCommitReadPipe = 280, + SpvOpCommitWritePipe = 281, + SpvOpIsValidReserveId = 282, + SpvOpGetNumPipePackets = 283, + SpvOpGetMaxPipePackets = 284, + SpvOpGroupReserveReadPipePackets = 285, + SpvOpGroupReserveWritePipePackets = 286, + SpvOpGroupCommitReadPipe = 287, + SpvOpGroupCommitWritePipe = 288, + SpvOpEnqueueMarker = 291, + SpvOpEnqueueKernel = 292, + SpvOpGetKernelNDrangeSubGroupCount = 293, + SpvOpGetKernelNDrangeMaxSubGroupSize = 294, + SpvOpGetKernelWorkGroupSize = 295, + SpvOpGetKernelPreferredWorkGroupSizeMultiple = 296, + SpvOpRetainEvent = 297, + SpvOpReleaseEvent = 298, + SpvOpCreateUserEvent = 299, + SpvOpIsValidEvent = 300, + SpvOpSetUserEventStatus = 301, + SpvOpCaptureEventProfilingInfo = 302, + SpvOpGetDefaultQueue = 303, + SpvOpBuildNDRange = 304, + SpvOpImageSparseSampleImplicitLod = 305, + SpvOpImageSparseSampleExplicitLod = 306, + SpvOpImageSparseSampleDrefImplicitLod = 307, + SpvOpImageSparseSampleDrefExplicitLod = 308, + SpvOpImageSparseSampleProjImplicitLod = 309, + SpvOpImageSparseSampleProjExplicitLod = 310, + SpvOpImageSparseSampleProjDrefImplicitLod = 311, + SpvOpImageSparseSampleProjDrefExplicitLod = 312, + SpvOpImageSparseFetch = 313, + SpvOpImageSparseGather = 314, + SpvOpImageSparseDrefGather = 315, + SpvOpImageSparseTexelsResident = 316, + SpvOpNoLine = 317, + SpvOpAtomicFlagTestAndSet = 318, + SpvOpAtomicFlagClear = 319, + SpvOpImageSparseRead = 320, + SpvOpSizeOf = 321, + SpvOpTypePipeStorage = 322, + SpvOpConstantPipeStorage = 323, + SpvOpCreatePipeFromPipeStorage = 324, + SpvOpGetKernelLocalSizeForSubgroupCount = 325, + SpvOpGetKernelMaxNumSubgroups = 326, + SpvOpTypeNamedBarrier = 327, + SpvOpNamedBarrierInitialize = 328, + SpvOpMemoryNamedBarrier = 329, + SpvOpModuleProcessed = 330, + SpvOpExecutionModeId = 331, + SpvOpDecorateId = 332, + SpvOpGroupNonUniformElect = 333, + SpvOpGroupNonUniformAll = 334, + SpvOpGroupNonUniformAny = 335, + SpvOpGroupNonUniformAllEqual = 336, + SpvOpGroupNonUniformBroadcast = 337, + SpvOpGroupNonUniformBroadcastFirst = 338, + SpvOpGroupNonUniformBallot = 339, + SpvOpGroupNonUniformInverseBallot = 340, + SpvOpGroupNonUniformBallotBitExtract = 341, + SpvOpGroupNonUniformBallotBitCount = 342, + SpvOpGroupNonUniformBallotFindLSB = 343, + SpvOpGroupNonUniformBallotFindMSB = 344, + SpvOpGroupNonUniformShuffle = 345, + SpvOpGroupNonUniformShuffleXor = 346, + SpvOpGroupNonUniformShuffleUp = 347, + SpvOpGroupNonUniformShuffleDown = 348, + SpvOpGroupNonUniformIAdd = 349, + SpvOpGroupNonUniformFAdd = 350, + SpvOpGroupNonUniformIMul = 351, + SpvOpGroupNonUniformFMul = 352, + SpvOpGroupNonUniformSMin = 353, + SpvOpGroupNonUniformUMin = 354, + SpvOpGroupNonUniformFMin = 355, + SpvOpGroupNonUniformSMax = 356, + SpvOpGroupNonUniformUMax = 357, + SpvOpGroupNonUniformFMax = 358, + SpvOpGroupNonUniformBitwiseAnd = 359, + SpvOpGroupNonUniformBitwiseOr = 360, + SpvOpGroupNonUniformBitwiseXor = 361, + SpvOpGroupNonUniformLogicalAnd = 362, + SpvOpGroupNonUniformLogicalOr = 363, + SpvOpGroupNonUniformLogicalXor = 364, + SpvOpGroupNonUniformQuadBroadcast = 365, + SpvOpGroupNonUniformQuadSwap = 366, + SpvOpCopyLogical = 400, + SpvOpPtrEqual = 401, + SpvOpPtrNotEqual = 402, + SpvOpPtrDiff = 403, + SpvOpTerminateInvocation = 4416, + SpvOpSubgroupBallotKHR = 4421, + SpvOpSubgroupFirstInvocationKHR = 4422, + SpvOpSubgroupAllKHR = 4428, + SpvOpSubgroupAnyKHR = 4429, + SpvOpSubgroupAllEqualKHR = 4430, + SpvOpGroupNonUniformRotateKHR = 4431, + SpvOpSubgroupReadInvocationKHR = 4432, + SpvOpTraceRayKHR = 4445, + SpvOpExecuteCallableKHR = 4446, + SpvOpConvertUToAccelerationStructureKHR = 4447, + SpvOpIgnoreIntersectionKHR = 4448, + SpvOpTerminateRayKHR = 4449, + SpvOpSDot = 4450, + SpvOpSDotKHR = 4450, + SpvOpUDot = 4451, + SpvOpUDotKHR = 4451, + SpvOpSUDot = 4452, + SpvOpSUDotKHR = 4452, + SpvOpSDotAccSat = 4453, + SpvOpSDotAccSatKHR = 4453, + SpvOpUDotAccSat = 4454, + SpvOpUDotAccSatKHR = 4454, + SpvOpSUDotAccSat = 4455, + SpvOpSUDotAccSatKHR = 4455, + SpvOpTypeRayQueryKHR = 4472, + SpvOpRayQueryInitializeKHR = 4473, + SpvOpRayQueryTerminateKHR = 4474, + SpvOpRayQueryGenerateIntersectionKHR = 4475, + SpvOpRayQueryConfirmIntersectionKHR = 4476, + SpvOpRayQueryProceedKHR = 4477, + SpvOpRayQueryGetIntersectionTypeKHR = 4479, + SpvOpGroupIAddNonUniformAMD = 5000, + SpvOpGroupFAddNonUniformAMD = 5001, + SpvOpGroupFMinNonUniformAMD = 5002, + SpvOpGroupUMinNonUniformAMD = 5003, + SpvOpGroupSMinNonUniformAMD = 5004, + SpvOpGroupFMaxNonUniformAMD = 5005, + SpvOpGroupUMaxNonUniformAMD = 5006, + SpvOpGroupSMaxNonUniformAMD = 5007, + SpvOpFragmentMaskFetchAMD = 5011, + SpvOpFragmentFetchAMD = 5012, + SpvOpReadClockKHR = 5056, + SpvOpImageSampleFootprintNV = 5283, + SpvOpEmitMeshTasksEXT = 5294, + SpvOpSetMeshOutputsEXT = 5295, + SpvOpGroupNonUniformPartitionNV = 5296, + SpvOpWritePackedPrimitiveIndices4x8NV = 5299, + SpvOpReportIntersectionKHR = 5334, + SpvOpReportIntersectionNV = 5334, + SpvOpIgnoreIntersectionNV = 5335, + SpvOpTerminateRayNV = 5336, + SpvOpTraceNV = 5337, + SpvOpTraceMotionNV = 5338, + SpvOpTraceRayMotionNV = 5339, + SpvOpTypeAccelerationStructureKHR = 5341, + SpvOpTypeAccelerationStructureNV = 5341, + SpvOpExecuteCallableNV = 5344, + SpvOpTypeCooperativeMatrixNV = 5358, + SpvOpCooperativeMatrixLoadNV = 5359, + SpvOpCooperativeMatrixStoreNV = 5360, + SpvOpCooperativeMatrixMulAddNV = 5361, + SpvOpCooperativeMatrixLengthNV = 5362, + SpvOpBeginInvocationInterlockEXT = 5364, + SpvOpEndInvocationInterlockEXT = 5365, + SpvOpDemoteToHelperInvocation = 5380, + SpvOpDemoteToHelperInvocationEXT = 5380, + SpvOpIsHelperInvocationEXT = 5381, + SpvOpConvertUToImageNV = 5391, + SpvOpConvertUToSamplerNV = 5392, + SpvOpConvertImageToUNV = 5393, + SpvOpConvertSamplerToUNV = 5394, + SpvOpConvertUToSampledImageNV = 5395, + SpvOpConvertSampledImageToUNV = 5396, + SpvOpSamplerImageAddressingModeNV = 5397, + SpvOpSubgroupShuffleINTEL = 5571, + SpvOpSubgroupShuffleDownINTEL = 5572, + SpvOpSubgroupShuffleUpINTEL = 5573, + SpvOpSubgroupShuffleXorINTEL = 5574, + SpvOpSubgroupBlockReadINTEL = 5575, + SpvOpSubgroupBlockWriteINTEL = 5576, + SpvOpSubgroupImageBlockReadINTEL = 5577, + SpvOpSubgroupImageBlockWriteINTEL = 5578, + SpvOpSubgroupImageMediaBlockReadINTEL = 5580, + SpvOpSubgroupImageMediaBlockWriteINTEL = 5581, + SpvOpUCountLeadingZerosINTEL = 5585, + SpvOpUCountTrailingZerosINTEL = 5586, + SpvOpAbsISubINTEL = 5587, + SpvOpAbsUSubINTEL = 5588, + SpvOpIAddSatINTEL = 5589, + SpvOpUAddSatINTEL = 5590, + SpvOpIAverageINTEL = 5591, + SpvOpUAverageINTEL = 5592, + SpvOpIAverageRoundedINTEL = 5593, + SpvOpUAverageRoundedINTEL = 5594, + SpvOpISubSatINTEL = 5595, + SpvOpUSubSatINTEL = 5596, + SpvOpIMul32x16INTEL = 5597, + SpvOpUMul32x16INTEL = 5598, + SpvOpConstantFunctionPointerINTEL = 5600, + SpvOpFunctionPointerCallINTEL = 5601, + SpvOpAsmTargetINTEL = 5609, + SpvOpAsmINTEL = 5610, + SpvOpAsmCallINTEL = 5611, + SpvOpAtomicFMinEXT = 5614, + SpvOpAtomicFMaxEXT = 5615, + SpvOpAssumeTrueKHR = 5630, + SpvOpExpectKHR = 5631, + SpvOpDecorateString = 5632, + SpvOpDecorateStringGOOGLE = 5632, + SpvOpMemberDecorateString = 5633, + SpvOpMemberDecorateStringGOOGLE = 5633, + SpvOpVmeImageINTEL = 5699, + SpvOpTypeVmeImageINTEL = 5700, + SpvOpTypeAvcImePayloadINTEL = 5701, + SpvOpTypeAvcRefPayloadINTEL = 5702, + SpvOpTypeAvcSicPayloadINTEL = 5703, + SpvOpTypeAvcMcePayloadINTEL = 5704, + SpvOpTypeAvcMceResultINTEL = 5705, + SpvOpTypeAvcImeResultINTEL = 5706, + SpvOpTypeAvcImeResultSingleReferenceStreamoutINTEL = 5707, + SpvOpTypeAvcImeResultDualReferenceStreamoutINTEL = 5708, + SpvOpTypeAvcImeSingleReferenceStreaminINTEL = 5709, + SpvOpTypeAvcImeDualReferenceStreaminINTEL = 5710, + SpvOpTypeAvcRefResultINTEL = 5711, + SpvOpTypeAvcSicResultINTEL = 5712, + SpvOpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL = 5713, + SpvOpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL = 5714, + SpvOpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL = 5715, + SpvOpSubgroupAvcMceSetInterShapePenaltyINTEL = 5716, + SpvOpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL = 5717, + SpvOpSubgroupAvcMceSetInterDirectionPenaltyINTEL = 5718, + SpvOpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL = 5719, + SpvOpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL = 5720, + SpvOpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL = 5721, + SpvOpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL = 5722, + SpvOpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL = 5723, + SpvOpSubgroupAvcMceSetMotionVectorCostFunctionINTEL = 5724, + SpvOpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL = 5725, + SpvOpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL = 5726, + SpvOpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL = 5727, + SpvOpSubgroupAvcMceSetAcOnlyHaarINTEL = 5728, + SpvOpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL = 5729, + SpvOpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL = 5730, + SpvOpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL = 5731, + SpvOpSubgroupAvcMceConvertToImePayloadINTEL = 5732, + SpvOpSubgroupAvcMceConvertToImeResultINTEL = 5733, + SpvOpSubgroupAvcMceConvertToRefPayloadINTEL = 5734, + SpvOpSubgroupAvcMceConvertToRefResultINTEL = 5735, + SpvOpSubgroupAvcMceConvertToSicPayloadINTEL = 5736, + SpvOpSubgroupAvcMceConvertToSicResultINTEL = 5737, + SpvOpSubgroupAvcMceGetMotionVectorsINTEL = 5738, + SpvOpSubgroupAvcMceGetInterDistortionsINTEL = 5739, + SpvOpSubgroupAvcMceGetBestInterDistortionsINTEL = 5740, + SpvOpSubgroupAvcMceGetInterMajorShapeINTEL = 5741, + SpvOpSubgroupAvcMceGetInterMinorShapeINTEL = 5742, + SpvOpSubgroupAvcMceGetInterDirectionsINTEL = 5743, + SpvOpSubgroupAvcMceGetInterMotionVectorCountINTEL = 5744, + SpvOpSubgroupAvcMceGetInterReferenceIdsINTEL = 5745, + SpvOpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL = 5746, + SpvOpSubgroupAvcImeInitializeINTEL = 5747, + SpvOpSubgroupAvcImeSetSingleReferenceINTEL = 5748, + SpvOpSubgroupAvcImeSetDualReferenceINTEL = 5749, + SpvOpSubgroupAvcImeRefWindowSizeINTEL = 5750, + SpvOpSubgroupAvcImeAdjustRefOffsetINTEL = 5751, + SpvOpSubgroupAvcImeConvertToMcePayloadINTEL = 5752, + SpvOpSubgroupAvcImeSetMaxMotionVectorCountINTEL = 5753, + SpvOpSubgroupAvcImeSetUnidirectionalMixDisableINTEL = 5754, + SpvOpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL = 5755, + SpvOpSubgroupAvcImeSetWeightedSadINTEL = 5756, + SpvOpSubgroupAvcImeEvaluateWithSingleReferenceINTEL = 5757, + SpvOpSubgroupAvcImeEvaluateWithDualReferenceINTEL = 5758, + SpvOpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL = 5759, + SpvOpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL = 5760, + SpvOpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL = 5761, + SpvOpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL = 5762, + SpvOpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL = 5763, + SpvOpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL = 5764, + SpvOpSubgroupAvcImeConvertToMceResultINTEL = 5765, + SpvOpSubgroupAvcImeGetSingleReferenceStreaminINTEL = 5766, + SpvOpSubgroupAvcImeGetDualReferenceStreaminINTEL = 5767, + SpvOpSubgroupAvcImeStripSingleReferenceStreamoutINTEL = 5768, + SpvOpSubgroupAvcImeStripDualReferenceStreamoutINTEL = 5769, + SpvOpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL = 5770, + SpvOpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL = 5771, + SpvOpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL = 5772, + SpvOpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL = 5773, + SpvOpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL = 5774, + SpvOpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL = 5775, + SpvOpSubgroupAvcImeGetBorderReachedINTEL = 5776, + SpvOpSubgroupAvcImeGetTruncatedSearchIndicationINTEL = 5777, + SpvOpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL = 5778, + SpvOpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL = 5779, + SpvOpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL = 5780, + SpvOpSubgroupAvcFmeInitializeINTEL = 5781, + SpvOpSubgroupAvcBmeInitializeINTEL = 5782, + SpvOpSubgroupAvcRefConvertToMcePayloadINTEL = 5783, + SpvOpSubgroupAvcRefSetBidirectionalMixDisableINTEL = 5784, + SpvOpSubgroupAvcRefSetBilinearFilterEnableINTEL = 5785, + SpvOpSubgroupAvcRefEvaluateWithSingleReferenceINTEL = 5786, + SpvOpSubgroupAvcRefEvaluateWithDualReferenceINTEL = 5787, + SpvOpSubgroupAvcRefEvaluateWithMultiReferenceINTEL = 5788, + SpvOpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL = 5789, + SpvOpSubgroupAvcRefConvertToMceResultINTEL = 5790, + SpvOpSubgroupAvcSicInitializeINTEL = 5791, + SpvOpSubgroupAvcSicConfigureSkcINTEL = 5792, + SpvOpSubgroupAvcSicConfigureIpeLumaINTEL = 5793, + SpvOpSubgroupAvcSicConfigureIpeLumaChromaINTEL = 5794, + SpvOpSubgroupAvcSicGetMotionVectorMaskINTEL = 5795, + SpvOpSubgroupAvcSicConvertToMcePayloadINTEL = 5796, + SpvOpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL = 5797, + SpvOpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL = 5798, + SpvOpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL = 5799, + SpvOpSubgroupAvcSicSetBilinearFilterEnableINTEL = 5800, + SpvOpSubgroupAvcSicSetSkcForwardTransformEnableINTEL = 5801, + SpvOpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL = 5802, + SpvOpSubgroupAvcSicEvaluateIpeINTEL = 5803, + SpvOpSubgroupAvcSicEvaluateWithSingleReferenceINTEL = 5804, + SpvOpSubgroupAvcSicEvaluateWithDualReferenceINTEL = 5805, + SpvOpSubgroupAvcSicEvaluateWithMultiReferenceINTEL = 5806, + SpvOpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL = 5807, + SpvOpSubgroupAvcSicConvertToMceResultINTEL = 5808, + SpvOpSubgroupAvcSicGetIpeLumaShapeINTEL = 5809, + SpvOpSubgroupAvcSicGetBestIpeLumaDistortionINTEL = 5810, + SpvOpSubgroupAvcSicGetBestIpeChromaDistortionINTEL = 5811, + SpvOpSubgroupAvcSicGetPackedIpeLumaModesINTEL = 5812, + SpvOpSubgroupAvcSicGetIpeChromaModeINTEL = 5813, + SpvOpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL = 5814, + SpvOpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL = 5815, + SpvOpSubgroupAvcSicGetInterRawSadsINTEL = 5816, + SpvOpVariableLengthArrayINTEL = 5818, + SpvOpSaveMemoryINTEL = 5819, + SpvOpRestoreMemoryINTEL = 5820, + SpvOpArbitraryFloatSinCosPiINTEL = 5840, + SpvOpArbitraryFloatCastINTEL = 5841, + SpvOpArbitraryFloatCastFromIntINTEL = 5842, + SpvOpArbitraryFloatCastToIntINTEL = 5843, + SpvOpArbitraryFloatAddINTEL = 5846, + SpvOpArbitraryFloatSubINTEL = 5847, + SpvOpArbitraryFloatMulINTEL = 5848, + SpvOpArbitraryFloatDivINTEL = 5849, + SpvOpArbitraryFloatGTINTEL = 5850, + SpvOpArbitraryFloatGEINTEL = 5851, + SpvOpArbitraryFloatLTINTEL = 5852, + SpvOpArbitraryFloatLEINTEL = 5853, + SpvOpArbitraryFloatEQINTEL = 5854, + SpvOpArbitraryFloatRecipINTEL = 5855, + SpvOpArbitraryFloatRSqrtINTEL = 5856, + SpvOpArbitraryFloatCbrtINTEL = 5857, + SpvOpArbitraryFloatHypotINTEL = 5858, + SpvOpArbitraryFloatSqrtINTEL = 5859, + SpvOpArbitraryFloatLogINTEL = 5860, + SpvOpArbitraryFloatLog2INTEL = 5861, + SpvOpArbitraryFloatLog10INTEL = 5862, + SpvOpArbitraryFloatLog1pINTEL = 5863, + SpvOpArbitraryFloatExpINTEL = 5864, + SpvOpArbitraryFloatExp2INTEL = 5865, + SpvOpArbitraryFloatExp10INTEL = 5866, + SpvOpArbitraryFloatExpm1INTEL = 5867, + SpvOpArbitraryFloatSinINTEL = 5868, + SpvOpArbitraryFloatCosINTEL = 5869, + SpvOpArbitraryFloatSinCosINTEL = 5870, + SpvOpArbitraryFloatSinPiINTEL = 5871, + SpvOpArbitraryFloatCosPiINTEL = 5872, + SpvOpArbitraryFloatASinINTEL = 5873, + SpvOpArbitraryFloatASinPiINTEL = 5874, + SpvOpArbitraryFloatACosINTEL = 5875, + SpvOpArbitraryFloatACosPiINTEL = 5876, + SpvOpArbitraryFloatATanINTEL = 5877, + SpvOpArbitraryFloatATanPiINTEL = 5878, + SpvOpArbitraryFloatATan2INTEL = 5879, + SpvOpArbitraryFloatPowINTEL = 5880, + SpvOpArbitraryFloatPowRINTEL = 5881, + SpvOpArbitraryFloatPowNINTEL = 5882, + SpvOpLoopControlINTEL = 5887, + SpvOpAliasDomainDeclINTEL = 5911, + SpvOpAliasScopeDeclINTEL = 5912, + SpvOpAliasScopeListDeclINTEL = 5913, + SpvOpFixedSqrtINTEL = 5923, + SpvOpFixedRecipINTEL = 5924, + SpvOpFixedRsqrtINTEL = 5925, + SpvOpFixedSinINTEL = 5926, + SpvOpFixedCosINTEL = 5927, + SpvOpFixedSinCosINTEL = 5928, + SpvOpFixedSinPiINTEL = 5929, + SpvOpFixedCosPiINTEL = 5930, + SpvOpFixedSinCosPiINTEL = 5931, + SpvOpFixedLogINTEL = 5932, + SpvOpFixedExpINTEL = 5933, + SpvOpPtrCastToCrossWorkgroupINTEL = 5934, + SpvOpCrossWorkgroupCastToPtrINTEL = 5938, + SpvOpReadPipeBlockingINTEL = 5946, + SpvOpWritePipeBlockingINTEL = 5947, + SpvOpFPGARegINTEL = 5949, + SpvOpRayQueryGetRayTMinKHR = 6016, + SpvOpRayQueryGetRayFlagsKHR = 6017, + SpvOpRayQueryGetIntersectionTKHR = 6018, + SpvOpRayQueryGetIntersectionInstanceCustomIndexKHR = 6019, + SpvOpRayQueryGetIntersectionInstanceIdKHR = 6020, + SpvOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR = 6021, + SpvOpRayQueryGetIntersectionGeometryIndexKHR = 6022, + SpvOpRayQueryGetIntersectionPrimitiveIndexKHR = 6023, + SpvOpRayQueryGetIntersectionBarycentricsKHR = 6024, + SpvOpRayQueryGetIntersectionFrontFaceKHR = 6025, + SpvOpRayQueryGetIntersectionCandidateAABBOpaqueKHR = 6026, + SpvOpRayQueryGetIntersectionObjectRayDirectionKHR = 6027, + SpvOpRayQueryGetIntersectionObjectRayOriginKHR = 6028, + SpvOpRayQueryGetWorldRayDirectionKHR = 6029, + SpvOpRayQueryGetWorldRayOriginKHR = 6030, + SpvOpRayQueryGetIntersectionObjectToWorldKHR = 6031, + SpvOpRayQueryGetIntersectionWorldToObjectKHR = 6032, + SpvOpAtomicFAddEXT = 6035, + SpvOpTypeBufferSurfaceINTEL = 6086, + SpvOpTypeStructContinuedINTEL = 6090, + SpvOpConstantCompositeContinuedINTEL = 6091, + SpvOpSpecConstantCompositeContinuedINTEL = 6092, + SpvOpControlBarrierArriveINTEL = 6142, + SpvOpControlBarrierWaitINTEL = 6143, + SpvOpGroupIMulKHR = 6401, + SpvOpGroupFMulKHR = 6402, + SpvOpGroupBitwiseAndKHR = 6403, + SpvOpGroupBitwiseOrKHR = 6404, + SpvOpGroupBitwiseXorKHR = 6405, + SpvOpGroupLogicalAndKHR = 6406, + SpvOpGroupLogicalOrKHR = 6407, + SpvOpGroupLogicalXorKHR = 6408, + SpvOpMax = 0x7fffffff, +} SpvOp; + +#ifdef SPV_ENABLE_UTILITY_CODE +#ifndef __cplusplus +#include +#endif +inline void SpvHasResultAndType(SpvOp opcode, bool *hasResult, bool *hasResultType) { + *hasResult = *hasResultType = false; + switch (opcode) { + default: /* unknown opcode */ break; + case SpvOpNop: *hasResult = false; *hasResultType = false; break; + case SpvOpUndef: *hasResult = true; *hasResultType = true; break; + case SpvOpSourceContinued: *hasResult = false; *hasResultType = false; break; + case SpvOpSource: *hasResult = false; *hasResultType = false; break; + case SpvOpSourceExtension: *hasResult = false; *hasResultType = false; break; + case SpvOpName: *hasResult = false; *hasResultType = false; break; + case SpvOpMemberName: *hasResult = false; *hasResultType = false; break; + case SpvOpString: *hasResult = true; *hasResultType = false; break; + case SpvOpLine: *hasResult = false; *hasResultType = false; break; + case SpvOpExtension: *hasResult = false; *hasResultType = false; break; + case SpvOpExtInstImport: *hasResult = true; *hasResultType = false; break; + case SpvOpExtInst: *hasResult = true; *hasResultType = true; break; + case SpvOpMemoryModel: *hasResult = false; *hasResultType = false; break; + case SpvOpEntryPoint: *hasResult = false; *hasResultType = false; break; + case SpvOpExecutionMode: *hasResult = false; *hasResultType = false; break; + case SpvOpCapability: *hasResult = false; *hasResultType = false; break; + case SpvOpTypeVoid: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeBool: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeInt: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeFloat: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeVector: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeMatrix: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeImage: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeSampler: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeSampledImage: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeArray: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeRuntimeArray: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeStruct: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeOpaque: *hasResult = true; *hasResultType = false; break; + case SpvOpTypePointer: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeFunction: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeEvent: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeDeviceEvent: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeReserveId: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeQueue: *hasResult = true; *hasResultType = false; break; + case SpvOpTypePipe: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeForwardPointer: *hasResult = false; *hasResultType = false; break; + case SpvOpConstantTrue: *hasResult = true; *hasResultType = true; break; + case SpvOpConstantFalse: *hasResult = true; *hasResultType = true; break; + case SpvOpConstant: *hasResult = true; *hasResultType = true; break; + case SpvOpConstantComposite: *hasResult = true; *hasResultType = true; break; + case SpvOpConstantSampler: *hasResult = true; *hasResultType = true; break; + case SpvOpConstantNull: *hasResult = true; *hasResultType = true; break; + case SpvOpSpecConstantTrue: *hasResult = true; *hasResultType = true; break; + case SpvOpSpecConstantFalse: *hasResult = true; *hasResultType = true; break; + case SpvOpSpecConstant: *hasResult = true; *hasResultType = true; break; + case SpvOpSpecConstantComposite: *hasResult = true; *hasResultType = true; break; + case SpvOpSpecConstantOp: *hasResult = true; *hasResultType = true; break; + case SpvOpFunction: *hasResult = true; *hasResultType = true; break; + case SpvOpFunctionParameter: *hasResult = true; *hasResultType = true; break; + case SpvOpFunctionEnd: *hasResult = false; *hasResultType = false; break; + case SpvOpFunctionCall: *hasResult = true; *hasResultType = true; break; + case SpvOpVariable: *hasResult = true; *hasResultType = true; break; + case SpvOpImageTexelPointer: *hasResult = true; *hasResultType = true; break; + case SpvOpLoad: *hasResult = true; *hasResultType = true; break; + case SpvOpStore: *hasResult = false; *hasResultType = false; break; + case SpvOpCopyMemory: *hasResult = false; *hasResultType = false; break; + case SpvOpCopyMemorySized: *hasResult = false; *hasResultType = false; break; + case SpvOpAccessChain: *hasResult = true; *hasResultType = true; break; + case SpvOpInBoundsAccessChain: *hasResult = true; *hasResultType = true; break; + case SpvOpPtrAccessChain: *hasResult = true; *hasResultType = true; break; + case SpvOpArrayLength: *hasResult = true; *hasResultType = true; break; + case SpvOpGenericPtrMemSemantics: *hasResult = true; *hasResultType = true; break; + case SpvOpInBoundsPtrAccessChain: *hasResult = true; *hasResultType = true; break; + case SpvOpDecorate: *hasResult = false; *hasResultType = false; break; + case SpvOpMemberDecorate: *hasResult = false; *hasResultType = false; break; + case SpvOpDecorationGroup: *hasResult = true; *hasResultType = false; break; + case SpvOpGroupDecorate: *hasResult = false; *hasResultType = false; break; + case SpvOpGroupMemberDecorate: *hasResult = false; *hasResultType = false; break; + case SpvOpVectorExtractDynamic: *hasResult = true; *hasResultType = true; break; + case SpvOpVectorInsertDynamic: *hasResult = true; *hasResultType = true; break; + case SpvOpVectorShuffle: *hasResult = true; *hasResultType = true; break; + case SpvOpCompositeConstruct: *hasResult = true; *hasResultType = true; break; + case SpvOpCompositeExtract: *hasResult = true; *hasResultType = true; break; + case SpvOpCompositeInsert: *hasResult = true; *hasResultType = true; break; + case SpvOpCopyObject: *hasResult = true; *hasResultType = true; break; + case SpvOpTranspose: *hasResult = true; *hasResultType = true; break; + case SpvOpSampledImage: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSampleImplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSampleExplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSampleDrefImplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSampleDrefExplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSampleProjImplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSampleProjExplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSampleProjDrefImplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSampleProjDrefExplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageFetch: *hasResult = true; *hasResultType = true; break; + case SpvOpImageGather: *hasResult = true; *hasResultType = true; break; + case SpvOpImageDrefGather: *hasResult = true; *hasResultType = true; break; + case SpvOpImageRead: *hasResult = true; *hasResultType = true; break; + case SpvOpImageWrite: *hasResult = false; *hasResultType = false; break; + case SpvOpImage: *hasResult = true; *hasResultType = true; break; + case SpvOpImageQueryFormat: *hasResult = true; *hasResultType = true; break; + case SpvOpImageQueryOrder: *hasResult = true; *hasResultType = true; break; + case SpvOpImageQuerySizeLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageQuerySize: *hasResult = true; *hasResultType = true; break; + case SpvOpImageQueryLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageQueryLevels: *hasResult = true; *hasResultType = true; break; + case SpvOpImageQuerySamples: *hasResult = true; *hasResultType = true; break; + case SpvOpConvertFToU: *hasResult = true; *hasResultType = true; break; + case SpvOpConvertFToS: *hasResult = true; *hasResultType = true; break; + case SpvOpConvertSToF: *hasResult = true; *hasResultType = true; break; + case SpvOpConvertUToF: *hasResult = true; *hasResultType = true; break; + case SpvOpUConvert: *hasResult = true; *hasResultType = true; break; + case SpvOpSConvert: *hasResult = true; *hasResultType = true; break; + case SpvOpFConvert: *hasResult = true; *hasResultType = true; break; + case SpvOpQuantizeToF16: *hasResult = true; *hasResultType = true; break; + case SpvOpConvertPtrToU: *hasResult = true; *hasResultType = true; break; + case SpvOpSatConvertSToU: *hasResult = true; *hasResultType = true; break; + case SpvOpSatConvertUToS: *hasResult = true; *hasResultType = true; break; + case SpvOpConvertUToPtr: *hasResult = true; *hasResultType = true; break; + case SpvOpPtrCastToGeneric: *hasResult = true; *hasResultType = true; break; + case SpvOpGenericCastToPtr: *hasResult = true; *hasResultType = true; break; + case SpvOpGenericCastToPtrExplicit: *hasResult = true; *hasResultType = true; break; + case SpvOpBitcast: *hasResult = true; *hasResultType = true; break; + case SpvOpSNegate: *hasResult = true; *hasResultType = true; break; + case SpvOpFNegate: *hasResult = true; *hasResultType = true; break; + case SpvOpIAdd: *hasResult = true; *hasResultType = true; break; + case SpvOpFAdd: *hasResult = true; *hasResultType = true; break; + case SpvOpISub: *hasResult = true; *hasResultType = true; break; + case SpvOpFSub: *hasResult = true; *hasResultType = true; break; + case SpvOpIMul: *hasResult = true; *hasResultType = true; break; + case SpvOpFMul: *hasResult = true; *hasResultType = true; break; + case SpvOpUDiv: *hasResult = true; *hasResultType = true; break; + case SpvOpSDiv: *hasResult = true; *hasResultType = true; break; + case SpvOpFDiv: *hasResult = true; *hasResultType = true; break; + case SpvOpUMod: *hasResult = true; *hasResultType = true; break; + case SpvOpSRem: *hasResult = true; *hasResultType = true; break; + case SpvOpSMod: *hasResult = true; *hasResultType = true; break; + case SpvOpFRem: *hasResult = true; *hasResultType = true; break; + case SpvOpFMod: *hasResult = true; *hasResultType = true; break; + case SpvOpVectorTimesScalar: *hasResult = true; *hasResultType = true; break; + case SpvOpMatrixTimesScalar: *hasResult = true; *hasResultType = true; break; + case SpvOpVectorTimesMatrix: *hasResult = true; *hasResultType = true; break; + case SpvOpMatrixTimesVector: *hasResult = true; *hasResultType = true; break; + case SpvOpMatrixTimesMatrix: *hasResult = true; *hasResultType = true; break; + case SpvOpOuterProduct: *hasResult = true; *hasResultType = true; break; + case SpvOpDot: *hasResult = true; *hasResultType = true; break; + case SpvOpIAddCarry: *hasResult = true; *hasResultType = true; break; + case SpvOpISubBorrow: *hasResult = true; *hasResultType = true; break; + case SpvOpUMulExtended: *hasResult = true; *hasResultType = true; break; + case SpvOpSMulExtended: *hasResult = true; *hasResultType = true; break; + case SpvOpAny: *hasResult = true; *hasResultType = true; break; + case SpvOpAll: *hasResult = true; *hasResultType = true; break; + case SpvOpIsNan: *hasResult = true; *hasResultType = true; break; + case SpvOpIsInf: *hasResult = true; *hasResultType = true; break; + case SpvOpIsFinite: *hasResult = true; *hasResultType = true; break; + case SpvOpIsNormal: *hasResult = true; *hasResultType = true; break; + case SpvOpSignBitSet: *hasResult = true; *hasResultType = true; break; + case SpvOpLessOrGreater: *hasResult = true; *hasResultType = true; break; + case SpvOpOrdered: *hasResult = true; *hasResultType = true; break; + case SpvOpUnordered: *hasResult = true; *hasResultType = true; break; + case SpvOpLogicalEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpLogicalNotEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpLogicalOr: *hasResult = true; *hasResultType = true; break; + case SpvOpLogicalAnd: *hasResult = true; *hasResultType = true; break; + case SpvOpLogicalNot: *hasResult = true; *hasResultType = true; break; + case SpvOpSelect: *hasResult = true; *hasResultType = true; break; + case SpvOpIEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpINotEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpUGreaterThan: *hasResult = true; *hasResultType = true; break; + case SpvOpSGreaterThan: *hasResult = true; *hasResultType = true; break; + case SpvOpUGreaterThanEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpSGreaterThanEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpULessThan: *hasResult = true; *hasResultType = true; break; + case SpvOpSLessThan: *hasResult = true; *hasResultType = true; break; + case SpvOpULessThanEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpSLessThanEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpFOrdEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpFUnordEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpFOrdNotEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpFUnordNotEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpFOrdLessThan: *hasResult = true; *hasResultType = true; break; + case SpvOpFUnordLessThan: *hasResult = true; *hasResultType = true; break; + case SpvOpFOrdGreaterThan: *hasResult = true; *hasResultType = true; break; + case SpvOpFUnordGreaterThan: *hasResult = true; *hasResultType = true; break; + case SpvOpFOrdLessThanEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpFUnordLessThanEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpFOrdGreaterThanEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpFUnordGreaterThanEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpShiftRightLogical: *hasResult = true; *hasResultType = true; break; + case SpvOpShiftRightArithmetic: *hasResult = true; *hasResultType = true; break; + case SpvOpShiftLeftLogical: *hasResult = true; *hasResultType = true; break; + case SpvOpBitwiseOr: *hasResult = true; *hasResultType = true; break; + case SpvOpBitwiseXor: *hasResult = true; *hasResultType = true; break; + case SpvOpBitwiseAnd: *hasResult = true; *hasResultType = true; break; + case SpvOpNot: *hasResult = true; *hasResultType = true; break; + case SpvOpBitFieldInsert: *hasResult = true; *hasResultType = true; break; + case SpvOpBitFieldSExtract: *hasResult = true; *hasResultType = true; break; + case SpvOpBitFieldUExtract: *hasResult = true; *hasResultType = true; break; + case SpvOpBitReverse: *hasResult = true; *hasResultType = true; break; + case SpvOpBitCount: *hasResult = true; *hasResultType = true; break; + case SpvOpDPdx: *hasResult = true; *hasResultType = true; break; + case SpvOpDPdy: *hasResult = true; *hasResultType = true; break; + case SpvOpFwidth: *hasResult = true; *hasResultType = true; break; + case SpvOpDPdxFine: *hasResult = true; *hasResultType = true; break; + case SpvOpDPdyFine: *hasResult = true; *hasResultType = true; break; + case SpvOpFwidthFine: *hasResult = true; *hasResultType = true; break; + case SpvOpDPdxCoarse: *hasResult = true; *hasResultType = true; break; + case SpvOpDPdyCoarse: *hasResult = true; *hasResultType = true; break; + case SpvOpFwidthCoarse: *hasResult = true; *hasResultType = true; break; + case SpvOpEmitVertex: *hasResult = false; *hasResultType = false; break; + case SpvOpEndPrimitive: *hasResult = false; *hasResultType = false; break; + case SpvOpEmitStreamVertex: *hasResult = false; *hasResultType = false; break; + case SpvOpEndStreamPrimitive: *hasResult = false; *hasResultType = false; break; + case SpvOpControlBarrier: *hasResult = false; *hasResultType = false; break; + case SpvOpMemoryBarrier: *hasResult = false; *hasResultType = false; break; + case SpvOpAtomicLoad: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicStore: *hasResult = false; *hasResultType = false; break; + case SpvOpAtomicExchange: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicCompareExchange: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicCompareExchangeWeak: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicIIncrement: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicIDecrement: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicIAdd: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicISub: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicSMin: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicUMin: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicSMax: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicUMax: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicAnd: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicOr: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicXor: *hasResult = true; *hasResultType = true; break; + case SpvOpPhi: *hasResult = true; *hasResultType = true; break; + case SpvOpLoopMerge: *hasResult = false; *hasResultType = false; break; + case SpvOpSelectionMerge: *hasResult = false; *hasResultType = false; break; + case SpvOpLabel: *hasResult = true; *hasResultType = false; break; + case SpvOpBranch: *hasResult = false; *hasResultType = false; break; + case SpvOpBranchConditional: *hasResult = false; *hasResultType = false; break; + case SpvOpSwitch: *hasResult = false; *hasResultType = false; break; + case SpvOpKill: *hasResult = false; *hasResultType = false; break; + case SpvOpReturn: *hasResult = false; *hasResultType = false; break; + case SpvOpReturnValue: *hasResult = false; *hasResultType = false; break; + case SpvOpUnreachable: *hasResult = false; *hasResultType = false; break; + case SpvOpLifetimeStart: *hasResult = false; *hasResultType = false; break; + case SpvOpLifetimeStop: *hasResult = false; *hasResultType = false; break; + case SpvOpGroupAsyncCopy: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupWaitEvents: *hasResult = false; *hasResultType = false; break; + case SpvOpGroupAll: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupAny: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupBroadcast: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupIAdd: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupFAdd: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupFMin: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupUMin: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupSMin: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupFMax: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupUMax: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupSMax: *hasResult = true; *hasResultType = true; break; + case SpvOpReadPipe: *hasResult = true; *hasResultType = true; break; + case SpvOpWritePipe: *hasResult = true; *hasResultType = true; break; + case SpvOpReservedReadPipe: *hasResult = true; *hasResultType = true; break; + case SpvOpReservedWritePipe: *hasResult = true; *hasResultType = true; break; + case SpvOpReserveReadPipePackets: *hasResult = true; *hasResultType = true; break; + case SpvOpReserveWritePipePackets: *hasResult = true; *hasResultType = true; break; + case SpvOpCommitReadPipe: *hasResult = false; *hasResultType = false; break; + case SpvOpCommitWritePipe: *hasResult = false; *hasResultType = false; break; + case SpvOpIsValidReserveId: *hasResult = true; *hasResultType = true; break; + case SpvOpGetNumPipePackets: *hasResult = true; *hasResultType = true; break; + case SpvOpGetMaxPipePackets: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupReserveReadPipePackets: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupReserveWritePipePackets: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupCommitReadPipe: *hasResult = false; *hasResultType = false; break; + case SpvOpGroupCommitWritePipe: *hasResult = false; *hasResultType = false; break; + case SpvOpEnqueueMarker: *hasResult = true; *hasResultType = true; break; + case SpvOpEnqueueKernel: *hasResult = true; *hasResultType = true; break; + case SpvOpGetKernelNDrangeSubGroupCount: *hasResult = true; *hasResultType = true; break; + case SpvOpGetKernelNDrangeMaxSubGroupSize: *hasResult = true; *hasResultType = true; break; + case SpvOpGetKernelWorkGroupSize: *hasResult = true; *hasResultType = true; break; + case SpvOpGetKernelPreferredWorkGroupSizeMultiple: *hasResult = true; *hasResultType = true; break; + case SpvOpRetainEvent: *hasResult = false; *hasResultType = false; break; + case SpvOpReleaseEvent: *hasResult = false; *hasResultType = false; break; + case SpvOpCreateUserEvent: *hasResult = true; *hasResultType = true; break; + case SpvOpIsValidEvent: *hasResult = true; *hasResultType = true; break; + case SpvOpSetUserEventStatus: *hasResult = false; *hasResultType = false; break; + case SpvOpCaptureEventProfilingInfo: *hasResult = false; *hasResultType = false; break; + case SpvOpGetDefaultQueue: *hasResult = true; *hasResultType = true; break; + case SpvOpBuildNDRange: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSparseSampleImplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSparseSampleExplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSparseSampleDrefImplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSparseSampleDrefExplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSparseSampleProjImplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSparseSampleProjExplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSparseSampleProjDrefImplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSparseSampleProjDrefExplicitLod: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSparseFetch: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSparseGather: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSparseDrefGather: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSparseTexelsResident: *hasResult = true; *hasResultType = true; break; + case SpvOpNoLine: *hasResult = false; *hasResultType = false; break; + case SpvOpAtomicFlagTestAndSet: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicFlagClear: *hasResult = false; *hasResultType = false; break; + case SpvOpImageSparseRead: *hasResult = true; *hasResultType = true; break; + case SpvOpSizeOf: *hasResult = true; *hasResultType = true; break; + case SpvOpTypePipeStorage: *hasResult = true; *hasResultType = false; break; + case SpvOpConstantPipeStorage: *hasResult = true; *hasResultType = true; break; + case SpvOpCreatePipeFromPipeStorage: *hasResult = true; *hasResultType = true; break; + case SpvOpGetKernelLocalSizeForSubgroupCount: *hasResult = true; *hasResultType = true; break; + case SpvOpGetKernelMaxNumSubgroups: *hasResult = true; *hasResultType = true; break; + case SpvOpTypeNamedBarrier: *hasResult = true; *hasResultType = false; break; + case SpvOpNamedBarrierInitialize: *hasResult = true; *hasResultType = true; break; + case SpvOpMemoryNamedBarrier: *hasResult = false; *hasResultType = false; break; + case SpvOpModuleProcessed: *hasResult = false; *hasResultType = false; break; + case SpvOpExecutionModeId: *hasResult = false; *hasResultType = false; break; + case SpvOpDecorateId: *hasResult = false; *hasResultType = false; break; + case SpvOpGroupNonUniformElect: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformAll: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformAny: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformAllEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformBroadcast: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformBroadcastFirst: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformBallot: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformInverseBallot: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformBallotBitExtract: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformBallotBitCount: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformBallotFindLSB: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformBallotFindMSB: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformShuffle: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformShuffleXor: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformShuffleUp: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformShuffleDown: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformIAdd: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformFAdd: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformIMul: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformFMul: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformSMin: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformUMin: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformFMin: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformSMax: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformUMax: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformFMax: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformBitwiseAnd: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformBitwiseOr: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformBitwiseXor: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformLogicalAnd: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformLogicalOr: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformLogicalXor: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformQuadBroadcast: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformQuadSwap: *hasResult = true; *hasResultType = true; break; + case SpvOpCopyLogical: *hasResult = true; *hasResultType = true; break; + case SpvOpPtrEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpPtrNotEqual: *hasResult = true; *hasResultType = true; break; + case SpvOpPtrDiff: *hasResult = true; *hasResultType = true; break; + case SpvOpTerminateInvocation: *hasResult = false; *hasResultType = false; break; + case SpvOpSubgroupBallotKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupFirstInvocationKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAllKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAnyKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAllEqualKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupNonUniformRotateKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupReadInvocationKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpTraceRayKHR: *hasResult = false; *hasResultType = false; break; + case SpvOpExecuteCallableKHR: *hasResult = false; *hasResultType = false; break; + case SpvOpConvertUToAccelerationStructureKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpIgnoreIntersectionKHR: *hasResult = false; *hasResultType = false; break; + case SpvOpTerminateRayKHR: *hasResult = false; *hasResultType = false; break; + case SpvOpSDot: *hasResult = true; *hasResultType = true; break; + case SpvOpUDot: *hasResult = true; *hasResultType = true; break; + case SpvOpSUDot: *hasResult = true; *hasResultType = true; break; + case SpvOpSDotAccSat: *hasResult = true; *hasResultType = true; break; + case SpvOpUDotAccSat: *hasResult = true; *hasResultType = true; break; + case SpvOpSUDotAccSat: *hasResult = true; *hasResultType = true; break; + case SpvOpTypeRayQueryKHR: *hasResult = true; *hasResultType = false; break; + case SpvOpRayQueryInitializeKHR: *hasResult = false; *hasResultType = false; break; + case SpvOpRayQueryTerminateKHR: *hasResult = false; *hasResultType = false; break; + case SpvOpRayQueryGenerateIntersectionKHR: *hasResult = false; *hasResultType = false; break; + case SpvOpRayQueryConfirmIntersectionKHR: *hasResult = false; *hasResultType = false; break; + case SpvOpRayQueryProceedKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpRayQueryGetIntersectionTypeKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupIAddNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupFAddNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupFMinNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupUMinNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupSMinNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupFMaxNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupUMaxNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupSMaxNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case SpvOpFragmentMaskFetchAMD: *hasResult = true; *hasResultType = true; break; + case SpvOpFragmentFetchAMD: *hasResult = true; *hasResultType = true; break; + case SpvOpReadClockKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpImageSampleFootprintNV: *hasResult = true; *hasResultType = true; break; + case SpvOpEmitMeshTasksEXT: *hasResult = false; *hasResultType = false; break; + case SpvOpSetMeshOutputsEXT: *hasResult = false; *hasResultType = false; break; + case SpvOpGroupNonUniformPartitionNV: *hasResult = true; *hasResultType = true; break; + case SpvOpWritePackedPrimitiveIndices4x8NV: *hasResult = false; *hasResultType = false; break; + case SpvOpReportIntersectionNV: *hasResult = true; *hasResultType = true; break; + case SpvOpIgnoreIntersectionNV: *hasResult = false; *hasResultType = false; break; + case SpvOpTerminateRayNV: *hasResult = false; *hasResultType = false; break; + case SpvOpTraceNV: *hasResult = false; *hasResultType = false; break; + case SpvOpTraceMotionNV: *hasResult = false; *hasResultType = false; break; + case SpvOpTraceRayMotionNV: *hasResult = false; *hasResultType = false; break; + case SpvOpTypeAccelerationStructureNV: *hasResult = true; *hasResultType = false; break; + case SpvOpExecuteCallableNV: *hasResult = false; *hasResultType = false; break; + case SpvOpTypeCooperativeMatrixNV: *hasResult = true; *hasResultType = false; break; + case SpvOpCooperativeMatrixLoadNV: *hasResult = true; *hasResultType = true; break; + case SpvOpCooperativeMatrixStoreNV: *hasResult = false; *hasResultType = false; break; + case SpvOpCooperativeMatrixMulAddNV: *hasResult = true; *hasResultType = true; break; + case SpvOpCooperativeMatrixLengthNV: *hasResult = true; *hasResultType = true; break; + case SpvOpBeginInvocationInterlockEXT: *hasResult = false; *hasResultType = false; break; + case SpvOpEndInvocationInterlockEXT: *hasResult = false; *hasResultType = false; break; + case SpvOpDemoteToHelperInvocation: *hasResult = false; *hasResultType = false; break; + case SpvOpIsHelperInvocationEXT: *hasResult = true; *hasResultType = true; break; + case SpvOpConvertUToImageNV: *hasResult = true; *hasResultType = true; break; + case SpvOpConvertUToSamplerNV: *hasResult = true; *hasResultType = true; break; + case SpvOpConvertImageToUNV: *hasResult = true; *hasResultType = true; break; + case SpvOpConvertSamplerToUNV: *hasResult = true; *hasResultType = true; break; + case SpvOpConvertUToSampledImageNV: *hasResult = true; *hasResultType = true; break; + case SpvOpConvertSampledImageToUNV: *hasResult = true; *hasResultType = true; break; + case SpvOpSamplerImageAddressingModeNV: *hasResult = false; *hasResultType = false; break; + case SpvOpSubgroupShuffleINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupShuffleDownINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupShuffleUpINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupShuffleXorINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupBlockReadINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupBlockWriteINTEL: *hasResult = false; *hasResultType = false; break; + case SpvOpSubgroupImageBlockReadINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupImageBlockWriteINTEL: *hasResult = false; *hasResultType = false; break; + case SpvOpSubgroupImageMediaBlockReadINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupImageMediaBlockWriteINTEL: *hasResult = false; *hasResultType = false; break; + case SpvOpUCountLeadingZerosINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpUCountTrailingZerosINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpAbsISubINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpAbsUSubINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpIAddSatINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpUAddSatINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpIAverageINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpUAverageINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpIAverageRoundedINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpUAverageRoundedINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpISubSatINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpUSubSatINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpIMul32x16INTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpUMul32x16INTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpConstantFunctionPointerINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpFunctionPointerCallINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpAsmTargetINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpAsmINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpAsmCallINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicFMinEXT: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicFMaxEXT: *hasResult = true; *hasResultType = true; break; + case SpvOpAssumeTrueKHR: *hasResult = false; *hasResultType = false; break; + case SpvOpExpectKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpDecorateString: *hasResult = false; *hasResultType = false; break; + case SpvOpMemberDecorateString: *hasResult = false; *hasResultType = false; break; + case SpvOpVmeImageINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpTypeVmeImageINTEL: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeAvcImePayloadINTEL: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeAvcRefPayloadINTEL: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeAvcSicPayloadINTEL: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeAvcMcePayloadINTEL: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeAvcMceResultINTEL: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeAvcImeResultINTEL: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeAvcImeResultSingleReferenceStreamoutINTEL: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeAvcImeResultDualReferenceStreamoutINTEL: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeAvcImeSingleReferenceStreaminINTEL: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeAvcImeDualReferenceStreaminINTEL: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeAvcRefResultINTEL: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeAvcSicResultINTEL: *hasResult = true; *hasResultType = false; break; + case SpvOpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceSetInterShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceSetInterDirectionPenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceSetMotionVectorCostFunctionINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceSetAcOnlyHaarINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceConvertToImePayloadINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceConvertToImeResultINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceConvertToRefPayloadINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceConvertToRefResultINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceConvertToSicPayloadINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceConvertToSicResultINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetMotionVectorsINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetInterDistortionsINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetBestInterDistortionsINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetInterMajorShapeINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetInterMinorShapeINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetInterDirectionsINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetInterMotionVectorCountINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetInterReferenceIdsINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeInitializeINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeSetSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeSetDualReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeRefWindowSizeINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeAdjustRefOffsetINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeConvertToMcePayloadINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeSetMaxMotionVectorCountINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeSetUnidirectionalMixDisableINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeSetWeightedSadINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeEvaluateWithSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeEvaluateWithDualReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeConvertToMceResultINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeGetSingleReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeGetDualReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeStripSingleReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeStripDualReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeGetBorderReachedINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeGetTruncatedSearchIndicationINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcFmeInitializeINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcBmeInitializeINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcRefConvertToMcePayloadINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcRefSetBidirectionalMixDisableINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcRefSetBilinearFilterEnableINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcRefEvaluateWithSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcRefEvaluateWithDualReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcRefEvaluateWithMultiReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcRefConvertToMceResultINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicInitializeINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicConfigureSkcINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicConfigureIpeLumaINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicConfigureIpeLumaChromaINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicGetMotionVectorMaskINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicConvertToMcePayloadINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicSetBilinearFilterEnableINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicSetSkcForwardTransformEnableINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicEvaluateIpeINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicEvaluateWithSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicEvaluateWithDualReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicEvaluateWithMultiReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicConvertToMceResultINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicGetIpeLumaShapeINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicGetBestIpeLumaDistortionINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicGetBestIpeChromaDistortionINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicGetPackedIpeLumaModesINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicGetIpeChromaModeINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSubgroupAvcSicGetInterRawSadsINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpVariableLengthArrayINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpSaveMemoryINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpRestoreMemoryINTEL: *hasResult = false; *hasResultType = false; break; + case SpvOpArbitraryFloatSinCosPiINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatCastINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatCastFromIntINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatCastToIntINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatAddINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatSubINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatMulINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatDivINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatGTINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatGEINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatLTINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatLEINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatEQINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatRecipINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatRSqrtINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatCbrtINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatHypotINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatSqrtINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatLogINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatLog2INTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatLog10INTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatLog1pINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatExpINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatExp2INTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatExp10INTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatExpm1INTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatSinINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatCosINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatSinCosINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatSinPiINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatCosPiINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatASinINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatASinPiINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatACosINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatACosPiINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatATanINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatATanPiINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatATan2INTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatPowINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatPowRINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpArbitraryFloatPowNINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpLoopControlINTEL: *hasResult = false; *hasResultType = false; break; + case SpvOpAliasDomainDeclINTEL: *hasResult = true; *hasResultType = false; break; + case SpvOpAliasScopeDeclINTEL: *hasResult = true; *hasResultType = false; break; + case SpvOpAliasScopeListDeclINTEL: *hasResult = true; *hasResultType = false; break; + case SpvOpFixedSqrtINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpFixedRecipINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpFixedRsqrtINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpFixedSinINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpFixedCosINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpFixedSinCosINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpFixedSinPiINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpFixedCosPiINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpFixedSinCosPiINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpFixedLogINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpFixedExpINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpPtrCastToCrossWorkgroupINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpCrossWorkgroupCastToPtrINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpReadPipeBlockingINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpWritePipeBlockingINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpFPGARegINTEL: *hasResult = true; *hasResultType = true; break; + case SpvOpRayQueryGetRayTMinKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpRayQueryGetRayFlagsKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpRayQueryGetIntersectionTKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpRayQueryGetIntersectionInstanceCustomIndexKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpRayQueryGetIntersectionInstanceIdKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpRayQueryGetIntersectionGeometryIndexKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpRayQueryGetIntersectionPrimitiveIndexKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpRayQueryGetIntersectionBarycentricsKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpRayQueryGetIntersectionFrontFaceKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpRayQueryGetIntersectionCandidateAABBOpaqueKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpRayQueryGetIntersectionObjectRayDirectionKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpRayQueryGetIntersectionObjectRayOriginKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpRayQueryGetWorldRayDirectionKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpRayQueryGetWorldRayOriginKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpRayQueryGetIntersectionObjectToWorldKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpRayQueryGetIntersectionWorldToObjectKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpAtomicFAddEXT: *hasResult = true; *hasResultType = true; break; + case SpvOpTypeBufferSurfaceINTEL: *hasResult = true; *hasResultType = false; break; + case SpvOpTypeStructContinuedINTEL: *hasResult = false; *hasResultType = false; break; + case SpvOpConstantCompositeContinuedINTEL: *hasResult = false; *hasResultType = false; break; + case SpvOpSpecConstantCompositeContinuedINTEL: *hasResult = false; *hasResultType = false; break; + case SpvOpControlBarrierArriveINTEL: *hasResult = false; *hasResultType = false; break; + case SpvOpControlBarrierWaitINTEL: *hasResult = false; *hasResultType = false; break; + case SpvOpGroupIMulKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupFMulKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupBitwiseAndKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupBitwiseOrKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupBitwiseXorKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupLogicalAndKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupLogicalOrKHR: *hasResult = true; *hasResultType = true; break; + case SpvOpGroupLogicalXorKHR: *hasResult = true; *hasResultType = true; break; + } +} +#endif /* SPV_ENABLE_UTILITY_CODE */ + +#endif + diff --git a/local_packages/include/spirv_cross/spirv.hpp b/local_packages/include/spirv_cross/spirv.hpp new file mode 100644 index 0000000..e25264a --- /dev/null +++ b/local_packages/include/spirv_cross/spirv.hpp @@ -0,0 +1,2579 @@ +// Copyright (c) 2014-2020 The Khronos Group Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and/or associated documentation files (the "Materials"), +// to deal in the Materials without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Materials, and to permit persons to whom the +// Materials are furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Materials. +// +// MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS +// STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND +// HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ +// +// THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS +// IN THE MATERIALS. + +// This header is automatically generated by the same tool that creates +// the Binary Section of the SPIR-V specification. + +// Enumeration tokens for SPIR-V, in various styles: +// C, C++, C++11, JSON, Lua, Python, C#, D, Beef +// +// - C will have tokens with a "Spv" prefix, e.g.: SpvSourceLanguageGLSL +// - C++ will have tokens in the "spv" name space, e.g.: spv::SourceLanguageGLSL +// - C++11 will use enum classes in the spv namespace, e.g.: spv::SourceLanguage::GLSL +// - Lua will use tables, e.g.: spv.SourceLanguage.GLSL +// - Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL'] +// - C# will use enum classes in the Specification class located in the "Spv" namespace, +// e.g.: Spv.Specification.SourceLanguage.GLSL +// - D will have tokens under the "spv" module, e.g: spv.SourceLanguage.GLSL +// - Beef will use enum classes in the Specification class located in the "Spv" namespace, +// e.g.: Spv.Specification.SourceLanguage.GLSL +// +// Some tokens act like mask values, which can be OR'd together, +// while others are mutually exclusive. The mask-like ones have +// "Mask" in their name, and a parallel enum that has the shift +// amount (1 << x) for each corresponding enumerant. + +#ifndef spirv_HPP +#define spirv_HPP + +namespace spv { + +typedef unsigned int Id; + +#define SPV_VERSION 0x10600 +#define SPV_REVISION 1 + +static const unsigned int MagicNumber = 0x07230203; +static const unsigned int Version = 0x00010600; +static const unsigned int Revision = 1; +static const unsigned int OpCodeMask = 0xffff; +static const unsigned int WordCountShift = 16; + +enum SourceLanguage { + SourceLanguageUnknown = 0, + SourceLanguageESSL = 1, + SourceLanguageGLSL = 2, + SourceLanguageOpenCL_C = 3, + SourceLanguageOpenCL_CPP = 4, + SourceLanguageHLSL = 5, + SourceLanguageCPP_for_OpenCL = 6, + SourceLanguageSYCL = 7, + SourceLanguageMax = 0x7fffffff, +}; + +enum ExecutionModel { + ExecutionModelVertex = 0, + ExecutionModelTessellationControl = 1, + ExecutionModelTessellationEvaluation = 2, + ExecutionModelGeometry = 3, + ExecutionModelFragment = 4, + ExecutionModelGLCompute = 5, + ExecutionModelKernel = 6, + ExecutionModelTaskNV = 5267, + ExecutionModelMeshNV = 5268, + ExecutionModelRayGenerationKHR = 5313, + ExecutionModelRayGenerationNV = 5313, + ExecutionModelIntersectionKHR = 5314, + ExecutionModelIntersectionNV = 5314, + ExecutionModelAnyHitKHR = 5315, + ExecutionModelAnyHitNV = 5315, + ExecutionModelClosestHitKHR = 5316, + ExecutionModelClosestHitNV = 5316, + ExecutionModelMissKHR = 5317, + ExecutionModelMissNV = 5317, + ExecutionModelCallableKHR = 5318, + ExecutionModelCallableNV = 5318, + ExecutionModelTaskEXT = 5364, + ExecutionModelMeshEXT = 5365, + ExecutionModelMax = 0x7fffffff, +}; + +enum AddressingModel { + AddressingModelLogical = 0, + AddressingModelPhysical32 = 1, + AddressingModelPhysical64 = 2, + AddressingModelPhysicalStorageBuffer64 = 5348, + AddressingModelPhysicalStorageBuffer64EXT = 5348, + AddressingModelMax = 0x7fffffff, +}; + +enum MemoryModel { + MemoryModelSimple = 0, + MemoryModelGLSL450 = 1, + MemoryModelOpenCL = 2, + MemoryModelVulkan = 3, + MemoryModelVulkanKHR = 3, + MemoryModelMax = 0x7fffffff, +}; + +enum ExecutionMode { + ExecutionModeInvocations = 0, + ExecutionModeSpacingEqual = 1, + ExecutionModeSpacingFractionalEven = 2, + ExecutionModeSpacingFractionalOdd = 3, + ExecutionModeVertexOrderCw = 4, + ExecutionModeVertexOrderCcw = 5, + ExecutionModePixelCenterInteger = 6, + ExecutionModeOriginUpperLeft = 7, + ExecutionModeOriginLowerLeft = 8, + ExecutionModeEarlyFragmentTests = 9, + ExecutionModePointMode = 10, + ExecutionModeXfb = 11, + ExecutionModeDepthReplacing = 12, + ExecutionModeDepthGreater = 14, + ExecutionModeDepthLess = 15, + ExecutionModeDepthUnchanged = 16, + ExecutionModeLocalSize = 17, + ExecutionModeLocalSizeHint = 18, + ExecutionModeInputPoints = 19, + ExecutionModeInputLines = 20, + ExecutionModeInputLinesAdjacency = 21, + ExecutionModeTriangles = 22, + ExecutionModeInputTrianglesAdjacency = 23, + ExecutionModeQuads = 24, + ExecutionModeIsolines = 25, + ExecutionModeOutputVertices = 26, + ExecutionModeOutputPoints = 27, + ExecutionModeOutputLineStrip = 28, + ExecutionModeOutputTriangleStrip = 29, + ExecutionModeVecTypeHint = 30, + ExecutionModeContractionOff = 31, + ExecutionModeInitializer = 33, + ExecutionModeFinalizer = 34, + ExecutionModeSubgroupSize = 35, + ExecutionModeSubgroupsPerWorkgroup = 36, + ExecutionModeSubgroupsPerWorkgroupId = 37, + ExecutionModeLocalSizeId = 38, + ExecutionModeLocalSizeHintId = 39, + ExecutionModeSubgroupUniformControlFlowKHR = 4421, + ExecutionModePostDepthCoverage = 4446, + ExecutionModeDenormPreserve = 4459, + ExecutionModeDenormFlushToZero = 4460, + ExecutionModeSignedZeroInfNanPreserve = 4461, + ExecutionModeRoundingModeRTE = 4462, + ExecutionModeRoundingModeRTZ = 4463, + ExecutionModeEarlyAndLateFragmentTestsAMD = 5017, + ExecutionModeStencilRefReplacingEXT = 5027, + ExecutionModeStencilRefUnchangedFrontAMD = 5079, + ExecutionModeStencilRefGreaterFrontAMD = 5080, + ExecutionModeStencilRefLessFrontAMD = 5081, + ExecutionModeStencilRefUnchangedBackAMD = 5082, + ExecutionModeStencilRefGreaterBackAMD = 5083, + ExecutionModeStencilRefLessBackAMD = 5084, + ExecutionModeOutputLinesEXT = 5269, + ExecutionModeOutputLinesNV = 5269, + ExecutionModeOutputPrimitivesEXT = 5270, + ExecutionModeOutputPrimitivesNV = 5270, + ExecutionModeDerivativeGroupQuadsNV = 5289, + ExecutionModeDerivativeGroupLinearNV = 5290, + ExecutionModeOutputTrianglesEXT = 5298, + ExecutionModeOutputTrianglesNV = 5298, + ExecutionModePixelInterlockOrderedEXT = 5366, + ExecutionModePixelInterlockUnorderedEXT = 5367, + ExecutionModeSampleInterlockOrderedEXT = 5368, + ExecutionModeSampleInterlockUnorderedEXT = 5369, + ExecutionModeShadingRateInterlockOrderedEXT = 5370, + ExecutionModeShadingRateInterlockUnorderedEXT = 5371, + ExecutionModeSharedLocalMemorySizeINTEL = 5618, + ExecutionModeRoundingModeRTPINTEL = 5620, + ExecutionModeRoundingModeRTNINTEL = 5621, + ExecutionModeFloatingPointModeALTINTEL = 5622, + ExecutionModeFloatingPointModeIEEEINTEL = 5623, + ExecutionModeMaxWorkgroupSizeINTEL = 5893, + ExecutionModeMaxWorkDimINTEL = 5894, + ExecutionModeNoGlobalOffsetINTEL = 5895, + ExecutionModeNumSIMDWorkitemsINTEL = 5896, + ExecutionModeSchedulerTargetFmaxMhzINTEL = 5903, + ExecutionModeNamedBarrierCountINTEL = 6417, + ExecutionModeMax = 0x7fffffff, +}; + +enum StorageClass { + StorageClassUniformConstant = 0, + StorageClassInput = 1, + StorageClassUniform = 2, + StorageClassOutput = 3, + StorageClassWorkgroup = 4, + StorageClassCrossWorkgroup = 5, + StorageClassPrivate = 6, + StorageClassFunction = 7, + StorageClassGeneric = 8, + StorageClassPushConstant = 9, + StorageClassAtomicCounter = 10, + StorageClassImage = 11, + StorageClassStorageBuffer = 12, + StorageClassCallableDataKHR = 5328, + StorageClassCallableDataNV = 5328, + StorageClassIncomingCallableDataKHR = 5329, + StorageClassIncomingCallableDataNV = 5329, + StorageClassRayPayloadKHR = 5338, + StorageClassRayPayloadNV = 5338, + StorageClassHitAttributeKHR = 5339, + StorageClassHitAttributeNV = 5339, + StorageClassIncomingRayPayloadKHR = 5342, + StorageClassIncomingRayPayloadNV = 5342, + StorageClassShaderRecordBufferKHR = 5343, + StorageClassShaderRecordBufferNV = 5343, + StorageClassPhysicalStorageBuffer = 5349, + StorageClassPhysicalStorageBufferEXT = 5349, + StorageClassTaskPayloadWorkgroupEXT = 5402, + StorageClassCodeSectionINTEL = 5605, + StorageClassDeviceOnlyINTEL = 5936, + StorageClassHostOnlyINTEL = 5937, + StorageClassMax = 0x7fffffff, +}; + +enum Dim { + Dim1D = 0, + Dim2D = 1, + Dim3D = 2, + DimCube = 3, + DimRect = 4, + DimBuffer = 5, + DimSubpassData = 6, + DimMax = 0x7fffffff, +}; + +enum SamplerAddressingMode { + SamplerAddressingModeNone = 0, + SamplerAddressingModeClampToEdge = 1, + SamplerAddressingModeClamp = 2, + SamplerAddressingModeRepeat = 3, + SamplerAddressingModeRepeatMirrored = 4, + SamplerAddressingModeMax = 0x7fffffff, +}; + +enum SamplerFilterMode { + SamplerFilterModeNearest = 0, + SamplerFilterModeLinear = 1, + SamplerFilterModeMax = 0x7fffffff, +}; + +enum ImageFormat { + ImageFormatUnknown = 0, + ImageFormatRgba32f = 1, + ImageFormatRgba16f = 2, + ImageFormatR32f = 3, + ImageFormatRgba8 = 4, + ImageFormatRgba8Snorm = 5, + ImageFormatRg32f = 6, + ImageFormatRg16f = 7, + ImageFormatR11fG11fB10f = 8, + ImageFormatR16f = 9, + ImageFormatRgba16 = 10, + ImageFormatRgb10A2 = 11, + ImageFormatRg16 = 12, + ImageFormatRg8 = 13, + ImageFormatR16 = 14, + ImageFormatR8 = 15, + ImageFormatRgba16Snorm = 16, + ImageFormatRg16Snorm = 17, + ImageFormatRg8Snorm = 18, + ImageFormatR16Snorm = 19, + ImageFormatR8Snorm = 20, + ImageFormatRgba32i = 21, + ImageFormatRgba16i = 22, + ImageFormatRgba8i = 23, + ImageFormatR32i = 24, + ImageFormatRg32i = 25, + ImageFormatRg16i = 26, + ImageFormatRg8i = 27, + ImageFormatR16i = 28, + ImageFormatR8i = 29, + ImageFormatRgba32ui = 30, + ImageFormatRgba16ui = 31, + ImageFormatRgba8ui = 32, + ImageFormatR32ui = 33, + ImageFormatRgb10a2ui = 34, + ImageFormatRg32ui = 35, + ImageFormatRg16ui = 36, + ImageFormatRg8ui = 37, + ImageFormatR16ui = 38, + ImageFormatR8ui = 39, + ImageFormatR64ui = 40, + ImageFormatR64i = 41, + ImageFormatMax = 0x7fffffff, +}; + +enum ImageChannelOrder { + ImageChannelOrderR = 0, + ImageChannelOrderA = 1, + ImageChannelOrderRG = 2, + ImageChannelOrderRA = 3, + ImageChannelOrderRGB = 4, + ImageChannelOrderRGBA = 5, + ImageChannelOrderBGRA = 6, + ImageChannelOrderARGB = 7, + ImageChannelOrderIntensity = 8, + ImageChannelOrderLuminance = 9, + ImageChannelOrderRx = 10, + ImageChannelOrderRGx = 11, + ImageChannelOrderRGBx = 12, + ImageChannelOrderDepth = 13, + ImageChannelOrderDepthStencil = 14, + ImageChannelOrdersRGB = 15, + ImageChannelOrdersRGBx = 16, + ImageChannelOrdersRGBA = 17, + ImageChannelOrdersBGRA = 18, + ImageChannelOrderABGR = 19, + ImageChannelOrderMax = 0x7fffffff, +}; + +enum ImageChannelDataType { + ImageChannelDataTypeSnormInt8 = 0, + ImageChannelDataTypeSnormInt16 = 1, + ImageChannelDataTypeUnormInt8 = 2, + ImageChannelDataTypeUnormInt16 = 3, + ImageChannelDataTypeUnormShort565 = 4, + ImageChannelDataTypeUnormShort555 = 5, + ImageChannelDataTypeUnormInt101010 = 6, + ImageChannelDataTypeSignedInt8 = 7, + ImageChannelDataTypeSignedInt16 = 8, + ImageChannelDataTypeSignedInt32 = 9, + ImageChannelDataTypeUnsignedInt8 = 10, + ImageChannelDataTypeUnsignedInt16 = 11, + ImageChannelDataTypeUnsignedInt32 = 12, + ImageChannelDataTypeHalfFloat = 13, + ImageChannelDataTypeFloat = 14, + ImageChannelDataTypeUnormInt24 = 15, + ImageChannelDataTypeUnormInt101010_2 = 16, + ImageChannelDataTypeMax = 0x7fffffff, +}; + +enum ImageOperandsShift { + ImageOperandsBiasShift = 0, + ImageOperandsLodShift = 1, + ImageOperandsGradShift = 2, + ImageOperandsConstOffsetShift = 3, + ImageOperandsOffsetShift = 4, + ImageOperandsConstOffsetsShift = 5, + ImageOperandsSampleShift = 6, + ImageOperandsMinLodShift = 7, + ImageOperandsMakeTexelAvailableShift = 8, + ImageOperandsMakeTexelAvailableKHRShift = 8, + ImageOperandsMakeTexelVisibleShift = 9, + ImageOperandsMakeTexelVisibleKHRShift = 9, + ImageOperandsNonPrivateTexelShift = 10, + ImageOperandsNonPrivateTexelKHRShift = 10, + ImageOperandsVolatileTexelShift = 11, + ImageOperandsVolatileTexelKHRShift = 11, + ImageOperandsSignExtendShift = 12, + ImageOperandsZeroExtendShift = 13, + ImageOperandsNontemporalShift = 14, + ImageOperandsOffsetsShift = 16, + ImageOperandsMax = 0x7fffffff, +}; + +enum ImageOperandsMask { + ImageOperandsMaskNone = 0, + ImageOperandsBiasMask = 0x00000001, + ImageOperandsLodMask = 0x00000002, + ImageOperandsGradMask = 0x00000004, + ImageOperandsConstOffsetMask = 0x00000008, + ImageOperandsOffsetMask = 0x00000010, + ImageOperandsConstOffsetsMask = 0x00000020, + ImageOperandsSampleMask = 0x00000040, + ImageOperandsMinLodMask = 0x00000080, + ImageOperandsMakeTexelAvailableMask = 0x00000100, + ImageOperandsMakeTexelAvailableKHRMask = 0x00000100, + ImageOperandsMakeTexelVisibleMask = 0x00000200, + ImageOperandsMakeTexelVisibleKHRMask = 0x00000200, + ImageOperandsNonPrivateTexelMask = 0x00000400, + ImageOperandsNonPrivateTexelKHRMask = 0x00000400, + ImageOperandsVolatileTexelMask = 0x00000800, + ImageOperandsVolatileTexelKHRMask = 0x00000800, + ImageOperandsSignExtendMask = 0x00001000, + ImageOperandsZeroExtendMask = 0x00002000, + ImageOperandsNontemporalMask = 0x00004000, + ImageOperandsOffsetsMask = 0x00010000, +}; + +enum FPFastMathModeShift { + FPFastMathModeNotNaNShift = 0, + FPFastMathModeNotInfShift = 1, + FPFastMathModeNSZShift = 2, + FPFastMathModeAllowRecipShift = 3, + FPFastMathModeFastShift = 4, + FPFastMathModeAllowContractFastINTELShift = 16, + FPFastMathModeAllowReassocINTELShift = 17, + FPFastMathModeMax = 0x7fffffff, +}; + +enum FPFastMathModeMask { + FPFastMathModeMaskNone = 0, + FPFastMathModeNotNaNMask = 0x00000001, + FPFastMathModeNotInfMask = 0x00000002, + FPFastMathModeNSZMask = 0x00000004, + FPFastMathModeAllowRecipMask = 0x00000008, + FPFastMathModeFastMask = 0x00000010, + FPFastMathModeAllowContractFastINTELMask = 0x00010000, + FPFastMathModeAllowReassocINTELMask = 0x00020000, +}; + +enum FPRoundingMode { + FPRoundingModeRTE = 0, + FPRoundingModeRTZ = 1, + FPRoundingModeRTP = 2, + FPRoundingModeRTN = 3, + FPRoundingModeMax = 0x7fffffff, +}; + +enum LinkageType { + LinkageTypeExport = 0, + LinkageTypeImport = 1, + LinkageTypeLinkOnceODR = 2, + LinkageTypeMax = 0x7fffffff, +}; + +enum AccessQualifier { + AccessQualifierReadOnly = 0, + AccessQualifierWriteOnly = 1, + AccessQualifierReadWrite = 2, + AccessQualifierMax = 0x7fffffff, +}; + +enum FunctionParameterAttribute { + FunctionParameterAttributeZext = 0, + FunctionParameterAttributeSext = 1, + FunctionParameterAttributeByVal = 2, + FunctionParameterAttributeSret = 3, + FunctionParameterAttributeNoAlias = 4, + FunctionParameterAttributeNoCapture = 5, + FunctionParameterAttributeNoWrite = 6, + FunctionParameterAttributeNoReadWrite = 7, + FunctionParameterAttributeMax = 0x7fffffff, +}; + +enum Decoration { + DecorationRelaxedPrecision = 0, + DecorationSpecId = 1, + DecorationBlock = 2, + DecorationBufferBlock = 3, + DecorationRowMajor = 4, + DecorationColMajor = 5, + DecorationArrayStride = 6, + DecorationMatrixStride = 7, + DecorationGLSLShared = 8, + DecorationGLSLPacked = 9, + DecorationCPacked = 10, + DecorationBuiltIn = 11, + DecorationNoPerspective = 13, + DecorationFlat = 14, + DecorationPatch = 15, + DecorationCentroid = 16, + DecorationSample = 17, + DecorationInvariant = 18, + DecorationRestrict = 19, + DecorationAliased = 20, + DecorationVolatile = 21, + DecorationConstant = 22, + DecorationCoherent = 23, + DecorationNonWritable = 24, + DecorationNonReadable = 25, + DecorationUniform = 26, + DecorationUniformId = 27, + DecorationSaturatedConversion = 28, + DecorationStream = 29, + DecorationLocation = 30, + DecorationComponent = 31, + DecorationIndex = 32, + DecorationBinding = 33, + DecorationDescriptorSet = 34, + DecorationOffset = 35, + DecorationXfbBuffer = 36, + DecorationXfbStride = 37, + DecorationFuncParamAttr = 38, + DecorationFPRoundingMode = 39, + DecorationFPFastMathMode = 40, + DecorationLinkageAttributes = 41, + DecorationNoContraction = 42, + DecorationInputAttachmentIndex = 43, + DecorationAlignment = 44, + DecorationMaxByteOffset = 45, + DecorationAlignmentId = 46, + DecorationMaxByteOffsetId = 47, + DecorationNoSignedWrap = 4469, + DecorationNoUnsignedWrap = 4470, + DecorationExplicitInterpAMD = 4999, + DecorationOverrideCoverageNV = 5248, + DecorationPassthroughNV = 5250, + DecorationViewportRelativeNV = 5252, + DecorationSecondaryViewportRelativeNV = 5256, + DecorationPerPrimitiveEXT = 5271, + DecorationPerPrimitiveNV = 5271, + DecorationPerViewNV = 5272, + DecorationPerTaskNV = 5273, + DecorationPerVertexKHR = 5285, + DecorationPerVertexNV = 5285, + DecorationNonUniform = 5300, + DecorationNonUniformEXT = 5300, + DecorationRestrictPointer = 5355, + DecorationRestrictPointerEXT = 5355, + DecorationAliasedPointer = 5356, + DecorationAliasedPointerEXT = 5356, + DecorationBindlessSamplerNV = 5398, + DecorationBindlessImageNV = 5399, + DecorationBoundSamplerNV = 5400, + DecorationBoundImageNV = 5401, + DecorationSIMTCallINTEL = 5599, + DecorationReferencedIndirectlyINTEL = 5602, + DecorationClobberINTEL = 5607, + DecorationSideEffectsINTEL = 5608, + DecorationVectorComputeVariableINTEL = 5624, + DecorationFuncParamIOKindINTEL = 5625, + DecorationVectorComputeFunctionINTEL = 5626, + DecorationStackCallINTEL = 5627, + DecorationGlobalVariableOffsetINTEL = 5628, + DecorationCounterBuffer = 5634, + DecorationHlslCounterBufferGOOGLE = 5634, + DecorationHlslSemanticGOOGLE = 5635, + DecorationUserSemantic = 5635, + DecorationUserTypeGOOGLE = 5636, + DecorationFunctionRoundingModeINTEL = 5822, + DecorationFunctionDenormModeINTEL = 5823, + DecorationRegisterINTEL = 5825, + DecorationMemoryINTEL = 5826, + DecorationNumbanksINTEL = 5827, + DecorationBankwidthINTEL = 5828, + DecorationMaxPrivateCopiesINTEL = 5829, + DecorationSinglepumpINTEL = 5830, + DecorationDoublepumpINTEL = 5831, + DecorationMaxReplicatesINTEL = 5832, + DecorationSimpleDualPortINTEL = 5833, + DecorationMergeINTEL = 5834, + DecorationBankBitsINTEL = 5835, + DecorationForcePow2DepthINTEL = 5836, + DecorationBurstCoalesceINTEL = 5899, + DecorationCacheSizeINTEL = 5900, + DecorationDontStaticallyCoalesceINTEL = 5901, + DecorationPrefetchINTEL = 5902, + DecorationStallEnableINTEL = 5905, + DecorationFuseLoopsInFunctionINTEL = 5907, + DecorationAliasScopeINTEL = 5914, + DecorationNoAliasINTEL = 5915, + DecorationBufferLocationINTEL = 5921, + DecorationIOPipeStorageINTEL = 5944, + DecorationFunctionFloatingPointModeINTEL = 6080, + DecorationSingleElementVectorINTEL = 6085, + DecorationVectorComputeCallableFunctionINTEL = 6087, + DecorationMediaBlockIOINTEL = 6140, + DecorationMax = 0x7fffffff, +}; + +enum BuiltIn { + BuiltInPosition = 0, + BuiltInPointSize = 1, + BuiltInClipDistance = 3, + BuiltInCullDistance = 4, + BuiltInVertexId = 5, + BuiltInInstanceId = 6, + BuiltInPrimitiveId = 7, + BuiltInInvocationId = 8, + BuiltInLayer = 9, + BuiltInViewportIndex = 10, + BuiltInTessLevelOuter = 11, + BuiltInTessLevelInner = 12, + BuiltInTessCoord = 13, + BuiltInPatchVertices = 14, + BuiltInFragCoord = 15, + BuiltInPointCoord = 16, + BuiltInFrontFacing = 17, + BuiltInSampleId = 18, + BuiltInSamplePosition = 19, + BuiltInSampleMask = 20, + BuiltInFragDepth = 22, + BuiltInHelperInvocation = 23, + BuiltInNumWorkgroups = 24, + BuiltInWorkgroupSize = 25, + BuiltInWorkgroupId = 26, + BuiltInLocalInvocationId = 27, + BuiltInGlobalInvocationId = 28, + BuiltInLocalInvocationIndex = 29, + BuiltInWorkDim = 30, + BuiltInGlobalSize = 31, + BuiltInEnqueuedWorkgroupSize = 32, + BuiltInGlobalOffset = 33, + BuiltInGlobalLinearId = 34, + BuiltInSubgroupSize = 36, + BuiltInSubgroupMaxSize = 37, + BuiltInNumSubgroups = 38, + BuiltInNumEnqueuedSubgroups = 39, + BuiltInSubgroupId = 40, + BuiltInSubgroupLocalInvocationId = 41, + BuiltInVertexIndex = 42, + BuiltInInstanceIndex = 43, + BuiltInSubgroupEqMask = 4416, + BuiltInSubgroupEqMaskKHR = 4416, + BuiltInSubgroupGeMask = 4417, + BuiltInSubgroupGeMaskKHR = 4417, + BuiltInSubgroupGtMask = 4418, + BuiltInSubgroupGtMaskKHR = 4418, + BuiltInSubgroupLeMask = 4419, + BuiltInSubgroupLeMaskKHR = 4419, + BuiltInSubgroupLtMask = 4420, + BuiltInSubgroupLtMaskKHR = 4420, + BuiltInBaseVertex = 4424, + BuiltInBaseInstance = 4425, + BuiltInDrawIndex = 4426, + BuiltInPrimitiveShadingRateKHR = 4432, + BuiltInDeviceIndex = 4438, + BuiltInViewIndex = 4440, + BuiltInShadingRateKHR = 4444, + BuiltInBaryCoordNoPerspAMD = 4992, + BuiltInBaryCoordNoPerspCentroidAMD = 4993, + BuiltInBaryCoordNoPerspSampleAMD = 4994, + BuiltInBaryCoordSmoothAMD = 4995, + BuiltInBaryCoordSmoothCentroidAMD = 4996, + BuiltInBaryCoordSmoothSampleAMD = 4997, + BuiltInBaryCoordPullModelAMD = 4998, + BuiltInFragStencilRefEXT = 5014, + BuiltInViewportMaskNV = 5253, + BuiltInSecondaryPositionNV = 5257, + BuiltInSecondaryViewportMaskNV = 5258, + BuiltInPositionPerViewNV = 5261, + BuiltInViewportMaskPerViewNV = 5262, + BuiltInFullyCoveredEXT = 5264, + BuiltInTaskCountNV = 5274, + BuiltInPrimitiveCountNV = 5275, + BuiltInPrimitiveIndicesNV = 5276, + BuiltInClipDistancePerViewNV = 5277, + BuiltInCullDistancePerViewNV = 5278, + BuiltInLayerPerViewNV = 5279, + BuiltInMeshViewCountNV = 5280, + BuiltInMeshViewIndicesNV = 5281, + BuiltInBaryCoordKHR = 5286, + BuiltInBaryCoordNV = 5286, + BuiltInBaryCoordNoPerspKHR = 5287, + BuiltInBaryCoordNoPerspNV = 5287, + BuiltInFragSizeEXT = 5292, + BuiltInFragmentSizeNV = 5292, + BuiltInFragInvocationCountEXT = 5293, + BuiltInInvocationsPerPixelNV = 5293, + BuiltInPrimitivePointIndicesEXT = 5294, + BuiltInPrimitiveLineIndicesEXT = 5295, + BuiltInPrimitiveTriangleIndicesEXT = 5296, + BuiltInCullPrimitiveEXT = 5299, + BuiltInLaunchIdKHR = 5319, + BuiltInLaunchIdNV = 5319, + BuiltInLaunchSizeKHR = 5320, + BuiltInLaunchSizeNV = 5320, + BuiltInWorldRayOriginKHR = 5321, + BuiltInWorldRayOriginNV = 5321, + BuiltInWorldRayDirectionKHR = 5322, + BuiltInWorldRayDirectionNV = 5322, + BuiltInObjectRayOriginKHR = 5323, + BuiltInObjectRayOriginNV = 5323, + BuiltInObjectRayDirectionKHR = 5324, + BuiltInObjectRayDirectionNV = 5324, + BuiltInRayTminKHR = 5325, + BuiltInRayTminNV = 5325, + BuiltInRayTmaxKHR = 5326, + BuiltInRayTmaxNV = 5326, + BuiltInInstanceCustomIndexKHR = 5327, + BuiltInInstanceCustomIndexNV = 5327, + BuiltInObjectToWorldKHR = 5330, + BuiltInObjectToWorldNV = 5330, + BuiltInWorldToObjectKHR = 5331, + BuiltInWorldToObjectNV = 5331, + BuiltInHitTNV = 5332, + BuiltInHitKindKHR = 5333, + BuiltInHitKindNV = 5333, + BuiltInCurrentRayTimeNV = 5334, + BuiltInIncomingRayFlagsKHR = 5351, + BuiltInIncomingRayFlagsNV = 5351, + BuiltInRayGeometryIndexKHR = 5352, + BuiltInWarpsPerSMNV = 5374, + BuiltInSMCountNV = 5375, + BuiltInWarpIDNV = 5376, + BuiltInSMIDNV = 5377, + BuiltInCullMaskKHR = 6021, + BuiltInMax = 0x7fffffff, +}; + +enum SelectionControlShift { + SelectionControlFlattenShift = 0, + SelectionControlDontFlattenShift = 1, + SelectionControlMax = 0x7fffffff, +}; + +enum SelectionControlMask { + SelectionControlMaskNone = 0, + SelectionControlFlattenMask = 0x00000001, + SelectionControlDontFlattenMask = 0x00000002, +}; + +enum LoopControlShift { + LoopControlUnrollShift = 0, + LoopControlDontUnrollShift = 1, + LoopControlDependencyInfiniteShift = 2, + LoopControlDependencyLengthShift = 3, + LoopControlMinIterationsShift = 4, + LoopControlMaxIterationsShift = 5, + LoopControlIterationMultipleShift = 6, + LoopControlPeelCountShift = 7, + LoopControlPartialCountShift = 8, + LoopControlInitiationIntervalINTELShift = 16, + LoopControlMaxConcurrencyINTELShift = 17, + LoopControlDependencyArrayINTELShift = 18, + LoopControlPipelineEnableINTELShift = 19, + LoopControlLoopCoalesceINTELShift = 20, + LoopControlMaxInterleavingINTELShift = 21, + LoopControlSpeculatedIterationsINTELShift = 22, + LoopControlNoFusionINTELShift = 23, + LoopControlMax = 0x7fffffff, +}; + +enum LoopControlMask { + LoopControlMaskNone = 0, + LoopControlUnrollMask = 0x00000001, + LoopControlDontUnrollMask = 0x00000002, + LoopControlDependencyInfiniteMask = 0x00000004, + LoopControlDependencyLengthMask = 0x00000008, + LoopControlMinIterationsMask = 0x00000010, + LoopControlMaxIterationsMask = 0x00000020, + LoopControlIterationMultipleMask = 0x00000040, + LoopControlPeelCountMask = 0x00000080, + LoopControlPartialCountMask = 0x00000100, + LoopControlInitiationIntervalINTELMask = 0x00010000, + LoopControlMaxConcurrencyINTELMask = 0x00020000, + LoopControlDependencyArrayINTELMask = 0x00040000, + LoopControlPipelineEnableINTELMask = 0x00080000, + LoopControlLoopCoalesceINTELMask = 0x00100000, + LoopControlMaxInterleavingINTELMask = 0x00200000, + LoopControlSpeculatedIterationsINTELMask = 0x00400000, + LoopControlNoFusionINTELMask = 0x00800000, +}; + +enum FunctionControlShift { + FunctionControlInlineShift = 0, + FunctionControlDontInlineShift = 1, + FunctionControlPureShift = 2, + FunctionControlConstShift = 3, + FunctionControlOptNoneINTELShift = 16, + FunctionControlMax = 0x7fffffff, +}; + +enum FunctionControlMask { + FunctionControlMaskNone = 0, + FunctionControlInlineMask = 0x00000001, + FunctionControlDontInlineMask = 0x00000002, + FunctionControlPureMask = 0x00000004, + FunctionControlConstMask = 0x00000008, + FunctionControlOptNoneINTELMask = 0x00010000, +}; + +enum MemorySemanticsShift { + MemorySemanticsAcquireShift = 1, + MemorySemanticsReleaseShift = 2, + MemorySemanticsAcquireReleaseShift = 3, + MemorySemanticsSequentiallyConsistentShift = 4, + MemorySemanticsUniformMemoryShift = 6, + MemorySemanticsSubgroupMemoryShift = 7, + MemorySemanticsWorkgroupMemoryShift = 8, + MemorySemanticsCrossWorkgroupMemoryShift = 9, + MemorySemanticsAtomicCounterMemoryShift = 10, + MemorySemanticsImageMemoryShift = 11, + MemorySemanticsOutputMemoryShift = 12, + MemorySemanticsOutputMemoryKHRShift = 12, + MemorySemanticsMakeAvailableShift = 13, + MemorySemanticsMakeAvailableKHRShift = 13, + MemorySemanticsMakeVisibleShift = 14, + MemorySemanticsMakeVisibleKHRShift = 14, + MemorySemanticsVolatileShift = 15, + MemorySemanticsMax = 0x7fffffff, +}; + +enum MemorySemanticsMask { + MemorySemanticsMaskNone = 0, + MemorySemanticsAcquireMask = 0x00000002, + MemorySemanticsReleaseMask = 0x00000004, + MemorySemanticsAcquireReleaseMask = 0x00000008, + MemorySemanticsSequentiallyConsistentMask = 0x00000010, + MemorySemanticsUniformMemoryMask = 0x00000040, + MemorySemanticsSubgroupMemoryMask = 0x00000080, + MemorySemanticsWorkgroupMemoryMask = 0x00000100, + MemorySemanticsCrossWorkgroupMemoryMask = 0x00000200, + MemorySemanticsAtomicCounterMemoryMask = 0x00000400, + MemorySemanticsImageMemoryMask = 0x00000800, + MemorySemanticsOutputMemoryMask = 0x00001000, + MemorySemanticsOutputMemoryKHRMask = 0x00001000, + MemorySemanticsMakeAvailableMask = 0x00002000, + MemorySemanticsMakeAvailableKHRMask = 0x00002000, + MemorySemanticsMakeVisibleMask = 0x00004000, + MemorySemanticsMakeVisibleKHRMask = 0x00004000, + MemorySemanticsVolatileMask = 0x00008000, +}; + +enum MemoryAccessShift { + MemoryAccessVolatileShift = 0, + MemoryAccessAlignedShift = 1, + MemoryAccessNontemporalShift = 2, + MemoryAccessMakePointerAvailableShift = 3, + MemoryAccessMakePointerAvailableKHRShift = 3, + MemoryAccessMakePointerVisibleShift = 4, + MemoryAccessMakePointerVisibleKHRShift = 4, + MemoryAccessNonPrivatePointerShift = 5, + MemoryAccessNonPrivatePointerKHRShift = 5, + MemoryAccessAliasScopeINTELMaskShift = 16, + MemoryAccessNoAliasINTELMaskShift = 17, + MemoryAccessMax = 0x7fffffff, +}; + +enum MemoryAccessMask { + MemoryAccessMaskNone = 0, + MemoryAccessVolatileMask = 0x00000001, + MemoryAccessAlignedMask = 0x00000002, + MemoryAccessNontemporalMask = 0x00000004, + MemoryAccessMakePointerAvailableMask = 0x00000008, + MemoryAccessMakePointerAvailableKHRMask = 0x00000008, + MemoryAccessMakePointerVisibleMask = 0x00000010, + MemoryAccessMakePointerVisibleKHRMask = 0x00000010, + MemoryAccessNonPrivatePointerMask = 0x00000020, + MemoryAccessNonPrivatePointerKHRMask = 0x00000020, + MemoryAccessAliasScopeINTELMaskMask = 0x00010000, + MemoryAccessNoAliasINTELMaskMask = 0x00020000, +}; + +enum Scope { + ScopeCrossDevice = 0, + ScopeDevice = 1, + ScopeWorkgroup = 2, + ScopeSubgroup = 3, + ScopeInvocation = 4, + ScopeQueueFamily = 5, + ScopeQueueFamilyKHR = 5, + ScopeShaderCallKHR = 6, + ScopeMax = 0x7fffffff, +}; + +enum GroupOperation { + GroupOperationReduce = 0, + GroupOperationInclusiveScan = 1, + GroupOperationExclusiveScan = 2, + GroupOperationClusteredReduce = 3, + GroupOperationPartitionedReduceNV = 6, + GroupOperationPartitionedInclusiveScanNV = 7, + GroupOperationPartitionedExclusiveScanNV = 8, + GroupOperationMax = 0x7fffffff, +}; + +enum KernelEnqueueFlags { + KernelEnqueueFlagsNoWait = 0, + KernelEnqueueFlagsWaitKernel = 1, + KernelEnqueueFlagsWaitWorkGroup = 2, + KernelEnqueueFlagsMax = 0x7fffffff, +}; + +enum KernelProfilingInfoShift { + KernelProfilingInfoCmdExecTimeShift = 0, + KernelProfilingInfoMax = 0x7fffffff, +}; + +enum KernelProfilingInfoMask { + KernelProfilingInfoMaskNone = 0, + KernelProfilingInfoCmdExecTimeMask = 0x00000001, +}; + +enum Capability { + CapabilityMatrix = 0, + CapabilityShader = 1, + CapabilityGeometry = 2, + CapabilityTessellation = 3, + CapabilityAddresses = 4, + CapabilityLinkage = 5, + CapabilityKernel = 6, + CapabilityVector16 = 7, + CapabilityFloat16Buffer = 8, + CapabilityFloat16 = 9, + CapabilityFloat64 = 10, + CapabilityInt64 = 11, + CapabilityInt64Atomics = 12, + CapabilityImageBasic = 13, + CapabilityImageReadWrite = 14, + CapabilityImageMipmap = 15, + CapabilityPipes = 17, + CapabilityGroups = 18, + CapabilityDeviceEnqueue = 19, + CapabilityLiteralSampler = 20, + CapabilityAtomicStorage = 21, + CapabilityInt16 = 22, + CapabilityTessellationPointSize = 23, + CapabilityGeometryPointSize = 24, + CapabilityImageGatherExtended = 25, + CapabilityStorageImageMultisample = 27, + CapabilityUniformBufferArrayDynamicIndexing = 28, + CapabilitySampledImageArrayDynamicIndexing = 29, + CapabilityStorageBufferArrayDynamicIndexing = 30, + CapabilityStorageImageArrayDynamicIndexing = 31, + CapabilityClipDistance = 32, + CapabilityCullDistance = 33, + CapabilityImageCubeArray = 34, + CapabilitySampleRateShading = 35, + CapabilityImageRect = 36, + CapabilitySampledRect = 37, + CapabilityGenericPointer = 38, + CapabilityInt8 = 39, + CapabilityInputAttachment = 40, + CapabilitySparseResidency = 41, + CapabilityMinLod = 42, + CapabilitySampled1D = 43, + CapabilityImage1D = 44, + CapabilitySampledCubeArray = 45, + CapabilitySampledBuffer = 46, + CapabilityImageBuffer = 47, + CapabilityImageMSArray = 48, + CapabilityStorageImageExtendedFormats = 49, + CapabilityImageQuery = 50, + CapabilityDerivativeControl = 51, + CapabilityInterpolationFunction = 52, + CapabilityTransformFeedback = 53, + CapabilityGeometryStreams = 54, + CapabilityStorageImageReadWithoutFormat = 55, + CapabilityStorageImageWriteWithoutFormat = 56, + CapabilityMultiViewport = 57, + CapabilitySubgroupDispatch = 58, + CapabilityNamedBarrier = 59, + CapabilityPipeStorage = 60, + CapabilityGroupNonUniform = 61, + CapabilityGroupNonUniformVote = 62, + CapabilityGroupNonUniformArithmetic = 63, + CapabilityGroupNonUniformBallot = 64, + CapabilityGroupNonUniformShuffle = 65, + CapabilityGroupNonUniformShuffleRelative = 66, + CapabilityGroupNonUniformClustered = 67, + CapabilityGroupNonUniformQuad = 68, + CapabilityShaderLayer = 69, + CapabilityShaderViewportIndex = 70, + CapabilityUniformDecoration = 71, + CapabilityFragmentShadingRateKHR = 4422, + CapabilitySubgroupBallotKHR = 4423, + CapabilityDrawParameters = 4427, + CapabilityWorkgroupMemoryExplicitLayoutKHR = 4428, + CapabilityWorkgroupMemoryExplicitLayout8BitAccessKHR = 4429, + CapabilityWorkgroupMemoryExplicitLayout16BitAccessKHR = 4430, + CapabilitySubgroupVoteKHR = 4431, + CapabilityStorageBuffer16BitAccess = 4433, + CapabilityStorageUniformBufferBlock16 = 4433, + CapabilityStorageUniform16 = 4434, + CapabilityUniformAndStorageBuffer16BitAccess = 4434, + CapabilityStoragePushConstant16 = 4435, + CapabilityStorageInputOutput16 = 4436, + CapabilityDeviceGroup = 4437, + CapabilityMultiView = 4439, + CapabilityVariablePointersStorageBuffer = 4441, + CapabilityVariablePointers = 4442, + CapabilityAtomicStorageOps = 4445, + CapabilitySampleMaskPostDepthCoverage = 4447, + CapabilityStorageBuffer8BitAccess = 4448, + CapabilityUniformAndStorageBuffer8BitAccess = 4449, + CapabilityStoragePushConstant8 = 4450, + CapabilityDenormPreserve = 4464, + CapabilityDenormFlushToZero = 4465, + CapabilitySignedZeroInfNanPreserve = 4466, + CapabilityRoundingModeRTE = 4467, + CapabilityRoundingModeRTZ = 4468, + CapabilityRayQueryProvisionalKHR = 4471, + CapabilityRayQueryKHR = 4472, + CapabilityRayTraversalPrimitiveCullingKHR = 4478, + CapabilityRayTracingKHR = 4479, + CapabilityFloat16ImageAMD = 5008, + CapabilityImageGatherBiasLodAMD = 5009, + CapabilityFragmentMaskAMD = 5010, + CapabilityStencilExportEXT = 5013, + CapabilityImageReadWriteLodAMD = 5015, + CapabilityInt64ImageEXT = 5016, + CapabilityShaderClockKHR = 5055, + CapabilitySampleMaskOverrideCoverageNV = 5249, + CapabilityGeometryShaderPassthroughNV = 5251, + CapabilityShaderViewportIndexLayerEXT = 5254, + CapabilityShaderViewportIndexLayerNV = 5254, + CapabilityShaderViewportMaskNV = 5255, + CapabilityShaderStereoViewNV = 5259, + CapabilityPerViewAttributesNV = 5260, + CapabilityFragmentFullyCoveredEXT = 5265, + CapabilityMeshShadingNV = 5266, + CapabilityImageFootprintNV = 5282, + CapabilityMeshShadingEXT = 5283, + CapabilityFragmentBarycentricKHR = 5284, + CapabilityFragmentBarycentricNV = 5284, + CapabilityComputeDerivativeGroupQuadsNV = 5288, + CapabilityFragmentDensityEXT = 5291, + CapabilityShadingRateNV = 5291, + CapabilityGroupNonUniformPartitionedNV = 5297, + CapabilityShaderNonUniform = 5301, + CapabilityShaderNonUniformEXT = 5301, + CapabilityRuntimeDescriptorArray = 5302, + CapabilityRuntimeDescriptorArrayEXT = 5302, + CapabilityInputAttachmentArrayDynamicIndexing = 5303, + CapabilityInputAttachmentArrayDynamicIndexingEXT = 5303, + CapabilityUniformTexelBufferArrayDynamicIndexing = 5304, + CapabilityUniformTexelBufferArrayDynamicIndexingEXT = 5304, + CapabilityStorageTexelBufferArrayDynamicIndexing = 5305, + CapabilityStorageTexelBufferArrayDynamicIndexingEXT = 5305, + CapabilityUniformBufferArrayNonUniformIndexing = 5306, + CapabilityUniformBufferArrayNonUniformIndexingEXT = 5306, + CapabilitySampledImageArrayNonUniformIndexing = 5307, + CapabilitySampledImageArrayNonUniformIndexingEXT = 5307, + CapabilityStorageBufferArrayNonUniformIndexing = 5308, + CapabilityStorageBufferArrayNonUniformIndexingEXT = 5308, + CapabilityStorageImageArrayNonUniformIndexing = 5309, + CapabilityStorageImageArrayNonUniformIndexingEXT = 5309, + CapabilityInputAttachmentArrayNonUniformIndexing = 5310, + CapabilityInputAttachmentArrayNonUniformIndexingEXT = 5310, + CapabilityUniformTexelBufferArrayNonUniformIndexing = 5311, + CapabilityUniformTexelBufferArrayNonUniformIndexingEXT = 5311, + CapabilityStorageTexelBufferArrayNonUniformIndexing = 5312, + CapabilityStorageTexelBufferArrayNonUniformIndexingEXT = 5312, + CapabilityRayTracingNV = 5340, + CapabilityRayTracingMotionBlurNV = 5341, + CapabilityVulkanMemoryModel = 5345, + CapabilityVulkanMemoryModelKHR = 5345, + CapabilityVulkanMemoryModelDeviceScope = 5346, + CapabilityVulkanMemoryModelDeviceScopeKHR = 5346, + CapabilityPhysicalStorageBufferAddresses = 5347, + CapabilityPhysicalStorageBufferAddressesEXT = 5347, + CapabilityComputeDerivativeGroupLinearNV = 5350, + CapabilityRayTracingProvisionalKHR = 5353, + CapabilityCooperativeMatrixNV = 5357, + CapabilityFragmentShaderSampleInterlockEXT = 5363, + CapabilityFragmentShaderShadingRateInterlockEXT = 5372, + CapabilityShaderSMBuiltinsNV = 5373, + CapabilityFragmentShaderPixelInterlockEXT = 5378, + CapabilityDemoteToHelperInvocation = 5379, + CapabilityDemoteToHelperInvocationEXT = 5379, + CapabilityBindlessTextureNV = 5390, + CapabilitySubgroupShuffleINTEL = 5568, + CapabilitySubgroupBufferBlockIOINTEL = 5569, + CapabilitySubgroupImageBlockIOINTEL = 5570, + CapabilitySubgroupImageMediaBlockIOINTEL = 5579, + CapabilityRoundToInfinityINTEL = 5582, + CapabilityFloatingPointModeINTEL = 5583, + CapabilityIntegerFunctions2INTEL = 5584, + CapabilityFunctionPointersINTEL = 5603, + CapabilityIndirectReferencesINTEL = 5604, + CapabilityAsmINTEL = 5606, + CapabilityAtomicFloat32MinMaxEXT = 5612, + CapabilityAtomicFloat64MinMaxEXT = 5613, + CapabilityAtomicFloat16MinMaxEXT = 5616, + CapabilityVectorComputeINTEL = 5617, + CapabilityVectorAnyINTEL = 5619, + CapabilityExpectAssumeKHR = 5629, + CapabilitySubgroupAvcMotionEstimationINTEL = 5696, + CapabilitySubgroupAvcMotionEstimationIntraINTEL = 5697, + CapabilitySubgroupAvcMotionEstimationChromaINTEL = 5698, + CapabilityVariableLengthArrayINTEL = 5817, + CapabilityFunctionFloatControlINTEL = 5821, + CapabilityFPGAMemoryAttributesINTEL = 5824, + CapabilityFPFastMathModeINTEL = 5837, + CapabilityArbitraryPrecisionIntegersINTEL = 5844, + CapabilityArbitraryPrecisionFloatingPointINTEL = 5845, + CapabilityUnstructuredLoopControlsINTEL = 5886, + CapabilityFPGALoopControlsINTEL = 5888, + CapabilityKernelAttributesINTEL = 5892, + CapabilityFPGAKernelAttributesINTEL = 5897, + CapabilityFPGAMemoryAccessesINTEL = 5898, + CapabilityFPGAClusterAttributesINTEL = 5904, + CapabilityLoopFuseINTEL = 5906, + CapabilityMemoryAccessAliasingINTEL = 5910, + CapabilityFPGABufferLocationINTEL = 5920, + CapabilityArbitraryPrecisionFixedPointINTEL = 5922, + CapabilityUSMStorageClassesINTEL = 5935, + CapabilityIOPipesINTEL = 5943, + CapabilityBlockingPipesINTEL = 5945, + CapabilityFPGARegINTEL = 5948, + CapabilityDotProductInputAll = 6016, + CapabilityDotProductInputAllKHR = 6016, + CapabilityDotProductInput4x8Bit = 6017, + CapabilityDotProductInput4x8BitKHR = 6017, + CapabilityDotProductInput4x8BitPacked = 6018, + CapabilityDotProductInput4x8BitPackedKHR = 6018, + CapabilityDotProduct = 6019, + CapabilityDotProductKHR = 6019, + CapabilityRayCullMaskKHR = 6020, + CapabilityBitInstructions = 6025, + CapabilityGroupNonUniformRotateKHR = 6026, + CapabilityAtomicFloat32AddEXT = 6033, + CapabilityAtomicFloat64AddEXT = 6034, + CapabilityLongConstantCompositeINTEL = 6089, + CapabilityOptNoneINTEL = 6094, + CapabilityAtomicFloat16AddEXT = 6095, + CapabilityDebugInfoModuleINTEL = 6114, + CapabilitySplitBarrierINTEL = 6141, + CapabilityGroupUniformArithmeticKHR = 6400, + CapabilityMax = 0x7fffffff, +}; + +enum RayFlagsShift { + RayFlagsOpaqueKHRShift = 0, + RayFlagsNoOpaqueKHRShift = 1, + RayFlagsTerminateOnFirstHitKHRShift = 2, + RayFlagsSkipClosestHitShaderKHRShift = 3, + RayFlagsCullBackFacingTrianglesKHRShift = 4, + RayFlagsCullFrontFacingTrianglesKHRShift = 5, + RayFlagsCullOpaqueKHRShift = 6, + RayFlagsCullNoOpaqueKHRShift = 7, + RayFlagsSkipTrianglesKHRShift = 8, + RayFlagsSkipAABBsKHRShift = 9, + RayFlagsMax = 0x7fffffff, +}; + +enum RayFlagsMask { + RayFlagsMaskNone = 0, + RayFlagsOpaqueKHRMask = 0x00000001, + RayFlagsNoOpaqueKHRMask = 0x00000002, + RayFlagsTerminateOnFirstHitKHRMask = 0x00000004, + RayFlagsSkipClosestHitShaderKHRMask = 0x00000008, + RayFlagsCullBackFacingTrianglesKHRMask = 0x00000010, + RayFlagsCullFrontFacingTrianglesKHRMask = 0x00000020, + RayFlagsCullOpaqueKHRMask = 0x00000040, + RayFlagsCullNoOpaqueKHRMask = 0x00000080, + RayFlagsSkipTrianglesKHRMask = 0x00000100, + RayFlagsSkipAABBsKHRMask = 0x00000200, +}; + +enum RayQueryIntersection { + RayQueryIntersectionRayQueryCandidateIntersectionKHR = 0, + RayQueryIntersectionRayQueryCommittedIntersectionKHR = 1, + RayQueryIntersectionMax = 0x7fffffff, +}; + +enum RayQueryCommittedIntersectionType { + RayQueryCommittedIntersectionTypeRayQueryCommittedIntersectionNoneKHR = 0, + RayQueryCommittedIntersectionTypeRayQueryCommittedIntersectionTriangleKHR = 1, + RayQueryCommittedIntersectionTypeRayQueryCommittedIntersectionGeneratedKHR = 2, + RayQueryCommittedIntersectionTypeMax = 0x7fffffff, +}; + +enum RayQueryCandidateIntersectionType { + RayQueryCandidateIntersectionTypeRayQueryCandidateIntersectionTriangleKHR = 0, + RayQueryCandidateIntersectionTypeRayQueryCandidateIntersectionAABBKHR = 1, + RayQueryCandidateIntersectionTypeMax = 0x7fffffff, +}; + +enum FragmentShadingRateShift { + FragmentShadingRateVertical2PixelsShift = 0, + FragmentShadingRateVertical4PixelsShift = 1, + FragmentShadingRateHorizontal2PixelsShift = 2, + FragmentShadingRateHorizontal4PixelsShift = 3, + FragmentShadingRateMax = 0x7fffffff, +}; + +enum FragmentShadingRateMask { + FragmentShadingRateMaskNone = 0, + FragmentShadingRateVertical2PixelsMask = 0x00000001, + FragmentShadingRateVertical4PixelsMask = 0x00000002, + FragmentShadingRateHorizontal2PixelsMask = 0x00000004, + FragmentShadingRateHorizontal4PixelsMask = 0x00000008, +}; + +enum FPDenormMode { + FPDenormModePreserve = 0, + FPDenormModeFlushToZero = 1, + FPDenormModeMax = 0x7fffffff, +}; + +enum FPOperationMode { + FPOperationModeIEEE = 0, + FPOperationModeALT = 1, + FPOperationModeMax = 0x7fffffff, +}; + +enum QuantizationModes { + QuantizationModesTRN = 0, + QuantizationModesTRN_ZERO = 1, + QuantizationModesRND = 2, + QuantizationModesRND_ZERO = 3, + QuantizationModesRND_INF = 4, + QuantizationModesRND_MIN_INF = 5, + QuantizationModesRND_CONV = 6, + QuantizationModesRND_CONV_ODD = 7, + QuantizationModesMax = 0x7fffffff, +}; + +enum OverflowModes { + OverflowModesWRAP = 0, + OverflowModesSAT = 1, + OverflowModesSAT_ZERO = 2, + OverflowModesSAT_SYM = 3, + OverflowModesMax = 0x7fffffff, +}; + +enum PackedVectorFormat { + PackedVectorFormatPackedVectorFormat4x8Bit = 0, + PackedVectorFormatPackedVectorFormat4x8BitKHR = 0, + PackedVectorFormatMax = 0x7fffffff, +}; + +enum Op { + OpNop = 0, + OpUndef = 1, + OpSourceContinued = 2, + OpSource = 3, + OpSourceExtension = 4, + OpName = 5, + OpMemberName = 6, + OpString = 7, + OpLine = 8, + OpExtension = 10, + OpExtInstImport = 11, + OpExtInst = 12, + OpMemoryModel = 14, + OpEntryPoint = 15, + OpExecutionMode = 16, + OpCapability = 17, + OpTypeVoid = 19, + OpTypeBool = 20, + OpTypeInt = 21, + OpTypeFloat = 22, + OpTypeVector = 23, + OpTypeMatrix = 24, + OpTypeImage = 25, + OpTypeSampler = 26, + OpTypeSampledImage = 27, + OpTypeArray = 28, + OpTypeRuntimeArray = 29, + OpTypeStruct = 30, + OpTypeOpaque = 31, + OpTypePointer = 32, + OpTypeFunction = 33, + OpTypeEvent = 34, + OpTypeDeviceEvent = 35, + OpTypeReserveId = 36, + OpTypeQueue = 37, + OpTypePipe = 38, + OpTypeForwardPointer = 39, + OpConstantTrue = 41, + OpConstantFalse = 42, + OpConstant = 43, + OpConstantComposite = 44, + OpConstantSampler = 45, + OpConstantNull = 46, + OpSpecConstantTrue = 48, + OpSpecConstantFalse = 49, + OpSpecConstant = 50, + OpSpecConstantComposite = 51, + OpSpecConstantOp = 52, + OpFunction = 54, + OpFunctionParameter = 55, + OpFunctionEnd = 56, + OpFunctionCall = 57, + OpVariable = 59, + OpImageTexelPointer = 60, + OpLoad = 61, + OpStore = 62, + OpCopyMemory = 63, + OpCopyMemorySized = 64, + OpAccessChain = 65, + OpInBoundsAccessChain = 66, + OpPtrAccessChain = 67, + OpArrayLength = 68, + OpGenericPtrMemSemantics = 69, + OpInBoundsPtrAccessChain = 70, + OpDecorate = 71, + OpMemberDecorate = 72, + OpDecorationGroup = 73, + OpGroupDecorate = 74, + OpGroupMemberDecorate = 75, + OpVectorExtractDynamic = 77, + OpVectorInsertDynamic = 78, + OpVectorShuffle = 79, + OpCompositeConstruct = 80, + OpCompositeExtract = 81, + OpCompositeInsert = 82, + OpCopyObject = 83, + OpTranspose = 84, + OpSampledImage = 86, + OpImageSampleImplicitLod = 87, + OpImageSampleExplicitLod = 88, + OpImageSampleDrefImplicitLod = 89, + OpImageSampleDrefExplicitLod = 90, + OpImageSampleProjImplicitLod = 91, + OpImageSampleProjExplicitLod = 92, + OpImageSampleProjDrefImplicitLod = 93, + OpImageSampleProjDrefExplicitLod = 94, + OpImageFetch = 95, + OpImageGather = 96, + OpImageDrefGather = 97, + OpImageRead = 98, + OpImageWrite = 99, + OpImage = 100, + OpImageQueryFormat = 101, + OpImageQueryOrder = 102, + OpImageQuerySizeLod = 103, + OpImageQuerySize = 104, + OpImageQueryLod = 105, + OpImageQueryLevels = 106, + OpImageQuerySamples = 107, + OpConvertFToU = 109, + OpConvertFToS = 110, + OpConvertSToF = 111, + OpConvertUToF = 112, + OpUConvert = 113, + OpSConvert = 114, + OpFConvert = 115, + OpQuantizeToF16 = 116, + OpConvertPtrToU = 117, + OpSatConvertSToU = 118, + OpSatConvertUToS = 119, + OpConvertUToPtr = 120, + OpPtrCastToGeneric = 121, + OpGenericCastToPtr = 122, + OpGenericCastToPtrExplicit = 123, + OpBitcast = 124, + OpSNegate = 126, + OpFNegate = 127, + OpIAdd = 128, + OpFAdd = 129, + OpISub = 130, + OpFSub = 131, + OpIMul = 132, + OpFMul = 133, + OpUDiv = 134, + OpSDiv = 135, + OpFDiv = 136, + OpUMod = 137, + OpSRem = 138, + OpSMod = 139, + OpFRem = 140, + OpFMod = 141, + OpVectorTimesScalar = 142, + OpMatrixTimesScalar = 143, + OpVectorTimesMatrix = 144, + OpMatrixTimesVector = 145, + OpMatrixTimesMatrix = 146, + OpOuterProduct = 147, + OpDot = 148, + OpIAddCarry = 149, + OpISubBorrow = 150, + OpUMulExtended = 151, + OpSMulExtended = 152, + OpAny = 154, + OpAll = 155, + OpIsNan = 156, + OpIsInf = 157, + OpIsFinite = 158, + OpIsNormal = 159, + OpSignBitSet = 160, + OpLessOrGreater = 161, + OpOrdered = 162, + OpUnordered = 163, + OpLogicalEqual = 164, + OpLogicalNotEqual = 165, + OpLogicalOr = 166, + OpLogicalAnd = 167, + OpLogicalNot = 168, + OpSelect = 169, + OpIEqual = 170, + OpINotEqual = 171, + OpUGreaterThan = 172, + OpSGreaterThan = 173, + OpUGreaterThanEqual = 174, + OpSGreaterThanEqual = 175, + OpULessThan = 176, + OpSLessThan = 177, + OpULessThanEqual = 178, + OpSLessThanEqual = 179, + OpFOrdEqual = 180, + OpFUnordEqual = 181, + OpFOrdNotEqual = 182, + OpFUnordNotEqual = 183, + OpFOrdLessThan = 184, + OpFUnordLessThan = 185, + OpFOrdGreaterThan = 186, + OpFUnordGreaterThan = 187, + OpFOrdLessThanEqual = 188, + OpFUnordLessThanEqual = 189, + OpFOrdGreaterThanEqual = 190, + OpFUnordGreaterThanEqual = 191, + OpShiftRightLogical = 194, + OpShiftRightArithmetic = 195, + OpShiftLeftLogical = 196, + OpBitwiseOr = 197, + OpBitwiseXor = 198, + OpBitwiseAnd = 199, + OpNot = 200, + OpBitFieldInsert = 201, + OpBitFieldSExtract = 202, + OpBitFieldUExtract = 203, + OpBitReverse = 204, + OpBitCount = 205, + OpDPdx = 207, + OpDPdy = 208, + OpFwidth = 209, + OpDPdxFine = 210, + OpDPdyFine = 211, + OpFwidthFine = 212, + OpDPdxCoarse = 213, + OpDPdyCoarse = 214, + OpFwidthCoarse = 215, + OpEmitVertex = 218, + OpEndPrimitive = 219, + OpEmitStreamVertex = 220, + OpEndStreamPrimitive = 221, + OpControlBarrier = 224, + OpMemoryBarrier = 225, + OpAtomicLoad = 227, + OpAtomicStore = 228, + OpAtomicExchange = 229, + OpAtomicCompareExchange = 230, + OpAtomicCompareExchangeWeak = 231, + OpAtomicIIncrement = 232, + OpAtomicIDecrement = 233, + OpAtomicIAdd = 234, + OpAtomicISub = 235, + OpAtomicSMin = 236, + OpAtomicUMin = 237, + OpAtomicSMax = 238, + OpAtomicUMax = 239, + OpAtomicAnd = 240, + OpAtomicOr = 241, + OpAtomicXor = 242, + OpPhi = 245, + OpLoopMerge = 246, + OpSelectionMerge = 247, + OpLabel = 248, + OpBranch = 249, + OpBranchConditional = 250, + OpSwitch = 251, + OpKill = 252, + OpReturn = 253, + OpReturnValue = 254, + OpUnreachable = 255, + OpLifetimeStart = 256, + OpLifetimeStop = 257, + OpGroupAsyncCopy = 259, + OpGroupWaitEvents = 260, + OpGroupAll = 261, + OpGroupAny = 262, + OpGroupBroadcast = 263, + OpGroupIAdd = 264, + OpGroupFAdd = 265, + OpGroupFMin = 266, + OpGroupUMin = 267, + OpGroupSMin = 268, + OpGroupFMax = 269, + OpGroupUMax = 270, + OpGroupSMax = 271, + OpReadPipe = 274, + OpWritePipe = 275, + OpReservedReadPipe = 276, + OpReservedWritePipe = 277, + OpReserveReadPipePackets = 278, + OpReserveWritePipePackets = 279, + OpCommitReadPipe = 280, + OpCommitWritePipe = 281, + OpIsValidReserveId = 282, + OpGetNumPipePackets = 283, + OpGetMaxPipePackets = 284, + OpGroupReserveReadPipePackets = 285, + OpGroupReserveWritePipePackets = 286, + OpGroupCommitReadPipe = 287, + OpGroupCommitWritePipe = 288, + OpEnqueueMarker = 291, + OpEnqueueKernel = 292, + OpGetKernelNDrangeSubGroupCount = 293, + OpGetKernelNDrangeMaxSubGroupSize = 294, + OpGetKernelWorkGroupSize = 295, + OpGetKernelPreferredWorkGroupSizeMultiple = 296, + OpRetainEvent = 297, + OpReleaseEvent = 298, + OpCreateUserEvent = 299, + OpIsValidEvent = 300, + OpSetUserEventStatus = 301, + OpCaptureEventProfilingInfo = 302, + OpGetDefaultQueue = 303, + OpBuildNDRange = 304, + OpImageSparseSampleImplicitLod = 305, + OpImageSparseSampleExplicitLod = 306, + OpImageSparseSampleDrefImplicitLod = 307, + OpImageSparseSampleDrefExplicitLod = 308, + OpImageSparseSampleProjImplicitLod = 309, + OpImageSparseSampleProjExplicitLod = 310, + OpImageSparseSampleProjDrefImplicitLod = 311, + OpImageSparseSampleProjDrefExplicitLod = 312, + OpImageSparseFetch = 313, + OpImageSparseGather = 314, + OpImageSparseDrefGather = 315, + OpImageSparseTexelsResident = 316, + OpNoLine = 317, + OpAtomicFlagTestAndSet = 318, + OpAtomicFlagClear = 319, + OpImageSparseRead = 320, + OpSizeOf = 321, + OpTypePipeStorage = 322, + OpConstantPipeStorage = 323, + OpCreatePipeFromPipeStorage = 324, + OpGetKernelLocalSizeForSubgroupCount = 325, + OpGetKernelMaxNumSubgroups = 326, + OpTypeNamedBarrier = 327, + OpNamedBarrierInitialize = 328, + OpMemoryNamedBarrier = 329, + OpModuleProcessed = 330, + OpExecutionModeId = 331, + OpDecorateId = 332, + OpGroupNonUniformElect = 333, + OpGroupNonUniformAll = 334, + OpGroupNonUniformAny = 335, + OpGroupNonUniformAllEqual = 336, + OpGroupNonUniformBroadcast = 337, + OpGroupNonUniformBroadcastFirst = 338, + OpGroupNonUniformBallot = 339, + OpGroupNonUniformInverseBallot = 340, + OpGroupNonUniformBallotBitExtract = 341, + OpGroupNonUniformBallotBitCount = 342, + OpGroupNonUniformBallotFindLSB = 343, + OpGroupNonUniformBallotFindMSB = 344, + OpGroupNonUniformShuffle = 345, + OpGroupNonUniformShuffleXor = 346, + OpGroupNonUniformShuffleUp = 347, + OpGroupNonUniformShuffleDown = 348, + OpGroupNonUniformIAdd = 349, + OpGroupNonUniformFAdd = 350, + OpGroupNonUniformIMul = 351, + OpGroupNonUniformFMul = 352, + OpGroupNonUniformSMin = 353, + OpGroupNonUniformUMin = 354, + OpGroupNonUniformFMin = 355, + OpGroupNonUniformSMax = 356, + OpGroupNonUniformUMax = 357, + OpGroupNonUniformFMax = 358, + OpGroupNonUniformBitwiseAnd = 359, + OpGroupNonUniformBitwiseOr = 360, + OpGroupNonUniformBitwiseXor = 361, + OpGroupNonUniformLogicalAnd = 362, + OpGroupNonUniformLogicalOr = 363, + OpGroupNonUniformLogicalXor = 364, + OpGroupNonUniformQuadBroadcast = 365, + OpGroupNonUniformQuadSwap = 366, + OpCopyLogical = 400, + OpPtrEqual = 401, + OpPtrNotEqual = 402, + OpPtrDiff = 403, + OpTerminateInvocation = 4416, + OpSubgroupBallotKHR = 4421, + OpSubgroupFirstInvocationKHR = 4422, + OpSubgroupAllKHR = 4428, + OpSubgroupAnyKHR = 4429, + OpSubgroupAllEqualKHR = 4430, + OpGroupNonUniformRotateKHR = 4431, + OpSubgroupReadInvocationKHR = 4432, + OpTraceRayKHR = 4445, + OpExecuteCallableKHR = 4446, + OpConvertUToAccelerationStructureKHR = 4447, + OpIgnoreIntersectionKHR = 4448, + OpTerminateRayKHR = 4449, + OpSDot = 4450, + OpSDotKHR = 4450, + OpUDot = 4451, + OpUDotKHR = 4451, + OpSUDot = 4452, + OpSUDotKHR = 4452, + OpSDotAccSat = 4453, + OpSDotAccSatKHR = 4453, + OpUDotAccSat = 4454, + OpUDotAccSatKHR = 4454, + OpSUDotAccSat = 4455, + OpSUDotAccSatKHR = 4455, + OpTypeRayQueryKHR = 4472, + OpRayQueryInitializeKHR = 4473, + OpRayQueryTerminateKHR = 4474, + OpRayQueryGenerateIntersectionKHR = 4475, + OpRayQueryConfirmIntersectionKHR = 4476, + OpRayQueryProceedKHR = 4477, + OpRayQueryGetIntersectionTypeKHR = 4479, + OpGroupIAddNonUniformAMD = 5000, + OpGroupFAddNonUniformAMD = 5001, + OpGroupFMinNonUniformAMD = 5002, + OpGroupUMinNonUniformAMD = 5003, + OpGroupSMinNonUniformAMD = 5004, + OpGroupFMaxNonUniformAMD = 5005, + OpGroupUMaxNonUniformAMD = 5006, + OpGroupSMaxNonUniformAMD = 5007, + OpFragmentMaskFetchAMD = 5011, + OpFragmentFetchAMD = 5012, + OpReadClockKHR = 5056, + OpImageSampleFootprintNV = 5283, + OpEmitMeshTasksEXT = 5294, + OpSetMeshOutputsEXT = 5295, + OpGroupNonUniformPartitionNV = 5296, + OpWritePackedPrimitiveIndices4x8NV = 5299, + OpReportIntersectionKHR = 5334, + OpReportIntersectionNV = 5334, + OpIgnoreIntersectionNV = 5335, + OpTerminateRayNV = 5336, + OpTraceNV = 5337, + OpTraceMotionNV = 5338, + OpTraceRayMotionNV = 5339, + OpTypeAccelerationStructureKHR = 5341, + OpTypeAccelerationStructureNV = 5341, + OpExecuteCallableNV = 5344, + OpTypeCooperativeMatrixNV = 5358, + OpCooperativeMatrixLoadNV = 5359, + OpCooperativeMatrixStoreNV = 5360, + OpCooperativeMatrixMulAddNV = 5361, + OpCooperativeMatrixLengthNV = 5362, + OpBeginInvocationInterlockEXT = 5364, + OpEndInvocationInterlockEXT = 5365, + OpDemoteToHelperInvocation = 5380, + OpDemoteToHelperInvocationEXT = 5380, + OpIsHelperInvocationEXT = 5381, + OpConvertUToImageNV = 5391, + OpConvertUToSamplerNV = 5392, + OpConvertImageToUNV = 5393, + OpConvertSamplerToUNV = 5394, + OpConvertUToSampledImageNV = 5395, + OpConvertSampledImageToUNV = 5396, + OpSamplerImageAddressingModeNV = 5397, + OpSubgroupShuffleINTEL = 5571, + OpSubgroupShuffleDownINTEL = 5572, + OpSubgroupShuffleUpINTEL = 5573, + OpSubgroupShuffleXorINTEL = 5574, + OpSubgroupBlockReadINTEL = 5575, + OpSubgroupBlockWriteINTEL = 5576, + OpSubgroupImageBlockReadINTEL = 5577, + OpSubgroupImageBlockWriteINTEL = 5578, + OpSubgroupImageMediaBlockReadINTEL = 5580, + OpSubgroupImageMediaBlockWriteINTEL = 5581, + OpUCountLeadingZerosINTEL = 5585, + OpUCountTrailingZerosINTEL = 5586, + OpAbsISubINTEL = 5587, + OpAbsUSubINTEL = 5588, + OpIAddSatINTEL = 5589, + OpUAddSatINTEL = 5590, + OpIAverageINTEL = 5591, + OpUAverageINTEL = 5592, + OpIAverageRoundedINTEL = 5593, + OpUAverageRoundedINTEL = 5594, + OpISubSatINTEL = 5595, + OpUSubSatINTEL = 5596, + OpIMul32x16INTEL = 5597, + OpUMul32x16INTEL = 5598, + OpConstantFunctionPointerINTEL = 5600, + OpFunctionPointerCallINTEL = 5601, + OpAsmTargetINTEL = 5609, + OpAsmINTEL = 5610, + OpAsmCallINTEL = 5611, + OpAtomicFMinEXT = 5614, + OpAtomicFMaxEXT = 5615, + OpAssumeTrueKHR = 5630, + OpExpectKHR = 5631, + OpDecorateString = 5632, + OpDecorateStringGOOGLE = 5632, + OpMemberDecorateString = 5633, + OpMemberDecorateStringGOOGLE = 5633, + OpVmeImageINTEL = 5699, + OpTypeVmeImageINTEL = 5700, + OpTypeAvcImePayloadINTEL = 5701, + OpTypeAvcRefPayloadINTEL = 5702, + OpTypeAvcSicPayloadINTEL = 5703, + OpTypeAvcMcePayloadINTEL = 5704, + OpTypeAvcMceResultINTEL = 5705, + OpTypeAvcImeResultINTEL = 5706, + OpTypeAvcImeResultSingleReferenceStreamoutINTEL = 5707, + OpTypeAvcImeResultDualReferenceStreamoutINTEL = 5708, + OpTypeAvcImeSingleReferenceStreaminINTEL = 5709, + OpTypeAvcImeDualReferenceStreaminINTEL = 5710, + OpTypeAvcRefResultINTEL = 5711, + OpTypeAvcSicResultINTEL = 5712, + OpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL = 5713, + OpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL = 5714, + OpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL = 5715, + OpSubgroupAvcMceSetInterShapePenaltyINTEL = 5716, + OpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL = 5717, + OpSubgroupAvcMceSetInterDirectionPenaltyINTEL = 5718, + OpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL = 5719, + OpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL = 5720, + OpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL = 5721, + OpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL = 5722, + OpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL = 5723, + OpSubgroupAvcMceSetMotionVectorCostFunctionINTEL = 5724, + OpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL = 5725, + OpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL = 5726, + OpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL = 5727, + OpSubgroupAvcMceSetAcOnlyHaarINTEL = 5728, + OpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL = 5729, + OpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL = 5730, + OpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL = 5731, + OpSubgroupAvcMceConvertToImePayloadINTEL = 5732, + OpSubgroupAvcMceConvertToImeResultINTEL = 5733, + OpSubgroupAvcMceConvertToRefPayloadINTEL = 5734, + OpSubgroupAvcMceConvertToRefResultINTEL = 5735, + OpSubgroupAvcMceConvertToSicPayloadINTEL = 5736, + OpSubgroupAvcMceConvertToSicResultINTEL = 5737, + OpSubgroupAvcMceGetMotionVectorsINTEL = 5738, + OpSubgroupAvcMceGetInterDistortionsINTEL = 5739, + OpSubgroupAvcMceGetBestInterDistortionsINTEL = 5740, + OpSubgroupAvcMceGetInterMajorShapeINTEL = 5741, + OpSubgroupAvcMceGetInterMinorShapeINTEL = 5742, + OpSubgroupAvcMceGetInterDirectionsINTEL = 5743, + OpSubgroupAvcMceGetInterMotionVectorCountINTEL = 5744, + OpSubgroupAvcMceGetInterReferenceIdsINTEL = 5745, + OpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL = 5746, + OpSubgroupAvcImeInitializeINTEL = 5747, + OpSubgroupAvcImeSetSingleReferenceINTEL = 5748, + OpSubgroupAvcImeSetDualReferenceINTEL = 5749, + OpSubgroupAvcImeRefWindowSizeINTEL = 5750, + OpSubgroupAvcImeAdjustRefOffsetINTEL = 5751, + OpSubgroupAvcImeConvertToMcePayloadINTEL = 5752, + OpSubgroupAvcImeSetMaxMotionVectorCountINTEL = 5753, + OpSubgroupAvcImeSetUnidirectionalMixDisableINTEL = 5754, + OpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL = 5755, + OpSubgroupAvcImeSetWeightedSadINTEL = 5756, + OpSubgroupAvcImeEvaluateWithSingleReferenceINTEL = 5757, + OpSubgroupAvcImeEvaluateWithDualReferenceINTEL = 5758, + OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL = 5759, + OpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL = 5760, + OpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL = 5761, + OpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL = 5762, + OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL = 5763, + OpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL = 5764, + OpSubgroupAvcImeConvertToMceResultINTEL = 5765, + OpSubgroupAvcImeGetSingleReferenceStreaminINTEL = 5766, + OpSubgroupAvcImeGetDualReferenceStreaminINTEL = 5767, + OpSubgroupAvcImeStripSingleReferenceStreamoutINTEL = 5768, + OpSubgroupAvcImeStripDualReferenceStreamoutINTEL = 5769, + OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL = 5770, + OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL = 5771, + OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL = 5772, + OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL = 5773, + OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL = 5774, + OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL = 5775, + OpSubgroupAvcImeGetBorderReachedINTEL = 5776, + OpSubgroupAvcImeGetTruncatedSearchIndicationINTEL = 5777, + OpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL = 5778, + OpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL = 5779, + OpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL = 5780, + OpSubgroupAvcFmeInitializeINTEL = 5781, + OpSubgroupAvcBmeInitializeINTEL = 5782, + OpSubgroupAvcRefConvertToMcePayloadINTEL = 5783, + OpSubgroupAvcRefSetBidirectionalMixDisableINTEL = 5784, + OpSubgroupAvcRefSetBilinearFilterEnableINTEL = 5785, + OpSubgroupAvcRefEvaluateWithSingleReferenceINTEL = 5786, + OpSubgroupAvcRefEvaluateWithDualReferenceINTEL = 5787, + OpSubgroupAvcRefEvaluateWithMultiReferenceINTEL = 5788, + OpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL = 5789, + OpSubgroupAvcRefConvertToMceResultINTEL = 5790, + OpSubgroupAvcSicInitializeINTEL = 5791, + OpSubgroupAvcSicConfigureSkcINTEL = 5792, + OpSubgroupAvcSicConfigureIpeLumaINTEL = 5793, + OpSubgroupAvcSicConfigureIpeLumaChromaINTEL = 5794, + OpSubgroupAvcSicGetMotionVectorMaskINTEL = 5795, + OpSubgroupAvcSicConvertToMcePayloadINTEL = 5796, + OpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL = 5797, + OpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL = 5798, + OpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL = 5799, + OpSubgroupAvcSicSetBilinearFilterEnableINTEL = 5800, + OpSubgroupAvcSicSetSkcForwardTransformEnableINTEL = 5801, + OpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL = 5802, + OpSubgroupAvcSicEvaluateIpeINTEL = 5803, + OpSubgroupAvcSicEvaluateWithSingleReferenceINTEL = 5804, + OpSubgroupAvcSicEvaluateWithDualReferenceINTEL = 5805, + OpSubgroupAvcSicEvaluateWithMultiReferenceINTEL = 5806, + OpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL = 5807, + OpSubgroupAvcSicConvertToMceResultINTEL = 5808, + OpSubgroupAvcSicGetIpeLumaShapeINTEL = 5809, + OpSubgroupAvcSicGetBestIpeLumaDistortionINTEL = 5810, + OpSubgroupAvcSicGetBestIpeChromaDistortionINTEL = 5811, + OpSubgroupAvcSicGetPackedIpeLumaModesINTEL = 5812, + OpSubgroupAvcSicGetIpeChromaModeINTEL = 5813, + OpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL = 5814, + OpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL = 5815, + OpSubgroupAvcSicGetInterRawSadsINTEL = 5816, + OpVariableLengthArrayINTEL = 5818, + OpSaveMemoryINTEL = 5819, + OpRestoreMemoryINTEL = 5820, + OpArbitraryFloatSinCosPiINTEL = 5840, + OpArbitraryFloatCastINTEL = 5841, + OpArbitraryFloatCastFromIntINTEL = 5842, + OpArbitraryFloatCastToIntINTEL = 5843, + OpArbitraryFloatAddINTEL = 5846, + OpArbitraryFloatSubINTEL = 5847, + OpArbitraryFloatMulINTEL = 5848, + OpArbitraryFloatDivINTEL = 5849, + OpArbitraryFloatGTINTEL = 5850, + OpArbitraryFloatGEINTEL = 5851, + OpArbitraryFloatLTINTEL = 5852, + OpArbitraryFloatLEINTEL = 5853, + OpArbitraryFloatEQINTEL = 5854, + OpArbitraryFloatRecipINTEL = 5855, + OpArbitraryFloatRSqrtINTEL = 5856, + OpArbitraryFloatCbrtINTEL = 5857, + OpArbitraryFloatHypotINTEL = 5858, + OpArbitraryFloatSqrtINTEL = 5859, + OpArbitraryFloatLogINTEL = 5860, + OpArbitraryFloatLog2INTEL = 5861, + OpArbitraryFloatLog10INTEL = 5862, + OpArbitraryFloatLog1pINTEL = 5863, + OpArbitraryFloatExpINTEL = 5864, + OpArbitraryFloatExp2INTEL = 5865, + OpArbitraryFloatExp10INTEL = 5866, + OpArbitraryFloatExpm1INTEL = 5867, + OpArbitraryFloatSinINTEL = 5868, + OpArbitraryFloatCosINTEL = 5869, + OpArbitraryFloatSinCosINTEL = 5870, + OpArbitraryFloatSinPiINTEL = 5871, + OpArbitraryFloatCosPiINTEL = 5872, + OpArbitraryFloatASinINTEL = 5873, + OpArbitraryFloatASinPiINTEL = 5874, + OpArbitraryFloatACosINTEL = 5875, + OpArbitraryFloatACosPiINTEL = 5876, + OpArbitraryFloatATanINTEL = 5877, + OpArbitraryFloatATanPiINTEL = 5878, + OpArbitraryFloatATan2INTEL = 5879, + OpArbitraryFloatPowINTEL = 5880, + OpArbitraryFloatPowRINTEL = 5881, + OpArbitraryFloatPowNINTEL = 5882, + OpLoopControlINTEL = 5887, + OpAliasDomainDeclINTEL = 5911, + OpAliasScopeDeclINTEL = 5912, + OpAliasScopeListDeclINTEL = 5913, + OpFixedSqrtINTEL = 5923, + OpFixedRecipINTEL = 5924, + OpFixedRsqrtINTEL = 5925, + OpFixedSinINTEL = 5926, + OpFixedCosINTEL = 5927, + OpFixedSinCosINTEL = 5928, + OpFixedSinPiINTEL = 5929, + OpFixedCosPiINTEL = 5930, + OpFixedSinCosPiINTEL = 5931, + OpFixedLogINTEL = 5932, + OpFixedExpINTEL = 5933, + OpPtrCastToCrossWorkgroupINTEL = 5934, + OpCrossWorkgroupCastToPtrINTEL = 5938, + OpReadPipeBlockingINTEL = 5946, + OpWritePipeBlockingINTEL = 5947, + OpFPGARegINTEL = 5949, + OpRayQueryGetRayTMinKHR = 6016, + OpRayQueryGetRayFlagsKHR = 6017, + OpRayQueryGetIntersectionTKHR = 6018, + OpRayQueryGetIntersectionInstanceCustomIndexKHR = 6019, + OpRayQueryGetIntersectionInstanceIdKHR = 6020, + OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR = 6021, + OpRayQueryGetIntersectionGeometryIndexKHR = 6022, + OpRayQueryGetIntersectionPrimitiveIndexKHR = 6023, + OpRayQueryGetIntersectionBarycentricsKHR = 6024, + OpRayQueryGetIntersectionFrontFaceKHR = 6025, + OpRayQueryGetIntersectionCandidateAABBOpaqueKHR = 6026, + OpRayQueryGetIntersectionObjectRayDirectionKHR = 6027, + OpRayQueryGetIntersectionObjectRayOriginKHR = 6028, + OpRayQueryGetWorldRayDirectionKHR = 6029, + OpRayQueryGetWorldRayOriginKHR = 6030, + OpRayQueryGetIntersectionObjectToWorldKHR = 6031, + OpRayQueryGetIntersectionWorldToObjectKHR = 6032, + OpAtomicFAddEXT = 6035, + OpTypeBufferSurfaceINTEL = 6086, + OpTypeStructContinuedINTEL = 6090, + OpConstantCompositeContinuedINTEL = 6091, + OpSpecConstantCompositeContinuedINTEL = 6092, + OpControlBarrierArriveINTEL = 6142, + OpControlBarrierWaitINTEL = 6143, + OpGroupIMulKHR = 6401, + OpGroupFMulKHR = 6402, + OpGroupBitwiseAndKHR = 6403, + OpGroupBitwiseOrKHR = 6404, + OpGroupBitwiseXorKHR = 6405, + OpGroupLogicalAndKHR = 6406, + OpGroupLogicalOrKHR = 6407, + OpGroupLogicalXorKHR = 6408, + OpMax = 0x7fffffff, +}; + +#ifdef SPV_ENABLE_UTILITY_CODE +#ifndef __cplusplus +#include +#endif +inline void HasResultAndType(Op opcode, bool *hasResult, bool *hasResultType) { + *hasResult = *hasResultType = false; + switch (opcode) { + default: /* unknown opcode */ break; + case OpNop: *hasResult = false; *hasResultType = false; break; + case OpUndef: *hasResult = true; *hasResultType = true; break; + case OpSourceContinued: *hasResult = false; *hasResultType = false; break; + case OpSource: *hasResult = false; *hasResultType = false; break; + case OpSourceExtension: *hasResult = false; *hasResultType = false; break; + case OpName: *hasResult = false; *hasResultType = false; break; + case OpMemberName: *hasResult = false; *hasResultType = false; break; + case OpString: *hasResult = true; *hasResultType = false; break; + case OpLine: *hasResult = false; *hasResultType = false; break; + case OpExtension: *hasResult = false; *hasResultType = false; break; + case OpExtInstImport: *hasResult = true; *hasResultType = false; break; + case OpExtInst: *hasResult = true; *hasResultType = true; break; + case OpMemoryModel: *hasResult = false; *hasResultType = false; break; + case OpEntryPoint: *hasResult = false; *hasResultType = false; break; + case OpExecutionMode: *hasResult = false; *hasResultType = false; break; + case OpCapability: *hasResult = false; *hasResultType = false; break; + case OpTypeVoid: *hasResult = true; *hasResultType = false; break; + case OpTypeBool: *hasResult = true; *hasResultType = false; break; + case OpTypeInt: *hasResult = true; *hasResultType = false; break; + case OpTypeFloat: *hasResult = true; *hasResultType = false; break; + case OpTypeVector: *hasResult = true; *hasResultType = false; break; + case OpTypeMatrix: *hasResult = true; *hasResultType = false; break; + case OpTypeImage: *hasResult = true; *hasResultType = false; break; + case OpTypeSampler: *hasResult = true; *hasResultType = false; break; + case OpTypeSampledImage: *hasResult = true; *hasResultType = false; break; + case OpTypeArray: *hasResult = true; *hasResultType = false; break; + case OpTypeRuntimeArray: *hasResult = true; *hasResultType = false; break; + case OpTypeStruct: *hasResult = true; *hasResultType = false; break; + case OpTypeOpaque: *hasResult = true; *hasResultType = false; break; + case OpTypePointer: *hasResult = true; *hasResultType = false; break; + case OpTypeFunction: *hasResult = true; *hasResultType = false; break; + case OpTypeEvent: *hasResult = true; *hasResultType = false; break; + case OpTypeDeviceEvent: *hasResult = true; *hasResultType = false; break; + case OpTypeReserveId: *hasResult = true; *hasResultType = false; break; + case OpTypeQueue: *hasResult = true; *hasResultType = false; break; + case OpTypePipe: *hasResult = true; *hasResultType = false; break; + case OpTypeForwardPointer: *hasResult = false; *hasResultType = false; break; + case OpConstantTrue: *hasResult = true; *hasResultType = true; break; + case OpConstantFalse: *hasResult = true; *hasResultType = true; break; + case OpConstant: *hasResult = true; *hasResultType = true; break; + case OpConstantComposite: *hasResult = true; *hasResultType = true; break; + case OpConstantSampler: *hasResult = true; *hasResultType = true; break; + case OpConstantNull: *hasResult = true; *hasResultType = true; break; + case OpSpecConstantTrue: *hasResult = true; *hasResultType = true; break; + case OpSpecConstantFalse: *hasResult = true; *hasResultType = true; break; + case OpSpecConstant: *hasResult = true; *hasResultType = true; break; + case OpSpecConstantComposite: *hasResult = true; *hasResultType = true; break; + case OpSpecConstantOp: *hasResult = true; *hasResultType = true; break; + case OpFunction: *hasResult = true; *hasResultType = true; break; + case OpFunctionParameter: *hasResult = true; *hasResultType = true; break; + case OpFunctionEnd: *hasResult = false; *hasResultType = false; break; + case OpFunctionCall: *hasResult = true; *hasResultType = true; break; + case OpVariable: *hasResult = true; *hasResultType = true; break; + case OpImageTexelPointer: *hasResult = true; *hasResultType = true; break; + case OpLoad: *hasResult = true; *hasResultType = true; break; + case OpStore: *hasResult = false; *hasResultType = false; break; + case OpCopyMemory: *hasResult = false; *hasResultType = false; break; + case OpCopyMemorySized: *hasResult = false; *hasResultType = false; break; + case OpAccessChain: *hasResult = true; *hasResultType = true; break; + case OpInBoundsAccessChain: *hasResult = true; *hasResultType = true; break; + case OpPtrAccessChain: *hasResult = true; *hasResultType = true; break; + case OpArrayLength: *hasResult = true; *hasResultType = true; break; + case OpGenericPtrMemSemantics: *hasResult = true; *hasResultType = true; break; + case OpInBoundsPtrAccessChain: *hasResult = true; *hasResultType = true; break; + case OpDecorate: *hasResult = false; *hasResultType = false; break; + case OpMemberDecorate: *hasResult = false; *hasResultType = false; break; + case OpDecorationGroup: *hasResult = true; *hasResultType = false; break; + case OpGroupDecorate: *hasResult = false; *hasResultType = false; break; + case OpGroupMemberDecorate: *hasResult = false; *hasResultType = false; break; + case OpVectorExtractDynamic: *hasResult = true; *hasResultType = true; break; + case OpVectorInsertDynamic: *hasResult = true; *hasResultType = true; break; + case OpVectorShuffle: *hasResult = true; *hasResultType = true; break; + case OpCompositeConstruct: *hasResult = true; *hasResultType = true; break; + case OpCompositeExtract: *hasResult = true; *hasResultType = true; break; + case OpCompositeInsert: *hasResult = true; *hasResultType = true; break; + case OpCopyObject: *hasResult = true; *hasResultType = true; break; + case OpTranspose: *hasResult = true; *hasResultType = true; break; + case OpSampledImage: *hasResult = true; *hasResultType = true; break; + case OpImageSampleImplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSampleExplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSampleDrefImplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSampleDrefExplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSampleProjImplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSampleProjExplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSampleProjDrefImplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSampleProjDrefExplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageFetch: *hasResult = true; *hasResultType = true; break; + case OpImageGather: *hasResult = true; *hasResultType = true; break; + case OpImageDrefGather: *hasResult = true; *hasResultType = true; break; + case OpImageRead: *hasResult = true; *hasResultType = true; break; + case OpImageWrite: *hasResult = false; *hasResultType = false; break; + case OpImage: *hasResult = true; *hasResultType = true; break; + case OpImageQueryFormat: *hasResult = true; *hasResultType = true; break; + case OpImageQueryOrder: *hasResult = true; *hasResultType = true; break; + case OpImageQuerySizeLod: *hasResult = true; *hasResultType = true; break; + case OpImageQuerySize: *hasResult = true; *hasResultType = true; break; + case OpImageQueryLod: *hasResult = true; *hasResultType = true; break; + case OpImageQueryLevels: *hasResult = true; *hasResultType = true; break; + case OpImageQuerySamples: *hasResult = true; *hasResultType = true; break; + case OpConvertFToU: *hasResult = true; *hasResultType = true; break; + case OpConvertFToS: *hasResult = true; *hasResultType = true; break; + case OpConvertSToF: *hasResult = true; *hasResultType = true; break; + case OpConvertUToF: *hasResult = true; *hasResultType = true; break; + case OpUConvert: *hasResult = true; *hasResultType = true; break; + case OpSConvert: *hasResult = true; *hasResultType = true; break; + case OpFConvert: *hasResult = true; *hasResultType = true; break; + case OpQuantizeToF16: *hasResult = true; *hasResultType = true; break; + case OpConvertPtrToU: *hasResult = true; *hasResultType = true; break; + case OpSatConvertSToU: *hasResult = true; *hasResultType = true; break; + case OpSatConvertUToS: *hasResult = true; *hasResultType = true; break; + case OpConvertUToPtr: *hasResult = true; *hasResultType = true; break; + case OpPtrCastToGeneric: *hasResult = true; *hasResultType = true; break; + case OpGenericCastToPtr: *hasResult = true; *hasResultType = true; break; + case OpGenericCastToPtrExplicit: *hasResult = true; *hasResultType = true; break; + case OpBitcast: *hasResult = true; *hasResultType = true; break; + case OpSNegate: *hasResult = true; *hasResultType = true; break; + case OpFNegate: *hasResult = true; *hasResultType = true; break; + case OpIAdd: *hasResult = true; *hasResultType = true; break; + case OpFAdd: *hasResult = true; *hasResultType = true; break; + case OpISub: *hasResult = true; *hasResultType = true; break; + case OpFSub: *hasResult = true; *hasResultType = true; break; + case OpIMul: *hasResult = true; *hasResultType = true; break; + case OpFMul: *hasResult = true; *hasResultType = true; break; + case OpUDiv: *hasResult = true; *hasResultType = true; break; + case OpSDiv: *hasResult = true; *hasResultType = true; break; + case OpFDiv: *hasResult = true; *hasResultType = true; break; + case OpUMod: *hasResult = true; *hasResultType = true; break; + case OpSRem: *hasResult = true; *hasResultType = true; break; + case OpSMod: *hasResult = true; *hasResultType = true; break; + case OpFRem: *hasResult = true; *hasResultType = true; break; + case OpFMod: *hasResult = true; *hasResultType = true; break; + case OpVectorTimesScalar: *hasResult = true; *hasResultType = true; break; + case OpMatrixTimesScalar: *hasResult = true; *hasResultType = true; break; + case OpVectorTimesMatrix: *hasResult = true; *hasResultType = true; break; + case OpMatrixTimesVector: *hasResult = true; *hasResultType = true; break; + case OpMatrixTimesMatrix: *hasResult = true; *hasResultType = true; break; + case OpOuterProduct: *hasResult = true; *hasResultType = true; break; + case OpDot: *hasResult = true; *hasResultType = true; break; + case OpIAddCarry: *hasResult = true; *hasResultType = true; break; + case OpISubBorrow: *hasResult = true; *hasResultType = true; break; + case OpUMulExtended: *hasResult = true; *hasResultType = true; break; + case OpSMulExtended: *hasResult = true; *hasResultType = true; break; + case OpAny: *hasResult = true; *hasResultType = true; break; + case OpAll: *hasResult = true; *hasResultType = true; break; + case OpIsNan: *hasResult = true; *hasResultType = true; break; + case OpIsInf: *hasResult = true; *hasResultType = true; break; + case OpIsFinite: *hasResult = true; *hasResultType = true; break; + case OpIsNormal: *hasResult = true; *hasResultType = true; break; + case OpSignBitSet: *hasResult = true; *hasResultType = true; break; + case OpLessOrGreater: *hasResult = true; *hasResultType = true; break; + case OpOrdered: *hasResult = true; *hasResultType = true; break; + case OpUnordered: *hasResult = true; *hasResultType = true; break; + case OpLogicalEqual: *hasResult = true; *hasResultType = true; break; + case OpLogicalNotEqual: *hasResult = true; *hasResultType = true; break; + case OpLogicalOr: *hasResult = true; *hasResultType = true; break; + case OpLogicalAnd: *hasResult = true; *hasResultType = true; break; + case OpLogicalNot: *hasResult = true; *hasResultType = true; break; + case OpSelect: *hasResult = true; *hasResultType = true; break; + case OpIEqual: *hasResult = true; *hasResultType = true; break; + case OpINotEqual: *hasResult = true; *hasResultType = true; break; + case OpUGreaterThan: *hasResult = true; *hasResultType = true; break; + case OpSGreaterThan: *hasResult = true; *hasResultType = true; break; + case OpUGreaterThanEqual: *hasResult = true; *hasResultType = true; break; + case OpSGreaterThanEqual: *hasResult = true; *hasResultType = true; break; + case OpULessThan: *hasResult = true; *hasResultType = true; break; + case OpSLessThan: *hasResult = true; *hasResultType = true; break; + case OpULessThanEqual: *hasResult = true; *hasResultType = true; break; + case OpSLessThanEqual: *hasResult = true; *hasResultType = true; break; + case OpFOrdEqual: *hasResult = true; *hasResultType = true; break; + case OpFUnordEqual: *hasResult = true; *hasResultType = true; break; + case OpFOrdNotEqual: *hasResult = true; *hasResultType = true; break; + case OpFUnordNotEqual: *hasResult = true; *hasResultType = true; break; + case OpFOrdLessThan: *hasResult = true; *hasResultType = true; break; + case OpFUnordLessThan: *hasResult = true; *hasResultType = true; break; + case OpFOrdGreaterThan: *hasResult = true; *hasResultType = true; break; + case OpFUnordGreaterThan: *hasResult = true; *hasResultType = true; break; + case OpFOrdLessThanEqual: *hasResult = true; *hasResultType = true; break; + case OpFUnordLessThanEqual: *hasResult = true; *hasResultType = true; break; + case OpFOrdGreaterThanEqual: *hasResult = true; *hasResultType = true; break; + case OpFUnordGreaterThanEqual: *hasResult = true; *hasResultType = true; break; + case OpShiftRightLogical: *hasResult = true; *hasResultType = true; break; + case OpShiftRightArithmetic: *hasResult = true; *hasResultType = true; break; + case OpShiftLeftLogical: *hasResult = true; *hasResultType = true; break; + case OpBitwiseOr: *hasResult = true; *hasResultType = true; break; + case OpBitwiseXor: *hasResult = true; *hasResultType = true; break; + case OpBitwiseAnd: *hasResult = true; *hasResultType = true; break; + case OpNot: *hasResult = true; *hasResultType = true; break; + case OpBitFieldInsert: *hasResult = true; *hasResultType = true; break; + case OpBitFieldSExtract: *hasResult = true; *hasResultType = true; break; + case OpBitFieldUExtract: *hasResult = true; *hasResultType = true; break; + case OpBitReverse: *hasResult = true; *hasResultType = true; break; + case OpBitCount: *hasResult = true; *hasResultType = true; break; + case OpDPdx: *hasResult = true; *hasResultType = true; break; + case OpDPdy: *hasResult = true; *hasResultType = true; break; + case OpFwidth: *hasResult = true; *hasResultType = true; break; + case OpDPdxFine: *hasResult = true; *hasResultType = true; break; + case OpDPdyFine: *hasResult = true; *hasResultType = true; break; + case OpFwidthFine: *hasResult = true; *hasResultType = true; break; + case OpDPdxCoarse: *hasResult = true; *hasResultType = true; break; + case OpDPdyCoarse: *hasResult = true; *hasResultType = true; break; + case OpFwidthCoarse: *hasResult = true; *hasResultType = true; break; + case OpEmitVertex: *hasResult = false; *hasResultType = false; break; + case OpEndPrimitive: *hasResult = false; *hasResultType = false; break; + case OpEmitStreamVertex: *hasResult = false; *hasResultType = false; break; + case OpEndStreamPrimitive: *hasResult = false; *hasResultType = false; break; + case OpControlBarrier: *hasResult = false; *hasResultType = false; break; + case OpMemoryBarrier: *hasResult = false; *hasResultType = false; break; + case OpAtomicLoad: *hasResult = true; *hasResultType = true; break; + case OpAtomicStore: *hasResult = false; *hasResultType = false; break; + case OpAtomicExchange: *hasResult = true; *hasResultType = true; break; + case OpAtomicCompareExchange: *hasResult = true; *hasResultType = true; break; + case OpAtomicCompareExchangeWeak: *hasResult = true; *hasResultType = true; break; + case OpAtomicIIncrement: *hasResult = true; *hasResultType = true; break; + case OpAtomicIDecrement: *hasResult = true; *hasResultType = true; break; + case OpAtomicIAdd: *hasResult = true; *hasResultType = true; break; + case OpAtomicISub: *hasResult = true; *hasResultType = true; break; + case OpAtomicSMin: *hasResult = true; *hasResultType = true; break; + case OpAtomicUMin: *hasResult = true; *hasResultType = true; break; + case OpAtomicSMax: *hasResult = true; *hasResultType = true; break; + case OpAtomicUMax: *hasResult = true; *hasResultType = true; break; + case OpAtomicAnd: *hasResult = true; *hasResultType = true; break; + case OpAtomicOr: *hasResult = true; *hasResultType = true; break; + case OpAtomicXor: *hasResult = true; *hasResultType = true; break; + case OpPhi: *hasResult = true; *hasResultType = true; break; + case OpLoopMerge: *hasResult = false; *hasResultType = false; break; + case OpSelectionMerge: *hasResult = false; *hasResultType = false; break; + case OpLabel: *hasResult = true; *hasResultType = false; break; + case OpBranch: *hasResult = false; *hasResultType = false; break; + case OpBranchConditional: *hasResult = false; *hasResultType = false; break; + case OpSwitch: *hasResult = false; *hasResultType = false; break; + case OpKill: *hasResult = false; *hasResultType = false; break; + case OpReturn: *hasResult = false; *hasResultType = false; break; + case OpReturnValue: *hasResult = false; *hasResultType = false; break; + case OpUnreachable: *hasResult = false; *hasResultType = false; break; + case OpLifetimeStart: *hasResult = false; *hasResultType = false; break; + case OpLifetimeStop: *hasResult = false; *hasResultType = false; break; + case OpGroupAsyncCopy: *hasResult = true; *hasResultType = true; break; + case OpGroupWaitEvents: *hasResult = false; *hasResultType = false; break; + case OpGroupAll: *hasResult = true; *hasResultType = true; break; + case OpGroupAny: *hasResult = true; *hasResultType = true; break; + case OpGroupBroadcast: *hasResult = true; *hasResultType = true; break; + case OpGroupIAdd: *hasResult = true; *hasResultType = true; break; + case OpGroupFAdd: *hasResult = true; *hasResultType = true; break; + case OpGroupFMin: *hasResult = true; *hasResultType = true; break; + case OpGroupUMin: *hasResult = true; *hasResultType = true; break; + case OpGroupSMin: *hasResult = true; *hasResultType = true; break; + case OpGroupFMax: *hasResult = true; *hasResultType = true; break; + case OpGroupUMax: *hasResult = true; *hasResultType = true; break; + case OpGroupSMax: *hasResult = true; *hasResultType = true; break; + case OpReadPipe: *hasResult = true; *hasResultType = true; break; + case OpWritePipe: *hasResult = true; *hasResultType = true; break; + case OpReservedReadPipe: *hasResult = true; *hasResultType = true; break; + case OpReservedWritePipe: *hasResult = true; *hasResultType = true; break; + case OpReserveReadPipePackets: *hasResult = true; *hasResultType = true; break; + case OpReserveWritePipePackets: *hasResult = true; *hasResultType = true; break; + case OpCommitReadPipe: *hasResult = false; *hasResultType = false; break; + case OpCommitWritePipe: *hasResult = false; *hasResultType = false; break; + case OpIsValidReserveId: *hasResult = true; *hasResultType = true; break; + case OpGetNumPipePackets: *hasResult = true; *hasResultType = true; break; + case OpGetMaxPipePackets: *hasResult = true; *hasResultType = true; break; + case OpGroupReserveReadPipePackets: *hasResult = true; *hasResultType = true; break; + case OpGroupReserveWritePipePackets: *hasResult = true; *hasResultType = true; break; + case OpGroupCommitReadPipe: *hasResult = false; *hasResultType = false; break; + case OpGroupCommitWritePipe: *hasResult = false; *hasResultType = false; break; + case OpEnqueueMarker: *hasResult = true; *hasResultType = true; break; + case OpEnqueueKernel: *hasResult = true; *hasResultType = true; break; + case OpGetKernelNDrangeSubGroupCount: *hasResult = true; *hasResultType = true; break; + case OpGetKernelNDrangeMaxSubGroupSize: *hasResult = true; *hasResultType = true; break; + case OpGetKernelWorkGroupSize: *hasResult = true; *hasResultType = true; break; + case OpGetKernelPreferredWorkGroupSizeMultiple: *hasResult = true; *hasResultType = true; break; + case OpRetainEvent: *hasResult = false; *hasResultType = false; break; + case OpReleaseEvent: *hasResult = false; *hasResultType = false; break; + case OpCreateUserEvent: *hasResult = true; *hasResultType = true; break; + case OpIsValidEvent: *hasResult = true; *hasResultType = true; break; + case OpSetUserEventStatus: *hasResult = false; *hasResultType = false; break; + case OpCaptureEventProfilingInfo: *hasResult = false; *hasResultType = false; break; + case OpGetDefaultQueue: *hasResult = true; *hasResultType = true; break; + case OpBuildNDRange: *hasResult = true; *hasResultType = true; break; + case OpImageSparseSampleImplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSparseSampleExplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSparseSampleDrefImplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSparseSampleDrefExplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSparseSampleProjImplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSparseSampleProjExplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSparseSampleProjDrefImplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSparseSampleProjDrefExplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSparseFetch: *hasResult = true; *hasResultType = true; break; + case OpImageSparseGather: *hasResult = true; *hasResultType = true; break; + case OpImageSparseDrefGather: *hasResult = true; *hasResultType = true; break; + case OpImageSparseTexelsResident: *hasResult = true; *hasResultType = true; break; + case OpNoLine: *hasResult = false; *hasResultType = false; break; + case OpAtomicFlagTestAndSet: *hasResult = true; *hasResultType = true; break; + case OpAtomicFlagClear: *hasResult = false; *hasResultType = false; break; + case OpImageSparseRead: *hasResult = true; *hasResultType = true; break; + case OpSizeOf: *hasResult = true; *hasResultType = true; break; + case OpTypePipeStorage: *hasResult = true; *hasResultType = false; break; + case OpConstantPipeStorage: *hasResult = true; *hasResultType = true; break; + case OpCreatePipeFromPipeStorage: *hasResult = true; *hasResultType = true; break; + case OpGetKernelLocalSizeForSubgroupCount: *hasResult = true; *hasResultType = true; break; + case OpGetKernelMaxNumSubgroups: *hasResult = true; *hasResultType = true; break; + case OpTypeNamedBarrier: *hasResult = true; *hasResultType = false; break; + case OpNamedBarrierInitialize: *hasResult = true; *hasResultType = true; break; + case OpMemoryNamedBarrier: *hasResult = false; *hasResultType = false; break; + case OpModuleProcessed: *hasResult = false; *hasResultType = false; break; + case OpExecutionModeId: *hasResult = false; *hasResultType = false; break; + case OpDecorateId: *hasResult = false; *hasResultType = false; break; + case OpGroupNonUniformElect: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformAll: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformAny: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformAllEqual: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformBroadcast: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformBroadcastFirst: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformBallot: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformInverseBallot: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformBallotBitExtract: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformBallotBitCount: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformBallotFindLSB: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformBallotFindMSB: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformShuffle: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformShuffleXor: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformShuffleUp: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformShuffleDown: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformIAdd: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformFAdd: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformIMul: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformFMul: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformSMin: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformUMin: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformFMin: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformSMax: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformUMax: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformFMax: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformBitwiseAnd: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformBitwiseOr: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformBitwiseXor: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformLogicalAnd: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformLogicalOr: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformLogicalXor: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformQuadBroadcast: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformQuadSwap: *hasResult = true; *hasResultType = true; break; + case OpCopyLogical: *hasResult = true; *hasResultType = true; break; + case OpPtrEqual: *hasResult = true; *hasResultType = true; break; + case OpPtrNotEqual: *hasResult = true; *hasResultType = true; break; + case OpPtrDiff: *hasResult = true; *hasResultType = true; break; + case OpTerminateInvocation: *hasResult = false; *hasResultType = false; break; + case OpSubgroupBallotKHR: *hasResult = true; *hasResultType = true; break; + case OpSubgroupFirstInvocationKHR: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAllKHR: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAnyKHR: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAllEqualKHR: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformRotateKHR: *hasResult = true; *hasResultType = true; break; + case OpSubgroupReadInvocationKHR: *hasResult = true; *hasResultType = true; break; + case OpTraceRayKHR: *hasResult = false; *hasResultType = false; break; + case OpExecuteCallableKHR: *hasResult = false; *hasResultType = false; break; + case OpConvertUToAccelerationStructureKHR: *hasResult = true; *hasResultType = true; break; + case OpIgnoreIntersectionKHR: *hasResult = false; *hasResultType = false; break; + case OpTerminateRayKHR: *hasResult = false; *hasResultType = false; break; + case OpSDot: *hasResult = true; *hasResultType = true; break; + case OpUDot: *hasResult = true; *hasResultType = true; break; + case OpSUDot: *hasResult = true; *hasResultType = true; break; + case OpSDotAccSat: *hasResult = true; *hasResultType = true; break; + case OpUDotAccSat: *hasResult = true; *hasResultType = true; break; + case OpSUDotAccSat: *hasResult = true; *hasResultType = true; break; + case OpTypeRayQueryKHR: *hasResult = true; *hasResultType = false; break; + case OpRayQueryInitializeKHR: *hasResult = false; *hasResultType = false; break; + case OpRayQueryTerminateKHR: *hasResult = false; *hasResultType = false; break; + case OpRayQueryGenerateIntersectionKHR: *hasResult = false; *hasResultType = false; break; + case OpRayQueryConfirmIntersectionKHR: *hasResult = false; *hasResultType = false; break; + case OpRayQueryProceedKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetIntersectionTypeKHR: *hasResult = true; *hasResultType = true; break; + case OpGroupIAddNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case OpGroupFAddNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case OpGroupFMinNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case OpGroupUMinNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case OpGroupSMinNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case OpGroupFMaxNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case OpGroupUMaxNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case OpGroupSMaxNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case OpFragmentMaskFetchAMD: *hasResult = true; *hasResultType = true; break; + case OpFragmentFetchAMD: *hasResult = true; *hasResultType = true; break; + case OpReadClockKHR: *hasResult = true; *hasResultType = true; break; + case OpImageSampleFootprintNV: *hasResult = true; *hasResultType = true; break; + case OpEmitMeshTasksEXT: *hasResult = false; *hasResultType = false; break; + case OpSetMeshOutputsEXT: *hasResult = false; *hasResultType = false; break; + case OpGroupNonUniformPartitionNV: *hasResult = true; *hasResultType = true; break; + case OpWritePackedPrimitiveIndices4x8NV: *hasResult = false; *hasResultType = false; break; + case OpReportIntersectionNV: *hasResult = true; *hasResultType = true; break; + case OpIgnoreIntersectionNV: *hasResult = false; *hasResultType = false; break; + case OpTerminateRayNV: *hasResult = false; *hasResultType = false; break; + case OpTraceNV: *hasResult = false; *hasResultType = false; break; + case OpTraceMotionNV: *hasResult = false; *hasResultType = false; break; + case OpTraceRayMotionNV: *hasResult = false; *hasResultType = false; break; + case OpTypeAccelerationStructureNV: *hasResult = true; *hasResultType = false; break; + case OpExecuteCallableNV: *hasResult = false; *hasResultType = false; break; + case OpTypeCooperativeMatrixNV: *hasResult = true; *hasResultType = false; break; + case OpCooperativeMatrixLoadNV: *hasResult = true; *hasResultType = true; break; + case OpCooperativeMatrixStoreNV: *hasResult = false; *hasResultType = false; break; + case OpCooperativeMatrixMulAddNV: *hasResult = true; *hasResultType = true; break; + case OpCooperativeMatrixLengthNV: *hasResult = true; *hasResultType = true; break; + case OpBeginInvocationInterlockEXT: *hasResult = false; *hasResultType = false; break; + case OpEndInvocationInterlockEXT: *hasResult = false; *hasResultType = false; break; + case OpDemoteToHelperInvocation: *hasResult = false; *hasResultType = false; break; + case OpIsHelperInvocationEXT: *hasResult = true; *hasResultType = true; break; + case OpConvertUToImageNV: *hasResult = true; *hasResultType = true; break; + case OpConvertUToSamplerNV: *hasResult = true; *hasResultType = true; break; + case OpConvertImageToUNV: *hasResult = true; *hasResultType = true; break; + case OpConvertSamplerToUNV: *hasResult = true; *hasResultType = true; break; + case OpConvertUToSampledImageNV: *hasResult = true; *hasResultType = true; break; + case OpConvertSampledImageToUNV: *hasResult = true; *hasResultType = true; break; + case OpSamplerImageAddressingModeNV: *hasResult = false; *hasResultType = false; break; + case OpSubgroupShuffleINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupShuffleDownINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupShuffleUpINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupShuffleXorINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupBlockReadINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupBlockWriteINTEL: *hasResult = false; *hasResultType = false; break; + case OpSubgroupImageBlockReadINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupImageBlockWriteINTEL: *hasResult = false; *hasResultType = false; break; + case OpSubgroupImageMediaBlockReadINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupImageMediaBlockWriteINTEL: *hasResult = false; *hasResultType = false; break; + case OpUCountLeadingZerosINTEL: *hasResult = true; *hasResultType = true; break; + case OpUCountTrailingZerosINTEL: *hasResult = true; *hasResultType = true; break; + case OpAbsISubINTEL: *hasResult = true; *hasResultType = true; break; + case OpAbsUSubINTEL: *hasResult = true; *hasResultType = true; break; + case OpIAddSatINTEL: *hasResult = true; *hasResultType = true; break; + case OpUAddSatINTEL: *hasResult = true; *hasResultType = true; break; + case OpIAverageINTEL: *hasResult = true; *hasResultType = true; break; + case OpUAverageINTEL: *hasResult = true; *hasResultType = true; break; + case OpIAverageRoundedINTEL: *hasResult = true; *hasResultType = true; break; + case OpUAverageRoundedINTEL: *hasResult = true; *hasResultType = true; break; + case OpISubSatINTEL: *hasResult = true; *hasResultType = true; break; + case OpUSubSatINTEL: *hasResult = true; *hasResultType = true; break; + case OpIMul32x16INTEL: *hasResult = true; *hasResultType = true; break; + case OpUMul32x16INTEL: *hasResult = true; *hasResultType = true; break; + case OpConstantFunctionPointerINTEL: *hasResult = true; *hasResultType = true; break; + case OpFunctionPointerCallINTEL: *hasResult = true; *hasResultType = true; break; + case OpAsmTargetINTEL: *hasResult = true; *hasResultType = true; break; + case OpAsmINTEL: *hasResult = true; *hasResultType = true; break; + case OpAsmCallINTEL: *hasResult = true; *hasResultType = true; break; + case OpAtomicFMinEXT: *hasResult = true; *hasResultType = true; break; + case OpAtomicFMaxEXT: *hasResult = true; *hasResultType = true; break; + case OpAssumeTrueKHR: *hasResult = false; *hasResultType = false; break; + case OpExpectKHR: *hasResult = true; *hasResultType = true; break; + case OpDecorateString: *hasResult = false; *hasResultType = false; break; + case OpMemberDecorateString: *hasResult = false; *hasResultType = false; break; + case OpVmeImageINTEL: *hasResult = true; *hasResultType = true; break; + case OpTypeVmeImageINTEL: *hasResult = true; *hasResultType = false; break; + case OpTypeAvcImePayloadINTEL: *hasResult = true; *hasResultType = false; break; + case OpTypeAvcRefPayloadINTEL: *hasResult = true; *hasResultType = false; break; + case OpTypeAvcSicPayloadINTEL: *hasResult = true; *hasResultType = false; break; + case OpTypeAvcMcePayloadINTEL: *hasResult = true; *hasResultType = false; break; + case OpTypeAvcMceResultINTEL: *hasResult = true; *hasResultType = false; break; + case OpTypeAvcImeResultINTEL: *hasResult = true; *hasResultType = false; break; + case OpTypeAvcImeResultSingleReferenceStreamoutINTEL: *hasResult = true; *hasResultType = false; break; + case OpTypeAvcImeResultDualReferenceStreamoutINTEL: *hasResult = true; *hasResultType = false; break; + case OpTypeAvcImeSingleReferenceStreaminINTEL: *hasResult = true; *hasResultType = false; break; + case OpTypeAvcImeDualReferenceStreaminINTEL: *hasResult = true; *hasResultType = false; break; + case OpTypeAvcRefResultINTEL: *hasResult = true; *hasResultType = false; break; + case OpTypeAvcSicResultINTEL: *hasResult = true; *hasResultType = false; break; + case OpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceSetInterShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceSetInterDirectionPenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceSetMotionVectorCostFunctionINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceSetAcOnlyHaarINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceConvertToImePayloadINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceConvertToImeResultINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceConvertToRefPayloadINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceConvertToRefResultINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceConvertToSicPayloadINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceConvertToSicResultINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetMotionVectorsINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetInterDistortionsINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetBestInterDistortionsINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetInterMajorShapeINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetInterMinorShapeINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetInterDirectionsINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetInterMotionVectorCountINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetInterReferenceIdsINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeInitializeINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeSetSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeSetDualReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeRefWindowSizeINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeAdjustRefOffsetINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeConvertToMcePayloadINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeSetMaxMotionVectorCountINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeSetUnidirectionalMixDisableINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeSetWeightedSadINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeEvaluateWithSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeEvaluateWithDualReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeConvertToMceResultINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeGetSingleReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeGetDualReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeStripSingleReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeStripDualReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeGetBorderReachedINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeGetTruncatedSearchIndicationINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcFmeInitializeINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcBmeInitializeINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcRefConvertToMcePayloadINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcRefSetBidirectionalMixDisableINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcRefSetBilinearFilterEnableINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcRefEvaluateWithSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcRefEvaluateWithDualReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcRefEvaluateWithMultiReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcRefConvertToMceResultINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicInitializeINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicConfigureSkcINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicConfigureIpeLumaINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicConfigureIpeLumaChromaINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicGetMotionVectorMaskINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicConvertToMcePayloadINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicSetBilinearFilterEnableINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicSetSkcForwardTransformEnableINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicEvaluateIpeINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicEvaluateWithSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicEvaluateWithDualReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicEvaluateWithMultiReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicConvertToMceResultINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicGetIpeLumaShapeINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicGetBestIpeLumaDistortionINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicGetBestIpeChromaDistortionINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicGetPackedIpeLumaModesINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicGetIpeChromaModeINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicGetInterRawSadsINTEL: *hasResult = true; *hasResultType = true; break; + case OpVariableLengthArrayINTEL: *hasResult = true; *hasResultType = true; break; + case OpSaveMemoryINTEL: *hasResult = true; *hasResultType = true; break; + case OpRestoreMemoryINTEL: *hasResult = false; *hasResultType = false; break; + case OpArbitraryFloatSinCosPiINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatCastINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatCastFromIntINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatCastToIntINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatAddINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatSubINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatMulINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatDivINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatGTINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatGEINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatLTINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatLEINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatEQINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatRecipINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatRSqrtINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatCbrtINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatHypotINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatSqrtINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatLogINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatLog2INTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatLog10INTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatLog1pINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatExpINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatExp2INTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatExp10INTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatExpm1INTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatSinINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatCosINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatSinCosINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatSinPiINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatCosPiINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatASinINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatASinPiINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatACosINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatACosPiINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatATanINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatATanPiINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatATan2INTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatPowINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatPowRINTEL: *hasResult = true; *hasResultType = true; break; + case OpArbitraryFloatPowNINTEL: *hasResult = true; *hasResultType = true; break; + case OpLoopControlINTEL: *hasResult = false; *hasResultType = false; break; + case OpAliasDomainDeclINTEL: *hasResult = true; *hasResultType = false; break; + case OpAliasScopeDeclINTEL: *hasResult = true; *hasResultType = false; break; + case OpAliasScopeListDeclINTEL: *hasResult = true; *hasResultType = false; break; + case OpFixedSqrtINTEL: *hasResult = true; *hasResultType = true; break; + case OpFixedRecipINTEL: *hasResult = true; *hasResultType = true; break; + case OpFixedRsqrtINTEL: *hasResult = true; *hasResultType = true; break; + case OpFixedSinINTEL: *hasResult = true; *hasResultType = true; break; + case OpFixedCosINTEL: *hasResult = true; *hasResultType = true; break; + case OpFixedSinCosINTEL: *hasResult = true; *hasResultType = true; break; + case OpFixedSinPiINTEL: *hasResult = true; *hasResultType = true; break; + case OpFixedCosPiINTEL: *hasResult = true; *hasResultType = true; break; + case OpFixedSinCosPiINTEL: *hasResult = true; *hasResultType = true; break; + case OpFixedLogINTEL: *hasResult = true; *hasResultType = true; break; + case OpFixedExpINTEL: *hasResult = true; *hasResultType = true; break; + case OpPtrCastToCrossWorkgroupINTEL: *hasResult = true; *hasResultType = true; break; + case OpCrossWorkgroupCastToPtrINTEL: *hasResult = true; *hasResultType = true; break; + case OpReadPipeBlockingINTEL: *hasResult = true; *hasResultType = true; break; + case OpWritePipeBlockingINTEL: *hasResult = true; *hasResultType = true; break; + case OpFPGARegINTEL: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetRayTMinKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetRayFlagsKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetIntersectionTKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetIntersectionInstanceCustomIndexKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetIntersectionInstanceIdKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetIntersectionGeometryIndexKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetIntersectionPrimitiveIndexKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetIntersectionBarycentricsKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetIntersectionFrontFaceKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetIntersectionCandidateAABBOpaqueKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetIntersectionObjectRayDirectionKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetIntersectionObjectRayOriginKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetWorldRayDirectionKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetWorldRayOriginKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetIntersectionObjectToWorldKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetIntersectionWorldToObjectKHR: *hasResult = true; *hasResultType = true; break; + case OpAtomicFAddEXT: *hasResult = true; *hasResultType = true; break; + case OpTypeBufferSurfaceINTEL: *hasResult = true; *hasResultType = false; break; + case OpTypeStructContinuedINTEL: *hasResult = false; *hasResultType = false; break; + case OpConstantCompositeContinuedINTEL: *hasResult = false; *hasResultType = false; break; + case OpSpecConstantCompositeContinuedINTEL: *hasResult = false; *hasResultType = false; break; + case OpControlBarrierArriveINTEL: *hasResult = false; *hasResultType = false; break; + case OpControlBarrierWaitINTEL: *hasResult = false; *hasResultType = false; break; + case OpGroupIMulKHR: *hasResult = true; *hasResultType = true; break; + case OpGroupFMulKHR: *hasResult = true; *hasResultType = true; break; + case OpGroupBitwiseAndKHR: *hasResult = true; *hasResultType = true; break; + case OpGroupBitwiseOrKHR: *hasResult = true; *hasResultType = true; break; + case OpGroupBitwiseXorKHR: *hasResult = true; *hasResultType = true; break; + case OpGroupLogicalAndKHR: *hasResult = true; *hasResultType = true; break; + case OpGroupLogicalOrKHR: *hasResult = true; *hasResultType = true; break; + case OpGroupLogicalXorKHR: *hasResult = true; *hasResultType = true; break; + } +} +#endif /* SPV_ENABLE_UTILITY_CODE */ + +// Overload operator| for mask bit combining + +inline ImageOperandsMask operator|(ImageOperandsMask a, ImageOperandsMask b) { return ImageOperandsMask(unsigned(a) | unsigned(b)); } +inline FPFastMathModeMask operator|(FPFastMathModeMask a, FPFastMathModeMask b) { return FPFastMathModeMask(unsigned(a) | unsigned(b)); } +inline SelectionControlMask operator|(SelectionControlMask a, SelectionControlMask b) { return SelectionControlMask(unsigned(a) | unsigned(b)); } +inline LoopControlMask operator|(LoopControlMask a, LoopControlMask b) { return LoopControlMask(unsigned(a) | unsigned(b)); } +inline FunctionControlMask operator|(FunctionControlMask a, FunctionControlMask b) { return FunctionControlMask(unsigned(a) | unsigned(b)); } +inline MemorySemanticsMask operator|(MemorySemanticsMask a, MemorySemanticsMask b) { return MemorySemanticsMask(unsigned(a) | unsigned(b)); } +inline MemoryAccessMask operator|(MemoryAccessMask a, MemoryAccessMask b) { return MemoryAccessMask(unsigned(a) | unsigned(b)); } +inline KernelProfilingInfoMask operator|(KernelProfilingInfoMask a, KernelProfilingInfoMask b) { return KernelProfilingInfoMask(unsigned(a) | unsigned(b)); } +inline RayFlagsMask operator|(RayFlagsMask a, RayFlagsMask b) { return RayFlagsMask(unsigned(a) | unsigned(b)); } +inline FragmentShadingRateMask operator|(FragmentShadingRateMask a, FragmentShadingRateMask b) { return FragmentShadingRateMask(unsigned(a) | unsigned(b)); } + +} // end namespace spv + +#endif // #ifndef spirv_HPP + diff --git a/local_packages/include/spirv_cross/spirv_cfg.hpp b/local_packages/include/spirv_cross/spirv_cfg.hpp new file mode 100644 index 0000000..1d85fe0 --- /dev/null +++ b/local_packages/include/spirv_cross/spirv_cfg.hpp @@ -0,0 +1,168 @@ +/* + * Copyright 2016-2021 Arm Limited + * SPDX-License-Identifier: Apache-2.0 OR MIT + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * At your option, you may choose to accept this material under either: + * 1. The Apache License, Version 2.0, found at , or + * 2. The MIT License, found at . + */ + +#ifndef SPIRV_CROSS_CFG_HPP +#define SPIRV_CROSS_CFG_HPP + +#include "spirv_common.hpp" +#include + +namespace SPIRV_CROSS_NAMESPACE +{ +class Compiler; +class CFG +{ +public: + CFG(Compiler &compiler, const SPIRFunction &function); + + Compiler &get_compiler() + { + return compiler; + } + + const Compiler &get_compiler() const + { + return compiler; + } + + const SPIRFunction &get_function() const + { + return func; + } + + uint32_t get_immediate_dominator(uint32_t block) const + { + auto itr = immediate_dominators.find(block); + if (itr != std::end(immediate_dominators)) + return itr->second; + else + return 0; + } + + bool is_reachable(uint32_t block) const + { + return visit_order.count(block) != 0; + } + + uint32_t get_visit_order(uint32_t block) const + { + auto itr = visit_order.find(block); + assert(itr != std::end(visit_order)); + int v = itr->second.get(); + assert(v > 0); + return uint32_t(v); + } + + uint32_t find_common_dominator(uint32_t a, uint32_t b) const; + + const SmallVector &get_preceding_edges(uint32_t block) const + { + auto itr = preceding_edges.find(block); + if (itr != std::end(preceding_edges)) + return itr->second; + else + return empty_vector; + } + + const SmallVector &get_succeeding_edges(uint32_t block) const + { + auto itr = succeeding_edges.find(block); + if (itr != std::end(succeeding_edges)) + return itr->second; + else + return empty_vector; + } + + template + void walk_from(std::unordered_set &seen_blocks, uint32_t block, const Op &op) const + { + if (seen_blocks.count(block)) + return; + seen_blocks.insert(block); + + if (op(block)) + { + for (auto b : get_succeeding_edges(block)) + walk_from(seen_blocks, b, op); + } + } + + uint32_t find_loop_dominator(uint32_t block) const; + + bool node_terminates_control_flow_in_sub_graph(BlockID from, BlockID to) const; + +private: + struct VisitOrder + { + int &get() + { + return v; + } + + const int &get() const + { + return v; + } + + int v = -1; + }; + + Compiler &compiler; + const SPIRFunction &func; + std::unordered_map> preceding_edges; + std::unordered_map> succeeding_edges; + std::unordered_map immediate_dominators; + std::unordered_map visit_order; + SmallVector post_order; + SmallVector empty_vector; + + void add_branch(uint32_t from, uint32_t to); + void build_post_order_visit_order(); + void build_immediate_dominators(); + bool post_order_visit(uint32_t block); + uint32_t visit_count = 0; + + bool is_back_edge(uint32_t to) const; + bool has_visited_forward_edge(uint32_t to) const; +}; + +class DominatorBuilder +{ +public: + DominatorBuilder(const CFG &cfg); + + void add_block(uint32_t block); + uint32_t get_dominator() const + { + return dominator; + } + + void lift_continue_block_dominator(); + +private: + const CFG &cfg; + uint32_t dominator = 0; +}; +} // namespace SPIRV_CROSS_NAMESPACE + +#endif diff --git a/local_packages/include/spirv_cross/spirv_common.hpp b/local_packages/include/spirv_cross/spirv_common.hpp new file mode 100644 index 0000000..71b1ead --- /dev/null +++ b/local_packages/include/spirv_cross/spirv_common.hpp @@ -0,0 +1,1920 @@ +/* + * Copyright 2015-2021 Arm Limited + * SPDX-License-Identifier: Apache-2.0 OR MIT + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * At your option, you may choose to accept this material under either: + * 1. The Apache License, Version 2.0, found at , or + * 2. The MIT License, found at . + */ + +#ifndef SPIRV_CROSS_COMMON_HPP +#define SPIRV_CROSS_COMMON_HPP + +#ifndef SPV_ENABLE_UTILITY_CODE +#define SPV_ENABLE_UTILITY_CODE +#endif +#include "spirv.hpp" + +#include "spirv_cross_containers.hpp" +#include "spirv_cross_error_handling.hpp" +#include + +// A bit crude, but allows projects which embed SPIRV-Cross statically to +// effectively hide all the symbols from other projects. +// There is a case where we have: +// - Project A links against SPIRV-Cross statically. +// - Project A links against Project B statically. +// - Project B links against SPIRV-Cross statically (might be a different version). +// This leads to a conflict with extremely bizarre results. +// By overriding the namespace in one of the project builds, we can work around this. +// If SPIRV-Cross is embedded in dynamic libraries, +// prefer using -fvisibility=hidden on GCC/Clang instead. +#ifdef SPIRV_CROSS_NAMESPACE_OVERRIDE +#define SPIRV_CROSS_NAMESPACE SPIRV_CROSS_NAMESPACE_OVERRIDE +#else +#define SPIRV_CROSS_NAMESPACE spirv_cross +#endif + +namespace SPIRV_CROSS_NAMESPACE +{ +namespace inner +{ +template +void join_helper(StringStream<> &stream, T &&t) +{ + stream << std::forward(t); +} + +template +void join_helper(StringStream<> &stream, T &&t, Ts &&... ts) +{ + stream << std::forward(t); + join_helper(stream, std::forward(ts)...); +} +} // namespace inner + +class Bitset +{ +public: + Bitset() = default; + explicit inline Bitset(uint64_t lower_) + : lower(lower_) + { + } + + inline bool get(uint32_t bit) const + { + if (bit < 64) + return (lower & (1ull << bit)) != 0; + else + return higher.count(bit) != 0; + } + + inline void set(uint32_t bit) + { + if (bit < 64) + lower |= 1ull << bit; + else + higher.insert(bit); + } + + inline void clear(uint32_t bit) + { + if (bit < 64) + lower &= ~(1ull << bit); + else + higher.erase(bit); + } + + inline uint64_t get_lower() const + { + return lower; + } + + inline void reset() + { + lower = 0; + higher.clear(); + } + + inline void merge_and(const Bitset &other) + { + lower &= other.lower; + std::unordered_set tmp_set; + for (auto &v : higher) + if (other.higher.count(v) != 0) + tmp_set.insert(v); + higher = std::move(tmp_set); + } + + inline void merge_or(const Bitset &other) + { + lower |= other.lower; + for (auto &v : other.higher) + higher.insert(v); + } + + inline bool operator==(const Bitset &other) const + { + if (lower != other.lower) + return false; + + if (higher.size() != other.higher.size()) + return false; + + for (auto &v : higher) + if (other.higher.count(v) == 0) + return false; + + return true; + } + + inline bool operator!=(const Bitset &other) const + { + return !(*this == other); + } + + template + void for_each_bit(const Op &op) const + { + // TODO: Add ctz-based iteration. + for (uint32_t i = 0; i < 64; i++) + { + if (lower & (1ull << i)) + op(i); + } + + if (higher.empty()) + return; + + // Need to enforce an order here for reproducible results, + // but hitting this path should happen extremely rarely, so having this slow path is fine. + SmallVector bits; + bits.reserve(higher.size()); + for (auto &v : higher) + bits.push_back(v); + std::sort(std::begin(bits), std::end(bits)); + + for (auto &v : bits) + op(v); + } + + inline bool empty() const + { + return lower == 0 && higher.empty(); + } + +private: + // The most common bits to set are all lower than 64, + // so optimize for this case. Bits spilling outside 64 go into a slower data structure. + // In almost all cases, higher data structure will not be used. + uint64_t lower = 0; + std::unordered_set higher; +}; + +// Helper template to avoid lots of nasty string temporary munging. +template +std::string join(Ts &&... ts) +{ + StringStream<> stream; + inner::join_helper(stream, std::forward(ts)...); + return stream.str(); +} + +inline std::string merge(const SmallVector &list, const char *between = ", ") +{ + StringStream<> stream; + for (auto &elem : list) + { + stream << elem; + if (&elem != &list.back()) + stream << between; + } + return stream.str(); +} + +// Make sure we don't accidentally call this with float or doubles with SFINAE. +// Have to use the radix-aware overload. +template ::value, int>::type = 0> +inline std::string convert_to_string(const T &t) +{ + return std::to_string(t); +} + +static inline std::string convert_to_string(int32_t value) +{ + // INT_MIN is ... special on some backends. If we use a decimal literal, and negate it, we + // could accidentally promote the literal to long first, then negate. + // To workaround it, emit int(0x80000000) instead. + if (value == std::numeric_limits::min()) + return "int(0x80000000)"; + else + return std::to_string(value); +} + +static inline std::string convert_to_string(int64_t value, const std::string &int64_type, bool long_long_literal_suffix) +{ + // INT64_MIN is ... special on some backends. + // If we use a decimal literal, and negate it, we might overflow the representable numbers. + // To workaround it, emit int(0x80000000) instead. + if (value == std::numeric_limits::min()) + return join(int64_type, "(0x8000000000000000u", (long_long_literal_suffix ? "ll" : "l"), ")"); + else + return std::to_string(value) + (long_long_literal_suffix ? "ll" : "l"); +} + +// Allow implementations to set a convenient standard precision +#ifndef SPIRV_CROSS_FLT_FMT +#define SPIRV_CROSS_FLT_FMT "%.32g" +#endif + +// Disable sprintf and strcat warnings. +// We cannot rely on snprintf and family existing because, ..., MSVC. +#if defined(__clang__) || defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" +#elif defined(_MSC_VER) +#pragma warning(push) +#pragma warning(disable : 4996) +#endif + +static inline void fixup_radix_point(char *str, char radix_point) +{ + // Setting locales is a very risky business in multi-threaded program, + // so just fixup locales instead. We only need to care about the radix point. + if (radix_point != '.') + { + while (*str != '\0') + { + if (*str == radix_point) + *str = '.'; + str++; + } + } +} + +inline std::string convert_to_string(float t, char locale_radix_point) +{ + // std::to_string for floating point values is broken. + // Fallback to something more sane. + char buf[64]; + sprintf(buf, SPIRV_CROSS_FLT_FMT, t); + fixup_radix_point(buf, locale_radix_point); + + // Ensure that the literal is float. + if (!strchr(buf, '.') && !strchr(buf, 'e')) + strcat(buf, ".0"); + return buf; +} + +inline std::string convert_to_string(double t, char locale_radix_point) +{ + // std::to_string for floating point values is broken. + // Fallback to something more sane. + char buf[64]; + sprintf(buf, SPIRV_CROSS_FLT_FMT, t); + fixup_radix_point(buf, locale_radix_point); + + // Ensure that the literal is float. + if (!strchr(buf, '.') && !strchr(buf, 'e')) + strcat(buf, ".0"); + return buf; +} + +template +struct ValueSaver +{ + explicit ValueSaver(T ¤t_) + : current(current_) + , saved(current_) + { + } + + void release() + { + current = saved; + } + + ~ValueSaver() + { + release(); + } + + T ¤t; + T saved; +}; + +#if defined(__clang__) || defined(__GNUC__) +#pragma GCC diagnostic pop +#elif defined(_MSC_VER) +#pragma warning(pop) +#endif + +struct Instruction +{ + uint16_t op = 0; + uint16_t count = 0; + // If offset is 0 (not a valid offset into the instruction stream), + // we have an instruction stream which is embedded in the object. + uint32_t offset = 0; + uint32_t length = 0; + + inline bool is_embedded() const + { + return offset == 0; + } +}; + +struct EmbeddedInstruction : Instruction +{ + SmallVector ops; +}; + +enum Types +{ + TypeNone, + TypeType, + TypeVariable, + TypeConstant, + TypeFunction, + TypeFunctionPrototype, + TypeBlock, + TypeExtension, + TypeExpression, + TypeConstantOp, + TypeCombinedImageSampler, + TypeAccessChain, + TypeUndef, + TypeString, + TypeCount +}; + +template +class TypedID; + +template <> +class TypedID +{ +public: + TypedID() = default; + TypedID(uint32_t id_) + : id(id_) + { + } + + template + TypedID(const TypedID &other) + { + *this = other; + } + + template + TypedID &operator=(const TypedID &other) + { + id = uint32_t(other); + return *this; + } + + // Implicit conversion to u32 is desired here. + // As long as we block implicit conversion between TypedID and TypedID we're good. + operator uint32_t() const + { + return id; + } + + template + operator TypedID() const + { + return TypedID(*this); + } + +private: + uint32_t id = 0; +}; + +template +class TypedID +{ +public: + TypedID() = default; + TypedID(uint32_t id_) + : id(id_) + { + } + + explicit TypedID(const TypedID &other) + : id(uint32_t(other)) + { + } + + operator uint32_t() const + { + return id; + } + +private: + uint32_t id = 0; +}; + +using VariableID = TypedID; +using TypeID = TypedID; +using ConstantID = TypedID; +using FunctionID = TypedID; +using BlockID = TypedID; +using ID = TypedID; + +// Helper for Variant interface. +struct IVariant +{ + virtual ~IVariant() = default; + virtual IVariant *clone(ObjectPoolBase *pool) = 0; + ID self = 0; + +protected: + IVariant() = default; + IVariant(const IVariant&) = default; + IVariant &operator=(const IVariant&) = default; +}; + +#define SPIRV_CROSS_DECLARE_CLONE(T) \ + IVariant *clone(ObjectPoolBase *pool) override \ + { \ + return static_cast *>(pool)->allocate(*this); \ + } + +struct SPIRUndef : IVariant +{ + enum + { + type = TypeUndef + }; + + explicit SPIRUndef(TypeID basetype_) + : basetype(basetype_) + { + } + TypeID basetype; + + SPIRV_CROSS_DECLARE_CLONE(SPIRUndef) +}; + +struct SPIRString : IVariant +{ + enum + { + type = TypeString + }; + + explicit SPIRString(std::string str_) + : str(std::move(str_)) + { + } + + std::string str; + + SPIRV_CROSS_DECLARE_CLONE(SPIRString) +}; + +// This type is only used by backends which need to access the combined image and sampler IDs separately after +// the OpSampledImage opcode. +struct SPIRCombinedImageSampler : IVariant +{ + enum + { + type = TypeCombinedImageSampler + }; + SPIRCombinedImageSampler(TypeID type_, VariableID image_, VariableID sampler_) + : combined_type(type_) + , image(image_) + , sampler(sampler_) + { + } + TypeID combined_type; + VariableID image; + VariableID sampler; + + SPIRV_CROSS_DECLARE_CLONE(SPIRCombinedImageSampler) +}; + +struct SPIRConstantOp : IVariant +{ + enum + { + type = TypeConstantOp + }; + + SPIRConstantOp(TypeID result_type, spv::Op op, const uint32_t *args, uint32_t length) + : opcode(op) + , basetype(result_type) + { + arguments.reserve(length); + for (uint32_t i = 0; i < length; i++) + arguments.push_back(args[i]); + } + + spv::Op opcode; + SmallVector arguments; + TypeID basetype; + + SPIRV_CROSS_DECLARE_CLONE(SPIRConstantOp) +}; + +struct SPIRType : IVariant +{ + enum + { + type = TypeType + }; + + enum BaseType + { + Unknown, + Void, + Boolean, + SByte, + UByte, + Short, + UShort, + Int, + UInt, + Int64, + UInt64, + AtomicCounter, + Half, + Float, + Double, + Struct, + Image, + SampledImage, + Sampler, + AccelerationStructure, + RayQuery, + + // Keep internal types at the end. + ControlPointArray, + Interpolant, + Char + }; + + // Scalar/vector/matrix support. + BaseType basetype = Unknown; + uint32_t width = 0; + uint32_t vecsize = 1; + uint32_t columns = 1; + + // Arrays, support array of arrays by having a vector of array sizes. + SmallVector array; + + // Array elements can be either specialization constants or specialization ops. + // This array determines how to interpret the array size. + // If an element is true, the element is a literal, + // otherwise, it's an expression, which must be resolved on demand. + // The actual size is not really known until runtime. + SmallVector array_size_literal; + + // Pointers + // Keep track of how many pointer layers we have. + uint32_t pointer_depth = 0; + bool pointer = false; + bool forward_pointer = false; + + spv::StorageClass storage = spv::StorageClassGeneric; + + SmallVector member_types; + + // If member order has been rewritten to handle certain scenarios with Offset, + // allow codegen to rewrite the index. + SmallVector member_type_index_redirection; + + struct ImageType + { + TypeID type; + spv::Dim dim; + bool depth; + bool arrayed; + bool ms; + uint32_t sampled; + spv::ImageFormat format; + spv::AccessQualifier access; + } image; + + // Structs can be declared multiple times if they are used as part of interface blocks. + // We want to detect this so that we only emit the struct definition once. + // Since we cannot rely on OpName to be equal, we need to figure out aliases. + TypeID type_alias = 0; + + // Denotes the type which this type is based on. + // Allows the backend to traverse how a complex type is built up during access chains. + TypeID parent_type = 0; + + // Used in backends to avoid emitting members with conflicting names. + std::unordered_set member_name_cache; + + SPIRV_CROSS_DECLARE_CLONE(SPIRType) +}; + +struct SPIRExtension : IVariant +{ + enum + { + type = TypeExtension + }; + + enum Extension + { + Unsupported, + GLSL, + SPV_debug_info, + SPV_AMD_shader_ballot, + SPV_AMD_shader_explicit_vertex_parameter, + SPV_AMD_shader_trinary_minmax, + SPV_AMD_gcn_shader, + NonSemanticDebugPrintf, + NonSemanticShaderDebugInfo + }; + + explicit SPIRExtension(Extension ext_) + : ext(ext_) + { + } + + Extension ext; + SPIRV_CROSS_DECLARE_CLONE(SPIRExtension) +}; + +// SPIREntryPoint is not a variant since its IDs are used to decorate OpFunction, +// so in order to avoid conflicts, we can't stick them in the ids array. +struct SPIREntryPoint +{ + SPIREntryPoint(FunctionID self_, spv::ExecutionModel execution_model, const std::string &entry_name) + : self(self_) + , name(entry_name) + , orig_name(entry_name) + , model(execution_model) + { + } + SPIREntryPoint() = default; + + FunctionID self = 0; + std::string name; + std::string orig_name; + SmallVector interface_variables; + + Bitset flags; + struct WorkgroupSize + { + uint32_t x = 0, y = 0, z = 0; + uint32_t id_x = 0, id_y = 0, id_z = 0; + uint32_t constant = 0; // Workgroup size can be expressed as a constant/spec-constant instead. + } workgroup_size; + uint32_t invocations = 0; + uint32_t output_vertices = 0; + uint32_t output_primitives = 0; + spv::ExecutionModel model = spv::ExecutionModelMax; + bool geometry_passthrough = false; +}; + +struct SPIRExpression : IVariant +{ + enum + { + type = TypeExpression + }; + + // Only created by the backend target to avoid creating tons of temporaries. + SPIRExpression(std::string expr, TypeID expression_type_, bool immutable_) + : expression(std::move(expr)) + , expression_type(expression_type_) + , immutable(immutable_) + { + } + + // If non-zero, prepend expression with to_expression(base_expression). + // Used in amortizing multiple calls to to_expression() + // where in certain cases that would quickly force a temporary when not needed. + ID base_expression = 0; + + std::string expression; + TypeID expression_type = 0; + + // If this expression is a forwarded load, + // allow us to reference the original variable. + ID loaded_from = 0; + + // If this expression will never change, we can avoid lots of temporaries + // in high level source. + // An expression being immutable can be speculative, + // it is assumed that this is true almost always. + bool immutable = false; + + // Before use, this expression must be transposed. + // This is needed for targets which don't support row_major layouts. + bool need_transpose = false; + + // Whether or not this is an access chain expression. + bool access_chain = false; + + // A list of expressions which this expression depends on. + SmallVector expression_dependencies; + + // By reading this expression, we implicitly read these expressions as well. + // Used by access chain Store and Load since we read multiple expressions in this case. + SmallVector implied_read_expressions; + + // The expression was emitted at a certain scope. Lets us track when an expression read means multiple reads. + uint32_t emitted_loop_level = 0; + + SPIRV_CROSS_DECLARE_CLONE(SPIRExpression) +}; + +struct SPIRFunctionPrototype : IVariant +{ + enum + { + type = TypeFunctionPrototype + }; + + explicit SPIRFunctionPrototype(TypeID return_type_) + : return_type(return_type_) + { + } + + TypeID return_type; + SmallVector parameter_types; + + SPIRV_CROSS_DECLARE_CLONE(SPIRFunctionPrototype) +}; + +struct SPIRBlock : IVariant +{ + enum + { + type = TypeBlock + }; + + enum Terminator + { + Unknown, + Direct, // Emit next block directly without a particular condition. + + Select, // Block ends with an if/else block. + MultiSelect, // Block ends with switch statement. + + Return, // Block ends with return. + Unreachable, // Noop + Kill, // Discard + IgnoreIntersection, // Ray Tracing + TerminateRay, // Ray Tracing + EmitMeshTasks // Mesh shaders + }; + + enum Merge + { + MergeNone, + MergeLoop, + MergeSelection + }; + + enum Hints + { + HintNone, + HintUnroll, + HintDontUnroll, + HintFlatten, + HintDontFlatten + }; + + enum Method + { + MergeToSelectForLoop, + MergeToDirectForLoop, + MergeToSelectContinueForLoop + }; + + enum ContinueBlockType + { + ContinueNone, + + // Continue block is branchless and has at least one instruction. + ForLoop, + + // Noop continue block. + WhileLoop, + + // Continue block is conditional. + DoWhileLoop, + + // Highly unlikely that anything will use this, + // since it is really awkward/impossible to express in GLSL. + ComplexLoop + }; + + enum : uint32_t + { + NoDominator = 0xffffffffu + }; + + Terminator terminator = Unknown; + Merge merge = MergeNone; + Hints hint = HintNone; + BlockID next_block = 0; + BlockID merge_block = 0; + BlockID continue_block = 0; + + ID return_value = 0; // If 0, return nothing (void). + ID condition = 0; + BlockID true_block = 0; + BlockID false_block = 0; + BlockID default_block = 0; + + // If terminator is EmitMeshTasksEXT. + struct + { + ID groups[3]; + ID payload; + } mesh = {}; + + SmallVector ops; + + struct Phi + { + ID local_variable; // flush local variable ... + BlockID parent; // If we're in from_block and want to branch into this block ... + VariableID function_variable; // to this function-global "phi" variable first. + }; + + // Before entering this block flush out local variables to magical "phi" variables. + SmallVector phi_variables; + + // Declare these temporaries before beginning the block. + // Used for handling complex continue blocks which have side effects. + SmallVector> declare_temporary; + + // Declare these temporaries, but only conditionally if this block turns out to be + // a complex loop header. + SmallVector> potential_declare_temporary; + + struct Case + { + uint64_t value; + BlockID block; + }; + SmallVector cases_32bit; + SmallVector cases_64bit; + + // If we have tried to optimize code for this block but failed, + // keep track of this. + bool disable_block_optimization = false; + + // If the continue block is complex, fallback to "dumb" for loops. + bool complex_continue = false; + + // Do we need a ladder variable to defer breaking out of a loop construct after a switch block? + bool need_ladder_break = false; + + // If marked, we have explicitly handled Phi from this block, so skip any flushes related to that on a branch. + // Used to handle an edge case with switch and case-label fallthrough where fall-through writes to Phi. + BlockID ignore_phi_from_block = 0; + + // The dominating block which this block might be within. + // Used in continue; blocks to determine if we really need to write continue. + BlockID loop_dominator = 0; + + // All access to these variables are dominated by this block, + // so before branching anywhere we need to make sure that we declare these variables. + SmallVector dominated_variables; + + // These are variables which should be declared in a for loop header, if we + // fail to use a classic for-loop, + // we remove these variables, and fall back to regular variables outside the loop. + SmallVector loop_variables; + + // Some expressions are control-flow dependent, i.e. any instruction which relies on derivatives or + // sub-group-like operations. + // Make sure that we only use these expressions in the original block. + SmallVector invalidate_expressions; + + SPIRV_CROSS_DECLARE_CLONE(SPIRBlock) +}; + +struct SPIRFunction : IVariant +{ + enum + { + type = TypeFunction + }; + + SPIRFunction(TypeID return_type_, TypeID function_type_) + : return_type(return_type_) + , function_type(function_type_) + { + } + + struct Parameter + { + TypeID type; + ID id; + uint32_t read_count; + uint32_t write_count; + + // Set to true if this parameter aliases a global variable, + // used mostly in Metal where global variables + // have to be passed down to functions as regular arguments. + // However, for this kind of variable, we should not care about + // read and write counts as access to the function arguments + // is not local to the function in question. + bool alias_global_variable; + }; + + // When calling a function, and we're remapping separate image samplers, + // resolve these arguments into combined image samplers and pass them + // as additional arguments in this order. + // It gets more complicated as functions can pull in their own globals + // and combine them with parameters, + // so we need to distinguish if something is local parameter index + // or a global ID. + struct CombinedImageSamplerParameter + { + VariableID id; + VariableID image_id; + VariableID sampler_id; + bool global_image; + bool global_sampler; + bool depth; + }; + + TypeID return_type; + TypeID function_type; + SmallVector arguments; + + // Can be used by backends to add magic arguments. + // Currently used by combined image/sampler implementation. + + SmallVector shadow_arguments; + SmallVector local_variables; + BlockID entry_block = 0; + SmallVector blocks; + SmallVector combined_parameters; + + struct EntryLine + { + uint32_t file_id = 0; + uint32_t line_literal = 0; + }; + EntryLine entry_line; + + void add_local_variable(VariableID id) + { + local_variables.push_back(id); + } + + void add_parameter(TypeID parameter_type, ID id, bool alias_global_variable = false) + { + // Arguments are read-only until proven otherwise. + arguments.push_back({ parameter_type, id, 0u, 0u, alias_global_variable }); + } + + // Hooks to be run when the function returns. + // Mostly used for lowering internal data structures onto flattened structures. + // Need to defer this, because they might rely on things which change during compilation. + // Intentionally not a small vector, this one is rare, and std::function can be large. + Vector> fixup_hooks_out; + + // Hooks to be run when the function begins. + // Mostly used for populating internal data structures from flattened structures. + // Need to defer this, because they might rely on things which change during compilation. + // Intentionally not a small vector, this one is rare, and std::function can be large. + Vector> fixup_hooks_in; + + // On function entry, make sure to copy a constant array into thread addr space to work around + // the case where we are passing a constant array by value to a function on backends which do not + // consider arrays value types. + SmallVector constant_arrays_needed_on_stack; + + bool active = false; + bool flush_undeclared = true; + bool do_combined_parameters = true; + + SPIRV_CROSS_DECLARE_CLONE(SPIRFunction) +}; + +struct SPIRAccessChain : IVariant +{ + enum + { + type = TypeAccessChain + }; + + SPIRAccessChain(TypeID basetype_, spv::StorageClass storage_, std::string base_, std::string dynamic_index_, + int32_t static_index_) + : basetype(basetype_) + , storage(storage_) + , base(std::move(base_)) + , dynamic_index(std::move(dynamic_index_)) + , static_index(static_index_) + { + } + + // The access chain represents an offset into a buffer. + // Some backends need more complicated handling of access chains to be able to use buffers, like HLSL + // which has no usable buffer type ala GLSL SSBOs. + // StructuredBuffer is too limited, so our only option is to deal with ByteAddressBuffer which works with raw addresses. + + TypeID basetype; + spv::StorageClass storage; + std::string base; + std::string dynamic_index; + int32_t static_index; + + VariableID loaded_from = 0; + uint32_t matrix_stride = 0; + uint32_t array_stride = 0; + bool row_major_matrix = false; + bool immutable = false; + + // By reading this expression, we implicitly read these expressions as well. + // Used by access chain Store and Load since we read multiple expressions in this case. + SmallVector implied_read_expressions; + + SPIRV_CROSS_DECLARE_CLONE(SPIRAccessChain) +}; + +struct SPIRVariable : IVariant +{ + enum + { + type = TypeVariable + }; + + SPIRVariable() = default; + SPIRVariable(TypeID basetype_, spv::StorageClass storage_, ID initializer_ = 0, VariableID basevariable_ = 0) + : basetype(basetype_) + , storage(storage_) + , initializer(initializer_) + , basevariable(basevariable_) + { + } + + TypeID basetype = 0; + spv::StorageClass storage = spv::StorageClassGeneric; + uint32_t decoration = 0; + ID initializer = 0; + VariableID basevariable = 0; + + SmallVector dereference_chain; + bool compat_builtin = false; + + // If a variable is shadowed, we only statically assign to it + // and never actually emit a statement for it. + // When we read the variable as an expression, just forward + // shadowed_id as the expression. + bool statically_assigned = false; + ID static_expression = 0; + + // Temporaries which can remain forwarded as long as this variable is not modified. + SmallVector dependees; + + bool deferred_declaration = false; + bool phi_variable = false; + + // Used to deal with Phi variable flushes. See flush_phi(). + bool allocate_temporary_copy = false; + + bool remapped_variable = false; + uint32_t remapped_components = 0; + + // The block which dominates all access to this variable. + BlockID dominator = 0; + // If true, this variable is a loop variable, when accessing the variable + // outside a loop, + // we should statically forward it. + bool loop_variable = false; + // Set to true while we're inside the for loop. + bool loop_variable_enable = false; + + SPIRFunction::Parameter *parameter = nullptr; + + SPIRV_CROSS_DECLARE_CLONE(SPIRVariable) +}; + +struct SPIRConstant : IVariant +{ + enum + { + type = TypeConstant + }; + + union Constant + { + uint32_t u32; + int32_t i32; + float f32; + + uint64_t u64; + int64_t i64; + double f64; + }; + + struct ConstantVector + { + Constant r[4]; + // If != 0, this element is a specialization constant, and we should keep track of it as such. + ID id[4]; + uint32_t vecsize = 1; + + ConstantVector() + { + memset(r, 0, sizeof(r)); + } + }; + + struct ConstantMatrix + { + ConstantVector c[4]; + // If != 0, this column is a specialization constant, and we should keep track of it as such. + ID id[4]; + uint32_t columns = 1; + }; + + static inline float f16_to_f32(uint16_t u16_value) + { + // Based on the GLM implementation. + int s = (u16_value >> 15) & 0x1; + int e = (u16_value >> 10) & 0x1f; + int m = (u16_value >> 0) & 0x3ff; + + union + { + float f32; + uint32_t u32; + } u; + + if (e == 0) + { + if (m == 0) + { + u.u32 = uint32_t(s) << 31; + return u.f32; + } + else + { + while ((m & 0x400) == 0) + { + m <<= 1; + e--; + } + + e++; + m &= ~0x400; + } + } + else if (e == 31) + { + if (m == 0) + { + u.u32 = (uint32_t(s) << 31) | 0x7f800000u; + return u.f32; + } + else + { + u.u32 = (uint32_t(s) << 31) | 0x7f800000u | (m << 13); + return u.f32; + } + } + + e += 127 - 15; + m <<= 13; + u.u32 = (uint32_t(s) << 31) | (e << 23) | m; + return u.f32; + } + + inline uint32_t specialization_constant_id(uint32_t col, uint32_t row) const + { + return m.c[col].id[row]; + } + + inline uint32_t specialization_constant_id(uint32_t col) const + { + return m.id[col]; + } + + inline uint32_t scalar(uint32_t col = 0, uint32_t row = 0) const + { + return m.c[col].r[row].u32; + } + + inline int16_t scalar_i16(uint32_t col = 0, uint32_t row = 0) const + { + return int16_t(m.c[col].r[row].u32 & 0xffffu); + } + + inline uint16_t scalar_u16(uint32_t col = 0, uint32_t row = 0) const + { + return uint16_t(m.c[col].r[row].u32 & 0xffffu); + } + + inline int8_t scalar_i8(uint32_t col = 0, uint32_t row = 0) const + { + return int8_t(m.c[col].r[row].u32 & 0xffu); + } + + inline uint8_t scalar_u8(uint32_t col = 0, uint32_t row = 0) const + { + return uint8_t(m.c[col].r[row].u32 & 0xffu); + } + + inline float scalar_f16(uint32_t col = 0, uint32_t row = 0) const + { + return f16_to_f32(scalar_u16(col, row)); + } + + inline float scalar_f32(uint32_t col = 0, uint32_t row = 0) const + { + return m.c[col].r[row].f32; + } + + inline int32_t scalar_i32(uint32_t col = 0, uint32_t row = 0) const + { + return m.c[col].r[row].i32; + } + + inline double scalar_f64(uint32_t col = 0, uint32_t row = 0) const + { + return m.c[col].r[row].f64; + } + + inline int64_t scalar_i64(uint32_t col = 0, uint32_t row = 0) const + { + return m.c[col].r[row].i64; + } + + inline uint64_t scalar_u64(uint32_t col = 0, uint32_t row = 0) const + { + return m.c[col].r[row].u64; + } + + inline const ConstantVector &vector() const + { + return m.c[0]; + } + + inline uint32_t vector_size() const + { + return m.c[0].vecsize; + } + + inline uint32_t columns() const + { + return m.columns; + } + + inline void make_null(const SPIRType &constant_type_) + { + m = {}; + m.columns = constant_type_.columns; + for (auto &c : m.c) + c.vecsize = constant_type_.vecsize; + } + + inline bool constant_is_null() const + { + if (specialization) + return false; + if (!subconstants.empty()) + return false; + + for (uint32_t col = 0; col < columns(); col++) + for (uint32_t row = 0; row < vector_size(); row++) + if (scalar_u64(col, row) != 0) + return false; + + return true; + } + + explicit SPIRConstant(uint32_t constant_type_) + : constant_type(constant_type_) + { + } + + SPIRConstant() = default; + + SPIRConstant(TypeID constant_type_, const uint32_t *elements, uint32_t num_elements, bool specialized) + : constant_type(constant_type_) + , specialization(specialized) + { + subconstants.reserve(num_elements); + for (uint32_t i = 0; i < num_elements; i++) + subconstants.push_back(elements[i]); + specialization = specialized; + } + + // Construct scalar (32-bit). + SPIRConstant(TypeID constant_type_, uint32_t v0, bool specialized) + : constant_type(constant_type_) + , specialization(specialized) + { + m.c[0].r[0].u32 = v0; + m.c[0].vecsize = 1; + m.columns = 1; + } + + // Construct scalar (64-bit). + SPIRConstant(TypeID constant_type_, uint64_t v0, bool specialized) + : constant_type(constant_type_) + , specialization(specialized) + { + m.c[0].r[0].u64 = v0; + m.c[0].vecsize = 1; + m.columns = 1; + } + + // Construct vectors and matrices. + SPIRConstant(TypeID constant_type_, const SPIRConstant *const *vector_elements, uint32_t num_elements, + bool specialized) + : constant_type(constant_type_) + , specialization(specialized) + { + bool matrix = vector_elements[0]->m.c[0].vecsize > 1; + + if (matrix) + { + m.columns = num_elements; + + for (uint32_t i = 0; i < num_elements; i++) + { + m.c[i] = vector_elements[i]->m.c[0]; + if (vector_elements[i]->specialization) + m.id[i] = vector_elements[i]->self; + } + } + else + { + m.c[0].vecsize = num_elements; + m.columns = 1; + + for (uint32_t i = 0; i < num_elements; i++) + { + m.c[0].r[i] = vector_elements[i]->m.c[0].r[0]; + if (vector_elements[i]->specialization) + m.c[0].id[i] = vector_elements[i]->self; + } + } + } + + TypeID constant_type = 0; + ConstantMatrix m; + + // If this constant is a specialization constant (i.e. created with OpSpecConstant*). + bool specialization = false; + // If this constant is used as an array length which creates specialization restrictions on some backends. + bool is_used_as_array_length = false; + + // If true, this is a LUT, and should always be declared in the outer scope. + bool is_used_as_lut = false; + + // For composites which are constant arrays, etc. + SmallVector subconstants; + + // Non-Vulkan GLSL, HLSL and sometimes MSL emits defines for each specialization constant, + // and uses them to initialize the constant. This allows the user + // to still be able to specialize the value by supplying corresponding + // preprocessor directives before compiling the shader. + std::string specialization_constant_macro_name; + + SPIRV_CROSS_DECLARE_CLONE(SPIRConstant) +}; + +// Variants have a very specific allocation scheme. +struct ObjectPoolGroup +{ + std::unique_ptr pools[TypeCount]; +}; + +class Variant +{ +public: + explicit Variant(ObjectPoolGroup *group_) + : group(group_) + { + } + + ~Variant() + { + if (holder) + group->pools[type]->deallocate_opaque(holder); + } + + // Marking custom move constructor as noexcept is important. + Variant(Variant &&other) SPIRV_CROSS_NOEXCEPT + { + *this = std::move(other); + } + + // We cannot copy from other variant without our own pool group. + // Have to explicitly copy. + Variant(const Variant &variant) = delete; + + // Marking custom move constructor as noexcept is important. + Variant &operator=(Variant &&other) SPIRV_CROSS_NOEXCEPT + { + if (this != &other) + { + if (holder) + group->pools[type]->deallocate_opaque(holder); + holder = other.holder; + group = other.group; + type = other.type; + allow_type_rewrite = other.allow_type_rewrite; + + other.holder = nullptr; + other.type = TypeNone; + } + return *this; + } + + // This copy/clone should only be called in the Compiler constructor. + // If this is called inside ::compile(), we invalidate any references we took higher in the stack. + // This should never happen. + Variant &operator=(const Variant &other) + { +//#define SPIRV_CROSS_COPY_CONSTRUCTOR_SANITIZE +#ifdef SPIRV_CROSS_COPY_CONSTRUCTOR_SANITIZE + abort(); +#endif + if (this != &other) + { + if (holder) + group->pools[type]->deallocate_opaque(holder); + + if (other.holder) + holder = other.holder->clone(group->pools[other.type].get()); + else + holder = nullptr; + + type = other.type; + allow_type_rewrite = other.allow_type_rewrite; + } + return *this; + } + + void set(IVariant *val, Types new_type) + { + if (holder) + group->pools[type]->deallocate_opaque(holder); + holder = nullptr; + + if (!allow_type_rewrite && type != TypeNone && type != new_type) + { + if (val) + group->pools[new_type]->deallocate_opaque(val); + SPIRV_CROSS_THROW("Overwriting a variant with new type."); + } + + holder = val; + type = new_type; + allow_type_rewrite = false; + } + + template + T *allocate_and_set(Types new_type, Ts &&... ts) + { + T *val = static_cast &>(*group->pools[new_type]).allocate(std::forward(ts)...); + set(val, new_type); + return val; + } + + template + T &get() + { + if (!holder) + SPIRV_CROSS_THROW("nullptr"); + if (static_cast(T::type) != type) + SPIRV_CROSS_THROW("Bad cast"); + return *static_cast(holder); + } + + template + const T &get() const + { + if (!holder) + SPIRV_CROSS_THROW("nullptr"); + if (static_cast(T::type) != type) + SPIRV_CROSS_THROW("Bad cast"); + return *static_cast(holder); + } + + Types get_type() const + { + return type; + } + + ID get_id() const + { + return holder ? holder->self : ID(0); + } + + bool empty() const + { + return !holder; + } + + void reset() + { + if (holder) + group->pools[type]->deallocate_opaque(holder); + holder = nullptr; + type = TypeNone; + } + + void set_allow_type_rewrite() + { + allow_type_rewrite = true; + } + +private: + ObjectPoolGroup *group = nullptr; + IVariant *holder = nullptr; + Types type = TypeNone; + bool allow_type_rewrite = false; +}; + +template +T &variant_get(Variant &var) +{ + return var.get(); +} + +template +const T &variant_get(const Variant &var) +{ + return var.get(); +} + +template +T &variant_set(Variant &var, P &&... args) +{ + auto *ptr = var.allocate_and_set(static_cast(T::type), std::forward

(args)...); + return *ptr; +} + +struct AccessChainMeta +{ + uint32_t storage_physical_type = 0; + bool need_transpose = false; + bool storage_is_packed = false; + bool storage_is_invariant = false; + bool flattened_struct = false; + bool relaxed_precision = false; +}; + +enum ExtendedDecorations +{ + // Marks if a buffer block is re-packed, i.e. member declaration might be subject to PhysicalTypeID remapping and padding. + SPIRVCrossDecorationBufferBlockRepacked = 0, + + // A type in a buffer block might be declared with a different physical type than the logical type. + // If this is not set, PhysicalTypeID == the SPIR-V type as declared. + SPIRVCrossDecorationPhysicalTypeID, + + // Marks if the physical type is to be declared with tight packing rules, i.e. packed_floatN on MSL and friends. + // If this is set, PhysicalTypeID might also be set. It can be set to same as logical type if all we're doing + // is converting float3 to packed_float3 for example. + // If this is marked on a struct, it means the struct itself must use only Packed types for all its members. + SPIRVCrossDecorationPhysicalTypePacked, + + // The padding in bytes before declaring this struct member. + // If used on a struct type, marks the target size of a struct. + SPIRVCrossDecorationPaddingTarget, + + SPIRVCrossDecorationInterfaceMemberIndex, + SPIRVCrossDecorationInterfaceOrigID, + SPIRVCrossDecorationResourceIndexPrimary, + // Used for decorations like resource indices for samplers when part of combined image samplers. + // A variable might need to hold two resource indices in this case. + SPIRVCrossDecorationResourceIndexSecondary, + // Used for resource indices for multiplanar images when part of combined image samplers. + SPIRVCrossDecorationResourceIndexTertiary, + SPIRVCrossDecorationResourceIndexQuaternary, + + // Marks a buffer block for using explicit offsets (GLSL/HLSL). + SPIRVCrossDecorationExplicitOffset, + + // Apply to a variable in the Input storage class; marks it as holding the base group passed to vkCmdDispatchBase(), + // or the base vertex and instance indices passed to vkCmdDrawIndexed(). + // In MSL, this is used to adjust the WorkgroupId and GlobalInvocationId variables in compute shaders, + // and to hold the BaseVertex and BaseInstance variables in vertex shaders. + SPIRVCrossDecorationBuiltInDispatchBase, + + // Apply to a variable that is a function parameter; marks it as being a "dynamic" + // combined image-sampler. In MSL, this is used when a function parameter might hold + // either a regular combined image-sampler or one that has an attached sampler + // Y'CbCr conversion. + SPIRVCrossDecorationDynamicImageSampler, + + // Apply to a variable in the Input storage class; marks it as holding the size of the stage + // input grid. + // In MSL, this is used to hold the vertex and instance counts in a tessellation pipeline + // vertex shader. + SPIRVCrossDecorationBuiltInStageInputSize, + + // Apply to any access chain of a tessellation I/O variable; stores the type of the sub-object + // that was chained to, as recorded in the input variable itself. This is used in case the pointer + // is itself used as the base of an access chain, to calculate the original type of the sub-object + // chained to, in case a swizzle needs to be applied. This should not happen normally with valid + // SPIR-V, but the MSL backend can change the type of input variables, necessitating the + // addition of swizzles to keep the generated code compiling. + SPIRVCrossDecorationTessIOOriginalInputTypeID, + + // Apply to any access chain of an interface variable used with pull-model interpolation, where the variable is a + // vector but the resulting pointer is a scalar; stores the component index that is to be accessed by the chain. + // This is used when emitting calls to interpolation functions on the chain in MSL: in this case, the component + // must be applied to the result, since pull-model interpolants in MSL cannot be swizzled directly, but the + // results of interpolation can. + SPIRVCrossDecorationInterpolantComponentExpr, + + // Apply to any struct type that is used in the Workgroup storage class. + // This causes matrices in MSL prior to Metal 3.0 to be emitted using a special + // class that is convertible to the standard matrix type, to work around the + // lack of constructors in the 'threadgroup' address space. + SPIRVCrossDecorationWorkgroupStruct, + + SPIRVCrossDecorationCount +}; + +struct Meta +{ + struct Decoration + { + std::string alias; + std::string qualified_alias; + std::string hlsl_semantic; + Bitset decoration_flags; + spv::BuiltIn builtin_type = spv::BuiltInMax; + uint32_t location = 0; + uint32_t component = 0; + uint32_t set = 0; + uint32_t binding = 0; + uint32_t offset = 0; + uint32_t xfb_buffer = 0; + uint32_t xfb_stride = 0; + uint32_t stream = 0; + uint32_t array_stride = 0; + uint32_t matrix_stride = 0; + uint32_t input_attachment = 0; + uint32_t spec_id = 0; + uint32_t index = 0; + spv::FPRoundingMode fp_rounding_mode = spv::FPRoundingModeMax; + bool builtin = false; + + struct Extended + { + Extended() + { + // MSVC 2013 workaround to init like this. + for (auto &v : values) + v = 0; + } + + Bitset flags; + uint32_t values[SPIRVCrossDecorationCount]; + } extended; + }; + + Decoration decoration; + + // Intentionally not a SmallVector. Decoration is large and somewhat rare. + Vector members; + + std::unordered_map decoration_word_offset; + + // For SPV_GOOGLE_hlsl_functionality1. + bool hlsl_is_magic_counter_buffer = false; + // ID for the sibling counter buffer. + uint32_t hlsl_magic_counter_buffer = 0; +}; + +// A user callback that remaps the type of any variable. +// var_name is the declared name of the variable. +// name_of_type is the textual name of the type which will be used in the code unless written to by the callback. +using VariableTypeRemapCallback = + std::function; + +class Hasher +{ +public: + inline void u32(uint32_t value) + { + h = (h * 0x100000001b3ull) ^ value; + } + + inline uint64_t get() const + { + return h; + } + +private: + uint64_t h = 0xcbf29ce484222325ull; +}; + +static inline bool type_is_floating_point(const SPIRType &type) +{ + return type.basetype == SPIRType::Half || type.basetype == SPIRType::Float || type.basetype == SPIRType::Double; +} + +static inline bool type_is_integral(const SPIRType &type) +{ + return type.basetype == SPIRType::SByte || type.basetype == SPIRType::UByte || type.basetype == SPIRType::Short || + type.basetype == SPIRType::UShort || type.basetype == SPIRType::Int || type.basetype == SPIRType::UInt || + type.basetype == SPIRType::Int64 || type.basetype == SPIRType::UInt64; +} + +static inline SPIRType::BaseType to_signed_basetype(uint32_t width) +{ + switch (width) + { + case 8: + return SPIRType::SByte; + case 16: + return SPIRType::Short; + case 32: + return SPIRType::Int; + case 64: + return SPIRType::Int64; + default: + SPIRV_CROSS_THROW("Invalid bit width."); + } +} + +static inline SPIRType::BaseType to_unsigned_basetype(uint32_t width) +{ + switch (width) + { + case 8: + return SPIRType::UByte; + case 16: + return SPIRType::UShort; + case 32: + return SPIRType::UInt; + case 64: + return SPIRType::UInt64; + default: + SPIRV_CROSS_THROW("Invalid bit width."); + } +} + +// Returns true if an arithmetic operation does not change behavior depending on signedness. +static inline bool opcode_is_sign_invariant(spv::Op opcode) +{ + switch (opcode) + { + case spv::OpIEqual: + case spv::OpINotEqual: + case spv::OpISub: + case spv::OpIAdd: + case spv::OpIMul: + case spv::OpShiftLeftLogical: + case spv::OpBitwiseOr: + case spv::OpBitwiseXor: + case spv::OpBitwiseAnd: + return true; + + default: + return false; + } +} + +static inline bool opcode_can_promote_integer_implicitly(spv::Op opcode) +{ + switch (opcode) + { + case spv::OpSNegate: + case spv::OpNot: + case spv::OpBitwiseAnd: + case spv::OpBitwiseOr: + case spv::OpBitwiseXor: + case spv::OpShiftLeftLogical: + case spv::OpShiftRightLogical: + case spv::OpShiftRightArithmetic: + case spv::OpIAdd: + case spv::OpISub: + case spv::OpIMul: + case spv::OpSDiv: + case spv::OpUDiv: + case spv::OpSRem: + case spv::OpUMod: + case spv::OpSMod: + return true; + + default: + return false; + } +} + +struct SetBindingPair +{ + uint32_t desc_set; + uint32_t binding; + + inline bool operator==(const SetBindingPair &other) const + { + return desc_set == other.desc_set && binding == other.binding; + } + + inline bool operator<(const SetBindingPair &other) const + { + return desc_set < other.desc_set || (desc_set == other.desc_set && binding < other.binding); + } +}; + +struct LocationComponentPair +{ + uint32_t location; + uint32_t component; + + inline bool operator==(const LocationComponentPair &other) const + { + return location == other.location && component == other.component; + } + + inline bool operator<(const LocationComponentPair &other) const + { + return location < other.location || (location == other.location && component < other.component); + } +}; + +struct StageSetBinding +{ + spv::ExecutionModel model; + uint32_t desc_set; + uint32_t binding; + + inline bool operator==(const StageSetBinding &other) const + { + return model == other.model && desc_set == other.desc_set && binding == other.binding; + } +}; + +struct InternalHasher +{ + inline size_t operator()(const SetBindingPair &value) const + { + // Quality of hash doesn't really matter here. + auto hash_set = std::hash()(value.desc_set); + auto hash_binding = std::hash()(value.binding); + return (hash_set * 0x10001b31) ^ hash_binding; + } + + inline size_t operator()(const LocationComponentPair &value) const + { + // Quality of hash doesn't really matter here. + auto hash_set = std::hash()(value.location); + auto hash_binding = std::hash()(value.component); + return (hash_set * 0x10001b31) ^ hash_binding; + } + + inline size_t operator()(const StageSetBinding &value) const + { + // Quality of hash doesn't really matter here. + auto hash_model = std::hash()(value.model); + auto hash_set = std::hash()(value.desc_set); + auto tmp_hash = (hash_model * 0x10001b31) ^ hash_set; + return (tmp_hash * 0x10001b31) ^ value.binding; + } +}; + +// Special constant used in a {MSL,HLSL}ResourceBinding desc_set +// element to indicate the bindings for the push constants. +static const uint32_t ResourceBindingPushConstantDescriptorSet = ~(0u); + +// Special constant used in a {MSL,HLSL}ResourceBinding binding +// element to indicate the bindings for the push constants. +static const uint32_t ResourceBindingPushConstantBinding = 0; +} // namespace SPIRV_CROSS_NAMESPACE + +namespace std +{ +template +struct hash> +{ + size_t operator()(const SPIRV_CROSS_NAMESPACE::TypedID &value) const + { + return std::hash()(value); + } +}; +} // namespace std + +#endif diff --git a/local_packages/include/spirv_cross/spirv_cpp.hpp b/local_packages/include/spirv_cross/spirv_cpp.hpp new file mode 100644 index 0000000..c76629c --- /dev/null +++ b/local_packages/include/spirv_cross/spirv_cpp.hpp @@ -0,0 +1,93 @@ +/* + * Copyright 2015-2021 Arm Limited + * SPDX-License-Identifier: Apache-2.0 OR MIT + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * At your option, you may choose to accept this material under either: + * 1. The Apache License, Version 2.0, found at , or + * 2. The MIT License, found at . + */ + +#ifndef SPIRV_CROSS_CPP_HPP +#define SPIRV_CROSS_CPP_HPP + +#include "spirv_glsl.hpp" +#include + +namespace SPIRV_CROSS_NAMESPACE +{ +class CompilerCPP : public CompilerGLSL +{ +public: + explicit CompilerCPP(std::vector spirv_) + : CompilerGLSL(std::move(spirv_)) + { + } + + CompilerCPP(const uint32_t *ir_, size_t word_count) + : CompilerGLSL(ir_, word_count) + { + } + + explicit CompilerCPP(const ParsedIR &ir_) + : CompilerGLSL(ir_) + { + } + + explicit CompilerCPP(ParsedIR &&ir_) + : CompilerGLSL(std::move(ir_)) + { + } + + std::string compile() override; + + // Sets a custom symbol name that can override + // spirv_cross_get_interface. + // + // Useful when several shader interfaces are linked + // statically into the same binary. + void set_interface_name(std::string name) + { + interface_name = std::move(name); + } + +private: + void emit_header() override; + void emit_c_linkage(); + void emit_function_prototype(SPIRFunction &func, const Bitset &return_flags) override; + + void emit_resources(); + void emit_buffer_block(const SPIRVariable &type) override; + void emit_push_constant_block(const SPIRVariable &var) override; + void emit_interface_block(const SPIRVariable &type); + void emit_block_chain(SPIRBlock &block); + void emit_uniform(const SPIRVariable &var) override; + void emit_shared(const SPIRVariable &var); + void emit_block_struct(SPIRType &type); + std::string variable_decl(const SPIRType &type, const std::string &name, uint32_t id) override; + + std::string argument_decl(const SPIRFunction::Parameter &arg); + + SmallVector resource_registrations; + std::string impl_type; + std::string resource_type; + uint32_t shared_counter = 0; + + std::string interface_name; +}; +} // namespace SPIRV_CROSS_NAMESPACE + +#endif diff --git a/local_packages/include/spirv_cross/spirv_cross.hpp b/local_packages/include/spirv_cross/spirv_cross.hpp new file mode 100644 index 0000000..789010f --- /dev/null +++ b/local_packages/include/spirv_cross/spirv_cross.hpp @@ -0,0 +1,1171 @@ +/* + * Copyright 2015-2021 Arm Limited + * SPDX-License-Identifier: Apache-2.0 OR MIT + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * At your option, you may choose to accept this material under either: + * 1. The Apache License, Version 2.0, found at , or + * 2. The MIT License, found at . + */ + +#ifndef SPIRV_CROSS_HPP +#define SPIRV_CROSS_HPP + +#ifndef SPV_ENABLE_UTILITY_CODE +#define SPV_ENABLE_UTILITY_CODE +#endif +#include "spirv.hpp" +#include "spirv_cfg.hpp" +#include "spirv_cross_parsed_ir.hpp" + +namespace SPIRV_CROSS_NAMESPACE +{ +struct Resource +{ + // Resources are identified with their SPIR-V ID. + // This is the ID of the OpVariable. + ID id; + + // The type ID of the variable which includes arrays and all type modifications. + // This type ID is not suitable for parsing OpMemberDecoration of a struct and other decorations in general + // since these modifications typically happen on the base_type_id. + TypeID type_id; + + // The base type of the declared resource. + // This type is the base type which ignores pointers and arrays of the type_id. + // This is mostly useful to parse decorations of the underlying type. + // base_type_id can also be obtained with get_type(get_type(type_id).self). + TypeID base_type_id; + + // The declared name (OpName) of the resource. + // For Buffer blocks, the name actually reflects the externally + // visible Block name. + // + // This name can be retrieved again by using either + // get_name(id) or get_name(base_type_id) depending if it's a buffer block or not. + // + // This name can be an empty string in which case get_fallback_name(id) can be + // used which obtains a suitable fallback identifier for an ID. + std::string name; +}; + +struct BuiltInResource +{ + // This is mostly here to support reflection of builtins such as Position/PointSize/CullDistance/ClipDistance. + // This needs to be different from Resource since we can collect builtins from blocks. + // A builtin present here does not necessarily mean it's considered an active builtin, + // since variable ID "activeness" is only tracked on OpVariable level, not Block members. + // For that, update_active_builtins() -> has_active_builtin() can be used to further refine the reflection. + spv::BuiltIn builtin; + + // This is the actual value type of the builtin. + // Typically float4, float, array for the gl_PerVertex builtins. + // If the builtin is a control point, the control point array type will be stripped away here as appropriate. + TypeID value_type_id; + + // This refers to the base resource which contains the builtin. + // If resource is a Block, it can hold multiple builtins, or it might not be a block. + // For advanced reflection scenarios, all information in builtin/value_type_id can be deduced, + // it's just more convenient this way. + Resource resource; +}; + +struct ShaderResources +{ + SmallVector uniform_buffers; + SmallVector storage_buffers; + SmallVector stage_inputs; + SmallVector stage_outputs; + SmallVector subpass_inputs; + SmallVector storage_images; + SmallVector sampled_images; + SmallVector atomic_counters; + SmallVector acceleration_structures; + + // There can only be one push constant block, + // but keep the vector in case this restriction is lifted in the future. + SmallVector push_constant_buffers; + + SmallVector shader_record_buffers; + + // For Vulkan GLSL and HLSL source, + // these correspond to separate texture2D and samplers respectively. + SmallVector separate_images; + SmallVector separate_samplers; + + SmallVector builtin_inputs; + SmallVector builtin_outputs; +}; + +struct CombinedImageSampler +{ + // The ID of the sampler2D variable. + VariableID combined_id; + // The ID of the texture2D variable. + VariableID image_id; + // The ID of the sampler variable. + VariableID sampler_id; +}; + +struct SpecializationConstant +{ + // The ID of the specialization constant. + ConstantID id; + // The constant ID of the constant, used in Vulkan during pipeline creation. + uint32_t constant_id; +}; + +struct BufferRange +{ + unsigned index; + size_t offset; + size_t range; +}; + +enum BufferPackingStandard +{ + BufferPackingStd140, + BufferPackingStd430, + BufferPackingStd140EnhancedLayout, + BufferPackingStd430EnhancedLayout, + BufferPackingHLSLCbuffer, + BufferPackingHLSLCbufferPackOffset, + BufferPackingScalar, + BufferPackingScalarEnhancedLayout +}; + +struct EntryPoint +{ + std::string name; + spv::ExecutionModel execution_model; +}; + +class Compiler +{ +public: + friend class CFG; + friend class DominatorBuilder; + + // The constructor takes a buffer of SPIR-V words and parses it. + // It will create its own parser, parse the SPIR-V and move the parsed IR + // as if you had called the constructors taking ParsedIR directly. + explicit Compiler(std::vector ir); + Compiler(const uint32_t *ir, size_t word_count); + + // This is more modular. We can also consume a ParsedIR structure directly, either as a move, or copy. + // With copy, we can reuse the same parsed IR for multiple Compiler instances. + explicit Compiler(const ParsedIR &ir); + explicit Compiler(ParsedIR &&ir); + + virtual ~Compiler() = default; + + // After parsing, API users can modify the SPIR-V via reflection and call this + // to disassemble the SPIR-V into the desired langauage. + // Sub-classes actually implement this. + virtual std::string compile(); + + // Gets the identifier (OpName) of an ID. If not defined, an empty string will be returned. + const std::string &get_name(ID id) const; + + // Applies a decoration to an ID. Effectively injects OpDecorate. + void set_decoration(ID id, spv::Decoration decoration, uint32_t argument = 0); + void set_decoration_string(ID id, spv::Decoration decoration, const std::string &argument); + + // Overrides the identifier OpName of an ID. + // Identifiers beginning with underscores or identifiers which contain double underscores + // are reserved by the implementation. + void set_name(ID id, const std::string &name); + + // Gets a bitmask for the decorations which are applied to ID. + // I.e. (1ull << spv::DecorationFoo) | (1ull << spv::DecorationBar) + const Bitset &get_decoration_bitset(ID id) const; + + // Returns whether the decoration has been applied to the ID. + bool has_decoration(ID id, spv::Decoration decoration) const; + + // Gets the value for decorations which take arguments. + // If the decoration is a boolean (i.e. spv::DecorationNonWritable), + // 1 will be returned. + // If decoration doesn't exist or decoration is not recognized, + // 0 will be returned. + uint32_t get_decoration(ID id, spv::Decoration decoration) const; + const std::string &get_decoration_string(ID id, spv::Decoration decoration) const; + + // Removes the decoration for an ID. + void unset_decoration(ID id, spv::Decoration decoration); + + // Gets the SPIR-V type associated with ID. + // Mostly used with Resource::type_id and Resource::base_type_id to parse the underlying type of a resource. + const SPIRType &get_type(TypeID id) const; + + // Gets the SPIR-V type of a variable. + const SPIRType &get_type_from_variable(VariableID id) const; + + // Gets the underlying storage class for an OpVariable. + spv::StorageClass get_storage_class(VariableID id) const; + + // If get_name() is an empty string, get the fallback name which will be used + // instead in the disassembled source. + virtual const std::string get_fallback_name(ID id) const; + + // If get_name() of a Block struct is an empty string, get the fallback name. + // This needs to be per-variable as multiple variables can use the same block type. + virtual const std::string get_block_fallback_name(VariableID id) const; + + // Given an OpTypeStruct in ID, obtain the identifier for member number "index". + // This may be an empty string. + const std::string &get_member_name(TypeID id, uint32_t index) const; + + // Given an OpTypeStruct in ID, obtain the OpMemberDecoration for member number "index". + uint32_t get_member_decoration(TypeID id, uint32_t index, spv::Decoration decoration) const; + const std::string &get_member_decoration_string(TypeID id, uint32_t index, spv::Decoration decoration) const; + + // Sets the member identifier for OpTypeStruct ID, member number "index". + void set_member_name(TypeID id, uint32_t index, const std::string &name); + + // Returns the qualified member identifier for OpTypeStruct ID, member number "index", + // or an empty string if no qualified alias exists + const std::string &get_member_qualified_name(TypeID type_id, uint32_t index) const; + + // Gets the decoration mask for a member of a struct, similar to get_decoration_mask. + const Bitset &get_member_decoration_bitset(TypeID id, uint32_t index) const; + + // Returns whether the decoration has been applied to a member of a struct. + bool has_member_decoration(TypeID id, uint32_t index, spv::Decoration decoration) const; + + // Similar to set_decoration, but for struct members. + void set_member_decoration(TypeID id, uint32_t index, spv::Decoration decoration, uint32_t argument = 0); + void set_member_decoration_string(TypeID id, uint32_t index, spv::Decoration decoration, + const std::string &argument); + + // Unsets a member decoration, similar to unset_decoration. + void unset_member_decoration(TypeID id, uint32_t index, spv::Decoration decoration); + + // Gets the fallback name for a member, similar to get_fallback_name. + virtual const std::string get_fallback_member_name(uint32_t index) const + { + return join("_", index); + } + + // Returns a vector of which members of a struct are potentially in use by a + // SPIR-V shader. The granularity of this analysis is per-member of a struct. + // This can be used for Buffer (UBO), BufferBlock/StorageBuffer (SSBO) and PushConstant blocks. + // ID is the Resource::id obtained from get_shader_resources(). + SmallVector get_active_buffer_ranges(VariableID id) const; + + // Returns the effective size of a buffer block. + size_t get_declared_struct_size(const SPIRType &struct_type) const; + + // Returns the effective size of a buffer block, with a given array size + // for a runtime array. + // SSBOs are typically declared as runtime arrays. get_declared_struct_size() will return 0 for the size. + // This is not very helpful for applications which might need to know the array stride of its last member. + // This can be done through the API, but it is not very intuitive how to accomplish this, so here we provide a helper function + // to query the size of the buffer, assuming that the last member has a certain size. + // If the buffer does not contain a runtime array, array_size is ignored, and the function will behave as + // get_declared_struct_size(). + // To get the array stride of the last member, something like: + // get_declared_struct_size_runtime_array(type, 1) - get_declared_struct_size_runtime_array(type, 0) will work. + size_t get_declared_struct_size_runtime_array(const SPIRType &struct_type, size_t array_size) const; + + // Returns the effective size of a buffer block struct member. + size_t get_declared_struct_member_size(const SPIRType &struct_type, uint32_t index) const; + + // Returns a set of all global variables which are statically accessed + // by the control flow graph from the current entry point. + // Only variables which change the interface for a shader are returned, that is, + // variables with storage class of Input, Output, Uniform, UniformConstant, PushConstant and AtomicCounter + // storage classes are returned. + // + // To use the returned set as the filter for which variables are used during compilation, + // this set can be moved to set_enabled_interface_variables(). + std::unordered_set get_active_interface_variables() const; + + // Sets the interface variables which are used during compilation. + // By default, all variables are used. + // Once set, compile() will only consider the set in active_variables. + void set_enabled_interface_variables(std::unordered_set active_variables); + + // Query shader resources, use ids with reflection interface to modify or query binding points, etc. + ShaderResources get_shader_resources() const; + + // Query shader resources, but only return the variables which are part of active_variables. + // E.g.: get_shader_resources(get_active_variables()) to only return the variables which are statically + // accessed. + ShaderResources get_shader_resources(const std::unordered_set &active_variables) const; + + // Remapped variables are considered built-in variables and a backend will + // not emit a declaration for this variable. + // This is mostly useful for making use of builtins which are dependent on extensions. + void set_remapped_variable_state(VariableID id, bool remap_enable); + bool get_remapped_variable_state(VariableID id) const; + + // For subpassInput variables which are remapped to plain variables, + // the number of components in the remapped + // variable must be specified as the backing type of subpass inputs are opaque. + void set_subpass_input_remapped_components(VariableID id, uint32_t components); + uint32_t get_subpass_input_remapped_components(VariableID id) const; + + // All operations work on the current entry point. + // Entry points can be swapped out with set_entry_point(). + // Entry points should be set right after the constructor completes as some reflection functions traverse the graph from the entry point. + // Resource reflection also depends on the entry point. + // By default, the current entry point is set to the first OpEntryPoint which appears in the SPIR-V module. + + // Some shader languages restrict the names that can be given to entry points, and the + // corresponding backend will automatically rename an entry point name, during the call + // to compile() if it is illegal. For example, the common entry point name main() is + // illegal in MSL, and is renamed to an alternate name by the MSL backend. + // Given the original entry point name contained in the SPIR-V, this function returns + // the name, as updated by the backend during the call to compile(). If the name is not + // illegal, and has not been renamed, or if this function is called before compile(), + // this function will simply return the same name. + + // New variants of entry point query and reflection. + // Names for entry points in the SPIR-V module may alias if they belong to different execution models. + // To disambiguate, we must pass along with the entry point names the execution model. + SmallVector get_entry_points_and_stages() const; + void set_entry_point(const std::string &entry, spv::ExecutionModel execution_model); + + // Renames an entry point from old_name to new_name. + // If old_name is currently selected as the current entry point, it will continue to be the current entry point, + // albeit with a new name. + // get_entry_points() is essentially invalidated at this point. + void rename_entry_point(const std::string &old_name, const std::string &new_name, + spv::ExecutionModel execution_model); + const SPIREntryPoint &get_entry_point(const std::string &name, spv::ExecutionModel execution_model) const; + SPIREntryPoint &get_entry_point(const std::string &name, spv::ExecutionModel execution_model); + const std::string &get_cleansed_entry_point_name(const std::string &name, + spv::ExecutionModel execution_model) const; + + // Traverses all reachable opcodes and sets active_builtins to a bitmask of all builtin variables which are accessed in the shader. + void update_active_builtins(); + bool has_active_builtin(spv::BuiltIn builtin, spv::StorageClass storage) const; + + // Query and modify OpExecutionMode. + const Bitset &get_execution_mode_bitset() const; + + void unset_execution_mode(spv::ExecutionMode mode); + void set_execution_mode(spv::ExecutionMode mode, uint32_t arg0 = 0, uint32_t arg1 = 0, uint32_t arg2 = 0); + + // Gets argument for an execution mode (LocalSize, Invocations, OutputVertices). + // For LocalSize or LocalSizeId, the index argument is used to select the dimension (X = 0, Y = 1, Z = 2). + // For execution modes which do not have arguments, 0 is returned. + // LocalSizeId query returns an ID. If LocalSizeId execution mode is not used, it returns 0. + // LocalSize always returns a literal. If execution mode is LocalSizeId, + // the literal (spec constant or not) is still returned. + uint32_t get_execution_mode_argument(spv::ExecutionMode mode, uint32_t index = 0) const; + spv::ExecutionModel get_execution_model() const; + + bool is_tessellation_shader() const; + bool is_tessellating_triangles() const; + + // In SPIR-V, the compute work group size can be represented by a constant vector, in which case + // the LocalSize execution mode is ignored. + // + // This constant vector can be a constant vector, specialization constant vector, or partly specialized constant vector. + // To modify and query work group dimensions which are specialization constants, SPIRConstant values must be modified + // directly via get_constant() rather than using LocalSize directly. This function will return which constants should be modified. + // + // To modify dimensions which are *not* specialization constants, set_execution_mode should be used directly. + // Arguments to set_execution_mode which are specialization constants are effectively ignored during compilation. + // NOTE: This is somewhat different from how SPIR-V works. In SPIR-V, the constant vector will completely replace LocalSize, + // while in this interface, LocalSize is only ignored for specialization constants. + // + // The specialization constant will be written to x, y and z arguments. + // If the component is not a specialization constant, a zeroed out struct will be written. + // The return value is the constant ID of the builtin WorkGroupSize, but this is not expected to be useful + // for most use cases. + // If LocalSizeId is used, there is no uvec3 value representing the workgroup size, so the return value is 0, + // but x, y and z are written as normal if the components are specialization constants. + uint32_t get_work_group_size_specialization_constants(SpecializationConstant &x, SpecializationConstant &y, + SpecializationConstant &z) const; + + // Analyzes all OpImageFetch (texelFetch) opcodes and checks if there are instances where + // said instruction is used without a combined image sampler. + // GLSL targets do not support the use of texelFetch without a sampler. + // To workaround this, we must inject a dummy sampler which can be used to form a sampler2D at the call-site of + // texelFetch as necessary. + // + // This must be called before build_combined_image_samplers(). + // build_combined_image_samplers() may refer to the ID returned by this method if the returned ID is non-zero. + // The return value will be the ID of a sampler object if a dummy sampler is necessary, or 0 if no sampler object + // is required. + // + // If the returned ID is non-zero, it can be decorated with set/bindings as desired before calling compile(). + // Calling this function also invalidates get_active_interface_variables(), so this should be called + // before that function. + VariableID build_dummy_sampler_for_combined_images(); + + // Analyzes all separate image and samplers used from the currently selected entry point, + // and re-routes them all to a combined image sampler instead. + // This is required to "support" separate image samplers in targets which do not natively support + // this feature, like GLSL/ESSL. + // + // This must be called before compile() if such remapping is desired. + // This call will add new sampled images to the SPIR-V, + // so it will appear in reflection if get_shader_resources() is called after build_combined_image_samplers. + // + // If any image/sampler remapping was found, no separate image/samplers will appear in the decompiled output, + // but will still appear in reflection. + // + // The resulting samplers will be void of any decorations like name, descriptor sets and binding points, + // so this can be added before compile() if desired. + // + // Combined image samplers originating from this set are always considered active variables. + // Arrays of separate samplers are not supported, but arrays of separate images are supported. + // Array of images + sampler -> Array of combined image samplers. + void build_combined_image_samplers(); + + // Gets a remapping for the combined image samplers. + const SmallVector &get_combined_image_samplers() const + { + return combined_image_samplers; + } + + // Set a new variable type remap callback. + // The type remapping is designed to allow global interface variable to assume more special types. + // A typical example here is to remap sampler2D into samplerExternalOES, which currently isn't supported + // directly by SPIR-V. + // + // In compile() while emitting code, + // for every variable that is declared, including function parameters, the callback will be called + // and the API user has a chance to change the textual representation of the type used to declare the variable. + // The API user can detect special patterns in names to guide the remapping. + void set_variable_type_remap_callback(VariableTypeRemapCallback cb) + { + variable_remap_callback = std::move(cb); + } + + // API for querying which specialization constants exist. + // To modify a specialization constant before compile(), use get_constant(constant.id), + // then update constants directly in the SPIRConstant data structure. + // For composite types, the subconstants can be iterated over and modified. + // constant_type is the SPIRType for the specialization constant, + // which can be queried to determine which fields in the unions should be poked at. + SmallVector get_specialization_constants() const; + SPIRConstant &get_constant(ConstantID id); + const SPIRConstant &get_constant(ConstantID id) const; + + uint32_t get_current_id_bound() const + { + return uint32_t(ir.ids.size()); + } + + // API for querying buffer objects. + // The type passed in here should be the base type of a resource, i.e. + // get_type(resource.base_type_id) + // as decorations are set in the basic Block type. + // The type passed in here must have these decorations set, or an exception is raised. + // Only UBOs and SSBOs or sub-structs which are part of these buffer types will have these decorations set. + uint32_t type_struct_member_offset(const SPIRType &type, uint32_t index) const; + uint32_t type_struct_member_array_stride(const SPIRType &type, uint32_t index) const; + uint32_t type_struct_member_matrix_stride(const SPIRType &type, uint32_t index) const; + + // Gets the offset in SPIR-V words (uint32_t) for a decoration which was originally declared in the SPIR-V binary. + // The offset will point to one or more uint32_t literals which can be modified in-place before using the SPIR-V binary. + // Note that adding or removing decorations using the reflection API will not change the behavior of this function. + // If the decoration was declared, sets the word_offset to an offset into the provided SPIR-V binary buffer and returns true, + // otherwise, returns false. + // If the decoration does not have any value attached to it (e.g. DecorationRelaxedPrecision), this function will also return false. + bool get_binary_offset_for_decoration(VariableID id, spv::Decoration decoration, uint32_t &word_offset) const; + + // HLSL counter buffer reflection interface. + // Append/Consume/Increment/Decrement in HLSL is implemented as two "neighbor" buffer objects where + // one buffer implements the storage, and a single buffer containing just a lone "int" implements the counter. + // To SPIR-V these will be exposed as two separate buffers, but glslang HLSL frontend emits a special indentifier + // which lets us link the two buffers together. + + // Queries if a variable ID is a counter buffer which "belongs" to a regular buffer object. + + // If SPV_GOOGLE_hlsl_functionality1 is used, this can be used even with a stripped SPIR-V module. + // Otherwise, this query is purely based on OpName identifiers as found in the SPIR-V module, and will + // only return true if OpSource was reported HLSL. + // To rely on this functionality, ensure that the SPIR-V module is not stripped. + + bool buffer_is_hlsl_counter_buffer(VariableID id) const; + + // Queries if a buffer object has a neighbor "counter" buffer. + // If so, the ID of that counter buffer will be returned in counter_id. + // If SPV_GOOGLE_hlsl_functionality1 is used, this can be used even with a stripped SPIR-V module. + // Otherwise, this query is purely based on OpName identifiers as found in the SPIR-V module, and will + // only return true if OpSource was reported HLSL. + // To rely on this functionality, ensure that the SPIR-V module is not stripped. + bool buffer_get_hlsl_counter_buffer(VariableID id, uint32_t &counter_id) const; + + // Gets the list of all SPIR-V Capabilities which were declared in the SPIR-V module. + const SmallVector &get_declared_capabilities() const; + + // Gets the list of all SPIR-V extensions which were declared in the SPIR-V module. + const SmallVector &get_declared_extensions() const; + + // When declaring buffer blocks in GLSL, the name declared in the GLSL source + // might not be the same as the name declared in the SPIR-V module due to naming conflicts. + // In this case, SPIRV-Cross needs to find a fallback-name, and it might only + // be possible to know this name after compiling to GLSL. + // This is particularly important for HLSL input and UAVs which tends to reuse the same block type + // for multiple distinct blocks. For these cases it is not possible to modify the name of the type itself + // because it might be unique. Instead, you can use this interface to check after compilation which + // name was actually used if your input SPIR-V tends to have this problem. + // For other names like remapped names for variables, etc, it's generally enough to query the name of the variables + // after compiling, block names are an exception to this rule. + // ID is the name of a variable as returned by Resource::id, and must be a variable with a Block-like type. + // + // This also applies to HLSL cbuffers. + std::string get_remapped_declared_block_name(VariableID id) const; + + // For buffer block variables, get the decorations for that variable. + // Sometimes, decorations for buffer blocks are found in member decorations instead + // of direct decorations on the variable itself. + // The most common use here is to check if a buffer is readonly or writeonly. + Bitset get_buffer_block_flags(VariableID id) const; + + // Returns whether the position output is invariant + bool is_position_invariant() const + { + return position_invariant; + } + +protected: + const uint32_t *stream(const Instruction &instr) const + { + // If we're not going to use any arguments, just return nullptr. + // We want to avoid case where we return an out of range pointer + // that trips debug assertions on some platforms. + if (!instr.length) + return nullptr; + + if (instr.is_embedded()) + { + auto &embedded = static_cast(instr); + assert(embedded.ops.size() == instr.length); + return embedded.ops.data(); + } + else + { + if (instr.offset + instr.length > ir.spirv.size()) + SPIRV_CROSS_THROW("Compiler::stream() out of range."); + return &ir.spirv[instr.offset]; + } + } + + uint32_t *stream_mutable(const Instruction &instr) const + { + return const_cast(stream(instr)); + } + + ParsedIR ir; + // Marks variables which have global scope and variables which can alias with other variables + // (SSBO, image load store, etc) + SmallVector global_variables; + SmallVector aliased_variables; + + SPIRFunction *current_function = nullptr; + SPIRBlock *current_block = nullptr; + uint32_t current_loop_level = 0; + std::unordered_set active_interface_variables; + bool check_active_interface_variables = false; + + void add_loop_level(); + + void set_initializers(SPIRExpression &e) + { + e.emitted_loop_level = current_loop_level; + } + + template + void set_initializers(const T &) + { + } + + // If our IDs are out of range here as part of opcodes, throw instead of + // undefined behavior. + template + T &set(uint32_t id, P &&... args) + { + ir.add_typed_id(static_cast(T::type), id); + auto &var = variant_set(ir.ids[id], std::forward

(args)...); + var.self = id; + set_initializers(var); + return var; + } + + template + T &get(uint32_t id) + { + return variant_get(ir.ids[id]); + } + + template + T *maybe_get(uint32_t id) + { + if (id >= ir.ids.size()) + return nullptr; + else if (ir.ids[id].get_type() == static_cast(T::type)) + return &get(id); + else + return nullptr; + } + + template + const T &get(uint32_t id) const + { + return variant_get(ir.ids[id]); + } + + template + const T *maybe_get(uint32_t id) const + { + if (id >= ir.ids.size()) + return nullptr; + else if (ir.ids[id].get_type() == static_cast(T::type)) + return &get(id); + else + return nullptr; + } + + // Gets the id of SPIR-V type underlying the given type_id, which might be a pointer. + uint32_t get_pointee_type_id(uint32_t type_id) const; + + // Gets the SPIR-V type underlying the given type, which might be a pointer. + const SPIRType &get_pointee_type(const SPIRType &type) const; + + // Gets the SPIR-V type underlying the given type_id, which might be a pointer. + const SPIRType &get_pointee_type(uint32_t type_id) const; + + // Gets the ID of the SPIR-V type underlying a variable. + uint32_t get_variable_data_type_id(const SPIRVariable &var) const; + + // Gets the SPIR-V type underlying a variable. + SPIRType &get_variable_data_type(const SPIRVariable &var); + + // Gets the SPIR-V type underlying a variable. + const SPIRType &get_variable_data_type(const SPIRVariable &var) const; + + // Gets the SPIR-V element type underlying an array variable. + SPIRType &get_variable_element_type(const SPIRVariable &var); + + // Gets the SPIR-V element type underlying an array variable. + const SPIRType &get_variable_element_type(const SPIRVariable &var) const; + + // Sets the qualified member identifier for OpTypeStruct ID, member number "index". + void set_member_qualified_name(uint32_t type_id, uint32_t index, const std::string &name); + void set_qualified_name(uint32_t id, const std::string &name); + + // Returns if the given type refers to a sampled image. + bool is_sampled_image_type(const SPIRType &type); + + const SPIREntryPoint &get_entry_point() const; + SPIREntryPoint &get_entry_point(); + static bool is_tessellation_shader(spv::ExecutionModel model); + + virtual std::string to_name(uint32_t id, bool allow_alias = true) const; + bool is_builtin_variable(const SPIRVariable &var) const; + bool is_builtin_type(const SPIRType &type) const; + bool is_hidden_variable(const SPIRVariable &var, bool include_builtins = false) const; + bool is_immutable(uint32_t id) const; + bool is_member_builtin(const SPIRType &type, uint32_t index, spv::BuiltIn *builtin) const; + bool is_scalar(const SPIRType &type) const; + bool is_vector(const SPIRType &type) const; + bool is_matrix(const SPIRType &type) const; + bool is_array(const SPIRType &type) const; + uint32_t expression_type_id(uint32_t id) const; + const SPIRType &expression_type(uint32_t id) const; + bool expression_is_lvalue(uint32_t id) const; + bool variable_storage_is_aliased(const SPIRVariable &var); + SPIRVariable *maybe_get_backing_variable(uint32_t chain); + + void register_read(uint32_t expr, uint32_t chain, bool forwarded); + void register_write(uint32_t chain); + + inline bool is_continue(uint32_t next) const + { + return (ir.block_meta[next] & ParsedIR::BLOCK_META_CONTINUE_BIT) != 0; + } + + inline bool is_single_block_loop(uint32_t next) const + { + auto &block = get(next); + return block.merge == SPIRBlock::MergeLoop && block.continue_block == ID(next); + } + + inline bool is_break(uint32_t next) const + { + return (ir.block_meta[next] & + (ParsedIR::BLOCK_META_LOOP_MERGE_BIT | ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT)) != 0; + } + + inline bool is_loop_break(uint32_t next) const + { + return (ir.block_meta[next] & ParsedIR::BLOCK_META_LOOP_MERGE_BIT) != 0; + } + + inline bool is_conditional(uint32_t next) const + { + return (ir.block_meta[next] & + (ParsedIR::BLOCK_META_SELECTION_MERGE_BIT | ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT)) != 0; + } + + // Dependency tracking for temporaries read from variables. + void flush_dependees(SPIRVariable &var); + void flush_all_active_variables(); + void flush_control_dependent_expressions(uint32_t block); + void flush_all_atomic_capable_variables(); + void flush_all_aliased_variables(); + void register_global_read_dependencies(const SPIRBlock &func, uint32_t id); + void register_global_read_dependencies(const SPIRFunction &func, uint32_t id); + std::unordered_set invalid_expressions; + + void update_name_cache(std::unordered_set &cache, std::string &name); + + // A variant which takes two sets of names. The secondary is only used to verify there are no collisions, + // but the set is not updated when we have found a new name. + // Used primarily when adding block interface names. + void update_name_cache(std::unordered_set &cache_primary, + const std::unordered_set &cache_secondary, std::string &name); + + bool function_is_pure(const SPIRFunction &func); + bool block_is_pure(const SPIRBlock &block); + + bool execution_is_branchless(const SPIRBlock &from, const SPIRBlock &to) const; + bool execution_is_direct_branch(const SPIRBlock &from, const SPIRBlock &to) const; + bool execution_is_noop(const SPIRBlock &from, const SPIRBlock &to) const; + SPIRBlock::ContinueBlockType continue_block_type(const SPIRBlock &continue_block) const; + + void force_recompile(); + void force_recompile_guarantee_forward_progress(); + void clear_force_recompile(); + bool is_forcing_recompilation() const; + bool is_force_recompile = false; + bool is_force_recompile_forward_progress = false; + + bool block_is_loop_candidate(const SPIRBlock &block, SPIRBlock::Method method) const; + + bool types_are_logically_equivalent(const SPIRType &a, const SPIRType &b) const; + void inherit_expression_dependencies(uint32_t dst, uint32_t source); + void add_implied_read_expression(SPIRExpression &e, uint32_t source); + void add_implied_read_expression(SPIRAccessChain &e, uint32_t source); + void add_active_interface_variable(uint32_t var_id); + + // For proper multiple entry point support, allow querying if an Input or Output + // variable is part of that entry points interface. + bool interface_variable_exists_in_entry_point(uint32_t id) const; + + SmallVector combined_image_samplers; + + void remap_variable_type_name(const SPIRType &type, const std::string &var_name, std::string &type_name) const + { + if (variable_remap_callback) + variable_remap_callback(type, var_name, type_name); + } + + void set_ir(const ParsedIR &parsed); + void set_ir(ParsedIR &&parsed); + void parse_fixup(); + + // Used internally to implement various traversals for queries. + struct OpcodeHandler + { + virtual ~OpcodeHandler() = default; + + // Return true if traversal should continue. + // If false, traversal will end immediately. + virtual bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) = 0; + virtual bool handle_terminator(const SPIRBlock &) + { + return true; + } + + virtual bool follow_function_call(const SPIRFunction &) + { + return true; + } + + virtual void set_current_block(const SPIRBlock &) + { + } + + // Called after returning from a function or when entering a block, + // can be called multiple times per block, + // while set_current_block is only called on block entry. + virtual void rearm_current_block(const SPIRBlock &) + { + } + + virtual bool begin_function_scope(const uint32_t *, uint32_t) + { + return true; + } + + virtual bool end_function_scope(const uint32_t *, uint32_t) + { + return true; + } + }; + + struct BufferAccessHandler : OpcodeHandler + { + BufferAccessHandler(const Compiler &compiler_, SmallVector &ranges_, uint32_t id_) + : compiler(compiler_) + , ranges(ranges_) + , id(id_) + { + } + + bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override; + + const Compiler &compiler; + SmallVector &ranges; + uint32_t id; + + std::unordered_set seen; + }; + + struct InterfaceVariableAccessHandler : OpcodeHandler + { + InterfaceVariableAccessHandler(const Compiler &compiler_, std::unordered_set &variables_) + : compiler(compiler_) + , variables(variables_) + { + } + + bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override; + + const Compiler &compiler; + std::unordered_set &variables; + }; + + struct CombinedImageSamplerHandler : OpcodeHandler + { + CombinedImageSamplerHandler(Compiler &compiler_) + : compiler(compiler_) + { + } + bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override; + bool begin_function_scope(const uint32_t *args, uint32_t length) override; + bool end_function_scope(const uint32_t *args, uint32_t length) override; + + Compiler &compiler; + + // Each function in the call stack needs its own remapping for parameters so we can deduce which global variable each texture/sampler the parameter is statically bound to. + std::stack> parameter_remapping; + std::stack functions; + + uint32_t remap_parameter(uint32_t id); + void push_remap_parameters(const SPIRFunction &func, const uint32_t *args, uint32_t length); + void pop_remap_parameters(); + void register_combined_image_sampler(SPIRFunction &caller, VariableID combined_id, VariableID texture_id, + VariableID sampler_id, bool depth); + }; + + struct DummySamplerForCombinedImageHandler : OpcodeHandler + { + DummySamplerForCombinedImageHandler(Compiler &compiler_) + : compiler(compiler_) + { + } + bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override; + + Compiler &compiler; + bool need_dummy_sampler = false; + }; + + struct ActiveBuiltinHandler : OpcodeHandler + { + ActiveBuiltinHandler(Compiler &compiler_) + : compiler(compiler_) + { + } + + bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override; + Compiler &compiler; + + void handle_builtin(const SPIRType &type, spv::BuiltIn builtin, const Bitset &decoration_flags); + void add_if_builtin(uint32_t id); + void add_if_builtin_or_block(uint32_t id); + void add_if_builtin(uint32_t id, bool allow_blocks); + }; + + bool traverse_all_reachable_opcodes(const SPIRBlock &block, OpcodeHandler &handler) const; + bool traverse_all_reachable_opcodes(const SPIRFunction &block, OpcodeHandler &handler) const; + // This must be an ordered data structure so we always pick the same type aliases. + SmallVector global_struct_cache; + + ShaderResources get_shader_resources(const std::unordered_set *active_variables) const; + + VariableTypeRemapCallback variable_remap_callback; + + bool get_common_basic_type(const SPIRType &type, SPIRType::BaseType &base_type); + + std::unordered_set forced_temporaries; + std::unordered_set forwarded_temporaries; + std::unordered_set suppressed_usage_tracking; + std::unordered_set hoisted_temporaries; + std::unordered_set forced_invariant_temporaries; + + Bitset active_input_builtins; + Bitset active_output_builtins; + uint32_t clip_distance_count = 0; + uint32_t cull_distance_count = 0; + bool position_invariant = false; + + void analyze_parameter_preservation( + SPIRFunction &entry, const CFG &cfg, + const std::unordered_map> &variable_to_blocks, + const std::unordered_map> &complete_write_blocks); + + // If a variable ID or parameter ID is found in this set, a sampler is actually a shadow/comparison sampler. + // SPIR-V does not support this distinction, so we must keep track of this information outside the type system. + // There might be unrelated IDs found in this set which do not correspond to actual variables. + // This set should only be queried for the existence of samplers which are already known to be variables or parameter IDs. + // Similar is implemented for images, as well as if subpass inputs are needed. + std::unordered_set comparison_ids; + bool need_subpass_input = false; + bool need_subpass_input_ms = false; + + // In certain backends, we will need to use a dummy sampler to be able to emit code. + // GLSL does not support texelFetch on texture2D objects, but SPIR-V does, + // so we need to workaround by having the application inject a dummy sampler. + uint32_t dummy_sampler_id = 0; + + void analyze_image_and_sampler_usage(); + + struct CombinedImageSamplerDrefHandler : OpcodeHandler + { + CombinedImageSamplerDrefHandler(Compiler &compiler_) + : compiler(compiler_) + { + } + bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override; + + Compiler &compiler; + std::unordered_set dref_combined_samplers; + }; + + struct CombinedImageSamplerUsageHandler : OpcodeHandler + { + CombinedImageSamplerUsageHandler(Compiler &compiler_, + const std::unordered_set &dref_combined_samplers_) + : compiler(compiler_) + , dref_combined_samplers(dref_combined_samplers_) + { + } + + bool begin_function_scope(const uint32_t *args, uint32_t length) override; + bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override; + Compiler &compiler; + const std::unordered_set &dref_combined_samplers; + + std::unordered_map> dependency_hierarchy; + std::unordered_set comparison_ids; + + void add_hierarchy_to_comparison_ids(uint32_t ids); + bool need_subpass_input = false; + bool need_subpass_input_ms = false; + void add_dependency(uint32_t dst, uint32_t src); + }; + + void build_function_control_flow_graphs_and_analyze(); + std::unordered_map> function_cfgs; + const CFG &get_cfg_for_current_function() const; + const CFG &get_cfg_for_function(uint32_t id) const; + + struct CFGBuilder : OpcodeHandler + { + explicit CFGBuilder(Compiler &compiler_); + + bool follow_function_call(const SPIRFunction &func) override; + bool handle(spv::Op op, const uint32_t *args, uint32_t length) override; + Compiler &compiler; + std::unordered_map> function_cfgs; + }; + + struct AnalyzeVariableScopeAccessHandler : OpcodeHandler + { + AnalyzeVariableScopeAccessHandler(Compiler &compiler_, SPIRFunction &entry_); + + bool follow_function_call(const SPIRFunction &) override; + void set_current_block(const SPIRBlock &block) override; + + void notify_variable_access(uint32_t id, uint32_t block); + bool id_is_phi_variable(uint32_t id) const; + bool id_is_potential_temporary(uint32_t id) const; + bool handle(spv::Op op, const uint32_t *args, uint32_t length) override; + bool handle_terminator(const SPIRBlock &block) override; + + Compiler &compiler; + SPIRFunction &entry; + std::unordered_map> accessed_variables_to_block; + std::unordered_map> accessed_temporaries_to_block; + std::unordered_map result_id_to_type; + std::unordered_map> complete_write_variables_to_block; + std::unordered_map> partial_write_variables_to_block; + std::unordered_set access_chain_expressions; + // Access chains used in multiple blocks mean hoisting all the variables used to construct the access chain as not all backends can use pointers. + std::unordered_map> access_chain_children; + const SPIRBlock *current_block = nullptr; + }; + + struct StaticExpressionAccessHandler : OpcodeHandler + { + StaticExpressionAccessHandler(Compiler &compiler_, uint32_t variable_id_); + bool follow_function_call(const SPIRFunction &) override; + bool handle(spv::Op op, const uint32_t *args, uint32_t length) override; + + Compiler &compiler; + uint32_t variable_id; + uint32_t static_expression = 0; + uint32_t write_count = 0; + }; + + struct PhysicalBlockMeta + { + uint32_t alignment = 0; + }; + + struct PhysicalStorageBufferPointerHandler : OpcodeHandler + { + explicit PhysicalStorageBufferPointerHandler(Compiler &compiler_); + bool handle(spv::Op op, const uint32_t *args, uint32_t length) override; + Compiler &compiler; + + std::unordered_set non_block_types; + std::unordered_map physical_block_type_meta; + std::unordered_map access_chain_to_physical_block; + + void mark_aligned_access(uint32_t id, const uint32_t *args, uint32_t length); + PhysicalBlockMeta *find_block_meta(uint32_t id) const; + bool type_is_bda_block_entry(uint32_t type_id) const; + void setup_meta_chain(uint32_t type_id, uint32_t var_id); + uint32_t get_minimum_scalar_alignment(const SPIRType &type) const; + void analyze_non_block_types_from_block(const SPIRType &type); + uint32_t get_base_non_block_type_id(uint32_t type_id) const; + }; + void analyze_non_block_pointer_types(); + SmallVector physical_storage_non_block_pointer_types; + std::unordered_map physical_storage_type_to_alignment; + + void analyze_variable_scope(SPIRFunction &function, AnalyzeVariableScopeAccessHandler &handler); + void find_function_local_luts(SPIRFunction &function, const AnalyzeVariableScopeAccessHandler &handler, + bool single_function); + bool may_read_undefined_variable_in_block(const SPIRBlock &block, uint32_t var); + + // Finds all resources that are written to from inside the critical section, if present. + // The critical section is delimited by OpBeginInvocationInterlockEXT and + // OpEndInvocationInterlockEXT instructions. In MSL and HLSL, any resources written + // while inside the critical section must be placed in a raster order group. + struct InterlockedResourceAccessHandler : OpcodeHandler + { + InterlockedResourceAccessHandler(Compiler &compiler_, uint32_t entry_point_id) + : compiler(compiler_) + { + call_stack.push_back(entry_point_id); + } + + bool handle(spv::Op op, const uint32_t *args, uint32_t length) override; + bool begin_function_scope(const uint32_t *args, uint32_t length) override; + bool end_function_scope(const uint32_t *args, uint32_t length) override; + + Compiler &compiler; + bool in_crit_sec = false; + + uint32_t interlock_function_id = 0; + bool split_function_case = false; + bool control_flow_interlock = false; + bool use_critical_section = false; + bool call_stack_is_interlocked = false; + SmallVector call_stack; + + void access_potential_resource(uint32_t id); + }; + + struct InterlockedResourceAccessPrepassHandler : OpcodeHandler + { + InterlockedResourceAccessPrepassHandler(Compiler &compiler_, uint32_t entry_point_id) + : compiler(compiler_) + { + call_stack.push_back(entry_point_id); + } + + void rearm_current_block(const SPIRBlock &block) override; + bool handle(spv::Op op, const uint32_t *args, uint32_t length) override; + bool begin_function_scope(const uint32_t *args, uint32_t length) override; + bool end_function_scope(const uint32_t *args, uint32_t length) override; + + Compiler &compiler; + uint32_t interlock_function_id = 0; + uint32_t current_block_id = 0; + bool split_function_case = false; + bool control_flow_interlock = false; + SmallVector call_stack; + }; + + void analyze_interlocked_resource_usage(); + // The set of all resources written while inside the critical section, if present. + std::unordered_set interlocked_resources; + bool interlocked_is_complex = false; + + void make_constant_null(uint32_t id, uint32_t type); + + std::unordered_map declared_block_names; + + bool instruction_to_result_type(uint32_t &result_type, uint32_t &result_id, spv::Op op, const uint32_t *args, + uint32_t length); + + Bitset combined_decoration_for_member(const SPIRType &type, uint32_t index) const; + static bool is_desktop_only_format(spv::ImageFormat format); + + bool is_depth_image(const SPIRType &type, uint32_t id) const; + + void set_extended_decoration(uint32_t id, ExtendedDecorations decoration, uint32_t value = 0); + uint32_t get_extended_decoration(uint32_t id, ExtendedDecorations decoration) const; + bool has_extended_decoration(uint32_t id, ExtendedDecorations decoration) const; + void unset_extended_decoration(uint32_t id, ExtendedDecorations decoration); + + void set_extended_member_decoration(uint32_t type, uint32_t index, ExtendedDecorations decoration, + uint32_t value = 0); + uint32_t get_extended_member_decoration(uint32_t type, uint32_t index, ExtendedDecorations decoration) const; + bool has_extended_member_decoration(uint32_t type, uint32_t index, ExtendedDecorations decoration) const; + void unset_extended_member_decoration(uint32_t type, uint32_t index, ExtendedDecorations decoration); + + bool type_is_array_of_pointers(const SPIRType &type) const; + bool type_is_top_level_physical_pointer(const SPIRType &type) const; + bool type_is_block_like(const SPIRType &type) const; + bool type_is_opaque_value(const SPIRType &type) const; + + bool reflection_ssbo_instance_name_is_significant() const; + std::string get_remapped_declared_block_name(uint32_t id, bool fallback_prefer_instance_name) const; + + bool flush_phi_required(BlockID from, BlockID to) const; + + uint32_t evaluate_spec_constant_u32(const SPIRConstantOp &spec) const; + uint32_t evaluate_constant_u32(uint32_t id) const; + + bool is_vertex_like_shader() const; + + // Get the correct case list for the OpSwitch, since it can be either a + // 32 bit wide condition or a 64 bit, but the type is not embedded in the + // instruction itself. + const SmallVector &get_case_list(const SPIRBlock &block) const; + +private: + // Used only to implement the old deprecated get_entry_point() interface. + const SPIREntryPoint &get_first_entry_point(const std::string &name) const; + SPIREntryPoint &get_first_entry_point(const std::string &name); +}; +} // namespace SPIRV_CROSS_NAMESPACE + +#endif diff --git a/local_packages/include/spirv_cross/spirv_cross_c.h b/local_packages/include/spirv_cross/spirv_cross_c.h new file mode 100644 index 0000000..601a6cf --- /dev/null +++ b/local_packages/include/spirv_cross/spirv_cross_c.h @@ -0,0 +1,1074 @@ +/* + * Copyright 2019-2021 Hans-Kristian Arntzen + * SPDX-License-Identifier: Apache-2.0 OR MIT + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * At your option, you may choose to accept this material under either: + * 1. The Apache License, Version 2.0, found at , or + * 2. The MIT License, found at . + */ + +#ifndef SPIRV_CROSS_C_API_H +#define SPIRV_CROSS_C_API_H + +#include +#include "spirv.h" + +/* + * C89-compatible wrapper for SPIRV-Cross' API. + * Documentation here is sparse unless the behavior does not map 1:1 with C++ API. + * It is recommended to look at the canonical C++ API for more detailed information. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/* Bumped if ABI or API breaks backwards compatibility. */ +#define SPVC_C_API_VERSION_MAJOR 0 +/* Bumped if APIs or enumerations are added in a backwards compatible way. */ +#define SPVC_C_API_VERSION_MINOR 52 +/* Bumped if internal implementation details change. */ +#define SPVC_C_API_VERSION_PATCH 0 + +#if !defined(SPVC_PUBLIC_API) +#if defined(SPVC_EXPORT_SYMBOLS) +/* Exports symbols. Standard C calling convention is used. */ +#if defined(__GNUC__) +#define SPVC_PUBLIC_API __attribute__((visibility("default"))) +#elif defined(_MSC_VER) +#define SPVC_PUBLIC_API __declspec(dllexport) +#else +#define SPVC_PUBLIC_API +#endif +#else +#define SPVC_PUBLIC_API +#endif +#endif + +/* + * Gets the SPVC_C_API_VERSION_* used to build this library. + * Can be used to check for ABI mismatch if so-versioning did not catch it. + */ +SPVC_PUBLIC_API void spvc_get_version(unsigned *major, unsigned *minor, unsigned *patch); + +/* Gets a human readable version string to identify which commit a particular binary was created from. */ +SPVC_PUBLIC_API const char *spvc_get_commit_revision_and_timestamp(void); + +/* These types are opaque to the user. */ +typedef struct spvc_context_s *spvc_context; +typedef struct spvc_parsed_ir_s *spvc_parsed_ir; +typedef struct spvc_compiler_s *spvc_compiler; +typedef struct spvc_compiler_options_s *spvc_compiler_options; +typedef struct spvc_resources_s *spvc_resources; +struct spvc_type_s; +typedef const struct spvc_type_s *spvc_type; +typedef struct spvc_constant_s *spvc_constant; +struct spvc_set_s; +typedef const struct spvc_set_s *spvc_set; + +/* + * Shallow typedefs. All SPIR-V IDs are plain 32-bit numbers, but this helps communicate which data is used. + * Maps to a SPIRType. + */ +typedef SpvId spvc_type_id; +/* Maps to a SPIRVariable. */ +typedef SpvId spvc_variable_id; +/* Maps to a SPIRConstant. */ +typedef SpvId spvc_constant_id; + +/* See C++ API. */ +typedef struct spvc_reflected_resource +{ + spvc_variable_id id; + spvc_type_id base_type_id; + spvc_type_id type_id; + const char *name; +} spvc_reflected_resource; + +typedef struct spvc_reflected_builtin_resource +{ + SpvBuiltIn builtin; + spvc_type_id value_type_id; + spvc_reflected_resource resource; +} spvc_reflected_builtin_resource; + +/* See C++ API. */ +typedef struct spvc_entry_point +{ + SpvExecutionModel execution_model; + const char *name; +} spvc_entry_point; + +/* See C++ API. */ +typedef struct spvc_combined_image_sampler +{ + spvc_variable_id combined_id; + spvc_variable_id image_id; + spvc_variable_id sampler_id; +} spvc_combined_image_sampler; + +/* See C++ API. */ +typedef struct spvc_specialization_constant +{ + spvc_constant_id id; + unsigned constant_id; +} spvc_specialization_constant; + +/* See C++ API. */ +typedef struct spvc_buffer_range +{ + unsigned index; + size_t offset; + size_t range; +} spvc_buffer_range; + +/* See C++ API. */ +typedef struct spvc_hlsl_root_constants +{ + unsigned start; + unsigned end; + unsigned binding; + unsigned space; +} spvc_hlsl_root_constants; + +/* See C++ API. */ +typedef struct spvc_hlsl_vertex_attribute_remap +{ + unsigned location; + const char *semantic; +} spvc_hlsl_vertex_attribute_remap; + +/* + * Be compatible with non-C99 compilers, which do not have stdbool. + * Only recent MSVC compilers supports this for example, and ideally SPIRV-Cross should be linkable + * from a wide range of compilers in its C wrapper. + */ +typedef unsigned char spvc_bool; +#define SPVC_TRUE ((spvc_bool)1) +#define SPVC_FALSE ((spvc_bool)0) + +typedef enum spvc_result +{ + /* Success. */ + SPVC_SUCCESS = 0, + + /* The SPIR-V is invalid. Should have been caught by validation ideally. */ + SPVC_ERROR_INVALID_SPIRV = -1, + + /* The SPIR-V might be valid or invalid, but SPIRV-Cross currently cannot correctly translate this to your target language. */ + SPVC_ERROR_UNSUPPORTED_SPIRV = -2, + + /* If for some reason we hit this, new or malloc failed. */ + SPVC_ERROR_OUT_OF_MEMORY = -3, + + /* Invalid API argument. */ + SPVC_ERROR_INVALID_ARGUMENT = -4, + + SPVC_ERROR_INT_MAX = 0x7fffffff +} spvc_result; + +typedef enum spvc_capture_mode +{ + /* The Parsed IR payload will be copied, and the handle can be reused to create other compiler instances. */ + SPVC_CAPTURE_MODE_COPY = 0, + + /* + * The payload will now be owned by the compiler. + * parsed_ir should now be considered a dead blob and must not be used further. + * This is optimal for performance and should be the go-to option. + */ + SPVC_CAPTURE_MODE_TAKE_OWNERSHIP = 1, + + SPVC_CAPTURE_MODE_INT_MAX = 0x7fffffff +} spvc_capture_mode; + +typedef enum spvc_backend +{ + /* This backend can only perform reflection, no compiler options are supported. Maps to spirv_cross::Compiler. */ + SPVC_BACKEND_NONE = 0, + SPVC_BACKEND_GLSL = 1, /* spirv_cross::CompilerGLSL */ + SPVC_BACKEND_HLSL = 2, /* CompilerHLSL */ + SPVC_BACKEND_MSL = 3, /* CompilerMSL */ + SPVC_BACKEND_CPP = 4, /* CompilerCPP */ + SPVC_BACKEND_JSON = 5, /* CompilerReflection w/ JSON backend */ + SPVC_BACKEND_INT_MAX = 0x7fffffff +} spvc_backend; + +/* Maps to C++ API. */ +typedef enum spvc_resource_type +{ + SPVC_RESOURCE_TYPE_UNKNOWN = 0, + SPVC_RESOURCE_TYPE_UNIFORM_BUFFER = 1, + SPVC_RESOURCE_TYPE_STORAGE_BUFFER = 2, + SPVC_RESOURCE_TYPE_STAGE_INPUT = 3, + SPVC_RESOURCE_TYPE_STAGE_OUTPUT = 4, + SPVC_RESOURCE_TYPE_SUBPASS_INPUT = 5, + SPVC_RESOURCE_TYPE_STORAGE_IMAGE = 6, + SPVC_RESOURCE_TYPE_SAMPLED_IMAGE = 7, + SPVC_RESOURCE_TYPE_ATOMIC_COUNTER = 8, + SPVC_RESOURCE_TYPE_PUSH_CONSTANT = 9, + SPVC_RESOURCE_TYPE_SEPARATE_IMAGE = 10, + SPVC_RESOURCE_TYPE_SEPARATE_SAMPLERS = 11, + SPVC_RESOURCE_TYPE_ACCELERATION_STRUCTURE = 12, + SPVC_RESOURCE_TYPE_RAY_QUERY = 13, + SPVC_RESOURCE_TYPE_SHADER_RECORD_BUFFER = 14, + SPVC_RESOURCE_TYPE_INT_MAX = 0x7fffffff +} spvc_resource_type; + +typedef enum spvc_builtin_resource_type +{ + SPVC_BUILTIN_RESOURCE_TYPE_UNKNOWN = 0, + SPVC_BUILTIN_RESOURCE_TYPE_STAGE_INPUT = 1, + SPVC_BUILTIN_RESOURCE_TYPE_STAGE_OUTPUT = 2, + SPVC_BUILTIN_RESOURCE_TYPE_INT_MAX = 0x7fffffff +} spvc_builtin_resource_type; + +/* Maps to spirv_cross::SPIRType::BaseType. */ +typedef enum spvc_basetype +{ + SPVC_BASETYPE_UNKNOWN = 0, + SPVC_BASETYPE_VOID = 1, + SPVC_BASETYPE_BOOLEAN = 2, + SPVC_BASETYPE_INT8 = 3, + SPVC_BASETYPE_UINT8 = 4, + SPVC_BASETYPE_INT16 = 5, + SPVC_BASETYPE_UINT16 = 6, + SPVC_BASETYPE_INT32 = 7, + SPVC_BASETYPE_UINT32 = 8, + SPVC_BASETYPE_INT64 = 9, + SPVC_BASETYPE_UINT64 = 10, + SPVC_BASETYPE_ATOMIC_COUNTER = 11, + SPVC_BASETYPE_FP16 = 12, + SPVC_BASETYPE_FP32 = 13, + SPVC_BASETYPE_FP64 = 14, + SPVC_BASETYPE_STRUCT = 15, + SPVC_BASETYPE_IMAGE = 16, + SPVC_BASETYPE_SAMPLED_IMAGE = 17, + SPVC_BASETYPE_SAMPLER = 18, + SPVC_BASETYPE_ACCELERATION_STRUCTURE = 19, + + SPVC_BASETYPE_INT_MAX = 0x7fffffff +} spvc_basetype; + +#define SPVC_COMPILER_OPTION_COMMON_BIT 0x1000000 +#define SPVC_COMPILER_OPTION_GLSL_BIT 0x2000000 +#define SPVC_COMPILER_OPTION_HLSL_BIT 0x4000000 +#define SPVC_COMPILER_OPTION_MSL_BIT 0x8000000 +#define SPVC_COMPILER_OPTION_LANG_BITS 0x0f000000 +#define SPVC_COMPILER_OPTION_ENUM_BITS 0xffffff + +#define SPVC_MAKE_MSL_VERSION(major, minor, patch) ((major) * 10000 + (minor) * 100 + (patch)) + +/* Maps to C++ API. */ +typedef enum spvc_msl_platform +{ + SPVC_MSL_PLATFORM_IOS = 0, + SPVC_MSL_PLATFORM_MACOS = 1, + SPVC_MSL_PLATFORM_MAX_INT = 0x7fffffff +} spvc_msl_platform; + +/* Maps to C++ API. */ +typedef enum spvc_msl_index_type +{ + SPVC_MSL_INDEX_TYPE_NONE = 0, + SPVC_MSL_INDEX_TYPE_UINT16 = 1, + SPVC_MSL_INDEX_TYPE_UINT32 = 2, + SPVC_MSL_INDEX_TYPE_MAX_INT = 0x7fffffff +} spvc_msl_index_type; + +/* Maps to C++ API. */ +typedef enum spvc_msl_shader_variable_format +{ + SPVC_MSL_SHADER_VARIABLE_FORMAT_OTHER = 0, + SPVC_MSL_SHADER_VARIABLE_FORMAT_UINT8 = 1, + SPVC_MSL_SHADER_VARIABLE_FORMAT_UINT16 = 2, + SPVC_MSL_SHADER_VARIABLE_FORMAT_ANY16 = 3, + SPVC_MSL_SHADER_VARIABLE_FORMAT_ANY32 = 4, + + /* Deprecated names. */ + SPVC_MSL_VERTEX_FORMAT_OTHER = SPVC_MSL_SHADER_VARIABLE_FORMAT_OTHER, + SPVC_MSL_VERTEX_FORMAT_UINT8 = SPVC_MSL_SHADER_VARIABLE_FORMAT_UINT8, + SPVC_MSL_VERTEX_FORMAT_UINT16 = SPVC_MSL_SHADER_VARIABLE_FORMAT_UINT16, + SPVC_MSL_SHADER_INPUT_FORMAT_OTHER = SPVC_MSL_SHADER_VARIABLE_FORMAT_OTHER, + SPVC_MSL_SHADER_INPUT_FORMAT_UINT8 = SPVC_MSL_SHADER_VARIABLE_FORMAT_UINT8, + SPVC_MSL_SHADER_INPUT_FORMAT_UINT16 = SPVC_MSL_SHADER_VARIABLE_FORMAT_UINT16, + SPVC_MSL_SHADER_INPUT_FORMAT_ANY16 = SPVC_MSL_SHADER_VARIABLE_FORMAT_ANY16, + SPVC_MSL_SHADER_INPUT_FORMAT_ANY32 = SPVC_MSL_SHADER_VARIABLE_FORMAT_ANY32, + + + SPVC_MSL_SHADER_INPUT_FORMAT_INT_MAX = 0x7fffffff +} spvc_msl_shader_variable_format, spvc_msl_shader_input_format, spvc_msl_vertex_format; + +/* Maps to C++ API. Deprecated; use spvc_msl_shader_interface_var. */ +typedef struct spvc_msl_vertex_attribute +{ + unsigned location; + + /* Obsolete, do not use. Only lingers on for ABI compatibility. */ + unsigned msl_buffer; + /* Obsolete, do not use. Only lingers on for ABI compatibility. */ + unsigned msl_offset; + /* Obsolete, do not use. Only lingers on for ABI compatibility. */ + unsigned msl_stride; + /* Obsolete, do not use. Only lingers on for ABI compatibility. */ + spvc_bool per_instance; + + spvc_msl_vertex_format format; + SpvBuiltIn builtin; +} spvc_msl_vertex_attribute; + +/* + * Initializes the vertex attribute struct. + */ +SPVC_PUBLIC_API void spvc_msl_vertex_attribute_init(spvc_msl_vertex_attribute *attr); + +/* Maps to C++ API. Deprecated; use spvc_msl_shader_interface_var_2. */ +typedef struct spvc_msl_shader_interface_var +{ + unsigned location; + spvc_msl_vertex_format format; + SpvBuiltIn builtin; + unsigned vecsize; +} spvc_msl_shader_interface_var, spvc_msl_shader_input; + +/* + * Initializes the shader input struct. + * Deprecated. Use spvc_msl_shader_interface_var_init_2(). + */ +SPVC_PUBLIC_API void spvc_msl_shader_interface_var_init(spvc_msl_shader_interface_var *var); +/* + * Deprecated. Use spvc_msl_shader_interface_var_init_2(). + */ +SPVC_PUBLIC_API void spvc_msl_shader_input_init(spvc_msl_shader_input *input); + +/* Maps to C++ API. */ +typedef enum spvc_msl_shader_variable_rate +{ + SPVC_MSL_SHADER_VARIABLE_RATE_PER_VERTEX = 0, + SPVC_MSL_SHADER_VARIABLE_RATE_PER_PRIMITIVE = 1, + SPVC_MSL_SHADER_VARIABLE_RATE_PER_PATCH = 2, + + SPVC_MSL_SHADER_VARIABLE_RATE_INT_MAX = 0x7fffffff, +} spvc_msl_shader_variable_rate; + +/* Maps to C++ API. */ +typedef struct spvc_msl_shader_interface_var_2 +{ + unsigned location; + spvc_msl_shader_variable_format format; + SpvBuiltIn builtin; + unsigned vecsize; + spvc_msl_shader_variable_rate rate; +} spvc_msl_shader_interface_var_2; + +/* + * Initializes the shader interface variable struct. + */ +SPVC_PUBLIC_API void spvc_msl_shader_interface_var_init_2(spvc_msl_shader_interface_var_2 *var); + +/* Maps to C++ API. */ +typedef struct spvc_msl_resource_binding +{ + SpvExecutionModel stage; + unsigned desc_set; + unsigned binding; + unsigned msl_buffer; + unsigned msl_texture; + unsigned msl_sampler; +} spvc_msl_resource_binding; + +/* + * Initializes the resource binding struct. + * The defaults are non-zero. + */ +SPVC_PUBLIC_API void spvc_msl_resource_binding_init(spvc_msl_resource_binding *binding); + +#define SPVC_MSL_PUSH_CONSTANT_DESC_SET (~(0u)) +#define SPVC_MSL_PUSH_CONSTANT_BINDING (0) +#define SPVC_MSL_SWIZZLE_BUFFER_BINDING (~(1u)) +#define SPVC_MSL_BUFFER_SIZE_BUFFER_BINDING (~(2u)) +#define SPVC_MSL_ARGUMENT_BUFFER_BINDING (~(3u)) + +/* Obsolete. Sticks around for backwards compatibility. */ +#define SPVC_MSL_AUX_BUFFER_STRUCT_VERSION 1 + +/* Runtime check for incompatibility. Obsolete. */ +SPVC_PUBLIC_API unsigned spvc_msl_get_aux_buffer_struct_version(void); + +/* Maps to C++ API. */ +typedef enum spvc_msl_sampler_coord +{ + SPVC_MSL_SAMPLER_COORD_NORMALIZED = 0, + SPVC_MSL_SAMPLER_COORD_PIXEL = 1, + SPVC_MSL_SAMPLER_INT_MAX = 0x7fffffff +} spvc_msl_sampler_coord; + +/* Maps to C++ API. */ +typedef enum spvc_msl_sampler_filter +{ + SPVC_MSL_SAMPLER_FILTER_NEAREST = 0, + SPVC_MSL_SAMPLER_FILTER_LINEAR = 1, + SPVC_MSL_SAMPLER_FILTER_INT_MAX = 0x7fffffff +} spvc_msl_sampler_filter; + +/* Maps to C++ API. */ +typedef enum spvc_msl_sampler_mip_filter +{ + SPVC_MSL_SAMPLER_MIP_FILTER_NONE = 0, + SPVC_MSL_SAMPLER_MIP_FILTER_NEAREST = 1, + SPVC_MSL_SAMPLER_MIP_FILTER_LINEAR = 2, + SPVC_MSL_SAMPLER_MIP_FILTER_INT_MAX = 0x7fffffff +} spvc_msl_sampler_mip_filter; + +/* Maps to C++ API. */ +typedef enum spvc_msl_sampler_address +{ + SPVC_MSL_SAMPLER_ADDRESS_CLAMP_TO_ZERO = 0, + SPVC_MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE = 1, + SPVC_MSL_SAMPLER_ADDRESS_CLAMP_TO_BORDER = 2, + SPVC_MSL_SAMPLER_ADDRESS_REPEAT = 3, + SPVC_MSL_SAMPLER_ADDRESS_MIRRORED_REPEAT = 4, + SPVC_MSL_SAMPLER_ADDRESS_INT_MAX = 0x7fffffff +} spvc_msl_sampler_address; + +/* Maps to C++ API. */ +typedef enum spvc_msl_sampler_compare_func +{ + SPVC_MSL_SAMPLER_COMPARE_FUNC_NEVER = 0, + SPVC_MSL_SAMPLER_COMPARE_FUNC_LESS = 1, + SPVC_MSL_SAMPLER_COMPARE_FUNC_LESS_EQUAL = 2, + SPVC_MSL_SAMPLER_COMPARE_FUNC_GREATER = 3, + SPVC_MSL_SAMPLER_COMPARE_FUNC_GREATER_EQUAL = 4, + SPVC_MSL_SAMPLER_COMPARE_FUNC_EQUAL = 5, + SPVC_MSL_SAMPLER_COMPARE_FUNC_NOT_EQUAL = 6, + SPVC_MSL_SAMPLER_COMPARE_FUNC_ALWAYS = 7, + SPVC_MSL_SAMPLER_COMPARE_FUNC_INT_MAX = 0x7fffffff +} spvc_msl_sampler_compare_func; + +/* Maps to C++ API. */ +typedef enum spvc_msl_sampler_border_color +{ + SPVC_MSL_SAMPLER_BORDER_COLOR_TRANSPARENT_BLACK = 0, + SPVC_MSL_SAMPLER_BORDER_COLOR_OPAQUE_BLACK = 1, + SPVC_MSL_SAMPLER_BORDER_COLOR_OPAQUE_WHITE = 2, + SPVC_MSL_SAMPLER_BORDER_COLOR_INT_MAX = 0x7fffffff +} spvc_msl_sampler_border_color; + +/* Maps to C++ API. */ +typedef enum spvc_msl_format_resolution +{ + SPVC_MSL_FORMAT_RESOLUTION_444 = 0, + SPVC_MSL_FORMAT_RESOLUTION_422, + SPVC_MSL_FORMAT_RESOLUTION_420, + SPVC_MSL_FORMAT_RESOLUTION_INT_MAX = 0x7fffffff +} spvc_msl_format_resolution; + +/* Maps to C++ API. */ +typedef enum spvc_msl_chroma_location +{ + SPVC_MSL_CHROMA_LOCATION_COSITED_EVEN = 0, + SPVC_MSL_CHROMA_LOCATION_MIDPOINT, + SPVC_MSL_CHROMA_LOCATION_INT_MAX = 0x7fffffff +} spvc_msl_chroma_location; + +/* Maps to C++ API. */ +typedef enum spvc_msl_component_swizzle +{ + SPVC_MSL_COMPONENT_SWIZZLE_IDENTITY = 0, + SPVC_MSL_COMPONENT_SWIZZLE_ZERO, + SPVC_MSL_COMPONENT_SWIZZLE_ONE, + SPVC_MSL_COMPONENT_SWIZZLE_R, + SPVC_MSL_COMPONENT_SWIZZLE_G, + SPVC_MSL_COMPONENT_SWIZZLE_B, + SPVC_MSL_COMPONENT_SWIZZLE_A, + SPVC_MSL_COMPONENT_SWIZZLE_INT_MAX = 0x7fffffff +} spvc_msl_component_swizzle; + +/* Maps to C++ API. */ +typedef enum spvc_msl_sampler_ycbcr_model_conversion +{ + SPVC_MSL_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY = 0, + SPVC_MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY, + SPVC_MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_BT_709, + SPVC_MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_BT_601, + SPVC_MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_BT_2020, + SPVC_MSL_SAMPLER_YCBCR_MODEL_CONVERSION_INT_MAX = 0x7fffffff +} spvc_msl_sampler_ycbcr_model_conversion; + +/* Maps to C+ API. */ +typedef enum spvc_msl_sampler_ycbcr_range +{ + SPVC_MSL_SAMPLER_YCBCR_RANGE_ITU_FULL = 0, + SPVC_MSL_SAMPLER_YCBCR_RANGE_ITU_NARROW, + SPVC_MSL_SAMPLER_YCBCR_RANGE_INT_MAX = 0x7fffffff +} spvc_msl_sampler_ycbcr_range; + +/* Maps to C++ API. */ +typedef struct spvc_msl_constexpr_sampler +{ + spvc_msl_sampler_coord coord; + spvc_msl_sampler_filter min_filter; + spvc_msl_sampler_filter mag_filter; + spvc_msl_sampler_mip_filter mip_filter; + spvc_msl_sampler_address s_address; + spvc_msl_sampler_address t_address; + spvc_msl_sampler_address r_address; + spvc_msl_sampler_compare_func compare_func; + spvc_msl_sampler_border_color border_color; + float lod_clamp_min; + float lod_clamp_max; + int max_anisotropy; + + spvc_bool compare_enable; + spvc_bool lod_clamp_enable; + spvc_bool anisotropy_enable; +} spvc_msl_constexpr_sampler; + +/* + * Initializes the constexpr sampler struct. + * The defaults are non-zero. + */ +SPVC_PUBLIC_API void spvc_msl_constexpr_sampler_init(spvc_msl_constexpr_sampler *sampler); + +/* Maps to the sampler Y'CbCr conversion-related portions of MSLConstexprSampler. See C++ API for defaults and details. */ +typedef struct spvc_msl_sampler_ycbcr_conversion +{ + unsigned planes; + spvc_msl_format_resolution resolution; + spvc_msl_sampler_filter chroma_filter; + spvc_msl_chroma_location x_chroma_offset; + spvc_msl_chroma_location y_chroma_offset; + spvc_msl_component_swizzle swizzle[4]; + spvc_msl_sampler_ycbcr_model_conversion ycbcr_model; + spvc_msl_sampler_ycbcr_range ycbcr_range; + unsigned bpc; +} spvc_msl_sampler_ycbcr_conversion; + +/* + * Initializes the constexpr sampler struct. + * The defaults are non-zero. + */ +SPVC_PUBLIC_API void spvc_msl_sampler_ycbcr_conversion_init(spvc_msl_sampler_ycbcr_conversion *conv); + +/* Maps to C++ API. */ +typedef enum spvc_hlsl_binding_flag_bits +{ + SPVC_HLSL_BINDING_AUTO_NONE_BIT = 0, + SPVC_HLSL_BINDING_AUTO_PUSH_CONSTANT_BIT = 1 << 0, + SPVC_HLSL_BINDING_AUTO_CBV_BIT = 1 << 1, + SPVC_HLSL_BINDING_AUTO_SRV_BIT = 1 << 2, + SPVC_HLSL_BINDING_AUTO_UAV_BIT = 1 << 3, + SPVC_HLSL_BINDING_AUTO_SAMPLER_BIT = 1 << 4, + SPVC_HLSL_BINDING_AUTO_ALL = 0x7fffffff +} spvc_hlsl_binding_flag_bits; +typedef unsigned spvc_hlsl_binding_flags; + +#define SPVC_HLSL_PUSH_CONSTANT_DESC_SET (~(0u)) +#define SPVC_HLSL_PUSH_CONSTANT_BINDING (0) + +/* Maps to C++ API. */ +typedef struct spvc_hlsl_resource_binding_mapping +{ + unsigned register_space; + unsigned register_binding; +} spvc_hlsl_resource_binding_mapping; + +typedef struct spvc_hlsl_resource_binding +{ + SpvExecutionModel stage; + unsigned desc_set; + unsigned binding; + + spvc_hlsl_resource_binding_mapping cbv, uav, srv, sampler; +} spvc_hlsl_resource_binding; + +/* + * Initializes the resource binding struct. + * The defaults are non-zero. + */ +SPVC_PUBLIC_API void spvc_hlsl_resource_binding_init(spvc_hlsl_resource_binding *binding); + +/* Maps to the various spirv_cross::Compiler*::Option structures. See C++ API for defaults and details. */ +typedef enum spvc_compiler_option +{ + SPVC_COMPILER_OPTION_UNKNOWN = 0, + + SPVC_COMPILER_OPTION_FORCE_TEMPORARY = 1 | SPVC_COMPILER_OPTION_COMMON_BIT, + SPVC_COMPILER_OPTION_FLATTEN_MULTIDIMENSIONAL_ARRAYS = 2 | SPVC_COMPILER_OPTION_COMMON_BIT, + SPVC_COMPILER_OPTION_FIXUP_DEPTH_CONVENTION = 3 | SPVC_COMPILER_OPTION_COMMON_BIT, + SPVC_COMPILER_OPTION_FLIP_VERTEX_Y = 4 | SPVC_COMPILER_OPTION_COMMON_BIT, + + SPVC_COMPILER_OPTION_GLSL_SUPPORT_NONZERO_BASE_INSTANCE = 5 | SPVC_COMPILER_OPTION_GLSL_BIT, + SPVC_COMPILER_OPTION_GLSL_SEPARATE_SHADER_OBJECTS = 6 | SPVC_COMPILER_OPTION_GLSL_BIT, + SPVC_COMPILER_OPTION_GLSL_ENABLE_420PACK_EXTENSION = 7 | SPVC_COMPILER_OPTION_GLSL_BIT, + SPVC_COMPILER_OPTION_GLSL_VERSION = 8 | SPVC_COMPILER_OPTION_GLSL_BIT, + SPVC_COMPILER_OPTION_GLSL_ES = 9 | SPVC_COMPILER_OPTION_GLSL_BIT, + SPVC_COMPILER_OPTION_GLSL_VULKAN_SEMANTICS = 10 | SPVC_COMPILER_OPTION_GLSL_BIT, + SPVC_COMPILER_OPTION_GLSL_ES_DEFAULT_FLOAT_PRECISION_HIGHP = 11 | SPVC_COMPILER_OPTION_GLSL_BIT, + SPVC_COMPILER_OPTION_GLSL_ES_DEFAULT_INT_PRECISION_HIGHP = 12 | SPVC_COMPILER_OPTION_GLSL_BIT, + + SPVC_COMPILER_OPTION_HLSL_SHADER_MODEL = 13 | SPVC_COMPILER_OPTION_HLSL_BIT, + SPVC_COMPILER_OPTION_HLSL_POINT_SIZE_COMPAT = 14 | SPVC_COMPILER_OPTION_HLSL_BIT, + SPVC_COMPILER_OPTION_HLSL_POINT_COORD_COMPAT = 15 | SPVC_COMPILER_OPTION_HLSL_BIT, + SPVC_COMPILER_OPTION_HLSL_SUPPORT_NONZERO_BASE_VERTEX_BASE_INSTANCE = 16 | SPVC_COMPILER_OPTION_HLSL_BIT, + + SPVC_COMPILER_OPTION_MSL_VERSION = 17 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_TEXEL_BUFFER_TEXTURE_WIDTH = 18 | SPVC_COMPILER_OPTION_MSL_BIT, + + /* Obsolete, use SWIZZLE_BUFFER_INDEX instead. */ + SPVC_COMPILER_OPTION_MSL_AUX_BUFFER_INDEX = 19 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_SWIZZLE_BUFFER_INDEX = 19 | SPVC_COMPILER_OPTION_MSL_BIT, + + SPVC_COMPILER_OPTION_MSL_INDIRECT_PARAMS_BUFFER_INDEX = 20 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_SHADER_OUTPUT_BUFFER_INDEX = 21 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_SHADER_PATCH_OUTPUT_BUFFER_INDEX = 22 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_SHADER_TESS_FACTOR_OUTPUT_BUFFER_INDEX = 23 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_SHADER_INPUT_WORKGROUP_INDEX = 24 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_ENABLE_POINT_SIZE_BUILTIN = 25 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_DISABLE_RASTERIZATION = 26 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_CAPTURE_OUTPUT_TO_BUFFER = 27 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_SWIZZLE_TEXTURE_SAMPLES = 28 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_PAD_FRAGMENT_OUTPUT_COMPONENTS = 29 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_TESS_DOMAIN_ORIGIN_LOWER_LEFT = 30 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_PLATFORM = 31 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_ARGUMENT_BUFFERS = 32 | SPVC_COMPILER_OPTION_MSL_BIT, + + SPVC_COMPILER_OPTION_GLSL_EMIT_PUSH_CONSTANT_AS_UNIFORM_BUFFER = 33 | SPVC_COMPILER_OPTION_GLSL_BIT, + + SPVC_COMPILER_OPTION_MSL_TEXTURE_BUFFER_NATIVE = 34 | SPVC_COMPILER_OPTION_MSL_BIT, + + SPVC_COMPILER_OPTION_GLSL_EMIT_UNIFORM_BUFFER_AS_PLAIN_UNIFORMS = 35 | SPVC_COMPILER_OPTION_GLSL_BIT, + + SPVC_COMPILER_OPTION_MSL_BUFFER_SIZE_BUFFER_INDEX = 36 | SPVC_COMPILER_OPTION_MSL_BIT, + + SPVC_COMPILER_OPTION_EMIT_LINE_DIRECTIVES = 37 | SPVC_COMPILER_OPTION_COMMON_BIT, + + SPVC_COMPILER_OPTION_MSL_MULTIVIEW = 38 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_VIEW_MASK_BUFFER_INDEX = 39 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_DEVICE_INDEX = 40 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_VIEW_INDEX_FROM_DEVICE_INDEX = 41 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_DISPATCH_BASE = 42 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_DYNAMIC_OFFSETS_BUFFER_INDEX = 43 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_TEXTURE_1D_AS_2D = 44 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_ENABLE_BASE_INDEX_ZERO = 45 | SPVC_COMPILER_OPTION_MSL_BIT, + + /* Obsolete. Use MSL_FRAMEBUFFER_FETCH_SUBPASS instead. */ + SPVC_COMPILER_OPTION_MSL_IOS_FRAMEBUFFER_FETCH_SUBPASS = 46 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_FRAMEBUFFER_FETCH_SUBPASS = 46 | SPVC_COMPILER_OPTION_MSL_BIT, + + SPVC_COMPILER_OPTION_MSL_INVARIANT_FP_MATH = 47 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_EMULATE_CUBEMAP_ARRAY = 48 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_ENABLE_DECORATION_BINDING = 49 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_FORCE_ACTIVE_ARGUMENT_BUFFER_RESOURCES = 50 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_FORCE_NATIVE_ARRAYS = 51 | SPVC_COMPILER_OPTION_MSL_BIT, + + SPVC_COMPILER_OPTION_ENABLE_STORAGE_IMAGE_QUALIFIER_DEDUCTION = 52 | SPVC_COMPILER_OPTION_COMMON_BIT, + + SPVC_COMPILER_OPTION_HLSL_FORCE_STORAGE_BUFFER_AS_UAV = 53 | SPVC_COMPILER_OPTION_HLSL_BIT, + + SPVC_COMPILER_OPTION_FORCE_ZERO_INITIALIZED_VARIABLES = 54 | SPVC_COMPILER_OPTION_COMMON_BIT, + + SPVC_COMPILER_OPTION_HLSL_NONWRITABLE_UAV_TEXTURE_AS_SRV = 55 | SPVC_COMPILER_OPTION_HLSL_BIT, + + SPVC_COMPILER_OPTION_MSL_ENABLE_FRAG_OUTPUT_MASK = 56 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_ENABLE_FRAG_DEPTH_BUILTIN = 57 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_ENABLE_FRAG_STENCIL_REF_BUILTIN = 58 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_ENABLE_CLIP_DISTANCE_USER_VARYING = 59 | SPVC_COMPILER_OPTION_MSL_BIT, + + SPVC_COMPILER_OPTION_HLSL_ENABLE_16BIT_TYPES = 60 | SPVC_COMPILER_OPTION_HLSL_BIT, + + SPVC_COMPILER_OPTION_MSL_MULTI_PATCH_WORKGROUP = 61 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_SHADER_INPUT_BUFFER_INDEX = 62 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_SHADER_INDEX_BUFFER_INDEX = 63 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_VERTEX_FOR_TESSELLATION = 64 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_VERTEX_INDEX_TYPE = 65 | SPVC_COMPILER_OPTION_MSL_BIT, + + SPVC_COMPILER_OPTION_GLSL_FORCE_FLATTENED_IO_BLOCKS = 66 | SPVC_COMPILER_OPTION_GLSL_BIT, + + SPVC_COMPILER_OPTION_MSL_MULTIVIEW_LAYERED_RENDERING = 67 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_ARRAYED_SUBPASS_INPUT = 68 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_R32UI_LINEAR_TEXTURE_ALIGNMENT = 69 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_R32UI_ALIGNMENT_CONSTANT_ID = 70 | SPVC_COMPILER_OPTION_MSL_BIT, + + SPVC_COMPILER_OPTION_HLSL_FLATTEN_MATRIX_VERTEX_INPUT_SEMANTICS = 71 | SPVC_COMPILER_OPTION_HLSL_BIT, + + SPVC_COMPILER_OPTION_MSL_IOS_USE_SIMDGROUP_FUNCTIONS = 72 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_EMULATE_SUBGROUPS = 73 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_FIXED_SUBGROUP_SIZE = 74 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_FORCE_SAMPLE_RATE_SHADING = 75 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_IOS_SUPPORT_BASE_VERTEX_INSTANCE = 76 | SPVC_COMPILER_OPTION_MSL_BIT, + + SPVC_COMPILER_OPTION_GLSL_OVR_MULTIVIEW_VIEW_COUNT = 77 | SPVC_COMPILER_OPTION_GLSL_BIT, + + SPVC_COMPILER_OPTION_RELAX_NAN_CHECKS = 78 | SPVC_COMPILER_OPTION_COMMON_BIT, + + SPVC_COMPILER_OPTION_MSL_RAW_BUFFER_TESE_INPUT = 79 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_SHADER_PATCH_INPUT_BUFFER_INDEX = 80 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_MANUAL_HELPER_INVOCATION_UPDATES = 81 | SPVC_COMPILER_OPTION_MSL_BIT, + SPVC_COMPILER_OPTION_MSL_CHECK_DISCARDED_FRAG_STORES = 82 | SPVC_COMPILER_OPTION_MSL_BIT, + + SPVC_COMPILER_OPTION_INT_MAX = 0x7fffffff +} spvc_compiler_option; + +/* + * Context is the highest-level API construct. + * The context owns all memory allocations made by its child object hierarchy, including various non-opaque structs and strings. + * This means that the API user only has to care about one "destroy" call ever when using the C API. + * All pointers handed out by the APIs are only valid as long as the context + * is alive and spvc_context_release_allocations has not been called. + */ +SPVC_PUBLIC_API spvc_result spvc_context_create(spvc_context *context); + +/* Frees all memory allocations and objects associated with the context and its child objects. */ +SPVC_PUBLIC_API void spvc_context_destroy(spvc_context context); + +/* Frees all memory allocations and objects associated with the context and its child objects, but keeps the context alive. */ +SPVC_PUBLIC_API void spvc_context_release_allocations(spvc_context context); + +/* Get the string for the last error which was logged. */ +SPVC_PUBLIC_API const char *spvc_context_get_last_error_string(spvc_context context); + +/* Get notified in a callback when an error triggers. Useful for debugging. */ +typedef void (*spvc_error_callback)(void *userdata, const char *error); +SPVC_PUBLIC_API void spvc_context_set_error_callback(spvc_context context, spvc_error_callback cb, void *userdata); + +/* SPIR-V parsing interface. Maps to Parser which then creates a ParsedIR, and that IR is extracted into the handle. */ +SPVC_PUBLIC_API spvc_result spvc_context_parse_spirv(spvc_context context, const SpvId *spirv, size_t word_count, + spvc_parsed_ir *parsed_ir); + +/* + * Create a compiler backend. Capture mode controls if we construct by copy or move semantics. + * It is always recommended to use SPVC_CAPTURE_MODE_TAKE_OWNERSHIP if you only intend to cross-compile the IR once. + */ +SPVC_PUBLIC_API spvc_result spvc_context_create_compiler(spvc_context context, spvc_backend backend, + spvc_parsed_ir parsed_ir, spvc_capture_mode mode, + spvc_compiler *compiler); + +/* Maps directly to C++ API. */ +SPVC_PUBLIC_API unsigned spvc_compiler_get_current_id_bound(spvc_compiler compiler); + +/* Create compiler options, which will initialize defaults. */ +SPVC_PUBLIC_API spvc_result spvc_compiler_create_compiler_options(spvc_compiler compiler, + spvc_compiler_options *options); +/* Override options. Will return error if e.g. MSL options are used for the HLSL backend, etc. */ +SPVC_PUBLIC_API spvc_result spvc_compiler_options_set_bool(spvc_compiler_options options, + spvc_compiler_option option, spvc_bool value); +SPVC_PUBLIC_API spvc_result spvc_compiler_options_set_uint(spvc_compiler_options options, + spvc_compiler_option option, unsigned value); +/* Set compiler options. */ +SPVC_PUBLIC_API spvc_result spvc_compiler_install_compiler_options(spvc_compiler compiler, + spvc_compiler_options options); + +/* Compile IR into a string. *source is owned by the context, and caller must not free it themselves. */ +SPVC_PUBLIC_API spvc_result spvc_compiler_compile(spvc_compiler compiler, const char **source); + +/* Maps to C++ API. */ +SPVC_PUBLIC_API spvc_result spvc_compiler_add_header_line(spvc_compiler compiler, const char *line); +SPVC_PUBLIC_API spvc_result spvc_compiler_require_extension(spvc_compiler compiler, const char *ext); +SPVC_PUBLIC_API spvc_result spvc_compiler_flatten_buffer_block(spvc_compiler compiler, spvc_variable_id id); + +SPVC_PUBLIC_API spvc_bool spvc_compiler_variable_is_depth_or_compare(spvc_compiler compiler, spvc_variable_id id); + +SPVC_PUBLIC_API spvc_result spvc_compiler_mask_stage_output_by_location(spvc_compiler compiler, + unsigned location, unsigned component); +SPVC_PUBLIC_API spvc_result spvc_compiler_mask_stage_output_by_builtin(spvc_compiler compiler, SpvBuiltIn builtin); + +/* + * HLSL specifics. + * Maps to C++ API. + */ +SPVC_PUBLIC_API spvc_result spvc_compiler_hlsl_set_root_constants_layout(spvc_compiler compiler, + const spvc_hlsl_root_constants *constant_info, + size_t count); +SPVC_PUBLIC_API spvc_result spvc_compiler_hlsl_add_vertex_attribute_remap(spvc_compiler compiler, + const spvc_hlsl_vertex_attribute_remap *remap, + size_t remaps); +SPVC_PUBLIC_API spvc_variable_id spvc_compiler_hlsl_remap_num_workgroups_builtin(spvc_compiler compiler); + +SPVC_PUBLIC_API spvc_result spvc_compiler_hlsl_set_resource_binding_flags(spvc_compiler compiler, + spvc_hlsl_binding_flags flags); + +SPVC_PUBLIC_API spvc_result spvc_compiler_hlsl_add_resource_binding(spvc_compiler compiler, + const spvc_hlsl_resource_binding *binding); +SPVC_PUBLIC_API spvc_bool spvc_compiler_hlsl_is_resource_used(spvc_compiler compiler, + SpvExecutionModel model, + unsigned set, + unsigned binding); + +/* + * MSL specifics. + * Maps to C++ API. + */ +SPVC_PUBLIC_API spvc_bool spvc_compiler_msl_is_rasterization_disabled(spvc_compiler compiler); + +/* Obsolete. Renamed to needs_swizzle_buffer. */ +SPVC_PUBLIC_API spvc_bool spvc_compiler_msl_needs_aux_buffer(spvc_compiler compiler); +SPVC_PUBLIC_API spvc_bool spvc_compiler_msl_needs_swizzle_buffer(spvc_compiler compiler); +SPVC_PUBLIC_API spvc_bool spvc_compiler_msl_needs_buffer_size_buffer(spvc_compiler compiler); + +SPVC_PUBLIC_API spvc_bool spvc_compiler_msl_needs_output_buffer(spvc_compiler compiler); +SPVC_PUBLIC_API spvc_bool spvc_compiler_msl_needs_patch_output_buffer(spvc_compiler compiler); +SPVC_PUBLIC_API spvc_bool spvc_compiler_msl_needs_input_threadgroup_mem(spvc_compiler compiler); +SPVC_PUBLIC_API spvc_result spvc_compiler_msl_add_vertex_attribute(spvc_compiler compiler, + const spvc_msl_vertex_attribute *attrs); +SPVC_PUBLIC_API spvc_result spvc_compiler_msl_add_resource_binding(spvc_compiler compiler, + const spvc_msl_resource_binding *binding); +/* Deprecated; use spvc_compiler_msl_add_shader_input_2(). */ +SPVC_PUBLIC_API spvc_result spvc_compiler_msl_add_shader_input(spvc_compiler compiler, + const spvc_msl_shader_interface_var *input); +SPVC_PUBLIC_API spvc_result spvc_compiler_msl_add_shader_input_2(spvc_compiler compiler, + const spvc_msl_shader_interface_var_2 *input); +/* Deprecated; use spvc_compiler_msl_add_shader_output_2(). */ +SPVC_PUBLIC_API spvc_result spvc_compiler_msl_add_shader_output(spvc_compiler compiler, + const spvc_msl_shader_interface_var *output); +SPVC_PUBLIC_API spvc_result spvc_compiler_msl_add_shader_output_2(spvc_compiler compiler, + const spvc_msl_shader_interface_var_2 *output); +SPVC_PUBLIC_API spvc_result spvc_compiler_msl_add_discrete_descriptor_set(spvc_compiler compiler, unsigned desc_set); +SPVC_PUBLIC_API spvc_result spvc_compiler_msl_set_argument_buffer_device_address_space(spvc_compiler compiler, unsigned desc_set, spvc_bool device_address); + +/* Obsolete, use is_shader_input_used. */ +SPVC_PUBLIC_API spvc_bool spvc_compiler_msl_is_vertex_attribute_used(spvc_compiler compiler, unsigned location); +SPVC_PUBLIC_API spvc_bool spvc_compiler_msl_is_shader_input_used(spvc_compiler compiler, unsigned location); +SPVC_PUBLIC_API spvc_bool spvc_compiler_msl_is_shader_output_used(spvc_compiler compiler, unsigned location); + +SPVC_PUBLIC_API spvc_bool spvc_compiler_msl_is_resource_used(spvc_compiler compiler, + SpvExecutionModel model, + unsigned set, + unsigned binding); +SPVC_PUBLIC_API spvc_result spvc_compiler_msl_remap_constexpr_sampler(spvc_compiler compiler, spvc_variable_id id, const spvc_msl_constexpr_sampler *sampler); +SPVC_PUBLIC_API spvc_result spvc_compiler_msl_remap_constexpr_sampler_by_binding(spvc_compiler compiler, unsigned desc_set, unsigned binding, const spvc_msl_constexpr_sampler *sampler); +SPVC_PUBLIC_API spvc_result spvc_compiler_msl_remap_constexpr_sampler_ycbcr(spvc_compiler compiler, spvc_variable_id id, const spvc_msl_constexpr_sampler *sampler, const spvc_msl_sampler_ycbcr_conversion *conv); +SPVC_PUBLIC_API spvc_result spvc_compiler_msl_remap_constexpr_sampler_by_binding_ycbcr(spvc_compiler compiler, unsigned desc_set, unsigned binding, const spvc_msl_constexpr_sampler *sampler, const spvc_msl_sampler_ycbcr_conversion *conv); +SPVC_PUBLIC_API spvc_result spvc_compiler_msl_set_fragment_output_components(spvc_compiler compiler, unsigned location, unsigned components); + +SPVC_PUBLIC_API unsigned spvc_compiler_msl_get_automatic_resource_binding(spvc_compiler compiler, spvc_variable_id id); +SPVC_PUBLIC_API unsigned spvc_compiler_msl_get_automatic_resource_binding_secondary(spvc_compiler compiler, spvc_variable_id id); + +SPVC_PUBLIC_API spvc_result spvc_compiler_msl_add_dynamic_buffer(spvc_compiler compiler, unsigned desc_set, unsigned binding, unsigned index); + +SPVC_PUBLIC_API spvc_result spvc_compiler_msl_add_inline_uniform_block(spvc_compiler compiler, unsigned desc_set, unsigned binding); + +SPVC_PUBLIC_API spvc_result spvc_compiler_msl_set_combined_sampler_suffix(spvc_compiler compiler, const char *suffix); +SPVC_PUBLIC_API const char *spvc_compiler_msl_get_combined_sampler_suffix(spvc_compiler compiler); + +/* + * Reflect resources. + * Maps almost 1:1 to C++ API. + */ +SPVC_PUBLIC_API spvc_result spvc_compiler_get_active_interface_variables(spvc_compiler compiler, spvc_set *set); +SPVC_PUBLIC_API spvc_result spvc_compiler_set_enabled_interface_variables(spvc_compiler compiler, spvc_set set); +SPVC_PUBLIC_API spvc_result spvc_compiler_create_shader_resources(spvc_compiler compiler, spvc_resources *resources); +SPVC_PUBLIC_API spvc_result spvc_compiler_create_shader_resources_for_active_variables(spvc_compiler compiler, + spvc_resources *resources, + spvc_set active); +SPVC_PUBLIC_API spvc_result spvc_resources_get_resource_list_for_type(spvc_resources resources, spvc_resource_type type, + const spvc_reflected_resource **resource_list, + size_t *resource_size); + +SPVC_PUBLIC_API spvc_result spvc_resources_get_builtin_resource_list_for_type( + spvc_resources resources, spvc_builtin_resource_type type, + const spvc_reflected_builtin_resource **resource_list, + size_t *resource_size); + +/* + * Decorations. + * Maps to C++ API. + */ +SPVC_PUBLIC_API void spvc_compiler_set_decoration(spvc_compiler compiler, SpvId id, SpvDecoration decoration, + unsigned argument); +SPVC_PUBLIC_API void spvc_compiler_set_decoration_string(spvc_compiler compiler, SpvId id, SpvDecoration decoration, + const char *argument); +SPVC_PUBLIC_API void spvc_compiler_set_name(spvc_compiler compiler, SpvId id, const char *argument); +SPVC_PUBLIC_API void spvc_compiler_set_member_decoration(spvc_compiler compiler, spvc_type_id id, unsigned member_index, + SpvDecoration decoration, unsigned argument); +SPVC_PUBLIC_API void spvc_compiler_set_member_decoration_string(spvc_compiler compiler, spvc_type_id id, + unsigned member_index, SpvDecoration decoration, + const char *argument); +SPVC_PUBLIC_API void spvc_compiler_set_member_name(spvc_compiler compiler, spvc_type_id id, unsigned member_index, + const char *argument); +SPVC_PUBLIC_API void spvc_compiler_unset_decoration(spvc_compiler compiler, SpvId id, SpvDecoration decoration); +SPVC_PUBLIC_API void spvc_compiler_unset_member_decoration(spvc_compiler compiler, spvc_type_id id, + unsigned member_index, SpvDecoration decoration); + +SPVC_PUBLIC_API spvc_bool spvc_compiler_has_decoration(spvc_compiler compiler, SpvId id, SpvDecoration decoration); +SPVC_PUBLIC_API spvc_bool spvc_compiler_has_member_decoration(spvc_compiler compiler, spvc_type_id id, + unsigned member_index, SpvDecoration decoration); +SPVC_PUBLIC_API const char *spvc_compiler_get_name(spvc_compiler compiler, SpvId id); +SPVC_PUBLIC_API unsigned spvc_compiler_get_decoration(spvc_compiler compiler, SpvId id, SpvDecoration decoration); +SPVC_PUBLIC_API const char *spvc_compiler_get_decoration_string(spvc_compiler compiler, SpvId id, + SpvDecoration decoration); +SPVC_PUBLIC_API unsigned spvc_compiler_get_member_decoration(spvc_compiler compiler, spvc_type_id id, + unsigned member_index, SpvDecoration decoration); +SPVC_PUBLIC_API const char *spvc_compiler_get_member_decoration_string(spvc_compiler compiler, spvc_type_id id, + unsigned member_index, SpvDecoration decoration); +SPVC_PUBLIC_API const char *spvc_compiler_get_member_name(spvc_compiler compiler, spvc_type_id id, unsigned member_index); + +/* + * Entry points. + * Maps to C++ API. + */ +SPVC_PUBLIC_API spvc_result spvc_compiler_get_entry_points(spvc_compiler compiler, + const spvc_entry_point **entry_points, + size_t *num_entry_points); +SPVC_PUBLIC_API spvc_result spvc_compiler_set_entry_point(spvc_compiler compiler, const char *name, + SpvExecutionModel model); +SPVC_PUBLIC_API spvc_result spvc_compiler_rename_entry_point(spvc_compiler compiler, const char *old_name, + const char *new_name, SpvExecutionModel model); +SPVC_PUBLIC_API const char *spvc_compiler_get_cleansed_entry_point_name(spvc_compiler compiler, const char *name, + SpvExecutionModel model); +SPVC_PUBLIC_API void spvc_compiler_set_execution_mode(spvc_compiler compiler, SpvExecutionMode mode); +SPVC_PUBLIC_API void spvc_compiler_unset_execution_mode(spvc_compiler compiler, SpvExecutionMode mode); +SPVC_PUBLIC_API void spvc_compiler_set_execution_mode_with_arguments(spvc_compiler compiler, SpvExecutionMode mode, + unsigned arg0, unsigned arg1, unsigned arg2); +SPVC_PUBLIC_API spvc_result spvc_compiler_get_execution_modes(spvc_compiler compiler, const SpvExecutionMode **modes, + size_t *num_modes); +SPVC_PUBLIC_API unsigned spvc_compiler_get_execution_mode_argument(spvc_compiler compiler, SpvExecutionMode mode); +SPVC_PUBLIC_API unsigned spvc_compiler_get_execution_mode_argument_by_index(spvc_compiler compiler, + SpvExecutionMode mode, unsigned index); +SPVC_PUBLIC_API SpvExecutionModel spvc_compiler_get_execution_model(spvc_compiler compiler); +SPVC_PUBLIC_API void spvc_compiler_update_active_builtins(spvc_compiler compiler); +SPVC_PUBLIC_API spvc_bool spvc_compiler_has_active_builtin(spvc_compiler compiler, SpvBuiltIn builtin, SpvStorageClass storage); + +/* + * Type query interface. + * Maps to C++ API, except it's read-only. + */ +SPVC_PUBLIC_API spvc_type spvc_compiler_get_type_handle(spvc_compiler compiler, spvc_type_id id); + +/* Pulls out SPIRType::self. This effectively gives the type ID without array or pointer qualifiers. + * This is necessary when reflecting decoration/name information on members of a struct, + * which are placed in the base type, not the qualified type. + * This is similar to spvc_reflected_resource::base_type_id. */ +SPVC_PUBLIC_API spvc_type_id spvc_type_get_base_type_id(spvc_type type); + +SPVC_PUBLIC_API spvc_basetype spvc_type_get_basetype(spvc_type type); +SPVC_PUBLIC_API unsigned spvc_type_get_bit_width(spvc_type type); +SPVC_PUBLIC_API unsigned spvc_type_get_vector_size(spvc_type type); +SPVC_PUBLIC_API unsigned spvc_type_get_columns(spvc_type type); +SPVC_PUBLIC_API unsigned spvc_type_get_num_array_dimensions(spvc_type type); +SPVC_PUBLIC_API spvc_bool spvc_type_array_dimension_is_literal(spvc_type type, unsigned dimension); +SPVC_PUBLIC_API SpvId spvc_type_get_array_dimension(spvc_type type, unsigned dimension); +SPVC_PUBLIC_API unsigned spvc_type_get_num_member_types(spvc_type type); +SPVC_PUBLIC_API spvc_type_id spvc_type_get_member_type(spvc_type type, unsigned index); +SPVC_PUBLIC_API SpvStorageClass spvc_type_get_storage_class(spvc_type type); + +/* Image type query. */ +SPVC_PUBLIC_API spvc_type_id spvc_type_get_image_sampled_type(spvc_type type); +SPVC_PUBLIC_API SpvDim spvc_type_get_image_dimension(spvc_type type); +SPVC_PUBLIC_API spvc_bool spvc_type_get_image_is_depth(spvc_type type); +SPVC_PUBLIC_API spvc_bool spvc_type_get_image_arrayed(spvc_type type); +SPVC_PUBLIC_API spvc_bool spvc_type_get_image_multisampled(spvc_type type); +SPVC_PUBLIC_API spvc_bool spvc_type_get_image_is_storage(spvc_type type); +SPVC_PUBLIC_API SpvImageFormat spvc_type_get_image_storage_format(spvc_type type); +SPVC_PUBLIC_API SpvAccessQualifier spvc_type_get_image_access_qualifier(spvc_type type); + +/* + * Buffer layout query. + * Maps to C++ API. + */ +SPVC_PUBLIC_API spvc_result spvc_compiler_get_declared_struct_size(spvc_compiler compiler, spvc_type struct_type, size_t *size); +SPVC_PUBLIC_API spvc_result spvc_compiler_get_declared_struct_size_runtime_array(spvc_compiler compiler, + spvc_type struct_type, size_t array_size, size_t *size); +SPVC_PUBLIC_API spvc_result spvc_compiler_get_declared_struct_member_size(spvc_compiler compiler, spvc_type type, unsigned index, size_t *size); + +SPVC_PUBLIC_API spvc_result spvc_compiler_type_struct_member_offset(spvc_compiler compiler, + spvc_type type, unsigned index, unsigned *offset); +SPVC_PUBLIC_API spvc_result spvc_compiler_type_struct_member_array_stride(spvc_compiler compiler, + spvc_type type, unsigned index, unsigned *stride); +SPVC_PUBLIC_API spvc_result spvc_compiler_type_struct_member_matrix_stride(spvc_compiler compiler, + spvc_type type, unsigned index, unsigned *stride); + +/* + * Workaround helper functions. + * Maps to C++ API. + */ +SPVC_PUBLIC_API spvc_result spvc_compiler_build_dummy_sampler_for_combined_images(spvc_compiler compiler, spvc_variable_id *id); +SPVC_PUBLIC_API spvc_result spvc_compiler_build_combined_image_samplers(spvc_compiler compiler); +SPVC_PUBLIC_API spvc_result spvc_compiler_get_combined_image_samplers(spvc_compiler compiler, + const spvc_combined_image_sampler **samplers, + size_t *num_samplers); + +/* + * Constants + * Maps to C++ API. + */ +SPVC_PUBLIC_API spvc_result spvc_compiler_get_specialization_constants(spvc_compiler compiler, + const spvc_specialization_constant **constants, + size_t *num_constants); +SPVC_PUBLIC_API spvc_constant spvc_compiler_get_constant_handle(spvc_compiler compiler, + spvc_constant_id id); + +SPVC_PUBLIC_API spvc_constant_id spvc_compiler_get_work_group_size_specialization_constants(spvc_compiler compiler, + spvc_specialization_constant *x, + spvc_specialization_constant *y, + spvc_specialization_constant *z); + +/* + * Buffer ranges + * Maps to C++ API. + */ +SPVC_PUBLIC_API spvc_result spvc_compiler_get_active_buffer_ranges(spvc_compiler compiler, + spvc_variable_id id, + const spvc_buffer_range **ranges, + size_t *num_ranges); + +/* + * No stdint.h until C99, sigh :( + * For smaller types, the result is sign or zero-extended as appropriate. + * Maps to C++ API. + * TODO: The SPIRConstant query interface and modification interface is not quite complete. + */ +SPVC_PUBLIC_API float spvc_constant_get_scalar_fp16(spvc_constant constant, unsigned column, unsigned row); +SPVC_PUBLIC_API float spvc_constant_get_scalar_fp32(spvc_constant constant, unsigned column, unsigned row); +SPVC_PUBLIC_API double spvc_constant_get_scalar_fp64(spvc_constant constant, unsigned column, unsigned row); +SPVC_PUBLIC_API unsigned spvc_constant_get_scalar_u32(spvc_constant constant, unsigned column, unsigned row); +SPVC_PUBLIC_API int spvc_constant_get_scalar_i32(spvc_constant constant, unsigned column, unsigned row); +SPVC_PUBLIC_API unsigned spvc_constant_get_scalar_u16(spvc_constant constant, unsigned column, unsigned row); +SPVC_PUBLIC_API int spvc_constant_get_scalar_i16(spvc_constant constant, unsigned column, unsigned row); +SPVC_PUBLIC_API unsigned spvc_constant_get_scalar_u8(spvc_constant constant, unsigned column, unsigned row); +SPVC_PUBLIC_API int spvc_constant_get_scalar_i8(spvc_constant constant, unsigned column, unsigned row); +SPVC_PUBLIC_API void spvc_constant_get_subconstants(spvc_constant constant, const spvc_constant_id **constituents, size_t *count); +SPVC_PUBLIC_API spvc_type_id spvc_constant_get_type(spvc_constant constant); + +/* + * Misc reflection + * Maps to C++ API. + */ +SPVC_PUBLIC_API spvc_bool spvc_compiler_get_binary_offset_for_decoration(spvc_compiler compiler, + spvc_variable_id id, + SpvDecoration decoration, + unsigned *word_offset); + +SPVC_PUBLIC_API spvc_bool spvc_compiler_buffer_is_hlsl_counter_buffer(spvc_compiler compiler, spvc_variable_id id); +SPVC_PUBLIC_API spvc_bool spvc_compiler_buffer_get_hlsl_counter_buffer(spvc_compiler compiler, spvc_variable_id id, + spvc_variable_id *counter_id); + +SPVC_PUBLIC_API spvc_result spvc_compiler_get_declared_capabilities(spvc_compiler compiler, + const SpvCapability **capabilities, + size_t *num_capabilities); +SPVC_PUBLIC_API spvc_result spvc_compiler_get_declared_extensions(spvc_compiler compiler, const char ***extensions, + size_t *num_extensions); + +SPVC_PUBLIC_API const char *spvc_compiler_get_remapped_declared_block_name(spvc_compiler compiler, spvc_variable_id id); +SPVC_PUBLIC_API spvc_result spvc_compiler_get_buffer_block_decorations(spvc_compiler compiler, spvc_variable_id id, + const SpvDecoration **decorations, + size_t *num_decorations); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/local_packages/include/spirv_cross/spirv_cross_containers.hpp b/local_packages/include/spirv_cross/spirv_cross_containers.hpp new file mode 100644 index 0000000..1b32870 --- /dev/null +++ b/local_packages/include/spirv_cross/spirv_cross_containers.hpp @@ -0,0 +1,754 @@ +/* + * Copyright 2019-2021 Hans-Kristian Arntzen + * SPDX-License-Identifier: Apache-2.0 OR MIT + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * At your option, you may choose to accept this material under either: + * 1. The Apache License, Version 2.0, found at , or + * 2. The MIT License, found at . + */ + +#ifndef SPIRV_CROSS_CONTAINERS_HPP +#define SPIRV_CROSS_CONTAINERS_HPP + +#include "spirv_cross_error_handling.hpp" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef SPIRV_CROSS_NAMESPACE_OVERRIDE +#define SPIRV_CROSS_NAMESPACE SPIRV_CROSS_NAMESPACE_OVERRIDE +#else +#define SPIRV_CROSS_NAMESPACE spirv_cross +#endif + +namespace SPIRV_CROSS_NAMESPACE +{ +#ifndef SPIRV_CROSS_FORCE_STL_TYPES +// std::aligned_storage does not support size == 0, so roll our own. +template +class AlignedBuffer +{ +public: + T *data() + { +#if defined(_MSC_VER) && _MSC_VER < 1900 + // MSVC 2013 workarounds, sigh ... + // Only use this workaround on MSVC 2013 due to some confusion around default initialized unions. + // Spec seems to suggest the memory will be zero-initialized, which is *not* what we want. + return reinterpret_cast(u.aligned_char); +#else + return reinterpret_cast(aligned_char); +#endif + } + +private: +#if defined(_MSC_VER) && _MSC_VER < 1900 + // MSVC 2013 workarounds, sigh ... + union + { + char aligned_char[sizeof(T) * N]; + double dummy_aligner; + } u; +#else + alignas(T) char aligned_char[sizeof(T) * N]; +#endif +}; + +template +class AlignedBuffer +{ +public: + T *data() + { + return nullptr; + } +}; + +// An immutable version of SmallVector which erases type information about storage. +template +class VectorView +{ +public: + T &operator[](size_t i) SPIRV_CROSS_NOEXCEPT + { + return ptr[i]; + } + + const T &operator[](size_t i) const SPIRV_CROSS_NOEXCEPT + { + return ptr[i]; + } + + bool empty() const SPIRV_CROSS_NOEXCEPT + { + return buffer_size == 0; + } + + size_t size() const SPIRV_CROSS_NOEXCEPT + { + return buffer_size; + } + + T *data() SPIRV_CROSS_NOEXCEPT + { + return ptr; + } + + const T *data() const SPIRV_CROSS_NOEXCEPT + { + return ptr; + } + + T *begin() SPIRV_CROSS_NOEXCEPT + { + return ptr; + } + + T *end() SPIRV_CROSS_NOEXCEPT + { + return ptr + buffer_size; + } + + const T *begin() const SPIRV_CROSS_NOEXCEPT + { + return ptr; + } + + const T *end() const SPIRV_CROSS_NOEXCEPT + { + return ptr + buffer_size; + } + + T &front() SPIRV_CROSS_NOEXCEPT + { + return ptr[0]; + } + + const T &front() const SPIRV_CROSS_NOEXCEPT + { + return ptr[0]; + } + + T &back() SPIRV_CROSS_NOEXCEPT + { + return ptr[buffer_size - 1]; + } + + const T &back() const SPIRV_CROSS_NOEXCEPT + { + return ptr[buffer_size - 1]; + } + + // Makes it easier to consume SmallVector. +#if defined(_MSC_VER) && _MSC_VER < 1900 + explicit operator std::vector() const + { + // Another MSVC 2013 workaround. It does not understand lvalue/rvalue qualified operations. + return std::vector(ptr, ptr + buffer_size); + } +#else + // Makes it easier to consume SmallVector. + explicit operator std::vector() const & + { + return std::vector(ptr, ptr + buffer_size); + } + + // If we are converting as an r-value, we can pilfer our elements. + explicit operator std::vector() && + { + return std::vector(std::make_move_iterator(ptr), std::make_move_iterator(ptr + buffer_size)); + } +#endif + + // Avoid sliced copies. Base class should only be read as a reference. + VectorView(const VectorView &) = delete; + void operator=(const VectorView &) = delete; + +protected: + VectorView() = default; + T *ptr = nullptr; + size_t buffer_size = 0; +}; + +// Simple vector which supports up to N elements inline, without malloc/free. +// We use a lot of throwaway vectors all over the place which triggers allocations. +// This class only implements the subset of std::vector we need in SPIRV-Cross. +// It is *NOT* a drop-in replacement in general projects. +template +class SmallVector : public VectorView +{ +public: + SmallVector() SPIRV_CROSS_NOEXCEPT + { + this->ptr = stack_storage.data(); + buffer_capacity = N; + } + + template + SmallVector(const U *arg_list_begin, const U *arg_list_end) SPIRV_CROSS_NOEXCEPT : SmallVector() + { + auto count = size_t(arg_list_end - arg_list_begin); + reserve(count); + for (size_t i = 0; i < count; i++, arg_list_begin++) + new (&this->ptr[i]) T(*arg_list_begin); + this->buffer_size = count; + } + + template + SmallVector(std::initializer_list init) SPIRV_CROSS_NOEXCEPT : SmallVector(init.begin(), init.end()) + { + } + + template + SmallVector(const U (&init)[M]) SPIRV_CROSS_NOEXCEPT : SmallVector(init, init + M) + { + } + + SmallVector(SmallVector &&other) SPIRV_CROSS_NOEXCEPT : SmallVector() + { + *this = std::move(other); + } + + SmallVector &operator=(SmallVector &&other) SPIRV_CROSS_NOEXCEPT + { + clear(); + if (other.ptr != other.stack_storage.data()) + { + // Pilfer allocated pointer. + if (this->ptr != stack_storage.data()) + free(this->ptr); + this->ptr = other.ptr; + this->buffer_size = other.buffer_size; + buffer_capacity = other.buffer_capacity; + other.ptr = nullptr; + other.buffer_size = 0; + other.buffer_capacity = 0; + } + else + { + // Need to move the stack contents individually. + reserve(other.buffer_size); + for (size_t i = 0; i < other.buffer_size; i++) + { + new (&this->ptr[i]) T(std::move(other.ptr[i])); + other.ptr[i].~T(); + } + this->buffer_size = other.buffer_size; + other.buffer_size = 0; + } + return *this; + } + + SmallVector(const SmallVector &other) SPIRV_CROSS_NOEXCEPT : SmallVector() + { + *this = other; + } + + SmallVector &operator=(const SmallVector &other) SPIRV_CROSS_NOEXCEPT + { + if (this == &other) + return *this; + + clear(); + reserve(other.buffer_size); + for (size_t i = 0; i < other.buffer_size; i++) + new (&this->ptr[i]) T(other.ptr[i]); + this->buffer_size = other.buffer_size; + return *this; + } + + explicit SmallVector(size_t count) SPIRV_CROSS_NOEXCEPT : SmallVector() + { + resize(count); + } + + ~SmallVector() + { + clear(); + if (this->ptr != stack_storage.data()) + free(this->ptr); + } + + void clear() SPIRV_CROSS_NOEXCEPT + { + for (size_t i = 0; i < this->buffer_size; i++) + this->ptr[i].~T(); + this->buffer_size = 0; + } + + void push_back(const T &t) SPIRV_CROSS_NOEXCEPT + { + reserve(this->buffer_size + 1); + new (&this->ptr[this->buffer_size]) T(t); + this->buffer_size++; + } + + void push_back(T &&t) SPIRV_CROSS_NOEXCEPT + { + reserve(this->buffer_size + 1); + new (&this->ptr[this->buffer_size]) T(std::move(t)); + this->buffer_size++; + } + + void pop_back() SPIRV_CROSS_NOEXCEPT + { + // Work around false positive warning on GCC 8.3. + // Calling pop_back on empty vector is undefined. + if (!this->empty()) + resize(this->buffer_size - 1); + } + + template + void emplace_back(Ts &&... ts) SPIRV_CROSS_NOEXCEPT + { + reserve(this->buffer_size + 1); + new (&this->ptr[this->buffer_size]) T(std::forward(ts)...); + this->buffer_size++; + } + + void reserve(size_t count) SPIRV_CROSS_NOEXCEPT + { + if ((count > (std::numeric_limits::max)() / sizeof(T)) || + (count > (std::numeric_limits::max)() / 2)) + { + // Only way this should ever happen is with garbage input, terminate. + std::terminate(); + } + + if (count > buffer_capacity) + { + size_t target_capacity = buffer_capacity; + if (target_capacity == 0) + target_capacity = 1; + + // Weird parens works around macro issues on Windows if NOMINMAX is not used. + target_capacity = (std::max)(target_capacity, N); + + // Need to ensure there is a POT value of target capacity which is larger than count, + // otherwise this will overflow. + while (target_capacity < count) + target_capacity <<= 1u; + + T *new_buffer = + target_capacity > N ? static_cast(malloc(target_capacity * sizeof(T))) : stack_storage.data(); + + // If we actually fail this malloc, we are hosed anyways, there is no reason to attempt recovery. + if (!new_buffer) + std::terminate(); + + // In case for some reason two allocations both come from same stack. + if (new_buffer != this->ptr) + { + // We don't deal with types which can throw in move constructor. + for (size_t i = 0; i < this->buffer_size; i++) + { + new (&new_buffer[i]) T(std::move(this->ptr[i])); + this->ptr[i].~T(); + } + } + + if (this->ptr != stack_storage.data()) + free(this->ptr); + this->ptr = new_buffer; + buffer_capacity = target_capacity; + } + } + + void insert(T *itr, const T *insert_begin, const T *insert_end) SPIRV_CROSS_NOEXCEPT + { + auto count = size_t(insert_end - insert_begin); + if (itr == this->end()) + { + reserve(this->buffer_size + count); + for (size_t i = 0; i < count; i++, insert_begin++) + new (&this->ptr[this->buffer_size + i]) T(*insert_begin); + this->buffer_size += count; + } + else + { + if (this->buffer_size + count > buffer_capacity) + { + auto target_capacity = this->buffer_size + count; + if (target_capacity == 0) + target_capacity = 1; + if (target_capacity < N) + target_capacity = N; + + while (target_capacity < count) + target_capacity <<= 1u; + + // Need to allocate new buffer. Move everything to a new buffer. + T *new_buffer = + target_capacity > N ? static_cast(malloc(target_capacity * sizeof(T))) : stack_storage.data(); + + // If we actually fail this malloc, we are hosed anyways, there is no reason to attempt recovery. + if (!new_buffer) + std::terminate(); + + // First, move elements from source buffer to new buffer. + // We don't deal with types which can throw in move constructor. + auto *target_itr = new_buffer; + auto *original_source_itr = this->begin(); + + if (new_buffer != this->ptr) + { + while (original_source_itr != itr) + { + new (target_itr) T(std::move(*original_source_itr)); + original_source_itr->~T(); + ++original_source_itr; + ++target_itr; + } + } + + // Copy-construct new elements. + for (auto *source_itr = insert_begin; source_itr != insert_end; ++source_itr, ++target_itr) + new (target_itr) T(*source_itr); + + // Move over the other half. + if (new_buffer != this->ptr || insert_begin != insert_end) + { + while (original_source_itr != this->end()) + { + new (target_itr) T(std::move(*original_source_itr)); + original_source_itr->~T(); + ++original_source_itr; + ++target_itr; + } + } + + if (this->ptr != stack_storage.data()) + free(this->ptr); + this->ptr = new_buffer; + buffer_capacity = target_capacity; + } + else + { + // Move in place, need to be a bit careful about which elements are constructed and which are not. + // Move the end and construct the new elements. + auto *target_itr = this->end() + count; + auto *source_itr = this->end(); + while (target_itr != this->end() && source_itr != itr) + { + --target_itr; + --source_itr; + new (target_itr) T(std::move(*source_itr)); + } + + // For already constructed elements we can move-assign. + std::move_backward(itr, source_itr, target_itr); + + // For the inserts which go to already constructed elements, we can do a plain copy. + while (itr != this->end() && insert_begin != insert_end) + *itr++ = *insert_begin++; + + // For inserts into newly allocated memory, we must copy-construct instead. + while (insert_begin != insert_end) + { + new (itr) T(*insert_begin); + ++itr; + ++insert_begin; + } + } + + this->buffer_size += count; + } + } + + void insert(T *itr, const T &value) SPIRV_CROSS_NOEXCEPT + { + insert(itr, &value, &value + 1); + } + + T *erase(T *itr) SPIRV_CROSS_NOEXCEPT + { + std::move(itr + 1, this->end(), itr); + this->ptr[--this->buffer_size].~T(); + return itr; + } + + void erase(T *start_erase, T *end_erase) SPIRV_CROSS_NOEXCEPT + { + if (end_erase == this->end()) + { + resize(size_t(start_erase - this->begin())); + } + else + { + auto new_size = this->buffer_size - (end_erase - start_erase); + std::move(end_erase, this->end(), start_erase); + resize(new_size); + } + } + + void resize(size_t new_size) SPIRV_CROSS_NOEXCEPT + { + if (new_size < this->buffer_size) + { + for (size_t i = new_size; i < this->buffer_size; i++) + this->ptr[i].~T(); + } + else if (new_size > this->buffer_size) + { + reserve(new_size); + for (size_t i = this->buffer_size; i < new_size; i++) + new (&this->ptr[i]) T(); + } + + this->buffer_size = new_size; + } + +private: + size_t buffer_capacity = 0; + AlignedBuffer stack_storage; +}; + +// A vector without stack storage. +// Could also be a typedef-ed to std::vector, +// but might as well use the one we have. +template +using Vector = SmallVector; + +#else // SPIRV_CROSS_FORCE_STL_TYPES + +template +using SmallVector = std::vector; +template +using Vector = std::vector; +template +using VectorView = std::vector; + +#endif // SPIRV_CROSS_FORCE_STL_TYPES + +// An object pool which we use for allocating IVariant-derived objects. +// We know we are going to allocate a bunch of objects of each type, +// so amortize the mallocs. +class ObjectPoolBase +{ +public: + virtual ~ObjectPoolBase() = default; + virtual void deallocate_opaque(void *ptr) = 0; +}; + +template +class ObjectPool : public ObjectPoolBase +{ +public: + explicit ObjectPool(unsigned start_object_count_ = 16) + : start_object_count(start_object_count_) + { + } + + template + T *allocate(P &&... p) + { + if (vacants.empty()) + { + unsigned num_objects = start_object_count << memory.size(); + T *ptr = static_cast(malloc(num_objects * sizeof(T))); + if (!ptr) + return nullptr; + + for (unsigned i = 0; i < num_objects; i++) + vacants.push_back(&ptr[i]); + + memory.emplace_back(ptr); + } + + T *ptr = vacants.back(); + vacants.pop_back(); + new (ptr) T(std::forward

(p)...); + return ptr; + } + + void deallocate(T *ptr) + { + ptr->~T(); + vacants.push_back(ptr); + } + + void deallocate_opaque(void *ptr) override + { + deallocate(static_cast(ptr)); + } + + void clear() + { + vacants.clear(); + memory.clear(); + } + +protected: + Vector vacants; + + struct MallocDeleter + { + void operator()(T *ptr) + { + ::free(ptr); + } + }; + + SmallVector> memory; + unsigned start_object_count; +}; + +template +class StringStream +{ +public: + StringStream() + { + reset(); + } + + ~StringStream() + { + reset(); + } + + // Disable copies and moves. Makes it easier to implement, and we don't need it. + StringStream(const StringStream &) = delete; + void operator=(const StringStream &) = delete; + + template ::value, int>::type = 0> + StringStream &operator<<(const T &t) + { + auto s = std::to_string(t); + append(s.data(), s.size()); + return *this; + } + + // Only overload this to make float/double conversions ambiguous. + StringStream &operator<<(uint32_t v) + { + auto s = std::to_string(v); + append(s.data(), s.size()); + return *this; + } + + StringStream &operator<<(char c) + { + append(&c, 1); + return *this; + } + + StringStream &operator<<(const std::string &s) + { + append(s.data(), s.size()); + return *this; + } + + StringStream &operator<<(const char *s) + { + append(s, strlen(s)); + return *this; + } + + template + StringStream &operator<<(const char (&s)[N]) + { + append(s, strlen(s)); + return *this; + } + + std::string str() const + { + std::string ret; + size_t target_size = 0; + for (auto &saved : saved_buffers) + target_size += saved.offset; + target_size += current_buffer.offset; + ret.reserve(target_size); + + for (auto &saved : saved_buffers) + ret.insert(ret.end(), saved.buffer, saved.buffer + saved.offset); + ret.insert(ret.end(), current_buffer.buffer, current_buffer.buffer + current_buffer.offset); + return ret; + } + + void reset() + { + for (auto &saved : saved_buffers) + if (saved.buffer != stack_buffer) + free(saved.buffer); + if (current_buffer.buffer != stack_buffer) + free(current_buffer.buffer); + + saved_buffers.clear(); + current_buffer.buffer = stack_buffer; + current_buffer.offset = 0; + current_buffer.size = sizeof(stack_buffer); + } + +private: + struct Buffer + { + char *buffer = nullptr; + size_t offset = 0; + size_t size = 0; + }; + Buffer current_buffer; + char stack_buffer[StackSize]; + SmallVector saved_buffers; + + void append(const char *s, size_t len) + { + size_t avail = current_buffer.size - current_buffer.offset; + if (avail < len) + { + if (avail > 0) + { + memcpy(current_buffer.buffer + current_buffer.offset, s, avail); + s += avail; + len -= avail; + current_buffer.offset += avail; + } + + saved_buffers.push_back(current_buffer); + size_t target_size = len > BlockSize ? len : BlockSize; + current_buffer.buffer = static_cast(malloc(target_size)); + if (!current_buffer.buffer) + SPIRV_CROSS_THROW("Out of memory."); + + memcpy(current_buffer.buffer, s, len); + current_buffer.offset = len; + current_buffer.size = target_size; + } + else + { + memcpy(current_buffer.buffer + current_buffer.offset, s, len); + current_buffer.offset += len; + } + } +}; + +} // namespace SPIRV_CROSS_NAMESPACE + +#endif diff --git a/local_packages/include/spirv_cross/spirv_cross_error_handling.hpp b/local_packages/include/spirv_cross/spirv_cross_error_handling.hpp new file mode 100644 index 0000000..e96ebb9 --- /dev/null +++ b/local_packages/include/spirv_cross/spirv_cross_error_handling.hpp @@ -0,0 +1,94 @@ +/* + * Copyright 2015-2021 Arm Limited + * SPDX-License-Identifier: Apache-2.0 OR MIT + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * At your option, you may choose to accept this material under either: + * 1. The Apache License, Version 2.0, found at , or + * 2. The MIT License, found at . + */ + +#ifndef SPIRV_CROSS_ERROR_HANDLING +#define SPIRV_CROSS_ERROR_HANDLING + +#include +#include +#include +#ifndef SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS +#include +#endif + +#ifdef SPIRV_CROSS_NAMESPACE_OVERRIDE +#define SPIRV_CROSS_NAMESPACE SPIRV_CROSS_NAMESPACE_OVERRIDE +#else +#define SPIRV_CROSS_NAMESPACE spirv_cross +#endif + +namespace SPIRV_CROSS_NAMESPACE +{ +#ifdef SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS +#if !defined(_MSC_VER) || defined(__clang__) +[[noreturn]] +#elif defined(_MSC_VER) +__declspec(noreturn) +#endif +inline void +report_and_abort(const std::string &msg) +{ +#ifdef NDEBUG + (void)msg; +#else + fprintf(stderr, "There was a compiler error: %s\n", msg.c_str()); +#endif + fflush(stderr); + abort(); +} + +#define SPIRV_CROSS_THROW(x) report_and_abort(x) +#else +class CompilerError : public std::runtime_error +{ +public: + explicit CompilerError(const std::string &str) + : std::runtime_error(str) + { + } +}; + +#define SPIRV_CROSS_THROW(x) throw CompilerError(x) +#endif + +// MSVC 2013 does not have noexcept. We need this for Variant to get move constructor to work correctly +// instead of copy constructor. +// MSVC 2013 ignores that move constructors cannot throw in std::vector, so just don't define it. +#if defined(_MSC_VER) && _MSC_VER < 1900 +#define SPIRV_CROSS_NOEXCEPT +#else +#define SPIRV_CROSS_NOEXCEPT noexcept +#endif + +#if __cplusplus >= 201402l +#define SPIRV_CROSS_DEPRECATED(reason) [[deprecated(reason)]] +#elif defined(__GNUC__) +#define SPIRV_CROSS_DEPRECATED(reason) __attribute__((deprecated)) +#elif defined(_MSC_VER) +#define SPIRV_CROSS_DEPRECATED(reason) __declspec(deprecated(reason)) +#else +#define SPIRV_CROSS_DEPRECATED(reason) +#endif +} // namespace SPIRV_CROSS_NAMESPACE + +#endif diff --git a/local_packages/include/spirv_cross/spirv_cross_parsed_ir.hpp b/local_packages/include/spirv_cross/spirv_cross_parsed_ir.hpp new file mode 100644 index 0000000..7f35c38 --- /dev/null +++ b/local_packages/include/spirv_cross/spirv_cross_parsed_ir.hpp @@ -0,0 +1,256 @@ +/* + * Copyright 2018-2021 Arm Limited + * SPDX-License-Identifier: Apache-2.0 OR MIT + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * At your option, you may choose to accept this material under either: + * 1. The Apache License, Version 2.0, found at , or + * 2. The MIT License, found at . + */ + +#ifndef SPIRV_CROSS_PARSED_IR_HPP +#define SPIRV_CROSS_PARSED_IR_HPP + +#include "spirv_common.hpp" +#include +#include + +namespace SPIRV_CROSS_NAMESPACE +{ + +// This data structure holds all information needed to perform cross-compilation and reflection. +// It is the output of the Parser, but any implementation could create this structure. +// It is intentionally very "open" and struct-like with some helper functions to deal with decorations. +// Parser is the reference implementation of how this data structure should be filled in. + +class ParsedIR +{ +private: + // This must be destroyed after the "ids" vector. + std::unique_ptr pool_group; + +public: + ParsedIR(); + + // Due to custom allocations from object pools, we cannot use a default copy constructor. + ParsedIR(const ParsedIR &other); + ParsedIR &operator=(const ParsedIR &other); + + // Moves are unproblematic, but we need to implement it anyways, since MSVC 2013 does not understand + // how to default-implement these. + ParsedIR(ParsedIR &&other) SPIRV_CROSS_NOEXCEPT; + ParsedIR &operator=(ParsedIR &&other) SPIRV_CROSS_NOEXCEPT; + + // Resizes ids, meta and block_meta. + void set_id_bounds(uint32_t bounds); + + // The raw SPIR-V, instructions and opcodes refer to this by offset + count. + std::vector spirv; + + // Holds various data structures which inherit from IVariant. + SmallVector ids; + + // Various meta data for IDs, decorations, names, etc. + std::unordered_map meta; + + // Holds all IDs which have a certain type. + // This is needed so we can iterate through a specific kind of resource quickly, + // and in-order of module declaration. + SmallVector ids_for_type[TypeCount]; + + // Special purpose lists which contain a union of types. + // This is needed so we can declare specialization constants and structs in an interleaved fashion, + // among other things. + // Constants can be undef or of struct type, and struct array sizes can use specialization constants. + SmallVector ids_for_constant_undef_or_type; + SmallVector ids_for_constant_or_variable; + + // We need to keep track of the width the Ops that contains a type for the + // OpSwitch instruction, since this one doesn't contains the type in the + // instruction itself. And in some case we need to cast the condition to + // wider types. We only need the width to do the branch fixup since the + // type check itself can be done at runtime + std::unordered_map load_type_width; + + // Declared capabilities and extensions in the SPIR-V module. + // Not really used except for reflection at the moment. + SmallVector declared_capabilities; + SmallVector declared_extensions; + + // Meta data about blocks. The cross-compiler needs to query if a block is either of these types. + // It is a bitset as there can be more than one tag per block. + enum BlockMetaFlagBits + { + BLOCK_META_LOOP_HEADER_BIT = 1 << 0, + BLOCK_META_CONTINUE_BIT = 1 << 1, + BLOCK_META_LOOP_MERGE_BIT = 1 << 2, + BLOCK_META_SELECTION_MERGE_BIT = 1 << 3, + BLOCK_META_MULTISELECT_MERGE_BIT = 1 << 4 + }; + using BlockMetaFlags = uint8_t; + SmallVector block_meta; + std::unordered_map continue_block_to_loop_header; + + // Normally, we'd stick SPIREntryPoint in ids array, but it conflicts with SPIRFunction. + // Entry points can therefore be seen as some sort of meta structure. + std::unordered_map entry_points; + FunctionID default_entry_point = 0; + + struct Source + { + uint32_t version = 0; + bool es = false; + bool known = false; + bool hlsl = false; + + Source() = default; + }; + + Source source; + + spv::AddressingModel addressing_model = spv::AddressingModelMax; + spv::MemoryModel memory_model = spv::MemoryModelMax; + + // Decoration handling methods. + // Can be useful for simple "raw" reflection. + // However, most members are here because the Parser needs most of these, + // and might as well just have the whole suite of decoration/name handling in one place. + void set_name(ID id, const std::string &name); + const std::string &get_name(ID id) const; + void set_decoration(ID id, spv::Decoration decoration, uint32_t argument = 0); + void set_decoration_string(ID id, spv::Decoration decoration, const std::string &argument); + bool has_decoration(ID id, spv::Decoration decoration) const; + uint32_t get_decoration(ID id, spv::Decoration decoration) const; + const std::string &get_decoration_string(ID id, spv::Decoration decoration) const; + const Bitset &get_decoration_bitset(ID id) const; + void unset_decoration(ID id, spv::Decoration decoration); + + // Decoration handling methods (for members of a struct). + void set_member_name(TypeID id, uint32_t index, const std::string &name); + const std::string &get_member_name(TypeID id, uint32_t index) const; + void set_member_decoration(TypeID id, uint32_t index, spv::Decoration decoration, uint32_t argument = 0); + void set_member_decoration_string(TypeID id, uint32_t index, spv::Decoration decoration, + const std::string &argument); + uint32_t get_member_decoration(TypeID id, uint32_t index, spv::Decoration decoration) const; + const std::string &get_member_decoration_string(TypeID id, uint32_t index, spv::Decoration decoration) const; + bool has_member_decoration(TypeID id, uint32_t index, spv::Decoration decoration) const; + const Bitset &get_member_decoration_bitset(TypeID id, uint32_t index) const; + void unset_member_decoration(TypeID id, uint32_t index, spv::Decoration decoration); + + void mark_used_as_array_length(ID id); + uint32_t increase_bound_by(uint32_t count); + Bitset get_buffer_block_flags(const SPIRVariable &var) const; + Bitset get_buffer_block_type_flags(const SPIRType &type) const; + + void add_typed_id(Types type, ID id); + void remove_typed_id(Types type, ID id); + + class LoopLock + { + public: + explicit LoopLock(uint32_t *counter); + LoopLock(const LoopLock &) = delete; + void operator=(const LoopLock &) = delete; + LoopLock(LoopLock &&other) SPIRV_CROSS_NOEXCEPT; + LoopLock &operator=(LoopLock &&other) SPIRV_CROSS_NOEXCEPT; + ~LoopLock(); + + private: + uint32_t *lock; + }; + + // This must be held while iterating over a type ID array. + // It is undefined if someone calls set<>() while we're iterating over a data structure, so we must + // make sure that this case is avoided. + + // If we have a hard lock, it is an error to call set<>(), and an exception is thrown. + // If we have a soft lock, we silently ignore any additions to the typed arrays. + // This should only be used for physical ID remapping where we need to create an ID, but we will never + // care about iterating over them. + LoopLock create_loop_hard_lock() const; + LoopLock create_loop_soft_lock() const; + + template + void for_each_typed_id(const Op &op) + { + auto loop_lock = create_loop_hard_lock(); + for (auto &id : ids_for_type[T::type]) + { + if (ids[id].get_type() == static_cast(T::type)) + op(id, get(id)); + } + } + + template + void for_each_typed_id(const Op &op) const + { + auto loop_lock = create_loop_hard_lock(); + for (auto &id : ids_for_type[T::type]) + { + if (ids[id].get_type() == static_cast(T::type)) + op(id, get(id)); + } + } + + template + void reset_all_of_type() + { + reset_all_of_type(static_cast(T::type)); + } + + void reset_all_of_type(Types type); + + Meta *find_meta(ID id); + const Meta *find_meta(ID id) const; + + const std::string &get_empty_string() const + { + return empty_string; + } + + void make_constant_null(uint32_t id, uint32_t type, bool add_to_typed_id_set); + + void fixup_reserved_names(); + + static void sanitize_underscores(std::string &str); + static void sanitize_identifier(std::string &str, bool member, bool allow_reserved_prefixes); + static bool is_globally_reserved_identifier(std::string &str, bool allow_reserved_prefixes); + + uint32_t get_spirv_version() const; + +private: + template + T &get(uint32_t id) + { + return variant_get(ids[id]); + } + + template + const T &get(uint32_t id) const + { + return variant_get(ids[id]); + } + + mutable uint32_t loop_iteration_depth_hard = 0; + mutable uint32_t loop_iteration_depth_soft = 0; + std::string empty_string; + Bitset cleared_bitset; + + std::unordered_set meta_needing_name_fixup; +}; +} // namespace SPIRV_CROSS_NAMESPACE + +#endif diff --git a/local_packages/include/spirv_cross/spirv_cross_util.hpp b/local_packages/include/spirv_cross/spirv_cross_util.hpp new file mode 100644 index 0000000..e6e3fcd --- /dev/null +++ b/local_packages/include/spirv_cross/spirv_cross_util.hpp @@ -0,0 +1,37 @@ +/* + * Copyright 2015-2021 Arm Limited + * SPDX-License-Identifier: Apache-2.0 OR MIT + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * At your option, you may choose to accept this material under either: + * 1. The Apache License, Version 2.0, found at , or + * 2. The MIT License, found at . + */ + +#ifndef SPIRV_CROSS_UTIL_HPP +#define SPIRV_CROSS_UTIL_HPP + +#include "spirv_cross.hpp" + +namespace spirv_cross_util +{ +void rename_interface_variable(SPIRV_CROSS_NAMESPACE::Compiler &compiler, + const SPIRV_CROSS_NAMESPACE::SmallVector &resources, + uint32_t location, const std::string &name); +void inherit_combined_sampler_bindings(SPIRV_CROSS_NAMESPACE::Compiler &compiler); +} // namespace spirv_cross_util + +#endif diff --git a/local_packages/include/spirv_cross/spirv_glsl.hpp b/local_packages/include/spirv_cross/spirv_glsl.hpp new file mode 100644 index 0000000..c3f2bfc --- /dev/null +++ b/local_packages/include/spirv_cross/spirv_glsl.hpp @@ -0,0 +1,1001 @@ +/* + * Copyright 2015-2021 Arm Limited + * SPDX-License-Identifier: Apache-2.0 OR MIT + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * At your option, you may choose to accept this material under either: + * 1. The Apache License, Version 2.0, found at , or + * 2. The MIT License, found at . + */ + +#ifndef SPIRV_CROSS_GLSL_HPP +#define SPIRV_CROSS_GLSL_HPP + +#include "GLSL.std.450.h" +#include "spirv_cross.hpp" +#include +#include +#include + +namespace SPIRV_CROSS_NAMESPACE +{ +enum PlsFormat +{ + PlsNone = 0, + + PlsR11FG11FB10F, + PlsR32F, + PlsRG16F, + PlsRGB10A2, + PlsRGBA8, + PlsRG16, + + PlsRGBA8I, + PlsRG16I, + + PlsRGB10A2UI, + PlsRGBA8UI, + PlsRG16UI, + PlsR32UI +}; + +struct PlsRemap +{ + uint32_t id; + PlsFormat format; +}; + +enum AccessChainFlagBits +{ + ACCESS_CHAIN_INDEX_IS_LITERAL_BIT = 1 << 0, + ACCESS_CHAIN_CHAIN_ONLY_BIT = 1 << 1, + ACCESS_CHAIN_PTR_CHAIN_BIT = 1 << 2, + ACCESS_CHAIN_SKIP_REGISTER_EXPRESSION_READ_BIT = 1 << 3, + ACCESS_CHAIN_LITERAL_MSB_FORCE_ID = 1 << 4, + ACCESS_CHAIN_FLATTEN_ALL_MEMBERS_BIT = 1 << 5, + ACCESS_CHAIN_FORCE_COMPOSITE_BIT = 1 << 6 +}; +typedef uint32_t AccessChainFlags; + +class CompilerGLSL : public Compiler +{ +public: + struct Options + { + // The shading language version. Corresponds to #version $VALUE. + uint32_t version = 450; + + // Emit the OpenGL ES shading language instead of desktop OpenGL. + bool es = false; + + // Debug option to always emit temporary variables for all expressions. + bool force_temporary = false; + // Debug option, can be increased in an attempt to workaround SPIRV-Cross bugs temporarily. + // If this limit has to be increased, it points to an implementation bug. + // In certain scenarios, the maximum number of debug iterations may increase beyond this limit + // as long as we can prove we're making certain kinds of forward progress. + uint32_t force_recompile_max_debug_iterations = 3; + + // If true, Vulkan GLSL features are used instead of GL-compatible features. + // Mostly useful for debugging SPIR-V files. + bool vulkan_semantics = false; + + // If true, gl_PerVertex is explicitly redeclared in vertex, geometry and tessellation shaders. + // The members of gl_PerVertex is determined by which built-ins are declared by the shader. + // This option is ignored in ES versions, as redeclaration in ES is not required, and it depends on a different extension + // (EXT_shader_io_blocks) which makes things a bit more fuzzy. + bool separate_shader_objects = false; + + // Flattens multidimensional arrays, e.g. float foo[a][b][c] into single-dimensional arrays, + // e.g. float foo[a * b * c]. + // This function does not change the actual SPIRType of any object. + // Only the generated code, including declarations of interface variables are changed to be single array dimension. + bool flatten_multidimensional_arrays = false; + + // For older desktop GLSL targets than version 420, the + // GL_ARB_shading_language_420pack extensions is used to be able to support + // layout(binding) on UBOs and samplers. + // If disabled on older targets, binding decorations will be stripped. + bool enable_420pack_extension = true; + + // In non-Vulkan GLSL, emit push constant blocks as UBOs rather than plain uniforms. + bool emit_push_constant_as_uniform_buffer = false; + + // Always emit uniform blocks as plain uniforms, regardless of the GLSL version, even when UBOs are supported. + // Does not apply to shader storage or push constant blocks. + bool emit_uniform_buffer_as_plain_uniforms = false; + + // Emit OpLine directives if present in the module. + // May not correspond exactly to original source, but should be a good approximation. + bool emit_line_directives = false; + + // In cases where readonly/writeonly decoration are not used at all, + // we try to deduce which qualifier(s) we should actually used, since actually emitting + // read-write decoration is very rare, and older glslang/HLSL compilers tend to just emit readwrite as a matter of fact. + // The default (true) is to enable automatic deduction for these cases, but if you trust the decorations set + // by the SPIR-V, it's recommended to set this to false. + bool enable_storage_image_qualifier_deduction = true; + + // On some targets (WebGPU), uninitialized variables are banned. + // If this is enabled, all variables (temporaries, Private, Function) + // which would otherwise be uninitialized will now be initialized to 0 instead. + bool force_zero_initialized_variables = false; + + // In GLSL, force use of I/O block flattening, similar to + // what happens on legacy GLSL targets for blocks and structs. + bool force_flattened_io_blocks = false; + + // For opcodes where we have to perform explicit additional nan checks, very ugly code is generated. + // If we opt-in, ignore these requirements. + // In opcodes like NClamp/NMin/NMax and FP compare, ignore NaN behavior. + // Use FClamp/FMin/FMax semantics for clamps and lets implementation choose ordered or unordered + // compares. + bool relax_nan_checks = false; + + // Loading row-major matrices from UBOs on older AMD Windows OpenGL drivers is problematic. + // To load these types correctly, we must generate a wrapper. them in a dummy function which only purpose is to + // ensure row_major decoration is actually respected. + // This workaround may cause significant performance degeneration on some Android devices. + bool enable_row_major_load_workaround = true; + + // If non-zero, controls layout(num_views = N) in; in GL_OVR_multiview2. + uint32_t ovr_multiview_view_count = 0; + + enum Precision + { + DontCare, + Lowp, + Mediump, + Highp + }; + + struct VertexOptions + { + // "Vertex-like shader" here is any shader stage that can write BuiltInPosition. + + // GLSL: In vertex-like shaders, rewrite [0, w] depth (Vulkan/D3D style) to [-w, w] depth (GL style). + // MSL: In vertex-like shaders, rewrite [-w, w] depth (GL style) to [0, w] depth. + // HLSL: In vertex-like shaders, rewrite [-w, w] depth (GL style) to [0, w] depth. + bool fixup_clipspace = false; + + // In vertex-like shaders, inverts gl_Position.y or equivalent. + bool flip_vert_y = false; + + // GLSL only, for HLSL version of this option, see CompilerHLSL. + // If true, the backend will assume that InstanceIndex will need to apply + // a base instance offset. Set to false if you know you will never use base instance + // functionality as it might remove some internal uniforms. + bool support_nonzero_base_instance = true; + } vertex; + + struct FragmentOptions + { + // Add precision mediump float in ES targets when emitting GLES source. + // Add precision highp int in ES targets when emitting GLES source. + Precision default_float_precision = Mediump; + Precision default_int_precision = Highp; + } fragment; + }; + + void remap_pixel_local_storage(std::vector inputs, std::vector outputs) + { + pls_inputs = std::move(inputs); + pls_outputs = std::move(outputs); + remap_pls_variables(); + } + + // Redirect a subpassInput reading from input_attachment_index to instead load its value from + // the color attachment at location = color_location. Requires ESSL. + // If coherent, uses GL_EXT_shader_framebuffer_fetch, if not, uses noncoherent variant. + void remap_ext_framebuffer_fetch(uint32_t input_attachment_index, uint32_t color_location, bool coherent); + + explicit CompilerGLSL(std::vector spirv_) + : Compiler(std::move(spirv_)) + { + init(); + } + + CompilerGLSL(const uint32_t *ir_, size_t word_count) + : Compiler(ir_, word_count) + { + init(); + } + + explicit CompilerGLSL(const ParsedIR &ir_) + : Compiler(ir_) + { + init(); + } + + explicit CompilerGLSL(ParsedIR &&ir_) + : Compiler(std::move(ir_)) + { + init(); + } + + const Options &get_common_options() const + { + return options; + } + + void set_common_options(const Options &opts) + { + options = opts; + } + + std::string compile() override; + + // Returns the current string held in the conversion buffer. Useful for + // capturing what has been converted so far when compile() throws an error. + std::string get_partial_source(); + + // Adds a line to be added right after #version in GLSL backend. + // This is useful for enabling custom extensions which are outside the scope of SPIRV-Cross. + // This can be combined with variable remapping. + // A new-line will be added. + // + // While add_header_line() is a more generic way of adding arbitrary text to the header + // of a GLSL file, require_extension() should be used when adding extensions since it will + // avoid creating collisions with SPIRV-Cross generated extensions. + // + // Code added via add_header_line() is typically backend-specific. + void add_header_line(const std::string &str); + + // Adds an extension which is required to run this shader, e.g. + // require_extension("GL_KHR_my_extension"); + void require_extension(const std::string &ext); + + // Legacy GLSL compatibility method. + // Takes a uniform or push constant variable and flattens it into a (i|u)vec4 array[N]; array instead. + // For this to work, all types in the block must be the same basic type, e.g. mixing vec2 and vec4 is fine, but + // mixing int and float is not. + // The name of the uniform array will be the same as the interface block name. + void flatten_buffer_block(VariableID id); + + // After compilation, query if a variable ID was used as a depth resource. + // This is meaningful for MSL since descriptor types depend on this knowledge. + // Cases which return true: + // - Images which are declared with depth = 1 image type. + // - Samplers which are statically used at least once with Dref opcodes. + // - Images which are statically used at least once with Dref opcodes. + bool variable_is_depth_or_compare(VariableID id) const; + + // If a shader output is active in this stage, but inactive in a subsequent stage, + // this can be signalled here. This can be used to work around certain cross-stage matching problems + // which plagues MSL and HLSL in certain scenarios. + // An output which matches one of these will not be emitted in stage output interfaces, but rather treated as a private + // variable. + // This option is only meaningful for MSL and HLSL, since GLSL matches by location directly. + // Masking builtins only takes effect if the builtin in question is part of the stage output interface. + void mask_stage_output_by_location(uint32_t location, uint32_t component); + void mask_stage_output_by_builtin(spv::BuiltIn builtin); + +protected: + struct ShaderSubgroupSupportHelper + { + // lower enum value = greater priority + enum Candidate + { + KHR_shader_subgroup_ballot, + KHR_shader_subgroup_basic, + KHR_shader_subgroup_vote, + NV_gpu_shader_5, + NV_shader_thread_group, + NV_shader_thread_shuffle, + ARB_shader_ballot, + ARB_shader_group_vote, + AMD_gcn_shader, + + CandidateCount + }; + + static const char *get_extension_name(Candidate c); + static SmallVector get_extra_required_extension_names(Candidate c); + static const char *get_extra_required_extension_predicate(Candidate c); + + enum Feature + { + SubgroupMask = 0, + SubgroupSize = 1, + SubgroupInvocationID = 2, + SubgroupID = 3, + NumSubgroups = 4, + SubgroupBroadcast_First = 5, + SubgroupBallotFindLSB_MSB = 6, + SubgroupAll_Any_AllEqualBool = 7, + SubgroupAllEqualT = 8, + SubgroupElect = 9, + SubgroupBarrier = 10, + SubgroupMemBarrier = 11, + SubgroupBallot = 12, + SubgroupInverseBallot_InclBitCount_ExclBitCout = 13, + SubgroupBallotBitExtract = 14, + SubgroupBallotBitCount = 15, + + FeatureCount + }; + + using FeatureMask = uint32_t; + static_assert(sizeof(FeatureMask) * 8u >= FeatureCount, "Mask type needs more bits."); + + using CandidateVector = SmallVector; + using FeatureVector = SmallVector; + + static FeatureVector get_feature_dependencies(Feature feature); + static FeatureMask get_feature_dependency_mask(Feature feature); + static bool can_feature_be_implemented_without_extensions(Feature feature); + static Candidate get_KHR_extension_for_feature(Feature feature); + + struct Result + { + Result(); + uint32_t weights[CandidateCount]; + }; + + void request_feature(Feature feature); + bool is_feature_requested(Feature feature) const; + Result resolve() const; + + static CandidateVector get_candidates_for_feature(Feature ft, const Result &r); + + private: + static CandidateVector get_candidates_for_feature(Feature ft); + static FeatureMask build_mask(const SmallVector &features); + FeatureMask feature_mask = 0; + }; + + // TODO remove this function when all subgroup ops are supported (or make it always return true) + static bool is_supported_subgroup_op_in_opengl(spv::Op op); + + void reset(uint32_t iteration_count); + void emit_function(SPIRFunction &func, const Bitset &return_flags); + + bool has_extension(const std::string &ext) const; + void require_extension_internal(const std::string &ext); + + // Virtualize methods which need to be overridden by subclass targets like C++ and such. + virtual void emit_function_prototype(SPIRFunction &func, const Bitset &return_flags); + + SPIRBlock *current_emitting_block = nullptr; + SmallVector current_emitting_switch_stack; + bool current_emitting_switch_fallthrough = false; + + virtual void emit_instruction(const Instruction &instr); + struct TemporaryCopy + { + uint32_t dst_id; + uint32_t src_id; + }; + TemporaryCopy handle_instruction_precision(const Instruction &instr); + void emit_block_instructions(SPIRBlock &block); + + // For relax_nan_checks. + GLSLstd450 get_remapped_glsl_op(GLSLstd450 std450_op) const; + spv::Op get_remapped_spirv_op(spv::Op op) const; + + virtual void emit_glsl_op(uint32_t result_type, uint32_t result_id, uint32_t op, const uint32_t *args, + uint32_t count); + virtual void emit_spv_amd_shader_ballot_op(uint32_t result_type, uint32_t result_id, uint32_t op, + const uint32_t *args, uint32_t count); + virtual void emit_spv_amd_shader_explicit_vertex_parameter_op(uint32_t result_type, uint32_t result_id, uint32_t op, + const uint32_t *args, uint32_t count); + virtual void emit_spv_amd_shader_trinary_minmax_op(uint32_t result_type, uint32_t result_id, uint32_t op, + const uint32_t *args, uint32_t count); + virtual void emit_spv_amd_gcn_shader_op(uint32_t result_type, uint32_t result_id, uint32_t op, const uint32_t *args, + uint32_t count); + virtual void emit_header(); + void emit_line_directive(uint32_t file_id, uint32_t line_literal); + void build_workgroup_size(SmallVector &arguments, const SpecializationConstant &x, + const SpecializationConstant &y, const SpecializationConstant &z); + + void request_subgroup_feature(ShaderSubgroupSupportHelper::Feature feature); + + virtual void emit_sampled_image_op(uint32_t result_type, uint32_t result_id, uint32_t image_id, uint32_t samp_id); + virtual void emit_texture_op(const Instruction &i, bool sparse); + virtual std::string to_texture_op(const Instruction &i, bool sparse, bool *forward, + SmallVector &inherited_expressions); + virtual void emit_subgroup_op(const Instruction &i); + virtual std::string type_to_glsl(const SPIRType &type, uint32_t id = 0); + virtual std::string builtin_to_glsl(spv::BuiltIn builtin, spv::StorageClass storage); + virtual void emit_struct_member(const SPIRType &type, uint32_t member_type_id, uint32_t index, + const std::string &qualifier = "", uint32_t base_offset = 0); + virtual void emit_struct_padding_target(const SPIRType &type); + virtual std::string image_type_glsl(const SPIRType &type, uint32_t id = 0); + std::string constant_expression(const SPIRConstant &c, bool inside_block_like_struct_scope = false); + virtual std::string constant_op_expression(const SPIRConstantOp &cop); + virtual std::string constant_expression_vector(const SPIRConstant &c, uint32_t vector); + virtual void emit_fixup(); + virtual std::string variable_decl(const SPIRType &type, const std::string &name, uint32_t id = 0); + virtual bool variable_decl_is_remapped_storage(const SPIRVariable &var, spv::StorageClass storage) const; + virtual std::string to_func_call_arg(const SPIRFunction::Parameter &arg, uint32_t id); + + struct TextureFunctionBaseArguments + { + // GCC 4.8 workarounds, it doesn't understand '{}' constructor here, use explicit default constructor. + TextureFunctionBaseArguments() = default; + VariableID img = 0; + const SPIRType *imgtype = nullptr; + bool is_fetch = false, is_gather = false, is_proj = false; + }; + + struct TextureFunctionNameArguments + { + // GCC 4.8 workarounds, it doesn't understand '{}' constructor here, use explicit default constructor. + TextureFunctionNameArguments() = default; + TextureFunctionBaseArguments base; + bool has_array_offsets = false, has_offset = false, has_grad = false; + bool has_dref = false, is_sparse_feedback = false, has_min_lod = false; + uint32_t lod = 0; + }; + virtual std::string to_function_name(const TextureFunctionNameArguments &args); + + struct TextureFunctionArguments + { + // GCC 4.8 workarounds, it doesn't understand '{}' constructor here, use explicit default constructor. + TextureFunctionArguments() = default; + TextureFunctionBaseArguments base; + uint32_t coord = 0, coord_components = 0, dref = 0; + uint32_t grad_x = 0, grad_y = 0, lod = 0, coffset = 0, offset = 0; + uint32_t bias = 0, component = 0, sample = 0, sparse_texel = 0, min_lod = 0; + bool nonuniform_expression = false; + }; + virtual std::string to_function_args(const TextureFunctionArguments &args, bool *p_forward); + + void emit_sparse_feedback_temporaries(uint32_t result_type_id, uint32_t id, uint32_t &feedback_id, + uint32_t &texel_id); + uint32_t get_sparse_feedback_texel_id(uint32_t id) const; + virtual void emit_buffer_block(const SPIRVariable &type); + virtual void emit_push_constant_block(const SPIRVariable &var); + virtual void emit_uniform(const SPIRVariable &var); + virtual std::string unpack_expression_type(std::string expr_str, const SPIRType &type, uint32_t physical_type_id, + bool packed_type, bool row_major); + + virtual bool builtin_translates_to_nonarray(spv::BuiltIn builtin) const; + + void emit_copy_logical_type(uint32_t lhs_id, uint32_t lhs_type_id, uint32_t rhs_id, uint32_t rhs_type_id, + SmallVector chain); + + StringStream<> buffer; + + template + inline void statement_inner(T &&t) + { + buffer << std::forward(t); + statement_count++; + } + + template + inline void statement_inner(T &&t, Ts &&... ts) + { + buffer << std::forward(t); + statement_count++; + statement_inner(std::forward(ts)...); + } + + template + inline void statement(Ts &&... ts) + { + if (is_forcing_recompilation()) + { + // Do not bother emitting code while force_recompile is active. + // We will compile again. + statement_count++; + return; + } + + if (redirect_statement) + { + redirect_statement->push_back(join(std::forward(ts)...)); + statement_count++; + } + else + { + for (uint32_t i = 0; i < indent; i++) + buffer << " "; + statement_inner(std::forward(ts)...); + buffer << '\n'; + } + } + + template + inline void statement_no_indent(Ts &&... ts) + { + auto old_indent = indent; + indent = 0; + statement(std::forward(ts)...); + indent = old_indent; + } + + // Used for implementing continue blocks where + // we want to obtain a list of statements we can merge + // on a single line separated by comma. + SmallVector *redirect_statement = nullptr; + const SPIRBlock *current_continue_block = nullptr; + bool block_temporary_hoisting = false; + + void begin_scope(); + void end_scope(); + void end_scope(const std::string &trailer); + void end_scope_decl(); + void end_scope_decl(const std::string &decl); + + Options options; + + virtual std::string type_to_array_glsl( + const SPIRType &type); // Allow Metal to use the array template to make arrays a value type + std::string to_array_size(const SPIRType &type, uint32_t index); + uint32_t to_array_size_literal(const SPIRType &type, uint32_t index) const; + uint32_t to_array_size_literal(const SPIRType &type) const; + virtual std::string variable_decl(const SPIRVariable &variable); // Threadgroup arrays can't have a wrapper type + std::string variable_decl_function_local(SPIRVariable &variable); + + void add_local_variable_name(uint32_t id); + void add_resource_name(uint32_t id); + void add_member_name(SPIRType &type, uint32_t name); + void add_function_overload(const SPIRFunction &func); + + virtual bool is_non_native_row_major_matrix(uint32_t id); + virtual bool member_is_non_native_row_major_matrix(const SPIRType &type, uint32_t index); + bool member_is_remapped_physical_type(const SPIRType &type, uint32_t index) const; + bool member_is_packed_physical_type(const SPIRType &type, uint32_t index) const; + virtual std::string convert_row_major_matrix(std::string exp_str, const SPIRType &exp_type, + uint32_t physical_type_id, bool is_packed); + + std::unordered_set local_variable_names; + std::unordered_set resource_names; + std::unordered_set block_input_names; + std::unordered_set block_output_names; + std::unordered_set block_ubo_names; + std::unordered_set block_ssbo_names; + std::unordered_set block_names; // A union of all block_*_names. + std::unordered_map> function_overloads; + std::unordered_map preserved_aliases; + void preserve_alias_on_reset(uint32_t id); + void reset_name_caches(); + + bool processing_entry_point = false; + + // Can be overriden by subclass backends for trivial things which + // shouldn't need polymorphism. + struct BackendVariations + { + std::string discard_literal = "discard"; + std::string demote_literal = "demote"; + std::string null_pointer_literal = ""; + bool float_literal_suffix = false; + bool double_literal_suffix = true; + bool uint32_t_literal_suffix = true; + bool long_long_literal_suffix = false; + const char *basic_int_type = "int"; + const char *basic_uint_type = "uint"; + const char *basic_int8_type = "int8_t"; + const char *basic_uint8_type = "uint8_t"; + const char *basic_int16_type = "int16_t"; + const char *basic_uint16_type = "uint16_t"; + const char *int16_t_literal_suffix = "s"; + const char *uint16_t_literal_suffix = "us"; + const char *nonuniform_qualifier = "nonuniformEXT"; + const char *boolean_mix_function = "mix"; + bool swizzle_is_function = false; + bool shared_is_implied = false; + bool unsized_array_supported = true; + bool explicit_struct_type = false; + bool use_initializer_list = false; + bool use_typed_initializer_list = false; + bool can_declare_struct_inline = true; + bool can_declare_arrays_inline = true; + bool native_row_major_matrix = true; + bool use_constructor_splatting = true; + bool allow_precision_qualifiers = false; + bool can_swizzle_scalar = false; + bool force_gl_in_out_block = false; + bool force_merged_mesh_block = false; + bool can_return_array = true; + bool allow_truncated_access_chain = false; + bool supports_extensions = false; + bool supports_empty_struct = false; + bool array_is_value_type = true; + bool array_is_value_type_in_buffer_blocks = true; + bool comparison_image_samples_scalar = false; + bool native_pointers = false; + bool support_small_type_sampling_result = false; + bool support_case_fallthrough = true; + bool use_array_constructor = false; + bool needs_row_major_load_workaround = false; + bool support_pointer_to_pointer = false; + bool support_precise_qualifier = false; + bool support_64bit_switch = false; + bool workgroup_size_is_hidden = false; + bool requires_relaxed_precision_analysis = false; + bool implicit_c_integer_promotion_rules = false; + } backend; + + void emit_struct(SPIRType &type); + void emit_resources(); + void emit_extension_workarounds(spv::ExecutionModel model); + void emit_buffer_block_native(const SPIRVariable &var); + void emit_buffer_reference_block(uint32_t type_id, bool forward_declaration); + void emit_buffer_block_legacy(const SPIRVariable &var); + void emit_buffer_block_flattened(const SPIRVariable &type); + void fixup_implicit_builtin_block_names(spv::ExecutionModel model); + void emit_declared_builtin_block(spv::StorageClass storage, spv::ExecutionModel model); + bool should_force_emit_builtin_block(spv::StorageClass storage); + void emit_push_constant_block_vulkan(const SPIRVariable &var); + void emit_push_constant_block_glsl(const SPIRVariable &var); + void emit_interface_block(const SPIRVariable &type); + void emit_flattened_io_block(const SPIRVariable &var, const char *qual); + void emit_flattened_io_block_struct(const std::string &basename, const SPIRType &type, const char *qual, + const SmallVector &indices); + void emit_flattened_io_block_member(const std::string &basename, const SPIRType &type, const char *qual, + const SmallVector &indices); + void emit_block_chain(SPIRBlock &block); + void emit_hoisted_temporaries(SmallVector> &temporaries); + std::string constant_value_macro_name(uint32_t id); + int get_constant_mapping_to_workgroup_component(const SPIRConstant &constant) const; + void emit_constant(const SPIRConstant &constant); + void emit_specialization_constant_op(const SPIRConstantOp &constant); + std::string emit_continue_block(uint32_t continue_block, bool follow_true_block, bool follow_false_block); + bool attempt_emit_loop_header(SPIRBlock &block, SPIRBlock::Method method); + + void branch(BlockID from, BlockID to); + void branch_to_continue(BlockID from, BlockID to); + void branch(BlockID from, uint32_t cond, BlockID true_block, BlockID false_block); + void flush_phi(BlockID from, BlockID to); + void flush_variable_declaration(uint32_t id); + void flush_undeclared_variables(SPIRBlock &block); + void emit_variable_temporary_copies(const SPIRVariable &var); + + bool should_dereference(uint32_t id); + bool should_forward(uint32_t id) const; + bool should_suppress_usage_tracking(uint32_t id) const; + void emit_mix_op(uint32_t result_type, uint32_t id, uint32_t left, uint32_t right, uint32_t lerp); + void emit_nminmax_op(uint32_t result_type, uint32_t id, uint32_t op0, uint32_t op1, GLSLstd450 op); + bool to_trivial_mix_op(const SPIRType &type, std::string &op, uint32_t left, uint32_t right, uint32_t lerp); + void emit_quaternary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, uint32_t op2, + uint32_t op3, const char *op); + void emit_trinary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, uint32_t op2, + const char *op); + void emit_binary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op); + void emit_atomic_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op); + void emit_atomic_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, uint32_t op2, const char *op); + + void emit_unary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, const char *op, + SPIRType::BaseType input_type, SPIRType::BaseType expected_result_type); + void emit_binary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op, + SPIRType::BaseType input_type, bool skip_cast_if_equal_type); + void emit_binary_func_op_cast_clustered(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, + const char *op, SPIRType::BaseType input_type); + void emit_trinary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, uint32_t op2, + const char *op, SPIRType::BaseType input_type); + void emit_trinary_func_op_bitextract(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, + uint32_t op2, const char *op, SPIRType::BaseType expected_result_type, + SPIRType::BaseType input_type0, SPIRType::BaseType input_type1, + SPIRType::BaseType input_type2); + void emit_bitfield_insert_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, uint32_t op2, + uint32_t op3, const char *op, SPIRType::BaseType offset_count_type); + + void emit_unary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, const char *op); + void emit_unrolled_unary_op(uint32_t result_type, uint32_t result_id, uint32_t operand, const char *op); + void emit_binary_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op); + void emit_unrolled_binary_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op, + bool negate, SPIRType::BaseType expected_type); + void emit_binary_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op, + SPIRType::BaseType input_type, bool skip_cast_if_equal_type, bool implicit_integer_promotion); + + SPIRType binary_op_bitcast_helper(std::string &cast_op0, std::string &cast_op1, SPIRType::BaseType &input_type, + uint32_t op0, uint32_t op1, bool skip_cast_if_equal_type); + + virtual bool emit_complex_bitcast(uint32_t result_type, uint32_t id, uint32_t op0); + + std::string to_ternary_expression(const SPIRType &result_type, uint32_t select, uint32_t true_value, + uint32_t false_value); + + void emit_unary_op(uint32_t result_type, uint32_t result_id, uint32_t op0, const char *op); + void emit_unary_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, const char *op); + bool expression_is_forwarded(uint32_t id) const; + bool expression_suppresses_usage_tracking(uint32_t id) const; + bool expression_read_implies_multiple_reads(uint32_t id) const; + SPIRExpression &emit_op(uint32_t result_type, uint32_t result_id, const std::string &rhs, bool forward_rhs, + bool suppress_usage_tracking = false); + + void access_chain_internal_append_index(std::string &expr, uint32_t base, const SPIRType *type, + AccessChainFlags flags, bool &access_chain_is_arrayed, uint32_t index); + + std::string access_chain_internal(uint32_t base, const uint32_t *indices, uint32_t count, AccessChainFlags flags, + AccessChainMeta *meta); + + spv::StorageClass get_expression_effective_storage_class(uint32_t ptr); + virtual bool access_chain_needs_stage_io_builtin_translation(uint32_t base); + + virtual void check_physical_type_cast(std::string &expr, const SPIRType *type, uint32_t physical_type); + virtual void prepare_access_chain_for_scalar_access(std::string &expr, const SPIRType &type, + spv::StorageClass storage, bool &is_packed); + + std::string access_chain(uint32_t base, const uint32_t *indices, uint32_t count, const SPIRType &target_type, + AccessChainMeta *meta = nullptr, bool ptr_chain = false); + + std::string flattened_access_chain(uint32_t base, const uint32_t *indices, uint32_t count, + const SPIRType &target_type, uint32_t offset, uint32_t matrix_stride, + uint32_t array_stride, bool need_transpose); + std::string flattened_access_chain_struct(uint32_t base, const uint32_t *indices, uint32_t count, + const SPIRType &target_type, uint32_t offset); + std::string flattened_access_chain_matrix(uint32_t base, const uint32_t *indices, uint32_t count, + const SPIRType &target_type, uint32_t offset, uint32_t matrix_stride, + bool need_transpose); + std::string flattened_access_chain_vector(uint32_t base, const uint32_t *indices, uint32_t count, + const SPIRType &target_type, uint32_t offset, uint32_t matrix_stride, + bool need_transpose); + std::pair flattened_access_chain_offset(const SPIRType &basetype, const uint32_t *indices, + uint32_t count, uint32_t offset, + uint32_t word_stride, bool *need_transpose = nullptr, + uint32_t *matrix_stride = nullptr, + uint32_t *array_stride = nullptr, + bool ptr_chain = false); + + const char *index_to_swizzle(uint32_t index); + std::string remap_swizzle(const SPIRType &result_type, uint32_t input_components, const std::string &expr); + std::string declare_temporary(uint32_t type, uint32_t id); + void emit_uninitialized_temporary(uint32_t type, uint32_t id); + SPIRExpression &emit_uninitialized_temporary_expression(uint32_t type, uint32_t id); + void append_global_func_args(const SPIRFunction &func, uint32_t index, SmallVector &arglist); + std::string to_non_uniform_aware_expression(uint32_t id); + std::string to_expression(uint32_t id, bool register_expression_read = true); + std::string to_composite_constructor_expression(uint32_t id, bool block_like_type); + std::string to_rerolled_array_expression(const std::string &expr, const SPIRType &type); + std::string to_enclosed_expression(uint32_t id, bool register_expression_read = true); + std::string to_unpacked_expression(uint32_t id, bool register_expression_read = true); + std::string to_unpacked_row_major_matrix_expression(uint32_t id); + std::string to_enclosed_unpacked_expression(uint32_t id, bool register_expression_read = true); + std::string to_dereferenced_expression(uint32_t id, bool register_expression_read = true); + std::string to_pointer_expression(uint32_t id, bool register_expression_read = true); + std::string to_enclosed_pointer_expression(uint32_t id, bool register_expression_read = true); + std::string to_extract_component_expression(uint32_t id, uint32_t index); + std::string to_extract_constant_composite_expression(uint32_t result_type, const SPIRConstant &c, + const uint32_t *chain, uint32_t length); + std::string enclose_expression(const std::string &expr); + std::string dereference_expression(const SPIRType &expression_type, const std::string &expr); + std::string address_of_expression(const std::string &expr); + void strip_enclosed_expression(std::string &expr); + std::string to_member_name(const SPIRType &type, uint32_t index); + virtual std::string to_member_reference(uint32_t base, const SPIRType &type, uint32_t index, bool ptr_chain_is_resolved); + std::string to_multi_member_reference(const SPIRType &type, const SmallVector &indices); + std::string type_to_glsl_constructor(const SPIRType &type); + std::string argument_decl(const SPIRFunction::Parameter &arg); + virtual std::string to_qualifiers_glsl(uint32_t id); + void fixup_io_block_patch_primitive_qualifiers(const SPIRVariable &var); + void emit_output_variable_initializer(const SPIRVariable &var); + std::string to_precision_qualifiers_glsl(uint32_t id); + virtual const char *to_storage_qualifiers_glsl(const SPIRVariable &var); + std::string flags_to_qualifiers_glsl(const SPIRType &type, const Bitset &flags); + const char *format_to_glsl(spv::ImageFormat format); + virtual std::string layout_for_member(const SPIRType &type, uint32_t index); + virtual std::string to_interpolation_qualifiers(const Bitset &flags); + std::string layout_for_variable(const SPIRVariable &variable); + std::string to_combined_image_sampler(VariableID image_id, VariableID samp_id); + virtual bool skip_argument(uint32_t id) const; + virtual void emit_array_copy(const std::string &lhs, uint32_t lhs_id, uint32_t rhs_id, + spv::StorageClass lhs_storage, spv::StorageClass rhs_storage); + virtual void emit_block_hints(const SPIRBlock &block); + virtual std::string to_initializer_expression(const SPIRVariable &var); + virtual std::string to_zero_initialized_expression(uint32_t type_id); + bool type_can_zero_initialize(const SPIRType &type) const; + + bool buffer_is_packing_standard(const SPIRType &type, BufferPackingStandard packing, + uint32_t *failed_index = nullptr, uint32_t start_offset = 0, + uint32_t end_offset = ~(0u)); + std::string buffer_to_packing_standard(const SPIRType &type, bool support_std430_without_scalar_layout); + + uint32_t type_to_packed_base_size(const SPIRType &type, BufferPackingStandard packing); + uint32_t type_to_packed_alignment(const SPIRType &type, const Bitset &flags, BufferPackingStandard packing); + uint32_t type_to_packed_array_stride(const SPIRType &type, const Bitset &flags, BufferPackingStandard packing); + uint32_t type_to_packed_size(const SPIRType &type, const Bitset &flags, BufferPackingStandard packing); + uint32_t type_to_location_count(const SPIRType &type) const; + + std::string bitcast_glsl(const SPIRType &result_type, uint32_t arg); + virtual std::string bitcast_glsl_op(const SPIRType &result_type, const SPIRType &argument_type); + + std::string bitcast_expression(SPIRType::BaseType target_type, uint32_t arg); + std::string bitcast_expression(const SPIRType &target_type, SPIRType::BaseType expr_type, const std::string &expr); + + std::string build_composite_combiner(uint32_t result_type, const uint32_t *elems, uint32_t length); + bool remove_duplicate_swizzle(std::string &op); + bool remove_unity_swizzle(uint32_t base, std::string &op); + + // Can modify flags to remote readonly/writeonly if image type + // and force recompile. + bool check_atomic_image(uint32_t id); + + virtual void replace_illegal_names(); + void replace_illegal_names(const std::unordered_set &keywords); + virtual void emit_entry_point_declarations(); + + void replace_fragment_output(SPIRVariable &var); + void replace_fragment_outputs(); + std::string legacy_tex_op(const std::string &op, const SPIRType &imgtype, uint32_t id); + + void forward_relaxed_precision(uint32_t dst_id, const uint32_t *args, uint32_t length); + void analyze_precision_requirements(uint32_t type_id, uint32_t dst_id, uint32_t *args, uint32_t length); + Options::Precision analyze_expression_precision(const uint32_t *args, uint32_t length) const; + + uint32_t indent = 0; + + std::unordered_set emitted_functions; + + // Ensure that we declare phi-variable copies even if the original declaration isn't deferred + std::unordered_set flushed_phi_variables; + + std::unordered_set flattened_buffer_blocks; + std::unordered_map flattened_structs; + + ShaderSubgroupSupportHelper shader_subgroup_supporter; + + std::string load_flattened_struct(const std::string &basename, const SPIRType &type); + std::string to_flattened_struct_member(const std::string &basename, const SPIRType &type, uint32_t index); + void store_flattened_struct(uint32_t lhs_id, uint32_t value); + void store_flattened_struct(const std::string &basename, uint32_t rhs, const SPIRType &type, + const SmallVector &indices); + std::string to_flattened_access_chain_expression(uint32_t id); + + // Usage tracking. If a temporary is used more than once, use the temporary instead to + // avoid AST explosion when SPIRV is generated with pure SSA and doesn't write stuff to variables. + std::unordered_map expression_usage_counts; + void track_expression_read(uint32_t id); + + SmallVector forced_extensions; + SmallVector header_lines; + + // Used when expressions emit extra opcodes with their own unique IDs, + // and we need to reuse the IDs across recompilation loops. + // Currently used by NMin/Max/Clamp implementations. + std::unordered_map extra_sub_expressions; + + SmallVector workaround_ubo_load_overload_types; + void request_workaround_wrapper_overload(TypeID id); + void rewrite_load_for_wrapped_row_major(std::string &expr, TypeID loaded_type, ID ptr); + + uint32_t statement_count = 0; + + inline bool is_legacy() const + { + return (options.es && options.version < 300) || (!options.es && options.version < 130); + } + + inline bool is_legacy_es() const + { + return options.es && options.version < 300; + } + + inline bool is_legacy_desktop() const + { + return !options.es && options.version < 130; + } + + bool requires_transpose_2x2 = false; + bool requires_transpose_3x3 = false; + bool requires_transpose_4x4 = false; + bool ray_tracing_is_khr = false; + bool barycentric_is_nv = false; + void ray_tracing_khr_fixup_locations(); + + bool args_will_forward(uint32_t id, const uint32_t *args, uint32_t num_args, bool pure); + void register_call_out_argument(uint32_t id); + void register_impure_function_call(); + void register_control_dependent_expression(uint32_t expr); + + // GL_EXT_shader_pixel_local_storage support. + std::vector pls_inputs; + std::vector pls_outputs; + std::string pls_decl(const PlsRemap &variable); + const char *to_pls_qualifiers_glsl(const SPIRVariable &variable); + void emit_pls(); + void remap_pls_variables(); + + // GL_EXT_shader_framebuffer_fetch support. + std::vector> subpass_to_framebuffer_fetch_attachment; + std::vector> inout_color_attachments; + bool location_is_framebuffer_fetch(uint32_t location) const; + bool location_is_non_coherent_framebuffer_fetch(uint32_t location) const; + bool subpass_input_is_framebuffer_fetch(uint32_t id) const; + void emit_inout_fragment_outputs_copy_to_subpass_inputs(); + const SPIRVariable *find_subpass_input_by_attachment_index(uint32_t index) const; + const SPIRVariable *find_color_output_by_location(uint32_t location) const; + + // A variant which takes two sets of name. The secondary is only used to verify there are no collisions, + // but the set is not updated when we have found a new name. + // Used primarily when adding block interface names. + void add_variable(std::unordered_set &variables_primary, + const std::unordered_set &variables_secondary, std::string &name); + + void check_function_call_constraints(const uint32_t *args, uint32_t length); + void handle_invalid_expression(uint32_t id); + void force_temporary_and_recompile(uint32_t id); + void find_static_extensions(); + + uint32_t consume_temporary_in_precision_context(uint32_t type_id, uint32_t id, Options::Precision precision); + std::unordered_map temporary_to_mirror_precision_alias; + std::unordered_set composite_insert_overwritten; + std::unordered_set block_composite_insert_overwrite; + + std::string emit_for_loop_initializers(const SPIRBlock &block); + void emit_while_loop_initializers(const SPIRBlock &block); + bool for_loop_initializers_are_same_type(const SPIRBlock &block); + bool optimize_read_modify_write(const SPIRType &type, const std::string &lhs, const std::string &rhs); + void fixup_image_load_store_access(); + + bool type_is_empty(const SPIRType &type); + + bool can_use_io_location(spv::StorageClass storage, bool block); + const Instruction *get_next_instruction_in_block(const Instruction &instr); + static uint32_t mask_relevant_memory_semantics(uint32_t semantics); + + std::string convert_half_to_string(const SPIRConstant &value, uint32_t col, uint32_t row); + std::string convert_float_to_string(const SPIRConstant &value, uint32_t col, uint32_t row); + std::string convert_double_to_string(const SPIRConstant &value, uint32_t col, uint32_t row); + + std::string convert_separate_image_to_expression(uint32_t id); + + // Builtins in GLSL are always specific signedness, but the SPIR-V can declare them + // as either unsigned or signed. + // Sometimes we will need to automatically perform casts on load and store to make this work. + virtual void cast_to_variable_store(uint32_t target_id, std::string &expr, const SPIRType &expr_type); + virtual void cast_from_variable_load(uint32_t source_id, std::string &expr, const SPIRType &expr_type); + void unroll_array_from_complex_load(uint32_t target_id, uint32_t source_id, std::string &expr); + bool unroll_array_to_complex_store(uint32_t target_id, uint32_t source_id); + void convert_non_uniform_expression(std::string &expr, uint32_t ptr_id); + + void handle_store_to_invariant_variable(uint32_t store_id, uint32_t value_id); + void disallow_forwarding_in_expression_chain(const SPIRExpression &expr); + + bool expression_is_constant_null(uint32_t id) const; + bool expression_is_non_value_type_array(uint32_t ptr); + virtual void emit_store_statement(uint32_t lhs_expression, uint32_t rhs_expression); + + uint32_t get_integer_width_for_instruction(const Instruction &instr) const; + uint32_t get_integer_width_for_glsl_instruction(GLSLstd450 op, const uint32_t *arguments, uint32_t length) const; + + bool variable_is_lut(const SPIRVariable &var) const; + + char current_locale_radix_character = '.'; + + void fixup_type_alias(); + void reorder_type_alias(); + void fixup_anonymous_struct_names(); + void fixup_anonymous_struct_names(std::unordered_set &visited, const SPIRType &type); + + static const char *vector_swizzle(int vecsize, int index); + + bool is_stage_output_location_masked(uint32_t location, uint32_t component) const; + bool is_stage_output_builtin_masked(spv::BuiltIn builtin) const; + bool is_stage_output_variable_masked(const SPIRVariable &var) const; + bool is_stage_output_block_member_masked(const SPIRVariable &var, uint32_t index, bool strip_array) const; + bool is_per_primitive_variable(const SPIRVariable &var) const; + uint32_t get_accumulated_member_location(const SPIRVariable &var, uint32_t mbr_idx, bool strip_array) const; + uint32_t get_declared_member_location(const SPIRVariable &var, uint32_t mbr_idx, bool strip_array) const; + std::unordered_set masked_output_locations; + std::unordered_set masked_output_builtins; + +private: + void init(); + + SmallVector get_composite_constant_ids(ConstantID const_id); + void fill_composite_constant(SPIRConstant &constant, TypeID type_id, const SmallVector &initializers); + void set_composite_constant(ConstantID const_id, TypeID type_id, const SmallVector &initializers); + TypeID get_composite_member_type(TypeID type_id, uint32_t member_idx); + std::unordered_map> const_composite_insert_ids; +}; +} // namespace SPIRV_CROSS_NAMESPACE + +#endif diff --git a/local_packages/include/spirv_cross/spirv_hlsl.hpp b/local_packages/include/spirv_cross/spirv_hlsl.hpp new file mode 100644 index 0000000..57d1c2c --- /dev/null +++ b/local_packages/include/spirv_cross/spirv_hlsl.hpp @@ -0,0 +1,406 @@ +/* + * Copyright 2016-2021 Robert Konrad + * SPDX-License-Identifier: Apache-2.0 OR MIT + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * At your option, you may choose to accept this material under either: + * 1. The Apache License, Version 2.0, found at , or + * 2. The MIT License, found at . + */ + +#ifndef SPIRV_HLSL_HPP +#define SPIRV_HLSL_HPP + +#include "spirv_glsl.hpp" +#include + +namespace SPIRV_CROSS_NAMESPACE +{ +// Interface which remaps vertex inputs to a fixed semantic name to make linking easier. +struct HLSLVertexAttributeRemap +{ + uint32_t location; + std::string semantic; +}; +// Specifying a root constant (d3d12) or push constant range (vulkan). +// +// `start` and `end` denotes the range of the root constant in bytes. +// Both values need to be multiple of 4. +struct RootConstants +{ + uint32_t start; + uint32_t end; + + uint32_t binding; + uint32_t space; +}; + +// For finer control, decorations may be removed from specific resources instead with unset_decoration(). +enum HLSLBindingFlagBits +{ + HLSL_BINDING_AUTO_NONE_BIT = 0, + + // Push constant (root constant) resources will be declared as CBVs (b-space) without a register() declaration. + // A register will be automatically assigned by the D3D compiler, but must therefore be reflected in D3D-land. + // Push constants do not normally have a DecorationBinding set, but if they do, this can be used to ignore it. + HLSL_BINDING_AUTO_PUSH_CONSTANT_BIT = 1 << 0, + + // cbuffer resources will be declared as CBVs (b-space) without a register() declaration. + // A register will be automatically assigned, but must be reflected in D3D-land. + HLSL_BINDING_AUTO_CBV_BIT = 1 << 1, + + // All SRVs (t-space) will be declared without a register() declaration. + HLSL_BINDING_AUTO_SRV_BIT = 1 << 2, + + // All UAVs (u-space) will be declared without a register() declaration. + HLSL_BINDING_AUTO_UAV_BIT = 1 << 3, + + // All samplers (s-space) will be declared without a register() declaration. + HLSL_BINDING_AUTO_SAMPLER_BIT = 1 << 4, + + // No resources will be declared with register(). + HLSL_BINDING_AUTO_ALL = 0x7fffffff +}; +using HLSLBindingFlags = uint32_t; + +// By matching stage, desc_set and binding for a SPIR-V resource, +// register bindings are set based on whether the HLSL resource is a +// CBV, UAV, SRV or Sampler. A single binding in SPIR-V might contain multiple +// resource types, e.g. COMBINED_IMAGE_SAMPLER, and SRV/Sampler bindings will be used respectively. +// On SM 5.0 and lower, register_space is ignored. +// +// To remap a push constant block which does not have any desc_set/binding associated with it, +// use ResourceBindingPushConstant{DescriptorSet,Binding} as values for desc_set/binding. +// For deeper control of push constants, set_root_constant_layouts() can be used instead. +struct HLSLResourceBinding +{ + spv::ExecutionModel stage = spv::ExecutionModelMax; + uint32_t desc_set = 0; + uint32_t binding = 0; + + struct Binding + { + uint32_t register_space = 0; + uint32_t register_binding = 0; + } cbv, uav, srv, sampler; +}; + +enum HLSLAuxBinding +{ + HLSL_AUX_BINDING_BASE_VERTEX_INSTANCE = 0 +}; + +class CompilerHLSL : public CompilerGLSL +{ +public: + struct Options + { + uint32_t shader_model = 30; // TODO: map ps_4_0_level_9_0,... somehow + + // Allows the PointSize builtin, and ignores it, as PointSize is not supported in HLSL. + bool point_size_compat = false; + + // Allows the PointCoord builtin, returns float2(0.5, 0.5), as PointCoord is not supported in HLSL. + bool point_coord_compat = false; + + // If true, the backend will assume that VertexIndex and InstanceIndex will need to apply + // a base offset, and you will need to fill in a cbuffer with offsets. + // Set to false if you know you will never use base instance or base vertex + // functionality as it might remove an internal cbuffer. + bool support_nonzero_base_vertex_base_instance = false; + + // Forces a storage buffer to always be declared as UAV, even if the readonly decoration is used. + // By default, a readonly storage buffer will be declared as ByteAddressBuffer (SRV) instead. + // Alternatively, use set_hlsl_force_storage_buffer_as_uav to specify individually. + bool force_storage_buffer_as_uav = false; + + // Forces any storage image type marked as NonWritable to be considered an SRV instead. + // For this to work with function call parameters, NonWritable must be considered to be part of the type system + // so that NonWritable image arguments are also translated to Texture rather than RWTexture. + bool nonwritable_uav_texture_as_srv = false; + + // Enables native 16-bit types. Needs SM 6.2. + // Uses half/int16_t/uint16_t instead of min16* types. + // Also adds support for 16-bit load-store from (RW)ByteAddressBuffer. + bool enable_16bit_types = false; + + // If matrices are used as IO variables, flatten the attribute declaration to use + // TEXCOORD{N,N+1,N+2,...} rather than TEXCOORDN_{0,1,2,3}. + // If add_vertex_attribute_remap is used and this feature is used, + // the semantic name will be queried once per active location. + bool flatten_matrix_vertex_input_semantics = false; + + // Rather than emitting main() for the entry point, use the name in SPIR-V. + bool use_entry_point_name = false; + }; + + explicit CompilerHLSL(std::vector spirv_) + : CompilerGLSL(std::move(spirv_)) + { + } + + CompilerHLSL(const uint32_t *ir_, size_t size) + : CompilerGLSL(ir_, size) + { + } + + explicit CompilerHLSL(const ParsedIR &ir_) + : CompilerGLSL(ir_) + { + } + + explicit CompilerHLSL(ParsedIR &&ir_) + : CompilerGLSL(std::move(ir_)) + { + } + + const Options &get_hlsl_options() const + { + return hlsl_options; + } + + void set_hlsl_options(const Options &opts) + { + hlsl_options = opts; + } + + // Optionally specify a custom root constant layout. + // + // Push constants ranges will be split up according to the + // layout specified. + void set_root_constant_layouts(std::vector layout); + + // Compiles and remaps vertex attributes at specific locations to a fixed semantic. + // The default is TEXCOORD# where # denotes location. + // Matrices are unrolled to vectors with notation ${SEMANTIC}_#, where # denotes row. + // $SEMANTIC is either TEXCOORD# or a semantic name specified here. + void add_vertex_attribute_remap(const HLSLVertexAttributeRemap &vertex_attributes); + std::string compile() override; + + // This is a special HLSL workaround for the NumWorkGroups builtin. + // This does not exist in HLSL, so the calling application must create a dummy cbuffer in + // which the application will store this builtin. + // The cbuffer layout will be: + // cbuffer SPIRV_Cross_NumWorkgroups : register(b#, space#) { uint3 SPIRV_Cross_NumWorkgroups_count; }; + // This must be called before compile(). + // The function returns 0 if NumWorkGroups builtin is not statically used in the shader from the current entry point. + // If non-zero, this returns the variable ID of a cbuffer which corresponds to + // the cbuffer declared above. By default, no binding or descriptor set decoration is set, + // so the calling application should declare explicit bindings on this ID before calling compile(). + VariableID remap_num_workgroups_builtin(); + + // Controls how resource bindings are declared in the output HLSL. + void set_resource_binding_flags(HLSLBindingFlags flags); + + // resource is a resource binding to indicate the HLSL CBV, SRV, UAV or sampler binding + // to use for a particular SPIR-V description set + // and binding. If resource bindings are provided, + // is_hlsl_resource_binding_used() will return true after calling ::compile() if + // the set/binding combination was used by the HLSL code. + void add_hlsl_resource_binding(const HLSLResourceBinding &resource); + bool is_hlsl_resource_binding_used(spv::ExecutionModel model, uint32_t set, uint32_t binding) const; + + // Controls which storage buffer bindings will be forced to be declared as UAVs. + void set_hlsl_force_storage_buffer_as_uav(uint32_t desc_set, uint32_t binding); + + // By default, these magic buffers are not assigned a specific binding. + void set_hlsl_aux_buffer_binding(HLSLAuxBinding binding, uint32_t register_index, uint32_t register_space); + void unset_hlsl_aux_buffer_binding(HLSLAuxBinding binding); + bool is_hlsl_aux_buffer_binding_used(HLSLAuxBinding binding) const; + +private: + std::string type_to_glsl(const SPIRType &type, uint32_t id = 0) override; + std::string image_type_hlsl(const SPIRType &type, uint32_t id); + std::string image_type_hlsl_modern(const SPIRType &type, uint32_t id); + std::string image_type_hlsl_legacy(const SPIRType &type, uint32_t id); + void emit_function_prototype(SPIRFunction &func, const Bitset &return_flags) override; + void emit_hlsl_entry_point(); + void emit_header() override; + void emit_resources(); + void emit_interface_block_globally(const SPIRVariable &type); + void emit_interface_block_in_struct(const SPIRVariable &var, std::unordered_set &active_locations); + void emit_interface_block_member_in_struct(const SPIRVariable &var, uint32_t member_index, uint32_t location, + std::unordered_set &active_locations); + void emit_builtin_inputs_in_struct(); + void emit_builtin_outputs_in_struct(); + void emit_builtin_primitive_outputs_in_struct(); + void emit_texture_op(const Instruction &i, bool sparse) override; + void emit_instruction(const Instruction &instruction) override; + void emit_glsl_op(uint32_t result_type, uint32_t result_id, uint32_t op, const uint32_t *args, + uint32_t count) override; + void emit_buffer_block(const SPIRVariable &type) override; + void emit_push_constant_block(const SPIRVariable &var) override; + void emit_uniform(const SPIRVariable &var) override; + void emit_modern_uniform(const SPIRVariable &var); + void emit_legacy_uniform(const SPIRVariable &var); + void emit_specialization_constants_and_structs(); + void emit_composite_constants(); + void emit_fixup() override; + std::string builtin_to_glsl(spv::BuiltIn builtin, spv::StorageClass storage) override; + std::string layout_for_member(const SPIRType &type, uint32_t index) override; + std::string to_interpolation_qualifiers(const Bitset &flags) override; + std::string bitcast_glsl_op(const SPIRType &result_type, const SPIRType &argument_type) override; + bool emit_complex_bitcast(uint32_t result_type, uint32_t id, uint32_t op0) override; + std::string to_func_call_arg(const SPIRFunction::Parameter &arg, uint32_t id) override; + std::string to_sampler_expression(uint32_t id); + std::string to_resource_binding(const SPIRVariable &var); + std::string to_resource_binding_sampler(const SPIRVariable &var); + std::string to_resource_register(HLSLBindingFlagBits flag, char space, uint32_t binding, uint32_t set); + std::string to_initializer_expression(const SPIRVariable &var) override; + void emit_sampled_image_op(uint32_t result_type, uint32_t result_id, uint32_t image_id, uint32_t samp_id) override; + void emit_access_chain(const Instruction &instruction); + void emit_load(const Instruction &instruction); + void read_access_chain(std::string *expr, const std::string &lhs, const SPIRAccessChain &chain); + void read_access_chain_struct(const std::string &lhs, const SPIRAccessChain &chain); + void read_access_chain_array(const std::string &lhs, const SPIRAccessChain &chain); + void write_access_chain(const SPIRAccessChain &chain, uint32_t value, const SmallVector &composite_chain); + void write_access_chain_struct(const SPIRAccessChain &chain, uint32_t value, + const SmallVector &composite_chain); + void write_access_chain_array(const SPIRAccessChain &chain, uint32_t value, + const SmallVector &composite_chain); + std::string write_access_chain_value(uint32_t value, const SmallVector &composite_chain, bool enclose); + void emit_store(const Instruction &instruction); + void emit_atomic(const uint32_t *ops, uint32_t length, spv::Op op); + void emit_subgroup_op(const Instruction &i) override; + void emit_block_hints(const SPIRBlock &block) override; + + void emit_struct_member(const SPIRType &type, uint32_t member_type_id, uint32_t index, const std::string &qualifier, + uint32_t base_offset = 0) override; + void emit_rayquery_function(const char *commited, const char *candidate, const uint32_t *ops); + + const char *to_storage_qualifiers_glsl(const SPIRVariable &var) override; + void replace_illegal_names() override; + + bool is_hlsl_force_storage_buffer_as_uav(ID id) const; + + Options hlsl_options; + + // TODO: Refactor this to be more similar to MSL, maybe have some common system in place? + bool requires_op_fmod = false; + bool requires_fp16_packing = false; + bool requires_uint2_packing = false; + bool requires_explicit_fp16_packing = false; + bool requires_unorm8_packing = false; + bool requires_snorm8_packing = false; + bool requires_unorm16_packing = false; + bool requires_snorm16_packing = false; + bool requires_bitfield_insert = false; + bool requires_bitfield_extract = false; + bool requires_inverse_2x2 = false; + bool requires_inverse_3x3 = false; + bool requires_inverse_4x4 = false; + bool requires_scalar_reflect = false; + bool requires_scalar_refract = false; + bool requires_scalar_faceforward = false; + + struct TextureSizeVariants + { + // MSVC 2013 workaround. + TextureSizeVariants() + { + srv = 0; + for (auto &unorm : uav) + for (auto &u : unorm) + u = 0; + } + uint64_t srv; + uint64_t uav[3][4]; + } required_texture_size_variants; + + void require_texture_query_variant(uint32_t var_id); + void emit_texture_size_variants(uint64_t variant_mask, const char *vecsize_qualifier, bool uav, + const char *type_qualifier); + + enum TextureQueryVariantDim + { + Query1D = 0, + Query1DArray, + Query2D, + Query2DArray, + Query3D, + QueryBuffer, + QueryCube, + QueryCubeArray, + Query2DMS, + Query2DMSArray, + QueryDimCount + }; + + enum TextureQueryVariantType + { + QueryTypeFloat = 0, + QueryTypeInt = 16, + QueryTypeUInt = 32, + QueryTypeCount = 3 + }; + + enum BitcastType + { + TypeNormal, + TypePackUint2x32, + TypeUnpackUint64 + }; + + void analyze_meshlet_writes(); + void analyze_meshlet_writes(uint32_t func_id, uint32_t id_per_vertex, uint32_t id_per_primitive, + std::unordered_set &processed_func_ids); + + BitcastType get_bitcast_type(uint32_t result_type, uint32_t op0); + + void emit_builtin_variables(); + bool require_output = false; + bool require_input = false; + SmallVector remap_vertex_attributes; + + uint32_t type_to_consumed_locations(const SPIRType &type) const; + + std::string to_semantic(uint32_t location, spv::ExecutionModel em, spv::StorageClass sc); + + uint32_t num_workgroups_builtin = 0; + HLSLBindingFlags resource_binding_flags = 0; + + // Custom root constant layout, which should be emitted + // when translating push constant ranges. + std::vector root_constants_layout; + + void validate_shader_model(); + + std::string get_unique_identifier(); + uint32_t unique_identifier_count = 0; + + std::unordered_map, InternalHasher> resource_bindings; + void remap_hlsl_resource_binding(HLSLBindingFlagBits type, uint32_t &desc_set, uint32_t &binding); + + std::unordered_set force_uav_buffer_bindings; + + struct + { + uint32_t register_index = 0; + uint32_t register_space = 0; + bool explicit_binding = false; + bool used = false; + } base_vertex_info; + + // Returns true for BuiltInSampleMask because gl_SampleMask[] is an array in SPIR-V, but SV_Coverage is a scalar in HLSL. + bool builtin_translates_to_nonarray(spv::BuiltIn builtin) const override; + + std::vector composite_selection_workaround_types; + + std::string get_inner_entry_point_name() const; +}; +} // namespace SPIRV_CROSS_NAMESPACE + +#endif diff --git a/local_packages/include/spirv_cross/spirv_msl.hpp b/local_packages/include/spirv_cross/spirv_msl.hpp new file mode 100644 index 0000000..7cd34c7 --- /dev/null +++ b/local_packages/include/spirv_cross/spirv_msl.hpp @@ -0,0 +1,1273 @@ +/* + * Copyright 2016-2021 The Brenwill Workshop Ltd. + * SPDX-License-Identifier: Apache-2.0 OR MIT + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * At your option, you may choose to accept this material under either: + * 1. The Apache License, Version 2.0, found at , or + * 2. The MIT License, found at . + */ + +#ifndef SPIRV_CROSS_MSL_HPP +#define SPIRV_CROSS_MSL_HPP + +#include "spirv_glsl.hpp" +#include +#include +#include +#include +#include + +namespace SPIRV_CROSS_NAMESPACE +{ + +// Indicates the format of a shader interface variable. Currently limited to specifying +// if the input is an 8-bit unsigned integer, 16-bit unsigned integer, or +// some other format. +enum MSLShaderVariableFormat +{ + MSL_SHADER_VARIABLE_FORMAT_OTHER = 0, + MSL_SHADER_VARIABLE_FORMAT_UINT8 = 1, + MSL_SHADER_VARIABLE_FORMAT_UINT16 = 2, + MSL_SHADER_VARIABLE_FORMAT_ANY16 = 3, + MSL_SHADER_VARIABLE_FORMAT_ANY32 = 4, + + // Deprecated aliases. + MSL_VERTEX_FORMAT_OTHER = MSL_SHADER_VARIABLE_FORMAT_OTHER, + MSL_VERTEX_FORMAT_UINT8 = MSL_SHADER_VARIABLE_FORMAT_UINT8, + MSL_VERTEX_FORMAT_UINT16 = MSL_SHADER_VARIABLE_FORMAT_UINT16, + MSL_SHADER_INPUT_FORMAT_OTHER = MSL_SHADER_VARIABLE_FORMAT_OTHER, + MSL_SHADER_INPUT_FORMAT_UINT8 = MSL_SHADER_VARIABLE_FORMAT_UINT8, + MSL_SHADER_INPUT_FORMAT_UINT16 = MSL_SHADER_VARIABLE_FORMAT_UINT16, + MSL_SHADER_INPUT_FORMAT_ANY16 = MSL_SHADER_VARIABLE_FORMAT_ANY16, + MSL_SHADER_INPUT_FORMAT_ANY32 = MSL_SHADER_VARIABLE_FORMAT_ANY32, + + MSL_SHADER_VARIABLE_FORMAT_INT_MAX = 0x7fffffff +}; + +// Indicates the rate at which a variable changes value, one of: per-vertex, +// per-primitive, or per-patch. +enum MSLShaderVariableRate +{ + MSL_SHADER_VARIABLE_RATE_PER_VERTEX = 0, + MSL_SHADER_VARIABLE_RATE_PER_PRIMITIVE = 1, + MSL_SHADER_VARIABLE_RATE_PER_PATCH = 2, + + MSL_SHADER_VARIABLE_RATE_INT_MAX = 0x7fffffff, +}; + +// Defines MSL characteristics of a shader interface variable at a particular location. +// After compilation, it is possible to query whether or not this location was used. +// If vecsize is nonzero, it must be greater than or equal to the vecsize declared in the shader, +// or behavior is undefined. +struct MSLShaderInterfaceVariable +{ + uint32_t location = 0; + uint32_t component = 0; + MSLShaderVariableFormat format = MSL_SHADER_VARIABLE_FORMAT_OTHER; + spv::BuiltIn builtin = spv::BuiltInMax; + uint32_t vecsize = 0; + MSLShaderVariableRate rate = MSL_SHADER_VARIABLE_RATE_PER_VERTEX; +}; + +// Matches the binding index of a MSL resource for a binding within a descriptor set. +// Taken together, the stage, desc_set and binding combine to form a reference to a resource +// descriptor used in a particular shading stage. The count field indicates the number of +// resources consumed by this binding, if the binding represents an array of resources. +// If the resource array is a run-time-sized array, which are legal in GLSL or SPIR-V, this value +// will be used to declare the array size in MSL, which does not support run-time-sized arrays. +// If pad_argument_buffer_resources is enabled, the base_type and count values are used to +// specify the base type and array size of the resource in the argument buffer, if that resource +// is not defined and used by the shader. With pad_argument_buffer_resources enabled, this +// information will be used to pad the argument buffer structure, in order to align that +// structure consistently for all uses, across all shaders, of the descriptor set represented +// by the arugment buffer. If pad_argument_buffer_resources is disabled, base_type does not +// need to be populated, and if the resource is also not a run-time sized array, the count +// field does not need to be populated. +// If using MSL 2.0 argument buffers, the descriptor set is not marked as a discrete descriptor set, +// and (for iOS only) the resource is not a storage image (sampled != 2), the binding reference we +// remap to will become an [[id(N)]] attribute within the "descriptor set" argument buffer structure. +// For resources which are bound in the "classic" MSL 1.0 way or discrete descriptors, the remap will +// become a [[buffer(N)]], [[texture(N)]] or [[sampler(N)]] depending on the resource types used. +struct MSLResourceBinding +{ + spv::ExecutionModel stage = spv::ExecutionModelMax; + SPIRType::BaseType basetype = SPIRType::Unknown; + uint32_t desc_set = 0; + uint32_t binding = 0; + uint32_t count = 0; + uint32_t msl_buffer = 0; + uint32_t msl_texture = 0; + uint32_t msl_sampler = 0; +}; + +enum MSLSamplerCoord +{ + MSL_SAMPLER_COORD_NORMALIZED = 0, + MSL_SAMPLER_COORD_PIXEL = 1, + MSL_SAMPLER_INT_MAX = 0x7fffffff +}; + +enum MSLSamplerFilter +{ + MSL_SAMPLER_FILTER_NEAREST = 0, + MSL_SAMPLER_FILTER_LINEAR = 1, + MSL_SAMPLER_FILTER_INT_MAX = 0x7fffffff +}; + +enum MSLSamplerMipFilter +{ + MSL_SAMPLER_MIP_FILTER_NONE = 0, + MSL_SAMPLER_MIP_FILTER_NEAREST = 1, + MSL_SAMPLER_MIP_FILTER_LINEAR = 2, + MSL_SAMPLER_MIP_FILTER_INT_MAX = 0x7fffffff +}; + +enum MSLSamplerAddress +{ + MSL_SAMPLER_ADDRESS_CLAMP_TO_ZERO = 0, + MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE = 1, + MSL_SAMPLER_ADDRESS_CLAMP_TO_BORDER = 2, + MSL_SAMPLER_ADDRESS_REPEAT = 3, + MSL_SAMPLER_ADDRESS_MIRRORED_REPEAT = 4, + MSL_SAMPLER_ADDRESS_INT_MAX = 0x7fffffff +}; + +enum MSLSamplerCompareFunc +{ + MSL_SAMPLER_COMPARE_FUNC_NEVER = 0, + MSL_SAMPLER_COMPARE_FUNC_LESS = 1, + MSL_SAMPLER_COMPARE_FUNC_LESS_EQUAL = 2, + MSL_SAMPLER_COMPARE_FUNC_GREATER = 3, + MSL_SAMPLER_COMPARE_FUNC_GREATER_EQUAL = 4, + MSL_SAMPLER_COMPARE_FUNC_EQUAL = 5, + MSL_SAMPLER_COMPARE_FUNC_NOT_EQUAL = 6, + MSL_SAMPLER_COMPARE_FUNC_ALWAYS = 7, + MSL_SAMPLER_COMPARE_FUNC_INT_MAX = 0x7fffffff +}; + +enum MSLSamplerBorderColor +{ + MSL_SAMPLER_BORDER_COLOR_TRANSPARENT_BLACK = 0, + MSL_SAMPLER_BORDER_COLOR_OPAQUE_BLACK = 1, + MSL_SAMPLER_BORDER_COLOR_OPAQUE_WHITE = 2, + MSL_SAMPLER_BORDER_COLOR_INT_MAX = 0x7fffffff +}; + +enum MSLFormatResolution +{ + MSL_FORMAT_RESOLUTION_444 = 0, + MSL_FORMAT_RESOLUTION_422, + MSL_FORMAT_RESOLUTION_420, + MSL_FORMAT_RESOLUTION_INT_MAX = 0x7fffffff +}; + +enum MSLChromaLocation +{ + MSL_CHROMA_LOCATION_COSITED_EVEN = 0, + MSL_CHROMA_LOCATION_MIDPOINT, + MSL_CHROMA_LOCATION_INT_MAX = 0x7fffffff +}; + +enum MSLComponentSwizzle +{ + MSL_COMPONENT_SWIZZLE_IDENTITY = 0, + MSL_COMPONENT_SWIZZLE_ZERO, + MSL_COMPONENT_SWIZZLE_ONE, + MSL_COMPONENT_SWIZZLE_R, + MSL_COMPONENT_SWIZZLE_G, + MSL_COMPONENT_SWIZZLE_B, + MSL_COMPONENT_SWIZZLE_A, + MSL_COMPONENT_SWIZZLE_INT_MAX = 0x7fffffff +}; + +enum MSLSamplerYCbCrModelConversion +{ + MSL_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY = 0, + MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY, + MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_BT_709, + MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_BT_601, + MSL_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_BT_2020, + MSL_SAMPLER_YCBCR_MODEL_CONVERSION_INT_MAX = 0x7fffffff +}; + +enum MSLSamplerYCbCrRange +{ + MSL_SAMPLER_YCBCR_RANGE_ITU_FULL = 0, + MSL_SAMPLER_YCBCR_RANGE_ITU_NARROW, + MSL_SAMPLER_YCBCR_RANGE_INT_MAX = 0x7fffffff +}; + +struct MSLConstexprSampler +{ + MSLSamplerCoord coord = MSL_SAMPLER_COORD_NORMALIZED; + MSLSamplerFilter min_filter = MSL_SAMPLER_FILTER_NEAREST; + MSLSamplerFilter mag_filter = MSL_SAMPLER_FILTER_NEAREST; + MSLSamplerMipFilter mip_filter = MSL_SAMPLER_MIP_FILTER_NONE; + MSLSamplerAddress s_address = MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE; + MSLSamplerAddress t_address = MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE; + MSLSamplerAddress r_address = MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE; + MSLSamplerCompareFunc compare_func = MSL_SAMPLER_COMPARE_FUNC_NEVER; + MSLSamplerBorderColor border_color = MSL_SAMPLER_BORDER_COLOR_TRANSPARENT_BLACK; + float lod_clamp_min = 0.0f; + float lod_clamp_max = 1000.0f; + int max_anisotropy = 1; + + // Sampler Y'CbCr conversion parameters + uint32_t planes = 0; + MSLFormatResolution resolution = MSL_FORMAT_RESOLUTION_444; + MSLSamplerFilter chroma_filter = MSL_SAMPLER_FILTER_NEAREST; + MSLChromaLocation x_chroma_offset = MSL_CHROMA_LOCATION_COSITED_EVEN; + MSLChromaLocation y_chroma_offset = MSL_CHROMA_LOCATION_COSITED_EVEN; + MSLComponentSwizzle swizzle[4]; // IDENTITY, IDENTITY, IDENTITY, IDENTITY + MSLSamplerYCbCrModelConversion ycbcr_model = MSL_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY; + MSLSamplerYCbCrRange ycbcr_range = MSL_SAMPLER_YCBCR_RANGE_ITU_FULL; + uint32_t bpc = 8; + + bool compare_enable = false; + bool lod_clamp_enable = false; + bool anisotropy_enable = false; + bool ycbcr_conversion_enable = false; + + MSLConstexprSampler() + { + for (uint32_t i = 0; i < 4; i++) + swizzle[i] = MSL_COMPONENT_SWIZZLE_IDENTITY; + } + bool swizzle_is_identity() const + { + return (swizzle[0] == MSL_COMPONENT_SWIZZLE_IDENTITY && swizzle[1] == MSL_COMPONENT_SWIZZLE_IDENTITY && + swizzle[2] == MSL_COMPONENT_SWIZZLE_IDENTITY && swizzle[3] == MSL_COMPONENT_SWIZZLE_IDENTITY); + } + bool swizzle_has_one_or_zero() const + { + return (swizzle[0] == MSL_COMPONENT_SWIZZLE_ZERO || swizzle[0] == MSL_COMPONENT_SWIZZLE_ONE || + swizzle[1] == MSL_COMPONENT_SWIZZLE_ZERO || swizzle[1] == MSL_COMPONENT_SWIZZLE_ONE || + swizzle[2] == MSL_COMPONENT_SWIZZLE_ZERO || swizzle[2] == MSL_COMPONENT_SWIZZLE_ONE || + swizzle[3] == MSL_COMPONENT_SWIZZLE_ZERO || swizzle[3] == MSL_COMPONENT_SWIZZLE_ONE); + } +}; + +// Special constant used in a MSLResourceBinding desc_set +// element to indicate the bindings for the push constants. +// Kinda deprecated. Just use ResourceBindingPushConstant{DescriptorSet,Binding} directly. +static const uint32_t kPushConstDescSet = ResourceBindingPushConstantDescriptorSet; + +// Special constant used in a MSLResourceBinding binding +// element to indicate the bindings for the push constants. +// Kinda deprecated. Just use ResourceBindingPushConstant{DescriptorSet,Binding} directly. +static const uint32_t kPushConstBinding = ResourceBindingPushConstantBinding; + +// Special constant used in a MSLResourceBinding binding +// element to indicate the buffer binding for swizzle buffers. +static const uint32_t kSwizzleBufferBinding = ~(1u); + +// Special constant used in a MSLResourceBinding binding +// element to indicate the buffer binding for buffer size buffers to support OpArrayLength. +static const uint32_t kBufferSizeBufferBinding = ~(2u); + +// Special constant used in a MSLResourceBinding binding +// element to indicate the buffer binding used for the argument buffer itself. +// This buffer binding should be kept as small as possible as all automatic bindings for buffers +// will start at max(kArgumentBufferBinding) + 1. +static const uint32_t kArgumentBufferBinding = ~(3u); + +static const uint32_t kMaxArgumentBuffers = 8; + +// The arbitrary maximum for the nesting of array of array copies. +static const uint32_t kArrayCopyMultidimMax = 6; + +// Decompiles SPIR-V to Metal Shading Language +class CompilerMSL : public CompilerGLSL +{ +public: + // Options for compiling to Metal Shading Language + struct Options + { + typedef enum + { + iOS = 0, + macOS = 1 + } Platform; + + Platform platform = macOS; + uint32_t msl_version = make_msl_version(1, 2); + uint32_t texel_buffer_texture_width = 4096; // Width of 2D Metal textures used as 1D texel buffers + uint32_t r32ui_linear_texture_alignment = 4; + uint32_t r32ui_alignment_constant_id = 65535; + uint32_t swizzle_buffer_index = 30; + uint32_t indirect_params_buffer_index = 29; + uint32_t shader_output_buffer_index = 28; + uint32_t shader_patch_output_buffer_index = 27; + uint32_t shader_tess_factor_buffer_index = 26; + uint32_t buffer_size_buffer_index = 25; + uint32_t view_mask_buffer_index = 24; + uint32_t dynamic_offsets_buffer_index = 23; + uint32_t shader_input_buffer_index = 22; + uint32_t shader_index_buffer_index = 21; + uint32_t shader_patch_input_buffer_index = 20; + uint32_t shader_input_wg_index = 0; + uint32_t device_index = 0; + uint32_t enable_frag_output_mask = 0xffffffff; + // Metal doesn't allow setting a fixed sample mask directly in the pipeline. + // We can evade this restriction by ANDing the internal sample_mask output + // of the shader with the additional fixed sample mask. + uint32_t additional_fixed_sample_mask = 0xffffffff; + bool enable_point_size_builtin = true; + bool enable_frag_depth_builtin = true; + bool enable_frag_stencil_ref_builtin = true; + bool disable_rasterization = false; + bool capture_output_to_buffer = false; + bool swizzle_texture_samples = false; + bool tess_domain_origin_lower_left = false; + bool multiview = false; + bool multiview_layered_rendering = true; + bool view_index_from_device_index = false; + bool dispatch_base = false; + bool texture_1D_as_2D = false; + + // Enable use of MSL 2.0 indirect argument buffers. + // MSL 2.0 must also be enabled. + bool argument_buffers = false; + + // Ensures vertex and instance indices start at zero. This reflects the behavior of HLSL with SV_VertexID and SV_InstanceID. + bool enable_base_index_zero = false; + + // Fragment output in MSL must have at least as many components as the render pass. + // Add support to explicit pad out components. + bool pad_fragment_output_components = false; + + // Specifies whether the iOS target version supports the [[base_vertex]] and [[base_instance]] attributes. + bool ios_support_base_vertex_instance = false; + + // Use Metal's native frame-buffer fetch API for subpass inputs. + bool use_framebuffer_fetch_subpasses = false; + + // Enables use of "fma" intrinsic for invariant float math + bool invariant_float_math = false; + + // Emulate texturecube_array with texture2d_array for iOS where this type is not available + bool emulate_cube_array = false; + + // Allow user to enable decoration binding + bool enable_decoration_binding = false; + + // Requires MSL 2.1, use the native support for texel buffers. + bool texture_buffer_native = false; + + // Forces all resources which are part of an argument buffer to be considered active. + // This ensures ABI compatibility between shaders where some resources might be unused, + // and would otherwise declare a different IAB. + bool force_active_argument_buffer_resources = false; + + // Aligns each resource in an argument buffer to its assigned index value, id(N), + // by adding synthetic padding members in the argument buffer struct for any resources + // in the argument buffer that are not defined and used by the shader. This allows + // the shader to index into the correct argument in a descriptor set argument buffer + // that is shared across shaders, where not all resources in the argument buffer are + // defined in each shader. For this to work, an MSLResourceBinding must be provided for + // all descriptors in any descriptor set held in an argument buffer in the shader, and + // that MSLResourceBinding must have the basetype and count members populated correctly. + // The implementation here assumes any inline blocks in the argument buffer is provided + // in a Metal buffer, and doesn't take into consideration inline blocks that are + // optionally embedded directly into the argument buffer via add_inline_uniform_block(). + bool pad_argument_buffer_resources = false; + + // Forces the use of plain arrays, which works around certain driver bugs on certain versions + // of Intel Macbooks. See https://github.com/KhronosGroup/SPIRV-Cross/issues/1210. + // May reduce performance in scenarios where arrays are copied around as value-types. + bool force_native_arrays = false; + + // If a shader writes clip distance, also emit user varyings which + // can be read in subsequent stages. + bool enable_clip_distance_user_varying = true; + + // In a tessellation control shader, assume that more than one patch can be processed in a + // single workgroup. This requires changes to the way the InvocationId and PrimitiveId + // builtins are processed, but should result in more efficient usage of the GPU. + bool multi_patch_workgroup = false; + + // Use storage buffers instead of vertex-style attributes for tessellation evaluation + // input. This may require conversion of inputs in the generated post-tessellation + // vertex shader, but allows the use of nested arrays. + bool raw_buffer_tese_input = false; + + // If set, a vertex shader will be compiled as part of a tessellation pipeline. + // It will be translated as a compute kernel, so it can use the global invocation ID + // to index the output buffer. + bool vertex_for_tessellation = false; + + // Assume that SubpassData images have multiple layers. Layered input attachments + // are addressed relative to the Layer output from the vertex pipeline. This option + // has no effect with multiview, since all input attachments are assumed to be layered + // and will be addressed using the current ViewIndex. + bool arrayed_subpass_input = false; + + // Whether to use SIMD-group or quadgroup functions to implement group non-uniform + // operations. Some GPUs on iOS do not support the SIMD-group functions, only the + // quadgroup functions. + bool ios_use_simdgroup_functions = false; + + // If set, the subgroup size will be assumed to be one, and subgroup-related + // builtins and operations will be emitted accordingly. This mode is intended to + // be used by MoltenVK on hardware/software configurations which do not provide + // sufficient support for subgroups. + bool emulate_subgroups = false; + + // If nonzero, a fixed subgroup size to assume. Metal, similarly to VK_EXT_subgroup_size_control, + // allows the SIMD-group size (aka thread execution width) to vary depending on + // register usage and requirements. In certain circumstances--for example, a pipeline + // in MoltenVK without VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT-- + // this is undesirable. This fixes the value of the SubgroupSize builtin, instead of + // mapping it to the Metal builtin [[thread_execution_width]]. If the thread + // execution width is reduced, the extra invocations will appear to be inactive. + // If zero, the SubgroupSize will be allowed to vary, and the builtin will be mapped + // to the Metal [[thread_execution_width]] builtin. + uint32_t fixed_subgroup_size = 0; + + enum class IndexType + { + None = 0, + UInt16 = 1, + UInt32 = 2 + }; + + // The type of index in the index buffer, if present. For a compute shader, Metal + // requires specifying the indexing at pipeline creation, rather than at draw time + // as with graphics pipelines. This means we must create three different pipelines, + // for no indexing, 16-bit indices, and 32-bit indices. Each requires different + // handling for the gl_VertexIndex builtin. We may as well, then, create three + // different shaders for these three scenarios. + IndexType vertex_index_type = IndexType::None; + + // If set, a dummy [[sample_id]] input is added to a fragment shader if none is present. + // This will force the shader to run at sample rate, assuming Metal does not optimize + // the extra threads away. + bool force_sample_rate_shading = false; + + // If set, gl_HelperInvocation will be set manually whenever a fragment is discarded. + // Some Metal devices have a bug where simd_is_helper_thread() does not return true + // after a fragment has been discarded. This is a workaround that is only expected to be needed + // until the bug is fixed in Metal; it is provided as an option to allow disabling it when that occurs. + bool manual_helper_invocation_updates = true; + + // If set, extra checks will be emitted in fragment shaders to prevent writes + // from discarded fragments. Some Metal devices have a bug where writes to storage resources + // from discarded fragment threads continue to occur, despite the fragment being + // discarded. This is a workaround that is only expected to be needed until the + // bug is fixed in Metal; it is provided as an option so it can be enabled + // only when the bug is present. + bool check_discarded_frag_stores = false; + + bool is_ios() const + { + return platform == iOS; + } + + bool is_macos() const + { + return platform == macOS; + } + + bool use_quadgroup_operation() const + { + return is_ios() && !ios_use_simdgroup_functions; + } + + void set_msl_version(uint32_t major, uint32_t minor = 0, uint32_t patch = 0) + { + msl_version = make_msl_version(major, minor, patch); + } + + bool supports_msl_version(uint32_t major, uint32_t minor = 0, uint32_t patch = 0) const + { + return msl_version >= make_msl_version(major, minor, patch); + } + + static uint32_t make_msl_version(uint32_t major, uint32_t minor = 0, uint32_t patch = 0) + { + return (major * 10000) + (minor * 100) + patch; + } + }; + + const Options &get_msl_options() const + { + return msl_options; + } + + void set_msl_options(const Options &opts) + { + msl_options = opts; + } + + // Provide feedback to calling API to allow runtime to disable pipeline + // rasterization if vertex shader requires rasterization to be disabled. + bool get_is_rasterization_disabled() const + { + return is_rasterization_disabled && (get_entry_point().model == spv::ExecutionModelVertex || + get_entry_point().model == spv::ExecutionModelTessellationControl || + get_entry_point().model == spv::ExecutionModelTessellationEvaluation); + } + + // Provide feedback to calling API to allow it to pass an auxiliary + // swizzle buffer if the shader needs it. + bool needs_swizzle_buffer() const + { + return used_swizzle_buffer; + } + + // Provide feedback to calling API to allow it to pass a buffer + // containing STORAGE_BUFFER buffer sizes to support OpArrayLength. + bool needs_buffer_size_buffer() const + { + return !buffers_requiring_array_length.empty(); + } + + bool buffer_requires_array_length(VariableID id) const + { + return buffers_requiring_array_length.count(id) != 0; + } + + // Provide feedback to calling API to allow it to pass a buffer + // containing the view mask for the current multiview subpass. + bool needs_view_mask_buffer() const + { + return msl_options.multiview && !msl_options.view_index_from_device_index; + } + + // Provide feedback to calling API to allow it to pass a buffer + // containing the dispatch base workgroup ID. + bool needs_dispatch_base_buffer() const + { + return msl_options.dispatch_base && !msl_options.supports_msl_version(1, 2); + } + + // Provide feedback to calling API to allow it to pass an output + // buffer if the shader needs it. + bool needs_output_buffer() const + { + return capture_output_to_buffer && stage_out_var_id != ID(0); + } + + // Provide feedback to calling API to allow it to pass a patch output + // buffer if the shader needs it. + bool needs_patch_output_buffer() const + { + return capture_output_to_buffer && patch_stage_out_var_id != ID(0); + } + + // Provide feedback to calling API to allow it to pass an input threadgroup + // buffer if the shader needs it. + bool needs_input_threadgroup_mem() const + { + return capture_output_to_buffer && stage_in_var_id != ID(0); + } + + explicit CompilerMSL(std::vector spirv); + CompilerMSL(const uint32_t *ir, size_t word_count); + explicit CompilerMSL(const ParsedIR &ir); + explicit CompilerMSL(ParsedIR &&ir); + + // input is a shader interface variable description used to fix up shader input variables. + // If shader inputs are provided, is_msl_shader_input_used() will return true after + // calling ::compile() if the location were used by the MSL code. + void add_msl_shader_input(const MSLShaderInterfaceVariable &input); + + // output is a shader interface variable description used to fix up shader output variables. + // If shader outputs are provided, is_msl_shader_output_used() will return true after + // calling ::compile() if the location were used by the MSL code. + void add_msl_shader_output(const MSLShaderInterfaceVariable &output); + + // resource is a resource binding to indicate the MSL buffer, + // texture or sampler index to use for a particular SPIR-V description set + // and binding. If resource bindings are provided, + // is_msl_resource_binding_used() will return true after calling ::compile() if + // the set/binding combination was used by the MSL code. + void add_msl_resource_binding(const MSLResourceBinding &resource); + + // desc_set and binding are the SPIR-V descriptor set and binding of a buffer resource + // in this shader. index is the index within the dynamic offset buffer to use. This + // function marks that resource as using a dynamic offset (VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC + // or VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC). This function only has any effect if argument buffers + // are enabled. If so, the buffer will have its address adjusted at the beginning of the shader with + // an offset taken from the dynamic offset buffer. + void add_dynamic_buffer(uint32_t desc_set, uint32_t binding, uint32_t index); + + // desc_set and binding are the SPIR-V descriptor set and binding of a buffer resource + // in this shader. This function marks that resource as an inline uniform block + // (VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT). This function only has any effect if argument buffers + // are enabled. If so, the buffer block will be directly embedded into the argument + // buffer, instead of being referenced indirectly via pointer. + void add_inline_uniform_block(uint32_t desc_set, uint32_t binding); + + // When using MSL argument buffers, we can force "classic" MSL 1.0 binding schemes for certain descriptor sets. + // This corresponds to VK_KHR_push_descriptor in Vulkan. + void add_discrete_descriptor_set(uint32_t desc_set); + + // If an argument buffer is large enough, it may need to be in the device storage space rather than + // constant. Opt-in to this behavior here on a per set basis. + void set_argument_buffer_device_address_space(uint32_t desc_set, bool device_storage); + + // Query after compilation is done. This allows you to check if an input location was used by the shader. + bool is_msl_shader_input_used(uint32_t location); + + // Query after compilation is done. This allows you to check if an output location were used by the shader. + bool is_msl_shader_output_used(uint32_t location); + + // If not using add_msl_shader_input, it's possible + // that certain builtin attributes need to be automatically assigned locations. + // This is typical for tessellation builtin inputs such as tess levels, gl_Position, etc. + // This returns k_unknown_location if the location was explicitly assigned with + // add_msl_shader_input or the builtin is not used, otherwise returns N in [[attribute(N)]]. + uint32_t get_automatic_builtin_input_location(spv::BuiltIn builtin) const; + + // If not using add_msl_shader_output, it's possible + // that certain builtin attributes need to be automatically assigned locations. + // This is typical for tessellation builtin outputs such as tess levels, gl_Position, etc. + // This returns k_unknown_location if the location were explicitly assigned with + // add_msl_shader_output or the builtin were not used, otherwise returns N in [[attribute(N)]]. + uint32_t get_automatic_builtin_output_location(spv::BuiltIn builtin) const; + + // NOTE: Only resources which are remapped using add_msl_resource_binding will be reported here. + // Constexpr samplers are always assumed to be emitted. + // No specific MSLResourceBinding remapping is required for constexpr samplers as long as they are remapped + // by remap_constexpr_sampler(_by_binding). + bool is_msl_resource_binding_used(spv::ExecutionModel model, uint32_t set, uint32_t binding) const; + + // This must only be called after a successful call to CompilerMSL::compile(). + // For a variable resource ID obtained through reflection API, report the automatically assigned resource index. + // If the descriptor set was part of an argument buffer, report the [[id(N)]], + // or [[buffer/texture/sampler]] binding for other resources. + // If the resource was a combined image sampler, report the image binding here, + // use the _secondary version of this call to query the sampler half of the resource. + // If no binding exists, uint32_t(-1) is returned. + uint32_t get_automatic_msl_resource_binding(uint32_t id) const; + + // Same as get_automatic_msl_resource_binding, but should only be used for combined image samplers, in which case the + // sampler's binding is returned instead. For any other resource type, -1 is returned. + // Secondary bindings are also used for the auxillary image atomic buffer. + uint32_t get_automatic_msl_resource_binding_secondary(uint32_t id) const; + + // Same as get_automatic_msl_resource_binding, but should only be used for combined image samplers for multiplanar images, + // in which case the second plane's binding is returned instead. For any other resource type, -1 is returned. + uint32_t get_automatic_msl_resource_binding_tertiary(uint32_t id) const; + + // Same as get_automatic_msl_resource_binding, but should only be used for combined image samplers for triplanar images, + // in which case the third plane's binding is returned instead. For any other resource type, -1 is returned. + uint32_t get_automatic_msl_resource_binding_quaternary(uint32_t id) const; + + // Compiles the SPIR-V code into Metal Shading Language. + std::string compile() override; + + // Remap a sampler with ID to a constexpr sampler. + // Older iOS targets must use constexpr samplers in certain cases (PCF), + // so a static sampler must be used. + // The sampler will not consume a binding, but be declared in the entry point as a constexpr sampler. + // This can be used on both combined image/samplers (sampler2D) or standalone samplers. + // The remapped sampler must not be an array of samplers. + // Prefer remap_constexpr_sampler_by_binding unless you're also doing reflection anyways. + void remap_constexpr_sampler(VariableID id, const MSLConstexprSampler &sampler); + + // Same as remap_constexpr_sampler, except you provide set/binding, rather than variable ID. + // Remaps based on ID take priority over set/binding remaps. + void remap_constexpr_sampler_by_binding(uint32_t desc_set, uint32_t binding, const MSLConstexprSampler &sampler); + + // If using CompilerMSL::Options::pad_fragment_output_components, override the number of components we expect + // to use for a particular location. The default is 4 if number of components is not overridden. + void set_fragment_output_components(uint32_t location, uint32_t components); + + void set_combined_sampler_suffix(const char *suffix); + const char *get_combined_sampler_suffix() const; + +protected: + // An enum of SPIR-V functions that are implemented in additional + // source code that is added to the shader if necessary. + enum SPVFuncImpl : uint8_t + { + SPVFuncImplNone, + SPVFuncImplMod, + SPVFuncImplRadians, + SPVFuncImplDegrees, + SPVFuncImplFindILsb, + SPVFuncImplFindSMsb, + SPVFuncImplFindUMsb, + SPVFuncImplSSign, + SPVFuncImplArrayCopyMultidimBase, + // Unfortunately, we cannot use recursive templates in the MSL compiler properly, + // so stamp out variants up to some arbitrary maximum. + SPVFuncImplArrayCopy = SPVFuncImplArrayCopyMultidimBase + 1, + SPVFuncImplArrayOfArrayCopy2Dim = SPVFuncImplArrayCopyMultidimBase + 2, + SPVFuncImplArrayOfArrayCopy3Dim = SPVFuncImplArrayCopyMultidimBase + 3, + SPVFuncImplArrayOfArrayCopy4Dim = SPVFuncImplArrayCopyMultidimBase + 4, + SPVFuncImplArrayOfArrayCopy5Dim = SPVFuncImplArrayCopyMultidimBase + 5, + SPVFuncImplArrayOfArrayCopy6Dim = SPVFuncImplArrayCopyMultidimBase + 6, + SPVFuncImplTexelBufferCoords, + SPVFuncImplImage2DAtomicCoords, // Emulate texture2D atomic operations + SPVFuncImplFMul, + SPVFuncImplFAdd, + SPVFuncImplFSub, + SPVFuncImplQuantizeToF16, + SPVFuncImplCubemapTo2DArrayFace, + SPVFuncImplUnsafeArray, // Allow Metal to use the array template to make arrays a value type + SPVFuncImplStorageMatrix, // Allow threadgroup construction of matrices + SPVFuncImplInverse4x4, + SPVFuncImplInverse3x3, + SPVFuncImplInverse2x2, + // It is very important that this come before *Swizzle and ChromaReconstruct*, to ensure it's + // emitted before them. + SPVFuncImplForwardArgs, + // Likewise, this must come before *Swizzle. + SPVFuncImplGetSwizzle, + SPVFuncImplTextureSwizzle, + SPVFuncImplGatherSwizzle, + SPVFuncImplGatherCompareSwizzle, + SPVFuncImplSubgroupBroadcast, + SPVFuncImplSubgroupBroadcastFirst, + SPVFuncImplSubgroupBallot, + SPVFuncImplSubgroupBallotBitExtract, + SPVFuncImplSubgroupBallotFindLSB, + SPVFuncImplSubgroupBallotFindMSB, + SPVFuncImplSubgroupBallotBitCount, + SPVFuncImplSubgroupAllEqual, + SPVFuncImplSubgroupShuffle, + SPVFuncImplSubgroupShuffleXor, + SPVFuncImplSubgroupShuffleUp, + SPVFuncImplSubgroupShuffleDown, + SPVFuncImplQuadBroadcast, + SPVFuncImplQuadSwap, + SPVFuncImplReflectScalar, + SPVFuncImplRefractScalar, + SPVFuncImplFaceForwardScalar, + SPVFuncImplChromaReconstructNearest2Plane, + SPVFuncImplChromaReconstructNearest3Plane, + SPVFuncImplChromaReconstructLinear422CositedEven2Plane, + SPVFuncImplChromaReconstructLinear422CositedEven3Plane, + SPVFuncImplChromaReconstructLinear422Midpoint2Plane, + SPVFuncImplChromaReconstructLinear422Midpoint3Plane, + SPVFuncImplChromaReconstructLinear420XCositedEvenYCositedEven2Plane, + SPVFuncImplChromaReconstructLinear420XCositedEvenYCositedEven3Plane, + SPVFuncImplChromaReconstructLinear420XMidpointYCositedEven2Plane, + SPVFuncImplChromaReconstructLinear420XMidpointYCositedEven3Plane, + SPVFuncImplChromaReconstructLinear420XCositedEvenYMidpoint2Plane, + SPVFuncImplChromaReconstructLinear420XCositedEvenYMidpoint3Plane, + SPVFuncImplChromaReconstructLinear420XMidpointYMidpoint2Plane, + SPVFuncImplChromaReconstructLinear420XMidpointYMidpoint3Plane, + SPVFuncImplExpandITUFullRange, + SPVFuncImplExpandITUNarrowRange, + SPVFuncImplConvertYCbCrBT709, + SPVFuncImplConvertYCbCrBT601, + SPVFuncImplConvertYCbCrBT2020, + SPVFuncImplDynamicImageSampler, + }; + + // If the underlying resource has been used for comparison then duplicate loads of that resource must be too + // Use Metal's native frame-buffer fetch API for subpass inputs. + void emit_texture_op(const Instruction &i, bool sparse) override; + void emit_binary_ptr_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op); + std::string to_ptr_expression(uint32_t id, bool register_expression_read = true); + void emit_binary_unord_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op); + void emit_instruction(const Instruction &instr) override; + void emit_glsl_op(uint32_t result_type, uint32_t result_id, uint32_t op, const uint32_t *args, + uint32_t count) override; + void emit_spv_amd_shader_trinary_minmax_op(uint32_t result_type, uint32_t result_id, uint32_t op, + const uint32_t *args, uint32_t count) override; + void emit_header() override; + void emit_function_prototype(SPIRFunction &func, const Bitset &return_flags) override; + void emit_sampled_image_op(uint32_t result_type, uint32_t result_id, uint32_t image_id, uint32_t samp_id) override; + void emit_subgroup_op(const Instruction &i) override; + std::string to_texture_op(const Instruction &i, bool sparse, bool *forward, + SmallVector &inherited_expressions) override; + void emit_fixup() override; + std::string to_struct_member(const SPIRType &type, uint32_t member_type_id, uint32_t index, + const std::string &qualifier = ""); + void emit_struct_member(const SPIRType &type, uint32_t member_type_id, uint32_t index, + const std::string &qualifier = "", uint32_t base_offset = 0) override; + void emit_struct_padding_target(const SPIRType &type) override; + std::string type_to_glsl(const SPIRType &type, uint32_t id, bool member); + std::string type_to_glsl(const SPIRType &type, uint32_t id = 0) override; + void emit_block_hints(const SPIRBlock &block) override; + + // Allow Metal to use the array template to make arrays a value type + std::string type_to_array_glsl(const SPIRType &type) override; + std::string constant_op_expression(const SPIRConstantOp &cop) override; + + // Threadgroup arrays can't have a wrapper type + std::string variable_decl(const SPIRVariable &variable) override; + + bool variable_decl_is_remapped_storage(const SPIRVariable &variable, spv::StorageClass storage) const override; + + // GCC workaround of lambdas calling protected functions (for older GCC versions) + std::string variable_decl(const SPIRType &type, const std::string &name, uint32_t id = 0) override; + + std::string image_type_glsl(const SPIRType &type, uint32_t id = 0) override; + std::string sampler_type(const SPIRType &type, uint32_t id); + std::string builtin_to_glsl(spv::BuiltIn builtin, spv::StorageClass storage) override; + std::string to_func_call_arg(const SPIRFunction::Parameter &arg, uint32_t id) override; + std::string to_name(uint32_t id, bool allow_alias = true) const override; + std::string to_function_name(const TextureFunctionNameArguments &args) override; + std::string to_function_args(const TextureFunctionArguments &args, bool *p_forward) override; + std::string to_initializer_expression(const SPIRVariable &var) override; + std::string to_zero_initialized_expression(uint32_t type_id) override; + + std::string unpack_expression_type(std::string expr_str, const SPIRType &type, uint32_t physical_type_id, + bool is_packed, bool row_major) override; + + // Returns true for BuiltInSampleMask because gl_SampleMask[] is an array in SPIR-V, but [[sample_mask]] is a scalar in Metal. + bool builtin_translates_to_nonarray(spv::BuiltIn builtin) const override; + + std::string bitcast_glsl_op(const SPIRType &result_type, const SPIRType &argument_type) override; + bool emit_complex_bitcast(uint32_t result_id, uint32_t id, uint32_t op0) override; + bool skip_argument(uint32_t id) const override; + std::string to_member_reference(uint32_t base, const SPIRType &type, uint32_t index, bool ptr_chain_is_resolved) override; + std::string to_qualifiers_glsl(uint32_t id) override; + void replace_illegal_names() override; + void declare_constant_arrays(); + + void replace_illegal_entry_point_names(); + void sync_entry_point_aliases_and_names(); + + static const std::unordered_set &get_reserved_keyword_set(); + static const std::unordered_set &get_illegal_func_names(); + + // Constant arrays of non-primitive types (i.e. matrices) won't link properly into Metal libraries + void declare_complex_constant_arrays(); + + bool is_patch_block(const SPIRType &type); + bool is_non_native_row_major_matrix(uint32_t id) override; + bool member_is_non_native_row_major_matrix(const SPIRType &type, uint32_t index) override; + std::string convert_row_major_matrix(std::string exp_str, const SPIRType &exp_type, uint32_t physical_type_id, + bool is_packed) override; + + bool is_tesc_shader() const; + bool is_tese_shader() const; + + void preprocess_op_codes(); + void localize_global_variables(); + void extract_global_variables_from_functions(); + void mark_packable_structs(); + void mark_as_packable(SPIRType &type); + void mark_as_workgroup_struct(SPIRType &type); + + std::unordered_map> function_global_vars; + void extract_global_variables_from_function(uint32_t func_id, std::set &added_arg_ids, + std::unordered_set &global_var_ids, + std::unordered_set &processed_func_ids); + uint32_t add_interface_block(spv::StorageClass storage, bool patch = false); + uint32_t add_interface_block_pointer(uint32_t ib_var_id, spv::StorageClass storage); + + struct InterfaceBlockMeta + { + struct LocationMeta + { + uint32_t base_type_id = 0; + uint32_t num_components = 0; + bool flat = false; + bool noperspective = false; + bool centroid = false; + bool sample = false; + }; + std::unordered_map location_meta; + bool strip_array = false; + bool allow_local_declaration = false; + }; + + std::string to_tesc_invocation_id(); + void emit_local_masked_variable(const SPIRVariable &masked_var, bool strip_array); + void add_variable_to_interface_block(spv::StorageClass storage, const std::string &ib_var_ref, SPIRType &ib_type, + SPIRVariable &var, InterfaceBlockMeta &meta); + void add_composite_variable_to_interface_block(spv::StorageClass storage, const std::string &ib_var_ref, + SPIRType &ib_type, SPIRVariable &var, InterfaceBlockMeta &meta); + void add_plain_variable_to_interface_block(spv::StorageClass storage, const std::string &ib_var_ref, + SPIRType &ib_type, SPIRVariable &var, InterfaceBlockMeta &meta); + bool add_component_variable_to_interface_block(spv::StorageClass storage, const std::string &ib_var_ref, + SPIRVariable &var, const SPIRType &type, + InterfaceBlockMeta &meta); + void add_plain_member_variable_to_interface_block(spv::StorageClass storage, + const std::string &ib_var_ref, SPIRType &ib_type, + SPIRVariable &var, SPIRType &var_type, + uint32_t mbr_idx, InterfaceBlockMeta &meta, + const std::string &mbr_name_qual, + const std::string &var_chain_qual, + uint32_t &location, uint32_t &var_mbr_idx); + void add_composite_member_variable_to_interface_block(spv::StorageClass storage, + const std::string &ib_var_ref, SPIRType &ib_type, + SPIRVariable &var, SPIRType &var_type, + uint32_t mbr_idx, InterfaceBlockMeta &meta, + const std::string &mbr_name_qual, + const std::string &var_chain_qual, + uint32_t &location, uint32_t &var_mbr_idx); + void add_tess_level_input_to_interface_block(const std::string &ib_var_ref, SPIRType &ib_type, SPIRVariable &var); + void add_tess_level_input(const std::string &base_ref, const std::string &mbr_name, SPIRVariable &var); + + void fix_up_interface_member_indices(spv::StorageClass storage, uint32_t ib_type_id); + + void mark_location_as_used_by_shader(uint32_t location, const SPIRType &type, + spv::StorageClass storage, bool fallback = false); + uint32_t ensure_correct_builtin_type(uint32_t type_id, spv::BuiltIn builtin); + uint32_t ensure_correct_input_type(uint32_t type_id, uint32_t location, uint32_t component, + uint32_t num_components, bool strip_array); + + void emit_custom_templates(); + void emit_custom_functions(); + void emit_resources(); + void emit_specialization_constants_and_structs(); + void emit_interface_block(uint32_t ib_var_id); + bool maybe_emit_array_assignment(uint32_t id_lhs, uint32_t id_rhs); + uint32_t get_resource_array_size(uint32_t id) const; + + void fix_up_shader_inputs_outputs(); + + std::string func_type_decl(SPIRType &type); + std::string entry_point_args_classic(bool append_comma); + std::string entry_point_args_argument_buffer(bool append_comma); + std::string entry_point_arg_stage_in(); + void entry_point_args_builtin(std::string &args); + void entry_point_args_discrete_descriptors(std::string &args); + std::string append_member_name(const std::string &qualifier, const SPIRType &type, uint32_t index); + std::string ensure_valid_name(std::string name, std::string pfx); + std::string to_sampler_expression(uint32_t id); + std::string to_swizzle_expression(uint32_t id); + std::string to_buffer_size_expression(uint32_t id); + bool is_sample_rate() const; + bool is_intersection_query() const; + bool is_direct_input_builtin(spv::BuiltIn builtin); + std::string builtin_qualifier(spv::BuiltIn builtin); + std::string builtin_type_decl(spv::BuiltIn builtin, uint32_t id = 0); + std::string built_in_func_arg(spv::BuiltIn builtin, bool prefix_comma); + std::string member_attribute_qualifier(const SPIRType &type, uint32_t index); + std::string member_location_attribute_qualifier(const SPIRType &type, uint32_t index); + std::string argument_decl(const SPIRFunction::Parameter &arg); + const char *descriptor_address_space(uint32_t id, spv::StorageClass storage, const char *plain_address_space) const; + std::string round_fp_tex_coords(std::string tex_coords, bool coord_is_fp); + uint32_t get_metal_resource_index(SPIRVariable &var, SPIRType::BaseType basetype, uint32_t plane = 0); + uint32_t get_member_location(uint32_t type_id, uint32_t index, uint32_t *comp = nullptr) const; + uint32_t get_or_allocate_builtin_input_member_location(spv::BuiltIn builtin, + uint32_t type_id, uint32_t index, uint32_t *comp = nullptr); + uint32_t get_or_allocate_builtin_output_member_location(spv::BuiltIn builtin, + uint32_t type_id, uint32_t index, uint32_t *comp = nullptr); + + uint32_t get_physical_tess_level_array_size(spv::BuiltIn builtin) const; + + // MSL packing rules. These compute the effective packing rules as observed by the MSL compiler in the MSL output. + // These values can change depending on various extended decorations which control packing rules. + // We need to make these rules match up with SPIR-V declared rules. + uint32_t get_declared_type_size_msl(const SPIRType &type, bool packed, bool row_major) const; + uint32_t get_declared_type_array_stride_msl(const SPIRType &type, bool packed, bool row_major) const; + uint32_t get_declared_type_matrix_stride_msl(const SPIRType &type, bool packed, bool row_major) const; + uint32_t get_declared_type_alignment_msl(const SPIRType &type, bool packed, bool row_major) const; + + uint32_t get_declared_struct_member_size_msl(const SPIRType &struct_type, uint32_t index) const; + uint32_t get_declared_struct_member_array_stride_msl(const SPIRType &struct_type, uint32_t index) const; + uint32_t get_declared_struct_member_matrix_stride_msl(const SPIRType &struct_type, uint32_t index) const; + uint32_t get_declared_struct_member_alignment_msl(const SPIRType &struct_type, uint32_t index) const; + + uint32_t get_declared_input_size_msl(const SPIRType &struct_type, uint32_t index) const; + uint32_t get_declared_input_array_stride_msl(const SPIRType &struct_type, uint32_t index) const; + uint32_t get_declared_input_matrix_stride_msl(const SPIRType &struct_type, uint32_t index) const; + uint32_t get_declared_input_alignment_msl(const SPIRType &struct_type, uint32_t index) const; + + const SPIRType &get_physical_member_type(const SPIRType &struct_type, uint32_t index) const; + SPIRType get_presumed_input_type(const SPIRType &struct_type, uint32_t index) const; + + uint32_t get_declared_struct_size_msl(const SPIRType &struct_type, bool ignore_alignment = false, + bool ignore_padding = false) const; + + std::string to_component_argument(uint32_t id); + void align_struct(SPIRType &ib_type, std::unordered_set &aligned_structs); + void mark_scalar_layout_structs(const SPIRType &ib_type); + void mark_struct_members_packed(const SPIRType &type); + void ensure_member_packing_rules_msl(SPIRType &ib_type, uint32_t index); + bool validate_member_packing_rules_msl(const SPIRType &type, uint32_t index) const; + std::string get_argument_address_space(const SPIRVariable &argument); + std::string get_type_address_space(const SPIRType &type, uint32_t id, bool argument = false); + const char *to_restrict(uint32_t id, bool space); + SPIRType &get_stage_in_struct_type(); + SPIRType &get_stage_out_struct_type(); + SPIRType &get_patch_stage_in_struct_type(); + SPIRType &get_patch_stage_out_struct_type(); + std::string get_tess_factor_struct_name(); + SPIRType &get_uint_type(); + uint32_t get_uint_type_id(); + void emit_atomic_func_op(uint32_t result_type, uint32_t result_id, const char *op, spv::Op opcode, + uint32_t mem_order_1, uint32_t mem_order_2, bool has_mem_order_2, uint32_t op0, uint32_t op1 = 0, + bool op1_is_pointer = false, bool op1_is_literal = false, uint32_t op2 = 0); + const char *get_memory_order(uint32_t spv_mem_sem); + void add_pragma_line(const std::string &line); + void add_typedef_line(const std::string &line); + void emit_barrier(uint32_t id_exe_scope, uint32_t id_mem_scope, uint32_t id_mem_sem); + void emit_array_copy(const std::string &lhs, uint32_t lhs_id, uint32_t rhs_id, + spv::StorageClass lhs_storage, spv::StorageClass rhs_storage) override; + void build_implicit_builtins(); + uint32_t build_constant_uint_array_pointer(); + void emit_entry_point_declarations() override; + bool uses_explicit_early_fragment_test(); + + uint32_t builtin_frag_coord_id = 0; + uint32_t builtin_sample_id_id = 0; + uint32_t builtin_sample_mask_id = 0; + uint32_t builtin_helper_invocation_id = 0; + uint32_t builtin_vertex_idx_id = 0; + uint32_t builtin_base_vertex_id = 0; + uint32_t builtin_instance_idx_id = 0; + uint32_t builtin_base_instance_id = 0; + uint32_t builtin_view_idx_id = 0; + uint32_t builtin_layer_id = 0; + uint32_t builtin_invocation_id_id = 0; + uint32_t builtin_primitive_id_id = 0; + uint32_t builtin_subgroup_invocation_id_id = 0; + uint32_t builtin_subgroup_size_id = 0; + uint32_t builtin_dispatch_base_id = 0; + uint32_t builtin_stage_input_size_id = 0; + uint32_t builtin_local_invocation_index_id = 0; + uint32_t builtin_workgroup_size_id = 0; + uint32_t swizzle_buffer_id = 0; + uint32_t buffer_size_buffer_id = 0; + uint32_t view_mask_buffer_id = 0; + uint32_t dynamic_offsets_buffer_id = 0; + uint32_t uint_type_id = 0; + uint32_t argument_buffer_padding_buffer_type_id = 0; + uint32_t argument_buffer_padding_image_type_id = 0; + uint32_t argument_buffer_padding_sampler_type_id = 0; + + bool does_shader_write_sample_mask = false; + bool frag_shader_needs_discard_checks = false; + + void cast_to_variable_store(uint32_t target_id, std::string &expr, const SPIRType &expr_type) override; + void cast_from_variable_load(uint32_t source_id, std::string &expr, const SPIRType &expr_type) override; + void emit_store_statement(uint32_t lhs_expression, uint32_t rhs_expression) override; + + void analyze_sampled_image_usage(); + + bool access_chain_needs_stage_io_builtin_translation(uint32_t base) override; + void prepare_access_chain_for_scalar_access(std::string &expr, const SPIRType &type, spv::StorageClass storage, + bool &is_packed) override; + void fix_up_interpolant_access_chain(const uint32_t *ops, uint32_t length); + void check_physical_type_cast(std::string &expr, const SPIRType *type, uint32_t physical_type) override; + + bool emit_tessellation_access_chain(const uint32_t *ops, uint32_t length); + bool emit_tessellation_io_load(uint32_t result_type, uint32_t id, uint32_t ptr); + bool is_out_of_bounds_tessellation_level(uint32_t id_lhs); + + void ensure_builtin(spv::StorageClass storage, spv::BuiltIn builtin); + + void mark_implicit_builtin(spv::StorageClass storage, spv::BuiltIn builtin, uint32_t id); + + std::string convert_to_f32(const std::string &expr, uint32_t components); + + Options msl_options; + std::set spv_function_implementations; + // Must be ordered to ensure declarations are in a specific order. + std::map inputs_by_location; + std::unordered_map inputs_by_builtin; + std::map outputs_by_location; + std::unordered_map outputs_by_builtin; + std::unordered_set location_inputs_in_use; + std::unordered_set location_inputs_in_use_fallback; + std::unordered_set location_outputs_in_use; + std::unordered_set location_outputs_in_use_fallback; + std::unordered_map fragment_output_components; + std::unordered_map builtin_to_automatic_input_location; + std::unordered_map builtin_to_automatic_output_location; + std::set pragma_lines; + std::set typedef_lines; + SmallVector vars_needing_early_declaration; + + std::unordered_map, InternalHasher> resource_bindings; + std::unordered_map resource_arg_buff_idx_to_binding_number; + + uint32_t next_metal_resource_index_buffer = 0; + uint32_t next_metal_resource_index_texture = 0; + uint32_t next_metal_resource_index_sampler = 0; + // Intentionally uninitialized, works around MSVC 2013 bug. + uint32_t next_metal_resource_ids[kMaxArgumentBuffers]; + + VariableID stage_in_var_id = 0; + VariableID stage_out_var_id = 0; + VariableID patch_stage_in_var_id = 0; + VariableID patch_stage_out_var_id = 0; + VariableID stage_in_ptr_var_id = 0; + VariableID stage_out_ptr_var_id = 0; + VariableID tess_level_inner_var_id = 0; + VariableID tess_level_outer_var_id = 0; + VariableID stage_out_masked_builtin_type_id = 0; + + // Handle HLSL-style 0-based vertex/instance index. + enum class TriState + { + Neutral, + No, + Yes + }; + TriState needs_base_vertex_arg = TriState::Neutral; + TriState needs_base_instance_arg = TriState::Neutral; + + bool has_sampled_images = false; + bool builtin_declaration = false; // Handle HLSL-style 0-based vertex/instance index. + + bool is_using_builtin_array = false; // Force the use of C style array declaration. + bool using_builtin_array() const; + + bool is_rasterization_disabled = false; + bool capture_output_to_buffer = false; + bool needs_swizzle_buffer_def = false; + bool used_swizzle_buffer = false; + bool added_builtin_tess_level = false; + bool needs_subgroup_invocation_id = false; + bool needs_subgroup_size = false; + bool needs_sample_id = false; + bool needs_helper_invocation = false; + std::string qual_pos_var_name; + std::string stage_in_var_name = "in"; + std::string stage_out_var_name = "out"; + std::string patch_stage_in_var_name = "patchIn"; + std::string patch_stage_out_var_name = "patchOut"; + std::string sampler_name_suffix = "Smplr"; + std::string swizzle_name_suffix = "Swzl"; + std::string buffer_size_name_suffix = "BufferSize"; + std::string plane_name_suffix = "Plane"; + std::string input_wg_var_name = "gl_in"; + std::string input_buffer_var_name = "spvIn"; + std::string output_buffer_var_name = "spvOut"; + std::string patch_input_buffer_var_name = "spvPatchIn"; + std::string patch_output_buffer_var_name = "spvPatchOut"; + std::string tess_factor_buffer_var_name = "spvTessLevel"; + std::string index_buffer_var_name = "spvIndices"; + spv::Op previous_instruction_opcode = spv::OpNop; + + // Must be ordered since declaration is in a specific order. + std::map constexpr_samplers_by_id; + std::unordered_map constexpr_samplers_by_binding; + const MSLConstexprSampler *find_constexpr_sampler(uint32_t id) const; + + std::unordered_set buffers_requiring_array_length; + SmallVector buffer_arrays_discrete; + SmallVector> buffer_aliases_argument; + SmallVector buffer_aliases_discrete; + std::unordered_set atomic_image_vars; // Emulate texture2D atomic operations + std::unordered_set pull_model_inputs; + + // Must be ordered since array is in a specific order. + std::map> buffers_requiring_dynamic_offset; + + SmallVector disabled_frag_outputs; + + std::unordered_set inline_uniform_blocks; + + uint32_t argument_buffer_ids[kMaxArgumentBuffers]; + uint32_t argument_buffer_discrete_mask = 0; + uint32_t argument_buffer_device_storage_mask = 0; + + void analyze_argument_buffers(); + bool descriptor_set_is_argument_buffer(uint32_t desc_set) const; + MSLResourceBinding &get_argument_buffer_resource(uint32_t desc_set, uint32_t arg_idx); + void add_argument_buffer_padding_buffer_type(SPIRType &struct_type, uint32_t &mbr_idx, uint32_t &arg_buff_index, MSLResourceBinding &rez_bind); + void add_argument_buffer_padding_image_type(SPIRType &struct_type, uint32_t &mbr_idx, uint32_t &arg_buff_index, MSLResourceBinding &rez_bind); + void add_argument_buffer_padding_sampler_type(SPIRType &struct_type, uint32_t &mbr_idx, uint32_t &arg_buff_index, MSLResourceBinding &rez_bind); + void add_argument_buffer_padding_type(uint32_t mbr_type_id, SPIRType &struct_type, uint32_t &mbr_idx, uint32_t &arg_buff_index, uint32_t count); + + uint32_t get_target_components_for_fragment_location(uint32_t location) const; + uint32_t build_extended_vector_type(uint32_t type_id, uint32_t components, + SPIRType::BaseType basetype = SPIRType::Unknown); + uint32_t build_msl_interpolant_type(uint32_t type_id, bool is_noperspective); + + bool suppress_missing_prototypes = false; + + void add_spv_func_and_recompile(SPVFuncImpl spv_func); + + void activate_argument_buffer_resources(); + + bool type_is_msl_framebuffer_fetch(const SPIRType &type) const; + bool type_is_pointer(const SPIRType &type) const; + bool type_is_pointer_to_pointer(const SPIRType &type) const; + bool is_supported_argument_buffer_type(const SPIRType &type) const; + + bool variable_storage_requires_stage_io(spv::StorageClass storage) const; + + bool needs_manual_helper_invocation_updates() const + { + return msl_options.manual_helper_invocation_updates && msl_options.supports_msl_version(2, 3); + } + bool needs_frag_discard_checks() const + { + return get_execution_model() == spv::ExecutionModelFragment && msl_options.supports_msl_version(2, 3) && + msl_options.check_discarded_frag_stores && frag_shader_needs_discard_checks; + } + + bool has_additional_fixed_sample_mask() const { return msl_options.additional_fixed_sample_mask != 0xffffffff; } + std::string additional_fixed_sample_mask_str() const; + + // OpcodeHandler that handles several MSL preprocessing operations. + struct OpCodePreprocessor : OpcodeHandler + { + OpCodePreprocessor(CompilerMSL &compiler_) + : compiler(compiler_) + { + } + + bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override; + CompilerMSL::SPVFuncImpl get_spv_func_impl(spv::Op opcode, const uint32_t *args); + void check_resource_write(uint32_t var_id); + + CompilerMSL &compiler; + std::unordered_map result_types; + std::unordered_map image_pointers; // Emulate texture2D atomic operations + bool suppress_missing_prototypes = false; + bool uses_atomics = false; + bool uses_image_write = false; + bool uses_buffer_write = false; + bool uses_discard = false; + bool needs_subgroup_invocation_id = false; + bool needs_subgroup_size = false; + bool needs_sample_id = false; + bool needs_helper_invocation = false; + }; + + // OpcodeHandler that scans for uses of sampled images + struct SampledImageScanner : OpcodeHandler + { + SampledImageScanner(CompilerMSL &compiler_) + : compiler(compiler_) + { + } + + bool handle(spv::Op opcode, const uint32_t *args, uint32_t) override; + + CompilerMSL &compiler; + }; + + // Sorts the members of a SPIRType and associated Meta info based on a settable sorting + // aspect, which defines which aspect of the struct members will be used to sort them. + // Regardless of the sorting aspect, built-in members always appear at the end of the struct. + struct MemberSorter + { + enum SortAspect + { + LocationThenBuiltInType, + Offset + }; + + void sort(); + bool operator()(uint32_t mbr_idx1, uint32_t mbr_idx2); + MemberSorter(SPIRType &t, Meta &m, SortAspect sa); + + SPIRType &type; + Meta &meta; + SortAspect sort_aspect; + }; +}; +} // namespace SPIRV_CROSS_NAMESPACE + +#endif diff --git a/local_packages/include/spirv_cross/spirv_parser.hpp b/local_packages/include/spirv_cross/spirv_parser.hpp new file mode 100644 index 0000000..dabc0e2 --- /dev/null +++ b/local_packages/include/spirv_cross/spirv_parser.hpp @@ -0,0 +1,103 @@ +/* + * Copyright 2018-2021 Arm Limited + * SPDX-License-Identifier: Apache-2.0 OR MIT + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * At your option, you may choose to accept this material under either: + * 1. The Apache License, Version 2.0, found at , or + * 2. The MIT License, found at . + */ + +#ifndef SPIRV_CROSS_PARSER_HPP +#define SPIRV_CROSS_PARSER_HPP + +#include "spirv_cross_parsed_ir.hpp" +#include + +namespace SPIRV_CROSS_NAMESPACE +{ +class Parser +{ +public: + Parser(const uint32_t *spirv_data, size_t word_count); + Parser(std::vector spirv); + + void parse(); + + ParsedIR &get_parsed_ir() + { + return ir; + } + +private: + ParsedIR ir; + SPIRFunction *current_function = nullptr; + SPIRBlock *current_block = nullptr; + // For workarounds. + bool ignore_trailing_block_opcodes = false; + + void parse(const Instruction &instr); + const uint32_t *stream(const Instruction &instr) const; + + template + T &set(uint32_t id, P &&... args) + { + ir.add_typed_id(static_cast(T::type), id); + auto &var = variant_set(ir.ids[id], std::forward

(args)...); + var.self = id; + return var; + } + + template + T &get(uint32_t id) + { + return variant_get(ir.ids[id]); + } + + template + T *maybe_get(uint32_t id) + { + if (ir.ids[id].get_type() == static_cast(T::type)) + return &get(id); + else + return nullptr; + } + + template + const T &get(uint32_t id) const + { + return variant_get(ir.ids[id]); + } + + template + const T *maybe_get(uint32_t id) const + { + if (ir.ids[id].get_type() == T::type) + return &get(id); + else + return nullptr; + } + + // This must be an ordered data structure so we always pick the same type aliases. + SmallVector global_struct_cache; + SmallVector> forward_pointer_fixups; + + bool types_are_logically_equivalent(const SPIRType &a, const SPIRType &b) const; + bool variable_storage_is_aliased(const SPIRVariable &v) const; +}; +} // namespace SPIRV_CROSS_NAMESPACE + +#endif diff --git a/local_packages/include/spirv_cross/spirv_reflect.hpp b/local_packages/include/spirv_cross/spirv_reflect.hpp new file mode 100644 index 0000000..a129ba5 --- /dev/null +++ b/local_packages/include/spirv_cross/spirv_reflect.hpp @@ -0,0 +1,91 @@ +/* + * Copyright 2018-2021 Bradley Austin Davis + * SPDX-License-Identifier: Apache-2.0 OR MIT + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * At your option, you may choose to accept this material under either: + * 1. The Apache License, Version 2.0, found at , or + * 2. The MIT License, found at . + */ + +#ifndef SPIRV_CROSS_REFLECT_HPP +#define SPIRV_CROSS_REFLECT_HPP + +#include "spirv_glsl.hpp" +#include + +namespace simple_json +{ +class Stream; +} + +namespace SPIRV_CROSS_NAMESPACE +{ +class CompilerReflection : public CompilerGLSL +{ + using Parent = CompilerGLSL; + +public: + explicit CompilerReflection(std::vector spirv_) + : Parent(std::move(spirv_)) + { + options.vulkan_semantics = true; + } + + CompilerReflection(const uint32_t *ir_, size_t word_count) + : Parent(ir_, word_count) + { + options.vulkan_semantics = true; + } + + explicit CompilerReflection(const ParsedIR &ir_) + : CompilerGLSL(ir_) + { + options.vulkan_semantics = true; + } + + explicit CompilerReflection(ParsedIR &&ir_) + : CompilerGLSL(std::move(ir_)) + { + options.vulkan_semantics = true; + } + + void set_format(const std::string &format); + std::string compile() override; + +private: + static std::string execution_model_to_str(spv::ExecutionModel model); + + void emit_entry_points(); + void emit_types(); + void emit_resources(); + void emit_specialization_constants(); + + void emit_type(uint32_t type_id, bool &emitted_open_tag); + void emit_type_member(const SPIRType &type, uint32_t index); + void emit_type_member_qualifiers(const SPIRType &type, uint32_t index); + void emit_type_array(const SPIRType &type); + void emit_resources(const char *tag, const SmallVector &resources); + bool type_is_reference(const SPIRType &type) const; + + std::string to_member_name(const SPIRType &type, uint32_t index) const; + + std::shared_ptr json_stream; +}; + +} // namespace SPIRV_CROSS_NAMESPACE + +#endif diff --git a/local_packages/markdown_it/__init__.py b/local_packages/markdown_it/__init__.py new file mode 100644 index 0000000..9fac279 --- /dev/null +++ b/local_packages/markdown_it/__init__.py @@ -0,0 +1,6 @@ +"""A Python port of Markdown-It""" + +__all__ = ("MarkdownIt",) +__version__ = "4.0.0" + +from .main import MarkdownIt diff --git a/local_packages/markdown_it/_compat.py b/local_packages/markdown_it/_compat.py new file mode 100644 index 0000000..9d48db4 --- /dev/null +++ b/local_packages/markdown_it/_compat.py @@ -0,0 +1 @@ +from __future__ import annotations diff --git a/local_packages/markdown_it/_punycode.py b/local_packages/markdown_it/_punycode.py new file mode 100644 index 0000000..312048b --- /dev/null +++ b/local_packages/markdown_it/_punycode.py @@ -0,0 +1,67 @@ +# Copyright 2014 Mathias Bynens +# Copyright 2021 Taneli Hukkinen +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import codecs +from collections.abc import Callable +import re + +REGEX_SEPARATORS = re.compile(r"[\x2E\u3002\uFF0E\uFF61]") +REGEX_NON_ASCII = re.compile(r"[^\0-\x7E]") + + +def encode(uni: str) -> str: + return codecs.encode(uni, encoding="punycode").decode() + + +def decode(ascii: str) -> str: + return codecs.decode(ascii, encoding="punycode") # type: ignore + + +def map_domain(string: str, fn: Callable[[str], str]) -> str: + parts = string.split("@") + result = "" + if len(parts) > 1: + # In email addresses, only the domain name should be punycoded. Leave + # the local part (i.e. everything up to `@`) intact. + result = parts[0] + "@" + string = parts[1] + labels = REGEX_SEPARATORS.split(string) + encoded = ".".join(fn(label) for label in labels) + return result + encoded + + +def to_unicode(obj: str) -> str: + def mapping(obj: str) -> str: + if obj.startswith("xn--"): + return decode(obj[4:].lower()) + return obj + + return map_domain(obj, mapping) + + +def to_ascii(obj: str) -> str: + def mapping(obj: str) -> str: + if REGEX_NON_ASCII.search(obj): + return "xn--" + encode(obj) + return obj + + return map_domain(obj, mapping) diff --git a/local_packages/markdown_it/cli/__init__.py b/local_packages/markdown_it/cli/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/local_packages/markdown_it/cli/parse.py b/local_packages/markdown_it/cli/parse.py new file mode 100644 index 0000000..fe346b2 --- /dev/null +++ b/local_packages/markdown_it/cli/parse.py @@ -0,0 +1,110 @@ +#!/usr/bin/env python +""" +CLI interface to markdown-it-py + +Parse one or more markdown files, convert each to HTML, and print to stdout. +""" + +from __future__ import annotations + +import argparse +from collections.abc import Iterable, Sequence +import sys + +from markdown_it import __version__ +from markdown_it.main import MarkdownIt + +version_str = f"markdown-it-py [version {__version__}]" + + +def main(args: Sequence[str] | None = None) -> int: + namespace = parse_args(args) + if namespace.filenames: + convert(namespace.filenames) + else: + interactive() + return 0 + + +def convert(filenames: Iterable[str]) -> None: + for filename in filenames: + convert_file(filename) + + +def convert_file(filename: str) -> None: + """ + Parse a Markdown file and dump the output to stdout. + """ + try: + with open(filename, encoding="utf8", errors="ignore") as fin: + rendered = MarkdownIt().render(fin.read()) + print(rendered, end="") + except OSError: + sys.stderr.write(f'Cannot open file "{filename}".\n') + sys.exit(1) + + +def interactive() -> None: + """ + Parse user input, dump to stdout, rinse and repeat. + Python REPL style. + """ + print_heading() + contents = [] + more = False + while True: + try: + prompt, more = ("... ", True) if more else (">>> ", True) + contents.append(input(prompt) + "\n") + except EOFError: + print("\n" + MarkdownIt().render("\n".join(contents)), end="") + more = False + contents = [] + except KeyboardInterrupt: + print("\nExiting.") + break + + +def parse_args(args: Sequence[str] | None) -> argparse.Namespace: + """Parse input CLI arguments.""" + parser = argparse.ArgumentParser( + description="Parse one or more markdown files, " + "convert each to HTML, and print to stdout", + # NOTE: Remember to update README.md w/ the output of `markdown-it -h` + epilog=( + f""" +Interactive: + + $ markdown-it + markdown-it-py [version {__version__}] (interactive) + Type Ctrl-D to complete input, or Ctrl-C to exit. + >>> # Example + ... > markdown *input* + ... +

Example

+
+

markdown input

+
+ +Batch: + + $ markdown-it README.md README.footer.md > index.html +""" + ), + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument("-v", "--version", action="version", version=version_str) + parser.add_argument( + "filenames", nargs="*", help="specify an optional list of files to convert" + ) + return parser.parse_args(args) + + +def print_heading() -> None: + print(f"{version_str} (interactive)") + print("Type Ctrl-D to complete input, or Ctrl-C to exit.") + + +if __name__ == "__main__": + exit_code = main(sys.argv[1:]) + sys.exit(exit_code) diff --git a/local_packages/markdown_it/common/__init__.py b/local_packages/markdown_it/common/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/local_packages/markdown_it/common/entities.py b/local_packages/markdown_it/common/entities.py new file mode 100644 index 0000000..14d08ec --- /dev/null +++ b/local_packages/markdown_it/common/entities.py @@ -0,0 +1,5 @@ +"""HTML5 entities map: { name -> characters }.""" + +import html.entities + +entities = {name.rstrip(";"): chars for name, chars in html.entities.html5.items()} diff --git a/local_packages/markdown_it/common/html_blocks.py b/local_packages/markdown_it/common/html_blocks.py new file mode 100644 index 0000000..8a3b0b7 --- /dev/null +++ b/local_packages/markdown_it/common/html_blocks.py @@ -0,0 +1,69 @@ +"""List of valid html blocks names, according to commonmark spec +http://jgm.github.io/CommonMark/spec.html#html-blocks +""" + +# see https://spec.commonmark.org/0.31.2/#html-blocks +block_names = [ + "address", + "article", + "aside", + "base", + "basefont", + "blockquote", + "body", + "caption", + "center", + "col", + "colgroup", + "dd", + "details", + "dialog", + "dir", + "div", + "dl", + "dt", + "fieldset", + "figcaption", + "figure", + "footer", + "form", + "frame", + "frameset", + "h1", + "h2", + "h3", + "h4", + "h5", + "h6", + "head", + "header", + "hr", + "html", + "iframe", + "legend", + "li", + "link", + "main", + "menu", + "menuitem", + "nav", + "noframes", + "ol", + "optgroup", + "option", + "p", + "param", + "search", + "section", + "summary", + "table", + "tbody", + "td", + "tfoot", + "th", + "thead", + "title", + "tr", + "track", + "ul", +] diff --git a/local_packages/markdown_it/common/html_re.py b/local_packages/markdown_it/common/html_re.py new file mode 100644 index 0000000..ab822c5 --- /dev/null +++ b/local_packages/markdown_it/common/html_re.py @@ -0,0 +1,39 @@ +"""Regexps to match html elements""" + +import re + +attr_name = "[a-zA-Z_:][a-zA-Z0-9:._-]*" + +unquoted = "[^\"'=<>`\\x00-\\x20]+" +single_quoted = "'[^']*'" +double_quoted = '"[^"]*"' + +attr_value = "(?:" + unquoted + "|" + single_quoted + "|" + double_quoted + ")" + +attribute = "(?:\\s+" + attr_name + "(?:\\s*=\\s*" + attr_value + ")?)" + +open_tag = "<[A-Za-z][A-Za-z0-9\\-]*" + attribute + "*\\s*\\/?>" + +close_tag = "<\\/[A-Za-z][A-Za-z0-9\\-]*\\s*>" +comment = "" +processing = "<[?][\\s\\S]*?[?]>" +declaration = "]*>" +cdata = "" + +HTML_TAG_RE = re.compile( + "^(?:" + + open_tag + + "|" + + close_tag + + "|" + + comment + + "|" + + processing + + "|" + + declaration + + "|" + + cdata + + ")" +) +HTML_OPEN_CLOSE_TAG_STR = "^(?:" + open_tag + "|" + close_tag + ")" +HTML_OPEN_CLOSE_TAG_RE = re.compile(HTML_OPEN_CLOSE_TAG_STR) diff --git a/local_packages/markdown_it/common/normalize_url.py b/local_packages/markdown_it/common/normalize_url.py new file mode 100644 index 0000000..92720b3 --- /dev/null +++ b/local_packages/markdown_it/common/normalize_url.py @@ -0,0 +1,81 @@ +from __future__ import annotations + +from collections.abc import Callable +from contextlib import suppress +import re +from urllib.parse import quote, unquote, urlparse, urlunparse # noqa: F401 + +import mdurl + +from .. import _punycode + +RECODE_HOSTNAME_FOR = ("http:", "https:", "mailto:") + + +def normalizeLink(url: str) -> str: + """Normalize destination URLs in links + + :: + + [label]: destination 'title' + ^^^^^^^^^^^ + """ + parsed = mdurl.parse(url, slashes_denote_host=True) + + # Encode hostnames in urls like: + # `http://host/`, `https://host/`, `mailto:user@host`, `//host/` + # + # We don't encode unknown schemas, because it's likely that we encode + # something we shouldn't (e.g. `skype:name` treated as `skype:host`) + # + if parsed.hostname and ( + not parsed.protocol or parsed.protocol in RECODE_HOSTNAME_FOR + ): + with suppress(Exception): + parsed = parsed._replace(hostname=_punycode.to_ascii(parsed.hostname)) + + return mdurl.encode(mdurl.format(parsed)) + + +def normalizeLinkText(url: str) -> str: + """Normalize autolink content + + :: + + + ~~~~~~~~~~~ + """ + parsed = mdurl.parse(url, slashes_denote_host=True) + + # Encode hostnames in urls like: + # `http://host/`, `https://host/`, `mailto:user@host`, `//host/` + # + # We don't encode unknown schemas, because it's likely that we encode + # something we shouldn't (e.g. `skype:name` treated as `skype:host`) + # + if parsed.hostname and ( + not parsed.protocol or parsed.protocol in RECODE_HOSTNAME_FOR + ): + with suppress(Exception): + parsed = parsed._replace(hostname=_punycode.to_unicode(parsed.hostname)) + + # add '%' to exclude list because of https://github.com/markdown-it/markdown-it/issues/720 + return mdurl.decode(mdurl.format(parsed), mdurl.DECODE_DEFAULT_CHARS + "%") + + +BAD_PROTO_RE = re.compile(r"^(vbscript|javascript|file|data):") +GOOD_DATA_RE = re.compile(r"^data:image\/(gif|png|jpeg|webp);") + + +def validateLink(url: str, validator: Callable[[str], bool] | None = None) -> bool: + """Validate URL link is allowed in output. + + This validator can prohibit more than really needed to prevent XSS. + It's a tradeoff to keep code simple and to be secure by default. + + Note: url should be normalized at this point, and existing entities decoded. + """ + if validator is not None: + return validator(url) + url = url.strip().lower() + return bool(GOOD_DATA_RE.search(url)) if BAD_PROTO_RE.search(url) else True diff --git a/local_packages/markdown_it/common/utils.py b/local_packages/markdown_it/common/utils.py new file mode 100644 index 0000000..11bda64 --- /dev/null +++ b/local_packages/markdown_it/common/utils.py @@ -0,0 +1,313 @@ +"""Utilities for parsing source text""" + +from __future__ import annotations + +import re +from re import Match +from typing import TypeVar +import unicodedata + +from .entities import entities + + +def charCodeAt(src: str, pos: int) -> int | None: + """ + Returns the Unicode value of the character at the specified location. + + @param - index The zero-based index of the desired character. + If there is no character at the specified index, NaN is returned. + + This was added for compatibility with python + """ + try: + return ord(src[pos]) + except IndexError: + return None + + +def charStrAt(src: str, pos: int) -> str | None: + """ + Returns the Unicode value of the character at the specified location. + + @param - index The zero-based index of the desired character. + If there is no character at the specified index, NaN is returned. + + This was added for compatibility with python + """ + try: + return src[pos] + except IndexError: + return None + + +_ItemTV = TypeVar("_ItemTV") + + +def arrayReplaceAt( + src: list[_ItemTV], pos: int, newElements: list[_ItemTV] +) -> list[_ItemTV]: + """ + Remove element from array and put another array at those position. + Useful for some operations with tokens + """ + return src[:pos] + newElements + src[pos + 1 :] + + +def isValidEntityCode(c: int) -> bool: + # broken sequence + if c >= 0xD800 and c <= 0xDFFF: + return False + # never used + if c >= 0xFDD0 and c <= 0xFDEF: + return False + if ((c & 0xFFFF) == 0xFFFF) or ((c & 0xFFFF) == 0xFFFE): + return False + # control codes + if c >= 0x00 and c <= 0x08: + return False + if c == 0x0B: + return False + if c >= 0x0E and c <= 0x1F: + return False + if c >= 0x7F and c <= 0x9F: + return False + # out of range + return not (c > 0x10FFFF) + + +def fromCodePoint(c: int) -> str: + """Convert ordinal to unicode. + + Note, in the original Javascript two string characters were required, + for codepoints larger than `0xFFFF`. + But Python 3 can represent any unicode codepoint in one character. + """ + return chr(c) + + +# UNESCAPE_MD_RE = re.compile(r'\\([!"#$%&\'()*+,\-.\/:;<=>?@[\\\]^_`{|}~])') +# ENTITY_RE_g = re.compile(r'&([a-z#][a-z0-9]{1,31})', re.IGNORECASE) +UNESCAPE_ALL_RE = re.compile( + r'\\([!"#$%&\'()*+,\-.\/:;<=>?@[\\\]^_`{|}~])' + "|" + r"&([a-z#][a-z0-9]{1,31});", + re.IGNORECASE, +) +DIGITAL_ENTITY_BASE10_RE = re.compile(r"#([0-9]{1,8})") +DIGITAL_ENTITY_BASE16_RE = re.compile(r"#x([a-f0-9]{1,8})", re.IGNORECASE) + + +def replaceEntityPattern(match: str, name: str) -> str: + """Convert HTML entity patterns, + see https://spec.commonmark.org/0.30/#entity-references + """ + if name in entities: + return entities[name] + + code: None | int = None + if pat := DIGITAL_ENTITY_BASE10_RE.fullmatch(name): + code = int(pat.group(1), 10) + elif pat := DIGITAL_ENTITY_BASE16_RE.fullmatch(name): + code = int(pat.group(1), 16) + + if code is not None and isValidEntityCode(code): + return fromCodePoint(code) + + return match + + +def unescapeAll(string: str) -> str: + def replacer_func(match: Match[str]) -> str: + escaped = match.group(1) + if escaped: + return escaped + entity = match.group(2) + return replaceEntityPattern(match.group(), entity) + + if "\\" not in string and "&" not in string: + return string + return UNESCAPE_ALL_RE.sub(replacer_func, string) + + +ESCAPABLE = r"""\\!"#$%&'()*+,./:;<=>?@\[\]^`{}|_~-""" +ESCAPE_CHAR = re.compile(r"\\([" + ESCAPABLE + r"])") + + +def stripEscape(string: str) -> str: + """Strip escape \\ characters""" + return ESCAPE_CHAR.sub(r"\1", string) + + +def escapeHtml(raw: str) -> str: + """Replace special characters "&", "<", ">" and '"' to HTML-safe sequences.""" + # like html.escape, but without escaping single quotes + raw = raw.replace("&", "&") # Must be done first! + raw = raw.replace("<", "<") + raw = raw.replace(">", ">") + raw = raw.replace('"', """) + return raw + + +# ////////////////////////////////////////////////////////////////////////////// + +REGEXP_ESCAPE_RE = re.compile(r"[.?*+^$[\]\\(){}|-]") + + +def escapeRE(string: str) -> str: + string = REGEXP_ESCAPE_RE.sub("\\$&", string) + return string + + +# ////////////////////////////////////////////////////////////////////////////// + + +def isSpace(code: int | None) -> bool: + """Check if character code is a whitespace.""" + return code in (0x09, 0x20) + + +def isStrSpace(ch: str | None) -> bool: + """Check if character is a whitespace.""" + return ch in ("\t", " ") + + +MD_WHITESPACE = { + 0x09, # \t + 0x0A, # \n + 0x0B, # \v + 0x0C, # \f + 0x0D, # \r + 0x20, # space + 0xA0, + 0x1680, + 0x202F, + 0x205F, + 0x3000, +} + + +def isWhiteSpace(code: int) -> bool: + r"""Zs (unicode class) || [\t\f\v\r\n]""" + if code >= 0x2000 and code <= 0x200A: + return True + return code in MD_WHITESPACE + + +# ////////////////////////////////////////////////////////////////////////////// + + +def isPunctChar(ch: str) -> bool: + """Check if character is a punctuation character.""" + return unicodedata.category(ch).startswith(("P", "S")) + + +MD_ASCII_PUNCT = { + 0x21, # /* ! */ + 0x22, # /* " */ + 0x23, # /* # */ + 0x24, # /* $ */ + 0x25, # /* % */ + 0x26, # /* & */ + 0x27, # /* ' */ + 0x28, # /* ( */ + 0x29, # /* ) */ + 0x2A, # /* * */ + 0x2B, # /* + */ + 0x2C, # /* , */ + 0x2D, # /* - */ + 0x2E, # /* . */ + 0x2F, # /* / */ + 0x3A, # /* : */ + 0x3B, # /* ; */ + 0x3C, # /* < */ + 0x3D, # /* = */ + 0x3E, # /* > */ + 0x3F, # /* ? */ + 0x40, # /* @ */ + 0x5B, # /* [ */ + 0x5C, # /* \ */ + 0x5D, # /* ] */ + 0x5E, # /* ^ */ + 0x5F, # /* _ */ + 0x60, # /* ` */ + 0x7B, # /* { */ + 0x7C, # /* | */ + 0x7D, # /* } */ + 0x7E, # /* ~ */ +} + + +def isMdAsciiPunct(ch: int) -> bool: + """Markdown ASCII punctuation characters. + + :: + + !, ", #, $, %, &, ', (, ), *, +, ,, -, ., /, :, ;, <, =, >, ?, @, [, \\, ], ^, _, `, {, |, }, or ~ + + See http://spec.commonmark.org/0.15/#ascii-punctuation-character + + Don't confuse with unicode punctuation !!! It lacks some chars in ascii range. + + """ + return ch in MD_ASCII_PUNCT + + +def normalizeReference(string: str) -> str: + """Helper to unify [reference labels].""" + # Trim and collapse whitespace + # + string = re.sub(r"\s+", " ", string.strip()) + + # In node v10 'ẞ'.toLowerCase() === 'Ṿ', which is presumed to be a bug + # fixed in v12 (couldn't find any details). + # + # So treat this one as a special case + # (remove this when node v10 is no longer supported). + # + # if ('ẞ'.toLowerCase() === 'Ṿ') { + # str = str.replace(/ẞ/g, 'ß') + # } + + # .toLowerCase().toUpperCase() should get rid of all differences + # between letter variants. + # + # Simple .toLowerCase() doesn't normalize 125 code points correctly, + # and .toUpperCase doesn't normalize 6 of them (list of exceptions: + # İ, ϴ, ẞ, Ω, K, Å - those are already uppercased, but have differently + # uppercased versions). + # + # Here's an example showing how it happens. Lets take greek letter omega: + # uppercase U+0398 (Θ), U+03f4 (ϴ) and lowercase U+03b8 (θ), U+03d1 (ϑ) + # + # Unicode entries: + # 0398;GREEK CAPITAL LETTER THETA;Lu;0;L;;;;;N;;;;03B8 + # 03B8;GREEK SMALL LETTER THETA;Ll;0;L;;;;;N;;;0398;;0398 + # 03D1;GREEK THETA SYMBOL;Ll;0;L; 03B8;;;;N;GREEK SMALL LETTER SCRIPT THETA;;0398;;0398 + # 03F4;GREEK CAPITAL THETA SYMBOL;Lu;0;L; 0398;;;;N;;;;03B8 + # + # Case-insensitive comparison should treat all of them as equivalent. + # + # But .toLowerCase() doesn't change ϑ (it's already lowercase), + # and .toUpperCase() doesn't change ϴ (already uppercase). + # + # Applying first lower then upper case normalizes any character: + # '\u0398\u03f4\u03b8\u03d1'.toLowerCase().toUpperCase() === '\u0398\u0398\u0398\u0398' + # + # Note: this is equivalent to unicode case folding; unicode normalization + # is a different step that is not required here. + # + # Final result should be uppercased, because it's later stored in an object + # (this avoid a conflict with Object.prototype members, + # most notably, `__proto__`) + # + return string.lower().upper() + + +LINK_OPEN_RE = re.compile(r"^\s]", flags=re.IGNORECASE) +LINK_CLOSE_RE = re.compile(r"^", flags=re.IGNORECASE) + + +def isLinkOpen(string: str) -> bool: + return bool(LINK_OPEN_RE.search(string)) + + +def isLinkClose(string: str) -> bool: + return bool(LINK_CLOSE_RE.search(string)) diff --git a/local_packages/markdown_it/helpers/__init__.py b/local_packages/markdown_it/helpers/__init__.py new file mode 100644 index 0000000..f4e2cd2 --- /dev/null +++ b/local_packages/markdown_it/helpers/__init__.py @@ -0,0 +1,6 @@ +"""Functions for parsing Links""" + +__all__ = ("parseLinkDestination", "parseLinkLabel", "parseLinkTitle") +from .parse_link_destination import parseLinkDestination +from .parse_link_label import parseLinkLabel +from .parse_link_title import parseLinkTitle diff --git a/local_packages/markdown_it/helpers/parse_link_destination.py b/local_packages/markdown_it/helpers/parse_link_destination.py new file mode 100644 index 0000000..c98323c --- /dev/null +++ b/local_packages/markdown_it/helpers/parse_link_destination.py @@ -0,0 +1,83 @@ +""" +Parse link destination +""" + +from ..common.utils import charCodeAt, unescapeAll + + +class _Result: + __slots__ = ("ok", "pos", "str") + + def __init__(self) -> None: + self.ok = False + self.pos = 0 + self.str = "" + + +def parseLinkDestination(string: str, pos: int, maximum: int) -> _Result: + start = pos + result = _Result() + + if charCodeAt(string, pos) == 0x3C: # /* < */ + pos += 1 + while pos < maximum: + code = charCodeAt(string, pos) + if code == 0x0A: # /* \n */) + return result + if code == 0x3C: # / * < * / + return result + if code == 0x3E: # /* > */) { + result.pos = pos + 1 + result.str = unescapeAll(string[start + 1 : pos]) + result.ok = True + return result + + if code == 0x5C and pos + 1 < maximum: # \ + pos += 2 + continue + + pos += 1 + + # no closing '>' + return result + + # this should be ... } else { ... branch + + level = 0 + while pos < maximum: + code = charCodeAt(string, pos) + + if code is None or code == 0x20: + break + + # ascii control characters + if code < 0x20 or code == 0x7F: + break + + if code == 0x5C and pos + 1 < maximum: + if charCodeAt(string, pos + 1) == 0x20: + break + pos += 2 + continue + + if code == 0x28: # /* ( */) + level += 1 + if level > 32: + return result + + if code == 0x29: # /* ) */) + if level == 0: + break + level -= 1 + + pos += 1 + + if start == pos: + return result + if level != 0: + return result + + result.str = unescapeAll(string[start:pos]) + result.pos = pos + result.ok = True + return result diff --git a/local_packages/markdown_it/helpers/parse_link_label.py b/local_packages/markdown_it/helpers/parse_link_label.py new file mode 100644 index 0000000..c80da5a --- /dev/null +++ b/local_packages/markdown_it/helpers/parse_link_label.py @@ -0,0 +1,44 @@ +""" +Parse link label + +this function assumes that first character ("[") already matches +returns the end of the label + +""" + +from markdown_it.rules_inline import StateInline + + +def parseLinkLabel(state: StateInline, start: int, disableNested: bool = False) -> int: + labelEnd = -1 + oldPos = state.pos + found = False + + state.pos = start + 1 + level = 1 + + while state.pos < state.posMax: + marker = state.src[state.pos] + if marker == "]": + level -= 1 + if level == 0: + found = True + break + + prevPos = state.pos + state.md.inline.skipToken(state) + if marker == "[": + if prevPos == state.pos - 1: + # increase level if we find text `[`, + # which is not a part of any token + level += 1 + elif disableNested: + state.pos = oldPos + return -1 + if found: + labelEnd = state.pos + + # restore old state + state.pos = oldPos + + return labelEnd diff --git a/local_packages/markdown_it/helpers/parse_link_title.py b/local_packages/markdown_it/helpers/parse_link_title.py new file mode 100644 index 0000000..a38ff0d --- /dev/null +++ b/local_packages/markdown_it/helpers/parse_link_title.py @@ -0,0 +1,75 @@ +"""Parse link title""" + +from ..common.utils import charCodeAt, unescapeAll + + +class _State: + __slots__ = ("can_continue", "marker", "ok", "pos", "str") + + def __init__(self) -> None: + self.ok = False + """if `true`, this is a valid link title""" + self.can_continue = False + """if `true`, this link can be continued on the next line""" + self.pos = 0 + """if `ok`, it's the position of the first character after the closing marker""" + self.str = "" + """if `ok`, it's the unescaped title""" + self.marker = 0 + """expected closing marker character code""" + + def __str__(self) -> str: + return self.str + + +def parseLinkTitle( + string: str, start: int, maximum: int, prev_state: _State | None = None +) -> _State: + """Parse link title within `str` in [start, max] range, + or continue previous parsing if `prev_state` is defined (equal to result of last execution). + """ + pos = start + state = _State() + + if prev_state is not None: + # this is a continuation of a previous parseLinkTitle call on the next line, + # used in reference links only + state.str = prev_state.str + state.marker = prev_state.marker + else: + if pos >= maximum: + return state + + marker = charCodeAt(string, pos) + + # /* " */ /* ' */ /* ( */ + if marker != 0x22 and marker != 0x27 and marker != 0x28: + return state + + start += 1 + pos += 1 + + # if opening marker is "(", switch it to closing marker ")" + if marker == 0x28: + marker = 0x29 + + state.marker = marker + + while pos < maximum: + code = charCodeAt(string, pos) + if code == state.marker: + state.pos = pos + 1 + state.str += unescapeAll(string[start:pos]) + state.ok = True + return state + elif code == 0x28 and state.marker == 0x29: # /* ( */ /* ) */ + return state + elif code == 0x5C and pos + 1 < maximum: # /* \ */ + pos += 1 + + pos += 1 + + # no closing marker found, but this link title may continue on the next line (for references) + state.can_continue = True + state.str += unescapeAll(string[start:pos]) + return state diff --git a/local_packages/markdown_it/main.py b/local_packages/markdown_it/main.py new file mode 100644 index 0000000..bf9fd18 --- /dev/null +++ b/local_packages/markdown_it/main.py @@ -0,0 +1,350 @@ +from __future__ import annotations + +from collections.abc import Callable, Generator, Iterable, Mapping, MutableMapping +from contextlib import contextmanager +from typing import Any, Literal, overload + +from . import helpers, presets +from .common import normalize_url, utils +from .parser_block import ParserBlock +from .parser_core import ParserCore +from .parser_inline import ParserInline +from .renderer import RendererHTML, RendererProtocol +from .rules_core.state_core import StateCore +from .token import Token +from .utils import EnvType, OptionsDict, OptionsType, PresetType + +try: + import linkify_it +except ModuleNotFoundError: + linkify_it = None + + +_PRESETS: dict[str, PresetType] = { + "default": presets.default.make(), + "js-default": presets.js_default.make(), + "zero": presets.zero.make(), + "commonmark": presets.commonmark.make(), + "gfm-like": presets.gfm_like.make(), +} + + +class MarkdownIt: + def __init__( + self, + config: str | PresetType = "commonmark", + options_update: Mapping[str, Any] | None = None, + *, + renderer_cls: Callable[[MarkdownIt], RendererProtocol] = RendererHTML, + ): + """Main parser class + + :param config: name of configuration to load or a pre-defined dictionary + :param options_update: dictionary that will be merged into ``config["options"]`` + :param renderer_cls: the class to load as the renderer: + ``self.renderer = renderer_cls(self) + """ + # add modules + self.utils = utils + self.helpers = helpers + + # initialise classes + self.inline = ParserInline() + self.block = ParserBlock() + self.core = ParserCore() + self.renderer = renderer_cls(self) + self.linkify = linkify_it.LinkifyIt() if linkify_it else None + + # set the configuration + if options_update and not isinstance(options_update, Mapping): + # catch signature change where renderer_cls was not used as a key-word + raise TypeError( + f"options_update should be a mapping: {options_update}" + "\n(Perhaps you intended this to be the renderer_cls?)" + ) + self.configure(config, options_update=options_update) + + def __repr__(self) -> str: + return f"{self.__class__.__module__}.{self.__class__.__name__}()" + + @overload + def __getitem__(self, name: Literal["inline"]) -> ParserInline: ... + + @overload + def __getitem__(self, name: Literal["block"]) -> ParserBlock: ... + + @overload + def __getitem__(self, name: Literal["core"]) -> ParserCore: ... + + @overload + def __getitem__(self, name: Literal["renderer"]) -> RendererProtocol: ... + + @overload + def __getitem__(self, name: str) -> Any: ... + + def __getitem__(self, name: str) -> Any: + return { + "inline": self.inline, + "block": self.block, + "core": self.core, + "renderer": self.renderer, + }[name] + + def set(self, options: OptionsType) -> None: + """Set parser options (in the same format as in constructor). + Probably, you will never need it, but you can change options after constructor call. + + __Note:__ To achieve the best possible performance, don't modify a + `markdown-it` instance options on the fly. If you need multiple configurations + it's best to create multiple instances and initialize each with separate config. + """ + self.options = OptionsDict(options) + + def configure( + self, presets: str | PresetType, options_update: Mapping[str, Any] | None = None + ) -> MarkdownIt: + """Batch load of all options and component settings. + This is an internal method, and you probably will not need it. + But if you will - see available presets and data structure + [here](https://github.com/markdown-it/markdown-it/tree/master/lib/presets) + + We strongly recommend to use presets instead of direct config loads. + That will give better compatibility with next versions. + """ + if isinstance(presets, str): + if presets not in _PRESETS: + raise KeyError(f"Wrong `markdown-it` preset '{presets}', check name") + config = _PRESETS[presets] + else: + config = presets + + if not config: + raise ValueError("Wrong `markdown-it` config, can't be empty") + + options = config.get("options", {}) or {} + if options_update: + options = {**options, **options_update} # type: ignore + + self.set(options) # type: ignore + + if "components" in config: + for name, component in config["components"].items(): + rules = component.get("rules", None) + if rules: + self[name].ruler.enableOnly(rules) + rules2 = component.get("rules2", None) + if rules2: + self[name].ruler2.enableOnly(rules2) + + return self + + def get_all_rules(self) -> dict[str, list[str]]: + """Return the names of all active rules.""" + rules = { + chain: self[chain].ruler.get_all_rules() + for chain in ["core", "block", "inline"] + } + rules["inline2"] = self.inline.ruler2.get_all_rules() + return rules + + def get_active_rules(self) -> dict[str, list[str]]: + """Return the names of all active rules.""" + rules = { + chain: self[chain].ruler.get_active_rules() + for chain in ["core", "block", "inline"] + } + rules["inline2"] = self.inline.ruler2.get_active_rules() + return rules + + def enable( + self, names: str | Iterable[str], ignoreInvalid: bool = False + ) -> MarkdownIt: + """Enable list or rules. (chainable) + + :param names: rule name or list of rule names to enable. + :param ignoreInvalid: set `true` to ignore errors when rule not found. + + It will automatically find appropriate components, + containing rules with given names. If rule not found, and `ignoreInvalid` + not set - throws exception. + + Example:: + + md = MarkdownIt().enable(['sub', 'sup']).disable('smartquotes') + + """ + result = [] + + if isinstance(names, str): + names = [names] + + for chain in ["core", "block", "inline"]: + result.extend(self[chain].ruler.enable(names, True)) + result.extend(self.inline.ruler2.enable(names, True)) + + missed = [name for name in names if name not in result] + if missed and not ignoreInvalid: + raise ValueError(f"MarkdownIt. Failed to enable unknown rule(s): {missed}") + + return self + + def disable( + self, names: str | Iterable[str], ignoreInvalid: bool = False + ) -> MarkdownIt: + """The same as [[MarkdownIt.enable]], but turn specified rules off. (chainable) + + :param names: rule name or list of rule names to disable. + :param ignoreInvalid: set `true` to ignore errors when rule not found. + + """ + result = [] + + if isinstance(names, str): + names = [names] + + for chain in ["core", "block", "inline"]: + result.extend(self[chain].ruler.disable(names, True)) + result.extend(self.inline.ruler2.disable(names, True)) + + missed = [name for name in names if name not in result] + if missed and not ignoreInvalid: + raise ValueError(f"MarkdownIt. Failed to disable unknown rule(s): {missed}") + return self + + @contextmanager + def reset_rules(self) -> Generator[None, None, None]: + """A context manager, that will reset the current enabled rules on exit.""" + chain_rules = self.get_active_rules() + yield + for chain, rules in chain_rules.items(): + if chain != "inline2": + self[chain].ruler.enableOnly(rules) + self.inline.ruler2.enableOnly(chain_rules["inline2"]) + + def add_render_rule( + self, name: str, function: Callable[..., Any], fmt: str = "html" + ) -> None: + """Add a rule for rendering a particular Token type. + + Only applied when ``renderer.__output__ == fmt`` + """ + if self.renderer.__output__ == fmt: + self.renderer.rules[name] = function.__get__(self.renderer) # type: ignore + + def use( + self, plugin: Callable[..., None], *params: Any, **options: Any + ) -> MarkdownIt: + """Load specified plugin with given params into current parser instance. (chainable) + + It's just a sugar to call `plugin(md, params)` with curring. + + Example:: + + def func(tokens, idx): + tokens[idx].content = tokens[idx].content.replace('foo', 'bar') + md = MarkdownIt().use(plugin, 'foo_replace', 'text', func) + + """ + plugin(self, *params, **options) + return self + + def parse(self, src: str, env: EnvType | None = None) -> list[Token]: + """Parse the source string to a token stream + + :param src: source string + :param env: environment sandbox + + Parse input string and return list of block tokens (special token type + "inline" will contain list of inline tokens). + + `env` is used to pass data between "distributed" rules and return additional + metadata like reference info, needed for the renderer. It also can be used to + inject data in specific cases. Usually, you will be ok to pass `{}`, + and then pass updated object to renderer. + """ + env = {} if env is None else env + if not isinstance(env, MutableMapping): + raise TypeError(f"Input data should be a MutableMapping, not {type(env)}") + if not isinstance(src, str): + raise TypeError(f"Input data should be a string, not {type(src)}") + state = StateCore(src, self, env) + self.core.process(state) + return state.tokens + + def render(self, src: str, env: EnvType | None = None) -> Any: + """Render markdown string into html. It does all magic for you :). + + :param src: source string + :param env: environment sandbox + :returns: The output of the loaded renderer + + `env` can be used to inject additional metadata (`{}` by default). + But you will not need it with high probability. See also comment + in [[MarkdownIt.parse]]. + """ + env = {} if env is None else env + return self.renderer.render(self.parse(src, env), self.options, env) + + def parseInline(self, src: str, env: EnvType | None = None) -> list[Token]: + """The same as [[MarkdownIt.parse]] but skip all block rules. + + :param src: source string + :param env: environment sandbox + + It returns the + block tokens list with the single `inline` element, containing parsed inline + tokens in `children` property. Also updates `env` object. + """ + env = {} if env is None else env + if not isinstance(env, MutableMapping): + raise TypeError(f"Input data should be an MutableMapping, not {type(env)}") + if not isinstance(src, str): + raise TypeError(f"Input data should be a string, not {type(src)}") + state = StateCore(src, self, env) + state.inlineMode = True + self.core.process(state) + return state.tokens + + def renderInline(self, src: str, env: EnvType | None = None) -> Any: + """Similar to [[MarkdownIt.render]] but for single paragraph content. + + :param src: source string + :param env: environment sandbox + + Similar to [[MarkdownIt.render]] but for single paragraph content. Result + will NOT be wrapped into `

` tags. + """ + env = {} if env is None else env + return self.renderer.render(self.parseInline(src, env), self.options, env) + + # link methods + + def validateLink(self, url: str) -> bool: + """Validate if the URL link is allowed in output. + + This validator can prohibit more than really needed to prevent XSS. + It's a tradeoff to keep code simple and to be secure by default. + + Note: the url should be normalized at this point, and existing entities decoded. + """ + return normalize_url.validateLink(url) + + def normalizeLink(self, url: str) -> str: + """Normalize destination URLs in links + + :: + + [label]: destination 'title' + ^^^^^^^^^^^ + """ + return normalize_url.normalizeLink(url) + + def normalizeLinkText(self, link: str) -> str: + """Normalize autolink content + + :: + + + ~~~~~~~~~~~ + """ + return normalize_url.normalizeLinkText(link) diff --git a/local_packages/markdown_it/parser_block.py b/local_packages/markdown_it/parser_block.py new file mode 100644 index 0000000..50a7184 --- /dev/null +++ b/local_packages/markdown_it/parser_block.py @@ -0,0 +1,113 @@ +"""Block-level tokenizer.""" + +from __future__ import annotations + +from collections.abc import Callable +import logging +from typing import TYPE_CHECKING + +from . import rules_block +from .ruler import Ruler +from .rules_block.state_block import StateBlock +from .token import Token +from .utils import EnvType + +if TYPE_CHECKING: + from markdown_it import MarkdownIt + +LOGGER = logging.getLogger(__name__) + + +RuleFuncBlockType = Callable[[StateBlock, int, int, bool], bool] +"""(state: StateBlock, startLine: int, endLine: int, silent: bool) -> matched: bool) + +`silent` disables token generation, useful for lookahead. +""" + +_rules: list[tuple[str, RuleFuncBlockType, list[str]]] = [ + # First 2 params - rule name & source. Secondary array - list of rules, + # which can be terminated by this one. + ("table", rules_block.table, ["paragraph", "reference"]), + ("code", rules_block.code, []), + ("fence", rules_block.fence, ["paragraph", "reference", "blockquote", "list"]), + ( + "blockquote", + rules_block.blockquote, + ["paragraph", "reference", "blockquote", "list"], + ), + ("hr", rules_block.hr, ["paragraph", "reference", "blockquote", "list"]), + ("list", rules_block.list_block, ["paragraph", "reference", "blockquote"]), + ("reference", rules_block.reference, []), + ("html_block", rules_block.html_block, ["paragraph", "reference", "blockquote"]), + ("heading", rules_block.heading, ["paragraph", "reference", "blockquote"]), + ("lheading", rules_block.lheading, []), + ("paragraph", rules_block.paragraph, []), +] + + +class ParserBlock: + """ + ParserBlock#ruler -> Ruler + + [[Ruler]] instance. Keep configuration of block rules. + """ + + def __init__(self) -> None: + self.ruler = Ruler[RuleFuncBlockType]() + for name, rule, alt in _rules: + self.ruler.push(name, rule, {"alt": alt}) + + def tokenize(self, state: StateBlock, startLine: int, endLine: int) -> None: + """Generate tokens for input range.""" + rules = self.ruler.getRules("") + line = startLine + maxNesting = state.md.options.maxNesting + hasEmptyLines = False + + while line < endLine: + state.line = line = state.skipEmptyLines(line) + if line >= endLine: + break + if state.sCount[line] < state.blkIndent: + # Termination condition for nested calls. + # Nested calls currently used for blockquotes & lists + break + if state.level >= maxNesting: + # If nesting level exceeded - skip tail to the end. + # That's not ordinary situation and we should not care about content. + state.line = endLine + break + + # Try all possible rules. + # On success, rule should: + # - update `state.line` + # - update `state.tokens` + # - return True + for rule in rules: + if rule(state, line, endLine, False): + break + + # set state.tight if we had an empty line before current tag + # i.e. latest empty line should not count + state.tight = not hasEmptyLines + + line = state.line + + # paragraph might "eat" one newline after it in nested lists + if (line - 1) < endLine and state.isEmpty(line - 1): + hasEmptyLines = True + + if line < endLine and state.isEmpty(line): + hasEmptyLines = True + line += 1 + state.line = line + + def parse( + self, src: str, md: MarkdownIt, env: EnvType, outTokens: list[Token] + ) -> list[Token] | None: + """Process input string and push block tokens into `outTokens`.""" + if not src: + return None + state = StateBlock(src, md, env, outTokens) + self.tokenize(state, state.line, state.lineMax) + return state.tokens diff --git a/local_packages/markdown_it/parser_core.py b/local_packages/markdown_it/parser_core.py new file mode 100644 index 0000000..8f5b921 --- /dev/null +++ b/local_packages/markdown_it/parser_core.py @@ -0,0 +1,46 @@ +""" +* class Core +* +* Top-level rules executor. Glues block/inline parsers and does intermediate +* transformations. +""" + +from __future__ import annotations + +from collections.abc import Callable + +from .ruler import Ruler +from .rules_core import ( + block, + inline, + linkify, + normalize, + replace, + smartquotes, + text_join, +) +from .rules_core.state_core import StateCore + +RuleFuncCoreType = Callable[[StateCore], None] + +_rules: list[tuple[str, RuleFuncCoreType]] = [ + ("normalize", normalize), + ("block", block), + ("inline", inline), + ("linkify", linkify), + ("replacements", replace), + ("smartquotes", smartquotes), + ("text_join", text_join), +] + + +class ParserCore: + def __init__(self) -> None: + self.ruler = Ruler[RuleFuncCoreType]() + for name, rule in _rules: + self.ruler.push(name, rule) + + def process(self, state: StateCore) -> None: + """Executes core chain rules.""" + for rule in self.ruler.getRules(""): + rule(state) diff --git a/local_packages/markdown_it/parser_inline.py b/local_packages/markdown_it/parser_inline.py new file mode 100644 index 0000000..26ec2e6 --- /dev/null +++ b/local_packages/markdown_it/parser_inline.py @@ -0,0 +1,148 @@ +"""Tokenizes paragraph content.""" + +from __future__ import annotations + +from collections.abc import Callable +from typing import TYPE_CHECKING + +from . import rules_inline +from .ruler import Ruler +from .rules_inline.state_inline import StateInline +from .token import Token +from .utils import EnvType + +if TYPE_CHECKING: + from markdown_it import MarkdownIt + + +# Parser rules +RuleFuncInlineType = Callable[[StateInline, bool], bool] +"""(state: StateInline, silent: bool) -> matched: bool) + +`silent` disables token generation, useful for lookahead. +""" +_rules: list[tuple[str, RuleFuncInlineType]] = [ + ("text", rules_inline.text), + ("linkify", rules_inline.linkify), + ("newline", rules_inline.newline), + ("escape", rules_inline.escape), + ("backticks", rules_inline.backtick), + ("strikethrough", rules_inline.strikethrough.tokenize), + ("emphasis", rules_inline.emphasis.tokenize), + ("link", rules_inline.link), + ("image", rules_inline.image), + ("autolink", rules_inline.autolink), + ("html_inline", rules_inline.html_inline), + ("entity", rules_inline.entity), +] + +# Note `rule2` ruleset was created specifically for emphasis/strikethrough +# post-processing and may be changed in the future. +# +# Don't use this for anything except pairs (plugins working with `balance_pairs`). +# +RuleFuncInline2Type = Callable[[StateInline], None] +_rules2: list[tuple[str, RuleFuncInline2Type]] = [ + ("balance_pairs", rules_inline.link_pairs), + ("strikethrough", rules_inline.strikethrough.postProcess), + ("emphasis", rules_inline.emphasis.postProcess), + # rules for pairs separate '**' into its own text tokens, which may be left unused, + # rule below merges unused segments back with the rest of the text + ("fragments_join", rules_inline.fragments_join), +] + + +class ParserInline: + def __init__(self) -> None: + self.ruler = Ruler[RuleFuncInlineType]() + for name, rule in _rules: + self.ruler.push(name, rule) + # Second ruler used for post-processing (e.g. in emphasis-like rules) + self.ruler2 = Ruler[RuleFuncInline2Type]() + for name, rule2 in _rules2: + self.ruler2.push(name, rule2) + + def skipToken(self, state: StateInline) -> None: + """Skip single token by running all rules in validation mode; + returns `True` if any rule reported success + """ + ok = False + pos = state.pos + rules = self.ruler.getRules("") + maxNesting = state.md.options["maxNesting"] + cache = state.cache + + if pos in cache: + state.pos = cache[pos] + return + + if state.level < maxNesting: + for rule in rules: + # Increment state.level and decrement it later to limit recursion. + # It's harmless to do here, because no tokens are created. + # But ideally, we'd need a separate private state variable for this purpose. + state.level += 1 + ok = rule(state, True) + state.level -= 1 + if ok: + break + else: + # Too much nesting, just skip until the end of the paragraph. + # + # NOTE: this will cause links to behave incorrectly in the following case, + # when an amount of `[` is exactly equal to `maxNesting + 1`: + # + # [[[[[[[[[[[[[[[[[[[[[foo]() + # + # TODO: remove this workaround when CM standard will allow nested links + # (we can replace it by preventing links from being parsed in + # validation mode) + # + state.pos = state.posMax + + if not ok: + state.pos += 1 + cache[pos] = state.pos + + def tokenize(self, state: StateInline) -> None: + """Generate tokens for input range.""" + ok = False + rules = self.ruler.getRules("") + end = state.posMax + maxNesting = state.md.options["maxNesting"] + + while state.pos < end: + # Try all possible rules. + # On success, rule should: + # + # - update `state.pos` + # - update `state.tokens` + # - return true + + if state.level < maxNesting: + for rule in rules: + ok = rule(state, False) + if ok: + break + + if ok: + if state.pos >= end: + break + continue + + state.pending += state.src[state.pos] + state.pos += 1 + + if state.pending: + state.pushPending() + + def parse( + self, src: str, md: MarkdownIt, env: EnvType, tokens: list[Token] + ) -> list[Token]: + """Process input string and push inline tokens into `tokens`""" + state = StateInline(src, md, env, tokens) + self.tokenize(state) + rules2 = self.ruler2.getRules("") + for rule in rules2: + rule(state) + return state.tokens diff --git a/local_packages/markdown_it/port.yaml b/local_packages/markdown_it/port.yaml new file mode 100644 index 0000000..ce2dde9 --- /dev/null +++ b/local_packages/markdown_it/port.yaml @@ -0,0 +1,48 @@ +- package: markdown-it/markdown-it + version: 14.1.0 + commit: 0fe7ccb4b7f30236fb05f623be6924961d296d3d + date: Mar 19, 2024 + notes: + - Rename variables that use python built-in names, e.g. + - `max` -> `maximum` + - `len` -> `length` + - `str` -> `string` + - | + Convert JS `for` loops to `while` loops + this is generally the main difference between the codes, + because in python you can't do e.g. `for {i=1;i PresetType: + config = commonmark.make() + config["components"]["core"]["rules"].append("linkify") + config["components"]["block"]["rules"].append("table") + config["components"]["inline"]["rules"].extend(["strikethrough", "linkify"]) + config["components"]["inline"]["rules2"].append("strikethrough") + config["options"]["linkify"] = True + config["options"]["html"] = True + return config diff --git a/local_packages/markdown_it/presets/commonmark.py b/local_packages/markdown_it/presets/commonmark.py new file mode 100644 index 0000000..ed0de0f --- /dev/null +++ b/local_packages/markdown_it/presets/commonmark.py @@ -0,0 +1,75 @@ +"""Commonmark default options. + +This differs to presets.default, +primarily in that it allows HTML and does not enable components: + +- block: table +- inline: strikethrough +""" + +from ..utils import PresetType + + +def make() -> PresetType: + return { + "options": { + "maxNesting": 20, # Internal protection, recursion limit + "html": True, # Enable HTML tags in source, + # this is just a shorthand for .enable(["html_inline", "html_block"]) + # used by the linkify rule: + "linkify": False, # autoconvert URL-like texts to links + # used by the replacements and smartquotes rules + # Enable some language-neutral replacements + quotes beautification + "typographer": False, + # used by the smartquotes rule: + # Double + single quotes replacement pairs, when typographer enabled, + # and smartquotes on. Could be either a String or an Array. + # + # For example, you can use '«»„“' for Russian, '„“‚‘' for German, + # and ['«\xA0', '\xA0»', '‹\xA0', '\xA0›'] for French (including nbsp). + "quotes": "\u201c\u201d\u2018\u2019", # /* “”‘’ */ + # Renderer specific; these options are used directly in the HTML renderer + "xhtmlOut": True, # Use '/' to close single tags (
) + "breaks": False, # Convert '\n' in paragraphs into
+ "langPrefix": "language-", # CSS language prefix for fenced blocks + # Highlighter function. Should return escaped HTML, + # or '' if the source string is not changed and should be escaped externally. + # If result starts with PresetType: + return { + "options": { + "maxNesting": 100, # Internal protection, recursion limit + "html": False, # Enable HTML tags in source + # this is just a shorthand for .disable(["html_inline", "html_block"]) + # used by the linkify rule: + "linkify": False, # autoconvert URL-like texts to links + # used by the replacements and smartquotes rules: + # Enable some language-neutral replacements + quotes beautification + "typographer": False, + # used by the smartquotes rule: + # Double + single quotes replacement pairs, when typographer enabled, + # and smartquotes on. Could be either a String or an Array. + # For example, you can use '«»„“' for Russian, '„“‚‘' for German, + # and ['«\xA0', '\xA0»', '‹\xA0', '\xA0›'] for French (including nbsp). + "quotes": "\u201c\u201d\u2018\u2019", # /* “”‘’ */ + # Renderer specific; these options are used directly in the HTML renderer + "xhtmlOut": False, # Use '/' to close single tags (
) + "breaks": False, # Convert '\n' in paragraphs into
+ "langPrefix": "language-", # CSS language prefix for fenced blocks + # Highlighter function. Should return escaped HTML, + # or '' if the source string is not changed and should be escaped externally. + # If result starts with PresetType: + return { + "options": { + "maxNesting": 20, # Internal protection, recursion limit + "html": False, # Enable HTML tags in source + # this is just a shorthand for .disable(["html_inline", "html_block"]) + # used by the linkify rule: + "linkify": False, # autoconvert URL-like texts to links + # used by the replacements and smartquotes rules: + # Enable some language-neutral replacements + quotes beautification + "typographer": False, + # used by the smartquotes rule: + # Double + single quotes replacement pairs, when typographer enabled, + # and smartquotes on. Could be either a String or an Array. + # For example, you can use '«»„“' for Russian, '„“‚‘' for German, + # and ['«\xA0', '\xA0»', '‹\xA0', '\xA0›'] for French (including nbsp). + "quotes": "\u201c\u201d\u2018\u2019", # /* “”‘’ */ + # Renderer specific; these options are used directly in the HTML renderer + "xhtmlOut": False, # Use '/' to close single tags (
) + "breaks": False, # Convert '\n' in paragraphs into
+ "langPrefix": "language-", # CSS language prefix for fenced blocks + # Highlighter function. Should return escaped HTML, + # or '' if the source string is not changed and should be escaped externally. + # If result starts with Any: ... + + +class RendererHTML(RendererProtocol): + """Contains render rules for tokens. Can be updated and extended. + + Example: + + Each rule is called as independent static function with fixed signature: + + :: + + class Renderer: + def token_type_name(self, tokens, idx, options, env) { + # ... + return renderedHTML + + :: + + class CustomRenderer(RendererHTML): + def strong_open(self, tokens, idx, options, env): + return '' + def strong_close(self, tokens, idx, options, env): + return '' + + md = MarkdownIt(renderer_cls=CustomRenderer) + + result = md.render(...) + + See https://github.com/markdown-it/markdown-it/blob/master/lib/renderer.js + for more details and examples. + """ + + __output__ = "html" + + def __init__(self, parser: Any = None): + self.rules = { + k: v + for k, v in inspect.getmembers(self, predicate=inspect.ismethod) + if not (k.startswith("render") or k.startswith("_")) + } + + def render( + self, tokens: Sequence[Token], options: OptionsDict, env: EnvType + ) -> str: + """Takes token stream and generates HTML. + + :param tokens: list on block tokens to render + :param options: params of parser instance + :param env: additional data from parsed input + + """ + result = "" + + for i, token in enumerate(tokens): + if token.type == "inline": + if token.children: + result += self.renderInline(token.children, options, env) + elif token.type in self.rules: + result += self.rules[token.type](tokens, i, options, env) + else: + result += self.renderToken(tokens, i, options, env) + + return result + + def renderInline( + self, tokens: Sequence[Token], options: OptionsDict, env: EnvType + ) -> str: + """The same as ``render``, but for single token of `inline` type. + + :param tokens: list on block tokens to render + :param options: params of parser instance + :param env: additional data from parsed input (references, for example) + """ + result = "" + + for i, token in enumerate(tokens): + if token.type in self.rules: + result += self.rules[token.type](tokens, i, options, env) + else: + result += self.renderToken(tokens, i, options, env) + + return result + + def renderToken( + self, + tokens: Sequence[Token], + idx: int, + options: OptionsDict, + env: EnvType, + ) -> str: + """Default token renderer. + + Can be overridden by custom function + + :param idx: token index to render + :param options: params of parser instance + """ + result = "" + needLf = False + token = tokens[idx] + + # Tight list paragraphs + if token.hidden: + return "" + + # Insert a newline between hidden paragraph and subsequent opening + # block-level tag. + # + # For example, here we should insert a newline before blockquote: + # - a + # > + # + if token.block and token.nesting != -1 and idx and tokens[idx - 1].hidden: + result += "\n" + + # Add token name, e.g. ``. + # + needLf = False + + result += ">\n" if needLf else ">" + + return result + + @staticmethod + def renderAttrs(token: Token) -> str: + """Render token attributes to string.""" + result = "" + + for key, value in token.attrItems(): + result += " " + escapeHtml(key) + '="' + escapeHtml(str(value)) + '"' + + return result + + def renderInlineAsText( + self, + tokens: Sequence[Token] | None, + options: OptionsDict, + env: EnvType, + ) -> str: + """Special kludge for image `alt` attributes to conform CommonMark spec. + + Don't try to use it! Spec requires to show `alt` content with stripped markup, + instead of simple escaping. + + :param tokens: list on block tokens to render + :param options: params of parser instance + :param env: additional data from parsed input + """ + result = "" + + for token in tokens or []: + if token.type == "text": + result += token.content + elif token.type == "image": + if token.children: + result += self.renderInlineAsText(token.children, options, env) + elif token.type == "softbreak": + result += "\n" + + return result + + ################################################### + + def code_inline( + self, tokens: Sequence[Token], idx: int, options: OptionsDict, env: EnvType + ) -> str: + token = tokens[idx] + return ( + "" + + escapeHtml(tokens[idx].content) + + "" + ) + + def code_block( + self, + tokens: Sequence[Token], + idx: int, + options: OptionsDict, + env: EnvType, + ) -> str: + token = tokens[idx] + + return ( + "" + + escapeHtml(tokens[idx].content) + + "\n" + ) + + def fence( + self, + tokens: Sequence[Token], + idx: int, + options: OptionsDict, + env: EnvType, + ) -> str: + token = tokens[idx] + info = unescapeAll(token.info).strip() if token.info else "" + langName = "" + langAttrs = "" + + if info: + arr = info.split(maxsplit=1) + langName = arr[0] + if len(arr) == 2: + langAttrs = arr[1] + + if options.highlight: + highlighted = options.highlight( + token.content, langName, langAttrs + ) or escapeHtml(token.content) + else: + highlighted = escapeHtml(token.content) + + if highlighted.startswith("" + + highlighted + + "\n" + ) + + return ( + "

"
+            + highlighted
+            + "
\n" + ) + + def image( + self, + tokens: Sequence[Token], + idx: int, + options: OptionsDict, + env: EnvType, + ) -> str: + token = tokens[idx] + + # "alt" attr MUST be set, even if empty. Because it's mandatory and + # should be placed on proper position for tests. + if token.children: + token.attrSet("alt", self.renderInlineAsText(token.children, options, env)) + else: + token.attrSet("alt", "") + + return self.renderToken(tokens, idx, options, env) + + def hardbreak( + self, tokens: Sequence[Token], idx: int, options: OptionsDict, env: EnvType + ) -> str: + return "
\n" if options.xhtmlOut else "
\n" + + def softbreak( + self, tokens: Sequence[Token], idx: int, options: OptionsDict, env: EnvType + ) -> str: + return ( + ("
\n" if options.xhtmlOut else "
\n") if options.breaks else "\n" + ) + + def text( + self, tokens: Sequence[Token], idx: int, options: OptionsDict, env: EnvType + ) -> str: + return escapeHtml(tokens[idx].content) + + def html_block( + self, tokens: Sequence[Token], idx: int, options: OptionsDict, env: EnvType + ) -> str: + return tokens[idx].content + + def html_inline( + self, tokens: Sequence[Token], idx: int, options: OptionsDict, env: EnvType + ) -> str: + return tokens[idx].content diff --git a/local_packages/markdown_it/ruler.py b/local_packages/markdown_it/ruler.py new file mode 100644 index 0000000..91ab580 --- /dev/null +++ b/local_packages/markdown_it/ruler.py @@ -0,0 +1,275 @@ +""" +class Ruler + +Helper class, used by [[MarkdownIt#core]], [[MarkdownIt#block]] and +[[MarkdownIt#inline]] to manage sequences of functions (rules): + +- keep rules in defined order +- assign the name to each rule +- enable/disable rules +- add/replace rules +- allow assign rules to additional named chains (in the same) +- caching lists of active rules + +You will not need use this class directly until write plugins. For simple +rules control use [[MarkdownIt.disable]], [[MarkdownIt.enable]] and +[[MarkdownIt.use]]. +""" + +from __future__ import annotations + +from collections.abc import Iterable +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, Generic, TypedDict, TypeVar +import warnings + +from .utils import EnvType + +if TYPE_CHECKING: + from markdown_it import MarkdownIt + + +class StateBase: + def __init__(self, src: str, md: MarkdownIt, env: EnvType): + self.src = src + self.env = env + self.md = md + + @property + def src(self) -> str: + return self._src + + @src.setter + def src(self, value: str) -> None: + self._src = value + self._srcCharCode: tuple[int, ...] | None = None + + @property + def srcCharCode(self) -> tuple[int, ...]: + warnings.warn( + "StateBase.srcCharCode is deprecated. Use StateBase.src instead.", + DeprecationWarning, + stacklevel=2, + ) + if self._srcCharCode is None: + self._srcCharCode = tuple(ord(c) for c in self._src) + return self._srcCharCode + + +class RuleOptionsType(TypedDict, total=False): + alt: list[str] + + +RuleFuncTv = TypeVar("RuleFuncTv") +"""A rule function, whose signature is dependent on the state type.""" + + +@dataclass(slots=True) +class Rule(Generic[RuleFuncTv]): + name: str + enabled: bool + fn: RuleFuncTv = field(repr=False) + alt: list[str] + + +class Ruler(Generic[RuleFuncTv]): + def __init__(self) -> None: + # List of added rules. + self.__rules__: list[Rule[RuleFuncTv]] = [] + # Cached rule chains. + # First level - chain name, '' for default. + # Second level - diginal anchor for fast filtering by charcodes. + self.__cache__: dict[str, list[RuleFuncTv]] | None = None + + def __find__(self, name: str) -> int: + """Find rule index by name""" + for i, rule in enumerate(self.__rules__): + if rule.name == name: + return i + return -1 + + def __compile__(self) -> None: + """Build rules lookup cache""" + chains = {""} + # collect unique names + for rule in self.__rules__: + if not rule.enabled: + continue + for name in rule.alt: + chains.add(name) + self.__cache__ = {} + for chain in chains: + self.__cache__[chain] = [] + for rule in self.__rules__: + if not rule.enabled: + continue + if chain and (chain not in rule.alt): + continue + self.__cache__[chain].append(rule.fn) + + def at( + self, ruleName: str, fn: RuleFuncTv, options: RuleOptionsType | None = None + ) -> None: + """Replace rule by name with new function & options. + + :param ruleName: rule name to replace. + :param fn: new rule function. + :param options: new rule options (not mandatory). + :raises: KeyError if name not found + """ + index = self.__find__(ruleName) + options = options or {} + if index == -1: + raise KeyError(f"Parser rule not found: {ruleName}") + self.__rules__[index].fn = fn + self.__rules__[index].alt = options.get("alt", []) + self.__cache__ = None + + def before( + self, + beforeName: str, + ruleName: str, + fn: RuleFuncTv, + options: RuleOptionsType | None = None, + ) -> None: + """Add new rule to chain before one with given name. + + :param beforeName: new rule will be added before this one. + :param ruleName: new rule will be added before this one. + :param fn: new rule function. + :param options: new rule options (not mandatory). + :raises: KeyError if name not found + """ + index = self.__find__(beforeName) + options = options or {} + if index == -1: + raise KeyError(f"Parser rule not found: {beforeName}") + self.__rules__.insert( + index, Rule[RuleFuncTv](ruleName, True, fn, options.get("alt", [])) + ) + self.__cache__ = None + + def after( + self, + afterName: str, + ruleName: str, + fn: RuleFuncTv, + options: RuleOptionsType | None = None, + ) -> None: + """Add new rule to chain after one with given name. + + :param afterName: new rule will be added after this one. + :param ruleName: new rule will be added after this one. + :param fn: new rule function. + :param options: new rule options (not mandatory). + :raises: KeyError if name not found + """ + index = self.__find__(afterName) + options = options or {} + if index == -1: + raise KeyError(f"Parser rule not found: {afterName}") + self.__rules__.insert( + index + 1, Rule[RuleFuncTv](ruleName, True, fn, options.get("alt", [])) + ) + self.__cache__ = None + + def push( + self, ruleName: str, fn: RuleFuncTv, options: RuleOptionsType | None = None + ) -> None: + """Push new rule to the end of chain. + + :param ruleName: new rule will be added to the end of chain. + :param fn: new rule function. + :param options: new rule options (not mandatory). + + """ + self.__rules__.append( + Rule[RuleFuncTv](ruleName, True, fn, (options or {}).get("alt", [])) + ) + self.__cache__ = None + + def enable( + self, names: str | Iterable[str], ignoreInvalid: bool = False + ) -> list[str]: + """Enable rules with given names. + + :param names: name or list of rule names to enable. + :param ignoreInvalid: ignore errors when rule not found + :raises: KeyError if name not found and not ignoreInvalid + :return: list of found rule names + """ + if isinstance(names, str): + names = [names] + result: list[str] = [] + for name in names: + idx = self.__find__(name) + if (idx < 0) and ignoreInvalid: + continue + if (idx < 0) and not ignoreInvalid: + raise KeyError(f"Rules manager: invalid rule name {name}") + self.__rules__[idx].enabled = True + result.append(name) + self.__cache__ = None + return result + + def enableOnly( + self, names: str | Iterable[str], ignoreInvalid: bool = False + ) -> list[str]: + """Enable rules with given names, and disable everything else. + + :param names: name or list of rule names to enable. + :param ignoreInvalid: ignore errors when rule not found + :raises: KeyError if name not found and not ignoreInvalid + :return: list of found rule names + """ + if isinstance(names, str): + names = [names] + for rule in self.__rules__: + rule.enabled = False + return self.enable(names, ignoreInvalid) + + def disable( + self, names: str | Iterable[str], ignoreInvalid: bool = False + ) -> list[str]: + """Disable rules with given names. + + :param names: name or list of rule names to enable. + :param ignoreInvalid: ignore errors when rule not found + :raises: KeyError if name not found and not ignoreInvalid + :return: list of found rule names + """ + if isinstance(names, str): + names = [names] + result = [] + for name in names: + idx = self.__find__(name) + if (idx < 0) and ignoreInvalid: + continue + if (idx < 0) and not ignoreInvalid: + raise KeyError(f"Rules manager: invalid rule name {name}") + self.__rules__[idx].enabled = False + result.append(name) + self.__cache__ = None + return result + + def getRules(self, chainName: str = "") -> list[RuleFuncTv]: + """Return array of active functions (rules) for given chain name. + It analyzes rules configuration, compiles caches if not exists and returns result. + + Default chain name is `''` (empty string). It can't be skipped. + That's done intentionally, to keep signature monomorphic for high speed. + + """ + if self.__cache__ is None: + self.__compile__() + assert self.__cache__ is not None + # Chain can be empty, if rules disabled. But we still have to return Array. + return self.__cache__.get(chainName, []) or [] + + def get_all_rules(self) -> list[str]: + """Return all available rule names.""" + return [r.name for r in self.__rules__] + + def get_active_rules(self) -> list[str]: + """Return the active rule names.""" + return [r.name for r in self.__rules__ if r.enabled] diff --git a/local_packages/markdown_it/rules_block/__init__.py b/local_packages/markdown_it/rules_block/__init__.py new file mode 100644 index 0000000..517da23 --- /dev/null +++ b/local_packages/markdown_it/rules_block/__init__.py @@ -0,0 +1,27 @@ +__all__ = ( + "StateBlock", + "blockquote", + "code", + "fence", + "heading", + "hr", + "html_block", + "lheading", + "list_block", + "paragraph", + "reference", + "table", +) + +from .blockquote import blockquote +from .code import code +from .fence import fence +from .heading import heading +from .hr import hr +from .html_block import html_block +from .lheading import lheading +from .list import list_block +from .paragraph import paragraph +from .reference import reference +from .state_block import StateBlock +from .table import table diff --git a/local_packages/markdown_it/rules_block/blockquote.py b/local_packages/markdown_it/rules_block/blockquote.py new file mode 100644 index 0000000..0c9081b --- /dev/null +++ b/local_packages/markdown_it/rules_block/blockquote.py @@ -0,0 +1,299 @@ +# Block quotes +from __future__ import annotations + +import logging + +from ..common.utils import isStrSpace +from .state_block import StateBlock + +LOGGER = logging.getLogger(__name__) + + +def blockquote(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool: + LOGGER.debug( + "entering blockquote: %s, %s, %s, %s", state, startLine, endLine, silent + ) + + oldLineMax = state.lineMax + pos = state.bMarks[startLine] + state.tShift[startLine] + max = state.eMarks[startLine] + + if state.is_code_block(startLine): + return False + + # check the block quote marker + try: + if state.src[pos] != ">": + return False + except IndexError: + return False + pos += 1 + + # we know that it's going to be a valid blockquote, + # so no point trying to find the end of it in silent mode + if silent: + return True + + # set offset past spaces and ">" + initial = offset = state.sCount[startLine] + 1 + + try: + second_char: str | None = state.src[pos] + except IndexError: + second_char = None + + # skip one optional space after '>' + if second_char == " ": + # ' > test ' + # ^ -- position start of line here: + pos += 1 + initial += 1 + offset += 1 + adjustTab = False + spaceAfterMarker = True + elif second_char == "\t": + spaceAfterMarker = True + + if (state.bsCount[startLine] + offset) % 4 == 3: + # ' >\t test ' + # ^ -- position start of line here (tab has width==1) + pos += 1 + initial += 1 + offset += 1 + adjustTab = False + else: + # ' >\t test ' + # ^ -- position start of line here + shift bsCount slightly + # to make extra space appear + adjustTab = True + + else: + spaceAfterMarker = False + + oldBMarks = [state.bMarks[startLine]] + state.bMarks[startLine] = pos + + while pos < max: + ch = state.src[pos] + + if isStrSpace(ch): + if ch == "\t": + offset += ( + 4 + - (offset + state.bsCount[startLine] + (1 if adjustTab else 0)) % 4 + ) + else: + offset += 1 + + else: + break + + pos += 1 + + oldBSCount = [state.bsCount[startLine]] + state.bsCount[startLine] = ( + state.sCount[startLine] + 1 + (1 if spaceAfterMarker else 0) + ) + + lastLineEmpty = pos >= max + + oldSCount = [state.sCount[startLine]] + state.sCount[startLine] = offset - initial + + oldTShift = [state.tShift[startLine]] + state.tShift[startLine] = pos - state.bMarks[startLine] + + terminatorRules = state.md.block.ruler.getRules("blockquote") + + oldParentType = state.parentType + state.parentType = "blockquote" + + # Search the end of the block + # + # Block ends with either: + # 1. an empty line outside: + # ``` + # > test + # + # ``` + # 2. an empty line inside: + # ``` + # > + # test + # ``` + # 3. another tag: + # ``` + # > test + # - - - + # ``` + + # for (nextLine = startLine + 1; nextLine < endLine; nextLine++) { + nextLine = startLine + 1 + while nextLine < endLine: + # check if it's outdented, i.e. it's inside list item and indented + # less than said list item: + # + # ``` + # 1. anything + # > current blockquote + # 2. checking this line + # ``` + isOutdented = state.sCount[nextLine] < state.blkIndent + + pos = state.bMarks[nextLine] + state.tShift[nextLine] + max = state.eMarks[nextLine] + + if pos >= max: + # Case 1: line is not inside the blockquote, and this line is empty. + break + + evaluatesTrue = state.src[pos] == ">" and not isOutdented + pos += 1 + if evaluatesTrue: + # This line is inside the blockquote. + + # set offset past spaces and ">" + initial = offset = state.sCount[nextLine] + 1 + + try: + next_char: str | None = state.src[pos] + except IndexError: + next_char = None + + # skip one optional space after '>' + if next_char == " ": + # ' > test ' + # ^ -- position start of line here: + pos += 1 + initial += 1 + offset += 1 + adjustTab = False + spaceAfterMarker = True + elif next_char == "\t": + spaceAfterMarker = True + + if (state.bsCount[nextLine] + offset) % 4 == 3: + # ' >\t test ' + # ^ -- position start of line here (tab has width==1) + pos += 1 + initial += 1 + offset += 1 + adjustTab = False + else: + # ' >\t test ' + # ^ -- position start of line here + shift bsCount slightly + # to make extra space appear + adjustTab = True + + else: + spaceAfterMarker = False + + oldBMarks.append(state.bMarks[nextLine]) + state.bMarks[nextLine] = pos + + while pos < max: + ch = state.src[pos] + + if isStrSpace(ch): + if ch == "\t": + offset += ( + 4 + - ( + offset + + state.bsCount[nextLine] + + (1 if adjustTab else 0) + ) + % 4 + ) + else: + offset += 1 + else: + break + + pos += 1 + + lastLineEmpty = pos >= max + + oldBSCount.append(state.bsCount[nextLine]) + state.bsCount[nextLine] = ( + state.sCount[nextLine] + 1 + (1 if spaceAfterMarker else 0) + ) + + oldSCount.append(state.sCount[nextLine]) + state.sCount[nextLine] = offset - initial + + oldTShift.append(state.tShift[nextLine]) + state.tShift[nextLine] = pos - state.bMarks[nextLine] + + nextLine += 1 + continue + + # Case 2: line is not inside the blockquote, and the last line was empty. + if lastLineEmpty: + break + + # Case 3: another tag found. + terminate = False + + for terminatorRule in terminatorRules: + if terminatorRule(state, nextLine, endLine, True): + terminate = True + break + + if terminate: + # Quirk to enforce "hard termination mode" for paragraphs; + # normally if you call `tokenize(state, startLine, nextLine)`, + # paragraphs will look below nextLine for paragraph continuation, + # but if blockquote is terminated by another tag, they shouldn't + state.lineMax = nextLine + + if state.blkIndent != 0: + # state.blkIndent was non-zero, we now set it to zero, + # so we need to re-calculate all offsets to appear as + # if indent wasn't changed + oldBMarks.append(state.bMarks[nextLine]) + oldBSCount.append(state.bsCount[nextLine]) + oldTShift.append(state.tShift[nextLine]) + oldSCount.append(state.sCount[nextLine]) + state.sCount[nextLine] -= state.blkIndent + + break + + oldBMarks.append(state.bMarks[nextLine]) + oldBSCount.append(state.bsCount[nextLine]) + oldTShift.append(state.tShift[nextLine]) + oldSCount.append(state.sCount[nextLine]) + + # A negative indentation means that this is a paragraph continuation + # + state.sCount[nextLine] = -1 + + nextLine += 1 + + oldIndent = state.blkIndent + state.blkIndent = 0 + + token = state.push("blockquote_open", "blockquote", 1) + token.markup = ">" + token.map = lines = [startLine, 0] + + state.md.block.tokenize(state, startLine, nextLine) + + token = state.push("blockquote_close", "blockquote", -1) + token.markup = ">" + + state.lineMax = oldLineMax + state.parentType = oldParentType + lines[1] = state.line + + # Restore original tShift; this might not be necessary since the parser + # has already been here, but just to make sure we can do that. + for i, item in enumerate(oldTShift): + state.bMarks[i + startLine] = oldBMarks[i] + state.tShift[i + startLine] = item + state.sCount[i + startLine] = oldSCount[i] + state.bsCount[i + startLine] = oldBSCount[i] + + state.blkIndent = oldIndent + + return True diff --git a/local_packages/markdown_it/rules_block/code.py b/local_packages/markdown_it/rules_block/code.py new file mode 100644 index 0000000..af8a41c --- /dev/null +++ b/local_packages/markdown_it/rules_block/code.py @@ -0,0 +1,36 @@ +"""Code block (4 spaces padded).""" + +import logging + +from .state_block import StateBlock + +LOGGER = logging.getLogger(__name__) + + +def code(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool: + LOGGER.debug("entering code: %s, %s, %s, %s", state, startLine, endLine, silent) + + if not state.is_code_block(startLine): + return False + + last = nextLine = startLine + 1 + + while nextLine < endLine: + if state.isEmpty(nextLine): + nextLine += 1 + continue + + if state.is_code_block(nextLine): + nextLine += 1 + last = nextLine + continue + + break + + state.line = last + + token = state.push("code_block", "code", 0) + token.content = state.getLines(startLine, last, 4 + state.blkIndent, False) + "\n" + token.map = [startLine, state.line] + + return True diff --git a/local_packages/markdown_it/rules_block/fence.py b/local_packages/markdown_it/rules_block/fence.py new file mode 100644 index 0000000..263f1b8 --- /dev/null +++ b/local_packages/markdown_it/rules_block/fence.py @@ -0,0 +1,101 @@ +# fences (``` lang, ~~~ lang) +import logging + +from .state_block import StateBlock + +LOGGER = logging.getLogger(__name__) + + +def fence(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool: + LOGGER.debug("entering fence: %s, %s, %s, %s", state, startLine, endLine, silent) + + haveEndMarker = False + pos = state.bMarks[startLine] + state.tShift[startLine] + maximum = state.eMarks[startLine] + + if state.is_code_block(startLine): + return False + + if pos + 3 > maximum: + return False + + marker = state.src[pos] + + if marker not in ("~", "`"): + return False + + # scan marker length + mem = pos + pos = state.skipCharsStr(pos, marker) + + length = pos - mem + + if length < 3: + return False + + markup = state.src[mem:pos] + params = state.src[pos:maximum] + + if marker == "`" and marker in params: + return False + + # Since start is found, we can report success here in validation mode + if silent: + return True + + # search end of block + nextLine = startLine + + while True: + nextLine += 1 + if nextLine >= endLine: + # unclosed block should be autoclosed by end of document. + # also block seems to be autoclosed by end of parent + break + + pos = mem = state.bMarks[nextLine] + state.tShift[nextLine] + maximum = state.eMarks[nextLine] + + if pos < maximum and state.sCount[nextLine] < state.blkIndent: + # non-empty line with negative indent should stop the list: + # - ``` + # test + break + + try: + if state.src[pos] != marker: + continue + except IndexError: + break + + if state.is_code_block(nextLine): + continue + + pos = state.skipCharsStr(pos, marker) + + # closing code fence must be at least as long as the opening one + if pos - mem < length: + continue + + # make sure tail has spaces only + pos = state.skipSpaces(pos) + + if pos < maximum: + continue + + haveEndMarker = True + # found! + break + + # If a fence has heading spaces, they should be removed from its inner block + length = state.sCount[startLine] + + state.line = nextLine + (1 if haveEndMarker else 0) + + token = state.push("fence", "code", 0) + token.info = params + token.content = state.getLines(startLine + 1, nextLine, length, True) + token.markup = markup + token.map = [startLine, state.line] + + return True diff --git a/local_packages/markdown_it/rules_block/heading.py b/local_packages/markdown_it/rules_block/heading.py new file mode 100644 index 0000000..afcf9ed --- /dev/null +++ b/local_packages/markdown_it/rules_block/heading.py @@ -0,0 +1,69 @@ +"""Atex heading (#, ##, ...)""" + +from __future__ import annotations + +import logging + +from ..common.utils import isStrSpace +from .state_block import StateBlock + +LOGGER = logging.getLogger(__name__) + + +def heading(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool: + LOGGER.debug("entering heading: %s, %s, %s, %s", state, startLine, endLine, silent) + + pos = state.bMarks[startLine] + state.tShift[startLine] + maximum = state.eMarks[startLine] + + if state.is_code_block(startLine): + return False + + ch: str | None = state.src[pos] + + if ch != "#" or pos >= maximum: + return False + + # count heading level + level = 1 + pos += 1 + try: + ch = state.src[pos] + except IndexError: + ch = None + while ch == "#" and pos < maximum and level <= 6: + level += 1 + pos += 1 + try: + ch = state.src[pos] + except IndexError: + ch = None + + if level > 6 or (pos < maximum and not isStrSpace(ch)): + return False + + if silent: + return True + + # Let's cut tails like ' ### ' from the end of string + + maximum = state.skipSpacesBack(maximum, pos) + tmp = state.skipCharsStrBack(maximum, "#", pos) + if tmp > pos and isStrSpace(state.src[tmp - 1]): + maximum = tmp + + state.line = startLine + 1 + + token = state.push("heading_open", "h" + str(level), 1) + token.markup = "########"[:level] + token.map = [startLine, state.line] + + token = state.push("inline", "", 0) + token.content = state.src[pos:maximum].strip() + token.map = [startLine, state.line] + token.children = [] + + token = state.push("heading_close", "h" + str(level), -1) + token.markup = "########"[:level] + + return True diff --git a/local_packages/markdown_it/rules_block/hr.py b/local_packages/markdown_it/rules_block/hr.py new file mode 100644 index 0000000..fca7d79 --- /dev/null +++ b/local_packages/markdown_it/rules_block/hr.py @@ -0,0 +1,56 @@ +"""Horizontal rule + +At least 3 of these characters on a line * - _ +""" + +import logging + +from ..common.utils import isStrSpace +from .state_block import StateBlock + +LOGGER = logging.getLogger(__name__) + + +def hr(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool: + LOGGER.debug("entering hr: %s, %s, %s, %s", state, startLine, endLine, silent) + + pos = state.bMarks[startLine] + state.tShift[startLine] + maximum = state.eMarks[startLine] + + if state.is_code_block(startLine): + return False + + try: + marker = state.src[pos] + except IndexError: + return False + pos += 1 + + # Check hr marker + if marker not in ("*", "-", "_"): + return False + + # markers can be mixed with spaces, but there should be at least 3 of them + + cnt = 1 + while pos < maximum: + ch = state.src[pos] + pos += 1 + if ch != marker and not isStrSpace(ch): + return False + if ch == marker: + cnt += 1 + + if cnt < 3: + return False + + if silent: + return True + + state.line = startLine + 1 + + token = state.push("hr", "hr", 0) + token.map = [startLine, state.line] + token.markup = marker * (cnt + 1) + + return True diff --git a/local_packages/markdown_it/rules_block/html_block.py b/local_packages/markdown_it/rules_block/html_block.py new file mode 100644 index 0000000..3d43f6e --- /dev/null +++ b/local_packages/markdown_it/rules_block/html_block.py @@ -0,0 +1,90 @@ +# HTML block +from __future__ import annotations + +import logging +import re + +from ..common.html_blocks import block_names +from ..common.html_re import HTML_OPEN_CLOSE_TAG_STR +from .state_block import StateBlock + +LOGGER = logging.getLogger(__name__) + +# An array of opening and corresponding closing sequences for html tags, +# last argument defines whether it can terminate a paragraph or not +HTML_SEQUENCES: list[tuple[re.Pattern[str], re.Pattern[str], bool]] = [ + ( + re.compile(r"^<(script|pre|style|textarea)(?=(\s|>|$))", re.IGNORECASE), + re.compile(r"<\/(script|pre|style|textarea)>", re.IGNORECASE), + True, + ), + (re.compile(r"^"), True), + (re.compile(r"^<\?"), re.compile(r"\?>"), True), + (re.compile(r"^"), True), + (re.compile(r"^"), True), + ( + re.compile("^|$))", re.IGNORECASE), + re.compile(r"^$"), + True, + ), + (re.compile(HTML_OPEN_CLOSE_TAG_STR + "\\s*$"), re.compile(r"^$"), False), +] + + +def html_block(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool: + LOGGER.debug( + "entering html_block: %s, %s, %s, %s", state, startLine, endLine, silent + ) + pos = state.bMarks[startLine] + state.tShift[startLine] + maximum = state.eMarks[startLine] + + if state.is_code_block(startLine): + return False + + if not state.md.options.get("html", None): + return False + + if state.src[pos] != "<": + return False + + lineText = state.src[pos:maximum] + + html_seq = None + for HTML_SEQUENCE in HTML_SEQUENCES: + if HTML_SEQUENCE[0].search(lineText): + html_seq = HTML_SEQUENCE + break + + if not html_seq: + return False + + if silent: + # true if this sequence can be a terminator, false otherwise + return html_seq[2] + + nextLine = startLine + 1 + + # If we are here - we detected HTML block. + # Let's roll down till block end. + if not html_seq[1].search(lineText): + while nextLine < endLine: + if state.sCount[nextLine] < state.blkIndent: + break + + pos = state.bMarks[nextLine] + state.tShift[nextLine] + maximum = state.eMarks[nextLine] + lineText = state.src[pos:maximum] + + if html_seq[1].search(lineText): + if len(lineText) != 0: + nextLine += 1 + break + nextLine += 1 + + state.line = nextLine + + token = state.push("html_block", "", 0) + token.map = [startLine, nextLine] + token.content = state.getLines(startLine, nextLine, state.blkIndent, True) + + return True diff --git a/local_packages/markdown_it/rules_block/lheading.py b/local_packages/markdown_it/rules_block/lheading.py new file mode 100644 index 0000000..3522207 --- /dev/null +++ b/local_packages/markdown_it/rules_block/lheading.py @@ -0,0 +1,86 @@ +# lheading (---, ==) +import logging + +from .state_block import StateBlock + +LOGGER = logging.getLogger(__name__) + + +def lheading(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool: + LOGGER.debug("entering lheading: %s, %s, %s, %s", state, startLine, endLine, silent) + + level = None + nextLine = startLine + 1 + ruler = state.md.block.ruler + terminatorRules = ruler.getRules("paragraph") + + if state.is_code_block(startLine): + return False + + oldParentType = state.parentType + state.parentType = "paragraph" # use paragraph to match terminatorRules + + # jump line-by-line until empty one or EOF + while nextLine < endLine and not state.isEmpty(nextLine): + # this would be a code block normally, but after paragraph + # it's considered a lazy continuation regardless of what's there + if state.sCount[nextLine] - state.blkIndent > 3: + nextLine += 1 + continue + + # Check for underline in setext header + if state.sCount[nextLine] >= state.blkIndent: + pos = state.bMarks[nextLine] + state.tShift[nextLine] + maximum = state.eMarks[nextLine] + + if pos < maximum: + marker = state.src[pos] + + if marker in ("-", "="): + pos = state.skipCharsStr(pos, marker) + pos = state.skipSpaces(pos) + + # /* = */ + if pos >= maximum: + level = 1 if marker == "=" else 2 + break + + # quirk for blockquotes, this line should already be checked by that rule + if state.sCount[nextLine] < 0: + nextLine += 1 + continue + + # Some tags can terminate paragraph without empty line. + terminate = False + for terminatorRule in terminatorRules: + if terminatorRule(state, nextLine, endLine, True): + terminate = True + break + if terminate: + break + + nextLine += 1 + + if not level: + # Didn't find valid underline + return False + + content = state.getLines(startLine, nextLine, state.blkIndent, False).strip() + + state.line = nextLine + 1 + + token = state.push("heading_open", "h" + str(level), 1) + token.markup = marker + token.map = [startLine, state.line] + + token = state.push("inline", "", 0) + token.content = content + token.map = [startLine, state.line - 1] + token.children = [] + + token = state.push("heading_close", "h" + str(level), -1) + token.markup = marker + + state.parentType = oldParentType + + return True diff --git a/local_packages/markdown_it/rules_block/list.py b/local_packages/markdown_it/rules_block/list.py new file mode 100644 index 0000000..d8070d7 --- /dev/null +++ b/local_packages/markdown_it/rules_block/list.py @@ -0,0 +1,345 @@ +# Lists +import logging + +from ..common.utils import isStrSpace +from .state_block import StateBlock + +LOGGER = logging.getLogger(__name__) + + +# Search `[-+*][\n ]`, returns next pos after marker on success +# or -1 on fail. +def skipBulletListMarker(state: StateBlock, startLine: int) -> int: + pos = state.bMarks[startLine] + state.tShift[startLine] + maximum = state.eMarks[startLine] + + try: + marker = state.src[pos] + except IndexError: + return -1 + pos += 1 + + if marker not in ("*", "-", "+"): + return -1 + + if pos < maximum: + ch = state.src[pos] + + if not isStrSpace(ch): + # " -test " - is not a list item + return -1 + + return pos + + +# Search `\d+[.)][\n ]`, returns next pos after marker on success +# or -1 on fail. +def skipOrderedListMarker(state: StateBlock, startLine: int) -> int: + start = state.bMarks[startLine] + state.tShift[startLine] + pos = start + maximum = state.eMarks[startLine] + + # List marker should have at least 2 chars (digit + dot) + if pos + 1 >= maximum: + return -1 + + ch = state.src[pos] + pos += 1 + + ch_ord = ord(ch) + # /* 0 */ /* 9 */ + if ch_ord < 0x30 or ch_ord > 0x39: + return -1 + + while True: + # EOL -> fail + if pos >= maximum: + return -1 + + ch = state.src[pos] + pos += 1 + + # /* 0 */ /* 9 */ + ch_ord = ord(ch) + if ch_ord >= 0x30 and ch_ord <= 0x39: + # List marker should have no more than 9 digits + # (prevents integer overflow in browsers) + if pos - start >= 10: + return -1 + + continue + + # found valid marker + if ch in (")", "."): + break + + return -1 + + if pos < maximum: + ch = state.src[pos] + + if not isStrSpace(ch): + # " 1.test " - is not a list item + return -1 + + return pos + + +def markTightParagraphs(state: StateBlock, idx: int) -> None: + level = state.level + 2 + + i = idx + 2 + length = len(state.tokens) - 2 + while i < length: + if state.tokens[i].level == level and state.tokens[i].type == "paragraph_open": + state.tokens[i + 2].hidden = True + state.tokens[i].hidden = True + i += 2 + i += 1 + + +def list_block(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool: + LOGGER.debug("entering list: %s, %s, %s, %s", state, startLine, endLine, silent) + + isTerminatingParagraph = False + tight = True + + if state.is_code_block(startLine): + return False + + # Special case: + # - item 1 + # - item 2 + # - item 3 + # - item 4 + # - this one is a paragraph continuation + if ( + state.listIndent >= 0 + and state.sCount[startLine] - state.listIndent >= 4 + and state.sCount[startLine] < state.blkIndent + ): + return False + + # limit conditions when list can interrupt + # a paragraph (validation mode only) + # Next list item should still terminate previous list item + # + # This code can fail if plugins use blkIndent as well as lists, + # but I hope the spec gets fixed long before that happens. + # + if ( + silent + and state.parentType == "paragraph" + and state.sCount[startLine] >= state.blkIndent + ): + isTerminatingParagraph = True + + # Detect list type and position after marker + posAfterMarker = skipOrderedListMarker(state, startLine) + if posAfterMarker >= 0: + isOrdered = True + start = state.bMarks[startLine] + state.tShift[startLine] + markerValue = int(state.src[start : posAfterMarker - 1]) + + # If we're starting a new ordered list right after + # a paragraph, it should start with 1. + if isTerminatingParagraph and markerValue != 1: + return False + else: + posAfterMarker = skipBulletListMarker(state, startLine) + if posAfterMarker >= 0: + isOrdered = False + else: + return False + + # If we're starting a new unordered list right after + # a paragraph, first line should not be empty. + if ( + isTerminatingParagraph + and state.skipSpaces(posAfterMarker) >= state.eMarks[startLine] + ): + return False + + # We should terminate list on style change. Remember first one to compare. + markerChar = state.src[posAfterMarker - 1] + + # For validation mode we can terminate immediately + if silent: + return True + + # Start list + listTokIdx = len(state.tokens) + + if isOrdered: + token = state.push("ordered_list_open", "ol", 1) + if markerValue != 1: + token.attrs = {"start": markerValue} + + else: + token = state.push("bullet_list_open", "ul", 1) + + token.map = listLines = [startLine, 0] + token.markup = markerChar + + # + # Iterate list items + # + + nextLine = startLine + prevEmptyEnd = False + terminatorRules = state.md.block.ruler.getRules("list") + + oldParentType = state.parentType + state.parentType = "list" + + while nextLine < endLine: + pos = posAfterMarker + maximum = state.eMarks[nextLine] + + initial = offset = ( + state.sCount[nextLine] + + posAfterMarker + - (state.bMarks[startLine] + state.tShift[startLine]) + ) + + while pos < maximum: + ch = state.src[pos] + + if ch == "\t": + offset += 4 - (offset + state.bsCount[nextLine]) % 4 + elif ch == " ": + offset += 1 + else: + break + + pos += 1 + + contentStart = pos + + # trimming space in "- \n 3" case, indent is 1 here + indentAfterMarker = 1 if contentStart >= maximum else offset - initial + + # If we have more than 4 spaces, the indent is 1 + # (the rest is just indented code block) + if indentAfterMarker > 4: + indentAfterMarker = 1 + + # " - test" + # ^^^^^ - calculating total length of this thing + indent = initial + indentAfterMarker + + # Run subparser & write tokens + token = state.push("list_item_open", "li", 1) + token.markup = markerChar + token.map = itemLines = [startLine, 0] + if isOrdered: + token.info = state.src[start : posAfterMarker - 1] + + # change current state, then restore it after parser subcall + oldTight = state.tight + oldTShift = state.tShift[startLine] + oldSCount = state.sCount[startLine] + + # - example list + # ^ listIndent position will be here + # ^ blkIndent position will be here + # + oldListIndent = state.listIndent + state.listIndent = state.blkIndent + state.blkIndent = indent + + state.tight = True + state.tShift[startLine] = contentStart - state.bMarks[startLine] + state.sCount[startLine] = offset + + if contentStart >= maximum and state.isEmpty(startLine + 1): + # workaround for this case + # (list item is empty, list terminates before "foo"): + # ~~~~~~~~ + # - + # + # foo + # ~~~~~~~~ + state.line = min(state.line + 2, endLine) + else: + # NOTE in list.js this was: + # state.md.block.tokenize(state, startLine, endLine, True) + # but tokeniz does not take the final parameter + state.md.block.tokenize(state, startLine, endLine) + + # If any of list item is tight, mark list as tight + if (not state.tight) or prevEmptyEnd: + tight = False + + # Item become loose if finish with empty line, + # but we should filter last element, because it means list finish + prevEmptyEnd = (state.line - startLine) > 1 and state.isEmpty(state.line - 1) + + state.blkIndent = state.listIndent + state.listIndent = oldListIndent + state.tShift[startLine] = oldTShift + state.sCount[startLine] = oldSCount + state.tight = oldTight + + token = state.push("list_item_close", "li", -1) + token.markup = markerChar + + nextLine = startLine = state.line + itemLines[1] = nextLine + + if nextLine >= endLine: + break + + contentStart = state.bMarks[startLine] + + # + # Try to check if list is terminated or continued. + # + if state.sCount[nextLine] < state.blkIndent: + break + + if state.is_code_block(startLine): + break + + # fail if terminating block found + terminate = False + for terminatorRule in terminatorRules: + if terminatorRule(state, nextLine, endLine, True): + terminate = True + break + + if terminate: + break + + # fail if list has another type + if isOrdered: + posAfterMarker = skipOrderedListMarker(state, nextLine) + if posAfterMarker < 0: + break + start = state.bMarks[nextLine] + state.tShift[nextLine] + else: + posAfterMarker = skipBulletListMarker(state, nextLine) + if posAfterMarker < 0: + break + + if markerChar != state.src[posAfterMarker - 1]: + break + + # Finalize list + if isOrdered: + token = state.push("ordered_list_close", "ol", -1) + else: + token = state.push("bullet_list_close", "ul", -1) + + token.markup = markerChar + + listLines[1] = nextLine + state.line = nextLine + + state.parentType = oldParentType + + # mark paragraphs tight if needed + if tight: + markTightParagraphs(state, listTokIdx) + + return True diff --git a/local_packages/markdown_it/rules_block/paragraph.py b/local_packages/markdown_it/rules_block/paragraph.py new file mode 100644 index 0000000..30ba877 --- /dev/null +++ b/local_packages/markdown_it/rules_block/paragraph.py @@ -0,0 +1,66 @@ +"""Paragraph.""" + +import logging + +from .state_block import StateBlock + +LOGGER = logging.getLogger(__name__) + + +def paragraph(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool: + LOGGER.debug( + "entering paragraph: %s, %s, %s, %s", state, startLine, endLine, silent + ) + + nextLine = startLine + 1 + ruler = state.md.block.ruler + terminatorRules = ruler.getRules("paragraph") + endLine = state.lineMax + + oldParentType = state.parentType + state.parentType = "paragraph" + + # jump line-by-line until empty one or EOF + while nextLine < endLine: + if state.isEmpty(nextLine): + break + # this would be a code block normally, but after paragraph + # it's considered a lazy continuation regardless of what's there + if state.sCount[nextLine] - state.blkIndent > 3: + nextLine += 1 + continue + + # quirk for blockquotes, this line should already be checked by that rule + if state.sCount[nextLine] < 0: + nextLine += 1 + continue + + # Some tags can terminate paragraph without empty line. + terminate = False + for terminatorRule in terminatorRules: + if terminatorRule(state, nextLine, endLine, True): + terminate = True + break + + if terminate: + break + + nextLine += 1 + + content = state.getLines(startLine, nextLine, state.blkIndent, False).strip() + + state.line = nextLine + + token = state.push("paragraph_open", "p", 1) + token.map = [startLine, state.line] + + token = state.push("inline", "", 0) + token.content = content + token.map = [startLine, state.line] + token.children = [] + + token = state.push("paragraph_close", "p", -1) + + state.parentType = oldParentType + + return True diff --git a/local_packages/markdown_it/rules_block/reference.py b/local_packages/markdown_it/rules_block/reference.py new file mode 100644 index 0000000..ad94d40 --- /dev/null +++ b/local_packages/markdown_it/rules_block/reference.py @@ -0,0 +1,235 @@ +import logging + +from ..common.utils import charCodeAt, isSpace, normalizeReference +from .state_block import StateBlock + +LOGGER = logging.getLogger(__name__) + + +def reference(state: StateBlock, startLine: int, _endLine: int, silent: bool) -> bool: + LOGGER.debug( + "entering reference: %s, %s, %s, %s", state, startLine, _endLine, silent + ) + + pos = state.bMarks[startLine] + state.tShift[startLine] + maximum = state.eMarks[startLine] + nextLine = startLine + 1 + + if state.is_code_block(startLine): + return False + + if state.src[pos] != "[": + return False + + string = state.src[pos : maximum + 1] + + # string = state.getLines(startLine, nextLine, state.blkIndent, False).strip() + maximum = len(string) + + labelEnd = None + pos = 1 + while pos < maximum: + ch = charCodeAt(string, pos) + if ch == 0x5B: # /* [ */ + return False + elif ch == 0x5D: # /* ] */ + labelEnd = pos + break + elif ch == 0x0A: # /* \n */ + if (lineContent := getNextLine(state, nextLine)) is not None: + string += lineContent + maximum = len(string) + nextLine += 1 + elif ch == 0x5C: # /* \ */ + pos += 1 + if ( + pos < maximum + and charCodeAt(string, pos) == 0x0A + and (lineContent := getNextLine(state, nextLine)) is not None + ): + string += lineContent + maximum = len(string) + nextLine += 1 + pos += 1 + + if ( + labelEnd is None or labelEnd < 0 or charCodeAt(string, labelEnd + 1) != 0x3A + ): # /* : */ + return False + + # [label]: destination 'title' + # ^^^ skip optional whitespace here + pos = labelEnd + 2 + while pos < maximum: + ch = charCodeAt(string, pos) + if ch == 0x0A: + if (lineContent := getNextLine(state, nextLine)) is not None: + string += lineContent + maximum = len(string) + nextLine += 1 + elif isSpace(ch): + pass + else: + break + pos += 1 + + # [label]: destination 'title' + # ^^^^^^^^^^^ parse this + destRes = state.md.helpers.parseLinkDestination(string, pos, maximum) + if not destRes.ok: + return False + + href = state.md.normalizeLink(destRes.str) + if not state.md.validateLink(href): + return False + + pos = destRes.pos + + # save cursor state, we could require to rollback later + destEndPos = pos + destEndLineNo = nextLine + + # [label]: destination 'title' + # ^^^ skipping those spaces + start = pos + while pos < maximum: + ch = charCodeAt(string, pos) + if ch == 0x0A: + if (lineContent := getNextLine(state, nextLine)) is not None: + string += lineContent + maximum = len(string) + nextLine += 1 + elif isSpace(ch): + pass + else: + break + pos += 1 + + # [label]: destination 'title' + # ^^^^^^^ parse this + titleRes = state.md.helpers.parseLinkTitle(string, pos, maximum, None) + while titleRes.can_continue: + if (lineContent := getNextLine(state, nextLine)) is None: + break + string += lineContent + pos = maximum + maximum = len(string) + nextLine += 1 + titleRes = state.md.helpers.parseLinkTitle(string, pos, maximum, titleRes) + + if pos < maximum and start != pos and titleRes.ok: + title = titleRes.str + pos = titleRes.pos + else: + title = "" + pos = destEndPos + nextLine = destEndLineNo + + # skip trailing spaces until the rest of the line + while pos < maximum: + ch = charCodeAt(string, pos) + if not isSpace(ch): + break + pos += 1 + + if pos < maximum and charCodeAt(string, pos) != 0x0A and title: + # garbage at the end of the line after title, + # but it could still be a valid reference if we roll back + title = "" + pos = destEndPos + nextLine = destEndLineNo + while pos < maximum: + ch = charCodeAt(string, pos) + if not isSpace(ch): + break + pos += 1 + + if pos < maximum and charCodeAt(string, pos) != 0x0A: + # garbage at the end of the line + return False + + label = normalizeReference(string[1:labelEnd]) + if not label: + # CommonMark 0.20 disallows empty labels + return False + + # Reference can not terminate anything. This check is for safety only. + if silent: + return True + + if "references" not in state.env: + state.env["references"] = {} + + state.line = nextLine + + # note, this is not part of markdown-it JS, but is useful for renderers + if state.md.options.get("inline_definitions", False): + token = state.push("definition", "", 0) + token.meta = { + "id": label, + "title": title, + "url": href, + "label": string[1:labelEnd], + } + token.map = [startLine, state.line] + + if label not in state.env["references"]: + state.env["references"][label] = { + "title": title, + "href": href, + "map": [startLine, state.line], + } + else: + state.env.setdefault("duplicate_refs", []).append( + { + "title": title, + "href": href, + "label": label, + "map": [startLine, state.line], + } + ) + + return True + + +def getNextLine(state: StateBlock, nextLine: int) -> None | str: + endLine = state.lineMax + + if nextLine >= endLine or state.isEmpty(nextLine): + # empty line or end of input + return None + + isContinuation = False + + # this would be a code block normally, but after paragraph + # it's considered a lazy continuation regardless of what's there + if state.is_code_block(nextLine): + isContinuation = True + + # quirk for blockquotes, this line should already be checked by that rule + if state.sCount[nextLine] < 0: + isContinuation = True + + if not isContinuation: + terminatorRules = state.md.block.ruler.getRules("reference") + oldParentType = state.parentType + state.parentType = "reference" + + # Some tags can terminate paragraph without empty line. + terminate = False + for terminatorRule in terminatorRules: + if terminatorRule(state, nextLine, endLine, True): + terminate = True + break + + state.parentType = oldParentType + + if terminate: + # terminated by another block + return None + + pos = state.bMarks[nextLine] + state.tShift[nextLine] + maximum = state.eMarks[nextLine] + + # max + 1 explicitly includes the newline + return state.src[pos : maximum + 1] diff --git a/local_packages/markdown_it/rules_block/state_block.py b/local_packages/markdown_it/rules_block/state_block.py new file mode 100644 index 0000000..445ad26 --- /dev/null +++ b/local_packages/markdown_it/rules_block/state_block.py @@ -0,0 +1,261 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, Literal + +from ..common.utils import isStrSpace +from ..ruler import StateBase +from ..token import Token +from ..utils import EnvType + +if TYPE_CHECKING: + from markdown_it.main import MarkdownIt + + +class StateBlock(StateBase): + def __init__( + self, src: str, md: MarkdownIt, env: EnvType, tokens: list[Token] + ) -> None: + self.src = src + + # link to parser instance + self.md = md + + self.env = env + + # + # Internal state variables + # + + self.tokens = tokens + + self.bMarks: list[int] = [] # line begin offsets for fast jumps + self.eMarks: list[int] = [] # line end offsets for fast jumps + # offsets of the first non-space characters (tabs not expanded) + self.tShift: list[int] = [] + self.sCount: list[int] = [] # indents for each line (tabs expanded) + + # An amount of virtual spaces (tabs expanded) between beginning + # of each line (bMarks) and real beginning of that line. + # + # It exists only as a hack because blockquotes override bMarks + # losing information in the process. + # + # It's used only when expanding tabs, you can think about it as + # an initial tab length, e.g. bsCount=21 applied to string `\t123` + # means first tab should be expanded to 4-21%4 === 3 spaces. + # + self.bsCount: list[int] = [] + + # block parser variables + self.blkIndent = 0 # required block content indent (for example, if we are + # inside a list, it would be positioned after list marker) + self.line = 0 # line index in src + self.lineMax = 0 # lines count + self.tight = False # loose/tight mode for lists + self.ddIndent = -1 # indent of the current dd block (-1 if there isn't any) + self.listIndent = -1 # indent of the current list block (-1 if there isn't any) + + # can be 'blockquote', 'list', 'root', 'paragraph' or 'reference' + # used in lists to determine if they interrupt a paragraph + self.parentType = "root" + + self.level = 0 + + # renderer + self.result = "" + + # Create caches + # Generate markers. + indent_found = False + + start = pos = indent = offset = 0 + length = len(self.src) + + for pos, character in enumerate(self.src): + if not indent_found: + if isStrSpace(character): + indent += 1 + + if character == "\t": + offset += 4 - offset % 4 + else: + offset += 1 + continue + else: + indent_found = True + + if character == "\n" or pos == length - 1: + if character != "\n": + pos += 1 + self.bMarks.append(start) + self.eMarks.append(pos) + self.tShift.append(indent) + self.sCount.append(offset) + self.bsCount.append(0) + + indent_found = False + indent = 0 + offset = 0 + start = pos + 1 + + # Push fake entry to simplify cache bounds checks + self.bMarks.append(length) + self.eMarks.append(length) + self.tShift.append(0) + self.sCount.append(0) + self.bsCount.append(0) + + self.lineMax = len(self.bMarks) - 1 # don't count last fake line + + # pre-check if code blocks are enabled, to speed up is_code_block method + self._code_enabled = "code" in self.md["block"].ruler.get_active_rules() + + def __repr__(self) -> str: + return ( + f"{self.__class__.__name__}" + f"(line={self.line},level={self.level},tokens={len(self.tokens)})" + ) + + def push(self, ttype: str, tag: str, nesting: Literal[-1, 0, 1]) -> Token: + """Push new token to "stream".""" + token = Token(ttype, tag, nesting) + token.block = True + if nesting < 0: + self.level -= 1 # closing tag + token.level = self.level + if nesting > 0: + self.level += 1 # opening tag + self.tokens.append(token) + return token + + def isEmpty(self, line: int) -> bool: + """.""" + return (self.bMarks[line] + self.tShift[line]) >= self.eMarks[line] + + def skipEmptyLines(self, from_pos: int) -> int: + """.""" + while from_pos < self.lineMax: + try: + if (self.bMarks[from_pos] + self.tShift[from_pos]) < self.eMarks[ + from_pos + ]: + break + except IndexError: + pass + from_pos += 1 + return from_pos + + def skipSpaces(self, pos: int) -> int: + """Skip spaces from given position.""" + while True: + try: + current = self.src[pos] + except IndexError: + break + if not isStrSpace(current): + break + pos += 1 + return pos + + def skipSpacesBack(self, pos: int, minimum: int) -> int: + """Skip spaces from given position in reverse.""" + if pos <= minimum: + return pos + while pos > minimum: + pos -= 1 + if not isStrSpace(self.src[pos]): + return pos + 1 + return pos + + def skipChars(self, pos: int, code: int) -> int: + """Skip character code from given position.""" + while True: + try: + current = self.srcCharCode[pos] + except IndexError: + break + if current != code: + break + pos += 1 + return pos + + def skipCharsStr(self, pos: int, ch: str) -> int: + """Skip character string from given position.""" + while True: + try: + current = self.src[pos] + except IndexError: + break + if current != ch: + break + pos += 1 + return pos + + def skipCharsBack(self, pos: int, code: int, minimum: int) -> int: + """Skip character code reverse from given position - 1.""" + if pos <= minimum: + return pos + while pos > minimum: + pos -= 1 + if code != self.srcCharCode[pos]: + return pos + 1 + return pos + + def skipCharsStrBack(self, pos: int, ch: str, minimum: int) -> int: + """Skip character string reverse from given position - 1.""" + if pos <= minimum: + return pos + while pos > minimum: + pos -= 1 + if ch != self.src[pos]: + return pos + 1 + return pos + + def getLines(self, begin: int, end: int, indent: int, keepLastLF: bool) -> str: + """Cut lines range from source.""" + line = begin + if begin >= end: + return "" + + queue = [""] * (end - begin) + + i = 1 + while line < end: + lineIndent = 0 + lineStart = first = self.bMarks[line] + last = ( + self.eMarks[line] + 1 + if line + 1 < end or keepLastLF + else self.eMarks[line] + ) + + while (first < last) and (lineIndent < indent): + ch = self.src[first] + if isStrSpace(ch): + if ch == "\t": + lineIndent += 4 - (lineIndent + self.bsCount[line]) % 4 + else: + lineIndent += 1 + elif first - lineStart < self.tShift[line]: + lineIndent += 1 + else: + break + first += 1 + + if lineIndent > indent: + # partially expanding tabs in code blocks, e.g '\t\tfoobar' + # with indent=2 becomes ' \tfoobar' + queue[i - 1] = (" " * (lineIndent - indent)) + self.src[first:last] + else: + queue[i - 1] = self.src[first:last] + + line += 1 + i += 1 + + return "".join(queue) + + def is_code_block(self, line: int) -> bool: + """Check if line is a code block, + i.e. the code block rule is enabled and text is indented by more than 3 spaces. + """ + return self._code_enabled and (self.sCount[line] - self.blkIndent) >= 4 diff --git a/local_packages/markdown_it/rules_block/table.py b/local_packages/markdown_it/rules_block/table.py new file mode 100644 index 0000000..c52553d --- /dev/null +++ b/local_packages/markdown_it/rules_block/table.py @@ -0,0 +1,250 @@ +# GFM table, https://github.github.com/gfm/#tables-extension- +from __future__ import annotations + +import re + +from ..common.utils import charStrAt, isStrSpace +from .state_block import StateBlock + +headerLineRe = re.compile(r"^:?-+:?$") +enclosingPipesRe = re.compile(r"^\||\|$") + +# Limit the amount of empty autocompleted cells in a table, +# see https://github.com/markdown-it/markdown-it/issues/1000, +# Both pulldown-cmark and commonmark-hs limit the number of cells this way to ~200k. +# We set it to 65k, which can expand user input by a factor of x370 +# (256x256 square is 1.8kB expanded into 650kB). +MAX_AUTOCOMPLETED_CELLS = 0x10000 + + +def getLine(state: StateBlock, line: int) -> str: + pos = state.bMarks[line] + state.tShift[line] + maximum = state.eMarks[line] + + # return state.src.substr(pos, max - pos) + return state.src[pos:maximum] + + +def escapedSplit(string: str) -> list[str]: + result: list[str] = [] + pos = 0 + max = len(string) + isEscaped = False + lastPos = 0 + current = "" + ch = charStrAt(string, pos) + + while pos < max: + if ch == "|": + if not isEscaped: + # pipe separating cells, '|' + result.append(current + string[lastPos:pos]) + current = "" + lastPos = pos + 1 + else: + # escaped pipe, '\|' + current += string[lastPos : pos - 1] + lastPos = pos + + isEscaped = ch == "\\" + pos += 1 + + ch = charStrAt(string, pos) + + result.append(current + string[lastPos:]) + + return result + + +def table(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool: + tbodyLines = None + + # should have at least two lines + if startLine + 2 > endLine: + return False + + nextLine = startLine + 1 + + if state.sCount[nextLine] < state.blkIndent: + return False + + if state.is_code_block(nextLine): + return False + + # first character of the second line should be '|', '-', ':', + # and no other characters are allowed but spaces; + # basically, this is the equivalent of /^[-:|][-:|\s]*$/ regexp + + pos = state.bMarks[nextLine] + state.tShift[nextLine] + if pos >= state.eMarks[nextLine]: + return False + first_ch = state.src[pos] + pos += 1 + if first_ch not in ("|", "-", ":"): + return False + + if pos >= state.eMarks[nextLine]: + return False + second_ch = state.src[pos] + pos += 1 + if second_ch not in ("|", "-", ":") and not isStrSpace(second_ch): + return False + + # if first character is '-', then second character must not be a space + # (due to parsing ambiguity with list) + if first_ch == "-" and isStrSpace(second_ch): + return False + + while pos < state.eMarks[nextLine]: + ch = state.src[pos] + + if ch not in ("|", "-", ":") and not isStrSpace(ch): + return False + + pos += 1 + + lineText = getLine(state, startLine + 1) + + columns = lineText.split("|") + aligns = [] + for i in range(len(columns)): + t = columns[i].strip() + if not t: + # allow empty columns before and after table, but not in between columns; + # e.g. allow ` |---| `, disallow ` ---||--- ` + if i == 0 or i == len(columns) - 1: + continue + else: + return False + + if not headerLineRe.search(t): + return False + if charStrAt(t, len(t) - 1) == ":": + aligns.append("center" if charStrAt(t, 0) == ":" else "right") + elif charStrAt(t, 0) == ":": + aligns.append("left") + else: + aligns.append("") + + lineText = getLine(state, startLine).strip() + if "|" not in lineText: + return False + if state.is_code_block(startLine): + return False + columns = escapedSplit(lineText) + if columns and columns[0] == "": + columns.pop(0) + if columns and columns[-1] == "": + columns.pop() + + # header row will define an amount of columns in the entire table, + # and align row should be exactly the same (the rest of the rows can differ) + columnCount = len(columns) + if columnCount == 0 or columnCount != len(aligns): + return False + + if silent: + return True + + oldParentType = state.parentType + state.parentType = "table" + + # use 'blockquote' lists for termination because it's + # the most similar to tables + terminatorRules = state.md.block.ruler.getRules("blockquote") + + token = state.push("table_open", "table", 1) + token.map = tableLines = [startLine, 0] + + token = state.push("thead_open", "thead", 1) + token.map = [startLine, startLine + 1] + + token = state.push("tr_open", "tr", 1) + token.map = [startLine, startLine + 1] + + for i in range(len(columns)): + token = state.push("th_open", "th", 1) + if aligns[i]: + token.attrs = {"style": "text-align:" + aligns[i]} + + token = state.push("inline", "", 0) + # note in markdown-it this map was removed in v12.0.0 however, we keep it, + # since it is helpful to propagate to children tokens + token.map = [startLine, startLine + 1] + token.content = columns[i].strip() + token.children = [] + + token = state.push("th_close", "th", -1) + + token = state.push("tr_close", "tr", -1) + token = state.push("thead_close", "thead", -1) + + autocompleted_cells = 0 + nextLine = startLine + 2 + while nextLine < endLine: + if state.sCount[nextLine] < state.blkIndent: + break + + terminate = False + for i in range(len(terminatorRules)): + if terminatorRules[i](state, nextLine, endLine, True): + terminate = True + break + + if terminate: + break + lineText = getLine(state, nextLine).strip() + if not lineText: + break + if state.is_code_block(nextLine): + break + columns = escapedSplit(lineText) + if columns and columns[0] == "": + columns.pop(0) + if columns and columns[-1] == "": + columns.pop() + + # note: autocomplete count can be negative if user specifies more columns than header, + # but that does not affect intended use (which is limiting expansion) + autocompleted_cells += columnCount - len(columns) + if autocompleted_cells > MAX_AUTOCOMPLETED_CELLS: + break + + if nextLine == startLine + 2: + token = state.push("tbody_open", "tbody", 1) + token.map = tbodyLines = [startLine + 2, 0] + + token = state.push("tr_open", "tr", 1) + token.map = [nextLine, nextLine + 1] + + for i in range(columnCount): + token = state.push("td_open", "td", 1) + if aligns[i]: + token.attrs = {"style": "text-align:" + aligns[i]} + + token = state.push("inline", "", 0) + # note in markdown-it this map was removed in v12.0.0 however, we keep it, + # since it is helpful to propagate to children tokens + token.map = [nextLine, nextLine + 1] + try: + token.content = columns[i].strip() if columns[i] else "" + except IndexError: + token.content = "" + token.children = [] + + token = state.push("td_close", "td", -1) + + token = state.push("tr_close", "tr", -1) + + nextLine += 1 + + if tbodyLines: + token = state.push("tbody_close", "tbody", -1) + tbodyLines[1] = nextLine + + token = state.push("table_close", "table", -1) + + tableLines[1] = nextLine + state.parentType = oldParentType + state.line = nextLine + return True diff --git a/local_packages/markdown_it/rules_core/__init__.py b/local_packages/markdown_it/rules_core/__init__.py new file mode 100644 index 0000000..e7d7753 --- /dev/null +++ b/local_packages/markdown_it/rules_core/__init__.py @@ -0,0 +1,19 @@ +__all__ = ( + "StateCore", + "block", + "inline", + "linkify", + "normalize", + "replace", + "smartquotes", + "text_join", +) + +from .block import block +from .inline import inline +from .linkify import linkify +from .normalize import normalize +from .replacements import replace +from .smartquotes import smartquotes +from .state_core import StateCore +from .text_join import text_join diff --git a/local_packages/markdown_it/rules_core/block.py b/local_packages/markdown_it/rules_core/block.py new file mode 100644 index 0000000..a6c3bb8 --- /dev/null +++ b/local_packages/markdown_it/rules_core/block.py @@ -0,0 +1,13 @@ +from ..token import Token +from .state_core import StateCore + + +def block(state: StateCore) -> None: + if state.inlineMode: + token = Token("inline", "", 0) + token.content = state.src + token.map = [0, 1] + token.children = [] + state.tokens.append(token) + else: + state.md.block.parse(state.src, state.md, state.env, state.tokens) diff --git a/local_packages/markdown_it/rules_core/inline.py b/local_packages/markdown_it/rules_core/inline.py new file mode 100644 index 0000000..c3fd0b5 --- /dev/null +++ b/local_packages/markdown_it/rules_core/inline.py @@ -0,0 +1,10 @@ +from .state_core import StateCore + + +def inline(state: StateCore) -> None: + """Parse inlines""" + for token in state.tokens: + if token.type == "inline": + if token.children is None: + token.children = [] + state.md.inline.parse(token.content, state.md, state.env, token.children) diff --git a/local_packages/markdown_it/rules_core/linkify.py b/local_packages/markdown_it/rules_core/linkify.py new file mode 100644 index 0000000..efbc9d4 --- /dev/null +++ b/local_packages/markdown_it/rules_core/linkify.py @@ -0,0 +1,149 @@ +from __future__ import annotations + +import re +from typing import Protocol + +from ..common.utils import arrayReplaceAt, isLinkClose, isLinkOpen +from ..token import Token +from .state_core import StateCore + +HTTP_RE = re.compile(r"^http://") +MAILTO_RE = re.compile(r"^mailto:") +TEST_MAILTO_RE = re.compile(r"^mailto:", flags=re.IGNORECASE) + + +def linkify(state: StateCore) -> None: + """Rule for identifying plain-text links.""" + if not state.md.options.linkify: + return + + if not state.md.linkify: + raise ModuleNotFoundError("Linkify enabled but not installed.") + + for inline_token in state.tokens: + if inline_token.type != "inline" or not state.md.linkify.pretest( + inline_token.content + ): + continue + + tokens = inline_token.children + + htmlLinkLevel = 0 + + # We scan from the end, to keep position when new tags added. + # Use reversed logic in links start/end match + assert tokens is not None + i = len(tokens) + while i >= 1: + i -= 1 + assert isinstance(tokens, list) + currentToken = tokens[i] + + # Skip content of markdown links + if currentToken.type == "link_close": + i -= 1 + while ( + tokens[i].level != currentToken.level + and tokens[i].type != "link_open" + ): + i -= 1 + continue + + # Skip content of html tag links + if currentToken.type == "html_inline": + if isLinkOpen(currentToken.content) and htmlLinkLevel > 0: + htmlLinkLevel -= 1 + if isLinkClose(currentToken.content): + htmlLinkLevel += 1 + if htmlLinkLevel > 0: + continue + + if currentToken.type == "text" and state.md.linkify.test( + currentToken.content + ): + text = currentToken.content + links: list[_LinkType] = state.md.linkify.match(text) or [] + + # Now split string to nodes + nodes = [] + level = currentToken.level + lastPos = 0 + + # forbid escape sequence at the start of the string, + # this avoids http\://example.com/ from being linkified as + # http:
//example.com/ + if ( + links + and links[0].index == 0 + and i > 0 + and tokens[i - 1].type == "text_special" + ): + links = links[1:] + + for link in links: + url = link.url + fullUrl = state.md.normalizeLink(url) + if not state.md.validateLink(fullUrl): + continue + + urlText = link.text + + # Linkifier might send raw hostnames like "example.com", where url + # starts with domain name. So we prepend http:// in those cases, + # and remove it afterwards. + if not link.schema: + urlText = HTTP_RE.sub( + "", state.md.normalizeLinkText("http://" + urlText) + ) + elif link.schema == "mailto:" and TEST_MAILTO_RE.search(urlText): + urlText = MAILTO_RE.sub( + "", state.md.normalizeLinkText("mailto:" + urlText) + ) + else: + urlText = state.md.normalizeLinkText(urlText) + + pos = link.index + + if pos > lastPos: + token = Token("text", "", 0) + token.content = text[lastPos:pos] + token.level = level + nodes.append(token) + + token = Token("link_open", "a", 1) + token.attrs = {"href": fullUrl} + token.level = level + level += 1 + token.markup = "linkify" + token.info = "auto" + nodes.append(token) + + token = Token("text", "", 0) + token.content = urlText + token.level = level + nodes.append(token) + + token = Token("link_close", "a", -1) + level -= 1 + token.level = level + token.markup = "linkify" + token.info = "auto" + nodes.append(token) + + lastPos = link.last_index + + if lastPos < len(text): + token = Token("text", "", 0) + token.content = text[lastPos:] + token.level = level + nodes.append(token) + + inline_token.children = tokens = arrayReplaceAt(tokens, i, nodes) + + +class _LinkType(Protocol): + url: str + text: str + index: int + last_index: int + schema: str | None diff --git a/local_packages/markdown_it/rules_core/normalize.py b/local_packages/markdown_it/rules_core/normalize.py new file mode 100644 index 0000000..3243924 --- /dev/null +++ b/local_packages/markdown_it/rules_core/normalize.py @@ -0,0 +1,19 @@ +"""Normalize input string.""" + +import re + +from .state_core import StateCore + +# https://spec.commonmark.org/0.29/#line-ending +NEWLINES_RE = re.compile(r"\r\n?|\n") +NULL_RE = re.compile(r"\0") + + +def normalize(state: StateCore) -> None: + # Normalize newlines + string = NEWLINES_RE.sub("\n", state.src) + + # Replace NULL characters + string = NULL_RE.sub("\ufffd", string) + + state.src = string diff --git a/local_packages/markdown_it/rules_core/replacements.py b/local_packages/markdown_it/rules_core/replacements.py new file mode 100644 index 0000000..bcc9980 --- /dev/null +++ b/local_packages/markdown_it/rules_core/replacements.py @@ -0,0 +1,127 @@ +"""Simple typographic replacements + +* ``(c)``, ``(C)`` → © +* ``(tm)``, ``(TM)`` → ™ +* ``(r)``, ``(R)`` → ® +* ``+-`` → ± +* ``...`` → … +* ``?....`` → ?.. +* ``!....`` → !.. +* ``????????`` → ??? +* ``!!!!!`` → !!! +* ``,,,`` → , +* ``--`` → &ndash +* ``---`` → &mdash +""" + +from __future__ import annotations + +import logging +import re + +from ..token import Token +from .state_core import StateCore + +LOGGER = logging.getLogger(__name__) + +# TODO: +# - fractionals 1/2, 1/4, 3/4 -> ½, ¼, ¾ +# - multiplication 2 x 4 -> 2 × 4 + +RARE_RE = re.compile(r"\+-|\.\.|\?\?\?\?|!!!!|,,|--") + +# Workaround for phantomjs - need regex without /g flag, +# or root check will fail every second time +# SCOPED_ABBR_TEST_RE = r"\((c|tm|r)\)" + +SCOPED_ABBR_RE = re.compile(r"\((c|tm|r)\)", flags=re.IGNORECASE) + +PLUS_MINUS_RE = re.compile(r"\+-") + +ELLIPSIS_RE = re.compile(r"\.{2,}") + +ELLIPSIS_QUESTION_EXCLAMATION_RE = re.compile(r"([?!])…") + +QUESTION_EXCLAMATION_RE = re.compile(r"([?!]){4,}") + +COMMA_RE = re.compile(r",{2,}") + +EM_DASH_RE = re.compile(r"(^|[^-])---(?=[^-]|$)", flags=re.MULTILINE) + +EN_DASH_RE = re.compile(r"(^|\s)--(?=\s|$)", flags=re.MULTILINE) + +EN_DASH_INDENT_RE = re.compile(r"(^|[^-\s])--(?=[^-\s]|$)", flags=re.MULTILINE) + + +SCOPED_ABBR = {"c": "©", "r": "®", "tm": "™"} + + +def replaceFn(match: re.Match[str]) -> str: + return SCOPED_ABBR[match.group(1).lower()] + + +def replace_scoped(inlineTokens: list[Token]) -> None: + inside_autolink = 0 + + for token in inlineTokens: + if token.type == "text" and not inside_autolink: + token.content = SCOPED_ABBR_RE.sub(replaceFn, token.content) + + if token.type == "link_open" and token.info == "auto": + inside_autolink -= 1 + + if token.type == "link_close" and token.info == "auto": + inside_autolink += 1 + + +def replace_rare(inlineTokens: list[Token]) -> None: + inside_autolink = 0 + + for token in inlineTokens: + if ( + token.type == "text" + and (not inside_autolink) + and RARE_RE.search(token.content) + ): + # +- -> ± + token.content = PLUS_MINUS_RE.sub("±", token.content) + + # .., ..., ....... -> … + token.content = ELLIPSIS_RE.sub("…", token.content) + + # but ?..... & !..... -> ?.. & !.. + token.content = ELLIPSIS_QUESTION_EXCLAMATION_RE.sub("\\1..", token.content) + token.content = QUESTION_EXCLAMATION_RE.sub("\\1\\1\\1", token.content) + + # ,, ,,, ,,,, -> , + token.content = COMMA_RE.sub(",", token.content) + + # em-dash + token.content = EM_DASH_RE.sub("\\1\u2014", token.content) + + # en-dash + token.content = EN_DASH_RE.sub("\\1\u2013", token.content) + token.content = EN_DASH_INDENT_RE.sub("\\1\u2013", token.content) + + if token.type == "link_open" and token.info == "auto": + inside_autolink -= 1 + + if token.type == "link_close" and token.info == "auto": + inside_autolink += 1 + + +def replace(state: StateCore) -> None: + if not state.md.options.typographer: + return + + for token in state.tokens: + if token.type != "inline": + continue + if token.children is None: + continue + + if SCOPED_ABBR_RE.search(token.content): + replace_scoped(token.children) + + if RARE_RE.search(token.content): + replace_rare(token.children) diff --git a/local_packages/markdown_it/rules_core/smartquotes.py b/local_packages/markdown_it/rules_core/smartquotes.py new file mode 100644 index 0000000..f9b8b45 --- /dev/null +++ b/local_packages/markdown_it/rules_core/smartquotes.py @@ -0,0 +1,202 @@ +"""Convert straight quotation marks to typographic ones""" + +from __future__ import annotations + +import re +from typing import Any + +from ..common.utils import charCodeAt, isMdAsciiPunct, isPunctChar, isWhiteSpace +from ..token import Token +from .state_core import StateCore + +QUOTE_TEST_RE = re.compile(r"['\"]") +QUOTE_RE = re.compile(r"['\"]") +APOSTROPHE = "\u2019" # ’ + + +def replaceAt(string: str, index: int, ch: str) -> str: + # When the index is negative, the behavior is different from the js version. + # But basically, the index will not be negative. + assert index >= 0 + return string[:index] + ch + string[index + 1 :] + + +def process_inlines(tokens: list[Token], state: StateCore) -> None: + stack: list[dict[str, Any]] = [] + + for i, token in enumerate(tokens): + thisLevel = token.level + + j = 0 + for j in range(len(stack))[::-1]: + if stack[j]["level"] <= thisLevel: + break + else: + # When the loop is terminated without a "break". + # Subtract 1 to get the same index as the js version. + j -= 1 + + stack = stack[: j + 1] + + if token.type != "text": + continue + + text = token.content + pos = 0 + maximum = len(text) + + while pos < maximum: + goto_outer = False + lastIndex = pos + t = QUOTE_RE.search(text[lastIndex:]) + if not t: + break + + canOpen = canClose = True + pos = t.start(0) + lastIndex + 1 + isSingle = t.group(0) == "'" + + # Find previous character, + # default to space if it's the beginning of the line + lastChar: None | int = 0x20 + + if t.start(0) + lastIndex - 1 >= 0: + lastChar = charCodeAt(text, t.start(0) + lastIndex - 1) + else: + for j in range(i)[::-1]: + if tokens[j].type == "softbreak" or tokens[j].type == "hardbreak": + break + # should skip all tokens except 'text', 'html_inline' or 'code_inline' + if not tokens[j].content: + continue + + lastChar = charCodeAt(tokens[j].content, len(tokens[j].content) - 1) + break + + # Find next character, + # default to space if it's the end of the line + nextChar: None | int = 0x20 + + if pos < maximum: + nextChar = charCodeAt(text, pos) + else: + for j in range(i + 1, len(tokens)): + # nextChar defaults to 0x20 + if tokens[j].type == "softbreak" or tokens[j].type == "hardbreak": + break + # should skip all tokens except 'text', 'html_inline' or 'code_inline' + if not tokens[j].content: + continue + + nextChar = charCodeAt(tokens[j].content, 0) + break + + isLastPunctChar = lastChar is not None and ( + isMdAsciiPunct(lastChar) or isPunctChar(chr(lastChar)) + ) + isNextPunctChar = nextChar is not None and ( + isMdAsciiPunct(nextChar) or isPunctChar(chr(nextChar)) + ) + + isLastWhiteSpace = lastChar is not None and isWhiteSpace(lastChar) + isNextWhiteSpace = nextChar is not None and isWhiteSpace(nextChar) + + if isNextWhiteSpace: # noqa: SIM114 + canOpen = False + elif isNextPunctChar and not (isLastWhiteSpace or isLastPunctChar): + canOpen = False + + if isLastWhiteSpace: # noqa: SIM114 + canClose = False + elif isLastPunctChar and not (isNextWhiteSpace or isNextPunctChar): + canClose = False + + if nextChar == 0x22 and t.group(0) == '"': # 0x22: " # noqa: SIM102 + if ( + lastChar is not None and lastChar >= 0x30 and lastChar <= 0x39 + ): # 0x30: 0, 0x39: 9 + # special case: 1"" - count first quote as an inch + canClose = canOpen = False + + if canOpen and canClose: + # Replace quotes in the middle of punctuation sequence, but not + # in the middle of the words, i.e.: + # + # 1. foo " bar " baz - not replaced + # 2. foo-"-bar-"-baz - replaced + # 3. foo"bar"baz - not replaced + canOpen = isLastPunctChar + canClose = isNextPunctChar + + if not canOpen and not canClose: + # middle of word + if isSingle: + token.content = replaceAt( + token.content, t.start(0) + lastIndex, APOSTROPHE + ) + continue + + if canClose: + # this could be a closing quote, rewind the stack to get a match + for j in range(len(stack))[::-1]: + item = stack[j] + if stack[j]["level"] < thisLevel: + break + if item["single"] == isSingle and stack[j]["level"] == thisLevel: + item = stack[j] + + if isSingle: + openQuote = state.md.options.quotes[2] + closeQuote = state.md.options.quotes[3] + else: + openQuote = state.md.options.quotes[0] + closeQuote = state.md.options.quotes[1] + + # replace token.content *before* tokens[item.token].content, + # because, if they are pointing at the same token, replaceAt + # could mess up indices when quote length != 1 + token.content = replaceAt( + token.content, t.start(0) + lastIndex, closeQuote + ) + tokens[item["token"]].content = replaceAt( + tokens[item["token"]].content, item["pos"], openQuote + ) + + pos += len(closeQuote) - 1 + if item["token"] == i: + pos += len(openQuote) - 1 + + text = token.content + maximum = len(text) + + stack = stack[:j] + goto_outer = True + break + if goto_outer: + goto_outer = False + continue + + if canOpen: + stack.append( + { + "token": i, + "pos": t.start(0) + lastIndex, + "single": isSingle, + "level": thisLevel, + } + ) + elif canClose and isSingle: + token.content = replaceAt( + token.content, t.start(0) + lastIndex, APOSTROPHE + ) + + +def smartquotes(state: StateCore) -> None: + if not state.md.options.typographer: + return + + for token in state.tokens: + if token.type != "inline" or not QUOTE_RE.search(token.content): + continue + if token.children is not None: + process_inlines(token.children, state) diff --git a/local_packages/markdown_it/rules_core/state_core.py b/local_packages/markdown_it/rules_core/state_core.py new file mode 100644 index 0000000..a938041 --- /dev/null +++ b/local_packages/markdown_it/rules_core/state_core.py @@ -0,0 +1,25 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from ..ruler import StateBase +from ..token import Token +from ..utils import EnvType + +if TYPE_CHECKING: + from markdown_it import MarkdownIt + + +class StateCore(StateBase): + def __init__( + self, + src: str, + md: MarkdownIt, + env: EnvType, + tokens: list[Token] | None = None, + ) -> None: + self.src = src + self.md = md # link to parser instance + self.env = env + self.tokens: list[Token] = tokens or [] + self.inlineMode = False diff --git a/local_packages/markdown_it/rules_core/text_join.py b/local_packages/markdown_it/rules_core/text_join.py new file mode 100644 index 0000000..5379f6d --- /dev/null +++ b/local_packages/markdown_it/rules_core/text_join.py @@ -0,0 +1,35 @@ +"""Join raw text tokens with the rest of the text + +This is set as a separate rule to provide an opportunity for plugins +to run text replacements after text join, but before escape join. + +For example, `\\:)` shouldn't be replaced with an emoji. +""" + +from __future__ import annotations + +from ..token import Token +from .state_core import StateCore + + +def text_join(state: StateCore) -> None: + """Join raw text for escape sequences (`text_special`) tokens with the rest of the text""" + + for inline_token in state.tokens[:]: + if inline_token.type != "inline": + continue + + # convert text_special to text and join all adjacent text nodes + new_tokens: list[Token] = [] + for child_token in inline_token.children or []: + if child_token.type == "text_special": + child_token.type = "text" + if ( + child_token.type == "text" + and new_tokens + and new_tokens[-1].type == "text" + ): + new_tokens[-1].content += child_token.content + else: + new_tokens.append(child_token) + inline_token.children = new_tokens diff --git a/local_packages/markdown_it/rules_inline/__init__.py b/local_packages/markdown_it/rules_inline/__init__.py new file mode 100644 index 0000000..d82ef8f --- /dev/null +++ b/local_packages/markdown_it/rules_inline/__init__.py @@ -0,0 +1,31 @@ +__all__ = ( + "StateInline", + "autolink", + "backtick", + "emphasis", + "entity", + "escape", + "fragments_join", + "html_inline", + "image", + "link", + "link_pairs", + "linkify", + "newline", + "strikethrough", + "text", +) +from . import emphasis, strikethrough +from .autolink import autolink +from .backticks import backtick +from .balance_pairs import link_pairs +from .entity import entity +from .escape import escape +from .fragments_join import fragments_join +from .html_inline import html_inline +from .image import image +from .link import link +from .linkify import linkify +from .newline import newline +from .state_inline import StateInline +from .text import text diff --git a/local_packages/markdown_it/rules_inline/autolink.py b/local_packages/markdown_it/rules_inline/autolink.py new file mode 100644 index 0000000..6546e25 --- /dev/null +++ b/local_packages/markdown_it/rules_inline/autolink.py @@ -0,0 +1,77 @@ +# Process autolinks '' +import re + +from .state_inline import StateInline + +EMAIL_RE = re.compile( + r"^([a-zA-Z0-9.!#$%&\'*+\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*)$" +) +AUTOLINK_RE = re.compile(r"^([a-zA-Z][a-zA-Z0-9+.\-]{1,31}):([^<>\x00-\x20]*)$") + + +def autolink(state: StateInline, silent: bool) -> bool: + pos = state.pos + + if state.src[pos] != "<": + return False + + start = state.pos + maximum = state.posMax + + while True: + pos += 1 + if pos >= maximum: + return False + + ch = state.src[pos] + + if ch == "<": + return False + if ch == ">": + break + + url = state.src[start + 1 : pos] + + if AUTOLINK_RE.search(url) is not None: + fullUrl = state.md.normalizeLink(url) + if not state.md.validateLink(fullUrl): + return False + + if not silent: + token = state.push("link_open", "a", 1) + token.attrs = {"href": fullUrl} + token.markup = "autolink" + token.info = "auto" + + token = state.push("text", "", 0) + token.content = state.md.normalizeLinkText(url) + + token = state.push("link_close", "a", -1) + token.markup = "autolink" + token.info = "auto" + + state.pos += len(url) + 2 + return True + + if EMAIL_RE.search(url) is not None: + fullUrl = state.md.normalizeLink("mailto:" + url) + if not state.md.validateLink(fullUrl): + return False + + if not silent: + token = state.push("link_open", "a", 1) + token.attrs = {"href": fullUrl} + token.markup = "autolink" + token.info = "auto" + + token = state.push("text", "", 0) + token.content = state.md.normalizeLinkText(url) + + token = state.push("link_close", "a", -1) + token.markup = "autolink" + token.info = "auto" + + state.pos += len(url) + 2 + return True + + return False diff --git a/local_packages/markdown_it/rules_inline/backticks.py b/local_packages/markdown_it/rules_inline/backticks.py new file mode 100644 index 0000000..fc60d6b --- /dev/null +++ b/local_packages/markdown_it/rules_inline/backticks.py @@ -0,0 +1,72 @@ +# Parse backticks +import re + +from .state_inline import StateInline + +regex = re.compile("^ (.+) $") + + +def backtick(state: StateInline, silent: bool) -> bool: + pos = state.pos + + if state.src[pos] != "`": + return False + + start = pos + pos += 1 + maximum = state.posMax + + # scan marker length + while pos < maximum and (state.src[pos] == "`"): + pos += 1 + + marker = state.src[start:pos] + openerLength = len(marker) + + if state.backticksScanned and state.backticks.get(openerLength, 0) <= start: + if not silent: + state.pending += marker + state.pos += openerLength + return True + + matchStart = matchEnd = pos + + # Nothing found in the cache, scan until the end of the line (or until marker is found) + while True: + try: + matchStart = state.src.index("`", matchEnd) + except ValueError: + break + matchEnd = matchStart + 1 + + # scan marker length + while matchEnd < maximum and (state.src[matchEnd] == "`"): + matchEnd += 1 + + closerLength = matchEnd - matchStart + + if closerLength == openerLength: + # Found matching closer length. + if not silent: + token = state.push("code_inline", "code", 0) + token.markup = marker + token.content = state.src[pos:matchStart].replace("\n", " ") + if ( + token.content.startswith(" ") + and token.content.endswith(" ") + and len(token.content.strip()) > 0 + ): + token.content = token.content[1:-1] + state.pos = matchEnd + return True + + # Some different length found, put it in cache as upper limit of where closer can be found + state.backticks[closerLength] = matchStart + + # Scanned through the end, didn't find anything + state.backticksScanned = True + + if not silent: + state.pending += marker + state.pos += openerLength + return True diff --git a/local_packages/markdown_it/rules_inline/balance_pairs.py b/local_packages/markdown_it/rules_inline/balance_pairs.py new file mode 100644 index 0000000..9c63b27 --- /dev/null +++ b/local_packages/markdown_it/rules_inline/balance_pairs.py @@ -0,0 +1,138 @@ +"""Balance paired characters (*, _, etc) in inline tokens.""" + +from __future__ import annotations + +from .state_inline import Delimiter, StateInline + + +def processDelimiters(state: StateInline, delimiters: list[Delimiter]) -> None: + """For each opening emphasis-like marker find a matching closing one.""" + if not delimiters: + return + + openersBottom = {} + maximum = len(delimiters) + + # headerIdx is the first delimiter of the current (where closer is) delimiter run + headerIdx = 0 + lastTokenIdx = -2 # needs any value lower than -1 + jumps: list[int] = [] + closerIdx = 0 + while closerIdx < maximum: + closer = delimiters[closerIdx] + + jumps.append(0) + + # markers belong to same delimiter run if: + # - they have adjacent tokens + # - AND markers are the same + # + if ( + delimiters[headerIdx].marker != closer.marker + or lastTokenIdx != closer.token - 1 + ): + headerIdx = closerIdx + lastTokenIdx = closer.token + + # Length is only used for emphasis-specific "rule of 3", + # if it's not defined (in strikethrough or 3rd party plugins), + # we can default it to 0 to disable those checks. + # + closer.length = closer.length or 0 + + if not closer.close: + closerIdx += 1 + continue + + # Previously calculated lower bounds (previous fails) + # for each marker, each delimiter length modulo 3, + # and for whether this closer can be an opener; + # https://github.com/commonmark/cmark/commit/34250e12ccebdc6372b8b49c44fab57c72443460 + if closer.marker not in openersBottom: + openersBottom[closer.marker] = [-1, -1, -1, -1, -1, -1] + + minOpenerIdx = openersBottom[closer.marker][ + (3 if closer.open else 0) + (closer.length % 3) + ] + + openerIdx = headerIdx - jumps[headerIdx] - 1 + + newMinOpenerIdx = openerIdx + + while openerIdx > minOpenerIdx: + opener = delimiters[openerIdx] + + if opener.marker != closer.marker: + openerIdx -= jumps[openerIdx] + 1 + continue + + if opener.open and opener.end < 0: + isOddMatch = False + + # from spec: + # + # If one of the delimiters can both open and close emphasis, then the + # sum of the lengths of the delimiter runs containing the opening and + # closing delimiters must not be a multiple of 3 unless both lengths + # are multiples of 3. + # + if ( + (opener.close or closer.open) + and ((opener.length + closer.length) % 3 == 0) + and (opener.length % 3 != 0 or closer.length % 3 != 0) + ): + isOddMatch = True + + if not isOddMatch: + # If previous delimiter cannot be an opener, we can safely skip + # the entire sequence in future checks. This is required to make + # sure algorithm has linear complexity (see *_*_*_*_*_... case). + # + if openerIdx > 0 and not delimiters[openerIdx - 1].open: + lastJump = jumps[openerIdx - 1] + 1 + else: + lastJump = 0 + + jumps[closerIdx] = closerIdx - openerIdx + lastJump + jumps[openerIdx] = lastJump + + closer.open = False + opener.end = closerIdx + opener.close = False + newMinOpenerIdx = -1 + + # treat next token as start of run, + # it optimizes skips in **<...>**a**<...>** pathological case + lastTokenIdx = -2 + + break + + openerIdx -= jumps[openerIdx] + 1 + + if newMinOpenerIdx != -1: + # If match for this delimiter run failed, we want to set lower bound for + # future lookups. This is required to make sure algorithm has linear + # complexity. + # + # See details here: + # https:#github.com/commonmark/cmark/issues/178#issuecomment-270417442 + # + openersBottom[closer.marker][ + (3 if closer.open else 0) + ((closer.length or 0) % 3) + ] = newMinOpenerIdx + + closerIdx += 1 + + +def link_pairs(state: StateInline) -> None: + tokens_meta = state.tokens_meta + maximum = len(state.tokens_meta) + + processDelimiters(state, state.delimiters) + + curr = 0 + while curr < maximum: + curr_meta = tokens_meta[curr] + if curr_meta and "delimiters" in curr_meta: + processDelimiters(state, curr_meta["delimiters"]) + curr += 1 diff --git a/local_packages/markdown_it/rules_inline/emphasis.py b/local_packages/markdown_it/rules_inline/emphasis.py new file mode 100644 index 0000000..9a98f9e --- /dev/null +++ b/local_packages/markdown_it/rules_inline/emphasis.py @@ -0,0 +1,102 @@ +# Process *this* and _that_ +# +from __future__ import annotations + +from .state_inline import Delimiter, StateInline + + +def tokenize(state: StateInline, silent: bool) -> bool: + """Insert each marker as a separate text token, and add it to delimiter list""" + start = state.pos + marker = state.src[start] + + if silent: + return False + + if marker not in ("_", "*"): + return False + + scanned = state.scanDelims(state.pos, marker == "*") + + for _ in range(scanned.length): + token = state.push("text", "", 0) + token.content = marker + state.delimiters.append( + Delimiter( + marker=ord(marker), + length=scanned.length, + token=len(state.tokens) - 1, + end=-1, + open=scanned.can_open, + close=scanned.can_close, + ) + ) + + state.pos += scanned.length + + return True + + +def _postProcess(state: StateInline, delimiters: list[Delimiter]) -> None: + i = len(delimiters) - 1 + while i >= 0: + startDelim = delimiters[i] + + # /* _ */ /* * */ + if startDelim.marker != 0x5F and startDelim.marker != 0x2A: + i -= 1 + continue + + # Process only opening markers + if startDelim.end == -1: + i -= 1 + continue + + endDelim = delimiters[startDelim.end] + + # If the previous delimiter has the same marker and is adjacent to this one, + # merge those into one strong delimiter. + # + # `whatever` -> `whatever` + # + isStrong = ( + i > 0 + and delimiters[i - 1].end == startDelim.end + 1 + # check that first two markers match and adjacent + and delimiters[i - 1].marker == startDelim.marker + and delimiters[i - 1].token == startDelim.token - 1 + # check that last two markers are adjacent (we can safely assume they match) + and delimiters[startDelim.end + 1].token == endDelim.token + 1 + ) + + ch = chr(startDelim.marker) + + token = state.tokens[startDelim.token] + token.type = "strong_open" if isStrong else "em_open" + token.tag = "strong" if isStrong else "em" + token.nesting = 1 + token.markup = ch + ch if isStrong else ch + token.content = "" + + token = state.tokens[endDelim.token] + token.type = "strong_close" if isStrong else "em_close" + token.tag = "strong" if isStrong else "em" + token.nesting = -1 + token.markup = ch + ch if isStrong else ch + token.content = "" + + if isStrong: + state.tokens[delimiters[i - 1].token].content = "" + state.tokens[delimiters[startDelim.end + 1].token].content = "" + i -= 1 + + i -= 1 + + +def postProcess(state: StateInline) -> None: + """Walk through delimiter list and replace text tokens with tags.""" + _postProcess(state, state.delimiters) + + for token in state.tokens_meta: + if token and "delimiters" in token: + _postProcess(state, token["delimiters"]) diff --git a/local_packages/markdown_it/rules_inline/entity.py b/local_packages/markdown_it/rules_inline/entity.py new file mode 100644 index 0000000..ec9d396 --- /dev/null +++ b/local_packages/markdown_it/rules_inline/entity.py @@ -0,0 +1,53 @@ +# Process html entity - {, ¯, ", ... +import re + +from ..common.entities import entities +from ..common.utils import fromCodePoint, isValidEntityCode +from .state_inline import StateInline + +DIGITAL_RE = re.compile(r"^&#((?:x[a-f0-9]{1,6}|[0-9]{1,7}));", re.IGNORECASE) +NAMED_RE = re.compile(r"^&([a-z][a-z0-9]{1,31});", re.IGNORECASE) + + +def entity(state: StateInline, silent: bool) -> bool: + pos = state.pos + maximum = state.posMax + + if state.src[pos] != "&": + return False + + if pos + 1 >= maximum: + return False + + if state.src[pos + 1] == "#": + if match := DIGITAL_RE.search(state.src[pos:]): + if not silent: + match1 = match.group(1) + code = ( + int(match1[1:], 16) if match1[0].lower() == "x" else int(match1, 10) + ) + + token = state.push("text_special", "", 0) + token.content = ( + fromCodePoint(code) + if isValidEntityCode(code) + else fromCodePoint(0xFFFD) + ) + token.markup = match.group(0) + token.info = "entity" + + state.pos += len(match.group(0)) + return True + + else: + if (match := NAMED_RE.search(state.src[pos:])) and match.group(1) in entities: + if not silent: + token = state.push("text_special", "", 0) + token.content = entities[match.group(1)] + token.markup = match.group(0) + token.info = "entity" + + state.pos += len(match.group(0)) + return True + + return False diff --git a/local_packages/markdown_it/rules_inline/escape.py b/local_packages/markdown_it/rules_inline/escape.py new file mode 100644 index 0000000..0fca6c8 --- /dev/null +++ b/local_packages/markdown_it/rules_inline/escape.py @@ -0,0 +1,93 @@ +""" +Process escaped chars and hardbreaks +""" + +from ..common.utils import isStrSpace +from .state_inline import StateInline + + +def escape(state: StateInline, silent: bool) -> bool: + """Process escaped chars and hardbreaks.""" + pos = state.pos + maximum = state.posMax + + if state.src[pos] != "\\": + return False + + pos += 1 + + # '\' at the end of the inline block + if pos >= maximum: + return False + + ch1 = state.src[pos] + ch1_ord = ord(ch1) + if ch1 == "\n": + if not silent: + state.push("hardbreak", "br", 0) + pos += 1 + # skip leading whitespaces from next line + while pos < maximum: + ch = state.src[pos] + if not isStrSpace(ch): + break + pos += 1 + + state.pos = pos + return True + + escapedStr = state.src[pos] + + if ch1_ord >= 0xD800 and ch1_ord <= 0xDBFF and pos + 1 < maximum: + ch2 = state.src[pos + 1] + ch2_ord = ord(ch2) + if ch2_ord >= 0xDC00 and ch2_ord <= 0xDFFF: + escapedStr += ch2 + pos += 1 + + origStr = "\\" + escapedStr + + if not silent: + token = state.push("text_special", "", 0) + token.content = escapedStr if ch1 in _ESCAPED else origStr + token.markup = origStr + token.info = "escape" + + state.pos = pos + 1 + return True + + +_ESCAPED = { + "!", + '"', + "#", + "$", + "%", + "&", + "'", + "(", + ")", + "*", + "+", + ",", + "-", + ".", + "/", + ":", + ";", + "<", + "=", + ">", + "?", + "@", + "[", + "\\", + "]", + "^", + "_", + "`", + "{", + "|", + "}", + "~", +} diff --git a/local_packages/markdown_it/rules_inline/fragments_join.py b/local_packages/markdown_it/rules_inline/fragments_join.py new file mode 100644 index 0000000..f795c13 --- /dev/null +++ b/local_packages/markdown_it/rules_inline/fragments_join.py @@ -0,0 +1,43 @@ +from .state_inline import StateInline + + +def fragments_join(state: StateInline) -> None: + """ + Clean up tokens after emphasis and strikethrough postprocessing: + merge adjacent text nodes into one and re-calculate all token levels + + This is necessary because initially emphasis delimiter markers (``*, _, ~``) + are treated as their own separate text tokens. Then emphasis rule either + leaves them as text (needed to merge with adjacent text) or turns them + into opening/closing tags (which messes up levels inside). + """ + level = 0 + maximum = len(state.tokens) + + curr = last = 0 + while curr < maximum: + # re-calculate levels after emphasis/strikethrough turns some text nodes + # into opening/closing tags + if state.tokens[curr].nesting < 0: + level -= 1 # closing tag + state.tokens[curr].level = level + if state.tokens[curr].nesting > 0: + level += 1 # opening tag + + if ( + state.tokens[curr].type == "text" + and curr + 1 < maximum + and state.tokens[curr + 1].type == "text" + ): + # collapse two adjacent text nodes + state.tokens[curr + 1].content = ( + state.tokens[curr].content + state.tokens[curr + 1].content + ) + else: + if curr != last: + state.tokens[last] = state.tokens[curr] + last += 1 + curr += 1 + + if curr != last: + del state.tokens[last:] diff --git a/local_packages/markdown_it/rules_inline/html_inline.py b/local_packages/markdown_it/rules_inline/html_inline.py new file mode 100644 index 0000000..9065e1d --- /dev/null +++ b/local_packages/markdown_it/rules_inline/html_inline.py @@ -0,0 +1,43 @@ +# Process html tags +from ..common.html_re import HTML_TAG_RE +from ..common.utils import isLinkClose, isLinkOpen +from .state_inline import StateInline + + +def isLetter(ch: int) -> bool: + lc = ch | 0x20 # to lower case + # /* a */ and /* z */ + return (lc >= 0x61) and (lc <= 0x7A) + + +def html_inline(state: StateInline, silent: bool) -> bool: + pos = state.pos + + if not state.md.options.get("html", None): + return False + + # Check start + maximum = state.posMax + if state.src[pos] != "<" or pos + 2 >= maximum: + return False + + # Quick fail on second char + ch = state.src[pos + 1] + if ch not in ("!", "?", "/") and not isLetter(ord(ch)): # /* / */ + return False + + match = HTML_TAG_RE.search(state.src[pos:]) + if not match: + return False + + if not silent: + token = state.push("html_inline", "", 0) + token.content = state.src[pos : pos + len(match.group(0))] + + if isLinkOpen(token.content): + state.linkLevel += 1 + if isLinkClose(token.content): + state.linkLevel -= 1 + + state.pos += len(match.group(0)) + return True diff --git a/local_packages/markdown_it/rules_inline/image.py b/local_packages/markdown_it/rules_inline/image.py new file mode 100644 index 0000000..005105b --- /dev/null +++ b/local_packages/markdown_it/rules_inline/image.py @@ -0,0 +1,148 @@ +# Process ![image]( "title") +from __future__ import annotations + +from ..common.utils import isStrSpace, normalizeReference +from ..token import Token +from .state_inline import StateInline + + +def image(state: StateInline, silent: bool) -> bool: + label = None + href = "" + oldPos = state.pos + max = state.posMax + + if state.src[state.pos] != "!": + return False + + if state.pos + 1 < state.posMax and state.src[state.pos + 1] != "[": + return False + + labelStart = state.pos + 2 + labelEnd = state.md.helpers.parseLinkLabel(state, state.pos + 1, False) + + # parser failed to find ']', so it's not a valid link + if labelEnd < 0: + return False + + pos = labelEnd + 1 + + if pos < max and state.src[pos] == "(": + # + # Inline link + # + + # [link]( "title" ) + # ^^ skipping these spaces + pos += 1 + while pos < max: + ch = state.src[pos] + if not isStrSpace(ch) and ch != "\n": + break + pos += 1 + + if pos >= max: + return False + + # [link]( "title" ) + # ^^^^^^ parsing link destination + start = pos + res = state.md.helpers.parseLinkDestination(state.src, pos, state.posMax) + if res.ok: + href = state.md.normalizeLink(res.str) + if state.md.validateLink(href): + pos = res.pos + else: + href = "" + + # [link]( "title" ) + # ^^ skipping these spaces + start = pos + while pos < max: + ch = state.src[pos] + if not isStrSpace(ch) and ch != "\n": + break + pos += 1 + + # [link]( "title" ) + # ^^^^^^^ parsing link title + res = state.md.helpers.parseLinkTitle(state.src, pos, state.posMax, None) + if pos < max and start != pos and res.ok: + title = res.str + pos = res.pos + + # [link]( "title" ) + # ^^ skipping these spaces + while pos < max: + ch = state.src[pos] + if not isStrSpace(ch) and ch != "\n": + break + pos += 1 + else: + title = "" + + if pos >= max or state.src[pos] != ")": + state.pos = oldPos + return False + + pos += 1 + + else: + # + # Link reference + # + if "references" not in state.env: + return False + + # /* [ */ + if pos < max and state.src[pos] == "[": + start = pos + 1 + pos = state.md.helpers.parseLinkLabel(state, pos) + if pos >= 0: + label = state.src[start:pos] + pos += 1 + else: + pos = labelEnd + 1 + else: + pos = labelEnd + 1 + + # covers label == '' and label == undefined + # (collapsed reference link and shortcut reference link respectively) + if not label: + label = state.src[labelStart:labelEnd] + + label = normalizeReference(label) + + ref = state.env["references"].get(label, None) + if not ref: + state.pos = oldPos + return False + + href = ref["href"] + title = ref["title"] + + # + # We found the end of the link, and know for a fact it's a valid link + # so all that's left to do is to call tokenizer. + # + if not silent: + content = state.src[labelStart:labelEnd] + + tokens: list[Token] = [] + state.md.inline.parse(content, state.md, state.env, tokens) + + token = state.push("image", "img", 0) + token.attrs = {"src": href, "alt": ""} + token.children = tokens or None + token.content = content + + if title: + token.attrSet("title", title) + + # note, this is not part of markdown-it JS, but is useful for renderers + if label and state.md.options.get("store_labels", False): + token.meta["label"] = label + + state.pos = pos + state.posMax = max + return True diff --git a/local_packages/markdown_it/rules_inline/link.py b/local_packages/markdown_it/rules_inline/link.py new file mode 100644 index 0000000..2e92c7d --- /dev/null +++ b/local_packages/markdown_it/rules_inline/link.py @@ -0,0 +1,149 @@ +# Process [link]( "stuff") + +from ..common.utils import isStrSpace, normalizeReference +from .state_inline import StateInline + + +def link(state: StateInline, silent: bool) -> bool: + href = "" + title = "" + label = None + oldPos = state.pos + maximum = state.posMax + start = state.pos + parseReference = True + + if state.src[state.pos] != "[": + return False + + labelStart = state.pos + 1 + labelEnd = state.md.helpers.parseLinkLabel(state, state.pos, True) + + # parser failed to find ']', so it's not a valid link + if labelEnd < 0: + return False + + pos = labelEnd + 1 + + if pos < maximum and state.src[pos] == "(": + # + # Inline link + # + + # might have found a valid shortcut link, disable reference parsing + parseReference = False + + # [link]( "title" ) + # ^^ skipping these spaces + pos += 1 + while pos < maximum: + ch = state.src[pos] + if not isStrSpace(ch) and ch != "\n": + break + pos += 1 + + if pos >= maximum: + return False + + # [link]( "title" ) + # ^^^^^^ parsing link destination + start = pos + res = state.md.helpers.parseLinkDestination(state.src, pos, state.posMax) + if res.ok: + href = state.md.normalizeLink(res.str) + if state.md.validateLink(href): + pos = res.pos + else: + href = "" + + # [link]( "title" ) + # ^^ skipping these spaces + start = pos + while pos < maximum: + ch = state.src[pos] + if not isStrSpace(ch) and ch != "\n": + break + pos += 1 + + # [link]( "title" ) + # ^^^^^^^ parsing link title + res = state.md.helpers.parseLinkTitle(state.src, pos, state.posMax) + if pos < maximum and start != pos and res.ok: + title = res.str + pos = res.pos + + # [link]( "title" ) + # ^^ skipping these spaces + while pos < maximum: + ch = state.src[pos] + if not isStrSpace(ch) and ch != "\n": + break + pos += 1 + + if pos >= maximum or state.src[pos] != ")": + # parsing a valid shortcut link failed, fallback to reference + parseReference = True + + pos += 1 + + if parseReference: + # + # Link reference + # + if "references" not in state.env: + return False + + if pos < maximum and state.src[pos] == "[": + start = pos + 1 + pos = state.md.helpers.parseLinkLabel(state, pos) + if pos >= 0: + label = state.src[start:pos] + pos += 1 + else: + pos = labelEnd + 1 + + else: + pos = labelEnd + 1 + + # covers label == '' and label == undefined + # (collapsed reference link and shortcut reference link respectively) + if not label: + label = state.src[labelStart:labelEnd] + + label = normalizeReference(label) + + ref = state.env["references"].get(label, None) + if not ref: + state.pos = oldPos + return False + + href = ref["href"] + title = ref["title"] + + # + # We found the end of the link, and know for a fact it's a valid link + # so all that's left to do is to call tokenizer. + # + if not silent: + state.pos = labelStart + state.posMax = labelEnd + + token = state.push("link_open", "a", 1) + token.attrs = {"href": href} + + if title: + token.attrSet("title", title) + + # note, this is not part of markdown-it JS, but is useful for renderers + if label and state.md.options.get("store_labels", False): + token.meta["label"] = label + + state.linkLevel += 1 + state.md.inline.tokenize(state) + state.linkLevel -= 1 + + token = state.push("link_close", "a", -1) + + state.pos = pos + state.posMax = maximum + return True diff --git a/local_packages/markdown_it/rules_inline/linkify.py b/local_packages/markdown_it/rules_inline/linkify.py new file mode 100644 index 0000000..3669396 --- /dev/null +++ b/local_packages/markdown_it/rules_inline/linkify.py @@ -0,0 +1,62 @@ +"""Process links like https://example.org/""" + +import re + +from .state_inline import StateInline + +# RFC3986: scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." ) +SCHEME_RE = re.compile(r"(?:^|[^a-z0-9.+-])([a-z][a-z0-9.+-]*)$", re.IGNORECASE) + + +def linkify(state: StateInline, silent: bool) -> bool: + """Rule for identifying plain-text links.""" + if not state.md.options.linkify: + return False + if state.linkLevel > 0: + return False + if not state.md.linkify: + raise ModuleNotFoundError("Linkify enabled but not installed.") + + pos = state.pos + maximum = state.posMax + + if ( + (pos + 3) > maximum + or state.src[pos] != ":" + or state.src[pos + 1] != "/" + or state.src[pos + 2] != "/" + ): + return False + + if not (match := SCHEME_RE.search(state.pending)): + return False + + proto = match.group(1) + if not (link := state.md.linkify.match_at_start(state.src[pos - len(proto) :])): + return False + url: str = link.url + + # disallow '*' at the end of the link (conflicts with emphasis) + url = url.rstrip("*") + + full_url = state.md.normalizeLink(url) + if not state.md.validateLink(full_url): + return False + + if not silent: + state.pending = state.pending[: -len(proto)] + + token = state.push("link_open", "a", 1) + token.attrs = {"href": full_url} + token.markup = "linkify" + token.info = "auto" + + token = state.push("text", "", 0) + token.content = state.md.normalizeLinkText(url) + + token = state.push("link_close", "a", -1) + token.markup = "linkify" + token.info = "auto" + + state.pos += len(url) - len(proto) + return True diff --git a/local_packages/markdown_it/rules_inline/newline.py b/local_packages/markdown_it/rules_inline/newline.py new file mode 100644 index 0000000..d05ee6d --- /dev/null +++ b/local_packages/markdown_it/rules_inline/newline.py @@ -0,0 +1,44 @@ +"""Proceess '\n'.""" + +from ..common.utils import charStrAt, isStrSpace +from .state_inline import StateInline + + +def newline(state: StateInline, silent: bool) -> bool: + pos = state.pos + + if state.src[pos] != "\n": + return False + + pmax = len(state.pending) - 1 + maximum = state.posMax + + # ' \n' -> hardbreak + # Lookup in pending chars is bad practice! Don't copy to other rules! + # Pending string is stored in concat mode, indexed lookups will cause + # conversion to flat mode. + if not silent: + if pmax >= 0 and charStrAt(state.pending, pmax) == " ": + if pmax >= 1 and charStrAt(state.pending, pmax - 1) == " ": + # Find whitespaces tail of pending chars. + ws = pmax - 1 + while ws >= 1 and charStrAt(state.pending, ws - 1) == " ": + ws -= 1 + state.pending = state.pending[:ws] + + state.push("hardbreak", "br", 0) + else: + state.pending = state.pending[:-1] + state.push("softbreak", "br", 0) + + else: + state.push("softbreak", "br", 0) + + pos += 1 + + # skip heading spaces for next line + while pos < maximum and isStrSpace(state.src[pos]): + pos += 1 + + state.pos = pos + return True diff --git a/local_packages/markdown_it/rules_inline/state_inline.py b/local_packages/markdown_it/rules_inline/state_inline.py new file mode 100644 index 0000000..50dc412 --- /dev/null +++ b/local_packages/markdown_it/rules_inline/state_inline.py @@ -0,0 +1,165 @@ +from __future__ import annotations + +from collections import namedtuple +from dataclasses import dataclass +from typing import TYPE_CHECKING, Any, Literal + +from ..common.utils import isMdAsciiPunct, isPunctChar, isWhiteSpace +from ..ruler import StateBase +from ..token import Token +from ..utils import EnvType + +if TYPE_CHECKING: + from markdown_it import MarkdownIt + + +@dataclass(slots=True) +class Delimiter: + # Char code of the starting marker (number). + marker: int + + # Total length of these series of delimiters. + length: int + + # A position of the token this delimiter corresponds to. + token: int + + # If this delimiter is matched as a valid opener, `end` will be + # equal to its position, otherwise it's `-1`. + end: int + + # Boolean flags that determine if this delimiter could open or close + # an emphasis. + open: bool + close: bool + + level: bool | None = None + + +Scanned = namedtuple("Scanned", ["can_open", "can_close", "length"]) + + +class StateInline(StateBase): + def __init__( + self, src: str, md: MarkdownIt, env: EnvType, outTokens: list[Token] + ) -> None: + self.src = src + self.env = env + self.md = md + self.tokens = outTokens + self.tokens_meta: list[dict[str, Any] | None] = [None] * len(outTokens) + + self.pos = 0 + self.posMax = len(self.src) + self.level = 0 + self.pending = "" + self.pendingLevel = 0 + + # Stores { start: end } pairs. Useful for backtrack + # optimization of pairs parse (emphasis, strikes). + self.cache: dict[int, int] = {} + + # List of emphasis-like delimiters for current tag + self.delimiters: list[Delimiter] = [] + + # Stack of delimiter lists for upper level tags + self._prev_delimiters: list[list[Delimiter]] = [] + + # backticklength => last seen position + self.backticks: dict[int, int] = {} + self.backticksScanned = False + + # Counter used to disable inline linkify-it execution + # inside and markdown links + self.linkLevel = 0 + + def __repr__(self) -> str: + return ( + f"{self.__class__.__name__}" + f"(pos=[{self.pos} of {self.posMax}], token={len(self.tokens)})" + ) + + def pushPending(self) -> Token: + token = Token("text", "", 0) + token.content = self.pending + token.level = self.pendingLevel + self.tokens.append(token) + self.pending = "" + return token + + def push(self, ttype: str, tag: str, nesting: Literal[-1, 0, 1]) -> Token: + """Push new token to "stream". + If pending text exists - flush it as text token + """ + if self.pending: + self.pushPending() + + token = Token(ttype, tag, nesting) + token_meta = None + + if nesting < 0: + # closing tag + self.level -= 1 + self.delimiters = self._prev_delimiters.pop() + + token.level = self.level + + if nesting > 0: + # opening tag + self.level += 1 + self._prev_delimiters.append(self.delimiters) + self.delimiters = [] + token_meta = {"delimiters": self.delimiters} + + self.pendingLevel = self.level + self.tokens.append(token) + self.tokens_meta.append(token_meta) + return token + + def scanDelims(self, start: int, canSplitWord: bool) -> Scanned: + """ + Scan a sequence of emphasis-like markers, and determine whether + it can start an emphasis sequence or end an emphasis sequence. + + - start - position to scan from (it should point at a valid marker); + - canSplitWord - determine if these markers can be found inside a word + + """ + pos = start + maximum = self.posMax + marker = self.src[start] + + # treat beginning of the line as a whitespace + lastChar = self.src[start - 1] if start > 0 else " " + + while pos < maximum and self.src[pos] == marker: + pos += 1 + + count = pos - start + + # treat end of the line as a whitespace + nextChar = self.src[pos] if pos < maximum else " " + + isLastPunctChar = isMdAsciiPunct(ord(lastChar)) or isPunctChar(lastChar) + isNextPunctChar = isMdAsciiPunct(ord(nextChar)) or isPunctChar(nextChar) + + isLastWhiteSpace = isWhiteSpace(ord(lastChar)) + isNextWhiteSpace = isWhiteSpace(ord(nextChar)) + + left_flanking = not ( + isNextWhiteSpace + or (isNextPunctChar and not (isLastWhiteSpace or isLastPunctChar)) + ) + right_flanking = not ( + isLastWhiteSpace + or (isLastPunctChar and not (isNextWhiteSpace or isNextPunctChar)) + ) + + can_open = left_flanking and ( + canSplitWord or (not right_flanking) or isLastPunctChar + ) + can_close = right_flanking and ( + canSplitWord or (not left_flanking) or isNextPunctChar + ) + + return Scanned(can_open, can_close, count) diff --git a/local_packages/markdown_it/rules_inline/strikethrough.py b/local_packages/markdown_it/rules_inline/strikethrough.py new file mode 100644 index 0000000..ec81628 --- /dev/null +++ b/local_packages/markdown_it/rules_inline/strikethrough.py @@ -0,0 +1,127 @@ +# ~~strike through~~ +from __future__ import annotations + +from .state_inline import Delimiter, StateInline + + +def tokenize(state: StateInline, silent: bool) -> bool: + """Insert each marker as a separate text token, and add it to delimiter list""" + start = state.pos + ch = state.src[start] + + if silent: + return False + + if ch != "~": + return False + + scanned = state.scanDelims(state.pos, True) + length = scanned.length + + if length < 2: + return False + + if length % 2: + token = state.push("text", "", 0) + token.content = ch + length -= 1 + + i = 0 + while i < length: + token = state.push("text", "", 0) + token.content = ch + ch + state.delimiters.append( + Delimiter( + marker=ord(ch), + length=0, # disable "rule of 3" length checks meant for emphasis + token=len(state.tokens) - 1, + end=-1, + open=scanned.can_open, + close=scanned.can_close, + ) + ) + + i += 2 + + state.pos += scanned.length + + return True + + +def _postProcess(state: StateInline, delimiters: list[Delimiter]) -> None: + loneMarkers = [] + maximum = len(delimiters) + + i = 0 + while i < maximum: + startDelim = delimiters[i] + + if startDelim.marker != 0x7E: # /* ~ */ + i += 1 + continue + + if startDelim.end == -1: + i += 1 + continue + + endDelim = delimiters[startDelim.end] + + token = state.tokens[startDelim.token] + token.type = "s_open" + token.tag = "s" + token.nesting = 1 + token.markup = "~~" + token.content = "" + + token = state.tokens[endDelim.token] + token.type = "s_close" + token.tag = "s" + token.nesting = -1 + token.markup = "~~" + token.content = "" + + if ( + state.tokens[endDelim.token - 1].type == "text" + and state.tokens[endDelim.token - 1].content == "~" + ): + loneMarkers.append(endDelim.token - 1) + + i += 1 + + # If a marker sequence has an odd number of characters, it's split + # like this: `~~~~~` -> `~` + `~~` + `~~`, leaving one marker at the + # start of the sequence. + # + # So, we have to move all those markers after subsequent s_close tags. + # + while loneMarkers: + i = loneMarkers.pop() + j = i + 1 + + while (j < len(state.tokens)) and (state.tokens[j].type == "s_close"): + j += 1 + + j -= 1 + + if i != j: + token = state.tokens[j] + state.tokens[j] = state.tokens[i] + state.tokens[i] = token + + +def postProcess(state: StateInline) -> None: + """Walk through delimiter list and replace text tokens with tags.""" + tokens_meta = state.tokens_meta + maximum = len(state.tokens_meta) + _postProcess(state, state.delimiters) + + curr = 0 + while curr < maximum: + try: + curr_meta = tokens_meta[curr] + except IndexError: + pass + else: + if curr_meta and "delimiters" in curr_meta: + _postProcess(state, curr_meta["delimiters"]) + curr += 1 diff --git a/local_packages/markdown_it/rules_inline/text.py b/local_packages/markdown_it/rules_inline/text.py new file mode 100644 index 0000000..18b2fcc --- /dev/null +++ b/local_packages/markdown_it/rules_inline/text.py @@ -0,0 +1,62 @@ +import functools +import re + +# Skip text characters for text token, place those to pending buffer +# and increment current pos +from .state_inline import StateInline + +# Rule to skip pure text +# '{}$%@~+=:' reserved for extensions + +# !!!! Don't confuse with "Markdown ASCII Punctuation" chars +# http://spec.commonmark.org/0.15/#ascii-punctuation-character + + +_TerminatorChars = { + "\n", + "!", + "#", + "$", + "%", + "&", + "*", + "+", + "-", + ":", + "<", + "=", + ">", + "@", + "[", + "\\", + "]", + "^", + "_", + "`", + "{", + "}", + "~", +} + + +@functools.cache +def _terminator_char_regex() -> re.Pattern[str]: + return re.compile("[" + re.escape("".join(_TerminatorChars)) + "]") + + +def text(state: StateInline, silent: bool) -> bool: + pos = state.pos + posMax = state.posMax + + terminator_char = _terminator_char_regex().search(state.src, pos) + pos = terminator_char.start() if terminator_char else posMax + + if pos == state.pos: + return False + + if not silent: + state.pending += state.src[state.pos : pos] + + state.pos = pos + + return True diff --git a/local_packages/markdown_it/token.py b/local_packages/markdown_it/token.py new file mode 100644 index 0000000..d6d0b45 --- /dev/null +++ b/local_packages/markdown_it/token.py @@ -0,0 +1,178 @@ +from __future__ import annotations + +from collections.abc import Callable, MutableMapping +import dataclasses as dc +from typing import Any, Literal +import warnings + + +def convert_attrs(value: Any) -> Any: + """Convert Token.attrs set as ``None`` or ``[[key, value], ...]`` to a dict. + + This improves compatibility with upstream markdown-it. + """ + if not value: + return {} + if isinstance(value, list): + return dict(value) + return value + + +@dc.dataclass(slots=True) +class Token: + type: str + """Type of the token (string, e.g. "paragraph_open")""" + + tag: str + """HTML tag name, e.g. 'p'""" + + nesting: Literal[-1, 0, 1] + """Level change (number in {-1, 0, 1} set), where: + - `1` means the tag is opening + - `0` means the tag is self-closing + - `-1` means the tag is closing + """ + + attrs: dict[str, str | int | float] = dc.field(default_factory=dict) + """HTML attributes. + Note this differs from the upstream "list of lists" format, + although than an instance can still be initialised with this format. + """ + + map: list[int] | None = None + """Source map info. Format: `[ line_begin, line_end ]`""" + + level: int = 0 + """Nesting level, the same as `state.level`""" + + children: list[Token] | None = None + """Array of child nodes (inline and img tokens).""" + + content: str = "" + """Inner content, in the case of a self-closing tag (code, html, fence, etc.),""" + + markup: str = "" + """'*' or '_' for emphasis, fence string for fence, etc.""" + + info: str = "" + """Additional information: + - Info string for "fence" tokens + - The value "auto" for autolink "link_open" and "link_close" tokens + - The string value of the item marker for ordered-list "list_item_open" tokens + """ + + meta: dict[Any, Any] = dc.field(default_factory=dict) + """A place for plugins to store any arbitrary data""" + + block: bool = False + """True for block-level tokens, false for inline tokens. + Used in renderer to calculate line breaks + """ + + hidden: bool = False + """If true, ignore this element when rendering. + Used for tight lists to hide paragraphs. + """ + + def __post_init__(self) -> None: + self.attrs = convert_attrs(self.attrs) + + def attrIndex(self, name: str) -> int: + warnings.warn( # noqa: B028 + "Token.attrIndex should not be used, since Token.attrs is a dictionary", + UserWarning, + ) + if name not in self.attrs: + return -1 + return list(self.attrs.keys()).index(name) + + def attrItems(self) -> list[tuple[str, str | int | float]]: + """Get (key, value) list of attrs.""" + return list(self.attrs.items()) + + def attrPush(self, attrData: tuple[str, str | int | float]) -> None: + """Add `[ name, value ]` attribute to list. Init attrs if necessary.""" + name, value = attrData + self.attrSet(name, value) + + def attrSet(self, name: str, value: str | int | float) -> None: + """Set `name` attribute to `value`. Override old value if exists.""" + self.attrs[name] = value + + def attrGet(self, name: str) -> None | str | int | float: + """Get the value of attribute `name`, or null if it does not exist.""" + return self.attrs.get(name, None) + + def attrJoin(self, name: str, value: str) -> None: + """Join value to existing attribute via space. + Or create new attribute if not exists. + Useful to operate with token classes. + """ + if name in self.attrs: + current = self.attrs[name] + if not isinstance(current, str): + raise TypeError( + f"existing attr 'name' is not a str: {self.attrs[name]}" + ) + self.attrs[name] = f"{current} {value}" + else: + self.attrs[name] = value + + def copy(self, **changes: Any) -> Token: + """Return a shallow copy of the instance.""" + return dc.replace(self, **changes) + + def as_dict( + self, + *, + children: bool = True, + as_upstream: bool = True, + meta_serializer: Callable[[dict[Any, Any]], Any] | None = None, + filter: Callable[[str, Any], bool] | None = None, + dict_factory: Callable[..., MutableMapping[str, Any]] = dict, + ) -> MutableMapping[str, Any]: + """Return the token as a dictionary. + + :param children: Also convert children to dicts + :param as_upstream: Ensure the output dictionary is equal to that created by markdown-it + For example, attrs are converted to null or lists + :param meta_serializer: hook for serializing ``Token.meta`` + :param filter: A callable whose return code determines whether an + attribute or element is included (``True``) or dropped (``False``). + Is called with the (key, value) pair. + :param dict_factory: A callable to produce dictionaries from. + For example, to produce ordered dictionaries instead of normal Python + dictionaries, pass in ``collections.OrderedDict``. + + """ + mapping = dict_factory((f.name, getattr(self, f.name)) for f in dc.fields(self)) + if filter: + mapping = dict_factory((k, v) for k, v in mapping.items() if filter(k, v)) + if as_upstream and "attrs" in mapping: + mapping["attrs"] = ( + None + if not mapping["attrs"] + else [[k, v] for k, v in mapping["attrs"].items()] + ) + if meta_serializer and "meta" in mapping: + mapping["meta"] = meta_serializer(mapping["meta"]) + if children and mapping.get("children", None): + mapping["children"] = [ + child.as_dict( + children=children, + filter=filter, + dict_factory=dict_factory, + as_upstream=as_upstream, + meta_serializer=meta_serializer, + ) + for child in mapping["children"] + ] + return mapping + + @classmethod + def from_dict(cls, dct: MutableMapping[str, Any]) -> Token: + """Convert a dict to a Token.""" + token = cls(**dct) + if token.children: + token.children = [cls.from_dict(c) for c in token.children] # type: ignore[arg-type] + return token diff --git a/local_packages/markdown_it/tree.py b/local_packages/markdown_it/tree.py new file mode 100644 index 0000000..5369157 --- /dev/null +++ b/local_packages/markdown_it/tree.py @@ -0,0 +1,333 @@ +"""A tree representation of a linear markdown-it token stream. + +This module is not part of upstream JavaScript markdown-it. +""" + +from __future__ import annotations + +from collections.abc import Generator, Sequence +import textwrap +from typing import Any, NamedTuple, TypeVar, overload + +from .token import Token + + +class _NesterTokens(NamedTuple): + opening: Token + closing: Token + + +_NodeType = TypeVar("_NodeType", bound="SyntaxTreeNode") + + +class SyntaxTreeNode: + """A Markdown syntax tree node. + + A class that can be used to construct a tree representation of a linear + `markdown-it-py` token stream. + + Each node in the tree represents either: + - root of the Markdown document + - a single unnested `Token` + - a `Token` "_open" and "_close" token pair, and the tokens nested in + between + """ + + def __init__( + self, tokens: Sequence[Token] = (), *, create_root: bool = True + ) -> None: + """Initialize a `SyntaxTreeNode` from a token stream. + + If `create_root` is True, create a root node for the document. + """ + # Only nodes representing an unnested token have self.token + self.token: Token | None = None + + # Only containers have nester tokens + self.nester_tokens: _NesterTokens | None = None + + # Root node does not have self.parent + self._parent: Any = None + + # Empty list unless a non-empty container, or unnested token that has + # children (i.e. inline or img) + self._children: list[Any] = [] + + if create_root: + self._set_children_from_tokens(tokens) + return + + if not tokens: + raise ValueError( + "Can only create root from empty token sequence." + " Set `create_root=True`." + ) + elif len(tokens) == 1: + inline_token = tokens[0] + if inline_token.nesting: + raise ValueError( + "Unequal nesting level at the start and end of token stream." + ) + self.token = inline_token + if inline_token.children: + self._set_children_from_tokens(inline_token.children) + else: + self.nester_tokens = _NesterTokens(tokens[0], tokens[-1]) + self._set_children_from_tokens(tokens[1:-1]) + + def __repr__(self) -> str: + return f"{type(self).__name__}({self.type})" + + @overload + def __getitem__(self: _NodeType, item: int) -> _NodeType: ... + + @overload + def __getitem__(self: _NodeType, item: slice) -> list[_NodeType]: ... + + def __getitem__(self: _NodeType, item: int | slice) -> _NodeType | list[_NodeType]: + return self.children[item] + + def to_tokens(self: _NodeType) -> list[Token]: + """Recover the linear token stream.""" + + def recursive_collect_tokens(node: _NodeType, token_list: list[Token]) -> None: + if node.type == "root": + for child in node.children: + recursive_collect_tokens(child, token_list) + elif node.token: + token_list.append(node.token) + else: + assert node.nester_tokens + token_list.append(node.nester_tokens.opening) + for child in node.children: + recursive_collect_tokens(child, token_list) + token_list.append(node.nester_tokens.closing) + + tokens: list[Token] = [] + recursive_collect_tokens(self, tokens) + return tokens + + @property + def children(self: _NodeType) -> list[_NodeType]: + return self._children + + @children.setter + def children(self: _NodeType, value: list[_NodeType]) -> None: + self._children = value + + @property + def parent(self: _NodeType) -> _NodeType | None: + return self._parent # type: ignore + + @parent.setter + def parent(self: _NodeType, value: _NodeType | None) -> None: + self._parent = value + + @property + def is_root(self) -> bool: + """Is the node a special root node?""" + return not (self.token or self.nester_tokens) + + @property + def is_nested(self) -> bool: + """Is this node nested?. + + Returns `True` if the node represents a `Token` pair and tokens in the + sequence between them, where `Token.nesting` of the first `Token` in + the pair is 1 and nesting of the other `Token` is -1. + """ + return bool(self.nester_tokens) + + @property + def siblings(self: _NodeType) -> Sequence[_NodeType]: + """Get siblings of the node. + + Gets the whole group of siblings, including self. + """ + if not self.parent: + return [self] + return self.parent.children + + @property + def type(self) -> str: + """Get a string type of the represented syntax. + + - "root" for root nodes + - `Token.type` if the node represents an unnested token + - `Token.type` of the opening token, with "_open" suffix stripped, if + the node represents a nester token pair + """ + if self.is_root: + return "root" + if self.token: + return self.token.type + assert self.nester_tokens + return self.nester_tokens.opening.type.removesuffix("_open") + + @property + def next_sibling(self: _NodeType) -> _NodeType | None: + """Get the next node in the sequence of siblings. + + Returns `None` if this is the last sibling. + """ + self_index = self.siblings.index(self) + if self_index + 1 < len(self.siblings): + return self.siblings[self_index + 1] + return None + + @property + def previous_sibling(self: _NodeType) -> _NodeType | None: + """Get the previous node in the sequence of siblings. + + Returns `None` if this is the first sibling. + """ + self_index = self.siblings.index(self) + if self_index - 1 >= 0: + return self.siblings[self_index - 1] + return None + + def _add_child( + self, + tokens: Sequence[Token], + ) -> None: + """Make a child node for `self`.""" + child = type(self)(tokens, create_root=False) + child.parent = self + self.children.append(child) + + def _set_children_from_tokens(self, tokens: Sequence[Token]) -> None: + """Convert the token stream to a tree structure and set the resulting + nodes as children of `self`.""" + reversed_tokens = list(reversed(tokens)) + while reversed_tokens: + token = reversed_tokens.pop() + + if not token.nesting: + self._add_child([token]) + continue + if token.nesting != 1: + raise ValueError("Invalid token nesting") + + nested_tokens = [token] + nesting = 1 + while reversed_tokens and nesting: + token = reversed_tokens.pop() + nested_tokens.append(token) + nesting += token.nesting + if nesting: + raise ValueError(f"unclosed tokens starting {nested_tokens[0]}") + + self._add_child(nested_tokens) + + def pretty( + self, *, indent: int = 2, show_text: bool = False, _current: int = 0 + ) -> str: + """Create an XML style string of the tree.""" + prefix = " " * _current + text = prefix + f"<{self.type}" + if not self.is_root and self.attrs: + text += " " + " ".join(f"{k}={v!r}" for k, v in self.attrs.items()) + text += ">" + if ( + show_text + and not self.is_root + and self.type in ("text", "text_special") + and self.content + ): + text += "\n" + textwrap.indent(self.content, prefix + " " * indent) + for child in self.children: + text += "\n" + child.pretty( + indent=indent, show_text=show_text, _current=_current + indent + ) + return text + + def walk( + self: _NodeType, *, include_self: bool = True + ) -> Generator[_NodeType, None, None]: + """Recursively yield all descendant nodes in the tree starting at self. + + The order mimics the order of the underlying linear token + stream (i.e. depth first). + """ + if include_self: + yield self + for child in self.children: + yield from child.walk(include_self=True) + + # NOTE: + # The values of the properties defined below directly map to properties + # of the underlying `Token`s. A root node does not translate to a `Token` + # object, so calling these property getters on a root node will raise an + # `AttributeError`. + # + # There is no mapping for `Token.nesting` because the `is_nested` property + # provides that data, and can be called on any node type, including root. + + def _attribute_token(self) -> Token: + """Return the `Token` that is used as the data source for the + properties defined below.""" + if self.token: + return self.token + if self.nester_tokens: + return self.nester_tokens.opening + raise AttributeError("Root node does not have the accessed attribute") + + @property + def tag(self) -> str: + """html tag name, e.g. \"p\" """ + return self._attribute_token().tag + + @property + def attrs(self) -> dict[str, str | int | float]: + """Html attributes.""" + return self._attribute_token().attrs + + def attrGet(self, name: str) -> None | str | int | float: + """Get the value of attribute `name`, or null if it does not exist.""" + return self._attribute_token().attrGet(name) + + @property + def map(self) -> tuple[int, int] | None: + """Source map info. Format: `tuple[ line_begin, line_end ]`""" + map_ = self._attribute_token().map + if map_: + # Type ignore because `Token`s attribute types are not perfect + return tuple(map_) # type: ignore + return None + + @property + def level(self) -> int: + """nesting level, the same as `state.level`""" + return self._attribute_token().level + + @property + def content(self) -> str: + """In a case of self-closing tag (code, html, fence, etc.), it + has contents of this tag.""" + return self._attribute_token().content + + @property + def markup(self) -> str: + """'*' or '_' for emphasis, fence string for fence, etc.""" + return self._attribute_token().markup + + @property + def info(self) -> str: + """fence infostring""" + return self._attribute_token().info + + @property + def meta(self) -> dict[Any, Any]: + """A place for plugins to store an arbitrary data.""" + return self._attribute_token().meta + + @property + def block(self) -> bool: + """True for block-level tokens, false for inline tokens.""" + return self._attribute_token().block + + @property + def hidden(self) -> bool: + """If it's true, ignore this element when rendering. + Used for tight lists to hide paragraphs.""" + return self._attribute_token().hidden diff --git a/local_packages/markdown_it/utils.py b/local_packages/markdown_it/utils.py new file mode 100644 index 0000000..2571a15 --- /dev/null +++ b/local_packages/markdown_it/utils.py @@ -0,0 +1,186 @@ +from __future__ import annotations + +from collections.abc import Callable, Iterable, MutableMapping +from collections.abc import MutableMapping as MutableMappingABC +from pathlib import Path +from typing import TYPE_CHECKING, Any, TypedDict, cast + +if TYPE_CHECKING: + from typing_extensions import NotRequired + + +EnvType = MutableMapping[str, Any] # note: could use TypeAlias in python 3.10 +"""Type for the environment sandbox used in parsing and rendering, +which stores mutable variables for use by plugins and rules. +""" + + +class OptionsType(TypedDict): + """Options for parsing.""" + + maxNesting: int + """Internal protection, recursion limit.""" + html: bool + """Enable HTML tags in source.""" + linkify: bool + """Enable autoconversion of URL-like texts to links.""" + typographer: bool + """Enable smartquotes and replacements.""" + quotes: str + """Quote characters.""" + xhtmlOut: bool + """Use '/' to close single tags (
).""" + breaks: bool + """Convert newlines in paragraphs into
.""" + langPrefix: str + """CSS language prefix for fenced blocks.""" + highlight: Callable[[str, str, str], str] | None + """Highlighter function: (content, lang, attrs) -> str.""" + store_labels: NotRequired[bool] + """Store link label in link/image token's metadata (under Token.meta['label']). + + This is a Python only option, and is intended for the use of round-trip parsing. + """ + + +class PresetType(TypedDict): + """Preset configuration for markdown-it.""" + + options: OptionsType + """Options for parsing.""" + components: MutableMapping[str, MutableMapping[str, list[str]]] + """Components for parsing and rendering.""" + + +class OptionsDict(MutableMappingABC): # type: ignore + """A dictionary, with attribute access to core markdownit configuration options.""" + + # Note: ideally we would probably just remove attribute access entirely, + # but we keep it for backwards compatibility. + + def __init__(self, options: OptionsType) -> None: + self._options = cast(OptionsType, dict(options)) + + def __getitem__(self, key: str) -> Any: + return self._options[key] # type: ignore[literal-required] + + def __setitem__(self, key: str, value: Any) -> None: + self._options[key] = value # type: ignore[literal-required] + + def __delitem__(self, key: str) -> None: + del self._options[key] # type: ignore + + def __iter__(self) -> Iterable[str]: # type: ignore + return iter(self._options) + + def __len__(self) -> int: + return len(self._options) + + def __repr__(self) -> str: + return repr(self._options) + + def __str__(self) -> str: + return str(self._options) + + @property + def maxNesting(self) -> int: + """Internal protection, recursion limit.""" + return self._options["maxNesting"] + + @maxNesting.setter + def maxNesting(self, value: int) -> None: + self._options["maxNesting"] = value + + @property + def html(self) -> bool: + """Enable HTML tags in source.""" + return self._options["html"] + + @html.setter + def html(self, value: bool) -> None: + self._options["html"] = value + + @property + def linkify(self) -> bool: + """Enable autoconversion of URL-like texts to links.""" + return self._options["linkify"] + + @linkify.setter + def linkify(self, value: bool) -> None: + self._options["linkify"] = value + + @property + def typographer(self) -> bool: + """Enable smartquotes and replacements.""" + return self._options["typographer"] + + @typographer.setter + def typographer(self, value: bool) -> None: + self._options["typographer"] = value + + @property + def quotes(self) -> str: + """Quote characters.""" + return self._options["quotes"] + + @quotes.setter + def quotes(self, value: str) -> None: + self._options["quotes"] = value + + @property + def xhtmlOut(self) -> bool: + """Use '/' to close single tags (
).""" + return self._options["xhtmlOut"] + + @xhtmlOut.setter + def xhtmlOut(self, value: bool) -> None: + self._options["xhtmlOut"] = value + + @property + def breaks(self) -> bool: + """Convert newlines in paragraphs into
.""" + return self._options["breaks"] + + @breaks.setter + def breaks(self, value: bool) -> None: + self._options["breaks"] = value + + @property + def langPrefix(self) -> str: + """CSS language prefix for fenced blocks.""" + return self._options["langPrefix"] + + @langPrefix.setter + def langPrefix(self, value: str) -> None: + self._options["langPrefix"] = value + + @property + def highlight(self) -> Callable[[str, str, str], str] | None: + """Highlighter function: (content, langName, langAttrs) -> escaped HTML.""" + return self._options["highlight"] + + @highlight.setter + def highlight(self, value: Callable[[str, str, str], str] | None) -> None: + self._options["highlight"] = value + + +def read_fixture_file(path: str | Path) -> list[list[Any]]: + text = Path(path).read_text(encoding="utf-8") + tests = [] + section = 0 + last_pos = 0 + lines = text.splitlines(keepends=True) + for i in range(len(lines)): + if lines[i].rstrip() == ".": + if section == 0: + tests.append([i, lines[i - 1].strip()]) + section = 1 + elif section == 1: + tests[-1].append("".join(lines[last_pos + 1 : i])) + section = 2 + elif section == 2: + tests[-1].append("".join(lines[last_pos + 1 : i])) + section = 0 + + last_pos = i + return tests diff --git a/local_packages/markdown_it_py-4.0.0.dist-info/INSTALLER b/local_packages/markdown_it_py-4.0.0.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/local_packages/markdown_it_py-4.0.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/local_packages/markdown_it_py-4.0.0.dist-info/METADATA b/local_packages/markdown_it_py-4.0.0.dist-info/METADATA new file mode 100644 index 0000000..0f2b466 --- /dev/null +++ b/local_packages/markdown_it_py-4.0.0.dist-info/METADATA @@ -0,0 +1,219 @@ +Metadata-Version: 2.4 +Name: markdown-it-py +Version: 4.0.0 +Summary: Python port of markdown-it. Markdown parsing, done right! +Keywords: markdown,lexer,parser,commonmark,markdown-it +Author-email: Chris Sewell +Requires-Python: >=3.10 +Description-Content-Type: text/markdown +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Text Processing :: Markup +License-File: LICENSE +License-File: LICENSE.markdown-it +Requires-Dist: mdurl~=0.1 +Requires-Dist: psutil ; extra == "benchmarking" +Requires-Dist: pytest ; extra == "benchmarking" +Requires-Dist: pytest-benchmark ; extra == "benchmarking" +Requires-Dist: commonmark~=0.9 ; extra == "compare" +Requires-Dist: markdown~=3.4 ; extra == "compare" +Requires-Dist: mistletoe~=1.0 ; extra == "compare" +Requires-Dist: mistune~=3.0 ; extra == "compare" +Requires-Dist: panflute~=2.3 ; extra == "compare" +Requires-Dist: markdown-it-pyrs ; extra == "compare" +Requires-Dist: linkify-it-py>=1,<3 ; extra == "linkify" +Requires-Dist: mdit-py-plugins>=0.5.0 ; extra == "plugins" +Requires-Dist: gprof2dot ; extra == "profiling" +Requires-Dist: mdit-py-plugins>=0.5.0 ; extra == "rtd" +Requires-Dist: myst-parser ; extra == "rtd" +Requires-Dist: pyyaml ; extra == "rtd" +Requires-Dist: sphinx ; extra == "rtd" +Requires-Dist: sphinx-copybutton ; extra == "rtd" +Requires-Dist: sphinx-design ; extra == "rtd" +Requires-Dist: sphinx-book-theme~=1.0 ; extra == "rtd" +Requires-Dist: jupyter_sphinx ; extra == "rtd" +Requires-Dist: ipykernel ; extra == "rtd" +Requires-Dist: coverage ; extra == "testing" +Requires-Dist: pytest ; extra == "testing" +Requires-Dist: pytest-cov ; extra == "testing" +Requires-Dist: pytest-regressions ; extra == "testing" +Requires-Dist: requests ; extra == "testing" +Project-URL: Documentation, https://markdown-it-py.readthedocs.io +Project-URL: Homepage, https://github.com/executablebooks/markdown-it-py +Provides-Extra: benchmarking +Provides-Extra: compare +Provides-Extra: linkify +Provides-Extra: plugins +Provides-Extra: profiling +Provides-Extra: rtd +Provides-Extra: testing + +# markdown-it-py + +[![Github-CI][github-ci]][github-link] +[![Coverage Status][codecov-badge]][codecov-link] +[![PyPI][pypi-badge]][pypi-link] +[![Conda][conda-badge]][conda-link] +[![PyPI - Downloads][install-badge]][install-link] + +

+ markdown-it-py icon +

+ +> Markdown parser done right. + +- Follows the __[CommonMark spec](http://spec.commonmark.org/)__ for baseline parsing +- Configurable syntax: you can add new rules and even replace existing ones. +- Pluggable: Adds syntax extensions to extend the parser (see the [plugin list][md-plugins]). +- High speed (see our [benchmarking tests][md-performance]) +- Easy to configure for [security][md-security] +- Member of [Google's Assured Open Source Software](https://cloud.google.com/assured-open-source-software/docs/supported-packages) + +This is a Python port of [markdown-it], and some of its associated plugins. +For more details see: . + +For details on [markdown-it] itself, see: + +- The __[Live demo](https://markdown-it.github.io)__ +- [The markdown-it README][markdown-it-readme] + +**See also:** [markdown-it-pyrs](https://github.com/chrisjsewell/markdown-it-pyrs) for an experimental Rust binding, +for even more speed! + +## Installation + +### PIP + +```bash +pip install markdown-it-py[plugins] +``` + +or with extras + +```bash +pip install markdown-it-py[linkify,plugins] +``` + +### Conda + +```bash +conda install -c conda-forge markdown-it-py +``` + +or with extras + +```bash +conda install -c conda-forge markdown-it-py linkify-it-py mdit-py-plugins +``` + +## Usage + +### Python API Usage + +Render markdown to HTML with markdown-it-py and a custom configuration +with and without plugins and features: + +```python +from markdown_it import MarkdownIt +from mdit_py_plugins.front_matter import front_matter_plugin +from mdit_py_plugins.footnote import footnote_plugin + +md = ( + MarkdownIt('commonmark', {'breaks':True,'html':True}) + .use(front_matter_plugin) + .use(footnote_plugin) + .enable('table') +) +text = (""" +--- +a: 1 +--- + +a | b +- | - +1 | 2 + +A footnote [^1] + +[^1]: some details +""") +tokens = md.parse(text) +html_text = md.render(text) + +## To export the html to a file, uncomment the lines below: +# from pathlib import Path +# Path("output.html").write_text(html_text) +``` + +### Command-line Usage + +Render markdown to HTML with markdown-it-py from the +command-line: + +```console +usage: markdown-it [-h] [-v] [filenames [filenames ...]] + +Parse one or more markdown files, convert each to HTML, and print to stdout + +positional arguments: + filenames specify an optional list of files to convert + +optional arguments: + -h, --help show this help message and exit + -v, --version show program's version number and exit + +Interactive: + + $ markdown-it + markdown-it-py [version 0.0.0] (interactive) + Type Ctrl-D to complete input, or Ctrl-C to exit. + >>> # Example + ... > markdown *input* + ... +

Example

+
+

markdown input

+
+ +Batch: + + $ markdown-it README.md README.footer.md > index.html + +``` + +## References / Thanks + +Big thanks to the authors of [markdown-it]: + +- Alex Kocharin [github/rlidwka](https://github.com/rlidwka) +- Vitaly Puzrin [github/puzrin](https://github.com/puzrin) + +Also [John MacFarlane](https://github.com/jgm) for his work on the CommonMark spec and reference implementations. + +[github-ci]: https://github.com/executablebooks/markdown-it-py/actions/workflows/tests.yml/badge.svg?branch=master +[github-link]: https://github.com/executablebooks/markdown-it-py +[pypi-badge]: https://img.shields.io/pypi/v/markdown-it-py.svg +[pypi-link]: https://pypi.org/project/markdown-it-py +[conda-badge]: https://anaconda.org/conda-forge/markdown-it-py/badges/version.svg +[conda-link]: https://anaconda.org/conda-forge/markdown-it-py +[codecov-badge]: https://codecov.io/gh/executablebooks/markdown-it-py/branch/master/graph/badge.svg +[codecov-link]: https://codecov.io/gh/executablebooks/markdown-it-py +[install-badge]: https://img.shields.io/pypi/dw/markdown-it-py?label=pypi%20installs +[install-link]: https://pypistats.org/packages/markdown-it-py + +[CommonMark spec]: http://spec.commonmark.org/ +[markdown-it]: https://github.com/markdown-it/markdown-it +[markdown-it-readme]: https://github.com/markdown-it/markdown-it/blob/master/README.md +[md-security]: https://markdown-it-py.readthedocs.io/en/latest/security.html +[md-performance]: https://markdown-it-py.readthedocs.io/en/latest/performance.html +[md-plugins]: https://markdown-it-py.readthedocs.io/en/latest/plugins.html + diff --git a/local_packages/markdown_it_py-4.0.0.dist-info/RECORD b/local_packages/markdown_it_py-4.0.0.dist-info/RECORD new file mode 100644 index 0000000..d2e4bfc --- /dev/null +++ b/local_packages/markdown_it_py-4.0.0.dist-info/RECORD @@ -0,0 +1,142 @@ +../../bin/markdown-it.exe,sha256=yuFxtGh4pZ_zpS2d31_X5pR3yix_iTsKcqnRkyuqZ8o,108393 +markdown_it/__init__.py,sha256=R7fMvDxageYJ4Q6doBcimogy1ctcV1eBuCFu5Pr8bbA,114 +markdown_it/__pycache__/__init__.cpython-312.pyc,, +markdown_it/__pycache__/_compat.cpython-312.pyc,, +markdown_it/__pycache__/_punycode.cpython-312.pyc,, +markdown_it/__pycache__/main.cpython-312.pyc,, +markdown_it/__pycache__/parser_block.cpython-312.pyc,, +markdown_it/__pycache__/parser_core.cpython-312.pyc,, +markdown_it/__pycache__/parser_inline.cpython-312.pyc,, +markdown_it/__pycache__/renderer.cpython-312.pyc,, +markdown_it/__pycache__/ruler.cpython-312.pyc,, +markdown_it/__pycache__/token.cpython-312.pyc,, +markdown_it/__pycache__/tree.cpython-312.pyc,, +markdown_it/__pycache__/utils.cpython-312.pyc,, +markdown_it/_compat.py,sha256=U4S_2y3zgLZVfMenHRaJFBW8yqh2mUBuI291LGQVOJ8,35 +markdown_it/_punycode.py,sha256=JvSOZJ4VKr58z7unFGM0KhfTxqHMk2w8gglxae2QszM,2373 +markdown_it/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +markdown_it/cli/__pycache__/__init__.cpython-312.pyc,, +markdown_it/cli/__pycache__/parse.cpython-312.pyc,, +markdown_it/cli/parse.py,sha256=Un3N7fyGHhZAQouGVnRx-WZcpKwEK2OF08rzVAEBie8,2881 +markdown_it/common/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +markdown_it/common/__pycache__/__init__.cpython-312.pyc,, +markdown_it/common/__pycache__/entities.cpython-312.pyc,, +markdown_it/common/__pycache__/html_blocks.cpython-312.pyc,, +markdown_it/common/__pycache__/html_re.cpython-312.pyc,, +markdown_it/common/__pycache__/normalize_url.cpython-312.pyc,, +markdown_it/common/__pycache__/utils.cpython-312.pyc,, +markdown_it/common/entities.py,sha256=EYRCmUL7ZU1FRGLSXQlPx356lY8EUBdFyx96eSGc6d0,157 +markdown_it/common/html_blocks.py,sha256=QXbUDMoN9lXLgYFk2DBYllnLiFukL6dHn2X98Y6Wews,986 +markdown_it/common/html_re.py,sha256=FggAEv9IL8gHQqsGTkHcf333rTojwG0DQJMH9oVu0fU,926 +markdown_it/common/normalize_url.py,sha256=avOXnLd9xw5jU1q5PLftjAM9pvGx8l9QDEkmZSyrMgg,2568 +markdown_it/common/utils.py,sha256=pMgvMOE3ZW-BdJ7HfuzlXNKyD1Ivk7jHErc2J_B8J5M,8734 +markdown_it/helpers/__init__.py,sha256=YH2z7dS0WUc_9l51MWPvrLtFoBPh4JLGw58OuhGRCK0,253 +markdown_it/helpers/__pycache__/__init__.cpython-312.pyc,, +markdown_it/helpers/__pycache__/parse_link_destination.cpython-312.pyc,, +markdown_it/helpers/__pycache__/parse_link_label.cpython-312.pyc,, +markdown_it/helpers/__pycache__/parse_link_title.cpython-312.pyc,, +markdown_it/helpers/parse_link_destination.py,sha256=u-xxWVP3g1s7C1bQuQItiYyDrYoYHJzXaZXPgr-o6mY,1906 +markdown_it/helpers/parse_link_label.py,sha256=PIHG6ZMm3BUw0a2m17lCGqNrl3vaz911tuoGviWD3I4,1037 +markdown_it/helpers/parse_link_title.py,sha256=jkLoYQMKNeX9bvWQHkaSroiEo27HylkEUNmj8xBRlp4,2273 +markdown_it/main.py,sha256=vzuT23LJyKrPKNyHKKAbOHkNWpwIldOGUM-IGsv2DHM,12732 +markdown_it/parser_block.py,sha256=-MyugXB63Te71s4NcSQZiK5bE6BHkdFyZv_bviuatdI,3939 +markdown_it/parser_core.py,sha256=SRmJjqe8dC6GWzEARpWba59cBmxjCr3Gsg8h29O8sQk,1016 +markdown_it/parser_inline.py,sha256=y0jCig8CJxQO7hBz0ZY3sGvPlAKTohOwIgaqnlSaS5A,5024 +markdown_it/port.yaml,sha256=jt_rdwOnfocOV5nc35revTybAAQMIp_-1fla_527sVE,2447 +markdown_it/presets/__init__.py,sha256=22vFtwJEY7iqFRtgVZ-pJthcetfpr1Oig8XOF9x1328,970 +markdown_it/presets/__pycache__/__init__.cpython-312.pyc,, +markdown_it/presets/__pycache__/commonmark.cpython-312.pyc,, +markdown_it/presets/__pycache__/default.cpython-312.pyc,, +markdown_it/presets/__pycache__/zero.cpython-312.pyc,, +markdown_it/presets/commonmark.py,sha256=ygfb0R7WQ_ZoyQP3df-B0EnYMqNXCVOSw9SAdMjsGow,2869 +markdown_it/presets/default.py,sha256=FfKVUI0HH3M-_qy6RwotLStdC4PAaAxE7Dq0_KQtRtc,1811 +markdown_it/presets/zero.py,sha256=okXWTBEI-2nmwx5XKeCjxInRf65oC11gahtRl-QNtHM,2113 +markdown_it/py.typed,sha256=8PjyZ1aVoQpRVvt71muvuq5qE-jTFZkK-GLHkhdebmc,26 +markdown_it/renderer.py,sha256=Lzr0glqd5oxFL10DOfjjW8kg4Gp41idQ4viEQaE47oA,9947 +markdown_it/ruler.py,sha256=eMAtWGRAfSM33aiJed0k5923BEkuMVsMq1ct8vU-ql4,9142 +markdown_it/rules_block/__init__.py,sha256=SQpg0ocmsHeILPAWRHhzgLgJMKIcNkQyELH13o_6Ktc,553 +markdown_it/rules_block/__pycache__/__init__.cpython-312.pyc,, +markdown_it/rules_block/__pycache__/blockquote.cpython-312.pyc,, +markdown_it/rules_block/__pycache__/code.cpython-312.pyc,, +markdown_it/rules_block/__pycache__/fence.cpython-312.pyc,, +markdown_it/rules_block/__pycache__/heading.cpython-312.pyc,, +markdown_it/rules_block/__pycache__/hr.cpython-312.pyc,, +markdown_it/rules_block/__pycache__/html_block.cpython-312.pyc,, +markdown_it/rules_block/__pycache__/lheading.cpython-312.pyc,, +markdown_it/rules_block/__pycache__/list.cpython-312.pyc,, +markdown_it/rules_block/__pycache__/paragraph.cpython-312.pyc,, +markdown_it/rules_block/__pycache__/reference.cpython-312.pyc,, +markdown_it/rules_block/__pycache__/state_block.cpython-312.pyc,, +markdown_it/rules_block/__pycache__/table.cpython-312.pyc,, +markdown_it/rules_block/blockquote.py,sha256=7uymS36dcrned3DsIaRcqcbFU1NlymhvsZpEXTD3_n8,8887 +markdown_it/rules_block/code.py,sha256=iTAxv0U1-MDhz88M1m1pi2vzOhEMSEROsXMo2Qq--kU,860 +markdown_it/rules_block/fence.py,sha256=BJgU-PqZ4vAlCqGcrc8UtdLpJJyMeRWN-G-Op-zxrMc,2537 +markdown_it/rules_block/heading.py,sha256=4Lh15rwoVsQjE1hVhpbhidQ0k9xKHihgjAeYSbwgO5k,1745 +markdown_it/rules_block/hr.py,sha256=QCoY5kImaQRvF7PyP8OoWft6A8JVH1v6MN-0HR9Ikpg,1227 +markdown_it/rules_block/html_block.py,sha256=wA8pb34LtZr1BkIATgGKQBIGX5jQNOkwZl9UGEqvb5M,2721 +markdown_it/rules_block/lheading.py,sha256=fWoEuUo7S2svr5UMKmyQMkh0hheYAHg2gMM266Mogs4,2625 +markdown_it/rules_block/list.py,sha256=gIodkAJFyOIyKCZCj5lAlL7jIj5kAzrDb-K-2MFNplY,9668 +markdown_it/rules_block/paragraph.py,sha256=9pmCwA7eMu4LBdV4fWKzC4EdwaOoaGw2kfeYSQiLye8,1819 +markdown_it/rules_block/reference.py,sha256=ue1qZbUaUP0GIvwTjh6nD1UtCij8uwsIMuYW1xBkckc,6983 +markdown_it/rules_block/state_block.py,sha256=HowsQyy5hGUibH4HRZWKfLIlXeDUnuWL7kpF0-rSwoM,8422 +markdown_it/rules_block/table.py,sha256=8nMd9ONGOffER7BXmc9kbbhxkLjtpX79dVLR0iatGnM,7682 +markdown_it/rules_core/__init__.py,sha256=QFGBe9TUjnRQJDU7xY4SQYpxyTHNwg8beTSwXpNGRjE,394 +markdown_it/rules_core/__pycache__/__init__.cpython-312.pyc,, +markdown_it/rules_core/__pycache__/block.cpython-312.pyc,, +markdown_it/rules_core/__pycache__/inline.cpython-312.pyc,, +markdown_it/rules_core/__pycache__/linkify.cpython-312.pyc,, +markdown_it/rules_core/__pycache__/normalize.cpython-312.pyc,, +markdown_it/rules_core/__pycache__/replacements.cpython-312.pyc,, +markdown_it/rules_core/__pycache__/smartquotes.cpython-312.pyc,, +markdown_it/rules_core/__pycache__/state_core.cpython-312.pyc,, +markdown_it/rules_core/__pycache__/text_join.cpython-312.pyc,, +markdown_it/rules_core/block.py,sha256=0_JY1CUy-H2OooFtIEZAACtuoGUMohgxo4Z6A_UinSg,372 +markdown_it/rules_core/inline.py,sha256=9oWmeBhJHE7x47oJcN9yp6UsAZtrEY_A-VmfoMvKld4,325 +markdown_it/rules_core/linkify.py,sha256=mjQqpk_lHLh2Nxw4UFaLxa47Fgi-OHnmDamlgXnhmv0,5141 +markdown_it/rules_core/normalize.py,sha256=AJm4femtFJ_QBnM0dzh0UNqTTJk9K6KMtwRPaioZFqM,403 +markdown_it/rules_core/replacements.py,sha256=CH75mie-tdzdLKQtMBuCTcXAl1ijegdZGfbV_Vk7st0,3471 +markdown_it/rules_core/smartquotes.py,sha256=izK9fSyuTzA-zAUGkRkz9KwwCQWo40iRqcCKqOhFbEE,7443 +markdown_it/rules_core/state_core.py,sha256=HqWZCUr5fW7xG6jeQZDdO0hE9hxxyl3_-bawgOy57HY,570 +markdown_it/rules_core/text_join.py,sha256=rLXxNuLh_es5RvH31GsXi7en8bMNO9UJ5nbJMDBPltY,1173 +markdown_it/rules_inline/__init__.py,sha256=qqHZk6-YE8Rc12q6PxvVKBaxv2wmZeeo45H1XMR_Vxs,696 +markdown_it/rules_inline/__pycache__/__init__.cpython-312.pyc,, +markdown_it/rules_inline/__pycache__/autolink.cpython-312.pyc,, +markdown_it/rules_inline/__pycache__/backticks.cpython-312.pyc,, +markdown_it/rules_inline/__pycache__/balance_pairs.cpython-312.pyc,, +markdown_it/rules_inline/__pycache__/emphasis.cpython-312.pyc,, +markdown_it/rules_inline/__pycache__/entity.cpython-312.pyc,, +markdown_it/rules_inline/__pycache__/escape.cpython-312.pyc,, +markdown_it/rules_inline/__pycache__/fragments_join.cpython-312.pyc,, +markdown_it/rules_inline/__pycache__/html_inline.cpython-312.pyc,, +markdown_it/rules_inline/__pycache__/image.cpython-312.pyc,, +markdown_it/rules_inline/__pycache__/link.cpython-312.pyc,, +markdown_it/rules_inline/__pycache__/linkify.cpython-312.pyc,, +markdown_it/rules_inline/__pycache__/newline.cpython-312.pyc,, +markdown_it/rules_inline/__pycache__/state_inline.cpython-312.pyc,, +markdown_it/rules_inline/__pycache__/strikethrough.cpython-312.pyc,, +markdown_it/rules_inline/__pycache__/text.cpython-312.pyc,, +markdown_it/rules_inline/autolink.py,sha256=pPoqJY8i99VtFn7KgUzMackMeq1hytzioVvWs-VQPRo,2065 +markdown_it/rules_inline/backticks.py,sha256=J7bezjjNxiXlKqvHc0fJkHZwH7-2nBsXVjcKydk8E4M,2037 +markdown_it/rules_inline/balance_pairs.py,sha256=5zgBiGidqdiWmt7Io_cuZOYh5EFEfXrYRce8RXg5m7o,4852 +markdown_it/rules_inline/emphasis.py,sha256=7aDLZx0Jlekuvbu3uEUTDhJp00Z0Pj6g4C3-VLhI8Co,3123 +markdown_it/rules_inline/entity.py,sha256=CE8AIGMi5isEa24RNseo0wRmTTaj5YLbgTFdDmBesAU,1651 +markdown_it/rules_inline/escape.py,sha256=KGulwrP5FnqZM7GXY8lf7pyVv0YkR59taZDeHb5cmKg,1659 +markdown_it/rules_inline/fragments_join.py,sha256=_3JbwWYJz74gRHeZk6T8edVJT2IVSsi7FfmJJlieQlA,1493 +markdown_it/rules_inline/html_inline.py,sha256=SBg6HR0HRqCdrkkec0dfOYuQdAqyfeLRFLeQggtgjvg,1130 +markdown_it/rules_inline/image.py,sha256=Wbsg7jgnOtKXIwXGNJOlG7ORThkMkBVolxItC0ph6C0,4141 +markdown_it/rules_inline/link.py,sha256=2oD-fAdB0xyxDRtZLTjzLeWbzJ1k9bbPVQmohb58RuI,4258 +markdown_it/rules_inline/linkify.py,sha256=ifH6sb5wE8PGMWEw9Sr4x0DhMVfNOEBCfFSwKll2O-s,1706 +markdown_it/rules_inline/newline.py,sha256=329r0V3aDjzNtJcvzA3lsFYjzgBrShLAV5uf9hwQL_M,1297 +markdown_it/rules_inline/state_inline.py,sha256=d-menFzbz5FDy1JNgGBF-BASasnVI-9RuOxWz9PnKn4,5003 +markdown_it/rules_inline/strikethrough.py,sha256=pwcPlyhkh5pqFVxRCSrdW5dNCIOtU4eDit7TVDTPIVA,3214 +markdown_it/rules_inline/text.py,sha256=FQqaQRUqbnMLO9ZSWPWQUMEKH6JqWSSSmlZ5Ii9P48o,1119 +markdown_it/token.py,sha256=cWrt9kodfPdizHq_tYrzyIZNtJYNMN1813DPNlunwTg,6381 +markdown_it/tree.py,sha256=56Cdbwu2Aiks7kNYqO_fQZWpPb_n48CUllzjQQfgu1Y,11111 +markdown_it/utils.py,sha256=lVLeX7Af3GaNFfxmMgUbsn5p7cXbwhLq7RSf56UWuRE,5687 +markdown_it_py-4.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +markdown_it_py-4.0.0.dist-info/METADATA,sha256=6fyqHi2vP5bYQKCfuqo5T-qt83o22Ip7a2tnJIfGW_s,7288 +markdown_it_py-4.0.0.dist-info/RECORD,, +markdown_it_py-4.0.0.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82 +markdown_it_py-4.0.0.dist-info/entry_points.txt,sha256=T81l7fHQ3pllpQ4wUtQK6a8g_p6wxQbnjKVHCk2WMG4,58 +markdown_it_py-4.0.0.dist-info/licenses/LICENSE,sha256=SiJg1uLND1oVGh6G2_59PtVSseK-q_mUHBulxJy85IQ,1078 +markdown_it_py-4.0.0.dist-info/licenses/LICENSE.markdown-it,sha256=eSxIxahJoV_fnjfovPnm0d0TsytGxkKnSKCkapkZ1HM,1073 diff --git a/local_packages/markdown_it_py-4.0.0.dist-info/WHEEL b/local_packages/markdown_it_py-4.0.0.dist-info/WHEEL new file mode 100644 index 0000000..d8b9936 --- /dev/null +++ b/local_packages/markdown_it_py-4.0.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: flit 3.12.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/local_packages/markdown_it_py-4.0.0.dist-info/entry_points.txt b/local_packages/markdown_it_py-4.0.0.dist-info/entry_points.txt new file mode 100644 index 0000000..7d829cd --- /dev/null +++ b/local_packages/markdown_it_py-4.0.0.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[console_scripts] +markdown-it=markdown_it.cli.parse:main + diff --git a/local_packages/markdown_it_py-4.0.0.dist-info/licenses/LICENSE b/local_packages/markdown_it_py-4.0.0.dist-info/licenses/LICENSE new file mode 100644 index 0000000..582ddf5 --- /dev/null +++ b/local_packages/markdown_it_py-4.0.0.dist-info/licenses/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 ExecutableBookProject + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/local_packages/markdown_it_py-4.0.0.dist-info/licenses/LICENSE.markdown-it b/local_packages/markdown_it_py-4.0.0.dist-info/licenses/LICENSE.markdown-it new file mode 100644 index 0000000..7ffa058 --- /dev/null +++ b/local_packages/markdown_it_py-4.0.0.dist-info/licenses/LICENSE.markdown-it @@ -0,0 +1,22 @@ +Copyright (c) 2014 Vitaly Puzrin, Alex Kocharin. + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. diff --git a/local_packages/mdurl-0.1.2.dist-info/INSTALLER b/local_packages/mdurl-0.1.2.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/local_packages/mdurl-0.1.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/local_packages/mdurl-0.1.2.dist-info/LICENSE b/local_packages/mdurl-0.1.2.dist-info/LICENSE new file mode 100644 index 0000000..2a920c5 --- /dev/null +++ b/local_packages/mdurl-0.1.2.dist-info/LICENSE @@ -0,0 +1,46 @@ +Copyright (c) 2015 Vitaly Puzrin, Alex Kocharin. +Copyright (c) 2021 Taneli Hukkinen + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +.parse() is based on Joyent's node.js `url` code: + +Copyright Joyent, Inc. and other Node contributors. All rights reserved. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. diff --git a/local_packages/mdurl-0.1.2.dist-info/METADATA b/local_packages/mdurl-0.1.2.dist-info/METADATA new file mode 100644 index 0000000..b4670e8 --- /dev/null +++ b/local_packages/mdurl-0.1.2.dist-info/METADATA @@ -0,0 +1,32 @@ +Metadata-Version: 2.1 +Name: mdurl +Version: 0.1.2 +Summary: Markdown URL utilities +Keywords: markdown,commonmark +Author-email: Taneli Hukkinen +Requires-Python: >=3.7 +Description-Content-Type: text/markdown +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: MacOS +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX :: Linux +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Typing :: Typed +Project-URL: Homepage, https://github.com/executablebooks/mdurl + +# mdurl + +[![Build Status](https://github.com/executablebooks/mdurl/workflows/Tests/badge.svg?branch=master)](https://github.com/executablebooks/mdurl/actions?query=workflow%3ATests+branch%3Amaster+event%3Apush) +[![codecov.io](https://codecov.io/gh/executablebooks/mdurl/branch/master/graph/badge.svg)](https://codecov.io/gh/executablebooks/mdurl) +[![PyPI version](https://img.shields.io/pypi/v/mdurl)](https://pypi.org/project/mdurl) + +This is a Python port of the JavaScript [mdurl](https://www.npmjs.com/package/mdurl) package. +See the [upstream README.md file](https://github.com/markdown-it/mdurl/blob/master/README.md) for API documentation. + diff --git a/local_packages/mdurl-0.1.2.dist-info/RECORD b/local_packages/mdurl-0.1.2.dist-info/RECORD new file mode 100644 index 0000000..0e7b30f --- /dev/null +++ b/local_packages/mdurl-0.1.2.dist-info/RECORD @@ -0,0 +1,18 @@ +mdurl-0.1.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +mdurl-0.1.2.dist-info/LICENSE,sha256=fGBd9uKGZ6lgMRjpgnT2SknOPu0NJvzM6VNKNF4O-VU,2338 +mdurl-0.1.2.dist-info/METADATA,sha256=tTsp1I9Jk2cFP9o8gefOJ9JVg4Drv4PmYCOwLrfd0l0,1638 +mdurl-0.1.2.dist-info/RECORD,, +mdurl-0.1.2.dist-info/WHEEL,sha256=4TfKIB_xu-04bc2iKz6_zFt-gEFEEDU_31HGhqzOCE8,81 +mdurl/__init__.py,sha256=1vpE89NyXniIRZNC_4f6BPm3Ub4bPntjfyyhLRR7opU,547 +mdurl/__pycache__/__init__.cpython-312.pyc,, +mdurl/__pycache__/_decode.cpython-312.pyc,, +mdurl/__pycache__/_encode.cpython-312.pyc,, +mdurl/__pycache__/_format.cpython-312.pyc,, +mdurl/__pycache__/_parse.cpython-312.pyc,, +mdurl/__pycache__/_url.cpython-312.pyc,, +mdurl/_decode.py,sha256=3Q_gDQqU__TvDbu7x-b9LjbVl4QWy5g_qFwljcuvN_Y,3004 +mdurl/_encode.py,sha256=goJLUFt1h4rVZNqqm9t15Nw2W-bFXYQEy3aR01ImWvs,2602 +mdurl/_format.py,sha256=xZct0mdePXA0H3kAqxjGtlB5O86G35DAYMGkA44CmB4,626 +mdurl/_parse.py,sha256=ezZSkM2_4NQ2Zx047sEdcJG7NYQRFHiZK7Y8INHFzwY,11374 +mdurl/_url.py,sha256=5kQnRQN2A_G4svLnRzZcG0bfoD9AbBrYDXousDHZ3z0,284 +mdurl/py.typed,sha256=8PjyZ1aVoQpRVvt71muvuq5qE-jTFZkK-GLHkhdebmc,26 diff --git a/local_packages/mdurl-0.1.2.dist-info/WHEEL b/local_packages/mdurl-0.1.2.dist-info/WHEEL new file mode 100644 index 0000000..668ba4d --- /dev/null +++ b/local_packages/mdurl-0.1.2.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: flit 3.7.1 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/local_packages/mdurl/__init__.py b/local_packages/mdurl/__init__.py new file mode 100644 index 0000000..cdbb640 --- /dev/null +++ b/local_packages/mdurl/__init__.py @@ -0,0 +1,18 @@ +__all__ = ( + "decode", + "DECODE_DEFAULT_CHARS", + "DECODE_COMPONENT_CHARS", + "encode", + "ENCODE_DEFAULT_CHARS", + "ENCODE_COMPONENT_CHARS", + "format", + "parse", + "URL", +) +__version__ = "0.1.2" # DO NOT EDIT THIS LINE MANUALLY. LET bump2version UTILITY DO IT + +from mdurl._decode import DECODE_COMPONENT_CHARS, DECODE_DEFAULT_CHARS, decode +from mdurl._encode import ENCODE_COMPONENT_CHARS, ENCODE_DEFAULT_CHARS, encode +from mdurl._format import format +from mdurl._parse import url_parse as parse +from mdurl._url import URL diff --git a/local_packages/mdurl/_decode.py b/local_packages/mdurl/_decode.py new file mode 100644 index 0000000..9b50a2d --- /dev/null +++ b/local_packages/mdurl/_decode.py @@ -0,0 +1,104 @@ +from __future__ import annotations + +from collections.abc import Sequence +import functools +import re + +DECODE_DEFAULT_CHARS = ";/?:@&=+$,#" +DECODE_COMPONENT_CHARS = "" + +decode_cache: dict[str, list[str]] = {} + + +def get_decode_cache(exclude: str) -> Sequence[str]: + if exclude in decode_cache: + return decode_cache[exclude] + + cache: list[str] = [] + decode_cache[exclude] = cache + + for i in range(128): + ch = chr(i) + cache.append(ch) + + for i in range(len(exclude)): + ch_code = ord(exclude[i]) + cache[ch_code] = "%" + ("0" + hex(ch_code)[2:].upper())[-2:] + + return cache + + +# Decode percent-encoded string. +# +def decode(string: str, exclude: str = DECODE_DEFAULT_CHARS) -> str: + cache = get_decode_cache(exclude) + repl_func = functools.partial(repl_func_with_cache, cache=cache) + return re.sub(r"(%[a-f0-9]{2})+", repl_func, string, flags=re.IGNORECASE) + + +def repl_func_with_cache(match: re.Match, cache: Sequence[str]) -> str: + seq = match.group() + result = "" + + i = 0 + l = len(seq) # noqa: E741 + while i < l: + b1 = int(seq[i + 1 : i + 3], 16) + + if b1 < 0x80: + result += cache[b1] + i += 3 # emulate JS for loop statement3 + continue + + if (b1 & 0xE0) == 0xC0 and (i + 3 < l): + # 110xxxxx 10xxxxxx + b2 = int(seq[i + 4 : i + 6], 16) + + if (b2 & 0xC0) == 0x80: + all_bytes = bytes((b1, b2)) + try: + result += all_bytes.decode() + except UnicodeDecodeError: + result += "\ufffd" * 2 + + i += 3 + i += 3 # emulate JS for loop statement3 + continue + + if (b1 & 0xF0) == 0xE0 and (i + 6 < l): + # 1110xxxx 10xxxxxx 10xxxxxx + b2 = int(seq[i + 4 : i + 6], 16) + b3 = int(seq[i + 7 : i + 9], 16) + + if (b2 & 0xC0) == 0x80 and (b3 & 0xC0) == 0x80: + all_bytes = bytes((b1, b2, b3)) + try: + result += all_bytes.decode() + except UnicodeDecodeError: + result += "\ufffd" * 3 + + i += 6 + i += 3 # emulate JS for loop statement3 + continue + + if (b1 & 0xF8) == 0xF0 and (i + 9 < l): + # 111110xx 10xxxxxx 10xxxxxx 10xxxxxx + b2 = int(seq[i + 4 : i + 6], 16) + b3 = int(seq[i + 7 : i + 9], 16) + b4 = int(seq[i + 10 : i + 12], 16) + + if (b2 & 0xC0) == 0x80 and (b3 & 0xC0) == 0x80 and (b4 & 0xC0) == 0x80: + all_bytes = bytes((b1, b2, b3, b4)) + try: + result += all_bytes.decode() + except UnicodeDecodeError: + result += "\ufffd" * 4 + + i += 9 + i += 3 # emulate JS for loop statement3 + continue + + result += "\ufffd" + i += 3 # emulate JS for loop statement3 + + return result diff --git a/local_packages/mdurl/_encode.py b/local_packages/mdurl/_encode.py new file mode 100644 index 0000000..bc2e5b9 --- /dev/null +++ b/local_packages/mdurl/_encode.py @@ -0,0 +1,85 @@ +from __future__ import annotations + +from collections.abc import Sequence +from string import ascii_letters, digits, hexdigits +from urllib.parse import quote as encode_uri_component + +ASCII_LETTERS_AND_DIGITS = ascii_letters + digits + +ENCODE_DEFAULT_CHARS = ";/?:@&=+$,-_.!~*'()#" +ENCODE_COMPONENT_CHARS = "-_.!~*'()" + +encode_cache: dict[str, list[str]] = {} + + +# Create a lookup array where anything but characters in `chars` string +# and alphanumeric chars is percent-encoded. +def get_encode_cache(exclude: str) -> Sequence[str]: + if exclude in encode_cache: + return encode_cache[exclude] + + cache: list[str] = [] + encode_cache[exclude] = cache + + for i in range(128): + ch = chr(i) + + if ch in ASCII_LETTERS_AND_DIGITS: + # always allow unencoded alphanumeric characters + cache.append(ch) + else: + cache.append("%" + ("0" + hex(i)[2:].upper())[-2:]) + + for i in range(len(exclude)): + cache[ord(exclude[i])] = exclude[i] + + return cache + + +# Encode unsafe characters with percent-encoding, skipping already +# encoded sequences. +# +# - string - string to encode +# - exclude - list of characters to ignore (in addition to a-zA-Z0-9) +# - keepEscaped - don't encode '%' in a correct escape sequence (default: true) +def encode( + string: str, exclude: str = ENCODE_DEFAULT_CHARS, *, keep_escaped: bool = True +) -> str: + result = "" + + cache = get_encode_cache(exclude) + + l = len(string) # noqa: E741 + i = 0 + while i < l: + code = ord(string[i]) + + # % + if keep_escaped and code == 0x25 and i + 2 < l: + if all(c in hexdigits for c in string[i + 1 : i + 3]): + result += string[i : i + 3] + i += 2 + i += 1 # JS for loop statement3 + continue + + if code < 128: + result += cache[code] + i += 1 # JS for loop statement3 + continue + + if code >= 0xD800 and code <= 0xDFFF: + if code >= 0xD800 and code <= 0xDBFF and i + 1 < l: + next_code = ord(string[i + 1]) + if next_code >= 0xDC00 and next_code <= 0xDFFF: + result += encode_uri_component(string[i] + string[i + 1]) + i += 1 + i += 1 # JS for loop statement3 + continue + result += "%EF%BF%BD" + i += 1 # JS for loop statement3 + continue + + result += encode_uri_component(string[i]) + i += 1 # JS for loop statement3 + + return result diff --git a/local_packages/mdurl/_format.py b/local_packages/mdurl/_format.py new file mode 100644 index 0000000..12524ca --- /dev/null +++ b/local_packages/mdurl/_format.py @@ -0,0 +1,27 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from mdurl._url import URL + + +def format(url: URL) -> str: # noqa: A001 + result = "" + + result += url.protocol or "" + result += "//" if url.slashes else "" + result += url.auth + "@" if url.auth else "" + + if url.hostname and ":" in url.hostname: + # ipv6 address + result += "[" + url.hostname + "]" + else: + result += url.hostname or "" + + result += ":" + url.port if url.port else "" + result += url.pathname or "" + result += url.search or "" + result += url.hash or "" + + return result diff --git a/local_packages/mdurl/_parse.py b/local_packages/mdurl/_parse.py new file mode 100644 index 0000000..ffeeac7 --- /dev/null +++ b/local_packages/mdurl/_parse.py @@ -0,0 +1,304 @@ +# Copyright Joyent, Inc. and other Node contributors. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the +# following conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +# USE OR OTHER DEALINGS IN THE SOFTWARE. + + +# Changes from joyent/node: +# +# 1. No leading slash in paths, +# e.g. in `url.parse('http://foo?bar')` pathname is ``, not `/` +# +# 2. Backslashes are not replaced with slashes, +# so `http:\\example.org\` is treated like a relative path +# +# 3. Trailing colon is treated like a part of the path, +# i.e. in `http://example.org:foo` pathname is `:foo` +# +# 4. Nothing is URL-encoded in the resulting object, +# (in joyent/node some chars in auth and paths are encoded) +# +# 5. `url.parse()` does not have `parseQueryString` argument +# +# 6. Removed extraneous result properties: `host`, `path`, `query`, etc., +# which can be constructed using other parts of the url. + +from __future__ import annotations + +from collections import defaultdict +import re + +from mdurl._url import URL + +# Reference: RFC 3986, RFC 1808, RFC 2396 + +# define these here so at least they only have to be +# compiled once on the first module load. +PROTOCOL_PATTERN = re.compile(r"^([a-z0-9.+-]+:)", flags=re.IGNORECASE) +PORT_PATTERN = re.compile(r":[0-9]*$") + +# Special case for a simple path URL +SIMPLE_PATH_PATTERN = re.compile(r"^(//?(?!/)[^?\s]*)(\?[^\s]*)?$") + +# RFC 2396: characters reserved for delimiting URLs. +# We actually just auto-escape these. +DELIMS = ("<", ">", '"', "`", " ", "\r", "\n", "\t") + +# RFC 2396: characters not allowed for various reasons. +UNWISE = ("{", "}", "|", "\\", "^", "`") + DELIMS + +# Allowed by RFCs, but cause of XSS attacks. Always escape these. +AUTO_ESCAPE = ("'",) + UNWISE +# Characters that are never ever allowed in a hostname. +# Note that any invalid chars are also handled, but these +# are the ones that are *expected* to be seen, so we fast-path +# them. +NON_HOST_CHARS = ("%", "/", "?", ";", "#") + AUTO_ESCAPE +HOST_ENDING_CHARS = ("/", "?", "#") +HOSTNAME_MAX_LEN = 255 +HOSTNAME_PART_PATTERN = re.compile(r"^[+a-z0-9A-Z_-]{0,63}$") +HOSTNAME_PART_START = re.compile(r"^([+a-z0-9A-Z_-]{0,63})(.*)$") +# protocols that can allow "unsafe" and "unwise" chars. + +# protocols that never have a hostname. +HOSTLESS_PROTOCOL = defaultdict( + bool, + { + "javascript": True, + "javascript:": True, + }, +) +# protocols that always contain a // bit. +SLASHED_PROTOCOL = defaultdict( + bool, + { + "http": True, + "https": True, + "ftp": True, + "gopher": True, + "file": True, + "http:": True, + "https:": True, + "ftp:": True, + "gopher:": True, + "file:": True, + }, +) + + +class MutableURL: + def __init__(self) -> None: + self.protocol: str | None = None + self.slashes: bool = False + self.auth: str | None = None + self.port: str | None = None + self.hostname: str | None = None + self.hash: str | None = None + self.search: str | None = None + self.pathname: str | None = None + + def parse(self, url: str, slashes_denote_host: bool) -> "MutableURL": + lower_proto = "" + slashes = False + rest = url + + # trim before proceeding. + # This is to support parse stuff like " http://foo.com \n" + rest = rest.strip() + + if not slashes_denote_host and len(url.split("#")) == 1: + # Try fast path regexp + simple_path = SIMPLE_PATH_PATTERN.match(rest) + if simple_path: + self.pathname = simple_path.group(1) + if simple_path.group(2): + self.search = simple_path.group(2) + return self + + proto = "" + proto_match = PROTOCOL_PATTERN.match(rest) + if proto_match: + proto = proto_match.group() + lower_proto = proto.lower() + self.protocol = proto + rest = rest[len(proto) :] + + # figure out if it's got a host + # user@server is *always* interpreted as a hostname, and url + # resolution will treat //foo/bar as host=foo,path=bar because that's + # how the browser resolves relative URLs. + if slashes_denote_host or proto or re.search(r"^//[^@/]+@[^@/]+", rest): + slashes = rest.startswith("//") + if slashes and not (proto and HOSTLESS_PROTOCOL[proto]): + rest = rest[2:] + self.slashes = True + + if not HOSTLESS_PROTOCOL[proto] and ( + slashes or (proto and not SLASHED_PROTOCOL[proto]) + ): + + # there's a hostname. + # the first instance of /, ?, ;, or # ends the host. + # + # If there is an @ in the hostname, then non-host chars *are* allowed + # to the left of the last @ sign, unless some host-ending character + # comes *before* the @-sign. + # URLs are obnoxious. + # + # ex: + # http://a@b@c/ => user:a@b host:c + # http://a@b?@c => user:a host:c path:/?@c + + # v0.12 TODO(isaacs): This is not quite how Chrome does things. + # Review our test case against browsers more comprehensively. + + # find the first instance of any hostEndingChars + host_end = -1 + for i in range(len(HOST_ENDING_CHARS)): + hec = rest.find(HOST_ENDING_CHARS[i]) + if hec != -1 and (host_end == -1 or hec < host_end): + host_end = hec + + # at this point, either we have an explicit point where the + # auth portion cannot go past, or the last @ char is the decider. + if host_end == -1: + # atSign can be anywhere. + at_sign = rest.rfind("@") + else: + # atSign must be in auth portion. + # http://a@b/c@d => host:b auth:a path:/c@d + at_sign = rest.rfind("@", 0, host_end + 1) + + # Now we have a portion which is definitely the auth. + # Pull that off. + if at_sign != -1: + auth = rest[:at_sign] + rest = rest[at_sign + 1 :] + self.auth = auth + + # the host is the remaining to the left of the first non-host char + host_end = -1 + for i in range(len(NON_HOST_CHARS)): + hec = rest.find(NON_HOST_CHARS[i]) + if hec != -1 and (host_end == -1 or hec < host_end): + host_end = hec + # if we still have not hit it, then the entire thing is a host. + if host_end == -1: + host_end = len(rest) + + if host_end > 0 and rest[host_end - 1] == ":": + host_end -= 1 + host = rest[:host_end] + rest = rest[host_end:] + + # pull out port. + self.parse_host(host) + + # we've indicated that there is a hostname, + # so even if it's empty, it has to be present. + self.hostname = self.hostname or "" + + # if hostname begins with [ and ends with ] + # assume that it's an IPv6 address. + ipv6_hostname = self.hostname.startswith("[") and self.hostname.endswith( + "]" + ) + + # validate a little. + if not ipv6_hostname: + hostparts = self.hostname.split(".") + l = len(hostparts) # noqa: E741 + i = 0 + while i < l: + part = hostparts[i] + if not part: + i += 1 # emulate statement3 in JS for loop + continue + if not HOSTNAME_PART_PATTERN.search(part): + newpart = "" + k = len(part) + j = 0 + while j < k: + if ord(part[j]) > 127: + # we replace non-ASCII char with a temporary placeholder + # we need this to make sure size of hostname is not + # broken by replacing non-ASCII by nothing + newpart += "x" + else: + newpart += part[j] + j += 1 # emulate statement3 in JS for loop + + # we test again with ASCII char only + if not HOSTNAME_PART_PATTERN.search(newpart): + valid_parts = hostparts[:i] + not_host = hostparts[i + 1 :] + bit = HOSTNAME_PART_START.search(part) + if bit: + valid_parts.append(bit.group(1)) + not_host.insert(0, bit.group(2)) + if not_host: + rest = ".".join(not_host) + rest + self.hostname = ".".join(valid_parts) + break + i += 1 # emulate statement3 in JS for loop + + if len(self.hostname) > HOSTNAME_MAX_LEN: + self.hostname = "" + + # strip [ and ] from the hostname + # the host field still retains them, though + if ipv6_hostname: + self.hostname = self.hostname[1:-1] + + # chop off from the tail first. + hash = rest.find("#") # noqa: A001 + if hash != -1: + # got a fragment string. + self.hash = rest[hash:] + rest = rest[:hash] + qm = rest.find("?") + if qm != -1: + self.search = rest[qm:] + rest = rest[:qm] + if rest: + self.pathname = rest + if SLASHED_PROTOCOL[lower_proto] and self.hostname and not self.pathname: + self.pathname = "" + + return self + + def parse_host(self, host: str) -> None: + port_match = PORT_PATTERN.search(host) + if port_match: + port = port_match.group() + if port != ":": + self.port = port[1:] + host = host[: -len(port)] + if host: + self.hostname = host + + +def url_parse(url: URL | str, *, slashes_denote_host: bool = False) -> URL: + if isinstance(url, URL): + return url + u = MutableURL() + u.parse(url, slashes_denote_host) + return URL( + u.protocol, u.slashes, u.auth, u.port, u.hostname, u.hash, u.search, u.pathname + ) diff --git a/local_packages/mdurl/_url.py b/local_packages/mdurl/_url.py new file mode 100644 index 0000000..f866e7a --- /dev/null +++ b/local_packages/mdurl/_url.py @@ -0,0 +1,14 @@ +from __future__ import annotations + +from typing import NamedTuple + + +class URL(NamedTuple): + protocol: str | None + slashes: bool + auth: str | None + port: str | None + hostname: str | None + hash: str | None # noqa: A003 + search: str | None + pathname: str | None diff --git a/local_packages/mdurl/py.typed b/local_packages/mdurl/py.typed new file mode 100644 index 0000000..7632ecf --- /dev/null +++ b/local_packages/mdurl/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561 diff --git a/local_packages/numpy-2.4.4.dist-info/DELVEWHEEL b/local_packages/numpy-2.4.4.dist-info/DELVEWHEEL new file mode 100644 index 0000000..63bbc69 --- /dev/null +++ b/local_packages/numpy-2.4.4.dist-info/DELVEWHEEL @@ -0,0 +1,2 @@ +Version: 1.11.2 +Arguments: ['C:\\Users\\runneradmin\\AppData\\Local\\Temp\\cibw-run-mo_5b201\\cp312-win_amd64\\build\\venv\\Scripts\\delvewheel', 'repair', '--add-path', 'D:/a/numpy-release/numpy-release/.openblas/lib', '-w', 'C:\\Users\\runneradmin\\AppData\\Local\\Temp\\cibw-run-mo_5b201\\cp312-win_amd64\\repaired_wheel', 'C:\\Users\\runneradmin\\AppData\\Local\\Temp\\cibw-run-mo_5b201\\cp312-win_amd64\\built_wheel\\numpy-2.4.4-cp312-cp312-win_amd64.whl'] diff --git a/local_packages/numpy-2.4.4.dist-info/INSTALLER b/local_packages/numpy-2.4.4.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/local_packages/numpy-2.4.4.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/local_packages/numpy-2.4.4.dist-info/METADATA b/local_packages/numpy-2.4.4.dist-info/METADATA new file mode 100644 index 0000000..a8e19c9 --- /dev/null +++ b/local_packages/numpy-2.4.4.dist-info/METADATA @@ -0,0 +1,139 @@ +Metadata-Version: 2.4 +Name: numpy +Version: 2.4.4 +Summary: Fundamental package for array computing in Python +Author: Travis E. Oliphant et al. +Maintainer-Email: NumPy Developers +License-Expression: BSD-3-Clause AND 0BSD AND MIT AND Zlib AND CC0-1.0 +License-File: LICENSE.txt +License-File: numpy/_core/include/numpy/libdivide/LICENSE.txt +License-File: numpy/_core/src/common/pythoncapi-compat/COPYING +License-File: numpy/_core/src/highway/LICENSE +License-File: numpy/_core/src/multiarray/dragon4_LICENSE.txt +License-File: numpy/_core/src/npysort/x86-simd-sort/LICENSE.md +License-File: numpy/_core/src/umath/svml/LICENSE +License-File: numpy/fft/pocketfft/LICENSE.md +License-File: numpy/linalg/lapack_lite/LICENSE.txt +License-File: numpy/ma/LICENSE +License-File: numpy/random/LICENSE.md +License-File: numpy/random/src/distributions/LICENSE.md +License-File: numpy/random/src/mt19937/LICENSE.md +License-File: numpy/random/src/pcg64/LICENSE.md +License-File: numpy/random/src/philox/LICENSE.md +License-File: numpy/random/src/sfc64/LICENSE.md +License-File: numpy/random/src/splitmix64/LICENSE.md +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Science/Research +Classifier: Intended Audience :: Developers +Classifier: Programming Language :: C +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Topic :: Software Development +Classifier: Topic :: Scientific/Engineering +Classifier: Typing :: Typed +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX +Classifier: Operating System :: Unix +Classifier: Operating System :: MacOS +Project-URL: homepage, https://numpy.org +Project-URL: documentation, https://numpy.org/doc/ +Project-URL: source, https://github.com/numpy/numpy +Project-URL: download, https://pypi.org/project/numpy/#files +Project-URL: tracker, https://github.com/numpy/numpy/issues +Project-URL: release notes, https://numpy.org/doc/stable/release +Requires-Python: >=3.11 +Description-Content-Type: text/markdown + +

+ +


+ + +[![Powered by NumFOCUS](https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)]( +https://numfocus.org) +[![PyPI Downloads](https://img.shields.io/pypi/dm/numpy.svg?label=PyPI%20downloads)]( +https://pypi.org/project/numpy/) +[![Conda Downloads](https://img.shields.io/conda/dn/conda-forge/numpy.svg?label=Conda%20downloads)]( +https://anaconda.org/conda-forge/numpy) +[![Stack Overflow](https://img.shields.io/badge/stackoverflow-Ask%20questions-blue.svg)]( +https://stackoverflow.com/questions/tagged/numpy) +[![Nature Paper](https://img.shields.io/badge/DOI-10.1038%2Fs41586--020--2649--2-blue)]( +https://doi.org/10.1038/s41586-020-2649-2) +[![LFX Health Score](https://insights.linuxfoundation.org/api/badge/health-score?project=numpy)](https://insights.linuxfoundation.org/project/numpy) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/numpy/numpy/badge)](https://securityscorecards.dev/viewer/?uri=github.com/numpy/numpy) +[![Typing](https://img.shields.io/pypi/types/numpy)](https://pypi.org/project/numpy/) + + +NumPy is the fundamental package for scientific computing with Python. + +- **Website:** https://numpy.org +- **Documentation:** https://numpy.org/doc +- **Mailing list:** https://mail.python.org/mailman/listinfo/numpy-discussion +- **Source code:** https://github.com/numpy/numpy +- **Contributing:** https://numpy.org/devdocs/dev/index.html +- **Bug reports:** https://github.com/numpy/numpy/issues +- **Report a security vulnerability:** https://tidelift.com/docs/security + +It provides: + +- a powerful N-dimensional array object +- sophisticated (broadcasting) functions +- tools for integrating C/C++ and Fortran code +- useful linear algebra, Fourier transform, and random number capabilities + +Testing: + +NumPy requires `pytest` and `hypothesis`. Tests can then be run after installation with: + + python -c "import numpy, sys; sys.exit(numpy.test() is False)" + +Code of Conduct +---------------------- + +NumPy is a community-driven open source project developed by a diverse group of +[contributors](https://numpy.org/teams/). The NumPy leadership has made a strong +commitment to creating an open, inclusive, and positive community. Please read the +[NumPy Code of Conduct](https://numpy.org/code-of-conduct/) for guidance on how to interact +with others in a way that makes our community thrive. + +Call for Contributions +---------------------- + +The NumPy project welcomes your expertise and enthusiasm! + +Small improvements or fixes are always appreciated. If you are considering larger contributions +to the source code, please contact us through the [mailing +list](https://mail.python.org/mailman/listinfo/numpy-discussion) first. + +Writing code isn’t the only way to contribute to NumPy. You can also: +- review pull requests +- help us stay on top of new and old issues +- develop tutorials, presentations, and other educational materials +- maintain and improve [our website](https://github.com/numpy/numpy.org) +- develop graphic design for our brand assets and promotional materials +- translate website content +- help with outreach and onboard new contributors +- write grant proposals and help with other fundraising efforts + +For more information about the ways you can contribute to NumPy, visit [our website](https://numpy.org/contribute/). +If you’re unsure where to start or how your skills fit in, reach out! You can +ask on the mailing list or here, on GitHub, by opening a new issue or leaving a +comment on a relevant issue that is already open. + +Our preferred channels of communication are all public, but if you’d like to +speak to us in private first, contact our community coordinators at +numpy-team@googlegroups.com or on Slack (write numpy-team@googlegroups.com for +an invitation). + +We also have a biweekly community call, details of which are announced on the +mailing list. You are very welcome to join. + +If you are new to contributing to open source, [this +guide](https://opensource.guide/how-to-contribute/) helps explain why, what, +and how to successfully get involved. diff --git a/local_packages/numpy-2.4.4.dist-info/RECORD b/local_packages/numpy-2.4.4.dist-info/RECORD new file mode 100644 index 0000000..5c854ec --- /dev/null +++ b/local_packages/numpy-2.4.4.dist-info/RECORD @@ -0,0 +1,1341 @@ +../../bin/f2py.exe,sha256=RFuUH0pwxWsyzql2b2h6AXUVW4uZ1DhzP-eQn1lXaqc,108389 +../../bin/numpy-config.exe,sha256=AedyDtfLJhTdOi9ip608PiLb7ppBTxCPdT3Fcw3ptT4,108389 +numpy-2.4.4.dist-info/DELVEWHEEL,sha256=KO5zV8RoGD-sJvFLKwyHBPEIAlH21Y5EGJcEo6bU-Fc,462 +numpy-2.4.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +numpy-2.4.4.dist-info/METADATA,sha256=j54EM4_6kwhootFXzkT25Mt6xSS5aPsT0EGHpqxXUoI,6608 +numpy-2.4.4.dist-info/RECORD,, +numpy-2.4.4.dist-info/WHEEL,sha256=50PeAbplA6PkI0hYOYoeacB9US1R6EguyfOnsccH0WU,85 +numpy-2.4.4.dist-info/entry_points.txt,sha256=7Cb63gyL2sIRpsHdADpl6xaIW5JTlUI-k_yqEVr0BSw,220 +numpy-2.4.4.dist-info/licenses/LICENSE.txt,sha256=qATf8OrZ-txSk0VkELy_wyvwJL6cRRNFlmP7e0QtI0E,45831 +numpy-2.4.4.dist-info/licenses/numpy/_core/include/numpy/libdivide/LICENSE.txt,sha256=1UR2FVi1EIZsIffootVxb8p24LmBF-O2uGMU23JE0VA,1039 +numpy-2.4.4.dist-info/licenses/numpy/_core/src/common/pythoncapi-compat/COPYING,sha256=rqQZdscg-RvRlyXBaRahrOQOsepCYg00Dlh6XeS7zqA,704 +numpy-2.4.4.dist-info/licenses/numpy/_core/src/highway/LICENSE,sha256=0ggVLid3_9ASbqzBvSHAMg1p5Oa2ItIaKhKkfo73N2M,21155 +numpy-2.4.4.dist-info/licenses/numpy/_core/src/multiarray/dragon4_LICENSE.txt,sha256=sP1ZLU6tVjFI45d-e1iRadRqXvL_Ezx1WoGviAo_yRk,1441 +numpy-2.4.4.dist-info/licenses/numpy/_core/src/npysort/x86-simd-sort/LICENSE.md,sha256=pzKXeFNTAGZ5DEKBcIf0gptRjdmNNAyRasv9In4508s,1542 +numpy-2.4.4.dist-info/licenses/numpy/_core/src/umath/svml/LICENSE,sha256=WFSDkFsT1LQvgAxVupnonW4uM8zV9QPiAcB1MdFI_0E,1573 +numpy-2.4.4.dist-info/licenses/numpy/fft/pocketfft/LICENSE.md,sha256=jbAA4VjBYuckl4eFLYxpdkcGUCzO5yXAZkGzHB6VyK8,1523 +numpy-2.4.4.dist-info/licenses/numpy/linalg/lapack_lite/LICENSE.txt,sha256=GQYO9GXheX1oLuD8MIG9JasTlemV5D0lydE8ldmjzWs,2314 +numpy-2.4.4.dist-info/licenses/numpy/ma/LICENSE,sha256=1427IIuA2StNMz5BpLquUNEkRPRuUxmfp3Jqkd5uLac,1616 +numpy-2.4.4.dist-info/licenses/numpy/random/LICENSE.md,sha256=tLwvT6HJV3jx7T3Y8UcGvs45lHW5ePnzS1081yUhtIo,3582 +numpy-2.4.4.dist-info/licenses/numpy/random/src/distributions/LICENSE.md,sha256=z1YZ8lsRLVznyJbv_8jmELyzRd-tzBnywUYKpI__LJ4,2805 +numpy-2.4.4.dist-info/licenses/numpy/random/src/mt19937/LICENSE.md,sha256=ksEGmYhDIfHClH_ipwqyOw-_FQf3Vtid0I84xh_D1Po,2986 +numpy-2.4.4.dist-info/licenses/numpy/random/src/pcg64/LICENSE.md,sha256=QRkGY7d77lMCOGSVoHt-q_v6Vxqgri8R2mdkFiBi_E8,1176 +numpy-2.4.4.dist-info/licenses/numpy/random/src/philox/LICENSE.md,sha256=MirEbYcMiu6tbFOtzMyDkpb7PI5Fbq4VUIEpfa9_gBM,1540 +numpy-2.4.4.dist-info/licenses/numpy/random/src/sfc64/LICENSE.md,sha256=KMrYLOE3ncUp3zDaeOwBLX0NHVdrA93LkziK7lNln1k,1253 +numpy-2.4.4.dist-info/licenses/numpy/random/src/splitmix64/LICENSE.md,sha256=RU7HNQ5HjgNwYYULZzlXU7QlscsKNLvCYpCwIL-J12M,340 +numpy.libs/libscipy_openblas64_-63c857e738469261263c764a36be9436.dll,sha256=Y8hX5zhGkmEmPHZKNr6UNuveqicuNAqCj0IEepcTEIA,20415488 +numpy.libs/msvcp140-a4c2229bdc2a2a630acdc095b4d86008.dll,sha256=pMIim9wqKmMKzcCVtNhgCOXD47x3cxdDVPPaT1vrnN4,575056 +numpy/__config__.py,sha256=VoigzY8bJxeS_fDXYElk-6gQrliYTdnTpoou3Ztp5Q8,5749 +numpy/__config__.pyi,sha256=L6Ml7eJWLoOvEOeI5FCVERMuiPVQUkFOuTmG5lduhzc,2486 +numpy/__init__.cython-30.pxd,sha256=PByDh5eNFDlvCdDfpw7J1RwWPUItbLHUeuoneaoWx7g,48396 +numpy/__init__.pxd,sha256=C1ZTNkpIVrh_YD9ZYrC0beDjHbDJgjcqFKUKddj28k4,44944 +numpy/__init__.py,sha256=sWpPNHxlg8h44Zc6IIVk4BaGt5zHCRGtAVfgXY7s2jc,27212 +numpy/__init__.pyi,sha256=36E0y_ydNWWLd4TItoFZjWMKOF_ILN0m2PyA1XAurZI,242576 +numpy/__pycache__/__config__.cpython-312.pyc,, +numpy/__pycache__/__init__.cpython-312.pyc,, +numpy/__pycache__/_array_api_info.cpython-312.pyc,, +numpy/__pycache__/_configtool.cpython-312.pyc,, +numpy/__pycache__/_distributor_init.cpython-312.pyc,, +numpy/__pycache__/_expired_attrs_2_0.cpython-312.pyc,, +numpy/__pycache__/_globals.cpython-312.pyc,, +numpy/__pycache__/_pytesttester.cpython-312.pyc,, +numpy/__pycache__/conftest.cpython-312.pyc,, +numpy/__pycache__/dtypes.cpython-312.pyc,, +numpy/__pycache__/exceptions.cpython-312.pyc,, +numpy/__pycache__/matlib.cpython-312.pyc,, +numpy/__pycache__/version.cpython-312.pyc,, +numpy/_array_api_info.py,sha256=AeGuRBPBcJ0PeOSi2AB3rkOHFTkWa1N7tYq_Gj9ri3o,10731 +numpy/_array_api_info.pyi,sha256=c77RlN8Pan3QlqXonfTzus7yWI3F4BDMtedWlVO3q-s,5068 +numpy/_configtool.py,sha256=qqay_oP0bqkryU55jFBE6RzgKJkquJ0bBvaYt9D-Gbs,1046 +numpy/_configtool.pyi,sha256=IlC395h8TlcZ4DiSW5i6NBQO9I74ERfXpwSYAktzoaU,25 +numpy/_core/__init__.py,sha256=c8V6fzpU4zOZ3bmxQusn7TquK_S-L0wVk5Z4jha2IOU,6865 +numpy/_core/__init__.pyi,sha256=eRDqlKeg5cDyDbN2Mfq8mUDRvD-hxmIkoEx-89UmMPk,10639 +numpy/_core/__pycache__/__init__.cpython-312.pyc,, +numpy/_core/__pycache__/_add_newdocs.cpython-312.pyc,, +numpy/_core/__pycache__/_add_newdocs_scalars.cpython-312.pyc,, +numpy/_core/__pycache__/_asarray.cpython-312.pyc,, +numpy/_core/__pycache__/_dtype.cpython-312.pyc,, +numpy/_core/__pycache__/_dtype_ctypes.cpython-312.pyc,, +numpy/_core/__pycache__/_exceptions.cpython-312.pyc,, +numpy/_core/__pycache__/_internal.cpython-312.pyc,, +numpy/_core/__pycache__/_methods.cpython-312.pyc,, +numpy/_core/__pycache__/_string_helpers.cpython-312.pyc,, +numpy/_core/__pycache__/_type_aliases.cpython-312.pyc,, +numpy/_core/__pycache__/_ufunc_config.cpython-312.pyc,, +numpy/_core/__pycache__/arrayprint.cpython-312.pyc,, +numpy/_core/__pycache__/cversions.cpython-312.pyc,, +numpy/_core/__pycache__/defchararray.cpython-312.pyc,, +numpy/_core/__pycache__/einsumfunc.cpython-312.pyc,, +numpy/_core/__pycache__/fromnumeric.cpython-312.pyc,, +numpy/_core/__pycache__/function_base.cpython-312.pyc,, +numpy/_core/__pycache__/getlimits.cpython-312.pyc,, +numpy/_core/__pycache__/memmap.cpython-312.pyc,, +numpy/_core/__pycache__/multiarray.cpython-312.pyc,, +numpy/_core/__pycache__/numeric.cpython-312.pyc,, +numpy/_core/__pycache__/numerictypes.cpython-312.pyc,, +numpy/_core/__pycache__/overrides.cpython-312.pyc,, +numpy/_core/__pycache__/printoptions.cpython-312.pyc,, +numpy/_core/__pycache__/records.cpython-312.pyc,, +numpy/_core/__pycache__/shape_base.cpython-312.pyc,, +numpy/_core/__pycache__/strings.cpython-312.pyc,, +numpy/_core/__pycache__/umath.cpython-312.pyc,, +numpy/_core/_add_newdocs.py,sha256=AqC1zmg0pjGzVqgNhGsKfhie5y_ejJDXECUqQ6k5kf8,223247 +numpy/_core/_add_newdocs.pyi,sha256=ipvUNHAUG15ziSDqc4aGEfQ1jrqpW-zJrogC8G0DOog,136 +numpy/_core/_add_newdocs_scalars.py,sha256=ViMWUAIlsUKosd1mEpLo907cZeaS7Vg_GerzCDIAUUQ,13314 +numpy/_core/_add_newdocs_scalars.pyi,sha256=NNXiuLe2kckDpvpR_WTesOZG1AWSQapqIlGD7tgVtxE,565 +numpy/_core/_asarray.py,sha256=MU1nK76mQ4gDZCWZbdV-zmkmZphAVAkNwghbk4TgqlQ,4024 +numpy/_core/_asarray.pyi,sha256=7oijVMV32GQMEv5xV8sOYfjHHJCjZSo_oJT4zr6u-Kw,1190 +numpy/_core/_dtype.py,sha256=TEjPZXHmpw5-HhfGfKS-PfQT9A9sk8cnaE6icAXVb34,10913 +numpy/_core/_dtype.pyi,sha256=1_fGj6V5NxTYRkN2-cfZBOQOkc1rJ1wET2deS1ANpdw,1888 +numpy/_core/_dtype_ctypes.py,sha256=e8EgfaqXiJ8-UYi8FM5sm9W8ehqvcG_rTpDROaKwTKg,3846 +numpy/_core/_dtype_ctypes.pyi,sha256=d5BudSdtj6n046OX9c-rUoX5zVGghdoO22yEhkjVRoM,3765 +numpy/_core/_exceptions.py,sha256=umxWh9TLhDXy9LmW77wkIHMegFk_KFfBB5ndxsJd5F4,5321 +numpy/_core/_exceptions.pyi,sha256=cWcq9Uf4GrYuKI5IEH-ioVaYDYU1adD9WIPpNu02d-M,1904 +numpy/_core/_internal.py,sha256=LDBumIfjDMIO8cNSfo014AcYngsdkXms6TFLNetZuxY,30405 +numpy/_core/_internal.pyi,sha256=Kax8zq8oAmuPIpkUDU-7ojkjR3C_wtHcA8uNHeftJmI,2212 +numpy/_core/_methods.py,sha256=XdcMBZ5zWZFpUJiL-im2SCtsvyYn_NDirSWICkzt2fw,9645 +numpy/_core/_methods.pyi,sha256=7Mc4H9O3KYU9VwCsOo8X0q6mg9vDr2S6xbwuJ7PXPX4,548 +numpy/_core/_multiarray_tests.cp312-win_amd64.lib,sha256=k0-cBmU3U8rRlWwPAaXOkvcuriKjCdMisPPZoPNrOfo,2418 +numpy/_core/_multiarray_tests.cp312-win_amd64.pyd,sha256=o3cSt6DhL7sotcd4c4hhQPejbcfBHQ_7Fzn7B_h5rUs,65536 +numpy/_core/_multiarray_umath.cp312-win_amd64.lib,sha256=rk8jrztqX0mKn-a1sKdrmG4wchiKSMna7I7pfWEtls4,2192 +numpy/_core/_multiarray_umath.cp312-win_amd64.pyd,sha256=9FI8Lf5kGpBGBQKc5KUEY4i9YbGK_AWU00AYtebYSPg,3714560 +numpy/_core/_operand_flag_tests.cp312-win_amd64.lib,sha256=4zuvIWCgAioiVr9WW1V79A5ApVFKwGor2YFXTZkBEgA,2228 +numpy/_core/_operand_flag_tests.cp312-win_amd64.pyd,sha256=fk6XOPVUzP_pis4QYY2WtAeE-m3-ExOl5z-3LcV1xzY,12288 +numpy/_core/_rational_tests.cp312-win_amd64.lib,sha256=-TfsZYLNOb7MjF_uwL9wfwtjV_sw_gvOYZGa_rZtGxs,2156 +numpy/_core/_rational_tests.cp312-win_amd64.pyd,sha256=kiw8oW3VvhyJqrMvWEJyy5qc6jLf9yiyf90XzpvoXdI,39936 +numpy/_core/_simd.cp312-win_amd64.lib,sha256=JWRZmOVnXvW6wdG0uRVpFgFMEFRddVt6QdY_3KcoJuk,1976 +numpy/_core/_simd.cp312-win_amd64.pyd,sha256=X7GFc1lP8lr7VjWaY0nwBuN9-3Hk3iww2tjA-5GVe_U,832000 +numpy/_core/_simd.pyi,sha256=vLr3cmfU5D-UytIPEzQHUwoEOZGzfLI63uwPYN-bvIU,1013 +numpy/_core/_string_helpers.py,sha256=aX1N5UsNeUFy54o5tuTC6X6N9AJueFN2_6QIyIUD2Xg,2945 +numpy/_core/_string_helpers.pyi,sha256=bThH7ichGlrmQ6O38n71QcJ7Oi_mRPUf-DFMU5hdYU0,370 +numpy/_core/_struct_ufunc_tests.cp312-win_amd64.lib,sha256=0MVBvXjaolM30TfYRYYEgaN1ldzfc7HZgvukG3lW4TA,2228 +numpy/_core/_struct_ufunc_tests.cp312-win_amd64.pyd,sha256=m7XafLFmecEwAlRy_7EIJIIaGXLTdaaYdnRa537LBak,14336 +numpy/_core/_type_aliases.py,sha256=U51em1dU7qluFYICpmrygSHLfk-y4UoqD0JJM-psCjE,3886 +numpy/_core/_type_aliases.pyi,sha256=-t_aBa6lQ06Z0wsOzpiE84XW0-2FU-hkA3HSTWlavAk,2306 +numpy/_core/_ufunc_config.py,sha256=xeJy3K7KPV5PpJbkW9ncWY_UnnLHlM9M6rX4C9eNs2k,16126 +numpy/_core/_ufunc_config.pyi,sha256=Fpec58dVzJmrKRSiOsnWZvdS5wuyORMmPkzoOy15YwY,1930 +numpy/_core/_umath_tests.cp312-win_amd64.lib,sha256=2KfxC8AutTLkby3KPfdmEPVQP2244OdVLbtHq7JtR5w,2104 +numpy/_core/_umath_tests.cp312-win_amd64.pyd,sha256=rbSNiCvIbiz7Me3iMW_WmgRlkLZ-XymmJP5EtHeVqgc,33280 +numpy/_core/_umath_tests.pyi,sha256=KwadAqagA7lloXXeOQQp3Gkp0T_HuuT87pI4dz3ttTY,2344 +numpy/_core/arrayprint.py,sha256=lpxDgWjCALcJThUtdKVUVOiM4Vrd4Xn7qaHnquc0tzw,67050 +numpy/_core/arrayprint.pyi,sha256=El-bKIjihbdXa2N05qohoYm_amZG6gwGn1lLsc19f1E,4688 +numpy/_core/cversions.py,sha256=FISv1d4R917Bi5xJjKKy8Lo6AlFkV00WvSoB7l3acA4,360 +numpy/_core/defchararray.py,sha256=4S8fU1YI2vGGUGYxRSr0536JJTO-S-Y9aMYe6BnQipY,39231 +numpy/_core/defchararray.pyi,sha256=qeYhLiloeDFogxBxYd_Ax9DDHQNI8nu2wyp3X2qfLYw,29794 +numpy/_core/einsumfunc.py,sha256=_mRaqmbQeZ3TDW4Y6gLz4nmDE4WfFL9UxKSAJBzGujc,59578 +numpy/_core/einsumfunc.pyi,sha256=j7eufFy3WhOcAGWT9FO8dxHB2kTLJNH6S84MPxtALS8,5109 +numpy/_core/fromnumeric.py,sha256=5T-r7fxkHbdvJiBfWVD6mwI7bbi2E95F0z9XFeBq-CI,146891 +numpy/_core/fromnumeric.pyi,sha256=o6l_ETLBkqmhOq8Ga3QBxWznwpZbfY94fX1UDrJKsXM,46598 +numpy/_core/function_base.py,sha256=zBvtIUZoUVe_sGSPdnDmOXc9GMX232Iqx5FtkMp5e8c,20224 +numpy/_core/function_base.pyi,sha256=Z5DaKdqR3t1sun85xnT2t6OkUsyDSW6gAO8oNBmfEjY,7321 +numpy/_core/getlimits.py,sha256=tX7eDWVY3113HFBztt5JOttUTI31S4-rC0bVXYJRG4M,15465 +numpy/_core/getlimits.pyi,sha256=Ok2rv9SELwkPHbC4qttkIDRpUZbv7ey6LDvCGvUSEeE,3921 +numpy/_core/include/numpy/__multiarray_api.c,sha256=ucLypGZeaaHhl2OX-4YQrrCGTUl-8XwObmmN8zQjRjU,13074 +numpy/_core/include/numpy/__multiarray_api.h,sha256=9OcLvWKEidFXT_YzbInua3Ft8ruCXgUtG-vl9w9WluQ,63371 +numpy/_core/include/numpy/__ufunc_api.c,sha256=7mulmWQI9Hdlx3IjZF-GWhZYZtrFn4I6tCG0Uoo0LCc,1854 +numpy/_core/include/numpy/__ufunc_api.h,sha256=oxLZE6JL-8gWdzf5RA760Gr7KnjqXhMwycaO53cxaRk,13754 +numpy/_core/include/numpy/_neighborhood_iterator_imp.h,sha256=s5TK2aPpClbw4CbVJCij__hzoh5IgHIIZK0k6FKtqfc,1947 +numpy/_core/include/numpy/_numpyconfig.h,sha256=jIeId55dTsklMKBXl8zsicKcReSVSYn7kIFMLOn-qKE,902 +numpy/_core/include/numpy/_public_dtype_api_table.h,sha256=4ylG8s52kZEx__QODt_7Do8QitmhDSvTeZ7Lar0fOgo,4660 +numpy/_core/include/numpy/arrayobject.h,sha256=ghWzloPUkSaVkcsAnBnpbrxtXeXL-mkzVGJQEHFxjnk,211 +numpy/_core/include/numpy/arrayscalars.h,sha256=LKd4F3obZd65HWDmHD6GS9LJ91r2J0GX0VPVPtTBztE,4522 +numpy/_core/include/numpy/dtype_api.h,sha256=7HG7Pn8WOChR0ifvAOmVARSVnql7dqL07MQLWhRHpco,22002 +numpy/_core/include/numpy/halffloat.h,sha256=qYgX5iQfNzXICsnd0MCRq5ELhhfFjlRGm1xXGimQm44,2029 +numpy/_core/include/numpy/ndarrayobject.h,sha256=WWy6pljDNLhzFf0QWNfvH7-jO2afkRbbrtFYSet16v4,12359 +numpy/_core/include/numpy/ndarraytypes.h,sha256=N8FTdkiDL8l04ZriXBDDDEA0ycaPUVkb8WCAtkdLoLk,68964 +numpy/_core/include/numpy/npy_2_compat.h,sha256=VxsRXAtDfLlXkvH-ErZRSuH49k9EjcFwcSUSfTPRzAU,8795 +numpy/_core/include/numpy/npy_2_complexcompat.h,sha256=uW0iF-qMwQNn4PvIfWCrYce6b4OrYUO4BWu-VYYAZag,885 +numpy/_core/include/numpy/npy_3kcompat.h,sha256=0yiCfXGefB938r_IykVf5DaRUJQeYOLvFY__VOF0ezI,10047 +numpy/_core/include/numpy/npy_common.h,sha256=fjuQ8bWFPrfhmrzet2yUQEmAeNwWn5AwgLYTbTMSEZE,33873 +numpy/_core/include/numpy/npy_cpu.h,sha256=G1W2dMD7IbMjCrOmAtEv7nNOH6awBsY0fotNAp3ebaE,4478 +numpy/_core/include/numpy/npy_endian.h,sha256=f1BT0ALH-Gca3cco_CgTm53HMLxmBmo4ZfzVxxLNlKM,2957 +numpy/_core/include/numpy/npy_math.h,sha256=ksdiKBXDfpEHB1s9m5yinyhjdcc0h-zJcfXEuoVHAd8,19460 +numpy/_core/include/numpy/npy_no_deprecated_api.h,sha256=jIcjEP2AbovDTfgE-qtvdP51_dVGjVnEGBX86rlGSKE,698 +numpy/_core/include/numpy/npy_os.h,sha256=j044vd1C1oCcW52r3htiVNhUaJSEqCjKrODwMHq3TU0,1298 +numpy/_core/include/numpy/numpyconfig.h,sha256=1EsdOVVuZb1tH7Yi1UO0q0yyZtuMRqMsnI0gBFhKNLo,7651 +numpy/_core/include/numpy/random/LICENSE.txt,sha256=1UR2FVi1EIZsIffootVxb8p24LmBF-O2uGMU23JE0VA,1039 +numpy/_core/include/numpy/random/bitgen.h,sha256=_H0uXqmnub4PxnJWdMWaNqfpyFDu2KB0skf2wc5vjUc,508 +numpy/_core/include/numpy/random/distributions.h,sha256=GLURa3sFESZE0_0RK-3Gqmfa96itBHw8LlsNyy9EPt4,10070 +numpy/_core/include/numpy/random/libdivide.h,sha256=F9PLx6TcOk-sd0dObe0nWLyz4HhbHv2K7voR_kolpGU,82217 +numpy/_core/include/numpy/ufuncobject.h,sha256=uI5m_WOrFQtaL3BwgRmiZ7BN8CypKXfC5EcQfdhH-Eg,12123 +numpy/_core/include/numpy/utils.h,sha256=vzJAbatJYfxHmX2yL_xBirmB4mEGLOhJ92JlV9s8yPs,1222 +numpy/_core/lib/npy-pkg-config/mlib.ini,sha256=hYWFyoBxE036dh19si8UPka01H2cv64qlc4ZtgoA_7A,156 +numpy/_core/lib/npy-pkg-config/npymath.ini,sha256=e0rdsb00Y93VuammuvIIFlzZtnUAXwsS1XNKlCU8mFQ,381 +numpy/_core/lib/npymath.lib,sha256=zBN3lMslL0WLTnR9FAxEWNb_FxRTNJgoTsstGz-RbNU,156686 +numpy/_core/lib/pkgconfig/numpy.pc,sha256=61kHXIsM928Dw-8UBP-ER_hafUdzMYvkec7wUwEhtw8,198 +numpy/_core/memmap.py,sha256=7HWQGjK5bS3SzAPx4wAlHwH6YFrX13sBpa6E52zxatc,13014 +numpy/_core/memmap.pyi,sha256=n0kBe4iQD5lcWvAvVhdUU18YIoPX6Sf5e2qh9IdO5uQ,50 +numpy/_core/multiarray.py,sha256=fiWmU-pL0WOeS7TT_flBueqKmz08gibdrBFCoanSSoE,58526 +numpy/_core/multiarray.pyi,sha256=JPMPttIq_yU0K1QgJ6r3mQwUWJzCcvoXqiTcSUWZ46k,36157 +numpy/_core/numeric.py,sha256=f36Y3XitfH2ga7zH2EZ4gctI1TywvTYoJyqtFepXpNA,85809 +numpy/_core/numeric.pyi,sha256=TwBDlYXpbMmBHv-uAIzagUCnTZdaxfKHLU-OblqprBw,32989 +numpy/_core/numerictypes.py,sha256=sBii4N4PX66DFZJ_QlDepfugXHToV4MqVTl9DrkTCko,16600 +numpy/_core/numerictypes.pyi,sha256=BuVBOeILfBBRLLVinSA_mCVqYtgoaM4ukXK2MOr4xVQ,3699 +numpy/_core/overrides.py,sha256=7xQXbKNNTDqDFR3cL6vcBbgczarMVdOiKjWfHM5zS4A,7668 +numpy/_core/overrides.pyi,sha256=yiWM44pF9yPJn3-VOebjxtwU0W9PyHV0QxCA2lkDrMQ,1777 +numpy/_core/printoptions.py,sha256=ZXekBr6fI18dVxsM6bxAGi80CiMlaMN4dpbPHDQiBOI,1088 +numpy/_core/printoptions.pyi,sha256=QE36MVL3BgqflyQuj6UOzywbnELMiLeyNz_1sALvOSU,622 +numpy/_core/records.py,sha256=AOi0UTbYqHe8U5AntKvc967O8hmtVw8pSqiahfJhtI8,37842 +numpy/_core/records.pyi,sha256=ecWlyL8cskXhQihOa2iyNNANhqYLhACgf2u7UaINjRk,9515 +numpy/_core/shape_base.py,sha256=oy42iPojaLjQjsQy1c35Xl72w0VtLZCbtipZlb8XGoo,33708 +numpy/_core/shape_base.pyi,sha256=_k-bDbnUgvQTYBn1I_Y75sUz23B-N3nhw07obXhbmp4,5437 +numpy/_core/strings.py,sha256=CJ6R0-LYceFfxuaOLhZL1tZwWvYdlaoaT3f0qLXpj1o,52391 +numpy/_core/strings.pyi,sha256=d3MRp3JMOqg7qiMNM5E3lJ6qWZmpR9m-h8DCNsrf0KI,14127 +numpy/_core/tests/__pycache__/_locales.cpython-312.pyc,, +numpy/_core/tests/__pycache__/_natype.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test__exceptions.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_abc.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_api.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_argparse.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_array_api_info.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_array_coercion.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_array_interface.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_arraymethod.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_arrayobject.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_arrayprint.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_casting_floatingpoint_errors.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_casting_unittests.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_conversion_utils.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_cpu_dispatcher.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_cpu_features.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_custom_dtypes.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_cython.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_datetime.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_defchararray.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_deprecations.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_dlpack.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_dtype.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_einsum.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_errstate.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_extint128.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_finfo.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_function_base.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_getlimits.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_half.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_hashtable.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_indexerrors.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_indexing.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_item_selection.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_limited_api.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_longdouble.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_mem_overlap.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_mem_policy.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_memmap.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_multiarray.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_multiprocessing.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_multithreading.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_nditer.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_nep50_promotions.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_numeric.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_numerictypes.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_overrides.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_print.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_protocols.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_records.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_regression.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_scalar_ctors.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_scalar_methods.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_scalarbuffer.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_scalarinherit.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_scalarmath.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_scalarprint.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_shape_base.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_simd.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_simd_module.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_stringdtype.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_strings.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_ufunc.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_umath.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_umath_accuracy.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_umath_complex.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_unicode.cpython-312.pyc,, +numpy/_core/tests/_locales.py,sha256=byq7PFI0o_eF8Ddsvgj2EQ7oEjgxYZEa2EW0SJmR_xc,2248 +numpy/_core/tests/_natype.py,sha256=ncMM01bhYe4KxM62PTwIaH6Xpze6D9mSVFZSQqqaZzQ,4531 +numpy/_core/tests/data/astype_copy.pkl,sha256=lWSzCcvzRB_wpuRGj92spGIw-rNPFcd9hwJaRVvfWdk,716 +numpy/_core/tests/data/generate_umath_validation_data.cpp,sha256=9TBdxpPo0djv1CKxQ6_DbGKRxIZVawitAm7AMmWKroI,6012 +numpy/_core/tests/data/recarray_from_file.fits,sha256=NA0kliz31FlLnYxv3ppzeruONqNYkuEvts5wzXEeIc4,8640 +numpy/_core/tests/data/umath-validation-set-README.txt,sha256=GfrkmU_wTjpLkOftWDuGayEDdV3RPpN2GRVQX61VgWI,982 +numpy/_core/tests/data/umath-validation-set-arccos.csv,sha256=VUdQdKBFrpXHLlPtX2WYIK_uwkaXgky85CZ4aNuvmD4,62794 +numpy/_core/tests/data/umath-validation-set-arccosh.csv,sha256=tbuOQkvnYxSyJf_alGk3Zw3Vyv0HO5dMC1hUle2hWwQ,62794 +numpy/_core/tests/data/umath-validation-set-arcsin.csv,sha256=JPEWWMxgPKdNprDq0pH5QhJ2oiVCzuDbK-3WhTKny8o,62768 +numpy/_core/tests/data/umath-validation-set-arcsinh.csv,sha256=fwuq25xeS57kBExBuSNfewgHb-mgoR9wUGVqcOXbfoI,61718 +numpy/_core/tests/data/umath-validation-set-arctan.csv,sha256=nu33YyL-ALXSSF5cupCTaf_jTPLK_QyUfciNQGpffkY,61734 +numpy/_core/tests/data/umath-validation-set-arctanh.csv,sha256=wHSKFY2Yvbv3fnmmfLqPYpjhkEM88YHkFVpZQioyBDw,62768 +numpy/_core/tests/data/umath-validation-set-cbrt.csv,sha256=FFi_XxEnGrfJd7OxtjVFT6WFC2tUqKhVV8fmQfb0z8o,62275 +numpy/_core/tests/data/umath-validation-set-cos.csv,sha256=ccDri5_jQ84D_kAmSwZ_ztNUPIhzhgycDtNsPB7m8dc,60497 +numpy/_core/tests/data/umath-validation-set-cosh.csv,sha256=DnN6RGvKQHAWIofchmhGH7kkJej2VtNwGGMRZGzBkTQ,62298 +numpy/_core/tests/data/umath-validation-set-exp.csv,sha256=mPhjF4KLe0bdwx38SJiNipD24ntLI_5aWc8h-V0UMgM,17903 +numpy/_core/tests/data/umath-validation-set-exp2.csv,sha256=sD94pK2EAZAyD2fDEocfw1oXNw1qTlW1TBwRlcpbcsI,60053 +numpy/_core/tests/data/umath-validation-set-expm1.csv,sha256=tyfZN5D8tlm7APgxCIPyuy774AZHytMOB59H9KewxEs,61728 +numpy/_core/tests/data/umath-validation-set-log.csv,sha256=CDPky64PjaURWhqkHxkLElmMiI21v5ugGGyzhdfUbnI,11963 +numpy/_core/tests/data/umath-validation-set-log10.csv,sha256=dW6FPEBlRx2pcS-7eui_GtqTpXzOy147il55qdP-8Ak,70551 +numpy/_core/tests/data/umath-validation-set-log1p.csv,sha256=2aEsHVcvRym-4535CkvJTsmHywkt01ZMfmjl-d4fvVI,61732 +numpy/_core/tests/data/umath-validation-set-log2.csv,sha256=aVZ7VMQ5urGOx5MMMOUmMKBhFLFE-U7y6DVCTeXQfo0,70546 +numpy/_core/tests/data/umath-validation-set-sin.csv,sha256=GvPrQUEYMX1iB2zjbfK26JUJOxtqbfiRUgXuAO1QcP0,59981 +numpy/_core/tests/data/umath-validation-set-sinh.csv,sha256=lc7OYcYWWpkxbMuRAWmogQ5cKi7EwsQ2ibiMdpJWYbw,61722 +numpy/_core/tests/data/umath-validation-set-tan.csv,sha256=fn7Dr9s6rcqGUzsmyJxve_Z18J4AUaSm-uo2N3N_hfk,61728 +numpy/_core/tests/data/umath-validation-set-tanh.csv,sha256=xSY5fgfeBXN6fal4XDed-VUcgFIy9qKOosa7vQ5v1-U,61728 +numpy/_core/tests/examples/cython/__pycache__/setup.cpython-312.pyc,, +numpy/_core/tests/examples/cython/checks.pyx,sha256=aK7SQ35m2pWVaf_C8c06fn8Onj7YVzcpR--gDezu_Wk,11324 +numpy/_core/tests/examples/cython/meson.build,sha256=EaUdTgpleUBROExDaFVMnWIYW4XDxFLFGK9ej_pTtQg,1311 +numpy/_core/tests/examples/cython/setup.py,sha256=h5vJxfwGpwRWaa7iWTYeCstbcDNHN0Yd_rP963v7sZ0,898 +numpy/_core/tests/examples/limited_api/__pycache__/setup.cpython-312.pyc,, +numpy/_core/tests/examples/limited_api/limited_api1.c,sha256=93ZjLJTtIfXUE7sc2QuEohjHzNSxapzZhgFXtTn8Yyg,326 +numpy/_core/tests/examples/limited_api/limited_api2.pyx,sha256=4P5-yu0yr8NBa-TFtw4v30LGjccRroRAQFFLaztEK9I,214 +numpy/_core/tests/examples/limited_api/limited_api_latest.c,sha256=YRgkeYJEtIfijcJwRqyz97ItrkUwOoiyrKy90WTkQW4,471 +numpy/_core/tests/examples/limited_api/meson.build,sha256=Sin_YDuMzgpDW8n2_WWJ5EYgRQg7FPs7JiJPaUEbFqw,1725 +numpy/_core/tests/examples/limited_api/setup.py,sha256=47iWsN-5wYB29Lb7vqSjzrAS3UtkdFufkt93XzzG-lE,461 +numpy/_core/tests/test__exceptions.py,sha256=ov3cdaYBfP28w_FcLF57ROlF5w6fwCFRNcmOVyRA-IU,3012 +numpy/_core/tests/test_abc.py,sha256=qdC7_lkQvaF_3A4xJ9H_Ih3FDlMpA9dxQHjsg4Tn-uc,2275 +numpy/_core/tests/test_api.py,sha256=mFEuLfhn2f0piUdgc-MvYw6jKKQJLdrkaN1tPZBbAUE,24915 +numpy/_core/tests/test_argparse.py,sha256=vPctuxToPkZMlbgjnzE924XkxXYUdBxlR6LsP2_-aQM,2914 +numpy/_core/tests/test_array_api_info.py,sha256=YySxzABrxjo2XVC9bwslv5VGBIiDK5N0DXpKLfhwBio,3176 +numpy/_core/tests/test_array_coercion.py,sha256=tjGrdd1RGUC9vyvu9SST6L06EubVwAoEPJD40CbtL1c,36319 +numpy/_core/tests/test_array_interface.py,sha256=s-mrGDOBpWOfIShNHrnfPuUeZDTBX5eD8R1kY4-JrUc,8065 +numpy/_core/tests/test_arraymethod.py,sha256=piiJcgPMH7cx15UykJyj_WVnzH51wIyxQIyftLtsmHE,3307 +numpy/_core/tests/test_arrayobject.py,sha256=xM3NSqoUuPJYGf_1v7DG0_g-1JCeq7KnevbIllgSbi8,3431 +numpy/_core/tests/test_arrayprint.py,sha256=UGJplHWOJaaZb6bYeKgK0akATbJP9l6H2NDvtLwb3kA,51898 +numpy/_core/tests/test_casting_floatingpoint_errors.py,sha256=fMotyIWxYMxJ_mF7zZMg3j3l7j-C6nfm_YUPw1ln5dA,5230 +numpy/_core/tests/test_casting_unittests.py,sha256=BUtpp1cJcfH-vfdicDThvcEUUtXpadhk6ulUvoVm0Aw,41899 +numpy/_core/tests/test_conversion_utils.py,sha256=Kh56ducSAax3n8E9cXQ66GvE1ZXZ_pkWTpQy0jTEwAk,6715 +numpy/_core/tests/test_cpu_dispatcher.py,sha256=oS8EAcRN88tQJ8DhSnwcLNNotmxbfMY-xrzntwcKFxw,1619 +numpy/_core/tests/test_cpu_features.py,sha256=-EjCEVnOKGEXRJMcNlTwH9PfGZH5IhEor6kBzJ0plwY,17422 +numpy/_core/tests/test_custom_dtypes.py,sha256=QBlykvsEq9Wns38ey1gRHn0uaJjwsp2Cfm8FKiQiRHY,14889 +numpy/_core/tests/test_cython.py,sha256=_HkOSg0ZZva900rGpqOQueHPGZYlViblavjBfg9j3Sg,11020 +numpy/_core/tests/test_datetime.py,sha256=yOjI6ZCUrH1ZynGipzoHoNR7roETnRrIDp9eIbjEU_w,127760 +numpy/_core/tests/test_defchararray.py,sha256=bADzj5CgBanE83hUp9-bPgAIsD6X9nVP59M8koH_4nQ,31541 +numpy/_core/tests/test_deprecations.py,sha256=2c7iQRBI7123mHRy-9BkgNwtW57gq9Ooy5JtUfHdeG4,17940 +numpy/_core/tests/test_dlpack.py,sha256=YJu603N6gqqraJx8f5xzArbmYVFPFDJ32DZyYom2q5c,6021 +numpy/_core/tests/test_dtype.py,sha256=4TKjus3b0tRykFwp36QF3gzagffeyi6KKZJ7N_KS4ig,85332 +numpy/_core/tests/test_einsum.py,sha256=4XOd0yyzDKW4bh5bkFGT5QD2Qs5S_EgDgq0XHyDf4qc,59244 +numpy/_core/tests/test_errstate.py,sha256=UbPwl97JxUu050tMPJN2NTyqFkUaNpRKwqex34DxJ1E,4759 +numpy/_core/tests/test_extint128.py,sha256=pKScJ8lsYGfvX4rMTAf9bwoTzLYkK1hnLZmfF1jKAo4,5842 +numpy/_core/tests/test_finfo.py,sha256=Py3BfM_eaxrbYg0bhesDMT6LtiaGfCOjlXcTZ_CnvSs,2574 +numpy/_core/tests/test_function_base.py,sha256=NuclhwR2CVPl_bnKrFOPAksM7ssK9dmaFKaGrzyGqQs,18187 +numpy/_core/tests/test_getlimits.py,sha256=PV40xaQ1thmbtePVWRpYAvFrkvkS6RWi38cB-gbYxOE,5624 +numpy/_core/tests/test_half.py,sha256=SLfewT9i3igCumuVhWpRPT9YqyBYBYSMQ0HgXNuPKnY,25853 +numpy/_core/tests/test_hashtable.py,sha256=SDZHeVow_7hEus0A0hpG-yYorz6Z4jA734xJh2YP6SA,1184 +numpy/_core/tests/test_indexerrors.py,sha256=lQZFzPModGwJDh8kPxU_F1CJAhFJXAVZok31V6JkJyo,4835 +numpy/_core/tests/test_indexing.py,sha256=WRXgPDF4_20vmOi5xqV8U5HXjTghRTeadof0Zpm9Ylw,66315 +numpy/_core/tests/test_item_selection.py,sha256=erSTKqbX9C5i9EJeTI4tIenDW4vlo1PO-wqFRKtBB68,6798 +numpy/_core/tests/test_limited_api.py,sha256=bN6sU8V2vtFOxwhf3n0EtEXJuT5ifw2i6S2DHTDJHMQ,3565 +numpy/_core/tests/test_longdouble.py,sha256=JZ3s4NRsteMn5tW3nG8rPU4XKGlJ50Udu2mkRh0omAg,12824 +numpy/_core/tests/test_mem_overlap.py,sha256=-EocVf7okOS3d3khQz12zm1fRxsnfkXQBdJ2y5lZz8w,30250 +numpy/_core/tests/test_mem_policy.py,sha256=vAjCbfroLALRl2hd-5iqNbCiJbRdwEriKKEB_umWg5M,17301 +numpy/_core/tests/test_memmap.py,sha256=QSo4Z23N-GGkIVXqa8TXOVsCpY-vS8-Aei4wEQiELV8,8546 +numpy/_core/tests/test_multiarray.py,sha256=XoDfL8CmW9G4Btns6783UdY14v7-i5mSg0E2e3dyO5Q,431981 +numpy/_core/tests/test_multiprocessing.py,sha256=fxmHyEXpaYp2f-QEKFSnx5wBJqn3aH0dy_OHNcVb5-s,2089 +numpy/_core/tests/test_multithreading.py,sha256=kT3i0zCxPm7r3RHcfykhm2dKo4PqexVCYkCfEV8Pzyg,12498 +numpy/_core/tests/test_nditer.py,sha256=6jzzlDHpN0za_lCN9ghG8rB14YSWoBPwwlXYjmqYNaI,141372 +numpy/_core/tests/test_nep50_promotions.py,sha256=sw60eH2T2EBrpO0VeulZrHpnRCz6wE0MiA4U9B3_MFQ,10355 +numpy/_core/tests/test_numeric.py,sha256=VCpaYp6i1GdubO_mHtavO6ofEMjMJjZVnyKf5xbaTmM,164905 +numpy/_core/tests/test_numerictypes.py,sha256=fSNA-EhkNQwJU33c4wEBAEgV4rLeibDTdXK0bZG0FOg,24824 +numpy/_core/tests/test_overrides.py,sha256=UDdhmwueEuHjBJQf6NvJ7o8K5A2JRq92a5XG2LQaiEY,28577 +numpy/_core/tests/test_print.py,sha256=UPdSzvGrdCZnB8NoBkY9gKXOkr4NZsUjfaypNgm3iA4,7117 +numpy/_core/tests/test_protocols.py,sha256=b1clvp3Rr7EQ6x8Mtxm9jAiHxPymEU_VJBjwnMingUU,1219 +numpy/_core/tests/test_records.py,sha256=3_z_6E-TKMowzkXujZG0JyxkeiH471GMHWOQeheVm1g,21156 +numpy/_core/tests/test_regression.py,sha256=kmmkC60tAsF1tsl-TAGWm9bHoczfMHQpQ6ooNs5HBXg,98849 +numpy/_core/tests/test_scalar_ctors.py,sha256=RZBTUxU_z4rg7q2fMcRce_VYqjUgcBvL7SF3txjtrzc,6896 +numpy/_core/tests/test_scalar_methods.py,sha256=lJyS9hUf_99haU6K0FKeADApcIdD8iP6GALWb2ECIF8,12839 +numpy/_core/tests/test_scalarbuffer.py,sha256=FkcZR3rDC5tEYhknI_pMhVs-G9jQXcx7hu1WZCR6kvE,5787 +numpy/_core/tests/test_scalarinherit.py,sha256=WVjRrpNkKvQbO2n-joJ7EF6lG1zFcT4pgANAtvT7C5M,2692 +numpy/_core/tests/test_scalarmath.py,sha256=Q4vg6st6pVkDxGGwfJ85d0KFDKfcwzq2rZedow67dHQ,47385 +numpy/_core/tests/test_scalarprint.py,sha256=aDQz8rz3KXQ5EBiaIaxmvHpLaIUkZeI-8PoTqZ4kvyM,20108 +numpy/_core/tests/test_shape_base.py,sha256=CkSHU_VkczeAvIOHSpXt-YQzsBmVt5HnnQVG7i8qFi8,32771 +numpy/_core/tests/test_simd.py,sha256=85MWw13IIWQ88SI0I61gyQar94AaLTfJTvxyFFOxrVs,50239 +numpy/_core/tests/test_simd_module.py,sha256=ml8Wu6vrAwx7Qbs_nft2eR0o3TunJuNkETtVJ271mT4,4055 +numpy/_core/tests/test_stringdtype.py,sha256=jgjl_8BBUjcGCVve2NjOUYbWo7rlKp3uUKfWRukwO6o,60358 +numpy/_core/tests/test_strings.py,sha256=Id52CzqaLS3fF5A5fFbaKNept1FeipXZ92pwcuIRMEM,61679 +numpy/_core/tests/test_ufunc.py,sha256=yBYaC3RnPRPCBOUEEw6wOETcfE8r8FU5RI5g45dsCfU,143272 +numpy/_core/tests/test_umath.py,sha256=pOHP5qxLiSSJ2VhCFJ7Jm58DJ2IEFXpfuE4r8LBetbs,201458 +numpy/_core/tests/test_umath_accuracy.py,sha256=ZCqjVUPFdn_WaKDBwmND_aZZFzdPWomf6_Yui6BV_tk,5953 +numpy/_core/tests/test_umath_complex.py,sha256=CKzN_RQ8o9LPK7Ax9WaF-1kQpSZj8Lg9BGN45GrEMv4,24242 +numpy/_core/tests/test_unicode.py,sha256=6_hND3jRcv1MkmOpjKxZSG-uBKS3WvjUz3rozgimq8U,13356 +numpy/_core/umath.py,sha256=NWmvoyWQXFuCOCQi3rOXYAlNcIfhSiKm4WNzxCCn9fc,2200 +numpy/_core/umath.pyi,sha256=dTF_yXVG0t6Sw-q7oa2otpZV8E0LweJ9Ru6jF-ENQ7M,3948 +numpy/_distributor_init.py,sha256=h5_Cq7ItDrt1JZoAh04aO54ZXsXRRkyGoNFFH9T-08U,436 +numpy/_distributor_init.pyi,sha256=CSrbSp2YYxHTxlX7R0nT3RpH7EloB1wIvo7YOA7QWy8,28 +numpy/_expired_attrs_2_0.py,sha256=Qjxzbnge_WAK5nO2glybK2Bv9gNulmxGofjbGwLLkw8,3849 +numpy/_expired_attrs_2_0.pyi,sha256=V7NCR-ik42HqU4sRKPbPOyFNYoFwREWyw6MLxh28p9Y,1300 +numpy/_globals.py,sha256=QC41LPui5xIF0vbXrBtrBlxJV8JKvxZsCc3d9yPTmLY,4276 +numpy/_globals.pyi,sha256=kst3Vm7ZbznOtHsPya0PzU0KbjRGZ8xhMmTNMafvT-4,297 +numpy/_pyinstaller/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/_pyinstaller/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/_pyinstaller/__pycache__/__init__.cpython-312.pyc,, +numpy/_pyinstaller/__pycache__/hook-numpy.cpython-312.pyc,, +numpy/_pyinstaller/hook-numpy.py,sha256=bJTm7LIuDHC5QyGTqUWE48gYsRcolKm3naQXoE1o_C4,1398 +numpy/_pyinstaller/hook-numpy.pyi,sha256=28DtDC-8ixBS_9WXTKnyzC1o_7K92wUk8_UfN7WBd78,156 +numpy/_pyinstaller/tests/__init__.py,sha256=l38bo7dpp3u1lVMPErlct_5uBLKj35zuS_r35e7c19c,345 +numpy/_pyinstaller/tests/__pycache__/__init__.cpython-312.pyc,, +numpy/_pyinstaller/tests/__pycache__/pyinstaller-smoke.cpython-312.pyc,, +numpy/_pyinstaller/tests/__pycache__/test_pyinstaller.cpython-312.pyc,, +numpy/_pyinstaller/tests/pyinstaller-smoke.py,sha256=xt3dl_DjxuzVTPrqmVmMOZm5-24wBG2TxldQl78Xt1g,1175 +numpy/_pyinstaller/tests/test_pyinstaller.py,sha256=31zWlvlAC2sfhdew97x8aDvcYUaV3Tc_0CwFk8pgKaM,1170 +numpy/_pytesttester.py,sha256=USOh37bWhXAzQLeGD1U63XpDnbkatfr-aiQu-gHkdBA,6529 +numpy/_pytesttester.pyi,sha256=Rbl_BWY754H8JGfNwexVusncuX-3XOQv2PdW0Ua-FxI,521 +numpy/_typing/__init__.py,sha256=KACZs_8l8cNG4TolvZkqv5YKLA7EyG6om9ct4fsXppg,5639 +numpy/_typing/__pycache__/__init__.cpython-312.pyc,, +numpy/_typing/__pycache__/_add_docstring.cpython-312.pyc,, +numpy/_typing/__pycache__/_array_like.cpython-312.pyc,, +numpy/_typing/__pycache__/_char_codes.cpython-312.pyc,, +numpy/_typing/__pycache__/_dtype_like.cpython-312.pyc,, +numpy/_typing/__pycache__/_extended_precision.cpython-312.pyc,, +numpy/_typing/__pycache__/_nbit.cpython-312.pyc,, +numpy/_typing/__pycache__/_nbit_base.cpython-312.pyc,, +numpy/_typing/__pycache__/_nested_sequence.cpython-312.pyc,, +numpy/_typing/__pycache__/_scalars.cpython-312.pyc,, +numpy/_typing/__pycache__/_shape.cpython-312.pyc,, +numpy/_typing/__pycache__/_ufunc.cpython-312.pyc,, +numpy/_typing/_add_docstring.py,sha256=Oje462jvQMs5dDxRFWrDiKdK08-5sU-b6WKoSRAg2B4,4152 +numpy/_typing/_array_like.py,sha256=N-e4p17RNfe7H3jnpcX0cUXur4AIcjiTi95baW-0EZY,4294 +numpy/_typing/_char_codes.py,sha256=VZvjzpRG1Ehf2frndiRLLbPRa59A6FocdwGwjHEOorM,8977 +numpy/_typing/_dtype_like.py,sha256=HCEugkIDjsGQBYhk6QfnFtRtD-2ZigCSKeSm_7Z9cz0,3978 +numpy/_typing/_extended_precision.py,sha256=3jaNHY4qJwWODLFWvlfUQROLblfqqFDjOlp8bHnhMBI,449 +numpy/_typing/_nbit.py,sha256=pjOpz0sIdhphsXMK0dCQeQWXsrDpxCVZrYJ1wmALf04,651 +numpy/_typing/_nbit_base.py,sha256=PnQt_VbBKX_Uj17g_0yfoUqwh0bnN3LEyFHgR6GzNaw,3152 +numpy/_typing/_nbit_base.pyi,sha256=-9bQ2dXrhwqyROh5WXWM1APeDzi8QlwjJEAox-sriOU,778 +numpy/_typing/_nested_sequence.py,sha256=gZZRCnko04ZbsGaLbAx9VSsvKPR7UuwuzRAxwd1FYX0,2584 +numpy/_typing/_scalars.py,sha256=rTil_dSaoBGmmGD9QQZ0NqEP2BeZtkOEK9ZayDMB-l0,964 +numpy/_typing/_shape.py,sha256=5csdB-yj390thRrWPnwU7LcVfq-wYnd8QvXyuGdjAX4,283 +numpy/_typing/_ufunc.py,sha256=lok5QhQ5aJBARpyVoffrbeuEJsJ5vA6DaJ4aHTeUhms,163 +numpy/_typing/_ufunc.pyi,sha256=KwQANghYtbDNTZT7QvVj_aW99FgEfnpxhb5zU__W0Nk,30596 +numpy/_utils/__init__.py,sha256=q3vMrxeBeeU9pvCvLOkodDgzZS5V1jeI1_UZd4BbzDU,3572 +numpy/_utils/__init__.pyi,sha256=j1KKiEZW0MOWI3lwRzyIr-pHVqjJWkjb-NlwQok8838,728 +numpy/_utils/__pycache__/__init__.cpython-312.pyc,, +numpy/_utils/__pycache__/_convertions.cpython-312.pyc,, +numpy/_utils/__pycache__/_inspect.cpython-312.pyc,, +numpy/_utils/__pycache__/_pep440.cpython-312.pyc,, +numpy/_utils/_convertions.py,sha256=vetZFqC1qB-Z9jvc7RKuU_5ETOaSbjhbKa-sVwYV8TU,347 +numpy/_utils/_convertions.pyi,sha256=zkZfkdBk6-XcyD3zmr7E5sJbYasvyDCInUtWvrtjVhY,122 +numpy/_utils/_inspect.py,sha256=fpHbL1Gx7flw4HHjnNHNN-v8NKx1WgFBWgnX8T5hliY,7628 +numpy/_utils/_inspect.pyi,sha256=JLkhqPtHYvfBg5CN0VfGhO0u3ilzZGBThIfwxbk8YrI,2324 +numpy/_utils/_pep440.py,sha256=MZ5ZR1-o_4kA-68YcdUfkHkqUf3wRcKxQm08uv2GoE8,14474 +numpy/_utils/_pep440.pyi,sha256=cFEepudci4bYPJbIVshObOtflKvxGJVISkfemy8FPz8,3964 +numpy/char/__init__.py,sha256=KAKgke3wwjmEwxfiwkEXehe17DoN1OR_vkLBA9WFaGs,95 +numpy/char/__init__.pyi,sha256=XN-Twg_XKK4bMmir1UZ4nCtlW7nOezcU5Ix4N7X4OhQ,1651 +numpy/char/__pycache__/__init__.cpython-312.pyc,, +numpy/conftest.py,sha256=XdNfW6RkzEfbsZzdGxheAxmiAlH84kBueZKTN75WR7c,8900 +numpy/core/__init__.py,sha256=mCDTG1UnW38pcRG0sikf7oE2oP4MpO86ndHjquhL85U,1323 +numpy/core/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/core/__pycache__/__init__.cpython-312.pyc,, +numpy/core/__pycache__/_dtype.cpython-312.pyc,, +numpy/core/__pycache__/_dtype_ctypes.cpython-312.pyc,, +numpy/core/__pycache__/_internal.cpython-312.pyc,, +numpy/core/__pycache__/_multiarray_umath.cpython-312.pyc,, +numpy/core/__pycache__/_utils.cpython-312.pyc,, +numpy/core/__pycache__/arrayprint.cpython-312.pyc,, +numpy/core/__pycache__/defchararray.cpython-312.pyc,, +numpy/core/__pycache__/einsumfunc.cpython-312.pyc,, +numpy/core/__pycache__/fromnumeric.cpython-312.pyc,, +numpy/core/__pycache__/function_base.cpython-312.pyc,, +numpy/core/__pycache__/getlimits.cpython-312.pyc,, +numpy/core/__pycache__/multiarray.cpython-312.pyc,, +numpy/core/__pycache__/numeric.cpython-312.pyc,, +numpy/core/__pycache__/numerictypes.cpython-312.pyc,, +numpy/core/__pycache__/overrides.cpython-312.pyc,, +numpy/core/__pycache__/records.cpython-312.pyc,, +numpy/core/__pycache__/shape_base.cpython-312.pyc,, +numpy/core/__pycache__/umath.cpython-312.pyc,, +numpy/core/_dtype.py,sha256=BW-GFvu8BQiN-j6-3mESSWN3IQv9w8wNa3N53lisryI,333 +numpy/core/_dtype.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/core/_dtype_ctypes.py,sha256=pwXec_vp-L06nnzFO66mwjBuPpJPhICecnyfvW2yEMg,361 +numpy/core/_dtype_ctypes.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/core/_internal.py,sha256=i8Uf68tmcvQEmYoRWF3YqnJGCWI3GxZZEAecy5euNqg,976 +numpy/core/_multiarray_umath.py,sha256=zstXKBlwOv7q3YjVdb-zn4ypnyyI6fAGc6ahkEBav-g,2155 +numpy/core/_utils.py,sha256=dAaZtXVWhOEFiwmVsz8Mn77HsynMDKhZ7HkrjD1Q3vc,944 +numpy/core/arrayprint.py,sha256=zsOt7vFu-b1_7rPlKr7iGh12n4CmwzmU_fWV2CedPi4,349 +numpy/core/defchararray.py,sha256=4JjDjl62Abk7fNp-HZeuNSQUNNKJddNro6mD6ef4gq8,357 +numpy/core/einsumfunc.py,sha256=cW79vhPJJsi2oD-rXc_w5EKq4hlRo1YlyqI3ZnmsMx8,349 +numpy/core/fromnumeric.py,sha256=HD3e5PrYjtMOYQCMXTGZlgkikwvYWc1XRuNT5xhSxVg,353 +numpy/core/function_base.py,sha256=z5aEiXHQ4AAkHfGLQ8ul_hvjCWL2lDkgPLlWJE_4w-M,361 +numpy/core/getlimits.py,sha256=Tut0lg_HyjJXHSfB3c0Z0DvOR7amxUksqI_1MpOlXAo,345 +numpy/core/multiarray.py,sha256=nN54eP9dzhnY4oNoVBN2q7yDjF1w7PbMQx4a7oMqTVI,818 +numpy/core/numeric.py,sha256=Qev9oaDAGdyblrDjVXBzTY069E8Z-R3PIOnDp3K6XRY,372 +numpy/core/numerictypes.py,sha256=CHNOCimC3CarkejHOm-rV7b7bmykIlJAAhXj923pKq0,357 +numpy/core/overrides.py,sha256=wETB95vH9MSwFC3rg3GAUGozKJbCuKdVMhS_zC5baUw,345 +numpy/core/overrides.pyi,sha256=HScieJk23k4Lk14q8u9CEc3ZEVOQ6hGu_FeWDR2Tyu8,532 +numpy/core/records.py,sha256=xWh78TWkPZxZx5VY05Jia8-y1HyIzkEvmrF8BiKBb68,337 +numpy/core/shape_base.py,sha256=BWl-Of1Gl8nr0eBguDIdKbS5h9OdmO-VZPUQOe2e62Y,349 +numpy/core/umath.py,sha256=XggXI2bTIR9O4U2vyfgoja0kKI2q7sF3dMCWmukWIqQ,329 +numpy/ctypeslib/__init__.py,sha256=o9oMM6-vOwS4PVageFyXsh6x23hQtcsemoAVVR3kuHw,206 +numpy/ctypeslib/__init__.pyi,sha256=GTndWDhLTrUX0PBarv8JhXsXJXDPghNd6tZL3nd1ZYY,382 +numpy/ctypeslib/__pycache__/__init__.cpython-312.pyc,, +numpy/ctypeslib/__pycache__/_ctypeslib.cpython-312.pyc,, +numpy/ctypeslib/_ctypeslib.py,sha256=ry2JWVuqOh8IjE_m_UIyQoOpWkYoDue2xZAa3Mz8zdc,19682 +numpy/ctypeslib/_ctypeslib.pyi,sha256=7WyPfrcJRmdXKjiArSGGkJ9d3PcYJTi87CXgtes7ARU,8531 +numpy/doc/__pycache__/ufuncs.cpython-312.pyc,, +numpy/doc/ufuncs.py,sha256=jMnfQhRknVIhgFVS9z2l5oYM8N1tuQtf5bXMBL449oI,5552 +numpy/dtypes.py,sha256=cPkS6BLRvpfsUzhd7Vk1L7_VcenWb1nuHuCxc9fYC4I,1353 +numpy/dtypes.pyi,sha256=FqSJWLCA-I3MV8OHMluYNSLG6r1doOr6gFcOsYeFVxk,16159 +numpy/exceptions.py,sha256=HgT_ErZLiTfceCv0y2dYoestDpKt2IR66MHu-LAVpNI,7955 +numpy/exceptions.pyi,sha256=EdR0sub_Tjf2-7aPz_4VUGkLqc1w2MIDQjywR68x9C4,821 +numpy/f2py/__init__.py,sha256=1sHuSvD-wFPLK6vD_pjY527X4KP8Jlz3bsbqY2ImAaI,2534 +numpy/f2py/__init__.pyi,sha256=PDHjLyKbEWWTjnWcBuO0A6yVpG5lkMcyNPbwNl-R6IQ,118 +numpy/f2py/__main__.py,sha256=TDesy_2fDX-g27uJt4yXIXWzSor138R2t2V7HFHwqAk,135 +numpy/f2py/__pycache__/__init__.cpython-312.pyc,, +numpy/f2py/__pycache__/__main__.cpython-312.pyc,, +numpy/f2py/__pycache__/__version__.cpython-312.pyc,, +numpy/f2py/__pycache__/_isocbind.cpython-312.pyc,, +numpy/f2py/__pycache__/_src_pyf.cpython-312.pyc,, +numpy/f2py/__pycache__/auxfuncs.cpython-312.pyc,, +numpy/f2py/__pycache__/capi_maps.cpython-312.pyc,, +numpy/f2py/__pycache__/cb_rules.cpython-312.pyc,, +numpy/f2py/__pycache__/cfuncs.cpython-312.pyc,, +numpy/f2py/__pycache__/common_rules.cpython-312.pyc,, +numpy/f2py/__pycache__/crackfortran.cpython-312.pyc,, +numpy/f2py/__pycache__/diagnose.cpython-312.pyc,, +numpy/f2py/__pycache__/f2py2e.cpython-312.pyc,, +numpy/f2py/__pycache__/f90mod_rules.cpython-312.pyc,, +numpy/f2py/__pycache__/func2subr.cpython-312.pyc,, +numpy/f2py/__pycache__/rules.cpython-312.pyc,, +numpy/f2py/__pycache__/symbolic.cpython-312.pyc,, +numpy/f2py/__pycache__/use_rules.cpython-312.pyc,, +numpy/f2py/__version__.py,sha256=u3yEZEhZzW9QwLBqzFEO-zZDqsECiHs3ixdOlRnv9Jo,49 +numpy/f2py/__version__.pyi,sha256=8GyGk3Z3JL6jXsqXbhheqYSqtp9zqapNanxA7fHf_uA,46 +numpy/f2py/_backends/__init__.py,sha256=xIVHiF-velkBDPKwFS20PSg-XkFW5kLAVj5CSqNLddM,308 +numpy/f2py/_backends/__init__.pyi,sha256=RC41nCG_RhaOllATOhrOdFFDHGDEErv56plcdVo2GMM,141 +numpy/f2py/_backends/__pycache__/__init__.cpython-312.pyc,, +numpy/f2py/_backends/__pycache__/_backend.cpython-312.pyc,, +numpy/f2py/_backends/__pycache__/_distutils.cpython-312.pyc,, +numpy/f2py/_backends/__pycache__/_meson.cpython-312.pyc,, +numpy/f2py/_backends/_backend.py,sha256=9cxRVrA-5wcm2fnVdR-F08sYwGBf89ZZ7bl5VHqKabU,1195 +numpy/f2py/_backends/_backend.pyi,sha256=S3xxAntiuMAjkWSQgBX32XiB7AMlwb4SNahYmpUeSG0,1388 +numpy/f2py/_backends/_distutils.py,sha256=0SMBqxZgJBhfgX3HW0pEcL3S0qUFVCdSEsTLv1cEcJs,2461 +numpy/f2py/_backends/_distutils.pyi,sha256=HHVnI_ozA7-RQIcj-x_DW_crVJPNDSDk6CYVECtHABM,476 +numpy/f2py/_backends/_meson.py,sha256=8lnllh-SRsgnpwI-cZTWYLdG93wnkkXfjMH3uIejOaQ,8870 +numpy/f2py/_backends/_meson.pyi,sha256=kH_ZNzIbH0dM7oqB71VIuyDlkNDQQXlysJ6fzbt55cA,1960 +numpy/f2py/_backends/meson.build.template,sha256=0RdmYsEP0o6g7DJgWvSl3Mo18bu_IFS3zMusQ_T7fsI,1725 +numpy/f2py/_isocbind.py,sha256=QVoR_pD_bY9IgTaSHHUw_8EBg0mkaf3JZfwhLfHbz1Q,2422 +numpy/f2py/_isocbind.pyi,sha256=ByVGplEnG_CaErwiRY5khEoIICe4kFGm416sJnIh68s,352 +numpy/f2py/_src_pyf.py,sha256=u6eLk_jbxlnY3roCebGAk6wYrYJ79ZQUgl89Ck1uafI,7942 +numpy/f2py/_src_pyf.pyi,sha256=5bfPvrUIVP_e2hqhEbpe5y_ja6rcHxlDVe6gXVR69fI,1039 +numpy/f2py/auxfuncs.py,sha256=np0s118cTsP2XIJVdyFbyDMRq-3KF_IbuKQaE3Xt-rQ,27924 +numpy/f2py/auxfuncs.pyi,sha256=MizhsYeYkzJfY1HPxFC6YSr-Vwu2xpd6ImyLuItTmlQ,8254 +numpy/f2py/capi_maps.py,sha256=ncBlu6yE8AVLpMuyTeKL4gVncCeJj708gSXiHxnwgmY,30890 +numpy/f2py/capi_maps.pyi,sha256=2b-Sg7dCr0RqxWZ9FmLm4Vgfs9chnuaVWYZCsC4gDFY,1099 +numpy/f2py/cb_rules.py,sha256=Ad-tkBGZdwjsPyC4v8zmh7c5v5mIOKybO6k9lNrlf9g,25716 +numpy/f2py/cb_rules.pyi,sha256=VYhLJlRKpe2jE2XTKXHmljol-R09YK8tWscZ28pMicI,512 +numpy/f2py/cfuncs.py,sha256=V9GZ2E3s0_rragphRwNaCxq5Z3Mvbw-UDBR7wdApbnE,54223 +numpy/f2py/cfuncs.pyi,sha256=fWlbI1vH3IdXj3hmrda5Cl_wocO5Fn3uwTUoHBc_6Mg,833 +numpy/f2py/common_rules.py,sha256=HJ21QrdclhsGHj883Ab337-bSlPZopPALzXIMNfkT6c,5173 +numpy/f2py/common_rules.pyi,sha256=2d2LfXQr_st4cPnCZPQq5_hK9sTqj2436_t7Bf0PiSs,332 +numpy/f2py/crackfortran.py,sha256=3uovq4FHMoecI-qTZLi7vdUjFWJRdn94JidlFCHeQ_E,150604 +numpy/f2py/crackfortran.pyi,sha256=10qrHERdQbbr2hOSIP3V6w9gQJT6wrYpFCgS9-C4Bb8,10564 +numpy/f2py/diagnose.py,sha256=-UK2lwqufbuTqSex3w2H4-Qld7Z1NeutRllDNt6TDoA,5224 +numpy/f2py/diagnose.pyi,sha256=IW41dCKF39vknytu9aOQwmIWuk_WsCfkin8K1iFbmAc,24 +numpy/f2py/f2py2e.py,sha256=GJ-p9MAkTyhkyQ589ywmv5XiCPZkGy8larMERXyUi8c,29615 +numpy/f2py/f2py2e.pyi,sha256=2pETEpU4CJSky8mY9eBk_6z6Py63KDpINHwTFo-e1n8,2205 +numpy/f2py/f90mod_rules.py,sha256=GjvlboOdjc-lLmC0Tkxa8q_43fkfmo9dbWwv70xN_zI,10079 +numpy/f2py/f90mod_rules.pyi,sha256=0LIlPT9YI3Oit8aiP-i_JJPRsczDwzzQ96v14gMS1T0,467 +numpy/f2py/func2subr.py,sha256=p15rYW8c2kO-toes2Q9B9cjw9o9Jn-7pphHuxl0Jws0,10374 +numpy/f2py/func2subr.pyi,sha256=ide-SEoLyEEfa51Wqe6eKRJZvnuNYDJqD7BS0akqqzQ,393 +numpy/f2py/rules.py,sha256=U2u9J0IuC8hTJyNDO_jVdhfFrAxjsmW40y5edKDYMXQ,64723 +numpy/f2py/rules.pyi,sha256=x84T8VJwo8hmlLwpW54Q_q2ifUS5Dop9o0SHhyuxF48,1348 +numpy/f2py/setup.cfg,sha256=828sy3JvJmMzVxLkC-y0lxcEMaDTnMc3l9dWqP4jYng,50 +numpy/f2py/src/fortranobject.c,sha256=1SGmjDcW3_Ncx-aecMyjA_JfxyaW0TX6FL3mmG6wO10,47892 +numpy/f2py/src/fortranobject.h,sha256=uCcHO8mjuANlKb3c7YAZwM4pgT0CTaXWLYqgE27Mnt0,5996 +numpy/f2py/symbolic.py,sha256=YFKXeeLb5pq9cBDHLcJRq1eECpTlj0xQAsN8IKbMWgU,54828 +numpy/f2py/symbolic.pyi,sha256=BZrNj7NiDC5YZCVpnO7c7jtQETAnRokXEuUIMbg9hzM,6283 +numpy/f2py/tests/__init__.py,sha256=l38bo7dpp3u1lVMPErlct_5uBLKj35zuS_r35e7c19c,345 +numpy/f2py/tests/__pycache__/__init__.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_abstract_interface.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_array_from_pyobj.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_assumed_shape.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_block_docstring.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_callback.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_character.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_common.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_crackfortran.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_data.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_docs.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_f2cmap.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_f2py2e.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_isoc.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_kind.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_mixed.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_modules.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_parameter.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_pyf_src.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_quoted_character.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_regression.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_return_character.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_return_complex.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_return_integer.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_return_logical.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_return_real.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_routines.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_semicolon_split.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_size.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_string.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_symbolic.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/test_value_attrspec.cpython-312.pyc,, +numpy/f2py/tests/__pycache__/util.cpython-312.pyc,, +numpy/f2py/tests/src/abstract_interface/foo.f90,sha256=aCaFEqfXp79pVXnTFtjZBWUY_5pu8wsehp1dEauOkSE,692 +numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90,sha256=y3R2dDn0BUz-0bMggfT1jwXbhz_gniz7ONMpureEQew,111 +numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c,sha256=bwr4ytJ-NtqWE_1Map5U3wPf_BC7pOe1oK94yNNQ5DM,7716 +numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap,sha256=zfuOShmuotzcLIQDnVFaARwvM66iLrOYzpquIGDbiKU,30 +numpy/f2py/tests/src/assumed_shape/foo_free.f90,sha256=fqbSr7VlKfVrBulFgQtQA9fQf0mQvVbLi94e4FTST3k,494 +numpy/f2py/tests/src/assumed_shape/foo_mod.f90,sha256=9pbi88-uSNP5IwS49Kim982jDAuopo3tpEhg2SOU7no,540 +numpy/f2py/tests/src/assumed_shape/foo_use.f90,sha256=9Cl1sdrihB8cCSsjoQGmOO8VRv9ni8Fjr0Aku1UdEWM,288 +numpy/f2py/tests/src/assumed_shape/precision.f90,sha256=3L_F7n5ju9F0nxw95uBUaPeuiDOw6uHvB580eIj7bqI,134 +numpy/f2py/tests/src/block_docstring/foo.f,sha256=KVTeqSFpI94ibYIVvUW6lOQ9T2Bx5UzZEayP8Maf2H0,103 +numpy/f2py/tests/src/callback/foo.f,sha256=rLqaaaUpWFTaGVxNoGERtDKGCa5dLCTW5DglsFIx-wU,1316 +numpy/f2py/tests/src/callback/gh17797.f90,sha256=-_NvQK0MzlSR72PSuUE1FeUzzsMBUcPKsbraHIF7O24,155 +numpy/f2py/tests/src/callback/gh18335.f90,sha256=n_Rr99cI7iHBEPV3KGLEt0QKZtItEUKDdQkBt0GKKy4,523 +numpy/f2py/tests/src/callback/gh25211.f,sha256=ejY_ssadbZQfD5_-Xnx_ayzWXWLjkdy7DGp6C_uCUCY,189 +numpy/f2py/tests/src/callback/gh25211.pyf,sha256=nrzvt2QHZRCcugg0R-4FDMMl1MJmWCOAjR7Ta-pXz7Y,465 +numpy/f2py/tests/src/callback/gh26681.f90,sha256=ykwNXWyja5FfZk1bPihbYiMmMlbKhRPoPKva9dNFtLM,584 +numpy/f2py/tests/src/cli/gh_22819.pyf,sha256=e3zYjFmiOxzdXoxzgkaQ-CV6sZ1t4aKugyhqRXmBNdQ,148 +numpy/f2py/tests/src/cli/hi77.f,sha256=bgBERF4EYxHlzJCvZCJOlEmUE1FIvipdmj4LjdmL_dE,74 +numpy/f2py/tests/src/cli/hiworld.f90,sha256=RncaEqGWmsH9Z8BMV-UmOTUyo3-e9xOQGAmNgDv6SfY,54 +numpy/f2py/tests/src/common/block.f,sha256=tcGKa42S-6bfA6fybpM0Su_xjysEVustkEJoF51o_pE,235 +numpy/f2py/tests/src/common/gh19161.f90,sha256=Vpb34lRVC96STWaJerqkDQeZf7mDOwWbud6pW62Tvm4,203 +numpy/f2py/tests/src/crackfortran/accesstype.f90,sha256=3ONHb4ZNx0XISvp8fArnUwR1W9rzetLFILTiETPUd80,221 +numpy/f2py/tests/src/crackfortran/common_with_division.f,sha256=JAzHD5aluoYw0jVGZjBYd1wTABU0PwNBD0cz3Av5AAk,511 +numpy/f2py/tests/src/crackfortran/data_common.f,sha256=rP3avnulWqJbGCFLWayjoFKSspGDHZMidPTurjz33Tc,201 +numpy/f2py/tests/src/crackfortran/data_multiplier.f,sha256=LaPXVuo5lX0gFZVh76Hc7LM1sMk9EBPALuXBnHAGdOA,202 +numpy/f2py/tests/src/crackfortran/data_stmts.f90,sha256=MAZ3gstsPqECk3nWQ5Ql-C5udrIv3sAciW1ZGTtHLts,713 +numpy/f2py/tests/src/crackfortran/data_with_comments.f,sha256=FUPluNth5uHgyKqjQW7HKmyWg4wDXj3XPJCIC9ZZuOs,183 +numpy/f2py/tests/src/crackfortran/foo_deps.f90,sha256=D9FT8Rx-mK2p8R6r4bWxxqgYhkXR6lNmPj2RXOseMpw,134 +numpy/f2py/tests/src/crackfortran/gh15035.f,sha256=0G9bmfVafpuux4-ZgktYZ6ormwrWDTOhKMK4wmiSZlQ,391 +numpy/f2py/tests/src/crackfortran/gh17859.f,sha256=acknjwoWYdA038oliYLjB4T1PHhXkKRLeJobIgB_Lbo,352 +numpy/f2py/tests/src/crackfortran/gh22648.pyf,sha256=xPnKx4RcT1568q-q_O83DYpCgVYJ8z4WQ-yLmHPchJA,248 +numpy/f2py/tests/src/crackfortran/gh23533.f,sha256=k2xjRpRaajMYpi5O-cldYPTZGFGB12PUGcj5Fm9joyk,131 +numpy/f2py/tests/src/crackfortran/gh23598.f90,sha256=20ukdZXq-qU0Zxzt4W6cO8tRxlNlQ456zgD09zdozCE,105 +numpy/f2py/tests/src/crackfortran/gh23598Warn.f90,sha256=FvnIxy5fEOvzNb5WSkWzPk7yZ9yIv0yPZk9vNnS-83w,216 +numpy/f2py/tests/src/crackfortran/gh23879.f90,sha256=jELVfEGEF66z_Pv_iBHp3yGsGhadB0dnKCDtPcaz_CM,352 +numpy/f2py/tests/src/crackfortran/gh27697.f90,sha256=mTOEncxZlam6N-3I-IL0ua-iLkgqDrrVXNsE-7y7jAM,376 +numpy/f2py/tests/src/crackfortran/gh2848.f90,sha256=-IpkeTz0j9_lkQeN9mT7w3U1cAJjQxSMdAmyHdF8oVg,295 +numpy/f2py/tests/src/crackfortran/operators.f90,sha256=cb1JO2hIMCQejZO_UJWluBCP8LdXQbBJw2XN6YHB3JA,1233 +numpy/f2py/tests/src/crackfortran/privatemod.f90,sha256=9O2oWEquIUcbDB1wIzNeae3hx4gvXAoYW5tGfBt3KWk,185 +numpy/f2py/tests/src/crackfortran/publicmod.f90,sha256=nU_VXCKiniiUq_78KAWkXiN6oiMQh39emMxbgOVf9cg,177 +numpy/f2py/tests/src/crackfortran/pubprivmod.f90,sha256=-uz75kquU4wobaAPZ1DLKXJg6ySCZoDME1ce6YZ2q5Y,175 +numpy/f2py/tests/src/crackfortran/unicode_comment.f90,sha256=wDMoF7F7VFYdeocfTyWIh7noniEwExVb364HrhUSbSg,102 +numpy/f2py/tests/src/f2cmap/.f2py_f2cmap,sha256=fwszymaWhcWO296u5ThHW5yMAkFhB6EtHWqqpc9FAVI,83 +numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90,sha256=rphN_mmzjCCCkdPM0HjsiJV7rmxpo4GoCNp5qmBzv8U,307 +numpy/f2py/tests/src/isocintrin/isoCtests.f90,sha256=Oir0PfE3mErnUQ42aFxiqAkcYn3B6b1FHIPGipDdekg,1032 +numpy/f2py/tests/src/kind/foo.f90,sha256=6_zq3OAWsuNJ5ftGTQAEynkHy-MnuLgBXmMIgbvL7yU,367 +numpy/f2py/tests/src/mixed/foo.f,sha256=Zgn0xDhhzfas3HrzgVSxIL1lGEF2mFRVohrvXN1thU0,90 +numpy/f2py/tests/src/mixed/foo_fixed.f90,sha256=6eEEYCH71gPp6lZ6e2afLrfS6F_fdP7GZDbgGJJ_6ns,187 +numpy/f2py/tests/src/mixed/foo_free.f90,sha256=UC6iVRcm0-aVXAILE5jZhivoGQbKU-prqv59HTbxUJA,147 +numpy/f2py/tests/src/modules/gh25337/data.f90,sha256=EqMEuEV0_sx4XbFzftbU_6VfGtOw9Tbs0pm0eVEp2cA,188 +numpy/f2py/tests/src/modules/gh25337/use_data.f90,sha256=DChVLgD7qTOpbYNmfGjPjfOx5YsphMIYwdwnF12X4xM,185 +numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90,sha256=MMLPSzBwuGS4UwCXws9djH11F5tG5xFLc80CDb4U9Mk,423 +numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90,sha256=1dJD1kDC_wwn7v_zF49D3n62T1x9wFxGKanQQz_VI7k,424 +numpy/f2py/tests/src/modules/module_data_docstring.f90,sha256=-asnMH7vZMwVIeMU2YiLWgYCUUUxZgPTpbAomgWByHs,236 +numpy/f2py/tests/src/modules/use_modules.f90,sha256=bveSAqXIZtd4NMlDfFei1ZlesFAa9An5LjkD-gDk2ms,418 +numpy/f2py/tests/src/negative_bounds/issue_20853.f90,sha256=IxBGWem-uv9eHgDhysEdGTmNKHR1gAiU7YJPo20eveM,164 +numpy/f2py/tests/src/parameter/constant_array.f90,sha256=fkYemwIBKsP63-FGKBW8mzOAp6k13eZOin8sQe1pyno,1513 +numpy/f2py/tests/src/parameter/constant_both.f90,sha256=L0rG6-ClvHx7Qsch46BUXRi_oIEL0uw5dpRHdOUQuv0,1996 +numpy/f2py/tests/src/parameter/constant_compound.f90,sha256=lAT76HcXGMgr1NfKof-RIX3W2P_ik1PPqkRdJ6EyBmM,484 +numpy/f2py/tests/src/parameter/constant_integer.f90,sha256=42jROArrG7vIag9wFa_Rr5DBnnNvGsrEUgpPU14vfIo,634 +numpy/f2py/tests/src/parameter/constant_non_compound.f90,sha256=u9MRf894Cw0MVlSOUbMSnFSHP4Icz7RBO21QfMkIl-Q,632 +numpy/f2py/tests/src/parameter/constant_real.f90,sha256=QoPgKiHWrwI7w5ctYZugXWzaQsqSfGMO7Jskbg4CLTc,633 +numpy/f2py/tests/src/quoted_character/foo.f,sha256=0zXQbdaqB9nB8R4LF07KDMFDbxlNdiJjVdR8Nb3nzIM,496 +numpy/f2py/tests/src/regression/AB.inc,sha256=ydjTVb6QEw1iYw2tRiziqqzWcDHrJsNWr3m51-rqFXQ,17 +numpy/f2py/tests/src/regression/assignOnlyModule.f90,sha256=vPJbhOlNsLrgN3su4ohHUSbxE4GGKU7SiJh7dhBvX3o,633 +numpy/f2py/tests/src/regression/datonly.f90,sha256=HuBLuEw0kNEplJ9TxxSNr7hLj-jx9ZNGaXC8iLm_kf8,409 +numpy/f2py/tests/src/regression/f77comments.f,sha256=FjP-07suTBdqgtwiENT04P-47UB4g9J5-20IQdXAHhM,652 +numpy/f2py/tests/src/regression/f77fixedform.f95,sha256=KdKFcAc3ZrID-h4nTOJDdEYfQzR2kkn9VqQCorfJGpM,144 +numpy/f2py/tests/src/regression/f90continuation.f90,sha256=VweFIi5-xxZhtgSOh8i_FjMPXu_od9qjrDHq6ma5X5k,285 +numpy/f2py/tests/src/regression/incfile.f90,sha256=gq87H2CtCZUON9V5UzcK6x_fthnWDVuPFQLa0fece1M,97 +numpy/f2py/tests/src/regression/inout.f90,sha256=TlMxJjhjjiuLI--Tg2LshLnbfZpiKz37EpR_tPKKSx8,286 +numpy/f2py/tests/src/regression/lower_f2py_fortran.f90,sha256=bWlj2Frch3onnUpd6DTaoLDa6htrrbkBiI9JIRbQPfE,105 +numpy/f2py/tests/src/regression/mod_derived_types.f90,sha256=Cb9WV1sxoKt2wJCl1Z9QR42iLYX226f_boX-_ehDLAQ,589 +numpy/f2py/tests/src/return_character/foo77.f,sha256=tRyQSu9vNWtMRi7gjmMN-IZnS7ogr5YS0n38uax_Eo0,1025 +numpy/f2py/tests/src/return_character/foo90.f90,sha256=WPQZC6CjXLbUYpzy5LItEoHmRDFxW0ABB3emRACsjZU,1296 +numpy/f2py/tests/src/return_complex/foo77.f,sha256=7-iKoamJ-VObPFR-Tslhiw9E-ItIvankWMyxU5HqxII,1018 +numpy/f2py/tests/src/return_complex/foo90.f90,sha256=_GOKOZeooWp3pEaTBrZNmPmkgGodj33pJnJmySnp7aE,1286 +numpy/f2py/tests/src/return_integer/foo77.f,sha256=EKs1KeAOQBkIO99tMCx0H7_lpqvqpjie8zWZ6T_bAR4,1234 +numpy/f2py/tests/src/return_integer/foo90.f90,sha256=0aYWcaAVs7Lw3Qbf8hupfLC8YavRuPZVIwjHecIlMOo,1590 +numpy/f2py/tests/src/return_logical/foo77.f,sha256=Ax3tBVNAlxFtHhV8fziFcsTnoa8YJdapecMr6Qj7fLk,1244 +numpy/f2py/tests/src/return_logical/foo90.f90,sha256=IZXCerFecYT24zTQ_spIoPr6n-fRncaM0tkTs8JqO1E,1590 +numpy/f2py/tests/src/return_real/foo77.f,sha256=3nAY1YtzGk4osR2jZkHMVIUHxFoOtF1OLfWswpcV7kA,978 +numpy/f2py/tests/src/return_real/foo90.f90,sha256=38ZCnBGWb9arlJdnVWvZjVk8uesrQN8wG2GrXGcSIJs,1242 +numpy/f2py/tests/src/routines/funcfortranname.f,sha256=ruyXK6eQSLQnQ_rODT1qm1cJvpHrFhI6NRrnWvEIK0U,128 +numpy/f2py/tests/src/routines/funcfortranname.pyf,sha256=EgRw8ZWGdd2uK4qCZD89r9VQtEXmnKDx59OpB0K58as,451 +numpy/f2py/tests/src/routines/subrout.f,sha256=35DjHIj85ZLkxRxP4bs-WFTQ5y1AyDqBKAXTzSSTAxE,94 +numpy/f2py/tests/src/routines/subrout.pyf,sha256=xT_WnDpvpyPb0FMRAVTRRgm3nlfALf1Ojg8x3qZNv_4,332 +numpy/f2py/tests/src/size/foo.f90,sha256=nK_767f1TtqVr-dMalNkXmcKbSbLCiabhRkxSDCzLz0,859 +numpy/f2py/tests/src/string/char.f90,sha256=X_soOEV8cKsVZefi3iLT7ilHljjvJJ_i9VEHWOt0T9Y,647 +numpy/f2py/tests/src/string/fixed_string.f90,sha256=tCN5sA6e7M1ViZtBNvTnO7_efk7BHIjyhFKBoLC3US0,729 +numpy/f2py/tests/src/string/gh24008.f,sha256=Z6cq8SFGvmaA72qeH9tu1rP8pYjqm0ONpHn7nGbhoLA,225 +numpy/f2py/tests/src/string/gh24662.f90,sha256=xJkiYvrMT9Ipb9Cq7OXl1Ev6TISl8pq1MGemySzfGd0,204 +numpy/f2py/tests/src/string/gh25286.f90,sha256=lqEl81Iu9GIDTAbOfkkNGcGgDyyGnPB44mJw2iK1kng,318 +numpy/f2py/tests/src/string/gh25286.pyf,sha256=wYkkr5gEN9_RtGjpqh28X1k8KCgh0-Ds9XAt8IC9j4A,393 +numpy/f2py/tests/src/string/gh25286_bc.pyf,sha256=ZRvgSzRlaPEx8GyNt97FrRhtCg-r4ZTEDsHNBfit4m8,396 +numpy/f2py/tests/src/string/scalar_string.f90,sha256=U1QqVgbF1DbxdFekRjchyDlFRPnXwzG72kuE8A44Za8,185 +numpy/f2py/tests/src/string/string.f,sha256=JCwLuH21Ltag5cw_9geIQQJ4Hv_39NqG8Dzbqj1eDKE,260 +numpy/f2py/tests/src/value_attrspec/gh21665.f90,sha256=MbbSUQI5Enzq46KWFHRzQbY7q6ZHJH_9NRL-C9i13Wg,199 +numpy/f2py/tests/test_abstract_interface.py,sha256=2fTmp5-yLaNKtWvP0jQ6_kqkyWI73kgjIl7Ara25cII,837 +numpy/f2py/tests/test_array_from_pyobj.py,sha256=Yy6I46hlJLgSAlQ_RkRbZgZ_vTyH9BVDE5OxQ5TQRvM,24395 +numpy/f2py/tests/test_assumed_shape.py,sha256=WaIBz38eV2AzRwOvTvTaRkNks8c3H_61TGKtAReP6gk,1517 +numpy/f2py/tests/test_block_docstring.py,sha256=DOTSbdInRJCunaEycMGWQUy0b5rIeugPxUKmNg8FA34,604 +numpy/f2py/tests/test_callback.py,sha256=uVRfXR6q4ukZfbRUBCnc1cvKIEiaPFr6gc3mrgMzt1w,7362 +numpy/f2py/tests/test_character.py,sha256=dLj5WhKbP5CYnuQMsB3uWV5pcgoQCEscujuBSCXwkwA,22572 +numpy/f2py/tests/test_common.py,sha256=r_nJN4ZCZ3DstAadAoO_R_igybcb5zxwOJsv_-fRBa8,667 +numpy/f2py/tests/test_crackfortran.py,sha256=QNZ9VI61XDF5KoO8r9012AaGaVHzM7g1e2UVK0A4uUU,16834 +numpy/f2py/tests/test_data.py,sha256=XBQTj0WqR-XWHuRhT1PWXankrhlNxeqC6S03XA2b7AI,2966 +numpy/f2py/tests/test_docs.py,sha256=YwIFQGu4gwCrnNQ0i4O3sCs_ZCwovgr2fGt13DrfKbU,1996 +numpy/f2py/tests/test_f2cmap.py,sha256=hyzKOv261wPDWAmpuidAVFK3x9WXm64U_r3E_bmxNXg,404 +numpy/f2py/tests/test_f2py2e.py,sha256=QHYd7ghKHeUPB-piK0zvoM-BkmrH1Tvg8-wa4r7wMMI,29552 +numpy/f2py/tests/test_isoc.py,sha256=KK4VeoPhjF658msdnRYCWlzagFEea8h5SzdjB5FaDk0,1490 +numpy/f2py/tests/test_kind.py,sha256=0kASrNopTkYDN_d2j-i9Jf22nBjQof0eqGGcuEfEqI0,1897 +numpy/f2py/tests/test_mixed.py,sha256=3J9eftoqFIXViiqd21Ej02eNztjbIuzaqb_2OZYZUSw,897 +numpy/f2py/tests/test_modules.py,sha256=rbm9cPZilhdIuFA6rxqtRlPtyd0_IxKoFlB8VFA-1Vc,2384 +numpy/f2py/tests/test_parameter.py,sha256=wwyq8vA5FFljYfF55srLYqFvJ-ybdG2vEpr2CRdPqVs,4763 +numpy/f2py/tests/test_pyf_src.py,sha256=Tg8PzypY1P2pda3k2VbuKX97VCD7CtdgyJajP-U7AIc,1177 +numpy/f2py/tests/test_quoted_character.py,sha256=kvjgkp3bIP2lnkZ2MzS2yB5ytEJfHZydz24VAqs3UKM,495 +numpy/f2py/tests/test_regression.py,sha256=TriZy41MAIYRAHweZ-dg9GYyVmIjaIkPL4bAqmiCm50,6369 +numpy/f2py/tests/test_return_character.py,sha256=bdryZo5fXfTUE35M3_8gDqPCa9RKtkxROqBcnTjuwYo,1582 +numpy/f2py/tests/test_return_complex.py,sha256=1Nb6IsRfzHTCCiBTU5nHoY36w--Uh2goTedK-d8Xrc4,2507 +numpy/f2py/tests/test_return_integer.py,sha256=X3hYAJX9QaC0MYsIXg1Po-FRh4KCRN0KW0UFVpdkDww,1868 +numpy/f2py/tests/test_return_logical.py,sha256=ob4-_KkwWohpwF45SWKxiCKtktReLuqg4WQ8YFcMOQ0,2113 +numpy/f2py/tests/test_return_real.py,sha256=e7I27OmdJGCDQ9vX12U29M3WCOcmU1KRsivRb6BkKvo,3382 +numpy/f2py/tests/test_routines.py,sha256=6hOB8Rn4M-MCgDhKMoqI3YIn274wyWBzaLTjrJrTYCw,824 +numpy/f2py/tests/test_semicolon_split.py,sha256=5K_jZ2rJLxvwRDQu0gd_yiOEKiJmngvrH8vLeqsbC0Y,1702 +numpy/f2py/tests/test_size.py,sha256=D-7AOZtUn0DekZr-K753WQIcnt0prEdLigSuzjEicFg,1200 +numpy/f2py/tests/test_string.py,sha256=X6ECwK-mh-0Dfp8Pcag-jWxidoXtt009QDfjc8sfGT8,3038 +numpy/f2py/tests/test_symbolic.py,sha256=-NUoAJeeHZ0_Uj4NFnjz0awFk7vTZJ2rtHOErY0kkyk,19061 +numpy/f2py/tests/test_value_attrspec.py,sha256=P3ypxCXsakygnP1IdXH1hzw5VaYKGN40z3SOwK61IPU,345 +numpy/f2py/tests/util.py,sha256=YVm0U_jGp_LRO7k4FszcuC5AqoNsnf-cc564vEljABk,12554 +numpy/f2py/use_rules.py,sha256=MX3S-9SkSznXTwWWwaccMBhhyE_ZyoG3MCLLnsXXHEE,3475 +numpy/f2py/use_rules.pyi,sha256=J7S58xd70JkQBKtzl01T3uKmGfskGO6TiosmxUMW62Y,433 +numpy/fft/__init__.py,sha256=G7Nr6rpJggN1e3PgGggExF6Ei_Wt8n57eQIXf5vtzLM,8369 +numpy/fft/__init__.pyi,sha256=a2GtovDgo6O66DqPN7iVMBhl-6v_MjkTi2F5GGnKIAs,531 +numpy/fft/__pycache__/__init__.cpython-312.pyc,, +numpy/fft/__pycache__/_helper.cpython-312.pyc,, +numpy/fft/__pycache__/_pocketfft.cpython-312.pyc,, +numpy/fft/_helper.py,sha256=VpGmqY4O7zZWm0vg72mGu3AsZ-bca_mVG-olxoJYmmI,7022 +numpy/fft/_helper.pyi,sha256=UHKdQi3Rjz73WadYmvDCyRf95MSBNao3l1WZlJJHljs,1420 +numpy/fft/_pocketfft.py,sha256=UVXDxPok9kPHCjqH2XqrSTQ33fSCMIFmx_46Wj6V0TU,64292 +numpy/fft/_pocketfft.pyi,sha256=ksJK_WTkhCPo4ZAfdApGI3-uoK6WGH3NIbrrBx1jg5k,3353 +numpy/fft/_pocketfft_umath.cp312-win_amd64.lib,sha256=T0rQEfuHHcQSXi-j59y0ebqxObtvx1FhoyO162M31xc,2176 +numpy/fft/_pocketfft_umath.cp312-win_amd64.pyd,sha256=fACTcvT0SMLXQZcmtnH3EiaTcFyQXVI53x7ytJww6tw,276480 +numpy/fft/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/fft/tests/__pycache__/__init__.cpython-312.pyc,, +numpy/fft/tests/__pycache__/test_helper.cpython-312.pyc,, +numpy/fft/tests/__pycache__/test_pocketfft.cpython-312.pyc,, +numpy/fft/tests/test_helper.py,sha256=Yff4EXasyH-nD6dyEcZfX4g61ZjQcAY7XHfQLnXI1EY,6321 +numpy/fft/tests/test_pocketfft.py,sha256=m98HSj9duHLP2IBxOXllDCj-92hNxCHgxFXXuExgqmI,25035 +numpy/lib/__init__.py,sha256=sZL_BFMWHTRmMAUWlbBJ-ngMCdk6UJ9FdU3n0S0SMEg,3101 +numpy/lib/__init__.pyi,sha256=PjSJHE90GngKOVep78JzBhBJFSBVCP5ZRrj0vMtG7G8,1547 +numpy/lib/__pycache__/__init__.cpython-312.pyc,, +numpy/lib/__pycache__/_array_utils_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_arraypad_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_arraysetops_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_arrayterator_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_datasource.cpython-312.pyc,, +numpy/lib/__pycache__/_format_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_function_base_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_histograms_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_index_tricks_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_iotools.cpython-312.pyc,, +numpy/lib/__pycache__/_nanfunctions_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_npyio_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_polynomial_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_scimath_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_shape_base_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_stride_tricks_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_twodim_base_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_type_check_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_ufunclike_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_user_array_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_utils_impl.cpython-312.pyc,, +numpy/lib/__pycache__/_version.cpython-312.pyc,, +numpy/lib/__pycache__/array_utils.cpython-312.pyc,, +numpy/lib/__pycache__/format.cpython-312.pyc,, +numpy/lib/__pycache__/introspect.cpython-312.pyc,, +numpy/lib/__pycache__/mixins.cpython-312.pyc,, +numpy/lib/__pycache__/npyio.cpython-312.pyc,, +numpy/lib/__pycache__/recfunctions.cpython-312.pyc,, +numpy/lib/__pycache__/scimath.cpython-312.pyc,, +numpy/lib/__pycache__/stride_tricks.cpython-312.pyc,, +numpy/lib/__pycache__/user_array.cpython-312.pyc,, +numpy/lib/_array_utils_impl.py,sha256=JQE9Ul515em350VcRXQaO4BmE0HwIRPdBpFNXu4Hnws,1759 +numpy/lib/_array_utils_impl.pyi,sha256=bymzyS0JT02-btcDJ90mhAVpoxnaZ9qJmIrfAg4I4nY,512 +numpy/lib/_arraypad_impl.py,sha256=rH_OU6Xvom3l4p2DbrhArQIPjsjM9EONxtSqKNzxd6g,34445 +numpy/lib/_arraypad_impl.pyi,sha256=aT7ma7wLUliNkdRK4j2XHEza25i0WzUJ-ETUPxYjCFU,2017 +numpy/lib/_arraysetops_impl.py,sha256=EVRhEQCqRxzheU68u5kGgS8pkls_sUSM9WXa9Z9Y3ho,38499 +numpy/lib/_arraysetops_impl.pyi,sha256=s-mUcIIa8xVOtYJx8WNnhPRrsnfMNUlBq1Imt66nCI0,13617 +numpy/lib/_arrayterator_impl.py,sha256=HUtCLBXcG7mC5AX3KuJcDjHD9FEPheCcHypT6PtSswY,7442 +numpy/lib/_arrayterator_impl.pyi,sha256=_vhCb927PfSM5E40NbAhIxxmcBxPhcpk9V4ZdzBTOIQ,1920 +numpy/lib/_datasource.py,sha256=csFfOVL00V76KM--z9wROAImHDPB5l5b-8h1AJli53c,23431 +numpy/lib/_datasource.pyi,sha256=g3laWGQw8jdnR8h_Bp9f1Mp71k5PmRxFBNuT1suuNqc,1025 +numpy/lib/_format_impl.py,sha256=_TFgQtrgndkQWxPSjdO95ld_cCgESpjsJ04BLqZPGNk,37920 +numpy/lib/_format_impl.pyi,sha256=OAyT3J74w528YjS9J_TV7rWtC8gXgBtXGn9IQ2Af5f8,2139 +numpy/lib/_function_base_impl.py,sha256=T8U10Vg7R3xpf9B5zvrWtLZmGSeb0OZfCQLIytnyeUk,199498 +numpy/lib/_function_base_impl.pyi,sha256=m0JuSD13oEeESW5jBSFnjJHkgLFsreEg0r-KwtqGNhU,77346 +numpy/lib/_histograms_impl.py,sha256=2dceYr7BOVTkypST4CVgskJzjI-ka_FrL5oxBW7rHEQ,39517 +numpy/lib/_histograms_impl.pyi,sha256=Kg1Q0AgAi6QatGM3paKB2VluGkUCKbtCM1gy4_atEhU,1095 +numpy/lib/_index_tricks_impl.py,sha256=ZzRtFdpbYtDEYox3-uzDtzEOILMZ0LdX67VkWNIyBpY,32567 +numpy/lib/_index_tricks_impl.pyi,sha256=So1jOZ2__eFpN9oS7NXgHDfKHBP9kVmX4IQp11KufK8,8483 +numpy/lib/_iotools.py,sha256=Yh7xIu5OnSNnj2aX-yW8viJNHNC33nhCDZtaep8TszE,31776 +numpy/lib/_iotools.pyi,sha256=epBkUTN7SvJbWiZ63WJOHCR6YUNICzk7UXE6ZqqPgXM,3768 +numpy/lib/_nanfunctions_impl.py,sha256=qmmx5H7q-atgoYQAWcDAHWOQ-WWXCrXyUewwzRygjss,73405 +numpy/lib/_nanfunctions_impl.pyi,sha256=cq5lGiV_WoVNwboSEenA6Zmzi0rBTMfK_NNiTHeZx7w,864 +numpy/lib/_npyio_impl.py,sha256=9mlk6CAckDnBIaOW1KxyePnNNUguRCE2co3lk2Xetjw,101219 +numpy/lib/_npyio_impl.pyi,sha256=fs-QJUwOMh6MTLE2fTfzK_e2Q16B7tszlProdbpcgUI,9793 +numpy/lib/_polynomial_impl.py,sha256=UqyQMpVAJRR6z9Hj9Rq7MK509cI8OncTkM2XLsYsmvU,45590 +numpy/lib/_polynomial_impl.pyi,sha256=Z92OU7G7sWt4lBdO9LYq_kn_3nA3HxOUEYQkP70MfEk,7887 +numpy/lib/_scimath_impl.py,sha256=VjysRI24mMd4GaaEy5e1npARDc0_TKgQqq6TRoiNdAY,16326 +numpy/lib/_scimath_impl.pyi,sha256=3-C37vHfGAP84SLbuxHrPAO8ZQ-vgTSXTyh5EjNHXh0,2867 +numpy/lib/_shape_base_impl.py,sha256=BbDGec_0jGK-YkAG0hY36eCE86a0tksMCF6ooLgYmVQ,40302 +numpy/lib/_shape_base_impl.pyi,sha256=ENzamOUsGz57nNl3w5juRq2RkWC4k0MpeVg1aWjV9W4,5777 +numpy/lib/_stride_tricks_impl.py,sha256=uuGAiZWcaK-N9dQkohbAtRlrYItoany4KAt4hIQs6N8,19697 +numpy/lib/_stride_tricks_impl.pyi,sha256=7zWJcIwwy9bIu-FjDQFZJA79GO6drAzf8QLkWzGQ7W0,2032 +numpy/lib/_twodim_base_impl.py,sha256=bKyDb9b4wvQADhgsUujOtJci0ztuvFo_5zQtsBHPlhI,35124 +numpy/lib/_twodim_base_impl.pyi,sha256=-vD7xZiLmOrK49OiYNwbF6vzkKXbSAYUnYIhqBrcJrM,13432 +numpy/lib/_type_check_impl.py,sha256=ONqM3mhe3ITonSZJZISpOJ_fk2WA4PrdecW-HUFvRHM,20624 +numpy/lib/_type_check_impl.pyi,sha256=C7guCWlJFMl_w0M2GjYFql8eOoF3kN1hgda1YKSbBU8,10042 +numpy/lib/_ufunclike_impl.py,sha256=DLbbgYDRSbyqmX3XzJTjOBRD5oOjUUEvpCViahyJqoA,6213 +numpy/lib/_ufunclike_impl.pyi,sha256=rS_jOTzvFASpAHFk6cdH8RGUrze2n5_FZnlgQeCRi5I,1774 +numpy/lib/_user_array_impl.py,sha256=NZi7p0dSNoEmUN4KR1aB1_gf9r0lYAe_rT1VNZCQc5w,8336 +numpy/lib/_user_array_impl.pyi,sha256=cbqaYFs4J-WBtMNKnSBviiIR0NDA5_g6_tAJpnuhqWE,9494 +numpy/lib/_utils_impl.py,sha256=QdnCyIkxeZaR2wwn4QXxXBCokxuXi-RG-J_GHG4eL-s,24283 +numpy/lib/_utils_impl.pyi,sha256=KTI0vNECNqvxwzhp3SrutN02KGV-WXna7q56IfDW8Uw,746 +numpy/lib/_version.py,sha256=J1pqujOE4R_kH7R1yxtimwbWVRxDNnBD82XIZ9ly1yY,4936 +numpy/lib/_version.pyi,sha256=zAmfNnFeke7_lHsvR94fafNBcuJHpZ1jaB2PyzEostc,658 +numpy/lib/array_utils.py,sha256=zmrUIVleWEWzl9XjEpUlDUQHt9qbbsytyu_jLj-OgnE,151 +numpy/lib/array_utils.pyi,sha256=YYnx_V4CMdSbJTCnYboN1swcswmlOD2e4ZvQj5WsSak,197 +numpy/lib/format.py,sha256=ii8MRQmPZ1nAaqnMqeYtKgdNRP52iZsHPo5rODl20UM,501 +numpy/lib/format.pyi,sha256=stg21MwwoAp4mfTkx9DD4lxKKXZzfABbqqiKim985Bk,876 +numpy/lib/introspect.py,sha256=HBySrZfK5neieljp9q29t7BPag8cFfqAuD8njgJDZ3g,2843 +numpy/lib/introspect.pyi,sha256=IsntuFrlFhRBZcGGhRUTAgnONUHEbYw_2ApPmffx8QE,155 +numpy/lib/mixins.py,sha256=uY-4dCmzviGP1kpfksxSLn__EA_oTwmLqyr4Dl_KA04,7375 +numpy/lib/mixins.pyi,sha256=QcVCn2u2NAWm5JiUNNpZZWIT43ZaB-rWBbP7fh3NktU,3260 +numpy/lib/npyio.py,sha256=EY5_tqGplRo-B3cJtqvf9HC34nQKr5JNgyykHWj5q_E,69 +numpy/lib/npyio.pyi,sha256=6xZ6zF-6qKuSOfjjDL4YN43xKPYcD6IpzJiDiLpmSSs,121 +numpy/lib/recfunctions.py,sha256=7to2wo6f8bxxUfVzHrMbo0gR9FNly3zdJK3jRirqrfE,61220 +numpy/lib/recfunctions.pyi,sha256=WuYzU-PEbvywxFThpCRCC9um0boqijX3tW8TJdrIeto,13896 +numpy/lib/scimath.py,sha256=3nsYqFdGoo0danaHnGK8Qrz0AAA31THUX0qnQsp5eu8,182 +numpy/lib/scimath.pyi,sha256=9y5MNnmU1oLBK7zs-tP8zCmh6QiY0gvmNLCIw3WjsNU,245 +numpy/lib/stride_tricks.py,sha256=qXan9_UpXFAoDLBAaJ1wYb0B86DgP48ogp5sUu3s1Lw,89 +numpy/lib/stride_tricks.pyi,sha256=6-K3R7XBw_fcpHaAIs9y4LEc5i4r5gZUG-tg4EOR-ew,128 +numpy/lib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/lib/tests/__pycache__/__init__.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test__datasource.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test__iotools.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test__version.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_array_utils.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_arraypad.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_arraysetops.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_arrayterator.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_format.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_function_base.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_histograms.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_index_tricks.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_io.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_loadtxt.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_mixins.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_nanfunctions.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_packbits.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_polynomial.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_recfunctions.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_regression.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_shape_base.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_stride_tricks.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_twodim_base.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_type_check.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_ufunclike.cpython-312.pyc,, +numpy/lib/tests/__pycache__/test_utils.cpython-312.pyc,, +numpy/lib/tests/data/py2-np0-objarr.npy,sha256=ZLoI7K3iQpXDkuoDF1Ymyc6Jbw4JngbQKC9grauVRsk,258 +numpy/lib/tests/data/py2-objarr.npy,sha256=F4cyUC-_TB9QSFLAo2c7c44rC6NUYIgrfGx9PqWPSKk,258 +numpy/lib/tests/data/py2-objarr.npz,sha256=xo13HBT0FbFZ2qvZz0LWGDb3SuQASSaXh7rKfVcJjx4,366 +numpy/lib/tests/data/py3-objarr.npy,sha256=7mtikKlHXp4unZhM8eBot8Cknlx1BofJdd73Np2PW8o,325 +numpy/lib/tests/data/py3-objarr.npz,sha256=vVRl9_NZ7_q-hjduUr8YWnzRy8ESNlmvMPlaSSC69fk,453 +numpy/lib/tests/data/python3.npy,sha256=X0ad3hAaLGXig9LtSHAo-BgOvLlFfPYMnZuVIxRmj-0,96 +numpy/lib/tests/data/win64python2.npy,sha256=agOcgHVYFJrV-nrRJDbGnUnF4ZTPYXuSeF-Mtg7GMpc,96 +numpy/lib/tests/test__datasource.py,sha256=davD8e4HZO8IzcSplB6w-j3G133L0AUBAa2GMAJpgYY,10892 +numpy/lib/tests/test__iotools.py,sha256=-BFgfKSpxhDXtn7OK_puB9fKKikBQaAyS_C8dJvb0KM,14188 +numpy/lib/tests/test__version.py,sha256=I6-cyr_7w1TUvC25hR1gTE3x8S-QRAXRsB4IjPxY3tg,2063 +numpy/lib/tests/test_array_utils.py,sha256=gRsql9I0f7RQk-1b8iPa1jDZsd_auCF05-ulxBCVfN4,1150 +numpy/lib/tests/test_arraypad.py,sha256=mlTByN5tvRlSQg3JGNY6GkROLS_ZkXieSkBst-Ct4Hk,58043 +numpy/lib/tests/test_arraysetops.py,sha256=lqLU-cpVmzWphPp7LfDCmFG-yCzPwuIuIF6ql3n60mU,49048 +numpy/lib/tests/test_arrayterator.py,sha256=4WinyEB5liYMGS__8tCeZJcoODkNMFdSMPFl4W-C_y0,1341 +numpy/lib/tests/test_format.py,sha256=SV3W7WdY-HSo_ASpICUt6XgN_va6Dngrm26o16aBx2c,43012 +numpy/lib/tests/test_function_base.py,sha256=zgPaj_Ikxstklgi4KFo2c41S47GYsbBaeGCDzOa1wck,182973 +numpy/lib/tests/test_histograms.py,sha256=ueQJepW8kS-_f2IsdXxEKIkXi2hwcaDJw2D3u3WhCCA,34806 +numpy/lib/tests/test_index_tricks.py,sha256=QeDRZu7H4gq8kUgOZX6awGC6uq3PTMV0hKXAlFj6rb0,25100 +numpy/lib/tests/test_io.py,sha256=-QP2cB5tyOwpBZms37WLtbr0-m2SdfrPxtbOl1u7qK4,114257 +numpy/lib/tests/test_loadtxt.py,sha256=-nMbVMrTNE9t-lPc4qDNfUrBGjTeAAsyWzm6fszlBjE,41590 +numpy/lib/tests/test_mixins.py,sha256=eWaFNkjo_IPlP4-7T-sitpZqhAgjUOjqkglUaPIdWXA,7224 +numpy/lib/tests/test_nanfunctions.py,sha256=sPWiCbDHTp4y3CEG7ART_UdFiaEWeSYJYPLfHeG54zQ,55735 +numpy/lib/tests/test_packbits.py,sha256=RAk590EWlPvH9M3trkBKb6MbKNhMjo-otRLEYznT7dM,17919 +numpy/lib/tests/test_polynomial.py,sha256=cWODMJ0jNINUibSTDTpa-DR6V92qFXd0qs9E9Tx8btk,12730 +numpy/lib/tests/test_recfunctions.py,sha256=DbbWdC6KvS3z6zekexcs4o6_drTwtGGq3QLba_hYPrw,45015 +numpy/lib/tests/test_regression.py,sha256=6Us-PZWNVT__IW4vj2-rA77Sdhpaoqo-_JgySVXL3dg,7947 +numpy/lib/tests/test_shape_base.py,sha256=R65ZPBfWnvzIyIrYbyJAUBUQqDbZX3gN0zx8sCydjKc,28219 +numpy/lib/tests/test_stride_tricks.py,sha256=TkLvnfcLK0CGyWu2I-60fYDgkxDskvjgr1de20z4VE4,23667 +numpy/lib/tests/test_twodim_base.py,sha256=j9PmcG03wmHEZjOL6pMp-kKOczqjcYYYh8ptdFSBPnA,19484 +numpy/lib/tests/test_type_check.py,sha256=UzaOYqNOWjSAxiur6FtERAEMIfYxk84nL_lpeDJGzxU,15269 +numpy/lib/tests/test_ufunclike.py,sha256=7oc71qsMf8NSPU-bMOZNw7H5wwksjPlxy9jJHTMK9Bc,3112 +numpy/lib/tests/test_utils.py,sha256=9XtDAa79N5LOqpLUHKY88ajRuY2gLzRMyJ1dry_4dkU,2454 +numpy/lib/user_array.py,sha256=5z7-hfXnWT5Oq4_WPnjNWGPc9RHUWZcyq4L9ZM3kkGQ,64 +numpy/lib/user_array.pyi,sha256=IaCNerLboKjt3Fm-_k_d8IqeyJf7Lc9Pr5ROUr6wleM,54 +numpy/linalg/__init__.py,sha256=N4KqBOUEZURrj_00q3Df_yLwPrmRMKHdm6UR1sibu2Y,2171 +numpy/linalg/__init__.pyi,sha256=5JqTmDBoOCqKUsQrQdMPV_kSfyan8ScHtFqqZRjrEE4,1087 +numpy/linalg/__pycache__/__init__.cpython-312.pyc,, +numpy/linalg/__pycache__/_linalg.cpython-312.pyc,, +numpy/linalg/_linalg.py,sha256=mYzZDZvLJE8w9TQTaS0QgQOeZaMto-wXb-9CKUi81Rg,118474 +numpy/linalg/_linalg.pyi,sha256=QDmN2W5x-FJRaqC0KdhXVn20skjIgTKkIeWW5crRht8,14024 +numpy/linalg/_umath_linalg.cp312-win_amd64.lib,sha256=yNrX3pqZ-qMWAQuka0797w4HZZixTufSdfoZ71h4QSc,2120 +numpy/linalg/_umath_linalg.cp312-win_amd64.pyd,sha256=hzL66lfB6XUCl7xd8u0JCVQVnigCTGbxzb8dTreYzLg,112128 +numpy/linalg/_umath_linalg.pyi,sha256=Q6Cr6NvaeEiY7GL9Oe57YGLl2Eby87BW6bswPmMUqKc,1451 +numpy/linalg/lapack_lite.cp312-win_amd64.lib,sha256=u-lo9lvXL_Q0QDfeJtVpqI1Px5dI-DSQtoJuvPqYvAU,2084 +numpy/linalg/lapack_lite.cp312-win_amd64.pyd,sha256=fQso3nIp1TPxWHCGdq5GYbBVS63YtMp779n1c7b924A,18944 +numpy/linalg/lapack_lite.pyi,sha256=CuG_G6XugM6ZNqvhXqw6JUPIoeGSpQyuCMTEudYIqzU,2850 +numpy/linalg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/linalg/tests/__pycache__/__init__.cpython-312.pyc,, +numpy/linalg/tests/__pycache__/test_deprecations.cpython-312.pyc,, +numpy/linalg/tests/__pycache__/test_linalg.cpython-312.pyc,, +numpy/linalg/tests/__pycache__/test_regression.cpython-312.pyc,, +numpy/linalg/tests/test_deprecations.py,sha256=wEkvYP_0k-iqpVJh-bhgjFg67w4TCUthEe4adSSA8T8,637 +numpy/linalg/tests/test_linalg.py,sha256=mGBNdDIZ0SwUie32e0gbGUWEdgOkodVjoNvZGLkAp0U,87492 +numpy/linalg/tests/test_regression.py,sha256=bGZZUH9daBj4MkCxsmrgc2ETvd4DL1RyqsYYWm3Uybs,7237 +numpy/ma/API_CHANGES.txt,sha256=U39zA87aM_OIJhEKvHgL1RY1lhMJZc1Yj3DGLwbPbF0,3540 +numpy/ma/LICENSE,sha256=1427IIuA2StNMz5BpLquUNEkRPRuUxmfp3Jqkd5uLac,1616 +numpy/ma/README.rst,sha256=_MHrqHTE8L4wiJJqvaOh1l-xTxidwdilc_SZkFbgubM,10110 +numpy/ma/__init__.py,sha256=Zh2Hil4sdNNkf-0aJQrnOPmRkRwR4rOAzhN-n3RHsbU,1459 +numpy/ma/__init__.pyi,sha256=IorrWDELrFWTc_WfqWNCtWIcL0PrRE9aEZSzAABKMks,7404 +numpy/ma/__pycache__/__init__.cpython-312.pyc,, +numpy/ma/__pycache__/core.cpython-312.pyc,, +numpy/ma/__pycache__/extras.cpython-312.pyc,, +numpy/ma/__pycache__/mrecords.cpython-312.pyc,, +numpy/ma/__pycache__/testutils.cpython-312.pyc,, +numpy/ma/core.py,sha256=GVgC3tzeALlbC3EKXhfCO-V8MAr77kCWGlXo1NQxQ3U,298184 +numpy/ma/core.pyi,sha256=3JZf_2zQ0BwAItjUGj9nXNsdM4igsMBfaqqvZBXWBv4,135211 +numpy/ma/extras.py,sha256=AJdPH7w71TeGQYgesG2iUkyLZBzGrpZzl6yLdT4UKmc,70056 +numpy/ma/extras.pyi,sha256=M3U-bBns8ejf-mZUUdTct4oMCReRmg6gUkN507SN7hA,9233 +numpy/ma/mrecords.py,sha256=wKhzJCGS9tJKoua0OBWeu41us9iWLnCfuXhMjeIKWTg,27248 +numpy/ma/mrecords.pyi,sha256=uKY5QR8liuxaLw8tNfDm9TBXxLdToXZ3tunufVcfPIk,2169 +numpy/ma/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/ma/tests/__pycache__/__init__.cpython-312.pyc,, +numpy/ma/tests/__pycache__/test_arrayobject.cpython-312.pyc,, +numpy/ma/tests/__pycache__/test_core.cpython-312.pyc,, +numpy/ma/tests/__pycache__/test_deprecations.cpython-312.pyc,, +numpy/ma/tests/__pycache__/test_extras.cpython-312.pyc,, +numpy/ma/tests/__pycache__/test_mrecords.cpython-312.pyc,, +numpy/ma/tests/__pycache__/test_old_ma.cpython-312.pyc,, +numpy/ma/tests/__pycache__/test_regression.cpython-312.pyc,, +numpy/ma/tests/__pycache__/test_subclassing.cpython-312.pyc,, +numpy/ma/tests/test_arrayobject.py,sha256=ap06C0a0dGWcOknpctbhLbzHSNd2M9p_JL2jESqBBGk,1139 +numpy/ma/tests/test_core.py,sha256=vq8KJFS7GBhtzD-QsNU1pHfiFRiPyA843k9fth9m-1A,230866 +numpy/ma/tests/test_deprecations.py,sha256=K5g3yWbztFEvCHXO70DzI5hSr2QivnNzTOdMv8lipZg,2089 +numpy/ma/tests/test_extras.py,sha256=e-1k5fcukD-9faz-d-TPNMqnEmHX5WAt8LQTYBxOfOM,77205 +numpy/ma/tests/test_mrecords.py,sha256=vQb7I7b4_jqr31YFEJiH7XYOGcMGAmOGKq2sESYHQdk,20332 +numpy/ma/tests/test_old_ma.py,sha256=Urg21tx11ipDOWbXG6MhWVrKLKMx1oGUDyHopcG3u7A,34014 +numpy/ma/tests/test_regression.py,sha256=XExMB46BYWyB7l6Kh9iDIJJuPz-Imbifp1RGgMVgqLk,2802 +numpy/ma/tests/test_subclassing.py,sha256=gvAxc0vLQ5sjiARATy28NUS0McpNkSwhPJfpOSEGvvc,17439 +numpy/ma/testutils.py,sha256=HY8srt3kH3lIOo9cbh1cQ027EikCsvOLKiJzWJTKs24,10507 +numpy/ma/testutils.pyi,sha256=7DxEhRCp8m6AA0fSvWi9N2GYry2Q1h1x7X1sdxwcIDU,2359 +numpy/matlib.py,sha256=xGJk9kOBs7qqA8IqhqQuwufNMUvq6Af_mErXxmZHZxw,11018 +numpy/matlib.pyi,sha256=YnglKouzLRQ8slswi9HDGOViaN6pmeD9GWgy8pJOo5o,10313 +numpy/matrixlib/__init__.py,sha256=aPXbaN4OYDp9TFA8kGzt2gTBHb3o8Nanw-uM_3XoDF4,255 +numpy/matrixlib/__init__.pyi,sha256=HrRbMtTKOizGPMnwXzAi490CiNfiIQewrxYFkUw7dZI,91 +numpy/matrixlib/__pycache__/__init__.cpython-312.pyc,, +numpy/matrixlib/__pycache__/defmatrix.cpython-312.pyc,, +numpy/matrixlib/defmatrix.py,sha256=HaFYtHEIhmi4KKbwzUJw-5uk-Xah8JS9dRVi2bZH20w,31994 +numpy/matrixlib/defmatrix.pyi,sha256=ysdXgQgGzo2xugU7Gt2EVlhTvkbHKqFA58U2qU_xB5k,11284 +numpy/matrixlib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/matrixlib/tests/__pycache__/__init__.cpython-312.pyc,, +numpy/matrixlib/tests/__pycache__/test_defmatrix.cpython-312.pyc,, +numpy/matrixlib/tests/__pycache__/test_interaction.cpython-312.pyc,, +numpy/matrixlib/tests/__pycache__/test_masked_matrix.cpython-312.pyc,, +numpy/matrixlib/tests/__pycache__/test_matrix_linalg.cpython-312.pyc,, +numpy/matrixlib/tests/__pycache__/test_multiarray.cpython-312.pyc,, +numpy/matrixlib/tests/__pycache__/test_numeric.cpython-312.pyc,, +numpy/matrixlib/tests/__pycache__/test_regression.cpython-312.pyc,, +numpy/matrixlib/tests/test_defmatrix.py,sha256=i3KYI4VmOn7uhvUQsDzwkKKjAGdeUStamQgkjGoLgNY,15405 +numpy/matrixlib/tests/test_interaction.py,sha256=1nkRPWmfWfzOS9GjwOQV1D-Ke9W6UtkdwuMMVYcRIcg,12234 +numpy/matrixlib/tests/test_masked_matrix.py,sha256=EDUuKmHgap_OJuKbWmOsvOFKChSLasDsjXr20O_OjS0,9062 +numpy/matrixlib/tests/test_matrix_linalg.py,sha256=c7ldJ6xG66FvJH5FD2TSr617E9PfPMYwkAzrjhTxVYY,2341 +numpy/matrixlib/tests/test_multiarray.py,sha256=WgUxpIxXbzpXTTnt8iG0HC6RoAcTUk0PsEiMHT2361E,572 +numpy/matrixlib/tests/test_numeric.py,sha256=4XC1O2mv7NYYP1siT6I0YAz-Cuhlw6ZN9Du2FmCUz8Y,465 +numpy/matrixlib/tests/test_regression.py,sha256=2-c4B5aKWbySZ95RlG5WUm8OLBO7sLtM2zIzQMwMv0U,965 +numpy/polynomial/__init__.py,sha256=ynOHE1Mc9eBZMNlroaE9meIW7wZkvo7bpGjFjtDB_AU,6913 +numpy/polynomial/__init__.pyi,sha256=PYepBP5jjaCvPS3pQsl1xwNxDlrr1fSr122kM5g-ITk,743 +numpy/polynomial/__pycache__/__init__.cpython-312.pyc,, +numpy/polynomial/__pycache__/_polybase.cpython-312.pyc,, +numpy/polynomial/__pycache__/chebyshev.cpython-312.pyc,, +numpy/polynomial/__pycache__/hermite.cpython-312.pyc,, +numpy/polynomial/__pycache__/hermite_e.cpython-312.pyc,, +numpy/polynomial/__pycache__/laguerre.cpython-312.pyc,, +numpy/polynomial/__pycache__/legendre.cpython-312.pyc,, +numpy/polynomial/__pycache__/polynomial.cpython-312.pyc,, +numpy/polynomial/__pycache__/polyutils.cpython-312.pyc,, +numpy/polynomial/_polybase.py,sha256=8YgTcSVA4nRyITvWfrvuFGkXAdRB8Cc01R3cMCaa2wI,40549 +numpy/polynomial/_polybase.pyi,sha256=cNLZAPgtsUwASh2wryJIYvL_Mb1QxTDg9W7hq1JJBGM,8029 +numpy/polynomial/_polytypes.pyi,sha256=n2k83CrvRgHiu0iXFX0fJi5ZZn6NdMrEiIXBUwfNGpA,16735 +numpy/polynomial/chebyshev.py,sha256=Ynm99PaDojlSocfwoxNPq9UtK5j8OXYewWcf4OmEGY0,64287 +numpy/polynomial/chebyshev.pyi,sha256=lXlO2RgDRAKae9-eXVqQxpvYAMVqVdzHbCbGhQyCZcs,5278 +numpy/polynomial/hermite.py,sha256=N27hINDzF30xIQYtoba5wjTQQfxawYJTFxTWs1rUad8,56309 +numpy/polynomial/hermite.pyi,sha256=0FPL8PWTXACYXPPwd0VYyLMqsInOueZcMLMyUOOZMZY,2744 +numpy/polynomial/hermite_e.py,sha256=55-P3r7rKLh-ow8GkLKnGRkBGCznDaAi5EsMqtTM_vI,53913 +numpy/polynomial/hermite_e.pyi,sha256=1GyXRnzSLmS9c8Mkc65DicVGm6eA7GeTLWJALXp64Qg,2811 +numpy/polynomial/laguerre.py,sha256=Ka3rdMc-r4ORg5I86B0D6d8OjZi_qF81QvZycmOIvN0,54120 +numpy/polynomial/laguerre.pyi,sha256=RdPkWG5ccTjbTQAGrVhR9G02f9kIJHUYhTBNy8n_2wE,2466 +numpy/polynomial/legendre.py,sha256=xaD3gIuBJoEOD1UmpYxKfpxG7IY20zUdx5_mxgmR2Mk,52705 +numpy/polynomial/legendre.pyi,sha256=QK2mezoldWP4o-KeMKZB0SegU9YVqDWICgDvQVFI06Q,2466 +numpy/polynomial/polynomial.py,sha256=0Gq3tj431sv96iLtzZ_6aWo_Qtl1ltPTgdDeMSVBTp0,54294 +numpy/polynomial/polynomial.pyi,sha256=2oxYaruIMqD7kEAveWpuRdHWL_0gvisBrESYgQQaHpw,3098 +numpy/polynomial/polyutils.py,sha256=VbYrTs-TW2PMEGqNIDkCQg20yrtGvNkeUOoLnhJ7B9U,23394 +numpy/polynomial/polyutils.pyi,sha256=0VlzUyEb2ISaDekQ-QV__RhEeXaxHiBKi4YVV4qaAIE,10801 +numpy/polynomial/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/polynomial/tests/__pycache__/__init__.cpython-312.pyc,, +numpy/polynomial/tests/__pycache__/test_chebyshev.cpython-312.pyc,, +numpy/polynomial/tests/__pycache__/test_classes.cpython-312.pyc,, +numpy/polynomial/tests/__pycache__/test_hermite.cpython-312.pyc,, +numpy/polynomial/tests/__pycache__/test_hermite_e.cpython-312.pyc,, +numpy/polynomial/tests/__pycache__/test_laguerre.cpython-312.pyc,, +numpy/polynomial/tests/__pycache__/test_legendre.cpython-312.pyc,, +numpy/polynomial/tests/__pycache__/test_polynomial.cpython-312.pyc,, +numpy/polynomial/tests/__pycache__/test_polyutils.cpython-312.pyc,, +numpy/polynomial/tests/__pycache__/test_printing.cpython-312.pyc,, +numpy/polynomial/tests/__pycache__/test_symbol.cpython-312.pyc,, +numpy/polynomial/tests/test_chebyshev.py,sha256=N8cKoEf-jyLt-8KnMv56eKm72Jeikk2SZoJBjmYAtug,21247 +numpy/polynomial/tests/test_classes.py,sha256=Nwokju4FZgTYZVO1kEzwDOa2oZ09s-33LUGR6t3Fc0g,19144 +numpy/polynomial/tests/test_hermite.py,sha256=D0oETFi8zgQdqKi79LbAOG5ElwpGkpJvKWduG21qPZA,19219 +numpy/polynomial/tests/test_hermite_e.py,sha256=mXIdu3lCRU8DANPKC9kHQMWz7AOkju0l4iLRSMMNSmo,19559 +numpy/polynomial/tests/test_laguerre.py,sha256=QHc3JnCbNsrjB9-0BWeDoj9UZMiVZ6kmvnXqoJi8Gfw,18151 +numpy/polynomial/tests/test_legendre.py,sha256=Q7dUXM6nMlIXlhn11vXJKXRfDtttjSjXdXsYyfI0yu4,19350 +numpy/polynomial/tests/test_polynomial.py,sha256=qd4oOgjOYMUC5aoJvofgxMITaarVN1DxDhWs973V0oc,24255 +numpy/polynomial/tests/test_polyutils.py,sha256=fmZ2xqo6-eas1wM-QelCEjB2lFezOPPuAVhDsu-dpKc,3882 +numpy/polynomial/tests/test_printing.py,sha256=W0RgyZlRAiKL-3uJcNgcXPU7-TCX1wvtIdWIBvGyepQ,22116 +numpy/polynomial/tests/test_symbol.py,sha256=JNF0yt5xmUERjflLOydG1pMo6wMRLtaPDcYUkvXcD-o,5592 +numpy/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/random/LICENSE.md,sha256=tLwvT6HJV3jx7T3Y8UcGvs45lHW5ePnzS1081yUhtIo,3582 +numpy/random/__init__.pxd,sha256=g3EaMi3yfmnqT-KEWj0cp6SWIxVN9ChFjEYXGOfOifE,445 +numpy/random/__init__.py,sha256=8h45GRbXpL10xJzqw_n6Xgnm6SY_JUYCqH6SpeCnqUY,7693 +numpy/random/__init__.pyi,sha256=aqBCk_fpEZeoE94eNpGu37F6ZWfaDaajac9cWNm97So,2233 +numpy/random/__pycache__/__init__.cpython-312.pyc,, +numpy/random/__pycache__/_pickle.cpython-312.pyc,, +numpy/random/_bounded_integers.cp312-win_amd64.lib,sha256=kNkhC90SgHEbZian7XsGuREMuECj6xWKE40-f0IHobg,18000 +numpy/random/_bounded_integers.cp312-win_amd64.pyd,sha256=3ZjGH5x4xt-_FrfvUGBkerpBKm6nSrbNgNk8NN1IXVc,207360 +numpy/random/_bounded_integers.pxd,sha256=EOKKUlF9bh0CLNEP8TzXzX4w_xV5kivr1Putfdf6yvU,1763 +numpy/random/_bounded_integers.pyi,sha256=PFr_V0xYQhWjKk5oc83cYg_JcNZ2FEKTsjXlnxmkyB8,25 +numpy/random/_common.cp312-win_amd64.lib,sha256=6Q5eZBYswVueG1TBXNLiK13qHueBsauLFEEvJUG_CEA,2012 +numpy/random/_common.cp312-win_amd64.pyd,sha256=lbgXChjslZpufV6zxexilF4l7wAOfT4HljT2pvECVZk,167424 +numpy/random/_common.pxd,sha256=e1YxzdJoTvmMIyge3O9yOzpVU7aKRQa2q6cR9c9DB3k,5156 +numpy/random/_common.pyi,sha256=UlOkH40kVn6TU0c6OhG3CocvGnuYAC_46fxZJ7B_7y8,437 +numpy/random/_examples/cffi/__pycache__/extending.cpython-312.pyc,, +numpy/random/_examples/cffi/__pycache__/parse.cpython-312.pyc,, +numpy/random/_examples/cffi/extending.py,sha256=jSc3Vc6Uxl3VWHmoaffez8qG0GTfrFMuUxDhuB9Y5z4,928 +numpy/random/_examples/cffi/parse.py,sha256=2hy5736s-oL5uYvlQf_acpo7srBC8WfffLUhMcm218c,1803 +numpy/random/_examples/cython/extending.pyx,sha256=1lkq6zFifnwaMtAkVG0i_9SbMiNqplvqnHaqUpxqNzs,2344 +numpy/random/_examples/cython/extending_distributions.pyx,sha256=coVzQ6tOCHgZLO4tXIelEKcL3Rh5PIji8xJZGX4YuLA,3961 +numpy/random/_examples/cython/meson.build,sha256=q_IFcVs_qzERJD_-8uaDnjps3QdaW49okZMbFtwkAPo,1747 +numpy/random/_examples/numba/__pycache__/extending.cpython-312.pyc,, +numpy/random/_examples/numba/__pycache__/extending_distributions.cpython-312.pyc,, +numpy/random/_examples/numba/extending.py,sha256=mo0o4VM-K1vUQxNl_Uqr35Acj9UewnkglS7-dFX8yuw,2045 +numpy/random/_examples/numba/extending_distributions.py,sha256=vQdhhOpuGlpG8hk-mKWv7Li3-rwvelv-1c67odurt9o,2103 +numpy/random/_generator.cp312-win_amd64.lib,sha256=tLEq6ZC_AWa3Q3lNwMX6kPBqFKrXG0hrKWSa31NYhZ4,18400 +numpy/random/_generator.cp312-win_amd64.pyd,sha256=3SF2O3y9CTTRHhiiBbQ6L5QoF0nNw64pqLBVHYm-wGk,585216 +numpy/random/_generator.pyi,sha256=89Tnzx4Si2MtC1aqF8MXXs5h66sm1CkBU6uXkec_eCg,25321 +numpy/random/_mt19937.cp312-win_amd64.lib,sha256=f7UGWnF23FTPfMxjPf_JIF8z4pp3r6xpozpcwUZTwmk,2032 +numpy/random/_mt19937.cp312-win_amd64.pyd,sha256=37VCKvQcJIH3KEz7JNbCLSD3M1o7eCOBuWXxrl27tYs,83968 +numpy/random/_mt19937.pyi,sha256=50ES_I-RoPvqqAYCqm3gX8NVcu0R7O5Ru5LSLpEuHzg,849 +numpy/random/_pcg64.cp312-win_amd64.lib,sha256=j8qEy8O8XPz3VF58gHy5lXReYp209T5yujRuAGBk5fI,1996 +numpy/random/_pcg64.cp312-win_amd64.pyd,sha256=cjP5bWLm6QiluMBdlEurYkhOHoCU7XzrZ8TQOdqbA-M,94720 +numpy/random/_pcg64.pyi,sha256=PyZveXgEMEoHRprjXceyFBCwHkNctxcnSxk-tUWEzXE,1214 +numpy/random/_philox.cp312-win_amd64.lib,sha256=3kMrFmmIKfNrRMkjavqYPR3YumS0X_zfeGLqlJGyzmw,2012 +numpy/random/_philox.cp312-win_amd64.pyd,sha256=pC6QM0H1q1cKUIxDR-g4lVudAd-0L74qknFs0XyCY78,78336 +numpy/random/_philox.pyi,sha256=qTNeVdTZEZubisdfN5avuGoR6qMElesP5MC4gEMna9A,1049 +numpy/random/_pickle.py,sha256=8fmUcgzHhq_F_eyesNUdFjV07Br1yzLBLsfe-GWyQrE,2830 +numpy/random/_pickle.pyi,sha256=hj1oBasr_ejSeUlT3-q7luwC4DQFqgCPXIL0wxuFjt4,1651 +numpy/random/_sfc64.cp312-win_amd64.lib,sha256=7V-OLrKn5lB5meFoSF3AARnSKidyQwKw5U50UYqSCqc,1996 +numpy/random/_sfc64.cp312-win_amd64.pyd,sha256=qE_UvtK7_lTW4LIZkOkmDPgncOgMIvWM52eRetLXKps,59904 +numpy/random/_sfc64.pyi,sha256=qGwQgSmP2_RUa-C2y0r93o9VTMxJnD1vp_LVgWsTAC8,716 +numpy/random/bit_generator.cp312-win_amd64.lib,sha256=D_dhO8n8gry6exnFIwvmFJPKq3VWuma4sEJQh_Iu_Xs,2120 +numpy/random/bit_generator.cp312-win_amd64.pyd,sha256=E8hVJm2q6RBG_eztUqzWrRo5aAOB8R4smVmxc_X4tvs,157696 +numpy/random/bit_generator.pxd,sha256=TFR72-UsWpKMmiBsTg7eyiW-FT3hy6miVx2N49PVt-4,1244 +numpy/random/bit_generator.pyi,sha256=y9MsyLg-HH6BgrFRXtSyc4PY_H-pszQw57H8frQS5wQ,3726 +numpy/random/c_distributions.pxd,sha256=02WeqbzQ4heQ1cZ7ShePejxmt5AOI5kTstBZ5w2WxD0,6454 +numpy/random/lib/npyrandom.lib,sha256=D3g8BJdMhKC3LKg8SGQi5N729QRcp_Ty7B-jXIqpvJ0,149644 +numpy/random/mtrand.cp312-win_amd64.lib,sha256=i0V9sU8bBaK4UHT6VG6HQOnQt-NODDxdAm7Xn98CQf4,17122 +numpy/random/mtrand.cp312-win_amd64.pyd,sha256=oQDgPSlYESIbwxfZun4P8NvNWKje2n0vc0cD_H4iPmo,488960 +numpy/random/mtrand.pyi,sha256=BnWlQNKdxYKPNaGys4YZeAqigQNcMyKNagl6u2-Wzu0,24560 +numpy/random/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/random/tests/__pycache__/__init__.cpython-312.pyc,, +numpy/random/tests/__pycache__/test_direct.cpython-312.pyc,, +numpy/random/tests/__pycache__/test_extending.cpython-312.pyc,, +numpy/random/tests/__pycache__/test_generator_mt19937.cpython-312.pyc,, +numpy/random/tests/__pycache__/test_generator_mt19937_regressions.cpython-312.pyc,, +numpy/random/tests/__pycache__/test_random.cpython-312.pyc,, +numpy/random/tests/__pycache__/test_randomstate.cpython-312.pyc,, +numpy/random/tests/__pycache__/test_randomstate_regression.cpython-312.pyc,, +numpy/random/tests/__pycache__/test_regression.cpython-312.pyc,, +numpy/random/tests/__pycache__/test_seed_sequence.cpython-312.pyc,, +numpy/random/tests/__pycache__/test_smoke.cpython-312.pyc,, +numpy/random/tests/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/random/tests/data/__pycache__/__init__.cpython-312.pyc,, +numpy/random/tests/data/generator_pcg64_np121.pkl.gz,sha256=EfQ-X70KkHgBAFX2pIPcCUl4MNP1ZNROaXOU75vdiqM,203 +numpy/random/tests/data/generator_pcg64_np126.pkl.gz,sha256=fN8deNVxX-HELA1eIZ32kdtYvc4hwKya6wv00GJeH0Y,208 +numpy/random/tests/data/mt19937-testset-1.csv,sha256=bA5uuOXgLpkAwJjfV8oUePg3-eyaH4-gKe8AMcl2Xn0,16845 +numpy/random/tests/data/mt19937-testset-2.csv,sha256=SnOL1nyRbblYlC254PBUSc37NguV5xN-0W_B32IxDGE,16826 +numpy/random/tests/data/pcg64-testset-1.csv,sha256=wHoS7fIR3hMEdta7MtJ8EpIWX-Bw1yfSaVxiC15vxVs,24840 +numpy/random/tests/data/pcg64-testset-2.csv,sha256=6vlnVuW_4i6LEsVn6b40HjcBWWjoX5lboSCBDpDrzFs,24846 +numpy/random/tests/data/pcg64dxsm-testset-1.csv,sha256=Fhha5-jrCmRk__rsvx6CbDFZ7EPc8BOPDTh-myZLkhM,24834 +numpy/random/tests/data/pcg64dxsm-testset-2.csv,sha256=mNYzkCh0NMt1VvTrN08BbkpAbfkFxztNcsofgeW_0ns,24840 +numpy/random/tests/data/philox-testset-1.csv,sha256=QvpTynWHQjqTz3P2MPvtMLdg2VnM6TGTpXgp-_LeJ5g,24853 +numpy/random/tests/data/philox-testset-2.csv,sha256=-BNO1OCYtDIjnN5Q-AsQezBCGmVJUIs3qAMyj8SNtsA,24839 +numpy/random/tests/data/sfc64-testset-1.csv,sha256=sgkemW0lbKJ2wh1sBj6CfmXwFYTqfAk152P0r8emO38,24841 +numpy/random/tests/data/sfc64-testset-2.csv,sha256=mkp21SG8eCqsfNyQZdmiV41-xKcsV8eutT7rVnVEG50,24834 +numpy/random/tests/data/sfc64_np126.pkl.gz,sha256=MVa1ylFy7DUPgUBK-oIeKSdVl4UYEiN3AZ7G3sdzzaw,290 +numpy/random/tests/test_direct.py,sha256=sHeziHsn1iDt9kFJvoQqa4X_EV8RmdJkR2zM7zn1HQI,20618 +numpy/random/tests/test_extending.py,sha256=Kbem3Is-tYB0hiTem8_B6u5jYGv_tZNabrg2WAMTEzY,4820 +numpy/random/tests/test_generator_mt19937.py,sha256=t6iPLJTxdqQKvlrvmSNJn_umqit25A3X1y_nlexd7CQ,121685 +numpy/random/tests/test_generator_mt19937_regressions.py,sha256=y5urqQyDTYBy7xuuYmRMVq7sZFLC2a5HIQV42ZQic4c,8859 +numpy/random/tests/test_random.py,sha256=NHNX5yUlOmhSqUAkgnFuAR5vMk6VpGHFtiUABW9PAGA,73002 +numpy/random/tests/test_randomstate.py,sha256=tjY3PCEP4KkqdxWGHq5w36aLrpeaXjk4QMe97xYJA4Q,89848 +numpy/random/tests/test_randomstate_regression.py,sha256=yZ_PzRwhYsce-5W2aAPJbiZMNXjOU4bK-WdomdHwes4,8260 +numpy/random/tests/test_regression.py,sha256=i-v9J5LdvqrurawZrE-q657A_85y-ivVB4Bj7bp5z1U,6499 +numpy/random/tests/test_seed_sequence.py,sha256=J_peqBY4MhduYR1fFYAfcN2wvyC7P6xmV4xbu6j9nbQ,3389 +numpy/random/tests/test_smoke.py,sha256=aTjGNBx4HDSe5M7SbmhO1-Fj1CqVhDyE5Ldgjk_a4-Y,30817 +numpy/rec/__init__.py,sha256=cgaZYq6w4qNo81NZGO-E4vkSj9eSO4SgNMOqiLglp4k,85 +numpy/rec/__init__.pyi,sha256=gGrssJCiTrltTcwaCjXB8saZBWiWCHOr2mJmUsFdU50,370 +numpy/rec/__pycache__/__init__.cpython-312.pyc,, +numpy/strings/__init__.py,sha256=JzKUIYVjG4wRzsKVAVg9XWrq2vjjdi679CZc_Txfn4w,85 +numpy/strings/__init__.pyi,sha256=LvbeB_oUcN7O0GUJx3gzfYs-KPPVhHm6EhJIG3hJAHQ,1416 +numpy/strings/__pycache__/__init__.cpython-312.pyc,, +numpy/testing/__init__.py,sha256=0Qkz0ITfPKHDe9kObKTEx3fdDGAb9tfMikpdPuiapyA,603 +numpy/testing/__init__.pyi,sha256=xZNpS-qElFjn5Ueka4pywGO-rI_jf52GKH6uDNCfdXg,2295 +numpy/testing/__pycache__/__init__.cpython-312.pyc,, +numpy/testing/__pycache__/overrides.cpython-312.pyc,, +numpy/testing/__pycache__/print_coercion_tables.cpython-312.pyc,, +numpy/testing/_private/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/testing/_private/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/testing/_private/__pycache__/__init__.cpython-312.pyc,, +numpy/testing/_private/__pycache__/extbuild.cpython-312.pyc,, +numpy/testing/_private/__pycache__/utils.cpython-312.pyc,, +numpy/testing/_private/extbuild.py,sha256=ausYJDf2eOq9jYC3x_tFO_vBrMhjqjruVqHxgHjla8w,7966 +numpy/testing/_private/extbuild.pyi,sha256=f2h7VxBrN2uwqJIwB8pmv3c1G-sH7d3V4HCIL3GHYTA,678 +numpy/testing/_private/utils.py,sha256=rzwcAKdWkUIdsT79H3_OgbokFXbCRls7glwECNCpj9k,101550 +numpy/testing/_private/utils.pyi,sha256=MAJBkXgDX7w49uUn1a8_orw9Gumd2aSzlPtgQ9ZfpAU,13730 +numpy/testing/overrides.py,sha256=rldmRQXc5c9jEs4hghDXvHA4sJD7HuMcpfGMmzSML9I,2218 +numpy/testing/overrides.pyi,sha256=ceLN7L1s2pQvLZQTIh89A_MoOnkkJoe-abXKT0Sk1bk,406 +numpy/testing/print_coercion_tables.py,sha256=lT8IdI1_lantwFVG0C0JagO0mUxnnXCA5wnPI81czYQ,6493 +numpy/testing/print_coercion_tables.pyi,sha256=ecFs2Qse4_H9AQC93fv5b2jmWMol941uoYlSuxhmNSU,846 +numpy/testing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/testing/tests/__pycache__/__init__.cpython-312.pyc,, +numpy/testing/tests/__pycache__/test_utils.cpython-312.pyc,, +numpy/testing/tests/test_utils.py,sha256=BV8Aj5dGqZr32CvSO1Ag-idnHAJdDLnYKFSrJkXHcnQ,81824 +numpy/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/tests/__pycache__/__init__.cpython-312.pyc,, +numpy/tests/__pycache__/test__all__.cpython-312.pyc,, +numpy/tests/__pycache__/test_configtool.cpython-312.pyc,, +numpy/tests/__pycache__/test_ctypeslib.cpython-312.pyc,, +numpy/tests/__pycache__/test_lazyloading.cpython-312.pyc,, +numpy/tests/__pycache__/test_matlib.cpython-312.pyc,, +numpy/tests/__pycache__/test_numpy_config.cpython-312.pyc,, +numpy/tests/__pycache__/test_numpy_version.cpython-312.pyc,, +numpy/tests/__pycache__/test_public_api.cpython-312.pyc,, +numpy/tests/__pycache__/test_reloading.cpython-312.pyc,, +numpy/tests/__pycache__/test_scripts.cpython-312.pyc,, +numpy/tests/__pycache__/test_warnings.cpython-312.pyc,, +numpy/tests/test__all__.py,sha256=xZkp3RbMNpx4bFTvILKV8KTEMh5lvId7xcrhQS4LTe0,232 +numpy/tests/test_configtool.py,sha256=zieFjnFWqOsyLbtJJYIIdMMdThkDm-glvNGvN3_y7ow,1863 +numpy/tests/test_ctypeslib.py,sha256=7wJt8Im7-BUmTmtZ6rVeuHt__erJRRlK87dMaFZmL8E,13215 +numpy/tests/test_lazyloading.py,sha256=5YyD-WDS6uI_rIQBWmP6z7rCA9jtv4HAQ57NxysQKDs,1304 +numpy/tests/test_matlib.py,sha256=KmBMo3M7IARB8K5NLYk611RtsfW10_LgCQEBjdLEM9g,1913 +numpy/tests/test_numpy_config.py,sha256=8tTLdQi34xV1QTZw5TdaDQ9y25TVncGjvcmcXnQ2PEI,1364 +numpy/tests/test_numpy_version.py,sha256=EhDAFEamNCmRAiJEUSGtPa21IipODWrf6MN2Bem0az8,1798 +numpy/tests/test_public_api.py,sha256=iH-YBfcSa3Jc7_Mbxg4uWTNnJ1ZD9BDZn40L9jzIZ9g,28813 +numpy/tests/test_reloading.py,sha256=tEcoOR45hGgiQ-ocqrcRaKc7qKZXDwKM3ia6ivYpbm0,2762 +numpy/tests/test_scripts.py,sha256=KdTVn4N22QPEssua-dM83xExBCoVqIccBKaNBcjdVkE,1694 +numpy/tests/test_warnings.py,sha256=9-KePaaWfHgIAn6hVIy1gyio31zwMw8esAUYtvWTfzY,2499 +numpy/typing/__init__.py,sha256=7MEzTbsBZHmZmwuaEt2VsYQZPAjM2EGpa10Mdn35ceU,7217 +numpy/typing/__init__.pyi,sha256=gbeO9KFv0nrMve7BkrerjG92x442mEqEBDQZQuw2maI,130 +numpy/typing/__pycache__/__init__.cpython-312.pyc,, +numpy/typing/__pycache__/mypy_plugin.cpython-312.pyc,, +numpy/typing/mypy_plugin.py,sha256=eI-0xwotV4uhV7Dkpl6uuH15jSEwovgjfukbcLBRr34,7046 +numpy/typing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/typing/tests/__pycache__/__init__.cpython-312.pyc,, +numpy/typing/tests/__pycache__/test_isfile.cpython-312.pyc,, +numpy/typing/tests/__pycache__/test_runtime.cpython-312.pyc,, +numpy/typing/tests/__pycache__/test_typing.cpython-312.pyc,, +numpy/typing/tests/data/fail/arithmetic.pyi,sha256=AMohW1_h2jlX8YPHH_sMm8lGS8ReUC0RkyHAQYe2k8I,3812 +numpy/typing/tests/data/fail/array_constructors.pyi,sha256=oDNMGHZnzHOO5_3zGxv5rQSu6kUlE_tgSuHcLpHvtKk,1234 +numpy/typing/tests/data/fail/array_like.pyi,sha256=9EcZ306eJOZrHSdWlTe0cEpYvmODaQ_nItcbhWaxFy8,511 +numpy/typing/tests/data/fail/array_pad.pyi,sha256=ExxQs_3s8xvgEsvj6O4aGWTIc0ukXtAtUM9JUs5D6pQ,143 +numpy/typing/tests/data/fail/arrayprint.pyi,sha256=TzEdr6oiRnYV1ZOGAYS6zzDWuRHl-aV8e407NnMzlxg,543 +numpy/typing/tests/data/fail/arrayterator.pyi,sha256=L2AC5qFmBrw9klehdV7d7gHvKpPX-7H1Iz9wryDQVQA,477 +numpy/typing/tests/data/fail/bitwise_ops.pyi,sha256=R-h3fWesS5CxenzKEYuyr2IpqSyMlGHQOAlro_smez4,397 +numpy/typing/tests/data/fail/char.pyi,sha256=PsgOrQuNsO2TD2HohOiOt7sYbs2q08hSEWxiEuiks1E,2737 +numpy/typing/tests/data/fail/chararray.pyi,sha256=LrTSnLGGEZrjeW_qpS-TGWz8vLVT0l7K5NdcFFucw24,2310 +numpy/typing/tests/data/fail/comparisons.pyi,sha256=_T0ZdZ0GYV_BkOqJjIzuWEQpN0lJgCc0fvx250U4x4o,763 +numpy/typing/tests/data/fail/constants.pyi,sha256=MnjvGyU_QKKiSZ-BzsHdojlRKz7GGvqQmu0mxbewcao,81 +numpy/typing/tests/data/fail/datasource.pyi,sha256=JO5en3G84i6LDK5b9JRDMg2GoOn5QBFI5qp_pAvupPY,436 +numpy/typing/tests/data/fail/dtype.pyi,sha256=TeNsugh9LvvX-u08MkROtFM_B080JZheXexyw9Wsq6E,322 +numpy/typing/tests/data/fail/einsumfunc.pyi,sha256=DeLM2jL7ZBirjw9H2dfjXxbHwx2Y9gu3wp2NwDLYuIQ,470 +numpy/typing/tests/data/fail/flatiter.pyi,sha256=eWphjcSJXi_EM1MTT_6kB0MgO_jy_vPJkYtk1CMz6Ec,1169 +numpy/typing/tests/data/fail/fromnumeric.pyi,sha256=hC2Nc8tUVV5-i43ak0WzB2wejwPsn2yIgQ6H13b_Sh8,5836 +numpy/typing/tests/data/fail/histograms.pyi,sha256=vkSk1v1Na4VUnRlwM8sqhxqPjhg2HH_9YtXnYNEHgPM,388 +numpy/typing/tests/data/fail/index_tricks.pyi,sha256=2VDHr1Of7fqh2uu0wf-epw7VZD2Fy0-W9ZPe1Fa87-8,531 +numpy/typing/tests/data/fail/lib_function_base.pyi,sha256=HTiFQTO8plsdCb-F1VvA_wz50GswJhwB3od9mTDkkIE,2723 +numpy/typing/tests/data/fail/lib_polynomial.pyi,sha256=xjpuJ7DVIRxQok6f0-CGsSVg6QQuzZmyiURslKF3ctw,966 +numpy/typing/tests/data/fail/lib_utils.pyi,sha256=iBUetgF7F39F-yV2DBhIqvKywzK8kZoAL9N-kpR3pyk,101 +numpy/typing/tests/data/fail/lib_version.pyi,sha256=EfAZTQpzTJ1UCY3p9envuaJUCqforENP_QP_DVWU7Do,160 +numpy/typing/tests/data/fail/linalg.pyi,sha256=r9prpIwrCPu7pnoS2JuasfnA8TJASyJR6TCUfYaYAG0,1592 +numpy/typing/tests/data/fail/ma.pyi,sha256=bj3awspyo8Cla0q39oY9G6UQGBddG5RGTewTYAO4lcQ,7161 +numpy/typing/tests/data/fail/memmap.pyi,sha256=U8_bCFw8m8x7ZlWSpYmmKpC1BS8oCEwUEdUgCRf2FSg,174 +numpy/typing/tests/data/fail/modules.pyi,sha256=f14qw9HXlwJ7FARKYxNhaPou8OZ8mQXsU4P9cpFxbWI,620 +numpy/typing/tests/data/fail/multiarray.pyi,sha256=qPGgrMMc0-SLuHJk1k_RaiPvs3CO-Ob6B9vFOKaFr44,1718 +numpy/typing/tests/data/fail/ndarray.pyi,sha256=8wZpNNatpxbxNu8G2N_R0P-3UVZLVE_z6ZGmdndSWPM,392 +numpy/typing/tests/data/fail/ndarray_misc.pyi,sha256=S5wtoAY-Y1f7MM7kjW4OOMtJnuF5uLTvCll21nJgyvc,1451 +numpy/typing/tests/data/fail/nditer.pyi,sha256=Sp3-l4RWopWY5ekm7yoINo4UhVYaKl9vW56aV14QDcc,332 +numpy/typing/tests/data/fail/nested_sequence.pyi,sha256=RZOHSAb-tA5eihm-dsD-r2WJqKaX-JbQ-fek0hw94nY,481 +numpy/typing/tests/data/fail/npyio.pyi,sha256=RrjdSUJE-K_dOF8Or_5bfpT2LYgFgHbS9fwVmWuJCBg,626 +numpy/typing/tests/data/fail/numerictypes.pyi,sha256=igYnLB91EuhrNDihL9IxwlM_xhwphCgMjWimzDaSNrk,129 +numpy/typing/tests/data/fail/random.pyi,sha256=ng4fxmdk1RFQdn3PNF7qSO2c2hVxQ07lwYwQGrSPsKQ,2965 +numpy/typing/tests/data/fail/rec.pyi,sha256=HkJQLYK6u8thA6alMB4pUeMBPXr6mgnqqTYmXSiM2PM,758 +numpy/typing/tests/data/fail/scalars.pyi,sha256=3ryg0rO8QAcUsN1VrXDnVp43AUSGBoEczk2Vm2BAl6Q,2924 +numpy/typing/tests/data/fail/shape.pyi,sha256=0uCGDpXresJPxRTEgNH8bP_FvCT0wV8Ob5Bu2_ZS428,139 +numpy/typing/tests/data/fail/shape_base.pyi,sha256=jfOTNjSqDVIGlAmwbORYfifftjvW-W4ngMRP883oSrU,165 +numpy/typing/tests/data/fail/stride_tricks.pyi,sha256=kjsv-sHn8i-oxtLUcpeIrOzS00Hk6Af8clqnY-vwg6A,339 +numpy/typing/tests/data/fail/strings.pyi,sha256=T7nJ-2UqEa2ld_bZW4king8PnAG9w9LompbD9rtIb7Y,2385 +numpy/typing/tests/data/fail/testing.pyi,sha256=zBTsTLi6xASHSeRK4f9ub1810m33xrb2qFyGvH93iqo,1427 +numpy/typing/tests/data/fail/twodim_base.pyi,sha256=XqAijZdegv0eHZ4pZPTo9ruwkVaMT168nbqyZV0HcHM,1171 +numpy/typing/tests/data/fail/type_check.pyi,sha256=u65AFyNm-J4gQu1brfsGuyJ1pN8tDJQxtRSYDxebUfE,382 +numpy/typing/tests/data/fail/ufunc_config.pyi,sha256=G17kqlgWREHukftCiDnCBRDjum5H1f8UIIifakzNBZc,610 +numpy/typing/tests/data/fail/ufunclike.pyi,sha256=7dpF86m9EeY_69dl_zX0joQvFIpKPXGQRRUJOipWTOw,670 +numpy/typing/tests/data/fail/ufuncs.pyi,sha256=7fKHGG69SFRRkqO5I-QqV-pVkQ3LTCPWgwgzOHGmhwI,522 +numpy/typing/tests/data/fail/warnings_and_errors.pyi,sha256=kKR53mZ3zy1-JmyGgmWjBmJw04ipqnTzqu9B4nU5OlQ,205 +numpy/typing/tests/data/misc/extended_precision.pyi,sha256=g80l5fCRis1PKm55PutBymIhrs-v3vYF2d5ZqiajcbI,331 +numpy/typing/tests/data/mypy.ini,sha256=6Uoh_q6A2Jod1K_65IRTcaDffDktTEIK6FaUHHzCWPA,222 +numpy/typing/tests/data/pass/__pycache__/arithmetic.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/array_constructors.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/array_like.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/arrayprint.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/arrayterator.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/bitwise_ops.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/comparisons.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/dtype.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/einsumfunc.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/flatiter.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/fromnumeric.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/index_tricks.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/lib_user_array.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/lib_utils.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/lib_version.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/literal.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/ma.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/mod.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/modules.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/multiarray.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/ndarray_conversion.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/ndarray_misc.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/ndarray_shape_manipulation.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/nditer.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/numeric.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/numerictypes.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/random.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/recfunctions.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/scalars.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/shape.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/simple.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/ufunc_config.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/ufunclike.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/ufuncs.cpython-312.pyc,, +numpy/typing/tests/data/pass/__pycache__/warnings_and_errors.cpython-312.pyc,, +numpy/typing/tests/data/pass/arithmetic.py,sha256=HuUKU7DSCqnO9oCLKQloFSTTgp4JBdhfyg6bp-lZdNU,8380 +numpy/typing/tests/data/pass/array_constructors.py,sha256=NXGxCHOAeh8uxlP46EUnbo_PXl0OX62WnAgBvrDvZno,2586 +numpy/typing/tests/data/pass/array_like.py,sha256=VWGx8wSe5z5XR2uOkNVvYNhMMkaBbhLdQHVlixbl4nM,1075 +numpy/typing/tests/data/pass/arrayprint.py,sha256=NTw1gJ9v3TDVwRov4zsg_27rI-ndKuG4mDidBWEKVyc,803 +numpy/typing/tests/data/pass/arrayterator.py,sha256=qPyDI_38M155brLW25CyVnf6-2Zfn0MEnj8NqadpEgA,422 +numpy/typing/tests/data/pass/bitwise_ops.py,sha256=i5NHgKmg2q27z6mlCjKjO3haydGRPZmUL3Js-5jAt20,1090 +numpy/typing/tests/data/pass/comparisons.py,sha256=gWDQU6bGY09YVG9MEYLjkJH9pdOGql1PFN6h-5pJNEI,3606 +numpy/typing/tests/data/pass/dtype.py,sha256=YRsTwKEQ5iJtdKCEQIybU_nL8z8Wq9hU-BZmEO7HjQE,1127 +numpy/typing/tests/data/pass/einsumfunc.py,sha256=CXdLvQsU2iDqQc7d2TRRCSwguQzJ0SJDFn23SDeOOuY,1406 +numpy/typing/tests/data/pass/flatiter.py,sha256=JWG5gQ9RqSDrg9V2KlSgGtANnB7JDRFJM9e14wJ0Y_g,288 +numpy/typing/tests/data/pass/fromnumeric.py,sha256=bP0hEQYYQJOn7-ce0rAf8cvuxZX3Ja6GSSlCtNhEBUM,4263 +numpy/typing/tests/data/pass/index_tricks.py,sha256=ymUrTbHcpYRgAPVxhwdXF0f_1aFBDBZEp_Ue3BxddZE,1466 +numpy/typing/tests/data/pass/lib_user_array.py,sha256=qvCmq32uZlgGL76u8sygvguBiTphLrt8X-mVaYnex0A,640 +numpy/typing/tests/data/pass/lib_utils.py,sha256=XEc0v7bwES-C5D4GkSJQSSTSAl5ng7tq6tCWj3jxbCM,336 +numpy/typing/tests/data/pass/lib_version.py,sha256=TlLZK8sekCMm__WWo22FZfZc40zpczENf6y_TNjBpCw,317 +numpy/typing/tests/data/pass/literal.py,sha256=wZ7S1bDJFyTkrQbgYRSXZGobU976J2F_620-lv_hZTk,1561 +numpy/typing/tests/data/pass/ma.py,sha256=_gssQTkMQTtFIec3A9OSSk50o_LHhFp1vp3jBlvkuIs,4121 +numpy/typing/tests/data/pass/mod.py,sha256=IZbpRvH19U4hbLJfm0CKyyihJ9bv-g561cYr74bDlao,1720 +numpy/typing/tests/data/pass/modules.py,sha256=buzLurat4TIGmJuW3mGsGk7dKNmpBDfQOWWQXFfb9Uc,670 +numpy/typing/tests/data/pass/multiarray.py,sha256=OGU_AH571v22-L_IHGNRdd4XcbqlZXtZLxZLYZdiBqs,1456 +numpy/typing/tests/data/pass/ndarray_conversion.py,sha256=5Sy_SGRL_Nkb9j0yhsj0DtN9ba4aBc_KRLsUZK1SyNA,1530 +numpy/typing/tests/data/pass/ndarray_misc.py,sha256=A7PIVuXhVcN06uP_D-ZUdo_I8Xl1m74DZpjS4WLdTrY,3555 +numpy/typing/tests/data/pass/ndarray_shape_manipulation.py,sha256=yaBK3hW5fe2VpvARkn_NMeF-JX-OajI8JiRWOA_Uk7Y,687 +numpy/typing/tests/data/pass/nditer.py,sha256=1wpRitCNZKCC3WJVrFSh22Z1D8jP2VxQAMtzH8NcpV8,67 +numpy/typing/tests/data/pass/numeric.py,sha256=ZWN3MW8Lk_qeqx3VC9cMOekFK21o02xnkpUJrTWYeUI,1582 +numpy/typing/tests/data/pass/numerictypes.py,sha256=JaCjk4zQPOI67XzqGyi3dI-GUMFM2AvDuniwzSQ7_Rk,348 +numpy/typing/tests/data/pass/random.py,sha256=zMhPsSbYUjSou6PggCqp3Q-hYDYZytT7IeF-wBAB5R0,63323 +numpy/typing/tests/data/pass/recfunctions.py,sha256=8nqw-j8pbjUNo6f92fFjAwm57zzDLP2PrY6DehmWv8k,5138 +numpy/typing/tests/data/pass/scalars.py,sha256=IEkuEFLTVcsPQsSqft9K3iRDJNWRoVB6r_y3m4yzGos,3974 +numpy/typing/tests/data/pass/shape.py,sha256=oSkR0akFaIBrXFcX4D14ZiR2Yw1rHsZSoLKFL_Qjav8,458 +numpy/typing/tests/data/pass/simple.py,sha256=F6Qi7OLrbUrcZJiBGGPEjuyvk1M7jpOaIyewTCJVW5E,2925 +numpy/typing/tests/data/pass/ufunc_config.py,sha256=gmMTPrq8gLXJZSBQoOpJcgzIzWgMx-k_etKPV4KSTJk,1269 +numpy/typing/tests/data/pass/ufunclike.py,sha256=9t-6R0b7HmE5jczJp0dcNOP7Iz9zaUE0918AgFfeJfY,1403 +numpy/typing/tests/data/pass/ufuncs.py,sha256=gvdcCNoGUfN0CnQmn6k1j6ghdt8zGkJdcRcgctmU48A,438 +numpy/typing/tests/data/pass/warnings_and_errors.py,sha256=q3c1SmMwhyYLYQsLjK02AXphk3-96YltSTdTfrElJzQ,167 +numpy/typing/tests/data/reveal/arithmetic.pyi,sha256=JHcs_4SzHUK1uDM91UBlh2WTaXpU8ShW97TWT-RMcPA,27518 +numpy/typing/tests/data/reveal/array_api_info.pyi,sha256=TOvbhGUiNYcKv9KQs0A4-vzM_l9vYWvDXoqBOOLo1d4,3087 +numpy/typing/tests/data/reveal/array_constructors.pyi,sha256=eqe96eYVSybTarmGhyIe3CxsOSB3Pkfah6DQxYWl650,15042 +numpy/typing/tests/data/reveal/arraypad.pyi,sha256=hJeTeX66VR9hDRliaP195klegP9cmBnrZqDsEGLm30A,931 +numpy/typing/tests/data/reveal/arrayprint.pyi,sha256=oQGscSvF8perObX6j1LduWS1eogRYyl7NM8piYuZPxc,797 +numpy/typing/tests/data/reveal/arraysetops.pyi,sha256=-Fhtnyc-uzDT5zUlh7xSLmdkK05Y14MqpgdG38Xliu0,4485 +numpy/typing/tests/data/reveal/arrayterator.pyi,sha256=-bqtQE71AcS2cisOcRAaDRxvp_STs41Dbzu7qUvaGRc,1054 +numpy/typing/tests/data/reveal/bitwise_ops.pyi,sha256=z60Mwun7flBSAxDh_BHksPzPSikxvKnBkde6cRRnNuE,4803 +numpy/typing/tests/data/reveal/char.pyi,sha256=KekJM9d_iNnAa9OU98yiWrR7bJ4ZpxvUr04mVkWOdrA,11777 +numpy/typing/tests/data/reveal/chararray.pyi,sha256=vxQbEhNEOX6SghlIBBtJLNIc5XTiP_msVTRqBHja4gw,5403 +numpy/typing/tests/data/reveal/comparisons.pyi,sha256=h0PYWf2F-mRNAK7e_3H4-hu0msj2KxU3xaHb2aoqFnc,7445 +numpy/typing/tests/data/reveal/constants.pyi,sha256=DHycCQpNsu52JrhZ_Qds7f0F4U0rD4zWaVMOLwWR08o,347 +numpy/typing/tests/data/reveal/ctypeslib.pyi,sha256=TuMyVAki_VnT2jYiCGPNOvb9W3iWGL2sKKkbKul_5P0,4215 +numpy/typing/tests/data/reveal/datasource.pyi,sha256=07PFHAOF4kL5Wqq5pt1IKvx6VPYBZ9IK1_WXo_3II1E,606 +numpy/typing/tests/data/reveal/dtype.pyi,sha256=psnbSgAYVrU3MXJyV470NjQeqbdd9Re6q4_3n334ETg,4865 +numpy/typing/tests/data/reveal/einsumfunc.pyi,sha256=R-ve3Dda6S1ewKjlDoQ1XWx4Adj-JxFe9TFdrPeX2sA,1965 +numpy/typing/tests/data/reveal/emath.pyi,sha256=HGPBuE6CTTSHirCCQMklDGkvamoIILqpREfP9NJh49I,2179 +numpy/typing/tests/data/reveal/fft.pyi,sha256=uNGippaypaPH9ZmwaOXwPdHXDh_TW4BJlEkv9idK08Y,1638 +numpy/typing/tests/data/reveal/flatiter.pyi,sha256=WptTeykBGKmYBKse08jQ2nlP_706LL8qaqhm3IHjLT0,3348 +numpy/typing/tests/data/reveal/fromnumeric.pyi,sha256=FZNJHn7P-ddjrSSSUr-3qqWShbiGC5qkodDXXfvHp24,15526 +numpy/typing/tests/data/reveal/getlimits.pyi,sha256=DoHEcyug87JBCouEKZrf2pF2vliRF9rcQenn0Nd8MKo,1639 +numpy/typing/tests/data/reveal/histograms.pyi,sha256=2l6Af9c4cru2C6xSuL0iqW78OqEo4nrWFqARC5nA0Xo,1282 +numpy/typing/tests/data/reveal/index_tricks.pyi,sha256=UoYlUdqV2jgImmKFDOkLECoLN1ZuTWs7P0VLczBi8a8,3311 +numpy/typing/tests/data/reveal/lib_function_base.pyi,sha256=mq7dYJPYd98XqBnc_n1V6JgX2HvgoB8Q_weGyUC8TjA,20372 +numpy/typing/tests/data/reveal/lib_polynomial.pyi,sha256=fpINlyW5XcQl5I2sjXbb6REu8qosYsemET_GxjBAOtY,5849 +numpy/typing/tests/data/reveal/lib_utils.pyi,sha256=0mEaIvr9BkYx2Gmmj7M0vvG_o4c0TgD0uU8uJsY7Jl0,453 +numpy/typing/tests/data/reveal/lib_version.pyi,sha256=5-H7IY5M-OT0Wu7d0FhO95s5jYafpMb2s-sYPNEisaQ,592 +numpy/typing/tests/data/reveal/linalg.pyi,sha256=4kzgLlgrDlYjvNlzosyqdJ2AV83AJqJsr08aZcGRl-4,7346 +numpy/typing/tests/data/reveal/ma.pyi,sha256=vfKs_ua565soL5Ik4M_RMNbd3EbNx_vKNXSKh32qhaY,51706 +numpy/typing/tests/data/reveal/matrix.pyi,sha256=xyhYRFmQUL9GdFmZnWNLPJPaRRmjJLdpozP6uUBHnUE,2903 +numpy/typing/tests/data/reveal/memmap.pyi,sha256=KdMkvJgXmWwOAE6H0BKQjB-A3go2keJAem59FUJtmVM,738 +numpy/typing/tests/data/reveal/mod.pyi,sha256=4d8e_54VcJJKaSHy_vVwLzQ25Nt6GJ5qHxFIxkkl0ec,7356 +numpy/typing/tests/data/reveal/modules.pyi,sha256=dgB-VZ5GQpABSsnAsnwfUx781fv7dia3TrDnguVippg,1909 +numpy/typing/tests/data/reveal/multiarray.pyi,sha256=C_u26_2lvuhh3x5zy-qQctGO1ks1gMsC_pQ3DtXhu5o,8250 +numpy/typing/tests/data/reveal/nbit_base_example.pyi,sha256=9eWyNVc8VjggFJ112bPPKx5MOTLr5iurhIaETKjDc0s,694 +numpy/typing/tests/data/reveal/ndarray_assignability.pyi,sha256=F01Uub9Ect05XD8qz18nWpZ3AzhnTt-Ti8wsROZ1HWg,2993 +numpy/typing/tests/data/reveal/ndarray_conversion.pyi,sha256=KQIltfDd9yLcDTg86NCRDzQjOHXn1e6sYEQX3PcYb64,3335 +numpy/typing/tests/data/reveal/ndarray_misc.pyi,sha256=um6vXze3eHUfMB_yWg9TFuIT4lKhB-o-YXgSLThtiEE,8937 +numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi,sha256=xMPirePw0fwNYuTpnts3Bpt-CxorAUaF0ea0oopzgM8,1745 +numpy/typing/tests/data/reveal/nditer.pyi,sha256=eHIBPj1I75kErxXsTi0CQEjcJ_WvHXIx7QzEeTmxYDk,1947 +numpy/typing/tests/data/reveal/nested_sequence.pyi,sha256=aRkKbnRxI2A-xkg8iQZWOHU7672PlyMNpUHIkONymLw,637 +numpy/typing/tests/data/reveal/npyio.pyi,sha256=KA_lenEDj92tycocaW9y4Bz_jpZ5gsbHPxhYFn5nBDw,3583 +numpy/typing/tests/data/reveal/numeric.pyi,sha256=CivGCAYe2BlStSBMhO2hRUtmqFfRFrhuj97aK0PpQ-0,9242 +numpy/typing/tests/data/reveal/numerictypes.pyi,sha256=5ouW5fRTVciZdx0svYChyTsWcmwPpqZqhmlfpdpa8b0,584 +numpy/typing/tests/data/reveal/polynomial_polybase.pyi,sha256=1zRBBhPgpZXTa2jJtXGDfVQL5L4zxozjSZ0MgQsdgnM,7843 +numpy/typing/tests/data/reveal/polynomial_polyutils.pyi,sha256=vo7CfyWW7vuMhbyI95u53yS9t6ADyc2lGBJOtHbYD6c,10837 +numpy/typing/tests/data/reveal/polynomial_series.pyi,sha256=ppgLW-xn2DpkLOP8vfteKTmtCrAErE5k6_m6IbzNIm4,7018 +numpy/typing/tests/data/reveal/random.pyi,sha256=JlNcrkvVoyI8uezvQ9BBCTtFoVQPRgZ5zPXKiDdCFwo,105842 +numpy/typing/tests/data/reveal/rec.pyi,sha256=2Pr6-v4uSA_t13cYdLr2sKQSb1Ko9YuZQS-_UUAZwFo,3549 +numpy/typing/tests/data/reveal/scalars.pyi,sha256=SZ3zC7GNT-DeRXlLhz9_jvRY-TNSQvFqxXCdH3pL4HE,6569 +numpy/typing/tests/data/reveal/shape.pyi,sha256=9IilbiRez0Lbu7Zv_HvqdEwQhjRLzcbnKm-4wnG6d9c,275 +numpy/typing/tests/data/reveal/shape_base.pyi,sha256=1_G5HGC45IFPnetUyXXnyIhJYjNu0Mf7AEEtSz3QrmI,2058 +numpy/typing/tests/data/reveal/stride_tricks.pyi,sha256=PqrKANjOAnn_CQ2CjERvonBbM89PjhZpgnekiQpT04k,1342 +numpy/typing/tests/data/reveal/strings.pyi,sha256=D2r-lCrWhVM3HQ8X2cjLT2cwujxYLOb1ASrnHs2qPD8,9743 +numpy/typing/tests/data/reveal/testing.pyi,sha256=w2dTmMJnGi_jtBtCMjnUTqnrGLiYAvsksrTIwzk41HE,9031 +numpy/typing/tests/data/reveal/twodim_base.pyi,sha256=4devsums2EaY0qVWJxJG8kopVXHxYHC_uV9cj_ml_4k,7700 +numpy/typing/tests/data/reveal/type_check.pyi,sha256=3on6Yhb-vylW0Etlpl_VP6bevzBipPn8J8W6EVE66eU,2459 +numpy/typing/tests/data/reveal/ufunc_config.pyi,sha256=fhhSCMGh5L2WU9x8DGHF3dxHugJEkIKiwf_J5mq62Js,1190 +numpy/typing/tests/data/reveal/ufunclike.pyi,sha256=FrHumRFEU0_Ji-0HIIoOx1oHriVaTXKpNzLqiudAIcI,1377 +numpy/typing/tests/data/reveal/ufuncs.pyi,sha256=Bn60mPM7tO4Uv1dgSRY8OgT_NvmW75BhnKTQfvZsey4,6487 +numpy/typing/tests/data/reveal/warnings_and_errors.pyi,sha256=kdpx5u0-zsWOPcsnciaqsACr7OKUuFWmMeMCizZDun8,460 +numpy/typing/tests/test_isfile.py,sha256=US1HhRtoDrAmlY_RuoE7ILspD0ujMifE5dydGhligTM,1085 +numpy/typing/tests/test_runtime.py,sha256=CoPFvSUANjIF7-3kPOxGXmzH6kSDFrqPq6u0__nbxXE,3186 +numpy/typing/tests/test_typing.py,sha256=j6wK6nH2Jx9V8ijLJrr8QdJjDv3NbJKCrEUNF0j7AxE,6494 +numpy/version.py,sha256=8FfhaHm-fpZse6IRxY_p5JKwZ-kkGhZk-g3nhSV6Tpw,304 +numpy/version.pyi,sha256=-JbleHX_16pnboC4DmzPym2X1EcI-w5cRoH0utivI34,278 diff --git a/local_packages/numpy-2.4.4.dist-info/WHEEL b/local_packages/numpy-2.4.4.dist-info/WHEEL new file mode 100644 index 0000000..0a709ed --- /dev/null +++ b/local_packages/numpy-2.4.4.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: meson +Root-Is-Purelib: false +Tag: cp312-cp312-win_amd64 \ No newline at end of file diff --git a/local_packages/numpy-2.4.4.dist-info/entry_points.txt b/local_packages/numpy-2.4.4.dist-info/entry_points.txt new file mode 100644 index 0000000..48c4f64 --- /dev/null +++ b/local_packages/numpy-2.4.4.dist-info/entry_points.txt @@ -0,0 +1,13 @@ +[pkg_config] +numpy = numpy._core.lib.pkgconfig + +[array_api] +numpy = numpy + +[pyinstaller40] +hook-dirs = numpy:_pyinstaller_hooks_dir + +[console_scripts] +f2py = numpy.f2py.f2py2e:main +numpy-config = numpy._configtool:main + diff --git a/local_packages/numpy-2.4.4.dist-info/licenses/LICENSE.txt b/local_packages/numpy-2.4.4.dist-info/licenses/LICENSE.txt new file mode 100644 index 0000000..a394e46 --- /dev/null +++ b/local_packages/numpy-2.4.4.dist-info/licenses/LICENSE.txt @@ -0,0 +1,914 @@ +Copyright (c) 2005-2025, NumPy Developers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---- + + +---- + +This binary distribution of NumPy also bundles the following software: + + +Name: OpenBLAS +Files: numpy.libs\libscipy_openblas*.dll +Description: bundled as a dynamically linked library +Availability: https://github.com/OpenMathLib/OpenBLAS/ +License: BSD-3-Clause + Copyright (c) 2011-2014, The OpenBLAS Project + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. Neither the name of the OpenBLAS project nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Name: LAPACK +Files: numpy.libs\libscipy_openblas*.dll +Description: bundled in OpenBLAS +Availability: https://github.com/OpenMathLib/OpenBLAS/ +License: BSD-3-Clause-Open-MPI + Copyright (c) 1992-2013 The University of Tennessee and The University + of Tennessee Research Foundation. All rights + reserved. + Copyright (c) 2000-2013 The University of California Berkeley. All + rights reserved. + Copyright (c) 2006-2013 The University of Colorado Denver. All rights + reserved. + + $COPYRIGHT$ + + Additional copyrights may follow + + $HEADER$ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer listed + in this license in the documentation and/or other materials + provided with the distribution. + + - Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + The copyright holders provide no reassurances that the source code + provided does not infringe any patent, copyright, or any other + intellectual property rights of third parties. The copyright holders + disclaim any liability to any recipient for claims brought against + recipient by any third party for infringement of that parties + intellectual property rights. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Name: GCC runtime library +Files: numpy.libs\libscipy_openblas*.dll +Description: statically linked to files compiled with gcc +Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran +License: GPL-3.0-or-later WITH GCC-exception-3.1 + Copyright (C) 2002-2017 Free Software Foundation, Inc. + + Libgfortran is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgfortran is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . + +---- + +Full text of license texts referred to above follows (that they are +listed below does not necessarily imply the conditions apply to the +present binary release): + +---- + +GCC RUNTIME LIBRARY EXCEPTION + +Version 3.1, 31 March 2009 + +Copyright (C) 2009 Free Software Foundation, Inc. + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +This GCC Runtime Library Exception ("Exception") is an additional +permission under section 7 of the GNU General Public License, version +3 ("GPLv3"). It applies to a given file (the "Runtime Library") that +bears a notice placed by the copyright holder of the file stating that +the file is governed by GPLv3 along with this Exception. + +When you use GCC to compile a program, GCC may combine portions of +certain GCC header files and runtime libraries with the compiled +program. The purpose of this Exception is to allow compilation of +non-GPL (including proprietary) programs to use, in this way, the +header files and runtime libraries covered by this Exception. + +0. Definitions. + +A file is an "Independent Module" if it either requires the Runtime +Library for execution after a Compilation Process, or makes use of an +interface provided by the Runtime Library, but is not otherwise based +on the Runtime Library. + +"GCC" means a version of the GNU Compiler Collection, with or without +modifications, governed by version 3 (or a specified later version) of +the GNU General Public License (GPL) with the option of using any +subsequent versions published by the FSF. + +"GPL-compatible Software" is software whose conditions of propagation, +modification and use would permit combination with GCC in accord with +the license of GCC. + +"Target Code" refers to output from any compiler for a real or virtual +target processor architecture, in executable form or suitable for +input to an assembler, loader, linker and/or execution +phase. Notwithstanding that, Target Code does not include data in any +format that is used as a compiler intermediate representation, or used +for producing a compiler intermediate representation. + +The "Compilation Process" transforms code entirely represented in +non-intermediate languages designed for human-written code, and/or in +Java Virtual Machine byte code, into Target Code. Thus, for example, +use of source code generators and preprocessors need not be considered +part of the Compilation Process, since the Compilation Process can be +understood as starting with the output of the generators or +preprocessors. + +A Compilation Process is "Eligible" if it is done using GCC, alone or +with other GPL-compatible software, or if it is done without using any +work based on GCC. For example, using non-GPL-compatible Software to +optimize any GCC intermediate representations would not qualify as an +Eligible Compilation Process. + +1. Grant of Additional Permission. + +You have permission to propagate a work of Target Code formed by +combining the Runtime Library with Independent Modules, even if such +propagation would otherwise violate the terms of GPLv3, provided that +all Target Code was generated by Eligible Compilation Processes. You +may then convey such a combination under terms of your choice, +consistent with the licensing of the Independent Modules. + +2. No Weakening of GCC Copyleft. + +The availability of this Exception does not imply any general +presumption that third-party software is unaffected by the copyleft +requirements of the license of GCC. + +---- + + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. + diff --git a/local_packages/numpy-2.4.4.dist-info/licenses/numpy/_core/include/numpy/libdivide/LICENSE.txt b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/_core/include/numpy/libdivide/LICENSE.txt new file mode 100644 index 0000000..d72a7c3 --- /dev/null +++ b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/_core/include/numpy/libdivide/LICENSE.txt @@ -0,0 +1,21 @@ + zlib License + ------------ + + Copyright (C) 2010 - 2019 ridiculous_fish, + Copyright (C) 2016 - 2019 Kim Walisch, + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. diff --git a/local_packages/numpy-2.4.4.dist-info/licenses/numpy/_core/src/common/pythoncapi-compat/COPYING b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/_core/src/common/pythoncapi-compat/COPYING new file mode 100644 index 0000000..59a39fb --- /dev/null +++ b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/_core/src/common/pythoncapi-compat/COPYING @@ -0,0 +1,14 @@ +BSD Zero Clause License + +Copyright Contributors to the pythoncapi_compat project. + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. diff --git a/local_packages/numpy-2.4.4.dist-info/licenses/numpy/_core/src/highway/LICENSE b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/_core/src/highway/LICENSE new file mode 100644 index 0000000..1af4f15 --- /dev/null +++ b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/_core/src/highway/LICENSE @@ -0,0 +1,371 @@ +This project is primarily dual-licensed under your choice of either the Apache +License 2.0 or the BSD 3-Clause License. + +The following files are licensed under different terms: +* hwy/contrib/random/random-inl.h: CC0 1.0 Universal + +The full texts of all applicable licenses are included below, separated by +'---'. + +-------------------------------------------------------------------------------- +Apache License 2.0 +-------------------------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +BSD 3-Clause License +-------------------------------------------------------------------------------- + +Copyright (c) The Highway Project Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- +CC0 1.0 Universal +-------------------------------------------------------------------------------- + +Creative Commons Legal Code + +CC0 1.0 Universal + + CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE + LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN + ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS + INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES + REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS + PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM + THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED + HEREUNDER. + +Statement of Purpose + +The laws of most jurisdictions throughout the world automatically confer +exclusive Copyright and Related Rights (defined below) upon the creator +and subsequent owner(s) (each and all, an "owner") of an original work of +authorship and/or a database (each, a "Work"). + +Certain owners wish to permanently relinquish those rights to a Work for +the purpose of contributing to a commons of creative, cultural and +scientific works ("Commons") that the public can reliably and without fear +of later claims of infringement build upon, modify, incorporate in other +works, reuse and redistribute as freely as possible in any form whatsoever +and for any purposes, including without limitation commercial purposes. +These owners may contribute to the Commons to promote the ideal of a free +culture and the further production of creative, cultural and scientific +works, or to gain reputation or greater distribution for their Work in +part through the use and efforts of others. + +For these and/or other purposes and motivations, and without any +expectation of additional consideration or compensation, the person +associating CC0 with a Work (the "Affirmer"), to the extent that he or she +is an owner of Copyright and Related Rights in the Work, voluntarily +elects to apply CC0 to the Work and publicly distribute the Work under its +terms, with knowledge of his or her Copyright and Related Rights in the +Work and the meaning and intended legal effect of CC0 on those rights. + +1. Copyright and Related Rights. A Work made available under CC0 may be +protected by copyright and related or neighboring rights ("Copyright and +Related Rights"). Copyright and Related Rights include, but are not +limited to, the following: + + i. the right to reproduce, adapt, distribute, perform, display, + communicate, and translate a Work; + ii. moral rights retained by the original author(s) and/or performer(s); +iii. publicity and privacy rights pertaining to a person's image or + likeness depicted in a Work; + iv. rights protecting against unfair competition in regards to a Work, + subject to the limitations in paragraph 4(a), below; + v. rights protecting the extraction, dissemination, use and reuse of data + in a Work; + vi. database rights (such as those arising under Directive 96/9/EC of the + European Parliament and of the Council of 11 March 1996 on the legal + protection of databases, and under any national implementation + thereof, including any amended or successor version of such + directive); and +vii. other similar, equivalent or corresponding rights throughout the + world based on applicable law or treaty, and any national + implementations thereof. + +2. Waiver. To the greatest extent permitted by, but not in contravention +of, applicable law, Affirmer hereby overtly, fully, permanently, +irrevocably and unconditionally waives, abandons, and surrenders all of +Affirmer's Copyright and Related Rights and associated claims and causes +of action, whether now known or unknown (including existing as well as +future claims and causes of action), in the Work (i) in all territories +worldwide, (ii) for the maximum duration provided by applicable law or +treaty (including future time extensions), (iii) in any current or future +medium and for any number of copies, and (iv) for any purpose whatsoever, +including without limitation commercial, advertising or promotional +purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each +member of the public at large and to the detriment of Affirmer's heirs and +successors, fully intending that such Waiver shall not be subject to +revocation, rescission, cancellation, termination, or any other legal or +equitable action to disrupt the quiet enjoyment of the Work by the public +as contemplated by Affirmer's express Statement of Purpose. + +3. Public License Fallback. Should any part of the Waiver for any reason +be judged legally invalid or ineffective under applicable law, then the +Waiver shall be preserved to the maximum extent permitted taking into +account Affirmer's express Statement of Purpose. In addition, to the +extent the Waiver is so judged Affirmer hereby grants to each affected +person a royalty-free, non transferable, non sublicensable, non exclusive, +irrevocable and unconditional license to exercise Affirmer's Copyright and +Related Rights in the Work (i) in all territories worldwide, (ii) for the +maximum duration provided by applicable law or treaty (including future +time extensions), (iii) in any current or future medium and for any number +of copies, and (iv) for any purpose whatsoever, including without +limitation commercial, advertising or promotional purposes (the +"License"). The License shall be deemed effective as of the date CC0 was +applied by Affirmer to the Work. Should any part of the License for any +reason be judged legally invalid or ineffective under applicable law, such +partial invalidity or ineffectiveness shall not invalidate the remainder +of the License, and in such case Affirmer hereby affirms that he or she +will not (i) exercise any of his or her remaining Copyright and Related +Rights in the Work or (ii) assert any associated claims and causes of +action with respect to the Work, in either case contrary to Affirmer's +express Statement of Purpose. + +4. Limitations and Disclaimers. + + a. No trademark or patent rights held by Affirmer are waived, abandoned, + surrendered, licensed or otherwise affected by this document. + b. Affirmer offers the Work as-is and makes no representations or + warranties of any kind concerning the Work, express, implied, + statutory or otherwise, including without limitation warranties of + title, merchantability, fitness for a particular purpose, non + infringement, or the absence of latent or other defects, accuracy, or + the present or absence of errors, whether or not discoverable, all to + the greatest extent permissible under applicable law. + c. Affirmer disclaims responsibility for clearing rights of other persons + that may apply to the Work or any use thereof, including without + limitation any person's Copyright and Related Rights in the Work. + Further, Affirmer disclaims responsibility for obtaining any necessary + consents, permissions or other rights required for any use of the + Work. + d. Affirmer understands and acknowledges that Creative Commons is not a + party to this document and has no duty or obligation with respect to + this CC0 or use of the Work. \ No newline at end of file diff --git a/local_packages/numpy-2.4.4.dist-info/licenses/numpy/_core/src/multiarray/dragon4_LICENSE.txt b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/_core/src/multiarray/dragon4_LICENSE.txt new file mode 100644 index 0000000..7bd49e7 --- /dev/null +++ b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/_core/src/multiarray/dragon4_LICENSE.txt @@ -0,0 +1,27 @@ +Copyright (c) 2014 Ryan Juckett + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. + +dragon4.c|h h contains a modified version of Ryan Juckett's Dragon4 +implementation, obtained from https://www.ryanjuckett.com, +which has been ported from C++ to C and which has +modifications specific to printing floats in numpy. + +Ryan Juckett's original code was under the Zlib license; he gave numpy +permission to include it under the MIT license instead. diff --git a/local_packages/numpy-2.4.4.dist-info/licenses/numpy/_core/src/npysort/x86-simd-sort/LICENSE.md b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/_core/src/npysort/x86-simd-sort/LICENSE.md new file mode 100644 index 0000000..3e32165 --- /dev/null +++ b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/_core/src/npysort/x86-simd-sort/LICENSE.md @@ -0,0 +1,28 @@ +BSD 3-Clause License + +Copyright (c) 2022, Intel. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/local_packages/numpy-2.4.4.dist-info/licenses/numpy/_core/src/umath/svml/LICENSE b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/_core/src/umath/svml/LICENSE new file mode 100644 index 0000000..4723d4e --- /dev/null +++ b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/_core/src/umath/svml/LICENSE @@ -0,0 +1,30 @@ +Copyright (c) 2005-2021, NumPy Developers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/local_packages/numpy-2.4.4.dist-info/licenses/numpy/fft/pocketfft/LICENSE.md b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/fft/pocketfft/LICENSE.md new file mode 100644 index 0000000..c3a4c06 --- /dev/null +++ b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/fft/pocketfft/LICENSE.md @@ -0,0 +1,25 @@ +Copyright (C) 2010-2018 Max-Planck-Society +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. +* Neither the name of the copyright holder nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/local_packages/numpy-2.4.4.dist-info/licenses/numpy/linalg/lapack_lite/LICENSE.txt b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/linalg/lapack_lite/LICENSE.txt new file mode 100644 index 0000000..9b379c9 --- /dev/null +++ b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/linalg/lapack_lite/LICENSE.txt @@ -0,0 +1,48 @@ +Copyright (c) 1992-2013 The University of Tennessee and The University + of Tennessee Research Foundation. All rights + reserved. +Copyright (c) 2000-2013 The University of California Berkeley. All + rights reserved. +Copyright (c) 2006-2013 The University of Colorado Denver. All rights + reserved. + +$COPYRIGHT$ + +Additional copyrights may follow + +$HEADER$ + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +- Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +- Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer listed + in this license in the documentation and/or other materials + provided with the distribution. + +- Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +The copyright holders provide no reassurances that the source code +provided does not infringe any patent, copyright, or any other +intellectual property rights of third parties. The copyright holders +disclaim any liability to any recipient for claims brought against +recipient by any third party for infringement of that parties +intellectual property rights. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/local_packages/numpy-2.4.4.dist-info/licenses/numpy/ma/LICENSE b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/ma/LICENSE new file mode 100644 index 0000000..b41aae0 --- /dev/null +++ b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/ma/LICENSE @@ -0,0 +1,24 @@ +* Copyright (c) 2006, University of Georgia and Pierre G.F. Gerard-Marchant +* All rights reserved. +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above copyright +* notice, this list of conditions and the following disclaimer in the +* documentation and/or other materials provided with the distribution. +* * Neither the name of the University of Georgia nor the +* names of its contributors may be used to endorse or promote products +* derived from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY +* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/local_packages/numpy-2.4.4.dist-info/licenses/numpy/random/LICENSE.md b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/random/LICENSE.md new file mode 100644 index 0000000..a6cf1b1 --- /dev/null +++ b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/random/LICENSE.md @@ -0,0 +1,71 @@ +**This software is dual-licensed under the The University of Illinois/NCSA +Open Source License (NCSA) and The 3-Clause BSD License** + +# NCSA Open Source License +**Copyright (c) 2019 Kevin Sheppard. All rights reserved.** + +Developed by: Kevin Sheppard (, +) +[http://www.kevinsheppard.com](http://www.kevinsheppard.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal with +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimers. + +Redistributions in binary form must reproduce the above copyright notice, this +list of conditions and the following disclaimers in the documentation and/or +other materials provided with the distribution. + +Neither the names of Kevin Sheppard, nor the names of any contributors may be +used to endorse or promote products derived from this Software without specific +prior written permission. + +**THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH +THE SOFTWARE.** + + +# 3-Clause BSD License +**Copyright (c) 2019 Kevin Sheppard. All rights reserved.** + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +**THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE.** + +# Components + +Many parts of this module have been derived from original sources, +often the algorithm's designer. Component licenses are located with +the component code. diff --git a/local_packages/numpy-2.4.4.dist-info/licenses/numpy/random/src/distributions/LICENSE.md b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/random/src/distributions/LICENSE.md new file mode 100644 index 0000000..31576ba --- /dev/null +++ b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/random/src/distributions/LICENSE.md @@ -0,0 +1,61 @@ +## NumPy + +Copyright (c) 2005-2017, NumPy Developers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + +* Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +## Julia + +The ziggurat methods were derived from Julia. + +Copyright (c) 2009-2019: Jeff Bezanson, Stefan Karpinski, Viral B. Shah, +and other contributors: + +https://github.com/JuliaLang/julia/contributors + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/local_packages/numpy-2.4.4.dist-info/licenses/numpy/random/src/mt19937/LICENSE.md b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/random/src/mt19937/LICENSE.md new file mode 100644 index 0000000..f65c3d4 --- /dev/null +++ b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/random/src/mt19937/LICENSE.md @@ -0,0 +1,61 @@ +# MT19937 + +Copyright (c) 2003-2005, Jean-Sebastien Roy (js@jeannot.org) + +The rk_random and rk_seed functions algorithms and the original design of +the Mersenne Twister RNG: + + Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura, + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. The names of its contributors may not be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Original algorithm for the implementation of rk_interval function from +Richard J. Wagner's implementation of the Mersenne Twister RNG, optimised by +Magnus Jonsson. + +Constants used in the rk_double implementation by Isaku Wada. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/local_packages/numpy-2.4.4.dist-info/licenses/numpy/random/src/pcg64/LICENSE.md b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/random/src/pcg64/LICENSE.md new file mode 100644 index 0000000..7aac7a5 --- /dev/null +++ b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/random/src/pcg64/LICENSE.md @@ -0,0 +1,22 @@ +# PCG64 + +## The MIT License + +PCG Random Number Generation for C. + +Copyright 2014 Melissa O'Neill + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/local_packages/numpy-2.4.4.dist-info/licenses/numpy/random/src/philox/LICENSE.md b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/random/src/philox/LICENSE.md new file mode 100644 index 0000000..9738e44 --- /dev/null +++ b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/random/src/philox/LICENSE.md @@ -0,0 +1,31 @@ +# PHILOX + +Copyright 2010-2012, D. E. Shaw Research. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright + notice, this list of conditions, and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright + notice, this list of conditions, and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +* Neither the name of D. E. Shaw Research nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/local_packages/numpy-2.4.4.dist-info/licenses/numpy/random/src/sfc64/LICENSE.md b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/random/src/sfc64/LICENSE.md new file mode 100644 index 0000000..21dd604 --- /dev/null +++ b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/random/src/sfc64/LICENSE.md @@ -0,0 +1,27 @@ +# SFC64 + +## The MIT License + +Adapted from a C++ implementation of Chris Doty-Humphrey's SFC PRNG. + +https://gist.github.com/imneme/f1f7821f07cf76504a97f6537c818083 + +Copyright (c) 2018 Melissa E. O'Neill + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/local_packages/numpy-2.4.4.dist-info/licenses/numpy/random/src/splitmix64/LICENSE.md b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/random/src/splitmix64/LICENSE.md new file mode 100644 index 0000000..3c4d73b --- /dev/null +++ b/local_packages/numpy-2.4.4.dist-info/licenses/numpy/random/src/splitmix64/LICENSE.md @@ -0,0 +1,9 @@ +# SPLITMIX64 + +Written in 2015 by Sebastiano Vigna (vigna@acm.org) + +To the extent possible under law, the author has dedicated all copyright +and related and neighboring rights to this software to the public domain +worldwide. This software is distributed without any warranty. + +See . \ No newline at end of file diff --git a/local_packages/numpy.libs/libscipy_openblas64_-63c857e738469261263c764a36be9436.dll b/local_packages/numpy.libs/libscipy_openblas64_-63c857e738469261263c764a36be9436.dll new file mode 100644 index 0000000..33b36b8 Binary files /dev/null and b/local_packages/numpy.libs/libscipy_openblas64_-63c857e738469261263c764a36be9436.dll differ diff --git a/local_packages/numpy.libs/msvcp140-a4c2229bdc2a2a630acdc095b4d86008.dll b/local_packages/numpy.libs/msvcp140-a4c2229bdc2a2a630acdc095b4d86008.dll new file mode 100644 index 0000000..0a50239 Binary files /dev/null and b/local_packages/numpy.libs/msvcp140-a4c2229bdc2a2a630acdc095b4d86008.dll differ diff --git a/local_packages/numpy/__config__.py b/local_packages/numpy/__config__.py new file mode 100644 index 0000000..8cbf3ee --- /dev/null +++ b/local_packages/numpy/__config__.py @@ -0,0 +1,170 @@ +# This file is generated by numpy's build process +# It contains system_info results at the time of building this package. +from enum import Enum +from numpy._core._multiarray_umath import ( + __cpu_features__, + __cpu_baseline__, + __cpu_dispatch__, +) + +__all__ = ["show_config"] +_built_with_meson = True + + +class DisplayModes(Enum): + stdout = "stdout" + dicts = "dicts" + + +def _cleanup(d): + """ + Removes empty values in a `dict` recursively + This ensures we remove values that Meson could not provide to CONFIG + """ + if isinstance(d, dict): + return {k: _cleanup(v) for k, v in d.items() if v and _cleanup(v)} + else: + return d + + +CONFIG = _cleanup( + { + "Compilers": { + "c": { + "name": "msvc", + "linker": r"link", + "version": "19.44.35225", + "commands": r"cl", + "args": r"", + "linker args": r"", + }, + "cython": { + "name": "cython", + "linker": r"cython", + "version": "3.2.4", + "commands": r"cython", + "args": r"", + "linker args": r"", + }, + "c++": { + "name": "msvc", + "linker": r"link", + "version": "19.44.35225", + "commands": r"cl", + "args": r"", + "linker args": r"", + }, + }, + "Machine Information": { + "host": { + "cpu": "x86_64", + "family": "x86_64", + "endian": "little", + "system": "windows", + }, + "build": { + "cpu": "x86_64", + "family": "x86_64", + "endian": "little", + "system": "windows", + }, + "cross-compiled": bool("False".lower().replace("false", "")), + }, + "Build Dependencies": { + "blas": { + "name": "scipy-openblas", + "found": bool("True".lower().replace("false", "")), + "version": "0.3.31.188.0", + "detection method": "pkgconfig", + "include directory": r"C:/Users/runneradmin/AppData/Local/Temp/cibw-run-mo_5b201/cp312-win_amd64/build/venv/Lib/site-packages/scipy_openblas64/include", + "lib directory": r"C:/Users/runneradmin/AppData/Local/Temp/cibw-run-mo_5b201/cp312-win_amd64/build/venv/Lib/site-packages/scipy_openblas64/lib", + "openblas configuration": r"OpenBLAS 0.3.31.188.0 USE64BITINT DYNAMIC_ARCH NO_AFFINITY Haswell MAX_THREADS=24", + "pc file directory": r"D:/a/numpy-release/numpy-release/.openblas", + }, + "lapack": { + "name": "scipy-openblas", + "found": bool("True".lower().replace("false", "")), + "version": "0.3.31.188.0", + "detection method": "pkgconfig", + "include directory": r"C:/Users/runneradmin/AppData/Local/Temp/cibw-run-mo_5b201/cp312-win_amd64/build/venv/Lib/site-packages/scipy_openblas64/include", + "lib directory": r"C:/Users/runneradmin/AppData/Local/Temp/cibw-run-mo_5b201/cp312-win_amd64/build/venv/Lib/site-packages/scipy_openblas64/lib", + "openblas configuration": r"OpenBLAS 0.3.31.188.0 USE64BITINT DYNAMIC_ARCH NO_AFFINITY Haswell MAX_THREADS=24", + "pc file directory": r"D:/a/numpy-release/numpy-release/.openblas", + }, + }, + "Python Information": { + "path": r"C:\Users\runneradmin\AppData\Local\Temp\build-env-z5k2oeff\Scripts\python.exe", + "version": "3.12", + }, + "SIMD Extensions": { + "baseline": __cpu_baseline__, + "found": [ + feature for feature in __cpu_dispatch__ if __cpu_features__[feature] + ], + "not found": [ + feature for feature in __cpu_dispatch__ if not __cpu_features__[feature] + ], + }, + } +) + + +def _check_pyyaml(): + import yaml + + return yaml + + +def show(mode=DisplayModes.stdout.value): + """ + Show libraries and system information on which NumPy was built + and is being used + + Parameters + ---------- + mode : {`'stdout'`, `'dicts'`}, optional. + Indicates how to display the config information. + `'stdout'` prints to console, `'dicts'` returns a dictionary + of the configuration. + + Returns + ------- + out : {`dict`, `None`} + If mode is `'dicts'`, a dict is returned, else None + + See Also + -------- + get_include : Returns the directory containing NumPy C + header files. + + Notes + ----- + 1. The `'stdout'` mode will give more readable + output if ``pyyaml`` is installed + + """ + if mode == DisplayModes.stdout.value: + try: # Non-standard library, check import + yaml = _check_pyyaml() + + print(yaml.dump(CONFIG)) + except ModuleNotFoundError: + import warnings + import json + + warnings.warn("Install `pyyaml` for better output", stacklevel=1) + print(json.dumps(CONFIG, indent=2)) + elif mode == DisplayModes.dicts.value: + return CONFIG + else: + raise AttributeError( + f"Invalid `mode`, use one of: {', '.join([e.value for e in DisplayModes])}" + ) + + +def show_config(mode=DisplayModes.stdout.value): + return show(mode) + + +show_config.__doc__ = show.__doc__ +show_config.__module__ = "numpy" diff --git a/local_packages/numpy/__config__.pyi b/local_packages/numpy/__config__.pyi new file mode 100644 index 0000000..21e8b01 --- /dev/null +++ b/local_packages/numpy/__config__.pyi @@ -0,0 +1,108 @@ +from enum import Enum +from types import ModuleType +from typing import ( + Final, + Literal as L, + NotRequired, + TypedDict, + overload, + type_check_only, +) + +_CompilerConfigDictValue = TypedDict( + "_CompilerConfigDictValue", + { + "name": str, + "linker": str, + "version": str, + "commands": str, + "args": str, + "linker args": str, + }, +) +_CompilerConfigDict = TypedDict( + "_CompilerConfigDict", + { + "c": _CompilerConfigDictValue, + "cython": _CompilerConfigDictValue, + "c++": _CompilerConfigDictValue, + }, +) +_MachineInformationDict = TypedDict( + "_MachineInformationDict", + { + "host": _MachineInformationDictValue, + "build": _MachineInformationDictValue, + "cross-compiled": NotRequired[L[True]], + }, +) + +@type_check_only +class _MachineInformationDictValue(TypedDict): + cpu: str + family: str + endian: L["little", "big"] + system: str + +_BuildDependenciesDictValue = TypedDict( + "_BuildDependenciesDictValue", + { + "name": str, + "found": NotRequired[L[True]], + "version": str, + "include directory": str, + "lib directory": str, + "openblas configuration": str, + "pc file directory": str, + }, +) + +class _BuildDependenciesDict(TypedDict): + blas: _BuildDependenciesDictValue + lapack: _BuildDependenciesDictValue + +class _PythonInformationDict(TypedDict): + path: str + version: str + +_SIMDExtensionsDict = TypedDict( + "_SIMDExtensionsDict", + { + "baseline": list[str], + "found": list[str], + "not found": list[str], + }, +) + +_ConfigDict = TypedDict( + "_ConfigDict", + { + "Compilers": _CompilerConfigDict, + "Machine Information": _MachineInformationDict, + "Build Dependencies": _BuildDependenciesDict, + "Python Information": _PythonInformationDict, + "SIMD Extensions": _SIMDExtensionsDict, + }, +) + +### + +__all__ = ["show_config"] + +CONFIG: Final[_ConfigDict] = ... + +class DisplayModes(Enum): + stdout = "stdout" + dicts = "dicts" + +def _check_pyyaml() -> ModuleType: ... + +@overload +def show(mode: L["stdout"] = "stdout") -> None: ... +@overload +def show(mode: L["dicts"]) -> _ConfigDict: ... + +@overload +def show_config(mode: L["stdout"] = "stdout") -> None: ... +@overload +def show_config(mode: L["dicts"]) -> _ConfigDict: ... diff --git a/local_packages/numpy/__init__.cython-30.pxd b/local_packages/numpy/__init__.cython-30.pxd new file mode 100644 index 0000000..c718986 --- /dev/null +++ b/local_packages/numpy/__init__.cython-30.pxd @@ -0,0 +1,1242 @@ +# NumPy static imports for Cython >= 3.0 +# +# If any of the PyArray_* functions are called, import_array must be +# called first. This is done automatically by Cython 3.0+ if a call +# is not detected inside of the module. +# +# Author: Dag Sverre Seljebotn +# + +from cpython.ref cimport Py_INCREF +from cpython.object cimport PyObject, PyTypeObject, PyObject_TypeCheck +cimport libc.stdio as stdio + + +cdef extern from *: + # Leave a marker that the NumPy declarations came from NumPy itself and not from Cython. + # See https://github.com/cython/cython/issues/3573 + """ + /* Using NumPy API declarations from "numpy/__init__.cython-30.pxd" */ + """ + + +cdef extern from "numpy/arrayobject.h": + # It would be nice to use size_t and ssize_t, but ssize_t has special + # implicit conversion rules, so just use "long". + # Note: The actual type only matters for Cython promotion, so long + # is closer than int, but could lead to incorrect promotion. + # (Not to worrying, and always the status-quo.) + ctypedef signed long npy_intp + ctypedef unsigned long npy_uintp + + ctypedef unsigned char npy_bool + + ctypedef signed char npy_byte + ctypedef signed short npy_short + ctypedef signed int npy_int + ctypedef signed long npy_long + ctypedef signed long long npy_longlong + + ctypedef unsigned char npy_ubyte + ctypedef unsigned short npy_ushort + ctypedef unsigned int npy_uint + ctypedef unsigned long npy_ulong + ctypedef unsigned long long npy_ulonglong + + ctypedef float npy_float + ctypedef double npy_double + ctypedef long double npy_longdouble + + ctypedef signed char npy_int8 + ctypedef signed short npy_int16 + ctypedef signed int npy_int32 + ctypedef signed long long npy_int64 + + ctypedef unsigned char npy_uint8 + ctypedef unsigned short npy_uint16 + ctypedef unsigned int npy_uint32 + ctypedef unsigned long long npy_uint64 + + ctypedef float npy_float32 + ctypedef double npy_float64 + ctypedef long double npy_float80 + ctypedef long double npy_float96 + ctypedef long double npy_float128 + + ctypedef struct npy_cfloat: + pass + + ctypedef struct npy_cdouble: + pass + + ctypedef struct npy_clongdouble: + pass + + ctypedef struct npy_complex64: + pass + + ctypedef struct npy_complex128: + pass + + ctypedef struct npy_complex160: + pass + + ctypedef struct npy_complex192: + pass + + ctypedef struct npy_complex256: + pass + + ctypedef struct PyArray_Dims: + npy_intp *ptr + int len + + + cdef enum NPY_TYPES: + NPY_BOOL + NPY_BYTE + NPY_UBYTE + NPY_SHORT + NPY_USHORT + NPY_INT + NPY_UINT + NPY_LONG + NPY_ULONG + NPY_LONGLONG + NPY_ULONGLONG + NPY_FLOAT + NPY_DOUBLE + NPY_LONGDOUBLE + NPY_CFLOAT + NPY_CDOUBLE + NPY_CLONGDOUBLE + NPY_OBJECT + NPY_STRING + NPY_UNICODE + NPY_VSTRING + NPY_VOID + NPY_DATETIME + NPY_TIMEDELTA + NPY_NTYPES_LEGACY + NPY_NOTYPE + + NPY_INT8 + NPY_INT16 + NPY_INT32 + NPY_INT64 + NPY_UINT8 + NPY_UINT16 + NPY_UINT32 + NPY_UINT64 + NPY_FLOAT16 + NPY_FLOAT32 + NPY_FLOAT64 + NPY_FLOAT80 + NPY_FLOAT96 + NPY_FLOAT128 + NPY_COMPLEX64 + NPY_COMPLEX128 + NPY_COMPLEX160 + NPY_COMPLEX192 + NPY_COMPLEX256 + + NPY_INTP + NPY_UINTP + NPY_DEFAULT_INT # Not a compile time constant (normally)! + + ctypedef enum NPY_ORDER: + NPY_ANYORDER + NPY_CORDER + NPY_FORTRANORDER + NPY_KEEPORDER + + ctypedef enum NPY_CASTING: + NPY_NO_CASTING + NPY_EQUIV_CASTING + NPY_SAFE_CASTING + NPY_SAME_KIND_CASTING + NPY_UNSAFE_CASTING + NPY_SAME_VALUE_CASTING + + ctypedef enum NPY_CLIPMODE: + NPY_CLIP + NPY_WRAP + NPY_RAISE + + ctypedef enum NPY_SCALARKIND: + NPY_NOSCALAR, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR + + ctypedef enum NPY_SORTKIND: + NPY_QUICKSORT + NPY_HEAPSORT + NPY_MERGESORT + + ctypedef enum NPY_SEARCHSIDE: + NPY_SEARCHLEFT + NPY_SEARCHRIGHT + + enum: + NPY_ARRAY_C_CONTIGUOUS + NPY_ARRAY_F_CONTIGUOUS + NPY_ARRAY_OWNDATA + NPY_ARRAY_FORCECAST + NPY_ARRAY_ENSURECOPY + NPY_ARRAY_ENSUREARRAY + NPY_ARRAY_ELEMENTSTRIDES + NPY_ARRAY_ALIGNED + NPY_ARRAY_NOTSWAPPED + NPY_ARRAY_WRITEABLE + NPY_ARRAY_WRITEBACKIFCOPY + + NPY_ARRAY_BEHAVED + NPY_ARRAY_BEHAVED_NS + NPY_ARRAY_CARRAY + NPY_ARRAY_CARRAY_RO + NPY_ARRAY_FARRAY + NPY_ARRAY_FARRAY_RO + NPY_ARRAY_DEFAULT + + NPY_ARRAY_IN_ARRAY + NPY_ARRAY_OUT_ARRAY + NPY_ARRAY_INOUT_ARRAY + NPY_ARRAY_IN_FARRAY + NPY_ARRAY_OUT_FARRAY + NPY_ARRAY_INOUT_FARRAY + + NPY_ARRAY_UPDATE_ALL + + cdef enum: + NPY_MAXDIMS # 64 on NumPy 2.x and 32 on NumPy 1.x + NPY_RAVEL_AXIS # Used for functions like PyArray_Mean + + ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *) + + ctypedef struct PyArray_ArrayDescr: + # shape is a tuple, but Cython doesn't support "tuple shape" + # inside a non-PyObject declaration, so we have to declare it + # as just a PyObject*. + PyObject* shape + + ctypedef struct PyArray_Descr: + pass + + ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]: + # Use PyDataType_* macros when possible, however there are no macros + # for accessing some of the fields, so some are defined. + cdef PyTypeObject* typeobj + cdef char kind + cdef char type + # Numpy sometimes mutates this without warning (e.g. it'll + # sometimes change "|" to "<" in shared dtype objects on + # little-endian machines). If this matters to you, use + # PyArray_IsNativeByteOrder(dtype.byteorder) instead of + # directly accessing this field. + cdef char byteorder + cdef int type_num + + @property + cdef inline npy_intp itemsize(self) noexcept nogil: + return PyDataType_ELSIZE(self) + + @property + cdef inline npy_intp alignment(self) noexcept nogil: + return PyDataType_ALIGNMENT(self) + + # Use fields/names with care as they may be NULL. You must check + # for this using PyDataType_HASFIELDS. + @property + cdef inline object fields(self): + return PyDataType_FIELDS(self) + + @property + cdef inline tuple names(self): + return PyDataType_NAMES(self) + + # Use PyDataType_HASSUBARRAY to test whether this field is + # valid (the pointer can be NULL). Most users should access + # this field via the inline helper method PyDataType_SHAPE. + @property + cdef inline PyArray_ArrayDescr* subarray(self) noexcept nogil: + return PyDataType_SUBARRAY(self) + + @property + cdef inline npy_uint64 flags(self) noexcept nogil: + """The data types flags.""" + return PyDataType_FLAGS(self) + + + ctypedef class numpy.flatiter [object PyArrayIterObject, check_size ignore]: + # Use through macros + pass + + ctypedef class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]: + + @property + cdef inline int numiter(self) noexcept nogil: + """The number of arrays that need to be broadcast to the same shape.""" + return PyArray_MultiIter_NUMITER(self) + + @property + cdef inline npy_intp size(self) noexcept nogil: + """The total broadcasted size.""" + return PyArray_MultiIter_SIZE(self) + + @property + cdef inline npy_intp index(self) noexcept nogil: + """The current (1-d) index into the broadcasted result.""" + return PyArray_MultiIter_INDEX(self) + + @property + cdef inline int nd(self) noexcept nogil: + """The number of dimensions in the broadcasted result.""" + return PyArray_MultiIter_NDIM(self) + + @property + cdef inline npy_intp* dimensions(self) noexcept nogil: + """The shape of the broadcasted result.""" + return PyArray_MultiIter_DIMS(self) + + @property + cdef inline void** iters(self) noexcept nogil: + """An array of iterator objects that holds the iterators for the arrays to be broadcast together. + On return, the iterators are adjusted for broadcasting.""" + return PyArray_MultiIter_ITERS(self) + + + ctypedef struct PyArrayObject: + # For use in situations where ndarray can't replace PyArrayObject*, + # like PyArrayObject**. + pass + + ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]: + cdef __cythonbufferdefaults__ = {"mode": "strided"} + + # NOTE: no field declarations since direct access is deprecated since NumPy 1.7 + # Instead, we use properties that map to the corresponding C-API functions. + + @property + cdef inline PyObject* base(self) noexcept nogil: + """Returns a borrowed reference to the object owning the data/memory. + """ + return PyArray_BASE(self) + + @property + cdef inline dtype descr(self): + """Returns an owned reference to the dtype of the array. + """ + return PyArray_DESCR(self) + + @property + cdef inline int ndim(self) noexcept nogil: + """Returns the number of dimensions in the array. + """ + return PyArray_NDIM(self) + + @property + cdef inline npy_intp *shape(self) noexcept nogil: + """Returns a pointer to the dimensions/shape of the array. + The number of elements matches the number of dimensions of the array (ndim). + Can return NULL for 0-dimensional arrays. + """ + return PyArray_DIMS(self) + + @property + cdef inline npy_intp *strides(self) noexcept nogil: + """Returns a pointer to the strides of the array. + The number of elements matches the number of dimensions of the array (ndim). + """ + return PyArray_STRIDES(self) + + @property + cdef inline npy_intp size(self) noexcept nogil: + """Returns the total size (in number of elements) of the array. + """ + return PyArray_SIZE(self) + + @property + cdef inline char* data(self) noexcept nogil: + """The pointer to the data buffer as a char*. + This is provided for legacy reasons to avoid direct struct field access. + For new code that needs this access, you probably want to cast the result + of `PyArray_DATA()` instead, which returns a 'void*'. + """ + return PyArray_BYTES(self) + + + int _import_array() except -1 + # A second definition so _import_array isn't marked as used when we use it here. + # Do not use - subject to change any time. + int __pyx_import_array "_import_array"() except -1 + + # + # Macros from ndarrayobject.h + # + bint PyArray_CHKFLAGS(ndarray m, int flags) nogil + bint PyArray_IS_C_CONTIGUOUS(ndarray arr) nogil + bint PyArray_IS_F_CONTIGUOUS(ndarray arr) nogil + bint PyArray_ISCONTIGUOUS(ndarray m) nogil + bint PyArray_ISWRITEABLE(ndarray m) nogil + bint PyArray_ISALIGNED(ndarray m) nogil + + int PyArray_NDIM(ndarray) nogil + bint PyArray_ISONESEGMENT(ndarray) nogil + bint PyArray_ISFORTRAN(ndarray) nogil + int PyArray_FORTRANIF(ndarray) nogil + + void* PyArray_DATA(ndarray) nogil + char* PyArray_BYTES(ndarray) nogil + + npy_intp* PyArray_DIMS(ndarray) nogil + npy_intp* PyArray_STRIDES(ndarray) nogil + npy_intp PyArray_DIM(ndarray, size_t) nogil + npy_intp PyArray_STRIDE(ndarray, size_t) nogil + + PyObject *PyArray_BASE(ndarray) nogil # returns borrowed reference! + PyArray_Descr *PyArray_DESCR(ndarray) nogil # returns borrowed reference to dtype! + PyArray_Descr *PyArray_DTYPE(ndarray) nogil # returns borrowed reference to dtype! NP 1.7+ alias for descr. + int PyArray_FLAGS(ndarray) nogil + void PyArray_CLEARFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + void PyArray_ENABLEFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + npy_intp PyArray_ITEMSIZE(ndarray) nogil + int PyArray_TYPE(ndarray arr) nogil + + object PyArray_GETITEM(ndarray arr, void *itemptr) + int PyArray_SETITEM(ndarray arr, void *itemptr, object obj) except -1 + + bint PyTypeNum_ISBOOL(int) nogil + bint PyTypeNum_ISUNSIGNED(int) nogil + bint PyTypeNum_ISSIGNED(int) nogil + bint PyTypeNum_ISINTEGER(int) nogil + bint PyTypeNum_ISFLOAT(int) nogil + bint PyTypeNum_ISNUMBER(int) nogil + bint PyTypeNum_ISSTRING(int) nogil + bint PyTypeNum_ISCOMPLEX(int) nogil + bint PyTypeNum_ISFLEXIBLE(int) nogil + bint PyTypeNum_ISUSERDEF(int) nogil + bint PyTypeNum_ISEXTENDED(int) nogil + bint PyTypeNum_ISOBJECT(int) nogil + + npy_intp PyDataType_ELSIZE(dtype) nogil + npy_intp PyDataType_ALIGNMENT(dtype) nogil + PyObject* PyDataType_METADATA(dtype) nogil + PyArray_ArrayDescr* PyDataType_SUBARRAY(dtype) nogil + PyObject* PyDataType_NAMES(dtype) nogil + PyObject* PyDataType_FIELDS(dtype) nogil + + bint PyDataType_ISBOOL(dtype) nogil + bint PyDataType_ISUNSIGNED(dtype) nogil + bint PyDataType_ISSIGNED(dtype) nogil + bint PyDataType_ISINTEGER(dtype) nogil + bint PyDataType_ISFLOAT(dtype) nogil + bint PyDataType_ISNUMBER(dtype) nogil + bint PyDataType_ISSTRING(dtype) nogil + bint PyDataType_ISCOMPLEX(dtype) nogil + bint PyDataType_ISFLEXIBLE(dtype) nogil + bint PyDataType_ISUSERDEF(dtype) nogil + bint PyDataType_ISEXTENDED(dtype) nogil + bint PyDataType_ISOBJECT(dtype) nogil + bint PyDataType_HASFIELDS(dtype) nogil + bint PyDataType_HASSUBARRAY(dtype) nogil + npy_uint64 PyDataType_FLAGS(dtype) nogil + + bint PyArray_ISBOOL(ndarray) nogil + bint PyArray_ISUNSIGNED(ndarray) nogil + bint PyArray_ISSIGNED(ndarray) nogil + bint PyArray_ISINTEGER(ndarray) nogil + bint PyArray_ISFLOAT(ndarray) nogil + bint PyArray_ISNUMBER(ndarray) nogil + bint PyArray_ISSTRING(ndarray) nogil + bint PyArray_ISCOMPLEX(ndarray) nogil + bint PyArray_ISFLEXIBLE(ndarray) nogil + bint PyArray_ISUSERDEF(ndarray) nogil + bint PyArray_ISEXTENDED(ndarray) nogil + bint PyArray_ISOBJECT(ndarray) nogil + bint PyArray_HASFIELDS(ndarray) nogil + + bint PyArray_ISVARIABLE(ndarray) nogil + + bint PyArray_SAFEALIGNEDCOPY(ndarray) nogil + bint PyArray_ISNBO(char) nogil # works on ndarray.byteorder + bint PyArray_IsNativeByteOrder(char) nogil # works on ndarray.byteorder + bint PyArray_ISNOTSWAPPED(ndarray) nogil + bint PyArray_ISBYTESWAPPED(ndarray) nogil + + bint PyArray_FLAGSWAP(ndarray, int) nogil + + bint PyArray_ISCARRAY(ndarray) nogil + bint PyArray_ISCARRAY_RO(ndarray) nogil + bint PyArray_ISFARRAY(ndarray) nogil + bint PyArray_ISFARRAY_RO(ndarray) nogil + bint PyArray_ISBEHAVED(ndarray) nogil + bint PyArray_ISBEHAVED_RO(ndarray) nogil + + + bint PyDataType_ISNOTSWAPPED(dtype) nogil + bint PyDataType_ISBYTESWAPPED(dtype) nogil + + bint PyArray_DescrCheck(object) + + bint PyArray_Check(object) + bint PyArray_CheckExact(object) + + # Cannot be supported due to out arg: + # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&) + # bint PyArray_HasArrayInterface(op, out) + + + bint PyArray_IsZeroDim(object) + # Cannot be supported due to ## ## in macro: + # bint PyArray_IsScalar(object, verbatim work) + bint PyArray_CheckScalar(object) + bint PyArray_IsPythonNumber(object) + bint PyArray_IsPythonScalar(object) + bint PyArray_IsAnyScalar(object) + bint PyArray_CheckAnyScalar(object) + + ndarray PyArray_GETCONTIGUOUS(ndarray) + bint PyArray_SAMESHAPE(ndarray, ndarray) nogil + npy_intp PyArray_SIZE(ndarray) nogil + npy_intp PyArray_NBYTES(ndarray) nogil + + object PyArray_FROM_O(object) + object PyArray_FROM_OF(object m, int flags) + object PyArray_FROM_OT(object m, int type) + object PyArray_FROM_OTF(object m, int type, int flags) + object PyArray_FROMANY(object m, int type, int min, int max, int flags) + object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran) + object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran) + void PyArray_FILLWBYTE(ndarray, int val) + object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth) + unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2) + bint PyArray_EquivByteorders(int b1, int b2) nogil + object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum) + object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data) + #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr) + object PyArray_ToScalar(void* data, ndarray arr) + + void* PyArray_GETPTR1(ndarray m, npy_intp i) nogil + void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) nogil + void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil + void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil + + # Cannot be supported due to out arg + # void PyArray_DESCR_REPLACE(descr) + + + object PyArray_Copy(ndarray) + object PyArray_FromObject(object op, int type, int min_depth, int max_depth) + object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth) + object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth) + + object PyArray_Cast(ndarray mp, int type_num) + object PyArray_Take(ndarray ap, object items, int axis) + object PyArray_Put(ndarray ap, object items, object values) + + void PyArray_ITER_RESET(flatiter it) nogil + void PyArray_ITER_NEXT(flatiter it) nogil + void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil + void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil + void* PyArray_ITER_DATA(flatiter it) nogil + bint PyArray_ITER_NOTDONE(flatiter it) nogil + + void PyArray_MultiIter_RESET(broadcast multi) nogil + void PyArray_MultiIter_NEXT(broadcast multi) nogil + void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil + void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil + void* PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil + void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil + bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil + npy_intp PyArray_MultiIter_SIZE(broadcast multi) nogil + int PyArray_MultiIter_NDIM(broadcast multi) nogil + npy_intp PyArray_MultiIter_INDEX(broadcast multi) nogil + int PyArray_MultiIter_NUMITER(broadcast multi) nogil + npy_intp* PyArray_MultiIter_DIMS(broadcast multi) nogil + void** PyArray_MultiIter_ITERS(broadcast multi) nogil + + # Functions from __multiarray_api.h + + # Functions taking dtype and returning object/ndarray are disabled + # for now as they steal dtype references. I'm conservative and disable + # more than is probably needed until it can be checked further. + int PyArray_INCREF (ndarray) except * # uses PyArray_Item_INCREF... + int PyArray_XDECREF (ndarray) except * # uses PyArray_Item_DECREF... + dtype PyArray_DescrFromType (int) + object PyArray_TypeObjectFromType (int) + char * PyArray_Zero (ndarray) + char * PyArray_One (ndarray) + #object PyArray_CastToType (ndarray, dtype, int) + int PyArray_CanCastSafely (int, int) # writes errors + npy_bool PyArray_CanCastTo (dtype, dtype) # writes errors + int PyArray_ObjectType (object, int) except 0 + dtype PyArray_DescrFromObject (object, dtype) + #ndarray* PyArray_ConvertToCommonType (object, int *) + dtype PyArray_DescrFromScalar (object) + dtype PyArray_DescrFromTypeObject (object) + npy_intp PyArray_Size (object) + #object PyArray_Scalar (void *, dtype, object) + #object PyArray_FromScalar (object, dtype) + void PyArray_ScalarAsCtype (object, void *) + #int PyArray_CastScalarToCtype (object, void *, dtype) + #int PyArray_CastScalarDirect (object, dtype, void *, int) + #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int) + #object PyArray_FromAny (object, dtype, int, int, int, object) + object PyArray_EnsureArray (object) + object PyArray_EnsureAnyArray (object) + #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *) + #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *) + #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp) + #object PyArray_FromIter (object, dtype, npy_intp) + object PyArray_Return (ndarray) + #object PyArray_GetField (ndarray, dtype, int) + #int PyArray_SetField (ndarray, dtype, int, object) except -1 + object PyArray_Byteswap (ndarray, npy_bool) + object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER) + int PyArray_CopyInto (ndarray, ndarray) except -1 + int PyArray_CopyAnyInto (ndarray, ndarray) except -1 + int PyArray_CopyObject (ndarray, object) except -1 + object PyArray_NewCopy (ndarray, NPY_ORDER) + object PyArray_ToList (ndarray) + object PyArray_ToString (ndarray, NPY_ORDER) + int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *) except -1 + int PyArray_Dump (object, object, int) except -1 + object PyArray_Dumps (object, int) + int PyArray_ValidType (int) # Cannot error + void PyArray_UpdateFlags (ndarray, int) + object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object) + #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object) + #dtype PyArray_DescrNew (dtype) + dtype PyArray_DescrNewFromType (int) + double PyArray_GetPriority (object, double) # clears errors as of 1.25 + object PyArray_IterNew (object) + object PyArray_MultiIterNew (int, ...) + + int PyArray_PyIntAsInt (object) except? -1 + npy_intp PyArray_PyIntAsIntp (object) + int PyArray_Broadcast (broadcast) except -1 + int PyArray_FillWithScalar (ndarray, object) except -1 + npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *) + dtype PyArray_DescrNewByteorder (dtype, char) + object PyArray_IterAllButAxis (object, int *) + #object PyArray_CheckFromAny (object, dtype, int, int, int, object) + #object PyArray_FromArray (ndarray, dtype, int) + object PyArray_FromInterface (object) + object PyArray_FromStructInterface (object) + #object PyArray_FromArrayAttr (object, dtype, object) + #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*) + int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND) + npy_bool PyArray_CanCastScalar (type, type) + int PyArray_RemoveSmallest (broadcast) except -1 + int PyArray_ElementStrides (object) + void PyArray_Item_INCREF (char *, dtype) except * + void PyArray_Item_XDECREF (char *, dtype) except * + object PyArray_Transpose (ndarray, PyArray_Dims *) + object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE) + object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE) + object PyArray_PutMask (ndarray, object, object) + object PyArray_Repeat (ndarray, object, int) + object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE) + int PyArray_Sort (ndarray, int, NPY_SORTKIND) except -1 + object PyArray_ArgSort (ndarray, int, NPY_SORTKIND) + object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE, PyObject *) + object PyArray_ArgMax (ndarray, int, ndarray) + object PyArray_ArgMin (ndarray, int, ndarray) + object PyArray_Reshape (ndarray, object) + object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER) + object PyArray_Squeeze (ndarray) + #object PyArray_View (ndarray, dtype, type) + object PyArray_SwapAxes (ndarray, int, int) + object PyArray_Max (ndarray, int, ndarray) + object PyArray_Min (ndarray, int, ndarray) + object PyArray_Ptp (ndarray, int, ndarray) + object PyArray_Mean (ndarray, int, int, ndarray) + object PyArray_Trace (ndarray, int, int, int, int, ndarray) + object PyArray_Diagonal (ndarray, int, int, int) + object PyArray_Clip (ndarray, object, object, ndarray) + object PyArray_Conjugate (ndarray, ndarray) + object PyArray_Nonzero (ndarray) + object PyArray_Std (ndarray, int, int, ndarray, int) + object PyArray_Sum (ndarray, int, int, ndarray) + object PyArray_CumSum (ndarray, int, int, ndarray) + object PyArray_Prod (ndarray, int, int, ndarray) + object PyArray_CumProd (ndarray, int, int, ndarray) + object PyArray_All (ndarray, int, ndarray) + object PyArray_Any (ndarray, int, ndarray) + object PyArray_Compress (ndarray, object, int, ndarray) + object PyArray_Flatten (ndarray, NPY_ORDER) + object PyArray_Ravel (ndarray, NPY_ORDER) + npy_intp PyArray_MultiplyList (npy_intp *, int) + int PyArray_MultiplyIntList (int *, int) + void * PyArray_GetPtr (ndarray, npy_intp*) + int PyArray_CompareLists (npy_intp *, npy_intp *, int) + #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype) + int PyArray_Free (object, void *) + #int PyArray_Converter (object, object*) + int PyArray_IntpFromSequence (object, npy_intp *, int) except -1 + object PyArray_Concatenate (object, int) + object PyArray_InnerProduct (object, object) + object PyArray_MatrixProduct (object, object) + object PyArray_Correlate (object, object, int) + #int PyArray_DescrConverter (object, dtype*) except 0 + #int PyArray_DescrConverter2 (object, dtype*) except 0 + int PyArray_IntpConverter (object, PyArray_Dims *) except 0 + #int PyArray_BufferConverter (object, chunk) except 0 + int PyArray_AxisConverter (object, int *) except 0 + int PyArray_BoolConverter (object, npy_bool *) except 0 + int PyArray_ByteorderConverter (object, char *) except 0 + int PyArray_OrderConverter (object, NPY_ORDER *) except 0 + unsigned char PyArray_EquivTypes (dtype, dtype) # clears errors + #object PyArray_Zeros (int, npy_intp *, dtype, int) + #object PyArray_Empty (int, npy_intp *, dtype, int) + object PyArray_Where (object, object, object) + object PyArray_Arange (double, double, double, int) + #object PyArray_ArangeObj (object, object, object, dtype) + int PyArray_SortkindConverter (object, NPY_SORTKIND *) except 0 + object PyArray_LexSort (object, int) + object PyArray_Round (ndarray, int, ndarray) + unsigned char PyArray_EquivTypenums (int, int) + int PyArray_RegisterDataType (dtype) except -1 + int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *) except -1 + int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND) except -1 + #void PyArray_InitArrFuncs (PyArray_ArrFuncs *) + object PyArray_IntTupleFromIntp (int, npy_intp *) + int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *) except 0 + #int PyArray_OutputConverter (object, ndarray*) except 0 + object PyArray_BroadcastToShape (object, npy_intp *, int) + #int PyArray_DescrAlignConverter (object, dtype*) except 0 + #int PyArray_DescrAlignConverter2 (object, dtype*) except 0 + int PyArray_SearchsideConverter (object, void *) except 0 + object PyArray_CheckAxis (ndarray, int *, int) + npy_intp PyArray_OverflowMultiplyList (npy_intp *, int) + int PyArray_SetBaseObject(ndarray, base) except -1 # NOTE: steals a reference to base! Use "set_array_base()" instead. + + # The memory handler functions require the NumPy 1.22 API + # and may require defining NPY_TARGET_VERSION + ctypedef struct PyDataMemAllocator: + void *ctx + void* (*malloc) (void *ctx, size_t size) + void* (*calloc) (void *ctx, size_t nelem, size_t elsize) + void* (*realloc) (void *ctx, void *ptr, size_t new_size) + void (*free) (void *ctx, void *ptr, size_t size) + + ctypedef struct PyDataMem_Handler: + char* name + npy_uint8 version + PyDataMemAllocator allocator + + object PyDataMem_SetHandler(object handler) + object PyDataMem_GetHandler() + + # additional datetime related functions are defined below + + +# Typedefs that matches the runtime dtype objects in +# the numpy module. + +# The ones that are commented out needs an IFDEF function +# in Cython to enable them only on the right systems. + +ctypedef npy_int8 int8_t +ctypedef npy_int16 int16_t +ctypedef npy_int32 int32_t +ctypedef npy_int64 int64_t + +ctypedef npy_uint8 uint8_t +ctypedef npy_uint16 uint16_t +ctypedef npy_uint32 uint32_t +ctypedef npy_uint64 uint64_t + +ctypedef npy_float32 float32_t +ctypedef npy_float64 float64_t +#ctypedef npy_float80 float80_t +#ctypedef npy_float128 float128_t + +ctypedef float complex complex64_t +ctypedef double complex complex128_t + +ctypedef npy_longlong longlong_t +ctypedef npy_ulonglong ulonglong_t + +ctypedef npy_intp intp_t +ctypedef npy_uintp uintp_t + +ctypedef npy_double float_t +ctypedef npy_double double_t +ctypedef npy_longdouble longdouble_t + +ctypedef float complex cfloat_t +ctypedef double complex cdouble_t +ctypedef double complex complex_t +ctypedef long double complex clongdouble_t + +cdef inline object PyArray_MultiIterNew1(a): + return PyArray_MultiIterNew(1, a) + +cdef inline object PyArray_MultiIterNew2(a, b): + return PyArray_MultiIterNew(2, a, b) + +cdef inline object PyArray_MultiIterNew3(a, b, c): + return PyArray_MultiIterNew(3, a, b, c) + +cdef inline object PyArray_MultiIterNew4(a, b, c, d): + return PyArray_MultiIterNew(4, a, b, c, d) + +cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + return PyArray_MultiIterNew(5, a, b, c, d, e) + +cdef inline tuple PyDataType_SHAPE(dtype d): + if PyDataType_HASSUBARRAY(d): + return d.subarray.shape + else: + return () + + +cdef extern from "numpy/ndarrayobject.h": + PyTypeObject PyTimedeltaArrType_Type + PyTypeObject PyDatetimeArrType_Type + ctypedef int64_t npy_timedelta + ctypedef int64_t npy_datetime + +cdef extern from "numpy/ndarraytypes.h": + ctypedef struct PyArray_DatetimeMetaData: + NPY_DATETIMEUNIT base + int64_t num + + ctypedef struct npy_datetimestruct: + int64_t year + int32_t month, day, hour, min, sec, us, ps, as + + # Iterator API added in v1.6 + # + # These don't match the definition in the C API because Cython can't wrap + # function pointers that return functions. + # https://github.com/cython/cython/issues/6720 + ctypedef int (*NpyIter_IterNextFunc "NpyIter_IterNextFunc *")(NpyIter* it) noexcept nogil + ctypedef void (*NpyIter_GetMultiIndexFunc "NpyIter_GetMultiIndexFunc *")(NpyIter* it, npy_intp* outcoords) noexcept nogil + + +cdef extern from "numpy/arrayscalars.h": + + # abstract types + ctypedef class numpy.generic [object PyObject]: + pass + ctypedef class numpy.number [object PyObject]: + pass + ctypedef class numpy.integer [object PyObject]: + pass + ctypedef class numpy.signedinteger [object PyObject]: + pass + ctypedef class numpy.unsignedinteger [object PyObject]: + pass + ctypedef class numpy.inexact [object PyObject]: + pass + ctypedef class numpy.floating [object PyObject]: + pass + ctypedef class numpy.complexfloating [object PyObject]: + pass + ctypedef class numpy.flexible [object PyObject]: + pass + ctypedef class numpy.character [object PyObject]: + pass + + ctypedef struct PyDatetimeScalarObject: + # PyObject_HEAD + npy_datetime obval + PyArray_DatetimeMetaData obmeta + + ctypedef struct PyTimedeltaScalarObject: + # PyObject_HEAD + npy_timedelta obval + PyArray_DatetimeMetaData obmeta + + ctypedef enum NPY_DATETIMEUNIT: + NPY_FR_Y + NPY_FR_M + NPY_FR_W + NPY_FR_D + NPY_FR_B + NPY_FR_h + NPY_FR_m + NPY_FR_s + NPY_FR_ms + NPY_FR_us + NPY_FR_ns + NPY_FR_ps + NPY_FR_fs + NPY_FR_as + NPY_FR_GENERIC + + +cdef extern from "numpy/arrayobject.h": + # These are part of the C-API defined in `__multiarray_api.h` + + # NumPy internal definitions in datetime_strings.c: + int get_datetime_iso_8601_strlen "NpyDatetime_GetDatetimeISO8601StrLen" ( + int local, NPY_DATETIMEUNIT base) + int make_iso_8601_datetime "NpyDatetime_MakeISO8601Datetime" ( + npy_datetimestruct *dts, char *outstr, npy_intp outlen, + int local, int utc, NPY_DATETIMEUNIT base, int tzoffset, + NPY_CASTING casting) except -1 + + # NumPy internal definition in datetime.c: + # May return 1 to indicate that object does not appear to be a datetime + # (returns 0 on success). + int convert_pydatetime_to_datetimestruct "NpyDatetime_ConvertPyDateTimeToDatetimeStruct" ( + PyObject *obj, npy_datetimestruct *out, + NPY_DATETIMEUNIT *out_bestunit, int apply_tzinfo) except -1 + int convert_datetime64_to_datetimestruct "NpyDatetime_ConvertDatetime64ToDatetimeStruct" ( + PyArray_DatetimeMetaData *meta, npy_datetime dt, + npy_datetimestruct *out) except -1 + int convert_datetimestruct_to_datetime64 "NpyDatetime_ConvertDatetimeStructToDatetime64"( + PyArray_DatetimeMetaData *meta, const npy_datetimestruct *dts, + npy_datetime *out) except -1 + + +# +# ufunc API +# + +cdef extern from "numpy/ufuncobject.h": + + ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *) + + ctypedef class numpy.ufunc [object PyUFuncObject, check_size ignore]: + cdef: + int nin, nout, nargs + int identity + PyUFuncGenericFunction *functions + void **data + int ntypes + int check_return + char *name + char *types + char *doc + void *ptr + PyObject *obj + PyObject *userloops + + cdef enum: + PyUFunc_Zero + PyUFunc_One + PyUFunc_None + # deprecated + UFUNC_FPE_DIVIDEBYZERO + UFUNC_FPE_OVERFLOW + UFUNC_FPE_UNDERFLOW + UFUNC_FPE_INVALID + # use these instead + NPY_FPE_DIVIDEBYZERO + NPY_FPE_OVERFLOW + NPY_FPE_UNDERFLOW + NPY_FPE_INVALID + + + object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *, + void **, char *, int, int, int, int, char *, char *, int) + int PyUFunc_RegisterLoopForType(ufunc, int, + PyUFuncGenericFunction, int *, void *) except -1 + void PyUFunc_f_f_As_d_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_d_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_f_f \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_g_g \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_F_F_As_D_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_F_F \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_D_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_G_G \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_O_O \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_ff_f_As_dd_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_ff_f \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_dd_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_gg_g \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_FF_F_As_DD_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_DD_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_FF_F \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_GG_G \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_OO_O \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_O_O_method \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_OO_O_method \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_On_Om \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_clearfperr() + int PyUFunc_getfperr() + int PyUFunc_ReplaceLoopBySignature \ + (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *) + object PyUFunc_FromFuncAndDataAndSignature \ + (PyUFuncGenericFunction *, void **, char *, int, int, int, + int, char *, char *, int, char *) + + int _import_umath() except -1 + +cdef inline void set_array_base(ndarray arr, object base) except *: + Py_INCREF(base) # important to do this before stealing the reference below! + PyArray_SetBaseObject(arr, base) + +cdef inline object get_array_base(ndarray arr): + base = PyArray_BASE(arr) + if base is NULL: + return None + return base + +# Versions of the import_* functions which are more suitable for +# Cython code. +cdef inline int import_array() except -1: + try: + __pyx_import_array() + except Exception: + raise ImportError("numpy._core.multiarray failed to import") + +cdef inline int import_umath() except -1: + try: + _import_umath() + except Exception: + raise ImportError("numpy._core.umath failed to import") + +cdef inline int import_ufunc() except -1: + try: + _import_umath() + except Exception: + raise ImportError("numpy._core.umath failed to import") + + +cdef inline bint is_timedelta64_object(object obj) noexcept: + """ + Cython equivalent of `isinstance(obj, np.timedelta64)` + + Parameters + ---------- + obj : object + + Returns + ------- + bool + """ + return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type) + + +cdef inline bint is_datetime64_object(object obj) noexcept: + """ + Cython equivalent of `isinstance(obj, np.datetime64)` + + Parameters + ---------- + obj : object + + Returns + ------- + bool + """ + return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type) + + +cdef inline npy_datetime get_datetime64_value(object obj) noexcept nogil: + """ + returns the int64 value underlying scalar numpy datetime64 object + + Note that to interpret this as a datetime, the corresponding unit is + also needed. That can be found using `get_datetime64_unit`. + """ + return (obj).obval + + +cdef inline npy_timedelta get_timedelta64_value(object obj) noexcept nogil: + """ + returns the int64 value underlying scalar numpy timedelta64 object + """ + return (obj).obval + + +cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) noexcept nogil: + """ + returns the unit part of the dtype for a numpy datetime64 object. + """ + return (obj).obmeta.base + + +cdef extern from "numpy/arrayobject.h": + + ctypedef struct NpyIter: + pass + + cdef enum: + NPY_FAIL + NPY_SUCCEED + + cdef enum: + # Track an index representing C order + NPY_ITER_C_INDEX + # Track an index representing Fortran order + NPY_ITER_F_INDEX + # Track a multi-index + NPY_ITER_MULTI_INDEX + # User code external to the iterator does the 1-dimensional innermost loop + NPY_ITER_EXTERNAL_LOOP + # Convert all the operands to a common data type + NPY_ITER_COMMON_DTYPE + # Operands may hold references, requiring API access during iteration + NPY_ITER_REFS_OK + # Zero-sized operands should be permitted, iteration checks IterSize for 0 + NPY_ITER_ZEROSIZE_OK + # Permits reductions (size-0 stride with dimension size > 1) + NPY_ITER_REDUCE_OK + # Enables sub-range iteration + NPY_ITER_RANGED + # Enables buffering + NPY_ITER_BUFFERED + # When buffering is enabled, grows the inner loop if possible + NPY_ITER_GROWINNER + # Delay allocation of buffers until first Reset* call + NPY_ITER_DELAY_BUFALLOC + # When NPY_KEEPORDER is specified, disable reversing negative-stride axes + NPY_ITER_DONT_NEGATE_STRIDES + NPY_ITER_COPY_IF_OVERLAP + # The operand will be read from and written to + NPY_ITER_READWRITE + # The operand will only be read from + NPY_ITER_READONLY + # The operand will only be written to + NPY_ITER_WRITEONLY + # The operand's data must be in native byte order + NPY_ITER_NBO + # The operand's data must be aligned + NPY_ITER_ALIGNED + # The operand's data must be contiguous (within the inner loop) + NPY_ITER_CONTIG + # The operand may be copied to satisfy requirements + NPY_ITER_COPY + # The operand may be copied with WRITEBACKIFCOPY to satisfy requirements + NPY_ITER_UPDATEIFCOPY + # Allocate the operand if it is NULL + NPY_ITER_ALLOCATE + # If an operand is allocated, don't use any subtype + NPY_ITER_NO_SUBTYPE + # This is a virtual array slot, operand is NULL but temporary data is there + NPY_ITER_VIRTUAL + # Require that the dimension match the iterator dimensions exactly + NPY_ITER_NO_BROADCAST + # A mask is being used on this array, affects buffer -> array copy + NPY_ITER_WRITEMASKED + # This array is the mask for all WRITEMASKED operands + NPY_ITER_ARRAYMASK + # Assume iterator order data access for COPY_IF_OVERLAP + NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE + + # construction and destruction functions + NpyIter* NpyIter_New(ndarray arr, npy_uint32 flags, NPY_ORDER order, + NPY_CASTING casting, dtype datatype) except NULL + NpyIter* NpyIter_MultiNew(npy_intp nop, PyArrayObject** op, npy_uint32 flags, + NPY_ORDER order, NPY_CASTING casting, npy_uint32* + op_flags, PyArray_Descr** op_dtypes) except NULL + NpyIter* NpyIter_AdvancedNew(npy_intp nop, PyArrayObject** op, + npy_uint32 flags, NPY_ORDER order, + NPY_CASTING casting, npy_uint32* op_flags, + PyArray_Descr** op_dtypes, int oa_ndim, + int** op_axes, const npy_intp* itershape, + npy_intp buffersize) except NULL + NpyIter* NpyIter_Copy(NpyIter* it) except NULL + int NpyIter_RemoveAxis(NpyIter* it, int axis) except NPY_FAIL + int NpyIter_RemoveMultiIndex(NpyIter* it) except NPY_FAIL + int NpyIter_EnableExternalLoop(NpyIter* it) except NPY_FAIL + int NpyIter_Deallocate(NpyIter* it) except NPY_FAIL + int NpyIter_Reset(NpyIter* it, char** errmsg) except NPY_FAIL + int NpyIter_ResetToIterIndexRange(NpyIter* it, npy_intp istart, + npy_intp iend, char** errmsg) except NPY_FAIL + int NpyIter_ResetBasePointers(NpyIter* it, char** baseptrs, char** errmsg) except NPY_FAIL + int NpyIter_GotoMultiIndex(NpyIter* it, const npy_intp* multi_index) except NPY_FAIL + int NpyIter_GotoIndex(NpyIter* it, npy_intp index) except NPY_FAIL + npy_intp NpyIter_GetIterSize(NpyIter* it) nogil + npy_intp NpyIter_GetIterIndex(NpyIter* it) nogil + void NpyIter_GetIterIndexRange(NpyIter* it, npy_intp* istart, + npy_intp* iend) nogil + int NpyIter_GotoIterIndex(NpyIter* it, npy_intp iterindex) except NPY_FAIL + npy_bool NpyIter_HasDelayedBufAlloc(NpyIter* it) nogil + npy_bool NpyIter_HasExternalLoop(NpyIter* it) nogil + npy_bool NpyIter_HasMultiIndex(NpyIter* it) nogil + npy_bool NpyIter_HasIndex(NpyIter* it) nogil + npy_bool NpyIter_RequiresBuffering(NpyIter* it) nogil + npy_bool NpyIter_IsBuffered(NpyIter* it) nogil + npy_bool NpyIter_IsGrowInner(NpyIter* it) nogil + npy_intp NpyIter_GetBufferSize(NpyIter* it) nogil + int NpyIter_GetNDim(NpyIter* it) nogil + int NpyIter_GetNOp(NpyIter* it) nogil + npy_intp* NpyIter_GetAxisStrideArray(NpyIter* it, int axis) except NULL + int NpyIter_GetShape(NpyIter* it, npy_intp* outshape) nogil + PyArray_Descr** NpyIter_GetDescrArray(NpyIter* it) + PyArrayObject** NpyIter_GetOperandArray(NpyIter* it) + ndarray NpyIter_GetIterView(NpyIter* it, npy_intp i) + void NpyIter_GetReadFlags(NpyIter* it, char* outreadflags) + void NpyIter_GetWriteFlags(NpyIter* it, char* outwriteflags) + int NpyIter_CreateCompatibleStrides(NpyIter* it, npy_intp itemsize, + npy_intp* outstrides) except NPY_FAIL + npy_bool NpyIter_IsFirstVisit(NpyIter* it, int iop) nogil + # functions for iterating an NpyIter object + # + # These don't match the definition in the C API because Cython can't wrap + # function pointers that return functions. + NpyIter_IterNextFunc NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL + NpyIter_GetMultiIndexFunc NpyIter_GetGetMultiIndex(NpyIter* it, + char** errmsg) except NULL + char** NpyIter_GetDataPtrArray(NpyIter* it) nogil + char** NpyIter_GetInitialDataPtrArray(NpyIter* it) nogil + npy_intp* NpyIter_GetIndexPtr(NpyIter* it) + npy_intp* NpyIter_GetInnerStrideArray(NpyIter* it) nogil + npy_intp* NpyIter_GetInnerLoopSizePtr(NpyIter* it) nogil + void NpyIter_GetInnerFixedStrideArray(NpyIter* it, npy_intp* outstrides) nogil + npy_bool NpyIter_IterationNeedsAPI(NpyIter* it) nogil + void NpyIter_DebugPrint(NpyIter* it) + +# NpyString API +cdef extern from "numpy/ndarraytypes.h": + ctypedef struct npy_string_allocator: + pass + + ctypedef struct npy_packed_static_string: + pass + + ctypedef struct npy_static_string: + size_t size + const char *buf + + ctypedef struct PyArray_StringDTypeObject: + PyArray_Descr base + PyObject *na_object + char coerce + char has_nan_na + char has_string_na + char array_owned + npy_static_string default_string + npy_static_string na_name + npy_string_allocator *allocator + +cdef extern from "numpy/arrayobject.h": + npy_string_allocator *NpyString_acquire_allocator(const PyArray_StringDTypeObject *descr) + void NpyString_acquire_allocators(size_t n_descriptors, PyArray_Descr *const descrs[], npy_string_allocator *allocators[]) + void NpyString_release_allocator(npy_string_allocator *allocator) + void NpyString_release_allocators(size_t length, npy_string_allocator *allocators[]) + int NpyString_load(npy_string_allocator *allocator, const npy_packed_static_string *packed_string, npy_static_string *unpacked_string) + int NpyString_pack_null(npy_string_allocator *allocator, npy_packed_static_string *packed_string) + int NpyString_pack(npy_string_allocator *allocator, npy_packed_static_string *packed_string, const char *buf, size_t size) diff --git a/local_packages/numpy/__init__.pxd b/local_packages/numpy/__init__.pxd new file mode 100644 index 0000000..40a24b6 --- /dev/null +++ b/local_packages/numpy/__init__.pxd @@ -0,0 +1,1155 @@ +# NumPy static imports for Cython < 3.0 +# +# If any of the PyArray_* functions are called, import_array must be +# called first. +# +# Author: Dag Sverre Seljebotn +# + +DEF _buffer_format_string_len = 255 + +cimport cpython.buffer as pybuf +from cpython.ref cimport Py_INCREF +from cpython.mem cimport PyObject_Malloc, PyObject_Free +from cpython.object cimport PyObject, PyTypeObject +from cpython.buffer cimport PyObject_GetBuffer +from cpython.type cimport type +cimport libc.stdio as stdio + + +cdef extern from *: + # Leave a marker that the NumPy declarations came from NumPy itself and not from Cython. + # See https://github.com/cython/cython/issues/3573 + """ + /* Using NumPy API declarations from "numpy/__init__.pxd" */ + """ + + +cdef extern from "Python.h": + ctypedef int Py_intptr_t + bint PyObject_TypeCheck(object obj, PyTypeObject* type) + +cdef extern from "numpy/arrayobject.h": + # It would be nice to use size_t and ssize_t, but ssize_t has special + # implicit conversion rules, so just use "long". + # Note: The actual type only matters for Cython promotion, so long + # is closer than int, but could lead to incorrect promotion. + # (Not to worrying, and always the status-quo.) + ctypedef signed long npy_intp + ctypedef unsigned long npy_uintp + + ctypedef unsigned char npy_bool + + ctypedef signed char npy_byte + ctypedef signed short npy_short + ctypedef signed int npy_int + ctypedef signed long npy_long + ctypedef signed long long npy_longlong + + ctypedef unsigned char npy_ubyte + ctypedef unsigned short npy_ushort + ctypedef unsigned int npy_uint + ctypedef unsigned long npy_ulong + ctypedef unsigned long long npy_ulonglong + + ctypedef float npy_float + ctypedef double npy_double + ctypedef long double npy_longdouble + + ctypedef signed char npy_int8 + ctypedef signed short npy_int16 + ctypedef signed int npy_int32 + ctypedef signed long long npy_int64 + + ctypedef unsigned char npy_uint8 + ctypedef unsigned short npy_uint16 + ctypedef unsigned int npy_uint32 + ctypedef unsigned long long npy_uint64 + + ctypedef float npy_float32 + ctypedef double npy_float64 + ctypedef long double npy_float80 + ctypedef long double npy_float96 + ctypedef long double npy_float128 + + ctypedef struct npy_cfloat: + pass + + ctypedef struct npy_cdouble: + pass + + ctypedef struct npy_clongdouble: + pass + + ctypedef struct npy_complex64: + pass + + ctypedef struct npy_complex128: + pass + + ctypedef struct npy_complex160: + pass + + ctypedef struct npy_complex192: + pass + + ctypedef struct npy_complex256: + pass + + ctypedef struct PyArray_Dims: + npy_intp *ptr + int len + + + cdef enum NPY_TYPES: + NPY_BOOL + NPY_BYTE + NPY_UBYTE + NPY_SHORT + NPY_USHORT + NPY_INT + NPY_UINT + NPY_LONG + NPY_ULONG + NPY_LONGLONG + NPY_ULONGLONG + NPY_FLOAT + NPY_DOUBLE + NPY_LONGDOUBLE + NPY_CFLOAT + NPY_CDOUBLE + NPY_CLONGDOUBLE + NPY_OBJECT + NPY_STRING + NPY_UNICODE + NPY_VSTRING + NPY_VOID + NPY_DATETIME + NPY_TIMEDELTA + NPY_NTYPES_LEGACY + NPY_NOTYPE + + NPY_INT8 + NPY_INT16 + NPY_INT32 + NPY_INT64 + NPY_UINT8 + NPY_UINT16 + NPY_UINT32 + NPY_UINT64 + NPY_FLOAT16 + NPY_FLOAT32 + NPY_FLOAT64 + NPY_FLOAT80 + NPY_FLOAT96 + NPY_FLOAT128 + NPY_COMPLEX64 + NPY_COMPLEX128 + NPY_COMPLEX160 + NPY_COMPLEX192 + NPY_COMPLEX256 + + NPY_INTP + NPY_UINTP + NPY_DEFAULT_INT # Not a compile time constant (normally)! + + ctypedef enum NPY_ORDER: + NPY_ANYORDER + NPY_CORDER + NPY_FORTRANORDER + NPY_KEEPORDER + + ctypedef enum NPY_CASTING: + NPY_NO_CASTING + NPY_EQUIV_CASTING + NPY_SAFE_CASTING + NPY_SAME_KIND_CASTING + NPY_UNSAFE_CASTING + NPY_SAME_VALUE_CASTING + + ctypedef enum NPY_CLIPMODE: + NPY_CLIP + NPY_WRAP + NPY_RAISE + + ctypedef enum NPY_SCALARKIND: + NPY_NOSCALAR, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR + + ctypedef enum NPY_SORTKIND: + NPY_QUICKSORT + NPY_HEAPSORT + NPY_MERGESORT + + ctypedef enum NPY_SEARCHSIDE: + NPY_SEARCHLEFT + NPY_SEARCHRIGHT + + enum: + NPY_ARRAY_C_CONTIGUOUS + NPY_ARRAY_F_CONTIGUOUS + NPY_ARRAY_OWNDATA + NPY_ARRAY_FORCECAST + NPY_ARRAY_ENSURECOPY + NPY_ARRAY_ENSUREARRAY + NPY_ARRAY_ELEMENTSTRIDES + NPY_ARRAY_ALIGNED + NPY_ARRAY_NOTSWAPPED + NPY_ARRAY_WRITEABLE + NPY_ARRAY_WRITEBACKIFCOPY + + NPY_ARRAY_BEHAVED + NPY_ARRAY_BEHAVED_NS + NPY_ARRAY_CARRAY + NPY_ARRAY_CARRAY_RO + NPY_ARRAY_FARRAY + NPY_ARRAY_FARRAY_RO + NPY_ARRAY_DEFAULT + + NPY_ARRAY_IN_ARRAY + NPY_ARRAY_OUT_ARRAY + NPY_ARRAY_INOUT_ARRAY + NPY_ARRAY_IN_FARRAY + NPY_ARRAY_OUT_FARRAY + NPY_ARRAY_INOUT_FARRAY + + NPY_ARRAY_UPDATE_ALL + + cdef enum: + NPY_MAXDIMS # 64 on NumPy 2.x and 32 on NumPy 1.x + NPY_RAVEL_AXIS # Used for functions like PyArray_Mean + + ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *) + + ctypedef struct PyArray_ArrayDescr: + # shape is a tuple, but Cython doesn't support "tuple shape" + # inside a non-PyObject declaration, so we have to declare it + # as just a PyObject*. + PyObject* shape + + ctypedef struct PyArray_Descr: + pass + + ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]: + # Use PyDataType_* macros when possible, however there are no macros + # for accessing some of the fields, so some are defined. + cdef PyTypeObject* typeobj + cdef char kind + cdef char type + # Numpy sometimes mutates this without warning (e.g. it'll + # sometimes change "|" to "<" in shared dtype objects on + # little-endian machines). If this matters to you, use + # PyArray_IsNativeByteOrder(dtype.byteorder) instead of + # directly accessing this field. + cdef char byteorder + # Flags are not directly accessible on Cython <3. Use PyDataType_FLAGS. + # cdef char flags + cdef int type_num + # itemsize/elsize, alignment, fields, names, and subarray must + # use the `PyDataType_*` accessor macros. With Cython 3 you can + # still use getter attributes `dtype.itemsize` + + ctypedef class numpy.flatiter [object PyArrayIterObject, check_size ignore]: + # Use through macros + pass + + ctypedef class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]: + cdef int numiter + cdef npy_intp size, index + cdef int nd + cdef npy_intp *dimensions + cdef void **iters + + ctypedef struct PyArrayObject: + # For use in situations where ndarray can't replace PyArrayObject*, + # like PyArrayObject**. + pass + + ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]: + cdef __cythonbufferdefaults__ = {"mode": "strided"} + + cdef: + # Only taking a few of the most commonly used and stable fields. + # One should use PyArray_* macros instead to access the C fields. + char *data + int ndim "nd" + npy_intp *shape "dimensions" + npy_intp *strides + dtype descr # deprecated since NumPy 1.7 ! + PyObject* base # NOT PUBLIC, DO NOT USE ! + + + int _import_array() except -1 + # A second definition so _import_array isn't marked as used when we use it here. + # Do not use - subject to change any time. + int __pyx_import_array "_import_array"() except -1 + + # + # Macros from ndarrayobject.h + # + bint PyArray_CHKFLAGS(ndarray m, int flags) nogil + bint PyArray_IS_C_CONTIGUOUS(ndarray arr) nogil + bint PyArray_IS_F_CONTIGUOUS(ndarray arr) nogil + bint PyArray_ISCONTIGUOUS(ndarray m) nogil + bint PyArray_ISWRITEABLE(ndarray m) nogil + bint PyArray_ISALIGNED(ndarray m) nogil + + int PyArray_NDIM(ndarray) nogil + bint PyArray_ISONESEGMENT(ndarray) nogil + bint PyArray_ISFORTRAN(ndarray) nogil + int PyArray_FORTRANIF(ndarray) nogil + + void* PyArray_DATA(ndarray) nogil + char* PyArray_BYTES(ndarray) nogil + + npy_intp* PyArray_DIMS(ndarray) nogil + npy_intp* PyArray_STRIDES(ndarray) nogil + npy_intp PyArray_DIM(ndarray, size_t) nogil + npy_intp PyArray_STRIDE(ndarray, size_t) nogil + + PyObject *PyArray_BASE(ndarray) nogil # returns borrowed reference! + PyArray_Descr *PyArray_DESCR(ndarray) nogil # returns borrowed reference to dtype! + PyArray_Descr *PyArray_DTYPE(ndarray) nogil # returns borrowed reference to dtype! NP 1.7+ alias for descr. + int PyArray_FLAGS(ndarray) nogil + void PyArray_CLEARFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + void PyArray_ENABLEFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + npy_intp PyArray_ITEMSIZE(ndarray) nogil + int PyArray_TYPE(ndarray arr) nogil + + object PyArray_GETITEM(ndarray arr, void *itemptr) + int PyArray_SETITEM(ndarray arr, void *itemptr, object obj) except -1 + + bint PyTypeNum_ISBOOL(int) nogil + bint PyTypeNum_ISUNSIGNED(int) nogil + bint PyTypeNum_ISSIGNED(int) nogil + bint PyTypeNum_ISINTEGER(int) nogil + bint PyTypeNum_ISFLOAT(int) nogil + bint PyTypeNum_ISNUMBER(int) nogil + bint PyTypeNum_ISSTRING(int) nogil + bint PyTypeNum_ISCOMPLEX(int) nogil + bint PyTypeNum_ISFLEXIBLE(int) nogil + bint PyTypeNum_ISUSERDEF(int) nogil + bint PyTypeNum_ISEXTENDED(int) nogil + bint PyTypeNum_ISOBJECT(int) nogil + + npy_intp PyDataType_ELSIZE(dtype) nogil + npy_intp PyDataType_ALIGNMENT(dtype) nogil + PyObject* PyDataType_METADATA(dtype) nogil + PyArray_ArrayDescr* PyDataType_SUBARRAY(dtype) nogil + PyObject* PyDataType_NAMES(dtype) nogil + PyObject* PyDataType_FIELDS(dtype) nogil + + bint PyDataType_ISBOOL(dtype) nogil + bint PyDataType_ISUNSIGNED(dtype) nogil + bint PyDataType_ISSIGNED(dtype) nogil + bint PyDataType_ISINTEGER(dtype) nogil + bint PyDataType_ISFLOAT(dtype) nogil + bint PyDataType_ISNUMBER(dtype) nogil + bint PyDataType_ISSTRING(dtype) nogil + bint PyDataType_ISCOMPLEX(dtype) nogil + bint PyDataType_ISFLEXIBLE(dtype) nogil + bint PyDataType_ISUSERDEF(dtype) nogil + bint PyDataType_ISEXTENDED(dtype) nogil + bint PyDataType_ISOBJECT(dtype) nogil + bint PyDataType_HASFIELDS(dtype) nogil + bint PyDataType_HASSUBARRAY(dtype) nogil + npy_uint64 PyDataType_FLAGS(dtype) nogil + + bint PyArray_ISBOOL(ndarray) nogil + bint PyArray_ISUNSIGNED(ndarray) nogil + bint PyArray_ISSIGNED(ndarray) nogil + bint PyArray_ISINTEGER(ndarray) nogil + bint PyArray_ISFLOAT(ndarray) nogil + bint PyArray_ISNUMBER(ndarray) nogil + bint PyArray_ISSTRING(ndarray) nogil + bint PyArray_ISCOMPLEX(ndarray) nogil + bint PyArray_ISFLEXIBLE(ndarray) nogil + bint PyArray_ISUSERDEF(ndarray) nogil + bint PyArray_ISEXTENDED(ndarray) nogil + bint PyArray_ISOBJECT(ndarray) nogil + bint PyArray_HASFIELDS(ndarray) nogil + + bint PyArray_ISVARIABLE(ndarray) nogil + + bint PyArray_SAFEALIGNEDCOPY(ndarray) nogil + bint PyArray_ISNBO(char) nogil # works on ndarray.byteorder + bint PyArray_IsNativeByteOrder(char) nogil # works on ndarray.byteorder + bint PyArray_ISNOTSWAPPED(ndarray) nogil + bint PyArray_ISBYTESWAPPED(ndarray) nogil + + bint PyArray_FLAGSWAP(ndarray, int) nogil + + bint PyArray_ISCARRAY(ndarray) nogil + bint PyArray_ISCARRAY_RO(ndarray) nogil + bint PyArray_ISFARRAY(ndarray) nogil + bint PyArray_ISFARRAY_RO(ndarray) nogil + bint PyArray_ISBEHAVED(ndarray) nogil + bint PyArray_ISBEHAVED_RO(ndarray) nogil + + + bint PyDataType_ISNOTSWAPPED(dtype) nogil + bint PyDataType_ISBYTESWAPPED(dtype) nogil + + bint PyArray_DescrCheck(object) + + bint PyArray_Check(object) + bint PyArray_CheckExact(object) + + # Cannot be supported due to out arg: + # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&) + # bint PyArray_HasArrayInterface(op, out) + + + bint PyArray_IsZeroDim(object) + # Cannot be supported due to ## ## in macro: + # bint PyArray_IsScalar(object, verbatim work) + bint PyArray_CheckScalar(object) + bint PyArray_IsPythonNumber(object) + bint PyArray_IsPythonScalar(object) + bint PyArray_IsAnyScalar(object) + bint PyArray_CheckAnyScalar(object) + + ndarray PyArray_GETCONTIGUOUS(ndarray) + bint PyArray_SAMESHAPE(ndarray, ndarray) nogil + npy_intp PyArray_SIZE(ndarray) nogil + npy_intp PyArray_NBYTES(ndarray) nogil + + object PyArray_FROM_O(object) + object PyArray_FROM_OF(object m, int flags) + object PyArray_FROM_OT(object m, int type) + object PyArray_FROM_OTF(object m, int type, int flags) + object PyArray_FROMANY(object m, int type, int min, int max, int flags) + object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran) + object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran) + void PyArray_FILLWBYTE(ndarray, int val) + object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth) + unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2) + bint PyArray_EquivByteorders(int b1, int b2) nogil + object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum) + object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data) + #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr) + object PyArray_ToScalar(void* data, ndarray arr) + + void* PyArray_GETPTR1(ndarray m, npy_intp i) nogil + void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) nogil + void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil + void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil + + # Cannot be supported due to out arg + # void PyArray_DESCR_REPLACE(descr) + + + object PyArray_Copy(ndarray) + object PyArray_FromObject(object op, int type, int min_depth, int max_depth) + object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth) + object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth) + + object PyArray_Cast(ndarray mp, int type_num) + object PyArray_Take(ndarray ap, object items, int axis) + object PyArray_Put(ndarray ap, object items, object values) + + void PyArray_ITER_RESET(flatiter it) nogil + void PyArray_ITER_NEXT(flatiter it) nogil + void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil + void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil + void* PyArray_ITER_DATA(flatiter it) nogil + bint PyArray_ITER_NOTDONE(flatiter it) nogil + + void PyArray_MultiIter_RESET(broadcast multi) nogil + void PyArray_MultiIter_NEXT(broadcast multi) nogil + void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil + void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil + void* PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil + void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil + bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil + npy_intp PyArray_MultiIter_SIZE(broadcast multi) nogil + int PyArray_MultiIter_NDIM(broadcast multi) nogil + npy_intp PyArray_MultiIter_INDEX(broadcast multi) nogil + int PyArray_MultiIter_NUMITER(broadcast multi) nogil + npy_intp* PyArray_MultiIter_DIMS(broadcast multi) nogil + void** PyArray_MultiIter_ITERS(broadcast multi) nogil + + # Functions from __multiarray_api.h + + # Functions taking dtype and returning object/ndarray are disabled + # for now as they steal dtype references. I'm conservative and disable + # more than is probably needed until it can be checked further. + int PyArray_INCREF (ndarray) except * # uses PyArray_Item_INCREF... + int PyArray_XDECREF (ndarray) except * # uses PyArray_Item_DECREF... + dtype PyArray_DescrFromType (int) + object PyArray_TypeObjectFromType (int) + char * PyArray_Zero (ndarray) + char * PyArray_One (ndarray) + #object PyArray_CastToType (ndarray, dtype, int) + int PyArray_CanCastSafely (int, int) # writes errors + npy_bool PyArray_CanCastTo (dtype, dtype) # writes errors + int PyArray_ObjectType (object, int) except 0 + dtype PyArray_DescrFromObject (object, dtype) + #ndarray* PyArray_ConvertToCommonType (object, int *) + dtype PyArray_DescrFromScalar (object) + dtype PyArray_DescrFromTypeObject (object) + npy_intp PyArray_Size (object) + #object PyArray_Scalar (void *, dtype, object) + #object PyArray_FromScalar (object, dtype) + void PyArray_ScalarAsCtype (object, void *) + #int PyArray_CastScalarToCtype (object, void *, dtype) + #int PyArray_CastScalarDirect (object, dtype, void *, int) + #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int) + #object PyArray_FromAny (object, dtype, int, int, int, object) + object PyArray_EnsureArray (object) + object PyArray_EnsureAnyArray (object) + #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *) + #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *) + #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp) + #object PyArray_FromIter (object, dtype, npy_intp) + object PyArray_Return (ndarray) + #object PyArray_GetField (ndarray, dtype, int) + #int PyArray_SetField (ndarray, dtype, int, object) except -1 + object PyArray_Byteswap (ndarray, npy_bool) + object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER) + int PyArray_CopyInto (ndarray, ndarray) except -1 + int PyArray_CopyAnyInto (ndarray, ndarray) except -1 + int PyArray_CopyObject (ndarray, object) except -1 + object PyArray_NewCopy (ndarray, NPY_ORDER) + object PyArray_ToList (ndarray) + object PyArray_ToString (ndarray, NPY_ORDER) + int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *) except -1 + int PyArray_Dump (object, object, int) except -1 + object PyArray_Dumps (object, int) + int PyArray_ValidType (int) # Cannot error + void PyArray_UpdateFlags (ndarray, int) + object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object) + #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object) + #dtype PyArray_DescrNew (dtype) + dtype PyArray_DescrNewFromType (int) + double PyArray_GetPriority (object, double) # clears errors as of 1.25 + object PyArray_IterNew (object) + object PyArray_MultiIterNew (int, ...) + + int PyArray_PyIntAsInt (object) except? -1 + npy_intp PyArray_PyIntAsIntp (object) + int PyArray_Broadcast (broadcast) except -1 + int PyArray_FillWithScalar (ndarray, object) except -1 + npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *) + dtype PyArray_DescrNewByteorder (dtype, char) + object PyArray_IterAllButAxis (object, int *) + #object PyArray_CheckFromAny (object, dtype, int, int, int, object) + #object PyArray_FromArray (ndarray, dtype, int) + object PyArray_FromInterface (object) + object PyArray_FromStructInterface (object) + #object PyArray_FromArrayAttr (object, dtype, object) + #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*) + int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND) + npy_bool PyArray_CanCastScalar (type, type) + int PyArray_RemoveSmallest (broadcast) except -1 + int PyArray_ElementStrides (object) + void PyArray_Item_INCREF (char *, dtype) except * + void PyArray_Item_XDECREF (char *, dtype) except * + object PyArray_Transpose (ndarray, PyArray_Dims *) + object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE) + object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE) + object PyArray_PutMask (ndarray, object, object) + object PyArray_Repeat (ndarray, object, int) + object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE) + int PyArray_Sort (ndarray, int, NPY_SORTKIND) except -1 + object PyArray_ArgSort (ndarray, int, NPY_SORTKIND) + object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE, PyObject *) + object PyArray_ArgMax (ndarray, int, ndarray) + object PyArray_ArgMin (ndarray, int, ndarray) + object PyArray_Reshape (ndarray, object) + object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER) + object PyArray_Squeeze (ndarray) + #object PyArray_View (ndarray, dtype, type) + object PyArray_SwapAxes (ndarray, int, int) + object PyArray_Max (ndarray, int, ndarray) + object PyArray_Min (ndarray, int, ndarray) + object PyArray_Ptp (ndarray, int, ndarray) + object PyArray_Mean (ndarray, int, int, ndarray) + object PyArray_Trace (ndarray, int, int, int, int, ndarray) + object PyArray_Diagonal (ndarray, int, int, int) + object PyArray_Clip (ndarray, object, object, ndarray) + object PyArray_Conjugate (ndarray, ndarray) + object PyArray_Nonzero (ndarray) + object PyArray_Std (ndarray, int, int, ndarray, int) + object PyArray_Sum (ndarray, int, int, ndarray) + object PyArray_CumSum (ndarray, int, int, ndarray) + object PyArray_Prod (ndarray, int, int, ndarray) + object PyArray_CumProd (ndarray, int, int, ndarray) + object PyArray_All (ndarray, int, ndarray) + object PyArray_Any (ndarray, int, ndarray) + object PyArray_Compress (ndarray, object, int, ndarray) + object PyArray_Flatten (ndarray, NPY_ORDER) + object PyArray_Ravel (ndarray, NPY_ORDER) + npy_intp PyArray_MultiplyList (npy_intp *, int) + int PyArray_MultiplyIntList (int *, int) + void * PyArray_GetPtr (ndarray, npy_intp*) + int PyArray_CompareLists (npy_intp *, npy_intp *, int) + #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype) + int PyArray_Free (object, void *) + #int PyArray_Converter (object, object*) + int PyArray_IntpFromSequence (object, npy_intp *, int) except -1 + object PyArray_Concatenate (object, int) + object PyArray_InnerProduct (object, object) + object PyArray_MatrixProduct (object, object) + object PyArray_Correlate (object, object, int) + #int PyArray_DescrConverter (object, dtype*) except 0 + #int PyArray_DescrConverter2 (object, dtype*) except 0 + int PyArray_IntpConverter (object, PyArray_Dims *) except 0 + #int PyArray_BufferConverter (object, chunk) except 0 + int PyArray_AxisConverter (object, int *) except 0 + int PyArray_BoolConverter (object, npy_bool *) except 0 + int PyArray_ByteorderConverter (object, char *) except 0 + int PyArray_OrderConverter (object, NPY_ORDER *) except 0 + unsigned char PyArray_EquivTypes (dtype, dtype) # clears errors + #object PyArray_Zeros (int, npy_intp *, dtype, int) + #object PyArray_Empty (int, npy_intp *, dtype, int) + object PyArray_Where (object, object, object) + object PyArray_Arange (double, double, double, int) + #object PyArray_ArangeObj (object, object, object, dtype) + int PyArray_SortkindConverter (object, NPY_SORTKIND *) except 0 + object PyArray_LexSort (object, int) + object PyArray_Round (ndarray, int, ndarray) + unsigned char PyArray_EquivTypenums (int, int) + int PyArray_RegisterDataType (dtype) except -1 + int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *) except -1 + int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND) except -1 + #void PyArray_InitArrFuncs (PyArray_ArrFuncs *) + object PyArray_IntTupleFromIntp (int, npy_intp *) + int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *) except 0 + #int PyArray_OutputConverter (object, ndarray*) except 0 + object PyArray_BroadcastToShape (object, npy_intp *, int) + #int PyArray_DescrAlignConverter (object, dtype*) except 0 + #int PyArray_DescrAlignConverter2 (object, dtype*) except 0 + int PyArray_SearchsideConverter (object, void *) except 0 + object PyArray_CheckAxis (ndarray, int *, int) + npy_intp PyArray_OverflowMultiplyList (npy_intp *, int) + int PyArray_SetBaseObject(ndarray, base) except -1 # NOTE: steals a reference to base! Use "set_array_base()" instead. + + # The memory handler functions require the NumPy 1.22 API + # and may require defining NPY_TARGET_VERSION + ctypedef struct PyDataMemAllocator: + void *ctx + void* (*malloc) (void *ctx, size_t size) + void* (*calloc) (void *ctx, size_t nelem, size_t elsize) + void* (*realloc) (void *ctx, void *ptr, size_t new_size) + void (*free) (void *ctx, void *ptr, size_t size) + + ctypedef struct PyDataMem_Handler: + char* name + npy_uint8 version + PyDataMemAllocator allocator + + object PyDataMem_SetHandler(object handler) + object PyDataMem_GetHandler() + + # additional datetime related functions are defined below + + +# Typedefs that matches the runtime dtype objects in +# the numpy module. + +# The ones that are commented out needs an IFDEF function +# in Cython to enable them only on the right systems. + +ctypedef npy_int8 int8_t +ctypedef npy_int16 int16_t +ctypedef npy_int32 int32_t +ctypedef npy_int64 int64_t + +ctypedef npy_uint8 uint8_t +ctypedef npy_uint16 uint16_t +ctypedef npy_uint32 uint32_t +ctypedef npy_uint64 uint64_t + +ctypedef npy_float32 float32_t +ctypedef npy_float64 float64_t +#ctypedef npy_float80 float80_t +#ctypedef npy_float128 float128_t + +ctypedef float complex complex64_t +ctypedef double complex complex128_t + +ctypedef npy_longlong longlong_t +ctypedef npy_ulonglong ulonglong_t + +ctypedef npy_intp intp_t +ctypedef npy_uintp uintp_t + +ctypedef npy_double float_t +ctypedef npy_double double_t +ctypedef npy_longdouble longdouble_t + +ctypedef float complex cfloat_t +ctypedef double complex cdouble_t +ctypedef double complex complex_t +ctypedef long double complex clongdouble_t + +cdef inline object PyArray_MultiIterNew1(a): + return PyArray_MultiIterNew(1, a) + +cdef inline object PyArray_MultiIterNew2(a, b): + return PyArray_MultiIterNew(2, a, b) + +cdef inline object PyArray_MultiIterNew3(a, b, c): + return PyArray_MultiIterNew(3, a, b, c) + +cdef inline object PyArray_MultiIterNew4(a, b, c, d): + return PyArray_MultiIterNew(4, a, b, c, d) + +cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + return PyArray_MultiIterNew(5, a, b, c, d, e) + +cdef inline tuple PyDataType_SHAPE(dtype d): + if PyDataType_HASSUBARRAY(d): + return d.subarray.shape + else: + return () + + +cdef extern from "numpy/ndarrayobject.h": + PyTypeObject PyTimedeltaArrType_Type + PyTypeObject PyDatetimeArrType_Type + ctypedef int64_t npy_timedelta + ctypedef int64_t npy_datetime + +cdef extern from "numpy/ndarraytypes.h": + ctypedef struct PyArray_DatetimeMetaData: + NPY_DATETIMEUNIT base + int64_t num + + ctypedef struct npy_datetimestruct: + int64_t year + int32_t month, day, hour, min, sec, us, ps, as + + # Iterator API added in v1.6 + # + # These don't match the definition in the C API because Cython can't wrap + # function pointers that return functions. + # https://github.com/cython/cython/issues/6720 + ctypedef int (*NpyIter_IterNextFunc "NpyIter_IterNextFunc *")(NpyIter* it) noexcept nogil + ctypedef void (*NpyIter_GetMultiIndexFunc "NpyIter_GetMultiIndexFunc *")(NpyIter* it, npy_intp* outcoords) noexcept nogil + +cdef extern from "numpy/arrayscalars.h": + + # abstract types + ctypedef class numpy.generic [object PyObject]: + pass + ctypedef class numpy.number [object PyObject]: + pass + ctypedef class numpy.integer [object PyObject]: + pass + ctypedef class numpy.signedinteger [object PyObject]: + pass + ctypedef class numpy.unsignedinteger [object PyObject]: + pass + ctypedef class numpy.inexact [object PyObject]: + pass + ctypedef class numpy.floating [object PyObject]: + pass + ctypedef class numpy.complexfloating [object PyObject]: + pass + ctypedef class numpy.flexible [object PyObject]: + pass + ctypedef class numpy.character [object PyObject]: + pass + + ctypedef struct PyDatetimeScalarObject: + # PyObject_HEAD + npy_datetime obval + PyArray_DatetimeMetaData obmeta + + ctypedef struct PyTimedeltaScalarObject: + # PyObject_HEAD + npy_timedelta obval + PyArray_DatetimeMetaData obmeta + + ctypedef enum NPY_DATETIMEUNIT: + NPY_FR_Y + NPY_FR_M + NPY_FR_W + NPY_FR_D + NPY_FR_B + NPY_FR_h + NPY_FR_m + NPY_FR_s + NPY_FR_ms + NPY_FR_us + NPY_FR_ns + NPY_FR_ps + NPY_FR_fs + NPY_FR_as + NPY_FR_GENERIC + + +cdef extern from "numpy/arrayobject.h": + # These are part of the C-API defined in `__multiarray_api.h` + + # NumPy internal definitions in datetime_strings.c: + int get_datetime_iso_8601_strlen "NpyDatetime_GetDatetimeISO8601StrLen" ( + int local, NPY_DATETIMEUNIT base) + int make_iso_8601_datetime "NpyDatetime_MakeISO8601Datetime" ( + npy_datetimestruct *dts, char *outstr, npy_intp outlen, + int local, int utc, NPY_DATETIMEUNIT base, int tzoffset, + NPY_CASTING casting) except -1 + + # NumPy internal definition in datetime.c: + # May return 1 to indicate that object does not appear to be a datetime + # (returns 0 on success). + int convert_pydatetime_to_datetimestruct "NpyDatetime_ConvertPyDateTimeToDatetimeStruct" ( + PyObject *obj, npy_datetimestruct *out, + NPY_DATETIMEUNIT *out_bestunit, int apply_tzinfo) except -1 + int convert_datetime64_to_datetimestruct "NpyDatetime_ConvertDatetime64ToDatetimeStruct" ( + PyArray_DatetimeMetaData *meta, npy_datetime dt, + npy_datetimestruct *out) except -1 + int convert_datetimestruct_to_datetime64 "NpyDatetime_ConvertDatetimeStructToDatetime64"( + PyArray_DatetimeMetaData *meta, const npy_datetimestruct *dts, + npy_datetime *out) except -1 + + +# +# ufunc API +# + +cdef extern from "numpy/ufuncobject.h": + + ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *) + + ctypedef class numpy.ufunc [object PyUFuncObject, check_size ignore]: + cdef: + int nin, nout, nargs + int identity + PyUFuncGenericFunction *functions + void **data + int ntypes + int check_return + char *name + char *types + char *doc + void *ptr + PyObject *obj + PyObject *userloops + + cdef enum: + PyUFunc_Zero + PyUFunc_One + PyUFunc_None + # deprecated + UFUNC_FPE_DIVIDEBYZERO + UFUNC_FPE_OVERFLOW + UFUNC_FPE_UNDERFLOW + UFUNC_FPE_INVALID + # use these instead + NPY_FPE_DIVIDEBYZERO + NPY_FPE_OVERFLOW + NPY_FPE_UNDERFLOW + NPY_FPE_INVALID + + object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *, + void **, char *, int, int, int, int, char *, char *, int) + int PyUFunc_RegisterLoopForType(ufunc, int, + PyUFuncGenericFunction, int *, void *) except -1 + void PyUFunc_f_f_As_d_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_d_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_f_f \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_g_g \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_F_F_As_D_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_F_F \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_D_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_G_G \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_O_O \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_ff_f_As_dd_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_ff_f \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_dd_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_gg_g \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_FF_F_As_DD_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_DD_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_FF_F \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_GG_G \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_OO_O \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_O_O_method \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_OO_O_method \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_On_Om \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_clearfperr() + int PyUFunc_getfperr() + int PyUFunc_ReplaceLoopBySignature \ + (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *) + object PyUFunc_FromFuncAndDataAndSignature \ + (PyUFuncGenericFunction *, void **, char *, int, int, int, + int, char *, char *, int, char *) + + int _import_umath() except -1 + +cdef inline void set_array_base(ndarray arr, object base): + Py_INCREF(base) # important to do this before stealing the reference below! + PyArray_SetBaseObject(arr, base) + +cdef inline object get_array_base(ndarray arr): + base = PyArray_BASE(arr) + if base is NULL: + return None + return base + +# Versions of the import_* functions which are more suitable for +# Cython code. +cdef inline int import_array() except -1: + try: + __pyx_import_array() + except Exception: + raise ImportError("numpy._core.multiarray failed to import") + +cdef inline int import_umath() except -1: + try: + _import_umath() + except Exception: + raise ImportError("numpy._core.umath failed to import") + +cdef inline int import_ufunc() except -1: + try: + _import_umath() + except Exception: + raise ImportError("numpy._core.umath failed to import") + + +cdef inline bint is_timedelta64_object(object obj): + """ + Cython equivalent of `isinstance(obj, np.timedelta64)` + + Parameters + ---------- + obj : object + + Returns + ------- + bool + """ + return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type) + + +cdef inline bint is_datetime64_object(object obj): + """ + Cython equivalent of `isinstance(obj, np.datetime64)` + + Parameters + ---------- + obj : object + + Returns + ------- + bool + """ + return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type) + + +cdef inline npy_datetime get_datetime64_value(object obj) nogil: + """ + returns the int64 value underlying scalar numpy datetime64 object + + Note that to interpret this as a datetime, the corresponding unit is + also needed. That can be found using `get_datetime64_unit`. + """ + return (obj).obval + + +cdef inline npy_timedelta get_timedelta64_value(object obj) nogil: + """ + returns the int64 value underlying scalar numpy timedelta64 object + """ + return (obj).obval + + +cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: + """ + returns the unit part of the dtype for a numpy datetime64 object. + """ + return (obj).obmeta.base + + +cdef extern from "numpy/arrayobject.h": + + ctypedef struct NpyIter: + pass + + cdef enum: + NPY_FAIL + NPY_SUCCEED + + cdef enum: + # Track an index representing C order + NPY_ITER_C_INDEX + # Track an index representing Fortran order + NPY_ITER_F_INDEX + # Track a multi-index + NPY_ITER_MULTI_INDEX + # User code external to the iterator does the 1-dimensional innermost loop + NPY_ITER_EXTERNAL_LOOP + # Convert all the operands to a common data type + NPY_ITER_COMMON_DTYPE + # Operands may hold references, requiring API access during iteration + NPY_ITER_REFS_OK + # Zero-sized operands should be permitted, iteration checks IterSize for 0 + NPY_ITER_ZEROSIZE_OK + # Permits reductions (size-0 stride with dimension size > 1) + NPY_ITER_REDUCE_OK + # Enables sub-range iteration + NPY_ITER_RANGED + # Enables buffering + NPY_ITER_BUFFERED + # When buffering is enabled, grows the inner loop if possible + NPY_ITER_GROWINNER + # Delay allocation of buffers until first Reset* call + NPY_ITER_DELAY_BUFALLOC + # When NPY_KEEPORDER is specified, disable reversing negative-stride axes + NPY_ITER_DONT_NEGATE_STRIDES + NPY_ITER_COPY_IF_OVERLAP + # The operand will be read from and written to + NPY_ITER_READWRITE + # The operand will only be read from + NPY_ITER_READONLY + # The operand will only be written to + NPY_ITER_WRITEONLY + # The operand's data must be in native byte order + NPY_ITER_NBO + # The operand's data must be aligned + NPY_ITER_ALIGNED + # The operand's data must be contiguous (within the inner loop) + NPY_ITER_CONTIG + # The operand may be copied to satisfy requirements + NPY_ITER_COPY + # The operand may be copied with WRITEBACKIFCOPY to satisfy requirements + NPY_ITER_UPDATEIFCOPY + # Allocate the operand if it is NULL + NPY_ITER_ALLOCATE + # If an operand is allocated, don't use any subtype + NPY_ITER_NO_SUBTYPE + # This is a virtual array slot, operand is NULL but temporary data is there + NPY_ITER_VIRTUAL + # Require that the dimension match the iterator dimensions exactly + NPY_ITER_NO_BROADCAST + # A mask is being used on this array, affects buffer -> array copy + NPY_ITER_WRITEMASKED + # This array is the mask for all WRITEMASKED operands + NPY_ITER_ARRAYMASK + # Assume iterator order data access for COPY_IF_OVERLAP + NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE + + # construction and destruction functions + NpyIter* NpyIter_New(ndarray arr, npy_uint32 flags, NPY_ORDER order, + NPY_CASTING casting, dtype datatype) except NULL + NpyIter* NpyIter_MultiNew(npy_intp nop, PyArrayObject** op, npy_uint32 flags, + NPY_ORDER order, NPY_CASTING casting, npy_uint32* + op_flags, PyArray_Descr** op_dtypes) except NULL + NpyIter* NpyIter_AdvancedNew(npy_intp nop, PyArrayObject** op, + npy_uint32 flags, NPY_ORDER order, + NPY_CASTING casting, npy_uint32* op_flags, + PyArray_Descr** op_dtypes, int oa_ndim, + int** op_axes, const npy_intp* itershape, + npy_intp buffersize) except NULL + NpyIter* NpyIter_Copy(NpyIter* it) except NULL + int NpyIter_RemoveAxis(NpyIter* it, int axis) except NPY_FAIL + int NpyIter_RemoveMultiIndex(NpyIter* it) except NPY_FAIL + int NpyIter_EnableExternalLoop(NpyIter* it) except NPY_FAIL + int NpyIter_Deallocate(NpyIter* it) except NPY_FAIL + int NpyIter_Reset(NpyIter* it, char** errmsg) except NPY_FAIL + int NpyIter_ResetToIterIndexRange(NpyIter* it, npy_intp istart, + npy_intp iend, char** errmsg) except NPY_FAIL + int NpyIter_ResetBasePointers(NpyIter* it, char** baseptrs, char** errmsg) except NPY_FAIL + int NpyIter_GotoMultiIndex(NpyIter* it, const npy_intp* multi_index) except NPY_FAIL + int NpyIter_GotoIndex(NpyIter* it, npy_intp index) except NPY_FAIL + npy_intp NpyIter_GetIterSize(NpyIter* it) nogil + npy_intp NpyIter_GetIterIndex(NpyIter* it) nogil + void NpyIter_GetIterIndexRange(NpyIter* it, npy_intp* istart, + npy_intp* iend) nogil + int NpyIter_GotoIterIndex(NpyIter* it, npy_intp iterindex) except NPY_FAIL + npy_bool NpyIter_HasDelayedBufAlloc(NpyIter* it) nogil + npy_bool NpyIter_HasExternalLoop(NpyIter* it) nogil + npy_bool NpyIter_HasMultiIndex(NpyIter* it) nogil + npy_bool NpyIter_HasIndex(NpyIter* it) nogil + npy_bool NpyIter_RequiresBuffering(NpyIter* it) nogil + npy_bool NpyIter_IsBuffered(NpyIter* it) nogil + npy_bool NpyIter_IsGrowInner(NpyIter* it) nogil + npy_intp NpyIter_GetBufferSize(NpyIter* it) nogil + int NpyIter_GetNDim(NpyIter* it) nogil + int NpyIter_GetNOp(NpyIter* it) nogil + npy_intp* NpyIter_GetAxisStrideArray(NpyIter* it, int axis) except NULL + int NpyIter_GetShape(NpyIter* it, npy_intp* outshape) nogil + PyArray_Descr** NpyIter_GetDescrArray(NpyIter* it) + PyArrayObject** NpyIter_GetOperandArray(NpyIter* it) + ndarray NpyIter_GetIterView(NpyIter* it, npy_intp i) + void NpyIter_GetReadFlags(NpyIter* it, char* outreadflags) + void NpyIter_GetWriteFlags(NpyIter* it, char* outwriteflags) + int NpyIter_CreateCompatibleStrides(NpyIter* it, npy_intp itemsize, + npy_intp* outstrides) except NPY_FAIL + npy_bool NpyIter_IsFirstVisit(NpyIter* it, int iop) nogil + # functions for iterating an NpyIter object + # + # These don't match the definition in the C API because Cython can't wrap + # function pointers that return functions. + NpyIter_IterNextFunc* NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL + NpyIter_GetMultiIndexFunc* NpyIter_GetGetMultiIndex(NpyIter* it, + char** errmsg) except NULL + char** NpyIter_GetDataPtrArray(NpyIter* it) nogil + char** NpyIter_GetInitialDataPtrArray(NpyIter* it) nogil + npy_intp* NpyIter_GetIndexPtr(NpyIter* it) + npy_intp* NpyIter_GetInnerStrideArray(NpyIter* it) nogil + npy_intp* NpyIter_GetInnerLoopSizePtr(NpyIter* it) nogil + void NpyIter_GetInnerFixedStrideArray(NpyIter* it, npy_intp* outstrides) nogil + npy_bool NpyIter_IterationNeedsAPI(NpyIter* it) nogil + void NpyIter_DebugPrint(NpyIter* it) + +# NpyString API +cdef extern from "numpy/ndarraytypes.h": + ctypedef struct npy_string_allocator: + pass + + ctypedef struct npy_packed_static_string: + pass + + ctypedef struct npy_static_string: + size_t size + const char *buf + + ctypedef struct PyArray_StringDTypeObject: + PyArray_Descr base + PyObject *na_object + char coerce + char has_nan_na + char has_string_na + char array_owned + npy_static_string default_string + npy_static_string na_name + npy_string_allocator *allocator + +cdef extern from "numpy/arrayobject.h": + npy_string_allocator *NpyString_acquire_allocator(const PyArray_StringDTypeObject *descr) + void NpyString_acquire_allocators(size_t n_descriptors, PyArray_Descr *const descrs[], npy_string_allocator *allocators[]) + void NpyString_release_allocator(npy_string_allocator *allocator) + void NpyString_release_allocators(size_t length, npy_string_allocator *allocators[]) + int NpyString_load(npy_string_allocator *allocator, const npy_packed_static_string *packed_string, npy_static_string *unpacked_string) + int NpyString_pack_null(npy_string_allocator *allocator, npy_packed_static_string *packed_string) + int NpyString_pack(npy_string_allocator *allocator, npy_packed_static_string *packed_string, const char *buf, size_t size) diff --git a/local_packages/numpy/__init__.py b/local_packages/numpy/__init__.py new file mode 100644 index 0000000..840f642 --- /dev/null +++ b/local_packages/numpy/__init__.py @@ -0,0 +1,955 @@ +""" +NumPy +===== + +Provides + 1. An array object of arbitrary homogeneous items + 2. Fast mathematical operations over arrays + 3. Linear Algebra, Fourier Transforms, Random Number Generation + +How to use the documentation +---------------------------- +Documentation is available in two forms: docstrings provided +with the code, and a loose standing reference guide, available from +`the NumPy homepage `_. + +We recommend exploring the docstrings using +`IPython `_, an advanced Python shell with +TAB-completion and introspection capabilities. See below for further +instructions. + +The docstring examples assume that `numpy` has been imported as ``np``:: + + >>> import numpy as np + +Code snippets are indicated by three greater-than signs:: + + >>> x = 42 + >>> x = x + 1 + +Use the built-in ``help`` function to view a function's docstring:: + + >>> help(np.sort) + ... # doctest: +SKIP + +For some objects, ``np.info(obj)`` may provide additional help. This is +particularly true if you see the line "Help on ufunc object:" at the top +of the help() page. Ufuncs are implemented in C, not Python, for speed. +The native Python help() does not know how to view their help, but our +np.info() function does. + +Available subpackages +--------------------- +lib + Basic functions used by several sub-packages. +random + Core Random Tools +linalg + Core Linear Algebra Tools +fft + Core FFT routines +polynomial + Polynomial tools +testing + NumPy testing tools +distutils + Enhancements to distutils with support for + Fortran compilers support and more (for Python <= 3.11) + +Utilities +--------- +test + Run numpy unittests +show_config + Show numpy build configuration +__version__ + NumPy version string + +Viewing documentation using IPython +----------------------------------- + +Start IPython and import `numpy` usually under the alias ``np``: `import +numpy as np`. Then, directly past or use the ``%cpaste`` magic to paste +examples into the shell. To see which functions are available in `numpy`, +type ``np.`` (where ```` refers to the TAB key), or use +``np.*cos*?`` (where ```` refers to the ENTER key) to narrow +down the list. To view the docstring for a function, use +``np.cos?`` (to view the docstring) and ``np.cos??`` (to view +the source code). + +Copies vs. in-place operation +----------------------------- +Most of the functions in `numpy` return a copy of the array argument +(e.g., `np.sort`). In-place versions of these functions are often +available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``. +Exceptions to this rule are documented. + +""" + + +# start delvewheel patch +def _delvewheel_patch_1_11_2(): + import os + if os.path.isdir(libs_dir := os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, 'numpy.libs'))): + os.add_dll_directory(libs_dir) + + +_delvewheel_patch_1_11_2() +del _delvewheel_patch_1_11_2 +# end delvewheel patch + +import os +import sys +import warnings + +# If a version with git hash was stored, use that instead +from . import version +from ._expired_attrs_2_0 import __expired_attributes__ +from ._globals import _CopyMode, _NoValue +from .version import __version__ + +# We first need to detect if we're being called as part of the numpy setup +# procedure itself in a reliable manner. +try: + __NUMPY_SETUP__ # noqa: B018 +except NameError: + __NUMPY_SETUP__ = False + +if __NUMPY_SETUP__: + sys.stderr.write('Running from numpy source directory.\n') +else: + # Allow distributors to run custom init code before importing numpy._core + from . import _distributor_init + + try: + from numpy.__config__ import show_config + except ImportError as e: + if isinstance(e, ModuleNotFoundError) and e.name == "numpy.__config__": + # The __config__ module itself was not found, so add this info: + msg = """Error importing numpy: you should not try to import numpy from + its source directory; please exit the numpy source tree, and relaunch + your python interpreter from there.""" + raise ImportError(msg) from e + raise + + from . import _core + from ._core import ( + False_, + ScalarType, + True_, + abs, + absolute, + acos, + acosh, + add, + all, + allclose, + amax, + amin, + any, + arange, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + argmax, + argmin, + argpartition, + argsort, + argwhere, + around, + array, + array2string, + array_equal, + array_equiv, + array_repr, + array_str, + asanyarray, + asarray, + ascontiguousarray, + asfortranarray, + asin, + asinh, + astype, + atan, + atan2, + atanh, + atleast_1d, + atleast_2d, + atleast_3d, + base_repr, + binary_repr, + bitwise_and, + bitwise_count, + bitwise_invert, + bitwise_left_shift, + bitwise_not, + bitwise_or, + bitwise_right_shift, + bitwise_xor, + block, + bool, + bool_, + broadcast, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + can_cast, + cbrt, + cdouble, + ceil, + character, + choose, + clip, + clongdouble, + complex64, + complex128, + complexfloating, + compress, + concat, + concatenate, + conj, + conjugate, + convolve, + copysign, + copyto, + correlate, + cos, + cosh, + count_nonzero, + cross, + csingle, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + datetime64, + datetime_as_string, + datetime_data, + deg2rad, + degrees, + diagonal, + divide, + divmod, + dot, + double, + dtype, + e, + einsum, + einsum_path, + empty, + empty_like, + equal, + errstate, + euler_gamma, + exp, + exp2, + expm1, + fabs, + finfo, + flatiter, + flatnonzero, + flexible, + float16, + float32, + float64, + float_power, + floating, + floor, + floor_divide, + fmax, + fmin, + fmod, + format_float_positional, + format_float_scientific, + frexp, + from_dlpack, + frombuffer, + fromfile, + fromfunction, + fromiter, + frompyfunc, + fromstring, + full, + full_like, + gcd, + generic, + geomspace, + get_printoptions, + getbufsize, + geterr, + geterrcall, + greater, + greater_equal, + half, + heaviside, + hstack, + hypot, + identity, + iinfo, + indices, + inexact, + inf, + inner, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + intp, + invert, + is_busday, + isclose, + isdtype, + isfinite, + isfortran, + isinf, + isnan, + isnat, + isscalar, + issubdtype, + lcm, + ldexp, + left_shift, + less, + less_equal, + lexsort, + linspace, + little_endian, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + logspace, + long, + longdouble, + longlong, + matmul, + matrix_transpose, + matvec, + max, + maximum, + may_share_memory, + mean, + memmap, + min, + min_scalar_type, + minimum, + mod, + modf, + moveaxis, + multiply, + nan, + ndarray, + ndim, + nditer, + negative, + nested_iters, + newaxis, + nextafter, + nonzero, + not_equal, + number, + object_, + ones, + ones_like, + outer, + partition, + permute_dims, + pi, + positive, + pow, + power, + printoptions, + prod, + promote_types, + ptp, + put, + putmask, + rad2deg, + radians, + ravel, + recarray, + reciprocal, + record, + remainder, + repeat, + require, + reshape, + resize, + result_type, + right_shift, + rint, + roll, + rollaxis, + round, + sctypeDict, + searchsorted, + set_printoptions, + setbufsize, + seterr, + seterrcall, + shape, + shares_memory, + short, + sign, + signbit, + signedinteger, + sin, + single, + sinh, + size, + sort, + spacing, + sqrt, + square, + squeeze, + stack, + std, + str_, + subtract, + sum, + swapaxes, + take, + tan, + tanh, + tensordot, + timedelta64, + trace, + transpose, + true_divide, + trunc, + typecodes, + ubyte, + ufunc, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + unsignedinteger, + unstack, + ushort, + var, + vdot, + vecdot, + vecmat, + void, + vstack, + where, + zeros, + zeros_like, + ) + + # NOTE: It's still under discussion whether these aliases + # should be removed. + for ta in ["float96", "float128", "complex192", "complex256"]: + try: + globals()[ta] = getattr(_core, ta) + except AttributeError: + pass + del ta + + from . import lib, matrixlib as _mat + from .lib import scimath as emath + from .lib._arraypad_impl import pad + from .lib._arraysetops_impl import ( + ediff1d, + intersect1d, + isin, + setdiff1d, + setxor1d, + union1d, + unique, + unique_all, + unique_counts, + unique_inverse, + unique_values, + ) + from .lib._function_base_impl import ( + angle, + append, + asarray_chkfinite, + average, + bartlett, + bincount, + blackman, + copy, + corrcoef, + cov, + delete, + diff, + digitize, + extract, + flip, + gradient, + hamming, + hanning, + i0, + insert, + interp, + iterable, + kaiser, + median, + meshgrid, + percentile, + piecewise, + place, + quantile, + rot90, + select, + sinc, + sort_complex, + trapezoid, + trim_zeros, + unwrap, + vectorize, + ) + from .lib._histograms_impl import histogram, histogram_bin_edges, histogramdd + from .lib._index_tricks_impl import ( + c_, + diag_indices, + diag_indices_from, + fill_diagonal, + index_exp, + ix_, + mgrid, + ndenumerate, + ndindex, + ogrid, + r_, + ravel_multi_index, + s_, + unravel_index, + ) + from .lib._nanfunctions_impl import ( + nanargmax, + nanargmin, + nancumprod, + nancumsum, + nanmax, + nanmean, + nanmedian, + nanmin, + nanpercentile, + nanprod, + nanquantile, + nanstd, + nansum, + nanvar, + ) + from .lib._npyio_impl import ( + fromregex, + genfromtxt, + load, + loadtxt, + packbits, + save, + savetxt, + savez, + savez_compressed, + unpackbits, + ) + from .lib._polynomial_impl import ( + poly, + poly1d, + polyadd, + polyder, + polydiv, + polyfit, + polyint, + polymul, + polysub, + polyval, + roots, + ) + from .lib._shape_base_impl import ( + apply_along_axis, + apply_over_axes, + array_split, + column_stack, + dsplit, + dstack, + expand_dims, + hsplit, + kron, + put_along_axis, + row_stack, + split, + take_along_axis, + tile, + vsplit, + ) + from .lib._stride_tricks_impl import ( + broadcast_arrays, + broadcast_shapes, + broadcast_to, + ) + from .lib._twodim_base_impl import ( + diag, + diagflat, + eye, + fliplr, + flipud, + histogram2d, + mask_indices, + tri, + tril, + tril_indices, + tril_indices_from, + triu, + triu_indices, + triu_indices_from, + vander, + ) + from .lib._type_check_impl import ( + common_type, + imag, + iscomplex, + iscomplexobj, + isreal, + isrealobj, + mintypecode, + nan_to_num, + real, + real_if_close, + typename, + ) + from .lib._ufunclike_impl import fix, isneginf, isposinf + from .lib._utils_impl import get_include, info, show_runtime + from .matrixlib import asmatrix, bmat, matrix + + # public submodules are imported lazily, therefore are accessible from + # __getattr__. Note that `distutils` (deprecated) and `array_api` + # (experimental label) are not added here, because `from numpy import *` + # must not raise any warnings - that's too disruptive. + __numpy_submodules__ = { + "linalg", "fft", "dtypes", "random", "polynomial", "ma", + "exceptions", "lib", "ctypeslib", "testing", "typing", + "f2py", "test", "rec", "char", "core", "strings", + } + + # We build warning messages for former attributes + _msg = ( + "module 'numpy' has no attribute '{n}'.\n" + "`np.{n}` was a deprecated alias for the builtin `{n}`. " + "To avoid this error in existing code, use `{n}` by itself. " + "Doing this will not modify any behavior and is safe. {extended_msg}\n" + "The aliases was originally deprecated in NumPy 1.20; for more " + "details and guidance see the original release note at:\n" + " https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations") + + _specific_msg = ( + "If you specifically wanted the numpy scalar type, use `np.{}` here.") + + _int_extended_msg = ( + "When replacing `np.{}`, you may wish to use e.g. `np.int64` " + "or `np.int32` to specify the precision. If you wish to review " + "your current use, check the release note link for " + "additional information.") + + _type_info = [ + ("object", ""), # The NumPy scalar only exists by name. + ("float", _specific_msg.format("float64")), + ("complex", _specific_msg.format("complex128")), + ("str", _specific_msg.format("str_")), + ("int", _int_extended_msg.format("int"))] + + __former_attrs__ = { + n: _msg.format(n=n, extended_msg=extended_msg) + for n, extended_msg in _type_info + } + + # Some of these could be defined right away, but most were aliases to + # the Python objects and only removed in NumPy 1.24. Defining them should + # probably wait for NumPy 1.26 or 2.0. + # When defined, these should possibly not be added to `__all__` to avoid + # import with `from numpy import *`. + __future_scalars__ = {"str", "bytes", "object"} + + __array_api_version__ = "2024.12" + + from ._array_api_info import __array_namespace_info__ + + __all__ = list( + __numpy_submodules__ | + set(_core.__all__) | + set(_mat.__all__) | + set(lib._histograms_impl.__all__) | + set(lib._nanfunctions_impl.__all__) | + set(lib._function_base_impl.__all__) | + set(lib._twodim_base_impl.__all__) | + set(lib._shape_base_impl.__all__) | + set(lib._type_check_impl.__all__) | + set(lib._arraysetops_impl.__all__) | + set(lib._ufunclike_impl.__all__) | + set(lib._arraypad_impl.__all__) | + set(lib._utils_impl.__all__) | + set(lib._stride_tricks_impl.__all__) | + set(lib._polynomial_impl.__all__) | + set(lib._npyio_impl.__all__) | + set(lib._index_tricks_impl.__all__) | + {"emath", "show_config", "__version__", "__array_namespace_info__"} + ) + + # Filter out Cython harmless warnings + warnings.filterwarnings("ignore", message="numpy.dtype size changed") + warnings.filterwarnings("ignore", message="numpy.ufunc size changed") + warnings.filterwarnings("ignore", message="numpy.ndarray size changed") + + def __getattr__(attr): + # Warn for expired attributes + import warnings + + if attr == "linalg": + import numpy.linalg as linalg + return linalg + elif attr == "fft": + import numpy.fft as fft + return fft + elif attr == "dtypes": + import numpy.dtypes as dtypes + return dtypes + elif attr == "random": + import numpy.random as random + return random + elif attr == "polynomial": + import numpy.polynomial as polynomial + return polynomial + elif attr == "ma": + import numpy.ma as ma + return ma + elif attr == "ctypeslib": + import numpy.ctypeslib as ctypeslib + return ctypeslib + elif attr == "exceptions": + import numpy.exceptions as exceptions + return exceptions + elif attr == "testing": + import numpy.testing as testing + return testing + elif attr == "matlib": + import numpy.matlib as matlib + return matlib + elif attr == "f2py": + import numpy.f2py as f2py + return f2py + elif attr == "typing": + import numpy.typing as typing + return typing + elif attr == "rec": + import numpy.rec as rec + return rec + elif attr == "char": + import numpy.char as char + return char + elif attr == "array_api": + raise AttributeError("`numpy.array_api` is not available from " + "numpy 2.0 onwards", name=None) + elif attr == "core": + import numpy.core as core + return core + elif attr == "strings": + import numpy.strings as strings + return strings + elif attr == "distutils": + if 'distutils' in __numpy_submodules__: + import numpy.distutils as distutils + return distutils + else: + raise AttributeError("`numpy.distutils` is not available from " + "Python 3.12 onwards", name=None) + + if attr in __future_scalars__: + # And future warnings for those that will change, but also give + # the AttributeError + warnings.warn( + f"In the future `np.{attr}` will be defined as the " + "corresponding NumPy scalar.", FutureWarning, stacklevel=2) + + if attr in __former_attrs__: + raise AttributeError(__former_attrs__[attr], name=None) + + if attr in __expired_attributes__: + raise AttributeError( + f"`np.{attr}` was removed in the NumPy 2.0 release. " + f"{__expired_attributes__[attr]}", + name=None + ) + + if attr == "chararray": + warnings.warn( + "`np.chararray` is deprecated and will be removed from " + "the main namespace in the future. Use an array with a string " + "or bytes dtype instead.", DeprecationWarning, stacklevel=2) + import numpy.char as char + return char.chararray + + raise AttributeError(f"module {__name__!r} has no attribute {attr!r}") + + def __dir__(): + public_symbols = ( + globals().keys() | __numpy_submodules__ + ) + public_symbols -= { + "matrixlib", "matlib", "tests", "conftest", "version", + "distutils", "array_api" + } + return list(public_symbols) + + # Pytest testing + from numpy._pytesttester import PytestTester + test = PytestTester(__name__) + del PytestTester + + def _sanity_check(): + """ + Quick sanity checks for common bugs caused by environment. + There are some cases e.g. with wrong BLAS ABI that cause wrong + results under specific runtime conditions that are not necessarily + achieved during test suite runs, and it is useful to catch those early. + + See https://github.com/numpy/numpy/issues/8577 and other + similar bug reports. + + """ + try: + x = ones(2, dtype=float32) + if not abs(x.dot(x) - float32(2.0)) < 1e-5: + raise AssertionError + except AssertionError: + msg = ("The current Numpy installation ({!r}) fails to " + "pass simple sanity checks. This can be caused for example " + "by incorrect BLAS library being linked in, or by mixing " + "package managers (pip, conda, apt, ...). Search closed " + "numpy issues for similar problems.") + raise RuntimeError(msg.format(__file__)) from None + + _sanity_check() + del _sanity_check + + def _mac_os_check(): + """ + Quick Sanity check for Mac OS look for accelerate build bugs. + Testing numpy polyfit calls init_dgelsd(LAPACK) + """ + try: + c = array([3., 2., 1.]) + x = linspace(0, 2, 5) + y = polyval(c, x) + _ = polyfit(x, y, 2, cov=True) + except ValueError: + pass + + if sys.platform == "darwin": + from . import exceptions + with warnings.catch_warnings(record=True) as w: + _mac_os_check() + # Throw runtime error, if the test failed + # Check for warning and report the error_message + if len(w) > 0: + for _wn in w: + if _wn.category is exceptions.RankWarning: + # Ignore other warnings, they may not be relevant (see gh-25433) + error_message = ( + f"{_wn.category.__name__}: {_wn.message}" + ) + msg = ( + "Polyfit sanity test emitted a warning, most likely due " + "to using a buggy Accelerate backend." + "\nIf you compiled yourself, more information is available at:" # noqa: E501 + "\nhttps://numpy.org/devdocs/building/index.html" + "\nOtherwise report this to the vendor " + f"that provided NumPy.\n\n{error_message}\n") + raise RuntimeError(msg) + del _wn + del w + del _mac_os_check + + def blas_fpe_check(): + # Check if BLAS adds spurious FPEs, mostly seen on M4 arms with Accelerate. + with errstate(all='raise'): + x = ones((20, 20)) + try: + x @ x + except FloatingPointError: + res = _core._multiarray_umath._blas_supports_fpe(False) + if res: # res was not modified (hardcoded to True for now) + warnings.warn( + "Spurious warnings given by blas but suppression not " + "set up on this platform. Please open a NumPy issue.", + UserWarning, stacklevel=2) + + blas_fpe_check() + del blas_fpe_check + + def hugepage_setup(): + """ + We usually use madvise hugepages support, but on some old kernels it + is slow and thus better avoided. Specifically kernel version 4.6 + had a bug fix which probably fixed this: + https://github.com/torvalds/linux/commit/7cf91a98e607c2f935dbcc177d70011e95b8faff + """ + use_hugepage = os.environ.get("NUMPY_MADVISE_HUGEPAGE", None) + if sys.platform == "linux" and use_hugepage is None: + # If there is an issue with parsing the kernel version, + # set use_hugepage to 0. Usage of LooseVersion will handle + # the kernel version parsing better, but avoided since it + # will increase the import time. + # See: #16679 for related discussion. + try: + use_hugepage = 1 + kernel_version = os.uname().release.split(".")[:2] + kernel_version = tuple(int(v) for v in kernel_version) + if kernel_version < (4, 6): + use_hugepage = 0 + except ValueError: + use_hugepage = 0 + elif use_hugepage is None: + # This is not Linux, so it should not matter, just enable anyway + use_hugepage = 1 + else: + use_hugepage = int(use_hugepage) + return use_hugepage + + # Note that this will currently only make a difference on Linux + _core.multiarray._set_madvise_hugepage(hugepage_setup()) + del hugepage_setup + + # Give a warning if NumPy is reloaded or imported on a sub-interpreter + # We do this from python, since the C-module may not be reloaded and + # it is tidier organized. + _core.multiarray._multiarray_umath._reload_guard() + + # TODO: Remove the environment variable entirely now that it is "weak" + if (os.environ.get("NPY_PROMOTION_STATE", "weak") != "weak"): + warnings.warn( + "NPY_PROMOTION_STATE was a temporary feature for NumPy 2.0 " + "transition and is ignored after NumPy 2.2.", + UserWarning, stacklevel=2) + + # Tell PyInstaller where to find hook-numpy.py + def _pyinstaller_hooks_dir(): + from pathlib import Path + return [str(Path(__file__).with_name("_pyinstaller").resolve())] + + +# Remove symbols imported for internal use +del os, sys, warnings diff --git a/local_packages/numpy/__init__.pyi b/local_packages/numpy/__init__.pyi new file mode 100644 index 0000000..28bbe31 --- /dev/null +++ b/local_packages/numpy/__init__.pyi @@ -0,0 +1,6202 @@ +# ruff: noqa: I001 +import builtins +import sys +import mmap +import ctypes as ct +import array as _array +import datetime as dt +import inspect +from abc import abstractmethod +from types import EllipsisType, ModuleType, TracebackType, MappingProxyType, GenericAlias +from decimal import Decimal +from fractions import Fraction +from uuid import UUID + +import numpy as np +from numpy.__config__ import show as show_config +from numpy._pytesttester import PytestTester +from numpy._core._internal import _ctypes + +from numpy._typing import ( # type: ignore[deprecated] + # Arrays + ArrayLike, + NDArray, + _SupportsArray, + _NestedSequence, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeUInt_co, + _ArrayLikeInt, + _ArrayLikeInt_co, + _ArrayLikeFloat64_co, + _ArrayLikeFloat_co, + _ArrayLikeComplex128_co, + _ArrayLikeComplex_co, + _ArrayLikeNumber_co, + _ArrayLikeObject_co, + _ArrayLikeBytes_co, + _ArrayLikeStr_co, + _ArrayLikeString_co, + _ArrayLikeTD64_co, + _ArrayLikeDT64_co, + # DTypes + DTypeLike, + _DTypeLike, + _DTypeLikeVoid, + _VoidDTypeLike, + # Shapes + _AnyShape, + _Shape, + _ShapeLike, + # Scalars + _CharLike_co, + _IntLike_co, + _FloatLike_co, + _TD64Like_co, + _NumberLike_co, + _ScalarLike_co, + # `number` precision + NBitBase, + # NOTE: Do not remove the extended precision bit-types even if seemingly unused; + # they're used by the mypy plugin + _128Bit, + _96Bit, + _64Bit, + _32Bit, + _16Bit, + _8Bit, + _NBitByte, + _NBitShort, + _NBitIntC, + _NBitIntP, + _NBitLong, + _NBitLongLong, + _NBitHalf, + _NBitSingle, + _NBitDouble, + _NBitLongDouble, + # Character codes + _BoolCodes, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _Float16Codes, + _Float32Codes, + _Float64Codes, + _Complex64Codes, + _Complex128Codes, + _ByteCodes, + _ShortCodes, + _IntCCodes, + _IntPCodes, + _LongCodes, + _LongLongCodes, + _UByteCodes, + _UShortCodes, + _UIntCCodes, + _UIntPCodes, + _ULongCodes, + _ULongLongCodes, + _HalfCodes, + _SingleCodes, + _DoubleCodes, + _LongDoubleCodes, + _CSingleCodes, + _CDoubleCodes, + _CLongDoubleCodes, + _DT64Codes, + _TD64Codes, + _StrCodes, + _BytesCodes, + _VoidCodes, + _ObjectCodes, + _StringCodes, + _UnsignedIntegerCodes, + _SignedIntegerCodes, + _IntegerCodes, + _FloatingCodes, + _ComplexFloatingCodes, + _InexactCodes, + _CharacterCodes, + # Ufuncs + _UFunc_Nin1_Nout1, + _UFunc_Nin2_Nout1, + _UFunc_Nin1_Nout2, + _UFunc_Nin2_Nout2, + _GUFunc_Nin2_Nout1, +) + +# NOTE: Numpy's mypy plugin is used for removing the types unavailable to the specific platform +from numpy._typing._extended_precision import ( + float96, + float128, + complex192, + complex256, +) + +from numpy._array_api_info import __array_namespace_info__ + +from collections.abc import ( + Callable, + Iterable, + Iterator, + Mapping, + Sequence, +) + +if sys.version_info >= (3, 12): + from collections.abc import Buffer as _SupportsBuffer +else: + _SupportsBuffer: TypeAlias = ( + bytes + | bytearray + | memoryview + | _array.array[Any] + | mmap.mmap + | NDArray[Any] + | generic + ) + +from typing import ( + Any, + ClassVar, + Final, + Generic, + Literal as L, + LiteralString, + Never, + NoReturn, + Protocol, + Self, + SupportsComplex, + SupportsFloat, + SupportsInt, + SupportsIndex, + TypeAlias, + TypedDict, + final, + overload, + type_check_only, +) + +# NOTE: `typing_extensions` and `_typeshed` are always available in `.pyi` stubs, even +# if not available at runtime. This is because the `typeshed` stubs for the standard +# library include `typing_extensions` stubs: +# https://github.com/python/typeshed/blob/main/stdlib/typing_extensions.pyi +from _typeshed import Incomplete, StrOrBytesPath, SupportsFlush, SupportsLenAndGetItem, SupportsWrite +from typing_extensions import CapsuleType, TypeVar, deprecated, override + +from numpy import ( + char, + core, + ctypeslib, + dtypes, + exceptions, + f2py, + fft, + lib, + linalg, + ma, + polynomial, + random, + rec, + strings, + testing, + typing, +) + +# available through `__getattr__`, but not in `__all__` or `__dir__` +from numpy import ( + __config__ as __config__, + matlib as matlib, + matrixlib as matrixlib, + version as version, +) +if sys.version_info < (3, 12): + from numpy import distutils as distutils + +from numpy._core.records import ( + record, + recarray, +) + +from numpy._core.function_base import ( + linspace, + logspace, + geomspace, +) + +from numpy._core.fromnumeric import ( + take, + reshape, + choose, + repeat, + put, + swapaxes, + transpose, + matrix_transpose, + partition, + argpartition, + sort, + argsort, + argmax, + argmin, + searchsorted, + resize, + squeeze, + diagonal, + trace, + ravel, + nonzero, + shape, + compress, + clip, + sum, + all, + any, + cumsum, + cumulative_sum, + ptp, + max, + min, + amax, + amin, + prod, + cumprod, + cumulative_prod, + ndim, + size, + around, + round, + mean, + std, + var, +) + +from numpy._core._asarray import ( + require, +) + +from numpy._core._type_aliases import ( + sctypeDict, +) + +from numpy._core._ufunc_config import ( + seterr, + geterr, + setbufsize, + getbufsize, + seterrcall, + geterrcall, + errstate, +) + +from numpy._core.arrayprint import ( + set_printoptions, + get_printoptions, + array2string, + format_float_scientific, + format_float_positional, + array_repr, + array_str, + printoptions, +) + +from numpy._core.einsumfunc import ( + einsum, + einsum_path, +) +from numpy._core.getlimits import ( + finfo, + iinfo, +) + +from numpy._core.multiarray import ( + array, + empty_like, + empty, + zeros, + concatenate, + inner, + where, + lexsort, + can_cast, + min_scalar_type, + result_type, + dot, + vdot, + bincount, + copyto, + putmask, + packbits, + unpackbits, + shares_memory, + may_share_memory, + asarray, + asanyarray, + ascontiguousarray, + asfortranarray, + arange, + busday_count, + busday_offset, + datetime_as_string, + datetime_data, + frombuffer, + fromfile, + fromiter, + is_busday, + promote_types, + fromstring, + frompyfunc, + nested_iters, + flagsobj, +) + +from numpy._core.numeric import ( + zeros_like, + ones, + ones_like, + full, + full_like, + count_nonzero, + isfortran, + argwhere, + flatnonzero, + correlate, + convolve, + outer, + tensordot, + roll, + rollaxis, + moveaxis, + cross, + indices, + fromfunction, + isscalar, + binary_repr, + base_repr, + identity, + allclose, + isclose, + array_equal, + array_equiv, + astype, +) + +from numpy._core.numerictypes import ( + isdtype, + issubdtype, + ScalarType, + typecodes, +) + +from numpy._core.shape_base import ( + atleast_1d, + atleast_2d, + atleast_3d, + block, + hstack, + stack, + vstack, + unstack, +) + +from ._expired_attrs_2_0 import __expired_attributes__ as __expired_attributes__ +from ._globals import _CopyMode as _CopyMode +from ._globals import _NoValue as _NoValue, _NoValueType + +from numpy.lib import ( + scimath as emath, +) + +from numpy.lib._arraypad_impl import ( + pad, +) + +from numpy.lib._arraysetops_impl import ( + ediff1d, + intersect1d, + isin, + setdiff1d, + setxor1d, + union1d, + unique, + unique_all, + unique_counts, + unique_inverse, + unique_values, +) + +from numpy.lib._function_base_impl import ( # type: ignore[deprecated] + select, + piecewise, + trim_zeros, + copy, + iterable, + percentile, + diff, + gradient, + angle, + unwrap, + sort_complex, + flip, + rot90, + extract, + place, + asarray_chkfinite, + average, + digitize, + cov, + corrcoef, + median, + sinc, + hamming, + hanning, + bartlett, + blackman, + kaiser, + trapezoid, + i0, + meshgrid, + delete, + insert, + append, + interp, + quantile, + vectorize, +) + +from numpy.lib._histograms_impl import ( + histogram_bin_edges, + histogram, + histogramdd, +) + +from numpy.lib._index_tricks_impl import ( + ndenumerate, + ndindex, + ravel_multi_index, + unravel_index, + mgrid, + ogrid, + r_, + c_, + s_, + index_exp, + ix_, + fill_diagonal, + diag_indices, + diag_indices_from, +) + +from numpy.lib._nanfunctions_impl import ( + nansum, + nanmax, + nanmin, + nanargmax, + nanargmin, + nanmean, + nanmedian, + nanpercentile, + nanvar, + nanstd, + nanprod, + nancumsum, + nancumprod, + nanquantile, +) + +from numpy.lib._npyio_impl import ( + savetxt, + loadtxt, + genfromtxt, + load, + save, + savez, + savez_compressed, + fromregex, +) + +from numpy.lib._polynomial_impl import ( + poly, + roots, + polyint, + polyder, + polyadd, + polysub, + polymul, + polydiv, + polyval, + polyfit, +) + +from numpy.lib._shape_base_impl import ( # type: ignore[deprecated] + column_stack, + row_stack, + dstack, + array_split, + split, + hsplit, + vsplit, + dsplit, + apply_over_axes, + expand_dims, + apply_along_axis, + kron, + tile, + take_along_axis, + put_along_axis, +) + +from numpy.lib._stride_tricks_impl import ( + broadcast_to, + broadcast_arrays, + broadcast_shapes, +) + +from numpy.lib._twodim_base_impl import ( + diag, + diagflat, + eye, + fliplr, + flipud, + tri, + triu, + tril, + vander, + histogram2d, + mask_indices, + tril_indices, + tril_indices_from, + triu_indices, + triu_indices_from, +) + +from numpy.lib._type_check_impl import ( + mintypecode, + real, + imag, + iscomplex, + isreal, + iscomplexobj, + isrealobj, + nan_to_num, + real_if_close, + typename, + common_type, +) + +from numpy.lib._ufunclike_impl import ( + fix, + isposinf, + isneginf, +) + +from numpy.lib._utils_impl import ( + get_include, + info, + show_runtime, +) + +from numpy.matrixlib import ( + asmatrix, + bmat, + matrix, +) + +__all__ = [ # noqa: RUF022 + # __numpy_submodules__ + "char", "core", "ctypeslib", "dtypes", "exceptions", "f2py", "fft", "lib", "linalg", + "ma", "polynomial", "random", "rec", "strings", "test", "testing", "typing", + + # _core.__all__ + "abs", "acos", "acosh", "asin", "asinh", "atan", "atanh", "atan2", "bitwise_invert", + "bitwise_left_shift", "bitwise_right_shift", "concat", "pow", "permute_dims", + "memmap", "sctypeDict", "record", "recarray", + + # _core.numeric.__all__ + "newaxis", "ndarray", "flatiter", "nditer", "nested_iters", "ufunc", "arange", + "array", "asarray", "asanyarray", "ascontiguousarray", "asfortranarray", "zeros", + "count_nonzero", "empty", "broadcast", "dtype", "fromstring", "fromfile", + "frombuffer", "from_dlpack", "where", "argwhere", "copyto", "concatenate", + "lexsort", "astype", "can_cast", "promote_types", "min_scalar_type", "result_type", + "isfortran", "empty_like", "zeros_like", "ones_like", "correlate", "convolve", + "inner", "dot", "outer", "vdot", "roll", "rollaxis", "moveaxis", "cross", + "tensordot", "little_endian", "fromiter", "array_equal", "array_equiv", "indices", + "fromfunction", "isclose", "isscalar", "binary_repr", "base_repr", "ones", + "identity", "allclose", "putmask", "flatnonzero", "inf", "nan", "False_", "True_", + "bitwise_not", "full", "full_like", "matmul", "vecdot", "vecmat", + "shares_memory", "may_share_memory", + "all", "amax", "amin", "any", "argmax", "argmin", "argpartition", "argsort", + "around", "choose", "clip", "compress", "cumprod", "cumsum", "cumulative_prod", + "cumulative_sum", "diagonal", "mean", "max", "min", "matrix_transpose", "ndim", + "nonzero", "partition", "prod", "ptp", "put", "ravel", "repeat", "reshape", + "resize", "round", "searchsorted", "shape", "size", "sort", "squeeze", "std", "sum", + "swapaxes", "take", "trace", "transpose", "var", + "absolute", "add", "arccos", "arccosh", "arcsin", "arcsinh", "arctan", "arctan2", + "arctanh", "bitwise_and", "bitwise_or", "bitwise_xor", "cbrt", "ceil", "conj", + "conjugate", "copysign", "cos", "cosh", "bitwise_count", "deg2rad", "degrees", + "divide", "divmod", "e", "equal", "euler_gamma", "exp", "exp2", "expm1", "fabs", + "floor", "floor_divide", "float_power", "fmax", "fmin", "fmod", "frexp", + "frompyfunc", "gcd", "greater", "greater_equal", "heaviside", "hypot", "invert", + "isfinite", "isinf", "isnan", "isnat", "lcm", "ldexp", "left_shift", "less", + "less_equal", "log", "log10", "log1p", "log2", "logaddexp", "logaddexp2", + "logical_and", "logical_not", "logical_or", "logical_xor", "matvec", "maximum", "minimum", + "mod", "modf", "multiply", "negative", "nextafter", "not_equal", "pi", "positive", + "power", "rad2deg", "radians", "reciprocal", "remainder", "right_shift", "rint", + "sign", "signbit", "sin", "sinh", "spacing", "sqrt", "square", "subtract", "tan", + "tanh", "true_divide", "trunc", "ScalarType", "typecodes", "issubdtype", + "datetime_data", "datetime_as_string", "busday_offset", "busday_count", "is_busday", + "busdaycalendar", "isdtype", + "complexfloating", "character", "unsignedinteger", "inexact", "generic", "floating", + "integer", "signedinteger", "number", "flexible", "bool", "float16", "float32", + "float64", "longdouble", "complex64", "complex128", "clongdouble", + "bytes_", "str_", "void", "object_", "datetime64", "timedelta64", "int8", "byte", + "uint8", "ubyte", "int16", "short", "uint16", "ushort", "int32", "intc", "uint32", + "uintc", "int64", "long", "uint64", "ulong", "longlong", "ulonglong", "intp", + "uintp", "double", "cdouble", "single", "csingle", "half", "bool_", "int_", "uint", + "float96", "float128", "complex192", "complex256", + "array2string", "array_str", "array_repr", "set_printoptions", "get_printoptions", + "printoptions", "format_float_positional", "format_float_scientific", "require", + "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall", + "errstate", + # _core.function_base.__all__ + "logspace", "linspace", "geomspace", + # _core.getlimits.__all__ + "finfo", "iinfo", + # _core.shape_base.__all__ + "atleast_1d", "atleast_2d", "atleast_3d", "block", "hstack", "stack", "unstack", + "vstack", + # _core.einsumfunc.__all__ + "einsum", "einsum_path", + # matrixlib.__all__ + "matrix", "bmat", "asmatrix", + # lib._histograms_impl.__all__ + "histogram", "histogramdd", "histogram_bin_edges", + # lib._nanfunctions_impl.__all__ + "nansum", "nanmax", "nanmin", "nanargmax", "nanargmin", "nanmean", "nanmedian", + "nanpercentile", "nanvar", "nanstd", "nanprod", "nancumsum", "nancumprod", + "nanquantile", + # lib._function_base_impl.__all__ + "select", "piecewise", "trim_zeros", "copy", "iterable", "percentile", "diff", + "gradient", "angle", "unwrap", "sort_complex", "flip", "rot90", "extract", "place", + "vectorize", "asarray_chkfinite", "average", "bincount", "digitize", "cov", + "corrcoef", "median", "sinc", "hamming", "hanning", "bartlett", "blackman", + "kaiser", "trapezoid", "i0", "meshgrid", "delete", "insert", "append", + "interp", "quantile", + # lib._twodim_base_impl.__all__ + "diag", "diagflat", "eye", "fliplr", "flipud", "tri", "triu", "tril", "vander", + "histogram2d", "mask_indices", "tril_indices", "tril_indices_from", "triu_indices", + "triu_indices_from", + # lib._shape_base_impl.__all__ + "column_stack", "dstack", "array_split", "split", "hsplit", "vsplit", "dsplit", + "apply_over_axes", "expand_dims", "apply_along_axis", "kron", "tile", + "take_along_axis", "put_along_axis", "row_stack", + # lib._type_check_impl.__all__ + "iscomplexobj", "isrealobj", "imag", "iscomplex", "isreal", "nan_to_num", "real", + "real_if_close", "typename", "mintypecode", "common_type", + # lib._arraysetops_impl.__all__ + "ediff1d", "intersect1d", "isin", "setdiff1d", "setxor1d", "union1d", + "unique", "unique_all", "unique_counts", "unique_inverse", "unique_values", + # lib._ufunclike_impl.__all__ + "fix", "isneginf", "isposinf", + # lib._arraypad_impl.__all__ + "pad", + # lib._utils_impl.__all__ + "get_include", "info", "show_runtime", + # lib._stride_tricks_impl.__all__ + "broadcast_to", "broadcast_arrays", "broadcast_shapes", + # lib._polynomial_impl.__all__ + "poly", "roots", "polyint", "polyder", "polyadd", "polysub", "polymul", "polydiv", + "polyval", "poly1d", "polyfit", + # lib._npyio_impl.__all__ + "savetxt", "loadtxt", "genfromtxt", "load", "save", "savez", "savez_compressed", + "packbits", "unpackbits", "fromregex", + # lib._index_tricks_impl.__all__ + "ravel_multi_index", "unravel_index", "mgrid", "ogrid", "r_", "c_", "s_", + "index_exp", "ix_", "ndenumerate", "ndindex", "fill_diagonal", "diag_indices", + "diag_indices_from", + + # __init__.__all__ + "emath", "show_config", "__version__", "__array_namespace_info__", +] # fmt: skip + +### Constrained types (for internal use only) +# Only use these for functions; never as generic type parameter. + +_AnyStr = TypeVar("_AnyStr", LiteralString, str, bytes) +_AnyShapeT = TypeVar( + "_AnyShapeT", + tuple[()], # 0-d + tuple[int], # 1-d + tuple[int, int], # 2-d + tuple[int, int, int], # 3-d + tuple[int, int, int, int], # 4-d + tuple[int, int, int, int, int], # 5-d + tuple[int, int, int, int, int, int], # 6-d + tuple[int, int, int, int, int, int, int], # 7-d + tuple[int, int, int, int, int, int, int, int], # 8-d + tuple[int, ...], # N-d +) +_AnyTD64Item = TypeVar("_AnyTD64Item", dt.timedelta, int, None, dt.timedelta | int | None) +_AnyDT64Arg = TypeVar("_AnyDT64Arg", dt.datetime, dt.date, None) +_AnyDT64Item = TypeVar("_AnyDT64Item", dt.datetime, dt.date, int, None, dt.date, int | None) +_AnyDate = TypeVar("_AnyDate", dt.date, dt.datetime) +_AnyDateOrTime = TypeVar("_AnyDateOrTime", dt.date, dt.datetime, dt.timedelta) + +### Type parameters (for internal use only) + +_T = TypeVar("_T") +_T_co = TypeVar("_T_co", covariant=True) +_T_contra = TypeVar("_T_contra", contravariant=True) +_RealT_co = TypeVar("_RealT_co", covariant=True) +_ImagT_co = TypeVar("_ImagT_co", covariant=True) + +_DTypeT = TypeVar("_DTypeT", bound=dtype) +_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) +_FlexDTypeT = TypeVar("_FlexDTypeT", bound=dtype[flexible]) + +_ArrayT = TypeVar("_ArrayT", bound=ndarray) +_ArrayT_co = TypeVar("_ArrayT_co", bound=ndarray, default=ndarray, covariant=True) +_BoolArrayT = TypeVar("_BoolArrayT", bound=NDArray[np.bool]) +_IntegerArrayT = TypeVar("_IntegerArrayT", bound=NDArray[integer]) +_IntegralArrayT = TypeVar("_IntegralArrayT", bound=NDArray[np.bool | integer | object_]) +_FloatingArrayT = TypeVar("_FloatingArrayT", bound=NDArray[floating]) +_FloatingTimedeltaArrayT = TypeVar("_FloatingTimedeltaArrayT", bound=NDArray[floating | timedelta64]) +_ComplexFloatingArrayT = TypeVar("_ComplexFloatingArrayT", bound=NDArray[complexfloating]) +_InexactArrayT = TypeVar("_InexactArrayT", bound=NDArray[inexact]) +_InexactTimedeltaArrayT = TypeVar("_InexactTimedeltaArrayT", bound=NDArray[inexact | timedelta64]) +_NumberArrayT = TypeVar("_NumberArrayT", bound=NDArray[number]) +_NumberCharacterArrayT = TypeVar("_NumberCharacterArrayT", bound=ndarray[Any, dtype[number | character] | dtypes.StringDType]) +_TimedeltaArrayT = TypeVar("_TimedeltaArrayT", bound=NDArray[timedelta64]) +_TimeArrayT = TypeVar("_TimeArrayT", bound=NDArray[datetime64 | timedelta64]) +_ObjectArrayT = TypeVar("_ObjectArrayT", bound=NDArray[object_]) +_BytesArrayT = TypeVar("_BytesArrayT", bound=NDArray[bytes_]) +_StringArrayT = TypeVar("_StringArrayT", bound=ndarray[Any, dtype[str_] | dtypes.StringDType]) +_RealArrayT = TypeVar("_RealArrayT", bound=NDArray[floating | integer | timedelta64 | np.bool | object_]) +_NumericArrayT = TypeVar("_NumericArrayT", bound=NDArray[number | timedelta64 | object_]) + +_ShapeT = TypeVar("_ShapeT", bound=_Shape) +_Shape1T = TypeVar("_Shape1T", bound=tuple[int, *tuple[int, ...]]) +_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) +_2DShapeT_co = TypeVar("_2DShapeT_co", bound=_2D, default=_2D, covariant=True) +_1NShapeT = TypeVar("_1NShapeT", bound=tuple[L[1], *tuple[L[1], ...]]) # (1,) | (1, 1) | (1, 1, 1) | ... + +_ScalarT = TypeVar("_ScalarT", bound=generic) +_ScalarT_co = TypeVar("_ScalarT_co", bound=generic, default=Any, covariant=True) +_NumberT = TypeVar("_NumberT", bound=number) +_InexactT = TypeVar("_InexactT", bound=inexact) +_RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) +_IntegerT = TypeVar("_IntegerT", bound=integer) +_NonObjectScalarT = TypeVar("_NonObjectScalarT", bound=np.bool | number | flexible | datetime64 | timedelta64) + +_NBit = TypeVar("_NBit", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] +_NBit1 = TypeVar("_NBit1", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] +_NBit2 = TypeVar("_NBit2", bound=NBitBase, default=_NBit1) # pyright: ignore[reportDeprecated] + +_ItemT_co = TypeVar("_ItemT_co", default=Any, covariant=True) +_BoolItemT = TypeVar("_BoolItemT", bound=builtins.bool) +_BoolItemT_co = TypeVar("_BoolItemT_co", bound=builtins.bool, default=builtins.bool, covariant=True) +_NumberItemT_co = TypeVar("_NumberItemT_co", bound=complex, default=int | float | complex, covariant=True) +_InexactItemT_co = TypeVar("_InexactItemT_co", bound=complex, default=float | complex, covariant=True) +_FlexibleItemT_co = TypeVar( + "_FlexibleItemT_co", + bound=_CharLike_co | tuple[Any, ...], + default=_CharLike_co | tuple[Any, ...], + covariant=True, +) +_CharacterItemT_co = TypeVar("_CharacterItemT_co", bound=_CharLike_co, default=_CharLike_co, covariant=True) +_TD64ItemT_co = TypeVar("_TD64ItemT_co", bound=dt.timedelta | int | None, default=dt.timedelta | int | None, covariant=True) +_DT64ItemT_co = TypeVar("_DT64ItemT_co", bound=dt.date | int | None, default=dt.date | int | None, covariant=True) +_TD64UnitT = TypeVar("_TD64UnitT", bound=_TD64Unit, default=_TD64Unit) +_BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[integer | np.bool]) + +### Type Aliases (for internal use only) + +_Falsy: TypeAlias = L[False, 0] | np.bool[L[False]] +_Truthy: TypeAlias = L[True, 1] | np.bool[L[True]] + +_1D: TypeAlias = tuple[int] +_2D: TypeAlias = tuple[int, int] +_2Tuple: TypeAlias = tuple[_T, _T] + +_ArrayUInt_co: TypeAlias = NDArray[unsignedinteger | np.bool] +_ArrayInt_co: TypeAlias = NDArray[integer | np.bool] +_ArrayFloat64_co: TypeAlias = NDArray[floating[_64Bit] | float32 | float16 | integer | np.bool] +_ArrayFloat_co: TypeAlias = NDArray[floating | integer | np.bool] +_ArrayComplex128_co: TypeAlias = NDArray[number[_64Bit] | number[_32Bit] | float16 | integer | np.bool] +_ArrayComplex_co: TypeAlias = NDArray[inexact | integer | np.bool] +_ArrayNumber_co: TypeAlias = NDArray[number | np.bool] +_ArrayTD64_co: TypeAlias = NDArray[timedelta64 | integer | np.bool] + +_Float64_co: TypeAlias = float | floating[_64Bit] | float32 | float16 | integer | np.bool +_Complex64_co: TypeAlias = number[_32Bit] | number[_16Bit] | number[_8Bit] | builtins.bool | np.bool +_Complex128_co: TypeAlias = complex | number[_64Bit] | _Complex64_co + +_ToIndex: TypeAlias = SupportsIndex | slice | EllipsisType | _ArrayLikeInt_co | None +_ToIndices: TypeAlias = _ToIndex | tuple[_ToIndex, ...] + +_UnsignedIntegerCType: TypeAlias = type[ + ct.c_uint8 | ct.c_uint16 | ct.c_uint32 | ct.c_uint64 + | ct.c_ushort | ct.c_uint | ct.c_ulong | ct.c_ulonglong + | ct.c_size_t | ct.c_void_p +] # fmt: skip +_SignedIntegerCType: TypeAlias = type[ + ct.c_int8 | ct.c_int16 | ct.c_int32 | ct.c_int64 + | ct.c_short | ct.c_int | ct.c_long | ct.c_longlong + | ct.c_ssize_t +] # fmt: skip +_FloatingCType: TypeAlias = type[ct.c_float | ct.c_double | ct.c_longdouble] +_IntegerCType: TypeAlias = _UnsignedIntegerCType | _SignedIntegerCType + +# some commonly used builtin types that are known to result in a +# `dtype[object_]`, when their *type* is passed to the `dtype` constructor +# NOTE: `builtins.object` should not be included here +_BuiltinObjectLike: TypeAlias = ( + slice | Decimal | Fraction | UUID + | dt.date | dt.time | dt.timedelta | dt.tzinfo + | tuple[Any, ...] | list[Any] | set[Any] | frozenset[Any] | dict[Any, Any] +) # fmt: skip + +# Introduce an alias for `dtype` to avoid naming conflicts. +_dtype: TypeAlias = dtype[_ScalarT] + +_ByteOrderChar: TypeAlias = L["<", ">", "=", "|"] +# can be anything, is case-insensitive, and only the first character matters +_ByteOrder: TypeAlias = L[ + "S", # swap the current order (default) + "<", "L", "little", # little-endian + ">", "B", "big", # big endian + "=", "N", "native", # native order + "|", "I", # ignore +] # fmt: skip +_DTypeKind: TypeAlias = L[ + "b", # boolean + "i", # signed integer + "u", # unsigned integer + "f", # floating-point + "c", # complex floating-point + "m", # timedelta64 + "M", # datetime64 + "O", # python object + "S", # byte-string (fixed-width) + "U", # unicode-string (fixed-width) + "V", # void + "T", # unicode-string (variable-width) +] +_DTypeChar: TypeAlias = L[ + "?", # bool + "b", # byte + "B", # ubyte + "h", # short + "H", # ushort + "i", # intc + "I", # uintc + "l", # long + "L", # ulong + "q", # longlong + "Q", # ulonglong + "e", # half + "f", # single + "d", # double + "g", # longdouble + "F", # csingle + "D", # cdouble + "G", # clongdouble + "O", # object + "S", # bytes_ (S0) + "a", # bytes_ (deprecated) + "U", # str_ + "V", # void + "M", # datetime64 + "m", # timedelta64 + "c", # bytes_ (S1) + "T", # StringDType +] +_DTypeNum: TypeAlias = L[ + 0, # bool + 1, # byte + 2, # ubyte + 3, # short + 4, # ushort + 5, # intc + 6, # uintc + 7, # long + 8, # ulong + 9, # longlong + 10, # ulonglong + 23, # half + 11, # single + 12, # double + 13, # longdouble + 14, # csingle + 15, # cdouble + 16, # clongdouble + 17, # object + 18, # bytes_ + 19, # str_ + 20, # void + 21, # datetime64 + 22, # timedelta64 + 25, # no type + 256, # user-defined + 2056, # StringDType +] +_DTypeBuiltinKind: TypeAlias = L[0, 1, 2] + +_ArrayAPIVersion: TypeAlias = L["2021.12", "2022.12", "2023.12", "2024.12"] + +_CastingKind: TypeAlias = L["no", "equiv", "safe", "same_kind", "same_value", "unsafe"] + +_OrderKACF: TypeAlias = L["K", "A", "C", "F"] | None +_OrderACF: TypeAlias = L["A", "C", "F"] | None +_OrderCF: TypeAlias = L["C", "F"] | None # noqa: PYI047 + +_ModeKind: TypeAlias = L["raise", "wrap", "clip"] +_PartitionKind: TypeAlias = L["introselect"] +# in practice, only the first case-insensitive character is considered (so e.g. +# "QuantumSort3000" will be interpreted as quicksort). +_SortKind: TypeAlias = L[ + "Q", "quick", "quicksort", + "M", "merge", "mergesort", + "H", "heap", "heapsort", + "S", "stable", "stablesort", +] +_SortSide: TypeAlias = L["left", "right"] + +_ConvertibleToInt: TypeAlias = SupportsInt | SupportsIndex | _CharLike_co +_ConvertibleToFloat: TypeAlias = SupportsFloat | SupportsIndex | _CharLike_co +_ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co +_ConvertibleToTD64: TypeAlias = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None +_ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None + +_NDIterFlagsKind: TypeAlias = L[ + "buffered", + "c_index", + "copy_if_overlap", + "common_dtype", + "delay_bufalloc", + "external_loop", + "f_index", + "grow_inner", "growinner", + "multi_index", + "ranged", + "refs_ok", + "reduce_ok", + "zerosize_ok", +] +_NDIterFlagsOp: TypeAlias = L[ + "aligned", + "allocate", + "arraymask", + "copy", + "config", + "nbo", + "no_subtype", + "no_broadcast", + "overlap_assume_elementwise", + "readonly", + "readwrite", + "updateifcopy", + "virtual", + "writeonly", + "writemasked", +] + +_MemMapModeKind: TypeAlias = L[ + "readonly", "r", + "copyonwrite", "c", + "readwrite", "r+", + "write", "w+", +] + +_DT64Date: TypeAlias = _HasDateAttributes | L["TODAY", "today", b"TODAY", b"today"] +_DT64Now: TypeAlias = L["NOW", "now", b"NOW", b"now"] +_NaTValue: TypeAlias = L["NAT", "NaT", "nat", b"NAT", b"NaT", b"nat"] + +_MonthUnit: TypeAlias = L["Y", "M", b"Y", b"M"] +_DayUnit: TypeAlias = L["W", "D", b"W", b"D"] +_DateUnit: TypeAlias = L[_MonthUnit, _DayUnit] +_NativeTimeUnit: TypeAlias = L["h", "m", "s", "ms", "us", "μs", b"h", b"m", b"s", b"ms", b"us"] +_IntTimeUnit: TypeAlias = L["ns", "ps", "fs", "as", b"ns", b"ps", b"fs", b"as"] +_TimeUnit: TypeAlias = L[_NativeTimeUnit, _IntTimeUnit] +_NativeTD64Unit: TypeAlias = L[_DayUnit, _NativeTimeUnit] +_IntTD64Unit: TypeAlias = L[_MonthUnit, _IntTimeUnit] +_TD64Unit: TypeAlias = L[_DateUnit, _TimeUnit] +_TimeUnitSpec: TypeAlias = _TD64UnitT | tuple[_TD64UnitT, SupportsIndex] + +### TypedDict's (for internal use only) + +@type_check_only +class _FormerAttrsDict(TypedDict): + object: LiteralString + float: LiteralString + complex: LiteralString + str: LiteralString + int: LiteralString + +### Protocols (for internal use only) + +@final +@type_check_only +class _SupportsLT(Protocol): + def __lt__(self, other: Any, /) -> Any: ... + +@final +@type_check_only +class _SupportsLE(Protocol): + def __le__(self, other: Any, /) -> Any: ... + +@final +@type_check_only +class _SupportsGT(Protocol): + def __gt__(self, other: Any, /) -> Any: ... + +@final +@type_check_only +class _SupportsGE(Protocol): + def __ge__(self, other: Any, /) -> Any: ... + +@type_check_only +class _SupportsFileMethods(SupportsFlush, Protocol): + # Protocol for representing file-like-objects accepted by `ndarray.tofile` and `fromfile` + def fileno(self) -> SupportsIndex: ... + def tell(self) -> SupportsIndex: ... + def seek(self, offset: int, whence: int, /) -> object: ... + +@type_check_only +class _SupportsFileMethodsRW(SupportsWrite[bytes], _SupportsFileMethods, Protocol): ... + +@type_check_only +class _SupportsDLPack(Protocol[_T_contra]): + def __dlpack__(self, /, *, stream: _T_contra | None = None) -> CapsuleType: ... + +@type_check_only +class _HasDType(Protocol[_T_co]): + @property + def dtype(self, /) -> _T_co: ... + +@type_check_only +class _HasRealAndImag(Protocol[_RealT_co, _ImagT_co]): + @property + def real(self, /) -> _RealT_co: ... + @property + def imag(self, /) -> _ImagT_co: ... + +@type_check_only +class _HasTypeWithRealAndImag(Protocol[_RealT_co, _ImagT_co]): + @property + def type(self, /) -> type[_HasRealAndImag[_RealT_co, _ImagT_co]]: ... + +@type_check_only +class _HasDTypeWithRealAndImag(Protocol[_RealT_co, _ImagT_co]): + @property + def dtype(self, /) -> _HasTypeWithRealAndImag[_RealT_co, _ImagT_co]: ... + +@type_check_only +class _HasDateAttributes(Protocol): + # The `datetime64` constructors requires an object with the three attributes below, + # and thus supports datetime duck typing + @property + def day(self) -> int: ... + @property + def month(self) -> int: ... + @property + def year(self) -> int: ... + +### Mixins (for internal use only) + +@type_check_only +class _RealMixin: + @property + def real(self) -> Self: ... + @property + def imag(self) -> Self: ... + +@type_check_only +class _RoundMixin: + @overload + def __round__(self, /, ndigits: None = None) -> int: ... + @overload + def __round__(self, /, ndigits: SupportsIndex) -> Self: ... + +@type_check_only +class _IntegralMixin(_RealMixin): + @property + def numerator(self) -> Self: ... + @property + def denominator(self) -> L[1]: ... + + def is_integer(self, /) -> L[True]: ... + +### Public API + +__version__: Final[LiteralString] = ... + +e: Final[float] = ... +euler_gamma: Final[float] = ... +pi: Final[float] = ... +inf: Final[float] = ... +nan: Final[float] = ... +little_endian: Final[builtins.bool] = ... +False_: Final[np.bool[L[False]]] = ... +True_: Final[np.bool[L[True]]] = ... +newaxis: Final[None] = None + +# not in __all__ +__NUMPY_SETUP__: Final[L[False]] = False +__numpy_submodules__: Final[set[LiteralString]] = ... +__former_attrs__: Final[_FormerAttrsDict] = ... +__future_scalars__: Final[set[L["bytes", "str", "object"]]] = ... +__array_api_version__: Final[L["2024.12"]] = "2024.12" +test: Final[PytestTester] = ... + +@type_check_only +class _DTypeMeta(type): + @property + def type(cls, /) -> type[generic] | None: ... + @property + def _abstract(cls, /) -> bool: ... + @property + def _is_numeric(cls, /) -> bool: ... + @property + def _parametric(cls, /) -> bool: ... + @property + def _legacy(cls, /) -> bool: ... + +@final +class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): + names: tuple[builtins.str, ...] | None + def __hash__(self) -> int: ... + + # `None` results in the default dtype + @overload + def __new__( + cls, + dtype: type[float64 | ct.c_double] | _Float64Codes | _DoubleCodes | None, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ... + ) -> dtype[float64]: ... + + # Overload for `dtype` instances, scalar types, and instances that have a + # `dtype: dtype[_ScalarT]` attribute + @overload + def __new__( + cls, + dtype: _DTypeLike[_ScalarT], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[_ScalarT]: ... + + # Builtin types + # + # NOTE: Typecheckers act as if `bool <: int <: float <: complex <: object`, + # even though at runtime `int`, `float`, and `complex` aren't subtypes.. + # This makes it impossible to express e.g. "a float that isn't an int", + # since type checkers treat `_: float` like `_: float | int`. + # + # For more details, see: + # - https://github.com/numpy/numpy/issues/27032#issuecomment-2278958251 + # - https://typing.readthedocs.io/en/latest/spec/special-types.html#special-cases-for-float-and-complex + @overload + def __new__( + cls, + dtype: type[builtins.bool | np.bool | ct.c_bool] | _BoolCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[str, Any] = ..., + ) -> dtype[np.bool]: ... + @overload + def __new__( + cls, + dtype: type[int], # also accepts `type[builtins.bool]` + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[str, Any] = ..., + ) -> dtype[int_ | np.bool]: ... + @overload + def __new__( + cls, + dtype: type[float], # also accepts `type[int | bool]` + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[str, Any] = ..., + ) -> dtype[float64 | int_ | np.bool]: ... + @overload + def __new__( + cls, + dtype: type[complex], # also accepts `type[float | int | bool]` + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[str, Any] = ..., + ) -> dtype[complex128 | float64 | int_ | np.bool]: ... + @overload + def __new__( + cls, + dtype: type[bytes | ct.c_char] | _BytesCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[str, Any] = ..., + ) -> dtype[bytes_]: ... + @overload + def __new__( + cls, + dtype: type[str] | _StrCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[str, Any] = ..., + ) -> dtype[str_]: ... + # NOTE: These `memoryview` overloads assume PEP 688, which requires mypy to + # be run with the (undocumented) `--disable-memoryview-promotion` flag, + # This will be the default in a future mypy release, see: + # https://github.com/python/mypy/issues/15313 + # Pyright / Pylance requires setting `disableBytesTypePromotions=true`, + # which is the default in strict mode + @overload + def __new__( + cls, + dtype: type[void | memoryview] | _VoidDTypeLike | _VoidCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[str, Any] = ..., + ) -> dtype[void]: ... + # NOTE: `_: type[object]` would also accept e.g. `type[object | complex]`, + # and is therefore not included here + @overload + def __new__( + cls, + dtype: type[object_ | _BuiltinObjectLike | ct.py_object[Any]] | _ObjectCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[str, Any] = ..., + ) -> dtype[object_]: ... + + # `unsignedinteger` string-based representations and ctypes + @overload + def __new__( + cls, + dtype: _UInt8Codes | _UByteCodes | type[ct.c_uint8 | ct.c_ubyte], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uint8]: ... + @overload + def __new__( + cls, + dtype: _UInt16Codes | _UShortCodes | type[ct.c_uint16 | ct.c_ushort], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uint16]: ... + @overload + def __new__( + cls, + dtype: _UInt32Codes | _UIntCCodes | type[ct.c_uint32 | ct.c_uint], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uint32]: ... + @overload + def __new__( + cls, + dtype: _UInt64Codes | _ULongLongCodes | type[ct.c_uint64 | ct.c_ulonglong], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uint64]: ... + @overload + def __new__( + cls, + dtype: _UIntPCodes | type[ct.c_void_p | ct.c_size_t], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uintp]: ... + @overload + def __new__( + cls, + dtype: _ULongCodes | type[ct.c_ulong], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[ulong]: ... + + # `signedinteger` string-based representations and ctypes + @overload + def __new__( + cls, + dtype: _Int8Codes | _ByteCodes | type[ct.c_int8 | ct.c_byte], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[int8]: ... + @overload + def __new__( + cls, + dtype: _Int16Codes | _ShortCodes | type[ct.c_int16 | ct.c_short], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[int16]: ... + @overload + def __new__( + cls, + dtype: _Int32Codes | _IntCCodes | type[ct.c_int32 | ct.c_int], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[int32]: ... + @overload + def __new__( + cls, + dtype: _Int64Codes | _LongLongCodes | type[ct.c_int64 | ct.c_longlong], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[int64]: ... + @overload + def __new__( + cls, + dtype: _IntPCodes | type[intp | ct.c_ssize_t], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[intp]: ... + @overload + def __new__( + cls, + dtype: _LongCodes | type[ct.c_long], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[long]: ... + + # `floating` string-based representations and ctypes + @overload + def __new__( + cls, + dtype: _Float16Codes | _HalfCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[float16]: ... + @overload + def __new__( + cls, + dtype: _Float32Codes | _SingleCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[float32]: ... + # float64 codes are covered by overload 1 + @overload + def __new__( + cls, + dtype: _LongDoubleCodes | type[ct.c_longdouble], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[longdouble]: ... + + # `complexfloating` string-based representations + @overload + def __new__( + cls, + dtype: _Complex64Codes | _CSingleCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[complex64]: ... + @overload + def __new__( + cls, + dtype: _Complex128Codes | _CDoubleCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[complex128]: ... + @overload + def __new__( + cls, + dtype: _CLongDoubleCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[clongdouble]: ... + + # Miscellaneous string-based representations and ctypes + @overload + def __new__( + cls, + dtype: _TD64Codes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[timedelta64]: ... + @overload + def __new__( + cls, + dtype: _DT64Codes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[datetime64]: ... + + # `StringDType` requires special treatment because it has no scalar type + @overload + def __new__( + cls, + dtype: dtypes.StringDType | _StringCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtypes.StringDType: ... + + # Combined char-codes and ctypes, analogous to the scalar-type hierarchy + @overload + def __new__( + cls, + dtype: _UnsignedIntegerCodes | _UnsignedIntegerCType, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[unsignedinteger]: ... + @overload + def __new__( + cls, + dtype: _SignedIntegerCodes | _SignedIntegerCType, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[signedinteger]: ... + @overload + def __new__( + cls, + dtype: _IntegerCodes | _IntegerCType, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[integer]: ... + @overload + def __new__( + cls, + dtype: _FloatingCodes | _FloatingCType, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[floating]: ... + @overload + def __new__( + cls, + dtype: _ComplexFloatingCodes, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[complexfloating]: ... + @overload + def __new__( + cls, + dtype: _InexactCodes | _FloatingCType, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[inexact]: ... + @overload + def __new__( + cls, + dtype: _CharacterCodes | type[bytes | builtins.str | ct.c_char], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[str, Any] = ..., + ) -> dtype[character]: ... + + # Handle strings that can't be expressed as literals; i.e. "S1", "S2", ... + @overload + def __new__( + cls, + dtype: builtins.str, + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype: ... + + # Catch-all overload for object-likes + # NOTE: `object_ | Any` is NOT equivalent to `Any`. It is specified to behave + # like a "sum type" (a.k.a. variant type, discriminated union, or tagged union). + # So the union of a type and `Any` is not the same "union type" that all other + # unions are (by definition). + # https://typing.python.org/en/latest/spec/concepts.html#union-types + @overload + def __new__( + cls, + dtype: type[object], + align: builtins.bool = False, + copy: builtins.bool = False, + *, + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[object_ | Any]: ... + + def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... + + @overload + def __getitem__(self: dtype[void], key: list[builtins.str], /) -> dtype[void]: ... + @overload + def __getitem__(self: dtype[void], key: builtins.str | SupportsIndex, /) -> dtype: ... + + # NOTE: In the future 1-based multiplications will also yield `flexible` dtypes + @overload + def __mul__(self: _DTypeT, value: L[1], /) -> _DTypeT: ... + @overload + def __mul__(self: _FlexDTypeT, value: SupportsIndex, /) -> _FlexDTypeT: ... + @overload + def __mul__(self, value: SupportsIndex, /) -> dtype[void]: ... + + # NOTE: `__rmul__` seems to be broken when used in combination with + # literals as of mypy 0.902. Set the return-type to `dtype` for + # now for non-flexible dtypes. + @overload + def __rmul__(self: _FlexDTypeT, value: SupportsIndex, /) -> _FlexDTypeT: ... + @overload + def __rmul__(self, value: SupportsIndex, /) -> dtype: ... + + def __gt__(self, other: DTypeLike | None, /) -> builtins.bool: ... + def __ge__(self, other: DTypeLike | None, /) -> builtins.bool: ... + def __lt__(self, other: DTypeLike | None, /) -> builtins.bool: ... + def __le__(self, other: DTypeLike | None, /) -> builtins.bool: ... + + # Explicitly defined `__eq__` and `__ne__` to get around mypy's + # `strict_equality` option; even though their signatures are + # identical to their `object`-based counterpart + def __eq__(self, other: Any, /) -> builtins.bool: ... + def __ne__(self, other: Any, /) -> builtins.bool: ... + + @property + def alignment(self) -> int: ... + @property + def base(self) -> dtype: ... + @property + def byteorder(self) -> _ByteOrderChar: ... + @property + def char(self) -> _DTypeChar: ... + @property + def descr(self) -> list[tuple[LiteralString, LiteralString] | tuple[LiteralString, LiteralString, _Shape]]: ... + @property + def fields(self,) -> MappingProxyType[LiteralString, tuple[dtype, int] | tuple[dtype, int, Any]] | None: ... + @property + def flags(self) -> int: ... + @property + def hasobject(self) -> builtins.bool: ... + @property + def isbuiltin(self) -> _DTypeBuiltinKind: ... + @property + def isnative(self) -> builtins.bool: ... + @property + def isalignedstruct(self) -> builtins.bool: ... + @property + def itemsize(self) -> int: ... + @property + def kind(self) -> _DTypeKind: ... + @property + def metadata(self) -> MappingProxyType[builtins.str, Any] | None: ... + @property + def name(self) -> LiteralString: ... + @property + def num(self) -> _DTypeNum: ... + @property + def shape(self) -> _AnyShape: ... + @property + def ndim(self) -> int: ... + @property + def subdtype(self) -> tuple[dtype, _AnyShape] | None: ... + def newbyteorder(self, new_order: _ByteOrder = ..., /) -> Self: ... + @property + def str(self) -> LiteralString: ... + @property + def type(self) -> type[_ScalarT_co]: ... + +@final +class flatiter(Generic[_ArrayT_co]): + __hash__: ClassVar[None] = None # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] + + @property + def base(self, /) -> _ArrayT_co: ... + @property + def coords(self: flatiter[ndarray[_ShapeT]], /) -> _ShapeT: ... + @property + def index(self, /) -> int: ... + + # iteration + def __len__(self, /) -> int: ... + def __iter__(self, /) -> Self: ... + def __next__(self: flatiter[NDArray[_ScalarT]], /) -> _ScalarT: ... + + # indexing + @overload # nd: _[()] + def __getitem__(self, key: tuple[()], /) -> _ArrayT_co: ... + @overload # 0d; _[] + def __getitem__(self: flatiter[NDArray[_ScalarT]], key: int | integer, /) -> _ScalarT: ... + @overload # 1d; _[[*]], _[:], _[...] + def __getitem__( + self: flatiter[ndarray[Any, _DTypeT]], + key: list[int] | slice | EllipsisType | flatiter[NDArray[integer]], + /, + ) -> ndarray[tuple[int], _DTypeT]: ... + @overload # 2d; _[[*[*]]] + def __getitem__( + self: flatiter[ndarray[Any, _DTypeT]], + key: list[list[int]], + /, + ) -> ndarray[tuple[int, int], _DTypeT]: ... + @overload # ?d + def __getitem__( + self: flatiter[ndarray[Any, _DTypeT]], + key: NDArray[integer] | _NestedSequence[int], + /, + ) -> ndarray[_AnyShape, _DTypeT]: ... + + # NOTE: `__setitem__` operates via `unsafe` casting rules, and can thus accept any + # type accepted by the relevant underlying `np.generic` constructor, which isn't + # known statically. So we cannot meaningfully annotate the value parameter. + def __setitem__(self, key: slice | EllipsisType | _ArrayLikeInt, val: object, /) -> None: ... + + # NOTE: `dtype` and `copy` are no-ops at runtime, so we don't support them here to + # avoid confusion + def __array__( + self: flatiter[ndarray[Any, _DTypeT]], + dtype: None = None, + /, + *, + copy: None = None, + ) -> ndarray[tuple[int], _DTypeT]: ... + + # This returns a flat copy of the underlying array, not of the iterator itself + def copy(self: flatiter[ndarray[Any, _DTypeT]], /) -> ndarray[tuple[int], _DTypeT]: ... + +@type_check_only +class _ArrayOrScalarCommon: + @property + def real(self, /) -> Any: ... + @property + def imag(self, /) -> Any: ... + @property + def T(self) -> Self: ... + @property + def mT(self) -> Self: ... + @property + def data(self) -> memoryview: ... + @property + def flags(self) -> flagsobj: ... + @property + def itemsize(self) -> int: ... + @property + def nbytes(self) -> int: ... + @property + def device(self) -> L["cpu"]: ... + + def __bool__(self, /) -> builtins.bool: ... + def __int__(self, /) -> int: ... + def __float__(self, /) -> float: ... + def __copy__(self) -> Self: ... + def __deepcopy__(self, memo: dict[int, Any] | None, /) -> Self: ... + + # TODO: How to deal with the non-commutative nature of `==` and `!=`? + # xref numpy/numpy#17368 + def __eq__(self, other: Any, /) -> Any: ... + def __ne__(self, other: Any, /) -> Any: ... + + def copy(self, order: _OrderKACF = ...) -> Self: ... + def dump(self, file: StrOrBytesPath | SupportsWrite[bytes]) -> None: ... + def dumps(self) -> bytes: ... + def tobytes(self, order: _OrderKACF = ...) -> bytes: ... + def tofile(self, fid: StrOrBytesPath | _SupportsFileMethods, /, sep: str = "", format: str = "%s") -> None: ... + # generics and 0d arrays return builtin scalars + def tolist(self) -> Any: ... + def to_device(self, device: L["cpu"], /, *, stream: int | Any | None = ...) -> Self: ... + + # NOTE: for `generic`, these two methods don't do anything + def fill(self, /, value: Incomplete) -> None: ... + def put(self, indices: _ArrayLikeInt_co, values: ArrayLike, /, mode: _ModeKind = "raise") -> None: ... + + # NOTE: even on `generic` this seems to work + def setflags( + self, + /, + *, + write: builtins.bool | None = None, + align: builtins.bool | None = None, + uic: builtins.bool | None = None, + ) -> None: ... + + @property + def __array_interface__(self) -> dict[str, Any]: ... + @property + def __array_priority__(self) -> float: ... + @property + def __array_struct__(self) -> CapsuleType: ... # builtins.PyCapsule + def __array_namespace__(self, /, *, api_version: _ArrayAPIVersion | None = None) -> ModuleType: ... + def __setstate__(self, state: tuple[ + SupportsIndex, # version + _ShapeLike, # Shape + _DTypeT_co, # DType + np.bool, # F-continuous + bytes | list[Any], # Data + ], /) -> None: ... + + def conj(self) -> Self: ... + def conjugate(self) -> Self: ... + + def argsort( + self, + axis: SupportsIndex | None = ..., + kind: _SortKind | None = ..., + order: str | Sequence[str] | None = ..., + *, + stable: builtins.bool | None = ..., + ) -> NDArray[intp]: ... + + @overload # axis=None (default), out=None (default), keepdims=False (default) + def argmax(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ... + @overload # axis=index, out=None (default) + def argmax(self, /, axis: SupportsIndex, out: None = None, *, keepdims: builtins.bool = False) -> Any: ... + @overload # axis=index, out=ndarray + def argmax(self, /, axis: SupportsIndex | None, out: _BoolOrIntArrayT, *, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + @overload + def argmax(self, /, axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + + @overload # axis=None (default), out=None (default), keepdims=False (default) + def argmin(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ... + @overload # axis=index, out=None (default) + def argmin(self, /, axis: SupportsIndex, out: None = None, *, keepdims: builtins.bool = False) -> Any: ... + @overload # axis=index, out=ndarray + def argmin(self, /, axis: SupportsIndex | None, out: _BoolOrIntArrayT, *, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + @overload + def argmin(self, /, axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + + # Keep in sync with `MaskedArray.round` + @overload # out=None (default) + def round(self, /, decimals: SupportsIndex = 0, out: None = None) -> Self: ... + @overload # out=ndarray + def round(self, /, decimals: SupportsIndex, out: _ArrayT) -> _ArrayT: ... + @overload + def round(self, /, decimals: SupportsIndex = 0, *, out: _ArrayT) -> _ArrayT: ... + + @overload # out=None (default) + def choose(self, /, choices: ArrayLike, out: None = None, mode: _ModeKind = "raise") -> NDArray[Any]: ... + @overload # out=ndarray + def choose(self, /, choices: ArrayLike, out: _ArrayT, mode: _ModeKind = "raise") -> _ArrayT: ... + + # TODO: Annotate kwargs with an unpacked `TypedDict` + @overload # out: None (default) + def clip(self, /, min: ArrayLike, max: ArrayLike | None = None, out: None = None, **kwargs: Any) -> NDArray[Any]: ... + @overload + def clip(self, /, min: None, max: ArrayLike, out: None = None, **kwargs: Any) -> NDArray[Any]: ... + @overload + def clip(self, /, min: None = None, *, max: ArrayLike, out: None = None, **kwargs: Any) -> NDArray[Any]: ... + @overload # out: ndarray + def clip(self, /, min: ArrayLike, max: ArrayLike | None, out: _ArrayT, **kwargs: Any) -> _ArrayT: ... + @overload + def clip(self, /, min: ArrayLike, max: ArrayLike | None = None, *, out: _ArrayT, **kwargs: Any) -> _ArrayT: ... + @overload + def clip(self, /, min: None, max: ArrayLike, out: _ArrayT, **kwargs: Any) -> _ArrayT: ... + @overload + def clip(self, /, min: None = None, *, max: ArrayLike, out: _ArrayT, **kwargs: Any) -> _ArrayT: ... + + @overload + def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, out: None = None) -> NDArray[Any]: ... + @overload + def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None, out: _ArrayT) -> _ArrayT: ... + @overload + def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, out: _ArrayT) -> _ArrayT: ... + + # Keep in sync with `MaskedArray.cumprod` + @overload # out: None (default) + def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ... + @overload # out: ndarray + def cumprod(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + + # Keep in sync with `MaskedArray.cumsum` + @overload # out: None (default) + def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ... + @overload # out: ndarray + def cumsum(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + + @overload + def max( + self, + /, + axis: _ShapeLike | None = None, + out: None = None, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload + def max( + self, + /, + axis: _ShapeLike | None, + out: _ArrayT, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def max( + self, + /, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> _ArrayT: ... + + @overload + def min( + self, + /, + axis: _ShapeLike | None = None, + out: None = None, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload + def min( + self, + /, + axis: _ShapeLike | None, + out: _ArrayT, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def min( + self, + /, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> _ArrayT: ... + + @overload + def sum( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload + def sum( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def sum( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> _ArrayT: ... + + @overload + def prod( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload + def prod( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + *, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def prod( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: builtins.bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> _ArrayT: ... + + @overload + def mean( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> Any: ... + @overload + def mean( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + *, + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def mean( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + ) -> _ArrayT: ... + + @overload + def std( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> Any: ... + @overload + def std( + self, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + ddof: float = 0, + *, + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def std( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ddof: float = 0, + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> _ArrayT: ... + + @overload + def var( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + *, + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> Any: ... + @overload + def var( + self, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + ddof: float = 0, + *, + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def var( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ddof: float = 0, + keepdims: builtins.bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + correction: float | _NoValueType = ..., + ) -> _ArrayT: ... + +class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): + __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] + @property + def base(self) -> NDArray[Any] | None: ... + @property + def ndim(self) -> int: ... + @property + def size(self) -> int: ... + @property + def real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... + @real.setter + def real(self, value: ArrayLike, /) -> None: ... + @property + def imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... + @imag.setter + def imag(self, value: ArrayLike, /) -> None: ... + + def __new__( + cls, + shape: _ShapeLike, + dtype: DTypeLike | None = ..., + buffer: _SupportsBuffer | None = ..., + offset: SupportsIndex = ..., + strides: _ShapeLike | None = ..., + order: _OrderKACF = ..., + ) -> Self: ... + + if sys.version_info >= (3, 12): + def __buffer__(self, flags: int, /) -> memoryview: ... + + def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... + + @overload + def __array__(self, dtype: None = None, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __array__(self, dtype: _DTypeT, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, _DTypeT]: ... + + def __array_ufunc__( + self, + ufunc: ufunc, + method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "at"], + *inputs: Any, + **kwargs: Any, + ) -> Any: ... + + def __array_function__( + self, + func: Callable[..., Any], + types: Iterable[type], + args: Iterable[Any], + kwargs: Mapping[str, Any], + ) -> Any: ... + + # NOTE: In practice any object is accepted by `obj`, but as `__array_finalize__` + # is a pseudo-abstract method the type has been narrowed down in order to + # grant subclasses a bit more flexibility + def __array_finalize__(self, obj: NDArray[Any] | None, /) -> None: ... + + def __array_wrap__( + self, + array: ndarray[_ShapeT, _DTypeT], + context: tuple[ufunc, tuple[Any, ...], int] | None = ..., + return_scalar: builtins.bool = ..., + /, + ) -> ndarray[_ShapeT, _DTypeT]: ... + + # Keep in sync with `MaskedArray.__getitem__` + @overload + def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> ndarray[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self, key: SupportsIndex | tuple[SupportsIndex, ...], /) -> Any: ... + @overload + def __getitem__(self, key: _ToIndices, /) -> ndarray[_AnyShape, _DTypeT_co]: ... + @overload # can be of any shape + def __getitem__(self: NDArray[void], key: str, /) -> ndarray[_ShapeT_co | _AnyShape]: ... + @overload + def __getitem__(self: NDArray[void], key: list[str], /) -> ndarray[_ShapeT_co | _AnyShape, dtype[void]]: ... + + @overload # flexible | object_ | bool + def __setitem__( + self: ndarray[Any, dtype[flexible | object_ | np.bool] | dtypes.StringDType], + key: _ToIndices, + value: object, + /, + ) -> None: ... + @overload # integer + def __setitem__( + self: NDArray[integer], + key: _ToIndices, + value: _ConvertibleToInt | _NestedSequence[_ConvertibleToInt] | _ArrayLikeInt_co, + /, + ) -> None: ... + @overload # floating + def __setitem__( + self: NDArray[floating], + key: _ToIndices, + value: _ConvertibleToFloat | _NestedSequence[_ConvertibleToFloat | None] | _ArrayLikeFloat_co | None, + /, + ) -> None: ... + @overload # complexfloating + def __setitem__( + self: NDArray[complexfloating], + key: _ToIndices, + value: _ConvertibleToComplex | _NestedSequence[_ConvertibleToComplex | None] | _ArrayLikeNumber_co | None, + /, + ) -> None: ... + @overload # timedelta64 + def __setitem__( + self: NDArray[timedelta64], + key: _ToIndices, + value: _ConvertibleToTD64 | _NestedSequence[_ConvertibleToTD64], + /, + ) -> None: ... + @overload # datetime64 + def __setitem__( + self: NDArray[datetime64], + key: _ToIndices, + value: _ConvertibleToDT64 | _NestedSequence[_ConvertibleToDT64], + /, + ) -> None: ... + @overload # void + def __setitem__(self: NDArray[void], key: str | list[str], value: object, /) -> None: ... + @overload # catch-all + def __setitem__(self, key: _ToIndices, value: ArrayLike, /) -> None: ... + + @property + def ctypes(self) -> _ctypes[int]: ... + + # + @property + def shape(self) -> _ShapeT_co: ... + @shape.setter + @deprecated("In-place shape modification will be deprecated in NumPy 2.5.", category=PendingDeprecationWarning) + def shape(self, value: _ShapeLike) -> None: ... + + # + @property + def strides(self) -> _Shape: ... + @strides.setter + @deprecated("Setting the strides on a NumPy array has been deprecated in NumPy 2.4") + def strides(self, value: _ShapeLike) -> None: ... + + # + def byteswap(self, inplace: builtins.bool = ...) -> Self: ... + @property + def flat(self) -> flatiter[Self]: ... + + @overload # use the same output type as that of the underlying `generic` + def item(self: NDArray[generic[_T]], i0: SupportsIndex | tuple[SupportsIndex, ...] = ..., /, *args: SupportsIndex) -> _T: ... + @overload # special casing for `StringDType`, which has no scalar type + def item( + self: ndarray[Any, dtypes.StringDType], + arg0: SupportsIndex | tuple[SupportsIndex, ...] = ..., + /, + *args: SupportsIndex, + ) -> str: ... + + # keep in sync with `ma.MaskedArray.tolist` + @overload # this first overload prevents mypy from over-eagerly selecting `tuple[()]` in case of `_AnyShape` + def tolist(self: ndarray[tuple[Never], dtype[generic[_T]]], /) -> Any: ... + @overload + def tolist(self: ndarray[tuple[()], dtype[generic[_T]]], /) -> _T: ... + @overload + def tolist(self: ndarray[tuple[int], dtype[generic[_T]]], /) -> list[_T]: ... + @overload + def tolist(self: ndarray[tuple[int, int], dtype[generic[_T]]], /) -> list[list[_T]]: ... + @overload + def tolist(self: ndarray[tuple[int, int, int], dtype[generic[_T]]], /) -> list[list[list[_T]]]: ... + @overload + def tolist(self, /) -> Any: ... + + @overload + def resize(self, new_shape: _ShapeLike, /, *, refcheck: builtins.bool = True) -> None: ... + @overload + def resize(self, /, *new_shape: SupportsIndex, refcheck: builtins.bool = True) -> None: ... + + # keep in sync with `ma.MaskedArray.squeeze` + def squeeze( + self, + /, + axis: SupportsIndex | tuple[SupportsIndex, ...] | None = ..., + ) -> ndarray[_AnyShape, _DTypeT_co]: ... + + def swapaxes(self, axis1: SupportsIndex, axis2: SupportsIndex, /) -> Self: ... + + @overload + def transpose(self, axes: _ShapeLike | None, /) -> Self: ... + @overload + def transpose(self, /, *axes: SupportsIndex) -> Self: ... + + @overload + def all( + self, + axis: None = None, + out: None = None, + keepdims: L[False, 0] = False, + *, + where: _ArrayLikeBool_co = True + ) -> np.bool: ... + @overload + def all( + self, + axis: int | tuple[int, ...] | None = None, + out: None = None, + keepdims: SupportsIndex = False, + *, + where: _ArrayLikeBool_co = True, + ) -> np.bool | NDArray[np.bool]: ... + @overload + def all( + self, + axis: int | tuple[int, ...] | None, + out: _ArrayT, + keepdims: SupportsIndex = False, + *, + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + @overload + def all( + self, + axis: int | tuple[int, ...] | None = None, + *, + out: _ArrayT, + keepdims: SupportsIndex = False, + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + + @overload + def any( + self, + axis: None = None, + out: None = None, + keepdims: L[False, 0] = False, + *, + where: _ArrayLikeBool_co = True + ) -> np.bool: ... + @overload + def any( + self, + axis: int | tuple[int, ...] | None = None, + out: None = None, + keepdims: SupportsIndex = False, + *, + where: _ArrayLikeBool_co = True, + ) -> np.bool | NDArray[np.bool]: ... + @overload + def any( + self, + axis: int | tuple[int, ...] | None, + out: _ArrayT, + keepdims: SupportsIndex = False, + *, + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + @overload + def any( + self, + axis: int | tuple[int, ...] | None = None, + *, + out: _ArrayT, + keepdims: SupportsIndex = False, + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + + # + @overload + def partition( + self, + kth: _ArrayLikeInt, + /, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: None = None, + ) -> None: ... + @overload + def partition( + self: NDArray[void], + kth: _ArrayLikeInt, + /, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, + ) -> None: ... + + # + @overload + def argpartition( + self, + kth: _ArrayLikeInt, + /, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: None = None, + ) -> NDArray[intp]: ... + @overload + def argpartition( + self: NDArray[void], + kth: _ArrayLikeInt, + /, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, + ) -> NDArray[intp]: ... + + # keep in sync with `ma.MaskedArray.diagonal` + def diagonal( + self, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + ) -> ndarray[_AnyShape, _DTypeT_co]: ... + + # 1D + 1D returns a scalar; + # all other with at least 1 non-0D array return an ndarray. + @overload + def dot(self, b: _ScalarLike_co, /, out: None = None) -> NDArray[Any]: ... + @overload + def dot(self, b: ArrayLike, /, out: None = None) -> Any: ... + @overload + def dot(self, b: ArrayLike, /, out: _ArrayT) -> _ArrayT: ... + + # `nonzero()` raises for 0d arrays/generics + def nonzero(self) -> tuple[ndarray[tuple[int], np.dtype[intp]], ...]: ... + + @overload + def searchsorted( + self, # >= 1D array + v: _ScalarLike_co, # 0D array-like + /, + side: _SortSide = "left", + sorter: _ArrayLikeInt_co | None = None, + ) -> intp: ... + @overload + def searchsorted( + self, # >= 1D array + v: ArrayLike, + /, + side: _SortSide = "left", + sorter: _ArrayLikeInt_co | None = None, + ) -> NDArray[intp]: ... + + def sort( + self, + /, + axis: SupportsIndex = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + *, + stable: builtins.bool | None = None, + ) -> None: ... + + # Keep in sync with `MaskedArray.trace` + @overload + def trace( + self, # >= 2D array + /, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + out: None = None, + ) -> Any: ... + @overload + def trace( + self, # >= 2D array + /, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ) -> _ArrayT: ... + @overload + def trace( + self, # >= 2D array + /, + offset: SupportsIndex, + axis1: SupportsIndex, + axis2: SupportsIndex, + dtype: DTypeLike | None, + out: _ArrayT, + ) -> _ArrayT: ... + + @overload + def take( + self: NDArray[_ScalarT], + indices: _IntLike_co, + /, + axis: SupportsIndex | None = ..., + out: None = None, + mode: _ModeKind = ..., + ) -> _ScalarT: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + /, + axis: SupportsIndex | None = ..., + out: None = None, + mode: _ModeKind = ..., + ) -> ndarray[_AnyShape, _DTypeT_co]: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + /, + axis: SupportsIndex | None = ..., + *, + out: _ArrayT, + mode: _ModeKind = ..., + ) -> _ArrayT: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + /, + axis: SupportsIndex | None, + out: _ArrayT, + mode: _ModeKind = ..., + ) -> _ArrayT: ... + + # keep in sync with `ma.MaskedArray.repeat` + @overload + def repeat(self, repeats: _ArrayLikeInt_co, /, axis: None = None) -> ndarray[tuple[int], _DTypeT_co]: ... + @overload + def repeat(self, repeats: _ArrayLikeInt_co, /, axis: SupportsIndex) -> ndarray[_AnyShape, _DTypeT_co]: ... + + # keep in sync with `ma.MaskedArray.flatten` and `ma.MaskedArray.ravel` + def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ... + def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ... + + # Keep in sync with `MaskedArray.reshape` + # NOTE: reshape also accepts negative integers, so we can't use integer literals + @overload # (None) + def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: builtins.bool | None = None) -> Self: ... + @overload # (empty_sequence) + def reshape( # type: ignore[overload-overlap] # mypy false positive + self, + shape: Sequence[Never], + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[()], _DTypeT_co]: ... + @overload # (() | (int) | (int, int) | ....) # up to 8-d + def reshape( + self, + shape: _AnyShapeT, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[_AnyShapeT, _DTypeT_co]: ... + @overload # (index) + def reshape( + self, + size1: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[int], _DTypeT_co]: ... + @overload # (index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[int, int], _DTypeT_co]: ... + @overload # (index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[int, int, int], _DTypeT_co]: ... + @overload # (index, index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + size4: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[int, int, int, int], _DTypeT_co]: ... + @overload # (int, *(index, ...)) + def reshape( + self, + size0: SupportsIndex, + /, + *shape: SupportsIndex, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[_AnyShape, _DTypeT_co]: ... + @overload # (sequence[index]) + def reshape( + self, + shape: Sequence[SupportsIndex], + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[_AnyShape, _DTypeT_co]: ... + + @overload + def astype( + self, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = ..., + casting: _CastingKind = ..., + subok: builtins.bool = ..., + copy: builtins.bool | _CopyMode = ..., + ) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... + @overload + def astype( + self, + dtype: DTypeLike | None, + order: _OrderKACF = ..., + casting: _CastingKind = ..., + subok: builtins.bool = ..., + copy: builtins.bool | _CopyMode = ..., + ) -> ndarray[_ShapeT_co, dtype]: ... + + # + @overload # () + def view(self, /) -> Self: ... + @overload # (dtype: T) + def view(self, /, dtype: _DTypeT | _HasDType[_DTypeT]) -> ndarray[_ShapeT_co, _DTypeT]: ... + @overload # (dtype: dtype[T]) + def view(self, /, dtype: _DTypeLike[_ScalarT]) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... + @overload # (type: T) + def view(self, /, *, type: type[_ArrayT]) -> _ArrayT: ... + @overload # (_: T) + def view(self, /, dtype: type[_ArrayT]) -> _ArrayT: ... + @overload # (dtype: ?) + def view(self, /, dtype: DTypeLike) -> ndarray[_ShapeT_co, dtype]: ... + @overload # (dtype: ?, type: T) + def view(self, /, dtype: DTypeLike, type: type[_ArrayT]) -> _ArrayT: ... + + def setfield(self, val: ArrayLike, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> None: ... + @overload + def getfield(self, /, dtype: _DTypeLike[_ScalarT], offset: SupportsIndex = 0) -> NDArray[_ScalarT]: ... + @overload + def getfield(self, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> NDArray[Any]: ... + + def __index__(self: NDArray[integer], /) -> int: ... + def __complex__(self: NDArray[number | np.bool | object_], /) -> complex: ... + + def __len__(self) -> int: ... + def __contains__(self, value: object, /) -> builtins.bool: ... + + # NOTE: This weird `Never` tuple works around a strange mypy issue where it assigns + # `tuple[int]` to `tuple[Never]` or `tuple[int, int]` to `tuple[Never, Never]`. + # This way the bug only occurs for 9-D arrays, which are probably not very common. + @overload + def __iter__( + self: ndarray[tuple[Never, Never, Never, Never, Never, Never, Never, Never, Never], Any], / + ) -> Iterator[Any]: ... + @overload # == 1-d & dtype[T \ object_] + def __iter__(self: ndarray[tuple[int], dtype[_NonObjectScalarT]], /) -> Iterator[_NonObjectScalarT]: ... + @overload # == 1-d & StringDType + def __iter__(self: ndarray[tuple[int], dtypes.StringDType], /) -> Iterator[str]: ... + @overload # >= 2-d + def __iter__(self: ndarray[tuple[int, int, *tuple[int, ...]], _DTypeT], /) -> Iterator[ndarray[_AnyShape, _DTypeT]]: ... + @overload # ?-d + def __iter__(self, /) -> Iterator[Any]: ... + + # + @overload + def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + @overload + def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + @overload + def __lt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + @overload + def __lt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + @overload + def __lt__( + self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / + ) -> NDArray[np.bool]: ... + @overload + def __lt__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + @overload + def __lt__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + + # + @overload + def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + @overload + def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + @overload + def __le__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + @overload + def __le__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + @overload + def __le__( + self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / + ) -> NDArray[np.bool]: ... + @overload + def __le__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + @overload + def __le__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + + # + @overload + def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + @overload + def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + @overload + def __gt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + @overload + def __gt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + @overload + def __gt__( + self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / + ) -> NDArray[np.bool]: ... + @overload + def __gt__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + @overload + def __gt__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + + # + @overload + def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + @overload + def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + @overload + def __ge__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + @overload + def __ge__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + @overload + def __ge__( + self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / + ) -> NDArray[np.bool]: ... + @overload + def __ge__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + @overload + def __ge__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + + # Unary ops + + # TODO: Uncomment once https://github.com/python/mypy/issues/14070 is fixed + # @overload + # def __abs__(self: ndarray[_ShapeT, dtypes.Complex64DType], /) -> ndarray[_ShapeT, dtypes.Float32DType]: ... + # @overload + # def __abs__(self: ndarray[_ShapeT, dtypes.Complex128DType], /) -> ndarray[_ShapeT, dtypes.Float64DType]: ... + # @overload + # def __abs__(self: ndarray[_ShapeT, dtypes.CLongDoubleDType], /) -> ndarray[_ShapeT, dtypes.LongDoubleDType]: ... + # @overload + # def __abs__(self: ndarray[_ShapeT, dtype[complex128]], /) -> ndarray[_ShapeT, dtype[float64]]: ... + @overload + def __abs__(self: ndarray[_ShapeT, dtype[complexfloating[_NBit]]], /) -> ndarray[_ShapeT, dtype[floating[_NBit]]]: ... + @overload + def __abs__(self: _RealArrayT, /) -> _RealArrayT: ... + + def __invert__(self: _IntegralArrayT, /) -> _IntegralArrayT: ... # noqa: PYI019 + def __neg__(self: _NumericArrayT, /) -> _NumericArrayT: ... # noqa: PYI019 + def __pos__(self: _NumericArrayT, /) -> _NumericArrayT: ... # noqa: PYI019 + + # Binary ops + + # TODO: Support the "1d @ 1d -> scalar" case + @overload + def __matmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + @overload + def __matmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __matmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __matmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __matmul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __matmul__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __matmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + @overload + def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... + @overload + def __matmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __matmul__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __matmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload # signature equivalent to __matmul__ + def __rmatmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + @overload + def __rmatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __rmatmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rmatmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __rmatmul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __rmatmul__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __rmatmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + @overload + def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... + @overload + def __rmatmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __rmatmul__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rmatmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __mod__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... + @overload + def __mod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __mod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __mod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __mod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __mod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... + @overload + def __mod__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... + @overload + def __mod__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __mod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload # signature equivalent to __mod__ + def __rmod__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... + @overload + def __rmod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __rmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __rmod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __rmod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __rmod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... + @overload + def __rmod__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... + @overload + def __rmod__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rmod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __divmod__(self: NDArray[_RealNumberT], rhs: int | np.bool, /) -> _2Tuple[ndarray[_ShapeT_co, dtype[_RealNumberT]]]: ... + @overload + def __divmod__(self: NDArray[_RealNumberT], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] + @overload + def __divmod__(self: NDArray[np.bool], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... # type: ignore[overload-overlap] + @overload + def __divmod__(self: NDArray[np.bool], rhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] + @overload + def __divmod__(self: NDArray[float64], rhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... + @overload + def __divmod__(self: _ArrayFloat64_co, rhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ... + @overload + def __divmod__(self: _ArrayUInt_co, rhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... # type: ignore[overload-overlap] + @overload + def __divmod__(self: _ArrayInt_co, rhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... # type: ignore[overload-overlap] + @overload + def __divmod__(self: _ArrayFloat_co, rhs: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating]]: ... + @overload + def __divmod__(self: NDArray[timedelta64], rhs: _ArrayLike[timedelta64], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... + + @overload # signature equivalent to __divmod__ + def __rdivmod__(self: NDArray[_RealNumberT], lhs: int | np.bool, /) -> _2Tuple[ndarray[_ShapeT_co, dtype[_RealNumberT]]]: ... + @overload + def __rdivmod__(self: NDArray[_RealNumberT], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] + @overload + def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... # type: ignore[overload-overlap] + @overload + def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] + @overload + def __rdivmod__(self: NDArray[float64], lhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... + @overload + def __rdivmod__(self: _ArrayFloat64_co, lhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ... + @overload + def __rdivmod__(self: _ArrayUInt_co, lhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... # type: ignore[overload-overlap] + @overload + def __rdivmod__(self: _ArrayInt_co, lhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... # type: ignore[overload-overlap] + @overload + def __rdivmod__(self: _ArrayFloat_co, lhs: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating]]: ... + @overload + def __rdivmod__(self: NDArray[timedelta64], lhs: _ArrayLike[timedelta64], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... + + # Keep in sync with `MaskedArray.__add__` + @overload + def __add__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __add__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __add__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __add__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __add__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + @overload + def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... + @overload + def __add__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ... + @overload + def __add__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ... + @overload + def __add__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bytes_]: ... + @overload + def __add__(self: NDArray[str_], other: _ArrayLikeStr_co, /) -> NDArray[str_]: ... + @overload + def __add__( + self: ndarray[Any, dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> ndarray[tuple[Any, ...], dtypes.StringDType]: ... + @overload + def __add__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __add__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `MaskedArray.__radd__` + @overload # signature equivalent to __add__ + def __radd__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __radd__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __radd__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __radd__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __radd__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + @overload + def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... + @overload + def __radd__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ... + @overload + def __radd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ... + @overload + def __radd__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bytes_]: ... + @overload + def __radd__(self: NDArray[str_], other: _ArrayLikeStr_co, /) -> NDArray[str_]: ... + @overload + def __radd__( + self: ndarray[Any, dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> ndarray[tuple[Any, ...], dtypes.StringDType]: ... + @overload + def __radd__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __radd__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `MaskedArray.__sub__` + @overload + def __sub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __sub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __sub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __sub__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __sub__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __sub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + @overload + def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... + @overload + def __sub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ... + @overload + def __sub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[timedelta64]: ... + @overload + def __sub__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __sub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `MaskedArray.__rsub__` + @overload + def __rsub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rsub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __rsub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __rsub__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __rsub__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __rsub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + @overload + def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... + @overload + def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ... + @overload + def __rsub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[timedelta64]: ... + @overload + def __rsub__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rsub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `MaskedArray.__mul__` + @overload + def __mul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __mul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __mul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __mul__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __mul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + @overload + def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... + @overload + def __mul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __mul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + @overload + def __mul__(self: _ArrayFloat_co, other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... + @overload + def __mul__( + self: ndarray[Any, dtype[character] | dtypes.StringDType], + other: _ArrayLikeInt, + /, + ) -> ndarray[tuple[Any, ...], _DTypeT_co]: ... + @overload + def __mul__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __mul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `MaskedArray.__rmul__` + @overload # signature equivalent to __mul__ + def __rmul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __rmul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __rmul__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __rmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + @overload + def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... + @overload + def __rmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __rmul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + @overload + def __rmul__(self: _ArrayFloat_co, other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... + @overload + def __rmul__( + self: ndarray[Any, dtype[character] | dtypes.StringDType], + other: _ArrayLikeInt, + /, + ) -> ndarray[tuple[Any, ...], _DTypeT_co]: ... + @overload + def __rmul__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `MaskedArray.__truediv__` + @overload + def __truediv__(self: _ArrayInt_co | NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __truediv__(self: _ArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __truediv__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __truediv__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + @overload + def __truediv__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... + @overload + def __truediv__(self: _ArrayFloat_co, other: _ArrayLike[floating], /) -> NDArray[floating]: ... + @overload + def __truediv__(self: NDArray[complexfloating], other: _ArrayLikeNumber_co, /) -> NDArray[complexfloating]: ... + @overload + def __truediv__(self: _ArrayNumber_co, other: _ArrayLike[complexfloating], /) -> NDArray[complexfloating]: ... + @overload + def __truediv__(self: NDArray[inexact], other: _ArrayLikeNumber_co, /) -> NDArray[inexact]: ... + @overload + def __truediv__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __truediv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[float64]: ... + @overload + def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + @overload + def __truediv__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `MaskedArray.__rtruediv__` + @overload + def __rtruediv__(self: _ArrayInt_co | NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __rtruediv__(self: _ArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __rtruediv__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __rtruediv__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + @overload + def __rtruediv__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... + @overload + def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLike[floating], /) -> NDArray[floating]: ... + @overload + def __rtruediv__(self: NDArray[complexfloating], other: _ArrayLikeNumber_co, /) -> NDArray[complexfloating]: ... + @overload + def __rtruediv__(self: _ArrayNumber_co, other: _ArrayLike[complexfloating], /) -> NDArray[complexfloating]: ... + @overload + def __rtruediv__(self: NDArray[inexact], other: _ArrayLikeNumber_co, /) -> NDArray[inexact]: ... + @overload + def __rtruediv__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __rtruediv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[float64]: ... + @overload + def __rtruediv__(self: NDArray[integer | floating], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... + @overload + def __rtruediv__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rtruediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `MaskedArray.__floordiv__` + @overload + def __floordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... + @overload + def __floordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __floordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... + @overload + def __floordiv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[int64]: ... + @overload + def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + @overload + def __floordiv__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __floordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `MaskedArray.__rfloordiv__` + @overload + def __rfloordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... + @overload + def __rfloordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __rfloordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... + @overload + def __rfloordiv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[int64]: ... + @overload + def __rfloordiv__(self: NDArray[floating | integer], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... + @overload + def __rfloordiv__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `MaskedArray.__pow__` + @overload + def __pow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __pow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ... + @overload + def __pow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], mod: None = None, /) -> NDArray[float64]: ... + @overload + def __pow__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, mod: None = None, /) -> NDArray[complex128]: ... + @overload + def __pow__( + self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / + ) -> NDArray[complex128]: ... + @overload + def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> NDArray[complexfloating]: ... + @overload + def __pow__(self: NDArray[number], other: _ArrayLikeNumber_co, mod: None = None, /) -> NDArray[number]: ... + @overload + def __pow__(self: NDArray[object_], other: Any, mod: None = None, /) -> Any: ... + @overload + def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... + + # Keep in sync with `MaskedArray.__rpow__` + @overload + def __rpow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rpow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ... + @overload + def __rpow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], mod: None = None, /) -> NDArray[float64]: ... + @overload + def __rpow__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, mod: None = None, /) -> NDArray[complex128]: ... + @overload + def __rpow__( + self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / + ) -> NDArray[complex128]: ... + @overload + def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> NDArray[complexfloating]: ... + @overload + def __rpow__(self: NDArray[number], other: _ArrayLikeNumber_co, mod: None = None, /) -> NDArray[number]: ... + @overload + def __rpow__(self: NDArray[object_], other: Any, mod: None = None, /) -> Any: ... + @overload + def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... + + @overload + def __lshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + @overload + def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __lshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __lshift__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __lshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __rlshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + @overload + def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rlshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __rlshift__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rlshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __rshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + @overload + def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __rshift__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __rrshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... + @overload + def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rrshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __rrshift__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rrshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __and__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + @overload + def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __and__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __and__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __and__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __rand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + @overload + def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rand__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __rand__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rand__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __xor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + @overload + def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __xor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __xor__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __xor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __rxor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + @overload + def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rxor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __rxor__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rxor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __or__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + @overload + def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __or__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __or__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __or__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __ror__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + @overload + def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __ror__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __ror__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __ror__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # `np.generic` does not support inplace operations + + # NOTE: Inplace ops generally use "same_kind" casting w.r.t. to the left + # operand. An exception to this rule are unsigned integers though, which + # also accepts a signed integer for the right operand as long it is a 0D + # object and its value is >= 0 + # NOTE: Due to a mypy bug, overloading on e.g. `self: NDArray[SCT_floating]` won't + # work, as this will lead to `false negatives` when using these inplace ops. + + # += + @overload # type: ignore[misc] + def __iadd__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __iadd__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + @overload + def __iadd__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... + @overload + def __iadd__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... + @overload + def __iadd__(self: _TimeArrayT, other: _ArrayLikeTD64_co, /) -> _TimeArrayT: ... + @overload + def __iadd__(self: _BytesArrayT, other: _ArrayLikeBytes_co, /) -> _BytesArrayT: ... + @overload + def __iadd__(self: _StringArrayT, other: _ArrayLikeStr_co | _ArrayLikeString_co, /) -> _StringArrayT: ... + @overload + def __iadd__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + + # -= + @overload # type: ignore[misc] + def __isub__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + @overload + def __isub__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... + @overload + def __isub__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... + @overload + def __isub__(self: _TimeArrayT, other: _ArrayLikeTD64_co, /) -> _TimeArrayT: ... + @overload + def __isub__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + + # *= + @overload # type: ignore[misc] + def __imul__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __imul__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + @overload + def __imul__(self: _InexactTimedeltaArrayT, other: _ArrayLikeFloat_co, /) -> _InexactTimedeltaArrayT: ... + @overload + def __imul__(self: _NumberCharacterArrayT, other: _ArrayLikeInt_co, /) -> _NumberCharacterArrayT: ... + @overload + def __imul__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + + # @= + @overload # type: ignore[misc] + def __imatmul__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __imatmul__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + @overload + def __imatmul__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... + @overload + def __imatmul__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... + @overload + def __imatmul__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + + # **= + @overload # type: ignore[misc] + def __ipow__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + @overload + def __ipow__(self: _InexactArrayT, other: _ArrayLikeFloat_co, /) -> _InexactArrayT: ... + @overload + def __ipow__(self: _NumberArrayT, other: _ArrayLikeInt_co, /) -> _NumberArrayT: ... + @overload + def __ipow__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + + # /= + @overload # type: ignore[misc] + def __itruediv__(self: _ComplexFloatingArrayT, other: _ArrayLikeComplex_co, /) -> _ComplexFloatingArrayT: ... + @overload + def __itruediv__(self: _InexactTimedeltaArrayT, other: _ArrayLikeFloat_co, /) -> _InexactTimedeltaArrayT: ... + @overload + def __itruediv__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + + # //= + # keep in sync with `__imod__` + @overload # type: ignore[misc] + def __ifloordiv__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + @overload + def __ifloordiv__(self: _FloatingTimedeltaArrayT, other: _ArrayLikeFloat_co, /) -> _FloatingTimedeltaArrayT: ... + @overload + def __ifloordiv__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + + # %= + # keep in sync with `__ifloordiv__` + @overload # type: ignore[misc] + def __imod__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + @overload + def __imod__(self: _FloatingArrayT, other: _ArrayLikeFloat_co, /) -> _FloatingArrayT: ... + @overload + def __imod__(self: _TimedeltaArrayT, other: _ArrayLike[timedelta64], /) -> _TimedeltaArrayT: ... + @overload + def __imod__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + + # <<= + # keep in sync with `__irshift__` + @overload # type: ignore[misc] + def __ilshift__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + @overload + def __ilshift__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + + # >>= + # keep in sync with `__ilshift__` + @overload # type: ignore[misc] + def __irshift__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + @overload + def __irshift__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + + # &= + # keep in sync with `__ixor__` and `__ior__` + @overload # type: ignore[misc] + def __iand__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __iand__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + @overload + def __iand__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + + # ^= + # keep in sync with `__iand__` and `__ior__` + @overload # type: ignore[misc] + def __ixor__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __ixor__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + @overload + def __ixor__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + + # |= + # keep in sync with `__iand__` and `__ixor__` + @overload # type: ignore[misc] + def __ior__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __ior__(self: _IntegerArrayT, other: _ArrayLikeInt_co, /) -> _IntegerArrayT: ... + @overload + def __ior__(self: _ObjectArrayT, other: object, /) -> _ObjectArrayT: ... + + # + def __dlpack__( + self: NDArray[number], + /, + *, + stream: int | Any | None = None, + max_version: tuple[int, int] | None = None, + dl_device: tuple[int, int] | None = None, + copy: builtins.bool | None = None, + ) -> CapsuleType: ... + def __dlpack_device__(self, /) -> tuple[L[1], L[0]]: ... + + # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` + @property + def dtype(self) -> _DTypeT_co: ... + +# NOTE: while `np.generic` is not technically an instance of `ABCMeta`, +# the `@abstractmethod` decorator is herein used to (forcefully) deny +# the creation of `np.generic` instances. +# The `# type: ignore` comments are necessary to silence mypy errors regarding +# the missing `ABCMeta` metaclass. +# See https://github.com/numpy/numpy-stubs/pull/80 for more details. +class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): + @abstractmethod + def __new__(cls, /, *args: Any, **kwargs: Any) -> Self: ... + + # NOTE: Technically this doesn't exist at runtime, but it is unlikely to lead to + # type-unsafe situations (the abstract scalar types cannot be instantiated + # themselves) and is convenient to have, so we include it regardless. See + # https://github.com/numpy/numpy/issues/30445 for use-cases and discussion. + def __hash__(self, /) -> int: ... + + if sys.version_info >= (3, 12): + def __buffer__(self, flags: int, /) -> memoryview: ... + + @overload + def __array__(self, dtype: None = None, /) -> ndarray[tuple[()], dtype[Self]]: ... + @overload + def __array__(self, dtype: _DTypeT, /) -> ndarray[tuple[()], _DTypeT]: ... + + @overload + def __array_wrap__( + self, + array: ndarray[_ShapeT, _DTypeT], + context: tuple[ufunc, tuple[object, ...], int] | None, + return_scalar: L[False], + /, + ) -> ndarray[_ShapeT, _DTypeT]: ... + @overload + def __array_wrap__( + self, + array: ndarray[tuple[()], dtype[_ScalarT]], + context: tuple[ufunc, tuple[object, ...], int] | None = None, + return_scalar: L[True] = True, + /, + ) -> _ScalarT: ... + @overload + def __array_wrap__( + self, + array: ndarray[_Shape1T, _DTypeT], + context: tuple[ufunc, tuple[object, ...], int] | None = None, + return_scalar: L[True] = True, + /, + ) -> ndarray[_Shape1T, _DTypeT]: ... + @overload + def __array_wrap__( + self, + array: ndarray[_ShapeT, dtype[_ScalarT]], + context: tuple[ufunc, tuple[object, ...], int] | None = None, + return_scalar: L[True] = True, + /, + ) -> _ScalarT | ndarray[_ShapeT, dtype[_ScalarT]]: ... + + @property + def base(self) -> None: ... + @property + def ndim(self) -> L[0]: ... + @property + def size(self) -> L[1]: ... + @property + def shape(self) -> tuple[()]: ... + @property + def strides(self) -> tuple[()]: ... + @property + def flat(self) -> flatiter[ndarray[tuple[int], dtype[Self]]]: ... + + @overload + def item(self, /) -> _ItemT_co: ... + @overload + def item(self, arg0: L[0, -1] | tuple[L[0, -1]] | tuple[()] = ..., /) -> _ItemT_co: ... + @override + def tolist(self, /) -> _ItemT_co: ... + + # NOTE: these technically exist, but will always raise when called + def trace( # type: ignore[misc] + self: Never, + /, + offset: L[0] = 0, + axis1: L[0] = 0, + axis2: L[1] = 1, + dtype: None = None, + out: None = None, + ) -> Never: ... + def diagonal(self: Never, /, offset: L[0] = 0, axis1: L[0] = 0, axis2: L[1] = 1) -> Never: ... # type: ignore[misc] + def swapaxes(self: Never, axis1: Never, axis2: Never, /) -> Never: ... # type: ignore[misc] + def sort(self: Never, /, axis: L[-1] = -1, kind: None = None, order: None = None, *, stable: None = None) -> Never: ... # type: ignore[misc] + def nonzero(self: Never, /) -> Never: ... # type: ignore[misc] + def setfield(self: Never, val: Never, /, dtype: Never, offset: L[0] = 0) -> None: ... # type: ignore[misc] + def searchsorted(self: Never, v: Never, /, side: L["left"] = "left", sorter: None = None) -> Never: ... # type: ignore[misc] + + # NOTE: this wont't raise, but won't do anything either + @overload + def resize(self, /, *, refcheck: builtins.bool = True) -> None: ... + @overload + def resize(self, new_shape: L[0, -1] | tuple[L[0, -1]] | tuple[()], /, *, refcheck: builtins.bool = True) -> None: ... + + # + def byteswap(self, /, inplace: L[False] = False) -> Self: ... + + # + @overload + def astype( + self, + /, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = "K", + casting: _CastingKind = "unsafe", + subok: builtins.bool = True, + copy: builtins.bool | _CopyMode = True, + ) -> _ScalarT: ... + @overload + def astype( + self, + /, + dtype: DTypeLike | None, + order: _OrderKACF = "K", + casting: _CastingKind = "unsafe", + subok: builtins.bool = True, + copy: builtins.bool | _CopyMode = True, + ) -> Incomplete: ... + + # NOTE: `view` will perform a 0D->scalar cast, + # thus the array `type` is irrelevant to the output type + @overload + def view(self, type: type[ndarray] = ...) -> Self: ... + @overload + def view(self, /, dtype: _DTypeLike[_ScalarT], type: type[ndarray] = ...) -> _ScalarT: ... + @overload + def view(self, /, dtype: DTypeLike, type: type[ndarray] = ...) -> Incomplete: ... + + @overload + def getfield(self, /, dtype: _DTypeLike[_ScalarT], offset: SupportsIndex = 0) -> _ScalarT: ... + @overload + def getfield(self, /, dtype: DTypeLike, offset: SupportsIndex = 0) -> Incomplete: ... + + @overload + def take( + self, + indices: _IntLike_co, + /, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", + ) -> Self: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + /, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", + ) -> NDArray[Self]: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + /, + axis: SupportsIndex | None = None, + *, + out: _ArrayT, + mode: _ModeKind = "raise", + ) -> _ArrayT: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + /, + axis: SupportsIndex | None, + out: _ArrayT, + mode: _ModeKind = "raise", + ) -> _ArrayT: ... + + def repeat(self, repeats: _ArrayLikeInt_co, /, axis: SupportsIndex | None = None) -> ndarray[tuple[int], dtype[Self]]: ... + def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... + def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... + + @overload # (() | []) + def reshape( + self, + shape: tuple[()] | list[Never], + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> Self: ... + @overload # ((1, *(1, ...))@_ShapeT) + def reshape( + self, + shape: _1NShapeT, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[_1NShapeT, dtype[Self]]: ... + @overload # (Sequence[index, ...]) # not recommended + def reshape( + self, + shape: Sequence[SupportsIndex], + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> Self | ndarray[tuple[L[1], ...], dtype[Self]]: ... + @overload # _(index) + def reshape( + self, + size1: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[L[1]], dtype[Self]]: ... + @overload # _(index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[L[1], L[1]], dtype[Self]]: ... + @overload # _(index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[L[1], L[1], L[1]], dtype[Self]]: ... + @overload # _(index, index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + size4: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[L[1], L[1], L[1], L[1]], dtype[Self]]: ... + @overload # _(index, index, index, index, index, *index) # ndim >= 5 + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + size4: SupportsIndex, + size5: SupportsIndex, + /, + *sizes6_: SupportsIndex, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[L[1], L[1], L[1], L[1], L[1], *tuple[L[1], ...]], dtype[Self]]: ... + + def squeeze(self, axis: L[0] | tuple[()] | None = ...) -> Self: ... + def transpose(self, axes: tuple[()] | None = ..., /) -> Self: ... + + @overload + def all( + self, + /, + axis: L[0, -1] | tuple[()] | None = None, + out: None = None, + keepdims: SupportsIndex = False, + *, + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True + ) -> np.bool: ... + @overload + def all( + self, + /, + axis: L[0, -1] | tuple[()] | None, + out: ndarray[tuple[()], dtype[_ScalarT]], + keepdims: SupportsIndex = False, + *, + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, + ) -> _ScalarT: ... + @overload + def all( + self, + /, + axis: L[0, -1] | tuple[()] | None = None, + *, + out: ndarray[tuple[()], dtype[_ScalarT]], + keepdims: SupportsIndex = False, + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, + ) -> _ScalarT: ... + + @overload + def any( + self, + /, + axis: L[0, -1] | tuple[()] | None = None, + out: None = None, + keepdims: SupportsIndex = False, + *, + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True + ) -> np.bool: ... + @overload + def any( + self, + /, + axis: L[0, -1] | tuple[()] | None, + out: ndarray[tuple[()], dtype[_ScalarT]], + keepdims: SupportsIndex = False, + *, + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, + ) -> _ScalarT: ... + @overload + def any( + self, + /, + axis: L[0, -1] | tuple[()] | None = None, + *, + out: ndarray[tuple[()], dtype[_ScalarT]], + keepdims: SupportsIndex = False, + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, + ) -> _ScalarT: ... + + # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` + @property + def dtype(self) -> _dtype[Self]: ... + +class number(generic[_NumberItemT_co], Generic[_NBit, _NumberItemT_co]): + @abstractmethod # `SupportsIndex | str | bytes` equivs `_ConvertibleToInt & _ConvertibleToFloat` + def __new__(cls, value: SupportsIndex | str | bytes = 0, /) -> Self: ... + def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... + + def __neg__(self) -> Self: ... + def __pos__(self) -> Self: ... + def __abs__(self) -> Self: ... + + def __add__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __radd__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __sub__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __rsub__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __mul__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __rmul__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __pow__(self, other: _NumberLike_co, mod: None = None, /) -> Incomplete: ... + def __rpow__(self, other: _NumberLike_co, mod: None = None, /) -> Incomplete: ... + def __truediv__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __rtruediv__(self, other: _NumberLike_co, /) -> Incomplete: ... + + @overload + def __lt__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __lt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ... + @overload + def __lt__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __le__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __le__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ... + @overload + def __le__(self, other: _SupportsGE, /) -> bool_: ... + + @overload + def __gt__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __gt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ... + @overload + def __gt__(self, other: _SupportsLT, /) -> bool_: ... + + @overload + def __ge__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __ge__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ... + @overload + def __ge__(self, other: _SupportsLE, /) -> bool_: ... + +class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): + @property + def itemsize(self) -> L[1]: ... + @property + def nbytes(self) -> L[1]: ... + @property + def real(self) -> Self: ... + @property + def imag(self) -> np.bool[L[False]]: ... + + @overload # mypy bug workaround: https://github.com/numpy/numpy/issues/29245 + def __new__(cls, value: Never, /) -> np.bool[builtins.bool]: ... + @overload + def __new__(cls, value: _Falsy = ..., /) -> np.bool[L[False]]: ... + @overload + def __new__(cls, value: _Truthy, /) -> np.bool[L[True]]: ... + @overload + def __new__(cls, value: object, /) -> np.bool[builtins.bool]: ... + + def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... + + def __bool__(self, /) -> _BoolItemT_co: ... + + @overload + def __int__(self: np.bool[L[False]], /) -> L[0]: ... + @overload + def __int__(self: np.bool[L[True]], /) -> L[1]: ... + @overload + def __int__(self, /) -> L[0, 1]: ... + + def __abs__(self) -> Self: ... + + @overload + def __invert__(self: np.bool[L[False]], /) -> np.bool[L[True]]: ... + @overload + def __invert__(self: np.bool[L[True]], /) -> np.bool[L[False]]: ... + @overload + def __invert__(self, /) -> np.bool: ... + + @overload + def __add__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __add__(self, other: builtins.bool | bool_, /) -> bool_: ... + @overload + def __add__(self, other: int, /) -> int_: ... + @overload + def __add__(self, other: float, /) -> float64: ... + @overload + def __add__(self, other: complex, /) -> complex128: ... + + @overload + def __radd__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __radd__(self, other: builtins.bool, /) -> bool_: ... + @overload + def __radd__(self, other: int, /) -> int_: ... + @overload + def __radd__(self, other: float, /) -> float64: ... + @overload + def __radd__(self, other: complex, /) -> complex128: ... + + @overload + def __sub__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __sub__(self, other: int, /) -> int_: ... + @overload + def __sub__(self, other: float, /) -> float64: ... + @overload + def __sub__(self, other: complex, /) -> complex128: ... + + @overload + def __rsub__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __rsub__(self, other: int, /) -> int_: ... + @overload + def __rsub__(self, other: float, /) -> float64: ... + @overload + def __rsub__(self, other: complex, /) -> complex128: ... + + @overload + def __mul__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __mul__(self, other: builtins.bool | bool_, /) -> bool_: ... + @overload + def __mul__(self, other: int, /) -> int_: ... + @overload + def __mul__(self, other: float, /) -> float64: ... + @overload + def __mul__(self, other: complex, /) -> complex128: ... + + @overload + def __rmul__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __rmul__(self, other: builtins.bool, /) -> bool_: ... + @overload + def __rmul__(self, other: int, /) -> int_: ... + @overload + def __rmul__(self, other: float, /) -> float64: ... + @overload + def __rmul__(self, other: complex, /) -> complex128: ... + + @overload + def __pow__(self, other: _NumberT, mod: None = None, /) -> _NumberT: ... + @overload + def __pow__(self, other: builtins.bool | bool_, mod: None = None, /) -> int8: ... + @overload + def __pow__(self, other: int, mod: None = None, /) -> int_: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complex128: ... + + @overload + def __rpow__(self, other: _NumberT, mod: None = None, /) -> _NumberT: ... + @overload + def __rpow__(self, other: builtins.bool, mod: None = None, /) -> int8: ... + @overload + def __rpow__(self, other: int, mod: None = None, /) -> int_: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ... + + @overload + def __truediv__(self, other: _InexactT, /) -> _InexactT: ... + @overload + def __truediv__(self, other: float | integer | bool_, /) -> float64: ... + @overload + def __truediv__(self, other: complex, /) -> complex128: ... + + @overload + def __rtruediv__(self, other: _InexactT, /) -> _InexactT: ... + @overload + def __rtruediv__(self, other: float | integer, /) -> float64: ... + @overload + def __rtruediv__(self, other: complex, /) -> complex128: ... + + @overload + def __floordiv__(self, other: _RealNumberT, /) -> _RealNumberT: ... + @overload + def __floordiv__(self, other: builtins.bool | bool_, /) -> int8: ... + @overload + def __floordiv__(self, other: int, /) -> int_: ... + @overload + def __floordiv__(self, other: float, /) -> float64: ... + + @overload + def __rfloordiv__(self, other: _RealNumberT, /) -> _RealNumberT: ... + @overload + def __rfloordiv__(self, other: builtins.bool, /) -> int8: ... + @overload + def __rfloordiv__(self, other: int, /) -> int_: ... + @overload + def __rfloordiv__(self, other: float, /) -> float64: ... + + # keep in sync with __floordiv__ + @overload + def __mod__(self, other: _RealNumberT, /) -> _RealNumberT: ... + @overload + def __mod__(self, other: builtins.bool | bool_, /) -> int8: ... + @overload + def __mod__(self, other: int, /) -> int_: ... + @overload + def __mod__(self, other: float, /) -> float64: ... + + # keep in sync with __rfloordiv__ + @overload + def __rmod__(self, other: _RealNumberT, /) -> _RealNumberT: ... + @overload + def __rmod__(self, other: builtins.bool, /) -> int8: ... + @overload + def __rmod__(self, other: int, /) -> int_: ... + @overload + def __rmod__(self, other: float, /) -> float64: ... + + # keep in sync with __mod__ + @overload + def __divmod__(self, other: _RealNumberT, /) -> _2Tuple[_RealNumberT]: ... + @overload + def __divmod__(self, other: builtins.bool | bool_, /) -> _2Tuple[int8]: ... + @overload + def __divmod__(self, other: int, /) -> _2Tuple[int_]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[float64]: ... + + # keep in sync with __rmod__ + @overload + def __rdivmod__(self, other: _RealNumberT, /) -> _2Tuple[_RealNumberT]: ... + @overload + def __rdivmod__(self, other: builtins.bool, /) -> _2Tuple[int8]: ... + @overload + def __rdivmod__(self, other: int, /) -> _2Tuple[int_]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ... + + @overload + def __lshift__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __lshift__(self, other: builtins.bool | bool_, /) -> int8: ... + @overload + def __lshift__(self, other: int, /) -> int_: ... + + @overload + def __rlshift__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __rlshift__(self, other: builtins.bool, /) -> int8: ... + @overload + def __rlshift__(self, other: int, /) -> int_: ... + + # keep in sync with __lshift__ + @overload + def __rshift__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __rshift__(self, other: builtins.bool | bool_, /) -> int8: ... + @overload + def __rshift__(self, other: int, /) -> int_: ... + + # keep in sync with __rlshift__ + @overload + def __rrshift__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __rrshift__(self, other: builtins.bool, /) -> int8: ... + @overload + def __rrshift__(self, other: int, /) -> int_: ... + + @overload + def __and__(self: np.bool[L[False]], other: builtins.bool | np.bool, /) -> np.bool[L[False]]: ... + @overload + def __and__(self, other: L[False] | np.bool[L[False]], /) -> np.bool[L[False]]: ... + @overload + def __and__(self, other: L[True] | np.bool[L[True]], /) -> Self: ... + @overload + def __and__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + @overload + def __and__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __and__(self, other: int, /) -> np.bool | intp: ... + __rand__ = __and__ + + @overload + def __xor__(self: np.bool[L[False]], other: _BoolItemT | np.bool[_BoolItemT], /) -> np.bool[_BoolItemT]: ... + @overload + def __xor__(self: np.bool[L[True]], other: L[True] | np.bool[L[True]], /) -> np.bool[L[False]]: ... + @overload + def __xor__(self, other: L[False] | np.bool[L[False]], /) -> Self: ... + @overload + def __xor__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + @overload + def __xor__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __xor__(self, other: int, /) -> np.bool | intp: ... + __rxor__ = __xor__ + + @overload + def __or__(self: np.bool[L[True]], other: builtins.bool | np.bool, /) -> np.bool[L[True]]: ... + @overload + def __or__(self, other: L[False] | np.bool[L[False]], /) -> Self: ... + @overload + def __or__(self, other: L[True] | np.bool[L[True]], /) -> np.bool[L[True]]: ... + @overload + def __or__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + @overload + def __or__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __or__(self, other: int, /) -> np.bool | intp: ... + __ror__ = __or__ + + @overload + def __lt__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __lt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ... + @overload + def __lt__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __le__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __le__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ... + @overload + def __le__(self, other: _SupportsGE, /) -> bool_: ... + + @overload + def __gt__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __gt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ... + @overload + def __gt__(self, other: _SupportsLT, /) -> bool_: ... + + @overload + def __ge__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __ge__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ... + @overload + def __ge__(self, other: _SupportsLE, /) -> bool_: ... + +# NOTE: This should _not_ be `Final` or a `TypeAlias` +bool_ = bool + +# NOTE: The `object_` constructor returns the passed object, so instances with type +# `object_` cannot exists (at runtime). +# NOTE: Because mypy has some long-standing bugs related to `__new__`, `object_` can't +# be made generic. +@final +class object_(_RealMixin, generic): + @overload + def __new__(cls, value: None = None, /) -> None: ... # type: ignore[misc] + @overload + def __new__(cls, value: _AnyStr, /) -> _AnyStr: ... # type: ignore[misc] + @overload + def __new__(cls, value: ndarray[_ShapeT, Any], /) -> ndarray[_ShapeT, dtype[Self]]: ... # type: ignore[misc] + @overload + def __new__(cls, value: SupportsLenAndGetItem[object], /) -> NDArray[Self]: ... # type: ignore[misc] + @overload + def __new__(cls, value: _T, /) -> _T: ... # type: ignore[misc] + @overload # catch-all + def __new__(cls, value: Any = ..., /) -> object | NDArray[Self]: ... # type: ignore[misc] + + def __hash__(self, /) -> int: ... + def __abs__(self, /) -> object_: ... # this affects NDArray[object_].__abs__ + def __call__(self, /, *args: object, **kwargs: object) -> Any: ... + + if sys.version_info >= (3, 12): + def __release_buffer__(self, buffer: memoryview, /) -> None: ... + +class integer(_IntegralMixin, _RoundMixin, number[_NBit, int]): + @abstractmethod + def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... + + # NOTE: `bit_count` and `__index__` are technically defined in the concrete subtypes + def bit_count(self, /) -> int: ... + def __index__(self, /) -> int: ... + def __invert__(self, /) -> Self: ... + + @override # type: ignore[override] + @overload + def __truediv__(self, other: float | integer, /) -> float64: ... + @overload + def __truediv__(self, other: complex, /) -> complex128: ... + + @override # type: ignore[override] + @overload + def __rtruediv__(self, other: float | integer, /) -> float64: ... + @overload + def __rtruediv__(self, other: complex, /) -> complex128: ... + + def __floordiv__(self, value: _IntLike_co, /) -> integer: ... + def __rfloordiv__(self, value: _IntLike_co, /) -> integer: ... + def __mod__(self, value: _IntLike_co, /) -> integer: ... + def __rmod__(self, value: _IntLike_co, /) -> integer: ... + def __divmod__(self, value: _IntLike_co, /) -> _2Tuple[integer]: ... + def __rdivmod__(self, value: _IntLike_co, /) -> _2Tuple[integer]: ... + + # Ensure that objects annotated as `integer` support bit-wise operations + def __lshift__(self, other: _IntLike_co, /) -> integer: ... + def __rlshift__(self, other: _IntLike_co, /) -> integer: ... + def __rshift__(self, other: _IntLike_co, /) -> integer: ... + def __rrshift__(self, other: _IntLike_co, /) -> integer: ... + def __and__(self, other: _IntLike_co, /) -> integer: ... + def __rand__(self, other: _IntLike_co, /) -> integer: ... + def __or__(self, other: _IntLike_co, /) -> integer: ... + def __ror__(self, other: _IntLike_co, /) -> integer: ... + def __xor__(self, other: _IntLike_co, /) -> integer: ... + def __rxor__(self, other: _IntLike_co, /) -> integer: ... + +class signedinteger(integer[_NBit]): + def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... + + # arithmetic ops + + @override # type: ignore[override] + @overload + def __add__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __add__(self, other: float, /) -> float64: ... + @overload + def __add__(self, other: complex, /) -> complex128: ... + @overload + def __add__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __add__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __radd__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __radd__(self, other: float, /) -> float64: ... + @overload + def __radd__(self, other: complex, /) -> complex128: ... + @overload + def __radd__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __radd__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __sub__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __sub__(self, other: float, /) -> float64: ... + @overload + def __sub__(self, other: complex, /) -> complex128: ... + @overload + def __sub__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __sub__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rsub__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rsub__(self, other: float, /) -> float64: ... + @overload + def __rsub__(self, other: complex, /) -> complex128: ... + @overload + def __rsub__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rsub__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __mul__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mul__(self, other: float, /) -> float64: ... + @overload + def __mul__(self, other: complex, /) -> complex128: ... + @overload + def __mul__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __mul__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rmul__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rmul__(self, other: float, /) -> float64: ... + @overload + def __rmul__(self, other: complex, /) -> complex128: ... + @overload + def __rmul__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rmul__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __pow__(self, other: int | int8 | bool_ | Self, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __pow__(self, other: signedinteger, mod: None = None, /) -> signedinteger: ... + @overload + def __pow__(self, other: integer, mod: None = None, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rpow__(self, other: int | int8 | bool_, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __rpow__(self, other: signedinteger, mod: None = None, /) -> signedinteger: ... + @overload + def __rpow__(self, other: integer, mod: None = None, /) -> Incomplete: ... + + # modular division ops + + @override # type: ignore[override] + @overload + def __floordiv__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __floordiv__(self, other: float, /) -> float64: ... + @overload + def __floordiv__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __floordiv__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rfloordiv__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rfloordiv__(self, other: float, /) -> float64: ... + @overload + def __rfloordiv__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rfloordiv__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __mod__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mod__(self, other: float, /) -> float64: ... + @overload + def __mod__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __mod__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rmod__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rmod__(self, other: float, /) -> float64: ... + @overload + def __rmod__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rmod__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __divmod__(self, other: int | int8 | bool_ | Self, /) -> _2Tuple[Self]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __divmod__(self, other: signedinteger, /) -> _2Tuple[signedinteger]: ... + @overload + def __divmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... + + @override # type: ignore[override] + @overload + def __rdivmod__(self, other: int | int8 | bool_, /) -> _2Tuple[Self]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __rdivmod__(self, other: signedinteger, /) -> _2Tuple[signedinteger]: ... + @overload + def __rdivmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... + + # bitwise ops + + @override # type: ignore[override] + @overload + def __lshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __lshift__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rlshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rlshift__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __rshift__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rrshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rrshift__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __and__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __and__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rand__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rand__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __xor__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __xor__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rxor__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rxor__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __or__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __or__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __ror__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __ror__(self, other: integer, /) -> signedinteger: ... + +int8 = signedinteger[_8Bit] +int16 = signedinteger[_16Bit] +int32 = signedinteger[_32Bit] +int64 = signedinteger[_64Bit] + +byte = signedinteger[_NBitByte] +short = signedinteger[_NBitShort] +intc = signedinteger[_NBitIntC] +intp = signedinteger[_NBitIntP] +int_ = intp +long = signedinteger[_NBitLong] +longlong = signedinteger[_NBitLongLong] + +class unsignedinteger(integer[_NBit1]): + def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... + + # arithmetic ops + + @override # type: ignore[override] + @overload + def __add__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __add__(self, other: float, /) -> float64: ... + @overload + def __add__(self, other: complex, /) -> complex128: ... + @overload + def __add__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __add__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __radd__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __radd__(self, other: float, /) -> float64: ... + @overload + def __radd__(self, other: complex, /) -> complex128: ... + @overload + def __radd__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __radd__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __sub__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __sub__(self, other: float, /) -> float64: ... + @overload + def __sub__(self, other: complex, /) -> complex128: ... + @overload + def __sub__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __sub__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rsub__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rsub__(self, other: float, /) -> float64: ... + @overload + def __rsub__(self, other: complex, /) -> complex128: ... + @overload + def __rsub__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rsub__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __mul__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __mul__(self, other: float, /) -> float64: ... + @overload + def __mul__(self, other: complex, /) -> complex128: ... + @overload + def __mul__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __mul__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rmul__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rmul__(self, other: float, /) -> float64: ... + @overload + def __rmul__(self, other: complex, /) -> complex128: ... + @overload + def __rmul__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rmul__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __pow__(self, other: int | uint8 | bool_ | Self, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __pow__(self, other: unsignedinteger, mod: None = None, /) -> unsignedinteger: ... + @overload + def __pow__(self, other: integer, mod: None = None, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rpow__(self, other: int | uint8 | bool_, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __rpow__(self, other: unsignedinteger, mod: None = None, /) -> unsignedinteger: ... + @overload + def __rpow__(self, other: integer, mod: None = None, /) -> Incomplete: ... + + # modular division ops + + @override # type: ignore[override] + @overload + def __floordiv__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __floordiv__(self, other: float, /) -> float64: ... + @overload + def __floordiv__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __floordiv__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rfloordiv__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rfloordiv__(self, other: float, /) -> float64: ... + @overload + def __rfloordiv__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rfloordiv__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __mod__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __mod__(self, other: float, /) -> float64: ... + @overload + def __mod__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __mod__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rmod__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rmod__(self, other: float, /) -> float64: ... + @overload + def __rmod__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rmod__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __divmod__(self, other: int | uint8 | bool_ | Self, /) -> _2Tuple[Self]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __divmod__(self, other: unsignedinteger, /) -> _2Tuple[unsignedinteger]: ... + @overload + def __divmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... + + @override # type: ignore[override] + @overload + def __rdivmod__(self, other: int | uint8 | bool_, /) -> _2Tuple[Self]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __rdivmod__(self, other: unsignedinteger, /) -> _2Tuple[unsignedinteger]: ... + @overload + def __rdivmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... + + # bitwise ops + + @override # type: ignore[override] + @overload + def __lshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __lshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __lshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rlshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rlshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rlshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __rshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rrshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rrshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rrshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __and__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __and__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __and__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rand__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rand__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rand__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __xor__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __xor__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __xor__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rxor__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rxor__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rxor__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __or__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __or__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __or__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __ror__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __ror__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __ror__(self, other: signedinteger, /) -> signedinteger: ... + +uint8: TypeAlias = unsignedinteger[_8Bit] +uint16: TypeAlias = unsignedinteger[_16Bit] +uint32: TypeAlias = unsignedinteger[_32Bit] +uint64: TypeAlias = unsignedinteger[_64Bit] + +ubyte: TypeAlias = unsignedinteger[_NBitByte] +ushort: TypeAlias = unsignedinteger[_NBitShort] +uintc: TypeAlias = unsignedinteger[_NBitIntC] +uintp: TypeAlias = unsignedinteger[_NBitIntP] +uint: TypeAlias = uintp +ulong: TypeAlias = unsignedinteger[_NBitLong] +ulonglong: TypeAlias = unsignedinteger[_NBitLongLong] + +class inexact(number[_NBit, _InexactItemT_co], Generic[_NBit, _InexactItemT_co]): + @abstractmethod + def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... + +class floating(_RealMixin, _RoundMixin, inexact[_NBit1, float]): + def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... + + # arithmetic ops + + @override # type: ignore[override] + @overload + def __add__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __add__(self, other: integer | floating, /) -> floating: ... + @overload + def __add__(self, other: float, /) -> Self: ... + @overload + def __add__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __radd__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __radd__(self, other: integer | floating, /) -> floating: ... + @overload + def __radd__(self, other: float, /) -> Self: ... + @overload + def __radd__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __sub__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __sub__(self, other: integer | floating, /) -> floating: ... + @overload + def __sub__(self, other: float, /) -> Self: ... + @overload + def __sub__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rsub__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rsub__(self, other: integer | floating, /) -> floating: ... + @overload + def __rsub__(self, other: float, /) -> Self: ... + @overload + def __rsub__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __mul__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mul__(self, other: integer | floating, /) -> floating: ... + @overload + def __mul__(self, other: float, /) -> Self: ... + @overload + def __mul__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rmul__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rmul__(self, other: integer | floating, /) -> floating: ... + @overload + def __rmul__(self, other: float, /) -> Self: ... + @overload + def __rmul__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __pow__(self, other: int | float16 | uint8 | int8 | bool_ | Self, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: integer | floating, mod: None = None, /) -> floating: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rpow__(self, other: int | float16 | uint8 | int8 | bool_, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: integer | floating, mod: None = None, /) -> floating: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __truediv__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __truediv__(self, other: integer | floating, /) -> floating: ... + @overload + def __truediv__(self, other: float, /) -> Self: ... + @overload + def __truediv__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rtruediv__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rtruediv__(self, other: integer | floating, /) -> floating: ... + @overload + def __rtruediv__(self, other: float, /) -> Self: ... + @overload + def __rtruediv__(self, other: complex, /) -> complexfloating: ... + + # modular division ops + + @overload + def __floordiv__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __floordiv__(self, other: integer | floating, /) -> floating: ... + @overload + def __floordiv__(self, other: float, /) -> Self: ... + + @overload + def __rfloordiv__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rfloordiv__(self, other: integer | floating, /) -> floating: ... + @overload + def __rfloordiv__(self, other: float, /) -> Self: ... + + @overload + def __mod__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mod__(self, other: integer | floating, /) -> floating: ... + @overload + def __mod__(self, other: float, /) -> Self: ... + + @overload + def __rmod__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rmod__(self, other: integer | floating, /) -> floating: ... + @overload + def __rmod__(self, other: float, /) -> Self: ... + + @overload + def __divmod__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> _2Tuple[Self]: ... + @overload + def __divmod__(self, other: integer | floating, /) -> _2Tuple[floating]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[Self]: ... + + @overload + def __rdivmod__(self, other: int | float16 | uint8 | int8 | bool_, /) -> _2Tuple[Self]: ... + @overload + def __rdivmod__(self, other: integer | floating, /) -> _2Tuple[floating]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[Self]: ... + + # NOTE: `is_integer` and `as_integer_ratio` are technically defined in the concrete subtypes + def is_integer(self, /) -> builtins.bool: ... + def as_integer_ratio(self, /) -> tuple[int, int]: ... + +float16: TypeAlias = floating[_16Bit] +float32: TypeAlias = floating[_32Bit] + +# either a C `double`, `float`, or `longdouble` +class float64(floating[_64Bit], float): # type: ignore[misc] + @property + def itemsize(self) -> L[8]: ... + @property + def nbytes(self) -> L[8]: ... + + # overrides for `floating` and `builtins.float` compatibility (`_RealMixin` doesn't work) + @property + def real(self) -> Self: ... + @property + def imag(self) -> Self: ... + def conjugate(self) -> Self: ... + def __getnewargs__(self, /) -> tuple[float]: ... + + @classmethod + def __getformat__(cls, typestr: L["double", "float"], /) -> str: ... # undocumented + + # float64-specific operator overrides + # NOTE: Mypy reports [misc] errors about "unsafely overlapping signatures" for the + # reflected methods. But since they are identical to the non-reflected versions, + # these errors appear to be false positives. + + @overload # type: ignore[override] + def __add__(self, other: _Float64_co, /) -> float64: ... + @overload + def __add__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __add__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __add__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __radd__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] + @overload + def __radd__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] + @overload + def __radd__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __radd__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __sub__(self, other: _Float64_co, /) -> float64: ... + @overload + def __sub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __sub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __sub__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __rsub__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] + @overload + def __rsub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] + @overload + def __rsub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __rsub__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __mul__(self, other: _Float64_co, /) -> float64: ... + @overload + def __mul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __mul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __mul__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __rmul__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] + @overload + def __rmul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] + @overload + def __rmul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __rmul__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __truediv__(self, other: _Float64_co, /) -> float64: ... + @overload + def __truediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __truediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __truediv__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __rtruediv__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] + @overload + def __rtruediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... # type: ignore[misc] + @overload + def __rtruediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __rtruediv__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __floordiv__(self, other: _Float64_co, /) -> float64: ... + @overload + def __floordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __floordiv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __floordiv__(self, other: complex, /) -> float64 | complex128: ... + + @overload + def __rfloordiv__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] + @overload + def __rfloordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __rfloordiv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __rfloordiv__(self, other: complex, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __pow__(self, other: _Float64_co, mod: None = None, /) -> float64: ... + @overload + def __pow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... + @overload + def __pow__( + self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / + ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... + + @overload # type: ignore[override] + def __rpow__(self, other: _Float64_co, mod: None = None, /) -> float64: ... # type: ignore[misc] + @overload + def __rpow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... # type: ignore[misc] + @overload + def __rpow__( + self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / + ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... + + def __mod__(self, other: _Float64_co, /) -> float64: ... + def __rmod__(self, other: _Float64_co, /) -> float64: ... # type: ignore[misc] + + def __divmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... + def __rdivmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... # type: ignore[misc] + +half: TypeAlias = float16 +single: TypeAlias = float32 +double: TypeAlias = float64 +longdouble: TypeAlias = floating[_NBitLongDouble] + +# The main reason for `complexfloating` having two typevars is cosmetic. +# It is used to clarify why `complex128`s precision is `_64Bit`, the latter +# describing the two 64 bit floats representing its real and imaginary component + +class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]): + @overload + def __new__( + cls, + real: complex | SupportsComplex | SupportsFloat | SupportsIndex = 0, + imag: complex | SupportsFloat | SupportsIndex = 0, + /, + ) -> Self: ... + @overload + def __new__(cls, real: _ConvertibleToComplex | None = 0, /) -> Self: ... + + @property + def real(self) -> floating[_NBit1]: ... + @property + def imag(self) -> floating[_NBit2]: ... + + # NOTE: `__complex__` is technically defined in the concrete subtypes + def __complex__(self, /) -> complex: ... + def __abs__(self, /) -> floating[_NBit1 | _NBit2]: ... # type: ignore[override] + + @overload # type: ignore[override] + def __add__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + @overload + def __add__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + @overload + def __add__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + + @overload # type: ignore[override] + def __radd__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + @overload + def __radd__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + @overload + def __radd__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + + @overload # type: ignore[override] + def __sub__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + @overload + def __sub__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + @overload + def __sub__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + + @overload # type: ignore[override] + def __rsub__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + @overload + def __rsub__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + @overload + def __rsub__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + + @overload # type: ignore[override] + def __mul__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + @overload + def __mul__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + @overload + def __mul__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + + @overload # type: ignore[override] + def __rmul__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + @overload + def __rmul__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + @overload + def __rmul__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + + @overload # type: ignore[override] + def __truediv__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + @overload + def __truediv__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + @overload + def __truediv__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + + @overload # type: ignore[override] + def __rtruediv__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + @overload + def __rtruediv__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + @overload + def __rtruediv__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + + @overload # type: ignore[override] + def __pow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBit1, _NBit2]: ... + @overload + def __pow__( + self, other: complex | float64 | complex128, mod: None = None, / + ) -> complexfloating[_NBit1, _NBit2] | complex128: ... + @overload + def __pow__( + self, other: number[_NBit], mod: None = None, / + ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + + @overload # type: ignore[override] + def __rpow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBit1, _NBit2]: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + @overload + def __rpow__( + self, other: number[_NBit], mod: None = None, / + ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + +complex64: TypeAlias = complexfloating[_32Bit] + +class complex128(complexfloating[_64Bit, _64Bit], complex): + @property + def itemsize(self) -> L[16]: ... + @property + def nbytes(self) -> L[16]: ... + + # overrides for `floating` and `builtins.float` compatibility + @property + def real(self) -> float64: ... + @property + def imag(self) -> float64: ... + def conjugate(self) -> Self: ... + def __abs__(self) -> float64: ... # type: ignore[override] + def __getnewargs__(self, /) -> tuple[float, float]: ... + + # complex128-specific operator overrides + @overload # type: ignore[override] + def __add__(self, other: _Complex128_co, /) -> complex128: ... + @overload + def __add__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __radd__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] + + @overload # type: ignore[override] + def __sub__(self, other: _Complex128_co, /) -> complex128: ... + @overload + def __sub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rsub__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] + + @overload # type: ignore[override] + def __mul__(self, other: _Complex128_co, /) -> complex128: ... + @overload + def __mul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rmul__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] + + @overload # type: ignore[override] + def __truediv__(self, other: _Complex128_co, /) -> complex128: ... + @overload + def __truediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rtruediv__(self, other: _Complex128_co, /) -> complex128: ... # type: ignore[override] + + @overload # type: ignore[override] + def __pow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... + @overload + def __pow__( + self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / + ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rpow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... # type: ignore[override] + +csingle: TypeAlias = complex64 +cdouble: TypeAlias = complex128 +clongdouble: TypeAlias = complexfloating[_NBitLongDouble] + +class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co]): + @property + def itemsize(self) -> L[8]: ... + @property + def nbytes(self) -> L[8]: ... + + @overload + def __new__(cls, value: _TD64ItemT_co | timedelta64[_TD64ItemT_co], /) -> Self: ... + @overload + def __new__(cls, /) -> timedelta64[L[0]]: ... + @overload + def __new__(cls, value: _NaTValue | None, format: _TimeUnitSpec, /) -> timedelta64[None]: ... + @overload + def __new__(cls, value: L[0], format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> timedelta64[L[0]]: ... + @overload + def __new__(cls, value: _IntLike_co, format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> timedelta64[int]: ... + @overload + def __new__(cls, value: dt.timedelta, format: _TimeUnitSpec[_IntTimeUnit], /) -> timedelta64[int]: ... + @overload + def __new__( + cls, + value: dt.timedelta | _IntLike_co, + format: _TimeUnitSpec[_NativeTD64Unit] = ..., + /, + ) -> timedelta64[dt.timedelta]: ... + @overload + def __new__(cls, value: _ConvertibleToTD64, format: _TimeUnitSpec = ..., /) -> Self: ... + + # inherited at runtime from `signedinteger` + def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... + + # NOTE: Only a limited number of units support conversion + # to builtin scalar types: `Y`, `M`, `ns`, `ps`, `fs`, `as` + def __int__(self: timedelta64[int], /) -> int: ... + def __float__(self: timedelta64[int], /) -> float: ... + + def __neg__(self, /) -> Self: ... + def __pos__(self, /) -> Self: ... + def __abs__(self, /) -> Self: ... + + @overload + def __add__(self: timedelta64[None], x: _TD64Like_co, /) -> timedelta64[None]: ... + @overload + def __add__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ... + @overload + def __add__(self: timedelta64[int], x: timedelta64, /) -> timedelta64[int | None]: ... + @overload + def __add__(self: timedelta64[dt.timedelta], x: _AnyDateOrTime, /) -> _AnyDateOrTime: ... + @overload + def __add__(self: timedelta64[_AnyTD64Item], x: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... + @overload + def __add__(self, x: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[overload-cannot-match] + __radd__ = __add__ + + @overload + def __mul__(self: timedelta64[_AnyTD64Item], x: int | np.integer | np.bool, /) -> timedelta64[_AnyTD64Item]: ... + @overload + def __mul__(self: timedelta64[_AnyTD64Item], x: float | np.floating, /) -> timedelta64[_AnyTD64Item | None]: ... + @overload + def __mul__(self, x: float | np.floating | np.integer | np.bool, /) -> timedelta64: ... + __rmul__ = __mul__ + + @overload + def __mod__(self, x: timedelta64[L[0] | None], /) -> timedelta64[None]: ... + @overload + def __mod__(self: timedelta64[None], x: timedelta64, /) -> timedelta64[None]: ... + @overload + def __mod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... + @overload + def __mod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ... + @overload + def __mod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... + @overload + def __mod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... + @overload + def __mod__(self, x: timedelta64, /) -> timedelta64: ... + + # NOTE: The L[0] makes __mod__ non-commutative, which the first two overloads + # reflect. However, mypy does not seem to like this, so we ignore the errors. + @overload + def __rmod__(self, x: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[misc] + @overload + def __rmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> timedelta64[None]: ... + @overload + def __rmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... # type: ignore[misc] + @overload + def __rmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ... # type: ignore[misc] + @overload + def __rmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... + @overload + def __rmod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... # type: ignore[misc] + @overload + def __rmod__(self, x: timedelta64, /) -> timedelta64: ... # type: ignore[misc] + + # keep in sync with __mod__ + @overload + def __divmod__(self, x: timedelta64[L[0] | None], /) -> tuple[int64, timedelta64[None]]: ... + @overload + def __divmod__(self: timedelta64[None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... + @overload + def __divmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... + @overload + def __divmod__( + self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], / + ) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... + @overload + def __divmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... + @overload + def __divmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... + @overload + def __divmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... + + # keep in sync with __rmod__ + @overload + def __rdivmod__(self, x: timedelta64[None], /) -> tuple[int64, timedelta64[None]]: ... # type: ignore[misc] + @overload + def __rdivmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... # type: ignore[misc] + @overload + def __rdivmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... # type: ignore[misc] + @overload + def __rdivmod__( # type: ignore[misc] + self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], / + ) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... + @overload + def __rdivmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... + @overload + def __rdivmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... # type: ignore[misc] + @overload + def __rdivmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... # type: ignore[misc] + + @overload + def __sub__(self: timedelta64[None], b: _TD64Like_co, /) -> timedelta64[None]: ... + @overload + def __sub__(self: timedelta64[int], b: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ... + @overload + def __sub__(self: timedelta64[int], b: timedelta64, /) -> timedelta64[int | None]: ... + @overload + def __sub__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> dt.timedelta: ... + @overload + def __sub__(self: timedelta64[_AnyTD64Item], b: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... + @overload + def __sub__(self, b: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[overload-cannot-match] + + # NOTE: subtraction is not commutative, so __rsub__ differs from __sub__. + # This confuses mypy, so we ignore the [misc] errors it reports. + @overload + def __rsub__(self: timedelta64[None], a: _TD64Like_co, /) -> timedelta64[None]: ... + @overload + def __rsub__(self: timedelta64[dt.timedelta], a: _AnyDateOrTime, /) -> _AnyDateOrTime: ... + @overload + def __rsub__(self: timedelta64[dt.timedelta], a: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item]: ... # type: ignore[misc] + @overload + def __rsub__(self: timedelta64[_AnyTD64Item], a: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... # type: ignore[misc] + @overload + def __rsub__(self, a: timedelta64[None], /) -> timedelta64[None]: ... # type: ignore[overload-cannot-match] + @overload + def __rsub__(self, a: datetime64[None], /) -> datetime64[None]: ... # type: ignore[misc] + + @overload + def __truediv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> float: ... + @overload + def __truediv__(self, b: timedelta64, /) -> float64: ... + @overload + def __truediv__(self: timedelta64[_AnyTD64Item], b: int | integer, /) -> timedelta64[_AnyTD64Item]: ... + @overload + def __truediv__(self: timedelta64[_AnyTD64Item], b: float | floating, /) -> timedelta64[_AnyTD64Item | None]: ... + @overload + def __truediv__(self, b: float | floating | integer, /) -> timedelta64: ... + + @overload + def __rtruediv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> float: ... + @overload + def __rtruediv__(self, a: timedelta64, /) -> float64: ... + + @overload + def __floordiv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> int: ... + @overload + def __floordiv__(self, b: timedelta64, /) -> int64: ... + @overload + def __floordiv__(self: timedelta64[_AnyTD64Item], b: int | integer, /) -> timedelta64[_AnyTD64Item]: ... + @overload + def __floordiv__(self: timedelta64[_AnyTD64Item], b: float | floating, /) -> timedelta64[_AnyTD64Item | None]: ... + + @overload + def __rfloordiv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> int: ... + @overload + def __rfloordiv__(self, a: timedelta64, /) -> int64: ... + + # comparison ops + + @overload + def __lt__(self, other: _TD64Like_co, /) -> bool_: ... + @overload + def __lt__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ... + @overload + def __lt__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __le__(self, other: _TD64Like_co, /) -> bool_: ... + @overload + def __le__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ... + @overload + def __le__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __gt__(self, other: _TD64Like_co, /) -> bool_: ... + @overload + def __gt__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ... + @overload + def __gt__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __ge__(self, other: _TD64Like_co, /) -> bool_: ... + @overload + def __ge__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ... + @overload + def __ge__(self, other: _SupportsGT, /) -> bool_: ... + +class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): + @property + def itemsize(self) -> L[8]: ... + @property + def nbytes(self) -> L[8]: ... + + @overload + def __new__(cls, value: datetime64[_DT64ItemT_co], /) -> Self: ... + @overload + def __new__(cls, value: _AnyDT64Arg, /) -> datetime64[_AnyDT64Arg]: ... + @overload + def __new__(cls, value: _NaTValue | None = ..., format: _TimeUnitSpec = ..., /) -> datetime64[None]: ... + @overload + def __new__(cls, value: _DT64Now, format: _TimeUnitSpec[_NativeTimeUnit] = ..., /) -> datetime64[dt.datetime]: ... + @overload + def __new__(cls, value: _DT64Date, format: _TimeUnitSpec[_DateUnit] = ..., /) -> datetime64[dt.date]: ... + @overload + def __new__(cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_IntTimeUnit], /) -> datetime64[int]: ... + @overload + def __new__( + cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_NativeTimeUnit], / + ) -> datetime64[dt.datetime]: ... + @overload + def __new__(cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_DateUnit], /) -> datetime64[dt.date]: ... + @overload + def __new__(cls, value: bytes | str | dt.date | None, format: _TimeUnitSpec = ..., /) -> Self: ... + + def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... + + @overload + def __add__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ... + @overload + def __add__(self: datetime64[None], x: _TD64Like_co, /) -> datetime64[None]: ... + @overload + def __add__(self: datetime64[int], x: timedelta64[int | dt.timedelta], /) -> datetime64[int]: ... + @overload + def __add__(self: datetime64[dt.datetime], x: timedelta64[dt.timedelta], /) -> datetime64[dt.datetime]: ... + @overload + def __add__(self: datetime64[dt.date], x: timedelta64[dt.timedelta], /) -> datetime64[dt.date]: ... + @overload + def __add__(self: datetime64[dt.date], x: timedelta64[int], /) -> datetime64[int]: ... + @overload + def __add__(self, x: datetime64[None], /) -> datetime64[None]: ... + @overload + def __add__(self, x: _TD64Like_co, /) -> datetime64: ... + __radd__ = __add__ + + @overload + def __sub__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ... + @overload + def __sub__(self: datetime64[_AnyDate], x: _AnyDate, /) -> dt.timedelta: ... + @overload + def __sub__(self: datetime64[None], x: timedelta64, /) -> datetime64[None]: ... + @overload + def __sub__(self: datetime64[None], x: datetime64, /) -> timedelta64[None]: ... + @overload + def __sub__(self: datetime64[int], x: timedelta64, /) -> datetime64[int]: ... + @overload + def __sub__(self: datetime64[int], x: datetime64, /) -> timedelta64[int]: ... + @overload + def __sub__(self: datetime64[dt.datetime], x: timedelta64[int], /) -> datetime64[int]: ... + @overload + def __sub__(self: datetime64[dt.datetime], x: timedelta64[dt.timedelta], /) -> datetime64[dt.datetime]: ... + @overload + def __sub__(self: datetime64[dt.datetime], x: datetime64[int], /) -> timedelta64[int]: ... + @overload + def __sub__(self: datetime64[dt.date], x: timedelta64[int], /) -> datetime64[dt.date | int]: ... + @overload + def __sub__(self: datetime64[dt.date], x: timedelta64[dt.timedelta], /) -> datetime64[dt.date]: ... + @overload + def __sub__(self: datetime64[dt.date], x: datetime64[dt.date], /) -> timedelta64[dt.timedelta]: ... + @overload + def __sub__(self, x: timedelta64[None], /) -> datetime64[None]: ... + @overload + def __sub__(self, x: datetime64[None], /) -> timedelta64[None]: ... + @overload + def __sub__(self, x: _TD64Like_co, /) -> datetime64: ... + @overload + def __sub__(self, x: datetime64, /) -> timedelta64: ... + + # NOTE: mypy gets confused by the non-commutativity of subtraction here + @overload + def __rsub__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ... + @overload + def __rsub__(self: datetime64[_AnyDate], x: _AnyDate, /) -> dt.timedelta: ... + @overload + def __rsub__(self: datetime64[None], x: datetime64, /) -> timedelta64[None]: ... + @overload + def __rsub__(self: datetime64[int], x: datetime64, /) -> timedelta64[int]: ... + @overload + def __rsub__(self: datetime64[dt.datetime], x: datetime64[int], /) -> timedelta64[int]: ... # type: ignore[misc] + @overload + def __rsub__(self: datetime64[dt.datetime], x: datetime64[dt.date], /) -> timedelta64[dt.timedelta]: ... # type: ignore[misc] + @overload + def __rsub__(self, x: datetime64[None], /) -> timedelta64[None]: ... # type: ignore[misc] + @overload + def __rsub__(self, x: datetime64, /) -> timedelta64: ... # type: ignore[misc] + + @overload + def __lt__(self, other: datetime64, /) -> bool_: ... + @overload + def __lt__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ... + @overload + def __lt__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __le__(self, other: datetime64, /) -> bool_: ... + @overload + def __le__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ... + @overload + def __le__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __gt__(self, other: datetime64, /) -> bool_: ... + @overload + def __gt__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ... + @overload + def __gt__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __ge__(self, other: datetime64, /) -> bool_: ... + @overload + def __ge__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ... + @overload + def __ge__(self, other: _SupportsGT, /) -> bool_: ... + +@final # cannot be subclassed at runtime +class flexible(_RealMixin, generic[_FlexibleItemT_co], Generic[_FlexibleItemT_co]): ... # type: ignore[misc] + +class void(flexible[bytes | tuple[Any, ...]]): # type: ignore[misc] + @overload + def __new__(cls, length_or_data: _IntLike_co | bytes, /, dtype: None = None) -> Self: ... + @overload + def __new__(cls, length_or_data: object, /, dtype: _DTypeLikeVoid) -> Self: ... + + @overload + def __getitem__(self, key: str | SupportsIndex, /) -> Any: ... + @overload + def __getitem__(self, key: list[str], /) -> void: ... + def __setitem__(self, key: str | list[str] | SupportsIndex, value: ArrayLike, /) -> None: ... + + def setfield(self, val: ArrayLike, dtype: DTypeLike, offset: int = ...) -> None: ... + +class character(flexible[_CharacterItemT_co], Generic[_CharacterItemT_co]): # type: ignore[misc] + @abstractmethod + def __new__(cls, value: object = ..., /) -> Self: ... + +# NOTE: Most `np.bytes_` / `np.str_` methods return their builtin `bytes` / `str` counterpart + +class bytes_(character[bytes], bytes): # type: ignore[misc] + @overload + def __new__(cls, value: object = b"", /) -> Self: ... + @overload + def __new__(cls, value: str, /, encoding: str, errors: str = "strict") -> Self: ... + + # + @override + def __hash__(self, /) -> int: ... + + # + def __bytes__(self, /) -> bytes: ... + +class str_(character[str], str): # type: ignore[misc] + @overload + def __new__(cls, value: object = "", /) -> Self: ... + @overload + def __new__(cls, value: bytes, /, encoding: str, errors: str = "strict") -> Self: ... + + # + @override + def __hash__(self, /) -> int: ... + +# See `numpy._typing._ufunc` for more concrete nin-/nout-specific stubs +@final +class ufunc: + __signature__: Final[inspect.Signature] + + @property + def __name__(self) -> LiteralString: ... + @property + def __qualname__(self) -> LiteralString: ... # pyright: ignore[reportIncompatibleVariableOverride] + @property + def __doc__(self) -> str: ... # type: ignore[override] + @property + def nin(self) -> int: ... + @property + def nout(self) -> int: ... + @property + def nargs(self) -> int: ... + @property + def ntypes(self) -> int: ... + @property + def types(self) -> list[LiteralString]: ... + # Broad return type because it has to encompass things like + # + # >>> np.logical_and.identity is True + # True + # >>> np.add.identity is 0 + # True + # >>> np.sin.identity is None + # True + # + # and any user-defined ufuncs. + @property + def identity(self) -> Any: ... + # This is None for ufuncs and a string for gufuncs. + @property + def signature(self) -> LiteralString | None: ... + + def __call__(self, /, *args: Any, **kwargs: Any) -> Any: ... + + # The next four methods will always exist, but they will just + # raise a ValueError ufuncs with that don't accept two input + # arguments and return one output argument. Because of that we + # can't type them very precisely. + def accumulate( + self, + array: ArrayLike, + /, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: ndarray | EllipsisType | None = None, + ) -> NDArray[Incomplete]: ... + def reduce( + self, + array: ArrayLike, + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + out: ndarray | EllipsisType | None = None, + **kwargs: Incomplete, + ) -> Incomplete: ... + def reduceat( + self, + array: ArrayLike, + /, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: ndarray | EllipsisType | None = None, + ) -> NDArray[Incomplete]: ... + def outer(self, A: ArrayLike, B: ArrayLike, /, **kwargs: Incomplete) -> NDArray[Incomplete]: ... + + # Similarly `at` won't be defined for ufuncs that return multiple + # outputs, so we can't type it very precisely. + def at(self, a: ndarray, indices: _ArrayLikeInt_co, b: ArrayLike | None = None, /) -> None: ... + + # + def resolve_dtypes( + self, + /, + dtypes: tuple[dtype | type | None, ...], + *, + signature: tuple[dtype | None, ...] | None = None, + casting: _CastingKind | None = None, + reduction: builtins.bool = False, + ) -> tuple[dtype, ...]: ... + +# Parameters: `__name__`, `ntypes` and `identity` +absolute: _UFunc_Nin1_Nout1[L["absolute"], L[20], None] +add: _UFunc_Nin2_Nout1[L["add"], L[22], L[0]] +arccos: _UFunc_Nin1_Nout1[L["arccos"], L[8], None] +arccosh: _UFunc_Nin1_Nout1[L["arccosh"], L[8], None] +arcsin: _UFunc_Nin1_Nout1[L["arcsin"], L[8], None] +arcsinh: _UFunc_Nin1_Nout1[L["arcsinh"], L[8], None] +arctan2: _UFunc_Nin2_Nout1[L["arctan2"], L[5], None] +arctan: _UFunc_Nin1_Nout1[L["arctan"], L[8], None] +arctanh: _UFunc_Nin1_Nout1[L["arctanh"], L[8], None] +bitwise_and: _UFunc_Nin2_Nout1[L["bitwise_and"], L[12], L[-1]] +bitwise_count: _UFunc_Nin1_Nout1[L["bitwise_count"], L[11], None] +bitwise_or: _UFunc_Nin2_Nout1[L["bitwise_or"], L[12], L[0]] +bitwise_xor: _UFunc_Nin2_Nout1[L["bitwise_xor"], L[12], L[0]] +cbrt: _UFunc_Nin1_Nout1[L["cbrt"], L[5], None] +ceil: _UFunc_Nin1_Nout1[L["ceil"], L[7], None] +conjugate: _UFunc_Nin1_Nout1[L["conjugate"], L[18], None] +copysign: _UFunc_Nin2_Nout1[L["copysign"], L[4], None] +cos: _UFunc_Nin1_Nout1[L["cos"], L[9], None] +cosh: _UFunc_Nin1_Nout1[L["cosh"], L[8], None] +deg2rad: _UFunc_Nin1_Nout1[L["deg2rad"], L[5], None] +degrees: _UFunc_Nin1_Nout1[L["degrees"], L[5], None] +divide: _UFunc_Nin2_Nout1[L["divide"], L[11], None] +divmod: _UFunc_Nin2_Nout2[L["divmod"], L[15], None] +equal: _UFunc_Nin2_Nout1[L["equal"], L[23], None] +exp2: _UFunc_Nin1_Nout1[L["exp2"], L[8], None] +exp: _UFunc_Nin1_Nout1[L["exp"], L[10], None] +expm1: _UFunc_Nin1_Nout1[L["expm1"], L[8], None] +fabs: _UFunc_Nin1_Nout1[L["fabs"], L[5], None] +float_power: _UFunc_Nin2_Nout1[L["float_power"], L[4], None] +floor: _UFunc_Nin1_Nout1[L["floor"], L[7], None] +floor_divide: _UFunc_Nin2_Nout1[L["floor_divide"], L[21], None] +fmax: _UFunc_Nin2_Nout1[L["fmax"], L[21], None] +fmin: _UFunc_Nin2_Nout1[L["fmin"], L[21], None] +fmod: _UFunc_Nin2_Nout1[L["fmod"], L[15], None] +frexp: _UFunc_Nin1_Nout2[L["frexp"], L[4], None] +gcd: _UFunc_Nin2_Nout1[L["gcd"], L[11], L[0]] +greater: _UFunc_Nin2_Nout1[L["greater"], L[23], None] +greater_equal: _UFunc_Nin2_Nout1[L["greater_equal"], L[23], None] +heaviside: _UFunc_Nin2_Nout1[L["heaviside"], L[4], None] +hypot: _UFunc_Nin2_Nout1[L["hypot"], L[5], L[0]] +invert: _UFunc_Nin1_Nout1[L["invert"], L[12], None] +isfinite: _UFunc_Nin1_Nout1[L["isfinite"], L[20], None] +isinf: _UFunc_Nin1_Nout1[L["isinf"], L[20], None] +isnan: _UFunc_Nin1_Nout1[L["isnan"], L[20], None] +isnat: _UFunc_Nin1_Nout1[L["isnat"], L[2], None] +lcm: _UFunc_Nin2_Nout1[L["lcm"], L[11], None] +ldexp: _UFunc_Nin2_Nout1[L["ldexp"], L[8], None] +left_shift: _UFunc_Nin2_Nout1[L["left_shift"], L[11], None] +less: _UFunc_Nin2_Nout1[L["less"], L[23], None] +less_equal: _UFunc_Nin2_Nout1[L["less_equal"], L[23], None] +log10: _UFunc_Nin1_Nout1[L["log10"], L[8], None] +log1p: _UFunc_Nin1_Nout1[L["log1p"], L[8], None] +log2: _UFunc_Nin1_Nout1[L["log2"], L[8], None] +log: _UFunc_Nin1_Nout1[L["log"], L[10], None] +logaddexp2: _UFunc_Nin2_Nout1[L["logaddexp2"], L[4], float] +logaddexp: _UFunc_Nin2_Nout1[L["logaddexp"], L[4], float] +logical_and: _UFunc_Nin2_Nout1[L["logical_and"], L[20], L[True]] +logical_not: _UFunc_Nin1_Nout1[L["logical_not"], L[20], None] +logical_or: _UFunc_Nin2_Nout1[L["logical_or"], L[20], L[False]] +logical_xor: _UFunc_Nin2_Nout1[L["logical_xor"], L[19], L[False]] +matmul: _GUFunc_Nin2_Nout1[L["matmul"], L[19], None, L["(n?,k),(k,m?)->(n?,m?)"]] +matvec: _GUFunc_Nin2_Nout1[L["matvec"], L[19], None, L["(m,n),(n)->(m)"]] +maximum: _UFunc_Nin2_Nout1[L["maximum"], L[21], None] +minimum: _UFunc_Nin2_Nout1[L["minimum"], L[21], None] +modf: _UFunc_Nin1_Nout2[L["modf"], L[4], None] +multiply: _UFunc_Nin2_Nout1[L["multiply"], L[23], L[1]] +negative: _UFunc_Nin1_Nout1[L["negative"], L[19], None] +nextafter: _UFunc_Nin2_Nout1[L["nextafter"], L[4], None] +not_equal: _UFunc_Nin2_Nout1[L["not_equal"], L[23], None] +positive: _UFunc_Nin1_Nout1[L["positive"], L[19], None] +power: _UFunc_Nin2_Nout1[L["power"], L[18], None] +rad2deg: _UFunc_Nin1_Nout1[L["rad2deg"], L[5], None] +radians: _UFunc_Nin1_Nout1[L["radians"], L[5], None] +reciprocal: _UFunc_Nin1_Nout1[L["reciprocal"], L[18], None] +remainder: _UFunc_Nin2_Nout1[L["remainder"], L[16], None] +right_shift: _UFunc_Nin2_Nout1[L["right_shift"], L[11], None] +rint: _UFunc_Nin1_Nout1[L["rint"], L[10], None] +sign: _UFunc_Nin1_Nout1[L["sign"], L[19], None] +signbit: _UFunc_Nin1_Nout1[L["signbit"], L[4], None] +sin: _UFunc_Nin1_Nout1[L["sin"], L[9], None] +sinh: _UFunc_Nin1_Nout1[L["sinh"], L[8], None] +spacing: _UFunc_Nin1_Nout1[L["spacing"], L[4], None] +sqrt: _UFunc_Nin1_Nout1[L["sqrt"], L[10], None] +square: _UFunc_Nin1_Nout1[L["square"], L[18], None] +subtract: _UFunc_Nin2_Nout1[L["subtract"], L[21], None] +tan: _UFunc_Nin1_Nout1[L["tan"], L[8], None] +tanh: _UFunc_Nin1_Nout1[L["tanh"], L[8], None] +trunc: _UFunc_Nin1_Nout1[L["trunc"], L[7], None] +vecdot: _GUFunc_Nin2_Nout1[L["vecdot"], L[19], None, L["(n),(n)->()"]] +vecmat: _GUFunc_Nin2_Nout1[L["vecmat"], L[19], None, L["(n),(n,m)->(m)"]] + +abs = absolute +acos = arccos +acosh = arccosh +asin = arcsin +asinh = arcsinh +atan = arctan +atanh = arctanh +atan2 = arctan2 +concat = concatenate +bitwise_left_shift = left_shift +bitwise_not = invert +bitwise_invert = invert +bitwise_right_shift = right_shift +conj = conjugate +mod = remainder +permute_dims = transpose +pow = power +true_divide = divide + +# TODO: The type of each `__next__` and `iters` return-type depends +# on the length and dtype of `args`; we can't describe this behavior yet +# as we lack variadics (PEP 646). +@final +class broadcast: + def __new__(cls, *args: ArrayLike) -> broadcast: ... + @property + def index(self) -> int: ... + @property + def iters(self) -> tuple[flatiter[Any], ...]: ... + @property + def nd(self) -> int: ... + @property + def ndim(self) -> int: ... + @property + def numiter(self) -> int: ... + @property + def shape(self) -> _AnyShape: ... + @property + def size(self) -> int: ... + def __next__(self) -> tuple[Any, ...]: ... + def __iter__(self) -> Self: ... + def reset(self) -> None: ... + +@final +class busdaycalendar: + def __init__( + self, + /, + weekmask: str | Sequence[int | bool_ | integer] | _SupportsArray[dtype[bool_ | integer]] = "1111100", + holidays: Sequence[dt.date | datetime64] | _SupportsArray[dtype[datetime64]] | None = None, + ) -> None: ... + @property + def weekmask(self) -> ndarray[tuple[int], dtype[bool_]]: ... + @property + def holidays(self) -> ndarray[tuple[int], dtype[datetime64[dt.date]]]: ... + +@final +class nditer: + @overload + def __init__( + self, + /, + op: ArrayLike, + flags: Sequence[_NDIterFlagsKind] | None = None, + op_flags: Sequence[_NDIterFlagsOp] | None = None, + op_dtypes: DTypeLike | None = None, + order: _OrderKACF = "K", + casting: _CastingKind = "safe", + op_axes: Sequence[SupportsIndex] | None = None, + itershape: _ShapeLike | None = None, + buffersize: SupportsIndex = 0, + ) -> None: ... + @overload + def __init__( + self, + /, + op: Sequence[ArrayLike | None], + flags: Sequence[_NDIterFlagsKind] | None = None, + op_flags: Sequence[Sequence[_NDIterFlagsOp]] | None = None, + op_dtypes: Sequence[DTypeLike | None] | None = None, + order: _OrderKACF = "K", + casting: _CastingKind = "safe", + op_axes: Sequence[Sequence[SupportsIndex]] | None = None, + itershape: _ShapeLike | None = None, + buffersize: SupportsIndex = 0, + ) -> None: ... + + def __enter__(self) -> nditer: ... + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: ... + def __iter__(self) -> nditer: ... + def __next__(self) -> tuple[NDArray[Any], ...]: ... + def __len__(self) -> int: ... + def __copy__(self) -> nditer: ... + @overload + def __getitem__(self, index: SupportsIndex) -> NDArray[Any]: ... + @overload + def __getitem__(self, index: slice) -> tuple[NDArray[Any], ...]: ... + def __setitem__(self, index: slice | SupportsIndex, value: ArrayLike) -> None: ... + def close(self) -> None: ... + def copy(self) -> nditer: ... + def debug_print(self) -> None: ... + def enable_external_loop(self) -> None: ... + def iternext(self) -> builtins.bool: ... + def remove_axis(self, i: SupportsIndex, /) -> None: ... + def remove_multi_index(self) -> None: ... + def reset(self) -> None: ... + @property + def dtypes(self) -> tuple[dtype, ...]: ... + @property + def finished(self) -> builtins.bool: ... + @property + def has_delayed_bufalloc(self) -> builtins.bool: ... + @property + def has_index(self) -> builtins.bool: ... + @property + def has_multi_index(self) -> builtins.bool: ... + @property + def index(self) -> int: ... + @property + def iterationneedsapi(self) -> builtins.bool: ... + @property + def iterindex(self) -> int: ... + @property + def iterrange(self) -> tuple[int, ...]: ... + @property + def itersize(self) -> int: ... + @property + def itviews(self) -> tuple[NDArray[Any], ...]: ... + @property + def multi_index(self) -> tuple[int, ...]: ... + @property + def ndim(self) -> int: ... + @property + def nop(self) -> int: ... + @property + def operands(self) -> tuple[NDArray[Any], ...]: ... + @property + def shape(self) -> tuple[int, ...]: ... + @property + def value(self) -> tuple[NDArray[Any], ...]: ... + +class memmap(ndarray[_ShapeT_co, _DTypeT_co]): + __array_priority__: ClassVar[float] + filename: str | None + offset: int + mode: str + @overload + def __new__( + subtype, + filename: StrOrBytesPath | _SupportsFileMethodsRW, + dtype: type[uint8] = ..., + mode: _MemMapModeKind = "r+", + offset: int = 0, + shape: int | tuple[int, ...] | None = None, + order: _OrderKACF = "C", + ) -> memmap[Any, dtype[uint8]]: ... + @overload + def __new__( + subtype, + filename: StrOrBytesPath | _SupportsFileMethodsRW, + dtype: _DTypeLike[_ScalarT], + mode: _MemMapModeKind = "r+", + offset: int = 0, + shape: int | tuple[int, ...] | None = None, + order: _OrderKACF = "C", + ) -> memmap[Any, dtype[_ScalarT]]: ... + @overload + def __new__( + subtype, + filename: StrOrBytesPath | _SupportsFileMethodsRW, + dtype: DTypeLike, + mode: _MemMapModeKind = "r+", + offset: int = 0, + shape: int | tuple[int, ...] | None = None, + order: _OrderKACF = "C", + ) -> memmap[Any, dtype]: ... + def __array_finalize__(self, obj: object) -> None: ... + def __array_wrap__( + self, + array: memmap[_ShapeT_co, _DTypeT_co], # type: ignore[override] + context: tuple[ufunc, tuple[Any, ...], int] | None = None, + return_scalar: builtins.bool = False, + ) -> Any: ... + def flush(self) -> None: ... + +class poly1d: + @property + def variable(self) -> LiteralString: ... + @property + def order(self) -> int: ... + @property + def o(self) -> int: ... + @property + def roots(self) -> NDArray[Any]: ... + @property + def r(self) -> NDArray[Any]: ... + + @property + def coeffs(self) -> NDArray[Any]: ... + @coeffs.setter + def coeffs(self, value: NDArray[Any]) -> None: ... + + @property + def c(self) -> NDArray[Any]: ... + @c.setter + def c(self, value: NDArray[Any]) -> None: ... + + @property + def coef(self) -> NDArray[Any]: ... + @coef.setter + def coef(self, value: NDArray[Any]) -> None: ... + + @property + def coefficients(self) -> NDArray[Any]: ... + @coefficients.setter + def coefficients(self, value: NDArray[Any]) -> None: ... + + __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] + + @overload + def __array__(self, /, t: None = None, copy: builtins.bool | None = None) -> ndarray[tuple[int], dtype]: ... + @overload + def __array__(self, /, t: _DTypeT, copy: builtins.bool | None = None) -> ndarray[tuple[int], _DTypeT]: ... + + @overload + def __call__(self, val: _ScalarLike_co) -> Any: ... + @overload + def __call__(self, val: poly1d) -> poly1d: ... + @overload + def __call__(self, val: ArrayLike) -> NDArray[Any]: ... + + def __init__( + self, + c_or_r: ArrayLike, + r: builtins.bool = False, + variable: str | None = None, + ) -> None: ... + def __len__(self) -> int: ... + def __neg__(self) -> poly1d: ... + def __pos__(self) -> poly1d: ... + def __mul__(self, other: ArrayLike, /) -> poly1d: ... + def __rmul__(self, other: ArrayLike, /) -> poly1d: ... + def __add__(self, other: ArrayLike, /) -> poly1d: ... + def __radd__(self, other: ArrayLike, /) -> poly1d: ... + def __pow__(self, val: _FloatLike_co, /) -> poly1d: ... # Integral floats are accepted + def __sub__(self, other: ArrayLike, /) -> poly1d: ... + def __rsub__(self, other: ArrayLike, /) -> poly1d: ... + def __truediv__(self, other: ArrayLike, /) -> poly1d: ... + def __rtruediv__(self, other: ArrayLike, /) -> poly1d: ... + def __getitem__(self, val: int, /) -> Any: ... + def __setitem__(self, key: int, val: Any, /) -> None: ... + def __iter__(self) -> Iterator[Any]: ... + def deriv(self, m: SupportsInt | SupportsIndex = 1) -> poly1d: ... + def integ( + self, + m: SupportsInt | SupportsIndex = 1, + k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = 0, + ) -> poly1d: ... + +def from_dlpack( + x: _SupportsDLPack[None], + /, + *, + device: L["cpu"] | None = None, + copy: builtins.bool | None = None, +) -> NDArray[number | np.bool]: ... diff --git a/local_packages/numpy/_array_api_info.py b/local_packages/numpy/_array_api_info.py new file mode 100644 index 0000000..41adb83 --- /dev/null +++ b/local_packages/numpy/_array_api_info.py @@ -0,0 +1,346 @@ +""" +Array API Inspection namespace + +This is the namespace for inspection functions as defined by the array API +standard. See +https://data-apis.org/array-api/latest/API_specification/inspection.html for +more details. + +""" +from numpy._core import ( + bool, + complex64, + complex128, + dtype, + float32, + float64, + int8, + int16, + int32, + int64, + intp, + uint8, + uint16, + uint32, + uint64, +) +from numpy._utils import set_module + + +@set_module('numpy') +class __array_namespace_info__: + """ + Get the array API inspection namespace for NumPy. + + The array API inspection namespace defines the following functions: + + - capabilities() + - default_device() + - default_dtypes() + - dtypes() + - devices() + + See + https://data-apis.org/array-api/latest/API_specification/inspection.html + for more details. + + Returns + ------- + info : ModuleType + The array API inspection namespace for NumPy. + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.default_dtypes() + {'real floating': numpy.float64, + 'complex floating': numpy.complex128, + 'integral': numpy.int64, + 'indexing': numpy.int64} + + """ + + def capabilities(self): + """ + Return a dictionary of array API library capabilities. + + The resulting dictionary has the following keys: + + - **"boolean indexing"**: boolean indicating whether an array library + supports boolean indexing. Always ``True`` for NumPy. + + - **"data-dependent shapes"**: boolean indicating whether an array + library supports data-dependent output shapes. Always ``True`` for + NumPy. + + See + https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html + for more details. + + See Also + -------- + __array_namespace_info__.default_device, + __array_namespace_info__.default_dtypes, + __array_namespace_info__.dtypes, + __array_namespace_info__.devices + + Returns + ------- + capabilities : dict + A dictionary of array API library capabilities. + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.capabilities() + {'boolean indexing': True, + 'data-dependent shapes': True, + 'max dimensions': 64} + + """ + return { + "boolean indexing": True, + "data-dependent shapes": True, + "max dimensions": 64, + } + + def default_device(self): + """ + The default device used for new NumPy arrays. + + For NumPy, this always returns ``'cpu'``. + + See Also + -------- + __array_namespace_info__.capabilities, + __array_namespace_info__.default_dtypes, + __array_namespace_info__.dtypes, + __array_namespace_info__.devices + + Returns + ------- + device : str + The default device used for new NumPy arrays. + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.default_device() + 'cpu' + + """ + return "cpu" + + def default_dtypes(self, *, device=None): + """ + The default data types used for new NumPy arrays. + + For NumPy, this always returns the following dictionary: + + - **"real floating"**: ``numpy.float64`` + - **"complex floating"**: ``numpy.complex128`` + - **"integral"**: ``numpy.intp`` + - **"indexing"**: ``numpy.intp`` + + Parameters + ---------- + device : str, optional + The device to get the default data types for. For NumPy, only + ``'cpu'`` is allowed. + + Returns + ------- + dtypes : dict + A dictionary describing the default data types used for new NumPy + arrays. + + See Also + -------- + __array_namespace_info__.capabilities, + __array_namespace_info__.default_device, + __array_namespace_info__.dtypes, + __array_namespace_info__.devices + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.default_dtypes() + {'real floating': numpy.float64, + 'complex floating': numpy.complex128, + 'integral': numpy.int64, + 'indexing': numpy.int64} + + """ + if device not in ["cpu", None]: + raise ValueError( + 'Device not understood. Only "cpu" is allowed, but received:' + f' {device}' + ) + return { + "real floating": dtype(float64), + "complex floating": dtype(complex128), + "integral": dtype(intp), + "indexing": dtype(intp), + } + + def dtypes(self, *, device=None, kind=None): + """ + The array API data types supported by NumPy. + + Note that this function only returns data types that are defined by + the array API. + + Parameters + ---------- + device : str, optional + The device to get the data types for. For NumPy, only ``'cpu'`` is + allowed. + kind : str or tuple of str, optional + The kind of data types to return. If ``None``, all data types are + returned. If a string, only data types of that kind are returned. + If a tuple, a dictionary containing the union of the given kinds + is returned. The following kinds are supported: + + - ``'bool'``: boolean data types (i.e., ``bool``). + - ``'signed integer'``: signed integer data types (i.e., ``int8``, + ``int16``, ``int32``, ``int64``). + - ``'unsigned integer'``: unsigned integer data types (i.e., + ``uint8``, ``uint16``, ``uint32``, ``uint64``). + - ``'integral'``: integer data types. Shorthand for ``('signed + integer', 'unsigned integer')``. + - ``'real floating'``: real-valued floating-point data types + (i.e., ``float32``, ``float64``). + - ``'complex floating'``: complex floating-point data types (i.e., + ``complex64``, ``complex128``). + - ``'numeric'``: numeric data types. Shorthand for ``('integral', + 'real floating', 'complex floating')``. + + Returns + ------- + dtypes : dict + A dictionary mapping the names of data types to the corresponding + NumPy data types. + + See Also + -------- + __array_namespace_info__.capabilities, + __array_namespace_info__.default_device, + __array_namespace_info__.default_dtypes, + __array_namespace_info__.devices + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.dtypes(kind='signed integer') + {'int8': numpy.int8, + 'int16': numpy.int16, + 'int32': numpy.int32, + 'int64': numpy.int64} + + """ + if device not in ["cpu", None]: + raise ValueError( + 'Device not understood. Only "cpu" is allowed, but received:' + f' {device}' + ) + if kind is None: + return { + "bool": dtype(bool), + "int8": dtype(int8), + "int16": dtype(int16), + "int32": dtype(int32), + "int64": dtype(int64), + "uint8": dtype(uint8), + "uint16": dtype(uint16), + "uint32": dtype(uint32), + "uint64": dtype(uint64), + "float32": dtype(float32), + "float64": dtype(float64), + "complex64": dtype(complex64), + "complex128": dtype(complex128), + } + if kind == "bool": + return {"bool": bool} + if kind == "signed integer": + return { + "int8": dtype(int8), + "int16": dtype(int16), + "int32": dtype(int32), + "int64": dtype(int64), + } + if kind == "unsigned integer": + return { + "uint8": dtype(uint8), + "uint16": dtype(uint16), + "uint32": dtype(uint32), + "uint64": dtype(uint64), + } + if kind == "integral": + return { + "int8": dtype(int8), + "int16": dtype(int16), + "int32": dtype(int32), + "int64": dtype(int64), + "uint8": dtype(uint8), + "uint16": dtype(uint16), + "uint32": dtype(uint32), + "uint64": dtype(uint64), + } + if kind == "real floating": + return { + "float32": dtype(float32), + "float64": dtype(float64), + } + if kind == "complex floating": + return { + "complex64": dtype(complex64), + "complex128": dtype(complex128), + } + if kind == "numeric": + return { + "int8": dtype(int8), + "int16": dtype(int16), + "int32": dtype(int32), + "int64": dtype(int64), + "uint8": dtype(uint8), + "uint16": dtype(uint16), + "uint32": dtype(uint32), + "uint64": dtype(uint64), + "float32": dtype(float32), + "float64": dtype(float64), + "complex64": dtype(complex64), + "complex128": dtype(complex128), + } + if isinstance(kind, tuple): + res = {} + for k in kind: + res.update(self.dtypes(kind=k)) + return res + raise ValueError(f"unsupported kind: {kind!r}") + + def devices(self): + """ + The devices supported by NumPy. + + For NumPy, this always returns ``['cpu']``. + + Returns + ------- + devices : list of str + The devices supported by NumPy. + + See Also + -------- + __array_namespace_info__.capabilities, + __array_namespace_info__.default_device, + __array_namespace_info__.default_dtypes, + __array_namespace_info__.dtypes + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.devices() + ['cpu'] + + """ + return ["cpu"] diff --git a/local_packages/numpy/_array_api_info.pyi b/local_packages/numpy/_array_api_info.pyi new file mode 100644 index 0000000..3961251 --- /dev/null +++ b/local_packages/numpy/_array_api_info.pyi @@ -0,0 +1,206 @@ +from typing import ( + Literal, + Never, + TypeAlias, + TypedDict, + TypeVar, + final, + overload, + type_check_only, +) + +import numpy as np + +_Device: TypeAlias = Literal["cpu"] +_DeviceLike: TypeAlias = _Device | None + +_Capabilities = TypedDict( + "_Capabilities", + { + "boolean indexing": Literal[True], + "data-dependent shapes": Literal[True], + }, +) + +_DefaultDTypes = TypedDict( + "_DefaultDTypes", + { + "real floating": np.dtype[np.float64], + "complex floating": np.dtype[np.complex128], + "integral": np.dtype[np.intp], + "indexing": np.dtype[np.intp], + }, +) + +_KindBool: TypeAlias = Literal["bool"] +_KindInt: TypeAlias = Literal["signed integer"] +_KindUInt: TypeAlias = Literal["unsigned integer"] +_KindInteger: TypeAlias = Literal["integral"] +_KindFloat: TypeAlias = Literal["real floating"] +_KindComplex: TypeAlias = Literal["complex floating"] +_KindNumber: TypeAlias = Literal["numeric"] +_Kind: TypeAlias = ( + _KindBool + | _KindInt + | _KindUInt + | _KindInteger + | _KindFloat + | _KindComplex + | _KindNumber +) + +_T1 = TypeVar("_T1") +_T2 = TypeVar("_T2") +_T3 = TypeVar("_T3") +_Permute1: TypeAlias = _T1 | tuple[_T1] +_Permute2: TypeAlias = tuple[_T1, _T2] | tuple[_T2, _T1] +_Permute3: TypeAlias = ( + tuple[_T1, _T2, _T3] | tuple[_T1, _T3, _T2] + | tuple[_T2, _T1, _T3] | tuple[_T2, _T3, _T1] + | tuple[_T3, _T1, _T2] | tuple[_T3, _T2, _T1] +) + +@type_check_only +class _DTypesBool(TypedDict): + bool: np.dtype[np.bool] + +@type_check_only +class _DTypesInt(TypedDict): + int8: np.dtype[np.int8] + int16: np.dtype[np.int16] + int32: np.dtype[np.int32] + int64: np.dtype[np.int64] + +@type_check_only +class _DTypesUInt(TypedDict): + uint8: np.dtype[np.uint8] + uint16: np.dtype[np.uint16] + uint32: np.dtype[np.uint32] + uint64: np.dtype[np.uint64] + +@type_check_only +class _DTypesInteger(_DTypesInt, _DTypesUInt): ... + +@type_check_only +class _DTypesFloat(TypedDict): + float32: np.dtype[np.float32] + float64: np.dtype[np.float64] + +@type_check_only +class _DTypesComplex(TypedDict): + complex64: np.dtype[np.complex64] + complex128: np.dtype[np.complex128] + +@type_check_only +class _DTypesNumber(_DTypesInteger, _DTypesFloat, _DTypesComplex): ... + +@type_check_only +class _DTypes(_DTypesBool, _DTypesNumber): ... + +@type_check_only +class _DTypesUnion(TypedDict, total=False): + bool: np.dtype[np.bool] + int8: np.dtype[np.int8] + int16: np.dtype[np.int16] + int32: np.dtype[np.int32] + int64: np.dtype[np.int64] + uint8: np.dtype[np.uint8] + uint16: np.dtype[np.uint16] + uint32: np.dtype[np.uint32] + uint64: np.dtype[np.uint64] + float32: np.dtype[np.float32] + float64: np.dtype[np.float64] + complex64: np.dtype[np.complex64] + complex128: np.dtype[np.complex128] + +_EmptyDict: TypeAlias = dict[Never, Never] + +@final +class __array_namespace_info__: + __module__: Literal["numpy"] = "numpy" + + def capabilities(self) -> _Capabilities: ... + def default_device(self) -> _Device: ... + def default_dtypes( + self, + *, + device: _DeviceLike = None, + ) -> _DefaultDTypes: ... + def devices(self) -> list[_Device]: ... + + @overload + def dtypes( + self, + *, + device: _DeviceLike = None, + kind: None = None, + ) -> _DTypes: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = None, + kind: _Permute1[_KindBool], + ) -> _DTypesBool: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = None, + kind: _Permute1[_KindInt], + ) -> _DTypesInt: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = None, + kind: _Permute1[_KindUInt], + ) -> _DTypesUInt: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = None, + kind: _Permute1[_KindFloat], + ) -> _DTypesFloat: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = None, + kind: _Permute1[_KindComplex], + ) -> _DTypesComplex: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = None, + kind: ( + _Permute1[_KindInteger] + | _Permute2[_KindInt, _KindUInt] + ), + ) -> _DTypesInteger: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = None, + kind: ( + _Permute1[_KindNumber] + | _Permute3[_KindInteger, _KindFloat, _KindComplex] + ), + ) -> _DTypesNumber: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = None, + kind: tuple[()], + ) -> _EmptyDict: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = None, + kind: tuple[_Kind, ...], + ) -> _DTypesUnion: ... diff --git a/local_packages/numpy/_configtool.py b/local_packages/numpy/_configtool.py new file mode 100644 index 0000000..db7831c --- /dev/null +++ b/local_packages/numpy/_configtool.py @@ -0,0 +1,39 @@ +import argparse +import sys +from pathlib import Path + +from .lib._utils_impl import get_include +from .version import __version__ + + +def main() -> None: + parser = argparse.ArgumentParser() + parser.add_argument( + "--version", + action="version", + version=__version__, + help="Print the version and exit.", + ) + parser.add_argument( + "--cflags", + action="store_true", + help="Compile flag needed when using the NumPy headers.", + ) + parser.add_argument( + "--pkgconfigdir", + action="store_true", + help=("Print the pkgconfig directory in which `numpy.pc` is stored " + "(useful for setting $PKG_CONFIG_PATH)."), + ) + args = parser.parse_args() + if not sys.argv[1:]: + parser.print_help() + if args.cflags: + print("-I" + get_include()) + if args.pkgconfigdir: + _path = Path(get_include()) / '..' / 'lib' / 'pkgconfig' + print(_path.resolve()) + + +if __name__ == "__main__": + main() diff --git a/local_packages/numpy/_configtool.pyi b/local_packages/numpy/_configtool.pyi new file mode 100644 index 0000000..7e7363e --- /dev/null +++ b/local_packages/numpy/_configtool.pyi @@ -0,0 +1 @@ +def main() -> None: ... diff --git a/local_packages/numpy/_core/__init__.py b/local_packages/numpy/_core/__init__.py new file mode 100644 index 0000000..ede50aa --- /dev/null +++ b/local_packages/numpy/_core/__init__.py @@ -0,0 +1,203 @@ +""" +Contains the core of NumPy: ndarray, ufuncs, dtypes, etc. + +Please note that this module is private. All functions and objects +are available in the main ``numpy`` namespace - use that instead. + +""" + +import os + +from numpy.version import version as __version__ + +# disables OpenBLAS affinity setting of the main thread that limits +# python threads or processes to one core +env_added = [] +for envkey in ['OPENBLAS_MAIN_FREE']: + if envkey not in os.environ: + # Note: using `putenv` (and `unsetenv` further down) instead of updating + # `os.environ` on purpose to avoid a race condition, see gh-30627. + os.putenv(envkey, '1') + env_added.append(envkey) + +try: + from . import multiarray +except ImportError as exc: + import sys + + # Bypass for the module re-initialization opt-out + if exc.msg == "cannot load module more than once per process": + raise + + # Basically always, the problem should be that the C module is wrong/missing... + if ( + isinstance(exc, ModuleNotFoundError) + and exc.name == "numpy._core._multiarray_umath" + ): + import sys + candidates = [] + for path in __path__: + candidates.extend( + f for f in os.listdir(path) if f.startswith("_multiarray_umath")) + if len(candidates) == 0: + bad_c_module_info = ( + "We found no compiled module, did NumPy build successfully?\n") + else: + candidate_str = '\n * '.join(candidates) + # cache_tag is documented to be possibly None, so just use name if it is + # this guesses at cache_tag being the same as the extension module scheme + tag = sys.implementation.cache_tag or sys.implementation.name + bad_c_module_info = ( + f"The following compiled module files exist, but seem incompatible\n" + f"with with either python '{tag}' or the " + f"platform '{sys.platform}':\n\n * {candidate_str}\n" + ) + else: + bad_c_module_info = "" + + major, minor, *_ = sys.version_info + msg = f""" + +IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE! + +Importing the numpy C-extensions failed. This error can happen for +many reasons, often due to issues with your setup or how NumPy was +installed. +{bad_c_module_info} +We have compiled some common reasons and troubleshooting tips at: + + https://numpy.org/devdocs/user/troubleshooting-importerror.html + +Please note and check the following: + + * The Python version is: Python {major}.{minor} from "{sys.executable}" + * The NumPy version is: "{__version__}" + +and make sure that they are the versions you expect. + +Please carefully study the information and documentation linked above. +This is unlikely to be a NumPy issue but will be caused by a bad install +or environment on your machine. + +Original error was: {exc} +""" + + raise ImportError(msg) from exc +finally: + for envkey in env_added: + os.unsetenv(envkey) +del envkey +del env_added +del os + +from . import umath + +# Check that multiarray,umath are pure python modules wrapping +# _multiarray_umath and not either of the old c-extension modules +if not (hasattr(multiarray, '_multiarray_umath') and + hasattr(umath, '_multiarray_umath')): + import sys + path = sys.modules['numpy'].__path__ + msg = ("Something is wrong with the numpy installation. " + "While importing we detected an older version of " + "numpy in {}. One method of fixing this is to repeatedly uninstall " + "numpy until none is found, then reinstall this version.") + raise ImportError(msg.format(path)) + +from . import numerictypes as nt +from .numerictypes import sctypeDict, sctypes + +multiarray.set_typeDict(nt.sctypeDict) +from . import einsumfunc, fromnumeric, function_base, getlimits, numeric, shape_base +from .einsumfunc import * +from .fromnumeric import * +from .function_base import * +from .getlimits import * + +# Note: module name memmap is overwritten by a class with same name +from .memmap import * +from .numeric import * +from .records import recarray, record +from .shape_base import * + +del nt + +# do this after everything else, to minimize the chance of this misleadingly +# appearing in an import-time traceback +# add these for module-freeze analysis (like PyInstaller) +from . import ( + _add_newdocs, + _add_newdocs_scalars, + _dtype, + _dtype_ctypes, + _internal, + _methods, +) +from .numeric import absolute as abs + +acos = numeric.arccos +acosh = numeric.arccosh +asin = numeric.arcsin +asinh = numeric.arcsinh +atan = numeric.arctan +atanh = numeric.arctanh +atan2 = numeric.arctan2 +concat = numeric.concatenate +bitwise_left_shift = numeric.left_shift +bitwise_invert = numeric.invert +bitwise_right_shift = numeric.right_shift +permute_dims = numeric.transpose +pow = numeric.power + +__all__ = [ + "abs", "acos", "acosh", "asin", "asinh", "atan", "atanh", "atan2", + "bitwise_invert", "bitwise_left_shift", "bitwise_right_shift", "concat", + "pow", "permute_dims", "memmap", "sctypeDict", "record", "recarray" +] +__all__ += numeric.__all__ +__all__ += function_base.__all__ +__all__ += getlimits.__all__ +__all__ += shape_base.__all__ +__all__ += einsumfunc.__all__ + + +def _ufunc_reduce(func): + # Report the `__name__`. pickle will try to find the module. Note that + # pickle supports for this `__name__` to be a `__qualname__`. It may + # make sense to add a `__qualname__` to ufuncs, to allow this more + # explicitly (Numba has ufuncs as attributes). + # See also: https://github.com/dask/distributed/issues/3450 + return func.__name__ + + +def _DType_reconstruct(scalar_type): + # This is a work-around to pickle type(np.dtype(np.float64)), etc. + # and it should eventually be replaced with a better solution, e.g. when + # DTypes become HeapTypes. + return type(dtype(scalar_type)) + + +def _DType_reduce(DType): + # As types/classes, most DTypes can simply be pickled by their name: + if not DType._legacy or DType.__module__ == "numpy.dtypes": + return DType.__name__ + + # However, user defined legacy dtypes (like rational) do not end up in + # `numpy.dtypes` as module and do not have a public class at all. + # For these, we pickle them by reconstructing them from the scalar type: + scalar_type = DType.type + return _DType_reconstruct, (scalar_type,) + + +import copyreg + +copyreg.pickle(ufunc, _ufunc_reduce) +copyreg.pickle(type(dtype), _DType_reduce, _DType_reconstruct) + +# Unclutter namespace (must keep _*_reconstruct for unpickling) +del copyreg, _ufunc_reduce, _DType_reduce + +from numpy._pytesttester import PytestTester + +test = PytestTester(__name__) +del PytestTester diff --git a/local_packages/numpy/_core/__init__.pyi b/local_packages/numpy/_core/__init__.pyi new file mode 100644 index 0000000..ce5427b --- /dev/null +++ b/local_packages/numpy/_core/__init__.pyi @@ -0,0 +1,666 @@ +# keep in sync with https://github.com/numpy/numtype/blob/main/src/numpy-stubs/_core/__init__.pyi + +from ._asarray import require +from ._ufunc_config import ( + errstate, + getbufsize, + geterr, + geterrcall, + setbufsize, + seterr, + seterrcall, +) +from .arrayprint import ( + array2string, + array_repr, + array_str, + format_float_positional, + format_float_scientific, + get_printoptions, + printoptions, + set_printoptions, +) +from .einsumfunc import einsum, einsum_path +from .fromnumeric import ( + all, + amax, + amin, + any, + argmax, + argmin, + argpartition, + argsort, + around, + choose, + clip, + compress, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + diagonal, + matrix_transpose, + max, + mean, + min, + ndim, + nonzero, + partition, + prod, + ptp, + put, + ravel, + repeat, + reshape, + resize, + round, + searchsorted, + shape, + size, + sort, + squeeze, + std, + sum, + swapaxes, + take, + trace, + transpose, + transpose as permute_dims, + var, +) +from .function_base import geomspace, linspace, logspace +from .getlimits import finfo, iinfo +from .memmap import memmap +from .numeric import ( + False_, + True_, + allclose, + arange, + argwhere, + array, + array_equal, + array_equiv, + asanyarray, + asarray, + ascontiguousarray, + asfortranarray, + astype, + base_repr, + binary_repr, + bitwise_not, + broadcast, + can_cast, + concatenate, + concatenate as concat, + convolve, + copyto, + correlate, + count_nonzero, + cross, + dot, + dtype, + empty, + empty_like, + flatiter, + flatnonzero, + from_dlpack, + frombuffer, + fromfile, + fromfunction, + fromiter, + fromstring, + full, + full_like, + identity, + indices, + inf, + inner, + isclose, + isfortran, + isscalar, + lexsort, + little_endian, + matmul, + may_share_memory, + min_scalar_type, + moveaxis, + nan, + ndarray, + nditer, + nested_iters, + newaxis, + ones, + ones_like, + outer, + promote_types, + putmask, + result_type, + roll, + rollaxis, + shares_memory, + tensordot, + ufunc, + vdot, + vecdot, + where, + zeros, + zeros_like, +) +from .numerictypes import ( + ScalarType, + bool, + bool_, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + cdouble, + character, + clongdouble, + complex64, + complex128, + complex192, + complex256, + complexfloating, + csingle, + datetime64, + datetime_as_string, + datetime_data, + double, + flexible, + float16, + float32, + float64, + float96, + float128, + floating, + generic, + half, + inexact, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + intp, + is_busday, + isdtype, + issubdtype, + long, + longdouble, + longlong, + number, + object_, + sctypeDict, + short, + signedinteger, + single, + str_, + timedelta64, + typecodes, + ubyte, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + unsignedinteger, + ushort, + void, +) +from .records import recarray, record +from .shape_base import ( + atleast_1d, + atleast_2d, + atleast_3d, + block, + hstack, + stack, + unstack, + vstack, +) +from .umath import ( + absolute, + absolute as abs, + add, + arccos, + arccos as acos, + arccosh, + arccosh as acosh, + arcsin, + arcsin as asin, + arcsinh, + arcsinh as asinh, + arctan, + arctan as atan, + arctan2, + arctan2 as atan2, + arctanh, + arctanh as atanh, + bitwise_and, + bitwise_count, + bitwise_or, + bitwise_xor, + cbrt, + ceil, + conj, + conjugate, + copysign, + cos, + cosh, + deg2rad, + degrees, + divide, + divmod, + e, + equal, + euler_gamma, + exp, + exp2, + expm1, + fabs, + float_power, + floor, + floor_divide, + fmax, + fmin, + fmod, + frexp, + frompyfunc, + gcd, + greater, + greater_equal, + heaviside, + hypot, + invert, + invert as bitwise_invert, + isfinite, + isinf, + isnan, + isnat, + lcm, + ldexp, + left_shift, + left_shift as bitwise_left_shift, + less, + less_equal, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + matvec, + maximum, + minimum, + mod, + modf, + multiply, + negative, + nextafter, + not_equal, + pi, + positive, + power, + power as pow, + rad2deg, + radians, + reciprocal, + remainder, + right_shift, + right_shift as bitwise_right_shift, + rint, + sign, + signbit, + sin, + sinh, + spacing, + sqrt, + square, + subtract, + tan, + tanh, + true_divide, + trunc, + vecmat, +) + +__all__ = [ + "False_", + "ScalarType", + "True_", + "abs", + "absolute", + "acos", + "acosh", + "add", + "all", + "allclose", + "amax", + "amin", + "any", + "arange", + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctan2", + "arctanh", + "argmax", + "argmin", + "argpartition", + "argsort", + "argwhere", + "around", + "array", + "array2string", + "array_equal", + "array_equiv", + "array_repr", + "array_str", + "asanyarray", + "asarray", + "ascontiguousarray", + "asfortranarray", + "asin", + "asinh", + "astype", + "atan", + "atan2", + "atanh", + "atleast_1d", + "atleast_2d", + "atleast_3d", + "base_repr", + "binary_repr", + "bitwise_and", + "bitwise_count", + "bitwise_invert", + "bitwise_left_shift", + "bitwise_not", + "bitwise_or", + "bitwise_right_shift", + "bitwise_xor", + "block", + "bool", + "bool_", + "broadcast", + "busday_count", + "busday_offset", + "busdaycalendar", + "byte", + "bytes_", + "can_cast", + "cbrt", + "cdouble", + "ceil", + "character", + "choose", + "clip", + "clongdouble", + "complex64", + "complex128", + "complex192", + "complex256", + "complexfloating", + "compress", + "concat", + "concatenate", + "conj", + "conjugate", + "convolve", + "copysign", + "copyto", + "correlate", + "cos", + "cosh", + "count_nonzero", + "cross", + "csingle", + "cumprod", + "cumsum", + "cumulative_prod", + "cumulative_sum", + "datetime64", + "datetime_as_string", + "datetime_data", + "deg2rad", + "degrees", + "diagonal", + "divide", + "divmod", + "dot", + "double", + "dtype", + "e", + "einsum", + "einsum_path", + "empty", + "empty_like", + "equal", + "errstate", + "euler_gamma", + "exp", + "exp2", + "expm1", + "fabs", + "finfo", + "flatiter", + "flatnonzero", + "flexible", + "float16", + "float32", + "float64", + "float96", + "float128", + "float_power", + "floating", + "floor", + "floor_divide", + "fmax", + "fmin", + "fmod", + "format_float_positional", + "format_float_scientific", + "frexp", + "from_dlpack", + "frombuffer", + "fromfile", + "fromfunction", + "fromiter", + "frompyfunc", + "fromstring", + "full", + "full_like", + "gcd", + "generic", + "geomspace", + "get_printoptions", + "getbufsize", + "geterr", + "geterrcall", + "greater", + "greater_equal", + "half", + "heaviside", + "hstack", + "hypot", + "identity", + "iinfo", + "indices", + "inexact", + "inf", + "inner", + "int8", + "int16", + "int32", + "int64", + "int_", + "intc", + "integer", + "intp", + "invert", + "is_busday", + "isclose", + "isdtype", + "isfinite", + "isfortran", + "isinf", + "isnan", + "isnat", + "isscalar", + "issubdtype", + "lcm", + "ldexp", + "left_shift", + "less", + "less_equal", + "lexsort", + "linspace", + "little_endian", + "log", + "log1p", + "log2", + "log10", + "logaddexp", + "logaddexp2", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "logspace", + "long", + "longdouble", + "longlong", + "matmul", + "matrix_transpose", + "matvec", + "max", + "maximum", + "may_share_memory", + "mean", + "memmap", + "min", + "min_scalar_type", + "minimum", + "mod", + "modf", + "moveaxis", + "multiply", + "nan", + "ndarray", + "ndim", + "nditer", + "negative", + "nested_iters", + "newaxis", + "nextafter", + "nonzero", + "not_equal", + "number", + "object_", + "ones", + "ones_like", + "outer", + "partition", + "permute_dims", + "pi", + "positive", + "pow", + "power", + "printoptions", + "prod", + "promote_types", + "ptp", + "put", + "putmask", + "rad2deg", + "radians", + "ravel", + "recarray", + "reciprocal", + "record", + "remainder", + "repeat", + "require", + "reshape", + "resize", + "result_type", + "right_shift", + "rint", + "roll", + "rollaxis", + "round", + "sctypeDict", + "searchsorted", + "set_printoptions", + "setbufsize", + "seterr", + "seterrcall", + "shape", + "shares_memory", + "short", + "sign", + "signbit", + "signedinteger", + "sin", + "single", + "sinh", + "size", + "sort", + "spacing", + "sqrt", + "square", + "squeeze", + "stack", + "std", + "str_", + "subtract", + "sum", + "swapaxes", + "take", + "tan", + "tanh", + "tensordot", + "timedelta64", + "trace", + "transpose", + "true_divide", + "trunc", + "typecodes", + "ubyte", + "ufunc", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintc", + "uintp", + "ulong", + "ulonglong", + "unsignedinteger", + "unstack", + "ushort", + "var", + "vdot", + "vecdot", + "vecmat", + "void", + "vstack", + "where", + "zeros", + "zeros_like", +] diff --git a/local_packages/numpy/_core/_add_newdocs.py b/local_packages/numpy/_core/_add_newdocs.py new file mode 100644 index 0000000..178bcc9 --- /dev/null +++ b/local_packages/numpy/_core/_add_newdocs.py @@ -0,0 +1,7161 @@ +""" +This is only meant to add docs to objects defined in C-extension modules. +The purpose is to allow easier editing of the docstrings without +requiring a re-compile. + +NOTE: Many of the methods of ndarray have corresponding functions. + If you update these docstrings, please keep also the ones in + _core/fromnumeric.py, matrixlib/defmatrix.py up-to-date. + +""" + +import textwrap + +from numpy._core.function_base import add_newdoc +from numpy._core.overrides import get_array_function_like_doc # noqa: F401 + +############################################################################### +# +# flatiter +# +# flatiter needs a toplevel description +# +############################################################################### + +add_newdoc('numpy._core', 'flatiter', + """ + Flat iterator object to iterate over arrays. + + A `flatiter` iterator is returned by ``x.flat`` for any array `x`. + It allows iterating over the array as if it were a 1-D array, + either in a for-loop or by calling its `next` method. + + Iteration is done in row-major, C-style order (the last + index varying the fastest). The iterator can also be indexed using + basic slicing or advanced indexing. + + See Also + -------- + ndarray.flat : Return a flat iterator over an array. + ndarray.flatten : Returns a flattened copy of an array. + + Notes + ----- + A `flatiter` iterator can not be constructed directly from Python code + by calling the `flatiter` constructor. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(6).reshape(2, 3) + >>> fl = x.flat + >>> type(fl) + + >>> for item in fl: + ... print(item) + ... + 0 + 1 + 2 + 3 + 4 + 5 + + >>> fl[2:4] + array([2, 3]) + + """) + +# flatiter attributes + +add_newdoc('numpy._core', 'flatiter', ('base', + """ + A reference to the array that is iterated over. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(5) + >>> fl = x.flat + >>> fl.base is x + True + + """)) + +add_newdoc('numpy._core', 'flatiter', ('coords', + """ + An N-dimensional tuple of current coordinates. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(6).reshape(2, 3) + >>> fl = x.flat + >>> fl.coords + (0, 0) + >>> next(fl) + 0 + >>> fl.coords + (0, 1) + + """)) + +add_newdoc('numpy._core', 'flatiter', ('index', + """ + Current flat index into the array. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(6).reshape(2, 3) + >>> fl = x.flat + >>> fl.index + 0 + >>> next(fl) + 0 + >>> fl.index + 1 + + """)) + +# flatiter methods + +add_newdoc('numpy._core', 'flatiter', ('__array__', + """ + __array__($self, dtype=None, /, *, copy=None) + -- + + flat.__array__([dtype], *, copy=None) + + Get array from iterator + + """)) + +add_newdoc('numpy._core', 'flatiter', ('copy', + """ + copy($self, /) + -- + + flat.copy() + + Get a copy of the iterator as a 1-D array. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(6).reshape(2, 3) + >>> x + array([[0, 1, 2], + [3, 4, 5]]) + >>> fl = x.flat + >>> fl.copy() + array([0, 1, 2, 3, 4, 5]) + + """)) + + +############################################################################### +# +# nditer +# +############################################################################### + +add_newdoc('numpy._core', 'nditer', + """ + nditer( + op, + flags=None, + op_flags=None, + op_dtypes=None, + order='K', + casting='safe', + op_axes=None, + itershape=None, + buffersize=0, + ) + -- + + nditer(op, flags=None, op_flags=None, op_dtypes=None, order='K', + casting='safe', op_axes=None, itershape=None, buffersize=0) + + Efficient multi-dimensional iterator object to iterate over arrays. + To get started using this object, see the + :ref:`introductory guide to array iteration `. + + Parameters + ---------- + op : ndarray or sequence of array_like + The array(s) to iterate over. + flags : sequence of str, optional + Flags to control the behavior of the iterator. + + * ``buffered`` enables buffering when required. + * ``c_index`` causes a C-order index to be tracked. + * ``f_index`` causes a Fortran-order index to be tracked. + * ``multi_index`` causes a multi-index, or a tuple of indices + with one per iteration dimension, to be tracked. + * ``common_dtype`` causes all the operands to be converted to + a common data type, with copying or buffering as necessary. + * ``copy_if_overlap`` causes the iterator to determine if read + operands have overlap with write operands, and make temporary + copies as necessary to avoid overlap. False positives (needless + copying) are possible in some cases. + * ``delay_bufalloc`` delays allocation of the buffers until + a reset() call is made. Allows ``allocate`` operands to + be initialized before their values are copied into the buffers. + * ``external_loop`` causes the ``values`` given to be + one-dimensional arrays with multiple values instead of + zero-dimensional arrays. + * ``grow_inner`` allows the ``value`` array sizes to be made + larger than the buffer size when both ``buffered`` and + ``external_loop`` is used. + * ``ranged`` allows the iterator to be restricted to a sub-range + of the iterindex values. + * ``refs_ok`` enables iteration of reference types, such as + object arrays. + * ``reduce_ok`` enables iteration of ``readwrite`` operands + which are broadcasted, also known as reduction operands. + * ``zerosize_ok`` allows `itersize` to be zero. + op_flags : list of list of str, optional + This is a list of flags for each operand. At minimum, one of + ``readonly``, ``readwrite``, or ``writeonly`` must be specified. + + * ``readonly`` indicates the operand will only be read from. + * ``readwrite`` indicates the operand will be read from and written to. + * ``writeonly`` indicates the operand will only be written to. + * ``no_broadcast`` prevents the operand from being broadcasted. + * ``contig`` forces the operand data to be contiguous. + * ``aligned`` forces the operand data to be aligned. + * ``nbo`` forces the operand data to be in native byte order. + * ``copy`` allows a temporary read-only copy if required. + * ``updateifcopy`` allows a temporary read-write copy if required. + * ``allocate`` causes the array to be allocated if it is None + in the ``op`` parameter. + * ``no_subtype`` prevents an ``allocate`` operand from using a subtype. + * ``arraymask`` indicates that this operand is the mask to use + for selecting elements when writing to operands with the + 'writemasked' flag set. The iterator does not enforce this, + but when writing from a buffer back to the array, it only + copies those elements indicated by this mask. + * ``writemasked`` indicates that only elements where the chosen + ``arraymask`` operand is True will be written to. + * ``overlap_assume_elementwise`` can be used to mark operands that are + accessed only in the iterator order, to allow less conservative + copying when ``copy_if_overlap`` is present. + op_dtypes : dtype or tuple of dtype(s), optional + The required data type(s) of the operands. If copying or buffering + is enabled, the data will be converted to/from their original types. + order : {'C', 'F', 'A', 'K'}, optional + Controls the iteration order. 'C' means C order, 'F' means + Fortran order, 'A' means 'F' order if all the arrays are Fortran + contiguous, 'C' order otherwise, and 'K' means as close to the + order the array elements appear in memory as possible. This also + affects the element memory order of ``allocate`` operands, as they + are allocated to be compatible with iteration order. + Default is 'K'. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur when making a copy + or buffering. Setting this to 'unsafe' is not recommended, + as it can adversely affect accumulations. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + op_axes : list of list of ints, optional + If provided, is a list of ints or None for each operands. + The list of axes for an operand is a mapping from the dimensions + of the iterator to the dimensions of the operand. A value of + -1 can be placed for entries, causing that dimension to be + treated as `newaxis`. + itershape : tuple of ints, optional + The desired shape of the iterator. This allows ``allocate`` operands + with a dimension mapped by op_axes not corresponding to a dimension + of a different operand to get a value not equal to 1 for that + dimension. + buffersize : int, optional + When buffering is enabled, controls the size of the temporary + buffers. Set to 0 for the default value. + + Attributes + ---------- + dtypes : tuple of dtype(s) + The data types of the values provided in `value`. This may be + different from the operand data types if buffering is enabled. + Valid only before the iterator is closed. + finished : bool + Whether the iteration over the operands is finished or not. + has_delayed_bufalloc : bool + If True, the iterator was created with the ``delay_bufalloc`` flag, + and no reset() function was called on it yet. + has_index : bool + If True, the iterator was created with either the ``c_index`` or + the ``f_index`` flag, and the property `index` can be used to + retrieve it. + has_multi_index : bool + If True, the iterator was created with the ``multi_index`` flag, + and the property `multi_index` can be used to retrieve it. + index + When the ``c_index`` or ``f_index`` flag was used, this property + provides access to the index. Raises a ValueError if accessed + and ``has_index`` is False. + iterationneedsapi : bool + Whether iteration requires access to the Python API, for example + if one of the operands is an object array. + iterindex : int + An index which matches the order of iteration. + itersize : int + Size of the iterator. + itviews + Structured view(s) of `operands` in memory, matching the reordered + and optimized iterator access pattern. Valid only before the iterator + is closed. + multi_index + When the ``multi_index`` flag was used, this property + provides access to the index. Raises a ValueError if accessed + accessed and ``has_multi_index`` is False. + ndim : int + The dimensions of the iterator. + nop : int + The number of iterator operands. + operands : tuple of operand(s) + The array(s) to be iterated over. Valid only before the iterator is + closed. + shape : tuple of ints + Shape tuple, the shape of the iterator. + value + Value of ``operands`` at current iteration. Normally, this is a + tuple of array scalars, but if the flag ``external_loop`` is used, + it is a tuple of one dimensional arrays. + + Notes + ----- + `nditer` supersedes `flatiter`. The iterator implementation behind + `nditer` is also exposed by the NumPy C API. + + The Python exposure supplies two iteration interfaces, one which follows + the Python iterator protocol, and another which mirrors the C-style + do-while pattern. The native Python approach is better in most cases, but + if you need the coordinates or index of an iterator, use the C-style pattern. + + Examples + -------- + Here is how we might write an ``iter_add`` function, using the + Python iterator protocol: + + >>> import numpy as np + + >>> def iter_add_py(x, y, out=None): + ... addop = np.add + ... it = np.nditer([x, y, out], [], + ... [['readonly'], ['readonly'], ['writeonly','allocate']]) + ... with it: + ... for (a, b, c) in it: + ... addop(a, b, out=c) + ... return it.operands[2] + + Here is the same function, but following the C-style pattern: + + >>> def iter_add(x, y, out=None): + ... addop = np.add + ... it = np.nditer([x, y, out], [], + ... [['readonly'], ['readonly'], ['writeonly','allocate']]) + ... with it: + ... while not it.finished: + ... addop(it[0], it[1], out=it[2]) + ... it.iternext() + ... return it.operands[2] + + Here is an example outer product function: + + >>> def outer_it(x, y, out=None): + ... mulop = np.multiply + ... it = np.nditer([x, y, out], ['external_loop'], + ... [['readonly'], ['readonly'], ['writeonly', 'allocate']], + ... op_axes=[list(range(x.ndim)) + [-1] * y.ndim, + ... [-1] * x.ndim + list(range(y.ndim)), + ... None]) + ... with it: + ... for (a, b, c) in it: + ... mulop(a, b, out=c) + ... return it.operands[2] + + >>> a = np.arange(2)+1 + >>> b = np.arange(3)+1 + >>> outer_it(a,b) + array([[1, 2, 3], + [2, 4, 6]]) + + Here is an example function which operates like a "lambda" ufunc: + + >>> def luf(lamdaexpr, *args, **kwargs): + ... '''luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)''' + ... nargs = len(args) + ... op = (kwargs.get('out',None),) + args + ... it = np.nditer(op, ['buffered','external_loop'], + ... [['writeonly','allocate','no_broadcast']] + + ... [['readonly','nbo','aligned']]*nargs, + ... order=kwargs.get('order','K'), + ... casting=kwargs.get('casting','safe'), + ... buffersize=kwargs.get('buffersize',0)) + ... while not it.finished: + ... it[0] = lamdaexpr(*it[1:]) + ... it.iternext() + ... return it.operands[0] + + >>> a = np.arange(5) + >>> b = np.ones(5) + >>> luf(lambda i,j:i*i + j/2, a, b) + array([ 0.5, 1.5, 4.5, 9.5, 16.5]) + + If operand flags ``"writeonly"`` or ``"readwrite"`` are used the + operands may be views into the original data with the + `WRITEBACKIFCOPY` flag. In this case `nditer` must be used as a + context manager or the `nditer.close` method must be called before + using the result. The temporary data will be written back to the + original data when the :meth:`~object.__exit__` function is called + but not before: + + >>> a = np.arange(6, dtype='i4')[::-2] + >>> with np.nditer(a, [], + ... [['writeonly', 'updateifcopy']], + ... casting='unsafe', + ... op_dtypes=[np.dtype('f4')]) as i: + ... x = i.operands[0] + ... x[:] = [-1, -2, -3] + ... # a still unchanged here + >>> a, x + (array([-1, -2, -3], dtype=int32), array([-1., -2., -3.], dtype=float32)) + + It is important to note that once the iterator is exited, dangling + references (like `x` in the example) may or may not share data with + the original data `a`. If writeback semantics were active, i.e. if + `x.base.flags.writebackifcopy` is `True`, then exiting the iterator + will sever the connection between `x` and `a`, writing to `x` will + no longer write to `a`. If writeback semantics are not active, then + `x.data` will still point at some part of `a.data`, and writing to + one will affect the other. + + Context management and the `close` method appeared in version 1.15.0. + + """) + +# nditer attributes + +add_newdoc('numpy._core', 'nditer', ('operands', + """ + operands[`Slice`] + + The array(s) to be iterated over. Valid only before the iterator is closed. + """)) + +# nditer methods + +add_newdoc('numpy._core', 'nditer', ('copy', + """ + copy($self, /) + -- + + copy() + + Get a copy of the iterator in its current state. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(10) + >>> y = x + 1 + >>> it = np.nditer([x, y]) + >>> next(it) + (array(0), array(1)) + >>> it2 = it.copy() + >>> next(it2) + (array(1), array(2)) + + """)) + +add_newdoc('numpy._core', 'nditer', ('debug_print', + """ + debug_print($self, /) + -- + + debug_print() + + Print the current state of the `nditer` instance and debug info to stdout. + + """)) + +add_newdoc('numpy._core', 'nditer', ('enable_external_loop', + """ + enable_external_loop($self, /) + -- + + enable_external_loop() + + When the "external_loop" was not used during construction, but + is desired, this modifies the iterator to behave as if the flag + was specified. + + """)) + +add_newdoc('numpy._core', 'nditer', ('iternext', + """ + iternext($self, /) + -- + + iternext() + + Check whether iterations are left, and perform a single internal iteration + without returning the result. Used in the C-style pattern do-while + pattern. For an example, see `nditer`. + + Returns + ------- + iternext : bool + Whether or not there are iterations left. + + """)) + +add_newdoc('numpy._core', 'nditer', ('remove_axis', + """ + remove_axis($self, i, /) + -- + + remove_axis(i, /) + + Removes axis `i` from the iterator. Requires that the flag "multi_index" + be enabled. + + """)) + +add_newdoc('numpy._core', 'nditer', ('remove_multi_index', + """ + remove_multi_index($self, /) + -- + + remove_multi_index() + + When the "multi_index" flag was specified, this removes it, allowing + the internal iteration structure to be optimized further. + + """)) + +add_newdoc('numpy._core', 'nditer', ('reset', + """ + reset($self, /) + -- + + reset() + + Reset the iterator to its initial state. + + """)) + +add_newdoc('numpy._core', 'nditer', ('close', + """ + close($self, /) + -- + + close() + + Resolve all writeback semantics in writeable operands. + + See Also + -------- + :ref:`nditer-context-manager` + + """)) + +# nested_iters + +add_newdoc('numpy._core', 'nested_iters', + """ + nested_iters( + op, + axes, + flags=None, + op_flags=None, + op_dtypes=None, + order='K', + casting='safe', + buffersize=0, + ) + -- + + nested_iters(op, axes, flags=None, op_flags=None, op_dtypes=None, + order='K', casting='safe', buffersize=0) + + Create nditers for use in nested loops + + Create a tuple of `nditer` objects which iterate in nested loops over + different axes of the op argument. The first iterator is used in the + outermost loop, the last in the innermost loop. Advancing one will + change the subsequent iterators to point at its new element. + + Parameters + ---------- + op : ndarray or sequence of array_like + The array(s) to iterate over. + axes : list of list of int + Each item is used as an "op_axes" argument to an nditer + flags, op_flags, op_dtypes, order, casting, buffersize (optional) + See `nditer` parameters of the same name + + Returns + ------- + iters : tuple of nditer + An nditer for each item in `axes`, outermost first + + See Also + -------- + nditer + + Examples + -------- + + Basic usage. Note how y is the "flattened" version of + [a[:, 0, :], a[:, 1, 0], a[:, 2, :]] since we specified + the first iter's axes as [1] + + >>> import numpy as np + >>> a = np.arange(12).reshape(2, 3, 2) + >>> i, j = np.nested_iters(a, [[1], [0, 2]], flags=["multi_index"]) + >>> for x in i: + ... print(i.multi_index) + ... for y in j: + ... print('', j.multi_index, y) + (0,) + (0, 0) 0 + (0, 1) 1 + (1, 0) 6 + (1, 1) 7 + (1,) + (0, 0) 2 + (0, 1) 3 + (1, 0) 8 + (1, 1) 9 + (2,) + (0, 0) 4 + (0, 1) 5 + (1, 0) 10 + (1, 1) 11 + + """) + +############################################################################### +# +# broadcast +# +############################################################################### + +add_newdoc('numpy._core', 'broadcast', + """ + broadcast(*arrays) + -- + + Produce an object that mimics broadcasting. + + Parameters + ---------- + in1, in2, ... : array_like + Input parameters. + + Returns + ------- + b : broadcast object + Broadcast the input parameters against one another, and + return an object that encapsulates the result. + Amongst others, it has ``shape`` and ``nd`` properties, and + may be used as an iterator. + + See Also + -------- + broadcast_arrays + broadcast_to + broadcast_shapes + + Examples + -------- + + Manually adding two vectors, using broadcasting: + + >>> import numpy as np + >>> x = np.array([[1], [2], [3]]) + >>> y = np.array([4, 5, 6]) + >>> b = np.broadcast(x, y) + + >>> out = np.empty(b.shape) + >>> out.flat = [u+v for (u,v) in b] + >>> out + array([[5., 6., 7.], + [6., 7., 8.], + [7., 8., 9.]]) + + Compare against built-in broadcasting: + + >>> x + y + array([[5, 6, 7], + [6, 7, 8], + [7, 8, 9]]) + + """) + +# attributes + +add_newdoc('numpy._core', 'broadcast', ('index', + """ + current index in broadcasted result + + Examples + -------- + + >>> import numpy as np + >>> x = np.array([[1], [2], [3]]) + >>> y = np.array([4, 5, 6]) + >>> b = np.broadcast(x, y) + >>> b.index + 0 + >>> next(b), next(b), next(b) + ((1, 4), (1, 5), (1, 6)) + >>> b.index + 3 + + """)) + +add_newdoc('numpy._core', 'broadcast', ('iters', + """ + tuple of iterators along ``self``'s "components." + + Returns a tuple of `numpy.flatiter` objects, one for each "component" + of ``self``. + + See Also + -------- + numpy.flatiter + + Examples + -------- + + >>> import numpy as np + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> row, col = b.iters + >>> next(row), next(col) + (1, 4) + + """)) + +add_newdoc('numpy._core', 'broadcast', ('ndim', + """ + Number of dimensions of broadcasted result. Alias for `nd`. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.ndim + 2 + + """)) + +add_newdoc('numpy._core', 'broadcast', ('nd', + """ + Number of dimensions of broadcasted result. For code intended for NumPy + 1.12.0 and later the more consistent `ndim` is preferred. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.nd + 2 + + """)) + +add_newdoc('numpy._core', 'broadcast', ('numiter', + """ + Number of iterators possessed by the broadcasted result. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.numiter + 2 + + """)) + +add_newdoc('numpy._core', 'broadcast', ('shape', + """ + Shape of broadcasted result. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.shape + (3, 3) + + """)) + +add_newdoc('numpy._core', 'broadcast', ('size', + """ + Total size of broadcasted result. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.size + 9 + + """)) + +# methods + +add_newdoc('numpy._core', 'broadcast', ('reset', + """ + reset($self, /) + -- + + reset() + + Reset the broadcasted result's iterator(s). + + Parameters + ---------- + None + + Returns + ------- + None + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.index + 0 + >>> next(b), next(b), next(b) + ((1, 4), (2, 4), (3, 4)) + >>> b.index + 3 + >>> b.reset() + >>> b.index + 0 + + """)) + +############################################################################### +# +# numpy functions +# +############################################################################### + +add_newdoc('numpy._core.multiarray', 'array', + """ + array( + object, + dtype=None, + *, + copy=True, + order='K', + subok=False, + ndmin=0, + ndmax=0, + like=None, + ) + -- + + array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0, + ndmax=0, like=None) + + Create an array. + + Parameters + ---------- + object : array_like + An array, any object exposing the array interface, an object whose + ``__array__`` method returns an array, or any (nested) sequence. + If object is a scalar, a 0-dimensional array containing object is + returned. + dtype : data-type, optional + The desired data-type for the array. If not given, NumPy will try to use + a default ``dtype`` that can represent the values (by applying promotion + rules when necessary.) + copy : bool, optional + If ``True`` (default), then the array data is copied. If ``None``, + a copy will only be made if ``__array__`` returns a copy, if obj is + a nested sequence, or if a copy is needed to satisfy any of the other + requirements (``dtype``, ``order``, etc.). Note that any copy of + the data is shallow, i.e., for arrays with object dtype, the new + array will point to the same objects. See Examples for `ndarray.copy`. + For ``False`` it raises a ``ValueError`` if a copy cannot be avoided. + Default: ``True``. + order : {'K', 'A', 'C', 'F'}, optional + Specify the memory layout of the array. If object is not an array, the + newly created array will be in C order (row major) unless 'F' is + specified, in which case it will be in Fortran order (column major). + If object is an array the following holds. + + ===== ========= =================================================== + order no copy copy=True + ===== ========= =================================================== + 'K' unchanged F & C order preserved, otherwise most similar order + 'A' unchanged F order if input is F and not C, otherwise C order + 'C' C order C order + 'F' F order F order + ===== ========= =================================================== + + When ``copy=None`` and a copy is made for other reasons, the result is + the same as if ``copy=True``, with some exceptions for 'A', see the + Notes section. The default order is 'K'. + subok : bool, optional + If True, then sub-classes will be passed-through, otherwise + the returned array will be forced to be a base-class array (default). + ndmin : int, optional + Specifies the minimum number of dimensions that the resulting + array should have. Ones will be prepended to the shape as + needed to meet this requirement. + ndmax : int, optional + Specifies the maximum number of dimensions to create when inferring + shape from nested sequences. By default (ndmax=0), NumPy recurses + through all nesting levels (up to the compile-time constant + ``NPY_MAXDIMS``). + Setting ``ndmax`` stops recursion at the specified depth, preserving + deeper nested structures as objects instead of promoting them to + higher-dimensional arrays. In this case, ``dtype=object`` is required. + + .. versionadded:: 2.4.0 + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + An array object satisfying the specified requirements. + + See Also + -------- + empty_like : Return an empty array with shape and type of input. + ones_like : Return an array of ones with shape and type of input. + zeros_like : Return an array of zeros with shape and type of input. + full_like : Return a new array with shape of input filled with value. + empty : Return a new uninitialized array. + ones : Return a new array setting values to one. + zeros : Return a new array setting values to zero. + full : Return a new array of given shape filled with value. + copy : Return an array copy of the given object. + + + Notes + ----- + When order is 'A' and ``object`` is an array in neither 'C' nor 'F' order, + and a copy is forced by a change in dtype, then the order of the result is + not necessarily 'C' as expected. This is likely a bug. + + Examples + -------- + >>> import numpy as np + >>> np.array([1, 2, 3]) + array([1, 2, 3]) + + Upcasting: + + >>> np.array([1, 2, 3.0]) + array([ 1., 2., 3.]) + + More than one dimension: + + >>> np.array([[1, 2], [3, 4]]) + array([[1, 2], + [3, 4]]) + + Minimum dimensions 2: + + >>> np.array([1, 2, 3], ndmin=2) + array([[1, 2, 3]]) + + Type provided: + + >>> np.array([1, 2, 3], dtype=complex) + array([ 1.+0.j, 2.+0.j, 3.+0.j]) + + Data-type consisting of more than one element: + + >>> x = np.array([(1,2),(3,4)],dtype=[('a','>> x['a'] + array([1, 3], dtype=int32) + + Creating an array from sub-classes: + + >>> np.array(np.asmatrix('1 2; 3 4')) + array([[1, 2], + [3, 4]]) + + >>> np.array(np.asmatrix('1 2; 3 4'), subok=True) + matrix([[1, 2], + [3, 4]]) + + Limiting the maximum dimensions with ``ndmax``: + + >>> a = np.array([[1, 2], [3, 4]], dtype=object, ndmax=2) + >>> a + array([[1, 2], + [3, 4]], dtype=object) + >>> a.shape + (2, 2) + + >>> b = np.array([[1, 2], [3, 4]], dtype=object, ndmax=1) + >>> b + array([list([1, 2]), list([3, 4])], dtype=object) + >>> b.shape + (2,) + + """) + +add_newdoc('numpy._core.multiarray', 'asarray', + """ + asarray(a, dtype=None, order=None, *, device=None, copy=None, like=None) + -- + + asarray(a, dtype=None, order=None, *, device=None, copy=None, like=None) + + Convert the input to an array. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes lists, lists of tuples, tuples, tuples of tuples, tuples + of lists and ndarrays. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F', 'A', 'K'}, optional + The memory layout of the output. + 'C' gives a row-major layout (C-style), + 'F' gives a column-major layout (Fortran-style). + 'C' and 'F' will copy if needed to ensure the output format. + 'A' (any) is equivalent to 'F' if input a is non-contiguous or Fortran-contiguous, otherwise, it is equivalent to 'C'. + Unlike 'C' or 'F', 'A' does not ensure that the result is contiguous. + 'K' (keep) is the default and preserves the input order for the output. + device : str, optional + The device on which to place the created array. Default: ``None``. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + copy : bool, optional + If ``True``, then the object is copied. If ``None`` then the object is + copied only if needed, i.e. if ``__array__`` returns a copy, if obj + is a nested sequence, or if a copy is needed to satisfy any of + the other requirements (``dtype``, ``order``, etc.). + For ``False`` it raises a ``ValueError`` if a copy cannot be avoided. + Default: ``None``. + + .. versionadded:: 2.0.0 + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Array interpretation of ``a``. No copy is performed if the input + is already an ndarray with matching dtype and order. If ``a`` is a + subclass of ndarray, a base class ndarray is returned. + + See Also + -------- + asanyarray : Similar function which passes through subclasses. + ascontiguousarray : Convert input to a contiguous array. + asfortranarray : Convert input to an ndarray with column-major memory order. + asarray_chkfinite : Similar function which checks input for NaNs and Infs. + fromiter : Create an array from an iterator. + fromfunction : Construct an array by executing a function on grid positions. + + Examples + -------- + Convert a list into an array: + + >>> a = [1, 2] + >>> import numpy as np + >>> np.asarray(a) + array([1, 2]) + + Existing arrays are not copied: + + >>> a = np.array([1, 2]) + >>> np.asarray(a) is a + True + + If `dtype` is set, array is copied only if dtype does not match: + + >>> a = np.array([1, 2], dtype=np.float32) + >>> np.shares_memory(np.asarray(a, dtype=np.float32), a) + True + >>> np.shares_memory(np.asarray(a, dtype=np.float64), a) + False + + Contrary to `asanyarray`, ndarray subclasses are not passed through: + + >>> issubclass(np.recarray, np.ndarray) + True + >>> a = np.array([(1., 2), (3., 4)], dtype='f4,i4').view(np.recarray) + >>> np.asarray(a) is a + False + >>> np.asanyarray(a) is a + True + + """) + +add_newdoc('numpy._core.multiarray', 'asanyarray', + """ + asanyarray(a, dtype=None, order=None, *, device=None, copy=None, like=None) + -- + + asanyarray(a, dtype=None, order=None, *, device=None, copy=None, like=None) + + Convert the input to an ndarray, but pass ndarray subclasses through. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes scalars, lists, lists of tuples, tuples, tuples of tuples, + tuples of lists, and ndarrays. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F', 'A', 'K'}, optional + The memory layout of the output. + 'C' gives a row-major layout (C-style), + 'F' gives a column-major layout (Fortran-style). + 'C' and 'F' will copy if needed to ensure the output format. + 'A' (any) is equivalent to 'F' if input a is non-contiguous or Fortran-contiguous, otherwise, it is equivalent to 'C'. + Unlike 'C' or 'F', 'A' does not ensure that the result is contiguous. + 'K' (keep) preserves the input order for the output. + 'C' is the default. + device : str, optional + The device on which to place the created array. Default: ``None``. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.1.0 + + copy : bool, optional + If ``True``, then the object is copied. If ``None`` then the object is + copied only if needed, i.e. if ``__array__`` returns a copy, if obj + is a nested sequence, or if a copy is needed to satisfy any of + the other requirements (``dtype``, ``order``, etc.). + For ``False`` it raises a ``ValueError`` if a copy cannot be avoided. + Default: ``None``. + + .. versionadded:: 2.1.0 + + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray or an ndarray subclass + Array interpretation of `a`. If `a` is an ndarray or a subclass + of ndarray, it is returned as-is and no copy is performed. + + See Also + -------- + asarray : Similar function which always returns ndarrays. + ascontiguousarray : Convert input to a contiguous array. + asfortranarray : Convert input to an ndarray with column-major memory order. + asarray_chkfinite : Similar function which checks input for NaNs and Infs. + fromiter : Create an array from an iterator. + fromfunction : Construct an array by executing a function on grid positions. + + Examples + -------- + Convert a list into an array: + + >>> a = [1, 2] + >>> import numpy as np + >>> np.asanyarray(a) + array([1, 2]) + + Instances of `ndarray` subclasses are passed through as-is: + + >>> a = np.array([(1., 2), (3., 4)], dtype='f4,i4').view(np.recarray) + >>> np.asanyarray(a) is a + True + + """) + +add_newdoc('numpy._core.multiarray', 'ascontiguousarray', + """ + ascontiguousarray(a, dtype=None, *, like=None) + -- + + ascontiguousarray(a, dtype=None, *, like=None) + + Return a contiguous array (ndim >= 1) in memory (C order). + + Parameters + ---------- + a : array_like + Input array. + dtype : str or dtype object, optional + Data-type of returned array. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Contiguous array of same shape and content as `a`, with type `dtype` + if specified. + + See Also + -------- + asfortranarray : Convert input to an ndarray with column-major memory order. + require : Return an ndarray that satisfies requirements. + ndarray.flags : Information about the memory layout of the array. + + Examples + -------- + Starting with a Fortran-contiguous array: + + >>> import numpy as np + >>> x = np.ones((2, 3), order='F') + >>> x.flags['F_CONTIGUOUS'] + True + + Calling ``ascontiguousarray`` makes a C-contiguous copy: + + >>> y = np.ascontiguousarray(x) + >>> y.flags['C_CONTIGUOUS'] + True + >>> np.may_share_memory(x, y) + False + + Now, starting with a C-contiguous array: + + >>> x = np.ones((2, 3), order='C') + >>> x.flags['C_CONTIGUOUS'] + True + + Then, calling ``ascontiguousarray`` returns the same object: + + >>> y = np.ascontiguousarray(x) + >>> x is y + True + + Note: This function returns an array with at least one-dimension (1-d) + so it will not preserve 0-d arrays. + + """) + +add_newdoc('numpy._core.multiarray', 'asfortranarray', + """ + asfortranarray(a, dtype=None, *, like=None) + -- + + asfortranarray(a, dtype=None, *, like=None) + + Return an array (ndim >= 1) laid out in Fortran order in memory. + + Parameters + ---------- + a : array_like + Input array. + dtype : str or dtype object, optional + By default, the data-type is inferred from the input data. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + The input `a` in Fortran, or column-major, order. + + See Also + -------- + ascontiguousarray : Convert input to a contiguous (C order) array. + asanyarray : Convert input to an ndarray with either row or + column-major memory order. + require : Return an ndarray that satisfies requirements. + ndarray.flags : Information about the memory layout of the array. + + Examples + -------- + Starting with a C-contiguous array: + + >>> import numpy as np + >>> x = np.ones((2, 3), order='C') + >>> x.flags['C_CONTIGUOUS'] + True + + Calling ``asfortranarray`` makes a Fortran-contiguous copy: + + >>> y = np.asfortranarray(x) + >>> y.flags['F_CONTIGUOUS'] + True + >>> np.may_share_memory(x, y) + False + + Now, starting with a Fortran-contiguous array: + + >>> x = np.ones((2, 3), order='F') + >>> x.flags['F_CONTIGUOUS'] + True + + Then, calling ``asfortranarray`` returns the same object: + + >>> y = np.asfortranarray(x) + >>> x is y + True + + Note: This function returns an array with at least one-dimension (1-d) + so it will not preserve 0-d arrays. + + """) + +add_newdoc('numpy._core.multiarray', 'empty', + """ + empty(shape, dtype=None, order='C', *, device=None, like=None) + -- + + empty(shape, dtype=None, order='C', *, device=None, like=None) + + Return a new array of given shape and type, without initializing entries. + + Parameters + ---------- + shape : int or tuple of int + Shape of the empty array, e.g., ``(2, 3)`` or ``2``. + dtype : data-type, optional + Desired output data-type for the array, e.g, `numpy.int8`. Default is + `numpy.float64`. + order : {'C', 'F'}, optional, default: 'C' + Whether to store multi-dimensional data in row-major + (C-style) or column-major (Fortran-style) order in memory. + device : str, optional + The device on which to place the created array. Default: ``None``. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Array of uninitialized (arbitrary) data of the given shape, dtype, and + order. Object arrays will be initialized to None. + + See Also + -------- + empty_like : Return an empty array with shape and type of input. + ones : Return a new array setting values to one. + zeros : Return a new array setting values to zero. + full : Return a new array of given shape filled with value. + + Notes + ----- + Unlike other array creation functions (e.g. `zeros`, `ones`, `full`), + `empty` does not initialize the values of the array, and may therefore be + marginally faster. However, the values stored in the newly allocated array + are arbitrary. For reproducible behavior, be sure to set each element of + the array before reading. + + Examples + -------- + >>> import numpy as np + >>> np.empty([2, 2]) + array([[ -9.74499359e+001, 6.69583040e-309], + [ 2.13182611e-314, 3.06959433e-309]]) #uninitialized + + >>> np.empty([2, 2], dtype=int) + array([[-1073741821, -1067949133], + [ 496041986, 19249760]]) #uninitialized + + """) + +add_newdoc('numpy._core.multiarray', 'scalar', + """ + scalar(dtype, obj) + + Return a new scalar array of the given type initialized with obj. + + This function is meant mainly for pickle support. `dtype` must be a + valid data-type descriptor. If `dtype` corresponds to an object + descriptor, then `obj` can be any object, otherwise `obj` must be a + string. If `obj` is not given, it will be interpreted as None for object + type and as zeros for all other types. + + """) # sufficient null bytes for all number dtypes + +add_newdoc('numpy._core.multiarray', 'zeros', + """ + zeros(shape, dtype=None, order='C', *, device=None, like=None) + -- + + zeros(shape, dtype=None, order='C', *, device=None, like=None) + + Return a new array of given shape and type, filled with zeros. + + Parameters + ---------- + shape : int or tuple of ints + Shape of the new array, e.g., ``(2, 3)`` or ``2``. + dtype : data-type, optional + The desired data-type for the array, e.g., `numpy.int8`. Default is + `numpy.float64`. + order : {'C', 'F'}, optional, default: 'C' + Whether to store multi-dimensional data in row-major + (C-style) or column-major (Fortran-style) order in memory. + device : str, optional + The device on which to place the created array. Default: ``None``. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Array of zeros with the given shape, dtype, and order. + + See Also + -------- + zeros_like : Return an array of zeros with shape and type of input. + empty : Return a new uninitialized array. + ones : Return a new array setting values to one. + full : Return a new array of given shape filled with value. + + Examples + -------- + >>> import numpy as np + >>> np.zeros(5) + array([ 0., 0., 0., 0., 0.]) + + >>> np.zeros((5,), dtype=int) + array([0, 0, 0, 0, 0]) + + >>> np.zeros((2, 1)) + array([[ 0.], + [ 0.]]) + + >>> s = (2,2) + >>> np.zeros(s) + array([[ 0., 0.], + [ 0., 0.]]) + + >>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype + array([(0, 0), (0, 0)], + dtype=[('x', '>> import numpy as np + >>> np.fromstring('1 2', dtype=int, sep=' ') + array([1, 2]) + >>> np.fromstring('1, 2', dtype=int, sep=',') + array([1, 2]) + + """) + +add_newdoc('numpy._core.multiarray', 'compare_chararrays', + """ + compare_chararrays(a1, a2, cmp, rstrip) + -- + + compare_chararrays(a1, a2, cmp, rstrip) + + Performs element-wise comparison of two string arrays using the + comparison operator specified by `cmp`. + + Parameters + ---------- + a1, a2 : array_like + Arrays to be compared. + cmp : {"<", "<=", "==", ">=", ">", "!="} + Type of comparison. + rstrip : bool + If True, the spaces at the end of strings are removed before the comparison. + + Returns + ------- + out : ndarray + The output array of type `numpy.bool` with the same shape as `a1` and `a2`. + + Raises + ------ + ValueError + If `cmp` is not valid. + TypeError + If at least one of `a1` or `a2` is a non-string array + + Examples + -------- + >>> import numpy as np + >>> a = np.array(["a", "b", "cde"]) + >>> b = np.array(["a", "a", "dec"]) + >>> np.char.compare_chararrays(a, b, ">", True) + array([False, True, False]) + + """) + +add_newdoc('numpy._core.multiarray', 'fromiter', + """ + fromiter(iter, dtype, count=-1, *, like=None) + -- + + fromiter(iter, dtype, count=-1, *, like=None) + + Create a new 1-dimensional array from an iterable object. + + Parameters + ---------- + iter : iterable object + An iterable object providing data for the array. + dtype : data-type + The data-type of the returned array. + + .. versionchanged:: 1.23 + Object and subarray dtypes are now supported (note that the final + result is not 1-D for a subarray dtype). + + count : int, optional + The number of items to read from *iterable*. The default is -1, + which means all data is read. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + The output array. + + Notes + ----- + Specify `count` to improve performance. It allows ``fromiter`` to + pre-allocate the output array, instead of resizing it on demand. + + Examples + -------- + >>> import numpy as np + >>> iterable = (x*x for x in range(5)) + >>> np.fromiter(iterable, float) + array([ 0., 1., 4., 9., 16.]) + + A carefully constructed subarray dtype will lead to higher dimensional + results: + + >>> iterable = ((x+1, x+2) for x in range(5)) + >>> np.fromiter(iterable, dtype=np.dtype((int, 2))) + array([[1, 2], + [2, 3], + [3, 4], + [4, 5], + [5, 6]]) + + + """) + +add_newdoc('numpy._core.multiarray', 'fromfile', + """ + fromfile(file, dtype=None, count=-1, sep='', offset=0, *, like=None) + -- + + fromfile(file, dtype=float, count=-1, sep='', offset=0, *, like=None) + + Construct an array from data in a text or binary file. + + A highly efficient way of reading binary data with a known data-type, + as well as parsing simply formatted text files. Data written using the + `tofile` method can be read using this function. + + Parameters + ---------- + file : file or str or Path + An open file object, a string containing the filename, or a Path object. + When reading from a file object it must support random access + (i.e. it must have tell and seek methods). + dtype : data-type + Data type of the returned array. + For binary files, it is used to determine the size and byte-order + of the items in the file. + Most builtin numeric types are supported and extension types may be supported. + count : int + Number of items to read. ``-1`` means all items (i.e., the complete + file). + sep : str + Separator between items if file is a text file. + Empty ("") separator means the file should be treated as binary. + Spaces (" ") in the separator match zero or more whitespace characters. + A separator consisting only of spaces must match at least one + whitespace. + offset : int + The offset (in bytes) from the file's current position. Defaults to 0. + Only permitted for binary files. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + See also + -------- + load, save + ndarray.tofile + loadtxt : More flexible way of loading data from a text file. + + Notes + ----- + Do not rely on the combination of `tofile` and `fromfile` for + data storage, as the binary files generated are not platform + independent. In particular, no byte-order or data-type information is + saved. Data can be stored in the platform independent ``.npy`` format + using `save` and `load` instead. + + Examples + -------- + Construct an ndarray: + + >>> import numpy as np + >>> dt = np.dtype([('time', [('min', np.int64), ('sec', np.int64)]), + ... ('temp', float)]) + >>> x = np.zeros((1,), dtype=dt) + >>> x['time']['min'] = 10; x['temp'] = 98.25 + >>> x + array([((10, 0), 98.25)], + dtype=[('time', [('min', '>> import tempfile + >>> fname = tempfile.mkstemp()[1] + >>> x.tofile(fname) + + Read the raw data from disk: + + >>> np.fromfile(fname, dtype=dt) + array([((10, 0), 98.25)], + dtype=[('time', [('min', '>> np.save(fname, x) + >>> np.load(fname + '.npy') + array([((10, 0), 98.25)], + dtype=[('time', [('min', '>> dt = np.dtype(int) + >>> dt = dt.newbyteorder('>') + >>> np.frombuffer(buf, dtype=dt) # doctest: +SKIP + + The data of the resulting array will not be byteswapped, but will be + interpreted correctly. + + This function creates a view into the original object. This should be safe + in general, but it may make sense to copy the result when the original + object is mutable or untrusted. + + Examples + -------- + >>> import numpy as np + >>> s = b'hello world' + >>> np.frombuffer(s, dtype='S1', count=5, offset=6) + array([b'w', b'o', b'r', b'l', b'd'], dtype='|S1') + + >>> np.frombuffer(b'\\x01\\x02', dtype=np.uint8) + array([1, 2], dtype=uint8) + >>> np.frombuffer(b'\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3) + array([1, 2, 3], dtype=uint8) + + """) + +add_newdoc('numpy._core.multiarray', 'from_dlpack', + """ + from_dlpack(x, /, *, device=None, copy=None) + -- + + from_dlpack(x, /, *, device=None, copy=None) + + Create a NumPy array from an object implementing the ``__dlpack__`` + protocol. Generally, the returned NumPy array is a view of the input + object. See [1]_ and [2]_ for more details. + + Parameters + ---------- + x : object + A Python object that implements the ``__dlpack__`` and + ``__dlpack_device__`` methods. + device : device, optional + Device on which to place the created array. Default: ``None``. + Must be ``"cpu"`` if passed which may allow importing an array + that is not already CPU available. + copy : bool, optional + Boolean indicating whether or not to copy the input. If ``True``, + the copy will be made. If ``False``, the function will never copy, + and will raise ``BufferError`` in case a copy is deemed necessary. + Passing it requests a copy from the exporter who may or may not + implement the capability. + If ``None``, the function will reuse the existing memory buffer if + possible and copy otherwise. Default: ``None``. + + + Returns + ------- + out : ndarray + + References + ---------- + .. [1] Array API documentation, + https://data-apis.org/array-api/latest/design_topics/data_interchange.html#syntax-for-data-interchange-with-dlpack + + .. [2] Python specification for DLPack, + https://dmlc.github.io/dlpack/latest/python_spec.html + + Examples + -------- + >>> import torch # doctest: +SKIP + >>> x = torch.arange(10) # doctest: +SKIP + >>> # create a view of the torch tensor "x" in NumPy + >>> y = np.from_dlpack(x) # doctest: +SKIP + """) + +add_newdoc('numpy._core.multiarray', 'correlate', + """cross_correlate(a,v, mode=0)""") + +add_newdoc('numpy._core.multiarray', 'arange', + """ + arange(start_or_stop, /, stop=None, step=1, *, dtype=None, device=None, like=None) + -- + + arange([start,] stop[, step,], dtype=None, *, device=None, like=None) + + Return evenly spaced values within a given interval. + + ``arange`` can be called with a varying number of positional arguments: + + * ``arange(stop)``: Values are generated within the half-open interval + ``[0, stop)`` (in other words, the interval including `start` but + excluding `stop`). + * ``arange(start, stop)``: Values are generated within the half-open + interval ``[start, stop)``. + * ``arange(start, stop, step)`` Values are generated within the half-open + interval ``[start, stop)``, with spacing between values given by + ``step``. + + For integer arguments the function is roughly equivalent to the Python + built-in :py:class:`range`, but returns an ndarray rather than a ``range`` + instance. + + When using a non-integer step, such as 0.1, it is often better to use + `numpy.linspace`. + + See the Warning sections below for more information. + + Parameters + ---------- + start : integer or real, optional + Start of interval. The interval includes this value. The default + start value is 0. + stop : integer or real + End of interval. The interval does not include this value, except + in some cases where `step` is not an integer and floating point + round-off affects the length of `out`. + step : integer or real, optional + Spacing between values. For any output `out`, this is the distance + between two adjacent values, ``out[i+1] - out[i]``. The default + step size is 1. If `step` is specified as a position argument, + `start` must also be given. + dtype : dtype, optional + The type of the output array. If `dtype` is not given, infer the data + type from the other input arguments. + device : str, optional + The device on which to place the created array. Default: ``None``. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + arange : ndarray + Array of evenly spaced values. + + For floating point arguments, the length of the result is + ``ceil((stop - start)/step)``. Because of floating point overflow, + this rule may result in the last element of `out` being greater + than `stop`. + + Warnings + -------- + The length of the output might not be numerically stable. + + Another stability issue is due to the internal implementation of + `numpy.arange`. + The actual step value used to populate the array is + ``dtype(start + step) - dtype(start)`` and not `step`. Precision loss + can occur here, due to casting or due to using floating points when + `start` is much larger than `step`. This can lead to unexpected + behaviour. For example:: + + >>> np.arange(0, 5, 0.5, dtype=int) + array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) + >>> np.arange(-3, 3, 0.5, dtype=int) + array([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8]) + + In such cases, the use of `numpy.linspace` should be preferred. + + The built-in :py:class:`range` generates :std:doc:`Python built-in integers + that have arbitrary size `, while `numpy.arange` + produces `numpy.int32` or `numpy.int64` numbers. This may result in + incorrect results for large integer values:: + + >>> power = 40 + >>> modulo = 10000 + >>> x1 = [(n ** power) % modulo for n in range(8)] + >>> x2 = [(n ** power) % modulo for n in np.arange(8)] + >>> print(x1) + [0, 1, 7776, 8801, 6176, 625, 6576, 4001] # correct + >>> print(x2) + [0, 1, 7776, 7185, 0, 5969, 4816, 3361] # incorrect + + See Also + -------- + numpy.linspace : Evenly spaced numbers with careful handling of endpoints. + numpy.ogrid: Arrays of evenly spaced numbers in N-dimensions. + numpy.mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions. + :ref:`how-to-partition` + + Examples + -------- + >>> import numpy as np + >>> np.arange(3) + array([0, 1, 2]) + >>> np.arange(3.0) + array([ 0., 1., 2.]) + >>> np.arange(3,7) + array([3, 4, 5, 6]) + >>> np.arange(3,7,2) + array([3, 5]) + + """) + +add_newdoc('numpy._core.multiarray', '_get_ndarray_c_version', + """_get_ndarray_c_version() + + Return the compile time NPY_VERSION (formerly called NDARRAY_VERSION) number. + + """) + +add_newdoc('numpy._core.multiarray', '_reconstruct', + """_reconstruct(subtype, shape, dtype) + + Construct an empty array. Used by Pickle. + + """) + +add_newdoc('numpy._core.multiarray', 'promote_types', + """ + promote_types(type1, type2, /) + -- + + promote_types(type1, type2, /) + + Returns the data type with the smallest size and smallest scalar + kind to which both ``type1`` and ``type2`` may be safely cast. + The returned data type is always considered "canonical", this mainly + means that the promoted dtype will always be in native byte order. + + This function is symmetric, but rarely associative. + + Parameters + ---------- + type1 : dtype or dtype specifier + First data type. + type2 : dtype or dtype specifier + Second data type. + + Returns + ------- + out : dtype + The promoted data type. + + Notes + ----- + Please see `numpy.result_type` for additional information about promotion. + + Starting in NumPy 1.9, promote_types function now returns a valid string + length when given an integer or float dtype as one argument and a string + dtype as another argument. Previously it always returned the input string + dtype, even if it wasn't long enough to store the max integer/float value + converted to a string. + + .. versionchanged:: 1.23.0 + + NumPy now supports promotion for more structured dtypes. It will now + remove unnecessary padding from a structure dtype and promote included + fields individually. + + See Also + -------- + result_type, dtype, can_cast + + Examples + -------- + >>> import numpy as np + >>> np.promote_types('f4', 'f8') + dtype('float64') + + >>> np.promote_types('i8', 'f4') + dtype('float64') + + >>> np.promote_types('>i8', '>> np.promote_types('i4', 'S8') + dtype('S11') + + An example of a non-associative case: + + >>> p = np.promote_types + >>> p('S', p('i1', 'u1')) + dtype('S6') + >>> p(p('S', 'i1'), 'u1') + dtype('S4') + + """) + +add_newdoc('numpy._core.multiarray', 'c_einsum', + """ + c_einsum(subscripts, *operands, out=None, dtype=None, order='K', + casting='safe') + + *This documentation shadows that of the native python implementation of the `einsum` function, + except all references and examples related to the `optimize` argument (v 0.12.0) have been removed.* + + Evaluates the Einstein summation convention on the operands. + + Using the Einstein summation convention, many common multi-dimensional, + linear algebraic array operations can be represented in a simple fashion. + In *implicit* mode `einsum` computes these values. + + In *explicit* mode, `einsum` provides further flexibility to compute + other array operations that might not be considered classical Einstein + summation operations, by disabling, or forcing summation over specified + subscript labels. + + See the notes and examples for clarification. + + Parameters + ---------- + subscripts : str + Specifies the subscripts for summation as comma separated list of + subscript labels. An implicit (classical Einstein summation) + calculation is performed unless the explicit indicator '->' is + included as well as subscript labels of the precise output form. + operands : list of array_like + These are the arrays for the operation. + out : ndarray, optional + If provided, the calculation is done into this array. + dtype : {data-type, None}, optional + If provided, forces the calculation to use the data type specified. + Note that you may have to also give a more liberal `casting` + parameter to allow the conversions. Default is None. + order : {'C', 'F', 'A', 'K'}, optional + Controls the memory layout of the output. 'C' means it should + be C contiguous. 'F' means it should be Fortran contiguous, + 'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise. + 'K' means it should be as close to the layout of the inputs as + is possible, including arbitrarily permuted axes. + Default is 'K'. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Setting this to + 'unsafe' is not recommended, as it can adversely affect accumulations. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + + Default is 'safe'. + optimize : {False, True, 'greedy', 'optimal'}, optional + Controls if intermediate optimization should occur. No optimization + will occur if False and True will default to the 'greedy' algorithm. + Also accepts an explicit contraction list from the ``np.einsum_path`` + function. See ``np.einsum_path`` for more details. Defaults to False. + + Returns + ------- + output : ndarray + The calculation based on the Einstein summation convention. + + See Also + -------- + einsum_path, dot, inner, outer, tensordot, linalg.multi_dot + + Notes + ----- + The Einstein summation convention can be used to compute + many multi-dimensional, linear algebraic array operations. `einsum` + provides a succinct way of representing these. + + A non-exhaustive list of these operations, + which can be computed by `einsum`, is shown below along with examples: + + * Trace of an array, :py:func:`numpy.trace`. + * Return a diagonal, :py:func:`numpy.diag`. + * Array axis summations, :py:func:`numpy.sum`. + * Transpositions and permutations, :py:func:`numpy.transpose`. + * Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`. + * Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`. + * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`. + * Tensor contractions, :py:func:`numpy.tensordot`. + * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`. + + The subscripts string is a comma-separated list of subscript labels, + where each label refers to a dimension of the corresponding operand. + Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)`` + is equivalent to :py:func:`np.inner(a,b) `. If a label + appears only once, it is not summed, so ``np.einsum('i', a)`` produces a + view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)`` + describes traditional matrix multiplication and is equivalent to + :py:func:`np.matmul(a,b) `. Repeated subscript labels in one + operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent + to :py:func:`np.trace(a) `. + + In *implicit mode*, the chosen subscripts are important + since the axes of the output are reordered alphabetically. This + means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while + ``np.einsum('ji', a)`` takes its transpose. Additionally, + ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while, + ``np.einsum('ij,jh', a, b)`` returns the transpose of the + multiplication since subscript 'h' precedes subscript 'i'. + + In *explicit mode* the output can be directly controlled by + specifying output subscript labels. This requires the + identifier '->' as well as the list of output subscript labels. + This feature increases the flexibility of the function since + summing can be disabled or forced when required. The call + ``np.einsum('i->', a)`` is like :py:func:`np.sum(a) ` + if ``a`` is a 1-D array, and ``np.einsum('ii->i', a)`` + is like :py:func:`np.diag(a) ` if ``a`` is a square 2-D array. + The difference is that `einsum` does not allow broadcasting by default. + Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the + order of the output subscript labels and therefore returns matrix + multiplication, unlike the example above in implicit mode. + + To enable and control broadcasting, use an ellipsis. Default + NumPy-style broadcasting is done by adding an ellipsis + to the left of each term, like ``np.einsum('...ii->...i', a)``. + ``np.einsum('...i->...', a)`` is like + :py:func:`np.sum(a, axis=-1) ` for array ``a`` of any shape. + To take the trace along the first and last axes, + you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix + product with the left-most indices instead of rightmost, one can do + ``np.einsum('ij...,jk...->ik...', a, b)``. + + When there is only one operand, no axes are summed, and no output + parameter is provided, a view into the operand is returned instead + of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)`` + produces a view (changed in version 1.10.0). + + `einsum` also provides an alternative way to provide the subscripts + and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. + If the output shape is not provided in this format `einsum` will be + calculated in implicit mode, otherwise it will be performed explicitly. + The examples below have corresponding `einsum` calls with the two + parameter methods. + + Views returned from einsum are now writeable whenever the input array + is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now + have the same effect as :py:func:`np.swapaxes(a, 0, 2) ` + and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal + of a 2D array. + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(25).reshape(5,5) + >>> b = np.arange(5) + >>> c = np.arange(6).reshape(2,3) + + Trace of a matrix: + + >>> np.einsum('ii', a) + 60 + >>> np.einsum(a, [0,0]) + 60 + >>> np.trace(a) + 60 + + Extract the diagonal (requires explicit form): + + >>> np.einsum('ii->i', a) + array([ 0, 6, 12, 18, 24]) + >>> np.einsum(a, [0,0], [0]) + array([ 0, 6, 12, 18, 24]) + >>> np.diag(a) + array([ 0, 6, 12, 18, 24]) + + Sum over an axis (requires explicit form): + + >>> np.einsum('ij->i', a) + array([ 10, 35, 60, 85, 110]) + >>> np.einsum(a, [0,1], [0]) + array([ 10, 35, 60, 85, 110]) + >>> np.sum(a, axis=1) + array([ 10, 35, 60, 85, 110]) + + For higher dimensional arrays summing a single axis can be done with ellipsis: + + >>> np.einsum('...j->...', a) + array([ 10, 35, 60, 85, 110]) + >>> np.einsum(a, [Ellipsis,1], [Ellipsis]) + array([ 10, 35, 60, 85, 110]) + + Compute a matrix transpose, or reorder any number of axes: + + >>> np.einsum('ji', c) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.einsum('ij->ji', c) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.einsum(c, [1,0]) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.transpose(c) + array([[0, 3], + [1, 4], + [2, 5]]) + + Vector inner products: + + >>> np.einsum('i,i', b, b) + 30 + >>> np.einsum(b, [0], b, [0]) + 30 + >>> np.inner(b,b) + 30 + + Matrix vector multiplication: + + >>> np.einsum('ij,j', a, b) + array([ 30, 80, 130, 180, 230]) + >>> np.einsum(a, [0,1], b, [1]) + array([ 30, 80, 130, 180, 230]) + >>> np.dot(a, b) + array([ 30, 80, 130, 180, 230]) + >>> np.einsum('...j,j', a, b) + array([ 30, 80, 130, 180, 230]) + + Broadcasting and scalar multiplication: + + >>> np.einsum('..., ...', 3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.einsum(',ij', 3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.einsum(3, [Ellipsis], c, [Ellipsis]) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.multiply(3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + + Vector outer product: + + >>> np.einsum('i,j', np.arange(2)+1, b) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + >>> np.einsum(np.arange(2)+1, [0], b, [1]) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + >>> np.outer(np.arange(2)+1, b) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + + Tensor contraction: + + >>> a = np.arange(60.).reshape(3,4,5) + >>> b = np.arange(24.).reshape(4,3,2) + >>> np.einsum('ijk,jil->kl', a, b) + array([[ 4400., 4730.], + [ 4532., 4874.], + [ 4664., 5018.], + [ 4796., 5162.], + [ 4928., 5306.]]) + >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3]) + array([[ 4400., 4730.], + [ 4532., 4874.], + [ 4664., 5018.], + [ 4796., 5162.], + [ 4928., 5306.]]) + >>> np.tensordot(a,b, axes=([1,0],[0,1])) + array([[ 4400., 4730.], + [ 4532., 4874.], + [ 4664., 5018.], + [ 4796., 5162.], + [ 4928., 5306.]]) + + Writeable returned arrays (since version 1.10.0): + + >>> a = np.zeros((3, 3)) + >>> np.einsum('ii->i', a)[:] = 1 + >>> a + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + + Example of ellipsis use: + + >>> a = np.arange(6).reshape((3,2)) + >>> b = np.arange(12).reshape((4,3)) + >>> np.einsum('ki,jk->ij', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + >>> np.einsum('ki,...k->i...', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + >>> np.einsum('k...,jk', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + + """) + + +############################################################################## +# +# Documentation for ndarray attributes and methods +# +############################################################################## + + +############################################################################## +# +# ndarray object +# +############################################################################## + + +add_newdoc('numpy._core.multiarray', 'ndarray', + """ + ndarray(shape, dtype=None, buffer=None, offset=0, strides=None, order=None) + -- + + ndarray(shape, dtype=float, buffer=None, offset=0, strides=None, order=None) + + An array object represents a multidimensional, homogeneous array + of fixed-size items. An associated data-type object describes the + format of each element in the array (its byte-order, how many bytes it + occupies in memory, whether it is an integer, a floating point number, + or something else, etc.) + + Arrays should be constructed using `array`, `zeros` or `empty` (refer + to the See Also section below). The parameters given here refer to + a low-level method (`ndarray(...)`) for instantiating an array. + + For more information, refer to the `numpy` module and examine the + methods and attributes of an array. + + Parameters + ---------- + (for the __new__ method; see Notes below) + + shape : tuple of ints + Shape of created array. + dtype : data-type, optional + Any object that can be interpreted as a numpy data type. + Default is `numpy.float64`. + buffer : object exposing buffer interface, optional + Used to fill the array with data. + offset : int, optional + Offset of array data in buffer. + strides : tuple of ints, optional + Strides of data in memory. + order : {'C', 'F'}, optional + Row-major (C-style) or column-major (Fortran-style) order. + + Attributes + ---------- + T : ndarray + Transpose of the array. + data : buffer + The array's elements, in memory. + dtype : dtype object + Describes the format of the elements in the array. + flags : dict + Dictionary containing information related to memory use, e.g., + 'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc. + flat : numpy.flatiter object + Flattened version of the array as an iterator. The iterator + allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for + assignment examples; TODO). + imag : ndarray + Imaginary part of the array. + real : ndarray + Real part of the array. + size : int + Number of elements in the array. + itemsize : int + The memory use of each array element in bytes. + nbytes : int + The total number of bytes required to store the array data, + i.e., ``itemsize * size``. + ndim : int + The array's number of dimensions. + shape : tuple of ints + Shape of the array. + strides : tuple of ints + The step-size required to move from one element to the next in + memory. For example, a contiguous ``(3, 4)`` array of type + ``int16`` in C-order has strides ``(8, 2)``. This implies that + to move from element to element in memory requires jumps of 2 bytes. + To move from row-to-row, one needs to jump 8 bytes at a time + (``2 * 4``). + ctypes : ctypes object + Class containing properties of the array needed for interaction + with ctypes. + base : ndarray + If the array is a view into another array, that array is its `base` + (unless that array is also a view). The `base` array is where the + array data is actually stored. + + See Also + -------- + array : Construct an array. + zeros : Create an array, each element of which is zero. + empty : Create an array, but leave its allocated memory unchanged (i.e., + it contains "garbage"). + dtype : Create a data-type. + numpy.typing.NDArray : An ndarray alias :term:`generic ` + w.r.t. its `dtype.type `. + + Notes + ----- + There are two modes of creating an array using ``__new__``: + + 1. If `buffer` is None, then only `shape`, `dtype`, and `order` + are used. + 2. If `buffer` is an object exposing the buffer interface, then + all keywords are interpreted. + + No ``__init__`` method is needed because the array is fully initialized + after the ``__new__`` method. + + Examples + -------- + These examples illustrate the low-level `ndarray` constructor. Refer + to the `See Also` section above for easier ways of constructing an + ndarray. + + First mode, `buffer` is None: + + >>> import numpy as np + >>> np.ndarray(shape=(2,2), dtype=float, order='F') + array([[0.0e+000, 0.0e+000], # random + [ nan, 2.5e-323]]) + + Second mode: + + >>> np.ndarray((2,), buffer=np.array([1,2,3]), + ... offset=np.int_().itemsize, + ... dtype=int) # offset = 1*itemsize, i.e. skip first element + array([2, 3]) + + """) + + +############################################################################## +# +# ndarray attributes +# +############################################################################## + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_interface__', + """Array protocol: Python side.""")) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_priority__', + """Array priority.""")) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_struct__', + """Array protocol: C-struct side.""")) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('base', + """ + Base object if memory is from some other object. + + Examples + -------- + The base of an array that owns its memory is None: + + >>> import numpy as np + >>> x = np.array([1,2,3,4]) + >>> x.base is None + True + + Slicing creates a view, whose memory is shared with x: + + >>> y = x[2:] + >>> y.base is x + True + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('ctypes', + """ + An object to simplify the interaction of the array with the ctypes + module. + + This attribute creates an object that makes it easier to use arrays + when calling shared libraries with the ctypes module. The returned + object has, among others, data, shape, and strides attributes (see + Notes below) which themselves return ctypes objects that can be used + as arguments to a shared library. + + Parameters + ---------- + None + + Returns + ------- + c : Python object + Possessing attributes data, shape, strides, etc. + + See Also + -------- + numpy.ctypeslib + + Notes + ----- + Below are the public attributes of this object which were documented + in "Guide to NumPy" (we have omitted undocumented public attributes, + as well as documented private attributes): + + .. autoattribute:: numpy._core._internal._ctypes.data + :noindex: + + .. autoattribute:: numpy._core._internal._ctypes.shape + :noindex: + + .. autoattribute:: numpy._core._internal._ctypes.strides + :noindex: + + .. automethod:: numpy._core._internal._ctypes.data_as + :noindex: + + .. automethod:: numpy._core._internal._ctypes.shape_as + :noindex: + + .. automethod:: numpy._core._internal._ctypes.strides_as + :noindex: + + If the ctypes module is not available, then the ctypes attribute + of array objects still returns something useful, but ctypes objects + are not returned and errors may be raised instead. In particular, + the object will still have the ``as_parameter`` attribute which will + return an integer equal to the data attribute. + + Examples + -------- + >>> import numpy as np + >>> import ctypes + >>> x = np.array([[0, 1], [2, 3]], dtype=np.int32) + >>> x + array([[0, 1], + [2, 3]], dtype=int32) + >>> x.ctypes.data + 31962608 # may vary + >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32)) + <__main__.LP_c_uint object at 0x7ff2fc1fc200> # may vary + >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32)).contents + c_uint(0) + >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint64)).contents + c_ulong(4294967296) + >>> x.ctypes.shape + # may vary + >>> x.ctypes.strides + # may vary + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('data', + """Python buffer object pointing to the start of the array's data.""")) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('dtype', + """ + Data-type of the array's elements. + + .. warning:: + + Setting ``arr.dtype`` is discouraged and may be deprecated in the + future. Setting will replace the ``dtype`` without modifying the + memory (see also `ndarray.view` and `ndarray.astype`). + + Parameters + ---------- + None + + Returns + ------- + d : numpy dtype object + + See Also + -------- + ndarray.astype : Cast the values contained in the array to a new data-type. + ndarray.view : Create a view of the same data but a different data-type. + numpy.dtype + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(4).reshape((2, 2)) + >>> x + array([[0, 1], + [2, 3]]) + >>> x.dtype + dtype('int64') # may vary (OS, bitness) + >>> isinstance(x.dtype, np.dtype) + True + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('imag', + """ + The imaginary part of the array. + + Examples + -------- + >>> import numpy as np + >>> x = np.sqrt([1+0j, 0+1j]) + >>> x.imag + array([ 0. , 0.70710678]) + >>> x.imag.dtype + dtype('float64') + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('itemsize', + """ + Length of one array element in bytes. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1,2,3], dtype=np.float64) + >>> x.itemsize + 8 + >>> x = np.array([1,2,3], dtype=np.complex128) + >>> x.itemsize + 16 + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('flags', + """ + Information about the memory layout of the array. + + Attributes + ---------- + C_CONTIGUOUS (C) + The data is in a single, C-style contiguous segment. + F_CONTIGUOUS (F) + The data is in a single, Fortran-style contiguous segment. + OWNDATA (O) + The array owns the memory it uses or borrows it from another object. + WRITEABLE (W) + The data area can be written to. Setting this to False locks + the data, making it read-only. A view (slice, etc.) inherits WRITEABLE + from its base array at creation time, but a view of a writeable + array may be subsequently locked while the base array remains writeable. + (The opposite is not true, in that a view of a locked array may not + be made writeable. However, currently, locking a base object does not + lock any views that already reference it, so under that circumstance it + is possible to alter the contents of a locked array via a previously + created writeable view onto it.) Attempting to change a non-writeable + array raises a RuntimeError exception. + ALIGNED (A) + The data and all elements are aligned appropriately for the hardware. + WRITEBACKIFCOPY (X) + This array is a copy of some other array. The C-API function + PyArray_ResolveWritebackIfCopy must be called before deallocating + to the base array will be updated with the contents of this array. + FNC + F_CONTIGUOUS and not C_CONTIGUOUS. + FORC + F_CONTIGUOUS or C_CONTIGUOUS (one-segment test). + BEHAVED (B) + ALIGNED and WRITEABLE. + CARRAY (CA) + BEHAVED and C_CONTIGUOUS. + FARRAY (FA) + BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS. + + Notes + ----- + The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``), + or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag + names are only supported in dictionary access. + + Only the WRITEBACKIFCOPY, WRITEABLE, and ALIGNED flags can be + changed by the user, via direct assignment to the attribute or dictionary + entry, or by calling `ndarray.setflags`. + + The array flags cannot be set arbitrarily: + + - WRITEBACKIFCOPY can only be set ``False``. + - ALIGNED can only be set ``True`` if the data is truly aligned. + - WRITEABLE can only be set ``True`` if the array owns its own memory + or the ultimate owner of the memory exposes a writeable buffer + interface or is a string. + + Arrays can be both C-style and Fortran-style contiguous simultaneously. + This is clear for 1-dimensional arrays, but can also be true for higher + dimensional arrays. + + Even for contiguous arrays a stride for a given dimension + ``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1`` + or the array has no elements. + It does *not* generally hold that ``self.strides[-1] == self.itemsize`` + for C-style contiguous arrays or ``self.strides[0] == self.itemsize`` for + Fortran-style contiguous arrays is true. + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('flat', + """ + A 1-D iterator over the array. + + This is a `numpy.flatiter` instance, which acts similarly to, but is not + a subclass of, Python's built-in iterator object. + + See Also + -------- + flatten : Return a copy of the array collapsed into one dimension. + + flatiter + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(1, 7).reshape(2, 3) + >>> x + array([[1, 2, 3], + [4, 5, 6]]) + >>> x.flat[3] + 4 + >>> x.T + array([[1, 4], + [2, 5], + [3, 6]]) + >>> x.T.flat[3] + 5 + >>> type(x.flat) + + + An assignment example: + + >>> x.flat = 3; x + array([[3, 3, 3], + [3, 3, 3]]) + >>> x.flat[[1,4]] = 1; x + array([[3, 1, 3], + [3, 1, 3]]) + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('nbytes', + """ + Total bytes consumed by the elements of the array. + + Notes + ----- + Does not include memory consumed by non-element attributes of the + array object. + + See Also + -------- + sys.getsizeof + Memory consumed by the object itself without parents in case view. + This does include memory consumed by non-element attributes. + + Examples + -------- + >>> import numpy as np + >>> x = np.zeros((3,5,2), dtype=np.complex128) + >>> x.nbytes + 480 + >>> np.prod(x.shape) * x.itemsize + 480 + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('ndim', + """ + Number of array dimensions. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3]) + >>> x.ndim + 1 + >>> y = np.zeros((2, 3, 4)) + >>> y.ndim + 3 + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('real', + """ + The real part of the array. + + Examples + -------- + >>> import numpy as np + >>> x = np.sqrt([1+0j, 0+1j]) + >>> x.real + array([ 1. , 0.70710678]) + >>> x.real.dtype + dtype('float64') + + See Also + -------- + numpy.real : equivalent function + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('shape', + """ + Tuple of array dimensions. + + The shape property is usually used to get the current shape of an array, + but may also be used to reshape the array in-place by assigning a tuple of + array dimensions to it. As with `numpy.reshape`, one of the new shape + dimensions can be -1, in which case its value is inferred from the size of + the array and the remaining dimensions. Reshaping an array in-place will + fail if a copy is required. + + .. warning:: + + Setting ``arr.shape`` is discouraged and may be deprecated in the + future. Using `ndarray.reshape` is the preferred approach. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3, 4]) + >>> x.shape + (4,) + >>> y = np.zeros((2, 3, 4)) + >>> y.shape + (2, 3, 4) + >>> y.shape = (3, 8) + >>> y + array([[ 0., 0., 0., 0., 0., 0., 0., 0.], + [ 0., 0., 0., 0., 0., 0., 0., 0.], + [ 0., 0., 0., 0., 0., 0., 0., 0.]]) + >>> y.shape = (3, 6) + Traceback (most recent call last): + File "", line 1, in + ValueError: cannot reshape array of size 24 into shape (3,6) + >>> np.zeros((4,2))[::2].shape = (-1,) + Traceback (most recent call last): + File "", line 1, in + AttributeError: Incompatible shape for in-place modification. Use + `.reshape()` to make a copy with the desired shape. + + See Also + -------- + numpy.shape : Equivalent getter function. + numpy.reshape : Function similar to setting ``shape``. + ndarray.reshape : Method similar to setting ``shape``. + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('size', + """ + Number of elements in the array. + + Equal to ``np.prod(a.shape)``, i.e., the product of the array's + dimensions. + + Notes + ----- + `a.size` returns a standard arbitrary precision Python integer. This + may not be the case with other methods of obtaining the same value + (like the suggested ``np.prod(a.shape)``, which returns an instance + of ``np.int_``), and may be relevant if the value is used further in + calculations that may overflow a fixed size integer type. + + Examples + -------- + >>> import numpy as np + >>> x = np.zeros((3, 5, 2), dtype=np.complex128) + >>> x.size + 30 + >>> np.prod(x.shape) + 30 + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('strides', + """ + Tuple of bytes to step in each dimension when traversing an array. + + The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a` + is:: + + offset = sum(np.array(i) * a.strides) + + A more detailed explanation of strides can be found in + :ref:`arrays.ndarray`. + + .. warning:: + + Setting ``arr.strides`` is discouraged and may be deprecated in the + future. `numpy.lib.stride_tricks.as_strided` should be preferred + to create a new view of the same data in a safer way. + + Notes + ----- + Imagine an array of 32-bit integers (each 4 bytes):: + + x = np.array([[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]], dtype=np.int32) + + This array is stored in memory as 40 bytes, one after the other + (known as a contiguous block of memory). The strides of an array tell + us how many bytes we have to skip in memory to move to the next position + along a certain axis. For example, we have to skip 4 bytes (1 value) to + move to the next column, but 20 bytes (5 values) to get to the same + position in the next row. As such, the strides for the array `x` will be + ``(20, 4)``. + + See Also + -------- + numpy.lib.stride_tricks.as_strided + + Examples + -------- + >>> import numpy as np + >>> y = np.reshape(np.arange(2 * 3 * 4, dtype=np.int32), (2, 3, 4)) + >>> y + array([[[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]], + [[12, 13, 14, 15], + [16, 17, 18, 19], + [20, 21, 22, 23]]], dtype=np.int32) + >>> y.strides + (48, 16, 4) + >>> y[1, 1, 1] + np.int32(17) + >>> offset = sum(y.strides * np.array((1, 1, 1))) + >>> offset // y.itemsize + np.int64(17) + + >>> x = np.reshape(np.arange(5*6*7*8, dtype=np.int32), (5, 6, 7, 8)) + >>> x = x.transpose(2, 3, 1, 0) + >>> x.strides + (32, 4, 224, 1344) + >>> i = np.array([3, 5, 2, 2], dtype=np.int32) + >>> offset = sum(i * x.strides) + >>> x[3, 5, 2, 2] + np.int32(813) + >>> offset // x.itemsize + np.int64(813) + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('T', + """ + View of the transposed array. + + Same as ``self.transpose()``. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4]]) + >>> a + array([[1, 2], + [3, 4]]) + >>> a.T + array([[1, 3], + [2, 4]]) + + >>> a = np.array([1, 2, 3, 4]) + >>> a + array([1, 2, 3, 4]) + >>> a.T + array([1, 2, 3, 4]) + + See Also + -------- + transpose + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('mT', + """ + View of the matrix transposed array. + + The matrix transpose is the transpose of the last two dimensions, even + if the array is of higher dimension. + + .. versionadded:: 2.0 + + Raises + ------ + ValueError + If the array is of dimension less than 2. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4]]) + >>> a + array([[1, 2], + [3, 4]]) + >>> a.mT + array([[1, 3], + [2, 4]]) + + >>> a = np.arange(8).reshape((2, 2, 2)) + >>> a + array([[[0, 1], + [2, 3]], + + [[4, 5], + [6, 7]]]) + >>> a.mT + array([[[0, 2], + [1, 3]], + + [[4, 6], + [5, 7]]]) + + """)) + +############################################################################## +# +# ndarray methods +# +############################################################################## + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array__', + """ + __array__($self, dtype=None, /, *, copy=None) + -- + + a.__array__([dtype], *, copy=None) + + For ``dtype`` parameter it returns a new reference to self if + ``dtype`` is not given or it matches array's data type. + A new array of provided data type is returned if ``dtype`` + is different from the current data type of the array. + For ``copy`` parameter it returns a new reference to self if + ``copy=False`` or ``copy=None`` and copying isn't enforced by ``dtype`` + parameter. The method returns a new array for ``copy=True``, regardless of + ``dtype`` parameter. + + A more detailed explanation of the ``__array__`` interface + can be found in :ref:`dunder_array.interface`. + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_finalize__', + """ + __array_finalize__($self, obj, /) + -- + + a.__array_finalize__(obj, /) + + Present so subclasses can call super. Does nothing. + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_function__', + """ + __array_function__($self, /, func, types, args, kwargs) + -- + + a.__array_function__(func, types, args, kwargs) + + See :ref:`NEP 18 ` and :ref:`NEP 35 ` for details. + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_ufunc__', + """ + __array_ufunc__($self, ufunc, method, /, *inputs, **kwargs) + -- + + a.__array_ufunc__(ufunc, method, /, *inputs, **kwargs) + + See :ref:`NEP 13 ` for details. + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_wrap__', + """ + __array_wrap__($self, array, context=None, return_scalar=True, /) + -- + + a.__array_wrap__(array[, context[, return_scalar]], /) + + Returns a view of `array` with the same type as self. + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__class_getitem__', + """ + __class_getitem__($cls, item, /) + -- + + ndarray[shape, dtype] + + Return a parametrized wrapper around the `~numpy.ndarray` type. + + .. versionadded:: 1.22 + + Returns + ------- + alias : types.GenericAlias + A parametrized `~numpy.ndarray` type. + + Examples + -------- + >>> import numpy as np + + >>> np.ndarray[tuple[int], np.dtype[np.uint8]] + numpy.ndarray[tuple[int], numpy.dtype[numpy.uint8]] + + See Also + -------- + :pep:`585` : Type hinting generics in standard collections. + numpy.typing.NDArray : An ndarray alias :term:`generic ` + w.r.t. its `dtype.type `. + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack__', + """ + __dlpack__($self, /, *, stream=None, max_version=None, dl_device=None, copy=None) + -- + + a.__dlpack__(*, stream=None, max_version=None, dl_device=None, copy=None) + + Exports the array for consumption by ``from_dlpack()`` as a DLPack capsule. + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack_device__', + """ + __dlpack_device__($self, /) + -- + + a.__dlpack_device__() + + Returns device type (``1``) and device ID (``0``) in DLPack format. + Meant for use within ``from_dlpack()``. + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__reduce__', + """ + __reduce__($self, /) + -- + + a.__reduce__() + + For pickling. + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__reduce_ex__', + """ + __reduce_ex__($self, protocol, /) + -- + + a.__reduce_ex__(protocol, /) + + For pickling. + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__setstate__', + """ + __setstate__($self, state, /) + -- + + a.__setstate__(state, /) + + For unpickling. + + The `state` argument must be a sequence that contains the following + elements: + + Parameters + ---------- + version : int + optional pickle version. If omitted defaults to 0. + shape : tuple + dtype : data-type + isFortran : bool + rawdata : string or list + a binary string with the data (or a list if 'a' is an object array) + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('dot', + """ + dot($self, other, /, out=None) + -- + + a.dot(other, /, out=None) + + Refer to :func:`numpy.dot` for full documentation. + + See Also + -------- + numpy.dot : equivalent function + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('argpartition', + """ + argpartition($self, kth, /, axis=-1, kind='introselect', order=None) + -- + + a.argpartition(kth, axis=-1, kind='introselect', order=None) + + Returns the indices that would partition this array. + + Refer to `numpy.argpartition` for full documentation. + + See Also + -------- + numpy.argpartition : equivalent function + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('partition', + """ + partition($self, kth, /, axis=-1, kind='introselect', order=None) + -- + + a.partition(kth, axis=-1, kind='introselect', order=None) + + Partially sorts the elements in the array in such a way that the value of + the element in k-th position is in the position it would be in a sorted + array. In the output array, all elements smaller than the k-th element + are located to the left of this element and all equal or greater are + located to its right. The ordering of the elements in the two partitions + on the either side of the k-th element in the output array is undefined. + + Parameters + ---------- + kth : int or sequence of ints + Element index to partition by. The kth element value will be in its + final sorted position and all smaller elements will be moved before it + and all equal or greater elements behind it. + The order of all elements in the partitions is undefined. + If provided with a sequence of kth it will partition all elements + indexed by kth of them into their sorted position at once. + + .. deprecated:: 1.22.0 + Passing booleans as index is deprecated. + axis : int, optional + Axis along which to sort. Default is -1, which means sort along the + last axis. + kind : {'introselect'}, optional + Selection algorithm. Default is 'introselect'. + order : str or list of str, optional + When `a` is an array with fields defined, this argument specifies + which fields to compare first, second, etc. A single field can + be specified as a string, and not all fields need to be specified, + but unspecified fields will still be used, in the order in which + they come up in the dtype, to break ties. + + See Also + -------- + numpy.partition : Return a partitioned copy of an array. + argpartition : Indirect partition. + sort : Full sort. + + Notes + ----- + See ``np.partition`` for notes on the different algorithms. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([3, 4, 2, 1]) + >>> a.partition(3) + >>> a + array([2, 1, 3, 4]) # may vary + + >>> a.partition((1, 3)) + >>> a + array([1, 2, 3, 4]) + + """)) + + +############################################################################## +# +# methods from both `ndarray` and `generic` +# +############################################################################## + +_METHOD_DOC_TEMPLATE = """{name}({params}) +-- + +{doc}""" + +def _array_method_doc(name: str, params: str, doc: str) -> None: + """ + Interenal helper function for adding docstrings to a common method of + `numpy.ndarray` and `numpy.generic`. + + The provided docstring will be added to the given `numpy.ndarray` method. + For the `numpy.generic` method, a shorter docstring indicating that it is + identical to the `ndarray` method will be created. + Both methods will have a proper and identical `__text_signature__`. + + Parameters + ---------- + name : str + Name of the method. + params : str + Parameter signature for the method without parentheses, for example, + ``"a, /, dtype=None, *, copy=False"``. + Parameter defaults must be understood by `ast.literal_eval`, i.e. strings, + bytes, numbers, tuples, lists, dicts, sets, booleans, or None. + doc : str + The full docstring for the `ndarray` method. + """ + + # prepend the pos-only `$self` parameter to the method signature + if "/" not in params: + params = f"/, {params}" if params else "/" + params = f"$self, {params}" + + # add docstring to `np.ndarray.{name}` + doc = textwrap.dedent(doc).strip() + doc_array = _METHOD_DOC_TEMPLATE.format(name=name, params=params, doc=doc) + add_newdoc("numpy._core.multiarray", "ndarray", (name, doc_array)) + + # add docstring to `np.generic.{name}` + doc_scalar = f"Scalar method identical to `ndarray.{name}`." + doc_scalar = _METHOD_DOC_TEMPLATE.format(name=name, params=params, doc=doc_scalar) + add_newdoc("numpy._core.numerictypes", "generic", (name, doc_scalar)) + + +_array_method_doc('__array_namespace__', "*, api_version=None", + """ + a.__array_namespace__(*, api_version=None) + + For Array API compatibility. + """) + +_array_method_doc('__copy__', "", + """ + a.__copy__() + + Used if :func:`copy.copy` is called on an array. Returns a copy of the array. + + Equivalent to ``a.copy(order='K')``. + """) + +_array_method_doc('__deepcopy__', "memo, /", + """ + a.__deepcopy__(memo, /) + + Used if :func:`copy.deepcopy` is called on an array. + """) + +_array_method_doc('all', "axis=None, out=None, keepdims=False, *, where=True", + """ + a.all(axis=None, out=None, *, keepdims=, where=) + + Returns True if all elements evaluate to True. + + Refer to `numpy.all` for full documentation. + + See Also + -------- + numpy.all : equivalent function + """) + +_array_method_doc('any', "axis=None, out=None, keepdims=False, *, where=True", + """ + a.any(axis=None, out=None, *, keepdims=, where=) + + Returns True if any of the elements of `a` evaluate to True. + + Refer to `numpy.any` for full documentation. + + See Also + -------- + numpy.any : equivalent function + """) + +_array_method_doc('argmax', "axis=None, out=None, *, keepdims=False", + """ + a.argmax(axis=None, out=None, *, keepdims=False) + + Return indices of the maximum values along the given axis. + + Refer to `numpy.argmax` for full documentation. + + See Also + -------- + numpy.argmax : equivalent function + """) + +_array_method_doc('argmin', "axis=None, out=None, *, keepdims=False", + """ + a.argmin(axis=None, out=None, *, keepdims=False) + + Return indices of the minimum values along the given axis. + + Refer to `numpy.argmin` for detailed documentation. + + See Also + -------- + numpy.argmin : equivalent function + """) + +_array_method_doc('argsort', "axis=-1, kind=None, order=None, *, stable=None", + """ + a.argsort(axis=-1, kind=None, order=None, *, stable=None) + + Returns the indices that would sort this array. + + Refer to `numpy.argsort` for full documentation. + + See Also + -------- + numpy.argsort : equivalent function + """) + +_array_method_doc('astype', "dtype, order='K', casting='unsafe', subok=True, copy=True", + """ + a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True) + + Copy of the array, cast to a specified type. + + Parameters + ---------- + dtype : str or dtype + Typecode or data-type to which the array is cast. + order : {'C', 'F', 'A', 'K'}, optional + Controls the memory layout order of the result. + 'C' means C order, 'F' means Fortran order, 'A' + means 'F' order if all the arrays are Fortran contiguous, + 'C' order otherwise, and 'K' means as close to the + order the array elements appear in memory as possible. + Default is 'K'. + casting : {'no', 'equiv', 'safe', 'same_kind', 'same_value', 'unsafe'}, optional + Controls what kind of data casting may occur. Defaults to 'unsafe' + for backwards compatibility. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + * 'same_value' means any data conversions may be done, but the values + must not change, including rounding of floats or overflow of ints + + .. versionadded:: 2.4 + Support for ``'same_value'`` was added. + + subok : bool, optional + If True, then sub-classes will be passed-through (default), otherwise + the returned array will be forced to be a base-class array. + copy : bool, optional + By default, astype always returns a newly allocated array. If this + is set to false, and the `dtype`, `order`, and `subok` + requirements are satisfied, the input array is returned instead + of a copy. + + Returns + ------- + arr_t : ndarray + Unless `copy` is False and the other conditions for returning the input + array are satisfied (see description for `copy` input parameter), `arr_t` + is a new array of the same shape as the input array, with dtype, order + given by `dtype`, `order`. + + Raises + ------ + ComplexWarning + When casting from complex to float or int. To avoid this, + one should use ``a.real.astype(t)``. + ValueError + When casting using ``'same_value'`` and the values change or would + overflow + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 2.5]) + >>> x + array([1. , 2. , 2.5]) + + >>> x.astype(int) + array([1, 2, 2]) + + >>> x.astype(int, casting="same_value") + Traceback (most recent call last): + ... + ValueError: could not cast 'same_value' double to long + + >>> x[:2].astype(int, casting="same_value") + array([1, 2]) + """) + +_array_method_doc('byteswap', "inplace=False", + """ + a.byteswap(inplace=False) + + Swap the bytes of the array elements + + Toggle between low-endian and big-endian data representation by + returning a byteswapped array, optionally swapped in-place. + Arrays of byte-strings are not swapped. The real and imaginary + parts of a complex number are swapped individually. + + Parameters + ---------- + inplace : bool, optional + If ``True``, swap bytes in-place, default is ``False``. + + Returns + ------- + out : ndarray + The byteswapped array. If `inplace` is ``True``, this is + a view to self. + + Examples + -------- + >>> import numpy as np + >>> A = np.array([1, 256, 8755], dtype=np.int16) + >>> list(map(hex, A)) + ['0x1', '0x100', '0x2233'] + >>> A.byteswap(inplace=True) + array([ 256, 1, 13090], dtype=int16) + >>> list(map(hex, A)) + ['0x100', '0x1', '0x3322'] + + Arrays of byte-strings are not swapped + + >>> A = np.array([b'ceg', b'fac']) + >>> A.byteswap() + array([b'ceg', b'fac'], dtype='|S3') + + ``A.view(A.dtype.newbyteorder()).byteswap()`` produces an array with + the same values but different representation in memory + + >>> A = np.array([1, 2, 3],dtype=np.int64) + >>> A.view(np.uint8) + array([1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, + 0, 0], dtype=uint8) + >>> A.view(A.dtype.newbyteorder()).byteswap(inplace=True) + array([1, 2, 3], dtype='>i8') + >>> A.view(np.uint8) + array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, + 0, 3], dtype=uint8) + """) + +_array_method_doc('choose', "choices, out=None, mode='raise'", + """ + a.choose(choices, out=None, mode='raise') + + Use an index array to construct a new array from a set of choices. + + Refer to `numpy.choose` for full documentation. + + See Also + -------- + numpy.choose : equivalent function + """) + +_array_method_doc('clip', "min=None, max=None, out=None, **kwargs", + """ + a.clip(min=, max=, out=None, **kwargs) + + Return an array whose values are limited to ``[min, max]``. + One of max or min must be given. + + Refer to `numpy.clip` for full documentation. + + See Also + -------- + numpy.clip : equivalent function + """) + +_array_method_doc('compress', "condition, axis=None, out=None", + """ + a.compress(condition, axis=None, out=None) + + Return selected slices of this array along given axis. + + Refer to `numpy.compress` for full documentation. + + See Also + -------- + numpy.compress : equivalent function + """) + +_array_method_doc('conj', "", + """ + a.conj() + + Complex-conjugate all elements. + + Refer to `numpy.conjugate` for full documentation. + + See Also + -------- + numpy.conjugate : equivalent function + """) + +_array_method_doc('conjugate', "", + """ + a.conjugate() + + Return the complex conjugate, element-wise. + + Refer to `numpy.conjugate` for full documentation. + + See Also + -------- + numpy.conjugate : equivalent function + """) + +_array_method_doc('copy', "order='C'", + """ + a.copy(order='C') + + Return a copy of the array. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + Controls the memory layout of the copy. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. (Note that this function and :func:`numpy.copy` are very + similar but have different default values for their order= + arguments, and this function always passes sub-classes through.) + + See also + -------- + numpy.copy : Similar function with different default behavior + numpy.copyto + + Notes + ----- + This function is the preferred method for creating an array copy. The + function :func:`numpy.copy` is similar, but it defaults to using order 'K', + and will not pass sub-classes through by default. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([[1,2,3],[4,5,6]], order='F') + + >>> y = x.copy() + + >>> x.fill(0) + + >>> x + array([[0, 0, 0], + [0, 0, 0]]) + + >>> y + array([[1, 2, 3], + [4, 5, 6]]) + + >>> y.flags['C_CONTIGUOUS'] + True + + For arrays containing Python objects (e.g. dtype=object), + the copy is a shallow one. The new array will contain the + same object which may lead to surprises if that object can + be modified (is mutable): + + >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) + >>> b = a.copy() + >>> b[2][0] = 10 + >>> a + array([1, 'm', list([10, 3, 4])], dtype=object) + + To ensure all elements within an ``object`` array are copied, + use `copy.deepcopy`: + + >>> import copy + >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) + >>> c = copy.deepcopy(a) + >>> c[2][0] = 10 + >>> c + array([1, 'm', list([10, 3, 4])], dtype=object) + >>> a + array([1, 'm', list([2, 3, 4])], dtype=object) + """) + +_array_method_doc('cumprod', "axis=None, dtype=None, out=None", + """ + a.cumprod(axis=None, dtype=None, out=None) + + Return the cumulative product of the elements along the given axis. + + Refer to `numpy.cumprod` for full documentation. + + See Also + -------- + numpy.cumprod : equivalent function + """) + +_array_method_doc('cumsum', "axis=None, dtype=None, out=None", + """ + a.cumsum(axis=None, dtype=None, out=None) + + Return the cumulative sum of the elements along the given axis. + + Refer to `numpy.cumsum` for full documentation. + + See Also + -------- + numpy.cumsum : equivalent function + """) + +_array_method_doc('diagonal', "offset=0, axis1=0, axis2=1", + """ + a.diagonal(offset=0, axis1=0, axis2=1) + + Return specified diagonals. In NumPy 1.9 the returned array is a + read-only view instead of a copy as in previous NumPy versions. In + a future version the read-only restriction will be removed. + + Refer to :func:`numpy.diagonal` for full documentation. + + See Also + -------- + numpy.diagonal : equivalent function + """) + +_array_method_doc('dump', "file", + """ + a.dump(file) + + Dump a pickle of the array to the specified file. + The array can be read back with pickle.load or numpy.load. + + Parameters + ---------- + file : str or Path + A string naming the dump file. + """) + +_array_method_doc('dumps', "", + """ + a.dumps() + + Returns the pickle of the array as a string. + ``pickle.loads`` will convert the string back to an array. + + Parameters + ---------- + None + """) + +_array_method_doc('fill', "value", + """ + a.fill(value) + + Fill the array with a scalar value. + + Parameters + ---------- + value : scalar + All elements of `a` will be assigned this value. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([1, 2]) + >>> a.fill(0) + >>> a + array([0, 0]) + >>> a = np.empty(2) + >>> a.fill(1) + >>> a + array([1., 1.]) + + Fill expects a scalar value and always behaves the same as assigning + to a single array element. The following is a rare example where this + distinction is important: + + >>> a = np.array([None, None], dtype=object) + >>> a[0] = np.array(3) + >>> a + array([array(3), None], dtype=object) + >>> a.fill(np.array(3)) + >>> a + array([array(3), array(3)], dtype=object) + + Where other forms of assignments will unpack the array being assigned: + + >>> a[...] = np.array(3) + >>> a + array([3, 3], dtype=object) + """) + +_array_method_doc('flatten', "order='C'", + """ + a.flatten(order='C') + + Return a copy of the array collapsed into one dimension. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + 'C' means to flatten in row-major (C-style) order. + 'F' means to flatten in column-major (Fortran- + style) order. 'A' means to flatten in column-major + order if `a` is Fortran *contiguous* in memory, + row-major order otherwise. 'K' means to flatten + `a` in the order the elements occur in memory. + The default is 'C'. + + Returns + ------- + y : ndarray + A copy of the input array, flattened to one dimension. + + See Also + -------- + ravel : Return a flattened array. + flat : A 1-D flat iterator over the array. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1,2], [3,4]]) + >>> a.flatten() + array([1, 2, 3, 4]) + >>> a.flatten('F') + array([1, 3, 2, 4]) + """) + +_array_method_doc('getfield', "dtype, offset=0", + """ + a.getfield(dtype, offset=0) + + Returns a field of the given array as a certain type. + + A field is a view of the array data with a given data-type. The values in + the view are determined by the given type and the offset into the current + array in bytes. The offset needs to be such that the view dtype fits in the + array dtype; for example an array of dtype complex128 has 16-byte elements. + If taking a view with a 32-bit integer (4 bytes), the offset needs to be + between 0 and 12 bytes. + + Parameters + ---------- + dtype : str or dtype + The data type of the view. The dtype size of the view can not be larger + than that of the array itself. + offset : int + Number of bytes to skip before beginning the element view. + + Examples + -------- + >>> import numpy as np + >>> x = np.diag([1.+1.j]*2) + >>> x[1, 1] = 2 + 4.j + >>> x + array([[1.+1.j, 0.+0.j], + [0.+0.j, 2.+4.j]]) + >>> x.getfield(np.float64) + array([[1., 0.], + [0., 2.]]) + + By choosing an offset of 8 bytes we can select the complex part of the + array for our view: + + >>> x.getfield(np.float64, offset=8) + array([[1., 0.], + [0., 4.]]) + """) + +_array_method_doc('item', "*args", + """ + a.item(*args) + + Copy an element of an array to a standard Python scalar and return it. + + Parameters + ---------- + \\*args : Arguments (variable number and type) + + * none: in this case, the method only works for arrays + with one element (`a.size == 1`), which element is + copied into a standard Python scalar object and returned. + + * int_type: this argument is interpreted as a flat index into + the array, specifying which element to copy and return. + + * tuple of int_types: functions as does a single int_type argument, + except that the argument is interpreted as an nd-index into the + array. + + Returns + ------- + z : Standard Python scalar object + A copy of the specified element of the array as a suitable + Python scalar + + Notes + ----- + When the data type of `a` is longdouble or clongdouble, item() returns + a scalar array object because there is no available Python scalar that + would not lose information. Void arrays return a buffer object for item(), + unless fields are defined, in which case a tuple is returned. + + `item` is very similar to a[args], except, instead of an array scalar, + a standard Python scalar is returned. This can be useful for speeding up + access to elements of the array and doing arithmetic on elements of the + array using Python's optimized math. + + Examples + -------- + >>> import numpy as np + >>> np.random.seed(123) + >>> x = np.random.randint(9, size=(3, 3)) + >>> x + array([[2, 2, 6], + [1, 3, 6], + [1, 0, 1]]) + >>> x.item(3) + 1 + >>> x.item(7) + 0 + >>> x.item((0, 1)) + 2 + >>> x.item((2, 2)) + 1 + + For an array with object dtype, elements are returned as-is. + + >>> a = np.array([np.int64(1)], dtype=object) + >>> a.item() #return np.int64 + np.int64(1) + """) + +_KWARGS_REDUCE = "keepdims=, initial=, where=" + +_array_method_doc('max', "axis=None, out=None, **kwargs", + f""" + a.max(axis=None, out=None, *, {_KWARGS_REDUCE}) + + Return the maximum along a given axis. + + Refer to `numpy.amax` for full documentation. + + See Also + -------- + numpy.amax : equivalent function + """) + +_array_method_doc('min', "axis=None, out=None, **kwargs", + f""" + a.min(axis=None, out=None, *, {_KWARGS_REDUCE}) + + Return the minimum along a given axis. + + Refer to `numpy.amin` for full documentation. + + See Also + -------- + numpy.amin : equivalent function + """) + +_array_method_doc('prod', "axis=None, dtype=None, out=None, **kwargs", + f""" + a.prod(axis=None, dtype=None, out=None, *, {_KWARGS_REDUCE}) + + Return the product of the array elements over the given axis + + Refer to `numpy.prod` for full documentation. + + See Also + -------- + numpy.prod : equivalent function + """) + +_array_method_doc('sum', "axis=None, dtype=None, out=None, **kwargs", + f""" + a.sum(axis=None, dtype=None, out=None, *, {_KWARGS_REDUCE}) + + Return the sum of the array elements over the given axis. + + Refer to `numpy.sum` for full documentation. + + See Also + -------- + numpy.sum : equivalent function + """) + +_array_method_doc('mean', "axis=None, dtype=None, out=None, **kwargs", + """ + a.mean(axis=None, dtype=None, out=None, *, keepdims=, where=) + + Returns the average of the array elements along given axis. + + Refer to `numpy.mean` for full documentation. + + See Also + -------- + numpy.mean : equivalent function + """) + +_array_method_doc('nonzero', "", + """ + a.nonzero() + + Return the indices of the elements that are non-zero. + + Refer to `numpy.nonzero` for full documentation. + + See Also + -------- + numpy.nonzero : equivalent function + """) + +_array_method_doc('put', "indices, values, /, mode='raise'", + """ + a.put(indices, values, mode='raise') + + Set ``a.flat[n] = values[n]`` for all ``n`` in indices. + + Refer to `numpy.put` for full documentation. + + See Also + -------- + numpy.put : equivalent function + """) + +_array_method_doc('ravel', "order='C'", + """ + a.ravel(order='C') + + Return a flattened array. + + Refer to `numpy.ravel` for full documentation. + + See Also + -------- + numpy.ravel : equivalent function + ndarray.flat : a flat iterator on the array. + """) + +_array_method_doc('repeat', "repeats, /, axis=None", + """ + a.repeat(repeats, axis=None) + + Repeat elements of an array. + + Refer to `numpy.repeat` for full documentation. + + See Also + -------- + numpy.repeat : equivalent function + """) + +_array_method_doc('reshape', "*shape, order='C', copy=None", + """ + a.reshape(shape, /, *, order='C', copy=None) + a.reshape(*shape, order='C', copy=None) + + Returns an array containing the same data with a new shape. + + Refer to `numpy.reshape` for full documentation. + + See Also + -------- + numpy.reshape : equivalent function + + Notes + ----- + Unlike the free function `numpy.reshape`, this method on `ndarray` allows + the elements of the shape parameter to be passed in as separate arguments. + For example, ``a.reshape(4, 2)`` is equivalent to ``a.reshape((4, 2))``. + """) + +_array_method_doc('resize', "*new_shape, refcheck=True", + """ + a.resize(new_shape, /, *, refcheck=True) + a.resize(*new_shape, refcheck=True) + + Change shape and size of array in-place. + + Parameters + ---------- + new_shape : tuple of ints, or `n` ints + Shape of resized array. + refcheck : bool, optional + If False, reference count will not be checked. Default is True. + See Notes below for more explanation. + + Returns + ------- + None + + Raises + ------ + ValueError + If `a` does not own its own data or references or views to may exist. + + See Also + -------- + resize : Return a new array with the specified shape. + + Notes + ----- + This reallocates space for the data area if necessary. + + Only contiguous arrays (data elements consecutive in memory) can be + resized. + + Reallocating arrays in-place can often lead to memory fragmentation and + should be avoided. If the goal is to reclaim over-allocated memory, + alternatives are to create a view or a copy of just the desired data, or + using two passes to build the array: one to cheaply determine the shape and + another to allocate and fill. Benchmark your use case to determine what is + optimum. You may be surprised to find ``resize`` actually slows down or + bloats your application. + + The purpose of the reference count check is to make sure you + do not use this array as a buffer for another Python object and then + reallocate the memory. + + On Python 3.13 and older, the check allows objects with exactly one + reference to be reallocated in-place. On Python 3.14 and newer, the array + must be uniquely referenced. See [1]_ for more details. + + If you are sure that you have not shared the memory for this array with + another Python object, then you may safely set `refcheck` to False. + + + References + ---------- + .. [1] Python 3.14 What's New, https://docs.python.org/3/whatsnew/3.14.html#whatsnew314-refcount + + Examples + -------- + Shrinking an array: array is flattened (in the order that the data are + stored in memory), resized, and reshaped: + + >>> import numpy as np + + >>> a = np.array([[0, 1], [2, 3]], order='C') + >>> a.resize((2, 1)) + >>> a + array([[0], + [1]]) + + >>> a = np.array([[0, 1], [2, 3]], order='F') + >>> a.resize((2, 1)) + >>> a + array([[0], + [2]]) + + Enlarging an array: as above, but missing entries are filled with zeros: + + >>> b = np.array([[0, 1], [2, 3]]) + >>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple + >>> b + array([[0, 1, 2], + [3, 0, 0]]) + + Referencing an array prevents resizing... + + >>> c = a + >>> a.resize((1, 1)) + Traceback (most recent call last): + ... + ValueError: cannot resize an array that references or is referenced ... + + Unless `refcheck` is False: + + >>> a.resize((1, 1), refcheck=False) + >>> a + array([[0]]) + >>> c + array([[0]]) + """) + +_array_method_doc('round', "decimals=0, out=None", + """ + a.round(decimals=0, out=None) + + Return `a` with each element rounded to the given number of decimals. + + Refer to `numpy.around` for full documentation. + + See Also + -------- + numpy.around : equivalent function + """) + +_array_method_doc('searchsorted', "v, /, side='left', sorter=None", + """ + a.searchsorted(v, side='left', sorter=None) + + Find indices where elements of `v` should be inserted in `a` to maintain order. + + For full documentation, see `numpy.searchsorted`. + + See Also + -------- + numpy.searchsorted : equivalent function + """) + +_array_method_doc('setfield', "val, /, dtype, offset=0", + """ + a.setfield(val, dtype, offset=0) + + Put a value into a specified place in a field defined by a data-type. + + Place `val` into `a`'s field defined by `dtype` and beginning `offset` + bytes into the field. + + Parameters + ---------- + val : object + Value to be placed in field. + dtype : dtype object + Data-type of the field in which to place `val`. + offset : int, optional + The number of bytes into the field at which to place `val`. + + Returns + ------- + None + + See Also + -------- + getfield + + Examples + -------- + >>> import numpy as np + >>> x = np.eye(3) + >>> x.getfield(np.float64) + array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) + >>> x.setfield(3, np.int32) + >>> x.getfield(np.int32) + array([[3, 3, 3], + [3, 3, 3], + [3, 3, 3]], dtype=int32) + >>> x + array([[1.0e+000, 1.5e-323, 1.5e-323], + [1.5e-323, 1.0e+000, 1.5e-323], + [1.5e-323, 1.5e-323, 1.0e+000]]) + >>> x.setfield(np.eye(3), np.int32) + >>> x + array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) + """) + +_array_method_doc('setflags', "*, write=None, align=None, uic=None", + """ + a.setflags(write=None, align=None, uic=None) + + Set array flags WRITEABLE, ALIGNED, WRITEBACKIFCOPY, + respectively. + + These Boolean-valued flags affect how numpy interprets the memory + area used by `a` (see Notes below). The ALIGNED flag can only + be set to True if the data is actually aligned according to the type. + The WRITEBACKIFCOPY flag can never be set + to True. The flag WRITEABLE can only be set to True if the array owns its + own memory, or the ultimate owner of the memory exposes a writeable buffer + interface, or is a string. (The exception for string is made so that + unpickling can be done without copying memory.) + + Parameters + ---------- + write : bool, optional + Describes whether or not `a` can be written to. + align : bool, optional + Describes whether or not `a` is aligned properly for its type. + uic : bool, optional + Describes whether or not `a` is a copy of another "base" array. + + Notes + ----- + Array flags provide information about how the memory area used + for the array is to be interpreted. There are 7 Boolean flags + in use, only three of which can be changed by the user: + WRITEBACKIFCOPY, WRITEABLE, and ALIGNED. + + WRITEABLE (W) the data area can be written to; + + ALIGNED (A) the data and strides are aligned appropriately for the hardware + (as determined by the compiler); + + WRITEBACKIFCOPY (X) this array is a copy of some other array (referenced + by .base). When the C-API function PyArray_ResolveWritebackIfCopy is + called, the base array will be updated with the contents of this array. + + All flags can be accessed using the single (upper case) letter as well + as the full name. + + Examples + -------- + >>> import numpy as np + >>> y = np.array([[3, 1, 7], + ... [2, 0, 0], + ... [8, 5, 9]]) + >>> y + array([[3, 1, 7], + [2, 0, 0], + [8, 5, 9]]) + >>> y.flags + C_CONTIGUOUS : True + F_CONTIGUOUS : False + OWNDATA : True + WRITEABLE : True + ALIGNED : True + WRITEBACKIFCOPY : False + >>> y.setflags(write=0, align=0) + >>> y.flags + C_CONTIGUOUS : True + F_CONTIGUOUS : False + OWNDATA : True + WRITEABLE : False + ALIGNED : False + WRITEBACKIFCOPY : False + >>> y.setflags(uic=1) + Traceback (most recent call last): + File "", line 1, in + ValueError: cannot set WRITEBACKIFCOPY flag to True + """) + +_array_method_doc('sort', "axis=-1, kind=None, order=None, *, stable=None", + """ + a.sort(axis=-1, kind=None, order=None, *, stable=None) + + Sort an array in-place. Refer to `numpy.sort` for full documentation. + + Parameters + ---------- + axis : int, optional + Axis along which to sort. Default is -1, which means sort along the + last axis. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional + Sorting algorithm. The default is 'quicksort'. Note that both 'stable' + and 'mergesort' use timsort under the covers and, in general, the + actual implementation will vary with datatype. The 'mergesort' option + is retained for backwards compatibility. + order : str or list of str, optional + When `a` is an array with fields defined, this argument specifies + which fields to compare first, second, etc. A single field can + be specified as a string, and not all fields need be specified, + but unspecified fields will still be used, in the order in which + they come up in the dtype, to break ties. + stable : bool, optional + Sort stability. If ``True``, the returned array will maintain + the relative order of ``a`` values which compare as equal. + If ``False`` or ``None``, this is not guaranteed. Internally, + this option selects ``kind='stable'``. Default: ``None``. + + .. versionadded:: 2.0.0 + + See Also + -------- + numpy.sort : Return a sorted copy of an array. + numpy.argsort : Indirect sort. + numpy.lexsort : Indirect stable sort on multiple keys. + numpy.searchsorted : Find elements in sorted array. + numpy.partition: Partial sort. + + Notes + ----- + See `numpy.sort` for notes on the different sorting algorithms. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1,4], [3,1]]) + >>> a.sort(axis=1) + >>> a + array([[1, 4], + [1, 3]]) + >>> a.sort(axis=0) + >>> a + array([[1, 3], + [1, 4]]) + + Use the `order` keyword to specify a field to use when sorting a + structured array: + + >>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)]) + >>> a.sort(order='y') + >>> a + array([(b'c', 1), (b'a', 2)], + dtype=[('x', 'S1'), ('y', '>> import numpy as np + >>> a = np.uint32([1, 2]) + >>> a_list = list(a) + >>> a_list + [np.uint32(1), np.uint32(2)] + >>> type(a_list[0]) + + >>> a_tolist = a.tolist() + >>> a_tolist + [1, 2] + >>> type(a_tolist[0]) + + + Additionally, for a 2D array, ``tolist`` applies recursively: + + >>> a = np.array([[1, 2], [3, 4]]) + >>> list(a) + [array([1, 2]), array([3, 4])] + >>> a.tolist() + [[1, 2], [3, 4]] + + The base case for this recursion is a 0D array: + + >>> a = np.array(1) + >>> list(a) + Traceback (most recent call last): + ... + TypeError: iteration over a 0-d array + >>> a.tolist() + 1 + """) + +_array_method_doc('tobytes', "order='C'", + """ + a.tobytes(order='C') + + Construct Python bytes containing the raw data bytes in the array. + + Constructs Python bytes showing a copy of the raw contents of + data memory. The bytes object is produced in C-order by default. + This behavior is controlled by the ``order`` parameter. + + Parameters + ---------- + order : {'C', 'F', 'A'}, optional + Controls the memory layout of the bytes object. 'C' means C-order, + 'F' means F-order, 'A' (short for *Any*) means 'F' if `a` is + Fortran contiguous, 'C' otherwise. Default is 'C'. + + Returns + ------- + s : bytes + Python bytes exhibiting a copy of `a`'s raw data. + + See also + -------- + frombuffer + Inverse of this operation, construct a 1-dimensional array from Python + bytes. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([[0, 1], [2, 3]], dtype='>> x.tobytes() + b'\\x00\\x00\\x01\\x00\\x02\\x00\\x03\\x00' + >>> x.tobytes('C') == x.tobytes() + True + >>> x.tobytes('F') + b'\\x00\\x00\\x02\\x00\\x01\\x00\\x03\\x00' + """) + +_array_method_doc('trace', "offset=0, axis1=0, axis2=1, dtype=None, out=None", + """ + a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None) + + Return the sum along diagonals of the array. + + Refer to `numpy.trace` for full documentation. + + See Also + -------- + numpy.trace : equivalent function + """) + +_array_method_doc('transpose', "*axes", + """ + a.transpose(*axes) + + Returns a view of the array with axes transposed. + + Refer to `numpy.transpose` for full documentation. + + Parameters + ---------- + axes : None, tuple of ints, or `n` ints + + * None or no argument: reverses the order of the axes. + + * tuple of ints: `i` in the `j`-th place in the tuple means that the + array's `i`-th axis becomes the transposed array's `j`-th axis. + + * `n` ints: same as an n-tuple of the same ints (this form is + intended simply as a "convenience" alternative to the tuple form). + + Returns + ------- + p : ndarray + View of the array with its axes suitably permuted. + + See Also + -------- + transpose : Equivalent function. + ndarray.T : Array property returning the array transposed. + ndarray.reshape : Give a new shape to an array without changing its data. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4]]) + >>> a + array([[1, 2], + [3, 4]]) + >>> a.transpose() + array([[1, 3], + [2, 4]]) + >>> a.transpose((1, 0)) + array([[1, 3], + [2, 4]]) + >>> a.transpose(1, 0) + array([[1, 3], + [2, 4]]) + + >>> a = np.array([1, 2, 3, 4]) + >>> a + array([1, 2, 3, 4]) + >>> a.transpose() + array([1, 2, 3, 4]) + """) + +_array_method_doc('view', "*args, **kwargs", + """ + a.view([dtype][, type]) + + New view of array with the same data. + + .. note:: + Passing None for ``dtype`` is different from omitting the parameter, + since the former invokes ``dtype(None)`` which is an alias for + ``dtype('float64')``. + + Parameters + ---------- + dtype : data-type or ndarray sub-class, optional + Data-type descriptor of the returned view, e.g., float32 or int16. + Omitting it results in the view having the same data-type as `a`. + This argument can also be specified as an ndarray sub-class, which + then specifies the type of the returned object (this is equivalent to + setting the ``type`` parameter). + type : Python type, optional + Type of the returned view, e.g., ndarray or matrix. Again, omission + of the parameter results in type preservation. + + Notes + ----- + ``a.view()`` is used two different ways: + + ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view + of the array's memory with a different data-type. This can cause a + reinterpretation of the bytes of memory. + + ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just + returns an instance of `ndarray_subclass` that looks at the same array + (same shape, dtype, etc.) This does not cause a reinterpretation of the + memory. + + For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of + bytes per entry than the previous dtype (for example, converting a regular + array to a structured array), then the last axis of ``a`` must be + contiguous. This axis will be resized in the result. + + .. versionchanged:: 1.23.0 + Only the last axis needs to be contiguous. Previously, the entire array + had to be C-contiguous. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([(-1, 2)], dtype=[('a', np.int8), ('b', np.int8)]) + + Viewing array data using a different type and dtype: + + >>> nonneg = np.dtype([("a", np.uint8), ("b", np.uint8)]) + >>> y = x.view(dtype=nonneg, type=np.recarray) + >>> x["a"] + array([-1], dtype=int8) + >>> y.a + array([255], dtype=uint8) + + Creating a view on a structured array so it can be used in calculations + + >>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)]) + >>> xv = x.view(dtype=np.int8).reshape(-1,2) + >>> xv + array([[1, 2], + [3, 4]], dtype=int8) + >>> xv.mean(0) + array([2., 3.]) + + Making changes to the view changes the underlying array + + >>> xv[0,1] = 20 + >>> x + array([(1, 20), (3, 4)], dtype=[('a', 'i1'), ('b', 'i1')]) + + Using a view to convert an array to a recarray: + + >>> z = x.view(np.recarray) + >>> z.a + array([1, 3], dtype=int8) + + Views share data: + + >>> x[0] = (9, 10) + >>> z[0] + np.record((9, 10), dtype=[('a', 'i1'), ('b', 'i1')]) + + Views that change the dtype size (bytes per entry) should normally be + avoided on arrays defined by slices, transposes, fortran-ordering, etc.: + + >>> x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int16) + >>> y = x[:, ::2] + >>> y + array([[1, 3], + [4, 6]], dtype=int16) + >>> y.view(dtype=[('width', np.int16), ('length', np.int16)]) + Traceback (most recent call last): + ... + ValueError: To change to a dtype of a different size, the last axis must be contiguous + >>> z = y.copy() + >>> z.view(dtype=[('width', np.int16), ('length', np.int16)]) + array([[(1, 3)], + [(4, 6)]], dtype=[('width', '>> x = np.arange(2 * 3 * 4, dtype=np.int8).reshape(2, 3, 4) + >>> x.transpose(1, 0, 2).view(np.int16) + array([[[ 256, 770], + [3340, 3854]], + + [[1284, 1798], + [4368, 4882]], + + [[2312, 2826], + [5396, 5910]]], dtype=int16) + """) + + +############################################################################## +# +# umath functions +# +############################################################################## + +add_newdoc('numpy._core.umath', 'frompyfunc', + """ + frompyfunc(func, /, nin, nout, **kwargs) + -- + + frompyfunc(func, /, nin, nout, *[, identity]) + + Takes an arbitrary Python function and returns a NumPy ufunc. + + Can be used, for example, to add broadcasting to a built-in Python + function (see Examples section). + + Parameters + ---------- + func : Python function object + An arbitrary Python function. + nin : int + The number of input arguments. + nout : int + The number of objects returned by `func`. + identity : object, optional + The value to use for the `~numpy.ufunc.identity` attribute of the resulting + object. If specified, this is equivalent to setting the underlying + C ``identity`` field to ``PyUFunc_IdentityValue``. + If omitted, the identity is set to ``PyUFunc_None``. Note that this is + _not_ equivalent to setting the identity to ``None``, which implies the + operation is reorderable. + + Returns + ------- + out : ufunc + Returns a NumPy universal function (``ufunc``) object. + + See Also + -------- + vectorize : Evaluates pyfunc over input arrays using broadcasting rules of numpy. + + Notes + ----- + The returned ufunc always returns PyObject arrays. + + Examples + -------- + Use frompyfunc to add broadcasting to the Python function ``oct``: + + >>> import numpy as np + >>> oct_array = np.frompyfunc(oct, 1, 1) + >>> oct_array(np.array((10, 30, 100))) + array(['0o12', '0o36', '0o144'], dtype=object) + >>> np.array((oct(10), oct(30), oct(100))) # for comparison + array(['0o12', '0o36', '0o144'], dtype='doc is NULL.) + + Parameters + ---------- + ufunc : numpy.ufunc + A ufunc whose current doc is NULL. + new_docstring : string + The new docstring for the ufunc. + + Notes + ----- + This method allocates memory for new_docstring on + the heap. Technically this creates a memory leak, since this + memory will not be reclaimed until the end of the program + even if the ufunc itself is removed. However this will only + be a problem if the user is repeatedly creating ufuncs with + no documentation, adding documentation via add_newdoc_ufunc, + and then throwing away the ufunc. + """) + +add_newdoc('numpy._core.multiarray', 'get_handler_name', + """ + get_handler_name(a: ndarray) -> str | None + + Return the name of the memory handler used by `a`. If not provided, return + the name of the memory handler that will be used to allocate data for the + next `ndarray` in this context. May return None if `a` does not own its + memory, in which case you can traverse ``a.base`` for a memory handler. + """) + +add_newdoc('numpy._core.multiarray', 'get_handler_version', + """ + get_handler_version(a: ndarray) -> int,None + + Return the version of the memory handler used by `a`. If not provided, + return the version of the memory handler that will be used to allocate data + for the next `ndarray` in this context. May return None if `a` does not own + its memory, in which case you can traverse ``a.base`` for a memory handler. + """) + +add_newdoc('numpy._core._multiarray_umath', '_array_converter', + """ + _array_converter(*array_likes) + + Helper to convert one or more objects to arrays. Integrates machinery + to deal with the ``result_type`` and ``__array_wrap__``. + + The reason for this is that e.g. ``result_type`` needs to convert to arrays + to find the ``dtype``. But converting to an array before calling + ``result_type`` would incorrectly "forget" whether it was a Python int, + float, or complex. + """) + +add_newdoc( + 'numpy._core._multiarray_umath', '_array_converter', ('scalar_input', + """ + A tuple which indicates for each input whether it was a scalar that + was coerced to a 0-D array (and was not already an array or something + converted via a protocol like ``__array__()``). + """)) + +add_newdoc('numpy._core._multiarray_umath', '_array_converter', ('as_arrays', + """ + as_arrays(/, subok=True, pyscalars="convert_if_no_array") + + Return the inputs as arrays or scalars. + + Parameters + ---------- + subok : True or False, optional + Whether array subclasses are preserved. + pyscalars : {"convert", "preserve", "convert_if_no_array"}, optional + To allow NEP 50 weak promotion later, it may be desirable to preserve + Python scalars. As default, these are preserved unless all inputs + are Python scalars. "convert" enforces an array return. + """)) + +add_newdoc('numpy._core._multiarray_umath', '_array_converter', ('result_type', + """result_type(/, extra_dtype=None, ensure_inexact=False) + + Find the ``result_type`` just as ``np.result_type`` would, but taking + into account that the original inputs (before converting to an array) may + have been Python scalars with weak promotion. + + Parameters + ---------- + extra_dtype : dtype instance or class + An additional DType or dtype instance to promote (e.g. could be used + to ensure the result precision is at least float32). + ensure_inexact : True or False + When ``True``, ensures a floating point (or complex) result replacing + the ``arr * 1.`` or ``result_type(..., 0.0)`` pattern. + """)) + +add_newdoc('numpy._core._multiarray_umath', '_array_converter', ('wrap', + """ + wrap(arr, /, to_scalar=None) + + Call ``__array_wrap__`` on ``arr`` if ``arr`` is not the same subclass + as the input the ``__array_wrap__`` method was retrieved from. + + Parameters + ---------- + arr : ndarray + The object to be wrapped. Normally an ndarray or subclass, + although for backward compatibility NumPy scalars are also accepted + (these will be converted to a NumPy array before being passed on to + the ``__array_wrap__`` method). + to_scalar : {True, False, None}, optional + When ``True`` will convert a 0-d array to a scalar via ``result[()]`` + (with a fast-path for non-subclasses). If ``False`` the result should + be an array-like (as ``__array_wrap__`` is free to return a non-array). + By default (``None``), a scalar is returned if all inputs were scalar. + """)) + + +add_newdoc('numpy._core.multiarray', '_get_madvise_hugepage', + """ + _get_madvise_hugepage() -> bool + + Get use of ``madvise (2)`` MADV_HUGEPAGE support when + allocating the array data. Returns the currently set value. + See `global_state` for more information. + """) + +add_newdoc('numpy._core.multiarray', '_set_madvise_hugepage', + """ + _set_madvise_hugepage(enabled: bool) -> bool + + Set or unset use of ``madvise (2)`` MADV_HUGEPAGE support when + allocating the array data. Returns the previously set value. + See `global_state` for more information. + """) + + +############################################################################## +# +# Documentation for ufunc attributes and methods +# +############################################################################## + + +############################################################################## +# +# ufunc object +# +############################################################################## + +add_newdoc('numpy._core', 'ufunc', + """ + Functions that operate element by element on whole arrays. + + To see the documentation for a specific ufunc, use `info`. For + example, ``np.info(np.sin)``. Because ufuncs are written in C + (for speed) and linked into Python with NumPy's ufunc facility, + Python's help() function finds this page whenever help() is called + on a ufunc. + + A detailed explanation of ufuncs can be found in the docs for :ref:`ufuncs`. + + **Calling ufuncs:** ``op(*x[, out], where=True, **kwargs)`` + + Apply `op` to the arguments `*x` elementwise, broadcasting the arguments. + + The broadcasting rules are: + + * Dimensions of length 1 may be prepended to either array. + * Arrays may be repeated along dimensions of length 1. + + Parameters + ---------- + *x : array_like + Input arrays. + out : ndarray, None, ..., or tuple of ndarray and None, optional + Location(s) into which the result(s) are stored. + If not provided or None, new array(s) are created by the ufunc. + If passed as a keyword argument, can be Ellipses (``out=...``) to + ensure an array is returned even if the result is 0-dimensional, + or a tuple with length equal to the number of outputs (where None + can be used for allocation by the ufunc). + + .. versionadded:: 2.3 + Support for ``out=...`` was added. + + where : array_like, optional + This condition is broadcast over the input. At locations where the + condition is True, the `out` array will be set to the ufunc result. + Elsewhere, the `out` array will retain its original value. + Note that if an uninitialized `out` array is created via the default + ``out=None``, locations within it where the condition is False will + remain uninitialized. + **kwargs + For other keyword-only arguments, see the :ref:`ufunc docs `. + + Returns + ------- + r : ndarray or tuple of ndarray + `r` will have the shape that the arrays in `x` broadcast to; if `out` is + provided, it will be returned. If not, `r` will be allocated and + may contain uninitialized values. If the function has more than one + output, then the result will be a tuple of arrays. + + """) + + +############################################################################## +# +# ufunc attributes +# +############################################################################## + +add_newdoc('numpy._core', 'ufunc', ('identity', + """ + The identity value. + + Data attribute containing the identity element for the ufunc, + if it has one. If it does not, the attribute value is None. + + Examples + -------- + >>> import numpy as np + >>> np.add.identity + 0 + >>> np.multiply.identity + 1 + >>> print(np.power.identity) + None + >>> print(np.exp.identity) + None + """)) + +add_newdoc('numpy._core', 'ufunc', ('nargs', + """ + The number of arguments. + + Data attribute containing the number of arguments the ufunc takes, including + optional ones. + + Notes + ----- + Typically this value will be one more than what you might expect + because all ufuncs take the optional "out" argument. + + Examples + -------- + >>> import numpy as np + >>> np.add.nargs + 3 + >>> np.multiply.nargs + 3 + >>> np.power.nargs + 3 + >>> np.exp.nargs + 2 + """)) + +add_newdoc('numpy._core', 'ufunc', ('nin', + """ + The number of inputs. + + Data attribute containing the number of arguments the ufunc treats as input. + + Examples + -------- + >>> import numpy as np + >>> np.add.nin + 2 + >>> np.multiply.nin + 2 + >>> np.power.nin + 2 + >>> np.exp.nin + 1 + """)) + +add_newdoc('numpy._core', 'ufunc', ('nout', + """ + The number of outputs. + + Data attribute containing the number of arguments the ufunc treats as output. + + Notes + ----- + Since all ufuncs can take output arguments, this will always be at least 1. + + Examples + -------- + >>> import numpy as np + >>> np.add.nout + 1 + >>> np.multiply.nout + 1 + >>> np.power.nout + 1 + >>> np.exp.nout + 1 + + """)) + +add_newdoc('numpy._core', 'ufunc', ('ntypes', + """ + The number of types. + + The number of numerical NumPy types - of which there are 18 total - on which + the ufunc can operate. + + See Also + -------- + numpy.ufunc.types + + Examples + -------- + >>> import numpy as np + >>> np.add.ntypes + 22 + >>> np.multiply.ntypes + 23 + >>> np.power.ntypes + 21 + >>> np.exp.ntypes + 10 + >>> np.remainder.ntypes + 16 + + """)) + +add_newdoc('numpy._core', 'ufunc', ('types', + """ + Returns a list with types grouped input->output. + + Data attribute listing the data-type "Domain-Range" groupings the ufunc can + deliver. The data-types are given using the character codes. + + See Also + -------- + numpy.ufunc.ntypes + + Examples + -------- + >>> import numpy as np + >>> np.add.types + ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', ... + + >>> np.power.types + ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', ... + + >>> np.exp.types + ['e->e', 'f->f', 'd->d', 'f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O'] + + >>> np.remainder.types + ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', ... + + """)) + +add_newdoc('numpy._core', 'ufunc', ('signature', + """ + Definition of the core elements a generalized ufunc operates on. + + The signature determines how the dimensions of each input/output array + are split into core and loop dimensions: + + 1. Each dimension in the signature is matched to a dimension of the + corresponding passed-in array, starting from the end of the shape tuple. + 2. Core dimensions assigned to the same label in the signature must have + exactly matching sizes, no broadcasting is performed. + 3. The core dimensions are removed from all inputs and the remaining + dimensions are broadcast together, defining the loop dimensions. + + Notes + ----- + Generalized ufuncs are used internally in many linalg functions, and in + the testing suite; the examples below are taken from these. + For ufuncs that operate on scalars, the signature is None, which is + equivalent to '()' for every argument. + + Examples + -------- + >>> import numpy as np + >>> np.linalg._umath_linalg.det.signature + '(m,m)->()' + >>> np.matmul.signature + '(n?,k),(k,m?)->(n?,m?)' + >>> np.add.signature is None + True # equivalent to '(),()->()' + """)) + +############################################################################## +# +# ufunc methods +# +############################################################################## + +add_newdoc('numpy._core', 'ufunc', ('reduce', + """ + reduce($self, array, /, axis=0, dtype=None, out=None, **kwargs) + -- + + reduce(array, axis=0, dtype=None, out=None, keepdims=False, initial=, where=True) + + Reduces `array`'s dimension by one, by applying ufunc along one axis. + + Let :math:`array.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then + :math:`ufunc.reduce(array, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` = + the result of iterating `j` over :math:`range(N_i)`, cumulatively applying + ufunc to each :math:`array[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`. + For a one-dimensional array, reduce produces results equivalent to: + :: + + r = op.identity # op = ufunc + for i in range(len(A)): + r = op(r, A[i]) + return r + + For example, add.reduce() is equivalent to sum(). + + Parameters + ---------- + array : array_like + The array to act on. + axis : None or int or tuple of ints, optional + Axis or axes along which a reduction is performed. + The default (`axis` = 0) is perform a reduction over the first + dimension of the input array. `axis` may be negative, in + which case it counts from the last to the first axis. + + If this is None, a reduction is performed over all the axes. + If this is a tuple of ints, a reduction is performed on multiple + axes, instead of a single axis or all the axes as before. + + For operations which are either not commutative or not associative, + doing a reduction over multiple axes is not well-defined. The + ufuncs do not currently raise an exception in this case, but will + likely do so in the future. + dtype : data-type code, optional + The data type used to perform the operation. Defaults to that of + ``out`` if given, and the data type of ``array`` otherwise (though + upcast to conserve precision for some cases, such as + ``numpy.add.reduce`` for integer or boolean input). + out : ndarray, None, ..., or tuple of ndarray and None, optional + Location into which the result is stored. + If not provided or None, a freshly-allocated array is returned. + If passed as a keyword argument, can be Ellipses (``out=...``) to + ensure an array is returned even if the result is 0-dimensional + (which is useful especially for object dtype), or a 1-element tuple + (latter for consistency with ``ufunc.__call__``). + + .. versionadded:: 2.3 + Support for ``out=...`` was added. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `array`. + initial : scalar, optional + The value with which to start the reduction. + If the ufunc has no identity or the dtype is object, this defaults + to None - otherwise it defaults to ufunc.identity. + If ``None`` is given, the first element of the reduction is used, + and an error is thrown if the reduction is empty. + where : array_like of bool, optional + A boolean array which is broadcasted to match the dimensions + of `array`, and selects elements to include in the reduction. Note + that for ufuncs like ``minimum`` that do not have an identity + defined, one has to pass in also ``initial``. + + Returns + ------- + r : ndarray + The reduced array. If `out` was supplied, `r` is a reference to it. + + Examples + -------- + >>> import numpy as np + >>> np.multiply.reduce([2,3,5]) + 30 + + A multi-dimensional array example: + + >>> X = np.arange(8).reshape((2,2,2)) + >>> X + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> np.add.reduce(X, 0) + array([[ 4, 6], + [ 8, 10]]) + >>> np.add.reduce(X) # confirm: default axis value is 0 + array([[ 4, 6], + [ 8, 10]]) + >>> np.add.reduce(X, 1) + array([[ 2, 4], + [10, 12]]) + >>> np.add.reduce(X, 2) + array([[ 1, 5], + [ 9, 13]]) + + You can use the ``initial`` keyword argument to initialize the reduction + with a different value, and ``where`` to select specific elements to include: + + >>> np.add.reduce([10], initial=5) + 15 + >>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initial=10) + array([14., 14.]) + >>> a = np.array([10., np.nan, 10]) + >>> np.add.reduce(a, where=~np.isnan(a)) + 20.0 + + Allows reductions of empty arrays where they would normally fail, i.e. + for ufuncs without an identity. + + >>> np.minimum.reduce([], initial=np.inf) + inf + >>> np.minimum.reduce([[1., 2.], [3., 4.]], initial=10., where=[True, False]) + array([ 1., 10.]) + >>> np.minimum.reduce([]) + Traceback (most recent call last): + ... + ValueError: zero-size array to reduction operation minimum which has no identity + """)) + +add_newdoc('numpy._core', 'ufunc', ('accumulate', + """ + accumulate($self, array, /, axis=0, dtype=None, out=None) + -- + + accumulate(array, axis=0, dtype=None, out=None) + + Accumulate the result of applying the operator to all elements. + + For a one-dimensional array, accumulate produces results equivalent to:: + + r = np.empty(len(A)) + t = op.identity # op = the ufunc being applied to A's elements + for i in range(len(A)): + t = op(t, A[i]) + r[i] = t + return r + + For example, add.accumulate() is equivalent to np.cumsum(). + + For a multi-dimensional array, accumulate is applied along only one + axis (axis zero by default; see Examples below) so repeated use is + necessary if one wants to accumulate over multiple axes. + + Parameters + ---------- + array : array_like + The array to act on. + axis : int, optional + The axis along which to apply the accumulation; default is zero. + dtype : data-type code, optional + The data-type used to represent the intermediate results. Defaults + to the data-type of the output array if such is provided, or the + data-type of the input array if no output array is provided. + out : ndarray, None, or tuple of ndarray and None, optional + Location into which the result is stored. + If not provided or None, a freshly-allocated array is returned. + For consistency with ``ufunc.__call__``, if passed as a keyword + argument, can be Ellipses (``out=...``, which has the same effect + as None as an array is always returned), or a 1-element tuple. + + Returns + ------- + r : ndarray + The accumulated values. If `out` was supplied, `r` is a reference to + `out`. + + Examples + -------- + 1-D array examples: + + >>> import numpy as np + >>> np.add.accumulate([2, 3, 5]) + array([ 2, 5, 10]) + >>> np.multiply.accumulate([2, 3, 5]) + array([ 2, 6, 30]) + + 2-D array examples: + + >>> I = np.eye(2) + >>> I + array([[1., 0.], + [0., 1.]]) + + Accumulate along axis 0 (rows), down columns: + + >>> np.add.accumulate(I, 0) + array([[1., 0.], + [1., 1.]]) + >>> np.add.accumulate(I) # no axis specified = axis zero + array([[1., 0.], + [1., 1.]]) + + Accumulate along axis 1 (columns), through rows: + + >>> np.add.accumulate(I, 1) + array([[1., 1.], + [0., 1.]]) + + """)) + +add_newdoc('numpy._core', 'ufunc', ('reduceat', + """ + reduceat($self, array, /, indices, axis=0, dtype=None, out=None) + -- + + reduceat(array, indices, axis=0, dtype=None, out=None) + + Performs a (local) reduce with specified slices over a single axis. + + For i in ``range(len(indices))``, `reduceat` computes + ``ufunc.reduce(array[indices[i]:indices[i+1]])``, which becomes the i-th + generalized "row" parallel to `axis` in the final result (i.e., in a + 2-D array, for example, if `axis = 0`, it becomes the i-th row, but if + `axis = 1`, it becomes the i-th column). There are three exceptions to this: + + * when ``i = len(indices) - 1`` (so for the last index), + ``indices[i+1] = array.shape[axis]``. + * if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is + simply ``array[indices[i]]``. + * if ``indices[i] >= len(array)`` or ``indices[i] < 0``, an error is raised. + + The shape of the output depends on the size of `indices`, and may be + larger than `array` (this happens if ``len(indices) > array.shape[axis]``). + + Parameters + ---------- + array : array_like + The array to act on. + indices : array_like + Paired indices, comma separated (not colon), specifying slices to + reduce. + axis : int, optional + The axis along which to apply the reduceat. + dtype : data-type code, optional + The data type used to perform the operation. Defaults to that of + ``out`` if given, and the data type of ``array`` otherwise (though + upcast to conserve precision for some cases, such as + ``numpy.add.reduce`` for integer or boolean input). + out : ndarray, None, or tuple of ndarray and None, optional + Location into which the result is stored. + If not provided or None, a freshly-allocated array is returned. + For consistency with ``ufunc.__call__``, if passed as a keyword + argument, can be Ellipses (``out=...``, which has the same effect + as None as an array is always returned), or a 1-element tuple. + + Returns + ------- + r : ndarray + The reduced values. If `out` was supplied, `r` is a reference to + `out`. + + Notes + ----- + A descriptive example: + + If `array` is 1-D, the function `ufunc.accumulate(array)` is the same as + ``ufunc.reduceat(array, indices)[::2]`` where `indices` is + ``range(len(array) - 1)`` with a zero placed + in every other element: + ``indices = zeros(2 * len(array) - 1)``, + ``indices[1::2] = range(1, len(array))``. + + Don't be fooled by this attribute's name: `reduceat(array)` is not + necessarily smaller than `array`. + + Examples + -------- + To take the running sum of four successive values: + + >>> import numpy as np + >>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2] + array([ 6, 10, 14, 18]) + + A 2-D example: + + >>> x = np.linspace(0, 15, 16).reshape(4,4) + >>> x + array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) + + :: + + # reduce such that the result has the following five rows: + # [row1 + row2 + row3] + # [row4] + # [row2] + # [row3] + # [row1 + row2 + row3 + row4] + + >>> np.add.reduceat(x, [0, 3, 1, 2, 0]) + array([[12., 15., 18., 21.], + [12., 13., 14., 15.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [24., 28., 32., 36.]]) + + :: + + # reduce such that result has the following two columns: + # [col1 * col2 * col3, col4] + + >>> np.multiply.reduceat(x, [0, 3], 1) + array([[ 0., 3.], + [ 120., 7.], + [ 720., 11.], + [2184., 15.]]) + + """)) + +add_newdoc('numpy._core', 'ufunc', ('outer', + r""" + outer($self, A, B, /, **kwargs) + -- + + outer(A, B, /, **kwargs) + + Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`. + + Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of + ``op.outer(A, B)`` is an array of dimension M + N such that: + + .. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] = + op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}]) + + For `A` and `B` one-dimensional, this is equivalent to:: + + r = empty(len(A),len(B)) + for i in range(len(A)): + for j in range(len(B)): + r[i,j] = op(A[i], B[j]) # op = ufunc in question + + Parameters + ---------- + A : array_like + First array + B : array_like + Second array + kwargs : any + Arguments to pass on to the ufunc. Typically `dtype` or `out`. + See `ufunc` for a comprehensive overview of all available arguments. + + Returns + ------- + r : ndarray + Output array + + See Also + -------- + numpy.outer : A less powerful version of ``np.multiply.outer`` + that `ravel`\ s all inputs to 1D. This exists + primarily for compatibility with old code. + + tensordot : ``np.tensordot(a, b, axes=((), ()))`` and + ``np.multiply.outer(a, b)`` behave same for all + dimensions of a and b. + + Examples + -------- + >>> np.multiply.outer([1, 2, 3], [4, 5, 6]) + array([[ 4, 5, 6], + [ 8, 10, 12], + [12, 15, 18]]) + + A multi-dimensional example: + + >>> A = np.array([[1, 2, 3], [4, 5, 6]]) + >>> A.shape + (2, 3) + >>> B = np.array([[1, 2, 3, 4]]) + >>> B.shape + (1, 4) + >>> C = np.multiply.outer(A, B) + >>> C.shape; C + (2, 3, 1, 4) + array([[[[ 1, 2, 3, 4]], + [[ 2, 4, 6, 8]], + [[ 3, 6, 9, 12]]], + [[[ 4, 8, 12, 16]], + [[ 5, 10, 15, 20]], + [[ 6, 12, 18, 24]]]]) + + """)) + +add_newdoc('numpy._core', 'ufunc', ('at', + """ + at($self, a, indices, b=None, /) + -- + + at(a, indices, b=None, /) + + Performs unbuffered in place operation on operand 'a' for elements + specified by 'indices'. For addition ufunc, this method is equivalent to + ``a[indices] += b``, except that results are accumulated for elements that + are indexed more than once. For example, ``a[[0,0]] += 1`` will only + increment the first element once because of buffering, whereas + ``add.at(a, [0,0], 1)`` will increment the first element twice. + + Parameters + ---------- + a : array_like + The array to perform in place operation on. + indices : array_like or tuple + Array like index object or slice object for indexing into first + operand. If first operand has multiple dimensions, indices can be a + tuple of array like index objects or slice objects. + b : array_like + Second operand for ufuncs requiring two operands. Operand must be + broadcastable over first operand after indexing or slicing. + + Examples + -------- + Set items 0 and 1 to their negative values: + + >>> import numpy as np + >>> a = np.array([1, 2, 3, 4]) + >>> np.negative.at(a, [0, 1]) + >>> a + array([-1, -2, 3, 4]) + + Increment items 0 and 1, and increment item 2 twice: + + >>> a = np.array([1, 2, 3, 4]) + >>> np.add.at(a, [0, 1, 2, 2], 1) + >>> a + array([2, 3, 5, 4]) + + Add items 0 and 1 in first array to second array, + and store results in first array: + + >>> a = np.array([1, 2, 3, 4]) + >>> b = np.array([1, 2]) + >>> np.add.at(a, [0, 1], b) + >>> a + array([2, 4, 3, 4]) + + """)) + +add_newdoc('numpy._core', 'ufunc', ('resolve_dtypes', + """ + resolve_dtypes($self, dtypes, *, signature=None, casting=None, reduction=False) + -- + + resolve_dtypes(dtypes, *, signature=None, casting=None, reduction=False) + + Find the dtypes NumPy will use for the operation. Both input and + output dtypes are returned and may differ from those provided. + + .. note:: + + This function always applies NEP 50 rules since it is not provided + any actual values. The Python types ``int``, ``float``, and + ``complex`` thus behave weak and should be passed for "untyped" + Python input. + + Parameters + ---------- + dtypes : tuple of dtypes, None, or literal int, float, complex + The input dtypes for each operand. Output operands can be + None, indicating that the dtype must be found. + signature : tuple of DTypes or None, optional + If given, enforces exact DType (classes) of the specific operand. + The ufunc ``dtype`` argument is equivalent to passing a tuple with + only output dtypes set. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + The casting mode when casting is necessary. This is identical to + the ufunc call casting modes. + reduction : boolean + If given, the resolution assumes a reduce operation is happening + which slightly changes the promotion and type resolution rules. + `dtypes` is usually something like ``(None, np.dtype("i2"), None)`` + for reductions (first input is also the output). + + .. note:: + + The default casting mode is "same_kind", however, as of + NumPy 1.24, NumPy uses "unsafe" for reductions. + + Returns + ------- + dtypes : tuple of dtypes + The dtypes which NumPy would use for the calculation. Note that + dtypes may not match the passed in ones (casting is necessary). + + + Examples + -------- + This API requires passing dtypes, define them for convenience: + + >>> import numpy as np + >>> int32 = np.dtype("int32") + >>> float32 = np.dtype("float32") + + The typical ufunc call does not pass an output dtype. `numpy.add` has two + inputs and one output, so leave the output as ``None`` (not provided): + + >>> np.add.resolve_dtypes((int32, float32, None)) + (dtype('float64'), dtype('float64'), dtype('float64')) + + The loop found uses "float64" for all operands (including the output), the + first input would be cast. + + ``resolve_dtypes`` supports "weak" handling for Python scalars by passing + ``int``, ``float``, or ``complex``: + + >>> np.add.resolve_dtypes((float32, float, None)) + (dtype('float32'), dtype('float32'), dtype('float32')) + + Where the Python ``float`` behaves similar to a Python value ``0.0`` + in a ufunc call. (See :ref:`NEP 50 ` for details.) + + """)) + +add_newdoc('numpy._core', 'ufunc', ('_resolve_dtypes_and_context', + """ + _resolve_dtypes_and_context($self, dtypes, *, signature=None, casting=None, reduction=False) + -- + + _resolve_dtypes_and_context(dtypes, *, signature=None, casting=None, reduction=False) + + See `numpy.ufunc.resolve_dtypes` for parameter information. This + function is considered *unstable*. You may use it, but the returned + information is NumPy version specific and expected to change. + Large API/ABI changes are not expected, but a new NumPy version is + expected to require updating code using this functionality. + + This function is designed to be used in conjunction with + `numpy.ufunc._get_strided_loop`. The calls are split to mirror the C API + and allow future improvements. + + Returns + ------- + dtypes : tuple of dtypes + call_info : + PyCapsule with all necessary information to get access to low level + C calls. See `numpy.ufunc._get_strided_loop` for more information. + + """)) + +add_newdoc('numpy._core', 'ufunc', ('_get_strided_loop', + """ + _get_strided_loop($self, call_info, /, *, fixed_strides=None) + -- + + _get_strided_loop(call_info, /, *, fixed_strides=None) + + This function fills in the ``call_info`` capsule to include all + information necessary to call the low-level strided loop from NumPy. + + See notes for more information. + + Parameters + ---------- + call_info : PyCapsule + The PyCapsule returned by `numpy.ufunc._resolve_dtypes_and_context`. + fixed_strides : tuple of int or None, optional + A tuple with fixed byte strides of all input arrays. NumPy may use + this information to find specialized loops, so any call must follow + the given stride. Use ``None`` to indicate that the stride is not + known (or not fixed) for all calls. + + Notes + ----- + Together with `numpy.ufunc._resolve_dtypes_and_context` this function + gives low-level access to the NumPy ufunc loops. + The first function does general preparation and returns the required + information. It returns this as a C capsule with the version specific + name ``numpy_1.24_ufunc_call_info``. + The NumPy 1.24 ufunc call info capsule has the following layout:: + + typedef struct { + PyArrayMethod_StridedLoop *strided_loop; + PyArrayMethod_Context *context; + NpyAuxData *auxdata; + + /* Flag information (expected to change) */ + npy_bool requires_pyapi; /* GIL is required by loop */ + + /* Loop doesn't set FPE flags; if not set check FPE flags */ + npy_bool no_floatingpoint_errors; + } ufunc_call_info; + + Note that the first call only fills in the ``context``. The call to + ``_get_strided_loop`` fills in all other data. The main thing to note is + that the new-style loops return 0 on success, -1 on failure. They are + passed context as new first input and ``auxdata`` as (replaced) last. + + Only the ``strided_loop``signature is considered guaranteed stable + for NumPy bug-fix releases. All other API is tied to the experimental + API versioning. + + The reason for the split call is that cast information is required to + decide what the fixed-strides will be. + + NumPy ties the lifetime of the ``auxdata`` information to the capsule. + + """)) + + +############################################################################## +# +# Documentation for dtype attributes and methods +# +############################################################################## + +############################################################################## +# +# dtype object +# +############################################################################## + +add_newdoc('numpy._core.multiarray', 'dtype', + """ + dtype(dtype, align=False, copy=False, **kwargs) + -- + + dtype(dtype, align=False, copy=False, [metadata]) + + Create a data type object. + + A numpy array is homogeneous, and contains elements described by a + dtype object. A dtype object can be constructed from different + combinations of fundamental numeric types. + + Parameters + ---------- + dtype + Object to be converted to a data type object. + align : bool, optional + Add padding to the fields to match what a C compiler would output + for a similar C-struct. Can be ``True`` only if `obj` is a dictionary + or a comma-separated string. If a struct dtype is being created, + this also sets a sticky alignment flag ``isalignedstruct``. + copy : bool, optional + Make a new copy of the data-type object. If ``False``, the result + may just be a reference to a built-in data-type object. + metadata : dict, optional + An optional dictionary with dtype metadata. + + See also + -------- + result_type + + Examples + -------- + Using array-scalar type: + + >>> import numpy as np + >>> np.dtype(np.int16) + dtype('int16') + + Structured type, one field name 'f1', containing int16: + + >>> np.dtype([('f1', np.int16)]) + dtype([('f1', '>> np.dtype([('f1', [('f1', np.int16)])]) + dtype([('f1', [('f1', '>> np.dtype([('f1', np.uint64), ('f2', np.int32)]) + dtype([('f1', '>> np.dtype([('a','f8'),('b','S10')]) + dtype([('a', '>> np.dtype("i4, (2,3)f8") + dtype([('f0', '>> np.dtype([('hello',(np.int64,3)),('world',np.void,10)]) + dtype([('hello', '>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)})) + dtype((numpy.int16, [('x', 'i1'), ('y', 'i1')])) + + Using dictionaries. Two fields named 'gender' and 'age': + + >>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]}) + dtype([('gender', 'S1'), ('age', 'u1')]) + + Offsets in bytes, here 0 and 25: + + >>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)}) + dtype([('surname', 'S25'), ('age', 'u1')]) + + """) + +############################################################################## +# +# dtype attributes +# +############################################################################## + +add_newdoc('numpy._core.multiarray', 'dtype', ('alignment', + """ + The required alignment (bytes) of this data-type according to the compiler. + + More information is available in the C-API section of the manual. + + Examples + -------- + + >>> import numpy as np + >>> x = np.dtype('i4') + >>> x.alignment + 4 + + >>> x = np.dtype(float) + >>> x.alignment + 8 + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('byteorder', + """ + A character indicating the byte-order of this data-type object. + + One of: + + === ============== + '=' native + '<' little-endian + '>' big-endian + '|' not applicable + === ============== + + All built-in data-type objects have byteorder either '=' or '|'. + + Examples + -------- + + >>> import numpy as np + >>> dt = np.dtype('i2') + >>> dt.byteorder + '=' + >>> # endian is not relevant for 8 bit numbers + >>> np.dtype('i1').byteorder + '|' + >>> # or ASCII strings + >>> np.dtype('S2').byteorder + '|' + >>> # Even if specific code is given, and it is native + >>> # '=' is the byteorder + >>> import sys + >>> sys_is_le = sys.byteorder == 'little' + >>> native_code = '<' if sys_is_le else '>' + >>> swapped_code = '>' if sys_is_le else '<' + >>> dt = np.dtype(native_code + 'i2') + >>> dt.byteorder + '=' + >>> # Swapped code shows up as itself + >>> dt = np.dtype(swapped_code + 'i2') + >>> dt.byteorder == swapped_code + True + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('char', + """A unique character code for each of the 21 different built-in types. + + Examples + -------- + + >>> import numpy as np + >>> x = np.dtype(float) + >>> x.char + 'd' + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('descr', + """ + `__array_interface__` description of the data-type. + + The format is that required by the 'descr' key in the + `__array_interface__` attribute. + + Warning: This attribute exists specifically for `__array_interface__`, + and passing it directly to `numpy.dtype` will not accurately reconstruct + some dtypes (e.g., scalar and subarray dtypes). + + Examples + -------- + + >>> import numpy as np + >>> x = np.dtype(float) + >>> x.descr + [('', '>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + >>> dt.descr + [('name', '>> import numpy as np + >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + >>> print(dt.fields) + {'name': (dtype('>> import numpy as np + >>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)]) + >>> x.flags + 16 + >>> np._core.multiarray.NEEDS_PYAPI + 16 + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('hasobject', + """ + Boolean indicating whether this dtype contains any reference-counted + objects in any fields or sub-dtypes. + + Recall that what is actually in the ndarray memory representing + the Python object is the memory address of that object (a pointer). + Special handling may be required, and this attribute is useful for + distinguishing data types that may contain arbitrary Python objects + and data-types that won't. + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('isbuiltin', + """ + Integer indicating how this dtype relates to the built-in dtypes. + + Read-only. + + = ======================================================================== + 0 if this is a structured array type, with fields + 1 if this is a dtype compiled into numpy (such as ints, floats etc) + 2 if the dtype is for a user-defined numpy type + A user-defined type uses the numpy C-API machinery to extend + numpy to handle a new array type. See + :ref:`user.user-defined-data-types` in the NumPy manual. + = ======================================================================== + + Examples + -------- + + >>> import numpy as np + >>> dt = np.dtype('i2') + >>> dt.isbuiltin + 1 + >>> dt = np.dtype('f8') + >>> dt.isbuiltin + 1 + >>> dt = np.dtype([('field1', 'f8')]) + >>> dt.isbuiltin + 0 + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('isnative', + """ + Boolean indicating whether the byte order of this dtype is native + to the platform. + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('isalignedstruct', + """ + Boolean indicating whether the dtype is a struct which maintains + field alignment. This flag is sticky, so when combining multiple + structs together, it is preserved and produces new dtypes which + are also aligned. + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('itemsize', + """ + The element size of this data-type object. + + For 18 of the 21 types this number is fixed by the data-type. + For the flexible data-types, this number can be anything. + + Examples + -------- + + >>> import numpy as np + >>> arr = np.array([[1, 2], [3, 4]]) + >>> arr.dtype + dtype('int64') + >>> arr.itemsize + 8 + + >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + >>> dt.itemsize + 80 + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('kind', + """ + A character code (one of 'biufcmMOSTUV') identifying the general kind of data. + + = ====================== + b boolean + i signed integer + u unsigned integer + f floating-point + c complex floating-point + m timedelta + M datetime + O object + S (byte-)string + T string (StringDType) + U Unicode + V void + = ====================== + + Examples + -------- + + >>> import numpy as np + >>> dt = np.dtype('i4') + >>> dt.kind + 'i' + >>> dt = np.dtype('f8') + >>> dt.kind + 'f' + >>> dt = np.dtype([('field1', 'f8')]) + >>> dt.kind + 'V' + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('metadata', + """ + Either ``None`` or a readonly dictionary of metadata (mappingproxy). + + The metadata field can be set using any dictionary at data-type + creation. NumPy currently has no uniform approach to propagating + metadata; although some array operations preserve it, there is no + guarantee that others will. + + .. warning:: + + Although used in certain projects, this feature was long undocumented + and is not well supported. Some aspects of metadata propagation + are expected to change in the future. + + Examples + -------- + + >>> import numpy as np + >>> dt = np.dtype(float, metadata={"key": "value"}) + >>> dt.metadata["key"] + 'value' + >>> arr = np.array([1, 2, 3], dtype=dt) + >>> arr.dtype.metadata + mappingproxy({'key': 'value'}) + + Adding arrays with identical datatypes currently preserves the metadata: + + >>> (arr + arr).dtype.metadata + mappingproxy({'key': 'value'}) + + If the arrays have different dtype metadata, the first one wins: + + >>> dt2 = np.dtype(float, metadata={"key2": "value2"}) + >>> arr2 = np.array([3, 2, 1], dtype=dt2) + >>> print((arr + arr2).dtype.metadata) + {'key': 'value'} + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('name', + """ + A bit-width name for this data-type. + + Un-sized flexible data-type objects do not have this attribute. + + Examples + -------- + + >>> import numpy as np + >>> x = np.dtype(float) + >>> x.name + 'float64' + >>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)]) + >>> x.name + 'void640' + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('names', + """ + Ordered list of field names, or ``None`` if there are no fields. + + The names are ordered according to increasing byte offset. This can be + used, for example, to walk through all of the named fields in offset order. + + Examples + -------- + >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + >>> dt.names + ('name', 'grades') + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('num', + """ + A unique number for each of the 21 different built-in types. + + These are roughly ordered from least-to-most precision. + + Examples + -------- + + >>> import numpy as np + >>> dt = np.dtype(str) + >>> dt.num + 19 + + >>> dt = np.dtype(float) + >>> dt.num + 12 + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('shape', + """ + Shape tuple of the sub-array if this data type describes a sub-array, + and ``()`` otherwise. + + Examples + -------- + + >>> import numpy as np + >>> dt = np.dtype(('i4', 4)) + >>> dt.shape + (4,) + + >>> dt = np.dtype(('i4', (2, 3))) + >>> dt.shape + (2, 3) + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('ndim', + """ + Number of dimensions of the sub-array if this data type describes a + sub-array, and ``0`` otherwise. + + Examples + -------- + >>> import numpy as np + >>> x = np.dtype(float) + >>> x.ndim + 0 + + >>> x = np.dtype((float, 8)) + >>> x.ndim + 1 + + >>> x = np.dtype(('i4', (3, 4))) + >>> x.ndim + 2 + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('str', + """The array-protocol typestring of this data-type object.""")) + +add_newdoc('numpy._core.multiarray', 'dtype', ('subdtype', + """ + Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and + None otherwise. + + The *shape* is the fixed shape of the sub-array described by this + data type, and *item_dtype* the data type of the array. + + If a field whose dtype object has this attribute is retrieved, + then the extra dimensions implied by *shape* are tacked on to + the end of the retrieved array. + + See Also + -------- + dtype.base + + Examples + -------- + >>> import numpy as np + >>> x = np.dtype('8f') + >>> x.subdtype + (dtype('float32'), (8,)) + + >>> x = np.dtype('i2') + >>> x.subdtype + >>> + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('base', + """ + Returns dtype for the base element of the subarrays, + regardless of their dimension or shape. + + See Also + -------- + dtype.subdtype + + Examples + -------- + >>> import numpy as np + >>> x = np.dtype('8f') + >>> x.base + dtype('float32') + + >>> x = np.dtype('i2') + >>> x.base + dtype('int16') + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('type', + """The type object used to instantiate a scalar of this data-type.""")) + +############################################################################## +# +# dtype methods +# +############################################################################## + +add_newdoc('numpy._core.multiarray', 'dtype', ('newbyteorder', + """ + newbyteorder($self, new_order='S', /) + -- + + newbyteorder(new_order='S', /) + + Return a new dtype with a different byte order. + + Changes are also made in all fields and sub-arrays of the data type. + + Parameters + ---------- + new_order : string, optional + Byte order to force; a value from the byte order specifications + below. The default value ('S') results in swapping the current + byte order. `new_order` codes can be any of: + + * 'S' - swap dtype from current to opposite endian + * {'<', 'little'} - little endian + * {'>', 'big'} - big endian + * {'=', 'native'} - native order + * {'|', 'I'} - ignore (no change to byte order) + + Returns + ------- + new_dtype : dtype + New dtype object with the given change to the byte order. + + Notes + ----- + Changes are also made in all fields and sub-arrays of the data type. + + Examples + -------- + >>> import sys + >>> sys_is_le = sys.byteorder == 'little' + >>> native_code = '<' if sys_is_le else '>' + >>> swapped_code = '>' if sys_is_le else '<' + >>> import numpy as np + >>> native_dt = np.dtype(native_code+'i2') + >>> swapped_dt = np.dtype(swapped_code+'i2') + >>> native_dt.newbyteorder('S') == swapped_dt + True + >>> native_dt.newbyteorder() == swapped_dt + True + >>> native_dt == swapped_dt.newbyteorder('S') + True + >>> native_dt == swapped_dt.newbyteorder('=') + True + >>> native_dt == swapped_dt.newbyteorder('N') + True + >>> native_dt == native_dt.newbyteorder('|') + True + >>> np.dtype('>> np.dtype('>> np.dtype('>i2') == native_dt.newbyteorder('>') + True + >>> np.dtype('>i2') == native_dt.newbyteorder('B') + True + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('__class_getitem__', + """ + __class_getitem__(item, /) + + Return a parametrized wrapper around the `~numpy.dtype` type. + + .. versionadded:: 1.22 + + Returns + ------- + alias : types.GenericAlias + A parametrized `~numpy.dtype` type. + + Examples + -------- + >>> import numpy as np + + >>> np.dtype[np.int64] + numpy.dtype[numpy.int64] + + See Also + -------- + :pep:`585` : Type hinting generics in standard collections. + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('__ge__', + """ + __ge__(value, /) + + Return ``self >= value``. + + Equivalent to ``np.can_cast(value, self, casting="safe")``. + + See Also + -------- + can_cast : Returns True if cast between data types can occur according to + the casting rule. + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('__le__', + """ + __le__(value, /) + + Return ``self <= value``. + + Equivalent to ``np.can_cast(self, value, casting="safe")``. + + See Also + -------- + can_cast : Returns True if cast between data types can occur according to + the casting rule. + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('__gt__', + """ + __gt__(value, /) + + Return ``self > value``. + + Equivalent to + ``self != value and np.can_cast(value, self, casting="safe")``. + + See Also + -------- + can_cast : Returns True if cast between data types can occur according to + the casting rule. + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('__lt__', + """ + __lt__(value, /) + + Return ``self < value``. + + Equivalent to + ``self != value and np.can_cast(self, value, casting="safe")``. + + See Also + -------- + can_cast : Returns True if cast between data types can occur according to + the casting rule. + + """)) + +############################################################################## +# +# Datetime-related Methods +# +############################################################################## + +add_newdoc('numpy._core.multiarray', 'busdaycalendar', + """ + busdaycalendar(weekmask='1111100', holidays=None) + -- + + busdaycalendar(weekmask='1111100', holidays=None) + + A business day calendar object that efficiently stores information + defining valid days for the busday family of functions. + + The default valid days are Monday through Friday ("business days"). + A busdaycalendar object can be specified with any set of weekly + valid days, plus an optional "holiday" dates that always will be invalid. + + Once a busdaycalendar object is created, the weekmask and holidays + cannot be modified. + + Parameters + ---------- + weekmask : str or array_like of bool, optional + A seven-element array indicating which of Monday through Sunday are + valid days. May be specified as a length-seven list or array, like + [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string + like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for + weekdays, optionally separated by white space. Valid abbreviations + are: Mon Tue Wed Thu Fri Sat Sun + holidays : array_like of datetime64[D], optional + An array of dates to consider as invalid dates, no matter which + weekday they fall upon. Holiday dates may be specified in any + order, and NaT (not-a-time) dates are ignored. This list is + saved in a normalized form that is suited for fast calculations + of valid days. + + Returns + ------- + out : busdaycalendar + A business day calendar object containing the specified + weekmask and holidays values. + + See Also + -------- + is_busday : Returns a boolean array indicating valid days. + busday_offset : Applies an offset counted in valid days. + busday_count : Counts how many valid days are in a half-open date range. + + Attributes + ---------- + weekmask : (copy) seven-element array of bool + holidays : (copy) sorted array of datetime64[D] + + Notes + ----- + Once a busdaycalendar object is created, you cannot modify the + weekmask or holidays. The attributes return copies of internal data. + + Examples + -------- + >>> import numpy as np + >>> # Some important days in July + ... bdd = np.busdaycalendar( + ... holidays=['2011-07-01', '2011-07-04', '2011-07-17']) + >>> # Default is Monday to Friday weekdays + ... bdd.weekmask + array([ True, True, True, True, True, False, False]) + >>> # Any holidays already on the weekend are removed + ... bdd.holidays + array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]') + """) + +add_newdoc('numpy._core.multiarray', 'busdaycalendar', ('weekmask', + """A copy of the seven-element boolean mask indicating valid days.""")) + +add_newdoc('numpy._core.multiarray', 'busdaycalendar', ('holidays', + """A copy of the holiday array indicating additional invalid days.""")) + +add_newdoc('numpy._core.multiarray', 'normalize_axis_index', + """ + normalize_axis_index(axis, ndim, msg_prefix=None) + + Normalizes an axis index, `axis`, such that is a valid positive index into + the shape of array with `ndim` dimensions. Raises an AxisError with an + appropriate message if this is not possible. + + Used internally by all axis-checking logic. + + Parameters + ---------- + axis : int + The un-normalized index of the axis. Can be negative + ndim : int + The number of dimensions of the array that `axis` should be normalized + against + msg_prefix : str + A prefix to put before the message, typically the name of the argument + + Returns + ------- + normalized_axis : int + The normalized axis index, such that `0 <= normalized_axis < ndim` + + Raises + ------ + AxisError + If the axis index is invalid, when `-ndim <= axis < ndim` is false. + + Examples + -------- + >>> import numpy as np + >>> from numpy.lib.array_utils import normalize_axis_index + >>> normalize_axis_index(0, ndim=3) + 0 + >>> normalize_axis_index(1, ndim=3) + 1 + >>> normalize_axis_index(-1, ndim=3) + 2 + + >>> normalize_axis_index(3, ndim=3) + Traceback (most recent call last): + ... + numpy.exceptions.AxisError: axis 3 is out of bounds for array ... + >>> normalize_axis_index(-4, ndim=3, msg_prefix='axes_arg') + Traceback (most recent call last): + ... + numpy.exceptions.AxisError: axes_arg: axis -4 is out of bounds ... + """) + +add_newdoc('numpy._core.multiarray', 'datetime_data', + """ + datetime_data(dtype, /) + -- + + datetime_data(dtype, /) + + Get information about the step size of a date or time type. + + The returned tuple can be passed as the second argument of `numpy.datetime64` and + `numpy.timedelta64`. + + Parameters + ---------- + dtype : dtype + The dtype object, which must be a `datetime64` or `timedelta64` type. + + Returns + ------- + unit : str + The :ref:`datetime unit ` on which this dtype + is based. + count : int + The number of base units in a step. + + Examples + -------- + >>> import numpy as np + >>> dt_25s = np.dtype('timedelta64[25s]') + >>> np.datetime_data(dt_25s) + ('s', 25) + >>> np.array(10, dt_25s).astype('timedelta64[s]') + array(250, dtype='timedelta64[s]') + + The result can be used to construct a datetime that uses the same units + as a timedelta + + >>> np.datetime64('2010', np.datetime_data(dt_25s)) + np.datetime64('2010-01-01T00:00:00','25s') + """) + + +############################################################################## +# +# Documentation for `generic` attributes and methods +# +############################################################################## + +add_newdoc('numpy._core.numerictypes', 'generic', + """ + Base class for numpy scalar types. + + Class from which most (all?) numpy scalar types are derived. For + consistency, exposes the same API as `ndarray`, despite many + consequent attributes being either "get-only," or completely irrelevant. + This is the class from which it is strongly suggested users should derive + custom scalar types. + + """) + +# Attributes + +add_newdoc('numpy._core.numerictypes', 'generic', ('T', + """Scalar attribute identical to `ndarray.T`.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('base', + """Scalar attribute identical to `ndarray.base`.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('data', + """Pointer to start of data.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('dtype', + """Get array data-descriptor.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('flags', + """The integer value of flags.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('flat', + """A 1-D view of the scalar.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('imag', + """The imaginary part of the scalar.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('itemsize', + """The length of one element in bytes.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('ndim', + """The number of array dimensions.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('real', + """The real part of the scalar.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('shape', + """Tuple of array dimensions.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('size', + """The number of elements in the gentype.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('strides', + """Tuple of bytes steps in each dimension.""")) + +# Methods + +add_newdoc('numpy._core.numerictypes', 'number', ('__class_getitem__', + """ + __class_getitem__($cls, item, /) + -- + + number.__class_getitem__(item, /) + + Return a parametrized wrapper around the `~numpy.number` type. + + .. versionadded:: 1.22 + + Returns + ------- + alias : types.GenericAlias + A parametrized `~numpy.number` type. + + Examples + -------- + >>> from typing import Any + >>> import numpy as np + + >>> np.signedinteger[Any] + numpy.signedinteger[typing.Any] + + See Also + -------- + :pep:`585` : Type hinting generics in standard collections. + + """)) + +############################################################################## +# +# Documentation for scalar type abstract base classes in type hierarchy +# +############################################################################## + + +add_newdoc('numpy._core.numerictypes', 'number', + """ + Abstract base class of all numeric scalar types. + + """) + +add_newdoc('numpy._core.numerictypes', 'integer', + """ + Abstract base class of all integer scalar types. + + """) + +add_newdoc('numpy._core.numerictypes', 'signedinteger', + """ + Abstract base class of all signed integer scalar types. + + """) + +add_newdoc('numpy._core.numerictypes', 'unsignedinteger', + """ + Abstract base class of all unsigned integer scalar types. + + """) + +add_newdoc('numpy._core.numerictypes', 'inexact', + """ + Abstract base class of all numeric scalar types with a (potentially) + inexact representation of the values in its range, such as + floating-point numbers. + + """) + +add_newdoc('numpy._core.numerictypes', 'floating', + """ + Abstract base class of all floating-point scalar types. + + """) + +add_newdoc('numpy._core.numerictypes', 'complexfloating', + """ + Abstract base class of all complex number scalar types that are made up of + floating-point numbers. + + """) + +add_newdoc('numpy._core.numerictypes', 'flexible', + """ + Abstract base class of all scalar types without predefined length. + The actual size of these types depends on the specific `numpy.dtype` + instantiation. + + """) + +add_newdoc('numpy._core.numerictypes', 'character', + """ + Abstract base class of all character string scalar types. + + """) + +############################################################################## +# +# Documentation for `dtypes.*` classes +# +############################################################################## + +for _dtype_name, _signature, _sctype_name in ( + ("BoolDType", "()", "bool"), + ("Int8DType", "()", "int8"), + ("UInt8DType", "()", "uint8"), + ("Int16DType", "()", "int16"), + ("UInt16DType", "()", "uint16"), + ("Int32DType", "()", "int32"), + ("IntDType", "()", "intc"), + ("UInt32DType", "()", "uint32"), + ("UIntDType", "()", "uintc"), + ("Int64DType", "()", "int64"), + ("UInt64DType", "()", "uint64"), + ("LongLongDType", "()", "longlong"), + ("ULongLongDType", "()", "ulonglong"), + ("Float16DType", "()", "float16"), + ("Float32DType", "()", "float32"), + ("Float64DType", "()", "float64"), + ("LongDoubleDType", "()", "longdouble"), + ("Complex64DType", "()", "complex64"), + ("Complex128DType", "()", "complex128"), + ("CLongDoubleDType", "()", "clongdouble"), + ("ObjectDType", "()", "object"), + ("BytesDType", "(size, /)", "bytes_"), + ("StrDType", "(size, /)", "str_"), + ("VoidDType", "(length, /)", "void"), + ("DateTime64DType", "(unit, /)", "datetime64"), + ("TimeDelta64DType", "(unit, /)", "timedelta64"), +): + _extra_docs = "" + if _dtype_name in {"VoidDType", "DateTime64DType", "TimeDelta64DType"}: + _extra_docs = f""" + .. warning:: + ``np.dtypes.{_dtype_name}`` cannot be instantiated directly. + Use ``np.dtype("{_sctype_name}[{{unit}}]")`` instead. + """ + + add_newdoc('numpy.dtypes', _dtype_name, + f""" + {_dtype_name}{_signature} + -- + + DType class corresponding to the `numpy.{_sctype_name}` scalar type. + {_extra_docs} + See `numpy.dtype` for the typical way to create dtype instances + and :ref:`arrays.dtypes` for additional information. + """) + + del _dtype_name, _signature, _sctype_name, _extra_docs # avoid namespace pollution + + +add_newdoc('numpy._core.multiarray', 'StringDType', + """ + StringDType(*, coerce=True, **kwargs) + -- + + StringDType(*, na_object=np._NoValue, coerce=True) + + Create a StringDType instance. + + StringDType can be used to store UTF-8 encoded variable-width strings in + a NumPy array. + + Parameters + ---------- + na_object : object, optional + Object used to represent missing data. If unset, the array will not + use a missing data sentinel. + coerce : bool, optional + Whether or not items in an array-like passed to an array creation + function that are neither a str or str subtype should be coerced to + str. Defaults to True. If set to False, creating a StringDType + array from an array-like containing entries that are not already + strings will raise an error. + + Examples + -------- + + >>> import numpy as np + + >>> from numpy.dtypes import StringDType + >>> np.array(["hello", "world"], dtype=StringDType()) + array(["hello", "world"], dtype=StringDType()) + + >>> arr = np.array(["hello", None, "world"], + ... dtype=StringDType(na_object=None)) + >>> arr + array(["hello", None, "world"], dtype=StringDType(na_object=None)) + >>> arr[1] is None + True + + >>> arr = np.array(["hello", np.nan, "world"], + ... dtype=StringDType(na_object=np.nan)) + >>> np.isnan(arr) + array([False, True, False]) + + >>> np.array([1.2, object(), "hello world"], + ... dtype=StringDType(coerce=False)) + Traceback (most recent call last): + ... + ValueError: StringDType only allows string data when string coercion is disabled. + + >>> np.array(["hello", "world"], dtype=StringDType(coerce=True)) + array(["hello", "world"], dtype=StringDType(coerce=True)) + """) diff --git a/local_packages/numpy/_core/_add_newdocs.pyi b/local_packages/numpy/_core/_add_newdocs.pyi new file mode 100644 index 0000000..2d00481 --- /dev/null +++ b/local_packages/numpy/_core/_add_newdocs.pyi @@ -0,0 +1,2 @@ +from .function_base import add_newdoc as add_newdoc +from .overrides import get_array_function_like_doc as get_array_function_like_doc diff --git a/local_packages/numpy/_core/_add_newdocs_scalars.py b/local_packages/numpy/_core/_add_newdocs_scalars.py new file mode 100644 index 0000000..3f9eca5 --- /dev/null +++ b/local_packages/numpy/_core/_add_newdocs_scalars.py @@ -0,0 +1,381 @@ +""" +This file is separate from ``_add_newdocs.py`` so that it can be mocked out by +our sphinx ``conf.py`` during doc builds, where we want to avoid showing +platform-dependent information. +""" +import os +import sys + +from numpy._core import dtype, numerictypes as _numerictypes +from numpy._core.function_base import add_newdoc + +############################################################################## +# +# Documentation for concrete scalar classes +# +############################################################################## + +def numeric_type_aliases(aliases): + def type_aliases_gen(): + for alias, doc in aliases: + try: + alias_type = getattr(_numerictypes, alias) + except AttributeError: + # The set of aliases that actually exist varies between platforms + pass + else: + yield (alias_type, alias, doc) + return list(type_aliases_gen()) + + +possible_aliases = numeric_type_aliases([ + ('int8', '8-bit signed integer (``-128`` to ``127``)'), + ('int16', '16-bit signed integer (``-32_768`` to ``32_767``)'), + ('int32', '32-bit signed integer (``-2_147_483_648`` to ``2_147_483_647``)'), + ('int64', '64-bit signed integer (``-9_223_372_036_854_775_808`` to ``9_223_372_036_854_775_807``)'), + ('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'), + ('uint8', '8-bit unsigned integer (``0`` to ``255``)'), + ('uint16', '16-bit unsigned integer (``0`` to ``65_535``)'), + ('uint32', '32-bit unsigned integer (``0`` to ``4_294_967_295``)'), + ('uint64', '64-bit unsigned integer (``0`` to ``18_446_744_073_709_551_615``)'), + ('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'), + ('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'), + ('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'), + ('float64', '64-bit precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'), + ('float96', '96-bit extended-precision floating-point number type'), + ('float128', '128-bit extended-precision floating-point number type'), + ('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'), + ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'), + ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'), + ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'), +]) + + +def _get_platform_and_machine(): + try: + system, _, _, _, machine = os.uname() + except AttributeError: + system = sys.platform + if system == 'win32': + machine = os.environ.get('PROCESSOR_ARCHITEW6432', '') \ + or os.environ.get('PROCESSOR_ARCHITECTURE', '') + else: + machine = 'unknown' + return system, machine + + +_system, _machine = _get_platform_and_machine() +_doc_alias_string = f":Alias on this platform ({_system} {_machine}):" + +# docstring prefix that cpython uses to populate `__text_signature__` +_ARGUMENT_CLINIC_TEMPLATE = """{name}{signature} +-- + +{docstring}""" + +def add_newdoc_for_scalar_type(name: str, text_signature: str, doc: str) -> None: + # note: `:field: value` is rST syntax which renders as field lists. + cls = getattr(_numerictypes, name) + module = cls.__module__ + + lines_extra = [ + "", # blank line after main doc + f":Character code: ``{dtype(cls).char!r}``", + ] + + if name != cls.__name__: + lines_extra.append(f":Canonical name: `{module}.{name}`") + + lines_extra.extend( + f"{_doc_alias_string} `{module}.{alias}`: {doc}." + for alias_type, alias, doc in possible_aliases + if alias_type is cls + ) + + docstring = _ARGUMENT_CLINIC_TEMPLATE.format( + name=cls.__name__, # must match the class name + signature=text_signature, + docstring="\n".join([doc.strip(), *lines_extra]), + ) + add_newdoc('numpy._core.numerictypes', name, docstring) + + +for bool_name in ('bool', 'bool_'): + add_newdoc_for_scalar_type(bool_name, '(value=False, /)', """ +Boolean type (True or False), stored as a byte. + +.. warning:: + + The :class:`bool` type is not a subclass of the :class:`int_` type + (the :class:`bool` is not even a number type). This is different + than Python's default implementation of :class:`bool` as a + sub-class of :class:`int`. +""") + +add_newdoc_for_scalar_type('byte', '(value=0, /)', """ +Signed integer type, compatible with C ``char``. +""") + +add_newdoc_for_scalar_type('short', '(value=0, /)', """ +Signed integer type, compatible with C ``short``. +""") + +add_newdoc_for_scalar_type('intc', '(value=0, /)', """ +Signed integer type, compatible with C ``int``. +""") + +add_newdoc_for_scalar_type('long', '(value=0, /)', """ +Signed integer type, compatible with C ``long``. +""") + +# TODO: These docs probably need an if to highlight the default rather than +# the C-types (and be correct). +add_newdoc_for_scalar_type('int_', '(value=0, /)', """ +Default signed integer type, 64bit on 64bit systems and 32bit on 32bit systems. +""") + +add_newdoc_for_scalar_type('longlong', '(value=0, /)', """ +Signed integer type, compatible with C ``long long``. +""") + +add_newdoc_for_scalar_type('ubyte', '(value=0, /)', """ +Unsigned integer type, compatible with C ``unsigned char``. +""") + +add_newdoc_for_scalar_type('ushort', '(value=0, /)', """ +Unsigned integer type, compatible with C ``unsigned short``. +""") + +add_newdoc_for_scalar_type('uintc', '(value=0, /)', """ +Unsigned integer type, compatible with C ``unsigned int``. +""") + +add_newdoc_for_scalar_type('uint', '(value=0, /)', """ +Unsigned signed integer type, 64bit on 64bit systems and 32bit on 32bit systems. +""") + +add_newdoc_for_scalar_type('ulong', '(value=0, /)', """ +Unsigned integer type, compatible with C ``unsigned long``. +""") + +add_newdoc_for_scalar_type('ulonglong', '(value=0, /)', """ +Unsigned integer type, compatible with C ``unsigned long long``. +""") + +add_newdoc_for_scalar_type('half', '(value=0, /)', """ +Half-precision floating-point number type. +""") + +add_newdoc_for_scalar_type('single', '(value=0, /)', """ +Single-precision floating-point number type, compatible with C ``float``. +""") + +add_newdoc_for_scalar_type('double', '(value=0, /)', """ +Double-precision floating-point number type, compatible with Python :class:`float` and C ``double``. +""") + +add_newdoc_for_scalar_type('longdouble', '(value=0, /)', """ +Extended-precision floating-point number type, compatible with C ``long double`` +but not necessarily with IEEE 754 quadruple-precision. +""") + +add_newdoc_for_scalar_type('csingle', '(real=0, imag=0, /)', """ +Complex number type composed of two single-precision floating-point numbers. +""") + +add_newdoc_for_scalar_type('cdouble', '(real=0, imag=0, /)', """ +Complex number type composed of two double-precision floating-point numbers, +compatible with Python :class:`complex`. +""") + +add_newdoc_for_scalar_type('clongdouble', '(real=0, imag=0, /)', """ +Complex number type composed of two extended-precision floating-point numbers. +""") + +add_newdoc_for_scalar_type('object_', '(value=None, /)', """ +Any Python object. +""") + +add_newdoc_for_scalar_type('str_', '(value="", /, *args, **kwargs)', r""" +A unicode string. + +This type strips trailing null codepoints. + +>>> s = np.str_("abc\x00") +>>> s +'abc' + +Unlike the builtin :class:`str`, this supports the +:ref:`python:bufferobjects`, exposing its contents as UCS4: + +>>> m = memoryview(np.str_("abc")) +>>> m.format +'3w' +>>> m.tobytes() +b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00' +""") + +add_newdoc_for_scalar_type('bytes_', '(value="", /, *args, **kwargs)', r""" +A byte string. + +When used in arrays, this type strips trailing null bytes. +""") + +add_newdoc_for_scalar_type('void', '(length_or_data, /, dtype=None)', r""" +np.void(length_or_data, /, dtype=None) + +Create a new structured or unstructured void scalar. + +Parameters +---------- +length_or_data : int, array-like, bytes-like, object + One of multiple meanings (see notes). The length or + bytes data of an unstructured void. Or alternatively, + the data to be stored in the new scalar when `dtype` + is provided. + This can be an array-like, in which case an array may + be returned. +dtype : dtype, optional + If provided the dtype of the new scalar. This dtype must + be "void" dtype (i.e. a structured or unstructured void, + see also :ref:`defining-structured-types`). + + .. versionadded:: 1.24 + +Notes +----- +For historical reasons and because void scalars can represent both +arbitrary byte data and structured dtypes, the void constructor +has three calling conventions: + +1. ``np.void(5)`` creates a ``dtype="V5"`` scalar filled with five + ``\0`` bytes. The 5 can be a Python or NumPy integer. +2. ``np.void(b"bytes-like")`` creates a void scalar from the byte string. + The dtype itemsize will match the byte string length, here ``"V10"``. +3. When a ``dtype=`` is passed the call is roughly the same as an + array creation. However, a void scalar rather than array is returned. + +Please see the examples which show all three different conventions. + +Examples +-------- +>>> np.void(5) +np.void(b'\x00\x00\x00\x00\x00') +>>> np.void(b'abcd') +np.void(b'\x61\x62\x63\x64') +>>> np.void((3.2, b'eggs'), dtype="d,S5") +np.void((3.2, b'eggs'), dtype=[('f0', '>> np.void(3, dtype=[('x', np.int8), ('y', np.int8)]) +np.void((3, 3), dtype=[('x', 'i1'), ('y', 'i1')]) +""") + +add_newdoc_for_scalar_type('datetime64', '(value=None, /, *args)', """ +If created from a 64-bit integer, it represents an offset from ``1970-01-01T00:00:00``. +If created from string, the string can be in ISO 8601 date or datetime format. + +When parsing a string to create a datetime object, if the string contains +a trailing timezone (A 'Z' or a timezone offset), the timezone will be +dropped and a User Warning is given. + +Datetime64 objects should be considered to be UTC and therefore have an +offset of +0000. + +>>> np.datetime64(10, 'Y') +np.datetime64('1980') +>>> np.datetime64('1980', 'Y') +np.datetime64('1980') +>>> np.datetime64(10, 'D') +np.datetime64('1970-01-11') + +See :ref:`arrays.datetime` for more information. +""") + +add_newdoc_for_scalar_type('timedelta64', '(value=0, /, *args)', """ +A timedelta stored as a 64-bit integer. + +See :ref:`arrays.datetime` for more information. +""") + +add_newdoc('numpy._core.numerictypes', "integer", ('is_integer', + """ + is_integer($self, /) + -- + + integer.is_integer() -> bool + + Return ``True`` if the number is finite with integral value. + + .. versionadded:: 1.22 + + Examples + -------- + >>> import numpy as np + >>> np.int64(-2).is_integer() + True + >>> np.uint32(5).is_integer() + True + """)) + +# TODO: work out how to put this on the base class, np.floating +for float_name in ('half', 'single', 'double', 'longdouble'): + add_newdoc('numpy._core.numerictypes', float_name, ('as_integer_ratio', + f""" + as_integer_ratio($self, /) + -- + + {float_name}.as_integer_ratio() -> (int, int) + + Return a pair of integers, whose ratio is exactly equal to the original + floating point number, and with a positive denominator. + Raise `OverflowError` on infinities and a `ValueError` on NaNs. + + >>> np.{float_name}(10.0).as_integer_ratio() + (10, 1) + >>> np.{float_name}(0.0).as_integer_ratio() + (0, 1) + >>> np.{float_name}(-.25).as_integer_ratio() + (-1, 4) + """)) + + add_newdoc('numpy._core.numerictypes', float_name, ('is_integer', + f""" + is_integer($self, /) + -- + + {float_name}.is_integer() -> bool + + Return ``True`` if the floating point number is finite with integral + value, and ``False`` otherwise. + + .. versionadded:: 1.22 + + Examples + -------- + >>> np.{float_name}(-2.0).is_integer() + True + >>> np.{float_name}(3.2).is_integer() + False + """)) + +for int_name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', + 'int64', 'uint64', 'int64', 'uint64', 'int64', 'uint64', + 'longlong', 'ulonglong'): + # Add negative examples for signed cases by checking typecode + add_newdoc('numpy._core.numerictypes', int_name, ('bit_count', + f""" + bit_count($self, /) + -- + + {int_name}.bit_count() -> int + + Computes the number of 1-bits in the absolute value of the input. + Analogous to the builtin `int.bit_count` or ``popcount`` in C++. + + Examples + -------- + >>> np.{int_name}(127).bit_count() + 7""" + + (f""" + >>> np.{int_name}(-127).bit_count() + 7 + """ if dtype(int_name).char.islower() else ""))) diff --git a/local_packages/numpy/_core/_add_newdocs_scalars.pyi b/local_packages/numpy/_core/_add_newdocs_scalars.pyi new file mode 100644 index 0000000..241f4a0 --- /dev/null +++ b/local_packages/numpy/_core/_add_newdocs_scalars.pyi @@ -0,0 +1,16 @@ +from typing import Final + +import numpy as np + +possible_aliases: Final[list[tuple[type[np.number], str, str]]] = ... +_system: Final[str] = ... +_machine: Final[str] = ... +_doc_alias_string: Final[str] = ... +_bool_docstring: Final[str] = ... +bool_name: str = ... +int_name: str = ... +float_name: str = ... + +def numeric_type_aliases(aliases: list[tuple[str, str]]) -> list[tuple[type[np.number], str, str]]: ... +def add_newdoc_for_scalar_type(name: str, text_signature: str, doc: str) -> None: ... +def _get_platform_and_machine() -> tuple[str, str]: ... diff --git a/local_packages/numpy/_core/_asarray.py b/local_packages/numpy/_core/_asarray.py new file mode 100644 index 0000000..edaff52 --- /dev/null +++ b/local_packages/numpy/_core/_asarray.py @@ -0,0 +1,130 @@ +""" +Functions in the ``as*array`` family that promote array-likes into arrays. + +`require` fits this category despite its name not matching this pattern. +""" +from .multiarray import array, asanyarray +from .overrides import array_function_dispatch, finalize_array_function_like, set_module + +__all__ = ["require"] + + +POSSIBLE_FLAGS = { + 'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C', + 'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F', + 'A': 'A', 'ALIGNED': 'A', + 'W': 'W', 'WRITEABLE': 'W', + 'O': 'O', 'OWNDATA': 'O', + 'E': 'E', 'ENSUREARRAY': 'E' +} + + +@finalize_array_function_like +@set_module('numpy') +def require(a, dtype=None, requirements=None, *, like=None): + """ + Return an ndarray of the provided type that satisfies requirements. + + This function is useful to be sure that an array with the correct flags + is returned for passing to compiled code (perhaps through ctypes). + + Parameters + ---------- + a : array_like + The object to be converted to a type-and-requirement-satisfying array. + dtype : data-type + The required data-type. If None preserve the current dtype. If your + application requires the data to be in native byteorder, include + a byteorder specification as a part of the dtype specification. + requirements : str or sequence of str + The requirements list can be any of the following + + * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array + * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array + * 'ALIGNED' ('A') - ensure a data-type aligned array + * 'WRITEABLE' ('W') - ensure a writable array + * 'OWNDATA' ('O') - ensure an array that owns its own data + * 'ENSUREARRAY', ('E') - ensure a base array, instead of a subclass + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Array with specified requirements and type if given. + + See Also + -------- + asarray : Convert input to an ndarray. + asanyarray : Convert to an ndarray, but pass through ndarray subclasses. + ascontiguousarray : Convert input to a contiguous array. + asfortranarray : Convert input to an ndarray with column-major + memory order. + ndarray.flags : Information about the memory layout of the array. + + Notes + ----- + The returned array will be guaranteed to have the listed requirements + by making a copy if needed. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(6).reshape(2,3) + >>> x.flags + C_CONTIGUOUS : True + F_CONTIGUOUS : False + OWNDATA : False + WRITEABLE : True + ALIGNED : True + WRITEBACKIFCOPY : False + + >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F']) + >>> y.flags + C_CONTIGUOUS : False + F_CONTIGUOUS : True + OWNDATA : True + WRITEABLE : True + ALIGNED : True + WRITEBACKIFCOPY : False + + """ + if like is not None: + return _require_with_like( + like, + a, + dtype=dtype, + requirements=requirements, + ) + + if not requirements: + return asanyarray(a, dtype=dtype) + + requirements = {POSSIBLE_FLAGS[x.upper()] for x in requirements} + + if 'E' in requirements: + requirements.remove('E') + subok = False + else: + subok = True + + order = 'A' + if requirements >= {'C', 'F'}: + raise ValueError('Cannot specify both "C" and "F" order') + elif 'F' in requirements: + order = 'F' + requirements.remove('F') + elif 'C' in requirements: + order = 'C' + requirements.remove('C') + + arr = array(a, dtype=dtype, order=order, copy=None, subok=subok) + + for prop in requirements: + if not arr.flags[prop]: + return arr.copy(order) + return arr + + +_require_with_like = array_function_dispatch()(require) diff --git a/local_packages/numpy/_core/_asarray.pyi b/local_packages/numpy/_core/_asarray.pyi new file mode 100644 index 0000000..6bef69d --- /dev/null +++ b/local_packages/numpy/_core/_asarray.pyi @@ -0,0 +1,43 @@ +from collections.abc import Iterable +from typing import Any, Literal, TypeAlias, TypeVar, overload + +from numpy._typing import DTypeLike, NDArray, _SupportsArrayFunc + +__all__ = ["require"] + +_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) + +_Requirements: TypeAlias = Literal[ + "C", "C_CONTIGUOUS", "CONTIGUOUS", + "F", "F_CONTIGUOUS", "FORTRAN", + "A", "ALIGNED", + "W", "WRITEABLE", + "O", "OWNDATA" +] +_E: TypeAlias = Literal["E", "ENSUREARRAY"] +_RequirementsWithE: TypeAlias = _Requirements | _E + +@overload +def require( + a: _ArrayT, + dtype: None = None, + requirements: _Requirements | Iterable[_Requirements] | None = None, + *, + like: _SupportsArrayFunc | None = None +) -> _ArrayT: ... +@overload +def require( + a: object, + dtype: DTypeLike | None = None, + requirements: _E | Iterable[_RequirementsWithE] | None = None, + *, + like: _SupportsArrayFunc | None = None +) -> NDArray[Any]: ... +@overload +def require( + a: object, + dtype: DTypeLike | None = None, + requirements: _Requirements | Iterable[_Requirements] | None = None, + *, + like: _SupportsArrayFunc | None = None +) -> NDArray[Any]: ... diff --git a/local_packages/numpy/_core/_dtype.py b/local_packages/numpy/_core/_dtype.py new file mode 100644 index 0000000..6a8a091 --- /dev/null +++ b/local_packages/numpy/_core/_dtype.py @@ -0,0 +1,366 @@ +""" +A place for code to be called from the implementation of np.dtype + +String handling is much easier to do correctly in python. +""" +import numpy as np + +_kind_to_stem = { + 'u': 'uint', + 'i': 'int', + 'c': 'complex', + 'f': 'float', + 'b': 'bool', + 'V': 'void', + 'O': 'object', + 'M': 'datetime', + 'm': 'timedelta', + 'S': 'bytes', + 'U': 'str', +} + + +def _kind_name(dtype): + try: + return _kind_to_stem[dtype.kind] + except KeyError as e: + raise RuntimeError( + f"internal dtype error, unknown kind {dtype.kind!r}" + ) from None + + +def __str__(dtype): + if dtype.fields is not None: + return _struct_str(dtype, include_align=True) + elif dtype.subdtype: + return _subarray_str(dtype) + elif issubclass(dtype.type, np.flexible) or not dtype.isnative: + return dtype.str + else: + return dtype.name + + +def __repr__(dtype): + arg_str = _construction_repr(dtype, include_align=False) + if dtype.isalignedstruct: + arg_str = arg_str + ", align=True" + return f"dtype({arg_str})" + + +def _unpack_field(dtype, offset, title=None): + """ + Helper function to normalize the items in dtype.fields. + + Call as: + + dtype, offset, title = _unpack_field(*dtype.fields[name]) + """ + return dtype, offset, title + + +def _isunsized(dtype): + # PyDataType_ISUNSIZED + return dtype.itemsize == 0 + + +def _construction_repr(dtype, include_align=False, short=False): + """ + Creates a string repr of the dtype, excluding the 'dtype()' part + surrounding the object. This object may be a string, a list, or + a dict depending on the nature of the dtype. This + is the object passed as the first parameter to the dtype + constructor, and if no additional constructor parameters are + given, will reproduce the exact memory layout. + + Parameters + ---------- + short : bool + If true, this creates a shorter repr using 'kind' and 'itemsize', + instead of the longer type name. + + include_align : bool + If true, this includes the 'align=True' parameter + inside the struct dtype construction dict when needed. Use this flag + if you want a proper repr string without the 'dtype()' part around it. + + If false, this does not preserve the + 'align=True' parameter or sticky NPY_ALIGNED_STRUCT flag for + struct arrays like the regular repr does, because the 'align' + flag is not part of first dtype constructor parameter. This + mode is intended for a full 'repr', where the 'align=True' is + provided as the second parameter. + """ + if dtype.fields is not None: + return _struct_str(dtype, include_align=include_align) + elif dtype.subdtype: + return _subarray_str(dtype) + else: + return _scalar_str(dtype, short=short) + + +def _scalar_str(dtype, short): + byteorder = _byte_order_str(dtype) + + if dtype.type == np.bool: + if short: + return "'?'" + else: + return "'bool'" + + elif dtype.type == np.object_: + # The object reference may be different sizes on different + # platforms, so it should never include the itemsize here. + return "'O'" + + elif dtype.type == np.bytes_: + if _isunsized(dtype): + return "'S'" + else: + return "'S%d'" % dtype.itemsize + + elif dtype.type == np.str_: + if _isunsized(dtype): + return f"'{byteorder}U'" + else: + return "'%sU%d'" % (byteorder, dtype.itemsize / 4) + + elif dtype.type == str: + return "'T'" + + elif not type(dtype)._legacy: + return f"'{byteorder}{type(dtype).__name__}{dtype.itemsize * 8}'" + + # unlike the other types, subclasses of void are preserved - but + # historically the repr does not actually reveal the subclass + elif issubclass(dtype.type, np.void): + if _isunsized(dtype): + return "'V'" + else: + return "'V%d'" % dtype.itemsize + + elif dtype.type == np.datetime64: + return f"'{byteorder}M8{_datetime_metadata_str(dtype)}'" + + elif dtype.type == np.timedelta64: + return f"'{byteorder}m8{_datetime_metadata_str(dtype)}'" + + elif dtype.isbuiltin == 2: + return dtype.type.__name__ + + elif np.issubdtype(dtype, np.number): + # Short repr with endianness, like '' """ + # hack to obtain the native and swapped byte order characters + swapped = np.dtype(int).newbyteorder('S') + native = swapped.newbyteorder('S') + + byteorder = dtype.byteorder + if byteorder == '=': + return native.byteorder + if byteorder == 'S': + # TODO: this path can never be reached + return swapped.byteorder + elif byteorder == '|': + return '' + else: + return byteorder + + +def _datetime_metadata_str(dtype): + # TODO: this duplicates the C metastr_to_unicode functionality + unit, count = np.datetime_data(dtype) + if unit == 'generic': + return '' + elif count == 1: + return f'[{unit}]' + else: + return f'[{count}{unit}]' + + +def _struct_dict_str(dtype, includealignedflag): + # unpack the fields dictionary into ls + names = dtype.names + fld_dtypes = [] + offsets = [] + titles = [] + for name in names: + fld_dtype, offset, title = _unpack_field(*dtype.fields[name]) + fld_dtypes.append(fld_dtype) + offsets.append(offset) + titles.append(title) + + # Build up a string to make the dictionary + + if np._core.arrayprint._get_legacy_print_mode() <= 121: + colon = ":" + fieldsep = "," + else: + colon = ": " + fieldsep = ", " + + # First, the names + ret = "{'names'%s[" % colon + ret += fieldsep.join(repr(name) for name in names) + + # Second, the formats + ret += f"], 'formats'{colon}[" + ret += fieldsep.join( + _construction_repr(fld_dtype, short=True) for fld_dtype in fld_dtypes) + + # Third, the offsets + ret += f"], 'offsets'{colon}[" + ret += fieldsep.join("%d" % offset for offset in offsets) + + # Fourth, the titles + if any(title is not None for title in titles): + ret += f"], 'titles'{colon}[" + ret += fieldsep.join(repr(title) for title in titles) + + # Fifth, the itemsize + ret += "], 'itemsize'%s%d" % (colon, dtype.itemsize) + + if (includealignedflag and dtype.isalignedstruct): + # Finally, the aligned flag + ret += ", 'aligned'%sTrue}" % colon + else: + ret += "}" + + return ret + + +def _aligned_offset(offset, alignment): + # round up offset: + return - (-offset // alignment) * alignment + + +def _is_packed(dtype): + """ + Checks whether the structured data type in 'dtype' + has a simple layout, where all the fields are in order, + and follow each other with no alignment padding. + + When this returns true, the dtype can be reconstructed + from a list of the field names and dtypes with no additional + dtype parameters. + + Duplicates the C `is_dtype_struct_simple_unaligned_layout` function. + """ + align = dtype.isalignedstruct + max_alignment = 1 + total_offset = 0 + for name in dtype.names: + fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name]) + + if align: + total_offset = _aligned_offset(total_offset, fld_dtype.alignment) + max_alignment = max(max_alignment, fld_dtype.alignment) + + if fld_offset != total_offset: + return False + total_offset += fld_dtype.itemsize + + if align: + total_offset = _aligned_offset(total_offset, max_alignment) + + return total_offset == dtype.itemsize + + +def _struct_list_str(dtype): + items = [] + for name in dtype.names: + fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name]) + + item = "(" + if title is not None: + item += f"({title!r}, {name!r}), " + else: + item += f"{name!r}, " + # Special case subarray handling here + if fld_dtype.subdtype is not None: + base, shape = fld_dtype.subdtype + item += f"{_construction_repr(base, short=True)}, {shape}" + else: + item += _construction_repr(fld_dtype, short=True) + + item += ")" + items.append(item) + + return "[" + ", ".join(items) + "]" + + +def _struct_str(dtype, include_align): + # The list str representation can't include the 'align=' flag, + # so if it is requested and the struct has the aligned flag set, + # we must use the dict str instead. + if not (include_align and dtype.isalignedstruct) and _is_packed(dtype): + sub = _struct_list_str(dtype) + + else: + sub = _struct_dict_str(dtype, include_align) + + # If the data type isn't the default, void, show it + if dtype.type != np.void: + return f"({dtype.type.__module__}.{dtype.type.__name__}, {sub})" + else: + return sub + + +def _subarray_str(dtype): + base, shape = dtype.subdtype + return f"({_construction_repr(base, short=True)}, {shape})" + + +def _name_includes_bit_suffix(dtype): + if dtype.type == np.object_: + # pointer size varies by system, best to omit it + return False + elif dtype.type == np.bool: + # implied + return False + elif dtype.type is None: + return True + elif np.issubdtype(dtype, np.flexible) and _isunsized(dtype): + # unspecified + return False + else: + return True + + +def _name_get(dtype): + # provides dtype.name.__get__, documented as returning a "bit name" + + if dtype.isbuiltin == 2: + # user dtypes don't promise to do anything special + return dtype.type.__name__ + + if not type(dtype)._legacy: + name = type(dtype).__name__ + + elif issubclass(dtype.type, np.void): + # historically, void subclasses preserve their name, eg `record64` + name = dtype.type.__name__ + else: + name = _kind_name(dtype) + + # append bit counts + if _name_includes_bit_suffix(dtype): + name += f"{dtype.itemsize * 8}" + + # append metadata to datetimes + if dtype.type in (np.datetime64, np.timedelta64): + name += _datetime_metadata_str(dtype) + + return name diff --git a/local_packages/numpy/_core/_dtype.pyi b/local_packages/numpy/_core/_dtype.pyi new file mode 100644 index 0000000..28adecf --- /dev/null +++ b/local_packages/numpy/_core/_dtype.pyi @@ -0,0 +1,56 @@ +from typing import Final, Literal as L, TypeAlias, TypedDict, overload, type_check_only +from typing_extensions import ReadOnly, TypeVar + +import numpy as np + +### + +_T = TypeVar("_T") + +_Name: TypeAlias = L["uint", "int", "complex", "float", "bool", "void", "object", "datetime", "timedelta", "bytes", "str"] + +@type_check_only +class _KindToStemType(TypedDict): + u: ReadOnly[L["uint"]] + i: ReadOnly[L["int"]] + c: ReadOnly[L["complex"]] + f: ReadOnly[L["float"]] + b: ReadOnly[L["bool"]] + V: ReadOnly[L["void"]] + O: ReadOnly[L["object"]] + M: ReadOnly[L["datetime"]] + m: ReadOnly[L["timedelta"]] + S: ReadOnly[L["bytes"]] + U: ReadOnly[L["str"]] + +### + +_kind_to_stem: Final[_KindToStemType] = ... + +# +def _kind_name(dtype: np.dtype) -> _Name: ... +def __str__(dtype: np.dtype) -> str: ... +def __repr__(dtype: np.dtype) -> str: ... + +# +def _isunsized(dtype: np.dtype) -> bool: ... +def _is_packed(dtype: np.dtype) -> bool: ... +def _name_includes_bit_suffix(dtype: np.dtype) -> bool: ... + +# +def _construction_repr(dtype: np.dtype, include_align: bool = False, short: bool = False) -> str: ... +def _scalar_str(dtype: np.dtype, short: bool) -> str: ... +def _byte_order_str(dtype: np.dtype) -> str: ... +def _datetime_metadata_str(dtype: np.dtype) -> str: ... +def _struct_dict_str(dtype: np.dtype, includealignedflag: bool) -> str: ... +def _struct_list_str(dtype: np.dtype) -> str: ... +def _struct_str(dtype: np.dtype, include_align: bool) -> str: ... +def _subarray_str(dtype: np.dtype) -> str: ... +def _name_get(dtype: np.dtype) -> str: ... + +# +@overload +def _unpack_field(dtype: np.dtype, offset: int, title: _T) -> tuple[np.dtype, int, _T]: ... +@overload +def _unpack_field(dtype: np.dtype, offset: int, title: None = None) -> tuple[np.dtype, int, None]: ... +def _aligned_offset(offset: int, alignment: int) -> int: ... diff --git a/local_packages/numpy/_core/_dtype_ctypes.py b/local_packages/numpy/_core/_dtype_ctypes.py new file mode 100644 index 0000000..4de6df6 --- /dev/null +++ b/local_packages/numpy/_core/_dtype_ctypes.py @@ -0,0 +1,120 @@ +""" +Conversion from ctypes to dtype. + +In an ideal world, we could achieve this through the PEP3118 buffer protocol, +something like:: + + def dtype_from_ctypes_type(t): + # needed to ensure that the shape of `t` is within memoryview.format + class DummyStruct(ctypes.Structure): + _fields_ = [('a', t)] + + # empty to avoid memory allocation + ctype_0 = (DummyStruct * 0)() + mv = memoryview(ctype_0) + + # convert the struct, and slice back out the field + return _dtype_from_pep3118(mv.format)['a'] + +Unfortunately, this fails because: + +* ctypes cannot handle length-0 arrays with PEP3118 (bpo-32782) +* PEP3118 cannot represent unions, but both numpy and ctypes can +* ctypes cannot handle big-endian structs with PEP3118 (bpo-32780) +""" + +# We delay-import ctypes for distributions that do not include it. +# While this module is not used unless the user passes in ctypes +# members, it is eagerly imported from numpy/_core/__init__.py. +import numpy as np + + +def _from_ctypes_array(t): + return np.dtype((dtype_from_ctypes_type(t._type_), (t._length_,))) + + +def _from_ctypes_structure(t): + for item in t._fields_: + if len(item) > 2: + raise TypeError( + "ctypes bitfields have no dtype equivalent") + + if hasattr(t, "_pack_"): + import ctypes + formats = [] + offsets = [] + names = [] + current_offset = 0 + for fname, ftyp in t._fields_: + names.append(fname) + formats.append(dtype_from_ctypes_type(ftyp)) + # Each type has a default offset, this is platform dependent + # for some types. + effective_pack = min(t._pack_, ctypes.alignment(ftyp)) + current_offset = ( + (current_offset + effective_pack - 1) // effective_pack + ) * effective_pack + offsets.append(current_offset) + current_offset += ctypes.sizeof(ftyp) + + return np.dtype({ + "formats": formats, + "offsets": offsets, + "names": names, + "itemsize": ctypes.sizeof(t)}) + else: + fields = [] + for fname, ftyp in t._fields_: + fields.append((fname, dtype_from_ctypes_type(ftyp))) + + # by default, ctypes structs are aligned + return np.dtype(fields, align=True) + + +def _from_ctypes_scalar(t): + """ + Return the dtype type with endianness included if it's the case + """ + if getattr(t, '__ctype_be__', None) is t: + return np.dtype('>' + t._type_) + elif getattr(t, '__ctype_le__', None) is t: + return np.dtype('<' + t._type_) + else: + return np.dtype(t._type_) + + +def _from_ctypes_union(t): + import ctypes + formats = [] + offsets = [] + names = [] + for fname, ftyp in t._fields_: + names.append(fname) + formats.append(dtype_from_ctypes_type(ftyp)) + offsets.append(0) # Union fields are offset to 0 + + return np.dtype({ + "formats": formats, + "offsets": offsets, + "names": names, + "itemsize": ctypes.sizeof(t)}) + + +def dtype_from_ctypes_type(t): + """ + Construct a dtype object from a ctypes type + """ + import _ctypes + if issubclass(t, _ctypes.Array): + return _from_ctypes_array(t) + elif issubclass(t, _ctypes._Pointer): + raise TypeError("ctypes pointers have no dtype equivalent") + elif issubclass(t, _ctypes.Structure): + return _from_ctypes_structure(t) + elif issubclass(t, _ctypes.Union): + return _from_ctypes_union(t) + elif isinstance(getattr(t, '_type_', None), str): + return _from_ctypes_scalar(t) + else: + raise NotImplementedError( + f"Unknown ctypes type {t.__name__}") diff --git a/local_packages/numpy/_core/_dtype_ctypes.pyi b/local_packages/numpy/_core/_dtype_ctypes.pyi new file mode 100644 index 0000000..69438a2 --- /dev/null +++ b/local_packages/numpy/_core/_dtype_ctypes.pyi @@ -0,0 +1,83 @@ +import _ctypes +import ctypes as ct +from typing import Any, overload + +import numpy as np + +# +@overload +def dtype_from_ctypes_type(t: type[_ctypes.Array[Any] | _ctypes.Structure]) -> np.dtype[np.void]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_bool]) -> np.dtype[np.bool]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_int8 | ct.c_byte]) -> np.dtype[np.int8]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_uint8 | ct.c_ubyte]) -> np.dtype[np.uint8]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_int16 | ct.c_short]) -> np.dtype[np.int16]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_uint16 | ct.c_ushort]) -> np.dtype[np.uint16]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_int32 | ct.c_int]) -> np.dtype[np.int32]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_uint32 | ct.c_uint]) -> np.dtype[np.uint32]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_ssize_t | ct.c_long]) -> np.dtype[np.int32 | np.int64]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_size_t | ct.c_ulong]) -> np.dtype[np.uint32 | np.uint64]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_int64 | ct.c_longlong]) -> np.dtype[np.int64]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_uint64 | ct.c_ulonglong]) -> np.dtype[np.uint64]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_float]) -> np.dtype[np.float32]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_double]) -> np.dtype[np.float64]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_longdouble]) -> np.dtype[np.longdouble]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_char]) -> np.dtype[np.bytes_]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.py_object[Any]]) -> np.dtype[np.object_]: ... + +# NOTE: the complex ctypes on python>=3.14 are not yet supported at runtim, see +# https://github.com/numpy/numpy/issues/28360 + +# +def _from_ctypes_array(t: type[_ctypes.Array[Any]]) -> np.dtype[np.void]: ... +def _from_ctypes_structure(t: type[_ctypes.Structure]) -> np.dtype[np.void]: ... +def _from_ctypes_union(t: type[_ctypes.Union]) -> np.dtype[np.void]: ... + +# keep in sync with `dtype_from_ctypes_type` (minus the first overload) +@overload +def _from_ctypes_scalar(t: type[ct.c_bool]) -> np.dtype[np.bool]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_int8 | ct.c_byte]) -> np.dtype[np.int8]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_uint8 | ct.c_ubyte]) -> np.dtype[np.uint8]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_int16 | ct.c_short]) -> np.dtype[np.int16]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_uint16 | ct.c_ushort]) -> np.dtype[np.uint16]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_int32 | ct.c_int]) -> np.dtype[np.int32]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_uint32 | ct.c_uint]) -> np.dtype[np.uint32]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_ssize_t | ct.c_long]) -> np.dtype[np.int32 | np.int64]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_size_t | ct.c_ulong]) -> np.dtype[np.uint32 | np.uint64]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_int64 | ct.c_longlong]) -> np.dtype[np.int64]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_uint64 | ct.c_ulonglong]) -> np.dtype[np.uint64]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_float]) -> np.dtype[np.float32]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_double]) -> np.dtype[np.float64]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_longdouble]) -> np.dtype[np.longdouble]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_char]) -> np.dtype[np.bytes_]: ... +@overload +def _from_ctypes_scalar(t: type[ct.py_object[Any]]) -> np.dtype[np.object_]: ... diff --git a/local_packages/numpy/_core/_exceptions.py b/local_packages/numpy/_core/_exceptions.py new file mode 100644 index 0000000..73b07d2 --- /dev/null +++ b/local_packages/numpy/_core/_exceptions.py @@ -0,0 +1,162 @@ +""" +Various richly-typed exceptions, that also help us deal with string formatting +in python where it's easier. + +By putting the formatting in `__str__`, we also avoid paying the cost for +users who silence the exceptions. +""" + +def _unpack_tuple(tup): + if len(tup) == 1: + return tup[0] + else: + return tup + + +def _display_as_base(cls): + """ + A decorator that makes an exception class look like its base. + + We use this to hide subclasses that are implementation details - the user + should catch the base type, which is what the traceback will show them. + + Classes decorated with this decorator are subject to removal without a + deprecation warning. + """ + assert issubclass(cls, Exception) + cls.__name__ = cls.__base__.__name__ + return cls + + +class UFuncTypeError(TypeError): + """ Base class for all ufunc exceptions """ + def __init__(self, ufunc): + self.ufunc = ufunc + + +@_display_as_base +class _UFuncNoLoopError(UFuncTypeError): + """ Thrown when a ufunc loop cannot be found """ + def __init__(self, ufunc, dtypes): + super().__init__(ufunc) + self.dtypes = tuple(dtypes) + + def __str__(self): + return ( + f"ufunc {self.ufunc.__name__!r} did not contain a loop with signature " + f"matching types {_unpack_tuple(self.dtypes[:self.ufunc.nin])!r} " + f"-> {_unpack_tuple(self.dtypes[self.ufunc.nin:])!r}" + ) + + +@_display_as_base +class _UFuncBinaryResolutionError(_UFuncNoLoopError): + """ Thrown when a binary resolution fails """ + def __init__(self, ufunc, dtypes): + super().__init__(ufunc, dtypes) + assert len(self.dtypes) == 2 + + def __str__(self): + return ( + "ufunc {!r} cannot use operands with types {!r} and {!r}" + ).format( + self.ufunc.__name__, *self.dtypes + ) + + +@_display_as_base +class _UFuncCastingError(UFuncTypeError): + def __init__(self, ufunc, casting, from_, to): + super().__init__(ufunc) + self.casting = casting + self.from_ = from_ + self.to = to + + +@_display_as_base +class _UFuncInputCastingError(_UFuncCastingError): + """ Thrown when a ufunc input cannot be casted """ + def __init__(self, ufunc, casting, from_, to, i): + super().__init__(ufunc, casting, from_, to) + self.in_i = i + + def __str__(self): + # only show the number if more than one input exists + i_str = f"{self.in_i} " if self.ufunc.nin != 1 else "" + return ( + f"Cannot cast ufunc {self.ufunc.__name__!r} input {i_str}from " + f"{self.from_!r} to {self.to!r} with casting rule {self.casting!r}" + ) + + +@_display_as_base +class _UFuncOutputCastingError(_UFuncCastingError): + """ Thrown when a ufunc output cannot be casted """ + def __init__(self, ufunc, casting, from_, to, i): + super().__init__(ufunc, casting, from_, to) + self.out_i = i + + def __str__(self): + # only show the number if more than one output exists + i_str = f"{self.out_i} " if self.ufunc.nout != 1 else "" + return ( + f"Cannot cast ufunc {self.ufunc.__name__!r} output {i_str}from " + f"{self.from_!r} to {self.to!r} with casting rule {self.casting!r}" + ) + + +@_display_as_base +class _ArrayMemoryError(MemoryError): + """ Thrown when an array cannot be allocated""" + def __init__(self, shape, dtype): + self.shape = shape + self.dtype = dtype + + @property + def _total_size(self): + num_bytes = self.dtype.itemsize + for dim in self.shape: + num_bytes *= dim + return num_bytes + + @staticmethod + def _size_to_string(num_bytes): + """ Convert a number of bytes into a binary size string """ + + # https://en.wikipedia.org/wiki/Binary_prefix + LOG2_STEP = 10 + STEP = 1024 + units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB'] + + unit_i = max(num_bytes.bit_length() - 1, 1) // LOG2_STEP + unit_val = 1 << (unit_i * LOG2_STEP) + n_units = num_bytes / unit_val + del unit_val + + # ensure we pick a unit that is correct after rounding + if round(n_units) == STEP: + unit_i += 1 + n_units /= STEP + + # deal with sizes so large that we don't have units for them + if unit_i >= len(units): + new_unit_i = len(units) - 1 + n_units *= 1 << ((unit_i - new_unit_i) * LOG2_STEP) + unit_i = new_unit_i + + unit_name = units[unit_i] + # format with a sensible number of digits + if unit_i == 0: + # no decimal point on bytes + return f'{n_units:.0f} {unit_name}' + elif round(n_units) < 1000: + # 3 significant figures, if none are dropped to the left of the . + return f'{n_units:#.3g} {unit_name}' + else: + # just give all the digits otherwise + return f'{n_units:#.0f} {unit_name}' + + def __str__(self): + size_str = self._size_to_string(self._total_size) + return (f"Unable to allocate {size_str} for an array with shape " + f"{self.shape} and data type {self.dtype}") diff --git a/local_packages/numpy/_core/_exceptions.pyi b/local_packages/numpy/_core/_exceptions.pyi new file mode 100644 index 0000000..b340fde --- /dev/null +++ b/local_packages/numpy/_core/_exceptions.pyi @@ -0,0 +1,54 @@ +from collections.abc import Iterable +from typing import Any, Final, TypeVar, overload + +import numpy as np +from numpy import _CastingKind + +### + +_T = TypeVar("_T") +_TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, *tuple[Any, ...]]) +_ExceptionT = TypeVar("_ExceptionT", bound=Exception) + +### + +class UFuncTypeError(TypeError): + ufunc: Final[np.ufunc] + def __init__(self, /, ufunc: np.ufunc) -> None: ... + +class _UFuncNoLoopError(UFuncTypeError): + dtypes: tuple[np.dtype, ...] + def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ... + +class _UFuncBinaryResolutionError(_UFuncNoLoopError): + dtypes: tuple[np.dtype, np.dtype] + def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ... + +class _UFuncCastingError(UFuncTypeError): + casting: Final[_CastingKind] + from_: Final[np.dtype] + to: Final[np.dtype] + def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype) -> None: ... + +class _UFuncInputCastingError(_UFuncCastingError): + in_i: Final[int] + def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ... + +class _UFuncOutputCastingError(_UFuncCastingError): + out_i: Final[int] + def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ... + +class _ArrayMemoryError(MemoryError): + shape: tuple[int, ...] + dtype: np.dtype + def __init__(self, /, shape: tuple[int, ...], dtype: np.dtype) -> None: ... + @property + def _total_size(self) -> int: ... + @staticmethod + def _size_to_string(num_bytes: int) -> str: ... + +@overload +def _unpack_tuple(tup: tuple[_T]) -> _T: ... +@overload +def _unpack_tuple(tup: _TupleT) -> _TupleT: ... +def _display_as_base(cls: type[_ExceptionT]) -> type[_ExceptionT]: ... diff --git a/local_packages/numpy/_core/_internal.py b/local_packages/numpy/_core/_internal.py new file mode 100644 index 0000000..7c64daf --- /dev/null +++ b/local_packages/numpy/_core/_internal.py @@ -0,0 +1,968 @@ +""" +A place for internal code + +Some things are more easily handled Python. + +""" +import ast +import math +import re +import sys +import warnings + +from numpy import _NoValue +from numpy.exceptions import DTypePromotionError + +from .multiarray import StringDType, array, dtype, promote_types + +try: + import ctypes +except ImportError: + ctypes = None + +IS_PYPY = sys.implementation.name == 'pypy' + +if sys.byteorder == 'little': + _nbo = '<' +else: + _nbo = '>' + +def _makenames_list(adict, align): + allfields = [] + + for fname, obj in adict.items(): + n = len(obj) + if not isinstance(obj, tuple) or n not in (2, 3): + raise ValueError("entry not a 2- or 3- tuple") + if n > 2 and obj[2] == fname: + continue + num = int(obj[1]) + if num < 0: + raise ValueError("invalid offset.") + format = dtype(obj[0], align=align) + if n > 2: + title = obj[2] + else: + title = None + allfields.append((fname, format, num, title)) + # sort by offsets + allfields.sort(key=lambda x: x[2]) + names = [x[0] for x in allfields] + formats = [x[1] for x in allfields] + offsets = [x[2] for x in allfields] + titles = [x[3] for x in allfields] + + return names, formats, offsets, titles + +# Called in PyArray_DescrConverter function when +# a dictionary without "names" and "formats" +# fields is used as a data-type descriptor. +def _usefields(adict, align): + try: + names = adict[-1] + except KeyError: + names = None + if names is None: + names, formats, offsets, titles = _makenames_list(adict, align) + else: + formats = [] + offsets = [] + titles = [] + for name in names: + res = adict[name] + formats.append(res[0]) + offsets.append(res[1]) + if len(res) > 2: + titles.append(res[2]) + else: + titles.append(None) + + return dtype({"names": names, + "formats": formats, + "offsets": offsets, + "titles": titles}, align) + + +# construct an array_protocol descriptor list +# from the fields attribute of a descriptor +# This calls itself recursively but should eventually hit +# a descriptor that has no fields and then return +# a simple typestring + +def _array_descr(descriptor): + fields = descriptor.fields + if fields is None: + subdtype = descriptor.subdtype + if subdtype is None: + if descriptor.metadata is None: + return descriptor.str + else: + new = descriptor.metadata.copy() + if new: + return (descriptor.str, new) + else: + return descriptor.str + else: + return (_array_descr(subdtype[0]), subdtype[1]) + + names = descriptor.names + ordered_fields = [fields[x] + (x,) for x in names] + result = [] + offset = 0 + for field in ordered_fields: + if field[1] > offset: + num = field[1] - offset + result.append(('', f'|V{num}')) + offset += num + elif field[1] < offset: + raise ValueError( + "dtype.descr is not defined for types with overlapping or " + "out-of-order fields") + if len(field) > 3: + name = (field[2], field[3]) + else: + name = field[2] + if field[0].subdtype: + tup = (name, _array_descr(field[0].subdtype[0]), + field[0].subdtype[1]) + else: + tup = (name, _array_descr(field[0])) + offset += field[0].itemsize + result.append(tup) + + if descriptor.itemsize > offset: + num = descriptor.itemsize - offset + result.append(('', f'|V{num}')) + + return result + + +# format_re was originally from numarray by J. Todd Miller + +format_re = re.compile(r'(?P[<>|=]?)' + r'(?P *[(]?[ ,0-9]*[)]? *)' + r'(?P[<>|=]?)' + r'(?P[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)') +sep_re = re.compile(r'\s*,\s*') +space_re = re.compile(r'\s+$') + +# astr is a string (perhaps comma separated) + +_convorder = {'=': _nbo} + +def _commastring(astr): + startindex = 0 + result = [] + islist = False + while startindex < len(astr): + mo = format_re.match(astr, pos=startindex) + try: + (order1, repeats, order2, dtype) = mo.groups() + except (TypeError, AttributeError): + raise ValueError( + f'format number {len(result) + 1} of "{astr}" is not recognized' + ) from None + startindex = mo.end() + # Separator or ending padding + if startindex < len(astr): + if space_re.match(astr, pos=startindex): + startindex = len(astr) + else: + mo = sep_re.match(astr, pos=startindex) + if not mo: + raise ValueError( + 'format number %d of "%s" is not recognized' % + (len(result) + 1, astr)) + startindex = mo.end() + islist = True + + if order2 == '': + order = order1 + elif order1 == '': + order = order2 + else: + order1 = _convorder.get(order1, order1) + order2 = _convorder.get(order2, order2) + if (order1 != order2): + raise ValueError( + f'inconsistent byte-order specification {order1} and {order2}') + order = order1 + + if order in ('|', '=', _nbo): + order = '' + dtype = order + dtype + if repeats == '': + newitem = dtype + else: + if (repeats[0] == "(" and repeats[-1] == ")" + and repeats[1:-1].strip() != "" + and "," not in repeats): + warnings.warn( + 'Passing in a parenthesized single number for repeats ' + 'is deprecated; pass either a single number or indicate ' + 'a tuple with a comma, like "(2,)".', DeprecationWarning, + stacklevel=2) + newitem = (dtype, ast.literal_eval(repeats)) + + result.append(newitem) + + return result if islist else result[0] + +class dummy_ctype: + + def __init__(self, cls): + self._cls = cls + + def __mul__(self, other): + return self + + def __call__(self, *other): + return self._cls(other) + + def __eq__(self, other): + return self._cls == other._cls + + def __ne__(self, other): + return self._cls != other._cls + +def _getintp_ctype(): + val = _getintp_ctype.cache + if val is not None: + return val + if ctypes is None: + import numpy as np + val = dummy_ctype(np.intp) + else: + char = dtype('n').char + if char == 'i': + val = ctypes.c_int + elif char == 'l': + val = ctypes.c_long + elif char == 'q': + val = ctypes.c_longlong + else: + val = ctypes.c_long + _getintp_ctype.cache = val + return val + + +_getintp_ctype.cache = None + +# Used for .ctypes attribute of ndarray + +class _missing_ctypes: + def cast(self, num, obj): + return num.value + + class c_void_p: + def __init__(self, ptr): + self.value = ptr + + +class _ctypes: + def __init__(self, array, ptr=None): + self._arr = array + + if ctypes: + self._ctypes = ctypes + self._data = self._ctypes.c_void_p(ptr) + else: + # fake a pointer-like object that holds onto the reference + self._ctypes = _missing_ctypes() + self._data = self._ctypes.c_void_p(ptr) + self._data._objects = array + + if self._arr.ndim == 0: + self._zerod = True + else: + self._zerod = False + + def data_as(self, obj): + """ + Return the data pointer cast to a particular c-types object. + For example, calling ``self._as_parameter_`` is equivalent to + ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use + the data as a pointer to a ctypes array of floating-point data: + ``self.data_as(ctypes.POINTER(ctypes.c_double))``. + + The returned pointer will keep a reference to the array. + """ + # _ctypes.cast function causes a circular reference of self._data in + # self._data._objects. Attributes of self._data cannot be released + # until gc.collect is called. Make a copy of the pointer first then + # let it hold the array reference. This is a workaround to circumvent + # the CPython bug https://bugs.python.org/issue12836. + ptr = self._ctypes.cast(self._data, obj) + ptr._arr = self._arr + return ptr + + def shape_as(self, obj): + """ + Return the shape tuple as an array of some other c-types + type. For example: ``self.shape_as(ctypes.c_short)``. + """ + if self._zerod: + return None + return (obj * self._arr.ndim)(*self._arr.shape) + + def strides_as(self, obj): + """ + Return the strides tuple as an array of some other + c-types type. For example: ``self.strides_as(ctypes.c_longlong)``. + """ + if self._zerod: + return None + return (obj * self._arr.ndim)(*self._arr.strides) + + @property + def data(self): + """ + A pointer to the memory area of the array as a Python integer. + This memory area may contain data that is not aligned, or not in + correct byte-order. The memory area may not even be writeable. + The array flags and data-type of this array should be respected + when passing this attribute to arbitrary C-code to avoid trouble + that can include Python crashing. User Beware! The value of this + attribute is exactly the same as: + ``self._array_interface_['data'][0]``. + + Note that unlike ``data_as``, a reference won't be kept to the array: + code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a + pointer to a deallocated array, and should be spelt + ``(a + b).ctypes.data_as(ctypes.c_void_p)`` + """ + return self._data.value + + @property + def shape(self): + """ + (c_intp*self.ndim): A ctypes array of length self.ndim where + the basetype is the C-integer corresponding to ``dtype('p')`` on this + platform (see `~numpy.ctypeslib.c_intp`). This base-type could be + `ctypes.c_int`, `ctypes.c_long`, or `ctypes.c_longlong` depending on + the platform. The ctypes array contains the shape of + the underlying array. + """ + return self.shape_as(_getintp_ctype()) + + @property + def strides(self): + """ + (c_intp*self.ndim): A ctypes array of length self.ndim where + the basetype is the same as for the shape attribute. This ctypes + array contains the strides information from the underlying array. + This strides information is important for showing how many bytes + must be jumped to get to the next element in the array. + """ + return self.strides_as(_getintp_ctype()) + + @property + def _as_parameter_(self): + """ + Overrides the ctypes semi-magic method + + Enables `c_func(some_array.ctypes)` + """ + return self.data_as(ctypes.c_void_p) + + +def _newnames(datatype, order): + """ + Given a datatype and an order object, return a new names tuple, with the + order indicated + """ + oldnames = datatype.names + nameslist = list(oldnames) + if isinstance(order, str): + order = [order] + seen = set() + if isinstance(order, (list, tuple)): + for name in order: + try: + nameslist.remove(name) + except ValueError: + if name in seen: + raise ValueError(f"duplicate field name: {name}") from None + else: + raise ValueError(f"unknown field name: {name}") from None + seen.add(name) + return tuple(list(order) + nameslist) + raise ValueError(f"unsupported order value: {order}") + +def _copy_fields(ary): + """Return copy of structured array with padding between fields removed. + + Parameters + ---------- + ary : ndarray + Structured array from which to remove padding bytes + + Returns + ------- + ary_copy : ndarray + Copy of ary with padding bytes removed + """ + dt = ary.dtype + copy_dtype = {'names': dt.names, + 'formats': [dt.fields[name][0] for name in dt.names]} + return array(ary, dtype=copy_dtype, copy=True) + +def _promote_fields(dt1, dt2): + """ Perform type promotion for two structured dtypes. + + Parameters + ---------- + dt1 : structured dtype + First dtype. + dt2 : structured dtype + Second dtype. + + Returns + ------- + out : dtype + The promoted dtype + + Notes + ----- + If one of the inputs is aligned, the result will be. The titles of + both descriptors must match (point to the same field). + """ + # Both must be structured and have the same names in the same order + if (dt1.names is None or dt2.names is None) or dt1.names != dt2.names: + raise DTypePromotionError( + f"field names `{dt1.names}` and `{dt2.names}` mismatch.") + + # if both are identical, we can (maybe!) just return the same dtype. + identical = dt1 is dt2 + new_fields = [] + for name in dt1.names: + field1 = dt1.fields[name] + field2 = dt2.fields[name] + new_descr = promote_types(field1[0], field2[0]) + identical = identical and new_descr is field1[0] + + # Check that the titles match (if given): + if field1[2:] != field2[2:]: + raise DTypePromotionError( + f"field titles of field '{name}' mismatch") + if len(field1) == 2: + new_fields.append((name, new_descr)) + else: + new_fields.append(((field1[2], name), new_descr)) + + res = dtype(new_fields, align=dt1.isalignedstruct or dt2.isalignedstruct) + + # Might as well preserve identity (and metadata) if the dtype is identical + # and the itemsize, offsets are also unmodified. This could probably be + # sped up, but also probably just be removed entirely. + if identical and res.itemsize == dt1.itemsize: + for name in dt1.names: + if dt1.fields[name][1] != res.fields[name][1]: + return res # the dtype changed. + return dt1 + + return res + + +def _getfield_is_safe(oldtype, newtype, offset): + """ Checks safety of getfield for object arrays. + + As in _view_is_safe, we need to check that memory containing objects is not + reinterpreted as a non-object datatype and vice versa. + + Parameters + ---------- + oldtype : data-type + Data type of the original ndarray. + newtype : data-type + Data type of the field being accessed by ndarray.getfield + offset : int + Offset of the field being accessed by ndarray.getfield + + Raises + ------ + TypeError + If the field access is invalid + + """ + if newtype.hasobject or oldtype.hasobject: + if offset == 0 and newtype == oldtype: + return + if oldtype.names is not None: + for name in oldtype.names: + if (oldtype.fields[name][1] == offset and + oldtype.fields[name][0] == newtype): + return + raise TypeError("Cannot get/set field of an object array") + return + +def _view_is_safe(oldtype, newtype): + """ Checks safety of a view involving object arrays, for example when + doing:: + + np.zeros(10, dtype=oldtype).view(newtype) + + Parameters + ---------- + oldtype : data-type + Data type of original ndarray + newtype : data-type + Data type of the view + + Raises + ------ + TypeError + If the new type is incompatible with the old type. + + """ + + # if the types are equivalent, there is no problem. + # for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4')) + if oldtype == newtype: + return + + if newtype.hasobject or oldtype.hasobject: + raise TypeError("Cannot change data-type for array of references.") + return + + +# Given a string containing a PEP 3118 format specifier, +# construct a NumPy dtype + +_pep3118_native_map = { + '?': '?', + 'c': 'S1', + 'b': 'b', + 'B': 'B', + 'h': 'h', + 'H': 'H', + 'i': 'i', + 'I': 'I', + 'l': 'l', + 'L': 'L', + 'q': 'q', + 'Q': 'Q', + 'e': 'e', + 'f': 'f', + 'd': 'd', + 'g': 'g', + 'Zf': 'F', + 'Zd': 'D', + 'Zg': 'G', + 's': 'S', + 'w': 'U', + 'O': 'O', + 'x': 'V', # padding +} +_pep3118_native_typechars = ''.join(_pep3118_native_map.keys()) + +_pep3118_standard_map = { + '?': '?', + 'c': 'S1', + 'b': 'b', + 'B': 'B', + 'h': 'i2', + 'H': 'u2', + 'i': 'i4', + 'I': 'u4', + 'l': 'i4', + 'L': 'u4', + 'q': 'i8', + 'Q': 'u8', + 'e': 'f2', + 'f': 'f', + 'd': 'd', + 'Zf': 'F', + 'Zd': 'D', + 's': 'S', + 'w': 'U', + 'O': 'O', + 'x': 'V', # padding +} +_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys()) + +_pep3118_unsupported_map = { + 'u': 'UCS-2 strings', + '&': 'pointers', + 't': 'bitfields', + 'X': 'function pointers', +} + +class _Stream: + def __init__(self, s): + self.s = s + self.byteorder = '@' + + def advance(self, n): + res = self.s[:n] + self.s = self.s[n:] + return res + + def consume(self, c): + if self.s[:len(c)] == c: + self.advance(len(c)) + return True + return False + + def consume_until(self, c): + if callable(c): + i = 0 + while i < len(self.s) and not c(self.s[i]): + i = i + 1 + return self.advance(i) + else: + i = self.s.index(c) + res = self.advance(i) + self.advance(len(c)) + return res + + @property + def next(self): + return self.s[0] + + def __bool__(self): + return bool(self.s) + + +def _dtype_from_pep3118(spec): + stream = _Stream(spec) + dtype, align = __dtype_from_pep3118(stream, is_subdtype=False) + return dtype + +def __dtype_from_pep3118(stream, is_subdtype): + field_spec = { + 'names': [], + 'formats': [], + 'offsets': [], + 'itemsize': 0 + } + offset = 0 + common_alignment = 1 + is_padding = False + + # Parse spec + while stream: + value = None + + # End of structure, bail out to upper level + if stream.consume('}'): + break + + # Sub-arrays (1) + shape = None + if stream.consume('('): + shape = stream.consume_until(')') + shape = tuple(map(int, shape.split(','))) + + # Byte order + if stream.next in ('@', '=', '<', '>', '^', '!'): + byteorder = stream.advance(1) + if byteorder == '!': + byteorder = '>' + stream.byteorder = byteorder + + # Byte order characters also control native vs. standard type sizes + if stream.byteorder in ('@', '^'): + type_map = _pep3118_native_map + type_map_chars = _pep3118_native_typechars + else: + type_map = _pep3118_standard_map + type_map_chars = _pep3118_standard_typechars + + # Item sizes + itemsize_str = stream.consume_until(lambda c: not c.isdigit()) + if itemsize_str: + itemsize = int(itemsize_str) + else: + itemsize = 1 + + # Data types + is_padding = False + + if stream.consume('T{'): + value, align = __dtype_from_pep3118( + stream, is_subdtype=True) + elif stream.next in type_map_chars: + if stream.next == 'Z': + typechar = stream.advance(2) + else: + typechar = stream.advance(1) + + is_padding = (typechar == 'x') + dtypechar = type_map[typechar] + if dtypechar in 'USV': + dtypechar += '%d' % itemsize + itemsize = 1 + numpy_byteorder = {'@': '=', '^': '='}.get( + stream.byteorder, stream.byteorder) + value = dtype(numpy_byteorder + dtypechar) + align = value.alignment + elif stream.next in _pep3118_unsupported_map: + desc = _pep3118_unsupported_map[stream.next] + raise NotImplementedError( + f"Unrepresentable PEP 3118 data type {stream.next!r} ({desc})") + else: + raise ValueError( + f"Unknown PEP 3118 data type specifier {stream.s!r}" + ) + + # + # Native alignment may require padding + # + # Here we assume that the presence of a '@' character implicitly + # implies that the start of the array is *already* aligned. + # + extra_offset = 0 + if stream.byteorder == '@': + start_padding = (-offset) % align + intra_padding = (-value.itemsize) % align + + offset += start_padding + + if intra_padding != 0: + if itemsize > 1 or (shape is not None and _prod(shape) > 1): + # Inject internal padding to the end of the sub-item + value = _add_trailing_padding(value, intra_padding) + else: + # We can postpone the injection of internal padding, + # as the item appears at most once + extra_offset += intra_padding + + # Update common alignment + common_alignment = _lcm(align, common_alignment) + + # Convert itemsize to sub-array + if itemsize != 1: + value = dtype((value, (itemsize,))) + + # Sub-arrays (2) + if shape is not None: + value = dtype((value, shape)) + + # Field name + if stream.consume(':'): + name = stream.consume_until(':') + else: + name = None + + if not (is_padding and name is None): + if name is not None and name in field_spec['names']: + raise RuntimeError( + f"Duplicate field name '{name}' in PEP3118 format" + ) + field_spec['names'].append(name) + field_spec['formats'].append(value) + field_spec['offsets'].append(offset) + + offset += value.itemsize + offset += extra_offset + + field_spec['itemsize'] = offset + + # extra final padding for aligned types + if stream.byteorder == '@': + field_spec['itemsize'] += (-offset) % common_alignment + + # Check if this was a simple 1-item type, and unwrap it + if (field_spec['names'] == [None] + and field_spec['offsets'][0] == 0 + and field_spec['itemsize'] == field_spec['formats'][0].itemsize + and not is_subdtype): + ret = field_spec['formats'][0] + else: + _fix_names(field_spec) + ret = dtype(field_spec) + + # Finished + return ret, common_alignment + +def _fix_names(field_spec): + """ Replace names which are None with the next unused f%d name """ + names = field_spec['names'] + for i, name in enumerate(names): + if name is not None: + continue + + j = 0 + while True: + name = f'f{j}' + if name not in names: + break + j = j + 1 + names[i] = name + +def _add_trailing_padding(value, padding): + """Inject the specified number of padding bytes at the end of a dtype""" + if value.fields is None: + field_spec = { + 'names': ['f0'], + 'formats': [value], + 'offsets': [0], + 'itemsize': value.itemsize + } + else: + fields = value.fields + names = value.names + field_spec = { + 'names': names, + 'formats': [fields[name][0] for name in names], + 'offsets': [fields[name][1] for name in names], + 'itemsize': value.itemsize + } + + field_spec['itemsize'] += padding + return dtype(field_spec) + +def _prod(a): + p = 1 + for x in a: + p *= x + return p + +def _gcd(a, b): + """Calculate the greatest common divisor of a and b""" + if not (math.isfinite(a) and math.isfinite(b)): + raise ValueError('Can only find greatest common divisor of ' + f'finite arguments, found "{a}" and "{b}"') + while b: + a, b = b, a % b + return a + +def _lcm(a, b): + return a // _gcd(a, b) * b + +def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs): + """ Format the error message for when __array_ufunc__ gives up. """ + args_string = ', '.join([f'{arg!r}' for arg in inputs] + + [f'{k}={v!r}' + for k, v in kwargs.items()]) + args = inputs + kwargs.get('out', ()) + types_string = ', '.join(repr(type(arg).__name__) for arg in args) + return ('operand type(s) all returned NotImplemented from ' + f'__array_ufunc__({ufunc!r}, {method!r}, {args_string}): {types_string}' + ) + + +def array_function_errmsg_formatter(public_api, types): + """ Format the error message for when __array_ufunc__ gives up. """ + func_name = f'{public_api.__module__}.{public_api.__name__}' + return (f"no implementation found for '{func_name}' on types that implement " + f'__array_function__: {list(types)}') + + +def _ufunc_doc_signature_formatter(ufunc): + """ + Builds a signature string which resembles PEP 457 + + This is used to construct the first line of the docstring + + Keep in sync with `_ufunc_inspect_signature_builder`. + """ + + # input arguments are simple + if ufunc.nin == 1: + in_args = 'x' + else: + in_args = ', '.join(f'x{i + 1}' for i in range(ufunc.nin)) + + # output arguments are both keyword or positional + if ufunc.nout == 0: + out_args = ', /, out=()' + elif ufunc.nout == 1: + out_args = ', /, out=None' + else: + out_args = '[, {positional}], / [, out={default}]'.format( + positional=', '.join( + f'out{i + 1}' for i in range(ufunc.nout)), + default=repr((None,) * ufunc.nout) + ) + + # keyword only args depend on whether this is a gufunc + kwargs = ( + ", casting='same_kind'" + ", order='K'" + ", dtype=None" + ", subok=True" + ) + + # NOTE: gufuncs may or may not support the `axis` parameter + if ufunc.signature is None: + kwargs = f", where=True{kwargs}[, signature]" + else: + kwargs += "[, signature, axes, axis]" + + # join all the parts together + return f'{ufunc.__name__}({in_args}{out_args}, *{kwargs})' + + +def _ufunc_inspect_signature_builder(ufunc): + """ + Builds a ``__signature__`` string. + + Should be kept in sync with `_ufunc_doc_signature_formatter`. + """ + + from inspect import Parameter, Signature + + params = [] + + # positional-only input parameters + if ufunc.nin == 1: + params.append(Parameter("x", Parameter.POSITIONAL_ONLY)) + else: + params.extend( + Parameter(f"x{i}", Parameter.POSITIONAL_ONLY) + for i in range(1, ufunc.nin + 1) + ) + + # for the sake of simplicity, we only consider a single output parameter + if ufunc.nout == 1: + out_default = None + else: + out_default = (None,) * ufunc.nout + params.append( + Parameter("out", Parameter.POSITIONAL_OR_KEYWORD, default=out_default), + ) + + if ufunc.signature is None: + params.append(Parameter("where", Parameter.KEYWORD_ONLY, default=True)) + else: + # NOTE: not all gufuncs support the `axis` parameters + params.append(Parameter("axes", Parameter.KEYWORD_ONLY, default=_NoValue)) + params.append(Parameter("axis", Parameter.KEYWORD_ONLY, default=_NoValue)) + params.append(Parameter("keepdims", Parameter.KEYWORD_ONLY, default=False)) + + params.extend(( + Parameter("casting", Parameter.KEYWORD_ONLY, default='same_kind'), + Parameter("order", Parameter.KEYWORD_ONLY, default='K'), + Parameter("dtype", Parameter.KEYWORD_ONLY, default=None), + Parameter("subok", Parameter.KEYWORD_ONLY, default=True), + Parameter("signature", Parameter.KEYWORD_ONLY, default=None), + )) + + return Signature(params) + + +def npy_ctypes_check(cls): + # determine if a class comes from ctypes, in order to work around + # a bug in the buffer protocol for those objects, bpo-10746 + try: + # ctypes class are new-style, so have an __mro__. This probably fails + # for ctypes classes with multiple inheritance. + if IS_PYPY: + # (..., _ctypes.basics._CData, Bufferable, object) + ctype_base = cls.__mro__[-3] + else: + # # (..., _ctypes._CData, object) + ctype_base = cls.__mro__[-2] + # right now, they're part of the _ctypes module + return '_ctypes' in ctype_base.__module__ + except Exception: + return False + +# used to handle the _NoValue default argument for na_object +# in the C implementation of the __reduce__ method for stringdtype +def _convert_to_stringdtype_kwargs(coerce, na_object=_NoValue): + if na_object is _NoValue: + return StringDType(coerce=coerce) + return StringDType(coerce=coerce, na_object=na_object) diff --git a/local_packages/numpy/_core/_internal.pyi b/local_packages/numpy/_core/_internal.pyi new file mode 100644 index 0000000..6e37022 --- /dev/null +++ b/local_packages/numpy/_core/_internal.pyi @@ -0,0 +1,61 @@ +import ctypes as ct +import re +from collections.abc import Callable, Iterable +from typing import Any, Final, Generic, Self, overload +from typing_extensions import TypeVar + +import numpy as np +import numpy.typing as npt +from numpy.ctypeslib import c_intp + +_CastT = TypeVar("_CastT", bound=ct._CanCastTo) +_T_co = TypeVar("_T_co", covariant=True) +_CT = TypeVar("_CT", bound=ct._CData) +_PT_co = TypeVar("_PT_co", bound=int | None, default=None, covariant=True) + +### + +IS_PYPY: Final[bool] = ... + +format_re: Final[re.Pattern[str]] = ... +sep_re: Final[re.Pattern[str]] = ... +space_re: Final[re.Pattern[str]] = ... + +### + +# TODO: Let the likes of `shape_as` and `strides_as` return `None` +# for 0D arrays once we've got shape-support + +class _ctypes(Generic[_PT_co]): + @overload + def __init__(self: _ctypes[None], /, array: npt.NDArray[Any], ptr: None = None) -> None: ... + @overload + def __init__(self, /, array: npt.NDArray[Any], ptr: _PT_co) -> None: ... + + # + @property + def data(self) -> _PT_co: ... + @property + def shape(self) -> ct.Array[c_intp]: ... + @property + def strides(self) -> ct.Array[c_intp]: ... + @property + def _as_parameter_(self) -> ct.c_void_p: ... + + # + def data_as(self, /, obj: type[_CastT]) -> _CastT: ... + def shape_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ... + def strides_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ... + +class dummy_ctype(Generic[_T_co]): + _cls: type[_T_co] + + def __init__(self, /, cls: type[_T_co]) -> None: ... + def __eq__(self, other: Self, /) -> bool: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __ne__(self, other: Self, /) -> bool: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __mul__(self, other: object, /) -> Self: ... + def __call__(self, /, *other: object) -> _T_co: ... + +def array_ufunc_errmsg_formatter(dummy: object, ufunc: np.ufunc, method: str, *inputs: object, **kwargs: object) -> str: ... +def array_function_errmsg_formatter(public_api: Callable[..., object], types: Iterable[str]) -> str: ... +def npy_ctypes_check(cls: type) -> bool: ... diff --git a/local_packages/numpy/_core/_methods.py b/local_packages/numpy/_core/_methods.py new file mode 100644 index 0000000..1c29831 --- /dev/null +++ b/local_packages/numpy/_core/_methods.py @@ -0,0 +1,252 @@ +""" +Array methods which are called by both the C-code for the method +and the Python code for the NumPy-namespace function + +""" +import os +import pickle +import warnings +from contextlib import nullcontext + +import numpy as np +from numpy._core import multiarray as mu, numerictypes as nt, umath as um +from numpy._core.multiarray import asanyarray +from numpy._globals import _NoValue + +# save those O(100) nanoseconds! +bool_dt = mu.dtype("bool") +umr_maximum = um.maximum.reduce +umr_minimum = um.minimum.reduce +umr_sum = um.add.reduce +umr_prod = um.multiply.reduce +umr_bitwise_count = um.bitwise_count +umr_any = um.logical_or.reduce +umr_all = um.logical_and.reduce + +# Complex types to -> (2,)float view for fast-path computation in _var() +_complex_to_float = { + nt.dtype(nt.csingle): nt.dtype(nt.single), + nt.dtype(nt.cdouble): nt.dtype(nt.double), +} +# Special case for windows: ensure double takes precedence +if nt.dtype(nt.longdouble) != nt.dtype(nt.double): + _complex_to_float.update({ + nt.dtype(nt.clongdouble): nt.dtype(nt.longdouble), + }) + +# avoid keyword arguments to speed up parsing, saves about 15%-20% for very +# small reductions +def _amax(a, axis=None, out=None, keepdims=False, + initial=_NoValue, where=True): + return umr_maximum(a, axis, None, out, keepdims, initial, where) + +def _amin(a, axis=None, out=None, keepdims=False, + initial=_NoValue, where=True): + return umr_minimum(a, axis, None, out, keepdims, initial, where) + +def _sum(a, axis=None, dtype=None, out=None, keepdims=False, + initial=_NoValue, where=True): + return umr_sum(a, axis, dtype, out, keepdims, initial, where) + +def _prod(a, axis=None, dtype=None, out=None, keepdims=False, + initial=_NoValue, where=True): + return umr_prod(a, axis, dtype, out, keepdims, initial, where) + +def _any(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True): + # By default, return a boolean for any and all + if dtype is None: + dtype = bool_dt + # Parsing keyword arguments is currently fairly slow, so avoid it for now + if where is True: + return umr_any(a, axis, dtype, out, keepdims) + return umr_any(a, axis, dtype, out, keepdims, where=where) + +def _all(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True): + # By default, return a boolean for any and all + if dtype is None: + dtype = bool_dt + # Parsing keyword arguments is currently fairly slow, so avoid it for now + if where is True: + return umr_all(a, axis, dtype, out, keepdims) + return umr_all(a, axis, dtype, out, keepdims, where=where) + +def _count_reduce_items(arr, axis, keepdims=False, where=True): + # fast-path for the default case + if where is True: + # no boolean mask given, calculate items according to axis + if axis is None: + axis = tuple(range(arr.ndim)) + elif not isinstance(axis, tuple): + axis = (axis,) + items = 1 + for ax in axis: + items *= arr.shape[mu.normalize_axis_index(ax, arr.ndim)] + items = nt.intp(items) + else: + # TODO: Optimize case when `where` is broadcast along a non-reduction + # axis and full sum is more excessive than needed. + + # guarded to protect circular imports + from numpy.lib._stride_tricks_impl import broadcast_to + # count True values in (potentially broadcasted) boolean mask + items = umr_sum(broadcast_to(where, arr.shape), axis, nt.intp, None, + keepdims) + return items + +def _clip(a, min=None, max=None, out=None, **kwargs): + if a.dtype.kind in "iu": + # If min/max is a Python integer, deal with out-of-bound values here. + # (This enforces NEP 50 rules as no value based promotion is done.) + if type(min) is int and min <= np.iinfo(a.dtype).min: + min = None + if type(max) is int and max >= np.iinfo(a.dtype).max: + max = None + + if min is None and max is None: + # return identity + return um.positive(a, out=out, **kwargs) + elif min is None: + return um.minimum(a, max, out=out, **kwargs) + elif max is None: + return um.maximum(a, min, out=out, **kwargs) + else: + return um.clip(a, min, max, out=out, **kwargs) + +def _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True): + arr = asanyarray(a) + + is_float16_result = False + + rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where) + if rcount == 0 if where is True else umr_any(rcount == 0, axis=None): + warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2) + + # Cast bool, unsigned int, and int to float64 by default + if dtype is None: + if issubclass(arr.dtype.type, (nt.integer, nt.bool)): + dtype = mu.dtype('f8') + elif issubclass(arr.dtype.type, nt.float16): + dtype = mu.dtype('f4') + is_float16_result = True + + ret = umr_sum(arr, axis, dtype, out, keepdims, where=where) + if isinstance(ret, mu.ndarray): + ret = um.true_divide( + ret, rcount, out=ret, casting='unsafe', subok=False) + if is_float16_result and out is None: + ret = arr.dtype.type(ret) + elif hasattr(ret, 'dtype'): + if is_float16_result: + ret = arr.dtype.type(ret / rcount) + else: + ret = ret.dtype.type(ret / rcount) + else: + ret = ret / rcount + + return ret + +def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, + where=True, mean=None): + arr = asanyarray(a) + + rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where) + # Make this warning show up on top. + if ddof >= rcount if where is True else umr_any(ddof >= rcount, axis=None): + warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning, + stacklevel=2) + + # Cast bool, unsigned int, and int to float64 by default + if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool)): + dtype = mu.dtype('f8') + + if mean is not None: + arrmean = mean + else: + # Compute the mean. + # Note that if dtype is not of inexact type then arraymean will + # not be either. + arrmean = umr_sum(arr, axis, dtype, keepdims=True, where=where) + # The shape of rcount has to match arrmean to not change the shape of + # out in broadcasting. Otherwise, it cannot be stored back to arrmean. + if rcount.ndim == 0: + # fast-path for default case when where is True + div = rcount + else: + # matching rcount to arrmean when where is specified as array + div = rcount.reshape(arrmean.shape) + if isinstance(arrmean, mu.ndarray): + arrmean = um.true_divide(arrmean, div, out=arrmean, + casting='unsafe', subok=False) + elif hasattr(arrmean, "dtype"): + arrmean = arrmean.dtype.type(arrmean / rcount) + else: + arrmean = arrmean / rcount + + # Compute sum of squared deviations from mean + # Note that x may not be inexact and that we need it to be an array, + # not a scalar. + x = um.subtract(arr, arrmean, out=...) + if issubclass(arr.dtype.type, (nt.floating, nt.integer)): + x = um.square(x, out=x) + # Fast-paths for built-in complex types + elif (_float_dtype := _complex_to_float.get(x.dtype)) is not None: + xv = x.view(dtype=(_float_dtype, (2,))) + um.square(xv, out=xv) + x = um.add(xv[..., 0], xv[..., 1], out=x.real) + # Most general case; includes handling object arrays containing imaginary + # numbers and complex types with non-native byteorder + else: + x = um.multiply(x, um.conjugate(x), out=x).real + + ret = umr_sum(x, axis, dtype, out, keepdims=keepdims, where=where) + + # Compute degrees of freedom and make sure it is not negative. + rcount = um.maximum(rcount - ddof, 0) + + # divide by degrees of freedom + if isinstance(ret, mu.ndarray): + ret = um.true_divide( + ret, rcount, out=ret, casting='unsafe', subok=False) + elif hasattr(ret, 'dtype'): + ret = ret.dtype.type(ret / rcount) + else: + ret = ret / rcount + + return ret + +def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, + where=True, mean=None): + ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, + keepdims=keepdims, where=where, mean=mean) + + if isinstance(ret, mu.ndarray): + ret = um.sqrt(ret, out=ret) + elif hasattr(ret, 'dtype'): + ret = ret.dtype.type(um.sqrt(ret)) + else: + ret = um.sqrt(ret) + + return ret + +def _ptp(a, axis=None, out=None, keepdims=False): + return um.subtract( + umr_maximum(a, axis, None, out, keepdims), + umr_minimum(a, axis, None, None, keepdims), + out + ) + +def _dump(self, file, protocol=2): + if hasattr(file, 'write'): + ctx = nullcontext(file) + else: + ctx = open(os.fspath(file), "wb") + with ctx as f: + pickle.dump(self, f, protocol=protocol) + +def _dumps(self, protocol=2): + return pickle.dumps(self, protocol=protocol) + +def _bitwise_count(a, out=None, *, where=True, casting='same_kind', + order='K', dtype=None, subok=True): + return umr_bitwise_count(a, out, where=where, casting=casting, + order=order, dtype=dtype, subok=subok) diff --git a/local_packages/numpy/_core/_methods.pyi b/local_packages/numpy/_core/_methods.pyi new file mode 100644 index 0000000..3c80683 --- /dev/null +++ b/local_packages/numpy/_core/_methods.pyi @@ -0,0 +1,22 @@ +from collections.abc import Callable +from typing import Any, Concatenate, TypeAlias + +import numpy as np + +from . import _exceptions as _exceptions + +### + +_Reduce2: TypeAlias = Callable[Concatenate[object, ...], Any] + +### + +bool_dt: np.dtype[np.bool] = ... +umr_maximum: _Reduce2 = ... +umr_minimum: _Reduce2 = ... +umr_sum: _Reduce2 = ... +umr_prod: _Reduce2 = ... +umr_bitwise_count = np.bitwise_count +umr_any: _Reduce2 = ... +umr_all: _Reduce2 = ... +_complex_to_float: dict[np.dtype[np.complexfloating], np.dtype[np.floating]] = ... diff --git a/local_packages/numpy/_core/_multiarray_tests.cp312-win_amd64.lib b/local_packages/numpy/_core/_multiarray_tests.cp312-win_amd64.lib new file mode 100644 index 0000000..2fa0ae5 Binary files /dev/null and b/local_packages/numpy/_core/_multiarray_tests.cp312-win_amd64.lib differ diff --git a/local_packages/numpy/_core/_multiarray_tests.cp312-win_amd64.pyd b/local_packages/numpy/_core/_multiarray_tests.cp312-win_amd64.pyd new file mode 100644 index 0000000..fa1123b Binary files /dev/null and b/local_packages/numpy/_core/_multiarray_tests.cp312-win_amd64.pyd differ diff --git a/local_packages/numpy/_core/_multiarray_umath.cp312-win_amd64.lib b/local_packages/numpy/_core/_multiarray_umath.cp312-win_amd64.lib new file mode 100644 index 0000000..be354b3 Binary files /dev/null and b/local_packages/numpy/_core/_multiarray_umath.cp312-win_amd64.lib differ diff --git a/local_packages/numpy/_core/_multiarray_umath.cp312-win_amd64.pyd b/local_packages/numpy/_core/_multiarray_umath.cp312-win_amd64.pyd new file mode 100644 index 0000000..ad25eee Binary files /dev/null and b/local_packages/numpy/_core/_multiarray_umath.cp312-win_amd64.pyd differ diff --git a/local_packages/numpy/_core/_operand_flag_tests.cp312-win_amd64.lib b/local_packages/numpy/_core/_operand_flag_tests.cp312-win_amd64.lib new file mode 100644 index 0000000..efd4e9c Binary files /dev/null and b/local_packages/numpy/_core/_operand_flag_tests.cp312-win_amd64.lib differ diff --git a/local_packages/numpy/_core/_operand_flag_tests.cp312-win_amd64.pyd b/local_packages/numpy/_core/_operand_flag_tests.cp312-win_amd64.pyd new file mode 100644 index 0000000..cea44d6 Binary files /dev/null and b/local_packages/numpy/_core/_operand_flag_tests.cp312-win_amd64.pyd differ diff --git a/local_packages/numpy/_core/_rational_tests.cp312-win_amd64.lib b/local_packages/numpy/_core/_rational_tests.cp312-win_amd64.lib new file mode 100644 index 0000000..58963da Binary files /dev/null and b/local_packages/numpy/_core/_rational_tests.cp312-win_amd64.lib differ diff --git a/local_packages/numpy/_core/_rational_tests.cp312-win_amd64.pyd b/local_packages/numpy/_core/_rational_tests.cp312-win_amd64.pyd new file mode 100644 index 0000000..4e6110b Binary files /dev/null and b/local_packages/numpy/_core/_rational_tests.cp312-win_amd64.pyd differ diff --git a/local_packages/numpy/_core/_simd.cp312-win_amd64.lib b/local_packages/numpy/_core/_simd.cp312-win_amd64.lib new file mode 100644 index 0000000..ad0da5d Binary files /dev/null and b/local_packages/numpy/_core/_simd.cp312-win_amd64.lib differ diff --git a/local_packages/numpy/_core/_simd.cp312-win_amd64.pyd b/local_packages/numpy/_core/_simd.cp312-win_amd64.pyd new file mode 100644 index 0000000..73436e1 Binary files /dev/null and b/local_packages/numpy/_core/_simd.cp312-win_amd64.pyd differ diff --git a/local_packages/numpy/_core/_simd.pyi b/local_packages/numpy/_core/_simd.pyi new file mode 100644 index 0000000..0ba7d78 --- /dev/null +++ b/local_packages/numpy/_core/_simd.pyi @@ -0,0 +1,35 @@ +from types import ModuleType +from typing import TypedDict, type_check_only + +# NOTE: these 5 are only defined on systems with an intel processor +SSE42: ModuleType | None = ... +FMA3: ModuleType | None = ... +AVX2: ModuleType | None = ... +AVX512F: ModuleType | None = ... +AVX512_SKX: ModuleType | None = ... + +# NOTE: these 2 are only defined on systems with an arm processor +ASIMD: ModuleType | None = ... +NEON: ModuleType | None = ... + +# NOTE: This is only defined on systems with an riscv64 processor. +RVV: ModuleType | None = ... + +baseline: ModuleType | None = ... + +@type_check_only +class SimdTargets(TypedDict): + SSE42: ModuleType | None + AVX2: ModuleType | None + FMA3: ModuleType | None + AVX512F: ModuleType | None + AVX512_SKX: ModuleType | None + ASIMD: ModuleType | None + NEON: ModuleType | None + RVV: ModuleType | None + baseline: ModuleType | None + +targets: SimdTargets = ... + +def clear_floatstatus() -> None: ... +def get_floatstatus() -> int: ... diff --git a/local_packages/numpy/_core/_string_helpers.py b/local_packages/numpy/_core/_string_helpers.py new file mode 100644 index 0000000..87085d4 --- /dev/null +++ b/local_packages/numpy/_core/_string_helpers.py @@ -0,0 +1,100 @@ +""" +String-handling utilities to avoid locale-dependence. + +Used primarily to generate type name aliases. +""" +# "import string" is costly to import! +# Construct the translation tables directly +# "A" = chr(65), "a" = chr(97) +_all_chars = tuple(map(chr, range(256))) +_ascii_upper = _all_chars[65:65 + 26] +_ascii_lower = _all_chars[97:97 + 26] +LOWER_TABLE = _all_chars[:65] + _ascii_lower + _all_chars[65 + 26:] +UPPER_TABLE = _all_chars[:97] + _ascii_upper + _all_chars[97 + 26:] + + +def english_lower(s): + """ Apply English case rules to convert ASCII strings to all lower case. + + This is an internal utility function to replace calls to str.lower() such + that we can avoid changing behavior with changing locales. In particular, + Turkish has distinct dotted and dotless variants of the Latin letter "I" in + both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale. + + Parameters + ---------- + s : str + + Returns + ------- + lowered : str + + Examples + -------- + >>> from numpy._core.numerictypes import english_lower + >>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_') + 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_' + >>> english_lower('') + '' + """ + lowered = s.translate(LOWER_TABLE) + return lowered + + +def english_upper(s): + """ Apply English case rules to convert ASCII strings to all upper case. + + This is an internal utility function to replace calls to str.upper() such + that we can avoid changing behavior with changing locales. In particular, + Turkish has distinct dotted and dotless variants of the Latin letter "I" in + both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale. + + Parameters + ---------- + s : str + + Returns + ------- + uppered : str + + Examples + -------- + >>> from numpy._core.numerictypes import english_upper + >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_') + 'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_' + >>> english_upper('') + '' + """ + uppered = s.translate(UPPER_TABLE) + return uppered + + +def english_capitalize(s): + """ Apply English case rules to convert the first character of an ASCII + string to upper case. + + This is an internal utility function to replace calls to str.capitalize() + such that we can avoid changing behavior with changing locales. + + Parameters + ---------- + s : str + + Returns + ------- + capitalized : str + + Examples + -------- + >>> from numpy._core.numerictypes import english_capitalize + >>> english_capitalize('int8') + 'Int8' + >>> english_capitalize('Int8') + 'Int8' + >>> english_capitalize('') + '' + """ + if s: + return english_upper(s[0]) + s[1:] + else: + return s diff --git a/local_packages/numpy/_core/_string_helpers.pyi b/local_packages/numpy/_core/_string_helpers.pyi new file mode 100644 index 0000000..6a85832 --- /dev/null +++ b/local_packages/numpy/_core/_string_helpers.pyi @@ -0,0 +1,12 @@ +from typing import Final + +_all_chars: Final[tuple[str, ...]] = ... +_ascii_upper: Final[tuple[str, ...]] = ... +_ascii_lower: Final[tuple[str, ...]] = ... + +LOWER_TABLE: Final[tuple[str, ...]] = ... +UPPER_TABLE: Final[tuple[str, ...]] = ... + +def english_lower(s: str) -> str: ... +def english_upper(s: str) -> str: ... +def english_capitalize(s: str) -> str: ... diff --git a/local_packages/numpy/_core/_struct_ufunc_tests.cp312-win_amd64.lib b/local_packages/numpy/_core/_struct_ufunc_tests.cp312-win_amd64.lib new file mode 100644 index 0000000..cf95734 Binary files /dev/null and b/local_packages/numpy/_core/_struct_ufunc_tests.cp312-win_amd64.lib differ diff --git a/local_packages/numpy/_core/_struct_ufunc_tests.cp312-win_amd64.pyd b/local_packages/numpy/_core/_struct_ufunc_tests.cp312-win_amd64.pyd new file mode 100644 index 0000000..dbff3a9 Binary files /dev/null and b/local_packages/numpy/_core/_struct_ufunc_tests.cp312-win_amd64.pyd differ diff --git a/local_packages/numpy/_core/_type_aliases.py b/local_packages/numpy/_core/_type_aliases.py new file mode 100644 index 0000000..51c8e6c --- /dev/null +++ b/local_packages/numpy/_core/_type_aliases.py @@ -0,0 +1,131 @@ +""" +Due to compatibility, numpy has a very large number of different naming +conventions for the scalar types (those subclassing from `numpy.generic`). +This file produces a convoluted set of dictionaries mapping names to types, +and sometimes other mappings too. + +.. data:: allTypes + A dictionary of names to types that will be exposed as attributes through + ``np._core.numerictypes.*`` + +.. data:: sctypeDict + Similar to `allTypes`, but maps a broader set of aliases to their types. + +.. data:: sctypes + A dictionary keyed by a "type group" string, providing a list of types + under that group. + +""" + +import numpy._core.multiarray as ma +from numpy._core.multiarray import dtype, typeinfo + +###################################### +# Building `sctypeDict` and `allTypes` +###################################### + +sctypeDict = {} +allTypes = {} +c_names_dict = {} + +_abstract_type_names = { + "generic", "integer", "inexact", "floating", "number", + "flexible", "character", "complexfloating", "unsignedinteger", + "signedinteger" +} + +for _abstract_type_name in _abstract_type_names: + allTypes[_abstract_type_name] = getattr(ma, _abstract_type_name) + del _abstract_type_name + +for k, v in typeinfo.items(): + if k.startswith("NPY_") and v not in c_names_dict: + c_names_dict[k[4:]] = v + else: + concrete_type = v.type + allTypes[k] = concrete_type + sctypeDict[k] = concrete_type + del concrete_type + del k, v + +_aliases = { + "double": "float64", + "cdouble": "complex128", + "single": "float32", + "csingle": "complex64", + "half": "float16", + "bool_": "bool", + # Default integer: + "int_": "intp", + "uint": "uintp", +} + +for k, v in _aliases.items(): + sctypeDict[k] = allTypes[v] + allTypes[k] = allTypes[v] + del k, v + +# extra aliases are added only to `sctypeDict` +# to support dtype name access, such as`np.dtype("float")` +_extra_aliases = { + "float": "float64", + "complex": "complex128", + "object": "object_", + "bytes": "bytes_", + "a": "bytes_", + "int": "int_", + "str": "str_", + "unicode": "str_", +} + +for k, v in _extra_aliases.items(): + sctypeDict[k] = allTypes[v] + del k, v + +# include extended precision sized aliases +for is_complex, full_name in [(False, "longdouble"), (True, "clongdouble")]: + longdouble_type = allTypes[full_name] + + bits = dtype(longdouble_type).itemsize * 8 + base_name = "complex" if is_complex else "float" + extended_prec_name = f"{base_name}{bits}" + if extended_prec_name not in allTypes: + sctypeDict[extended_prec_name] = longdouble_type + allTypes[extended_prec_name] = longdouble_type + + del is_complex, full_name, longdouble_type, bits, base_name, extended_prec_name + + +#################### +# Building `sctypes` +#################### + +sctypes = {"int": set(), "uint": set(), "float": set(), + "complex": set(), "others": set()} + +for type_info in typeinfo.values(): + if type_info.kind in ["M", "m"]: # exclude timedelta and datetime + continue + + concrete_type = type_info.type + + # find proper group for each concrete type + for type_group, abstract_type in [ + ("int", ma.signedinteger), ("uint", ma.unsignedinteger), + ("float", ma.floating), ("complex", ma.complexfloating), + ("others", ma.generic) + ]: + if issubclass(concrete_type, abstract_type): + sctypes[type_group].add(concrete_type) + del type_group, abstract_type + break + + del type_info, concrete_type + +# sort sctype groups by bitsize +for sctype_key in sctypes.keys(): + sctype_list = list(sctypes[sctype_key]) + sctype_list.sort(key=lambda x: dtype(x).itemsize) + sctypes[sctype_key] = sctype_list + + del sctype_key, sctype_list diff --git a/local_packages/numpy/_core/_type_aliases.pyi b/local_packages/numpy/_core/_type_aliases.pyi new file mode 100644 index 0000000..e28541c --- /dev/null +++ b/local_packages/numpy/_core/_type_aliases.pyi @@ -0,0 +1,86 @@ +from collections.abc import Collection +from typing import Final, Literal as L, TypeAlias, TypedDict, type_check_only + +import numpy as np + +sctypeDict: Final[dict[str, type[np.generic]]] +allTypes: Final[dict[str, type[np.generic]]] + +@type_check_only +class _CNamesDict(TypedDict): + BOOL: np.dtype[np.bool] + HALF: np.dtype[np.half] + FLOAT: np.dtype[np.single] + DOUBLE: np.dtype[np.double] + LONGDOUBLE: np.dtype[np.longdouble] + CFLOAT: np.dtype[np.csingle] + CDOUBLE: np.dtype[np.cdouble] + CLONGDOUBLE: np.dtype[np.clongdouble] + STRING: np.dtype[np.bytes_] + UNICODE: np.dtype[np.str_] + VOID: np.dtype[np.void] + OBJECT: np.dtype[np.object_] + DATETIME: np.dtype[np.datetime64] + TIMEDELTA: np.dtype[np.timedelta64] + BYTE: np.dtype[np.byte] + UBYTE: np.dtype[np.ubyte] + SHORT: np.dtype[np.short] + USHORT: np.dtype[np.ushort] + INT: np.dtype[np.intc] + UINT: np.dtype[np.uintc] + LONG: np.dtype[np.long] + ULONG: np.dtype[np.ulong] + LONGLONG: np.dtype[np.longlong] + ULONGLONG: np.dtype[np.ulonglong] + +c_names_dict: Final[_CNamesDict] + +_AbstractTypeName: TypeAlias = L[ + "generic", + "flexible", + "character", + "number", + "integer", + "inexact", + "unsignedinteger", + "signedinteger", + "floating", + "complexfloating", +] +_abstract_type_names: Final[set[_AbstractTypeName]] + +@type_check_only +class _AliasesType(TypedDict): + double: L["float64"] + cdouble: L["complex128"] + single: L["float32"] + csingle: L["complex64"] + half: L["float16"] + bool_: L["bool"] + int_: L["intp"] + uint: L["intp"] + +_aliases: Final[_AliasesType] + +@type_check_only +class _ExtraAliasesType(TypedDict): + float: L["float64"] + complex: L["complex128"] + object: L["object_"] + bytes: L["bytes_"] + a: L["bytes_"] + int: L["int_"] + str: L["str_"] + unicode: L["str_"] + +_extra_aliases: Final[_ExtraAliasesType] + +@type_check_only +class _SCTypes(TypedDict): + int: Collection[type[np.signedinteger]] + uint: Collection[type[np.unsignedinteger]] + float: Collection[type[np.floating]] + complex: Collection[type[np.complexfloating]] + others: Collection[type[np.flexible | np.bool | np.object_]] + +sctypes: Final[_SCTypes] diff --git a/local_packages/numpy/_core/_ufunc_config.py b/local_packages/numpy/_core/_ufunc_config.py new file mode 100644 index 0000000..6a74766 --- /dev/null +++ b/local_packages/numpy/_core/_ufunc_config.py @@ -0,0 +1,515 @@ +""" +Functions for changing global ufunc configuration + +This provides helpers which wrap `_get_extobj_dict` and `_make_extobj`, and +`_extobj_contextvar` from umath. +""" +import functools + +from numpy._utils import set_module + +from .umath import _extobj_contextvar, _get_extobj_dict, _make_extobj + +__all__ = [ + "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall", + "errstate" +] + + +@set_module('numpy') +def seterr(all=None, divide=None, over=None, under=None, invalid=None): + """ + Set how floating-point errors are handled. + + Note that operations on integer scalar types (such as `int16`) are + handled like floating point, and are affected by these settings. + + Parameters + ---------- + all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Set treatment for all types of floating-point errors at once: + + - ignore: Take no action when the exception occurs. + - warn: Print a :exc:`RuntimeWarning` (via the Python `warnings` + module). + - raise: Raise a :exc:`FloatingPointError`. + - call: Call a function specified using the `seterrcall` function. + - print: Print a warning directly to ``stdout``. + - log: Record error in a Log object specified by `seterrcall`. + + The default is not to change the current behavior. + divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Treatment for division by zero. + over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Treatment for floating-point overflow. + under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Treatment for floating-point underflow. + invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Treatment for invalid floating-point operation. + + Returns + ------- + old_settings : dict + Dictionary containing the old settings. + + See also + -------- + seterrcall : Set a callback function for the 'call' mode. + geterr, geterrcall, errstate + + + Notes + ----- + The floating-point exceptions are defined in the IEEE 754 standard [1]_: + + - Division by zero: infinite result obtained from finite numbers. + - Overflow: result too large to be expressed. + - Underflow: result so close to zero that some precision + was lost. + - Invalid operation: result is not an expressible number, typically + indicates that a NaN was produced. + + **Concurrency note:** see :ref:`fp_error_handling` + + .. [1] https://en.wikipedia.org/wiki/IEEE_754 + + Examples + -------- + >>> import numpy as np + >>> orig_settings = np.seterr(all='ignore') # seterr to known value + >>> np.int16(32000) * np.int16(3) + np.int16(30464) + >>> np.seterr(over='raise') + {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'} + >>> old_settings = np.seterr(all='warn', over='raise') + >>> np.int16(32000) * np.int16(3) + Traceback (most recent call last): + File "", line 1, in + FloatingPointError: overflow encountered in scalar multiply + + >>> old_settings = np.seterr(all='print') + >>> np.geterr() + {'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'} + >>> np.int16(32000) * np.int16(3) + np.int16(30464) + >>> np.seterr(**orig_settings) # restore original + {'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'} + + """ + + old = _get_extobj_dict() + # The errstate doesn't include call and bufsize, so pop them: + old.pop("call", None) + old.pop("bufsize", None) + + extobj = _make_extobj( + all=all, divide=divide, over=over, under=under, invalid=invalid) + _extobj_contextvar.set(extobj) + return old + + +@set_module('numpy') +def geterr(): + """ + Get the current way of handling floating-point errors. + + Returns + ------- + res : dict + A dictionary with keys "divide", "over", "under", and "invalid", + whose values are from the strings "ignore", "print", "log", "warn", + "raise", and "call". The keys represent possible floating-point + exceptions, and the values define how these exceptions are handled. + + See Also + -------- + geterrcall, seterr, seterrcall + + Notes + ----- + For complete documentation of the types of floating-point exceptions and + treatment options, see `seterr`. + + **Concurrency note:** see :doc:`/reference/routines.err` + + Examples + -------- + >>> import numpy as np + >>> np.geterr() + {'divide': 'warn', 'over': 'warn', 'under': 'ignore', 'invalid': 'warn'} + >>> np.arange(3.) / np.arange(3.) # doctest: +SKIP + array([nan, 1., 1.]) + RuntimeWarning: invalid value encountered in divide + + >>> oldsettings = np.seterr(all='warn', invalid='raise') + >>> np.geterr() + {'divide': 'warn', 'over': 'warn', 'under': 'warn', 'invalid': 'raise'} + >>> np.arange(3.) / np.arange(3.) + Traceback (most recent call last): + ... + FloatingPointError: invalid value encountered in divide + >>> oldsettings = np.seterr(**oldsettings) # restore original + + """ + res = _get_extobj_dict() + # The "geterr" doesn't include call and bufsize,: + res.pop("call", None) + res.pop("bufsize", None) + return res + + +@set_module('numpy') +def setbufsize(size): + """ + Set the size of the buffer used in ufuncs. + + .. versionchanged:: 2.0 + The scope of setting the buffer is tied to the `numpy.errstate` + context. Exiting a ``with errstate():`` will also restore the bufsize. + + Parameters + ---------- + size : int + Size of buffer. + + Returns + ------- + bufsize : int + Previous size of ufunc buffer in bytes. + + Notes + ----- + **Concurrency note:** see :doc:`/reference/routines.err` + + Examples + -------- + When exiting a `numpy.errstate` context manager the bufsize is restored: + + >>> import numpy as np + >>> with np.errstate(): + ... np.setbufsize(4096) + ... print(np.getbufsize()) + ... + 8192 + 4096 + >>> np.getbufsize() + 8192 + + """ + if size < 0: + raise ValueError("buffer size must be non-negative") + old = _get_extobj_dict()["bufsize"] + extobj = _make_extobj(bufsize=size) + _extobj_contextvar.set(extobj) + return old + + +@set_module('numpy') +def getbufsize(): + """ + Return the size of the buffer used in ufuncs. + + Returns + ------- + getbufsize : int + Size of ufunc buffer in bytes. + + Notes + ----- + + **Concurrency note:** see :doc:`/reference/routines.err` + + + Examples + -------- + >>> import numpy as np + >>> np.getbufsize() + 8192 + + """ + return _get_extobj_dict()["bufsize"] + + +@set_module('numpy') +def seterrcall(func): + """ + Set the floating-point error callback function or log object. + + There are two ways to capture floating-point error messages. The first + is to set the error-handler to 'call', using `seterr`. Then, set + the function to call using this function. + + The second is to set the error-handler to 'log', using `seterr`. + Floating-point errors then trigger a call to the 'write' method of + the provided object. + + Parameters + ---------- + func : callable f(err, flag) or object with write method + Function to call upon floating-point errors ('call'-mode) or + object whose 'write' method is used to log such message ('log'-mode). + + The call function takes two arguments. The first is a string describing + the type of error (such as "divide by zero", "overflow", "underflow", + or "invalid value"), and the second is the status flag. The flag is a + byte, whose four least-significant bits indicate the type of error, one + of "divide", "over", "under", "invalid":: + + [0 0 0 0 divide over under invalid] + + In other words, ``flags = divide + 2*over + 4*under + 8*invalid``. + + If an object is provided, its write method should take one argument, + a string. + + Returns + ------- + h : callable, log instance or None + The old error handler. + + See Also + -------- + seterr, geterr, geterrcall + + Notes + ----- + + **Concurrency note:** see :doc:`/reference/routines.err` + + Examples + -------- + Callback upon error: + + >>> def err_handler(type, flag): + ... print("Floating point error (%s), with flag %s" % (type, flag)) + ... + + >>> import numpy as np + + >>> orig_handler = np.seterrcall(err_handler) + >>> orig_err = np.seterr(all='call') + + >>> np.array([1, 2, 3]) / 0.0 + Floating point error (divide by zero), with flag 1 + array([inf, inf, inf]) + + >>> np.seterrcall(orig_handler) + + >>> np.seterr(**orig_err) + {'divide': 'call', 'over': 'call', 'under': 'call', 'invalid': 'call'} + + Log error message: + + >>> class Log: + ... def write(self, msg): + ... print("LOG: %s" % msg) + ... + + >>> log = Log() + >>> saved_handler = np.seterrcall(log) + >>> save_err = np.seterr(all='log') + + >>> np.array([1, 2, 3]) / 0.0 + LOG: Warning: divide by zero encountered in divide + array([inf, inf, inf]) + + >>> np.seterrcall(orig_handler) + + >>> np.seterr(**orig_err) + {'divide': 'log', 'over': 'log', 'under': 'log', 'invalid': 'log'} + + """ + old = _get_extobj_dict()["call"] + extobj = _make_extobj(call=func) + _extobj_contextvar.set(extobj) + return old + + +@set_module('numpy') +def geterrcall(): + """ + Return the current callback function used on floating-point errors. + + When the error handling for a floating-point error (one of "divide", + "over", "under", or "invalid") is set to 'call' or 'log', the function + that is called or the log instance that is written to is returned by + `geterrcall`. This function or log instance has been set with + `seterrcall`. + + Returns + ------- + errobj : callable, log instance or None + The current error handler. If no handler was set through `seterrcall`, + ``None`` is returned. + + See Also + -------- + seterrcall, seterr, geterr + + Notes + ----- + For complete documentation of the types of floating-point exceptions and + treatment options, see `seterr`. + + **Concurrency note:** see :ref:`fp_error_handling` + + Examples + -------- + >>> import numpy as np + >>> np.geterrcall() # we did not yet set a handler, returns None + + >>> orig_settings = np.seterr(all='call') + >>> def err_handler(type, flag): + ... print("Floating point error (%s), with flag %s" % (type, flag)) + >>> old_handler = np.seterrcall(err_handler) + >>> np.array([1, 2, 3]) / 0.0 + Floating point error (divide by zero), with flag 1 + array([inf, inf, inf]) + + >>> cur_handler = np.geterrcall() + >>> cur_handler is err_handler + True + >>> old_settings = np.seterr(**orig_settings) # restore original + >>> old_handler = np.seterrcall(None) # restore original + + """ + return _get_extobj_dict()["call"] + + +class _unspecified: + pass + + +_Unspecified = _unspecified() + + +@set_module('numpy') +class errstate: + """ + errstate(**kwargs) + + Context manager for floating-point error handling. + + Using an instance of `errstate` as a context manager allows statements in + that context to execute with a known error handling behavior. Upon entering + the context the error handling is set with `seterr` and `seterrcall`, and + upon exiting it is reset to what it was before. + + .. versionchanged:: 1.17.0 + `errstate` is also usable as a function decorator, saving + a level of indentation if an entire function is wrapped. + + .. versionchanged:: 2.0 + `errstate` is now fully thread and asyncio safe, but may not be + entered more than once. + It is not safe to decorate async functions using ``errstate``. + + Parameters + ---------- + kwargs : {divide, over, under, invalid} + Keyword arguments. The valid keywords are the possible floating-point + exceptions. Each keyword should have a string value that defines the + treatment for the particular error. Possible values are + {'ignore', 'warn', 'raise', 'call', 'print', 'log'}. + + See Also + -------- + seterr, geterr, seterrcall, geterrcall + + Notes + ----- + For complete documentation of the types of floating-point exceptions and + treatment options, see `seterr`. + + **Concurrency note:** see :ref:`fp_error_handling` + + Examples + -------- + >>> import numpy as np + >>> olderr = np.seterr(all='ignore') # Set error handling to known state. + + >>> np.arange(3) / 0. + array([nan, inf, inf]) + >>> with np.errstate(divide='ignore'): + ... np.arange(3) / 0. + array([nan, inf, inf]) + + >>> np.sqrt(-1) + np.float64(nan) + >>> with np.errstate(invalid='raise'): + ... np.sqrt(-1) + Traceback (most recent call last): + File "", line 2, in + FloatingPointError: invalid value encountered in sqrt + + Outside the context the error handling behavior has not changed: + + >>> np.geterr() + {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'} + >>> olderr = np.seterr(**olderr) # restore original state + + """ + __slots__ = ( + "_all", + "_call", + "_divide", + "_invalid", + "_over", + "_token", + "_under", + ) + + def __init__(self, *, call=_Unspecified, + all=None, divide=None, over=None, under=None, invalid=None): + self._token = None + self._call = call + self._all = all + self._divide = divide + self._over = over + self._under = under + self._invalid = invalid + + def __enter__(self): + # Note that __call__ duplicates much of this logic + if self._token is not None: + raise TypeError("Cannot enter `np.errstate` twice.") + if self._call is _Unspecified: + extobj = _make_extobj( + all=self._all, divide=self._divide, over=self._over, + under=self._under, invalid=self._invalid) + else: + extobj = _make_extobj( + call=self._call, + all=self._all, divide=self._divide, over=self._over, + under=self._under, invalid=self._invalid) + + self._token = _extobj_contextvar.set(extobj) + + def __exit__(self, *exc_info): + _extobj_contextvar.reset(self._token) + + def __call__(self, func): + # We need to customize `__call__` compared to `ContextDecorator` + # because we must store the token per-thread so cannot store it on + # the instance (we could create a new instance for this). + # This duplicates the code from `__enter__`. + @functools.wraps(func) + def inner(*args, **kwargs): + if self._call is _Unspecified: + extobj = _make_extobj( + all=self._all, divide=self._divide, over=self._over, + under=self._under, invalid=self._invalid) + else: + extobj = _make_extobj( + call=self._call, + all=self._all, divide=self._divide, over=self._over, + under=self._under, invalid=self._invalid) + + _token = _extobj_contextvar.set(extobj) + try: + # Call the original, decorated, function: + return func(*args, **kwargs) + finally: + _extobj_contextvar.reset(_token) + + return inner diff --git a/local_packages/numpy/_core/_ufunc_config.pyi b/local_packages/numpy/_core/_ufunc_config.pyi new file mode 100644 index 0000000..f1f0d88 --- /dev/null +++ b/local_packages/numpy/_core/_ufunc_config.pyi @@ -0,0 +1,69 @@ +from _typeshed import SupportsWrite +from collections.abc import Callable +from types import TracebackType +from typing import Any, Final, Literal, TypeAlias, TypedDict, TypeVar, type_check_only + +__all__ = [ + "seterr", + "geterr", + "setbufsize", + "getbufsize", + "seterrcall", + "geterrcall", + "errstate", +] + +_ErrKind: TypeAlias = Literal["ignore", "warn", "raise", "call", "print", "log"] +_ErrCall: TypeAlias = Callable[[str, int], Any] | SupportsWrite[str] + +_CallableT = TypeVar("_CallableT", bound=Callable[..., object]) + +@type_check_only +class _ErrDict(TypedDict): + divide: _ErrKind + over: _ErrKind + under: _ErrKind + invalid: _ErrKind + +### + +class _unspecified: ... + +_Unspecified: Final[_unspecified] + +class errstate: + __slots__ = "_all", "_call", "_divide", "_invalid", "_over", "_token", "_under" + + def __init__( + self, + /, + *, + call: _ErrCall | _unspecified = ..., # = _Unspecified + all: _ErrKind | None = None, + divide: _ErrKind | None = None, + over: _ErrKind | None = None, + under: _ErrKind | None = None, + invalid: _ErrKind | None = None, + ) -> None: ... + def __call__(self, /, func: _CallableT) -> _CallableT: ... + def __enter__(self) -> None: ... + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + /, + ) -> None: ... + +def seterr( + all: _ErrKind | None = None, + divide: _ErrKind | None = None, + over: _ErrKind | None = None, + under: _ErrKind | None = None, + invalid: _ErrKind | None = None, +) -> _ErrDict: ... +def geterr() -> _ErrDict: ... +def setbufsize(size: int) -> int: ... +def getbufsize() -> int: ... +def seterrcall(func: _ErrCall | None) -> _ErrCall | None: ... +def geterrcall() -> _ErrCall | None: ... diff --git a/local_packages/numpy/_core/_umath_tests.cp312-win_amd64.lib b/local_packages/numpy/_core/_umath_tests.cp312-win_amd64.lib new file mode 100644 index 0000000..09e1495 Binary files /dev/null and b/local_packages/numpy/_core/_umath_tests.cp312-win_amd64.lib differ diff --git a/local_packages/numpy/_core/_umath_tests.cp312-win_amd64.pyd b/local_packages/numpy/_core/_umath_tests.cp312-win_amd64.pyd new file mode 100644 index 0000000..fec0c11 Binary files /dev/null and b/local_packages/numpy/_core/_umath_tests.cp312-win_amd64.pyd differ diff --git a/local_packages/numpy/_core/_umath_tests.pyi b/local_packages/numpy/_core/_umath_tests.pyi new file mode 100644 index 0000000..696cec3 --- /dev/null +++ b/local_packages/numpy/_core/_umath_tests.pyi @@ -0,0 +1,47 @@ +# undocumented internal testing module for ufunc features, defined in +# numpy/_core/src/umath/_umath_tests.c.src + +from typing import Final, Literal as L, TypedDict, type_check_only + +import numpy as np +from numpy._typing import _GUFunc_Nin2_Nout1, _UFunc_Nin1_Nout1, _UFunc_Nin2_Nout1 + +@type_check_only +class _TestDispatchResult(TypedDict): + func: str # e.g. 'func_AVX2' + var: str # e.g. 'var_AVX2' + func_xb: str # e.g. 'func_AVX2' + var_xb: str # e.g. 'var_AVX2' + all: list[str] # e.g. ['func_AVX2', 'func_SSE41', 'func'] + +### + +# undocumented +def test_signature( + nin: int, nout: int, signature: str, / +) -> tuple[ + L[0, 1], # core_enabled (0 for scalar ufunc; 1 for generalized ufunc) + tuple[int, ...] | None, # core_num_dims + tuple[int, ...] | None, # core_dim_ixs + tuple[int, ...] | None, # core_dim_flags + tuple[int, ...] | None, # core_dim_sizes +]: ... + +# undocumented +def test_dispatch() -> _TestDispatchResult: ... + +# undocumented ufuncs and gufuncs +always_error: Final[_UFunc_Nin2_Nout1[L["always_error"], L[1], None]] = ... +always_error_unary: Final[_UFunc_Nin1_Nout1[L["always_error_unary"], L[1], None]] = ... +always_error_gufunc: Final[_GUFunc_Nin2_Nout1[L["always_error_gufunc"], L[1], None, L["(i),()->()"]]] = ... +inner1d: Final[_GUFunc_Nin2_Nout1[L["inner1d"], L[2], None, L["(i),(i)->()"]]] = ... +innerwt: Final[np.ufunc] = ... # we have no specialized type for 3->1 gufuncs +matrix_multiply: Final[_GUFunc_Nin2_Nout1[L["matrix_multiply"], L[3], None, L["(m,n),(n,p)->(m,p)"]]] = ... +matmul: Final[_GUFunc_Nin2_Nout1[L["matmul"], L[3], None, L["(m?,n),(n,p?)->(m?,p?)"]]] = ... +euclidean_pdist: Final[_GUFunc_Nin2_Nout1[L["euclidean_pdist"], L[2], None, L["(n,d)->(p)"]]] = ... +cumsum: Final[np.ufunc] = ... # we have no specialized type for 1->1 gufuncs +inner1d_no_doc: Final[_GUFunc_Nin2_Nout1[L["inner1d_no_doc"], L[2], None, L["(i),(i)->()"]]] = ... +cross1d: Final[_GUFunc_Nin2_Nout1[L["cross1d"], L[2], None, L["(3),(3)->(3)"]]] = ... +_pickleable_module_global_ufunc: Final[np.ufunc] = ... # 0->0 ufunc; segfaults if called +indexed_negative: Final[_UFunc_Nin1_Nout1[L["indexed_negative"], L[0], L[0]]] = ... # ntypes=0; can't be called +conv1d_full: Final[_GUFunc_Nin2_Nout1[L["conv1d_full"], L[1], None, L["(m),(n)->(p)"]]] = ... diff --git a/local_packages/numpy/_core/arrayprint.py b/local_packages/numpy/_core/arrayprint.py new file mode 100644 index 0000000..96c1728 --- /dev/null +++ b/local_packages/numpy/_core/arrayprint.py @@ -0,0 +1,1779 @@ +"""Array printing function + +$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $ + +""" +__all__ = ["array2string", "array_str", "array_repr", + "set_printoptions", "get_printoptions", "printoptions", + "format_float_positional", "format_float_scientific"] +__docformat__ = 'restructuredtext' + +# +# Written by Konrad Hinsen +# last revision: 1996-3-13 +# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details) +# and by Perry Greenfield 2000-4-1 for numarray +# and by Travis Oliphant 2005-8-22 for numpy + + +# Note: Both scalartypes.c.src and arrayprint.py implement strs for numpy +# scalars but for different purposes. scalartypes.c.src has str/reprs for when +# the scalar is printed on its own, while arrayprint.py has strs for when +# scalars are printed inside an ndarray. Only the latter strs are currently +# user-customizable. + +import functools +import numbers +import sys + +try: + from _thread import get_ident +except ImportError: + from _dummy_thread import get_ident + +import contextlib +import operator +import warnings + +import numpy as np + +from . import numerictypes as _nt +from .fromnumeric import any +from .multiarray import ( + array, + datetime_as_string, + datetime_data, + dragon4_positional, + dragon4_scientific, + ndarray, +) +from .numeric import asarray, concatenate, errstate +from .numerictypes import complex128, flexible, float64, int_ +from .overrides import array_function_dispatch, set_module +from .printoptions import format_options +from .umath import absolute, isfinite, isinf, isnat + + +def _make_options_dict(precision=None, threshold=None, edgeitems=None, + linewidth=None, suppress=None, nanstr=None, infstr=None, + sign=None, formatter=None, floatmode=None, legacy=None, + override_repr=None): + """ + Make a dictionary out of the non-None arguments, plus conversion of + *legacy* and sanity checks. + """ + + options = {k: v for k, v in list(locals().items()) if v is not None} + + if suppress is not None: + options['suppress'] = bool(suppress) + + modes = ['fixed', 'unique', 'maxprec', 'maxprec_equal'] + if floatmode not in modes + [None]: + raise ValueError("floatmode option must be one of " + + ", ".join(f'"{m}"' for m in modes)) + + if sign not in [None, '-', '+', ' ']: + raise ValueError("sign option must be one of ' ', '+', or '-'") + + if legacy is False: + options['legacy'] = sys.maxsize + elif legacy == False: # noqa: E712 + warnings.warn( + f"Passing `legacy={legacy!r}` is deprecated.", + FutureWarning, stacklevel=3 + ) + options['legacy'] = sys.maxsize + elif legacy == '1.13': + options['legacy'] = 113 + elif legacy == '1.21': + options['legacy'] = 121 + elif legacy == '1.25': + options['legacy'] = 125 + elif legacy == '2.1': + options['legacy'] = 201 + elif legacy == '2.2': + options['legacy'] = 202 + elif legacy is None: + pass # OK, do nothing. + else: + warnings.warn( + "legacy printing option can currently only be '1.13', '1.21', " + "'1.25', '2.1', '2.2' or `False`", stacklevel=3) + + if threshold is not None: + # forbid the bad threshold arg suggested by stack overflow, gh-12351 + if not isinstance(threshold, numbers.Number): + raise TypeError("threshold must be numeric") + if np.isnan(threshold): + raise ValueError("threshold must be non-NAN, try " + "sys.maxsize for untruncated representation") + + if precision is not None: + # forbid the bad precision arg as suggested by issue #18254 + try: + options['precision'] = operator.index(precision) + except TypeError as e: + raise TypeError('precision must be an integer') from e + + return options + + +@set_module('numpy') +def set_printoptions(precision=None, threshold=None, edgeitems=None, + linewidth=None, suppress=None, nanstr=None, + infstr=None, formatter=None, sign=None, floatmode=None, + *, legacy=None, override_repr=None): + """ + Set printing options. + + These options determine the way floating point numbers, arrays and + other NumPy objects are displayed. + + Parameters + ---------- + precision : int or None, optional + Number of digits of precision for floating point output (default 8). + May be None if `floatmode` is not `fixed`, to print as many digits as + necessary to uniquely specify the value. + threshold : int, optional + Total number of array elements which trigger summarization + rather than full repr (default 1000). + To always use the full repr without summarization, pass `sys.maxsize`. + edgeitems : int, optional + Number of array items in summary at beginning and end of + each dimension (default 3). + linewidth : int, optional + The number of characters per line for the purpose of inserting + line breaks (default 75). + suppress : bool, optional + If True, always print floating point numbers using fixed point + notation, in which case numbers equal to zero in the current precision + will print as zero. If False, then scientific notation is used when + absolute value of the smallest number is < 1e-4 or the ratio of the + maximum absolute value to the minimum is > 1e3. The default is False. + nanstr : str, optional + String representation of floating point not-a-number (default nan). + infstr : str, optional + String representation of floating point infinity (default inf). + sign : string, either '-', '+', or ' ', optional + Controls printing of the sign of floating-point types. If '+', always + print the sign of positive values. If ' ', always prints a space + (whitespace character) in the sign position of positive values. If + '-', omit the sign character of positive values. (default '-') + + .. versionchanged:: 2.0 + The sign parameter can now be an integer type, previously + types were floating-point types. + + formatter : dict of callables, optional + If not None, the keys should indicate the type(s) that the respective + formatting function applies to. Callables should return a string. + Types that are not specified (by their corresponding keys) are handled + by the default formatters. Individual types for which a formatter + can be set are: + + - 'bool' + - 'int' + - 'timedelta' : a `numpy.timedelta64` + - 'datetime' : a `numpy.datetime64` + - 'float' + - 'longfloat' : 128-bit floats + - 'complexfloat' + - 'longcomplexfloat' : composed of two 128-bit floats + - 'numpystr' : types `numpy.bytes_` and `numpy.str_` + - 'object' : `np.object_` arrays + + Other keys that can be used to set a group of types at once are: + + - 'all' : sets all types + - 'int_kind' : sets 'int' + - 'float_kind' : sets 'float' and 'longfloat' + - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' + - 'str_kind' : sets 'numpystr' + floatmode : str, optional + Controls the interpretation of the `precision` option for + floating-point types. Can take the following values + (default maxprec_equal): + + * 'fixed': Always print exactly `precision` fractional digits, + even if this would print more or fewer digits than + necessary to specify the value uniquely. + * 'unique': Print the minimum number of fractional digits necessary + to represent each value uniquely. Different elements may + have a different number of digits. The value of the + `precision` option is ignored. + * 'maxprec': Print at most `precision` fractional digits, but if + an element can be uniquely represented with fewer digits + only print it with that many. + * 'maxprec_equal': Print at most `precision` fractional digits, + but if every element in the array can be uniquely + represented with an equal number of fewer digits, use that + many digits for all elements. + legacy : string or `False`, optional + If set to the string ``'1.13'`` enables 1.13 legacy printing mode. This + approximates numpy 1.13 print output by including a space in the sign + position of floats and different behavior for 0d arrays. This also + enables 1.21 legacy printing mode (described below). + + If set to the string ``'1.21'`` enables 1.21 legacy printing mode. This + approximates numpy 1.21 print output of complex structured dtypes + by not inserting spaces after commas that separate fields and after + colons. + + If set to ``'1.25'`` approximates printing of 1.25 which mainly means + that numeric scalars are printed without their type information, e.g. + as ``3.0`` rather than ``np.float64(3.0)``. + + If set to ``'2.1'``, shape information is not given when arrays are + summarized (i.e., multiple elements replaced with ``...``). + + If set to ``'2.2'``, the transition to use scientific notation for + printing ``np.float16`` and ``np.float32`` types may happen later or + not at all for larger values. + + If set to `False`, disables legacy mode. + + Unrecognized strings will be ignored with a warning for forward + compatibility. + + .. versionchanged:: 1.22.0 + .. versionchanged:: 2.2 + + override_repr: callable, optional + If set a passed function will be used for generating arrays' repr. + Other options will be ignored. + + See Also + -------- + get_printoptions, printoptions, array2string + + + Notes + ----- + + * ``formatter`` is always reset with a call to `set_printoptions`. + * Use `printoptions` as a context manager to set the values temporarily. + * These print options apply only to NumPy ndarrays, not to scalars. + + **Concurrency note:** see :ref:`text_formatting_options` + + Examples + -------- + Floating point precision can be set: + + >>> import numpy as np + >>> np.set_printoptions(precision=4) + >>> np.array([1.123456789]) + [1.1235] + + Long arrays can be summarised: + + >>> np.set_printoptions(threshold=5) + >>> np.arange(10) + array([0, 1, 2, ..., 7, 8, 9], shape=(10,)) + + Small results can be suppressed: + + >>> eps = np.finfo(float).eps + >>> x = np.arange(4.) + >>> x**2 - (x + eps)**2 + array([-4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00]) + >>> np.set_printoptions(suppress=True) + >>> x**2 - (x + eps)**2 + array([-0., -0., 0., 0.]) + + A custom formatter can be used to display array elements as desired: + + >>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)}) + >>> x = np.arange(3) + >>> x + array([int: 0, int: -1, int: -2]) + >>> np.set_printoptions() # formatter gets reset + >>> x + array([0, 1, 2]) + + To put back the default options, you can use: + + >>> np.set_printoptions(edgeitems=3, infstr='inf', + ... linewidth=75, nanstr='nan', precision=8, + ... suppress=False, threshold=1000, formatter=None) + + Also to temporarily override options, use `printoptions` + as a context manager: + + >>> with np.printoptions(precision=2, suppress=True, threshold=5): + ... np.linspace(0, 10, 10) + array([ 0. , 1.11, 2.22, ..., 7.78, 8.89, 10. ], shape=(10,)) + + """ + _set_printoptions(precision, threshold, edgeitems, linewidth, suppress, + nanstr, infstr, formatter, sign, floatmode, + legacy=legacy, override_repr=override_repr) + + +def _set_printoptions(precision=None, threshold=None, edgeitems=None, + linewidth=None, suppress=None, nanstr=None, + infstr=None, formatter=None, sign=None, floatmode=None, + *, legacy=None, override_repr=None): + new_opt = _make_options_dict(precision, threshold, edgeitems, linewidth, + suppress, nanstr, infstr, sign, formatter, + floatmode, legacy) + # formatter and override_repr are always reset + new_opt['formatter'] = formatter + new_opt['override_repr'] = override_repr + + updated_opt = format_options.get() | new_opt + updated_opt.update(new_opt) + + if updated_opt['legacy'] == 113: + updated_opt['sign'] = '-' + + return format_options.set(updated_opt) + + +@set_module('numpy') +def get_printoptions(): + """ + Return the current print options. + + Returns + ------- + print_opts : dict + Dictionary of current print options with keys + + - precision : int + - threshold : int + - edgeitems : int + - linewidth : int + - suppress : bool + - nanstr : str + - infstr : str + - sign : str + - formatter : dict of callables + - floatmode : str + - legacy : str or False + + For a full description of these options, see `set_printoptions`. + + Notes + ----- + These print options apply only to NumPy ndarrays, not to scalars. + + **Concurrency note:** see :ref:`text_formatting_options` + + See Also + -------- + set_printoptions, printoptions + + Examples + -------- + >>> import numpy as np + + >>> np.get_printoptions() + {'edgeitems': 3, 'threshold': 1000, ..., 'override_repr': None} + + >>> np.get_printoptions()['linewidth'] + 75 + >>> np.set_printoptions(linewidth=100) + >>> np.get_printoptions()['linewidth'] + 100 + + """ + opts = format_options.get().copy() + opts['legacy'] = { + 113: '1.13', 121: '1.21', 125: '1.25', 201: '2.1', + 202: '2.2', sys.maxsize: False, + }[opts['legacy']] + return opts + + +def _get_legacy_print_mode(): + """Return the legacy print mode as an int.""" + return format_options.get()['legacy'] + + +@set_module('numpy') +@contextlib.contextmanager +def printoptions(*args, **kwargs): + """Context manager for setting print options. + + Set print options for the scope of the `with` block, and restore the old + options at the end. See `set_printoptions` for the full description of + available options. + + Examples + -------- + >>> import numpy as np + + >>> from numpy.testing import assert_equal + >>> with np.printoptions(precision=2): + ... np.array([2.0]) / 3 + array([0.67]) + + The `as`-clause of the `with`-statement gives the current print options: + + >>> with np.printoptions(precision=2) as opts: + ... assert_equal(opts, np.get_printoptions()) + + See Also + -------- + set_printoptions, get_printoptions + + Notes + ----- + These print options apply only to NumPy ndarrays, not to scalars. + + **Concurrency note:** see :ref:`text_formatting_options` + + """ + token = _set_printoptions(*args, **kwargs) + + try: + yield get_printoptions() + finally: + format_options.reset(token) + + +def _leading_trailing(a, edgeitems, index=()): + """ + Keep only the N-D corners (leading and trailing edges) of an array. + + Should be passed a base-class ndarray, since it makes no guarantees about + preserving subclasses. + """ + axis = len(index) + if axis == a.ndim: + return a[index] + + if a.shape[axis] > 2 * edgeitems: + return concatenate(( + _leading_trailing(a, edgeitems, index + np.index_exp[:edgeitems]), + _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:]) + ), axis=axis) + else: + return _leading_trailing(a, edgeitems, index + np.index_exp[:]) + + +def _object_format(o): + """ Object arrays containing lists should be printed unambiguously """ + if type(o) is list: + fmt = 'list({!r})' + else: + fmt = '{!r}' + return fmt.format(o) + +def repr_format(x): + if isinstance(x, (np.str_, np.bytes_)): + return repr(x.item()) + return repr(x) + +def str_format(x): + if isinstance(x, (np.str_, np.bytes_)): + return str(x.item()) + return str(x) + +def _get_formatdict(data, *, precision, floatmode, suppress, sign, legacy, + formatter, **kwargs): + # note: extra arguments in kwargs are ignored + + # wrapped in lambdas to avoid taking a code path + # with the wrong type of data + formatdict = { + 'bool': lambda: BoolFormat(data), + 'int': lambda: IntegerFormat(data, sign), + 'float': lambda: FloatingFormat( + data, precision, floatmode, suppress, sign, legacy=legacy), + 'longfloat': lambda: FloatingFormat( + data, precision, floatmode, suppress, sign, legacy=legacy), + 'complexfloat': lambda: ComplexFloatingFormat( + data, precision, floatmode, suppress, sign, legacy=legacy), + 'longcomplexfloat': lambda: ComplexFloatingFormat( + data, precision, floatmode, suppress, sign, legacy=legacy), + 'datetime': lambda: DatetimeFormat(data, legacy=legacy), + 'timedelta': lambda: TimedeltaFormat(data), + 'object': lambda: _object_format, + 'void': lambda: str_format, + 'numpystr': lambda: repr_format} + + # we need to wrap values in `formatter` in a lambda, so that the interface + # is the same as the above values. + def indirect(x): + return lambda: x + + if formatter is not None: + fkeys = [k for k in formatter.keys() if formatter[k] is not None] + if 'all' in fkeys: + for key in formatdict.keys(): + formatdict[key] = indirect(formatter['all']) + if 'int_kind' in fkeys: + for key in ['int']: + formatdict[key] = indirect(formatter['int_kind']) + if 'float_kind' in fkeys: + for key in ['float', 'longfloat']: + formatdict[key] = indirect(formatter['float_kind']) + if 'complex_kind' in fkeys: + for key in ['complexfloat', 'longcomplexfloat']: + formatdict[key] = indirect(formatter['complex_kind']) + if 'str_kind' in fkeys: + formatdict['numpystr'] = indirect(formatter['str_kind']) + for key in formatdict.keys(): + if key in fkeys: + formatdict[key] = indirect(formatter[key]) + + return formatdict + +def _get_format_function(data, **options): + """ + find the right formatting function for the dtype_ + """ + dtype_ = data.dtype + dtypeobj = dtype_.type + formatdict = _get_formatdict(data, **options) + if dtypeobj is None: + return formatdict["numpystr"]() + elif issubclass(dtypeobj, _nt.bool): + return formatdict['bool']() + elif issubclass(dtypeobj, _nt.integer): + if issubclass(dtypeobj, _nt.timedelta64): + return formatdict['timedelta']() + else: + return formatdict['int']() + elif issubclass(dtypeobj, _nt.floating): + if issubclass(dtypeobj, _nt.longdouble): + return formatdict['longfloat']() + else: + return formatdict['float']() + elif issubclass(dtypeobj, _nt.complexfloating): + if issubclass(dtypeobj, _nt.clongdouble): + return formatdict['longcomplexfloat']() + else: + return formatdict['complexfloat']() + elif issubclass(dtypeobj, (_nt.str_, _nt.bytes_)): + return formatdict['numpystr']() + elif issubclass(dtypeobj, _nt.datetime64): + return formatdict['datetime']() + elif issubclass(dtypeobj, _nt.object_): + return formatdict['object']() + elif issubclass(dtypeobj, _nt.void): + if dtype_.names is not None: + return StructuredVoidFormat.from_data(data, **options) + else: + return formatdict['void']() + else: + return formatdict['numpystr']() + + +def _recursive_guard(fillvalue='...'): + """ + Like the python 3.2 reprlib.recursive_repr, but forwards *args and **kwargs + + Decorates a function such that if it calls itself with the same first + argument, it returns `fillvalue` instead of recursing. + + Largely copied from reprlib.recursive_repr + """ + + def decorating_function(f): + repr_running = set() + + @functools.wraps(f) + def wrapper(self, *args, **kwargs): + key = id(self), get_ident() + if key in repr_running: + return fillvalue + repr_running.add(key) + try: + return f(self, *args, **kwargs) + finally: + repr_running.discard(key) + + return wrapper + + return decorating_function + + +# gracefully handle recursive calls, when object arrays contain themselves +@_recursive_guard() +def _array2string(a, options, separator=' ', prefix=""): + # The formatter __init__s in _get_format_function cannot deal with + # subclasses yet, and we also need to avoid recursion issues in + # _formatArray with subclasses which return 0d arrays in place of scalars + data = asarray(a) + if a.shape == (): + a = data + + if a.size > options['threshold']: + summary_insert = "..." + data = _leading_trailing(data, options['edgeitems']) + else: + summary_insert = "" + + # find the right formatting function for the array + format_function = _get_format_function(data, **options) + + # skip over "[" + next_line_prefix = " " + # skip over array( + next_line_prefix += " " * len(prefix) + + lst = _formatArray(a, format_function, options['linewidth'], + next_line_prefix, separator, options['edgeitems'], + summary_insert, options['legacy']) + return lst + + +def _array2string_dispatcher( + a, max_line_width=None, precision=None, + suppress_small=None, separator=None, prefix=None, + *, formatter=None, threshold=None, + edgeitems=None, sign=None, floatmode=None, suffix=None, + legacy=None): + return (a,) + + +@array_function_dispatch(_array2string_dispatcher, module='numpy') +def array2string(a, max_line_width=None, precision=None, + suppress_small=None, separator=' ', prefix="", + *, formatter=None, threshold=None, + edgeitems=None, sign=None, floatmode=None, suffix="", + legacy=None): + """ + Return a string representation of an array. + + Parameters + ---------- + a : ndarray + Input array. + max_line_width : int, optional + Inserts newlines if text is longer than `max_line_width`. + Defaults to ``numpy.get_printoptions()['linewidth']``. + precision : int or None, optional + Floating point precision. + Defaults to ``numpy.get_printoptions()['precision']``. + suppress_small : bool, optional + Represent numbers "very close" to zero as zero; default is False. + Very close is defined by precision: if the precision is 8, e.g., + numbers smaller (in absolute value) than 5e-9 are represented as + zero. + Defaults to ``numpy.get_printoptions()['suppress']``. + separator : str, optional + Inserted between elements. + prefix : str, optional + suffix : str, optional + The length of the prefix and suffix strings are used to respectively + align and wrap the output. An array is typically printed as:: + + prefix + array2string(a) + suffix + + The output is left-padded by the length of the prefix string, and + wrapping is forced at the column ``max_line_width - len(suffix)``. + It should be noted that the content of prefix and suffix strings are + not included in the output. + formatter : dict of callables, optional + If not None, the keys should indicate the type(s) that the respective + formatting function applies to. Callables should return a string. + Types that are not specified (by their corresponding keys) are handled + by the default formatters. Individual types for which a formatter + can be set are: + + - 'bool' + - 'int' + - 'timedelta' : a `numpy.timedelta64` + - 'datetime' : a `numpy.datetime64` + - 'float' + - 'longfloat' : 128-bit floats + - 'complexfloat' + - 'longcomplexfloat' : composed of two 128-bit floats + - 'void' : type `numpy.void` + - 'numpystr' : types `numpy.bytes_` and `numpy.str_` + + Other keys that can be used to set a group of types at once are: + + - 'all' : sets all types + - 'int_kind' : sets 'int' + - 'float_kind' : sets 'float' and 'longfloat' + - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' + - 'str_kind' : sets 'numpystr' + threshold : int, optional + Total number of array elements which trigger summarization + rather than full repr. + Defaults to ``numpy.get_printoptions()['threshold']``. + edgeitems : int, optional + Number of array items in summary at beginning and end of + each dimension. + Defaults to ``numpy.get_printoptions()['edgeitems']``. + sign : string, either '-', '+', or ' ', optional + Controls printing of the sign of floating-point types. If '+', always + print the sign of positive values. If ' ', always prints a space + (whitespace character) in the sign position of positive values. If + '-', omit the sign character of positive values. + Defaults to ``numpy.get_printoptions()['sign']``. + + .. versionchanged:: 2.0 + The sign parameter can now be an integer type, previously + types were floating-point types. + + floatmode : str, optional + Controls the interpretation of the `precision` option for + floating-point types. + Defaults to ``numpy.get_printoptions()['floatmode']``. + Can take the following values: + + - 'fixed': Always print exactly `precision` fractional digits, + even if this would print more or fewer digits than + necessary to specify the value uniquely. + - 'unique': Print the minimum number of fractional digits necessary + to represent each value uniquely. Different elements may + have a different number of digits. The value of the + `precision` option is ignored. + - 'maxprec': Print at most `precision` fractional digits, but if + an element can be uniquely represented with fewer digits + only print it with that many. + - 'maxprec_equal': Print at most `precision` fractional digits, + but if every element in the array can be uniquely + represented with an equal number of fewer digits, use that + many digits for all elements. + legacy : string or `False`, optional + If set to the string ``'1.13'`` enables 1.13 legacy printing mode. This + approximates numpy 1.13 print output by including a space in the sign + position of floats and different behavior for 0d arrays. If set to + `False`, disables legacy mode. Unrecognized strings will be ignored + with a warning for forward compatibility. + + Returns + ------- + array_str : str + String representation of the array. + + Raises + ------ + TypeError + if a callable in `formatter` does not return a string. + + See Also + -------- + array_str, array_repr, set_printoptions, get_printoptions + + Notes + ----- + If a formatter is specified for a certain type, the `precision` keyword is + ignored for that type. + + This is a very flexible function; `array_repr` and `array_str` are using + `array2string` internally so keywords with the same name should work + identically in all three functions. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1e-16,1,2,3]) + >>> np.array2string(x, precision=2, separator=',', + ... suppress_small=True) + '[0.,1.,2.,3.]' + + >>> x = np.arange(3.) + >>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) + '[0.00 1.00 2.00]' + + >>> x = np.arange(3) + >>> np.array2string(x, formatter={'int':lambda x: hex(x)}) + '[0x0 0x1 0x2]' + + """ + + overrides = _make_options_dict(precision, threshold, edgeitems, + max_line_width, suppress_small, None, None, + sign, formatter, floatmode, legacy) + options = format_options.get().copy() + options.update(overrides) + + if options['legacy'] <= 113: + if a.shape == () and a.dtype.names is None: + return repr(a.item()) + + if options['legacy'] > 113: + options['linewidth'] -= len(suffix) + + # treat as a null array if any of shape elements == 0 + if a.size == 0: + return "[]" + + return _array2string(a, options, separator, prefix) + + +def _extendLine(s, line, word, line_width, next_line_prefix, legacy): + needs_wrap = len(line) + len(word) > line_width + if legacy > 113: + # don't wrap lines if it won't help + if len(line) <= len(next_line_prefix): + needs_wrap = False + + if needs_wrap: + s += line.rstrip() + "\n" + line = next_line_prefix + line += word + return s, line + + +def _extendLine_pretty(s, line, word, line_width, next_line_prefix, legacy): + """ + Extends line with nicely formatted (possibly multi-line) string ``word``. + """ + words = word.splitlines() + if len(words) == 1 or legacy <= 113: + return _extendLine(s, line, word, line_width, next_line_prefix, legacy) + + max_word_length = max(len(word) for word in words) + if (len(line) + max_word_length > line_width and + len(line) > len(next_line_prefix)): + s += line.rstrip() + '\n' + line = next_line_prefix + words[0] + indent = next_line_prefix + else: + indent = len(line) * ' ' + line += words[0] + + for word in words[1::]: + s += line.rstrip() + '\n' + line = indent + word + + suffix_length = max_word_length - len(words[-1]) + line += suffix_length * ' ' + + return s, line + +def _formatArray(a, format_function, line_width, next_line_prefix, + separator, edge_items, summary_insert, legacy): + """formatArray is designed for two modes of operation: + + 1. Full output + + 2. Summarized output + + """ + def recurser(index, hanging_indent, curr_width): + """ + By using this local function, we don't need to recurse with all the + arguments. Since this function is not created recursively, the cost is + not significant + """ + axis = len(index) + axes_left = a.ndim - axis + + if axes_left == 0: + return format_function(a[index]) + + # when recursing, add a space to align with the [ added, and reduce the + # length of the line by 1 + next_hanging_indent = hanging_indent + ' ' + if legacy <= 113: + next_width = curr_width + else: + next_width = curr_width - len(']') + + a_len = a.shape[axis] + show_summary = summary_insert and 2 * edge_items < a_len + if show_summary: + leading_items = edge_items + trailing_items = edge_items + else: + leading_items = 0 + trailing_items = a_len + + # stringify the array with the hanging indent on the first line too + s = '' + + # last axis (rows) - wrap elements if they would not fit on one line + if axes_left == 1: + # the length up until the beginning of the separator / bracket + if legacy <= 113: + elem_width = curr_width - len(separator.rstrip()) + else: + elem_width = curr_width - max( + len(separator.rstrip()), len(']') + ) + + line = hanging_indent + for i in range(leading_items): + word = recurser(index + (i,), next_hanging_indent, next_width) + s, line = _extendLine_pretty( + s, line, word, elem_width, hanging_indent, legacy) + line += separator + + if show_summary: + s, line = _extendLine( + s, line, summary_insert, elem_width, hanging_indent, legacy + ) + if legacy <= 113: + line += ", " + else: + line += separator + + for i in range(trailing_items, 1, -1): + word = recurser(index + (-i,), next_hanging_indent, next_width) + s, line = _extendLine_pretty( + s, line, word, elem_width, hanging_indent, legacy) + line += separator + + if legacy <= 113: + # width of the separator is not considered on 1.13 + elem_width = curr_width + word = recurser(index + (-1,), next_hanging_indent, next_width) + s, line = _extendLine_pretty( + s, line, word, elem_width, hanging_indent, legacy) + + s += line + + # other axes - insert newlines between rows + else: + s = '' + line_sep = separator.rstrip() + '\n' * (axes_left - 1) + + for i in range(leading_items): + nested = recurser( + index + (i,), next_hanging_indent, next_width + ) + s += hanging_indent + nested + line_sep + + if show_summary: + if legacy <= 113: + # trailing space, fixed nbr of newlines, + # and fixed separator + s += hanging_indent + summary_insert + ", \n" + else: + s += hanging_indent + summary_insert + line_sep + + for i in range(trailing_items, 1, -1): + nested = recurser(index + (-i,), next_hanging_indent, + next_width) + s += hanging_indent + nested + line_sep + + nested = recurser(index + (-1,), next_hanging_indent, next_width) + s += hanging_indent + nested + + # remove the hanging indent, and wrap in [] + s = '[' + s[len(hanging_indent):] + ']' + return s + + try: + # invoke the recursive part with an initial index and prefix + return recurser(index=(), + hanging_indent=next_line_prefix, + curr_width=line_width) + finally: + # recursive closures have a cyclic reference to themselves, which + # requires gc to collect (gh-10620). To avoid this problem, for + # performance and PyPy friendliness, we break the cycle: + recurser = None + +def _none_or_positive_arg(x, name): + if x is None: + return -1 + if x < 0: + raise ValueError(f"{name} must be >= 0") + return x + +class FloatingFormat: + """ Formatter for subtypes of np.floating """ + def __init__(self, data, precision, floatmode, suppress_small, sign=False, + *, legacy=None): + # for backcompatibility, accept bools + if isinstance(sign, bool): + sign = '+' if sign else '-' + + self._legacy = legacy + if self._legacy <= 113: + # when not 0d, legacy does not support '-' + if data.shape != () and sign == '-': + sign = ' ' + + self.floatmode = floatmode + if floatmode == 'unique': + self.precision = None + else: + self.precision = precision + + self.precision = _none_or_positive_arg(self.precision, 'precision') + + self.suppress_small = suppress_small + self.sign = sign + self.exp_format = False + self.large_exponent = False + self.fillFormat(data) + + def fillFormat(self, data): + # only the finite values are used to compute the number of digits + finite_vals = data[isfinite(data)] + + # choose exponential mode based on the non-zero finite values: + abs_non_zero = absolute(finite_vals[finite_vals != 0]) + if len(abs_non_zero) != 0: + max_val = np.max(abs_non_zero) + min_val = np.min(abs_non_zero) + if self._legacy <= 202: + exp_cutoff_max = 1.e8 + else: + # consider data type while deciding the max cutoff for exp format + exp_cutoff_max = 10.**min(8, np.finfo(data.dtype).precision) + with errstate(over='ignore'): # division can overflow + if max_val >= exp_cutoff_max or (not self.suppress_small and + (min_val < 0.0001 or max_val / min_val > 1000.)): + self.exp_format = True + + # do a first pass of printing all the numbers, to determine sizes + if len(finite_vals) == 0: + self.pad_left = 0 + self.pad_right = 0 + self.trim = '.' + self.exp_size = -1 + self.unique = True + self.min_digits = None + elif self.exp_format: + trim, unique = '.', True + if self.floatmode == 'fixed' or self._legacy <= 113: + trim, unique = 'k', False + strs = (dragon4_scientific(x, precision=self.precision, + unique=unique, trim=trim, sign=self.sign == '+') + for x in finite_vals) + frac_strs, _, exp_strs = zip(*(s.partition('e') for s in strs)) + int_part, frac_part = zip(*(s.split('.') for s in frac_strs)) + self.exp_size = max(len(s) for s in exp_strs) - 1 + + self.trim = 'k' + self.precision = max(len(s) for s in frac_part) + self.min_digits = self.precision + self.unique = unique + + # for back-compat with np 1.13, use 2 spaces & sign and full prec + if self._legacy <= 113: + self.pad_left = 3 + else: + # this should be only 1 or 2. Can be calculated from sign. + self.pad_left = max(len(s) for s in int_part) + # pad_right is only needed for nan length calculation + self.pad_right = self.exp_size + 2 + self.precision + else: + trim, unique = '.', True + if self.floatmode == 'fixed': + trim, unique = 'k', False + strs = (dragon4_positional(x, precision=self.precision, + fractional=True, + unique=unique, trim=trim, + sign=self.sign == '+') + for x in finite_vals) + int_part, frac_part = zip(*(s.split('.') for s in strs)) + if self._legacy <= 113: + self.pad_left = 1 + max(len(s.lstrip('-+')) for s in int_part) + else: + self.pad_left = max(len(s) for s in int_part) + self.pad_right = max(len(s) for s in frac_part) + self.exp_size = -1 + self.unique = unique + + if self.floatmode in ['fixed', 'maxprec_equal']: + self.precision = self.min_digits = self.pad_right + self.trim = 'k' + else: + self.trim = '.' + self.min_digits = 0 + + if self._legacy > 113: + # account for sign = ' ' by adding one to pad_left + if self.sign == ' ' and not any(np.signbit(finite_vals)): + self.pad_left += 1 + + # if there are non-finite values, may need to increase pad_left + if data.size != finite_vals.size: + neginf = self.sign != '-' or any(data[isinf(data)] < 0) + offset = self.pad_right + 1 # +1 for decimal pt + current_options = format_options.get() + self.pad_left = max( + self.pad_left, len(current_options['nanstr']) - offset, + len(current_options['infstr']) + neginf - offset + ) + + def __call__(self, x): + if not np.isfinite(x): + with errstate(invalid='ignore'): + current_options = format_options.get() + if np.isnan(x): + sign = '+' if self.sign == '+' else '' + ret = sign + current_options['nanstr'] + else: # isinf + sign = '-' if x < 0 else '+' if self.sign == '+' else '' + ret = sign + current_options['infstr'] + return ' ' * ( + self.pad_left + self.pad_right + 1 - len(ret) + ) + ret + + if self.exp_format: + return dragon4_scientific(x, + precision=self.precision, + min_digits=self.min_digits, + unique=self.unique, + trim=self.trim, + sign=self.sign == '+', + pad_left=self.pad_left, + exp_digits=self.exp_size) + else: + return dragon4_positional(x, + precision=self.precision, + min_digits=self.min_digits, + unique=self.unique, + fractional=True, + trim=self.trim, + sign=self.sign == '+', + pad_left=self.pad_left, + pad_right=self.pad_right) + + +@set_module('numpy') +def format_float_scientific(x, precision=None, unique=True, trim='k', + sign=False, pad_left=None, exp_digits=None, + min_digits=None): + """ + Format a floating-point scalar as a decimal string in scientific notation. + + Provides control over rounding, trimming and padding. Uses and assumes + IEEE unbiased rounding. Uses the "Dragon4" algorithm. + + Parameters + ---------- + x : python float or numpy floating scalar + Value to format. + precision : non-negative integer or None, optional + Maximum number of digits to print. May be None if `unique` is + `True`, but must be an integer if unique is `False`. + unique : boolean, optional + If `True`, use a digit-generation strategy which gives the shortest + representation which uniquely identifies the floating-point number from + other values of the same type, by judicious rounding. If `precision` + is given fewer digits than necessary can be printed. If `min_digits` + is given more can be printed, in which cases the last digit is rounded + with unbiased rounding. + If `False`, digits are generated as if printing an infinite-precision + value and stopping after `precision` digits, rounding the remaining + value with unbiased rounding + trim : one of 'k', '.', '0', '-', optional + Controls post-processing trimming of trailing digits, as follows: + + * 'k' : keep trailing zeros, keep decimal point (no trimming) + * '.' : trim all trailing zeros, leave decimal point + * '0' : trim all but the zero before the decimal point. Insert the + zero if it is missing. + * '-' : trim trailing zeros and any trailing decimal point + sign : boolean, optional + Whether to show the sign for positive values. + pad_left : non-negative integer, optional + Pad the left side of the string with whitespace until at least that + many characters are to the left of the decimal point. + exp_digits : non-negative integer, optional + Pad the exponent with zeros until it contains at least this + many digits. If omitted, the exponent will be at least 2 digits. + min_digits : non-negative integer or None, optional + Minimum number of digits to print. This only has an effect for + `unique=True`. In that case more digits than necessary to uniquely + identify the value may be printed and rounded unbiased. + + .. versionadded:: 1.21.0 + + Returns + ------- + rep : string + The string representation of the floating point value + + See Also + -------- + format_float_positional + + Examples + -------- + >>> import numpy as np + >>> np.format_float_scientific(np.float32(np.pi)) + '3.1415927e+00' + >>> s = np.float32(1.23e24) + >>> np.format_float_scientific(s, unique=False, precision=15) + '1.230000071797338e+24' + >>> np.format_float_scientific(s, exp_digits=4) + '1.23e+0024' + """ + precision = _none_or_positive_arg(precision, 'precision') + pad_left = _none_or_positive_arg(pad_left, 'pad_left') + exp_digits = _none_or_positive_arg(exp_digits, 'exp_digits') + min_digits = _none_or_positive_arg(min_digits, 'min_digits') + if min_digits > 0 and precision > 0 and min_digits > precision: + raise ValueError("min_digits must be less than or equal to precision") + return dragon4_scientific(x, precision=precision, unique=unique, + trim=trim, sign=sign, pad_left=pad_left, + exp_digits=exp_digits, min_digits=min_digits) + + +@set_module('numpy') +def format_float_positional(x, precision=None, unique=True, + fractional=True, trim='k', sign=False, + pad_left=None, pad_right=None, min_digits=None): + """ + Format a floating-point scalar as a decimal string in positional notation. + + Provides control over rounding, trimming and padding. Uses and assumes + IEEE unbiased rounding. Uses the "Dragon4" algorithm. + + Parameters + ---------- + x : python float or numpy floating scalar + Value to format. + precision : non-negative integer or None, optional + Maximum number of digits to print. May be None if `unique` is + `True`, but must be an integer if unique is `False`. + unique : boolean, optional + If `True`, use a digit-generation strategy which gives the shortest + representation which uniquely identifies the floating-point number from + other values of the same type, by judicious rounding. If `precision` + is given fewer digits than necessary can be printed, or if `min_digits` + is given more can be printed, in which cases the last digit is rounded + with unbiased rounding. + If `False`, digits are generated as if printing an infinite-precision + value and stopping after `precision` digits, rounding the remaining + value with unbiased rounding + fractional : boolean, optional + If `True`, the cutoffs of `precision` and `min_digits` refer to the + total number of digits after the decimal point, including leading + zeros. + If `False`, `precision` and `min_digits` refer to the total number of + significant digits, before or after the decimal point, ignoring leading + zeros. + trim : one of 'k', '.', '0', '-', optional + Controls post-processing trimming of trailing digits, as follows: + + * 'k' : keep trailing zeros, keep decimal point (no trimming) + * '.' : trim all trailing zeros, leave decimal point + * '0' : trim all but the zero before the decimal point. Insert the + zero if it is missing. + * '-' : trim trailing zeros and any trailing decimal point + sign : boolean, optional + Whether to show the sign for positive values. + pad_left : non-negative integer, optional + Pad the left side of the string with whitespace until at least that + many characters are to the left of the decimal point. + pad_right : non-negative integer, optional + Pad the right side of the string with whitespace until at least that + many characters are to the right of the decimal point. + min_digits : non-negative integer or None, optional + Minimum number of digits to print. Only has an effect if `unique=True` + in which case additional digits past those necessary to uniquely + identify the value may be printed, rounding the last additional digit. + + .. versionadded:: 1.21.0 + + Returns + ------- + rep : string + The string representation of the floating point value + + See Also + -------- + format_float_scientific + + Examples + -------- + >>> import numpy as np + >>> np.format_float_positional(np.float32(np.pi)) + '3.1415927' + >>> np.format_float_positional(np.float16(np.pi)) + '3.14' + >>> np.format_float_positional(np.float16(0.3)) + '0.3' + >>> np.format_float_positional(np.float16(0.3), unique=False, precision=10) + '0.3000488281' + """ + precision = _none_or_positive_arg(precision, 'precision') + pad_left = _none_or_positive_arg(pad_left, 'pad_left') + pad_right = _none_or_positive_arg(pad_right, 'pad_right') + min_digits = _none_or_positive_arg(min_digits, 'min_digits') + if not fractional and precision == 0: + raise ValueError("precision must be greater than 0 if " + "fractional=False") + if min_digits > 0 and precision > 0 and min_digits > precision: + raise ValueError("min_digits must be less than or equal to precision") + return dragon4_positional(x, precision=precision, unique=unique, + fractional=fractional, trim=trim, + sign=sign, pad_left=pad_left, + pad_right=pad_right, min_digits=min_digits) + +class IntegerFormat: + def __init__(self, data, sign='-'): + if data.size > 0: + data_max = np.max(data) + data_min = np.min(data) + data_max_str_len = len(str(data_max)) + if sign == ' ' and data_min < 0: + sign = '-' + if data_max >= 0 and sign in "+ ": + data_max_str_len += 1 + max_str_len = max(data_max_str_len, + len(str(data_min))) + else: + max_str_len = 0 + self.format = f'{{:{sign}{max_str_len}d}}' + + def __call__(self, x): + return self.format.format(x) + +class BoolFormat: + def __init__(self, data, **kwargs): + # add an extra space so " True" and "False" have the same length and + # array elements align nicely when printed, except in 0d arrays + self.truestr = ' True' if data.shape != () else 'True' + + def __call__(self, x): + return self.truestr if x else "False" + + +class ComplexFloatingFormat: + """ Formatter for subtypes of np.complexfloating """ + def __init__(self, x, precision, floatmode, suppress_small, + sign=False, *, legacy=None): + # for backcompatibility, accept bools + if isinstance(sign, bool): + sign = '+' if sign else '-' + + floatmode_real = floatmode_imag = floatmode + if legacy <= 113: + floatmode_real = 'maxprec_equal' + floatmode_imag = 'maxprec' + + self.real_format = FloatingFormat( + x.real, precision, floatmode_real, suppress_small, + sign=sign, legacy=legacy + ) + self.imag_format = FloatingFormat( + x.imag, precision, floatmode_imag, suppress_small, + sign='+', legacy=legacy + ) + + def __call__(self, x): + r = self.real_format(x.real) + i = self.imag_format(x.imag) + + # add the 'j' before the terminal whitespace in i + sp = len(i.rstrip()) + i = i[:sp] + 'j' + i[sp:] + + return r + i + + +class _TimelikeFormat: + def __init__(self, data): + non_nat = data[~isnat(data)] + if len(non_nat) > 0: + # Max str length of non-NaT elements + max_str_len = max(len(self._format_non_nat(np.max(non_nat))), + len(self._format_non_nat(np.min(non_nat)))) + else: + max_str_len = 0 + if len(non_nat) < data.size: + # data contains a NaT + max_str_len = max(max_str_len, 5) + self._format = f'%{max_str_len}s' + self._nat = "'NaT'".rjust(max_str_len) + + def _format_non_nat(self, x): + # override in subclass + raise NotImplementedError + + def __call__(self, x): + if isnat(x): + return self._nat + else: + return self._format % self._format_non_nat(x) + + +class DatetimeFormat(_TimelikeFormat): + def __init__(self, x, unit=None, timezone=None, casting='same_kind', + legacy=False): + # Get the unit from the dtype + if unit is None: + if x.dtype.kind == 'M': + unit = datetime_data(x.dtype)[0] + else: + unit = 's' + + if timezone is None: + timezone = 'naive' + self.timezone = timezone + self.unit = unit + self.casting = casting + self.legacy = legacy + + # must be called after the above are configured + super().__init__(x) + + def __call__(self, x): + if self.legacy <= 113: + return self._format_non_nat(x) + return super().__call__(x) + + def _format_non_nat(self, x): + return "'%s'" % datetime_as_string(x, + unit=self.unit, + timezone=self.timezone, + casting=self.casting) + + +class TimedeltaFormat(_TimelikeFormat): + def _format_non_nat(self, x): + return str(x.astype('i8')) + + +class SubArrayFormat: + def __init__(self, format_function, **options): + self.format_function = format_function + self.threshold = options['threshold'] + self.edge_items = options['edgeitems'] + + def __call__(self, a): + self.summary_insert = "..." if a.size > self.threshold else "" + return self.format_array(a) + + def format_array(self, a): + if np.ndim(a) == 0: + return self.format_function(a) + + if self.summary_insert and a.shape[0] > 2 * self.edge_items: + formatted = ( + [self.format_array(a_) for a_ in a[:self.edge_items]] + + [self.summary_insert] + + [self.format_array(a_) for a_ in a[-self.edge_items:]] + ) + else: + formatted = [self.format_array(a_) for a_ in a] + + return "[" + ", ".join(formatted) + "]" + + +class StructuredVoidFormat: + """ + Formatter for structured np.void objects. + + This does not work on structured alias types like + np.dtype(('i4', 'i2,i2')), as alias scalars lose their field information, + and the implementation relies upon np.void.__getitem__. + """ + def __init__(self, format_functions): + self.format_functions = format_functions + + @classmethod + def from_data(cls, data, **options): + """ + This is a second way to initialize StructuredVoidFormat, + using the raw data as input. Added to avoid changing + the signature of __init__. + """ + format_functions = [] + for field_name in data.dtype.names: + format_function = _get_format_function(data[field_name], **options) + if data.dtype[field_name].shape != (): + format_function = SubArrayFormat(format_function, **options) + format_functions.append(format_function) + return cls(format_functions) + + def __call__(self, x): + str_fields = [ + format_function(field) + for field, format_function in zip(x, self.format_functions) + ] + if len(str_fields) == 1: + return f"({str_fields[0]},)" + else: + return f"({', '.join(str_fields)})" + + +def _void_scalar_to_string(x, is_repr=True): + """ + Implements the repr for structured-void scalars. It is called from the + scalartypes.c.src code, and is placed here because it uses the elementwise + formatters defined above. + """ + options = format_options.get().copy() + + if options["legacy"] <= 125: + return StructuredVoidFormat.from_data(array(x), **options)(x) + + if options.get('formatter') is None: + options['formatter'] = {} + options['formatter'].setdefault('float_kind', str) + val_repr = StructuredVoidFormat.from_data(array(x), **options)(x) + if not is_repr: + return val_repr + cls = type(x) + cls_fqn = cls.__module__.replace("numpy", "np") + "." + cls.__name__ + void_dtype = np.dtype((np.void, x.dtype)) + return f"{cls_fqn}({val_repr}, dtype={void_dtype!s})" + + +_typelessdata = [int_, float64, complex128, _nt.bool] + + +def dtype_is_implied(dtype): + """ + Determine if the given dtype is implied by the representation + of its values. + + Parameters + ---------- + dtype : dtype + Data type + + Returns + ------- + implied : bool + True if the dtype is implied by the representation of its values. + + Examples + -------- + >>> import numpy as np + >>> np._core.arrayprint.dtype_is_implied(int) + True + >>> np.array([1, 2, 3], int) + array([1, 2, 3]) + >>> np._core.arrayprint.dtype_is_implied(np.int8) + False + >>> np.array([1, 2, 3], np.int8) + array([1, 2, 3], dtype=int8) + """ + dtype = np.dtype(dtype) + if format_options.get()['legacy'] <= 113 and dtype.type == np.bool: + return False + + # not just void types can be structured, and names are not part of the repr + if dtype.names is not None: + return False + + # should care about endianness *unless size is 1* (e.g., int8, bool) + if not dtype.isnative: + return False + + return dtype.type in _typelessdata + + +def dtype_short_repr(dtype): + """ + Convert a dtype to a short form which evaluates to the same dtype. + + The intent is roughly that the following holds + + >>> from numpy import * + >>> dt = np.int64([1, 2]).dtype + >>> assert eval(dtype_short_repr(dt)) == dt + """ + if type(dtype).__repr__ != np.dtype.__repr__: + # TODO: Custom repr for user DTypes, logic should likely move. + return repr(dtype) + if dtype.names is not None: + # structured dtypes give a list or tuple repr + return str(dtype) + elif issubclass(dtype.type, flexible): + # handle these separately so they don't give garbage like str256 + return f"'{str(dtype)}'" + + typename = dtype.name + if not dtype.isnative: + # deal with cases like dtype(' 210 + and arr.size > current_options['threshold'])): + extras.append(f"shape={arr.shape}") + if not dtype_is_implied(arr.dtype) or arr.size == 0: + extras.append(f"dtype={dtype_short_repr(arr.dtype)}") + + if not extras: + return prefix + lst + ")" + + arr_str = prefix + lst + "," + extra_str = ", ".join(extras) + ")" + # compute whether we should put extras on a new line: Do so if adding the + # extras would extend the last line past max_line_width. + # Note: This line gives the correct result even when rfind returns -1. + last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1) + spacer = " " + if current_options['legacy'] <= 113: + if issubclass(arr.dtype.type, flexible): + spacer = '\n' + ' ' * len(prefix) + elif last_line_len + len(extra_str) + 1 > max_line_width: + spacer = '\n' + ' ' * len(prefix) + + return arr_str + spacer + extra_str + + +def _array_repr_dispatcher( + arr, max_line_width=None, precision=None, suppress_small=None): + return (arr,) + + +@array_function_dispatch(_array_repr_dispatcher, module='numpy') +def array_repr(arr, max_line_width=None, precision=None, suppress_small=None): + """ + Return the string representation of an array. + + Parameters + ---------- + arr : ndarray + Input array. + max_line_width : int, optional + Inserts newlines if text is longer than `max_line_width`. + Defaults to ``numpy.get_printoptions()['linewidth']``. + precision : int, optional + Floating point precision. + Defaults to ``numpy.get_printoptions()['precision']``. + suppress_small : bool, optional + Represent numbers "very close" to zero as zero; default is False. + Very close is defined by precision: if the precision is 8, e.g., + numbers smaller (in absolute value) than 5e-9 are represented as + zero. + Defaults to ``numpy.get_printoptions()['suppress']``. + + Returns + ------- + string : str + The string representation of an array. + + See Also + -------- + array_str, array2string, set_printoptions + + Examples + -------- + >>> import numpy as np + >>> np.array_repr(np.array([1,2])) + 'array([1, 2])' + >>> np.array_repr(np.ma.array([0.])) + 'MaskedArray([0.])' + >>> np.array_repr(np.array([], np.int32)) + 'array([], dtype=int32)' + + >>> x = np.array([1e-6, 4e-7, 2, 3]) + >>> np.array_repr(x, precision=6, suppress_small=True) + 'array([0.000001, 0. , 2. , 3. ])' + + """ + return _array_repr_implementation( + arr, max_line_width, precision, suppress_small) + + +@_recursive_guard() +def _guarded_repr_or_str(v): + if isinstance(v, bytes): + return repr(v) + return str(v) + + +def _array_str_implementation( + a, max_line_width=None, precision=None, suppress_small=None, + array2string=array2string): + """Internal version of array_str() that allows overriding array2string.""" + if (format_options.get()['legacy'] <= 113 and + a.shape == () and not a.dtype.names): + return str(a.item()) + + # the str of 0d arrays is a special case: It should appear like a scalar, + # so floats are not truncated by `precision`, and strings are not wrapped + # in quotes. So we return the str of the scalar value. + if a.shape == (): + # obtain a scalar and call str on it, avoiding problems for subclasses + # for which indexing with () returns a 0d instead of a scalar by using + # ndarray's getindex. Also guard against recursive 0d object arrays. + return _guarded_repr_or_str(np.ndarray.__getitem__(a, ())) + + return array2string(a, max_line_width, precision, suppress_small, ' ', "") + + +def _array_str_dispatcher( + a, max_line_width=None, precision=None, suppress_small=None): + return (a,) + + +@array_function_dispatch(_array_str_dispatcher, module='numpy') +def array_str(a, max_line_width=None, precision=None, suppress_small=None): + """ + Return a string representation of the data in an array. + + The data in the array is returned as a single string. This function is + similar to `array_repr`, the difference being that `array_repr` also + returns information on the kind of array and its data type. + + Parameters + ---------- + a : ndarray + Input array. + max_line_width : int, optional + Inserts newlines if text is longer than `max_line_width`. + Defaults to ``numpy.get_printoptions()['linewidth']``. + precision : int, optional + Floating point precision. + Defaults to ``numpy.get_printoptions()['precision']``. + suppress_small : bool, optional + Represent numbers "very close" to zero as zero; default is False. + Very close is defined by precision: if the precision is 8, e.g., + numbers smaller (in absolute value) than 5e-9 are represented as + zero. + Defaults to ``numpy.get_printoptions()['suppress']``. + + See Also + -------- + array2string, array_repr, set_printoptions + + Examples + -------- + >>> import numpy as np + >>> np.array_str(np.arange(3)) + '[0 1 2]' + + """ + return _array_str_implementation( + a, max_line_width, precision, suppress_small) + + +# needed if __array_function__ is disabled +_array2string_impl = getattr(array2string, '__wrapped__', array2string) +_default_array_str = functools.partial(_array_str_implementation, + array2string=_array2string_impl) +_default_array_repr = functools.partial(_array_repr_implementation, + array2string=_array2string_impl) diff --git a/local_packages/numpy/_core/arrayprint.pyi b/local_packages/numpy/_core/arrayprint.pyi new file mode 100644 index 0000000..307f844 --- /dev/null +++ b/local_packages/numpy/_core/arrayprint.pyi @@ -0,0 +1,158 @@ +from collections.abc import Callable + +# Using a private class is by no means ideal, but it is simply a consequence +# of a `contextlib.context` returning an instance of aforementioned class +from contextlib import _GeneratorContextManager +from typing import ( + Any, + Final, + Literal, + SupportsIndex, + TypeAlias, + TypedDict, + type_check_only, +) + +import numpy as np +from numpy._typing import NDArray, _CharLike_co, _FloatLike_co + +__all__ = [ + "array2string", + "array_repr", + "array_str", + "format_float_positional", + "format_float_scientific", + "get_printoptions", + "printoptions", + "set_printoptions", +] + +### + +_FloatMode: TypeAlias = Literal["fixed", "unique", "maxprec", "maxprec_equal"] +_LegacyNoStyle: TypeAlias = Literal["1.21", "1.25", "2.1", False] +_Legacy: TypeAlias = Literal["1.13", _LegacyNoStyle] +_Sign: TypeAlias = Literal["-", "+", " "] +_Trim: TypeAlias = Literal["k", ".", "0", "-"] +_ReprFunc: TypeAlias = Callable[[NDArray[Any]], str] + +@type_check_only +class _FormatDict(TypedDict, total=False): + bool: Callable[[np.bool], str] + int: Callable[[np.integer], str] + timedelta: Callable[[np.timedelta64], str] + datetime: Callable[[np.datetime64], str] + float: Callable[[np.floating], str] + longfloat: Callable[[np.longdouble], str] + complexfloat: Callable[[np.complexfloating], str] + longcomplexfloat: Callable[[np.clongdouble], str] + void: Callable[[np.void], str] + numpystr: Callable[[_CharLike_co], str] + object: Callable[[object], str] + all: Callable[[object], str] + int_kind: Callable[[np.integer], str] + float_kind: Callable[[np.floating], str] + complex_kind: Callable[[np.complexfloating], str] + str_kind: Callable[[_CharLike_co], str] + +@type_check_only +class _FormatOptions(TypedDict): + precision: int + threshold: int + edgeitems: int + linewidth: int + suppress: bool + nanstr: str + infstr: str + formatter: _FormatDict | None + sign: _Sign + floatmode: _FloatMode + legacy: _Legacy + +### + +__docformat__: Final = "restructuredtext" # undocumented + +def set_printoptions( + precision: SupportsIndex | None = None, + threshold: int | None = None, + edgeitems: int | None = None, + linewidth: int | None = None, + suppress: bool | None = None, + nanstr: str | None = None, + infstr: str | None = None, + formatter: _FormatDict | None = None, + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, + *, + legacy: _Legacy | None = None, + override_repr: _ReprFunc | None = None, +) -> None: ... +def get_printoptions() -> _FormatOptions: ... + +# public numpy export +def array2string( + a: NDArray[Any], + max_line_width: int | None = None, + precision: SupportsIndex | None = None, + suppress_small: bool | None = None, + separator: str = " ", + prefix: str = "", + *, + formatter: _FormatDict | None = None, + threshold: int | None = None, + edgeitems: int | None = None, + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, + suffix: str = "", + legacy: _Legacy | None = None, +) -> str: ... + +def format_float_scientific( + x: _FloatLike_co, + precision: int | None = None, + unique: bool = True, + trim: _Trim = "k", + sign: bool = False, + pad_left: int | None = None, + exp_digits: int | None = None, + min_digits: int | None = None, +) -> str: ... +def format_float_positional( + x: _FloatLike_co, + precision: int | None = None, + unique: bool = True, + fractional: bool = True, + trim: _Trim = "k", + sign: bool = False, + pad_left: int | None = None, + pad_right: int | None = None, + min_digits: int | None = None, +) -> str: ... +def array_repr( + arr: NDArray[Any], + max_line_width: int | None = None, + precision: SupportsIndex | None = None, + suppress_small: bool | None = None, +) -> str: ... +def array_str( + a: NDArray[Any], + max_line_width: int | None = None, + precision: SupportsIndex | None = None, + suppress_small: bool | None = None, +) -> str: ... +def printoptions( + precision: SupportsIndex | None = ..., + threshold: int | None = ..., + edgeitems: int | None = ..., + linewidth: int | None = ..., + suppress: bool | None = ..., + nanstr: str | None = ..., + infstr: str | None = ..., + formatter: _FormatDict | None = ..., + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, + *, + legacy: _Legacy | None = None, + override_repr: _ReprFunc | None = None, +) -> _GeneratorContextManager[_FormatOptions]: ... diff --git a/local_packages/numpy/_core/cversions.py b/local_packages/numpy/_core/cversions.py new file mode 100644 index 0000000..00159c3 --- /dev/null +++ b/local_packages/numpy/_core/cversions.py @@ -0,0 +1,13 @@ +"""Simple script to compute the api hash of the current API. + +The API has is defined by numpy_api_order and ufunc_api_order. + +""" +from os.path import dirname + +from code_generators.genapi import fullapi_hash +from code_generators.numpy_api import full_api + +if __name__ == '__main__': + curdir = dirname(__file__) + print(fullapi_hash(full_api)) diff --git a/local_packages/numpy/_core/defchararray.py b/local_packages/numpy/_core/defchararray.py new file mode 100644 index 0000000..1a87505 --- /dev/null +++ b/local_packages/numpy/_core/defchararray.py @@ -0,0 +1,1414 @@ +""" +This module contains a set of functions for vectorized string +operations and methods. + +.. note:: + The `chararray` class exists for backwards compatibility with + Numarray, it is not recommended for new development. Starting from numpy + 1.4, if one needs arrays of strings, it is recommended to use arrays of + `dtype` `object_`, `bytes_` or `str_`, and use the free functions + in the `numpy.char` module for fast vectorized string operations. + +Some methods will only be available if the corresponding string method is +available in your version of Python. + +The preferred alias for `defchararray` is `numpy.char`. + +""" +import functools + +import numpy as np +from numpy._core import overrides +from numpy._core.multiarray import compare_chararrays +from numpy._core.strings import ( + _join as join, + _rsplit as rsplit, + _split as split, + _splitlines as splitlines, +) +from numpy._utils import set_module +from numpy.strings import * +from numpy.strings import ( + multiply as strings_multiply, + partition as strings_partition, + rpartition as strings_rpartition, +) + +from .numeric import array as narray, asarray as asnarray, ndarray +from .numerictypes import bytes_, character, str_ + +__all__ = [ + 'equal', 'not_equal', 'greater_equal', 'less_equal', + 'greater', 'less', 'str_len', 'add', 'multiply', 'mod', 'capitalize', + 'center', 'count', 'decode', 'encode', 'endswith', 'expandtabs', + 'find', 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace', + 'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', 'partition', + 'replace', 'rfind', 'rindex', 'rjust', 'rpartition', 'rsplit', + 'rstrip', 'split', 'splitlines', 'startswith', 'strip', 'swapcase', + 'title', 'translate', 'upper', 'zfill', 'isnumeric', 'isdecimal', + 'array', 'asarray', 'compare_chararrays', 'chararray' + ] + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy.char') + + +def _binary_op_dispatcher(x1, x2): + return (x1, x2) + + +@array_function_dispatch(_binary_op_dispatcher) +def equal(x1, x2): + """ + Return (x1 == x2) element-wise. + + Unlike `numpy.equal`, this comparison is performed by first + stripping whitespace characters from the end of the string. This + behavior is provided for backward-compatibility with numarray. + + Parameters + ---------- + x1, x2 : array_like of str or unicode + Input arrays of the same shape. + + Returns + ------- + out : ndarray + Output array of bools. + + Examples + -------- + >>> import numpy as np + >>> y = "aa " + >>> x = "aa" + >>> np.char.equal(x, y) + array(True) + + See Also + -------- + not_equal, greater_equal, less_equal, greater, less + """ + return compare_chararrays(x1, x2, '==', True) + + +@array_function_dispatch(_binary_op_dispatcher) +def not_equal(x1, x2): + """ + Return (x1 != x2) element-wise. + + Unlike `numpy.not_equal`, this comparison is performed by first + stripping whitespace characters from the end of the string. This + behavior is provided for backward-compatibility with numarray. + + Parameters + ---------- + x1, x2 : array_like of str or unicode + Input arrays of the same shape. + + Returns + ------- + out : ndarray + Output array of bools. + + See Also + -------- + equal, greater_equal, less_equal, greater, less + + Examples + -------- + >>> import numpy as np + >>> x1 = np.array(['a', 'b', 'c']) + >>> np.char.not_equal(x1, 'b') + array([ True, False, True]) + + """ + return compare_chararrays(x1, x2, '!=', True) + + +@array_function_dispatch(_binary_op_dispatcher) +def greater_equal(x1, x2): + """ + Return (x1 >= x2) element-wise. + + Unlike `numpy.greater_equal`, this comparison is performed by + first stripping whitespace characters from the end of the string. + This behavior is provided for backward-compatibility with + numarray. + + Parameters + ---------- + x1, x2 : array_like of str or unicode + Input arrays of the same shape. + + Returns + ------- + out : ndarray + Output array of bools. + + See Also + -------- + equal, not_equal, less_equal, greater, less + + Examples + -------- + >>> import numpy as np + >>> x1 = np.array(['a', 'b', 'c']) + >>> np.char.greater_equal(x1, 'b') + array([False, True, True]) + + """ + return compare_chararrays(x1, x2, '>=', True) + + +@array_function_dispatch(_binary_op_dispatcher) +def less_equal(x1, x2): + """ + Return (x1 <= x2) element-wise. + + Unlike `numpy.less_equal`, this comparison is performed by first + stripping whitespace characters from the end of the string. This + behavior is provided for backward-compatibility with numarray. + + Parameters + ---------- + x1, x2 : array_like of str or unicode + Input arrays of the same shape. + + Returns + ------- + out : ndarray + Output array of bools. + + See Also + -------- + equal, not_equal, greater_equal, greater, less + + Examples + -------- + >>> import numpy as np + >>> x1 = np.array(['a', 'b', 'c']) + >>> np.char.less_equal(x1, 'b') + array([ True, True, False]) + + """ + return compare_chararrays(x1, x2, '<=', True) + + +@array_function_dispatch(_binary_op_dispatcher) +def greater(x1, x2): + """ + Return (x1 > x2) element-wise. + + Unlike `numpy.greater`, this comparison is performed by first + stripping whitespace characters from the end of the string. This + behavior is provided for backward-compatibility with numarray. + + Parameters + ---------- + x1, x2 : array_like of str or unicode + Input arrays of the same shape. + + Returns + ------- + out : ndarray + Output array of bools. + + See Also + -------- + equal, not_equal, greater_equal, less_equal, less + + Examples + -------- + >>> import numpy as np + >>> x1 = np.array(['a', 'b', 'c']) + >>> np.char.greater(x1, 'b') + array([False, False, True]) + + """ + return compare_chararrays(x1, x2, '>', True) + + +@array_function_dispatch(_binary_op_dispatcher) +def less(x1, x2): + """ + Return (x1 < x2) element-wise. + + Unlike `numpy.greater`, this comparison is performed by first + stripping whitespace characters from the end of the string. This + behavior is provided for backward-compatibility with numarray. + + Parameters + ---------- + x1, x2 : array_like of str or unicode + Input arrays of the same shape. + + Returns + ------- + out : ndarray + Output array of bools. + + See Also + -------- + equal, not_equal, greater_equal, less_equal, greater + + Examples + -------- + >>> import numpy as np + >>> x1 = np.array(['a', 'b', 'c']) + >>> np.char.less(x1, 'b') + array([True, False, False]) + + """ + return compare_chararrays(x1, x2, '<', True) + + +@set_module("numpy.char") +def multiply(a, i): + """ + Return (a * i), that is string multiple concatenation, + element-wise. + + Values in ``i`` of less than 0 are treated as 0 (which yields an + empty string). + + Parameters + ---------- + a : array_like, with `np.bytes_` or `np.str_` dtype + + i : array_like, with any integer dtype + + Returns + ------- + out : ndarray + Output array of str or unicode, depending on input types + + Notes + ----- + This is a thin wrapper around np.strings.multiply that raises + `ValueError` when ``i`` is not an integer. It only + exists for backwards-compatibility. + + Examples + -------- + >>> import numpy as np + >>> a = np.array(["a", "b", "c"]) + >>> np.strings.multiply(a, 3) + array(['aaa', 'bbb', 'ccc'], dtype='>> i = np.array([1, 2, 3]) + >>> np.strings.multiply(a, i) + array(['a', 'bb', 'ccc'], dtype='>> np.strings.multiply(np.array(['a']), i) + array(['a', 'aa', 'aaa'], dtype='>> a = np.array(['a', 'b', 'c', 'd', 'e', 'f']).reshape((2, 3)) + >>> np.strings.multiply(a, 3) + array([['aaa', 'bbb', 'ccc'], + ['ddd', 'eee', 'fff']], dtype='>> np.strings.multiply(a, i) + array([['a', 'bb', 'ccc'], + ['d', 'ee', 'fff']], dtype='>> import numpy as np + >>> x = np.array(["Numpy is nice!"]) + >>> np.char.partition(x, " ") + array([['Numpy', ' ', 'is nice!']], dtype='>> import numpy as np + >>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> np.char.rpartition(a, 'A') + array([['aAaAa', 'A', ''], + [' a', 'A', ' '], + ['abB', 'A', 'Bba']], dtype='= 2`` and ``order='F'``, in which case `strides` + is in "Fortran order". + + Methods + ------- + astype + argsort + copy + count + decode + dump + dumps + encode + endswith + expandtabs + fill + find + flatten + getfield + index + isalnum + isalpha + isdecimal + isdigit + islower + isnumeric + isspace + istitle + isupper + item + join + ljust + lower + lstrip + nonzero + put + ravel + repeat + replace + reshape + resize + rfind + rindex + rjust + rsplit + rstrip + searchsorted + setfield + setflags + sort + split + splitlines + squeeze + startswith + strip + swapaxes + swapcase + take + title + tofile + tolist + translate + transpose + upper + view + zfill + + Parameters + ---------- + shape : tuple + Shape of the array. + itemsize : int, optional + Length of each array element, in number of characters. Default is 1. + unicode : bool, optional + Are the array elements of type unicode (True) or string (False). + Default is False. + buffer : object exposing the buffer interface or str, optional + Memory address of the start of the array data. Default is None, + in which case a new array is created. + offset : int, optional + Fixed stride displacement from the beginning of an axis? + Default is 0. Needs to be >=0. + strides : array_like of ints, optional + Strides for the array (see `~numpy.ndarray.strides` for + full description). Default is None. + order : {'C', 'F'}, optional + The order in which the array data is stored in memory: 'C' -> + "row major" order (the default), 'F' -> "column major" + (Fortran) order. + + Examples + -------- + >>> import numpy as np + >>> charar = np.char.chararray((3, 3)) + >>> charar[:] = 'a' + >>> charar + chararray([[b'a', b'a', b'a'], + [b'a', b'a', b'a'], + [b'a', b'a', b'a']], dtype='|S1') + + >>> charar = np.char.chararray(charar.shape, itemsize=5) + >>> charar[:] = 'abc' + >>> charar + chararray([[b'abc', b'abc', b'abc'], + [b'abc', b'abc', b'abc'], + [b'abc', b'abc', b'abc']], dtype='|S5') + + """ + def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None, + offset=0, strides=None, order='C'): + if unicode: + dtype = str_ + else: + dtype = bytes_ + + # force itemsize to be a Python int, since using NumPy integer + # types results in itemsize.itemsize being used as the size of + # strings in the new array. + itemsize = int(itemsize) + + if isinstance(buffer, str): + # unicode objects do not have the buffer interface + filler = buffer + buffer = None + else: + filler = None + + if buffer is None: + self = ndarray.__new__(subtype, shape, (dtype, itemsize), + order=order) + else: + self = ndarray.__new__(subtype, shape, (dtype, itemsize), + buffer=buffer, + offset=offset, strides=strides, + order=order) + if filler is not None: + self[...] = filler + + return self + + def __array_wrap__(self, arr, context=None, return_scalar=False): + # When calling a ufunc (and some other functions), we return a + # chararray if the ufunc output is a string-like array, + # or an ndarray otherwise + if arr.dtype.char in "SUbc": + return arr.view(type(self)) + return arr + + def __array_finalize__(self, obj): + # The b is a special case because it is used for reconstructing. + if self.dtype.char not in 'VSUbc': + raise ValueError("Can only create a chararray from string data.") + + def __getitem__(self, obj): + val = ndarray.__getitem__(self, obj) + if isinstance(val, character): + return val.rstrip() + return val + + # IMPLEMENTATION NOTE: Most of the methods of this class are + # direct delegations to the free functions in this module. + # However, those that return an array of strings should instead + # return a chararray, so some extra wrapping is required. + + def __eq__(self, other): + """ + Return (self == other) element-wise. + + See Also + -------- + equal + """ + return equal(self, other) + + def __ne__(self, other): + """ + Return (self != other) element-wise. + + See Also + -------- + not_equal + """ + return not_equal(self, other) + + def __ge__(self, other): + """ + Return (self >= other) element-wise. + + See Also + -------- + greater_equal + """ + return greater_equal(self, other) + + def __le__(self, other): + """ + Return (self <= other) element-wise. + + See Also + -------- + less_equal + """ + return less_equal(self, other) + + def __gt__(self, other): + """ + Return (self > other) element-wise. + + See Also + -------- + greater + """ + return greater(self, other) + + def __lt__(self, other): + """ + Return (self < other) element-wise. + + See Also + -------- + less + """ + return less(self, other) + + def __add__(self, other): + """ + Return (self + other), that is string concatenation, + element-wise for a pair of array_likes of str or unicode. + + See Also + -------- + add + """ + return add(self, other) + + def __radd__(self, other): + """ + Return (other + self), that is string concatenation, + element-wise for a pair of array_likes of `bytes_` or `str_`. + + See Also + -------- + add + """ + return add(other, self) + + def __mul__(self, i): + """ + Return (self * i), that is string multiple concatenation, + element-wise. + + See Also + -------- + multiply + """ + return asarray(multiply(self, i)) + + def __rmul__(self, i): + """ + Return (self * i), that is string multiple concatenation, + element-wise. + + See Also + -------- + multiply + """ + return asarray(multiply(self, i)) + + def __mod__(self, i): + """ + Return (self % i), that is pre-Python 2.6 string formatting + (interpolation), element-wise for a pair of array_likes of `bytes_` + or `str_`. + + See Also + -------- + mod + """ + return asarray(mod(self, i)) + + def __rmod__(self, other): + return NotImplemented + + def argsort(self, axis=-1, kind=None, order=None, *, stable=None): + """ + Return the indices that sort the array lexicographically. + + For full documentation see `numpy.argsort`, for which this method is + in fact merely a "thin wrapper." + + Examples + -------- + >>> c = np.array(['a1b c', '1b ca', 'b ca1', 'Ca1b'], 'S5') + >>> c = c.view(np.char.chararray); c + chararray(['a1b c', '1b ca', 'b ca1', 'Ca1b'], + dtype='|S5') + >>> c[c.argsort()] + chararray(['1b ca', 'Ca1b', 'a1b c', 'b ca1'], + dtype='|S5') + + """ + return self.__array__().argsort(axis, kind, order, stable=stable) + argsort.__doc__ = ndarray.argsort.__doc__ + + def capitalize(self): + """ + Return a copy of `self` with only the first character of each element + capitalized. + + See Also + -------- + char.capitalize + + """ + return asarray(capitalize(self)) + + def center(self, width, fillchar=' '): + """ + Return a copy of `self` with its elements centered in a + string of length `width`. + + See Also + -------- + center + """ + return asarray(center(self, width, fillchar)) + + def count(self, sub, start=0, end=None): + """ + Returns an array with the number of non-overlapping occurrences of + substring `sub` in the range [`start`, `end`]. + + See Also + -------- + char.count + + """ + return count(self, sub, start, end) + + def decode(self, encoding=None, errors=None): + """ + Calls ``bytes.decode`` element-wise. + + See Also + -------- + char.decode + + """ + return decode(self, encoding, errors) + + def encode(self, encoding=None, errors=None): + """ + Calls :meth:`str.encode` element-wise. + + See Also + -------- + char.encode + + """ + return encode(self, encoding, errors) + + def endswith(self, suffix, start=0, end=None): + """ + Returns a boolean array which is `True` where the string element + in `self` ends with `suffix`, otherwise `False`. + + See Also + -------- + char.endswith + + """ + return endswith(self, suffix, start, end) + + def expandtabs(self, tabsize=8): + """ + Return a copy of each string element where all tab characters are + replaced by one or more spaces. + + See Also + -------- + char.expandtabs + + """ + return asarray(expandtabs(self, tabsize)) + + def find(self, sub, start=0, end=None): + """ + For each element, return the lowest index in the string where + substring `sub` is found. + + See Also + -------- + char.find + + """ + return find(self, sub, start, end) + + def index(self, sub, start=0, end=None): + """ + Like `find`, but raises :exc:`ValueError` when the substring is not + found. + + See Also + -------- + char.index + + """ + return index(self, sub, start, end) + + def isalnum(self): + """ + Returns true for each element if all characters in the string + are alphanumeric and there is at least one character, false + otherwise. + + See Also + -------- + char.isalnum + + """ + return isalnum(self) + + def isalpha(self): + """ + Returns true for each element if all characters in the string + are alphabetic and there is at least one character, false + otherwise. + + See Also + -------- + char.isalpha + + """ + return isalpha(self) + + def isdigit(self): + """ + Returns true for each element if all characters in the string are + digits and there is at least one character, false otherwise. + + See Also + -------- + char.isdigit + + """ + return isdigit(self) + + def islower(self): + """ + Returns true for each element if all cased characters in the + string are lowercase and there is at least one cased character, + false otherwise. + + See Also + -------- + char.islower + + """ + return islower(self) + + def isspace(self): + """ + Returns true for each element if there are only whitespace + characters in the string and there is at least one character, + false otherwise. + + See Also + -------- + char.isspace + + """ + return isspace(self) + + def istitle(self): + """ + Returns true for each element if the element is a titlecased + string and there is at least one character, false otherwise. + + See Also + -------- + char.istitle + + """ + return istitle(self) + + def isupper(self): + """ + Returns true for each element if all cased characters in the + string are uppercase and there is at least one character, false + otherwise. + + See Also + -------- + char.isupper + + """ + return isupper(self) + + def join(self, seq): + """ + Return a string which is the concatenation of the strings in the + sequence `seq`. + + See Also + -------- + char.join + + """ + return join(self, seq) + + def ljust(self, width, fillchar=' '): + """ + Return an array with the elements of `self` left-justified in a + string of length `width`. + + See Also + -------- + char.ljust + + """ + return asarray(ljust(self, width, fillchar)) + + def lower(self): + """ + Return an array with the elements of `self` converted to + lowercase. + + See Also + -------- + char.lower + + """ + return asarray(lower(self)) + + def lstrip(self, chars=None): + """ + For each element in `self`, return a copy with the leading characters + removed. + + See Also + -------- + char.lstrip + + """ + return lstrip(self, chars) + + def partition(self, sep): + """ + Partition each element in `self` around `sep`. + + See Also + -------- + partition + """ + return asarray(partition(self, sep)) + + def replace(self, old, new, count=None): + """ + For each element in `self`, return a copy of the string with all + occurrences of substring `old` replaced by `new`. + + See Also + -------- + char.replace + + """ + return replace(self, old, new, count if count is not None else -1) + + def rfind(self, sub, start=0, end=None): + """ + For each element in `self`, return the highest index in the string + where substring `sub` is found, such that `sub` is contained + within [`start`, `end`]. + + See Also + -------- + char.rfind + + """ + return rfind(self, sub, start, end) + + def rindex(self, sub, start=0, end=None): + """ + Like `rfind`, but raises :exc:`ValueError` when the substring `sub` is + not found. + + See Also + -------- + char.rindex + + """ + return rindex(self, sub, start, end) + + def rjust(self, width, fillchar=' '): + """ + Return an array with the elements of `self` + right-justified in a string of length `width`. + + See Also + -------- + char.rjust + + """ + return asarray(rjust(self, width, fillchar)) + + def rpartition(self, sep): + """ + Partition each element in `self` around `sep`. + + See Also + -------- + rpartition + """ + return asarray(rpartition(self, sep)) + + def rsplit(self, sep=None, maxsplit=None): + """ + For each element in `self`, return a list of the words in + the string, using `sep` as the delimiter string. + + See Also + -------- + char.rsplit + + """ + return rsplit(self, sep, maxsplit) + + def rstrip(self, chars=None): + """ + For each element in `self`, return a copy with the trailing + characters removed. + + See Also + -------- + char.rstrip + + """ + return rstrip(self, chars) + + def split(self, sep=None, maxsplit=None): + """ + For each element in `self`, return a list of the words in the + string, using `sep` as the delimiter string. + + See Also + -------- + char.split + + """ + return split(self, sep, maxsplit) + + def splitlines(self, keepends=None): + """ + For each element in `self`, return a list of the lines in the + element, breaking at line boundaries. + + See Also + -------- + char.splitlines + + """ + return splitlines(self, keepends) + + def startswith(self, prefix, start=0, end=None): + """ + Returns a boolean array which is `True` where the string element + in `self` starts with `prefix`, otherwise `False`. + + See Also + -------- + char.startswith + + """ + return startswith(self, prefix, start, end) + + def strip(self, chars=None): + """ + For each element in `self`, return a copy with the leading and + trailing characters removed. + + See Also + -------- + char.strip + + """ + return strip(self, chars) + + def swapcase(self): + """ + For each element in `self`, return a copy of the string with + uppercase characters converted to lowercase and vice versa. + + See Also + -------- + char.swapcase + + """ + return asarray(swapcase(self)) + + def title(self): + """ + For each element in `self`, return a titlecased version of the + string: words start with uppercase characters, all remaining cased + characters are lowercase. + + See Also + -------- + char.title + + """ + return asarray(title(self)) + + def translate(self, table, deletechars=None): + """ + For each element in `self`, return a copy of the string where + all characters occurring in the optional argument + `deletechars` are removed, and the remaining characters have + been mapped through the given translation table. + + See Also + -------- + char.translate + + """ + return asarray(translate(self, table, deletechars)) + + def upper(self): + """ + Return an array with the elements of `self` converted to + uppercase. + + See Also + -------- + char.upper + + """ + return asarray(upper(self)) + + def zfill(self, width): + """ + Return the numeric string left-filled with zeros in a string of + length `width`. + + See Also + -------- + char.zfill + + """ + return asarray(zfill(self, width)) + + def isnumeric(self): + """ + For each element in `self`, return True if there are only + numeric characters in the element. + + See Also + -------- + char.isnumeric + + """ + return isnumeric(self) + + def isdecimal(self): + """ + For each element in `self`, return True if there are only + decimal characters in the element. + + See Also + -------- + char.isdecimal + + """ + return isdecimal(self) + + +@set_module("numpy.char") +def array(obj, itemsize=None, copy=True, unicode=None, order=None): + """ + Create a `~numpy.char.chararray`. + + .. note:: + This class is provided for numarray backward-compatibility. + New code (not concerned with numarray compatibility) should use + arrays of type `bytes_` or `str_` and use the free functions + in :mod:`numpy.char` for fast vectorized string operations instead. + + Versus a NumPy array of dtype `bytes_` or `str_`, this + class adds the following functionality: + + 1) values automatically have whitespace removed from the end + when indexed + + 2) comparison operators automatically remove whitespace from the + end when comparing values + + 3) vectorized string operations are provided as methods + (e.g. `chararray.endswith `) + and infix operators (e.g. ``+, *, %``) + + Parameters + ---------- + obj : array of str or unicode-like + + itemsize : int, optional + `itemsize` is the number of characters per scalar in the + resulting array. If `itemsize` is None, and `obj` is an + object array or a Python list, the `itemsize` will be + automatically determined. If `itemsize` is provided and `obj` + is of type str or unicode, then the `obj` string will be + chunked into `itemsize` pieces. + + copy : bool, optional + If true (default), then the object is copied. Otherwise, a copy + will only be made if ``__array__`` returns a copy, if obj is a + nested sequence, or if a copy is needed to satisfy any of the other + requirements (`itemsize`, unicode, `order`, etc.). + + unicode : bool, optional + When true, the resulting `~numpy.char.chararray` can contain Unicode + characters, when false only 8-bit characters. If unicode is + None and `obj` is one of the following: + + - a `~numpy.char.chararray`, + - an ndarray of type :class:`str_` or :class:`bytes_` + - a Python :class:`str` or :class:`bytes` object, + + then the unicode setting of the output array will be + automatically determined. + + order : {'C', 'F', 'A'}, optional + Specify the order of the array. If order is 'C' (default), then the + array will be in C-contiguous order (last-index varies the + fastest). If order is 'F', then the returned array + will be in Fortran-contiguous order (first-index varies the + fastest). If order is 'A', then the returned array may + be in any order (either C-, Fortran-contiguous, or even + discontiguous). + + Examples + -------- + + >>> import numpy as np + >>> char_array = np.char.array(['hello', 'world', 'numpy','array']) + >>> char_array + chararray(['hello', 'world', 'numpy', 'array'], dtype='`) + and infix operators (e.g. ``+``, ``*``, ``%``) + + Parameters + ---------- + obj : array of str or unicode-like + + itemsize : int, optional + `itemsize` is the number of characters per scalar in the + resulting array. If `itemsize` is None, and `obj` is an + object array or a Python list, the `itemsize` will be + automatically determined. If `itemsize` is provided and `obj` + is of type str or unicode, then the `obj` string will be + chunked into `itemsize` pieces. + + unicode : bool, optional + When true, the resulting `~numpy.char.chararray` can contain Unicode + characters, when false only 8-bit characters. If unicode is + None and `obj` is one of the following: + + - a `~numpy.char.chararray`, + - an ndarray of type `str_` or `unicode_` + - a Python str or unicode object, + + then the unicode setting of the output array will be + automatically determined. + + order : {'C', 'F'}, optional + Specify the order of the array. If order is 'C' (default), then the + array will be in C-contiguous order (last-index varies the + fastest). If order is 'F', then the returned array + will be in Fortran-contiguous order (first-index varies the + fastest). + + Examples + -------- + >>> import numpy as np + >>> np.char.asarray(['hello', 'world']) + chararray(['hello', 'world'], dtype=' _CharArray[bytes_]: ... + @overload + def __new__( + subtype, + shape: _ShapeLike, + itemsize: SupportsIndex | SupportsInt, + unicode: L[True], + buffer: Buffer | np.ndarray | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, + order: _OrderKACF = "C", + ) -> _CharArray[str_]: ... + @overload + def __new__( + subtype, + shape: _ShapeLike, + itemsize: SupportsIndex | SupportsInt = 1, + *, + unicode: L[True], + buffer: Buffer | np.ndarray | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, + order: _OrderKACF = "C", + ) -> _CharArray[str_]: ... + + def __array_finalize__(self, obj: object) -> None: ... + def __mul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... # type: ignore[override] + def __rmul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... # type: ignore[override] + def __mod__(self, i: Any) -> chararray[_AnyShape, _CharDTypeT_co]: ... # type: ignore[override] + + @overload # type: ignore[override] + def __eq__( + self: _CharArray[str_], + other: U_co, + ) -> NDArray[np.bool]: ... + @overload + def __eq__( + self: _CharArray[bytes_], + other: S_co, + ) -> NDArray[np.bool]: ... + + @overload # type: ignore[override] + def __ne__( + self: _CharArray[str_], + other: U_co, + ) -> NDArray[np.bool]: ... + @overload + def __ne__( + self: _CharArray[bytes_], + other: S_co, + ) -> NDArray[np.bool]: ... + + @overload # type: ignore[override] + def __ge__( + self: _CharArray[str_], + other: U_co, + ) -> NDArray[np.bool]: ... + @overload + def __ge__( + self: _CharArray[bytes_], + other: S_co, + ) -> NDArray[np.bool]: ... + + @overload # type: ignore[override] + def __le__( + self: _CharArray[str_], + other: U_co, + ) -> NDArray[np.bool]: ... + @overload + def __le__( + self: _CharArray[bytes_], + other: S_co, + ) -> NDArray[np.bool]: ... + + @overload # type: ignore[override] + def __gt__( + self: _CharArray[str_], + other: U_co, + ) -> NDArray[np.bool]: ... + @overload + def __gt__( + self: _CharArray[bytes_], + other: S_co, + ) -> NDArray[np.bool]: ... + + @overload # type: ignore[override] + def __lt__( + self: _CharArray[str_], + other: U_co, + ) -> NDArray[np.bool]: ... + @overload + def __lt__( + self: _CharArray[bytes_], + other: S_co, + ) -> NDArray[np.bool]: ... + + @overload # type: ignore[override] + def __add__( + self: _CharArray[str_], + other: U_co, + ) -> _CharArray[str_]: ... + @overload + def __add__( + self: _CharArray[bytes_], + other: S_co, + ) -> _CharArray[bytes_]: ... + + @overload # type: ignore[override] + def __radd__( + self: _CharArray[str_], + other: U_co, + ) -> _CharArray[str_]: ... + @overload + def __radd__( + self: _CharArray[bytes_], + other: S_co, + ) -> _CharArray[bytes_]: ... + + @overload + def center( + self: _CharArray[str_], + width: i_co, + fillchar: U_co = " ", + ) -> _CharArray[str_]: ... + @overload + def center( + self: _CharArray[bytes_], + width: i_co, + fillchar: str | S_co = " ", + ) -> _CharArray[bytes_]: ... + + @overload + def count( + self: _CharArray[str_], + sub: U_co, + start: i_co = 0, + end: i_co | None = None, + ) -> NDArray[int_]: ... + @overload + def count( + self: _CharArray[bytes_], + sub: S_co, + start: i_co = 0, + end: i_co | None = None, + ) -> NDArray[int_]: ... + + def decode( + self: _CharArray[bytes_], + encoding: str | None = None, + errors: str | None = None, + ) -> _CharArray[str_]: ... + + def encode( + self: _CharArray[str_], + encoding: str | None = None, + errors: str | None = None, + ) -> _CharArray[bytes_]: ... + + @overload + def endswith( + self: _CharArray[str_], + suffix: U_co, + start: i_co = 0, + end: i_co | None = None, + ) -> NDArray[np.bool]: ... + @overload + def endswith( + self: _CharArray[bytes_], + suffix: S_co, + start: i_co = 0, + end: i_co | None = None, + ) -> NDArray[np.bool]: ... + + def expandtabs( + self, + tabsize: i_co = 8, + ) -> Self: ... + + @overload + def find( + self: _CharArray[str_], + sub: U_co, + start: i_co = 0, + end: i_co | None = None, + ) -> NDArray[int_]: ... + @overload + def find( + self: _CharArray[bytes_], + sub: S_co, + start: i_co = 0, + end: i_co | None = None, + ) -> NDArray[int_]: ... + + @overload + def index( + self: _CharArray[str_], + sub: U_co, + start: i_co = 0, + end: i_co | None = None, + ) -> NDArray[int_]: ... + @overload + def index( + self: _CharArray[bytes_], + sub: S_co, + start: i_co = 0, + end: i_co | None = None, + ) -> NDArray[int_]: ... + + @overload + def join( + self: _CharArray[str_], + seq: U_co, + ) -> _CharArray[str_]: ... + @overload + def join( + self: _CharArray[bytes_], + seq: S_co, + ) -> _CharArray[bytes_]: ... + + @overload + def ljust( + self: _CharArray[str_], + width: i_co, + fillchar: U_co = " ", + ) -> _CharArray[str_]: ... + @overload + def ljust( + self: _CharArray[bytes_], + width: i_co, + fillchar: str | S_co = " ", + ) -> _CharArray[bytes_]: ... + + @overload + def lstrip( + self: _CharArray[str_], + chars: U_co | None = None, + ) -> _CharArray[str_]: ... + @overload + def lstrip( + self: _CharArray[bytes_], + chars: S_co | None = None, + ) -> _CharArray[bytes_]: ... + + @overload # type: ignore[override] + def partition( + self: _CharArray[str_], + sep: U_co, + ) -> _CharArray[str_]: ... + @overload + def partition( + self: _CharArray[bytes_], + sep: S_co, + ) -> _CharArray[bytes_]: ... + + @overload + def replace( + self: _CharArray[str_], + old: U_co, + new: U_co, + count: i_co | None = None, + ) -> _CharArray[str_]: ... + @overload + def replace( + self: _CharArray[bytes_], + old: S_co, + new: S_co, + count: i_co | None = None, + ) -> _CharArray[bytes_]: ... + + @overload + def rfind( + self: _CharArray[str_], + sub: U_co, + start: i_co = 0, + end: i_co | None = None, + ) -> NDArray[int_]: ... + @overload + def rfind( + self: _CharArray[bytes_], + sub: S_co, + start: i_co = 0, + end: i_co | None = None, + ) -> NDArray[int_]: ... + + @overload + def rindex( + self: _CharArray[str_], + sub: U_co, + start: i_co = 0, + end: i_co | None = None, + ) -> NDArray[int_]: ... + @overload + def rindex( + self: _CharArray[bytes_], + sub: S_co, + start: i_co = 0, + end: i_co | None = None, + ) -> NDArray[int_]: ... + + @overload + def rjust( + self: _CharArray[str_], + width: i_co, + fillchar: U_co = " ", + ) -> _CharArray[str_]: ... + @overload + def rjust( + self: _CharArray[bytes_], + width: i_co, + fillchar: str | S_co = " ", + ) -> _CharArray[bytes_]: ... + + @overload + def rpartition( + self: _CharArray[str_], + sep: U_co, + ) -> _CharArray[str_]: ... + @overload + def rpartition( + self: _CharArray[bytes_], + sep: S_co, + ) -> _CharArray[bytes_]: ... + + @overload + def rsplit( + self: _CharArray[str_], + sep: U_co | None = None, + maxsplit: i_co | None = None, + ) -> NDArray[object_]: ... + @overload + def rsplit( + self: _CharArray[bytes_], + sep: S_co | None = None, + maxsplit: i_co | None = None, + ) -> NDArray[object_]: ... + + @overload + def rstrip( + self: _CharArray[str_], + chars: U_co | None = None, + ) -> _CharArray[str_]: ... + @overload + def rstrip( + self: _CharArray[bytes_], + chars: S_co | None = None, + ) -> _CharArray[bytes_]: ... + + @overload + def split( + self: _CharArray[str_], + sep: U_co | None = None, + maxsplit: i_co | None = None, + ) -> NDArray[object_]: ... + @overload + def split( + self: _CharArray[bytes_], + sep: S_co | None = None, + maxsplit: i_co | None = None, + ) -> NDArray[object_]: ... + + def splitlines(self, keepends: b_co | None = None) -> NDArray[object_]: ... + + @overload + def startswith( + self: _CharArray[str_], + prefix: U_co, + start: i_co = 0, + end: i_co | None = None, + ) -> NDArray[np.bool]: ... + @overload + def startswith( + self: _CharArray[bytes_], + prefix: S_co, + start: i_co = 0, + end: i_co | None = None, + ) -> NDArray[np.bool]: ... + + @overload + def strip( + self: _CharArray[str_], + chars: U_co | None = None, + ) -> _CharArray[str_]: ... + @overload + def strip( + self: _CharArray[bytes_], + chars: S_co | None = None, + ) -> _CharArray[bytes_]: ... + + @overload + def translate( + self: _CharArray[str_], + table: U_co, + deletechars: U_co | None = None, + ) -> _CharArray[str_]: ... + @overload + def translate( + self: _CharArray[bytes_], + table: S_co, + deletechars: S_co | None = None, + ) -> _CharArray[bytes_]: ... + + def zfill(self, width: i_co) -> Self: ... + def capitalize(self) -> Self: ... + def title(self) -> Self: ... + def swapcase(self) -> Self: ... + def lower(self) -> Self: ... + def upper(self) -> Self: ... + def isalnum(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def isalpha(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def isdigit(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def islower(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def isspace(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def istitle(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def isupper(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def isnumeric(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def isdecimal(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + +# Comparison +@overload +def equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def not_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def not_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def not_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def greater_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def greater_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def greater_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def less_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def less_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def less_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def greater(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def greater(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def greater(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def less(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def less(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def less(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def add(x1: U_co, x2: U_co) -> NDArray[np.str_]: ... +@overload +def add(x1: S_co, x2: S_co) -> NDArray[np.bytes_]: ... +@overload +def add(x1: _StringDTypeSupportsArray, x2: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def add(x1: T_co, x2: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def multiply(a: U_co, i: i_co) -> NDArray[np.str_]: ... +@overload +def multiply(a: S_co, i: i_co) -> NDArray[np.bytes_]: ... +@overload +def multiply(a: _StringDTypeSupportsArray, i: i_co) -> _StringDTypeArray: ... +@overload +def multiply(a: T_co, i: i_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def mod(a: U_co, value: Any) -> NDArray[np.str_]: ... +@overload +def mod(a: S_co, value: Any) -> NDArray[np.bytes_]: ... +@overload +def mod(a: _StringDTypeSupportsArray, value: Any) -> _StringDTypeArray: ... +@overload +def mod(a: T_co, value: Any) -> _StringDTypeOrUnicodeArray: ... + +@overload +def capitalize(a: U_co) -> NDArray[str_]: ... +@overload +def capitalize(a: S_co) -> NDArray[bytes_]: ... +@overload +def capitalize(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def capitalize(a: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def center(a: U_co, width: i_co, fillchar: U_co = " ") -> NDArray[str_]: ... +@overload +def center(a: S_co, width: i_co, fillchar: str | S_co = " ") -> NDArray[bytes_]: ... +@overload +def center(a: _StringDTypeSupportsArray, width: i_co, fillchar: str | _StringDTypeSupportsArray = " ") -> _StringDTypeArray: ... +@overload +def center(a: T_co, width: i_co, fillchar: T_co = " ") -> _StringDTypeOrUnicodeArray: ... + +def decode( + a: S_co, + encoding: str | None = None, + errors: str | None = None, +) -> NDArray[str_]: ... +def encode( + a: U_co | T_co, + encoding: str | None = None, + errors: str | None = None, +) -> NDArray[bytes_]: ... + +@overload +def expandtabs(a: U_co, tabsize: i_co = 8) -> NDArray[str_]: ... +@overload +def expandtabs(a: S_co, tabsize: i_co = 8) -> NDArray[bytes_]: ... +@overload +def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = 8) -> _StringDTypeArray: ... +@overload +def expandtabs(a: T_co, tabsize: i_co = 8) -> _StringDTypeOrUnicodeArray: ... + +@overload +def join(sep: U_co, seq: U_co) -> NDArray[str_]: ... +@overload +def join(sep: S_co, seq: S_co) -> NDArray[bytes_]: ... +@overload +def join(sep: _StringDTypeSupportsArray, seq: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def join(sep: T_co, seq: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def ljust(a: U_co, width: i_co, fillchar: U_co = " ") -> NDArray[str_]: ... +@overload +def ljust(a: S_co, width: i_co, fillchar: str | S_co = " ") -> NDArray[bytes_]: ... +@overload +def ljust(a: _StringDTypeSupportsArray, width: i_co, fillchar: str | _StringDTypeSupportsArray = " ") -> _StringDTypeArray: ... +@overload +def ljust(a: T_co, width: i_co, fillchar: T_co = " ") -> _StringDTypeOrUnicodeArray: ... + +@overload +def lower(a: U_co) -> NDArray[str_]: ... +@overload +def lower(a: S_co) -> NDArray[bytes_]: ... +@overload +def lower(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def lower(a: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def lstrip(a: U_co, chars: U_co | None = None) -> NDArray[str_]: ... +@overload +def lstrip(a: S_co, chars: S_co | None = None) -> NDArray[bytes_]: ... +@overload +def lstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = None) -> _StringDTypeArray: ... +@overload +def lstrip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... + +@overload +def partition(a: U_co, sep: U_co) -> NDArray[str_]: ... +@overload +def partition(a: S_co, sep: S_co) -> NDArray[bytes_]: ... +@overload +def partition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def partition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def replace( + a: U_co, + old: U_co, + new: U_co, + count: i_co | None = -1, +) -> NDArray[str_]: ... +@overload +def replace( + a: S_co, + old: S_co, + new: S_co, + count: i_co | None = -1, +) -> NDArray[bytes_]: ... +@overload +def replace( + a: _StringDTypeSupportsArray, + old: _StringDTypeSupportsArray, + new: _StringDTypeSupportsArray, + count: i_co = -1, +) -> _StringDTypeArray: ... +@overload +def replace( + a: T_co, + old: T_co, + new: T_co, + count: i_co = -1, +) -> _StringDTypeOrUnicodeArray: ... + +@overload +def rjust( + a: U_co, + width: i_co, + fillchar: U_co = " ", +) -> NDArray[str_]: ... +@overload +def rjust( + a: S_co, + width: i_co, + fillchar: str | S_co = " ", +) -> NDArray[bytes_]: ... +@overload +def rjust( + a: _StringDTypeSupportsArray, + width: i_co, + fillchar: str | _StringDTypeSupportsArray = " ", +) -> _StringDTypeArray: ... +@overload +def rjust( + a: T_co, + width: i_co, + fillchar: T_co = " ", +) -> _StringDTypeOrUnicodeArray: ... + +@overload +def rpartition(a: U_co, sep: U_co) -> NDArray[str_]: ... +@overload +def rpartition(a: S_co, sep: S_co) -> NDArray[bytes_]: ... +@overload +def rpartition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def rpartition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def rsplit( + a: U_co, + sep: U_co | None = None, + maxsplit: i_co | None = None, +) -> NDArray[object_]: ... +@overload +def rsplit( + a: S_co, + sep: S_co | None = None, + maxsplit: i_co | None = None, +) -> NDArray[object_]: ... +@overload +def rsplit( + a: _StringDTypeSupportsArray, + sep: _StringDTypeSupportsArray | None = None, + maxsplit: i_co | None = None, +) -> NDArray[object_]: ... +@overload +def rsplit( + a: T_co, + sep: T_co | None = None, + maxsplit: i_co | None = None, +) -> NDArray[object_]: ... + +@overload +def rstrip(a: U_co, chars: U_co | None = None) -> NDArray[str_]: ... +@overload +def rstrip(a: S_co, chars: S_co | None = None) -> NDArray[bytes_]: ... +@overload +def rstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = None) -> _StringDTypeArray: ... +@overload +def rstrip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... + +@overload +def split( + a: U_co, + sep: U_co | None = None, + maxsplit: i_co | None = None, +) -> NDArray[object_]: ... +@overload +def split( + a: S_co, + sep: S_co | None = None, + maxsplit: i_co | None = None, +) -> NDArray[object_]: ... +@overload +def split( + a: _StringDTypeSupportsArray, + sep: _StringDTypeSupportsArray | None = None, + maxsplit: i_co | None = None, +) -> NDArray[object_]: ... +@overload +def split( + a: T_co, + sep: T_co | None = None, + maxsplit: i_co | None = None, +) -> NDArray[object_]: ... + +def splitlines(a: UST_co, keepends: b_co | None = None) -> NDArray[np.object_]: ... + +@overload +def strip(a: U_co, chars: U_co | None = None) -> NDArray[str_]: ... +@overload +def strip(a: S_co, chars: S_co | None = None) -> NDArray[bytes_]: ... +@overload +def strip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = None) -> _StringDTypeArray: ... +@overload +def strip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... + +@overload +def swapcase(a: U_co) -> NDArray[str_]: ... +@overload +def swapcase(a: S_co) -> NDArray[bytes_]: ... +@overload +def swapcase(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def swapcase(a: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def title(a: U_co) -> NDArray[str_]: ... +@overload +def title(a: S_co) -> NDArray[bytes_]: ... +@overload +def title(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def title(a: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def translate( + a: U_co, + table: str, + deletechars: str | None = None, +) -> NDArray[str_]: ... +@overload +def translate( + a: S_co, + table: str, + deletechars: str | None = None, +) -> NDArray[bytes_]: ... +@overload +def translate( + a: _StringDTypeSupportsArray, + table: str, + deletechars: str | None = None, +) -> _StringDTypeArray: ... +@overload +def translate( + a: T_co, + table: str, + deletechars: str | None = None, +) -> _StringDTypeOrUnicodeArray: ... + +@overload +def upper(a: U_co) -> NDArray[str_]: ... +@overload +def upper(a: S_co) -> NDArray[bytes_]: ... +@overload +def upper(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def upper(a: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def zfill(a: U_co, width: i_co) -> NDArray[str_]: ... +@overload +def zfill(a: S_co, width: i_co) -> NDArray[bytes_]: ... +@overload +def zfill(a: _StringDTypeSupportsArray, width: i_co) -> _StringDTypeArray: ... +@overload +def zfill(a: T_co, width: i_co) -> _StringDTypeOrUnicodeArray: ... + +# String information +@overload +def count( + a: U_co, + sub: U_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[int_]: ... +@overload +def count( + a: S_co, + sub: S_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[int_]: ... +@overload +def count( + a: T_co, + sub: T_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.int_]: ... + +@overload +def endswith( + a: U_co, + suffix: U_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.bool]: ... +@overload +def endswith( + a: S_co, + suffix: S_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.bool]: ... +@overload +def endswith( + a: T_co, + suffix: T_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.bool]: ... + +@overload +def find( + a: U_co, + sub: U_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[int_]: ... +@overload +def find( + a: S_co, + sub: S_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[int_]: ... +@overload +def find( + a: T_co, + sub: T_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.int_]: ... + +@overload +def index( + a: U_co, + sub: U_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[int_]: ... +@overload +def index( + a: S_co, + sub: S_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[int_]: ... +@overload +def index( + a: T_co, + sub: T_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.int_]: ... + +def isalpha(a: UST_co) -> NDArray[np.bool]: ... +def isalnum(a: UST_co) -> NDArray[np.bool]: ... +def isdecimal(a: U_co | T_co) -> NDArray[np.bool]: ... +def isdigit(a: UST_co) -> NDArray[np.bool]: ... +def islower(a: UST_co) -> NDArray[np.bool]: ... +def isnumeric(a: U_co | T_co) -> NDArray[np.bool]: ... +def isspace(a: UST_co) -> NDArray[np.bool]: ... +def istitle(a: UST_co) -> NDArray[np.bool]: ... +def isupper(a: UST_co) -> NDArray[np.bool]: ... + +@overload +def rfind( + a: U_co, + sub: U_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[int_]: ... +@overload +def rfind( + a: S_co, + sub: S_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[int_]: ... +@overload +def rfind( + a: T_co, + sub: T_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.int_]: ... + +@overload +def rindex( + a: U_co, + sub: U_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[int_]: ... +@overload +def rindex( + a: S_co, + sub: S_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[int_]: ... +@overload +def rindex( + a: T_co, + sub: T_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.int_]: ... + +@overload +def startswith( + a: U_co, + prefix: U_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.bool]: ... +@overload +def startswith( + a: S_co, + prefix: S_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.bool]: ... +@overload +def startswith( + a: T_co, + prefix: T_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.bool]: ... + +def str_len(A: UST_co) -> NDArray[int_]: ... + +# Overload 1 and 2: str- or bytes-based array-likes +# overload 3 and 4: arbitrary object with unicode=False (-> bytes_) +# overload 5 and 6: arbitrary object with unicode=True (-> str_) +# overload 7: arbitrary object with unicode=None (default) (-> str_ | bytes_) +@overload +def array( + obj: U_co, + itemsize: int | None = None, + copy: bool = True, + unicode: L[True] | None = None, + order: _OrderKACF = None, +) -> _CharArray[str_]: ... +@overload +def array( + obj: S_co, + itemsize: int | None = None, + copy: bool = True, + unicode: L[False] | None = None, + order: _OrderKACF = None, +) -> _CharArray[bytes_]: ... +@overload +def array( + obj: object, + itemsize: int | None, + copy: bool, + unicode: L[False], + order: _OrderKACF = None, +) -> _CharArray[bytes_]: ... +@overload +def array( + obj: object, + itemsize: int | None = None, + copy: bool = True, + *, + unicode: L[False], + order: _OrderKACF = None, +) -> _CharArray[bytes_]: ... +@overload +def array( + obj: object, + itemsize: int | None, + copy: bool, + unicode: L[True], + order: _OrderKACF = None, +) -> _CharArray[str_]: ... +@overload +def array( + obj: object, + itemsize: int | None = None, + copy: bool = True, + *, + unicode: L[True], + order: _OrderKACF = None, +) -> _CharArray[str_]: ... +@overload +def array( + obj: object, + itemsize: int | None = None, + copy: bool = True, + unicode: bool | None = None, + order: _OrderKACF = None, +) -> _CharArray[str_] | _CharArray[bytes_]: ... + +@overload +def asarray( + obj: U_co, + itemsize: int | None = None, + unicode: L[True] | None = None, + order: _OrderKACF = None, +) -> _CharArray[str_]: ... +@overload +def asarray( + obj: S_co, + itemsize: int | None = None, + unicode: L[False] | None = None, + order: _OrderKACF = None, +) -> _CharArray[bytes_]: ... +@overload +def asarray( + obj: object, + itemsize: int | None, + unicode: L[False], + order: _OrderKACF = None, +) -> _CharArray[bytes_]: ... +@overload +def asarray( + obj: object, + itemsize: int | None = None, + *, + unicode: L[False], + order: _OrderKACF = None, +) -> _CharArray[bytes_]: ... +@overload +def asarray( + obj: object, + itemsize: int | None, + unicode: L[True], + order: _OrderKACF = None, +) -> _CharArray[str_]: ... +@overload +def asarray( + obj: object, + itemsize: int | None = None, + *, + unicode: L[True], + order: _OrderKACF = None, +) -> _CharArray[str_]: ... +@overload +def asarray( + obj: object, + itemsize: int | None = None, + unicode: bool | None = None, + order: _OrderKACF = None, +) -> _CharArray[str_] | _CharArray[bytes_]: ... diff --git a/local_packages/numpy/_core/einsumfunc.py b/local_packages/numpy/_core/einsumfunc.py new file mode 100644 index 0000000..9461994 --- /dev/null +++ b/local_packages/numpy/_core/einsumfunc.py @@ -0,0 +1,1650 @@ +""" +Implementation of optimized einsum. + +""" +import functools +import itertools +import operator + +from numpy._core.multiarray import c_einsum, matmul +from numpy._core.numeric import asanyarray, reshape +from numpy._core.overrides import array_function_dispatch +from numpy._core.umath import multiply + +__all__ = ['einsum', 'einsum_path'] + +# importing string for string.ascii_letters would be too slow +# the first import before caching has been measured to take 800 µs (#23777) +# imports begin with uppercase to mimic ASCII values to avoid sorting issues +einsum_symbols = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' +einsum_symbols_set = set(einsum_symbols) + + +def _flop_count(idx_contraction, inner, num_terms, size_dictionary): + """ + Computes the number of FLOPS in the contraction. + + Parameters + ---------- + idx_contraction : iterable + The indices involved in the contraction + inner : bool + Does this contraction require an inner product? + num_terms : int + The number of terms in a contraction + size_dictionary : dict + The size of each of the indices in idx_contraction + + Returns + ------- + flop_count : int + The total number of FLOPS required for the contraction. + + Examples + -------- + + >>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5}) + 30 + + >>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5}) + 60 + + """ + + overall_size = _compute_size_by_dict(idx_contraction, size_dictionary) + op_factor = max(1, num_terms - 1) + if inner: + op_factor += 1 + + return overall_size * op_factor + +def _compute_size_by_dict(indices, idx_dict): + """ + Computes the product of the elements in indices based on the dictionary + idx_dict. + + Parameters + ---------- + indices : iterable + Indices to base the product on. + idx_dict : dictionary + Dictionary of index sizes + + Returns + ------- + ret : int + The resulting product. + + Examples + -------- + >>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5}) + 90 + + """ + ret = 1 + for i in indices: + ret *= idx_dict[i] + return ret + + +def _find_contraction(positions, input_sets, output_set): + """ + Finds the contraction for a given set of input and output sets. + + Parameters + ---------- + positions : iterable + Integer positions of terms used in the contraction. + input_sets : list + List of sets that represent the lhs side of the einsum subscript + output_set : set + Set that represents the rhs side of the overall einsum subscript + + Returns + ------- + new_result : set + The indices of the resulting contraction + remaining : list + List of sets that have not been contracted, the new set is appended to + the end of this list + idx_removed : set + Indices removed from the entire contraction + idx_contraction : set + The indices used in the current contraction + + Examples + -------- + + # A simple dot product test case + >>> pos = (0, 1) + >>> isets = [set('ab'), set('bc')] + >>> oset = set('ac') + >>> _find_contraction(pos, isets, oset) + ({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'}) + + # A more complex case with additional terms in the contraction + >>> pos = (0, 2) + >>> isets = [set('abd'), set('ac'), set('bdc')] + >>> oset = set('ac') + >>> _find_contraction(pos, isets, oset) + ({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'}) + """ + + idx_contract = set() + idx_remain = output_set.copy() + remaining = [] + for ind, value in enumerate(input_sets): + if ind in positions: + idx_contract |= value + else: + remaining.append(value) + idx_remain |= value + + new_result = idx_remain & idx_contract + idx_removed = (idx_contract - new_result) + remaining.append(new_result) + + return (new_result, remaining, idx_removed, idx_contract) + + +def _optimal_path(input_sets, output_set, idx_dict, memory_limit): + """ + Computes all possible pair contractions, sieves the results based + on ``memory_limit`` and returns the lowest cost path. This algorithm + scales factorial with respect to the elements in the list ``input_sets``. + + Parameters + ---------- + input_sets : list + List of sets that represent the lhs side of the einsum subscript + output_set : set + Set that represents the rhs side of the overall einsum subscript + idx_dict : dictionary + Dictionary of index sizes + memory_limit : int + The maximum number of elements in a temporary array + + Returns + ------- + path : list + The optimal contraction order within the memory limit constraint. + + Examples + -------- + >>> isets = [set('abd'), set('ac'), set('bdc')] + >>> oset = set() + >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4} + >>> _optimal_path(isets, oset, idx_sizes, 5000) + [(0, 2), (0, 1)] + """ + + full_results = [(0, [], input_sets)] + for iteration in range(len(input_sets) - 1): + iter_results = [] + + # Compute all unique pairs + for curr in full_results: + cost, positions, remaining = curr + for con in itertools.combinations( + range(len(input_sets) - iteration), 2 + ): + + # Find the contraction + cont = _find_contraction(con, remaining, output_set) + new_result, new_input_sets, idx_removed, idx_contract = cont + + # Sieve the results based on memory_limit + new_size = _compute_size_by_dict(new_result, idx_dict) + if new_size > memory_limit: + continue + + # Build (total_cost, positions, indices_remaining) + total_cost = cost + _flop_count( + idx_contract, idx_removed, len(con), idx_dict + ) + new_pos = positions + [con] + iter_results.append((total_cost, new_pos, new_input_sets)) + + # Update combinatorial list, if we did not find anything return best + # path + remaining contractions + if iter_results: + full_results = iter_results + else: + path = min(full_results, key=lambda x: x[0])[1] + path += [tuple(range(len(input_sets) - iteration))] + return path + + # If we have not found anything return single einsum contraction + if len(full_results) == 0: + return [tuple(range(len(input_sets)))] + + path = min(full_results, key=lambda x: x[0])[1] + return path + +def _parse_possible_contraction( + positions, input_sets, output_set, idx_dict, + memory_limit, path_cost, naive_cost + ): + """Compute the cost (removed size + flops) and resultant indices for + performing the contraction specified by ``positions``. + + Parameters + ---------- + positions : tuple of int + The locations of the proposed tensors to contract. + input_sets : list of sets + The indices found on each tensors. + output_set : set + The output indices of the expression. + idx_dict : dict + Mapping of each index to its size. + memory_limit : int + The total allowed size for an intermediary tensor. + path_cost : int + The contraction cost so far. + naive_cost : int + The cost of the unoptimized expression. + + Returns + ------- + cost : (int, int) + A tuple containing the size of any indices removed, and the flop cost. + positions : tuple of int + The locations of the proposed tensors to contract. + new_input_sets : list of sets + The resulting new list of indices if this proposed contraction + is performed. + + """ + + # Find the contraction + contract = _find_contraction(positions, input_sets, output_set) + idx_result, new_input_sets, idx_removed, idx_contract = contract + + # Sieve the results based on memory_limit + new_size = _compute_size_by_dict(idx_result, idx_dict) + if new_size > memory_limit: + return None + + # Build sort tuple + old_sizes = ( + _compute_size_by_dict(input_sets[p], idx_dict) for p in positions + ) + removed_size = sum(old_sizes) - new_size + + # NB: removed_size used to be just the size of any removed indices i.e.: + # helpers.compute_size_by_dict(idx_removed, idx_dict) + cost = _flop_count(idx_contract, idx_removed, len(positions), idx_dict) + sort = (-removed_size, cost) + + # Sieve based on total cost as well + if (path_cost + cost) > naive_cost: + return None + + # Add contraction to possible choices + return [sort, positions, new_input_sets] + + +def _update_other_results(results, best): + """Update the positions and provisional input_sets of ``results`` + based on performing the contraction result ``best``. Remove any + involving the tensors contracted. + + Parameters + ---------- + results : list + List of contraction results produced by + ``_parse_possible_contraction``. + best : list + The best contraction of ``results`` i.e. the one that + will be performed. + + Returns + ------- + mod_results : list + The list of modified results, updated with outcome of + ``best`` contraction. + """ + + best_con = best[1] + bx, by = best_con + mod_results = [] + + for cost, (x, y), con_sets in results: + + # Ignore results involving tensors just contracted + if x in best_con or y in best_con: + continue + + # Update the input_sets + del con_sets[by - int(by > x) - int(by > y)] + del con_sets[bx - int(bx > x) - int(bx > y)] + con_sets.insert(-1, best[2][-1]) + + # Update the position indices + mod_con = x - int(x > bx) - int(x > by), y - int(y > bx) - int(y > by) + mod_results.append((cost, mod_con, con_sets)) + + return mod_results + +def _greedy_path(input_sets, output_set, idx_dict, memory_limit): + """ + Finds the path by contracting the best pair until the input list is + exhausted. The best pair is found by minimizing the tuple + ``(-prod(indices_removed), cost)``. What this amounts to is prioritizing + matrix multiplication or inner product operations, then Hadamard like + operations, and finally outer operations. Outer products are limited by + ``memory_limit``. This algorithm scales cubically with respect to the + number of elements in the list ``input_sets``. + + Parameters + ---------- + input_sets : list + List of sets that represent the lhs side of the einsum subscript + output_set : set + Set that represents the rhs side of the overall einsum subscript + idx_dict : dictionary + Dictionary of index sizes + memory_limit : int + The maximum number of elements in a temporary array + + Returns + ------- + path : list + The greedy contraction order within the memory limit constraint. + + Examples + -------- + >>> isets = [set('abd'), set('ac'), set('bdc')] + >>> oset = set() + >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4} + >>> _greedy_path(isets, oset, idx_sizes, 5000) + [(0, 2), (0, 1)] + """ + + # Handle trivial cases that leaked through + if len(input_sets) == 1: + return [(0,)] + elif len(input_sets) == 2: + return [(0, 1)] + + # Build up a naive cost + contract = _find_contraction( + range(len(input_sets)), input_sets, output_set + ) + idx_result, new_input_sets, idx_removed, idx_contract = contract + naive_cost = _flop_count( + idx_contract, idx_removed, len(input_sets), idx_dict + ) + + # Initially iterate over all pairs + comb_iter = itertools.combinations(range(len(input_sets)), 2) + known_contractions = [] + + path_cost = 0 + path = [] + + for iteration in range(len(input_sets) - 1): + + # Iterate over all pairs on the first step, only previously + # found pairs on subsequent steps + for positions in comb_iter: + + # Always initially ignore outer products + if input_sets[positions[0]].isdisjoint(input_sets[positions[1]]): + continue + + result = _parse_possible_contraction( + positions, input_sets, output_set, idx_dict, + memory_limit, path_cost, naive_cost + ) + if result is not None: + known_contractions.append(result) + + # If we do not have a inner contraction, rescan pairs + # including outer products + if len(known_contractions) == 0: + + # Then check the outer products + for positions in itertools.combinations( + range(len(input_sets)), 2 + ): + result = _parse_possible_contraction( + positions, input_sets, output_set, idx_dict, + memory_limit, path_cost, naive_cost + ) + if result is not None: + known_contractions.append(result) + + # If we still did not find any remaining contractions, + # default back to einsum like behavior + if len(known_contractions) == 0: + path.append(tuple(range(len(input_sets)))) + break + + # Sort based on first index + best = min(known_contractions, key=lambda x: x[0]) + + # Now propagate as many unused contractions as possible + # to the next iteration + known_contractions = _update_other_results(known_contractions, best) + + # Next iteration only compute contractions with the new tensor + # All other contractions have been accounted for + input_sets = best[2] + new_tensor_pos = len(input_sets) - 1 + comb_iter = ((i, new_tensor_pos) for i in range(new_tensor_pos)) + + # Update path and total cost + path.append(best[1]) + path_cost += best[0][1] + + return path + + +def _parse_einsum_input(operands): + """ + A reproduction of einsum c side einsum parsing in python. + + Returns + ------- + input_strings : str + Parsed input strings + output_string : str + Parsed output string + operands : list of array_like + The operands to use in the numpy contraction + + Examples + -------- + The operand list is simplified to reduce printing: + + >>> np.random.seed(123) + >>> a = np.random.rand(4, 4) + >>> b = np.random.rand(4, 4, 4) + >>> _parse_einsum_input(('...a,...a->...', a, b)) + ('za,xza', 'xz', [a, b]) # may vary + + >>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0])) + ('za,xza', 'xz', [a, b]) # may vary + """ + + if len(operands) == 0: + raise ValueError("No input operands") + + if isinstance(operands[0], str): + subscripts = operands[0].replace(" ", "") + operands = [asanyarray(v) for v in operands[1:]] + + # Ensure all characters are valid + for s in subscripts: + if s in '.,->': + continue + if s not in einsum_symbols: + raise ValueError(f"Character {s} is not a valid symbol.") + + else: + tmp_operands = list(operands) + operand_list = [] + subscript_list = [] + for p in range(len(operands) // 2): + operand_list.append(tmp_operands.pop(0)) + subscript_list.append(tmp_operands.pop(0)) + + output_list = tmp_operands[-1] if len(tmp_operands) else None + operands = [asanyarray(v) for v in operand_list] + subscripts = "" + last = len(subscript_list) - 1 + for num, sub in enumerate(subscript_list): + for s in sub: + if s is Ellipsis: + subscripts += "..." + else: + try: + s = operator.index(s) + except TypeError as e: + raise TypeError( + "For this input type lists must contain " + "either int or Ellipsis" + ) from e + subscripts += einsum_symbols[s] + if num != last: + subscripts += "," + + if output_list is not None: + subscripts += "->" + for s in output_list: + if s is Ellipsis: + subscripts += "..." + else: + try: + s = operator.index(s) + except TypeError as e: + raise TypeError( + "For this input type lists must contain " + "either int or Ellipsis" + ) from e + subscripts += einsum_symbols[s] + # Check for proper "->" + if ("-" in subscripts) or (">" in subscripts): + invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1) + if invalid or (subscripts.count("->") != 1): + raise ValueError("Subscripts can only contain one '->'.") + + # Parse ellipses + if "." in subscripts: + used = subscripts.replace(".", "").replace(",", "").replace("->", "") + unused = list(einsum_symbols_set - set(used)) + ellipse_inds = "".join(unused) + longest = 0 + + if "->" in subscripts: + input_tmp, output_sub = subscripts.split("->") + split_subscripts = input_tmp.split(",") + out_sub = True + else: + split_subscripts = subscripts.split(',') + out_sub = False + + for num, sub in enumerate(split_subscripts): + if "." in sub: + if (sub.count(".") != 3) or (sub.count("...") != 1): + raise ValueError("Invalid Ellipses.") + + # Take into account numerical values + if operands[num].shape == (): + ellipse_count = 0 + else: + ellipse_count = max(operands[num].ndim, 1) + ellipse_count -= (len(sub) - 3) + + if ellipse_count > longest: + longest = ellipse_count + + if ellipse_count < 0: + raise ValueError("Ellipses lengths do not match.") + elif ellipse_count == 0: + split_subscripts[num] = sub.replace('...', '') + else: + rep_inds = ellipse_inds[-ellipse_count:] + split_subscripts[num] = sub.replace('...', rep_inds) + + subscripts = ",".join(split_subscripts) + if longest == 0: + out_ellipse = "" + else: + out_ellipse = ellipse_inds[-longest:] + + if out_sub: + subscripts += "->" + output_sub.replace("...", out_ellipse) + else: + # Special care for outputless ellipses + output_subscript = "" + tmp_subscripts = subscripts.replace(",", "") + for s in sorted(set(tmp_subscripts)): + if s not in (einsum_symbols): + raise ValueError(f"Character {s} is not a valid symbol.") + if tmp_subscripts.count(s) == 1: + output_subscript += s + normal_inds = ''.join(sorted(set(output_subscript) - + set(out_ellipse))) + + subscripts += "->" + out_ellipse + normal_inds + + # Build output string if does not exist + if "->" in subscripts: + input_subscripts, output_subscript = subscripts.split("->") + else: + input_subscripts = subscripts + # Build output subscripts + tmp_subscripts = subscripts.replace(",", "") + output_subscript = "" + for s in sorted(set(tmp_subscripts)): + if s not in einsum_symbols: + raise ValueError(f"Character {s} is not a valid symbol.") + if tmp_subscripts.count(s) == 1: + output_subscript += s + + # Make sure output subscripts are in the input + for char in output_subscript: + if output_subscript.count(char) != 1: + raise ValueError("Output character %s appeared more than once in " + "the output." % char) + if char not in input_subscripts: + raise ValueError(f"Output character {char} did not appear in the input") + + # Make sure number operands is equivalent to the number of terms + if len(input_subscripts.split(',')) != len(operands): + raise ValueError("Number of einsum subscripts must be equal to the " + "number of operands.") + + return (input_subscripts, output_subscript, operands) + + +def _einsum_path_dispatcher(*operands, optimize=None, einsum_call=None): + # NOTE: technically, we should only dispatch on array-like arguments, not + # subscripts (given as strings). But separating operands into + # arrays/subscripts is a little tricky/slow (given einsum's two supported + # signatures), so as a practical shortcut we dispatch on everything. + # Strings will be ignored for dispatching since they don't define + # __array_function__. + return operands + + +@array_function_dispatch(_einsum_path_dispatcher, module='numpy') +def einsum_path(*operands, optimize='greedy', einsum_call=False): + """ + einsum_path(subscripts, *operands, optimize='greedy') + + Evaluates the lowest cost contraction order for an einsum expression by + considering the creation of intermediate arrays. + + Parameters + ---------- + subscripts : str + Specifies the subscripts for summation. + *operands : list of array_like + These are the arrays for the operation. + optimize : {bool, list, tuple, 'greedy', 'optimal'} + Choose the type of path. If a tuple is provided, the second argument is + assumed to be the maximum intermediate size created. If only a single + argument is provided the largest input or output array size is used + as a maximum intermediate size. + + * if a list is given that starts with ``einsum_path``, uses this as the + contraction path + * if False no optimization is taken + * if True defaults to the 'greedy' algorithm + * 'optimal' An algorithm that combinatorially explores all possible + ways of contracting the listed tensors and chooses the least costly + path. Scales exponentially with the number of terms in the + contraction. + * 'greedy' An algorithm that chooses the best pair contraction + at each step. Effectively, this algorithm searches the largest inner, + Hadamard, and then outer products at each step. Scales cubically with + the number of terms in the contraction. Equivalent to the 'optimal' + path for most contractions. + + Default is 'greedy'. + + Returns + ------- + path : list of tuples + A list representation of the einsum path. + string_repr : str + A printable representation of the einsum path. + + Notes + ----- + The resulting path indicates which terms of the input contraction should be + contracted first, the result of this contraction is then appended to the + end of the contraction list. This list can then be iterated over until all + intermediate contractions are complete. + + See Also + -------- + einsum, linalg.multi_dot + + Examples + -------- + + We can begin with a chain dot example. In this case, it is optimal to + contract the ``b`` and ``c`` tensors first as represented by the first + element of the path ``(1, 2)``. The resulting tensor is added to the end + of the contraction and the remaining contraction ``(0, 1)`` is then + completed. + + >>> np.random.seed(123) + >>> a = np.random.rand(2, 2) + >>> b = np.random.rand(2, 5) + >>> c = np.random.rand(5, 2) + >>> path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy') + >>> print(path_info[0]) + ['einsum_path', (1, 2), (0, 1)] + >>> print(path_info[1]) + Complete contraction: ij,jk,kl->il # may vary + Naive scaling: 4 + Optimized scaling: 3 + Naive FLOP count: 1.600e+02 + Optimized FLOP count: 5.600e+01 + Theoretical speedup: 2.857 + Largest intermediate: 4.000e+00 elements + ------------------------------------------------------------------------- + scaling current remaining + ------------------------------------------------------------------------- + 3 kl,jk->jl ij,jl->il + 3 jl,ij->il il->il + + + A more complex index transformation example. + + >>> I = np.random.rand(10, 10, 10, 10) + >>> C = np.random.rand(10, 10) + >>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C, + ... optimize='greedy') + + >>> print(path_info[0]) + ['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)] + >>> print(path_info[1]) + Complete contraction: ea,fb,abcd,gc,hd->efgh # may vary + Naive scaling: 8 + Optimized scaling: 5 + Naive FLOP count: 8.000e+08 + Optimized FLOP count: 8.000e+05 + Theoretical speedup: 1000.000 + Largest intermediate: 1.000e+04 elements + -------------------------------------------------------------------------- + scaling current remaining + -------------------------------------------------------------------------- + 5 abcd,ea->bcde fb,gc,hd,bcde->efgh + 5 bcde,fb->cdef gc,hd,cdef->efgh + 5 cdef,gc->defg hd,defg->efgh + 5 defg,hd->efgh efgh->efgh + """ + + # Figure out what the path really is + path_type = optimize + if path_type is True: + path_type = 'greedy' + if path_type is None: + path_type = False + + explicit_einsum_path = False + memory_limit = None + + # No optimization or a named path algorithm + if (path_type is False) or isinstance(path_type, str): + pass + + # Given an explicit path + elif len(path_type) and (path_type[0] == 'einsum_path'): + explicit_einsum_path = True + + # Path tuple with memory limit + elif ((len(path_type) == 2) and isinstance(path_type[0], str) and + isinstance(path_type[1], (int, float))): + memory_limit = int(path_type[1]) + path_type = path_type[0] + + else: + raise TypeError(f"Did not understand the path: {str(path_type)}") + + # Hidden option, only einsum should call this + einsum_call_arg = einsum_call + + # Python side parsing + input_subscripts, output_subscript, operands = ( + _parse_einsum_input(operands) + ) + + # Build a few useful list and sets + input_list = input_subscripts.split(',') + num_inputs = len(input_list) + input_sets = [set(x) for x in input_list] + output_set = set(output_subscript) + indices = set(input_subscripts.replace(',', '')) + num_indices = len(indices) + + # Get length of each unique dimension and ensure all dimensions are correct + dimension_dict = {} + for tnum, term in enumerate(input_list): + sh = operands[tnum].shape + if len(sh) != len(term): + raise ValueError("Einstein sum subscript %s does not contain the " + "correct number of indices for operand %d." + % (input_subscripts[tnum], tnum)) + for cnum, char in enumerate(term): + dim = sh[cnum] + + if char in dimension_dict.keys(): + # For broadcasting cases we always want the largest dim size + if dimension_dict[char] == 1: + dimension_dict[char] = dim + elif dim not in (1, dimension_dict[char]): + raise ValueError("Size of label '%s' for operand %d (%d) " + "does not match previous terms (%d)." + % (char, tnum, dimension_dict[char], dim)) + else: + dimension_dict[char] = dim + + # Compute size of each input array plus the output array + size_list = [_compute_size_by_dict(term, dimension_dict) + for term in input_list + [output_subscript]] + max_size = max(size_list) + + if memory_limit is None: + memory_arg = max_size + else: + memory_arg = memory_limit + + # Compute the path + if explicit_einsum_path: + path = path_type[1:] + elif ( + (path_type is False) + or (num_inputs in [1, 2]) + or (indices == output_set) + ): + # Nothing to be optimized, leave it to einsum + path = [tuple(range(num_inputs))] + elif path_type == "greedy": + path = _greedy_path( + input_sets, output_set, dimension_dict, memory_arg + ) + elif path_type == "optimal": + path = _optimal_path( + input_sets, output_set, dimension_dict, memory_arg + ) + else: + raise KeyError("Path name %s not found", path_type) + + cost_list, scale_list, size_list, contraction_list = [], [], [], [] + + # Build contraction tuple (positions, gemm, einsum_str, remaining) + for cnum, contract_inds in enumerate(path): + # Make sure we remove inds from right to left + contract_inds = tuple(sorted(contract_inds, reverse=True)) + + contract = _find_contraction(contract_inds, input_sets, output_set) + out_inds, input_sets, idx_removed, idx_contract = contract + + if not einsum_call_arg: + # these are only needed for printing info + cost = _flop_count( + idx_contract, idx_removed, len(contract_inds), dimension_dict + ) + cost_list.append(cost) + scale_list.append(len(idx_contract)) + size_list.append(_compute_size_by_dict(out_inds, dimension_dict)) + + tmp_inputs = [] + for x in contract_inds: + tmp_inputs.append(input_list.pop(x)) + + # Last contraction + if (cnum - len(path)) == -1: + idx_result = output_subscript + else: + sort_result = [(dimension_dict[ind], ind) for ind in out_inds] + idx_result = "".join([x[1] for x in sorted(sort_result)]) + + input_list.append(idx_result) + einsum_str = ",".join(tmp_inputs) + "->" + idx_result + + contraction = (contract_inds, einsum_str, input_list[:]) + contraction_list.append(contraction) + + if len(input_list) != 1: + # Explicit "einsum_path" is usually trusted, but we detect this kind of + # mistake in order to prevent from returning an intermediate value. + raise RuntimeError( + f"Invalid einsum_path is specified: {len(input_list) - 1} more " + "operands has to be contracted.") + + if einsum_call_arg: + return (operands, contraction_list) + + # Return the path along with a nice string representation + overall_contraction = input_subscripts + "->" + output_subscript + header = ("scaling", "current", "remaining") + + # Compute naive cost + # This isn't quite right, need to look into exactly how einsum does this + inner_product = ( + sum(len(set(x)) for x in input_subscripts.split(',')) - num_indices + ) > 0 + naive_cost = _flop_count( + indices, inner_product, num_inputs, dimension_dict + ) + + opt_cost = sum(cost_list) + 1 + speedup = naive_cost / opt_cost + max_i = max(size_list) + + path_print = f" Complete contraction: {overall_contraction}\n" + path_print += f" Naive scaling: {num_indices}\n" + path_print += " Optimized scaling: %d\n" % max(scale_list) + path_print += f" Naive FLOP count: {naive_cost:.3e}\n" + path_print += f" Optimized FLOP count: {opt_cost:.3e}\n" + path_print += f" Theoretical speedup: {speedup:3.3f}\n" + path_print += f" Largest intermediate: {max_i:.3e} elements\n" + path_print += "-" * 74 + "\n" + path_print += "%6s %24s %40s\n" % header + path_print += "-" * 74 + + for n, contraction in enumerate(contraction_list): + _, einsum_str, remaining = contraction + remaining_str = ",".join(remaining) + "->" + output_subscript + path_run = (scale_list[n], einsum_str, remaining_str) + path_print += "\n%4d %24s %40s" % path_run + + path = ['einsum_path'] + path + return (path, path_print) + + +def _parse_eq_to_pure_multiplication(a_term, shape_a, b_term, shape_b, out): + """If there are no contracted indices, then we can directly transpose and + insert singleton dimensions into ``a`` and ``b`` such that (broadcast) + elementwise multiplication performs the einsum. + + No need to cache this as it is within the cached + ``_parse_eq_to_batch_matmul``. + + """ + desired_a = "" + desired_b = "" + new_shape_a = [] + new_shape_b = [] + for ix in out: + if ix in a_term: + desired_a += ix + new_shape_a.append(shape_a[a_term.index(ix)]) + else: + new_shape_a.append(1) + if ix in b_term: + desired_b += ix + new_shape_b.append(shape_b[b_term.index(ix)]) + else: + new_shape_b.append(1) + + if desired_a != a_term: + eq_a = f"{a_term}->{desired_a}" + else: + eq_a = None + if desired_b != b_term: + eq_b = f"{b_term}->{desired_b}" + else: + eq_b = None + + return ( + eq_a, + eq_b, + new_shape_a, + new_shape_b, + None, # new_shape_ab, not needed since not fusing + None, # perm_ab, not needed as we transpose a and b first + True, # pure_multiplication=True + ) + + +@functools.lru_cache(2**12) +def _parse_eq_to_batch_matmul(eq, shape_a, shape_b): + """Cached parsing of a two term einsum equation into the necessary + sequence of arguments for contracttion via batched matrix multiplication. + The steps we need to specify are: + + 1. Remove repeated and trivial indices from the left and right terms, + and transpose them, done as a single einsum. + 2. Fuse the remaining indices so we have two 3D tensors. + 3. Perform the batched matrix multiplication. + 4. Unfuse the output to get the desired final index order. + + """ + lhs, out = eq.split("->") + a_term, b_term = lhs.split(",") + + if len(a_term) != len(shape_a): + raise ValueError(f"Term '{a_term}' does not match shape {shape_a}.") + if len(b_term) != len(shape_b): + raise ValueError(f"Term '{b_term}' does not match shape {shape_b}.") + + sizes = {} + singletons = set() + + # parse left term to unique indices with size > 1 + left = {} + for ix, d in zip(a_term, shape_a): + if d == 1: + # everything (including broadcasting) works nicely if simply ignore + # such dimensions, but we do need to track if they appear in output + # and thus should be reintroduced later + singletons.add(ix) + continue + if sizes.setdefault(ix, d) != d: + # set and check size + raise ValueError( + f"Index {ix} has mismatched sizes {sizes[ix]} and {d}." + ) + left[ix] = True + + # parse right term to unique indices with size > 1 + right = {} + for ix, d in zip(b_term, shape_b): + # broadcast indices (size 1 on one input and size != 1 + # on the other) should not be treated as singletons + if d == 1: + if ix not in left: + singletons.add(ix) + continue + singletons.discard(ix) + + if sizes.setdefault(ix, d) != d: + # set and check size + raise ValueError( + f"Index {ix} has mismatched sizes {sizes[ix]} and {d}." + ) + right[ix] = True + + # now we classify the unique size > 1 indices only + bat_inds = [] # appears on A, B, O + con_inds = [] # appears on A, B, . + a_keep = [] # appears on A, ., O + b_keep = [] # appears on ., B, O + # other indices (appearing on A or B only) will + # be summed or traced out prior to the matmul + for ix in left: + if right.pop(ix, False): + if ix in out: + bat_inds.append(ix) + else: + con_inds.append(ix) + elif ix in out: + a_keep.append(ix) + # now only indices unique to right remain + for ix in right: + if ix in out: + b_keep.append(ix) + + if not con_inds: + # contraction is pure multiplication, prepare inputs differently + return _parse_eq_to_pure_multiplication( + a_term, shape_a, b_term, shape_b, out + ) + + # only need the size one indices that appear in the output + singletons = [ix for ix in out if ix in singletons] + + # take diagonal, remove any trivial axes and transpose left + desired_a = "".join((*bat_inds, *a_keep, *con_inds)) + if a_term != desired_a: + eq_a = f"{a_term}->{desired_a}" + else: + eq_a = None + + # take diagonal, remove any trivial axes and transpose right + desired_b = "".join((*bat_inds, *con_inds, *b_keep)) + if b_term != desired_b: + eq_b = f"{b_term}->{desired_b}" + else: + eq_b = None + + # then we want to reshape + if bat_inds: + lgroups = (bat_inds, a_keep, con_inds) + rgroups = (bat_inds, con_inds, b_keep) + ogroups = (bat_inds, a_keep, b_keep) + else: + # avoid size 1 batch dimension if no batch indices + lgroups = (a_keep, con_inds) + rgroups = (con_inds, b_keep) + ogroups = (a_keep, b_keep) + + if any(len(group) != 1 for group in lgroups): + # need to fuse 'kept' and contracted indices + # (though could allow batch indices to be broadcast) + new_shape_a = tuple( + functools.reduce(operator.mul, (sizes[ix] for ix in ix_group), 1) + for ix_group in lgroups + ) + else: + new_shape_a = None + + if any(len(group) != 1 for group in rgroups): + # need to fuse 'kept' and contracted indices + # (though could allow batch indices to be broadcast) + new_shape_b = tuple( + functools.reduce(operator.mul, (sizes[ix] for ix in ix_group), 1) + for ix_group in rgroups + ) + else: + new_shape_b = None + + if any(len(group) != 1 for group in ogroups) or singletons: + new_shape_ab = (1,) * len(singletons) + tuple( + sizes[ix] for ix_group in ogroups for ix in ix_group + ) + else: + new_shape_ab = None + + # then we might need to permute the matmul produced output: + out_produced = "".join((*singletons, *bat_inds, *a_keep, *b_keep)) + if out_produced != out: + perm_ab = tuple(out_produced.index(ix) for ix in out) + else: + perm_ab = None + + return ( + eq_a, + eq_b, + new_shape_a, + new_shape_b, + new_shape_ab, + perm_ab, + False, # pure_multiplication=False + ) + + +@functools.lru_cache(maxsize=64) +def _parse_output_order(order, a_is_fcontig, b_is_fcontig): + order = order.upper() + if order == "K": + return None + elif order in "CF": + return order + elif order == "A": + if a_is_fcontig and b_is_fcontig: + return "F" + else: + return "C" + else: + raise ValueError( + "ValueError: order must be one of " + f"'C', 'F', 'A', or 'K' (got '{order}')" + ) + + +def bmm_einsum(eq, a, b, out=None, **kwargs): + """Perform arbitrary pairwise einsums using only ``matmul``, or + ``multiply`` if no contracted indices are involved (plus maybe single term + ``einsum`` to prepare the terms individually). The logic for each is cached + based on the equation and array shape, and each step is only performed if + necessary. + + Parameters + ---------- + eq : str + The einsum equation. + a : array_like + The first array to contract. + b : array_like + The second array to contract. + + Returns + ------- + array_like + + Notes + ----- + A fuller description of this algorithm, and original source for this + implementation, can be found at https://github.com/jcmgray/einsum_bmm. + """ + ( + eq_a, + eq_b, + new_shape_a, + new_shape_b, + new_shape_ab, + perm_ab, + pure_multiplication, + ) = _parse_eq_to_batch_matmul(eq, a.shape, b.shape) + + # n.b. one could special case various cases to call c_einsum directly here + + # need to handle `order` a little manually, since we do transpose + # operations before and potentially after the ufunc calls + output_order = _parse_output_order( + kwargs.pop("order", "K"), a.flags.f_contiguous, b.flags.f_contiguous + ) + + # prepare left + if eq_a is not None: + # diagonals, sums, and tranpose + a = c_einsum(eq_a, a) + if new_shape_a is not None: + a = reshape(a, new_shape_a) + + # prepare right + if eq_b is not None: + # diagonals, sums, and tranpose + b = c_einsum(eq_b, b) + if new_shape_b is not None: + b = reshape(b, new_shape_b) + + if pure_multiplication: + # no contracted indices + if output_order is not None: + kwargs["order"] = output_order + + # do the 'contraction' via multiplication! + return multiply(a, b, out=out, **kwargs) + + # can only supply out here if no other reshaping / transposing + matmul_out_compatible = (new_shape_ab is None) and (perm_ab is None) + if matmul_out_compatible: + kwargs["out"] = out + + # do the contraction! + ab = matmul(a, b, **kwargs) + + # prepare the output + if new_shape_ab is not None: + ab = reshape(ab, new_shape_ab) + if perm_ab is not None: + ab = ab.transpose(perm_ab) + + if (out is not None) and (not matmul_out_compatible): + # handle case where out is specified, but we also needed + # to reshape / transpose ``ab`` after the matmul + out[:] = ab + ab = out + elif output_order is not None: + ab = asanyarray(ab, order=output_order) + + return ab + + +def _einsum_dispatcher(*operands, out=None, optimize=None, **kwargs): + # Arguably we dispatch on more arguments than we really should; see note in + # _einsum_path_dispatcher for why. + yield from operands + yield out + + +# Rewrite einsum to handle different cases +@array_function_dispatch(_einsum_dispatcher, module='numpy') +def einsum(*operands, out=None, optimize=False, **kwargs): + """ + einsum(subscripts, *operands, out=None, dtype=None, order='K', + casting='safe', optimize=False) + + Evaluates the Einstein summation convention on the operands. + + Using the Einstein summation convention, many common multi-dimensional, + linear algebraic array operations can be represented in a simple fashion. + In *implicit* mode `einsum` computes these values. + + In *explicit* mode, `einsum` provides further flexibility to compute + other array operations that might not be considered classical Einstein + summation operations, by disabling, or forcing summation over specified + subscript labels. + + See the notes and examples for clarification. + + Parameters + ---------- + subscripts : str + Specifies the subscripts for summation as comma separated list of + subscript labels. An implicit (classical Einstein summation) + calculation is performed unless the explicit indicator '->' is + included as well as subscript labels of the precise output form. + operands : list of array_like + These are the arrays for the operation. + out : ndarray, optional + If provided, the calculation is done into this array. + dtype : {data-type, None}, optional + If provided, forces the calculation to use the data type specified. + Note that you may have to also give a more liberal `casting` + parameter to allow the conversions. Default is None. + order : {'C', 'F', 'A', 'K'}, optional + Controls the memory layout of the output. 'C' means it should + be C contiguous. 'F' means it should be Fortran contiguous, + 'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise. + 'K' means it should be as close to the layout as the inputs as + is possible, including arbitrarily permuted axes. + Default is 'K'. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Setting this to + 'unsafe' is not recommended, as it can adversely affect accumulations. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + + Default is 'safe'. + optimize : {False, True, 'greedy', 'optimal'}, optional + Controls if intermediate optimization should occur. No optimization + will occur if False and True will default to the 'greedy' algorithm. + Also accepts an explicit contraction list from the ``np.einsum_path`` + function. See ``np.einsum_path`` for more details. Defaults to False. + + Returns + ------- + output : ndarray + The calculation based on the Einstein summation convention. + + See Also + -------- + einsum_path, dot, inner, outer, tensordot, linalg.multi_dot + einsum: + Similar verbose interface is provided by the + `einops `_ package to cover + additional operations: transpose, reshape/flatten, repeat/tile, + squeeze/unsqueeze and reductions. + The `opt_einsum `_ + optimizes contraction order for einsum-like expressions + in backend-agnostic manner. + + Notes + ----- + The Einstein summation convention can be used to compute + many multi-dimensional, linear algebraic array operations. `einsum` + provides a succinct way of representing these. + + A non-exhaustive list of these operations, + which can be computed by `einsum`, is shown below along with examples: + + * Trace of an array, :py:func:`numpy.trace`. + * Return a diagonal, :py:func:`numpy.diag`. + * Array axis summations, :py:func:`numpy.sum`. + * Transpositions and permutations, :py:func:`numpy.transpose`. + * Matrix multiplication and dot product, :py:func:`numpy.matmul` + :py:func:`numpy.dot`. + * Vector inner and outer products, :py:func:`numpy.inner` + :py:func:`numpy.outer`. + * Broadcasting, element-wise and scalar multiplication, + :py:func:`numpy.multiply`. + * Tensor contractions, :py:func:`numpy.tensordot`. + * Chained array operations, in efficient calculation order, + :py:func:`numpy.einsum_path`. + + The subscripts string is a comma-separated list of subscript labels, + where each label refers to a dimension of the corresponding operand. + Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)`` + is equivalent to :py:func:`np.inner(a,b) `. If a label + appears only once, it is not summed, so ``np.einsum('i', a)`` + produces a view of ``a`` with no changes. A further example + ``np.einsum('ij,jk', a, b)`` describes traditional matrix multiplication + and is equivalent to :py:func:`np.matmul(a,b) `. + Repeated subscript labels in one operand take the diagonal. + For example, ``np.einsum('ii', a)`` is equivalent to + :py:func:`np.trace(a) `. + + In *implicit mode*, the chosen subscripts are important + since the axes of the output are reordered alphabetically. This + means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while + ``np.einsum('ji', a)`` takes its transpose. Additionally, + ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while, + ``np.einsum('ij,jh', a, b)`` returns the transpose of the + multiplication since subscript 'h' precedes subscript 'i'. + + In *explicit mode* the output can be directly controlled by + specifying output subscript labels. This requires the + identifier '->' as well as the list of output subscript labels. + This feature increases the flexibility of the function since + summing can be disabled or forced when required. The call + ``np.einsum('i->', a)`` is like :py:func:`np.sum(a) ` + if ``a`` is a 1-D array, and ``np.einsum('ii->i', a)`` + is like :py:func:`np.diag(a) ` if ``a`` is a square 2-D array. + The difference is that `einsum` does not allow broadcasting by default. + Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the + order of the output subscript labels and therefore returns matrix + multiplication, unlike the example above in implicit mode. + + To enable and control broadcasting, use an ellipsis. Default + NumPy-style broadcasting is done by adding an ellipsis + to the left of each term, like ``np.einsum('...ii->...i', a)``. + ``np.einsum('...i->...', a)`` is like + :py:func:`np.sum(a, axis=-1) ` for array ``a`` of any shape. + To take the trace along the first and last axes, + you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix + product with the left-most indices instead of rightmost, one can do + ``np.einsum('ij...,jk...->ik...', a, b)``. + + When there is only one operand, no axes are summed, and no output + parameter is provided, a view into the operand is returned instead + of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)`` + produces a view (changed in version 1.10.0). + + `einsum` also provides an alternative way to provide the subscripts and + operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. + If the output shape is not provided in this format `einsum` will be + calculated in implicit mode, otherwise it will be performed explicitly. + The examples below have corresponding `einsum` calls with the two + parameter methods. + + Views returned from einsum are now writeable whenever the input array + is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now + have the same effect as :py:func:`np.swapaxes(a, 0, 2) ` + and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal + of a 2D array. + + Added the ``optimize`` argument which will optimize the contraction order + of an einsum expression. For a contraction with three or more operands + this can greatly increase the computational efficiency at the cost of + a larger memory footprint during computation. + + Typically a 'greedy' algorithm is applied which empirical tests have shown + returns the optimal path in the majority of cases. In some cases 'optimal' + will return the superlative path through a more expensive, exhaustive + search. For iterative calculations it may be advisable to calculate + the optimal path once and reuse that path by supplying it as an argument. + An example is given below. + + See :py:func:`numpy.einsum_path` for more details. + + Examples + -------- + >>> a = np.arange(25).reshape(5,5) + >>> b = np.arange(5) + >>> c = np.arange(6).reshape(2,3) + + Trace of a matrix: + + >>> np.einsum('ii', a) + 60 + >>> np.einsum(a, [0,0]) + 60 + >>> np.trace(a) + 60 + + Extract the diagonal (requires explicit form): + + >>> np.einsum('ii->i', a) + array([ 0, 6, 12, 18, 24]) + >>> np.einsum(a, [0,0], [0]) + array([ 0, 6, 12, 18, 24]) + >>> np.diag(a) + array([ 0, 6, 12, 18, 24]) + + Sum over an axis (requires explicit form): + + >>> np.einsum('ij->i', a) + array([ 10, 35, 60, 85, 110]) + >>> np.einsum(a, [0,1], [0]) + array([ 10, 35, 60, 85, 110]) + >>> np.sum(a, axis=1) + array([ 10, 35, 60, 85, 110]) + + For higher dimensional arrays summing a single axis can be done + with ellipsis: + + >>> np.einsum('...j->...', a) + array([ 10, 35, 60, 85, 110]) + >>> np.einsum(a, [Ellipsis,1], [Ellipsis]) + array([ 10, 35, 60, 85, 110]) + + Compute a matrix transpose, or reorder any number of axes: + + >>> np.einsum('ji', c) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.einsum('ij->ji', c) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.einsum(c, [1,0]) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.transpose(c) + array([[0, 3], + [1, 4], + [2, 5]]) + + Vector inner products: + + >>> np.einsum('i,i', b, b) + 30 + >>> np.einsum(b, [0], b, [0]) + 30 + >>> np.inner(b,b) + 30 + + Matrix vector multiplication: + + >>> np.einsum('ij,j', a, b) + array([ 30, 80, 130, 180, 230]) + >>> np.einsum(a, [0,1], b, [1]) + array([ 30, 80, 130, 180, 230]) + >>> np.dot(a, b) + array([ 30, 80, 130, 180, 230]) + >>> np.einsum('...j,j', a, b) + array([ 30, 80, 130, 180, 230]) + + Broadcasting and scalar multiplication: + + >>> np.einsum('..., ...', 3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.einsum(',ij', 3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.einsum(3, [Ellipsis], c, [Ellipsis]) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.multiply(3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + + Vector outer product: + + >>> np.einsum('i,j', np.arange(2)+1, b) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + >>> np.einsum(np.arange(2)+1, [0], b, [1]) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + >>> np.outer(np.arange(2)+1, b) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + + Tensor contraction: + + >>> a = np.arange(60.).reshape(3,4,5) + >>> b = np.arange(24.).reshape(4,3,2) + >>> np.einsum('ijk,jil->kl', a, b) + array([[4400., 4730.], + [4532., 4874.], + [4664., 5018.], + [4796., 5162.], + [4928., 5306.]]) + >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3]) + array([[4400., 4730.], + [4532., 4874.], + [4664., 5018.], + [4796., 5162.], + [4928., 5306.]]) + >>> np.tensordot(a,b, axes=([1,0],[0,1])) + array([[4400., 4730.], + [4532., 4874.], + [4664., 5018.], + [4796., 5162.], + [4928., 5306.]]) + + Writeable returned arrays (since version 1.10.0): + + >>> a = np.zeros((3, 3)) + >>> np.einsum('ii->i', a)[:] = 1 + >>> a + array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) + + Example of ellipsis use: + + >>> a = np.arange(6).reshape((3,2)) + >>> b = np.arange(12).reshape((4,3)) + >>> np.einsum('ki,jk->ij', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + >>> np.einsum('ki,...k->i...', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + >>> np.einsum('k...,jk', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + + Chained array operations. For more complicated contractions, speed ups + might be achieved by repeatedly computing a 'greedy' path or pre-computing + the 'optimal' path and repeatedly applying it, using an `einsum_path` + insertion (since version 1.12.0). Performance improvements can be + particularly significant with larger arrays: + + >>> a = np.ones(64).reshape(2,4,8) + + Basic `einsum`: ~1520ms (benchmarked on 3.1GHz Intel i5.) + + >>> for iteration in range(500): + ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a) + + Sub-optimal `einsum` (due to repeated path calculation time): ~330ms + + >>> for iteration in range(500): + ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, + ... optimize='optimal') + + Greedy `einsum` (faster optimal path approximation): ~160ms + + >>> for iteration in range(500): + ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy') + + Optimal `einsum` (best usage pattern in some use cases): ~110ms + + >>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, + ... optimize='optimal')[0] + >>> for iteration in range(500): + ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path) + + """ + # Special handling if out is specified + specified_out = out is not None + + # If no optimization, run pure einsum + if optimize is False: + if specified_out: + kwargs['out'] = out + return c_einsum(*operands, **kwargs) + + # Check the kwargs to avoid a more cryptic error later, without having to + # repeat default values here + valid_einsum_kwargs = ['dtype', 'order', 'casting'] + unknown_kwargs = [k for (k, v) in kwargs.items() if + k not in valid_einsum_kwargs] + if len(unknown_kwargs): + raise TypeError(f"Did not understand the following kwargs: {unknown_kwargs}") + + # Build the contraction list and operand + operands, contraction_list = einsum_path(*operands, optimize=optimize, + einsum_call=True) + + # Start contraction loop + for num, contraction in enumerate(contraction_list): + inds, einsum_str, _ = contraction + tmp_operands = [operands.pop(x) for x in inds] + + # Do we need to deal with the output? + handle_out = specified_out and ((num + 1) == len(contraction_list)) + + # If out was specified + if handle_out: + kwargs["out"] = out + + if len(tmp_operands) == 2: + # Call (batched) matrix multiplication if possible + new_view = bmm_einsum(einsum_str, *tmp_operands, **kwargs) + else: + # Call einsum + new_view = c_einsum(einsum_str, *tmp_operands, **kwargs) + + # Append new items and dereference what we can + operands.append(new_view) + del tmp_operands, new_view + + if specified_out: + return out + else: + return operands[0] diff --git a/local_packages/numpy/_core/einsumfunc.pyi b/local_packages/numpy/_core/einsumfunc.pyi new file mode 100644 index 0000000..6d34883 --- /dev/null +++ b/local_packages/numpy/_core/einsumfunc.pyi @@ -0,0 +1,184 @@ +from collections.abc import Sequence +from typing import Any, Literal, TypeAlias, TypeVar, overload + +import numpy as np +from numpy import _OrderKACF, number +from numpy._typing import ( + NDArray, + _ArrayLikeBool_co, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _ArrayLikeObject_co, + _ArrayLikeUInt_co, + _DTypeLikeBool, + _DTypeLikeComplex, + _DTypeLikeComplex_co, + _DTypeLikeFloat, + _DTypeLikeInt, + _DTypeLikeObject, + _DTypeLikeUInt, +) + +__all__ = ["einsum", "einsum_path"] + +_ArrayT = TypeVar( + "_ArrayT", + bound=NDArray[np.bool | number], +) + +_OptimizeKind: TypeAlias = bool | Literal["greedy", "optimal"] | Sequence[Any] | None +_CastingSafe: TypeAlias = Literal["no", "equiv", "safe", "same_kind"] +_CastingUnsafe: TypeAlias = Literal["unsafe"] + +# TODO: Properly handle the `casting`-based combinatorics +# TODO: We need to evaluate the content `__subscripts` in order +# to identify whether or an array or scalar is returned. At a cursory +# glance this seems like something that can quite easily be done with +# a mypy plugin. +# Something like `is_scalar = bool(__subscripts.partition("->")[-1])` +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeBool_co, + out: None = None, + dtype: _DTypeLikeBool | None = ..., + order: _OrderKACF = ..., + casting: _CastingSafe = ..., + optimize: _OptimizeKind = False, +) -> Any: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeUInt_co, + out: None = None, + dtype: _DTypeLikeUInt | None = ..., + order: _OrderKACF = ..., + casting: _CastingSafe = ..., + optimize: _OptimizeKind = False, +) -> Any: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeInt_co, + out: None = None, + dtype: _DTypeLikeInt | None = ..., + order: _OrderKACF = ..., + casting: _CastingSafe = ..., + optimize: _OptimizeKind = False, +) -> Any: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeFloat_co, + out: None = None, + dtype: _DTypeLikeFloat | None = ..., + order: _OrderKACF = ..., + casting: _CastingSafe = ..., + optimize: _OptimizeKind = False, +) -> Any: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeComplex_co, + out: None = None, + dtype: _DTypeLikeComplex | None = ..., + order: _OrderKACF = ..., + casting: _CastingSafe = ..., + optimize: _OptimizeKind = False, +) -> Any: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: Any, + casting: _CastingUnsafe, + dtype: _DTypeLikeComplex_co | None = ..., + out: None = None, + order: _OrderKACF = ..., + optimize: _OptimizeKind = False, +) -> Any: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeComplex_co, + out: _ArrayT, + dtype: _DTypeLikeComplex_co | None = ..., + order: _OrderKACF = ..., + casting: _CastingSafe = ..., + optimize: _OptimizeKind = False, +) -> _ArrayT: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: Any, + out: _ArrayT, + casting: _CastingUnsafe, + dtype: _DTypeLikeComplex_co | None = ..., + order: _OrderKACF = ..., + optimize: _OptimizeKind = False, +) -> _ArrayT: ... + +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeObject_co, + out: None = None, + dtype: _DTypeLikeObject | None = ..., + order: _OrderKACF = ..., + casting: _CastingSafe = ..., + optimize: _OptimizeKind = False, +) -> Any: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: Any, + casting: _CastingUnsafe, + dtype: _DTypeLikeObject | None = ..., + out: None = None, + order: _OrderKACF = ..., + optimize: _OptimizeKind = False, +) -> Any: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeObject_co, + out: _ArrayT, + dtype: _DTypeLikeObject | None = ..., + order: _OrderKACF = ..., + casting: _CastingSafe = ..., + optimize: _OptimizeKind = False, +) -> _ArrayT: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: Any, + out: _ArrayT, + casting: _CastingUnsafe, + dtype: _DTypeLikeObject | None = ..., + order: _OrderKACF = ..., + optimize: _OptimizeKind = False, +) -> _ArrayT: ... + +# NOTE: `einsum_call` is a hidden kwarg unavailable for public use. +# It is therefore excluded from the signatures below. +# NOTE: In practice the list consists of a `str` (first element) +# and a variable number of integer tuples. +def einsum_path( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeComplex_co | _DTypeLikeObject, + optimize: _OptimizeKind = "greedy", + einsum_call: Literal[False] = False, +) -> tuple[list[Any], str]: ... diff --git a/local_packages/numpy/_core/fromnumeric.py b/local_packages/numpy/_core/fromnumeric.py new file mode 100644 index 0000000..33fb9ec --- /dev/null +++ b/local_packages/numpy/_core/fromnumeric.py @@ -0,0 +1,4233 @@ +"""Module containing non-deprecated functions borrowed from Numeric. + +""" +import functools +import math +import types + +import numpy as np +from numpy._utils import set_module + +from . import _methods, multiarray as mu, numerictypes as nt, overrides, umath as um +from ._multiarray_umath import _array_converter +from .multiarray import asanyarray, asarray, concatenate + +_dt_ = nt.sctype2char + +# functions that are methods +__all__ = [ + 'all', 'amax', 'amin', 'any', 'argmax', + 'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip', + 'compress', 'cumprod', 'cumsum', 'cumulative_prod', 'cumulative_sum', + 'diagonal', 'mean', 'max', 'min', 'matrix_transpose', + 'ndim', 'nonzero', 'partition', 'prod', 'ptp', 'put', + 'ravel', 'repeat', 'reshape', 'resize', 'round', + 'searchsorted', 'shape', 'size', 'sort', 'squeeze', + 'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var', +] + +_gentype = types.GeneratorType +# save away Python sum +_sum_ = sum + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +# functions that are now methods +def _wrapit(obj, method, *args, **kwds): + conv = _array_converter(obj) + # As this already tried the method, subok is maybe quite reasonable here + # but this follows what was done before. TODO: revisit this. + arr, = conv.as_arrays(subok=False) + result = getattr(arr, method)(*args, **kwds) + + return conv.wrap(result, to_scalar=False) + + +def _wrapfunc(obj, method, *args, **kwds): + bound = getattr(obj, method, None) + if bound is None: + return _wrapit(obj, method, *args, **kwds) + + try: + return bound(*args, **kwds) + except TypeError: + # A TypeError occurs if the object does have such a method in its + # class, but its signature is not identical to that of NumPy's. This + # situation has occurred in the case of a downstream library like + # 'pandas'. + # + # Call _wrapit from within the except clause to ensure a potential + # exception has a traceback chain. + return _wrapit(obj, method, *args, **kwds) + + +def _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs): + passkwargs = {k: v for k, v in kwargs.items() + if v is not np._NoValue} + + if type(obj) is not mu.ndarray: + try: + reduction = getattr(obj, method) + except AttributeError: + pass + else: + # This branch is needed for reductions like any which don't + # support a dtype. + if dtype is not None: + return reduction(axis=axis, dtype=dtype, out=out, **passkwargs) + else: + return reduction(axis=axis, out=out, **passkwargs) + + return ufunc.reduce(obj, axis, dtype, out, **passkwargs) + + +def _wrapreduction_any_all(obj, ufunc, method, axis, out, **kwargs): + # Same as above function, but dtype is always bool (but never passed on) + passkwargs = {k: v for k, v in kwargs.items() + if v is not np._NoValue} + + if type(obj) is not mu.ndarray: + try: + reduction = getattr(obj, method) + except AttributeError: + pass + else: + return reduction(axis=axis, out=out, **passkwargs) + + return ufunc.reduce(obj, axis, bool, out, **passkwargs) + + +def _take_dispatcher(a, indices, axis=None, out=None, mode=None): + return (a, out) + + +@array_function_dispatch(_take_dispatcher) +def take(a, indices, axis=None, out=None, mode='raise'): + """ + Take elements from an array along an axis. + + When axis is not None, this function does the same thing as "fancy" + indexing (indexing arrays using arrays); however, it can be easier to use + if you need elements along a given axis. A call such as + ``np.take(arr, indices, axis=3)`` is equivalent to + ``arr[:,:,:,indices,...]``. + + Explained without fancy indexing, this is equivalent to the following use + of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of + indices:: + + Ni, Nk = a.shape[:axis], a.shape[axis+1:] + Nj = indices.shape + for ii in ndindex(Ni): + for jj in ndindex(Nj): + for kk in ndindex(Nk): + out[ii + jj + kk] = a[ii + (indices[jj],) + kk] + + Parameters + ---------- + a : array_like (Ni..., M, Nk...) + The source array. + indices : array_like (Nj...) + The indices of the values to extract. + Also allow scalars for indices. + axis : int, optional + The axis over which to select values. By default, the flattened + input array is used. + out : ndarray, optional (Ni..., Nj..., Nk...) + If provided, the result will be placed in this array. It should + be of the appropriate shape and dtype. Note that `out` is always + buffered if `mode='raise'`; use other modes for better performance. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. + + * 'raise' -- raise an error (default) + * 'wrap' -- wrap around + * 'clip' -- clip to the range + + 'clip' mode means that all indices that are too large are replaced + by the index that addresses the last element along that axis. Note + that this disables indexing with negative numbers. + + Returns + ------- + out : ndarray (Ni..., Nj..., Nk...) + The returned array has the same type as `a`. + + See Also + -------- + compress : Take elements using a boolean mask + ndarray.take : equivalent method + take_along_axis : Take elements by matching the array and the index arrays + + Notes + ----- + By eliminating the inner loop in the description above, and using `s_` to + build simple slice objects, `take` can be expressed in terms of applying + fancy indexing to each 1-d slice:: + + Ni, Nk = a.shape[:axis], a.shape[axis+1:] + for ii in ndindex(Ni): + for kk in ndindex(Nk): + out[ii + s_[...,] + kk] = a[ii + s_[:,] + kk][indices] + + For this reason, it is equivalent to (but faster than) the following use + of `apply_along_axis`:: + + out = np.apply_along_axis(lambda a_1d: a_1d[indices], axis, a) + + Examples + -------- + >>> import numpy as np + >>> a = [4, 3, 5, 7, 6, 8] + >>> indices = [0, 1, 4] + >>> np.take(a, indices) + array([4, 3, 6]) + + In this example if `a` is an ndarray, "fancy" indexing can be used. + + >>> a = np.array(a) + >>> a[indices] + array([4, 3, 6]) + + If `indices` is not one dimensional, the output also has these dimensions. + + >>> np.take(a, [[0, 1], [2, 3]]) + array([[4, 3], + [5, 7]]) + """ + return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode) + + +def _reshape_dispatcher(a, /, shape, order=None, *, copy=None): + return (a,) + + +@array_function_dispatch(_reshape_dispatcher) +def reshape(a, /, shape, order='C', *, copy=None): + """ + Gives a new shape to an array without changing its data. + + Parameters + ---------- + a : array_like + Array to be reshaped. + shape : int or tuple of ints + The new shape should be compatible with the original shape. If + an integer, then the result will be a 1-D array of that length. + One shape dimension can be -1. In this case, the value is + inferred from the length of the array and remaining dimensions. + order : {'C', 'F', 'A'}, optional + Read the elements of ``a`` using this index order, and place the + elements into the reshaped array using this index order. 'C' + means to read / write the elements using C-like index order, + with the last axis index changing fastest, back to the first + axis index changing slowest. 'F' means to read / write the + elements using Fortran-like index order, with the first index + changing fastest, and the last index changing slowest. Note that + the 'C' and 'F' options take no account of the memory layout of + the underlying array, and only refer to the order of indexing. + 'A' means to read / write the elements in Fortran-like index + order if ``a`` is Fortran *contiguous* in memory, C-like order + otherwise. + copy : bool, optional + If ``True``, then the array data is copied. If ``None``, a copy will + only be made if it's required by ``order``. For ``False`` it raises + a ``ValueError`` if a copy cannot be avoided. Default: ``None``. + + Returns + ------- + reshaped_array : ndarray + This will be a new view object if possible; otherwise, it will + be a copy. Note there is no guarantee of the *memory layout* (C- or + Fortran- contiguous) of the returned array. + + See Also + -------- + ndarray.reshape : Equivalent method. + + Notes + ----- + It is not always possible to change the shape of an array without copying + the data. + + The ``order`` keyword gives the index ordering both for *fetching* + the values from ``a``, and then *placing* the values into the output + array. For example, let's say you have an array: + + >>> a = np.arange(6).reshape((3, 2)) + >>> a + array([[0, 1], + [2, 3], + [4, 5]]) + + You can think of reshaping as first raveling the array (using the given + index order), then inserting the elements from the raveled array into the + new array using the same kind of index ordering as was used for the + raveling. + + >>> np.reshape(a, (2, 3)) # C-like index ordering + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering + array([[0, 4, 3], + [2, 1, 5]]) + >>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F') + array([[0, 4, 3], + [2, 1, 5]]) + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1,2,3], [4,5,6]]) + >>> np.reshape(a, 6) + array([1, 2, 3, 4, 5, 6]) + >>> np.reshape(a, 6, order='F') + array([1, 4, 2, 5, 3, 6]) + + >>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2 + array([[1, 2], + [3, 4], + [5, 6]]) + """ + if copy is not None: + return _wrapfunc(a, 'reshape', shape, order=order, copy=copy) + return _wrapfunc(a, 'reshape', shape, order=order) + + +def _choose_dispatcher(a, choices, out=None, mode=None): + yield a + yield from choices + yield out + + +@array_function_dispatch(_choose_dispatcher) +def choose(a, choices, out=None, mode='raise'): + """ + Construct an array from an index array and a list of arrays to choose from. + + First of all, if confused or uncertain, definitely look at the Examples - + in its full generality, this function is less simple than it might + seem from the following code description:: + + np.choose(a,c) == np.array([c[a[I]][I] for I in np.ndindex(a.shape)]) + + But this omits some subtleties. Here is a fully general summary: + + Given an "index" array (`a`) of integers and a sequence of ``n`` arrays + (`choices`), `a` and each choice array are first broadcast, as necessary, + to arrays of a common shape; calling these *Ba* and *Bchoices[i], i = + 0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape`` + for each ``i``. Then, a new array with shape ``Ba.shape`` is created as + follows: + + * if ``mode='raise'`` (the default), then, first of all, each element of + ``a`` (and thus ``Ba``) must be in the range ``[0, n-1]``; now, suppose + that ``i`` (in that range) is the value at the ``(j0, j1, ..., jm)`` + position in ``Ba`` - then the value at the same position in the new array + is the value in ``Bchoices[i]`` at that same position; + + * if ``mode='wrap'``, values in `a` (and thus `Ba`) may be any (signed) + integer; modular arithmetic is used to map integers outside the range + `[0, n-1]` back into that range; and then the new array is constructed + as above; + + * if ``mode='clip'``, values in `a` (and thus ``Ba``) may be any (signed) + integer; negative integers are mapped to 0; values greater than ``n-1`` + are mapped to ``n-1``; and then the new array is constructed as above. + + Parameters + ---------- + a : int array + This array must contain integers in ``[0, n-1]``, where ``n`` is the + number of choices, unless ``mode=wrap`` or ``mode=clip``, in which + cases any integers are permissible. + choices : sequence of arrays + Choice arrays. `a` and all of the choices must be broadcastable to the + same shape. If `choices` is itself an array (not recommended), then + its outermost dimension (i.e., the one corresponding to + ``choices.shape[0]``) is taken as defining the "sequence". + out : array, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. Note that `out` is always + buffered if ``mode='raise'``; use other modes for better performance. + mode : {'raise' (default), 'wrap', 'clip'}, optional + Specifies how indices outside ``[0, n-1]`` will be treated: + + * 'raise' : an exception is raised + * 'wrap' : value becomes value mod ``n`` + * 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1 + + Returns + ------- + merged_array : array + The merged result. + + Raises + ------ + ValueError: shape mismatch + If `a` and each choice array are not all broadcastable to the same + shape. + + See Also + -------- + ndarray.choose : equivalent method + numpy.take_along_axis : Preferable if `choices` is an array + + Notes + ----- + To reduce the chance of misinterpretation, even though the following + "abuse" is nominally supported, `choices` should neither be, nor be + thought of as, a single array, i.e., the outermost sequence-like container + should be either a list or a tuple. + + Examples + -------- + + >>> import numpy as np + >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], + ... [20, 21, 22, 23], [30, 31, 32, 33]] + >>> np.choose([2, 3, 1, 0], choices + ... # the first element of the result will be the first element of the + ... # third (2+1) "array" in choices, namely, 20; the second element + ... # will be the second element of the fourth (3+1) choice array, i.e., + ... # 31, etc. + ... ) + array([20, 31, 12, 3]) + >>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1) + array([20, 31, 12, 3]) + >>> # because there are 4 choice arrays + >>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4) + array([20, 1, 12, 3]) + >>> # i.e., 0 + + A couple examples illustrating how choose broadcasts: + + >>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]] + >>> choices = [-10, 10] + >>> np.choose(a, choices) + array([[ 10, -10, 10], + [-10, 10, -10], + [ 10, -10, 10]]) + + >>> # With thanks to Anne Archibald + >>> a = np.array([0, 1]).reshape((2,1,1)) + >>> c1 = np.array([1, 2, 3]).reshape((1,3,1)) + >>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5)) + >>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2 + array([[[ 1, 1, 1, 1, 1], + [ 2, 2, 2, 2, 2], + [ 3, 3, 3, 3, 3]], + [[-1, -2, -3, -4, -5], + [-1, -2, -3, -4, -5], + [-1, -2, -3, -4, -5]]]) + + """ + return _wrapfunc(a, 'choose', choices, out=out, mode=mode) + + +def _repeat_dispatcher(a, repeats, axis=None): + return (a,) + + +@array_function_dispatch(_repeat_dispatcher) +def repeat(a, repeats, axis=None): + """ + Repeat each element of an array after themselves + + Parameters + ---------- + a : array_like + Input array. + repeats : int or array of ints + The number of repetitions for each element. `repeats` is broadcasted + to fit the shape of the given axis. + axis : int, optional + The axis along which to repeat values. By default, use the + flattened input array, and return a flat output array. + + Returns + ------- + repeated_array : ndarray + Output array which has the same shape as `a`, except along + the given axis. + + See Also + -------- + tile : Tile an array. + unique : Find the unique elements of an array. + + Examples + -------- + >>> import numpy as np + >>> np.repeat(3, 4) + array([3, 3, 3, 3]) + >>> x = np.array([[1,2],[3,4]]) + >>> np.repeat(x, 2) + array([1, 1, 2, 2, 3, 3, 4, 4]) + >>> np.repeat(x, 3, axis=1) + array([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 4, 4, 4]]) + >>> np.repeat(x, [1, 2], axis=0) + array([[1, 2], + [3, 4], + [3, 4]]) + + """ + return _wrapfunc(a, 'repeat', repeats, axis=axis) + + +def _put_dispatcher(a, ind, v, mode=None): + return (a, ind, v) + + +@array_function_dispatch(_put_dispatcher) +def put(a, ind, v, mode='raise'): + """ + Replaces specified elements of an array with given values. + + The indexing works on the flattened target array. `put` is roughly + equivalent to: + + :: + + a.flat[ind] = v + + Parameters + ---------- + a : ndarray + Target array. + ind : array_like + Target indices, interpreted as integers. + v : array_like + Values to place in `a` at target indices. If `v` is shorter than + `ind` it will be repeated as necessary. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. + + * 'raise' -- raise an error (default) + * 'wrap' -- wrap around + * 'clip' -- clip to the range + + 'clip' mode means that all indices that are too large are replaced + by the index that addresses the last element along that axis. Note + that this disables indexing with negative numbers. In 'raise' mode, + if an exception occurs the target array may still be modified. + + See Also + -------- + putmask, place + put_along_axis : Put elements by matching the array and the index arrays + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(5) + >>> np.put(a, [0, 2], [-44, -55]) + >>> a + array([-44, 1, -55, 3, 4]) + + >>> a = np.arange(5) + >>> np.put(a, 22, -5, mode='clip') + >>> a + array([ 0, 1, 2, 3, -5]) + + """ + try: + put = a.put + except AttributeError as e: + raise TypeError(f"argument 1 must be numpy.ndarray, not {type(a)}") from e + + return put(ind, v, mode=mode) + + +def _swapaxes_dispatcher(a, axis1, axis2): + return (a,) + + +@array_function_dispatch(_swapaxes_dispatcher) +def swapaxes(a, axis1, axis2): + """ + Interchange two axes of an array. + + Parameters + ---------- + a : array_like + Input array. + axis1 : int + First axis. + axis2 : int + Second axis. + + Returns + ------- + a_swapped : ndarray + For NumPy >= 1.10.0, if `a` is an ndarray, then a view of `a` is + returned; otherwise a new array is created. For earlier NumPy + versions a view of `a` is returned only if the order of the + axes is changed, otherwise the input array is returned. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([[1,2,3]]) + >>> np.swapaxes(x,0,1) + array([[1], + [2], + [3]]) + + >>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]]) + >>> x + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + + >>> np.swapaxes(x,0,2) + array([[[0, 4], + [2, 6]], + [[1, 5], + [3, 7]]]) + + """ + return _wrapfunc(a, 'swapaxes', axis1, axis2) + + +def _transpose_dispatcher(a, axes=None): + return (a,) + + +@array_function_dispatch(_transpose_dispatcher) +def transpose(a, axes=None): + """ + Returns an array with axes transposed. + + For a 1-D array, this returns an unchanged view of the original array, as a + transposed vector is simply the same vector. + To convert a 1-D array into a 2-D column vector, an additional dimension + must be added, e.g., ``np.atleast_2d(a).T`` achieves this, as does + ``a[:, np.newaxis]``. + For a 2-D array, this is the standard matrix transpose. + For an n-D array, if axes are given, their order indicates how the + axes are permuted (see Examples). If axes are not provided, then + ``transpose(a).shape == a.shape[::-1]``. + + Parameters + ---------- + a : array_like + Input array. + axes : tuple or list of ints, optional + If specified, it must be a tuple or list which contains a permutation + of [0, 1, ..., N-1] where N is the number of axes of `a`. Negative + indices can also be used to specify axes. The i-th axis of the returned + array will correspond to the axis numbered ``axes[i]`` of the input. + If not specified, defaults to ``range(a.ndim)[::-1]``, which reverses + the order of the axes. + + Returns + ------- + p : ndarray + `a` with its axes permuted. A view is returned whenever possible. + + See Also + -------- + ndarray.transpose : Equivalent method. + moveaxis : Move axes of an array to new positions. + argsort : Return the indices that would sort an array. + + Notes + ----- + Use ``transpose(a, argsort(axes))`` to invert the transposition of tensors + when using the `axes` keyword argument. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4]]) + >>> a + array([[1, 2], + [3, 4]]) + >>> np.transpose(a) + array([[1, 3], + [2, 4]]) + + >>> a = np.array([1, 2, 3, 4]) + >>> a + array([1, 2, 3, 4]) + >>> np.transpose(a) + array([1, 2, 3, 4]) + + >>> a = np.ones((1, 2, 3)) + >>> np.transpose(a, (1, 0, 2)).shape + (2, 1, 3) + + >>> a = np.ones((2, 3, 4, 5)) + >>> np.transpose(a).shape + (5, 4, 3, 2) + + >>> a = np.arange(3*4*5).reshape((3, 4, 5)) + >>> np.transpose(a, (-1, 0, -2)).shape + (5, 3, 4) + + """ + return _wrapfunc(a, 'transpose', axes) + + +def _matrix_transpose_dispatcher(x): + return (x,) + +@array_function_dispatch(_matrix_transpose_dispatcher) +def matrix_transpose(x, /): + """ + Transposes a matrix (or a stack of matrices) ``x``. + + This function is Array API compatible. + + Parameters + ---------- + x : array_like + Input array having shape (..., M, N) and whose two innermost + dimensions form ``MxN`` matrices. + + Returns + ------- + out : ndarray + An array containing the transpose for each matrix and having shape + (..., N, M). + + See Also + -------- + transpose : Generic transpose method. + + Examples + -------- + >>> import numpy as np + >>> np.matrix_transpose([[1, 2], [3, 4]]) + array([[1, 3], + [2, 4]]) + + >>> np.matrix_transpose([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) + array([[[1, 3], + [2, 4]], + [[5, 7], + [6, 8]]]) + + """ + x = asanyarray(x) + if x.ndim < 2: + raise ValueError( + f"Input array must be at least 2-dimensional, but it is {x.ndim}" + ) + return swapaxes(x, -1, -2) + + +def _partition_dispatcher(a, kth, axis=None, kind=None, order=None): + return (a,) + + +@array_function_dispatch(_partition_dispatcher) +def partition(a, kth, axis=-1, kind='introselect', order=None): + """ + Return a partitioned copy of an array. + + Creates a copy of the array and partially sorts it in such a way that + the value of the element in k-th position is in the position it would be + in a sorted array. In the output array, all elements smaller than the k-th + element are located to the left of this element and all equal or greater + are located to its right. The ordering of the elements in the two + partitions on the either side of the k-th element in the output array is + undefined. + + Parameters + ---------- + a : array_like + Array to be sorted. + kth : int or sequence of ints + Element index to partition by. The k-th value of the element + will be in its final sorted position and all smaller elements + will be moved before it and all equal or greater elements behind + it. The order of all elements in the partitions is undefined. If + provided with a sequence of k-th it will partition all elements + indexed by k-th of them into their sorted position at once. + + axis : int or None, optional + Axis along which to sort. If None, the array is flattened before + sorting. The default is -1, which sorts along the last axis. + kind : {'introselect'}, optional + Selection algorithm. Default is 'introselect'. + order : str or list of str, optional + When `a` is an array with fields defined, this argument + specifies which fields to compare first, second, etc. A single + field can be specified as a string. Not all fields need be + specified, but unspecified fields will still be used, in the + order in which they come up in the dtype, to break ties. + + Returns + ------- + partitioned_array : ndarray + Array of the same type and shape as `a`. + + See Also + -------- + ndarray.partition : Method to sort an array in-place. + argpartition : Indirect partition. + sort : Full sorting + + Notes + ----- + The various selection algorithms are characterized by their average + speed, worst case performance, work space size, and whether they are + stable. A stable sort keeps items with the same key in the same + relative order. The available algorithms have the following + properties: + + ================= ======= ============= ============ ======= + kind speed worst case work space stable + ================= ======= ============= ============ ======= + 'introselect' 1 O(n) 0 no + ================= ======= ============= ============ ======= + + All the partition algorithms make temporary copies of the data when + partitioning along any but the last axis. Consequently, + partitioning along the last axis is faster and uses less space than + partitioning along any other axis. + + The sort order for complex numbers is lexicographic. If both the + real and imaginary parts are non-nan then the order is determined by + the real parts except when they are equal, in which case the order + is determined by the imaginary parts. + + The sort order of ``np.nan`` is bigger than ``np.inf``. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([7, 1, 7, 7, 1, 5, 7, 2, 3, 2, 6, 2, 3, 0]) + >>> p = np.partition(a, 4) + >>> p + array([0, 1, 2, 1, 2, 5, 2, 3, 3, 6, 7, 7, 7, 7]) # may vary + + ``p[4]`` is 2; all elements in ``p[:4]`` are less than or equal + to ``p[4]``, and all elements in ``p[5:]`` are greater than or + equal to ``p[4]``. The partition is:: + + [0, 1, 2, 1], [2], [5, 2, 3, 3, 6, 7, 7, 7, 7] + + The next example shows the use of multiple values passed to `kth`. + + >>> p2 = np.partition(a, (4, 8)) + >>> p2 + array([0, 1, 2, 1, 2, 3, 3, 2, 5, 6, 7, 7, 7, 7]) + + ``p2[4]`` is 2 and ``p2[8]`` is 5. All elements in ``p2[:4]`` + are less than or equal to ``p2[4]``, all elements in ``p2[5:8]`` + are greater than or equal to ``p2[4]`` and less than or equal to + ``p2[8]``, and all elements in ``p2[9:]`` are greater than or + equal to ``p2[8]``. The partition is:: + + [0, 1, 2, 1], [2], [3, 3, 2], [5], [6, 7, 7, 7, 7] + """ + if axis is None: + # flatten returns (1, N) for np.matrix, so always use the last axis + a = asanyarray(a).flatten() + axis = -1 + else: + a = asanyarray(a).copy(order="K") + a.partition(kth, axis=axis, kind=kind, order=order) + return a + + +def _argpartition_dispatcher(a, kth, axis=None, kind=None, order=None): + return (a,) + + +@array_function_dispatch(_argpartition_dispatcher) +def argpartition(a, kth, axis=-1, kind='introselect', order=None): + """ + Perform an indirect partition along the given axis using the + algorithm specified by the `kind` keyword. It returns an array of + indices of the same shape as `a` that index data along the given + axis in partitioned order. + + Parameters + ---------- + a : array_like + Array to sort. + kth : int or sequence of ints + Element index to partition by. The k-th element will be in its + final sorted position and all smaller elements will be moved + before it and all larger elements behind it. The order of all + elements in the partitions is undefined. If provided with a + sequence of k-th it will partition all of them into their sorted + position at once. + + axis : int or None, optional + Axis along which to sort. The default is -1 (the last axis). If + None, the flattened array is used. + kind : {'introselect'}, optional + Selection algorithm. Default is 'introselect' + order : str or list of str, optional + When `a` is an array with fields defined, this argument + specifies which fields to compare first, second, etc. A single + field can be specified as a string, and not all fields need be + specified, but unspecified fields will still be used, in the + order in which they come up in the dtype, to break ties. + + Returns + ------- + index_array : ndarray, int + Array of indices that partition `a` along the specified axis. + If `a` is one-dimensional, ``a[index_array]`` yields a partitioned `a`. + More generally, ``np.take_along_axis(a, index_array, axis=axis)`` + always yields the partitioned `a`, irrespective of dimensionality. + + See Also + -------- + partition : Describes partition algorithms used. + ndarray.partition : Inplace partition. + argsort : Full indirect sort. + take_along_axis : Apply ``index_array`` from argpartition + to an array as if by calling partition. + + Notes + ----- + The returned indices are not guaranteed to be sorted according to + the values. Furthermore, the default selection algorithm ``introselect`` + is unstable, and hence the returned indices are not guaranteed + to be the earliest/latest occurrence of the element. + + `argpartition` works for real/complex inputs with nan values, + see `partition` for notes on the enhanced sort order and + different selection algorithms. + + Examples + -------- + One dimensional array: + + >>> import numpy as np + >>> x = np.array([3, 4, 2, 1]) + >>> x[np.argpartition(x, 3)] + array([2, 1, 3, 4]) # may vary + >>> x[np.argpartition(x, (1, 3))] + array([1, 2, 3, 4]) # may vary + + >>> x = [3, 4, 2, 1] + >>> np.array(x)[np.argpartition(x, 3)] + array([2, 1, 3, 4]) # may vary + + Multi-dimensional array: + + >>> x = np.array([[3, 4, 2], [1, 3, 1]]) + >>> index_array = np.argpartition(x, kth=1, axis=-1) + >>> # below is the same as np.partition(x, kth=1) + >>> np.take_along_axis(x, index_array, axis=-1) + array([[2, 3, 4], + [1, 1, 3]]) + + """ + return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order) + + +def _sort_dispatcher(a, axis=None, kind=None, order=None, *, stable=None): + return (a,) + + +@array_function_dispatch(_sort_dispatcher) +def sort(a, axis=-1, kind=None, order=None, *, stable=None): + """ + Return a sorted copy of an array. + + Parameters + ---------- + a : array_like + Array to be sorted. + axis : int or None, optional + Axis along which to sort. If None, the array is flattened before + sorting. The default is -1, which sorts along the last axis. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional + Sorting algorithm. The default is 'quicksort'. Note that both 'stable' + and 'mergesort' use timsort or radix sort under the covers and, + in general, the actual implementation will vary with data type. + The 'mergesort' option is retained for backwards compatibility. + order : str or list of str, optional + When `a` is an array with fields defined, this argument specifies + which fields to compare first, second, etc. A single field can + be specified as a string, and not all fields need be specified, + but unspecified fields will still be used, in the order in which + they come up in the dtype, to break ties. + stable : bool, optional + Sort stability. If ``True``, the returned array will maintain + the relative order of ``a`` values which compare as equal. + If ``False`` or ``None``, this is not guaranteed. Internally, + this option selects ``kind='stable'``. Default: ``None``. + + .. versionadded:: 2.0.0 + + Returns + ------- + sorted_array : ndarray + Array of the same type and shape as `a`. + + See Also + -------- + ndarray.sort : Method to sort an array in-place. + argsort : Indirect sort. + lexsort : Indirect stable sort on multiple keys. + searchsorted : Find elements in a sorted array. + partition : Partial sort. + + Notes + ----- + The various sorting algorithms are characterized by their average speed, + worst case performance, work space size, and whether they are stable. A + stable sort keeps items with the same key in the same relative + order. The four algorithms implemented in NumPy have the following + properties: + + =========== ======= ============= ============ ======== + kind speed worst case work space stable + =========== ======= ============= ============ ======== + 'quicksort' 1 O(n^2) 0 no + 'heapsort' 3 O(n*log(n)) 0 no + 'mergesort' 2 O(n*log(n)) ~n/2 yes + 'timsort' 2 O(n*log(n)) ~n/2 yes + =========== ======= ============= ============ ======== + + .. note:: The datatype determines which of 'mergesort' or 'timsort' + is actually used, even if 'mergesort' is specified. User selection + at a finer scale is not currently available. + + For performance, ``sort`` makes a temporary copy if needed to make the data + `contiguous `_ + in memory along the sort axis. For even better performance and reduced + memory consumption, ensure that the array is already contiguous along the + sort axis. + + The sort order for complex numbers is lexicographic. If both the real + and imaginary parts are non-nan then the order is determined by the + real parts except when they are equal, in which case the order is + determined by the imaginary parts. + + Previous to numpy 1.4.0 sorting real and complex arrays containing nan + values led to undefined behaviour. In numpy versions >= 1.4.0 nan + values are sorted to the end. The extended sort order is: + + * Real: [R, nan] + * Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj] + + where R is a non-nan real value. Complex values with the same nan + placements are sorted according to the non-nan part if it exists. + Non-nan values are sorted as before. + + quicksort has been changed to: + `introsort `_. + When sorting does not make enough progress it switches to + `heapsort `_. + This implementation makes quicksort O(n*log(n)) in the worst case. + + 'stable' automatically chooses the best stable sorting algorithm + for the data type being sorted. + It, along with 'mergesort' is currently mapped to + `timsort `_ + or `radix sort `_ + depending on the data type. + API forward compatibility currently limits the + ability to select the implementation and it is hardwired for the different + data types. + + Timsort is added for better performance on already or nearly + sorted data. On random data timsort is almost identical to + mergesort. It is now used for stable sort while quicksort is still the + default sort if none is chosen. For timsort details, refer to + `CPython listsort.txt + `_ + 'mergesort' and 'stable' are mapped to radix sort for integer data types. + Radix sort is an O(n) sort instead of O(n log n). + + NaT now sorts to the end of arrays for consistency with NaN. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1,4],[3,1]]) + >>> np.sort(a) # sort along the last axis + array([[1, 4], + [1, 3]]) + >>> np.sort(a, axis=None) # sort the flattened array + array([1, 1, 3, 4]) + >>> np.sort(a, axis=0) # sort along the first axis + array([[1, 1], + [3, 4]]) + + Use the `order` keyword to specify a field to use when sorting a + structured array: + + >>> dtype = [('name', 'S10'), ('height', float), ('age', int)] + >>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38), + ... ('Galahad', 1.7, 38)] + >>> a = np.array(values, dtype=dtype) # create a structured array + >>> np.sort(a, order='height') # doctest: +SKIP + array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41), + ('Lancelot', 1.8999999999999999, 38)], + dtype=[('name', '|S10'), ('height', '>> np.sort(a, order=['age', 'height']) # doctest: +SKIP + array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38), + ('Arthur', 1.8, 41)], + dtype=[('name', '|S10'), ('height', '>> import numpy as np + >>> x = np.array([3, 1, 2]) + >>> np.argsort(x) + array([1, 2, 0]) + + Two-dimensional array: + + >>> x = np.array([[0, 3], [2, 2]]) + >>> x + array([[0, 3], + [2, 2]]) + + >>> ind = np.argsort(x, axis=0) # sorts along first axis (down) + >>> ind + array([[0, 1], + [1, 0]]) + >>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0) + array([[0, 2], + [2, 3]]) + + >>> ind = np.argsort(x, axis=1) # sorts along last axis (across) + >>> ind + array([[0, 1], + [0, 1]]) + >>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1) + array([[0, 3], + [2, 2]]) + + Indices of the sorted elements of a N-dimensional array: + + >>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape) + >>> ind + (array([0, 1, 1, 0]), array([0, 0, 1, 1])) + >>> x[ind] # same as np.sort(x, axis=None) + array([0, 2, 2, 3]) + + Sorting with keys: + + >>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '>> x + array([(1, 0), (0, 1)], + dtype=[('x', '>> np.argsort(x, order=('x','y')) + array([1, 0]) + + >>> np.argsort(x, order=('y','x')) + array([0, 1]) + + """ + return _wrapfunc( + a, 'argsort', axis=axis, kind=kind, order=order, stable=stable + ) + +def _argmax_dispatcher(a, axis=None, out=None, *, keepdims=np._NoValue): + return (a, out) + + +@array_function_dispatch(_argmax_dispatcher) +def argmax(a, axis=None, out=None, *, keepdims=np._NoValue): + """ + Returns the indices of the maximum values along an axis. + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + By default, the index is into the flattened array, otherwise + along the specified axis. + out : array, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + .. versionadded:: 1.22.0 + + Returns + ------- + index_array : ndarray of ints + Array of indices into the array. It has the same shape as ``a.shape`` + with the dimension along `axis` removed. If `keepdims` is set to True, + then the size of `axis` will be 1 with the resulting array having same + shape as ``a.shape``. + + See Also + -------- + ndarray.argmax, argmin + amax : The maximum value along a given axis. + unravel_index : Convert a flat index into an index tuple. + take_along_axis : Apply ``np.expand_dims(index_array, axis)`` + from argmax to an array as if by calling max. + + Notes + ----- + In case of multiple occurrences of the maximum values, the indices + corresponding to the first occurrence are returned. + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(6).reshape(2,3) + 10 + >>> a + array([[10, 11, 12], + [13, 14, 15]]) + >>> np.argmax(a) + 5 + >>> np.argmax(a, axis=0) + array([1, 1, 1]) + >>> np.argmax(a, axis=1) + array([2, 2]) + + Indexes of the maximal elements of a N-dimensional array: + + >>> a.flat[np.argmax(a)] + 15 + >>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape) + >>> ind + (1, 2) + >>> a[ind] + 15 + + >>> b = np.arange(6) + >>> b[1] = 5 + >>> b + array([0, 5, 2, 3, 4, 5]) + >>> np.argmax(b) # Only the first occurrence is returned. + 1 + + >>> x = np.array([[4,2,3], [1,0,3]]) + >>> index_array = np.argmax(x, axis=-1) + >>> # Same as np.amax(x, axis=-1, keepdims=True) + >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1) + array([[4], + [3]]) + >>> # Same as np.amax(x, axis=-1) + >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), + ... axis=-1).squeeze(axis=-1) + array([4, 3]) + + Setting `keepdims` to `True`, + + >>> x = np.arange(24).reshape((2, 3, 4)) + >>> res = np.argmax(x, axis=1, keepdims=True) + >>> res.shape + (2, 1, 4) + """ + kwds = {'keepdims': keepdims} if keepdims is not np._NoValue else {} + return _wrapfunc(a, 'argmax', axis=axis, out=out, **kwds) + + +def _argmin_dispatcher(a, axis=None, out=None, *, keepdims=np._NoValue): + return (a, out) + + +@array_function_dispatch(_argmin_dispatcher) +def argmin(a, axis=None, out=None, *, keepdims=np._NoValue): + """ + Returns the indices of the minimum values along an axis. + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + By default, the index is into the flattened array, otherwise + along the specified axis. + out : array, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + .. versionadded:: 1.22.0 + + Returns + ------- + index_array : ndarray of ints + Array of indices into the array. It has the same shape as `a.shape` + with the dimension along `axis` removed. If `keepdims` is set to True, + then the size of `axis` will be 1 with the resulting array having same + shape as `a.shape`. + + See Also + -------- + ndarray.argmin, argmax + amin : The minimum value along a given axis. + unravel_index : Convert a flat index into an index tuple. + take_along_axis : Apply ``np.expand_dims(index_array, axis)`` + from argmin to an array as if by calling min. + + Notes + ----- + In case of multiple occurrences of the minimum values, the indices + corresponding to the first occurrence are returned. + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(6).reshape(2,3) + 10 + >>> a + array([[10, 11, 12], + [13, 14, 15]]) + >>> np.argmin(a) + 0 + >>> np.argmin(a, axis=0) + array([0, 0, 0]) + >>> np.argmin(a, axis=1) + array([0, 0]) + + Indices of the minimum elements of a N-dimensional array: + + >>> a.flat[np.argmin(a)] + 10 + >>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape) + >>> ind + (0, 0) + >>> a[ind] + 10 + + >>> b = np.arange(6) + 10 + >>> b[4] = 10 + >>> b + array([10, 11, 12, 13, 10, 15]) + >>> np.argmin(b) # Only the first occurrence is returned. + 0 + + >>> x = np.array([[4,2,3], [1,0,3]]) + >>> index_array = np.argmin(x, axis=-1) + >>> # Same as np.amin(x, axis=-1, keepdims=True) + >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1) + array([[2], + [0]]) + >>> # Same as np.amax(x, axis=-1) + >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), + ... axis=-1).squeeze(axis=-1) + array([2, 0]) + + Setting `keepdims` to `True`, + + >>> x = np.arange(24).reshape((2, 3, 4)) + >>> res = np.argmin(x, axis=1, keepdims=True) + >>> res.shape + (2, 1, 4) + """ + kwds = {'keepdims': keepdims} if keepdims is not np._NoValue else {} + return _wrapfunc(a, 'argmin', axis=axis, out=out, **kwds) + + +def _searchsorted_dispatcher(a, v, side=None, sorter=None): + return (a, v, sorter) + + +@array_function_dispatch(_searchsorted_dispatcher) +def searchsorted(a, v, side='left', sorter=None): + """ + Find indices where elements should be inserted to maintain order. + + Find the indices into a sorted array `a` such that, if the + corresponding elements in `v` were inserted before the indices, the + order of `a` would be preserved. + + Assuming that `a` is sorted: + + ====== ============================ + `side` returned index `i` satisfies + ====== ============================ + left ``a[i-1] < v <= a[i]`` + right ``a[i-1] <= v < a[i]`` + ====== ============================ + + Parameters + ---------- + a : 1-D array_like + Input array. If `sorter` is None, then it must be sorted in + ascending order, otherwise `sorter` must be an array of indices + that sort it. + v : array_like + Values to insert into `a`. + side : {'left', 'right'}, optional + If 'left', the index of the first suitable location found is given. + If 'right', return the last such index. If there is no suitable + index, return either 0 or N (where N is the length of `a`). + sorter : 1-D array_like, optional + Optional array of integer indices that sort array a into ascending + order. They are typically the result of argsort. + + Returns + ------- + indices : int or array of ints + Array of insertion points with the same shape as `v`, + or an integer if `v` is a scalar. + + See Also + -------- + sort : Return a sorted copy of an array. + histogram : Produce histogram from 1-D data. + + Notes + ----- + Binary search is used to find the required insertion points. + + As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing + `nan` values. The enhanced sort order is documented in `sort`. + + This function uses the same algorithm as the builtin python + `bisect.bisect_left` (``side='left'``) and `bisect.bisect_right` + (``side='right'``) functions, which is also vectorized + in the `v` argument. + + Examples + -------- + >>> import numpy as np + >>> np.searchsorted([11,12,13,14,15], 13) + 2 + >>> np.searchsorted([11,12,13,14,15], 13, side='right') + 3 + >>> np.searchsorted([11,12,13,14,15], [-10, 20, 12, 13]) + array([0, 5, 1, 2]) + + When `sorter` is used, the returned indices refer to the sorted + array of `a` and not `a` itself: + + >>> a = np.array([40, 10, 20, 30]) + >>> sorter = np.argsort(a) + >>> sorter + array([1, 2, 3, 0]) # Indices that would sort the array 'a' + >>> result = np.searchsorted(a, 25, sorter=sorter) + >>> result + 2 + >>> a[sorter[result]] + 30 # The element at index 2 of the sorted array is 30. + """ + return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter) + + +def _resize_dispatcher(a, new_shape): + return (a,) + + +@array_function_dispatch(_resize_dispatcher) +def resize(a, new_shape): + """ + Return a new array with the specified shape. + + If the new array is larger than the original array, then the new + array is filled with repeated copies of `a`. Note that this behavior + is different from a.resize(new_shape) which fills with zeros instead + of repeated copies of `a`. + + Parameters + ---------- + a : array_like + Array to be resized. + + new_shape : int or tuple of int + Shape of resized array. + + Returns + ------- + reshaped_array : ndarray + The new array is formed from the data in the old array, repeated + if necessary to fill out the required number of elements. The + data are repeated iterating over the array in C-order. + + See Also + -------- + numpy.reshape : Reshape an array without changing the total size. + numpy.pad : Enlarge and pad an array. + numpy.repeat : Repeat elements of an array. + ndarray.resize : resize an array in-place. + + Notes + ----- + When the total size of the array does not change `~numpy.reshape` should + be used. In most other cases either indexing (to reduce the size) + or padding (to increase the size) may be a more appropriate solution. + + Warning: This functionality does **not** consider axes separately, + i.e. it does not apply interpolation/extrapolation. + It fills the return array with the required number of elements, iterating + over `a` in C-order, disregarding axes (and cycling back from the start if + the new shape is larger). This functionality is therefore not suitable to + resize images, or data where each axis represents a separate and distinct + entity. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[0,1],[2,3]]) + >>> np.resize(a,(2,3)) + array([[0, 1, 2], + [3, 0, 1]]) + >>> np.resize(a,(1,4)) + array([[0, 1, 2, 3]]) + >>> np.resize(a,(2,4)) + array([[0, 1, 2, 3], + [0, 1, 2, 3]]) + + """ + if isinstance(new_shape, (int, nt.integer)): + new_shape = (new_shape,) + + a = ravel(a) + + new_size = 1 + for dim_length in new_shape: + new_size *= dim_length + if dim_length < 0: + raise ValueError( + 'all elements of `new_shape` must be non-negative' + ) + + if a.size == 0 or new_size == 0: + # First case must zero fill. The second would have repeats == 0. + return np.zeros_like(a, shape=new_shape) + + # ceiling division without negating new_size + repeats = (new_size + a.size - 1) // a.size + a = concatenate((a,) * repeats)[:new_size] + + return reshape(a, new_shape) + + +def _squeeze_dispatcher(a, axis=None): + return (a,) + + +@array_function_dispatch(_squeeze_dispatcher) +def squeeze(a, axis=None): + """ + Remove axes of length one from `a`. + + Parameters + ---------- + a : array_like + Input data. + axis : None or int or tuple of ints, optional + Selects a subset of the entries of length one in the + shape. If an axis is selected with shape entry greater than + one, an error is raised. + + Returns + ------- + squeezed : ndarray + The input array, but with all or a subset of the + dimensions of length 1 removed. This is always `a` itself + or a view into `a`. Note that if all axes are squeezed, + the result is a 0d array and not a scalar. + + Raises + ------ + ValueError + If `axis` is not None, and an axis being squeezed is not of length 1 + + See Also + -------- + expand_dims : The inverse operation, adding entries of length one + reshape : Insert, remove, and combine dimensions, and resize existing ones + + Examples + -------- + >>> import numpy as np + >>> x = np.array([[[0], [1], [2]]]) + >>> x.shape + (1, 3, 1) + >>> np.squeeze(x).shape + (3,) + >>> np.squeeze(x, axis=0).shape + (3, 1) + >>> np.squeeze(x, axis=1).shape + Traceback (most recent call last): + ... + ValueError: cannot select an axis to squeeze out which has size + not equal to one + >>> np.squeeze(x, axis=2).shape + (1, 3) + >>> x = np.array([[1234]]) + >>> x.shape + (1, 1) + >>> np.squeeze(x) + array(1234) # 0d array + >>> np.squeeze(x).shape + () + >>> np.squeeze(x)[()] + 1234 + + """ + try: + squeeze = a.squeeze + except AttributeError: + return _wrapit(a, 'squeeze', axis=axis) + if axis is None: + return squeeze() + else: + return squeeze(axis=axis) + + +def _diagonal_dispatcher(a, offset=None, axis1=None, axis2=None): + return (a,) + + +@array_function_dispatch(_diagonal_dispatcher) +def diagonal(a, offset=0, axis1=0, axis2=1): + """ + Return specified diagonals. + + If `a` is 2-D, returns the diagonal of `a` with the given offset, + i.e., the collection of elements of the form ``a[i, i+offset]``. If + `a` has more than two dimensions, then the axes specified by `axis1` + and `axis2` are used to determine the 2-D sub-array whose diagonal is + returned. The shape of the resulting array can be determined by + removing `axis1` and `axis2` and appending an index to the right equal + to the size of the resulting diagonals. + + In versions of NumPy prior to 1.7, this function always returned a new, + independent array containing a copy of the values in the diagonal. + + In NumPy 1.7 and 1.8, it continues to return a copy of the diagonal, + but depending on this fact is deprecated. Writing to the resulting + array continues to work as it used to, but a FutureWarning is issued. + + Starting in NumPy 1.9 it returns a read-only view on the original array. + Attempting to write to the resulting array will produce an error. + + In some future release, it will return a read/write view and writing to + the returned array will alter your original array. The returned array + will have the same type as the input array. + + If you don't write to the array returned by this function, then you can + just ignore all of the above. + + If you depend on the current behavior, then we suggest copying the + returned array explicitly, i.e., use ``np.diagonal(a).copy()`` instead + of just ``np.diagonal(a)``. This will work with both past and future + versions of NumPy. + + Parameters + ---------- + a : array_like + Array from which the diagonals are taken. + offset : int, optional + Offset of the diagonal from the main diagonal. Can be positive or + negative. Defaults to main diagonal (0). + axis1 : int, optional + Axis to be used as the first axis of the 2-D sub-arrays from which + the diagonals should be taken. Defaults to first axis (0). + axis2 : int, optional + Axis to be used as the second axis of the 2-D sub-arrays from + which the diagonals should be taken. Defaults to second axis (1). + + Returns + ------- + array_of_diagonals : ndarray + If `a` is 2-D, then a 1-D array containing the diagonal and of the + same type as `a` is returned unless `a` is a `matrix`, in which case + a 1-D array rather than a (2-D) `matrix` is returned in order to + maintain backward compatibility. + + If ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2` + are removed, and a new axis inserted at the end corresponding to the + diagonal. + + Raises + ------ + ValueError + If the dimension of `a` is less than 2. + + See Also + -------- + diag : MATLAB work-a-like for 1-D and 2-D arrays. + diagflat : Create diagonal arrays. + trace : Sum along diagonals. + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(4).reshape(2,2) + >>> a + array([[0, 1], + [2, 3]]) + >>> a.diagonal() + array([0, 3]) + >>> a.diagonal(1) + array([1]) + + A 3-D example: + + >>> a = np.arange(8).reshape(2,2,2); a + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> a.diagonal(0, # Main diagonals of two arrays created by skipping + ... 0, # across the outer(left)-most axis last and + ... 1) # the "middle" (row) axis first. + array([[0, 6], + [1, 7]]) + + The sub-arrays whose main diagonals we just obtained; note that each + corresponds to fixing the right-most (column) axis, and that the + diagonals are "packed" in rows. + + >>> a[:,:,0] # main diagonal is [0 6] + array([[0, 2], + [4, 6]]) + >>> a[:,:,1] # main diagonal is [1 7] + array([[1, 3], + [5, 7]]) + + The anti-diagonal can be obtained by reversing the order of elements + using either `numpy.flipud` or `numpy.fliplr`. + + >>> a = np.arange(9).reshape(3, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> np.fliplr(a).diagonal() # Horizontal flip + array([2, 4, 6]) + >>> np.flipud(a).diagonal() # Vertical flip + array([6, 4, 2]) + + Note that the order in which the diagonal is retrieved varies depending + on the flip function. + """ + if isinstance(a, np.matrix): + # Make diagonal of matrix 1-D to preserve backward compatibility. + return asarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2) + else: + return asanyarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2) + + +def _trace_dispatcher( + a, offset=None, axis1=None, axis2=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_trace_dispatcher) +def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None): + """ + Return the sum along diagonals of the array. + + If `a` is 2-D, the sum along its diagonal with the given offset + is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i. + + If `a` has more than two dimensions, then the axes specified by axis1 and + axis2 are used to determine the 2-D sub-arrays whose traces are returned. + The shape of the resulting array is the same as that of `a` with `axis1` + and `axis2` removed. + + Parameters + ---------- + a : array_like + Input array, from which the diagonals are taken. + offset : int, optional + Offset of the diagonal from the main diagonal. Can be both positive + and negative. Defaults to 0. + axis1, axis2 : int, optional + Axes to be used as the first and second axis of the 2-D sub-arrays + from which the diagonals should be taken. Defaults are the first two + axes of `a`. + dtype : dtype, optional + Determines the data-type of the returned array and of the accumulator + where the elements are summed. If dtype has the value None and `a` is + of integer type of precision less than the default integer + precision, then the default integer precision is used. Otherwise, + the precision is the same as that of `a`. + out : ndarray, optional + Array into which the output is placed. Its type is preserved and + it must be of the right shape to hold the output. + + Returns + ------- + sum_along_diagonals : ndarray + If `a` is 2-D, the sum along the diagonal is returned. If `a` has + larger dimensions, then an array of sums along diagonals is returned. + + See Also + -------- + diag, diagonal, diagflat + + Examples + -------- + >>> import numpy as np + >>> np.trace(np.eye(3)) + 3.0 + >>> a = np.arange(8).reshape((2,2,2)) + >>> np.trace(a) + array([6, 8]) + + >>> a = np.arange(24).reshape((2,2,2,3)) + >>> np.trace(a).shape + (2, 3) + + """ + if isinstance(a, np.matrix): + # Get trace of matrix via an array to preserve backward compatibility. + return asarray(a).trace( + offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out + ) + else: + return asanyarray(a).trace( + offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out + ) + + +def _ravel_dispatcher(a, order=None): + return (a,) + + +@array_function_dispatch(_ravel_dispatcher) +def ravel(a, order='C'): + """Return a contiguous flattened array. + + A 1-D array, containing the elements of the input, is returned. A copy is + made only if needed. + + As of NumPy 1.10, the returned array will have the same type as the input + array. (for example, a masked array will be returned for a masked array + input) + + Parameters + ---------- + a : array_like + Input array. The elements in `a` are read in the order specified by + `order`, and packed as a 1-D array. + order : {'C','F', 'A', 'K'}, optional + + The elements of `a` are read using this index order. 'C' means + to index the elements in row-major, C-style order, + with the last axis index changing fastest, back to the first + axis index changing slowest. 'F' means to index the elements + in column-major, Fortran-style order, with the + first index changing fastest, and the last index changing + slowest. Note that the 'C' and 'F' options take no account of + the memory layout of the underlying array, and only refer to + the order of axis indexing. 'A' means to read the elements in + Fortran-like index order if `a` is Fortran *contiguous* in + memory, C-like order otherwise. 'K' means to read the + elements in the order they occur in memory, except for + reversing the data when strides are negative. By default, 'C' + index order is used. + + Returns + ------- + y : array_like + y is a contiguous 1-D array of the same subtype as `a`, + with shape ``(a.size,)``. + Note that matrices are special cased for backward compatibility, + if `a` is a matrix, then y is a 1-D ndarray. + + See Also + -------- + ndarray.flat : 1-D iterator over an array. + ndarray.flatten : 1-D array copy of the elements of an array + in row-major order. + ndarray.reshape : Change the shape of an array without changing its data. + + Notes + ----- + In row-major, C-style order, in two dimensions, the row index + varies the slowest, and the column index the quickest. This can + be generalized to multiple dimensions, where row-major order + implies that the index along the first axis varies slowest, and + the index along the last quickest. The opposite holds for + column-major, Fortran-style index ordering. + + When a view is desired in as many cases as possible, ``arr.reshape(-1)`` + may be preferable. However, ``ravel`` supports ``K`` in the optional + ``order`` argument while ``reshape`` does not. + + Examples + -------- + It is equivalent to ``reshape(-1, order=order)``. + + >>> import numpy as np + >>> x = np.array([[1, 2, 3], [4, 5, 6]]) + >>> np.ravel(x) + array([1, 2, 3, 4, 5, 6]) + + >>> x.reshape(-1) + array([1, 2, 3, 4, 5, 6]) + + >>> np.ravel(x, order='F') + array([1, 4, 2, 5, 3, 6]) + + When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering: + + >>> np.ravel(x.T) + array([1, 4, 2, 5, 3, 6]) + >>> np.ravel(x.T, order='A') + array([1, 2, 3, 4, 5, 6]) + + When ``order`` is 'K', it will preserve orderings that are neither 'C' + nor 'F', but won't reverse axes: + + >>> a = np.arange(3)[::-1]; a + array([2, 1, 0]) + >>> a.ravel(order='C') + array([2, 1, 0]) + >>> a.ravel(order='K') + array([2, 1, 0]) + + >>> a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a + array([[[ 0, 2, 4], + [ 1, 3, 5]], + [[ 6, 8, 10], + [ 7, 9, 11]]]) + >>> a.ravel(order='C') + array([ 0, 2, 4, 1, 3, 5, 6, 8, 10, 7, 9, 11]) + >>> a.ravel(order='K') + array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + + """ + if isinstance(a, np.matrix): + return asarray(a).ravel(order=order) + else: + return asanyarray(a).ravel(order=order) + + +def _nonzero_dispatcher(a): + return (a,) + + +@array_function_dispatch(_nonzero_dispatcher) +def nonzero(a): + """ + Return the indices of the elements that are non-zero. + + Returns a tuple of arrays, one for each dimension of `a`, + containing the indices of the non-zero elements in that + dimension. The values in `a` are always tested and returned in + row-major, C-style order. + + To group the indices by element, rather than dimension, use `argwhere`, + which returns a row for each non-zero element. + + Parameters + ---------- + a : array_like + Input array. + + Returns + ------- + tuple_of_arrays : tuple + Indices of elements that are non-zero. + + See Also + -------- + flatnonzero : + Return indices that are non-zero in the flattened version of the input + array. + ndarray.nonzero : + Equivalent ndarray method. + count_nonzero : + Counts the number of non-zero elements in the input array. + + Notes + ----- + While the nonzero values can be obtained with ``a[nonzero(a)]``, it is + recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which + will correctly handle 0-d arrays. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]]) + >>> x + array([[3, 0, 0], + [0, 4, 0], + [5, 6, 0]]) + >>> np.nonzero(x) + (array([0, 1, 2, 2]), array([0, 1, 0, 1])) + + >>> x[np.nonzero(x)] + array([3, 4, 5, 6]) + >>> np.transpose(np.nonzero(x)) + array([[0, 0], + [1, 1], + [2, 0], + [2, 1]]) + + A common use for ``nonzero`` is to find the indices of an array, where + a condition is True. Given an array `a`, the condition `a` > 3 is a + boolean array and since False is interpreted as 0, np.nonzero(a > 3) + yields the indices of the `a` where the condition is true. + + >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> a > 3 + array([[False, False, False], + [ True, True, True], + [ True, True, True]]) + >>> np.nonzero(a > 3) + (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) + + Using this result to index `a` is equivalent to using the mask directly: + + >>> a[np.nonzero(a > 3)] + array([4, 5, 6, 7, 8, 9]) + >>> a[a > 3] # prefer this spelling + array([4, 5, 6, 7, 8, 9]) + + ``nonzero`` can also be called as a method of the array. + + >>> (a > 3).nonzero() + (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) + + """ + return _wrapfunc(a, 'nonzero') + + +def _shape_dispatcher(a): + return (a,) + + +@array_function_dispatch(_shape_dispatcher) +def shape(a): + """ + Return the shape of an array. + + Parameters + ---------- + a : array_like + Input array. + + Returns + ------- + shape : tuple of ints + The elements of the shape tuple give the lengths of the + corresponding array dimensions. + + See Also + -------- + len : ``len(a)`` is equivalent to ``np.shape(a)[0]`` for N-D arrays with + ``N>=1``. + ndarray.shape : Equivalent array method. + + Examples + -------- + >>> import numpy as np + >>> np.shape(np.eye(3)) + (3, 3) + >>> np.shape([[1, 3]]) + (1, 2) + >>> np.shape([0]) + (1,) + >>> np.shape(0) + () + + >>> a = np.array([(1, 2), (3, 4), (5, 6)], + ... dtype=[('x', 'i4'), ('y', 'i4')]) + >>> np.shape(a) + (3,) + >>> a.shape + (3,) + + """ + try: + result = a.shape + except AttributeError: + result = asarray(a).shape + return result + + +def _compress_dispatcher(condition, a, axis=None, out=None): + return (condition, a, out) + + +@array_function_dispatch(_compress_dispatcher) +def compress(condition, a, axis=None, out=None): + """ + Return selected slices of an array along given axis. + + When working along a given axis, a slice along that axis is returned in + `output` for each index where `condition` evaluates to True. When + working on a 1-D array, `compress` is equivalent to `extract`. + + Parameters + ---------- + condition : 1-D array of bools + Array that selects which entries to return. If len(condition) + is less than the size of `a` along the given axis, then output is + truncated to the length of the condition array. + a : array_like + Array from which to extract a part. + axis : int, optional + Axis along which to take slices. If None (default), work on the + flattened array. + out : ndarray, optional + Output array. Its type is preserved and it must be of the right + shape to hold the output. + + Returns + ------- + compressed_array : ndarray + A copy of `a` without the slices along axis for which `condition` + is false. + + See Also + -------- + take, choose, diag, diagonal, select + ndarray.compress : Equivalent method in ndarray + extract : Equivalent method when working on 1-D arrays + :ref:`ufuncs-output-type` + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4], [5, 6]]) + >>> a + array([[1, 2], + [3, 4], + [5, 6]]) + >>> np.compress([0, 1], a, axis=0) + array([[3, 4]]) + >>> np.compress([False, True, True], a, axis=0) + array([[3, 4], + [5, 6]]) + >>> np.compress([False, True], a, axis=1) + array([[2], + [4], + [6]]) + + Working on the flattened array does not return slices along an axis but + selects elements. + + >>> np.compress([False, True], a) + array([2]) + + """ + return _wrapfunc(a, 'compress', condition, axis=axis, out=out) + + +def _clip_dispatcher(a, a_min=None, a_max=None, out=None, *, min=None, + max=None, **kwargs): + return (a, a_min, a_max, out, min, max) + + +@array_function_dispatch(_clip_dispatcher) +def clip(a, a_min=np._NoValue, a_max=np._NoValue, out=None, *, + min=np._NoValue, max=np._NoValue, **kwargs): + """ + Clip (limit) the values in an array. + + Given an interval, values outside the interval are clipped to + the interval edges. For example, if an interval of ``[0, 1]`` + is specified, values smaller than 0 become 0, and values larger + than 1 become 1. + + Equivalent to but faster than ``np.minimum(a_max, np.maximum(a, a_min))``. + + No check is performed to ensure ``a_min < a_max``. + + Parameters + ---------- + a : array_like + Array containing elements to clip. + a_min, a_max : array_like or None + Minimum and maximum value. If ``None``, clipping is not performed on + the corresponding edge. If both ``a_min`` and ``a_max`` are ``None``, + the elements of the returned array stay the same. Both are broadcasted + against ``a``. + out : ndarray, optional + The results will be placed in this array. It may be the input + array for in-place clipping. `out` must be of the right shape + to hold the output. Its type is preserved. + min, max : array_like or None + Array API compatible alternatives for ``a_min`` and ``a_max`` + arguments. Either ``a_min`` and ``a_max`` or ``min`` and ``max`` + can be passed at the same time. Default: ``None``. + + .. versionadded:: 2.1.0 + **kwargs + For other keyword-only arguments, see the + :ref:`ufunc docs `. + + Returns + ------- + clipped_array : ndarray + An array with the elements of `a`, but where values + < `a_min` are replaced with `a_min`, and those > `a_max` + with `a_max`. + + See Also + -------- + :ref:`ufuncs-output-type` + + Notes + ----- + When `a_min` is greater than `a_max`, `clip` returns an + array in which all values are equal to `a_max`, + as shown in the second example. + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(10) + >>> a + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.clip(a, 1, 8) + array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8]) + >>> np.clip(a, 8, 1) + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) + >>> np.clip(a, 3, 6, out=a) + array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6]) + >>> a + array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6]) + >>> a = np.arange(10) + >>> a + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.clip(a, [3, 4, 1, 1, 1, 4, 4, 4, 4, 4], 8) + array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8]) + + """ + if a_min is np._NoValue and a_max is np._NoValue: + a_min = None if min is np._NoValue else min + a_max = None if max is np._NoValue else max + elif a_min is np._NoValue: + raise TypeError("clip() missing 1 required positional " + "argument: 'a_min'") + elif a_max is np._NoValue: + raise TypeError("clip() missing 1 required positional " + "argument: 'a_max'") + elif min is not np._NoValue or max is not np._NoValue: + raise ValueError("Passing `min` or `max` keyword argument when " + "`a_min` and `a_max` are provided is forbidden.") + + return _wrapfunc(a, 'clip', a_min, a_max, out=out, **kwargs) + + +def _sum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, + initial=None, where=None): + return (a, out) + + +@array_function_dispatch(_sum_dispatcher) +def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, + initial=np._NoValue, where=np._NoValue): + """ + Sum of array elements over a given axis. + + Parameters + ---------- + a : array_like + Elements to sum. + axis : None or int or tuple of ints, optional + Axis or axes along which a sum is performed. The default, + axis=None, will sum all of the elements of the input array. If + axis is negative it counts from the last to the first axis. If + axis is a tuple of ints, a sum is performed on all of the axes + specified in the tuple instead of a single axis or all the axes as + before. + dtype : dtype, optional + The type of the returned array and of the accumulator in which the + elements are summed. The dtype of `a` is used by default unless `a` + has an integer dtype of less precision than the default platform + integer. In that case, if `a` is signed then the platform integer + is used while if `a` is unsigned then an unsigned integer of the + same precision as the platform integer is used. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output, but the type of the output + values will be cast if necessary. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `sum` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + initial : scalar, optional + Starting value for the sum. See `~numpy.ufunc.reduce` for details. + where : array_like of bool, optional + Elements to include in the sum. See `~numpy.ufunc.reduce` for details. + + Returns + ------- + sum_along_axis : ndarray + An array with the same shape as `a`, with the specified + axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar + is returned. If an output array is specified, a reference to + `out` is returned. + + See Also + -------- + ndarray.sum : Equivalent method. + add: ``numpy.add.reduce`` equivalent function. + cumsum : Cumulative sum of array elements. + trapezoid : Integration of array values using composite trapezoidal rule. + + mean, average + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + The sum of an empty array is the neutral element 0: + + >>> np.sum([]) + 0.0 + + For floating point numbers the numerical precision of sum (and + ``np.add.reduce``) is in general limited by directly adding each number + individually to the result causing rounding errors in every step. + However, often numpy will use a numerically better approach (partial + pairwise summation) leading to improved precision in many use-cases. + This improved precision is always provided when no ``axis`` is given. + When ``axis`` is given, it will depend on which axis is summed. + Technically, to provide the best speed possible, the improved precision + is only used when the summation is along the fast axis in memory. + Note that the exact precision may vary depending on other parameters. + In contrast to NumPy, Python's ``math.fsum`` function uses a slower but + more precise approach to summation. + Especially when summing a large number of lower precision floating point + numbers, such as ``float32``, numerical errors can become significant. + In such cases it can be advisable to use `dtype="float64"` to use a higher + precision for the output. + + Examples + -------- + >>> import numpy as np + >>> np.sum([0.5, 1.5]) + 2.0 + >>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32) + np.int32(1) + >>> np.sum([[0, 1], [0, 5]]) + 6 + >>> np.sum([[0, 1], [0, 5]], axis=0) + array([0, 6]) + >>> np.sum([[0, 1], [0, 5]], axis=1) + array([1, 5]) + >>> np.sum([[0, 1], [np.nan, 5]], where=[False, True], axis=1) + array([1., 5.]) + + If the accumulator is too small, overflow occurs: + + >>> np.ones(128, dtype=np.int8).sum(dtype=np.int8) + np.int8(-128) + + You can also start the sum with a value other than zero: + + >>> np.sum([10], initial=5) + 15 + """ + if isinstance(a, _gentype): + # 2018-02-25, 1.15.0 + raise TypeError( + "Calling np.sum(generator) is deprecated." + "Use np.sum(np.fromiter(generator)) or " + "the python sum builtin instead.", + ) + + return _wrapreduction( + a, np.add, 'sum', axis, dtype, out, + keepdims=keepdims, initial=initial, where=where + ) + + +def _any_dispatcher(a, axis=None, out=None, keepdims=None, *, + where=np._NoValue): + return (a, where, out) + + +@array_function_dispatch(_any_dispatcher) +def any(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue): + """ + Test whether any array element along a given axis evaluates to True. + + Returns single boolean if `axis` is ``None`` + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : None or int or tuple of ints, optional + Axis or axes along which a logical OR reduction is performed. + The default (``axis=None``) is to perform a logical OR over all + the dimensions of the input array. `axis` may be negative, in + which case it counts from the last to the first axis. If this + is a tuple of ints, a reduction is performed on multiple + axes, instead of a single axis or all the axes as before. + out : ndarray, optional + Alternate output array in which to place the result. It must have + the same shape as the expected output and its type is preserved + (e.g., if it is of type float, then it will remain so, returning + 1.0 for True and 0.0 for False, regardless of the type of `a`). + See :ref:`ufuncs-output-type` for more details. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `any` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + where : array_like of bool, optional + Elements to include in checking for any `True` values. + See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.20.0 + + Returns + ------- + any : bool or ndarray + A new boolean or `ndarray` is returned unless `out` is specified, + in which case a reference to `out` is returned. + + See Also + -------- + ndarray.any : equivalent method + + all : Test whether all elements along a given axis evaluate to True. + + Notes + ----- + Not a Number (NaN), positive infinity and negative infinity evaluate + to `True` because these are not equal to zero. + + .. versionchanged:: 2.0 + Before NumPy 2.0, ``any`` did not return booleans for object dtype + input arrays. + This behavior is still available via ``np.logical_or.reduce``. + + Examples + -------- + >>> import numpy as np + >>> np.any([[True, False], [True, True]]) + True + + >>> np.any([[True, False, True ], + ... [False, False, False]], axis=0) + array([ True, False, True]) + + >>> np.any([-1, 0, 5]) + True + + >>> np.any([[np.nan], [np.inf]], axis=1, keepdims=True) + array([[ True], + [ True]]) + + >>> np.any([[True, False], [False, False]], where=[[False], [True]]) + False + + >>> a = np.array([[1, 0, 0], + ... [0, 0, 1], + ... [0, 0, 0]]) + >>> np.any(a, axis=0) + array([ True, False, True]) + >>> np.any(a, axis=1) + array([ True, True, False]) + + >>> o=np.array(False) + >>> z=np.any([-1, 4, 5], out=o) + >>> z, o + (array(True), array(True)) + >>> # Check now that z is a reference to o + >>> z is o + True + >>> id(z), id(o) # identity of z and o # doctest: +SKIP + (191614240, 191614240) + + """ + return _wrapreduction_any_all(a, np.logical_or, 'any', axis, out, + keepdims=keepdims, where=where) + + +def _all_dispatcher(a, axis=None, out=None, keepdims=None, *, + where=None): + return (a, where, out) + + +@array_function_dispatch(_all_dispatcher) +def all(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue): + """ + Test whether all array elements along a given axis evaluate to True. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : None or int or tuple of ints, optional + Axis or axes along which a logical AND reduction is performed. + The default (``axis=None``) is to perform a logical AND over all + the dimensions of the input array. `axis` may be negative, in + which case it counts from the last to the first axis. If this + is a tuple of ints, a reduction is performed on multiple + axes, instead of a single axis or all the axes as before. + out : ndarray, optional + Alternate output array in which to place the result. + It must have the same shape as the expected output and its + type is preserved (e.g., if ``dtype(out)`` is float, the result + will consist of 0.0's and 1.0's). See :ref:`ufuncs-output-type` + for more details. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `all` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + where : array_like of bool, optional + Elements to include in checking for all `True` values. + See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.20.0 + + Returns + ------- + all : ndarray, bool + A new boolean or array is returned unless `out` is specified, + in which case a reference to `out` is returned. + + See Also + -------- + ndarray.all : equivalent method + + any : Test whether any element along a given axis evaluates to True. + + Notes + ----- + Not a Number (NaN), positive infinity and negative infinity + evaluate to `True` because these are not equal to zero. + + .. versionchanged:: 2.0 + Before NumPy 2.0, ``all`` did not return booleans for object dtype + input arrays. + This behavior is still available via ``np.logical_and.reduce``. + + Examples + -------- + >>> import numpy as np + >>> np.all([[True,False],[True,True]]) + False + + >>> np.all([[True,False],[True,True]], axis=0) + array([ True, False]) + + >>> np.all([-1, 4, 5]) + True + + >>> np.all([1.0, np.nan]) + True + + >>> np.all([[True, True], [False, True]], where=[[True], [False]]) + True + + >>> o=np.array(False) + >>> z=np.all([-1, 4, 5], out=o) + >>> id(z), id(o), z + (28293632, 28293632, array(True)) # may vary + + """ + return _wrapreduction_any_all(a, np.logical_and, 'all', axis, out, + keepdims=keepdims, where=where) + + +def _cumulative_func(x, func, axis, dtype, out, include_initial): + x = np.atleast_1d(x) + x_ndim = x.ndim + if axis is None: + if x_ndim >= 2: + raise ValueError("For arrays which have more than one dimension " + "``axis`` argument is required.") + axis = 0 + + if out is not None and include_initial: + item = [slice(None)] * x_ndim + item[axis] = slice(1, None) + func.accumulate(x, axis=axis, dtype=dtype, out=out[tuple(item)]) + item[axis] = 0 + out[tuple(item)] = func.identity + return out + + res = func.accumulate(x, axis=axis, dtype=dtype, out=out) + if include_initial: + initial_shape = list(x.shape) + initial_shape[axis] = 1 + res = np.concat( + [np.full_like(res, func.identity, shape=initial_shape), res], + axis=axis, + ) + + return res + + +def _cumulative_prod_dispatcher(x, /, *, axis=None, dtype=None, out=None, + include_initial=None): + return (x, out) + + +@array_function_dispatch(_cumulative_prod_dispatcher) +def cumulative_prod(x, /, *, axis=None, dtype=None, out=None, + include_initial=False): + """ + Return the cumulative product of elements along a given axis. + + This function is an Array API compatible alternative to `numpy.cumprod`. + + Parameters + ---------- + x : array_like + Input array. + axis : int, optional + Axis along which the cumulative product is computed. The default + (None) is only allowed for one-dimensional arrays. For arrays + with more than one dimension ``axis`` is required. + dtype : dtype, optional + Type of the returned array, as well as of the accumulator in which + the elements are multiplied. If ``dtype`` is not specified, it + defaults to the dtype of ``x``, unless ``x`` has an integer dtype + with a precision less than that of the default platform integer. + In that case, the default platform integer is used instead. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type of the resulting values will be cast if necessary. + See :ref:`ufuncs-output-type` for more details. + include_initial : bool, optional + Boolean indicating whether to include the initial value (ones) as + the first value in the output. With ``include_initial=True`` + the shape of the output is different than the shape of the input. + Default: ``False``. + + Returns + ------- + cumulative_prod_along_axis : ndarray + A new array holding the result is returned unless ``out`` is + specified, in which case a reference to ``out`` is returned. The + result has the same shape as ``x`` if ``include_initial=False``. + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + Examples + -------- + >>> a = np.array([1, 2, 3]) + >>> np.cumulative_prod(a) # intermediate results 1, 1*2 + ... # total product 1*2*3 = 6 + array([1, 2, 6]) + >>> a = np.array([1, 2, 3, 4, 5, 6]) + >>> np.cumulative_prod(a, dtype=float) # specify type of output + array([ 1., 2., 6., 24., 120., 720.]) + + The cumulative product for each column (i.e., over the rows) of ``b``: + + >>> b = np.array([[1, 2, 3], [4, 5, 6]]) + >>> np.cumulative_prod(b, axis=0) + array([[ 1, 2, 3], + [ 4, 10, 18]]) + + The cumulative product for each row (i.e. over the columns) of ``b``: + + >>> np.cumulative_prod(b, axis=1) + array([[ 1, 2, 6], + [ 4, 20, 120]]) + + """ + return _cumulative_func(x, um.multiply, axis, dtype, out, include_initial) + + +def _cumulative_sum_dispatcher(x, /, *, axis=None, dtype=None, out=None, + include_initial=None): + return (x, out) + + +@array_function_dispatch(_cumulative_sum_dispatcher) +def cumulative_sum(x, /, *, axis=None, dtype=None, out=None, + include_initial=False): + """ + Return the cumulative sum of the elements along a given axis. + + This function is an Array API compatible alternative to `numpy.cumsum`. + + Parameters + ---------- + x : array_like + Input array. + axis : int, optional + Axis along which the cumulative sum is computed. The default + (None) is only allowed for one-dimensional arrays. For arrays + with more than one dimension ``axis`` is required. + dtype : dtype, optional + Type of the returned array and of the accumulator in which the + elements are summed. If ``dtype`` is not specified, it defaults + to the dtype of ``x``, unless ``x`` has an integer dtype with + a precision less than that of the default platform integer. + In that case, the default platform integer is used. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. See :ref:`ufuncs-output-type` + for more details. + include_initial : bool, optional + Boolean indicating whether to include the initial value (zeros) as + the first value in the output. With ``include_initial=True`` + the shape of the output is different than the shape of the input. + Default: ``False``. + + Returns + ------- + cumulative_sum_along_axis : ndarray + A new array holding the result is returned unless ``out`` is + specified, in which case a reference to ``out`` is returned. The + result has the same shape as ``x`` if ``include_initial=False``. + + See Also + -------- + sum : Sum array elements. + trapezoid : Integration of array values using composite trapezoidal rule. + diff : Calculate the n-th discrete difference along given axis. + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + ``cumulative_sum(a)[-1]`` may not be equal to ``sum(a)`` for + floating-point values since ``sum`` may use a pairwise summation routine, + reducing the roundoff-error. See `sum` for more information. + + Examples + -------- + >>> a = np.array([1, 2, 3, 4, 5, 6]) + >>> a + array([1, 2, 3, 4, 5, 6]) + >>> np.cumulative_sum(a) + array([ 1, 3, 6, 10, 15, 21]) + >>> np.cumulative_sum(a, dtype=float) # specifies type of output value(s) + array([ 1., 3., 6., 10., 15., 21.]) + + >>> b = np.array([[1, 2, 3], [4, 5, 6]]) + >>> np.cumulative_sum(b,axis=0) # sum over rows for each of the 3 columns + array([[1, 2, 3], + [5, 7, 9]]) + >>> np.cumulative_sum(b,axis=1) # sum over columns for each of the 2 rows + array([[ 1, 3, 6], + [ 4, 9, 15]]) + + ``cumulative_sum(c)[-1]`` may not be equal to ``sum(c)`` + + >>> c = np.array([1, 2e-9, 3e-9] * 1000000) + >>> np.cumulative_sum(c)[-1] + 1000000.0050045159 + >>> c.sum() + 1000000.0050000029 + + """ + return _cumulative_func(x, um.add, axis, dtype, out, include_initial) + + +def _cumsum_dispatcher(a, axis=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_cumsum_dispatcher) +def cumsum(a, axis=None, dtype=None, out=None): + """ + Return the cumulative sum of the elements along a given axis. + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + Axis along which the cumulative sum is computed. The default + (None) is to compute the cumsum over the flattened array. + dtype : dtype, optional + Type of the returned array and of the accumulator in which the + elements are summed. If `dtype` is not specified, it defaults + to the dtype of `a`, unless `a` has an integer dtype with a + precision less than that of the default platform integer. In + that case, the default platform integer is used. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. See :ref:`ufuncs-output-type` + for more details. + + Returns + ------- + cumsum_along_axis : ndarray. + A new array holding the result is returned unless `out` is + specified, in which case a reference to `out` is returned. The + result has the same size as `a`, and the same shape as `a` if + `axis` is not None or `a` is a 1-d array. + + See Also + -------- + cumulative_sum : Array API compatible alternative for ``cumsum``. + sum : Sum array elements. + trapezoid : Integration of array values using composite trapezoidal rule. + diff : Calculate the n-th discrete difference along given axis. + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + ``cumsum(a)[-1]`` may not be equal to ``sum(a)`` for floating-point + values since ``sum`` may use a pairwise summation routine, reducing + the roundoff-error. See `sum` for more information. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1,2,3], [4,5,6]]) + >>> a + array([[1, 2, 3], + [4, 5, 6]]) + >>> np.cumsum(a) + array([ 1, 3, 6, 10, 15, 21]) + >>> np.cumsum(a, dtype=float) # specifies type of output value(s) + array([ 1., 3., 6., 10., 15., 21.]) + + >>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns + array([[1, 2, 3], + [5, 7, 9]]) + >>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows + array([[ 1, 3, 6], + [ 4, 9, 15]]) + + ``cumsum(b)[-1]`` may not be equal to ``sum(b)`` + + >>> b = np.array([1, 2e-9, 3e-9] * 1000000) + >>> b.cumsum()[-1] + 1000000.0050045159 + >>> b.sum() + 1000000.0050000029 + + """ + return _wrapfunc(a, 'cumsum', axis=axis, dtype=dtype, out=out) + + +def _ptp_dispatcher(a, axis=None, out=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_ptp_dispatcher) +def ptp(a, axis=None, out=None, keepdims=np._NoValue): + """ + Range of values (maximum - minimum) along an axis. + + The name of the function comes from the acronym for 'peak to peak'. + + .. warning:: + `ptp` preserves the data type of the array. This means the + return value for an input of signed integers with n bits + (e.g. `numpy.int8`, `numpy.int16`, etc) is also a signed integer + with n bits. In that case, peak-to-peak values greater than + ``2**(n-1)-1`` will be returned as negative values. An example + with a work-around is shown below. + + Parameters + ---------- + a : array_like + Input values. + axis : None or int or tuple of ints, optional + Axis along which to find the peaks. By default, flatten the + array. `axis` may be negative, in + which case it counts from the last to the first axis. + If this is a tuple of ints, a reduction is performed on multiple + axes, instead of a single axis or all the axes as before. + out : array_like + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type of the output values will be cast if necessary. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `ptp` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + Returns + ------- + ptp : ndarray or scalar + The range of a given array - `scalar` if array is one-dimensional + or a new array holding the result along the given axis + + Examples + -------- + >>> import numpy as np + >>> x = np.array([[4, 9, 2, 10], + ... [6, 9, 7, 12]]) + + >>> np.ptp(x, axis=1) + array([8, 6]) + + >>> np.ptp(x, axis=0) + array([2, 0, 5, 2]) + + >>> np.ptp(x) + 10 + + This example shows that a negative value can be returned when + the input is an array of signed integers. + + >>> y = np.array([[1, 127], + ... [0, 127], + ... [-1, 127], + ... [-2, 127]], dtype=np.int8) + >>> np.ptp(y, axis=1) + array([ 126, 127, -128, -127], dtype=int8) + + A work-around is to use the `view()` method to view the result as + unsigned integers with the same bit width: + + >>> np.ptp(y, axis=1).view(np.uint8) + array([126, 127, 128, 129], dtype=uint8) + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + return _methods._ptp(a, axis=axis, out=out, **kwargs) + + +def _max_dispatcher(a, axis=None, out=None, keepdims=None, initial=None, + where=None): + return (a, out) + + +@array_function_dispatch(_max_dispatcher) +@set_module('numpy') +def max(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, + where=np._NoValue): + """ + Return the maximum of an array or maximum along an axis. + + Parameters + ---------- + a : array_like + Input data. + axis : None or int or tuple of ints, optional + Axis or axes along which to operate. By default, flattened input is + used. If this is a tuple of ints, the maximum is selected over + multiple axes, instead of a single axis or all the axes as before. + + out : ndarray, optional + Alternative output array in which to place the result. Must + be of the same shape and buffer length as the expected output. + See :ref:`ufuncs-output-type` for more details. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the ``max`` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + initial : scalar, optional + The minimum value of an output element. Must be present to allow + computation on empty slice. See `~numpy.ufunc.reduce` for details. + + where : array_like of bool, optional + Elements to compare for the maximum. See `~numpy.ufunc.reduce` + for details. + + Returns + ------- + max : ndarray or scalar + Maximum of `a`. If `axis` is None, the result is a scalar value. + If `axis` is an int, the result is an array of dimension + ``a.ndim - 1``. If `axis` is a tuple, the result is an array of + dimension ``a.ndim - len(axis)``. + + See Also + -------- + amin : + The minimum value of an array along a given axis, propagating any NaNs. + nanmax : + The maximum value of an array along a given axis, ignoring any NaNs. + maximum : + Element-wise maximum of two arrays, propagating any NaNs. + fmax : + Element-wise maximum of two arrays, ignoring any NaNs. + argmax : + Return the indices of the maximum values. + + nanmin, minimum, fmin + + Notes + ----- + NaN values are propagated, that is if at least one item is NaN, the + corresponding max value will be NaN as well. To ignore NaN values + (MATLAB behavior), please use nanmax. + + Don't use `~numpy.max` for element-wise comparison of 2 arrays; when + ``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than + ``max(a, axis=0)``. + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(4).reshape((2,2)) + >>> a + array([[0, 1], + [2, 3]]) + >>> np.max(a) # Maximum of the flattened array + 3 + >>> np.max(a, axis=0) # Maxima along the first axis + array([2, 3]) + >>> np.max(a, axis=1) # Maxima along the second axis + array([1, 3]) + >>> np.max(a, where=[False, True], initial=-1, axis=0) + array([-1, 3]) + >>> b = np.arange(5, dtype=float) + >>> b[2] = np.nan + >>> np.max(b) + np.float64(nan) + >>> np.max(b, where=~np.isnan(b), initial=-1) + 4.0 + >>> np.nanmax(b) + 4.0 + + You can use an initial value to compute the maximum of an empty slice, or + to initialize it to a different value: + + >>> np.max([[-50], [10]], axis=-1, initial=0) + array([ 0, 10]) + + Notice that the initial value is used as one of the elements for which the + maximum is determined, unlike for the default argument Python's max + function, which is only used for empty iterables. + + >>> np.max([5], initial=6) + 6 + >>> max([5], default=6) + 5 + """ + return _wrapreduction(a, np.maximum, 'max', axis, None, out, + keepdims=keepdims, initial=initial, where=where) + + +@array_function_dispatch(_max_dispatcher) +def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, + where=np._NoValue): + """ + Return the maximum of an array or maximum along an axis. + + `amax` is an alias of `~numpy.max`. + + See Also + -------- + max : alias of this function + ndarray.max : equivalent method + """ + return _wrapreduction(a, np.maximum, 'max', axis, None, out, + keepdims=keepdims, initial=initial, where=where) + + +def _min_dispatcher(a, axis=None, out=None, keepdims=None, initial=None, + where=None): + return (a, out) + + +@array_function_dispatch(_min_dispatcher) +def min(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, + where=np._NoValue): + """ + Return the minimum of an array or minimum along an axis. + + Parameters + ---------- + a : array_like + Input data. + axis : None or int or tuple of ints, optional + Axis or axes along which to operate. By default, flattened input is + used. + + If this is a tuple of ints, the minimum is selected over multiple axes, + instead of a single axis or all the axes as before. + out : ndarray, optional + Alternative output array in which to place the result. Must + be of the same shape and buffer length as the expected output. + See :ref:`ufuncs-output-type` for more details. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the ``min`` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + initial : scalar, optional + The maximum value of an output element. Must be present to allow + computation on empty slice. See `~numpy.ufunc.reduce` for details. + + where : array_like of bool, optional + Elements to compare for the minimum. See `~numpy.ufunc.reduce` + for details. + + Returns + ------- + min : ndarray or scalar + Minimum of `a`. If `axis` is None, the result is a scalar value. + If `axis` is an int, the result is an array of dimension + ``a.ndim - 1``. If `axis` is a tuple, the result is an array of + dimension ``a.ndim - len(axis)``. + + See Also + -------- + amax : + The maximum value of an array along a given axis, propagating any NaNs. + nanmin : + The minimum value of an array along a given axis, ignoring any NaNs. + minimum : + Element-wise minimum of two arrays, propagating any NaNs. + fmin : + Element-wise minimum of two arrays, ignoring any NaNs. + argmin : + Return the indices of the minimum values. + + nanmax, maximum, fmax + + Notes + ----- + NaN values are propagated, that is if at least one item is NaN, the + corresponding min value will be NaN as well. To ignore NaN values + (MATLAB behavior), please use nanmin. + + Don't use `~numpy.min` for element-wise comparison of 2 arrays; when + ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than + ``min(a, axis=0)``. + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(4).reshape((2,2)) + >>> a + array([[0, 1], + [2, 3]]) + >>> np.min(a) # Minimum of the flattened array + 0 + >>> np.min(a, axis=0) # Minima along the first axis + array([0, 1]) + >>> np.min(a, axis=1) # Minima along the second axis + array([0, 2]) + >>> np.min(a, where=[False, True], initial=10, axis=0) + array([10, 1]) + + >>> b = np.arange(5, dtype=float) + >>> b[2] = np.nan + >>> np.min(b) + np.float64(nan) + >>> np.min(b, where=~np.isnan(b), initial=10) + 0.0 + >>> np.nanmin(b) + 0.0 + + >>> np.min([[-50], [10]], axis=-1, initial=0) + array([-50, 0]) + + Notice that the initial value is used as one of the elements for which the + minimum is determined, unlike for the default argument Python's max + function, which is only used for empty iterables. + + Notice that this isn't the same as Python's ``default`` argument. + + >>> np.min([6], initial=5) + 5 + >>> min([6], default=5) + 6 + """ + return _wrapreduction(a, np.minimum, 'min', axis, None, out, + keepdims=keepdims, initial=initial, where=where) + + +@array_function_dispatch(_min_dispatcher) +def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, + where=np._NoValue): + """ + Return the minimum of an array or minimum along an axis. + + `amin` is an alias of `~numpy.min`. + + See Also + -------- + min : alias of this function + ndarray.min : equivalent method + """ + return _wrapreduction(a, np.minimum, 'min', axis, None, out, + keepdims=keepdims, initial=initial, where=where) + + +def _prod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, + initial=None, where=None): + return (a, out) + + +@array_function_dispatch(_prod_dispatcher) +def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, + initial=np._NoValue, where=np._NoValue): + """ + Return the product of array elements over a given axis. + + Parameters + ---------- + a : array_like + Input data. + axis : None or int or tuple of ints, optional + Axis or axes along which a product is performed. The default, + axis=None, will calculate the product of all the elements in the + input array. If axis is negative it counts from the last to the + first axis. + + If axis is a tuple of ints, a product is performed on all of the + axes specified in the tuple instead of a single axis or all the + axes as before. + dtype : dtype, optional + The type of the returned array, as well as of the accumulator in + which the elements are multiplied. The dtype of `a` is used by + default unless `a` has an integer dtype of less precision than the + default platform integer. In that case, if `a` is signed then the + platform integer is used while if `a` is unsigned then an unsigned + integer of the same precision as the platform integer is used. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output, but the type of the output + values will be cast if necessary. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in the + result as dimensions with size one. With this option, the result + will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `prod` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + initial : scalar, optional + The starting value for this product. See `~numpy.ufunc.reduce` + for details. + where : array_like of bool, optional + Elements to include in the product. See `~numpy.ufunc.reduce` + for details. + + Returns + ------- + product_along_axis : ndarray, see `dtype` parameter above. + An array shaped as `a` but with the specified axis removed. + Returns a reference to `out` if specified. + + See Also + -------- + ndarray.prod : equivalent method + :ref:`ufuncs-output-type` + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. That means that, on a 32-bit platform: + + >>> x = np.array([536870910, 536870910, 536870910, 536870910]) + >>> np.prod(x) + 16 # may vary + + The product of an empty array is the neutral element 1: + + >>> np.prod([]) + 1.0 + + Examples + -------- + By default, calculate the product of all elements: + + >>> import numpy as np + >>> np.prod([1.,2.]) + 2.0 + + Even when the input array is two-dimensional: + + >>> a = np.array([[1., 2.], [3., 4.]]) + >>> np.prod(a) + 24.0 + + But we can also specify the axis over which to multiply: + + >>> np.prod(a, axis=1) + array([ 2., 12.]) + >>> np.prod(a, axis=0) + array([3., 8.]) + + Or select specific elements to include: + + >>> np.prod([1., np.nan, 3.], where=[True, False, True]) + 3.0 + + If the type of `x` is unsigned, then the output type is + the unsigned platform integer: + + >>> x = np.array([1, 2, 3], dtype=np.uint8) + >>> np.prod(x).dtype == np.uint + True + + If `x` is of a signed integer type, then the output type + is the default platform integer: + + >>> x = np.array([1, 2, 3], dtype=np.int8) + >>> np.prod(x).dtype == int + True + + You can also start the product with a value other than one: + + >>> np.prod([1, 2], initial=5) + 10 + """ + return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out, + keepdims=keepdims, initial=initial, where=where) + + +def _cumprod_dispatcher(a, axis=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_cumprod_dispatcher) +def cumprod(a, axis=None, dtype=None, out=None): + """ + Return the cumulative product of elements along a given axis. + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + Axis along which the cumulative product is computed. By default + the input is flattened. + dtype : dtype, optional + Type of the returned array, as well as of the accumulator in which + the elements are multiplied. If *dtype* is not specified, it + defaults to the dtype of `a`, unless `a` has an integer dtype with + a precision less than that of the default platform integer. In + that case, the default platform integer is used instead. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type of the resulting values will be cast if necessary. + + Returns + ------- + cumprod : ndarray + A new array holding the result is returned unless `out` is + specified, in which case a reference to out is returned. + + See Also + -------- + cumulative_prod : Array API compatible alternative for ``cumprod``. + :ref:`ufuncs-output-type` + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([1,2,3]) + >>> np.cumprod(a) # intermediate results 1, 1*2 + ... # total product 1*2*3 = 6 + array([1, 2, 6]) + >>> a = np.array([[1, 2, 3], [4, 5, 6]]) + >>> np.cumprod(a, dtype=float) # specify type of output + array([ 1., 2., 6., 24., 120., 720.]) + + The cumulative product for each column (i.e., over the rows) of `a`: + + >>> np.cumprod(a, axis=0) + array([[ 1, 2, 3], + [ 4, 10, 18]]) + + The cumulative product for each row (i.e. over the columns) of `a`: + + >>> np.cumprod(a,axis=1) + array([[ 1, 2, 6], + [ 4, 20, 120]]) + + """ + return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out) + + +def _ndim_dispatcher(a): + return (a,) + + +@array_function_dispatch(_ndim_dispatcher) +def ndim(a): + """ + Return the number of dimensions of an array. + + Parameters + ---------- + a : array_like + Input array. If it is not already an ndarray, a conversion is + attempted. + + Returns + ------- + number_of_dimensions : int + The number of dimensions in `a`. Scalars are zero-dimensional. + + See Also + -------- + ndarray.ndim : equivalent method + shape : dimensions of array + ndarray.shape : dimensions of array + + Examples + -------- + >>> import numpy as np + >>> np.ndim([[1,2,3],[4,5,6]]) + 2 + >>> np.ndim(np.array([[1,2,3],[4,5,6]])) + 2 + >>> np.ndim(1) + 0 + + """ + try: + return a.ndim + except AttributeError: + return asarray(a).ndim + + +def _size_dispatcher(a, axis=None): + return (a,) + + +@array_function_dispatch(_size_dispatcher) +def size(a, axis=None): + """ + Return the number of elements along a given axis. + + Parameters + ---------- + a : array_like + Input data. + axis : None or int or tuple of ints, optional + Axis or axes along which the elements are counted. By default, give + the total number of elements. + + .. versionchanged:: 2.4 + Extended to accept multiple axes. + + Returns + ------- + element_count : int + Number of elements along the specified axis. + + See Also + -------- + shape : dimensions of array + ndarray.shape : dimensions of array + ndarray.size : number of elements in array + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1,2,3],[4,5,6]]) + >>> np.size(a) + 6 + >>> np.size(a,axis=1) + 3 + >>> np.size(a,axis=0) + 2 + >>> np.size(a,axis=(0,1)) + 6 + + """ + if axis is None: + try: + return a.size + except AttributeError: + return asarray(a).size + else: + _shape = shape(a) + from .numeric import normalize_axis_tuple + axis = normalize_axis_tuple(axis, len(_shape), allow_duplicate=False) + return math.prod(_shape[ax] for ax in axis) + + +def _round_dispatcher(a, decimals=None, out=None): + return (a, out) + + +@array_function_dispatch(_round_dispatcher) +def round(a, decimals=0, out=None): + """ + Evenly round to the given number of decimals. + + Parameters + ---------- + a : array_like + Input data. + decimals : int, optional + Number of decimal places to round to (default: 0). If + decimals is negative, it specifies the number of positions to + the left of the decimal point. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output, but the type of the output + values will be cast if necessary. See :ref:`ufuncs-output-type` + for more details. + + Returns + ------- + rounded_array : ndarray + An array of the same type as `a`, containing the rounded values. + Unless `out` was specified, a new array is created. A reference to + the result is returned. + + The real and imaginary parts of complex numbers are rounded + separately. The result of rounding a float is a float. + + See Also + -------- + ndarray.round : equivalent method + around : an alias for this function + ceil, fix, floor, rint, trunc + + + Notes + ----- + For values exactly halfway between rounded decimal values, NumPy + rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0, + -0.5 and 0.5 round to 0.0, etc. + + ``np.round`` uses a fast but sometimes inexact algorithm to round + floating-point datatypes. For positive `decimals` it is equivalent to + ``np.true_divide(np.rint(a * 10**decimals), 10**decimals)``, which has + error due to the inexact representation of decimal fractions in the IEEE + floating point standard [1]_ and errors introduced when scaling by powers + of ten. For instance, note the extra "1" in the following: + + >>> np.round(56294995342131.5, 3) + 56294995342131.51 + + If your goal is to print such values with a fixed number of decimals, it is + preferable to use numpy's float printing routines to limit the number of + printed decimals: + + >>> np.format_float_positional(56294995342131.5, precision=3) + '56294995342131.5' + + The float printing routines use an accurate but much more computationally + demanding algorithm to compute the number of digits after the decimal + point. + + Alternatively, Python's builtin `round` function uses a more accurate + but slower algorithm for 64-bit floating point values: + + >>> round(56294995342131.5, 3) + 56294995342131.5 + >>> np.round(16.055, 2), round(16.055, 2) # equals 16.0549999999999997 + (16.06, 16.05) + + + References + ---------- + .. [1] "Lecture Notes on the Status of IEEE 754", William Kahan, + https://people.eecs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF + + Examples + -------- + >>> import numpy as np + >>> np.round([0.37, 1.64]) + array([0., 2.]) + >>> np.round([0.37, 1.64], decimals=1) + array([0.4, 1.6]) + >>> np.round([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value + array([0., 2., 2., 4., 4.]) + >>> np.round([1,2,3,11], decimals=1) # ndarray of ints is returned + array([ 1, 2, 3, 11]) + >>> np.round([1,2,3,11], decimals=-1) + array([ 0, 0, 0, 10]) + + """ + return _wrapfunc(a, 'round', decimals=decimals, out=out) + + +@array_function_dispatch(_round_dispatcher) +def around(a, decimals=0, out=None): + """ + Round an array to the given number of decimals. + + `around` is an alias of `~numpy.round`. + + See Also + -------- + ndarray.round : equivalent method + round : alias for this function + ceil, fix, floor, rint, trunc + + """ + return _wrapfunc(a, 'round', decimals=decimals, out=out) + + +def _mean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, *, + where=None): + return (a, where, out) + + +@array_function_dispatch(_mean_dispatcher) +def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, *, + where=np._NoValue): + """ + Compute the arithmetic mean along the specified axis. + + Returns the average of the array elements. The average is taken over + the flattened array by default, otherwise over the specified axis. + `float64` intermediate and return values are used for integer inputs. + + Parameters + ---------- + a : array_like + Array containing numbers whose mean is desired. If `a` is not an + array, a conversion is attempted. + axis : None or int or tuple of ints, optional + Axis or axes along which the means are computed. The default is to + compute the mean of the flattened array. + + If this is a tuple of ints, a mean is performed over multiple axes, + instead of a single axis or all the axes as before. + dtype : data-type, optional + Type to use in computing the mean. For integer inputs, the default + is `float64`; for floating point inputs, it is the same as the + input dtype. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output, but the type will be cast if necessary. + See :ref:`ufuncs-output-type` for more details. + See :ref:`ufuncs-output-type` for more details. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `mean` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + where : array_like of bool, optional + Elements to include in the mean. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.20.0 + + Returns + ------- + m : ndarray, see dtype parameter above + If `out=None`, returns a new array containing the mean values, + otherwise a reference to the output array is returned. + + See Also + -------- + average : Weighted average + std, var, nanmean, nanstd, nanvar + + Notes + ----- + The arithmetic mean is the sum of the elements along the axis divided + by the number of elements. + + Note that for floating-point input, the mean is computed using the + same precision the input has. Depending on the input data, this can + cause the results to be inaccurate, especially for `float32` (see + example below). Specifying a higher-precision accumulator using the + `dtype` keyword can alleviate this issue. + + By default, `float16` results are computed using `float32` intermediates + for extra precision. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4]]) + >>> np.mean(a) + 2.5 + >>> np.mean(a, axis=0) + array([2., 3.]) + >>> np.mean(a, axis=1) + array([1.5, 3.5]) + + In single precision, `mean` can be inaccurate: + + >>> a = np.zeros((2, 512*512), dtype=np.float32) + >>> a[0, :] = 1.0 + >>> a[1, :] = 0.1 + >>> np.mean(a) + np.float32(0.54999924) + + Computing the mean in float64 is more accurate: + + >>> np.mean(a, dtype=np.float64) + 0.55000000074505806 # may vary + + Computing the mean in timedelta64 is available: + + >>> b = np.array([1, 3], dtype="timedelta64[D]") + >>> np.mean(b) + np.timedelta64(2,'D') + + Specifying a where argument: + + >>> a = np.array([[5, 9, 13], [14, 10, 12], [11, 15, 19]]) + >>> np.mean(a) + 12.0 + >>> np.mean(a, where=[[True], [False], [False]]) + 9.0 + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if where is not np._NoValue: + kwargs['where'] = where + if type(a) is not mu.ndarray: + try: + mean = a.mean + except AttributeError: + pass + else: + return mean(axis=axis, dtype=dtype, out=out, **kwargs) + + return _methods._mean(a, axis=axis, dtype=dtype, + out=out, **kwargs) + + +def _std_dispatcher(a, axis=None, dtype=None, out=None, ddof=None, + keepdims=None, *, where=None, mean=None, correction=None): + return (a, where, out, mean) + + +@array_function_dispatch(_std_dispatcher) +def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, + where=np._NoValue, mean=np._NoValue, correction=np._NoValue): + r""" + Compute the standard deviation along the specified axis. + + Returns the standard deviation, a measure of the spread of a distribution, + of the array elements. The standard deviation is computed for the + flattened array by default, otherwise over the specified axis. + + Parameters + ---------- + a : array_like + Calculate the standard deviation of these values. + axis : None or int or tuple of ints, optional + Axis or axes along which the standard deviation is computed. The + default is to compute the standard deviation of the flattened array. + If this is a tuple of ints, a standard deviation is performed over + multiple axes, instead of a single axis or all the axes as before. + dtype : dtype, optional + Type to use in computing the standard deviation. For arrays of + integer type the default is float64, for arrays of float types it is + the same as the array type. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type (of the calculated + values) will be cast if necessary. + See :ref:`ufuncs-output-type` for more details. + ddof : {int, float}, optional + Means Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements. + By default `ddof` is zero. See Notes for details about use of `ddof`. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `std` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + where : array_like of bool, optional + Elements to include in the standard deviation. + See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.20.0 + + mean : array_like, optional + Provide the mean to prevent its recalculation. The mean should have + a shape as if it was calculated with ``keepdims=True``. + The axis for the calculation of the mean should be the same as used in + the call to this std function. + + .. versionadded:: 2.0.0 + + correction : {int, float}, optional + Array API compatible name for the ``ddof`` parameter. Only one of them + can be provided at the same time. + + .. versionadded:: 2.0.0 + + Returns + ------- + standard_deviation : ndarray, see dtype parameter above. + If `out` is None, return a new array containing the standard deviation, + otherwise return a reference to the output array. + + See Also + -------- + var, mean, nanmean, nanstd, nanvar + :ref:`ufuncs-output-type` + + Notes + ----- + There are several common variants of the array standard deviation + calculation. Assuming the input `a` is a one-dimensional NumPy array + and ``mean`` is either provided as an argument or computed as + ``a.mean()``, NumPy computes the standard deviation of an array as:: + + N = len(a) + d2 = abs(a - mean)**2 # abs is for complex `a` + var = d2.sum() / (N - ddof) # note use of `ddof` + std = var**0.5 + + Different values of the argument `ddof` are useful in different + contexts. NumPy's default ``ddof=0`` corresponds with the expression: + + .. math:: + + \sqrt{\frac{\sum_i{|a_i - \bar{a}|^2 }}{N}} + + which is sometimes called the "population standard deviation" in the field + of statistics because it applies the definition of standard deviation to + `a` as if `a` were a complete population of possible observations. + + Many other libraries define the standard deviation of an array + differently, e.g.: + + .. math:: + + \sqrt{\frac{\sum_i{|a_i - \bar{a}|^2 }}{N - 1}} + + In statistics, the resulting quantity is sometimes called the "sample + standard deviation" because if `a` is a random sample from a larger + population, this calculation provides the square root of an unbiased + estimate of the variance of the population. The use of :math:`N-1` in the + denominator is often called "Bessel's correction" because it corrects for + bias (toward lower values) in the variance estimate introduced when the + sample mean of `a` is used in place of the true mean of the population. + The resulting estimate of the standard deviation is still biased, but less + than it would have been without the correction. For this quantity, use + ``ddof=1``. + + Note that, for complex numbers, `std` takes the absolute + value before squaring, so that the result is always real and nonnegative. + + For floating-point input, the standard deviation is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for float32 (see example below). + Specifying a higher-accuracy accumulator using the `dtype` keyword can + alleviate this issue. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4]]) + >>> np.std(a) + 1.1180339887498949 # may vary + >>> np.std(a, axis=0) + array([1., 1.]) + >>> np.std(a, axis=1) + array([0.5, 0.5]) + + In single precision, std() can be inaccurate: + + >>> a = np.zeros((2, 512*512), dtype=np.float32) + >>> a[0, :] = 1.0 + >>> a[1, :] = 0.1 + >>> np.std(a) + np.float32(0.45000005) + + Computing the standard deviation in float64 is more accurate: + + >>> np.std(a, dtype=np.float64) + 0.44999999925494177 # may vary + + Specifying a where argument: + + >>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]]) + >>> np.std(a) + 2.614064523559687 # may vary + >>> np.std(a, where=[[True], [True], [False]]) + 2.0 + + Using the mean keyword to save computation time: + + >>> import numpy as np + >>> from timeit import timeit + >>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]]) + >>> mean = np.mean(a, axis=1, keepdims=True) + >>> + >>> g = globals() + >>> n = 10000 + >>> t1 = timeit("std = np.std(a, axis=1, mean=mean)", globals=g, number=n) + >>> t2 = timeit("std = np.std(a, axis=1)", globals=g, number=n) + >>> print(f'Percentage execution time saved {100*(t2-t1)/t2:.0f}%') + #doctest: +SKIP + Percentage execution time saved 30% + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if where is not np._NoValue: + kwargs['where'] = where + if mean is not np._NoValue: + kwargs['mean'] = mean + + if correction != np._NoValue: + if ddof != 0: + raise ValueError( + "ddof and correction can't be provided simultaneously." + ) + else: + ddof = correction + + if type(a) is not mu.ndarray: + try: + std = a.std + except AttributeError: + pass + else: + return std(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs) + + return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof, + **kwargs) + + +def _var_dispatcher(a, axis=None, dtype=None, out=None, ddof=None, + keepdims=None, *, where=None, mean=None, correction=None): + return (a, where, out, mean) + + +@array_function_dispatch(_var_dispatcher) +def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, + where=np._NoValue, mean=np._NoValue, correction=np._NoValue): + r""" + Compute the variance along the specified axis. + + Returns the variance of the array elements, a measure of the spread of a + distribution. The variance is computed for the flattened array by + default, otherwise over the specified axis. + + Parameters + ---------- + a : array_like + Array containing numbers whose variance is desired. If `a` is not an + array, a conversion is attempted. + axis : None or int or tuple of ints, optional + Axis or axes along which the variance is computed. The default is to + compute the variance of the flattened array. + If this is a tuple of ints, a variance is performed over multiple axes, + instead of a single axis or all the axes as before. + dtype : data-type, optional + Type to use in computing the variance. For arrays of integer type + the default is `float64`; for arrays of float types it is the same as + the array type. + out : ndarray, optional + Alternate output array in which to place the result. It must have + the same shape as the expected output, but the type is cast if + necessary. + ddof : {int, float}, optional + "Delta Degrees of Freedom": the divisor used in the calculation is + ``N - ddof``, where ``N`` represents the number of elements. By + default `ddof` is zero. See notes for details about use of `ddof`. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `var` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + where : array_like of bool, optional + Elements to include in the variance. See `~numpy.ufunc.reduce` for + details. + + .. versionadded:: 1.20.0 + + mean : array like, optional + Provide the mean to prevent its recalculation. The mean should have + a shape as if it was calculated with ``keepdims=True``. + The axis for the calculation of the mean should be the same as used in + the call to this var function. + + .. versionadded:: 2.0.0 + + correction : {int, float}, optional + Array API compatible name for the ``ddof`` parameter. Only one of them + can be provided at the same time. + + .. versionadded:: 2.0.0 + + Returns + ------- + variance : ndarray, see dtype parameter above + If ``out=None``, returns a new array containing the variance; + otherwise, a reference to the output array is returned. + + See Also + -------- + std, mean, nanmean, nanstd, nanvar + :ref:`ufuncs-output-type` + + Notes + ----- + There are several common variants of the array variance calculation. + Assuming the input `a` is a one-dimensional NumPy array and ``mean`` is + either provided as an argument or computed as ``a.mean()``, NumPy + computes the variance of an array as:: + + N = len(a) + d2 = abs(a - mean)**2 # abs is for complex `a` + var = d2.sum() / (N - ddof) # note use of `ddof` + + Different values of the argument `ddof` are useful in different + contexts. NumPy's default ``ddof=0`` corresponds with the expression: + + .. math:: + + \frac{\sum_i{|a_i - \bar{a}|^2 }}{N} + + which is sometimes called the "population variance" in the field of + statistics because it applies the definition of variance to `a` as if `a` + were a complete population of possible observations. + + Many other libraries define the variance of an array differently, e.g.: + + .. math:: + + \frac{\sum_i{|a_i - \bar{a}|^2}}{N - 1} + + In statistics, the resulting quantity is sometimes called the "sample + variance" because if `a` is a random sample from a larger population, + this calculation provides an unbiased estimate of the variance of the + population. The use of :math:`N-1` in the denominator is often called + "Bessel's correction" because it corrects for bias (toward lower values) + in the variance estimate introduced when the sample mean of `a` is used + in place of the true mean of the population. For this quantity, use + ``ddof=1``. + + Note that for complex numbers, the absolute value is taken before + squaring, so that the result is always real and nonnegative. + + For floating-point input, the variance is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for `float32` (see example + below). Specifying a higher-accuracy accumulator using the ``dtype`` + keyword can alleviate this issue. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4]]) + >>> np.var(a) + 1.25 + >>> np.var(a, axis=0) + array([1., 1.]) + >>> np.var(a, axis=1) + array([0.25, 0.25]) + + In single precision, var() can be inaccurate: + + >>> a = np.zeros((2, 512*512), dtype=np.float32) + >>> a[0, :] = 1.0 + >>> a[1, :] = 0.1 + >>> np.var(a) + np.float32(0.20250003) + + Computing the variance in float64 is more accurate: + + >>> np.var(a, dtype=np.float64) + 0.20249999932944759 # may vary + >>> ((1-0.55)**2 + (0.1-0.55)**2)/2 + 0.2025 + + Specifying a where argument: + + >>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]]) + >>> np.var(a) + 6.833333333333333 # may vary + >>> np.var(a, where=[[True], [True], [False]]) + 4.0 + + Using the mean keyword to save computation time: + + >>> import numpy as np + >>> from timeit import timeit + >>> + >>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]]) + >>> mean = np.mean(a, axis=1, keepdims=True) + >>> + >>> g = globals() + >>> n = 10000 + >>> t1 = timeit("var = np.var(a, axis=1, mean=mean)", globals=g, number=n) + >>> t2 = timeit("var = np.var(a, axis=1)", globals=g, number=n) + >>> print(f'Percentage execution time saved {100*(t2-t1)/t2:.0f}%') + #doctest: +SKIP + Percentage execution time saved 32% + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if where is not np._NoValue: + kwargs['where'] = where + if mean is not np._NoValue: + kwargs['mean'] = mean + + if correction != np._NoValue: + if ddof != 0: + raise ValueError( + "ddof and correction can't be provided simultaneously." + ) + else: + ddof = correction + + if type(a) is not mu.ndarray: + try: + var = a.var + + except AttributeError: + pass + else: + return var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs) + + return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, + **kwargs) diff --git a/local_packages/numpy/_core/fromnumeric.pyi b/local_packages/numpy/_core/fromnumeric.pyi new file mode 100644 index 0000000..2a97622 --- /dev/null +++ b/local_packages/numpy/_core/fromnumeric.pyi @@ -0,0 +1,1735 @@ +# ruff: noqa: ANN401 +from _typeshed import Incomplete +from collections.abc import Sequence +from typing import ( + Any, + Literal, + Never, + Protocol, + SupportsIndex, + TypeAlias, + TypedDict, + TypeVar, + Unpack, + overload, + type_check_only, +) + +import numpy as np +from numpy import ( + _AnyShapeT, + _CastingKind, + _ModeKind, + _OrderACF, + _OrderKACF, + _PartitionKind, + _SortKind, + _SortSide, + complexfloating, + float16, + floating, + generic, + int64, + int_, + intp, + object_, + timedelta64, + uint64, +) +from numpy._globals import _NoValueType +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _AnyShape, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt, + _ArrayLikeInt_co, + _ArrayLikeObject_co, + _ArrayLikeUInt_co, + _BoolLike_co, + _ComplexLike_co, + _DTypeLike, + _IntLike_co, + _NestedSequence, + _NumberLike_co, + _ScalarLike_co, + _ShapeLike, +) + +__all__ = [ + "all", + "amax", + "amin", + "any", + "argmax", + "argmin", + "argpartition", + "argsort", + "around", + "choose", + "clip", + "compress", + "cumprod", + "cumsum", + "cumulative_prod", + "cumulative_sum", + "diagonal", + "mean", + "max", + "min", + "matrix_transpose", + "ndim", + "nonzero", + "partition", + "prod", + "ptp", + "put", + "ravel", + "repeat", + "reshape", + "resize", + "round", + "searchsorted", + "shape", + "size", + "sort", + "squeeze", + "std", + "sum", + "swapaxes", + "take", + "trace", + "transpose", + "var", +] + +_ScalarT = TypeVar("_ScalarT", bound=generic) +_NumberOrObjectT = TypeVar("_NumberOrObjectT", bound=np.number | np.object_) +_ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) +_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) +_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], covariant=True) +_BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[np.integer | np.bool]) + +@type_check_only +class _SupportsShape(Protocol[_ShapeT_co]): + # NOTE: it matters that `self` is positional only + @property + def shape(self, /) -> _ShapeT_co: ... + +@type_check_only +class _UFuncKwargs(TypedDict, total=False): + where: _ArrayLikeBool_co | None + order: _OrderKACF + subok: bool + signature: str | tuple[str | None, ...] + casting: _CastingKind + +# a "sequence" that isn't a string, bytes, bytearray, or memoryview +_T = TypeVar("_T") +_PyArray: TypeAlias = list[_T] | tuple[_T, ...] +# `int` also covers `bool` +_PyScalar: TypeAlias = complex | bytes | str + +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 +@overload +def take( + a: _ArrayLike[_ScalarT], + indices: _IntLike_co, + axis: None = None, + out: None = None, + mode: _ModeKind = "raise", +) -> _ScalarT: ... +@overload +def take( + a: ArrayLike, + indices: _IntLike_co, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", +) -> Any: ... +@overload +def take( + a: _ArrayLike[_ScalarT], + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", +) -> NDArray[_ScalarT]: ... +@overload +def take( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", +) -> NDArray[Any]: ... +@overload +def take( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None, + out: _ArrayT, + mode: _ModeKind = "raise", +) -> _ArrayT: ... +@overload +def take( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + *, + out: _ArrayT, + mode: _ModeKind = "raise", +) -> _ArrayT: ... + +@overload +def reshape( # shape: index + a: _ArrayLike[_ScalarT], + /, + shape: SupportsIndex, + order: _OrderACF = "C", + *, + copy: bool | None = None, +) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ... +@overload +def reshape( # shape: (int, ...) @ _AnyShapeT + a: _ArrayLike[_ScalarT], + /, + shape: _AnyShapeT, + order: _OrderACF = "C", + *, + copy: bool | None = None, +) -> np.ndarray[_AnyShapeT, np.dtype[_ScalarT]]: ... +@overload # shape: Sequence[index] +def reshape( + a: _ArrayLike[_ScalarT], + /, + shape: Sequence[SupportsIndex], + order: _OrderACF = "C", + *, + copy: bool | None = None, +) -> NDArray[_ScalarT]: ... +@overload # shape: index +def reshape( + a: ArrayLike, + /, + shape: SupportsIndex, + order: _OrderACF = "C", + *, + copy: bool | None = None, +) -> np.ndarray[tuple[int], np.dtype]: ... +@overload +def reshape( # shape: (int, ...) @ _AnyShapeT + a: ArrayLike, + /, + shape: _AnyShapeT, + order: _OrderACF = "C", + *, + copy: bool | None = None, +) -> np.ndarray[_AnyShapeT, np.dtype]: ... +@overload # shape: Sequence[index] +def reshape( + a: ArrayLike, + /, + shape: Sequence[SupportsIndex], + order: _OrderACF = "C", + *, + copy: bool | None = None, +) -> NDArray[Any]: ... + +@overload +def choose( + a: _IntLike_co, + choices: ArrayLike, + out: None = None, + mode: _ModeKind = "raise", +) -> Any: ... +@overload +def choose( + a: _ArrayLikeInt_co, + choices: _ArrayLike[_ScalarT], + out: None = None, + mode: _ModeKind = "raise", +) -> NDArray[_ScalarT]: ... +@overload +def choose( + a: _ArrayLikeInt_co, + choices: ArrayLike, + out: None = None, + mode: _ModeKind = "raise", +) -> NDArray[Any]: ... +@overload +def choose( + a: _ArrayLikeInt_co, + choices: ArrayLike, + out: _ArrayT, + mode: _ModeKind = "raise", +) -> _ArrayT: ... + +# keep in sync with `ma.core.repeat` +@overload +def repeat( + a: _ArrayLike[_ScalarT], + repeats: _ArrayLikeInt_co, + axis: None = None, +) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ... +@overload +def repeat( + a: _ArrayLike[_ScalarT], + repeats: _ArrayLikeInt_co, + axis: SupportsIndex, +) -> NDArray[_ScalarT]: ... +@overload +def repeat( + a: ArrayLike, + repeats: _ArrayLikeInt_co, + axis: None = None, +) -> np.ndarray[tuple[int], np.dtype[Any]]: ... +@overload +def repeat( + a: ArrayLike, + repeats: _ArrayLikeInt_co, + axis: SupportsIndex, +) -> NDArray[Any]: ... + +# +def put( + a: NDArray[Any], + ind: _ArrayLikeInt_co, + v: ArrayLike, + mode: _ModeKind = "raise", +) -> None: ... + +# keep in sync with `ndarray.swapaxes` and `ma.core.swapaxes` +@overload +def swapaxes(a: _ArrayT, axis1: SupportsIndex, axis2: SupportsIndex) -> _ArrayT: ... +@overload +def swapaxes(a: _ArrayLike[_ScalarT], axis1: SupportsIndex, axis2: SupportsIndex) -> NDArray[_ScalarT]: ... +@overload +def swapaxes(a: ArrayLike, axis1: SupportsIndex, axis2: SupportsIndex) -> NDArray[Any]: ... + +@overload +def transpose( + a: _ArrayLike[_ScalarT], + axes: _ShapeLike | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def transpose( + a: ArrayLike, + axes: _ShapeLike | None = None, +) -> NDArray[Any]: ... + +@overload +def matrix_transpose(x: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... +@overload +def matrix_transpose(x: ArrayLike, /) -> NDArray[Any]: ... + +# +@overload +def partition( + a: _ArrayLike[_ScalarT], + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: None = None, +) -> NDArray[_ScalarT]: ... +@overload +def partition( + a: _ArrayLike[np.void], + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, +) -> NDArray[np.void]: ... +@overload +def partition( + a: ArrayLike, + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, +) -> NDArray[Any]: ... + +# +def argpartition( + a: ArrayLike, + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, +) -> NDArray[intp]: ... + +# +@overload +def sort( + a: _ArrayLike[_ScalarT], + axis: SupportsIndex | None = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + *, + stable: bool | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def sort( + a: ArrayLike, + axis: SupportsIndex | None = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + *, + stable: bool | None = None, +) -> NDArray[Any]: ... + +def argsort( + a: ArrayLike, + axis: SupportsIndex | None = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + *, + stable: bool | None = None, +) -> NDArray[intp]: ... + +@overload +def argmax( + a: ArrayLike, + axis: None = None, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., +) -> intp: ... +@overload +def argmax( + a: ArrayLike, + axis: SupportsIndex | None = None, + out: None = None, + *, + keepdims: bool | _NoValueType = ..., +) -> Any: ... +@overload +def argmax( + a: ArrayLike, + axis: SupportsIndex | None, + out: _BoolOrIntArrayT, + *, + keepdims: bool | _NoValueType = ..., +) -> _BoolOrIntArrayT: ... +@overload +def argmax( + a: ArrayLike, + axis: SupportsIndex | None = None, + *, + out: _BoolOrIntArrayT, + keepdims: bool | _NoValueType = ..., +) -> _BoolOrIntArrayT: ... + +@overload +def argmin( + a: ArrayLike, + axis: None = None, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., +) -> intp: ... +@overload +def argmin( + a: ArrayLike, + axis: SupportsIndex | None = None, + out: None = None, + *, + keepdims: bool | _NoValueType = ..., +) -> Any: ... +@overload +def argmin( + a: ArrayLike, + axis: SupportsIndex | None, + out: _BoolOrIntArrayT, + *, + keepdims: bool | _NoValueType = ..., +) -> _BoolOrIntArrayT: ... +@overload +def argmin( + a: ArrayLike, + axis: SupportsIndex | None = None, + *, + out: _BoolOrIntArrayT, + keepdims: bool | _NoValueType = ..., +) -> _BoolOrIntArrayT: ... + +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 +@overload +def searchsorted( + a: ArrayLike, + v: _ScalarLike_co, + side: _SortSide = "left", + sorter: _ArrayLikeInt_co | None = None, # 1D int array +) -> intp: ... +@overload +def searchsorted( + a: ArrayLike, + v: ArrayLike, + side: _SortSide = "left", + sorter: _ArrayLikeInt_co | None = None, # 1D int array +) -> NDArray[intp]: ... + +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 +@overload +def resize(a: _ArrayLike[_ScalarT], new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ... +@overload +def resize(a: _ArrayLike[_ScalarT], new_shape: _AnyShapeT) -> np.ndarray[_AnyShapeT, np.dtype[_ScalarT]]: ... +@overload +def resize(a: _ArrayLike[_ScalarT], new_shape: _ShapeLike) -> NDArray[_ScalarT]: ... +@overload +def resize(a: ArrayLike, new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype]: ... +@overload +def resize(a: ArrayLike, new_shape: _AnyShapeT) -> np.ndarray[_AnyShapeT, np.dtype]: ... +@overload +def resize(a: ArrayLike, new_shape: _ShapeLike) -> NDArray[Any]: ... + +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 +@overload +def squeeze( + a: _ScalarT, + axis: _ShapeLike | None = None, +) -> _ScalarT: ... +@overload +def squeeze( + a: _ArrayLike[_ScalarT], + axis: _ShapeLike | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def squeeze( + a: ArrayLike, + axis: _ShapeLike | None = None, +) -> NDArray[Any]: ... + +# keep in sync with `ma.core.diagonal` +@overload +def diagonal( + a: _ArrayLike[_ScalarT], + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, # >= 2D array +) -> NDArray[_ScalarT]: ... +@overload +def diagonal( + a: ArrayLike, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, # >= 2D array +) -> NDArray[Any]: ... + +# keep in sync with `ma.core.trace` +@overload +def trace( + a: ArrayLike, # >= 2D array + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + out: None = None, +) -> Any: ... +@overload +def trace( + a: ArrayLike, # >= 2D array + offset: SupportsIndex, + axis1: SupportsIndex, + axis2: SupportsIndex, + dtype: DTypeLike | None, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def trace( + a: ArrayLike, # >= 2D array + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, +) -> _ArrayT: ... + +_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] + +@overload +def ravel(a: _ArrayLike[_ScalarT], order: _OrderKACF = "C") -> _Array1D[_ScalarT]: ... +@overload +def ravel(a: bytes | _NestedSequence[bytes], order: _OrderKACF = "C") -> _Array1D[np.bytes_]: ... +@overload +def ravel(a: str | _NestedSequence[str], order: _OrderKACF = "C") -> _Array1D[np.str_]: ... +@overload +def ravel(a: bool | _NestedSequence[bool], order: _OrderKACF = "C") -> _Array1D[np.bool]: ... +@overload +def ravel(a: int | _NestedSequence[int], order: _OrderKACF = "C") -> _Array1D[np.int_ | Any]: ... +@overload +def ravel(a: float | _NestedSequence[float], order: _OrderKACF = "C") -> _Array1D[np.float64 | Any]: ... +@overload +def ravel(a: complex | _NestedSequence[complex], order: _OrderKACF = "C") -> _Array1D[np.complex128 | Any]: ... +@overload +def ravel(a: ArrayLike, order: _OrderKACF = "C") -> np.ndarray[tuple[int], np.dtype]: ... + +def nonzero(a: _ArrayLike[Any]) -> tuple[np.ndarray[tuple[int], np.dtype[intp]], ...]: ... + +# this prevents `Any` from being returned with Pyright +@overload +def shape(a: _SupportsShape[Never]) -> _AnyShape: ... +@overload +def shape(a: _SupportsShape[_ShapeT]) -> _ShapeT: ... +@overload +def shape(a: _PyScalar) -> tuple[()]: ... +# `collections.abc.Sequence` can't be used hesre, since `bytes` and `str` are +# subtypes of it, which would make the return types incompatible. +@overload +def shape(a: _PyArray[_PyScalar]) -> tuple[int]: ... +@overload +def shape(a: _PyArray[_PyArray[_PyScalar]]) -> tuple[int, int]: ... +# this overload will be skipped by typecheckers that don't support PEP 688 +@overload +def shape(a: memoryview | bytearray) -> tuple[int]: ... +@overload +def shape(a: ArrayLike) -> _AnyShape: ... + +@overload +def compress( + condition: _ArrayLikeBool_co, # 1D bool array + a: _ArrayLike[_ScalarT], + axis: SupportsIndex | None = None, + out: None = None, +) -> NDArray[_ScalarT]: ... +@overload +def compress( + condition: _ArrayLikeBool_co, # 1D bool array + a: ArrayLike, + axis: SupportsIndex | None = None, + out: None = None, +) -> NDArray[Any]: ... +@overload +def compress( + condition: _ArrayLikeBool_co, # 1D bool array + a: ArrayLike, + axis: SupportsIndex | None, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def compress( + condition: _ArrayLikeBool_co, # 1D bool array + a: ArrayLike, + axis: SupportsIndex | None = None, + *, + out: _ArrayT, +) -> _ArrayT: ... + +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 +@overload +def clip( + a: _ScalarT, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> _ScalarT: ... +@overload +def clip( + a: _ScalarLike_co, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> Any: ... +@overload +def clip( + a: _ArrayLike[_ScalarT], + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> NDArray[_ScalarT]: ... +@overload +def clip( + a: ArrayLike, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> NDArray[Any]: ... +@overload +def clip( + a: ArrayLike, + a_min: ArrayLike | None, + a_max: ArrayLike | None, + out: _ArrayT, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> _ArrayT: ... +@overload +def clip( + a: ArrayLike, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + *, + out: _ArrayT, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> _ArrayT: ... +@overload +def clip( + a: ArrayLike, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> Any: ... + +@overload +def sum( + a: _ArrayLike[_ScalarT], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def sum( + a: _ArrayLike[_ScalarT], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT | NDArray[_ScalarT]: ... +@overload +def sum( + a: ArrayLike, + axis: None, + dtype: _DTypeLike[_ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def sum( + a: ArrayLike, + axis: None = None, + *, + dtype: _DTypeLike[_ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: _DTypeLike[_ScalarT], + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT | NDArray[_ScalarT]: ... +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None = None, + *, + dtype: _DTypeLike[_ScalarT], + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT | NDArray[_ScalarT]: ... +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Any: ... +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... + +# keep in sync with `any` +@overload +def all( + a: ArrayLike | None, + axis: None = None, + out: None = None, + keepdims: Literal[False, 0] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.bool: ... +@overload +def all( + a: ArrayLike | None, + axis: int | tuple[int, ...] | None = None, + out: None = None, + keepdims: _BoolLike_co | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Incomplete: ... +@overload +def all( + a: ArrayLike | None, + axis: int | tuple[int, ...] | None, + out: _ArrayT, + keepdims: _BoolLike_co | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def all( + a: ArrayLike | None, + axis: int | tuple[int, ...] | None = None, + *, + out: _ArrayT, + keepdims: _BoolLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... + +# keep in sync with `all` +@overload +def any( + a: ArrayLike | None, + axis: None = None, + out: None = None, + keepdims: Literal[False, 0] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.bool: ... +@overload +def any( + a: ArrayLike | None, + axis: int | tuple[int, ...] | None = None, + out: None = None, + keepdims: _BoolLike_co | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Incomplete: ... +@overload +def any( + a: ArrayLike | None, + axis: int | tuple[int, ...] | None, + out: _ArrayT, + keepdims: _BoolLike_co | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def any( + a: ArrayLike | None, + axis: int | tuple[int, ...] | None = None, + *, + out: _ArrayT, + keepdims: _BoolLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... + +# +@overload +def cumsum( + a: _ArrayLike[_ScalarT], + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, +) -> NDArray[_ScalarT]: ... +@overload +def cumsum( + a: ArrayLike, + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, +) -> NDArray[Any]: ... +@overload +def cumsum( + a: ArrayLike, + axis: SupportsIndex | None, + dtype: _DTypeLike[_ScalarT], + out: None = None, +) -> NDArray[_ScalarT]: ... +@overload +def cumsum( + a: ArrayLike, + axis: SupportsIndex | None = None, + *, + dtype: _DTypeLike[_ScalarT], + out: None = None, +) -> NDArray[_ScalarT]: ... +@overload +def cumsum( + a: ArrayLike, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: None = None, +) -> NDArray[Any]: ... +@overload +def cumsum( + a: ArrayLike, + axis: SupportsIndex | None, + dtype: DTypeLike | None, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def cumsum( + a: ArrayLike, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, +) -> _ArrayT: ... + +@overload +def cumulative_sum( + x: _ArrayLike[_ScalarT], + /, + *, + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, +) -> NDArray[_ScalarT]: ... +@overload +def cumulative_sum( + x: ArrayLike, + /, + *, + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, +) -> NDArray[Any]: ... +@overload +def cumulative_sum( + x: ArrayLike, + /, + *, + axis: SupportsIndex | None = None, + dtype: _DTypeLike[_ScalarT], + out: None = None, + include_initial: bool = False, +) -> NDArray[_ScalarT]: ... +@overload +def cumulative_sum( + x: ArrayLike, + /, + *, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: None = None, + include_initial: bool = False, +) -> NDArray[Any]: ... +@overload +def cumulative_sum( + x: ArrayLike, + /, + *, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: _ArrayT, + include_initial: bool = False, +) -> _ArrayT: ... + +@overload +def ptp( + a: _ArrayLike[_ScalarT], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def ptp( + a: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., +) -> Any: ... +@overload +def ptp( + a: ArrayLike, + axis: _ShapeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def ptp( + a: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +@overload +def amax( + a: _ArrayLike[_ScalarT], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def amax( + a: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Any: ... +@overload +def amax( + a: ArrayLike, + axis: _ShapeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def amax( + a: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... + +@overload +def amin( + a: _ArrayLike[_ScalarT], + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def amin( + a: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Any: ... +@overload +def amin( + a: ArrayLike, + axis: _ShapeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def amin( + a: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... + +# TODO: `np.prod()``: For object arrays `initial` does not necessarily +# have to be a numerical scalar. +# The only requirement is that it is compatible +# with the `.__mul__()` method(s) of the passed array's elements. +# Note that the same situation holds for all wrappers around +# `np.ufunc.reduce`, e.g. `np.sum()` (`.__add__()`). +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 +@overload +def prod( + a: _ArrayLikeBool_co, + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> int_: ... +@overload +def prod( + a: _ArrayLikeUInt_co, + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> uint64: ... +@overload +def prod( + a: _ArrayLikeInt_co, + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> int64: ... +@overload +def prod( + a: _ArrayLikeFloat_co, + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> floating: ... +@overload +def prod( + a: _ArrayLikeComplex_co, + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> complexfloating: ... +@overload +def prod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = None, + dtype: None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Any: ... +@overload +def prod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None, + dtype: _DTypeLike[_ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def prod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None = None, + *, + dtype: _DTypeLike[_ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def prod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Any: ... +@overload +def prod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def prod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + initial: _NumberLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... + +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 +@overload +def cumprod( + a: _ArrayLikeBool_co, + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, +) -> NDArray[int_]: ... +@overload +def cumprod( + a: _ArrayLikeUInt_co, + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, +) -> NDArray[uint64]: ... +@overload +def cumprod( + a: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, +) -> NDArray[int64]: ... +@overload +def cumprod( + a: _ArrayLikeFloat_co, + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, +) -> NDArray[floating]: ... +@overload +def cumprod( + a: _ArrayLikeComplex_co, + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, +) -> NDArray[complexfloating]: ... +@overload +def cumprod( + a: _ArrayLikeObject_co, + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, +) -> NDArray[object_]: ... +@overload +def cumprod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: SupportsIndex | None, + dtype: _DTypeLike[_ScalarT], + out: None = None, +) -> NDArray[_ScalarT]: ... +@overload +def cumprod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: SupportsIndex | None = None, + *, + dtype: _DTypeLike[_ScalarT], + out: None = None, +) -> NDArray[_ScalarT]: ... +@overload +def cumprod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: None = None, +) -> NDArray[Any]: ... +@overload +def cumprod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: SupportsIndex | None, + dtype: DTypeLike | None, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def cumprod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, +) -> _ArrayT: ... + +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 +@overload +def cumulative_prod( + x: _ArrayLikeBool_co, + /, + *, + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, +) -> NDArray[int_]: ... +@overload +def cumulative_prod( + x: _ArrayLikeUInt_co, + /, + *, + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, +) -> NDArray[uint64]: ... +@overload +def cumulative_prod( + x: _ArrayLikeInt_co, + /, + *, + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, +) -> NDArray[int64]: ... +@overload +def cumulative_prod( + x: _ArrayLikeFloat_co, + /, + *, + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, +) -> NDArray[floating]: ... +@overload +def cumulative_prod( + x: _ArrayLikeComplex_co, + /, + *, + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, +) -> NDArray[complexfloating]: ... +@overload +def cumulative_prod( + x: _ArrayLikeObject_co, + /, + *, + axis: SupportsIndex | None = None, + dtype: None = None, + out: None = None, + include_initial: bool = False, +) -> NDArray[object_]: ... +@overload +def cumulative_prod( + x: _ArrayLikeComplex_co | _ArrayLikeObject_co, + /, + *, + axis: SupportsIndex | None = None, + dtype: _DTypeLike[_ScalarT], + out: None = None, + include_initial: bool = False, +) -> NDArray[_ScalarT]: ... +@overload +def cumulative_prod( + x: _ArrayLikeComplex_co | _ArrayLikeObject_co, + /, + *, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: None = None, + include_initial: bool = False, +) -> NDArray[Any]: ... +@overload +def cumulative_prod( + x: _ArrayLikeComplex_co | _ArrayLikeObject_co, + /, + *, + axis: SupportsIndex | None = None, + dtype: DTypeLike | None = None, + out: _ArrayT, + include_initial: bool = False, +) -> _ArrayT: ... + +def ndim(a: ArrayLike) -> int: ... + +def size(a: ArrayLike, axis: int | tuple[int, ...] | None = None) -> int: ... + +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 +@overload +def around( + a: _BoolLike_co, + decimals: SupportsIndex = 0, + out: None = None, +) -> float16: ... +@overload +def around( + a: _NumberOrObjectT, + decimals: SupportsIndex = 0, + out: None = None, +) -> _NumberOrObjectT: ... +@overload +def around( + a: _ComplexLike_co | object_, + decimals: SupportsIndex = 0, + out: None = None, +) -> Any: ... +@overload +def around( + a: _ArrayLikeBool_co, + decimals: SupportsIndex = 0, + out: None = None, +) -> NDArray[float16]: ... +@overload +def around( + a: _ArrayLike[_NumberOrObjectT], + decimals: SupportsIndex = 0, + out: None = None, +) -> NDArray[_NumberOrObjectT]: ... +@overload +def around( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + decimals: SupportsIndex = 0, + out: None = None, +) -> NDArray[Any]: ... +@overload +def around( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + decimals: SupportsIndex, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def around( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + decimals: SupportsIndex = 0, + *, + out: _ArrayT, +) -> _ArrayT: ... + +# TODO: Fix overlapping overloads: https://github.com/numpy/numpy/issues/27032 +@overload +def mean( + a: _ArrayLikeFloat_co, + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> floating: ... +@overload +def mean( + a: _ArrayLikeComplex_co, + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> complexfloating: ... +@overload +def mean( + a: _ArrayLike[np.timedelta64], + axis: None = None, + dtype: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> timedelta64: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None, + dtype: _DTypeLike[_ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None = None, + *, + dtype: _DTypeLike[_ScalarT], + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None, + dtype: _DTypeLike[_ScalarT], + out: None, + keepdims: Literal[True, 1], + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[_ScalarT]: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None, + dtype: _DTypeLike[_ScalarT], + out: None = None, + *, + keepdims: bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT | NDArray[_ScalarT]: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = None, + *, + dtype: _DTypeLike[_ScalarT], + out: None = None, + keepdims: bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT | NDArray[_ScalarT]: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Incomplete: ... + +@overload +def std( + a: _ArrayLikeComplex_co, + axis: None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> floating: ... +@overload +def std( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> Any: ... +@overload +def std( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None, + dtype: _DTypeLike[_ScalarT], + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def std( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None = None, + *, + dtype: _DTypeLike[_ScalarT], + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def std( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> Any: ... +@overload +def std( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def std( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ArrayT: ... + +@overload +def var( + a: _ArrayLikeComplex_co, + axis: None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> floating: ... +@overload +def var( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = None, + dtype: None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> Any: ... +@overload +def var( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None, + dtype: _DTypeLike[_ScalarT], + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def var( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None = None, + *, + dtype: _DTypeLike[_ScalarT], + out: None = None, + ddof: float = 0, + keepdims: Literal[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def var( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> Any: ... +@overload +def var( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def var( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ArrayT: ... + +max = amax +min = amin +round = around diff --git a/local_packages/numpy/_core/function_base.py b/local_packages/numpy/_core/function_base.py new file mode 100644 index 0000000..7fcc6b4 --- /dev/null +++ b/local_packages/numpy/_core/function_base.py @@ -0,0 +1,547 @@ +import functools +import inspect +import operator +import types +import warnings + +import numpy as np +from numpy._core import overrides +from numpy._core._multiarray_umath import _array_converter +from numpy._core.multiarray import add_docstring + +from . import numeric as _nx +from .numeric import asanyarray, nan, ndim, result_type + +__all__ = ['logspace', 'linspace', 'geomspace'] + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +def _linspace_dispatcher(start, stop, num=None, endpoint=None, retstep=None, + dtype=None, axis=None, *, device=None): + return (start, stop) + + +@array_function_dispatch(_linspace_dispatcher) +def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, + axis=0, *, device=None): + """ + Return evenly spaced numbers over a specified interval. + + Returns `num` evenly spaced samples, calculated over the + interval [`start`, `stop`]. + + The endpoint of the interval can optionally be excluded. + + .. versionchanged:: 1.20.0 + Values are rounded towards ``-inf`` instead of ``0`` when an + integer ``dtype`` is specified. The old behavior can + still be obtained with ``np.linspace(start, stop, num).astype(int)`` + + Parameters + ---------- + start : array_like + The starting value of the sequence. + stop : array_like + The end value of the sequence, unless `endpoint` is set to False. + In that case, the sequence consists of all but the last of ``num + 1`` + evenly spaced samples, so that `stop` is excluded. Note that the step + size changes when `endpoint` is False. + num : int, optional + Number of samples to generate. Default is 50. Must be non-negative. + endpoint : bool, optional + If True, `stop` is the last sample. Otherwise, it is not included. + Default is True. + retstep : bool, optional + If True, return (`samples`, `step`), where `step` is the spacing + between samples. + dtype : dtype, optional + The type of the output array. If `dtype` is not given, the data type + is inferred from `start` and `stop`. The inferred dtype will never be + an integer; `float` is chosen even if the arguments would produce an + array of integers. + axis : int, optional + The axis in the result to store the samples. Relevant only if start + or stop are array-like. By default (0), the samples will be along a + new axis inserted at the beginning. Use -1 to get an axis at the end. + device : str, optional + The device on which to place the created array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + + Returns + ------- + samples : ndarray + There are `num` equally spaced samples in the closed interval + ``[start, stop]`` or the half-open interval ``[start, stop)`` + (depending on whether `endpoint` is True or False). + step : float, optional + Only returned if `retstep` is True + + Size of spacing between samples. + + + See Also + -------- + arange : Similar to `linspace`, but uses a step size (instead of the + number of samples). + geomspace : Similar to `linspace`, but with numbers spaced evenly on a log + scale (a geometric progression). + logspace : Similar to `geomspace`, but with the end points specified as + logarithms. + :ref:`how-to-partition` + + Examples + -------- + >>> import numpy as np + >>> np.linspace(2.0, 3.0, num=5) + array([2. , 2.25, 2.5 , 2.75, 3. ]) + >>> np.linspace(2.0, 3.0, num=5, endpoint=False) + array([2. , 2.2, 2.4, 2.6, 2.8]) + >>> np.linspace(2.0, 3.0, num=5, retstep=True) + (array([2. , 2.25, 2.5 , 2.75, 3. ]), 0.25) + + Graphical illustration: + + >>> import matplotlib.pyplot as plt + >>> N = 8 + >>> y = np.zeros(N) + >>> x1 = np.linspace(0, 10, N, endpoint=True) + >>> x2 = np.linspace(0, 10, N, endpoint=False) + >>> plt.plot(x1, y, 'o') + [] + >>> plt.plot(x2, y + 0.5, 'o') + [] + >>> plt.ylim([-0.5, 1]) + (-0.5, 1) + >>> plt.show() + + """ + num = operator.index(num) + if num < 0: + raise ValueError( + f"Number of samples, {num}, must be non-negative." + ) + div = (num - 1) if endpoint else num + + conv = _array_converter(start, stop) + start, stop = conv.as_arrays() + dt = conv.result_type(ensure_inexact=True) + + if dtype is None: + dtype = dt + integer_dtype = False + else: + integer_dtype = _nx.issubdtype(dtype, _nx.integer) + + # Use `dtype=type(dt)` to enforce a floating point evaluation: + delta = np.subtract(stop, start, dtype=type(dt)) + y = _nx.arange( + 0, num, dtype=dt, device=device + ).reshape((-1,) + (1,) * ndim(delta)) + + # In-place multiplication y *= delta/div is faster, but prevents + # the multiplicant from overriding what class is produced, and thus + # prevents, e.g. use of Quantities, see gh-7142. Hence, we multiply + # in place only for standard scalar types. + if div > 0: + _mult_inplace = _nx.isscalar(delta) + step = delta / div + any_step_zero = ( + step == 0 if _mult_inplace else _nx.asanyarray(step == 0).any()) + if any_step_zero: + # Special handling for denormal numbers, gh-5437 + y /= div + if _mult_inplace: + y *= delta + else: + y = y * delta + elif _mult_inplace: + y *= step + else: + y = y * step + else: + # sequences with 0 items or 1 item with endpoint=True (i.e. div <= 0) + # have an undefined step + step = nan + # Multiply with delta to allow possible override of output class. + y = y * delta + + y += start + + if endpoint and num > 1: + y[-1, ...] = stop + + if axis != 0: + y = _nx.moveaxis(y, 0, axis) + + if integer_dtype: + _nx.floor(y, out=y) + + y = conv.wrap(y.astype(dtype, copy=False)) + if retstep: + return y, step + else: + return y + + +def _logspace_dispatcher(start, stop, num=None, endpoint=None, base=None, + dtype=None, axis=None): + return (start, stop, base) + + +@array_function_dispatch(_logspace_dispatcher) +def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, + axis=0): + """ + Return numbers spaced evenly on a log scale. + + In linear space, the sequence starts at ``base ** start`` + (`base` to the power of `start`) and ends with ``base ** stop`` + (see `endpoint` below). + + .. versionchanged:: 1.25.0 + Non-scalar 'base` is now supported + + Parameters + ---------- + start : array_like + ``base ** start`` is the starting value of the sequence. + stop : array_like + ``base ** stop`` is the final value of the sequence, unless `endpoint` + is False. In that case, ``num + 1`` values are spaced over the + interval in log-space, of which all but the last (a sequence of + length `num`) are returned. + num : integer, optional + Number of samples to generate. Default is 50. + endpoint : boolean, optional + If true, `stop` is the last sample. Otherwise, it is not included. + Default is True. + base : array_like, optional + The base of the log space. The step size between the elements in + ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform. + Default is 10.0. + dtype : dtype + The type of the output array. If `dtype` is not given, the data type + is inferred from `start` and `stop`. The inferred type will never be + an integer; `float` is chosen even if the arguments would produce an + array of integers. + axis : int, optional + The axis in the result to store the samples. Relevant only if start, + stop, or base are array-like. By default (0), the samples will be + along a new axis inserted at the beginning. Use -1 to get an axis at + the end. + + Returns + ------- + samples : ndarray + `num` samples, equally spaced on a log scale. + + See Also + -------- + arange : Similar to linspace, with the step size specified instead of the + number of samples. Note that, when used with a float endpoint, the + endpoint may or may not be included. + linspace : Similar to logspace, but with the samples uniformly distributed + in linear space, instead of log space. + geomspace : Similar to logspace, but with endpoints specified directly. + :ref:`how-to-partition` + + Notes + ----- + If base is a scalar, logspace is equivalent to the code + + >>> y = np.linspace(start, stop, num=num, endpoint=endpoint) + ... # doctest: +SKIP + >>> power(base, y).astype(dtype) + ... # doctest: +SKIP + + Examples + -------- + >>> import numpy as np + >>> np.logspace(2.0, 3.0, num=4) + array([ 100. , 215.443469 , 464.15888336, 1000. ]) + >>> np.logspace(2.0, 3.0, num=4, endpoint=False) + array([100. , 177.827941 , 316.22776602, 562.34132519]) + >>> np.logspace(2.0, 3.0, num=4, base=2.0) + array([4. , 5.0396842 , 6.34960421, 8. ]) + >>> np.logspace(2.0, 3.0, num=4, base=[2.0, 3.0], axis=-1) + array([[ 4. , 5.0396842 , 6.34960421, 8. ], + [ 9. , 12.98024613, 18.72075441, 27. ]]) + + Graphical illustration: + + >>> import matplotlib.pyplot as plt + >>> N = 10 + >>> x1 = np.logspace(0.1, 1, N, endpoint=True) + >>> x2 = np.logspace(0.1, 1, N, endpoint=False) + >>> y = np.zeros(N) + >>> plt.plot(x1, y, 'o') + [] + >>> plt.plot(x2, y + 0.5, 'o') + [] + >>> plt.ylim([-0.5, 1]) + (-0.5, 1) + >>> plt.show() + + """ + if not isinstance(base, (float, int)) and np.ndim(base): + # If base is non-scalar, broadcast it with the others, since it + # may influence how axis is interpreted. + ndmax = np.broadcast(start, stop, base).ndim + start, stop, base = ( + np.array(a, copy=None, subok=True, ndmin=ndmax) + for a in (start, stop, base) + ) + base = np.expand_dims(base, axis=axis) + y = linspace(start, stop, num=num, endpoint=endpoint, axis=axis) + if dtype is None: + return _nx.power(base, y) + return _nx.power(base, y).astype(dtype, copy=False) + + +def _geomspace_dispatcher(start, stop, num=None, endpoint=None, dtype=None, + axis=None): + return (start, stop) + + +@array_function_dispatch(_geomspace_dispatcher) +def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0): + """ + Return numbers spaced evenly on a log scale (a geometric progression). + + This is similar to `logspace`, but with endpoints specified directly. + Each output sample is a constant multiple of the previous. + + Parameters + ---------- + start : array_like + The starting value of the sequence. + stop : array_like + The final value of the sequence, unless `endpoint` is False. + In that case, ``num + 1`` values are spaced over the + interval in log-space, of which all but the last (a sequence of + length `num`) are returned. + num : integer, optional + Number of samples to generate. Default is 50. + endpoint : boolean, optional + If true, `stop` is the last sample. Otherwise, it is not included. + Default is True. + dtype : dtype + The type of the output array. If `dtype` is not given, the data type + is inferred from `start` and `stop`. The inferred dtype will never be + an integer; `float` is chosen even if the arguments would produce an + array of integers. + axis : int, optional + The axis in the result to store the samples. Relevant only if start + or stop are array-like. By default (0), the samples will be along a + new axis inserted at the beginning. Use -1 to get an axis at the end. + + Returns + ------- + samples : ndarray + `num` samples, equally spaced on a log scale. + + See Also + -------- + logspace : Similar to geomspace, but with endpoints specified using log + and base. + linspace : Similar to geomspace, but with arithmetic instead of geometric + progression. + arange : Similar to linspace, with the step size specified instead of the + number of samples. + :ref:`how-to-partition` + + Notes + ----- + If the inputs or dtype are complex, the output will follow a logarithmic + spiral in the complex plane. (There are an infinite number of spirals + passing through two points; the output will follow the shortest such path.) + + Examples + -------- + >>> import numpy as np + >>> np.geomspace(1, 1000, num=4) + array([ 1., 10., 100., 1000.]) + >>> np.geomspace(1, 1000, num=3, endpoint=False) + array([ 1., 10., 100.]) + >>> np.geomspace(1, 1000, num=4, endpoint=False) + array([ 1. , 5.62341325, 31.6227766 , 177.827941 ]) + >>> np.geomspace(1, 256, num=9) + array([ 1., 2., 4., 8., 16., 32., 64., 128., 256.]) + + Note that the above may not produce exact integers: + + >>> np.geomspace(1, 256, num=9, dtype=int) + array([ 1, 2, 4, 7, 16, 32, 63, 127, 256]) + >>> np.around(np.geomspace(1, 256, num=9)).astype(int) + array([ 1, 2, 4, 8, 16, 32, 64, 128, 256]) + + Negative, decreasing, and complex inputs are allowed: + + >>> np.geomspace(1000, 1, num=4) + array([1000., 100., 10., 1.]) + >>> np.geomspace(-1000, -1, num=4) + array([-1000., -100., -10., -1.]) + >>> np.geomspace(1j, 1000j, num=4) # Straight line + array([0. +1.j, 0. +10.j, 0. +100.j, 0.+1000.j]) + >>> np.geomspace(-1+0j, 1+0j, num=5) # Circle + array([-1.00000000e+00+1.22464680e-16j, -7.07106781e-01+7.07106781e-01j, + 6.12323400e-17+1.00000000e+00j, 7.07106781e-01+7.07106781e-01j, + 1.00000000e+00+0.00000000e+00j]) + + Graphical illustration of `endpoint` parameter: + + >>> import matplotlib.pyplot as plt + >>> N = 10 + >>> y = np.zeros(N) + >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=True), y + 1, 'o') + [] + >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=False), y + 2, 'o') + [] + >>> plt.axis([0.5, 2000, 0, 3]) + [0.5, 2000, 0, 3] + >>> plt.grid(True, color='0.7', linestyle='-', which='both', axis='both') + >>> plt.show() + + """ + start = asanyarray(start) + stop = asanyarray(stop) + if _nx.any(start == 0) or _nx.any(stop == 0): + raise ValueError('Geometric sequence cannot include zero') + + dt = result_type(start, stop, float(num), _nx.zeros((), dtype)) + if dtype is None: + dtype = dt + else: + # complex to dtype('complex128'), for instance + dtype = _nx.dtype(dtype) + + # Promote both arguments to the same dtype in case, for instance, one is + # complex and another is negative and log would produce NaN otherwise. + # Copy since we may change things in-place further down. + start = start.astype(dt, copy=True) + stop = stop.astype(dt, copy=True) + + # Allow negative real values and ensure a consistent result for complex + # (including avoiding negligible real or imaginary parts in output) by + # rotating start to positive real, calculating, then undoing rotation. + out_sign = _nx.sign(start) + start /= out_sign + stop = stop / out_sign + + log_start = _nx.log10(start) + log_stop = _nx.log10(stop) + result = logspace(log_start, log_stop, num=num, + endpoint=endpoint, base=10.0, dtype=dt) + + # Make sure the endpoints match the start and stop arguments. This is + # necessary because np.exp(np.log(x)) is not necessarily equal to x. + if num > 0: + result[0] = start + if num > 1 and endpoint: + result[-1] = stop + + result *= out_sign + + if axis != 0: + result = _nx.moveaxis(result, 0, axis) + + return result.astype(dtype, copy=False) + + +def _needs_add_docstring(obj): + """ + Returns true if the only way to set the docstring of `obj` from python is + via add_docstring. + + This function errs on the side of being overly conservative. + """ + Py_TPFLAGS_HEAPTYPE = 1 << 9 + + if isinstance(obj, (types.FunctionType, types.MethodType, property)): + return False + + if isinstance(obj, type) and obj.__flags__ & Py_TPFLAGS_HEAPTYPE: + return False + + return True + + +def _add_docstring(obj, doc, warn_on_python): + if warn_on_python and not _needs_add_docstring(obj): + warnings.warn( + f"add_newdoc was used on a pure-python object {obj}. " + "Prefer to attach it directly to the source.", + UserWarning, + stacklevel=3) + + doc = inspect.cleandoc(doc) + + try: + add_docstring(obj, doc) + except Exception: + pass + + +def add_newdoc(place, obj, doc, warn_on_python=True): + """ + Add documentation to an existing object, typically one defined in C + + The purpose is to allow easier editing of the docstrings without requiring + a re-compile. This exists primarily for internal use within numpy itself. + + Parameters + ---------- + place : str + The absolute name of the module to import from + obj : str | None + The name of the object to add documentation to, typically a class or + function name. + doc : str | tuple[str, str] | list[tuple[str, str]] + If a string, the documentation to apply to `obj` + + If a tuple, then the first element is interpreted as an attribute + of `obj` and the second as the docstring to apply - + ``(method, docstring)`` + + If a list, then each element of the list should be a tuple of length + two - ``[(method1, docstring1), (method2, docstring2), ...]`` + warn_on_python : bool + If True, the default, emit `UserWarning` if this is used to attach + documentation to a pure-python object. + + Notes + ----- + This routine never raises an error if the docstring can't be written, but + will raise an error if the object being documented does not exist. + + This routine cannot modify read-only docstrings, as appear + in new-style classes or built-in functions. Because this + routine never raises an error the caller must check manually + that the docstrings were changed. + + Since this function grabs the ``char *`` from a c-level str object and puts + it into the ``tp_doc`` slot of the type of `obj`, it violates a number of + C-API best-practices, by: + + - modifying a `PyTypeObject` after calling `PyType_Ready` + - calling `Py_INCREF` on the str and losing the reference, so the str + will never be released + + If possible it should be avoided. + """ + new = getattr(__import__(place, globals(), {}, [obj]), obj) + if isinstance(doc, str): + if "${ARRAY_FUNCTION_LIKE}" in doc: + doc = overrides.get_array_function_like_doc(new, doc) + _add_docstring(new, doc, warn_on_python) + elif isinstance(doc, tuple): + attr, docstring = doc + _add_docstring(getattr(new, attr), docstring, warn_on_python) + elif isinstance(doc, list): + for attr, docstring in doc: + _add_docstring(getattr(new, attr), docstring, warn_on_python) diff --git a/local_packages/numpy/_core/function_base.pyi b/local_packages/numpy/_core/function_base.pyi new file mode 100644 index 0000000..19e1238 --- /dev/null +++ b/local_packages/numpy/_core/function_base.pyi @@ -0,0 +1,276 @@ +from _typeshed import Incomplete +from typing import Literal as L, SupportsIndex, TypeAlias, TypeVar, overload + +import numpy as np +from numpy._typing import ( + DTypeLike, + NDArray, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _DTypeLike, +) +from numpy._typing._array_like import _DualArrayLike + +__all__ = ["geomspace", "linspace", "logspace"] + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) + +_ToArrayFloat64: TypeAlias = _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], float] + +@overload +def linspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: None = None, + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> NDArray[np.float64]: ... +@overload +def linspace( + start: _ArrayLikeFloat_co, + stop: _ArrayLikeFloat_co, + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: None = None, + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> NDArray[np.floating]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: None = None, + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> NDArray[np.complexfloating]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex, + endpoint: bool, + retstep: L[False], + dtype: _DTypeLike[_ScalarT], + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + *, + dtype: _DTypeLike[_ScalarT], + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: DTypeLike | None = None, + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> NDArray[Incomplete]: ... +@overload +def linspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + retstep: L[True], + dtype: None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[NDArray[np.float64], np.float64]: ... +@overload +def linspace( + start: _ArrayLikeFloat_co, + stop: _ArrayLikeFloat_co, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + retstep: L[True], + dtype: None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[NDArray[np.floating], np.floating]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + retstep: L[True], + dtype: None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[NDArray[np.complexfloating], np.complexfloating]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + retstep: L[True], + dtype: _DTypeLike[_ScalarT], + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[NDArray[_ScalarT], _ScalarT]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + retstep: L[True], + dtype: DTypeLike | None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[NDArray[Incomplete], Incomplete]: ... + +@overload +def logspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ToArrayFloat64 = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.float64]: ... +@overload +def logspace( + start: _ArrayLikeFloat_co, + stop: _ArrayLikeFloat_co, + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeFloat_co = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.floating]: ... +@overload +def logspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeComplex_co = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.complexfloating]: ... +@overload +def logspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex, + endpoint: bool, + base: _ArrayLikeComplex_co, + dtype: _DTypeLike[_ScalarT], + axis: SupportsIndex = 0, +) -> NDArray[_ScalarT]: ... +@overload +def logspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeComplex_co = 10.0, + *, + dtype: _DTypeLike[_ScalarT], + axis: SupportsIndex = 0, +) -> NDArray[_ScalarT]: ... +@overload +def logspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeComplex_co = 10.0, + dtype: DTypeLike | None = None, + axis: SupportsIndex = 0, +) -> NDArray[Incomplete]: ... + +@overload +def geomspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.float64]: ... +@overload +def geomspace( + start: _ArrayLikeFloat_co, + stop: _ArrayLikeFloat_co, + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.floating]: ... +@overload +def geomspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.complexfloating]: ... +@overload +def geomspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex, + endpoint: bool, + dtype: _DTypeLike[_ScalarT], + axis: SupportsIndex = 0, +) -> NDArray[_ScalarT]: ... +@overload +def geomspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + dtype: _DTypeLike[_ScalarT], + axis: SupportsIndex = 0, +) -> NDArray[_ScalarT]: ... +@overload +def geomspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: DTypeLike | None = None, + axis: SupportsIndex = 0, +) -> NDArray[Incomplete]: ... + +def add_newdoc( + place: str, + obj: str, + doc: str | tuple[str, str] | list[tuple[str, str]], + warn_on_python: bool = True, +) -> None: ... diff --git a/local_packages/numpy/_core/getlimits.py b/local_packages/numpy/_core/getlimits.py new file mode 100644 index 0000000..3c03d81 --- /dev/null +++ b/local_packages/numpy/_core/getlimits.py @@ -0,0 +1,462 @@ +"""Machine limits for Float32 and Float64 and (long double) if available... + +""" +__all__ = ['finfo', 'iinfo'] + +import math +import types +import warnings +from functools import cached_property + +from numpy._utils import set_module + +from . import numeric, numerictypes as ntypes +from ._multiarray_umath import _populate_finfo_constants + + +def _fr0(a): + """fix rank-0 --> rank-1""" + if a.ndim == 0: + a = a.copy() + a.shape = (1,) + return a + + +def _fr1(a): + """fix rank > 0 --> rank-0""" + if a.size == 1: + a = a.copy() + a.shape = () + return a + + +_convert_to_float = { + ntypes.csingle: ntypes.single, + ntypes.complex128: ntypes.float64, + ntypes.clongdouble: ntypes.longdouble + } + +# Parameters for creating MachAr / MachAr-like objects +_title_fmt = 'numpy {} precision floating point number' +_MACHAR_PARAMS = { + ntypes.double: { + 'itype': ntypes.int64, + 'fmt': '%24.16e', + 'title': _title_fmt.format('double')}, + ntypes.single: { + 'itype': ntypes.int32, + 'fmt': '%15.7e', + 'title': _title_fmt.format('single')}, + ntypes.longdouble: { + 'itype': ntypes.longlong, + 'fmt': '%s', + 'title': _title_fmt.format('long double')}, + ntypes.half: { + 'itype': ntypes.int16, + 'fmt': '%12.5e', + 'title': _title_fmt.format('half')}} + + +@set_module('numpy') +class finfo: + """ + finfo(dtype) + + Machine limits for floating point types. + + Attributes + ---------- + bits : int + The number of bits occupied by the type. + dtype : dtype + Returns the dtype for which `finfo` returns information. For complex + input, the returned dtype is the associated ``float*`` dtype for its + real and complex components. + eps : float + The difference between 1.0 and the next smallest representable float + larger than 1.0. For example, for 64-bit binary floats in the IEEE-754 + standard, ``eps = 2**-52``, approximately 2.22e-16. + epsneg : float + The difference between 1.0 and the next smallest representable float + less than 1.0. For example, for 64-bit binary floats in the IEEE-754 + standard, ``epsneg = 2**-53``, approximately 1.11e-16. + iexp : int + The number of bits in the exponent portion of the floating point + representation. + machep : int + The exponent that yields `eps`. + max : floating point number of the appropriate type + The largest representable number. + maxexp : int + The smallest positive power of the base (2) that causes overflow. + Corresponds to the C standard MAX_EXP. + min : floating point number of the appropriate type + The smallest representable number, typically ``-max``. + minexp : int + The most negative power of the base (2) consistent with there + being no leading 0's in the mantissa. Corresponds to the C + standard MIN_EXP - 1. + negep : int + The exponent that yields `epsneg`. + nexp : int + The number of bits in the exponent including its sign and bias. + nmant : int + The number of explicit bits in the mantissa (excluding the implicit + leading bit for normalized numbers). + precision : int + The approximate number of decimal digits to which this kind of + float is precise. + resolution : floating point number of the appropriate type + The approximate decimal resolution of this type, i.e., + ``10**-precision``. + tiny : float + An alias for `smallest_normal`, kept for backwards compatibility. + smallest_normal : float + The smallest positive floating point number with 1 as leading bit in + the mantissa following IEEE-754 (see Notes). + smallest_subnormal : float + The smallest positive floating point number with 0 as leading bit in + the mantissa following IEEE-754. + + Parameters + ---------- + dtype : float, dtype, or instance + Kind of floating point or complex floating point + data-type about which to get information. + + See Also + -------- + iinfo : The equivalent for integer data types. + spacing : The distance between a value and the nearest adjacent number + nextafter : The next floating point value after x1 towards x2 + + Notes + ----- + For developers of NumPy: do not instantiate this at the module level. + The initial calculation of these parameters is expensive and negatively + impacts import times. These objects are cached, so calling ``finfo()`` + repeatedly inside your functions is not a problem. + + Note that ``smallest_normal`` is not actually the smallest positive + representable value in a NumPy floating point type. As in the IEEE-754 + standard [1]_, NumPy floating point types make use of subnormal numbers to + fill the gap between 0 and ``smallest_normal``. However, subnormal numbers + may have significantly reduced precision [2]_. + + For ``longdouble``, the representation varies across platforms. On most + platforms it is IEEE 754 binary128 (quad precision) or binary64-extended + (80-bit extended precision). On PowerPC systems, it may use the IBM + double-double format (a pair of float64 values), which has special + characteristics for precision and range. + + This function can also be used for complex data types as well. If used, + the output will be the same as the corresponding real float type + (e.g. numpy.finfo(numpy.csingle) is the same as numpy.finfo(numpy.single)). + However, the output is true for the real and imaginary components. + + References + ---------- + .. [1] IEEE Standard for Floating-Point Arithmetic, IEEE Std 754-2008, + pp.1-70, 2008, https://doi.org/10.1109/IEEESTD.2008.4610935 + .. [2] Wikipedia, "Denormal Numbers", + https://en.wikipedia.org/wiki/Denormal_number + + Examples + -------- + >>> import numpy as np + >>> np.finfo(np.float64).dtype + dtype('float64') + >>> np.finfo(np.complex64).dtype + dtype('float32') + + """ + + _finfo_cache = {} + + __class_getitem__ = classmethod(types.GenericAlias) + + def __new__(cls, dtype): + try: + obj = cls._finfo_cache.get(dtype) # most common path + if obj is not None: + return obj + except TypeError: + pass + + if dtype is None: + # Deprecated in NumPy 1.25, 2023-01-16 + warnings.warn( + "finfo() dtype cannot be None. This behavior will " + "raise an error in the future. (Deprecated in NumPy 1.25)", + DeprecationWarning, + stacklevel=2 + ) + + try: + dtype = numeric.dtype(dtype) + except TypeError: + # In case a float instance was given + dtype = numeric.dtype(type(dtype)) + + obj = cls._finfo_cache.get(dtype) + if obj is not None: + return obj + dtypes = [dtype] + newdtype = ntypes.obj2sctype(dtype) + if newdtype is not dtype: + dtypes.append(newdtype) + dtype = newdtype + if not issubclass(dtype, numeric.inexact): + raise ValueError(f"data type {dtype!r} not inexact") + obj = cls._finfo_cache.get(dtype) + if obj is not None: + return obj + if not issubclass(dtype, numeric.floating): + newdtype = _convert_to_float[dtype] + if newdtype is not dtype: + # dtype changed, for example from complex128 to float64 + dtypes.append(newdtype) + dtype = newdtype + + obj = cls._finfo_cache.get(dtype, None) + if obj is not None: + # the original dtype was not in the cache, but the new + # dtype is in the cache. we add the original dtypes to + # the cache and return the result + for dt in dtypes: + cls._finfo_cache[dt] = obj + return obj + obj = object.__new__(cls)._init(dtype) + for dt in dtypes: + cls._finfo_cache[dt] = obj + return obj + + def _init(self, dtype): + self.dtype = numeric.dtype(dtype) + self.bits = self.dtype.itemsize * 8 + self._fmt = None + self._repr = None + _populate_finfo_constants(self, self.dtype) + return self + + @cached_property + def epsneg(self): + # Assume typical floating point logic. Could also use nextafter. + return self.eps / self._radix + + @cached_property + def resolution(self): + return self.dtype.type(10)**-self.precision + + @cached_property + def machep(self): + return int(math.log2(self.eps)) + + @cached_property + def negep(self): + return int(math.log2(self.epsneg)) + + @cached_property + def nexp(self): + # considering all ones (inf/nan) and all zeros (subnormal/zero) + return math.ceil(math.log2(self.maxexp - self.minexp + 2)) + + @cached_property + def iexp(self): + # Calculate exponent bits from it's range: + return math.ceil(math.log2(self.maxexp - self.minexp)) + + def __str__(self): + if (fmt := getattr(self, "_fmt", None)) is not None: + return fmt + + def get_str(name, pad=None): + if (val := getattr(self, name, None)) is None: + return "" + if pad is not None: + s = str(val).ljust(pad) + return str(val) + + precision = get_str("precision", 3) + machep = get_str("machep", 6) + negep = get_str("negep", 6) + minexp = get_str("minexp", 6) + maxexp = get_str("maxexp", 6) + resolution = get_str("resolution") + eps = get_str("eps") + epsneg = get_str("epsneg") + tiny = get_str("tiny") + smallest_normal = get_str("smallest_normal") + smallest_subnormal = get_str("smallest_subnormal") + nexp = get_str("nexp", 6) + max_ = get_str("max") + if hasattr(self, "min") and hasattr(self, "max") and -self.min == self.max: + min_ = "-max" + else: + min_ = get_str("min") + + fmt = ( + f'Machine parameters for {self.dtype}\n' + f'---------------------------------------------------------------\n' + f'precision = {precision} resolution = {resolution}\n' + f'machep = {machep} eps = {eps}\n' + f'negep = {negep} epsneg = {epsneg}\n' + f'minexp = {minexp} tiny = {tiny}\n' + f'maxexp = {maxexp} max = {max_}\n' + f'nexp = {nexp} min = {min_}\n' + f'smallest_normal = {smallest_normal} ' + f'smallest_subnormal = {smallest_subnormal}\n' + f'---------------------------------------------------------------\n' + ) + self._fmt = fmt + return fmt + + def __repr__(self): + if (repr_str := getattr(self, "_repr", None)) is not None: + return repr_str + + c = self.__class__.__name__ + + # Use precision+1 digits in exponential notation + fmt_str = _MACHAR_PARAMS.get(self.dtype.type, {}).get('fmt', '%s') + if fmt_str != '%s' and hasattr(self, 'max') and hasattr(self, 'min'): + max_str = (fmt_str % self.max).strip() + min_str = (fmt_str % self.min).strip() + else: + max_str = str(self.max) + min_str = str(self.min) + + resolution_str = str(self.resolution) + + repr_str = (f"{c}(resolution={resolution_str}, min={min_str}," + f" max={max_str}, dtype={self.dtype})") + self._repr = repr_str + return repr_str + + @cached_property + def tiny(self): + """Return the value for tiny, alias of smallest_normal. + + Returns + ------- + tiny : float + Value for the smallest normal, alias of smallest_normal. + + Warns + ----- + UserWarning + If the calculated value for the smallest normal is requested for + double-double. + """ + return self.smallest_normal + + +@set_module('numpy') +class iinfo: + """ + iinfo(type) + + Machine limits for integer types. + + Attributes + ---------- + bits : int + The number of bits occupied by the type. + dtype : dtype + Returns the dtype for which `iinfo` returns information. + min : int + The smallest integer expressible by the type. + max : int + The largest integer expressible by the type. + + Parameters + ---------- + int_type : integer type, dtype, or instance + The kind of integer data type to get information about. + + See Also + -------- + finfo : The equivalent for floating point data types. + + Examples + -------- + With types: + + >>> import numpy as np + >>> ii16 = np.iinfo(np.int16) + >>> ii16.min + -32768 + >>> ii16.max + 32767 + >>> ii32 = np.iinfo(np.int32) + >>> ii32.min + -2147483648 + >>> ii32.max + 2147483647 + + With instances: + + >>> ii32 = np.iinfo(np.int32(10)) + >>> ii32.min + -2147483648 + >>> ii32.max + 2147483647 + + """ + + _min_vals = {} + _max_vals = {} + + __class_getitem__ = classmethod(types.GenericAlias) + + def __init__(self, int_type): + try: + self.dtype = numeric.dtype(int_type) + except TypeError: + self.dtype = numeric.dtype(type(int_type)) + self.kind = self.dtype.kind + self.bits = self.dtype.itemsize * 8 + self.key = "%s%d" % (self.kind, self.bits) + if self.kind not in 'iu': + raise ValueError(f"Invalid integer data type {self.kind!r}.") + + @property + def min(self): + """Minimum value of given dtype.""" + if self.kind == 'u': + return 0 + else: + try: + val = iinfo._min_vals[self.key] + except KeyError: + val = int(-(1 << (self.bits - 1))) + iinfo._min_vals[self.key] = val + return val + + @property + def max(self): + """Maximum value of given dtype.""" + try: + val = iinfo._max_vals[self.key] + except KeyError: + if self.kind == 'u': + val = int((1 << self.bits) - 1) + else: + val = int((1 << (self.bits - 1)) - 1) + iinfo._max_vals[self.key] = val + return val + + def __str__(self): + """String representation.""" + fmt = ( + 'Machine parameters for %(dtype)s\n' + '---------------------------------------------------------------\n' + 'min = %(min)s\n' + 'max = %(max)s\n' + '---------------------------------------------------------------\n' + ) + return fmt % {'dtype': self.dtype, 'min': self.min, 'max': self.max} + + def __repr__(self): + return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__, + self.min, self.max, self.dtype) diff --git a/local_packages/numpy/_core/getlimits.pyi b/local_packages/numpy/_core/getlimits.pyi new file mode 100644 index 0000000..a22149c --- /dev/null +++ b/local_packages/numpy/_core/getlimits.pyi @@ -0,0 +1,124 @@ +from functools import cached_property +from types import GenericAlias +from typing import Final, Generic, Self, overload +from typing_extensions import TypeVar + +import numpy as np +from numpy._typing import ( + _CLongDoubleCodes, + _Complex64Codes, + _Complex128Codes, + _DTypeLike, + _Float16Codes, + _Float32Codes, + _Float64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _IntPCodes, + _LongDoubleCodes, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, +) + +__all__ = ["finfo", "iinfo"] + +### + +_IntegerT_co = TypeVar("_IntegerT_co", bound=np.integer, default=np.integer, covariant=True) +_FloatingT_co = TypeVar("_FloatingT_co", bound=np.floating, default=np.floating, covariant=True) + +### + +class iinfo(Generic[_IntegerT_co]): + dtype: np.dtype[_IntegerT_co] + bits: Final[int] + kind: Final[str] + key: Final[str] + + @property + def min(self, /) -> int: ... + @property + def max(self, /) -> int: ... + + # + @overload + def __init__(self, /, int_type: _IntegerT_co | _DTypeLike[_IntegerT_co]) -> None: ... + @overload + def __init__(self: iinfo[np.int_], /, int_type: _IntPCodes | type[int] | int) -> None: ... + @overload + def __init__(self: iinfo[np.int8], /, int_type: _Int8Codes) -> None: ... + @overload + def __init__(self: iinfo[np.uint8], /, int_type: _UInt8Codes) -> None: ... + @overload + def __init__(self: iinfo[np.int16], /, int_type: _Int16Codes) -> None: ... + @overload + def __init__(self: iinfo[np.uint16], /, int_type: _UInt16Codes) -> None: ... + @overload + def __init__(self: iinfo[np.int32], /, int_type: _Int32Codes) -> None: ... + @overload + def __init__(self: iinfo[np.uint32], /, int_type: _UInt32Codes) -> None: ... + @overload + def __init__(self: iinfo[np.int64], /, int_type: _Int64Codes) -> None: ... + @overload + def __init__(self: iinfo[np.uint64], /, int_type: _UInt64Codes) -> None: ... + @overload + def __init__(self, /, int_type: str) -> None: ... + + # + @classmethod + def __class_getitem__(cls, item: object, /) -> GenericAlias: ... + +class finfo(Generic[_FloatingT_co]): + dtype: np.dtype[_FloatingT_co] # readonly + eps: _FloatingT_co # readonly + _radix: _FloatingT_co # readonly + smallest_normal: _FloatingT_co # readonly + smallest_subnormal: _FloatingT_co # readonly + max: _FloatingT_co # readonly + min: _FloatingT_co # readonly + + _fmt: str | None # `__str__` cache + _repr: str | None # `__repr__` cache + + bits: Final[int] + maxexp: Final[int] + minexp: Final[int] + nmant: Final[int] + precision: Final[int] + + @classmethod + def __class_getitem__(cls, item: object, /) -> GenericAlias: ... + + # + @overload + def __new__(cls, dtype: _FloatingT_co | _DTypeLike[_FloatingT_co]) -> Self: ... + @overload + def __new__(cls, dtype: _Float16Codes) -> finfo[np.float16]: ... + @overload + def __new__(cls, dtype: _Float32Codes | _Complex64Codes | _DTypeLike[np.complex64]) -> finfo[np.float32]: ... + @overload + def __new__(cls, dtype: _Float64Codes | _Complex128Codes | type[complex] | complex) -> finfo[np.float64]: ... + @overload + def __new__(cls, dtype: _LongDoubleCodes | _CLongDoubleCodes | _DTypeLike[np.clongdouble]) -> finfo[np.longdouble]: ... + @overload + def __new__(cls, dtype: str) -> finfo: ... + + # + @cached_property + def epsneg(self, /) -> _FloatingT_co: ... + @cached_property + def resolution(self, /) -> _FloatingT_co: ... + @cached_property + def machep(self, /) -> int: ... + @cached_property + def negep(self, /) -> int: ... + @cached_property + def nexp(self, /) -> int: ... + @cached_property + def iexp(self, /) -> int: ... + @cached_property + def tiny(self, /) -> _FloatingT_co: ... diff --git a/local_packages/numpy/_core/include/numpy/__multiarray_api.c b/local_packages/numpy/_core/include/numpy/__multiarray_api.c new file mode 100644 index 0000000..8398c62 --- /dev/null +++ b/local_packages/numpy/_core/include/numpy/__multiarray_api.c @@ -0,0 +1,376 @@ + +/* These pointers will be stored in the C-object for use in other + extension modules +*/ + +void *PyArray_API[] = { + (void *) PyArray_GetNDArrayCVersion, + NULL, + (void *) &PyArray_Type, + (void *) &PyArrayDescr_Type, + NULL, + (void *) &PyArrayIter_Type, + (void *) &PyArrayMultiIter_Type, + (int *) &NPY_NUMUSERTYPES, + (void *) &PyBoolArrType_Type, + (void *) &_PyArrayScalar_BoolValues, + (void *) &PyGenericArrType_Type, + (void *) &PyNumberArrType_Type, + (void *) &PyIntegerArrType_Type, + (void *) &PySignedIntegerArrType_Type, + (void *) &PyUnsignedIntegerArrType_Type, + (void *) &PyInexactArrType_Type, + (void *) &PyFloatingArrType_Type, + (void *) &PyComplexFloatingArrType_Type, + (void *) &PyFlexibleArrType_Type, + (void *) &PyCharacterArrType_Type, + (void *) &PyByteArrType_Type, + (void *) &PyShortArrType_Type, + (void *) &PyIntArrType_Type, + (void *) &PyLongArrType_Type, + (void *) &PyLongLongArrType_Type, + (void *) &PyUByteArrType_Type, + (void *) &PyUShortArrType_Type, + (void *) &PyUIntArrType_Type, + (void *) &PyULongArrType_Type, + (void *) &PyULongLongArrType_Type, + (void *) &PyFloatArrType_Type, + (void *) &PyDoubleArrType_Type, + (void *) &PyLongDoubleArrType_Type, + (void *) &PyCFloatArrType_Type, + (void *) &PyCDoubleArrType_Type, + (void *) &PyCLongDoubleArrType_Type, + (void *) &PyObjectArrType_Type, + (void *) &PyStringArrType_Type, + (void *) &PyUnicodeArrType_Type, + (void *) &PyVoidArrType_Type, + NULL, + NULL, + (void *) PyArray_INCREF, + (void *) PyArray_XDECREF, + (void *) PyArray_SetStringFunction, + (void *) PyArray_DescrFromType, + (void *) PyArray_TypeObjectFromType, + (void *) PyArray_Zero, + (void *) PyArray_One, + (void *) PyArray_CastToType, + (void *) PyArray_CopyInto, + (void *) PyArray_CopyAnyInto, + (void *) PyArray_CanCastSafely, + (void *) PyArray_CanCastTo, + (void *) PyArray_ObjectType, + (void *) PyArray_DescrFromObject, + (void *) PyArray_ConvertToCommonType, + (void *) PyArray_DescrFromScalar, + (void *) PyArray_DescrFromTypeObject, + (void *) PyArray_Size, + (void *) PyArray_Scalar, + (void *) PyArray_FromScalar, + (void *) PyArray_ScalarAsCtype, + (void *) PyArray_CastScalarToCtype, + (void *) PyArray_CastScalarDirect, + (void *) PyArray_Pack, + NULL, + NULL, + NULL, + (void *) PyArray_FromAny, + (void *) PyArray_EnsureArray, + (void *) PyArray_EnsureAnyArray, + (void *) PyArray_FromFile, + (void *) PyArray_FromString, + (void *) PyArray_FromBuffer, + (void *) PyArray_FromIter, + (void *) PyArray_Return, + (void *) PyArray_GetField, + (void *) PyArray_SetField, + (void *) PyArray_Byteswap, + (void *) PyArray_Resize, + NULL, + NULL, + NULL, + (void *) PyArray_CopyObject, + (void *) PyArray_NewCopy, + (void *) PyArray_ToList, + (void *) PyArray_ToString, + (void *) PyArray_ToFile, + (void *) PyArray_Dump, + (void *) PyArray_Dumps, + (void *) PyArray_ValidType, + (void *) PyArray_UpdateFlags, + (void *) PyArray_New, + (void *) PyArray_NewFromDescr, + (void *) PyArray_DescrNew, + (void *) PyArray_DescrNewFromType, + (void *) PyArray_GetPriority, + (void *) PyArray_IterNew, + (void *) PyArray_MultiIterNew, + (void *) PyArray_PyIntAsInt, + (void *) PyArray_PyIntAsIntp, + (void *) PyArray_Broadcast, + NULL, + (void *) PyArray_FillWithScalar, + (void *) PyArray_CheckStrides, + (void *) PyArray_DescrNewByteorder, + (void *) PyArray_IterAllButAxis, + (void *) PyArray_CheckFromAny, + (void *) PyArray_FromArray, + (void *) PyArray_FromInterface, + (void *) PyArray_FromStructInterface, + (void *) PyArray_FromArrayAttr, + (void *) PyArray_ScalarKind, + (void *) PyArray_CanCoerceScalar, + NULL, + (void *) PyArray_CanCastScalar, + NULL, + (void *) PyArray_RemoveSmallest, + (void *) PyArray_ElementStrides, + (void *) PyArray_Item_INCREF, + (void *) PyArray_Item_XDECREF, + NULL, + (void *) PyArray_Transpose, + (void *) PyArray_TakeFrom, + (void *) PyArray_PutTo, + (void *) PyArray_PutMask, + (void *) PyArray_Repeat, + (void *) PyArray_Choose, + (void *) PyArray_Sort, + (void *) PyArray_ArgSort, + (void *) PyArray_SearchSorted, + (void *) PyArray_ArgMax, + (void *) PyArray_ArgMin, + (void *) PyArray_Reshape, + (void *) PyArray_Newshape, + (void *) PyArray_Squeeze, + (void *) PyArray_View, + (void *) PyArray_SwapAxes, + (void *) PyArray_Max, + (void *) PyArray_Min, + (void *) PyArray_Ptp, + (void *) PyArray_Mean, + (void *) PyArray_Trace, + (void *) PyArray_Diagonal, + (void *) PyArray_Clip, + (void *) PyArray_Conjugate, + (void *) PyArray_Nonzero, + (void *) PyArray_Std, + (void *) PyArray_Sum, + (void *) PyArray_CumSum, + (void *) PyArray_Prod, + (void *) PyArray_CumProd, + (void *) PyArray_All, + (void *) PyArray_Any, + (void *) PyArray_Compress, + (void *) PyArray_Flatten, + (void *) PyArray_Ravel, + (void *) PyArray_MultiplyList, + (void *) PyArray_MultiplyIntList, + (void *) PyArray_GetPtr, + (void *) PyArray_CompareLists, + (void *) PyArray_AsCArray, + NULL, + NULL, + (void *) PyArray_Free, + (void *) PyArray_Converter, + (void *) PyArray_IntpFromSequence, + (void *) PyArray_Concatenate, + (void *) PyArray_InnerProduct, + (void *) PyArray_MatrixProduct, + NULL, + (void *) PyArray_Correlate, + NULL, + (void *) PyArray_DescrConverter, + (void *) PyArray_DescrConverter2, + (void *) PyArray_IntpConverter, + (void *) PyArray_BufferConverter, + (void *) PyArray_AxisConverter, + (void *) PyArray_BoolConverter, + (void *) PyArray_ByteorderConverter, + (void *) PyArray_OrderConverter, + (void *) PyArray_EquivTypes, + (void *) PyArray_Zeros, + (void *) PyArray_Empty, + (void *) PyArray_Where, + (void *) PyArray_Arange, + (void *) PyArray_ArangeObj, + (void *) PyArray_SortkindConverter, + (void *) PyArray_LexSort, + (void *) PyArray_Round, + (void *) PyArray_EquivTypenums, + (void *) PyArray_RegisterDataType, + (void *) PyArray_RegisterCastFunc, + (void *) PyArray_RegisterCanCast, + (void *) PyArray_InitArrFuncs, + (void *) PyArray_IntTupleFromIntp, + NULL, + (void *) PyArray_ClipmodeConverter, + (void *) PyArray_OutputConverter, + (void *) PyArray_BroadcastToShape, + NULL, + NULL, + (void *) PyArray_DescrAlignConverter, + (void *) PyArray_DescrAlignConverter2, + (void *) PyArray_SearchsideConverter, + (void *) PyArray_CheckAxis, + (void *) PyArray_OverflowMultiplyList, + NULL, + (void *) PyArray_MultiIterFromObjects, + (void *) PyArray_GetEndianness, + (void *) PyArray_GetNDArrayCFeatureVersion, + (void *) PyArray_Correlate2, + (void *) PyArray_NeighborhoodIterNew, + (void *) &PyTimeIntegerArrType_Type, + (void *) &PyDatetimeArrType_Type, + (void *) &PyTimedeltaArrType_Type, + (void *) &PyHalfArrType_Type, + (void *) &NpyIter_Type, + NULL, + NULL, + NULL, + NULL, + (void *) NpyIter_GetTransferFlags, + (void *) NpyIter_New, + (void *) NpyIter_MultiNew, + (void *) NpyIter_AdvancedNew, + (void *) NpyIter_Copy, + (void *) NpyIter_Deallocate, + (void *) NpyIter_HasDelayedBufAlloc, + (void *) NpyIter_HasExternalLoop, + (void *) NpyIter_EnableExternalLoop, + (void *) NpyIter_GetInnerStrideArray, + (void *) NpyIter_GetInnerLoopSizePtr, + (void *) NpyIter_Reset, + (void *) NpyIter_ResetBasePointers, + (void *) NpyIter_ResetToIterIndexRange, + (void *) NpyIter_GetNDim, + (void *) NpyIter_GetNOp, + (void *) NpyIter_GetIterNext, + (void *) NpyIter_GetIterSize, + (void *) NpyIter_GetIterIndexRange, + (void *) NpyIter_GetIterIndex, + (void *) NpyIter_GotoIterIndex, + (void *) NpyIter_HasMultiIndex, + (void *) NpyIter_GetShape, + (void *) NpyIter_GetGetMultiIndex, + (void *) NpyIter_GotoMultiIndex, + (void *) NpyIter_RemoveMultiIndex, + (void *) NpyIter_HasIndex, + (void *) NpyIter_IsBuffered, + (void *) NpyIter_IsGrowInner, + (void *) NpyIter_GetBufferSize, + (void *) NpyIter_GetIndexPtr, + (void *) NpyIter_GotoIndex, + (void *) NpyIter_GetDataPtrArray, + (void *) NpyIter_GetDescrArray, + (void *) NpyIter_GetOperandArray, + (void *) NpyIter_GetIterView, + (void *) NpyIter_GetReadFlags, + (void *) NpyIter_GetWriteFlags, + (void *) NpyIter_DebugPrint, + (void *) NpyIter_IterationNeedsAPI, + (void *) NpyIter_GetInnerFixedStrideArray, + (void *) NpyIter_RemoveAxis, + (void *) NpyIter_GetAxisStrideArray, + (void *) NpyIter_RequiresBuffering, + (void *) NpyIter_GetInitialDataPtrArray, + (void *) NpyIter_CreateCompatibleStrides, + (void *) PyArray_CastingConverter, + (void *) PyArray_CountNonzero, + (void *) PyArray_PromoteTypes, + (void *) PyArray_MinScalarType, + (void *) PyArray_ResultType, + (void *) PyArray_CanCastArrayTo, + (void *) PyArray_CanCastTypeTo, + (void *) PyArray_EinsteinSum, + (void *) PyArray_NewLikeArray, + NULL, + (void *) PyArray_ConvertClipmodeSequence, + (void *) PyArray_MatrixProduct2, + (void *) NpyIter_IsFirstVisit, + (void *) PyArray_SetBaseObject, + (void *) PyArray_CreateSortedStridePerm, + (void *) PyArray_RemoveAxesInPlace, + (void *) PyArray_DebugPrint, + (void *) PyArray_FailUnlessWriteable, + (void *) PyArray_SetUpdateIfCopyBase, + (void *) PyDataMem_NEW, + (void *) PyDataMem_FREE, + (void *) PyDataMem_RENEW, + NULL, + (NPY_CASTING *) &NPY_DEFAULT_ASSIGN_CASTING, + NULL, + NULL, + NULL, + (void *) PyArray_Partition, + (void *) PyArray_ArgPartition, + (void *) PyArray_SelectkindConverter, + (void *) PyDataMem_NEW_ZEROED, + (void *) PyArray_CheckAnyScalarExact, + NULL, + (void *) PyArray_ResolveWritebackIfCopy, + (void *) PyArray_SetWritebackIfCopyBase, + (void *) PyDataMem_SetHandler, + (void *) PyDataMem_GetHandler, + (PyObject* *) &PyDataMem_DefaultHandler, + (void *) NpyDatetime_ConvertDatetime64ToDatetimeStruct, + (void *) NpyDatetime_ConvertDatetimeStructToDatetime64, + (void *) NpyDatetime_ConvertPyDateTimeToDatetimeStruct, + (void *) NpyDatetime_GetDatetimeISO8601StrLen, + (void *) NpyDatetime_MakeISO8601Datetime, + (void *) NpyDatetime_ParseISO8601Datetime, + (void *) NpyString_load, + (void *) NpyString_pack, + (void *) NpyString_pack_null, + (void *) NpyString_acquire_allocator, + (void *) NpyString_acquire_allocators, + (void *) NpyString_release_allocator, + (void *) NpyString_release_allocators, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + (void *) PyArray_GetDefaultDescr, + (void *) PyArrayInitDTypeMeta_FromSpec, + (void *) PyArray_CommonDType, + (void *) PyArray_PromoteDTypeSequence, + (void *) _PyDataType_GetArrFuncs, + NULL, + NULL, + NULL +}; diff --git a/local_packages/numpy/_core/include/numpy/__multiarray_api.h b/local_packages/numpy/_core/include/numpy/__multiarray_api.h new file mode 100644 index 0000000..763dc85 --- /dev/null +++ b/local_packages/numpy/_core/include/numpy/__multiarray_api.h @@ -0,0 +1,1628 @@ + +#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE) + +typedef struct { + PyObject_HEAD + npy_bool obval; +} PyBoolScalarObject; + +extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type; +extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; + +NPY_NO_EXPORT unsigned int PyArray_GetNDArrayCVersion \ + (void); +extern NPY_NO_EXPORT PyTypeObject PyArray_Type; + +extern NPY_NO_EXPORT PyArray_DTypeMeta PyArrayDescr_TypeFull; +#define PyArrayDescr_Type (*(PyTypeObject *)(&PyArrayDescr_TypeFull)) + +extern NPY_NO_EXPORT PyTypeObject PyArrayIter_Type; + +extern NPY_NO_EXPORT PyTypeObject PyArrayMultiIter_Type; + +extern NPY_NO_EXPORT int NPY_NUMUSERTYPES; + +extern NPY_NO_EXPORT PyTypeObject PyBoolArrType_Type; + +extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; + +extern NPY_NO_EXPORT PyTypeObject PyGenericArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyNumberArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyIntegerArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PySignedIntegerArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUnsignedIntegerArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyInexactArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyFloatingArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyComplexFloatingArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyFlexibleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyCharacterArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyByteArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyShortArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyIntArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyLongArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyLongLongArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUByteArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUShortArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUIntArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyULongArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyULongLongArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyFloatArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyDoubleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyLongDoubleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyCFloatArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyCDoubleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyCLongDoubleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyObjectArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyStringArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUnicodeArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyVoidArrType_Type; + +NPY_NO_EXPORT int PyArray_INCREF \ + (PyArrayObject *); +NPY_NO_EXPORT int PyArray_XDECREF \ + (PyArrayObject *); +NPY_NO_EXPORT void PyArray_SetStringFunction \ + (PyObject *, int); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromType \ + (int); +NPY_NO_EXPORT PyObject * PyArray_TypeObjectFromType \ + (int); +NPY_NO_EXPORT char * PyArray_Zero \ + (PyArrayObject *); +NPY_NO_EXPORT char * PyArray_One \ + (PyArrayObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_CastToType \ + (PyArrayObject *, PyArray_Descr *, int); +NPY_NO_EXPORT int PyArray_CopyInto \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT int PyArray_CopyAnyInto \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT int PyArray_CanCastSafely \ + (int, int); +NPY_NO_EXPORT npy_bool PyArray_CanCastTo \ + (PyArray_Descr *, PyArray_Descr *); +NPY_NO_EXPORT int PyArray_ObjectType \ + (PyObject *, int); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromObject \ + (PyObject *, PyArray_Descr *); +NPY_NO_EXPORT PyArrayObject ** PyArray_ConvertToCommonType \ + (PyObject *, int *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromScalar \ + (PyObject *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromTypeObject \ + (PyObject *); +NPY_NO_EXPORT npy_intp PyArray_Size \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Scalar \ + (void *, PyArray_Descr *, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromScalar \ + (PyObject *, PyArray_Descr *); +NPY_NO_EXPORT void PyArray_ScalarAsCtype \ + (PyObject *, void *); +NPY_NO_EXPORT int PyArray_CastScalarToCtype \ + (PyObject *, void *, PyArray_Descr *); +NPY_NO_EXPORT int PyArray_CastScalarDirect \ + (PyObject *, PyArray_Descr *, void *, int); +NPY_NO_EXPORT int PyArray_Pack \ + (PyArray_Descr *, void *, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromAny \ + (PyObject *, PyArray_Descr *, int, int, int, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_EnsureArray \ + (PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_EnsureAnyArray \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_FromFile \ + (FILE *, PyArray_Descr *, npy_intp, char *); +NPY_NO_EXPORT PyObject * PyArray_FromString \ + (char *, npy_intp, PyArray_Descr *, npy_intp, char *); +NPY_NO_EXPORT PyObject * PyArray_FromBuffer \ + (PyObject *, PyArray_Descr *, npy_intp, npy_intp); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromIter \ + (PyObject *, PyArray_Descr *, npy_intp); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_Return \ + (PyArrayObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_GetField \ + (PyArrayObject *, PyArray_Descr *, int); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetField \ + (PyArrayObject *, PyArray_Descr *, int, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Byteswap \ + (PyArrayObject *, npy_bool); +NPY_NO_EXPORT PyObject * PyArray_Resize \ + (PyArrayObject *, PyArray_Dims *, int, NPY_ORDER NPY_UNUSED(order)); +NPY_NO_EXPORT int PyArray_CopyObject \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_NewCopy \ + (PyArrayObject *, NPY_ORDER); +NPY_NO_EXPORT PyObject * PyArray_ToList \ + (PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_ToString \ + (PyArrayObject *, NPY_ORDER); +NPY_NO_EXPORT int PyArray_ToFile \ + (PyArrayObject *, FILE *, char *, char *); +NPY_NO_EXPORT int PyArray_Dump \ + (PyObject *, PyObject *, int); +NPY_NO_EXPORT PyObject * PyArray_Dumps \ + (PyObject *, int); +NPY_NO_EXPORT int PyArray_ValidType \ + (int); +NPY_NO_EXPORT void PyArray_UpdateFlags \ + (PyArrayObject *, int); +NPY_NO_EXPORT PyObject * PyArray_New \ + (PyTypeObject *, int, npy_intp const *, int, npy_intp const *, void *, int, int, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_NewFromDescr \ + (PyTypeObject *, PyArray_Descr *, int, npy_intp const *, npy_intp const *, void *, int, PyObject *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNew \ + (PyArray_Descr *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNewFromType \ + (int); +NPY_NO_EXPORT double PyArray_GetPriority \ + (PyObject *, double); +NPY_NO_EXPORT PyObject * PyArray_IterNew \ + (PyObject *); +NPY_NO_EXPORT PyObject* PyArray_MultiIterNew \ + (int, ...); +NPY_NO_EXPORT int PyArray_PyIntAsInt \ + (PyObject *); +NPY_NO_EXPORT npy_intp PyArray_PyIntAsIntp \ + (PyObject *); +NPY_NO_EXPORT int PyArray_Broadcast \ + (PyArrayMultiIterObject *); +NPY_NO_EXPORT int PyArray_FillWithScalar \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT npy_bool PyArray_CheckStrides \ + (int, int, npy_intp, npy_intp, npy_intp const *, npy_intp const *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNewByteorder \ + (PyArray_Descr *, char); +NPY_NO_EXPORT PyObject * PyArray_IterAllButAxis \ + (PyObject *, int *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_CheckFromAny \ + (PyObject *, PyArray_Descr *, int, int, int, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromArray \ + (PyArrayObject *, PyArray_Descr *, int); +NPY_NO_EXPORT PyObject * PyArray_FromInterface \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_FromStructInterface \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_FromArrayAttr \ + (PyObject *, PyArray_Descr *, PyObject *); +NPY_NO_EXPORT NPY_SCALARKIND PyArray_ScalarKind \ + (int, PyArrayObject **); +NPY_NO_EXPORT int PyArray_CanCoerceScalar \ + (int, int, NPY_SCALARKIND); +NPY_NO_EXPORT npy_bool PyArray_CanCastScalar \ + (PyTypeObject *, PyTypeObject *); +NPY_NO_EXPORT int PyArray_RemoveSmallest \ + (PyArrayMultiIterObject *); +NPY_NO_EXPORT int PyArray_ElementStrides \ + (PyObject *); +NPY_NO_EXPORT void PyArray_Item_INCREF \ + (char *, PyArray_Descr *); +NPY_NO_EXPORT void PyArray_Item_XDECREF \ + (char *, PyArray_Descr *); +NPY_NO_EXPORT PyObject * PyArray_Transpose \ + (PyArrayObject *, PyArray_Dims *); +NPY_NO_EXPORT PyObject * PyArray_TakeFrom \ + (PyArrayObject *, PyObject *, int, PyArrayObject *, NPY_CLIPMODE); +NPY_NO_EXPORT PyObject * PyArray_PutTo \ + (PyArrayObject *, PyObject*, PyObject *, NPY_CLIPMODE); +NPY_NO_EXPORT PyObject * PyArray_PutMask \ + (PyArrayObject *, PyObject*, PyObject*); +NPY_NO_EXPORT PyObject * PyArray_Repeat \ + (PyArrayObject *, PyObject *, int); +NPY_NO_EXPORT PyObject * PyArray_Choose \ + (PyArrayObject *, PyObject *, PyArrayObject *, NPY_CLIPMODE); +NPY_NO_EXPORT int PyArray_Sort \ + (PyArrayObject *, int, NPY_SORTKIND); +NPY_NO_EXPORT PyObject * PyArray_ArgSort \ + (PyArrayObject *, int, NPY_SORTKIND); +NPY_NO_EXPORT PyObject * PyArray_SearchSorted \ + (PyArrayObject *, PyObject *, NPY_SEARCHSIDE, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_ArgMax \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_ArgMin \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Reshape \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Newshape \ + (PyArrayObject *, PyArray_Dims *, NPY_ORDER); +NPY_NO_EXPORT PyObject * PyArray_Squeeze \ + (PyArrayObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_View \ + (PyArrayObject *, PyArray_Descr *, PyTypeObject *); +NPY_NO_EXPORT PyObject * PyArray_SwapAxes \ + (PyArrayObject *, int, int); +NPY_NO_EXPORT PyObject * PyArray_Max \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Min \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Ptp \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Mean \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Trace \ + (PyArrayObject *, int, int, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Diagonal \ + (PyArrayObject *, int, int, int); +NPY_NO_EXPORT PyObject * PyArray_Clip \ + (PyArrayObject *, PyObject *, PyObject *, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Conjugate \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Nonzero \ + (PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Std \ + (PyArrayObject *, int, int, PyArrayObject *, int); +NPY_NO_EXPORT PyObject * PyArray_Sum \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_CumSum \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Prod \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_CumProd \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_All \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Any \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Compress \ + (PyArrayObject *, PyObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Flatten \ + (PyArrayObject *, NPY_ORDER); +NPY_NO_EXPORT PyObject * PyArray_Ravel \ + (PyArrayObject *, NPY_ORDER); +NPY_NO_EXPORT npy_intp PyArray_MultiplyList \ + (npy_intp const *, int); +NPY_NO_EXPORT int PyArray_MultiplyIntList \ + (int const *, int); +NPY_NO_EXPORT void * PyArray_GetPtr \ + (PyArrayObject *, npy_intp const*); +NPY_NO_EXPORT int PyArray_CompareLists \ + (npy_intp const *, npy_intp const *, int); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(5) int PyArray_AsCArray \ + (PyObject **, void *, npy_intp *, int, PyArray_Descr*); +NPY_NO_EXPORT int PyArray_Free \ + (PyObject *, void *); +NPY_NO_EXPORT int PyArray_Converter \ + (PyObject *, PyObject **); +NPY_NO_EXPORT int PyArray_IntpFromSequence \ + (PyObject *, npy_intp *, int); +NPY_NO_EXPORT PyObject * PyArray_Concatenate \ + (PyObject *, int); +NPY_NO_EXPORT PyObject * PyArray_InnerProduct \ + (PyObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_MatrixProduct \ + (PyObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Correlate \ + (PyObject *, PyObject *, int); +NPY_NO_EXPORT int PyArray_DescrConverter \ + (PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyArray_DescrConverter2 \ + (PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyArray_IntpConverter \ + (PyObject *, PyArray_Dims *); +NPY_NO_EXPORT int PyArray_BufferConverter \ + (PyObject *, PyArray_Chunk *); +NPY_NO_EXPORT int PyArray_AxisConverter \ + (PyObject *, int *); +NPY_NO_EXPORT int PyArray_BoolConverter \ + (PyObject *, npy_bool *); +NPY_NO_EXPORT int PyArray_ByteorderConverter \ + (PyObject *, char *); +NPY_NO_EXPORT int PyArray_OrderConverter \ + (PyObject *, NPY_ORDER *); +NPY_NO_EXPORT unsigned char PyArray_EquivTypes \ + (PyArray_Descr *, PyArray_Descr *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_Zeros \ + (int, npy_intp const *, PyArray_Descr *, int); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_Empty \ + (int, npy_intp const *, PyArray_Descr *, int); +NPY_NO_EXPORT PyObject * PyArray_Where \ + (PyObject *, PyObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Arange \ + (double, double, double, int); +NPY_NO_EXPORT PyObject * PyArray_ArangeObj \ + (PyObject *, PyObject *, PyObject *, PyArray_Descr *); +NPY_NO_EXPORT int PyArray_SortkindConverter \ + (PyObject *, NPY_SORTKIND *); +NPY_NO_EXPORT PyObject * PyArray_LexSort \ + (PyObject *, int); +NPY_NO_EXPORT PyObject * PyArray_Round \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT unsigned char PyArray_EquivTypenums \ + (int, int); +NPY_NO_EXPORT int PyArray_RegisterDataType \ + (PyArray_DescrProto *); +NPY_NO_EXPORT int PyArray_RegisterCastFunc \ + (PyArray_Descr *, int, PyArray_VectorUnaryFunc *); +NPY_NO_EXPORT int PyArray_RegisterCanCast \ + (PyArray_Descr *, int, NPY_SCALARKIND); +NPY_NO_EXPORT void PyArray_InitArrFuncs \ + (PyArray_ArrFuncs *); +NPY_NO_EXPORT PyObject * PyArray_IntTupleFromIntp \ + (int, npy_intp const *); +NPY_NO_EXPORT int PyArray_ClipmodeConverter \ + (PyObject *, NPY_CLIPMODE *); +NPY_NO_EXPORT int PyArray_OutputConverter \ + (PyObject *, PyArrayObject **); +NPY_NO_EXPORT PyObject * PyArray_BroadcastToShape \ + (PyObject *, npy_intp *, int); +NPY_NO_EXPORT int PyArray_DescrAlignConverter \ + (PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyArray_DescrAlignConverter2 \ + (PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyArray_SearchsideConverter \ + (PyObject *, void *); +NPY_NO_EXPORT PyObject * PyArray_CheckAxis \ + (PyArrayObject *, int *, int); +NPY_NO_EXPORT npy_intp PyArray_OverflowMultiplyList \ + (npy_intp const *, int); +NPY_NO_EXPORT PyObject* PyArray_MultiIterFromObjects \ + (PyObject **, int, int, ...); +NPY_NO_EXPORT int PyArray_GetEndianness \ + (void); +NPY_NO_EXPORT unsigned int PyArray_GetNDArrayCFeatureVersion \ + (void); +NPY_NO_EXPORT PyObject * PyArray_Correlate2 \ + (PyObject *, PyObject *, int); +NPY_NO_EXPORT PyObject* PyArray_NeighborhoodIterNew \ + (PyArrayIterObject *, const npy_intp *, int, PyArrayObject*); +extern NPY_NO_EXPORT PyTypeObject PyTimeIntegerArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyDatetimeArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyTimedeltaArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyHalfArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject NpyIter_Type; + +NPY_NO_EXPORT NPY_ARRAYMETHOD_FLAGS NpyIter_GetTransferFlags \ + (NpyIter *); +NPY_NO_EXPORT NpyIter * NpyIter_New \ + (PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*); +NPY_NO_EXPORT NpyIter * NpyIter_MultiNew \ + (int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **); +NPY_NO_EXPORT NpyIter * NpyIter_AdvancedNew \ + (int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **, int, int **, npy_intp *, npy_intp); +NPY_NO_EXPORT NpyIter * NpyIter_Copy \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_Deallocate \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_HasDelayedBufAlloc \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_HasExternalLoop \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_EnableExternalLoop \ + (NpyIter *); +NPY_NO_EXPORT npy_intp * NpyIter_GetInnerStrideArray \ + (NpyIter *); +NPY_NO_EXPORT npy_intp * NpyIter_GetInnerLoopSizePtr \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_Reset \ + (NpyIter *, char **); +NPY_NO_EXPORT int NpyIter_ResetBasePointers \ + (NpyIter *, char **, char **); +NPY_NO_EXPORT int NpyIter_ResetToIterIndexRange \ + (NpyIter *, npy_intp, npy_intp, char **); +NPY_NO_EXPORT int NpyIter_GetNDim \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_GetNOp \ + (NpyIter *); +NPY_NO_EXPORT NpyIter_IterNextFunc * NpyIter_GetIterNext \ + (NpyIter *, char **); +NPY_NO_EXPORT npy_intp NpyIter_GetIterSize \ + (NpyIter *); +NPY_NO_EXPORT void NpyIter_GetIterIndexRange \ + (NpyIter *, npy_intp *, npy_intp *); +NPY_NO_EXPORT npy_intp NpyIter_GetIterIndex \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_GotoIterIndex \ + (NpyIter *, npy_intp); +NPY_NO_EXPORT npy_bool NpyIter_HasMultiIndex \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_GetShape \ + (NpyIter *, npy_intp *); +NPY_NO_EXPORT NpyIter_GetMultiIndexFunc * NpyIter_GetGetMultiIndex \ + (NpyIter *, char **); +NPY_NO_EXPORT int NpyIter_GotoMultiIndex \ + (NpyIter *, npy_intp const *); +NPY_NO_EXPORT int NpyIter_RemoveMultiIndex \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_HasIndex \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_IsBuffered \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_IsGrowInner \ + (NpyIter *); +NPY_NO_EXPORT npy_intp NpyIter_GetBufferSize \ + (NpyIter *); +NPY_NO_EXPORT npy_intp * NpyIter_GetIndexPtr \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_GotoIndex \ + (NpyIter *, npy_intp); +NPY_NO_EXPORT char ** NpyIter_GetDataPtrArray \ + (NpyIter *); +NPY_NO_EXPORT PyArray_Descr ** NpyIter_GetDescrArray \ + (NpyIter *); +NPY_NO_EXPORT PyArrayObject ** NpyIter_GetOperandArray \ + (NpyIter *); +NPY_NO_EXPORT PyArrayObject * NpyIter_GetIterView \ + (NpyIter *, npy_intp); +NPY_NO_EXPORT void NpyIter_GetReadFlags \ + (NpyIter *, char *); +NPY_NO_EXPORT void NpyIter_GetWriteFlags \ + (NpyIter *, char *); +NPY_NO_EXPORT void NpyIter_DebugPrint \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_IterationNeedsAPI \ + (NpyIter *); +NPY_NO_EXPORT void NpyIter_GetInnerFixedStrideArray \ + (NpyIter *, npy_intp *); +NPY_NO_EXPORT int NpyIter_RemoveAxis \ + (NpyIter *, int); +NPY_NO_EXPORT npy_intp * NpyIter_GetAxisStrideArray \ + (NpyIter *, int); +NPY_NO_EXPORT npy_bool NpyIter_RequiresBuffering \ + (NpyIter *); +NPY_NO_EXPORT char ** NpyIter_GetInitialDataPtrArray \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_CreateCompatibleStrides \ + (NpyIter *, npy_intp, npy_intp *); +NPY_NO_EXPORT int PyArray_CastingConverter \ + (PyObject *, NPY_CASTING *); +NPY_NO_EXPORT npy_intp PyArray_CountNonzero \ + (PyArrayObject *); +NPY_NO_EXPORT PyArray_Descr * PyArray_PromoteTypes \ + (PyArray_Descr *, PyArray_Descr *); +NPY_NO_EXPORT PyArray_Descr * PyArray_MinScalarType \ + (PyArrayObject *); +NPY_NO_EXPORT PyArray_Descr * PyArray_ResultType \ + (npy_intp, PyArrayObject *arrs[], npy_intp, PyArray_Descr *descrs[]); +NPY_NO_EXPORT npy_bool PyArray_CanCastArrayTo \ + (PyArrayObject *, PyArray_Descr *, NPY_CASTING); +NPY_NO_EXPORT npy_bool PyArray_CanCastTypeTo \ + (PyArray_Descr *, PyArray_Descr *, NPY_CASTING); +NPY_NO_EXPORT PyArrayObject * PyArray_EinsteinSum \ + (char *, npy_intp, PyArrayObject **, PyArray_Descr *, NPY_ORDER, NPY_CASTING, PyArrayObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_NewLikeArray \ + (PyArrayObject *, NPY_ORDER, PyArray_Descr *, int); +NPY_NO_EXPORT int PyArray_ConvertClipmodeSequence \ + (PyObject *, NPY_CLIPMODE *, int); +NPY_NO_EXPORT PyObject * PyArray_MatrixProduct2 \ + (PyObject *, PyObject *, PyArrayObject*); +NPY_NO_EXPORT npy_bool NpyIter_IsFirstVisit \ + (NpyIter *, int); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetBaseObject \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT void PyArray_CreateSortedStridePerm \ + (int, npy_intp const *, npy_stride_sort_item *); +NPY_NO_EXPORT void PyArray_RemoveAxesInPlace \ + (PyArrayObject *, const npy_bool *); +NPY_NO_EXPORT void PyArray_DebugPrint \ + (PyArrayObject *); +NPY_NO_EXPORT int PyArray_FailUnlessWriteable \ + (PyArrayObject *, const char *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetUpdateIfCopyBase \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT void * PyDataMem_NEW \ + (size_t); +NPY_NO_EXPORT void PyDataMem_FREE \ + (void *); +NPY_NO_EXPORT void * PyDataMem_RENEW \ + (void *, size_t); +extern NPY_NO_EXPORT NPY_CASTING NPY_DEFAULT_ASSIGN_CASTING; + +NPY_NO_EXPORT int PyArray_Partition \ + (PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND); +NPY_NO_EXPORT PyObject * PyArray_ArgPartition \ + (PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND); +NPY_NO_EXPORT int PyArray_SelectkindConverter \ + (PyObject *, NPY_SELECTKIND *); +NPY_NO_EXPORT void * PyDataMem_NEW_ZEROED \ + (size_t, size_t); +NPY_NO_EXPORT int PyArray_CheckAnyScalarExact \ + (PyObject *); +NPY_NO_EXPORT int PyArray_ResolveWritebackIfCopy \ + (PyArrayObject *); +NPY_NO_EXPORT int PyArray_SetWritebackIfCopyBase \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyDataMem_SetHandler \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyDataMem_GetHandler \ + (void); +extern NPY_NO_EXPORT PyObject* PyDataMem_DefaultHandler; + +NPY_NO_EXPORT int NpyDatetime_ConvertDatetime64ToDatetimeStruct \ + (PyArray_DatetimeMetaData *, npy_datetime, npy_datetimestruct *); +NPY_NO_EXPORT int NpyDatetime_ConvertDatetimeStructToDatetime64 \ + (PyArray_DatetimeMetaData *, const npy_datetimestruct *, npy_datetime *); +NPY_NO_EXPORT int NpyDatetime_ConvertPyDateTimeToDatetimeStruct \ + (PyObject *, npy_datetimestruct *, NPY_DATETIMEUNIT *, int); +NPY_NO_EXPORT int NpyDatetime_GetDatetimeISO8601StrLen \ + (int, NPY_DATETIMEUNIT); +NPY_NO_EXPORT int NpyDatetime_MakeISO8601Datetime \ + (npy_datetimestruct *, char *, npy_intp, int, int, NPY_DATETIMEUNIT, int, NPY_CASTING); +NPY_NO_EXPORT int NpyDatetime_ParseISO8601Datetime \ + (char const *, Py_ssize_t, NPY_DATETIMEUNIT, NPY_CASTING, npy_datetimestruct *, NPY_DATETIMEUNIT *, npy_bool *); +NPY_NO_EXPORT int NpyString_load \ + (npy_string_allocator *, const npy_packed_static_string *, npy_static_string *); +NPY_NO_EXPORT int NpyString_pack \ + (npy_string_allocator *, npy_packed_static_string *, const char *, size_t); +NPY_NO_EXPORT int NpyString_pack_null \ + (npy_string_allocator *, npy_packed_static_string *); +NPY_NO_EXPORT npy_string_allocator * NpyString_acquire_allocator \ + (const PyArray_StringDTypeObject *); +NPY_NO_EXPORT void NpyString_acquire_allocators \ + (size_t, PyArray_Descr *const descrs[], npy_string_allocator *allocators[]); +NPY_NO_EXPORT void NpyString_release_allocator \ + (npy_string_allocator *); +NPY_NO_EXPORT void NpyString_release_allocators \ + (size_t, npy_string_allocator *allocators[]); +NPY_NO_EXPORT PyArray_Descr * PyArray_GetDefaultDescr \ + (PyArray_DTypeMeta *); +NPY_NO_EXPORT int PyArrayInitDTypeMeta_FromSpec \ + (PyArray_DTypeMeta *, PyArrayDTypeMeta_Spec *); +NPY_NO_EXPORT PyArray_DTypeMeta * PyArray_CommonDType \ + (PyArray_DTypeMeta *, PyArray_DTypeMeta *); +NPY_NO_EXPORT PyArray_DTypeMeta * PyArray_PromoteDTypeSequence \ + (npy_intp, PyArray_DTypeMeta **); +NPY_NO_EXPORT PyArray_ArrFuncs * _PyDataType_GetArrFuncs \ + (const PyArray_Descr *); + +#else + +#if defined(PY_ARRAY_UNIQUE_SYMBOL) + #define PyArray_API PY_ARRAY_UNIQUE_SYMBOL + #define _NPY_VERSION_CONCAT_HELPER2(x, y) x ## y + #define _NPY_VERSION_CONCAT_HELPER(arg) \ + _NPY_VERSION_CONCAT_HELPER2(arg, PyArray_RUNTIME_VERSION) + #define PyArray_RUNTIME_VERSION \ + _NPY_VERSION_CONCAT_HELPER(PY_ARRAY_UNIQUE_SYMBOL) +#endif + +/* By default do not export API in an .so (was never the case on windows) */ +#ifndef NPY_API_SYMBOL_ATTRIBUTE + #define NPY_API_SYMBOL_ATTRIBUTE NPY_VISIBILITY_HIDDEN +#endif + +#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY) +extern NPY_API_SYMBOL_ATTRIBUTE void **PyArray_API; +extern NPY_API_SYMBOL_ATTRIBUTE int PyArray_RUNTIME_VERSION; +#else +#if defined(PY_ARRAY_UNIQUE_SYMBOL) +NPY_API_SYMBOL_ATTRIBUTE void **PyArray_API; +NPY_API_SYMBOL_ATTRIBUTE int PyArray_RUNTIME_VERSION; +#else +static void **PyArray_API = NULL; +static int PyArray_RUNTIME_VERSION = 0; +#endif +#endif + +#define PyArray_GetNDArrayCVersion \ + (*(unsigned int (*)(void)) \ + PyArray_API[0]) +#define PyArray_Type (*(PyTypeObject *)PyArray_API[2]) +#define PyArrayDescr_Type (*(PyTypeObject *)PyArray_API[3]) +#define PyArrayIter_Type (*(PyTypeObject *)PyArray_API[5]) +#define PyArrayMultiIter_Type (*(PyTypeObject *)PyArray_API[6]) +#define NPY_NUMUSERTYPES (*(int *)PyArray_API[7]) +#define PyBoolArrType_Type (*(PyTypeObject *)PyArray_API[8]) +#define _PyArrayScalar_BoolValues ((PyBoolScalarObject *)PyArray_API[9]) +#define PyGenericArrType_Type (*(PyTypeObject *)PyArray_API[10]) +#define PyNumberArrType_Type (*(PyTypeObject *)PyArray_API[11]) +#define PyIntegerArrType_Type (*(PyTypeObject *)PyArray_API[12]) +#define PySignedIntegerArrType_Type (*(PyTypeObject *)PyArray_API[13]) +#define PyUnsignedIntegerArrType_Type (*(PyTypeObject *)PyArray_API[14]) +#define PyInexactArrType_Type (*(PyTypeObject *)PyArray_API[15]) +#define PyFloatingArrType_Type (*(PyTypeObject *)PyArray_API[16]) +#define PyComplexFloatingArrType_Type (*(PyTypeObject *)PyArray_API[17]) +#define PyFlexibleArrType_Type (*(PyTypeObject *)PyArray_API[18]) +#define PyCharacterArrType_Type (*(PyTypeObject *)PyArray_API[19]) +#define PyByteArrType_Type (*(PyTypeObject *)PyArray_API[20]) +#define PyShortArrType_Type (*(PyTypeObject *)PyArray_API[21]) +#define PyIntArrType_Type (*(PyTypeObject *)PyArray_API[22]) +#define PyLongArrType_Type (*(PyTypeObject *)PyArray_API[23]) +#define PyLongLongArrType_Type (*(PyTypeObject *)PyArray_API[24]) +#define PyUByteArrType_Type (*(PyTypeObject *)PyArray_API[25]) +#define PyUShortArrType_Type (*(PyTypeObject *)PyArray_API[26]) +#define PyUIntArrType_Type (*(PyTypeObject *)PyArray_API[27]) +#define PyULongArrType_Type (*(PyTypeObject *)PyArray_API[28]) +#define PyULongLongArrType_Type (*(PyTypeObject *)PyArray_API[29]) +#define PyFloatArrType_Type (*(PyTypeObject *)PyArray_API[30]) +#define PyDoubleArrType_Type (*(PyTypeObject *)PyArray_API[31]) +#define PyLongDoubleArrType_Type (*(PyTypeObject *)PyArray_API[32]) +#define PyCFloatArrType_Type (*(PyTypeObject *)PyArray_API[33]) +#define PyCDoubleArrType_Type (*(PyTypeObject *)PyArray_API[34]) +#define PyCLongDoubleArrType_Type (*(PyTypeObject *)PyArray_API[35]) +#define PyObjectArrType_Type (*(PyTypeObject *)PyArray_API[36]) +#define PyStringArrType_Type (*(PyTypeObject *)PyArray_API[37]) +#define PyUnicodeArrType_Type (*(PyTypeObject *)PyArray_API[38]) +#define PyVoidArrType_Type (*(PyTypeObject *)PyArray_API[39]) +#define PyArray_INCREF \ + (*(int (*)(PyArrayObject *)) \ + PyArray_API[42]) +#define PyArray_XDECREF \ + (*(int (*)(PyArrayObject *)) \ + PyArray_API[43]) +#define PyArray_SetStringFunction \ + (*(void (*)(PyObject *, int)) \ + PyArray_API[44]) +#define PyArray_DescrFromType \ + (*(PyArray_Descr * (*)(int)) \ + PyArray_API[45]) +#define PyArray_TypeObjectFromType \ + (*(PyObject * (*)(int)) \ + PyArray_API[46]) +#define PyArray_Zero \ + (*(char * (*)(PyArrayObject *)) \ + PyArray_API[47]) +#define PyArray_One \ + (*(char * (*)(PyArrayObject *)) \ + PyArray_API[48]) +#define PyArray_CastToType \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ + PyArray_API[49]) +#define PyArray_CopyInto \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[50]) +#define PyArray_CopyAnyInto \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[51]) +#define PyArray_CanCastSafely \ + (*(int (*)(int, int)) \ + PyArray_API[52]) +#define PyArray_CanCastTo \ + (*(npy_bool (*)(PyArray_Descr *, PyArray_Descr *)) \ + PyArray_API[53]) +#define PyArray_ObjectType \ + (*(int (*)(PyObject *, int)) \ + PyArray_API[54]) +#define PyArray_DescrFromObject \ + (*(PyArray_Descr * (*)(PyObject *, PyArray_Descr *)) \ + PyArray_API[55]) +#define PyArray_ConvertToCommonType \ + (*(PyArrayObject ** (*)(PyObject *, int *)) \ + PyArray_API[56]) +#define PyArray_DescrFromScalar \ + (*(PyArray_Descr * (*)(PyObject *)) \ + PyArray_API[57]) +#define PyArray_DescrFromTypeObject \ + (*(PyArray_Descr * (*)(PyObject *)) \ + PyArray_API[58]) +#define PyArray_Size \ + (*(npy_intp (*)(PyObject *)) \ + PyArray_API[59]) +#define PyArray_Scalar \ + (*(PyObject * (*)(void *, PyArray_Descr *, PyObject *)) \ + PyArray_API[60]) +#define PyArray_FromScalar \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *)) \ + PyArray_API[61]) +#define PyArray_ScalarAsCtype \ + (*(void (*)(PyObject *, void *)) \ + PyArray_API[62]) +#define PyArray_CastScalarToCtype \ + (*(int (*)(PyObject *, void *, PyArray_Descr *)) \ + PyArray_API[63]) +#define PyArray_CastScalarDirect \ + (*(int (*)(PyObject *, PyArray_Descr *, void *, int)) \ + PyArray_API[64]) + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define PyArray_Pack \ + (*(int (*)(PyArray_Descr *, void *, PyObject *)) \ + PyArray_API[65]) +#endif +#define PyArray_FromAny \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, int, int, int, PyObject *)) \ + PyArray_API[69]) +#define PyArray_EnsureArray \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[70]) +#define PyArray_EnsureAnyArray \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[71]) +#define PyArray_FromFile \ + (*(PyObject * (*)(FILE *, PyArray_Descr *, npy_intp, char *)) \ + PyArray_API[72]) +#define PyArray_FromString \ + (*(PyObject * (*)(char *, npy_intp, PyArray_Descr *, npy_intp, char *)) \ + PyArray_API[73]) +#define PyArray_FromBuffer \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, npy_intp, npy_intp)) \ + PyArray_API[74]) +#define PyArray_FromIter \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, npy_intp)) \ + PyArray_API[75]) +#define PyArray_Return \ + (*(PyObject * (*)(PyArrayObject *)) \ + PyArray_API[76]) +#define PyArray_GetField \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ + PyArray_API[77]) +#define PyArray_SetField \ + (*(int (*)(PyArrayObject *, PyArray_Descr *, int, PyObject *)) \ + PyArray_API[78]) +#define PyArray_Byteswap \ + (*(PyObject * (*)(PyArrayObject *, npy_bool)) \ + PyArray_API[79]) +#define PyArray_Resize \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *, int, NPY_ORDER NPY_UNUSED(order))) \ + PyArray_API[80]) +#define PyArray_CopyObject \ + (*(int (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[84]) +#define PyArray_NewCopy \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ + PyArray_API[85]) +#define PyArray_ToList \ + (*(PyObject * (*)(PyArrayObject *)) \ + PyArray_API[86]) +#define PyArray_ToString \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ + PyArray_API[87]) +#define PyArray_ToFile \ + (*(int (*)(PyArrayObject *, FILE *, char *, char *)) \ + PyArray_API[88]) +#define PyArray_Dump \ + (*(int (*)(PyObject *, PyObject *, int)) \ + PyArray_API[89]) +#define PyArray_Dumps \ + (*(PyObject * (*)(PyObject *, int)) \ + PyArray_API[90]) +#define PyArray_ValidType \ + (*(int (*)(int)) \ + PyArray_API[91]) +#define PyArray_UpdateFlags \ + (*(void (*)(PyArrayObject *, int)) \ + PyArray_API[92]) +#define PyArray_New \ + (*(PyObject * (*)(PyTypeObject *, int, npy_intp const *, int, npy_intp const *, void *, int, int, PyObject *)) \ + PyArray_API[93]) +#define PyArray_NewFromDescr \ + (*(PyObject * (*)(PyTypeObject *, PyArray_Descr *, int, npy_intp const *, npy_intp const *, void *, int, PyObject *)) \ + PyArray_API[94]) +#define PyArray_DescrNew \ + (*(PyArray_Descr * (*)(PyArray_Descr *)) \ + PyArray_API[95]) +#define PyArray_DescrNewFromType \ + (*(PyArray_Descr * (*)(int)) \ + PyArray_API[96]) +#define PyArray_GetPriority \ + (*(double (*)(PyObject *, double)) \ + PyArray_API[97]) +#define PyArray_IterNew \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[98]) +#define PyArray_MultiIterNew \ + (*(PyObject* (*)(int, ...)) \ + PyArray_API[99]) +#define PyArray_PyIntAsInt \ + (*(int (*)(PyObject *)) \ + PyArray_API[100]) +#define PyArray_PyIntAsIntp \ + (*(npy_intp (*)(PyObject *)) \ + PyArray_API[101]) +#define PyArray_Broadcast \ + (*(int (*)(PyArrayMultiIterObject *)) \ + PyArray_API[102]) +#define PyArray_FillWithScalar \ + (*(int (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[104]) +#define PyArray_CheckStrides \ + (*(npy_bool (*)(int, int, npy_intp, npy_intp, npy_intp const *, npy_intp const *)) \ + PyArray_API[105]) +#define PyArray_DescrNewByteorder \ + (*(PyArray_Descr * (*)(PyArray_Descr *, char)) \ + PyArray_API[106]) +#define PyArray_IterAllButAxis \ + (*(PyObject * (*)(PyObject *, int *)) \ + PyArray_API[107]) +#define PyArray_CheckFromAny \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, int, int, int, PyObject *)) \ + PyArray_API[108]) +#define PyArray_FromArray \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ + PyArray_API[109]) +#define PyArray_FromInterface \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[110]) +#define PyArray_FromStructInterface \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[111]) +#define PyArray_FromArrayAttr \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, PyObject *)) \ + PyArray_API[112]) +#define PyArray_ScalarKind \ + (*(NPY_SCALARKIND (*)(int, PyArrayObject **)) \ + PyArray_API[113]) +#define PyArray_CanCoerceScalar \ + (*(int (*)(int, int, NPY_SCALARKIND)) \ + PyArray_API[114]) +#define PyArray_CanCastScalar \ + (*(npy_bool (*)(PyTypeObject *, PyTypeObject *)) \ + PyArray_API[116]) +#define PyArray_RemoveSmallest \ + (*(int (*)(PyArrayMultiIterObject *)) \ + PyArray_API[118]) +#define PyArray_ElementStrides \ + (*(int (*)(PyObject *)) \ + PyArray_API[119]) +#define PyArray_Item_INCREF \ + (*(void (*)(char *, PyArray_Descr *)) \ + PyArray_API[120]) +#define PyArray_Item_XDECREF \ + (*(void (*)(char *, PyArray_Descr *)) \ + PyArray_API[121]) +#define PyArray_Transpose \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *)) \ + PyArray_API[123]) +#define PyArray_TakeFrom \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *, NPY_CLIPMODE)) \ + PyArray_API[124]) +#define PyArray_PutTo \ + (*(PyObject * (*)(PyArrayObject *, PyObject*, PyObject *, NPY_CLIPMODE)) \ + PyArray_API[125]) +#define PyArray_PutMask \ + (*(PyObject * (*)(PyArrayObject *, PyObject*, PyObject*)) \ + PyArray_API[126]) +#define PyArray_Repeat \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, int)) \ + PyArray_API[127]) +#define PyArray_Choose \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, PyArrayObject *, NPY_CLIPMODE)) \ + PyArray_API[128]) +#define PyArray_Sort \ + (*(int (*)(PyArrayObject *, int, NPY_SORTKIND)) \ + PyArray_API[129]) +#define PyArray_ArgSort \ + (*(PyObject * (*)(PyArrayObject *, int, NPY_SORTKIND)) \ + PyArray_API[130]) +#define PyArray_SearchSorted \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, NPY_SEARCHSIDE, PyObject *)) \ + PyArray_API[131]) +#define PyArray_ArgMax \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[132]) +#define PyArray_ArgMin \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[133]) +#define PyArray_Reshape \ + (*(PyObject * (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[134]) +#define PyArray_Newshape \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *, NPY_ORDER)) \ + PyArray_API[135]) +#define PyArray_Squeeze \ + (*(PyObject * (*)(PyArrayObject *)) \ + PyArray_API[136]) +#define PyArray_View \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, PyTypeObject *)) \ + PyArray_API[137]) +#define PyArray_SwapAxes \ + (*(PyObject * (*)(PyArrayObject *, int, int)) \ + PyArray_API[138]) +#define PyArray_Max \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[139]) +#define PyArray_Min \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[140]) +#define PyArray_Ptp \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[141]) +#define PyArray_Mean \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[142]) +#define PyArray_Trace \ + (*(PyObject * (*)(PyArrayObject *, int, int, int, int, PyArrayObject *)) \ + PyArray_API[143]) +#define PyArray_Diagonal \ + (*(PyObject * (*)(PyArrayObject *, int, int, int)) \ + PyArray_API[144]) +#define PyArray_Clip \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, PyObject *, PyArrayObject *)) \ + PyArray_API[145]) +#define PyArray_Conjugate \ + (*(PyObject * (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[146]) +#define PyArray_Nonzero \ + (*(PyObject * (*)(PyArrayObject *)) \ + PyArray_API[147]) +#define PyArray_Std \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *, int)) \ + PyArray_API[148]) +#define PyArray_Sum \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[149]) +#define PyArray_CumSum \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[150]) +#define PyArray_Prod \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[151]) +#define PyArray_CumProd \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[152]) +#define PyArray_All \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[153]) +#define PyArray_Any \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[154]) +#define PyArray_Compress \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *)) \ + PyArray_API[155]) +#define PyArray_Flatten \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ + PyArray_API[156]) +#define PyArray_Ravel \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ + PyArray_API[157]) +#define PyArray_MultiplyList \ + (*(npy_intp (*)(npy_intp const *, int)) \ + PyArray_API[158]) +#define PyArray_MultiplyIntList \ + (*(int (*)(int const *, int)) \ + PyArray_API[159]) +#define PyArray_GetPtr \ + (*(void * (*)(PyArrayObject *, npy_intp const*)) \ + PyArray_API[160]) +#define PyArray_CompareLists \ + (*(int (*)(npy_intp const *, npy_intp const *, int)) \ + PyArray_API[161]) +#define PyArray_AsCArray \ + (*(int (*)(PyObject **, void *, npy_intp *, int, PyArray_Descr*)) \ + PyArray_API[162]) +#define PyArray_Free \ + (*(int (*)(PyObject *, void *)) \ + PyArray_API[165]) +#define PyArray_Converter \ + (*(int (*)(PyObject *, PyObject **)) \ + PyArray_API[166]) +#define PyArray_IntpFromSequence \ + (*(int (*)(PyObject *, npy_intp *, int)) \ + PyArray_API[167]) +#define PyArray_Concatenate \ + (*(PyObject * (*)(PyObject *, int)) \ + PyArray_API[168]) +#define PyArray_InnerProduct \ + (*(PyObject * (*)(PyObject *, PyObject *)) \ + PyArray_API[169]) +#define PyArray_MatrixProduct \ + (*(PyObject * (*)(PyObject *, PyObject *)) \ + PyArray_API[170]) +#define PyArray_Correlate \ + (*(PyObject * (*)(PyObject *, PyObject *, int)) \ + PyArray_API[172]) +#define PyArray_DescrConverter \ + (*(int (*)(PyObject *, PyArray_Descr **)) \ + PyArray_API[174]) +#define PyArray_DescrConverter2 \ + (*(int (*)(PyObject *, PyArray_Descr **)) \ + PyArray_API[175]) +#define PyArray_IntpConverter \ + (*(int (*)(PyObject *, PyArray_Dims *)) \ + PyArray_API[176]) +#define PyArray_BufferConverter \ + (*(int (*)(PyObject *, PyArray_Chunk *)) \ + PyArray_API[177]) +#define PyArray_AxisConverter \ + (*(int (*)(PyObject *, int *)) \ + PyArray_API[178]) +#define PyArray_BoolConverter \ + (*(int (*)(PyObject *, npy_bool *)) \ + PyArray_API[179]) +#define PyArray_ByteorderConverter \ + (*(int (*)(PyObject *, char *)) \ + PyArray_API[180]) +#define PyArray_OrderConverter \ + (*(int (*)(PyObject *, NPY_ORDER *)) \ + PyArray_API[181]) +#define PyArray_EquivTypes \ + (*(unsigned char (*)(PyArray_Descr *, PyArray_Descr *)) \ + PyArray_API[182]) +#define PyArray_Zeros \ + (*(PyObject * (*)(int, npy_intp const *, PyArray_Descr *, int)) \ + PyArray_API[183]) +#define PyArray_Empty \ + (*(PyObject * (*)(int, npy_intp const *, PyArray_Descr *, int)) \ + PyArray_API[184]) +#define PyArray_Where \ + (*(PyObject * (*)(PyObject *, PyObject *, PyObject *)) \ + PyArray_API[185]) +#define PyArray_Arange \ + (*(PyObject * (*)(double, double, double, int)) \ + PyArray_API[186]) +#define PyArray_ArangeObj \ + (*(PyObject * (*)(PyObject *, PyObject *, PyObject *, PyArray_Descr *)) \ + PyArray_API[187]) +#define PyArray_SortkindConverter \ + (*(int (*)(PyObject *, NPY_SORTKIND *)) \ + PyArray_API[188]) +#define PyArray_LexSort \ + (*(PyObject * (*)(PyObject *, int)) \ + PyArray_API[189]) +#define PyArray_Round \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[190]) +#define PyArray_EquivTypenums \ + (*(unsigned char (*)(int, int)) \ + PyArray_API[191]) +#define PyArray_RegisterDataType \ + (*(int (*)(PyArray_DescrProto *)) \ + PyArray_API[192]) +#define PyArray_RegisterCastFunc \ + (*(int (*)(PyArray_Descr *, int, PyArray_VectorUnaryFunc *)) \ + PyArray_API[193]) +#define PyArray_RegisterCanCast \ + (*(int (*)(PyArray_Descr *, int, NPY_SCALARKIND)) \ + PyArray_API[194]) +#define PyArray_InitArrFuncs \ + (*(void (*)(PyArray_ArrFuncs *)) \ + PyArray_API[195]) +#define PyArray_IntTupleFromIntp \ + (*(PyObject * (*)(int, npy_intp const *)) \ + PyArray_API[196]) +#define PyArray_ClipmodeConverter \ + (*(int (*)(PyObject *, NPY_CLIPMODE *)) \ + PyArray_API[198]) +#define PyArray_OutputConverter \ + (*(int (*)(PyObject *, PyArrayObject **)) \ + PyArray_API[199]) +#define PyArray_BroadcastToShape \ + (*(PyObject * (*)(PyObject *, npy_intp *, int)) \ + PyArray_API[200]) +#define PyArray_DescrAlignConverter \ + (*(int (*)(PyObject *, PyArray_Descr **)) \ + PyArray_API[203]) +#define PyArray_DescrAlignConverter2 \ + (*(int (*)(PyObject *, PyArray_Descr **)) \ + PyArray_API[204]) +#define PyArray_SearchsideConverter \ + (*(int (*)(PyObject *, void *)) \ + PyArray_API[205]) +#define PyArray_CheckAxis \ + (*(PyObject * (*)(PyArrayObject *, int *, int)) \ + PyArray_API[206]) +#define PyArray_OverflowMultiplyList \ + (*(npy_intp (*)(npy_intp const *, int)) \ + PyArray_API[207]) +#define PyArray_MultiIterFromObjects \ + (*(PyObject* (*)(PyObject **, int, int, ...)) \ + PyArray_API[209]) +#define PyArray_GetEndianness \ + (*(int (*)(void)) \ + PyArray_API[210]) +#define PyArray_GetNDArrayCFeatureVersion \ + (*(unsigned int (*)(void)) \ + PyArray_API[211]) +#define PyArray_Correlate2 \ + (*(PyObject * (*)(PyObject *, PyObject *, int)) \ + PyArray_API[212]) +#define PyArray_NeighborhoodIterNew \ + (*(PyObject* (*)(PyArrayIterObject *, const npy_intp *, int, PyArrayObject*)) \ + PyArray_API[213]) +#define PyTimeIntegerArrType_Type (*(PyTypeObject *)PyArray_API[214]) +#define PyDatetimeArrType_Type (*(PyTypeObject *)PyArray_API[215]) +#define PyTimedeltaArrType_Type (*(PyTypeObject *)PyArray_API[216]) +#define PyHalfArrType_Type (*(PyTypeObject *)PyArray_API[217]) +#define NpyIter_Type (*(PyTypeObject *)PyArray_API[218]) + +#if NPY_FEATURE_VERSION >= NPY_2_3_API_VERSION +#define NpyIter_GetTransferFlags \ + (*(NPY_ARRAYMETHOD_FLAGS (*)(NpyIter *)) \ + PyArray_API[223]) +#endif +#define NpyIter_New \ + (*(NpyIter * (*)(PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*)) \ + PyArray_API[224]) +#define NpyIter_MultiNew \ + (*(NpyIter * (*)(int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **)) \ + PyArray_API[225]) +#define NpyIter_AdvancedNew \ + (*(NpyIter * (*)(int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **, int, int **, npy_intp *, npy_intp)) \ + PyArray_API[226]) +#define NpyIter_Copy \ + (*(NpyIter * (*)(NpyIter *)) \ + PyArray_API[227]) +#define NpyIter_Deallocate \ + (*(int (*)(NpyIter *)) \ + PyArray_API[228]) +#define NpyIter_HasDelayedBufAlloc \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[229]) +#define NpyIter_HasExternalLoop \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[230]) +#define NpyIter_EnableExternalLoop \ + (*(int (*)(NpyIter *)) \ + PyArray_API[231]) +#define NpyIter_GetInnerStrideArray \ + (*(npy_intp * (*)(NpyIter *)) \ + PyArray_API[232]) +#define NpyIter_GetInnerLoopSizePtr \ + (*(npy_intp * (*)(NpyIter *)) \ + PyArray_API[233]) +#define NpyIter_Reset \ + (*(int (*)(NpyIter *, char **)) \ + PyArray_API[234]) +#define NpyIter_ResetBasePointers \ + (*(int (*)(NpyIter *, char **, char **)) \ + PyArray_API[235]) +#define NpyIter_ResetToIterIndexRange \ + (*(int (*)(NpyIter *, npy_intp, npy_intp, char **)) \ + PyArray_API[236]) +#define NpyIter_GetNDim \ + (*(int (*)(NpyIter *)) \ + PyArray_API[237]) +#define NpyIter_GetNOp \ + (*(int (*)(NpyIter *)) \ + PyArray_API[238]) +#define NpyIter_GetIterNext \ + (*(NpyIter_IterNextFunc * (*)(NpyIter *, char **)) \ + PyArray_API[239]) +#define NpyIter_GetIterSize \ + (*(npy_intp (*)(NpyIter *)) \ + PyArray_API[240]) +#define NpyIter_GetIterIndexRange \ + (*(void (*)(NpyIter *, npy_intp *, npy_intp *)) \ + PyArray_API[241]) +#define NpyIter_GetIterIndex \ + (*(npy_intp (*)(NpyIter *)) \ + PyArray_API[242]) +#define NpyIter_GotoIterIndex \ + (*(int (*)(NpyIter *, npy_intp)) \ + PyArray_API[243]) +#define NpyIter_HasMultiIndex \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[244]) +#define NpyIter_GetShape \ + (*(int (*)(NpyIter *, npy_intp *)) \ + PyArray_API[245]) +#define NpyIter_GetGetMultiIndex \ + (*(NpyIter_GetMultiIndexFunc * (*)(NpyIter *, char **)) \ + PyArray_API[246]) +#define NpyIter_GotoMultiIndex \ + (*(int (*)(NpyIter *, npy_intp const *)) \ + PyArray_API[247]) +#define NpyIter_RemoveMultiIndex \ + (*(int (*)(NpyIter *)) \ + PyArray_API[248]) +#define NpyIter_HasIndex \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[249]) +#define NpyIter_IsBuffered \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[250]) +#define NpyIter_IsGrowInner \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[251]) +#define NpyIter_GetBufferSize \ + (*(npy_intp (*)(NpyIter *)) \ + PyArray_API[252]) +#define NpyIter_GetIndexPtr \ + (*(npy_intp * (*)(NpyIter *)) \ + PyArray_API[253]) +#define NpyIter_GotoIndex \ + (*(int (*)(NpyIter *, npy_intp)) \ + PyArray_API[254]) +#define NpyIter_GetDataPtrArray \ + (*(char ** (*)(NpyIter *)) \ + PyArray_API[255]) +#define NpyIter_GetDescrArray \ + (*(PyArray_Descr ** (*)(NpyIter *)) \ + PyArray_API[256]) +#define NpyIter_GetOperandArray \ + (*(PyArrayObject ** (*)(NpyIter *)) \ + PyArray_API[257]) +#define NpyIter_GetIterView \ + (*(PyArrayObject * (*)(NpyIter *, npy_intp)) \ + PyArray_API[258]) +#define NpyIter_GetReadFlags \ + (*(void (*)(NpyIter *, char *)) \ + PyArray_API[259]) +#define NpyIter_GetWriteFlags \ + (*(void (*)(NpyIter *, char *)) \ + PyArray_API[260]) +#define NpyIter_DebugPrint \ + (*(void (*)(NpyIter *)) \ + PyArray_API[261]) +#define NpyIter_IterationNeedsAPI \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[262]) +#define NpyIter_GetInnerFixedStrideArray \ + (*(void (*)(NpyIter *, npy_intp *)) \ + PyArray_API[263]) +#define NpyIter_RemoveAxis \ + (*(int (*)(NpyIter *, int)) \ + PyArray_API[264]) +#define NpyIter_GetAxisStrideArray \ + (*(npy_intp * (*)(NpyIter *, int)) \ + PyArray_API[265]) +#define NpyIter_RequiresBuffering \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[266]) +#define NpyIter_GetInitialDataPtrArray \ + (*(char ** (*)(NpyIter *)) \ + PyArray_API[267]) +#define NpyIter_CreateCompatibleStrides \ + (*(int (*)(NpyIter *, npy_intp, npy_intp *)) \ + PyArray_API[268]) +#define PyArray_CastingConverter \ + (*(int (*)(PyObject *, NPY_CASTING *)) \ + PyArray_API[269]) +#define PyArray_CountNonzero \ + (*(npy_intp (*)(PyArrayObject *)) \ + PyArray_API[270]) +#define PyArray_PromoteTypes \ + (*(PyArray_Descr * (*)(PyArray_Descr *, PyArray_Descr *)) \ + PyArray_API[271]) +#define PyArray_MinScalarType \ + (*(PyArray_Descr * (*)(PyArrayObject *)) \ + PyArray_API[272]) +#define PyArray_ResultType \ + (*(PyArray_Descr * (*)(npy_intp, PyArrayObject *arrs[], npy_intp, PyArray_Descr *descrs[])) \ + PyArray_API[273]) +#define PyArray_CanCastArrayTo \ + (*(npy_bool (*)(PyArrayObject *, PyArray_Descr *, NPY_CASTING)) \ + PyArray_API[274]) +#define PyArray_CanCastTypeTo \ + (*(npy_bool (*)(PyArray_Descr *, PyArray_Descr *, NPY_CASTING)) \ + PyArray_API[275]) +#define PyArray_EinsteinSum \ + (*(PyArrayObject * (*)(char *, npy_intp, PyArrayObject **, PyArray_Descr *, NPY_ORDER, NPY_CASTING, PyArrayObject *)) \ + PyArray_API[276]) +#define PyArray_NewLikeArray \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER, PyArray_Descr *, int)) \ + PyArray_API[277]) +#define PyArray_ConvertClipmodeSequence \ + (*(int (*)(PyObject *, NPY_CLIPMODE *, int)) \ + PyArray_API[279]) +#define PyArray_MatrixProduct2 \ + (*(PyObject * (*)(PyObject *, PyObject *, PyArrayObject*)) \ + PyArray_API[280]) +#define NpyIter_IsFirstVisit \ + (*(npy_bool (*)(NpyIter *, int)) \ + PyArray_API[281]) +#define PyArray_SetBaseObject \ + (*(int (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[282]) +#define PyArray_CreateSortedStridePerm \ + (*(void (*)(int, npy_intp const *, npy_stride_sort_item *)) \ + PyArray_API[283]) +#define PyArray_RemoveAxesInPlace \ + (*(void (*)(PyArrayObject *, const npy_bool *)) \ + PyArray_API[284]) +#define PyArray_DebugPrint \ + (*(void (*)(PyArrayObject *)) \ + PyArray_API[285]) +#define PyArray_FailUnlessWriteable \ + (*(int (*)(PyArrayObject *, const char *)) \ + PyArray_API[286]) +#define PyArray_SetUpdateIfCopyBase \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[287]) +#define PyDataMem_NEW \ + (*(void * (*)(size_t)) \ + PyArray_API[288]) +#define PyDataMem_FREE \ + (*(void (*)(void *)) \ + PyArray_API[289]) +#define PyDataMem_RENEW \ + (*(void * (*)(void *, size_t)) \ + PyArray_API[290]) +#define NPY_DEFAULT_ASSIGN_CASTING (*(NPY_CASTING *)PyArray_API[292]) +#define PyArray_Partition \ + (*(int (*)(PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND)) \ + PyArray_API[296]) +#define PyArray_ArgPartition \ + (*(PyObject * (*)(PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND)) \ + PyArray_API[297]) +#define PyArray_SelectkindConverter \ + (*(int (*)(PyObject *, NPY_SELECTKIND *)) \ + PyArray_API[298]) +#define PyDataMem_NEW_ZEROED \ + (*(void * (*)(size_t, size_t)) \ + PyArray_API[299]) +#define PyArray_CheckAnyScalarExact \ + (*(int (*)(PyObject *)) \ + PyArray_API[300]) +#define PyArray_ResolveWritebackIfCopy \ + (*(int (*)(PyArrayObject *)) \ + PyArray_API[302]) +#define PyArray_SetWritebackIfCopyBase \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[303]) + +#if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION +#define PyDataMem_SetHandler \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[304]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION +#define PyDataMem_GetHandler \ + (*(PyObject * (*)(void)) \ + PyArray_API[305]) +#endif +#define PyDataMem_DefaultHandler (*(PyObject* *)PyArray_API[306]) + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyDatetime_ConvertDatetime64ToDatetimeStruct \ + (*(int (*)(PyArray_DatetimeMetaData *, npy_datetime, npy_datetimestruct *)) \ + PyArray_API[307]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyDatetime_ConvertDatetimeStructToDatetime64 \ + (*(int (*)(PyArray_DatetimeMetaData *, const npy_datetimestruct *, npy_datetime *)) \ + PyArray_API[308]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyDatetime_ConvertPyDateTimeToDatetimeStruct \ + (*(int (*)(PyObject *, npy_datetimestruct *, NPY_DATETIMEUNIT *, int)) \ + PyArray_API[309]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyDatetime_GetDatetimeISO8601StrLen \ + (*(int (*)(int, NPY_DATETIMEUNIT)) \ + PyArray_API[310]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyDatetime_MakeISO8601Datetime \ + (*(int (*)(npy_datetimestruct *, char *, npy_intp, int, int, NPY_DATETIMEUNIT, int, NPY_CASTING)) \ + PyArray_API[311]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyDatetime_ParseISO8601Datetime \ + (*(int (*)(char const *, Py_ssize_t, NPY_DATETIMEUNIT, NPY_CASTING, npy_datetimestruct *, NPY_DATETIMEUNIT *, npy_bool *)) \ + PyArray_API[312]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyString_load \ + (*(int (*)(npy_string_allocator *, const npy_packed_static_string *, npy_static_string *)) \ + PyArray_API[313]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyString_pack \ + (*(int (*)(npy_string_allocator *, npy_packed_static_string *, const char *, size_t)) \ + PyArray_API[314]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyString_pack_null \ + (*(int (*)(npy_string_allocator *, npy_packed_static_string *)) \ + PyArray_API[315]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyString_acquire_allocator \ + (*(npy_string_allocator * (*)(const PyArray_StringDTypeObject *)) \ + PyArray_API[316]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyString_acquire_allocators \ + (*(void (*)(size_t, PyArray_Descr *const descrs[], npy_string_allocator *allocators[])) \ + PyArray_API[317]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyString_release_allocator \ + (*(void (*)(npy_string_allocator *)) \ + PyArray_API[318]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyString_release_allocators \ + (*(void (*)(size_t, npy_string_allocator *allocators[])) \ + PyArray_API[319]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define PyArray_GetDefaultDescr \ + (*(PyArray_Descr * (*)(PyArray_DTypeMeta *)) \ + PyArray_API[361]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define PyArrayInitDTypeMeta_FromSpec \ + (*(int (*)(PyArray_DTypeMeta *, PyArrayDTypeMeta_Spec *)) \ + PyArray_API[362]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define PyArray_CommonDType \ + (*(PyArray_DTypeMeta * (*)(PyArray_DTypeMeta *, PyArray_DTypeMeta *)) \ + PyArray_API[363]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define PyArray_PromoteDTypeSequence \ + (*(PyArray_DTypeMeta * (*)(npy_intp, PyArray_DTypeMeta **)) \ + PyArray_API[364]) +#endif +#define _PyDataType_GetArrFuncs \ + (*(PyArray_ArrFuncs * (*)(const PyArray_Descr *)) \ + PyArray_API[365]) + +/* + * The DType classes are inconvenient for the Python generation so exposed + * manually in the header below (may be moved). + */ +#include "numpy/_public_dtype_api_table.h" + +#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT) +static int +_import_array(void) +{ + int st; + PyObject *numpy = PyImport_ImportModule("numpy._core._multiarray_umath"); + PyObject *c_api; + if (numpy == NULL && PyErr_ExceptionMatches(PyExc_ModuleNotFoundError)) { + PyErr_Clear(); + numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); + } + + if (numpy == NULL) { + return -1; + } + + c_api = PyObject_GetAttrString(numpy, "_ARRAY_API"); + Py_DECREF(numpy); + if (c_api == NULL) { + return -1; + } + + if (!PyCapsule_CheckExact(c_api)) { + PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object"); + Py_DECREF(c_api); + return -1; + } + PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL); + Py_DECREF(c_api); + if (PyArray_API == NULL) { + PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer"); + return -1; + } + + /* + * On exceedingly few platforms these sizes may not match, in which case + * We do not support older NumPy versions at all. + */ + if (sizeof(Py_ssize_t) != sizeof(Py_intptr_t) && + PyArray_RUNTIME_VERSION < NPY_2_0_API_VERSION) { + PyErr_Format(PyExc_RuntimeError, + "module compiled against NumPy 2.0 but running on NumPy 1.x. " + "Unfortunately, this is not supported on niche platforms where " + "`sizeof(size_t) != sizeof(inptr_t)`."); + } + /* + * Perform runtime check of C API version. As of now NumPy 2.0 is ABI + * backwards compatible (in the exposed feature subset!) for all practical + * purposes. + */ + if (NPY_VERSION < PyArray_GetNDArrayCVersion()) { + PyErr_Format(PyExc_RuntimeError, "module compiled against "\ + "ABI version 0x%x but this version of numpy is 0x%x", \ + (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion()); + return -1; + } + PyArray_RUNTIME_VERSION = (int)PyArray_GetNDArrayCFeatureVersion(); + if (NPY_FEATURE_VERSION > PyArray_RUNTIME_VERSION) { + PyErr_Format(PyExc_RuntimeError, + "module was compiled against NumPy C-API version 0x%x " + "(NumPy " NPY_FEATURE_VERSION_STRING ") " + "but the running NumPy has C-API version 0x%x. " + "Check the section C-API incompatibility at the " + "Troubleshooting ImportError section at " + "https://numpy.org/devdocs/user/troubleshooting-importerror.html" + "#c-api-incompatibility " + "for indications on how to solve this problem.", + (int)NPY_FEATURE_VERSION, PyArray_RUNTIME_VERSION); + return -1; + } + + /* + * Perform runtime check of endianness and check it matches the one set by + * the headers (npy_endian.h) as a safeguard + */ + st = PyArray_GetEndianness(); + if (st == NPY_CPU_UNKNOWN_ENDIAN) { + PyErr_SetString(PyExc_RuntimeError, + "FATAL: module compiled as unknown endian"); + return -1; + } +#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN + if (st != NPY_CPU_BIG) { + PyErr_SetString(PyExc_RuntimeError, + "FATAL: module compiled as big endian, but " + "detected different endianness at runtime"); + return -1; + } +#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN + if (st != NPY_CPU_LITTLE) { + PyErr_SetString(PyExc_RuntimeError, + "FATAL: module compiled as little endian, but " + "detected different endianness at runtime"); + return -1; + } +#endif + + return 0; +} + +#if (SWIG_VERSION < 0x040400) +#define _RETURN_VALUE NULL +#else +#define _RETURN_VALUE 0 +#endif + +#define import_array() { \ + if (_import_array() < 0) { \ + PyErr_Print(); \ + PyErr_SetString( \ + PyExc_ImportError, \ + "numpy._core.multiarray failed to import" \ + ); \ + return _RETURN_VALUE; \ + } \ +} + +#define import_array1(ret) { \ + if (_import_array() < 0) { \ + PyErr_Print(); \ + PyErr_SetString( \ + PyExc_ImportError, \ + "numpy._core.multiarray failed to import" \ + ); \ + return ret; \ + } \ +} + +#define import_array2(msg, ret) { \ + if (_import_array() < 0) { \ + PyErr_Print(); \ + PyErr_SetString(PyExc_ImportError, msg); \ + return ret; \ + } \ +} + +#endif + +#endif diff --git a/local_packages/numpy/_core/include/numpy/__ufunc_api.c b/local_packages/numpy/_core/include/numpy/__ufunc_api.c new file mode 100644 index 0000000..d18aa72 --- /dev/null +++ b/local_packages/numpy/_core/include/numpy/__ufunc_api.c @@ -0,0 +1,55 @@ + +/* These pointers will be stored in the C-object for use in other + extension modules +*/ + +void *PyUFunc_API[] = { + (void *) &PyUFunc_Type, + (void *) PyUFunc_FromFuncAndData, + (void *) PyUFunc_RegisterLoopForType, + NULL, + (void *) PyUFunc_f_f_As_d_d, + (void *) PyUFunc_d_d, + (void *) PyUFunc_f_f, + (void *) PyUFunc_g_g, + (void *) PyUFunc_F_F_As_D_D, + (void *) PyUFunc_F_F, + (void *) PyUFunc_D_D, + (void *) PyUFunc_G_G, + (void *) PyUFunc_O_O, + (void *) PyUFunc_ff_f_As_dd_d, + (void *) PyUFunc_ff_f, + (void *) PyUFunc_dd_d, + (void *) PyUFunc_gg_g, + (void *) PyUFunc_FF_F_As_DD_D, + (void *) PyUFunc_DD_D, + (void *) PyUFunc_FF_F, + (void *) PyUFunc_GG_G, + (void *) PyUFunc_OO_O, + (void *) PyUFunc_O_O_method, + (void *) PyUFunc_OO_O_method, + (void *) PyUFunc_On_Om, + NULL, + NULL, + (void *) PyUFunc_clearfperr, + (void *) PyUFunc_getfperr, + NULL, + (void *) PyUFunc_ReplaceLoopBySignature, + (void *) PyUFunc_FromFuncAndDataAndSignature, + NULL, + (void *) PyUFunc_e_e, + (void *) PyUFunc_e_e_As_f_f, + (void *) PyUFunc_e_e_As_d_d, + (void *) PyUFunc_ee_e, + (void *) PyUFunc_ee_e_As_ff_f, + (void *) PyUFunc_ee_e_As_dd_d, + (void *) PyUFunc_DefaultTypeResolver, + (void *) PyUFunc_ValidateCasting, + (void *) PyUFunc_RegisterLoopForDescr, + (void *) PyUFunc_FromFuncAndDataAndSignatureAndIdentity, + (void *) PyUFunc_AddLoopFromSpec, + (void *) PyUFunc_AddPromoter, + (void *) PyUFunc_AddWrappingLoop, + (void *) PyUFunc_GiveFloatingpointErrors, + (void *) PyUFunc_AddLoopsFromSpecs +}; diff --git a/local_packages/numpy/_core/include/numpy/__ufunc_api.h b/local_packages/numpy/_core/include/numpy/__ufunc_api.h new file mode 100644 index 0000000..5c98f01 --- /dev/null +++ b/local_packages/numpy/_core/include/numpy/__ufunc_api.h @@ -0,0 +1,349 @@ + +#ifdef _UMATHMODULE + +extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type; + +NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndData \ + (PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, int); +NPY_NO_EXPORT int PyUFunc_RegisterLoopForType \ + (PyUFuncObject *, int, PyUFuncGenericFunction, const int *, void *); +NPY_NO_EXPORT void PyUFunc_f_f_As_d_d \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_d_d \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_f_f \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_g_g \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_F_F_As_D_D \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_F_F \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_D_D \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_G_G \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_O_O \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_ff_f_As_dd_d \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_ff_f \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_dd_d \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_gg_g \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_FF_F_As_DD_D \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_DD_D \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_FF_F \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_GG_G \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_OO_O \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_O_O_method \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_OO_O_method \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_On_Om \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_clearfperr \ + (void); +NPY_NO_EXPORT int PyUFunc_getfperr \ + (void); +NPY_NO_EXPORT int PyUFunc_ReplaceLoopBySignature \ + (PyUFuncObject *, PyUFuncGenericFunction, const int *, PyUFuncGenericFunction *); +NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignature \ + (PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, int, const char *); +NPY_NO_EXPORT void PyUFunc_e_e \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_e_e_As_f_f \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_e_e_As_d_d \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_ee_e \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_ee_e_As_ff_f \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_ee_e_As_dd_d \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT int PyUFunc_DefaultTypeResolver \ + (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyUFunc_ValidateCasting \ + (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr *const *); +NPY_NO_EXPORT int PyUFunc_RegisterLoopForDescr \ + (PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *); +NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignatureAndIdentity \ + (PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, const int, const char *, PyObject *); +NPY_NO_EXPORT int PyUFunc_AddLoopFromSpec \ + (PyObject *, PyArrayMethod_Spec *); +NPY_NO_EXPORT int PyUFunc_AddPromoter \ + (PyObject *, PyObject *, PyObject *); +NPY_NO_EXPORT int PyUFunc_AddWrappingLoop \ + (PyObject *, PyArray_DTypeMeta *new_dtypes[], PyArray_DTypeMeta *wrapped_dtypes[], PyArrayMethod_TranslateGivenDescriptors *, PyArrayMethod_TranslateLoopDescriptors *); +NPY_NO_EXPORT int PyUFunc_GiveFloatingpointErrors \ + (const char *, int); +NPY_NO_EXPORT int PyUFunc_AddLoopsFromSpecs \ + (PyUFunc_LoopSlot *); + +#else + +#if defined(PY_UFUNC_UNIQUE_SYMBOL) +#define PyUFunc_API PY_UFUNC_UNIQUE_SYMBOL +#endif + +/* By default do not export API in an .so (was never the case on windows) */ +#ifndef NPY_API_SYMBOL_ATTRIBUTE + #define NPY_API_SYMBOL_ATTRIBUTE NPY_VISIBILITY_HIDDEN +#endif + +#if defined(NO_IMPORT) || defined(NO_IMPORT_UFUNC) +extern NPY_API_SYMBOL_ATTRIBUTE void **PyUFunc_API; +#else +#if defined(PY_UFUNC_UNIQUE_SYMBOL) +NPY_API_SYMBOL_ATTRIBUTE void **PyUFunc_API; +#else +static void **PyUFunc_API=NULL; +#endif +#endif + +#define PyUFunc_Type (*(PyTypeObject *)PyUFunc_API[0]) +#define PyUFunc_FromFuncAndData \ + (*(PyObject * (*)(PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, int)) \ + PyUFunc_API[1]) +#define PyUFunc_RegisterLoopForType \ + (*(int (*)(PyUFuncObject *, int, PyUFuncGenericFunction, const int *, void *)) \ + PyUFunc_API[2]) +#define PyUFunc_f_f_As_d_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[4]) +#define PyUFunc_d_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[5]) +#define PyUFunc_f_f \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[6]) +#define PyUFunc_g_g \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[7]) +#define PyUFunc_F_F_As_D_D \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[8]) +#define PyUFunc_F_F \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[9]) +#define PyUFunc_D_D \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[10]) +#define PyUFunc_G_G \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[11]) +#define PyUFunc_O_O \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[12]) +#define PyUFunc_ff_f_As_dd_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[13]) +#define PyUFunc_ff_f \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[14]) +#define PyUFunc_dd_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[15]) +#define PyUFunc_gg_g \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[16]) +#define PyUFunc_FF_F_As_DD_D \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[17]) +#define PyUFunc_DD_D \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[18]) +#define PyUFunc_FF_F \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[19]) +#define PyUFunc_GG_G \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[20]) +#define PyUFunc_OO_O \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[21]) +#define PyUFunc_O_O_method \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[22]) +#define PyUFunc_OO_O_method \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[23]) +#define PyUFunc_On_Om \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[24]) +#define PyUFunc_clearfperr \ + (*(void (*)(void)) \ + PyUFunc_API[27]) +#define PyUFunc_getfperr \ + (*(int (*)(void)) \ + PyUFunc_API[28]) +#define PyUFunc_ReplaceLoopBySignature \ + (*(int (*)(PyUFuncObject *, PyUFuncGenericFunction, const int *, PyUFuncGenericFunction *)) \ + PyUFunc_API[30]) +#define PyUFunc_FromFuncAndDataAndSignature \ + (*(PyObject * (*)(PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, int, const char *)) \ + PyUFunc_API[31]) +#define PyUFunc_e_e \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[33]) +#define PyUFunc_e_e_As_f_f \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[34]) +#define PyUFunc_e_e_As_d_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[35]) +#define PyUFunc_ee_e \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[36]) +#define PyUFunc_ee_e_As_ff_f \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[37]) +#define PyUFunc_ee_e_As_dd_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[38]) +#define PyUFunc_DefaultTypeResolver \ + (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **)) \ + PyUFunc_API[39]) +#define PyUFunc_ValidateCasting \ + (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr *const *)) \ + PyUFunc_API[40]) +#define PyUFunc_RegisterLoopForDescr \ + (*(int (*)(PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *)) \ + PyUFunc_API[41]) + +#if NPY_FEATURE_VERSION >= NPY_1_16_API_VERSION +#define PyUFunc_FromFuncAndDataAndSignatureAndIdentity \ + (*(PyObject * (*)(PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, const int, const char *, PyObject *)) \ + PyUFunc_API[42]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define PyUFunc_AddLoopFromSpec \ + (*(int (*)(PyObject *, PyArrayMethod_Spec *)) \ + PyUFunc_API[43]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define PyUFunc_AddPromoter \ + (*(int (*)(PyObject *, PyObject *, PyObject *)) \ + PyUFunc_API[44]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define PyUFunc_AddWrappingLoop \ + (*(int (*)(PyObject *, PyArray_DTypeMeta *new_dtypes[], PyArray_DTypeMeta *wrapped_dtypes[], PyArrayMethod_TranslateGivenDescriptors *, PyArrayMethod_TranslateLoopDescriptors *)) \ + PyUFunc_API[45]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define PyUFunc_GiveFloatingpointErrors \ + (*(int (*)(const char *, int)) \ + PyUFunc_API[46]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_4_API_VERSION +#define PyUFunc_AddLoopsFromSpecs \ + (*(int (*)(PyUFunc_LoopSlot *)) \ + PyUFunc_API[47]) +#endif + +static inline int +_import_umath(void) +{ + PyObject *c_api; + PyObject *numpy = PyImport_ImportModule("numpy._core._multiarray_umath"); + if (numpy == NULL && PyErr_ExceptionMatches(PyExc_ModuleNotFoundError)) { + PyErr_Clear(); + numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); + } + + if (numpy == NULL) { + PyErr_SetString(PyExc_ImportError, + "_multiarray_umath failed to import"); + return -1; + } + + c_api = PyObject_GetAttrString(numpy, "_UFUNC_API"); + Py_DECREF(numpy); + if (c_api == NULL) { + PyErr_SetString(PyExc_AttributeError, "_UFUNC_API not found"); + return -1; + } + + if (!PyCapsule_CheckExact(c_api)) { + PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCapsule object"); + Py_DECREF(c_api); + return -1; + } + PyUFunc_API = (void **)PyCapsule_GetPointer(c_api, NULL); + Py_DECREF(c_api); + if (PyUFunc_API == NULL) { + PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is NULL pointer"); + return -1; + } + return 0; +} + +#define import_umath() \ + do {\ + UFUNC_NOFPE\ + if (_import_umath() < 0) {\ + PyErr_Print();\ + PyErr_SetString(PyExc_ImportError,\ + "numpy._core.umath failed to import");\ + return NULL;\ + }\ + } while(0) + +#define import_umath1(ret) \ + do {\ + UFUNC_NOFPE\ + if (_import_umath() < 0) {\ + PyErr_Print();\ + PyErr_SetString(PyExc_ImportError,\ + "numpy._core.umath failed to import");\ + return ret;\ + }\ + } while(0) + +#define import_umath2(ret, msg) \ + do {\ + UFUNC_NOFPE\ + if (_import_umath() < 0) {\ + PyErr_Print();\ + PyErr_SetString(PyExc_ImportError, msg);\ + return ret;\ + }\ + } while(0) + +#define import_ufunc() \ + do {\ + UFUNC_NOFPE\ + if (_import_umath() < 0) {\ + PyErr_Print();\ + PyErr_SetString(PyExc_ImportError,\ + "numpy._core.umath failed to import");\ + }\ + } while(0) + + +static inline int +PyUFunc_ImportUFuncAPI() +{ + if (NPY_UNLIKELY(PyUFunc_API == NULL)) { + import_umath1(-1); + } + return 0; +} + +#endif diff --git a/local_packages/numpy/_core/include/numpy/_neighborhood_iterator_imp.h b/local_packages/numpy/_core/include/numpy/_neighborhood_iterator_imp.h new file mode 100644 index 0000000..b365cb5 --- /dev/null +++ b/local_packages/numpy/_core/include/numpy/_neighborhood_iterator_imp.h @@ -0,0 +1,90 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_ +#error You should not include this header directly +#endif +/* + * Private API (here for inline) + */ +static inline int +_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter); + +/* + * Update to next item of the iterator + * + * Note: this simply increment the coordinates vector, last dimension + * incremented first , i.e, for dimension 3 + * ... + * -1, -1, -1 + * -1, -1, 0 + * -1, -1, 1 + * .... + * -1, 0, -1 + * -1, 0, 0 + * .... + * 0, -1, -1 + * 0, -1, 0 + * .... + */ +#define _UPDATE_COORD_ITER(c) \ + wb = iter->coordinates[c] < iter->bounds[c][1]; \ + if (wb) { \ + iter->coordinates[c] += 1; \ + return 0; \ + } \ + else { \ + iter->coordinates[c] = iter->bounds[c][0]; \ + } + +static inline int +_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter) +{ + npy_intp i, wb; + + for (i = iter->nd - 1; i >= 0; --i) { + _UPDATE_COORD_ITER(i) + } + + return 0; +} + +/* + * Version optimized for 2d arrays, manual loop unrolling + */ +static inline int +_PyArrayNeighborhoodIter_IncrCoord2D(PyArrayNeighborhoodIterObject* iter) +{ + npy_intp wb; + + _UPDATE_COORD_ITER(1) + _UPDATE_COORD_ITER(0) + + return 0; +} +#undef _UPDATE_COORD_ITER + +/* + * Advance to the next neighbour + */ +static inline int +PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter) +{ + _PyArrayNeighborhoodIter_IncrCoord (iter); + iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates); + + return 0; +} + +/* + * Reset functions + */ +static inline int +PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter) +{ + npy_intp i; + + for (i = 0; i < iter->nd; ++i) { + iter->coordinates[i] = iter->bounds[i][0]; + } + iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates); + + return 0; +} diff --git a/local_packages/numpy/_core/include/numpy/_numpyconfig.h b/local_packages/numpy/_core/include/numpy/_numpyconfig.h new file mode 100644 index 0000000..8879afe --- /dev/null +++ b/local_packages/numpy/_core/include/numpy/_numpyconfig.h @@ -0,0 +1,33 @@ +/* #undef NPY_HAVE_ENDIAN_H */ + +#define NPY_SIZEOF_SHORT 2 +#define NPY_SIZEOF_INT 4 +#define NPY_SIZEOF_LONG 4 +#define NPY_SIZEOF_FLOAT 4 +#define NPY_SIZEOF_COMPLEX_FLOAT 8 +#define NPY_SIZEOF_DOUBLE 8 +#define NPY_SIZEOF_COMPLEX_DOUBLE 16 +#define NPY_SIZEOF_LONGDOUBLE 8 +#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 16 +#define NPY_SIZEOF_PY_INTPTR_T 8 +#define NPY_SIZEOF_INTP 8 +#define NPY_SIZEOF_UINTP 8 +#define NPY_SIZEOF_WCHAR_T 2 +#define NPY_SIZEOF_OFF_T 4 +#define NPY_SIZEOF_PY_LONG_LONG 8 +#define NPY_SIZEOF_LONGLONG 8 + +/* + * Defined to 1 or 0. Note that Pyodide hardcodes NPY_NO_SMP (and other defines + * in this header) for better cross-compilation, so don't rename them without a + * good reason. + */ +#define NPY_NO_SMP 0 + +#define NPY_VISIBILITY_HIDDEN +#define NPY_ABI_VERSION 0x02000000 +#define NPY_API_VERSION 0x00000015 + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS 1 +#endif diff --git a/local_packages/numpy/_core/include/numpy/_public_dtype_api_table.h b/local_packages/numpy/_core/include/numpy/_public_dtype_api_table.h new file mode 100644 index 0000000..51f3905 --- /dev/null +++ b/local_packages/numpy/_core/include/numpy/_public_dtype_api_table.h @@ -0,0 +1,86 @@ +/* + * Public exposure of the DType Classes. These are tricky to expose + * via the Python API, so they are exposed through this header for now. + * + * These definitions are only relevant for the public API and we reserve + * the slots 320-360 in the API table generation for this (currently). + * + * TODO: This file should be consolidated with the API table generation + * (although not sure the current generation is worth preserving). + */ +#ifndef NUMPY_CORE_INCLUDE_NUMPY__PUBLIC_DTYPE_API_TABLE_H_ +#define NUMPY_CORE_INCLUDE_NUMPY__PUBLIC_DTYPE_API_TABLE_H_ + +#if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD) + +/* All of these require NumPy 2.0 support */ +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION + +/* + * The type of the DType metaclass + */ +#define PyArrayDTypeMeta_Type (*(PyTypeObject *)(PyArray_API + 320)[0]) +/* + * NumPy's builtin DTypes: + */ +#define PyArray_BoolDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[1]) +/* Integers */ +#define PyArray_ByteDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[2]) +#define PyArray_UByteDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[3]) +#define PyArray_ShortDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[4]) +#define PyArray_UShortDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[5]) +#define PyArray_IntDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[6]) +#define PyArray_UIntDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[7]) +#define PyArray_LongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[8]) +#define PyArray_ULongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[9]) +#define PyArray_LongLongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[10]) +#define PyArray_ULongLongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[11]) +/* Integer aliases */ +#define PyArray_Int8DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[12]) +#define PyArray_UInt8DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[13]) +#define PyArray_Int16DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[14]) +#define PyArray_UInt16DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[15]) +#define PyArray_Int32DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[16]) +#define PyArray_UInt32DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[17]) +#define PyArray_Int64DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[18]) +#define PyArray_UInt64DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[19]) +#define PyArray_IntpDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[20]) +#define PyArray_UIntpDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[21]) +/* Floats */ +#define PyArray_HalfDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[22]) +#define PyArray_FloatDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[23]) +#define PyArray_DoubleDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[24]) +#define PyArray_LongDoubleDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[25]) +/* Complex */ +#define PyArray_CFloatDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[26]) +#define PyArray_CDoubleDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[27]) +#define PyArray_CLongDoubleDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[28]) +/* String/Bytes */ +#define PyArray_BytesDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[29]) +#define PyArray_UnicodeDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[30]) +/* Datetime/Timedelta */ +#define PyArray_DatetimeDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[31]) +#define PyArray_TimedeltaDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[32]) +/* Object/Void */ +#define PyArray_ObjectDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[33]) +#define PyArray_VoidDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[34]) +/* Python types (used as markers for scalars) */ +#define PyArray_PyLongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[35]) +#define PyArray_PyFloatDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[36]) +#define PyArray_PyComplexDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[37]) +/* Default integer type */ +#define PyArray_DefaultIntDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[38]) +/* New non-legacy DTypes follow in the order they were added */ +#define PyArray_StringDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[39]) + +/* NOTE: offset 40 is free */ + +/* Need to start with a larger offset again for the abstract classes: */ +#define PyArray_IntAbstractDType (*(PyArray_DTypeMeta *)PyArray_API[366]) +#define PyArray_FloatAbstractDType (*(PyArray_DTypeMeta *)PyArray_API[367]) +#define PyArray_ComplexAbstractDType (*(PyArray_DTypeMeta *)PyArray_API[368]) + +#endif /* NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION */ + +#endif /* NPY_INTERNAL_BUILD */ +#endif /* NUMPY_CORE_INCLUDE_NUMPY__PUBLIC_DTYPE_API_TABLE_H_ */ diff --git a/local_packages/numpy/_core/include/numpy/arrayobject.h b/local_packages/numpy/_core/include/numpy/arrayobject.h new file mode 100644 index 0000000..97d9359 --- /dev/null +++ b/local_packages/numpy/_core/include/numpy/arrayobject.h @@ -0,0 +1,7 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_ +#define Py_ARRAYOBJECT_H + +#include "ndarrayobject.h" + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_ */ diff --git a/local_packages/numpy/_core/include/numpy/arrayscalars.h b/local_packages/numpy/_core/include/numpy/arrayscalars.h new file mode 100644 index 0000000..46bc58c --- /dev/null +++ b/local_packages/numpy/_core/include/numpy/arrayscalars.h @@ -0,0 +1,198 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_ + +#ifndef _MULTIARRAYMODULE +typedef struct { + PyObject_HEAD + npy_bool obval; +} PyBoolScalarObject; +#endif + + +typedef struct { + PyObject_HEAD + signed char obval; +} PyByteScalarObject; + + +typedef struct { + PyObject_HEAD + short obval; +} PyShortScalarObject; + + +typedef struct { + PyObject_HEAD + int obval; +} PyIntScalarObject; + + +typedef struct { + PyObject_HEAD + long obval; +} PyLongScalarObject; + + +typedef struct { + PyObject_HEAD + npy_longlong obval; +} PyLongLongScalarObject; + + +typedef struct { + PyObject_HEAD + unsigned char obval; +} PyUByteScalarObject; + + +typedef struct { + PyObject_HEAD + unsigned short obval; +} PyUShortScalarObject; + + +typedef struct { + PyObject_HEAD + unsigned int obval; +} PyUIntScalarObject; + + +typedef struct { + PyObject_HEAD + unsigned long obval; +} PyULongScalarObject; + + +typedef struct { + PyObject_HEAD + npy_ulonglong obval; +} PyULongLongScalarObject; + + +typedef struct { + PyObject_HEAD + npy_half obval; +} PyHalfScalarObject; + + +typedef struct { + PyObject_HEAD + float obval; +} PyFloatScalarObject; + + +typedef struct { + PyObject_HEAD + double obval; +} PyDoubleScalarObject; + + +typedef struct { + PyObject_HEAD + npy_longdouble obval; +} PyLongDoubleScalarObject; + + +typedef struct { + PyObject_HEAD + npy_cfloat obval; +} PyCFloatScalarObject; + + +typedef struct { + PyObject_HEAD + npy_cdouble obval; +} PyCDoubleScalarObject; + + +typedef struct { + PyObject_HEAD + npy_clongdouble obval; +} PyCLongDoubleScalarObject; + + +typedef struct { + PyObject_HEAD + PyObject * obval; +} PyObjectScalarObject; + +typedef struct { + PyObject_HEAD + npy_datetime obval; + PyArray_DatetimeMetaData obmeta; +} PyDatetimeScalarObject; + +typedef struct { + PyObject_HEAD + npy_timedelta obval; + PyArray_DatetimeMetaData obmeta; +} PyTimedeltaScalarObject; + + +typedef struct { + PyObject_HEAD + char obval; +} PyScalarObject; + +#define PyStringScalarObject PyBytesObject +#ifndef Py_LIMITED_API +typedef struct { + /* note that the PyObject_HEAD macro lives right here */ + PyUnicodeObject base; + Py_UCS4 *obval; + #if NPY_FEATURE_VERSION >= NPY_1_20_API_VERSION + char *buffer_fmt; + #endif +} PyUnicodeScalarObject; +#endif + + +typedef struct { + PyObject_VAR_HEAD + char *obval; +#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD + /* Internally use the subclass to allow accessing names/fields */ + _PyArray_LegacyDescr *descr; +#else + PyArray_Descr *descr; +#endif + int flags; + PyObject *base; + #if NPY_FEATURE_VERSION >= NPY_1_20_API_VERSION + void *_buffer_info; /* private buffer info, tagged to allow warning */ + #endif +} PyVoidScalarObject; + +/* Macros + PyScalarObject + PyArrType_Type + are defined in ndarrayobject.h +*/ + +#define PyArrayScalar_False ((PyObject *)(&(_PyArrayScalar_BoolValues[0]))) +#define PyArrayScalar_True ((PyObject *)(&(_PyArrayScalar_BoolValues[1]))) +#define PyArrayScalar_FromLong(i) \ + ((PyObject *)(&(_PyArrayScalar_BoolValues[((i)!=0)]))) +#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) do { \ + PyObject *obj = PyArrayScalar_FromLong(i); \ + Py_INCREF(obj); \ + return obj; \ +} while (0) +#define PyArrayScalar_RETURN_FALSE \ + return Py_INCREF(PyArrayScalar_False), \ + PyArrayScalar_False +#define PyArrayScalar_RETURN_TRUE \ + return Py_INCREF(PyArrayScalar_True), \ + PyArrayScalar_True + +#define PyArrayScalar_New(cls) \ + Py##cls##ArrType_Type.tp_alloc(&Py##cls##ArrType_Type, 0) +#ifndef Py_LIMITED_API +/* For the limited API, use PyArray_ScalarAsCtype instead */ +#define PyArrayScalar_VAL(obj, cls) \ + ((Py##cls##ScalarObject *)obj)->obval +#define PyArrayScalar_ASSIGN(obj, cls, val) \ + PyArrayScalar_VAL(obj, cls) = val +#endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_ */ diff --git a/local_packages/numpy/_core/include/numpy/dtype_api.h b/local_packages/numpy/_core/include/numpy/dtype_api.h new file mode 100644 index 0000000..5ac9647 --- /dev/null +++ b/local_packages/numpy/_core/include/numpy/dtype_api.h @@ -0,0 +1,547 @@ +/* + * The public DType API + */ + +#ifndef NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_ +#define NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_ + +struct PyArrayMethodObject_tag; + +/* + * Largely opaque struct for DType classes (i.e. metaclass instances). + * The internal definition is currently in `ndarraytypes.h` (export is a bit + * more complex because `PyArray_Descr` is a DTypeMeta internally but not + * externally). + */ +#if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD) + +#ifndef Py_LIMITED_API + + typedef struct PyArray_DTypeMeta_tag { + PyHeapTypeObject super; + + /* + * Most DTypes will have a singleton default instance, for the + * parametric legacy DTypes (bytes, string, void, datetime) this + * may be a pointer to the *prototype* instance? + */ + PyArray_Descr *singleton; + /* Copy of the legacy DTypes type number, usually invalid. */ + int type_num; + + /* The type object of the scalar instances (may be NULL?) */ + PyTypeObject *scalar_type; + /* + * DType flags to signal legacy, parametric, or + * abstract. But plenty of space for additional information/flags. + */ + npy_uint64 flags; + + /* + * Use indirection in order to allow a fixed size for this struct. + * A stable ABI size makes creating a static DType less painful + * while also ensuring flexibility for all opaque API (with one + * indirection due the pointer lookup). + */ + void *dt_slots; + /* Allow growing (at the moment also beyond this) */ + void *reserved[3]; + } PyArray_DTypeMeta; + +#else + +typedef PyTypeObject PyArray_DTypeMeta; + +#endif /* Py_LIMITED_API */ + +#endif /* not internal build */ + +/* + * ****************************************************** + * ArrayMethod API (Casting and UFuncs) + * ****************************************************** + */ + + +typedef enum { + /* Flag for whether the GIL is required */ + NPY_METH_REQUIRES_PYAPI = 1 << 0, + /* + * Some functions cannot set floating point error flags, this flag + * gives us the option (not requirement) to skip floating point error + * setup/check. No function should set error flags and ignore them + * since it would interfere with chaining operations (e.g. casting). + */ + NPY_METH_NO_FLOATINGPOINT_ERRORS = 1 << 1, + /* Whether the method supports unaligned access (not runtime) */ + NPY_METH_SUPPORTS_UNALIGNED = 1 << 2, + /* + * Used for reductions to allow reordering the operation. At this point + * assume that if set, it also applies to normal operations though! + */ + NPY_METH_IS_REORDERABLE = 1 << 3, + /* + * Private flag for now for *logic* functions. The logical functions + * `logical_or` and `logical_and` can always cast the inputs to booleans + * "safely" (because that is how the cast to bool is defined). + * @seberg: I am not sure this is the best way to handle this, so its + * private for now (also it is very limited anyway). + * There is one "exception". NA aware dtypes cannot cast to bool + * (hopefully), so the `??->?` loop should error even with this flag. + * But a second NA fallback loop will be necessary. + */ + _NPY_METH_FORCE_CAST_INPUTS = 1 << 17, + + /* All flags which can change at runtime */ + NPY_METH_RUNTIME_FLAGS = ( + NPY_METH_REQUIRES_PYAPI | + NPY_METH_NO_FLOATINGPOINT_ERRORS), +} NPY_ARRAYMETHOD_FLAGS; + + +typedef enum { + /* Casting via same_value logic */ + NPY_SAME_VALUE_CONTEXT_FLAG=1, +} NPY_ARRAYMETHOD_CONTEXT_FLAGS; + +typedef struct PyArrayMethod_Context_tag { + /* The caller, which is typically the original ufunc. May be NULL */ + PyObject *caller; + /* The method "self". Currently an opaque object. */ + struct PyArrayMethodObject_tag *method; + + /* Operand descriptors, filled in by resolve_descriptors */ + PyArray_Descr *const *descriptors; + #if NPY_FEATURE_VERSION > NPY_2_3_API_VERSION + void * _reserved; + /* + * Optional flag to pass information into the inner loop + * NPY_ARRAYMETHOD_CONTEXT_FLAGS + */ + uint64_t flags; + + /* + * Optional run-time parameters to pass to the loop (currently used in sorting). + * Fixed parameters are expected to be passed via auxdata. + */ + void *parameters; + + /* Structure may grow (this is harmless for DType authors) */ + #endif +} PyArrayMethod_Context; + + +/* + * The main object for creating a new ArrayMethod. We use the typical `slots` + * mechanism used by the Python limited API (see below for the slot defs). + */ +typedef struct { + const char *name; + int nin, nout; + NPY_CASTING casting; + NPY_ARRAYMETHOD_FLAGS flags; + PyArray_DTypeMeta **dtypes; + PyType_Slot *slots; +} PyArrayMethod_Spec; + + +// This is used for the convenience function `PyUFunc_AddLoopsFromSpecs` +typedef struct { + const char *name; + PyArrayMethod_Spec *spec; +} PyUFunc_LoopSlot; + + +/* + * ArrayMethod slots + * ----------------- + * + * SLOTS IDs For the ArrayMethod creation, once fully public, IDs are fixed + * but can be deprecated and arbitrarily extended. + */ +#define _NPY_METH_resolve_descriptors_with_scalars 1 +#define NPY_METH_resolve_descriptors 2 +#define NPY_METH_get_loop 3 +#define NPY_METH_get_reduction_initial 4 +/* specific loops for constructions/default get_loop: */ +#define NPY_METH_strided_loop 5 +#define NPY_METH_contiguous_loop 6 +#define NPY_METH_unaligned_strided_loop 7 +#define NPY_METH_unaligned_contiguous_loop 8 +#define NPY_METH_contiguous_indexed_loop 9 +#define _NPY_METH_static_data 10 + +/* + * The resolve descriptors function, must be able to handle NULL values for + * all output (but not input) `given_descrs` and fill `loop_descrs`. + * Return -1 on error or 0 if the operation is not possible without an error + * set. (This may still be in flux.) + * Otherwise must return the "casting safety", for normal functions, this is + * almost always "safe" (or even "equivalent"?). + * + * `resolve_descriptors` is optional if all output DTypes are non-parametric. + */ +typedef NPY_CASTING (PyArrayMethod_ResolveDescriptors)( + /* "method" is currently opaque (necessary e.g. to wrap Python) */ + struct PyArrayMethodObject_tag *method, + /* DTypes the method was created for */ + PyArray_DTypeMeta *const *dtypes, + /* Input descriptors (instances). Outputs may be NULL. */ + PyArray_Descr *const *given_descrs, + /* Exact loop descriptors to use, must not hold references on error */ + PyArray_Descr **loop_descrs, + npy_intp *view_offset); + + +/* + * Rarely needed, slightly more powerful version of `resolve_descriptors`. + * See also `PyArrayMethod_ResolveDescriptors` for details on shared arguments. + * + * NOTE: This function is private now as it is unclear how and what to pass + * exactly as additional information to allow dealing with the scalars. + * See also gh-24915. + */ +typedef NPY_CASTING (PyArrayMethod_ResolveDescriptorsWithScalar)( + struct PyArrayMethodObject_tag *method, + PyArray_DTypeMeta *const *dtypes, + /* Unlike above, these can have any DType and we may allow NULL. */ + PyArray_Descr *const *given_descrs, + /* + * Input scalars or NULL. Only ever passed for python scalars. + * WARNING: In some cases, a loop may be explicitly selected and the + * value passed is not available (NULL) or does not have the + * expected type. + */ + PyObject *const *input_scalars, + PyArray_Descr **loop_descrs, + npy_intp *view_offset); + + + +typedef int (PyArrayMethod_StridedLoop)(PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata); + + +typedef int (PyArrayMethod_GetLoop)( + PyArrayMethod_Context *context, + int aligned, int move_references, + const npy_intp *strides, + PyArrayMethod_StridedLoop **out_loop, + NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags); + +/** + * Query an ArrayMethod for the initial value for use in reduction. + * + * @param context The arraymethod context, mainly to access the descriptors. + * @param reduction_is_empty Whether the reduction is empty. When it is, the + * value returned may differ. In this case it is a "default" value that + * may differ from the "identity" value normally used. For example: + * - `0.0` is the default for `sum([])`. But `-0.0` is the correct + * identity otherwise as it preserves the sign for `sum([-0.0])`. + * - We use no identity for object, but return the default of `0` and `1` + * for the empty `sum([], dtype=object)` and `prod([], dtype=object)`. + * This allows `np.sum(np.array(["a", "b"], dtype=object))` to work. + * - `-inf` or `INT_MIN` for `max` is an identity, but at least `INT_MIN` + * not a good *default* when there are no items. + * @param initial Pointer to initial data to be filled (if possible) + * + * @returns -1, 0, or 1 indicating error, no initial value, and initial being + * successfully filled. Errors must not be given where 0 is correct, NumPy + * may call this even when not strictly necessary. + */ +typedef int (PyArrayMethod_GetReductionInitial)( + PyArrayMethod_Context *context, npy_bool reduction_is_empty, + void *initial); + +/* + * The following functions are only used by the wrapping array method defined + * in umath/wrapping_array_method.c + */ + + +/* + * The function to convert the given descriptors (passed in to + * `resolve_descriptors`) and translates them for the wrapped loop. + * The new descriptors MUST be viewable with the old ones, `NULL` must be + * supported (for outputs) and should normally be forwarded. + * + * The function must clean up on error. + * + * NOTE: We currently assume that this translation gives "viewable" results. + * I.e. there is no additional casting related to the wrapping process. + * In principle that could be supported, but not sure it is useful. + * This currently also means that e.g. alignment must apply identically + * to the new dtypes. + * + * TODO: Due to the fact that `resolve_descriptors` is also used for `can_cast` + * there is no way to "pass out" the result of this function. This means + * it will be called twice for every ufunc call. + * (I am considering including `auxdata` as an "optional" parameter to + * `resolve_descriptors`, so that it can be filled there if not NULL.) + */ +typedef int (PyArrayMethod_TranslateGivenDescriptors)(int nin, int nout, + PyArray_DTypeMeta *const wrapped_dtypes[], + PyArray_Descr *const given_descrs[], PyArray_Descr *new_descrs[]); + +/** + * The function to convert the actual loop descriptors (as returned by the + * original `resolve_descriptors` function) to the ones the output array + * should use. + * This function must return "viewable" types, it must not mutate them in any + * form that would break the inner-loop logic. Does not need to support NULL. + * + * The function must clean up on error. + * + * @param nin Number of input arguments + * @param nout Number of output arguments + * @param new_dtypes The DTypes of the output (usually probably not needed) + * @param given_descrs Original given_descrs to the resolver, necessary to + * fetch any information related to the new dtypes from the original. + * @param original_descrs The `loop_descrs` returned by the wrapped loop. + * @param loop_descrs The output descriptors, compatible to `original_descrs`. + * + * @returns 0 on success, -1 on failure. + */ +typedef int (PyArrayMethod_TranslateLoopDescriptors)(int nin, int nout, + PyArray_DTypeMeta *const new_dtypes[], PyArray_Descr *const given_descrs[], + PyArray_Descr *original_descrs[], PyArray_Descr *loop_descrs[]); + + + +/* + * A traverse loop working on a single array. This is similar to the general + * strided-loop function. This is designed for loops that need to visit every + * element of a single array. + * + * Currently this is used for array clearing, via the NPY_DT_get_clear_loop + * API hook, and zero-filling, via the NPY_DT_get_fill_zero_loop API hook. + * These are most useful for handling arrays storing embedded references to + * python objects or heap-allocated data. + * + * The `void *traverse_context` is passed in because we may need to pass in + * Interpreter state or similar in the future, but we don't want to pass in + * a full context (with pointers to dtypes, method, caller which all make + * no sense for a traverse function). + * + * We assume for now that this context can be just passed through in the + * the future (for structured dtypes). + * + */ +typedef int (PyArrayMethod_TraverseLoop)( + void *traverse_context, const PyArray_Descr *descr, char *data, + npy_intp size, npy_intp stride, NpyAuxData *auxdata); + + +/* + * Simplified get_loop function specific to dtype traversal + * + * It should set the flags needed for the traversal loop and set out_loop to the + * loop function, which must be a valid PyArrayMethod_TraverseLoop + * pointer. Currently this is used for zero-filling and clearing arrays storing + * embedded references. + * + */ +typedef int (PyArrayMethod_GetTraverseLoop)( + void *traverse_context, const PyArray_Descr *descr, + int aligned, npy_intp fixed_stride, + PyArrayMethod_TraverseLoop **out_loop, NpyAuxData **out_auxdata, + NPY_ARRAYMETHOD_FLAGS *flags); + + +/* + * Type of the C promoter function, which must be wrapped into a + * PyCapsule with name "numpy._ufunc_promoter". + * + * Note that currently the output dtypes are always NULL unless they are + * also part of the signature. This is an implementation detail and could + * change in the future. However, in general promoters should not have a + * need for output dtypes. + * (There are potential use-cases, these are currently unsupported.) + */ +typedef int (PyArrayMethod_PromoterFunction)(PyObject *ufunc, + PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], + PyArray_DTypeMeta *new_op_dtypes[]); + +/* + * **************************** + * DTYPE API + * **************************** + */ + +#define NPY_DT_ABSTRACT 1 << 1 +#define NPY_DT_PARAMETRIC 1 << 2 +#define NPY_DT_NUMERIC 1 << 3 + +/* + * These correspond to slots in the NPY_DType_Slots struct and must + * be in the same order as the members of that struct. If new slots + * get added or old slots get removed NPY_NUM_DTYPE_SLOTS must also + * be updated + */ + +#define NPY_DT_discover_descr_from_pyobject 1 +// this slot is considered private because its API hasn't been decided +#define _NPY_DT_is_known_scalar_type 2 +#define NPY_DT_default_descr 3 +#define NPY_DT_common_dtype 4 +#define NPY_DT_common_instance 5 +#define NPY_DT_ensure_canonical 6 +#define NPY_DT_setitem 7 +#define NPY_DT_getitem 8 +#define NPY_DT_get_clear_loop 9 +#define NPY_DT_get_fill_zero_loop 10 +#define NPY_DT_finalize_descr 11 +#define NPY_DT_get_constant 12 + +// These PyArray_ArrFunc slots will be deprecated and replaced eventually +// getitem and setitem can be defined as a performance optimization; +// by default the user dtypes call `legacy_getitem_using_DType` and +// `legacy_setitem_using_DType`, respectively. This functionality is +// only supported for basic NumPy DTypes. + + +// used to separate dtype slots from arrfuncs slots +// intended only for internal use but defined here for clarity +#define _NPY_DT_ARRFUNCS_OFFSET (1 << 11) + +// Cast is disabled +// #define NPY_DT_PyArray_ArrFuncs_cast 0 + _NPY_DT_ARRFUNCS_OFFSET + +#define NPY_DT_PyArray_ArrFuncs_getitem 1 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_setitem 2 + _NPY_DT_ARRFUNCS_OFFSET + +// Copyswap is disabled +// #define NPY_DT_PyArray_ArrFuncs_copyswapn 3 + _NPY_DT_ARRFUNCS_OFFSET +// #define NPY_DT_PyArray_ArrFuncs_copyswap 4 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_compare 5 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_argmax 6 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_dotfunc 7 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_scanfunc 8 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_fromstr 9 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_nonzero 10 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_fill 11 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_fillwithscalar 12 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_sort 13 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_argsort 14 + _NPY_DT_ARRFUNCS_OFFSET + +// Casting related slots are disabled. See +// https://github.com/numpy/numpy/pull/23173#discussion_r1101098163 +// #define NPY_DT_PyArray_ArrFuncs_castdict 15 + _NPY_DT_ARRFUNCS_OFFSET +// #define NPY_DT_PyArray_ArrFuncs_scalarkind 16 + _NPY_DT_ARRFUNCS_OFFSET +// #define NPY_DT_PyArray_ArrFuncs_cancastscalarkindto 17 + _NPY_DT_ARRFUNCS_OFFSET +// #define NPY_DT_PyArray_ArrFuncs_cancastto 18 + _NPY_DT_ARRFUNCS_OFFSET + +// These are deprecated in NumPy 1.19, so are disabled here. +// #define NPY_DT_PyArray_ArrFuncs_fastclip 19 + _NPY_DT_ARRFUNCS_OFFSET +// #define NPY_DT_PyArray_ArrFuncs_fastputmask 20 + _NPY_DT_ARRFUNCS_OFFSET +// #define NPY_DT_PyArray_ArrFuncs_fasttake 21 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_argmin 22 + _NPY_DT_ARRFUNCS_OFFSET + + +// TODO: These slots probably still need some thought, and/or a way to "grow"? +typedef struct { + PyTypeObject *typeobj; /* type of python scalar or NULL */ + int flags; /* flags, including parametric and abstract */ + /* NULL terminated cast definitions. Use NULL for the newly created DType */ + PyArrayMethod_Spec **casts; + PyType_Slot *slots; + /* Baseclass or NULL (will always subclass `np.dtype`) */ + PyTypeObject *baseclass; +} PyArrayDTypeMeta_Spec; + + +typedef PyArray_Descr *(PyArrayDTypeMeta_DiscoverDescrFromPyobject)( + PyArray_DTypeMeta *cls, PyObject *obj); + +/* + * Before making this public, we should decide whether it should pass + * the type, or allow looking at the object. A possible use-case: + * `np.array(np.array([0]), dtype=np.ndarray)` + * Could consider arrays that are not `dtype=ndarray` "scalars". + */ +typedef int (PyArrayDTypeMeta_IsKnownScalarType)( + PyArray_DTypeMeta *cls, PyTypeObject *obj); + +typedef PyArray_Descr *(PyArrayDTypeMeta_DefaultDescriptor)(PyArray_DTypeMeta *cls); +typedef PyArray_DTypeMeta *(PyArrayDTypeMeta_CommonDType)( + PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2); + + +/* + * Convenience utility for getting a reference to the DType metaclass associated + * with a dtype instance. + */ +#define NPY_DTYPE(descr) ((PyArray_DTypeMeta *)Py_TYPE(descr)) + +static inline PyArray_DTypeMeta * +NPY_DT_NewRef(PyArray_DTypeMeta *o) { + Py_INCREF((PyObject *)o); + return o; +} + + +typedef PyArray_Descr *(PyArrayDTypeMeta_CommonInstance)( + PyArray_Descr *dtype1, PyArray_Descr *dtype2); +typedef PyArray_Descr *(PyArrayDTypeMeta_EnsureCanonical)(PyArray_Descr *dtype); +/* + * Returns either a new reference to *dtype* or a new descriptor instance + * initialized with the same parameters as *dtype*. The caller cannot know + * which choice a dtype will make. This function is called just before the + * array buffer is created for a newly created array, it is not called for + * views and the descriptor returned by this function is attached to the array. + */ +typedef PyArray_Descr *(PyArrayDTypeMeta_FinalizeDescriptor)(PyArray_Descr *dtype); + +/* + * Constants that can be queried and used e.g. by reduce identies defaults. + * These are also used to expose .finfo and .iinfo for example. + */ +/* Numerical constants */ +#define NPY_CONSTANT_zero 1 +#define NPY_CONSTANT_one 2 +#define NPY_CONSTANT_all_bits_set 3 +#define NPY_CONSTANT_maximum_finite 4 +#define NPY_CONSTANT_minimum_finite 5 +#define NPY_CONSTANT_inf 6 +#define NPY_CONSTANT_ninf 7 +#define NPY_CONSTANT_nan 8 +#define NPY_CONSTANT_finfo_radix 9 +#define NPY_CONSTANT_finfo_eps 10 +#define NPY_CONSTANT_finfo_smallest_normal 11 +#define NPY_CONSTANT_finfo_smallest_subnormal 12 +/* Constants that are always of integer type, value is `npy_intp/Py_ssize_t` */ +#define NPY_CONSTANT_finfo_nmant (1 << 16) + 0 +#define NPY_CONSTANT_finfo_min_exp (1 << 16) + 1 +#define NPY_CONSTANT_finfo_max_exp (1 << 16) + 2 +#define NPY_CONSTANT_finfo_decimal_digits (1 << 16) + 3 + +/* It may make sense to continue with other constants here, e.g. pi, etc? */ + +/* + * Function to get a constant value for the dtype. Data may be unaligned, the + * function is always called with the GIL held. + * + * @param descr The dtype instance (i.e. self) + * @param ID The ID of the constant to get. + * @param data Pointer to the data to be written too, may be unaligned. + * @returns 1 on success, 0 if the constant is not available, or -1 with an error set. + */ +typedef int (PyArrayDTypeMeta_GetConstant)(PyArray_Descr *descr, int ID, void *data); + +/* + * TODO: These two functions are currently only used for experimental DType + * API support. Their relation should be "reversed": NumPy should + * always use them internally. + * There are open points about "casting safety" though, e.g. setting + * elements is currently always unsafe. + */ +typedef int(PyArrayDTypeMeta_SetItem)(PyArray_Descr *, PyObject *, char *); +typedef PyObject *(PyArrayDTypeMeta_GetItem)(PyArray_Descr *, char *); + +typedef struct { + NPY_SORTKIND flags; +} PyArrayMethod_SortParameters; + +#endif /* NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_ */ diff --git a/local_packages/numpy/_core/include/numpy/halffloat.h b/local_packages/numpy/_core/include/numpy/halffloat.h new file mode 100644 index 0000000..9504016 --- /dev/null +++ b/local_packages/numpy/_core/include/numpy/halffloat.h @@ -0,0 +1,70 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_ + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Half-precision routines + */ + +/* Conversions */ +float npy_half_to_float(npy_half h); +double npy_half_to_double(npy_half h); +npy_half npy_float_to_half(float f); +npy_half npy_double_to_half(double d); +/* Comparisons */ +int npy_half_eq(npy_half h1, npy_half h2); +int npy_half_ne(npy_half h1, npy_half h2); +int npy_half_le(npy_half h1, npy_half h2); +int npy_half_lt(npy_half h1, npy_half h2); +int npy_half_ge(npy_half h1, npy_half h2); +int npy_half_gt(npy_half h1, npy_half h2); +/* faster *_nonan variants for when you know h1 and h2 are not NaN */ +int npy_half_eq_nonan(npy_half h1, npy_half h2); +int npy_half_lt_nonan(npy_half h1, npy_half h2); +int npy_half_le_nonan(npy_half h1, npy_half h2); +/* Miscellaneous functions */ +int npy_half_iszero(npy_half h); +int npy_half_isnan(npy_half h); +int npy_half_isinf(npy_half h); +int npy_half_isfinite(npy_half h); +int npy_half_signbit(npy_half h); +npy_half npy_half_copysign(npy_half x, npy_half y); +npy_half npy_half_spacing(npy_half h); +npy_half npy_half_nextafter(npy_half x, npy_half y); +npy_half npy_half_divmod(npy_half x, npy_half y, npy_half *modulus); + +/* + * Half-precision constants + */ + +#define NPY_HALF_ZERO (0x0000u) +#define NPY_HALF_PZERO (0x0000u) +#define NPY_HALF_NZERO (0x8000u) +#define NPY_HALF_ONE (0x3c00u) +#define NPY_HALF_NEGONE (0xbc00u) +#define NPY_HALF_PINF (0x7c00u) +#define NPY_HALF_NINF (0xfc00u) +#define NPY_HALF_NAN (0x7e00u) + +#define NPY_MAX_HALF (0x7bffu) + +/* + * Bit-level conversions + */ + +npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f); +npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d); +npy_uint32 npy_halfbits_to_floatbits(npy_uint16 h); +npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h); + +#ifdef __cplusplus +} +#endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_ */ diff --git a/local_packages/numpy/_core/include/numpy/ndarrayobject.h b/local_packages/numpy/_core/include/numpy/ndarrayobject.h new file mode 100644 index 0000000..6bfc40f --- /dev/null +++ b/local_packages/numpy/_core/include/numpy/ndarrayobject.h @@ -0,0 +1,304 @@ +/* + * DON'T INCLUDE THIS DIRECTLY. + */ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include "ndarraytypes.h" +#include "dtype_api.h" + +/* Includes the "function" C-API -- these are all stored in a + list of pointers --- one for each file + The two lists are concatenated into one in multiarray. + + They are available as import_array() +*/ + +#include "__multiarray_api.h" + +/* + * Include any definitions which are defined differently for 1.x and 2.x + * (Symbols only available on 2.x are not there, but rather guarded.) + */ +#include "npy_2_compat.h" + +/* C-API that requires previous API to be defined */ + +#define PyArray_DescrCheck(op) PyObject_TypeCheck(op, &PyArrayDescr_Type) + +#define PyArray_Check(op) PyObject_TypeCheck(op, &PyArray_Type) +#define PyArray_CheckExact(op) (Py_TYPE((PyObject*)(op)) == &PyArray_Type) + +#define PyArray_HasArrayInterfaceType(op, type, context, out) \ + ((((out)=PyArray_FromStructInterface(op)) != Py_NotImplemented) || \ + (((out)=PyArray_FromInterface(op)) != Py_NotImplemented) || \ + (((out)=PyArray_FromArrayAttr(op, type, context)) != \ + Py_NotImplemented)) + +#define PyArray_HasArrayInterface(op, out) \ + PyArray_HasArrayInterfaceType(op, NULL, NULL, out) + +#define PyArray_IsZeroDim(op) (PyArray_Check(op) && \ + (PyArray_NDIM((PyArrayObject *)op) == 0)) + +#define PyArray_IsScalar(obj, cls) \ + (PyObject_TypeCheck(obj, &Py##cls##ArrType_Type)) + +#define PyArray_CheckScalar(m) (PyArray_IsScalar(m, Generic) || \ + PyArray_IsZeroDim(m)) +#define PyArray_IsPythonNumber(obj) \ + (PyFloat_Check(obj) || PyComplex_Check(obj) || \ + PyLong_Check(obj) || PyBool_Check(obj)) +#define PyArray_IsIntegerScalar(obj) (PyLong_Check(obj) \ + || PyArray_IsScalar((obj), Integer)) +#define PyArray_IsPythonScalar(obj) \ + (PyArray_IsPythonNumber(obj) || PyBytes_Check(obj) || \ + PyUnicode_Check(obj)) + +#define PyArray_IsAnyScalar(obj) \ + (PyArray_IsScalar(obj, Generic) || PyArray_IsPythonScalar(obj)) + +#define PyArray_CheckAnyScalar(obj) (PyArray_IsPythonScalar(obj) || \ + PyArray_CheckScalar(obj)) + + +#define PyArray_GETCONTIGUOUS(m) (PyArray_ISCONTIGUOUS(m) ? \ + Py_INCREF(m), (m) : \ + (PyArrayObject *)(PyArray_Copy(m))) + +#define PyArray_SAMESHAPE(a1,a2) ((PyArray_NDIM(a1) == PyArray_NDIM(a2)) && \ + PyArray_CompareLists(PyArray_DIMS(a1), \ + PyArray_DIMS(a2), \ + PyArray_NDIM(a1))) + +#define PyArray_SIZE(m) PyArray_MultiplyList(PyArray_DIMS(m), PyArray_NDIM(m)) +#define PyArray_NBYTES(m) (PyArray_ITEMSIZE(m) * PyArray_SIZE(m)) +#define PyArray_FROM_O(m) PyArray_FromAny(m, NULL, 0, 0, 0, NULL) + +#define PyArray_FROM_OF(m,flags) PyArray_CheckFromAny(m, NULL, 0, 0, flags, \ + NULL) + +#define PyArray_FROM_OT(m,type) PyArray_FromAny(m, \ + PyArray_DescrFromType(type), 0, 0, 0, NULL) + +#define PyArray_FROM_OTF(m, type, flags) \ + PyArray_FromAny(m, PyArray_DescrFromType(type), 0, 0, \ + (((flags) & NPY_ARRAY_ENSURECOPY) ? \ + ((flags) | NPY_ARRAY_DEFAULT) : (flags)), NULL) + +#define PyArray_FROMANY(m, type, min, max, flags) \ + PyArray_FromAny(m, PyArray_DescrFromType(type), min, max, \ + (((flags) & NPY_ARRAY_ENSURECOPY) ? \ + (flags) | NPY_ARRAY_DEFAULT : (flags)), NULL) + +#define PyArray_ZEROS(m, dims, type, is_f_order) \ + PyArray_Zeros(m, dims, PyArray_DescrFromType(type), is_f_order) + +#define PyArray_EMPTY(m, dims, type, is_f_order) \ + PyArray_Empty(m, dims, PyArray_DescrFromType(type), is_f_order) + +#define PyArray_FILLWBYTE(obj, val) memset(PyArray_DATA(obj), val, \ + PyArray_NBYTES(obj)) + +#define PyArray_ContiguousFromAny(op, type, min_depth, max_depth) \ + PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ + max_depth, NPY_ARRAY_DEFAULT, NULL) + +#define PyArray_EquivArrTypes(a1, a2) \ + PyArray_EquivTypes(PyArray_DESCR(a1), PyArray_DESCR(a2)) + +#define PyArray_EquivByteorders(b1, b2) \ + (((b1) == (b2)) || (PyArray_ISNBO(b1) == PyArray_ISNBO(b2))) + +#define PyArray_SimpleNew(nd, dims, typenum) \ + PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, NULL, 0, 0, NULL) + +#define PyArray_SimpleNewFromData(nd, dims, typenum, data) \ + PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, \ + data, 0, NPY_ARRAY_CARRAY, NULL) + +#define PyArray_SimpleNewFromDescr(nd, dims, descr) \ + PyArray_NewFromDescr(&PyArray_Type, descr, nd, dims, \ + NULL, NULL, 0, NULL) + +#define PyArray_ToScalar(data, arr) \ + PyArray_Scalar(data, PyArray_DESCR(arr), (PyObject *)arr) + + +/* These might be faster without the dereferencing of obj + going on inside -- of course an optimizing compiler should + inline the constants inside a for loop making it a moot point +*/ + +#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDES(obj)[0])) + +#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDES(obj)[0] + \ + (j)*PyArray_STRIDES(obj)[1])) + +#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDES(obj)[0] + \ + (j)*PyArray_STRIDES(obj)[1] + \ + (k)*PyArray_STRIDES(obj)[2])) + +#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDES(obj)[0] + \ + (j)*PyArray_STRIDES(obj)[1] + \ + (k)*PyArray_STRIDES(obj)[2] + \ + (l)*PyArray_STRIDES(obj)[3])) + +static inline void +PyArray_DiscardWritebackIfCopy(PyArrayObject *arr) +{ + PyArrayObject_fields *fa = (PyArrayObject_fields *)arr; + if (fa && fa->base) { + if (fa->flags & NPY_ARRAY_WRITEBACKIFCOPY) { + PyArray_ENABLEFLAGS((PyArrayObject*)fa->base, NPY_ARRAY_WRITEABLE); + Py_DECREF(fa->base); + fa->base = NULL; + PyArray_CLEARFLAGS(arr, NPY_ARRAY_WRITEBACKIFCOPY); + } + } +} + +#define PyArray_DESCR_REPLACE(descr) do { \ + PyArray_Descr *_new_; \ + _new_ = PyArray_DescrNew(descr); \ + Py_XDECREF(descr); \ + descr = _new_; \ + } while(0) + +/* Copy should always return contiguous array */ +#define PyArray_Copy(obj) PyArray_NewCopy(obj, NPY_CORDER) + +#define PyArray_FromObject(op, type, min_depth, max_depth) \ + PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ + max_depth, NPY_ARRAY_BEHAVED | \ + NPY_ARRAY_ENSUREARRAY, NULL) + +#define PyArray_ContiguousFromObject(op, type, min_depth, max_depth) \ + PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ + max_depth, NPY_ARRAY_DEFAULT | \ + NPY_ARRAY_ENSUREARRAY, NULL) + +#define PyArray_CopyFromObject(op, type, min_depth, max_depth) \ + PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ + max_depth, NPY_ARRAY_ENSURECOPY | \ + NPY_ARRAY_DEFAULT | \ + NPY_ARRAY_ENSUREARRAY, NULL) + +#define PyArray_Cast(mp, type_num) \ + PyArray_CastToType(mp, PyArray_DescrFromType(type_num), 0) + +#define PyArray_Take(ap, items, axis) \ + PyArray_TakeFrom(ap, items, axis, NULL, NPY_RAISE) + +#define PyArray_Put(ap, items, values) \ + PyArray_PutTo(ap, items, values, NPY_RAISE) + + +/* + Check to see if this key in the dictionary is the "title" + entry of the tuple (i.e. a duplicate dictionary entry in the fields + dict). +*/ + +static inline int +NPY_TITLE_KEY_check(PyObject *key, PyObject *value) +{ + PyObject *title; + if (PyTuple_Size(value) != 3) { + return 0; + } + title = PyTuple_GetItem(value, 2); + if (key == title) { + return 1; + } +#ifdef PYPY_VERSION + /* + * On PyPy, dictionary keys do not always preserve object identity. + * Fall back to comparison by value. + */ + if (PyUnicode_Check(title) && PyUnicode_Check(key)) { + return PyUnicode_Compare(title, key) == 0 ? 1 : 0; + } +#endif + return 0; +} + +/* Macro, for backward compat with "if NPY_TITLE_KEY(key, value) { ..." */ +#define NPY_TITLE_KEY(key, value) (NPY_TITLE_KEY_check((key), (value))) + +#define DEPRECATE(msg) PyErr_WarnEx(PyExc_DeprecationWarning,msg,1) +#define DEPRECATE_FUTUREWARNING(msg) PyErr_WarnEx(PyExc_FutureWarning,msg,1) + + +/* + * These macros and functions unfortunately require runtime version checks + * that are only defined in `npy_2_compat.h`. For that reasons they cannot be + * part of `ndarraytypes.h` which tries to be self contained. + */ + +static inline npy_intp +PyArray_ITEMSIZE(const PyArrayObject *arr) +{ + return PyDataType_ELSIZE(((PyArrayObject_fields *)arr)->descr); +} + +#define PyDataType_HASFIELDS(obj) (PyDataType_ISLEGACY((PyArray_Descr*)(obj)) && PyDataType_NAMES((PyArray_Descr*)(obj)) != NULL) +#define PyDataType_HASSUBARRAY(dtype) (PyDataType_ISLEGACY(dtype) && PyDataType_SUBARRAY(dtype) != NULL) +#define PyDataType_ISUNSIZED(dtype) ((dtype)->elsize == 0 && \ + !PyDataType_HASFIELDS(dtype)) + +#define PyDataType_FLAGCHK(dtype, flag) \ + ((PyDataType_FLAGS(dtype) & (flag)) == (flag)) + +#define PyDataType_REFCHK(dtype) \ + PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT) + +#define NPY_BEGIN_THREADS_DESCR(dtype) \ + do {if (!(PyDataType_FLAGCHK((dtype), NPY_NEEDS_PYAPI))) \ + NPY_BEGIN_THREADS;} while (0); + +#define NPY_END_THREADS_DESCR(dtype) \ + do {if (!(PyDataType_FLAGCHK((dtype), NPY_NEEDS_PYAPI))) \ + NPY_END_THREADS; } while (0); + +#if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD) +/* The internal copy of this is now defined in `dtypemeta.h` */ +/* + * `PyArray_Scalar` is the same as this function but converts will convert + * most NumPy types to Python scalars. + */ +static inline PyObject * +PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr) +{ + return PyDataType_GetArrFuncs(((PyArrayObject_fields *)arr)->descr)->getitem( + (void *)itemptr, (PyArrayObject *)arr); +} + +/* + * SETITEM should only be used if it is known that the value is a scalar + * and of a type understood by the arrays dtype. + * Use `PyArray_Pack` if the value may be of a different dtype. + */ +static inline int +PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v) +{ + return PyDataType_GetArrFuncs(((PyArrayObject_fields *)arr)->descr)->setitem(v, itemptr, arr); +} +#endif /* not internal */ + + +#ifdef __cplusplus +} +#endif + + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_ */ diff --git a/local_packages/numpy/_core/include/numpy/ndarraytypes.h b/local_packages/numpy/_core/include/numpy/ndarraytypes.h new file mode 100644 index 0000000..f740788 --- /dev/null +++ b/local_packages/numpy/_core/include/numpy/ndarraytypes.h @@ -0,0 +1,1982 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_ + +#include "npy_common.h" +#include "npy_endian.h" +#include "npy_cpu.h" +#include "utils.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN + +/* Always allow threading unless it was explicitly disabled at build time */ +#if !NPY_NO_SMP + #define NPY_ALLOW_THREADS 1 +#else + #define NPY_ALLOW_THREADS 0 +#endif + +#ifndef __has_extension +#define __has_extension(x) 0 +#endif + +/* + * There are several places in the code where an array of dimensions + * is allocated statically. This is the size of that static + * allocation. + * + * The array creation itself could have arbitrary dimensions but all + * the places where static allocation is used would need to be changed + * to dynamic (including inside of several structures) + * + * As of NumPy 2.0, we strongly discourage the downstream use of NPY_MAXDIMS, + * but since auditing everything seems a big ask, define it as 64. + * A future version could: + * - Increase or remove the limit and require recompilation (like 2.0 did) + * - Deprecate or remove the macro but keep the limit (at basically any time) + */ +#define NPY_MAXDIMS 64 +/* We cannot change this as it would break ABI: */ +#define NPY_MAXDIMS_LEGACY_ITERS 32 +/* NPY_MAXARGS is version dependent and defined in npy_2_compat.h */ + +/* Used for Converter Functions "O&" code in ParseTuple */ +#define NPY_FAIL 0 +#define NPY_SUCCEED 1 + + +enum NPY_TYPES { NPY_BOOL=0, + NPY_BYTE, NPY_UBYTE, + NPY_SHORT, NPY_USHORT, + NPY_INT, NPY_UINT, + NPY_LONG, NPY_ULONG, + NPY_LONGLONG, NPY_ULONGLONG, + NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, + NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, + NPY_OBJECT=17, + NPY_STRING, NPY_UNICODE, + NPY_VOID, + /* + * New 1.6 types appended, may be integrated + * into the above in 2.0. + */ + NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, + + NPY_CHAR, /* Deprecated, will raise if used */ + + /* The number of *legacy* dtypes */ + NPY_NTYPES_LEGACY=24, + + /* assign a high value to avoid changing this in the + future when new dtypes are added */ + NPY_NOTYPE=25, + + NPY_USERDEF=256, /* leave room for characters */ + + /* The number of types not including the new 1.6 types */ + NPY_NTYPES_ABI_COMPATIBLE=21, + + /* + * New DTypes which do not share the legacy layout + * (added after NumPy 2.0). VSTRING is the first of these + * we may open up a block for user-defined dtypes in the + * future. + */ + NPY_VSTRING=2056, +}; + + +/* basetype array priority */ +#define NPY_PRIORITY 0.0 + +/* default subtype priority */ +#define NPY_SUBTYPE_PRIORITY 1.0 + +/* default scalar priority */ +#define NPY_SCALAR_PRIORITY -1000000.0 + +/* How many floating point types are there (excluding half) */ +#define NPY_NUM_FLOATTYPE 3 + +/* + * These characters correspond to the array type and the struct + * module + */ + +enum NPY_TYPECHAR { + NPY_BOOLLTR = '?', + NPY_BYTELTR = 'b', + NPY_UBYTELTR = 'B', + NPY_SHORTLTR = 'h', + NPY_USHORTLTR = 'H', + NPY_INTLTR = 'i', + NPY_UINTLTR = 'I', + NPY_LONGLTR = 'l', + NPY_ULONGLTR = 'L', + NPY_LONGLONGLTR = 'q', + NPY_ULONGLONGLTR = 'Q', + NPY_HALFLTR = 'e', + NPY_FLOATLTR = 'f', + NPY_DOUBLELTR = 'd', + NPY_LONGDOUBLELTR = 'g', + NPY_CFLOATLTR = 'F', + NPY_CDOUBLELTR = 'D', + NPY_CLONGDOUBLELTR = 'G', + NPY_OBJECTLTR = 'O', + NPY_STRINGLTR = 'S', + NPY_DEPRECATED_STRINGLTR2 = 'a', + NPY_UNICODELTR = 'U', + NPY_VOIDLTR = 'V', + NPY_DATETIMELTR = 'M', + NPY_TIMEDELTALTR = 'm', + NPY_CHARLTR = 'c', + + /* + * New non-legacy DTypes + */ + NPY_VSTRINGLTR = 'T', + + /* + * Note, we removed `NPY_INTPLTR` due to changing its definition + * to 'n', rather than 'p'. On any typical platform this is the + * same integer. 'n' should be used for the `np.intp` with the same + * size as `size_t` while 'p' remains pointer sized. + * + * 'p', 'P', 'n', and 'N' are valid and defined explicitly + * in `arraytypes.c.src`. + */ + + /* + * These are for dtype 'kinds', not dtype 'typecodes' + * as the above are for. + */ + NPY_GENBOOLLTR ='b', + NPY_SIGNEDLTR = 'i', + NPY_UNSIGNEDLTR = 'u', + NPY_FLOATINGLTR = 'f', + NPY_COMPLEXLTR = 'c', + +}; + +/* + * Changing this may break Numpy API compatibility due to changing offsets in + * PyArray_ArrFuncs, so be careful. Here we have reused the mergesort slot for + * any kind of stable sort, the actual implementation will depend on the data + * type. + * + * Updated in NumPy 2.4 + * + * Updated with new names denoting requirements rather than specifying a + * particular algorithm. All the previous values are reused in a way that + * should be downstream compatible, but the actual algorithms used may be + * different than before. The new approach should be more flexible and easier + * to update. + * + * Names with a leading underscore are private, and should only be used + * internally by NumPy. + * + * NPY_NSORTS remains the same for backwards compatibility, it should not be + * changed. + */ + +typedef enum { + _NPY_SORT_UNDEFINED = -1, + NPY_QUICKSORT = 0, + NPY_HEAPSORT = 1, + NPY_MERGESORT = 2, + NPY_STABLESORT = 2, + // new style names + _NPY_SORT_HEAPSORT = 1, + NPY_SORT_DEFAULT = 0, + NPY_SORT_STABLE = 2, + NPY_SORT_DESCENDING = 4, +} NPY_SORTKIND; +#define NPY_NSORTS (NPY_STABLESORT + 1) + + +typedef enum { + NPY_INTROSELECT=0 +} NPY_SELECTKIND; +#define NPY_NSELECTS (NPY_INTROSELECT + 1) + + +typedef enum { + NPY_SEARCHLEFT=0, + NPY_SEARCHRIGHT=1 +} NPY_SEARCHSIDE; +#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1) + + +typedef enum { + NPY_NOSCALAR=-1, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR +} NPY_SCALARKIND; +#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1) + +/* For specifying array memory layout or iteration order */ +typedef enum { + /* Fortran order if inputs are all Fortran, C otherwise */ + NPY_ANYORDER=-1, + /* C order */ + NPY_CORDER=0, + /* Fortran order */ + NPY_FORTRANORDER=1, + /* An order as close to the inputs as possible */ + NPY_KEEPORDER=2 +} NPY_ORDER; + +#if NPY_FEATURE_VERSION >= NPY_2_4_API_VERSION +/* + * check that no values overflow/change during casting + * Used explicitly only in the ArrayMethod creation or resolve_dtypes functions to + * indicate that a same-value cast is supported. In external APIs, use only + * NPY_SAME_VALUE_CASTING + */ +#define NPY_SAME_VALUE_CASTING_FLAG 64 +#endif + +/* For specifying allowed casting in operations which support it */ +typedef enum { + _NPY_ERROR_OCCURRED_IN_CAST = -1, + /* Only allow identical types */ + NPY_NO_CASTING=0, + /* Allow identical and byte swapped types */ + NPY_EQUIV_CASTING=1, + /* Only allow safe casts */ + NPY_SAFE_CASTING=2, + /* Allow safe casts or casts within the same kind */ + NPY_SAME_KIND_CASTING=3, + /* Allow any casts */ + NPY_UNSAFE_CASTING=4, +#if NPY_FEATURE_VERSION >= NPY_2_4_API_VERSION + NPY_SAME_VALUE_CASTING=NPY_UNSAFE_CASTING | NPY_SAME_VALUE_CASTING_FLAG, +#endif +} NPY_CASTING; + +typedef enum { + NPY_CLIP=0, + NPY_WRAP=1, + NPY_RAISE=2 +} NPY_CLIPMODE; + +typedef enum { + NPY_VALID=0, + NPY_SAME=1, + NPY_FULL=2 +} NPY_CORRELATEMODE; + +/* The special not-a-time (NaT) value */ +#define NPY_DATETIME_NAT NPY_MIN_INT64 + +/* + * Upper bound on the length of a DATETIME ISO 8601 string + * YEAR: 21 (64-bit year) + * MONTH: 3 + * DAY: 3 + * HOURS: 3 + * MINUTES: 3 + * SECONDS: 3 + * ATTOSECONDS: 1 + 3*6 + * TIMEZONE: 5 + * NULL TERMINATOR: 1 + */ +#define NPY_DATETIME_MAX_ISO8601_STRLEN (21 + 3*5 + 1 + 3*6 + 6 + 1) + +/* The FR in the unit names stands for frequency */ +typedef enum { + /* Force signed enum type, must be -1 for code compatibility */ + NPY_FR_ERROR = -1, /* error or undetermined */ + + /* Start of valid units */ + NPY_FR_Y = 0, /* Years */ + NPY_FR_M = 1, /* Months */ + NPY_FR_W = 2, /* Weeks */ + /* Gap where 1.6 NPY_FR_B (value 3) was */ + NPY_FR_D = 4, /* Days */ + NPY_FR_h = 5, /* hours */ + NPY_FR_m = 6, /* minutes */ + NPY_FR_s = 7, /* seconds */ + NPY_FR_ms = 8, /* milliseconds */ + NPY_FR_us = 9, /* microseconds */ + NPY_FR_ns = 10, /* nanoseconds */ + NPY_FR_ps = 11, /* picoseconds */ + NPY_FR_fs = 12, /* femtoseconds */ + NPY_FR_as = 13, /* attoseconds */ + NPY_FR_GENERIC = 14 /* unbound units, can convert to anything */ +} NPY_DATETIMEUNIT; + +/* + * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS + * is technically one more than the actual number of units. + */ +#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1) +#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC + +/* + * Business day conventions for mapping invalid business + * days to valid business days. + */ +typedef enum { + /* Go forward in time to the following business day. */ + NPY_BUSDAY_FORWARD, + NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD, + /* Go backward in time to the preceding business day. */ + NPY_BUSDAY_BACKWARD, + NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD, + /* + * Go forward in time to the following business day, unless it + * crosses a month boundary, in which case go backward + */ + NPY_BUSDAY_MODIFIEDFOLLOWING, + /* + * Go backward in time to the preceding business day, unless it + * crosses a month boundary, in which case go forward. + */ + NPY_BUSDAY_MODIFIEDPRECEDING, + /* Produce a NaT for non-business days. */ + NPY_BUSDAY_NAT, + /* Raise an exception for non-business days. */ + NPY_BUSDAY_RAISE +} NPY_BUSDAY_ROLL; + + +/************************************************************ + * NumPy Auxiliary Data for inner loops, sort functions, etc. + ************************************************************/ + +/* + * When creating an auxiliary data struct, this should always appear + * as the first member, like this: + * + * typedef struct { + * NpyAuxData base; + * double constant; + * } constant_multiplier_aux_data; + */ +typedef struct NpyAuxData_tag NpyAuxData; + +/* Function pointers for freeing or cloning auxiliary data */ +typedef void (NpyAuxData_FreeFunc) (NpyAuxData *); +typedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *); + +struct NpyAuxData_tag { + NpyAuxData_FreeFunc *free; + NpyAuxData_CloneFunc *clone; + /* To allow for a bit of expansion without breaking the ABI */ + void *reserved[2]; +}; + +/* Macros to use for freeing and cloning auxiliary data */ +#define NPY_AUXDATA_FREE(auxdata) \ + do { \ + if ((auxdata) != NULL) { \ + (auxdata)->free(auxdata); \ + } \ + } while(0) +#define NPY_AUXDATA_CLONE(auxdata) \ + ((auxdata)->clone(auxdata)) + +#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr); +#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr); + +/* +* Macros to define how array, and dimension/strides data is +* allocated. These should be made private +*/ + +#define NPY_USE_PYMEM 1 + + +#if NPY_USE_PYMEM == 1 +/* use the Raw versions which are safe to call with the GIL released */ +#define PyArray_malloc PyMem_RawMalloc +#define PyArray_free PyMem_RawFree +#define PyArray_realloc PyMem_RawRealloc +#else +#define PyArray_malloc malloc +#define PyArray_free free +#define PyArray_realloc realloc +#endif + +/* Dimensions and strides */ +#define PyDimMem_NEW(size) \ + ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp))) + +#define PyDimMem_FREE(ptr) PyArray_free(ptr) + +#define PyDimMem_RENEW(ptr,size) \ + ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp))) + +/* forward declaration */ +struct _PyArray_Descr; + +/* These must deal with unaligned and swapped data if necessary */ +typedef PyObject * (PyArray_GetItemFunc) (void *, void *); +typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *); + +typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp, + npy_intp, int, void *); + +typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *); +typedef npy_bool (PyArray_NonzeroFunc)(void *, void *); + + +/* + * These assume aligned and notswapped data -- a buffer will be used + * before or contiguous data will be obtained + */ + +typedef int (PyArray_CompareFunc)(const void *, const void *, void *); +typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *); + +typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *, + npy_intp, void *); + +typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, + void *); + +/* + * XXX the ignore argument should be removed next time the API version + * is bumped. It used to be the separator. + */ +typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr, + char *ignore, struct _PyArray_Descr *); +typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr, + struct _PyArray_Descr *); + +typedef int (PyArray_FillFunc)(void *, npy_intp, void *); + +typedef int (PyArray_SortFunc)(void *, npy_intp, void *); +typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *); + +typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *); + +typedef int (PyArray_ScalarKindFunc)(void *); + +typedef struct { + npy_intp *ptr; + int len; +} PyArray_Dims; + +typedef struct { + /* + * Functions to cast to most other standard types + * Can have some NULL entries. The types + * DATETIME, TIMEDELTA, and HALF go into the castdict + * even though they are built-in. + */ + PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE]; + + /* The next four functions *cannot* be NULL */ + + /* + * Functions to get and set items with standard Python types + * -- not array scalars + */ + PyArray_GetItemFunc *getitem; + PyArray_SetItemFunc *setitem; + + /* + * Copy and/or swap data. Memory areas may not overlap + * Use memmove first if they might + */ + PyArray_CopySwapNFunc *copyswapn; + PyArray_CopySwapFunc *copyswap; + + /* + * Function to compare items + * Can be NULL + */ + PyArray_CompareFunc *compare; + + /* + * Function to select largest + * Can be NULL + */ + PyArray_ArgFunc *argmax; + + /* + * Function to compute dot product + * Can be NULL + */ + PyArray_DotFunc *dotfunc; + + /* + * Function to scan an ASCII file and + * place a single value plus possible separator + * Can be NULL + */ + PyArray_ScanFunc *scanfunc; + + /* + * Function to read a single value from a string + * and adjust the pointer; Can be NULL + */ + PyArray_FromStrFunc *fromstr; + + /* + * Function to determine if data is zero or not + * If NULL a default version is + * used at Registration time. + */ + PyArray_NonzeroFunc *nonzero; + + /* + * Used for arange. Should return 0 on success + * and -1 on failure. + * Can be NULL. + */ + PyArray_FillFunc *fill; + + /* + * Function to fill arrays with scalar values + * Can be NULL + */ + PyArray_FillWithScalarFunc *fillwithscalar; + + /* + * Sorting functions + * Can be NULL + */ + PyArray_SortFunc *sort[NPY_NSORTS]; + PyArray_ArgSortFunc *argsort[NPY_NSORTS]; + + /* + * Dictionary of additional casting functions + * PyArray_VectorUnaryFuncs + * which can be populated to support casting + * to other registered types. Can be NULL + */ + PyObject *castdict; + + /* + * Functions useful for generalizing + * the casting rules. + * Can be NULL; + */ + PyArray_ScalarKindFunc *scalarkind; + int **cancastscalarkindto; + int *cancastto; + + void *_unused1; + void *_unused2; + void *_unused3; + + /* + * Function to select smallest + * Can be NULL + */ + PyArray_ArgFunc *argmin; + +} PyArray_ArrFuncs; + + +/* The item must be reference counted when it is inserted or extracted. */ +#define NPY_ITEM_REFCOUNT 0x01 +/* Same as needing REFCOUNT */ +#define NPY_ITEM_HASOBJECT 0x01 +/* Convert to list for pickling */ +#define NPY_LIST_PICKLE 0x02 +/* The item is a POINTER */ +#define NPY_ITEM_IS_POINTER 0x04 +/* memory needs to be initialized for this data-type */ +#define NPY_NEEDS_INIT 0x08 +/* operations need Python C-API so don't give-up thread. */ +#define NPY_NEEDS_PYAPI 0x10 +/* Use f.getitem when extracting elements of this data-type */ +#define NPY_USE_GETITEM 0x20 +/* Use f.setitem when setting creating 0-d array from this data-type.*/ +#define NPY_USE_SETITEM 0x40 +/* A sticky flag specifically for structured arrays */ +#define NPY_ALIGNED_STRUCT 0x80 + +/* + *These are inherited for global data-type if any data-types in the + * field have them + */ +#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \ + NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI) + +#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \ + NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \ + NPY_NEEDS_INIT | NPY_NEEDS_PYAPI) + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +/* + * Public version of the Descriptor struct as of 2.x + */ +typedef struct _PyArray_Descr { + PyObject_HEAD + /* + * the type object representing an + * instance of this type -- should not + * be two type_numbers with the same type + * object. + */ + PyTypeObject *typeobj; + /* kind for this type */ + char kind; + /* unique-character representing this type */ + char type; + /* + * '>' (big), '<' (little), '|' + * (not-applicable), or '=' (native). + */ + char byteorder; + /* Former flags flags space (unused) to ensure type_num is stable. */ + char _former_flags; + /* number representing this type */ + int type_num; + /* Space for dtype instance specific flags. */ + npy_uint64 flags; + /* element size (itemsize) for this type */ + npy_intp elsize; + /* alignment needed for this type */ + npy_intp alignment; + /* metadata dict or NULL */ + PyObject *metadata; + /* Cached hash value (-1 if not yet computed). */ + npy_hash_t hash; + /* Unused slot (must be initialized to NULL) for future use */ + void *reserved_null[2]; +} PyArray_Descr; + +#else /* 1.x and 2.x compatible version (only shared fields): */ + +typedef struct _PyArray_Descr { + PyObject_HEAD + PyTypeObject *typeobj; + char kind; + char type; + char byteorder; + char _former_flags; + int type_num; +} PyArray_Descr; + +/* To access modified fields, define the full 2.0 struct: */ +typedef struct { + PyObject_HEAD + PyTypeObject *typeobj; + char kind; + char type; + char byteorder; + char _former_flags; + int type_num; + npy_uint64 flags; + npy_intp elsize; + npy_intp alignment; + PyObject *metadata; + npy_hash_t hash; + void *reserved_null[2]; +} _PyArray_DescrNumPy2; + +#endif /* 1.x and 2.x compatible version */ + +/* + * Semi-private struct with additional field of legacy descriptors (must + * check NPY_DT_is_legacy before casting/accessing). The struct is also not + * valid when running on 1.x (i.e. in public API use). + */ +typedef struct { + PyObject_HEAD + PyTypeObject *typeobj; + char kind; + char type; + char byteorder; + char _former_flags; + int type_num; + npy_uint64 flags; + npy_intp elsize; + npy_intp alignment; + PyObject *metadata; + npy_hash_t hash; + void *reserved_null[2]; + struct _arr_descr *subarray; + PyObject *fields; + PyObject *names; + NpyAuxData *c_metadata; +} _PyArray_LegacyDescr; + + +/* + * Umodified PyArray_Descr struct identical to NumPy 1.x. This struct is + * used as a prototype for registering a new legacy DType. + * It is also used to access the fields in user code running on 1.x. + */ +typedef struct { + PyObject_HEAD + PyTypeObject *typeobj; + char kind; + char type; + char byteorder; + char flags; + int type_num; + int elsize; + int alignment; + struct _arr_descr *subarray; + PyObject *fields; + PyObject *names; + PyArray_ArrFuncs *f; + PyObject *metadata; + NpyAuxData *c_metadata; + npy_hash_t hash; +} PyArray_DescrProto; + + +typedef struct _arr_descr { + PyArray_Descr *base; + PyObject *shape; /* a tuple */ +} PyArray_ArrayDescr; + +/* + * Memory handler structure for array data. + */ +/* The declaration of free differs from PyMemAllocatorEx */ +typedef struct { + void *ctx; + void* (*malloc) (void *ctx, size_t size); + void* (*calloc) (void *ctx, size_t nelem, size_t elsize); + void* (*realloc) (void *ctx, void *ptr, size_t new_size); + void (*free) (void *ctx, void *ptr, size_t size); + /* + * This is the end of the version=1 struct. Only add new fields after + * this line + */ +} PyDataMemAllocator; + +typedef struct { + char name[127]; /* multiple of 64 to keep the struct aligned */ + uint8_t version; /* currently 1 */ + PyDataMemAllocator allocator; +} PyDataMem_Handler; + + +/* + * The main array object structure. + * + * It has been recommended to use the inline functions defined below + * (PyArray_DATA and friends) to access fields here for a number of + * releases. Direct access to the members themselves is deprecated. + * To ensure that your code does not use deprecated access, + * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + * (or NPY_1_8_API_VERSION or higher as required). + */ +/* This struct will be moved to a private header in a future release */ +typedef struct tagPyArrayObject_fields { + PyObject_HEAD + /* Pointer to the raw data buffer */ + char *data; + /* The number of dimensions, also called 'ndim' */ + int nd; + /* The size in each dimension, also called 'shape' */ + npy_intp *dimensions; + /* + * Number of bytes to jump to get to the + * next element in each dimension + */ + npy_intp *strides; + /* + * This object is decref'd upon + * deletion of array. Except in the + * case of WRITEBACKIFCOPY which has + * special handling. + * + * For views it points to the original + * array, collapsed so no chains of + * views occur. + * + * For creation from buffer object it + * points to an object that should be + * decref'd on deletion + * + * For WRITEBACKIFCOPY flag this is an + * array to-be-updated upon calling + * PyArray_ResolveWritebackIfCopy + */ + PyObject *base; + /* Pointer to type structure */ + PyArray_Descr *descr; + /* Flags describing array -- see below */ + int flags; + /* For weak references */ + PyObject *weakreflist; +#if NPY_FEATURE_VERSION >= NPY_1_20_API_VERSION + void *_buffer_info; /* private buffer info, tagged to allow warning */ +#endif + /* + * For malloc/calloc/realloc/free per object + */ +#if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION + PyObject *mem_handler; +#endif +} PyArrayObject_fields; + +/* + * To hide the implementation details, we only expose + * the Python struct HEAD. + */ +#if !defined(NPY_NO_DEPRECATED_API) || \ + (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) +/* + * Can't put this in npy_deprecated_api.h like the others. + * PyArrayObject field access is deprecated as of NumPy 1.7. + */ +typedef PyArrayObject_fields PyArrayObject; +#else +typedef struct tagPyArrayObject { + PyObject_HEAD +} PyArrayObject; +#endif + +/* + * Removed 2020-Nov-25, NumPy 1.20 + * #define NPY_SIZEOF_PYARRAYOBJECT (sizeof(PyArrayObject_fields)) + * + * The above macro was removed as it gave a false sense of a stable ABI + * with respect to the structures size. If you require a runtime constant, + * you can use `PyArray_Type.tp_basicsize` instead. Otherwise, please + * see the PyArrayObject documentation or ask the NumPy developers for + * information on how to correctly replace the macro in a way that is + * compatible with multiple NumPy versions. + */ + +/* Mirrors buffer object to ptr */ + +typedef struct { + PyObject_HEAD + PyObject *base; + void *ptr; + npy_intp len; + int flags; +} PyArray_Chunk; + +typedef struct { + NPY_DATETIMEUNIT base; + int num; +} PyArray_DatetimeMetaData; + +typedef struct { + NpyAuxData base; + PyArray_DatetimeMetaData meta; +} PyArray_DatetimeDTypeMetaData; + +/* + * This structure contains an exploded view of a date-time value. + * NaT is represented by year == NPY_DATETIME_NAT. + */ +typedef struct { + npy_int64 year; + npy_int32 month, day, hour, min, sec, us, ps, as; +} npy_datetimestruct; + +/* This structure contains an exploded view of a timedelta value */ +typedef struct { + npy_int64 day; + npy_int32 sec, us, ps, as; +} npy_timedeltastruct; + +typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); + +/* + * Means c-style contiguous (last index varies the fastest). The data + * elements right after each other. + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_C_CONTIGUOUS 0x0001 + +/* + * Set if array is a contiguous Fortran array: the first index varies + * the fastest in memory (strides array is reverse of C-contiguous + * array) + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_F_CONTIGUOUS 0x0002 + +/* + * Note: all 0-d arrays are C_CONTIGUOUS and F_CONTIGUOUS. If a + * 1-d array is C_CONTIGUOUS it is also F_CONTIGUOUS. Arrays with + * more then one dimension can be C_CONTIGUOUS and F_CONTIGUOUS + * at the same time if they have either zero or one element. + * A higher dimensional array always has the same contiguity flags as + * `array.squeeze()`; dimensions with `array.shape[dimension] == 1` are + * effectively ignored when checking for contiguity. + */ + +/* + * If set, the array owns the data: it will be free'd when the array + * is deleted. + * + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_OWNDATA 0x0004 + +/* + * An array never has the next four set; they're only used as parameter + * flags to the various FromAny functions + * + * This flag may be requested in constructor functions. + */ + +/* Cause a cast to occur regardless of whether or not it is safe. */ +#define NPY_ARRAY_FORCECAST 0x0010 + +/* + * Always copy the array. Returned arrays are always CONTIGUOUS, + * ALIGNED, and WRITEABLE. See also: NPY_ARRAY_ENSURENOCOPY = 0x4000. + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ENSURECOPY 0x0020 + +/* + * Make sure the returned array is a base-class ndarray + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ENSUREARRAY 0x0040 + +/* + * Make sure that the strides are in units of the element size Needed + * for some operations with record-arrays. + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 + +/* + * Array data is aligned on the appropriate memory address for the type + * stored according to how the compiler would align things (e.g., an + * array of integers (4 bytes each) starts on a memory address that's + * a multiple of 4) + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_ALIGNED 0x0100 + +/* + * Array data has the native endianness + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_NOTSWAPPED 0x0200 + +/* + * Array data is writeable + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_WRITEABLE 0x0400 + +/* + * If this flag is set, then base contains a pointer to an array of + * the same size that should be updated with the current contents of + * this array when PyArray_ResolveWritebackIfCopy is called. + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_WRITEBACKIFCOPY 0x2000 + +/* + * No copy may be made while converting from an object/array (result is a view) + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ENSURENOCOPY 0x4000 + +/* + * NOTE: there are also internal flags defined in multiarray/arrayobject.h, + * which start at bit 31 and work down. + */ + +#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE) +#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE | \ + NPY_ARRAY_NOTSWAPPED) +#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) +#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_INOUT_ARRAY2 (NPY_ARRAY_CARRAY | \ + NPY_ARRAY_WRITEBACKIFCOPY) +#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) +#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) +#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY) +#define NPY_ARRAY_INOUT_FARRAY2 (NPY_ARRAY_FARRAY | \ + NPY_ARRAY_WRITEBACKIFCOPY) + +#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) + +/* This flag is for the array interface, not PyArrayObject */ +#define NPY_ARR_HAS_DESCR 0x0800 + + + + +/* + * Size of internal buffers used for alignment Make BUFSIZE a multiple + * of sizeof(npy_cdouble) -- usually 16 so that ufunc buffers are aligned + */ +#define NPY_MIN_BUFSIZE ((int)sizeof(npy_cdouble)) +#define NPY_MAX_BUFSIZE (((int)sizeof(npy_cdouble))*1000000) +#define NPY_BUFSIZE 8192 +/* buffer stress test size: */ +/*#define NPY_BUFSIZE 17*/ + +/* + * C API: consists of Macros and functions. The MACROS are defined + * here. + */ + + +#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS((m), NPY_ARRAY_WRITEABLE) +#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS((m), NPY_ARRAY_ALIGNED) + +#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_F_CONTIGUOUS) + +/* the variable is used in some places, so always define it */ +#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL; +#if NPY_ALLOW_THREADS +#define NPY_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS +#define NPY_END_ALLOW_THREADS Py_END_ALLOW_THREADS +#define NPY_BEGIN_THREADS do {_save = PyEval_SaveThread();} while (0); +#define NPY_END_THREADS do { if (_save) \ + { PyEval_RestoreThread(_save); _save = NULL;} } while (0); +#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) do { if ((loop_size) > 500) \ + { _save = PyEval_SaveThread();} } while (0); + + +#define NPY_ALLOW_C_API_DEF PyGILState_STATE __save__; +#define NPY_ALLOW_C_API do {__save__ = PyGILState_Ensure();} while (0); +#define NPY_DISABLE_C_API do {PyGILState_Release(__save__);} while (0); +#else +#define NPY_BEGIN_ALLOW_THREADS +#define NPY_END_ALLOW_THREADS +#define NPY_BEGIN_THREADS +#define NPY_END_THREADS +#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) +#define NPY_BEGIN_THREADS_DESCR(dtype) +#define NPY_END_THREADS_DESCR(dtype) +#define NPY_ALLOW_C_API_DEF +#define NPY_ALLOW_C_API +#define NPY_DISABLE_C_API +#endif + +/********************************** + * The nditer object, added in 1.6 + **********************************/ + +/* The actual structure of the iterator is an internal detail */ +typedef struct NpyIter_InternalOnly NpyIter; + +/* Iterator function pointers that may be specialized */ +typedef int (NpyIter_IterNextFunc)(NpyIter *iter); +typedef void (NpyIter_GetMultiIndexFunc)(NpyIter *iter, + npy_intp *outcoords); + +/*** Global flags that may be passed to the iterator constructors ***/ + +/* Track an index representing C order */ +#define NPY_ITER_C_INDEX 0x00000001 +/* Track an index representing Fortran order */ +#define NPY_ITER_F_INDEX 0x00000002 +/* Track a multi-index */ +#define NPY_ITER_MULTI_INDEX 0x00000004 +/* User code external to the iterator does the 1-dimensional innermost loop */ +#define NPY_ITER_EXTERNAL_LOOP 0x00000008 +/* Convert all the operands to a common data type */ +#define NPY_ITER_COMMON_DTYPE 0x00000010 +/* Operands may hold references, requiring API access during iteration */ +#define NPY_ITER_REFS_OK 0x00000020 +/* Zero-sized operands should be permitted, iteration checks IterSize for 0 */ +#define NPY_ITER_ZEROSIZE_OK 0x00000040 +/* Permits reductions (size-0 stride with dimension size > 1) */ +#define NPY_ITER_REDUCE_OK 0x00000080 +/* Enables sub-range iteration */ +#define NPY_ITER_RANGED 0x00000100 +/* Enables buffering */ +#define NPY_ITER_BUFFERED 0x00000200 +/* When buffering is enabled, grows the inner loop if possible */ +#define NPY_ITER_GROWINNER 0x00000400 +/* Delay allocation of buffers until first Reset* call */ +#define NPY_ITER_DELAY_BUFALLOC 0x00000800 +/* When NPY_KEEPORDER is specified, disable reversing negative-stride axes */ +#define NPY_ITER_DONT_NEGATE_STRIDES 0x00001000 +/* + * If output operands overlap with other operands (based on heuristics that + * has false positives but no false negatives), make temporary copies to + * eliminate overlap. + */ +#define NPY_ITER_COPY_IF_OVERLAP 0x00002000 + +/*** Per-operand flags that may be passed to the iterator constructors ***/ + +/* The operand will be read from and written to */ +#define NPY_ITER_READWRITE 0x00010000 +/* The operand will only be read from */ +#define NPY_ITER_READONLY 0x00020000 +/* The operand will only be written to */ +#define NPY_ITER_WRITEONLY 0x00040000 +/* The operand's data must be in native byte order */ +#define NPY_ITER_NBO 0x00080000 +/* The operand's data must be aligned */ +#define NPY_ITER_ALIGNED 0x00100000 +/* The operand's data must be contiguous (within the inner loop) */ +#define NPY_ITER_CONTIG 0x00200000 +/* The operand may be copied to satisfy requirements */ +#define NPY_ITER_COPY 0x00400000 +/* The operand may be copied with WRITEBACKIFCOPY to satisfy requirements */ +#define NPY_ITER_UPDATEIFCOPY 0x00800000 +/* Allocate the operand if it is NULL */ +#define NPY_ITER_ALLOCATE 0x01000000 +/* If an operand is allocated, don't use any subtype */ +#define NPY_ITER_NO_SUBTYPE 0x02000000 +/* This is a virtual array slot, operand is NULL but temporary data is there */ +#define NPY_ITER_VIRTUAL 0x04000000 +/* Require that the dimension match the iterator dimensions exactly */ +#define NPY_ITER_NO_BROADCAST 0x08000000 +/* A mask is being used on this array, affects buffer -> array copy */ +#define NPY_ITER_WRITEMASKED 0x10000000 +/* This array is the mask for all WRITEMASKED operands */ +#define NPY_ITER_ARRAYMASK 0x20000000 +/* Assume iterator order data access for COPY_IF_OVERLAP */ +#define NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE 0x40000000 + +#define NPY_ITER_GLOBAL_FLAGS 0x0000ffff +#define NPY_ITER_PER_OP_FLAGS 0xffff0000 + + +/***************************** + * Basic iterator object + *****************************/ + +/* FWD declaration */ +typedef struct PyArrayIterObject_tag PyArrayIterObject; + +/* + * type of the function which translates a set of coordinates to a + * pointer to the data + */ +typedef char* (*npy_iter_get_dataptr_t)( + PyArrayIterObject* iter, const npy_intp*); + +struct PyArrayIterObject_tag { + PyObject_HEAD + int nd_m1; /* number of dimensions - 1 */ + npy_intp index, size; + npy_intp coordinates[NPY_MAXDIMS_LEGACY_ITERS];/* N-dimensional loop */ + npy_intp dims_m1[NPY_MAXDIMS_LEGACY_ITERS]; /* ao->dimensions - 1 */ + npy_intp strides[NPY_MAXDIMS_LEGACY_ITERS]; /* ao->strides or fake */ + npy_intp backstrides[NPY_MAXDIMS_LEGACY_ITERS];/* how far to jump back */ + npy_intp factors[NPY_MAXDIMS_LEGACY_ITERS]; /* shape factors */ + PyArrayObject *ao; + char *dataptr; /* pointer to current item*/ + npy_bool contiguous; + + npy_intp bounds[NPY_MAXDIMS_LEGACY_ITERS][2]; + npy_intp limits[NPY_MAXDIMS_LEGACY_ITERS][2]; + npy_intp limits_sizes[NPY_MAXDIMS_LEGACY_ITERS]; + npy_iter_get_dataptr_t translate; +} ; + + +/* Iterator API */ +#define PyArrayIter_Check(op) PyObject_TypeCheck((op), &PyArrayIter_Type) + +#define _PyAIT(it) ((PyArrayIterObject *)(it)) +#define PyArray_ITER_RESET(it) do { \ + _PyAIT(it)->index = 0; \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + memset(_PyAIT(it)->coordinates, 0, \ + (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \ +} while (0) + +#define _PyArray_ITER_NEXT1(it) do { \ + (it)->dataptr += _PyAIT(it)->strides[0]; \ + (it)->coordinates[0]++; \ +} while (0) + +#define _PyArray_ITER_NEXT2(it) do { \ + if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ + (it)->coordinates[1]++; \ + (it)->dataptr += (it)->strides[1]; \ + } \ + else { \ + (it)->coordinates[1] = 0; \ + (it)->coordinates[0]++; \ + (it)->dataptr += (it)->strides[0] - \ + (it)->backstrides[1]; \ + } \ +} while (0) + +#define PyArray_ITER_NEXT(it) do { \ + _PyAIT(it)->index++; \ + if (_PyAIT(it)->nd_m1 == 0) { \ + _PyArray_ITER_NEXT1(_PyAIT(it)); \ + } \ + else if (_PyAIT(it)->contiguous) \ + _PyAIT(it)->dataptr += PyArray_ITEMSIZE(_PyAIT(it)->ao); \ + else if (_PyAIT(it)->nd_m1 == 1) { \ + _PyArray_ITER_NEXT2(_PyAIT(it)); \ + } \ + else { \ + int __npy_i; \ + for (__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \ + if (_PyAIT(it)->coordinates[__npy_i] < \ + _PyAIT(it)->dims_m1[__npy_i]) { \ + _PyAIT(it)->coordinates[__npy_i]++; \ + _PyAIT(it)->dataptr += \ + _PyAIT(it)->strides[__npy_i]; \ + break; \ + } \ + else { \ + _PyAIT(it)->coordinates[__npy_i] = 0; \ + _PyAIT(it)->dataptr -= \ + _PyAIT(it)->backstrides[__npy_i]; \ + } \ + } \ + } \ +} while (0) + +#define PyArray_ITER_GOTO(it, destination) do { \ + int __npy_i; \ + _PyAIT(it)->index = 0; \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \ + if (destination[__npy_i] < 0) { \ + destination[__npy_i] += \ + _PyAIT(it)->dims_m1[__npy_i]+1; \ + } \ + _PyAIT(it)->dataptr += destination[__npy_i] * \ + _PyAIT(it)->strides[__npy_i]; \ + _PyAIT(it)->coordinates[__npy_i] = \ + destination[__npy_i]; \ + _PyAIT(it)->index += destination[__npy_i] * \ + ( __npy_i==_PyAIT(it)->nd_m1 ? 1 : \ + _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \ + } \ +} while (0) + +#define PyArray_ITER_GOTO1D(it, ind) do { \ + int __npy_i; \ + npy_intp __npy_ind = (npy_intp)(ind); \ + if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \ + _PyAIT(it)->index = __npy_ind; \ + if (_PyAIT(it)->nd_m1 == 0) { \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ + __npy_ind * _PyAIT(it)->strides[0]; \ + } \ + else if (_PyAIT(it)->contiguous) \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ + __npy_ind * PyArray_ITEMSIZE(_PyAIT(it)->ao); \ + else { \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \ + __npy_i++) { \ + _PyAIT(it)->coordinates[__npy_i] = \ + (__npy_ind / _PyAIT(it)->factors[__npy_i]); \ + _PyAIT(it)->dataptr += \ + (__npy_ind / _PyAIT(it)->factors[__npy_i]) \ + * _PyAIT(it)->strides[__npy_i]; \ + __npy_ind %= _PyAIT(it)->factors[__npy_i]; \ + } \ + } \ +} while (0) + +#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr)) + +#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size) + + +/* + * Any object passed to PyArray_Broadcast must be binary compatible + * with this structure. + */ + +typedef struct { + PyObject_HEAD + int numiter; /* number of iters */ + npy_intp size; /* broadcasted size */ + npy_intp index; /* current index */ + int nd; /* number of dims */ + npy_intp dimensions[NPY_MAXDIMS_LEGACY_ITERS]; /* dimensions */ + /* + * Space for the individual iterators, do not specify size publicly + * to allow changing it more easily. + * One reason is that Cython uses this for checks and only allows + * growing structs (as of Cython 3.0.6). It also allows NPY_MAXARGS + * to be runtime dependent. + */ +#if (defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD) + PyArrayIterObject *iters[64]; +#elif defined(__cplusplus) + /* + * C++ doesn't strictly support flexible members and gives compilers + * warnings (pedantic only), so we lie. We can't make it 64 because + * then Cython is unhappy (larger struct at runtime is OK smaller not). + */ + PyArrayIterObject *iters[32]; +#else + PyArrayIterObject *iters[]; +#endif +} PyArrayMultiIterObject; + +#define _PyMIT(m) ((PyArrayMultiIterObject *)(m)) +#define PyArray_MultiIter_RESET(multi) do { \ + int __npy_mi; \ + _PyMIT(multi)->index = 0; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} while (0) + +#define PyArray_MultiIter_NEXT(multi) do { \ + int __npy_mi; \ + _PyMIT(multi)->index++; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} while (0) + +#define PyArray_MultiIter_GOTO(multi, dest) do { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest); \ + } \ + _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ +} while (0) + +#define PyArray_MultiIter_GOTO1D(multi, ind) do { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind); \ + } \ + _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ +} while (0) + +#define PyArray_MultiIter_DATA(multi, i) \ + ((void *)(_PyMIT(multi)->iters[i]->dataptr)) + +#define PyArray_MultiIter_NEXTi(multi, i) \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[i]) + +#define PyArray_MultiIter_NOTDONE(multi) \ + (_PyMIT(multi)->index < _PyMIT(multi)->size) + + +static NPY_INLINE int +PyArray_MultiIter_NUMITER(PyArrayMultiIterObject *multi) +{ + return multi->numiter; +} + + +static NPY_INLINE npy_intp +PyArray_MultiIter_SIZE(PyArrayMultiIterObject *multi) +{ + return multi->size; +} + + +static NPY_INLINE npy_intp +PyArray_MultiIter_INDEX(PyArrayMultiIterObject *multi) +{ + return multi->index; +} + + +static NPY_INLINE int +PyArray_MultiIter_NDIM(PyArrayMultiIterObject *multi) +{ + return multi->nd; +} + + +static NPY_INLINE npy_intp * +PyArray_MultiIter_DIMS(PyArrayMultiIterObject *multi) +{ + return multi->dimensions; +} + + +static NPY_INLINE void ** +PyArray_MultiIter_ITERS(PyArrayMultiIterObject *multi) +{ + return (void**)multi->iters; +} + + +enum { + NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, + NPY_NEIGHBORHOOD_ITER_ONE_PADDING, + NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING, + NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING, + NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING +}; + +typedef struct { + PyObject_HEAD + + /* + * PyArrayIterObject part: keep this in this exact order + */ + int nd_m1; /* number of dimensions - 1 */ + npy_intp index, size; + npy_intp coordinates[NPY_MAXDIMS_LEGACY_ITERS];/* N-dimensional loop */ + npy_intp dims_m1[NPY_MAXDIMS_LEGACY_ITERS]; /* ao->dimensions - 1 */ + npy_intp strides[NPY_MAXDIMS_LEGACY_ITERS]; /* ao->strides or fake */ + npy_intp backstrides[NPY_MAXDIMS_LEGACY_ITERS];/* how far to jump back */ + npy_intp factors[NPY_MAXDIMS_LEGACY_ITERS]; /* shape factors */ + PyArrayObject *ao; + char *dataptr; /* pointer to current item*/ + npy_bool contiguous; + + npy_intp bounds[NPY_MAXDIMS_LEGACY_ITERS][2]; + npy_intp limits[NPY_MAXDIMS_LEGACY_ITERS][2]; + npy_intp limits_sizes[NPY_MAXDIMS_LEGACY_ITERS]; + npy_iter_get_dataptr_t translate; + + /* + * New members + */ + npy_intp nd; + + /* Dimensions is the dimension of the array */ + npy_intp dimensions[NPY_MAXDIMS_LEGACY_ITERS]; + + /* + * Neighborhood points coordinates are computed relatively to the + * point pointed by _internal_iter + */ + PyArrayIterObject* _internal_iter; + /* + * To keep a reference to the representation of the constant value + * for constant padding + */ + char* constant; + + int mode; +} PyArrayNeighborhoodIterObject; + +/* + * Neighborhood iterator API + */ + +/* General: those work for any mode */ +static inline int +PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter); +static inline int +PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter); +#if 0 +static inline int +PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter); +#endif + +/* + * Include inline implementations - functions defined there are not + * considered public API + */ +#define NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_ +#include "_neighborhood_iterator_imp.h" +#undef NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_ + + + +/* The default array type */ +#define NPY_DEFAULT_TYPE NPY_DOUBLE +/* default integer type defined in npy_2_compat header */ + +/* + * All sorts of useful ways to look into a PyArrayObject. It is recommended + * to use PyArrayObject * objects instead of always casting from PyObject *, + * for improved type checking. + * + * In many cases here the macro versions of the accessors are deprecated, + * but can't be immediately changed to inline functions because the + * preexisting macros accept PyObject * and do automatic casts. Inline + * functions accepting PyArrayObject * provides for some compile-time + * checking of correctness when working with these objects in C. + */ + +#define PyArray_ISONESEGMENT(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) || \ + PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS)) + +#define PyArray_ISFORTRAN(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) && \ + (!PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS))) + +#define PyArray_FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) ? \ + NPY_ARRAY_F_CONTIGUOUS : 0)) + +static inline int +PyArray_NDIM(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->nd; +} + +static inline void * +PyArray_DATA(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->data; +} + +static inline char * +PyArray_BYTES(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->data; +} + +static inline npy_intp * +PyArray_DIMS(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->dimensions; +} + +static inline npy_intp * +PyArray_STRIDES(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->strides; +} + +static inline npy_intp +PyArray_DIM(const PyArrayObject *arr, int idim) +{ + return ((PyArrayObject_fields *)arr)->dimensions[idim]; +} + +static inline npy_intp +PyArray_STRIDE(const PyArrayObject *arr, int istride) +{ + return ((PyArrayObject_fields *)arr)->strides[istride]; +} + +static inline NPY_RETURNS_BORROWED_REF PyObject * +PyArray_BASE(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->base; +} + +static inline NPY_RETURNS_BORROWED_REF PyArray_Descr * +PyArray_DESCR(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr; +} + +static inline int +PyArray_FLAGS(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->flags; +} + + +static inline int +PyArray_TYPE(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr->type_num; +} + +static inline int +PyArray_CHKFLAGS(const PyArrayObject *arr, int flags) +{ + return (PyArray_FLAGS(arr) & flags) == flags; +} + +static inline PyArray_Descr * +PyArray_DTYPE(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr; +} + +static inline npy_intp * +PyArray_SHAPE(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->dimensions; +} + +/* + * Enables the specified array flags. Does no checking, + * assumes you know what you're doing. + */ +static inline void +PyArray_ENABLEFLAGS(PyArrayObject *arr, int flags) +{ + ((PyArrayObject_fields *)arr)->flags |= flags; +} + +/* + * Clears the specified array flags. Does no checking, + * assumes you know what you're doing. + */ +static inline void +PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) +{ + ((PyArrayObject_fields *)arr)->flags &= ~flags; +} + +#if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION + static inline NPY_RETURNS_BORROWED_REF PyObject * + PyArray_HANDLER(PyArrayObject *arr) + { + return ((PyArrayObject_fields *)arr)->mem_handler; + } +#endif + +#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) + +#define PyTypeNum_ISUNSIGNED(type) (((type) == NPY_UBYTE) || \ + ((type) == NPY_USHORT) || \ + ((type) == NPY_UINT) || \ + ((type) == NPY_ULONG) || \ + ((type) == NPY_ULONGLONG)) + +#define PyTypeNum_ISSIGNED(type) (((type) == NPY_BYTE) || \ + ((type) == NPY_SHORT) || \ + ((type) == NPY_INT) || \ + ((type) == NPY_LONG) || \ + ((type) == NPY_LONGLONG)) + +#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ + ((type) <= NPY_ULONGLONG)) + +#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ + ((type) <= NPY_LONGDOUBLE)) || \ + ((type) == NPY_HALF)) + +#define PyTypeNum_ISNUMBER(type) (((type) <= NPY_CLONGDOUBLE) || \ + ((type) == NPY_HALF)) + +#define PyTypeNum_ISSTRING(type) (((type) == NPY_STRING) || \ + ((type) == NPY_UNICODE)) + +#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ + ((type) <= NPY_CLONGDOUBLE)) + +#define PyTypeNum_ISFLEXIBLE(type) (((type) >=NPY_STRING) && \ + ((type) <=NPY_VOID)) + +#define PyTypeNum_ISDATETIME(type) (((type) >=NPY_DATETIME) && \ + ((type) <=NPY_TIMEDELTA)) + +#define PyTypeNum_ISUSERDEF(type) (((type) >= NPY_USERDEF) && \ + ((type) < NPY_USERDEF+ \ + NPY_NUMUSERTYPES)) + +#define PyTypeNum_ISEXTENDED(type) (PyTypeNum_ISFLEXIBLE(type) || \ + PyTypeNum_ISUSERDEF(type)) + +#define PyTypeNum_ISOBJECT(type) ((type) == NPY_OBJECT) + + +#define PyDataType_ISLEGACY(dtype) ((dtype)->type_num < NPY_VSTRING && ((dtype)->type_num >= 0)) +#define PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(((PyArray_Descr*)(obj))->type_num ) +#define PyDataType_ISFLOAT(obj) PyTypeNum_ISFLOAT(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISNUMBER(obj) PyTypeNum_ISNUMBER(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISSTRING(obj) PyTypeNum_ISSTRING(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISDATETIME(obj) PyTypeNum_ISDATETIME(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_MAKEUNSIZED(dtype) ((dtype)->elsize = 0) +/* + * PyDataType_* FLAGS, FLACHK, REFCHK, HASFIELDS, HASSUBARRAY, UNSIZED, + * SUBARRAY, NAMES, FIELDS, C_METADATA, and METADATA require version specific + * lookup and are defined in npy_2_compat.h. + */ + + +#define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj)) +#define PyArray_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(PyArray_TYPE(obj)) +#define PyArray_ISSIGNED(obj) PyTypeNum_ISSIGNED(PyArray_TYPE(obj)) +#define PyArray_ISINTEGER(obj) PyTypeNum_ISINTEGER(PyArray_TYPE(obj)) +#define PyArray_ISFLOAT(obj) PyTypeNum_ISFLOAT(PyArray_TYPE(obj)) +#define PyArray_ISNUMBER(obj) PyTypeNum_ISNUMBER(PyArray_TYPE(obj)) +#define PyArray_ISSTRING(obj) PyTypeNum_ISSTRING(PyArray_TYPE(obj)) +#define PyArray_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(PyArray_TYPE(obj)) +#define PyArray_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) +#define PyArray_ISDATETIME(obj) PyTypeNum_ISDATETIME(PyArray_TYPE(obj)) +#define PyArray_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(PyArray_TYPE(obj)) +#define PyArray_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(PyArray_TYPE(obj)) +#define PyArray_ISOBJECT(obj) PyTypeNum_ISOBJECT(PyArray_TYPE(obj)) +#define PyArray_HASFIELDS(obj) PyDataType_HASFIELDS(PyArray_DESCR(obj)) + + /* + * FIXME: This should check for a flag on the data-type that + * states whether or not it is variable length. Because the + * ISFLEXIBLE check is hard-coded to the built-in data-types. + */ +#define PyArray_ISVARIABLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) + +#define PyArray_SAFEALIGNEDCOPY(obj) (PyArray_ISALIGNED(obj) && !PyArray_ISVARIABLE(obj)) + + +#define NPY_LITTLE '<' +#define NPY_BIG '>' +#define NPY_NATIVE '=' +#define NPY_SWAP 's' +#define NPY_IGNORE '|' + +#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN +#define NPY_NATBYTE NPY_BIG +#define NPY_OPPBYTE NPY_LITTLE +#else +#define NPY_NATBYTE NPY_LITTLE +#define NPY_OPPBYTE NPY_BIG +#endif + +#define PyArray_ISNBO(arg) ((arg) != NPY_OPPBYTE) +#define PyArray_IsNativeByteOrder PyArray_ISNBO +#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyArray_DESCR(m)->byteorder) +#define PyArray_ISBYTESWAPPED(m) (!PyArray_ISNOTSWAPPED(m)) + +#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ + PyArray_ISNOTSWAPPED(m)) + +#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY) +#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO) +#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY) +#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO) +#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED) +#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) + + +#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(((PyArray_Descr *)(d))->byteorder) +#define PyDataType_ISBYTESWAPPED(d) (!PyDataType_ISNOTSWAPPED(d)) + +/************************************************************ + * A struct used by PyArray_CreateSortedStridePerm, new in 1.7. + ************************************************************/ + +typedef struct { + npy_intp perm, stride; +} npy_stride_sort_item; + +/************************************************************ + * This is the form of the struct that's stored in the + * PyCapsule returned by an array's __array_struct__ attribute. See + * https://docs.scipy.org/doc/numpy/reference/arrays.interface.html for the full + * documentation. + ************************************************************/ +typedef struct { + int two; /* + * contains the integer 2 as a sanity + * check + */ + + int nd; /* number of dimensions */ + + char typekind; /* + * kind in array --- character code of + * typestr + */ + + int itemsize; /* size of each element */ + + int flags; /* + * how should be data interpreted. Valid + * flags are CONTIGUOUS (1), F_CONTIGUOUS (2), + * ALIGNED (0x100), NOTSWAPPED (0x200), and + * WRITEABLE (0x400). ARR_HAS_DESCR (0x800) + * states that arrdescr field is present in + * structure + */ + + npy_intp *shape; /* + * A length-nd array of shape + * information + */ + + npy_intp *strides; /* A length-nd array of stride information */ + + void *data; /* A pointer to the first element of the array */ + + PyObject *descr; /* + * A list of fields or NULL (ignored if flags + * does not have ARR_HAS_DESCR flag set) + */ +} PyArrayInterface; + + +/**************************************** + * NpyString + * + * Types used by the NpyString API. + ****************************************/ + +/* + * A "packed" encoded string. The string data must be accessed by first unpacking the string. + */ +typedef struct npy_packed_static_string npy_packed_static_string; + +/* + * An unpacked read-only view onto the data in a packed string + */ +typedef struct npy_unpacked_static_string { + size_t size; + const char *buf; +} npy_static_string; + +/* + * Handles heap allocations for static strings. + */ +typedef struct npy_string_allocator npy_string_allocator; + +typedef struct { + PyArray_Descr base; + // The object representing a null value + PyObject *na_object; + // Flag indicating whether or not to coerce arbitrary objects to strings + char coerce; + // Flag indicating the na object is NaN-like + char has_nan_na; + // Flag indicating the na object is a string + char has_string_na; + // If nonzero, indicates that this instance is owned by an array already + char array_owned; + // The string data to use when a default string is needed + npy_static_string default_string; + // The name of the missing data object, if any + npy_static_string na_name; + // the allocator should only be directly accessed after + // acquiring the allocator_lock and the lock should + // be released immediately after the allocator is + // no longer needed + npy_string_allocator *allocator; +} PyArray_StringDTypeObject; + +/* + * PyArray_DTypeMeta related definitions. + * + * As of now, this API is preliminary and will be extended as necessary. + */ +#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD + /* + * The Structures defined in this block are currently considered + * private API and may change without warning! + * Part of this (at least the size) is expected to be public API without + * further modifications. + */ + /* TODO: Make this definition public in the API, as soon as its settled */ + NPY_NO_EXPORT extern PyTypeObject PyArrayDTypeMeta_Type; + + /* + * While NumPy DTypes would not need to be heap types the plan is to + * make DTypes available in Python at which point they will be heap types. + * Since we also wish to add fields to the DType class, this looks like + * a typical instance definition, but with PyHeapTypeObject instead of + * only the PyObject_HEAD. + * This must only be exposed very extremely careful consideration, since + * it is a fairly complex construct which may be better to allow + * refactoring of. + */ + typedef struct { + PyHeapTypeObject super; + + /* + * Most DTypes will have a singleton default instance, for the + * parametric legacy DTypes (bytes, string, void, datetime) this + * may be a pointer to the *prototype* instance? + */ + PyArray_Descr *singleton; + /* Copy of the legacy DTypes type number, usually invalid. */ + int type_num; + + /* The type object of the scalar instances (may be NULL?) */ + PyTypeObject *scalar_type; + /* + * DType flags to signal legacy, parametric, or + * abstract. But plenty of space for additional information/flags. + */ + npy_uint64 flags; + + /* + * Use indirection in order to allow a fixed size for this struct. + * A stable ABI size makes creating a static DType less painful + * while also ensuring flexibility for all opaque API (with one + * indirection due the pointer lookup). + */ + void *dt_slots; + void *reserved[3]; + } PyArray_DTypeMeta; + +#endif /* NPY_INTERNAL_BUILD */ + + +/* + * Use the keyword NPY_DEPRECATED_INCLUDES to ensure that the header files + * npy_*_*_deprecated_api.h are only included from here and nowhere else. + */ +#ifdef NPY_DEPRECATED_INCLUDES +#error "Do not use the reserved keyword NPY_DEPRECATED_INCLUDES." +#endif +#define NPY_DEPRECATED_INCLUDES +/* + * There is no file npy_1_8_deprecated_api.h since there are no additional + * deprecated API features in NumPy 1.8. + * + * Note to maintainers: insert code like the following in future NumPy + * versions. + * + * #if !defined(NPY_NO_DEPRECATED_API) || \ + * (NPY_NO_DEPRECATED_API < NPY_1_9_API_VERSION) + * #include "npy_1_9_deprecated_api.h" + * #endif + * Then in the npy_1_9_deprecated_api.h header add something like this + * -------------------- + * #ifndef NPY_DEPRECATED_INCLUDES + * #error "Should never include npy_*_*_deprecated_api directly." + * #endif + * #ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_ + * #define NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_ + * + * #ifndef NPY_NO_DEPRECATED_API + * #if defined(_WIN32) + * #define _WARN___STR2__(x) #x + * #define _WARN___STR1__(x) _WARN___STR2__(x) + * #define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: " + * #pragma message(_WARN___LOC__"Using deprecated NumPy API, disable it with " \ + * "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION") + * #else + * #warning "Using deprecated NumPy API, disable it with " \ + * "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION" + * #endif + * #endif + * -------------------- + */ +#undef NPY_DEPRECATED_INCLUDES + +#ifdef __cplusplus +} +#endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_ */ diff --git a/local_packages/numpy/_core/include/numpy/npy_2_compat.h b/local_packages/numpy/_core/include/numpy/npy_2_compat.h new file mode 100644 index 0000000..e39e65a --- /dev/null +++ b/local_packages/numpy/_core/include/numpy/npy_2_compat.h @@ -0,0 +1,249 @@ +/* + * This header file defines relevant features which: + * - Require runtime inspection depending on the NumPy version. + * - May be needed when compiling with an older version of NumPy to allow + * a smooth transition. + * + * As such, it is shipped with NumPy 2.0, but designed to be vendored in full + * or parts by downstream projects. + * + * It must be included after any other includes. `import_array()` must have + * been called in the scope or version dependency will misbehave, even when + * only `PyUFunc_` API is used. + * + * If required complicated defs (with inline functions) should be written as: + * + * #if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION + * Simple definition when NumPy 2.0 API is guaranteed. + * #else + * static inline definition of a 1.x compatibility shim + * #if NPY_ABI_VERSION < 0x02000000 + * Make 1.x compatibility shim the public API (1.x only branch) + * #else + * Runtime dispatched version (1.x or 2.x) + * #endif + * #endif + * + * An internal build always passes NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION + */ + +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPAT_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPAT_H_ + +/* + * New macros for accessing real and complex part of a complex number can be + * found in "npy_2_complexcompat.h". + */ + + +/* + * This header is meant to be included by downstream directly for 1.x compat. + * In that case we need to ensure that users first included the full headers + * and not just `ndarraytypes.h`. + */ + +#ifndef NPY_FEATURE_VERSION + #error "The NumPy 2 compat header requires `import_array()` for which " \ + "the `ndarraytypes.h` header include is not sufficient. Please " \ + "include it after `numpy/ndarrayobject.h` or similar.\n" \ + "To simplify inclusion, you may use `PyArray_ImportNumPy()` " \ + "which is defined in the compat header and is lightweight (can be)." +#endif + +#if NPY_ABI_VERSION < 0x02000000 + /* + * Define 2.0 feature version as it is needed below to decide whether we + * compile for both 1.x and 2.x (defining it guarantees 1.x only). + */ + #define NPY_2_0_API_VERSION 0x00000012 + /* + * If we are compiling with NumPy 1.x, PyArray_RUNTIME_VERSION so we + * pretend the `PyArray_RUNTIME_VERSION` is `NPY_FEATURE_VERSION`. + * This allows downstream to use `PyArray_RUNTIME_VERSION` if they need to. + */ + #define PyArray_RUNTIME_VERSION NPY_FEATURE_VERSION + /* Compiling on NumPy 1.x where these are the same: */ + #define PyArray_DescrProto PyArray_Descr +#endif + + +/* + * Define a better way to call `_import_array()` to simplify backporting as + * we now require imports more often (necessary to make ABI flexible). + */ +#ifdef import_array1 + +static inline int +PyArray_ImportNumPyAPI(void) +{ + if (NPY_UNLIKELY(PyArray_API == NULL)) { + import_array1(-1); + } + return 0; +} + +#endif /* import_array1 */ + + +/* + * NPY_DEFAULT_INT + * + * The default integer has changed, `NPY_DEFAULT_INT` is available at runtime + * for use as type number, e.g. `PyArray_DescrFromType(NPY_DEFAULT_INT)`. + * + * NPY_RAVEL_AXIS + * + * This was introduced in NumPy 2.0 to allow indicating that an axis should be + * raveled in an operation. Before NumPy 2.0, NPY_MAXDIMS was used for this purpose. + * + * NPY_MAXDIMS + * + * A constant indicating the maximum number dimensions allowed when creating + * an ndarray. + * + * NPY_NTYPES_LEGACY + * + * The number of built-in NumPy dtypes. + */ +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION + #define NPY_DEFAULT_INT NPY_INTP + #define NPY_RAVEL_AXIS NPY_MIN_INT + #define NPY_MAXARGS 64 + +#elif NPY_ABI_VERSION < 0x02000000 + #define NPY_DEFAULT_INT NPY_LONG + #define NPY_RAVEL_AXIS 32 + #define NPY_MAXARGS 32 + + /* Aliases of 2.x names to 1.x only equivalent names */ + #define NPY_NTYPES NPY_NTYPES_LEGACY + #define PyArray_DescrProto PyArray_Descr + #define _PyArray_LegacyDescr PyArray_Descr + /* NumPy 2 definition always works, but add it for 1.x only */ + #define PyDataType_ISLEGACY(dtype) (1) +#else + #define NPY_DEFAULT_INT \ + (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? NPY_INTP : NPY_LONG) + #define NPY_RAVEL_AXIS \ + (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? NPY_MIN_INT : 32) + #define NPY_MAXARGS \ + (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? 64 : 32) +#endif + + +/* + * Access inline functions for descriptor fields. Except for the first + * few fields, these needed to be moved (elsize, alignment) for + * additional space. Or they are descriptor specific and are not generally + * available anymore (metadata, c_metadata, subarray, names, fields). + * + * Most of these are defined via the `DESCR_ACCESSOR` macro helper. + */ +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION || NPY_ABI_VERSION < 0x02000000 + /* Compiling for 1.x or 2.x only, direct field access is OK: */ + + static inline void + PyDataType_SET_ELSIZE(PyArray_Descr *dtype, npy_intp size) + { + dtype->elsize = size; + } + + static inline npy_uint64 + PyDataType_FLAGS(const PyArray_Descr *dtype) + { + #if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION + return dtype->flags; + #else + return (unsigned char)dtype->flags; /* Need unsigned cast on 1.x */ + #endif + } + + #define DESCR_ACCESSOR(FIELD, field, type, legacy_only) \ + static inline type \ + PyDataType_##FIELD(const PyArray_Descr *dtype) { \ + if (legacy_only && !PyDataType_ISLEGACY(dtype)) { \ + return (type)0; \ + } \ + return ((_PyArray_LegacyDescr *)dtype)->field; \ + } +#else /* compiling for both 1.x and 2.x */ + + static inline void + PyDataType_SET_ELSIZE(PyArray_Descr *dtype, npy_intp size) + { + if (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION) { + ((_PyArray_DescrNumPy2 *)dtype)->elsize = size; + } + else { + ((PyArray_DescrProto *)dtype)->elsize = (int)size; + } + } + + static inline npy_uint64 + PyDataType_FLAGS(const PyArray_Descr *dtype) + { + if (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION) { + return ((_PyArray_DescrNumPy2 *)dtype)->flags; + } + else { + return (unsigned char)((PyArray_DescrProto *)dtype)->flags; + } + } + + /* Cast to LegacyDescr always fine but needed when `legacy_only` */ + #define DESCR_ACCESSOR(FIELD, field, type, legacy_only) \ + static inline type \ + PyDataType_##FIELD(const PyArray_Descr *dtype) { \ + if (legacy_only && !PyDataType_ISLEGACY(dtype)) { \ + return (type)0; \ + } \ + if (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION) { \ + return ((_PyArray_LegacyDescr *)dtype)->field; \ + } \ + else { \ + return ((PyArray_DescrProto *)dtype)->field; \ + } \ + } +#endif + +DESCR_ACCESSOR(ELSIZE, elsize, npy_intp, 0) +DESCR_ACCESSOR(ALIGNMENT, alignment, npy_intp, 0) +DESCR_ACCESSOR(METADATA, metadata, PyObject *, 1) +DESCR_ACCESSOR(SUBARRAY, subarray, PyArray_ArrayDescr *, 1) +DESCR_ACCESSOR(NAMES, names, PyObject *, 1) +DESCR_ACCESSOR(FIELDS, fields, PyObject *, 1) +DESCR_ACCESSOR(C_METADATA, c_metadata, NpyAuxData *, 1) + +#undef DESCR_ACCESSOR + + +#if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD) +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION + static inline PyArray_ArrFuncs * + PyDataType_GetArrFuncs(const PyArray_Descr *descr) + { + return _PyDataType_GetArrFuncs(descr); + } +#elif NPY_ABI_VERSION < 0x02000000 + static inline PyArray_ArrFuncs * + PyDataType_GetArrFuncs(const PyArray_Descr *descr) + { + return descr->f; + } +#else + static inline PyArray_ArrFuncs * + PyDataType_GetArrFuncs(const PyArray_Descr *descr) + { + if (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION) { + return _PyDataType_GetArrFuncs(descr); + } + else { + return ((PyArray_DescrProto *)descr)->f; + } + } +#endif + + +#endif /* not internal build */ + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPAT_H_ */ diff --git a/local_packages/numpy/_core/include/numpy/npy_2_complexcompat.h b/local_packages/numpy/_core/include/numpy/npy_2_complexcompat.h new file mode 100644 index 0000000..0b50901 --- /dev/null +++ b/local_packages/numpy/_core/include/numpy/npy_2_complexcompat.h @@ -0,0 +1,28 @@ +/* This header is designed to be copy-pasted into downstream packages, since it provides + a compatibility layer between the old C struct complex types and the new native C99 + complex types. The new macros are in numpy/npy_math.h, which is why it is included here. */ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPLEXCOMPAT_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPLEXCOMPAT_H_ + +#include + +#ifndef NPY_CSETREALF +#define NPY_CSETREALF(c, r) (c)->real = (r) +#endif +#ifndef NPY_CSETIMAGF +#define NPY_CSETIMAGF(c, i) (c)->imag = (i) +#endif +#ifndef NPY_CSETREAL +#define NPY_CSETREAL(c, r) (c)->real = (r) +#endif +#ifndef NPY_CSETIMAG +#define NPY_CSETIMAG(c, i) (c)->imag = (i) +#endif +#ifndef NPY_CSETREALL +#define NPY_CSETREALL(c, r) (c)->real = (r) +#endif +#ifndef NPY_CSETIMAGL +#define NPY_CSETIMAGL(c, i) (c)->imag = (i) +#endif + +#endif diff --git a/local_packages/numpy/_core/include/numpy/npy_3kcompat.h b/local_packages/numpy/_core/include/numpy/npy_3kcompat.h new file mode 100644 index 0000000..cd91f66 --- /dev/null +++ b/local_packages/numpy/_core/include/numpy/npy_3kcompat.h @@ -0,0 +1,374 @@ +/* + * This is a convenience header file providing compatibility utilities + * for supporting different minor versions of Python 3. + * It was originally used to support the transition from Python 2, + * hence the "3k" naming. + * + * If you want to use this for your own projects, it's recommended to make a + * copy of it. We don't provide backwards compatibility guarantees. + */ + +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_ + +#include +#include + +#include "npy_common.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Python13 removes _PyLong_AsInt */ +static inline int +Npy__PyLong_AsInt(PyObject *obj) +{ + int overflow; + long result = PyLong_AsLongAndOverflow(obj, &overflow); + + /* INT_MAX and INT_MIN are defined in Python.h */ + if (overflow || result > INT_MAX || result < INT_MIN) { + /* XXX: could be cute and give a different + message for overflow == -1 */ + PyErr_SetString(PyExc_OverflowError, + "Python int too large to convert to C int"); + return -1; + } + return (int)result; +} + +#if defined _MSC_VER && _MSC_VER >= 1900 + +#include + +/* + * Macros to protect CRT calls against instant termination when passed an + * invalid parameter (https://bugs.python.org/issue23524). + */ +extern _invalid_parameter_handler _Py_silent_invalid_parameter_handler; +#define NPY_BEGIN_SUPPRESS_IPH { _invalid_parameter_handler _Py_old_handler = \ + _set_thread_local_invalid_parameter_handler(_Py_silent_invalid_parameter_handler); +#define NPY_END_SUPPRESS_IPH _set_thread_local_invalid_parameter_handler(_Py_old_handler); } + +#else + +#define NPY_BEGIN_SUPPRESS_IPH +#define NPY_END_SUPPRESS_IPH + +#endif /* _MSC_VER >= 1900 */ + +/* + * PyFile_* compatibility + */ + +/* + * Get a FILE* handle to the file represented by the Python object + */ +static inline FILE* +npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos) +{ + int fd, fd2, unbuf; + Py_ssize_t fd2_tmp; + PyObject *ret, *os, *io, *io_raw; + npy_off_t pos; + FILE *handle; + + /* Flush first to ensure things end up in the file in the correct order */ + ret = PyObject_CallMethod(file, "flush", ""); + if (ret == NULL) { + return NULL; + } + Py_DECREF(ret); + fd = PyObject_AsFileDescriptor(file); + if (fd == -1) { + return NULL; + } + + /* + * The handle needs to be dup'd because we have to call fclose + * at the end + */ + os = PyImport_ImportModule("os"); + if (os == NULL) { + return NULL; + } + ret = PyObject_CallMethod(os, "dup", "i", fd); + Py_DECREF(os); + if (ret == NULL) { + return NULL; + } + fd2_tmp = PyNumber_AsSsize_t(ret, PyExc_IOError); + Py_DECREF(ret); + if (fd2_tmp == -1 && PyErr_Occurred()) { + return NULL; + } + if (fd2_tmp < INT_MIN || fd2_tmp > INT_MAX) { + PyErr_SetString(PyExc_IOError, + "Getting an 'int' from os.dup() failed"); + return NULL; + } + fd2 = (int)fd2_tmp; + + /* Convert to FILE* handle */ +#ifdef _WIN32 + NPY_BEGIN_SUPPRESS_IPH + handle = _fdopen(fd2, mode); + NPY_END_SUPPRESS_IPH +#else + handle = fdopen(fd2, mode); +#endif + if (handle == NULL) { + PyErr_SetString(PyExc_IOError, + "Getting a FILE* from a Python file object via " + "_fdopen failed. If you built NumPy, you probably " + "linked with the wrong debug/release runtime"); + return NULL; + } + + /* Record the original raw file handle position */ + *orig_pos = npy_ftell(handle); + if (*orig_pos == -1) { + /* The io module is needed to determine if buffering is used */ + io = PyImport_ImportModule("io"); + if (io == NULL) { + fclose(handle); + return NULL; + } + /* File object instances of RawIOBase are unbuffered */ + io_raw = PyObject_GetAttrString(io, "RawIOBase"); + Py_DECREF(io); + if (io_raw == NULL) { + fclose(handle); + return NULL; + } + unbuf = PyObject_IsInstance(file, io_raw); + Py_DECREF(io_raw); + if (unbuf == 1) { + /* Succeed if the IO is unbuffered */ + return handle; + } + else { + PyErr_SetString(PyExc_IOError, "obtaining file position failed"); + fclose(handle); + return NULL; + } + } + + /* Seek raw handle to the Python-side position */ + ret = PyObject_CallMethod(file, "tell", ""); + if (ret == NULL) { + fclose(handle); + return NULL; + } + pos = PyLong_AsLongLong(ret); + Py_DECREF(ret); + if (PyErr_Occurred()) { + fclose(handle); + return NULL; + } + if (npy_fseek(handle, pos, SEEK_SET) == -1) { + PyErr_SetString(PyExc_IOError, "seeking file failed"); + fclose(handle); + return NULL; + } + return handle; +} + +/* + * Close the dup-ed file handle, and seek the Python one to the current position + */ +static inline int +npy_PyFile_DupClose2(PyObject *file, FILE* handle, npy_off_t orig_pos) +{ + int fd, unbuf; + PyObject *ret, *io, *io_raw; + npy_off_t position; + + position = npy_ftell(handle); + + /* Close the FILE* handle */ + fclose(handle); + + /* + * Restore original file handle position, in order to not confuse + * Python-side data structures + */ + fd = PyObject_AsFileDescriptor(file); + if (fd == -1) { + return -1; + } + + if (npy_lseek(fd, orig_pos, SEEK_SET) == -1) { + + /* The io module is needed to determine if buffering is used */ + io = PyImport_ImportModule("io"); + if (io == NULL) { + return -1; + } + /* File object instances of RawIOBase are unbuffered */ + io_raw = PyObject_GetAttrString(io, "RawIOBase"); + Py_DECREF(io); + if (io_raw == NULL) { + return -1; + } + unbuf = PyObject_IsInstance(file, io_raw); + Py_DECREF(io_raw); + if (unbuf == 1) { + /* Succeed if the IO is unbuffered */ + return 0; + } + else { + PyErr_SetString(PyExc_IOError, "seeking file failed"); + return -1; + } + } + + if (position == -1) { + PyErr_SetString(PyExc_IOError, "obtaining file position failed"); + return -1; + } + + /* Seek Python-side handle to the FILE* handle position */ + ret = PyObject_CallMethod(file, "seek", NPY_OFF_T_PYFMT "i", position, 0); + if (ret == NULL) { + return -1; + } + Py_DECREF(ret); + return 0; +} + +static inline PyObject* +npy_PyFile_OpenFile(PyObject *filename, const char *mode) +{ + PyObject *open; + open = PyDict_GetItemString(PyEval_GetBuiltins(), "open"); // noqa: borrowed-ref OK + if (open == NULL) { + return NULL; + } + return PyObject_CallFunction(open, "Os", filename, mode); +} + +static inline int +npy_PyFile_CloseFile(PyObject *file) +{ + PyObject *ret; + + ret = PyObject_CallMethod(file, "close", NULL); + if (ret == NULL) { + return -1; + } + Py_DECREF(ret); + return 0; +} + +/* This is a copy of _PyErr_ChainExceptions, which + * is no longer exported from Python3.12 + */ +static inline void +npy_PyErr_ChainExceptions(PyObject *exc, PyObject *val, PyObject *tb) +{ + if (exc == NULL) + return; + + if (PyErr_Occurred()) { + PyObject *exc2, *val2, *tb2; + PyErr_Fetch(&exc2, &val2, &tb2); + PyErr_NormalizeException(&exc, &val, &tb); + if (tb != NULL) { + PyException_SetTraceback(val, tb); + Py_DECREF(tb); + } + Py_DECREF(exc); + PyErr_NormalizeException(&exc2, &val2, &tb2); + PyException_SetContext(val2, val); + PyErr_Restore(exc2, val2, tb2); + } + else { + PyErr_Restore(exc, val, tb); + } +} + +/* This is a copy of _PyErr_ChainExceptions, with: + * __cause__ used instead of __context__ + */ +static inline void +npy_PyErr_ChainExceptionsCause(PyObject *exc, PyObject *val, PyObject *tb) +{ + if (exc == NULL) + return; + + if (PyErr_Occurred()) { + PyObject *exc2, *val2, *tb2; + PyErr_Fetch(&exc2, &val2, &tb2); + PyErr_NormalizeException(&exc, &val, &tb); + if (tb != NULL) { + PyException_SetTraceback(val, tb); + Py_DECREF(tb); + } + Py_DECREF(exc); + PyErr_NormalizeException(&exc2, &val2, &tb2); + PyException_SetCause(val2, val); + PyErr_Restore(exc2, val2, tb2); + } + else { + PyErr_Restore(exc, val, tb); + } +} + +/* + * PyCObject functions adapted to PyCapsules. + * + * The main job here is to get rid of the improved error handling + * of PyCapsules. It's a shame... + */ +static inline PyObject * +NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)) +{ + PyObject *ret = PyCapsule_New(ptr, NULL, dtor); + if (ret == NULL) { + PyErr_Clear(); + } + return ret; +} + +static inline PyObject * +NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context, void (*dtor)(PyObject *)) +{ + PyObject *ret = NpyCapsule_FromVoidPtr(ptr, dtor); + if (ret != NULL && PyCapsule_SetContext(ret, context) != 0) { + PyErr_Clear(); + Py_DECREF(ret); + ret = NULL; + } + return ret; +} + +static inline void * +NpyCapsule_AsVoidPtr(PyObject *obj) +{ + void *ret = PyCapsule_GetPointer(obj, NULL); + if (ret == NULL) { + PyErr_Clear(); + } + return ret; +} + +static inline void * +NpyCapsule_GetDesc(PyObject *obj) +{ + return PyCapsule_GetContext(obj); +} + +static inline int +NpyCapsule_Check(PyObject *ptr) +{ + return PyCapsule_CheckExact(ptr); +} + +#ifdef __cplusplus +} +#endif + + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_ */ diff --git a/local_packages/numpy/_core/include/numpy/npy_common.h b/local_packages/numpy/_core/include/numpy/npy_common.h new file mode 100644 index 0000000..5eaa290 --- /dev/null +++ b/local_packages/numpy/_core/include/numpy/npy_common.h @@ -0,0 +1,989 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_ + +/* need Python.h for npy_intp, npy_uintp */ +#include + +/* numpconfig.h is auto-generated */ +#include "numpyconfig.h" +#ifdef HAVE_NPY_CONFIG_H +#include +#endif + +/* + * using static inline modifiers when defining npy_math functions + * allows the compiler to make optimizations when possible + */ +#ifndef NPY_INLINE_MATH +#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD + #define NPY_INLINE_MATH 1 +#else + #define NPY_INLINE_MATH 0 +#endif +#endif + +/* + * gcc does not unroll even with -O3 + * use with care, unrolling on modern cpus rarely speeds things up + */ +#ifdef HAVE_ATTRIBUTE_OPTIMIZE_UNROLL_LOOPS +#define NPY_GCC_UNROLL_LOOPS \ + __attribute__((optimize("unroll-loops"))) +#else +#define NPY_GCC_UNROLL_LOOPS +#endif + +/* highest gcc optimization level, enabled autovectorizer */ +#ifdef HAVE_ATTRIBUTE_OPTIMIZE_OPT_3 +#define NPY_GCC_OPT_3 __attribute__((optimize("O3"))) +#else +#define NPY_GCC_OPT_3 +#endif + +/* + * mark an argument (starting from 1) that must not be NULL and is not checked + * DO NOT USE IF FUNCTION CHECKS FOR NULL!! the compiler will remove the check + */ +#ifdef HAVE_ATTRIBUTE_NONNULL +#define NPY_GCC_NONNULL(n) __attribute__((nonnull(n))) +#else +#define NPY_GCC_NONNULL(n) +#endif + +/* + * give a hint to the compiler which branch is more likely or unlikely + * to occur, e.g. rare error cases: + * + * if (NPY_UNLIKELY(failure == 0)) + * return NULL; + * + * the double !! is to cast the expression (e.g. NULL) to a boolean required by + * the intrinsic + */ +#ifdef HAVE___BUILTIN_EXPECT +#define NPY_LIKELY(x) __builtin_expect(!!(x), 1) +#define NPY_UNLIKELY(x) __builtin_expect(!!(x), 0) +#else +#define NPY_LIKELY(x) (x) +#define NPY_UNLIKELY(x) (x) +#endif + +#ifdef HAVE___BUILTIN_PREFETCH +/* unlike _mm_prefetch also works on non-x86 */ +#define NPY_PREFETCH(x, rw, loc) __builtin_prefetch((x), (rw), (loc)) +#else +#ifdef NPY_HAVE_SSE +/* _MM_HINT_ET[01] (rw = 1) unsupported, only available in gcc >= 4.9 */ +#define NPY_PREFETCH(x, rw, loc) _mm_prefetch((x), loc == 0 ? _MM_HINT_NTA : \ + (loc == 1 ? _MM_HINT_T2 : \ + (loc == 2 ? _MM_HINT_T1 : \ + (loc == 3 ? _MM_HINT_T0 : -1)))) +#else +#define NPY_PREFETCH(x, rw,loc) +#endif +#endif + +/* `NPY_INLINE` kept for backwards compatibility; use `inline` instead */ +#if defined(_MSC_VER) && !defined(__clang__) + #define NPY_INLINE __inline +/* clang included here to handle clang-cl on Windows */ +#elif defined(__GNUC__) || defined(__clang__) + #if defined(__STRICT_ANSI__) + #define NPY_INLINE __inline__ + #else + #define NPY_INLINE inline + #endif +#else + #define NPY_INLINE +#endif + +#ifdef _MSC_VER + #ifdef __cplusplus + #define NPY_FINLINE __forceinline + #else + #define NPY_FINLINE static __forceinline + #endif +#elif defined(__GNUC__) + #ifdef __cplusplus + #define NPY_FINLINE inline __attribute__((always_inline)) + #else + #define NPY_FINLINE static inline __attribute__((always_inline)) + #endif +#else + #ifdef __cplusplus + #define NPY_FINLINE inline + #else + #define NPY_FINLINE static NPY_INLINE + #endif +#endif + +#if defined(_MSC_VER) + #define NPY_NOINLINE static __declspec(noinline) +#elif defined(__GNUC__) || defined(__clang__) + #define NPY_NOINLINE static __attribute__((noinline)) +#else + #define NPY_NOINLINE static +#endif + +#ifdef __cplusplus + #define NPY_TLS thread_local +#elif defined(HAVE_THREAD_LOCAL) + #define NPY_TLS thread_local +#elif defined(HAVE__THREAD_LOCAL) + #define NPY_TLS _Thread_local +#elif defined(HAVE___THREAD) + #define NPY_TLS __thread +#elif defined(HAVE___DECLSPEC_THREAD_) + #define NPY_TLS __declspec(thread) +#else + #define NPY_TLS +#endif + +#ifdef WITH_CPYCHECKER_RETURNS_BORROWED_REF_ATTRIBUTE + #define NPY_RETURNS_BORROWED_REF \ + __attribute__((cpychecker_returns_borrowed_ref)) +#else + #define NPY_RETURNS_BORROWED_REF +#endif + +#ifdef WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE + #define NPY_STEALS_REF_TO_ARG(n) \ + __attribute__((cpychecker_steals_reference_to_arg(n))) +#else + #define NPY_STEALS_REF_TO_ARG(n) +#endif + +/* 64 bit file position support, also on win-amd64. Issue gh-2256 */ +#if defined(_MSC_VER) && defined(_WIN64) && (_MSC_VER > 1400) || \ + defined(__MINGW32__) || defined(__MINGW64__) + #include + + #define npy_fseek _fseeki64 + #define npy_ftell _ftelli64 + #define npy_lseek _lseeki64 + #define npy_off_t npy_int64 + + #if NPY_SIZEOF_INT == 8 + #define NPY_OFF_T_PYFMT "i" + #elif NPY_SIZEOF_LONG == 8 + #define NPY_OFF_T_PYFMT "l" + #elif NPY_SIZEOF_LONGLONG == 8 + #define NPY_OFF_T_PYFMT "L" + #else + #error Unsupported size for type off_t + #endif +#else +#ifdef HAVE_FSEEKO + #define npy_fseek fseeko +#else + #define npy_fseek fseek +#endif +#ifdef HAVE_FTELLO + #define npy_ftell ftello +#else + #define npy_ftell ftell +#endif + #include + #ifndef _WIN32 + #include + #endif + #define npy_lseek lseek + #define npy_off_t off_t + + #if NPY_SIZEOF_OFF_T == NPY_SIZEOF_SHORT + #define NPY_OFF_T_PYFMT "h" + #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_INT + #define NPY_OFF_T_PYFMT "i" + #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONG + #define NPY_OFF_T_PYFMT "l" + #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONGLONG + #define NPY_OFF_T_PYFMT "L" + #else + #error Unsupported size for type off_t + #endif +#endif + +/* enums for detected endianness */ +enum { + NPY_CPU_UNKNOWN_ENDIAN, + NPY_CPU_LITTLE, + NPY_CPU_BIG +}; + +/* + * This is to typedef npy_intp to the appropriate size for Py_ssize_t. + * (Before NumPy 2.0 we used Py_intptr_t and Py_uintptr_t from `pyport.h`.) + */ +typedef Py_ssize_t npy_intp; +typedef size_t npy_uintp; + +/* + * Define sizes that were not defined in numpyconfig.h. + */ +#define NPY_SIZEOF_CHAR 1 +#define NPY_SIZEOF_BYTE 1 +#define NPY_SIZEOF_DATETIME 8 +#define NPY_SIZEOF_TIMEDELTA 8 +#define NPY_SIZEOF_HALF 2 +#define NPY_SIZEOF_CFLOAT NPY_SIZEOF_COMPLEX_FLOAT +#define NPY_SIZEOF_CDOUBLE NPY_SIZEOF_COMPLEX_DOUBLE +#define NPY_SIZEOF_CLONGDOUBLE NPY_SIZEOF_COMPLEX_LONGDOUBLE + +#ifdef constchar +#undef constchar +#endif + +#define NPY_SSIZE_T_PYFMT "n" +#define constchar char + +/* NPY_INTP_FMT Note: + * Unlike the other NPY_*_FMT macros, which are used with PyOS_snprintf, + * NPY_INTP_FMT is used with PyErr_Format and PyUnicode_FromFormat. Those + * functions use different formatting codes that are portably specified + * according to the Python documentation. See issue gh-2388. + */ +#if NPY_SIZEOF_INTP == NPY_SIZEOF_LONG + #define NPY_INTP NPY_LONG + #define NPY_UINTP NPY_ULONG + #define PyIntpArrType_Type PyLongArrType_Type + #define PyUIntpArrType_Type PyULongArrType_Type + #define NPY_MAX_INTP NPY_MAX_LONG + #define NPY_MIN_INTP NPY_MIN_LONG + #define NPY_MAX_UINTP NPY_MAX_ULONG + #define NPY_INTP_FMT "ld" +#elif NPY_SIZEOF_INTP == NPY_SIZEOF_INT + #define NPY_INTP NPY_INT + #define NPY_UINTP NPY_UINT + #define PyIntpArrType_Type PyIntArrType_Type + #define PyUIntpArrType_Type PyUIntArrType_Type + #define NPY_MAX_INTP NPY_MAX_INT + #define NPY_MIN_INTP NPY_MIN_INT + #define NPY_MAX_UINTP NPY_MAX_UINT + #define NPY_INTP_FMT "d" +#elif defined(PY_LONG_LONG) && (NPY_SIZEOF_INTP == NPY_SIZEOF_LONGLONG) + #define NPY_INTP NPY_LONGLONG + #define NPY_UINTP NPY_ULONGLONG + #define PyIntpArrType_Type PyLongLongArrType_Type + #define PyUIntpArrType_Type PyULongLongArrType_Type + #define NPY_MAX_INTP NPY_MAX_LONGLONG + #define NPY_MIN_INTP NPY_MIN_LONGLONG + #define NPY_MAX_UINTP NPY_MAX_ULONGLONG + #define NPY_INTP_FMT "lld" +#else + #error "Failed to correctly define NPY_INTP and NPY_UINTP" +#endif + + +/* + * Some platforms don't define bool, long long, or long double. + * Handle that here. + */ +#define NPY_BYTE_FMT "hhd" +#define NPY_UBYTE_FMT "hhu" +#define NPY_SHORT_FMT "hd" +#define NPY_USHORT_FMT "hu" +#define NPY_INT_FMT "d" +#define NPY_UINT_FMT "u" +#define NPY_LONG_FMT "ld" +#define NPY_ULONG_FMT "lu" +#define NPY_HALF_FMT "g" +#define NPY_FLOAT_FMT "g" +#define NPY_DOUBLE_FMT "g" + + +#ifdef PY_LONG_LONG +typedef PY_LONG_LONG npy_longlong; +typedef unsigned PY_LONG_LONG npy_ulonglong; +# ifdef _MSC_VER +# define NPY_LONGLONG_FMT "I64d" +# define NPY_ULONGLONG_FMT "I64u" +# else +# define NPY_LONGLONG_FMT "lld" +# define NPY_ULONGLONG_FMT "llu" +# endif +# ifdef _MSC_VER +# define NPY_LONGLONG_SUFFIX(x) (x##i64) +# define NPY_ULONGLONG_SUFFIX(x) (x##Ui64) +# else +# define NPY_LONGLONG_SUFFIX(x) (x##LL) +# define NPY_ULONGLONG_SUFFIX(x) (x##ULL) +# endif +#else +typedef long npy_longlong; +typedef unsigned long npy_ulonglong; +# define NPY_LONGLONG_SUFFIX(x) (x##L) +# define NPY_ULONGLONG_SUFFIX(x) (x##UL) +#endif + + +typedef unsigned char npy_bool; +#define NPY_FALSE 0 +#define NPY_TRUE 1 +/* + * `NPY_SIZEOF_LONGDOUBLE` isn't usually equal to sizeof(long double). + * In some certain cases, it may forced to be equal to sizeof(double) + * even against the compiler implementation and the same goes for + * `complex long double`. + * + * Therefore, avoid `long double`, use `npy_longdouble` instead, + * and when it comes to standard math functions make sure of using + * the double version when `NPY_SIZEOF_LONGDOUBLE` == `NPY_SIZEOF_DOUBLE`. + * For example: + * npy_longdouble *ptr, x; + * #if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE + * npy_longdouble r = modf(x, ptr); + * #else + * npy_longdouble r = modfl(x, ptr); + * #endif + * + * See https://github.com/numpy/numpy/issues/20348 + */ +#if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE + #define NPY_LONGDOUBLE_FMT "g" + #define longdouble_t double + typedef double npy_longdouble; +#else + #define NPY_LONGDOUBLE_FMT "Lg" + #define longdouble_t long double + typedef long double npy_longdouble; +#endif + +#ifndef Py_USING_UNICODE +#error Must use Python with unicode enabled. +#endif + + +typedef signed char npy_byte; +typedef unsigned char npy_ubyte; +typedef unsigned short npy_ushort; +typedef unsigned int npy_uint; +typedef unsigned long npy_ulong; + +/* These are for completeness */ +typedef char npy_char; +typedef short npy_short; +typedef int npy_int; +typedef long npy_long; +typedef float npy_float; +typedef double npy_double; + +typedef Py_hash_t npy_hash_t; +#define NPY_SIZEOF_HASH_T NPY_SIZEOF_INTP + +#if defined(__cplusplus) + +typedef struct +{ + double _Val[2]; +} npy_cdouble; + +typedef struct +{ + float _Val[2]; +} npy_cfloat; + +typedef struct +{ + long double _Val[2]; +} npy_clongdouble; + +#else + +#include + + +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) +typedef _Dcomplex npy_cdouble; +typedef _Fcomplex npy_cfloat; +typedef _Lcomplex npy_clongdouble; +#else /* !defined(_MSC_VER) || defined(__INTEL_COMPILER) */ +typedef double _Complex npy_cdouble; +typedef float _Complex npy_cfloat; +typedef longdouble_t _Complex npy_clongdouble; +#endif + +#endif + +/* + * numarray-style bit-width typedefs + */ +#define NPY_MAX_INT8 127 +#define NPY_MIN_INT8 -128 +#define NPY_MAX_UINT8 255 +#define NPY_MAX_INT16 32767 +#define NPY_MIN_INT16 -32768 +#define NPY_MAX_UINT16 65535 +#define NPY_MAX_INT32 2147483647 +#define NPY_MIN_INT32 (-NPY_MAX_INT32 - 1) +#define NPY_MAX_UINT32 4294967295U +#define NPY_MAX_INT64 NPY_LONGLONG_SUFFIX(9223372036854775807) +#define NPY_MIN_INT64 (-NPY_MAX_INT64 - NPY_LONGLONG_SUFFIX(1)) +#define NPY_MAX_UINT64 NPY_ULONGLONG_SUFFIX(18446744073709551615) +#define NPY_MAX_INT128 NPY_LONGLONG_SUFFIX(85070591730234615865843651857942052864) +#define NPY_MIN_INT128 (-NPY_MAX_INT128 - NPY_LONGLONG_SUFFIX(1)) +#define NPY_MAX_UINT128 NPY_ULONGLONG_SUFFIX(170141183460469231731687303715884105728) +#define NPY_MIN_DATETIME NPY_MIN_INT64 +#define NPY_MAX_DATETIME NPY_MAX_INT64 +#define NPY_MIN_TIMEDELTA NPY_MIN_INT64 +#define NPY_MAX_TIMEDELTA NPY_MAX_INT64 + + /* Need to find the number of bits for each type and + make definitions accordingly. + + C states that sizeof(char) == 1 by definition + + So, just using the sizeof keyword won't help. + + It also looks like Python itself uses sizeof(char) quite a + bit, which by definition should be 1 all the time. + + Idea: Make Use of CHAR_BIT which should tell us how many + BITS per CHARACTER + */ + + /* Include platform definitions -- These are in the C89/90 standard */ +#include +#define NPY_MAX_BYTE SCHAR_MAX +#define NPY_MIN_BYTE SCHAR_MIN +#define NPY_MAX_UBYTE UCHAR_MAX +#define NPY_MAX_SHORT SHRT_MAX +#define NPY_MIN_SHORT SHRT_MIN +#define NPY_MAX_USHORT USHRT_MAX +#define NPY_MAX_INT INT_MAX +#ifndef INT_MIN +#define INT_MIN (-INT_MAX - 1) +#endif +#define NPY_MIN_INT INT_MIN +#define NPY_MAX_UINT UINT_MAX +#define NPY_MAX_LONG LONG_MAX +#define NPY_MIN_LONG LONG_MIN +#define NPY_MAX_ULONG ULONG_MAX + +#define NPY_BITSOF_BOOL (sizeof(npy_bool) * CHAR_BIT) +#define NPY_BITSOF_CHAR CHAR_BIT +#define NPY_BITSOF_BYTE (NPY_SIZEOF_BYTE * CHAR_BIT) +#define NPY_BITSOF_SHORT (NPY_SIZEOF_SHORT * CHAR_BIT) +#define NPY_BITSOF_INT (NPY_SIZEOF_INT * CHAR_BIT) +#define NPY_BITSOF_LONG (NPY_SIZEOF_LONG * CHAR_BIT) +#define NPY_BITSOF_LONGLONG (NPY_SIZEOF_LONGLONG * CHAR_BIT) +#define NPY_BITSOF_INTP (NPY_SIZEOF_INTP * CHAR_BIT) +#define NPY_BITSOF_HALF (NPY_SIZEOF_HALF * CHAR_BIT) +#define NPY_BITSOF_FLOAT (NPY_SIZEOF_FLOAT * CHAR_BIT) +#define NPY_BITSOF_DOUBLE (NPY_SIZEOF_DOUBLE * CHAR_BIT) +#define NPY_BITSOF_LONGDOUBLE (NPY_SIZEOF_LONGDOUBLE * CHAR_BIT) +#define NPY_BITSOF_CFLOAT (NPY_SIZEOF_CFLOAT * CHAR_BIT) +#define NPY_BITSOF_CDOUBLE (NPY_SIZEOF_CDOUBLE * CHAR_BIT) +#define NPY_BITSOF_CLONGDOUBLE (NPY_SIZEOF_CLONGDOUBLE * CHAR_BIT) +#define NPY_BITSOF_DATETIME (NPY_SIZEOF_DATETIME * CHAR_BIT) +#define NPY_BITSOF_TIMEDELTA (NPY_SIZEOF_TIMEDELTA * CHAR_BIT) + +#if NPY_BITSOF_LONG == 8 +#define NPY_INT8 NPY_LONG +#define NPY_UINT8 NPY_ULONG + typedef long npy_int8; + typedef unsigned long npy_uint8; +#define PyInt8ScalarObject PyLongScalarObject +#define PyInt8ArrType_Type PyLongArrType_Type +#define PyUInt8ScalarObject PyULongScalarObject +#define PyUInt8ArrType_Type PyULongArrType_Type +#define NPY_INT8_FMT NPY_LONG_FMT +#define NPY_UINT8_FMT NPY_ULONG_FMT +#elif NPY_BITSOF_LONG == 16 +#define NPY_INT16 NPY_LONG +#define NPY_UINT16 NPY_ULONG + typedef long npy_int16; + typedef unsigned long npy_uint16; +#define PyInt16ScalarObject PyLongScalarObject +#define PyInt16ArrType_Type PyLongArrType_Type +#define PyUInt16ScalarObject PyULongScalarObject +#define PyUInt16ArrType_Type PyULongArrType_Type +#define NPY_INT16_FMT NPY_LONG_FMT +#define NPY_UINT16_FMT NPY_ULONG_FMT +#elif NPY_BITSOF_LONG == 32 +#define NPY_INT32 NPY_LONG +#define NPY_UINT32 NPY_ULONG + typedef long npy_int32; + typedef unsigned long npy_uint32; + typedef unsigned long npy_ucs4; +#define PyInt32ScalarObject PyLongScalarObject +#define PyInt32ArrType_Type PyLongArrType_Type +#define PyUInt32ScalarObject PyULongScalarObject +#define PyUInt32ArrType_Type PyULongArrType_Type +#define NPY_INT32_FMT NPY_LONG_FMT +#define NPY_UINT32_FMT NPY_ULONG_FMT +#elif NPY_BITSOF_LONG == 64 +#define NPY_INT64 NPY_LONG +#define NPY_UINT64 NPY_ULONG + typedef long npy_int64; + typedef unsigned long npy_uint64; +#define PyInt64ScalarObject PyLongScalarObject +#define PyInt64ArrType_Type PyLongArrType_Type +#define PyUInt64ScalarObject PyULongScalarObject +#define PyUInt64ArrType_Type PyULongArrType_Type +#define NPY_INT64_FMT NPY_LONG_FMT +#define NPY_UINT64_FMT NPY_ULONG_FMT +#define MyPyLong_FromInt64 PyLong_FromLong +#define MyPyLong_AsInt64 PyLong_AsLong +#endif + +#if NPY_BITSOF_LONGLONG == 8 +# ifndef NPY_INT8 +# define NPY_INT8 NPY_LONGLONG +# define NPY_UINT8 NPY_ULONGLONG + typedef npy_longlong npy_int8; + typedef npy_ulonglong npy_uint8; +# define PyInt8ScalarObject PyLongLongScalarObject +# define PyInt8ArrType_Type PyLongLongArrType_Type +# define PyUInt8ScalarObject PyULongLongScalarObject +# define PyUInt8ArrType_Type PyULongLongArrType_Type +#define NPY_INT8_FMT NPY_LONGLONG_FMT +#define NPY_UINT8_FMT NPY_ULONGLONG_FMT +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT8 +# define NPY_MIN_LONGLONG NPY_MIN_INT8 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT8 +#elif NPY_BITSOF_LONGLONG == 16 +# ifndef NPY_INT16 +# define NPY_INT16 NPY_LONGLONG +# define NPY_UINT16 NPY_ULONGLONG + typedef npy_longlong npy_int16; + typedef npy_ulonglong npy_uint16; +# define PyInt16ScalarObject PyLongLongScalarObject +# define PyInt16ArrType_Type PyLongLongArrType_Type +# define PyUInt16ScalarObject PyULongLongScalarObject +# define PyUInt16ArrType_Type PyULongLongArrType_Type +#define NPY_INT16_FMT NPY_LONGLONG_FMT +#define NPY_UINT16_FMT NPY_ULONGLONG_FMT +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT16 +# define NPY_MIN_LONGLONG NPY_MIN_INT16 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT16 +#elif NPY_BITSOF_LONGLONG == 32 +# ifndef NPY_INT32 +# define NPY_INT32 NPY_LONGLONG +# define NPY_UINT32 NPY_ULONGLONG + typedef npy_longlong npy_int32; + typedef npy_ulonglong npy_uint32; + typedef npy_ulonglong npy_ucs4; +# define PyInt32ScalarObject PyLongLongScalarObject +# define PyInt32ArrType_Type PyLongLongArrType_Type +# define PyUInt32ScalarObject PyULongLongScalarObject +# define PyUInt32ArrType_Type PyULongLongArrType_Type +#define NPY_INT32_FMT NPY_LONGLONG_FMT +#define NPY_UINT32_FMT NPY_ULONGLONG_FMT +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT32 +# define NPY_MIN_LONGLONG NPY_MIN_INT32 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT32 +#elif NPY_BITSOF_LONGLONG == 64 +# ifndef NPY_INT64 +# define NPY_INT64 NPY_LONGLONG +# define NPY_UINT64 NPY_ULONGLONG + typedef npy_longlong npy_int64; + typedef npy_ulonglong npy_uint64; +# define PyInt64ScalarObject PyLongLongScalarObject +# define PyInt64ArrType_Type PyLongLongArrType_Type +# define PyUInt64ScalarObject PyULongLongScalarObject +# define PyUInt64ArrType_Type PyULongLongArrType_Type +#define NPY_INT64_FMT NPY_LONGLONG_FMT +#define NPY_UINT64_FMT NPY_ULONGLONG_FMT +# define MyPyLong_FromInt64 PyLong_FromLongLong +# define MyPyLong_AsInt64 PyLong_AsLongLong +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT64 +# define NPY_MIN_LONGLONG NPY_MIN_INT64 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT64 +#endif + +#if NPY_BITSOF_INT == 8 +#ifndef NPY_INT8 +#define NPY_INT8 NPY_INT +#define NPY_UINT8 NPY_UINT + typedef int npy_int8; + typedef unsigned int npy_uint8; +# define PyInt8ScalarObject PyIntScalarObject +# define PyInt8ArrType_Type PyIntArrType_Type +# define PyUInt8ScalarObject PyUIntScalarObject +# define PyUInt8ArrType_Type PyUIntArrType_Type +#define NPY_INT8_FMT NPY_INT_FMT +#define NPY_UINT8_FMT NPY_UINT_FMT +#endif +#elif NPY_BITSOF_INT == 16 +#ifndef NPY_INT16 +#define NPY_INT16 NPY_INT +#define NPY_UINT16 NPY_UINT + typedef int npy_int16; + typedef unsigned int npy_uint16; +# define PyInt16ScalarObject PyIntScalarObject +# define PyInt16ArrType_Type PyIntArrType_Type +# define PyUInt16ScalarObject PyIntUScalarObject +# define PyUInt16ArrType_Type PyIntUArrType_Type +#define NPY_INT16_FMT NPY_INT_FMT +#define NPY_UINT16_FMT NPY_UINT_FMT +#endif +#elif NPY_BITSOF_INT == 32 +#ifndef NPY_INT32 +#define NPY_INT32 NPY_INT +#define NPY_UINT32 NPY_UINT + typedef int npy_int32; + typedef unsigned int npy_uint32; + typedef unsigned int npy_ucs4; +# define PyInt32ScalarObject PyIntScalarObject +# define PyInt32ArrType_Type PyIntArrType_Type +# define PyUInt32ScalarObject PyUIntScalarObject +# define PyUInt32ArrType_Type PyUIntArrType_Type +#define NPY_INT32_FMT NPY_INT_FMT +#define NPY_UINT32_FMT NPY_UINT_FMT +#endif +#elif NPY_BITSOF_INT == 64 +#ifndef NPY_INT64 +#define NPY_INT64 NPY_INT +#define NPY_UINT64 NPY_UINT + typedef int npy_int64; + typedef unsigned int npy_uint64; +# define PyInt64ScalarObject PyIntScalarObject +# define PyInt64ArrType_Type PyIntArrType_Type +# define PyUInt64ScalarObject PyUIntScalarObject +# define PyUInt64ArrType_Type PyUIntArrType_Type +#define NPY_INT64_FMT NPY_INT_FMT +#define NPY_UINT64_FMT NPY_UINT_FMT +# define MyPyLong_FromInt64 PyLong_FromLong +# define MyPyLong_AsInt64 PyLong_AsLong +#endif +#endif + +#if NPY_BITSOF_SHORT == 8 +#ifndef NPY_INT8 +#define NPY_INT8 NPY_SHORT +#define NPY_UINT8 NPY_USHORT + typedef short npy_int8; + typedef unsigned short npy_uint8; +# define PyInt8ScalarObject PyShortScalarObject +# define PyInt8ArrType_Type PyShortArrType_Type +# define PyUInt8ScalarObject PyUShortScalarObject +# define PyUInt8ArrType_Type PyUShortArrType_Type +#define NPY_INT8_FMT NPY_SHORT_FMT +#define NPY_UINT8_FMT NPY_USHORT_FMT +#endif +#elif NPY_BITSOF_SHORT == 16 +#ifndef NPY_INT16 +#define NPY_INT16 NPY_SHORT +#define NPY_UINT16 NPY_USHORT + typedef short npy_int16; + typedef unsigned short npy_uint16; +# define PyInt16ScalarObject PyShortScalarObject +# define PyInt16ArrType_Type PyShortArrType_Type +# define PyUInt16ScalarObject PyUShortScalarObject +# define PyUInt16ArrType_Type PyUShortArrType_Type +#define NPY_INT16_FMT NPY_SHORT_FMT +#define NPY_UINT16_FMT NPY_USHORT_FMT +#endif +#elif NPY_BITSOF_SHORT == 32 +#ifndef NPY_INT32 +#define NPY_INT32 NPY_SHORT +#define NPY_UINT32 NPY_USHORT + typedef short npy_int32; + typedef unsigned short npy_uint32; + typedef unsigned short npy_ucs4; +# define PyInt32ScalarObject PyShortScalarObject +# define PyInt32ArrType_Type PyShortArrType_Type +# define PyUInt32ScalarObject PyUShortScalarObject +# define PyUInt32ArrType_Type PyUShortArrType_Type +#define NPY_INT32_FMT NPY_SHORT_FMT +#define NPY_UINT32_FMT NPY_USHORT_FMT +#endif +#elif NPY_BITSOF_SHORT == 64 +#ifndef NPY_INT64 +#define NPY_INT64 NPY_SHORT +#define NPY_UINT64 NPY_USHORT + typedef short npy_int64; + typedef unsigned short npy_uint64; +# define PyInt64ScalarObject PyShortScalarObject +# define PyInt64ArrType_Type PyShortArrType_Type +# define PyUInt64ScalarObject PyUShortScalarObject +# define PyUInt64ArrType_Type PyUShortArrType_Type +#define NPY_INT64_FMT NPY_SHORT_FMT +#define NPY_UINT64_FMT NPY_USHORT_FMT +# define MyPyLong_FromInt64 PyLong_FromLong +# define MyPyLong_AsInt64 PyLong_AsLong +#endif +#endif + + +#if NPY_BITSOF_CHAR == 8 +#ifndef NPY_INT8 +#define NPY_INT8 NPY_BYTE +#define NPY_UINT8 NPY_UBYTE + typedef signed char npy_int8; + typedef unsigned char npy_uint8; +# define PyInt8ScalarObject PyByteScalarObject +# define PyInt8ArrType_Type PyByteArrType_Type +# define PyUInt8ScalarObject PyUByteScalarObject +# define PyUInt8ArrType_Type PyUByteArrType_Type +#define NPY_INT8_FMT NPY_BYTE_FMT +#define NPY_UINT8_FMT NPY_UBYTE_FMT +#endif +#elif NPY_BITSOF_CHAR == 16 +#ifndef NPY_INT16 +#define NPY_INT16 NPY_BYTE +#define NPY_UINT16 NPY_UBYTE + typedef signed char npy_int16; + typedef unsigned char npy_uint16; +# define PyInt16ScalarObject PyByteScalarObject +# define PyInt16ArrType_Type PyByteArrType_Type +# define PyUInt16ScalarObject PyUByteScalarObject +# define PyUInt16ArrType_Type PyUByteArrType_Type +#define NPY_INT16_FMT NPY_BYTE_FMT +#define NPY_UINT16_FMT NPY_UBYTE_FMT +#endif +#elif NPY_BITSOF_CHAR == 32 +#ifndef NPY_INT32 +#define NPY_INT32 NPY_BYTE +#define NPY_UINT32 NPY_UBYTE + typedef signed char npy_int32; + typedef unsigned char npy_uint32; + typedef unsigned char npy_ucs4; +# define PyInt32ScalarObject PyByteScalarObject +# define PyInt32ArrType_Type PyByteArrType_Type +# define PyUInt32ScalarObject PyUByteScalarObject +# define PyUInt32ArrType_Type PyUByteArrType_Type +#define NPY_INT32_FMT NPY_BYTE_FMT +#define NPY_UINT32_FMT NPY_UBYTE_FMT +#endif +#elif NPY_BITSOF_CHAR == 64 +#ifndef NPY_INT64 +#define NPY_INT64 NPY_BYTE +#define NPY_UINT64 NPY_UBYTE + typedef signed char npy_int64; + typedef unsigned char npy_uint64; +# define PyInt64ScalarObject PyByteScalarObject +# define PyInt64ArrType_Type PyByteArrType_Type +# define PyUInt64ScalarObject PyUByteScalarObject +# define PyUInt64ArrType_Type PyUByteArrType_Type +#define NPY_INT64_FMT NPY_BYTE_FMT +#define NPY_UINT64_FMT NPY_UBYTE_FMT +# define MyPyLong_FromInt64 PyLong_FromLong +# define MyPyLong_AsInt64 PyLong_AsLong +#endif +#elif NPY_BITSOF_CHAR == 128 +#endif + + + +#if NPY_BITSOF_DOUBLE == 32 +#ifndef NPY_FLOAT32 +#define NPY_FLOAT32 NPY_DOUBLE +#define NPY_COMPLEX64 NPY_CDOUBLE + typedef double npy_float32; + typedef npy_cdouble npy_complex64; +# define PyFloat32ScalarObject PyDoubleScalarObject +# define PyComplex64ScalarObject PyCDoubleScalarObject +# define PyFloat32ArrType_Type PyDoubleArrType_Type +# define PyComplex64ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT32_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX64_FMT NPY_CDOUBLE_FMT +#endif +#elif NPY_BITSOF_DOUBLE == 64 +#ifndef NPY_FLOAT64 +#define NPY_FLOAT64 NPY_DOUBLE +#define NPY_COMPLEX128 NPY_CDOUBLE + typedef double npy_float64; + typedef npy_cdouble npy_complex128; +# define PyFloat64ScalarObject PyDoubleScalarObject +# define PyComplex128ScalarObject PyCDoubleScalarObject +# define PyFloat64ArrType_Type PyDoubleArrType_Type +# define PyComplex128ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT64_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX128_FMT NPY_CDOUBLE_FMT +#endif +#elif NPY_BITSOF_DOUBLE == 80 +#ifndef NPY_FLOAT80 +#define NPY_FLOAT80 NPY_DOUBLE +#define NPY_COMPLEX160 NPY_CDOUBLE + typedef double npy_float80; + typedef npy_cdouble npy_complex160; +# define PyFloat80ScalarObject PyDoubleScalarObject +# define PyComplex160ScalarObject PyCDoubleScalarObject +# define PyFloat80ArrType_Type PyDoubleArrType_Type +# define PyComplex160ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT80_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX160_FMT NPY_CDOUBLE_FMT +#endif +#elif NPY_BITSOF_DOUBLE == 96 +#ifndef NPY_FLOAT96 +#define NPY_FLOAT96 NPY_DOUBLE +#define NPY_COMPLEX192 NPY_CDOUBLE + typedef double npy_float96; + typedef npy_cdouble npy_complex192; +# define PyFloat96ScalarObject PyDoubleScalarObject +# define PyComplex192ScalarObject PyCDoubleScalarObject +# define PyFloat96ArrType_Type PyDoubleArrType_Type +# define PyComplex192ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT96_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX192_FMT NPY_CDOUBLE_FMT +#endif +#elif NPY_BITSOF_DOUBLE == 128 +#ifndef NPY_FLOAT128 +#define NPY_FLOAT128 NPY_DOUBLE +#define NPY_COMPLEX256 NPY_CDOUBLE + typedef double npy_float128; + typedef npy_cdouble npy_complex256; +# define PyFloat128ScalarObject PyDoubleScalarObject +# define PyComplex256ScalarObject PyCDoubleScalarObject +# define PyFloat128ArrType_Type PyDoubleArrType_Type +# define PyComplex256ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT128_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX256_FMT NPY_CDOUBLE_FMT +#endif +#endif + + + +#if NPY_BITSOF_FLOAT == 32 +#ifndef NPY_FLOAT32 +#define NPY_FLOAT32 NPY_FLOAT +#define NPY_COMPLEX64 NPY_CFLOAT + typedef float npy_float32; + typedef npy_cfloat npy_complex64; +# define PyFloat32ScalarObject PyFloatScalarObject +# define PyComplex64ScalarObject PyCFloatScalarObject +# define PyFloat32ArrType_Type PyFloatArrType_Type +# define PyComplex64ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT32_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX64_FMT NPY_CFLOAT_FMT +#endif +#elif NPY_BITSOF_FLOAT == 64 +#ifndef NPY_FLOAT64 +#define NPY_FLOAT64 NPY_FLOAT +#define NPY_COMPLEX128 NPY_CFLOAT + typedef float npy_float64; + typedef npy_cfloat npy_complex128; +# define PyFloat64ScalarObject PyFloatScalarObject +# define PyComplex128ScalarObject PyCFloatScalarObject +# define PyFloat64ArrType_Type PyFloatArrType_Type +# define PyComplex128ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT64_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX128_FMT NPY_CFLOAT_FMT +#endif +#elif NPY_BITSOF_FLOAT == 80 +#ifndef NPY_FLOAT80 +#define NPY_FLOAT80 NPY_FLOAT +#define NPY_COMPLEX160 NPY_CFLOAT + typedef float npy_float80; + typedef npy_cfloat npy_complex160; +# define PyFloat80ScalarObject PyFloatScalarObject +# define PyComplex160ScalarObject PyCFloatScalarObject +# define PyFloat80ArrType_Type PyFloatArrType_Type +# define PyComplex160ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT80_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX160_FMT NPY_CFLOAT_FMT +#endif +#elif NPY_BITSOF_FLOAT == 96 +#ifndef NPY_FLOAT96 +#define NPY_FLOAT96 NPY_FLOAT +#define NPY_COMPLEX192 NPY_CFLOAT + typedef float npy_float96; + typedef npy_cfloat npy_complex192; +# define PyFloat96ScalarObject PyFloatScalarObject +# define PyComplex192ScalarObject PyCFloatScalarObject +# define PyFloat96ArrType_Type PyFloatArrType_Type +# define PyComplex192ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT96_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX192_FMT NPY_CFLOAT_FMT +#endif +#elif NPY_BITSOF_FLOAT == 128 +#ifndef NPY_FLOAT128 +#define NPY_FLOAT128 NPY_FLOAT +#define NPY_COMPLEX256 NPY_CFLOAT + typedef float npy_float128; + typedef npy_cfloat npy_complex256; +# define PyFloat128ScalarObject PyFloatScalarObject +# define PyComplex256ScalarObject PyCFloatScalarObject +# define PyFloat128ArrType_Type PyFloatArrType_Type +# define PyComplex256ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT128_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX256_FMT NPY_CFLOAT_FMT +#endif +#endif + +/* half/float16 isn't a floating-point type in C */ +#define NPY_FLOAT16 NPY_HALF +typedef npy_uint16 npy_half; +typedef npy_half npy_float16; + +#if NPY_BITSOF_LONGDOUBLE == 32 +#ifndef NPY_FLOAT32 +#define NPY_FLOAT32 NPY_LONGDOUBLE +#define NPY_COMPLEX64 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float32; + typedef npy_clongdouble npy_complex64; +# define PyFloat32ScalarObject PyLongDoubleScalarObject +# define PyComplex64ScalarObject PyCLongDoubleScalarObject +# define PyFloat32ArrType_Type PyLongDoubleArrType_Type +# define PyComplex64ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT32_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX64_FMT NPY_CLONGDOUBLE_FMT +#endif +#elif NPY_BITSOF_LONGDOUBLE == 64 +#ifndef NPY_FLOAT64 +#define NPY_FLOAT64 NPY_LONGDOUBLE +#define NPY_COMPLEX128 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float64; + typedef npy_clongdouble npy_complex128; +# define PyFloat64ScalarObject PyLongDoubleScalarObject +# define PyComplex128ScalarObject PyCLongDoubleScalarObject +# define PyFloat64ArrType_Type PyLongDoubleArrType_Type +# define PyComplex128ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT64_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX128_FMT NPY_CLONGDOUBLE_FMT +#endif +#elif NPY_BITSOF_LONGDOUBLE == 80 +#ifndef NPY_FLOAT80 +#define NPY_FLOAT80 NPY_LONGDOUBLE +#define NPY_COMPLEX160 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float80; + typedef npy_clongdouble npy_complex160; +# define PyFloat80ScalarObject PyLongDoubleScalarObject +# define PyComplex160ScalarObject PyCLongDoubleScalarObject +# define PyFloat80ArrType_Type PyLongDoubleArrType_Type +# define PyComplex160ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT80_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX160_FMT NPY_CLONGDOUBLE_FMT +#endif +#elif NPY_BITSOF_LONGDOUBLE == 96 +#ifndef NPY_FLOAT96 +#define NPY_FLOAT96 NPY_LONGDOUBLE +#define NPY_COMPLEX192 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float96; + typedef npy_clongdouble npy_complex192; +# define PyFloat96ScalarObject PyLongDoubleScalarObject +# define PyComplex192ScalarObject PyCLongDoubleScalarObject +# define PyFloat96ArrType_Type PyLongDoubleArrType_Type +# define PyComplex192ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT96_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX192_FMT NPY_CLONGDOUBLE_FMT +#endif +#elif NPY_BITSOF_LONGDOUBLE == 128 +#ifndef NPY_FLOAT128 +#define NPY_FLOAT128 NPY_LONGDOUBLE +#define NPY_COMPLEX256 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float128; + typedef npy_clongdouble npy_complex256; +# define PyFloat128ScalarObject PyLongDoubleScalarObject +# define PyComplex256ScalarObject PyCLongDoubleScalarObject +# define PyFloat128ArrType_Type PyLongDoubleArrType_Type +# define PyComplex256ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT128_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX256_FMT NPY_CLONGDOUBLE_FMT +#endif +#endif + +/* datetime typedefs */ +typedef npy_int64 npy_timedelta; +typedef npy_int64 npy_datetime; +#define NPY_DATETIME_FMT NPY_INT64_FMT +#define NPY_TIMEDELTA_FMT NPY_INT64_FMT + +/* End of typedefs for numarray style bit-width names */ + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_ */ diff --git a/local_packages/numpy/_core/include/numpy/npy_cpu.h b/local_packages/numpy/_core/include/numpy/npy_cpu.h new file mode 100644 index 0000000..d3a29da --- /dev/null +++ b/local_packages/numpy/_core/include/numpy/npy_cpu.h @@ -0,0 +1,126 @@ +/* + * This set (target) cpu specific macros: + * - Possible values: + * NPY_CPU_X86 + * NPY_CPU_AMD64 + * NPY_CPU_PPC + * NPY_CPU_PPC64 + * NPY_CPU_PPC64LE + * NPY_CPU_SPARC + * NPY_CPU_S390 + * NPY_CPU_IA64 + * NPY_CPU_HPPA + * NPY_CPU_ALPHA + * NPY_CPU_ARMEL + * NPY_CPU_ARMEB + * NPY_CPU_SH_LE + * NPY_CPU_SH_BE + * NPY_CPU_ARCEL + * NPY_CPU_ARCEB + * NPY_CPU_RISCV64 + * NPY_CPU_RISCV32 + * NPY_CPU_LOONGARCH + * NPY_CPU_SW_64 + * NPY_CPU_WASM + */ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ + +#include "numpyconfig.h" + +#if defined( __i386__ ) || defined(i386) || defined(_M_IX86) + /* + * __i386__ is defined by gcc and Intel compiler on Linux, + * _M_IX86 by VS compiler, + * i386 by Sun compilers on opensolaris at least + */ + #define NPY_CPU_X86 +#elif defined(__x86_64__) || defined(__amd64__) || defined(__x86_64) || defined(_M_AMD64) + /* + * both __x86_64__ and __amd64__ are defined by gcc + * __x86_64 defined by sun compiler on opensolaris at least + * _M_AMD64 defined by MS compiler + */ + #define NPY_CPU_AMD64 +#elif defined(__powerpc64__) && defined(__LITTLE_ENDIAN__) + #define NPY_CPU_PPC64LE +#elif defined(__powerpc64__) && defined(__BIG_ENDIAN__) + #define NPY_CPU_PPC64 +#elif defined(__ppc__) || defined(__powerpc__) || defined(_ARCH_PPC) + /* + * __ppc__ is defined by gcc, I remember having seen __powerpc__ once, + * but can't find it ATM + * _ARCH_PPC is used by at least gcc on AIX + * As __powerpc__ and _ARCH_PPC are also defined by PPC64 check + * for those specifically first before defaulting to ppc + */ + #define NPY_CPU_PPC +#elif defined(__sparc__) || defined(__sparc) + /* __sparc__ is defined by gcc and Forte (e.g. Sun) compilers */ + #define NPY_CPU_SPARC +#elif defined(__s390__) + #define NPY_CPU_S390 +#elif defined(__ia64) + #define NPY_CPU_IA64 +#elif defined(__hppa) + #define NPY_CPU_HPPA +#elif defined(__alpha__) + #define NPY_CPU_ALPHA +#elif defined(__arm__) || defined(__aarch64__) || defined(_M_ARM64) + /* _M_ARM64 is defined in MSVC for ARM64 compilation on Windows */ + #if defined(__ARMEB__) || defined(__AARCH64EB__) + #if defined(__ARM_32BIT_STATE) + #define NPY_CPU_ARMEB_AARCH32 + #elif defined(__ARM_64BIT_STATE) + #define NPY_CPU_ARMEB_AARCH64 + #else + #define NPY_CPU_ARMEB + #endif + #elif defined(__ARMEL__) || defined(__AARCH64EL__) || defined(_M_ARM64) + #if defined(__ARM_32BIT_STATE) + #define NPY_CPU_ARMEL_AARCH32 + #elif defined(__ARM_64BIT_STATE) || defined(_M_ARM64) || defined(__AARCH64EL__) + #define NPY_CPU_ARMEL_AARCH64 + #else + #define NPY_CPU_ARMEL + #endif + #else + # error Unknown ARM CPU, please report this to numpy maintainers with \ + information about your platform (OS, CPU and compiler) + #endif +#elif defined(__sh__) && defined(__LITTLE_ENDIAN__) + #define NPY_CPU_SH_LE +#elif defined(__sh__) && defined(__BIG_ENDIAN__) + #define NPY_CPU_SH_BE +#elif defined(__MIPSEL__) + #define NPY_CPU_MIPSEL +#elif defined(__MIPSEB__) + #define NPY_CPU_MIPSEB +#elif defined(__or1k__) + #define NPY_CPU_OR1K +#elif defined(__mc68000__) + #define NPY_CPU_M68K +#elif defined(__arc__) && defined(__LITTLE_ENDIAN__) + #define NPY_CPU_ARCEL +#elif defined(__arc__) && defined(__BIG_ENDIAN__) + #define NPY_CPU_ARCEB +#elif defined(__riscv) + #if __riscv_xlen == 64 + #define NPY_CPU_RISCV64 + #elif __riscv_xlen == 32 + #define NPY_CPU_RISCV32 + #endif +#elif defined(__loongarch_lp64) + #define NPY_CPU_LOONGARCH64 +#elif defined(__sw_64__) + #define NPY_CPU_SW_64 +#elif defined(__EMSCRIPTEN__) || defined(__wasm__) + /* __EMSCRIPTEN__ is defined by emscripten: an LLVM-to-Web compiler */ + /* __wasm__ is defined by clang when targeting wasm */ + #define NPY_CPU_WASM +#else + #error Unknown CPU, please report this to numpy maintainers with \ + information about your platform (OS, CPU and compiler) +#endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ */ diff --git a/local_packages/numpy/_core/include/numpy/npy_endian.h b/local_packages/numpy/_core/include/numpy/npy_endian.h new file mode 100644 index 0000000..ecb4b00 --- /dev/null +++ b/local_packages/numpy/_core/include/numpy/npy_endian.h @@ -0,0 +1,79 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_ + +/* + * NPY_BYTE_ORDER is set to the same value as BYTE_ORDER set by glibc in + * endian.h + */ + +#if defined(NPY_HAVE_ENDIAN_H) || defined(NPY_HAVE_SYS_ENDIAN_H) + /* Use endian.h if available */ + + #if defined(NPY_HAVE_ENDIAN_H) + #include + #elif defined(NPY_HAVE_SYS_ENDIAN_H) + #include + #endif + + #if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && defined(LITTLE_ENDIAN) + #define NPY_BYTE_ORDER BYTE_ORDER + #define NPY_LITTLE_ENDIAN LITTLE_ENDIAN + #define NPY_BIG_ENDIAN BIG_ENDIAN + #elif defined(_BYTE_ORDER) && defined(_BIG_ENDIAN) && defined(_LITTLE_ENDIAN) + #define NPY_BYTE_ORDER _BYTE_ORDER + #define NPY_LITTLE_ENDIAN _LITTLE_ENDIAN + #define NPY_BIG_ENDIAN _BIG_ENDIAN + #elif defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN) + #define NPY_BYTE_ORDER __BYTE_ORDER + #define NPY_LITTLE_ENDIAN __LITTLE_ENDIAN + #define NPY_BIG_ENDIAN __BIG_ENDIAN + #endif +#endif + +#ifndef NPY_BYTE_ORDER + /* Set endianness info using target CPU */ + #include "npy_cpu.h" + + #define NPY_LITTLE_ENDIAN 1234 + #define NPY_BIG_ENDIAN 4321 + + #if defined(NPY_CPU_X86) \ + || defined(NPY_CPU_AMD64) \ + || defined(NPY_CPU_IA64) \ + || defined(NPY_CPU_ALPHA) \ + || defined(NPY_CPU_ARMEL) \ + || defined(NPY_CPU_ARMEL_AARCH32) \ + || defined(NPY_CPU_ARMEL_AARCH64) \ + || defined(NPY_CPU_SH_LE) \ + || defined(NPY_CPU_MIPSEL) \ + || defined(NPY_CPU_PPC64LE) \ + || defined(NPY_CPU_ARCEL) \ + || defined(NPY_CPU_RISCV64) \ + || defined(NPY_CPU_RISCV32) \ + || defined(NPY_CPU_LOONGARCH) \ + || defined(NPY_CPU_SW_64) \ + || defined(NPY_CPU_WASM) + #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN + + #elif defined(NPY_CPU_PPC) \ + || defined(NPY_CPU_SPARC) \ + || defined(NPY_CPU_S390) \ + || defined(NPY_CPU_HPPA) \ + || defined(NPY_CPU_PPC64) \ + || defined(NPY_CPU_ARMEB) \ + || defined(NPY_CPU_ARMEB_AARCH32) \ + || defined(NPY_CPU_ARMEB_AARCH64) \ + || defined(NPY_CPU_SH_BE) \ + || defined(NPY_CPU_MIPSEB) \ + || defined(NPY_CPU_OR1K) \ + || defined(NPY_CPU_M68K) \ + || defined(NPY_CPU_ARCEB) + #define NPY_BYTE_ORDER NPY_BIG_ENDIAN + + #else + #error Unknown CPU: can not set endianness + #endif + +#endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_ */ diff --git a/local_packages/numpy/_core/include/numpy/npy_math.h b/local_packages/numpy/_core/include/numpy/npy_math.h new file mode 100644 index 0000000..abc784b --- /dev/null +++ b/local_packages/numpy/_core/include/numpy/npy_math.h @@ -0,0 +1,602 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_ + +#include + +#include + +/* By adding static inline specifiers to npy_math function definitions when + appropriate, compiler is given the opportunity to optimize */ +#if NPY_INLINE_MATH +#define NPY_INPLACE static inline +#else +#define NPY_INPLACE +#endif + + +#ifdef __cplusplus +extern "C" { +#endif + +#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) +#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) + +/* + * NAN and INFINITY like macros (same behavior as glibc for NAN, same as C99 + * for INFINITY) + * + * XXX: I should test whether INFINITY and NAN are available on the platform + */ +static inline float __npy_inff(void) +{ + const union { npy_uint32 __i; float __f;} __bint = {0x7f800000UL}; + return __bint.__f; +} + +static inline float __npy_nanf(void) +{ + const union { npy_uint32 __i; float __f;} __bint = {0x7fc00000UL}; + return __bint.__f; +} + +static inline float __npy_pzerof(void) +{ + const union { npy_uint32 __i; float __f;} __bint = {0x00000000UL}; + return __bint.__f; +} + +static inline float __npy_nzerof(void) +{ + const union { npy_uint32 __i; float __f;} __bint = {0x80000000UL}; + return __bint.__f; +} + +#define NPY_INFINITYF __npy_inff() +#define NPY_NANF __npy_nanf() +#define NPY_PZEROF __npy_pzerof() +#define NPY_NZEROF __npy_nzerof() + +#define NPY_INFINITY ((npy_double)NPY_INFINITYF) +#define NPY_NAN ((npy_double)NPY_NANF) +#define NPY_PZERO ((npy_double)NPY_PZEROF) +#define NPY_NZERO ((npy_double)NPY_NZEROF) + +#define NPY_INFINITYL ((npy_longdouble)NPY_INFINITYF) +#define NPY_NANL ((npy_longdouble)NPY_NANF) +#define NPY_PZEROL ((npy_longdouble)NPY_PZEROF) +#define NPY_NZEROL ((npy_longdouble)NPY_NZEROF) + +/* + * Useful constants + */ +#define NPY_E 2.718281828459045235360287471352662498 /* e */ +#define NPY_LOG2E 1.442695040888963407359924681001892137 /* log_2 e */ +#define NPY_LOG10E 0.434294481903251827651128918916605082 /* log_10 e */ +#define NPY_LOGE2 0.693147180559945309417232121458176568 /* log_e 2 */ +#define NPY_LOGE10 2.302585092994045684017991454684364208 /* log_e 10 */ +#define NPY_PI 3.141592653589793238462643383279502884 /* pi */ +#define NPY_PI_2 1.570796326794896619231321691639751442 /* pi/2 */ +#define NPY_PI_4 0.785398163397448309615660845819875721 /* pi/4 */ +#define NPY_1_PI 0.318309886183790671537767526745028724 /* 1/pi */ +#define NPY_2_PI 0.636619772367581343075535053490057448 /* 2/pi */ +#define NPY_EULER 0.577215664901532860606512090082402431 /* Euler constant */ +#define NPY_SQRT2 1.414213562373095048801688724209698079 /* sqrt(2) */ +#define NPY_SQRT1_2 0.707106781186547524400844362104849039 /* 1/sqrt(2) */ + +#define NPY_Ef 2.718281828459045235360287471352662498F /* e */ +#define NPY_LOG2Ef 1.442695040888963407359924681001892137F /* log_2 e */ +#define NPY_LOG10Ef 0.434294481903251827651128918916605082F /* log_10 e */ +#define NPY_LOGE2f 0.693147180559945309417232121458176568F /* log_e 2 */ +#define NPY_LOGE10f 2.302585092994045684017991454684364208F /* log_e 10 */ +#define NPY_PIf 3.141592653589793238462643383279502884F /* pi */ +#define NPY_PI_2f 1.570796326794896619231321691639751442F /* pi/2 */ +#define NPY_PI_4f 0.785398163397448309615660845819875721F /* pi/4 */ +#define NPY_1_PIf 0.318309886183790671537767526745028724F /* 1/pi */ +#define NPY_2_PIf 0.636619772367581343075535053490057448F /* 2/pi */ +#define NPY_EULERf 0.577215664901532860606512090082402431F /* Euler constant */ +#define NPY_SQRT2f 1.414213562373095048801688724209698079F /* sqrt(2) */ +#define NPY_SQRT1_2f 0.707106781186547524400844362104849039F /* 1/sqrt(2) */ + +#define NPY_El 2.718281828459045235360287471352662498L /* e */ +#define NPY_LOG2El 1.442695040888963407359924681001892137L /* log_2 e */ +#define NPY_LOG10El 0.434294481903251827651128918916605082L /* log_10 e */ +#define NPY_LOGE2l 0.693147180559945309417232121458176568L /* log_e 2 */ +#define NPY_LOGE10l 2.302585092994045684017991454684364208L /* log_e 10 */ +#define NPY_PIl 3.141592653589793238462643383279502884L /* pi */ +#define NPY_PI_2l 1.570796326794896619231321691639751442L /* pi/2 */ +#define NPY_PI_4l 0.785398163397448309615660845819875721L /* pi/4 */ +#define NPY_1_PIl 0.318309886183790671537767526745028724L /* 1/pi */ +#define NPY_2_PIl 0.636619772367581343075535053490057448L /* 2/pi */ +#define NPY_EULERl 0.577215664901532860606512090082402431L /* Euler constant */ +#define NPY_SQRT2l 1.414213562373095048801688724209698079L /* sqrt(2) */ +#define NPY_SQRT1_2l 0.707106781186547524400844362104849039L /* 1/sqrt(2) */ + +/* + * Integer functions. + */ +NPY_INPLACE npy_uint npy_gcdu(npy_uint a, npy_uint b); +NPY_INPLACE npy_uint npy_lcmu(npy_uint a, npy_uint b); +NPY_INPLACE npy_ulong npy_gcdul(npy_ulong a, npy_ulong b); +NPY_INPLACE npy_ulong npy_lcmul(npy_ulong a, npy_ulong b); +NPY_INPLACE npy_ulonglong npy_gcdull(npy_ulonglong a, npy_ulonglong b); +NPY_INPLACE npy_ulonglong npy_lcmull(npy_ulonglong a, npy_ulonglong b); + +NPY_INPLACE npy_int npy_gcd(npy_int a, npy_int b); +NPY_INPLACE npy_int npy_lcm(npy_int a, npy_int b); +NPY_INPLACE npy_long npy_gcdl(npy_long a, npy_long b); +NPY_INPLACE npy_long npy_lcml(npy_long a, npy_long b); +NPY_INPLACE npy_longlong npy_gcdll(npy_longlong a, npy_longlong b); +NPY_INPLACE npy_longlong npy_lcmll(npy_longlong a, npy_longlong b); + +NPY_INPLACE npy_ubyte npy_rshiftuhh(npy_ubyte a, npy_ubyte b); +NPY_INPLACE npy_ubyte npy_lshiftuhh(npy_ubyte a, npy_ubyte b); +NPY_INPLACE npy_ushort npy_rshiftuh(npy_ushort a, npy_ushort b); +NPY_INPLACE npy_ushort npy_lshiftuh(npy_ushort a, npy_ushort b); +NPY_INPLACE npy_uint npy_rshiftu(npy_uint a, npy_uint b); +NPY_INPLACE npy_uint npy_lshiftu(npy_uint a, npy_uint b); +NPY_INPLACE npy_ulong npy_rshiftul(npy_ulong a, npy_ulong b); +NPY_INPLACE npy_ulong npy_lshiftul(npy_ulong a, npy_ulong b); +NPY_INPLACE npy_ulonglong npy_rshiftull(npy_ulonglong a, npy_ulonglong b); +NPY_INPLACE npy_ulonglong npy_lshiftull(npy_ulonglong a, npy_ulonglong b); + +NPY_INPLACE npy_byte npy_rshifthh(npy_byte a, npy_byte b); +NPY_INPLACE npy_byte npy_lshifthh(npy_byte a, npy_byte b); +NPY_INPLACE npy_short npy_rshifth(npy_short a, npy_short b); +NPY_INPLACE npy_short npy_lshifth(npy_short a, npy_short b); +NPY_INPLACE npy_int npy_rshift(npy_int a, npy_int b); +NPY_INPLACE npy_int npy_lshift(npy_int a, npy_int b); +NPY_INPLACE npy_long npy_rshiftl(npy_long a, npy_long b); +NPY_INPLACE npy_long npy_lshiftl(npy_long a, npy_long b); +NPY_INPLACE npy_longlong npy_rshiftll(npy_longlong a, npy_longlong b); +NPY_INPLACE npy_longlong npy_lshiftll(npy_longlong a, npy_longlong b); + +NPY_INPLACE uint8_t npy_popcountuhh(npy_ubyte a); +NPY_INPLACE uint8_t npy_popcountuh(npy_ushort a); +NPY_INPLACE uint8_t npy_popcountu(npy_uint a); +NPY_INPLACE uint8_t npy_popcountul(npy_ulong a); +NPY_INPLACE uint8_t npy_popcountull(npy_ulonglong a); +NPY_INPLACE uint8_t npy_popcounthh(npy_byte a); +NPY_INPLACE uint8_t npy_popcounth(npy_short a); +NPY_INPLACE uint8_t npy_popcount(npy_int a); +NPY_INPLACE uint8_t npy_popcountl(npy_long a); +NPY_INPLACE uint8_t npy_popcountll(npy_longlong a); + +/* + * C99 double math funcs that need fixups or are blocklist-able + */ +NPY_INPLACE double npy_sin(double x); +NPY_INPLACE double npy_cos(double x); +NPY_INPLACE double npy_tan(double x); +NPY_INPLACE double npy_hypot(double x, double y); +NPY_INPLACE double npy_log2(double x); +NPY_INPLACE double npy_atan2(double x, double y); + +/* Mandatory C99 double math funcs, no blocklisting or fixups */ +/* defined for legacy reasons, should be deprecated at some point */ +#define npy_sinh sinh +#define npy_cosh cosh +#define npy_tanh tanh +#define npy_asin asin +#define npy_acos acos +#define npy_atan atan +#define npy_log log +#define npy_log10 log10 +#define npy_cbrt cbrt +#define npy_fabs fabs +#define npy_ceil ceil +#define npy_fmod fmod +#define npy_floor floor +#define npy_expm1 expm1 +#define npy_log1p log1p +#define npy_acosh acosh +#define npy_asinh asinh +#define npy_atanh atanh +#define npy_rint rint +#define npy_trunc trunc +#define npy_exp2 exp2 +#define npy_frexp frexp +#define npy_ldexp ldexp +#define npy_copysign copysign +#define npy_exp exp +#define npy_sqrt sqrt +#define npy_pow pow +#define npy_modf modf +#define npy_nextafter nextafter + +double npy_spacing(double x); + +/* + * IEEE 754 fpu handling + */ + +/* use builtins to avoid function calls in tight loops + * only available if npy_config.h is available (= numpys own build) */ +#ifdef HAVE___BUILTIN_ISNAN + #define npy_isnan(x) __builtin_isnan(x) +#else + #define npy_isnan(x) isnan(x) +#endif + + +/* only available if npy_config.h is available (= numpys own build) */ +#ifdef HAVE___BUILTIN_ISFINITE + #define npy_isfinite(x) __builtin_isfinite(x) +#else + #define npy_isfinite(x) isfinite((x)) +#endif + +/* only available if npy_config.h is available (= numpys own build) */ +#ifdef HAVE___BUILTIN_ISINF + #define npy_isinf(x) __builtin_isinf(x) +#else + #define npy_isinf(x) isinf((x)) +#endif + +#define npy_signbit(x) signbit((x)) + +/* + * float C99 math funcs that need fixups or are blocklist-able + */ +NPY_INPLACE float npy_sinf(float x); +NPY_INPLACE float npy_cosf(float x); +NPY_INPLACE float npy_tanf(float x); +NPY_INPLACE float npy_expf(float x); +NPY_INPLACE float npy_sqrtf(float x); +NPY_INPLACE float npy_hypotf(float x, float y); +NPY_INPLACE float npy_log2f(float x); +NPY_INPLACE float npy_atan2f(float x, float y); +NPY_INPLACE float npy_powf(float x, float y); +NPY_INPLACE float npy_modff(float x, float* y); + +/* Mandatory C99 float math funcs, no blocklisting or fixups */ +/* defined for legacy reasons, should be deprecated at some point */ + +#define npy_sinhf sinhf +#define npy_coshf coshf +#define npy_tanhf tanhf +#define npy_asinf asinf +#define npy_acosf acosf +#define npy_atanf atanf +#define npy_logf logf +#define npy_log10f log10f +#define npy_cbrtf cbrtf +#define npy_fabsf fabsf +#define npy_ceilf ceilf +#define npy_fmodf fmodf +#define npy_floorf floorf +#define npy_expm1f expm1f +#define npy_log1pf log1pf +#define npy_asinhf asinhf +#define npy_acoshf acoshf +#define npy_atanhf atanhf +#define npy_rintf rintf +#define npy_truncf truncf +#define npy_exp2f exp2f +#define npy_frexpf frexpf +#define npy_ldexpf ldexpf +#define npy_copysignf copysignf +#define npy_nextafterf nextafterf + +float npy_spacingf(float x); + +/* + * long double C99 double math funcs that need fixups or are blocklist-able + */ +NPY_INPLACE npy_longdouble npy_sinl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_cosl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_tanl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_expl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_sqrtl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_hypotl(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_log2l(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_atan2l(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_powl(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_modfl(npy_longdouble x, npy_longdouble* y); + +/* Mandatory C99 double math funcs, no blocklisting or fixups */ +/* defined for legacy reasons, should be deprecated at some point */ +#define npy_sinhl sinhl +#define npy_coshl coshl +#define npy_tanhl tanhl +#define npy_fabsl fabsl +#define npy_floorl floorl +#define npy_ceill ceill +#define npy_rintl rintl +#define npy_truncl truncl +#define npy_cbrtl cbrtl +#define npy_log10l log10l +#define npy_logl logl +#define npy_expm1l expm1l +#define npy_asinl asinl +#define npy_acosl acosl +#define npy_atanl atanl +#define npy_asinhl asinhl +#define npy_acoshl acoshl +#define npy_atanhl atanhl +#define npy_log1pl log1pl +#define npy_exp2l exp2l +#define npy_fmodl fmodl +#define npy_frexpl frexpl +#define npy_ldexpl ldexpl +#define npy_copysignl copysignl +#define npy_nextafterl nextafterl + +npy_longdouble npy_spacingl(npy_longdouble x); + +/* + * Non standard functions + */ +NPY_INPLACE double npy_deg2rad(double x); +NPY_INPLACE double npy_rad2deg(double x); +NPY_INPLACE double npy_logaddexp(double x, double y); +NPY_INPLACE double npy_logaddexp2(double x, double y); +NPY_INPLACE double npy_divmod(double x, double y, double *modulus); +NPY_INPLACE double npy_heaviside(double x, double h0); + +NPY_INPLACE float npy_deg2radf(float x); +NPY_INPLACE float npy_rad2degf(float x); +NPY_INPLACE float npy_logaddexpf(float x, float y); +NPY_INPLACE float npy_logaddexp2f(float x, float y); +NPY_INPLACE float npy_divmodf(float x, float y, float *modulus); +NPY_INPLACE float npy_heavisidef(float x, float h0); + +NPY_INPLACE npy_longdouble npy_deg2radl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_rad2degl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_logaddexpl(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_logaddexp2l(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_divmodl(npy_longdouble x, npy_longdouble y, + npy_longdouble *modulus); +NPY_INPLACE npy_longdouble npy_heavisidel(npy_longdouble x, npy_longdouble h0); + +#define npy_degrees npy_rad2deg +#define npy_degreesf npy_rad2degf +#define npy_degreesl npy_rad2degl + +#define npy_radians npy_deg2rad +#define npy_radiansf npy_deg2radf +#define npy_radiansl npy_deg2radl + +/* + * Complex declarations + */ + +static inline double npy_creal(const npy_cdouble z) +{ +#if defined(__cplusplus) + return z._Val[0]; +#else + return creal(z); +#endif +} + +static inline void npy_csetreal(npy_cdouble *z, const double r) +{ + ((double *) z)[0] = r; +} + +static inline double npy_cimag(const npy_cdouble z) +{ +#if defined(__cplusplus) + return z._Val[1]; +#else + return cimag(z); +#endif +} + +static inline void npy_csetimag(npy_cdouble *z, const double i) +{ + ((double *) z)[1] = i; +} + +static inline float npy_crealf(const npy_cfloat z) +{ +#if defined(__cplusplus) + return z._Val[0]; +#else + return crealf(z); +#endif +} + +static inline void npy_csetrealf(npy_cfloat *z, const float r) +{ + ((float *) z)[0] = r; +} + +static inline float npy_cimagf(const npy_cfloat z) +{ +#if defined(__cplusplus) + return z._Val[1]; +#else + return cimagf(z); +#endif +} + +static inline void npy_csetimagf(npy_cfloat *z, const float i) +{ + ((float *) z)[1] = i; +} + +static inline npy_longdouble npy_creall(const npy_clongdouble z) +{ +#if defined(__cplusplus) + return (npy_longdouble)z._Val[0]; +#else + return creall(z); +#endif +} + +static inline void npy_csetreall(npy_clongdouble *z, const longdouble_t r) +{ + ((longdouble_t *) z)[0] = r; +} + +static inline npy_longdouble npy_cimagl(const npy_clongdouble z) +{ +#if defined(__cplusplus) + return (npy_longdouble)z._Val[1]; +#else + return cimagl(z); +#endif +} + +static inline void npy_csetimagl(npy_clongdouble *z, const longdouble_t i) +{ + ((longdouble_t *) z)[1] = i; +} + +#define NPY_CSETREAL(z, r) npy_csetreal(z, r) +#define NPY_CSETIMAG(z, i) npy_csetimag(z, i) +#define NPY_CSETREALF(z, r) npy_csetrealf(z, r) +#define NPY_CSETIMAGF(z, i) npy_csetimagf(z, i) +#define NPY_CSETREALL(z, r) npy_csetreall(z, r) +#define NPY_CSETIMAGL(z, i) npy_csetimagl(z, i) + +static inline npy_cdouble npy_cpack(double x, double y) +{ + npy_cdouble z; + npy_csetreal(&z, x); + npy_csetimag(&z, y); + return z; +} + +static inline npy_cfloat npy_cpackf(float x, float y) +{ + npy_cfloat z; + npy_csetrealf(&z, x); + npy_csetimagf(&z, y); + return z; +} + +static inline npy_clongdouble npy_cpackl(npy_longdouble x, npy_longdouble y) +{ + npy_clongdouble z; + npy_csetreall(&z, x); + npy_csetimagl(&z, y); + return z; +} + +/* + * Double precision complex functions + */ +double npy_cabs(npy_cdouble z); +double npy_carg(npy_cdouble z); + +npy_cdouble npy_cexp(npy_cdouble z); +npy_cdouble npy_clog(npy_cdouble z); +npy_cdouble npy_cpow(npy_cdouble x, npy_cdouble y); + +npy_cdouble npy_csqrt(npy_cdouble z); + +npy_cdouble npy_ccos(npy_cdouble z); +npy_cdouble npy_csin(npy_cdouble z); +npy_cdouble npy_ctan(npy_cdouble z); + +npy_cdouble npy_ccosh(npy_cdouble z); +npy_cdouble npy_csinh(npy_cdouble z); +npy_cdouble npy_ctanh(npy_cdouble z); + +npy_cdouble npy_cacos(npy_cdouble z); +npy_cdouble npy_casin(npy_cdouble z); +npy_cdouble npy_catan(npy_cdouble z); + +npy_cdouble npy_cacosh(npy_cdouble z); +npy_cdouble npy_casinh(npy_cdouble z); +npy_cdouble npy_catanh(npy_cdouble z); + +/* + * Single precision complex functions + */ +float npy_cabsf(npy_cfloat z); +float npy_cargf(npy_cfloat z); + +npy_cfloat npy_cexpf(npy_cfloat z); +npy_cfloat npy_clogf(npy_cfloat z); +npy_cfloat npy_cpowf(npy_cfloat x, npy_cfloat y); + +npy_cfloat npy_csqrtf(npy_cfloat z); + +npy_cfloat npy_ccosf(npy_cfloat z); +npy_cfloat npy_csinf(npy_cfloat z); +npy_cfloat npy_ctanf(npy_cfloat z); + +npy_cfloat npy_ccoshf(npy_cfloat z); +npy_cfloat npy_csinhf(npy_cfloat z); +npy_cfloat npy_ctanhf(npy_cfloat z); + +npy_cfloat npy_cacosf(npy_cfloat z); +npy_cfloat npy_casinf(npy_cfloat z); +npy_cfloat npy_catanf(npy_cfloat z); + +npy_cfloat npy_cacoshf(npy_cfloat z); +npy_cfloat npy_casinhf(npy_cfloat z); +npy_cfloat npy_catanhf(npy_cfloat z); + + +/* + * Extended precision complex functions + */ +npy_longdouble npy_cabsl(npy_clongdouble z); +npy_longdouble npy_cargl(npy_clongdouble z); + +npy_clongdouble npy_cexpl(npy_clongdouble z); +npy_clongdouble npy_clogl(npy_clongdouble z); +npy_clongdouble npy_cpowl(npy_clongdouble x, npy_clongdouble y); + +npy_clongdouble npy_csqrtl(npy_clongdouble z); + +npy_clongdouble npy_ccosl(npy_clongdouble z); +npy_clongdouble npy_csinl(npy_clongdouble z); +npy_clongdouble npy_ctanl(npy_clongdouble z); + +npy_clongdouble npy_ccoshl(npy_clongdouble z); +npy_clongdouble npy_csinhl(npy_clongdouble z); +npy_clongdouble npy_ctanhl(npy_clongdouble z); + +npy_clongdouble npy_cacosl(npy_clongdouble z); +npy_clongdouble npy_casinl(npy_clongdouble z); +npy_clongdouble npy_catanl(npy_clongdouble z); + +npy_clongdouble npy_cacoshl(npy_clongdouble z); +npy_clongdouble npy_casinhl(npy_clongdouble z); +npy_clongdouble npy_catanhl(npy_clongdouble z); + + +/* + * Functions that set the floating point error + * status word. + */ + +/* + * platform-dependent code translates floating point + * status to an integer sum of these values + */ +#define NPY_FPE_DIVIDEBYZERO 1 +#define NPY_FPE_OVERFLOW 2 +#define NPY_FPE_UNDERFLOW 4 +#define NPY_FPE_INVALID 8 + +int npy_clear_floatstatus_barrier(char*); +int npy_get_floatstatus_barrier(char*); +/* + * use caution with these - clang and gcc8.1 are known to reorder calls + * to this form of the function which can defeat the check. The _barrier + * form of the call is preferable, where the argument is + * (char*)&local_variable + */ +int npy_clear_floatstatus(void); +int npy_get_floatstatus(void); + +void npy_set_floatstatus_divbyzero(void); +void npy_set_floatstatus_overflow(void); +void npy_set_floatstatus_underflow(void); +void npy_set_floatstatus_invalid(void); + +#ifdef __cplusplus +} +#endif + +#if NPY_INLINE_MATH +#include "npy_math_internal.h" +#endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_ */ diff --git a/local_packages/numpy/_core/include/numpy/npy_no_deprecated_api.h b/local_packages/numpy/_core/include/numpy/npy_no_deprecated_api.h new file mode 100644 index 0000000..39658c0 --- /dev/null +++ b/local_packages/numpy/_core/include/numpy/npy_no_deprecated_api.h @@ -0,0 +1,20 @@ +/* + * This include file is provided for inclusion in Cython *.pyd files where + * one would like to define the NPY_NO_DEPRECATED_API macro. It can be + * included by + * + * cdef extern from "npy_no_deprecated_api.h": pass + * + */ +#ifndef NPY_NO_DEPRECATED_API + +/* put this check here since there may be multiple includes in C extensions. */ +#if defined(NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_) || \ + defined(NUMPY_CORE_INCLUDE_NUMPY_NPY_DEPRECATED_API_H) || \ + defined(NUMPY_CORE_INCLUDE_NUMPY_OLD_DEFINES_H_) +#error "npy_no_deprecated_api.h" must be first among numpy includes. +#else +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#endif + +#endif /* NPY_NO_DEPRECATED_API */ diff --git a/local_packages/numpy/_core/include/numpy/npy_os.h b/local_packages/numpy/_core/include/numpy/npy_os.h new file mode 100644 index 0000000..0ce5d78 --- /dev/null +++ b/local_packages/numpy/_core/include/numpy/npy_os.h @@ -0,0 +1,42 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_ + +#if defined(linux) || defined(__linux) || defined(__linux__) + #define NPY_OS_LINUX +#elif defined(__FreeBSD__) || defined(__NetBSD__) || \ + defined(__OpenBSD__) || defined(__DragonFly__) + #define NPY_OS_BSD + #ifdef __FreeBSD__ + #define NPY_OS_FREEBSD + #elif defined(__NetBSD__) + #define NPY_OS_NETBSD + #elif defined(__OpenBSD__) + #define NPY_OS_OPENBSD + #elif defined(__DragonFly__) + #define NPY_OS_DRAGONFLY + #endif +#elif defined(sun) || defined(__sun) + #define NPY_OS_SOLARIS +#elif defined(__CYGWIN__) + #define NPY_OS_CYGWIN +/* We are on Windows.*/ +#elif defined(_WIN32) + /* We are using MinGW (64-bit or 32-bit)*/ + #if defined(__MINGW32__) || defined(__MINGW64__) + #define NPY_OS_MINGW + /* Otherwise, if _WIN64 is defined, we are targeting 64-bit Windows*/ + #elif defined(_WIN64) + #define NPY_OS_WIN64 + /* Otherwise assume we are targeting 32-bit Windows*/ + #else + #define NPY_OS_WIN32 + #endif +#elif defined(__APPLE__) + #define NPY_OS_DARWIN +#elif defined(__HAIKU__) + #define NPY_OS_HAIKU +#else + #define NPY_OS_UNKNOWN +#endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_ */ diff --git a/local_packages/numpy/_core/include/numpy/numpyconfig.h b/local_packages/numpy/_core/include/numpy/numpyconfig.h new file mode 100644 index 0000000..c129a3a --- /dev/null +++ b/local_packages/numpy/_core/include/numpy/numpyconfig.h @@ -0,0 +1,185 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_ + +#include "_numpyconfig.h" + +/* + * On Mac OS X, because there is only one configuration stage for all the archs + * in universal builds, any macro which depends on the arch needs to be + * hardcoded. + * + * Note that distutils/pip will attempt a universal2 build when Python itself + * is built as universal2, hence this hardcoding is needed even if we do not + * support universal2 wheels anymore (see gh-22796). + * This code block can be removed after we have dropped the setup.py based + * build completely. + */ +#ifdef __APPLE__ + #undef NPY_SIZEOF_LONG + + #ifdef __LP64__ + #define NPY_SIZEOF_LONG 8 + #else + #define NPY_SIZEOF_LONG 4 + #endif + + #undef NPY_SIZEOF_LONGDOUBLE + #undef NPY_SIZEOF_COMPLEX_LONGDOUBLE + #ifdef HAVE_LDOUBLE_IEEE_DOUBLE_LE + #undef HAVE_LDOUBLE_IEEE_DOUBLE_LE + #endif + #ifdef HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE + #undef HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE + #endif + + #if defined(__arm64__) + #define NPY_SIZEOF_LONGDOUBLE 8 + #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 16 + #define HAVE_LDOUBLE_IEEE_DOUBLE_LE 1 + #elif defined(__x86_64) + #define NPY_SIZEOF_LONGDOUBLE 16 + #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32 + #define HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE 1 + #elif defined (__i386) + #define NPY_SIZEOF_LONGDOUBLE 12 + #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 24 + #elif defined(__ppc__) || defined (__ppc64__) + #define NPY_SIZEOF_LONGDOUBLE 16 + #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32 + #else + #error "unknown architecture" + #endif +#endif + + +/** + * To help with both NPY_TARGET_VERSION and the NPY_NO_DEPRECATED_API macro, + * we include API version numbers for specific versions of NumPy. + * To exclude all API that was deprecated as of 1.7, add the following before + * #including any NumPy headers: + * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + * The same is true for NPY_TARGET_VERSION, although NumPy will default to + * a backwards compatible build anyway. + */ +#define NPY_1_7_API_VERSION 0x00000007 +#define NPY_1_8_API_VERSION 0x00000008 +#define NPY_1_9_API_VERSION 0x00000009 +#define NPY_1_10_API_VERSION 0x0000000a +#define NPY_1_11_API_VERSION 0x0000000a +#define NPY_1_12_API_VERSION 0x0000000a +#define NPY_1_13_API_VERSION 0x0000000b +#define NPY_1_14_API_VERSION 0x0000000c +#define NPY_1_15_API_VERSION 0x0000000c +#define NPY_1_16_API_VERSION 0x0000000d +#define NPY_1_17_API_VERSION 0x0000000d +#define NPY_1_18_API_VERSION 0x0000000d +#define NPY_1_19_API_VERSION 0x0000000d +#define NPY_1_20_API_VERSION 0x0000000e +#define NPY_1_21_API_VERSION 0x0000000e +#define NPY_1_22_API_VERSION 0x0000000f +#define NPY_1_23_API_VERSION 0x00000010 +#define NPY_1_24_API_VERSION 0x00000010 +#define NPY_1_25_API_VERSION 0x00000011 +#define NPY_2_0_API_VERSION 0x00000012 +#define NPY_2_1_API_VERSION 0x00000013 +#define NPY_2_2_API_VERSION 0x00000013 +#define NPY_2_3_API_VERSION 0x00000014 +#define NPY_2_4_API_VERSION 0x00000015 + + +/* + * Binary compatibility version number. This number is increased + * whenever the C-API is changed such that binary compatibility is + * broken, i.e. whenever a recompile of extension modules is needed. + */ +#define NPY_VERSION NPY_ABI_VERSION + +/* + * Minor API version we are compiling to be compatible with. The version + * Number is always increased when the API changes via: `NPY_API_VERSION` + * (and should maybe just track the NumPy version). + * + * If we have an internal build, we always target the current version of + * course. + * + * For downstream users, we default to an older version to provide them with + * maximum compatibility by default. Downstream can choose to extend that + * default, or narrow it down if they wish to use newer API. If you adjust + * this, consider the Python version support (example for 1.25.x): + * + * NumPy 1.25.x supports Python: 3.9 3.10 3.11 (3.12) + * NumPy 1.19.x supports Python: 3.6 3.7 3.8 3.9 + * NumPy 1.17.x supports Python: 3.5 3.6 3.7 3.8 + * NumPy 1.15.x supports Python: ... 3.6 3.7 + * + * Users of the stable ABI may wish to target the last Python that is not + * end of life. This would be 3.8 at NumPy 1.25 release time. + * 1.17 as default was the choice of oldest-support-numpy at the time and + * has in practice no limit (compared to 1.19). Even earlier becomes legacy. + */ +#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD + /* NumPy internal build, always use current version. */ + #define NPY_FEATURE_VERSION NPY_API_VERSION +#elif defined(NPY_TARGET_VERSION) && NPY_TARGET_VERSION + /* user provided a target version, use it */ + #define NPY_FEATURE_VERSION NPY_TARGET_VERSION +#else + /* Use the default (increase when dropping Python 3.11 support) */ + #define NPY_FEATURE_VERSION NPY_1_23_API_VERSION +#endif + +/* Sanity check the (requested) feature version */ +#if NPY_FEATURE_VERSION > NPY_API_VERSION + #error "NPY_TARGET_VERSION higher than NumPy headers!" +#elif NPY_FEATURE_VERSION < NPY_1_15_API_VERSION + /* No support for irrelevant old targets, no need for error, but warn. */ + #ifndef _MSC_VER + #warning "Requested NumPy target lower than supported NumPy 1.15." + #else + #define _WARN___STR2__(x) #x + #define _WARN___STR1__(x) _WARN___STR2__(x) + #define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: " + #pragma message(_WARN___LOC__"Requested NumPy target lower than supported NumPy 1.15.") + #endif +#endif + +/* + * We define a human readable translation to the Python version of NumPy + * for error messages (and also to allow grepping the binaries for conda). + */ +#if NPY_FEATURE_VERSION == NPY_1_7_API_VERSION + #define NPY_FEATURE_VERSION_STRING "1.7" +#elif NPY_FEATURE_VERSION == NPY_1_8_API_VERSION + #define NPY_FEATURE_VERSION_STRING "1.8" +#elif NPY_FEATURE_VERSION == NPY_1_9_API_VERSION + #define NPY_FEATURE_VERSION_STRING "1.9" +#elif NPY_FEATURE_VERSION == NPY_1_10_API_VERSION /* also 1.11, 1.12 */ + #define NPY_FEATURE_VERSION_STRING "1.10" +#elif NPY_FEATURE_VERSION == NPY_1_13_API_VERSION + #define NPY_FEATURE_VERSION_STRING "1.13" +#elif NPY_FEATURE_VERSION == NPY_1_14_API_VERSION /* also 1.15 */ + #define NPY_FEATURE_VERSION_STRING "1.14" +#elif NPY_FEATURE_VERSION == NPY_1_16_API_VERSION /* also 1.17, 1.18, 1.19 */ + #define NPY_FEATURE_VERSION_STRING "1.16" +#elif NPY_FEATURE_VERSION == NPY_1_20_API_VERSION /* also 1.21 */ + #define NPY_FEATURE_VERSION_STRING "1.20" +#elif NPY_FEATURE_VERSION == NPY_1_22_API_VERSION + #define NPY_FEATURE_VERSION_STRING "1.22" +#elif NPY_FEATURE_VERSION == NPY_1_23_API_VERSION /* also 1.24 */ + #define NPY_FEATURE_VERSION_STRING "1.23" +#elif NPY_FEATURE_VERSION == NPY_1_25_API_VERSION + #define NPY_FEATURE_VERSION_STRING "1.25" +#elif NPY_FEATURE_VERSION == NPY_2_0_API_VERSION + #define NPY_FEATURE_VERSION_STRING "2.0" +#elif NPY_FEATURE_VERSION == NPY_2_1_API_VERSION + #define NPY_FEATURE_VERSION_STRING "2.1" +#elif NPY_FEATURE_VERSION == NPY_2_3_API_VERSION + #define NPY_FEATURE_VERSION_STRING "2.3" +#elif NPY_FEATURE_VERSION == NPY_2_4_API_VERSION + #define NPY_FEATURE_VERSION_STRING "2.4" +#else + #error "Missing version string define for new NumPy version." +#endif + + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_ */ diff --git a/local_packages/numpy/_core/include/numpy/random/LICENSE.txt b/local_packages/numpy/_core/include/numpy/random/LICENSE.txt new file mode 100644 index 0000000..d72a7c3 --- /dev/null +++ b/local_packages/numpy/_core/include/numpy/random/LICENSE.txt @@ -0,0 +1,21 @@ + zlib License + ------------ + + Copyright (C) 2010 - 2019 ridiculous_fish, + Copyright (C) 2016 - 2019 Kim Walisch, + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. diff --git a/local_packages/numpy/_core/include/numpy/random/bitgen.h b/local_packages/numpy/_core/include/numpy/random/bitgen.h new file mode 100644 index 0000000..162dd5c --- /dev/null +++ b/local_packages/numpy/_core/include/numpy/random/bitgen.h @@ -0,0 +1,20 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_ + +#pragma once +#include +#include +#include + +/* Must match the declaration in numpy/random/.pxd */ + +typedef struct bitgen { + void *state; + uint64_t (*next_uint64)(void *st); + uint32_t (*next_uint32)(void *st); + double (*next_double)(void *st); + uint64_t (*next_raw)(void *st); +} bitgen_t; + + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_ */ diff --git a/local_packages/numpy/_core/include/numpy/random/distributions.h b/local_packages/numpy/_core/include/numpy/random/distributions.h new file mode 100644 index 0000000..e7fa4bd --- /dev/null +++ b/local_packages/numpy/_core/include/numpy/random/distributions.h @@ -0,0 +1,209 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_RANDOM_DISTRIBUTIONS_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_RANDOM_DISTRIBUTIONS_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include "numpy/npy_common.h" +#include +#include +#include + +#include "numpy/npy_math.h" +#include "numpy/random/bitgen.h" + +/* + * RAND_INT_TYPE is used to share integer generators with RandomState which + * used long in place of int64_t. If changing a distribution that uses + * RAND_INT_TYPE, then the original unmodified copy must be retained for + * use in RandomState by copying to the legacy distributions source file. + */ +#ifdef NP_RANDOM_LEGACY +#define RAND_INT_TYPE long +#define RAND_INT_MAX LONG_MAX +#else +#define RAND_INT_TYPE int64_t +#define RAND_INT_MAX INT64_MAX +#endif + +#ifdef _MSC_VER +#define DECLDIR __declspec(dllexport) +#else +#define DECLDIR extern +#endif + +#ifndef MIN +#define MIN(x, y) (((x) < (y)) ? x : y) +#define MAX(x, y) (((x) > (y)) ? x : y) +#endif + +#ifndef M_PI +#define M_PI 3.14159265358979323846264338328 +#endif + +typedef struct s_binomial_t { + int has_binomial; /* !=0: following parameters initialized for binomial */ + double psave; + RAND_INT_TYPE nsave; + double r; + double q; + double fm; + RAND_INT_TYPE m; + double p1; + double xm; + double xl; + double xr; + double c; + double laml; + double lamr; + double p2; + double p3; + double p4; +} binomial_t; + +DECLDIR float random_standard_uniform_f(bitgen_t *bitgen_state); +DECLDIR double random_standard_uniform(bitgen_t *bitgen_state); +DECLDIR void random_standard_uniform_fill(bitgen_t *, npy_intp, double *); +DECLDIR void random_standard_uniform_fill_f(bitgen_t *, npy_intp, float *); + +DECLDIR int64_t random_positive_int64(bitgen_t *bitgen_state); +DECLDIR int32_t random_positive_int32(bitgen_t *bitgen_state); +DECLDIR int64_t random_positive_int(bitgen_t *bitgen_state); +DECLDIR uint64_t random_uint(bitgen_t *bitgen_state); + +DECLDIR double random_standard_exponential(bitgen_t *bitgen_state); +DECLDIR float random_standard_exponential_f(bitgen_t *bitgen_state); +DECLDIR void random_standard_exponential_fill(bitgen_t *, npy_intp, double *); +DECLDIR void random_standard_exponential_fill_f(bitgen_t *, npy_intp, float *); +DECLDIR void random_standard_exponential_inv_fill(bitgen_t *, npy_intp, double *); +DECLDIR void random_standard_exponential_inv_fill_f(bitgen_t *, npy_intp, float *); + +DECLDIR double random_standard_normal(bitgen_t *bitgen_state); +DECLDIR float random_standard_normal_f(bitgen_t *bitgen_state); +DECLDIR void random_standard_normal_fill(bitgen_t *, npy_intp, double *); +DECLDIR void random_standard_normal_fill_f(bitgen_t *, npy_intp, float *); +DECLDIR double random_standard_gamma(bitgen_t *bitgen_state, double shape); +DECLDIR float random_standard_gamma_f(bitgen_t *bitgen_state, float shape); + +DECLDIR double random_normal(bitgen_t *bitgen_state, double loc, double scale); + +DECLDIR double random_gamma(bitgen_t *bitgen_state, double shape, double scale); +DECLDIR float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale); + +DECLDIR double random_exponential(bitgen_t *bitgen_state, double scale); +DECLDIR double random_uniform(bitgen_t *bitgen_state, double lower, double range); +DECLDIR double random_beta(bitgen_t *bitgen_state, double a, double b); +DECLDIR double random_chisquare(bitgen_t *bitgen_state, double df); +DECLDIR double random_f(bitgen_t *bitgen_state, double dfnum, double dfden); +DECLDIR double random_standard_cauchy(bitgen_t *bitgen_state); +DECLDIR double random_pareto(bitgen_t *bitgen_state, double a); +DECLDIR double random_weibull(bitgen_t *bitgen_state, double a); +DECLDIR double random_power(bitgen_t *bitgen_state, double a); +DECLDIR double random_laplace(bitgen_t *bitgen_state, double loc, double scale); +DECLDIR double random_gumbel(bitgen_t *bitgen_state, double loc, double scale); +DECLDIR double random_logistic(bitgen_t *bitgen_state, double loc, double scale); +DECLDIR double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma); +DECLDIR double random_rayleigh(bitgen_t *bitgen_state, double mode); +DECLDIR double random_standard_t(bitgen_t *bitgen_state, double df); +DECLDIR double random_noncentral_chisquare(bitgen_t *bitgen_state, double df, + double nonc); +DECLDIR double random_noncentral_f(bitgen_t *bitgen_state, double dfnum, + double dfden, double nonc); +DECLDIR double random_wald(bitgen_t *bitgen_state, double mean, double scale); +DECLDIR double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa); +DECLDIR double random_triangular(bitgen_t *bitgen_state, double left, double mode, + double right); + +DECLDIR RAND_INT_TYPE random_poisson(bitgen_t *bitgen_state, double lam); +DECLDIR RAND_INT_TYPE random_negative_binomial(bitgen_t *bitgen_state, double n, + double p); + +DECLDIR int64_t random_binomial(bitgen_t *bitgen_state, double p, + int64_t n, binomial_t *binomial); + +DECLDIR int64_t random_logseries(bitgen_t *bitgen_state, double p); +DECLDIR int64_t random_geometric(bitgen_t *bitgen_state, double p); +DECLDIR RAND_INT_TYPE random_geometric_search(bitgen_t *bitgen_state, double p); +DECLDIR RAND_INT_TYPE random_zipf(bitgen_t *bitgen_state, double a); +DECLDIR int64_t random_hypergeometric(bitgen_t *bitgen_state, + int64_t good, int64_t bad, int64_t sample); +DECLDIR uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max); + +/* Generate random uint64 numbers in closed interval [off, off + rng]. */ +DECLDIR uint64_t random_bounded_uint64(bitgen_t *bitgen_state, uint64_t off, + uint64_t rng, uint64_t mask, + bool use_masked); + +/* Generate random uint32 numbers in closed interval [off, off + rng]. */ +DECLDIR uint32_t random_buffered_bounded_uint32(bitgen_t *bitgen_state, + uint32_t off, uint32_t rng, + uint32_t mask, bool use_masked, + int *bcnt, uint32_t *buf); +DECLDIR uint16_t random_buffered_bounded_uint16(bitgen_t *bitgen_state, + uint16_t off, uint16_t rng, + uint16_t mask, bool use_masked, + int *bcnt, uint32_t *buf); +DECLDIR uint8_t random_buffered_bounded_uint8(bitgen_t *bitgen_state, uint8_t off, + uint8_t rng, uint8_t mask, + bool use_masked, int *bcnt, + uint32_t *buf); +DECLDIR npy_bool random_buffered_bounded_bool(bitgen_t *bitgen_state, npy_bool off, + npy_bool rng, npy_bool mask, + bool use_masked, int *bcnt, + uint32_t *buf); + +DECLDIR void random_bounded_uint64_fill(bitgen_t *bitgen_state, uint64_t off, + uint64_t rng, npy_intp cnt, + bool use_masked, uint64_t *out); +DECLDIR void random_bounded_uint32_fill(bitgen_t *bitgen_state, uint32_t off, + uint32_t rng, npy_intp cnt, + bool use_masked, uint32_t *out); +DECLDIR void random_bounded_uint16_fill(bitgen_t *bitgen_state, uint16_t off, + uint16_t rng, npy_intp cnt, + bool use_masked, uint16_t *out); +DECLDIR void random_bounded_uint8_fill(bitgen_t *bitgen_state, uint8_t off, + uint8_t rng, npy_intp cnt, + bool use_masked, uint8_t *out); +DECLDIR void random_bounded_bool_fill(bitgen_t *bitgen_state, npy_bool off, + npy_bool rng, npy_intp cnt, + bool use_masked, npy_bool *out); + +DECLDIR void random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n, RAND_INT_TYPE *mnix, + double *pix, npy_intp d, binomial_t *binomial); + +/* multivariate hypergeometric, "count" method */ +DECLDIR int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state, + int64_t total, + size_t num_colors, int64_t *colors, + int64_t nsample, + size_t num_variates, int64_t *variates); + +/* multivariate hypergeometric, "marginals" method */ +DECLDIR void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state, + int64_t total, + size_t num_colors, int64_t *colors, + int64_t nsample, + size_t num_variates, int64_t *variates); + +/* Common to legacy-distributions.c and distributions.c but not exported */ + +RAND_INT_TYPE random_binomial_btpe(bitgen_t *bitgen_state, + RAND_INT_TYPE n, + double p, + binomial_t *binomial); +RAND_INT_TYPE random_binomial_inversion(bitgen_t *bitgen_state, + RAND_INT_TYPE n, + double p, + binomial_t *binomial); +double random_loggam(double x); +static inline double next_double(bitgen_t *bitgen_state) { + return bitgen_state->next_double(bitgen_state->state); +} + +#ifdef __cplusplus +} +#endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_RANDOM_DISTRIBUTIONS_H_ */ diff --git a/local_packages/numpy/_core/include/numpy/random/libdivide.h b/local_packages/numpy/_core/include/numpy/random/libdivide.h new file mode 100644 index 0000000..f4eb803 --- /dev/null +++ b/local_packages/numpy/_core/include/numpy/random/libdivide.h @@ -0,0 +1,2079 @@ +// libdivide.h - Optimized integer division +// https://libdivide.com +// +// Copyright (C) 2010 - 2019 ridiculous_fish, +// Copyright (C) 2016 - 2019 Kim Walisch, +// +// libdivide is dual-licensed under the Boost or zlib licenses. +// You may use libdivide under the terms of either of these. +// See LICENSE.txt for more details. + +#ifndef NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_ + +#define LIBDIVIDE_VERSION "3.0" +#define LIBDIVIDE_VERSION_MAJOR 3 +#define LIBDIVIDE_VERSION_MINOR 0 + +#include + +#if defined(__cplusplus) + #include + #include + #include +#else + #include + #include +#endif + +#if defined(LIBDIVIDE_AVX512) + #include +#elif defined(LIBDIVIDE_AVX2) + #include +#elif defined(LIBDIVIDE_SSE2) + #include +#endif + +#if defined(_MSC_VER) + #include + // disable warning C4146: unary minus operator applied + // to unsigned type, result still unsigned + #pragma warning(disable: 4146) + #define LIBDIVIDE_VC +#endif + +#if !defined(__has_builtin) + #define __has_builtin(x) 0 +#endif + +#if defined(__SIZEOF_INT128__) + #define HAS_INT128_T + // clang-cl on Windows does not yet support 128-bit division + #if !(defined(__clang__) && defined(LIBDIVIDE_VC)) + #define HAS_INT128_DIV + #endif +#endif + +#if defined(__x86_64__) || defined(_M_X64) + #define LIBDIVIDE_X86_64 +#endif + +#if defined(__i386__) + #define LIBDIVIDE_i386 +#endif + +#if defined(__GNUC__) || defined(__clang__) + #define LIBDIVIDE_GCC_STYLE_ASM +#endif + +#if defined(__cplusplus) || defined(LIBDIVIDE_VC) + #define LIBDIVIDE_FUNCTION __FUNCTION__ +#else + #define LIBDIVIDE_FUNCTION __func__ +#endif + +#define LIBDIVIDE_ERROR(msg) \ + do { \ + fprintf(stderr, "libdivide.h:%d: %s(): Error: %s\n", \ + __LINE__, LIBDIVIDE_FUNCTION, msg); \ + abort(); \ + } while (0) + +#if defined(LIBDIVIDE_ASSERTIONS_ON) + #define LIBDIVIDE_ASSERT(x) \ + do { \ + if (!(x)) { \ + fprintf(stderr, "libdivide.h:%d: %s(): Assertion failed: %s\n", \ + __LINE__, LIBDIVIDE_FUNCTION, #x); \ + abort(); \ + } \ + } while (0) +#else + #define LIBDIVIDE_ASSERT(x) +#endif + +#ifdef __cplusplus +namespace libdivide { +#endif + +// pack divider structs to prevent compilers from padding. +// This reduces memory usage by up to 43% when using a large +// array of libdivide dividers and improves performance +// by up to 10% because of reduced memory bandwidth. +#pragma pack(push, 1) + +struct libdivide_u32_t { + uint32_t magic; + uint8_t more; +}; + +struct libdivide_s32_t { + int32_t magic; + uint8_t more; +}; + +struct libdivide_u64_t { + uint64_t magic; + uint8_t more; +}; + +struct libdivide_s64_t { + int64_t magic; + uint8_t more; +}; + +struct libdivide_u32_branchfree_t { + uint32_t magic; + uint8_t more; +}; + +struct libdivide_s32_branchfree_t { + int32_t magic; + uint8_t more; +}; + +struct libdivide_u64_branchfree_t { + uint64_t magic; + uint8_t more; +}; + +struct libdivide_s64_branchfree_t { + int64_t magic; + uint8_t more; +}; + +#pragma pack(pop) + +// Explanation of the "more" field: +// +// * Bits 0-5 is the shift value (for shift path or mult path). +// * Bit 6 is the add indicator for mult path. +// * Bit 7 is set if the divisor is negative. We use bit 7 as the negative +// divisor indicator so that we can efficiently use sign extension to +// create a bitmask with all bits set to 1 (if the divisor is negative) +// or 0 (if the divisor is positive). +// +// u32: [0-4] shift value +// [5] ignored +// [6] add indicator +// magic number of 0 indicates shift path +// +// s32: [0-4] shift value +// [5] ignored +// [6] add indicator +// [7] indicates negative divisor +// magic number of 0 indicates shift path +// +// u64: [0-5] shift value +// [6] add indicator +// magic number of 0 indicates shift path +// +// s64: [0-5] shift value +// [6] add indicator +// [7] indicates negative divisor +// magic number of 0 indicates shift path +// +// In s32 and s64 branchfree modes, the magic number is negated according to +// whether the divisor is negated. In branchfree strategy, it is not negated. + +enum { + LIBDIVIDE_32_SHIFT_MASK = 0x1F, + LIBDIVIDE_64_SHIFT_MASK = 0x3F, + LIBDIVIDE_ADD_MARKER = 0x40, + LIBDIVIDE_NEGATIVE_DIVISOR = 0x80 +}; + +static inline struct libdivide_s32_t libdivide_s32_gen(int32_t d); +static inline struct libdivide_u32_t libdivide_u32_gen(uint32_t d); +static inline struct libdivide_s64_t libdivide_s64_gen(int64_t d); +static inline struct libdivide_u64_t libdivide_u64_gen(uint64_t d); + +static inline struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d); +static inline struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d); +static inline struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d); +static inline struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d); + +static inline int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom); +static inline uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom); +static inline int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom); +static inline uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom); + +static inline int32_t libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom); +static inline uint32_t libdivide_u32_branchfree_do(uint32_t numer, const struct libdivide_u32_branchfree_t *denom); +static inline int64_t libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom); +static inline uint64_t libdivide_u64_branchfree_do(uint64_t numer, const struct libdivide_u64_branchfree_t *denom); + +static inline int32_t libdivide_s32_recover(const struct libdivide_s32_t *denom); +static inline uint32_t libdivide_u32_recover(const struct libdivide_u32_t *denom); +static inline int64_t libdivide_s64_recover(const struct libdivide_s64_t *denom); +static inline uint64_t libdivide_u64_recover(const struct libdivide_u64_t *denom); + +static inline int32_t libdivide_s32_branchfree_recover(const struct libdivide_s32_branchfree_t *denom); +static inline uint32_t libdivide_u32_branchfree_recover(const struct libdivide_u32_branchfree_t *denom); +static inline int64_t libdivide_s64_branchfree_recover(const struct libdivide_s64_branchfree_t *denom); +static inline uint64_t libdivide_u64_branchfree_recover(const struct libdivide_u64_branchfree_t *denom); + +//////// Internal Utility Functions + +static inline uint32_t libdivide_mullhi_u32(uint32_t x, uint32_t y) { + uint64_t xl = x, yl = y; + uint64_t rl = xl * yl; + return (uint32_t)(rl >> 32); +} + +static inline int32_t libdivide_mullhi_s32(int32_t x, int32_t y) { + int64_t xl = x, yl = y; + int64_t rl = xl * yl; + // needs to be arithmetic shift + return (int32_t)(rl >> 32); +} + +static inline uint64_t libdivide_mullhi_u64(uint64_t x, uint64_t y) { +#if defined(LIBDIVIDE_VC) && \ + defined(LIBDIVIDE_X86_64) + return __umulh(x, y); +#elif defined(HAS_INT128_T) + __uint128_t xl = x, yl = y; + __uint128_t rl = xl * yl; + return (uint64_t)(rl >> 64); +#else + // full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64) + uint32_t mask = 0xFFFFFFFF; + uint32_t x0 = (uint32_t)(x & mask); + uint32_t x1 = (uint32_t)(x >> 32); + uint32_t y0 = (uint32_t)(y & mask); + uint32_t y1 = (uint32_t)(y >> 32); + uint32_t x0y0_hi = libdivide_mullhi_u32(x0, y0); + uint64_t x0y1 = x0 * (uint64_t)y1; + uint64_t x1y0 = x1 * (uint64_t)y0; + uint64_t x1y1 = x1 * (uint64_t)y1; + uint64_t temp = x1y0 + x0y0_hi; + uint64_t temp_lo = temp & mask; + uint64_t temp_hi = temp >> 32; + + return x1y1 + temp_hi + ((temp_lo + x0y1) >> 32); +#endif +} + +static inline int64_t libdivide_mullhi_s64(int64_t x, int64_t y) { +#if defined(LIBDIVIDE_VC) && \ + defined(LIBDIVIDE_X86_64) + return __mulh(x, y); +#elif defined(HAS_INT128_T) + __int128_t xl = x, yl = y; + __int128_t rl = xl * yl; + return (int64_t)(rl >> 64); +#else + // full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64) + uint32_t mask = 0xFFFFFFFF; + uint32_t x0 = (uint32_t)(x & mask); + uint32_t y0 = (uint32_t)(y & mask); + int32_t x1 = (int32_t)(x >> 32); + int32_t y1 = (int32_t)(y >> 32); + uint32_t x0y0_hi = libdivide_mullhi_u32(x0, y0); + int64_t t = x1 * (int64_t)y0 + x0y0_hi; + int64_t w1 = x0 * (int64_t)y1 + (t & mask); + + return x1 * (int64_t)y1 + (t >> 32) + (w1 >> 32); +#endif +} + +static inline int32_t libdivide_count_leading_zeros32(uint32_t val) { +#if defined(__GNUC__) || \ + __has_builtin(__builtin_clz) + // Fast way to count leading zeros + return __builtin_clz(val); +#elif defined(LIBDIVIDE_VC) + unsigned long result; + if (_BitScanReverse(&result, val)) { + return 31 - result; + } + return 0; +#else + if (val == 0) + return 32; + int32_t result = 8; + uint32_t hi = 0xFFU << 24; + while ((val & hi) == 0) { + hi >>= 8; + result += 8; + } + while (val & hi) { + result -= 1; + hi <<= 1; + } + return result; +#endif +} + +static inline int32_t libdivide_count_leading_zeros64(uint64_t val) { +#if defined(__GNUC__) || \ + __has_builtin(__builtin_clzll) + // Fast way to count leading zeros + return __builtin_clzll(val); +#elif defined(LIBDIVIDE_VC) && defined(_WIN64) + unsigned long result; + if (_BitScanReverse64(&result, val)) { + return 63 - result; + } + return 0; +#else + uint32_t hi = val >> 32; + uint32_t lo = val & 0xFFFFFFFF; + if (hi != 0) return libdivide_count_leading_zeros32(hi); + return 32 + libdivide_count_leading_zeros32(lo); +#endif +} + +// libdivide_64_div_32_to_32: divides a 64-bit uint {u1, u0} by a 32-bit +// uint {v}. The result must fit in 32 bits. +// Returns the quotient directly and the remainder in *r +static inline uint32_t libdivide_64_div_32_to_32(uint32_t u1, uint32_t u0, uint32_t v, uint32_t *r) { +#if (defined(LIBDIVIDE_i386) || defined(LIBDIVIDE_X86_64)) && \ + defined(LIBDIVIDE_GCC_STYLE_ASM) + uint32_t result; + __asm__("divl %[v]" + : "=a"(result), "=d"(*r) + : [v] "r"(v), "a"(u0), "d"(u1) + ); + return result; +#else + uint64_t n = ((uint64_t)u1 << 32) | u0; + uint32_t result = (uint32_t)(n / v); + *r = (uint32_t)(n - result * (uint64_t)v); + return result; +#endif +} + +// libdivide_128_div_64_to_64: divides a 128-bit uint {u1, u0} by a 64-bit +// uint {v}. The result must fit in 64 bits. +// Returns the quotient directly and the remainder in *r +static uint64_t libdivide_128_div_64_to_64(uint64_t u1, uint64_t u0, uint64_t v, uint64_t *r) { +#if defined(LIBDIVIDE_X86_64) && \ + defined(LIBDIVIDE_GCC_STYLE_ASM) + uint64_t result; + __asm__("divq %[v]" + : "=a"(result), "=d"(*r) + : [v] "r"(v), "a"(u0), "d"(u1) + ); + return result; +#elif defined(HAS_INT128_T) && \ + defined(HAS_INT128_DIV) + __uint128_t n = ((__uint128_t)u1 << 64) | u0; + uint64_t result = (uint64_t)(n / v); + *r = (uint64_t)(n - result * (__uint128_t)v); + return result; +#else + // Code taken from Hacker's Delight: + // http://www.hackersdelight.org/HDcode/divlu.c. + // License permits inclusion here per: + // http://www.hackersdelight.org/permissions.htm + + const uint64_t b = (1ULL << 32); // Number base (32 bits) + uint64_t un1, un0; // Norm. dividend LSD's + uint64_t vn1, vn0; // Norm. divisor digits + uint64_t q1, q0; // Quotient digits + uint64_t un64, un21, un10; // Dividend digit pairs + uint64_t rhat; // A remainder + int32_t s; // Shift amount for norm + + // If overflow, set rem. to an impossible value, + // and return the largest possible quotient + if (u1 >= v) { + *r = (uint64_t) -1; + return (uint64_t) -1; + } + + // count leading zeros + s = libdivide_count_leading_zeros64(v); + if (s > 0) { + // Normalize divisor + v = v << s; + un64 = (u1 << s) | (u0 >> (64 - s)); + un10 = u0 << s; // Shift dividend left + } else { + // Avoid undefined behavior of (u0 >> 64). + // The behavior is undefined if the right operand is + // negative, or greater than or equal to the length + // in bits of the promoted left operand. + un64 = u1; + un10 = u0; + } + + // Break divisor up into two 32-bit digits + vn1 = v >> 32; + vn0 = v & 0xFFFFFFFF; + + // Break right half of dividend into two digits + un1 = un10 >> 32; + un0 = un10 & 0xFFFFFFFF; + + // Compute the first quotient digit, q1 + q1 = un64 / vn1; + rhat = un64 - q1 * vn1; + + while (q1 >= b || q1 * vn0 > b * rhat + un1) { + q1 = q1 - 1; + rhat = rhat + vn1; + if (rhat >= b) + break; + } + + // Multiply and subtract + un21 = un64 * b + un1 - q1 * v; + + // Compute the second quotient digit + q0 = un21 / vn1; + rhat = un21 - q0 * vn1; + + while (q0 >= b || q0 * vn0 > b * rhat + un0) { + q0 = q0 - 1; + rhat = rhat + vn1; + if (rhat >= b) + break; + } + + *r = (un21 * b + un0 - q0 * v) >> s; + return q1 * b + q0; +#endif +} + +// Bitshift a u128 in place, left (signed_shift > 0) or right (signed_shift < 0) +static inline void libdivide_u128_shift(uint64_t *u1, uint64_t *u0, int32_t signed_shift) { + if (signed_shift > 0) { + uint32_t shift = signed_shift; + *u1 <<= shift; + *u1 |= *u0 >> (64 - shift); + *u0 <<= shift; + } + else if (signed_shift < 0) { + uint32_t shift = -signed_shift; + *u0 >>= shift; + *u0 |= *u1 << (64 - shift); + *u1 >>= shift; + } +} + +// Computes a 128 / 128 -> 64 bit division, with a 128 bit remainder. +static uint64_t libdivide_128_div_128_to_64(uint64_t u_hi, uint64_t u_lo, uint64_t v_hi, uint64_t v_lo, uint64_t *r_hi, uint64_t *r_lo) { +#if defined(HAS_INT128_T) && \ + defined(HAS_INT128_DIV) + __uint128_t ufull = u_hi; + __uint128_t vfull = v_hi; + ufull = (ufull << 64) | u_lo; + vfull = (vfull << 64) | v_lo; + uint64_t res = (uint64_t)(ufull / vfull); + __uint128_t remainder = ufull - (vfull * res); + *r_lo = (uint64_t)remainder; + *r_hi = (uint64_t)(remainder >> 64); + return res; +#else + // Adapted from "Unsigned Doubleword Division" in Hacker's Delight + // We want to compute u / v + typedef struct { uint64_t hi; uint64_t lo; } u128_t; + u128_t u = {u_hi, u_lo}; + u128_t v = {v_hi, v_lo}; + + if (v.hi == 0) { + // divisor v is a 64 bit value, so we just need one 128/64 division + // Note that we are simpler than Hacker's Delight here, because we know + // the quotient fits in 64 bits whereas Hacker's Delight demands a full + // 128 bit quotient + *r_hi = 0; + return libdivide_128_div_64_to_64(u.hi, u.lo, v.lo, r_lo); + } + // Here v >= 2**64 + // We know that v.hi != 0, so count leading zeros is OK + // We have 0 <= n <= 63 + uint32_t n = libdivide_count_leading_zeros64(v.hi); + + // Normalize the divisor so its MSB is 1 + u128_t v1t = v; + libdivide_u128_shift(&v1t.hi, &v1t.lo, n); + uint64_t v1 = v1t.hi; // i.e. v1 = v1t >> 64 + + // To ensure no overflow + u128_t u1 = u; + libdivide_u128_shift(&u1.hi, &u1.lo, -1); + + // Get quotient from divide unsigned insn. + uint64_t rem_ignored; + uint64_t q1 = libdivide_128_div_64_to_64(u1.hi, u1.lo, v1, &rem_ignored); + + // Undo normalization and division of u by 2. + u128_t q0 = {0, q1}; + libdivide_u128_shift(&q0.hi, &q0.lo, n); + libdivide_u128_shift(&q0.hi, &q0.lo, -63); + + // Make q0 correct or too small by 1 + // Equivalent to `if (q0 != 0) q0 = q0 - 1;` + if (q0.hi != 0 || q0.lo != 0) { + q0.hi -= (q0.lo == 0); // borrow + q0.lo -= 1; + } + + // Now q0 is correct. + // Compute q0 * v as q0v + // = (q0.hi << 64 + q0.lo) * (v.hi << 64 + v.lo) + // = (q0.hi * v.hi << 128) + (q0.hi * v.lo << 64) + + // (q0.lo * v.hi << 64) + q0.lo * v.lo) + // Each term is 128 bit + // High half of full product (upper 128 bits!) are dropped + u128_t q0v = {0, 0}; + q0v.hi = q0.hi*v.lo + q0.lo*v.hi + libdivide_mullhi_u64(q0.lo, v.lo); + q0v.lo = q0.lo*v.lo; + + // Compute u - q0v as u_q0v + // This is the remainder + u128_t u_q0v = u; + u_q0v.hi -= q0v.hi + (u.lo < q0v.lo); // second term is borrow + u_q0v.lo -= q0v.lo; + + // Check if u_q0v >= v + // This checks if our remainder is larger than the divisor + if ((u_q0v.hi > v.hi) || + (u_q0v.hi == v.hi && u_q0v.lo >= v.lo)) { + // Increment q0 + q0.lo += 1; + q0.hi += (q0.lo == 0); // carry + + // Subtract v from remainder + u_q0v.hi -= v.hi + (u_q0v.lo < v.lo); + u_q0v.lo -= v.lo; + } + + *r_hi = u_q0v.hi; + *r_lo = u_q0v.lo; + + LIBDIVIDE_ASSERT(q0.hi == 0); + return q0.lo; +#endif +} + +////////// UINT32 + +static inline struct libdivide_u32_t libdivide_internal_u32_gen(uint32_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); + } + + struct libdivide_u32_t result; + uint32_t floor_log_2_d = 31 - libdivide_count_leading_zeros32(d); + + // Power of 2 + if ((d & (d - 1)) == 0) { + // We need to subtract 1 from the shift value in case of an unsigned + // branchfree divider because there is a hardcoded right shift by 1 + // in its division algorithm. Because of this we also need to add back + // 1 in its recovery algorithm. + result.magic = 0; + result.more = (uint8_t)(floor_log_2_d - (branchfree != 0)); + } else { + uint8_t more; + uint32_t rem, proposed_m; + proposed_m = libdivide_64_div_32_to_32(1U << floor_log_2_d, 0, d, &rem); + + LIBDIVIDE_ASSERT(rem > 0 && rem < d); + const uint32_t e = d - rem; + + // This power works if e < 2**floor_log_2_d. + if (!branchfree && (e < (1U << floor_log_2_d))) { + // This power works + more = floor_log_2_d; + } else { + // We have to use the general 33-bit algorithm. We need to compute + // (2**power) / d. However, we already have (2**(power-1))/d and + // its remainder. By doubling both, and then correcting the + // remainder, we can compute the larger division. + // don't care about overflow here - in fact, we expect it + proposed_m += proposed_m; + const uint32_t twice_rem = rem + rem; + if (twice_rem >= d || twice_rem < rem) proposed_m += 1; + more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; + } + result.magic = 1 + proposed_m; + result.more = more; + // result.more's shift should in general be ceil_log_2_d. But if we + // used the smaller power, we subtract one from the shift because we're + // using the smaller power. If we're using the larger power, we + // subtract one from the shift because it's taken care of by the add + // indicator. So floor_log_2_d happens to be correct in both cases. + } + return result; +} + +struct libdivide_u32_t libdivide_u32_gen(uint32_t d) { + return libdivide_internal_u32_gen(d, 0); +} + +struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d) { + if (d == 1) { + LIBDIVIDE_ERROR("branchfree divider must be != 1"); + } + struct libdivide_u32_t tmp = libdivide_internal_u32_gen(d, 1); + struct libdivide_u32_branchfree_t ret = {tmp.magic, (uint8_t)(tmp.more & LIBDIVIDE_32_SHIFT_MASK)}; + return ret; +} + +uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return numer >> more; + } + else { + uint32_t q = libdivide_mullhi_u32(denom->magic, numer); + if (more & LIBDIVIDE_ADD_MARKER) { + uint32_t t = ((numer - q) >> 1) + q; + return t >> (more & LIBDIVIDE_32_SHIFT_MASK); + } + else { + // All upper bits are 0, + // don't need to mask them off. + return q >> more; + } + } +} + +uint32_t libdivide_u32_branchfree_do(uint32_t numer, const struct libdivide_u32_branchfree_t *denom) { + uint32_t q = libdivide_mullhi_u32(denom->magic, numer); + uint32_t t = ((numer - q) >> 1) + q; + return t >> denom->more; +} + +uint32_t libdivide_u32_recover(const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + + if (!denom->magic) { + return 1U << shift; + } else if (!(more & LIBDIVIDE_ADD_MARKER)) { + // We compute q = n/d = n*m / 2^(32 + shift) + // Therefore we have d = 2^(32 + shift) / m + // We need to ceil it. + // We know d is not a power of 2, so m is not a power of 2, + // so we can just add 1 to the floor + uint32_t hi_dividend = 1U << shift; + uint32_t rem_ignored; + return 1 + libdivide_64_div_32_to_32(hi_dividend, 0, denom->magic, &rem_ignored); + } else { + // Here we wish to compute d = 2^(32+shift+1)/(m+2^32). + // Notice (m + 2^32) is a 33 bit number. Use 64 bit division for now + // Also note that shift may be as high as 31, so shift + 1 will + // overflow. So we have to compute it as 2^(32+shift)/(m+2^32), and + // then double the quotient and remainder. + uint64_t half_n = 1ULL << (32 + shift); + uint64_t d = (1ULL << 32) | denom->magic; + // Note that the quotient is guaranteed <= 32 bits, but the remainder + // may need 33! + uint32_t half_q = (uint32_t)(half_n / d); + uint64_t rem = half_n % d; + // We computed 2^(32+shift)/(m+2^32) + // Need to double it, and then add 1 to the quotient if doubling th + // remainder would increase the quotient. + // Note that rem<<1 cannot overflow, since rem < d and d is 33 bits + uint32_t full_q = half_q + half_q + ((rem<<1) >= d); + + // We rounded down in gen (hence +1) + return full_q + 1; + } +} + +uint32_t libdivide_u32_branchfree_recover(const struct libdivide_u32_branchfree_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + + if (!denom->magic) { + return 1U << (shift + 1); + } else { + // Here we wish to compute d = 2^(32+shift+1)/(m+2^32). + // Notice (m + 2^32) is a 33 bit number. Use 64 bit division for now + // Also note that shift may be as high as 31, so shift + 1 will + // overflow. So we have to compute it as 2^(32+shift)/(m+2^32), and + // then double the quotient and remainder. + uint64_t half_n = 1ULL << (32 + shift); + uint64_t d = (1ULL << 32) | denom->magic; + // Note that the quotient is guaranteed <= 32 bits, but the remainder + // may need 33! + uint32_t half_q = (uint32_t)(half_n / d); + uint64_t rem = half_n % d; + // We computed 2^(32+shift)/(m+2^32) + // Need to double it, and then add 1 to the quotient if doubling th + // remainder would increase the quotient. + // Note that rem<<1 cannot overflow, since rem < d and d is 33 bits + uint32_t full_q = half_q + half_q + ((rem<<1) >= d); + + // We rounded down in gen (hence +1) + return full_q + 1; + } +} + +/////////// UINT64 + +static inline struct libdivide_u64_t libdivide_internal_u64_gen(uint64_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); + } + + struct libdivide_u64_t result; + uint32_t floor_log_2_d = 63 - libdivide_count_leading_zeros64(d); + + // Power of 2 + if ((d & (d - 1)) == 0) { + // We need to subtract 1 from the shift value in case of an unsigned + // branchfree divider because there is a hardcoded right shift by 1 + // in its division algorithm. Because of this we also need to add back + // 1 in its recovery algorithm. + result.magic = 0; + result.more = (uint8_t)(floor_log_2_d - (branchfree != 0)); + } else { + uint64_t proposed_m, rem; + uint8_t more; + // (1 << (64 + floor_log_2_d)) / d + proposed_m = libdivide_128_div_64_to_64(1ULL << floor_log_2_d, 0, d, &rem); + + LIBDIVIDE_ASSERT(rem > 0 && rem < d); + const uint64_t e = d - rem; + + // This power works if e < 2**floor_log_2_d. + if (!branchfree && e < (1ULL << floor_log_2_d)) { + // This power works + more = floor_log_2_d; + } else { + // We have to use the general 65-bit algorithm. We need to compute + // (2**power) / d. However, we already have (2**(power-1))/d and + // its remainder. By doubling both, and then correcting the + // remainder, we can compute the larger division. + // don't care about overflow here - in fact, we expect it + proposed_m += proposed_m; + const uint64_t twice_rem = rem + rem; + if (twice_rem >= d || twice_rem < rem) proposed_m += 1; + more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; + } + result.magic = 1 + proposed_m; + result.more = more; + // result.more's shift should in general be ceil_log_2_d. But if we + // used the smaller power, we subtract one from the shift because we're + // using the smaller power. If we're using the larger power, we + // subtract one from the shift because it's taken care of by the add + // indicator. So floor_log_2_d happens to be correct in both cases, + // which is why we do it outside of the if statement. + } + return result; +} + +struct libdivide_u64_t libdivide_u64_gen(uint64_t d) { + return libdivide_internal_u64_gen(d, 0); +} + +struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d) { + if (d == 1) { + LIBDIVIDE_ERROR("branchfree divider must be != 1"); + } + struct libdivide_u64_t tmp = libdivide_internal_u64_gen(d, 1); + struct libdivide_u64_branchfree_t ret = {tmp.magic, (uint8_t)(tmp.more & LIBDIVIDE_64_SHIFT_MASK)}; + return ret; +} + +uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return numer >> more; + } + else { + uint64_t q = libdivide_mullhi_u64(denom->magic, numer); + if (more & LIBDIVIDE_ADD_MARKER) { + uint64_t t = ((numer - q) >> 1) + q; + return t >> (more & LIBDIVIDE_64_SHIFT_MASK); + } + else { + // All upper bits are 0, + // don't need to mask them off. + return q >> more; + } + } +} + +uint64_t libdivide_u64_branchfree_do(uint64_t numer, const struct libdivide_u64_branchfree_t *denom) { + uint64_t q = libdivide_mullhi_u64(denom->magic, numer); + uint64_t t = ((numer - q) >> 1) + q; + return t >> denom->more; +} + +uint64_t libdivide_u64_recover(const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + + if (!denom->magic) { + return 1ULL << shift; + } else if (!(more & LIBDIVIDE_ADD_MARKER)) { + // We compute q = n/d = n*m / 2^(64 + shift) + // Therefore we have d = 2^(64 + shift) / m + // We need to ceil it. + // We know d is not a power of 2, so m is not a power of 2, + // so we can just add 1 to the floor + uint64_t hi_dividend = 1ULL << shift; + uint64_t rem_ignored; + return 1 + libdivide_128_div_64_to_64(hi_dividend, 0, denom->magic, &rem_ignored); + } else { + // Here we wish to compute d = 2^(64+shift+1)/(m+2^64). + // Notice (m + 2^64) is a 65 bit number. This gets hairy. See + // libdivide_u32_recover for more on what we do here. + // TODO: do something better than 128 bit math + + // Full n is a (potentially) 129 bit value + // half_n is a 128 bit value + // Compute the hi half of half_n. Low half is 0. + uint64_t half_n_hi = 1ULL << shift, half_n_lo = 0; + // d is a 65 bit value. The high bit is always set to 1. + const uint64_t d_hi = 1, d_lo = denom->magic; + // Note that the quotient is guaranteed <= 64 bits, + // but the remainder may need 65! + uint64_t r_hi, r_lo; + uint64_t half_q = libdivide_128_div_128_to_64(half_n_hi, half_n_lo, d_hi, d_lo, &r_hi, &r_lo); + // We computed 2^(64+shift)/(m+2^64) + // Double the remainder ('dr') and check if that is larger than d + // Note that d is a 65 bit value, so r1 is small and so r1 + r1 + // cannot overflow + uint64_t dr_lo = r_lo + r_lo; + uint64_t dr_hi = r_hi + r_hi + (dr_lo < r_lo); // last term is carry + int dr_exceeds_d = (dr_hi > d_hi) || (dr_hi == d_hi && dr_lo >= d_lo); + uint64_t full_q = half_q + half_q + (dr_exceeds_d ? 1 : 0); + return full_q + 1; + } +} + +uint64_t libdivide_u64_branchfree_recover(const struct libdivide_u64_branchfree_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + + if (!denom->magic) { + return 1ULL << (shift + 1); + } else { + // Here we wish to compute d = 2^(64+shift+1)/(m+2^64). + // Notice (m + 2^64) is a 65 bit number. This gets hairy. See + // libdivide_u32_recover for more on what we do here. + // TODO: do something better than 128 bit math + + // Full n is a (potentially) 129 bit value + // half_n is a 128 bit value + // Compute the hi half of half_n. Low half is 0. + uint64_t half_n_hi = 1ULL << shift, half_n_lo = 0; + // d is a 65 bit value. The high bit is always set to 1. + const uint64_t d_hi = 1, d_lo = denom->magic; + // Note that the quotient is guaranteed <= 64 bits, + // but the remainder may need 65! + uint64_t r_hi, r_lo; + uint64_t half_q = libdivide_128_div_128_to_64(half_n_hi, half_n_lo, d_hi, d_lo, &r_hi, &r_lo); + // We computed 2^(64+shift)/(m+2^64) + // Double the remainder ('dr') and check if that is larger than d + // Note that d is a 65 bit value, so r1 is small and so r1 + r1 + // cannot overflow + uint64_t dr_lo = r_lo + r_lo; + uint64_t dr_hi = r_hi + r_hi + (dr_lo < r_lo); // last term is carry + int dr_exceeds_d = (dr_hi > d_hi) || (dr_hi == d_hi && dr_lo >= d_lo); + uint64_t full_q = half_q + half_q + (dr_exceeds_d ? 1 : 0); + return full_q + 1; + } +} + +/////////// SINT32 + +static inline struct libdivide_s32_t libdivide_internal_s32_gen(int32_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); + } + + struct libdivide_s32_t result; + + // If d is a power of 2, or negative a power of 2, we have to use a shift. + // This is especially important because the magic algorithm fails for -1. + // To check if d is a power of 2 or its inverse, it suffices to check + // whether its absolute value has exactly one bit set. This works even for + // INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set + // and is a power of 2. + uint32_t ud = (uint32_t)d; + uint32_t absD = (d < 0) ? -ud : ud; + uint32_t floor_log_2_d = 31 - libdivide_count_leading_zeros32(absD); + // check if exactly one bit is set, + // don't care if absD is 0 since that's divide by zero + if ((absD & (absD - 1)) == 0) { + // Branchfree and normal paths are exactly the same + result.magic = 0; + result.more = floor_log_2_d | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0); + } else { + LIBDIVIDE_ASSERT(floor_log_2_d >= 1); + + uint8_t more; + // the dividend here is 2**(floor_log_2_d + 31), so the low 32 bit word + // is 0 and the high word is floor_log_2_d - 1 + uint32_t rem, proposed_m; + proposed_m = libdivide_64_div_32_to_32(1U << (floor_log_2_d - 1), 0, absD, &rem); + const uint32_t e = absD - rem; + + // We are going to start with a power of floor_log_2_d - 1. + // This works if works if e < 2**floor_log_2_d. + if (!branchfree && e < (1U << floor_log_2_d)) { + // This power works + more = floor_log_2_d - 1; + } else { + // We need to go one higher. This should not make proposed_m + // overflow, but it will make it negative when interpreted as an + // int32_t. + proposed_m += proposed_m; + const uint32_t twice_rem = rem + rem; + if (twice_rem >= absD || twice_rem < rem) proposed_m += 1; + more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; + } + + proposed_m += 1; + int32_t magic = (int32_t)proposed_m; + + // Mark if we are negative. Note we only negate the magic number in the + // branchfull case. + if (d < 0) { + more |= LIBDIVIDE_NEGATIVE_DIVISOR; + if (!branchfree) { + magic = -magic; + } + } + + result.more = more; + result.magic = magic; + } + return result; +} + +struct libdivide_s32_t libdivide_s32_gen(int32_t d) { + return libdivide_internal_s32_gen(d, 0); +} + +struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d) { + struct libdivide_s32_t tmp = libdivide_internal_s32_gen(d, 1); + struct libdivide_s32_branchfree_t result = {tmp.magic, tmp.more}; + return result; +} + +int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + + if (!denom->magic) { + uint32_t sign = (int8_t)more >> 7; + uint32_t mask = (1U << shift) - 1; + uint32_t uq = numer + ((numer >> 31) & mask); + int32_t q = (int32_t)uq; + q >>= shift; + q = (q ^ sign) - sign; + return q; + } else { + uint32_t uq = (uint32_t)libdivide_mullhi_s32(denom->magic, numer); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift and then sign extend + int32_t sign = (int8_t)more >> 7; + // q += (more < 0 ? -numer : numer) + // cast required to avoid UB + uq += ((uint32_t)numer ^ sign) - sign; + } + int32_t q = (int32_t)uq; + q >>= shift; + q += (q < 0); + return q; + } +} + +int32_t libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift and then sign extend + int32_t sign = (int8_t)more >> 7; + int32_t magic = denom->magic; + int32_t q = libdivide_mullhi_s32(magic, numer); + q += numer; + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is a power of + // 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + uint32_t q_sign = (uint32_t)(q >> 31); + q += q_sign & ((1U << shift) - is_power_of_2); + + // Now arithmetic right shift + q >>= shift; + // Negate if needed + q = (q ^ sign) - sign; + + return q; +} + +int32_t libdivide_s32_recover(const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + if (!denom->magic) { + uint32_t absD = 1U << shift; + if (more & LIBDIVIDE_NEGATIVE_DIVISOR) { + absD = -absD; + } + return (int32_t)absD; + } else { + // Unsigned math is much easier + // We negate the magic number only in the branchfull case, and we don't + // know which case we're in. However we have enough information to + // determine the correct sign of the magic number. The divisor was + // negative if LIBDIVIDE_NEGATIVE_DIVISOR is set. If ADD_MARKER is set, + // the magic number's sign is opposite that of the divisor. + // We want to compute the positive magic number. + int negative_divisor = (more & LIBDIVIDE_NEGATIVE_DIVISOR); + int magic_was_negated = (more & LIBDIVIDE_ADD_MARKER) + ? denom->magic > 0 : denom->magic < 0; + + // Handle the power of 2 case (including branchfree) + if (denom->magic == 0) { + int32_t result = 1U << shift; + return negative_divisor ? -result : result; + } + + uint32_t d = (uint32_t)(magic_was_negated ? -denom->magic : denom->magic); + uint64_t n = 1ULL << (32 + shift); // this shift cannot exceed 30 + uint32_t q = (uint32_t)(n / d); + int32_t result = (int32_t)q; + result += 1; + return negative_divisor ? -result : result; + } +} + +int32_t libdivide_s32_branchfree_recover(const struct libdivide_s32_branchfree_t *denom) { + return libdivide_s32_recover((const struct libdivide_s32_t *)denom); +} + +///////////// SINT64 + +static inline struct libdivide_s64_t libdivide_internal_s64_gen(int64_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); + } + + struct libdivide_s64_t result; + + // If d is a power of 2, or negative a power of 2, we have to use a shift. + // This is especially important because the magic algorithm fails for -1. + // To check if d is a power of 2 or its inverse, it suffices to check + // whether its absolute value has exactly one bit set. This works even for + // INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set + // and is a power of 2. + uint64_t ud = (uint64_t)d; + uint64_t absD = (d < 0) ? -ud : ud; + uint32_t floor_log_2_d = 63 - libdivide_count_leading_zeros64(absD); + // check if exactly one bit is set, + // don't care if absD is 0 since that's divide by zero + if ((absD & (absD - 1)) == 0) { + // Branchfree and non-branchfree cases are the same + result.magic = 0; + result.more = floor_log_2_d | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0); + } else { + // the dividend here is 2**(floor_log_2_d + 63), so the low 64 bit word + // is 0 and the high word is floor_log_2_d - 1 + uint8_t more; + uint64_t rem, proposed_m; + proposed_m = libdivide_128_div_64_to_64(1ULL << (floor_log_2_d - 1), 0, absD, &rem); + const uint64_t e = absD - rem; + + // We are going to start with a power of floor_log_2_d - 1. + // This works if works if e < 2**floor_log_2_d. + if (!branchfree && e < (1ULL << floor_log_2_d)) { + // This power works + more = floor_log_2_d - 1; + } else { + // We need to go one higher. This should not make proposed_m + // overflow, but it will make it negative when interpreted as an + // int32_t. + proposed_m += proposed_m; + const uint64_t twice_rem = rem + rem; + if (twice_rem >= absD || twice_rem < rem) proposed_m += 1; + // note that we only set the LIBDIVIDE_NEGATIVE_DIVISOR bit if we + // also set ADD_MARKER this is an annoying optimization that + // enables algorithm #4 to avoid the mask. However we always set it + // in the branchfree case + more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; + } + proposed_m += 1; + int64_t magic = (int64_t)proposed_m; + + // Mark if we are negative + if (d < 0) { + more |= LIBDIVIDE_NEGATIVE_DIVISOR; + if (!branchfree) { + magic = -magic; + } + } + + result.more = more; + result.magic = magic; + } + return result; +} + +struct libdivide_s64_t libdivide_s64_gen(int64_t d) { + return libdivide_internal_s64_gen(d, 0); +} + +struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d) { + struct libdivide_s64_t tmp = libdivide_internal_s64_gen(d, 1); + struct libdivide_s64_branchfree_t ret = {tmp.magic, tmp.more}; + return ret; +} + +int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + + if (!denom->magic) { // shift path + uint64_t mask = (1ULL << shift) - 1; + uint64_t uq = numer + ((numer >> 63) & mask); + int64_t q = (int64_t)uq; + q >>= shift; + // must be arithmetic shift and then sign-extend + int64_t sign = (int8_t)more >> 7; + q = (q ^ sign) - sign; + return q; + } else { + uint64_t uq = (uint64_t)libdivide_mullhi_s64(denom->magic, numer); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift and then sign extend + int64_t sign = (int8_t)more >> 7; + // q += (more < 0 ? -numer : numer) + // cast required to avoid UB + uq += ((uint64_t)numer ^ sign) - sign; + } + int64_t q = (int64_t)uq; + q >>= shift; + q += (q < 0); + return q; + } +} + +int64_t libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift and then sign extend + int64_t sign = (int8_t)more >> 7; + int64_t magic = denom->magic; + int64_t q = libdivide_mullhi_s64(magic, numer); + q += numer; + + // If q is non-negative, we have nothing to do. + // If q is negative, we want to add either (2**shift)-1 if d is a power of + // 2, or (2**shift) if it is not a power of 2. + uint64_t is_power_of_2 = (magic == 0); + uint64_t q_sign = (uint64_t)(q >> 63); + q += q_sign & ((1ULL << shift) - is_power_of_2); + + // Arithmetic right shift + q >>= shift; + // Negate if needed + q = (q ^ sign) - sign; + + return q; +} + +int64_t libdivide_s64_recover(const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + if (denom->magic == 0) { // shift path + uint64_t absD = 1ULL << shift; + if (more & LIBDIVIDE_NEGATIVE_DIVISOR) { + absD = -absD; + } + return (int64_t)absD; + } else { + // Unsigned math is much easier + int negative_divisor = (more & LIBDIVIDE_NEGATIVE_DIVISOR); + int magic_was_negated = (more & LIBDIVIDE_ADD_MARKER) + ? denom->magic > 0 : denom->magic < 0; + + uint64_t d = (uint64_t)(magic_was_negated ? -denom->magic : denom->magic); + uint64_t n_hi = 1ULL << shift, n_lo = 0; + uint64_t rem_ignored; + uint64_t q = libdivide_128_div_64_to_64(n_hi, n_lo, d, &rem_ignored); + int64_t result = (int64_t)(q + 1); + if (negative_divisor) { + result = -result; + } + return result; + } +} + +int64_t libdivide_s64_branchfree_recover(const struct libdivide_s64_branchfree_t *denom) { + return libdivide_s64_recover((const struct libdivide_s64_t *)denom); +} + +#if defined(LIBDIVIDE_AVX512) + +static inline __m512i libdivide_u32_do_vector(__m512i numers, const struct libdivide_u32_t *denom); +static inline __m512i libdivide_s32_do_vector(__m512i numers, const struct libdivide_s32_t *denom); +static inline __m512i libdivide_u64_do_vector(__m512i numers, const struct libdivide_u64_t *denom); +static inline __m512i libdivide_s64_do_vector(__m512i numers, const struct libdivide_s64_t *denom); + +static inline __m512i libdivide_u32_branchfree_do_vector(__m512i numers, const struct libdivide_u32_branchfree_t *denom); +static inline __m512i libdivide_s32_branchfree_do_vector(__m512i numers, const struct libdivide_s32_branchfree_t *denom); +static inline __m512i libdivide_u64_branchfree_do_vector(__m512i numers, const struct libdivide_u64_branchfree_t *denom); +static inline __m512i libdivide_s64_branchfree_do_vector(__m512i numers, const struct libdivide_s64_branchfree_t *denom); + +//////// Internal Utility Functions + +static inline __m512i libdivide_s64_signbits(__m512i v) {; + return _mm512_srai_epi64(v, 63); +} + +static inline __m512i libdivide_s64_shift_right_vector(__m512i v, int amt) { + return _mm512_srai_epi64(v, amt); +} + +// Here, b is assumed to contain one 32-bit value repeated. +static inline __m512i libdivide_mullhi_u32_vector(__m512i a, __m512i b) { + __m512i hi_product_0Z2Z = _mm512_srli_epi64(_mm512_mul_epu32(a, b), 32); + __m512i a1X3X = _mm512_srli_epi64(a, 32); + __m512i mask = _mm512_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0); + __m512i hi_product_Z1Z3 = _mm512_and_si512(_mm512_mul_epu32(a1X3X, b), mask); + return _mm512_or_si512(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// b is one 32-bit value repeated. +static inline __m512i libdivide_mullhi_s32_vector(__m512i a, __m512i b) { + __m512i hi_product_0Z2Z = _mm512_srli_epi64(_mm512_mul_epi32(a, b), 32); + __m512i a1X3X = _mm512_srli_epi64(a, 32); + __m512i mask = _mm512_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0); + __m512i hi_product_Z1Z3 = _mm512_and_si512(_mm512_mul_epi32(a1X3X, b), mask); + return _mm512_or_si512(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// Here, y is assumed to contain one 64-bit value repeated. +// https://stackoverflow.com/a/28827013 +static inline __m512i libdivide_mullhi_u64_vector(__m512i x, __m512i y) { + __m512i lomask = _mm512_set1_epi64(0xffffffff); + __m512i xh = _mm512_shuffle_epi32(x, (_MM_PERM_ENUM) 0xB1); + __m512i yh = _mm512_shuffle_epi32(y, (_MM_PERM_ENUM) 0xB1); + __m512i w0 = _mm512_mul_epu32(x, y); + __m512i w1 = _mm512_mul_epu32(x, yh); + __m512i w2 = _mm512_mul_epu32(xh, y); + __m512i w3 = _mm512_mul_epu32(xh, yh); + __m512i w0h = _mm512_srli_epi64(w0, 32); + __m512i s1 = _mm512_add_epi64(w1, w0h); + __m512i s1l = _mm512_and_si512(s1, lomask); + __m512i s1h = _mm512_srli_epi64(s1, 32); + __m512i s2 = _mm512_add_epi64(w2, s1l); + __m512i s2h = _mm512_srli_epi64(s2, 32); + __m512i hi = _mm512_add_epi64(w3, s1h); + hi = _mm512_add_epi64(hi, s2h); + + return hi; +} + +// y is one 64-bit value repeated. +static inline __m512i libdivide_mullhi_s64_vector(__m512i x, __m512i y) { + __m512i p = libdivide_mullhi_u64_vector(x, y); + __m512i t1 = _mm512_and_si512(libdivide_s64_signbits(x), y); + __m512i t2 = _mm512_and_si512(libdivide_s64_signbits(y), x); + p = _mm512_sub_epi64(p, t1); + p = _mm512_sub_epi64(p, t2); + return p; +} + +////////// UINT32 + +__m512i libdivide_u32_do_vector(__m512i numers, const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm512_srli_epi32(numers, more); + } + else { + __m512i q = libdivide_mullhi_u32_vector(numers, _mm512_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + __m512i t = _mm512_add_epi32(_mm512_srli_epi32(_mm512_sub_epi32(numers, q), 1), q); + return _mm512_srli_epi32(t, shift); + } + else { + return _mm512_srli_epi32(q, more); + } + } +} + +__m512i libdivide_u32_branchfree_do_vector(__m512i numers, const struct libdivide_u32_branchfree_t *denom) { + __m512i q = libdivide_mullhi_u32_vector(numers, _mm512_set1_epi32(denom->magic)); + __m512i t = _mm512_add_epi32(_mm512_srli_epi32(_mm512_sub_epi32(numers, q), 1), q); + return _mm512_srli_epi32(t, denom->more); +} + +////////// UINT64 + +__m512i libdivide_u64_do_vector(__m512i numers, const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm512_srli_epi64(numers, more); + } + else { + __m512i q = libdivide_mullhi_u64_vector(numers, _mm512_set1_epi64(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + __m512i t = _mm512_add_epi64(_mm512_srli_epi64(_mm512_sub_epi64(numers, q), 1), q); + return _mm512_srli_epi64(t, shift); + } + else { + return _mm512_srli_epi64(q, more); + } + } +} + +__m512i libdivide_u64_branchfree_do_vector(__m512i numers, const struct libdivide_u64_branchfree_t *denom) { + __m512i q = libdivide_mullhi_u64_vector(numers, _mm512_set1_epi64(denom->magic)); + __m512i t = _mm512_add_epi64(_mm512_srli_epi64(_mm512_sub_epi64(numers, q), 1), q); + return _mm512_srli_epi64(t, denom->more); +} + +////////// SINT32 + +__m512i libdivide_s32_do_vector(__m512i numers, const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + uint32_t mask = (1U << shift) - 1; + __m512i roundToZeroTweak = _mm512_set1_epi32(mask); + // q = numer + ((numer >> 31) & roundToZeroTweak); + __m512i q = _mm512_add_epi32(numers, _mm512_and_si512(_mm512_srai_epi32(numers, 31), roundToZeroTweak)); + q = _mm512_srai_epi32(q, shift); + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm512_sub_epi32(_mm512_xor_si512(q, sign), sign); + return q; + } + else { + __m512i q = libdivide_mullhi_s32_vector(numers, _mm512_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm512_add_epi32(q, _mm512_sub_epi32(_mm512_xor_si512(numers, sign), sign)); + } + // q >>= shift + q = _mm512_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK); + q = _mm512_add_epi32(q, _mm512_srli_epi32(q, 31)); // q += (q < 0) + return q; + } +} + +__m512i libdivide_s32_branchfree_do_vector(__m512i numers, const struct libdivide_s32_branchfree_t *denom) { + int32_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + __m512i q = libdivide_mullhi_s32_vector(numers, _mm512_set1_epi32(magic)); + q = _mm512_add_epi32(q, numers); // q += numers + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + __m512i q_sign = _mm512_srai_epi32(q, 31); // q_sign = q >> 31 + __m512i mask = _mm512_set1_epi32((1U << shift) - is_power_of_2); + q = _mm512_add_epi32(q, _mm512_and_si512(q_sign, mask)); // q = q + (q_sign & mask) + q = _mm512_srai_epi32(q, shift); // q >>= shift + q = _mm512_sub_epi32(_mm512_xor_si512(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +////////// SINT64 + +__m512i libdivide_s64_do_vector(__m512i numers, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + int64_t magic = denom->magic; + if (magic == 0) { // shift path + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + uint64_t mask = (1ULL << shift) - 1; + __m512i roundToZeroTweak = _mm512_set1_epi64(mask); + // q = numer + ((numer >> 63) & roundToZeroTweak); + __m512i q = _mm512_add_epi64(numers, _mm512_and_si512(libdivide_s64_signbits(numers), roundToZeroTweak)); + q = libdivide_s64_shift_right_vector(q, shift); + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm512_sub_epi64(_mm512_xor_si512(q, sign), sign); + return q; + } + else { + __m512i q = libdivide_mullhi_s64_vector(numers, _mm512_set1_epi64(magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm512_add_epi64(q, _mm512_sub_epi64(_mm512_xor_si512(numers, sign), sign)); + } + // q >>= denom->mult_path.shift + q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); + q = _mm512_add_epi64(q, _mm512_srli_epi64(q, 63)); // q += (q < 0) + return q; + } +} + +__m512i libdivide_s64_branchfree_do_vector(__m512i numers, const struct libdivide_s64_branchfree_t *denom) { + int64_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + + // libdivide_mullhi_s64(numers, magic); + __m512i q = libdivide_mullhi_s64_vector(numers, _mm512_set1_epi64(magic)); + q = _mm512_add_epi64(q, numers); // q += numers + + // If q is non-negative, we have nothing to do. + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2. + uint32_t is_power_of_2 = (magic == 0); + __m512i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63 + __m512i mask = _mm512_set1_epi64((1ULL << shift) - is_power_of_2); + q = _mm512_add_epi64(q, _mm512_and_si512(q_sign, mask)); // q = q + (q_sign & mask) + q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift + q = _mm512_sub_epi64(_mm512_xor_si512(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +#elif defined(LIBDIVIDE_AVX2) + +static inline __m256i libdivide_u32_do_vector(__m256i numers, const struct libdivide_u32_t *denom); +static inline __m256i libdivide_s32_do_vector(__m256i numers, const struct libdivide_s32_t *denom); +static inline __m256i libdivide_u64_do_vector(__m256i numers, const struct libdivide_u64_t *denom); +static inline __m256i libdivide_s64_do_vector(__m256i numers, const struct libdivide_s64_t *denom); + +static inline __m256i libdivide_u32_branchfree_do_vector(__m256i numers, const struct libdivide_u32_branchfree_t *denom); +static inline __m256i libdivide_s32_branchfree_do_vector(__m256i numers, const struct libdivide_s32_branchfree_t *denom); +static inline __m256i libdivide_u64_branchfree_do_vector(__m256i numers, const struct libdivide_u64_branchfree_t *denom); +static inline __m256i libdivide_s64_branchfree_do_vector(__m256i numers, const struct libdivide_s64_branchfree_t *denom); + +//////// Internal Utility Functions + +// Implementation of _mm256_srai_epi64(v, 63) (from AVX512). +static inline __m256i libdivide_s64_signbits(__m256i v) { + __m256i hiBitsDuped = _mm256_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1)); + __m256i signBits = _mm256_srai_epi32(hiBitsDuped, 31); + return signBits; +} + +// Implementation of _mm256_srai_epi64 (from AVX512). +static inline __m256i libdivide_s64_shift_right_vector(__m256i v, int amt) { + const int b = 64 - amt; + __m256i m = _mm256_set1_epi64x(1ULL << (b - 1)); + __m256i x = _mm256_srli_epi64(v, amt); + __m256i result = _mm256_sub_epi64(_mm256_xor_si256(x, m), m); + return result; +} + +// Here, b is assumed to contain one 32-bit value repeated. +static inline __m256i libdivide_mullhi_u32_vector(__m256i a, __m256i b) { + __m256i hi_product_0Z2Z = _mm256_srli_epi64(_mm256_mul_epu32(a, b), 32); + __m256i a1X3X = _mm256_srli_epi64(a, 32); + __m256i mask = _mm256_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0); + __m256i hi_product_Z1Z3 = _mm256_and_si256(_mm256_mul_epu32(a1X3X, b), mask); + return _mm256_or_si256(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// b is one 32-bit value repeated. +static inline __m256i libdivide_mullhi_s32_vector(__m256i a, __m256i b) { + __m256i hi_product_0Z2Z = _mm256_srli_epi64(_mm256_mul_epi32(a, b), 32); + __m256i a1X3X = _mm256_srli_epi64(a, 32); + __m256i mask = _mm256_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0); + __m256i hi_product_Z1Z3 = _mm256_and_si256(_mm256_mul_epi32(a1X3X, b), mask); + return _mm256_or_si256(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// Here, y is assumed to contain one 64-bit value repeated. +// https://stackoverflow.com/a/28827013 +static inline __m256i libdivide_mullhi_u64_vector(__m256i x, __m256i y) { + __m256i lomask = _mm256_set1_epi64x(0xffffffff); + __m256i xh = _mm256_shuffle_epi32(x, 0xB1); // x0l, x0h, x1l, x1h + __m256i yh = _mm256_shuffle_epi32(y, 0xB1); // y0l, y0h, y1l, y1h + __m256i w0 = _mm256_mul_epu32(x, y); // x0l*y0l, x1l*y1l + __m256i w1 = _mm256_mul_epu32(x, yh); // x0l*y0h, x1l*y1h + __m256i w2 = _mm256_mul_epu32(xh, y); // x0h*y0l, x1h*y0l + __m256i w3 = _mm256_mul_epu32(xh, yh); // x0h*y0h, x1h*y1h + __m256i w0h = _mm256_srli_epi64(w0, 32); + __m256i s1 = _mm256_add_epi64(w1, w0h); + __m256i s1l = _mm256_and_si256(s1, lomask); + __m256i s1h = _mm256_srli_epi64(s1, 32); + __m256i s2 = _mm256_add_epi64(w2, s1l); + __m256i s2h = _mm256_srli_epi64(s2, 32); + __m256i hi = _mm256_add_epi64(w3, s1h); + hi = _mm256_add_epi64(hi, s2h); + + return hi; +} + +// y is one 64-bit value repeated. +static inline __m256i libdivide_mullhi_s64_vector(__m256i x, __m256i y) { + __m256i p = libdivide_mullhi_u64_vector(x, y); + __m256i t1 = _mm256_and_si256(libdivide_s64_signbits(x), y); + __m256i t2 = _mm256_and_si256(libdivide_s64_signbits(y), x); + p = _mm256_sub_epi64(p, t1); + p = _mm256_sub_epi64(p, t2); + return p; +} + +////////// UINT32 + +__m256i libdivide_u32_do_vector(__m256i numers, const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm256_srli_epi32(numers, more); + } + else { + __m256i q = libdivide_mullhi_u32_vector(numers, _mm256_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + __m256i t = _mm256_add_epi32(_mm256_srli_epi32(_mm256_sub_epi32(numers, q), 1), q); + return _mm256_srli_epi32(t, shift); + } + else { + return _mm256_srli_epi32(q, more); + } + } +} + +__m256i libdivide_u32_branchfree_do_vector(__m256i numers, const struct libdivide_u32_branchfree_t *denom) { + __m256i q = libdivide_mullhi_u32_vector(numers, _mm256_set1_epi32(denom->magic)); + __m256i t = _mm256_add_epi32(_mm256_srli_epi32(_mm256_sub_epi32(numers, q), 1), q); + return _mm256_srli_epi32(t, denom->more); +} + +////////// UINT64 + +__m256i libdivide_u64_do_vector(__m256i numers, const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm256_srli_epi64(numers, more); + } + else { + __m256i q = libdivide_mullhi_u64_vector(numers, _mm256_set1_epi64x(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + __m256i t = _mm256_add_epi64(_mm256_srli_epi64(_mm256_sub_epi64(numers, q), 1), q); + return _mm256_srli_epi64(t, shift); + } + else { + return _mm256_srli_epi64(q, more); + } + } +} + +__m256i libdivide_u64_branchfree_do_vector(__m256i numers, const struct libdivide_u64_branchfree_t *denom) { + __m256i q = libdivide_mullhi_u64_vector(numers, _mm256_set1_epi64x(denom->magic)); + __m256i t = _mm256_add_epi64(_mm256_srli_epi64(_mm256_sub_epi64(numers, q), 1), q); + return _mm256_srli_epi64(t, denom->more); +} + +////////// SINT32 + +__m256i libdivide_s32_do_vector(__m256i numers, const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + uint32_t mask = (1U << shift) - 1; + __m256i roundToZeroTweak = _mm256_set1_epi32(mask); + // q = numer + ((numer >> 31) & roundToZeroTweak); + __m256i q = _mm256_add_epi32(numers, _mm256_and_si256(_mm256_srai_epi32(numers, 31), roundToZeroTweak)); + q = _mm256_srai_epi32(q, shift); + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm256_sub_epi32(_mm256_xor_si256(q, sign), sign); + return q; + } + else { + __m256i q = libdivide_mullhi_s32_vector(numers, _mm256_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm256_add_epi32(q, _mm256_sub_epi32(_mm256_xor_si256(numers, sign), sign)); + } + // q >>= shift + q = _mm256_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK); + q = _mm256_add_epi32(q, _mm256_srli_epi32(q, 31)); // q += (q < 0) + return q; + } +} + +__m256i libdivide_s32_branchfree_do_vector(__m256i numers, const struct libdivide_s32_branchfree_t *denom) { + int32_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + __m256i q = libdivide_mullhi_s32_vector(numers, _mm256_set1_epi32(magic)); + q = _mm256_add_epi32(q, numers); // q += numers + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + __m256i q_sign = _mm256_srai_epi32(q, 31); // q_sign = q >> 31 + __m256i mask = _mm256_set1_epi32((1U << shift) - is_power_of_2); + q = _mm256_add_epi32(q, _mm256_and_si256(q_sign, mask)); // q = q + (q_sign & mask) + q = _mm256_srai_epi32(q, shift); // q >>= shift + q = _mm256_sub_epi32(_mm256_xor_si256(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +////////// SINT64 + +__m256i libdivide_s64_do_vector(__m256i numers, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + int64_t magic = denom->magic; + if (magic == 0) { // shift path + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + uint64_t mask = (1ULL << shift) - 1; + __m256i roundToZeroTweak = _mm256_set1_epi64x(mask); + // q = numer + ((numer >> 63) & roundToZeroTweak); + __m256i q = _mm256_add_epi64(numers, _mm256_and_si256(libdivide_s64_signbits(numers), roundToZeroTweak)); + q = libdivide_s64_shift_right_vector(q, shift); + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm256_sub_epi64(_mm256_xor_si256(q, sign), sign); + return q; + } + else { + __m256i q = libdivide_mullhi_s64_vector(numers, _mm256_set1_epi64x(magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm256_add_epi64(q, _mm256_sub_epi64(_mm256_xor_si256(numers, sign), sign)); + } + // q >>= denom->mult_path.shift + q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); + q = _mm256_add_epi64(q, _mm256_srli_epi64(q, 63)); // q += (q < 0) + return q; + } +} + +__m256i libdivide_s64_branchfree_do_vector(__m256i numers, const struct libdivide_s64_branchfree_t *denom) { + int64_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + + // libdivide_mullhi_s64(numers, magic); + __m256i q = libdivide_mullhi_s64_vector(numers, _mm256_set1_epi64x(magic)); + q = _mm256_add_epi64(q, numers); // q += numers + + // If q is non-negative, we have nothing to do. + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2. + uint32_t is_power_of_2 = (magic == 0); + __m256i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63 + __m256i mask = _mm256_set1_epi64x((1ULL << shift) - is_power_of_2); + q = _mm256_add_epi64(q, _mm256_and_si256(q_sign, mask)); // q = q + (q_sign & mask) + q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift + q = _mm256_sub_epi64(_mm256_xor_si256(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +#elif defined(LIBDIVIDE_SSE2) + +static inline __m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom); +static inline __m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t *denom); +static inline __m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t *denom); +static inline __m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t *denom); + +static inline __m128i libdivide_u32_branchfree_do_vector(__m128i numers, const struct libdivide_u32_branchfree_t *denom); +static inline __m128i libdivide_s32_branchfree_do_vector(__m128i numers, const struct libdivide_s32_branchfree_t *denom); +static inline __m128i libdivide_u64_branchfree_do_vector(__m128i numers, const struct libdivide_u64_branchfree_t *denom); +static inline __m128i libdivide_s64_branchfree_do_vector(__m128i numers, const struct libdivide_s64_branchfree_t *denom); + +//////// Internal Utility Functions + +// Implementation of _mm_srai_epi64(v, 63) (from AVX512). +static inline __m128i libdivide_s64_signbits(__m128i v) { + __m128i hiBitsDuped = _mm_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1)); + __m128i signBits = _mm_srai_epi32(hiBitsDuped, 31); + return signBits; +} + +// Implementation of _mm_srai_epi64 (from AVX512). +static inline __m128i libdivide_s64_shift_right_vector(__m128i v, int amt) { + const int b = 64 - amt; + __m128i m = _mm_set1_epi64x(1ULL << (b - 1)); + __m128i x = _mm_srli_epi64(v, amt); + __m128i result = _mm_sub_epi64(_mm_xor_si128(x, m), m); + return result; +} + +// Here, b is assumed to contain one 32-bit value repeated. +static inline __m128i libdivide_mullhi_u32_vector(__m128i a, __m128i b) { + __m128i hi_product_0Z2Z = _mm_srli_epi64(_mm_mul_epu32(a, b), 32); + __m128i a1X3X = _mm_srli_epi64(a, 32); + __m128i mask = _mm_set_epi32(-1, 0, -1, 0); + __m128i hi_product_Z1Z3 = _mm_and_si128(_mm_mul_epu32(a1X3X, b), mask); + return _mm_or_si128(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// SSE2 does not have a signed multiplication instruction, but we can convert +// unsigned to signed pretty efficiently. Again, b is just a 32 bit value +// repeated four times. +static inline __m128i libdivide_mullhi_s32_vector(__m128i a, __m128i b) { + __m128i p = libdivide_mullhi_u32_vector(a, b); + // t1 = (a >> 31) & y, arithmetic shift + __m128i t1 = _mm_and_si128(_mm_srai_epi32(a, 31), b); + __m128i t2 = _mm_and_si128(_mm_srai_epi32(b, 31), a); + p = _mm_sub_epi32(p, t1); + p = _mm_sub_epi32(p, t2); + return p; +} + +// Here, y is assumed to contain one 64-bit value repeated. +// https://stackoverflow.com/a/28827013 +static inline __m128i libdivide_mullhi_u64_vector(__m128i x, __m128i y) { + __m128i lomask = _mm_set1_epi64x(0xffffffff); + __m128i xh = _mm_shuffle_epi32(x, 0xB1); // x0l, x0h, x1l, x1h + __m128i yh = _mm_shuffle_epi32(y, 0xB1); // y0l, y0h, y1l, y1h + __m128i w0 = _mm_mul_epu32(x, y); // x0l*y0l, x1l*y1l + __m128i w1 = _mm_mul_epu32(x, yh); // x0l*y0h, x1l*y1h + __m128i w2 = _mm_mul_epu32(xh, y); // x0h*y0l, x1h*y0l + __m128i w3 = _mm_mul_epu32(xh, yh); // x0h*y0h, x1h*y1h + __m128i w0h = _mm_srli_epi64(w0, 32); + __m128i s1 = _mm_add_epi64(w1, w0h); + __m128i s1l = _mm_and_si128(s1, lomask); + __m128i s1h = _mm_srli_epi64(s1, 32); + __m128i s2 = _mm_add_epi64(w2, s1l); + __m128i s2h = _mm_srli_epi64(s2, 32); + __m128i hi = _mm_add_epi64(w3, s1h); + hi = _mm_add_epi64(hi, s2h); + + return hi; +} + +// y is one 64-bit value repeated. +static inline __m128i libdivide_mullhi_s64_vector(__m128i x, __m128i y) { + __m128i p = libdivide_mullhi_u64_vector(x, y); + __m128i t1 = _mm_and_si128(libdivide_s64_signbits(x), y); + __m128i t2 = _mm_and_si128(libdivide_s64_signbits(y), x); + p = _mm_sub_epi64(p, t1); + p = _mm_sub_epi64(p, t2); + return p; +} + +////////// UINT32 + +__m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm_srli_epi32(numers, more); + } + else { + __m128i q = libdivide_mullhi_u32_vector(numers, _mm_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q); + return _mm_srli_epi32(t, shift); + } + else { + return _mm_srli_epi32(q, more); + } + } +} + +__m128i libdivide_u32_branchfree_do_vector(__m128i numers, const struct libdivide_u32_branchfree_t *denom) { + __m128i q = libdivide_mullhi_u32_vector(numers, _mm_set1_epi32(denom->magic)); + __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q); + return _mm_srli_epi32(t, denom->more); +} + +////////// UINT64 + +__m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm_srli_epi64(numers, more); + } + else { + __m128i q = libdivide_mullhi_u64_vector(numers, _mm_set1_epi64x(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q); + return _mm_srli_epi64(t, shift); + } + else { + return _mm_srli_epi64(q, more); + } + } +} + +__m128i libdivide_u64_branchfree_do_vector(__m128i numers, const struct libdivide_u64_branchfree_t *denom) { + __m128i q = libdivide_mullhi_u64_vector(numers, _mm_set1_epi64x(denom->magic)); + __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q); + return _mm_srli_epi64(t, denom->more); +} + +////////// SINT32 + +__m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + uint32_t mask = (1U << shift) - 1; + __m128i roundToZeroTweak = _mm_set1_epi32(mask); + // q = numer + ((numer >> 31) & roundToZeroTweak); + __m128i q = _mm_add_epi32(numers, _mm_and_si128(_mm_srai_epi32(numers, 31), roundToZeroTweak)); + q = _mm_srai_epi32(q, shift); + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm_sub_epi32(_mm_xor_si128(q, sign), sign); + return q; + } + else { + __m128i q = libdivide_mullhi_s32_vector(numers, _mm_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm_add_epi32(q, _mm_sub_epi32(_mm_xor_si128(numers, sign), sign)); + } + // q >>= shift + q = _mm_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK); + q = _mm_add_epi32(q, _mm_srli_epi32(q, 31)); // q += (q < 0) + return q; + } +} + +__m128i libdivide_s32_branchfree_do_vector(__m128i numers, const struct libdivide_s32_branchfree_t *denom) { + int32_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + __m128i q = libdivide_mullhi_s32_vector(numers, _mm_set1_epi32(magic)); + q = _mm_add_epi32(q, numers); // q += numers + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + __m128i q_sign = _mm_srai_epi32(q, 31); // q_sign = q >> 31 + __m128i mask = _mm_set1_epi32((1U << shift) - is_power_of_2); + q = _mm_add_epi32(q, _mm_and_si128(q_sign, mask)); // q = q + (q_sign & mask) + q = _mm_srai_epi32(q, shift); // q >>= shift + q = _mm_sub_epi32(_mm_xor_si128(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +////////// SINT64 + +__m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + int64_t magic = denom->magic; + if (magic == 0) { // shift path + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + uint64_t mask = (1ULL << shift) - 1; + __m128i roundToZeroTweak = _mm_set1_epi64x(mask); + // q = numer + ((numer >> 63) & roundToZeroTweak); + __m128i q = _mm_add_epi64(numers, _mm_and_si128(libdivide_s64_signbits(numers), roundToZeroTweak)); + q = libdivide_s64_shift_right_vector(q, shift); + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm_sub_epi64(_mm_xor_si128(q, sign), sign); + return q; + } + else { + __m128i q = libdivide_mullhi_s64_vector(numers, _mm_set1_epi64x(magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm_add_epi64(q, _mm_sub_epi64(_mm_xor_si128(numers, sign), sign)); + } + // q >>= denom->mult_path.shift + q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); + q = _mm_add_epi64(q, _mm_srli_epi64(q, 63)); // q += (q < 0) + return q; + } +} + +__m128i libdivide_s64_branchfree_do_vector(__m128i numers, const struct libdivide_s64_branchfree_t *denom) { + int64_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + + // libdivide_mullhi_s64(numers, magic); + __m128i q = libdivide_mullhi_s64_vector(numers, _mm_set1_epi64x(magic)); + q = _mm_add_epi64(q, numers); // q += numers + + // If q is non-negative, we have nothing to do. + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2. + uint32_t is_power_of_2 = (magic == 0); + __m128i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63 + __m128i mask = _mm_set1_epi64x((1ULL << shift) - is_power_of_2); + q = _mm_add_epi64(q, _mm_and_si128(q_sign, mask)); // q = q + (q_sign & mask) + q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift + q = _mm_sub_epi64(_mm_xor_si128(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +#endif + +/////////// C++ stuff + +#ifdef __cplusplus + +// The C++ divider class is templated on both an integer type +// (like uint64_t) and an algorithm type. +// * BRANCHFULL is the default algorithm type. +// * BRANCHFREE is the branchfree algorithm type. +enum { + BRANCHFULL, + BRANCHFREE +}; + +#if defined(LIBDIVIDE_AVX512) + #define LIBDIVIDE_VECTOR_TYPE __m512i +#elif defined(LIBDIVIDE_AVX2) + #define LIBDIVIDE_VECTOR_TYPE __m256i +#elif defined(LIBDIVIDE_SSE2) + #define LIBDIVIDE_VECTOR_TYPE __m128i +#endif + +#if !defined(LIBDIVIDE_VECTOR_TYPE) + #define LIBDIVIDE_DIVIDE_VECTOR(ALGO) +#else + #define LIBDIVIDE_DIVIDE_VECTOR(ALGO) \ + LIBDIVIDE_VECTOR_TYPE divide(LIBDIVIDE_VECTOR_TYPE n) const { \ + return libdivide_##ALGO##_do_vector(n, &denom); \ + } +#endif + +// The DISPATCHER_GEN() macro generates C++ methods (for the given integer +// and algorithm types) that redirect to libdivide's C API. +#define DISPATCHER_GEN(T, ALGO) \ + libdivide_##ALGO##_t denom; \ + dispatcher() { } \ + dispatcher(T d) \ + : denom(libdivide_##ALGO##_gen(d)) \ + { } \ + T divide(T n) const { \ + return libdivide_##ALGO##_do(n, &denom); \ + } \ + LIBDIVIDE_DIVIDE_VECTOR(ALGO) \ + T recover() const { \ + return libdivide_##ALGO##_recover(&denom); \ + } + +// The dispatcher selects a specific division algorithm for a given +// type and ALGO using partial template specialization. +template struct dispatcher { }; + +template<> struct dispatcher { DISPATCHER_GEN(int32_t, s32) }; +template<> struct dispatcher { DISPATCHER_GEN(int32_t, s32_branchfree) }; +template<> struct dispatcher { DISPATCHER_GEN(uint32_t, u32) }; +template<> struct dispatcher { DISPATCHER_GEN(uint32_t, u32_branchfree) }; +template<> struct dispatcher { DISPATCHER_GEN(int64_t, s64) }; +template<> struct dispatcher { DISPATCHER_GEN(int64_t, s64_branchfree) }; +template<> struct dispatcher { DISPATCHER_GEN(uint64_t, u64) }; +template<> struct dispatcher { DISPATCHER_GEN(uint64_t, u64_branchfree) }; + +// This is the main divider class for use by the user (C++ API). +// The actual division algorithm is selected using the dispatcher struct +// based on the integer and algorithm template parameters. +template +class divider { +public: + // We leave the default constructor empty so that creating + // an array of dividers and then initializing them + // later doesn't slow us down. + divider() { } + + // Constructor that takes the divisor as a parameter + divider(T d) : div(d) { } + + // Divides n by the divisor + T divide(T n) const { + return div.divide(n); + } + + // Recovers the divisor, returns the value that was + // used to initialize this divider object. + T recover() const { + return div.recover(); + } + + bool operator==(const divider& other) const { + return div.denom.magic == other.denom.magic && + div.denom.more == other.denom.more; + } + + bool operator!=(const divider& other) const { + return !(*this == other); + } + +#if defined(LIBDIVIDE_VECTOR_TYPE) + // Treats the vector as packed integer values with the same type as + // the divider (e.g. s32, u32, s64, u64) and divides each of + // them by the divider, returning the packed quotients. + LIBDIVIDE_VECTOR_TYPE divide(LIBDIVIDE_VECTOR_TYPE n) const { + return div.divide(n); + } +#endif + +private: + // Storage for the actual divisor + dispatcher::value, + std::is_signed::value, sizeof(T), ALGO> div; +}; + +// Overload of operator / for scalar division +template +T operator/(T n, const divider& div) { + return div.divide(n); +} + +// Overload of operator /= for scalar division +template +T& operator/=(T& n, const divider& div) { + n = div.divide(n); + return n; +} + +#if defined(LIBDIVIDE_VECTOR_TYPE) + // Overload of operator / for vector division + template + LIBDIVIDE_VECTOR_TYPE operator/(LIBDIVIDE_VECTOR_TYPE n, const divider& div) { + return div.divide(n); + } + // Overload of operator /= for vector division + template + LIBDIVIDE_VECTOR_TYPE& operator/=(LIBDIVIDE_VECTOR_TYPE& n, const divider& div) { + n = div.divide(n); + return n; + } +#endif + +// libdivdie::branchfree_divider +template +using branchfree_divider = divider; + +} // namespace libdivide + +#endif // __cplusplus + +#endif // NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_ diff --git a/local_packages/numpy/_core/include/numpy/ufuncobject.h b/local_packages/numpy/_core/include/numpy/ufuncobject.h new file mode 100644 index 0000000..f5f82b5 --- /dev/null +++ b/local_packages/numpy/_core/include/numpy/ufuncobject.h @@ -0,0 +1,343 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_ + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * The legacy generic inner loop for a standard element-wise or + * generalized ufunc. + */ +typedef void (*PyUFuncGenericFunction) + (char **args, + npy_intp const *dimensions, + npy_intp const *strides, + void *innerloopdata); + +/* + * The most generic one-dimensional inner loop for + * a masked standard element-wise ufunc. "Masked" here means that it skips + * doing calculations on any items for which the maskptr array has a true + * value. + */ +typedef void (PyUFunc_MaskedStridedInnerLoopFunc)( + char **dataptrs, npy_intp *strides, + char *maskptr, npy_intp mask_stride, + npy_intp count, + NpyAuxData *innerloopdata); + +/* Forward declaration for the type resolver and loop selector typedefs */ +struct _tagPyUFuncObject; + +/* + * Given the operands for calling a ufunc, should determine the + * calculation input and output data types and return an inner loop function. + * This function should validate that the casting rule is being followed, + * and fail if it is not. + * + * For backwards compatibility, the regular type resolution function does not + * support auxiliary data with object semantics. The type resolution call + * which returns a masked generic function returns a standard NpyAuxData + * object, for which the NPY_AUXDATA_FREE and NPY_AUXDATA_CLONE macros + * work. + * + * ufunc: The ufunc object. + * casting: The 'casting' parameter provided to the ufunc. + * operands: An array of length (ufunc->nin + ufunc->nout), + * with the output parameters possibly NULL. + * type_tup: Either NULL, or the type_tup passed to the ufunc. + * out_dtypes: An array which should be populated with new + * references to (ufunc->nin + ufunc->nout) new + * dtypes, one for each input and output. These + * dtypes should all be in native-endian format. + * + * Should return 0 on success, -1 on failure (with exception set), + * or -2 if Py_NotImplemented should be returned. + */ +typedef int (PyUFunc_TypeResolutionFunc)( + struct _tagPyUFuncObject *ufunc, + NPY_CASTING casting, + PyArrayObject **operands, + PyObject *type_tup, + PyArray_Descr **out_dtypes); + +/* + * This is the signature for the functions that may be assigned to the + * `process_core_dims_func` field of the PyUFuncObject structure. + * Implementation of this function is optional. This function is only used + * by generalized ufuncs (i.e. those with the field `core_enabled` set to 1). + * The function is called by the ufunc during the processing of the arguments + * of a call of the ufunc. The function can check the core dimensions of the + * input and output arrays and return -1 with an exception set if any + * requirements are not satisfied. If the caller of the ufunc didn't provide + * output arrays, the core dimensions associated with the output arrays (i.e. + * those that are not also used in input arrays) will have the value -1 in + * `core_dim_sizes`. This function can replace any output core dimensions + * that are -1 with a value that is appropriate for the ufunc. + * + * Parameter Description + * --------------- ------------------------------------------------------ + * ufunc The ufunc object + * core_dim_sizes An array with length `ufunc->core_num_dim_ix`. + * The core dimensions of the arrays passed to the ufunc + * will have been set. If the caller of the ufunc didn't + * provide the output array(s), the output-only core + * dimensions will have the value -1. + * + * The function must not change any element in `core_dim_sizes` that is + * not -1 on input. Doing so will result in incorrect output from the + * ufunc, and could result in a crash of the Python interpreter. + * + * The function must return 0 on success, -1 on failure (with an exception + * set). + */ +typedef int (PyUFunc_ProcessCoreDimsFunc)( + struct _tagPyUFuncObject *ufunc, + npy_intp *core_dim_sizes); + +typedef struct _tagPyUFuncObject { + PyObject_HEAD + /* + * nin: Number of inputs + * nout: Number of outputs + * nargs: Always nin + nout (Why is it stored?) + */ + int nin, nout, nargs; + + /* + * Identity for reduction, any of PyUFunc_One, PyUFunc_Zero + * PyUFunc_MinusOne, PyUFunc_None, PyUFunc_ReorderableNone, + * PyUFunc_IdentityValue. + */ + int identity; + + /* Array of one-dimensional core loops */ + PyUFuncGenericFunction *functions; + /* Array of funcdata that gets passed into the functions */ + void *const *data; + /* The number of elements in 'functions' and 'data' */ + int ntypes; + + /* Used to be unused field 'check_return' */ + int reserved1; + + /* The name of the ufunc */ + const char *name; + + /* Array of type numbers, of size ('nargs' * 'ntypes') */ + const char *types; + + /* Documentation string */ + const char *doc; + + void *ptr; + PyObject *obj; + PyObject *userloops; + + /* generalized ufunc parameters */ + + /* 0 for scalar ufunc; 1 for generalized ufunc */ + int core_enabled; + /* number of distinct dimension names in signature */ + int core_num_dim_ix; + + /* + * dimension indices of input/output argument k are stored in + * core_dim_ixs[core_offsets[k]..core_offsets[k]+core_num_dims[k]-1] + */ + + /* numbers of core dimensions of each argument */ + int *core_num_dims; + /* + * dimension indices in a flatted form; indices + * are in the range of [0,core_num_dim_ix) + */ + int *core_dim_ixs; + /* + * positions of 1st core dimensions of each + * argument in core_dim_ixs, equivalent to cumsum(core_num_dims) + */ + int *core_offsets; + /* signature string for printing purpose */ + char *core_signature; + + /* + * A function which resolves the types and fills an array + * with the dtypes for the inputs and outputs. + */ + PyUFunc_TypeResolutionFunc *type_resolver; + + /* A dictionary to monkeypatch ufuncs */ + PyObject *dict; + + /* + * This was blocked off to be the "new" inner loop selector in 1.7, + * but this was never implemented. (This is also why the above + * selector is called the "legacy" selector.) + */ + #ifndef Py_LIMITED_API + vectorcallfunc vectorcall; + #else + void *vectorcall; + #endif + + /* Was previously the `PyUFunc_MaskedInnerLoopSelectionFunc` */ + void *reserved3; + + /* + * List of flags for each operand when ufunc is called by nditer object. + * These flags will be used in addition to the default flags for each + * operand set by nditer object. + */ + npy_uint32 *op_flags; + + /* + * List of global flags used when ufunc is called by nditer object. + * These flags will be used in addition to the default global flags + * set by nditer object. + */ + npy_uint32 iter_flags; + + /* New in NPY_API_VERSION 0x0000000D and above */ + #if NPY_FEATURE_VERSION >= NPY_1_16_API_VERSION + /* + * for each core_num_dim_ix distinct dimension names, + * the possible "frozen" size (-1 if not frozen). + */ + npy_intp *core_dim_sizes; + + /* + * for each distinct core dimension, a set of UFUNC_CORE_DIM* flags + */ + npy_uint32 *core_dim_flags; + + /* Identity for reduction, when identity == PyUFunc_IdentityValue */ + PyObject *identity_value; + #endif /* NPY_FEATURE_VERSION >= NPY_1_16_API_VERSION */ + + /* New in NPY_API_VERSION 0x0000000F and above */ + #if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION + /* New private fields related to dispatching */ + void *_dispatch_cache; + /* A PyListObject of `(tuple of DTypes, ArrayMethod/Promoter)` */ + PyObject *_loops; + #endif + #if NPY_FEATURE_VERSION >= NPY_2_1_API_VERSION + /* + * Optional function to process core dimensions of a gufunc. + */ + PyUFunc_ProcessCoreDimsFunc *process_core_dims_func; + #endif +} PyUFuncObject; + +#include "arrayobject.h" +/* Generalized ufunc; 0x0001 reserved for possible use as CORE_ENABLED */ +/* the core dimension's size will be determined by the operands. */ +#define UFUNC_CORE_DIM_SIZE_INFERRED 0x0002 +/* the core dimension may be absent */ +#define UFUNC_CORE_DIM_CAN_IGNORE 0x0004 +/* flags inferred during execution */ +#define UFUNC_CORE_DIM_MISSING 0x00040000 + + +#define UFUNC_OBJ_ISOBJECT 1 +#define UFUNC_OBJ_NEEDS_API 2 + + +#if NPY_ALLOW_THREADS +#define NPY_LOOP_BEGIN_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) _save = PyEval_SaveThread();} while (0); +#define NPY_LOOP_END_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) PyEval_RestoreThread(_save);} while (0); +#else +#define NPY_LOOP_BEGIN_THREADS +#define NPY_LOOP_END_THREADS +#endif + +/* + * UFunc has unit of 0, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. + */ +#define PyUFunc_Zero 0 +/* + * UFunc has unit of 1, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. + */ +#define PyUFunc_One 1 +/* + * UFunc has unit of -1, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. Intended for + * bitwise_and reduction. + */ +#define PyUFunc_MinusOne 2 +/* + * UFunc has no unit, and the order of operations cannot be reordered. + * This case does not allow reduction with multiple axes at once. + */ +#define PyUFunc_None -1 +/* + * UFunc has no unit, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. + */ +#define PyUFunc_ReorderableNone -2 +/* + * UFunc unit is an identity_value, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. + */ +#define PyUFunc_IdentityValue -3 + + +#define UFUNC_REDUCE 0 +#define UFUNC_ACCUMULATE 1 +#define UFUNC_REDUCEAT 2 +#define UFUNC_OUTER 3 + + +typedef struct { + int nin; + int nout; + PyObject *callable; +} PyUFunc_PyFuncData; + +/* A linked-list of function information for + user-defined 1-d loops. + */ +typedef struct _loop1d_info { + PyUFuncGenericFunction func; + void *data; + int *arg_types; + struct _loop1d_info *next; + int nargs; + PyArray_Descr **arg_dtypes; +} PyUFunc_Loop1d; + + +#define UFUNC_PYVALS_NAME "UFUNC_PYVALS" + +/* THESE MACROS ARE DEPRECATED. + * Use npy_set_floatstatus_* in the npymath library. + */ +#define UFUNC_FPE_DIVIDEBYZERO NPY_FPE_DIVIDEBYZERO +#define UFUNC_FPE_OVERFLOW NPY_FPE_OVERFLOW +#define UFUNC_FPE_UNDERFLOW NPY_FPE_UNDERFLOW +#define UFUNC_FPE_INVALID NPY_FPE_INVALID + +/* Make sure it gets defined if it isn't already */ +#ifndef UFUNC_NOFPE +/* Clear the floating point exception default of Borland C++ */ +#if defined(__BORLANDC__) +#define UFUNC_NOFPE _control87(MCW_EM, MCW_EM); +#else +#define UFUNC_NOFPE +#endif +#endif + +#include "__ufunc_api.h" + +#ifdef __cplusplus +} +#endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_ */ diff --git a/local_packages/numpy/_core/include/numpy/utils.h b/local_packages/numpy/_core/include/numpy/utils.h new file mode 100644 index 0000000..97f0609 --- /dev/null +++ b/local_packages/numpy/_core/include/numpy/utils.h @@ -0,0 +1,37 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_UTILS_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_UTILS_H_ + +#ifndef __COMP_NPY_UNUSED + #if defined(__GNUC__) + #define __COMP_NPY_UNUSED __attribute__ ((__unused__)) + #elif defined(__ICC) + #define __COMP_NPY_UNUSED __attribute__ ((__unused__)) + #elif defined(__clang__) + #define __COMP_NPY_UNUSED __attribute__ ((unused)) + #else + #define __COMP_NPY_UNUSED + #endif +#endif + +#if defined(__GNUC__) || defined(__ICC) || defined(__clang__) + #define NPY_DECL_ALIGNED(x) __attribute__ ((aligned (x))) +#elif defined(_MSC_VER) + #define NPY_DECL_ALIGNED(x) __declspec(align(x)) +#else + #define NPY_DECL_ALIGNED(x) +#endif + +/* Use this to tag a variable as not used. It will remove unused variable + * warning on support platforms (see __COM_NPY_UNUSED) and mangle the variable + * to avoid accidental use */ +#define NPY_UNUSED(x) __NPY_UNUSED_TAGGED ## x __COMP_NPY_UNUSED +#define NPY_EXPAND(x) x + +#define NPY_STRINGIFY(x) #x +#define NPY_TOSTRING(x) NPY_STRINGIFY(x) + +#define NPY_CAT__(a, b) a ## b +#define NPY_CAT_(a, b) NPY_CAT__(a, b) +#define NPY_CAT(a, b) NPY_CAT_(a, b) + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_UTILS_H_ */ diff --git a/local_packages/numpy/_core/lib/npy-pkg-config/mlib.ini b/local_packages/numpy/_core/lib/npy-pkg-config/mlib.ini new file mode 100644 index 0000000..5ab396e --- /dev/null +++ b/local_packages/numpy/_core/lib/npy-pkg-config/mlib.ini @@ -0,0 +1,12 @@ +[meta] +Name = mlib +Description = Math library used with this version of numpy +Version = 1.0 + +[default] +Libs= +Cflags= + +[msvc] +Libs=m.lib +Cflags= diff --git a/local_packages/numpy/_core/lib/npy-pkg-config/npymath.ini b/local_packages/numpy/_core/lib/npy-pkg-config/npymath.ini new file mode 100644 index 0000000..7225587 --- /dev/null +++ b/local_packages/numpy/_core/lib/npy-pkg-config/npymath.ini @@ -0,0 +1,20 @@ +[meta] +Name=npymath +Description=Portable, core math library implementing C99 standard +Version=0.1 + +[variables] +pkgname=numpy._core +prefix=${pkgdir} +libdir=${prefix}\lib +includedir=${prefix}\include + +[default] +Libs=-L${libdir} -lnpymath +Cflags=-I${includedir} +Requires=mlib + +[msvc] +Libs=/LIBPATH:${libdir} npymath.lib +Cflags=/INCLUDE:${includedir} +Requires=mlib diff --git a/local_packages/numpy/_core/lib/npymath.lib b/local_packages/numpy/_core/lib/npymath.lib new file mode 100644 index 0000000..85b5488 Binary files /dev/null and b/local_packages/numpy/_core/lib/npymath.lib differ diff --git a/local_packages/numpy/_core/lib/pkgconfig/numpy.pc b/local_packages/numpy/_core/lib/pkgconfig/numpy.pc new file mode 100644 index 0000000..9705dd1 --- /dev/null +++ b/local_packages/numpy/_core/lib/pkgconfig/numpy.pc @@ -0,0 +1,7 @@ +prefix=${pcfiledir}/../.. +includedir=${prefix}/include + +Name: numpy +Description: NumPy is the fundamental package for scientific computing with Python. +Version: 2.4.4 +Cflags: -I${includedir} diff --git a/local_packages/numpy/_core/memmap.py b/local_packages/numpy/_core/memmap.py new file mode 100644 index 0000000..8cfa7f9 --- /dev/null +++ b/local_packages/numpy/_core/memmap.py @@ -0,0 +1,363 @@ +import operator +from contextlib import nullcontext + +import numpy as np +from numpy._utils import set_module + +from .numeric import dtype, ndarray, uint8 + +__all__ = ['memmap'] + +dtypedescr = dtype +valid_filemodes = ["r", "c", "r+", "w+"] +writeable_filemodes = ["r+", "w+"] + +mode_equivalents = { + "readonly": "r", + "copyonwrite": "c", + "readwrite": "r+", + "write": "w+" + } + + +@set_module('numpy') +class memmap(ndarray): + """Create a memory-map to an array stored in a *binary* file on disk. + + Memory-mapped files are used for accessing small segments of large files + on disk, without reading the entire file into memory. NumPy's + memmap's are array-like objects. This differs from Python's ``mmap`` + module, which uses file-like objects. + + This subclass of ndarray has some unpleasant interactions with + some operations, because it doesn't quite fit properly as a subclass. + An alternative to using this subclass is to create the ``mmap`` + object yourself, then create an ndarray with ndarray.__new__ directly, + passing the object created in its 'buffer=' parameter. + + This class may at some point be turned into a factory function + which returns a view into an mmap buffer. + + Flush the memmap instance to write the changes to the file. Currently there + is no API to close the underlying ``mmap``. It is tricky to ensure the + resource is actually closed, since it may be shared between different + memmap instances. + + + Parameters + ---------- + filename : str, file-like object, or pathlib.Path instance + The file name or file object to be used as the array data buffer. + dtype : data-type, optional + The data-type used to interpret the file contents. + Default is `uint8`. + mode : {'r+', 'r', 'w+', 'c'}, optional + The file is opened in this mode: + + +------+-------------------------------------------------------------+ + | 'r' | Open existing file for reading only. | + +------+-------------------------------------------------------------+ + | 'r+' | Open existing file for reading and writing. | + +------+-------------------------------------------------------------+ + | 'w+' | Create or overwrite existing file for reading and writing. | + | | If ``mode == 'w+'`` then `shape` must also be specified. | + +------+-------------------------------------------------------------+ + | 'c' | Copy-on-write: assignments affect data in memory, but | + | | changes are not saved to disk. The file on disk is | + | | read-only. | + +------+-------------------------------------------------------------+ + + Default is 'r+'. + offset : int, optional + In the file, array data starts at this offset. Since `offset` is + measured in bytes, it should normally be a multiple of the byte-size + of `dtype`. When ``mode != 'r'``, even positive offsets beyond end of + file are valid; The file will be extended to accommodate the + additional data. By default, ``memmap`` will start at the beginning of + the file, even if ``filename`` is a file pointer ``fp`` and + ``fp.tell() != 0``. + shape : int or sequence of ints, optional + The desired shape of the array. If ``mode == 'r'`` and the number + of remaining bytes after `offset` is not a multiple of the byte-size + of `dtype`, you must specify `shape`. By default, the returned array + will be 1-D with the number of elements determined by file size + and data-type. + + .. versionchanged:: 2.0 + The shape parameter can now be any integer sequence type, previously + types were limited to tuple and int. + + order : {'C', 'F'}, optional + Specify the order of the ndarray memory layout: + :term:`row-major`, C-style or :term:`column-major`, + Fortran-style. This only has an effect if the shape is + greater than 1-D. The default order is 'C'. + + Attributes + ---------- + filename : str or pathlib.Path instance + Path to the mapped file. + offset : int + Offset position in the file. + mode : str + File mode. + + Methods + ------- + flush + Flush any changes in memory to file on disk. + When you delete a memmap object, flush is called first to write + changes to disk. + + + See also + -------- + lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file. + + Notes + ----- + The memmap object can be used anywhere an ndarray is accepted. + Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns + ``True``. + + Memory-mapped files cannot be larger than 2GB on 32-bit systems. + + When a memmap causes a file to be created or extended beyond its + current size in the filesystem, the contents of the new part are + unspecified. On systems with POSIX filesystem semantics, the extended + part will be filled with zero bytes. + + Examples + -------- + >>> import numpy as np + >>> data = np.arange(12, dtype='float32') + >>> data.resize((3,4)) + + This example uses a temporary file so that doctest doesn't write + files to your directory. You would use a 'normal' filename. + + >>> from tempfile import mkdtemp + >>> import os.path as path + >>> filename = path.join(mkdtemp(), 'newfile.dat') + + Create a memmap with dtype and shape that matches our data: + + >>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4)) + >>> fp + memmap([[0., 0., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 0.]], dtype=float32) + + Write data to memmap array: + + >>> fp[:] = data[:] + >>> fp + memmap([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + + >>> fp.filename == path.abspath(filename) + True + + Flushes memory changes to disk in order to read them back + + >>> fp.flush() + + Load the memmap and verify data was stored: + + >>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) + >>> newfp + memmap([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + + Read-only memmap: + + >>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) + >>> fpr.flags.writeable + False + + Copy-on-write memmap: + + >>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4)) + >>> fpc.flags.writeable + True + + It's possible to assign to copy-on-write array, but values are only + written into the memory copy of the array, and not written to disk: + + >>> fpc + memmap([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + >>> fpc[0,:] = 0 + >>> fpc + memmap([[ 0., 0., 0., 0.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + + File on disk is unchanged: + + >>> fpr + memmap([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + + Offset into a memmap: + + >>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16) + >>> fpo + memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32) + + """ + + __array_priority__ = -100.0 + + def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0, + shape=None, order='C'): + # Import here to minimize 'import numpy' overhead + import mmap + import os.path + try: + mode = mode_equivalents[mode] + except KeyError as e: + if mode not in valid_filemodes: + all_modes = valid_filemodes + list(mode_equivalents.keys()) + raise ValueError( + f"mode must be one of {all_modes!r} (got {mode!r})" + ) from None + + if mode == 'w+' and shape is None: + raise ValueError("shape must be given if mode == 'w+'") + + if hasattr(filename, 'read'): + f_ctx = nullcontext(filename) + else: + f_ctx = open( + os.fspath(filename), + ('r' if mode == 'c' else mode) + 'b' + ) + + with f_ctx as fid: + fid.seek(0, 2) + flen = fid.tell() + descr = dtypedescr(dtype) + _dbytes = descr.itemsize + + if shape is None: + bytes = flen - offset + if bytes % _dbytes: + raise ValueError("Size of available data is not a " + "multiple of the data-type size.") + size = bytes // _dbytes + shape = (size,) + else: + if not isinstance(shape, (tuple, list)): + try: + shape = [operator.index(shape)] + except TypeError: + pass + shape = tuple(shape) + size = np.intp(1) # avoid overflows + for k in shape: + size *= k + + bytes = int(offset + size * _dbytes) + + if mode in ('w+', 'r+'): + # gh-27723 + # if bytes == 0, we write out 1 byte to allow empty memmap. + bytes = max(bytes, 1) + if flen < bytes: + fid.seek(bytes - 1, 0) + fid.write(b'\0') + fid.flush() + + if mode == 'c': + acc = mmap.ACCESS_COPY + elif mode == 'r': + acc = mmap.ACCESS_READ + else: + acc = mmap.ACCESS_WRITE + + start = offset - offset % mmap.ALLOCATIONGRANULARITY + bytes -= start + # bytes == 0 is problematic as in mmap length=0 maps the full file. + # See PR gh-27723 for a more detailed explanation. + if bytes == 0 and start > 0: + bytes += mmap.ALLOCATIONGRANULARITY + start -= mmap.ALLOCATIONGRANULARITY + array_offset = offset - start + mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start) + + self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm, + offset=array_offset, order=order) + self._mmap = mm + self.offset = offset + self.mode = mode + + if isinstance(filename, os.PathLike): + # special case - if we were constructed with a pathlib.path, + # then filename is a path object, not a string + self.filename = filename.resolve() + elif hasattr(fid, "name") and isinstance(fid.name, str): + # py3 returns int for TemporaryFile().name + self.filename = os.path.abspath(fid.name) + # same as memmap copies (e.g. memmap + 1) + else: + self.filename = None + + return self + + def __array_finalize__(self, obj): + if hasattr(obj, '_mmap') and np.may_share_memory(self, obj): + self._mmap = obj._mmap + self.filename = obj.filename + self.offset = obj.offset + self.mode = obj.mode + else: + self._mmap = None + self.filename = None + self.offset = None + self.mode = None + + def flush(self): + """ + Write any changes in the array to the file on disk. + + For further information, see `memmap`. + + Parameters + ---------- + None + + See Also + -------- + memmap + + """ + if self.base is not None and hasattr(self.base, 'flush'): + self.base.flush() + + def __array_wrap__(self, arr, context=None, return_scalar=False): + arr = super().__array_wrap__(arr, context) + + # Return a memmap if a memmap was given as the output of the + # ufunc. Leave the arr class unchanged if self is not a memmap + # to keep original memmap subclasses behavior + if self is arr or type(self) is not memmap: + return arr + + # Return scalar instead of 0d memmap, e.g. for np.sum with + # axis=None (note that subclasses will not reach here) + if return_scalar: + return arr[()] + + # Return ndarray otherwise + return arr.view(np.ndarray) + + def __getitem__(self, index): + res = super().__getitem__(index) + if type(res) is memmap and res._mmap is None: + return res.view(type=ndarray) + return res diff --git a/local_packages/numpy/_core/memmap.pyi b/local_packages/numpy/_core/memmap.pyi new file mode 100644 index 0000000..0b31328 --- /dev/null +++ b/local_packages/numpy/_core/memmap.pyi @@ -0,0 +1,3 @@ +from numpy import memmap + +__all__ = ["memmap"] diff --git a/local_packages/numpy/_core/multiarray.py b/local_packages/numpy/_core/multiarray.py new file mode 100644 index 0000000..dd5a66a --- /dev/null +++ b/local_packages/numpy/_core/multiarray.py @@ -0,0 +1,1740 @@ +""" +Create the numpy._core.multiarray namespace for backward compatibility. +In v1.16 the multiarray and umath c-extension modules were merged into +a single _multiarray_umath extension module. So we replicate the old +namespace by importing from the extension module. + +""" + +import functools + +from . import _multiarray_umath, overrides +from ._multiarray_umath import * # noqa: F403 + +# These imports are needed for backward compatibility, +# do not change them. issue gh-15518 +# _get_ndarray_c_version is semi-public, on purpose not added to __all__ +from ._multiarray_umath import ( # noqa: F401 + _ARRAY_API, + _flagdict, + _get_madvise_hugepage, + _get_ndarray_c_version, + _monotonicity, + _place, + _reconstruct, + _set_madvise_hugepage, + _vec_string, + from_dlpack, +) + +__all__ = [ + '_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS', + 'ITEM_HASOBJECT', 'ITEM_IS_POINTER', 'LIST_PICKLE', 'MAXDIMS', + 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'NEEDS_INIT', 'NEEDS_PYAPI', + 'RAISE', 'USE_GETITEM', 'USE_SETITEM', 'WRAP', + '_flagdict', 'from_dlpack', '_place', '_reconstruct', '_vec_string', + '_monotonicity', 'add_docstring', 'arange', 'array', 'asarray', + 'asanyarray', 'ascontiguousarray', 'asfortranarray', 'bincount', + 'broadcast', 'busday_count', 'busday_offset', 'busdaycalendar', 'can_cast', + 'compare_chararrays', 'concatenate', 'copyto', 'correlate', 'correlate2', + 'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data', + 'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype', + 'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat', + 'frombuffer', 'fromfile', 'fromiter', 'fromstring', + 'get_handler_name', 'get_handler_version', 'inner', 'interp', + 'interp_complex', 'is_busday', 'lexsort', 'matmul', 'vecdot', + 'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer', 'nested_iters', + 'normalize_axis_index', 'packbits', 'promote_types', 'putmask', + 'ravel_multi_index', 'result_type', 'scalar', 'set_datetimeparse_function', + 'set_typeDict', 'shares_memory', 'typeinfo', + 'unpackbits', 'unravel_index', 'vdot', 'where', 'zeros'] + +# For backward compatibility, make sure pickle imports +# these functions from here +_reconstruct.__module__ = 'numpy._core.multiarray' +scalar.__module__ = 'numpy._core.multiarray' + + +from_dlpack.__module__ = 'numpy' +arange.__module__ = 'numpy' +array.__module__ = 'numpy' +asarray.__module__ = 'numpy' +asanyarray.__module__ = 'numpy' +ascontiguousarray.__module__ = 'numpy' +asfortranarray.__module__ = 'numpy' +datetime_data.__module__ = 'numpy' +empty.__module__ = 'numpy' +frombuffer.__module__ = 'numpy' +fromfile.__module__ = 'numpy' +fromiter.__module__ = 'numpy' +frompyfunc.__module__ = 'numpy' +fromstring.__module__ = 'numpy' +may_share_memory.__module__ = 'numpy' +nested_iters.__module__ = 'numpy' +promote_types.__module__ = 'numpy' +zeros.__module__ = 'numpy' +normalize_axis_index.__module__ = 'numpy.lib.array_utils' +add_docstring.__module__ = 'numpy.lib' +compare_chararrays.__module__ = 'numpy.char' + + +def _override___module__(): + namespace_names = globals() + for ufunc_name in [ + 'absolute', 'arccos', 'arccosh', 'add', 'arcsin', 'arcsinh', 'arctan', + 'arctan2', 'arctanh', 'bitwise_and', 'bitwise_count', 'invert', + 'left_shift', 'bitwise_or', 'right_shift', 'bitwise_xor', 'cbrt', + 'ceil', 'conjugate', 'copysign', 'cos', 'cosh', 'deg2rad', 'degrees', + 'divide', 'divmod', 'equal', 'exp', 'exp2', 'expm1', 'fabs', + 'float_power', 'floor', 'floor_divide', 'fmax', 'fmin', 'fmod', + 'frexp', 'gcd', 'greater', 'greater_equal', 'heaviside', 'hypot', + 'isfinite', 'isinf', 'isnan', 'isnat', 'lcm', 'ldexp', 'less', + 'less_equal', 'log', 'log10', 'log1p', 'log2', 'logaddexp', + 'logaddexp2', 'logical_and', 'logical_not', 'logical_or', + 'logical_xor', 'matmul', 'matvec', 'maximum', 'minimum', 'remainder', + 'modf', 'multiply', 'negative', 'nextafter', 'not_equal', 'positive', + 'power', 'rad2deg', 'radians', 'reciprocal', 'rint', 'sign', 'signbit', + 'sin', 'sinh', 'spacing', 'sqrt', 'square', 'subtract', 'tan', 'tanh', + 'trunc', 'vecdot', 'vecmat', + ]: + ufunc = namespace_names[ufunc_name] + ufunc.__module__ = "numpy" + ufunc.__qualname__ = ufunc_name + + +_override___module__() + + +# We can't verify dispatcher signatures because NumPy's C functions don't +# support introspection. +array_function_from_c_func_and_dispatcher = functools.partial( + overrides.array_function_from_dispatcher, + module='numpy', docs_from_dispatcher=True, verify=False) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like) +def empty_like( + prototype, dtype=None, order="K", subok=True, shape=None, *, device=None +): + """ + empty_like( + prototype, + /, + dtype=None, + order='K', + subok=True, + shape=None, + *, + device=None, + ) + -- + + Return a new array with the same shape and type as a given array. + + Parameters + ---------- + prototype : array_like + The shape and data-type of `prototype` define these same attributes + of the returned array. + dtype : data-type, optional + Overrides the data type of the result. + order : {'C', 'F', 'A', or 'K'}, optional + Overrides the memory layout of the result. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `prototype` is Fortran + contiguous, 'C' otherwise. 'K' means match the layout of `prototype` + as closely as possible. + subok : bool, optional. + If True, then the newly created array will use the sub-class + type of `prototype`, otherwise it will be a base-class array. Defaults + to True. + shape : int or sequence of ints, optional. + Overrides the shape of the result. If order='K' and the number of + dimensions is unchanged, will try to keep order, otherwise, + order='C' is implied. + device : str, optional + The device on which to place the created array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : ndarray + Array of uninitialized (arbitrary) data with the same + shape and type as `prototype`. + + See Also + -------- + ones_like : Return an array of ones with shape and type of input. + zeros_like : Return an array of zeros with shape and type of input. + full_like : Return a new array with shape of input filled with value. + empty : Return a new uninitialized array. + + Notes + ----- + Unlike other array creation functions (e.g. `zeros_like`, `ones_like`, + `full_like`), `empty_like` does not initialize the values of the array, + and may therefore be marginally faster. However, the values stored in the + newly allocated array are arbitrary. For reproducible behavior, be sure + to set each element of the array before reading. + + Examples + -------- + >>> import numpy as np + >>> a = ([1,2,3], [4,5,6]) # a is array-like + >>> np.empty_like(a) + array([[-1073741821, -1073741821, 3], # uninitialized + [ 0, 0, -1073741821]]) + >>> a = np.array([[1., 2., 3.],[4.,5.,6.]]) + >>> np.empty_like(a) + array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000], # uninitialized + [ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]]) + + """ + return (prototype,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate) +def concatenate(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"): + """ + concatenate( + arrays, + /, + axis=0, + out=None, + *, + dtype=None, + casting="same_kind", + ) + -- + + Join a sequence of arrays along an existing axis. + + Parameters + ---------- + a1, a2, ... : sequence of array_like + The arrays must have the same shape, except in the dimension + corresponding to `axis` (the first, by default). + axis : int, optional + The axis along which the arrays will be joined. If axis is None, + arrays are flattened before use. Default is 0. + out : ndarray, optional + If provided, the destination to place the result. The shape must be + correct, matching that of what concatenate would have returned if no + out argument were specified. + dtype : str or dtype + If provided, the destination array will have this dtype. Cannot be + provided together with `out`. + + .. versionadded:: 1.20.0 + + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Defaults to 'same_kind'. + For a description of the options, please see :term:`casting`. + + .. versionadded:: 1.20.0 + + Returns + ------- + res : ndarray + The concatenated array. + + See Also + -------- + ma.concatenate : Concatenate function that preserves input masks. + array_split : Split an array into multiple sub-arrays of equal or + near-equal size. + split : Split array into a list of multiple sub-arrays of equal size. + hsplit : Split array into multiple sub-arrays horizontally (column wise). + vsplit : Split array into multiple sub-arrays vertically (row wise). + dsplit : Split array into multiple sub-arrays along the 3rd axis (depth). + stack : Stack a sequence of arrays along a new axis. + block : Assemble arrays from blocks. + hstack : Stack arrays in sequence horizontally (column wise). + vstack : Stack arrays in sequence vertically (row wise). + dstack : Stack arrays in sequence depth wise (along third dimension). + column_stack : Stack 1-D arrays as columns into a 2-D array. + + Notes + ----- + When one or more of the arrays to be concatenated is a MaskedArray, + this function will return a MaskedArray object instead of an ndarray, + but the input masks are *not* preserved. In cases where a MaskedArray + is expected as input, use the ma.concatenate function from the masked + array module instead. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4]]) + >>> b = np.array([[5, 6]]) + >>> np.concatenate((a, b), axis=0) + array([[1, 2], + [3, 4], + [5, 6]]) + >>> np.concatenate((a, b.T), axis=1) + array([[1, 2, 5], + [3, 4, 6]]) + >>> np.concatenate((a, b), axis=None) + array([1, 2, 3, 4, 5, 6]) + + This function will not preserve masking of MaskedArray inputs. + + >>> a = np.ma.arange(3) + >>> a[1] = np.ma.masked + >>> b = np.arange(2, 5) + >>> a + masked_array(data=[0, --, 2], + mask=[False, True, False], + fill_value=999999) + >>> b + array([2, 3, 4]) + >>> np.concatenate([a, b]) + masked_array(data=[0, 1, 2, 2, 3, 4], + mask=False, + fill_value=999999) + >>> np.ma.concatenate([a, b]) + masked_array(data=[0, --, 2, 2, 3, 4], + mask=[False, True, False, False, False, False], + fill_value=999999) + + """ + if out is not None: + # optimize for the typical case where only arrays is provided + arrays = list(arrays) + arrays.append(out) + return arrays + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner) +def inner(a, b, /): + """ + inner(a, b, /) + + Inner product of two arrays. + + Ordinary inner product of vectors for 1-D arrays (without complex + conjugation), in higher dimensions a sum product over the last axes. + + Parameters + ---------- + a, b : array_like + If `a` and `b` are nonscalar, their last dimensions must match. + + Returns + ------- + out : ndarray + If `a` and `b` are both + scalars or both 1-D arrays then a scalar is returned; otherwise + an array is returned. + ``out.shape = (*a.shape[:-1], *b.shape[:-1])`` + + Raises + ------ + ValueError + If both `a` and `b` are nonscalar and their last dimensions have + different sizes. + + See Also + -------- + tensordot : Sum products over arbitrary axes. + dot : Generalised matrix product, using second last dimension of `b`. + vecdot : Vector dot product of two arrays. + einsum : Einstein summation convention. + + Notes + ----- + For vectors (1-D arrays) it computes the ordinary inner-product:: + + np.inner(a, b) = sum(a[:]*b[:]) + + More generally, if ``ndim(a) = r > 0`` and ``ndim(b) = s > 0``:: + + np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1)) + + or explicitly:: + + np.inner(a, b)[i0,...,ir-2,j0,...,js-2] + = sum(a[i0,...,ir-2,:]*b[j0,...,js-2,:]) + + In addition `a` or `b` may be scalars, in which case:: + + np.inner(a,b) = a*b + + Examples + -------- + Ordinary inner product for vectors: + + >>> import numpy as np + >>> a = np.array([1,2,3]) + >>> b = np.array([0,1,0]) + >>> np.inner(a, b) + 2 + + Some multidimensional examples: + + >>> a = np.arange(24).reshape((2,3,4)) + >>> b = np.arange(4) + >>> c = np.inner(a, b) + >>> c.shape + (2, 3) + >>> c + array([[ 14, 38, 62], + [ 86, 110, 134]]) + + >>> a = np.arange(2).reshape((1,1,2)) + >>> b = np.arange(6).reshape((3,2)) + >>> c = np.inner(a, b) + >>> c.shape + (1, 1, 3) + >>> c + array([[[1, 3, 5]]]) + + An example where `b` is a scalar: + + >>> np.inner(np.eye(2), 7) + array([[7., 0.], + [0., 7.]]) + + """ + return (a, b) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.where) +def where(condition, x=None, y=None, /): + """ + where(condition, [x, y], /) + + Return elements chosen from `x` or `y` depending on `condition`. + + .. note:: + When only `condition` is provided, this function is a shorthand for + ``np.asarray(condition).nonzero()``. Using `nonzero` directly should be + preferred, as it behaves correctly for subclasses. The rest of this + documentation covers only the case where all three arguments are + provided. + + Parameters + ---------- + condition : array_like, bool + Where True, yield `x`, otherwise yield `y`. + x, y : array_like + Values from which to choose. `x`, `y` and `condition` need to be + broadcastable to some shape. + + Returns + ------- + out : ndarray + An array with elements from `x` where `condition` is True, and elements + from `y` elsewhere. + + See Also + -------- + choose + nonzero : The function that is called when x and y are omitted + + Notes + ----- + If all the arrays are 1-D, `where` is equivalent to:: + + [xv if c else yv + for c, xv, yv in zip(condition, x, y)] + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(10) + >>> a + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.where(a < 5, a, 10*a) + array([ 0, 1, 2, 3, 4, 50, 60, 70, 80, 90]) + + This can be used on multidimensional arrays too: + + >>> np.where([[True, False], [True, True]], + ... [[1, 2], [3, 4]], + ... [[9, 8], [7, 6]]) + array([[1, 8], + [3, 4]]) + + The shapes of x, y, and the condition are broadcast together: + + >>> x, y = np.ogrid[:3, :4] + >>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast + array([[10, 0, 0, 0], + [10, 11, 1, 1], + [10, 11, 12, 2]]) + + >>> a = np.array([[0, 1, 2], + ... [0, 2, 4], + ... [0, 3, 6]]) + >>> np.where(a < 4, a, -1) # -1 is broadcast + array([[ 0, 1, 2], + [ 0, 2, -1], + [ 0, 3, -1]]) + """ + return (condition, x, y) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort) +def lexsort(keys, axis=-1): + """ + lexsort(keys, axis=-1) + + Perform an indirect stable sort using a sequence of keys. + + Given multiple sorting keys, lexsort returns an array of integer indices + that describes the sort order by multiple keys. The last key in the + sequence is used for the primary sort order, ties are broken by the + second-to-last key, and so on. + + Parameters + ---------- + keys : (k, m, n, ...) array-like + The `k` keys to be sorted. The *last* key (e.g, the last + row if `keys` is a 2D array) is the primary sort key. + Each element of `keys` along the zeroth axis must be + an array-like object of the same shape. + axis : int, optional + Axis to be indirectly sorted. By default, sort over the last axis + of each sequence. Separate slices along `axis` sorted over + independently; see last example. + + Returns + ------- + indices : (m, n, ...) ndarray of ints + Array of indices that sort the keys along the specified axis. + + See Also + -------- + argsort : Indirect sort. + ndarray.sort : In-place sort. + sort : Return a sorted copy of an array. + + Examples + -------- + Sort names: first by surname, then by name. + + >>> import numpy as np + >>> surnames = ('Hertz', 'Galilei', 'Hertz') + >>> first_names = ('Heinrich', 'Galileo', 'Gustav') + >>> ind = np.lexsort((first_names, surnames)) + >>> ind + array([1, 2, 0]) + + >>> [surnames[i] + ", " + first_names[i] for i in ind] + ['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich'] + + Sort according to two numerical keys, first by elements + of ``a``, then breaking ties according to elements of ``b``: + + >>> a = [1, 5, 1, 4, 3, 4, 4] # First sequence + >>> b = [9, 4, 0, 4, 0, 2, 1] # Second sequence + >>> ind = np.lexsort((b, a)) # Sort by `a`, then by `b` + >>> ind + array([2, 0, 4, 6, 5, 3, 1]) + >>> [(a[i], b[i]) for i in ind] + [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)] + + Compare against `argsort`, which would sort each key independently. + + >>> np.argsort((b, a), kind='stable') + array([[2, 4, 6, 5, 1, 3, 0], + [0, 2, 4, 3, 5, 6, 1]]) + + To sort lexicographically with `argsort`, we would need to provide a + structured array. + + >>> x = np.array([(ai, bi) for ai, bi in zip(a, b)], + ... dtype = np.dtype([('x', int), ('y', int)])) + >>> np.argsort(x) # or np.argsort(x, order=('x', 'y')) + array([2, 0, 4, 6, 5, 3, 1]) + + The zeroth axis of `keys` always corresponds with the sequence of keys, + so 2D arrays are treated just like other sequences of keys. + + >>> arr = np.asarray([b, a]) + >>> ind2 = np.lexsort(arr) + >>> np.testing.assert_equal(ind2, ind) + + Accordingly, the `axis` parameter refers to an axis of *each* key, not of + the `keys` argument itself. For instance, the array ``arr`` is treated as + a sequence of two 1-D keys, so specifying ``axis=0`` is equivalent to + using the default axis, ``axis=-1``. + + >>> np.testing.assert_equal(np.lexsort(arr, axis=0), + ... np.lexsort(arr, axis=-1)) + + For higher-dimensional arrays, the axis parameter begins to matter. The + resulting array has the same shape as each key, and the values are what + we would expect if `lexsort` were performed on corresponding slices + of the keys independently. For instance, + + >>> x = [[1, 2, 3, 4], + ... [4, 3, 2, 1], + ... [2, 1, 4, 3]] + >>> y = [[2, 2, 1, 1], + ... [1, 2, 1, 2], + ... [1, 1, 2, 1]] + >>> np.lexsort((x, y), axis=1) + array([[2, 3, 0, 1], + [2, 0, 3, 1], + [1, 0, 3, 2]]) + + Each row of the result is what we would expect if we were to perform + `lexsort` on the corresponding row of the keys: + + >>> for i in range(3): + ... print(np.lexsort((x[i], y[i]))) + [2 3 0 1] + [2 0 3 1] + [1 0 3 2] + + """ + if isinstance(keys, tuple): + return keys + else: + return (keys,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast) +def can_cast(from_, to, casting="safe"): + """ + can_cast(from_, to, casting='safe') + + Returns True if cast between data types can occur according to the + casting rule. + + Parameters + ---------- + from_ : dtype, dtype specifier, NumPy scalar, or array + Data type, NumPy scalar, or array to cast from. + to : dtype or dtype specifier + Data type to cast to. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + + Returns + ------- + out : bool + True if cast can occur according to the casting rule. + + Notes + ----- + .. versionchanged:: 2.0 + This function does not support Python scalars anymore and does not + apply any value-based logic for 0-D arrays and NumPy scalars. + + See also + -------- + dtype, result_type + + Examples + -------- + Basic examples + + >>> import numpy as np + >>> np.can_cast(np.int32, np.int64) + True + >>> np.can_cast(np.float64, complex) + True + >>> np.can_cast(complex, float) + False + + >>> np.can_cast('i8', 'f8') + True + >>> np.can_cast('i8', 'f4') + False + >>> np.can_cast('i4', 'S4') + False + + """ + return (from_,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type) +def min_scalar_type(a, /): + """ + min_scalar_type(a, /) + + For scalar ``a``, returns the data type with the smallest size + and smallest scalar kind which can hold its value. For non-scalar + array ``a``, returns the vector's dtype unmodified. + + Floating point values are not demoted to integers, + and complex values are not demoted to floats. + + Parameters + ---------- + a : scalar or array_like + The value whose minimal data type is to be found. + + Returns + ------- + out : dtype + The minimal data type. + + See Also + -------- + result_type, promote_types, dtype, can_cast + + Examples + -------- + >>> import numpy as np + >>> np.min_scalar_type(10) + dtype('uint8') + + >>> np.min_scalar_type(-260) + dtype('int16') + + >>> np.min_scalar_type(3.1) + dtype('float16') + + >>> np.min_scalar_type(1e50) + dtype('float64') + + >>> np.min_scalar_type(np.arange(4,dtype='f8')) + dtype('float64') + + """ + return (a,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.result_type) +def result_type(*arrays_and_dtypes): + """ + result_type(*arrays_and_dtypes) + + Returns the type that results from applying the NumPy + :ref:`type promotion ` rules to the arguments. + + Parameters + ---------- + arrays_and_dtypes : list of arrays and dtypes + The operands of some operation whose result type is needed. + + Returns + ------- + out : dtype + The result type. + + See also + -------- + dtype, promote_types, min_scalar_type, can_cast + + Examples + -------- + >>> import numpy as np + >>> np.result_type(3, np.arange(7, dtype='i1')) + dtype('int8') + + >>> np.result_type('i4', 'c8') + dtype('complex128') + + >>> np.result_type(3.0, -2) + dtype('float64') + + """ + return arrays_and_dtypes + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.dot) +def dot(a, b, out=None): + """ + dot(a, b, out=None) + + Dot product of two arrays. Specifically, + + - If both `a` and `b` are 1-D arrays, it is inner product of vectors + (without complex conjugation). + + - If both `a` and `b` are 2-D arrays, it is matrix multiplication, + but using :func:`matmul` or ``a @ b`` is preferred. + + - If either `a` or `b` is 0-D (scalar), it is equivalent to + :func:`multiply` and using ``numpy.multiply(a, b)`` or ``a * b`` is + preferred. + + - If `a` is an N-D array and `b` is a 1-D array, it is a sum product over + the last axis of `a` and `b`. + + - If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a + sum product over the last axis of `a` and the second-to-last axis of + `b`:: + + dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m]) + + It uses an optimized BLAS library when possible (see `numpy.linalg`). + + Parameters + ---------- + a : array_like + First argument. + b : array_like + Second argument. + out : ndarray, optional + Output argument. This must have the exact kind that would be returned + if it was not used. In particular, it must have the right type, must be + C-contiguous, and its dtype must be the dtype that would be returned + for `dot(a,b)`. This is a performance feature. Therefore, if these + conditions are not met, an exception is raised, instead of attempting + to be flexible. + + Returns + ------- + output : ndarray + Returns the dot product of `a` and `b`. If `a` and `b` are both + scalars or both 1-D arrays then a scalar is returned; otherwise + an array is returned. + If `out` is given, then it is returned. + + Raises + ------ + ValueError + If the last dimension of `a` is not the same size as + the second-to-last dimension of `b`. + + See Also + -------- + vdot : Complex-conjugating dot product. + vecdot : Vector dot product of two arrays. + tensordot : Sum products over arbitrary axes. + einsum : Einstein summation convention. + matmul : '@' operator as method with out parameter. + linalg.multi_dot : Chained dot product. + + Examples + -------- + >>> import numpy as np + >>> np.dot(3, 4) + 12 + + Neither argument is complex-conjugated: + + >>> np.dot([2j, 3j], [2j, 3j]) + (-13+0j) + + For 2-D arrays it is the matrix product: + + >>> a = [[1, 0], [0, 1]] + >>> b = [[4, 1], [2, 2]] + >>> np.dot(a, b) + array([[4, 1], + [2, 2]]) + + >>> a = np.arange(3*4*5*6).reshape((3,4,5,6)) + >>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3)) + >>> np.dot(a, b)[2,3,2,1,2,2] + 499128 + >>> sum(a[2,3,2,:] * b[1,2,:,2]) + 499128 + + """ + return (a, b, out) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot) +def vdot(a, b, /): + r""" + vdot(a, b, /) + + Return the dot product of two vectors. + + The `vdot` function handles complex numbers differently than `dot`: + if the first argument is complex, it is replaced by its complex conjugate + in the dot product calculation. `vdot` also handles multidimensional + arrays differently than `dot`: it does not perform a matrix product, but + flattens the arguments to 1-D arrays before taking a vector dot product. + + Consequently, when the arguments are 2-D arrays of the same shape, this + function effectively returns their + `Frobenius inner product `_ + (also known as the *trace inner product* or the *standard inner product* + on a vector space of matrices). + + Parameters + ---------- + a : array_like + If `a` is complex the complex conjugate is taken before calculation + of the dot product. + b : array_like + Second argument to the dot product. + + Returns + ------- + output : ndarray + Dot product of `a` and `b`. Can be an int, float, or + complex depending on the types of `a` and `b`. + + See Also + -------- + dot : Return the dot product without using the complex conjugate of the + first argument. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([1+2j,3+4j]) + >>> b = np.array([5+6j,7+8j]) + >>> np.vdot(a, b) + (70-8j) + >>> np.vdot(b, a) + (70+8j) + + Note that higher-dimensional arrays are flattened! + + >>> a = np.array([[1, 4], [5, 6]]) + >>> b = np.array([[4, 1], [2, 2]]) + >>> np.vdot(a, b) + 30 + >>> np.vdot(b, a) + 30 + >>> 1*4 + 4*1 + 5*2 + 6*2 + 30 + + """ # noqa: E501 + return (a, b) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount) +def bincount(x, /, weights=None, minlength=0): + """ + bincount(x, /, weights=None, minlength=0) + + Count number of occurrences of each value in array of non-negative ints. + + The number of bins (of size 1) is one larger than the largest value in + `x`. If `minlength` is specified, there will be at least this number + of bins in the output array (though it will be longer if necessary, + depending on the contents of `x`). + Each bin gives the number of occurrences of its index value in `x`. + If `weights` is specified the input array is weighted by it, i.e. if a + value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead + of ``out[n] += 1``. + + Parameters + ---------- + x : array_like, 1 dimension, nonnegative ints + Input array. + weights : array_like, optional + Weights, array of the same shape as `x`. + minlength : int, optional + A minimum number of bins for the output array. + + Returns + ------- + out : ndarray of ints + The result of binning the input array. + The length of `out` is equal to ``np.amax(x)+1``. + + Raises + ------ + ValueError + If the input is not 1-dimensional, or contains elements with negative + values, or if `minlength` is negative. + TypeError + If the type of the input is float or complex. + + See Also + -------- + histogram, digitize, unique + + Examples + -------- + >>> import numpy as np + >>> np.bincount(np.arange(5)) + array([1, 1, 1, 1, 1]) + >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7])) + array([1, 3, 1, 1, 0, 0, 0, 1]) + + >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23]) + >>> np.bincount(x).size == np.amax(x)+1 + True + + The input array needs to be of integer dtype, otherwise a + TypeError is raised: + + >>> np.bincount(np.arange(5, dtype=float)) + Traceback (most recent call last): + ... + TypeError: Cannot cast array data from dtype('float64') to dtype('int64') + according to the rule 'safe' + + A possible use of ``bincount`` is to perform sums over + variable-size chunks of an array, using the ``weights`` keyword. + + >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights + >>> x = np.array([0, 1, 1, 2, 2, 2]) + >>> np.bincount(x, weights=w) + array([ 0.3, 0.7, 1.1]) + + """ + return (x, weights) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index) +def ravel_multi_index(multi_index, dims, mode="raise", order="C"): + """ + ravel_multi_index(multi_index, dims, mode='raise', order='C') + + Converts a tuple of index arrays into an array of flat + indices, applying boundary modes to the multi-index. + + Parameters + ---------- + multi_index : tuple of array_like + A tuple of integer arrays, one array for each dimension. + dims : tuple of ints + The shape of array into which the indices from ``multi_index`` apply. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices are handled. Can specify + either one mode or a tuple of modes, one mode per index. + + * 'raise' -- raise an error (default) + * 'wrap' -- wrap around + * 'clip' -- clip to the range + + In 'clip' mode, a negative index which would normally + wrap will clip to 0 instead. + order : {'C', 'F'}, optional + Determines whether the multi-index should be viewed as + indexing in row-major (C-style) or column-major + (Fortran-style) order. + + Returns + ------- + raveled_indices : ndarray + An array of indices into the flattened version of an array + of dimensions ``dims``. + + See Also + -------- + unravel_index + + Examples + -------- + >>> import numpy as np + >>> arr = np.array([[3,6,6],[4,5,1]]) + >>> np.ravel_multi_index(arr, (7,6)) + array([22, 41, 37]) + >>> np.ravel_multi_index(arr, (7,6), order='F') + array([31, 41, 13]) + >>> np.ravel_multi_index(arr, (4,6), mode='clip') + array([22, 23, 19]) + >>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap')) + array([12, 13, 13]) + + >>> np.ravel_multi_index((3,1,4,1), (6,7,8,9)) + 1621 + """ + return multi_index + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index) +def unravel_index(indices, shape, order="C"): + """ + unravel_index(indices, shape, order='C') + + Converts a flat index or array of flat indices into a tuple + of coordinate arrays. + + Parameters + ---------- + indices : array_like + An integer array whose elements are indices into the flattened + version of an array of dimensions ``shape``. Before version 1.6.0, + this function accepted just one index value. + shape : tuple of ints + The shape of the array to use for unraveling ``indices``. + order : {'C', 'F'}, optional + Determines whether the indices should be viewed as indexing in + row-major (C-style) or column-major (Fortran-style) order. + + Returns + ------- + unraveled_coords : tuple of ndarray + Each array in the tuple has the same shape as the ``indices`` + array. + + See Also + -------- + ravel_multi_index + + Examples + -------- + >>> import numpy as np + >>> np.unravel_index([22, 41, 37], (7,6)) + (array([3, 6, 6]), array([4, 5, 1])) + >>> np.unravel_index([31, 41, 13], (7,6), order='F') + (array([3, 6, 6]), array([4, 5, 1])) + + >>> np.unravel_index(1621, (6,7,8,9)) + (3, 1, 4, 1) + + """ + return (indices,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto) +def copyto(dst, src, casting="same_kind", where=True): + """ + copyto(dst, src, casting='same_kind', where=True) + + Copies values from one array to another, broadcasting as necessary. + + Raises a TypeError if the `casting` rule is violated, and if + `where` is provided, it selects which elements to copy. + + Parameters + ---------- + dst : ndarray + The array into which values are copied. + src : array_like + The array from which values are copied. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur when copying. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + where : array_like of bool, optional + A boolean array which is broadcasted to match the dimensions + of `dst`, and selects elements to copy from `src` to `dst` + wherever it contains the value True. + + Examples + -------- + >>> import numpy as np + >>> A = np.array([4, 5, 6]) + >>> B = [1, 2, 3] + >>> np.copyto(A, B) + >>> A + array([1, 2, 3]) + + >>> A = np.array([[1, 2, 3], [4, 5, 6]]) + >>> B = [[4, 5, 6], [7, 8, 9]] + >>> np.copyto(A, B) + >>> A + array([[4, 5, 6], + [7, 8, 9]]) + + """ + return (dst, src, where) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask) +def putmask(a, /, mask, values): + """ + putmask(a, /, mask, values) + + Changes elements of an array based on conditional and input values. + + Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``. + + If `values` is not the same size as `a` and `mask` then it will repeat. + This gives behavior different from ``a[mask] = values``. + + Parameters + ---------- + a : ndarray + Target array. + mask : array_like + Boolean mask array. It has to be the same shape as `a`. + values : array_like + Values to put into `a` where `mask` is True. If `values` is smaller + than `a` it will be repeated. + + See Also + -------- + place, put, take, copyto + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(6).reshape(2, 3) + >>> np.putmask(x, x>2, x**2) + >>> x + array([[ 0, 1, 2], + [ 9, 16, 25]]) + + If `values` is smaller than `a` it is repeated: + + >>> x = np.arange(5) + >>> np.putmask(x, x>1, [-33, -44]) + >>> x + array([ 0, 1, -33, -44, -33]) + + """ + return (a, mask, values) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits) +def packbits(a, /, axis=None, bitorder="big"): + """ + packbits(a, /, axis=None, bitorder='big') + + Packs the elements of a binary-valued array into bits in a uint8 array. + + The result is padded to full bytes by inserting zero bits at the end. + + Parameters + ---------- + a : array_like + An array of integers or booleans whose elements should be packed to + bits. + axis : int, optional + The dimension over which bit-packing is done. + ``None`` implies packing the flattened array. + bitorder : {'big', 'little'}, optional + The order of the input bits. 'big' will mimic bin(val), + ``[0, 0, 0, 0, 0, 0, 1, 1] => 3 = 0b00000011``, 'little' will + reverse the order so ``[1, 1, 0, 0, 0, 0, 0, 0] => 3``. + Defaults to 'big'. + + Returns + ------- + packed : ndarray + Array of type uint8 whose elements represent bits corresponding to the + logical (0 or nonzero) value of the input elements. The shape of + `packed` has the same number of dimensions as the input (unless `axis` + is None, in which case the output is 1-D). + + See Also + -------- + unpackbits: Unpacks elements of a uint8 array into a binary-valued output + array. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[[1,0,1], + ... [0,1,0]], + ... [[1,1,0], + ... [0,0,1]]]) + >>> b = np.packbits(a, axis=-1) + >>> b + array([[[160], + [ 64]], + [[192], + [ 32]]], dtype=uint8) + + Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000, + and 32 = 0010 0000. + + """ + return (a,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits) +def unpackbits(a, /, axis=None, count=None, bitorder="big"): + """ + unpackbits(a, /, axis=None, count=None, bitorder='big') + + Unpacks elements of a uint8 array into a binary-valued output array. + + Each element of `a` represents a bit-field that should be unpacked + into a binary-valued output array. The shape of the output array is + either 1-D (if `axis` is ``None``) or the same shape as the input + array with unpacking done along the axis specified. + + Parameters + ---------- + a : ndarray, uint8 type + Input array. + axis : int, optional + The dimension over which bit-unpacking is done. + ``None`` implies unpacking the flattened array. + count : int or None, optional + The number of elements to unpack along `axis`, provided as a way + of undoing the effect of packing a size that is not a multiple + of eight. A non-negative number means to only unpack `count` + bits. A negative number means to trim off that many bits from + the end. ``None`` means to unpack the entire array (the + default). Counts larger than the available number of bits will + add zero padding to the output. Negative counts must not + exceed the available number of bits. + bitorder : {'big', 'little'}, optional + The order of the returned bits. 'big' will mimic bin(val), + ``3 = 0b00000011 => [0, 0, 0, 0, 0, 0, 1, 1]``, 'little' will reverse + the order to ``[1, 1, 0, 0, 0, 0, 0, 0]``. + Defaults to 'big'. + + Returns + ------- + unpacked : ndarray, uint8 type + The elements are binary-valued (0 or 1). + + See Also + -------- + packbits : Packs the elements of a binary-valued array into bits in + a uint8 array. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[2], [7], [23]], dtype=np.uint8) + >>> a + array([[ 2], + [ 7], + [23]], dtype=uint8) + >>> b = np.unpackbits(a, axis=1) + >>> b + array([[0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8) + >>> c = np.unpackbits(a, axis=1, count=-3) + >>> c + array([[0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 1, 0]], dtype=uint8) + + >>> p = np.packbits(b, axis=0) + >>> np.unpackbits(p, axis=0) + array([[0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 0, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8) + >>> np.array_equal(b, np.unpackbits(p, axis=0, count=b.shape[0])) + True + + """ + return (a,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory) +def shares_memory(a, b, /, max_work=-1): + """ + shares_memory(a, b, /, max_work=-1) + + Determine if two arrays share memory. + + .. warning:: + + This function can be exponentially slow for some inputs, unless + `max_work` is set to zero or a positive integer. + If in doubt, use `numpy.may_share_memory` instead. + + Parameters + ---------- + a, b : ndarray + Input arrays + max_work : int, optional + Effort to spend on solving the overlap problem (maximum number + of candidate solutions to consider). The following special + values are recognized: + + max_work=-1 (default) + The problem is solved exactly. In this case, the function returns + True only if there is an element shared between the arrays. Finding + the exact solution may take extremely long in some cases. + max_work=0 + Only the memory bounds of a and b are checked. + This is equivalent to using ``may_share_memory()``. + + Raises + ------ + numpy.exceptions.TooHardError + Exceeded max_work. + + Returns + ------- + out : bool + + See Also + -------- + may_share_memory + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3, 4]) + >>> np.shares_memory(x, np.array([5, 6, 7])) + False + >>> np.shares_memory(x[::2], x) + True + >>> np.shares_memory(x[::2], x[1::2]) + False + + Checking whether two arrays share memory is NP-complete, and + runtime may increase exponentially in the number of + dimensions. Hence, `max_work` should generally be set to a finite + number, as it is possible to construct examples that take + extremely long to run: + + >>> from numpy.lib.stride_tricks import as_strided + >>> x = np.zeros([192163377], dtype=np.int8) + >>> x1 = as_strided( + ... x, strides=(36674, 61119, 85569), shape=(1049, 1049, 1049)) + >>> x2 = as_strided( + ... x[64023025:], strides=(12223, 12224, 1), shape=(1049, 1049, 1)) + >>> np.shares_memory(x1, x2, max_work=1000) + Traceback (most recent call last): + ... + numpy.exceptions.TooHardError: Exceeded max_work + + Running ``np.shares_memory(x1, x2)`` without `max_work` set takes + around 1 minute for this case. It is possible to find problems + that take still significantly longer. + + """ + return (a, b) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory) +def may_share_memory(a, b, /, max_work=0): + """ + may_share_memory(a, b, /, max_work=0) + + Determine if two arrays might share memory + + A return of True does not necessarily mean that the two arrays + share any element. It just means that they *might*. + + Only the memory bounds of a and b are checked by default. + + Parameters + ---------- + a, b : ndarray + Input arrays + max_work : int, optional + Effort to spend on solving the overlap problem. See + `shares_memory` for details. Default for ``may_share_memory`` + is to do a bounds check. + + Returns + ------- + out : bool + + See Also + -------- + shares_memory + + Examples + -------- + >>> import numpy as np + >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) + False + >>> x = np.zeros([3, 4]) + >>> np.may_share_memory(x[:,0], x[:,1]) + True + + """ + return (a, b) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday) +def is_busday(dates, weekmask="1111100", holidays=None, busdaycal=None, out=None): + """ + is_busday( + dates, + weekmask='1111100', + holidays=None, + busdaycal=None, + out=None, + ) + + Calculates which of the given dates are valid days, and which are not. + + Parameters + ---------- + dates : array_like of datetime64[D] + The array of dates to process. + weekmask : str or array_like of bool, optional + A seven-element array indicating which of Monday through Sunday are + valid days. May be specified as a length-seven list or array, like + [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string + like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for + weekdays, optionally separated by white space. Valid abbreviations + are: Mon Tue Wed Thu Fri Sat Sun + holidays : array_like of datetime64[D], optional + An array of dates to consider as invalid dates. They may be + specified in any order, and NaT (not-a-time) dates are ignored. + This list is saved in a normalized form that is suited for + fast calculations of valid days. + busdaycal : busdaycalendar, optional + A `busdaycalendar` object which specifies the valid days. If this + parameter is provided, neither weekmask nor holidays may be + provided. + out : array of bool, optional + If provided, this array is filled with the result. + + Returns + ------- + out : array of bool + An array with the same shape as ``dates``, containing True for + each valid day, and False for each invalid day. + + See Also + -------- + busdaycalendar : An object that specifies a custom set of valid days. + busday_offset : Applies an offset counted in valid days. + busday_count : Counts how many valid days are in a half-open date range. + + Examples + -------- + >>> import numpy as np + >>> # The weekdays are Friday, Saturday, and Monday + ... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'], + ... holidays=['2011-07-01', '2011-07-04', '2011-07-17']) + array([False, False, True]) + """ + return (dates, weekmask, holidays, out) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset) +def busday_offset(dates, offsets, roll="raise", weekmask="1111100", holidays=None, + busdaycal=None, out=None): + """ + busday_offset( + dates, + offsets, + roll='raise', + weekmask='1111100', + holidays=None, + busdaycal=None, + out=None, + ) + + First adjusts the date to fall on a valid day according to + the ``roll`` rule, then applies offsets to the given dates + counted in valid days. + + Parameters + ---------- + dates : array_like of datetime64[D] + The array of dates to process. + offsets : array_like of int + The array of offsets, which is broadcast with ``dates``. + roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', \ + 'modifiedfollowing', 'modifiedpreceding'}, optional + How to treat dates that do not fall on a valid day. The default + is 'raise'. + + * 'raise' means to raise an exception for an invalid day. + * 'nat' means to return a NaT (not-a-time) for an invalid day. + * 'forward' and 'following' mean to take the first valid day + later in time. + * 'backward' and 'preceding' mean to take the first valid day + earlier in time. + * 'modifiedfollowing' means to take the first valid day + later in time unless it is across a Month boundary, in which + case to take the first valid day earlier in time. + * 'modifiedpreceding' means to take the first valid day + earlier in time unless it is across a Month boundary, in which + case to take the first valid day later in time. + weekmask : str or array_like of bool, optional + A seven-element array indicating which of Monday through Sunday are + valid days. May be specified as a length-seven list or array, like + [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string + like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for + weekdays, optionally separated by white space. Valid abbreviations + are: Mon Tue Wed Thu Fri Sat Sun + holidays : array_like of datetime64[D], optional + An array of dates to consider as invalid dates. They may be + specified in any order, and NaT (not-a-time) dates are ignored. + This list is saved in a normalized form that is suited for + fast calculations of valid days. + busdaycal : busdaycalendar, optional + A `busdaycalendar` object which specifies the valid days. If this + parameter is provided, neither weekmask nor holidays may be + provided. + out : array of datetime64[D], optional + If provided, this array is filled with the result. + + Returns + ------- + out : array of datetime64[D] + An array with a shape from broadcasting ``dates`` and ``offsets`` + together, containing the dates with offsets applied. + + See Also + -------- + busdaycalendar : An object that specifies a custom set of valid days. + is_busday : Returns a boolean array indicating valid days. + busday_count : Counts how many valid days are in a half-open date range. + + Examples + -------- + >>> import numpy as np + >>> # First business day in October 2011 (not accounting for holidays) + ... np.busday_offset('2011-10', 0, roll='forward') + np.datetime64('2011-10-03') + >>> # Last business day in February 2012 (not accounting for holidays) + ... np.busday_offset('2012-03', -1, roll='forward') + np.datetime64('2012-02-29') + >>> # Third Wednesday in January 2011 + ... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed') + np.datetime64('2011-01-19') + >>> # 2012 Mother's Day in Canada and the U.S. + ... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun') + np.datetime64('2012-05-13') + + >>> # First business day on or after a date + ... np.busday_offset('2011-03-20', 0, roll='forward') + np.datetime64('2011-03-21') + >>> np.busday_offset('2011-03-22', 0, roll='forward') + np.datetime64('2011-03-22') + >>> # First business day after a date + ... np.busday_offset('2011-03-20', 1, roll='backward') + np.datetime64('2011-03-21') + >>> np.busday_offset('2011-03-22', 1, roll='backward') + np.datetime64('2011-03-23') + """ + return (dates, offsets, weekmask, holidays, out) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count) +def busday_count(begindates, enddates, weekmask="1111100", holidays=(), + busdaycal=None, out=None): + """ + busday_count( + begindates, + enddates, + weekmask='1111100', + holidays=[], + busdaycal=None, + out=None + ) + + Counts the number of valid days between `begindates` and + `enddates`, not including the day of `enddates`. + + If ``enddates`` specifies a date value that is earlier than the + corresponding ``begindates`` date value, the count will be negative. + + Parameters + ---------- + begindates : array_like of datetime64[D] + The array of the first dates for counting. + enddates : array_like of datetime64[D] + The array of the end dates for counting, which are excluded + from the count themselves. + weekmask : str or array_like of bool, optional + A seven-element array indicating which of Monday through Sunday are + valid days. May be specified as a length-seven list or array, like + [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string + like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for + weekdays, optionally separated by white space. Valid abbreviations + are: Mon Tue Wed Thu Fri Sat Sun + holidays : array_like of datetime64[D], optional + An array of dates to consider as invalid dates. They may be + specified in any order, and NaT (not-a-time) dates are ignored. + This list is saved in a normalized form that is suited for + fast calculations of valid days. + busdaycal : busdaycalendar, optional + A `busdaycalendar` object which specifies the valid days. If this + parameter is provided, neither weekmask nor holidays may be + provided. + out : array of int, optional + If provided, this array is filled with the result. + + Returns + ------- + out : array of int + An array with a shape from broadcasting ``begindates`` and ``enddates`` + together, containing the number of valid days between + the begin and end dates. + + See Also + -------- + busdaycalendar : An object that specifies a custom set of valid days. + is_busday : Returns a boolean array indicating valid days. + busday_offset : Applies an offset counted in valid days. + + Examples + -------- + >>> import numpy as np + >>> # Number of weekdays in January 2011 + ... np.busday_count('2011-01', '2011-02') + 21 + >>> # Number of weekdays in 2011 + >>> np.busday_count('2011', '2012') + 260 + >>> # Number of Saturdays in 2011 + ... np.busday_count('2011', '2012', weekmask='Sat') + 53 + """ + return (begindates, enddates, weekmask, holidays, out) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.datetime_as_string) +def datetime_as_string(arr, unit=None, timezone="naive", casting="same_kind"): + """ + datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind') + + Convert an array of datetimes into an array of strings. + + Parameters + ---------- + arr : array_like of datetime64 + The array of UTC timestamps to format. + unit : str + One of None, 'auto', or + a :ref:`datetime unit `. + timezone : {'naive', 'UTC', 'local'} or tzinfo + Timezone information to use when displaying the datetime. If 'UTC', + end with a Z to indicate UTC time. If 'local', convert to the local + timezone first, and suffix with a +-#### timezone offset. If a tzinfo + object, then do as with 'local', but use the specified timezone. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'} + Casting to allow when changing between datetime units. + + Returns + ------- + str_arr : ndarray + An array of strings the same shape as `arr`. + + Examples + -------- + >>> import numpy as np + >>> from zoneinfo import ZoneInfo + >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]') + >>> d + array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30', + '2002-10-27T07:30'], dtype='datetime64[m]') + + Setting the timezone to UTC shows the same information, but with a Z suffix + + >>> np.datetime_as_string(d, timezone='UTC') + array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z', + '2002-10-27T07:30Z'], dtype='>> np.datetime_as_string(d, timezone=ZoneInfo('US/Eastern')) + array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400', + '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='>> np.datetime_as_string(d, unit='h') + array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'], + dtype='>> np.datetime_as_string(d, unit='s') + array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00', + '2002-10-27T07:30:00'], dtype='>> np.datetime_as_string(d, unit='h', casting='safe') + Traceback (most recent call last): + ... + TypeError: Cannot create a datetime string as units 'h' from a NumPy + datetime with units 'm' according to the rule 'safe' + """ + return (arr,) diff --git a/local_packages/numpy/_core/multiarray.pyi b/local_packages/numpy/_core/multiarray.pyi new file mode 100644 index 0000000..279ba5a --- /dev/null +++ b/local_packages/numpy/_core/multiarray.pyi @@ -0,0 +1,1328 @@ +# TODO: Sort out any and all missing functions in this namespace +import datetime as dt +from _typeshed import Incomplete, StrOrBytesPath, SupportsLenAndGetItem +from collections.abc import Callable, Iterable, Sequence +from typing import ( + Any, + ClassVar, + Final, + Literal as L, + Protocol, + SupportsIndex, + TypeAlias, + TypeVar, + final, + overload, + type_check_only, +) +from typing_extensions import CapsuleType + +import numpy as np +from numpy import ( # type: ignore[attr-defined] # Python >=3.12 + _AnyShapeT, + _CastingKind, + _CopyMode, + _ModeKind, + _NDIterFlagsKind, + _NDIterFlagsOp, + _OrderCF, + _OrderKACF, + _SupportsBuffer, + _SupportsFileMethods, + broadcast, + busdaycalendar, + complexfloating, + correlate, + count_nonzero, + datetime64, + dtype, + einsum as c_einsum, + flatiter, + float64, + floating, + from_dlpack, + generic, + int_, + interp, + intp, + matmul, + ndarray, + nditer, + signedinteger, + str_, + timedelta64, + ufunc, + uint8, + unsignedinteger, + vecdot, +) +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _AnyShape, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeBytes_co, + _ArrayLikeComplex_co, + _ArrayLikeDT64_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _ArrayLikeObject_co, + _ArrayLikeStr_co, + _ArrayLikeTD64_co, + _ArrayLikeUInt_co, + _DT64Codes, + _DTypeLike, + _FloatLike_co, + _IntLike_co, + _NestedSequence, + _ScalarLike_co, + _Shape, + _ShapeLike, + _SupportsArrayFunc, + _SupportsDType, + _TD64Like_co, +) +from numpy._typing._ufunc import ( + _2PTuple, + _PyFunc_Nin1_Nout1, + _PyFunc_Nin1P_Nout2P, + _PyFunc_Nin2_Nout1, + _PyFunc_Nin3P_Nout1, +) + +__all__ = [ + "_ARRAY_API", + "ALLOW_THREADS", + "BUFSIZE", + "CLIP", + "DATETIMEUNITS", + "ITEM_HASOBJECT", + "ITEM_IS_POINTER", + "LIST_PICKLE", + "MAXDIMS", + "MAY_SHARE_BOUNDS", + "MAY_SHARE_EXACT", + "NEEDS_INIT", + "NEEDS_PYAPI", + "RAISE", + "USE_GETITEM", + "USE_SETITEM", + "WRAP", + "_flagdict", + "from_dlpack", + "_place", + "_reconstruct", + "_vec_string", + "_monotonicity", + "add_docstring", + "arange", + "array", + "asarray", + "asanyarray", + "ascontiguousarray", + "asfortranarray", + "bincount", + "broadcast", + "busday_count", + "busday_offset", + "busdaycalendar", + "can_cast", + "compare_chararrays", + "concatenate", + "copyto", + "correlate", + "correlate2", + "count_nonzero", + "c_einsum", + "datetime_as_string", + "datetime_data", + "dot", + "dragon4_positional", + "dragon4_scientific", + "dtype", + "empty", + "empty_like", + "error", + "flagsobj", + "flatiter", + "format_longfloat", + "frombuffer", + "fromfile", + "fromiter", + "fromstring", + "get_handler_name", + "get_handler_version", + "inner", + "interp", + "interp_complex", + "is_busday", + "lexsort", + "matmul", + "vecdot", + "may_share_memory", + "min_scalar_type", + "ndarray", + "nditer", + "nested_iters", + "normalize_axis_index", + "packbits", + "promote_types", + "putmask", + "ravel_multi_index", + "result_type", + "scalar", + "set_datetimeparse_function", + "set_typeDict", + "shares_memory", + "typeinfo", + "unpackbits", + "unravel_index", + "vdot", + "where", + "zeros", +] + +_ScalarT = TypeVar("_ScalarT", bound=generic) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype) +_ArrayT = TypeVar("_ArrayT", bound=ndarray) +_ArrayT_co = TypeVar("_ArrayT_co", bound=ndarray, covariant=True) +_ShapeT = TypeVar("_ShapeT", bound=_Shape) +# TODO: fix the names of these typevars +_ReturnType = TypeVar("_ReturnType") +_IDType = TypeVar("_IDType") +_Nin = TypeVar("_Nin", bound=int) +_Nout = TypeVar("_Nout", bound=int) + +_Array: TypeAlias = ndarray[_ShapeT, dtype[_ScalarT]] +_Array1D: TypeAlias = ndarray[tuple[int], dtype[_ScalarT]] + +# Valid time units +_UnitKind: TypeAlias = L[ + "Y", + "M", + "D", + "h", + "m", + "s", + "ms", + "us", "μs", + "ns", + "ps", + "fs", + "as", +] +_RollKind: TypeAlias = L[ # `raise` is deliberately excluded + "nat", + "forward", + "following", + "backward", + "preceding", + "modifiedfollowing", + "modifiedpreceding", +] + +@type_check_only +class _SupportsArray(Protocol[_ArrayT_co]): + def __array__(self, /) -> _ArrayT_co: ... + +@type_check_only +class _ConstructorEmpty(Protocol): + # 1-D shape + @overload + def __call__( + self, + /, + shape: SupportsIndex, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> _Array1D[float64]: ... + @overload + def __call__( + self, + /, + shape: SupportsIndex, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> ndarray[tuple[int], _DTypeT]: ... + @overload + def __call__( + self, + /, + shape: SupportsIndex, + dtype: type[_ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> _Array1D[_ScalarT]: ... + @overload + def __call__( + self, + /, + shape: SupportsIndex, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> _Array1D[Incomplete]: ... + + # known shape + @overload + def __call__( + self, + /, + shape: _AnyShapeT, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> _Array[_AnyShapeT, float64]: ... + @overload + def __call__( + self, + /, + shape: _AnyShapeT, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> ndarray[_AnyShapeT, _DTypeT]: ... + @overload + def __call__( + self, + /, + shape: _AnyShapeT, + dtype: type[_ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> _Array[_AnyShapeT, _ScalarT]: ... + @overload + def __call__( + self, + /, + shape: _AnyShapeT, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> _Array[_AnyShapeT, Incomplete]: ... + + # unknown shape + @overload + def __call__( + self, /, + shape: _ShapeLike, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> NDArray[float64]: ... + @overload + def __call__( + self, /, + shape: _ShapeLike, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> ndarray[_AnyShape, _DTypeT]: ... + @overload + def __call__( + self, /, + shape: _ShapeLike, + dtype: type[_ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> NDArray[_ScalarT]: ... + @overload + def __call__( + self, + /, + shape: _ShapeLike, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + ) -> NDArray[Incomplete]: ... + +# using `Final` or `TypeAlias` will break stubtest +error = Exception + +# from ._multiarray_umath +ITEM_HASOBJECT: Final = 1 +LIST_PICKLE: Final = 2 +ITEM_IS_POINTER: Final = 4 +NEEDS_INIT: Final = 8 +NEEDS_PYAPI: Final = 16 +USE_GETITEM: Final = 32 +USE_SETITEM: Final = 64 +DATETIMEUNITS: Final[CapsuleType] = ... +_ARRAY_API: Final[CapsuleType] = ... + +_flagdict: Final[dict[str, int]] = ... +_monotonicity: Final[Callable[..., object]] = ... +_place: Final[Callable[..., object]] = ... +_reconstruct: Final[Callable[..., object]] = ... +_vec_string: Final[Callable[..., object]] = ... +correlate2: Final[Callable[..., object]] = ... +dragon4_positional: Final[Callable[..., object]] = ... +dragon4_scientific: Final[Callable[..., object]] = ... +interp_complex: Final[Callable[..., object]] = ... +set_datetimeparse_function: Final[Callable[..., object]] = ... + +def get_handler_name(a: NDArray[Any] = ..., /) -> str | None: ... +def get_handler_version(a: NDArray[Any] = ..., /) -> int | None: ... +def format_longfloat(x: np.longdouble, precision: int) -> str: ... +def scalar(dtype: _DTypeT, object: bytes | object = ...) -> ndarray[tuple[()], _DTypeT]: ... +def set_typeDict(dict_: dict[str, np.dtype], /) -> None: ... + +typeinfo: Final[dict[str, np.dtype[np.generic]]] = ... + +ALLOW_THREADS: Final[int] # 0 or 1 (system-specific) +BUFSIZE: Final = 8_192 +CLIP: Final = 0 +WRAP: Final = 1 +RAISE: Final = 2 +MAXDIMS: Final = 64 +MAY_SHARE_BOUNDS: Final = 0 +MAY_SHARE_EXACT: Final = -1 +tracemalloc_domain: Final = 389_047 + +zeros: Final[_ConstructorEmpty] = ... +empty: Final[_ConstructorEmpty] = ... + +@overload +def empty_like( + prototype: _ArrayT, + /, + dtype: None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, + *, + device: L["cpu"] | None = None, +) -> _ArrayT: ... +@overload +def empty_like( + prototype: _ArrayLike[_ScalarT], + /, + dtype: None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, + *, + device: L["cpu"] | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def empty_like( + prototype: Incomplete, + /, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, + *, + device: L["cpu"] | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def empty_like( + prototype: Incomplete, + /, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, + *, + device: L["cpu"] | None = None, +) -> NDArray[Incomplete]: ... + +@overload +def array( + object: _ArrayT, + dtype: None = None, + *, + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", + subok: L[True], + ndmin: int = 0, + ndmax: int = 0, + like: _SupportsArrayFunc | None = None, +) -> _ArrayT: ... +@overload +def array( + object: _SupportsArray[_ArrayT], + dtype: None = None, + *, + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", + subok: L[True], + ndmin: L[0] = 0, + ndmax: int = 0, + like: _SupportsArrayFunc | None = None, +) -> _ArrayT: ... +@overload +def array( + object: _ArrayLike[_ScalarT], + dtype: None = None, + *, + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", + subok: bool = False, + ndmin: int = 0, + ndmax: int = 0, + like: _SupportsArrayFunc | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def array( + object: Any, + dtype: _DTypeLike[_ScalarT], + *, + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", + subok: bool = False, + ndmin: int = 0, + ndmax: int = 0, + like: _SupportsArrayFunc | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def array( + object: Any, + dtype: DTypeLike | None = None, + *, + copy: bool | _CopyMode | None = True, + order: _OrderKACF = "K", + subok: bool = False, + ndmin: int = 0, + ndmax: int = 0, + like: _SupportsArrayFunc | None = None, +) -> NDArray[Any]: ... + +# +@overload +def ravel_multi_index( + multi_index: SupportsLenAndGetItem[_IntLike_co], + dims: _ShapeLike, + mode: _ModeKind | tuple[_ModeKind, ...] = "raise", + order: _OrderCF = "C", +) -> intp: ... +@overload +def ravel_multi_index( + multi_index: SupportsLenAndGetItem[_ArrayLikeInt_co], + dims: _ShapeLike, + mode: _ModeKind | tuple[_ModeKind, ...] = "raise", + order: _OrderCF = "C", +) -> NDArray[intp]: ... + +# +@overload +def unravel_index(indices: _IntLike_co, shape: _ShapeLike, order: _OrderCF = "C") -> tuple[intp, ...]: ... +@overload +def unravel_index(indices: _ArrayLikeInt_co, shape: _ShapeLike, order: _OrderCF = "C") -> tuple[NDArray[intp], ...]: ... + +# +def normalize_axis_index(axis: int, ndim: int, msg_prefix: str | None = None) -> int: ... + +# NOTE: Allow any sequence of array-like objects +@overload +def concatenate( + arrays: _ArrayLike[_ScalarT], + /, + axis: SupportsIndex | None = 0, + out: None = None, + *, + dtype: None = None, + casting: _CastingKind | None = "same_kind", +) -> NDArray[_ScalarT]: ... +@overload +def concatenate( + arrays: SupportsLenAndGetItem[ArrayLike], + /, + axis: SupportsIndex | None = 0, + out: None = None, + *, + dtype: _DTypeLike[_ScalarT], + casting: _CastingKind | None = "same_kind", +) -> NDArray[_ScalarT]: ... +@overload +def concatenate( + arrays: SupportsLenAndGetItem[ArrayLike], + /, + axis: SupportsIndex | None = 0, + out: None = None, + *, + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", +) -> NDArray[Incomplete]: ... +@overload +def concatenate( + arrays: SupportsLenAndGetItem[ArrayLike], + /, + axis: SupportsIndex | None = 0, + *, + out: _ArrayT, + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", +) -> _ArrayT: ... +@overload +def concatenate( + arrays: SupportsLenAndGetItem[ArrayLike], + /, + axis: SupportsIndex | None, + out: _ArrayT, + *, + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", +) -> _ArrayT: ... + +def inner(a: ArrayLike, b: ArrayLike, /) -> Incomplete: ... + +@overload +def where(condition: ArrayLike, x: None = None, y: None = None, /) -> tuple[NDArray[intp], ...]: ... +@overload +def where(condition: ArrayLike, x: ArrayLike, y: ArrayLike, /) -> NDArray[Incomplete]: ... + +def lexsort(keys: ArrayLike, axis: SupportsIndex = -1) -> NDArray[intp]: ... + +def can_cast(from_: ArrayLike | DTypeLike, to: DTypeLike, casting: _CastingKind = "safe") -> bool: ... + +def min_scalar_type(a: ArrayLike, /) -> dtype: ... +def result_type(*arrays_and_dtypes: ArrayLike | DTypeLike | None) -> dtype: ... + +@overload +def dot(a: ArrayLike, b: ArrayLike, out: None = None) -> Incomplete: ... +@overload +def dot(a: ArrayLike, b: ArrayLike, out: _ArrayT) -> _ArrayT: ... + +@overload +def vdot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, /) -> np.bool: ... +@overload +def vdot(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, /) -> unsignedinteger: ... +@overload +def vdot(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, /) -> signedinteger: ... +@overload +def vdot(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, /) -> floating: ... +@overload +def vdot(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, /) -> complexfloating: ... +@overload +def vdot(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, /) -> timedelta64: ... +@overload +def vdot(a: _ArrayLikeObject_co, b: object, /) -> Any: ... +@overload +def vdot(a: object, b: _ArrayLikeObject_co, /) -> Any: ... + +def bincount(x: ArrayLike, /, weights: ArrayLike | None = None, minlength: SupportsIndex = 0) -> NDArray[intp]: ... + +def copyto(dst: ndarray, src: ArrayLike, casting: _CastingKind = "same_kind", where: object = True) -> None: ... +def putmask(a: ndarray, /, mask: _ArrayLikeBool_co, values: ArrayLike) -> None: ... + +_BitOrder: TypeAlias = L["big", "little"] + +@overload +def packbits(a: _ArrayLikeInt_co, /, axis: None = None, bitorder: _BitOrder = "big") -> ndarray[tuple[int], dtype[uint8]]: ... +@overload +def packbits(a: _ArrayLikeInt_co, /, axis: SupportsIndex, bitorder: _BitOrder = "big") -> NDArray[uint8]: ... + +@overload +def unpackbits( + a: _ArrayLike[uint8], + /, + axis: None = None, + count: SupportsIndex | None = None, + bitorder: _BitOrder = "big", +) -> ndarray[tuple[int], dtype[uint8]]: ... +@overload +def unpackbits( + a: _ArrayLike[uint8], + /, + axis: SupportsIndex, + count: SupportsIndex | None = None, + bitorder: _BitOrder = "big", +) -> NDArray[uint8]: ... + +_MaxWork: TypeAlias = L[-1, 0] + +# any two python objects will be accepted, not just `ndarray`s +def shares_memory(a: object, b: object, /, max_work: _MaxWork = -1) -> bool: ... +def may_share_memory(a: object, b: object, /, max_work: _MaxWork = 0) -> bool: ... + +@overload +def asarray( + a: _ArrayLike[_ScalarT], + dtype: None = None, + order: _OrderKACF = ..., + *, + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def asarray( + a: Any, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = ..., + *, + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def asarray( + a: Any, + dtype: DTypeLike | None = ..., + order: _OrderKACF = ..., + *, + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +@overload +def asanyarray( + a: _ArrayT, # Preserve subclass-information + dtype: None = None, + order: _OrderKACF = ..., + *, + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> _ArrayT: ... +@overload +def asanyarray( + a: _ArrayLike[_ScalarT], + dtype: None = None, + order: _OrderKACF = ..., + *, + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def asanyarray( + a: Any, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = ..., + *, + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def asanyarray( + a: Any, + dtype: DTypeLike | None = ..., + order: _OrderKACF = ..., + *, + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +@overload +def ascontiguousarray( + a: _ArrayLike[_ScalarT], + dtype: None = None, + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def ascontiguousarray( + a: Any, + dtype: _DTypeLike[_ScalarT], + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def ascontiguousarray( + a: Any, + dtype: DTypeLike | None = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +@overload +def asfortranarray( + a: _ArrayLike[_ScalarT], + dtype: None = None, + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def asfortranarray( + a: Any, + dtype: _DTypeLike[_ScalarT], + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def asfortranarray( + a: Any, + dtype: DTypeLike | None = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +def promote_types(__type1: DTypeLike, __type2: DTypeLike) -> dtype: ... + +# `sep` is a de facto mandatory argument, as its default value is deprecated +@overload +def fromstring( + string: str | bytes, + dtype: None = None, + count: SupportsIndex = ..., + *, + sep: str, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[float64]: ... +@overload +def fromstring( + string: str | bytes, + dtype: _DTypeLike[_ScalarT], + count: SupportsIndex = ..., + *, + sep: str, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def fromstring( + string: str | bytes, + dtype: DTypeLike | None = ..., + count: SupportsIndex = ..., + *, + sep: str, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +@overload +def frompyfunc( # type: ignore[overload-overlap] + func: Callable[[Any], _ReturnType], /, + nin: L[1], + nout: L[1], + *, + identity: None = None, +) -> _PyFunc_Nin1_Nout1[_ReturnType, None]: ... +@overload +def frompyfunc( # type: ignore[overload-overlap] + func: Callable[[Any], _ReturnType], /, + nin: L[1], + nout: L[1], + *, + identity: _IDType, +) -> _PyFunc_Nin1_Nout1[_ReturnType, _IDType]: ... +@overload +def frompyfunc( # type: ignore[overload-overlap] + func: Callable[[Any, Any], _ReturnType], /, + nin: L[2], + nout: L[1], + *, + identity: None = None, +) -> _PyFunc_Nin2_Nout1[_ReturnType, None]: ... +@overload +def frompyfunc( # type: ignore[overload-overlap] + func: Callable[[Any, Any], _ReturnType], /, + nin: L[2], + nout: L[1], + *, + identity: _IDType, +) -> _PyFunc_Nin2_Nout1[_ReturnType, _IDType]: ... +@overload +def frompyfunc( # type: ignore[overload-overlap] + func: Callable[..., _ReturnType], /, + nin: _Nin, + nout: L[1], + *, + identity: None = None, +) -> _PyFunc_Nin3P_Nout1[_ReturnType, None, _Nin]: ... +@overload +def frompyfunc( # type: ignore[overload-overlap] + func: Callable[..., _ReturnType], /, + nin: _Nin, + nout: L[1], + *, + identity: _IDType, +) -> _PyFunc_Nin3P_Nout1[_ReturnType, _IDType, _Nin]: ... +@overload +def frompyfunc( + func: Callable[..., _2PTuple[_ReturnType]], /, + nin: _Nin, + nout: _Nout, + *, + identity: None = None, +) -> _PyFunc_Nin1P_Nout2P[_ReturnType, None, _Nin, _Nout]: ... +@overload +def frompyfunc( + func: Callable[..., _2PTuple[_ReturnType]], /, + nin: _Nin, + nout: _Nout, + *, + identity: _IDType, +) -> _PyFunc_Nin1P_Nout2P[_ReturnType, _IDType, _Nin, _Nout]: ... +@overload +def frompyfunc( + func: Callable[..., Any], /, + nin: SupportsIndex, + nout: SupportsIndex, + *, + identity: object | None = ..., +) -> ufunc: ... + +@overload +def fromfile( + file: StrOrBytesPath | _SupportsFileMethods, + dtype: None = None, + count: SupportsIndex = ..., + sep: str = ..., + offset: SupportsIndex = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[float64]: ... +@overload +def fromfile( + file: StrOrBytesPath | _SupportsFileMethods, + dtype: _DTypeLike[_ScalarT], + count: SupportsIndex = ..., + sep: str = ..., + offset: SupportsIndex = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def fromfile( + file: StrOrBytesPath | _SupportsFileMethods, + dtype: DTypeLike | None = ..., + count: SupportsIndex = ..., + sep: str = ..., + offset: SupportsIndex = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +@overload +def fromiter( + iter: Iterable[Any], + dtype: _DTypeLike[_ScalarT], + count: SupportsIndex = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def fromiter( + iter: Iterable[Any], + dtype: DTypeLike | None, + count: SupportsIndex = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +@overload +def frombuffer( + buffer: _SupportsBuffer, + dtype: None = None, + count: SupportsIndex = ..., + offset: SupportsIndex = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[float64]: ... +@overload +def frombuffer( + buffer: _SupportsBuffer, + dtype: _DTypeLike[_ScalarT], + count: SupportsIndex = ..., + offset: SupportsIndex = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def frombuffer( + buffer: _SupportsBuffer, + dtype: DTypeLike | None = ..., + count: SupportsIndex = ..., + offset: SupportsIndex = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +_ArangeScalar: TypeAlias = np.integer | np.floating | np.datetime64 | np.timedelta64 +_ArangeScalarT = TypeVar("_ArangeScalarT", bound=_ArangeScalar) + +# keep in sync with ma.core.arange +# NOTE: The `float64 | Any` return types needed to avoid incompatible overlapping overloads +@overload # dtype= +def arange( + start_or_stop: _ArangeScalar | float, + /, + stop: _ArangeScalar | float | None = None, + step: _ArangeScalar | float | None = 1, + *, + dtype: _DTypeLike[_ArangeScalarT], + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[_ArangeScalarT]: ... +@overload # (int-like, int-like?, int-like?) +def arange( + start_or_stop: _IntLike_co, + /, + stop: _IntLike_co | None = None, + step: _IntLike_co | None = 1, + *, + dtype: type[int] | _DTypeLike[np.int_] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.int_]: ... +@overload # (float, float-like?, float-like?) +def arange( + start_or_stop: float | floating, + /, + stop: _FloatLike_co | None = None, + step: _FloatLike_co | None = 1, + *, + dtype: type[float] | _DTypeLike[np.float64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.float64 | Any]: ... +@overload # (float-like, float, float-like?) +def arange( + start_or_stop: _FloatLike_co, + /, + stop: float | floating, + step: _FloatLike_co | None = 1, + *, + dtype: type[float] | _DTypeLike[np.float64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.float64 | Any]: ... +@overload # (timedelta, timedelta-like?, timedelta-like?) +def arange( + start_or_stop: np.timedelta64, + /, + stop: _TD64Like_co | None = None, + step: _TD64Like_co | None = 1, + *, + dtype: _DTypeLike[np.timedelta64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.timedelta64[Incomplete]]: ... +@overload # (timedelta-like, timedelta, timedelta-like?) +def arange( + start_or_stop: _TD64Like_co, + /, + stop: np.timedelta64, + step: _TD64Like_co | None = 1, + *, + dtype: _DTypeLike[np.timedelta64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.timedelta64[Incomplete]]: ... +@overload # (datetime, datetime, timedelta-like) (requires both start and stop) +def arange( + start_or_stop: np.datetime64, + /, + stop: np.datetime64, + step: _TD64Like_co | None = 1, + *, + dtype: _DTypeLike[np.datetime64] | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.datetime64[Incomplete]]: ... +@overload # (str, str, timedelta-like, dtype=dt64-like) (requires both start and stop) +def arange( + start_or_stop: str, + /, + stop: str, + step: _TD64Like_co | None = 1, + *, + dtype: _DTypeLike[np.datetime64] | _DT64Codes, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[np.datetime64[Incomplete]]: ... +@overload # dtype= +def arange( + start_or_stop: _ArangeScalar | float | str, + /, + stop: _ArangeScalar | float | str | None = None, + step: _ArangeScalar | float | None = 1, + *, + dtype: DTypeLike | None = None, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array1D[Incomplete]: ... + +# +def datetime_data(dtype: str | _DTypeLike[datetime64 | timedelta64], /) -> tuple[str, int]: ... + +# The datetime functions perform unsafe casts to `datetime64[D]`, +# so a lot of different argument types are allowed here + +_ToDates: TypeAlias = dt.date | _NestedSequence[dt.date] +_ToDeltas: TypeAlias = dt.timedelta | _NestedSequence[dt.timedelta] + +@overload +def busday_count( + begindates: _ScalarLike_co | dt.date, + enddates: _ScalarLike_co | dt.date, + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates = (), + busdaycal: busdaycalendar | None = None, + out: None = None, +) -> int_: ... +@overload +def busday_count( + begindates: ArrayLike | _ToDates, + enddates: ArrayLike | _ToDates, + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates = (), + busdaycal: busdaycalendar | None = None, + out: None = None, +) -> NDArray[int_]: ... +@overload +def busday_count( + begindates: ArrayLike | _ToDates, + enddates: ArrayLike | _ToDates, + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates = (), + busdaycal: busdaycalendar | None = None, + *, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def busday_count( + begindates: ArrayLike | _ToDates, + enddates: ArrayLike | _ToDates, + weekmask: ArrayLike, + holidays: ArrayLike | _ToDates, + busdaycal: busdaycalendar | None, + out: _ArrayT, +) -> _ArrayT: ... + +# `roll="raise"` is (more or less?) equivalent to `casting="safe"` +@overload +def busday_offset( + dates: datetime64 | dt.date, + offsets: _TD64Like_co | dt.timedelta, + roll: L["raise"] = "raise", + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + out: None = None, +) -> datetime64: ... +@overload +def busday_offset( + dates: _ArrayLike[datetime64] | _NestedSequence[dt.date], + offsets: _ArrayLikeTD64_co | _ToDeltas, + roll: L["raise"] = "raise", + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + out: None = None, +) -> NDArray[datetime64]: ... +@overload +def busday_offset( + dates: _ArrayLike[datetime64] | _ToDates, + offsets: _ArrayLikeTD64_co | _ToDeltas, + roll: L["raise"] = "raise", + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + *, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def busday_offset( + dates: _ArrayLike[datetime64] | _ToDates, + offsets: _ArrayLikeTD64_co | _ToDeltas, + roll: L["raise"], + weekmask: ArrayLike, + holidays: ArrayLike | _ToDates | None, + busdaycal: busdaycalendar | None, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def busday_offset( + dates: _ScalarLike_co | dt.date, + offsets: _ScalarLike_co | dt.timedelta, + roll: _RollKind, + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + out: None = None, +) -> datetime64: ... +@overload +def busday_offset( + dates: ArrayLike | _NestedSequence[dt.date], + offsets: ArrayLike | _ToDeltas, + roll: _RollKind, + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + out: None = None, +) -> NDArray[datetime64]: ... +@overload +def busday_offset( + dates: ArrayLike | _ToDates, + offsets: ArrayLike | _ToDeltas, + roll: _RollKind, + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + *, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def busday_offset( + dates: ArrayLike | _ToDates, + offsets: ArrayLike | _ToDeltas, + roll: _RollKind, + weekmask: ArrayLike, + holidays: ArrayLike | _ToDates | None, + busdaycal: busdaycalendar | None, + out: _ArrayT, +) -> _ArrayT: ... + +@overload +def is_busday( + dates: _ScalarLike_co | dt.date, + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + out: None = None, +) -> np.bool: ... +@overload +def is_busday( + dates: ArrayLike | _NestedSequence[dt.date], + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + out: None = None, +) -> NDArray[np.bool]: ... +@overload +def is_busday( + dates: ArrayLike | _ToDates, + weekmask: ArrayLike = "1111100", + holidays: ArrayLike | _ToDates | None = None, + busdaycal: busdaycalendar | None = None, + *, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def is_busday( + dates: ArrayLike | _ToDates, + weekmask: ArrayLike, + holidays: ArrayLike | _ToDates | None, + busdaycal: busdaycalendar | None, + out: _ArrayT, +) -> _ArrayT: ... + +_TimezoneContext: TypeAlias = L["naive", "UTC", "local"] | dt.tzinfo + +@overload +def datetime_as_string( + arr: datetime64 | dt.date, + unit: L["auto"] | _UnitKind | None = None, + timezone: _TimezoneContext = "naive", + casting: _CastingKind = "same_kind", +) -> str_: ... +@overload +def datetime_as_string( + arr: _ArrayLikeDT64_co | _NestedSequence[dt.date], + unit: L["auto"] | _UnitKind | None = None, + timezone: _TimezoneContext = "naive", + casting: _CastingKind = "same_kind", +) -> NDArray[str_]: ... + +@overload +def compare_chararrays( + a1: _ArrayLikeStr_co, + a2: _ArrayLikeStr_co, + cmp: L["<", "<=", "==", ">=", ">", "!="], + rstrip: bool, +) -> NDArray[np.bool]: ... +@overload +def compare_chararrays( + a1: _ArrayLikeBytes_co, + a2: _ArrayLikeBytes_co, + cmp: L["<", "<=", "==", ">=", ">", "!="], + rstrip: bool, +) -> NDArray[np.bool]: ... + +def add_docstring(obj: Callable[..., Any], docstring: str, /) -> None: ... + +_GetItemKeys: TypeAlias = L[ + "C", "CONTIGUOUS", "C_CONTIGUOUS", + "F", "FORTRAN", "F_CONTIGUOUS", + "W", "WRITEABLE", + "B", "BEHAVED", + "O", "OWNDATA", + "A", "ALIGNED", + "X", "WRITEBACKIFCOPY", + "CA", "CARRAY", + "FA", "FARRAY", + "FNC", + "FORC", +] +_SetItemKeys: TypeAlias = L[ + "A", "ALIGNED", + "W", "WRITEABLE", + "X", "WRITEBACKIFCOPY", +] + +@final +class flagsobj: + __hash__: ClassVar[None] # type: ignore[assignment] + aligned: bool + # NOTE: deprecated + # updateifcopy: bool + writeable: bool + writebackifcopy: bool + @property + def behaved(self) -> bool: ... + @property + def c_contiguous(self) -> bool: ... + @property + def carray(self) -> bool: ... + @property + def contiguous(self) -> bool: ... + @property + def f_contiguous(self) -> bool: ... + @property + def farray(self) -> bool: ... + @property + def fnc(self) -> bool: ... + @property + def forc(self) -> bool: ... + @property + def fortran(self) -> bool: ... + @property + def num(self) -> int: ... + @property + def owndata(self) -> bool: ... + def __getitem__(self, key: _GetItemKeys) -> bool: ... + def __setitem__(self, key: _SetItemKeys, value: bool) -> None: ... + +def nested_iters( + op: ArrayLike | Sequence[ArrayLike], + axes: Sequence[Sequence[SupportsIndex]], + flags: Sequence[_NDIterFlagsKind] | None = ..., + op_flags: Sequence[Sequence[_NDIterFlagsOp]] | None = ..., + op_dtypes: DTypeLike | Sequence[DTypeLike | None] | None = ..., + order: _OrderKACF = ..., + casting: _CastingKind = ..., + buffersize: SupportsIndex = ..., +) -> tuple[nditer, ...]: ... diff --git a/local_packages/numpy/_core/numeric.py b/local_packages/numpy/_core/numeric.py new file mode 100644 index 0000000..da382b4 --- /dev/null +++ b/local_packages/numpy/_core/numeric.py @@ -0,0 +1,2771 @@ +import builtins +import functools +import itertools +import math +import numbers +import operator +import sys +import warnings + +import numpy as np +from numpy.exceptions import AxisError + +from . import multiarray, numerictypes, numerictypes as nt, overrides, shape_base, umath +from ._ufunc_config import errstate +from .multiarray import ( # noqa: F401 + ALLOW_THREADS, + BUFSIZE, + CLIP, + MAXDIMS, + MAY_SHARE_BOUNDS, + MAY_SHARE_EXACT, + RAISE, + WRAP, + arange, + array, + asanyarray, + asarray, + ascontiguousarray, + asfortranarray, + broadcast, + can_cast, + concatenate, + copyto, + dot, + dtype, + empty, + empty_like, + flatiter, + from_dlpack, + frombuffer, + fromfile, + fromiter, + fromstring, + inner, + lexsort, + matmul, + may_share_memory, + min_scalar_type, + ndarray, + nditer, + nested_iters, + normalize_axis_index, + promote_types, + putmask, + result_type, + shares_memory, + vdot, + vecdot, + where, + zeros, +) +from .overrides import finalize_array_function_like, set_module +from .umath import NAN, PINF, invert, multiply, sin + +bitwise_not = invert +ufunc = type(sin) +newaxis = None + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = [ + 'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc', + 'arange', 'array', 'asarray', 'asanyarray', 'ascontiguousarray', + 'asfortranarray', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype', + 'fromstring', 'fromfile', 'frombuffer', 'from_dlpack', 'where', + 'argwhere', 'copyto', 'concatenate', 'lexsort', 'astype', + 'can_cast', 'promote_types', 'min_scalar_type', + 'result_type', 'isfortran', 'empty_like', 'zeros_like', 'ones_like', + 'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', 'roll', + 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian', + 'fromiter', 'array_equal', 'array_equiv', 'indices', 'fromfunction', + 'isclose', 'isscalar', 'binary_repr', 'base_repr', 'ones', + 'identity', 'allclose', 'putmask', + 'flatnonzero', 'inf', 'nan', 'False_', 'True_', 'bitwise_not', + 'full', 'full_like', 'matmul', 'vecdot', 'shares_memory', + 'may_share_memory'] + + +def _zeros_like_dispatcher( + a, dtype=None, order=None, subok=None, shape=None, *, device=None +): + return (a,) + + +@array_function_dispatch(_zeros_like_dispatcher) +def zeros_like( + a, dtype=None, order='K', subok=True, shape=None, *, device=None +): + """ + Return an array of zeros with the same shape and type as a given array. + + Parameters + ---------- + a : array_like + The shape and data-type of `a` define these same attributes of + the returned array. + dtype : data-type, optional + Overrides the data type of the result. + order : {'C', 'F', 'A', or 'K'}, optional + Overrides the memory layout of the result. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. + subok : bool, optional. + If True, then the newly created array will use the sub-class + type of `a`, otherwise it will be a base-class array. Defaults + to True. + shape : int or sequence of ints, optional. + Overrides the shape of the result. If order='K' and the number of + dimensions is unchanged, will try to keep order, otherwise, + order='C' is implied. + device : str, optional + The device on which to place the created array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : ndarray + Array of zeros with the same shape and type as `a`. + + See Also + -------- + empty_like : Return an empty array with shape and type of input. + ones_like : Return an array of ones with shape and type of input. + full_like : Return a new array with shape of input filled with value. + zeros : Return a new array setting values to zero. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(6) + >>> x = x.reshape((2, 3)) + >>> x + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.zeros_like(x) + array([[0, 0, 0], + [0, 0, 0]]) + + >>> y = np.arange(3, dtype=float) + >>> y + array([0., 1., 2.]) + >>> np.zeros_like(y) + array([0., 0., 0.]) + + """ + res = empty_like( + a, dtype=dtype, order=order, subok=subok, shape=shape, device=device + ) + # needed instead of a 0 to get same result as zeros for string dtypes + z = zeros(1, dtype=res.dtype) + multiarray.copyto(res, z, casting='unsafe') + return res + + +@finalize_array_function_like +@set_module('numpy') +def ones(shape, dtype=None, order='C', *, device=None, like=None): + """ + Return a new array of given shape and type, filled with ones. + + Parameters + ---------- + shape : int or sequence of ints + Shape of the new array, e.g., ``(2, 3)`` or ``2``. + dtype : data-type, optional + The desired data-type for the array, e.g., `numpy.int8`. Default is + `numpy.float64`. + order : {'C', 'F'}, optional, default: C + Whether to store multi-dimensional data in row-major + (C-style) or column-major (Fortran-style) order in + memory. + device : str, optional + The device on which to place the created array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Array of ones with the given shape, dtype, and order. + + See Also + -------- + ones_like : Return an array of ones with shape and type of input. + empty : Return a new uninitialized array. + zeros : Return a new array setting values to zero. + full : Return a new array of given shape filled with value. + + Examples + -------- + >>> import numpy as np + >>> np.ones(5) + array([1., 1., 1., 1., 1.]) + + >>> np.ones((5,), dtype=int) + array([1, 1, 1, 1, 1]) + + >>> np.ones((2, 1)) + array([[1.], + [1.]]) + + >>> s = (2,2) + >>> np.ones(s) + array([[1., 1.], + [1., 1.]]) + + """ + if like is not None: + return _ones_with_like( + like, shape, dtype=dtype, order=order, device=device + ) + + a = empty(shape, dtype, order, device=device) + multiarray.copyto(a, 1, casting='unsafe') + return a + + +_ones_with_like = array_function_dispatch()(ones) + + +def _ones_like_dispatcher( + a, dtype=None, order=None, subok=None, shape=None, *, device=None +): + return (a,) + + +@array_function_dispatch(_ones_like_dispatcher) +def ones_like( + a, dtype=None, order='K', subok=True, shape=None, *, device=None +): + """ + Return an array of ones with the same shape and type as a given array. + + Parameters + ---------- + a : array_like + The shape and data-type of `a` define these same attributes of + the returned array. + dtype : data-type, optional + Overrides the data type of the result. + order : {'C', 'F', 'A', or 'K'}, optional + Overrides the memory layout of the result. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. + subok : bool, optional. + If True, then the newly created array will use the sub-class + type of `a`, otherwise it will be a base-class array. Defaults + to True. + shape : int or sequence of ints, optional. + Overrides the shape of the result. If order='K' and the number of + dimensions is unchanged, will try to keep order, otherwise, + order='C' is implied. + device : str, optional + The device on which to place the created array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : ndarray + Array of ones with the same shape and type as `a`. + + See Also + -------- + empty_like : Return an empty array with shape and type of input. + zeros_like : Return an array of zeros with shape and type of input. + full_like : Return a new array with shape of input filled with value. + ones : Return a new array setting values to one. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(6) + >>> x = x.reshape((2, 3)) + >>> x + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.ones_like(x) + array([[1, 1, 1], + [1, 1, 1]]) + + >>> y = np.arange(3, dtype=float) + >>> y + array([0., 1., 2.]) + >>> np.ones_like(y) + array([1., 1., 1.]) + + """ + res = empty_like( + a, dtype=dtype, order=order, subok=subok, shape=shape, device=device + ) + multiarray.copyto(res, 1, casting='unsafe') + return res + + +def _full_dispatcher( + shape, fill_value, dtype=None, order=None, *, device=None, like=None +): + return (like,) + + +@finalize_array_function_like +@set_module('numpy') +def full(shape, fill_value, dtype=None, order='C', *, device=None, like=None): + """ + Return a new array of given shape and type, filled with `fill_value`. + + Parameters + ---------- + shape : int or sequence of ints + Shape of the new array, e.g., ``(2, 3)`` or ``2``. + fill_value : scalar or array_like + Fill value. + dtype : data-type, optional + The desired data-type for the array The default, None, means + ``np.array(fill_value).dtype``. + order : {'C', 'F'}, optional + Whether to store multidimensional data in C- or Fortran-contiguous + (row- or column-wise) order in memory. + device : str, optional + The device on which to place the created array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Array of `fill_value` with the given shape, dtype, and order. + + See Also + -------- + full_like : Return a new array with shape of input filled with value. + empty : Return a new uninitialized array. + ones : Return a new array setting values to one. + zeros : Return a new array setting values to zero. + + Examples + -------- + >>> import numpy as np + >>> np.full((2, 2), np.inf) + array([[inf, inf], + [inf, inf]]) + >>> np.full((2, 2), 10) + array([[10, 10], + [10, 10]]) + + >>> np.full((2, 2), [1, 2]) + array([[1, 2], + [1, 2]]) + + """ + if like is not None: + return _full_with_like( + like, shape, fill_value, dtype=dtype, order=order, device=device + ) + + if dtype is None: + fill_value = asarray(fill_value) + dtype = fill_value.dtype + a = empty(shape, dtype, order, device=device) + multiarray.copyto(a, fill_value, casting='unsafe') + return a + + +_full_with_like = array_function_dispatch()(full) + + +def _full_like_dispatcher( + a, fill_value, dtype=None, order=None, subok=None, shape=None, + *, device=None +): + return (a,) + + +@array_function_dispatch(_full_like_dispatcher) +def full_like( + a, fill_value, dtype=None, order='K', subok=True, shape=None, + *, device=None +): + """ + Return a full array with the same shape and type as a given array. + + Parameters + ---------- + a : array_like + The shape and data-type of `a` define these same attributes of + the returned array. + fill_value : array_like + Fill value. + dtype : data-type, optional + Overrides the data type of the result. + order : {'C', 'F', 'A', or 'K'}, optional + Overrides the memory layout of the result. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. + subok : bool, optional. + If True, then the newly created array will use the sub-class + type of `a`, otherwise it will be a base-class array. Defaults + to True. + shape : int or sequence of ints, optional. + Overrides the shape of the result. If order='K' and the number of + dimensions is unchanged, will try to keep order, otherwise, + order='C' is implied. + device : str, optional + The device on which to place the created array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : ndarray + Array of `fill_value` with the same shape and type as `a`. + + See Also + -------- + empty_like : Return an empty array with shape and type of input. + ones_like : Return an array of ones with shape and type of input. + zeros_like : Return an array of zeros with shape and type of input. + full : Return a new array of given shape filled with value. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(6, dtype=int) + >>> np.full_like(x, 1) + array([1, 1, 1, 1, 1, 1]) + >>> np.full_like(x, 0.1) + array([0, 0, 0, 0, 0, 0]) + >>> np.full_like(x, 0.1, dtype=np.double) + array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) + >>> np.full_like(x, np.nan, dtype=np.double) + array([nan, nan, nan, nan, nan, nan]) + + >>> y = np.arange(6, dtype=np.double) + >>> np.full_like(y, 0.1) + array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) + + >>> y = np.zeros([2, 2, 3], dtype=int) + >>> np.full_like(y, [0, 0, 255]) + array([[[ 0, 0, 255], + [ 0, 0, 255]], + [[ 0, 0, 255], + [ 0, 0, 255]]]) + """ + res = empty_like( + a, dtype=dtype, order=order, subok=subok, shape=shape, device=device + ) + multiarray.copyto(res, fill_value, casting='unsafe') + return res + + +def _count_nonzero_dispatcher(a, axis=None, *, keepdims=None): + return (a,) + + +@array_function_dispatch(_count_nonzero_dispatcher) +def count_nonzero(a, axis=None, *, keepdims=False): + """ + Counts the number of non-zero values in the array ``a``. + + The word "non-zero" is in reference to the Python 2.x + built-in method ``__nonzero__()`` (renamed ``__bool__()`` + in Python 3.x) of Python objects that tests an object's + "truthfulness". For example, any number is considered + truthful if it is nonzero, whereas any string is considered + truthful if it is not the empty string. Thus, this function + (recursively) counts how many elements in ``a`` (and in + sub-arrays thereof) have their ``__nonzero__()`` or ``__bool__()`` + method evaluated to ``True``. + + Parameters + ---------- + a : array_like + The array for which to count non-zeros. + axis : int or tuple, optional + Axis or tuple of axes along which to count non-zeros. + Default is None, meaning that non-zeros will be counted + along a flattened version of ``a``. + keepdims : bool, optional + If this is set to True, the axes that are counted are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + Returns + ------- + count : int or array of int + Number of non-zero values in the array along a given axis. + Otherwise, the total number of non-zero values in the array + is returned. + + See Also + -------- + nonzero : Return the coordinates of all the non-zero values. + + Examples + -------- + >>> import numpy as np + >>> np.count_nonzero(np.eye(4)) + np.int64(4) + >>> a = np.array([[0, 1, 7, 0], + ... [3, 0, 2, 19]]) + >>> np.count_nonzero(a) + np.int64(5) + >>> np.count_nonzero(a, axis=0) + array([1, 1, 2, 1]) + >>> np.count_nonzero(a, axis=1) + array([2, 3]) + >>> np.count_nonzero(a, axis=1, keepdims=True) + array([[2], + [3]]) + """ + if axis is None and not keepdims: + return multiarray.count_nonzero(a) + + a = asanyarray(a) + + # TODO: this works around .astype(bool) not working properly (gh-9847) + if np.issubdtype(a.dtype, np.character): + a_bool = a != a.dtype.type() + else: + a_bool = a.astype(np.bool, copy=False) + + return a_bool.sum(axis=axis, dtype=np.intp, keepdims=keepdims) + + +@set_module('numpy') +def isfortran(a): + """ + Check if the array is Fortran contiguous but *not* C contiguous. + + This function is obsolete. If you only want to check if an array is Fortran + contiguous use ``a.flags.f_contiguous`` instead. + + Parameters + ---------- + a : ndarray + Input array. + + Returns + ------- + isfortran : bool + Returns True if the array is Fortran contiguous but *not* C contiguous. + + + Examples + -------- + + np.array allows to specify whether the array is written in C-contiguous + order (last index varies the fastest), or FORTRAN-contiguous order in + memory (first index varies the fastest). + + >>> import numpy as np + >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C') + >>> a + array([[1, 2, 3], + [4, 5, 6]]) + >>> np.isfortran(a) + False + + >>> b = np.array([[1, 2, 3], [4, 5, 6]], order='F') + >>> b + array([[1, 2, 3], + [4, 5, 6]]) + >>> np.isfortran(b) + True + + + The transpose of a C-ordered array is a FORTRAN-ordered array. + + >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C') + >>> a + array([[1, 2, 3], + [4, 5, 6]]) + >>> np.isfortran(a) + False + >>> b = a.T + >>> b + array([[1, 4], + [2, 5], + [3, 6]]) + >>> np.isfortran(b) + True + + C-ordered arrays evaluate as False even if they are also FORTRAN-ordered. + + >>> np.isfortran(np.array([1, 2], order='F')) + False + + """ + return a.flags.fnc + + +def _argwhere_dispatcher(a): + return (a,) + + +@array_function_dispatch(_argwhere_dispatcher) +def argwhere(a): + """ + Find the indices of array elements that are non-zero, grouped by element. + + Parameters + ---------- + a : array_like + Input data. + + Returns + ------- + index_array : (N, a.ndim) ndarray + Indices of elements that are non-zero. Indices are grouped by element. + This array will have shape ``(N, a.ndim)`` where ``N`` is the number of + non-zero items. + + See Also + -------- + where, nonzero + + Notes + ----- + ``np.argwhere(a)`` is almost the same as ``np.transpose(np.nonzero(a))``, + but produces a result of the correct shape for a 0D array. + + The output of ``argwhere`` is not suitable for indexing arrays. + For this purpose use ``nonzero(a)`` instead. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(6).reshape(2,3) + >>> x + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.argwhere(x>1) + array([[0, 2], + [1, 0], + [1, 1], + [1, 2]]) + + """ + # nonzero does not behave well on 0d, so promote to 1d + if np.ndim(a) == 0: + a = shape_base.atleast_1d(a) + # then remove the added dimension + return argwhere(a)[:, :0] + return transpose(nonzero(a)) + + +def _flatnonzero_dispatcher(a): + return (a,) + + +@array_function_dispatch(_flatnonzero_dispatcher) +def flatnonzero(a): + """ + Return indices that are non-zero in the flattened version of a. + + This is equivalent to ``np.nonzero(np.ravel(a))[0]``. + + Parameters + ---------- + a : array_like + Input data. + + Returns + ------- + res : ndarray + Output array, containing the indices of the elements of ``a.ravel()`` + that are non-zero. + + See Also + -------- + nonzero : Return the indices of the non-zero elements of the input array. + ravel : Return a 1-D array containing the elements of the input array. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(-2, 3) + >>> x + array([-2, -1, 0, 1, 2]) + >>> np.flatnonzero(x) + array([0, 1, 3, 4]) + + Use the indices of the non-zero elements as an index array to extract + these elements: + + >>> x.ravel()[np.flatnonzero(x)] + array([-2, -1, 1, 2]) + + """ + return np.nonzero(np.ravel(a))[0] + + +def _correlate_dispatcher(a, v, mode=None): + return (a, v) + + +@array_function_dispatch(_correlate_dispatcher) +def correlate(a, v, mode='valid'): + r""" + Cross-correlation of two 1-dimensional sequences. + + This function computes the correlation as generally defined in signal + processing texts [1]_: + + .. math:: c_k = \sum_n a_{n+k} \cdot \overline{v}_n + + with a and v sequences being zero-padded where necessary and + :math:`\overline v` denoting complex conjugation. + + Parameters + ---------- + a, v : array_like + Input sequences. + mode : {'valid', 'same', 'full'}, optional + Refer to the `convolve` docstring. Note that the default + is 'valid', unlike `convolve`, which uses 'full'. + + Returns + ------- + out : ndarray + Discrete cross-correlation of `a` and `v`. + + See Also + -------- + convolve : Discrete, linear convolution of two one-dimensional sequences. + scipy.signal.correlate : uses FFT which has superior performance + on large arrays. + + Notes + ----- + The definition of correlation above is not unique and sometimes + correlation may be defined differently. Another common definition is [1]_: + + .. math:: c'_k = \sum_n a_{n} \cdot \overline{v_{n+k}} + + which is related to :math:`c_k` by :math:`c'_k = c_{-k}`. + + `numpy.correlate` may perform slowly in large arrays (i.e. n = 1e5) + because it does not use the FFT to compute the convolution; in that case, + `scipy.signal.correlate` might be preferable. + + References + ---------- + .. [1] Wikipedia, "Cross-correlation", + https://en.wikipedia.org/wiki/Cross-correlation + + Examples + -------- + >>> import numpy as np + >>> np.correlate([1, 2, 3], [0, 1, 0.5]) + array([3.5]) + >>> np.correlate([1, 2, 3], [0, 1, 0.5], "same") + array([2. , 3.5, 3. ]) + >>> np.correlate([1, 2, 3], [0, 1, 0.5], "full") + array([0.5, 2. , 3.5, 3. , 0. ]) + + Using complex sequences: + + >>> np.correlate([1+1j, 2, 3-1j], [0, 1, 0.5j], 'full') + array([ 0.5-0.5j, 1.0+0.j , 1.5-1.5j, 3.0-1.j , 0.0+0.j ]) + + Note that you get the time reversed, complex conjugated result + (:math:`\overline{c_{-k}}`) when the two input sequences a and v change + places: + + >>> np.correlate([0, 1, 0.5j], [1+1j, 2, 3-1j], 'full') + array([ 0.0+0.j , 3.0+1.j , 1.5+1.5j, 1.0+0.j , 0.5+0.5j]) + + """ + return multiarray.correlate2(a, v, mode) + + +def _convolve_dispatcher(a, v, mode=None): + return (a, v) + + +@array_function_dispatch(_convolve_dispatcher) +def convolve(a, v, mode='full'): + """ + Returns the discrete, linear convolution of two one-dimensional sequences. + + The convolution operator is often seen in signal processing, where it + models the effect of a linear time-invariant system on a signal [1]_. In + probability theory, the sum of two independent random variables is + distributed according to the convolution of their individual + distributions. + + If `v` is longer than `a`, the arrays are swapped before computation. + + Parameters + ---------- + a : (N,) array_like + First one-dimensional input array. + v : (M,) array_like + Second one-dimensional input array. + mode : {'full', 'valid', 'same'}, optional + 'full': + By default, mode is 'full'. This returns the convolution + at each point of overlap, with an output shape of (N+M-1,). At + the end-points of the convolution, the signals do not overlap + completely, and boundary effects may be seen. + + 'same': + Mode 'same' returns output of length ``max(M, N)``. Boundary + effects are still visible. + + 'valid': + Mode 'valid' returns output of length + ``max(M, N) - min(M, N) + 1``. The convolution product is only given + for points where the signals overlap completely. Values outside + the signal boundary have no effect. + + Returns + ------- + out : ndarray + Discrete, linear convolution of `a` and `v`. + + See Also + -------- + scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier + Transform. + scipy.linalg.toeplitz : Used to construct the convolution operator. + polymul : Polynomial multiplication. Same output as convolve, but also + accepts poly1d objects as input. + + Notes + ----- + The discrete convolution operation is defined as + + .. math:: (a * v)_n = \\sum_{m = -\\infty}^{\\infty} a_m v_{n - m} + + It can be shown that a convolution :math:`x(t) * y(t)` in time/space + is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier + domain, after appropriate padding (padding is necessary to prevent + circular convolution). Since multiplication is more efficient (faster) + than convolution, the function `scipy.signal.fftconvolve` exploits the + FFT to calculate the convolution of large data-sets. + + References + ---------- + .. [1] Wikipedia, "Convolution", + https://en.wikipedia.org/wiki/Convolution + + Examples + -------- + Note how the convolution operator flips the second array + before "sliding" the two across one another: + + >>> import numpy as np + >>> np.convolve([1, 2, 3], [0, 1, 0.5]) + array([0. , 1. , 2.5, 4. , 1.5]) + + Only return the middle values of the convolution. + Contains boundary effects, where zeros are taken + into account: + + >>> np.convolve([1,2,3],[0,1,0.5], 'same') + array([1. , 2.5, 4. ]) + + The two arrays are of the same length, so there + is only one position where they completely overlap: + + >>> np.convolve([1,2,3],[0,1,0.5], 'valid') + array([2.5]) + + """ + a, v = array(a, copy=None, ndmin=1), array(v, copy=None, ndmin=1) + if len(a) == 0: + raise ValueError('a cannot be empty') + if len(v) == 0: + raise ValueError('v cannot be empty') + if len(v) > len(a): + a, v = v, a + return multiarray.correlate(a, v[::-1], mode) + + +def _outer_dispatcher(a, b, out=None): + return (a, b, out) + + +@array_function_dispatch(_outer_dispatcher) +def outer(a, b, out=None): + """ + Compute the outer product of two vectors. + + Given two vectors `a` and `b` of length ``M`` and ``N``, respectively, + the outer product [1]_ is:: + + [[a_0*b_0 a_0*b_1 ... a_0*b_{N-1} ] + [a_1*b_0 . + [ ... . + [a_{M-1}*b_0 a_{M-1}*b_{N-1} ]] + + Parameters + ---------- + a : (M,) array_like + First input vector. Input is flattened if + not already 1-dimensional. + b : (N,) array_like + Second input vector. Input is flattened if + not already 1-dimensional. + out : (M, N) ndarray, optional + A location where the result is stored + + Returns + ------- + out : (M, N) ndarray + ``out[i, j] = a[i] * b[j]`` + + See also + -------- + inner + einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent. + ufunc.outer : A generalization to dimensions other than 1D and other + operations. ``np.multiply.outer(a.ravel(), b.ravel())`` + is the equivalent. + linalg.outer : An Array API compatible variation of ``np.outer``, + which accepts 1-dimensional inputs only. + tensordot : ``np.tensordot(a.ravel(), b.ravel(), axes=((), ()))`` + is the equivalent. + + References + ---------- + .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd + ed., Baltimore, MD, Johns Hopkins University Press, 1996, + pg. 8. + + Examples + -------- + Make a (*very* coarse) grid for computing a Mandelbrot set: + + >>> import numpy as np + >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5)) + >>> rl + array([[-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.]]) + >>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,))) + >>> im + array([[0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j], + [0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j], + [0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], + [0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j], + [0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]]) + >>> grid = rl + im + >>> grid + array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j], + [-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j], + [-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j], + [-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j], + [-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]]) + + An example using a "vector" of letters: + + >>> x = np.array(['a', 'b', 'c'], dtype=object) + >>> np.outer(x, [1, 2, 3]) + array([['a', 'aa', 'aaa'], + ['b', 'bb', 'bbb'], + ['c', 'cc', 'ccc']], dtype=object) + + """ + a = asarray(a) + b = asarray(b) + return multiply(a.ravel()[:, newaxis], b.ravel()[newaxis, :], out) + + +def _tensordot_dispatcher(a, b, axes=None): + return (a, b) + + +@array_function_dispatch(_tensordot_dispatcher) +def tensordot(a, b, axes=2): + """ + Compute tensor dot product along specified axes. + + Given two tensors, `a` and `b`, and an array_like object containing + two array_like objects, ``(a_axes, b_axes)``, sum the products of + `a`'s and `b`'s elements (components) over the axes specified by + ``a_axes`` and ``b_axes``. The third argument can be a single non-negative + integer_like scalar, ``N``; if it is such, then the last ``N`` dimensions + of `a` and the first ``N`` dimensions of `b` are summed over. + + Parameters + ---------- + a, b : array_like + Tensors to "dot". + + axes : int or (2,) array_like + * integer_like + If an int N, sum over the last N axes of `a` and the first N axes + of `b` in order. The sizes of the corresponding axes must match. + * (2,) array_like + Or, a list of axes to be summed over, first sequence applying to `a`, + second to `b`. Both elements array_like must be of the same length. + Each axis may appear at most once; repeated axes are not allowed. + For example, ``axes=([1, 1], [0, 0])`` is invalid. + Returns + ------- + output : ndarray + The tensor dot product of the input. + + See Also + -------- + dot, einsum + + Notes + ----- + Three common use cases are: + * ``axes = 0`` : tensor product :math:`a\\otimes b` + * ``axes = 1`` : tensor dot product :math:`a\\cdot b` + * ``axes = 2`` : (default) tensor double contraction :math:`a:b` + + When `axes` is integer_like, the sequence of axes for evaluation + will be: from the -Nth axis to the -1th axis in `a`, + and from the 0th axis to (N-1)th axis in `b`. + For example, ``axes = 2`` is the equal to + ``axes = [[-2, -1], [0, 1]]``. + When N-1 is smaller than 0, or when -N is larger than -1, + the element of `a` and `b` are defined as the `axes`. + + When there is more than one axis to sum over - and they are not the last + (first) axes of `a` (`b`) - the argument `axes` should consist of + two sequences of the same length, with the first axis to sum over given + first in both sequences, the second axis second, and so forth. + The calculation can be referred to ``numpy.einsum``. + + For example, if ``a.shape == (2, 3, 4)`` and ``b.shape == (3, 4, 5)``, + then ``axes=([1, 2], [0, 1])`` sums over the ``(3, 4)`` dimensions of + both arrays and produces an output of shape ``(2, 5)``. + + Each summation axis corresponds to a distinct contraction index; repeating + an axis (for example ``axes=([1, 1], [0, 0])``) is invalid. + + The shape of the result consists of the non-contracted axes of the + first tensor, followed by the non-contracted axes of the second. + + Examples + -------- + An example on integer_like: + + >>> a_0 = np.array([[1, 2], [3, 4]]) + >>> b_0 = np.array([[5, 6], [7, 8]]) + >>> c_0 = np.tensordot(a_0, b_0, axes=0) + >>> c_0.shape + (2, 2, 2, 2) + >>> c_0 + array([[[[ 5, 6], + [ 7, 8]], + [[10, 12], + [14, 16]]], + [[[15, 18], + [21, 24]], + [[20, 24], + [28, 32]]]]) + + An example on array_like: + + >>> a = np.arange(60.).reshape(3,4,5) + >>> b = np.arange(24.).reshape(4,3,2) + >>> c = np.tensordot(a,b, axes=([1,0],[0,1])) + >>> c.shape + (5, 2) + >>> c + array([[4400., 4730.], + [4532., 4874.], + [4664., 5018.], + [4796., 5162.], + [4928., 5306.]]) + + A slower but equivalent way of computing the same... + + >>> d = np.zeros((5,2)) + >>> for i in range(5): + ... for j in range(2): + ... for k in range(3): + ... for n in range(4): + ... d[i,j] += a[k,n,i] * b[n,k,j] + >>> c == d + array([[ True, True], + [ True, True], + [ True, True], + [ True, True], + [ True, True]]) + + An extended example taking advantage of the overloading of + and \\*: + + >>> a = np.array(range(1, 9)).reshape((2, 2, 2)) + >>> A = np.array(('a', 'b', 'c', 'd'), dtype=object) + >>> A = A.reshape((2, 2)) + >>> a; A + array([[[1, 2], + [3, 4]], + [[5, 6], + [7, 8]]]) + array([['a', 'b'], + ['c', 'd']], dtype=object) + + >>> np.tensordot(a, A) # third argument default is 2 for double-contraction + array(['abbcccdddd', 'aaaaabbbbbbcccccccdddddddd'], dtype=object) + + >>> np.tensordot(a, A, 1) + array([[['acc', 'bdd'], + ['aaacccc', 'bbbdddd']], + [['aaaaacccccc', 'bbbbbdddddd'], + ['aaaaaaacccccccc', 'bbbbbbbdddddddd']]], dtype=object) + + >>> np.tensordot(a, A, 0) # tensor product (result too long to incl.) + array([[[[['a', 'b'], + ['c', 'd']], + ... + + >>> np.tensordot(a, A, (0, 1)) + array([[['abbbbb', 'cddddd'], + ['aabbbbbb', 'ccdddddd']], + [['aaabbbbbbb', 'cccddddddd'], + ['aaaabbbbbbbb', 'ccccdddddddd']]], dtype=object) + + >>> np.tensordot(a, A, (2, 1)) + array([[['abb', 'cdd'], + ['aaabbbb', 'cccdddd']], + [['aaaaabbbbbb', 'cccccdddddd'], + ['aaaaaaabbbbbbbb', 'cccccccdddddddd']]], dtype=object) + + >>> np.tensordot(a, A, ((0, 1), (0, 1))) + array(['abbbcccccddddddd', 'aabbbbccccccdddddddd'], dtype=object) + + >>> np.tensordot(a, A, ((2, 1), (1, 0))) + array(['acccbbdddd', 'aaaaacccccccbbbbbbdddddddd'], dtype=object) + + """ + try: + iter(axes) + except Exception: + axes_a = list(range(-axes, 0)) + axes_b = list(range(axes)) + else: + axes_a, axes_b = axes + try: + na = len(axes_a) + axes_a = list(axes_a) + except TypeError: + axes_a = [axes_a] + na = 1 + try: + nb = len(axes_b) + axes_b = list(axes_b) + except TypeError: + axes_b = [axes_b] + nb = 1 + + if len(set(axes_a)) != len(axes_a): + raise ValueError("duplicate axes are not allowed in tensordot") + if len(set(axes_b)) != len(axes_b): + raise ValueError("duplicate axes are not allowed in tensordot") + + a, b = asarray(a), asarray(b) + as_ = a.shape + nda = a.ndim + bs = b.shape + ndb = b.ndim + equal = True + if na != nb: + equal = False + else: + for k in range(na): + if as_[axes_a[k]] != bs[axes_b[k]]: + equal = False + break + if axes_a[k] < 0: + axes_a[k] += nda + if axes_b[k] < 0: + axes_b[k] += ndb + if not equal: + raise ValueError("shape-mismatch for sum") + + # Move the axes to sum over to the end of "a" + # and to the front of "b" + notin = [k for k in range(nda) if k not in axes_a] + newaxes_a = notin + axes_a + N2 = math.prod(as_[axis] for axis in axes_a) + newshape_a = (math.prod(as_[ax] for ax in notin), N2) + olda = [as_[axis] for axis in notin] + + notin = [k for k in range(ndb) if k not in axes_b] + newaxes_b = axes_b + notin + N2 = math.prod(bs[axis] for axis in axes_b) + newshape_b = (N2, math.prod(bs[ax] for ax in notin)) + oldb = [bs[axis] for axis in notin] + + at = a.transpose(newaxes_a).reshape(newshape_a) + bt = b.transpose(newaxes_b).reshape(newshape_b) + res = dot(at, bt) + return res.reshape(olda + oldb) + + +def _roll_dispatcher(a, shift, axis=None): + return (a,) + + +@array_function_dispatch(_roll_dispatcher) +def roll(a, shift, axis=None): + """ + Roll array elements along a given axis. + + Elements that roll beyond the last position are re-introduced at + the first. + + Parameters + ---------- + a : array_like + Input array. + shift : int or tuple of ints + The number of places by which elements are shifted. If a tuple, + then `axis` must be a tuple of the same size, and each of the + given axes is shifted by the corresponding number. If an int + while `axis` is a tuple of ints, then the same value is used for + all given axes. + axis : int or tuple of ints, optional + Axis or axes along which elements are shifted. By default, the + array is flattened before shifting, after which the original + shape is restored. + + Returns + ------- + res : ndarray + Output array, with the same shape as `a`. + + See Also + -------- + rollaxis : Roll the specified axis backwards, until it lies in a + given position. + + Notes + ----- + Supports rolling over multiple dimensions simultaneously. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(10) + >>> np.roll(x, 2) + array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]) + >>> np.roll(x, -2) + array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1]) + + >>> x2 = np.reshape(x, (2, 5)) + >>> x2 + array([[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]]) + >>> np.roll(x2, 1) + array([[9, 0, 1, 2, 3], + [4, 5, 6, 7, 8]]) + >>> np.roll(x2, -1) + array([[1, 2, 3, 4, 5], + [6, 7, 8, 9, 0]]) + >>> np.roll(x2, 1, axis=0) + array([[5, 6, 7, 8, 9], + [0, 1, 2, 3, 4]]) + >>> np.roll(x2, -1, axis=0) + array([[5, 6, 7, 8, 9], + [0, 1, 2, 3, 4]]) + >>> np.roll(x2, 1, axis=1) + array([[4, 0, 1, 2, 3], + [9, 5, 6, 7, 8]]) + >>> np.roll(x2, -1, axis=1) + array([[1, 2, 3, 4, 0], + [6, 7, 8, 9, 5]]) + >>> np.roll(x2, (1, 1), axis=(1, 0)) + array([[9, 5, 6, 7, 8], + [4, 0, 1, 2, 3]]) + >>> np.roll(x2, (2, 1), axis=(1, 0)) + array([[8, 9, 5, 6, 7], + [3, 4, 0, 1, 2]]) + + """ + a = asanyarray(a) + if axis is None: + return roll(a.ravel(), shift, 0).reshape(a.shape) + + else: + axis = normalize_axis_tuple(axis, a.ndim, allow_duplicate=True) + broadcasted = broadcast(shift, axis) + if broadcasted.ndim > 1: + raise ValueError( + "'shift' and 'axis' should be scalars or 1D sequences") + shifts = dict.fromkeys(range(a.ndim), 0) + for sh, ax in broadcasted: + shifts[ax] += int(sh) + + rolls = [((slice(None), slice(None)),)] * a.ndim + for ax, offset in shifts.items(): + offset %= a.shape[ax] or 1 # If `a` is empty, nothing matters. + if offset: + # (original, result), (original, result) + rolls[ax] = ((slice(None, -offset), slice(offset, None)), + (slice(-offset, None), slice(None, offset))) + + result = empty_like(a) + for indices in itertools.product(*rolls): + arr_index, res_index = zip(*indices) + result[res_index] = a[arr_index] + + return result + + +def _rollaxis_dispatcher(a, axis, start=None): + return (a,) + + +@array_function_dispatch(_rollaxis_dispatcher) +def rollaxis(a, axis, start=0): + """ + Roll the specified axis backwards, until it lies in a given position. + + This function continues to be supported for backward compatibility, but you + should prefer `moveaxis`. The `moveaxis` function was added in NumPy + 1.11. + + Parameters + ---------- + a : ndarray + Input array. + axis : int + The axis to be rolled. The positions of the other axes do not + change relative to one another. + start : int, optional + When ``start <= axis``, the axis is rolled back until it lies in + this position. When ``start > axis``, the axis is rolled until it + lies before this position. The default, 0, results in a "complete" + roll. The following table describes how negative values of ``start`` + are interpreted: + + .. table:: + :align: left + + +-------------------+----------------------+ + | ``start`` | Normalized ``start`` | + +===================+======================+ + | ``-(arr.ndim+1)`` | raise ``AxisError`` | + +-------------------+----------------------+ + | ``-arr.ndim`` | 0 | + +-------------------+----------------------+ + | |vdots| | |vdots| | + +-------------------+----------------------+ + | ``-1`` | ``arr.ndim-1`` | + +-------------------+----------------------+ + | ``0`` | ``0`` | + +-------------------+----------------------+ + | |vdots| | |vdots| | + +-------------------+----------------------+ + | ``arr.ndim`` | ``arr.ndim`` | + +-------------------+----------------------+ + | ``arr.ndim + 1`` | raise ``AxisError`` | + +-------------------+----------------------+ + + .. |vdots| unicode:: U+22EE .. Vertical Ellipsis + + Returns + ------- + res : ndarray + For NumPy >= 1.10.0 a view of `a` is always returned. For earlier + NumPy versions a view of `a` is returned only if the order of the + axes is changed, otherwise the input array is returned. + + See Also + -------- + moveaxis : Move array axes to new positions. + roll : Roll the elements of an array by a number of positions along a + given axis. + + Examples + -------- + >>> import numpy as np + >>> a = np.ones((3,4,5,6)) + >>> np.rollaxis(a, 3, 1).shape + (3, 6, 4, 5) + >>> np.rollaxis(a, 2).shape + (5, 3, 4, 6) + >>> np.rollaxis(a, 1, 4).shape + (3, 5, 6, 4) + + """ + n = a.ndim + axis = normalize_axis_index(axis, n) + if start < 0: + start += n + msg = "'%s' arg requires %d <= %s < %d, but %d was passed in" + if not (0 <= start < n + 1): + raise AxisError(msg % ('start', -n, 'start', n + 1, start)) + if axis < start: + # it's been removed + start -= 1 + if axis == start: + return a[...] + axes = list(range(n)) + axes.remove(axis) + axes.insert(start, axis) + return a.transpose(axes) + + +@set_module("numpy.lib.array_utils") +def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False): + """ + Normalizes an axis argument into a tuple of non-negative integer axes. + + This handles shorthands such as ``1`` and converts them to ``(1,)``, + as well as performing the handling of negative indices covered by + `normalize_axis_index`. + + By default, this forbids axes from being specified multiple times. + + Used internally by multi-axis-checking logic. + + Parameters + ---------- + axis : int, iterable of int + The un-normalized index or indices of the axis. + ndim : int + The number of dimensions of the array that `axis` should be normalized + against. + argname : str, optional + A prefix to put before the error message, typically the name of the + argument. + allow_duplicate : bool, optional + If False, the default, disallow an axis from being specified twice. + + Returns + ------- + normalized_axes : tuple of int + The normalized axis index, such that `0 <= normalized_axis < ndim` + + Raises + ------ + AxisError + If any axis provided is out of range + ValueError + If an axis is repeated + + See also + -------- + normalize_axis_index : normalizing a single scalar axis + """ + # Optimization to speed-up the most common cases. + if not isinstance(axis, (tuple, list)): + try: + axis = [operator.index(axis)] + except TypeError: + pass + # Going via an iterator directly is slower than via list comprehension. + axis = tuple(normalize_axis_index(ax, ndim, argname) for ax in axis) + if not allow_duplicate and len(set(axis)) != len(axis): + if argname: + raise ValueError(f'repeated axis in `{argname}` argument') + else: + raise ValueError('repeated axis') + return axis + + +def _moveaxis_dispatcher(a, source, destination): + return (a,) + + +@array_function_dispatch(_moveaxis_dispatcher) +def moveaxis(a, source, destination): + """ + Move axes of an array to new positions. + + Other axes remain in their original order. + + Parameters + ---------- + a : np.ndarray + The array whose axes should be reordered. + source : int or sequence of int + Original positions of the axes to move. These must be unique. + destination : int or sequence of int + Destination positions for each of the original axes. These must also be + unique. + + Returns + ------- + result : np.ndarray + Array with moved axes. This array is a view of the input array. + + See Also + -------- + transpose : Permute the dimensions of an array. + swapaxes : Interchange two axes of an array. + + Examples + -------- + >>> import numpy as np + >>> x = np.zeros((3, 4, 5)) + >>> np.moveaxis(x, 0, -1).shape + (4, 5, 3) + >>> np.moveaxis(x, -1, 0).shape + (5, 3, 4) + + These all achieve the same result: + + >>> np.transpose(x).shape + (5, 4, 3) + >>> np.swapaxes(x, 0, -1).shape + (5, 4, 3) + >>> np.moveaxis(x, [0, 1], [-1, -2]).shape + (5, 4, 3) + >>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape + (5, 4, 3) + + """ + try: + # allow duck-array types if they define transpose + transpose = a.transpose + except AttributeError: + a = asarray(a) + transpose = a.transpose + + source = normalize_axis_tuple(source, a.ndim, 'source') + destination = normalize_axis_tuple(destination, a.ndim, 'destination') + if len(source) != len(destination): + raise ValueError('`source` and `destination` arguments must have ' + 'the same number of elements') + + order = [n for n in range(a.ndim) if n not in source] + + for dest, src in sorted(zip(destination, source)): + order.insert(dest, src) + + result = transpose(order) + return result + + +def _cross_dispatcher(a, b, axisa=None, axisb=None, axisc=None, axis=None): + return (a, b) + + +@array_function_dispatch(_cross_dispatcher) +def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): + """ + Return the cross product of two (arrays of) vectors. + + The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular + to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors + are defined by the last axis of `a` and `b` by default, and these axes + can have dimensions 2 or 3. Where the dimension of either `a` or `b` is + 2, the third component of the input vector is assumed to be zero and the + cross product calculated accordingly. In cases where both input vectors + have dimension 2, the z-component of the cross product is returned. + + Parameters + ---------- + a : array_like + Components of the first vector(s). + b : array_like + Components of the second vector(s). + axisa : int, optional + Axis of `a` that defines the vector(s). By default, the last axis. + axisb : int, optional + Axis of `b` that defines the vector(s). By default, the last axis. + axisc : int, optional + Axis of `c` containing the cross product vector(s). Ignored if + both input vectors have dimension 2, as the return is scalar. + By default, the last axis. + axis : int, optional + If defined, the axis of `a`, `b` and `c` that defines the vector(s) + and cross product(s). Overrides `axisa`, `axisb` and `axisc`. + + Returns + ------- + c : ndarray + Vector cross product(s). + + Raises + ------ + ValueError + When the dimension of the vector(s) in `a` and/or `b` does not + equal 2 or 3. + + See Also + -------- + inner : Inner product + outer : Outer product. + linalg.cross : An Array API compatible variation of ``np.cross``, + which accepts (arrays of) 3-element vectors only. + ix_ : Construct index arrays. + + Notes + ----- + Supports full broadcasting of the inputs. + + Dimension-2 input arrays were deprecated in 2.0.0. If you do need this + functionality, you can use:: + + def cross2d(x, y): + return x[..., 0] * y[..., 1] - x[..., 1] * y[..., 0] + + Examples + -------- + Vector cross-product. + + >>> import numpy as np + >>> x = [1, 2, 3] + >>> y = [4, 5, 6] + >>> np.cross(x, y) + array([-3, 6, -3]) + + One vector with dimension 2. + + >>> x = [1, 2] + >>> y = [4, 5, 6] + >>> np.cross(x, y) + array([12, -6, -3]) + + Equivalently: + + >>> x = [1, 2, 0] + >>> y = [4, 5, 6] + >>> np.cross(x, y) + array([12, -6, -3]) + + Both vectors with dimension 2. + + >>> x = [1,2] + >>> y = [4,5] + >>> np.cross(x, y) + array(-3) + + Multiple vector cross-products. Note that the direction of the cross + product vector is defined by the *right-hand rule*. + + >>> x = np.array([[1,2,3], [4,5,6]]) + >>> y = np.array([[4,5,6], [1,2,3]]) + >>> np.cross(x, y) + array([[-3, 6, -3], + [ 3, -6, 3]]) + + The orientation of `c` can be changed using the `axisc` keyword. + + >>> np.cross(x, y, axisc=0) + array([[-3, 3], + [ 6, -6], + [-3, 3]]) + + Change the vector definition of `x` and `y` using `axisa` and `axisb`. + + >>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]]) + >>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]]) + >>> np.cross(x, y) + array([[ -6, 12, -6], + [ 0, 0, 0], + [ 6, -12, 6]]) + >>> np.cross(x, y, axisa=0, axisb=0) + array([[-24, 48, -24], + [-30, 60, -30], + [-36, 72, -36]]) + + """ + if axis is not None: + axisa, axisb, axisc = (axis,) * 3 + a = asarray(a) + b = asarray(b) + + if (a.ndim < 1) or (b.ndim < 1): + raise ValueError("At least one array has zero dimension") + + # Check axisa and axisb are within bounds + axisa = normalize_axis_index(axisa, a.ndim, msg_prefix='axisa') + axisb = normalize_axis_index(axisb, b.ndim, msg_prefix='axisb') + + # Move working axis to the end of the shape + a = moveaxis(a, axisa, -1) + b = moveaxis(b, axisb, -1) + msg = ("incompatible dimensions for cross product\n" + "(dimension must be 2 or 3)") + if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3): + raise ValueError(msg) + if a.shape[-1] == 2 or b.shape[-1] == 2: + # Deprecated in NumPy 2.0, 2023-09-26 + warnings.warn( + "Arrays of 2-dimensional vectors are deprecated. Use arrays of " + "3-dimensional vectors instead. (deprecated in NumPy 2.0)", + DeprecationWarning, stacklevel=2 + ) + + # Create the output array + shape = broadcast(a[..., 0], b[..., 0]).shape + if a.shape[-1] == 3 or b.shape[-1] == 3: + shape += (3,) + # Check axisc is within bounds + axisc = normalize_axis_index(axisc, len(shape), msg_prefix='axisc') + dtype = promote_types(a.dtype, b.dtype) + cp = empty(shape, dtype) + + # recast arrays as dtype + a = a.astype(dtype) + b = b.astype(dtype) + + # create local aliases for readability + a0 = a[..., 0] + a1 = a[..., 1] + if a.shape[-1] == 3: + a2 = a[..., 2] + b0 = b[..., 0] + b1 = b[..., 1] + if b.shape[-1] == 3: + b2 = b[..., 2] + if cp.ndim != 0 and cp.shape[-1] == 3: + cp0 = cp[..., 0] + cp1 = cp[..., 1] + cp2 = cp[..., 2] + + if a.shape[-1] == 2: + if b.shape[-1] == 2: + # a0 * b1 - a1 * b0 + multiply(a0, b1, out=cp) + cp -= a1 * b0 + return cp + else: + assert b.shape[-1] == 3 + # cp0 = a1 * b2 - 0 (a2 = 0) + # cp1 = 0 - a0 * b2 (a2 = 0) + # cp2 = a0 * b1 - a1 * b0 + multiply(a1, b2, out=cp0) + multiply(a0, b2, out=cp1) + negative(cp1, out=cp1) + multiply(a0, b1, out=cp2) + cp2 -= a1 * b0 + else: + assert a.shape[-1] == 3 + if b.shape[-1] == 3: + # cp0 = a1 * b2 - a2 * b1 + # cp1 = a2 * b0 - a0 * b2 + # cp2 = a0 * b1 - a1 * b0 + multiply(a1, b2, out=cp0) + tmp = np.multiply(a2, b1, out=...) + cp0 -= tmp + multiply(a2, b0, out=cp1) + multiply(a0, b2, out=tmp) + cp1 -= tmp + multiply(a0, b1, out=cp2) + multiply(a1, b0, out=tmp) + cp2 -= tmp + else: + assert b.shape[-1] == 2 + # cp0 = 0 - a2 * b1 (b2 = 0) + # cp1 = a2 * b0 - 0 (b2 = 0) + # cp2 = a0 * b1 - a1 * b0 + multiply(a2, b1, out=cp0) + negative(cp0, out=cp0) + multiply(a2, b0, out=cp1) + multiply(a0, b1, out=cp2) + cp2 -= a1 * b0 + + return moveaxis(cp, -1, axisc) + + +little_endian = (sys.byteorder == 'little') + + +@set_module('numpy') +def indices(dimensions, dtype=int, sparse=False): + """ + Return an array representing the indices of a grid. + + Compute an array where the subarrays contain index values 0, 1, ... + varying only along the corresponding axis. + + Parameters + ---------- + dimensions : sequence of ints + The shape of the grid. + dtype : dtype, optional + Data type of the result. + sparse : boolean, optional + Return a sparse representation of the grid instead of a dense + representation. Default is False. + + Returns + ------- + grid : one ndarray or tuple of ndarrays + If sparse is False: + Returns one array of grid indices, + ``grid.shape = (len(dimensions),) + tuple(dimensions)``. + If sparse is True: + Returns a tuple of arrays, with + ``grid[i].shape = (1, ..., 1, dimensions[i], 1, ..., 1)`` with + dimensions[i] in the ith place + + See Also + -------- + mgrid, ogrid, meshgrid + + Notes + ----- + The output shape in the dense case is obtained by prepending the number + of dimensions in front of the tuple of dimensions, i.e. if `dimensions` + is a tuple ``(r0, ..., rN-1)`` of length ``N``, the output shape is + ``(N, r0, ..., rN-1)``. + + The subarrays ``grid[k]`` contains the N-D array of indices along the + ``k-th`` axis. Explicitly:: + + grid[k, i0, i1, ..., iN-1] = ik + + Examples + -------- + >>> import numpy as np + >>> grid = np.indices((2, 3)) + >>> grid.shape + (2, 2, 3) + >>> grid[0] # row indices + array([[0, 0, 0], + [1, 1, 1]]) + >>> grid[1] # column indices + array([[0, 1, 2], + [0, 1, 2]]) + + The indices can be used as an index into an array. + + >>> x = np.arange(20).reshape(5, 4) + >>> row, col = np.indices((2, 3)) + >>> x[row, col] + array([[0, 1, 2], + [4, 5, 6]]) + + Note that it would be more straightforward in the above example to + extract the required elements directly with ``x[:2, :3]``. + + If sparse is set to true, the grid will be returned in a sparse + representation. + + >>> i, j = np.indices((2, 3), sparse=True) + >>> i.shape + (2, 1) + >>> j.shape + (1, 3) + >>> i # row indices + array([[0], + [1]]) + >>> j # column indices + array([[0, 1, 2]]) + + """ + dimensions = tuple(dimensions) + N = len(dimensions) + shape = (1,) * N + if sparse: + res = () + else: + res = empty((N,) + dimensions, dtype=dtype) + for i, dim in enumerate(dimensions): + idx = arange(dim, dtype=dtype).reshape( + shape[:i] + (dim,) + shape[i + 1:] + ) + if sparse: + res = res + (idx,) + else: + res[i] = idx + return res + + +@finalize_array_function_like +@set_module('numpy') +def fromfunction(function, shape, *, dtype=float, like=None, **kwargs): + """ + Construct an array by executing a function over each coordinate. + + The resulting array therefore has a value ``fn(x, y, z)`` at + coordinate ``(x, y, z)``. + + Parameters + ---------- + function : callable + The function is called with N parameters, where N is the rank of + `shape`. Each parameter represents the coordinates of the array + varying along a specific axis. For example, if `shape` + were ``(2, 2)``, then the parameters would be + ``array([[0, 0], [1, 1]])`` and ``array([[0, 1], [0, 1]])`` + shape : (N,) tuple of ints + Shape of the output array, which also determines the shape of + the coordinate arrays passed to `function`. + dtype : data-type, optional + Data-type of the coordinate arrays passed to `function`. + By default, `dtype` is float. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + fromfunction : any + The result of the call to `function` is passed back directly. + Therefore the shape of `fromfunction` is completely determined by + `function`. If `function` returns a scalar value, the shape of + `fromfunction` would not match the `shape` parameter. + + See Also + -------- + indices, meshgrid + + Notes + ----- + Keywords other than `dtype` and `like` are passed to `function`. + + Examples + -------- + >>> import numpy as np + >>> np.fromfunction(lambda i, j: i, (2, 2), dtype=float) + array([[0., 0.], + [1., 1.]]) + + >>> np.fromfunction(lambda i, j: j, (2, 2), dtype=float) + array([[0., 1.], + [0., 1.]]) + + >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int) + array([[ True, False, False], + [False, True, False], + [False, False, True]]) + + >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int) + array([[0, 1, 2], + [1, 2, 3], + [2, 3, 4]]) + + """ + if like is not None: + return _fromfunction_with_like( + like, function, shape, dtype=dtype, **kwargs) + + args = indices(shape, dtype=dtype) + return function(*args, **kwargs) + + +_fromfunction_with_like = array_function_dispatch()(fromfunction) + + +def _frombuffer(buf, dtype, shape, order, axis_order=None): + array = frombuffer(buf, dtype=dtype) + if order == 'K' and axis_order is not None: + return array.reshape(shape, order='C').transpose(axis_order) + return array.reshape(shape, order=order) + + +@set_module('numpy') +def isscalar(element): + """ + Returns True if the type of `element` is a scalar type. + + Parameters + ---------- + element : any + Input argument, can be of any type and shape. + + Returns + ------- + val : bool + True if `element` is a scalar type, False if it is not. + + See Also + -------- + ndim : Get the number of dimensions of an array + + Notes + ----- + If you need a stricter way to identify a *numerical* scalar, use + ``isinstance(x, numbers.Number)``, as that returns ``False`` for most + non-numerical elements such as strings. + + In most cases ``np.ndim(x) == 0`` should be used instead of this function, + as that will also return true for 0d arrays. This is how numpy overloads + functions in the style of the ``dx`` arguments to `gradient` and + the ``bins`` argument to `histogram`. Some key differences: + + +------------------------------------+---------------+-------------------+ + | x |``isscalar(x)``|``np.ndim(x) == 0``| + +====================================+===============+===================+ + | PEP 3141 numeric objects | ``True`` | ``True`` | + | (including builtins) | | | + +------------------------------------+---------------+-------------------+ + | builtin string and buffer objects | ``True`` | ``True`` | + +------------------------------------+---------------+-------------------+ + | other builtin objects, like | ``False`` | ``True`` | + | `pathlib.Path`, `Exception`, | | | + | the result of `re.compile` | | | + +------------------------------------+---------------+-------------------+ + | third-party objects like | ``False`` | ``True`` | + | `matplotlib.figure.Figure` | | | + +------------------------------------+---------------+-------------------+ + | zero-dimensional numpy arrays | ``False`` | ``True`` | + +------------------------------------+---------------+-------------------+ + | other numpy arrays | ``False`` | ``False`` | + +------------------------------------+---------------+-------------------+ + | `list`, `tuple`, and other | ``False`` | ``False`` | + | sequence objects | | | + +------------------------------------+---------------+-------------------+ + + Examples + -------- + >>> import numpy as np + + >>> np.isscalar(3.1) + True + + >>> np.isscalar(np.array(3.1)) + False + + >>> np.isscalar([3.1]) + False + + >>> np.isscalar(False) + True + + >>> np.isscalar('numpy') + True + + NumPy supports PEP 3141 numbers: + + >>> from fractions import Fraction + >>> np.isscalar(Fraction(5, 17)) + True + >>> from numbers import Number + >>> np.isscalar(Number()) + True + + """ + return (isinstance(element, generic) + or type(element) in ScalarType + or isinstance(element, numbers.Number)) + + +@set_module('numpy') +def binary_repr(num, width=None): + """ + Return the binary representation of the input number as a string. + + For negative numbers, if width is not given, a minus sign is added to the + front. If width is given, the two's complement of the number is + returned, with respect to that width. + + In a two's-complement system negative numbers are represented by the two's + complement of the absolute value. This is the most common method of + representing signed integers on computers [1]_. A N-bit two's-complement + system can represent every integer in the range + :math:`-2^{N-1}` to :math:`+2^{N-1}-1`. + + Parameters + ---------- + num : int + Only an integer decimal number can be used. + width : int, optional + The length of the returned string if `num` is positive, or the length + of the two's complement if `num` is negative, provided that `width` is + at least a sufficient number of bits for `num` to be represented in + the designated form. If the `width` value is insufficient, an error is + raised. + + Returns + ------- + bin : str + Binary representation of `num` or two's complement of `num`. + + See Also + -------- + base_repr: Return a string representation of a number in the given base + system. + bin: Python's built-in binary representation generator of an integer. + + Notes + ----- + `binary_repr` is equivalent to using `base_repr` with base 2, but about 25x + faster. + + References + ---------- + .. [1] Wikipedia, "Two's complement", + https://en.wikipedia.org/wiki/Two's_complement + + Examples + -------- + >>> import numpy as np + >>> np.binary_repr(3) + '11' + >>> np.binary_repr(-3) + '-11' + >>> np.binary_repr(3, width=4) + '0011' + + The two's complement is returned when the input number is negative and + width is specified: + + >>> np.binary_repr(-3, width=3) + '101' + >>> np.binary_repr(-3, width=5) + '11101' + + """ + def err_if_insufficient(width, binwidth): + if width is not None and width < binwidth: + raise ValueError( + f"Insufficient bit {width=} provided for {binwidth=}" + ) + + # Ensure that num is a Python integer to avoid overflow or unwanted + # casts to floating point. + num = operator.index(num) + + if num == 0: + return '0' * (width or 1) + + elif num > 0: + binary = f'{num:b}' + binwidth = len(binary) + outwidth = (binwidth if width is None + else builtins.max(binwidth, width)) + err_if_insufficient(width, binwidth) + return binary.zfill(outwidth) + + elif width is None: + return f'-{-num:b}' + + else: + poswidth = len(f'{-num:b}') + + # See gh-8679: remove extra digit + # for numbers at boundaries. + if 2**(poswidth - 1) == -num: + poswidth -= 1 + + twocomp = 2**(poswidth + 1) + num + binary = f'{twocomp:b}' + binwidth = len(binary) + + outwidth = builtins.max(binwidth, width) + err_if_insufficient(width, binwidth) + return '1' * (outwidth - binwidth) + binary + + +@set_module('numpy') +def base_repr(number, base=2, padding=0): + """ + Return a string representation of a number in the given base system. + + Parameters + ---------- + number : int + The value to convert. Positive and negative values are handled. + base : int, optional + Convert `number` to the `base` number system. The valid range is 2-36, + the default value is 2. + padding : int, optional + Number of zeros padded on the left. Default is 0 (no padding). + + Returns + ------- + out : str + String representation of `number` in `base` system. + + See Also + -------- + binary_repr : Faster version of `base_repr` for base 2. + + Examples + -------- + >>> import numpy as np + >>> np.base_repr(5) + '101' + >>> np.base_repr(6, 5) + '11' + >>> np.base_repr(7, base=5, padding=3) + '00012' + + >>> np.base_repr(10, base=16) + 'A' + >>> np.base_repr(32, base=16) + '20' + + """ + digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' + if base > len(digits): + raise ValueError("Bases greater than 36 not handled in base_repr.") + elif base < 2: + raise ValueError("Bases less than 2 not handled in base_repr.") + + num = abs(int(number)) + res = [] + while num: + res.append(digits[num % base]) + num //= base + if padding: + res.append('0' * padding) + if number < 0: + res.append('-') + return ''.join(reversed(res or '0')) + + +# These are all essentially abbreviations +# These might wind up in a special abbreviations module + + +def _maketup(descr, val): + dt = dtype(descr) + # Place val in all scalar tuples: + fields = dt.fields + if fields is None: + return val + else: + res = [_maketup(fields[name][0], val) for name in dt.names] + return tuple(res) + + +@finalize_array_function_like +@set_module('numpy') +def identity(n, dtype=None, *, like=None): + """ + Return the identity array. + + The identity array is a square array with ones on + the main diagonal. + + Parameters + ---------- + n : int + Number of rows (and columns) in `n` x `n` output. + dtype : data-type, optional + Data-type of the output. Defaults to ``float``. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + `n` x `n` array with its main diagonal set to one, + and all other elements 0. + + Examples + -------- + >>> import numpy as np + >>> np.identity(3) + array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) + + """ + if like is not None: + return _identity_with_like(like, n, dtype=dtype) + + from numpy import eye + return eye(n, dtype=dtype, like=like) + + +_identity_with_like = array_function_dispatch()(identity) + + +def _allclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None): + return (a, b, rtol, atol) + + +@array_function_dispatch(_allclose_dispatcher) +def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): + """ + Returns True if two arrays are element-wise equal within a tolerance. + + The tolerance values are positive, typically very small numbers. The + relative difference (`rtol` * abs(`b`)) and the absolute difference + `atol` are added together to compare against the absolute difference + between `a` and `b`. + + .. warning:: The default `atol` is not appropriate for comparing numbers + with magnitudes much smaller than one (see Notes). + + NaNs are treated as equal if they are in the same place and if + ``equal_nan=True``. Infs are treated as equal if they are in the same + place and of the same sign in both arrays. + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + rtol : array_like + The relative tolerance parameter (see Notes). + atol : array_like + The absolute tolerance parameter (see Notes). + equal_nan : bool + Whether to compare NaN's as equal. If True, NaN's in `a` will be + considered equal to NaN's in `b` in the output array. + + Returns + ------- + allclose : bool + Returns True if the two arrays are equal within the given + tolerance; False otherwise. + + See Also + -------- + isclose, all, any, equal + + Notes + ----- + If the following equation is element-wise True, then allclose returns + True.:: + + absolute(a - b) <= (atol + rtol * absolute(b)) + + The above equation is not symmetric in `a` and `b`, so that + ``allclose(a, b)`` might be different from ``allclose(b, a)`` in + some rare cases. + + The default value of `atol` is not appropriate when the reference value + `b` has magnitude smaller than one. For example, it is unlikely that + ``a = 1e-9`` and ``b = 2e-9`` should be considered "close", yet + ``allclose(1e-9, 2e-9)`` is ``True`` with default settings. Be sure + to select `atol` for the use case at hand, especially for defining the + threshold below which a non-zero value in `a` will be considered "close" + to a very small or zero value in `b`. + + The comparison of `a` and `b` uses standard broadcasting, which + means that `a` and `b` need not have the same shape in order for + ``allclose(a, b)`` to evaluate to True. The same is true for + `equal` but not `array_equal`. + + `allclose` is not defined for non-numeric data types. + `bool` is considered a numeric data-type for this purpose. + + Examples + -------- + >>> import numpy as np + >>> np.allclose([1e10,1e-7], [1.00001e10,1e-8]) + False + + >>> np.allclose([1e10,1e-8], [1.00001e10,1e-9]) + True + + >>> np.allclose([1e10,1e-8], [1.0001e10,1e-9]) + False + + >>> np.allclose([1.0, np.nan], [1.0, np.nan]) + False + + >>> np.allclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) + True + + + """ + res = all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)) + return builtins.bool(res) + + +def _isclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None): + return (a, b, rtol, atol) + + +@array_function_dispatch(_isclose_dispatcher) +def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): + """ + Returns a boolean array where two arrays are element-wise equal within a + tolerance. + + The tolerance values are positive, typically very small numbers. The + relative difference (`rtol` * abs(`b`)) and the absolute difference + `atol` are added together to compare against the absolute difference + between `a` and `b`. + + .. warning:: The default `atol` is not appropriate for comparing numbers + with magnitudes much smaller than one (see Notes). + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + rtol : array_like + The relative tolerance parameter (see Notes). + atol : array_like + The absolute tolerance parameter (see Notes). + equal_nan : bool + Whether to compare NaN's as equal. If True, NaN's in `a` will be + considered equal to NaN's in `b` in the output array. + + Returns + ------- + y : array_like + Returns a boolean array of where `a` and `b` are equal within the + given tolerance. If both `a` and `b` are scalars, returns a single + boolean value. + + See Also + -------- + allclose + math.isclose + + Notes + ----- + For finite values, isclose uses the following equation to test whether + two floating point values are equivalent.:: + + absolute(a - b) <= (atol + rtol * absolute(b)) + + Unlike the built-in `math.isclose`, the above equation is not symmetric + in `a` and `b` -- it assumes `b` is the reference value -- so that + `isclose(a, b)` might be different from `isclose(b, a)`. + + The default value of `atol` is not appropriate when the reference value + `b` has magnitude smaller than one. For example, it is unlikely that + ``a = 1e-9`` and ``b = 2e-9`` should be considered "close", yet + ``isclose(1e-9, 2e-9)`` is ``True`` with default settings. Be sure + to select `atol` for the use case at hand, especially for defining the + threshold below which a non-zero value in `a` will be considered "close" + to a very small or zero value in `b`. + + `isclose` is not defined for non-numeric data types. + :class:`bool` is considered a numeric data-type for this purpose. + + Examples + -------- + >>> import numpy as np + >>> np.isclose([1e10,1e-7], [1.00001e10,1e-8]) + array([ True, False]) + + >>> np.isclose([1e10,1e-8], [1.00001e10,1e-9]) + array([ True, True]) + + >>> np.isclose([1e10,1e-8], [1.0001e10,1e-9]) + array([False, True]) + + >>> np.isclose([1.0, np.nan], [1.0, np.nan]) + array([ True, False]) + + >>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) + array([ True, True]) + + >>> np.isclose([1e-8, 1e-7], [0.0, 0.0]) + array([ True, False]) + + >>> np.isclose([1e-100, 1e-7], [0.0, 0.0], atol=0.0) + array([False, False]) + + >>> np.isclose([1e-10, 1e-10], [1e-20, 0.0]) + array([ True, True]) + + >>> np.isclose([1e-10, 1e-10], [1e-20, 0.999999e-10], atol=0.0) + array([False, True]) + + """ + # Turn all but python scalars into arrays. + x, y, atol, rtol = ( + a if isinstance(a, (int, float, complex)) else asanyarray(a) + for a in (a, b, atol, rtol)) + + # Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT). + # This will cause casting of x later. Also, make sure to allow subclasses + # (e.g., for numpy.ma). + # NOTE: We explicitly allow timedelta, which used to work. This could + # possibly be deprecated. See also gh-18286. + # timedelta works if `atol` is an integer or also a timedelta. + # Although, the default tolerances are unlikely to be useful + if (dtype := getattr(y, "dtype", None)) is not None and dtype.kind != "m": + dt = multiarray.result_type(y, 1.) + y = asanyarray(y, dtype=dt) + elif isinstance(y, int): + y = float(y) + + # atol and rtol can be arrays + if not (np.all(np.isfinite(atol)) and np.all(np.isfinite(rtol))): + err_s = np.geterr()["invalid"] + err_msg = f"One of rtol or atol is not valid, atol: {atol}, rtol: {rtol}" + + if err_s == "warn": + warnings.warn(err_msg, RuntimeWarning, stacklevel=2) + elif err_s == "raise": + raise FloatingPointError(err_msg) + elif err_s == "print": + print(err_msg) + + with errstate(invalid='ignore'): + + result = (less_equal(abs(x - y), atol + rtol * abs(y)) + & isfinite(y) + | (x == y)) + if equal_nan: + result |= isnan(x) & isnan(y) + + return result[()] # Flatten 0d arrays to scalars + + +def _array_equal_dispatcher(a1, a2, equal_nan=None): + return (a1, a2) + + +_no_nan_types = { + # should use np.dtype.BoolDType, but as of writing + # that fails the reloading test. + type(dtype(nt.bool)), + type(dtype(nt.int8)), + type(dtype(nt.int16)), + type(dtype(nt.int32)), + type(dtype(nt.int64)), +} + + +def _dtype_cannot_hold_nan(dtype): + return type(dtype) in _no_nan_types + + +@array_function_dispatch(_array_equal_dispatcher) +def array_equal(a1, a2, equal_nan=False): + """ + True if two arrays have the same shape and elements, False otherwise. + + Parameters + ---------- + a1, a2 : array_like + Input arrays. + equal_nan : bool + Whether to compare NaN's as equal. If the dtype of a1 and a2 is + complex, values will be considered equal if either the real or the + imaginary component of a given value is ``nan``. + + Returns + ------- + b : bool + Returns True if the arrays are equal. + + See Also + -------- + allclose: Returns True if two arrays are element-wise equal within a + tolerance. + array_equiv: Returns True if input arrays are shape consistent and all + elements equal. + + Examples + -------- + >>> import numpy as np + + >>> np.array_equal([1, 2], [1, 2]) + True + + >>> np.array_equal(np.array([1, 2]), np.array([1, 2])) + True + + >>> np.array_equal([1, 2], [1, 2, 3]) + False + + >>> np.array_equal([1, 2], [1, 4]) + False + + >>> a = np.array([1, np.nan]) + >>> np.array_equal(a, a) + False + + >>> np.array_equal(a, a, equal_nan=True) + True + + When ``equal_nan`` is True, complex values with nan components are + considered equal if either the real *or* the imaginary components are nan. + + >>> a = np.array([1 + 1j]) + >>> b = a.copy() + >>> a.real = np.nan + >>> b.imag = np.nan + >>> np.array_equal(a, b, equal_nan=True) + True + """ + try: + a1, a2 = asarray(a1), asarray(a2) + except Exception: + return False + if a1.shape != a2.shape: + return False + if not equal_nan: + return builtins.bool((asanyarray(a1 == a2)).all()) + + if a1 is a2: + # nan will compare equal so an array will compare equal to itself. + return True + + cannot_have_nan = (_dtype_cannot_hold_nan(a1.dtype) + and _dtype_cannot_hold_nan(a2.dtype)) + if cannot_have_nan: + return builtins.bool(asarray(a1 == a2).all()) + + # Handling NaN values if equal_nan is True + a1nan, a2nan = isnan(a1), isnan(a2) + # NaN's occur at different locations + if not (a1nan == a2nan).all(): + return False + # Shapes of a1, a2 and masks are guaranteed to be consistent by this point + return builtins.bool((a1[~a1nan] == a2[~a1nan]).all()) + + +def _array_equiv_dispatcher(a1, a2): + return (a1, a2) + + +@array_function_dispatch(_array_equiv_dispatcher) +def array_equiv(a1, a2): + """ + Returns True if input arrays are shape consistent and all elements equal. + + Shape consistent means they are either the same shape, or one input array + can be broadcasted to create the same shape as the other one. + + Parameters + ---------- + a1, a2 : array_like + Input arrays. + + Returns + ------- + out : bool + True if equivalent, False otherwise. + + Examples + -------- + >>> import numpy as np + >>> np.array_equiv([1, 2], [1, 2]) + True + >>> np.array_equiv([1, 2], [1, 3]) + False + + Showing the shape equivalence: + + >>> np.array_equiv([1, 2], [[1, 2], [1, 2]]) + True + >>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]]) + False + + >>> np.array_equiv([1, 2], [[1, 2], [1, 3]]) + False + + """ + try: + a1, a2 = asarray(a1), asarray(a2) + except Exception: + return False + try: + multiarray.broadcast(a1, a2) + except Exception: + return False + + return builtins.bool(asanyarray(a1 == a2).all()) + + +def _astype_dispatcher(x, dtype, /, *, copy=None, device=None): + return (x, dtype) + + +@array_function_dispatch(_astype_dispatcher) +def astype(x, dtype, /, *, copy=True, device=None): + """ + Copies an array to a specified data type. + + This function is an Array API compatible alternative to + `numpy.ndarray.astype`. + + Parameters + ---------- + x : ndarray + Input NumPy array to cast. ``array_likes`` are explicitly not + supported here. + dtype : dtype + Data type of the result. + copy : bool, optional + Specifies whether to copy an array when the specified dtype matches + the data type of the input array ``x``. If ``True``, a newly allocated + array must always be returned. If ``False`` and the specified dtype + matches the data type of the input array, the input array must be + returned; otherwise, a newly allocated array must be returned. + Defaults to ``True``. + device : str, optional + The device on which to place the returned array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.1.0 + + Returns + ------- + out : ndarray + An array having the specified data type. + + See Also + -------- + ndarray.astype + + Examples + -------- + >>> import numpy as np + >>> arr = np.array([1, 2, 3]); arr + array([1, 2, 3]) + >>> np.astype(arr, np.float64) + array([1., 2., 3.]) + + Non-copy case: + + >>> arr = np.array([1, 2, 3]) + >>> arr_noncpy = np.astype(arr, arr.dtype, copy=False) + >>> np.shares_memory(arr, arr_noncpy) + True + + """ + if not (isinstance(x, np.ndarray) or isscalar(x)): + raise TypeError( + "Input should be a NumPy array or scalar. " + f"It is a {type(x)} instead." + ) + if device is not None and device != "cpu": + raise ValueError( + 'Device not understood. Only "cpu" is allowed, but received:' + f' {device}' + ) + return x.astype(dtype, copy=copy) + + +inf = PINF +nan = NAN +False_ = nt.bool(False) +True_ = nt.bool(True) + + +def extend_all(module): + existing = set(__all__) + mall = module.__all__ + for a in mall: + if a not in existing: + __all__.append(a) + + +from . import _asarray, _ufunc_config, arrayprint, fromnumeric +from ._asarray import * +from ._ufunc_config import * +from .arrayprint import * +from .fromnumeric import * +from .numerictypes import * +from .umath import * + +extend_all(fromnumeric) +extend_all(umath) +extend_all(numerictypes) +extend_all(arrayprint) +extend_all(_asarray) +extend_all(_ufunc_config) diff --git a/local_packages/numpy/_core/numeric.pyi b/local_packages/numpy/_core/numeric.pyi new file mode 100644 index 0000000..4ad2881 --- /dev/null +++ b/local_packages/numpy/_core/numeric.pyi @@ -0,0 +1,1276 @@ +from _typeshed import Incomplete +from builtins import bool as py_bool +from collections.abc import Callable, Iterable, Sequence +from typing import ( + Any, + Final, + Literal as L, + SupportsAbs, + SupportsIndex, + TypeAlias, + TypeGuard, + TypeVar, + overload, +) + +import numpy as np +from numpy import ( + False_, + True_, + _OrderCF, + _OrderKACF, + bitwise_not, + inf, + little_endian, + nan, + newaxis, + ufunc, +) +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _ArrayLikeNumber_co, + _ArrayLikeTD64_co, + _CDoubleCodes, + _Complex128Codes, + _DoubleCodes, + _DTypeLike, + _DTypeLikeBool, + _Float64Codes, + _IntCodes, + _NestedSequence, + _NumberLike_co, + _ScalarLike_co, + _Shape, + _ShapeLike, + _SupportsArray, + _SupportsArrayFunc, + _SupportsDType, +) + +from ._asarray import require +from ._ufunc_config import ( + errstate, + getbufsize, + geterr, + geterrcall, + setbufsize, + seterr, + seterrcall, +) +from .arrayprint import ( + array2string, + array_repr, + array_str, + format_float_positional, + format_float_scientific, + get_printoptions, + printoptions, + set_printoptions, +) +from .fromnumeric import ( + all, + amax, + amin, + any, + argmax, + argmin, + argpartition, + argsort, + around, + choose, + clip, + compress, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + diagonal, + matrix_transpose, + max, + mean, + min, + ndim, + nonzero, + partition, + prod, + ptp, + put, + ravel, + repeat, + reshape, + resize, + round, + searchsorted, + shape, + size, + sort, + squeeze, + std, + sum, + swapaxes, + take, + trace, + transpose, + var, +) +from .multiarray import ( + ALLOW_THREADS as ALLOW_THREADS, + BUFSIZE as BUFSIZE, + CLIP as CLIP, + MAXDIMS as MAXDIMS, + MAY_SHARE_BOUNDS as MAY_SHARE_BOUNDS, + MAY_SHARE_EXACT as MAY_SHARE_EXACT, + RAISE as RAISE, + WRAP as WRAP, + _Array, + _ConstructorEmpty, + arange, + array, + asanyarray, + asarray, + ascontiguousarray, + asfortranarray, + broadcast, + can_cast, + concatenate, + copyto, + dot, + dtype, + empty, + empty_like, + flatiter, + from_dlpack, + frombuffer, + fromfile, + fromiter, + fromstring, + inner, + lexsort, + matmul, + may_share_memory, + min_scalar_type, + ndarray, + nditer, + nested_iters, + normalize_axis_index as normalize_axis_index, + promote_types, + putmask, + result_type, + shares_memory, + vdot, + where, + zeros, +) +from .numerictypes import ( + ScalarType, + bool, + bool_, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + cdouble, + character, + clongdouble, + complex64, + complex128, + complex192, + complex256, + complexfloating, + csingle, + datetime64, + datetime_as_string, + datetime_data, + double, + flexible, + float16, + float32, + float64, + float96, + float128, + floating, + generic, + half, + inexact, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + intp, + is_busday, + isdtype, + issubdtype, + long, + longdouble, + longlong, + number, + object_, + short, + signedinteger, + single, + str_, + timedelta64, + typecodes, + ubyte, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + unsignedinteger, + ushort, + void, +) +from .umath import ( + absolute, + add, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + bitwise_and, + bitwise_count, + bitwise_or, + bitwise_xor, + cbrt, + ceil, + conj, + conjugate, + copysign, + cos, + cosh, + deg2rad, + degrees, + divide, + divmod, + e, + equal, + euler_gamma, + exp, + exp2, + expm1, + fabs, + float_power, + floor, + floor_divide, + fmax, + fmin, + fmod, + frexp, + frompyfunc, + gcd, + greater, + greater_equal, + heaviside, + hypot, + invert, + isfinite, + isinf, + isnan, + isnat, + lcm, + ldexp, + left_shift, + less, + less_equal, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + matvec, + maximum, + minimum, + mod, + modf, + multiply, + negative, + nextafter, + not_equal, + pi, + positive, + power, + rad2deg, + radians, + reciprocal, + remainder, + right_shift, + rint, + sign, + signbit, + sin, + sinh, + spacing, + sqrt, + square, + subtract, + tan, + tanh, + true_divide, + trunc, + vecdot, + vecmat, +) + +__all__ = [ + "False_", + "ScalarType", + "True_", + "absolute", + "add", + "all", + "allclose", + "amax", + "amin", + "any", + "arange", + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctan2", + "arctanh", + "argmax", + "argmin", + "argpartition", + "argsort", + "argwhere", + "around", + "array", + "array2string", + "array_equal", + "array_equiv", + "array_repr", + "array_str", + "asanyarray", + "asarray", + "ascontiguousarray", + "asfortranarray", + "astype", + "base_repr", + "binary_repr", + "bitwise_and", + "bitwise_count", + "bitwise_not", + "bitwise_or", + "bitwise_xor", + "bool", + "bool_", + "broadcast", + "busday_count", + "busday_offset", + "busdaycalendar", + "byte", + "bytes_", + "can_cast", + "cbrt", + "cdouble", + "ceil", + "character", + "choose", + "clip", + "clongdouble", + "complex64", + "complex128", + "complex192", + "complex256", + "complexfloating", + "compress", + "concatenate", + "conj", + "conjugate", + "convolve", + "copysign", + "copyto", + "correlate", + "cos", + "cosh", + "count_nonzero", + "cross", + "csingle", + "cumprod", + "cumsum", + "cumulative_prod", + "cumulative_sum", + "datetime64", + "datetime_as_string", + "datetime_data", + "deg2rad", + "degrees", + "diagonal", + "divide", + "divmod", + "dot", + "double", + "dtype", + "e", + "empty", + "empty_like", + "equal", + "errstate", + "euler_gamma", + "exp", + "exp2", + "expm1", + "fabs", + "flatiter", + "flatnonzero", + "flexible", + "float16", + "float32", + "float64", + "float96", + "float128", + "float_power", + "floating", + "floor", + "floor_divide", + "fmax", + "fmin", + "fmod", + "format_float_positional", + "format_float_scientific", + "frexp", + "from_dlpack", + "frombuffer", + "fromfile", + "fromfunction", + "fromiter", + "frompyfunc", + "fromstring", + "full", + "full_like", + "gcd", + "generic", + "get_printoptions", + "getbufsize", + "geterr", + "geterrcall", + "greater", + "greater_equal", + "half", + "heaviside", + "hypot", + "identity", + "indices", + "inexact", + "inf", + "inner", + "int8", + "int16", + "int32", + "int64", + "int_", + "intc", + "integer", + "intp", + "invert", + "is_busday", + "isclose", + "isdtype", + "isfinite", + "isfortran", + "isinf", + "isnan", + "isnat", + "isscalar", + "issubdtype", + "lcm", + "ldexp", + "left_shift", + "less", + "less_equal", + "lexsort", + "little_endian", + "log", + "log1p", + "log2", + "log10", + "logaddexp", + "logaddexp2", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "long", + "longdouble", + "longlong", + "matmul", + "matrix_transpose", + "matvec", + "max", + "maximum", + "may_share_memory", + "mean", + "min", + "min_scalar_type", + "minimum", + "mod", + "modf", + "moveaxis", + "multiply", + "nan", + "ndarray", + "ndim", + "nditer", + "negative", + "nested_iters", + "newaxis", + "nextafter", + "nonzero", + "not_equal", + "number", + "object_", + "ones", + "ones_like", + "outer", + "partition", + "pi", + "positive", + "power", + "printoptions", + "prod", + "promote_types", + "ptp", + "put", + "putmask", + "rad2deg", + "radians", + "ravel", + "reciprocal", + "remainder", + "repeat", + "require", + "reshape", + "resize", + "result_type", + "right_shift", + "rint", + "roll", + "rollaxis", + "round", + "searchsorted", + "set_printoptions", + "setbufsize", + "seterr", + "seterrcall", + "shape", + "shares_memory", + "short", + "sign", + "signbit", + "signedinteger", + "sin", + "single", + "sinh", + "size", + "sort", + "spacing", + "sqrt", + "square", + "squeeze", + "std", + "str_", + "subtract", + "sum", + "swapaxes", + "take", + "tan", + "tanh", + "tensordot", + "timedelta64", + "trace", + "transpose", + "true_divide", + "trunc", + "typecodes", + "ubyte", + "ufunc", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintc", + "uintp", + "ulong", + "ulonglong", + "unsignedinteger", + "ushort", + "var", + "vdot", + "vecdot", + "vecmat", + "void", + "where", + "zeros", + "zeros_like", +] + +_T = TypeVar("_T") +_ScalarT = TypeVar("_ScalarT", bound=generic) +_NumberObjectT = TypeVar("_NumberObjectT", bound=number | object_) +_NumericScalarT = TypeVar("_NumericScalarT", bound=number | timedelta64 | object_) +_DTypeT = TypeVar("_DTypeT", bound=dtype) +_ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) +_ShapeT = TypeVar("_ShapeT", bound=_Shape) + +_AnyShapeT = TypeVar( + "_AnyShapeT", + tuple[()], + tuple[int], + tuple[int, int], + tuple[int, int, int], + tuple[int, int, int, int], + tuple[int, ...], +) +_AnyNumericScalarT = TypeVar( + "_AnyNumericScalarT", + np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64, + np.float16, np.float32, np.float64, np.longdouble, + np.complex64, np.complex128, np.clongdouble, + np.timedelta64, + np.object_, +) + +_CorrelateMode: TypeAlias = L["valid", "same", "full"] + +_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] +_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] +_Array3D: TypeAlias = np.ndarray[tuple[int, int, int], np.dtype[_ScalarT]] +_Array4D: TypeAlias = np.ndarray[tuple[int, int, int, int], np.dtype[_ScalarT]] + +_Int_co: TypeAlias = np.integer | np.bool +_Float_co: TypeAlias = np.floating | _Int_co +_Number_co: TypeAlias = np.number | np.bool +_TD64_co: TypeAlias = np.timedelta64 | _Int_co + +_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_ScalarT]] | Sequence[_ScalarT] +_ArrayLike1DBool_co: TypeAlias = _SupportsArray[np.dtype[np.bool]] | Sequence[py_bool | np.bool] +_ArrayLike1DInt_co: TypeAlias = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co] +_ArrayLike1DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co] +_ArrayLike1DNumber_co: TypeAlias = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co] +_ArrayLike1DTD64_co: TypeAlias = _ArrayLike1D[_TD64_co] +_ArrayLike1DObject_co: TypeAlias = _ArrayLike1D[np.object_] + +_DTypeLikeInt: TypeAlias = type[int] | _IntCodes +_DTypeLikeFloat64: TypeAlias = type[float] | _Float64Codes | _DoubleCodes +_DTypeLikeComplex128: TypeAlias = type[complex] | _Complex128Codes | _CDoubleCodes + +### + +# keep in sync with `ones_like` +@overload +def zeros_like( + a: _ArrayT, + dtype: None = None, + order: _OrderKACF = "K", + subok: L[True] = True, + shape: None = None, + *, + device: L["cpu"] | None = None, +) -> _ArrayT: ... +@overload +def zeros_like( + a: _ArrayLike[_ScalarT], + dtype: None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, + *, + device: L["cpu"] | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def zeros_like( + a: object, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, + *, + device: L["cpu"] | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def zeros_like( + a: object, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, + *, + device: L["cpu"] | None = None, +) -> NDArray[Any]: ... + +ones: Final[_ConstructorEmpty] + +# keep in sync with `zeros_like` +@overload +def ones_like( + a: _ArrayT, + dtype: None = None, + order: _OrderKACF = "K", + subok: L[True] = True, + shape: None = None, + *, + device: L["cpu"] | None = None, +) -> _ArrayT: ... +@overload +def ones_like( + a: _ArrayLike[_ScalarT], + dtype: None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, + *, + device: L["cpu"] | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def ones_like( + a: object, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, + *, + device: L["cpu"] | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def ones_like( + a: object, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, + *, + device: L["cpu"] | None = None, +) -> NDArray[Any]: ... + +# TODO: Add overloads for bool, int, float, complex, str, bytes, and memoryview +# 1-D shape +@overload +def full( + shape: SupportsIndex, + fill_value: _ScalarT, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[tuple[int], _ScalarT]: ... +@overload +def full( + shape: SupportsIndex, + fill_value: Any, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> np.ndarray[tuple[int], _DTypeT]: ... +@overload +def full( + shape: SupportsIndex, + fill_value: Any, + dtype: type[_ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[tuple[int], _ScalarT]: ... +@overload +def full( + shape: SupportsIndex, + fill_value: Any, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[tuple[int], Any]: ... +# known shape +@overload +def full( + shape: _AnyShapeT, + fill_value: _ScalarT, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[_AnyShapeT, _ScalarT]: ... +@overload +def full( + shape: _AnyShapeT, + fill_value: Any, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> np.ndarray[_AnyShapeT, _DTypeT]: ... +@overload +def full( + shape: _AnyShapeT, + fill_value: Any, + dtype: type[_ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[_AnyShapeT, _ScalarT]: ... +@overload +def full( + shape: _AnyShapeT, + fill_value: Any, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array[_AnyShapeT, Any]: ... +# unknown shape +@overload +def full( + shape: _ShapeLike, + fill_value: _ScalarT, + dtype: None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def full( + shape: _ShapeLike, + fill_value: Any, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> np.ndarray[Any, _DTypeT]: ... +@overload +def full( + shape: _ShapeLike, + fill_value: Any, + dtype: type[_ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def full( + shape: _ShapeLike, + fill_value: Any, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[Any]: ... + +@overload +def full_like( + a: _ArrayT, + fill_value: object, + dtype: None = None, + order: _OrderKACF = "K", + subok: L[True] = True, + shape: None = None, + *, + device: L["cpu"] | None = None, +) -> _ArrayT: ... +@overload +def full_like( + a: _ArrayLike[_ScalarT], + fill_value: object, + dtype: None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, + *, + device: L["cpu"] | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def full_like( + a: object, + fill_value: object, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, + *, + device: L["cpu"] | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def full_like( + a: object, + fill_value: object, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, + *, + device: L["cpu"] | None = None, +) -> NDArray[Any]: ... + +# +@overload +def count_nonzero(a: ArrayLike, axis: None = None, *, keepdims: L[False] = False) -> np.intp: ... +@overload +def count_nonzero(a: _ScalarLike_co, axis: _ShapeLike | None = None, *, keepdims: L[True]) -> np.intp: ... +@overload +def count_nonzero( + a: NDArray[Any] | _NestedSequence[ArrayLike], axis: _ShapeLike | None = None, *, keepdims: L[True] +) -> NDArray[np.intp]: ... +@overload +def count_nonzero(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: py_bool = False) -> Any: ... + +# +def isfortran(a: ndarray | generic) -> py_bool: ... + +# +def argwhere(a: ArrayLike) -> _Array2D[np.intp]: ... +def flatnonzero(a: ArrayLike) -> _Array1D[np.intp]: ... + +# keep in sync with `convolve` +@overload +def correlate( + a: _ArrayLike1D[_AnyNumericScalarT], v: _ArrayLike1D[_AnyNumericScalarT], mode: _CorrelateMode = "valid" +) -> _Array1D[_AnyNumericScalarT]: ... +@overload +def correlate(a: _ArrayLike1DBool_co, v: _ArrayLike1DBool_co, mode: _CorrelateMode = "valid") -> _Array1D[np.bool]: ... +@overload +def correlate(a: _ArrayLike1DInt_co, v: _ArrayLike1DInt_co, mode: _CorrelateMode = "valid") -> _Array1D[np.int_ | Any]: ... +@overload +def correlate(a: _ArrayLike1DFloat_co, v: _ArrayLike1DFloat_co, mode: _CorrelateMode = "valid") -> _Array1D[np.float64 | Any]: ... +@overload +def correlate( + a: _ArrayLike1DNumber_co, v: _ArrayLike1DNumber_co, mode: _CorrelateMode = "valid" +) -> _Array1D[np.complex128 | Any]: ... +@overload +def correlate( + a: _ArrayLike1DTD64_co, v: _ArrayLike1DTD64_co, mode: _CorrelateMode = "valid" +) -> _Array1D[np.timedelta64 | Any]: ... + +# keep in sync with `correlate` +@overload +def convolve( + a: _ArrayLike1D[_AnyNumericScalarT], v: _ArrayLike1D[_AnyNumericScalarT], mode: _CorrelateMode = "valid" +) -> _Array1D[_AnyNumericScalarT]: ... +@overload +def convolve(a: _ArrayLike1DBool_co, v: _ArrayLike1DBool_co, mode: _CorrelateMode = "valid") -> _Array1D[np.bool]: ... +@overload +def convolve(a: _ArrayLike1DInt_co, v: _ArrayLike1DInt_co, mode: _CorrelateMode = "valid") -> _Array1D[np.int_ | Any]: ... +@overload +def convolve(a: _ArrayLike1DFloat_co, v: _ArrayLike1DFloat_co, mode: _CorrelateMode = "valid") -> _Array1D[np.float64 | Any]: ... +@overload +def convolve( + a: _ArrayLike1DNumber_co, v: _ArrayLike1DNumber_co, mode: _CorrelateMode = "valid" +) -> _Array1D[np.complex128 | Any]: ... +@overload +def convolve( + a: _ArrayLike1DTD64_co, v: _ArrayLike1DTD64_co, mode: _CorrelateMode = "valid" +) -> _Array1D[np.timedelta64 | Any]: ... + +# keep roughly in sync with `convolve` and `correlate`, but for 2-D output and an additional `out` overload +@overload +def outer( + a: _ArrayLike[_AnyNumericScalarT], b: _ArrayLike[_AnyNumericScalarT], out: None = None +) -> _Array2D[_AnyNumericScalarT]: ... +@overload +def outer(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, out: None = None) -> _Array2D[np.bool]: ... +@overload +def outer(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, out: None = None) -> _Array2D[np.int_ | Any]: ... +@overload +def outer(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, out: None = None) -> _Array2D[np.float64 | Any]: ... +@overload +def outer(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, out: None = None) -> _Array2D[np.complex128 | Any]: ... +@overload +def outer(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, out: None = None) -> _Array2D[np.timedelta64 | Any]: ... +@overload +def outer(a: _ArrayLikeNumber_co | _ArrayLikeTD64_co, b: _ArrayLikeNumber_co | _ArrayLikeTD64_co, out: _ArrayT) -> _ArrayT: ... + +# keep in sync with numpy.linalg._linalg.tensordot (ignoring `/, *`) +@overload +def tensordot( + a: _ArrayLike[_AnyNumericScalarT], b: _ArrayLike[_AnyNumericScalarT], axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[_AnyNumericScalarT]: ... +@overload +def tensordot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2) -> NDArray[np.bool]: ... +@overload +def tensordot( + a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[np.int_ | Any]: ... +@overload +def tensordot( + a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[np.float64 | Any]: ... +@overload +def tensordot( + a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, axes: int | tuple[_ShapeLike, _ShapeLike] = 2 +) -> NDArray[np.complex128 | Any]: ... + +# +@overload +def cross( + a: _ArrayLike[_AnyNumericScalarT], + b: _ArrayLike[_AnyNumericScalarT], + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, +) -> NDArray[_AnyNumericScalarT]: ... +@overload +def cross( + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, +) -> NDArray[np.int_ | Any]: ... +@overload +def cross( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, +) -> NDArray[np.float64 | Any]: ... +@overload +def cross( + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, + axisa: int = -1, + axisb: int = -1, + axisc: int = -1, + axis: int | None = None, +) -> NDArray[np.complex128 | Any]: ... + +# +@overload +def roll(a: _ArrayT, shift: _ShapeLike, axis: _ShapeLike | None = None) -> _ArrayT: ... +@overload +def roll(a: _ArrayLike[_ScalarT], shift: _ShapeLike, axis: _ShapeLike | None = None) -> NDArray[_ScalarT]: ... +@overload +def roll(a: ArrayLike, shift: _ShapeLike, axis: _ShapeLike | None = None) -> NDArray[Any]: ... + +# +def rollaxis(a: _ArrayT, axis: int, start: int = 0) -> _ArrayT: ... +def moveaxis(a: _ArrayT, source: _ShapeLike, destination: _ShapeLike) -> _ArrayT: ... +def normalize_axis_tuple( + axis: int | Iterable[int], + ndim: int, + argname: str | None = None, + allow_duplicate: py_bool | None = False, +) -> tuple[int, ...]: ... + +# +@overload # 0d, dtype=int (default), sparse=False (default) +def indices(dimensions: tuple[()], dtype: type[int] = int, sparse: L[False] = False) -> _Array1D[np.intp]: ... +@overload # 0d, dtype=, sparse=True +def indices(dimensions: tuple[()], dtype: DTypeLike | None = int, *, sparse: L[True]) -> tuple[()]: ... +@overload # 0d, dtype=, sparse=False (default) +def indices(dimensions: tuple[()], dtype: _DTypeLike[_ScalarT], sparse: L[False] = False) -> _Array1D[_ScalarT]: ... +@overload # 0d, dtype=, sparse=False (default) +def indices(dimensions: tuple[()], dtype: DTypeLike, sparse: L[False] = False) -> _Array1D[Any]: ... +@overload # 1d, dtype=int (default), sparse=False (default) +def indices(dimensions: tuple[int], dtype: type[int] = int, sparse: L[False] = False) -> _Array2D[np.intp]: ... +@overload # 1d, dtype=int (default), sparse=True +def indices(dimensions: tuple[int], dtype: type[int] = int, *, sparse: L[True]) -> tuple[_Array1D[np.intp]]: ... +@overload # 1d, dtype=, sparse=False (default) +def indices(dimensions: tuple[int], dtype: _DTypeLike[_ScalarT], sparse: L[False] = False) -> _Array2D[_ScalarT]: ... +@overload # 1d, dtype=, sparse=True +def indices(dimensions: tuple[int], dtype: _DTypeLike[_ScalarT], sparse: L[True]) -> tuple[_Array1D[_ScalarT]]: ... +@overload # 1d, dtype=, sparse=False (default) +def indices(dimensions: tuple[int], dtype: DTypeLike, sparse: L[False] = False) -> _Array2D[Any]: ... +@overload # 1d, dtype=, sparse=True +def indices(dimensions: tuple[int], dtype: DTypeLike, sparse: L[True]) -> tuple[_Array1D[Any]]: ... +@overload # 2d, dtype=int (default), sparse=False (default) +def indices(dimensions: tuple[int, int], dtype: type[int] = int, sparse: L[False] = False) -> _Array3D[np.intp]: ... +@overload # 2d, dtype=int (default), sparse=True +def indices( + dimensions: tuple[int, int], dtype: type[int] = int, *, sparse: L[True] +) -> tuple[_Array2D[np.intp], _Array2D[np.intp]]: ... +@overload # 2d, dtype=, sparse=False (default) +def indices(dimensions: tuple[int, int], dtype: _DTypeLike[_ScalarT], sparse: L[False] = False) -> _Array3D[_ScalarT]: ... +@overload # 2d, dtype=, sparse=True +def indices( + dimensions: tuple[int, int], dtype: _DTypeLike[_ScalarT], sparse: L[True] +) -> tuple[_Array2D[_ScalarT], _Array2D[_ScalarT]]: ... +@overload # 2d, dtype=, sparse=False (default) +def indices(dimensions: tuple[int, int], dtype: DTypeLike, sparse: L[False] = False) -> _Array3D[Any]: ... +@overload # 2d, dtype=, sparse=True +def indices(dimensions: tuple[int, int], dtype: DTypeLike, sparse: L[True]) -> tuple[_Array2D[Any], _Array2D[Any]]: ... +@overload # ?d, dtype=int (default), sparse=False (default) +def indices(dimensions: Sequence[int], dtype: type[int] = int, sparse: L[False] = False) -> NDArray[np.intp]: ... +@overload # ?d, dtype=int (default), sparse=True +def indices(dimensions: Sequence[int], dtype: type[int] = int, *, sparse: L[True]) -> tuple[NDArray[np.intp], ...]: ... +@overload # ?d, dtype=, sparse=False (default) +def indices(dimensions: Sequence[int], dtype: _DTypeLike[_ScalarT], sparse: L[False] = False) -> NDArray[_ScalarT]: ... +@overload # ?d, dtype=, sparse=True +def indices(dimensions: Sequence[int], dtype: _DTypeLike[_ScalarT], sparse: L[True]) -> tuple[NDArray[_ScalarT], ...]: ... +@overload # ?d, dtype=, sparse=False (default) +def indices(dimensions: Sequence[int], dtype: DTypeLike, sparse: L[False] = False) -> ndarray: ... +@overload # ?d, dtype=, sparse=True +def indices(dimensions: Sequence[int], dtype: DTypeLike, sparse: L[True]) -> tuple[ndarray, ...]: ... + +# +def fromfunction( + function: Callable[..., _T], + shape: Sequence[int], + *, + dtype: DTypeLike | None = float, + like: _SupportsArrayFunc | None = None, + **kwargs: object, +) -> _T: ... + +# +def isscalar(element: object) -> TypeGuard[generic | complex | str | bytes | memoryview]: ... + +# +def binary_repr(num: SupportsIndex, width: int | None = None) -> str: ... +def base_repr(number: SupportsAbs[float], base: float = 2, padding: SupportsIndex | None = 0) -> str: ... + +# +@overload # dtype: None (default) +def identity(n: int, dtype: None = None, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.float64]: ... +@overload # dtype: known scalar type +def identity(n: int, dtype: _DTypeLike[_ScalarT], *, like: _SupportsArrayFunc | None = None) -> _Array2D[_ScalarT]: ... +@overload # dtype: like bool +def identity(n: int, dtype: _DTypeLikeBool, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.bool]: ... +@overload # dtype: like int_ +def identity(n: int, dtype: _DTypeLikeInt, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.int_ | Any]: ... +@overload # dtype: like float64 +def identity(n: int, dtype: _DTypeLikeFloat64, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.float64 | Any]: ... +@overload # dtype: like complex128 +def identity(n: int, dtype: _DTypeLikeComplex128, *, like: _SupportsArrayFunc | None = None) -> _Array2D[np.complex128 | Any]: ... +@overload # dtype: unknown +def identity(n: int, dtype: DTypeLike, *, like: _SupportsArrayFunc | None = None) -> _Array2D[Incomplete]: ... + +# +def allclose( + a: ArrayLike, + b: ArrayLike, + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> py_bool: ... + +# +@overload # scalar, scalar +def isclose( + a: _NumberLike_co, + b: _NumberLike_co, + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> np.bool: ... +@overload # known shape, same shape or scalar +def isclose( + a: np.ndarray[_ShapeT], + b: np.ndarray[_ShapeT] | _NumberLike_co, + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> np.ndarray[_ShapeT, np.dtype[np.bool]]: ... +@overload # same shape or scalar, known shape +def isclose( + a: np.ndarray[_ShapeT] | _NumberLike_co, + b: np.ndarray[_ShapeT], + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> np.ndarray[_ShapeT, np.dtype[np.bool]]: ... +@overload # 1d sequence, <=1d array-like +def isclose( + a: Sequence[_NumberLike_co], + b: Sequence[_NumberLike_co] | _NumberLike_co | np.ndarray[tuple[int]], + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ... +@overload # <=1d array-like, 1d sequence +def isclose( + a: Sequence[_NumberLike_co] | _NumberLike_co | np.ndarray[tuple[int]], + b: Sequence[_NumberLike_co], + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ... +@overload # 2d sequence, <=2d array-like +def isclose( + a: Sequence[Sequence[_NumberLike_co]], + b: Sequence[Sequence[_NumberLike_co]] | Sequence[_NumberLike_co] | _NumberLike_co | np.ndarray[tuple[int] | tuple[int, int]], + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ... +@overload # <=2d array-like, 2d sequence +def isclose( + b: Sequence[Sequence[_NumberLike_co]] | Sequence[_NumberLike_co] | _NumberLike_co | np.ndarray[tuple[int] | tuple[int, int]], + a: Sequence[Sequence[_NumberLike_co]], + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ... +@overload # unknown shape, unknown shape +def isclose( + a: ArrayLike, + b: ArrayLike, + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> NDArray[np.bool] | Any: ... + +# +def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: py_bool = False) -> py_bool: ... +def array_equiv(a1: ArrayLike, a2: ArrayLike) -> py_bool: ... + +# +@overload +def astype( + x: ndarray[_ShapeT], + dtype: _DTypeLike[_ScalarT], + /, + *, + copy: py_bool = True, + device: L["cpu"] | None = None, +) -> ndarray[_ShapeT, dtype[_ScalarT]]: ... +@overload +def astype( + x: ndarray[_ShapeT], + dtype: DTypeLike | None, + /, + *, + copy: py_bool = True, + device: L["cpu"] | None = None, +) -> ndarray[_ShapeT]: ... diff --git a/local_packages/numpy/_core/numerictypes.py b/local_packages/numpy/_core/numerictypes.py new file mode 100644 index 0000000..265ad4f --- /dev/null +++ b/local_packages/numpy/_core/numerictypes.py @@ -0,0 +1,633 @@ +""" +numerictypes: Define the numeric type objects + +This module is designed so "from numerictypes import \\*" is safe. +Exported symbols include: + + Dictionary with all registered number types (including aliases): + sctypeDict + + Type objects (not all will be available, depends on platform): + see variable sctypes for which ones you have + + Bit-width names + + int8 int16 int32 int64 + uint8 uint16 uint32 uint64 + float16 float32 float64 float96 float128 + complex64 complex128 complex192 complex256 + datetime64 timedelta64 + + c-based names + + bool + + object_ + + void, str_ + + byte, ubyte, + short, ushort + intc, uintc, + intp, uintp, + int_, uint, + longlong, ulonglong, + + single, csingle, + double, cdouble, + longdouble, clongdouble, + + As part of the type-hierarchy: xx -- is bit-width + + generic + +-> bool (kind=b) + +-> number + | +-> integer + | | +-> signedinteger (intxx) (kind=i) + | | | byte + | | | short + | | | intc + | | | intp + | | | int_ + | | | longlong + | | \\-> unsignedinteger (uintxx) (kind=u) + | | ubyte + | | ushort + | | uintc + | | uintp + | | uint + | | ulonglong + | +-> inexact + | +-> floating (floatxx) (kind=f) + | | half + | | single + | | double + | | longdouble + | \\-> complexfloating (complexxx) (kind=c) + | csingle + | cdouble + | clongdouble + +-> flexible + | +-> character + | | bytes_ (kind=S) + | | str_ (kind=U) + | | + | \\-> void (kind=V) + \\-> object_ (not used much) (kind=O) + +""" +import numbers +import warnings + +from numpy._utils import set_module + +from . import multiarray as ma +from .multiarray import ( + busday_count, + busday_offset, + busdaycalendar, + datetime_as_string, + datetime_data, + dtype, + is_busday, + ndarray, +) + +# we add more at the bottom +__all__ = [ + 'ScalarType', 'typecodes', 'issubdtype', 'datetime_data', + 'datetime_as_string', 'busday_offset', 'busday_count', + 'is_busday', 'busdaycalendar', 'isdtype' +] + +# we don't need all these imports, but we need to keep them for compatibility +# for users using np._core.numerictypes.UPPER_TABLE +# we don't export these for import *, but we do want them accessible +# as numerictypes.bool, etc. +from builtins import bool, bytes, complex, float, int, object, str # noqa: F401, UP029 + +from ._dtype import _kind_name +from ._string_helpers import ( # noqa: F401 + LOWER_TABLE, + UPPER_TABLE, + english_capitalize, + english_lower, + english_upper, +) +from ._type_aliases import allTypes, sctypeDict, sctypes + +# We use this later +generic = allTypes['generic'] + +genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16', + 'int32', 'uint32', 'int64', 'uint64', + 'float16', 'float32', 'float64', 'float96', 'float128', + 'complex64', 'complex128', 'complex192', 'complex256', + 'object'] + +@set_module('numpy') +def maximum_sctype(t): + """ + Return the scalar type of highest precision of the same kind as the input. + + .. deprecated:: 2.0 + Use an explicit dtype like int64 or float64 instead. + + Parameters + ---------- + t : dtype or dtype specifier + The input data type. This can be a `dtype` object or an object that + is convertible to a `dtype`. + + Returns + ------- + out : dtype + The highest precision data type of the same kind (`dtype.kind`) as `t`. + + See Also + -------- + obj2sctype, mintypecode, sctype2char + dtype + + Examples + -------- + >>> from numpy._core.numerictypes import maximum_sctype + >>> maximum_sctype(int) + + >>> maximum_sctype(np.uint8) + + >>> maximum_sctype(complex) + # may vary + + >>> maximum_sctype(str) + + + >>> maximum_sctype('i2') + + >>> maximum_sctype('f4') + # may vary + + """ + + # Deprecated in NumPy 2.0, 2023-07-11 + warnings.warn( + "`maximum_sctype` is deprecated. Use an explicit dtype like int64 " + "or float64 instead. (deprecated in NumPy 2.0)", + DeprecationWarning, + stacklevel=2 + ) + + g = obj2sctype(t) + if g is None: + return t + t = g + base = _kind_name(dtype(t)) + if base in sctypes: + return sctypes[base][-1] + else: + return t + + +@set_module('numpy') +def issctype(rep): + """ + Determines whether the given object represents a scalar data-type. + + Parameters + ---------- + rep : any + If `rep` is an instance of a scalar dtype, True is returned. If not, + False is returned. + + Returns + ------- + out : bool + Boolean result of check whether `rep` is a scalar dtype. + + See Also + -------- + issubsctype, issubdtype, obj2sctype, sctype2char + + Examples + -------- + >>> from numpy._core.numerictypes import issctype + >>> issctype(np.int32) + True + >>> issctype(list) + False + >>> issctype(1.1) + False + + Strings are also a scalar type: + + >>> issctype(np.dtype('str')) + True + + """ + if not isinstance(rep, (type, dtype)): + return False + try: + res = obj2sctype(rep) + if res and res != object_: + return True + else: + return False + except Exception: + return False + + +def obj2sctype(rep, default=None): + """ + Return the scalar dtype or NumPy equivalent of Python type of an object. + + Parameters + ---------- + rep : any + The object of which the type is returned. + default : any, optional + If given, this is returned for objects whose types can not be + determined. If not given, None is returned for those objects. + + Returns + ------- + dtype : dtype or Python type + The data type of `rep`. + + See Also + -------- + sctype2char, issctype, issubsctype, issubdtype + + Examples + -------- + >>> from numpy._core.numerictypes import obj2sctype + >>> obj2sctype(np.int32) + + >>> obj2sctype(np.array([1., 2.])) + + >>> obj2sctype(np.array([1.j])) + + + >>> obj2sctype(dict) + + >>> obj2sctype('string') + + >>> obj2sctype(1, default=list) + + + """ + # prevent abstract classes being upcast + if isinstance(rep, type) and issubclass(rep, generic): + return rep + # extract dtype from arrays + if isinstance(rep, ndarray): + return rep.dtype.type + # fall back on dtype to convert + try: + res = dtype(rep) + except Exception: + return default + else: + return res.type + + +@set_module('numpy') +def issubclass_(arg1, arg2): + """ + Determine if a class is a subclass of a second class. + + `issubclass_` is equivalent to the Python built-in ``issubclass``, + except that it returns False instead of raising a TypeError if one + of the arguments is not a class. + + Parameters + ---------- + arg1 : class + Input class. True is returned if `arg1` is a subclass of `arg2`. + arg2 : class or tuple of classes. + Input class. If a tuple of classes, True is returned if `arg1` is a + subclass of any of the tuple elements. + + Returns + ------- + out : bool + Whether `arg1` is a subclass of `arg2` or not. + + See Also + -------- + issubsctype, issubdtype, issctype + + Examples + -------- + >>> np.issubclass_(np.int32, int) + False + >>> np.issubclass_(np.int32, float) + False + >>> np.issubclass_(np.float64, float) + True + + """ + try: + return issubclass(arg1, arg2) + except TypeError: + return False + + +@set_module('numpy') +def issubsctype(arg1, arg2): + """ + Determine if the first argument is a subclass of the second argument. + + Parameters + ---------- + arg1, arg2 : dtype or dtype specifier + Data-types. + + Returns + ------- + out : bool + The result. + + See Also + -------- + issctype, issubdtype, obj2sctype + + Examples + -------- + >>> from numpy._core import issubsctype + >>> issubsctype('S8', str) + False + >>> issubsctype(np.array([1]), int) + True + >>> issubsctype(np.array([1]), float) + False + + """ + return issubclass(obj2sctype(arg1), obj2sctype(arg2)) + + +class _PreprocessDTypeError(Exception): + pass + + +def _preprocess_dtype(dtype): + """ + Preprocess dtype argument by: + 1. fetching type from a data type + 2. verifying that types are built-in NumPy dtypes + """ + if isinstance(dtype, ma.dtype): + dtype = dtype.type + if isinstance(dtype, ndarray) or dtype not in allTypes.values(): + raise _PreprocessDTypeError + return dtype + + +@set_module('numpy') +def isdtype(dtype, kind): + """ + Determine if a provided dtype is of a specified data type ``kind``. + + This function only supports built-in NumPy's data types. + Third-party dtypes are not yet supported. + + Parameters + ---------- + dtype : dtype + The input dtype. + kind : dtype or str or tuple of dtypes/strs. + dtype or dtype kind. Allowed dtype kinds are: + * ``'bool'`` : boolean kind + * ``'signed integer'`` : signed integer data types + * ``'unsigned integer'`` : unsigned integer data types + * ``'integral'`` : integer data types + * ``'real floating'`` : real-valued floating-point data types + * ``'complex floating'`` : complex floating-point data types + * ``'numeric'`` : numeric data types + + Returns + ------- + out : bool + + See Also + -------- + issubdtype + + Examples + -------- + >>> import numpy as np + >>> np.isdtype(np.float32, np.float64) + False + >>> np.isdtype(np.float32, "real floating") + True + >>> np.isdtype(np.complex128, ("real floating", "complex floating")) + True + + """ + try: + dtype = _preprocess_dtype(dtype) + except _PreprocessDTypeError: + raise TypeError( + "dtype argument must be a NumPy dtype, " + f"but it is a {type(dtype)}." + ) from None + + input_kinds = kind if isinstance(kind, tuple) else (kind,) + + processed_kinds = set() + + for kind in input_kinds: + if kind == "bool": + processed_kinds.add(allTypes["bool"]) + elif kind == "signed integer": + processed_kinds.update(sctypes["int"]) + elif kind == "unsigned integer": + processed_kinds.update(sctypes["uint"]) + elif kind == "integral": + processed_kinds.update(sctypes["int"] + sctypes["uint"]) + elif kind == "real floating": + processed_kinds.update(sctypes["float"]) + elif kind == "complex floating": + processed_kinds.update(sctypes["complex"]) + elif kind == "numeric": + processed_kinds.update( + sctypes["int"] + sctypes["uint"] + + sctypes["float"] + sctypes["complex"] + ) + elif isinstance(kind, str): + raise ValueError( + "kind argument is a string, but" + f" {kind!r} is not a known kind name." + ) + else: + try: + kind = _preprocess_dtype(kind) + except _PreprocessDTypeError: + raise TypeError( + "kind argument must be comprised of " + "NumPy dtypes or strings only, " + f"but is a {type(kind)}." + ) from None + processed_kinds.add(kind) + + return dtype in processed_kinds + + +@set_module('numpy') +def issubdtype(arg1, arg2): + r""" + Returns True if first argument is a typecode lower/equal in type hierarchy. + + This is like the builtin :func:`issubclass`, but for `dtype`\ s. + + Parameters + ---------- + arg1, arg2 : dtype_like + `dtype` or object coercible to one + + Returns + ------- + out : bool + + See Also + -------- + :ref:`arrays.scalars` : Overview of the numpy type hierarchy. + + Examples + -------- + `issubdtype` can be used to check the type of arrays: + + >>> ints = np.array([1, 2, 3], dtype=np.int32) + >>> np.issubdtype(ints.dtype, np.integer) + True + >>> np.issubdtype(ints.dtype, np.floating) + False + + >>> floats = np.array([1, 2, 3], dtype=np.float32) + >>> np.issubdtype(floats.dtype, np.integer) + False + >>> np.issubdtype(floats.dtype, np.floating) + True + + Similar types of different sizes are not subdtypes of each other: + + >>> np.issubdtype(np.float64, np.float32) + False + >>> np.issubdtype(np.float32, np.float64) + False + + but both are subtypes of `floating`: + + >>> np.issubdtype(np.float64, np.floating) + True + >>> np.issubdtype(np.float32, np.floating) + True + + For convenience, dtype-like objects are allowed too: + + >>> np.issubdtype('S1', np.bytes_) + True + >>> np.issubdtype('i4', np.signedinteger) + True + + """ + if not issubclass_(arg1, generic): + arg1 = dtype(arg1).type + if not issubclass_(arg2, generic): + arg2 = dtype(arg2).type + + return issubclass(arg1, arg2) + + +@set_module('numpy') +def sctype2char(sctype): + """ + Return the string representation of a scalar dtype. + + Parameters + ---------- + sctype : scalar dtype or object + If a scalar dtype, the corresponding string character is + returned. If an object, `sctype2char` tries to infer its scalar type + and then return the corresponding string character. + + Returns + ------- + typechar : str + The string character corresponding to the scalar type. + + Raises + ------ + ValueError + If `sctype` is an object for which the type can not be inferred. + + See Also + -------- + obj2sctype, issctype, issubsctype, mintypecode + + Examples + -------- + >>> from numpy._core.numerictypes import sctype2char + >>> for sctype in [np.int32, np.double, np.cdouble, np.bytes_, np.ndarray]: + ... print(sctype2char(sctype)) + l # may vary + d + D + S + O + + >>> x = np.array([1., 2-1.j]) + >>> sctype2char(x) + 'D' + >>> sctype2char(list) + 'O' + + """ + sctype = obj2sctype(sctype) + if sctype is None: + raise ValueError("unrecognized type") + if sctype not in sctypeDict.values(): + # for compatibility + raise KeyError(sctype) + return dtype(sctype).char + + +def _scalar_type_key(typ): + """A ``key`` function for `sorted`.""" + dt = dtype(typ) + return (dt.kind.lower(), dt.itemsize) + + +ScalarType = [int, float, complex, bool, bytes, str, memoryview] +ScalarType += sorted(dict.fromkeys(sctypeDict.values()), key=_scalar_type_key) +ScalarType = tuple(ScalarType) + + +# Now add the types we've determined to this module +for key in allTypes: + globals()[key] = allTypes[key] + __all__.append(key) + +del key + +typecodes = {'Character': 'c', + 'Integer': 'bhilqnp', + 'UnsignedInteger': 'BHILQNP', + 'Float': 'efdg', + 'Complex': 'FDG', + 'AllInteger': 'bBhHiIlLqQnNpP', + 'AllFloat': 'efdgFDG', + 'Datetime': 'Mm', + 'All': '?bhilqnpBHILQNPefdgFDGSUVOMm'} + +# backwards compatibility --- deprecated name +# Formal deprecation: Numpy 1.20.0, 2020-10-19 (see numpy/__init__.py) +typeDict = sctypeDict + +def _register_types(): + numbers.Integral.register(integer) + numbers.Complex.register(inexact) + numbers.Real.register(floating) + numbers.Number.register(number) + + +_register_types() diff --git a/local_packages/numpy/_core/numerictypes.pyi b/local_packages/numpy/_core/numerictypes.pyi new file mode 100644 index 0000000..46bb6a3 --- /dev/null +++ b/local_packages/numpy/_core/numerictypes.pyi @@ -0,0 +1,196 @@ +from builtins import bool as py_bool +from typing import Any, Final, Literal as L, TypedDict, type_check_only + +import numpy as np +from numpy import ( + bool, + bool_, + byte, + bytes_, + cdouble, + character, + clongdouble, + complex64, + complex128, + complex192, + complex256, + complexfloating, + csingle, + datetime64, + double, + dtype, + flexible, + float16, + float32, + float64, + float96, + float128, + floating, + generic, + half, + inexact, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + intp, + long, + longdouble, + longlong, + number, + object_, + short, + signedinteger, + single, + str_, + timedelta64, + ubyte, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + unsignedinteger, + ushort, + void, +) +from numpy._typing import DTypeLike + +from ._type_aliases import sctypeDict as sctypeDict +from .multiarray import ( + busday_count, + busday_offset, + busdaycalendar, + datetime_as_string, + datetime_data, + is_busday, +) + +__all__ = [ + "ScalarType", + "typecodes", + "issubdtype", + "datetime_data", + "datetime_as_string", + "busday_offset", + "busday_count", + "is_busday", + "busdaycalendar", + "isdtype", + "generic", + "unsignedinteger", + "character", + "inexact", + "number", + "integer", + "flexible", + "complexfloating", + "signedinteger", + "floating", + "bool", + "float16", + "float32", + "float64", + "longdouble", + "complex64", + "complex128", + "clongdouble", + "bytes_", + "str_", + "void", + "object_", + "datetime64", + "timedelta64", + "int8", + "byte", + "uint8", + "ubyte", + "int16", + "short", + "uint16", + "ushort", + "int32", + "intc", + "uint32", + "uintc", + "int64", + "long", + "uint64", + "ulong", + "longlong", + "ulonglong", + "intp", + "uintp", + "double", + "cdouble", + "single", + "csingle", + "half", + "bool_", + "int_", + "uint", + "float96", + "float128", + "complex192", + "complex256", +] + +@type_check_only +class _TypeCodes(TypedDict): + Character: L["c"] + Integer: L["bhilqnp"] + UnsignedInteger: L["BHILQNP"] + Float: L["efdg"] + Complex: L["FDG"] + AllInteger: L["bBhHiIlLqQnNpP"] + AllFloat: L["efdgFDG"] + Datetime: L["Mm"] + All: L["?bhilqnpBHILQNPefdgFDGSUVOMm"] + +def isdtype(dtype: dtype | type, kind: DTypeLike | tuple[DTypeLike, ...]) -> py_bool: ... +def issubdtype(arg1: DTypeLike | None, arg2: DTypeLike | None) -> py_bool: ... + +typecodes: Final[_TypeCodes] = ... +ScalarType: Final[ + tuple[ + type[int], + type[float], + type[complex], + type[py_bool], + type[bytes], + type[str], + type[memoryview[Any]], + type[np.bool], + type[complex64], + type[complex128], + type[complex128 | complex192 | complex256], + type[float16], + type[float32], + type[float64], + type[float64 | float96 | float128], + type[int8], + type[int16], + type[int32], + type[int32 | int64], + type[int64], + type[datetime64], + type[timedelta64], + type[object_], + type[bytes_], + type[str_], + type[uint8], + type[uint16], + type[uint32], + type[uint32 | uint64], + type[uint64], + type[void], + ] +] = ... +typeDict: Final = sctypeDict diff --git a/local_packages/numpy/_core/overrides.py b/local_packages/numpy/_core/overrides.py new file mode 100644 index 0000000..6d5e775 --- /dev/null +++ b/local_packages/numpy/_core/overrides.py @@ -0,0 +1,188 @@ +"""Implementation of __array_function__ overrides from NEP-18.""" +import collections +import functools +import inspect + +from numpy._core._multiarray_umath import ( + _ArrayFunctionDispatcher, + _get_implementing_args, + add_docstring, +) +from numpy._utils import set_module # noqa: F401 +from numpy._utils._inspect import getargspec + +ARRAY_FUNCTIONS = set() + +array_function_like_doc = ( + """like : array_like, optional + Reference object to allow the creation of arrays which are not + NumPy arrays. If an array-like passed in as ``like`` supports + the ``__array_function__`` protocol, the result will be defined + by it. In this case, it ensures the creation of an array object + compatible with that passed in via this argument.""" +) + +def get_array_function_like_doc(public_api, docstring_template=""): + ARRAY_FUNCTIONS.add(public_api) + docstring = public_api.__doc__ or docstring_template + return docstring.replace("${ARRAY_FUNCTION_LIKE}", array_function_like_doc) + +def finalize_array_function_like(public_api): + public_api.__doc__ = get_array_function_like_doc(public_api) + return public_api + + +add_docstring( + _ArrayFunctionDispatcher, + """ + Class to wrap functions with checks for __array_function__ overrides. + + All arguments are required, and can only be passed by position. + + Parameters + ---------- + dispatcher : function or None + The dispatcher function that returns a single sequence-like object + of all arguments relevant. It must have the same signature (except + the default values) as the actual implementation. + If ``None``, this is a ``like=`` dispatcher and the + ``_ArrayFunctionDispatcher`` must be called with ``like`` as the + first (additional and positional) argument. + implementation : function + Function that implements the operation on NumPy arrays without + overrides. Arguments passed calling the ``_ArrayFunctionDispatcher`` + will be forwarded to this (and the ``dispatcher``) as if using + ``*args, **kwargs``. + + Attributes + ---------- + _implementation : function + The original implementation passed in. + """) + + +# exposed for testing purposes; used internally by _ArrayFunctionDispatcher +add_docstring( + _get_implementing_args, + """ + Collect arguments on which to call __array_function__. + + Parameters + ---------- + relevant_args : iterable of array-like + Iterable of possibly array-like arguments to check for + __array_function__ methods. + + Returns + ------- + Sequence of arguments with __array_function__ methods, in the order in + which they should be called. + """) + + +ArgSpec = collections.namedtuple('ArgSpec', 'args varargs keywords defaults') + + +def verify_matching_signatures(implementation, dispatcher): + """Verify that a dispatcher function has the right signature.""" + implementation_spec = ArgSpec(*getargspec(implementation)) + dispatcher_spec = ArgSpec(*getargspec(dispatcher)) + + if (implementation_spec.args != dispatcher_spec.args or + implementation_spec.varargs != dispatcher_spec.varargs or + implementation_spec.keywords != dispatcher_spec.keywords or + (bool(implementation_spec.defaults) != + bool(dispatcher_spec.defaults)) or + (implementation_spec.defaults is not None and + len(implementation_spec.defaults) != + len(dispatcher_spec.defaults))): + raise RuntimeError('implementation and dispatcher for %s have ' + 'different function signatures' % implementation) + + if implementation_spec.defaults is not None: + if dispatcher_spec.defaults != (None,) * len(dispatcher_spec.defaults): + raise RuntimeError('dispatcher functions can only use None for ' + 'default argument values') + + +def array_function_dispatch(dispatcher=None, module=None, verify=True, + docs_from_dispatcher=False): + """Decorator for adding dispatch with the __array_function__ protocol. + + See NEP-18 for example usage. + + Parameters + ---------- + dispatcher : callable or None + Function that when called like ``dispatcher(*args, **kwargs)`` with + arguments from the NumPy function call returns an iterable of + array-like arguments to check for ``__array_function__``. + + If `None`, the first argument is used as the single `like=` argument + and not passed on. A function implementing `like=` must call its + dispatcher with `like` as the first non-keyword argument. + module : str, optional + __module__ attribute to set on new function, e.g., ``module='numpy'``. + By default, module is copied from the decorated function. + verify : bool, optional + If True, verify the that the signature of the dispatcher and decorated + function signatures match exactly: all required and optional arguments + should appear in order with the same names, but the default values for + all optional arguments should be ``None``. Only disable verification + if the dispatcher's signature needs to deviate for some particular + reason, e.g., because the function has a signature like + ``func(*args, **kwargs)``. + docs_from_dispatcher : bool, optional + If True, copy docs from the dispatcher function onto the dispatched + function, rather than from the implementation. This is useful for + functions defined in C, which otherwise don't have docstrings. + + Returns + ------- + Function suitable for decorating the implementation of a NumPy function. + + """ + def decorator(implementation): + if verify: + if dispatcher is not None: + verify_matching_signatures(implementation, dispatcher) + else: + # Using __code__ directly similar to verify_matching_signature + co = implementation.__code__ + last_arg = co.co_argcount + co.co_kwonlyargcount - 1 + last_arg = co.co_varnames[last_arg] + if last_arg != "like" or co.co_kwonlyargcount == 0: + raise RuntimeError( + "__array_function__ expects `like=` to be the last " + "argument and a keyword-only argument. " + f"{implementation} does not seem to comply.") + + if docs_from_dispatcher and dispatcher.__doc__ is not None: + doc = inspect.cleandoc(dispatcher.__doc__) + add_docstring(implementation, doc) + + public_api = _ArrayFunctionDispatcher(dispatcher, implementation) + functools.update_wrapper(public_api, implementation) + + if not verify and not getattr(implementation, "__text_signature__", None): + public_api.__signature__ = inspect.signature(dispatcher) + + if module is not None: + public_api.__module__ = module + + ARRAY_FUNCTIONS.add(public_api) + + return public_api + + return decorator + + +def array_function_from_dispatcher( + implementation, module=None, verify=True, docs_from_dispatcher=True): + """Like array_function_dispatcher, but with function arguments flipped.""" + + def decorator(dispatcher): + return array_function_dispatch( + dispatcher, module, verify=verify, + docs_from_dispatcher=docs_from_dispatcher)(implementation) + return decorator diff --git a/local_packages/numpy/_core/overrides.pyi b/local_packages/numpy/_core/overrides.pyi new file mode 100644 index 0000000..6ef5256 --- /dev/null +++ b/local_packages/numpy/_core/overrides.pyi @@ -0,0 +1,47 @@ +from collections.abc import Callable, Iterable +from typing import Any, Final, NamedTuple, ParamSpec, TypeAlias, TypeVar + +from numpy._utils import set_module as set_module + +_T = TypeVar("_T") +_Tss = ParamSpec("_Tss") +_FuncLikeT = TypeVar("_FuncLikeT", bound=type | Callable[..., object]) + +_Dispatcher: TypeAlias = Callable[_Tss, Iterable[object]] + +### + +ARRAY_FUNCTIONS: set[Callable[..., Any]] = ... +array_function_like_doc: Final[str] = ... + +class ArgSpec(NamedTuple): + args: list[str] + varargs: str | None + keywords: str | None + defaults: tuple[Any, ...] + +def get_array_function_like_doc(public_api: Callable[..., object], docstring_template: str = "") -> str: ... +def finalize_array_function_like(public_api: _FuncLikeT) -> _FuncLikeT: ... + +# +def verify_matching_signatures(implementation: Callable[_Tss, object], dispatcher: _Dispatcher[_Tss]) -> None: ... + +# NOTE: This actually returns a `_ArrayFunctionDispatcher` callable wrapper object, with +# the original wrapped callable stored in the `._implementation` attribute. It checks +# for any `__array_function__` of the values of specific arguments that the dispatcher +# specifies. Since the dispatcher only returns an iterable of passed array-like args, +# this overridable behaviour is impossible to annotate. +def array_function_dispatch( + dispatcher: _Dispatcher[_Tss] | None = None, + module: str | None = None, + verify: bool = True, + docs_from_dispatcher: bool = False, +) -> Callable[[_FuncLikeT], _FuncLikeT]: ... + +# +def array_function_from_dispatcher( + implementation: Callable[_Tss, _T], + module: str | None = None, + verify: bool = True, + docs_from_dispatcher: bool = True, +) -> Callable[[_Dispatcher[_Tss]], Callable[_Tss, _T]]: ... diff --git a/local_packages/numpy/_core/printoptions.py b/local_packages/numpy/_core/printoptions.py new file mode 100644 index 0000000..5d6f963 --- /dev/null +++ b/local_packages/numpy/_core/printoptions.py @@ -0,0 +1,32 @@ +""" +Stores and defines the low-level format_options context variable. + +This is defined in its own file outside of the arrayprint module +so we can import it from C while initializing the multiarray +C module during import without introducing circular dependencies. +""" + +import sys +from contextvars import ContextVar + +__all__ = ["format_options"] + +default_format_options_dict = { + "edgeitems": 3, # repr N leading and trailing items of each dimension + "threshold": 1000, # total items > triggers array summarization + "floatmode": "maxprec", + "precision": 8, # precision of floating point representations + "suppress": False, # suppress printing small floating values in exp format + "linewidth": 75, + "nanstr": "nan", + "infstr": "inf", + "sign": "-", + "formatter": None, + # Internally stored as an int to simplify comparisons; converted from/to + # str/False on the way in/out. + 'legacy': sys.maxsize, + 'override_repr': None, +} + +format_options = ContextVar( + "format_options", default=default_format_options_dict) diff --git a/local_packages/numpy/_core/printoptions.pyi b/local_packages/numpy/_core/printoptions.pyi new file mode 100644 index 0000000..bd7c7b4 --- /dev/null +++ b/local_packages/numpy/_core/printoptions.pyi @@ -0,0 +1,28 @@ +from collections.abc import Callable +from contextvars import ContextVar +from typing import Any, Final, TypedDict + +from .arrayprint import _FormatDict + +__all__ = ["format_options"] + +### + +class _FormatOptionsDict(TypedDict): + edgeitems: int + threshold: int + floatmode: str + precision: int + suppress: bool + linewidth: int + nanstr: str + infstr: str + sign: str + formatter: _FormatDict | None + legacy: int + override_repr: Callable[[Any], str] | None + +### + +default_format_options_dict: Final[_FormatOptionsDict] = ... +format_options: ContextVar[_FormatOptionsDict] diff --git a/local_packages/numpy/_core/records.py b/local_packages/numpy/_core/records.py new file mode 100644 index 0000000..9a6af16 --- /dev/null +++ b/local_packages/numpy/_core/records.py @@ -0,0 +1,1088 @@ +""" +This module contains a set of functions for record arrays. +""" +import os +import warnings +from collections import Counter +from contextlib import nullcontext + +from numpy._utils import set_module + +from . import numeric as sb, numerictypes as nt +from .arrayprint import _get_legacy_print_mode + +# All of the functions allow formats to be a dtype +__all__ = [ + 'record', 'recarray', 'format_parser', 'fromarrays', 'fromrecords', + 'fromstring', 'fromfile', 'array', 'find_duplicate', +] + + +ndarray = sb.ndarray + +_byteorderconv = {'b': '>', + 'l': '<', + 'n': '=', + 'B': '>', + 'L': '<', + 'N': '=', + 'S': 's', + 's': 's', + '>': '>', + '<': '<', + '=': '=', + '|': '|', + 'I': '|', + 'i': '|'} + +# formats regular expression +# allows multidimensional spec with a tuple syntax in front +# of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 ' +# are equally allowed + +numfmt = nt.sctypeDict + + +@set_module('numpy.rec') +def find_duplicate(list): + """Find duplication in a list, return a list of duplicated elements""" + return [ + item + for item, counts in Counter(list).items() + if counts > 1 + ] + + +@set_module('numpy.rec') +class format_parser: + """ + Class to convert formats, names, titles description to a dtype. + + After constructing the format_parser object, the dtype attribute is + the converted data-type: + ``dtype = format_parser(formats, names, titles).dtype`` + + Attributes + ---------- + dtype : dtype + The converted data-type. + + Parameters + ---------- + formats : str or list of str + The format description, either specified as a string with + comma-separated format descriptions in the form ``'f8, i4, S5'``, or + a list of format description strings in the form + ``['f8', 'i4', 'S5']``. + names : str or list/tuple of str + The field names, either specified as a comma-separated string in the + form ``'col1, col2, col3'``, or as a list or tuple of strings in the + form ``['col1', 'col2', 'col3']``. + An empty list can be used, in that case default field names + ('f0', 'f1', ...) are used. + titles : sequence + Sequence of title strings. An empty list can be used to leave titles + out. + aligned : bool, optional + If True, align the fields by padding as the C-compiler would. + Default is False. + byteorder : str, optional + If specified, all the fields will be changed to the + provided byte-order. Otherwise, the default byte-order is + used. For all available string specifiers, see `dtype.newbyteorder`. + + See Also + -------- + numpy.dtype, numpy.typename + + Examples + -------- + >>> import numpy as np + >>> np.rec.format_parser(['>> np.rec.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'], + ... []).dtype + dtype([('col1', '>> np.rec.format_parser([' len(titles): + self._titles += [None] * (self._nfields - len(titles)) + + def _createdtype(self, byteorder): + dtype = sb.dtype({ + 'names': self._names, + 'formats': self._f_formats, + 'offsets': self._offsets, + 'titles': self._titles, + }) + if byteorder is not None: + byteorder = _byteorderconv[byteorder[0]] + dtype = dtype.newbyteorder(byteorder) + + self.dtype = dtype + + +class record(nt.void): + """A data-type scalar that allows field access as attribute lookup. + """ + + # manually set name and module so that this class's type shows up + # as numpy.record when printed + __name__ = 'record' + __module__ = 'numpy' + + def __repr__(self): + if _get_legacy_print_mode() <= 113: + return self.__str__() + return super().__repr__() + + def __str__(self): + if _get_legacy_print_mode() <= 113: + return str(self.item()) + return super().__str__() + + def __getattribute__(self, attr): + if attr in ('setfield', 'getfield', 'dtype'): + return nt.void.__getattribute__(self, attr) + try: + return nt.void.__getattribute__(self, attr) + except AttributeError: + pass + fielddict = nt.void.__getattribute__(self, 'dtype').fields + res = fielddict.get(attr, None) + if res: + obj = self.getfield(*res[:2]) + # if it has fields return a record, + # otherwise return the object + try: + dt = obj.dtype + except AttributeError: + # happens if field is Object type + return obj + if dt.names is not None: + return obj.view((self.__class__, obj.dtype)) + return obj + else: + raise AttributeError(f"'record' object has no attribute '{attr}'") + + def __setattr__(self, attr, val): + if attr in ('setfield', 'getfield', 'dtype'): + raise AttributeError(f"Cannot set '{attr}' attribute") + fielddict = nt.void.__getattribute__(self, 'dtype').fields + res = fielddict.get(attr, None) + if res: + return self.setfield(val, *res[:2]) + elif getattr(self, attr, None): + return nt.void.__setattr__(self, attr, val) + else: + raise AttributeError(f"'record' object has no attribute '{attr}'") + + def __getitem__(self, indx): + obj = nt.void.__getitem__(self, indx) + + # copy behavior of record.__getattribute__, + if isinstance(obj, nt.void) and obj.dtype.names is not None: + return obj.view((self.__class__, obj.dtype)) + else: + # return a single element + return obj + + def pprint(self): + """Pretty-print all fields.""" + # pretty-print all fields + names = self.dtype.names + maxlen = max(len(name) for name in names) + fmt = '%% %ds: %%s' % maxlen + rows = [fmt % (name, getattr(self, name)) for name in names] + return "\n".join(rows) + +# The recarray is almost identical to a standard array (which supports +# named fields already) The biggest difference is that it can use +# attribute-lookup to find the fields and it is constructed using +# a record. + +# If byteorder is given it forces a particular byteorder on all +# the fields (and any subfields) + + +@set_module("numpy.rec") +class recarray(ndarray): + """Construct an ndarray that allows field access using attributes. + + Arrays may have a data-types containing fields, analogous + to columns in a spread sheet. An example is ``[(x, int), (y, float)]``, + where each entry in the array is a pair of ``(int, float)``. Normally, + these attributes are accessed using dictionary lookups such as ``arr['x']`` + and ``arr['y']``. Record arrays allow the fields to be accessed as members + of the array, using ``arr.x`` and ``arr.y``. + + Parameters + ---------- + shape : tuple + Shape of output array. + dtype : data-type, optional + The desired data-type. By default, the data-type is determined + from `formats`, `names`, `titles`, `aligned` and `byteorder`. + formats : list of data-types, optional + A list containing the data-types for the different columns, e.g. + ``['i4', 'f8', 'i4']``. `formats` does *not* support the new + convention of using types directly, i.e. ``(int, float, int)``. + Note that `formats` must be a list, not a tuple. + Given that `formats` is somewhat limited, we recommend specifying + `dtype` instead. + names : tuple of str, optional + The name of each column, e.g. ``('x', 'y', 'z')``. + buf : buffer, optional + By default, a new array is created of the given shape and data-type. + If `buf` is specified and is an object exposing the buffer interface, + the array will use the memory from the existing buffer. In this case, + the `offset` and `strides` keywords are available. + + Other Parameters + ---------------- + titles : tuple of str, optional + Aliases for column names. For example, if `names` were + ``('x', 'y', 'z')`` and `titles` is + ``('x_coordinate', 'y_coordinate', 'z_coordinate')``, then + ``arr['x']`` is equivalent to both ``arr.x`` and ``arr.x_coordinate``. + byteorder : {'<', '>', '='}, optional + Byte-order for all fields. + aligned : bool, optional + Align the fields in memory as the C-compiler would. + strides : tuple of ints, optional + Buffer (`buf`) is interpreted according to these strides (strides + define how many bytes each array element, row, column, etc. + occupy in memory). + offset : int, optional + Start reading buffer (`buf`) from this offset onwards. + order : {'C', 'F'}, optional + Row-major (C-style) or column-major (Fortran-style) order. + + Returns + ------- + rec : recarray + Empty array of the given shape and type. + + See Also + -------- + numpy.rec.fromrecords : Construct a record array from data. + numpy.record : fundamental data-type for `recarray`. + numpy.rec.format_parser : determine data-type from formats, names, titles. + + Notes + ----- + This constructor can be compared to ``empty``: it creates a new record + array but does not fill it with data. To create a record array from data, + use one of the following methods: + + 1. Create a standard ndarray and convert it to a record array, + using ``arr.view(np.recarray)`` + 2. Use the `buf` keyword. + 3. Use `np.rec.fromrecords`. + + Examples + -------- + Create an array with two fields, ``x`` and ``y``: + + >>> import numpy as np + >>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', '>> x + array([(1., 2), (3., 4)], dtype=[('x', '>> x['x'] + array([1., 3.]) + + View the array as a record array: + + >>> x = x.view(np.recarray) + + >>> x.x + array([1., 3.]) + + >>> x.y + array([2, 4]) + + Create a new, empty record array: + + >>> np.recarray((2,), + ... dtype=[('x', int), ('y', float), ('z', int)]) #doctest: +SKIP + rec.array([(-1073741821, 1.2249118382103472e-301, 24547520), + (3471280, 1.2134086255804012e-316, 0)], + dtype=[('x', ' 0 or self.shape == (0,): + lst = sb.array2string( + self, separator=', ', prefix=prefix, suffix=',') + else: + # show zero-length shape unless it is (0,) + lst = f"[], shape={repr(self.shape)}" + + lf = '\n' + ' ' * len(prefix) + if _get_legacy_print_mode() <= 113: + lf = ' ' + lf # trailing space + return fmt % (lst, lf, repr_dtype) + + def field(self, attr, val=None): + if isinstance(attr, int): + names = ndarray.__getattribute__(self, 'dtype').names + attr = names[attr] + + fielddict = ndarray.__getattribute__(self, 'dtype').fields + + res = fielddict[attr][:2] + + if val is None: + obj = self.getfield(*res) + if obj.dtype.names is not None: + return obj + return obj.view(ndarray) + else: + return self.setfield(val, *res) + + +def _deprecate_shape_0_as_None(shape): + if shape == 0: + warnings.warn( + "Passing `shape=0` to have the shape be inferred is deprecated, " + "and in future will be equivalent to `shape=(0,)`. To infer " + "the shape and suppress this warning, pass `shape=None` instead.", + FutureWarning, stacklevel=3) + return None + else: + return shape + + +@set_module("numpy.rec") +def fromarrays(arrayList, dtype=None, shape=None, formats=None, + names=None, titles=None, aligned=False, byteorder=None): + """Create a record array from a (flat) list of arrays + + Parameters + ---------- + arrayList : list or tuple + List of array-like objects (such as lists, tuples, + and ndarrays). + dtype : data-type, optional + valid dtype for all arrays + shape : int or tuple of ints, optional + Shape of the resulting array. If not provided, inferred from + ``arrayList[0]``. + formats, names, titles, aligned, byteorder : + If `dtype` is ``None``, these arguments are passed to + `numpy.rec.format_parser` to construct a dtype. See that function for + detailed documentation. + + Returns + ------- + np.recarray + Record array consisting of given arrayList columns. + + Examples + -------- + >>> x1=np.array([1,2,3,4]) + >>> x2=np.array(['a','dd','xyz','12']) + >>> x3=np.array([1.1,2,3,4]) + >>> r = np.rec.fromarrays([x1,x2,x3],names='a,b,c') + >>> print(r[1]) + (2, 'dd', 2.0) # may vary + >>> x1[1]=34 + >>> r.a + array([1, 2, 3, 4]) + + >>> x1 = np.array([1, 2, 3, 4]) + >>> x2 = np.array(['a', 'dd', 'xyz', '12']) + >>> x3 = np.array([1.1, 2, 3,4]) + >>> r = np.rec.fromarrays( + ... [x1, x2, x3], + ... dtype=np.dtype([('a', np.int32), ('b', 'S3'), ('c', np.float32)])) + >>> r + rec.array([(1, b'a', 1.1), (2, b'dd', 2. ), (3, b'xyz', 3. ), + (4, b'12', 4. )], + dtype=[('a', ' 0: + shape = shape[:-nn] + + _array = recarray(shape, descr) + + # populate the record array (makes a copy) + for k, obj in enumerate(arrayList): + nn = descr[k].ndim + testshape = obj.shape[:obj.ndim - nn] + name = _names[k] + if testshape != shape: + raise ValueError(f'array-shape mismatch in array {k} ("{name}")') + + _array[name] = obj + + return _array + + +@set_module("numpy.rec") +def fromrecords(recList, dtype=None, shape=None, formats=None, names=None, + titles=None, aligned=False, byteorder=None): + """Create a recarray from a list of records in text form. + + Parameters + ---------- + recList : sequence + data in the same field may be heterogeneous - they will be promoted + to the highest data type. + dtype : data-type, optional + valid dtype for all arrays + shape : int or tuple of ints, optional + shape of each array. + formats, names, titles, aligned, byteorder : + If `dtype` is ``None``, these arguments are passed to + `numpy.format_parser` to construct a dtype. See that function for + detailed documentation. + + If both `formats` and `dtype` are None, then this will auto-detect + formats. Use list of tuples rather than list of lists for faster + processing. + + Returns + ------- + np.recarray + record array consisting of given recList rows. + + Examples + -------- + >>> r=np.rec.fromrecords([(456,'dbe',1.2),(2,'de',1.3)], + ... names='col1,col2,col3') + >>> print(r[0]) + (456, 'dbe', 1.2) + >>> r.col1 + array([456, 2]) + >>> r.col2 + array(['dbe', 'de'], dtype='>> import pickle + >>> pickle.loads(pickle.dumps(r)) + rec.array([(456, 'dbe', 1.2), ( 2, 'de', 1.3)], + dtype=[('col1', ' 1: + raise ValueError("Can only deal with 1-d array.") + _array = recarray(shape, descr) + for k in range(_array.size): + _array[k] = tuple(recList[k]) + # list of lists instead of list of tuples ? + # 2018-02-07, 1.14.1 + warnings.warn( + "fromrecords expected a list of tuples, may have received a list " + "of lists instead. In the future that will raise an error", + FutureWarning, stacklevel=2) + return _array + else: + if shape is not None and retval.shape != shape: + retval.shape = shape + + res = retval.view(recarray) + + return res + + +@set_module("numpy.rec") +def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None, + names=None, titles=None, aligned=False, byteorder=None): + r"""Create a record array from binary data + + Note that despite the name of this function it does not accept `str` + instances. + + Parameters + ---------- + datastring : bytes-like + Buffer of binary data + dtype : data-type, optional + Valid dtype for all arrays + shape : int or tuple of ints, optional + Shape of each array. + offset : int, optional + Position in the buffer to start reading from. + formats, names, titles, aligned, byteorder : + If `dtype` is ``None``, these arguments are passed to + `numpy.format_parser` to construct a dtype. See that function for + detailed documentation. + + + Returns + ------- + np.recarray + Record array view into the data in datastring. This will be readonly + if `datastring` is readonly. + + See Also + -------- + numpy.frombuffer + + Examples + -------- + >>> a = b'\x01\x02\x03abc' + >>> np.rec.fromstring(a, dtype='u1,u1,u1,S3') + rec.array([(1, 2, 3, b'abc')], + dtype=[('f0', 'u1'), ('f1', 'u1'), ('f2', 'u1'), ('f3', 'S3')]) + + >>> grades_dtype = [('Name', (np.str_, 10)), ('Marks', np.float64), + ... ('GradeLevel', np.int32)] + >>> grades_array = np.array([('Sam', 33.3, 3), ('Mike', 44.4, 5), + ... ('Aadi', 66.6, 6)], dtype=grades_dtype) + >>> np.rec.fromstring(grades_array.tobytes(), dtype=grades_dtype) + rec.array([('Sam', 33.3, 3), ('Mike', 44.4, 5), ('Aadi', 66.6, 6)], + dtype=[('Name', '>> s = '\x01\x02\x03abc' + >>> np.rec.fromstring(s, dtype='u1,u1,u1,S3') + Traceback (most recent call last): + ... + TypeError: a bytes-like object is required, not 'str' + """ + + if dtype is None and formats is None: + raise TypeError("fromstring() needs a 'dtype' or 'formats' argument") + + if dtype is not None: + descr = sb.dtype(dtype) + else: + descr = format_parser(formats, names, titles, aligned, byteorder).dtype + + itemsize = descr.itemsize + + # NumPy 1.19.0, 2020-01-01 + shape = _deprecate_shape_0_as_None(shape) + + if shape in (None, -1): + shape = (len(datastring) - offset) // itemsize + + _array = recarray(shape, descr, buf=datastring, offset=offset) + return _array + +def get_remaining_size(fd): + pos = fd.tell() + try: + fd.seek(0, 2) + return fd.tell() - pos + finally: + fd.seek(pos, 0) + + +@set_module("numpy.rec") +def fromfile(fd, dtype=None, shape=None, offset=0, formats=None, + names=None, titles=None, aligned=False, byteorder=None): + """Create an array from binary file data + + Parameters + ---------- + fd : str or file type + If file is a string or a path-like object then that file is opened, + else it is assumed to be a file object. The file object must + support random access (i.e. it must have tell and seek methods). + dtype : data-type, optional + valid dtype for all arrays + shape : int or tuple of ints, optional + shape of each array. + offset : int, optional + Position in the file to start reading from. + formats, names, titles, aligned, byteorder : + If `dtype` is ``None``, these arguments are passed to + `numpy.format_parser` to construct a dtype. See that function for + detailed documentation + + Returns + ------- + np.recarray + record array consisting of data enclosed in file. + + Examples + -------- + >>> from tempfile import TemporaryFile + >>> a = np.empty(10,dtype='f8,i4,a5') + >>> a[5] = (0.5,10,'abcde') + >>> + >>> fd=TemporaryFile() + >>> a = a.view(a.dtype.newbyteorder('<')) + >>> a.tofile(fd) + >>> + >>> _ = fd.seek(0) + >>> r=np.rec.fromfile(fd, formats='f8,i4,a5', shape=10, + ... byteorder='<') + >>> print(r[5]) + (0.5, 10, b'abcde') + >>> r.shape + (10,) + """ + + if dtype is None and formats is None: + raise TypeError("fromfile() needs a 'dtype' or 'formats' argument") + + # NumPy 1.19.0, 2020-01-01 + shape = _deprecate_shape_0_as_None(shape) + + if shape is None: + shape = (-1,) + elif isinstance(shape, int): + shape = (shape,) + + if hasattr(fd, 'readinto'): + # GH issue 2504. fd supports io.RawIOBase or io.BufferedIOBase + # interface. Example of fd: gzip, BytesIO, BufferedReader + # file already opened + ctx = nullcontext(fd) + else: + # open file + ctx = open(os.fspath(fd), 'rb') + + with ctx as fd: + if offset > 0: + fd.seek(offset, 1) + size = get_remaining_size(fd) + + if dtype is not None: + descr = sb.dtype(dtype) + else: + descr = format_parser( + formats, names, titles, aligned, byteorder + ).dtype + + itemsize = descr.itemsize + + shapeprod = sb.array(shape).prod(dtype=nt.intp) + shapesize = shapeprod * itemsize + if shapesize < 0: + shape = list(shape) + shape[shape.index(-1)] = size // -shapesize + shape = tuple(shape) + shapeprod = sb.array(shape).prod(dtype=nt.intp) + + nbytes = shapeprod * itemsize + + if nbytes > size: + raise ValueError( + "Not enough bytes left in file for specified " + "shape and type." + ) + + # create the array + _array = recarray(shape, descr) + nbytesread = fd.readinto(_array.data) + if nbytesread != nbytes: + raise OSError("Didn't read as many bytes as expected") + + return _array + + +@set_module("numpy.rec") +def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None, + names=None, titles=None, aligned=False, byteorder=None, copy=True): + """ + Construct a record array from a wide-variety of objects. + + A general-purpose record array constructor that dispatches to the + appropriate `recarray` creation function based on the inputs (see Notes). + + Parameters + ---------- + obj : any + Input object. See Notes for details on how various input types are + treated. + dtype : data-type, optional + Valid dtype for array. + shape : int or tuple of ints, optional + Shape of each array. + offset : int, optional + Position in the file or buffer to start reading from. + strides : tuple of ints, optional + Buffer (`buf`) is interpreted according to these strides (strides + define how many bytes each array element, row, column, etc. + occupy in memory). + formats, names, titles, aligned, byteorder : + If `dtype` is ``None``, these arguments are passed to + `numpy.format_parser` to construct a dtype. See that function for + detailed documentation. + copy : bool, optional + Whether to copy the input object (True), or to use a reference instead. + This option only applies when the input is an ndarray or recarray. + Defaults to True. + + Returns + ------- + np.recarray + Record array created from the specified object. + + Notes + ----- + If `obj` is ``None``, then call the `~numpy.recarray` constructor. If + `obj` is a string, then call the `fromstring` constructor. If `obj` is a + list or a tuple, then if the first object is an `~numpy.ndarray`, call + `fromarrays`, otherwise call `fromrecords`. If `obj` is a + `~numpy.recarray`, then make a copy of the data in the recarray + (if ``copy=True``) and use the new formats, names, and titles. If `obj` + is a file, then call `fromfile`. Finally, if obj is an `ndarray`, then + return ``obj.view(recarray)``, making a copy of the data if ``copy=True``. + + Examples + -------- + >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> a + array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + + >>> np.rec.array(a) + rec.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]], + dtype=int64) + + >>> b = [(1, 1), (2, 4), (3, 9)] + >>> c = np.rec.array(b, formats = ['i2', 'f2'], names = ('x', 'y')) + >>> c + rec.array([(1, 1.), (2, 4.), (3, 9.)], + dtype=[('x', '>> c.x + array([1, 2, 3], dtype=int16) + + >>> c.y + array([1., 4., 9.], dtype=float16) + + >>> r = np.rec.array(['abc','def'], names=['col1','col2']) + >>> print(r.col1) + abc + + >>> r.col1 + array('abc', dtype='>> r.col2 + array('def', dtype=' object: ... + def tell(self, /) -> int: ... + def readinto(self, buffer: memoryview, /) -> int: ... + +### + +# exported in `numpy.rec` +class record(np.void): # type: ignore[misc] + __name__: ClassVar[Literal["record"]] = "record" + __module__: Literal["numpy"] = "numpy" + + def pprint(self) -> str: ... + + def __getattribute__(self, attr: str, /) -> Any: ... + def __setattr__(self, attr: str, val: ArrayLike, /) -> None: ... + + @overload + def __getitem__(self, key: str | SupportsIndex, /) -> Incomplete: ... + @overload + def __getitem__(self, key: list[str], /) -> record: ... + +# exported in `numpy.rec` +class recarray(np.ndarray[_ShapeT_co, _DTypeT_co]): + __name__: ClassVar[Literal["recarray"]] = "recarray" + __module__: Literal["numpy.rec"] = "numpy.rec" + + @overload + def __new__( + subtype, + shape: _ShapeLike, + dtype: None = None, + buf: Buffer | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, + *, + formats: DTypeLike | None, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + byteorder: _ByteOrder | None = None, + aligned: bool = False, + order: _OrderKACF = "C", + ) -> _RecArray[record]: ... + @overload + def __new__( + subtype, + shape: _ShapeLike, + dtype: DTypeLike | None, + buf: Buffer | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, + formats: None = None, + names: None = None, + titles: None = None, + byteorder: None = None, + aligned: Literal[False] = False, + order: _OrderKACF = "C", + ) -> _RecArray[Incomplete]: ... + + def __getattribute__(self, attr: str, /) -> Any: ... + def __setattr__(self, attr: str, val: ArrayLike, /) -> None: ... + + def __array_finalize__(self, /, obj: object) -> None: ... + + # + @overload + def field(self, /, attr: int | str, val: ArrayLike) -> None: ... + @overload + def field(self, /, attr: int | str, val: None = None) -> Incomplete: ... + +# exported in `numpy.rec` +class format_parser: + dtype: np.dtype[np.void] + def __init__( + self, + /, + formats: DTypeLike | None, + names: str | Sequence[str] | None, + titles: str | Sequence[str] | None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + ) -> None: ... + +# exported in `numpy.rec` +@overload +def fromarrays( + arrayList: Iterable[ArrayLike], + dtype: DTypeLike | None = None, + shape: _ShapeLike | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, +) -> _RecArray[Any]: ... +@overload +def fromarrays( + arrayList: Iterable[ArrayLike], + dtype: None = None, + shape: _ShapeLike | None = None, + *, + formats: DTypeLike | None, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, +) -> _RecArray[record]: ... + +@overload +def fromrecords( + recList: _ArrayLikeVoid_co | tuple[object, ...] | _NestedSequence[tuple[object, ...]], + dtype: DTypeLike | None = None, + shape: _ShapeLike | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, +) -> _RecArray[record]: ... +@overload +def fromrecords( + recList: _ArrayLikeVoid_co | tuple[object, ...] | _NestedSequence[tuple[object, ...]], + dtype: None = None, + shape: _ShapeLike | None = None, + *, + formats: DTypeLike | None, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, +) -> _RecArray[record]: ... + +# exported in `numpy.rec` +@overload +def fromstring( + datastring: Buffer, + dtype: DTypeLike | None, + shape: _ShapeLike | None = None, + offset: int = 0, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, +) -> _RecArray[record]: ... +@overload +def fromstring( + datastring: Buffer, + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, + *, + formats: DTypeLike | None, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, +) -> _RecArray[record]: ... + +# exported in `numpy.rec` +@overload +def fromfile( + fd: StrOrBytesPath | _SupportsReadInto, + dtype: DTypeLike | None, + shape: _ShapeLike | None = None, + offset: int = 0, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, +) -> _RecArray[Any]: ... +@overload +def fromfile( + fd: StrOrBytesPath | _SupportsReadInto, + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, + *, + formats: DTypeLike | None, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, +) -> _RecArray[record]: ... + +# exported in `numpy.rec` +@overload +def array( + obj: _ScalarT | NDArray[_ScalarT], + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, + strides: tuple[int, ...] | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, + copy: bool = True, +) -> _RecArray[_ScalarT]: ... +@overload +def array( + obj: ArrayLike, + dtype: DTypeLike | None, + shape: _ShapeLike | None = None, + offset: int = 0, + strides: tuple[int, ...] | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, + copy: bool = True, +) -> _RecArray[Any]: ... +@overload +def array( + obj: ArrayLike, + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, + strides: tuple[int, ...] | None = None, + *, + formats: DTypeLike | None, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + copy: bool = True, +) -> _RecArray[record]: ... +@overload +def array( + obj: None, + dtype: DTypeLike | None, + shape: _ShapeLike, + offset: int = 0, + strides: tuple[int, ...] | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, + copy: bool = True, +) -> _RecArray[Any]: ... +@overload +def array( + obj: None, + dtype: None = None, + *, + shape: _ShapeLike, + offset: int = 0, + strides: tuple[int, ...] | None = None, + formats: DTypeLike | None, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + copy: bool = True, +) -> _RecArray[record]: ... +@overload +def array( + obj: _SupportsReadInto, + dtype: DTypeLike | None, + shape: _ShapeLike | None = None, + offset: int = 0, + strides: tuple[int, ...] | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, + copy: bool = True, +) -> _RecArray[Any]: ... +@overload +def array( + obj: _SupportsReadInto, + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, + strides: tuple[int, ...] | None = None, + *, + formats: DTypeLike | None, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + copy: bool = True, +) -> _RecArray[record]: ... + +# exported in `numpy.rec` +def find_duplicate(list: Iterable[_T]) -> list[_T]: ... diff --git a/local_packages/numpy/_core/shape_base.py b/local_packages/numpy/_core/shape_base.py new file mode 100644 index 0000000..39de873 --- /dev/null +++ b/local_packages/numpy/_core/shape_base.py @@ -0,0 +1,996 @@ +__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack', + 'stack', 'unstack', 'vstack'] + +import functools +import itertools +import operator + +from . import fromnumeric as _from_nx, numeric as _nx, overrides +from .multiarray import array, asanyarray, normalize_axis_index + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +def _atleast_1d_dispatcher(*arys): + return arys + + +@array_function_dispatch(_atleast_1d_dispatcher) +def atleast_1d(*arys): + """ + Convert inputs to arrays with at least one dimension. + + Scalar inputs are converted to 1-dimensional arrays, whilst + higher-dimensional inputs are preserved. + + Parameters + ---------- + arys1, arys2, ... : array_like + One or more input arrays. + + Returns + ------- + ret : ndarray + An array, or tuple of arrays, each with ``a.ndim >= 1``. + Copies are made only if necessary. + + See Also + -------- + atleast_2d, atleast_3d + + Examples + -------- + >>> import numpy as np + >>> np.atleast_1d(1.0) + array([1.]) + + >>> x = np.arange(9.0).reshape(3,3) + >>> np.atleast_1d(x) + array([[0., 1., 2.], + [3., 4., 5.], + [6., 7., 8.]]) + >>> np.atleast_1d(x) is x + True + + >>> np.atleast_1d(1, [3, 4]) + (array([1]), array([3, 4])) + + """ + if len(arys) == 1: + result = asanyarray(arys[0]) + if result.ndim == 0: + result = result.reshape(1) + return result + res = [] + for ary in arys: + result = asanyarray(ary) + if result.ndim == 0: + result = result.reshape(1) + res.append(result) + return tuple(res) + + +def _atleast_2d_dispatcher(*arys): + return arys + + +@array_function_dispatch(_atleast_2d_dispatcher) +def atleast_2d(*arys): + """ + View inputs as arrays with at least two dimensions. + + Parameters + ---------- + arys1, arys2, ... : array_like + One or more array-like sequences. Non-array inputs are converted + to arrays. Arrays that already have two or more dimensions are + preserved. + + Returns + ------- + res, res2, ... : ndarray + An array, or tuple of arrays, each with ``a.ndim >= 2``. + Copies are avoided where possible, and views with two or more + dimensions are returned. + + See Also + -------- + atleast_1d, atleast_3d + + Examples + -------- + >>> import numpy as np + >>> np.atleast_2d(3.0) + array([[3.]]) + + >>> x = np.arange(3.0) + >>> np.atleast_2d(x) + array([[0., 1., 2.]]) + >>> np.atleast_2d(x).base is x + True + + >>> np.atleast_2d(1, [1, 2], [[1, 2]]) + (array([[1]]), array([[1, 2]]), array([[1, 2]])) + + """ + res = [] + for ary in arys: + ary = asanyarray(ary) + if ary.ndim == 0: + result = ary.reshape(1, 1) + elif ary.ndim == 1: + result = ary[_nx.newaxis, :] + else: + result = ary + res.append(result) + if len(res) == 1: + return res[0] + else: + return tuple(res) + + +def _atleast_3d_dispatcher(*arys): + return arys + + +@array_function_dispatch(_atleast_3d_dispatcher) +def atleast_3d(*arys): + """ + View inputs as arrays with at least three dimensions. + + Parameters + ---------- + arys1, arys2, ... : array_like + One or more array-like sequences. Non-array inputs are converted to + arrays. Arrays that already have three or more dimensions are + preserved. + + Returns + ------- + res1, res2, ... : ndarray + An array, or tuple of arrays, each with ``a.ndim >= 3``. Copies are + avoided where possible, and views with three or more dimensions are + returned. For example, a 1-D array of shape ``(N,)`` becomes a view + of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a + view of shape ``(M, N, 1)``. + + See Also + -------- + atleast_1d, atleast_2d + + Examples + -------- + >>> import numpy as np + >>> np.atleast_3d(3.0) + array([[[3.]]]) + + >>> x = np.arange(3.0) + >>> np.atleast_3d(x).shape + (1, 3, 1) + + >>> x = np.arange(12.0).reshape(4,3) + >>> np.atleast_3d(x).shape + (4, 3, 1) + >>> np.atleast_3d(x).base is x.base # x is a reshape, so not base itself + True + + >>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]): + ... print(arr, arr.shape) # doctest: +SKIP + ... + [[[1] + [2]]] (1, 2, 1) + [[[1] + [2]]] (1, 2, 1) + [[[1 2]]] (1, 1, 2) + + """ + res = [] + for ary in arys: + ary = asanyarray(ary) + if ary.ndim == 0: + result = ary.reshape(1, 1, 1) + elif ary.ndim == 1: + result = ary[_nx.newaxis, :, _nx.newaxis] + elif ary.ndim == 2: + result = ary[:, :, _nx.newaxis] + else: + result = ary + res.append(result) + if len(res) == 1: + return res[0] + else: + return tuple(res) + + +def _arrays_for_stack_dispatcher(arrays): + if not hasattr(arrays, "__getitem__"): + raise TypeError('arrays to stack must be passed as a "sequence" type ' + 'such as list or tuple.') + + return tuple(arrays) + + +def _vhstack_dispatcher(tup, *, dtype=None, casting=None): + return _arrays_for_stack_dispatcher(tup) + + +@array_function_dispatch(_vhstack_dispatcher) +def vstack(tup, *, dtype=None, casting="same_kind"): + """ + Stack arrays in sequence vertically (row wise). + + This is equivalent to concatenation along the first axis after 1-D arrays + of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by + `vsplit`. + + This function makes most sense for arrays with up to 3 dimensions. For + instance, for pixel-data with a height (first axis), width (second axis), + and r/g/b channels (third axis). The functions `concatenate`, `stack` and + `block` provide more general stacking and concatenation operations. + + Parameters + ---------- + tup : sequence of ndarrays + The arrays must have the same shape along all but the first axis. + 1-D arrays must have the same length. In the case of a single + array_like input, it will be treated as a sequence of arrays; i.e., + each element along the zeroth axis is treated as a separate array. + + dtype : str or dtype + If provided, the destination array will have this dtype. Cannot be + provided together with `out`. + + .. versionadded:: 1.24 + + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Defaults to 'same_kind'. + + .. versionadded:: 1.24 + + Returns + ------- + stacked : ndarray + The array formed by stacking the given arrays, will be at least 2-D. + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. + block : Assemble an nd-array from nested lists of blocks. + hstack : Stack arrays in sequence horizontally (column wise). + dstack : Stack arrays in sequence depth wise (along third axis). + column_stack : Stack 1-D arrays as columns into a 2-D array. + vsplit : Split an array into multiple sub-arrays vertically (row-wise). + unstack : Split an array into a tuple of sub-arrays along an axis. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([1, 2, 3]) + >>> b = np.array([4, 5, 6]) + >>> np.vstack((a,b)) + array([[1, 2, 3], + [4, 5, 6]]) + + >>> a = np.array([[1], [2], [3]]) + >>> b = np.array([[4], [5], [6]]) + >>> np.vstack((a,b)) + array([[1], + [2], + [3], + [4], + [5], + [6]]) + + """ + arrs = atleast_2d(*tup) + if not isinstance(arrs, tuple): + arrs = (arrs,) + return _nx.concatenate(arrs, 0, dtype=dtype, casting=casting) + + +@array_function_dispatch(_vhstack_dispatcher) +def hstack(tup, *, dtype=None, casting="same_kind"): + """ + Stack arrays in sequence horizontally (column wise). + + This is equivalent to concatenation along the second axis, except for 1-D + arrays where it concatenates along the first axis. Rebuilds arrays divided + by `hsplit`. + + This function makes most sense for arrays with up to 3 dimensions. For + instance, for pixel-data with a height (first axis), width (second axis), + and r/g/b channels (third axis). The functions `concatenate`, `stack` and + `block` provide more general stacking and concatenation operations. + + Parameters + ---------- + tup : sequence of ndarrays + The arrays must have the same shape along all but the second axis, + except 1-D arrays which can be any length. In the case of a single + array_like input, it will be treated as a sequence of arrays; i.e., + each element along the zeroth axis is treated as a separate array. + + dtype : str or dtype + If provided, the destination array will have this dtype. Cannot be + provided together with `out`. + + .. versionadded:: 1.24 + + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Defaults to 'same_kind'. + + .. versionadded:: 1.24 + + Returns + ------- + stacked : ndarray + The array formed by stacking the given arrays. + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. + block : Assemble an nd-array from nested lists of blocks. + vstack : Stack arrays in sequence vertically (row wise). + dstack : Stack arrays in sequence depth wise (along third axis). + column_stack : Stack 1-D arrays as columns into a 2-D array. + hsplit : Split an array into multiple sub-arrays + horizontally (column-wise). + unstack : Split an array into a tuple of sub-arrays along an axis. + + Examples + -------- + >>> import numpy as np + >>> a = np.array((1,2,3)) + >>> b = np.array((4,5,6)) + >>> np.hstack((a,b)) + array([1, 2, 3, 4, 5, 6]) + >>> a = np.array([[1],[2],[3]]) + >>> b = np.array([[4],[5],[6]]) + >>> np.hstack((a,b)) + array([[1, 4], + [2, 5], + [3, 6]]) + + """ + arrs = atleast_1d(*tup) + if not isinstance(arrs, tuple): + arrs = (arrs,) + # As a special case, dimension 0 of 1-dimensional arrays is "horizontal" + if arrs and arrs[0].ndim == 1: + return _nx.concatenate(arrs, 0, dtype=dtype, casting=casting) + else: + return _nx.concatenate(arrs, 1, dtype=dtype, casting=casting) + + +def _stack_dispatcher(arrays, axis=None, out=None, *, + dtype=None, casting=None): + arrays = _arrays_for_stack_dispatcher(arrays) + if out is not None: + # optimize for the typical case where only arrays is provided + arrays = list(arrays) + arrays.append(out) + return arrays + + +@array_function_dispatch(_stack_dispatcher) +def stack(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"): + """ + Join a sequence of arrays along a new axis. + + The ``axis`` parameter specifies the index of the new axis in the + dimensions of the result. For example, if ``axis=0`` it will be the first + dimension and if ``axis=-1`` it will be the last dimension. + + Parameters + ---------- + arrays : sequence of ndarrays + Each array must have the same shape. In the case of a single ndarray + array_like input, it will be treated as a sequence of arrays; i.e., + each element along the zeroth axis is treated as a separate array. + + axis : int, optional + The axis in the result array along which the input arrays are stacked. + + out : ndarray, optional + If provided, the destination to place the result. The shape must be + correct, matching that of what stack would have returned if no + out argument were specified. + + dtype : str or dtype + If provided, the destination array will have this dtype. Cannot be + provided together with `out`. + + .. versionadded:: 1.24 + + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Defaults to 'same_kind'. + + .. versionadded:: 1.24 + + + Returns + ------- + stacked : ndarray + The stacked array has one more dimension than the input arrays. + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + block : Assemble an nd-array from nested lists of blocks. + split : Split array into a list of multiple sub-arrays of equal size. + unstack : Split an array into a tuple of sub-arrays along an axis. + + Examples + -------- + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> arrays = [rng.normal(size=(3,4)) for _ in range(10)] + >>> np.stack(arrays, axis=0).shape + (10, 3, 4) + + >>> np.stack(arrays, axis=1).shape + (3, 10, 4) + + >>> np.stack(arrays, axis=2).shape + (3, 4, 10) + + >>> a = np.array([1, 2, 3]) + >>> b = np.array([4, 5, 6]) + >>> np.stack((a, b)) + array([[1, 2, 3], + [4, 5, 6]]) + + >>> np.stack((a, b), axis=-1) + array([[1, 4], + [2, 5], + [3, 6]]) + + """ + arrays = [asanyarray(arr) for arr in arrays] + if not arrays: + raise ValueError('need at least one array to stack') + + shapes = {arr.shape for arr in arrays} + if len(shapes) != 1: + raise ValueError('all input arrays must have the same shape') + + result_ndim = arrays[0].ndim + 1 + axis = normalize_axis_index(axis, result_ndim) + + sl = (slice(None),) * axis + (_nx.newaxis,) + expanded_arrays = [arr[sl] for arr in arrays] + return _nx.concatenate(expanded_arrays, axis=axis, out=out, + dtype=dtype, casting=casting) + +def _unstack_dispatcher(x, /, *, axis=None): + return (x,) + +@array_function_dispatch(_unstack_dispatcher) +def unstack(x, /, *, axis=0): + """ + Split an array into a sequence of arrays along the given axis. + + The ``axis`` parameter specifies the dimension along which the array will + be split. For example, if ``axis=0`` (the default) it will be the first + dimension and if ``axis=-1`` it will be the last dimension. + + The result is a tuple of arrays split along ``axis``. + + .. versionadded:: 2.1.0 + + Parameters + ---------- + x : ndarray + The array to be unstacked. + axis : int, optional + Axis along which the array will be split. Default: ``0``. + + Returns + ------- + unstacked : tuple of ndarrays + The unstacked arrays. + + See Also + -------- + stack : Join a sequence of arrays along a new axis. + concatenate : Join a sequence of arrays along an existing axis. + block : Assemble an nd-array from nested lists of blocks. + split : Split array into a list of multiple sub-arrays of equal size. + + Notes + ----- + ``unstack`` serves as the reverse operation of :py:func:`stack`, i.e., + ``stack(unstack(x, axis=axis), axis=axis) == x``. + + This function is equivalent to ``tuple(np.moveaxis(x, axis, 0))``, since + iterating on an array iterates along the first axis. + + Examples + -------- + >>> arr = np.arange(24).reshape((2, 3, 4)) + >>> np.unstack(arr) + (array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]), + array([[12, 13, 14, 15], + [16, 17, 18, 19], + [20, 21, 22, 23]])) + >>> np.unstack(arr, axis=1) + (array([[ 0, 1, 2, 3], + [12, 13, 14, 15]]), + array([[ 4, 5, 6, 7], + [16, 17, 18, 19]]), + array([[ 8, 9, 10, 11], + [20, 21, 22, 23]])) + >>> arr2 = np.stack(np.unstack(arr, axis=1), axis=1) + >>> arr2.shape + (2, 3, 4) + >>> np.all(arr == arr2) + np.True_ + + """ + if x.ndim == 0: + raise ValueError("Input array must be at least 1-d.") + return tuple(_nx.moveaxis(x, axis, 0)) + + +# Internal functions to eliminate the overhead of repeated dispatch in one of +# the two possible paths inside np.block. +# Use getattr to protect against __array_function__ being disabled. +_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size) +_ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim) +_concatenate = getattr(_from_nx.concatenate, + '__wrapped__', _from_nx.concatenate) + + +def _block_format_index(index): + """ + Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``. + """ + idx_str = ''.join(f'[{i}]' for i in index if i is not None) + return 'arrays' + idx_str + + +def _block_check_depths_match(arrays, parent_index=[]): + """ + Recursive function checking that the depths of nested lists in `arrays` + all match. Mismatch raises a ValueError as described in the block + docstring below. + + The entire index (rather than just the depth) needs to be calculated + for each innermost list, in case an error needs to be raised, so that + the index of the offending list can be printed as part of the error. + + Parameters + ---------- + arrays : nested list of arrays + The arrays to check + parent_index : list of int + The full index of `arrays` within the nested lists passed to + `_block_check_depths_match` at the top of the recursion. + + Returns + ------- + first_index : list of int + The full index of an element from the bottom of the nesting in + `arrays`. If any element at the bottom is an empty list, this will + refer to it, and the last index along the empty axis will be None. + max_arr_ndim : int + The maximum of the ndims of the arrays nested in `arrays`. + final_size: int + The number of elements in the final array. This is used the motivate + the choice of algorithm used using benchmarking wisdom. + + """ + if isinstance(arrays, tuple): + # not strictly necessary, but saves us from: + # - more than one way to do things - no point treating tuples like + # lists + # - horribly confusing behaviour that results when tuples are + # treated like ndarray + raise TypeError( + f'{_block_format_index(parent_index)} is a tuple. ' + 'Only lists can be used to arrange blocks, and np.block does ' + 'not allow implicit conversion from tuple to ndarray.' + ) + elif isinstance(arrays, list) and len(arrays) > 0: + idxs_ndims = (_block_check_depths_match(arr, parent_index + [i]) + for i, arr in enumerate(arrays)) + + first_index, max_arr_ndim, final_size = next(idxs_ndims) + for index, ndim, size in idxs_ndims: + final_size += size + if ndim > max_arr_ndim: + max_arr_ndim = ndim + if len(index) != len(first_index): + raise ValueError( + "List depths are mismatched. First element was at " + f"depth {len(first_index)}, but there is an element at " + f"depth {len(index)} ({_block_format_index(index)})" + ) + # propagate our flag that indicates an empty list at the bottom + if index[-1] is None: + first_index = index + + return first_index, max_arr_ndim, final_size + elif isinstance(arrays, list) and len(arrays) == 0: + # We've 'bottomed out' on an empty list + return parent_index + [None], 0, 0 + else: + # We've 'bottomed out' - arrays is either a scalar or an array + size = _size(arrays) + return parent_index, _ndim(arrays), size + + +def _atleast_nd(a, ndim): + # Ensures `a` has at least `ndim` dimensions by prepending + # ones to `a.shape` as necessary + return array(a, ndmin=ndim, copy=None, subok=True) + + +def _accumulate(values): + return list(itertools.accumulate(values)) + + +def _concatenate_shapes(shapes, axis): + """Given array shapes, return the resulting shape and slices prefixes. + + These help in nested concatenation. + + Returns + ------- + shape: tuple of int + This tuple satisfies:: + + shape, _ = _concatenate_shapes([arr.shape for shape in arrs], axis) + shape == concatenate(arrs, axis).shape + + slice_prefixes: tuple of (slice(start, end), ) + For a list of arrays being concatenated, this returns the slice + in the larger array at axis that needs to be sliced into. + + For example, the following holds:: + + ret = concatenate([a, b, c], axis) + _, (sl_a, sl_b, sl_c) = concatenate_slices([a, b, c], axis) + + ret[(slice(None),) * axis + sl_a] == a + ret[(slice(None),) * axis + sl_b] == b + ret[(slice(None),) * axis + sl_c] == c + + These are called slice prefixes since they are used in the recursive + blocking algorithm to compute the left-most slices during the + recursion. Therefore, they must be prepended to rest of the slice + that was computed deeper in the recursion. + + These are returned as tuples to ensure that they can quickly be added + to existing slice tuple without creating a new tuple every time. + + """ + # Cache a result that will be reused. + shape_at_axis = [shape[axis] for shape in shapes] + + # Take a shape, any shape + first_shape = shapes[0] + first_shape_pre = first_shape[:axis] + first_shape_post = first_shape[axis + 1:] + + if any(shape[:axis] != first_shape_pre or + shape[axis + 1:] != first_shape_post for shape in shapes): + raise ValueError( + f'Mismatched array shapes in block along axis {axis}.') + + shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis + 1:]) + + offsets_at_axis = _accumulate(shape_at_axis) + slice_prefixes = [(slice(start, end),) + for start, end in zip([0] + offsets_at_axis, + offsets_at_axis)] + return shape, slice_prefixes + + +def _block_info_recursion(arrays, max_depth, result_ndim, depth=0): + """ + Returns the shape of the final array, along with a list + of slices and a list of arrays that can be used for assignment inside the + new array + + Parameters + ---------- + arrays : nested list of arrays + The arrays to check + max_depth : list of int + The number of nested lists + result_ndim : int + The number of dimensions in thefinal array. + + Returns + ------- + shape : tuple of int + The shape that the final array will take on. + slices: list of tuple of slices + The slices into the full array required for assignment. These are + required to be prepended with ``(Ellipsis, )`` to obtain to correct + final index. + arrays: list of ndarray + The data to assign to each slice of the full array + + """ + if depth < max_depth: + shapes, slices, arrays = zip( + *[_block_info_recursion(arr, max_depth, result_ndim, depth + 1) + for arr in arrays]) + + axis = result_ndim - max_depth + depth + shape, slice_prefixes = _concatenate_shapes(shapes, axis) + + # Prepend the slice prefix and flatten the slices + slices = [slice_prefix + the_slice + for slice_prefix, inner_slices in zip(slice_prefixes, slices) + for the_slice in inner_slices] + + # Flatten the array list + arrays = functools.reduce(operator.add, arrays) + + return shape, slices, arrays + else: + # We've 'bottomed out' - arrays is either a scalar or an array + # type(arrays) is not list + # Return the slice and the array inside a list to be consistent with + # the recursive case. + arr = _atleast_nd(arrays, result_ndim) + return arr.shape, [()], [arr] + + +def _block(arrays, max_depth, result_ndim, depth=0): + """ + Internal implementation of block based on repeated concatenation. + `arrays` is the argument passed to + block. `max_depth` is the depth of nested lists within `arrays` and + `result_ndim` is the greatest of the dimensions of the arrays in + `arrays` and the depth of the lists in `arrays` (see block docstring + for details). + """ + if depth < max_depth: + arrs = [_block(arr, max_depth, result_ndim, depth + 1) + for arr in arrays] + return _concatenate(arrs, axis=-(max_depth - depth)) + else: + # We've 'bottomed out' - arrays is either a scalar or an array + # type(arrays) is not list + return _atleast_nd(arrays, result_ndim) + + +def _block_dispatcher(arrays): + # Use type(...) is list to match the behavior of np.block(), which special + # cases list specifically rather than allowing for generic iterables or + # tuple. Also, we know that list.__array_function__ will never exist. + if isinstance(arrays, list): + for subarrays in arrays: + yield from _block_dispatcher(subarrays) + else: + yield arrays + + +@array_function_dispatch(_block_dispatcher) +def block(arrays): + """ + Assemble an nd-array from nested lists of blocks. + + Blocks in the innermost lists are concatenated (see `concatenate`) along + the last dimension (-1), then these are concatenated along the + second-last dimension (-2), and so on until the outermost list is reached. + + Blocks can be of any dimension, but will not be broadcasted using + the normal rules. Instead, leading axes of size 1 are inserted, + to make ``block.ndim`` the same for all blocks. This is primarily useful + for working with scalars, and means that code like ``np.block([v, 1])`` + is valid, where ``v.ndim == 1``. + + When the nested list is two levels deep, this allows block matrices to be + constructed from their components. + + Parameters + ---------- + arrays : nested list of array_like or scalars (but not tuples) + If passed a single ndarray or scalar (a nested list of depth 0), this + is returned unmodified (and not copied). + + Elements shapes must match along the appropriate axes (without + broadcasting), but leading 1s will be prepended to the shape as + necessary to make the dimensions match. + + Returns + ------- + block_array : ndarray + The array assembled from the given blocks. + + The dimensionality of the output is equal to the greatest of: + + * the dimensionality of all the inputs + * the depth to which the input list is nested + + Raises + ------ + ValueError + * If list depths are mismatched - for instance, ``[[a, b], c]`` is + illegal, and should be spelt ``[[a, b], [c]]`` + * If lists are empty - for instance, ``[[a, b], []]`` + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. + vstack : Stack arrays in sequence vertically (row wise). + hstack : Stack arrays in sequence horizontally (column wise). + dstack : Stack arrays in sequence depth wise (along third axis). + column_stack : Stack 1-D arrays as columns into a 2-D array. + vsplit : Split an array into multiple sub-arrays vertically (row-wise). + unstack : Split an array into a tuple of sub-arrays along an axis. + + Notes + ----- + When called with only scalars, ``np.block`` is equivalent to an ndarray + call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to + ``np.array([[1, 2], [3, 4]])``. + + This function does not enforce that the blocks lie on a fixed grid. + ``np.block([[a, b], [c, d]])`` is not restricted to arrays of the form:: + + AAAbb + AAAbb + cccDD + + But is also allowed to produce, for some ``a, b, c, d``:: + + AAAbb + AAAbb + cDDDD + + Since concatenation happens along the last axis first, `block` is *not* + capable of producing the following directly:: + + AAAbb + cccbb + cccDD + + Matlab's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is + equivalent to ``np.block([[A, B, ...], [p, q, ...]])``. + + Examples + -------- + The most common use of this function is to build a block matrix: + + >>> import numpy as np + >>> A = np.eye(2) * 2 + >>> B = np.eye(3) * 3 + >>> np.block([ + ... [A, np.zeros((2, 3))], + ... [np.ones((3, 2)), B ] + ... ]) + array([[2., 0., 0., 0., 0.], + [0., 2., 0., 0., 0.], + [1., 1., 3., 0., 0.], + [1., 1., 0., 3., 0.], + [1., 1., 0., 0., 3.]]) + + With a list of depth 1, `block` can be used as `hstack`: + + >>> np.block([1, 2, 3]) # hstack([1, 2, 3]) + array([1, 2, 3]) + + >>> a = np.array([1, 2, 3]) + >>> b = np.array([4, 5, 6]) + >>> np.block([a, b, 10]) # hstack([a, b, 10]) + array([ 1, 2, 3, 4, 5, 6, 10]) + + >>> A = np.ones((2, 2), int) + >>> B = 2 * A + >>> np.block([A, B]) # hstack([A, B]) + array([[1, 1, 2, 2], + [1, 1, 2, 2]]) + + With a list of depth 2, `block` can be used in place of `vstack`: + + >>> a = np.array([1, 2, 3]) + >>> b = np.array([4, 5, 6]) + >>> np.block([[a], [b]]) # vstack([a, b]) + array([[1, 2, 3], + [4, 5, 6]]) + + >>> A = np.ones((2, 2), int) + >>> B = 2 * A + >>> np.block([[A], [B]]) # vstack([A, B]) + array([[1, 1], + [1, 1], + [2, 2], + [2, 2]]) + + It can also be used in place of `atleast_1d` and `atleast_2d`: + + >>> a = np.array(0) + >>> b = np.array([1]) + >>> np.block([a]) # atleast_1d(a) + array([0]) + >>> np.block([b]) # atleast_1d(b) + array([1]) + + >>> np.block([[a]]) # atleast_2d(a) + array([[0]]) + >>> np.block([[b]]) # atleast_2d(b) + array([[1]]) + + + """ + arrays, list_ndim, result_ndim, final_size = _block_setup(arrays) + + # It was found through benchmarking that making an array of final size + # around 256x256 was faster by straight concatenation on a + # i7-7700HQ processor and dual channel ram 2400MHz. + # It didn't seem to matter heavily on the dtype used. + # + # A 2D array using repeated concatenation requires 2 copies of the array. + # + # The fastest algorithm will depend on the ratio of CPU power to memory + # speed. + # One can monitor the results of the benchmark + # https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d + # to tune this parameter until a C version of the `_block_info_recursion` + # algorithm is implemented which would likely be faster than the python + # version. + if list_ndim * final_size > (2 * 512 * 512): + return _block_slicing(arrays, list_ndim, result_ndim) + else: + return _block_concatenate(arrays, list_ndim, result_ndim) + + +# These helper functions are mostly used for testing. +# They allow us to write tests that directly call `_block_slicing` +# or `_block_concatenate` without blocking large arrays to force the wisdom +# to trigger the desired path. +def _block_setup(arrays): + """ + Returns + (`arrays`, list_ndim, result_ndim, final_size) + """ + bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays) + list_ndim = len(bottom_index) + if bottom_index and bottom_index[-1] is None: + raise ValueError( + f'List at {_block_format_index(bottom_index)} cannot be empty' + ) + result_ndim = max(arr_ndim, list_ndim) + return arrays, list_ndim, result_ndim, final_size + + +def _block_slicing(arrays, list_ndim, result_ndim): + shape, slices, arrays = _block_info_recursion( + arrays, list_ndim, result_ndim) + dtype = _nx.result_type(*[arr.dtype for arr in arrays]) + + # Test preferring F only in the case that all input arrays are F + F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays) + C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays) + order = 'F' if F_order and not C_order else 'C' + result = _nx.empty(shape=shape, dtype=dtype, order=order) + # Note: In a c implementation, the function + # PyArray_CreateMultiSortedStridePerm could be used for more advanced + # guessing of the desired order. + + for the_slice, arr in zip(slices, arrays): + result[(Ellipsis,) + the_slice] = arr + return result + + +def _block_concatenate(arrays, list_ndim, result_ndim): + result = _block(arrays, list_ndim, result_ndim) + if list_ndim == 0: + # Catch an edge case where _block returns a view because + # `arrays` is a single numpy array and not a list of numpy arrays. + # This might copy scalars or lists twice, but this isn't a likely + # usecase for those interested in performance + result = result.copy() + return result diff --git a/local_packages/numpy/_core/shape_base.pyi b/local_packages/numpy/_core/shape_base.pyi new file mode 100644 index 0000000..b87cae8 --- /dev/null +++ b/local_packages/numpy/_core/shape_base.pyi @@ -0,0 +1,182 @@ +from collections.abc import Sequence +from typing import Any, SupportsIndex, TypeVar, overload + +from numpy import _CastingKind, generic +from numpy._typing import ArrayLike, DTypeLike, NDArray, _ArrayLike, _DTypeLike + +__all__ = [ + "atleast_1d", + "atleast_2d", + "atleast_3d", + "block", + "hstack", + "stack", + "unstack", + "vstack", +] + +_T = TypeVar("_T") +_ScalarT = TypeVar("_ScalarT", bound=generic) +_ScalarT1 = TypeVar("_ScalarT1", bound=generic) +_ScalarT2 = TypeVar("_ScalarT2", bound=generic) +_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) + +### + +# keep in sync with `numpy.ma.extras.atleast_1d` +@overload +def atleast_1d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... +@overload +def atleast_1d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... +@overload +def atleast_1d(a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]) -> tuple[NDArray[_ScalarT], ...]: ... +@overload +def atleast_1d(a0: ArrayLike, /) -> NDArray[Any]: ... +@overload +def atleast_1d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[Any]]: ... +@overload +def atleast_1d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... + +# keep in sync with `numpy.ma.extras.atleast_2d` +@overload +def atleast_2d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... +@overload +def atleast_2d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... +@overload +def atleast_2d(a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]) -> tuple[NDArray[_ScalarT], ...]: ... +@overload +def atleast_2d(a0: ArrayLike, /) -> NDArray[Any]: ... +@overload +def atleast_2d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[Any]]: ... +@overload +def atleast_2d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... + +# keep in sync with `numpy.ma.extras.atleast_3d` +@overload +def atleast_3d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... +@overload +def atleast_3d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... +@overload +def atleast_3d(a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]) -> tuple[NDArray[_ScalarT], ...]: ... +@overload +def atleast_3d(a0: ArrayLike, /) -> NDArray[Any]: ... +@overload +def atleast_3d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[Any]]: ... +@overload +def atleast_3d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... + +# used by numpy.lib._shape_base_impl +def _arrays_for_stack_dispatcher(arrays: Sequence[_T]) -> tuple[_T, ...]: ... + +# keep in sync with `numpy.ma.extras.vstack` +@overload +def vstack( + tup: Sequence[_ArrayLike[_ScalarT]], + *, + dtype: None = None, + casting: _CastingKind = "same_kind" +) -> NDArray[_ScalarT]: ... +@overload +def vstack( + tup: Sequence[ArrayLike], + *, + dtype: _DTypeLike[_ScalarT], + casting: _CastingKind = "same_kind" +) -> NDArray[_ScalarT]: ... +@overload +def vstack( + tup: Sequence[ArrayLike], + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind" +) -> NDArray[Any]: ... + +# keep in sync with `numpy.ma.extras.hstack` +@overload +def hstack( + tup: Sequence[_ArrayLike[_ScalarT]], + *, + dtype: None = None, + casting: _CastingKind = "same_kind" +) -> NDArray[_ScalarT]: ... +@overload +def hstack( + tup: Sequence[ArrayLike], + *, + dtype: _DTypeLike[_ScalarT], + casting: _CastingKind = "same_kind" +) -> NDArray[_ScalarT]: ... +@overload +def hstack( + tup: Sequence[ArrayLike], + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind" +) -> NDArray[Any]: ... + +# keep in sync with `numpy.ma.extras.stack` +@overload +def stack( + arrays: Sequence[_ArrayLike[_ScalarT]], + axis: SupportsIndex = 0, + out: None = None, + *, + dtype: None = None, + casting: _CastingKind = "same_kind" +) -> NDArray[_ScalarT]: ... +@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex = 0, + out: None = None, + *, + dtype: _DTypeLike[_ScalarT], + casting: _CastingKind = "same_kind" +) -> NDArray[_ScalarT]: ... +@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex = 0, + out: None = None, + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind" +) -> NDArray[Any]: ... +@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex, + out: _ArrayT, + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind", +) -> _ArrayT: ... +@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex = 0, + *, + out: _ArrayT, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind", +) -> _ArrayT: ... + +@overload +def unstack( + array: _ArrayLike[_ScalarT], + /, + *, + axis: int = 0, +) -> tuple[NDArray[_ScalarT], ...]: ... +@overload +def unstack( + array: ArrayLike, + /, + *, + axis: int = 0, +) -> tuple[NDArray[Any], ...]: ... + +@overload +def block(arrays: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... +@overload +def block(arrays: ArrayLike) -> NDArray[Any]: ... diff --git a/local_packages/numpy/_core/strings.py b/local_packages/numpy/_core/strings.py new file mode 100644 index 0000000..e9fa7f5 --- /dev/null +++ b/local_packages/numpy/_core/strings.py @@ -0,0 +1,1813 @@ +""" +This module contains a set of functions for vectorized string +operations. +""" + +import functools +import sys + +import numpy as np +from numpy import ( + add, + equal, + greater, + greater_equal, + less, + less_equal, + multiply as _multiply_ufunc, + not_equal, +) +from numpy._core.multiarray import _vec_string +from numpy._core.overrides import array_function_dispatch, set_module +from numpy._core.umath import ( + _center, + _expandtabs, + _expandtabs_length, + _ljust, + _lstrip_chars, + _lstrip_whitespace, + _partition, + _partition_index, + _replace, + _rjust, + _rpartition, + _rpartition_index, + _rstrip_chars, + _rstrip_whitespace, + _slice, + _strip_chars, + _strip_whitespace, + _zfill, + count as _count_ufunc, + endswith as _endswith_ufunc, + find as _find_ufunc, + index as _index_ufunc, + isalnum, + isalpha, + isdecimal, + isdigit, + islower, + isnumeric, + isspace, + istitle, + isupper, + rfind as _rfind_ufunc, + rindex as _rindex_ufunc, + startswith as _startswith_ufunc, + str_len, +) + + +def _override___module__(): + for ufunc in [ + isalnum, isalpha, isdecimal, isdigit, islower, isnumeric, isspace, + istitle, isupper, str_len, + ]: + ufunc.__module__ = "numpy.strings" + ufunc.__qualname__ = ufunc.__name__ + + +_override___module__() + + +__all__ = [ + # UFuncs + "equal", "not_equal", "less", "less_equal", "greater", "greater_equal", + "add", "multiply", "isalpha", "isdigit", "isspace", "isalnum", "islower", + "isupper", "istitle", "isdecimal", "isnumeric", "str_len", "find", + "rfind", "index", "rindex", "count", "startswith", "endswith", "lstrip", + "rstrip", "strip", "replace", "expandtabs", "center", "ljust", "rjust", + "zfill", "partition", "rpartition", "slice", + + # _vec_string - Will gradually become ufuncs as well + "upper", "lower", "swapcase", "capitalize", "title", + + # _vec_string - Will probably not become ufuncs + "mod", "decode", "encode", "translate", + + # Removed from namespace until behavior has been crystallized + # "join", "split", "rsplit", "splitlines", +] + + +MAX = np.iinfo(np.int64).max + +array_function_dispatch = functools.partial( + array_function_dispatch, module='numpy.strings') + + +def _get_num_chars(a): + """ + Helper function that returns the number of characters per field in + a string or unicode array. This is to abstract out the fact that + for a unicode array this is itemsize / 4. + """ + if issubclass(a.dtype.type, np.str_): + return a.itemsize // 4 + return a.itemsize + + +def _to_bytes_or_str_array(result, output_dtype_like): + """ + Helper function to cast a result back into an array + with the appropriate dtype if an object array must be used + as an intermediary. + """ + output_dtype_like = np.asarray(output_dtype_like) + if result.size == 0: + # Calling asarray & tolist in an empty array would result + # in losing shape information + return result.astype(output_dtype_like.dtype) + ret = np.asarray(result.tolist()) + if isinstance(output_dtype_like.dtype, np.dtypes.StringDType): + return ret.astype(type(output_dtype_like.dtype)) + return ret.astype(type(output_dtype_like.dtype)(_get_num_chars(ret))) + + +def _clean_args(*args): + """ + Helper function for delegating arguments to Python string + functions. + + Many of the Python string operations that have optional arguments + do not use 'None' to indicate a default value. In these cases, + we need to remove all None arguments, and those following them. + """ + newargs = [] + for chk in args: + if chk is None: + break + newargs.append(chk) + return newargs + + +def _multiply_dispatcher(a, i): + return (a,) + + +@set_module("numpy.strings") +@array_function_dispatch(_multiply_dispatcher) +def multiply(a, i): + """ + Return (a * i), that is string multiple concatenation, + element-wise. + + Values in ``i`` of less than 0 are treated as 0 (which yields an + empty string). + + Parameters + ---------- + a : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype + + i : array_like, with any integer dtype + + Returns + ------- + out : ndarray + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types + + Examples + -------- + >>> import numpy as np + >>> a = np.array(["a", "b", "c"]) + >>> np.strings.multiply(a, 3) + array(['aaa', 'bbb', 'ccc'], dtype='>> i = np.array([1, 2, 3]) + >>> np.strings.multiply(a, i) + array(['a', 'bb', 'ccc'], dtype='>> np.strings.multiply(np.array(['a']), i) + array(['a', 'aa', 'aaa'], dtype='>> a = np.array(['a', 'b', 'c', 'd', 'e', 'f']).reshape((2, 3)) + >>> np.strings.multiply(a, 3) + array([['aaa', 'bbb', 'ccc'], + ['ddd', 'eee', 'fff']], dtype='>> np.strings.multiply(a, i) + array([['a', 'bb', 'ccc'], + ['d', 'ee', 'fff']], dtype=' sys.maxsize / np.maximum(i, 1)): + raise OverflowError("Overflow encountered in string multiply") + + buffersizes = a_len * i + out_dtype = f"{a.dtype.char}{buffersizes.max()}" + out = np.empty_like(a, shape=buffersizes.shape, dtype=out_dtype) + return _multiply_ufunc(a, i, out=out) + + +def _mod_dispatcher(a, values): + return (a, values) + + +@set_module("numpy.strings") +@array_function_dispatch(_mod_dispatcher) +def mod(a, values): + """ + Return (a % i), that is pre-Python 2.6 string formatting + (interpolation), element-wise for a pair of array_likes of str + or unicode. + + Parameters + ---------- + a : array_like, with `np.bytes_` or `np.str_` dtype + + values : array_like of values + These values will be element-wise interpolated into the string. + + Returns + ------- + out : ndarray + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types + + Examples + -------- + >>> import numpy as np + >>> a = np.array(["NumPy is a %s library"]) + >>> np.strings.mod(a, values=["Python"]) + array(['NumPy is a Python library'], dtype='>> a = np.array([b'%d bytes', b'%d bits']) + >>> values = np.array([8, 64]) + >>> np.strings.mod(a, values) + array([b'8 bytes', b'64 bits'], dtype='|S7') + + """ + return _to_bytes_or_str_array( + _vec_string(a, np.object_, '__mod__', (values,)), a) + + +@set_module("numpy.strings") +def find(a, sub, start=0, end=None): + """ + For each element, return the lowest index in the string where + substring ``sub`` is found, such that ``sub`` is contained in the + range [``start``, ``end``). + + Parameters + ---------- + a : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype + + sub : array_like, with `np.bytes_` or `np.str_` dtype + The substring to search for. + + start, end : array_like, with any integer dtype + The range to look in, interpreted as in slice notation. + + Returns + ------- + y : ndarray + Output array of ints + + See Also + -------- + str.find + + Examples + -------- + >>> import numpy as np + >>> a = np.array(["NumPy is a Python library"]) + >>> np.strings.find(a, "Python") + array([11]) + + """ + end = end if end is not None else MAX + return _find_ufunc(a, sub, start, end) + + +@set_module("numpy.strings") +def rfind(a, sub, start=0, end=None): + """ + For each element, return the highest index in the string where + substring ``sub`` is found, such that ``sub`` is contained in the + range [``start``, ``end``). + + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + + sub : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + The substring to search for. + + start, end : array_like, with any integer dtype + The range to look in, interpreted as in slice notation. + + Returns + ------- + y : ndarray + Output array of ints + + See Also + -------- + str.rfind + + Examples + -------- + >>> import numpy as np + >>> a = np.array(["Computer Science"]) + >>> np.strings.rfind(a, "Science", start=0, end=None) + array([9]) + >>> np.strings.rfind(a, "Science", start=0, end=8) + array([-1]) + >>> b = np.array(["Computer Science", "Science"]) + >>> np.strings.rfind(b, "Science", start=0, end=None) + array([9, 0]) + + """ + end = end if end is not None else MAX + return _rfind_ufunc(a, sub, start, end) + + +@set_module("numpy.strings") +def index(a, sub, start=0, end=None): + """ + Like `find`, but raises :exc:`ValueError` when the substring is not found. + + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + + sub : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + + start, end : array_like, with any integer dtype, optional + + Returns + ------- + out : ndarray + Output array of ints. + + See Also + -------- + find, str.index + + Examples + -------- + >>> import numpy as np + >>> a = np.array(["Computer Science"]) + >>> np.strings.index(a, "Science", start=0, end=None) + array([9]) + + """ + end = end if end is not None else MAX + return _index_ufunc(a, sub, start, end) + + +@set_module("numpy.strings") +def rindex(a, sub, start=0, end=None): + """ + Like `rfind`, but raises :exc:`ValueError` when the substring `sub` is + not found. + + Parameters + ---------- + a : array-like, with `np.bytes_` or `np.str_` dtype + + sub : array-like, with `np.bytes_` or `np.str_` dtype + + start, end : array-like, with any integer dtype, optional + + Returns + ------- + out : ndarray + Output array of ints. + + See Also + -------- + rfind, str.rindex + + Examples + -------- + >>> a = np.array(["Computer Science"]) + >>> np.strings.rindex(a, "Science", start=0, end=None) + array([9]) + + """ + end = end if end is not None else MAX + return _rindex_ufunc(a, sub, start, end) + + +@set_module("numpy.strings") +def count(a, sub, start=0, end=None): + """ + Returns an array with the number of non-overlapping occurrences of + substring ``sub`` in the range [``start``, ``end``). + + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + + sub : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + The substring to search for. + + start, end : array_like, with any integer dtype + The range to look in, interpreted as in slice notation. + + Returns + ------- + y : ndarray + Output array of ints + + See Also + -------- + str.count + + Examples + -------- + >>> import numpy as np + >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> c + array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> np.strings.count(c, 'A') + array([3, 1, 1]) + >>> np.strings.count(c, 'aA') + array([3, 1, 0]) + >>> np.strings.count(c, 'A', start=1, end=4) + array([2, 1, 1]) + >>> np.strings.count(c, 'A', start=1, end=3) + array([1, 0, 0]) + + """ + end = end if end is not None else MAX + return _count_ufunc(a, sub, start, end) + + +@set_module("numpy.strings") +def startswith(a, prefix, start=0, end=None): + """ + Returns a boolean array which is `True` where the string element + in ``a`` starts with ``prefix``, otherwise `False`. + + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + + prefix : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + + start, end : array_like, with any integer dtype + With ``start``, test beginning at that position. With ``end``, + stop comparing at that position. + + Returns + ------- + out : ndarray + Output array of bools + + See Also + -------- + str.startswith + + Examples + -------- + >>> import numpy as np + >>> s = np.array(['foo', 'bar']) + >>> s + array(['foo', 'bar'], dtype='>> np.strings.startswith(s, 'fo') + array([True, False]) + >>> np.strings.startswith(s, 'o', start=1, end=2) + array([True, False]) + + """ + end = end if end is not None else MAX + return _startswith_ufunc(a, prefix, start, end) + + +@set_module("numpy.strings") +def endswith(a, suffix, start=0, end=None): + """ + Returns a boolean array which is `True` where the string element + in ``a`` ends with ``suffix``, otherwise `False`. + + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + + suffix : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + + start, end : array_like, with any integer dtype + With ``start``, test beginning at that position. With ``end``, + stop comparing at that position. + + Returns + ------- + out : ndarray + Output array of bools + + See Also + -------- + str.endswith + + Examples + -------- + >>> import numpy as np + >>> s = np.array(['foo', 'bar']) + >>> s + array(['foo', 'bar'], dtype='>> np.strings.endswith(s, 'ar') + array([False, True]) + >>> np.strings.endswith(s, 'a', start=1, end=2) + array([False, True]) + + """ + end = end if end is not None else MAX + return _endswith_ufunc(a, suffix, start, end) + + +def _code_dispatcher(a, encoding=None, errors=None): + return (a,) + + +@set_module("numpy.strings") +@array_function_dispatch(_code_dispatcher) +def decode(a, encoding=None, errors=None): + r""" + Calls :meth:`bytes.decode` element-wise. + + The set of available codecs comes from the Python standard library, + and may be extended at runtime. For more information, see the + :mod:`codecs` module. + + Parameters + ---------- + a : array_like, with ``bytes_`` dtype + + encoding : str, optional + The name of an encoding + + errors : str, optional + Specifies how to handle encoding errors + + Returns + ------- + out : ndarray + + See Also + -------- + :py:meth:`bytes.decode` + + Notes + ----- + The type of the result will depend on the encoding specified. + + Examples + -------- + >>> import numpy as np + >>> c = np.array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@', + ... b'\x81\x82\xc2\xc1\xc2\x82\x81']) + >>> c + array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@', + b'\x81\x82\xc2\xc1\xc2\x82\x81'], dtype='|S7') + >>> np.strings.decode(c, encoding='cp037') + array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> import numpy as np + >>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> np.strings.encode(a, encoding='cp037') + array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@', + b'\x81\x82\xc2\xc1\xc2\x82\x81'], dtype='|S7') + + """ + return _to_bytes_or_str_array( + _vec_string(a, np.object_, 'encode', _clean_args(encoding, errors)), + np.bytes_(b'')) + + +def _expandtabs_dispatcher(a, tabsize=None): + return (a,) + + +@set_module("numpy.strings") +@array_function_dispatch(_expandtabs_dispatcher) +def expandtabs(a, tabsize=8): + """ + Return a copy of each string element where all tab characters are + replaced by one or more spaces. + + Calls :meth:`str.expandtabs` element-wise. + + Return a copy of each string element where all tab characters are + replaced by one or more spaces, depending on the current column + and the given `tabsize`. The column number is reset to zero after + each newline occurring in the string. This doesn't understand other + non-printing characters or escape sequences. + + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + Input array + tabsize : int, optional + Replace tabs with `tabsize` number of spaces. If not given defaults + to 8 spaces. + + Returns + ------- + out : ndarray + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input type + + See Also + -------- + str.expandtabs + + Examples + -------- + >>> import numpy as np + >>> a = np.array(['\t\tHello\tworld']) + >>> np.strings.expandtabs(a, tabsize=4) # doctest: +SKIP + array([' Hello world'], dtype='>> import numpy as np + >>> c = np.array(['a1b2','1b2a','b2a1','2a1b']); c + array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='>> np.strings.center(c, width=9) + array([' a1b2 ', ' 1b2a ', ' b2a1 ', ' 2a1b '], dtype='>> np.strings.center(c, width=9, fillchar='*') + array(['***a1b2**', '***1b2a**', '***b2a1**', '***2a1b**'], dtype='>> np.strings.center(c, width=1) + array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='>> import numpy as np + >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> np.strings.ljust(c, width=3) + array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> np.strings.ljust(c, width=9) + array(['aAaAaA ', ' aA ', 'abBABba '], dtype='>> import numpy as np + >>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> np.strings.rjust(a, width=3) + array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> np.strings.rjust(a, width=9) + array([' aAaAaA', ' aA ', ' abBABba'], dtype='>> import numpy as np + >>> np.strings.zfill(['1', '-1', '+1'], 3) + array(['001', '-01', '+01'], dtype='>> import numpy as np + >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> c + array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> np.strings.lstrip(c, 'a') + array(['AaAaA', ' aA ', 'bBABba'], dtype='>> np.strings.lstrip(c, 'A') # leaves c unchanged + array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> (np.strings.lstrip(c, ' ') == np.strings.lstrip(c, '')).all() + np.False_ + >>> (np.strings.lstrip(c, ' ') == np.strings.lstrip(c)).all() + np.True_ + + """ + if chars is None: + return _lstrip_whitespace(a) + return _lstrip_chars(a, chars) + + +@set_module("numpy.strings") +def rstrip(a, chars=None): + """ + For each element in `a`, return a copy with the trailing characters + removed. + + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + chars : scalar with the same dtype as ``a``, optional + The ``chars`` argument is a string specifying the set of + characters to be removed. If ``None``, the ``chars`` + argument defaults to removing whitespace. The ``chars`` argument + is not a prefix or suffix; rather, all combinations of its + values are stripped. + + Returns + ------- + out : ndarray + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types + + See Also + -------- + str.rstrip + + Examples + -------- + >>> import numpy as np + >>> c = np.array(['aAaAaA', 'abBABba']) + >>> c + array(['aAaAaA', 'abBABba'], dtype='>> np.strings.rstrip(c, 'a') + array(['aAaAaA', 'abBABb'], dtype='>> np.strings.rstrip(c, 'A') + array(['aAaAa', 'abBABba'], dtype='>> import numpy as np + >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> c + array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> np.strings.strip(c) + array(['aAaAaA', 'aA', 'abBABba'], dtype='>> np.strings.strip(c, 'a') + array(['AaAaA', ' aA ', 'bBABb'], dtype='>> np.strings.strip(c, 'A') + array(['aAaAa', ' aA ', 'abBABba'], dtype='>> import numpy as np + >>> c = np.array(['a1b c', '1bca', 'bca1']); c + array(['a1b c', '1bca', 'bca1'], dtype='>> np.strings.upper(c) + array(['A1B C', '1BCA', 'BCA1'], dtype='>> import numpy as np + >>> c = np.array(['A1B C', '1BCA', 'BCA1']); c + array(['A1B C', '1BCA', 'BCA1'], dtype='>> np.strings.lower(c) + array(['a1b c', '1bca', 'bca1'], dtype='>> import numpy as np + >>> c=np.array(['a1B c','1b Ca','b Ca1','cA1b'],'S5'); c + array(['a1B c', '1b Ca', 'b Ca1', 'cA1b'], + dtype='|S5') + >>> np.strings.swapcase(c) + array(['A1b C', '1B cA', 'B cA1', 'Ca1B'], + dtype='|S5') + + """ + a_arr = np.asarray(a) + return _vec_string(a_arr, a_arr.dtype, 'swapcase') + + +@set_module("numpy.strings") +@array_function_dispatch(_unary_op_dispatcher) +def capitalize(a): + """ + Return a copy of ``a`` with only the first character of each element + capitalized. + + Calls :meth:`str.capitalize` element-wise. + + For byte strings, this method is locale-dependent. + + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + Input array of strings to capitalize. + + Returns + ------- + out : ndarray + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types + + See Also + -------- + str.capitalize + + Examples + -------- + >>> import numpy as np + >>> c = np.array(['a1b2','1b2a','b2a1','2a1b'],'S4'); c + array(['a1b2', '1b2a', 'b2a1', '2a1b'], + dtype='|S4') + >>> np.strings.capitalize(c) + array(['A1b2', '1b2a', 'B2a1', '2a1b'], + dtype='|S4') + + """ + a_arr = np.asarray(a) + return _vec_string(a_arr, a_arr.dtype, 'capitalize') + + +@set_module("numpy.strings") +@array_function_dispatch(_unary_op_dispatcher) +def title(a): + """ + Return element-wise title cased version of string or unicode. + + Title case words start with uppercase characters, all remaining cased + characters are lowercase. + + Calls :meth:`str.title` element-wise. + + For 8-bit strings, this method is locale-dependent. + + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + Input array. + + Returns + ------- + out : ndarray + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types + + See Also + -------- + str.title + + Examples + -------- + >>> import numpy as np + >>> c=np.array(['a1b c','1b ca','b ca1','ca1b'],'S5'); c + array(['a1b c', '1b ca', 'b ca1', 'ca1b'], + dtype='|S5') + >>> np.strings.title(c) + array(['A1B C', '1B Ca', 'B Ca1', 'Ca1B'], + dtype='|S5') + + """ + a_arr = np.asarray(a) + return _vec_string(a_arr, a_arr.dtype, 'title') + + +def _replace_dispatcher(a, old, new, count=None): + return (a,) + + +@set_module("numpy.strings") +@array_function_dispatch(_replace_dispatcher) +def replace(a, old, new, count=-1): + """ + For each element in ``a``, return a copy of the string with + occurrences of substring ``old`` replaced by ``new``. + + Parameters + ---------- + a : array_like, with ``bytes_`` or ``str_`` dtype + + old, new : array_like, with ``bytes_`` or ``str_`` dtype + + count : array_like, with ``int_`` dtype + If the optional argument ``count`` is given, only the first + ``count`` occurrences are replaced. + + Returns + ------- + out : ndarray + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types + + See Also + -------- + str.replace + + Examples + -------- + >>> import numpy as np + >>> a = np.array(["That is a mango", "Monkeys eat mangos"]) + >>> np.strings.replace(a, 'mango', 'banana') + array(['That is a banana', 'Monkeys eat bananas'], dtype='>> a = np.array(["The dish is fresh", "This is it"]) + >>> np.strings.replace(a, 'is', 'was') + array(['The dwash was fresh', 'Thwas was it'], dtype='>> import numpy as np + >>> np.strings.join('-', 'osd') # doctest: +SKIP + array('o-s-d', dtype='>> np.strings.join(['-', '.'], ['ghc', 'osd']) # doctest: +SKIP + array(['g-h-c', 'o.s.d'], dtype='>> import numpy as np + >>> x = np.array("Numpy is nice!") + >>> np.strings.split(x, " ") # doctest: +SKIP + array(list(['Numpy', 'is', 'nice!']), dtype=object) # doctest: +SKIP + + >>> np.strings.split(x, " ", 1) # doctest: +SKIP + array(list(['Numpy', 'is nice!']), dtype=object) # doctest: +SKIP + + See Also + -------- + str.split, rsplit + + """ + # This will return an array of lists of different sizes, so we + # leave it as an object array + return _vec_string( + a, np.object_, 'split', [sep] + _clean_args(maxsplit)) + + +@array_function_dispatch(_split_dispatcher) +def _rsplit(a, sep=None, maxsplit=None): + """ + For each element in `a`, return a list of the words in the + string, using `sep` as the delimiter string. + + Calls :meth:`str.rsplit` element-wise. + + Except for splitting from the right, `rsplit` + behaves like `split`. + + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + + sep : str or unicode, optional + If `sep` is not specified or None, any whitespace string + is a separator. + maxsplit : int, optional + If `maxsplit` is given, at most `maxsplit` splits are done, + the rightmost ones. + + Returns + ------- + out : ndarray + Array of list objects + + See Also + -------- + str.rsplit, split + + Examples + -------- + >>> import numpy as np + >>> a = np.array(['aAaAaA', 'abBABba']) + >>> np.strings.rsplit(a, 'A') # doctest: +SKIP + array([list(['a', 'a', 'a', '']), # doctest: +SKIP + list(['abB', 'Bba'])], dtype=object) # doctest: +SKIP + + """ + # This will return an array of lists of different sizes, so we + # leave it as an object array + return _vec_string( + a, np.object_, 'rsplit', [sep] + _clean_args(maxsplit)) + + +def _splitlines_dispatcher(a, keepends=None): + return (a,) + + +@array_function_dispatch(_splitlines_dispatcher) +def _splitlines(a, keepends=None): + """ + For each element in `a`, return a list of the lines in the + element, breaking at line boundaries. + + Calls :meth:`str.splitlines` element-wise. + + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + + keepends : bool, optional + Line breaks are not included in the resulting list unless + keepends is given and true. + + Returns + ------- + out : ndarray + Array of list objects + + See Also + -------- + str.splitlines + + Examples + -------- + >>> np.char.splitlines("first line\\nsecond line") + array(list(['first line', 'second line']), dtype=object) + >>> a = np.array(["first\\nsecond", "third\\nfourth"]) + >>> np.char.splitlines(a) + array([list(['first', 'second']), list(['third', 'fourth'])], dtype=object) + + """ + return _vec_string( + a, np.object_, 'splitlines', _clean_args(keepends)) + + +def _partition_dispatcher(a, sep): + return (a,) + + +@set_module("numpy.strings") +@array_function_dispatch(_partition_dispatcher) +def partition(a, sep): + """ + Partition each element in ``a`` around ``sep``. + + For each element in ``a``, split the element at the first + occurrence of ``sep``, and return a 3-tuple containing the part + before the separator, the separator itself, and the part after + the separator. If the separator is not found, the first item of + the tuple will contain the whole string, and the second and third + ones will be the empty string. + + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + Input array + sep : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + Separator to split each string element in ``a``. + + Returns + ------- + out : 3-tuple: + - array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the + part before the separator + - array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the + separator + - array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the + part after the separator + + See Also + -------- + str.partition + + Examples + -------- + >>> import numpy as np + >>> x = np.array(["Numpy is nice!"]) + >>> np.strings.partition(x, " ") + (array(['Numpy'], dtype='>> import numpy as np + >>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> np.strings.rpartition(a, 'A') + (array(['aAaAa', ' a', 'abB'], dtype='>> import numpy as np + >>> a = np.array(['a1b c', '1bca', 'bca1']) + >>> table = a[0].maketrans('abc', '123') + >>> deletechars = ' ' + >>> np.char.translate(a, table, deletechars) + array(['112 3', '1231', '2311'], dtype='>> import numpy as np + >>> a = np.array(['hello', 'world']) + >>> np.strings.slice(a, 2) + array(['he', 'wo'], dtype='>> np.strings.slice(a, 2, None) + array(['llo', 'rld'], dtype='>> np.strings.slice(a, 1, 5, 2) + array(['el', 'ol'], dtype='>> np.strings.slice(a, np.array([1, 2]), np.array([4, 5])) + array(['ell', 'rld'], dtype='>> b = np.array(['hello world', 'γεια σου κόσμε', '你好世界', '👋 🌍'], + ... dtype=np.dtypes.StringDType()) + >>> np.strings.slice(b, -2) + array(['hello wor', 'γεια σου κόσ', '你好', '👋'], dtype=StringDType()) + + >>> np.strings.slice(b, -2, None) + array(['ld', 'με', '世界', ' 🌍'], dtype=StringDType()) + + >>> np.strings.slice(b, [3, -10, 2, -3], [-1, -2, -1, 3]) + array(['lo worl', ' σου κόσ', '世', '👋 🌍'], dtype=StringDType()) + + >>> np.strings.slice(b, None, None, -1) + array(['dlrow olleh', 'εμσόκ υοσ αιεγ', '界世好你', '🌍 👋'], + dtype=StringDType()) + + """ + # Just like in the construction of a regular slice object, if only start + # is specified then start will become stop, see logic in slice_new. + if stop is np._NoValue: + stop = start + start = None + + # adjust start, stop, step to be integers, see logic in PySlice_Unpack + if step is None: + step = 1 + step = np.asanyarray(step) + if not np.issubdtype(step.dtype, np.integer): + raise TypeError(f"unsupported type {step.dtype} for operand 'step'") + if np.any(step == 0): + raise ValueError("slice step cannot be zero") + + if start is None: + start = np.where(step < 0, np.iinfo(np.intp).max, 0) + + if stop is None: + stop = np.where(step < 0, np.iinfo(np.intp).min, np.iinfo(np.intp).max) + + return _slice(a, start, stop, step) diff --git a/local_packages/numpy/_core/strings.pyi b/local_packages/numpy/_core/strings.pyi new file mode 100644 index 0000000..2707604 --- /dev/null +++ b/local_packages/numpy/_core/strings.pyi @@ -0,0 +1,536 @@ +from typing import TypeAlias, overload + +import numpy as np +from numpy._globals import _NoValueType +from numpy._typing import ( + NDArray, + _AnyShape, + _ArrayLikeAnyString_co as UST_co, + _ArrayLikeBytes_co as S_co, + _ArrayLikeInt_co as i_co, + _ArrayLikeStr_co as U_co, + _ArrayLikeString_co as T_co, + _SupportsArray, +) + +__all__ = [ + "add", + "capitalize", + "center", + "count", + "decode", + "encode", + "endswith", + "equal", + "expandtabs", + "find", + "greater", + "greater_equal", + "index", + "isalnum", + "isalpha", + "isdecimal", + "isdigit", + "islower", + "isnumeric", + "isspace", + "istitle", + "isupper", + "less", + "less_equal", + "ljust", + "lower", + "lstrip", + "mod", + "multiply", + "not_equal", + "partition", + "replace", + "rfind", + "rindex", + "rjust", + "rpartition", + "rstrip", + "startswith", + "str_len", + "strip", + "swapcase", + "title", + "translate", + "upper", + "zfill", + "slice", +] + +_StringDTypeArray: TypeAlias = np.ndarray[_AnyShape, np.dtypes.StringDType] +_StringDTypeSupportsArray: TypeAlias = _SupportsArray[np.dtypes.StringDType] +_StringDTypeOrUnicodeArray: TypeAlias = np.ndarray[_AnyShape, np.dtype[np.str_]] | _StringDTypeArray + +@overload +def equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def not_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def not_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def not_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def greater_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def greater_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def greater_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def less_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def less_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def less_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def greater(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def greater(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def greater(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def less(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def less(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def less(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def add(x1: U_co, x2: U_co) -> NDArray[np.str_]: ... +@overload +def add(x1: S_co, x2: S_co) -> NDArray[np.bytes_]: ... +@overload +def add(x1: _StringDTypeSupportsArray, x2: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def add(x1: T_co, x2: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def multiply(a: U_co, i: i_co) -> NDArray[np.str_]: ... +@overload +def multiply(a: S_co, i: i_co) -> NDArray[np.bytes_]: ... +@overload +def multiply(a: _StringDTypeSupportsArray, i: i_co) -> _StringDTypeArray: ... +@overload +def multiply(a: T_co, i: i_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def mod(a: U_co, value: object) -> NDArray[np.str_]: ... +@overload +def mod(a: S_co, value: object) -> NDArray[np.bytes_]: ... +@overload +def mod(a: _StringDTypeSupportsArray, value: object) -> _StringDTypeArray: ... +@overload +def mod(a: T_co, value: object) -> _StringDTypeOrUnicodeArray: ... + +def isalpha(x: UST_co) -> NDArray[np.bool]: ... +def isalnum(a: UST_co) -> NDArray[np.bool]: ... +def isdigit(x: UST_co) -> NDArray[np.bool]: ... +def isspace(x: UST_co) -> NDArray[np.bool]: ... +def isdecimal(x: U_co | T_co) -> NDArray[np.bool]: ... +def isnumeric(x: U_co | T_co) -> NDArray[np.bool]: ... +def islower(a: UST_co) -> NDArray[np.bool]: ... +def istitle(a: UST_co) -> NDArray[np.bool]: ... +def isupper(a: UST_co) -> NDArray[np.bool]: ... + +def str_len(x: UST_co) -> NDArray[np.int_]: ... + +@overload +def find( + a: U_co, + sub: U_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.int_]: ... +@overload +def find( + a: S_co, + sub: S_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.int_]: ... +@overload +def find( + a: T_co, + sub: T_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.int_]: ... + +@overload +def rfind( + a: U_co, + sub: U_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.int_]: ... +@overload +def rfind( + a: S_co, + sub: S_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.int_]: ... +@overload +def rfind( + a: T_co, + sub: T_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.int_]: ... + +@overload +def index( + a: U_co, + sub: U_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.int_]: ... +@overload +def index( + a: S_co, + sub: S_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.int_]: ... +@overload +def index( + a: T_co, + sub: T_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.int_]: ... + +@overload +def rindex( + a: U_co, + sub: U_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.int_]: ... +@overload +def rindex( + a: S_co, + sub: S_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.int_]: ... +@overload +def rindex( + a: T_co, + sub: T_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.int_]: ... + +@overload +def count( + a: U_co, + sub: U_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.int_]: ... +@overload +def count( + a: S_co, + sub: S_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.int_]: ... +@overload +def count( + a: T_co, + sub: T_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.int_]: ... + +@overload +def startswith( + a: U_co, + prefix: U_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.bool]: ... +@overload +def startswith( + a: S_co, + prefix: S_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.bool]: ... +@overload +def startswith( + a: T_co, + prefix: T_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.bool]: ... + +@overload +def endswith( + a: U_co, + suffix: U_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.bool]: ... +@overload +def endswith( + a: S_co, + suffix: S_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.bool]: ... +@overload +def endswith( + a: T_co, + suffix: T_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.bool]: ... + +def decode( + a: S_co, + encoding: str | None = None, + errors: str | None = None, +) -> NDArray[np.str_]: ... +def encode( + a: U_co | T_co, + encoding: str | None = None, + errors: str | None = None, +) -> NDArray[np.bytes_]: ... + +@overload +def expandtabs(a: U_co, tabsize: i_co = 8) -> NDArray[np.str_]: ... +@overload +def expandtabs(a: S_co, tabsize: i_co = 8) -> NDArray[np.bytes_]: ... +@overload +def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = 8) -> _StringDTypeArray: ... +@overload +def expandtabs(a: T_co, tabsize: i_co = 8) -> _StringDTypeOrUnicodeArray: ... + +@overload +def center(a: U_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.str_]: ... +@overload +def center(a: S_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.bytes_]: ... +@overload +def center(a: _StringDTypeSupportsArray, width: i_co, fillchar: UST_co = " ") -> _StringDTypeArray: ... +@overload +def center(a: T_co, width: i_co, fillchar: UST_co = " ") -> _StringDTypeOrUnicodeArray: ... + +@overload +def ljust(a: U_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.str_]: ... +@overload +def ljust(a: S_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.bytes_]: ... +@overload +def ljust(a: _StringDTypeSupportsArray, width: i_co, fillchar: UST_co = " ") -> _StringDTypeArray: ... +@overload +def ljust(a: T_co, width: i_co, fillchar: UST_co = " ") -> _StringDTypeOrUnicodeArray: ... + +@overload +def rjust(a: U_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.str_]: ... +@overload +def rjust(a: S_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.bytes_]: ... +@overload +def rjust(a: _StringDTypeSupportsArray, width: i_co, fillchar: UST_co = " ") -> _StringDTypeArray: ... +@overload +def rjust(a: T_co, width: i_co, fillchar: UST_co = " ") -> _StringDTypeOrUnicodeArray: ... + +@overload +def lstrip(a: U_co, chars: U_co | None = None) -> NDArray[np.str_]: ... +@overload +def lstrip(a: S_co, chars: S_co | None = None) -> NDArray[np.bytes_]: ... +@overload +def lstrip(a: _StringDTypeSupportsArray, chars: T_co | None = None) -> _StringDTypeArray: ... +@overload +def lstrip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... + +@overload +def rstrip(a: U_co, chars: U_co | None = None) -> NDArray[np.str_]: ... +@overload +def rstrip(a: S_co, chars: S_co | None = None) -> NDArray[np.bytes_]: ... +@overload +def rstrip(a: _StringDTypeSupportsArray, chars: T_co | None = None) -> _StringDTypeArray: ... +@overload +def rstrip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... + +@overload +def strip(a: U_co, chars: U_co | None = None) -> NDArray[np.str_]: ... +@overload +def strip(a: S_co, chars: S_co | None = None) -> NDArray[np.bytes_]: ... +@overload +def strip(a: _StringDTypeSupportsArray, chars: T_co | None = None) -> _StringDTypeArray: ... +@overload +def strip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... + +@overload +def zfill(a: U_co, width: i_co) -> NDArray[np.str_]: ... +@overload +def zfill(a: S_co, width: i_co) -> NDArray[np.bytes_]: ... +@overload +def zfill(a: _StringDTypeSupportsArray, width: i_co) -> _StringDTypeArray: ... +@overload +def zfill(a: T_co, width: i_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def upper(a: U_co) -> NDArray[np.str_]: ... +@overload +def upper(a: S_co) -> NDArray[np.bytes_]: ... +@overload +def upper(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def upper(a: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def lower(a: U_co) -> NDArray[np.str_]: ... +@overload +def lower(a: S_co) -> NDArray[np.bytes_]: ... +@overload +def lower(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def lower(a: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def swapcase(a: U_co) -> NDArray[np.str_]: ... +@overload +def swapcase(a: S_co) -> NDArray[np.bytes_]: ... +@overload +def swapcase(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def swapcase(a: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def capitalize(a: U_co) -> NDArray[np.str_]: ... +@overload +def capitalize(a: S_co) -> NDArray[np.bytes_]: ... +@overload +def capitalize(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def capitalize(a: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def title(a: U_co) -> NDArray[np.str_]: ... +@overload +def title(a: S_co) -> NDArray[np.bytes_]: ... +@overload +def title(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def title(a: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def replace( + a: U_co, + old: U_co, + new: U_co, + count: i_co = -1, +) -> NDArray[np.str_]: ... +@overload +def replace( + a: S_co, + old: S_co, + new: S_co, + count: i_co = -1, +) -> NDArray[np.bytes_]: ... +@overload +def replace( + a: _StringDTypeSupportsArray, + old: _StringDTypeSupportsArray, + new: _StringDTypeSupportsArray, + count: i_co = -1, +) -> _StringDTypeArray: ... +@overload +def replace( + a: T_co, + old: T_co, + new: T_co, + count: i_co = -1, +) -> _StringDTypeOrUnicodeArray: ... + +@overload +def partition(a: U_co, sep: U_co) -> NDArray[np.str_]: ... +@overload +def partition(a: S_co, sep: S_co) -> NDArray[np.bytes_]: ... +@overload +def partition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def partition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def rpartition(a: U_co, sep: U_co) -> NDArray[np.str_]: ... +@overload +def rpartition(a: S_co, sep: S_co) -> NDArray[np.bytes_]: ... +@overload +def rpartition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def rpartition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def translate( + a: U_co, + table: str, + deletechars: str | None = None, +) -> NDArray[np.str_]: ... +@overload +def translate( + a: S_co, + table: str, + deletechars: str | None = None, +) -> NDArray[np.bytes_]: ... +@overload +def translate( + a: _StringDTypeSupportsArray, + table: str, + deletechars: str | None = None, +) -> _StringDTypeArray: ... +@overload +def translate( + a: T_co, + table: str, + deletechars: str | None = None, +) -> _StringDTypeOrUnicodeArray: ... + +# +@overload +def slice( + a: U_co, + start: i_co | None = None, + stop: i_co | _NoValueType | None = ..., # = np._NoValue + step: i_co | None = None, + /, +) -> NDArray[np.str_]: ... +@overload +def slice( + a: S_co, + start: i_co | None = None, + stop: i_co | _NoValueType | None = ..., # = np._NoValue + step: i_co | None = None, + /, +) -> NDArray[np.bytes_]: ... +@overload +def slice( + a: _StringDTypeSupportsArray, + start: i_co | None = None, + stop: i_co | _NoValueType | None = ..., # = np._NoValue + step: i_co | None = None, + /, +) -> _StringDTypeArray: ... +@overload +def slice( + a: T_co, + start: i_co | None = None, + stop: i_co | _NoValueType | None = ..., # = np._NoValue + step: i_co | None = None, + /, +) -> _StringDTypeOrUnicodeArray: ... diff --git a/local_packages/numpy/_core/tests/_locales.py b/local_packages/numpy/_core/tests/_locales.py new file mode 100644 index 0000000..debda96 --- /dev/null +++ b/local_packages/numpy/_core/tests/_locales.py @@ -0,0 +1,72 @@ +"""Provide class for testing in French locale + +""" +import locale +import sys + +import pytest + +__ALL__ = ['CommaDecimalPointLocale'] + + +def find_comma_decimal_point_locale(): + """See if platform has a decimal point as comma locale. + + Find a locale that uses a comma instead of a period as the + decimal point. + + Returns + ------- + old_locale: str + Locale when the function was called. + new_locale: {str, None) + First French locale found, None if none found. + + """ + if sys.platform == 'win32': + locales = ['FRENCH'] + else: + locales = ['fr_FR', 'fr_FR.UTF-8', 'fi_FI', 'fi_FI.UTF-8'] + + old_locale = locale.getlocale(locale.LC_NUMERIC) + new_locale = None + try: + for loc in locales: + try: + locale.setlocale(locale.LC_NUMERIC, loc) + new_locale = loc + break + except locale.Error: + pass + finally: + locale.setlocale(locale.LC_NUMERIC, locale=old_locale) + return old_locale, new_locale + + +class CommaDecimalPointLocale: + """Sets LC_NUMERIC to a locale with comma as decimal point. + + Classes derived from this class have setup and teardown methods that run + tests with locale.LC_NUMERIC set to a locale where commas (',') are used as + the decimal point instead of periods ('.'). On exit the locale is restored + to the initial locale. It also serves as context manager with the same + effect. If no such locale is available, the test is skipped. + + """ + (cur_locale, tst_locale) = find_comma_decimal_point_locale() + + def setup_method(self): + if self.tst_locale is None: + pytest.skip("No French locale available") + locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale) + + def teardown_method(self): + locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale) + + def __enter__(self): + if self.tst_locale is None: + pytest.skip("No French locale available") + locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale) + + def __exit__(self, type, value, traceback): + locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale) diff --git a/local_packages/numpy/_core/tests/_natype.py b/local_packages/numpy/_core/tests/_natype.py new file mode 100644 index 0000000..539dfd2 --- /dev/null +++ b/local_packages/numpy/_core/tests/_natype.py @@ -0,0 +1,144 @@ +# Vendored implementation of pandas.NA, adapted from pandas/_libs/missing.pyx +# +# This is vendored to avoid adding pandas as a test dependency. + +__all__ = ["pd_NA"] + +import numbers + +import numpy as np + + +def _create_binary_propagating_op(name, is_divmod=False): + is_cmp = name.strip("_") in ["eq", "ne", "le", "lt", "ge", "gt"] + + def method(self, other): + if ( + other is pd_NA + or isinstance(other, (str, bytes, numbers.Number, np.bool)) + or (isinstance(other, np.ndarray) and not other.shape) + ): + # Need the other.shape clause to handle NumPy scalars, + # since we do a setitem on `out` below, which + # won't work for NumPy scalars. + if is_divmod: + return pd_NA, pd_NA + else: + return pd_NA + + elif isinstance(other, np.ndarray): + out = np.empty(other.shape, dtype=object) + out[:] = pd_NA + + if is_divmod: + return out, out.copy() + else: + return out + + elif is_cmp and isinstance(other, (np.datetime64, np.timedelta64)): + return pd_NA + + elif isinstance(other, np.datetime64): + if name in ["__sub__", "__rsub__"]: + return pd_NA + + elif isinstance(other, np.timedelta64): + if name in ["__sub__", "__rsub__", "__add__", "__radd__"]: + return pd_NA + + return NotImplemented + + method.__name__ = name + return method + + +def _create_unary_propagating_op(name: str): + def method(self): + return pd_NA + + method.__name__ = name + return method + + +class NAType: + def __repr__(self) -> str: + return "" + + def __format__(self, format_spec) -> str: + try: + return self.__repr__().__format__(format_spec) + except ValueError: + return self.__repr__() + + def __bool__(self): + raise TypeError("boolean value of NA is ambiguous") + + def __hash__(self): + return 2**61 - 1 + + def __reduce__(self): + return "pd_NA" + + # Binary arithmetic and comparison ops -> propagate + + __add__ = _create_binary_propagating_op("__add__") + __radd__ = _create_binary_propagating_op("__radd__") + __sub__ = _create_binary_propagating_op("__sub__") + __rsub__ = _create_binary_propagating_op("__rsub__") + __mul__ = _create_binary_propagating_op("__mul__") + __rmul__ = _create_binary_propagating_op("__rmul__") + __matmul__ = _create_binary_propagating_op("__matmul__") + __rmatmul__ = _create_binary_propagating_op("__rmatmul__") + __truediv__ = _create_binary_propagating_op("__truediv__") + __rtruediv__ = _create_binary_propagating_op("__rtruediv__") + __floordiv__ = _create_binary_propagating_op("__floordiv__") + __rfloordiv__ = _create_binary_propagating_op("__rfloordiv__") + __mod__ = _create_binary_propagating_op("__mod__") + __rmod__ = _create_binary_propagating_op("__rmod__") + __divmod__ = _create_binary_propagating_op("__divmod__", is_divmod=True) + __rdivmod__ = _create_binary_propagating_op("__rdivmod__", is_divmod=True) + # __lshift__ and __rshift__ are not implemented + + __eq__ = _create_binary_propagating_op("__eq__") + __ne__ = _create_binary_propagating_op("__ne__") + __le__ = _create_binary_propagating_op("__le__") + __lt__ = _create_binary_propagating_op("__lt__") + __gt__ = _create_binary_propagating_op("__gt__") + __ge__ = _create_binary_propagating_op("__ge__") + + # Unary ops + + __neg__ = _create_unary_propagating_op("__neg__") + __pos__ = _create_unary_propagating_op("__pos__") + __abs__ = _create_unary_propagating_op("__abs__") + __invert__ = _create_unary_propagating_op("__invert__") + + # Logical ops using Kleene logic + + def __and__(self, other): + if other is False: + return False + elif other is True or other is pd_NA: + return pd_NA + return NotImplemented + + __rand__ = __and__ + + def __or__(self, other): + if other is True: + return True + elif other is False or other is pd_NA: + return pd_NA + return NotImplemented + + __ror__ = __or__ + + def __xor__(self, other): + if other is False or other is True or other is pd_NA: + return pd_NA + return NotImplemented + + __rxor__ = __xor__ + + +pd_NA = NAType() diff --git a/local_packages/numpy/_core/tests/data/astype_copy.pkl b/local_packages/numpy/_core/tests/data/astype_copy.pkl new file mode 100644 index 0000000..7397c97 Binary files /dev/null and b/local_packages/numpy/_core/tests/data/astype_copy.pkl differ diff --git a/local_packages/numpy/_core/tests/data/generate_umath_validation_data.cpp b/local_packages/numpy/_core/tests/data/generate_umath_validation_data.cpp new file mode 100644 index 0000000..88ff45e --- /dev/null +++ b/local_packages/numpy/_core/tests/data/generate_umath_validation_data.cpp @@ -0,0 +1,170 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +struct ufunc { + std::string name; + double (*f32func)(double); + long double (*f64func)(long double); + float f32ulp; + float f64ulp; +}; + +template +T +RandomFloat(T a, T b) +{ + T random = ((T)rand()) / (T)RAND_MAX; + T diff = b - a; + T r = random * diff; + return a + r; +} + +template +void +append_random_array(std::vector &arr, T min, T max, size_t N) +{ + for (size_t ii = 0; ii < N; ++ii) + arr.emplace_back(RandomFloat(min, max)); +} + +template +std::vector +computeTrueVal(const std::vector &in, T2 (*mathfunc)(T2)) +{ + std::vector out; + for (T1 elem : in) { + T2 elem_d = (T2)elem; + T1 out_elem = (T1)mathfunc(elem_d); + out.emplace_back(out_elem); + } + return out; +} + +/* + * FP range: + * [-inf, -maxflt, -1., -minflt, -minden, 0., minden, minflt, 1., maxflt, inf] + */ + +#define MINDEN std::numeric_limits::denorm_min() +#define MINFLT std::numeric_limits::min() +#define MAXFLT std::numeric_limits::max() +#define INF std::numeric_limits::infinity() +#define qNAN std::numeric_limits::quiet_NaN() +#define sNAN std::numeric_limits::signaling_NaN() + +template +std::vector +generate_input_vector(std::string func) +{ + std::vector input = {MINDEN, -MINDEN, MINFLT, -MINFLT, MAXFLT, + -MAXFLT, INF, -INF, qNAN, sNAN, + -1.0, 1.0, 0.0, -0.0}; + + // [-1.0, 1.0] + if ((func == "arcsin") || (func == "arccos") || (func == "arctanh")) { + append_random_array(input, -1.0, 1.0, 700); + } + // (0.0, INF] + else if ((func == "log2") || (func == "log10")) { + append_random_array(input, 0.0, 1.0, 200); + append_random_array(input, MINDEN, MINFLT, 200); + append_random_array(input, MINFLT, 1.0, 200); + append_random_array(input, 1.0, MAXFLT, 200); + } + // (-1.0, INF] + else if (func == "log1p") { + append_random_array(input, -1.0, 1.0, 200); + append_random_array(input, -MINFLT, -MINDEN, 100); + append_random_array(input, -1.0, -MINFLT, 100); + append_random_array(input, MINDEN, MINFLT, 100); + append_random_array(input, MINFLT, 1.0, 100); + append_random_array(input, 1.0, MAXFLT, 100); + } + // [1.0, INF] + else if (func == "arccosh") { + append_random_array(input, 1.0, 2.0, 400); + append_random_array(input, 2.0, MAXFLT, 300); + } + // [-INF, INF] + else { + append_random_array(input, -1.0, 1.0, 100); + append_random_array(input, MINDEN, MINFLT, 100); + append_random_array(input, -MINFLT, -MINDEN, 100); + append_random_array(input, MINFLT, 1.0, 100); + append_random_array(input, -1.0, -MINFLT, 100); + append_random_array(input, 1.0, MAXFLT, 100); + append_random_array(input, -MAXFLT, -100.0, 100); + } + + std::random_shuffle(input.begin(), input.end()); + return input; +} + +int +main() +{ + srand(42); + std::vector umathfunc = { + {"sin", sin, sin, 1.49, 1.00}, + {"cos", cos, cos, 1.49, 1.00}, + {"tan", tan, tan, 3.91, 1.00}, + {"arcsin", asin, asin, 3.12, 1.00}, + {"arccos", acos, acos, 2.1, 1.00}, + {"arctan", atan, atan, 2.3, 1.00}, + {"sinh", sinh, sinh, 1.55, 1.00}, + {"cosh", cosh, cosh, 2.48, 1.00}, + {"tanh", tanh, tanh, 1.38, 2.00}, + {"arcsinh", asinh, asinh, 1.01, 1.00}, + {"arccosh", acosh, acosh, 1.16, 1.00}, + {"arctanh", atanh, atanh, 1.45, 1.00}, + {"cbrt", cbrt, cbrt, 1.94, 2.00}, + //{"exp",exp,exp,3.76,1.00}, + {"exp2", exp2, exp2, 1.01, 1.00}, + {"expm1", expm1, expm1, 2.62, 1.00}, + //{"log",log,log,1.84,1.00}, + {"log10", log10, log10, 3.5, 1.00}, + {"log1p", log1p, log1p, 1.96, 1.0}, + {"log2", log2, log2, 2.12, 1.00}, + }; + + for (int ii = 0; ii < umathfunc.size(); ++ii) { + // ignore sin/cos + if ((umathfunc[ii].name != "sin") && (umathfunc[ii].name != "cos")) { + std::string fileName = + "umath-validation-set-" + umathfunc[ii].name + ".csv"; + std::ofstream txtOut; + txtOut.open(fileName, std::ofstream::trunc); + txtOut << "dtype,input,output,ulperrortol" << std::endl; + + // Single Precision + auto f32in = generate_input_vector(umathfunc[ii].name); + auto f32out = computeTrueVal(f32in, + umathfunc[ii].f32func); + for (int jj = 0; jj < f32in.size(); ++jj) { + txtOut << "np.float32" << std::hex << ",0x" + << *reinterpret_cast(&f32in[jj]) << ",0x" + << *reinterpret_cast(&f32out[jj]) << "," + << ceil(umathfunc[ii].f32ulp) << std::endl; + } + + // Double Precision + auto f64in = generate_input_vector(umathfunc[ii].name); + auto f64out = computeTrueVal( + f64in, umathfunc[ii].f64func); + for (int jj = 0; jj < f64in.size(); ++jj) { + txtOut << "np.float64" << std::hex << ",0x" + << *reinterpret_cast(&f64in[jj]) << ",0x" + << *reinterpret_cast(&f64out[jj]) << "," + << ceil(umathfunc[ii].f64ulp) << std::endl; + } + txtOut.close(); + } + } + return 0; +} diff --git a/local_packages/numpy/_core/tests/data/recarray_from_file.fits b/local_packages/numpy/_core/tests/data/recarray_from_file.fits new file mode 100644 index 0000000..ca48ee8 Binary files /dev/null and b/local_packages/numpy/_core/tests/data/recarray_from_file.fits differ diff --git a/local_packages/numpy/_core/tests/data/umath-validation-set-README.txt b/local_packages/numpy/_core/tests/data/umath-validation-set-README.txt new file mode 100644 index 0000000..cfc9e41 --- /dev/null +++ b/local_packages/numpy/_core/tests/data/umath-validation-set-README.txt @@ -0,0 +1,15 @@ +Steps to validate transcendental functions: +1) Add a file 'umath-validation-set-.txt', where ufuncname is name of + the function in NumPy you want to validate +2) The file should contain 4 columns: dtype,input,expected output,ulperror + a. dtype: one of np.float16, np.float32, np.float64 + b. input: floating point input to ufunc in hex. Example: 0x414570a4 + represents 12.340000152587890625 + c. expected output: floating point output for the corresponding input in hex. + This should be computed using a high(er) precision library and then rounded to + same format as the input. + d. ulperror: expected maximum ulp error of the function. This + should be same across all rows of the same dtype. Otherwise, the function is + tested for the maximum ulp error among all entries of that dtype. +3) Add file umath-validation-set-.txt to the test file test_umath_accuracy.py + which will then validate your ufunc. diff --git a/local_packages/numpy/_core/tests/data/umath-validation-set-arccos.csv b/local_packages/numpy/_core/tests/data/umath-validation-set-arccos.csv new file mode 100644 index 0000000..82c8595 --- /dev/null +++ b/local_packages/numpy/_core/tests/data/umath-validation-set-arccos.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0xbddd7f50,0x3fd6eec2,3 +np.float32,0xbe32a20c,0x3fdf8182,3 +np.float32,0xbf607c09,0x4028f84f,3 +np.float32,0x3f25d906,0x3f5db544,3 +np.float32,0x3f01cec8,0x3f84febf,3 +np.float32,0x3f1d5c6e,0x3f68a735,3 +np.float32,0xbf0cab89,0x4009c36d,3 +np.float32,0xbf176b40,0x400d0941,3 +np.float32,0x3f3248b2,0x3f4ce6d4,3 +np.float32,0x3f390b48,0x3f434e0d,3 +np.float32,0xbe261698,0x3fddea43,3 +np.float32,0x3f0e1154,0x3f7b848b,3 +np.float32,0xbf379a3c,0x4017b764,3 +np.float32,0xbeda6f2c,0x4000bd62,3 +np.float32,0xbf6a0c3f,0x402e5d5a,3 +np.float32,0x3ef1d700,0x3f8a17b7,3 +np.float32,0xbf6f4f65,0x4031d30d,3 +np.float32,0x3f2c9eee,0x3f54adfd,3 +np.float32,0x3f3cfb18,0x3f3d8a1e,3 +np.float32,0x3ba80800,0x3fc867d2,3 +np.float32,0x3e723b08,0x3faa7e4d,3 +np.float32,0xbf65820f,0x402bb054,3 +np.float32,0xbee64e7a,0x40026410,3 +np.float32,0x3cb15140,0x3fc64a87,3 +np.float32,0x3f193660,0x3f6ddf2a,3 +np.float32,0xbf0e5b52,0x400a44f7,3 +np.float32,0x3ed55f14,0x3f920a4b,3 +np.float32,0x3dd11a80,0x3fbbf85c,3 +np.float32,0xbf4f5c4b,0x4020f4f9,3 +np.float32,0x3f787532,0x3e792e87,3 +np.float32,0x3f40e6ac,0x3f37a74f,3 +np.float32,0x3f1c1318,0x3f6a47b6,3 +np.float32,0xbe3c48d8,0x3fe0bb70,3 +np.float32,0xbe94d4bc,0x3feed08e,3 +np.float32,0xbe5c3688,0x3fe4ce26,3 +np.float32,0xbf6fe026,0x403239cb,3 +np.float32,0x3ea5983c,0x3f9ee7bf,3 +np.float32,0x3f1471e6,0x3f73c5bb,3 +np.float32,0x3f0e2622,0x3f7b6b87,3 +np.float32,0xbf597180,0x40257ad1,3 +np.float32,0xbeb5321c,0x3ff75d34,3 +np.float32,0x3f5afcd2,0x3f0b6012,3 +np.float32,0xbef2ff88,0x40042e14,3 +np.float32,0xbedc747e,0x400104f5,3 +np.float32,0xbee0c2f4,0x40019dfc,3 +np.float32,0xbf152cd8,0x400c57dc,3 +np.float32,0xbf6cf9e2,0x40303bbe,3 +np.float32,0x3ed9cd74,0x3f90d1a1,3 +np.float32,0xbf754406,0x4036767f,3 +np.float32,0x3f59c5c2,0x3f0db42f,3 +np.float32,0x3f2eefd8,0x3f518684,3 +np.float32,0xbf156bf9,0x400c6b49,3 +np.float32,0xbd550790,0x3fcfb8dc,3 +np.float32,0x3ede58fc,0x3f8f8f77,3 +np.float32,0xbf00ac19,0x40063c4b,3 +np.float32,0x3f4d25ba,0x3f24280e,3 +np.float32,0xbe9568be,0x3feef73c,3 +np.float32,0x3f67d154,0x3ee05547,3 +np.float32,0x3f617226,0x3efcb4f4,3 +np.float32,0xbf3ab41a,0x4018d6cc,3 +np.float32,0xbf3186fe,0x401592cd,3 +np.float32,0x3de3ba50,0x3fbacca9,3 +np.float32,0x3e789f98,0x3fa9ab97,3 +np.float32,0x3f016e08,0x3f8536d8,3 +np.float32,0x3e8b618c,0x3fa5c571,3 +np.float32,0x3eff97bc,0x3f8628a9,3 +np.float32,0xbf6729f0,0x402ca32f,3 +np.float32,0xbebec146,0x3ff9eddc,3 +np.float32,0x3ddb2e60,0x3fbb563a,3 +np.float32,0x3caa8e40,0x3fc66595,3 +np.float32,0xbf5973f2,0x40257bfa,3 +np.float32,0xbdd82c70,0x3fd69916,3 +np.float32,0xbedf4c82,0x400169ef,3 +np.float32,0x3ef8f22c,0x3f881184,3 +np.float32,0xbf1d74d4,0x400eedc9,3 +np.float32,0x3f2e10a6,0x3f52b790,3 +np.float32,0xbf08ecc0,0x4008a628,3 +np.float32,0x3ecb7db4,0x3f94be9f,3 +np.float32,0xbf052ded,0x40078bfc,3 +np.float32,0x3f2ee78a,0x3f5191e4,3 +np.float32,0xbf56f4e1,0x40245194,3 +np.float32,0x3f600a3e,0x3f014a25,3 +np.float32,0x3f3836f8,0x3f44808b,3 +np.float32,0x3ecabfbc,0x3f94f25c,3 +np.float32,0x3c70f500,0x3fc72dec,3 +np.float32,0x3f17c444,0x3f6fabf0,3 +np.float32,0xbf4c22a5,0x401f9a09,3 +np.float32,0xbe4205dc,0x3fe1765a,3 +np.float32,0x3ea49138,0x3f9f2d36,3 +np.float32,0xbece0082,0x3ffe106b,3 +np.float32,0xbe387578,0x3fe03eef,3 +np.float32,0xbf2b6466,0x40137a30,3 +np.float32,0xbe9dadb2,0x3ff12204,3 +np.float32,0xbf56b3f2,0x402433bb,3 +np.float32,0xbdf9b4d8,0x3fd8b51f,3 +np.float32,0x3f58a596,0x3f0fd4b4,3 +np.float32,0xbedf5748,0x40016b6e,3 +np.float32,0x3f446442,0x3f32476f,3 +np.float32,0x3f5be886,0x3f099658,3 +np.float32,0x3ea1e44c,0x3f9fe1de,3 +np.float32,0xbf11e9b8,0x400b585f,3 +np.float32,0xbf231f8f,0x4010befb,3 +np.float32,0xbf4395ea,0x401c2dd0,3 +np.float32,0x3e9e7784,0x3fa0c8a6,3 +np.float32,0xbe255184,0x3fddd14c,3 +np.float32,0x3f70d25e,0x3eb13148,3 +np.float32,0x3f220cdc,0x3f62a722,3 +np.float32,0xbd027bf0,0x3fcd23e7,3 +np.float32,0x3e4ef8b8,0x3faf02d2,3 +np.float32,0xbf76fc6b,0x40380728,3 +np.float32,0xbf57e761,0x4024c1cd,3 +np.float32,0x3ed4fc20,0x3f922580,3 +np.float32,0xbf09b64a,0x4008e1db,3 +np.float32,0x3f21ca62,0x3f62fcf5,3 +np.float32,0xbe55f610,0x3fe40170,3 +np.float32,0xbc0def80,0x3fca2bbb,3 +np.float32,0xbebc8764,0x3ff9547b,3 +np.float32,0x3ec1b200,0x3f9766d1,3 +np.float32,0xbf4ee44e,0x4020c1ee,3 +np.float32,0xbea85852,0x3ff3f22a,3 +np.float32,0xbf195c0c,0x400da3d3,3 +np.float32,0xbf754b5d,0x40367ce8,3 +np.float32,0xbdcbfe50,0x3fd5d52b,3 +np.float32,0xbf1adb87,0x400e1be3,3 +np.float32,0xbf6f8491,0x4031f898,3 +np.float32,0xbf6f9ae7,0x4032086e,3 +np.float32,0xbf52b3f0,0x40226790,3 +np.float32,0xbf698452,0x402e09f4,3 +np.float32,0xbf43dc9a,0x401c493a,3 +np.float32,0xbf165f7f,0x400cb664,3 +np.float32,0x3e635468,0x3fac682f,3 +np.float32,0xbe8cf2b6,0x3fecc28a,3 +np.float32,0x7f7fffff,0x7fc00000,3 +np.float32,0xbf4c6513,0x401fb597,3 +np.float32,0xbf02b8f8,0x4006d47e,3 +np.float32,0x3ed3759c,0x3f9290c8,3 +np.float32,0xbf2a7a5f,0x40132b98,3 +np.float32,0xbae65000,0x3fc9496f,3 +np.float32,0x3f65f5ea,0x3ee8ef07,3 +np.float32,0xbe7712fc,0x3fe84106,3 +np.float32,0xbb9ff700,0x3fc9afd2,3 +np.float32,0x3d8d87a0,0x3fc03592,3 +np.float32,0xbefc921c,0x40058c23,3 +np.float32,0xbf286566,0x401279d8,3 +np.float32,0x3f53857e,0x3f192eaf,3 +np.float32,0xbee9b0f4,0x4002dd90,3 +np.float32,0x3f4041f8,0x3f38a14a,3 +np.float32,0x3f54ea96,0x3f16b02d,3 +np.float32,0x3ea50ef8,0x3f9f0c01,3 +np.float32,0xbeaad2dc,0x3ff49a4a,3 +np.float32,0xbec428c8,0x3ffb636f,3 +np.float32,0xbda46178,0x3fd358c7,3 +np.float32,0xbefacfc4,0x40054b7f,3 +np.float32,0xbf7068f9,0x40329c85,3 +np.float32,0x3f70b850,0x3eb1caa7,3 +np.float32,0x7fa00000,0x7fe00000,3 +np.float32,0x80000000,0x3fc90fdb,3 +np.float32,0x3f68d5c8,0x3edb7cf3,3 +np.float32,0x3d9443d0,0x3fbfc98a,3 +np.float32,0xff7fffff,0x7fc00000,3 +np.float32,0xbeee7ba8,0x40038a5e,3 +np.float32,0xbf0aaaba,0x40092a73,3 +np.float32,0x3f36a4e8,0x3f46c0ee,3 +np.float32,0x3ed268e4,0x3f92da82,3 +np.float32,0xbee6002c,0x4002591b,3 +np.float32,0xbe8f2752,0x3fed5576,3 +np.float32,0x3f525912,0x3f1b40e0,3 +np.float32,0xbe8e151e,0x3fed0e16,3 +np.float32,0x1,0x3fc90fdb,3 +np.float32,0x3ee23b84,0x3f8e7ae1,3 +np.float32,0xbf5961ca,0x40257361,3 +np.float32,0x3f6bbca0,0x3ecd14cd,3 +np.float32,0x3e27b230,0x3fb4014d,3 +np.float32,0xbf183bb8,0x400d49fc,3 +np.float32,0x3f57759c,0x3f120b68,3 +np.float32,0xbd6994c0,0x3fd05d84,3 +np.float32,0xbf1dd684,0x400f0cc8,3 +np.float32,0xbececc1c,0x3ffe480a,3 +np.float32,0xbf48855f,0x401e206d,3 +np.float32,0x3f28c922,0x3f59d382,3 +np.float32,0xbf65c094,0x402bd3b0,3 +np.float32,0x3f657d42,0x3eeb11dd,3 +np.float32,0xbed32d4e,0x3fff7b15,3 +np.float32,0xbf31af02,0x4015a0b1,3 +np.float32,0x3d89eb00,0x3fc06f7f,3 +np.float32,0x3dac2830,0x3fbe4a17,3 +np.float32,0x3f7f7cb6,0x3d81a7df,3 +np.float32,0xbedbb570,0x4000ea82,3 +np.float32,0x3db37830,0x3fbdd4a8,3 +np.float32,0xbf376f48,0x4017a7fd,3 +np.float32,0x3f319f12,0x3f4dd2c9,3 +np.float32,0x7fc00000,0x7fc00000,3 +np.float32,0x3f1b4f70,0x3f6b3e31,3 +np.float32,0x3e33c880,0x3fb278d1,3 +np.float32,0x3f2796e0,0x3f5b69bd,3 +np.float32,0x3f4915d6,0x3f2ad4d0,3 +np.float32,0x3e4db120,0x3faf2ca0,3 +np.float32,0x3ef03dd4,0x3f8a8ba9,3 +np.float32,0x3e96ca88,0x3fa2cbf7,3 +np.float32,0xbeb136ce,0x3ff64d2b,3 +np.float32,0xbf2f3938,0x4014c75e,3 +np.float32,0x3f769dde,0x3e8b0d76,3 +np.float32,0x3f67cec8,0x3ee06148,3 +np.float32,0x3f0a1ade,0x3f80204e,3 +np.float32,0x3e4b9718,0x3faf7144,3 +np.float32,0x3cccb480,0x3fc5dcf3,3 +np.float32,0x3caeb740,0x3fc654f0,3 +np.float32,0x3f684e0e,0x3ede0678,3 +np.float32,0x3f0ba93c,0x3f7e6663,3 +np.float32,0xbf12bbc4,0x400b985e,3 +np.float32,0xbf2a8e1a,0x40133235,3 +np.float32,0x3f42029c,0x3f35f5c5,3 +np.float32,0x3eed1728,0x3f8b6f9c,3 +np.float32,0xbe5779ac,0x3fe432fd,3 +np.float32,0x3f6ed8b8,0x3ebc7e4b,3 +np.float32,0x3eea25b0,0x3f8c43c7,3 +np.float32,0x3f1988a4,0x3f6d786b,3 +np.float32,0xbe751674,0x3fe7ff8a,3 +np.float32,0xbe9f7418,0x3ff1997d,3 +np.float32,0x3dca11d0,0x3fbc6979,3 +np.float32,0x3f795226,0x3e6a6cab,3 +np.float32,0xbea780e0,0x3ff3b926,3 +np.float32,0xbed92770,0x4000901e,3 +np.float32,0xbf3e9f8c,0x401a49f8,3 +np.float32,0x3f0f7054,0x3f79ddb2,3 +np.float32,0x3a99d400,0x3fc8e966,3 +np.float32,0xbef082b0,0x4003d3c6,3 +np.float32,0xbf0d0790,0x4009defb,3 +np.float32,0xbf1649da,0x400cafb4,3 +np.float32,0xbea5aca8,0x3ff33d5c,3 +np.float32,0xbf4e1843,0x40206ba1,3 +np.float32,0xbe3d7d5c,0x3fe0e2ad,3 +np.float32,0xbf0e802d,0x400a500e,3 +np.float32,0xbf0de8f0,0x400a2295,3 +np.float32,0xbf3016ba,0x4015137e,3 +np.float32,0x3f36b1ea,0x3f46ae5d,3 +np.float32,0xbd27f170,0x3fce4fc7,3 +np.float32,0x3e96ec54,0x3fa2c31f,3 +np.float32,0x3eb4dfdc,0x3f9ad87d,3 +np.float32,0x3f5cac6c,0x3f0815cc,3 +np.float32,0xbf0489aa,0x40075bf1,3 +np.float32,0x3df010c0,0x3fba05f5,3 +np.float32,0xbf229f4a,0x4010956a,3 +np.float32,0x3f75e474,0x3e905a99,3 +np.float32,0xbcece6a0,0x3fccc397,3 +np.float32,0xbdb41528,0x3fd454e7,3 +np.float32,0x3ec8b2f8,0x3f958118,3 +np.float32,0x3f5eaa70,0x3f041a1d,3 +np.float32,0xbf32e1cc,0x40160b91,3 +np.float32,0xbe8e6026,0x3fed219c,3 +np.float32,0x3e6b3160,0x3fab65e3,3 +np.float32,0x3e6d7460,0x3fab1b81,3 +np.float32,0xbf13fbde,0x400bfa3b,3 +np.float32,0xbe8235ec,0x3fe9f9e3,3 +np.float32,0x3d71c4a0,0x3fc18096,3 +np.float32,0x3eb769d0,0x3f9a2aa0,3 +np.float32,0xbf68cb3b,0x402d99e4,3 +np.float32,0xbd917610,0x3fd22932,3 +np.float32,0x3d3cba60,0x3fc3297f,3 +np.float32,0xbf383cbe,0x4017f1cc,3 +np.float32,0xbeee96d0,0x40038e34,3 +np.float32,0x3ec89cb4,0x3f958725,3 +np.float32,0x3ebf92d8,0x3f97f95f,3 +np.float32,0x3f30f3da,0x3f4ec021,3 +np.float32,0xbd26b560,0x3fce45e4,3 +np.float32,0xbec0eb12,0x3ffa8330,3 +np.float32,0x3f6d592a,0x3ec4a6c1,3 +np.float32,0x3ea6d39c,0x3f9e9463,3 +np.float32,0x3e884184,0x3fa6951e,3 +np.float32,0x3ea566c4,0x3f9ef4d1,3 +np.float32,0x3f0c8f4c,0x3f7d5380,3 +np.float32,0x3f28e1ba,0x3f59b2cb,3 +np.float32,0x3f798538,0x3e66e1c3,3 +np.float32,0xbe2889b8,0x3fde39b8,3 +np.float32,0x3f3da05e,0x3f3c949c,3 +np.float32,0x3f24d700,0x3f5f073e,3 +np.float32,0xbe5b5768,0x3fe4b198,3 +np.float32,0xbed3b03a,0x3fff9f05,3 +np.float32,0x3e8a1c4c,0x3fa619eb,3 +np.float32,0xbf075d24,0x40083030,3 +np.float32,0x3f765648,0x3e8d1f52,3 +np.float32,0xbf70fc5e,0x403308bb,3 +np.float32,0x3f557ae8,0x3f15ab76,3 +np.float32,0x3f02f7ea,0x3f84521c,3 +np.float32,0x3f7ebbde,0x3dcbc5c5,3 +np.float32,0xbefbdfc6,0x40057285,3 +np.float32,0x3ec687ac,0x3f9617d9,3 +np.float32,0x3e4831c8,0x3fafe01b,3 +np.float32,0x3e25cde0,0x3fb43ea8,3 +np.float32,0x3e4f2ab8,0x3faefc70,3 +np.float32,0x3ea60ae4,0x3f9ec973,3 +np.float32,0xbf1ed55f,0x400f5dde,3 +np.float32,0xbf5ad4aa,0x40262479,3 +np.float32,0x3e8b3594,0x3fa5d0de,3 +np.float32,0x3f3a77aa,0x3f413c80,3 +np.float32,0xbf07512b,0x40082ca9,3 +np.float32,0x3f33d990,0x3f4ab5e5,3 +np.float32,0x3f521556,0x3f1bb78f,3 +np.float32,0xbecf6036,0x3ffe7086,3 +np.float32,0x3db91bd0,0x3fbd7a11,3 +np.float32,0x3ef63a74,0x3f88d839,3 +np.float32,0xbf2f1116,0x4014b99c,3 +np.float32,0xbf17fdc0,0x400d36b9,3 +np.float32,0xbe87df2c,0x3feb7117,3 +np.float32,0x80800000,0x3fc90fdb,3 +np.float32,0x3ee24c1c,0x3f8e7641,3 +np.float32,0x3f688dce,0x3edcd644,3 +np.float32,0xbf0f4e1c,0x400a8e1b,3 +np.float32,0x0,0x3fc90fdb,3 +np.float32,0x3f786eba,0x3e7999d4,3 +np.float32,0xbf404f80,0x401aeca8,3 +np.float32,0xbe9ffb6a,0x3ff1bd18,3 +np.float32,0x3f146bfc,0x3f73ccfd,3 +np.float32,0xbe47d630,0x3fe233ee,3 +np.float32,0xbe95847c,0x3feefe7c,3 +np.float32,0xbf135df0,0x400bc9e5,3 +np.float32,0x3ea19f3c,0x3f9ff411,3 +np.float32,0x3f235e20,0x3f60f247,3 +np.float32,0xbec789ec,0x3ffc4def,3 +np.float32,0x3f04b656,0x3f834db6,3 +np.float32,0x3dfaf440,0x3fb95679,3 +np.float32,0xbe4a7f28,0x3fe28abe,3 +np.float32,0x3ed4850c,0x3f92463b,3 +np.float32,0x3ec4ba5c,0x3f9694dd,3 +np.float32,0xbce24ca0,0x3fcc992b,3 +np.float32,0xbf5b7c6e,0x402675a0,3 +np.float32,0xbea3ce2a,0x3ff2bf04,3 +np.float32,0x3db02c60,0x3fbe0998,3 +np.float32,0x3c47b780,0x3fc78069,3 +np.float32,0x3ed33b20,0x3f92a0d5,3 +np.float32,0xbf4556d7,0x401cdcde,3 +np.float32,0xbe1b6e28,0x3fdc90ec,3 +np.float32,0xbf3289b7,0x4015ecd0,3 +np.float32,0x3df3f240,0x3fb9c76d,3 +np.float32,0x3eefa7d0,0x3f8ab61d,3 +np.float32,0xbe945838,0x3feeb006,3 +np.float32,0xbf0b1386,0x400949a3,3 +np.float32,0x3f77e546,0x3e812cc1,3 +np.float32,0x3e804ba0,0x3fa8a480,3 +np.float32,0x3f43dcea,0x3f331a06,3 +np.float32,0x3eb87450,0x3f99e33c,3 +np.float32,0x3e5f4898,0x3facecea,3 +np.float32,0x3f646640,0x3eeff10e,3 +np.float32,0x3f1aa832,0x3f6c1051,3 +np.float32,0xbebf6bfa,0x3ffa1bdc,3 +np.float32,0xbb77f300,0x3fc98bd4,3 +np.float32,0x3f3587fe,0x3f485645,3 +np.float32,0x3ef85f34,0x3f883b8c,3 +np.float32,0x3f50e584,0x3f1dc82c,3 +np.float32,0x3f1d30a8,0x3f68deb0,3 +np.float32,0x3ee75a78,0x3f8d0c86,3 +np.float32,0x3f2c023a,0x3f5581e1,3 +np.float32,0xbf074e34,0x40082bca,3 +np.float32,0xbead71f0,0x3ff54c6d,3 +np.float32,0xbf39ed88,0x40188e69,3 +np.float32,0x3f5d2fe6,0x3f07118b,3 +np.float32,0xbf1f79f8,0x400f9267,3 +np.float32,0x3e900c58,0x3fa48e99,3 +np.float32,0xbf759cb2,0x4036c47b,3 +np.float32,0x3f63329c,0x3ef5359c,3 +np.float32,0xbf5d6755,0x40276709,3 +np.float32,0x3f2ce31c,0x3f54519a,3 +np.float32,0x7f800000,0x7fc00000,3 +np.float32,0x3f1bf50e,0x3f6a6d9a,3 +np.float32,0x3f258334,0x3f5e25d8,3 +np.float32,0xbf661a3f,0x402c06ac,3 +np.float32,0x3d1654c0,0x3fc45cef,3 +np.float32,0xbef14a36,0x4003f009,3 +np.float32,0xbf356051,0x4016ec3a,3 +np.float32,0x3f6ccc42,0x3ec79193,3 +np.float32,0xbf2fe3d6,0x401501f9,3 +np.float32,0x3deedc80,0x3fba195b,3 +np.float32,0x3f2e5a28,0x3f52533e,3 +np.float32,0x3e6b68b8,0x3fab5ec8,3 +np.float32,0x3e458240,0x3fb037b7,3 +np.float32,0xbf24bab0,0x401144cb,3 +np.float32,0x3f600f4c,0x3f013fb2,3 +np.float32,0x3f021a04,0x3f84d316,3 +np.float32,0x3f741732,0x3e9cc948,3 +np.float32,0x3f0788aa,0x3f81a5b0,3 +np.float32,0x3f28802c,0x3f5a347c,3 +np.float32,0x3c9eb400,0x3fc69500,3 +np.float32,0x3e5d11e8,0x3fad357a,3 +np.float32,0x3d921250,0x3fbfecb9,3 +np.float32,0x3f354866,0x3f48b066,3 +np.float32,0xbf72cf43,0x40346d84,3 +np.float32,0x3eecdbb8,0x3f8b805f,3 +np.float32,0xbee585d0,0x400247fd,3 +np.float32,0x3e3607a8,0x3fb22fc6,3 +np.float32,0xbf0cb7d6,0x4009c71c,3 +np.float32,0xbf56b230,0x402432ec,3 +np.float32,0xbf4ced02,0x401fee29,3 +np.float32,0xbf3a325c,0x4018a776,3 +np.float32,0x3ecae8bc,0x3f94e732,3 +np.float32,0xbe48c7e8,0x3fe252bd,3 +np.float32,0xbe175d7c,0x3fdc0d5b,3 +np.float32,0x3ea78dac,0x3f9e632d,3 +np.float32,0xbe7434a8,0x3fe7e279,3 +np.float32,0x3f1f9e02,0x3f65c7b9,3 +np.float32,0xbe150f2c,0x3fdbc2c2,3 +np.float32,0x3ee13480,0x3f8ec423,3 +np.float32,0x3ecb7d54,0x3f94beb9,3 +np.float32,0x3f1cef42,0x3f693181,3 +np.float32,0xbf1ec06a,0x400f5730,3 +np.float32,0xbe112acc,0x3fdb44e8,3 +np.float32,0xbe77b024,0x3fe85545,3 +np.float32,0x3ec86fe0,0x3f959353,3 +np.float32,0x3f36b326,0x3f46ac9a,3 +np.float32,0x3e581a70,0x3fadd829,3 +np.float32,0xbf032c0c,0x4006f5f9,3 +np.float32,0xbf43b1fd,0x401c38b1,3 +np.float32,0x3f3701b4,0x3f463c5c,3 +np.float32,0x3f1a995a,0x3f6c22f1,3 +np.float32,0xbf05de0b,0x4007bf97,3 +np.float32,0x3d4bd960,0x3fc2b063,3 +np.float32,0x3f0e1618,0x3f7b7ed0,3 +np.float32,0x3edfd420,0x3f8f2628,3 +np.float32,0xbf6662fe,0x402c3047,3 +np.float32,0x3ec0690c,0x3f97bf9b,3 +np.float32,0xbeaf4146,0x3ff5c7a0,3 +np.float32,0x3f5e7764,0x3f04816d,3 +np.float32,0xbedd192c,0x40011bc5,3 +np.float32,0x3eb76350,0x3f9a2c5e,3 +np.float32,0xbed8108c,0x400069a5,3 +np.float32,0xbe59f31c,0x3fe48401,3 +np.float32,0xbea3e1e6,0x3ff2c439,3 +np.float32,0x3e26d1f8,0x3fb41db5,3 +np.float32,0x3f3a0a7c,0x3f41dba5,3 +np.float32,0x3ebae068,0x3f993ce4,3 +np.float32,0x3f2d8e30,0x3f536942,3 +np.float32,0xbe838bbe,0x3fea5247,3 +np.float32,0x3ebe4420,0x3f98538f,3 +np.float32,0xbcc59b80,0x3fcc265c,3 +np.float32,0x3eebb5c8,0x3f8bd334,3 +np.float32,0xbafc3400,0x3fc94ee8,3 +np.float32,0xbf63ddc1,0x402ac683,3 +np.float32,0xbeabdf80,0x3ff4e18f,3 +np.float32,0x3ea863f0,0x3f9e2a78,3 +np.float32,0x3f45b292,0x3f303bc1,3 +np.float32,0xbe68aa60,0x3fe666bf,3 +np.float32,0x3eb9de18,0x3f998239,3 +np.float32,0xbf719d85,0x4033815e,3 +np.float32,0x3edef9a8,0x3f8f62db,3 +np.float32,0xbd7781c0,0x3fd0cd1e,3 +np.float32,0x3f0b3b90,0x3f7ee92a,3 +np.float32,0xbe3eb3b4,0x3fe10a27,3 +np.float32,0xbf31a4c4,0x40159d23,3 +np.float32,0x3e929434,0x3fa3e5b0,3 +np.float32,0xbeb1a90e,0x3ff66b9e,3 +np.float32,0xbeba9b5e,0x3ff8d048,3 +np.float32,0xbf272a84,0x4012119e,3 +np.float32,0x3f1ebbd0,0x3f66e889,3 +np.float32,0x3ed3cdc8,0x3f927893,3 +np.float32,0xbf50dfce,0x40219b58,3 +np.float32,0x3f0c02de,0x3f7dfb62,3 +np.float32,0xbf694de3,0x402de8d2,3 +np.float32,0xbeaeb13e,0x3ff5a14f,3 +np.float32,0xbf61aa7a,0x40299702,3 +np.float32,0xbf13d159,0x400bed35,3 +np.float32,0xbeecd034,0x40034e0b,3 +np.float32,0xbe50c2e8,0x3fe35761,3 +np.float32,0x3f714406,0x3eae8e57,3 +np.float32,0xbf1ca486,0x400eabd8,3 +np.float32,0x3f5858cc,0x3f106497,3 +np.float32,0x3f670288,0x3ee41c84,3 +np.float32,0xbf20bd2c,0x400ff9f5,3 +np.float32,0xbe29afd8,0x3fde5eff,3 +np.float32,0xbf635e6a,0x402a80f3,3 +np.float32,0x3e82b7b0,0x3fa80446,3 +np.float32,0x3e982e7c,0x3fa26ece,3 +np.float32,0x3d9f0e00,0x3fbf1c6a,3 +np.float32,0x3e8299b4,0x3fa80c07,3 +np.float32,0xbf0529c1,0x40078ac3,3 +np.float32,0xbf403b8a,0x401ae519,3 +np.float32,0xbe57e09c,0x3fe44027,3 +np.float32,0x3ea1c8f4,0x3f9fe913,3 +np.float32,0xbe216a94,0x3fdd52d0,3 +np.float32,0x3f59c442,0x3f0db709,3 +np.float32,0xbd636260,0x3fd02bdd,3 +np.float32,0xbdbbc788,0x3fd4d08d,3 +np.float32,0x3dd19560,0x3fbbf0a3,3 +np.float32,0x3f060ad4,0x3f828641,3 +np.float32,0x3b102e00,0x3fc8c7c4,3 +np.float32,0x3f42b3b8,0x3f34e5a6,3 +np.float32,0x3f0255ac,0x3f84b071,3 +np.float32,0xbf014898,0x40066996,3 +np.float32,0x3e004dc0,0x3fb8fb51,3 +np.float32,0xbf594ff8,0x40256af2,3 +np.float32,0x3efafddc,0x3f877b80,3 +np.float32,0xbf5f0780,0x40283899,3 +np.float32,0x3ee95e54,0x3f8c7bcc,3 +np.float32,0x3eba2f0c,0x3f996c80,3 +np.float32,0x3f37721c,0x3f459b68,3 +np.float32,0x3e2be780,0x3fb378bf,3 +np.float32,0x3e550270,0x3fae3d69,3 +np.float32,0x3e0f9500,0x3fb70e0a,3 +np.float32,0xbf51974a,0x4021eaf4,3 +np.float32,0x3f393832,0x3f430d05,3 +np.float32,0x3f3df16a,0x3f3c1bd8,3 +np.float32,0xbd662340,0x3fd041ed,3 +np.float32,0x3f7e8418,0x3ddc9fce,3 +np.float32,0xbf392734,0x40184672,3 +np.float32,0x3ee3b278,0x3f8e124e,3 +np.float32,0x3eed4808,0x3f8b61d2,3 +np.float32,0xbf6fccbd,0x40322beb,3 +np.float32,0x3e3ecdd0,0x3fb1123b,3 +np.float32,0x3f4419e0,0x3f32bb45,3 +np.float32,0x3f595e00,0x3f0e7914,3 +np.float32,0xbe8c1486,0x3fec88c6,3 +np.float32,0xbf800000,0x40490fdb,3 +np.float32,0xbdaf5020,0x3fd4084d,3 +np.float32,0xbf407660,0x401afb63,3 +np.float32,0x3f0c3aa8,0x3f7db8b8,3 +np.float32,0xbcdb5980,0x3fcc7d5b,3 +np.float32,0x3f4738d4,0x3f2dd1ed,3 +np.float32,0x3f4d7064,0x3f23ab14,3 +np.float32,0xbeb1d576,0x3ff67774,3 +np.float32,0xbf507166,0x40216bb3,3 +np.float32,0x3e86484c,0x3fa71813,3 +np.float32,0x3f09123e,0x3f80bd35,3 +np.float32,0xbe9abe0e,0x3ff05cb2,3 +np.float32,0x3f3019dc,0x3f4fed21,3 +np.float32,0xbe99e00e,0x3ff0227d,3 +np.float32,0xbf155ec5,0x400c6739,3 +np.float32,0x3f5857ba,0x3f106698,3 +np.float32,0x3edf619c,0x3f8f45fb,3 +np.float32,0xbf5ab76a,0x40261664,3 +np.float32,0x3e54b5a8,0x3fae4738,3 +np.float32,0xbee92772,0x4002ca40,3 +np.float32,0x3f2fd610,0x3f504a7a,3 +np.float32,0xbf38521c,0x4017f97e,3 +np.float32,0xff800000,0x7fc00000,3 +np.float32,0x3e2da348,0x3fb34077,3 +np.float32,0x3f2f85fa,0x3f50b894,3 +np.float32,0x3e88f9c8,0x3fa66551,3 +np.float32,0xbf61e570,0x4029b648,3 +np.float32,0xbeab362c,0x3ff4b4a1,3 +np.float32,0x3ec6c310,0x3f9607bd,3 +np.float32,0x3f0d7bda,0x3f7c3810,3 +np.float32,0xbeba5d36,0x3ff8bf99,3 +np.float32,0x3f4b0554,0x3f27adda,3 +np.float32,0x3f60f5dc,0x3efebfb3,3 +np.float32,0x3f36ce2c,0x3f468603,3 +np.float32,0xbe70afac,0x3fe76e8e,3 +np.float32,0x3f673350,0x3ee339b5,3 +np.float32,0xbe124cf0,0x3fdb698c,3 +np.float32,0xbf1243dc,0x400b73d0,3 +np.float32,0x3f3c8850,0x3f3e3407,3 +np.float32,0x3ea02f24,0x3fa05500,3 +np.float32,0xbeffed34,0x400607db,3 +np.float32,0x3f5c75c2,0x3f08817c,3 +np.float32,0x3f4b2fbe,0x3f27682d,3 +np.float32,0x3ee47c34,0x3f8dd9f9,3 +np.float32,0x3f50d48c,0x3f1de584,3 +np.float32,0x3f12dc5e,0x3f75b628,3 +np.float32,0xbefe7e4a,0x4005d2f4,3 +np.float32,0xbec2e846,0x3ffb0cbc,3 +np.float32,0xbedc3036,0x4000fb80,3 +np.float32,0xbf48aedc,0x401e311f,3 +np.float32,0x3f6e032e,0x3ec11363,3 +np.float32,0xbf60de15,0x40292b72,3 +np.float32,0x3f06585e,0x3f8258ba,3 +np.float32,0x3ef49b98,0x3f894e66,3 +np.float32,0x3cc5fe00,0x3fc5f7cf,3 +np.float32,0xbf7525c5,0x40365c2c,3 +np.float32,0x3f64f9f8,0x3eed5fb2,3 +np.float32,0x3e8849c0,0x3fa692fb,3 +np.float32,0x3e50c878,0x3faec79e,3 +np.float32,0x3ed61530,0x3f91d831,3 +np.float32,0xbf54872e,0x40233724,3 +np.float32,0xbf52ee7f,0x4022815e,3 +np.float32,0xbe708c24,0x3fe769fc,3 +np.float32,0xbf26fc54,0x40120260,3 +np.float32,0x3f226e8a,0x3f6228db,3 +np.float32,0xbef30406,0x40042eb8,3 +np.float32,0x3f5d996c,0x3f063f5f,3 +np.float32,0xbf425f9c,0x401bb618,3 +np.float32,0x3e4bb260,0x3faf6dc9,3 +np.float32,0xbe52d5a4,0x3fe39b29,3 +np.float32,0xbe169cf0,0x3fdbf505,3 +np.float32,0xbedfc422,0x40017a8e,3 +np.float32,0x3d8ffef0,0x3fc00e05,3 +np.float32,0xbf12bdab,0x400b98f2,3 +np.float32,0x3f295d0a,0x3f590e88,3 +np.float32,0x3f49d8e4,0x3f2998aa,3 +np.float32,0xbef914f4,0x40050c12,3 +np.float32,0xbf4ea2b5,0x4020a61e,3 +np.float32,0xbf3a89e5,0x4018c762,3 +np.float32,0x3e8707b4,0x3fa6e67a,3 +np.float32,0x3ac55400,0x3fc8de86,3 +np.float32,0x800000,0x3fc90fdb,3 +np.float32,0xbeb9762c,0x3ff8819b,3 +np.float32,0xbebbe23c,0x3ff92815,3 +np.float32,0xbf598c88,0x402587a1,3 +np.float32,0x3e95d864,0x3fa30b4a,3 +np.float32,0x3f7f6f40,0x3d882486,3 +np.float32,0xbf53658c,0x4022b604,3 +np.float32,0xbf2a35f2,0x401314ad,3 +np.float32,0x3eb14380,0x3f9bcf28,3 +np.float32,0x3f0e0c64,0x3f7b8a7a,3 +np.float32,0x3d349920,0x3fc36a9a,3 +np.float32,0xbec2092c,0x3ffad071,3 +np.float32,0xbe1d08e8,0x3fdcc4e0,3 +np.float32,0xbf008968,0x40063243,3 +np.float32,0xbefad582,0x40054c51,3 +np.float32,0xbe52d010,0x3fe39a72,3 +np.float32,0x3f4afdac,0x3f27ba6b,3 +np.float32,0x3f6c483c,0x3eca4408,3 +np.float32,0xbef3cb68,0x40044b0c,3 +np.float32,0x3e94687c,0x3fa36b6f,3 +np.float32,0xbf64ae5c,0x402b39bb,3 +np.float32,0xbf0022b4,0x40061497,3 +np.float32,0x80000001,0x3fc90fdb,3 +np.float32,0x3f25bcd0,0x3f5dda4b,3 +np.float32,0x3ed91b40,0x3f9102d7,3 +np.float32,0x3f800000,0x0,3 +np.float32,0xbebc6aca,0x3ff94cca,3 +np.float32,0x3f239e9a,0x3f609e7d,3 +np.float32,0xbf7312be,0x4034a305,3 +np.float32,0x3efd16d0,0x3f86e148,3 +np.float32,0x3f52753a,0x3f1b0f72,3 +np.float32,0xbde58960,0x3fd7702c,3 +np.float32,0x3ef88580,0x3f883099,3 +np.float32,0x3eebaefc,0x3f8bd51e,3 +np.float32,0x3e877d2c,0x3fa6c807,3 +np.float32,0x3f1a0324,0x3f6cdf32,3 +np.float32,0xbedfe20a,0x40017eb6,3 +np.float32,0x3f205a3c,0x3f64d69d,3 +np.float32,0xbeed5b7c,0x400361b0,3 +np.float32,0xbf69ba10,0x402e2ad0,3 +np.float32,0x3c4fe200,0x3fc77014,3 +np.float32,0x3f043310,0x3f839a69,3 +np.float32,0xbeaf359a,0x3ff5c485,3 +np.float32,0x3db3f110,0x3fbdcd12,3 +np.float32,0x3e24af88,0x3fb462ed,3 +np.float32,0xbf34e858,0x4016c1c8,3 +np.float32,0x3f3334f2,0x3f4b9cd0,3 +np.float32,0xbf145882,0x400c16a2,3 +np.float32,0xbf541c38,0x40230748,3 +np.float32,0x3eba7e10,0x3f99574b,3 +np.float32,0xbe34c6e0,0x3fdfc731,3 +np.float32,0xbe957abe,0x3feefbf0,3 +np.float32,0xbf595a59,0x40256fdb,3 +np.float32,0xbdedc7b8,0x3fd7f4f0,3 +np.float32,0xbf627c02,0x402a06a9,3 +np.float32,0x3f339b78,0x3f4b0d18,3 +np.float32,0xbf2df6d2,0x40145929,3 +np.float32,0x3f617726,0x3efc9fd8,3 +np.float32,0xbee3a8fc,0x40020561,3 +np.float32,0x3efe9f68,0x3f867043,3 +np.float32,0xbf2c3e76,0x4013c3ba,3 +np.float32,0xbf218f28,0x40103d84,3 +np.float32,0xbf1ea847,0x400f4f7f,3 +np.float32,0x3ded9160,0x3fba2e31,3 +np.float32,0x3bce1b00,0x3fc841bf,3 +np.float32,0xbe90566e,0x3feda46a,3 +np.float32,0xbf5ea2ba,0x4028056b,3 +np.float32,0x3f538e62,0x3f191ee6,3 +np.float32,0xbf59e054,0x4025af74,3 +np.float32,0xbe8c98ba,0x3fecab24,3 +np.float32,0x3ee7bdb0,0x3f8cf0b7,3 +np.float32,0xbf2eb828,0x40149b2b,3 +np.float32,0xbe5eb904,0x3fe52068,3 +np.float32,0xbf16b422,0x400cd08d,3 +np.float32,0x3f1ab9b4,0x3f6bfa58,3 +np.float32,0x3dc23040,0x3fbce82a,3 +np.float32,0xbf29d9e7,0x4012f5e5,3 +np.float32,0xbf38f30a,0x40183393,3 +np.float32,0x3e88e798,0x3fa66a09,3 +np.float32,0x3f1d07e6,0x3f69124f,3 +np.float32,0xbe1d3d34,0x3fdccb7e,3 +np.float32,0xbf1715be,0x400ceec2,3 +np.float32,0x3f7a0eac,0x3e5d11f7,3 +np.float32,0xbe764924,0x3fe82707,3 +np.float32,0xbf01a1f8,0x4006837c,3 +np.float32,0x3f2be730,0x3f55a661,3 +np.float32,0xbf7bb070,0x403d4ce5,3 +np.float32,0xbd602110,0x3fd011c9,3 +np.float32,0x3f5d080c,0x3f07609d,3 +np.float32,0xbda20400,0x3fd332d1,3 +np.float32,0x3f1c62da,0x3f69e308,3 +np.float32,0xbf2c6916,0x4013d223,3 +np.float32,0xbf44f8fd,0x401cb816,3 +np.float32,0x3f4da392,0x3f235539,3 +np.float32,0x3e9e8aa0,0x3fa0c3a0,3 +np.float32,0x3e9633c4,0x3fa2f366,3 +np.float32,0xbf0422ab,0x40073ddd,3 +np.float32,0x3f518386,0x3f1cb603,3 +np.float32,0x3f24307a,0x3f5fe096,3 +np.float32,0xbdfb4220,0x3fd8ce24,3 +np.float32,0x3f179d28,0x3f6fdc7d,3 +np.float32,0xbecc2df0,0x3ffd911e,3 +np.float32,0x3f3dff0c,0x3f3c0782,3 +np.float32,0xbf58c4d8,0x4025295b,3 +np.float32,0xbdcf8438,0x3fd60dd3,3 +np.float32,0xbeeaf1b2,0x40030aa7,3 +np.float32,0xbf298a28,0x4012db45,3 +np.float32,0x3f6c4dec,0x3eca2678,3 +np.float32,0x3f4d1ac8,0x3f243a59,3 +np.float32,0x3f62cdfa,0x3ef6e8f8,3 +np.float32,0xbee8acce,0x4002b909,3 +np.float32,0xbd5f2af0,0x3fd00a15,3 +np.float32,0x3f5fde8e,0x3f01a453,3 +np.float32,0x3e95233c,0x3fa33aa4,3 +np.float32,0x3ecd2a60,0x3f9449be,3 +np.float32,0x3f10aa86,0x3f78619d,3 +np.float32,0x3f3888e8,0x3f440a70,3 +np.float32,0x3eeb5bfc,0x3f8bec7d,3 +np.float32,0xbe12d654,0x3fdb7ae6,3 +np.float32,0x3eca3110,0x3f951931,3 +np.float32,0xbe2d1b7c,0x3fdece05,3 +np.float32,0xbf29e9db,0x4012fb3a,3 +np.float32,0xbf0c50b8,0x4009a845,3 +np.float32,0xbed9f0e4,0x4000abef,3 +np.float64,0x3fd078ec5ba0f1d8,0x3ff4f7c00595a4d3,1 +np.float64,0xbfdbc39743b7872e,0x400027f85bce43b2,1 +np.float64,0xbfacd2707c39a4e0,0x3ffa08ae1075d766,1 +np.float64,0xbfc956890f32ad14,0x3ffc52308e7285fd,1 +np.float64,0xbf939c2298273840,0x3ff9706d18e6ea6b,1 +np.float64,0xbfe0d7048961ae09,0x4000fff4406bd395,1 +np.float64,0xbfe9d19b86f3a337,0x4004139bc683a69f,1 +np.float64,0x3fd35c7f90a6b900,0x3ff437220e9123f8,1 +np.float64,0x3fdddca171bbb944,0x3ff15da61e61ec08,1 +np.float64,0x3feb300de9f6601c,0x3fe1c6fadb68cdca,1 +np.float64,0xbfef1815327e302a,0x400739808fc6f964,1 +np.float64,0xbfe332d78e6665af,0x4001b6c4ef922f7c,1 +np.float64,0xbfedbf4dfb7b7e9c,0x40061cefed62a58b,1 +np.float64,0xbfd8dcc7e3b1b990,0x3fff84307713c2c3,1 +np.float64,0xbfedaf161c7b5e2c,0x400612027c1b2b25,1 +np.float64,0xbfed9bde897b37bd,0x4006053f05bd7d26,1 +np.float64,0xbfe081ebc26103d8,0x4000e70755eb66e0,1 +np.float64,0xbfe0366f9c606cdf,0x4000d11212f29afd,1 +np.float64,0xbfc7c115212f822c,0x3ffc1e8c9d58f7db,1 +np.float64,0x3fd8dd9a78b1bb34,0x3ff2bf8d0f4c9376,1 +np.float64,0xbfe54eff466a9dfe,0x4002655950b611f4,1 +np.float64,0xbfe4aad987e955b3,0x40022efb19882518,1 +np.float64,0x3f70231ca0204600,0x3ff911d834e7abf4,1 +np.float64,0x3fede01d047bc03a,0x3fd773cecbd8561b,1 +np.float64,0xbfd6a00d48ad401a,0x3ffee9fd7051633f,1 +np.float64,0x3fd44f3d50a89e7c,0x3ff3f74dd0fc9c91,1 +np.float64,0x3fe540f0d0ea81e2,0x3feb055a7c7d43d6,1 +np.float64,0xbf3ba2e200374800,0x3ff923b582650c6c,1 +np.float64,0x3fe93b2d3f72765a,0x3fe532fa15331072,1 +np.float64,0x3fee8ce5a17d19cc,0x3fd35666eefbe336,1 +np.float64,0x3fe55d5f8feabac0,0x3feadf3dcfe251d4,1 +np.float64,0xbfd1d2ede8a3a5dc,0x3ffda600041ac884,1 +np.float64,0xbfee41186e7c8231,0x40067a625cc6f64d,1 +np.float64,0x3fe521a8b9ea4352,0x3feb2f1a6c8084e5,1 +np.float64,0x3fc65378ef2ca6f0,0x3ff653dfe81ee9f2,1 +np.float64,0x3fdaba0fbcb57420,0x3ff23d630995c6ba,1 +np.float64,0xbfe6b7441d6d6e88,0x4002e182539a2994,1 +np.float64,0x3fda00b6dcb4016c,0x3ff2703d516f28e7,1 +np.float64,0xbfe8699f01f0d33e,0x400382326920ea9e,1 +np.float64,0xbfef5889367eb112,0x4007832af5983793,1 +np.float64,0x3fefb57c8aff6afa,0x3fc14700ab38dcef,1 +np.float64,0xbfda0dfdaab41bfc,0x3fffd75b6fd497f6,1 +np.float64,0xbfb059c36620b388,0x3ffa27c528b97a42,1 +np.float64,0xbfdd450ab1ba8a16,0x40005dcac6ab50fd,1 +np.float64,0xbfe54d6156ea9ac2,0x400264ce9f3f0fb9,1 +np.float64,0xbfe076e94760edd2,0x4000e3d1374884da,1 +np.float64,0xbfc063286720c650,0x3ffb2fd1d6bff0ef,1 +np.float64,0xbfe24680f2e48d02,0x40016ddfbb5bcc0e,1 +np.float64,0xbfdc9351d2b926a4,0x400044e3756fb765,1 +np.float64,0x3fefb173d8ff62e8,0x3fc1bd5626f80850,1 +np.float64,0x3fe77c117a6ef822,0x3fe7e57089bad2ec,1 +np.float64,0xbfddbcebf7bb79d8,0x40006eadb60406b3,1 +np.float64,0xbfecf6625ff9ecc5,0x40059e6c6961a6db,1 +np.float64,0x3fdc8950b8b912a0,0x3ff1bcfb2e27795b,1 +np.float64,0xbfeb2fa517765f4a,0x4004b00aee3e6888,1 +np.float64,0x3fd0efc88da1df90,0x3ff4d8f7cbd8248a,1 +np.float64,0xbfe6641a2becc834,0x4002c43362c1bd0f,1 +np.float64,0xbfe28aec0fe515d8,0x400182c91d4df039,1 +np.float64,0xbfd5ede8d0abdbd2,0x3ffeba7baef05ae8,1 +np.float64,0xbfbd99702a3b32e0,0x3ffafca21c1053f1,1 +np.float64,0x3f96f043f82de080,0x3ff8c6384d5eb610,1 +np.float64,0xbfe5badbc9eb75b8,0x400289c8cd5873d1,1 +np.float64,0x3fe5c6bf95eb8d80,0x3fea5093e9a3e43e,1 +np.float64,0x3fb1955486232ab0,0x3ff8086d4c3e71d5,1 +np.float64,0xbfea145f397428be,0x4004302237a35871,1 +np.float64,0xbfdabe685db57cd0,0x400003e2e29725fb,1 +np.float64,0xbfefc79758ff8f2f,0x400831814e23bfc8,1 +np.float64,0x3fd7edb66cafdb6c,0x3ff3006c5123bfaf,1 +np.float64,0xbfeaf7644bf5eec8,0x400495a7963ce4ed,1 +np.float64,0x3fdf838d78bf071c,0x3ff0e527eed73800,1 +np.float64,0xbfd1a0165ba3402c,0x3ffd98c5ab76d375,1 +np.float64,0x3fd75b67a9aeb6d0,0x3ff327c8d80b17cf,1 +np.float64,0x3fc2aa9647255530,0x3ff6ca854b157df1,1 +np.float64,0xbfe0957fd4612b00,0x4000ecbf3932becd,1 +np.float64,0x3fda1792c0b42f24,0x3ff269fbb2360487,1 +np.float64,0x3fd480706ca900e0,0x3ff3ea53a6aa3ae8,1 +np.float64,0xbfd0780ed9a0f01e,0x3ffd4bfd544c7d47,1 +np.float64,0x3feeec0cd77dd81a,0x3fd0a8a241fdb441,1 +np.float64,0x3fcfa933e93f5268,0x3ff5223478621a6b,1 +np.float64,0x3fdad2481fb5a490,0x3ff236b86c6b2b49,1 +np.float64,0x3fe03b129de07626,0x3ff09f21fb868451,1 +np.float64,0xbfc01212cd202424,0x3ffb259a07159ae9,1 +np.float64,0x3febdb912df7b722,0x3fe0768e20dac8c9,1 +np.float64,0xbfbf2148763e4290,0x3ffb154c361ce5bf,1 +np.float64,0xbfb1a7eb1e234fd8,0x3ffa3cb37ac4a176,1 +np.float64,0xbfe26ad1ec64d5a4,0x400178f480ecce8d,1 +np.float64,0x3fe6d1cd1b6da39a,0x3fe8dc20ec4dad3b,1 +np.float64,0xbfede0e53dfbc1ca,0x4006340d3bdd7c97,1 +np.float64,0xbfe8fd1bd9f1fa38,0x4003bc3477f93f40,1 +np.float64,0xbfe329d0f26653a2,0x4001b3f345af5648,1 +np.float64,0xbfe4bb20eee97642,0x40023451404d6d08,1 +np.float64,0x3fb574832e2ae900,0x3ff7ca4bed0c7110,1 +np.float64,0xbfdf3c098fbe7814,0x4000a525bb72d659,1 +np.float64,0x3fa453e6d428a7c0,0x3ff87f512bb9b0c6,1 +np.float64,0x3faaec888435d920,0x3ff84a7d9e4def63,1 +np.float64,0xbfcdc240df3b8480,0x3ffce30ece754e7f,1 +np.float64,0xbf8c3220f0386440,0x3ff95a600ae6e157,1 +np.float64,0x3fe806076c700c0e,0x3fe71784a96c76eb,1 +np.float64,0x3fedf9b0e17bf362,0x3fd6e35fc0a7b6c3,1 +np.float64,0xbfe1b48422636908,0x400141bd8ed251bc,1 +np.float64,0xbfe82e2817705c50,0x40036b5a5556d021,1 +np.float64,0xbfc8ef8ff931df20,0x3ffc450ffae7ce58,1 +np.float64,0xbfe919fa94f233f5,0x4003c7cce4697fe8,1 +np.float64,0xbfc3ace4a72759c8,0x3ffb9a197bb22651,1 +np.float64,0x3fe479f71ee8f3ee,0x3fec0bd2f59097aa,1 +np.float64,0xbfeeb54a967d6a95,0x4006da12c83649c5,1 +np.float64,0x3fe5e74ea8ebce9e,0x3fea2407cef0f08c,1 +np.float64,0x3fb382baf2270570,0x3ff7e98213b921ba,1 +np.float64,0xbfdd86fd3cbb0dfa,0x40006712952ddbcf,1 +np.float64,0xbfd250eb52a4a1d6,0x3ffdc6d56253b1cd,1 +np.float64,0x3fea30c4ed74618a,0x3fe3962deba4f30e,1 +np.float64,0x3fc895963d312b30,0x3ff60a5d52fcbccc,1 +np.float64,0x3fe9cc4f6273989e,0x3fe442740942c80f,1 +np.float64,0xbfe8769f5cf0ed3f,0x4003873b4cb5bfce,1 +np.float64,0xbfe382f3726705e7,0x4001cfeb3204d110,1 +np.float64,0x3fbfe9a9163fd350,0x3ff7220bd2b97c8f,1 +np.float64,0xbfca6162bb34c2c4,0x3ffc743f939358f1,1 +np.float64,0x3fe127a014e24f40,0x3ff0147c4bafbc39,1 +np.float64,0x3fee9cdd2a7d39ba,0x3fd2e9ef45ab122f,1 +np.float64,0x3fa9ffb97c33ff80,0x3ff851e69fa3542c,1 +np.float64,0x3fd378f393a6f1e8,0x3ff42faafa77de56,1 +np.float64,0xbfe4df1e1669be3c,0x400240284df1c321,1 +np.float64,0x3fed0ed79bfa1db0,0x3fdba89060aa96fb,1 +np.float64,0x3fdef2ee52bde5dc,0x3ff10e942244f4f1,1 +np.float64,0xbfdab38f3ab5671e,0x40000264d8d5b49b,1 +np.float64,0x3fbe95a96e3d2b50,0x3ff73774cb59ce2d,1 +np.float64,0xbfe945653af28aca,0x4003d9657bf129c2,1 +np.float64,0xbfb18f3f2a231e80,0x3ffa3b27cba23f50,1 +np.float64,0xbfef50bf22fea17e,0x40077998a850082c,1 +np.float64,0xbfc52b8c212a5718,0x3ffbca8d6560a2da,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x3fc1e3a02d23c740,0x3ff6e3a5fcac12a4,1 +np.float64,0xbfeb5e4ea5f6bc9d,0x4004c65abef9426f,1 +np.float64,0xbfe425b132684b62,0x400203c29608b00d,1 +np.float64,0xbfbfa1c19e3f4380,0x3ffb1d6367711158,1 +np.float64,0x3fbba2776e3744f0,0x3ff766f6df586fad,1 +np.float64,0xbfb5d0951e2ba128,0x3ffa7f712480b25e,1 +np.float64,0xbfe949fdab7293fb,0x4003db4530a18507,1 +np.float64,0xbfcf13519b3e26a4,0x3ffd0e6f0a6c38ee,1 +np.float64,0x3f91e6d72823cdc0,0x3ff8da5f08909b6e,1 +np.float64,0x3f78a2e360314600,0x3ff909586727caef,1 +np.float64,0xbfe1ae7e8fe35cfd,0x40013fef082caaa3,1 +np.float64,0x3fe97a6dd1f2f4dc,0x3fe4cb4b99863478,1 +np.float64,0xbfcc1e1e69383c3c,0x3ffcad250a949843,1 +np.float64,0x3faccb797c399700,0x3ff83b8066b49330,1 +np.float64,0x3fe7a2647a6f44c8,0x3fe7acceae6ec425,1 +np.float64,0xbfec3bfcf0f877fa,0x4005366af5a7175b,1 +np.float64,0xbfe2310b94646217,0x400167588fceb228,1 +np.float64,0x3feb167372762ce6,0x3fe1f74c0288fad8,1 +np.float64,0xbfb722b4ee2e4568,0x3ffa94a81b94dfca,1 +np.float64,0x3fc58da9712b1b50,0x3ff66cf8f072aa14,1 +np.float64,0xbfe7fff9d6effff4,0x400359d01b8141de,1 +np.float64,0xbfd56691c5aacd24,0x3ffe9686697797e8,1 +np.float64,0x3fe3ab0557e7560a,0x3fed1593959ef8e8,1 +np.float64,0x3fdd458995ba8b14,0x3ff1883d6f22a322,1 +np.float64,0x3fe7bbed2cef77da,0x3fe786d618094cda,1 +np.float64,0x3fa31a30c4263460,0x3ff88920b936fd79,1 +np.float64,0x8010000000000000,0x3ff921fb54442d18,1 +np.float64,0xbfdc5effbdb8be00,0x40003d95fe0dff11,1 +np.float64,0x3febfdad7e77fb5a,0x3fe030b5297dbbdd,1 +np.float64,0x3fe4f3f3b2e9e7e8,0x3feb6bc59eeb2be2,1 +np.float64,0xbfe44469fd6888d4,0x40020daa5488f97a,1 +np.float64,0xbfe19fddb0e33fbc,0x40013b8c902b167b,1 +np.float64,0x3fa36ad17c26d5a0,0x3ff8869b3e828134,1 +np.float64,0x3fcf23e6c93e47d0,0x3ff5336491a65d1e,1 +np.float64,0xffefffffffffffff,0x7ff8000000000000,1 +np.float64,0xbfe375f4cee6ebea,0x4001cbd2ba42e8b5,1 +np.float64,0xbfaef1215c3de240,0x3ffa19ab02081189,1 +np.float64,0xbfec39c59c78738b,0x4005353dc38e3d78,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0xbfec09bb7b781377,0x40051c0a5754cb3a,1 +np.float64,0x3fe8301f2870603e,0x3fe6d783c5ef0944,1 +np.float64,0xbfed418c987a8319,0x4005cbae1b8693d1,1 +np.float64,0xbfdc16e7adb82dd0,0x4000338b634eaf03,1 +np.float64,0x3fd5d361bdaba6c4,0x3ff390899300a54c,1 +np.float64,0xbff0000000000000,0x400921fb54442d18,1 +np.float64,0x3fd5946232ab28c4,0x3ff3a14767813f29,1 +np.float64,0x3fe833e5fef067cc,0x3fe6d1be720edf2d,1 +np.float64,0x3fedf746a67bee8e,0x3fd6f127fdcadb7b,1 +np.float64,0x3fd90353d3b206a8,0x3ff2b54f7d369ba9,1 +np.float64,0x3fec4b4b72f89696,0x3fdf1b38d2e93532,1 +np.float64,0xbfe9c67596f38ceb,0x40040ee5f524ce03,1 +np.float64,0x3fd350d91aa6a1b4,0x3ff43a303c0da27f,1 +np.float64,0x3fd062603ba0c4c0,0x3ff4fd9514b935d8,1 +np.float64,0xbfe24c075f64980e,0x40016f8e9f2663b3,1 +np.float64,0x3fdaa546eeb54a8c,0x3ff2431a88fef1d5,1 +np.float64,0x3fe92b8151f25702,0x3fe54c67e005cbf9,1 +np.float64,0xbfe1be8b8a637d17,0x400144c078f67c6e,1 +np.float64,0xbfe468a1d7e8d144,0x40021964b118cbf4,1 +np.float64,0xbfdc6de4fab8dbca,0x40003fa9e27893d8,1 +np.float64,0xbfe3c2788ae784f1,0x4001e407ba3aa956,1 +np.float64,0xbfe2bf1542e57e2a,0x400192d4a9072016,1 +np.float64,0xbfe6982f4c6d305e,0x4002d681b1991bbb,1 +np.float64,0x3fdbceb1c4b79d64,0x3ff1f0f117b9d354,1 +np.float64,0x3fdb3705e7b66e0c,0x3ff21af01ca27ace,1 +np.float64,0x3fe3e6358ee7cc6c,0x3fecca4585053983,1 +np.float64,0xbfe16d6a9a62dad5,0x40012c7988aee247,1 +np.float64,0xbfce66e4413ccdc8,0x3ffcf83b08043a0c,1 +np.float64,0xbfeb6cd46876d9a9,0x4004cd61733bfb79,1 +np.float64,0xbfdb1cdd64b639ba,0x400010e6cf087cb7,1 +np.float64,0xbfe09e4e30e13c9c,0x4000ef5277c47721,1 +np.float64,0xbfee88dd127d11ba,0x4006b3cd443643ac,1 +np.float64,0xbf911e06c8223c00,0x3ff966744064fb05,1 +np.float64,0xbfe8f22bc471e458,0x4003b7d5513af295,1 +np.float64,0x3fe3d7329567ae66,0x3fecdd6c241f83ee,1 +np.float64,0x3fc8a9404b315280,0x3ff607dc175edf3f,1 +np.float64,0x3fe7eb80ad6fd702,0x3fe73f8fdb3e6a6c,1 +np.float64,0x3fef0931e37e1264,0x3fcf7fde80a3c5ab,1 +np.float64,0x3fe2ed3c3fe5da78,0x3fee038334cd1860,1 +np.float64,0x3fe251fdb8e4a3fc,0x3feec26dc636ac31,1 +np.float64,0x3feb239436764728,0x3fe1de9462455da7,1 +np.float64,0xbfe63fd7eeec7fb0,0x4002b78cfa3d2fa6,1 +np.float64,0x3fdd639cb5bac738,0x3ff17fc7d92b3eee,1 +np.float64,0x3fd0a7a13fa14f44,0x3ff4eba95c559c84,1 +np.float64,0x3fe804362d70086c,0x3fe71a44cd91ffa4,1 +np.float64,0xbfe0fecf6e61fd9f,0x40010bac8edbdc4f,1 +np.float64,0x3fcb74acfd36e958,0x3ff5ac84437f1b7c,1 +np.float64,0x3fe55053e1eaa0a8,0x3feaf0bf76304c30,1 +np.float64,0x3fc06b508d20d6a0,0x3ff7131da17f3902,1 +np.float64,0x3fdd78750fbaf0ec,0x3ff179e97fbf7f65,1 +np.float64,0x3fe44cb946689972,0x3fec46859b5da6be,1 +np.float64,0xbfeb165a7ff62cb5,0x4004a41c9cc9589e,1 +np.float64,0x3fe01ffb2b603ff6,0x3ff0aed52bf1c3c1,1 +np.float64,0x3f983c60a83078c0,0x3ff8c107805715ab,1 +np.float64,0x3fd8b5ff13b16c00,0x3ff2ca4a837a476a,1 +np.float64,0x3fc80510a1300a20,0x3ff61cc3b4af470b,1 +np.float64,0xbfd3935b06a726b6,0x3ffe1b3a2066f473,1 +np.float64,0xbfdd4a1f31ba943e,0x40005e81979ed445,1 +np.float64,0xbfa76afdd42ed600,0x3ff9dd63ffba72d2,1 +np.float64,0x3fe7e06d496fc0da,0x3fe7503773566707,1 +np.float64,0xbfea5fbfe874bf80,0x40045106af6c538f,1 +np.float64,0x3fee000c487c0018,0x3fd6bef1f8779d88,1 +np.float64,0xbfb39f4ee2273ea0,0x3ffa5c3f2b3888ab,1 +np.float64,0x3feb9247b0772490,0x3fe1092d2905efce,1 +np.float64,0x3fdaa39b4cb54738,0x3ff243901da0da17,1 +np.float64,0x3fcd5b2b493ab658,0x3ff56e262e65b67d,1 +np.float64,0x3fcf82512f3f04a0,0x3ff52738847c55f2,1 +np.float64,0x3fe2af5e0c655ebc,0x3fee4ffab0c82348,1 +np.float64,0xbfec0055d0f800ac,0x4005172d325933e8,1 +np.float64,0x3fe71da9336e3b52,0x3fe86f2e12f6e303,1 +np.float64,0x3fbefab0723df560,0x3ff731188ac716ec,1 +np.float64,0xbfe11dca28623b94,0x400114d3d4ad370d,1 +np.float64,0x3fbcbda8ca397b50,0x3ff755281078abd4,1 +np.float64,0x3fe687c7126d0f8e,0x3fe945099a7855cc,1 +np.float64,0xbfecde510579bca2,0x400590606e244591,1 +np.float64,0xbfd72de681ae5bce,0x3fff0ff797ad1755,1 +np.float64,0xbfe7c0f7386f81ee,0x40034226e0805309,1 +np.float64,0x3fd8d55619b1aaac,0x3ff2c1cb3267b14e,1 +np.float64,0x3fecd7a2ad79af46,0x3fdcabbffeaa279e,1 +np.float64,0x3fee7fb1a8fcff64,0x3fd3ae620286fe19,1 +np.float64,0xbfc5f3a3592be748,0x3ffbe3ed204d9842,1 +np.float64,0x3fec9e5527793caa,0x3fddb00bc8687e4b,1 +np.float64,0x3fc35dc70f26bb90,0x3ff6b3ded7191e33,1 +np.float64,0x3fda91c07ab52380,0x3ff24878848fec8f,1 +np.float64,0xbfe12cde1fe259bc,0x4001194ab99d5134,1 +np.float64,0xbfd35ab736a6b56e,0x3ffe0c5ce8356d16,1 +np.float64,0x3fc9c94123339280,0x3ff5e3239f3ad795,1 +np.float64,0xbfe72f54926e5ea9,0x40030c95d1d02b56,1 +np.float64,0xbfee283186fc5063,0x40066786bd0feb79,1 +np.float64,0xbfe7b383f56f6708,0x40033d23ef0e903d,1 +np.float64,0x3fd6037327ac06e8,0x3ff383bf2f311ddb,1 +np.float64,0x3fe0e344b561c68a,0x3ff03cd90fd4ba65,1 +np.float64,0xbfef0ff54b7e1feb,0x400730fa5fce381e,1 +np.float64,0x3fd269929da4d324,0x3ff476b230136d32,1 +np.float64,0xbfbc5fb9f638bf70,0x3ffae8e63a4e3234,1 +np.float64,0xbfe2e8bc84e5d179,0x40019fb5874f4310,1 +np.float64,0xbfd7017413ae02e8,0x3fff040d843c1531,1 +np.float64,0x3fefd362fa7fa6c6,0x3fbababc3ddbb21d,1 +np.float64,0x3fecb62ed3f96c5e,0x3fdd44ba77ccff94,1 +np.float64,0xbfb16fad5222df58,0x3ffa392d7f02b522,1 +np.float64,0x3fbcf4abc639e950,0x3ff751b23c40e27f,1 +np.float64,0x3fe128adbce2515c,0x3ff013dc91db04b5,1 +np.float64,0x3fa5dd9d842bbb40,0x3ff87300c88d512f,1 +np.float64,0xbfe61efcaf6c3dfa,0x4002ac27117f87c9,1 +np.float64,0x3feffe1233fffc24,0x3f9638d3796a4954,1 +np.float64,0xbfe78548b66f0a92,0x40032c0447b7bfe2,1 +np.float64,0x3fe7bd38416f7a70,0x3fe784e86d6546b6,1 +np.float64,0x3fe0d6bc5961ad78,0x3ff0443899e747ac,1 +np.float64,0xbfd0bb6e47a176dc,0x3ffd5d6dff390d41,1 +np.float64,0xbfec1d16b8f83a2e,0x40052620378d3b78,1 +np.float64,0x3fe9bbec20f377d8,0x3fe45e167c7a3871,1 +np.float64,0xbfeed81d9dfdb03b,0x4006f9dec2db7310,1 +np.float64,0xbfe1e35179e3c6a3,0x40014fd1b1186ac0,1 +np.float64,0xbfc9c7e605338fcc,0x3ffc60a6bd1a7126,1 +np.float64,0x3feec92810fd9250,0x3fd1afde414ab338,1 +np.float64,0xbfeb9f1d90773e3b,0x4004e606b773f5b0,1 +np.float64,0x3fcbabdf6b3757c0,0x3ff5a573866404af,1 +np.float64,0x3fe9f4e1fff3e9c4,0x3fe3fd7b6712dd7b,1 +np.float64,0xbfe6c0175ded802e,0x4002e4a4dc12f3fe,1 +np.float64,0xbfeefc96f37df92e,0x40071d367cd721ff,1 +np.float64,0xbfeaab58dc7556b2,0x400472ce37e31e50,1 +np.float64,0xbfc62668772c4cd0,0x3ffbea5e6c92010a,1 +np.float64,0x3fafe055fc3fc0a0,0x3ff822ce6502519a,1 +np.float64,0x3fd7b648ffaf6c90,0x3ff30f5a42f11418,1 +np.float64,0xbfe934fe827269fd,0x4003d2b9fed9e6ad,1 +np.float64,0xbfe6d691f2edad24,0x4002eca6a4b1797b,1 +np.float64,0x3fc7e62ced2fcc58,0x3ff620b1f44398b7,1 +np.float64,0xbfc89be9f33137d4,0x3ffc3a67a497f59c,1 +np.float64,0xbfe7793d536ef27a,0x40032794bf14dd64,1 +np.float64,0x3fde55a02dbcab40,0x3ff13b5f82d223e4,1 +np.float64,0xbfc8eabd7b31d57c,0x3ffc4472a81cb6d0,1 +np.float64,0x3fddcb5468bb96a8,0x3ff162899c381f2e,1 +np.float64,0xbfec7554d8f8eaaa,0x40055550e18ec463,1 +np.float64,0x3fd0b6e8b6a16dd0,0x3ff4e7b4781a50e3,1 +np.float64,0x3fedaae01b7b55c0,0x3fd8964916cdf53d,1 +np.float64,0x3fe0870f8a610e20,0x3ff072e7db95c2a2,1 +np.float64,0xbfec3e3ce2787c7a,0x4005379d0f6be873,1 +np.float64,0xbfe65502586caa04,0x4002beecff89147f,1 +np.float64,0xbfe0df39a961be74,0x4001025e36d1c061,1 +np.float64,0xbfb5d8edbe2bb1d8,0x3ffa7ff72b7d6a2b,1 +np.float64,0xbfde89574bbd12ae,0x40008ba4cd74544d,1 +np.float64,0xbfe72938f0ee5272,0x40030a5efd1acb6d,1 +np.float64,0xbfcd500d133aa01c,0x3ffcd462f9104689,1 +np.float64,0x3fe0350766606a0e,0x3ff0a2a3664e2c14,1 +np.float64,0xbfc892fb573125f8,0x3ffc3944641cc69d,1 +np.float64,0xbfba7dc7c634fb90,0x3ffaca9a6a0ffe61,1 +np.float64,0xbfeac94478759289,0x40048068a8b83e45,1 +np.float64,0xbfe8f60c1af1ec18,0x4003b961995b6e51,1 +np.float64,0x3fea1c0817743810,0x3fe3ba28c1643cf7,1 +np.float64,0xbfe42a0fefe85420,0x4002052aadd77f01,1 +np.float64,0x3fd2c61c56a58c38,0x3ff45e84cb9a7fa9,1 +np.float64,0xbfd83fb7cdb07f70,0x3fff59ab4790074c,1 +np.float64,0x3fd95e630fb2bcc8,0x3ff29c8bee1335ad,1 +np.float64,0x3feee88f387dd11e,0x3fd0c3ad3ded4094,1 +np.float64,0x3fe061291160c252,0x3ff0890010199bbc,1 +np.float64,0xbfdc7db3b5b8fb68,0x400041dea3759443,1 +np.float64,0x3fee23b320fc4766,0x3fd5ee73d7aa5c56,1 +np.float64,0xbfdc25c590b84b8c,0x4000359cf98a00b4,1 +np.float64,0xbfd63cbfd2ac7980,0x3ffecf7b9cf99b3c,1 +np.float64,0xbfbeb3c29a3d6788,0x3ffb0e66ecc0fc3b,1 +np.float64,0xbfd2f57fd6a5eb00,0x3ffdf1d7c79e1532,1 +np.float64,0xbfab3eda9c367db0,0x3ff9fc0c875f42e9,1 +np.float64,0xbfe12df1c6e25be4,0x4001199c673e698c,1 +np.float64,0x3fef8ab23a7f1564,0x3fc5aff358c59f1c,1 +np.float64,0x3fe562f50feac5ea,0x3fead7bce205f7d9,1 +np.float64,0x3fdc41adbeb8835c,0x3ff1d0f71341b8f2,1 +np.float64,0x3fe2748967e4e912,0x3fee9837f970ff9e,1 +np.float64,0xbfdaa89d57b5513a,0x400000e3889ba4cf,1 +np.float64,0x3fdf2a137dbe5428,0x3ff0fecfbecbbf86,1 +np.float64,0xbfea1fdcd2f43fba,0x4004351974b32163,1 +np.float64,0xbfe34a93a3e69528,0x4001be323946a3e0,1 +np.float64,0x3fe929bacff25376,0x3fe54f47bd7f4cf2,1 +np.float64,0xbfd667fbd6accff8,0x3ffedb04032b3a1a,1 +np.float64,0xbfeb695796f6d2af,0x4004cbb08ec6f525,1 +np.float64,0x3fd204df2ea409c0,0x3ff490f51e6670f5,1 +np.float64,0xbfd89a2757b1344e,0x3fff722127b988c4,1 +np.float64,0xbfd0787187a0f0e4,0x3ffd4c16dbe94f32,1 +np.float64,0x3fd44239bfa88474,0x3ff3fabbfb24b1fa,1 +np.float64,0xbfeb0b3489f61669,0x40049ee33d811d33,1 +np.float64,0x3fdcf04eaab9e09c,0x3ff1a02a29996c4e,1 +np.float64,0x3fd4c51e4fa98a3c,0x3ff3d8302c68fc9a,1 +np.float64,0x3fd1346645a268cc,0x3ff4c72b4970ecaf,1 +np.float64,0x3fd6a89d09ad513c,0x3ff357af6520afac,1 +np.float64,0xbfba0f469a341e90,0x3ffac3a8f41bed23,1 +np.float64,0xbfe13f8ddce27f1c,0x40011ed557719fd6,1 +np.float64,0x3fd43e5e26a87cbc,0x3ff3fbc040fc30dc,1 +np.float64,0x3fe838125a707024,0x3fe6cb5c987248f3,1 +np.float64,0x3fe128c30c625186,0x3ff013cff238dd1b,1 +np.float64,0xbfcd4718833a8e30,0x3ffcd33c96bde6f9,1 +np.float64,0x3fe43fcd08e87f9a,0x3fec573997456ec1,1 +np.float64,0xbfe9a29104734522,0x4003ffd502a1b57f,1 +np.float64,0xbfe4709d7968e13b,0x40021bfc5cd55af4,1 +np.float64,0x3fd21c3925a43874,0x3ff48adf48556cbb,1 +np.float64,0x3fe9a521b2734a44,0x3fe4844fc054e839,1 +np.float64,0xbfdfa6a912bf4d52,0x4000b4730ad8521e,1 +np.float64,0x3fe3740702e6e80e,0x3fed5b106283b6ed,1 +np.float64,0x3fd0a3aa36a14754,0x3ff4ecb02a5e3f49,1 +np.float64,0x3fdcb903d0b97208,0x3ff1afa5d692c5b9,1 +np.float64,0xbfe7d67839efacf0,0x40034a3146abf6f2,1 +np.float64,0x3f9981c6d8330380,0x3ff8bbf1853d7b90,1 +np.float64,0xbfe9d4191673a832,0x400414a9ab453c5d,1 +np.float64,0x3fef0a1e5c7e143c,0x3fcf70b02a54c415,1 +np.float64,0xbfd996dee6b32dbe,0x3fffb6cf707ad8e4,1 +np.float64,0x3fe19bef17e337de,0x3fef9e70d4fcedae,1 +np.float64,0x3fe34a59716694b2,0x3fed8f6d5cfba474,1 +np.float64,0x3fdf27e27cbe4fc4,0x3ff0ff70500e0c7c,1 +np.float64,0xbfe19df87fe33bf1,0x40013afb401de24c,1 +np.float64,0xbfbdfd97ba3bfb30,0x3ffb02ef8c225e57,1 +np.float64,0xbfe3d3417267a683,0x4001e95ed240b0f8,1 +np.float64,0x3fe566498b6acc94,0x3fead342957d4910,1 +np.float64,0x3ff0000000000000,0x0,1 +np.float64,0x3feb329bd8766538,0x3fe1c2225aafe3b4,1 +np.float64,0xbfc19ca703233950,0x3ffb575b5df057b9,1 +np.float64,0x3fe755027d6eaa04,0x3fe81eb99c262e00,1 +np.float64,0xbfe6c2b8306d8570,0x4002e594199f9eec,1 +np.float64,0x3fd69438e6ad2870,0x3ff35d2275ae891d,1 +np.float64,0x3fda3e7285b47ce4,0x3ff25f5573dd47ae,1 +np.float64,0x3fe7928a166f2514,0x3fe7c4490ef4b9a9,1 +np.float64,0xbfd4eb71b9a9d6e4,0x3ffe75e8ccb74be1,1 +np.float64,0xbfcc3a07f1387410,0x3ffcb0b8af914a5b,1 +np.float64,0xbfe6e80225edd004,0x4002f2e26eae8999,1 +np.float64,0xbfb347728a268ee8,0x3ffa56bd526a12db,1 +np.float64,0x3fe5140ead6a281e,0x3feb4132c9140a1c,1 +np.float64,0xbfc147f125228fe4,0x3ffb4cab18b9050f,1 +np.float64,0xbfcb9145b537228c,0x3ffc9b1b6227a8c9,1 +np.float64,0xbfda84ef4bb509de,0x3ffff7f8a674e17d,1 +np.float64,0x3fd2eb6bbfa5d6d8,0x3ff454c225529d7e,1 +np.float64,0x3fe18c95f1e3192c,0x3fefb0cf0efba75a,1 +np.float64,0x3fe78606efef0c0e,0x3fe7d6c3a092d64c,1 +np.float64,0x3fbad5119a35aa20,0x3ff773dffe3ce660,1 +np.float64,0x3fd0cf5903a19eb4,0x3ff4e15fd21fdb42,1 +np.float64,0xbfd85ce90bb0b9d2,0x3fff618ee848e974,1 +np.float64,0x3fe90e11b9f21c24,0x3fe57be62f606f4a,1 +np.float64,0x3fd7a2040faf4408,0x3ff314ce85457ec2,1 +np.float64,0xbfd73fba69ae7f74,0x3fff14bff3504811,1 +np.float64,0x3fa04b4bd42096a0,0x3ff89f9b52f521a2,1 +np.float64,0xbfd7219ce5ae433a,0x3fff0cac0b45cc18,1 +np.float64,0xbfe0cf4661e19e8d,0x4000fdadb14e3c22,1 +np.float64,0x3fd07469fea0e8d4,0x3ff4f8eaa9b2394a,1 +np.float64,0x3f9b05c5d8360b80,0x3ff8b5e10672db5c,1 +np.float64,0x3fe4c25b916984b8,0x3febad29bd0e25e2,1 +np.float64,0xbfde8b4891bd1692,0x40008beb88d5c409,1 +np.float64,0xbfe199a7efe33350,0x400139b089aee21c,1 +np.float64,0x3fecdad25cf9b5a4,0x3fdc9d062867e8c3,1 +np.float64,0xbfe979b277f2f365,0x4003eedb061e25a4,1 +np.float64,0x3fc8c7311f318e60,0x3ff6040b9aeaad9d,1 +np.float64,0x3fd2b605b8a56c0c,0x3ff462b9a955c224,1 +np.float64,0x3fc073b6ad20e770,0x3ff7120e9f2fd63c,1 +np.float64,0xbfec60ede678c1dc,0x40054a3863e24dc2,1 +np.float64,0x3fe225171be44a2e,0x3feef910dca420ea,1 +np.float64,0xbfd7529762aea52e,0x3fff19d00661f650,1 +np.float64,0xbfd781783daf02f0,0x3fff2667b90be461,1 +np.float64,0x3fe3f6ec6d67edd8,0x3fecb4e814a2e33a,1 +np.float64,0x3fece6702df9cce0,0x3fdc6719d92a50d2,1 +np.float64,0xbfb5c602ce2b8c08,0x3ffa7ec761ba856a,1 +np.float64,0xbfd61f0153ac3e02,0x3ffec78e3b1a6c4d,1 +np.float64,0xbfec3462b2f868c5,0x400532630bbd7050,1 +np.float64,0xbfdd248485ba490a,0x400059391c07c1bb,1 +np.float64,0xbfd424921fa84924,0x3ffe416a85d1dcdf,1 +np.float64,0x3fbb23a932364750,0x3ff76eef79209f7f,1 +np.float64,0x3fca248b0f344918,0x3ff5d77c5c1b4e5e,1 +np.float64,0xbfe69af4a4ed35ea,0x4002d77c2e4fbd4e,1 +np.float64,0x3fdafe3cdcb5fc78,0x3ff22a9be6efbbf2,1 +np.float64,0xbfebba3377f77467,0x4004f3836e1fe71a,1 +np.float64,0xbfe650fae06ca1f6,0x4002bd851406377c,1 +np.float64,0x3fda630007b4c600,0x3ff2554f1832bd94,1 +np.float64,0xbfda8107d9b50210,0x3ffff6e6209659f3,1 +np.float64,0x3fea759a02f4eb34,0x3fe31d1a632c9aae,1 +np.float64,0x3fbf88149e3f1030,0x3ff728313aa12ccb,1 +np.float64,0x3f7196d2a0232e00,0x3ff910647e1914c1,1 +np.float64,0x3feeae51d17d5ca4,0x3fd2709698d31f6f,1 +np.float64,0xbfd73cd663ae79ac,0x3fff13f96300b55a,1 +np.float64,0x3fd4fc5f06a9f8c0,0x3ff3c99359854b97,1 +np.float64,0x3fb29f5d6e253ec0,0x3ff7f7c20e396b20,1 +np.float64,0xbfd757c82aaeaf90,0x3fff1b34c6141e98,1 +np.float64,0x3fc56fd4cf2adfa8,0x3ff670c145122909,1 +np.float64,0x3fc609a2f52c1348,0x3ff65d3ef3cade2c,1 +np.float64,0xbfe1de631163bcc6,0x40014e5528fadb73,1 +np.float64,0xbfe7eb4a726fd695,0x40035202f49d95c4,1 +np.float64,0xbfc9223771324470,0x3ffc4b84d5e263b9,1 +np.float64,0x3fee91a8a87d2352,0x3fd3364befde8de6,1 +np.float64,0x3fbc9784fe392f10,0x3ff7578e29f6a1b2,1 +np.float64,0xbfec627c2c78c4f8,0x40054b0ff2cb9c55,1 +np.float64,0xbfb8b406a6316810,0x3ffaadd97062fb8c,1 +np.float64,0xbfecf98384f9f307,0x4005a043d9110d79,1 +np.float64,0xbfe5834bab6b0698,0x400276f114aebee4,1 +np.float64,0xbfd90f391eb21e72,0x3fff91e26a8f48f3,1 +np.float64,0xbfee288ce2fc511a,0x400667cb09aa04b3,1 +np.float64,0x3fd5aa5e32ab54bc,0x3ff39b7080a52214,1 +np.float64,0xbfee7ef907fcfdf2,0x4006ab96a8eba4c5,1 +np.float64,0x3fd6097973ac12f4,0x3ff3822486978bd1,1 +np.float64,0xbfe02d14b8e05a2a,0x4000ce5be53047b1,1 +np.float64,0xbf9c629a6838c540,0x3ff993897728c3f9,1 +np.float64,0xbfee2024667c4049,0x40066188782fb1f0,1 +np.float64,0xbfa42a88fc285510,0x3ff9c35a4bbce104,1 +np.float64,0x3fa407af5c280f60,0x3ff881b360d8eea1,1 +np.float64,0x3fed0ba42cfa1748,0x3fdbb7d55609175f,1 +np.float64,0xbfdd0b5844ba16b0,0x400055b0bb59ebb2,1 +np.float64,0x3fd88d97e6b11b30,0x3ff2d53c1ecb8f8c,1 +np.float64,0xbfeb7a915ef6f523,0x4004d410812eb84c,1 +np.float64,0xbfb5f979ca2bf2f0,0x3ffa8201d73cd4ca,1 +np.float64,0x3fb3b65dd6276cc0,0x3ff7e64576199505,1 +np.float64,0x3fcd47a7793a8f50,0x3ff570a7b672f160,1 +np.float64,0xbfa41dd30c283ba0,0x3ff9c2f488127eb3,1 +np.float64,0x3fe4b1ea1f6963d4,0x3febc2bed7760427,1 +np.float64,0xbfdd0f81d2ba1f04,0x400056463724b768,1 +np.float64,0x3fd15d93f7a2bb28,0x3ff4bc7a24eacfd7,1 +np.float64,0xbfe3213af8e64276,0x4001b14579dfded3,1 +np.float64,0x3fd90dfbeab21bf8,0x3ff2b26a6c2c3bb3,1 +np.float64,0xbfd02d54bca05aaa,0x3ffd38ab3886b203,1 +np.float64,0x3fc218dcad2431b8,0x3ff6dced56d5b417,1 +np.float64,0x3fea5edf71f4bdbe,0x3fe3455ee09f27e6,1 +np.float64,0x3fa74319042e8640,0x3ff867d224545438,1 +np.float64,0x3fd970ad92b2e15c,0x3ff2979084815dc1,1 +np.float64,0x3fce0a4bf73c1498,0x3ff557a4df32df3e,1 +np.float64,0x3fef5c8e10feb91c,0x3fc99ca0eeaaebe4,1 +np.float64,0xbfedae997ffb5d33,0x400611af18f407ab,1 +np.float64,0xbfbcf07d6239e0f8,0x3ffaf201177a2d36,1 +np.float64,0xbfc3c52541278a4c,0x3ffb9d2af0408e4a,1 +np.float64,0x3fe4ef44e4e9de8a,0x3feb71f7331255e5,1 +np.float64,0xbfccd9f5f539b3ec,0x3ffcc53a99339592,1 +np.float64,0xbfda32c745b4658e,0x3fffe16e8727ef89,1 +np.float64,0xbfef54932a7ea926,0x40077e4605e61ca1,1 +np.float64,0x3fe9d4ae3573a95c,0x3fe4344a069a3fd0,1 +np.float64,0x3fda567e73b4acfc,0x3ff258bd77a663c7,1 +np.float64,0xbfd5bcac5eab7958,0x3ffead6379c19c52,1 +np.float64,0xbfee5e56f97cbcae,0x40069131fc54018d,1 +np.float64,0x3fc2d4413925a880,0x3ff6c54163816298,1 +np.float64,0xbfe9ddf6e873bbee,0x400418d8c722f7c5,1 +np.float64,0x3fdaf2a683b5e54c,0x3ff22dcda599d69c,1 +np.float64,0xbfca69789f34d2f0,0x3ffc7547ff10b1a6,1 +np.float64,0x3fed076f62fa0ede,0x3fdbcbda03c1d72a,1 +np.float64,0xbfcb38326f367064,0x3ffc8fb55dadeae5,1 +np.float64,0x3fe1938705e3270e,0x3fefa88130c5adda,1 +np.float64,0x3feaffae3b75ff5c,0x3fe221e3da537c7e,1 +np.float64,0x3fefc94acb7f9296,0x3fbd9a360ace67b4,1 +np.float64,0xbfe8bddeb0f17bbe,0x4003a316685c767e,1 +np.float64,0x3fbe10fbee3c21f0,0x3ff73fceb10650f5,1 +np.float64,0x3fde9126c1bd224c,0x3ff12a742f734d0a,1 +np.float64,0xbfe9686c91f2d0d9,0x4003e7bc6ee77906,1 +np.float64,0xbfb1ba4892237490,0x3ffa3dda064c2509,1 +np.float64,0xbfe2879100e50f22,0x400181c1a5b16f0f,1 +np.float64,0x3fd1cd40b6a39a80,0x3ff49f70e3064e95,1 +np.float64,0xbfc965869132cb0c,0x3ffc5419f3b43701,1 +np.float64,0x3fea7a6f2874f4de,0x3fe31480fb2dd862,1 +np.float64,0x3fc3bc56892778b0,0x3ff6a7e8fa0e8b0e,1 +np.float64,0x3fec1ed451f83da8,0x3fdfd78e564b8ad7,1 +np.float64,0x3feb77d16df6efa2,0x3fe13d083344e45e,1 +np.float64,0xbfe822e7c67045d0,0x400367104a830cf6,1 +np.float64,0x8000000000000001,0x3ff921fb54442d18,1 +np.float64,0xbfd4900918a92012,0x3ffe5dc0e19737b4,1 +np.float64,0x3fed184187fa3084,0x3fdb7b7a39f234f4,1 +np.float64,0x3fecef846179df08,0x3fdc3cb2228c3682,1 +np.float64,0xbfe2d2aed165a55e,0x400198e21c5b861b,1 +np.float64,0x7ff0000000000000,0x7ff8000000000000,1 +np.float64,0xbfee9409a07d2813,0x4006bd358232d073,1 +np.float64,0xbfecedc2baf9db86,0x4005995df566fc21,1 +np.float64,0x3fe6d857396db0ae,0x3fe8d2cb8794aa99,1 +np.float64,0xbf9a579e7834af40,0x3ff98b5cc8021e1c,1 +np.float64,0x3fc664fefb2cca00,0x3ff651a664ccf8fa,1 +np.float64,0xbfe8a7aa0e714f54,0x40039a5b4df938a0,1 +np.float64,0xbfdf27d380be4fa8,0x4000a241074dbae6,1 +np.float64,0x3fe00ddf55e01bbe,0x3ff0b94eb1ea1851,1 +np.float64,0x3feb47edbff68fdc,0x3fe199822d075959,1 +np.float64,0x3fb4993822293270,0x3ff7d80c838186d0,1 +np.float64,0xbfca2cd1473459a4,0x3ffc6d88c8de3d0d,1 +np.float64,0xbfea7d9c7674fb39,0x40045e4559e9e52d,1 +np.float64,0x3fe0dce425e1b9c8,0x3ff04099cab23289,1 +np.float64,0x3fd6bb7e97ad76fc,0x3ff352a30434499c,1 +np.float64,0x3fd4a4f16da949e4,0x3ff3e0b07432c9aa,1 +np.float64,0x8000000000000000,0x3ff921fb54442d18,1 +np.float64,0x3fe688f5b56d11ec,0x3fe9435f63264375,1 +np.float64,0xbfdf5a427ebeb484,0x4000a97a6c5d4abc,1 +np.float64,0xbfd1f3483fa3e690,0x3ffdae6c8a299383,1 +np.float64,0xbfeac920db759242,0x4004805862be51ec,1 +np.float64,0x3fef5bc711feb78e,0x3fc9ac40fba5b93b,1 +np.float64,0x3fe4bd9e12e97b3c,0x3febb363c787d381,1 +np.float64,0x3fef6a59ab7ed4b4,0x3fc880f1324eafce,1 +np.float64,0x3fc07a362120f470,0x3ff7113cf2c672b3,1 +np.float64,0xbfe4d6dbe2e9adb8,0x40023d6f6bea44b7,1 +np.float64,0xbfec2d6a15785ad4,0x40052eb425cc37a2,1 +np.float64,0x3fc90dae05321b60,0x3ff5fb10015d2934,1 +np.float64,0xbfa9239f74324740,0x3ff9eb2d057068ea,1 +np.float64,0xbfeb4fc8baf69f92,0x4004bf5e17fb08a4,1 +np.float64,0x0,0x3ff921fb54442d18,1 +np.float64,0x3faaf1884c35e320,0x3ff84a5591dbe1f3,1 +np.float64,0xbfed842561fb084b,0x4005f5c0a19116ce,1 +np.float64,0xbfc64850c32c90a0,0x3ffbeeac2ee70f9a,1 +np.float64,0x3fd7d879f5afb0f4,0x3ff306254c453436,1 +np.float64,0xbfdabaa586b5754c,0x4000035e6ac83a2b,1 +np.float64,0xbfebfeefa977fddf,0x4005167446fb9faf,1 +np.float64,0xbfe9383462727069,0x4003d407aa6a1577,1 +np.float64,0x3fe108dfb6e211c0,0x3ff026ac924b281d,1 +np.float64,0xbf85096df02a12c0,0x3ff94c0e60a22ede,1 +np.float64,0xbfe3121cd566243a,0x4001ac8f90db5882,1 +np.float64,0xbfd227f62aa44fec,0x3ffdbc26bb175dcc,1 +np.float64,0x3fd931af2cb26360,0x3ff2a8b62dfe003c,1 +np.float64,0xbfd9b794e3b36f2a,0x3fffbfbc89ec013d,1 +np.float64,0x3fc89b2e6f313660,0x3ff609a6e67f15f2,1 +np.float64,0x3fc0b14a8f216298,0x3ff70a4b6905aad2,1 +np.float64,0xbfeda11a657b4235,0x400608b3f9fff574,1 +np.float64,0xbfed2ee9ec7a5dd4,0x4005c040b7c02390,1 +np.float64,0xbfef7819d8fef034,0x4007ac6bf75cf09d,1 +np.float64,0xbfcc4720fb388e40,0x3ffcb2666a00b336,1 +np.float64,0xbfe05dec4be0bbd8,0x4000dc8a25ca3760,1 +np.float64,0x3fb093416e212680,0x3ff81897b6d8b374,1 +np.float64,0xbfc6ab89332d5714,0x3ffbfb4559d143e7,1 +np.float64,0x3fc51948512a3290,0x3ff67bb9df662c0a,1 +np.float64,0x3fed4d94177a9b28,0x3fda76c92f0c0132,1 +np.float64,0x3fdd195fbeba32c0,0x3ff194a5586dd18e,1 +np.float64,0x3fe3f82799e7f050,0x3fecb354c2faf55c,1 +np.float64,0x3fecac2169f95842,0x3fdd7222296cb7a7,1 +np.float64,0x3fe3d3f36fe7a7e6,0x3fece18f45e30dd7,1 +np.float64,0x3fe31ff63d663fec,0x3fedc46c77d30c6a,1 +np.float64,0xbfe3120c83e62419,0x4001ac8a7c4aa742,1 +np.float64,0x3fe7c1a7976f8350,0x3fe77e4a9307c9f8,1 +np.float64,0x3fe226fe9de44dfe,0x3feef6c0f3cb00fa,1 +np.float64,0x3fd5c933baab9268,0x3ff3933e8a37de42,1 +np.float64,0x3feaa98496f5530a,0x3fe2c003832ebf21,1 +np.float64,0xbfc6f80a2f2df014,0x3ffc04fd54cb1317,1 +np.float64,0x3fde5e18d0bcbc30,0x3ff138f7b32a2ca3,1 +np.float64,0xbfe30c8dd566191c,0x4001aad4af935a78,1 +np.float64,0x3fbe8d196e3d1a30,0x3ff737fec8149ecc,1 +np.float64,0x3feaee6731f5dcce,0x3fe241fa42cce22d,1 +np.float64,0x3fef9cc46cff3988,0x3fc3f17b708dbdbb,1 +np.float64,0xbfdb181bdeb63038,0x4000103ecf405602,1 +np.float64,0xbfc58de0ed2b1bc0,0x3ffbd704c14e15cd,1 +np.float64,0xbfee05d5507c0bab,0x40064e480faba6d8,1 +np.float64,0x3fe27d0ffa64fa20,0x3fee8dc71ef79f2c,1 +np.float64,0xbfe4f7ad4c69ef5a,0x400248456cd09a07,1 +np.float64,0xbfe4843e91e9087d,0x4002225f3e139c84,1 +np.float64,0x3fe7158b9c6e2b18,0x3fe87ae845c5ba96,1 +np.float64,0xbfea64316074c863,0x400452fd2bc23a44,1 +np.float64,0xbfc9f3ae4133e75c,0x3ffc663d482afa42,1 +np.float64,0xbfd5e18513abc30a,0x3ffeb72fc76d7071,1 +np.float64,0xbfd52f6438aa5ec8,0x3ffe87e5b18041e5,1 +np.float64,0xbfea970650f52e0d,0x400469a4a6758154,1 +np.float64,0xbfe44321b7e88644,0x40020d404a2141b1,1 +np.float64,0x3fdf5a39bbbeb474,0x3ff0f10453059dbd,1 +np.float64,0xbfa1d4069423a810,0x3ff9b0a2eacd2ce2,1 +np.float64,0xbfc36d16a326da2c,0x3ffb92077d41d26a,1 +np.float64,0x1,0x3ff921fb54442d18,1 +np.float64,0x3feb232a79764654,0x3fe1df5beeb249d0,1 +np.float64,0xbfed2003d5fa4008,0x4005b737c2727583,1 +np.float64,0x3fd5b093a3ab6128,0x3ff399ca2db1d96d,1 +np.float64,0x3fca692c3d34d258,0x3ff5ceb86b79223e,1 +np.float64,0x3fd6bbdf89ad77c0,0x3ff3528916df652d,1 +np.float64,0xbfefdadd46ffb5bb,0x40085ee735e19f19,1 +np.float64,0x3feb69fb2676d3f6,0x3fe157ee0c15691e,1 +np.float64,0x3fe44c931f689926,0x3fec46b6f5e3f265,1 +np.float64,0xbfc43ddbcb287bb8,0x3ffbac71d268d74d,1 +np.float64,0x3fe6e16d43edc2da,0x3fe8c5cf0f0daa66,1 +np.float64,0x3fe489efc76913e0,0x3febf704ca1ac2a6,1 +np.float64,0xbfe590aadceb2156,0x40027b764205cf78,1 +np.float64,0xbf782e8aa0305d00,0x3ff93a29e81928ab,1 +np.float64,0x3fedcb80cffb9702,0x3fd7e5d1f98a418b,1 +np.float64,0x3fe075858060eb0c,0x3ff07d23ab46b60f,1 +np.float64,0x3fe62a68296c54d0,0x3fe9c77f7068043b,1 +np.float64,0x3feff16a3c7fe2d4,0x3fae8e8a739cc67a,1 +np.float64,0xbfd6ed93e3addb28,0x3ffefebab206fa99,1 +np.float64,0x3fe40d8ccf681b1a,0x3fec97e9cd29966d,1 +np.float64,0x3fd6408210ac8104,0x3ff3737a7d374107,1 +np.float64,0x3fec8023b8f90048,0x3fde35ebfb2b3afd,1 +np.float64,0xbfe13babd4627758,0x40011dae5c07c56b,1 +np.float64,0xbfd2183e61a4307c,0x3ffdb80dd747cfbe,1 +np.float64,0x3feae8eb1d75d1d6,0x3fe24c1f6e42ae77,1 +np.float64,0xbfea559b9c74ab37,0x40044c8e5e123b20,1 +np.float64,0xbfd12c9d57a2593a,0x3ffd7ac6222f561c,1 +np.float64,0x3fe32eb697e65d6e,0x3fedb202693875b6,1 +np.float64,0xbfde0808c3bc1012,0x4000794bd8616ea3,1 +np.float64,0x3fe14958a06292b2,0x3ff0007b40ac648a,1 +np.float64,0x3fe3d388a6e7a712,0x3fece21751a6dd7c,1 +np.float64,0x3fe7ad7897ef5af2,0x3fe79c5b3da302a7,1 +np.float64,0x3fec75527e78eaa4,0x3fde655de0cf0508,1 +np.float64,0x3fea920d4c75241a,0x3fe2ea48f031d908,1 +np.float64,0x7fefffffffffffff,0x7ff8000000000000,1 +np.float64,0xbfc17a68cb22f4d0,0x3ffb530925f41aa0,1 +np.float64,0xbfe1c93166e39263,0x400147f3cb435dec,1 +np.float64,0x3feb97c402f72f88,0x3fe0fe5b561bf869,1 +np.float64,0x3fb58ff5162b1ff0,0x3ff7c8933fa969dc,1 +np.float64,0x3fe68e2beded1c58,0x3fe93c075283703b,1 +np.float64,0xbf94564cc828aca0,0x3ff97355e5ee35db,1 +np.float64,0x3fd31061c9a620c4,0x3ff44b150ec96998,1 +np.float64,0xbfc7d0c89f2fa190,0x3ffc208bf4eddc4d,1 +np.float64,0x3fe5736f1d6ae6de,0x3feac18f84992d1e,1 +np.float64,0x3fdb62e480b6c5c8,0x3ff20ecfdc4afe7c,1 +np.float64,0xbfc417228b282e44,0x3ffba78afea35979,1 +np.float64,0x3f8f5ba1303eb780,0x3ff8e343714630ff,1 +np.float64,0x3fe8e99126f1d322,0x3fe5b6511d4c0798,1 +np.float64,0xbfe2ec08a1e5d812,0x4001a0bb28a85875,1 +np.float64,0x3fea3b46cf74768e,0x3fe383dceaa74296,1 +np.float64,0xbfe008b5ed60116c,0x4000c3d62c275d40,1 +np.float64,0xbfcd9f8a4b3b3f14,0x3ffcde98d6484202,1 +np.float64,0xbfdb5fb112b6bf62,0x40001a22137ef1c9,1 +np.float64,0xbfe9079565f20f2b,0x4003c0670c92e401,1 +np.float64,0xbfce250dc53c4a1c,0x3ffcefc2b3dc3332,1 +np.float64,0x3fe9ba85d373750c,0x3fe4607131b28773,1 +np.float64,0x10000000000000,0x3ff921fb54442d18,1 +np.float64,0xbfeb9ef42c773de8,0x4004e5f239203ad8,1 +np.float64,0xbfd6bf457dad7e8a,0x3ffef2563d87b18d,1 +np.float64,0x3fe4de9aa5e9bd36,0x3feb87f97defb04a,1 +np.float64,0x3fedb4f67cfb69ec,0x3fd8603c465bffac,1 +np.float64,0x3fe7b6d9506f6db2,0x3fe78e670c7bdb67,1 +np.float64,0x3fe071717460e2e2,0x3ff07f84472d9cc5,1 +np.float64,0xbfed2e79dbfa5cf4,0x4005bffc6f9ad24f,1 +np.float64,0x3febb8adc377715c,0x3fe0bcebfbd45900,1 +np.float64,0xbfee2cffd87c5a00,0x40066b20a037c478,1 +np.float64,0x3fef7e358d7efc6c,0x3fc6d0ba71a542a8,1 +np.float64,0xbfef027eef7e04fe,0x400723291cb00a7a,1 +np.float64,0x3fac96da34392dc0,0x3ff83d260a936c6a,1 +np.float64,0x3fe9dba94a73b752,0x3fe428736b94885e,1 +np.float64,0x3fed37581efa6eb0,0x3fdae49dcadf1d90,1 +np.float64,0xbfe6e61037edcc20,0x4002f23031b8d522,1 +np.float64,0xbfdea7204dbd4e40,0x40008fe1f37918b7,1 +np.float64,0x3feb9f8edb773f1e,0x3fe0eef20bd4387b,1 +np.float64,0x3feeb0b6ed7d616e,0x3fd25fb3b7a525d6,1 +np.float64,0xbfd7ce9061af9d20,0x3fff3b25d531aa2b,1 +np.float64,0xbfc806b509300d6c,0x3ffc2768743a8360,1 +np.float64,0xbfa283882c250710,0x3ff9b61fda28914a,1 +np.float64,0x3fdec70050bd8e00,0x3ff11b1d769b578f,1 +np.float64,0xbfc858a44930b148,0x3ffc31d6758b4721,1 +np.float64,0x3fdc321150b86424,0x3ff1d5504c3c91e4,1 +np.float64,0x3fd9416870b282d0,0x3ff2a46f3a850f5b,1 +np.float64,0x3fdd756968baead4,0x3ff17ac510a5573f,1 +np.float64,0xbfedfd632cfbfac6,0x400648345a2f89b0,1 +np.float64,0x3fd6874285ad0e84,0x3ff36098ebff763f,1 +np.float64,0x3fe6daacc9edb55a,0x3fe8cf75fae1e35f,1 +np.float64,0x3fe53f19766a7e32,0x3feb07d0e97cd55b,1 +np.float64,0x3fd13cc36ca27988,0x3ff4c4ff801b1faa,1 +np.float64,0x3fe4f21cbce9e43a,0x3feb6e34a72ef529,1 +np.float64,0xbfc21c1cc9243838,0x3ffb67726394ca89,1 +np.float64,0x3fe947a3f2728f48,0x3fe51eae4660e23c,1 +np.float64,0xbfce78cd653cf19c,0x3ffcfa89194b3f5e,1 +np.float64,0x3fe756f049eeade0,0x3fe81be7f2d399e2,1 +np.float64,0xbfcc727cf138e4f8,0x3ffcb7f547841bb0,1 +np.float64,0xbfc2d8d58f25b1ac,0x3ffb7f496cc72458,1 +np.float64,0xbfcfd0e4653fa1c8,0x3ffd26e1309bc80b,1 +np.float64,0xbfe2126c106424d8,0x40015e0e01db6a4a,1 +np.float64,0x3fe580e4306b01c8,0x3feaaf683ce51aa5,1 +np.float64,0x3fcea8a1b93d5140,0x3ff543456c0d28c7,1 +np.float64,0xfff0000000000000,0x7ff8000000000000,1 +np.float64,0xbfd9d5da72b3abb4,0x3fffc8013113f968,1 +np.float64,0xbfe1fdfcea63fbfa,0x400157def2e4808d,1 +np.float64,0xbfc0022e0720045c,0x3ffb239963e7cbf2,1 diff --git a/local_packages/numpy/_core/tests/data/umath-validation-set-arccosh.csv b/local_packages/numpy/_core/tests/data/umath-validation-set-arccosh.csv new file mode 100644 index 0000000..1b3eda4 --- /dev/null +++ b/local_packages/numpy/_core/tests/data/umath-validation-set-arccosh.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0x3f83203f,0x3e61d9d6,2 +np.float32,0x3f98dea1,0x3f1d1af6,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0x7eba99af,0x42b0d032,2 +np.float32,0x3fc95a13,0x3f833650,2 +np.float32,0x3fce9a45,0x3f8771e1,2 +np.float32,0x3fc1bd96,0x3f797811,2 +np.float32,0x7eba2391,0x42b0ceed,2 +np.float32,0x7d4e8f15,0x42acdb8c,2 +np.float32,0x3feca42e,0x3f9cc88e,2 +np.float32,0x7e2b314e,0x42af412e,2 +np.float32,0x7f7fffff,0x42b2d4fc,2 +np.float32,0x3f803687,0x3d6c4380,2 +np.float32,0x3fa0edbd,0x3f33e706,2 +np.float32,0x3faa8074,0x3f4b3d3c,2 +np.float32,0x3fa0c49e,0x3f337af3,2 +np.float32,0x3f8c9ec4,0x3ee18812,2 +np.float32,0x7efef78e,0x42b17006,2 +np.float32,0x3fc75720,0x3f818aa4,2 +np.float32,0x7f52d4c8,0x42b27198,2 +np.float32,0x3f88f21e,0x3ebe52b0,2 +np.float32,0x3ff7a042,0x3fa3a07a,2 +np.float32,0x7f52115c,0x42b26fbd,2 +np.float32,0x3fc6bf6f,0x3f810b42,2 +np.float32,0x3fd105d0,0x3f895649,2 +np.float32,0x3fee7c2a,0x3f9df66e,2 +np.float32,0x7f0ff9a5,0x42b1ae4f,2 +np.float32,0x7e81f075,0x42b016e7,2 +np.float32,0x3fa57d65,0x3f3f70c6,2 +np.float32,0x80800000,0xffc00000,2 +np.float32,0x7da239f5,0x42adc2bf,2 +np.float32,0x3f9e432c,0x3f2cbd80,2 +np.float32,0x3ff2839b,0x3fa07ee4,2 +np.float32,0x3fec8aef,0x3f9cb850,2 +np.float32,0x7d325893,0x42ac905b,2 +np.float32,0x3fa27431,0x3f37dade,2 +np.float32,0x3fce7408,0x3f8753ae,2 +np.float32,0x3fde6684,0x3f93353f,2 +np.float32,0x3feb9a3e,0x3f9c1cff,2 +np.float32,0x7deb34bb,0x42ae80f0,2 +np.float32,0x3fed9300,0x3f9d61b7,2 +np.float32,0x7f35e253,0x42b225fb,2 +np.float32,0x7e6db57f,0x42afe93f,2 +np.float32,0x3fa41f08,0x3f3c10bc,2 +np.float32,0x3fb0d4da,0x3f590de3,2 +np.float32,0x3fb5c690,0x3f632351,2 +np.float32,0x3fcde9ce,0x3f86e638,2 +np.float32,0x3f809c7b,0x3dc81161,2 +np.float32,0x3fd77291,0x3f8e3226,2 +np.float32,0x3fc21a06,0x3f7a1a82,2 +np.float32,0x3fba177e,0x3f6b8139,2 +np.float32,0x7f370dff,0x42b22944,2 +np.float32,0x3fe5bfcc,0x3f9841c1,2 +np.float32,0x3feb0caa,0x3f9bc139,2 +np.float32,0x7f4fe5c3,0x42b26a6c,2 +np.float32,0x7f1e1419,0x42b1de28,2 +np.float32,0x7f5e3c96,0x42b28c92,2 +np.float32,0x3f8cd313,0x3ee3521e,2 +np.float32,0x3fa97824,0x3f48e049,2 +np.float32,0x7d8ca281,0x42ad799e,2 +np.float32,0x3f96b51b,0x3f165193,2 +np.float32,0x3f81328a,0x3e0bf504,2 +np.float32,0x3ff60bf3,0x3fa2ab45,2 +np.float32,0x3ff9b629,0x3fa4e107,2 +np.float32,0x3fecacfc,0x3f9cce37,2 +np.float32,0x3fba8804,0x3f6c5600,2 +np.float32,0x3f81f752,0x3e333fdd,2 +np.float32,0x3fb5b262,0x3f62fb46,2 +np.float32,0x3fa21bc0,0x3f36f7e6,2 +np.float32,0x3fbc87bb,0x3f7011dc,2 +np.float32,0x3fe18b32,0x3f9565ae,2 +np.float32,0x7dfb6dd5,0x42aea316,2 +np.float32,0x3fb7c602,0x3f670ee3,2 +np.float32,0x7efeb6a2,0x42b16f84,2 +np.float32,0x3fa56180,0x3f3f2ca4,2 +np.float32,0x3f8dcaff,0x3eeb9ac0,2 +np.float32,0x7e876238,0x42b02beb,2 +np.float32,0x7f0bb67d,0x42b19eec,2 +np.float32,0x3faca01c,0x3f4fffa5,2 +np.float32,0x3fdb57ee,0x3f9108b8,2 +np.float32,0x3fe3bade,0x3f96e4b7,2 +np.float32,0x7f7aa2dd,0x42b2ca25,2 +np.float32,0x3fed92ec,0x3f9d61aa,2 +np.float32,0x7eb789b1,0x42b0c7b9,2 +np.float32,0x7f7f16e4,0x42b2d329,2 +np.float32,0x3fb6647e,0x3f645b84,2 +np.float32,0x3f99335e,0x3f1e1d96,2 +np.float32,0x7e690a11,0x42afdf17,2 +np.float32,0x7dff2f95,0x42aeaaae,2 +np.float32,0x7f70adfd,0x42b2b564,2 +np.float32,0x3fe92252,0x3f9a80fe,2 +np.float32,0x3fef54ce,0x3f9e7fe5,2 +np.float32,0x3ff24eaa,0x3fa05df9,2 +np.float32,0x7f04565a,0x42b18328,2 +np.float32,0x3fcb8b80,0x3f85007f,2 +np.float32,0x3fcd4d0a,0x3f866983,2 +np.float32,0x3fbe7d82,0x3f73a911,2 +np.float32,0x3f8a7a8a,0x3ecdc8f6,2 +np.float32,0x3f912441,0x3f030d56,2 +np.float32,0x3f9b29d6,0x3f23f663,2 +np.float32,0x3fab7f36,0x3f4d7c6c,2 +np.float32,0x7dfedafc,0x42aeaa04,2 +np.float32,0x3fe190c0,0x3f956982,2 +np.float32,0x3f927515,0x3f07e0bb,2 +np.float32,0x3ff6442a,0x3fa2cd7e,2 +np.float32,0x7f6656d0,0x42b29ee8,2 +np.float32,0x3fe29aa0,0x3f96201f,2 +np.float32,0x3fa4a247,0x3f3d5687,2 +np.float32,0x3fa1cf19,0x3f363226,2 +np.float32,0x3fc20037,0x3f79ed36,2 +np.float32,0x7cc1241a,0x42ab5645,2 +np.float32,0x3fafd540,0x3f56f25a,2 +np.float32,0x7e5b3f5f,0x42afbfdb,2 +np.float32,0x7f48de5f,0x42b258d0,2 +np.float32,0x3fce1ca0,0x3f870e85,2 +np.float32,0x7ee40bb2,0x42b136e4,2 +np.float32,0x7ecdb133,0x42b10212,2 +np.float32,0x3f9f181c,0x3f2f02ca,2 +np.float32,0x3f936cbf,0x3f0b4f63,2 +np.float32,0x3fa4f8ea,0x3f3e2c2f,2 +np.float32,0x3fcc03e2,0x3f8561ac,2 +np.float32,0x3fb801f2,0x3f67831b,2 +np.float32,0x7e141dad,0x42aef70c,2 +np.float32,0x3fe8c04e,0x3f9a4087,2 +np.float32,0x3f8548d5,0x3e929f37,2 +np.float32,0x7f148d7d,0x42b1be56,2 +np.float32,0x3fd2c9a2,0x3f8ab1ed,2 +np.float32,0x7eb374fd,0x42b0bc36,2 +np.float32,0x7f296d36,0x42b201a7,2 +np.float32,0x3ff138e2,0x3f9fb09d,2 +np.float32,0x3ff42898,0x3fa18347,2 +np.float32,0x7da8c5e1,0x42add700,2 +np.float32,0x7dcf72c4,0x42ae40a4,2 +np.float32,0x7ea571fc,0x42b09296,2 +np.float32,0x3fc0953d,0x3f776ba3,2 +np.float32,0x7f1773dd,0x42b1c83c,2 +np.float32,0x7ef53b68,0x42b15c17,2 +np.float32,0x3f85d69f,0x3e9a0f3a,2 +np.float32,0x7e8b9a05,0x42b03ba0,2 +np.float32,0x3ff07d20,0x3f9f3ad2,2 +np.float32,0x7e8da32c,0x42b0430a,2 +np.float32,0x7ef96004,0x42b164ab,2 +np.float32,0x3fdfaa62,0x3f941837,2 +np.float32,0x7f0057c5,0x42b17377,2 +np.float32,0x3fb2663f,0x3f5c5065,2 +np.float32,0x3fd3d8c3,0x3f8b8055,2 +np.float32,0x1,0xffc00000,2 +np.float32,0x3fd536c1,0x3f8c8862,2 +np.float32,0x3f91b953,0x3f053619,2 +np.float32,0x3fb3305c,0x3f5deee1,2 +np.float32,0x7ecd86b9,0x42b101a8,2 +np.float32,0x3fbf71c5,0x3f75624d,2 +np.float32,0x3ff5f0f4,0x3fa29ad2,2 +np.float32,0x3fe50389,0x3f97c328,2 +np.float32,0x3fa325a1,0x3f399e69,2 +np.float32,0x3fe4397a,0x3f973a9f,2 +np.float32,0x3f8684c6,0x3ea2b784,2 +np.float32,0x7f25ae00,0x42b1f634,2 +np.float32,0x3ff7cbf7,0x3fa3badb,2 +np.float32,0x7f73f0e0,0x42b2bc48,2 +np.float32,0x3fc88b70,0x3f828b92,2 +np.float32,0x3fb01c16,0x3f578886,2 +np.float32,0x7e557623,0x42afb229,2 +np.float32,0x3fcbcd5b,0x3f8535b4,2 +np.float32,0x7f7157e4,0x42b2b6cd,2 +np.float32,0x7f51d9d4,0x42b26f36,2 +np.float32,0x7f331a3b,0x42b21e17,2 +np.float32,0x7f777fb5,0x42b2c3b2,2 +np.float32,0x3f832001,0x3e61d11f,2 +np.float32,0x7f2cd055,0x42b20bca,2 +np.float32,0x3f89831f,0x3ec42f76,2 +np.float32,0x7f21da33,0x42b1ea3d,2 +np.float32,0x3f99e416,0x3f20330a,2 +np.float32,0x7f2c8ea1,0x42b20b07,2 +np.float32,0x7f462c98,0x42b251e6,2 +np.float32,0x7f4fdb3f,0x42b26a52,2 +np.float32,0x3fcc1338,0x3f856e07,2 +np.float32,0x3f823673,0x3e3e20da,2 +np.float32,0x7dbfe89d,0x42ae18c6,2 +np.float32,0x3fc9b04c,0x3f837d38,2 +np.float32,0x7dba3213,0x42ae094d,2 +np.float32,0x7ec5a483,0x42b0eda1,2 +np.float32,0x3fbc4d14,0x3f6fa543,2 +np.float32,0x3fc85ce2,0x3f8264f1,2 +np.float32,0x7f77c816,0x42b2c447,2 +np.float32,0x3f9c9281,0x3f280492,2 +np.float32,0x7f49b3e2,0x42b25aef,2 +np.float32,0x3fa7e4da,0x3f45347c,2 +np.float32,0x7e0c9df5,0x42aedc72,2 +np.float32,0x7f21fd1a,0x42b1eaab,2 +np.float32,0x7f7c63ad,0x42b2cdb6,2 +np.float32,0x7f4eb80a,0x42b26783,2 +np.float32,0x7e98038c,0x42b0673c,2 +np.float32,0x7e89ba08,0x42b034b4,2 +np.float32,0x3ffc06ba,0x3fa64094,2 +np.float32,0x3fae63f6,0x3f53db36,2 +np.float32,0x3fbc2d30,0x3f6f6a1c,2 +np.float32,0x7de0e5e5,0x42ae69fe,2 +np.float32,0x7e09ed18,0x42aed28d,2 +np.float32,0x3fea78f8,0x3f9b6129,2 +np.float32,0x7dfe0bcc,0x42aea863,2 +np.float32,0x7ee21d03,0x42b13289,2 +np.float32,0x3fcc3aed,0x3f858dfc,2 +np.float32,0x3fe6b3ba,0x3f98e4ea,2 +np.float32,0x3f90f25f,0x3f025225,2 +np.float32,0x7f1bcaf4,0x42b1d6b3,2 +np.float32,0x3f83ac81,0x3e74c20e,2 +np.float32,0x3f98681d,0x3f1bae16,2 +np.float32,0x3fe1f2d9,0x3f95ad08,2 +np.float32,0x3fa279d7,0x3f37e951,2 +np.float32,0x3feb922a,0x3f9c17c4,2 +np.float32,0x7f1c72e8,0x42b1d8da,2 +np.float32,0x3fea156b,0x3f9b2038,2 +np.float32,0x3fed6bda,0x3f9d48aa,2 +np.float32,0x3fa86142,0x3f46589c,2 +np.float32,0x3ff16bc2,0x3f9fd072,2 +np.float32,0x3fbebf65,0x3f74207b,2 +np.float32,0x7e7b78b5,0x42b00610,2 +np.float32,0x3ff51ab8,0x3fa217f0,2 +np.float32,0x3f8361bb,0x3e6adf07,2 +np.float32,0x7edbceed,0x42b1240e,2 +np.float32,0x7f10e2c0,0x42b1b18a,2 +np.float32,0x3fa7bc58,0x3f44d4ef,2 +np.float32,0x3f813bde,0x3e0e1138,2 +np.float32,0x7f30d5b9,0x42b21791,2 +np.float32,0x3fb4f450,0x3f61806a,2 +np.float32,0x7eee02c4,0x42b14cca,2 +np.float32,0x7ec74b62,0x42b0f1e4,2 +np.float32,0x3ff96bca,0x3fa4b498,2 +np.float32,0x7f50e304,0x42b26cda,2 +np.float32,0x7eb14c57,0x42b0b603,2 +np.float32,0x7c3f0733,0x42a9edbf,2 +np.float32,0x7ea57acb,0x42b092b1,2 +np.float32,0x7f2788dc,0x42b1fbe7,2 +np.float32,0x3fa39f14,0x3f3ad09b,2 +np.float32,0x3fc3a7e0,0x3f7ccfa0,2 +np.float32,0x3fe70a73,0x3f991eb0,2 +np.float32,0x7f4831f7,0x42b25718,2 +np.float32,0x3fe947d0,0x3f9a999c,2 +np.float32,0x7ef2b1c7,0x42b156c4,2 +np.float32,0x3fede0ea,0x3f9d937f,2 +np.float32,0x3f9fef8e,0x3f314637,2 +np.float32,0x3fc313c5,0x3f7bcebd,2 +np.float32,0x7ee99337,0x42b14328,2 +np.float32,0x7eb9042e,0x42b0cbd5,2 +np.float32,0x3fc9d3dc,0x3f839a69,2 +np.float32,0x3fb2c018,0x3f5d091d,2 +np.float32,0x3fcc4e8f,0x3f859dc5,2 +np.float32,0x3fa9363b,0x3f484819,2 +np.float32,0x7f72ce2e,0x42b2b9e4,2 +np.float32,0x7e639326,0x42afd2f1,2 +np.float32,0x7f4595d3,0x42b25060,2 +np.float32,0x7f6d0ac4,0x42b2ad97,2 +np.float32,0x7f1bda0d,0x42b1d6e5,2 +np.float32,0x3fd85ffd,0x3f8ee0ed,2 +np.float32,0x3f91d53f,0x3f059c8e,2 +np.float32,0x7d06e103,0x42ac0155,2 +np.float32,0x3fb83126,0x3f67de6e,2 +np.float32,0x7d81ce1f,0x42ad5097,2 +np.float32,0x7f79cb3b,0x42b2c86b,2 +np.float32,0x7f800000,0x7f800000,2 +np.float32,0x3fdbfffd,0x3f918137,2 +np.float32,0x7f4ecb1c,0x42b267b2,2 +np.float32,0x3fc2c122,0x3f7b3ed3,2 +np.float32,0x7f415854,0x42b24544,2 +np.float32,0x7e3d988b,0x42af7575,2 +np.float32,0x3f83ca99,0x3e789fcb,2 +np.float32,0x7f274f70,0x42b1fb38,2 +np.float32,0x7f0d20e6,0x42b1a416,2 +np.float32,0x3fdf3a1d,0x3f93c9c1,2 +np.float32,0x7efaa13e,0x42b1673d,2 +np.float32,0x3fb20b15,0x3f5b9434,2 +np.float32,0x3f86af9f,0x3ea4c664,2 +np.float32,0x3fe4fcb0,0x3f97be8a,2 +np.float32,0x3f920683,0x3f065085,2 +np.float32,0x3fa4b278,0x3f3d7e8b,2 +np.float32,0x3f8077a8,0x3daef77f,2 +np.float32,0x7e865be4,0x42b02807,2 +np.float32,0x3fcea7e2,0x3f877c9f,2 +np.float32,0x7e7e9db1,0x42b00c6d,2 +np.float32,0x3f9819aa,0x3f1aba7e,2 +np.float32,0x7f2b6c4b,0x42b207a7,2 +np.float32,0x7ef85e3e,0x42b16299,2 +np.float32,0x3fbd8290,0x3f71df8b,2 +np.float32,0x3fbbb615,0x3f6e8c8c,2 +np.float32,0x7f1bc7f5,0x42b1d6a9,2 +np.float32,0x3fbb4fea,0x3f6dcdad,2 +np.float32,0x3fb67e09,0x3f648dd1,2 +np.float32,0x3fc83495,0x3f824374,2 +np.float32,0x3fe52980,0x3f97dcbc,2 +np.float32,0x3f87d893,0x3eb25d7c,2 +np.float32,0x3fdb805a,0x3f9125c0,2 +np.float32,0x3fb33f0f,0x3f5e0ce1,2 +np.float32,0x3facc524,0x3f50516b,2 +np.float32,0x3ff40484,0x3fa16d0e,2 +np.float32,0x3ff078bf,0x3f9f3811,2 +np.float32,0x7f736747,0x42b2bb27,2 +np.float32,0x7f55768b,0x42b277f3,2 +np.float32,0x80000001,0xffc00000,2 +np.float32,0x7f6463d1,0x42b29a8e,2 +np.float32,0x3f8f8b59,0x3ef9d792,2 +np.float32,0x3f8a6f4d,0x3ecd5bf4,2 +np.float32,0x3fe958d9,0x3f9aa4ca,2 +np.float32,0x7f1e2ce2,0x42b1de78,2 +np.float32,0x3fb8584a,0x3f682a05,2 +np.float32,0x7dea3dc6,0x42ae7ed5,2 +np.float32,0x7f53a815,0x42b27399,2 +np.float32,0x7e0cf986,0x42aeddbf,2 +np.float32,0x7f3afb71,0x42b23422,2 +np.float32,0x3fd87d6e,0x3f8ef685,2 +np.float32,0x3ffcaa46,0x3fa6a0d7,2 +np.float32,0x7eecd276,0x42b14a3a,2 +np.float32,0x3ffc30b4,0x3fa65951,2 +np.float32,0x7e9c85e2,0x42b07634,2 +np.float32,0x3f95d862,0x3f1383de,2 +np.float32,0x7ef21410,0x42b15577,2 +np.float32,0x3fbfa1b5,0x3f75b86e,2 +np.float32,0x3fd6d90f,0x3f8dc086,2 +np.float32,0x0,0xffc00000,2 +np.float32,0x7e885dcd,0x42b02f9f,2 +np.float32,0x3fb3e057,0x3f5f54bf,2 +np.float32,0x7f40afdd,0x42b24385,2 +np.float32,0x3fb795c2,0x3f66b120,2 +np.float32,0x3fba7c11,0x3f6c3f73,2 +np.float32,0x3ffef620,0x3fa7f828,2 +np.float32,0x7d430508,0x42acbe1e,2 +np.float32,0x3f8d2892,0x3ee6369f,2 +np.float32,0x3fbea139,0x3f73e9d5,2 +np.float32,0x3ffaa928,0x3fa571b9,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0x7f16f9ce,0x42b1c69f,2 +np.float32,0x3fa8f753,0x3f47b657,2 +np.float32,0x3fd48a63,0x3f8c06ac,2 +np.float32,0x7f13419e,0x42b1b9d9,2 +np.float32,0x3fdf1526,0x3f93afde,2 +np.float32,0x3f903c8b,0x3eff3be8,2 +np.float32,0x7f085323,0x42b1925b,2 +np.float32,0x7cdbe309,0x42ab98ac,2 +np.float32,0x3fba2cfd,0x3f6ba9f1,2 +np.float32,0x7f5a805d,0x42b283e4,2 +np.float32,0x7f6753dd,0x42b2a119,2 +np.float32,0x3fed9f02,0x3f9d6964,2 +np.float32,0x3f96422c,0x3f14ddba,2 +np.float32,0x7f22f2a9,0x42b1edb1,2 +np.float32,0x3fe3fcfd,0x3f97119d,2 +np.float32,0x7e018ad0,0x42aeb271,2 +np.float32,0x7db896f5,0x42ae04de,2 +np.float32,0x7e55c795,0x42afb2ec,2 +np.float32,0x7f58ef8d,0x42b28036,2 +np.float32,0x7f24a16a,0x42b1f2f3,2 +np.float32,0x3fcf714c,0x3f881b09,2 +np.float32,0x3fcdd056,0x3f86d200,2 +np.float32,0x7f02fad0,0x42b17de0,2 +np.float32,0x7eeab877,0x42b145a9,2 +np.float32,0x3fd6029d,0x3f8d20f7,2 +np.float32,0x3fd4f8cd,0x3f8c59d6,2 +np.float32,0x3fb29d4a,0x3f5cc1a5,2 +np.float32,0x3fb11e2d,0x3f59a77a,2 +np.float32,0x7eded576,0x42b12b0e,2 +np.float32,0x7f26c2a5,0x42b1f988,2 +np.float32,0x3fb6165b,0x3f63c151,2 +np.float32,0x7f3bca47,0x42b23657,2 +np.float32,0x7d8c93bf,0x42ad7968,2 +np.float32,0x3f8ede02,0x3ef47176,2 +np.float32,0x3fbef762,0x3f7485b9,2 +np.float32,0x7f1419af,0x42b1bcc6,2 +np.float32,0x7d9e8c79,0x42adb701,2 +np.float32,0x3fa26336,0x3f37af63,2 +np.float32,0x7f5f5590,0x42b28f18,2 +np.float32,0x3fddc93a,0x3f92c651,2 +np.float32,0x3ff0a5fc,0x3f9f547f,2 +np.float32,0x3fb2f6b8,0x3f5d790e,2 +np.float32,0x3ffe59a4,0x3fa79d2c,2 +np.float32,0x7e4df848,0x42af9fde,2 +np.float32,0x3fb0ab3b,0x3f58b678,2 +np.float32,0x7ea54d47,0x42b09225,2 +np.float32,0x3fdd6404,0x3f927eb2,2 +np.float32,0x3f846dc0,0x3e864caa,2 +np.float32,0x7d046aee,0x42abf7e7,2 +np.float32,0x7f7c5a05,0x42b2cda3,2 +np.float32,0x3faf6126,0x3f55fb21,2 +np.float32,0x7f36a910,0x42b22829,2 +np.float32,0x3fdc7b36,0x3f91d938,2 +np.float32,0x3fff443e,0x3fa82577,2 +np.float32,0x7ee7154a,0x42b13daa,2 +np.float32,0x3f944742,0x3f0e435c,2 +np.float32,0x7f5b510a,0x42b285cc,2 +np.float32,0x3f9bc940,0x3f25c4d2,2 +np.float32,0x3fee4782,0x3f9dd4ea,2 +np.float32,0x3fcfc2dd,0x3f885aea,2 +np.float32,0x7eab65cf,0x42b0a4af,2 +np.float32,0x3f9cf908,0x3f292689,2 +np.float32,0x7ed35501,0x42b10feb,2 +np.float32,0x7dabb70a,0x42addfd9,2 +np.float32,0x7f348919,0x42b2222b,2 +np.float32,0x3fb137d4,0x3f59dd17,2 +np.float32,0x7e7b36c9,0x42b0058a,2 +np.float32,0x7e351fa4,0x42af5e0d,2 +np.float32,0x3f973c0c,0x3f18011e,2 +np.float32,0xff800000,0xffc00000,2 +np.float32,0x3f9b0a4b,0x3f239a33,2 +np.float32,0x3f87c4cf,0x3eb17e7e,2 +np.float32,0x7ef67760,0x42b15eaa,2 +np.float32,0x3fc4d2c8,0x3f7ed20f,2 +np.float32,0x7e940dac,0x42b059b8,2 +np.float32,0x7f6e6a52,0x42b2b08d,2 +np.float32,0x3f838752,0x3e6fe4b2,2 +np.float32,0x3fd8f046,0x3f8f4a94,2 +np.float32,0x3fa82112,0x3f45c223,2 +np.float32,0x3fd49b16,0x3f8c1345,2 +np.float32,0x7f02a941,0x42b17ca1,2 +np.float32,0x3f8a9d2c,0x3ecf1768,2 +np.float32,0x7c9372e3,0x42aacc0f,2 +np.float32,0x3fd260b3,0x3f8a619a,2 +np.float32,0x3f8a1b88,0x3eca27cb,2 +np.float32,0x7d25d510,0x42ac6b1c,2 +np.float32,0x7ef5a578,0x42b15cf5,2 +np.float32,0x3fe6625d,0x3f98ae9a,2 +np.float32,0x3ff53240,0x3fa22658,2 +np.float32,0x3f8bb2e6,0x3ed944cf,2 +np.float32,0x7f4679b1,0x42b252ad,2 +np.float32,0x3fa8db30,0x3f4774fc,2 +np.float32,0x7ee5fafd,0x42b13b37,2 +np.float32,0x3fc405e0,0x3f7d71fb,2 +np.float32,0x3f9303cd,0x3f09ddfd,2 +np.float32,0x7f486e67,0x42b257b2,2 +np.float32,0x7e73f12b,0x42aff680,2 +np.float32,0x3fe80f8b,0x3f99cbe4,2 +np.float32,0x3f84200a,0x3e81a3f3,2 +np.float32,0x3fa14e5c,0x3f34e3ce,2 +np.float32,0x3fda22ec,0x3f9029bb,2 +np.float32,0x3f801772,0x3d1aef98,2 +np.float32,0x7eaa1428,0x42b0a0bb,2 +np.float32,0x3feae0b3,0x3f9ba4aa,2 +np.float32,0x7ea439b4,0x42b08ecc,2 +np.float32,0x3fa28b1c,0x3f381579,2 +np.float32,0x7e8af247,0x42b03937,2 +np.float32,0x3fd19216,0x3f89c2b7,2 +np.float32,0x7f6ea033,0x42b2b100,2 +np.float32,0x3fad4fbf,0x3f518224,2 +np.float32,0x3febd940,0x3f9c45bd,2 +np.float32,0x7f4643a3,0x42b25221,2 +np.float32,0x7ec34478,0x42b0e771,2 +np.float32,0x7f18c83b,0x42b1ccb5,2 +np.float32,0x3fc665ad,0x3f80bf94,2 +np.float32,0x3ff0a999,0x3f9f56c4,2 +np.float32,0x3faf1cd2,0x3f5568fe,2 +np.float32,0x7ecd9dc6,0x42b101e1,2 +np.float32,0x3faad282,0x3f4bf754,2 +np.float32,0x3ff905a0,0x3fa47771,2 +np.float32,0x7f596481,0x42b28149,2 +np.float32,0x7f1cb31f,0x42b1d9ac,2 +np.float32,0x7e266719,0x42af32a6,2 +np.float32,0x7eccce06,0x42b0ffdb,2 +np.float32,0x3f9b6f71,0x3f24c102,2 +np.float32,0x3f80e4ba,0x3df1d6bc,2 +np.float32,0x3f843d51,0x3e836a60,2 +np.float32,0x7f70bd88,0x42b2b585,2 +np.float32,0x3fe4cc96,0x3f979e18,2 +np.float32,0x3ff737c7,0x3fa36151,2 +np.float32,0x3ff1197e,0x3f9f9cf4,2 +np.float32,0x7f08e190,0x42b19471,2 +np.float32,0x3ff1542e,0x3f9fc1b2,2 +np.float32,0x3ff6673c,0x3fa2e2d2,2 +np.float32,0xbf800000,0xffc00000,2 +np.float32,0x7e3f9ba7,0x42af7add,2 +np.float32,0x7f658ff6,0x42b29d2d,2 +np.float32,0x3f93441c,0x3f0ac0d9,2 +np.float32,0x7f526a74,0x42b27096,2 +np.float32,0x7f5b00c8,0x42b28511,2 +np.float32,0x3ff212f8,0x3fa038cf,2 +np.float32,0x7e0bd60d,0x42aed998,2 +np.float32,0x7f71ef7f,0x42b2b80e,2 +np.float32,0x7f7a897e,0x42b2c9f1,2 +np.float32,0x7e8b76a6,0x42b03b1e,2 +np.float32,0x7efa0da3,0x42b1660f,2 +np.float32,0x3fce9166,0x3f876ae0,2 +np.float32,0x3fc4163d,0x3f7d8e30,2 +np.float32,0x3fdb3784,0x3f90f16b,2 +np.float32,0x7c5f177b,0x42aa3d30,2 +np.float32,0x3fc6276d,0x3f808af5,2 +np.float32,0x7bac9cc2,0x42a856f4,2 +np.float32,0x3fe5876f,0x3f981bea,2 +np.float32,0x3fef60e3,0x3f9e878a,2 +np.float32,0x3fb23cd8,0x3f5bfb06,2 +np.float32,0x3fe114e2,0x3f951402,2 +np.float32,0x7ca8ef04,0x42ab11b4,2 +np.float32,0x7d93c2ad,0x42ad92ec,2 +np.float32,0x3fe5bb8a,0x3f983ee6,2 +np.float32,0x7f0182fd,0x42b1781b,2 +np.float32,0x7da63bb2,0x42adcf3d,2 +np.float32,0x3fac46b7,0x3f4f399e,2 +np.float32,0x7f7a5d8f,0x42b2c997,2 +np.float32,0x7f76572e,0x42b2c14b,2 +np.float32,0x7f42d53e,0x42b24931,2 +np.float32,0x7f7ffd00,0x42b2d4f6,2 +np.float32,0x3fc346c3,0x3f7c2756,2 +np.float32,0x7f1f6ae3,0x42b1e27a,2 +np.float32,0x3f87fb56,0x3eb3e2ee,2 +np.float32,0x3fed17a2,0x3f9d12b4,2 +np.float32,0x7f5ea903,0x42b28d8c,2 +np.float32,0x3f967f82,0x3f15a4ab,2 +np.float32,0x7d3b540c,0x42aca984,2 +np.float32,0x7f56711a,0x42b27a4a,2 +np.float32,0x7f122223,0x42b1b5ee,2 +np.float32,0x3fd6fa34,0x3f8dd919,2 +np.float32,0x3fadd62e,0x3f52a7b3,2 +np.float32,0x3fb7bf0c,0x3f67015f,2 +np.float32,0x7edf4ba7,0x42b12c1d,2 +np.float32,0x7e33cc65,0x42af5a4b,2 +np.float32,0x3fa6be17,0x3f427831,2 +np.float32,0x3fa07aa8,0x3f32b7d4,2 +np.float32,0x3fa4a3af,0x3f3d5a01,2 +np.float32,0x3fdbb267,0x3f9149a8,2 +np.float32,0x7ed45e25,0x42b1126c,2 +np.float32,0x3fe3f432,0x3f970ba6,2 +np.float32,0x7f752080,0x42b2bec3,2 +np.float32,0x3f872747,0x3eaa62ea,2 +np.float32,0x7e52175d,0x42afaa03,2 +np.float32,0x3fdc766c,0x3f91d5ce,2 +np.float32,0x7ecd6841,0x42b1015c,2 +np.float32,0x7f3d6c40,0x42b23ac6,2 +np.float32,0x3fb80c14,0x3f6796b9,2 +np.float32,0x3ff6ad56,0x3fa30d68,2 +np.float32,0x3fda44c3,0x3f90423e,2 +np.float32,0x3fdcba0c,0x3f9205fc,2 +np.float32,0x7e14a720,0x42aef8e6,2 +np.float32,0x3fe9e489,0x3f9b0047,2 +np.float32,0x7e69f933,0x42afe123,2 +np.float32,0x3ff3ee6d,0x3fa15f71,2 +np.float32,0x3f8538cd,0x3e91c1a7,2 +np.float32,0x3fdc3f07,0x3f91ae46,2 +np.float32,0x3fba2ef0,0x3f6bada2,2 +np.float32,0x7da64cd8,0x42adcf71,2 +np.float32,0x3fc34bd2,0x3f7c301d,2 +np.float32,0x3fa273aa,0x3f37d984,2 +np.float32,0x3ff0338c,0x3f9f0c86,2 +np.float32,0x7ed62cef,0x42b116c3,2 +np.float32,0x3f911e7e,0x3f02f7c6,2 +np.float32,0x7c8514c9,0x42aa9792,2 +np.float32,0x3fea2a74,0x3f9b2df5,2 +np.float32,0x3fe036f8,0x3f947a25,2 +np.float32,0x7c5654bf,0x42aa28ad,2 +np.float32,0x3fd9e423,0x3f8ffc32,2 +np.float32,0x7eec0439,0x42b1487b,2 +np.float32,0x3fc580f4,0x3f7ffb62,2 +np.float32,0x3fb0e316,0x3f592bbe,2 +np.float32,0x7c4cfb7d,0x42aa11d8,2 +np.float32,0x3faf9704,0x3f566e00,2 +np.float32,0x3fa7cf8a,0x3f45023d,2 +np.float32,0x7f7b724d,0x42b2cbcc,2 +np.float32,0x7f05bfe3,0x42b18897,2 +np.float32,0x3f90bde3,0x3f018bf3,2 +np.float32,0x7c565479,0x42aa28ad,2 +np.float32,0x3f94b517,0x3f0fb8e5,2 +np.float32,0x3fd6aadd,0x3f8d9e3c,2 +np.float32,0x7f09b37c,0x42b1977f,2 +np.float32,0x7f2b45ea,0x42b20734,2 +np.float32,0x3ff1d15e,0x3fa00fe9,2 +np.float32,0x3f99bce6,0x3f1fbd6c,2 +np.float32,0x7ecd1f76,0x42b100a7,2 +np.float32,0x7f443e2b,0x42b24ce2,2 +np.float32,0x7da7d6a5,0x42add428,2 +np.float32,0x7ebe0193,0x42b0d975,2 +np.float32,0x7ee13c43,0x42b1308b,2 +np.float32,0x3f8adf1b,0x3ed18e0c,2 +np.float32,0x7f76ce65,0x42b2c242,2 +np.float32,0x7e34f43d,0x42af5d92,2 +np.float32,0x7f306b76,0x42b2165d,2 +np.float32,0x7e1fd07f,0x42af1df7,2 +np.float32,0x3fab9a41,0x3f4db909,2 +np.float32,0x3fc23d1a,0x3f7a5803,2 +np.float32,0x3f8b7403,0x3ed70245,2 +np.float32,0x3f8c4dd6,0x3edebbae,2 +np.float32,0x3fe5f411,0x3f9864cd,2 +np.float32,0x3f88128b,0x3eb4e508,2 +np.float32,0x3fcb09de,0x3f84976f,2 +np.float32,0x7f32f2f5,0x42b21da6,2 +np.float32,0x3fe75610,0x3f9950f6,2 +np.float32,0x3f993edf,0x3f1e408d,2 +np.float32,0x3fc4a9d7,0x3f7e8be9,2 +np.float32,0x7f74551a,0x42b2bd1a,2 +np.float32,0x7de87129,0x42ae7ae2,2 +np.float32,0x7f18bbbd,0x42b1cc8c,2 +np.float32,0x7e7e1dd4,0x42b00b6c,2 +np.float32,0x3ff6e55b,0x3fa32f64,2 +np.float32,0x3fa634c8,0x3f412df3,2 +np.float32,0x3fd0fb7c,0x3f894e49,2 +np.float32,0x3ff4f6a6,0x3fa201d7,2 +np.float32,0x7f69d418,0x42b2a69a,2 +np.float32,0x7cb9632d,0x42ab414a,2 +np.float32,0x3fc57d36,0x3f7ff503,2 +np.float32,0x7e9e2ed7,0x42b07b9b,2 +np.float32,0x7f2e6868,0x42b2107d,2 +np.float32,0x3fa3169a,0x3f39785d,2 +np.float32,0x7f03cde0,0x42b18117,2 +np.float32,0x7f6d75d2,0x42b2ae7f,2 +np.float32,0x3ff483f2,0x3fa1bb75,2 +np.float32,0x7f1b39f7,0x42b1d4d6,2 +np.float32,0x3f8c7a7d,0x3ee0481e,2 +np.float32,0x3f989095,0x3f1c2b19,2 +np.float32,0x3fa4cbfd,0x3f3dbd87,2 +np.float32,0x7f75b00f,0x42b2bfef,2 +np.float32,0x3f940724,0x3f0d6756,2 +np.float32,0x7f5e5a1a,0x42b28cd6,2 +np.float32,0x800000,0xffc00000,2 +np.float32,0x7edd1d29,0x42b12716,2 +np.float32,0x3fa3e9e4,0x3f3b8c16,2 +np.float32,0x7e46d70e,0x42af8dd5,2 +np.float32,0x3f824745,0x3e40ec1e,2 +np.float32,0x3fd67623,0x3f8d770a,2 +np.float32,0x3fe9a6f3,0x3f9ad7fa,2 +np.float32,0x3fdda67c,0x3f92adc1,2 +np.float32,0x7ccb6c9a,0x42ab70d4,2 +np.float32,0x3ffd364a,0x3fa6f2fe,2 +np.float32,0x7e02424c,0x42aeb545,2 +np.float32,0x3fb6d2f2,0x3f6534a1,2 +np.float32,0x3fe1fe26,0x3f95b4cc,2 +np.float32,0x7e93ac57,0x42b05867,2 +np.float32,0x7f7b3433,0x42b2cb4d,2 +np.float32,0x3fb76803,0x3f66580d,2 +np.float32,0x3f9af881,0x3f23661b,2 +np.float32,0x3fd58062,0x3f8cbf98,2 +np.float32,0x80000000,0xffc00000,2 +np.float32,0x7f1af8f4,0x42b1d3ff,2 +np.float32,0x3fe66bba,0x3f98b4dc,2 +np.float32,0x7f6bd7bf,0x42b2aaff,2 +np.float32,0x3f84f79a,0x3e8e2e49,2 +np.float32,0x7e475b06,0x42af8f28,2 +np.float32,0x3faff89b,0x3f573d5e,2 +np.float32,0x7de5aa77,0x42ae74bb,2 +np.float32,0x3f8e9e42,0x3ef26cd2,2 +np.float32,0x3fb1cec3,0x3f5b1740,2 +np.float32,0x3f8890d6,0x3eba4821,2 +np.float32,0x3f9b39e9,0x3f242547,2 +np.float32,0x3fc895a4,0x3f829407,2 +np.float32,0x7f77943c,0x42b2c3dc,2 +np.float32,0x7f390d58,0x42b22ed2,2 +np.float32,0x3fe7e160,0x3f99ad58,2 +np.float32,0x3f93d2a0,0x3f0cb205,2 +np.float32,0x7f29499b,0x42b2013c,2 +np.float32,0x3f8c11b2,0x3edca10f,2 +np.float32,0x7e898ef8,0x42b03413,2 +np.float32,0x3fdff942,0x3f944f34,2 +np.float32,0x7f3d602f,0x42b23aa5,2 +np.float32,0x3f8a50f3,0x3ecc345b,2 +np.float32,0x3fa1f86d,0x3f369ce4,2 +np.float32,0x3f97ad95,0x3f19681d,2 +np.float32,0x3ffad1e0,0x3fa589e5,2 +np.float32,0x3fa70590,0x3f432311,2 +np.float32,0x7e6840cb,0x42afdd5c,2 +np.float32,0x3fd4036d,0x3f8ba0aa,2 +np.float32,0x7f7cc953,0x42b2ce84,2 +np.float32,0x7f228e1e,0x42b1ec74,2 +np.float32,0x7e37a866,0x42af652a,2 +np.float32,0x3fda22d0,0x3f9029a7,2 +np.float32,0x7f736bff,0x42b2bb31,2 +np.float32,0x3f9833b6,0x3f1b0b8e,2 +np.float32,0x7f466001,0x42b2526a,2 +np.float32,0xff7fffff,0xffc00000,2 +np.float32,0x7dd62bcd,0x42ae50f8,2 +np.float32,0x7f1d2bfe,0x42b1db36,2 +np.float32,0x7ecffe9e,0x42b107c5,2 +np.float32,0x7ebefe0a,0x42b0dc1b,2 +np.float32,0x7f45c63d,0x42b250dd,2 +np.float32,0x7f601af0,0x42b290db,2 +np.float32,0x3fcbb88a,0x3f8524e5,2 +np.float32,0x7ede55ff,0x42b129e8,2 +np.float32,0x7ea5dd5a,0x42b093e2,2 +np.float32,0x3ff53857,0x3fa22a12,2 +np.float32,0x3f8dbd6a,0x3eeb28a4,2 +np.float32,0x3fd1b467,0x3f89dd2c,2 +np.float32,0x3fe0423f,0x3f9481fc,2 +np.float32,0x3f84b421,0x3e8a6174,2 +np.float32,0x7f4efc97,0x42b2682c,2 +np.float32,0x7f601b33,0x42b290dc,2 +np.float32,0x3f94f240,0x3f108719,2 +np.float32,0x7decd251,0x42ae8471,2 +np.float32,0x3fdc457c,0x3f91b2e2,2 +np.float32,0x3f92a966,0x3f089c5a,2 +np.float32,0x3fc9732f,0x3f834afc,2 +np.float32,0x3f97948f,0x3f19194e,2 +np.float32,0x7f0824a1,0x42b191ac,2 +np.float32,0x7f0365a5,0x42b17f81,2 +np.float32,0x3f800000,0x0,2 +np.float32,0x7f0054c6,0x42b1736b,2 +np.float32,0x3fe86544,0x3f9a0484,2 +np.float32,0x7e95f844,0x42b0604e,2 +np.float32,0x3fce8602,0x3f8761e2,2 +np.float32,0x3fc726c8,0x3f81621d,2 +np.float32,0x3fcf6b03,0x3f88161b,2 +np.float32,0x3fceb843,0x3f87898a,2 +np.float32,0x3fe2f8b2,0x3f966071,2 +np.float32,0x7f3c8e7f,0x42b2386d,2 +np.float32,0x3fcee13a,0x3f87a9d2,2 +np.float32,0x3fc4df27,0x3f7ee73c,2 +np.float32,0x3ffde486,0x3fa758e3,2 +np.float32,0x3fa91be0,0x3f480b17,2 +np.float32,0x7f2a5a7d,0x42b20472,2 +np.float32,0x7e278d80,0x42af362d,2 +np.float32,0x3f96d091,0x3f16a9d5,2 +np.float32,0x7e925225,0x42b053b2,2 +np.float32,0x7f7ef83a,0x42b2d2ec,2 +np.float32,0x7eb4923a,0x42b0bf61,2 +np.float32,0x7e98bf19,0x42b069b3,2 +np.float32,0x3fac93a2,0x3f4fe410,2 +np.float32,0x7f46389c,0x42b25205,2 +np.float32,0x3f9fd447,0x3f30fd54,2 +np.float32,0x3fef42d4,0x3f9e7483,2 +np.float32,0x7f482174,0x42b256ed,2 +np.float32,0x3f97aedb,0x3f196c1e,2 +np.float32,0x7f764edd,0x42b2c13a,2 +np.float32,0x3f9117b5,0x3f02de5c,2 +np.float32,0x3fc7984e,0x3f81c12d,2 +np.float64,0x3ff1e2cb7463c597,0x3fdec6caf39e0c0e,1 +np.float64,0x3ffe4f89789c9f13,0x3ff40f4b1da0f3e9,1 +np.float64,0x7f6a5c9ac034b935,0x408605e51703c145,1 +np.float64,0x7fdcb6ece3b96dd9,0x40862d6521e16d60,1 +np.float64,0x3ff6563e182cac7c,0x3feb9d8210f3fa88,1 +np.float64,0x7fde32025f3c6404,0x40862dcc1d1a9b7f,1 +np.float64,0x7fd755ed35aeabd9,0x40862bbc5522b779,1 +np.float64,0x3ff5c81f4bcb903e,0x3fea71f10b954ea3,1 +np.float64,0x3fffe805d35fd00c,0x3ff50463a1ba2938,1 +np.float64,0x7fd045a1c1a08b43,0x408628d9f431f2f5,1 +np.float64,0x3ff49f7dd9893efc,0x3fe7c6736e17ea8e,1 +np.float64,0x7fccfbc1fd39f783,0x408627eca79acf51,1 +np.float64,0x3ff1af0a00035e14,0x3fdd1c0e7d5706ea,1 +np.float64,0x7fe7bd17162f7a2d,0x4086316af683502b,1 +np.float64,0x3ff0941b8d012837,0x3fd128d274065ac0,1 +np.float64,0x3ffa0c5d98b418bb,0x3ff11af9c8edd17f,1 +np.float64,0x3ffad9733355b2e6,0x3ff1b6d1307acb42,1 +np.float64,0x3ffabb2a33d57654,0x3ff1a0442b034e50,1 +np.float64,0x3ff36118b0c6c231,0x3fe472b7dfb23516,1 +np.float64,0x3ff2441d3664883a,0x3fe0d61145608f0c,1 +np.float64,0x7fe039862d20730b,0x40862e5f8ed752d3,1 +np.float64,0x7fb1dde24023bbc4,0x40861e824cdb0664,1 +np.float64,0x7face6335839cc66,0x40861ccf90a26e16,1 +np.float64,0x3ffb5d0e1af6ba1c,0x3ff2170f6f42fafe,1 +np.float64,0x3ff5c2c6a50b858d,0x3fea665aabf04407,1 +np.float64,0x3ffabb409db57681,0x3ff1a054ea32bfc3,1 +np.float64,0x3ff1e054e983c0aa,0x3fdeb30c17286cb6,1 +np.float64,0x7fe467f73268cfed,0x4086303529e52e9b,1 +np.float64,0x7fe0e86bf961d0d7,0x40862eb40788b04a,1 +np.float64,0x3ffb743542f6e86a,0x3ff227b4ea5acee0,1 +np.float64,0x3ff2de6826e5bcd0,0x3fe2e31fcde0a96c,1 +np.float64,0x7fd6b27ccfad64f9,0x40862b8385697c31,1 +np.float64,0x7fe0918e8d21231c,0x40862e8a82d9517a,1 +np.float64,0x7fd0ca0395a19406,0x4086291a0696ed33,1 +np.float64,0x3ffb042496960849,0x3ff1d658c928abfc,1 +np.float64,0x3ffcd0409799a081,0x3ff31877df0cb245,1 +np.float64,0x7fe429bd06685379,0x4086301c9f259934,1 +np.float64,0x3ff933076092660f,0x3ff06d2e5f4d9ab7,1 +np.float64,0x7feaefcb28f5df95,0x4086326dccf88e6f,1 +np.float64,0x7fb5f2c1f82be583,0x40862027ac02a39d,1 +np.float64,0x3ffb5d9e3bd6bb3c,0x3ff21777501d097e,1 +np.float64,0x10000000000000,0xfff8000000000000,1 +np.float64,0x3ff70361596e06c3,0x3fecf675ceda7e19,1 +np.float64,0x3ff71a21b5ee3444,0x3fed224fa048d9a9,1 +np.float64,0x3ffb102b86762057,0x3ff1df2cc9390518,1 +np.float64,0x7feaaeb35c355d66,0x4086325a60704a90,1 +np.float64,0x7fd9a3d0a93347a0,0x40862c7d300fc076,1 +np.float64,0x7fabcf159c379e2a,0x40861c80cdbbff27,1 +np.float64,0x7fd1c066ec2380cd,0x4086298c3006fee6,1 +np.float64,0x3ff3d5ae2d67ab5c,0x3fe5bc16447428db,1 +np.float64,0x3ff4b76add696ed6,0x3fe800f5bbf21376,1 +np.float64,0x3ff60d89ee0c1b14,0x3feb063fdebe1a68,1 +np.float64,0x7f1d2648003a4c8f,0x4085eaf9238af95a,1 +np.float64,0x7fe8b45f6df168be,0x408631bca5abf6d6,1 +np.float64,0x7fe9ea5308f3d4a5,0x4086321ea2bd3af9,1 +np.float64,0x7fcb6ba5a636d74a,0x4086277b208075ed,1 +np.float64,0x3ff621cfd74c43a0,0x3feb30d59baf5919,1 +np.float64,0x3ff7bc8ca0af7919,0x3fee524da8032896,1 +np.float64,0x7fda22dd0c3445b9,0x40862ca47326d063,1 +np.float64,0x7fd02ed4b2a05da8,0x408628ceb6919421,1 +np.float64,0x3ffe64309fdcc861,0x3ff41c1b18940709,1 +np.float64,0x3ffee4042abdc808,0x3ff46a6005bccb41,1 +np.float64,0x3ff078145b00f029,0x3fceeb3d6bfae0eb,1 +np.float64,0x7fda20fd20b441f9,0x40862ca3e03b990b,1 +np.float64,0x3ffa9e9e9af53d3d,0x3ff18ade3cbee789,1 +np.float64,0x3ff0a1062501420c,0x3fd1e32de6d18c0d,1 +np.float64,0x3ff3bdf118477be2,0x3fe57ad89b7fdf8b,1 +np.float64,0x3ff101c0d5c20382,0x3fd6965d3539be47,1 +np.float64,0x7feba3b53b774769,0x408632a28c7aca4d,1 +np.float64,0x3ff598db5d4b31b7,0x3fea0aa65c0b421a,1 +np.float64,0x3ff5fdfbb72bfbf8,0x3feae55accde4a5e,1 +np.float64,0x7fe5bae53aab75c9,0x408630b5e7a5b92a,1 +np.float64,0x3ff8f668afd1ecd2,0x3ff03af686666c9c,1 +np.float64,0x3ff5ba72dd2b74e6,0x3fea5441f223c093,1 +np.float64,0x3ff8498147109302,0x3fef4e45d501601d,1 +np.float64,0x7feddcfa5efbb9f4,0x4086334106a6e76b,1 +np.float64,0x7fd1a30200234603,0x4086297ee5cc562c,1 +np.float64,0x3ffffa8ee07ff51e,0x3ff50f1dc46f1303,1 +np.float64,0x7fef7ed00ebefd9f,0x408633ae01dabe52,1 +np.float64,0x3ffb6e062276dc0c,0x3ff22344c58c2016,1 +np.float64,0x7fcf2b59943e56b2,0x4086288190dd5eeb,1 +np.float64,0x3ffa589f9254b13f,0x3ff155cc081eee0b,1 +np.float64,0x3ff05415ca60a82c,0x3fc9e45565baef0a,1 +np.float64,0x7feb34bed576697d,0x408632822d5a178c,1 +np.float64,0x3ff3993845c73270,0x3fe51423baf246c3,1 +np.float64,0x3ff88367aaf106d0,0x3fefb2d9ca9f1192,1 +np.float64,0x7fef364304fe6c85,0x4086339b7ed82997,1 +np.float64,0x7fcba2c317374585,0x4086278b24e42934,1 +np.float64,0x3ff1aef885e35df1,0x3fdd1b79f55b20c0,1 +np.float64,0x7fe19367886326ce,0x40862f035f867445,1 +np.float64,0x3ff3c8295e279053,0x3fe5970aa670d32e,1 +np.float64,0x3ff6edda164ddbb4,0x3feccca9eb59d6b9,1 +np.float64,0x7fdeaea940bd5d52,0x40862dece02d151b,1 +np.float64,0x7fea9d6324353ac5,0x408632552ddf0d4f,1 +np.float64,0x7fe60e39e66c1c73,0x408630d45b1ad0c4,1 +np.float64,0x7fde06325abc0c64,0x40862dc07910038c,1 +np.float64,0x7f9ec89d303d9139,0x408617c55ea4c576,1 +np.float64,0x3ff9801930530032,0x3ff0abe5be046051,1 +np.float64,0x3ff4d5859689ab0b,0x3fe849a7f7a19fa3,1 +np.float64,0x3ff38afbc48715f8,0x3fe4ebb7710cbab9,1 +np.float64,0x3ffd88a0e77b1142,0x3ff3916964407e21,1 +np.float64,0x1,0xfff8000000000000,1 +np.float64,0x3ff5db59e58bb6b4,0x3fea9b6b5ccc116f,1 +np.float64,0x3ffd4b05b15a960c,0x3ff369792f661a90,1 +np.float64,0x7fdcebc4fb39d789,0x40862d73cd623378,1 +np.float64,0x3ff5b56f944b6adf,0x3fea4955d6b06ca3,1 +np.float64,0x7fd4e4abf2a9c957,0x40862ad9e9da3c61,1 +np.float64,0x7fe08e0d6aa11c1a,0x40862e88d17ef277,1 +np.float64,0x3ff0dfc97da1bf93,0x3fd50f9004136d8f,1 +np.float64,0x7fdec38eaebd871c,0x40862df2511e26b4,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x3ff21865504430cb,0x3fe033fe3cf3947a,1 +np.float64,0x7fdc139708b8272d,0x40862d371cfbad03,1 +np.float64,0x7fe1fe3be3a3fc77,0x40862f336e3ba63a,1 +np.float64,0x7fd9fa2493b3f448,0x40862c97f2960be9,1 +np.float64,0x3ff0a027db414050,0x3fd1d6e54a707c87,1 +np.float64,0x3ff568b16f4ad163,0x3fe99f5c6d7b6e18,1 +np.float64,0x3ffe2f82877c5f05,0x3ff3fb54bd0da753,1 +np.float64,0x7fbaf5778435eaee,0x408621ccc9e2c1be,1 +np.float64,0x7fc5aaf8362b55ef,0x40862598e7072a49,1 +np.float64,0x7fe0ebfdd4a1d7fb,0x40862eb5b7bf99d5,1 +np.float64,0x7fd8efeb5931dfd6,0x40862c444636f408,1 +np.float64,0x3ff361a308c6c346,0x3fe4744cae63e6df,1 +np.float64,0x7fef287d39be50f9,0x40863397f65c807e,1 +np.float64,0x7fe72c4a14ae5893,0x4086313992e52082,1 +np.float64,0x3ffd1be44cba37c8,0x3ff34a9a45239eb9,1 +np.float64,0x3ff50369c18a06d4,0x3fe8b69319f091f1,1 +np.float64,0x3ffb333c25766678,0x3ff1f8c78eeb28f1,1 +np.float64,0x7fe12050416240a0,0x40862ece4e2f2f24,1 +np.float64,0x7fe348f5526691ea,0x40862fc16fbe7b6c,1 +np.float64,0x3ff343cc4d068799,0x3fe41c2a30cab7d2,1 +np.float64,0x7fd1b0daaa2361b4,0x408629852b3104ff,1 +np.float64,0x3ff6a41f37ad483e,0x3fec3b36ee6c6d4a,1 +np.float64,0x3ffad9439435b287,0x3ff1b6add9a1b3d7,1 +np.float64,0x7fbeb9a2f23d7345,0x408622d89ac1eaba,1 +np.float64,0x3ffab3d39fb567a7,0x3ff19ac75b4427f3,1 +np.float64,0x3ff890003ed12000,0x3fefc8844471c6ad,1 +np.float64,0x3ffc9f595e593eb2,0x3ff2f7a8699f06d8,1 +np.float64,0x7fe2224ef6e4449d,0x40862f43684a154a,1 +np.float64,0x3ffa67ba08d4cf74,0x3ff161525778df99,1 +np.float64,0x7fe87e24b570fc48,0x408631ab02b159fb,1 +np.float64,0x7fd6e99be92dd337,0x40862b96dba73685,1 +np.float64,0x7fe90f39fdf21e73,0x408631d9dbd36c1e,1 +np.float64,0x3ffb7806abd6f00e,0x3ff22a719b0f4c46,1 +np.float64,0x3ffa511ba3d4a238,0x3ff1500c124f6e17,1 +np.float64,0x3ff5d7a569abaf4b,0x3fea937391c280e8,1 +np.float64,0x7fc4279d20284f39,0x40862504a5cdcb96,1 +np.float64,0x3ffe8791b1fd0f24,0x3ff431f1ed7eaba0,1 +np.float64,0x7fe3b2f5276765e9,0x40862fecf15e2535,1 +np.float64,0x7feeab0e7abd561c,0x408633778044cfbc,1 +np.float64,0x7fdba88531375109,0x40862d1860306d7a,1 +np.float64,0x7fe7b19b3def6335,0x4086316716d6890b,1 +np.float64,0x3ff9e9437413d287,0x3ff0ff89431c748c,1 +np.float64,0x3ff960716a52c0e3,0x3ff092498028f802,1 +np.float64,0x3ff271bf56a4e37f,0x3fe1786fc8dd775d,1 +np.float64,0x3fff2a6578be54cb,0x3ff494bbe303eeb5,1 +np.float64,0x3ffd842eb5fb085e,0x3ff38e8b7ba42bc5,1 +np.float64,0x3ff91600e5d22c02,0x3ff0553c6a6b3d93,1 +np.float64,0x3ff9153f45f22a7e,0x3ff0549c0eaecf95,1 +np.float64,0x7fe0ab319da15662,0x40862e96da3b19f9,1 +np.float64,0x3ff06acd1f60d59a,0x3fcd2aca543d2772,1 +np.float64,0x3ffb3e7a54d67cf4,0x3ff200f288cd391b,1 +np.float64,0x3ffd01356f1a026b,0x3ff339003462a56c,1 +np.float64,0x3ffacd35def59a6c,0x3ff1adb8d32b3ec0,1 +np.float64,0x3ff6f953264df2a6,0x3fece2f992948d6e,1 +np.float64,0x3ff0fa91f5a1f524,0x3fd64609a28f1590,1 +np.float64,0x7fd1b7610ca36ec1,0x408629881e03dc7d,1 +np.float64,0x3ff4317fb7c86300,0x3fe6b086ed265887,1 +np.float64,0x3ff3856198070ac3,0x3fe4dbb6bc88b9e3,1 +np.float64,0x7fed7fc4573aff88,0x40863327e7013a81,1 +np.float64,0x3ffe53cbbf5ca798,0x3ff411f07a29b1f4,1 +np.float64,0x3ff092195b012433,0x3fd10b1c0b4b14fe,1 +np.float64,0x3ff1a3171163462e,0x3fdcb5c301d5d40d,1 +np.float64,0x3ffa1401f1742804,0x3ff120eb319e9faa,1 +np.float64,0x7fd352f6f426a5ed,0x40862a3a048feb6d,1 +np.float64,0x7fd4ee246fa9dc48,0x40862add895d808f,1 +np.float64,0x3ff0675cfa00ceba,0x3fccb2222c5493ca,1 +np.float64,0x3ffe5cb38f3cb967,0x3ff417773483d161,1 +np.float64,0x7fe11469ea2228d3,0x40862ec8bd3e497f,1 +np.float64,0x3fff13cba67e2798,0x3ff4872fe2c26104,1 +np.float64,0x3ffb73d3d316e7a8,0x3ff2276f08612ea2,1 +np.float64,0x7febfb70f237f6e1,0x408632bbc9450721,1 +np.float64,0x3ff84a0d87b0941b,0x3fef4f3b707e3145,1 +np.float64,0x7fd71fd5082e3fa9,0x40862ba9b4091172,1 +np.float64,0x3ff560737d8ac0e7,0x3fe98cc9c9ba2f61,1 +np.float64,0x3ff46a266ae8d44d,0x3fe74190e5234822,1 +np.float64,0x7fe8cc9225719923,0x408631c477db9708,1 +np.float64,0x3ff871de5930e3bc,0x3fef948f7d00fbef,1 +np.float64,0x3ffd0bc7895a178f,0x3ff33ffc18357721,1 +np.float64,0x3ff66099f9ccc134,0x3febb2bc775b4720,1 +np.float64,0x7fe91f1be9723e37,0x408631deec3a5c9e,1 +np.float64,0x7fd60462f12c08c5,0x40862b4537e1c1c6,1 +np.float64,0x3ff053100ba0a620,0x3fc9bc0c21e2284f,1 +np.float64,0x7fd864c611b0c98b,0x40862c1724506255,1 +np.float64,0x7fd191decb2323bd,0x408629771bfb68cc,1 +np.float64,0x3ff792a1656f2543,0x3fee054f2e135fcf,1 +np.float64,0x7fd03625cea06c4b,0x408628d253b840e3,1 +np.float64,0x7fc3967716272ced,0x408624ca35451042,1 +np.float64,0x7fe6636cb32cc6d8,0x408630f3073a22a7,1 +np.float64,0x3ffc2d3976585a73,0x3ff2a9d4c0dae607,1 +np.float64,0x3fffd10ee79fa21e,0x3ff4f70db69888be,1 +np.float64,0x3ff1d4fcae23a9f9,0x3fde57675007b23c,1 +np.float64,0x3ffa5da19e14bb43,0x3ff1599f74d1c113,1 +np.float64,0x3ff7f4eb0d6fe9d6,0x3feeb85189659e99,1 +np.float64,0x7fbcca44d8399489,0x408622536234f7c1,1 +np.float64,0x7fef5f97ec3ebf2f,0x408633a60fdde0d7,1 +np.float64,0x7fde4a66da3c94cd,0x40862dd290ebc184,1 +np.float64,0x3ff072957a40e52b,0x3fce34d913d87613,1 +np.float64,0x3ff2bc4c9dc57899,0x3fe27497e6ebe27d,1 +np.float64,0x7fd7d152b4afa2a4,0x40862be63469eecd,1 +np.float64,0x3ff957d768f2afaf,0x3ff08b4ad8062a73,1 +np.float64,0x7fe4bc5f45a978be,0x40863055fd66e4eb,1 +np.float64,0x7fc90de345321bc6,0x408626c24ce7e370,1 +np.float64,0x3ff2d7a37d85af47,0x3fe2cd6a40b544a0,1 +np.float64,0x7fe536ea1f6a6dd3,0x40863084bade76a3,1 +np.float64,0x3fff970c9cdf2e19,0x3ff4d524572356dd,1 +np.float64,0x3ffe173ae63c2e76,0x3ff3ec1ee35ad28c,1 +np.float64,0x3ff714025cce2805,0x3fed168aedff4a2b,1 +np.float64,0x7fce7b414c3cf682,0x40862853dcdd19d4,1 +np.float64,0x3ff019623f2032c4,0x3fbc7c602df0bbaf,1 +np.float64,0x3ff72f57fd0e5eb0,0x3fed4ae75f697432,1 +np.float64,0x3ff283778e8506ef,0x3fe1b5c5725b0dfd,1 +np.float64,0x3ff685a29aed0b45,0x3febfdfdedd581e2,1 +np.float64,0x3ff942d24fb285a4,0x3ff07a224c3ecfaf,1 +np.float64,0x3ff2e4a9f465c954,0x3fe2f71905399e8f,1 +np.float64,0x7fdfa1c7fa3f438f,0x40862e2b4e06f098,1 +np.float64,0x3ff49b59c26936b4,0x3fe7bc41c8c1e59d,1 +np.float64,0x3ff2102d3704205a,0x3fe014bf7e28924e,1 +np.float64,0x3ff88de3b8311bc8,0x3fefc4e3e0a15a89,1 +np.float64,0x7fea5ba25374b744,0x40863241519c9b66,1 +np.float64,0x3fffe5df637fcbbf,0x3ff5032488f570f9,1 +np.float64,0x7fe67cfefe6cf9fd,0x408630fc25333cb4,1 +np.float64,0x3ff090bf2b01217e,0x3fd0f6fcf1092b4a,1 +np.float64,0x7fecd75bc5f9aeb7,0x408632f9b6c2e013,1 +np.float64,0x7fe15df38c62bbe6,0x40862eeae5ac944b,1 +np.float64,0x3ff4757875a8eaf1,0x3fe75e0eafbe28ce,1 +np.float64,0x7fecca8a51b99514,0x408632f627c23923,1 +np.float64,0x3ff91ca529d2394a,0x3ff05abb327fd1ca,1 +np.float64,0x3ffb962993b72c53,0x3ff23ff831717579,1 +np.float64,0x3ffd548a2c7aa914,0x3ff36fac7f56d716,1 +np.float64,0x7fbafb5cb035f6b8,0x408621ce898a02fb,1 +np.float64,0x3ff1d86daca3b0db,0x3fde73536c29218c,1 +np.float64,0x7fa8d0f8f431a1f1,0x40861b97a03c3a18,1 +np.float64,0x3ff44f1067489e21,0x3fe6fcbd8144ab2a,1 +np.float64,0x7fec062b07380c55,0x408632bed9c6ce85,1 +np.float64,0x3ff7e11e0fcfc23c,0x3fee94ada7efaac4,1 +np.float64,0x7fe77505c1aeea0b,0x4086315287dda0ba,1 +np.float64,0x7fc465af2728cb5d,0x4086251d236107f7,1 +np.float64,0x3ffe811c4a7d0238,0x3ff42df7e8b6cf2d,1 +np.float64,0x7fe05a471260b48d,0x40862e6fa502738b,1 +np.float64,0x7fec32cd9778659a,0x408632cb8d98c5a3,1 +np.float64,0x7fd203a220a40743,0x408629aa43b010c0,1 +np.float64,0x7fed71f7d17ae3ef,0x4086332428207101,1 +np.float64,0x3ff3918999e72313,0x3fe4fe5e8991402f,1 +np.float64,0x3ff3ecae38c7d95c,0x3fe5fa787d887981,1 +np.float64,0x7fd65345b82ca68a,0x40862b61aed8c64e,1 +np.float64,0x3ff1efdd01c3dfba,0x3fdf2eae36139204,1 +np.float64,0x3ffba9344f375268,0x3ff24d7fdcfc313b,1 +np.float64,0x7fd0469b35208d35,0x408628da6ed24bdd,1 +np.float64,0x7fe525782daa4aef,0x4086307e240c8b30,1 +np.float64,0x3ff8e473d371c8e8,0x3ff02beebd4171c7,1 +np.float64,0x3ff59a43898b3487,0x3fea0dc0a6acea0a,1 +np.float64,0x7fef50c7263ea18d,0x408633a247d7cd42,1 +np.float64,0x7fe8b5a301f16b45,0x408631bd0e71c855,1 +np.float64,0x3ff209369de4126d,0x3fdff4264334446b,1 +np.float64,0x3ffbe2ff4437c5fe,0x3ff2763b356814c7,1 +np.float64,0x3ff55938156ab270,0x3fe97c70514f91bf,1 +np.float64,0x3fff5d8bf81ebb18,0x3ff4b333b230672a,1 +np.float64,0x3ff16a317bc2d463,0x3fdab84e7faa468f,1 +np.float64,0x3ff7e64f8dafcc9f,0x3fee9e0bd57e9566,1 +np.float64,0x7fef4dc065be9b80,0x408633a181e25abb,1 +np.float64,0x3ff64a24a62c9449,0x3feb849ced76437e,1 +np.float64,0x7fc3cb85ef27970b,0x408624dfc39c8f74,1 +np.float64,0x7fec2162a77842c4,0x408632c69b0d43b6,1 +np.float64,0x7feccee6dc399dcd,0x408632f75de98c46,1 +np.float64,0x7faff4f5f43fe9eb,0x40861d9d89be14c9,1 +np.float64,0x7fee82df60fd05be,0x4086336cfdeb7317,1 +np.float64,0x3ffe54588d9ca8b1,0x3ff41247eb2f75ca,1 +np.float64,0x3ffe5615b55cac2c,0x3ff4135c4eb11620,1 +np.float64,0x3ffdaf9a6a1b5f35,0x3ff3aa70e50d1692,1 +np.float64,0x3ff69c045f4d3809,0x3fec2b00734e2cde,1 +np.float64,0x7fd049239aa09246,0x408628dbad6dd995,1 +np.float64,0x3ff2acbe8465597d,0x3fe24138652195e1,1 +np.float64,0x3ffb288302365106,0x3ff1f0f86ca7e5d1,1 +np.float64,0x3fff6fe8d87edfd2,0x3ff4be136acf53c5,1 +np.float64,0x3ffc87c8bfb90f92,0x3ff2e7bbd65867cb,1 +np.float64,0x3ff173327ca2e665,0x3fdb0b945abb00d7,1 +np.float64,0x3ff9a5cf7a134b9f,0x3ff0ca2450f07c78,1 +np.float64,0x7faf782b043ef055,0x40861d7e0e9b35ef,1 +np.float64,0x3ffa0874975410e9,0x3ff117ee3dc8f5ba,1 +np.float64,0x7fc710fc7f2e21f8,0x40862618fed167fb,1 +np.float64,0x7feb73f4c876e7e9,0x40863294ae3ac1eb,1 +np.float64,0x8000000000000000,0xfff8000000000000,1 +np.float64,0x7fb46615c028cc2b,0x40861f91bade4dad,1 +np.float64,0x7fc26b064624d60c,0x4086244c1b76c938,1 +np.float64,0x3ff06ab9fa40d574,0x3fcd282fd971d1b4,1 +np.float64,0x3ff61da7410c3b4e,0x3feb28201031af02,1 +np.float64,0x3ffec7ba1b9d8f74,0x3ff459342511f952,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0x7fe5d570422baae0,0x408630bfa75008c9,1 +np.float64,0x3ffa895832f512b0,0x3ff17ad41555dccb,1 +np.float64,0x7fd343ac21a68757,0x40862a33ad59947a,1 +np.float64,0x3ffc1eeb37383dd6,0x3ff29ff29e55a006,1 +np.float64,0x7fee3c5c507c78b8,0x4086335a6b768090,1 +np.float64,0x7fe96d774a32daee,0x408631f7b9937e36,1 +np.float64,0x7fb878362430f06b,0x40862106603497b6,1 +np.float64,0x7fec0a79c03814f3,0x408632c01479905e,1 +np.float64,0x3ffa2f143c145e28,0x3ff135e25d902e1a,1 +np.float64,0x3ff14ccff80299a0,0x3fd9a0cd3397b14c,1 +np.float64,0x3ff97980dcb2f302,0x3ff0a6942a8133ab,1 +np.float64,0x3ff872e2d1f0e5c6,0x3fef96526eb2f756,1 +np.float64,0x7fdf1c9b46be3936,0x40862e0957fee329,1 +np.float64,0x7fcab6525d356ca4,0x408627458791f029,1 +np.float64,0x3ff964e74a52c9ce,0x3ff095e8845d523c,1 +np.float64,0x3ffb3aa23c967544,0x3ff1fe282d897c13,1 +np.float64,0x7fdd8a36afbb146c,0x40862d9f2b05f61b,1 +np.float64,0x3ffea39f42fd473e,0x3ff4432a48176399,1 +np.float64,0x7fea614f68b4c29e,0x408632430a750385,1 +np.float64,0x7feeafb86abd5f70,0x40863378b79f70cf,1 +np.float64,0x3ff80bc94eb01792,0x3feee138e9d626bd,1 +np.float64,0x7fcaca74743594e8,0x4086274b8ce4d1e1,1 +np.float64,0x3ff8b14815316290,0x3ff000b3526c8321,1 +np.float64,0x7fc698eb5f2d31d6,0x408625eeec86cd2b,1 +np.float64,0x7fe15429a3e2a852,0x40862ee6621205b8,1 +np.float64,0x7fee37f81b7c6fef,0x4086335941ed80dd,1 +np.float64,0x3ff8097ab3f012f6,0x3feedd1bafc3196e,1 +np.float64,0x7fe7c889ceaf9113,0x4086316ed13f2394,1 +np.float64,0x7fceca94513d9528,0x4086286893a06824,1 +np.float64,0x3ff593a103cb2742,0x3fe9ff1af4f63cc9,1 +np.float64,0x7fee237d24bc46f9,0x40863353d4142c87,1 +np.float64,0x3ffbf71e4777ee3c,0x3ff2844c0ed9f4d9,1 +np.float64,0x3ff490c65c09218d,0x3fe7a2216d9f69fd,1 +np.float64,0x3fff5ceaf1feb9d6,0x3ff4b2d430a90110,1 +np.float64,0x3ff55baecceab75e,0x3fe98203980666c4,1 +np.float64,0x3ff511bc306a2378,0x3fe8d81ce7be7b50,1 +np.float64,0x3ff38f83dcc71f08,0x3fe4f89f130d5f87,1 +np.float64,0x3ff73a3676ee746d,0x3fed5f98a65107ee,1 +np.float64,0x7fc27e50c824fca1,0x408624547828bc49,1 +np.float64,0xfff0000000000000,0xfff8000000000000,1 +np.float64,0x3fff38959ebe712b,0x3ff49d362c7ba16a,1 +np.float64,0x3ffad6d23a75ada4,0x3ff1b4dda6394ed0,1 +np.float64,0x3ffe77c6c2dcef8e,0x3ff4283698835ecb,1 +np.float64,0x3fff5feb413ebfd6,0x3ff4b49bcbdb3aa9,1 +np.float64,0x3ff0d30aa161a615,0x3fd4751bcdd7d727,1 +np.float64,0x3ff51e07e00a3c10,0x3fe8f4bd1408d694,1 +np.float64,0x8010000000000000,0xfff8000000000000,1 +np.float64,0x7fd231d2fe2463a5,0x408629beaceafcba,1 +np.float64,0x3fff6b4aee1ed696,0x3ff4bb58544bf8eb,1 +np.float64,0x3ff91fcd2f323f9a,0x3ff05d56e33db6b3,1 +np.float64,0x3ff3b889ab477113,0x3fe56bdeab74cce5,1 +np.float64,0x3ff99bfe30d337fc,0x3ff0c24bbf265561,1 +np.float64,0x3ffbe9e5eaf7d3cc,0x3ff27b0fe60f827a,1 +np.float64,0x7fd65678e92cacf1,0x40862b62d44fe8b6,1 +np.float64,0x7fd9cc477233988e,0x40862c89c638ee48,1 +np.float64,0x3ffc123c72d82479,0x3ff297294d05cbc0,1 +np.float64,0x3ff58abad58b1576,0x3fe9eb65da2a867a,1 +np.float64,0x7fe534887b2a6910,0x40863083d4ec2877,1 +np.float64,0x7fe1d3dcb123a7b8,0x40862f208116c55e,1 +np.float64,0x7fd4d570dba9aae1,0x40862ad412c413cd,1 +np.float64,0x3fffce7d3fdf9cfa,0x3ff4f58f02451928,1 +np.float64,0x3ffa76901c74ed20,0x3ff16c9a5851539c,1 +np.float64,0x7fdd88ffa23b11fe,0x40862d9ed6c6f426,1 +np.float64,0x3ff09fdbb9e13fb7,0x3fd1d2ae4fcbf713,1 +np.float64,0x7fe64567772c8ace,0x408630e845dbc290,1 +np.float64,0x7fb1a849ba235092,0x40861e6a291535b2,1 +np.float64,0x3ffaddb105f5bb62,0x3ff1b9f68f4c419b,1 +np.float64,0x7fd2fc3d5025f87a,0x40862a15cbc1df75,1 +np.float64,0x7fdea7d872bd4fb0,0x40862deb190b2c50,1 +np.float64,0x7fd50ea97eaa1d52,0x40862ae9edc4c812,1 +np.float64,0x3fff659c245ecb38,0x3ff4b7fb18b31aea,1 +np.float64,0x3ff3f1fbb7c7e3f7,0x3fe608bd9d76268c,1 +np.float64,0x3ff76869d9aed0d4,0x3fedb6c23d3a317b,1 +np.float64,0x7fedd4efe93ba9df,0x4086333edeecaa43,1 +np.float64,0x3ff9a5bd4eb34b7a,0x3ff0ca15d02bc960,1 +np.float64,0x3ffd9359cc5b26b4,0x3ff39850cb1a6b6c,1 +np.float64,0x7fe912d0427225a0,0x408631db00e46272,1 +np.float64,0x3ffb3802fe567006,0x3ff1fc4093646465,1 +np.float64,0x3ff02cc38a205987,0x3fc2e8182802a07b,1 +np.float64,0x3ffda953dd1b52a8,0x3ff3a66c504cf207,1 +np.float64,0x7fe0a487e4a1490f,0x40862e93a6f20152,1 +np.float64,0x7fed265ed1fa4cbd,0x4086330f838ae431,1 +np.float64,0x7fd0000114200001,0x408628b76ec48b5c,1 +np.float64,0x3ff2c262786584c5,0x3fe288860d354b0f,1 +np.float64,0x8000000000000001,0xfff8000000000000,1 +np.float64,0x3ffdae9f075b5d3e,0x3ff3a9d006ae55c1,1 +np.float64,0x3ffb69c72156d38e,0x3ff22037cbb85e5b,1 +np.float64,0x7feeae255f7d5c4a,0x408633784e89bc05,1 +np.float64,0x7feb13927c362724,0x408632786630c55d,1 +np.float64,0x7fef49e072be93c0,0x408633a08451d476,1 +np.float64,0x3fff23d6337e47ac,0x3ff490ceb6e634ae,1 +np.float64,0x3ffba82cf8f7505a,0x3ff24cc51c73234d,1 +np.float64,0x7fe948719ef290e2,0x408631ec0b36476e,1 +np.float64,0x3ff41926c5e8324e,0x3fe670e14bbda8cd,1 +np.float64,0x3ff91f09c1523e14,0x3ff05cb5731878da,1 +np.float64,0x3ff6ae6afccd5cd6,0x3fec4fbeca764086,1 +np.float64,0x3ff927f7e0f24ff0,0x3ff06413eeb8eb1e,1 +np.float64,0x3ff19dd2b9e33ba5,0x3fdc882f97994600,1 +np.float64,0x7fe8e502c5b1ca05,0x408631cc56526fff,1 +np.float64,0x7feb49f70fb693ed,0x4086328868486fcd,1 +np.float64,0x3ffd942d535b285a,0x3ff398d8d89f52ca,1 +np.float64,0x7fc3b9c5c627738b,0x408624d893e692ca,1 +np.float64,0x7fea0780ff340f01,0x408632279fa46704,1 +np.float64,0x7fe4c90066a99200,0x4086305adb47a598,1 +np.float64,0x7fdb209113364121,0x40862cf0ab64fd7d,1 +np.float64,0x3ff38617e5470c30,0x3fe4ddc0413b524f,1 +np.float64,0x7fea1b5b803436b6,0x4086322db767f091,1 +np.float64,0x7fe2004898e40090,0x40862f3457795dc5,1 +np.float64,0x3ff3c4360ac7886c,0x3fe58c29843a4c75,1 +np.float64,0x3ff504bc168a0978,0x3fe8b9ada7f698e6,1 +np.float64,0x3ffd3e936fda7d27,0x3ff3615912c5b4ac,1 +np.float64,0x3ffbdc52fb97b8a6,0x3ff2718dae5f1f2b,1 +np.float64,0x3fffef6d84ffdedb,0x3ff508adbc8556cf,1 +np.float64,0x3ff23b65272476ca,0x3fe0b646ed2579eb,1 +np.float64,0x7fe4633068a8c660,0x408630334a4b7ff7,1 +np.float64,0x3ff769b754aed36f,0x3fedb932af0223f9,1 +np.float64,0x7fe7482d92ee905a,0x408631432de1b057,1 +np.float64,0x3ff5dd682aabbad0,0x3fea9fd5e506a86d,1 +np.float64,0x7fd68399a2ad0732,0x40862b72ed89805d,1 +np.float64,0x3ffad7acc3d5af5a,0x3ff1b57fe632c948,1 +np.float64,0x3ffc68e43698d1c8,0x3ff2d2be6f758761,1 +np.float64,0x3ff4e517fbc9ca30,0x3fe86eddf5e63a58,1 +np.float64,0x3ff34c63c56698c8,0x3fe435b74ccd6a13,1 +np.float64,0x7fea9456c17528ad,0x4086325275237015,1 +np.float64,0x7fee6573f2fccae7,0x4086336543760346,1 +np.float64,0x7fd5496fb9aa92de,0x40862b0023235667,1 +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 +np.float64,0x3ffb70e31256e1c6,0x3ff22552f54b13e0,1 +np.float64,0x3ff66a33988cd467,0x3febc656da46a1ca,1 +np.float64,0x3fff0af2eb1e15e6,0x3ff481dec325f5c8,1 +np.float64,0x3ff6a0233d0d4046,0x3fec33400958eda1,1 +np.float64,0x7fdb11e2d5b623c5,0x40862cec55e405f9,1 +np.float64,0x3ffb8a015ad71402,0x3ff2374d7b563a72,1 +np.float64,0x3ff1807d8ce300fb,0x3fdb849e4bce8335,1 +np.float64,0x3ffefd535e3dfaa6,0x3ff479aaac6ffe79,1 +np.float64,0x3ff701e23a6e03c4,0x3fecf39072d96fc7,1 +np.float64,0x3ff4ac809f895901,0x3fe7e6598f2335a5,1 +np.float64,0x3ff0309f26a0613e,0x3fc3b3f4b2783690,1 +np.float64,0x3ff241dd0ce483ba,0x3fe0cde2cb639144,1 +np.float64,0x3ffabce63fb579cc,0x3ff1a18fe2a2da59,1 +np.float64,0x3ffd84b967db0973,0x3ff38ee4f240645d,1 +np.float64,0x7fc3f88b9a27f116,0x408624f1e10cdf3f,1 +np.float64,0x7fe1d5fd5923abfa,0x40862f2175714a3a,1 +np.float64,0x7fe487b145690f62,0x4086304190700183,1 +np.float64,0x7fe7997feaef32ff,0x4086315eeefdddd2,1 +np.float64,0x3ff8f853b671f0a8,0x3ff03c907353a8da,1 +np.float64,0x7fca4c23b5349846,0x408627257ace5778,1 +np.float64,0x7fe0c9bf3a21937d,0x40862ea576c3ea43,1 +np.float64,0x7fc442b389288566,0x4086250f5f126ec9,1 +np.float64,0x7fc6d382ed2da705,0x40862603900431b0,1 +np.float64,0x7fe40b069068160c,0x4086301066468124,1 +np.float64,0x3ff7f62a146fec54,0x3feeba8dfc4363fe,1 +np.float64,0x3ff721e8e94e43d2,0x3fed313a6755d34f,1 +np.float64,0x7fe579feaf2af3fc,0x4086309ddefb6112,1 +np.float64,0x3ffe2c6bde5c58d8,0x3ff3f9665dc9a16e,1 +np.float64,0x7fcf9998ed3f3331,0x4086289dab274788,1 +np.float64,0x7fdb03af2236075d,0x40862ce82252e490,1 +np.float64,0x7fe72799392e4f31,0x40863137f428ee71,1 +np.float64,0x7f9f2190603e4320,0x408617dc5b3b3c3c,1 +np.float64,0x3ff69c56d52d38ae,0x3fec2ba59fe938b2,1 +np.float64,0x7fdcde27bf39bc4e,0x40862d70086cd06d,1 +np.float64,0x3ff654d6b8eca9ae,0x3feb9aa0107609a6,1 +np.float64,0x7fdf69d967bed3b2,0x40862e1d1c2b94c2,1 +np.float64,0xffefffffffffffff,0xfff8000000000000,1 +np.float64,0x7fedfd073f3bfa0d,0x40863349980c2c8b,1 +np.float64,0x7f7c1856803830ac,0x40860bf312b458c7,1 +np.float64,0x7fe9553f1bb2aa7d,0x408631f0173eadd5,1 +np.float64,0x3ff6e92efc2dd25e,0x3fecc38f98e7e1a7,1 +np.float64,0x7fe9719ac532e335,0x408631f906cd79c3,1 +np.float64,0x3ff60e56ae4c1cad,0x3feb07ef8637ec7e,1 +np.float64,0x3ff0d0803501a100,0x3fd455c0af195a9c,1 +np.float64,0x7fe75248a3eea490,0x40863146a614aec1,1 +np.float64,0x7fdff61ead3fec3c,0x40862e408643d7aa,1 +np.float64,0x7fed4ac7a4fa958e,0x408633197b5cf6ea,1 +np.float64,0x7fe58d44562b1a88,0x408630a5098d1bbc,1 +np.float64,0x7fd89dcdb1b13b9a,0x40862c29c2979288,1 +np.float64,0x3ff205deda240bbe,0x3fdfda67c84fd3a8,1 +np.float64,0x7fdf84c15abf0982,0x40862e23f361923d,1 +np.float64,0x3ffe012b3afc0256,0x3ff3de3dfa5f47ce,1 +np.float64,0x3ffe2f3512dc5e6a,0x3ff3fb245206398e,1 +np.float64,0x7fed6174c2bac2e9,0x4086331faa699617,1 +np.float64,0x3ff1f30f8783e61f,0x3fdf47e06f2c40d1,1 +np.float64,0x3ff590da9eab21b5,0x3fe9f8f7b4baf3c2,1 +np.float64,0x3ffb3ca1eb967944,0x3ff1ff9baf66d704,1 +np.float64,0x7fe50ba9a5aa1752,0x408630745ab7fd3c,1 +np.float64,0x3ff43743a4a86e87,0x3fe6bf7ae80b1dda,1 +np.float64,0x3ff47e1a24e8fc34,0x3fe773acca44c7d6,1 +np.float64,0x3ff589ede9eb13dc,0x3fe9e99f28fab3a4,1 +np.float64,0x3ff72f2cbf8e5e5a,0x3fed4a94e7edbf24,1 +np.float64,0x3ffa4f9bbc549f38,0x3ff14ee60aea45d3,1 +np.float64,0x3ff975dae732ebb6,0x3ff0a3a1fbd7284a,1 +np.float64,0x7fbcf14ee039e29d,0x4086225e33f3793e,1 +np.float64,0x3ff10e027f621c05,0x3fd71cce2452b4e0,1 +np.float64,0x3ff33ea193067d43,0x3fe40cbac4daaddc,1 +np.float64,0x7fbef8f2263df1e3,0x408622e905c8e1b4,1 +np.float64,0x3fff7f5bfe3efeb8,0x3ff4c732e83df253,1 +np.float64,0x3ff5700a6b4ae015,0x3fe9afdd7b8b82b0,1 +np.float64,0x3ffd5099da5aa134,0x3ff36d1bf26e55bf,1 +np.float64,0x3ffed8e0f89db1c2,0x3ff4639ff065107a,1 +np.float64,0x3fff9d0c463f3a18,0x3ff4d8a9f297cf52,1 +np.float64,0x3ff23db5b2e47b6b,0x3fe0bebdd48f961a,1 +np.float64,0x3ff042bff1e08580,0x3fc713bf24cc60ef,1 +np.float64,0x7feb4fe97a769fd2,0x4086328a26675646,1 +np.float64,0x3ffeafbfeedd5f80,0x3ff44a955a553b1c,1 +np.float64,0x3ff83fb524507f6a,0x3fef3d1729ae0976,1 +np.float64,0x3ff1992294433245,0x3fdc5f5ce53dd197,1 +np.float64,0x7fe89fe629b13fcb,0x408631b601a83867,1 +np.float64,0x7fe53e4d74aa7c9a,0x40863087839b52f1,1 +np.float64,0x3ff113713e6226e2,0x3fd757631ca7cd09,1 +np.float64,0x7fd4a0b7a629416e,0x40862abfba27a09b,1 +np.float64,0x3ff184c6e2a3098e,0x3fdbab2e3966ae57,1 +np.float64,0x3ffafbbf77f5f77f,0x3ff1d02bb331d9f9,1 +np.float64,0x3ffc6099a358c134,0x3ff2cd16941613d1,1 +np.float64,0x3ffb7c441ef6f888,0x3ff22d7b12e31432,1 +np.float64,0x3ff625ba5eec4b75,0x3feb39060e55fb79,1 +np.float64,0x7fde879acbbd0f35,0x40862de2aab4d72d,1 +np.float64,0x7f930aed982615da,0x408613edb6df8528,1 +np.float64,0x7fa4b82dac29705a,0x40861a261c0a9aae,1 +np.float64,0x7fced5c16b3dab82,0x4086286b7a73e611,1 +np.float64,0x7fe133749d2266e8,0x40862ed73a41b112,1 +np.float64,0x3ff2d8146ea5b029,0x3fe2ced55dbf997d,1 +np.float64,0x3ff60dac77ac1b59,0x3feb0688b0e54c7b,1 +np.float64,0x3ff275d9b024ebb3,0x3fe186b87258b834,1 +np.float64,0x3ff533e6500a67cd,0x3fe92746c8b50ddd,1 +np.float64,0x7fe370896666e112,0x40862fd1ca144736,1 +np.float64,0x7fee7695357ced29,0x40863369c459420e,1 +np.float64,0x7fd1e0528023c0a4,0x4086299a85caffd0,1 +np.float64,0x7fd05c7b24a0b8f5,0x408628e52824386f,1 +np.float64,0x3ff11dcc3b023b98,0x3fd7c56c8cef1be1,1 +np.float64,0x7fc9d9fae933b3f5,0x408627027404bc5f,1 +np.float64,0x7fe2359981246b32,0x40862f4be675e90d,1 +np.float64,0x3ffb10a949962152,0x3ff1df88f83b8cde,1 +np.float64,0x3ffa65b53654cb6a,0x3ff15fc8956ccc87,1 +np.float64,0x3ff0000000000000,0x0,1 +np.float64,0x7fad97ef703b2fde,0x40861d002f3d02da,1 +np.float64,0x3ff57aaf93aaf55f,0x3fe9c7b01f194edb,1 +np.float64,0x7fe9ecd73f33d9ad,0x4086321f69917205,1 +np.float64,0x3ff0dcb79c61b96f,0x3fd4eac86a7a9c38,1 +np.float64,0x7fee9c12ffbd3825,0x4086337396cd706d,1 +np.float64,0x3ff52c40af4a5881,0x3fe915a8a7de8f00,1 +np.float64,0x3ffbcfff59779ffe,0x3ff268e523fe8dda,1 +np.float64,0x7fe014cb4b602996,0x40862e4d5de42a03,1 +np.float64,0x7fae2370e83c46e1,0x40861d258dd5b3ee,1 +np.float64,0x7fe9e33602f3c66b,0x4086321c704ac2bb,1 +np.float64,0x3ff648acd74c915a,0x3feb8195ca53bcaa,1 +np.float64,0x7fe385f507670be9,0x40862fda95ebaf44,1 +np.float64,0x3ffb0e382c361c70,0x3ff1ddbea963e0a7,1 +np.float64,0x3ff47d6b6ae8fad7,0x3fe771f80ad37cd2,1 +np.float64,0x3ffca7d538f94faa,0x3ff2fd5f62e851ac,1 +np.float64,0x3ff83e949c107d29,0x3fef3b1c5bbac99b,1 +np.float64,0x7fc6fb933a2df725,0x408626118e51a286,1 +np.float64,0x7fe43a1454e87428,0x4086302318512d9b,1 +np.float64,0x7fe51fe32aaa3fc5,0x4086307c07271348,1 +np.float64,0x3ff35e563966bcac,0x3fe46aa2856ef85f,1 +np.float64,0x3ff84dd4e4909baa,0x3fef55d86d1d5c2e,1 +np.float64,0x7febe3d84077c7b0,0x408632b507686f03,1 +np.float64,0x3ff6aca2e32d5946,0x3fec4c32a2368ee3,1 +np.float64,0x7fe7070e3e6e0e1b,0x4086312caddb0454,1 +np.float64,0x7fd3657f2aa6cafd,0x40862a41acf47e70,1 +np.float64,0x3ff61534456c2a68,0x3feb1663900af13b,1 +np.float64,0x3ff8bc556eb178ab,0x3ff00a16b5403f88,1 +np.float64,0x3ffa7782e3f4ef06,0x3ff16d529c94a438,1 +np.float64,0x7fc15785ed22af0b,0x408623d0cd94fb86,1 +np.float64,0x3ff2e3eeb6e5c7dd,0x3fe2f4c4876d3edf,1 +np.float64,0x3ff2e4e17e85c9c3,0x3fe2f7c9e437b22e,1 +np.float64,0x7feb3aaf67f6755e,0x40863283ec4a0d76,1 +np.float64,0x7fe89efcf7313df9,0x408631b5b5e41263,1 +np.float64,0x3ffcc6fad4f98df6,0x3ff31245778dff6d,1 +np.float64,0x3ff356114466ac22,0x3fe45253d040a024,1 +np.float64,0x3ff81c70d2d038e2,0x3feefed71ebac776,1 +np.float64,0x7fdb75c96136eb92,0x40862d09a603f03e,1 +np.float64,0x3ff340f91b8681f2,0x3fe413bb6e6d4a54,1 +np.float64,0x3fff906079df20c1,0x3ff4d13869d16bc7,1 +np.float64,0x3ff226a42d644d48,0x3fe0698d316f1ac0,1 +np.float64,0x3ff948abc3b29158,0x3ff07eeb0b3c81ba,1 +np.float64,0x3ffc25df1fb84bbe,0x3ff2a4c13ad4edad,1 +np.float64,0x7fe07ea3b960fd46,0x40862e815b4cf43d,1 +np.float64,0x3ff497d3dae92fa8,0x3fe7b3917bf10311,1 +np.float64,0x7fea561db1f4ac3a,0x4086323fa4aef2a9,1 +np.float64,0x7fd1b49051236920,0x40862986d8759ce5,1 +np.float64,0x7f7ba3bd6037477a,0x40860bd19997fd90,1 +np.float64,0x3ff01126dd00224e,0x3fb76b67938dfb11,1 +np.float64,0x3ff29e1105053c22,0x3fe2102a4c5fa102,1 +np.float64,0x3ff9de2a6553bc55,0x3ff0f6cfe4dea30e,1 +np.float64,0x7fc558e7d42ab1cf,0x4086257a608fc055,1 +np.float64,0x3ff79830a74f3061,0x3fee0f93db153d65,1 +np.float64,0x7fe2661648e4cc2c,0x40862f6117a71eb2,1 +np.float64,0x3ff140cf4262819e,0x3fd92aefedae1ab4,1 +np.float64,0x3ff5f36251abe6c5,0x3feaced481ceaee3,1 +np.float64,0x7fc80911d5301223,0x4086266d4757f768,1 +np.float64,0x3ff9079a6c320f35,0x3ff04949d21ebe1e,1 +np.float64,0x3ffde8d2e09bd1a6,0x3ff3cedca8a5db5d,1 +np.float64,0x3ffadd1de375ba3c,0x3ff1b989790e8d93,1 +np.float64,0x3ffdbc40ee1b7882,0x3ff3b286b1c7da57,1 +np.float64,0x3ff8ff514771fea2,0x3ff04264add00971,1 +np.float64,0x7fefd7d0e63fafa1,0x408633c47d9f7ae4,1 +np.float64,0x3ffc47798c588ef3,0x3ff2bbe441fa783a,1 +np.float64,0x7fe6ebc55b6dd78a,0x408631232d9abf31,1 +np.float64,0xbff0000000000000,0xfff8000000000000,1 +np.float64,0x7fd378e4afa6f1c8,0x40862a49a8f98cb4,1 +np.float64,0x0,0xfff8000000000000,1 +np.float64,0x3ffe88ed7efd11db,0x3ff432c7ecb95492,1 +np.float64,0x3ff4f5509289eaa1,0x3fe8955a11656323,1 +np.float64,0x7fda255b41344ab6,0x40862ca53676a23e,1 +np.float64,0x3ffebe85b9bd7d0c,0x3ff453992cd55dea,1 +np.float64,0x3ff5d6180b8bac30,0x3fea901c2160c3bc,1 +np.float64,0x3ffcdfb8fcf9bf72,0x3ff322c83b3bc735,1 +np.float64,0x3ff3c91c26679238,0x3fe599a652b7cf59,1 +np.float64,0x7fc389f7a62713ee,0x408624c518edef93,1 +np.float64,0x3ffe1245ba1c248c,0x3ff3e901b2c4a47a,1 +np.float64,0x7fe1e76e95e3cedc,0x40862f29446f9eff,1 +np.float64,0x3ff02ae4f92055ca,0x3fc28221abd63daa,1 +np.float64,0x7fbf648a143ec913,0x40862304a0619d03,1 +np.float64,0x3ff2be7ef8657cfe,0x3fe27bcc6c97522e,1 +np.float64,0x3ffa7595e514eb2c,0x3ff16bdc64249ad1,1 +np.float64,0x3ff4ee130049dc26,0x3fe884354cbad8c9,1 +np.float64,0x3ff19211fc232424,0x3fdc2160bf3eae40,1 +np.float64,0x3ffec215aedd842c,0x3ff455c4cdd50c32,1 +np.float64,0x7fe7cb50ffaf96a1,0x4086316fc06a53af,1 +np.float64,0x3fffa679161f4cf2,0x3ff4de30ba7ac5b8,1 +np.float64,0x7fdcb459763968b2,0x40862d646a21011d,1 +np.float64,0x3ff9f338d6d3e672,0x3ff1075835d8f64e,1 +np.float64,0x3ff8de3319d1bc66,0x3ff026ae858c0458,1 +np.float64,0x7fee0199d33c0333,0x4086334ad03ac683,1 +np.float64,0x3ffc06076c380c0f,0x3ff28eaec3814faa,1 +np.float64,0x3ffe9e2e235d3c5c,0x3ff43fd4d2191a7f,1 +np.float64,0x3ffd93b06adb2761,0x3ff398888239cde8,1 +np.float64,0x7fefe4b71cffc96d,0x408633c7ba971b92,1 +np.float64,0x7fb2940352252806,0x40861ed244bcfed6,1 +np.float64,0x3ffba4647e3748c9,0x3ff24a15f02e11b9,1 +np.float64,0x7fd2d9543725b2a7,0x40862a0708446596,1 +np.float64,0x7fc04997f120932f,0x4086235055d35251,1 +np.float64,0x3ff6d14313ada286,0x3fec94b177f5d3fc,1 +np.float64,0x3ff279fc8684f3f9,0x3fe19511c3e5b9a8,1 +np.float64,0x3ff42f4609085e8c,0x3fe6aabe526ce2bc,1 +np.float64,0x7fc1c6c62a238d8b,0x408624037de7f6ec,1 +np.float64,0x7fe31ff4b8e63fe8,0x40862fb05b40fd16,1 +np.float64,0x7fd2a8825fa55104,0x408629f234d460d6,1 +np.float64,0x3ffe8c1d725d183b,0x3ff434bdc444143f,1 +np.float64,0x3ff0e9dc3e21d3b8,0x3fd58676e2c13fc9,1 +np.float64,0x3ffed03172fda063,0x3ff45e59f7aa6c8b,1 +np.float64,0x7fd74621962e8c42,0x40862bb6e90d66f8,1 +np.float64,0x3ff1faa29663f545,0x3fdf833a2c5efde1,1 +np.float64,0x7fda02834db40506,0x40862c9a860d6747,1 +np.float64,0x7f709b2fc021365f,0x408607be328eb3eb,1 +np.float64,0x7fec0d58aa381ab0,0x408632c0e61a1af6,1 +np.float64,0x3ff524d1720a49a3,0x3fe90479968d40fd,1 +np.float64,0x7fd64cb3b32c9966,0x40862b5f53c4b0b4,1 +np.float64,0x3ff9593e3ed2b27c,0x3ff08c6eea5f6e8b,1 +np.float64,0x3ff7de8b1f6fbd16,0x3fee9007abcfdf7b,1 +np.float64,0x7fe8d816d6b1b02d,0x408631c82e38a894,1 +np.float64,0x7fd726bbe22e4d77,0x40862bac16ee8d52,1 +np.float64,0x7fa70b07d42e160f,0x40861affcc4265e2,1 +np.float64,0x7fe18b4091e31680,0x40862effa8bce66f,1 +np.float64,0x3ff830253010604a,0x3fef21b2eaa75758,1 +np.float64,0x3fffcade407f95bc,0x3ff4f3734b24c419,1 +np.float64,0x3ff8c17cecb182fa,0x3ff00e75152d7bda,1 +np.float64,0x7fdad9b9d035b373,0x40862cdbabb793ba,1 +np.float64,0x3ff9f9e154f3f3c2,0x3ff10c8dfdbd2510,1 +np.float64,0x3ff465e162e8cbc3,0x3fe736c751c75b73,1 +np.float64,0x3ff9b4cd8493699b,0x3ff0d616235544b8,1 +np.float64,0x7fe557c4a56aaf88,0x4086309114ed12d9,1 +np.float64,0x7fe5999133eb3321,0x408630a9991a9b54,1 +np.float64,0x7fe7c9009e2f9200,0x4086316ef9359a47,1 +np.float64,0x3ff8545cabd0a8ba,0x3fef6141f1030c36,1 +np.float64,0x3ffa1f1712943e2e,0x3ff129849d492ce3,1 +np.float64,0x7fea803a14750073,0x4086324c652c276c,1 +np.float64,0x3ff5b6f97fcb6df3,0x3fea4cb0b97b18e9,1 +np.float64,0x7fc2efdfc425dfbf,0x40862485036a5c6e,1 +np.float64,0x7fe2c78e5be58f1c,0x40862f8b0a5e7baf,1 +np.float64,0x7fe80d7fff301aff,0x40863185e234060a,1 +np.float64,0x3ffd895d457b12ba,0x3ff391e2cac7a3f8,1 +np.float64,0x3ff44c9764a8992f,0x3fe6f6690396c232,1 +np.float64,0x3ff731688b8e62d1,0x3fed4ed70fac3839,1 +np.float64,0x3ff060200460c040,0x3fcbad4a07d97f0e,1 +np.float64,0x3ffbd2f70a17a5ee,0x3ff26afb46ade929,1 +np.float64,0x7febe9e841f7d3d0,0x408632b6c465ddd9,1 +np.float64,0x3ff2532f8be4a65f,0x3fe10c6cd8d64cf4,1 +np.float64,0x7fefffffffffffff,0x408633ce8fb9f87e,1 +np.float64,0x3ff3a1ae3a47435c,0x3fe52c00210cc459,1 +np.float64,0x7fe9c34ae6b38695,0x408632128d150149,1 +np.float64,0x3fff311029fe6220,0x3ff498b852f30bff,1 +np.float64,0x3ffd4485a1ba890c,0x3ff3653b6fa701cd,1 +np.float64,0x7fd52718b1aa4e30,0x40862af330d9c68c,1 +np.float64,0x3ff10b695a4216d3,0x3fd7009294e367b7,1 +np.float64,0x3ffdf73de59bee7c,0x3ff3d7fa96d2c1ae,1 +np.float64,0x3ff2f1c75965e38f,0x3fe320aaff3db882,1 +np.float64,0x3ff2a56a5a854ad5,0x3fe228cc4ad7e7a5,1 +np.float64,0x7fe60cd1cf6c19a3,0x408630d3d87a04b3,1 +np.float64,0x3ff89fa65c113f4c,0x3fefe3543773180c,1 +np.float64,0x3ffd253130ba4a62,0x3ff350b76ba692a0,1 +np.float64,0x7feaad7051f55ae0,0x40863259ff932d62,1 +np.float64,0x7fd9cc37cf33986f,0x40862c89c15f963b,1 +np.float64,0x3ff8c08de771811c,0x3ff00daa9c17acd7,1 +np.float64,0x7fea58b25d34b164,0x408632406d54cc6f,1 +np.float64,0x7fe5f161fd2be2c3,0x408630c9ddf272a5,1 +np.float64,0x3ff5840dbf8b081c,0x3fe9dc9117b4cbc7,1 +np.float64,0x3ff3fd762307faec,0x3fe6277cd530c640,1 +np.float64,0x3ff9095c98b212b9,0x3ff04abff170ac24,1 +np.float64,0x7feaac66017558cb,0x40863259afb4f8ce,1 +np.float64,0x7fd78f96bcaf1f2c,0x40862bd00175fdf9,1 +np.float64,0x3ffaca27e0959450,0x3ff1ab72b8f8633e,1 +np.float64,0x3ffb7f18cb96fe32,0x3ff22f81bcb8907b,1 +np.float64,0x3ffcce48d1199c92,0x3ff317276f62c0b2,1 +np.float64,0x3ffcb9a7f3797350,0x3ff30958e0d6a34d,1 +np.float64,0x7fda569ef6b4ad3d,0x40862cb43b33275a,1 +np.float64,0x7fde9f0893bd3e10,0x40862de8cc036283,1 +np.float64,0x3ff428be3928517c,0x3fe699bb5ab58904,1 +np.float64,0x7fa4d3344029a668,0x40861a3084989291,1 +np.float64,0x3ff03607bd006c0f,0x3fc4c4840cf35f48,1 +np.float64,0x3ff2b1335c056267,0x3fe25000846b75a2,1 +np.float64,0x7fe0cb8bd8e19717,0x40862ea65237d496,1 +np.float64,0x3fff4b1b7b9e9637,0x3ff4a83fb08e7b24,1 +np.float64,0x7fe7526140aea4c2,0x40863146ae86069c,1 +np.float64,0x7fbfcfb7c23f9f6f,0x4086231fc246ede5,1 diff --git a/local_packages/numpy/_core/tests/data/umath-validation-set-arcsin.csv b/local_packages/numpy/_core/tests/data/umath-validation-set-arcsin.csv new file mode 100644 index 0000000..75d5707 --- /dev/null +++ b/local_packages/numpy/_core/tests/data/umath-validation-set-arcsin.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0xbe7d3a7c,0xbe7fe217,4 +np.float32,0x3dc102f0,0x3dc14c60,4 +np.float32,0xbe119c28,0xbe121aef,4 +np.float32,0xbe51cd68,0xbe534c75,4 +np.float32,0x3c04a300,0x3c04a35f,4 +np.float32,0xbf4f0b62,0xbf712a69,4 +np.float32,0x3ef61a5c,0x3f005cf6,4 +np.float32,0xbf13024c,0xbf1c97df,4 +np.float32,0x3e93b580,0x3e95d6b5,4 +np.float32,0x3e44e7b8,0x3e4623a5,4 +np.float32,0xbe35df20,0xbe36d773,4 +np.float32,0x3eecd2c0,0x3ef633cf,4 +np.float32,0x3f2772ba,0x3f36862a,4 +np.float32,0x3e211ea8,0x3e21cac5,4 +np.float32,0x3e3b3d90,0x3e3c4cc6,4 +np.float32,0x3f37c962,0x3f4d018c,4 +np.float32,0x3e92ad88,0x3e94c31a,4 +np.float32,0x3f356ffc,0x3f49a766,4 +np.float32,0x3f487ba2,0x3f665254,4 +np.float32,0x3f061c46,0x3f0d27ae,4 +np.float32,0xbee340a2,0xbeeb7722,4 +np.float32,0xbe85aede,0xbe874026,4 +np.float32,0x3f34cf9a,0x3f48c474,4 +np.float32,0x3e29a690,0x3e2a6fbd,4 +np.float32,0xbeb29428,0xbeb669d1,4 +np.float32,0xbe606d40,0xbe624370,4 +np.float32,0x3dae6860,0x3dae9e85,4 +np.float32,0xbf04872b,0xbf0b4d25,4 +np.float32,0x3f2080e2,0x3f2d7ab0,4 +np.float32,0xbec77dcc,0xbecceb27,4 +np.float32,0x3e0dda10,0x3e0e4f38,4 +np.float32,0xbefaf970,0xbf03262c,4 +np.float32,0x3f576a0c,0x3f7ffee6,4 +np.float32,0x3f222382,0x3f2f95d6,4 +np.float32,0x7fc00000,0x7fc00000,4 +np.float32,0x3e41c468,0x3e42f14e,4 +np.float32,0xbf2f64dd,0xbf4139a8,4 +np.float32,0xbf60ef90,0xbf895956,4 +np.float32,0xbf67c855,0xbf90eff0,4 +np.float32,0xbed35aee,0xbed9df00,4 +np.float32,0xbf2c7d92,0xbf3d448f,4 +np.float32,0x3f7b1604,0x3faff122,4 +np.float32,0xbf7c758b,0xbfb3bf87,4 +np.float32,0x3ecda1c8,0x3ed39acf,4 +np.float32,0x3f3af8ae,0x3f519fcb,4 +np.float32,0xbf16e6a3,0xbf2160fd,4 +np.float32,0x3f0c97d2,0x3f14d668,4 +np.float32,0x3f0a8060,0x3f1257b9,4 +np.float32,0x3f27905a,0x3f36ad57,4 +np.float32,0x3eeaeba4,0x3ef40efe,4 +np.float32,0x3e58dde0,0x3e5a8580,4 +np.float32,0xbf0cabe2,0xbf14ee6b,4 +np.float32,0xbe805ca8,0xbe81bf03,4 +np.float32,0x3f5462ba,0x3f7a7b85,4 +np.float32,0xbee235d0,0xbeea4d8b,4 +np.float32,0xbe880cb0,0xbe89b426,4 +np.float32,0x80000001,0x80000001,4 +np.float32,0x3f208c00,0x3f2d88f6,4 +np.float32,0xbf34f3d2,0xbf48f7a2,4 +np.float32,0x3f629428,0x3f8b1763,4 +np.float32,0xbf52a900,0xbf776b4a,4 +np.float32,0xbd17f8d0,0xbd1801be,4 +np.float32,0xbef7cada,0xbf0153d1,4 +np.float32,0x3f7d3b90,0x3fb63967,4 +np.float32,0xbd6a20b0,0xbd6a4160,4 +np.float32,0x3f740496,0x3fa1beb7,4 +np.float32,0x3ed8762c,0x3edf7dd9,4 +np.float32,0x3f53b066,0x3f793d42,4 +np.float32,0xbe9de718,0xbea084f9,4 +np.float32,0x3ea3ae90,0x3ea69b4b,4 +np.float32,0x3f1b8f00,0x3f273183,4 +np.float32,0x3f5cd6ac,0x3f852ead,4 +np.float32,0x3f29d510,0x3f39b169,4 +np.float32,0x3ee2a934,0x3eeace33,4 +np.float32,0x3eecac94,0x3ef608c2,4 +np.float32,0xbea915e2,0xbeac5203,4 +np.float32,0xbd316e90,0xbd317cc8,4 +np.float32,0xbf70b495,0xbf9c97b6,4 +np.float32,0xbe80d976,0xbe823ff3,4 +np.float32,0x3e9205f8,0x3e94143f,4 +np.float32,0x3f49247e,0x3f676296,4 +np.float32,0x3d9030c0,0x3d904f50,4 +np.float32,0x3e4df058,0x3e4f5a5c,4 +np.float32,0xbe1fd360,0xbe207b58,4 +np.float32,0xbf69dc7c,0xbf937006,4 +np.float32,0x3f36babe,0x3f4b7df3,4 +np.float32,0xbe8c9758,0xbe8e6bb7,4 +np.float32,0xbf4de72d,0xbf6f3c20,4 +np.float32,0xbecdad68,0xbed3a780,4 +np.float32,0xbf73e2cf,0xbfa18702,4 +np.float32,0xbece16a8,0xbed41a75,4 +np.float32,0x3f618a96,0x3f89fc6d,4 +np.float32,0xbf325853,0xbf454ea9,4 +np.float32,0x3f138568,0x3f1d3828,4 +np.float32,0xbf56a6e9,0xbf7e9748,4 +np.float32,0x3ef5d594,0x3f0035bf,4 +np.float32,0xbf408220,0xbf59dfaa,4 +np.float32,0xbed120e6,0xbed76dd5,4 +np.float32,0xbf6dbda5,0xbf986cee,4 +np.float32,0x3f744a38,0x3fa23282,4 +np.float32,0xbe4b56d8,0xbe4cb329,4 +np.float32,0x3f54c5f2,0x3f7b2d97,4 +np.float32,0xbd8b1c90,0xbd8b3801,4 +np.float32,0x3ee19a48,0x3ee9a03b,4 +np.float32,0x3f48460e,0x3f65fc3d,4 +np.float32,0x3eb541c0,0x3eb9461e,4 +np.float32,0xbea7d098,0xbeaaf98c,4 +np.float32,0xbda99e40,0xbda9d00c,4 +np.float32,0xbefb2ca6,0xbf03438d,4 +np.float32,0x3f4256be,0x3f5cab0b,4 +np.float32,0xbdbdb198,0xbdbdf74d,4 +np.float32,0xbf325b5f,0xbf4552e9,4 +np.float32,0xbf704d1a,0xbf9c00b4,4 +np.float32,0x3ebb1d04,0x3ebf8cf8,4 +np.float32,0xbed03566,0xbed66bf1,4 +np.float32,0x3e8fcee8,0x3e91c501,4 +np.float32,0xbf2e1eec,0xbf3f7b9d,4 +np.float32,0x3f33c4d2,0x3f474cac,4 +np.float32,0x3f598ef4,0x3f8201b4,4 +np.float32,0x3e09bb30,0x3e0a2660,4 +np.float32,0x3ed4e228,0x3edb8cdb,4 +np.float32,0x3eb7a190,0x3ebbd0a1,4 +np.float32,0xbd9ae630,0xbd9b0c18,4 +np.float32,0x3f43020e,0x3f5db2d7,4 +np.float32,0xbec06ac0,0xbec542d4,4 +np.float32,0x3f3dfde0,0x3f561674,4 +np.float32,0xbf64084a,0xbf8cabe6,4 +np.float32,0xbd6f95b0,0xbd6fb8b7,4 +np.float32,0x3f268640,0x3f354e2d,4 +np.float32,0xbe72b4bc,0xbe7509b2,4 +np.float32,0xbf3414fa,0xbf47bd5a,4 +np.float32,0xbf375218,0xbf4c566b,4 +np.float32,0x3f203c1a,0x3f2d2273,4 +np.float32,0xbd503530,0xbd504c2b,4 +np.float32,0xbc45e540,0xbc45e67b,4 +np.float32,0xbf175c4f,0xbf21f2c6,4 +np.float32,0x3f7432a6,0x3fa20b2b,4 +np.float32,0xbf43367f,0xbf5e03d8,4 +np.float32,0x3eb3997c,0x3eb780c4,4 +np.float32,0x3e5574c8,0x3e570878,4 +np.float32,0xbf04b57b,0xbf0b8349,4 +np.float32,0x3f6216d8,0x3f8a914b,4 +np.float32,0xbf57a237,0xbf80337d,4 +np.float32,0xbee1403a,0xbee93bee,4 +np.float32,0xbeaf9b9a,0xbeb33f3b,4 +np.float32,0xbf109374,0xbf19a223,4 +np.float32,0xbeae6824,0xbeb1f810,4 +np.float32,0xbcff9320,0xbcff9dbe,4 +np.float32,0x3ed205c0,0x3ed868a9,4 +np.float32,0x3d897c30,0x3d8996ad,4 +np.float32,0xbf2899d2,0xbf380d4c,4 +np.float32,0xbf54cb0b,0xbf7b36c2,4 +np.float32,0x3ea8e8ec,0x3eac2262,4 +np.float32,0x3ef5e1a0,0x3f003c9d,4 +np.float32,0xbf00c81e,0xbf06f1e2,4 +np.float32,0xbf346775,0xbf483181,4 +np.float32,0x3f7a4fe4,0x3fae077c,4 +np.float32,0x3f00776e,0x3f06948f,4 +np.float32,0xbe0a3078,0xbe0a9cbc,4 +np.float32,0xbeba0b06,0xbebe66be,4 +np.float32,0xbdff4e38,0xbdfff8b2,4 +np.float32,0xbe927f70,0xbe9492ff,4 +np.float32,0x3ebb07e0,0x3ebf7642,4 +np.float32,0x3ebcf8e0,0x3ec18c95,4 +np.float32,0x3f49bdfc,0x3f685b51,4 +np.float32,0x3cbc29c0,0x3cbc2dfd,4 +np.float32,0xbe9e951a,0xbea13bf1,4 +np.float32,0xbe8c237c,0xbe8df33d,4 +np.float32,0x3e17f198,0x3e1881c4,4 +np.float32,0xbd0b5220,0xbd0b5902,4 +np.float32,0xbf34c4a2,0xbf48b4f5,4 +np.float32,0xbedaa814,0xbee1ea94,4 +np.float32,0x3ebf5d6c,0x3ec42053,4 +np.float32,0x3cd04b40,0x3cd050ff,4 +np.float32,0xbec33fe0,0xbec85244,4 +np.float32,0xbf00b27a,0xbf06d8d8,4 +np.float32,0x3f15d7be,0x3f201243,4 +np.float32,0xbe3debd0,0xbe3f06f7,4 +np.float32,0xbea81704,0xbeab4418,4 +np.float32,0x1,0x1,4 +np.float32,0x3f49e6ba,0x3f689d8b,4 +np.float32,0x3f351030,0x3f491fc0,4 +np.float32,0x3e607de8,0x3e625482,4 +np.float32,0xbe8dbbe4,0xbe8f9c0e,4 +np.float32,0x3edbf350,0x3ee35924,4 +np.float32,0xbf0c84c4,0xbf14bf9c,4 +np.float32,0x3eb218b0,0x3eb5e61a,4 +np.float32,0x3e466dd0,0x3e47b138,4 +np.float32,0xbe8ece94,0xbe90ba01,4 +np.float32,0xbe82ec2a,0xbe84649a,4 +np.float32,0xbf7e1f10,0xbfb98b9e,4 +np.float32,0xbf2d00ea,0xbf3df688,4 +np.float32,0x3db7cdd0,0x3db80d36,4 +np.float32,0xbe388b98,0xbe398f25,4 +np.float32,0xbd86cb40,0xbd86e436,4 +np.float32,0x7f7fffff,0x7fc00000,4 +np.float32,0x3f472a60,0x3f6436c6,4 +np.float32,0xbf5b2c1d,0xbf838d87,4 +np.float32,0x3f0409ea,0x3f0abad8,4 +np.float32,0x3f47dd0e,0x3f6553f0,4 +np.float32,0x3e3eab00,0x3e3fc98a,4 +np.float32,0xbf7c2a7f,0xbfb2e19b,4 +np.float32,0xbeda0048,0xbee13112,4 +np.float32,0x3f46600a,0x3f62f5b2,4 +np.float32,0x3f45aef4,0x3f61de43,4 +np.float32,0x3dd40a50,0x3dd46bc4,4 +np.float32,0xbf6cdd0b,0xbf974191,4 +np.float32,0x3f78de4c,0x3faac725,4 +np.float32,0x3f3c39a4,0x3f53777f,4 +np.float32,0xbe2a30ec,0xbe2afc0b,4 +np.float32,0xbf3c0ef0,0xbf533887,4 +np.float32,0x3ecb6548,0x3ed12a53,4 +np.float32,0x3eb994e8,0x3ebde7fc,4 +np.float32,0x3d4c1ee0,0x3d4c3487,4 +np.float32,0xbf52cb6d,0xbf77a7eb,4 +np.float32,0x3eb905d4,0x3ebd4e80,4 +np.float32,0x3e712428,0x3e736d72,4 +np.float32,0xbf79ee6e,0xbfad22be,4 +np.float32,0x3de6f8b0,0x3de776c1,4 +np.float32,0x3e9b2898,0x3e9da325,4 +np.float32,0x3ea09b20,0x3ea35d20,4 +np.float32,0x3d0ea9a0,0x3d0eb103,4 +np.float32,0xbd911500,0xbd913423,4 +np.float32,0x3e004618,0x3e009c97,4 +np.float32,0x3f5e0e5a,0x3f86654c,4 +np.float32,0x3f2e6300,0x3f3fd88b,4 +np.float32,0x3e0cf5d0,0x3e0d68c3,4 +np.float32,0x3d6a16c0,0x3d6a376c,4 +np.float32,0x3f7174aa,0x3f9db53c,4 +np.float32,0xbe04bba0,0xbe051b81,4 +np.float32,0xbe6fdcb4,0xbe721c92,4 +np.float32,0x3f4379f0,0x3f5e6c31,4 +np.float32,0xbf680098,0xbf913257,4 +np.float32,0xbf3c31ca,0xbf536bea,4 +np.float32,0x3f59db58,0x3f824a4e,4 +np.float32,0xbf3ffc84,0xbf591554,4 +np.float32,0x3d1d5160,0x3d1d5b48,4 +np.float32,0x3f6c64ae,0x3f96a3da,4 +np.float32,0xbf1b49fd,0xbf26daaa,4 +np.float32,0x3ec80be0,0x3ecd8576,4 +np.float32,0x3f3becc0,0x3f530629,4 +np.float32,0xbea93890,0xbeac76c1,4 +np.float32,0x3f5b3acc,0x3f839bbd,4 +np.float32,0xbf5d6818,0xbf85bef9,4 +np.float32,0x3f794266,0x3fab9fa6,4 +np.float32,0xbee8eb7c,0xbef1cf3b,4 +np.float32,0xbf360a06,0xbf4a821e,4 +np.float32,0x3f441cf6,0x3f5f693d,4 +np.float32,0x3e60de40,0x3e62b742,4 +np.float32,0xbebb3d7e,0xbebfafdc,4 +np.float32,0x3e56a3a0,0x3e583e28,4 +np.float32,0x3f375bfe,0x3f4c6499,4 +np.float32,0xbf384d7d,0xbf4dbf9a,4 +np.float32,0x3efb03a4,0x3f032c06,4 +np.float32,0x3f1d5d10,0x3f29794d,4 +np.float32,0xbe25f7dc,0xbe26b41d,4 +np.float32,0x3f6d2f88,0x3f97aebb,4 +np.float32,0xbe9fa100,0xbea255cb,4 +np.float32,0xbf21dafa,0xbf2f382a,4 +np.float32,0x3d3870e0,0x3d3880d9,4 +np.float32,0x3eeaf00c,0x3ef413f4,4 +np.float32,0xbc884ea0,0xbc88503c,4 +np.float32,0xbf7dbdad,0xbfb80b6d,4 +np.float32,0xbf4eb713,0xbf709b46,4 +np.float32,0xbf1c0ad4,0xbf27cd92,4 +np.float32,0x3f323088,0x3f451737,4 +np.float32,0x3e405d88,0x3e4183e1,4 +np.float32,0x3d7ad580,0x3d7afdb4,4 +np.float32,0xbf207338,0xbf2d6927,4 +np.float32,0xbecf7948,0xbed59e1a,4 +np.float32,0x3f16ff94,0x3f217fde,4 +np.float32,0xbdf19588,0xbdf225dd,4 +np.float32,0xbf4d9654,0xbf6eb442,4 +np.float32,0xbf390b9b,0xbf4ed220,4 +np.float32,0xbe155a74,0xbe15e354,4 +np.float32,0x3f519e4c,0x3f759850,4 +np.float32,0xbee3f08c,0xbeec3b84,4 +np.float32,0xbf478be7,0xbf64d23b,4 +np.float32,0xbefdee50,0xbf04d92a,4 +np.float32,0x3e8def78,0x3e8fd1bc,4 +np.float32,0x3e3df2a8,0x3e3f0dee,4 +np.float32,0xbf413e22,0xbf5afd97,4 +np.float32,0xbf1b8bc4,0xbf272d71,4 +np.float32,0xbf31e5be,0xbf44af22,4 +np.float32,0x3de7e080,0x3de86010,4 +np.float32,0xbf5ddf7e,0xbf863645,4 +np.float32,0x3f3eba6a,0x3f57306e,4 +np.float32,0xff7fffff,0x7fc00000,4 +np.float32,0x3ec22d5c,0x3ec72973,4 +np.float32,0x80800000,0x80800000,4 +np.float32,0x3f032e0c,0x3f09ba82,4 +np.float32,0x3d74bd60,0x3d74e2b7,4 +np.float32,0xbea0d61e,0xbea39b42,4 +np.float32,0xbefdfa78,0xbf04e02a,4 +np.float32,0x3e5cb220,0x3e5e70ec,4 +np.float32,0xbe239e54,0xbe2452a4,4 +np.float32,0x3f452738,0x3f61090e,4 +np.float32,0x3e99a2e0,0x3e9c0a66,4 +np.float32,0x3e4394d8,0x3e44ca5f,4 +np.float32,0x3f4472e2,0x3f5fef14,4 +np.float32,0xbf46bc70,0xbf638814,4 +np.float32,0xbf0b910f,0xbf139c7a,4 +np.float32,0x3f36b4a6,0x3f4b753f,4 +np.float32,0x3e0bf478,0x3e0c64f6,4 +np.float32,0x3ce02480,0x3ce02ba9,4 +np.float32,0xbd904b10,0xbd9069b1,4 +np.float32,0xbf7f5d72,0xbfc00b70,4 +np.float32,0x3f62127e,0x3f8a8ca8,4 +np.float32,0xbf320253,0xbf44d6e4,4 +np.float32,0x3f2507be,0x3f335833,4 +np.float32,0x3f299284,0x3f395887,4 +np.float32,0xbd8211b0,0xbd82281d,4 +np.float32,0xbd3374c0,0xbd338376,4 +np.float32,0x3f36c56a,0x3f4b8d30,4 +np.float32,0xbf51f704,0xbf76331f,4 +np.float32,0xbe9871ca,0xbe9acab2,4 +np.float32,0xbe818d8c,0xbe82fa0f,4 +np.float32,0x3f08b958,0x3f103c18,4 +np.float32,0x3f22559a,0x3f2fd698,4 +np.float32,0xbf11f388,0xbf1b4db8,4 +np.float32,0x3ebe1990,0x3ec2c359,4 +np.float32,0xbe75ab38,0xbe7816b6,4 +np.float32,0x3e96102c,0x3e984c99,4 +np.float32,0xbe80d9d2,0xbe824052,4 +np.float32,0x3ef47588,0x3efeda7f,4 +np.float32,0xbe45e524,0xbe4725ea,4 +np.float32,0x3f7f9e7a,0x3fc213ff,4 +np.float32,0x3f1d3c36,0x3f294faa,4 +np.float32,0xbf3c58db,0xbf53a591,4 +np.float32,0x3f0d3d20,0x3f159c69,4 +np.float32,0x3f744be6,0x3fa23552,4 +np.float32,0x3f2e0cea,0x3f3f630e,4 +np.float32,0x3e193c10,0x3e19cff7,4 +np.float32,0xbf4150ac,0xbf5b19dd,4 +np.float32,0xbf145f72,0xbf1e4355,4 +np.float32,0xbb76cc00,0xbb76cc26,4 +np.float32,0x3f756780,0x3fa41b3e,4 +np.float32,0x3ea9b868,0x3eacfe3c,4 +np.float32,0x3d07c920,0x3d07cf7f,4 +np.float32,0xbf2263d4,0xbf2fe8ff,4 +np.float32,0x3e53b3f8,0x3e553daa,4 +np.float32,0xbf785be8,0xbfa9b5ba,4 +np.float32,0x3f324f7a,0x3f454254,4 +np.float32,0xbf2188f2,0xbf2ece5b,4 +np.float32,0xbe33781c,0xbe3466a2,4 +np.float32,0xbd3cf120,0xbd3d024c,4 +np.float32,0x3f06b18a,0x3f0dd70f,4 +np.float32,0x3f40d63e,0x3f5a5f6a,4 +np.float32,0x3f752340,0x3fa3a41e,4 +np.float32,0xbe1cf1c0,0xbe1d90bc,4 +np.float32,0xbf02d948,0xbf0957d7,4 +np.float32,0x3f73bed0,0x3fa14bf7,4 +np.float32,0x3d914920,0x3d916864,4 +np.float32,0x7fa00000,0x7fe00000,4 +np.float32,0xbe67a5d8,0xbe69aba7,4 +np.float32,0x3f689c4a,0x3f91eb9f,4 +np.float32,0xbf196e00,0xbf248601,4 +np.float32,0xbf50dacb,0xbf7444fe,4 +np.float32,0x3f628b86,0x3f8b0e1e,4 +np.float32,0x3f6ee2f2,0x3f99fe7f,4 +np.float32,0x3ee5df40,0x3eee6492,4 +np.float32,0x3f501746,0x3f72f41b,4 +np.float32,0xbf1f0f18,0xbf2ba164,4 +np.float32,0xbf1a8bfd,0xbf25ec01,4 +np.float32,0xbd4926f0,0xbd493ba9,4 +np.float32,0xbf4e364f,0xbf6fc17b,4 +np.float32,0x3e50c578,0x3e523ed4,4 +np.float32,0x3f65bf10,0x3f8e95ce,4 +np.float32,0xbe8d75a2,0xbe8f52f2,4 +np.float32,0xbf3f557e,0xbf581962,4 +np.float32,0xbeff2bfc,0xbf05903a,4 +np.float32,0x3f5e8bde,0x3f86e3d8,4 +np.float32,0xbf7a0012,0xbfad4b9b,4 +np.float32,0x3edefce0,0x3ee6b790,4 +np.float32,0xbf0003de,0xbf060f09,4 +np.float32,0x3efc4650,0x3f03e548,4 +np.float32,0x3f4582e4,0x3f6198f5,4 +np.float32,0x3f10086c,0x3f18f9d0,4 +np.float32,0x3f1cd304,0x3f28ca77,4 +np.float32,0x3f683366,0x3f916e8d,4 +np.float32,0xbed49392,0xbedb3675,4 +np.float32,0xbf6fe5f6,0xbf9b6c0e,4 +np.float32,0xbf59b416,0xbf8224f6,4 +np.float32,0x3d20c960,0x3d20d3f4,4 +np.float32,0x3f6b00d6,0x3f94dbe7,4 +np.float32,0x3f6c26ae,0x3f965352,4 +np.float32,0xbf370ea6,0xbf4bf5dd,4 +np.float32,0x3dfe7230,0x3dff1af1,4 +np.float32,0xbefc21a8,0xbf03d038,4 +np.float32,0x3f16a990,0x3f21156a,4 +np.float32,0xbef8ac0c,0xbf01d48f,4 +np.float32,0x3f170de8,0x3f21919d,4 +np.float32,0x3db9ef80,0x3dba3122,4 +np.float32,0x3d696400,0x3d698461,4 +np.float32,0x3f007aa2,0x3f069843,4 +np.float32,0x3f22827c,0x3f3010a9,4 +np.float32,0x3f3650dc,0x3f4ae6f1,4 +np.float32,0xbf1d8037,0xbf29a5e1,4 +np.float32,0xbf08fdc4,0xbf108d0e,4 +np.float32,0xbd8df350,0xbd8e1079,4 +np.float32,0xbf36bb32,0xbf4b7e98,4 +np.float32,0x3f2e3756,0x3f3f9ced,4 +np.float32,0x3d5a6f20,0x3d5a89aa,4 +np.float32,0x3f55d568,0x3f7d1889,4 +np.float32,0x3e1ed110,0x3e1f75d9,4 +np.float32,0x3e7386b8,0x3e75e1dc,4 +np.float32,0x3f48ea0e,0x3f670434,4 +np.float32,0x3e921fb0,0x3e942f14,4 +np.float32,0xbf0d4d0b,0xbf15af7f,4 +np.float32,0x3f179ed2,0x3f224549,4 +np.float32,0xbf3a328e,0xbf507e6d,4 +np.float32,0xbf74591a,0xbfa24b6e,4 +np.float32,0x3ec7d1c4,0x3ecd4657,4 +np.float32,0xbf6ecbed,0xbf99de85,4 +np.float32,0x3db0bd00,0x3db0f559,4 +np.float32,0x7f800000,0x7fc00000,4 +np.float32,0x3e0373b8,0x3e03d0d6,4 +np.float32,0xbf439784,0xbf5e9a04,4 +np.float32,0xbef97a9e,0xbf024ac6,4 +np.float32,0x3e4d71a8,0x3e4ed90a,4 +np.float32,0xbf14d868,0xbf1ed7e3,4 +np.float32,0xbf776870,0xbfa7ce37,4 +np.float32,0xbe32a500,0xbe339038,4 +np.float32,0xbf326d8a,0xbf456c3d,4 +np.float32,0xbe9b758c,0xbe9df3e7,4 +np.float32,0x3d9515a0,0x3d95376a,4 +np.float32,0x3e3f7320,0x3e40953e,4 +np.float32,0xbee57e7e,0xbeedf84f,4 +np.float32,0x3e821e94,0x3e838ffd,4 +np.float32,0x3f74beaa,0x3fa2f721,4 +np.float32,0xbe9b7672,0xbe9df4d9,4 +np.float32,0x3f4041fc,0x3f597e71,4 +np.float32,0xbe9ea7c4,0xbea14f92,4 +np.float32,0xbf800000,0xbfc90fdb,4 +np.float32,0x3e04fb90,0x3e055bfd,4 +np.float32,0xbf14d3d6,0xbf1ed245,4 +np.float32,0xbe84ebec,0xbe86763e,4 +np.float32,0x3f08e568,0x3f107039,4 +np.float32,0x3d8dc9e0,0x3d8de6ef,4 +np.float32,0x3ea4549c,0x3ea74a94,4 +np.float32,0xbebd2806,0xbec1bf51,4 +np.float32,0x3f311a26,0x3f439498,4 +np.float32,0xbf3d2222,0xbf54cf7e,4 +np.float32,0x3e00c500,0x3e011c81,4 +np.float32,0xbe35ed1c,0xbe36e5a9,4 +np.float32,0xbd4ec020,0xbd4ed6a0,4 +np.float32,0x3e1eb088,0x3e1f54eb,4 +np.float32,0x3cf94840,0x3cf9521a,4 +np.float32,0xbf010c5d,0xbf0740e0,4 +np.float32,0xbf3bd63b,0xbf52e502,4 +np.float32,0x3f233f30,0x3f310542,4 +np.float32,0x3ea24128,0x3ea519d7,4 +np.float32,0x3f478b38,0x3f64d124,4 +np.float32,0x3f1e0c6c,0x3f2a57ec,4 +np.float32,0xbf3ad294,0xbf51680a,4 +np.float32,0x3ede0554,0x3ee5a4b4,4 +np.float32,0x3e451a98,0x3e46577d,4 +np.float32,0x3f520164,0x3f764542,4 +np.float32,0x0,0x0,4 +np.float32,0xbd056cd0,0xbd0572db,4 +np.float32,0xbf58b018,0xbf812f5e,4 +np.float32,0x3e036eb0,0x3e03cbc3,4 +np.float32,0x3d1377a0,0x3d137fc9,4 +np.float32,0xbf692d3a,0xbf929a2c,4 +np.float32,0xbec60fb8,0xbecb5dea,4 +np.float32,0x3ed23340,0x3ed89a8e,4 +np.float32,0x3c87f040,0x3c87f1d9,4 +np.float32,0x3dac62f0,0x3dac9737,4 +np.float32,0xbed97c16,0xbee09f02,4 +np.float32,0xbf2d5f3c,0xbf3e769c,4 +np.float32,0xbc3b7c40,0xbc3b7d4c,4 +np.float32,0x3ed998ec,0x3ee0bedd,4 +np.float32,0x3dd86630,0x3dd8cdcb,4 +np.float32,0x3e8b4304,0x3e8d09ea,4 +np.float32,0x3f51e6b0,0x3f761697,4 +np.float32,0x3ec51f24,0x3eca5923,4 +np.float32,0xbf647430,0xbf8d2307,4 +np.float32,0x3f253d9c,0x3f339eb2,4 +np.float32,0x3dc969d0,0x3dc9bd4b,4 +np.float32,0xbc2f1300,0xbc2f13da,4 +np.float32,0xbf170007,0xbf21806d,4 +np.float32,0x3f757d10,0x3fa4412e,4 +np.float32,0xbe7864ac,0xbe7ae564,4 +np.float32,0x3f2ffe90,0x3f420cfb,4 +np.float32,0xbe576138,0xbe590012,4 +np.float32,0xbf517a21,0xbf755959,4 +np.float32,0xbf159cfe,0xbf1fc9d5,4 +np.float32,0xbf638b2a,0xbf8c22cf,4 +np.float32,0xff800000,0x7fc00000,4 +np.float32,0x3ed19ca0,0x3ed7f569,4 +np.float32,0x3f7c4460,0x3fb32d26,4 +np.float32,0x3ebfae6c,0x3ec477ab,4 +np.float32,0x3dd452d0,0x3dd4b4a8,4 +np.float32,0x3f471482,0x3f6413fb,4 +np.float32,0xbf49d704,0xbf6883fe,4 +np.float32,0xbd42c4e0,0xbd42d7af,4 +np.float32,0xbeb02994,0xbeb3d668,4 +np.float32,0x3f4d1fd8,0x3f6dedd2,4 +np.float32,0x3efb591c,0x3f035d11,4 +np.float32,0x80000000,0x80000000,4 +np.float32,0xbf50f782,0xbf7476ad,4 +np.float32,0x3d7232c0,0x3d7256f0,4 +np.float32,0x3f649460,0x3f8d46bb,4 +np.float32,0x3f5561bc,0x3f7c46a9,4 +np.float32,0x3e64f6a0,0x3e66ea5d,4 +np.float32,0x3e5b0470,0x3e5cb8f9,4 +np.float32,0xbe9b6b2c,0xbe9de904,4 +np.float32,0x3f6c33f4,0x3f966486,4 +np.float32,0x3f5cee54,0x3f854613,4 +np.float32,0x3ed3e044,0x3eda716e,4 +np.float32,0xbf3cac7f,0xbf542131,4 +np.float32,0x3c723500,0x3c723742,4 +np.float32,0x3de59900,0x3de614d3,4 +np.float32,0xbdf292f8,0xbdf32517,4 +np.float32,0x3f05c8b2,0x3f0cc59b,4 +np.float32,0xbf1ab182,0xbf261b14,4 +np.float32,0xbda396f0,0xbda3c39a,4 +np.float32,0xbf270ed0,0xbf360231,4 +np.float32,0x3f2063e6,0x3f2d557e,4 +np.float32,0x3c550280,0x3c550409,4 +np.float32,0xbe103b48,0xbe10b679,4 +np.float32,0xbebae390,0xbebf4f40,4 +np.float32,0x3f3bc868,0x3f52d0aa,4 +np.float32,0xbd62f880,0xbd631647,4 +np.float32,0xbe7a38f4,0xbe7cc833,4 +np.float32,0x3f09d796,0x3f118f39,4 +np.float32,0xbf5fa558,0xbf8802d0,4 +np.float32,0x3f111cc8,0x3f1a48b0,4 +np.float32,0x3e831958,0x3e849356,4 +np.float32,0xbf614dbd,0xbf89bc3b,4 +np.float32,0xbd521510,0xbd522cac,4 +np.float32,0x3f05af22,0x3f0ca7a0,4 +np.float32,0xbf1ac60e,0xbf2634df,4 +np.float32,0xbf6bd05e,0xbf95e3fe,4 +np.float32,0xbd1fa6e0,0xbd1fb13b,4 +np.float32,0xbeb82f7a,0xbebc68b1,4 +np.float32,0xbd92aaf8,0xbd92cb23,4 +np.float32,0xbe073a54,0xbe079fbf,4 +np.float32,0xbf198655,0xbf24a468,4 +np.float32,0x3f62f6d8,0x3f8b81ba,4 +np.float32,0x3eef4310,0x3ef8f4f9,4 +np.float32,0x3e8988e0,0x3e8b3eae,4 +np.float32,0xbf3ddba5,0xbf55e367,4 +np.float32,0x3dc6d2e0,0x3dc7232b,4 +np.float32,0xbf31040e,0xbf437601,4 +np.float32,0x3f1bb74a,0x3f276442,4 +np.float32,0xbf0075d2,0xbf0692b3,4 +np.float32,0xbf606ce0,0xbf88d0ff,4 +np.float32,0xbf083856,0xbf0fa39d,4 +np.float32,0xbdb25b20,0xbdb2950a,4 +np.float32,0xbeb86860,0xbebca5ae,4 +np.float32,0x3de83160,0x3de8b176,4 +np.float32,0xbf33a98f,0xbf472664,4 +np.float32,0x3e7795f8,0x3e7a1058,4 +np.float32,0x3e0ca6f8,0x3e0d192a,4 +np.float32,0xbf1aef60,0xbf2668c3,4 +np.float32,0xbda53b58,0xbda5695e,4 +np.float32,0xbf178096,0xbf221fc5,4 +np.float32,0xbf0a4159,0xbf120ccf,4 +np.float32,0x3f7bca36,0x3fb1d0df,4 +np.float32,0xbef94360,0xbf022b26,4 +np.float32,0xbef16f36,0xbefb6ad6,4 +np.float32,0x3f53a7e6,0x3f792e25,4 +np.float32,0xbf7c536f,0xbfb35993,4 +np.float32,0xbe84aaa0,0xbe8632a2,4 +np.float32,0x3ecb3998,0x3ed0fab9,4 +np.float32,0x3f539304,0x3f79090a,4 +np.float32,0xbf3c7816,0xbf53d3b3,4 +np.float32,0xbe7a387c,0xbe7cc7b7,4 +np.float32,0x3f7000e4,0x3f9b92b1,4 +np.float32,0x3e08fd70,0x3e0966e5,4 +np.float32,0x3db97ba0,0x3db9bcc8,4 +np.float32,0xbee99056,0xbef2886a,4 +np.float32,0xbf0668da,0xbf0d819e,4 +np.float32,0x3e58a408,0x3e5a4a51,4 +np.float32,0x3f3440b8,0x3f47faed,4 +np.float32,0xbf19a2ce,0xbf24c7ff,4 +np.float32,0xbe75e990,0xbe7856ee,4 +np.float32,0x3f3c865c,0x3f53e8cb,4 +np.float32,0x3e5e03d0,0x3e5fcac9,4 +np.float32,0x3edb8e34,0x3ee2e932,4 +np.float32,0xbf7e1f5f,0xbfb98ce4,4 +np.float32,0xbf7372ff,0xbfa0d0ae,4 +np.float32,0xbf3ee850,0xbf577548,4 +np.float32,0x3ef19658,0x3efb9737,4 +np.float32,0xbe8088de,0xbe81ecaf,4 +np.float32,0x800000,0x800000,4 +np.float32,0xbde39dd8,0xbde4167a,4 +np.float32,0xbf065d7a,0xbf0d7441,4 +np.float32,0xbde52c78,0xbde5a79b,4 +np.float32,0xbe3a28c0,0xbe3b333e,4 +np.float32,0x3f6e8b3c,0x3f998516,4 +np.float32,0x3f3485c2,0x3f485c39,4 +np.float32,0x3e6f2c68,0x3e71673e,4 +np.float32,0xbe4ec9cc,0xbe50385e,4 +np.float32,0xbf1c3bb0,0xbf280b39,4 +np.float32,0x3ec8ea18,0x3ece76f7,4 +np.float32,0x3e26b5f8,0x3e2774c9,4 +np.float32,0x3e1e4a38,0x3e1eed5c,4 +np.float32,0xbee7a106,0xbef05c6b,4 +np.float32,0xbf305928,0xbf4289d8,4 +np.float32,0x3f0c431c,0x3f147118,4 +np.float32,0xbe57ba6c,0xbe595b52,4 +np.float32,0x3eabc9cc,0x3eaf2fc7,4 +np.float32,0xbef1ed24,0xbefbf9ae,4 +np.float32,0xbf61b576,0xbf8a29cc,4 +np.float32,0x3e9c1ff4,0x3e9ea6cb,4 +np.float32,0x3f6c53b2,0x3f968dbe,4 +np.float32,0x3e2d1b80,0x3e2df156,4 +np.float32,0x3e9f2f70,0x3ea1de4a,4 +np.float32,0xbf5861ee,0xbf80e61a,4 +np.float32,0x3f429144,0x3f5d0505,4 +np.float32,0x3e235cc8,0x3e24103e,4 +np.float32,0xbf354879,0xbf496f6a,4 +np.float32,0xbf20a146,0xbf2da447,4 +np.float32,0x3e8d8968,0x3e8f6785,4 +np.float32,0x3f3fbc94,0x3f58b4c1,4 +np.float32,0x3f2c5f50,0x3f3d1b9f,4 +np.float32,0x3f7bf0f8,0x3fb23d23,4 +np.float32,0xbf218282,0xbf2ec60f,4 +np.float32,0x3f2545aa,0x3f33a93e,4 +np.float32,0xbf4b17be,0xbf6a9018,4 +np.float32,0xbb9df700,0xbb9df728,4 +np.float32,0x3f685d54,0x3f91a06c,4 +np.float32,0x3efdfe2c,0x3f04e24c,4 +np.float32,0x3ef1c5a0,0x3efbccd9,4 +np.float32,0xbf41d731,0xbf5be76e,4 +np.float32,0x3ebd1360,0x3ec1a919,4 +np.float32,0xbf706bd4,0xbf9c2d58,4 +np.float32,0x3ea525e4,0x3ea8279d,4 +np.float32,0xbe51f1b0,0xbe537186,4 +np.float32,0x3f5e8cf6,0x3f86e4f4,4 +np.float32,0xbdad2520,0xbdad5a19,4 +np.float32,0xbf5c5704,0xbf84b0e5,4 +np.float32,0x3f47b54e,0x3f65145e,4 +np.float32,0x3eb4fc78,0x3eb8fc0c,4 +np.float32,0x3dca1450,0x3dca68a1,4 +np.float32,0x3eb02a74,0x3eb3d757,4 +np.float32,0x3f74ae6a,0x3fa2db75,4 +np.float32,0x3f800000,0x3fc90fdb,4 +np.float32,0xbdb46a00,0xbdb4a5f2,4 +np.float32,0xbe9f2ba6,0xbea1da4e,4 +np.float32,0x3f0afa70,0x3f12e8f7,4 +np.float32,0xbf677b20,0xbf909547,4 +np.float32,0x3eff9188,0x3f05cacf,4 +np.float32,0x3f720562,0x3f9e911b,4 +np.float32,0xbf7180d8,0xbf9dc794,4 +np.float32,0xbee7d076,0xbef0919d,4 +np.float32,0x3f0432ce,0x3f0aea95,4 +np.float32,0x3f3bc4c8,0x3f52cb54,4 +np.float32,0xbea72f30,0xbeaa4ebe,4 +np.float32,0x3e90ed00,0x3e92ef33,4 +np.float32,0xbda63670,0xbda6654a,4 +np.float32,0xbf5a6f85,0xbf82d7e0,4 +np.float32,0x3e6e8808,0x3e70be34,4 +np.float32,0xbf4f3822,0xbf71768f,4 +np.float32,0x3e5c8a68,0x3e5e483f,4 +np.float32,0xbf0669d4,0xbf0d82c4,4 +np.float32,0xbf79f77c,0xbfad37b0,4 +np.float32,0x3f25c82c,0x3f345453,4 +np.float32,0x3f1b2948,0x3f26b188,4 +np.float32,0x3ef7e288,0x3f016159,4 +np.float32,0x3c274280,0x3c27433e,4 +np.float32,0xbf4c8fa0,0xbf6cfd5e,4 +np.float32,0x3ea4ccb4,0x3ea7c966,4 +np.float32,0xbf7b157e,0xbfafefca,4 +np.float32,0xbee4c2b0,0xbeed264d,4 +np.float32,0xbc1fd640,0xbc1fd6e6,4 +np.float32,0x3e892308,0x3e8ad4f6,4 +np.float32,0xbf3f69c7,0xbf5837ed,4 +np.float32,0x3ec879e8,0x3ecdfd05,4 +np.float32,0x3f07a8c6,0x3f0efa30,4 +np.float32,0x3f67b880,0x3f90dd4d,4 +np.float32,0x3e8a11c8,0x3e8bccd5,4 +np.float32,0x3f7df6fc,0x3fb8e935,4 +np.float32,0xbef3e498,0xbefe3599,4 +np.float32,0xbf18ad7d,0xbf2395d8,4 +np.float32,0x3f2bce74,0x3f3c57f5,4 +np.float32,0xbf38086e,0xbf4d5c2e,4 +np.float32,0x3f772d7a,0x3fa75c35,4 +np.float32,0xbf3b6e24,0xbf524c00,4 +np.float32,0xbdd39108,0xbdd3f1d4,4 +np.float32,0xbf691f6b,0xbf928974,4 +np.float32,0x3f146188,0x3f1e45e4,4 +np.float32,0xbf56045b,0xbf7d6e03,4 +np.float32,0xbf4b2ee4,0xbf6ab622,4 +np.float32,0xbf3fa3f6,0xbf588f9d,4 +np.float32,0x3f127bb0,0x3f1bf398,4 +np.float32,0x3ed858a0,0x3edf5d3e,4 +np.float32,0xbd6de3b0,0xbd6e05fa,4 +np.float32,0xbecc662c,0xbed24261,4 +np.float32,0xbd6791d0,0xbd67b170,4 +np.float32,0xbf146016,0xbf1e441e,4 +np.float32,0xbf61f04c,0xbf8a6841,4 +np.float32,0xbe7f16d0,0xbe80e6e7,4 +np.float32,0xbebf93e6,0xbec45b10,4 +np.float32,0xbe8a59fc,0xbe8c17d1,4 +np.float32,0xbebc7a0c,0xbec10426,4 +np.float32,0xbf2a682e,0xbf3a7649,4 +np.float32,0xbe18d0cc,0xbe19637b,4 +np.float32,0x3d7f5100,0x3d7f7b66,4 +np.float32,0xbf10f5fa,0xbf1a1998,4 +np.float32,0x3f25e956,0x3f347fdc,4 +np.float32,0x3e6e8658,0x3e70bc78,4 +np.float32,0x3f21a5de,0x3f2ef3a5,4 +np.float32,0xbf4e71d4,0xbf702607,4 +np.float32,0xbf49d6b6,0xbf688380,4 +np.float32,0xbdb729c0,0xbdb7687c,4 +np.float32,0xbf63e1f4,0xbf8c81c7,4 +np.float32,0x3dda6cb0,0x3ddad73e,4 +np.float32,0x3ee1bc40,0x3ee9c612,4 +np.float32,0x3ebdb5f8,0x3ec2581b,4 +np.float32,0x3f7d9576,0x3fb77646,4 +np.float32,0x3e087140,0x3e08d971,4 +np.float64,0xbfdba523cfb74a48,0xbfdc960ddd9c0506,1 +np.float64,0x3fb51773622a2ee0,0x3fb51d93f77089d5,1 +np.float64,0x3fc839f6d33073f0,0x3fc85f9a47dfe8e6,1 +np.float64,0xbfecba2d82f9745b,0xbff1d55416c6c993,1 +np.float64,0x3fd520fe47aa41fc,0x3fd58867f1179634,1 +np.float64,0x3fe1b369c56366d4,0x3fe2c1ac9dd2c45a,1 +np.float64,0xbfec25a7cd784b50,0xbff133417389b12d,1 +np.float64,0xbfd286342ea50c68,0xbfd2cb0bca22e66d,1 +np.float64,0x3fd5f6fe5eabedfc,0x3fd66bad16680d08,1 +np.float64,0xbfe863a87570c751,0xbfebbb9b637eb6dc,1 +np.float64,0x3fc97f5b4d32feb8,0x3fc9ab5066d8eaec,1 +np.float64,0xbfcb667af936ccf4,0xbfcb9d3017047a1d,1 +np.float64,0xbfd1b7b9afa36f74,0xbfd1f3c175706154,1 +np.float64,0x3fef97385b7f2e70,0x3ff6922a1a6c709f,1 +np.float64,0xbfd13e4205a27c84,0xbfd1757c993cdb74,1 +np.float64,0xbfd18d88aca31b12,0xbfd1c7dd75068f7d,1 +np.float64,0x3fe040ce0f60819c,0x3fe10c59d2a27089,1 +np.float64,0xbfddc7deddbb8fbe,0xbfdef9de5baecdda,1 +np.float64,0xbfcf6e96193edd2c,0xbfcfc1bb7396b9a3,1 +np.float64,0x3fd544f494aa89e8,0x3fd5ae850e2b37dd,1 +np.float64,0x3fe15b381fe2b670,0x3fe25841c7bfe2af,1 +np.float64,0xbfde793420bcf268,0xbfdfc2ddc7b4a341,1 +np.float64,0x3fd0d5db30a1abb8,0x3fd1092cef4aa4fb,1 +np.float64,0x3fe386a08c670d42,0x3fe50059bbf7f491,1 +np.float64,0xbfe0aae3a96155c8,0xbfe1880ef13e95ce,1 +np.float64,0xbfe80eeb03f01dd6,0xbfeb39e9f107e944,1 +np.float64,0xbfd531af3caa635e,0xbfd59a178f17552a,1 +np.float64,0x3fcced14ab39da28,0x3fcd2d9a806337ef,1 +np.float64,0xbfdb4c71bcb698e4,0xbfdc33d9d9daf708,1 +np.float64,0xbfde7375ecbce6ec,0xbfdfbc5611bc48ff,1 +np.float64,0x3fecc5707a798ae0,0x3ff1e2268d778017,1 +np.float64,0x3fe8f210a1f1e422,0x3fec9b3349a5baa2,1 +np.float64,0x3fe357f9b8e6aff4,0x3fe4c5a0b89a9228,1 +np.float64,0xbfe0f863b761f0c8,0xbfe1e3283494c3d4,1 +np.float64,0x3fd017c395a02f88,0x3fd044761f2f4a66,1 +np.float64,0x3febeb4746f7d68e,0x3ff0f6b955e7feb6,1 +np.float64,0xbfbdaaeeae3b55e0,0xbfbdbc0950109261,1 +np.float64,0xbfea013095f40261,0xbfee5b8fe8ad8593,1 +np.float64,0xbfe9f87b7973f0f7,0xbfee4ca3a8438d72,1 +np.float64,0x3fd37f77cfa6fef0,0x3fd3d018c825f057,1 +np.float64,0x3fb0799cee20f340,0x3fb07c879e7cb63f,1 +np.float64,0xbfdcfd581cb9fab0,0xbfde15e35314b52d,1 +np.float64,0xbfd49781b8a92f04,0xbfd4f6fa1516fefc,1 +np.float64,0x3fb3fcb6d627f970,0x3fb401ed44a713a8,1 +np.float64,0x3fd5737ef8aae6fc,0x3fd5dfe42d4416c7,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0xbfe56ae780ead5cf,0xbfe776ea5721b900,1 +np.float64,0x3fd4567786a8acf0,0x3fd4b255421c161a,1 +np.float64,0x3fef6fb58cfedf6c,0x3ff62012dfcf0a33,1 +np.float64,0xbfd1dbcd3da3b79a,0xbfd2194fd628f74d,1 +np.float64,0x3fd9350016b26a00,0x3fd9e8b01eb023e9,1 +np.float64,0xbfe4fb3a69e9f675,0xbfe6e1d2c9eca56c,1 +np.float64,0x3fe9fe0f73f3fc1e,0x3fee5631cfd39772,1 +np.float64,0xbfd51c1bc6aa3838,0xbfd5833b3bd53543,1 +np.float64,0x3fc64158e12c82b0,0x3fc65e7352f237d7,1 +np.float64,0x3fd0d8ee1ba1b1dc,0x3fd10c5c99a16f0e,1 +np.float64,0x3fd5554e15aaaa9c,0x3fd5bfdb9ec9e873,1 +np.float64,0x3fe61ce209ec39c4,0x3fe869bc4c28437d,1 +np.float64,0xbfe4e42c8c69c859,0xbfe6c356dac7e2db,1 +np.float64,0xbfe157021062ae04,0xbfe2533ed39f4212,1 +np.float64,0x3fe844066cf0880c,0x3feb8aea0b7bd0a4,1 +np.float64,0x3fe55016586aa02c,0x3fe752e4b2a67b9f,1 +np.float64,0x3fdabce619b579cc,0x3fdb95809bc789d9,1 +np.float64,0x3fee03bae37c0776,0x3ff3778ba38ca882,1 +np.float64,0xbfeb2f5844f65eb0,0xbff03dd1b767d3c8,1 +np.float64,0x3fedcfdbaffb9fb8,0x3ff32e81d0639164,1 +np.float64,0x3fe06fc63ee0df8c,0x3fe142fc27f92eaf,1 +np.float64,0x3fe7ce90fd6f9d22,0x3fead8f832bbbf5d,1 +np.float64,0xbfbc0015ce380028,0xbfbc0e7470e06e86,1 +np.float64,0xbfe9b3de90f367bd,0xbfedd857931dfc6b,1 +np.float64,0xbfcb588f5936b120,0xbfcb8ef0124a4f21,1 +np.float64,0x3f8d376a503a6f00,0x3f8d37ab43e7988d,1 +np.float64,0xbfdb123a40b62474,0xbfdbf38b6cf5db92,1 +np.float64,0xbfee7da6be7cfb4e,0xbff433042cd9d5eb,1 +np.float64,0xbfc4c9e01b2993c0,0xbfc4e18dbafe37ef,1 +np.float64,0x3fedd42faffba860,0x3ff334790cd18a19,1 +np.float64,0x3fe9cdf772f39bee,0x3fee044f87b856ab,1 +np.float64,0x3fe0245881e048b2,0x3fe0eb5a1f739c8d,1 +np.float64,0xbfe4712bd9e8e258,0xbfe62cb3d82034aa,1 +np.float64,0x3fe9a16b46f342d6,0x3fedb972b2542551,1 +np.float64,0xbfe57ab4536af568,0xbfe78c34b03569c2,1 +np.float64,0x3fb6d6ceb22dada0,0x3fb6de976964d6dd,1 +np.float64,0x3fc3ac23a3275848,0x3fc3c02de53919b8,1 +np.float64,0xbfccb531e7396a64,0xbfccf43ec69f6281,1 +np.float64,0xbfd2f07fc8a5e100,0xbfd33a35a8c41b62,1 +np.float64,0xbfe3e5dd04e7cbba,0xbfe57940157c27ba,1 +np.float64,0x3feefe40757dfc80,0x3ff51bc72b846af6,1 +np.float64,0x8000000000000001,0x8000000000000001,1 +np.float64,0x3fecb7b766796f6e,0x3ff1d28972a0fc7e,1 +np.float64,0xbfea1bf1357437e2,0xbfee89a6532bfd71,1 +np.float64,0xbfca3983b7347308,0xbfca696463b791ef,1 +np.float64,0x10000000000000,0x10000000000000,1 +np.float64,0xbf886b45d030d680,0xbf886b6bbc04314b,1 +np.float64,0x3fd5224bb5aa4498,0x3fd589c92e82218f,1 +np.float64,0xbfec799874f8f331,0xbff18d5158b8e640,1 +np.float64,0xbf88124410302480,0xbf88126863350a16,1 +np.float64,0xbfe37feaaa66ffd6,0xbfe4f7e24382e79d,1 +np.float64,0x3fd777eca1aeefd8,0x3fd8076ead6d55dc,1 +np.float64,0x3fecaaeb3af955d6,0x3ff1c4159fa3e965,1 +np.float64,0xbfeb81e4e6f703ca,0xbff08d4e4c77fada,1 +np.float64,0xbfd7d0a0edafa142,0xbfd866e37010312e,1 +np.float64,0x3feda48c00fb4918,0x3ff2f3fd33c36307,1 +np.float64,0x3feb87ecc4770fda,0x3ff09336e490deda,1 +np.float64,0xbfefd78ad27faf16,0xbff78abbafb50ac1,1 +np.float64,0x3fe58e918c6b1d24,0x3fe7a70b38cbf016,1 +np.float64,0x3fda163b95b42c78,0x3fdade86b88ba4ee,1 +np.float64,0x3fe8fc1aaf71f836,0x3fecab3f93b59df5,1 +np.float64,0xbf8de56f903bcac0,0xbf8de5b527cec797,1 +np.float64,0xbfec112db2f8225b,0xbff11dd648de706f,1 +np.float64,0x3fc3214713264290,0x3fc333b1c862f7d0,1 +np.float64,0xbfeb5e5836f6bcb0,0xbff06ac364b49177,1 +np.float64,0x3fc23d9777247b30,0x3fc24d8ae3bcb615,1 +np.float64,0xbfdf0eed65be1dda,0xbfe036cea9b9dfb6,1 +np.float64,0xbfb2d5c85a25ab90,0xbfb2da24bb409ff3,1 +np.float64,0xbfecdda0c3f9bb42,0xbff1fdf94fc6e89e,1 +np.float64,0x3fdfe79154bfcf24,0x3fe0b338e0476a9d,1 +np.float64,0xbfd712ac6bae2558,0xbfd79abde21f287b,1 +np.float64,0x3fea3f148a747e2a,0x3feec6bed9d4fa04,1 +np.float64,0x3fd4879e4ca90f3c,0x3fd4e632fa4e2edd,1 +np.float64,0x3fe9137a9e7226f6,0x3fecd0c441088d6a,1 +np.float64,0xbfc75bf4ef2eb7e8,0xbfc77da8347d742d,1 +np.float64,0xbfd94090a0b28122,0xbfd9f5458816ed5a,1 +np.float64,0x3fde439cbcbc8738,0x3fdf85fbf496b61f,1 +np.float64,0xbfe18bacdce3175a,0xbfe29210e01237f7,1 +np.float64,0xbfd58ec413ab1d88,0xbfd5fcd838f0a934,1 +np.float64,0xbfeae5af2d75cb5e,0xbfeff1de1b4a06be,1 +np.float64,0x3fb64d1a162c9a30,0x3fb65458fb831354,1 +np.float64,0x3fc18b1e15231640,0x3fc1994c6ffd7a6a,1 +np.float64,0xbfd7b881bcaf7104,0xbfd84ce89a9ee8c7,1 +np.float64,0x3feb916a40f722d4,0x3ff09c8aa851d7c4,1 +np.float64,0x3fdab5fbb5b56bf8,0x3fdb8de43961bbde,1 +np.float64,0x3fe4f35402e9e6a8,0x3fe6d75dc5082894,1 +np.float64,0x3fe2fdb2e5e5fb66,0x3fe454e32a5d2182,1 +np.float64,0x3fe8607195f0c0e4,0x3febb6a4c3bf6a5c,1 +np.float64,0x3fd543ca9aaa8794,0x3fd5ad49203ae572,1 +np.float64,0x3fe8e05ca1f1c0ba,0x3fec7eff123dcc58,1 +np.float64,0x3fe298b6ca65316e,0x3fe3d81d2927c4dd,1 +np.float64,0x3fcfecea733fd9d8,0x3fd0220f1d0faf78,1 +np.float64,0xbfe2e739f065ce74,0xbfe439004e73772a,1 +np.float64,0xbfd1ae6b82a35cd8,0xbfd1ea129a5ee756,1 +np.float64,0xbfeb7edff576fdc0,0xbff08a5a638b8a8b,1 +np.float64,0x3fe5b645ff6b6c8c,0x3fe7dcee1faefe3f,1 +np.float64,0xbfd478427ba8f084,0xbfd4d5fc7c239e60,1 +np.float64,0xbfe39904e3e7320a,0xbfe517972b30b1e5,1 +np.float64,0xbfd3b75b6ba76eb6,0xbfd40acf20a6e074,1 +np.float64,0x3fd596267aab2c4c,0x3fd604b01faeaf75,1 +np.float64,0x3fe134463762688c,0x3fe229fc36784a72,1 +np.float64,0x3fd25dadf7a4bb5c,0x3fd2a0b9e04ea060,1 +np.float64,0xbfc05d3e0b20ba7c,0xbfc068bd2bb9966f,1 +np.float64,0x3f8cf517b039ea00,0x3f8cf556ed74b163,1 +np.float64,0x3fda87361cb50e6c,0x3fdb5a75af897e7f,1 +np.float64,0x3fe53e1926ea7c32,0x3fe73acf01b8ff31,1 +np.float64,0x3fe2e94857e5d290,0x3fe43b8cc820f9c7,1 +np.float64,0x3fd81fe6acb03fcc,0x3fd8bc623c0068cf,1 +np.float64,0xbfddf662c3bbecc6,0xbfdf2e76dc90786e,1 +np.float64,0x3fece174fbf9c2ea,0x3ff2026a1a889580,1 +np.float64,0xbfdc83c5b8b9078c,0xbfdd8dcf6ee3b7da,1 +np.float64,0x3feaf5448f75ea8a,0x3ff0075b108bcd0d,1 +np.float64,0xbfebf32f7ef7e65f,0xbff0fed42aaa826a,1 +np.float64,0x3fe389e5e8e713cc,0x3fe5047ade055ccb,1 +np.float64,0x3f635cdcc026ba00,0x3f635cddeea082ce,1 +np.float64,0x3fae580f543cb020,0x3fae5c9d5108a796,1 +np.float64,0x3fec9fafce793f60,0x3ff1b77bec654f00,1 +np.float64,0x3fb19d226e233a40,0x3fb1a0b32531f7ee,1 +np.float64,0xbfdf9a71e7bf34e4,0xbfe086cef88626c7,1 +np.float64,0x8010000000000000,0x8010000000000000,1 +np.float64,0xbfef170ba2fe2e17,0xbff54ed4675f5b8a,1 +np.float64,0xbfcc6e2f8f38dc60,0xbfccab65fc34d183,1 +np.float64,0x3fee756c4bfcead8,0x3ff4258782c137e6,1 +np.float64,0xbfd461c218a8c384,0xbfd4be3e391f0ff4,1 +np.float64,0xbfe3b64686e76c8d,0xbfe53caa16d6c90f,1 +np.float64,0xbfc1c65d8d238cbc,0xbfc1d51e58f82403,1 +np.float64,0x3fe6e06c63edc0d8,0x3fe97cb832eeb6a2,1 +np.float64,0xbfc9fc20b933f840,0xbfca2ab004312d85,1 +np.float64,0xbfe29aa6df65354e,0xbfe3da7ecf3ba466,1 +np.float64,0x3fea4df7d1749bf0,0x3feee0d448bd4746,1 +np.float64,0xbfedec6161fbd8c3,0xbff3563e1d943aa2,1 +np.float64,0x3fdb6f0437b6de08,0x3fdc5a1888b1213d,1 +np.float64,0xbfe270cbd3e4e198,0xbfe3a72ac27a0b0c,1 +np.float64,0xbfdfff8068bfff00,0xbfe0c1088e3b8983,1 +np.float64,0xbfd28edbe6a51db8,0xbfd2d416c8ed363e,1 +np.float64,0xbfb4e35f9229c6c0,0xbfb4e9531d2a737f,1 +np.float64,0xbfee6727e97cce50,0xbff40e7717576e46,1 +np.float64,0xbfddb5fbddbb6bf8,0xbfdee5aad78f5361,1 +np.float64,0xbfdf9d3e9dbf3a7e,0xbfe0886b191f2957,1 +np.float64,0x3fa57e77042afce0,0x3fa5801518ea9342,1 +np.float64,0x3f95c4e4882b89c0,0x3f95c55003c8e714,1 +np.float64,0x3fd9b10f61b36220,0x3fda6fe5d635a8aa,1 +np.float64,0xbfe2973411652e68,0xbfe3d641fe9885fd,1 +np.float64,0xbfee87bd5a7d0f7b,0xbff443bea81b3fff,1 +np.float64,0x3f9ea064c83d40c0,0x3f9ea19025085b2f,1 +np.float64,0xbfe4b823dfe97048,0xbfe689623d30dc75,1 +np.float64,0xbfa06a326c20d460,0xbfa06aeacbcd3eb8,1 +np.float64,0x3fe1e5c4c1e3cb8a,0x3fe2fe44b822f20e,1 +np.float64,0x3f99dafaa833b600,0x3f99dbaec10a1a0a,1 +np.float64,0xbfed7cb3877af967,0xbff2bfe9e556aaf9,1 +np.float64,0x3fd604f2e2ac09e4,0x3fd67a89408ce6ba,1 +np.float64,0x3fec57b60f78af6c,0x3ff16881f46d60f7,1 +np.float64,0xbfea2e3a17745c74,0xbfeea95c7190fd42,1 +np.float64,0xbfd60a7c37ac14f8,0xbfd6806ed642de35,1 +np.float64,0xbfe544b9726a8973,0xbfe743ac399d81d7,1 +np.float64,0xbfd13520faa26a42,0xbfd16c02034a8fe0,1 +np.float64,0xbfea9ea59ff53d4b,0xbfef70538ee12e00,1 +np.float64,0x3fd66633f8accc68,0x3fd6e23c13ab0e9e,1 +np.float64,0xbfe4071bd3e80e38,0xbfe5a3c9ba897d81,1 +np.float64,0xbfbe1659fa3c2cb0,0xbfbe2831d4fed196,1 +np.float64,0xbfd3312777a6624e,0xbfd37df09b9baeba,1 +np.float64,0x3fd13997caa27330,0x3fd170a4900c8907,1 +np.float64,0xbfe7cbc235ef9784,0xbfead4c4d6cbf129,1 +np.float64,0xbfe1456571628acb,0xbfe23e4ec768c8e2,1 +np.float64,0xbfedf1a044fbe340,0xbff35da96773e176,1 +np.float64,0x3fce38b1553c7160,0x3fce8270709774f9,1 +np.float64,0xbfecb01761f9602f,0xbff1c9e9d382f1f8,1 +np.float64,0xbfe0a03560e1406b,0xbfe17b8d5a1ca662,1 +np.float64,0x3fe50f37cbea1e70,0x3fe6fc55e1ae7da6,1 +np.float64,0xbfe12d64a0625aca,0xbfe221d3a7834e43,1 +np.float64,0xbf6fb288403f6500,0xbf6fb28d6f389db6,1 +np.float64,0x3fda831765b50630,0x3fdb55eecae58ca9,1 +np.float64,0x3fe1a0fe4c6341fc,0x3fe2ab9564304425,1 +np.float64,0xbfef2678a77e4cf1,0xbff56ff42b2797bb,1 +np.float64,0xbfab269c1c364d40,0xbfab29df1cd48779,1 +np.float64,0x3fe8ec82a271d906,0x3fec92567d7a6675,1 +np.float64,0xbfc235115f246a24,0xbfc244ee567682ea,1 +np.float64,0x3feef5bf8d7deb80,0x3ff50ad4875ee9bd,1 +np.float64,0x3fe768b5486ed16a,0x3fea421356160e65,1 +np.float64,0xbfd4255684a84aae,0xbfd47e8baf7ec7f6,1 +np.float64,0x3fc7f67f2b2fed00,0x3fc81ae83cf92dd5,1 +np.float64,0x3fe9b1b19a736364,0x3fedd4b0e24ee741,1 +np.float64,0x3fb27eb9e624fd70,0x3fb282dacd89ce28,1 +np.float64,0xbfd490b710a9216e,0xbfd4efcdeb213458,1 +np.float64,0xbfd1347b2ca268f6,0xbfd16b55dece2d38,1 +np.float64,0x3fc6a5668d2d4ad0,0x3fc6c41452c0c087,1 +np.float64,0xbfca7b209f34f640,0xbfcaac710486f6bd,1 +np.float64,0x3fc23a1a47247438,0x3fc24a047fd4c27a,1 +np.float64,0x3fdb1413a8b62828,0x3fdbf595e2d994bc,1 +np.float64,0xbfea69b396f4d367,0xbfef11bdd2b0709a,1 +np.float64,0x3fd14c9958a29934,0x3fd1846161b10422,1 +np.float64,0xbfe205f44be40be8,0xbfe325283aa3c6a8,1 +np.float64,0x3fecd03c9ef9a07a,0x3ff1ee85aaf52a01,1 +np.float64,0x3fe34281d7e68504,0x3fe4aab63e6de816,1 +np.float64,0xbfe120e2376241c4,0xbfe213023ab03939,1 +np.float64,0xbfe951edc4f2a3dc,0xbfed3615e38576f8,1 +np.float64,0x3fe5a2286f6b4450,0x3fe7c196e0ec10ed,1 +np.float64,0xbfed7a3e1f7af47c,0xbff2bcc0793555d2,1 +np.float64,0x3fe050274960a04e,0x3fe11e2e256ea5cc,1 +np.float64,0xbfcfa71f653f4e40,0xbfcffc11483d6a06,1 +np.float64,0x3f6ead2e403d5a00,0x3f6ead32f314c052,1 +np.float64,0x3fe3a2a026674540,0x3fe523bfe085f6ec,1 +np.float64,0xbfe294a62e65294c,0xbfe3d31ebd0b4ca2,1 +np.float64,0xbfb4894d06291298,0xbfb48ef4b8e256b8,1 +np.float64,0xbfc0c042c1218084,0xbfc0cc98ac2767c4,1 +np.float64,0xbfc6a32cb52d4658,0xbfc6c1d1597ed06b,1 +np.float64,0xbfd30f7777a61eee,0xbfd35aa39fee34eb,1 +np.float64,0x3fe7fc2c2eeff858,0x3feb1d8a558b5537,1 +np.float64,0x7fefffffffffffff,0x7ff8000000000000,1 +np.float64,0xbfdadf917bb5bf22,0xbfdbbbae9a9f67a0,1 +np.float64,0xbfcf0395e13e072c,0xbfcf5366015f7362,1 +np.float64,0xbfe8644c9170c899,0xbfebbc98e74a227d,1 +np.float64,0x3fc3b2d8e52765b0,0x3fc3c6f7d44cffaa,1 +np.float64,0x3fc57407b92ae810,0x3fc58e12ccdd47a1,1 +np.float64,0x3fd56a560daad4ac,0x3fd5d62b8dfcc058,1 +np.float64,0x3fd595deefab2bbc,0x3fd6046420b2f79b,1 +np.float64,0xbfd5360f50aa6c1e,0xbfd59ebaacd815b8,1 +np.float64,0x3fdfb6aababf6d54,0x3fe0970b8aac9f61,1 +np.float64,0x3ff0000000000000,0x3ff921fb54442d18,1 +np.float64,0xbfeb3a8958f67513,0xbff04872e8278c79,1 +np.float64,0x3f9e1ea6683c3d40,0x3f9e1fc326186705,1 +np.float64,0x3fe6b6d5986d6dac,0x3fe94175bd60b19d,1 +np.float64,0xbfee4d90b77c9b21,0xbff3e60e9134edc2,1 +np.float64,0x3fd806ce0cb00d9c,0x3fd8a14c4855a8f5,1 +np.float64,0x3fd54acc75aa9598,0x3fd5b4b72fcbb5df,1 +np.float64,0xbfe59761f16b2ec4,0xbfe7b2fa5d0244ac,1 +np.float64,0xbfcd4fa3513a9f48,0xbfcd92d0814a5383,1 +np.float64,0xbfdc827523b904ea,0xbfdd8c577b53053c,1 +np.float64,0xbfd4bb7f34a976fe,0xbfd51d00d9a99360,1 +np.float64,0xbfe818bc87f03179,0xbfeb48d1ea0199c5,1 +np.float64,0xbfa8a2e15c3145c0,0xbfa8a5510ba0e45c,1 +np.float64,0xbfb6d15f422da2c0,0xbfb6d922689da015,1 +np.float64,0x3fcd04eaab3a09d8,0x3fcd46131746ef08,1 +np.float64,0x3fcfb5cfbb3f6ba0,0x3fd0059d308237f3,1 +np.float64,0x3fe8dcf609f1b9ec,0x3fec7997973010b6,1 +np.float64,0xbfdf1834d7be306a,0xbfe03c1d4e2b48f0,1 +np.float64,0x3fee82ae50fd055c,0x3ff43b545066fe1a,1 +np.float64,0xbfde039c08bc0738,0xbfdf3d6ed4d2ee5c,1 +np.float64,0x3fec07389bf80e72,0x3ff1137ed0acd161,1 +np.float64,0xbfef44c010fe8980,0xbff5b488ad22a4c5,1 +np.float64,0x3f76e722e02dce00,0x3f76e72ab2759d88,1 +np.float64,0xbfcaa9e6053553cc,0xbfcadc41125fca93,1 +np.float64,0x3fed6088147ac110,0x3ff29c06c4ef35fc,1 +np.float64,0x3fd32bd836a657b0,0x3fd3785fdb75909f,1 +np.float64,0xbfeedbb1d97db764,0xbff4d87f6c82a93c,1 +np.float64,0xbfe40f31d5e81e64,0xbfe5ae292cf258a2,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0xbfeb2b25bc76564c,0xbff039d81388550c,1 +np.float64,0x3fec5008fa78a012,0x3ff1604195801da3,1 +np.float64,0x3fce2d4f293c5aa0,0x3fce76b99c2db4da,1 +np.float64,0xbfdc435412b886a8,0xbfdd45e7b7813f1e,1 +np.float64,0x3fdf2c9d06be593c,0x3fe047cb03c141b6,1 +np.float64,0x3fddefc61ebbdf8c,0x3fdf26fb8fad9fae,1 +np.float64,0x3fab50218436a040,0x3fab537395eaf3bb,1 +np.float64,0xbfd5b95a8fab72b6,0xbfd62a191a59343a,1 +np.float64,0x3fdbf803b4b7f008,0x3fdcf211578e98c3,1 +np.float64,0xbfec8c255979184b,0xbff1a1bee108ed30,1 +np.float64,0x3fe33cdaffe679b6,0x3fe4a3a318cd994f,1 +np.float64,0x3fd8cf585cb19eb0,0x3fd97a408bf3c38c,1 +np.float64,0x3fe919dde07233bc,0x3fecdb0ea13a2455,1 +np.float64,0xbfd5ba35e4ab746c,0xbfd62b024805542d,1 +np.float64,0x3fd2f933e7a5f268,0x3fd343527565e97c,1 +np.float64,0xbfe5b9f8ddeb73f2,0xbfe7e1f772c3e438,1 +np.float64,0x3fe843cd92f0879c,0x3feb8a92d68eae3e,1 +np.float64,0xbfd096b234a12d64,0xbfd0c7beca2c6605,1 +np.float64,0xbfef3363da7e66c8,0xbff58c98dde6c27c,1 +np.float64,0x3fd51b01ddaa3604,0x3fd582109d89ead1,1 +np.float64,0x3fea0f10ff741e22,0x3fee736c2d2a2067,1 +np.float64,0x3fc276e7b724edd0,0x3fc28774520bc6d4,1 +np.float64,0xbfef9abc9f7f3579,0xbff69d49762b1889,1 +np.float64,0x3fe1539ec0e2a73e,0x3fe24f370b7687d0,1 +np.float64,0x3fad72350c3ae460,0x3fad765e7766682a,1 +np.float64,0x3fa289a47c251340,0x3fa28aae12f41646,1 +np.float64,0xbfe5c488e5eb8912,0xbfe7f05d7e7dcddb,1 +np.float64,0xbfc22ef1d7245de4,0xbfc23ebeb990a1b8,1 +np.float64,0x3fe59a0b80eb3418,0x3fe7b695fdcba1de,1 +np.float64,0xbfe9cad619f395ac,0xbfedff0514d91e2c,1 +np.float64,0x3fc8bc74eb3178e8,0x3fc8e48cb22da666,1 +np.float64,0xbfc5389a3f2a7134,0xbfc551cd6febc544,1 +np.float64,0x3fce82feb33d0600,0x3fceceecce2467ef,1 +np.float64,0x3fda346791b468d0,0x3fdaff95154a4ca6,1 +np.float64,0x3fd04501fea08a04,0x3fd073397b32607e,1 +np.float64,0xbfb6be498a2d7c90,0xbfb6c5f93aeb0e57,1 +np.float64,0x3fe1f030dd63e062,0x3fe30ad8fb97cce0,1 +np.float64,0xbfee3fb36dfc7f67,0xbff3d0a5e380b86f,1 +np.float64,0xbfa876773c30ecf0,0xbfa878d9d3df6a3f,1 +np.float64,0x3fdb58296eb6b054,0x3fdc40ceffb17f82,1 +np.float64,0xbfea16b5d8742d6c,0xbfee809b99fd6adc,1 +np.float64,0xbfdc5062b6b8a0c6,0xbfdd547623275fdb,1 +np.float64,0x3fef6db242fedb64,0x3ff61ab4cdaef467,1 +np.float64,0xbfc9f778f933eef0,0xbfca25eef1088167,1 +np.float64,0xbfd22063eba440c8,0xbfd260c8766c69cf,1 +np.float64,0x3fdd2379f2ba46f4,0x3fde40b025cb1ffa,1 +np.float64,0xbfea967af2f52cf6,0xbfef61a178774636,1 +np.float64,0x3fe4f5b49fe9eb6a,0x3fe6da8311a5520e,1 +np.float64,0x3feccde17b799bc2,0x3ff1ebd0ea228b71,1 +np.float64,0x3fe1bb76506376ec,0x3fe2cb56fca01840,1 +np.float64,0xbfef94e583ff29cb,0xbff68aeab8ba75a2,1 +np.float64,0x3fed024a55fa0494,0x3ff228ea5d456e9d,1 +np.float64,0xbfe877b2a8f0ef65,0xbfebdaa1a4712459,1 +np.float64,0x3fef687a8d7ed0f6,0x3ff60cf5fef8d448,1 +np.float64,0xbfeeb2dc8afd65b9,0xbff48dda6a906cd6,1 +np.float64,0x3fdb2e28aeb65c50,0x3fdc12620655eb7a,1 +np.float64,0x3fedc1863afb830c,0x3ff31ae823315e83,1 +np.float64,0xbfe6b1bb546d6376,0xbfe93a38163e3a59,1 +np.float64,0x3fe479c78468f390,0x3fe637e5c0fc5730,1 +np.float64,0x3fbad1fade35a3f0,0x3fbade9a43ca05cf,1 +np.float64,0xbfe2d1c563e5a38b,0xbfe41e712785900c,1 +np.float64,0xbfc08c33ed211868,0xbfc09817a752d500,1 +np.float64,0xbfecce0935f99c12,0xbff1ebfe84524037,1 +np.float64,0x3fce4ef0e73c9de0,0x3fce995638a3dc48,1 +np.float64,0xbfd2fb2343a5f646,0xbfd345592517ca18,1 +np.float64,0x3fd848f7cdb091f0,0x3fd8e8bee5f7b49a,1 +np.float64,0x3fe532b7d2ea6570,0x3fe72b9ac747926a,1 +np.float64,0x3fd616aadcac2d54,0x3fd68d692c5cad42,1 +np.float64,0x3fd7720eb3aee41c,0x3fd801206a0e1e43,1 +np.float64,0x3fee835a35fd06b4,0x3ff43c7175eb7a54,1 +np.float64,0xbfe2e8f70b65d1ee,0xbfe43b2800a947a7,1 +np.float64,0xbfed38f45d7a71e9,0xbff26acd6bde7174,1 +np.float64,0xbfc0c62661218c4c,0xbfc0d28964d66120,1 +np.float64,0x3fe97940bef2f282,0x3fed76b986a74ee3,1 +np.float64,0x3fc96f7dc532def8,0x3fc99b20044c8fcf,1 +np.float64,0xbfd60201eeac0404,0xbfd677675efaaedc,1 +np.float64,0x3fe63c0867ec7810,0x3fe894f060200140,1 +np.float64,0xbfef6144b37ec289,0xbff5fa589a515ba8,1 +np.float64,0xbfde2da0c8bc5b42,0xbfdf6d0b59e3232a,1 +np.float64,0xbfd7401612ae802c,0xbfd7cb74ddd413b9,1 +np.float64,0x3fe41c012de83802,0x3fe5be9d87da3f82,1 +np.float64,0x3fdf501609bea02c,0x3fe05c1d96a2270b,1 +np.float64,0x3fcf9fa1233f3f40,0x3fcff45598e72f07,1 +np.float64,0x3fd4e3895ea9c714,0x3fd547580d8392a2,1 +np.float64,0x3fe1e8ff5fe3d1fe,0x3fe3022a0b86a2ab,1 +np.float64,0xbfe0aa55956154ab,0xbfe18768823da589,1 +np.float64,0x3fb2a0aa26254150,0x3fb2a4e1faff1c93,1 +np.float64,0x3fd3823417a70468,0x3fd3d2f808dbb167,1 +np.float64,0xbfaed323643da640,0xbfaed7e9bef69811,1 +np.float64,0x3fe661e8c4ecc3d2,0x3fe8c9c535f43c16,1 +np.float64,0xbfa429777c2852f0,0xbfa42acd38ba02a6,1 +np.float64,0x3fb5993ea22b3280,0x3fb59fd353e47397,1 +np.float64,0x3fee62d21efcc5a4,0x3ff40788f9278ade,1 +np.float64,0xbf813fb810227f80,0xbf813fc56d8f3c53,1 +np.float64,0x3fd56205deaac40c,0x3fd5cd59671ef193,1 +np.float64,0x3fd31a4de5a6349c,0x3fd365fe401b66e8,1 +np.float64,0xbfec7cc7a478f98f,0xbff190cf69703ca4,1 +np.float64,0xbf755881a02ab100,0xbf755887f52e7794,1 +np.float64,0x3fdd1c92e6ba3924,0x3fde38efb4e8605c,1 +np.float64,0x3fdf49da80be93b4,0x3fe0588af8dd4a34,1 +np.float64,0x3fe1fcdbf2e3f9b8,0x3fe31a27b9d273f2,1 +np.float64,0x3fe2a0f18be541e4,0x3fe3e23b159ce20f,1 +np.float64,0xbfed0f1561fa1e2b,0xbff23820fc0a54ca,1 +np.float64,0x3fe34a006c669400,0x3fe4b419b9ed2b83,1 +np.float64,0xbfd51be430aa37c8,0xbfd583005a4d62e7,1 +np.float64,0x3fe5ec4e336bd89c,0x3fe826caad6b0f65,1 +np.float64,0xbfdad71b1fb5ae36,0xbfdbb25bef8b53d8,1 +np.float64,0xbfe8eac2d871d586,0xbfec8f8cac7952f9,1 +np.float64,0xbfe1d5aef663ab5e,0xbfe2eae14b7ccdfd,1 +np.float64,0x3fec11d3157823a6,0x3ff11e8279506753,1 +np.float64,0xbfe67ff1166cffe2,0xbfe8f3e61c1dfd32,1 +np.float64,0xbfd101eecda203de,0xbfd136e0e9557022,1 +np.float64,0x3fde6c9e5cbcd93c,0x3fdfb48ee7efe134,1 +np.float64,0x3fec3ede9c787dbe,0x3ff14dead1e5cc1c,1 +np.float64,0x3fe7a022086f4044,0x3fea93ce2980b161,1 +np.float64,0xbfc3b2b1b7276564,0xbfc3c6d02d60bb21,1 +np.float64,0x7ff0000000000000,0x7ff8000000000000,1 +np.float64,0x3fe60b5647ec16ac,0x3fe8517ef0544b40,1 +np.float64,0xbfd20ab654a4156c,0xbfd24a2f1b8e4932,1 +np.float64,0xbfe4aa1e2f69543c,0xbfe677005cbd2646,1 +np.float64,0xbfc831cc0b306398,0xbfc8574910d0b86d,1 +np.float64,0xbfc3143495262868,0xbfc3267961b79198,1 +np.float64,0x3fc14d64c1229ac8,0x3fc15afea90a319d,1 +np.float64,0x3fc0a5a207214b48,0x3fc0b1bd2f15c1b0,1 +np.float64,0xbfc0b8351521706c,0xbfc0c4792672d6db,1 +np.float64,0xbfdc383600b8706c,0xbfdd398429e163bd,1 +np.float64,0x3fd9e17321b3c2e8,0x3fdaa4c4d140a622,1 +np.float64,0xbfd44f079ea89e10,0xbfd4aa7d6deff4ab,1 +np.float64,0xbfc3de52a927bca4,0xbfc3f2f8f65f4c3f,1 +np.float64,0x3fe7779d566eef3a,0x3fea57f8592dbaad,1 +np.float64,0xbfe309039e661207,0xbfe462f47f9a64e5,1 +np.float64,0x3fd8e06d08b1c0dc,0x3fd98cc946e440a6,1 +np.float64,0x3fdde66c9ebbccd8,0x3fdf1c68009a8dc1,1 +np.float64,0x3fd4369c6ba86d38,0x3fd490bf460a69e4,1 +np.float64,0xbfe132252fe2644a,0xbfe22775e109cc2e,1 +np.float64,0x3fee15483c7c2a90,0x3ff39111de89036f,1 +np.float64,0xbfc1d5ee8123abdc,0xbfc1e4d66c6871a5,1 +np.float64,0x3fc851c52b30a388,0x3fc877d93fb4ae1a,1 +np.float64,0x3fdaade707b55bd0,0x3fdb85001661fffe,1 +np.float64,0xbfe79fb7f96f3f70,0xbfea9330ec27ac10,1 +np.float64,0xbfe8b0f725f161ee,0xbfec3411c0e4517a,1 +np.float64,0xbfea79f5f374f3ec,0xbfef2e9dd9270488,1 +np.float64,0x3fe0b5fe5b616bfc,0x3fe19512a36a4534,1 +np.float64,0xbfad7c622c3af8c0,0xbfad808fea96a804,1 +np.float64,0xbfe3e24dbce7c49c,0xbfe574b4c1ea9818,1 +np.float64,0xbfe80b038af01607,0xbfeb33fec279576a,1 +np.float64,0xbfef69e2ea7ed3c6,0xbff610a5593a18bc,1 +np.float64,0x3fdcc0bb39b98178,0x3fddd1f8c9a46430,1 +np.float64,0xbfba39976a347330,0xbfba4563bb5369a4,1 +np.float64,0xbfebf9768ef7f2ed,0xbff10548ab725f74,1 +np.float64,0xbfec21c066f84381,0xbff12f2803ba052f,1 +np.float64,0xbfca216a6b3442d4,0xbfca50c5e1e5748e,1 +np.float64,0x3fd5e40da4abc81c,0x3fd65783f9a22946,1 +np.float64,0x3fc235ca17246b98,0x3fc245a8f453173f,1 +np.float64,0x3fecb5b867796b70,0x3ff1d046a0bfda69,1 +np.float64,0x3fcb457fef368b00,0x3fcb7b6daa8165a7,1 +np.float64,0xbfa5ed6f7c2bdae0,0xbfa5ef27244e2e42,1 +np.float64,0x3fecf618a1f9ec32,0x3ff21a86cc104542,1 +np.float64,0x3fe9d95413f3b2a8,0x3fee178dcafa11fc,1 +np.float64,0xbfe93a5357f274a7,0xbfed0f9a565da84a,1 +np.float64,0xbfeb9e45ff773c8c,0xbff0a93cab8e258d,1 +np.float64,0x3fcbd9d0bd37b3a0,0x3fcc134e87cae241,1 +np.float64,0x3fe55d4db76aba9c,0x3fe764a0e028475a,1 +np.float64,0xbfc8a6fc71314df8,0xbfc8ceaafbfc59a7,1 +np.float64,0x3fe0615fa660c2c0,0x3fe1323611c4cbc2,1 +np.float64,0x3fb965558632cab0,0x3fb9700b84de20ab,1 +np.float64,0x8000000000000000,0x8000000000000000,1 +np.float64,0x3fe76776c6eeceee,0x3fea40403e24a9f1,1 +np.float64,0x3fe3b7f672676fec,0x3fe53ece71a1a1b1,1 +np.float64,0xbfa9b82ba4337050,0xbfa9baf15394ca64,1 +np.float64,0xbfe31faf49663f5e,0xbfe47f31b1ca73dc,1 +np.float64,0xbfcc4c6beb3898d8,0xbfcc88c5f814b2c1,1 +np.float64,0x3fd481530aa902a8,0x3fd4df8df03bc155,1 +np.float64,0x3fd47593b8a8eb28,0x3fd4d327ab78a1a8,1 +np.float64,0x3fd70e6ccbae1cd8,0x3fd7962fe8b63d46,1 +np.float64,0x3fd25191f7a4a324,0x3fd2941623c88e02,1 +np.float64,0x3fd0603ef0a0c07c,0x3fd08f64e97588dc,1 +np.float64,0xbfc653bae52ca774,0xbfc6711e5e0d8ea9,1 +np.float64,0xbfd11db8fea23b72,0xbfd153b63c6e8812,1 +np.float64,0xbfea9bde25f537bc,0xbfef6b52268e139a,1 +np.float64,0x1,0x1,1 +np.float64,0xbfefd3806d7fa701,0xbff776dcef9583ca,1 +np.float64,0xbfe0fb8cfde1f71a,0xbfe1e6e2e774a8f8,1 +np.float64,0x3fea384534f4708a,0x3feebadaa389be0d,1 +np.float64,0x3feff761c97feec4,0x3ff866157b9d072d,1 +np.float64,0x3fe7131ccb6e263a,0x3fe9c58b4389f505,1 +np.float64,0x3fe9084f7872109e,0x3fecbed0355dbc8f,1 +np.float64,0x3f708e89e0211d00,0x3f708e8cd4946b9e,1 +np.float64,0xbfe39185f067230c,0xbfe50e1cd178244d,1 +np.float64,0x3fd67cc1a9acf984,0x3fd6fa514784b48c,1 +np.float64,0xbfecaef005f95de0,0xbff1c89c9c3ef94a,1 +np.float64,0xbfe12eec81e25dd9,0xbfe223a4285bba9a,1 +np.float64,0x3fbe7f9faa3cff40,0x3fbe92363525068d,1 +np.float64,0xbfe1950b2b632a16,0xbfe29d45fc1e4ce9,1 +np.float64,0x3fe45049e6e8a094,0x3fe6020de759e383,1 +np.float64,0x3fe4d10c8969a21a,0x3fe6aa1fe42cbeb9,1 +np.float64,0xbfe9d04658f3a08d,0xbfee08370a0dbf0c,1 +np.float64,0x3fe14fb314e29f66,0x3fe24a8d73663521,1 +np.float64,0xbfef4abfe4fe9580,0xbff5c2c1ff1250ca,1 +np.float64,0xbfe6162b366c2c56,0xbfe86073ac3c6243,1 +np.float64,0x3feffe781e7ffcf0,0x3ff8d2cbedd6a1b5,1 +np.float64,0xbff0000000000000,0xbff921fb54442d18,1 +np.float64,0x3fc1dc45ad23b888,0x3fc1eb3d9bddda58,1 +np.float64,0xbfe793f6fcef27ee,0xbfea81c93d65aa64,1 +np.float64,0x3fdef6d2bbbdeda4,0x3fe029079d42efb5,1 +np.float64,0xbfdf0ac479be1588,0xbfe0346dbc95963f,1 +np.float64,0xbfd33927d7a67250,0xbfd38653f90a5b73,1 +np.float64,0xbfe248b072e49161,0xbfe37631ef6572e1,1 +np.float64,0xbfc8ceb6af319d6c,0xbfc8f7288657f471,1 +np.float64,0x3fdd7277fcbae4f0,0x3fde99886e6766ef,1 +np.float64,0xbfe0d30c6561a619,0xbfe1b72f90bf53d6,1 +np.float64,0xbfcb0fe07d361fc0,0xbfcb448e2eae9542,1 +np.float64,0xbfe351f57fe6a3eb,0xbfe4be13eef250f2,1 +np.float64,0x3fe85ec02cf0bd80,0x3febb407e2e52e4c,1 +np.float64,0x3fc8bc59b53178b0,0x3fc8e470f65800ec,1 +np.float64,0xbfd278d447a4f1a8,0xbfd2bd133c9c0620,1 +np.float64,0x3feda5cfd87b4ba0,0x3ff2f5ab4324f43f,1 +np.float64,0xbfd2b32a36a56654,0xbfd2fa09c36afd34,1 +np.float64,0xbfed4a81cb7a9504,0xbff28077a4f4fff4,1 +np.float64,0x3fdf079bf9be0f38,0x3fe0329f7fb13f54,1 +np.float64,0x3fd14097f6a28130,0x3fd177e9834ec23f,1 +np.float64,0xbfaeab11843d5620,0xbfaeafc5531eb6b5,1 +np.float64,0xbfac3f8c14387f20,0xbfac433893d53360,1 +np.float64,0xbfc139d7ed2273b0,0xbfc14743adbbe660,1 +np.float64,0x3fe78cb02cef1960,0x3fea7707f76edba9,1 +np.float64,0x3fefe16b41ffc2d6,0x3ff7bff36a7aa7b8,1 +np.float64,0x3fec5260d378a4c2,0x3ff162c588b0da38,1 +np.float64,0x3fedb146f17b628e,0x3ff304f90d3a15d1,1 +np.float64,0x3fd1fd45f7a3fa8c,0x3fd23c2dc3929e20,1 +np.float64,0x3fe0898a5ee11314,0x3fe1610c63e726eb,1 +np.float64,0x3fe7719946eee332,0x3fea4f205eecb59f,1 +np.float64,0x3fe955218972aa44,0x3fed3b530c1f7651,1 +np.float64,0x3fe0ccbf4461997e,0x3fe1afc7b4587836,1 +np.float64,0xbfe9204314f24086,0xbfece5605780e346,1 +np.float64,0xbfe552017feaa403,0xbfe755773cbd74d5,1 +np.float64,0x3fd8ce4b32b19c98,0x3fd9791c8dd44eae,1 +np.float64,0x3fef89acd9ff135a,0x3ff668f78adf7ced,1 +np.float64,0x3fc9d713ad33ae28,0x3fca04da6c293bbd,1 +np.float64,0xbfe22d9c4de45b38,0xbfe3553effadcf92,1 +np.float64,0x3fa5cda38c2b9b40,0x3fa5cf53c5787482,1 +np.float64,0x3fa878ebdc30f1e0,0x3fa87b4f2bf1d4c3,1 +np.float64,0x3fe8030353700606,0x3feb27e196928789,1 +np.float64,0x3fb50607222a0c10,0x3fb50c188ce391e6,1 +np.float64,0x3fd9ba4ab4b37494,0x3fda79fa8bd40f45,1 +np.float64,0x3fb564598e2ac8b0,0x3fb56abe42d1ba13,1 +np.float64,0xbfd1177c83a22efa,0xbfd14d3d7ef30cc4,1 +np.float64,0xbfd952cec7b2a59e,0xbfda09215d17c0ac,1 +np.float64,0x3fe1d8066663b00c,0x3fe2edb35770b8dd,1 +np.float64,0xbfc89427a3312850,0xbfc8bb7a7c389497,1 +np.float64,0xbfe86ebfd3f0dd80,0xbfebccc2ba0f506c,1 +np.float64,0x3fc390578b2720b0,0x3fc3a40cb7f5f728,1 +np.float64,0xbfd122f9b8a245f4,0xbfd15929dc57a897,1 +np.float64,0x3f8d0636d03a0c80,0x3f8d06767de576df,1 +np.float64,0xbfe4b55d8b696abb,0xbfe685be537a9637,1 +np.float64,0xbfdfd51cf9bfaa3a,0xbfe0a894fcff0c76,1 +np.float64,0xbfd37c1f52a6f83e,0xbfd3cc9593c37aad,1 +np.float64,0x3fd0e8283ea1d050,0x3fd11c25c800785a,1 +np.float64,0x3fd3160784a62c10,0x3fd36183a6c2880c,1 +np.float64,0x3fd4c66e57a98cdc,0x3fd5288fe3394eff,1 +np.float64,0x3fee2f7e3afc5efc,0x3ff3b8063eb30cdc,1 +np.float64,0xbfe526773a6a4cee,0xbfe71b4364215b18,1 +np.float64,0x3fea01181e740230,0x3fee5b65eccfd130,1 +np.float64,0xbfe51c03f76a3808,0xbfe70d5919d37587,1 +np.float64,0x3fd97e1375b2fc28,0x3fda3845da40b22b,1 +np.float64,0x3fd5c14a14ab8294,0x3fd632890d07ed03,1 +np.float64,0xbfec9b474279368e,0xbff1b28f50584fe3,1 +np.float64,0x3fe0139ca860273a,0x3fe0d7fc377f001c,1 +np.float64,0x3fdb080c9db61018,0x3fdbe85056358fa0,1 +np.float64,0xbfdd72ceb1bae59e,0xbfde99ea171661eb,1 +np.float64,0xbfe64e934fec9d26,0xbfe8aec2ef24be63,1 +np.float64,0x3fd1036a93a206d4,0x3fd1386adabe01bd,1 +np.float64,0x3febc9d4a5f793aa,0x3ff0d4c069f1e67d,1 +np.float64,0xbfe547a16fea8f43,0xbfe747902fe6fb4d,1 +np.float64,0x3fc289b0f9251360,0x3fc29a709de6bdd9,1 +np.float64,0xbfe694494a6d2892,0xbfe9108f3dc133e2,1 +np.float64,0x3fd827dfe4b04fc0,0x3fd8c4fe40532b91,1 +np.float64,0xbfe8b89418f17128,0xbfec400c5a334b2e,1 +np.float64,0x3fed5605147aac0a,0x3ff28ed1f612814a,1 +np.float64,0xbfed36af31fa6d5e,0xbff26804e1f71af0,1 +np.float64,0x3fdbb01c02b76038,0x3fdca2381558bbf0,1 +np.float64,0x3fe2a951666552a2,0x3fe3ec88f780f9e6,1 +np.float64,0x3fe662defbecc5be,0x3fe8cb1dbfca98ab,1 +np.float64,0x3fd098b1b3a13164,0x3fd0c9d064e4eaf2,1 +np.float64,0x3fefa10edeff421e,0x3ff6b1c6187b18a8,1 +np.float64,0xbfec4feb7a789fd7,0xbff16021ef37a219,1 +np.float64,0x3fd8e415bbb1c82c,0x3fd990c1f8b786bd,1 +np.float64,0xbfead5a09275ab41,0xbfefd44fab5b4f6e,1 +np.float64,0xbfe8666c16f0ccd8,0xbfebbfe0c9f2a9ae,1 +np.float64,0x3fdc962132b92c44,0x3fdda2525a6f406c,1 +np.float64,0xbfe2037f03e406fe,0xbfe3222ec2a3449e,1 +np.float64,0xbfec82c27e790585,0xbff197626ea9df1e,1 +np.float64,0x3fd2b4e03ca569c0,0x3fd2fbd3c7fda23e,1 +np.float64,0xbfe9b0dee5f361be,0xbfedd34f6d3dfe8a,1 +np.float64,0x3feef45cd17de8ba,0x3ff508180687b591,1 +np.float64,0x3f82c39bf0258700,0x3f82c3ad24c3b3f1,1 +np.float64,0xbfca848cfd350918,0xbfcab612ce258546,1 +np.float64,0x3fd6442aaaac8854,0x3fd6bdea54016e48,1 +np.float64,0x3fe550799e6aa0f4,0x3fe75369c9ea5b1e,1 +np.float64,0xbfe0e9d5a361d3ac,0xbfe1d20011139d89,1 +np.float64,0x3fbfc9ff1e3f9400,0x3fbfdf0ea6885c80,1 +np.float64,0xbfa187e8b4230fd0,0xbfa188c95072092e,1 +np.float64,0x3fcd28c9533a5190,0x3fcd6ae879c21b47,1 +np.float64,0x3fc6227ec52c4500,0x3fc63f1fbb441d29,1 +np.float64,0x3fe9b7a2ed736f46,0x3feddeab49b2d176,1 +np.float64,0x3fd4aee93da95dd4,0x3fd50fb3b71e0339,1 +np.float64,0xbfe164dacf62c9b6,0xbfe263bb2f7dd5d9,1 +np.float64,0x3fec62e525f8c5ca,0x3ff17496416d9921,1 +np.float64,0x3fdd363ee0ba6c7c,0x3fde55c6a49a5f86,1 +np.float64,0x3fe65cbf75ecb97e,0x3fe8c28d31ff3ebd,1 +np.float64,0xbfe76d27ca6eda50,0xbfea4899e3661425,1 +np.float64,0xbfc305738d260ae8,0xbfc3178dcfc9d30f,1 +np.float64,0xbfd3aa2a54a75454,0xbfd3fcf1e1ce8328,1 +np.float64,0x3fd1609fc9a2c140,0x3fd1992efa539b9f,1 +np.float64,0xbfac1291bc382520,0xbfac162cc7334b4d,1 +np.float64,0xbfedb461ea7b68c4,0xbff309247850455d,1 +np.float64,0xbfe8d2adf8f1a55c,0xbfec6947be90ba92,1 +np.float64,0xbfd7128965ae2512,0xbfd79a9855bcfc5a,1 +np.float64,0x3fe8deb09471bd62,0x3fec7c56b3aee531,1 +np.float64,0xbfe5f4d329ebe9a6,0xbfe8327ea8189af8,1 +np.float64,0xbfd3b46ac9a768d6,0xbfd407b80b12ff17,1 +np.float64,0x3fec899d7cf9133a,0x3ff19ef26baca36f,1 +np.float64,0xbfec192fd5783260,0xbff126306e507fd0,1 +np.float64,0x3fe945bdaef28b7c,0x3fed222f787310bf,1 +np.float64,0xbfeff9635d7ff2c7,0xbff87d6773f318eb,1 +np.float64,0xbfd604b81cac0970,0xbfd67a4aa852559a,1 +np.float64,0x3fcd1cc9d53a3990,0x3fcd5e962e237c24,1 +np.float64,0xbfed77b0fffaef62,0xbff2b97a1c9b6483,1 +np.float64,0xbfc9c69325338d28,0xbfc9f401500402fb,1 +np.float64,0xbfdf97e246bf2fc4,0xbfe0855601ea9db3,1 +np.float64,0x3fc7e6304f2fcc60,0x3fc80a4e718504cd,1 +np.float64,0x3fec3b599e7876b4,0x3ff14a2d1b9c68e6,1 +np.float64,0xbfe98618e1f30c32,0xbfed8bfbb31c394a,1 +np.float64,0xbfe59b3c0feb3678,0xbfe7b832d6df81de,1 +np.float64,0xbfe54ce2fe6a99c6,0xbfe74e9a85be4116,1 +np.float64,0x3fc9db49cb33b690,0x3fca092737ef500a,1 +np.float64,0xbfb4a922ae295248,0xbfb4aee4e39078a9,1 +np.float64,0xbfd0e542e0a1ca86,0xbfd11925208d66af,1 +np.float64,0x3fd70543f2ae0a88,0x3fd78c5e9238a3ee,1 +np.float64,0x3fd67f7a7facfef4,0x3fd6fd3998df8545,1 +np.float64,0xbfe40b643d6816c8,0xbfe5a947e427f298,1 +np.float64,0xbfcd85f69b3b0bec,0xbfcdcaa24b75f1a3,1 +np.float64,0x3fec705fb4f8e0c0,0x3ff1833c82163ee2,1 +np.float64,0x3fb37650ea26eca0,0x3fb37b20c16fb717,1 +np.float64,0x3fe5ebfa55ebd7f4,0x3fe826578d716e70,1 +np.float64,0x3fe991dfe5f323c0,0x3fed9f8a4bf1f588,1 +np.float64,0xbfd658bd0aacb17a,0xbfd6d3dd06e54900,1 +np.float64,0xbfc24860252490c0,0xbfc258701a0b9290,1 +np.float64,0xbfefb8d763ff71af,0xbff705b6ea4a569d,1 +np.float64,0x3fb8fcb4ae31f970,0x3fb906e809e7899f,1 +np.float64,0x3fce6343cb3cc688,0x3fceae41d1629625,1 +np.float64,0xbfd43d5a11a87ab4,0xbfd497da25687e07,1 +np.float64,0xbfe9568851f2ad11,0xbfed3d9e5fe83a76,1 +np.float64,0x3fe1b66153e36cc2,0x3fe2c53c7e016271,1 +np.float64,0x3fef27452bfe4e8a,0x3ff571b3486ed416,1 +np.float64,0x3fca87c0a7350f80,0x3fcab958a7bb82d4,1 +np.float64,0xbfd8776a8fb0eed6,0xbfd91afaf2f50edf,1 +np.float64,0x3fe9522a76f2a454,0x3fed3679264e1525,1 +np.float64,0x3fea14ff2cf429fe,0x3fee7da6431cc316,1 +np.float64,0x3fe970618bf2e0c4,0x3fed68154d54dd97,1 +np.float64,0x3fd3410cfca68218,0x3fd38e9b21792240,1 +np.float64,0xbf6a8070c0350100,0xbf6a8073c7c34517,1 +np.float64,0xbfbe449de23c8938,0xbfbe56c8e5e4d98b,1 +np.float64,0x3fedbc92e27b7926,0x3ff314313216d8e6,1 +np.float64,0xbfe3be4706677c8e,0xbfe546d3ceb85aea,1 +np.float64,0x3fe30cd6d76619ae,0x3fe467b6f2664a8d,1 +np.float64,0x3fd7d69b21afad38,0x3fd86d54284d05ad,1 +np.float64,0xbfe501001fea0200,0xbfe6e978afcff4d9,1 +np.float64,0xbfe44ba3d8e89748,0xbfe5fc0a31cd1e3e,1 +np.float64,0x3fec52f7c078a5f0,0x3ff16367acb209b2,1 +np.float64,0xbfcb19efcb3633e0,0xbfcb4ed9235a7d47,1 +np.float64,0xbfab86796c370cf0,0xbfab89df7bf15710,1 +np.float64,0xbfb962feda32c600,0xbfb96db1e1679c98,1 +np.float64,0x3fe0dd14e861ba2a,0x3fe1c2fc72810567,1 +np.float64,0x3fe41bcc6de83798,0x3fe5be59b7f9003b,1 +np.float64,0x3fc82f4c4f305e98,0x3fc854bd9798939f,1 +np.float64,0xbfcd143a613a2874,0xbfcd55cbd1619d84,1 +np.float64,0xbfd52da61baa5b4c,0xbfd595d0b3543439,1 +np.float64,0xbfb71b4a8e2e3698,0xbfb7235a4ab8432f,1 +np.float64,0xbfec141a19782834,0xbff120e1e39fc856,1 +np.float64,0xbfdba9319db75264,0xbfdc9a8ca2578bb2,1 +np.float64,0xbfbce5d74639cbb0,0xbfbcf5a4878cfa51,1 +np.float64,0x3fde67f7b3bccff0,0x3fdfaf45a9f843ad,1 +np.float64,0xbfe12d87bc625b10,0xbfe221fd4476eb71,1 +np.float64,0x3fe35b8f6be6b71e,0x3fe4ca20f65179e1,1 +np.float64,0xbfdbada1d3b75b44,0xbfdc9f78b19f93d1,1 +np.float64,0xbfc60159c52c02b4,0xbfc61d79b879f598,1 +np.float64,0x3fd6b81c38ad7038,0x3fd739c27bfa16d8,1 +np.float64,0xbfd646a253ac8d44,0xbfd6c08c19612bbb,1 +np.float64,0xbfe6babef0ed757e,0xbfe94703d0bfa311,1 +np.float64,0xbfed5671f1faace4,0xbff28f5a3f3683d0,1 +np.float64,0x3fc01d1e85203a40,0x3fc02817ec0dfd38,1 +np.float64,0xbfe9188a61f23115,0xbfecd8eb5da84223,1 +np.float64,0x3fdca3bab9b94774,0x3fddb1868660c239,1 +np.float64,0xbfa255750c24aaf0,0xbfa25675f7b36343,1 +np.float64,0x3fb3602db626c060,0x3fb364ed2d5b2876,1 +np.float64,0xbfd30a14bda6142a,0xbfd354ff703b8862,1 +np.float64,0xbfe1cfe381639fc7,0xbfe2e3e720b968c8,1 +np.float64,0xbfd2af6a4fa55ed4,0xbfd2f61e190bcd1f,1 +np.float64,0xbfe93c50937278a1,0xbfed12d64bb10d73,1 +np.float64,0x3fddd8bc44bbb178,0x3fdf0ced7f9005cc,1 +np.float64,0x3fdb2bc73cb65790,0x3fdc0fc0e18e425e,1 +np.float64,0xbfd073f6aba0e7ee,0xbfd0a3cb5468a961,1 +np.float64,0x3fed4bad7b7a975a,0x3ff281ebeb75e414,1 +np.float64,0xbfdc75b50bb8eb6a,0xbfdd7e1a7631cb22,1 +np.float64,0x3fd458a90fa8b154,0x3fd4b4a5817248ce,1 +np.float64,0x3feead5db57d5abc,0x3ff484286fab55ff,1 +np.float64,0x3fb3894382271280,0x3fb38e217b4e7905,1 +np.float64,0xffefffffffffffff,0x7ff8000000000000,1 +np.float64,0xbfe428212ae85042,0xbfe5ce36f226bea8,1 +np.float64,0xbfc08b39f7211674,0xbfc0971b93ebc7ad,1 +np.float64,0xbfc2e7cf5525cfa0,0xbfc2f994eb72b623,1 +np.float64,0xbfdb0d85afb61b0c,0xbfdbee5a2de3c5db,1 +np.float64,0xfff0000000000000,0x7ff8000000000000,1 +np.float64,0xbfd0d36af7a1a6d6,0xbfd106a5f05ef6ff,1 +np.float64,0xbfc333d0912667a0,0xbfc3467162b7289a,1 +np.float64,0x3fcdababc53b5758,0x3fcdf16458c20fa8,1 +np.float64,0x3fd0821b38a10438,0x3fd0b26e3e0b9185,1 +np.float64,0x0,0x0,1 +np.float64,0x3feb7f70edf6fee2,0x3ff08ae81854bf20,1 +np.float64,0x3fe6e075716dc0ea,0x3fe97cc5254be6ff,1 +np.float64,0x3fea13b682f4276e,0x3fee7b6f18073b5b,1 diff --git a/local_packages/numpy/_core/tests/data/umath-validation-set-arcsinh.csv b/local_packages/numpy/_core/tests/data/umath-validation-set-arcsinh.csv new file mode 100644 index 0000000..9eedb1a --- /dev/null +++ b/local_packages/numpy/_core/tests/data/umath-validation-set-arcsinh.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0xbf24142a,0xbf1a85ef,2 +np.float32,0x3e71cf91,0x3e6f9e37,2 +np.float32,0xe52a7,0xe52a7,2 +np.float32,0x3ef1e074,0x3ee9add9,2 +np.float32,0x806160ac,0x806160ac,2 +np.float32,0x7e2d59a2,0x42af4798,2 +np.float32,0xbf32cac9,0xbf26bf96,2 +np.float32,0x3f081701,0x3f026142,2 +np.float32,0x3f23cc88,0x3f1a499c,2 +np.float32,0xbf090d94,0xbf033ad0,2 +np.float32,0x803af2fc,0x803af2fc,2 +np.float32,0x807eb17e,0x807eb17e,2 +np.float32,0x5c0d8e,0x5c0d8e,2 +np.float32,0x3f7b79d2,0x3f5e6b1d,2 +np.float32,0x806feeae,0x806feeae,2 +np.float32,0x3e4b423a,0x3e49f274,2 +np.float32,0x3f49e5ac,0x3f394a41,2 +np.float32,0x3f18cd4e,0x3f10ef35,2 +np.float32,0xbed75734,0xbed17322,2 +np.float32,0x7f591151,0x42b28085,2 +np.float32,0xfefe9da6,0xc2b16f51,2 +np.float32,0xfeac90fc,0xc2b0a82a,2 +np.float32,0x805c198e,0x805c198e,2 +np.float32,0x7f66d6df,0x42b2a004,2 +np.float32,0x505438,0x505438,2 +np.float32,0xbf39a209,0xbf2c5255,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0xc84cb,0xc84cb,2 +np.float32,0x7f07d6f5,0x42b19088,2 +np.float32,0x79d7e4,0x79d7e4,2 +np.float32,0xff32f6a0,0xc2b21db1,2 +np.float32,0x7c005c05,0x42a9222e,2 +np.float32,0x3ec449aa,0x3ebfc5ae,2 +np.float32,0x800ec323,0x800ec323,2 +np.float32,0xff1c904c,0xc2b1d93a,2 +np.float32,0x7f4eca52,0x42b267b0,2 +np.float32,0x3ee06540,0x3ed9c514,2 +np.float32,0x6aab4,0x6aab4,2 +np.float32,0x3e298d8c,0x3e28c99e,2 +np.float32,0xbf38d162,0xbf2ba94a,2 +np.float32,0x2d9083,0x2d9083,2 +np.float32,0x7eae5032,0x42b0ad52,2 +np.float32,0x3ead5b3c,0x3eaa3443,2 +np.float32,0x806fef66,0x806fef66,2 +np.float32,0x3f5b614e,0x3f46ca71,2 +np.float32,0xbf4c906a,0xbf3b60fc,2 +np.float32,0x8049453e,0x8049453e,2 +np.float32,0x3d305220,0x3d304432,2 +np.float32,0x2e1a89,0x2e1a89,2 +np.float32,0xbf4e74ec,0xbf3cdacf,2 +np.float32,0x807a827a,0x807a827a,2 +np.float32,0x80070745,0x80070745,2 +np.float32,0xbe1ba2fc,0xbe1b0b28,2 +np.float32,0xbe5131d0,0xbe4fc421,2 +np.float32,0x5bfd98,0x5bfd98,2 +np.float32,0xbd8e1a48,0xbd8dfd27,2 +np.float32,0x8006c160,0x8006c160,2 +np.float32,0x346490,0x346490,2 +np.float32,0xbdbdf060,0xbdbdaaf0,2 +np.float32,0x3ea9d0c4,0x3ea6d8c7,2 +np.float32,0xbf2aaa28,0xbf200916,2 +np.float32,0xbf160c26,0xbf0e9047,2 +np.float32,0x80081fd4,0x80081fd4,2 +np.float32,0x7db44283,0x42adf8b6,2 +np.float32,0xbf1983f8,0xbf118bf5,2 +np.float32,0x2c4a35,0x2c4a35,2 +np.float32,0x6165a7,0x6165a7,2 +np.float32,0xbe776b44,0xbe75129f,2 +np.float32,0xfe81841a,0xc2b0153b,2 +np.float32,0xbf7d1b2f,0xbf5f9461,2 +np.float32,0x80602d36,0x80602d36,2 +np.float32,0xfe8d5046,0xc2b041dd,2 +np.float32,0xfe5037bc,0xc2afa56d,2 +np.float32,0x4bbea6,0x4bbea6,2 +np.float32,0xfea039de,0xc2b0822d,2 +np.float32,0x7ea627a4,0x42b094c7,2 +np.float32,0x3f556198,0x3f423591,2 +np.float32,0xfedbae04,0xc2b123c1,2 +np.float32,0xbe30432c,0xbe2f6744,2 +np.float32,0x80202c77,0x80202c77,2 +np.float32,0xff335cc1,0xc2b21ed5,2 +np.float32,0x3e1e1ebe,0x3e1d7f95,2 +np.float32,0x8021c9c0,0x8021c9c0,2 +np.float32,0x7dc978,0x7dc978,2 +np.float32,0xff6cfabc,0xc2b2ad75,2 +np.float32,0x7f2bd542,0x42b208e0,2 +np.float32,0x53bf33,0x53bf33,2 +np.float32,0x804e04bb,0x804e04bb,2 +np.float32,0x3f30d2f9,0x3f2521ca,2 +np.float32,0x3dfde876,0x3dfd4316,2 +np.float32,0x46f8b1,0x46f8b1,2 +np.float32,0xbd5f9e20,0xbd5f81ba,2 +np.float32,0x807d6a22,0x807d6a22,2 +np.float32,0xff3881da,0xc2b22d50,2 +np.float32,0x1b1cb5,0x1b1cb5,2 +np.float32,0x3f75f2d0,0x3f5a7435,2 +np.float32,0xfee39c1a,0xc2b135e9,2 +np.float32,0x7f79f14a,0x42b2c8b9,2 +np.float32,0x8000e2d1,0x8000e2d1,2 +np.float32,0xab779,0xab779,2 +np.float32,0xbede6690,0xbed7f102,2 +np.float32,0x76e20d,0x76e20d,2 +np.float32,0x3ed714cb,0x3ed135e9,2 +np.float32,0xbeaa6f44,0xbea76f31,2 +np.float32,0x7f7dc8b1,0x42b2d089,2 +np.float32,0x108cb2,0x108cb2,2 +np.float32,0x7d37ba82,0x42ac9f94,2 +np.float32,0x3f31d068,0x3f25f221,2 +np.float32,0x8010a331,0x8010a331,2 +np.float32,0x3f2fdc7c,0x3f2456cd,2 +np.float32,0x7f7a9a67,0x42b2ca13,2 +np.float32,0x3f2acb31,0x3f202492,2 +np.float32,0x7f54fa94,0x42b276c9,2 +np.float32,0x3ebf8a70,0x3ebb553c,2 +np.float32,0x7f75b1a7,0x42b2bff2,2 +np.float32,0x7daebe07,0x42ade8cc,2 +np.float32,0xbd3a3ef0,0xbd3a2e86,2 +np.float32,0x8078ec9e,0x8078ec9e,2 +np.float32,0x3eda206a,0x3ed403ec,2 +np.float32,0x3f7248f2,0x3f57cd77,2 +np.float32,0x805d55ba,0x805d55ba,2 +np.float32,0xff30dc3e,0xc2b217a3,2 +np.float32,0xbe12b27c,0xbe123333,2 +np.float32,0xbf6ed9cf,0xbf554cd0,2 +np.float32,0xbed9eb5c,0xbed3d31c,2 +np.float32,0xbf1c9aea,0xbf14307b,2 +np.float32,0x3f540ac4,0x3f412de2,2 +np.float32,0x800333ac,0x800333ac,2 +np.float32,0x3f74cdb4,0x3f59a09a,2 +np.float32,0xbf41dc41,0xbf32ee6f,2 +np.float32,0xff2c7804,0xc2b20ac4,2 +np.float32,0x514493,0x514493,2 +np.float32,0xbddf1220,0xbddea1cf,2 +np.float32,0xfeaf74de,0xc2b0b0ab,2 +np.float32,0xfe5dfb30,0xc2afc633,2 +np.float32,0xbf4785c4,0xbf376bdb,2 +np.float32,0x80191cd3,0x80191cd3,2 +np.float32,0xfe44f708,0xc2af88fb,2 +np.float32,0x3d4cd8a0,0x3d4cc2ca,2 +np.float32,0x7f572eff,0x42b27c0f,2 +np.float32,0x8031bacb,0x8031bacb,2 +np.float32,0x7f2ea684,0x42b21133,2 +np.float32,0xbea1976a,0xbe9f05bb,2 +np.float32,0x3d677b41,0x3d675bc1,2 +np.float32,0x3f61bf24,0x3f4b9870,2 +np.float32,0x7ef55ddf,0x42b15c5f,2 +np.float32,0x3eabcb20,0x3ea8b91c,2 +np.float32,0xff73d9ec,0xc2b2bc18,2 +np.float32,0x77b9f5,0x77b9f5,2 +np.float32,0x4c6c6c,0x4c6c6c,2 +np.float32,0x7ed09c94,0x42b10949,2 +np.float32,0xdeeec,0xdeeec,2 +np.float32,0x7eac5858,0x42b0a782,2 +np.float32,0x7e190658,0x42af07bd,2 +np.float32,0xbe3c8980,0xbe3b7ce2,2 +np.float32,0x8059e86e,0x8059e86e,2 +np.float32,0xff201836,0xc2b1e4a5,2 +np.float32,0xbeac109c,0xbea8fafb,2 +np.float32,0x7edd1e2b,0x42b12718,2 +np.float32,0x639cd8,0x639cd8,2 +np.float32,0x3f5e4cae,0x3f490059,2 +np.float32,0x3d84c185,0x3d84a9c4,2 +np.float32,0xbe8c1130,0xbe8a605b,2 +np.float32,0x80000000,0x80000000,2 +np.float32,0x3f1da5e4,0x3f151404,2 +np.float32,0x7f75a873,0x42b2bfdf,2 +np.float32,0xbd873540,0xbd871c28,2 +np.float32,0xbe8e5e10,0xbe8c9808,2 +np.float32,0x7f004bf2,0x42b17347,2 +np.float32,0x800000,0x800000,2 +np.float32,0xbf6d6b79,0xbf544095,2 +np.float32,0x7ed7b563,0x42b11a6a,2 +np.float32,0x80693745,0x80693745,2 +np.float32,0x3ee0f608,0x3eda49a8,2 +np.float32,0xfe1285a4,0xc2aef181,2 +np.float32,0x72d946,0x72d946,2 +np.float32,0x6a0dca,0x6a0dca,2 +np.float32,0x3f5c9df6,0x3f47ba99,2 +np.float32,0xff002af6,0xc2b172c4,2 +np.float32,0x3f4ac98f,0x3f39fd0a,2 +np.float32,0x8066acf7,0x8066acf7,2 +np.float32,0xbcaa4e60,0xbcaa4b3c,2 +np.float32,0x80162813,0x80162813,2 +np.float32,0xff34b318,0xc2b222a2,2 +np.float32,0x7f1ce33c,0x42b1da49,2 +np.float32,0x3f0e55ab,0x3f07ddb0,2 +np.float32,0x7c75d996,0x42aa6eec,2 +np.float32,0xbf221bc6,0xbf18dc89,2 +np.float32,0x3f5a1a4c,0x3f45d1d4,2 +np.float32,0x7f2451b8,0x42b1f1fb,2 +np.float32,0x3ec55ca0,0x3ec0c655,2 +np.float32,0x3f752dc2,0x3f59e600,2 +np.float32,0xbe33f638,0xbe330c4d,2 +np.float32,0x3e2a9148,0x3e29c9d8,2 +np.float32,0x3f3362a1,0x3f273c01,2 +np.float32,0x5f83b3,0x5f83b3,2 +np.float32,0x3e362488,0x3e353216,2 +np.float32,0x140bcf,0x140bcf,2 +np.float32,0x7e3e96df,0x42af7822,2 +np.float32,0xbebc7082,0xbeb86ce6,2 +np.float32,0xbe92a92e,0xbe90b9d2,2 +np.float32,0xff3d8afc,0xc2b23b19,2 +np.float32,0x804125e3,0x804125e3,2 +np.float32,0x3f3675d1,0x3f29bedb,2 +np.float32,0xff70bb09,0xc2b2b57f,2 +np.float32,0x3f29681c,0x3f1efcd2,2 +np.float32,0xbdc70380,0xbdc6b3a8,2 +np.float32,0x54e0dd,0x54e0dd,2 +np.float32,0x3d545de0,0x3d54458c,2 +np.float32,0x7f800000,0x7f800000,2 +np.float32,0x8014a4c2,0x8014a4c2,2 +np.float32,0xbe93f58a,0xbe91f938,2 +np.float32,0x17de33,0x17de33,2 +np.float32,0xfefb679a,0xc2b168d2,2 +np.float32,0xbf23423e,0xbf19d511,2 +np.float32,0x7e893fa1,0x42b032ec,2 +np.float32,0x3f44fe2d,0x3f356bda,2 +np.float32,0xbebb2e78,0xbeb73e8f,2 +np.float32,0x3f5632e0,0x3f42d633,2 +np.float32,0x3ddd8698,0x3ddd1896,2 +np.float32,0x80164ea7,0x80164ea7,2 +np.float32,0x80087b37,0x80087b37,2 +np.float32,0xbf06ab1e,0xbf011f95,2 +np.float32,0x3db95524,0x3db9149f,2 +np.float32,0x7aa1fbb3,0x42a570a1,2 +np.float32,0xbd84fc48,0xbd84e467,2 +np.float32,0x3d65c6f5,0x3d65a826,2 +np.float32,0xfe987800,0xc2b068c4,2 +np.float32,0x7ec59532,0x42b0ed7a,2 +np.float32,0x3ea0232c,0x3e9da29a,2 +np.float32,0x80292a08,0x80292a08,2 +np.float32,0x734cfe,0x734cfe,2 +np.float32,0x3f3b6d63,0x3f2dc596,2 +np.float32,0x3f27bcc1,0x3f1d97e6,2 +np.float32,0xfe1da554,0xc2af16f9,2 +np.float32,0x7c91f5,0x7c91f5,2 +np.float32,0xfe4e78cc,0xc2afa11e,2 +np.float32,0x7e4b4e08,0x42af9933,2 +np.float32,0xfe0949ec,0xc2aed02e,2 +np.float32,0x7e2f057f,0x42af4c81,2 +np.float32,0xbf200ae0,0xbf171ce1,2 +np.float32,0x3ebcc244,0x3eb8b99e,2 +np.float32,0xbf68f58d,0xbf50f7aa,2 +np.float32,0x4420b1,0x4420b1,2 +np.float32,0x3f5b61bf,0x3f46cac7,2 +np.float32,0x3fec78,0x3fec78,2 +np.float32,0x7f4183c8,0x42b245b7,2 +np.float32,0xbf10587c,0xbf099ee2,2 +np.float32,0x0,0x0,2 +np.float32,0x7ec84dc3,0x42b0f47a,2 +np.float32,0x3f5fbd7b,0x3f4a166d,2 +np.float32,0xbd884eb8,0xbd883502,2 +np.float32,0xfe3f10a4,0xc2af7969,2 +np.float32,0xff3f4920,0xc2b23fc9,2 +np.float32,0x8013900f,0x8013900f,2 +np.float32,0x8003529d,0x8003529d,2 +np.float32,0xbf032384,0xbefbfb3c,2 +np.float32,0xff418c7c,0xc2b245ce,2 +np.float32,0xbec0aad0,0xbebc633b,2 +np.float32,0xfdbff178,0xc2ae18de,2 +np.float32,0x68ab15,0x68ab15,2 +np.float32,0xbdfc4a88,0xbdfba848,2 +np.float32,0xbf5adec6,0xbf466747,2 +np.float32,0x807d5dcc,0x807d5dcc,2 +np.float32,0x61d144,0x61d144,2 +np.float32,0x807e3a03,0x807e3a03,2 +np.float32,0x1872f2,0x1872f2,2 +np.float32,0x7f2a272c,0x42b203d8,2 +np.float32,0xfe7f8314,0xc2b00e3a,2 +np.float32,0xbe42aeac,0xbe418737,2 +np.float32,0x8024b614,0x8024b614,2 +np.float32,0xbe41b6b8,0xbe40939a,2 +np.float32,0xa765c,0xa765c,2 +np.float32,0x7ea74f4b,0x42b09853,2 +np.float32,0x7f7ef631,0x42b2d2e7,2 +np.float32,0x7eaef5e6,0x42b0af38,2 +np.float32,0xff733d85,0xc2b2bacf,2 +np.float32,0x537ac0,0x537ac0,2 +np.float32,0xbeca4790,0xbec55b1d,2 +np.float32,0x80117314,0x80117314,2 +np.float32,0xfe958536,0xc2b05ec5,2 +np.float32,0x8066ecc2,0x8066ecc2,2 +np.float32,0xbf56baf3,0xbf433e82,2 +np.float32,0x1f7fd7,0x1f7fd7,2 +np.float32,0x3e942104,0x3e9222fc,2 +np.float32,0xfeaffe82,0xc2b0b23c,2 +np.float32,0xfe0e02b0,0xc2aee17e,2 +np.float32,0xbf800000,0xbf61a1b3,2 +np.float32,0x800b7e49,0x800b7e49,2 +np.float32,0x6c514f,0x6c514f,2 +np.float32,0xff800000,0xff800000,2 +np.float32,0x7f7d9a45,0x42b2d02b,2 +np.float32,0x800c9c69,0x800c9c69,2 +np.float32,0x274b14,0x274b14,2 +np.float32,0xbf4b22b0,0xbf3a42e2,2 +np.float32,0x63e5ae,0x63e5ae,2 +np.float32,0xbe18facc,0xbe186a90,2 +np.float32,0x7e137351,0x42aef4bd,2 +np.float32,0x80518ffd,0x80518ffd,2 +np.float32,0xbf0a8ffc,0xbf048f0d,2 +np.float32,0x841d,0x841d,2 +np.float32,0x7edfdc9e,0x42b12d69,2 +np.float32,0xfd1092b0,0xc2ac24de,2 +np.float32,0x7e2c9bdf,0x42af4566,2 +np.float32,0x7f7fffff,0x42b2d4fc,2 +np.float32,0x3f4954a6,0x3f38d853,2 +np.float32,0xbe83efd2,0xbe8284c3,2 +np.float32,0x800e8e02,0x800e8e02,2 +np.float32,0x78ad39,0x78ad39,2 +np.float32,0x7eb0f967,0x42b0b514,2 +np.float32,0xbe39aa94,0xbe38a9ee,2 +np.float32,0x80194e7b,0x80194e7b,2 +np.float32,0x3cf3a340,0x3cf39a0f,2 +np.float32,0x3ed3117a,0x3ecd8173,2 +np.float32,0x7f530b11,0x42b2721c,2 +np.float32,0xff756ba2,0xc2b2bf60,2 +np.float32,0x15ea25,0x15ea25,2 +np.float32,0x803cbb64,0x803cbb64,2 +np.float32,0x3f34722d,0x3f281a2c,2 +np.float32,0x3ddd88e0,0x3ddd1adb,2 +np.float32,0x3f54244c,0x3f41418b,2 +np.float32,0x3e0adb98,0x3e0a6f8b,2 +np.float32,0x80800000,0x80800000,2 +np.float32,0x58902b,0x58902b,2 +np.float32,0xfe3b50b8,0xc2af6f43,2 +np.float32,0xfe0846d0,0xc2aecc64,2 +np.float32,0xbe0299d0,0xbe023fd4,2 +np.float32,0x18dde6,0x18dde6,2 +np.float32,0x8039fe8b,0x8039fe8b,2 +np.float32,0x8015d179,0x8015d179,2 +np.float32,0x3f551322,0x3f41f947,2 +np.float32,0x2ab387,0x2ab387,2 +np.float32,0xbf7e311e,0xbf6059d0,2 +np.float32,0xbdba58a8,0xbdba1713,2 +np.float32,0xbf1d008a,0xbf148724,2 +np.float32,0xbf6b9c97,0xbf52ec98,2 +np.float32,0x802acf04,0x802acf04,2 +np.float32,0x1,0x1,2 +np.float32,0xbe9e16d6,0xbe9bade3,2 +np.float32,0xbf048a14,0xbefe78c7,2 +np.float32,0x7e432ad3,0x42af8449,2 +np.float32,0xbdcc7fe0,0xbdcc2944,2 +np.float32,0x6dfc27,0x6dfc27,2 +np.float32,0xfef6eed8,0xc2b15fa1,2 +np.float32,0xbeeff6e8,0xbee7f2e4,2 +np.float32,0x7e3a6ca8,0x42af6cd2,2 +np.float32,0xff2c82e8,0xc2b20ae4,2 +np.float32,0x3e9f8d74,0x3e9d13b0,2 +np.float32,0x7ea36191,0x42b08c29,2 +np.float32,0x7f734bed,0x42b2baed,2 +np.float32,0x7f2df96d,0x42b20f37,2 +np.float32,0x5036fd,0x5036fd,2 +np.float32,0x806eab38,0x806eab38,2 +np.float32,0xbe9db90e,0xbe9b5446,2 +np.float32,0xfeef6fac,0xc2b14fd9,2 +np.float32,0xc2bf7,0xc2bf7,2 +np.float32,0xff53ec3d,0xc2b2743d,2 +np.float32,0x7e837637,0x42b01cde,2 +np.float32,0xbefb5934,0xbef23662,2 +np.float32,0x3f6cec80,0x3f53e371,2 +np.float32,0x3e86e7de,0x3e85643f,2 +np.float32,0x3f09cb42,0x3f03e1ef,2 +np.float32,0xbec3d236,0xbebf5620,2 +np.float32,0xfedef246,0xc2b12b50,2 +np.float32,0xbf08d6a8,0xbf030a62,2 +np.float32,0x8036cbf9,0x8036cbf9,2 +np.float32,0x3f74d3e3,0x3f59a512,2 +np.float32,0x6a600c,0x6a600c,2 +np.float32,0xfd1295b0,0xc2ac2bf1,2 +np.float32,0xbeb61142,0xbeb26efa,2 +np.float32,0x80216556,0x80216556,2 +np.float32,0xbf1fa0f6,0xbf16c30a,2 +np.float32,0x3e0af8e1,0x3e0a8c90,2 +np.float32,0x80434709,0x80434709,2 +np.float32,0x49efd9,0x49efd9,2 +np.float32,0x7f7cce6c,0x42b2ce8f,2 +np.float32,0x6e5450,0x6e5450,2 +np.float32,0x7f0fc115,0x42b1ad86,2 +np.float32,0x632db0,0x632db0,2 +np.float32,0x3f6f4c2a,0x3f55a064,2 +np.float32,0x7ec4f273,0x42b0ebd3,2 +np.float32,0x61ae1e,0x61ae1e,2 +np.float32,0x5f47c4,0x5f47c4,2 +np.float32,0xbf3c8f62,0xbf2eaf54,2 +np.float32,0xfca38900,0xc2ab0113,2 +np.float32,0x3ec89d52,0x3ec3ce78,2 +np.float32,0xbe0e3f70,0xbe0dcb53,2 +np.float32,0x805d3156,0x805d3156,2 +np.float32,0x3eee33f8,0x3ee65a4e,2 +np.float32,0xbeda7e9a,0xbed45a90,2 +np.float32,0x7e2fac7b,0x42af4e69,2 +np.float32,0x7efd0e28,0x42b16c2c,2 +np.float32,0x3f0c7b17,0x3f063e46,2 +np.float32,0xbf395bec,0xbf2c198f,2 +np.float32,0xfdf1c3f8,0xc2ae8f05,2 +np.float32,0xbe11f4e4,0xbe117783,2 +np.float32,0x7eddc901,0x42b128a3,2 +np.float32,0x3f4bad09,0x3f3aaf33,2 +np.float32,0xfefb5d76,0xc2b168bd,2 +np.float32,0x3ed3a4cf,0x3ece09a3,2 +np.float32,0x7ec582e4,0x42b0ed4a,2 +np.float32,0x3dc2268a,0x3dc1dc64,2 +np.float32,0x3ef9b17c,0x3ef0b9c9,2 +np.float32,0x2748ac,0x2748ac,2 +np.float32,0xfed6a602,0xc2b117e4,2 +np.float32,0xbefc9c36,0xbef35832,2 +np.float32,0x7e0476,0x7e0476,2 +np.float32,0x804be1a0,0x804be1a0,2 +np.float32,0xbefbc1c2,0xbef2943a,2 +np.float32,0xbd4698f0,0xbd46850a,2 +np.float32,0x688627,0x688627,2 +np.float32,0x3f7f7685,0x3f61406f,2 +np.float32,0x827fb,0x827fb,2 +np.float32,0x3f503264,0x3f3e34fd,2 +np.float32,0x7f5458d1,0x42b27543,2 +np.float32,0x800ac01f,0x800ac01f,2 +np.float32,0x6188dd,0x6188dd,2 +np.float32,0x806ac0ba,0x806ac0ba,2 +np.float32,0xbe14493c,0xbe13c5cc,2 +np.float32,0x3f77542c,0x3f5b72ae,2 +np.float32,0xfeaacab6,0xc2b0a2df,2 +np.float32,0x7f2893d5,0x42b1ff15,2 +np.float32,0x66b528,0x66b528,2 +np.float32,0xbf653e24,0xbf4e3573,2 +np.float32,0x801a2853,0x801a2853,2 +np.float32,0x3f3d8c98,0x3f2f7b04,2 +np.float32,0xfdffbad8,0xc2aeabc5,2 +np.float32,0x3dd50f,0x3dd50f,2 +np.float32,0x3f325a4c,0x3f266353,2 +np.float32,0xfcc48ec0,0xc2ab5f3f,2 +np.float32,0x3e6f5b9a,0x3e6d3ae5,2 +np.float32,0x3dbcd62b,0x3dbc91ee,2 +np.float32,0xbf7458d9,0xbf594c1c,2 +np.float32,0xff5adb24,0xc2b284b9,2 +np.float32,0x807b246d,0x807b246d,2 +np.float32,0x3f800000,0x3f61a1b3,2 +np.float32,0x231a28,0x231a28,2 +np.float32,0xbdc66258,0xbdc61341,2 +np.float32,0x3c84b4b4,0x3c84b338,2 +np.float32,0xbf215894,0xbf183783,2 +np.float32,0xff4ee298,0xc2b267ec,2 +np.float32,0x801ef52e,0x801ef52e,2 +np.float32,0x1040b0,0x1040b0,2 +np.float32,0xff545582,0xc2b2753b,2 +np.float32,0x3f3b9dda,0x3f2decaf,2 +np.float32,0x730f99,0x730f99,2 +np.float32,0xff7fffff,0xc2b2d4fc,2 +np.float32,0xff24cc5e,0xc2b1f379,2 +np.float32,0xbe9b456a,0xbe98fc0b,2 +np.float32,0x188fb,0x188fb,2 +np.float32,0x3f5c7ce2,0x3f47a18a,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0x806ea4da,0x806ea4da,2 +np.float32,0xfe810570,0xc2b01345,2 +np.float32,0x8036af89,0x8036af89,2 +np.float32,0x8043cec6,0x8043cec6,2 +np.float32,0x80342bb3,0x80342bb3,2 +np.float32,0x1a2bd4,0x1a2bd4,2 +np.float32,0x3f6248c2,0x3f4bff9a,2 +np.float32,0x8024eb35,0x8024eb35,2 +np.float32,0x7ea55872,0x42b09247,2 +np.float32,0x806d6e56,0x806d6e56,2 +np.float32,0x25c21a,0x25c21a,2 +np.float32,0x3f4e95f3,0x3f3cf483,2 +np.float32,0x15ca38,0x15ca38,2 +np.float32,0x803f01b2,0x803f01b2,2 +np.float32,0xbe731634,0xbe70dc10,2 +np.float32,0x3e80cee4,0x3e7ef933,2 +np.float32,0x3ef6dda5,0x3eee2e7b,2 +np.float32,0x3f3dfdc2,0x3f2fd5ed,2 +np.float32,0xff0492a7,0xc2b18411,2 +np.float32,0xbf1d0adf,0xbf148ff3,2 +np.float32,0xfcf75460,0xc2abd4e3,2 +np.float32,0x3f46fca6,0x3f36ffa6,2 +np.float32,0xbe63b5c0,0xbe61dfb3,2 +np.float32,0xff019bec,0xc2b1787d,2 +np.float32,0x801f14a9,0x801f14a9,2 +np.float32,0x3f176cfa,0x3f0fc051,2 +np.float32,0x3f69d976,0x3f51a015,2 +np.float32,0x3f4917cb,0x3f38a87a,2 +np.float32,0x3b2a0bea,0x3b2a0bdd,2 +np.float32,0xbf41d857,0xbf32eb50,2 +np.float32,0xbf08841a,0xbf02c18f,2 +np.float32,0x7ec86f14,0x42b0f4d0,2 +np.float32,0xbf7d15d1,0xbf5f9090,2 +np.float32,0xbd080550,0xbd07feea,2 +np.float32,0xbf6f1bef,0xbf557d26,2 +np.float32,0xfebc282c,0xc2b0d473,2 +np.float32,0x3e68d2f5,0x3e66dd03,2 +np.float32,0x3f3ed8fe,0x3f3085d5,2 +np.float32,0xff2f78ae,0xc2b2139a,2 +np.float32,0xff647a70,0xc2b29ac1,2 +np.float32,0xfd0859a0,0xc2ac06e2,2 +np.float32,0x3ea578a8,0x3ea2b7e1,2 +np.float32,0x6c58c6,0x6c58c6,2 +np.float32,0xff23f26a,0xc2b1f0d2,2 +np.float32,0x800902a4,0x800902a4,2 +np.float32,0xfe8ba64e,0xc2b03bcd,2 +np.float32,0x3f091143,0x3f033e0f,2 +np.float32,0x8017c4bd,0x8017c4bd,2 +np.float32,0xbf708fd4,0xbf568c8c,2 +np.float32,0x3be1d8,0x3be1d8,2 +np.float32,0x80091f07,0x80091f07,2 +np.float32,0x68eabe,0x68eabe,2 +np.float32,0xfe9ab2c8,0xc2b07033,2 +np.float32,0x3eabe752,0x3ea8d3d7,2 +np.float32,0xbf7adcb2,0xbf5dfaf5,2 +np.float32,0x801ecc01,0x801ecc01,2 +np.float32,0xbf5570a9,0xbf424123,2 +np.float32,0x3e89eecd,0x3e88510e,2 +np.float32,0xfeb2feee,0xc2b0bae4,2 +np.float32,0xbeb25ec2,0xbeaef22b,2 +np.float32,0x201e49,0x201e49,2 +np.float32,0x800a35f6,0x800a35f6,2 +np.float32,0xbf02d449,0xbefb6e2a,2 +np.float32,0x3f062bea,0x3f00aef6,2 +np.float32,0x7f5219ff,0x42b26fd2,2 +np.float32,0xbd4561d0,0xbd454e47,2 +np.float32,0x3f6c4789,0x3f536a4b,2 +np.float32,0x7f58b06d,0x42b27fa1,2 +np.float32,0x7f132f39,0x42b1b999,2 +np.float32,0x3e05dcb4,0x3e057bd8,2 +np.float32,0x7f526045,0x42b2707d,2 +np.float32,0x3f6117d0,0x3f4b1adb,2 +np.float32,0xbf21f47d,0xbf18bb57,2 +np.float32,0x1a26d6,0x1a26d6,2 +np.float32,0x46b114,0x46b114,2 +np.float32,0x3eb24518,0x3eaed9ef,2 +np.float32,0xfe2139c8,0xc2af2278,2 +np.float32,0xbf7c36fb,0xbf5ef1f6,2 +np.float32,0x3f193834,0x3f114af7,2 +np.float32,0xff3ea650,0xc2b23e14,2 +np.float32,0xfeeb3bca,0xc2b146c7,2 +np.float32,0x7e8b8ca0,0x42b03b6f,2 +np.float32,0x3eed903d,0x3ee5c5d2,2 +np.float32,0xbdc73740,0xbdc6e72a,2 +np.float32,0x7e500307,0x42afa4ec,2 +np.float32,0xe003c,0xe003c,2 +np.float32,0x3e612bb4,0x3e5f64fd,2 +np.float32,0xfd81e248,0xc2ad50e6,2 +np.float32,0x766a4f,0x766a4f,2 +np.float32,0x3e8708c9,0x3e858414,2 +np.float32,0xbf206c58,0xbf176f7f,2 +np.float32,0x7e93aeb0,0x42b0586f,2 +np.float32,0xfd9d36b8,0xc2adb2ad,2 +np.float32,0xff1f4e0e,0xc2b1e21d,2 +np.float32,0x3f22bd5a,0x3f1964f8,2 +np.float32,0x7f6a517a,0x42b2a7ad,2 +np.float32,0xff6ca773,0xc2b2acc1,2 +np.float32,0x7f6bf453,0x42b2ab3d,2 +np.float32,0x3edfdd64,0x3ed9489f,2 +np.float32,0xbeafc5ba,0xbeac7daa,2 +np.float32,0x7d862039,0x42ad615b,2 +np.float32,0xbe9d2002,0xbe9ac1fc,2 +np.float32,0xbdcc54c0,0xbdcbfe5b,2 +np.float32,0xbf1bc0aa,0xbf13762a,2 +np.float32,0xbf4679ce,0xbf36984b,2 +np.float32,0x3ef45696,0x3eebe713,2 +np.float32,0xff6eb999,0xc2b2b137,2 +np.float32,0xbe4b2e4c,0xbe49dee8,2 +np.float32,0x3f498951,0x3f3901b7,2 +np.float32,0xbe9692f4,0xbe947be1,2 +np.float32,0xbf44ce26,0xbf3545c8,2 +np.float32,0x805787a8,0x805787a8,2 +np.float32,0xbf342650,0xbf27dc26,2 +np.float32,0x3edafbf0,0x3ed4cdd2,2 +np.float32,0x3f6fb858,0x3f55ef63,2 +np.float32,0xff227d0a,0xc2b1ec3f,2 +np.float32,0xfeb9a202,0xc2b0cd89,2 +np.float32,0x7f5b12c1,0x42b2853b,2 +np.float32,0x584578,0x584578,2 +np.float32,0x7ec0b76f,0x42b0e0b5,2 +np.float32,0x3f57f54b,0x3f442f10,2 +np.float32,0x7eef3620,0x42b14f5d,2 +np.float32,0x4525b5,0x4525b5,2 +np.float32,0x801bd407,0x801bd407,2 +np.float32,0xbed1f166,0xbecc7703,2 +np.float32,0x3f57e732,0x3f442449,2 +np.float32,0x80767cd5,0x80767cd5,2 +np.float32,0xbef1a7d2,0xbee97aa3,2 +np.float32,0x3dd5b1af,0x3dd54ee6,2 +np.float32,0x960c,0x960c,2 +np.float32,0x7c392d41,0x42a9ddd1,2 +np.float32,0x3f5c9a34,0x3f47b7c1,2 +np.float32,0x3f5cecee,0x3f47f667,2 +np.float32,0xbee482ce,0xbedd8899,2 +np.float32,0x8066ba7e,0x8066ba7e,2 +np.float32,0x7ed76127,0x42b119a2,2 +np.float32,0x805ca40b,0x805ca40b,2 +np.float32,0x7f5ed5d1,0x42b28df3,2 +np.float32,0xfe9e1b1e,0xc2b07b5b,2 +np.float32,0x3f0201a2,0x3ef9f6c4,2 +np.float32,0xbf2e6430,0xbf232039,2 +np.float32,0x80326b4d,0x80326b4d,2 +np.float32,0x3f11dc7c,0x3f0af06e,2 +np.float32,0xbe89c42e,0xbe8827e6,2 +np.float32,0x3f3c69f8,0x3f2e9133,2 +np.float32,0x806326a9,0x806326a9,2 +np.float32,0x3f1c5286,0x3f13f2b6,2 +np.float32,0xff5c0ead,0xc2b28786,2 +np.float32,0xff32b952,0xc2b21d01,2 +np.float32,0x7dd27c4e,0x42ae4815,2 +np.float32,0xbf7a6816,0xbf5da7a2,2 +np.float32,0xfeac72f8,0xc2b0a7d1,2 +np.float32,0x335ad7,0x335ad7,2 +np.float32,0xbe682da4,0xbe663bcc,2 +np.float32,0x3f2df244,0x3f22c208,2 +np.float32,0x80686e8e,0x80686e8e,2 +np.float32,0x7f50120f,0x42b26ad9,2 +np.float32,0x3dbc596a,0x3dbc15b3,2 +np.float32,0xbf4f2868,0xbf3d666d,2 +np.float32,0x80000001,0x80000001,2 +np.float32,0xff66c059,0xc2b29fd2,2 +np.float32,0xfe8bbcaa,0xc2b03c1f,2 +np.float32,0x3ece6a51,0x3ec93271,2 +np.float32,0x7f06cd26,0x42b18c9a,2 +np.float32,0x7e41e6dc,0x42af80f5,2 +np.float32,0x7d878334,0x42ad669f,2 +np.float32,0xfe8c5c4c,0xc2b03e67,2 +np.float32,0x337a05,0x337a05,2 +np.float32,0x3e63801d,0x3e61ab58,2 +np.float32,0x62c315,0x62c315,2 +np.float32,0x802aa888,0x802aa888,2 +np.float32,0x80038b43,0x80038b43,2 +np.float32,0xff5c1271,0xc2b2878f,2 +np.float32,0xff4184a5,0xc2b245b9,2 +np.float32,0x7ef58f4b,0x42b15cc6,2 +np.float32,0x7f42d8ac,0x42b2493a,2 +np.float32,0x806609f2,0x806609f2,2 +np.float32,0x801e763b,0x801e763b,2 +np.float32,0x7f2bc073,0x42b208a2,2 +np.float32,0x801d7d7f,0x801d7d7f,2 +np.float32,0x7d415dc1,0x42acb9c2,2 +np.float32,0xbf624ff9,0xbf4c0502,2 +np.float32,0xbf603afd,0xbf4a74e2,2 +np.float32,0x8007fe42,0x8007fe42,2 +np.float32,0x800456db,0x800456db,2 +np.float32,0x620871,0x620871,2 +np.float32,0x3e9c6c1e,0x3e9a15fa,2 +np.float32,0x4245d,0x4245d,2 +np.float32,0x8035bde9,0x8035bde9,2 +np.float32,0xbf597418,0xbf45533c,2 +np.float32,0x3c730f80,0x3c730d38,2 +np.float32,0x3f7cd8ed,0x3f5f6540,2 +np.float32,0x807e49c3,0x807e49c3,2 +np.float32,0x3d6584c0,0x3d65660c,2 +np.float32,0xff42a744,0xc2b248b8,2 +np.float32,0xfedc6f56,0xc2b12583,2 +np.float32,0x806263a4,0x806263a4,2 +np.float32,0x175a17,0x175a17,2 +np.float32,0x3f1e8537,0x3f15d208,2 +np.float32,0x4055b5,0x4055b5,2 +np.float32,0x438aa6,0x438aa6,2 +np.float32,0x8038507f,0x8038507f,2 +np.float32,0xbed75348,0xbed16f85,2 +np.float32,0x7f07b7d6,0x42b19012,2 +np.float32,0xfe8b9d30,0xc2b03bac,2 +np.float32,0x805c501c,0x805c501c,2 +np.float32,0x3ef22b1d,0x3ee9f159,2 +np.float32,0x802b6759,0x802b6759,2 +np.float32,0x45281a,0x45281a,2 +np.float32,0xbf7e9970,0xbf60a3cf,2 +np.float32,0xbf14d152,0xbf0d8062,2 +np.float32,0x3d9ff950,0x3d9fcfc8,2 +np.float32,0x7865d9,0x7865d9,2 +np.float32,0xbee67fa4,0xbedf58eb,2 +np.float32,0x7dc822d1,0x42ae2e44,2 +np.float32,0x3f3af0fe,0x3f2d612c,2 +np.float32,0xbefea106,0xbef5274e,2 +np.float32,0xbf758a3f,0xbf5a28c5,2 +np.float32,0xbf331bdd,0xbf270209,2 +np.float32,0x7f51c901,0x42b26f0d,2 +np.float32,0x3f67c33b,0x3f5014d8,2 +np.float32,0xbbc9d980,0xbbc9d92c,2 +np.float32,0xbc407540,0xbc40741e,2 +np.float32,0x7eed9a3c,0x42b14be9,2 +np.float32,0x1be0fe,0x1be0fe,2 +np.float32,0xbf6b4913,0xbf52af1f,2 +np.float32,0xbda8eba8,0xbda8bac6,2 +np.float32,0x8004bcea,0x8004bcea,2 +np.float32,0xff6f6afe,0xc2b2b2b3,2 +np.float32,0xbf205810,0xbf175e50,2 +np.float32,0x80651944,0x80651944,2 +np.float32,0xbec73016,0xbec27a3f,2 +np.float32,0x5701b9,0x5701b9,2 +np.float32,0xbf1062ce,0xbf09a7df,2 +np.float32,0x3e0306ae,0x3e02abd1,2 +np.float32,0x7bfc62,0x7bfc62,2 +np.float32,0xbf48dd3c,0xbf387a6b,2 +np.float32,0x8009573e,0x8009573e,2 +np.float32,0x660a2c,0x660a2c,2 +np.float32,0xff2280da,0xc2b1ec4b,2 +np.float32,0xbf7034fe,0xbf564a54,2 +np.float32,0xbeeb448e,0xbee3b045,2 +np.float32,0xff4e949c,0xc2b2672b,2 +np.float32,0xbf3c4486,0xbf2e7309,2 +np.float32,0x7eb086d8,0x42b0b3c8,2 +np.float32,0x7eac8aca,0x42b0a817,2 +np.float32,0xfd3d2d60,0xc2acae8b,2 +np.float32,0xbf363226,0xbf2987bd,2 +np.float32,0x7f02e524,0x42b17d8c,2 +np.float32,0x8049a148,0x8049a148,2 +np.float32,0x147202,0x147202,2 +np.float32,0x8031d3f6,0x8031d3f6,2 +np.float32,0xfe78bf68,0xc2b0007d,2 +np.float32,0x7ebd16d0,0x42b0d6fb,2 +np.float32,0xbdaed2e8,0xbdae9cbb,2 +np.float32,0x802833ae,0x802833ae,2 +np.float32,0x7f62adf6,0x42b296b5,2 +np.float32,0xff2841c0,0xc2b1fe1b,2 +np.float32,0xbeb2c47e,0xbeaf523b,2 +np.float32,0x7e42a36e,0x42af82e6,2 +np.float32,0x41ea29,0x41ea29,2 +np.float32,0xbcaaa800,0xbcaaa4d7,2 +np.float64,0x3fed71f27ebae3e5,0x3fea5c6095012ca6,1 +np.float64,0x224dc392449b9,0x224dc392449b9,1 +np.float64,0x3fdf897a7d3f12f5,0x3fde620339360992,1 +np.float64,0xbfe1f99a5123f334,0xbfe124a57cfaf556,1 +np.float64,0xbfd9725c3bb2e4b8,0xbfd8d1e3f75110c7,1 +np.float64,0x3fe38977546712ee,0x3fe27d9d37f4b91f,1 +np.float64,0xbfc36c29e526d854,0xbfc3594743ee45c4,1 +np.float64,0xbfe5cbec332b97d8,0xbfe4638802316849,1 +np.float64,0x2ff35efe5fe6d,0x2ff35efe5fe6d,1 +np.float64,0x7fd3f828e227f051,0x40862a7d4a40b1e0,1 +np.float64,0xffd06fc11620df82,0xc08628ee8f1bf6c8,1 +np.float64,0x3fe5321bf4aa6438,0x3fe3e3d9fa453199,1 +np.float64,0xffd07a323ca0f464,0xc08628f3a2930f8c,1 +np.float64,0x3fdf7abe7abef57c,0x3fde54cb193d49cb,1 +np.float64,0x40941f1881285,0x40941f1881285,1 +np.float64,0xffef18defc7e31bd,0xc0863393f2c9f061,1 +np.float64,0xbfe379f871e6f3f1,0xbfe270620cb68347,1 +np.float64,0xffec829848f90530,0xc08632e210edaa2b,1 +np.float64,0x80070c00574e1801,0x80070c00574e1801,1 +np.float64,0xffce7654b23ceca8,0xc086285291e89975,1 +np.float64,0x7fc9932daa33265a,0x408626ec6cc2b807,1 +np.float64,0x355ee98c6abde,0x355ee98c6abde,1 +np.float64,0x3fac54962c38a920,0x3fac50e40b6c19f2,1 +np.float64,0x800857984af0af31,0x800857984af0af31,1 +np.float64,0x7fea6a3d55f4d47a,0x40863245bf39f179,1 +np.float64,0x3fdb8fab33371f56,0x3fdac5ffc9e1c347,1 +np.float64,0x800a887a7bf510f5,0x800a887a7bf510f5,1 +np.float64,0xbfbdbda3c63b7b48,0xbfbdac9dd5a2d3e8,1 +np.float64,0xbfd4a2457b29448a,0xbfd44acb3b316d6d,1 +np.float64,0x7fd5329a502a6534,0x40862af789b528b5,1 +np.float64,0x3fd96a7bceb2d4f8,0x3fd8ca92104d6cd6,1 +np.float64,0x3fde6a0cd6bcd41a,0x3fdd5f4b85abf749,1 +np.float64,0xbfc7faaff32ff560,0xbfc7d7560b8c4a52,1 +np.float64,0x7fec381b2f787035,0x408632cd0e9c095c,1 +np.float64,0x1fc2eb543f85e,0x1fc2eb543f85e,1 +np.float64,0x7ac6000af58c1,0x7ac6000af58c1,1 +np.float64,0xffe060a87920c150,0xc0862e72c37d5a4e,1 +np.float64,0xbfb7d8c89e2fb190,0xbfb7cffd3c3f8e3a,1 +np.float64,0x3fd91033deb22068,0x3fd87695b067aa1e,1 +np.float64,0x3fec1aff01b835fe,0x3fe95d5cbd729af7,1 +np.float64,0x7fb97f69ec32fed3,0x4086215aaae5c697,1 +np.float64,0x7feaf1e4e5f5e3c9,0x4086326e6ca6a2bb,1 +np.float64,0x800537e44d0a6fc9,0x800537e44d0a6fc9,1 +np.float64,0x800b2a0d0d36541a,0x800b2a0d0d36541a,1 +np.float64,0x3fe2193846e43270,0x3fe140308550138e,1 +np.float64,0x5e2a0a32bc542,0x5e2a0a32bc542,1 +np.float64,0xffe5888b09eb1116,0xc08630a348783aa3,1 +np.float64,0xbfceb9b5033d736c,0xbfce701049c10435,1 +np.float64,0x7fe5d68589abad0a,0x408630c00ce63f23,1 +np.float64,0x8009b5457ff36a8b,0x8009b5457ff36a8b,1 +np.float64,0xbfb5518c2e2aa318,0xbfb54b42638ca718,1 +np.float64,0x3f9c58469838b080,0x3f9c575974fbcd7b,1 +np.float64,0x3fe8db4b4731b697,0x3fe6dc9231587966,1 +np.float64,0x8007d0f77f4fa1f0,0x8007d0f77f4fa1f0,1 +np.float64,0x7fe79eef542f3dde,0x40863160c673c67f,1 +np.float64,0xffbdc0b6163b8170,0xc0862296be4bf032,1 +np.float64,0x3fbb8d3312371a66,0x3fbb7fa76fb4cf8d,1 +np.float64,0xffd8a0eedbb141de,0xc0862c2ac6e512f0,1 +np.float64,0x7fee99d8d87d33b1,0x4086337301c4c8df,1 +np.float64,0xffe7479b552e8f36,0xc0863142fba0f0ec,1 +np.float64,0xffedf8ef4abbf1de,0xc08633488068fe69,1 +np.float64,0x895c4d9f12b8a,0x895c4d9f12b8a,1 +np.float64,0x29b4caf05369a,0x29b4caf05369a,1 +np.float64,0xbfefb90d657f721b,0xbfec01efa2425b35,1 +np.float64,0xde07c3bdbc0f9,0xde07c3bdbc0f9,1 +np.float64,0x7feae9fd02f5d3f9,0x4086326c1368ed5a,1 +np.float64,0x3feab792da756f26,0x3fe84f6e15338ed7,1 +np.float64,0xbfeff8ed72fff1db,0xbfec2f35da06daaf,1 +np.float64,0x8004b2c132896583,0x8004b2c132896583,1 +np.float64,0xbf9fcb00103f9600,0xbf9fc9b1751c569e,1 +np.float64,0x4182b72e83058,0x4182b72e83058,1 +np.float64,0x90820d812105,0x90820d812105,1 +np.float64,0xbfdec9a0ba3d9342,0xbfddb585df607ce1,1 +np.float64,0x7fdc0a69a03814d2,0x40862d347f201b63,1 +np.float64,0xbfef0708937e0e11,0xbfeb82d27f8ea97f,1 +np.float64,0xffda57e4ddb4afca,0xc0862cb49e2e0c4c,1 +np.float64,0xbfa30b9af4261730,0xbfa30a7b4a633060,1 +np.float64,0x7feb57fcc4b6aff9,0x4086328c83957a0b,1 +np.float64,0x7fe6759153eceb22,0x408630f980433963,1 +np.float64,0x7fdd3278c8ba64f1,0x40862d87445243e9,1 +np.float64,0xd3b8e6b9a771d,0xd3b8e6b9a771d,1 +np.float64,0x6267dc88c4cfc,0x6267dc88c4cfc,1 +np.float64,0x7fedd3cf00bba79d,0x4086333e91712ff5,1 +np.float64,0xffbe512ce03ca258,0xc08622bd39314cea,1 +np.float64,0xbfe71742ca6e2e86,0xbfe572ccbf2d010d,1 +np.float64,0x8002fb048c65f60a,0x8002fb048c65f60a,1 +np.float64,0x800d9d9ddf7b3b3c,0x800d9d9ddf7b3b3c,1 +np.float64,0xbfeaf6230df5ec46,0xbfe87f5d751ec3d5,1 +np.float64,0xbfe69973a42d32e8,0xbfe50c680f7002fe,1 +np.float64,0x3fe309cf87e613a0,0x3fe21048714ce1ac,1 +np.float64,0x800435d17a286ba4,0x800435d17a286ba4,1 +np.float64,0x7fefffffffffffff,0x408633ce8fb9f87e,1 +np.float64,0x3fe36ade1766d5bc,0x3fe26379fb285dde,1 +np.float64,0x3f98d8d94831b1c0,0x3f98d839885dc527,1 +np.float64,0xbfd08f7ae5211ef6,0xbfd0618ab5293e1e,1 +np.float64,0xbfcf630bd53ec618,0xbfcf14a0cd20704d,1 +np.float64,0xbfe58f0ca6eb1e1a,0xbfe4312225df8e28,1 +np.float64,0xffef4f6406be9ec7,0xc08633a1ed1d27e5,1 +np.float64,0x7fe10120b3e20240,0x40862ebfaf94e6e8,1 +np.float64,0xffe96c52fbb2d8a5,0xc08631f75d9a59a0,1 +np.float64,0xbfe448a333e89146,0xbfe31fee44c3ec43,1 +np.float64,0x80045ff4e788bfeb,0x80045ff4e788bfeb,1 +np.float64,0x7fefaa2f823f545e,0x408633b8fea29524,1 +np.float64,0xffea6b8bf234d717,0xc0863246248e5960,1 +np.float64,0xbfdb085d80b610bc,0xbfda498b15b43eec,1 +np.float64,0xbfd5e12da3abc25c,0xbfd57970e2b8aecc,1 +np.float64,0x3fcc84928a390925,0x3fcc497c417a89f3,1 +np.float64,0xbfdcb713bf396e28,0xbfdbd46c5e731fd9,1 +np.float64,0xffdf50c0453ea180,0xc0862e16b5562f25,1 +np.float64,0x800342c2f7268587,0x800342c2f7268587,1 +np.float64,0x7feb8b6d743716da,0x4086329b8248de2c,1 +np.float64,0x800a9b18b4953632,0x800a9b18b4953632,1 +np.float64,0xffedaf0d12fb5e19,0xc0863334af82de1a,1 +np.float64,0x800aebda4ab5d7b5,0x800aebda4ab5d7b5,1 +np.float64,0xbfa9f5848433eb10,0xbfa9f2ac7ac065d4,1 +np.float64,0x3fea375928f46eb2,0x3fe7ec9f10eeac7d,1 +np.float64,0x3fd6c213fead8428,0x3fd64dcc1eff5f1b,1 +np.float64,0xbfa0476f44208ee0,0xbfa046bb986007ac,1 +np.float64,0x6c8e18aed91c4,0x6c8e18aed91c4,1 +np.float64,0x8000000000000001,0x8000000000000001,1 +np.float64,0x7fea86b5ba350d6a,0x4086324e59f13027,1 +np.float64,0x2316c3b0462d9,0x2316c3b0462d9,1 +np.float64,0x3fec4e3281389c65,0x3fe983c5c9d65940,1 +np.float64,0x3fbb87c47f772,0x3fbb87c47f772,1 +np.float64,0x8004af00fdc95e03,0x8004af00fdc95e03,1 +np.float64,0xbfd316db9ba62db8,0xbfd2d12765b9d155,1 +np.float64,0x3fec1a7a99f834f6,0x3fe95cf941889b3d,1 +np.float64,0x3feff7e1477fefc3,0x3fec2e782392d4b9,1 +np.float64,0xbfc683ea042d07d4,0xbfc66698cfa5026e,1 +np.float64,0x3fdbc8aaa9b79154,0x3fdafa50e6fc3fff,1 +np.float64,0xfb3b630ff676d,0xfb3b630ff676d,1 +np.float64,0x7fe715ef8eae2bde,0x40863131d794b41f,1 +np.float64,0x7fefa06c11bf40d7,0x408633b686c7996a,1 +np.float64,0x80002a40f5205483,0x80002a40f5205483,1 +np.float64,0x7fe95f3c74b2be78,0x408631f33e37bf76,1 +np.float64,0x3fb2977b32252ef0,0x3fb2934eaf5a4be8,1 +np.float64,0x3fc0f3dbc821e7b8,0x3fc0e745288c84c3,1 +np.float64,0x3fda98da56b531b5,0x3fd9e2b19447dacc,1 +np.float64,0x3f95b9d5202b73aa,0x3f95b96a53282949,1 +np.float64,0x3fdc1ace7738359d,0x3fdb4597d31df7ff,1 +np.float64,0xffeac5bb2e358b76,0xc0863261452ab66c,1 +np.float64,0xbfefb1b78f7f636f,0xbfebfcb9be100ced,1 +np.float64,0xf5c9e191eb93c,0xf5c9e191eb93c,1 +np.float64,0x3fe83a977630752f,0x3fe65d0df90ff6ef,1 +np.float64,0x3fc317515d262ea0,0x3fc3056072b719f0,1 +np.float64,0x7fe2dcfab225b9f4,0x40862f94257c28a2,1 +np.float64,0xca2b115794562,0xca2b115794562,1 +np.float64,0x3fd495301aa92a60,0x3fd43e57108761d5,1 +np.float64,0x800ccc4293199885,0x800ccc4293199885,1 +np.float64,0xc8d3173d91a63,0xc8d3173d91a63,1 +np.float64,0xbf2541bb7e4a8,0xbf2541bb7e4a8,1 +np.float64,0xbfe9a330df334662,0xbfe779816573f5be,1 +np.float64,0xffd5e4c8252bc990,0xc0862b39b3ca5d72,1 +np.float64,0x3fe90f3a53721e75,0x3fe70585ae09531d,1 +np.float64,0xbfe2b5ddc7a56bbc,0xbfe1c7fa91a675ed,1 +np.float64,0xbf981a0360303400,0xbf9819719345073a,1 +np.float64,0x19174b0e322ea,0x19174b0e322ea,1 +np.float64,0xbfd2f71a1725ee34,0xbfd2b2b6f7cd10b1,1 +np.float64,0x80056e83236add07,0x80056e83236add07,1 +np.float64,0x7fe4bc41d9697883,0x40863055f20ce0cb,1 +np.float64,0xffe76e06c46edc0d,0xc086315024b25559,1 +np.float64,0x3fe3c4f0f96789e2,0x3fe2b04b584609bf,1 +np.float64,0x3fe6cfc533ed9f8a,0x3fe538b4d784d5ee,1 +np.float64,0x7fd234a640a4694c,0x408629bfead4f0b2,1 +np.float64,0x3fdbc49c9ab78939,0x3fdaf698a83d08e2,1 +np.float64,0x3fe4c5336ee98a66,0x3fe388c6ddb60e0a,1 +np.float64,0xf4b9497be9729,0xf4b9497be9729,1 +np.float64,0x3fb312be12262580,0x3fb30e3c847c1d16,1 +np.float64,0x3fe9554218f2aa84,0x3fe73c8b311c7a98,1 +np.float64,0xff899816a0333040,0xc08610bfb2cd8559,1 +np.float64,0x8006008ad52c0116,0x8006008ad52c0116,1 +np.float64,0x3fd7d47be4afa8f8,0x3fd74fa71ec17fd0,1 +np.float64,0x8010000000000000,0x8010000000000000,1 +np.float64,0xdf2a9943be553,0xdf2a9943be553,1 +np.float64,0xbfeb86bf1eb70d7e,0xbfe8ed797580ba5c,1 +np.float64,0x800e2c0c28bc5818,0x800e2c0c28bc5818,1 +np.float64,0xbfe2be65d4657ccc,0xbfe1cf578dec2323,1 +np.float64,0xbfedea3a5afbd475,0xbfeab490bf05e585,1 +np.float64,0xbfe04b1583a0962b,0xbfdf523dfd7be25c,1 +np.float64,0x75929bb4eb254,0x75929bb4eb254,1 +np.float64,0x3fd7b4968caf692d,0x3fd731c0938ff97c,1 +np.float64,0x60bd8fd2c17b3,0x60bd8fd2c17b3,1 +np.float64,0xbfdaf15e70b5e2bc,0xbfda345a95ce18fe,1 +np.float64,0x7fdd7c35c2baf86b,0x40862d9b5f40c6b2,1 +np.float64,0x7feeb4d2ab7d69a4,0x4086337a0c0dffaf,1 +np.float64,0xffe65b5a1decb6b4,0xc08630f024420efb,1 +np.float64,0x7feb272b30764e55,0x4086327e2e553aa2,1 +np.float64,0x3fd27513e8a4ea28,0x3fd235ea49670f6a,1 +np.float64,0x3fe6541a6aeca834,0x3fe4d3a5b69fd1b6,1 +np.float64,0xbfe0c6ca0f618d94,0xbfe017058259efdb,1 +np.float64,0x7fc1bf07b7237e0e,0x4086240000fa5a52,1 +np.float64,0x7fe96af9c0f2d5f3,0x408631f6f0f4faa2,1 +np.float64,0x3fe0728be7a0e518,0x3fdf9881a5869de9,1 +np.float64,0xffe8ea4441b1d488,0xc08631ce0685ae7e,1 +np.float64,0xffd0b973f02172e8,0xc08629121e7fdf85,1 +np.float64,0xffe37b907a26f720,0xc0862fd6529401a0,1 +np.float64,0x3fe0ee826461dd05,0x3fe03a2a424a1b40,1 +np.float64,0xbfe8073c92300e79,0xbfe6340cbd179ac1,1 +np.float64,0x800768383f8ed071,0x800768383f8ed071,1 +np.float64,0x8002e467c7c5c8d0,0x8002e467c7c5c8d0,1 +np.float64,0xbfd8d53ea5b1aa7e,0xbfd83fa7243289d7,1 +np.float64,0xffebefce2bb7df9c,0xc08632b874f4f8dc,1 +np.float64,0xffe3be9eb9277d3d,0xc0862ff1ac70ad0b,1 +np.float64,0xffe2f8a82e65f150,0xc0862f9fd9e77d86,1 +np.float64,0xbfa01d151c203a30,0xbfa01c66dc13a70a,1 +np.float64,0x800877062d30ee0d,0x800877062d30ee0d,1 +np.float64,0xaade16a755bc3,0xaade16a755bc3,1 +np.float64,0xbfeb1abc70363579,0xbfe89b52c3b003aa,1 +np.float64,0x80097d0b2ad2fa17,0x80097d0b2ad2fa17,1 +np.float64,0x8001499907429333,0x8001499907429333,1 +np.float64,0x3fe8db2aaf71b656,0x3fe6dc7873f1b235,1 +np.float64,0x5cfeadc4b9fd6,0x5cfeadc4b9fd6,1 +np.float64,0xff3f77d1fe7ef,0xff3f77d1fe7ef,1 +np.float64,0xffeecd56f9bd9aad,0xc08633806cb1163d,1 +np.float64,0xbf96f3ca582de7a0,0xbf96f34c6b8e1c85,1 +np.float64,0x7ed6b44afdad7,0x7ed6b44afdad7,1 +np.float64,0x80071808da4e3012,0x80071808da4e3012,1 +np.float64,0x3feb8aee2bf715dc,0x3fe8f0a55516615c,1 +np.float64,0x800038f62e2071ed,0x800038f62e2071ed,1 +np.float64,0x3fb13f9af2227f30,0x3fb13c456ced8e08,1 +np.float64,0xffd584d1812b09a4,0xc0862b165558ec0c,1 +np.float64,0x800b20c30fb64186,0x800b20c30fb64186,1 +np.float64,0x80024f9646e49f2d,0x80024f9646e49f2d,1 +np.float64,0xffefffffffffffff,0xc08633ce8fb9f87e,1 +np.float64,0x3fdddbcb5bbbb797,0x3fdcde981111f650,1 +np.float64,0xffed14077f3a280e,0xc086330a795ad634,1 +np.float64,0x800fec2da7ffd85b,0x800fec2da7ffd85b,1 +np.float64,0x3fe8205ffc7040c0,0x3fe6482318d217f9,1 +np.float64,0x3013e5226027d,0x3013e5226027d,1 +np.float64,0xffe4e5aad469cb55,0xc0863065dc2fb4e3,1 +np.float64,0x5cb0f7b2b9620,0x5cb0f7b2b9620,1 +np.float64,0xbfeb4537d2768a70,0xbfe8bbb2c1d3bff9,1 +np.float64,0xbfd859e297b0b3c6,0xbfd7cc807948bf9d,1 +np.float64,0x71f00b8ce3e02,0x71f00b8ce3e02,1 +np.float64,0xf5c1b875eb837,0xf5c1b875eb837,1 +np.float64,0xa0f35c8141e8,0xa0f35c8141e8,1 +np.float64,0xffe24860b42490c1,0xc0862f54222f616e,1 +np.float64,0xffcd9ae8583b35d0,0xc08628181e643a42,1 +np.float64,0x7fe9b710c7736e21,0x4086320ec033490f,1 +np.float64,0x3fd2b9ca1d257394,0x3fd277e631f0c0b3,1 +np.float64,0x23559bfc46ab4,0x23559bfc46ab4,1 +np.float64,0x8002adf75e455bef,0x8002adf75e455bef,1 +np.float64,0xbfefa4d75cbf49af,0xbfebf392e51d6a1a,1 +np.float64,0xffcfef263e3fde4c,0xc08628b336adb611,1 +np.float64,0x80061acaa8ec3596,0x80061acaa8ec3596,1 +np.float64,0x7fc1b33be0236677,0x408623faaddcc17e,1 +np.float64,0x7fe3a84083675080,0x40862fe8972e41e1,1 +np.float64,0xbfe756c1276ead82,0xbfe5a6318b061e1b,1 +np.float64,0xbfae4b71b43c96e0,0xbfae46ed0b6203a4,1 +np.float64,0x800421c6d0a8438e,0x800421c6d0a8438e,1 +np.float64,0x8009ad56fe335aae,0x8009ad56fe335aae,1 +np.float64,0xbfe71afc976e35f9,0xbfe575d21f3d7193,1 +np.float64,0x7fec0bbe4c38177c,0x408632c0710f1d8a,1 +np.float64,0x750e1daeea1c4,0x750e1daeea1c4,1 +np.float64,0x800501d4240a03a9,0x800501d4240a03a9,1 +np.float64,0x800794955cef292b,0x800794955cef292b,1 +np.float64,0x3fdf8a87f5bf1510,0x3fde62f4f00cfa19,1 +np.float64,0xbfebebdbc7f7d7b8,0xbfe939e51ba1340c,1 +np.float64,0xbfe3a16217a742c4,0xbfe292039dd08a71,1 +np.float64,0x3fed6cd04c3ad9a1,0x3fea58995973f74b,1 +np.float64,0xffcad8787335b0f0,0xc086274fbb35dd37,1 +np.float64,0x3fcb178e3d362f1c,0x3fcae4c9f3e6dddc,1 +np.float64,0xbfcadc669435b8cc,0xbfcaaae7cf075420,1 +np.float64,0x7fe0e3906321c720,0x40862eb1bacc5c43,1 +np.float64,0xff8ad5edb035abc0,0xc0861120b6404d0b,1 +np.float64,0x3fe175a21562eb44,0x3fe0b13120a46549,1 +np.float64,0xbfeb4c4a5f769895,0xbfe8c1147f1c9d8f,1 +np.float64,0x7fca22f4e63445e9,0x40862718e9b4094e,1 +np.float64,0x3fe4269d0c684d3a,0x3fe3032aa2015c53,1 +np.float64,0x3fef551c09beaa38,0x3febbabe03f49c83,1 +np.float64,0xffd843df9fb087c0,0xc0862c0c52d5e5d9,1 +np.float64,0x7fc497e2ca292fc5,0x40862530bbd9fcc7,1 +np.float64,0x3fee02919efc0523,0x3feac655588a4acd,1 +np.float64,0x7fed1e52c0fa3ca5,0x4086330d4ddd8a2c,1 +np.float64,0xba04d4ef7409b,0xba04d4ef7409b,1 +np.float64,0x3fee22d0937c45a2,0x3feaddd4ca66b447,1 +np.float64,0xffeb2558cf764ab1,0xc086327da4e84053,1 +np.float64,0xbfe103d987e207b3,0xbfe04d04818ad1ff,1 +np.float64,0x3f9fd7fed03faffe,0x3f9fd6ae9a45be84,1 +np.float64,0x800a53ec4c34a7d9,0x800a53ec4c34a7d9,1 +np.float64,0xbfe2feb17f65fd63,0xbfe206b9d33a78a2,1 +np.float64,0x989bdd613139,0x989bdd613139,1 +np.float64,0xbfdd0ad3fb3a15a8,0xbfdc20c32a530741,1 +np.float64,0xbfc4222163284444,0xbfc40d1c612784b5,1 +np.float64,0xc30cf5c78619f,0xc30cf5c78619f,1 +np.float64,0x3fe913bd6732277b,0x3fe70912f76bad71,1 +np.float64,0x98f175f531e2f,0x98f175f531e2f,1 +np.float64,0x3fed8c1f717b183f,0x3fea6f9fb3af3423,1 +np.float64,0x7fee46b085bc8d60,0x4086335d269eb7e9,1 +np.float64,0x8007480f564e901f,0x8007480f564e901f,1 +np.float64,0xc9b96e179372e,0xc9b96e179372e,1 +np.float64,0x3fe44deac4289bd6,0x3fe32463a74a69e7,1 +np.float64,0x80021d6c5c243ad9,0x80021d6c5c243ad9,1 +np.float64,0xbfebc805a6f7900b,0xbfe91edcf65a1c19,1 +np.float64,0x80044748adc88e92,0x80044748adc88e92,1 +np.float64,0x4007ee44800fe,0x4007ee44800fe,1 +np.float64,0xbfe24307a4648610,0xbfe1648ad5c47b6f,1 +np.float64,0xbfee6d3a93fcda75,0xbfeb13e1a3196e78,1 +np.float64,0x3fe49a287f293451,0x3fe364a11b9f0068,1 +np.float64,0x80052b37ceaa5670,0x80052b37ceaa5670,1 +np.float64,0xbfd42be893a857d2,0xbfd3da05dac7c286,1 +np.float64,0xffb4bbe4ac2977c8,0xc0861fb31bda6956,1 +np.float64,0xbfc732a4142e6548,0xbfc7129a4eafa399,1 +np.float64,0x7fd0696791a0d2ce,0x408628eb7756cb9c,1 +np.float64,0x3fe46c8f8d68d91f,0x3fe33e3df16187c1,1 +np.float64,0x3fe3a28f1ce7451e,0x3fe293043238d08c,1 +np.float64,0xffedc4eb723b89d6,0xc086333a92258c15,1 +np.float64,0x8000d15b4c41a2b7,0x8000d15b4c41a2b7,1 +np.float64,0xffeb73450236e689,0xc08632947b0148ab,1 +np.float64,0xffe68cf4722d19e8,0xc0863101d08d77bd,1 +np.float64,0x800c70eb4698e1d7,0x800c70eb4698e1d7,1 +np.float64,0xffa94387ff529,0xffa94387ff529,1 +np.float64,0x7fe3835d996706ba,0x40862fd985ff8e7d,1 +np.float64,0x3fe55e476feabc8e,0x3fe408a15594ec52,1 +np.float64,0xffc69672222d2ce4,0xc08625ee0c4c0f6a,1 +np.float64,0xbf9d900b883b2020,0xbf9d8efe811d36df,1 +np.float64,0xbfdb9b9755b7372e,0xbfdad0f2aa2cb110,1 +np.float64,0xffeade6073b5bcc0,0xc08632689f17a25d,1 +np.float64,0xffd1d6a6baa3ad4e,0xc086299630a93a7b,1 +np.float64,0x7fd05ba25620b744,0x408628e4be1ef845,1 +np.float64,0xbfc7d422d52fa844,0xbfc7b170a61531bf,1 +np.float64,0x3fd5196797aa32d0,0x3fd4bc0f0e7d8e1d,1 +np.float64,0x617594a4c2eb3,0x617594a4c2eb3,1 +np.float64,0x7fd779bc4caef378,0x40862bc89271b882,1 +np.float64,0xffd2fb262ba5f64c,0xc0862a15561e9524,1 +np.float64,0x72fd661ae5fad,0x72fd661ae5fad,1 +np.float64,0x3fecf441f339e884,0x3fe9ff880d584f64,1 +np.float64,0x7fc3a8968827512c,0x408624d198b05c61,1 +np.float64,0x3fe7a25c56ef44b9,0x3fe5e32509a7c32d,1 +np.float64,0x7fd117d514222fa9,0x4086293ec640d5f2,1 +np.float64,0x3fe37dfe5ee6fbfc,0x3fe273d1bcaa1ef0,1 +np.float64,0xbfed4cd19d7a99a3,0xbfea41064cba4c8b,1 +np.float64,0x8003ff12aaa7fe26,0x8003ff12aaa7fe26,1 +np.float64,0x3fcbc3d1193787a2,0x3fcb8d39e3e88264,1 +np.float64,0xe9ba1a91d3744,0xe9ba1a91d3744,1 +np.float64,0x8002ab71998556e4,0x8002ab71998556e4,1 +np.float64,0x800110057922200c,0x800110057922200c,1 +np.float64,0xbfe3b7af19a76f5e,0xbfe2a502fc0a2882,1 +np.float64,0x7fd9de9d5e33bd3a,0x40862c8f73cccabf,1 +np.float64,0xbfba0f0a86341e18,0xbfba0392f44c2771,1 +np.float64,0x8000000000000000,0x8000000000000000,1 +np.float64,0x7fe5d162e96ba2c5,0x408630be2b15e01b,1 +np.float64,0x800b7f0eac76fe1e,0x800b7f0eac76fe1e,1 +np.float64,0xff98bed150317da0,0xc086160633164f5f,1 +np.float64,0x3fef91fd70ff23fb,0x3febe629709d0ae7,1 +np.float64,0x7fe5bea7f16b7d4f,0x408630b749f445e9,1 +np.float64,0xbfe3dc428467b885,0xbfe2c41ea93fab07,1 +np.float64,0xbfeba1fbfcf743f8,0xbfe9021b52851bb9,1 +np.float64,0x7fd2fb2108a5f641,0x40862a1553f45830,1 +np.float64,0x7feb8199a4370332,0x40863298a7169dad,1 +np.float64,0x800f97ff8d7f2fff,0x800f97ff8d7f2fff,1 +np.float64,0x3fd5e20b6b2bc417,0x3fd57a42bd1c0993,1 +np.float64,0x8006b4072dad680f,0x8006b4072dad680f,1 +np.float64,0x605dccf2c0bba,0x605dccf2c0bba,1 +np.float64,0x3fc705ed142e0bda,0x3fc6e69971d86f73,1 +np.float64,0xffd2ba1aad257436,0xc08629f9bc918f8b,1 +np.float64,0x8002954e23c52a9d,0x8002954e23c52a9d,1 +np.float64,0xbfecc65da7798cbb,0xbfe9dd745be18562,1 +np.float64,0x7fc66110482cc220,0x408625db0db57ef8,1 +np.float64,0x3fcd09446d3a1289,0x3fcccaf2dd0a41ea,1 +np.float64,0x3febe7095437ce13,0x3fe93642d1e73b2a,1 +np.float64,0x8004773c7da8ee7a,0x8004773c7da8ee7a,1 +np.float64,0x8001833241230665,0x8001833241230665,1 +np.float64,0x3fe6a262db6d44c6,0x3fe513b3dab5adce,1 +np.float64,0xe6282cc1cc506,0xe6282cc1cc506,1 +np.float64,0x800b9d8553973b0b,0x800b9d8553973b0b,1 +np.float64,0x3fdfbe0c7b3f7c19,0x3fde912375d867a8,1 +np.float64,0x7fd5ac11ebab5823,0x40862b24dfc6d08e,1 +np.float64,0x800e4b7cb1fc96f9,0x800e4b7cb1fc96f9,1 +np.float64,0x3fe14706da628e0e,0x3fe0883aec2a917a,1 +np.float64,0x7fc963f97532c7f2,0x408626dd9b0cafe1,1 +np.float64,0xbfe9c250b5b384a2,0xbfe791c5eabcb05d,1 +np.float64,0x3fe8d16e6c71a2dd,0x3fe6d4c7a33a0bf4,1 +np.float64,0x3fe474ae4628e95d,0x3fe34515c93f4733,1 +np.float64,0x3fbf3257ee3e64b0,0x3fbf1eb530e126ea,1 +np.float64,0x8005f089b3abe114,0x8005f089b3abe114,1 +np.float64,0x3fece07bccf9c0f8,0x3fe9f0dc228124d5,1 +np.float64,0xbfc52521632a4a44,0xbfc50ccebdf59c2c,1 +np.float64,0x7fdf53beb13ea77c,0x40862e177918195e,1 +np.float64,0x8003d9f6ad07b3ee,0x8003d9f6ad07b3ee,1 +np.float64,0xffeacf96bbb59f2d,0xc086326436b38b1a,1 +np.float64,0xdccaea29b995e,0xdccaea29b995e,1 +np.float64,0x5948d21eb291b,0x5948d21eb291b,1 +np.float64,0x10000000000000,0x10000000000000,1 +np.float64,0x7fef6d2c543eda58,0x408633a98593cdf5,1 +np.float64,0x7feda454f47b48a9,0x40863331cb6dc9f7,1 +np.float64,0x3fdd377cecba6ef8,0x3fdc4968f74a9c83,1 +np.float64,0x800644096d4c8814,0x800644096d4c8814,1 +np.float64,0xbfe33ca15ae67942,0xbfe23be5de832bd8,1 +np.float64,0xffce9582bd3d2b04,0xc086285abdf9bf9d,1 +np.float64,0x3fe6621e86acc43d,0x3fe4df231bfa93e1,1 +np.float64,0xee7d19e9dcfa3,0xee7d19e9dcfa3,1 +np.float64,0x800be5997277cb33,0x800be5997277cb33,1 +np.float64,0x82069041040e,0x82069041040e,1 +np.float64,0x800d6efdc19addfc,0x800d6efdc19addfc,1 +np.float64,0x7fb27770ee24eee1,0x40861ec5ed91b839,1 +np.float64,0x3fd506064caa0c0d,0x3fd4a9a66353fefd,1 +np.float64,0xbfeca9b36bf95367,0xbfe9c81f03ba37b8,1 +np.float64,0xffeab1b7bab5636f,0xc086325b47f61f2b,1 +np.float64,0xffc99f5b2e333eb8,0xc08626f03b08b412,1 +np.float64,0x3fbf1a71bc3e34e3,0x3fbf06fbcaa5de58,1 +np.float64,0x3fe75015736ea02b,0x3fe5a0cd8d763d8d,1 +np.float64,0xffe6a7442fad4e88,0xc086310b20addba4,1 +np.float64,0x3fe5d62ff86bac60,0x3fe46c033195bf28,1 +np.float64,0x7fd0b1f0362163df,0x4086290e857dc1be,1 +np.float64,0xbe0353737c06b,0xbe0353737c06b,1 +np.float64,0x7fec912d8739225a,0x408632e627704635,1 +np.float64,0xded8ba2fbdb18,0xded8ba2fbdb18,1 +np.float64,0x7fec0b53fdf816a7,0x408632c052bc1bd2,1 +np.float64,0x7fe9640d12b2c819,0x408631f4c2ba54d8,1 +np.float64,0x800be714eeb7ce2a,0x800be714eeb7ce2a,1 +np.float64,0xbfcf444a793e8894,0xbfcef6c126b54853,1 +np.float64,0xffeb20cf1bf6419e,0xc086327c4e6ffe80,1 +np.float64,0xc07de22180fd,0xc07de22180fd,1 +np.float64,0xffed129d387a253a,0xc086330a15ad0adb,1 +np.float64,0x3fd9e94fedb3d2a0,0x3fd94049924706a8,1 +np.float64,0x7fe6ba488c2d7490,0x40863111d51e7861,1 +np.float64,0xbfebbdf25db77be5,0xbfe91740ad7ba521,1 +np.float64,0x7fbc6c3c4838d878,0x40862239160cb613,1 +np.float64,0xbfefa82ecebf505e,0xbfebf5f31957dffd,1 +np.float64,0x800bebeb7ad7d7d7,0x800bebeb7ad7d7d7,1 +np.float64,0x7fecccc6f8f9998d,0x408632f6c6da8aac,1 +np.float64,0xcbe4926197ca,0xcbe4926197ca,1 +np.float64,0x2c5d9fd858bb5,0x2c5d9fd858bb5,1 +np.float64,0xbfe9fb021073f604,0xbfe7bddc61f1151a,1 +np.float64,0xbfebb18572f7630b,0xbfe90ddc5002313f,1 +np.float64,0x13bb0d3227763,0x13bb0d3227763,1 +np.float64,0x3feefa5e5cbdf4bd,0x3feb79b9e8ce16bf,1 +np.float64,0x3fc97f086132fe10,0x3fc9549fc8e15ecb,1 +np.float64,0xffe70887c06e110f,0xc086312d30fd31cf,1 +np.float64,0xa00c113540182,0xa00c113540182,1 +np.float64,0x800950984772a131,0x800950984772a131,1 +np.float64,0x1,0x1,1 +np.float64,0x3fd83b4026b07680,0x3fd7afdc659d9a34,1 +np.float64,0xbfe32348fbe64692,0xbfe226292a706a1a,1 +np.float64,0x800b894dcc77129c,0x800b894dcc77129c,1 +np.float64,0xeb2ca419d6595,0xeb2ca419d6595,1 +np.float64,0xbff0000000000000,0xbfec34366179d427,1 +np.float64,0x3feb269e99f64d3d,0x3fe8a4634b927a21,1 +np.float64,0xbfe83149d7706294,0xbfe655a2b245254e,1 +np.float64,0xbfe6eef3ca6ddde8,0xbfe5521310e24d16,1 +np.float64,0x3fea89a4b7b51349,0x3fe82c1fc69edcec,1 +np.float64,0x800f2a8bf17e5518,0x800f2a8bf17e5518,1 +np.float64,0x800f71fac29ee3f6,0x800f71fac29ee3f6,1 +np.float64,0xe7cb31f1cf966,0xe7cb31f1cf966,1 +np.float64,0x3b0f8752761f2,0x3b0f8752761f2,1 +np.float64,0x3fea27dea3744fbd,0x3fe7e0a4705476b2,1 +np.float64,0xbfa97c019c32f800,0xbfa97950c1257b92,1 +np.float64,0xffeff13647ffe26c,0xc08633cadc7105ed,1 +np.float64,0x3feee162353dc2c4,0x3feb67c2da0fbce8,1 +np.float64,0x80088c0807911810,0x80088c0807911810,1 +np.float64,0x3fe936ab1db26d56,0x3fe72489bc69719d,1 +np.float64,0xa2f84bd545f0a,0xa2f84bd545f0a,1 +np.float64,0xbfed445ed27a88be,0xbfea3acac0aaf482,1 +np.float64,0x800faf3e69df5e7d,0x800faf3e69df5e7d,1 +np.float64,0x3fc145a330228b46,0x3fc13853f11b1c90,1 +np.float64,0xbfe25ec5abe4bd8c,0xbfe17c9e9b486f07,1 +np.float64,0x3fe119b160e23363,0x3fe0604b10178966,1 +np.float64,0x7fe0cbf2836197e4,0x40862ea6831e5f4a,1 +np.float64,0x3fe75dd3b4eebba8,0x3fe5abe80fd628fb,1 +np.float64,0x3f7c391000387220,0x3f7c39015d8f3a36,1 +np.float64,0x899d9cad133b4,0x899d9cad133b4,1 +np.float64,0x3fe5f0e34febe1c6,0x3fe4820cefe138fc,1 +np.float64,0x7fe060dfdba0c1bf,0x40862e72de8afcd0,1 +np.float64,0xbfae42f7103c85f0,0xbfae3e7630819c60,1 +np.float64,0x35f1f2c06be5,0x35f1f2c06be5,1 +np.float64,0xffc5194d362a329c,0xc086256266c8b7ad,1 +np.float64,0xbfda034f1b34069e,0xbfd95860a44c43ad,1 +np.float64,0x32bcebca6579e,0x32bcebca6579e,1 +np.float64,0xbfd1751ebca2ea3e,0xbfd13f79f45bf75c,1 +np.float64,0x3fee4fa1e5bc9f44,0x3feafe69e0d6c1c7,1 +np.float64,0x7f9c03cd5038079a,0x4086170459172900,1 +np.float64,0x7fc5fb6d6d2bf6da,0x408625b6651cfc73,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0xffd1a8162ca3502c,0xc0862981333931ad,1 +np.float64,0x7fc415c198282b82,0x408624fd8c155d1b,1 +np.float64,0xffda37fbe7b46ff8,0xc0862caae7865c43,1 +np.float64,0xbfef4312257e8624,0xbfebadd89f3ee31c,1 +np.float64,0xbfec45e1fd788bc4,0xbfe97d8b14db6274,1 +np.float64,0xbfe6fdcfd26dfba0,0xbfe55e25b770d00a,1 +np.float64,0x7feb66d424f6cda7,0x40863290d9ff7ea2,1 +np.float64,0x8b08a29916115,0x8b08a29916115,1 +np.float64,0xffe12ca25c625944,0xc0862ed40d769f72,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0x804925e100925,0x804925e100925,1 +np.float64,0xcebf3e019d9,0xcebf3e019d9,1 +np.float64,0xbfd5d75d4aabaeba,0xbfd57027671dedf7,1 +np.float64,0x800b829ecd37053e,0x800b829ecd37053e,1 +np.float64,0x800b1205daf6240c,0x800b1205daf6240c,1 +np.float64,0x3fdf7e9889befd31,0x3fde583fdff406c3,1 +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 +np.float64,0x3fdc09760d3812ec,0x3fdb35b55c8090c6,1 +np.float64,0x800c4d99e4f89b34,0x800c4d99e4f89b34,1 +np.float64,0xffbaa6772e354cf0,0xc08621b535badb2f,1 +np.float64,0xbfc91188fd322310,0xbfc8e933b5d25ea7,1 +np.float64,0xffc1b947f4237290,0xc08623fd69164251,1 +np.float64,0x3fc6ab3b252d5678,0x3fc68d50bbac106d,1 +np.float64,0xffac8eb968391d70,0xc0861cb734833355,1 +np.float64,0xffe29a35c365346b,0xc0862f77a1aed6d8,1 +np.float64,0x3fde14b9543c2973,0x3fdd122697779015,1 +np.float64,0xbf10f5400021e000,0xbf10f53fffef1383,1 +np.float64,0xffe0831aa3e10635,0xc0862e838553d0ca,1 +np.float64,0x3fccbadbcf3975b8,0x3fcc7e768d0154ec,1 +np.float64,0x3fe092ef66e125df,0x3fdfd212a7116c9b,1 +np.float64,0xbfd727f039ae4fe0,0xbfd6adad040b2334,1 +np.float64,0xbfe4223b93a84477,0xbfe2ff7587364db4,1 +np.float64,0x3f4e5c3a003cb874,0x3f4e5c39b75c70f7,1 +np.float64,0x800e76b1a87ced63,0x800e76b1a87ced63,1 +np.float64,0x3fed2b7368fa56e7,0x3fea2863b9131b8c,1 +np.float64,0xffadb76ec43b6ee0,0xc0861d08ae79f20c,1 +np.float64,0x800b6a0cd1f6d41a,0x800b6a0cd1f6d41a,1 +np.float64,0xffee6aa943fcd552,0xc0863366a24250d5,1 +np.float64,0xbfe68cbc4e6d1978,0xbfe502040591aa5b,1 +np.float64,0xff859a38002b3480,0xc0860f64726235cc,1 +np.float64,0x3474d13e68e9b,0x3474d13e68e9b,1 +np.float64,0xffc11d49f6223a94,0xc08623b5c2df9712,1 +np.float64,0x800d82d019bb05a0,0x800d82d019bb05a0,1 +np.float64,0xbfe2af0192255e03,0xbfe1c20e38106388,1 +np.float64,0x3fe97d13c032fa28,0x3fe75bba11a65f86,1 +np.float64,0x7fcd457e133a8afb,0x40862800e80f5863,1 +np.float64,0x9d7254cf3ae4b,0x9d7254cf3ae4b,1 +np.float64,0x8003047675a608ee,0x8003047675a608ee,1 +np.float64,0x3fead6cd7d75ad9a,0x3fe8676138e5ff93,1 +np.float64,0x3fea6ee3b0f4ddc7,0x3fe817838a2bcbe3,1 +np.float64,0x3feed0edea7da1dc,0x3feb5bea3cb12fe2,1 +np.float64,0x88003fe510008,0x88003fe510008,1 +np.float64,0x3fe64cadc56c995c,0x3fe4cd8ead87fc79,1 +np.float64,0xaae30c5955c62,0xaae30c5955c62,1 +np.float64,0x7fc8c97cae3192f8,0x408626ac579f4fc5,1 +np.float64,0xbfc2bc0e8b25781c,0xbfc2ab188fdab7dc,1 +np.float64,0xc8f8e5e791f1d,0xc8f8e5e791f1d,1 +np.float64,0x3fecfaa5d6f9f54c,0x3fea0444dabe5a15,1 +np.float64,0xbfeb93740ff726e8,0xbfe8f71a9ab13baf,1 +np.float64,0xffd951236c32a246,0xc0862c633a4661eb,1 +np.float64,0x3fddbc5fcd3b78c0,0x3fdcc21c1a0a9246,1 +np.float64,0xbfd242443da48488,0xbfd20512d91f7924,1 +np.float64,0x2a3689b2546d2,0x2a3689b2546d2,1 +np.float64,0xffe24c67382498ce,0xc0862f55e4ea6283,1 +np.float64,0x800cbfce22197f9c,0x800cbfce22197f9c,1 +np.float64,0x8002269428044d29,0x8002269428044d29,1 +np.float64,0x7fd44babbd289756,0x40862a9e79b51c3b,1 +np.float64,0x3feea056a27d40ad,0x3feb38dcddb682f0,1 +np.float64,0xffeca8174b39502e,0xc08632ec8f88a5b2,1 +np.float64,0x7fbe0853a03c10a6,0x408622a9e8d53a9e,1 +np.float64,0xbfa9704b2432e090,0xbfa96d9dfc8c0cc2,1 +np.float64,0x800bda28fab7b452,0x800bda28fab7b452,1 +np.float64,0xbfb0ffa2f621ff48,0xbfb0fc71f405e82a,1 +np.float64,0xbfe66c04216cd808,0xbfe4e73ea3b58cf6,1 +np.float64,0x3fe336ea5d266dd5,0x3fe236ffcf078c62,1 +np.float64,0xbfe7729ae6aee536,0xbfe5bcad4b8ac62d,1 +np.float64,0x558cfc96ab1a0,0x558cfc96ab1a0,1 +np.float64,0xbfe7d792aaefaf26,0xbfe60de1b8f0279d,1 +np.float64,0xffd19ef6bda33dee,0xc086297d0ffee3c7,1 +np.float64,0x666b3ab4ccd68,0x666b3ab4ccd68,1 +np.float64,0xffa3d89e3c27b140,0xc08619cdeb2c1e49,1 +np.float64,0xbfb1728f7f62f,0xbfb1728f7f62f,1 +np.float64,0x3fc76319f32ec634,0x3fc74247bd005e20,1 +np.float64,0xbfbf1caee23e3960,0xbfbf0934c13d70e2,1 +np.float64,0x7fe79626f32f2c4d,0x4086315dcc68a5cb,1 +np.float64,0xffee78c4603cf188,0xc086336a572c05c2,1 +np.float64,0x3fce546eda3ca8de,0x3fce0d8d737fd31d,1 +np.float64,0xa223644d4446d,0xa223644d4446d,1 +np.float64,0x3fecea878b79d510,0x3fe9f850d50973f6,1 +np.float64,0x3fc20e0ea1241c1d,0x3fc1fedda87c5e75,1 +np.float64,0xffd1c5a99ca38b54,0xc086298e8e94cd47,1 +np.float64,0x7feb2c299d765852,0x4086327fa6db2808,1 +np.float64,0xcaf9d09595f3a,0xcaf9d09595f3a,1 +np.float64,0xbfe293bf21e5277e,0xbfe1aa7f6ac274ef,1 +np.float64,0xbfbaa3c8ce354790,0xbfba97891df19c01,1 +np.float64,0x3faf5784543eaf09,0x3faf5283acc7d71d,1 +np.float64,0x7fc014f8f62029f1,0x40862336531c662d,1 +np.float64,0xbfe0d9ac2d61b358,0xbfe027bce36699ca,1 +np.float64,0x8003e112ff27c227,0x8003e112ff27c227,1 +np.float64,0xffec0d4151381a82,0xc08632c0df718dd0,1 +np.float64,0x7fa2156fb0242ade,0x4086190f7587d708,1 +np.float64,0xd698358dad307,0xd698358dad307,1 +np.float64,0xbfed8d1b0efb1a36,0xbfea70588ef9ba18,1 +np.float64,0xbfd2cae6a92595ce,0xbfd28851e2185dee,1 +np.float64,0xffe7a36764ef46ce,0xc086316249c9287a,1 +np.float64,0xbfdb8ad8e5b715b2,0xbfdac19213c14315,1 +np.float64,0x3b5dba6076bc,0x3b5dba6076bc,1 +np.float64,0x800e6e8347bcdd07,0x800e6e8347bcdd07,1 +np.float64,0x800bea9f3fb7d53f,0x800bea9f3fb7d53f,1 +np.float64,0x7fb6d0e5fc2da1cb,0x4086207714c4ab85,1 +np.float64,0x0,0x0,1 +np.float64,0xbfe2aa1e1465543c,0xbfe1bdd550ef2966,1 +np.float64,0x7fd3f6a47fa7ed48,0x40862a7caea33055,1 +np.float64,0x800094e292c129c6,0x800094e292c129c6,1 +np.float64,0x800e1500ecbc2a02,0x800e1500ecbc2a02,1 +np.float64,0xbfd8ff6f97b1fee0,0xbfd866f84346ecdc,1 +np.float64,0x681457d0d028c,0x681457d0d028c,1 +np.float64,0x3feed0b5987da16b,0x3feb5bc1ab424984,1 +np.float64,0x3fdbcb34cdb79668,0x3fdafca540f32c06,1 +np.float64,0xbfdc9eacdcb93d5a,0xbfdbbe274aa8aeb0,1 +np.float64,0xffe6e35d526dc6ba,0xc08631203df38ed2,1 +np.float64,0x3fcac1cc65358398,0x3fca90de41889613,1 +np.float64,0xbfebf07a55b7e0f5,0xbfe93d6007db0c67,1 +np.float64,0xbfd7a7b1e7af4f64,0xbfd725a9081c22cb,1 +np.float64,0x800232bd7de4657c,0x800232bd7de4657c,1 +np.float64,0x7fb1dae43c23b5c7,0x40861e80f5c0a64e,1 +np.float64,0x8013ded70027c,0x8013ded70027c,1 +np.float64,0x7fc4373a59286e74,0x4086250ad60575d0,1 +np.float64,0xbfe9980fd6733020,0xbfe770d1352d0ed3,1 +np.float64,0x8008a66b8dd14cd7,0x8008a66b8dd14cd7,1 +np.float64,0xbfaebc67f83d78d0,0xbfaeb7b015848478,1 +np.float64,0xffd0c52762218a4e,0xc0862917b564afc6,1 +np.float64,0xbfd503860aaa070c,0xbfd4a74618441561,1 +np.float64,0x5bdacabcb7b5a,0x5bdacabcb7b5a,1 +np.float64,0xf3623cffe6c48,0xf3623cffe6c48,1 +np.float64,0x7fe16c6c7ea2d8d8,0x40862ef18d90201f,1 +np.float64,0x3ff0000000000000,0x3fec34366179d427,1 +np.float64,0x7fe19cbc84233978,0x40862f079dcbc169,1 +np.float64,0x3fcfd3d6933fa7ad,0x3fcf822187907f6b,1 +np.float64,0x8007d65d672facbc,0x8007d65d672facbc,1 +np.float64,0xffca6115aa34c22c,0xc086272bd7728750,1 +np.float64,0xbfe77ab1556ef562,0xbfe5c332fb55b66e,1 +np.float64,0x8001ed797c23daf4,0x8001ed797c23daf4,1 +np.float64,0x7fdd3d16cb3a7a2d,0x40862d8a2c869281,1 +np.float64,0x75f36beaebe6e,0x75f36beaebe6e,1 +np.float64,0xffda3c2798b47850,0xc0862cac2d3435df,1 +np.float64,0xbfa37cc3c426f980,0xbfa37b8f9d3ec4b7,1 +np.float64,0x80030ea8bd061d52,0x80030ea8bd061d52,1 +np.float64,0xffe41f7617683eec,0xc08630188a3e135e,1 +np.float64,0x800e40590dfc80b2,0x800e40590dfc80b2,1 +np.float64,0x3fea950d80f52a1c,0x3fe834e74481e66f,1 +np.float64,0xffec95e39a792bc6,0xc08632e779150084,1 +np.float64,0xbfd54310ecaa8622,0xbfd4e39c4d767002,1 +np.float64,0xffd40c9971a81932,0xc0862a85764eb2f4,1 +np.float64,0xb0a2230761445,0xb0a2230761445,1 +np.float64,0x80092973661252e7,0x80092973661252e7,1 +np.float64,0x7fb13b030a227605,0x40861e380aeb5549,1 +np.float64,0x3fbd5d8db23abb1b,0x3fbd4d2a0b94af36,1 +np.float64,0xbfd6cb8567ad970a,0xbfd656b19ab8fa61,1 +np.float64,0xbfe7c0fd346f81fa,0xbfe5fbc28807c794,1 +np.float64,0xffd586579eab0cb0,0xc0862b16e65c0754,1 +np.float64,0x8000e52da461ca5c,0x8000e52da461ca5c,1 +np.float64,0x3fc69d17112d3a2e,0x3fc67f63fe1fea1c,1 +np.float64,0x3fd36ba892a6d750,0x3fd3225be1fa87af,1 +np.float64,0x7fe2850598e50a0a,0x40862f6e7fcd6c1a,1 +np.float64,0x80074a4dacce949c,0x80074a4dacce949c,1 +np.float64,0x3fe25eea4d64bdd5,0x3fe17cbe5fefbd4e,1 +np.float64,0xbfe250c08be4a181,0xbfe17074c520e5de,1 +np.float64,0x8000f5665481eacd,0x8000f5665481eacd,1 +np.float64,0x7fdb3172f83662e5,0x40862cf5a46764f1,1 +np.float64,0x7fd8ed82d631db05,0x40862c4380658afa,1 +np.float64,0xffec5163feb8a2c7,0xc08632d4366aab06,1 +np.float64,0x800ff14ac6ffe296,0x800ff14ac6ffe296,1 +np.float64,0xbfc7cc7aea2f98f4,0xbfc7a9e9cb38f023,1 +np.float64,0xbfd50cdfc32a19c0,0xbfd4b0282b452fb2,1 +np.float64,0xbfec256d75b84adb,0xbfe965328c1860b2,1 +np.float64,0xffe860c4cdb0c189,0xc08631a164b7059a,1 +np.float64,0xbfe23de164247bc3,0xbfe16011bffa4651,1 +np.float64,0xcc96b39d992d7,0xcc96b39d992d7,1 +np.float64,0xbfec43acf938875a,0xbfe97be3a13b50c3,1 +np.float64,0xc4f587bb89eb1,0xc4f587bb89eb1,1 +np.float64,0xbfcd971d9a3b2e3c,0xbfcd5537ad15dab4,1 +np.float64,0xffcaf00d8035e01c,0xc0862756bf2cdf8f,1 +np.float64,0x8008c26f93f184e0,0x8008c26f93f184e0,1 +np.float64,0xfff0000000000000,0xfff0000000000000,1 +np.float64,0xbfd13552c3a26aa6,0xbfd101e5e252eb7b,1 +np.float64,0x7fe497235e292e46,0x4086304792fb423a,1 +np.float64,0x7fd6dc0192adb802,0x40862b921a5e935d,1 +np.float64,0xf16d49a1e2da9,0xf16d49a1e2da9,1 +np.float64,0xffef6b1b71bed636,0xc08633a8feed0178,1 +np.float64,0x7fe15ec62f62bd8b,0x40862eeb46b193dc,1 +np.float64,0x3fef4369ec7e86d4,0x3febae1768be52cc,1 +np.float64,0x4f84e8e89f09e,0x4f84e8e89f09e,1 +np.float64,0xbfe19e71ade33ce4,0xbfe0d4fad05e0ebc,1 +np.float64,0xbfe7e1df1defc3be,0xbfe616233e15b3d0,1 +np.float64,0x7fe9349afdb26935,0x408631e5c1c5c6cd,1 +np.float64,0xff90c35ac82186c0,0xc08612e896a06467,1 +np.float64,0xbfe88bf8807117f1,0xbfe69dc786464422,1 +np.float64,0x3feaf9ff6475f3fe,0x3fe8825132410d18,1 +np.float64,0x9ff487a33fe91,0x9ff487a33fe91,1 +np.float64,0x7fedb30159bb6602,0x40863335c0419322,1 +np.float64,0x800bddf6ed77bbee,0x800bddf6ed77bbee,1 +np.float64,0x3fd919df133233be,0x3fd87f963b9584ce,1 +np.float64,0x7fd64da3b52c9b46,0x40862b5fa9dd3b6d,1 +np.float64,0xbfce288db43c511c,0xbfcde2d953407ae8,1 +np.float64,0x3fe88bc72771178e,0x3fe69da05e9e9b4e,1 +np.float64,0x800feafe259fd5fc,0x800feafe259fd5fc,1 +np.float64,0x3febbbff4a7777ff,0x3fe915c78f6a280f,1 +np.float64,0xbfefbde4417f7bc9,0xbfec055f4fb2cd21,1 +np.float64,0xf13ca103e2794,0xf13ca103e2794,1 +np.float64,0x3fe6423884ec8471,0x3fe4c4f97eaa876a,1 +np.float64,0x800ca01c8cb94039,0x800ca01c8cb94039,1 +np.float64,0x3fbc5073f638a0e0,0x3fbc41c163ac0001,1 +np.float64,0xbfda0d83cfb41b08,0xbfd961d4cacc82cf,1 +np.float64,0x800f37b8f17e6f72,0x800f37b8f17e6f72,1 +np.float64,0x7fe0b08cd7216119,0x40862e996becb771,1 +np.float64,0xffd4222a40a84454,0xc0862a8e0c984917,1 +np.float64,0x7feb3df98ff67bf2,0x40863284e3a86ee6,1 +np.float64,0x8001d5d291e3aba6,0x8001d5d291e3aba6,1 +np.float64,0xbfd3c21629a7842c,0xbfd3750095a5894a,1 +np.float64,0xbfd069eb48a0d3d6,0xbfd03d2b1c2ae9db,1 +np.float64,0xffeb1be2973637c4,0xc086327ada954662,1 +np.float64,0x3fc659f97e2cb3f3,0x3fc63d497a451f10,1 +np.float64,0xbfeb624bc776c498,0xbfe8d1cf7c0626ca,1 +np.float64,0xffeedf26e23dbe4d,0xc08633850baab425,1 +np.float64,0xffe70da48a6e1b48,0xc086312ef75d5036,1 +np.float64,0x2b4f4830569ea,0x2b4f4830569ea,1 +np.float64,0xffe82e7fcfb05cff,0xc0863190d4771f75,1 +np.float64,0x3fcc2c1fd5385840,0x3fcbf3211ddc5123,1 +np.float64,0x7fe22ced5a6459da,0x40862f481629ee6a,1 +np.float64,0x7fe13d2895e27a50,0x40862edbbc411899,1 +np.float64,0x3fd54c4280aa9884,0x3fd4ec55a946c5d7,1 +np.float64,0xffd75b8e01aeb71c,0xc0862bbe42d76e5e,1 +np.float64,0x7f1d5376fe3ab,0x7f1d5376fe3ab,1 +np.float64,0x3fe6ec6c902dd8d9,0x3fe55004f35192bd,1 +np.float64,0x5634504aac68b,0x5634504aac68b,1 +np.float64,0x3feedb0d83bdb61b,0x3feb633467467ce6,1 +np.float64,0x3fddb1c0dcbb6380,0x3fdcb87a02daf1fa,1 +np.float64,0xbfa832da443065b0,0xbfa8308c70257209,1 +np.float64,0x87a9836b0f531,0x87a9836b0f531,1 diff --git a/local_packages/numpy/_core/tests/data/umath-validation-set-arctan.csv b/local_packages/numpy/_core/tests/data/umath-validation-set-arctan.csv new file mode 100644 index 0000000..c03e144 --- /dev/null +++ b/local_packages/numpy/_core/tests/data/umath-validation-set-arctan.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0x3f338252,0x3f1c8d9c,3 +np.float32,0x7e569df2,0x3fc90fdb,3 +np.float32,0xbf347e25,0xbf1d361f,3 +np.float32,0xbf0a654e,0xbefdbfd2,3 +np.float32,0x8070968e,0x8070968e,3 +np.float32,0x803cfb27,0x803cfb27,3 +np.float32,0x8024362e,0x8024362e,3 +np.float32,0xfd55dca0,0xbfc90fdb,3 +np.float32,0x592b82,0x592b82,3 +np.float32,0x802eb8e1,0x802eb8e1,3 +np.float32,0xbc5fef40,0xbc5febae,3 +np.float32,0x3f1f6ce8,0x3f0e967c,3 +np.float32,0x20bedc,0x20bedc,3 +np.float32,0xbf058860,0xbef629c7,3 +np.float32,0x311504,0x311504,3 +np.float32,0xbd23f560,0xbd23defa,3 +np.float32,0x800ff4e8,0x800ff4e8,3 +np.float32,0x355009,0x355009,3 +np.float32,0x3f7be42e,0x3f46fdb3,3 +np.float32,0xbf225f7c,0xbf10b364,3 +np.float32,0x8074fa9e,0x8074fa9e,3 +np.float32,0xbea4b418,0xbe9f59ce,3 +np.float32,0xbe909c14,0xbe8cf045,3 +np.float32,0x80026bee,0x80026bee,3 +np.float32,0x3d789c20,0x3d784e25,3 +np.float32,0x7f56a4ba,0x3fc90fdb,3 +np.float32,0xbf70d141,0xbf413db7,3 +np.float32,0xbf2c4886,0xbf17a505,3 +np.float32,0x7e2993bf,0x3fc90fdb,3 +np.float32,0xbe2c8a30,0xbe2aef28,3 +np.float32,0x803f82d9,0x803f82d9,3 +np.float32,0x3f062fbc,0x3ef730a1,3 +np.float32,0x3f349ee0,0x3f1d4bfa,3 +np.float32,0x3eccfb69,0x3ec2f9e8,3 +np.float32,0x7e8a85dd,0x3fc90fdb,3 +np.float32,0x25331,0x25331,3 +np.float32,0x464f19,0x464f19,3 +np.float32,0x8035c818,0x8035c818,3 +np.float32,0x802e5799,0x802e5799,3 +np.float32,0x64e1c0,0x64e1c0,3 +np.float32,0x701cc2,0x701cc2,3 +np.float32,0x265c57,0x265c57,3 +np.float32,0x807a053f,0x807a053f,3 +np.float32,0x3bd2c412,0x3bd2c354,3 +np.float32,0xff28f1c8,0xbfc90fdb,3 +np.float32,0x7f08f08b,0x3fc90fdb,3 +np.float32,0x800c50e4,0x800c50e4,3 +np.float32,0x369674,0x369674,3 +np.float32,0xbf5b7db3,0xbf3571bf,3 +np.float32,0x7edcf5e2,0x3fc90fdb,3 +np.float32,0x800e5d4b,0x800e5d4b,3 +np.float32,0x80722554,0x80722554,3 +np.float32,0x693f33,0x693f33,3 +np.float32,0x800844e4,0x800844e4,3 +np.float32,0xbf111b82,0xbf0402ec,3 +np.float32,0x7df9c9ac,0x3fc90fdb,3 +np.float32,0xbf6619a6,0xbf3b6f57,3 +np.float32,0x8002fafe,0x8002fafe,3 +np.float32,0xfe1e67f8,0xbfc90fdb,3 +np.float32,0x3f7f4bf8,0x3f48b5b7,3 +np.float32,0x7f017b20,0x3fc90fdb,3 +np.float32,0x2d9b07,0x2d9b07,3 +np.float32,0x803aa174,0x803aa174,3 +np.float32,0x7d530336,0x3fc90fdb,3 +np.float32,0x80662195,0x80662195,3 +np.float32,0xfd5ebcf0,0xbfc90fdb,3 +np.float32,0xbe7b8dcc,0xbe76ab59,3 +np.float32,0x7f2bacaf,0x3fc90fdb,3 +np.float32,0x3f194fc4,0x3f0a229e,3 +np.float32,0x7ee21cdf,0x3fc90fdb,3 +np.float32,0x3f5a17fc,0x3f34a307,3 +np.float32,0x7f100c58,0x3fc90fdb,3 +np.float32,0x7e9128f5,0x3fc90fdb,3 +np.float32,0xbf2107c6,0xbf0fbdb4,3 +np.float32,0xbd29c800,0xbd29af22,3 +np.float32,0xbf5af499,0xbf3522a6,3 +np.float32,0x801bde44,0x801bde44,3 +np.float32,0xfeb4761a,0xbfc90fdb,3 +np.float32,0x3d88aa1b,0x3d887650,3 +np.float32,0x7eba5e0b,0x3fc90fdb,3 +np.float32,0x803906bd,0x803906bd,3 +np.float32,0x80101512,0x80101512,3 +np.float32,0x7e898f83,0x3fc90fdb,3 +np.float32,0x806406d3,0x806406d3,3 +np.float32,0x7ed20fc0,0x3fc90fdb,3 +np.float32,0x20827d,0x20827d,3 +np.float32,0x3f361359,0x3f1e43fe,3 +np.float32,0xfe4ef8d8,0xbfc90fdb,3 +np.float32,0x805e7d2d,0x805e7d2d,3 +np.float32,0xbe4316b0,0xbe40c745,3 +np.float32,0xbf0a1c06,0xbefd4e5a,3 +np.float32,0x3e202860,0x3e1edee1,3 +np.float32,0xbeb32a2c,0xbeac5899,3 +np.float32,0xfe528838,0xbfc90fdb,3 +np.float32,0x2f73e2,0x2f73e2,3 +np.float32,0xbe16e010,0xbe15cc27,3 +np.float32,0x3f50d6c5,0x3f2f2d75,3 +np.float32,0xbe88a6a2,0xbe8589c7,3 +np.float32,0x3ee36060,0x3ed5fb36,3 +np.float32,0x6c978b,0x6c978b,3 +np.float32,0x7f1b735f,0x3fc90fdb,3 +np.float32,0x3dad8256,0x3dad1885,3 +np.float32,0x807f5094,0x807f5094,3 +np.float32,0x65c358,0x65c358,3 +np.float32,0xff315ce4,0xbfc90fdb,3 +np.float32,0x7411a6,0x7411a6,3 +np.float32,0x80757b04,0x80757b04,3 +np.float32,0x3eec73a6,0x3edd82f4,3 +np.float32,0xfe9f69e8,0xbfc90fdb,3 +np.float32,0x801f4fa8,0x801f4fa8,3 +np.float32,0xbf6f2fae,0xbf405f79,3 +np.float32,0xfea206b6,0xbfc90fdb,3 +np.float32,0x3f257301,0x3f12e1ee,3 +np.float32,0x7ea6a506,0x3fc90fdb,3 +np.float32,0x80800000,0x80800000,3 +np.float32,0xff735c2d,0xbfc90fdb,3 +np.float32,0x80197f95,0x80197f95,3 +np.float32,0x7f4a354f,0x3fc90fdb,3 +np.float32,0xff320c00,0xbfc90fdb,3 +np.float32,0x3f2659de,0x3f138484,3 +np.float32,0xbe5451bc,0xbe515a52,3 +np.float32,0x3f6e228c,0x3f3fcf7c,3 +np.float32,0x66855a,0x66855a,3 +np.float32,0x8034b3a3,0x8034b3a3,3 +np.float32,0xbe21a2fc,0xbe20505d,3 +np.float32,0x7f79e2dc,0x3fc90fdb,3 +np.float32,0xbe19a8e0,0xbe18858c,3 +np.float32,0x10802c,0x10802c,3 +np.float32,0xfeee579e,0xbfc90fdb,3 +np.float32,0x3f3292c8,0x3f1becc0,3 +np.float32,0xbf595a71,0xbf34350a,3 +np.float32,0xbf7c3373,0xbf4725f4,3 +np.float32,0xbdd30938,0xbdd24b36,3 +np.float32,0x153a17,0x153a17,3 +np.float32,0x807282a0,0x807282a0,3 +np.float32,0xfe817322,0xbfc90fdb,3 +np.float32,0x3f1b3628,0x3f0b8771,3 +np.float32,0x41be8f,0x41be8f,3 +np.float32,0x7f4a8343,0x3fc90fdb,3 +np.float32,0x3dc4ea2b,0x3dc44fae,3 +np.float32,0x802aac25,0x802aac25,3 +np.float32,0xbf20e1d7,0xbf0fa284,3 +np.float32,0xfd91a1b0,0xbfc90fdb,3 +np.float32,0x3f0d5476,0x3f012265,3 +np.float32,0x21c916,0x21c916,3 +np.float32,0x807df399,0x807df399,3 +np.float32,0x7e207b4c,0x3fc90fdb,3 +np.float32,0x8055f8ff,0x8055f8ff,3 +np.float32,0x7edf3b01,0x3fc90fdb,3 +np.float32,0x803a8df3,0x803a8df3,3 +np.float32,0x3ce3b002,0x3ce3a101,3 +np.float32,0x3f62dd54,0x3f39a248,3 +np.float32,0xff33ae10,0xbfc90fdb,3 +np.float32,0x7e3de69d,0x3fc90fdb,3 +np.float32,0x8024581e,0x8024581e,3 +np.float32,0xbf4ac99d,0xbf2b807a,3 +np.float32,0x3f157d19,0x3f074d8c,3 +np.float32,0xfed383f4,0xbfc90fdb,3 +np.float32,0xbf5a39fa,0xbf34b6b8,3 +np.float32,0x800d757d,0x800d757d,3 +np.float32,0x807d606b,0x807d606b,3 +np.float32,0x3e828f89,0x3e7fac2d,3 +np.float32,0x7a6604,0x7a6604,3 +np.float32,0x7dc7e72b,0x3fc90fdb,3 +np.float32,0x80144146,0x80144146,3 +np.float32,0x7c2eed69,0x3fc90fdb,3 +np.float32,0x3f5b4d8c,0x3f3555fc,3 +np.float32,0xfd8b7778,0xbfc90fdb,3 +np.float32,0xfc9d9140,0xbfc90fdb,3 +np.float32,0xbea265d4,0xbe9d4232,3 +np.float32,0xbe9344d0,0xbe8f65da,3 +np.float32,0x3f71f19a,0x3f41d65b,3 +np.float32,0x804a3f59,0x804a3f59,3 +np.float32,0x3e596290,0x3e563476,3 +np.float32,0x3e994ee4,0x3e94f546,3 +np.float32,0xbc103e00,0xbc103d0c,3 +np.float32,0xbf1cd896,0xbf0cb889,3 +np.float32,0x7f52b080,0x3fc90fdb,3 +np.float32,0xff584452,0xbfc90fdb,3 +np.float32,0x58b26b,0x58b26b,3 +np.float32,0x3f23cd4c,0x3f11b799,3 +np.float32,0x707d7,0x707d7,3 +np.float32,0xff732cff,0xbfc90fdb,3 +np.float32,0x3e41c2a6,0x3e3f7f0f,3 +np.float32,0xbf7058e9,0xbf40fdcf,3 +np.float32,0x7dca9857,0x3fc90fdb,3 +np.float32,0x7f0eb44b,0x3fc90fdb,3 +np.float32,0x8000405c,0x8000405c,3 +np.float32,0x4916ab,0x4916ab,3 +np.float32,0x4811a8,0x4811a8,3 +np.float32,0x3d69bf,0x3d69bf,3 +np.float32,0xfeadcf1e,0xbfc90fdb,3 +np.float32,0x3e08dbbf,0x3e080d58,3 +np.float32,0xff031f88,0xbfc90fdb,3 +np.float32,0xbe09cab8,0xbe08f818,3 +np.float32,0x21d7cd,0x21d7cd,3 +np.float32,0x3f23230d,0x3f113ea9,3 +np.float32,0x7e8a48d4,0x3fc90fdb,3 +np.float32,0x413869,0x413869,3 +np.float32,0x7e832990,0x3fc90fdb,3 +np.float32,0x800f5c09,0x800f5c09,3 +np.float32,0x7f5893b6,0x3fc90fdb,3 +np.float32,0x7f06b5b1,0x3fc90fdb,3 +np.float32,0xbe1cbee8,0xbe1b89d6,3 +np.float32,0xbf279f14,0xbf1468a8,3 +np.float32,0xfea86060,0xbfc90fdb,3 +np.float32,0x3e828174,0x3e7f91bb,3 +np.float32,0xff682c82,0xbfc90fdb,3 +np.float32,0x4e20f3,0x4e20f3,3 +np.float32,0x7f17d7e9,0x3fc90fdb,3 +np.float32,0x80671f92,0x80671f92,3 +np.float32,0x7f6dd100,0x3fc90fdb,3 +np.float32,0x3f219a4d,0x3f102695,3 +np.float32,0x803c9808,0x803c9808,3 +np.float32,0x3c432ada,0x3c43287d,3 +np.float32,0xbd3db450,0xbd3d91a2,3 +np.float32,0x3baac135,0x3baac0d0,3 +np.float32,0xff7fffe1,0xbfc90fdb,3 +np.float32,0xfe38a6f4,0xbfc90fdb,3 +np.float32,0x3dfb0a04,0x3df9cb04,3 +np.float32,0x800b05c2,0x800b05c2,3 +np.float32,0x644163,0x644163,3 +np.float32,0xff03a025,0xbfc90fdb,3 +np.float32,0x3f7d506c,0x3f47b641,3 +np.float32,0xff0e682a,0xbfc90fdb,3 +np.float32,0x3e09b7b0,0x3e08e567,3 +np.float32,0x7f72a216,0x3fc90fdb,3 +np.float32,0x7f800000,0x3fc90fdb,3 +np.float32,0x8050a281,0x8050a281,3 +np.float32,0x7edafa2f,0x3fc90fdb,3 +np.float32,0x3f4e0df6,0x3f2d7f2f,3 +np.float32,0xbf6728e0,0xbf3c050f,3 +np.float32,0x3e904ce4,0x3e8ca6eb,3 +np.float32,0x0,0x0,3 +np.float32,0xfd215070,0xbfc90fdb,3 +np.float32,0x7e406b15,0x3fc90fdb,3 +np.float32,0xbf2803c9,0xbf14af18,3 +np.float32,0x5950c8,0x5950c8,3 +np.float32,0xbeddcec8,0xbed14faa,3 +np.float32,0xbec6457e,0xbebd2aa5,3 +np.float32,0xbf42843c,0xbf2656db,3 +np.float32,0x3ee9cba8,0x3edb5163,3 +np.float32,0xbe30c954,0xbe2f0f90,3 +np.float32,0xbeee6b44,0xbedf216f,3 +np.float32,0xbe35d818,0xbe33f7cd,3 +np.float32,0xbe47c630,0xbe454bc6,3 +np.float32,0x801b146f,0x801b146f,3 +np.float32,0x7f6788da,0x3fc90fdb,3 +np.float32,0x3eaef088,0x3ea8927d,3 +np.float32,0x3eb5983e,0x3eae81fc,3 +np.float32,0x40b51d,0x40b51d,3 +np.float32,0xfebddd04,0xbfc90fdb,3 +np.float32,0x3e591aee,0x3e55efea,3 +np.float32,0xbe2b6b48,0xbe29d81f,3 +np.float32,0xff4a8826,0xbfc90fdb,3 +np.float32,0x3e791df0,0x3e745eac,3 +np.float32,0x7c8f681f,0x3fc90fdb,3 +np.float32,0xfe7a15c4,0xbfc90fdb,3 +np.float32,0x3c8963,0x3c8963,3 +np.float32,0x3f0afa0a,0x3efea5cc,3 +np.float32,0xbf0d2680,0xbf00ff29,3 +np.float32,0x3dc306b0,0x3dc27096,3 +np.float32,0x7f4cf105,0x3fc90fdb,3 +np.float32,0xbe196060,0xbe183ea4,3 +np.float32,0x5caf1c,0x5caf1c,3 +np.float32,0x801f2852,0x801f2852,3 +np.float32,0xbe01aa0c,0xbe00fa53,3 +np.float32,0x3f0cfd32,0x3f00df7a,3 +np.float32,0x7d82038e,0x3fc90fdb,3 +np.float32,0x7f7b927f,0x3fc90fdb,3 +np.float32,0xbe93b2e4,0xbe8fcb7f,3 +np.float32,0x1ffe8c,0x1ffe8c,3 +np.float32,0x3faaf6,0x3faaf6,3 +np.float32,0x3e32b1b8,0x3e30e9ab,3 +np.float32,0x802953c0,0x802953c0,3 +np.float32,0xfe5d9844,0xbfc90fdb,3 +np.float32,0x3e1a59d0,0x3e193292,3 +np.float32,0x801c6edc,0x801c6edc,3 +np.float32,0x1ecf41,0x1ecf41,3 +np.float32,0xfe56b09c,0xbfc90fdb,3 +np.float32,0x7e878351,0x3fc90fdb,3 +np.float32,0x3f401e2c,0x3f24cfcb,3 +np.float32,0xbf204a40,0xbf0f35bb,3 +np.float32,0x3e155a98,0x3e144ee1,3 +np.float32,0xbf34f929,0xbf1d8838,3 +np.float32,0x801bbf70,0x801bbf70,3 +np.float32,0x7e7c9730,0x3fc90fdb,3 +np.float32,0x7cc23432,0x3fc90fdb,3 +np.float32,0xbf351638,0xbf1d9b97,3 +np.float32,0x80152094,0x80152094,3 +np.float32,0x3f2d731c,0x3f187219,3 +np.float32,0x804ab0b7,0x804ab0b7,3 +np.float32,0x37d6db,0x37d6db,3 +np.float32,0xbf3ccc56,0xbf22acbf,3 +np.float32,0x3e546f8c,0x3e5176e7,3 +np.float32,0xbe90e87e,0xbe8d3707,3 +np.float32,0x48256c,0x48256c,3 +np.float32,0x7e2468d0,0x3fc90fdb,3 +np.float32,0x807af47e,0x807af47e,3 +np.float32,0x3ed4b221,0x3ec996f0,3 +np.float32,0x3d3b1956,0x3d3af811,3 +np.float32,0xbe69d93c,0xbe65e7f0,3 +np.float32,0xff03ff14,0xbfc90fdb,3 +np.float32,0x801e79dc,0x801e79dc,3 +np.float32,0x3f467c53,0x3f28d63d,3 +np.float32,0x3eab6baa,0x3ea56a1c,3 +np.float32,0xbf15519c,0xbf072d1c,3 +np.float32,0x7f0bd8e8,0x3fc90fdb,3 +np.float32,0xbe1e0d1c,0xbe1cd053,3 +np.float32,0x8016edab,0x8016edab,3 +np.float32,0x7ecaa09b,0x3fc90fdb,3 +np.float32,0x3f72e6d9,0x3f4257a8,3 +np.float32,0xbefe787e,0xbeec29a4,3 +np.float32,0xbee989e8,0xbedb1af9,3 +np.float32,0xbe662db0,0xbe626a45,3 +np.float32,0x495bf7,0x495bf7,3 +np.float32,0x26c379,0x26c379,3 +np.float32,0x7f54d41a,0x3fc90fdb,3 +np.float32,0x801e7dd9,0x801e7dd9,3 +np.float32,0x80000000,0x80000000,3 +np.float32,0xfa3d3000,0xbfc90fdb,3 +np.float32,0xfa3cb800,0xbfc90fdb,3 +np.float32,0x264894,0x264894,3 +np.float32,0xff6de011,0xbfc90fdb,3 +np.float32,0x7e9045b2,0x3fc90fdb,3 +np.float32,0x3f2253a8,0x3f10aaf4,3 +np.float32,0xbd462bf0,0xbd460469,3 +np.float32,0x7f1796af,0x3fc90fdb,3 +np.float32,0x3e718858,0x3e6d3279,3 +np.float32,0xff437d7e,0xbfc90fdb,3 +np.float32,0x805ae7cb,0x805ae7cb,3 +np.float32,0x807e32e9,0x807e32e9,3 +np.float32,0x3ee0bafc,0x3ed3c453,3 +np.float32,0xbf721dee,0xbf41edc3,3 +np.float32,0xfec9f792,0xbfc90fdb,3 +np.float32,0x7f050720,0x3fc90fdb,3 +np.float32,0x182261,0x182261,3 +np.float32,0x3e39e678,0x3e37e5be,3 +np.float32,0x7e096e4b,0x3fc90fdb,3 +np.float32,0x103715,0x103715,3 +np.float32,0x3f7e7741,0x3f484ae4,3 +np.float32,0x3e29aea5,0x3e28277c,3 +np.float32,0x58c183,0x58c183,3 +np.float32,0xff72fdb2,0xbfc90fdb,3 +np.float32,0xbd9a9420,0xbd9a493c,3 +np.float32,0x7f1e07e7,0x3fc90fdb,3 +np.float32,0xff79f522,0xbfc90fdb,3 +np.float32,0x7c7d0e96,0x3fc90fdb,3 +np.float32,0xbeba9e8e,0xbeb2f504,3 +np.float32,0xfd880a80,0xbfc90fdb,3 +np.float32,0xff7f2a33,0xbfc90fdb,3 +np.float32,0x3e861ae0,0x3e83289c,3 +np.float32,0x7f0161c1,0x3fc90fdb,3 +np.float32,0xfe844ff8,0xbfc90fdb,3 +np.float32,0xbebf4b98,0xbeb7128e,3 +np.float32,0x652bee,0x652bee,3 +np.float32,0xff188a4b,0xbfc90fdb,3 +np.float32,0xbf800000,0xbf490fdb,3 +np.float32,0x80418711,0x80418711,3 +np.float32,0xbeb712d4,0xbeafd1f6,3 +np.float32,0xbf7cee28,0xbf478491,3 +np.float32,0xfe66c59c,0xbfc90fdb,3 +np.float32,0x4166a2,0x4166a2,3 +np.float32,0x3dfa1a2c,0x3df8deb5,3 +np.float32,0xbdbfbcb8,0xbdbf2e0f,3 +np.float32,0xfe60ef70,0xbfc90fdb,3 +np.float32,0xfe009444,0xbfc90fdb,3 +np.float32,0xfeb27aa0,0xbfc90fdb,3 +np.float32,0xbe99f7bc,0xbe95902b,3 +np.float32,0x8043d28d,0x8043d28d,3 +np.float32,0xfe5328c4,0xbfc90fdb,3 +np.float32,0x8017b27e,0x8017b27e,3 +np.float32,0x3ef1d2cf,0x3ee1ebd7,3 +np.float32,0x805ddd90,0x805ddd90,3 +np.float32,0xbf424263,0xbf262d17,3 +np.float32,0xfc99dde0,0xbfc90fdb,3 +np.float32,0xbf7ec13b,0xbf487015,3 +np.float32,0xbef727ea,0xbee64377,3 +np.float32,0xff15ce95,0xbfc90fdb,3 +np.float32,0x1fbba4,0x1fbba4,3 +np.float32,0x3f3b2368,0x3f2198a9,3 +np.float32,0xfefda26e,0xbfc90fdb,3 +np.float32,0x801519ad,0x801519ad,3 +np.float32,0x80473fa2,0x80473fa2,3 +np.float32,0x7e7a8bc1,0x3fc90fdb,3 +np.float32,0x3e8a9289,0x3e87548a,3 +np.float32,0x3ed68987,0x3ecb2872,3 +np.float32,0x805bca66,0x805bca66,3 +np.float32,0x8079c4e3,0x8079c4e3,3 +np.float32,0x3a2510,0x3a2510,3 +np.float32,0x7eedc598,0x3fc90fdb,3 +np.float32,0x80681956,0x80681956,3 +np.float32,0xff64c778,0xbfc90fdb,3 +np.float32,0x806bbc46,0x806bbc46,3 +np.float32,0x433643,0x433643,3 +np.float32,0x705b92,0x705b92,3 +np.float32,0xff359392,0xbfc90fdb,3 +np.float32,0xbee78672,0xbed96fa7,3 +np.float32,0x3e21717b,0x3e202010,3 +np.float32,0xfea13c34,0xbfc90fdb,3 +np.float32,0x2c8895,0x2c8895,3 +np.float32,0x3ed33290,0x3ec84f7c,3 +np.float32,0x3e63031e,0x3e5f662e,3 +np.float32,0x7e30907b,0x3fc90fdb,3 +np.float32,0xbe293708,0xbe27b310,3 +np.float32,0x3ed93738,0x3ecd6ea3,3 +np.float32,0x9db7e,0x9db7e,3 +np.float32,0x3f7cd1b8,0x3f47762c,3 +np.float32,0x3eb5143c,0x3eae0cb0,3 +np.float32,0xbe69b234,0xbe65c2d7,3 +np.float32,0x3f6e74de,0x3f3ffb97,3 +np.float32,0x5d0559,0x5d0559,3 +np.float32,0x3e1e8c30,0x3e1d4c70,3 +np.float32,0xbf2d1878,0xbf1833ef,3 +np.float32,0xff2adf82,0xbfc90fdb,3 +np.float32,0x8012e2c1,0x8012e2c1,3 +np.float32,0x7f031be3,0x3fc90fdb,3 +np.float32,0x805ff94e,0x805ff94e,3 +np.float32,0x3e9d5b27,0x3e98aa31,3 +np.float32,0x3f56d5cf,0x3f32bc9e,3 +np.float32,0x3eaa0412,0x3ea4267f,3 +np.float32,0xbe899ea4,0xbe86712f,3 +np.float32,0x800f2f48,0x800f2f48,3 +np.float32,0x3f1c2269,0x3f0c33ea,3 +np.float32,0x3f4a5f64,0x3f2b3f28,3 +np.float32,0x80739318,0x80739318,3 +np.float32,0x806e9b47,0x806e9b47,3 +np.float32,0x3c8cd300,0x3c8ccf73,3 +np.float32,0x7f39a39d,0x3fc90fdb,3 +np.float32,0x3ec95d61,0x3ebfd9dc,3 +np.float32,0xff351ff8,0xbfc90fdb,3 +np.float32,0xff3a8f58,0xbfc90fdb,3 +np.float32,0x7f313ec0,0x3fc90fdb,3 +np.float32,0x803aed13,0x803aed13,3 +np.float32,0x7f771d9b,0x3fc90fdb,3 +np.float32,0x8045a6d6,0x8045a6d6,3 +np.float32,0xbc85f280,0xbc85ef72,3 +np.float32,0x7e9c68f5,0x3fc90fdb,3 +np.float32,0xbf0f9379,0xbf02d975,3 +np.float32,0x7e97bcb1,0x3fc90fdb,3 +np.float32,0x804a07d5,0x804a07d5,3 +np.float32,0x802e6117,0x802e6117,3 +np.float32,0x7ed5e388,0x3fc90fdb,3 +np.float32,0x80750455,0x80750455,3 +np.float32,0xff4a8325,0xbfc90fdb,3 +np.float32,0xbedb6866,0xbecf497c,3 +np.float32,0x52ea3b,0x52ea3b,3 +np.float32,0xff773172,0xbfc90fdb,3 +np.float32,0xbeaa8ff0,0xbea4a46e,3 +np.float32,0x7eef2058,0x3fc90fdb,3 +np.float32,0x3f712472,0x3f4169d3,3 +np.float32,0xff6c8608,0xbfc90fdb,3 +np.float32,0xbf6eaa41,0xbf40182a,3 +np.float32,0x3eb03c24,0x3ea9bb34,3 +np.float32,0xfe118cd4,0xbfc90fdb,3 +np.float32,0x3e5b03b0,0x3e57c378,3 +np.float32,0x7f34d92d,0x3fc90fdb,3 +np.float32,0x806c3418,0x806c3418,3 +np.float32,0x7f3074e3,0x3fc90fdb,3 +np.float32,0x8002df02,0x8002df02,3 +np.float32,0x3f6df63a,0x3f3fb7b7,3 +np.float32,0xfd2b4100,0xbfc90fdb,3 +np.float32,0x80363d5c,0x80363d5c,3 +np.float32,0xbeac1f98,0xbea60bd6,3 +np.float32,0xff7fffff,0xbfc90fdb,3 +np.float32,0x80045097,0x80045097,3 +np.float32,0xfe011100,0xbfc90fdb,3 +np.float32,0x80739ef5,0x80739ef5,3 +np.float32,0xff3976ed,0xbfc90fdb,3 +np.float32,0xbe18e3a0,0xbe17c49e,3 +np.float32,0xbe289294,0xbe2712f6,3 +np.float32,0x3f1d41e7,0x3f0d050e,3 +np.float32,0x39364a,0x39364a,3 +np.float32,0x8072b77e,0x8072b77e,3 +np.float32,0x3f7cfec0,0x3f478cf6,3 +np.float32,0x2f68f6,0x2f68f6,3 +np.float32,0xbf031fb8,0xbef25c84,3 +np.float32,0xbf0b842c,0xbeff7afc,3 +np.float32,0x3f081e7e,0x3efa3676,3 +np.float32,0x7f7fffff,0x3fc90fdb,3 +np.float32,0xff15da0e,0xbfc90fdb,3 +np.float32,0x3d2001b2,0x3d1fece1,3 +np.float32,0x7f76efef,0x3fc90fdb,3 +np.float32,0x3f2405dd,0x3f11dfb7,3 +np.float32,0xa0319,0xa0319,3 +np.float32,0x3e23d2bd,0x3e227255,3 +np.float32,0xbd4d4c50,0xbd4d205e,3 +np.float32,0x382344,0x382344,3 +np.float32,0x21bbf,0x21bbf,3 +np.float32,0xbf209e82,0xbf0f7239,3 +np.float32,0xff03bf9f,0xbfc90fdb,3 +np.float32,0x7b1789,0x7b1789,3 +np.float32,0xff314944,0xbfc90fdb,3 +np.float32,0x1a63eb,0x1a63eb,3 +np.float32,0x803dc983,0x803dc983,3 +np.float32,0x3f0ff558,0x3f0323dc,3 +np.float32,0x3f544f2c,0x3f313f58,3 +np.float32,0xff032948,0xbfc90fdb,3 +np.float32,0x7f4933cc,0x3fc90fdb,3 +np.float32,0x7f14c5ed,0x3fc90fdb,3 +np.float32,0x803aeebf,0x803aeebf,3 +np.float32,0xbf0d4c0f,0xbf011bf5,3 +np.float32,0xbeaf8de2,0xbea91f57,3 +np.float32,0xff3ae030,0xbfc90fdb,3 +np.float32,0xbb362d00,0xbb362ce1,3 +np.float32,0x3d1f79e0,0x3d1f6544,3 +np.float32,0x3f56e9d9,0x3f32c860,3 +np.float32,0x3f723e5e,0x3f41fee2,3 +np.float32,0x4c0179,0x4c0179,3 +np.float32,0xfee36132,0xbfc90fdb,3 +np.float32,0x619ae6,0x619ae6,3 +np.float32,0xfde5d670,0xbfc90fdb,3 +np.float32,0xff079ac5,0xbfc90fdb,3 +np.float32,0x3e974fbd,0x3e931fae,3 +np.float32,0x8020ae6b,0x8020ae6b,3 +np.float32,0x6b5af1,0x6b5af1,3 +np.float32,0xbeb57cd6,0xbeae69a3,3 +np.float32,0x806e7eb2,0x806e7eb2,3 +np.float32,0x7e666edb,0x3fc90fdb,3 +np.float32,0xbf458c18,0xbf283ff0,3 +np.float32,0x3e50518e,0x3e4d8399,3 +np.float32,0x3e9ce224,0x3e983b98,3 +np.float32,0x3e6bc067,0x3e67b6c6,3 +np.float32,0x13783d,0x13783d,3 +np.float32,0xff3d518c,0xbfc90fdb,3 +np.float32,0xfeba5968,0xbfc90fdb,3 +np.float32,0xbf0b9f76,0xbeffa50f,3 +np.float32,0xfe174900,0xbfc90fdb,3 +np.float32,0x3f38bb0a,0x3f200527,3 +np.float32,0x7e94a77d,0x3fc90fdb,3 +np.float32,0x29d776,0x29d776,3 +np.float32,0xbf4e058d,0xbf2d7a15,3 +np.float32,0xbd94abc8,0xbd946923,3 +np.float32,0xbee62db0,0xbed85124,3 +np.float32,0x800000,0x800000,3 +np.float32,0xbef1df7e,0xbee1f636,3 +np.float32,0xbcf3cd20,0xbcf3bab5,3 +np.float32,0x80007b05,0x80007b05,3 +np.float32,0x3d9b3f2e,0x3d9af351,3 +np.float32,0xbf714a68,0xbf417dee,3 +np.float32,0xbf2a2d37,0xbf163069,3 +np.float32,0x8055104f,0x8055104f,3 +np.float32,0x7f5c40d7,0x3fc90fdb,3 +np.float32,0x1,0x1,3 +np.float32,0xff35f3a6,0xbfc90fdb,3 +np.float32,0xd9c7c,0xd9c7c,3 +np.float32,0xbf440cfc,0xbf274f22,3 +np.float32,0x8050ac43,0x8050ac43,3 +np.float32,0x63ee16,0x63ee16,3 +np.float32,0x7d90419b,0x3fc90fdb,3 +np.float32,0xfee22198,0xbfc90fdb,3 +np.float32,0xc2ead,0xc2ead,3 +np.float32,0x7f5cd6a6,0x3fc90fdb,3 +np.float32,0x3f6fab7e,0x3f40a184,3 +np.float32,0x3ecf998c,0x3ec53a73,3 +np.float32,0x7e5271f0,0x3fc90fdb,3 +np.float32,0x67c016,0x67c016,3 +np.float32,0x2189c8,0x2189c8,3 +np.float32,0x27d892,0x27d892,3 +np.float32,0x3f0d02c4,0x3f00e3c0,3 +np.float32,0xbf69ebca,0xbf3d8862,3 +np.float32,0x3e60c0d6,0x3e5d3ebb,3 +np.float32,0x3f45206c,0x3f27fc66,3 +np.float32,0xbf6b47dc,0xbf3e4592,3 +np.float32,0xfe9be2e2,0xbfc90fdb,3 +np.float32,0x7fa00000,0x7fe00000,3 +np.float32,0xff271562,0xbfc90fdb,3 +np.float32,0x3e2e5270,0x3e2caaaf,3 +np.float32,0x80222934,0x80222934,3 +np.float32,0xbd01d220,0xbd01c701,3 +np.float32,0x223aa0,0x223aa0,3 +np.float32,0x3f4b5a7e,0x3f2bd967,3 +np.float32,0x3f217d85,0x3f101200,3 +np.float32,0xbf57663a,0xbf331144,3 +np.float32,0x3f219862,0x3f102536,3 +np.float32,0x28a28c,0x28a28c,3 +np.float32,0xbf3f55f4,0xbf244f86,3 +np.float32,0xbf3de287,0xbf236092,3 +np.float32,0xbf1c1ce2,0xbf0c2fe3,3 +np.float32,0x80000001,0x80000001,3 +np.float32,0x3db695d0,0x3db61a90,3 +np.float32,0x6c39bf,0x6c39bf,3 +np.float32,0x7e33a12f,0x3fc90fdb,3 +np.float32,0x67623a,0x67623a,3 +np.float32,0x3e45dc54,0x3e4373b6,3 +np.float32,0x7f62fa68,0x3fc90fdb,3 +np.float32,0x3f0e1d01,0x3f01bbe5,3 +np.float32,0x3f13dc69,0x3f0615f5,3 +np.float32,0x246703,0x246703,3 +np.float32,0xbf1055b5,0xbf036d07,3 +np.float32,0x7f46d3d0,0x3fc90fdb,3 +np.float32,0x3d2b8086,0x3d2b66e5,3 +np.float32,0xbf03be44,0xbef35776,3 +np.float32,0x3f800000,0x3f490fdb,3 +np.float32,0xbec8d226,0xbebf613d,3 +np.float32,0x3d8faf00,0x3d8f72d4,3 +np.float32,0x170c4e,0x170c4e,3 +np.float32,0xff14c0f0,0xbfc90fdb,3 +np.float32,0xff16245d,0xbfc90fdb,3 +np.float32,0x7f44ce6d,0x3fc90fdb,3 +np.float32,0xbe8175d8,0xbe7d9aeb,3 +np.float32,0x3df7a4a1,0x3df67254,3 +np.float32,0xfe2cc46c,0xbfc90fdb,3 +np.float32,0x3f284e63,0x3f14e335,3 +np.float32,0x7e46e5d6,0x3fc90fdb,3 +np.float32,0x397be4,0x397be4,3 +np.float32,0xbf2560bc,0xbf12d50b,3 +np.float32,0x3ed9b8c1,0x3ecddc60,3 +np.float32,0xfec18c5a,0xbfc90fdb,3 +np.float32,0x64894d,0x64894d,3 +np.float32,0x36a65d,0x36a65d,3 +np.float32,0x804ffcd7,0x804ffcd7,3 +np.float32,0x800f79e4,0x800f79e4,3 +np.float32,0x5d45ac,0x5d45ac,3 +np.float32,0x6cdda0,0x6cdda0,3 +np.float32,0xbf7f2077,0xbf489fe5,3 +np.float32,0xbf152f78,0xbf0713a1,3 +np.float32,0x807bf344,0x807bf344,3 +np.float32,0x3f775023,0x3f44a4d8,3 +np.float32,0xbf3edf67,0xbf240365,3 +np.float32,0x7eed729c,0x3fc90fdb,3 +np.float32,0x14cc29,0x14cc29,3 +np.float32,0x7edd7b6b,0x3fc90fdb,3 +np.float32,0xbf3c6e2c,0xbf226fb7,3 +np.float32,0x51b9ad,0x51b9ad,3 +np.float32,0x3f617ee8,0x3f38dd7c,3 +np.float32,0xff800000,0xbfc90fdb,3 +np.float32,0x7f440ea0,0x3fc90fdb,3 +np.float32,0x3e639893,0x3e5ff49e,3 +np.float32,0xbd791bb0,0xbd78cd3c,3 +np.float32,0x8059fcbc,0x8059fcbc,3 +np.float32,0xbf7d1214,0xbf4796bd,3 +np.float32,0x3ef368fa,0x3ee33788,3 +np.float32,0xbecec0f4,0xbec48055,3 +np.float32,0xbc83d940,0xbc83d656,3 +np.float32,0xbce01220,0xbce003d4,3 +np.float32,0x803192a5,0x803192a5,3 +np.float32,0xbe40e0c0,0xbe3ea4f0,3 +np.float32,0xfb692600,0xbfc90fdb,3 +np.float32,0x3f1bec65,0x3f0c0c88,3 +np.float32,0x7f042798,0x3fc90fdb,3 +np.float32,0xbe047374,0xbe03b83b,3 +np.float32,0x7f7c6630,0x3fc90fdb,3 +np.float32,0x7f58dae3,0x3fc90fdb,3 +np.float32,0x80691c92,0x80691c92,3 +np.float32,0x7dbe76,0x7dbe76,3 +np.float32,0xbf231384,0xbf11339d,3 +np.float32,0xbef4acf8,0xbee43f8b,3 +np.float32,0x3ee9f9d0,0x3edb7793,3 +np.float32,0x3f0064f6,0x3eee04a8,3 +np.float32,0x313732,0x313732,3 +np.float32,0xfd58cf80,0xbfc90fdb,3 +np.float32,0x3f7a2bc9,0x3f461d30,3 +np.float32,0x7f7681af,0x3fc90fdb,3 +np.float32,0x7f504211,0x3fc90fdb,3 +np.float32,0xfeae0c00,0xbfc90fdb,3 +np.float32,0xbee14396,0xbed436d1,3 +np.float32,0x7fc00000,0x7fc00000,3 +np.float32,0x693406,0x693406,3 +np.float32,0x3eb4a679,0x3eadab1b,3 +np.float32,0x550505,0x550505,3 +np.float32,0xfd493d10,0xbfc90fdb,3 +np.float32,0x3f4fc907,0x3f2e8b2c,3 +np.float32,0x80799aa4,0x80799aa4,3 +np.float32,0xff1ea89b,0xbfc90fdb,3 +np.float32,0xff424510,0xbfc90fdb,3 +np.float32,0x7f68d026,0x3fc90fdb,3 +np.float32,0xbea230ca,0xbe9d1200,3 +np.float32,0x7ea585da,0x3fc90fdb,3 +np.float32,0x3f3db211,0x3f23414c,3 +np.float32,0xfea4d964,0xbfc90fdb,3 +np.float32,0xbf17fe18,0xbf092984,3 +np.float32,0x7cc8a2,0x7cc8a2,3 +np.float32,0xff0330ba,0xbfc90fdb,3 +np.float32,0x3f769835,0x3f444592,3 +np.float32,0xeb0ac,0xeb0ac,3 +np.float32,0x7f7e45de,0x3fc90fdb,3 +np.float32,0xbdb510a8,0xbdb49873,3 +np.float32,0x3ebf900b,0x3eb74e9c,3 +np.float32,0xbf21bbce,0xbf103e89,3 +np.float32,0xbf3f4682,0xbf24459d,3 +np.float32,0x7eb6e9c8,0x3fc90fdb,3 +np.float32,0xbf42532d,0xbf2637be,3 +np.float32,0xbd3b2600,0xbd3b04b4,3 +np.float32,0x3f1fa9aa,0x3f0ec23e,3 +np.float32,0x7ed6a0f1,0x3fc90fdb,3 +np.float32,0xff4759a1,0xbfc90fdb,3 +np.float32,0x6d26e3,0x6d26e3,3 +np.float32,0xfe1108e0,0xbfc90fdb,3 +np.float32,0xfdf76900,0xbfc90fdb,3 +np.float32,0xfec66f22,0xbfc90fdb,3 +np.float32,0xbf3d097f,0xbf22d458,3 +np.float32,0x3d85be25,0x3d858d99,3 +np.float32,0x7f36739f,0x3fc90fdb,3 +np.float32,0x7bc0a304,0x3fc90fdb,3 +np.float32,0xff48dd90,0xbfc90fdb,3 +np.float32,0x48cab0,0x48cab0,3 +np.float32,0x3ed3943c,0x3ec8a2ef,3 +np.float32,0xbf61488e,0xbf38bede,3 +np.float32,0x3f543df5,0x3f313525,3 +np.float32,0x5cf2ca,0x5cf2ca,3 +np.float32,0x572686,0x572686,3 +np.float32,0x80369c7c,0x80369c7c,3 +np.float32,0xbd2c1d20,0xbd2c0338,3 +np.float32,0x3e255428,0x3e23ea0b,3 +np.float32,0xbeba9ee0,0xbeb2f54c,3 +np.float32,0x8015c165,0x8015c165,3 +np.float32,0x3d31f488,0x3d31d7e6,3 +np.float32,0x3f68591c,0x3f3cac43,3 +np.float32,0xf5ed5,0xf5ed5,3 +np.float32,0xbf3b1d34,0xbf21949e,3 +np.float32,0x1f0343,0x1f0343,3 +np.float32,0x3f0e52b5,0x3f01e4ef,3 +np.float32,0x7f57c596,0x3fc90fdb,3 +np.float64,0x7fd8e333ddb1c667,0x3ff921fb54442d18,1 +np.float64,0x800bcc9cdad7993a,0x800bcc9cdad7993a,1 +np.float64,0x3fcd6f81df3adf00,0x3fcceebbafc5d55e,1 +np.float64,0x3fed7338a57ae671,0x3fe7ce3e5811fc0a,1 +np.float64,0x7fe64994fcac9329,0x3ff921fb54442d18,1 +np.float64,0xfa5a6345f4b4d,0xfa5a6345f4b4d,1 +np.float64,0xe9dcd865d3b9b,0xe9dcd865d3b9b,1 +np.float64,0x7fea6cffabf4d9fe,0x3ff921fb54442d18,1 +np.float64,0xa9e1de6153c3c,0xa9e1de6153c3c,1 +np.float64,0xab6bdc5356d7c,0xab6bdc5356d7c,1 +np.float64,0x80062864a02c50ca,0x80062864a02c50ca,1 +np.float64,0xbfdac03aa7b58076,0xbfd9569f3230128d,1 +np.float64,0xbfe61b77752c36ef,0xbfe3588f51b8be8f,1 +np.float64,0x800bc854c8d790aa,0x800bc854c8d790aa,1 +np.float64,0x3feed1a2da3da346,0x3fe887f9b8ea031f,1 +np.float64,0x3fe910d3697221a7,0x3fe54365a53d840e,1 +np.float64,0x7fe7ab4944ef5692,0x3ff921fb54442d18,1 +np.float64,0x3fa462f1a028c5e3,0x3fa460303a6a4e69,1 +np.float64,0x800794f1a3af29e4,0x800794f1a3af29e4,1 +np.float64,0x3fee6fe7fafcdfd0,0x3fe854f863816d55,1 +np.float64,0x8000000000000000,0x8000000000000000,1 +np.float64,0x7f336472fe66d,0x7f336472fe66d,1 +np.float64,0xffb1623ac822c478,0xbff921fb54442d18,1 +np.float64,0x3fbacd68ce359ad2,0x3fbab480b3638846,1 +np.float64,0xffd5c02706ab804e,0xbff921fb54442d18,1 +np.float64,0xbfd4daf03d29b5e0,0xbfd42928f069c062,1 +np.float64,0x800c6e85dbd8dd0c,0x800c6e85dbd8dd0c,1 +np.float64,0x800e3599c5bc6b34,0x800e3599c5bc6b34,1 +np.float64,0x2c0d654c581ad,0x2c0d654c581ad,1 +np.float64,0xbfdd3eb13fba7d62,0xbfdb6e8143302de7,1 +np.float64,0x800b60cb8776c197,0x800b60cb8776c197,1 +np.float64,0x80089819ad113034,0x80089819ad113034,1 +np.float64,0x29fe721453fcf,0x29fe721453fcf,1 +np.float64,0x3fe8722f4df0e45f,0x3fe4e026d9eadb4d,1 +np.float64,0xffd1fbcd01a3f79a,0xbff921fb54442d18,1 +np.float64,0x7fc74e1e982e9c3c,0x3ff921fb54442d18,1 +np.float64,0x800c09d3d15813a8,0x800c09d3d15813a8,1 +np.float64,0xbfeee4578b3dc8af,0xbfe891ab3d6c3ce4,1 +np.float64,0xffdd01a6f33a034e,0xbff921fb54442d18,1 +np.float64,0x7fcc130480382608,0x3ff921fb54442d18,1 +np.float64,0xffcbb6bd1d376d7c,0xbff921fb54442d18,1 +np.float64,0xc068a53780d15,0xc068a53780d15,1 +np.float64,0xbfc974f15532e9e4,0xbfc92100b355f3e7,1 +np.float64,0x3fe6da79442db4f3,0x3fe3d87393b082e7,1 +np.float64,0xd9d9be4db3b38,0xd9d9be4db3b38,1 +np.float64,0x5ea50a20bd4a2,0x5ea50a20bd4a2,1 +np.float64,0xbfe5597f7d2ab2ff,0xbfe2d3ccc544b52b,1 +np.float64,0x80019364e4e326cb,0x80019364e4e326cb,1 +np.float64,0x3fed2902c3fa5206,0x3fe7a5e1df07e5c1,1 +np.float64,0xbfa7b72b5c2f6e50,0xbfa7b2d545b3cc1f,1 +np.float64,0xffdb60dd43b6c1ba,0xbff921fb54442d18,1 +np.float64,0x81a65d8b034cc,0x81a65d8b034cc,1 +np.float64,0x8000c30385818608,0x8000c30385818608,1 +np.float64,0x6022f5f4c045f,0x6022f5f4c045f,1 +np.float64,0x8007a2bb810f4578,0x8007a2bb810f4578,1 +np.float64,0x7fdc68893238d111,0x3ff921fb54442d18,1 +np.float64,0x7fd443454ea8868a,0x3ff921fb54442d18,1 +np.float64,0xffe6b04209ed6084,0xbff921fb54442d18,1 +np.float64,0x7fcd9733d13b2e67,0x3ff921fb54442d18,1 +np.float64,0xf5ee80a9ebdd0,0xf5ee80a9ebdd0,1 +np.float64,0x3fe3788e8de6f11e,0x3fe17dec7e6843a0,1 +np.float64,0x3fee36f62f7c6dec,0x3fe836f832515b43,1 +np.float64,0xf6cb49aded969,0xf6cb49aded969,1 +np.float64,0x3fd2b15ea4a562bc,0x3fd22fdc09920e67,1 +np.float64,0x7fccf6aef139ed5d,0x3ff921fb54442d18,1 +np.float64,0x3fd396b8ce272d72,0x3fd3026118857bd4,1 +np.float64,0x7fe53d3c80ea7a78,0x3ff921fb54442d18,1 +np.float64,0x3feae88fc4f5d120,0x3fe65fb04b18ef7a,1 +np.float64,0x3fedc643747b8c86,0x3fe7fafa6c20e25a,1 +np.float64,0xffdb2dc0df365b82,0xbff921fb54442d18,1 +np.float64,0xbfa2af3658255e70,0xbfa2ad17348f4253,1 +np.float64,0x3f8aa77b30354ef6,0x3f8aa71892336a69,1 +np.float64,0xbfdd1b1efbba363e,0xbfdb510dcd186820,1 +np.float64,0x800f50d99c5ea1b3,0x800f50d99c5ea1b3,1 +np.float64,0xff6ed602403dac00,0xbff921fb54442d18,1 +np.float64,0x800477d71aa8efaf,0x800477d71aa8efaf,1 +np.float64,0xbfe729a9e86e5354,0xbfe40ca78d9eefcf,1 +np.float64,0x3fd81ab2d4303566,0x3fd70d7e3937ea22,1 +np.float64,0xb617cbab6c2fa,0xb617cbab6c2fa,1 +np.float64,0x7fefffffffffffff,0x3ff921fb54442d18,1 +np.float64,0xffa40933ac281260,0xbff921fb54442d18,1 +np.float64,0xbfe1ede621e3dbcc,0xbfe057bb2b341ced,1 +np.float64,0xbfec700f03b8e01e,0xbfe73fb190bc722e,1 +np.float64,0x6e28af02dc517,0x6e28af02dc517,1 +np.float64,0x3fe37ad37ae6f5a7,0x3fe17f94674818a9,1 +np.float64,0x8000cbdeeae197bf,0x8000cbdeeae197bf,1 +np.float64,0x3fe8fd1f01f1fa3e,0x3fe5372bbec5d72c,1 +np.float64,0x3f8f9229103f2452,0x3f8f918531894256,1 +np.float64,0x800536858e0a6d0c,0x800536858e0a6d0c,1 +np.float64,0x7fe82bb4f9f05769,0x3ff921fb54442d18,1 +np.float64,0xffc1c2fb592385f8,0xbff921fb54442d18,1 +np.float64,0x7f924ddfc0249bbf,0x3ff921fb54442d18,1 +np.float64,0xffd5e125c52bc24c,0xbff921fb54442d18,1 +np.float64,0xbfef0d8738be1b0e,0xbfe8a6ef17b16c10,1 +np.float64,0x3fc9c8875233910f,0x3fc9715e708503cb,1 +np.float64,0xbfe2d926f4e5b24e,0xbfe108956e61cbb3,1 +np.float64,0x7fd61c496dac3892,0x3ff921fb54442d18,1 +np.float64,0x7fed545c6b7aa8b8,0x3ff921fb54442d18,1 +np.float64,0x8003746fea86e8e1,0x8003746fea86e8e1,1 +np.float64,0x3fdf515e75bea2bd,0x3fdd201a5585caa3,1 +np.float64,0xffda87c8ee350f92,0xbff921fb54442d18,1 +np.float64,0xffc675d8e22cebb0,0xbff921fb54442d18,1 +np.float64,0xffcdc173433b82e8,0xbff921fb54442d18,1 +np.float64,0xffed9df1517b3be2,0xbff921fb54442d18,1 +np.float64,0x3fd6a2eec72d45de,0x3fd5c1f1d7dcddcf,1 +np.float64,0xffec116a66f822d4,0xbff921fb54442d18,1 +np.float64,0x8007c2a2458f8545,0x8007c2a2458f8545,1 +np.float64,0x3fe4ee80d969dd02,0x3fe2895076094668,1 +np.float64,0x3fe3cae7116795ce,0x3fe1b9c07e0d03a7,1 +np.float64,0xbfd81bf8d8b037f2,0xbfd70e9bbbb4ca57,1 +np.float64,0x800c88ccd1f9119a,0x800c88ccd1f9119a,1 +np.float64,0xffdab2aee2b5655e,0xbff921fb54442d18,1 +np.float64,0x3fe743d227ee87a4,0x3fe41dcaef186d96,1 +np.float64,0x3fb060fd0220c1fa,0x3fb05b47f56ebbb4,1 +np.float64,0xbfd3f03772a7e06e,0xbfd3541522377291,1 +np.float64,0x190a5ae03216,0x190a5ae03216,1 +np.float64,0x3fe48c71916918e4,0x3fe24442f45b3183,1 +np.float64,0x800862470590c48e,0x800862470590c48e,1 +np.float64,0x7fd3ced89d279db0,0x3ff921fb54442d18,1 +np.float64,0x3feb3d9b4ab67b37,0x3fe69140cf2623f7,1 +np.float64,0xbc3f296b787e5,0xbc3f296b787e5,1 +np.float64,0xbfed6b905dfad721,0xbfe7ca1881a8c0fd,1 +np.float64,0xbfe621c2aaac4386,0xbfe35cd1969a82db,1 +np.float64,0x8009e7b17593cf63,0x8009e7b17593cf63,1 +np.float64,0x80045f580ca8beb1,0x80045f580ca8beb1,1 +np.float64,0xbfea2e177e745c2f,0xbfe5f13971633339,1 +np.float64,0x3fee655787fccab0,0x3fe84f6b98b6de26,1 +np.float64,0x3fc9cde92f339bd0,0x3fc9768a88b2c97c,1 +np.float64,0x3fc819c3b3303388,0x3fc7d25e1526e731,1 +np.float64,0x3fd3e848d2a7d090,0x3fd34cd9e6af558f,1 +np.float64,0x3fe19dacac633b5a,0x3fe01a6b4d27adc2,1 +np.float64,0x800b190da316321c,0x800b190da316321c,1 +np.float64,0xd5c69711ab8d3,0xd5c69711ab8d3,1 +np.float64,0xbfdc31bed7b8637e,0xbfda8ea3c1309d6d,1 +np.float64,0xbfd02ba007a05740,0xbfcfad86f0d756dc,1 +np.float64,0x3fe874473d70e88e,0x3fe4e1793cd82123,1 +np.float64,0xffb465585c28cab0,0xbff921fb54442d18,1 +np.float64,0xbfb5d8e13e2bb1c0,0xbfb5cb5c7807fc4d,1 +np.float64,0xffe80f933bf01f26,0xbff921fb54442d18,1 +np.float64,0x7feea783f5fd4f07,0x3ff921fb54442d18,1 +np.float64,0xbfae6665f43cccd0,0xbfae5d45b0a6f90a,1 +np.float64,0x800bd6ef5a77addf,0x800bd6ef5a77addf,1 +np.float64,0x800d145babda28b8,0x800d145babda28b8,1 +np.float64,0x39de155473bc3,0x39de155473bc3,1 +np.float64,0x3fefbd6bb1ff7ad8,0x3fe9008e73a3296e,1 +np.float64,0x3fc40bca3d281798,0x3fc3e2710e167007,1 +np.float64,0x3fcae0918335c120,0x3fca7e09e704a678,1 +np.float64,0x51287fbea2511,0x51287fbea2511,1 +np.float64,0x7fa6bc33a82d7866,0x3ff921fb54442d18,1 +np.float64,0xe72a2bebce546,0xe72a2bebce546,1 +np.float64,0x3fe1c8fd686391fa,0x3fe03b9622aeb4e3,1 +np.float64,0x3fe2a73ac3654e76,0x3fe0e36bc1ee4ac4,1 +np.float64,0x59895218b312b,0x59895218b312b,1 +np.float64,0xc6dc25c78db85,0xc6dc25c78db85,1 +np.float64,0xbfc06cfac520d9f4,0xbfc0561f85d2c907,1 +np.float64,0xbfea912dc4f5225c,0xbfe62c3b1c01c793,1 +np.float64,0x3fb78ce89a2f19d0,0x3fb77bfcb65a67d3,1 +np.float64,0xbfece5cdea39cb9c,0xbfe78103d24099e5,1 +np.float64,0x30d3054e61a61,0x30d3054e61a61,1 +np.float64,0xbfd3fe26fba7fc4e,0xbfd360c8447c4f7a,1 +np.float64,0x800956072a92ac0f,0x800956072a92ac0f,1 +np.float64,0x7fe639b3b6ec7366,0x3ff921fb54442d18,1 +np.float64,0x800ee30240bdc605,0x800ee30240bdc605,1 +np.float64,0x7fef6af0d2bed5e1,0x3ff921fb54442d18,1 +np.float64,0xffefce8725ff9d0d,0xbff921fb54442d18,1 +np.float64,0x3fe2e311da65c624,0x3fe10ff1623089dc,1 +np.float64,0xbfe7e5cbe56fcb98,0xbfe486c3daeda67c,1 +np.float64,0x80095bc14472b783,0x80095bc14472b783,1 +np.float64,0xffef0cb4553e1968,0xbff921fb54442d18,1 +np.float64,0xe3e60567c7cc1,0xe3e60567c7cc1,1 +np.float64,0xffde919f06bd233e,0xbff921fb54442d18,1 +np.float64,0x3fe3f9632e27f2c6,0x3fe1db49ebd21c4e,1 +np.float64,0x9dee9a233bdd4,0x9dee9a233bdd4,1 +np.float64,0xbfe3bb0602e7760c,0xbfe1ae41b6d4c488,1 +np.float64,0x3fc46945a128d288,0x3fc43da54c6c6a2a,1 +np.float64,0x7fdef149ac3de292,0x3ff921fb54442d18,1 +np.float64,0x800a96c76d752d8f,0x800a96c76d752d8f,1 +np.float64,0x3f971a32382e3464,0x3f9719316b9e9baf,1 +np.float64,0x7fe97bcf15b2f79d,0x3ff921fb54442d18,1 +np.float64,0x7fea894558f5128a,0x3ff921fb54442d18,1 +np.float64,0x3fc9e3be1933c780,0x3fc98b847c3923eb,1 +np.float64,0x3f7accac40359959,0x3f7acc9330741b64,1 +np.float64,0xa80c136950183,0xa80c136950183,1 +np.float64,0x3fe408732b2810e6,0x3fe1e61e7cbc8824,1 +np.float64,0xffa775bc042eeb80,0xbff921fb54442d18,1 +np.float64,0x3fbf04bd223e0980,0x3fbede37b8fc697e,1 +np.float64,0x7fd999b34c333366,0x3ff921fb54442d18,1 +np.float64,0xe72146dfce429,0xe72146dfce429,1 +np.float64,0x4f511ee49ea24,0x4f511ee49ea24,1 +np.float64,0xffb3e6e58827cdc8,0xbff921fb54442d18,1 +np.float64,0x3fd1f180cfa3e300,0x3fd17e85b2871de2,1 +np.float64,0x97c8e45b2f91d,0x97c8e45b2f91d,1 +np.float64,0xbfeeb20e88fd641d,0xbfe8778f878440bf,1 +np.float64,0xbfe1fc6dee23f8dc,0xbfe062c815a93cde,1 +np.float64,0xab4bf71f5697f,0xab4bf71f5697f,1 +np.float64,0xa9675a2952cec,0xa9675a2952cec,1 +np.float64,0xbfef3ea4a33e7d49,0xbfe8c02743ebc1b6,1 +np.float64,0x3fe22a2eafa4545d,0x3fe08577afca52a9,1 +np.float64,0x3fe8a08daaf1411c,0x3fe4fd5a34f05305,1 +np.float64,0xbfc6cda77b2d9b50,0xbfc6910bcfa0cf4f,1 +np.float64,0x3fec398394387307,0x3fe7211dd5276500,1 +np.float64,0x3fe36c95c626d92c,0x3fe1752e5aa2357b,1 +np.float64,0xffd8b9e7073173ce,0xbff921fb54442d18,1 +np.float64,0xffe19f043ae33e08,0xbff921fb54442d18,1 +np.float64,0x800e3640709c6c81,0x800e3640709c6c81,1 +np.float64,0x3fe7d6c20aafad84,0x3fe47d1a3307d9c8,1 +np.float64,0x80093fd63b727fad,0x80093fd63b727fad,1 +np.float64,0xffe1a671a4634ce3,0xbff921fb54442d18,1 +np.float64,0xbfe53a6b386a74d6,0xbfe2be41859cb10d,1 +np.float64,0xbfed149a097a2934,0xbfe79ab7e3e93c1c,1 +np.float64,0x7fc2769a5724ed34,0x3ff921fb54442d18,1 +np.float64,0xffd01e4e99a03c9e,0xbff921fb54442d18,1 +np.float64,0xa61f38434c3e7,0xa61f38434c3e7,1 +np.float64,0x800ad4ac5195a959,0x800ad4ac5195a959,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x80034a45b6c6948c,0x80034a45b6c6948c,1 +np.float64,0x6350b218c6a17,0x6350b218c6a17,1 +np.float64,0xfff0000000000000,0xbff921fb54442d18,1 +np.float64,0x3fe363e759e6c7cf,0x3fe16ed58d80f9ce,1 +np.float64,0xffe3b98e59e7731c,0xbff921fb54442d18,1 +np.float64,0x3fdbf7b40337ef68,0x3fda5df7ad3c80f9,1 +np.float64,0xbfe9cdf784739bef,0xbfe5b74f346ef93d,1 +np.float64,0xbfc321bea326437c,0xbfc2fdc0d4ff7561,1 +np.float64,0xbfe40f77d2a81ef0,0xbfe1eb28c4ae4dde,1 +np.float64,0x7fe071806960e300,0x3ff921fb54442d18,1 +np.float64,0x7fd269006ea4d200,0x3ff921fb54442d18,1 +np.float64,0x80017a56e0e2f4af,0x80017a56e0e2f4af,1 +np.float64,0x8004b4ea09a969d5,0x8004b4ea09a969d5,1 +np.float64,0xbfedbb01e63b7604,0xbfe7f4f0e84297df,1 +np.float64,0x3fe44454826888a9,0x3fe210ff6d005706,1 +np.float64,0xbfe0e77e6ea1cefd,0xbfdf1a977da33402,1 +np.float64,0xbfed6d4c8c3ada99,0xbfe7cb0932093f60,1 +np.float64,0x1d74cb9e3ae9a,0x1d74cb9e3ae9a,1 +np.float64,0x80082a785d1054f1,0x80082a785d1054f1,1 +np.float64,0x3fe58393266b0726,0x3fe2f0d8e91d4887,1 +np.float64,0xffe4028899680510,0xbff921fb54442d18,1 +np.float64,0x783a2e5af0746,0x783a2e5af0746,1 +np.float64,0x7fcdce88e73b9d11,0x3ff921fb54442d18,1 +np.float64,0x3fc58672a72b0ce5,0x3fc5535e090e56e2,1 +np.float64,0x800889c839b11391,0x800889c839b11391,1 +np.float64,0xffe5e05c466bc0b8,0xbff921fb54442d18,1 +np.float64,0xbfcbef6ebe37dedc,0xbfcb810752468f49,1 +np.float64,0xffe9408563b2810a,0xbff921fb54442d18,1 +np.float64,0xbfee4738367c8e70,0xbfe83f8e5dd7602f,1 +np.float64,0xbfe4aeb587295d6b,0xbfe25c7a0c76a454,1 +np.float64,0xffc9aea0a7335d40,0xbff921fb54442d18,1 +np.float64,0xe1e02199c3c04,0xe1e02199c3c04,1 +np.float64,0xbfbd9400783b2800,0xbfbd729345d1d14f,1 +np.float64,0x7a5418bcf4a84,0x7a5418bcf4a84,1 +np.float64,0x3fdc1c2fa5b83860,0x3fda7c935965ae72,1 +np.float64,0x80076a9f58ced53f,0x80076a9f58ced53f,1 +np.float64,0x3fedc4bf957b897f,0x3fe7fa2a83148f1c,1 +np.float64,0x800981b8a9d30372,0x800981b8a9d30372,1 +np.float64,0xffe1082311621046,0xbff921fb54442d18,1 +np.float64,0xe0091f89c0124,0xe0091f89c0124,1 +np.float64,0xbfce8d674f3d1ad0,0xbfcdfdbf2ddaa0ca,1 +np.float64,0x800516e72eaa2dcf,0x800516e72eaa2dcf,1 +np.float64,0xffe61ee64c6c3dcc,0xbff921fb54442d18,1 +np.float64,0x7fed2683cafa4d07,0x3ff921fb54442d18,1 +np.float64,0xffd4faf27729f5e4,0xbff921fb54442d18,1 +np.float64,0x7fe308fa842611f4,0x3ff921fb54442d18,1 +np.float64,0x3fc612a62b2c2550,0x3fc5db9ddbd4e159,1 +np.float64,0xbfe5b01e766b603d,0xbfe30f72a875e988,1 +np.float64,0x3fc2dd8b9a25bb17,0x3fc2bb06246b9f78,1 +np.float64,0x8170908102e12,0x8170908102e12,1 +np.float64,0x800c1c8a8a583915,0x800c1c8a8a583915,1 +np.float64,0xffe5d91e8b6bb23c,0xbff921fb54442d18,1 +np.float64,0xffd140adee22815c,0xbff921fb54442d18,1 +np.float64,0xbfe2f1f5f8e5e3ec,0xbfe11afa5d749952,1 +np.float64,0xbfed6d1d587ada3b,0xbfe7caef9ecf7651,1 +np.float64,0x3fe9b85e67f370bd,0x3fe5aa3474768982,1 +np.float64,0x7fdc8932edb91265,0x3ff921fb54442d18,1 +np.float64,0x7fd136bc54a26d78,0x3ff921fb54442d18,1 +np.float64,0x800a1ea12a343d43,0x800a1ea12a343d43,1 +np.float64,0x3fec6a5c1b78d4b8,0x3fe73c82235c3f8f,1 +np.float64,0x800fbf6a00df7ed4,0x800fbf6a00df7ed4,1 +np.float64,0xbfd0e6e0cda1cdc2,0xbfd0864bf8cad294,1 +np.float64,0x3fc716df482e2dbf,0x3fc6d7fbfd4a8470,1 +np.float64,0xbfe75990936eb321,0xbfe42bffec3fa0d7,1 +np.float64,0x3fd58e54a02b1ca9,0x3fd4cace1107a5cc,1 +np.float64,0xbfc9c04136338084,0xbfc9696ad2591d54,1 +np.float64,0xdd1f0147ba3e0,0xdd1f0147ba3e0,1 +np.float64,0x5c86a940b90e,0x5c86a940b90e,1 +np.float64,0xbfecae3b8e795c77,0xbfe7624d4988c612,1 +np.float64,0xffd0370595206e0c,0xbff921fb54442d18,1 +np.float64,0xbfdc26d443384da8,0xbfda857ecd33ba9f,1 +np.float64,0xbfd1c849d9a39094,0xbfd15849449cc378,1 +np.float64,0xffee04acdb3c0959,0xbff921fb54442d18,1 +np.float64,0xbfded1056dbda20a,0xbfdcb83b30e1528c,1 +np.float64,0x7fb7b826622f704c,0x3ff921fb54442d18,1 +np.float64,0xbfee4df8ae7c9bf1,0xbfe8431df9dfd05d,1 +np.float64,0x7fe7f3670e2fe6cd,0x3ff921fb54442d18,1 +np.float64,0x8008ac9ae0d15936,0x8008ac9ae0d15936,1 +np.float64,0x800dce9f3b3b9d3f,0x800dce9f3b3b9d3f,1 +np.float64,0x7fbb19db203633b5,0x3ff921fb54442d18,1 +np.float64,0x3fe56c7f302ad8fe,0x3fe2e0eec3ad45fd,1 +np.float64,0x7fe82c05c570580b,0x3ff921fb54442d18,1 +np.float64,0xc0552b7780aa6,0xc0552b7780aa6,1 +np.float64,0x39d40e3073a83,0x39d40e3073a83,1 +np.float64,0x3fd8db54d731b6aa,0x3fd7b589b3ee9b20,1 +np.float64,0xffcdd355233ba6ac,0xbff921fb54442d18,1 +np.float64,0x3fbe97b3a43d2f67,0x3fbe72bca9be0348,1 +np.float64,0xbff0000000000000,0xbfe921fb54442d18,1 +np.float64,0xbfb4f55e6229eac0,0xbfb4e96df18a75a7,1 +np.float64,0xbfc66399ba2cc734,0xbfc62a3298bd96fc,1 +np.float64,0x3fd00988bb201311,0x3fcf6d67a9374c38,1 +np.float64,0x7fe471867d28e30c,0x3ff921fb54442d18,1 +np.float64,0xbfe38e0e64271c1d,0xbfe18d9888b7523b,1 +np.float64,0x8009dc127573b825,0x8009dc127573b825,1 +np.float64,0x800047bde4608f7d,0x800047bde4608f7d,1 +np.float64,0xffeede42c77dbc85,0xbff921fb54442d18,1 +np.float64,0xd8cf6d13b19ee,0xd8cf6d13b19ee,1 +np.float64,0xbfd08fb302a11f66,0xbfd034b1f8235e23,1 +np.float64,0x7fdb404c0b368097,0x3ff921fb54442d18,1 +np.float64,0xbfd6ba0438ad7408,0xbfd5d673e3276ec1,1 +np.float64,0xffd9568027b2ad00,0xbff921fb54442d18,1 +np.float64,0xbfb313b73e262770,0xbfb30ab4acb4fa67,1 +np.float64,0xbfe2dc1a15e5b834,0xbfe10ac5f8f3acd3,1 +np.float64,0xbfee426bf4bc84d8,0xbfe83d061df91edd,1 +np.float64,0xd9142c2fb2286,0xd9142c2fb2286,1 +np.float64,0x7feb0d11dff61a23,0x3ff921fb54442d18,1 +np.float64,0x800fea5b509fd4b7,0x800fea5b509fd4b7,1 +np.float64,0x3fe1a8818da35103,0x3fe022ba1bdf366e,1 +np.float64,0x8010000000000000,0x8010000000000000,1 +np.float64,0xbfd8fc6de6b1f8dc,0xbfd7d24726ed8dcc,1 +np.float64,0xf4b3dc2de967c,0xf4b3dc2de967c,1 +np.float64,0x8af0409b15e08,0x8af0409b15e08,1 +np.float64,0x3fb21e6934243cd2,0x3fb216b065f8709a,1 +np.float64,0x3fc53069392a60d2,0x3fc4ffa931211fb9,1 +np.float64,0xffc955812c32ab04,0xbff921fb54442d18,1 +np.float64,0xbfe3de42b1a7bc86,0xbfe1c7bd1324de75,1 +np.float64,0x1dc149a03b82a,0x1dc149a03b82a,1 +np.float64,0x8001bc5a24a378b5,0x8001bc5a24a378b5,1 +np.float64,0x3da14c407b44,0x3da14c407b44,1 +np.float64,0x80025e8da924bd1c,0x80025e8da924bd1c,1 +np.float64,0xbfcb0141c9360284,0xbfca9d572ea5e1f3,1 +np.float64,0xc90036fd92007,0xc90036fd92007,1 +np.float64,0x138312c427063,0x138312c427063,1 +np.float64,0x800dda3a963bb475,0x800dda3a963bb475,1 +np.float64,0x3fe9339934f26732,0x3fe558e723291f78,1 +np.float64,0xbfea8357027506ae,0xbfe6240826faaf48,1 +np.float64,0x7fe04735cae08e6b,0x3ff921fb54442d18,1 +np.float64,0x3fe29aca3c653594,0x3fe0da214c8bc6a4,1 +np.float64,0x3fbe1f09a03c3e13,0x3fbdfbbefef0155b,1 +np.float64,0x816ee4ad02ddd,0x816ee4ad02ddd,1 +np.float64,0xffddd1b31d3ba366,0xbff921fb54442d18,1 +np.float64,0x3fe2e01e0625c03c,0x3fe10dc0bd6677c2,1 +np.float64,0x3fec6bcf1978d79e,0x3fe73d518cddeb7c,1 +np.float64,0x7fe01aaaf8603555,0x3ff921fb54442d18,1 +np.float64,0xdf300cc5be602,0xdf300cc5be602,1 +np.float64,0xbfe71c01a36e3804,0xbfe403af80ce47b8,1 +np.float64,0xffa5be00ac2b7c00,0xbff921fb54442d18,1 +np.float64,0xbfda9ba711b5374e,0xbfd93775e3ac6bda,1 +np.float64,0xbfe56d8a27eadb14,0xbfe2e1a7185e8e6d,1 +np.float64,0x800f1bc937be3792,0x800f1bc937be3792,1 +np.float64,0x800a61d93c74c3b3,0x800a61d93c74c3b3,1 +np.float64,0x7fe71a52fcae34a5,0x3ff921fb54442d18,1 +np.float64,0x7fb4aef256295de4,0x3ff921fb54442d18,1 +np.float64,0x3fe6c1e861ed83d1,0x3fe3c828f281a7ef,1 +np.float64,0x3fba128402342508,0x3fb9fb94cf141860,1 +np.float64,0x3fee55a7ecfcab50,0x3fe8472a9af893ee,1 +np.float64,0x3fe586f31b2b0de6,0x3fe2f32bce9e91bc,1 +np.float64,0xbfbb1d1442363a28,0xbfbb034c7729d5f2,1 +np.float64,0xc78b4d3f8f16a,0xc78b4d3f8f16a,1 +np.float64,0x7fdbc277d4b784ef,0x3ff921fb54442d18,1 +np.float64,0xbfa728ca2c2e5190,0xbfa724c04e73ccbd,1 +np.float64,0x7fefc7b2143f8f63,0x3ff921fb54442d18,1 +np.float64,0x3fd153a3dda2a748,0x3fd0ebccd33a4dca,1 +np.float64,0xbfe18a6eace314de,0xbfe00ba32ec89d30,1 +np.float64,0x7feef518537dea30,0x3ff921fb54442d18,1 +np.float64,0x8005f007cd4be010,0x8005f007cd4be010,1 +np.float64,0x7fd890b840b12170,0x3ff921fb54442d18,1 +np.float64,0x7feed0582ebda0af,0x3ff921fb54442d18,1 +np.float64,0x1013f53220280,0x1013f53220280,1 +np.float64,0xbfe77273986ee4e7,0xbfe43c375a8bf6de,1 +np.float64,0x7fe3ab8918675711,0x3ff921fb54442d18,1 +np.float64,0xbfc6ad515b2d5aa4,0xbfc671b2f7f86624,1 +np.float64,0x7fcd86231d3b0c45,0x3ff921fb54442d18,1 +np.float64,0xffe2523299a4a464,0xbff921fb54442d18,1 +np.float64,0x7fcadc5a1b35b8b3,0x3ff921fb54442d18,1 +np.float64,0x3fe5e020c4ebc042,0x3fe330418eec75bd,1 +np.float64,0x7fe332a9dc266553,0x3ff921fb54442d18,1 +np.float64,0xfa11dc21f425,0xfa11dc21f425,1 +np.float64,0xbec800177d900,0xbec800177d900,1 +np.float64,0x3fcadd057835ba0b,0x3fca7aa42face8bc,1 +np.float64,0xbfe6b9a206ad7344,0xbfe3c2a9719803de,1 +np.float64,0x3fbb4250b63684a0,0x3fbb281e9cefc519,1 +np.float64,0x7fef8787517f0f0e,0x3ff921fb54442d18,1 +np.float64,0x8001315c2d6262b9,0x8001315c2d6262b9,1 +np.float64,0xbfd94e3cf2b29c7a,0xbfd819257d36f56c,1 +np.float64,0xf1f325abe3e65,0xf1f325abe3e65,1 +np.float64,0x7fd6c07079ad80e0,0x3ff921fb54442d18,1 +np.float64,0x7fe328b075a65160,0x3ff921fb54442d18,1 +np.float64,0x7fe7998f812f331e,0x3ff921fb54442d18,1 +np.float64,0xffe026bb65604d76,0xbff921fb54442d18,1 +np.float64,0xffd6c06de8ad80dc,0xbff921fb54442d18,1 +np.float64,0x3fcd5a37bf3ab46f,0x3fccda82935d98ce,1 +np.float64,0xffc3e5a45227cb48,0xbff921fb54442d18,1 +np.float64,0x3febf7dd8177efbc,0x3fe6fc0bb999883e,1 +np.float64,0x7fd7047ea92e08fc,0x3ff921fb54442d18,1 +np.float64,0x35b3fc406b680,0x35b3fc406b680,1 +np.float64,0x7fd52e97632a5d2e,0x3ff921fb54442d18,1 +np.float64,0x3fd464d401a8c9a8,0x3fd3be2967fc97c3,1 +np.float64,0x800e815b2ebd02b6,0x800e815b2ebd02b6,1 +np.float64,0x3fca8428af350850,0x3fca257b466b8970,1 +np.float64,0x8007b7526f6f6ea6,0x8007b7526f6f6ea6,1 +np.float64,0x82f60a8f05ec2,0x82f60a8f05ec2,1 +np.float64,0x3fb71a5d0a2e34c0,0x3fb70a629ef8e2a2,1 +np.float64,0x7fc8570c7d30ae18,0x3ff921fb54442d18,1 +np.float64,0x7fe5528e77eaa51c,0x3ff921fb54442d18,1 +np.float64,0xffc20dbbf1241b78,0xbff921fb54442d18,1 +np.float64,0xeb13368fd6267,0xeb13368fd6267,1 +np.float64,0x7fe7d529056faa51,0x3ff921fb54442d18,1 +np.float64,0x3fecd02eabf9a05d,0x3fe77516f0ba1ac4,1 +np.float64,0x800fcba6a09f974d,0x800fcba6a09f974d,1 +np.float64,0x7fe7e8e015afd1bf,0x3ff921fb54442d18,1 +np.float64,0xbfd271a382a4e348,0xbfd1f513a191c595,1 +np.float64,0x9f1014013e21,0x9f1014013e21,1 +np.float64,0x3fc05da47f20bb49,0x3fc04708a13a3a47,1 +np.float64,0x3fe0f427dda1e850,0x3fdf2e60ba8678b9,1 +np.float64,0xbfecb29fa539653f,0xbfe764bc791c45dd,1 +np.float64,0x45881ec68b104,0x45881ec68b104,1 +np.float64,0x8000000000000001,0x8000000000000001,1 +np.float64,0x3fe9c67ee1338cfe,0x3fe5b2c7b3df6ce8,1 +np.float64,0x7fedb8fef6bb71fd,0x3ff921fb54442d18,1 +np.float64,0x3fe54f6aaaea9ed6,0x3fe2ccd1df2abaa9,1 +np.float64,0x7feff58a1bbfeb13,0x3ff921fb54442d18,1 +np.float64,0x7fe3b62827276c4f,0x3ff921fb54442d18,1 +np.float64,0x3fe5feb682ebfd6d,0x3fe345105bc6d980,1 +np.float64,0x3fe49f38d9693e72,0x3fe2518b2824757f,1 +np.float64,0x8006bfd27c6d7fa6,0x8006bfd27c6d7fa6,1 +np.float64,0x3fc13409e2226814,0x3fc119ce0c01a5a2,1 +np.float64,0x95f8c7212bf19,0x95f8c7212bf19,1 +np.float64,0x3fd9f0fa6133e1f5,0x3fd8a567515edecf,1 +np.float64,0x3fef95cbe5ff2b98,0x3fe8ec88c768ba0b,1 +np.float64,0x3fbed28bba3da510,0x3fbeacbf136e51c2,1 +np.float64,0xbfd3987aeca730f6,0xbfd303fca58e3e60,1 +np.float64,0xbfed0f90cbfa1f22,0xbfe797f59249410d,1 +np.float64,0xffe55d8cbf2abb19,0xbff921fb54442d18,1 +np.float64,0x3feb4d9fc6769b40,0x3fe69a88131a1f1f,1 +np.float64,0x80085569acd0aad4,0x80085569acd0aad4,1 +np.float64,0x20557a6e40ab0,0x20557a6e40ab0,1 +np.float64,0x3fead2fd5df5a5fb,0x3fe653091f33b27f,1 +np.float64,0x3fe7b9983eaf7330,0x3fe46a50c4b5235e,1 +np.float64,0xffdad237ffb5a470,0xbff921fb54442d18,1 +np.float64,0xbfe5cc39a4eb9874,0xbfe322ad3a903f93,1 +np.float64,0x800ad6eecb35adde,0x800ad6eecb35adde,1 +np.float64,0xffec620f6438c41e,0xbff921fb54442d18,1 +np.float64,0xbfe5ef29122bde52,0xbfe33a7dfcc255e2,1 +np.float64,0x3fd451e7d0a8a3d0,0x3fd3acfa4939af10,1 +np.float64,0x8003ea93c127d528,0x8003ea93c127d528,1 +np.float64,0x800b48d37c9691a7,0x800b48d37c9691a7,1 +np.float64,0x3fe7e202acafc405,0x3fe484558246069b,1 +np.float64,0x80070c9b686e1938,0x80070c9b686e1938,1 +np.float64,0xbfda90bbc6352178,0xbfd92e25fcd12288,1 +np.float64,0x800e1ffebb1c3ffe,0x800e1ffebb1c3ffe,1 +np.float64,0x3ff0000000000000,0x3fe921fb54442d18,1 +np.float64,0xffd8cfdd46319fba,0xbff921fb54442d18,1 +np.float64,0x7fd8cd4182319a82,0x3ff921fb54442d18,1 +np.float64,0x3fed8bb778bb176f,0x3fe7db7c77c4c694,1 +np.float64,0x3fc74a70302e94e0,0x3fc709e95d6defec,1 +np.float64,0x3fe87269d070e4d4,0x3fe4e04bcc4a2137,1 +np.float64,0x7fb48223f6290447,0x3ff921fb54442d18,1 +np.float64,0xffe8ec444b71d888,0xbff921fb54442d18,1 +np.float64,0x7fde17d280bc2fa4,0x3ff921fb54442d18,1 +np.float64,0x3fd1cbde01a397bc,0x3fd15b9bb7b3147b,1 +np.float64,0x800883a64451074d,0x800883a64451074d,1 +np.float64,0x7fe3160a3f262c13,0x3ff921fb54442d18,1 +np.float64,0xbfe051d4d9a0a3aa,0xbfde2ecf14dc75fb,1 +np.float64,0xbfd89de689b13bce,0xbfd780176d1a28a3,1 +np.float64,0x3fecde2bf779bc58,0x3fe77ccf10bdd8e2,1 +np.float64,0xffe75774dc6eaee9,0xbff921fb54442d18,1 +np.float64,0x7fe834414d706882,0x3ff921fb54442d18,1 +np.float64,0x1,0x1,1 +np.float64,0xbfea5e4e4a74bc9c,0xbfe60e0601711835,1 +np.float64,0xffec248d4cb8491a,0xbff921fb54442d18,1 +np.float64,0xffd9942c2c332858,0xbff921fb54442d18,1 +np.float64,0xa9db36a553b67,0xa9db36a553b67,1 +np.float64,0x7fec630718b8c60d,0x3ff921fb54442d18,1 +np.float64,0xbfd062188f20c432,0xbfd009ecd652be89,1 +np.float64,0x8001b84e3023709d,0x8001b84e3023709d,1 +np.float64,0xbfe9e26d7cb3c4db,0xbfe5c3b157ecf668,1 +np.float64,0xbfef66ddf33ecdbc,0xbfe8d4b1f6410a24,1 +np.float64,0x3fd8d7109431ae21,0x3fd7b1d4860719a2,1 +np.float64,0xffee0f53107c1ea5,0xbff921fb54442d18,1 +np.float64,0x80000b4fd60016a0,0x80000b4fd60016a0,1 +np.float64,0xbfd99ff6e5333fee,0xbfd85fb3cbdaa049,1 +np.float64,0xbfe9cfd268339fa5,0xbfe5b86ef021a1b1,1 +np.float64,0xe32eace1c65d6,0xe32eace1c65d6,1 +np.float64,0xffc81f6627303ecc,0xbff921fb54442d18,1 +np.float64,0x7fe98dadde331b5b,0x3ff921fb54442d18,1 +np.float64,0xbfbcebd11e39d7a0,0xbfbccc8ec47883c7,1 +np.float64,0x7fe164880f22c90f,0x3ff921fb54442d18,1 +np.float64,0x800467c0cae8cf82,0x800467c0cae8cf82,1 +np.float64,0x800071e4b140e3ca,0x800071e4b140e3ca,1 +np.float64,0xbfc87a7eae30f4fc,0xbfc82fbc55bb0f24,1 +np.float64,0xffb2e0e23225c1c8,0xbff921fb54442d18,1 +np.float64,0x20ef338041df,0x20ef338041df,1 +np.float64,0x7fe6de71ca6dbce3,0x3ff921fb54442d18,1 +np.float64,0x5d1fa026ba3f5,0x5d1fa026ba3f5,1 +np.float64,0xffd112a9ce222554,0xbff921fb54442d18,1 +np.float64,0x3fb351f66626a3ed,0x3fb3489ab578c452,1 +np.float64,0x7fef7b2bd3bef657,0x3ff921fb54442d18,1 +np.float64,0xffe144f5d4e289eb,0xbff921fb54442d18,1 +np.float64,0xffd63a6750ac74ce,0xbff921fb54442d18,1 +np.float64,0x7fd2d8bb25a5b175,0x3ff921fb54442d18,1 +np.float64,0x3fec5920a078b242,0x3fe732dcffcf6521,1 +np.float64,0x80009a8b7f813518,0x80009a8b7f813518,1 +np.float64,0x3fdea220893d4441,0x3fdc921edf6bf3d8,1 +np.float64,0x8006cee2208d9dc5,0x8006cee2208d9dc5,1 +np.float64,0xdd0b0081ba17,0xdd0b0081ba17,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0xbfdac33955358672,0xbfd9592bce7daf1f,1 +np.float64,0x7fe8301d7170603a,0x3ff921fb54442d18,1 +np.float64,0xbfc1d34d8523a69c,0xbfc1b62449af9684,1 +np.float64,0x800c62239458c447,0x800c62239458c447,1 +np.float64,0xffd398c009a73180,0xbff921fb54442d18,1 +np.float64,0xbfe0c6d9ee218db4,0xbfdee777557f4401,1 +np.float64,0x3feccdd373799ba7,0x3fe773c9c2263f89,1 +np.float64,0xbfd21898bda43132,0xbfd1a2be8545fcc5,1 +np.float64,0x3fd77019b62ee033,0x3fd67793cabdf267,1 +np.float64,0x7fa609cad42c1395,0x3ff921fb54442d18,1 +np.float64,0x7fb4eaea5a29d5d4,0x3ff921fb54442d18,1 +np.float64,0x3fc570dc9a2ae1b9,0x3fc53e5f6218a799,1 +np.float64,0x800344ae8466895e,0x800344ae8466895e,1 +np.float64,0xbfc7c985252f930c,0xbfc784d60fa27bac,1 +np.float64,0xffaa2929fc345250,0xbff921fb54442d18,1 +np.float64,0xffe63e5ee9ac7cbe,0xbff921fb54442d18,1 +np.float64,0x73f0280ce7e06,0x73f0280ce7e06,1 +np.float64,0xffc525f8822a4bf0,0xbff921fb54442d18,1 +np.float64,0x7fd744d00aae899f,0x3ff921fb54442d18,1 +np.float64,0xbfe0fe590761fcb2,0xbfdf3e493e8b1f32,1 +np.float64,0xfae04ae7f5c0a,0xfae04ae7f5c0a,1 +np.float64,0xef821939df043,0xef821939df043,1 +np.float64,0x7fef6135843ec26a,0x3ff921fb54442d18,1 +np.float64,0xbfebf34dcbf7e69c,0xbfe6f97588a8f911,1 +np.float64,0xbfeec0b498fd8169,0xbfe87f2eceeead12,1 +np.float64,0x7fb67161b42ce2c2,0x3ff921fb54442d18,1 +np.float64,0x3fdcfd998639fb33,0x3fdb38934927c096,1 +np.float64,0xffda5960bc34b2c2,0xbff921fb54442d18,1 +np.float64,0xbfe11f8c71223f19,0xbfdf71fe770c96ab,1 +np.float64,0x3fe4ac1bab695838,0x3fe25aa4517b8322,1 +np.float64,0x3f730458a02608b1,0x3f73044fabb5e999,1 +np.float64,0x3fdb14ffcdb62a00,0x3fd99ea6c241a3ed,1 +np.float64,0xbfc93208cd326410,0xbfc8e09d78b6d4db,1 +np.float64,0x19e734dc33ce8,0x19e734dc33ce8,1 +np.float64,0x3fe5e98428abd308,0x3fe336a6a085eb55,1 +np.float64,0x7fec672a1378ce53,0x3ff921fb54442d18,1 +np.float64,0x800f8bd8d4ff17b2,0x800f8bd8d4ff17b2,1 +np.float64,0xbfe5a12e4e6b425c,0xbfe30533f99d5d06,1 +np.float64,0x75a34cb0eb46a,0x75a34cb0eb46a,1 +np.float64,0x7fe1d21d16a3a439,0x3ff921fb54442d18,1 +np.float64,0x7ff0000000000000,0x3ff921fb54442d18,1 +np.float64,0xffe0f50db261ea1b,0xbff921fb54442d18,1 +np.float64,0xbfd9dc22feb3b846,0xbfd8937ec965a501,1 +np.float64,0x8009d68e48d3ad1d,0x8009d68e48d3ad1d,1 +np.float64,0xbfe2eba620e5d74c,0xbfe1164d7d273c60,1 +np.float64,0x992efa09325e0,0x992efa09325e0,1 +np.float64,0x3fdab640ea356c82,0x3fd94e20cab88db2,1 +np.float64,0x69a6f04ad34df,0x69a6f04ad34df,1 +np.float64,0x3fe397df25272fbe,0x3fe194bd1a3a6192,1 +np.float64,0xebcce9fdd799d,0xebcce9fdd799d,1 +np.float64,0x3fbb49490c369292,0x3fbb2f02eccc497d,1 +np.float64,0xffd871f980b0e3f4,0xbff921fb54442d18,1 +np.float64,0x800348f6966691ee,0x800348f6966691ee,1 +np.float64,0xbfebc270a7f784e1,0xbfe6dda8d0d80f26,1 +np.float64,0xffd6d559b1adaab4,0xbff921fb54442d18,1 +np.float64,0x3fec3635c0b86c6c,0x3fe71f420256e43e,1 +np.float64,0x7fbc82ad7039055a,0x3ff921fb54442d18,1 +np.float64,0x7f873050602e60a0,0x3ff921fb54442d18,1 +np.float64,0x3fca44b8c3348970,0x3fc9e8a1a1a2d96e,1 +np.float64,0x3fe0fc308fe1f861,0x3fdf3aeb469ea225,1 +np.float64,0x7fefc27de8bf84fb,0x3ff921fb54442d18,1 +np.float64,0x8005f3f3916be7e8,0x8005f3f3916be7e8,1 +np.float64,0xbfd4278c7c284f18,0xbfd38678988873b6,1 +np.float64,0x435eafc486bd7,0x435eafc486bd7,1 +np.float64,0xbfd01f5199203ea4,0xbfcf96631f2108a3,1 +np.float64,0xffd5ee9185abdd24,0xbff921fb54442d18,1 +np.float64,0xffedb363257b66c5,0xbff921fb54442d18,1 +np.float64,0x800d68e6e11ad1ce,0x800d68e6e11ad1ce,1 +np.float64,0xbfcf687f8e3ed100,0xbfceccb771b0d39a,1 +np.float64,0x7feb3b9ef2f6773d,0x3ff921fb54442d18,1 +np.float64,0x3fe15ec5ca62bd8c,0x3fdfd3fab9d96f81,1 +np.float64,0x10000000000000,0x10000000000000,1 +np.float64,0xd2386f81a470e,0xd2386f81a470e,1 +np.float64,0xb9feed4573fde,0xb9feed4573fde,1 +np.float64,0x3fe7ed25c9efda4c,0x3fe48b7b72db4014,1 +np.float64,0xbfe01478726028f1,0xbfddcd1f5a2efc59,1 +np.float64,0x9946d02f328da,0x9946d02f328da,1 +np.float64,0xbfe3bb67f06776d0,0xbfe1ae88aa81c5a6,1 +np.float64,0xbfd3fd8a4c27fb14,0xbfd3603982e3b78d,1 +np.float64,0xffd5c3ab912b8758,0xbff921fb54442d18,1 +np.float64,0xffd5f502b12bea06,0xbff921fb54442d18,1 +np.float64,0xbfc64981ec2c9304,0xbfc610e0382b1fa6,1 +np.float64,0xffec42e3413885c6,0xbff921fb54442d18,1 +np.float64,0x80084eb4ed109d6a,0x80084eb4ed109d6a,1 +np.float64,0xbfd17cac9fa2f95a,0xbfd112020588a4b3,1 +np.float64,0xbfd06c1359a0d826,0xbfd0134a28aa9a66,1 +np.float64,0x7fdc3d7c03b87af7,0x3ff921fb54442d18,1 +np.float64,0x7bdf5aaaf7bec,0x7bdf5aaaf7bec,1 +np.float64,0xbfee3cd966fc79b3,0xbfe83a14bc07ac3b,1 +np.float64,0x7fec910da3f9221a,0x3ff921fb54442d18,1 +np.float64,0xffb4ea667029d4d0,0xbff921fb54442d18,1 +np.float64,0x800103d7cce207b0,0x800103d7cce207b0,1 +np.float64,0x7fbb229a6c364534,0x3ff921fb54442d18,1 +np.float64,0x0,0x0,1 +np.float64,0xffd8fccd0331f99a,0xbff921fb54442d18,1 +np.float64,0xbfd0784ae1a0f096,0xbfd01ebff62e39ad,1 +np.float64,0xbfed2ec9b3ba5d93,0xbfe7a9099410bc76,1 +np.float64,0x800690b8d16d2172,0x800690b8d16d2172,1 +np.float64,0x7fc061b26520c364,0x3ff921fb54442d18,1 +np.float64,0x8007ec47054fd88f,0x8007ec47054fd88f,1 +np.float64,0x775546b6eeaa9,0x775546b6eeaa9,1 +np.float64,0x8005e00fb56bc020,0x8005e00fb56bc020,1 +np.float64,0xbfe510f8d0ea21f2,0xbfe2a16862b5a37f,1 +np.float64,0xffd87a6bf3b0f4d8,0xbff921fb54442d18,1 +np.float64,0x800906e3d0520dc8,0x800906e3d0520dc8,1 +np.float64,0x2296f000452f,0x2296f000452f,1 +np.float64,0xbfe3189fa2e63140,0xbfe1378c0e005be4,1 +np.float64,0xb4d2447f69a49,0xb4d2447f69a49,1 +np.float64,0xffd056a24a20ad44,0xbff921fb54442d18,1 +np.float64,0xbfe3b23fe4e76480,0xbfe1a7e5840fcbeb,1 +np.float64,0x80018ee270831dc6,0x80018ee270831dc6,1 +np.float64,0x800df89f245bf13e,0x800df89f245bf13e,1 +np.float64,0x3fee1409d7bc2814,0x3fe824779d133232,1 +np.float64,0xbfef8d81667f1b03,0xbfe8e85523620368,1 +np.float64,0xffd8a6519b314ca4,0xbff921fb54442d18,1 +np.float64,0x7fc7bc86f32f790d,0x3ff921fb54442d18,1 +np.float64,0xffea6159e674c2b3,0xbff921fb54442d18,1 +np.float64,0x3fe153c3fba2a788,0x3fdfc2f74769d300,1 +np.float64,0xffc4261ef3284c3c,0xbff921fb54442d18,1 +np.float64,0x7fe8a8961ff1512b,0x3ff921fb54442d18,1 +np.float64,0xbfe3fb1fd167f640,0xbfe1dc89dcb7ecdf,1 +np.float64,0x3fd88577c2b10af0,0x3fd76acc09660704,1 +np.float64,0x3fe128ec27e251d8,0x3fdf808fc7ebcd8f,1 +np.float64,0xbfed6ca7c4fad950,0xbfe7caafe9a3e213,1 +np.float64,0xbf9a3912b8347220,0xbf9a379b3349352e,1 +np.float64,0xbfd724d7bcae49b0,0xbfd6351efa2a5fc5,1 +np.float64,0xbfed59700a7ab2e0,0xbfe7c043014c694c,1 +np.float64,0x8002ad435bc55a87,0x8002ad435bc55a87,1 +np.float64,0xffe46ed345a8dda6,0xbff921fb54442d18,1 +np.float64,0x7fd2f1d1d825e3a3,0x3ff921fb54442d18,1 +np.float64,0xbfea0265e23404cc,0xbfe5d6fb3fd30464,1 +np.float64,0xbfd17e049122fc0a,0xbfd113421078bbae,1 +np.float64,0xffea03b986b40772,0xbff921fb54442d18,1 +np.float64,0x800b55331a16aa67,0x800b55331a16aa67,1 +np.float64,0xbfc6fcafbf2df960,0xbfc6be9ecd0ebc1f,1 +np.float64,0xd6a36017ad46c,0xd6a36017ad46c,1 +np.float64,0xbfe9ba86dfb3750e,0xbfe5ab840cb0ef86,1 +np.float64,0x75c4a108eb895,0x75c4a108eb895,1 +np.float64,0x8008d6bc8051ad79,0x8008d6bc8051ad79,1 +np.float64,0xbfd3dc5984a7b8b4,0xbfd341f78e0528ec,1 +np.float64,0xffe1cbb01aa39760,0xbff921fb54442d18,1 +np.float64,0x3fc7e292f52fc526,0x3fc79d0ce9365767,1 +np.float64,0xbfcbeae2bd37d5c4,0xbfcb7cb034f82467,1 +np.float64,0x8000f0c62e21e18d,0x8000f0c62e21e18d,1 +np.float64,0xbfe23d8bc6247b18,0xbfe09418ee35c3c7,1 +np.float64,0x717394bae2e73,0x717394bae2e73,1 +np.float64,0xffa2ef1cc425de40,0xbff921fb54442d18,1 +np.float64,0x3fd938c229b27184,0x3fd806900735c99d,1 +np.float64,0x800bf3ec8a77e7d9,0x800bf3ec8a77e7d9,1 +np.float64,0xffeef41dd57de83b,0xbff921fb54442d18,1 +np.float64,0x8008df97e5b1bf30,0x8008df97e5b1bf30,1 +np.float64,0xffe9ab9d0db35739,0xbff921fb54442d18,1 +np.float64,0x99ff391333fe7,0x99ff391333fe7,1 +np.float64,0x3fb864b4a630c969,0x3fb851e883ea2cf9,1 +np.float64,0x22c1230a45825,0x22c1230a45825,1 +np.float64,0xff2336fbfe467,0xff2336fbfe467,1 +np.float64,0xbfd488f4cea911ea,0xbfd3def0490f5414,1 +np.float64,0x3fa379c78426f38f,0x3fa377607370800b,1 +np.float64,0xbfb0873302210e68,0xbfb08155b78dfd53,1 +np.float64,0xbfdf9ff7c2bf3ff0,0xbfdd5f658e357ad2,1 +np.float64,0x800978719192f0e4,0x800978719192f0e4,1 +np.float64,0xbfba8759ea350eb0,0xbfba6f325013b9e5,1 +np.float64,0xbfdd3e6b06ba7cd6,0xbfdb6e472b6091b0,1 +np.float64,0x7fe0c334a7a18668,0x3ff921fb54442d18,1 +np.float64,0xbfeb971feb772e40,0xbfe6c4e0f61404d1,1 +np.float64,0x3fe2a50968e54a13,0x3fe0e1c8b8d96e85,1 +np.float64,0x800fa9c5515f538b,0x800fa9c5515f538b,1 +np.float64,0x800f8532fbbf0a66,0x800f8532fbbf0a66,1 +np.float64,0x167d6f1e2cfaf,0x167d6f1e2cfaf,1 +np.float64,0xffee88e769fd11ce,0xbff921fb54442d18,1 +np.float64,0xbfeecc8529fd990a,0xbfe885520cdad8ea,1 +np.float64,0xffefffffffffffff,0xbff921fb54442d18,1 +np.float64,0xbfef6a566afed4ad,0xbfe8d6767b4c4235,1 +np.float64,0xffec12415af82482,0xbff921fb54442d18,1 +np.float64,0x3678a20a6cf15,0x3678a20a6cf15,1 +np.float64,0xffe468d54ee8d1aa,0xbff921fb54442d18,1 +np.float64,0x800ad6006795ac01,0x800ad6006795ac01,1 +np.float64,0x8001d5b61063ab6d,0x8001d5b61063ab6d,1 +np.float64,0x800dfcd1863bf9a3,0x800dfcd1863bf9a3,1 +np.float64,0xc9fbff6f93f80,0xc9fbff6f93f80,1 +np.float64,0xffe55c20f9eab842,0xbff921fb54442d18,1 +np.float64,0xbfcb596b6536b2d8,0xbfcaf1b339c5c615,1 +np.float64,0xbfe092689ea124d1,0xbfde94fa58946e51,1 +np.float64,0x3fe9ec733af3d8e6,0x3fe5c9bf5dee2623,1 +np.float64,0x3fe30f3d83261e7b,0x3fe1309fd6620e03,1 +np.float64,0xffd31d7f84263b00,0xbff921fb54442d18,1 +np.float64,0xbfe88d2d3e711a5a,0xbfe4f12b5a136178,1 +np.float64,0xffc81e4ce1303c98,0xbff921fb54442d18,1 +np.float64,0xffe5b96ebfab72dd,0xbff921fb54442d18,1 +np.float64,0x512f0502a25e1,0x512f0502a25e1,1 +np.float64,0x7fa3a376982746ec,0x3ff921fb54442d18,1 +np.float64,0x80005b5f2f60b6bf,0x80005b5f2f60b6bf,1 +np.float64,0xc337cc69866fa,0xc337cc69866fa,1 +np.float64,0x3fe7719c4caee339,0x3fe43bab42b19e64,1 +np.float64,0x7fde7ec1d93cfd83,0x3ff921fb54442d18,1 +np.float64,0x3fd2f38f3825e71e,0x3fd26cc7b1dd0acb,1 +np.float64,0x7fce298b993c5316,0x3ff921fb54442d18,1 +np.float64,0x56ae3b2cad5c8,0x56ae3b2cad5c8,1 +np.float64,0x3fe9299f2bf2533e,0x3fe552bddd999e72,1 +np.float64,0x7feff3a4823fe748,0x3ff921fb54442d18,1 +np.float64,0xbfd05c670aa0b8ce,0xbfd00494d78e9e97,1 +np.float64,0xffe745323eae8a64,0xbff921fb54442d18,1 diff --git a/local_packages/numpy/_core/tests/data/umath-validation-set-arctanh.csv b/local_packages/numpy/_core/tests/data/umath-validation-set-arctanh.csv new file mode 100644 index 0000000..68ecaab --- /dev/null +++ b/local_packages/numpy/_core/tests/data/umath-validation-set-arctanh.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0x3ee82930,0x3efa60fd,2 +np.float32,0x3f0aa640,0x3f1b3e13,2 +np.float32,0x3ec1a21c,0x3ecbbf8d,2 +np.float32,0x3cdb1740,0x3cdb24a1,2 +np.float32,0xbf28b6f3,0xbf4a86ac,2 +np.float32,0xbe490dcc,0xbe4bb2eb,2 +np.float32,0x80000001,0x80000001,2 +np.float32,0xbf44f9dd,0xbf826ce1,2 +np.float32,0xbf1d66c4,0xbf37786b,2 +np.float32,0x3f0ad26a,0x3f1b7c9b,2 +np.float32,0x3f7b6c54,0x4016aab0,2 +np.float32,0xbf715bb8,0xbfe1a0bc,2 +np.float32,0xbee8a562,0xbefafd6a,2 +np.float32,0x3db94d00,0x3db9cf16,2 +np.float32,0x3ee2970c,0x3ef368b3,2 +np.float32,0x3f3f8614,0x3f77fdca,2 +np.float32,0xbf1fb5f0,0xbf3b3789,2 +np.float32,0x3f798dc0,0x400b96bb,2 +np.float32,0x3e975d64,0x3e9c0573,2 +np.float32,0xbe3f1908,0xbe415d1f,2 +np.float32,0x3f2cea38,0x3f52192e,2 +np.float32,0x3e82f1ac,0x3e85eaa1,2 +np.float32,0x3eab6b30,0x3eb24acd,2 +np.float32,0xbe9bb90c,0xbea0cf5f,2 +np.float32,0xbf43e847,0xbf81202f,2 +np.float32,0xbd232fa0,0xbd2345c0,2 +np.float32,0xbbabbc00,0xbbabbc67,2 +np.float32,0xbf0b2975,0xbf1bf808,2 +np.float32,0xbef5ab0a,0xbf05d305,2 +np.float32,0x3f2cad16,0x3f51a8e2,2 +np.float32,0xbef75940,0xbf06eb08,2 +np.float32,0xbf0c1216,0xbf1d4325,2 +np.float32,0x3e7bdc08,0x3e8090c2,2 +np.float32,0x3da14e10,0x3da1a3c5,2 +np.float32,0x3f627412,0x3fb2bf21,2 +np.float32,0xbd6d08c0,0xbd6d4ca0,2 +np.float32,0x3f3e2368,0x3f74df8b,2 +np.float32,0xbe0df104,0xbe0edc77,2 +np.float32,0x3e8a265c,0x3e8da833,2 +np.float32,0xbdccdbb0,0xbdcd8ba8,2 +np.float32,0x3eb080c4,0x3eb80a44,2 +np.float32,0x3e627800,0x3e6645fe,2 +np.float32,0xbd8be0b0,0xbd8c1886,2 +np.float32,0xbf3282ac,0xbf5cae8c,2 +np.float32,0xbe515910,0xbe545707,2 +np.float32,0xbf2e64ac,0xbf54d637,2 +np.float32,0x3e0fc230,0x3e10b6de,2 +np.float32,0x3eb13ca0,0x3eb8df94,2 +np.float32,0x3f07a3ca,0x3f170572,2 +np.float32,0x3f2c7026,0x3f513935,2 +np.float32,0x3f3c4ec8,0x3f70d67c,2 +np.float32,0xbee9cce8,0xbefc724f,2 +np.float32,0xbe53ca60,0xbe56e3f3,2 +np.float32,0x3dd9e9a0,0x3ddabd98,2 +np.float32,0x3f38b8d4,0x3f69319b,2 +np.float32,0xbe176dc4,0xbe188c1d,2 +np.float32,0xbf322f2e,0xbf5c0c51,2 +np.float32,0xbe9b8676,0xbea097a2,2 +np.float32,0xbca44280,0xbca44823,2 +np.float32,0xbe2b0248,0xbe2ca036,2 +np.float32,0x3d101e80,0x3d102dbd,2 +np.float32,0xbf4eb610,0xbf8f526d,2 +np.float32,0xbec32a50,0xbecd89d1,2 +np.float32,0x3d549100,0x3d54c1ee,2 +np.float32,0x3f78e55e,0x40087025,2 +np.float32,0x3e592798,0x3e5c802d,2 +np.float32,0x3de045d0,0x3de12cfb,2 +np.float32,0xbdad28e0,0xbdad92f7,2 +np.float32,0x3e9a69e0,0x3e9f5e59,2 +np.float32,0x3e809778,0x3e836716,2 +np.float32,0xbf3278d9,0xbf5c9b6d,2 +np.float32,0x3f39fa00,0x3f6bd4a5,2 +np.float32,0xbec8143c,0xbed34ffa,2 +np.float32,0x3ddb7f40,0x3ddc57e6,2 +np.float32,0x3f0e8342,0x3f20c634,2 +np.float32,0x3f353dda,0x3f6213a4,2 +np.float32,0xbe96b400,0xbe9b4bea,2 +np.float32,0x3e626580,0x3e66328a,2 +np.float32,0xbde091c8,0xbde179df,2 +np.float32,0x3eb47b5c,0x3ebc91ca,2 +np.float32,0xbf282182,0xbf497f2f,2 +np.float32,0x3ea9f64c,0x3eb0a748,2 +np.float32,0x3f28dd4e,0x3f4aca86,2 +np.float32,0xbf71de18,0xbfe3f587,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0xbf6696a6,0xbfbcf11a,2 +np.float32,0xbc853ae0,0xbc853de2,2 +np.float32,0xbeced246,0xbedb51b8,2 +np.float32,0x3f3472a4,0x3f607e00,2 +np.float32,0xbee90124,0xbefb7117,2 +np.float32,0x3eb45b90,0x3ebc6d7c,2 +np.float32,0xbe53ead0,0xbe5705d6,2 +np.float32,0x3f630c80,0x3fb420e2,2 +np.float32,0xbf408cd0,0xbf7a56a2,2 +np.float32,0x3dda4ed0,0x3ddb23f1,2 +np.float32,0xbf37ae88,0xbf67096b,2 +np.float32,0xbdd48c28,0xbdd550c9,2 +np.float32,0xbf5745b0,0xbf9cb4a4,2 +np.float32,0xbf44e6fc,0xbf8255c1,2 +np.float32,0x3f5c8e6a,0x3fa65020,2 +np.float32,0xbea45fe8,0xbeaa6630,2 +np.float32,0x3f08bdee,0x3f188ef5,2 +np.float32,0x3ec77e74,0x3ed29f4b,2 +np.float32,0xbf1a1d3c,0xbf324029,2 +np.float32,0x3cad7340,0x3cad79e3,2 +np.float32,0xbf4fac2e,0xbf90b72a,2 +np.float32,0x3f58516e,0x3f9e8330,2 +np.float32,0x3f442008,0x3f816391,2 +np.float32,0xbf6e0c6c,0xbfd42854,2 +np.float32,0xbf266f7a,0xbf4689b2,2 +np.float32,0x3eb7e2f0,0x3ec077ba,2 +np.float32,0xbf320fd0,0xbf5bcf83,2 +np.float32,0xbf6a76b9,0xbfc80a11,2 +np.float32,0xbf2a91b4,0xbf4dd526,2 +np.float32,0x3f176e30,0x3f2e150e,2 +np.float32,0xbdcccad0,0xbdcd7a9c,2 +np.float32,0x3f60a8a4,0x3faebbf7,2 +np.float32,0x3d9706f0,0x3d974d40,2 +np.float32,0x3ef3cd34,0x3f049d58,2 +np.float32,0xbf73c615,0xbfed79fe,2 +np.float32,0x3df1b170,0x3df2d31b,2 +np.float32,0x3f632a46,0x3fb466c7,2 +np.float32,0xbf3ea18e,0xbf75f9ce,2 +np.float32,0xbf3ea05c,0xbf75f71f,2 +np.float32,0xbdd76750,0xbdd83403,2 +np.float32,0xbca830c0,0xbca836cd,2 +np.float32,0x3f1d4162,0x3f373c59,2 +np.float32,0x3c115700,0x3c1157fa,2 +np.float32,0x3dae8ab0,0x3daef758,2 +np.float32,0xbcad5020,0xbcad56bf,2 +np.float32,0x3ee299c4,0x3ef36c15,2 +np.float32,0xbf7f566c,0xc054c3bd,2 +np.float32,0x3f0cc698,0x3f1e4557,2 +np.float32,0xbe75c648,0xbe7aaa04,2 +np.float32,0x3ea29238,0x3ea86417,2 +np.float32,0x3f09d9c0,0x3f1a1d61,2 +np.float32,0x3f67275c,0x3fbe74b3,2 +np.float32,0x3e1a4e18,0x3e1b7d3a,2 +np.float32,0xbef6e3fc,0xbf069e98,2 +np.float32,0xbf6038ac,0xbfadc9fd,2 +np.float32,0xbe46bdd4,0xbe494b7f,2 +np.float32,0xbf4df1f4,0xbf8e3a98,2 +np.float32,0x3d094dc0,0x3d095aed,2 +np.float32,0x3f44c7d2,0x3f822fa3,2 +np.float32,0xbea30816,0xbea8e737,2 +np.float32,0xbe3c27c4,0xbe3e511b,2 +np.float32,0x3f3bb47c,0x3f6f8789,2 +np.float32,0xbe423760,0xbe4498c3,2 +np.float32,0x3ece1a74,0x3eda7634,2 +np.float32,0x3f14d1f6,0x3f2a1a89,2 +np.float32,0xbf4d9e8f,0xbf8dc4c1,2 +np.float32,0xbe92968e,0xbe96cd7f,2 +np.float32,0x3e99e6c0,0x3e9ece26,2 +np.float32,0xbf397361,0xbf6ab878,2 +np.float32,0xbf4fcea4,0xbf90e99f,2 +np.float32,0x3de37640,0x3de46779,2 +np.float32,0x3eb1b604,0x3eb9698c,2 +np.float32,0xbf52d0a2,0xbf957361,2 +np.float32,0xbe20435c,0xbe21975a,2 +np.float32,0x3f437a58,0x3f809bf1,2 +np.float32,0x3f27d1cc,0x3f48f335,2 +np.float32,0x3f7d4ff2,0x4027d1e2,2 +np.float32,0xbef732e4,0xbf06d205,2 +np.float32,0x3f4a0ae6,0x3f88e18e,2 +np.float32,0x3f800000,0x7f800000,2 +np.float32,0x3e3e56a0,0x3e4093ba,2 +np.float32,0xbed2fcfa,0xbee0517d,2 +np.float32,0xbe0e0114,0xbe0eecd7,2 +np.float32,0xbe808574,0xbe8353db,2 +np.float32,0x3f572e2a,0x3f9c8c86,2 +np.float32,0x80800000,0x80800000,2 +np.float32,0x3f3f3c82,0x3f775703,2 +np.float32,0xbf6e2482,0xbfd4818b,2 +np.float32,0xbf3943b0,0xbf6a5439,2 +np.float32,0x3f6e42ac,0x3fd4f1ea,2 +np.float32,0x3eb676c4,0x3ebed619,2 +np.float32,0xbe5e56c4,0xbe61ef6c,2 +np.float32,0x3eea200c,0x3efcdb65,2 +np.float32,0x3e3d2c78,0x3e3f5ef8,2 +np.float32,0xbdfd8fb0,0xbdfede71,2 +np.float32,0xbee69c8a,0xbef86e89,2 +np.float32,0x3e9efca0,0x3ea46a1c,2 +np.float32,0x3e4c2498,0x3e4ee9ee,2 +np.float32,0xbf3cc93c,0xbf71e21d,2 +np.float32,0x3ee0d77c,0x3ef13d2b,2 +np.float32,0xbefbcd2a,0xbf09d6a3,2 +np.float32,0x3f6dbe5c,0x3fd30a3e,2 +np.float32,0x3dae63e0,0x3daed03f,2 +np.float32,0xbd5001e0,0xbd502fb9,2 +np.float32,0x3f59632a,0x3fa067c8,2 +np.float32,0x3f0d355a,0x3f1ee452,2 +np.float32,0x3f2cbe5c,0x3f51c896,2 +np.float32,0x3c5e6e80,0x3c5e7200,2 +np.float32,0xbe8ac49c,0xbe8e52f0,2 +np.float32,0x3f54e576,0x3f98c0e6,2 +np.float32,0xbeaa0762,0xbeb0ba7c,2 +np.float32,0x3ec81e88,0x3ed35c21,2 +np.float32,0x3f5a6738,0x3fa23fb6,2 +np.float32,0xbf24a682,0xbf43784a,2 +np.float32,0x1,0x1,2 +np.float32,0x3ee6bc24,0x3ef89630,2 +np.float32,0x3f19444a,0x3f30ecf5,2 +np.float32,0x3ec1fc70,0x3ecc28fc,2 +np.float32,0xbf706e14,0xbfdd92fb,2 +np.float32,0x3eccb630,0x3ed8cd98,2 +np.float32,0xbcdf7aa0,0xbcdf88d3,2 +np.float32,0xbe450da8,0xbe478a8e,2 +np.float32,0x3ec9c210,0x3ed54c0b,2 +np.float32,0xbf3b86ca,0xbf6f24d1,2 +np.float32,0x3edcc7a0,0x3eec3a5c,2 +np.float32,0x3f075d5c,0x3f16a39a,2 +np.float32,0xbf5719ce,0xbf9c69de,2 +np.float32,0x3f62cb22,0x3fb3885a,2 +np.float32,0x3f639216,0x3fb55c93,2 +np.float32,0xbf473ee7,0xbf85413a,2 +np.float32,0xbf01b66c,0xbf0eea86,2 +np.float32,0x3e872d80,0x3e8a74f8,2 +np.float32,0xbf60957e,0xbfae925c,2 +np.float32,0xbf6847b2,0xbfc1929b,2 +np.float32,0x3f78bb94,0x4007b363,2 +np.float32,0xbf47efdb,0xbf8622db,2 +np.float32,0xbe1f2308,0xbe206fd6,2 +np.float32,0xbf414926,0xbf7c0a7e,2 +np.float32,0x3eecc268,0x3f00194d,2 +np.float32,0x3eb086d0,0x3eb81120,2 +np.float32,0xbef1af80,0xbf033ff5,2 +np.float32,0xbf454e56,0xbf82d4aa,2 +np.float32,0x3e622560,0x3e65ef20,2 +np.float32,0x3f50d2b2,0x3f926a83,2 +np.float32,0x3eb2c45c,0x3eba9d2c,2 +np.float32,0x3e42d1a0,0x3e4538c9,2 +np.float32,0xbf24cc5c,0xbf43b8e3,2 +np.float32,0x3e8c6464,0x3e90141a,2 +np.float32,0xbf3abff2,0xbf6d79c5,2 +np.float32,0xbec8f2e6,0xbed456fa,2 +np.float32,0xbf787b38,0xc00698b4,2 +np.float32,0xbf58d5cd,0xbf9f6c03,2 +np.float32,0x3df4ee20,0x3df61ba8,2 +np.float32,0xbf34581e,0xbf604951,2 +np.float32,0xbeba5cf4,0xbec35119,2 +np.float32,0xbf76c22d,0xbfffc51c,2 +np.float32,0x3ef63b2c,0x3f0630b4,2 +np.float32,0x3eeadb64,0x3efdc877,2 +np.float32,0x3dfd8c70,0x3dfedb24,2 +np.float32,0x3f441600,0x3f81576d,2 +np.float32,0x3f23a0d8,0x3f41bbf6,2 +np.float32,0x3cb84d40,0x3cb85536,2 +np.float32,0xbf25cb5c,0xbf456e38,2 +np.float32,0xbc108540,0xbc108636,2 +np.float32,0xbc5b9140,0xbc5b949e,2 +np.float32,0xbf62ff40,0xbfb401dd,2 +np.float32,0x3e8e0710,0x3e91d93e,2 +np.float32,0x3f1b6ae0,0x3f344dfd,2 +np.float32,0xbf4dbbbe,0xbf8dedea,2 +np.float32,0x3f1a5fb2,0x3f32a880,2 +np.float32,0xbe56bd00,0xbe59f8cb,2 +np.float32,0xbf490a5c,0xbf87902d,2 +np.float32,0xbf513072,0xbf92f717,2 +np.float32,0x3e73ee28,0x3e78b542,2 +np.float32,0x3f0a4c7a,0x3f1abf2c,2 +np.float32,0x3e10d5c8,0x3e11d00b,2 +np.float32,0xbf771aac,0xc001207e,2 +np.float32,0x3efe2f54,0x3f0b6a46,2 +np.float32,0xbea5f3ea,0xbeac291f,2 +np.float32,0xbf1a73e8,0xbf32c845,2 +np.float32,0x3ebcc82c,0x3ec61c4f,2 +np.float32,0xbf24f492,0xbf43fd9a,2 +np.float32,0x3ecbd908,0x3ed7c691,2 +np.float32,0x3f461c5e,0x3f83d3f0,2 +np.float32,0x3eed0524,0x3f0043c1,2 +np.float32,0x3d06e840,0x3d06f4bf,2 +np.float32,0x3eb6c974,0x3ebf34d7,2 +np.float32,0xbf1c85e1,0xbf36100f,2 +np.float32,0x3ed697d0,0x3ee4ad04,2 +np.float32,0x3eab0484,0x3eb1d733,2 +np.float32,0xbf3b02f2,0xbf6e0935,2 +np.float32,0xbeeab154,0xbefd9334,2 +np.float32,0xbf695372,0xbfc49881,2 +np.float32,0x3e8aaa7c,0x3e8e36be,2 +np.float32,0xbf208754,0xbf3c8f7b,2 +np.float32,0xbe0dbf28,0xbe0ea9a1,2 +np.float32,0x3ca780c0,0x3ca786ba,2 +np.float32,0xbeb320b4,0xbebb065e,2 +np.float32,0x3f13c698,0x3f288821,2 +np.float32,0xbe8cbbec,0xbe9072c4,2 +np.float32,0x3f1ed534,0x3f39c8df,2 +np.float32,0x3e1ca450,0x3e1de190,2 +np.float32,0x3f54be1c,0x3f988134,2 +np.float32,0x3f34e4ee,0x3f6161b4,2 +np.float32,0xbf7e6913,0xc038b246,2 +np.float32,0x3d3c3f20,0x3d3c6119,2 +np.float32,0x3ca9dc80,0x3ca9e2bc,2 +np.float32,0xbf577ea2,0xbf9d161a,2 +np.float32,0xbedb22c8,0xbeea3644,2 +np.float32,0x3f22a044,0x3f400bfa,2 +np.float32,0xbe214b8c,0xbe22a637,2 +np.float32,0x3e8cd300,0x3e908bbc,2 +np.float32,0xbec4d214,0xbecf7a58,2 +np.float32,0x3e9399a4,0x3e97e7e4,2 +np.float32,0xbee6a1a2,0xbef874ed,2 +np.float32,0xbf323742,0xbf5c1bfd,2 +np.float32,0x3f48b882,0x3f8725ac,2 +np.float32,0xbf4d4dba,0xbf8d532e,2 +np.float32,0xbf59640a,0xbfa0695a,2 +np.float32,0xbf2ad562,0xbf4e4f03,2 +np.float32,0x3e317d98,0x3e334d03,2 +np.float32,0xbf6a5b71,0xbfc7b5a2,2 +np.float32,0x3e87b434,0x3e8b05cf,2 +np.float32,0xbf1c344c,0xbf358dee,2 +np.float32,0x3e449428,0x3e470c65,2 +np.float32,0xbf2c0f2f,0xbf508808,2 +np.float32,0xbec5b5ac,0xbed0859c,2 +np.float32,0xbf4aa956,0xbf89b4b1,2 +np.float32,0x3f6dd374,0x3fd35717,2 +np.float32,0x3f45f76c,0x3f83a5ef,2 +np.float32,0xbed1fba8,0xbedf1bd5,2 +np.float32,0xbd26b2d0,0xbd26ca66,2 +np.float32,0xbe9817c2,0xbe9cd1c3,2 +np.float32,0x3e725988,0x3e770875,2 +np.float32,0xbf1a8ded,0xbf32f132,2 +np.float32,0xbe695860,0xbe6d83d3,2 +np.float32,0x3d8cecd0,0x3d8d25ea,2 +np.float32,0x3f574706,0x3f9cb6ec,2 +np.float32,0xbf5c5a1f,0xbfa5eaf3,2 +np.float32,0x3e7a7c88,0x3e7fab83,2 +np.float32,0xff800000,0xffc00000,2 +np.float32,0x3f66396a,0x3fbbfbb0,2 +np.float32,0x3ed6e588,0x3ee50b53,2 +np.float32,0xbb56d500,0xbb56d532,2 +np.float32,0x3ebd23fc,0x3ec6869a,2 +np.float32,0xbf70d490,0xbfdf4af5,2 +np.float32,0x3e514f88,0x3e544d15,2 +np.float32,0x3e660f98,0x3e6a0dac,2 +np.float32,0xbf034da1,0xbf1110bb,2 +np.float32,0xbf60d9be,0xbfaf2714,2 +np.float32,0x3df67b10,0x3df7ae64,2 +np.float32,0xbeeedc0a,0xbf017010,2 +np.float32,0xbe149224,0xbe15a072,2 +np.float32,0x3f455084,0x3f82d759,2 +np.float32,0x3f210f9e,0x3f3d7093,2 +np.float32,0xbeaea3e0,0xbeb5edd3,2 +np.float32,0x3e0724b0,0x3e07efad,2 +np.float32,0x3f09a784,0x3f19d6ac,2 +np.float32,0xbf044340,0xbf125ee8,2 +np.float32,0xbf71adc9,0xbfe315fe,2 +np.float32,0x3efd3870,0x3f0ac6a8,2 +np.float32,0xbf53c7a6,0xbf96f6df,2 +np.float32,0xbf3cf784,0xbf7247af,2 +np.float32,0x3e0ce9e0,0x3e0dd035,2 +np.float32,0xbd3051a0,0xbd306d89,2 +np.float32,0x3ecab804,0x3ed66f77,2 +np.float32,0x3e984350,0x3e9d0189,2 +np.float32,0x3edd1c00,0x3eeca20b,2 +np.float32,0xbe8e22a0,0xbe91f71b,2 +np.float32,0x3ebebc18,0x3ec85fd6,2 +np.float32,0xba275c00,0xba275c01,2 +np.float32,0x3f1d8190,0x3f37a385,2 +np.float32,0x3f17343e,0x3f2dbbfe,2 +np.float32,0x3caa8000,0x3caa864e,2 +np.float32,0x3e7a7308,0x3e7fa168,2 +np.float32,0x3f7359a6,0x3feb3e1a,2 +np.float32,0xbf7ad15a,0xc012a743,2 +np.float32,0xbf122efb,0xbf262812,2 +np.float32,0xbf03ba04,0xbf11a3fa,2 +np.float32,0x3ed7a90c,0x3ee5f8d4,2 +np.float32,0xbe23e318,0xbe254eed,2 +np.float32,0xbe2866f4,0xbe29f20a,2 +np.float32,0xbeaedff2,0xbeb631d0,2 +np.float32,0x0,0x0,2 +np.float32,0x3ef2a034,0x3f03dafd,2 +np.float32,0x3f35806c,0x3f62994e,2 +np.float32,0xbf655e19,0xbfb9c718,2 +np.float32,0x3f5d54ce,0x3fa7d4f4,2 +np.float32,0x3f33e64a,0x3f5f67e3,2 +np.float32,0x3ebf4010,0x3ec8f923,2 +np.float32,0xbe050dc8,0xbe05cf70,2 +np.float32,0x3f61693e,0x3fb063b0,2 +np.float32,0xbd94ac00,0xbd94ef12,2 +np.float32,0x3e9de008,0x3ea32f61,2 +np.float32,0xbe3d042c,0xbe3f3540,2 +np.float32,0x3e8fdfc0,0x3e93d9e4,2 +np.float32,0x3f28bc48,0x3f4a9019,2 +np.float32,0x3edea928,0x3eee8b09,2 +np.float32,0xbf05f673,0xbf14b362,2 +np.float32,0xbf360730,0xbf63a914,2 +np.float32,0xbe3fb454,0xbe41fe0a,2 +np.float32,0x3f6d99a8,0x3fd28552,2 +np.float32,0xbf3ae866,0xbf6dd052,2 +np.float32,0x3f5b1164,0x3fa37aec,2 +np.float32,0xbf64a451,0xbfb7f61b,2 +np.float32,0xbdd79bd0,0xbdd86919,2 +np.float32,0x3e89fc00,0x3e8d7a85,2 +np.float32,0x3f4bf690,0x3f8b77ea,2 +np.float32,0x3cbdf280,0x3cbdfb38,2 +np.float32,0x3f138f98,0x3f2835b4,2 +np.float32,0xbe33967c,0xbe3576bc,2 +np.float32,0xbf298164,0xbf4bedda,2 +np.float32,0x3e9955cc,0x3e9e2edb,2 +np.float32,0xbf79b383,0xc00c56c0,2 +np.float32,0x3ea0834c,0x3ea61aea,2 +np.float32,0xbf511184,0xbf92c89a,2 +np.float32,0x3f4d9fba,0x3f8dc666,2 +np.float32,0x3f3387c2,0x3f5ead80,2 +np.float32,0x3e3f7360,0x3e41babb,2 +np.float32,0xbf3cc4d6,0xbf71d879,2 +np.float32,0x3f2e4402,0x3f54994e,2 +np.float32,0x3e6a7118,0x3e6eabff,2 +np.float32,0xbf05d83e,0xbf1489cc,2 +np.float32,0xbdce4fd8,0xbdcf039a,2 +np.float32,0xbf03e2f4,0xbf11dbaf,2 +np.float32,0x3f1ea0a0,0x3f397375,2 +np.float32,0x3f7aff54,0x4013cb1b,2 +np.float32,0x3f5ef158,0x3fab1801,2 +np.float32,0xbe33bcc8,0xbe359e40,2 +np.float32,0xbf04dd0e,0xbf133111,2 +np.float32,0xbf14f887,0xbf2a54d1,2 +np.float32,0x3f75c37a,0x3ff9196e,2 +np.float32,0x3f35c3c8,0x3f6320f2,2 +np.float32,0x3f53bb94,0x3f96e3c3,2 +np.float32,0x3f4d473e,0x3f8d4a19,2 +np.float32,0xbdfe19e0,0xbdff6ac9,2 +np.float32,0xbf7f0cc4,0xc049342d,2 +np.float32,0xbdbfc778,0xbdc057bb,2 +np.float32,0xbf7575b7,0xbff73067,2 +np.float32,0xbe9df488,0xbea34609,2 +np.float32,0xbefbd3c6,0xbf09daff,2 +np.float32,0x3f19962c,0x3f316cbd,2 +np.float32,0x3f7acec6,0x40129732,2 +np.float32,0xbf5db7de,0xbfa89a21,2 +np.float32,0x3f62f444,0x3fb3e830,2 +np.float32,0xbf522adb,0xbf94737f,2 +np.float32,0xbef6ceb2,0xbf0690ba,2 +np.float32,0xbf57c41e,0xbf9d8db0,2 +np.float32,0x3eb3360c,0x3ebb1eb0,2 +np.float32,0x3f29327e,0x3f4b618e,2 +np.float32,0xbf08d099,0xbf18a916,2 +np.float32,0x3ea21014,0x3ea7d369,2 +np.float32,0x3f39e516,0x3f6ba861,2 +np.float32,0x3e7c4f28,0x3e80ce08,2 +np.float32,0xbec5a7f8,0xbed07582,2 +np.float32,0xbf0b1b46,0xbf1be3e7,2 +np.float32,0xbef0e0ec,0xbf02bb2e,2 +np.float32,0x3d835a30,0x3d838869,2 +np.float32,0x3f08aa40,0x3f18736e,2 +np.float32,0x3eb0e4c8,0x3eb87bcd,2 +np.float32,0x3eb3821c,0x3ebb7564,2 +np.float32,0xbe3a7320,0xbe3c8d5a,2 +np.float32,0x3e43f8c0,0x3e466b10,2 +np.float32,0x3e914288,0x3e955b69,2 +np.float32,0x3ec7d800,0x3ed308e7,2 +np.float32,0x3e603df8,0x3e63eef2,2 +np.float32,0x3f225cac,0x3f3f9ac6,2 +np.float32,0x3e3db8f0,0x3e3ff06b,2 +np.float32,0x3f358d78,0x3f62b38c,2 +np.float32,0xbed9bd64,0xbee88158,2 +np.float32,0x800000,0x800000,2 +np.float32,0x3f1adfce,0x3f337230,2 +np.float32,0xbefdc346,0xbf0b229d,2 +np.float32,0xbf091018,0xbf190208,2 +np.float32,0xbf800000,0xff800000,2 +np.float32,0x3f27c2c4,0x3f48d8db,2 +np.float32,0x3ef59c80,0x3f05c993,2 +np.float32,0x3e18a340,0x3e19c893,2 +np.float32,0x3f209610,0x3f3ca7c5,2 +np.float32,0x3f69cc22,0x3fc60087,2 +np.float32,0xbf66cf07,0xbfbd8721,2 +np.float32,0xbf768098,0xbffdfcc4,2 +np.float32,0x3df27a40,0x3df39ec4,2 +np.float32,0x3daf5bd0,0x3dafca02,2 +np.float32,0x3f53f2be,0x3f973b41,2 +np.float32,0xbf7edcbc,0xc0436ce3,2 +np.float32,0xbdf61db8,0xbdf74fae,2 +np.float32,0x3e2c9328,0x3e2e3cb2,2 +np.float32,0x3f1a4570,0x3f327f41,2 +np.float32,0xbf766306,0xbffd32f1,2 +np.float32,0xbf468b9d,0xbf845f0f,2 +np.float32,0x3e398970,0x3e3b9bb1,2 +np.float32,0xbbefa900,0xbbefaa18,2 +np.float32,0xbf54c989,0xbf9893ad,2 +np.float32,0x3f262cf6,0x3f46169d,2 +np.float32,0x3f638a8a,0x3fb54a98,2 +np.float32,0xbeb36c78,0xbebb5cb8,2 +np.float32,0xbeac4d42,0xbeb34993,2 +np.float32,0x3f1d1942,0x3f36fbf2,2 +np.float32,0xbf5d49ba,0xbfa7bf07,2 +np.float32,0xbf182b5c,0xbf2f38d0,2 +np.float32,0x3f41a742,0x3f7ce5ef,2 +np.float32,0x3f0b9a6c,0x3f1c9898,2 +np.float32,0x3e847494,0x3e8788f3,2 +np.float32,0xbde41608,0xbde50941,2 +np.float32,0x3f693944,0x3fc44b5a,2 +np.float32,0x3f0386b2,0x3f115e37,2 +np.float32,0x3f3a08b0,0x3f6bf3c1,2 +np.float32,0xbf78ee64,0xc0089977,2 +np.float32,0xbf013a11,0xbf0e436e,2 +np.float32,0x3f00668e,0x3f0d2836,2 +np.float32,0x3e6d9850,0x3e720081,2 +np.float32,0x3eacf578,0x3eb4075d,2 +np.float32,0x3f18aef8,0x3f3004b4,2 +np.float32,0x3de342f0,0x3de43385,2 +np.float32,0x3e56cee8,0x3e5a0b85,2 +np.float32,0xbf287912,0xbf4a1966,2 +np.float32,0x3e92c948,0x3e9704c2,2 +np.float32,0x3c07d080,0x3c07d14c,2 +np.float32,0xbe90f6a0,0xbe9508e0,2 +np.float32,0x3e8b4f28,0x3e8ee884,2 +np.float32,0xbf35b56c,0xbf6303ff,2 +np.float32,0xbef512b8,0xbf057027,2 +np.float32,0x3e36c630,0x3e38c0cd,2 +np.float32,0x3f0b3ca8,0x3f1c134a,2 +np.float32,0x3e4cd610,0x3e4fa2c5,2 +np.float32,0xbf5a8372,0xbfa273a3,2 +np.float32,0xbecaad3c,0xbed662ae,2 +np.float32,0xbec372d2,0xbecddeac,2 +np.float32,0x3f6fb2b2,0x3fda8a22,2 +np.float32,0x3f365f28,0x3f645b5a,2 +np.float32,0xbecd00fa,0xbed926a4,2 +np.float32,0xbebafa32,0xbec40672,2 +np.float32,0xbf235b73,0xbf4146c4,2 +np.float32,0x3f7a4658,0x400f6e2c,2 +np.float32,0x3f35e824,0x3f636a54,2 +np.float32,0x3cb87640,0x3cb87e3c,2 +np.float32,0xbf296288,0xbf4bb6ee,2 +np.float32,0x7f800000,0xffc00000,2 +np.float32,0xbf4de86e,0xbf8e2d1a,2 +np.float32,0xbf4ace12,0xbf89e5f3,2 +np.float32,0x3d65a300,0x3d65e0b5,2 +np.float32,0xbe10c534,0xbe11bf21,2 +np.float32,0xbeba3c1c,0xbec32b3e,2 +np.float32,0x3e87eaf8,0x3e8b40b8,2 +np.float32,0x3d5c3bc0,0x3d5c722d,2 +np.float32,0x3e8c14b8,0x3e8fbdf8,2 +np.float32,0xbf06c6f0,0xbf15d327,2 +np.float32,0xbe0f1e30,0xbe100f96,2 +np.float32,0xbee244b0,0xbef30251,2 +np.float32,0x3f2a21b0,0x3f4d0c1d,2 +np.float32,0xbf5f7f81,0xbfac408e,2 +np.float32,0xbe3dba2c,0xbe3ff1b2,2 +np.float32,0x3f3ffc22,0x3f790abf,2 +np.float32,0x3edc3dac,0x3eeb90fd,2 +np.float32,0x7f7fffff,0xffc00000,2 +np.float32,0x3ecfaaac,0x3edc5485,2 +np.float32,0x3f0affbe,0x3f1bbcd9,2 +np.float32,0x3f5f2264,0x3fab7dca,2 +np.float32,0x3f37394c,0x3f66186c,2 +np.float32,0xbe6b2f6c,0xbe6f74e3,2 +np.float32,0x3f284772,0x3f49c1f1,2 +np.float32,0xbdf27bc8,0xbdf3a051,2 +np.float32,0xbc8b14e0,0xbc8b184c,2 +np.float32,0x3f6a867c,0x3fc83b07,2 +np.float32,0x3f1ec876,0x3f39b429,2 +np.float32,0x3f6fd9a8,0x3fdb28d6,2 +np.float32,0xbf473cca,0xbf853e8c,2 +np.float32,0x3e23eff8,0x3e255c23,2 +np.float32,0x3ebefdfc,0x3ec8ac5d,2 +np.float32,0x3f6c8c22,0x3fced2b1,2 +np.float32,0x3f168388,0x3f2cad44,2 +np.float32,0xbece2410,0xbeda81ac,2 +np.float32,0x3f5532f0,0x3f993eea,2 +np.float32,0x3ef1938c,0x3f032dfa,2 +np.float32,0xbef05268,0xbf025fba,2 +np.float32,0x3f552e4a,0x3f993754,2 +np.float32,0x3e9ed068,0x3ea4392d,2 +np.float32,0xbe1a0c24,0xbe1b39be,2 +np.float32,0xbf2623aa,0xbf46068c,2 +np.float32,0xbe1cc300,0xbe1e00fc,2 +np.float32,0xbe9c0576,0xbea12397,2 +np.float32,0xbd827338,0xbd82a07e,2 +np.float32,0x3f0fc31a,0x3f229786,2 +np.float32,0x3e577810,0x3e5abc7d,2 +np.float32,0x3e0e1cb8,0x3e0f0906,2 +np.float32,0x3e84d344,0x3e87ee73,2 +np.float32,0xbf39c45e,0xbf6b6337,2 +np.float32,0x3edfb25c,0x3eefd273,2 +np.float32,0x3e016398,0x3e021596,2 +np.float32,0xbefeb1be,0xbf0bc0de,2 +np.float32,0x3f37e104,0x3f677196,2 +np.float32,0x3f545316,0x3f97d500,2 +np.float32,0xbefc165a,0xbf0a06ed,2 +np.float32,0xbf0923e6,0xbf191dcd,2 +np.float32,0xbf386508,0xbf68831f,2 +np.float32,0xbf3d4630,0xbf72f4e1,2 +np.float32,0x3f3dbe82,0x3f73ff13,2 +np.float32,0xbf703de4,0xbfdcc7e2,2 +np.float32,0xbf531482,0xbf95dd1a,2 +np.float32,0xbf0af1b6,0xbf1ba8f4,2 +np.float32,0xbec8fd9c,0xbed463a4,2 +np.float32,0xbe230320,0xbe24691a,2 +np.float32,0xbf7de541,0xc02faf38,2 +np.float32,0x3efd2360,0x3f0ab8b7,2 +np.float32,0x3db7f350,0x3db87291,2 +np.float32,0x3e74c510,0x3e799924,2 +np.float32,0x3da549c0,0x3da5a5fc,2 +np.float32,0x3e8a3bc4,0x3e8dbf4a,2 +np.float32,0xbf69f086,0xbfc66e84,2 +np.float32,0x3f323f8e,0x3f5c2c17,2 +np.float32,0x3ec0ae3c,0x3ecaa334,2 +np.float32,0xbebe8966,0xbec824fc,2 +np.float32,0x3f34691e,0x3f606b13,2 +np.float32,0x3f13790e,0x3f2813f5,2 +np.float32,0xbf61c027,0xbfb12618,2 +np.float32,0x3e90c690,0x3e94d4a1,2 +np.float32,0xbefce8f0,0xbf0a920e,2 +np.float32,0xbf5c0e8a,0xbfa559a7,2 +np.float32,0x3f374f60,0x3f6645b6,2 +np.float32,0x3f25f6fa,0x3f45b967,2 +np.float32,0x3f2421aa,0x3f42963a,2 +np.float32,0x3ebfa328,0x3ec96c57,2 +np.float32,0x3e3bef28,0x3e3e1685,2 +np.float32,0x3ea3fa3c,0x3ea9f4dd,2 +np.float32,0x3f362b8e,0x3f63f2b2,2 +np.float32,0xbedcef18,0xbeec6ada,2 +np.float32,0xbdd29c88,0xbdd35bd0,2 +np.float32,0x3f261aea,0x3f45f76f,2 +np.float32,0xbe62c470,0xbe66965e,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0xbee991aa,0xbefc277b,2 +np.float32,0xbf571960,0xbf9c6923,2 +np.float32,0xbe6fb410,0xbe743b41,2 +np.float32,0x3eb1bed0,0x3eb9738d,2 +np.float32,0x80000000,0x80000000,2 +np.float32,0x3eddcbe4,0x3eed7a69,2 +np.float32,0xbf2a81ba,0xbf4db86d,2 +np.float32,0x3f74da54,0x3ff38737,2 +np.float32,0xbeb6bff4,0xbebf29f4,2 +np.float32,0x3f445752,0x3f81a698,2 +np.float32,0x3ed081b4,0x3edd5618,2 +np.float32,0xbee73802,0xbef931b4,2 +np.float32,0xbd13f2a0,0xbd14031c,2 +np.float32,0xbb4d1200,0xbb4d122c,2 +np.float32,0xbee8777a,0xbefac393,2 +np.float32,0x3f42047c,0x3f7dc06c,2 +np.float32,0xbd089270,0xbd089f67,2 +np.float32,0xbf628c16,0xbfb2f66b,2 +np.float32,0x3e72e098,0x3e77978d,2 +np.float32,0x3ed967cc,0x3ee818e4,2 +np.float32,0x3e284c80,0x3e29d6d9,2 +np.float32,0x3f74e8ba,0x3ff3dbef,2 +np.float32,0x3f013e86,0x3f0e4969,2 +np.float32,0xbf610d4f,0xbfaf983c,2 +np.float32,0xbf3c8d36,0xbf715eba,2 +np.float32,0xbedbc756,0xbeeaffdb,2 +np.float32,0x3e143ec8,0x3e154b4c,2 +np.float32,0xbe1c9808,0xbe1dd4fc,2 +np.float32,0xbe887a1e,0xbe8bdac5,2 +np.float32,0xbe85c4bc,0xbe88f17a,2 +np.float32,0x3f35967e,0x3f62c5b4,2 +np.float32,0x3ea2c4a4,0x3ea89c2d,2 +np.float32,0xbc8703c0,0xbc8706e1,2 +np.float32,0xbf13d52c,0xbf289dff,2 +np.float32,0xbf63bb56,0xbfb5bf29,2 +np.float32,0xbf61c5ef,0xbfb13319,2 +np.float32,0xbf128410,0xbf26a675,2 +np.float32,0x3f03fcf2,0x3f11ff13,2 +np.float32,0xbe49c924,0xbe4c75cd,2 +np.float32,0xbf211a9c,0xbf3d82c5,2 +np.float32,0x3f7e9d52,0x403d1b42,2 +np.float32,0x3edfefd4,0x3ef01e71,2 +np.float32,0x3ebc5bd8,0x3ec59efb,2 +np.float32,0x3d7b02e0,0x3d7b537f,2 +np.float32,0xbf1163ba,0xbf24fb43,2 +np.float32,0x3f5072f2,0x3f91dbf1,2 +np.float32,0xbee700ce,0xbef8ec60,2 +np.float32,0x3f534168,0x3f962359,2 +np.float32,0x3e6d6c40,0x3e71d1ef,2 +np.float32,0x3def9d70,0x3df0b7a8,2 +np.float32,0x3e89cf80,0x3e8d4a8a,2 +np.float32,0xbf687ca7,0xbfc2290f,2 +np.float32,0x3f35e134,0x3f635c51,2 +np.float32,0x3e59eef8,0x3e5d50fa,2 +np.float32,0xbf65c9e1,0xbfbada61,2 +np.float32,0xbf759292,0xbff7e43d,2 +np.float32,0x3f4635a0,0x3f83f372,2 +np.float32,0x3f29baaa,0x3f4c53f1,2 +np.float32,0x3f6b15a6,0x3fc9fe04,2 +np.float32,0x3edabc88,0x3ee9b922,2 +np.float32,0x3ef382e0,0x3f046d4d,2 +np.float32,0xbe351310,0xbe36ff7f,2 +np.float32,0xbf05c935,0xbf14751c,2 +np.float32,0xbf0e7c50,0xbf20bc24,2 +np.float32,0xbf69bc94,0xbfc5d1b8,2 +np.float32,0xbed41aca,0xbee1aa23,2 +np.float32,0x3f518c08,0x3f938162,2 +np.float32,0xbf3d7974,0xbf73661a,2 +np.float32,0x3f1951a6,0x3f3101c9,2 +np.float32,0xbeb3f436,0xbebbf787,2 +np.float32,0xbf77a190,0xc0031d43,2 +np.float32,0x3eb5b3cc,0x3ebdf6e7,2 +np.float32,0xbed534b4,0xbee2fed2,2 +np.float32,0xbe53e1b8,0xbe56fc56,2 +np.float32,0x3f679e20,0x3fbfb91c,2 +np.float32,0xff7fffff,0xffc00000,2 +np.float32,0xbf7b9bcb,0xc0180073,2 +np.float32,0xbf5635e8,0xbf9aea15,2 +np.float32,0xbe5a3318,0xbe5d9856,2 +np.float32,0xbe003284,0xbe00df9a,2 +np.float32,0x3eb119a4,0x3eb8b7d6,2 +np.float32,0xbf3bccf8,0xbf6fbc84,2 +np.float32,0x3f36f600,0x3f658ea8,2 +np.float32,0x3f1ea834,0x3f397fc2,2 +np.float32,0xbe7cfb54,0xbe8129b3,2 +np.float32,0xbe9b3746,0xbea0406a,2 +np.float32,0x3edc0f90,0x3eeb586c,2 +np.float32,0x3e1842e8,0x3e19660c,2 +np.float32,0xbd8f10b0,0xbd8f4c70,2 +np.float32,0xbf064aca,0xbf1527a2,2 +np.float32,0x3e632e58,0x3e6705be,2 +np.float32,0xbef28ba4,0xbf03cdbb,2 +np.float32,0x3f27b21e,0x3f48bbaf,2 +np.float32,0xbe6f30d4,0xbe73b06e,2 +np.float32,0x3f3e6cb0,0x3f75834b,2 +np.float32,0xbf264aa5,0xbf4649f0,2 +np.float32,0xbf690775,0xbfc3b978,2 +np.float32,0xbf3e4a38,0xbf753632,2 +np.float64,0x3fe12bbe8c62577e,0x3fe32de8e5f961b0,1 +np.float64,0x3fc9b8909b337120,0x3fca1366da00efff,1 +np.float64,0x3feaee4245f5dc84,0x3ff3a011ea0432f3,1 +np.float64,0xbfe892c000f12580,0xbff03e5adaed6f0c,1 +np.float64,0xbf9be8de4837d1c0,0xbf9beaa367756bd1,1 +np.float64,0x3fe632e58fec65cc,0x3feb5ccc5114ca38,1 +np.float64,0x3fe78a0ef7ef141e,0x3fee1b4521d8eb6c,1 +np.float64,0x3feec27a65fd84f4,0x3fff643c8318e81e,1 +np.float64,0x3fbed6efce3dade0,0x3fbefd76cff00111,1 +np.float64,0xbfe3a05fab6740c0,0xbfe6db078aeeb0ca,1 +np.float64,0x3fdca11a56b94234,0x3fdece9e6eacff1b,1 +np.float64,0x3fe0fb15aae1f62c,0x3fe2e9e095ec2089,1 +np.float64,0x3fede12abf7bc256,0x3ffafd0ff4142807,1 +np.float64,0x3feb919edcf7233e,0x3ff4c9aa0bc2432f,1 +np.float64,0x3fd39633b5a72c68,0x3fd43c2e6d5f441c,1 +np.float64,0x3fd9efcbfeb3df98,0x3fdb83f03e58f91c,1 +np.float64,0x3fe2867a36650cf4,0x3fe525858c8ce72e,1 +np.float64,0x3fdacbb8f3b59770,0x3fdc8cd431b6e3ff,1 +np.float64,0x3fcc120503382408,0x3fcc88a8fa43e1c6,1 +np.float64,0xbfd99ff4eab33fea,0xbfdb24a20ae3687d,1 +np.float64,0xbfe8caf0157195e0,0xbff083b8dd0941d3,1 +np.float64,0x3fddc9bf92bb9380,0x3fe022aac0f761d5,1 +np.float64,0x3fe2dbb66e65b76c,0x3fe5a6e7caf3f1f2,1 +np.float64,0x3fe95f5c4a72beb8,0x3ff1444697e96138,1 +np.float64,0xbfc6b163d92d62c8,0xbfc6ef6e006658a1,1 +np.float64,0x3fdf1b2616be364c,0x3fe0fcbd2848c9e8,1 +np.float64,0xbfdca1ccf7b9439a,0xbfdecf7dc0eaa663,1 +np.float64,0x3fe078d6a260f1ae,0x3fe236a7c66ef6c2,1 +np.float64,0x3fdf471bb9be8e38,0x3fe11990ec74e704,1 +np.float64,0xbfe417626be82ec5,0xbfe79c9aa5ed2e2f,1 +np.float64,0xbfeb9cf5677739eb,0xbff4dfc24c012c90,1 +np.float64,0x3f8d9142b03b2280,0x3f8d91c9559d4779,1 +np.float64,0x3fb052c67220a590,0x3fb05873c90d1cd6,1 +np.float64,0x3fd742e2c7ae85c4,0x3fd860128947d15d,1 +np.float64,0x3fec2e2a2bf85c54,0x3ff60eb554bb8d71,1 +np.float64,0xbfeb2b8bc8f65718,0xbff40b734679497a,1 +np.float64,0x3fe25f8e0d64bf1c,0x3fe4eb381d077803,1 +np.float64,0x3fe56426256ac84c,0x3fe9dafbe79370f0,1 +np.float64,0x3feecc1e5d7d983c,0x3fffa49bedc7aa25,1 +np.float64,0xbfc88ce94b3119d4,0xbfc8dbba0fdee2d2,1 +np.float64,0xbfabcf51ac379ea0,0xbfabd6552aa63da3,1 +np.float64,0xbfccc8b849399170,0xbfcd48d6ff057a4d,1 +np.float64,0x3fd2f831e8a5f064,0x3fd38e67b0dda905,1 +np.float64,0x3fcafdcd6135fb98,0x3fcb670ae2ef4d36,1 +np.float64,0x3feda6042efb4c08,0x3ffa219442ac4ea5,1 +np.float64,0x3fed382b157a7056,0x3ff8bc01bc6d10bc,1 +np.float64,0x3fed858a50fb0b14,0x3ff9b1c05cb6cc0f,1 +np.float64,0x3fcc3960653872c0,0x3fccb2045373a3d1,1 +np.float64,0xbfec5177e478a2f0,0xbff65eb4557d94eb,1 +np.float64,0x3feafe0d5e75fc1a,0x3ff3bb4a260a0dcb,1 +np.float64,0x3fe08bc87ee11790,0x3fe25078aac99d31,1 +np.float64,0xffefffffffffffff,0xfff8000000000000,1 +np.float64,0x3f79985ce0333100,0x3f799872b591d1cb,1 +np.float64,0xbfd4001cf9a8003a,0xbfd4b14b9035b94f,1 +np.float64,0x3fe54a17e6ea9430,0x3fe9ac0f18682343,1 +np.float64,0xbfb4e07fea29c100,0xbfb4ec6520dd0689,1 +np.float64,0xbfed2b6659fa56cd,0xbff895ed57dc1450,1 +np.float64,0xbfe81fc8b5f03f92,0xbfef6b95e72a7a7c,1 +np.float64,0xbfe6aced16ed59da,0xbfec4ce131ee3704,1 +np.float64,0xbfe599f30ceb33e6,0xbfea3d07c1cd78e2,1 +np.float64,0xbfe0ff278b61fe4f,0xbfe2ef8b5efa89ed,1 +np.float64,0xbfe3e9406467d281,0xbfe750e43e841736,1 +np.float64,0x3fcc6b52cf38d6a8,0x3fcce688f4fb2cf1,1 +np.float64,0xbfc890e8133121d0,0xbfc8dfdfee72d258,1 +np.float64,0x3fe46e81dbe8dd04,0x3fe82e09783811a8,1 +np.float64,0x3fd94455e5b288ac,0x3fdab7cef2de0b1f,1 +np.float64,0xbfe82151fff042a4,0xbfef6f254c9696ca,1 +np.float64,0x3fcee1ac1d3dc358,0x3fcf80a6ed07070a,1 +np.float64,0x3fcce8f90939d1f0,0x3fcd6ad18d34f8b5,1 +np.float64,0x3fd6afe56fad5fcc,0x3fd7b7567526b1fb,1 +np.float64,0x3fb1a77092234ee0,0x3fb1ae9fe0d176fc,1 +np.float64,0xbfeb758b0d76eb16,0xbff493d105652edc,1 +np.float64,0xbfb857c24e30af88,0xbfb86aa4da3be53f,1 +np.float64,0x3fe89064eff120ca,0x3ff03b7c5b3339a8,1 +np.float64,0xbfc1bd2fef237a60,0xbfc1da99893473ed,1 +np.float64,0xbfe5ad6e2eeb5adc,0xbfea60ed181b5c05,1 +np.float64,0x3fd5a66358ab4cc8,0x3fd6899e640aeb1f,1 +np.float64,0xbfe198e832e331d0,0xbfe3c8c9496d0de5,1 +np.float64,0xbfdaa5c0d7b54b82,0xbfdc5ed7d3c5ce49,1 +np.float64,0x3fcceccb6939d998,0x3fcd6ed88c2dd3a5,1 +np.float64,0xbfe44413eae88828,0xbfe7e6cd32b34046,1 +np.float64,0xbfc7cbeccf2f97d8,0xbfc8139a2626edae,1 +np.float64,0x3fbf31e4fa3e63d0,0x3fbf59c6e863255e,1 +np.float64,0x3fdf03fa05be07f4,0x3fe0ed953f7989ad,1 +np.float64,0x3fe7f4eaceefe9d6,0x3fef092ca7e2ac39,1 +np.float64,0xbfc084e9d92109d4,0xbfc09ca10fd6aaea,1 +np.float64,0xbf88cfbf70319f80,0xbf88d00effa6d897,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0xbfa0176e9c202ee0,0xbfa018ca0a6ceef3,1 +np.float64,0xbfd88d0815b11a10,0xbfd9dfc6c6bcbe4e,1 +np.float64,0x3fe89f7730713eee,0x3ff04de52fb536f3,1 +np.float64,0xbfedc9707bfb92e1,0xbffaa25fcf9dd6da,1 +np.float64,0x3fe936d1a6726da4,0x3ff10e40c2d94bc9,1 +np.float64,0x3fdb64aec7b6c95c,0x3fdd473177317b3f,1 +np.float64,0xbfee4f9aaefc9f35,0xbffcdd212667003c,1 +np.float64,0x3fe3730067e6e600,0x3fe692b0a0babf5f,1 +np.float64,0xbfc257e58924afcc,0xbfc27871f8c218d7,1 +np.float64,0x3fe62db12dec5b62,0x3feb52c61b97d9f6,1 +np.float64,0xbfe3ff491367fe92,0xbfe774f1b3a96fd6,1 +np.float64,0x3fea43255274864a,0x3ff28b0c4b7b8d21,1 +np.float64,0xbfea37923c746f24,0xbff27962159f2072,1 +np.float64,0x3fcd0ac3c73a1588,0x3fcd8e6f8de41755,1 +np.float64,0xbfdccafde6b995fc,0xbfdf030fea8a0630,1 +np.float64,0x3fdba35268b746a4,0x3fdd94094f6f50c1,1 +np.float64,0x3fc68ea1d92d1d40,0x3fc6cb8d07cbb0e4,1 +np.float64,0xbfb88b1f6e311640,0xbfb89e7af4e58778,1 +np.float64,0xbfedc7cadffb8f96,0xbffa9c3766227956,1 +np.float64,0x3fe7928d3eef251a,0x3fee2dcf2ac7961b,1 +np.float64,0xbfeff42ede7fe85e,0xc00cef6b0f1e8323,1 +np.float64,0xbfebf07fa477e0ff,0xbff5893f99e15236,1 +np.float64,0x3fe3002ab9660056,0x3fe5defba550c583,1 +np.float64,0x3feb8f4307f71e86,0x3ff4c517ec8d6de9,1 +np.float64,0x3fd3c16f49a782e0,0x3fd46becaacf74da,1 +np.float64,0x3fc7613df12ec278,0x3fc7a52b2a3c3368,1 +np.float64,0xbfe33af560e675eb,0xbfe63a6528ff1587,1 +np.float64,0xbfde86495abd0c92,0xbfe09bd7ba05b461,1 +np.float64,0x3fe1e7fb4ee3cff6,0x3fe43b04311c0ab6,1 +np.float64,0xbfc528b6bd2a516c,0xbfc55ae0a0c184c8,1 +np.float64,0xbfd81025beb0204c,0xbfd94dd72d804613,1 +np.float64,0x10000000000000,0x10000000000000,1 +np.float64,0x3fc1151c47222a38,0x3fc12f5aad80a6bf,1 +np.float64,0x3feafa136775f426,0x3ff3b46854da0b3a,1 +np.float64,0x3fed2da0747a5b40,0x3ff89c85b658459e,1 +np.float64,0x3fda2a4b51b45498,0x3fdbca0d908ddbbd,1 +np.float64,0xbfd04cf518a099ea,0xbfd0aae0033b9e4c,1 +np.float64,0xbfb9065586320ca8,0xbfb91adb7e31f322,1 +np.float64,0xbfd830b428b06168,0xbfd973ca3c484d8d,1 +np.float64,0x3fc952f7ed32a5f0,0x3fc9a9994561fc1a,1 +np.float64,0xbfeb06c83c760d90,0xbff3ca77b326df20,1 +np.float64,0xbfeb1c98ac763931,0xbff3f0d0900f6149,1 +np.float64,0x3fdf061dbebe0c3c,0x3fe0eefb32b48d17,1 +np.float64,0xbf9acbaf28359760,0xbf9acd4024be9fec,1 +np.float64,0x3fec0adde2f815bc,0x3ff5c1628423794d,1 +np.float64,0xbfc4bc750d2978ec,0xbfc4eba43f590b94,1 +np.float64,0x3fdbe47878b7c8f0,0x3fdde44a2b500d73,1 +np.float64,0x3fe160d18162c1a4,0x3fe378cff08f18f0,1 +np.float64,0x3fc3b58dfd276b18,0x3fc3de01d3802de9,1 +np.float64,0x3fa860343430c060,0x3fa864ecd07ec962,1 +np.float64,0x3fcaebfb4b35d7f8,0x3fcb546512d1b4c7,1 +np.float64,0x3fe3fda558e7fb4a,0x3fe772412e5776de,1 +np.float64,0xbfe8169f2c702d3e,0xbfef5666c9a10f6d,1 +np.float64,0x3feda78e9efb4f1e,0x3ffa270712ded769,1 +np.float64,0xbfda483161b49062,0xbfdbedfbf2e850ba,1 +np.float64,0x3fd7407cf3ae80f8,0x3fd85d4f52622743,1 +np.float64,0xbfd63de4d4ac7bca,0xbfd73550a33e3c32,1 +np.float64,0xbfd9c30b90b38618,0xbfdb4e7695c856f3,1 +np.float64,0x3fcd70c00b3ae180,0x3fcdfa0969e0a119,1 +np.float64,0x3feb4f127f769e24,0x3ff44bf42514e0f4,1 +np.float64,0xbfec1db44af83b69,0xbff5ea54aed1f8e9,1 +np.float64,0x3fd68ff051ad1fe0,0x3fd792d0ed6d6122,1 +np.float64,0x3fe0a048a5614092,0x3fe26c80a826b2a2,1 +np.float64,0x3fd59f3742ab3e70,0x3fd6818563fcaf80,1 +np.float64,0x3fca26ecf9344dd8,0x3fca867ceb5d7ba8,1 +np.float64,0x3fdc1d547ab83aa8,0x3fde2a9cea866484,1 +np.float64,0xbfc78df6312f1bec,0xbfc7d3719b698a39,1 +np.float64,0x3fe754e72b6ea9ce,0x3feda89ea844a2e5,1 +np.float64,0x3fe740c1a4ee8184,0x3fed7dc56ec0c425,1 +np.float64,0x3fe77566a9eeeace,0x3fedee6f408df6de,1 +np.float64,0xbfbbf5bf8e37eb80,0xbfbc126a223781b4,1 +np.float64,0xbfe0acb297615965,0xbfe27d86681ca2b5,1 +np.float64,0xbfc20a0487241408,0xbfc228f5f7d52ce8,1 +np.float64,0xfff0000000000000,0xfff8000000000000,1 +np.float64,0x3fef98a4dbff314a,0x40043cfb60bd46fa,1 +np.float64,0x3fd059102ca0b220,0x3fd0b7d2be6d7822,1 +np.float64,0x3fe89f18a1f13e32,0x3ff04d714bbbf400,1 +np.float64,0x3fd45b6275a8b6c4,0x3fd516a44a276a4b,1 +np.float64,0xbfe04463e86088c8,0xbfe1ef9dfc9f9a53,1 +np.float64,0xbfe086e279610dc5,0xbfe249c9c1040a13,1 +np.float64,0x3f89c9b110339380,0x3f89ca0a641454b5,1 +np.float64,0xbfb5f5b4322beb68,0xbfb6038dc3fd1516,1 +np.float64,0x3fe6eae76f6dd5ce,0x3feccabae04d5c14,1 +np.float64,0x3fa9ef6c9c33dee0,0x3fa9f51c9a8c8a2f,1 +np.float64,0xbfe171b45f62e368,0xbfe390ccc4c01bf6,1 +np.float64,0x3fb2999442253330,0x3fb2a1fc006804b5,1 +np.float64,0x3fd124bf04a24980,0x3fd1927abb92472d,1 +np.float64,0xbfe6e05938edc0b2,0xbfecb519ba78114f,1 +np.float64,0x3fed466ee6fa8cde,0x3ff8e75405b50490,1 +np.float64,0xbfb999aa92333358,0xbfb9afa4f19f80a2,1 +np.float64,0xbfe98969ed7312d4,0xbff17d887b0303e7,1 +np.float64,0x3fe782843e6f0508,0x3fee0adbeebe3486,1 +np.float64,0xbfe232fcc26465fa,0xbfe4a90a68d46040,1 +np.float64,0x3fd190a90fa32154,0x3fd206f56ffcdca2,1 +np.float64,0xbfc4f8b75929f170,0xbfc5298b2d4e7740,1 +np.float64,0xbfba3a63d63474c8,0xbfba520835c2fdc2,1 +np.float64,0xbfb7708eea2ee120,0xbfb781695ec17846,1 +np.float64,0x3fed9fb7a5fb3f70,0x3ffa0b717bcd1609,1 +np.float64,0xbfc1b158cd2362b0,0xbfc1ce87345f3473,1 +np.float64,0x3f963478082c6900,0x3f96355c3000953b,1 +np.float64,0x3fc5050e532a0a20,0x3fc536397f38f616,1 +np.float64,0x3fe239f9eee473f4,0x3fe4b360da3b2faa,1 +np.float64,0xbfd66bd80eacd7b0,0xbfd769a29fd784c0,1 +np.float64,0x3fc57cdad52af9b8,0x3fc5b16b937f5f72,1 +np.float64,0xbfd3c36a0aa786d4,0xbfd46e1cd0b4eddc,1 +np.float64,0x3feff433487fe866,0x400cf0ea1def3161,1 +np.float64,0xbfed5577807aaaef,0xbff915e8f6bfdf22,1 +np.float64,0xbfca0dd3eb341ba8,0xbfca6c4d11836cb6,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0xbf974deaa82e9be0,0xbf974ef26a3130d1,1 +np.float64,0xbfe7f425e1efe84c,0xbfef076cb00d649d,1 +np.float64,0xbfe4413605e8826c,0xbfe7e20448b8a4b1,1 +np.float64,0xbfdfad202cbf5a40,0xbfe15cd9eb2be707,1 +np.float64,0xbfe43261ee6864c4,0xbfe7c952c951fe33,1 +np.float64,0xbfec141225782824,0xbff5d54d33861d98,1 +np.float64,0x3fd0f47abaa1e8f4,0x3fd15e8691a7f1c2,1 +np.float64,0x3fd378f0baa6f1e0,0x3fd41bea4a599081,1 +np.float64,0xbfb52523462a4a48,0xbfb5317fa7f436e2,1 +np.float64,0x3fcb30797d3660f0,0x3fcb9c174ea401ff,1 +np.float64,0xbfd48480dea90902,0xbfd5446e02c8b329,1 +np.float64,0xbfee4ae3ab7c95c7,0xbffcc650340ba274,1 +np.float64,0xbfeab086d075610e,0xbff3387f4e83ae26,1 +np.float64,0x3fa17cddf422f9c0,0x3fa17e9bf1b25736,1 +np.float64,0xbfe3064536e60c8a,0xbfe5e86aa5244319,1 +np.float64,0x3feb2882c5765106,0x3ff40604c7d97d44,1 +np.float64,0xbfa6923ff42d2480,0xbfa695ff57b2fc3f,1 +np.float64,0xbfa8bdbdcc317b80,0xbfa8c2ada0d94aa7,1 +np.float64,0x3fe7f16b8e6fe2d8,0x3fef013948c391a6,1 +np.float64,0x3fe4e7169f69ce2e,0x3fe8fceef835050a,1 +np.float64,0x3fed877638fb0eec,0x3ff9b83694127959,1 +np.float64,0xbfe0cc9ecf61993e,0xbfe2a978234cbde5,1 +np.float64,0xbfe977e79672efcf,0xbff16589ea494a38,1 +np.float64,0xbfe240130ae48026,0xbfe4bc69113e0d7f,1 +np.float64,0x3feb1e9b70763d36,0x3ff3f4615938a491,1 +np.float64,0xbfdf197dfcbe32fc,0xbfe0fba78a0fc816,1 +np.float64,0xbfee0f8543fc1f0a,0xbffbb9d9a4ee5387,1 +np.float64,0x3fe88d2191f11a44,0x3ff037843b5b6313,1 +np.float64,0xbfd11bb850a23770,0xbfd188c1cef40007,1 +np.float64,0xbfa1b36e9c2366e0,0xbfa1b53d1d8a8bc4,1 +np.float64,0xbfea2d70d9f45ae2,0xbff26a0629e36b3e,1 +np.float64,0xbfd9188703b2310e,0xbfda83f9ddc18348,1 +np.float64,0xbfee194894fc3291,0xbffbe3c83b61e7cb,1 +np.float64,0xbfe093b4a9e1276a,0xbfe25b4ad6f8f83d,1 +np.float64,0x3fea031489f4062a,0x3ff22accc000082e,1 +np.float64,0xbfc6c0827b2d8104,0xbfc6ff0a94326381,1 +np.float64,0x3fef5cd340feb9a6,0x4002659c5a1b34af,1 +np.float64,0x8010000000000000,0x8010000000000000,1 +np.float64,0x3fd97cb533b2f96c,0x3fdafab28aaae8e3,1 +np.float64,0x3fe2123334642466,0x3fe478bd83a8ce02,1 +np.float64,0xbfd9a69637b34d2c,0xbfdb2c87c6b6fb8c,1 +np.float64,0x3fc58def7f2b1be0,0x3fc5c2ff724a9f61,1 +np.float64,0xbfedd5da1f7babb4,0xbffad15949b7fb22,1 +np.float64,0x3fe90e92a0721d26,0x3ff0d9b64323efb8,1 +np.float64,0x3fd34b9442a69728,0x3fd3e9f8fe80654e,1 +np.float64,0xbfc5f509ab2bea14,0xbfc62d2ad325c59f,1 +np.float64,0x3feb245634f648ac,0x3ff3fe91a46acbe1,1 +np.float64,0x3fd101e539a203cc,0x3fd16cf52ae6d203,1 +np.float64,0xbfc51e9ba72a3d38,0xbfc5507d00521ba3,1 +np.float64,0x3fe5fe1683ebfc2e,0x3feaf7dd8b1f92b0,1 +np.float64,0x3fc362e59126c5c8,0x3fc389601814170b,1 +np.float64,0x3fea34dbd77469b8,0x3ff27542eb721e7e,1 +np.float64,0xbfc13ed241227da4,0xbfc159d42c0a35a9,1 +np.float64,0xbfe6df118cedbe23,0xbfecb27bb5d3f784,1 +np.float64,0x3fd92895f6b2512c,0x3fda96f5f94b625e,1 +np.float64,0xbfe7ea3aa76fd476,0xbfeef0e93939086e,1 +np.float64,0xbfc855498330aa94,0xbfc8a1ff690c9533,1 +np.float64,0x3fd9f27b3ab3e4f8,0x3fdb8726979afc3b,1 +np.float64,0x3fc65d52232cbaa8,0x3fc698ac4367afba,1 +np.float64,0x3fd1271dd0a24e3c,0x3fd195087649d54e,1 +np.float64,0xbfe983445df30689,0xbff175158b773b90,1 +np.float64,0xbfe0d9b13261b362,0xbfe2bb8908fc9e6e,1 +np.float64,0x3fd7671f2aaece40,0x3fd889dccbf21629,1 +np.float64,0x3fe748aebfee915e,0x3fed8e970d94c17d,1 +np.float64,0x3fea756e4e74eadc,0x3ff2d947ef3a54f4,1 +np.float64,0x3fde22311cbc4464,0x3fe05b4ce9df1fdd,1 +np.float64,0x3fe2b55ec1e56abe,0x3fe56c6849e3985a,1 +np.float64,0x3fed7b47437af68e,0x3ff98f8e82de99a0,1 +np.float64,0x3fec8184b179030a,0x3ff6d03aaf0135ba,1 +np.float64,0x3fc9ea825533d508,0x3fca4776d7190e71,1 +np.float64,0xbfe8ddd58b71bbab,0xbff09b770ed7bc9a,1 +np.float64,0xbfed41741bfa82e8,0xbff8d81c2a9fc615,1 +np.float64,0x3fe0a73888e14e72,0x3fe27602ad9a3726,1 +np.float64,0xbfe9d0a565f3a14b,0xbff1e1897b628f66,1 +np.float64,0x3fda12b381b42568,0x3fdbadbec22fbd5a,1 +np.float64,0x3fef0081187e0102,0x4000949eff8313c2,1 +np.float64,0x3fef6942b67ed286,0x4002b7913eb1ee76,1 +np.float64,0x3fda10f882b421f0,0x3fdbababa2d6659d,1 +np.float64,0x3fe5828971eb0512,0x3fea122b5088315a,1 +np.float64,0x3fe9d4b53ff3a96a,0x3ff1e75c148bda01,1 +np.float64,0x3fe95d246bf2ba48,0x3ff1414a61a136ec,1 +np.float64,0x3f9e575eb83caec0,0x3f9e59a4f17179e3,1 +np.float64,0x3fdb0a20b5b61440,0x3fdcd8a56178a17f,1 +np.float64,0xbfdef425e3bde84c,0xbfe0e33eeacf3861,1 +np.float64,0x3fd6afcf6bad5fa0,0x3fd7b73d47288347,1 +np.float64,0x3fe89256367124ac,0x3ff03dd9f36ce40e,1 +np.float64,0x3fe7e560fcefcac2,0x3feee5ef8688b60b,1 +np.float64,0x3fedef55e1fbdeac,0x3ffb350ee1df986b,1 +np.float64,0xbfe44b926de89725,0xbfe7f3539910c41f,1 +np.float64,0x3fc58310f32b0620,0x3fc5b7cfdba15bd0,1 +np.float64,0x3f736d256026da00,0x3f736d2eebe91a90,1 +np.float64,0x3feb012d2076025a,0x3ff3c0b5d21a7259,1 +np.float64,0xbfe466a6c468cd4e,0xbfe820c9c197601f,1 +np.float64,0x3fe1aba8aa635752,0x3fe3e3b73920f64c,1 +np.float64,0x3fe5597c336ab2f8,0x3fe9c7bc4b765b15,1 +np.float64,0x3fe1004ac5e20096,0x3fe2f12116e99821,1 +np.float64,0x3fecbc67477978ce,0x3ff76377434dbdad,1 +np.float64,0x3fe0e64515e1cc8a,0x3fe2ccf5447c1579,1 +np.float64,0x3febcfa874f79f50,0x3ff54528f0822144,1 +np.float64,0x3fc36915ed26d228,0x3fc38fb5b28d3f72,1 +np.float64,0xbfe01213e5e02428,0xbfe1ac0e1e7418f1,1 +np.float64,0x3fcd97875b3b2f10,0x3fce22fe3fc98702,1 +np.float64,0xbfe30383c5e60708,0xbfe5e427e62cc957,1 +np.float64,0xbfde339bf9bc6738,0xbfe0667f337924f5,1 +np.float64,0xbfda7c1c49b4f838,0xbfdc2c8801ce654a,1 +np.float64,0x3fb6b3489e2d6690,0x3fb6c29650387b92,1 +np.float64,0xbfe1fd4d76e3fa9b,0xbfe45a1f60077678,1 +np.float64,0xbf67c5e0402f8c00,0xbf67c5e49fce115a,1 +np.float64,0xbfd4f9aa2da9f354,0xbfd5c759603d0b9b,1 +np.float64,0x3fe83c227bf07844,0x3fefada9f1bd7fa9,1 +np.float64,0xbf97f717982fee20,0xbf97f836701a8cd5,1 +np.float64,0x3fe9688a2472d114,0x3ff150aa575e7d51,1 +np.float64,0xbfc5a9779d2b52f0,0xbfc5df56509c48b1,1 +np.float64,0xbfe958d5f472b1ac,0xbff13b813f9bee20,1 +np.float64,0xbfd7b3b944af6772,0xbfd8e276c2b2920f,1 +np.float64,0x3fed10198e7a2034,0x3ff8469c817572f0,1 +np.float64,0xbfeeecc4517dd989,0xc000472b1f858be3,1 +np.float64,0xbfdbcce47eb799c8,0xbfddc734aa67812b,1 +np.float64,0xbfd013ee24a027dc,0xbfd06df3089384ca,1 +np.float64,0xbfd215f2bfa42be6,0xbfd29774ffe26a74,1 +np.float64,0x3fdfd0ae67bfa15c,0x3fe1746e3a963a9f,1 +np.float64,0xbfc84aa10b309544,0xbfc896f0d25b723a,1 +np.float64,0xbfcd0c627d3a18c4,0xbfcd9024c73747a9,1 +np.float64,0x3fd87df6dbb0fbec,0x3fd9ce1dde757f31,1 +np.float64,0xbfdad85e05b5b0bc,0xbfdc9c2addb6ce47,1 +np.float64,0xbfee4f8977fc9f13,0xbffcdccd68e514b3,1 +np.float64,0x3fa5c290542b8520,0x3fa5c5ebdf09ca70,1 +np.float64,0xbfd7e401d2afc804,0xbfd91a7e4eb5a026,1 +np.float64,0xbfe33ff73b667fee,0xbfe6423cc6eb07d7,1 +np.float64,0x3fdfb7d6c4bf6fac,0x3fe163f2e8175177,1 +np.float64,0xbfd515d69eaa2bae,0xbfd5e6eedd6a1598,1 +np.float64,0x3fb322232e264440,0x3fb32b49d91c3cbe,1 +np.float64,0xbfe20ac39e641587,0xbfe46dd4b3803f19,1 +np.float64,0x3fe282dc18e505b8,0x3fe520152120c297,1 +np.float64,0xbfc905a4cd320b48,0xbfc95929b74865fb,1 +np.float64,0x3fe0ae3b83615c78,0x3fe27fa1dafc825b,1 +np.float64,0xbfc1bfed0f237fdc,0xbfc1dd6466225cdf,1 +np.float64,0xbfeca4d47d7949a9,0xbff72761a34fb682,1 +np.float64,0xbfe8cf8c48f19f18,0xbff0897ebc003626,1 +np.float64,0xbfe1aaf0a36355e2,0xbfe3e2ae7b17a286,1 +np.float64,0x3fe2ca442e659488,0x3fe58c3a2fb4f14a,1 +np.float64,0xbfda3c2deeb4785c,0xbfdbdf89fe96a243,1 +np.float64,0xbfdc12bfecb82580,0xbfde1d81dea3c221,1 +np.float64,0xbfe2d6d877e5adb1,0xbfe59f73e22c1fc7,1 +np.float64,0x3fe5f930636bf260,0x3feaee96a462e4de,1 +np.float64,0x3fcf3c0ea53e7820,0x3fcfe0b0f92be7e9,1 +np.float64,0xbfa5bb90f42b7720,0xbfa5bee9424004cc,1 +np.float64,0xbfe2fb3a3265f674,0xbfe5d75b988bb279,1 +np.float64,0x3fcaec7aab35d8f8,0x3fcb54ea582fff6f,1 +np.float64,0xbfd8d3228db1a646,0xbfda322297747fbc,1 +np.float64,0x3fedd2e0ad7ba5c2,0x3ffac6002b65c424,1 +np.float64,0xbfd9edeca2b3dbda,0xbfdb81b2b7785e33,1 +np.float64,0xbfef5febb17ebfd7,0xc002796b15950960,1 +np.float64,0x3fde22f787bc45f0,0x3fe05bcc624b9ba2,1 +np.float64,0xbfc716a4ab2e2d48,0xbfc758073839dd44,1 +np.float64,0xbf9bed852837db00,0xbf9bef4b2a3f3bdc,1 +np.float64,0x3fef8f88507f1f10,0x4003e5e566444571,1 +np.float64,0xbfdc1bbed6b8377e,0xbfde28a64e174e60,1 +np.float64,0x3fe02d30eae05a62,0x3fe1d064ec027cd3,1 +np.float64,0x3fd9dbb500b3b76c,0x3fdb6bea40162279,1 +np.float64,0x3fe353ff1d66a7fe,0x3fe661b3358c925e,1 +np.float64,0x3fac3ebfb4387d80,0x3fac4618effff2b0,1 +np.float64,0x3fe63cf0ba6c79e2,0x3feb7030cff5f434,1 +np.float64,0x3fd0e915f8a1d22c,0x3fd152464597b510,1 +np.float64,0xbfd36987cda6d310,0xbfd40af049d7621e,1 +np.float64,0xbfdc5b4dc7b8b69c,0xbfde7790a35da2bc,1 +np.float64,0x3feee7ff4a7dcffe,0x40003545989e07c7,1 +np.float64,0xbfeb2c8308765906,0xbff40d2e6469249e,1 +np.float64,0x3fe535a894ea6b52,0x3fe98781648550d0,1 +np.float64,0xbfef168eb9fe2d1d,0xc000f274ed3cd312,1 +np.float64,0x3fc3e2d98927c5b0,0x3fc40c6991b8900c,1 +np.float64,0xbfcd8fe3e73b1fc8,0xbfce1aec7f9b7f7d,1 +np.float64,0xbfd55d8c3aaabb18,0xbfd6378132ee4892,1 +np.float64,0xbfe424a66168494d,0xbfe7b289d72c98b3,1 +np.float64,0x3fd81af13eb035e4,0x3fd95a6a9696ab45,1 +np.float64,0xbfe3016722e602ce,0xbfe5e0e46db228cd,1 +np.float64,0x3fe9a20beff34418,0x3ff19faca17fc468,1 +np.float64,0xbfe2124bc7e42498,0xbfe478e19927e723,1 +np.float64,0x3fd96f8622b2df0c,0x3fdaeb08da6b08ae,1 +np.float64,0x3fecd6796579acf2,0x3ff7a7d02159e181,1 +np.float64,0x3fe60015df6c002c,0x3feafba6f2682a61,1 +np.float64,0x3fc7181cf72e3038,0x3fc7598c2cc3c3b4,1 +np.float64,0xbfce6e2e0b3cdc5c,0xbfcf0621b3e37115,1 +np.float64,0xbfe52a829e6a5505,0xbfe973a785980af9,1 +np.float64,0x3fed4bbac37a9776,0x3ff8f7a0e68a2bbe,1 +np.float64,0x3fabdfaacc37bf60,0x3fabe6bab42bd246,1 +np.float64,0xbfcd9598cb3b2b30,0xbfce20f3c4c2c261,1 +np.float64,0x3fd717d859ae2fb0,0x3fd82e88eca09ab1,1 +np.float64,0x3fe28ccb18e51996,0x3fe52f071d2694fd,1 +np.float64,0xbfe43f064ae87e0c,0xbfe7de5eab36b5b9,1 +np.float64,0x7fefffffffffffff,0xfff8000000000000,1 +np.float64,0xbfb39b045a273608,0xbfb3a4dd3395fdd5,1 +np.float64,0xbfb3358bae266b18,0xbfb33ece5e95970a,1 +np.float64,0xbfeeafb6717d5f6d,0xbffeec3f9695b575,1 +np.float64,0xbfe7a321afef4644,0xbfee522dd80f41f4,1 +np.float64,0x3fe3a17e5be742fc,0x3fe6dcd32af51e92,1 +np.float64,0xbfc61694bd2c2d28,0xbfc64fbbd835f6e7,1 +np.float64,0xbfd795906faf2b20,0xbfd8bf89b370655c,1 +np.float64,0xbfe4b39b59e96736,0xbfe8a3c5c645b6e3,1 +np.float64,0x3fd310af3ba62160,0x3fd3a9442e825e1c,1 +np.float64,0xbfd45198a6a8a332,0xbfd50bc10311a0a3,1 +np.float64,0x3fd0017eaaa002fc,0x3fd05a472a837999,1 +np.float64,0xbfea974d98752e9b,0xbff30f67f1835183,1 +np.float64,0xbf978f60582f1ec0,0xbf979070e1c2b59d,1 +np.float64,0x3fe1c715d4e38e2c,0x3fe40b479e1241a2,1 +np.float64,0xbfccb965cd3972cc,0xbfcd38b40c4a352d,1 +np.float64,0xbfd9897048b312e0,0xbfdb09d55624c2a3,1 +np.float64,0x3fe7f5de4befebbc,0x3fef0b56be259f9c,1 +np.float64,0x3fcc6c6d4338d8d8,0x3fcce7b20ed68a78,1 +np.float64,0xbfe63884046c7108,0xbfeb67a3b945c3ee,1 +np.float64,0xbfce64e2ad3cc9c4,0xbfcefc47fae2e81f,1 +np.float64,0x3fefeb57b27fd6b0,0x400ab2eac6321cfb,1 +np.float64,0x3fe679627e6cf2c4,0x3febe6451b6ee0c4,1 +np.float64,0x3fc5f710172bee20,0x3fc62f40f85cb040,1 +np.float64,0x3fc34975e52692e8,0x3fc36f58588c7fa2,1 +np.float64,0x3fe8a3784cf146f0,0x3ff052ced9bb9406,1 +np.float64,0x3fd11a607ca234c0,0x3fd1874f876233fe,1 +np.float64,0x3fb2d653f625aca0,0x3fb2df0f4c9633f3,1 +np.float64,0x3fe555f39eeaabe8,0x3fe9c15ee962a28c,1 +np.float64,0xbfea297e3bf452fc,0xbff264107117f709,1 +np.float64,0x3fe1581cdde2b03a,0x3fe36c79acedf99c,1 +np.float64,0x3fd4567063a8ace0,0x3fd51123dbd9106f,1 +np.float64,0x3fa3883aec271080,0x3fa38aa86ec71218,1 +np.float64,0x3fe40e5d7de81cba,0x3fe78dbb9b568850,1 +np.float64,0xbfe9a2f7347345ee,0xbff1a0f4faa05041,1 +np.float64,0x3f9eef03a83dde00,0x3f9ef16caa0c1478,1 +np.float64,0xbfcb4641d1368c84,0xbfcbb2e7ff8c266d,1 +np.float64,0xbfa8403b2c308070,0xbfa844e148b735b7,1 +np.float64,0xbfe1875cd6e30eba,0xbfe3afadc08369f5,1 +np.float64,0xbfdd3c3d26ba787a,0xbfdf919b3e296766,1 +np.float64,0x3fcd6c4c853ad898,0x3fcdf55647b518b8,1 +np.float64,0xbfe360a173e6c143,0xbfe6759eb3a08cf2,1 +np.float64,0x3fe5a13147eb4262,0x3fea4a5a060f5adb,1 +np.float64,0x3feb3cdd7af679ba,0x3ff42aae0cf61234,1 +np.float64,0x3fe5205128ea40a2,0x3fe9618f3d0c54af,1 +np.float64,0x3fce35343f3c6a68,0x3fcec9c4e612b050,1 +np.float64,0xbfc345724d268ae4,0xbfc36b3ce6338e6a,1 +np.float64,0x3fedc4fc0e7b89f8,0x3ffa91c1d775c1f7,1 +np.float64,0x3fe41fbf21683f7e,0x3fe7aa6c174a0e65,1 +np.float64,0xbfc7a1a5d32f434c,0xbfc7e7d27a4c5241,1 +np.float64,0x3fd3e33eaca7c67c,0x3fd4915264441e2f,1 +np.float64,0x3feb3f02f6f67e06,0x3ff42e942249e596,1 +np.float64,0x3fdb75fcb0b6ebf8,0x3fdd5c63f98b6275,1 +np.float64,0x3fd6476603ac8ecc,0x3fd74020b164cf38,1 +np.float64,0x3fed535372faa6a6,0x3ff90f3791821841,1 +np.float64,0x3fe8648ead70c91e,0x3ff006a62befd7ed,1 +np.float64,0x3fd0f90760a1f210,0x3fd1636b39bb1525,1 +np.float64,0xbfca052443340a48,0xbfca633d6e777ae0,1 +np.float64,0xbfa6a5e3342d4bc0,0xbfa6a9ac6a488f5f,1 +np.float64,0x3fd5598038aab300,0x3fd632f35c0c3d52,1 +np.float64,0xbfdf66218fbecc44,0xbfe12df83b19f300,1 +np.float64,0x3fe78e15b56f1c2c,0x3fee240d12489cd1,1 +np.float64,0x3fe3d6a7b3e7ad50,0x3fe7329dcf7401e2,1 +np.float64,0xbfddb8e97bbb71d2,0xbfe017ed6d55a673,1 +np.float64,0xbfd57afd55aaf5fa,0xbfd658a9607c3370,1 +np.float64,0xbfdba4c9abb74994,0xbfdd95d69e5e8814,1 +np.float64,0xbfe71d8090ee3b01,0xbfed3390be6d2eef,1 +np.float64,0xbfc738ac0f2e7158,0xbfc77b3553b7c026,1 +np.float64,0x3f873656302e6c80,0x3f873697556ae011,1 +np.float64,0x3fe559491d6ab292,0x3fe9c7603b12c608,1 +np.float64,0xbfe262776864c4ef,0xbfe4ef905dda8599,1 +np.float64,0x3fe59d8917eb3b12,0x3fea439f44b7573f,1 +np.float64,0xbfd4b5afb5a96b60,0xbfd57b4e3df4dbc8,1 +np.float64,0x3fe81158447022b0,0x3fef4a3cea3eb6a9,1 +np.float64,0xbfeb023441f60468,0xbff3c27f0fc1a4dc,1 +np.float64,0x3fefb212eaff6426,0x40055fc6d949cf44,1 +np.float64,0xbfe1300ac1e26016,0xbfe333f297a1260e,1 +np.float64,0xbfeae0a2f575c146,0xbff388d58c380b8c,1 +np.float64,0xbfeddd8e55fbbb1d,0xbffaef045b2e21d9,1 +np.float64,0x3fec7c6c1d78f8d8,0x3ff6c3ebb019a8e5,1 +np.float64,0xbfe27e071f64fc0e,0xbfe518d2ff630f33,1 +np.float64,0x8000000000000001,0x8000000000000001,1 +np.float64,0x3fc5872abf2b0e58,0x3fc5bc083105db76,1 +np.float64,0x3fe65114baeca22a,0x3feb9745b82ef15a,1 +np.float64,0xbfc783abe52f0758,0xbfc7c8cb23f93e79,1 +np.float64,0x3fe4b7a5dd696f4c,0x3fe8aab9d492f0ca,1 +np.float64,0xbf91a8e8a82351e0,0xbf91a95b6ae806f1,1 +np.float64,0xbfee482eb77c905d,0xbffcb952830e715a,1 +np.float64,0x3fba0eee2a341de0,0x3fba261d495e3a1b,1 +np.float64,0xbfeb8876ae7710ed,0xbff4b7f7f4343506,1 +np.float64,0xbfe4d29e46e9a53c,0xbfe8d9547a601ba7,1 +np.float64,0xbfe12413b8e24828,0xbfe3232656541d10,1 +np.float64,0x3fc0bd8f61217b20,0x3fc0d63f937f0aa4,1 +np.float64,0xbfd3debafda7bd76,0xbfd48c534e5329e4,1 +np.float64,0x3fc0f92de921f258,0x3fc112eb7d47349b,1 +np.float64,0xbfe576b95f6aed72,0xbfe9fca859239b3c,1 +np.float64,0x3fd10e520da21ca4,0x3fd17a546e4152f7,1 +np.float64,0x3fcef917eb3df230,0x3fcf998677a8fa8f,1 +np.float64,0x3fdfcf863abf9f0c,0x3fe173a98af1cb13,1 +np.float64,0x3fc28c4b4f251898,0x3fc2adf43792e917,1 +np.float64,0x3fceb837ad3d7070,0x3fcf54a63b7d8c5c,1 +np.float64,0x3fc0140a05202818,0x3fc029e4f75330cb,1 +np.float64,0xbfd76c3362aed866,0xbfd88fb9e790b4e8,1 +np.float64,0xbfe475300868ea60,0xbfe8395334623e1f,1 +np.float64,0x3fea70b9b4f4e174,0x3ff2d1dad92173ba,1 +np.float64,0xbfe2edbd4965db7a,0xbfe5c29449a9365d,1 +np.float64,0xbfddf86f66bbf0de,0xbfe0408439cada9b,1 +np.float64,0xbfb443cdfa288798,0xbfb44eae796ad3ea,1 +np.float64,0xbf96a8a0482d5140,0xbf96a992b6ef073b,1 +np.float64,0xbfd279db2fa4f3b6,0xbfd3043db6acbd9e,1 +np.float64,0x3fe5d99088ebb322,0x3feab30be14e1605,1 +np.float64,0xbfe1a917abe35230,0xbfe3e0063d0f5f63,1 +np.float64,0x3fc77272f52ee4e8,0x3fc7b6f8ab6f4591,1 +np.float64,0x3fd6b62146ad6c44,0x3fd7be77eef8390a,1 +np.float64,0xbfe39fd9bc673fb4,0xbfe6da30dc4eadde,1 +np.float64,0x3fe35545c066aa8c,0x3fe663b5873e4d4b,1 +np.float64,0xbfcbbeffb3377e00,0xbfcc317edf7f6992,1 +np.float64,0xbfe28a58366514b0,0xbfe52b5734579ffa,1 +np.float64,0xbfbf0c87023e1910,0xbfbf33d970a0dfa5,1 +np.float64,0xbfd31144cba6228a,0xbfd3a9e84f9168f9,1 +np.float64,0xbfe5c044056b8088,0xbfea83d607c1a88a,1 +np.float64,0x3fdaabdf18b557c0,0x3fdc663ee8eddc83,1 +np.float64,0xbfeb883006f71060,0xbff4b76feff615be,1 +np.float64,0xbfebaef41d775de8,0xbff5034111440754,1 +np.float64,0x3fd9b6eb3bb36dd8,0x3fdb3fff5071dacf,1 +np.float64,0x3fe4e33c45e9c678,0x3fe8f637779ddedf,1 +np.float64,0x3fe52213a06a4428,0x3fe964adeff5c14e,1 +np.float64,0x3fe799254cef324a,0x3fee3c3ecfd3cdc5,1 +np.float64,0x3fd0533f35a0a680,0x3fd0b19a003469d3,1 +np.float64,0x3fec7ef5c7f8fdec,0x3ff6ca0abe055048,1 +np.float64,0xbfd1b5da82a36bb6,0xbfd22f357acbee79,1 +np.float64,0xbfd8f9c652b1f38c,0xbfda5faacbce9cf9,1 +np.float64,0x3fc8fc818b31f900,0x3fc94fa9a6aa53c8,1 +np.float64,0x3fcf42cc613e8598,0x3fcfe7dc128f33f2,1 +np.float64,0x3fd393a995a72754,0x3fd4396127b19305,1 +np.float64,0x3fec7b7df9f8f6fc,0x3ff6c1ae51753ef2,1 +np.float64,0x3fc07f175b20fe30,0x3fc096b55c11568c,1 +np.float64,0xbf979170082f22e0,0xbf979280d9555f44,1 +np.float64,0xbfb9d110c633a220,0xbfb9e79ba19b3c4a,1 +np.float64,0x3fedcd7d417b9afa,0x3ffab19734e86d58,1 +np.float64,0xbfec116f27f822de,0xbff5cf9425cb415b,1 +np.float64,0xbfec4fa0bef89f42,0xbff65a771982c920,1 +np.float64,0x3f94d4452829a880,0x3f94d501789ad11c,1 +np.float64,0xbfefe5ede27fcbdc,0xc009c440d3c2a4ce,1 +np.float64,0xbfe7e5f7b5efcbf0,0xbfeee74449aee1db,1 +np.float64,0xbfeb71dc8976e3b9,0xbff48cd84ea54ed2,1 +np.float64,0xbfe4cdb65f699b6c,0xbfe8d0d3bce901ef,1 +np.float64,0x3fb78ef1ee2f1de0,0x3fb7a00e7d183c48,1 +np.float64,0x3fb681864a2d0310,0x3fb6906fe64b4cd7,1 +np.float64,0xbfd2ad3b31a55a76,0xbfd33c57b5985399,1 +np.float64,0x3fdcdaaa95b9b554,0x3fdf16b99628db1e,1 +np.float64,0x3fa4780b7428f020,0x3fa47ad6ce9b8081,1 +np.float64,0x3fc546b0ad2a8d60,0x3fc579b361b3b18f,1 +np.float64,0x3feaf98dd6f5f31c,0x3ff3b38189c3539c,1 +np.float64,0x3feb0b2eca76165e,0x3ff3d22797083f9a,1 +np.float64,0xbfdc02ae3ab8055c,0xbfde099ecb5dbacf,1 +np.float64,0x3fd248bf17a49180,0x3fd2ceb77b346d1d,1 +np.float64,0x3fe349d666e693ac,0x3fe651b9933a8853,1 +np.float64,0xbfca526fc534a4e0,0xbfcab3e83f0d9b93,1 +np.float64,0x3fc156421722ac88,0x3fc171b38826563b,1 +np.float64,0xbfe4244569e8488b,0xbfe7b1e93e7d4f92,1 +np.float64,0x3fe010faabe021f6,0x3fe1aa961338886d,1 +np.float64,0xbfc52dacb72a5b58,0xbfc55ffa50eba380,1 +np.float64,0x8000000000000000,0x8000000000000000,1 +np.float64,0x3fea1d4865f43a90,0x3ff251b839eb4817,1 +np.float64,0xbfa0f65c8421ecc0,0xbfa0f7f37c91be01,1 +np.float64,0x3fcab29c0b356538,0x3fcb1863edbee184,1 +np.float64,0x3fe7949162ef2922,0x3fee323821958b88,1 +np.float64,0x3fdaf9288ab5f250,0x3fdcc400190a4839,1 +np.float64,0xbfe13ece6be27d9d,0xbfe348ba07553179,1 +np.float64,0x3f8a0c4fd0341880,0x3f8a0cabdf710185,1 +np.float64,0x3fdd0442a2ba0884,0x3fdf4b016c4da452,1 +np.float64,0xbfaf06d2343e0da0,0xbfaf1090b1600422,1 +np.float64,0xbfd3b65225a76ca4,0xbfd45fa49ae76cca,1 +np.float64,0x3fef5d75fefebaec,0x400269a5e7c11891,1 +np.float64,0xbfe048e35ce091c6,0xbfe1f5af45dd64f8,1 +np.float64,0xbfe27d4599e4fa8b,0xbfe517b07843d04c,1 +np.float64,0xbfe6f2a637ede54c,0xbfecdaa730462576,1 +np.float64,0x3fc63fbb752c7f78,0x3fc67a2854974109,1 +np.float64,0x3fedda6bfbfbb4d8,0x3ffae2e6131f3475,1 +np.float64,0x3fe7a6f5286f4dea,0x3fee5a9b1ef46016,1 +np.float64,0xbfd4ea8bcea9d518,0xbfd5b66ab7e5cf00,1 +np.float64,0x3fdc116568b822cc,0x3fde1bd4d0d9fd6c,1 +np.float64,0x3fdc45cb1bb88b98,0x3fde5cd1d2751032,1 +np.float64,0x3feabd932f757b26,0x3ff34e06e56a62a1,1 +np.float64,0xbfae5dbe0c3cbb80,0xbfae66e062ac0d65,1 +np.float64,0xbfdb385a00b670b4,0xbfdd10fedf3a58a7,1 +np.float64,0xbfebb14755f7628f,0xbff507e123a2b47c,1 +np.float64,0x3fe6de2fdfedbc60,0x3fecb0ae6e131da2,1 +np.float64,0xbfd86de640b0dbcc,0xbfd9bb4dbf0bf6af,1 +np.float64,0x3fe39e86d9e73d0e,0x3fe6d811c858d5d9,1 +np.float64,0x7ff0000000000000,0xfff8000000000000,1 +np.float64,0x3fa8101684302020,0x3fa814a12176e937,1 +np.float64,0x3fefdd5ad37fbab6,0x4008a08c0b76fbb5,1 +np.float64,0x3fe645c727ec8b8e,0x3feb814ebc470940,1 +np.float64,0x3fe3ba79dce774f4,0x3fe70500db564cb6,1 +np.float64,0xbfe0e5a254e1cb44,0xbfe2cc13940c6d9a,1 +np.float64,0x3fe2cac62465958c,0x3fe58d008c5e31f8,1 +np.float64,0xbfd3ffb531a7ff6a,0xbfd4b0d88cff2040,1 +np.float64,0x3fe0929104612522,0x3fe259bc42dce788,1 +np.float64,0x1,0x1,1 +np.float64,0xbfe7db77e6efb6f0,0xbfeecf93e8a61cb3,1 +np.float64,0xbfe37e9559e6fd2a,0xbfe6a514e29cb7aa,1 +np.float64,0xbfc53a843f2a7508,0xbfc56d2e9ad8b716,1 +np.float64,0xbfedb04485fb6089,0xbffa4615d4334ec3,1 +np.float64,0xbfc44349b1288694,0xbfc46f484b6f1cd6,1 +np.float64,0xbfe265188264ca31,0xbfe4f37d61cd9e17,1 +np.float64,0xbfd030351da0606a,0xbfd08c2537287ee1,1 +np.float64,0x3fd8fb131db1f628,0x3fda613363ca601e,1 +np.float64,0xbff0000000000000,0xfff0000000000000,1 +np.float64,0xbfe48d9a60691b35,0xbfe862c02d8fec1e,1 +np.float64,0x3fd185e050a30bc0,0x3fd1fb4c614ddb07,1 +np.float64,0xbfe4a5807e694b01,0xbfe88b8ff2d6caa7,1 +np.float64,0xbfc934d7ad3269b0,0xbfc98a405d25a666,1 +np.float64,0xbfea0e3c62741c79,0xbff23b4bd3a7b15d,1 +np.float64,0x3fe7244071ee4880,0x3fed41b27ba6bb22,1 +np.float64,0xbfd419f81ba833f0,0xbfd4cdf71b4533a3,1 +np.float64,0xbfe1e73a34e3ce74,0xbfe439eb15fa6baf,1 +np.float64,0x3fcdd9a63f3bb350,0x3fce68e1c401eff0,1 +np.float64,0x3fd1b5960ba36b2c,0x3fd22eeb566f1976,1 +np.float64,0x3fe9ad18e0735a32,0x3ff1af23c534260d,1 +np.float64,0xbfd537918aaa6f24,0xbfd60ccc8df0962b,1 +np.float64,0x3fcba3d3c73747a8,0x3fcc14fd5e5c49ad,1 +np.float64,0x3fd367e3c0a6cfc8,0x3fd40921b14e288e,1 +np.float64,0x3fe94303c6f28608,0x3ff11e62db2db6ac,1 +np.float64,0xbfcc5f77fd38bef0,0xbfccda110c087519,1 +np.float64,0xbfd63b74d7ac76ea,0xbfd7328af9f37402,1 +np.float64,0xbfe5321289ea6425,0xbfe9811ce96609ad,1 +np.float64,0xbfde910879bd2210,0xbfe0a2cd0ed1d368,1 +np.float64,0xbfcc9d9bad393b38,0xbfcd1b722a0b1371,1 +np.float64,0xbfe6dd39e16dba74,0xbfecaeb7c8c069f6,1 +np.float64,0xbfe98316eff3062e,0xbff174d7347d48bf,1 +np.float64,0xbfda88f8d1b511f2,0xbfdc3c0e75dad903,1 +np.float64,0x3fd400d8c2a801b0,0x3fd4b21bacff1f5d,1 +np.float64,0xbfe1ed335863da66,0xbfe4429e45e99779,1 +np.float64,0xbf3423a200284800,0xbf3423a20acb0342,1 +np.float64,0xbfe97bc59672f78b,0xbff16ad1adc44a33,1 +np.float64,0xbfeeca60d7fd94c2,0xbfff98d7f18f7728,1 +np.float64,0x3fd1eb13b2a3d628,0x3fd268e6ff4d56ce,1 +np.float64,0xbfa5594c242ab2a0,0xbfa55c77d6740a39,1 +np.float64,0x3fe72662006e4cc4,0x3fed462a9dedbfee,1 +np.float64,0x3fef4bb221fe9764,0x4001fe4f4cdfedb2,1 +np.float64,0xbfe938d417f271a8,0xbff110e78724ca2b,1 +np.float64,0xbfcc29ab2f385358,0xbfcca182140ef541,1 +np.float64,0x3fe18cd42c6319a8,0x3fe3b77e018165e7,1 +np.float64,0xbfec6c5cae78d8b9,0xbff69d8e01309b48,1 +np.float64,0xbfd5723da7aae47c,0xbfd64ecde17da471,1 +np.float64,0xbfe3096722e612ce,0xbfe5ed43634f37ff,1 +np.float64,0xbfdacaceb1b5959e,0xbfdc8bb826bbed39,1 +np.float64,0x3fc59a57cb2b34b0,0x3fc5cfc4a7c9bac8,1 +np.float64,0x3f84adce10295b80,0x3f84adfc1f1f6e97,1 +np.float64,0x3fdd5b28bbbab650,0x3fdfb8b906d77df4,1 +np.float64,0x3fdebf94c6bd7f28,0x3fe0c10188e1bc7c,1 +np.float64,0x3fdb30c612b6618c,0x3fdd07bf18597821,1 +np.float64,0x3fe7eeb3176fdd66,0x3feefb0be694b855,1 +np.float64,0x0,0x0,1 +np.float64,0xbfe10057e9e200b0,0xbfe2f13365e5b1c9,1 +np.float64,0xbfeb61a82376c350,0xbff46e665d3a60f5,1 +np.float64,0xbfe7f54aec6fea96,0xbfef0a0759f726dc,1 +np.float64,0xbfe4f6da3de9edb4,0xbfe9187d85bd1ab5,1 +np.float64,0xbfeb8be1b3f717c4,0xbff4be8efaab2e75,1 +np.float64,0x3fed40bc31fa8178,0x3ff8d5ec4a7f3e9b,1 +np.float64,0xbfe40f8711681f0e,0xbfe78fa5c62b191b,1 +np.float64,0x3fd1034d94a2069c,0x3fd16e78e9efb85b,1 +np.float64,0x3fc74db15b2e9b60,0x3fc790f26e894098,1 +np.float64,0x3fd912a88cb22550,0x3fda7d0ab3b21308,1 +np.float64,0x3fd8948a3bb12914,0x3fd9e8950c7874c8,1 +np.float64,0xbfa7ada5242f5b50,0xbfa7b1f8db50c104,1 +np.float64,0x3feeb2e1c27d65c4,0x3fff000b7d09c9b7,1 +np.float64,0x3fe9d46cbbf3a8da,0x3ff1e6f405265a6e,1 +np.float64,0xbfe2480b77e49017,0xbfe4c83b9b37bf0c,1 +np.float64,0x3fe950ea9372a1d6,0x3ff130e62468bf2c,1 +np.float64,0x3fefa7272a7f4e4e,0x4004d8c9bf31ab58,1 +np.float64,0xbfe7309209ee6124,0xbfed5b94acef917a,1 +np.float64,0x3fd05e8c64a0bd18,0x3fd0bdb11e0903c6,1 +np.float64,0x3fd9236043b246c0,0x3fda90ccbe4bab1e,1 +np.float64,0xbfdc3d6805b87ad0,0xbfde5266e17154c3,1 +np.float64,0x3fe5e6bad76bcd76,0x3feacbc306c63445,1 +np.float64,0x3ff0000000000000,0x7ff0000000000000,1 +np.float64,0xbfde3d7390bc7ae8,0xbfe06cd480bd0196,1 +np.float64,0xbfd3e2e3c0a7c5c8,0xbfd490edc0a45e26,1 +np.float64,0x3fe39871d76730e4,0x3fe6ce54d1719953,1 +np.float64,0x3fdff00ebcbfe01c,0x3fe1894b6655a6d0,1 +np.float64,0x3f91b7ad58236f40,0x3f91b8213bcb8b0b,1 +np.float64,0xbfd99f48f7b33e92,0xbfdb23d544f62591,1 +np.float64,0x3fae3512cc3c6a20,0x3fae3e10939fd7b5,1 +np.float64,0x3fcc4cf3db3899e8,0x3fccc698a15176d6,1 +np.float64,0xbfd0927e39a124fc,0xbfd0f5522e2bc030,1 +np.float64,0x3fcee859633dd0b0,0x3fcf87bdef7a1e82,1 +np.float64,0xbfe2a8b69565516d,0xbfe5593437b6659a,1 +np.float64,0x3fecf61e20f9ec3c,0x3ff7fda16b0209d4,1 +np.float64,0xbfbf37571e3e6eb0,0xbfbf5f4e1379a64c,1 +np.float64,0xbfd54e1b75aa9c36,0xbfd626223b68971a,1 +np.float64,0x3fe1035a56e206b4,0x3fe2f5651ca0f4b0,1 +np.float64,0x3fe4992989e93254,0x3fe876751afa70dc,1 +np.float64,0x3fc8c313d3318628,0x3fc913faf15d1562,1 +np.float64,0x3f99f6ba8833ed80,0x3f99f8274fb94828,1 +np.float64,0xbfd4a58af0a94b16,0xbfd56947c276e04f,1 +np.float64,0x3fc66f8c872cdf18,0x3fc6ab7a14372a73,1 +np.float64,0x3fc41eee0d283de0,0x3fc449ff1ff0e7a6,1 +np.float64,0x3fefd04d287fa09a,0x4007585010cfa9b0,1 +np.float64,0x3fce9e746f3d3ce8,0x3fcf39514bbe5070,1 +np.float64,0xbfe8056f72700adf,0xbfef2ee2c13e67ba,1 +np.float64,0x3fdd6b1ec0bad63c,0x3fdfccf2ba144fa8,1 +np.float64,0x3fd92ee432b25dc8,0x3fda9e6b96b2b142,1 +np.float64,0xbfc4d18f9529a320,0xbfc50150fb4de0cc,1 +np.float64,0xbfe09939a7613274,0xbfe262d703c317af,1 +np.float64,0xbfd130b132a26162,0xbfd19f5a00ae29c4,1 +np.float64,0x3fa06e21d420dc40,0x3fa06f93aba415fb,1 +np.float64,0x3fc5c48fbd2b8920,0x3fc5fb3bfad3bf55,1 +np.float64,0xbfdfa2bacbbf4576,0xbfe155f839825308,1 +np.float64,0x3fe3e1fa0f67c3f4,0x3fe745081dd4fd03,1 +np.float64,0x3fdae58289b5cb04,0x3fdcac1f6789130a,1 +np.float64,0xbf8ed3ba103da780,0xbf8ed452a9cc1442,1 +np.float64,0xbfec06b46f780d69,0xbff5b86f30d70908,1 +np.float64,0xbfe990c13b732182,0xbff187a90ae611f8,1 +np.float64,0xbfdd46c738ba8d8e,0xbfdf9eee0a113230,1 +np.float64,0x3fe08b83f3611708,0x3fe2501b1c77035c,1 +np.float64,0xbfd501b65baa036c,0xbfd5d05de3fceac8,1 +np.float64,0xbfcf4fa21f3e9f44,0xbfcff5829582c0b6,1 +np.float64,0xbfefbc0bfbff7818,0xc005eca1a2c56b38,1 +np.float64,0xbfe1ba6959e374d2,0xbfe3f8f88d128ce5,1 +np.float64,0xbfd4e74ee3a9ce9e,0xbfd5b2cabeb45e6c,1 +np.float64,0xbfe77c38eaeef872,0xbfedfd332d6f1c75,1 +np.float64,0x3fa9b5e4fc336bc0,0x3fa9bb6f6b80b4af,1 +np.float64,0xbfecba63917974c7,0xbff75e44df7f8e81,1 +np.float64,0x3fd6cf17b2ad9e30,0x3fd7db0b93b7f2b5,1 diff --git a/local_packages/numpy/_core/tests/data/umath-validation-set-cbrt.csv b/local_packages/numpy/_core/tests/data/umath-validation-set-cbrt.csv new file mode 100644 index 0000000..ad141cb --- /dev/null +++ b/local_packages/numpy/_core/tests/data/umath-validation-set-cbrt.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0x3ee7054c,0x3f4459ea,2 +np.float32,0x7d1e2489,0x54095925,2 +np.float32,0x7ee5edf5,0x549b992b,2 +np.float32,0x380607,0x2a425e72,2 +np.float32,0x34a8f3,0x2a3e6603,2 +np.float32,0x3eee2844,0x3f465a45,2 +np.float32,0x59e49c,0x2a638d0a,2 +np.float32,0xbf72c77a,0xbf7b83d4,2 +np.float32,0x7f2517b4,0x54af8bf0,2 +np.float32,0x80068a69,0xa9bdfe8b,2 +np.float32,0xbe8e3578,0xbf270775,2 +np.float32,0xbe4224dc,0xbf131119,2 +np.float32,0xbe0053b8,0xbf001be2,2 +np.float32,0x70e8d,0x29c2ddc5,2 +np.float32,0xff63f7b5,0xd4c37b7f,2 +np.float32,0x3f00bbed,0x3f4b9335,2 +np.float32,0x3f135f4e,0x3f54f5d4,2 +np.float32,0xbe13a488,0xbf063d13,2 +np.float32,0x3f14ec78,0x3f55b478,2 +np.float32,0x7ec35cfb,0x54935fbf,2 +np.float32,0x7d41c589,0x5412f904,2 +np.float32,0x3ef8a16e,0x3f4937f7,2 +np.float32,0x3f5d8464,0x3f73f279,2 +np.float32,0xbeec85ac,0xbf45e5cb,2 +np.float32,0x7f11f722,0x54a87cb1,2 +np.float32,0x8032c085,0xaa3c1219,2 +np.float32,0x80544bac,0xaa5eb9f2,2 +np.float32,0x3e944a10,0x3f296065,2 +np.float32,0xbf29fe50,0xbf5f5796,2 +np.float32,0x7e204d8d,0x545b03d5,2 +np.float32,0xfe1d0254,0xd4598127,2 +np.float32,0x80523129,0xaa5cdba9,2 +np.float32,0x806315fa,0xaa6b0eaf,2 +np.float32,0x3ed3d2a4,0x3f3ec117,2 +np.float32,0x7ee15007,0x549a8cc0,2 +np.float32,0x801ffb5e,0xaa213d4f,2 +np.float32,0x807f9f4a,0xaa7fbf76,2 +np.float32,0xbe45e854,0xbf1402d3,2 +np.float32,0x3d9e2e70,0x3eda0b64,2 +np.float32,0x51f404,0x2a5ca4d7,2 +np.float32,0xbe26a8b0,0xbf0bc54d,2 +np.float32,0x22c99a,0x2a25d2a7,2 +np.float32,0xbf71248b,0xbf7af2d5,2 +np.float32,0x7219fe,0x2a76608e,2 +np.float32,0x7f16fd7d,0x54aa6610,2 +np.float32,0x80716faa,0xaa75e5b9,2 +np.float32,0xbe24f9a4,0xbf0b4c65,2 +np.float32,0x800000,0x2a800000,2 +np.float32,0x80747456,0xaa780f27,2 +np.float32,0x68f9e8,0x2a6fa035,2 +np.float32,0x3f6a297e,0x3f7880d8,2 +np.float32,0x3f28b973,0x3f5ec8f6,2 +np.float32,0x7f58c577,0x54c03a70,2 +np.float32,0x804befcc,0xaa571b4f,2 +np.float32,0x3e2be027,0x3f0d36cf,2 +np.float32,0xfe7e80a4,0xd47f7ff7,2 +np.float32,0xfe9d444a,0xd489181b,2 +np.float32,0x3db3e790,0x3ee399d6,2 +np.float32,0xbf154c3e,0xbf55e23e,2 +np.float32,0x3d1096b7,0x3ea7f4aa,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0x804e2521,0xaa592c06,2 +np.float32,0xbeda2f00,0xbf40a513,2 +np.float32,0x3f191788,0x3f57ae30,2 +np.float32,0x3ed24ade,0x3f3e4b34,2 +np.float32,0x807fadb4,0xaa7fc917,2 +np.float32,0xbe0a06dc,0xbf034234,2 +np.float32,0x3f250bba,0x3f5d276d,2 +np.float32,0x7e948b00,0x548682c8,2 +np.float32,0xfe65ecdc,0xd476fed2,2 +np.float32,0x6fdbdd,0x2a74c095,2 +np.float32,0x800112de,0xa9500fa6,2 +np.float32,0xfe63225c,0xd475fdee,2 +np.float32,0x7f3d9acd,0x54b7d648,2 +np.float32,0xfc46f480,0xd3bacf87,2 +np.float32,0xfe5deaac,0xd47417ff,2 +np.float32,0x60ce53,0x2a693d93,2 +np.float32,0x6a6e2f,0x2a70ba2c,2 +np.float32,0x7f43f0f1,0x54b9dcd0,2 +np.float32,0xbf6170c9,0xbf756104,2 +np.float32,0xbe5c9f74,0xbf197852,2 +np.float32,0xff1502b0,0xd4a9a693,2 +np.float32,0x8064f6af,0xaa6c886e,2 +np.float32,0xbf380564,0xbf6552e5,2 +np.float32,0xfeb9b7dc,0xd490e85f,2 +np.float32,0x7f34f941,0x54b5010d,2 +np.float32,0xbe9d4ca0,0xbf2cbd5f,2 +np.float32,0x3f6e43d2,0x3f79f240,2 +np.float32,0xbdad0530,0xbee0a8f2,2 +np.float32,0x3da18459,0x3edb9105,2 +np.float32,0xfd968340,0xd42a3808,2 +np.float32,0x3ea03e64,0x3f2dcf96,2 +np.float32,0x801d2f5b,0xaa1c6525,2 +np.float32,0xbf47d92d,0xbf6bb7e9,2 +np.float32,0x55a6b9,0x2a5fe9fb,2 +np.float32,0x77a7c2,0x2a7a4fb8,2 +np.float32,0xfebbc16e,0xd4916f88,2 +np.float32,0x3f5d3d6e,0x3f73d86a,2 +np.float32,0xfccd2b60,0xd3edcacb,2 +np.float32,0xbd026460,0xbea244b0,2 +np.float32,0x3e55bd,0x2a4968e4,2 +np.float32,0xbe7b5708,0xbf20490d,2 +np.float32,0xfe413cf4,0xd469171f,2 +np.float32,0x7710e3,0x2a79e657,2 +np.float32,0xfc932520,0xd3d4d9ca,2 +np.float32,0xbf764a1b,0xbf7cb8aa,2 +np.float32,0x6b1923,0x2a713aca,2 +np.float32,0xfe4dcd04,0xd46e092d,2 +np.float32,0xff3085ac,0xd4b381f8,2 +np.float32,0x3f72c438,0x3f7b82b4,2 +np.float32,0xbf6f0c6e,0xbf7a3852,2 +np.float32,0x801d2b1b,0xaa1c5d8d,2 +np.float32,0x3e9db91e,0x3f2ce50d,2 +np.float32,0x3f684f9d,0x3f77d8c5,2 +np.float32,0x7dc784,0x2a7e82cc,2 +np.float32,0x7d2c88e9,0x540d64f8,2 +np.float32,0x807fb708,0xaa7fcf51,2 +np.float32,0x8003c49a,0xa99e16e0,2 +np.float32,0x3ee4f5b8,0x3f43c3ff,2 +np.float32,0xfe992c5e,0xd487e4ec,2 +np.float32,0x4b4dfa,0x2a568216,2 +np.float32,0x3d374c80,0x3eb5c6a8,2 +np.float32,0xbd3a4700,0xbeb6c15c,2 +np.float32,0xbf13cb80,0xbf5529e5,2 +np.float32,0xbe7306d4,0xbf1e7f91,2 +np.float32,0xbf800000,0xbf800000,2 +np.float32,0xbea42efe,0xbf2f394e,2 +np.float32,0x3e1981d0,0x3f07fe2c,2 +np.float32,0x3f17ea1d,0x3f572047,2 +np.float32,0x7dc1e0,0x2a7e7efe,2 +np.float32,0x80169c08,0xaa0fa320,2 +np.float32,0x3f3e1972,0x3f67d248,2 +np.float32,0xfe5d3c88,0xd473d815,2 +np.float32,0xbf677448,0xbf778aac,2 +np.float32,0x7e799b7d,0x547dd9e4,2 +np.float32,0x3f00bb2c,0x3f4b92cf,2 +np.float32,0xbeb29f9c,0xbf343798,2 +np.float32,0xbd6b7830,0xbec59a86,2 +np.float32,0x807a524a,0xaa7c282a,2 +np.float32,0xbe0a7a04,0xbf0366ab,2 +np.float32,0x80237470,0xaa26e061,2 +np.float32,0x3ccbc0f6,0x3e95744f,2 +np.float32,0x3edec6bc,0x3f41fcb6,2 +np.float32,0x3f635198,0x3f760efa,2 +np.float32,0x800eca4f,0xa9f960d8,2 +np.float32,0x3f800000,0x3f800000,2 +np.float32,0xff4eeb9e,0xd4bd456a,2 +np.float32,0x56f4e,0x29b29e70,2 +np.float32,0xff5383a0,0xd4bea95c,2 +np.float32,0x3f4c3a77,0x3f6d6d94,2 +np.float32,0x3f6c324a,0x3f79388c,2 +np.float32,0xbebdc092,0xbf37e27c,2 +np.float32,0xff258956,0xd4afb42e,2 +np.float32,0xdc78c,0x29f39012,2 +np.float32,0xbf2db06a,0xbf60f2f5,2 +np.float32,0xbe3c5808,0xbf119660,2 +np.float32,0xbf1ba866,0xbf58e0f4,2 +np.float32,0x80377640,0xaa41b79d,2 +np.float32,0x4fdc4d,0x2a5abfea,2 +np.float32,0x7f5e7560,0x54c1e516,2 +np.float32,0xfeb4d3f2,0xd48f9fde,2 +np.float32,0x3f12a622,0x3f549c7d,2 +np.float32,0x7f737ed7,0x54c7d2dc,2 +np.float32,0xa0ddc,0x29db456d,2 +np.float32,0xfe006740,0xd44b6689,2 +np.float32,0x3f17dfd4,0x3f571b6c,2 +np.float32,0x67546e,0x2a6e5dd1,2 +np.float32,0xff0d0f11,0xd4a693e2,2 +np.float32,0xbd170090,0xbeaa6738,2 +np.float32,0x5274a0,0x2a5d1806,2 +np.float32,0x3e154fe0,0x3f06be1a,2 +np.float32,0x7ddb302e,0x5440f0a7,2 +np.float32,0x3f579d10,0x3f71c2af,2 +np.float32,0xff2bc5bb,0xd4b1e20c,2 +np.float32,0xfee8fa6a,0xd49c4872,2 +np.float32,0xbea551b0,0xbf2fa07b,2 +np.float32,0xfeabc75c,0xd48d3004,2 +np.float32,0x7f50a5a8,0x54bdcbd1,2 +np.float32,0x50354b,0x2a5b110d,2 +np.float32,0x7d139f13,0x54063b6b,2 +np.float32,0xbeee1b08,0xbf465699,2 +np.float32,0xfe5e1650,0xd47427fe,2 +np.float32,0x7f7fffff,0x54cb2ff5,2 +np.float32,0xbf52ede8,0xbf6fff35,2 +np.float32,0x804bba81,0xaa56e8f1,2 +np.float32,0x6609e2,0x2a6d5e94,2 +np.float32,0x692621,0x2a6fc1d6,2 +np.float32,0xbf288bb6,0xbf5eb4d3,2 +np.float32,0x804f28c4,0xaa5a1b82,2 +np.float32,0xbdaad2a8,0xbedfb46e,2 +np.float32,0x5e04f8,0x2a66fb13,2 +np.float32,0x804c10da,0xaa573a81,2 +np.float32,0xbe412764,0xbf12d0fd,2 +np.float32,0x801c35cc,0xaa1aa250,2 +np.float32,0x6364d4,0x2a6b4cf9,2 +np.float32,0xbf6d3cea,0xbf79962f,2 +np.float32,0x7e5a9935,0x5472defb,2 +np.float32,0xbe73a38c,0xbf1ea19c,2 +np.float32,0xbd35e950,0xbeb550f2,2 +np.float32,0x46cc16,0x2a5223d6,2 +np.float32,0x3f005288,0x3f4b5b97,2 +np.float32,0x8034e8b7,0xaa3eb2be,2 +np.float32,0xbea775fc,0xbf3061cf,2 +np.float32,0xea0e9,0x29f87751,2 +np.float32,0xbf38faaf,0xbf65b89d,2 +np.float32,0xbedf3184,0xbf421bb0,2 +np.float32,0xbe04250c,0xbf015def,2 +np.float32,0x7f56dae8,0x54bfa901,2 +np.float32,0xfebe3e04,0xd492132e,2 +np.float32,0x3e4dc326,0x3f15f19e,2 +np.float32,0x803da197,0xaa48a621,2 +np.float32,0x7eeb35aa,0x549cc7c6,2 +np.float32,0xfebb3eb6,0xd4914dc0,2 +np.float32,0xfed17478,0xd496d5e2,2 +np.float32,0x80243694,0xaa280ed2,2 +np.float32,0x8017e666,0xaa1251d3,2 +np.float32,0xbf07e942,0xbf4f4a3e,2 +np.float32,0xbf578fa6,0xbf71bdab,2 +np.float32,0x7ed8d80f,0x549896b6,2 +np.float32,0x3f2277ae,0x3f5bff11,2 +np.float32,0x7e6f195b,0x547a3cd4,2 +np.float32,0xbf441559,0xbf6a3a91,2 +np.float32,0x7f1fb427,0x54ad9d8d,2 +np.float32,0x71695f,0x2a75e12d,2 +np.float32,0xbd859588,0xbece19a1,2 +np.float32,0x7f5702fc,0x54bfb4eb,2 +np.float32,0x3f040008,0x3f4d4842,2 +np.float32,0x3de00ca5,0x3ef4df89,2 +np.float32,0x3eeabb03,0x3f45658c,2 +np.float32,0x3dfe5e65,0x3eff7480,2 +np.float32,0x1,0x26a14518,2 +np.float32,0x8065e400,0xaa6d4130,2 +np.float32,0xff50e1bb,0xd4bdde07,2 +np.float32,0xbe88635a,0xbf24b7e9,2 +np.float32,0x3f46bfab,0x3f6b4908,2 +np.float32,0xbd85c3c8,0xbece3168,2 +np.float32,0xbe633f64,0xbf1afdb1,2 +np.float32,0xff2c7706,0xd4b21f2a,2 +np.float32,0xbf02816c,0xbf4c812a,2 +np.float32,0x80653aeb,0xaa6cbdab,2 +np.float32,0x3eef1d10,0x3f469e24,2 +np.float32,0x3d9944bf,0x3ed7c36a,2 +np.float32,0x1b03d4,0x2a186b2b,2 +np.float32,0x3f251b7c,0x3f5d2e76,2 +np.float32,0x3edebab0,0x3f41f937,2 +np.float32,0xfefc2148,0xd4a073ff,2 +np.float32,0x7448ee,0x2a77f051,2 +np.float32,0x3bb8a400,0x3e3637ee,2 +np.float32,0x57df36,0x2a61d527,2 +np.float32,0xfd8b9098,0xd425fccb,2 +np.float32,0x7f67627e,0x54c4744d,2 +np.float32,0x801165d7,0xaa039fba,2 +np.float32,0x53aae5,0x2a5e2bfd,2 +np.float32,0x8014012b,0xaa09e4f1,2 +np.float32,0x3f7a2d53,0x3f7e0b4b,2 +np.float32,0x3f5fb700,0x3f74c052,2 +np.float32,0x7f192a06,0x54ab366c,2 +np.float32,0x3f569611,0x3f71603b,2 +np.float32,0x25e2dc,0x2a2a9b65,2 +np.float32,0x8036465e,0xaa405342,2 +np.float32,0x804118e1,0xaa4c5785,2 +np.float32,0xbef08d3e,0xbf4703e1,2 +np.float32,0x3447e2,0x2a3df0be,2 +np.float32,0xbf2a350b,0xbf5f6f8c,2 +np.float32,0xbec87e3e,0xbf3b4a73,2 +np.float32,0xbe99a4a8,0xbf2b6412,2 +np.float32,0x2ea2ae,0x2a36d77e,2 +np.float32,0xfcb69600,0xd3e4b9e3,2 +np.float32,0x717700,0x2a75eb06,2 +np.float32,0xbf4e81ce,0xbf6e4ecc,2 +np.float32,0xbe2021ac,0xbf09ebee,2 +np.float32,0xfef94eee,0xd49fda31,2 +np.float32,0x8563e,0x29ce0015,2 +np.float32,0x7f5d0ca5,0x54c17c0f,2 +np.float32,0x3f16459a,0x3f56590f,2 +np.float32,0xbe12f7bc,0xbf0608a0,2 +np.float32,0x3f10fd3d,0x3f53ce5f,2 +np.float32,0x3ca5e1b0,0x3e8b8d96,2 +np.float32,0xbe5288e0,0xbf17181f,2 +np.float32,0xbf7360f6,0xbf7bb8c9,2 +np.float32,0x7e989d33,0x5487ba88,2 +np.float32,0x3ea7b5dc,0x3f307839,2 +np.float32,0x7e8da0c9,0x548463f0,2 +np.float32,0xfeaf7888,0xd48e3122,2 +np.float32,0x7d90402d,0x5427d321,2 +np.float32,0x72e309,0x2a76f0ee,2 +np.float32,0xbe1faa34,0xbf09c998,2 +np.float32,0xbf2b1652,0xbf5fd1f4,2 +np.float32,0x8051eb0c,0xaa5c9cca,2 +np.float32,0x7edf02bf,0x549a058e,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0x3f67f873,0x3f77b9c1,2 +np.float32,0x3f276b63,0x3f5e358c,2 +np.float32,0x7eeb4bf2,0x549cccb9,2 +np.float32,0x3bfa2c,0x2a46d675,2 +np.float32,0x3e133c50,0x3f061d75,2 +np.float32,0x3ca302c0,0x3e8abe4a,2 +np.float32,0x802e152e,0xaa361dd5,2 +np.float32,0x3f504810,0x3f6efd0a,2 +np.float32,0xbf43e0b5,0xbf6a2599,2 +np.float32,0x80800000,0xaa800000,2 +np.float32,0x3f1c0980,0x3f590e03,2 +np.float32,0xbf0084f6,0xbf4b7638,2 +np.float32,0xfee72d32,0xd49be10d,2 +np.float32,0x3f3c00ed,0x3f66f763,2 +np.float32,0x80511e81,0xaa5be492,2 +np.float32,0xfdd1b8a0,0xd43e1f0d,2 +np.float32,0x7d877474,0x54245785,2 +np.float32,0x7f110bfe,0x54a82207,2 +np.float32,0xff800000,0xff800000,2 +np.float32,0x6b6a2,0x29bfa706,2 +np.float32,0xbf5bdfd9,0xbf7357b7,2 +np.float32,0x8025bfa3,0xaa2a6676,2 +np.float32,0x3a3581,0x2a44dd3a,2 +np.float32,0x542c2a,0x2a5e9e2f,2 +np.float32,0xbe1d5650,0xbf091d57,2 +np.float32,0x3e97760d,0x3f2a935e,2 +np.float32,0x7f5dcde2,0x54c1b460,2 +np.float32,0x800bde1e,0xa9e7bbaf,2 +np.float32,0x3e6b9e61,0x3f1cdf07,2 +np.float32,0x7d46c003,0x54143884,2 +np.float32,0x80073fbb,0xa9c49e67,2 +np.float32,0x503c23,0x2a5b1748,2 +np.float32,0x7eb7b070,0x549060c8,2 +np.float32,0xe9d8f,0x29f86456,2 +np.float32,0xbeedd4f0,0xbf464320,2 +np.float32,0x3f40d5d6,0x3f68eda1,2 +np.float32,0xff201f28,0xd4adc44b,2 +np.float32,0xbdf61e98,0xbefca9c7,2 +np.float32,0x3e8a0dc9,0x3f2562e3,2 +np.float32,0xbc0c0c80,0xbe515f61,2 +np.float32,0x2b3c15,0x2a3248e3,2 +np.float32,0x42a7bb,0x2a4df592,2 +np.float32,0x7f337947,0x54b480af,2 +np.float32,0xfec21db4,0xd4930f4b,2 +np.float32,0x7f4fdbf3,0x54bd8e94,2 +np.float32,0x1e2253,0x2a1e1286,2 +np.float32,0x800c4c80,0xa9ea819e,2 +np.float32,0x7e96f5b7,0x54873c88,2 +np.float32,0x7ce4e131,0x53f69ed4,2 +np.float32,0xbead8372,0xbf327b63,2 +np.float32,0x3e15ca7e,0x3f06e2f3,2 +np.float32,0xbf63e17b,0xbf7642da,2 +np.float32,0xff5bdbdb,0xd4c122f9,2 +np.float32,0x3f44411e,0x3f6a4bfd,2 +np.float32,0xfd007da0,0xd40029d2,2 +np.float32,0xbe940168,0xbf2944b7,2 +np.float32,0x80000000,0x80000000,2 +np.float32,0x3d28e356,0x3eb0e1b8,2 +np.float32,0x3eb9fcd8,0x3f36a918,2 +np.float32,0x4f6410,0x2a5a51eb,2 +np.float32,0xbdf18e30,0xbefb1775,2 +np.float32,0x32edbd,0x2a3c49e3,2 +np.float32,0x801f70a5,0xaa2052da,2 +np.float32,0x8045a045,0xaa50f98c,2 +np.float32,0xbdd6cb00,0xbef17412,2 +np.float32,0x3f118f2c,0x3f541557,2 +np.float32,0xbe65c378,0xbf1b8f95,2 +np.float32,0xfd9a9060,0xd42bbb8b,2 +np.float32,0x3f04244f,0x3f4d5b0f,2 +np.float32,0xff05214b,0xd4a3656f,2 +np.float32,0xfe342cd0,0xd463b706,2 +np.float32,0x3f3409a8,0x3f63a836,2 +np.float32,0x80205db2,0xaa21e1e5,2 +np.float32,0xbf37c982,0xbf653a03,2 +np.float32,0x3f36ce8f,0x3f64d17e,2 +np.float32,0x36ffda,0x2a412d61,2 +np.float32,0xff569752,0xd4bf94e6,2 +np.float32,0x802fdb0f,0xaa386c3a,2 +np.float32,0x7ec55a87,0x5493df71,2 +np.float32,0x7f2234c7,0x54ae847e,2 +np.float32,0xbf02df76,0xbf4cb23d,2 +np.float32,0x3d68731a,0x3ec4c156,2 +np.float32,0x8146,0x2921cd8e,2 +np.float32,0x80119364,0xaa041235,2 +np.float32,0xfe6c1c00,0xd47930b5,2 +np.float32,0x8070da44,0xaa757996,2 +np.float32,0xfefbf50c,0xd4a06a9d,2 +np.float32,0xbf01b6a8,0xbf4c170a,2 +np.float32,0x110702,0x2a02aedb,2 +np.float32,0xbf063cd4,0xbf4e6f87,2 +np.float32,0x3f1ff178,0x3f5ad9dd,2 +np.float32,0xbf76dcd4,0xbf7cead0,2 +np.float32,0x80527281,0xaa5d1620,2 +np.float32,0xfea96df8,0xd48c8a7f,2 +np.float32,0x68db02,0x2a6f88b0,2 +np.float32,0x62d971,0x2a6adec7,2 +np.float32,0x3e816fe0,0x3f21df04,2 +np.float32,0x3f586379,0x3f720cc0,2 +np.float32,0x804a3718,0xaa5577ff,2 +np.float32,0x2e2506,0x2a3632b2,2 +np.float32,0x3f297d,0x2a4a4bf3,2 +np.float32,0xbe37aba8,0xbf105f88,2 +np.float32,0xbf18b264,0xbf577ea7,2 +np.float32,0x7f50d02d,0x54bdd8b5,2 +np.float32,0xfee296dc,0xd49ad757,2 +np.float32,0x7ec5137e,0x5493cdb1,2 +np.float32,0x3f4811f4,0x3f6bce3a,2 +np.float32,0xfdff32a0,0xd44af991,2 +np.float32,0x3f6ef140,0x3f7a2ed6,2 +np.float32,0x250838,0x2a2950b5,2 +np.float32,0x25c28e,0x2a2a6ada,2 +np.float32,0xbe875e50,0xbf244e90,2 +np.float32,0x3e3bdff8,0x3f11776a,2 +np.float32,0x3e9fe493,0x3f2daf17,2 +np.float32,0x804d8599,0xaa5897d9,2 +np.float32,0x3f0533da,0x3f4de759,2 +np.float32,0xbe63023c,0xbf1aefc8,2 +np.float32,0x80636e5e,0xaa6b547f,2 +np.float32,0xff112958,0xd4a82d5d,2 +np.float32,0x3e924112,0x3f28991f,2 +np.float32,0xbe996ffc,0xbf2b507a,2 +np.float32,0x802a7cda,0xaa314081,2 +np.float32,0x8022b524,0xaa25b21e,2 +np.float32,0x3f0808c8,0x3f4f5a43,2 +np.float32,0xbef0ec2a,0xbf471e0b,2 +np.float32,0xff4c2345,0xd4bc6b3c,2 +np.float32,0x25ccc8,0x2a2a7a3b,2 +np.float32,0x7f4467d6,0x54ba0260,2 +np.float32,0x7f506539,0x54bdb846,2 +np.float32,0x412ab4,0x2a4c6a2a,2 +np.float32,0x80672c4a,0xaa6e3ef0,2 +np.float32,0xbddfb7f8,0xbef4c0ac,2 +np.float32,0xbf250bb9,0xbf5d276c,2 +np.float32,0x807dca65,0xaa7e84bd,2 +np.float32,0xbf63b8e0,0xbf763438,2 +np.float32,0xbeed1b0c,0xbf460f6b,2 +np.float32,0x8021594f,0xaa238136,2 +np.float32,0xbebc74c8,0xbf377710,2 +np.float32,0x3e9f8e3b,0x3f2d8fce,2 +np.float32,0x7f50ca09,0x54bdd6d8,2 +np.float32,0x805797c1,0xaa6197df,2 +np.float32,0x3de198f9,0x3ef56f98,2 +np.float32,0xf154d,0x29fb0392,2 +np.float32,0xff7fffff,0xd4cb2ff5,2 +np.float32,0xfed22fa8,0xd49702c4,2 +np.float32,0xbf733736,0xbf7baa64,2 +np.float32,0xbf206a8a,0xbf5b1108,2 +np.float32,0xbca49680,0xbe8b3078,2 +np.float32,0xfecba794,0xd4956e1a,2 +np.float32,0x80126582,0xaa061886,2 +np.float32,0xfee5cc82,0xd49b919f,2 +np.float32,0xbf7ad6ae,0xbf7e4491,2 +np.float32,0x7ea88c81,0x548c4c0c,2 +np.float32,0xbf493a0d,0xbf6c4255,2 +np.float32,0xbf06dda0,0xbf4ec1d4,2 +np.float32,0xff3f6e84,0xd4b86cf6,2 +np.float32,0x3e4fe093,0x3f1674b0,2 +np.float32,0x8048ad60,0xaa53fbde,2 +np.float32,0x7ebb7112,0x54915ac5,2 +np.float32,0x5bd191,0x2a652a0d,2 +np.float32,0xfe3121d0,0xd4626cfb,2 +np.float32,0x7e4421c6,0x546a3f83,2 +np.float32,0x19975b,0x2a15b14f,2 +np.float32,0x801c8087,0xaa1b2a64,2 +np.float32,0xfdf6e950,0xd448c0f6,2 +np.float32,0x74e711,0x2a786083,2 +np.float32,0xbf2b2f2e,0xbf5fdccb,2 +np.float32,0x7ed19ece,0x5496e00b,2 +np.float32,0x7f6f8322,0x54c6ba63,2 +np.float32,0x3e90316d,0x3f27cd69,2 +np.float32,0x7ecb42ce,0x54955571,2 +np.float32,0x3f6d49be,0x3f799aaf,2 +np.float32,0x8053d327,0xaa5e4f9a,2 +np.float32,0x7ebd7361,0x5491df3e,2 +np.float32,0xfdb6eed0,0xd435a7aa,2 +np.float32,0x7f3e79f4,0x54b81e4b,2 +np.float32,0xfe83afa6,0xd4813794,2 +np.float32,0x37c443,0x2a421246,2 +np.float32,0xff075a10,0xd4a44cd8,2 +np.float32,0x3ebc5fe0,0x3f377047,2 +np.float32,0x739694,0x2a77714e,2 +np.float32,0xfe832946,0xd4810b91,2 +np.float32,0x7f2638e6,0x54aff235,2 +np.float32,0xfe87f7a6,0xd4829a3f,2 +np.float32,0x3f50f3f8,0x3f6f3eb8,2 +np.float32,0x3eafa3d0,0x3f333548,2 +np.float32,0xbec26ee6,0xbf39626f,2 +np.float32,0x7e6f924f,0x547a66ff,2 +np.float32,0x7f0baa46,0x54a606f8,2 +np.float32,0xbf6dfc49,0xbf79d939,2 +np.float32,0x7f005709,0x54a1699d,2 +np.float32,0x7ee3d7ef,0x549b2057,2 +np.float32,0x803709a4,0xaa4138d7,2 +np.float32,0x3f7bf49a,0x3f7ea509,2 +np.float32,0x509db7,0x2a5b6ff5,2 +np.float32,0x7eb1b0d4,0x548ec9ff,2 +np.float32,0x7eb996ec,0x5490dfce,2 +np.float32,0xbf1fcbaa,0xbf5ac89e,2 +np.float32,0x3e2c9a98,0x3f0d69cc,2 +np.float32,0x3ea77994,0x3f306312,2 +np.float32,0x3f3cbfe4,0x3f67457c,2 +np.float32,0x8422a,0x29cd5a30,2 +np.float32,0xbd974558,0xbed6d264,2 +np.float32,0xfecee77a,0xd496387f,2 +np.float32,0x3f51876b,0x3f6f76f1,2 +np.float32,0x3b1a25,0x2a45ddad,2 +np.float32,0xfe9912f0,0xd487dd67,2 +np.float32,0x3f3ab13d,0x3f666d99,2 +np.float32,0xbf35565a,0xbf64341b,2 +np.float32,0x7d4e84aa,0x54162091,2 +np.float32,0x4c2570,0x2a574dea,2 +np.float32,0x7e82dca6,0x5480f26b,2 +np.float32,0x7f5503e7,0x54bf1c8d,2 +np.float32,0xbeb85034,0xbf361c59,2 +np.float32,0x80460a69,0xaa516387,2 +np.float32,0x805fbbab,0xaa68602c,2 +np.float32,0x7d4b4c1b,0x541557b8,2 +np.float32,0xbefa9a0a,0xbf49bfbc,2 +np.float32,0x3dbd233f,0x3ee76e09,2 +np.float32,0x58b6df,0x2a628d50,2 +np.float32,0xfcdcc180,0xd3f3aad9,2 +np.float32,0x423a37,0x2a4d8487,2 +np.float32,0xbed8b32a,0xbf403507,2 +np.float32,0x3f68e85d,0x3f780f0b,2 +np.float32,0x7ee13c4b,0x549a883d,2 +np.float32,0xff2ed4c5,0xd4b2eec1,2 +np.float32,0xbf54dadc,0xbf70b99a,2 +np.float32,0x3f78b0af,0x3f7d8a32,2 +np.float32,0x3f377372,0x3f651635,2 +np.float32,0xfdaa6178,0xd43166bc,2 +np.float32,0x8060c337,0xaa6934a6,2 +np.float32,0x7ec752c2,0x54945cf6,2 +np.float32,0xbd01a760,0xbea1f624,2 +np.float32,0x6f6599,0x2a746a35,2 +np.float32,0x3f6315b0,0x3f75f95b,2 +np.float32,0x7f2baf32,0x54b1da44,2 +np.float32,0x3e400353,0x3f1286d8,2 +np.float32,0x40d3bf,0x2a4c0f15,2 +np.float32,0x7f733aca,0x54c7c03d,2 +np.float32,0x7e5c5407,0x5473828b,2 +np.float32,0x80191703,0xaa14b56a,2 +np.float32,0xbf4fc144,0xbf6ec970,2 +np.float32,0xbf1137a7,0xbf53eacd,2 +np.float32,0x80575410,0xaa615db3,2 +np.float32,0xbd0911d0,0xbea4fe07,2 +np.float32,0x3e98534a,0x3f2ae643,2 +np.float32,0x3f3b089a,0x3f669185,2 +np.float32,0x4fc752,0x2a5aacc1,2 +np.float32,0xbef44ddc,0xbf480b6e,2 +np.float32,0x80464217,0xaa519af4,2 +np.float32,0x80445fae,0xaa4fb6de,2 +np.float32,0x80771cf4,0xaa79eec8,2 +np.float32,0xfd9182e8,0xd4284fed,2 +np.float32,0xff0a5d16,0xd4a58288,2 +np.float32,0x3f33e169,0x3f63973e,2 +np.float32,0x8021a247,0xaa23f820,2 +np.float32,0xbf362522,0xbf648ab8,2 +np.float32,0x3f457cd7,0x3f6ac95e,2 +np.float32,0xbcadf400,0xbe8dc7e2,2 +np.float32,0x80237210,0xaa26dca7,2 +np.float32,0xbf1293c9,0xbf54939f,2 +np.float32,0xbc5e73c0,0xbe744a37,2 +np.float32,0x3c03f980,0x3e4d44df,2 +np.float32,0x7da46f,0x2a7e6b20,2 +np.float32,0x5d4570,0x2a665dd0,2 +np.float32,0x3e93fbac,0x3f294287,2 +np.float32,0x7e6808fd,0x5477bfa4,2 +np.float32,0xff5aa9a6,0xd4c0c925,2 +np.float32,0xbf5206ba,0xbf6fa767,2 +np.float32,0xbf6e513e,0xbf79f6f1,2 +np.float32,0x3ed01c0f,0x3f3da20f,2 +np.float32,0xff47d93d,0xd4bb1704,2 +np.float32,0x7f466cfd,0x54baa514,2 +np.float32,0x665e10,0x2a6d9fc8,2 +np.float32,0x804d0629,0xaa5820e8,2 +np.float32,0x7e0beaa0,0x54514e7e,2 +np.float32,0xbf7fcb6c,0xbf7fee78,2 +np.float32,0x3f6c5b03,0x3f7946dd,2 +np.float32,0x3e941504,0x3f294c30,2 +np.float32,0xbf2749ad,0xbf5e26a1,2 +np.float32,0xfec2a00a,0xd493302d,2 +np.float32,0x3f15a358,0x3f560bce,2 +np.float32,0x3f15c4e7,0x3f561bcd,2 +np.float32,0xfedc8692,0xd499728c,2 +np.float32,0x7e8f6902,0x5484f180,2 +np.float32,0x7f663d62,0x54c42136,2 +np.float32,0x8027ea62,0xaa2d99b4,2 +np.float32,0x3f3d093d,0x3f67636d,2 +np.float32,0x7f118c33,0x54a85382,2 +np.float32,0x803e866a,0xaa499d43,2 +np.float32,0x80053632,0xa9b02407,2 +np.float32,0xbf36dd66,0xbf64d7af,2 +np.float32,0xbf560358,0xbf71292b,2 +np.float32,0x139a8,0x29596bc0,2 +np.float32,0xbe04f75c,0xbf01a26c,2 +np.float32,0xfe1c3268,0xd45920fa,2 +np.float32,0x7ec77f72,0x5494680c,2 +np.float32,0xbedde724,0xbf41bbba,2 +np.float32,0x3e81dbe0,0x3f220bfd,2 +np.float32,0x800373ac,0xa99989d4,2 +np.float32,0x3f7f859a,0x3f7fd72d,2 +np.float32,0x3eb9dc7e,0x3f369e80,2 +np.float32,0xff5f8eb7,0xd4c236b1,2 +np.float32,0xff1c03cb,0xd4ac44ac,2 +np.float32,0x18cfe1,0x2a14285b,2 +np.float32,0x7f21b075,0x54ae54fd,2 +np.float32,0xff490bd8,0xd4bb7680,2 +np.float32,0xbf15dc22,0xbf5626de,2 +np.float32,0xfe1d5a10,0xd459a9a3,2 +np.float32,0x750544,0x2a7875e4,2 +np.float32,0x8023d5df,0xaa2778b3,2 +np.float32,0x3e42aa08,0x3f1332b2,2 +np.float32,0x3ecaa751,0x3f3bf60d,2 +np.float32,0x0,0x0,2 +np.float32,0x80416da6,0xaa4cb011,2 +np.float32,0x3f4ea9ae,0x3f6e5e22,2 +np.float32,0x2113f4,0x2a230f8e,2 +np.float32,0x3f35c2e6,0x3f64619a,2 +np.float32,0xbf50db8a,0xbf6f3564,2 +np.float32,0xff4d5cea,0xd4bccb8a,2 +np.float32,0x7ee54420,0x549b72d2,2 +np.float32,0x64ee68,0x2a6c81f7,2 +np.float32,0x5330da,0x2a5dbfc2,2 +np.float32,0x80047f88,0xa9a7b467,2 +np.float32,0xbda01078,0xbedae800,2 +np.float32,0xfe96d05a,0xd487315f,2 +np.float32,0x8003cc10,0xa99e7ef4,2 +np.float32,0x8007b4ac,0xa9c8aa3d,2 +np.float32,0x5d4bcf,0x2a66630e,2 +np.float32,0xfdd0c0b0,0xd43dd403,2 +np.float32,0xbf7a1d82,0xbf7e05f0,2 +np.float32,0x74ca33,0x2a784c0f,2 +np.float32,0x804f45e5,0xaa5a3640,2 +np.float32,0x7e6d16aa,0x547988c4,2 +np.float32,0x807d5762,0xaa7e3714,2 +np.float32,0xfecf93d0,0xd4966229,2 +np.float32,0xfecbd25c,0xd4957890,2 +np.float32,0xff7db31c,0xd4ca93b0,2 +np.float32,0x3dac9e18,0x3ee07c4a,2 +np.float32,0xbf4b2d28,0xbf6d0509,2 +np.float32,0xbd4f4c50,0xbebd62e0,2 +np.float32,0xbd2eac40,0xbeb2e0ee,2 +np.float32,0x3d01b69b,0x3ea1fc7b,2 +np.float32,0x7ec63902,0x549416ed,2 +np.float32,0xfcc47700,0xd3ea616d,2 +np.float32,0xbf5ddec2,0xbf7413a1,2 +np.float32,0xff6a6110,0xd4c54c52,2 +np.float32,0xfdfae2a0,0xd449d335,2 +np.float32,0x7e54868c,0x547099cd,2 +np.float32,0x802b5b88,0xaa327413,2 +np.float32,0x80440e72,0xaa4f647a,2 +np.float32,0x3e313c94,0x3f0eaad5,2 +np.float32,0x3ebb492a,0x3f3715a2,2 +np.float32,0xbef56286,0xbf4856d5,2 +np.float32,0x3f0154ba,0x3f4be3a0,2 +np.float32,0xff2df86c,0xd4b2a376,2 +np.float32,0x3ef6a850,0x3f48af57,2 +np.float32,0x3d8d33e1,0x3ed1f22d,2 +np.float32,0x4dd9b9,0x2a58e615,2 +np.float32,0x7f1caf83,0x54ac83c9,2 +np.float32,0xbf7286b3,0xbf7b6d73,2 +np.float32,0x80064f88,0xa9bbbd9f,2 +np.float32,0xbf1f55fa,0xbf5a92db,2 +np.float32,0x546a81,0x2a5ed516,2 +np.float32,0xbe912880,0xbf282d0a,2 +np.float32,0x5df587,0x2a66ee6e,2 +np.float32,0x801f706c,0xaa205279,2 +np.float32,0x58cb6d,0x2a629ece,2 +np.float32,0xfe754f8c,0xd47c62da,2 +np.float32,0xbefb6f4c,0xbf49f8e7,2 +np.float32,0x80000001,0xa6a14518,2 +np.float32,0xbf067837,0xbf4e8df4,2 +np.float32,0x3e8e715c,0x3f271ee4,2 +np.float32,0x8009de9b,0xa9d9ebc8,2 +np.float32,0xbf371ff1,0xbf64f36e,2 +np.float32,0x7f5ce661,0x54c170e4,2 +np.float32,0x3f3c47d1,0x3f671467,2 +np.float32,0xfea5e5a6,0xd48b8eb2,2 +np.float32,0xff62b17f,0xd4c31e15,2 +np.float32,0xff315932,0xd4b3c98f,2 +np.float32,0xbf1c3ca8,0xbf5925b9,2 +np.float32,0x7f800000,0x7f800000,2 +np.float32,0xfdf20868,0xd4476c3b,2 +np.float32,0x5b790e,0x2a64e052,2 +np.float32,0x3f5ddf4e,0x3f7413d4,2 +np.float32,0x7f1a3182,0x54ab9861,2 +np.float32,0x3f4b906e,0x3f6d2b9d,2 +np.float32,0x7ebac760,0x54912edb,2 +np.float32,0x7f626d3f,0x54c30a7e,2 +np.float32,0x3e27b058,0x3f0c0edc,2 +np.float32,0x8041e69c,0xaa4d2de8,2 +np.float32,0x3f42cee0,0x3f69b84a,2 +np.float32,0x7ec5fe83,0x5494085b,2 +np.float32,0x9d3e6,0x29d99cde,2 +np.float32,0x3edc50c0,0x3f41452d,2 +np.float32,0xbf2c463a,0xbf60562c,2 +np.float32,0x800bfa33,0xa9e871e8,2 +np.float32,0x7c9f2c,0x2a7dba4d,2 +np.float32,0x7f2ef9fd,0x54b2fb73,2 +np.float32,0x80741847,0xaa77cdb9,2 +np.float32,0x7e9c462a,0x5488ce1b,2 +np.float32,0x3ea47ec1,0x3f2f55a9,2 +np.float32,0x7f311c43,0x54b3b4f5,2 +np.float32,0x3d8f4c73,0x3ed2facd,2 +np.float32,0x806d7bd2,0xaa7301ef,2 +np.float32,0xbf633d24,0xbf760799,2 +np.float32,0xff4f9a3f,0xd4bd7a99,2 +np.float32,0x3f6021ca,0x3f74e73d,2 +np.float32,0x7e447015,0x546a5eac,2 +np.float32,0x6bff3c,0x2a71e711,2 +np.float32,0xe9c9f,0x29f85f06,2 +np.float32,0x8009fe14,0xa9dad277,2 +np.float32,0x807cf79c,0xaa7df644,2 +np.float32,0xff440e1b,0xd4b9e608,2 +np.float32,0xbddf9a50,0xbef4b5db,2 +np.float32,0x7f3b1c39,0x54b706fc,2 +np.float32,0x3c7471a0,0x3e7c16a7,2 +np.float32,0x8065b02b,0xaa6d18ee,2 +np.float32,0x7f63a3b2,0x54c36379,2 +np.float32,0xbe9c9d92,0xbf2c7d33,2 +np.float32,0x3d93aad3,0x3ed51a2e,2 +np.float32,0xbf41b040,0xbf694571,2 +np.float32,0x80396b9e,0xaa43f899,2 +np.float64,0x800fa025695f404b,0xaaa4000ff64bb00c,2 +np.float64,0xbfecc00198f98003,0xbfeee0b623fbd94b,2 +np.float64,0x7f9eeb60b03dd6c0,0x55291bf8554bb303,2 +np.float64,0x3fba74485634e890,0x3fde08710bdb148d,2 +np.float64,0xbfdd9a75193b34ea,0xbfe8bf711660a2f5,2 +np.float64,0xbfcf92e17a3f25c4,0xbfe4119eda6f3773,2 +np.float64,0xbfe359e2ba66b3c6,0xbfeb0f7ae97ea142,2 +np.float64,0x20791a5640f24,0x2a9441f13d262bed,2 +np.float64,0x3fe455fbfae8abf8,0x3feb830d63e1022c,2 +np.float64,0xbd112b7b7a226,0x2aa238c097ec269a,2 +np.float64,0x93349ba126694,0x2aa0c363cd74465a,2 +np.float64,0x20300cd440602,0x2a9432b4f4081209,2 +np.float64,0x3fdcfae677b9f5cc,0x3fe892a9ee56fe8d,2 +np.float64,0xbfefaae3f7bf55c8,0xbfefe388066132c4,2 +np.float64,0x1a7d6eb634faf,0x2a92ed9851d29ab5,2 +np.float64,0x7fd5308d39aa6119,0x553be444e30326c6,2 +np.float64,0xff811c7390223900,0xd5205cb404952fa7,2 +np.float64,0x80083d24aff07a4a,0xaaa0285cf764d898,2 +np.float64,0x800633810ccc6703,0xaa9d65341419586b,2 +np.float64,0x800ff456223fe8ac,0xaaa423bbcc24dff1,2 +np.float64,0x7fde5c99aebcb932,0x553f71be7d6d9daa,2 +np.float64,0x3fed961c4b3b2c39,0x3fef2ca146270cac,2 +np.float64,0x7fe744d30c6e89a5,0x554220a4cdc78e62,2 +np.float64,0x3fd8f527c7b1ea50,0x3fe76101085be1cb,2 +np.float64,0xbfc96a14b232d428,0xbfe2ab1a8962606c,2 +np.float64,0xffe85f540cf0bea7,0xd54268dff964519a,2 +np.float64,0x800e3be0fe7c77c2,0xaaa3634efd7f020b,2 +np.float64,0x3feb90d032f721a0,0x3fee72a4579e8b12,2 +np.float64,0xffe05674aaa0ace9,0xd5401c9e3fb4abcf,2 +np.float64,0x3fefc2e32c3f85c6,0x3fefeb940924bf42,2 +np.float64,0xbfecfd89e9f9fb14,0xbfeef6addf73ee49,2 +np.float64,0xf5862717eb0c5,0x2aa3e1428780382d,2 +np.float64,0xffc3003b32260078,0xd53558f92202dcdb,2 +np.float64,0x3feb4c152c36982a,0x3fee5940f7da0825,2 +np.float64,0x3fe7147b002e28f6,0x3fecb2948f46d1e3,2 +np.float64,0x7fe00ad9b4a015b2,0x5540039d15e1da54,2 +np.float64,0x8010000000000000,0xaaa428a2f98d728b,2 +np.float64,0xbfd3a41bfea74838,0xbfe595ab45b1be91,2 +np.float64,0x7fdbfd6e5537fadc,0x553e9a6e1107b8d0,2 +np.float64,0x800151d9d9a2a3b4,0xaa918cd8fb63f40f,2 +np.float64,0x7fe6828401ad0507,0x5541eda05dcd1fcf,2 +np.float64,0x3fdae1e7a1b5c3d0,0x3fe7f711e72ecc35,2 +np.float64,0x7fdf4936133e926b,0x553fc29c8d5edea3,2 +np.float64,0x80079de12d4f3bc3,0xaa9f7b06a9286da4,2 +np.float64,0x3fe1261cade24c39,0x3fe9fe09488e417a,2 +np.float64,0xbfc20dce21241b9c,0xbfe0a842fb207a28,2 +np.float64,0x3fe3285dfa2650bc,0x3feaf85215f59ef9,2 +np.float64,0x7fe42b93aea85726,0x554148c3c3bb35e3,2 +np.float64,0xffe6c74e7f6d8e9c,0xd541ffd13fa36dbd,2 +np.float64,0x3fe73ea139ee7d42,0x3fecc402242ab7d3,2 +np.float64,0xffbd4b46be3a9690,0xd53392de917c72e4,2 +np.float64,0x800caed8df395db2,0xaaa2a811a02e6be4,2 +np.float64,0x800aacdb6c9559b7,0xaaa19d6fbc8feebf,2 +np.float64,0x839fb4eb073f7,0x2aa0264b98327c12,2 +np.float64,0xffd0157ba9a02af8,0xd5397157a11c0d05,2 +np.float64,0x7fddc8ff173b91fd,0x553f3e7663fb2ac7,2 +np.float64,0x67b365facf66d,0x2a9dd4d838b0d853,2 +np.float64,0xffe12e7fc7225cff,0xd5406272a83a8e1b,2 +np.float64,0x7fea5b19a034b632,0x5542e567658b3e36,2 +np.float64,0x124989d824932,0x2a90ba8dc7a39532,2 +np.float64,0xffe12ef098225de0,0xd54062968450a078,2 +np.float64,0x3fea2f44a3f45e8a,0x3fedee3c461f4716,2 +np.float64,0x3fe6b033e66d6068,0x3fec88c8035e06b1,2 +np.float64,0x3fe928a2ccf25146,0x3fed88d4cde7a700,2 +np.float64,0x3feead27e97d5a50,0x3fef8d7537d82e60,2 +np.float64,0x8003ab80b6875702,0xaa98adfedd7715a9,2 +np.float64,0x45a405828b481,0x2a9a1fa99a4eff1e,2 +np.float64,0x8002ddebad85bbd8,0xaa96babfda4e0031,2 +np.float64,0x3fc278c32824f186,0x3fe0c8e7c979fbd5,2 +np.float64,0x2e10fffc5c221,0x2a96c30a766d06fa,2 +np.float64,0xffd6ba8c2ead7518,0xd53c8d1d92bc2788,2 +np.float64,0xbfeb5ec3a036bd87,0xbfee602bbf0a0d01,2 +np.float64,0x3fed5bd58f7ab7ab,0x3fef181bf591a4a7,2 +np.float64,0x7feb5274a5b6a4e8,0x55431fcf81876218,2 +np.float64,0xaf8fd6cf5f1fb,0x2aa1c6edbb1e2aaf,2 +np.float64,0x7fece718f179ce31,0x55437c74efb90933,2 +np.float64,0xbfa3c42d0c278860,0xbfd5a16407c77e73,2 +np.float64,0x800b5cff0576b9fe,0xaaa1fc4ecb0dec4f,2 +np.float64,0x800be89ae557d136,0xaaa244d115fc0963,2 +np.float64,0x800d2578f5ba4af2,0xaaa2e18a3a3fc134,2 +np.float64,0x80090ff93e321ff3,0xaaa0add578e3cc3c,2 +np.float64,0x28c5a240518c,0x2a81587cccd7e202,2 +np.float64,0x7fec066929780cd1,0x55434971435d1069,2 +np.float64,0x7fc84d4d15309a99,0x55372c204515694f,2 +np.float64,0xffe070a75de0e14e,0xd54025365046dad2,2 +np.float64,0x7fe5b27cc36b64f9,0x5541b5b822f0b6ca,2 +np.float64,0x3fdea35ac8bd46b6,0x3fe9086a0fb792c2,2 +np.float64,0xbfe79996f7af332e,0xbfece9571d37a5b3,2 +np.float64,0xffdfb47f943f6900,0xd53fe6c14c3366db,2 +np.float64,0xc015cf63802ba,0x2aa2517164d075f4,2 +np.float64,0x7feba98948375312,0x5543340b5b1f1181,2 +np.float64,0x8008678e6550cf1d,0xaaa043e7cea90da5,2 +np.float64,0x3fb11b92fa223726,0x3fd9f8b53be4d90b,2 +np.float64,0x7fc9b18cf0336319,0x55379b42da882047,2 +np.float64,0xbfe5043e736a087d,0xbfebd0c67db7a8e3,2 +np.float64,0x7fde88546a3d10a8,0x553f80cfe5bcf5fe,2 +np.float64,0x8006a6c82dcd4d91,0xaa9e171d182ba049,2 +np.float64,0xbfa0f707ac21ee10,0xbfd48e5d3faa1699,2 +np.float64,0xbfe7716bffaee2d8,0xbfecd8e6abfb8964,2 +np.float64,0x9511ccab2a23a,0x2aa0d56d748f0313,2 +np.float64,0x8003ddb9b847bb74,0xaa991ca06fd9d308,2 +np.float64,0x80030710fac60e23,0xaa9725845ac95fe8,2 +np.float64,0xffece5bbaeb9cb76,0xd5437c2670f894f4,2 +np.float64,0x3fd9be5c72b37cb9,0x3fe79f2e932a5708,2 +np.float64,0x1f050cca3e0a3,0x2a93f36499fe5228,2 +np.float64,0x3fd5422becaa8458,0x3fe6295d6150df58,2 +np.float64,0xffd72c050e2e580a,0xd53cbc52d73b495f,2 +np.float64,0xbfe66d5235ecdaa4,0xbfec6ca27e60bf23,2 +np.float64,0x17ac49a42f58a,0x2a923b5b757087a0,2 +np.float64,0xffd39edc40273db8,0xd53b2f7bb99b96bf,2 +np.float64,0x7fde6cf009bcd9df,0x553f77614eb30d75,2 +np.float64,0x80042b4c3fa85699,0xaa99c05fbdd057db,2 +np.float64,0xbfde5547f8bcaa90,0xbfe8f3147d67a940,2 +np.float64,0xbfdd02f9bf3a05f4,0xbfe894f2048aa3fe,2 +np.float64,0xbfa20ec82c241d90,0xbfd4fd02ee55aac7,2 +np.float64,0x8002f670f8c5ece3,0xaa96fad7e53dd479,2 +np.float64,0x80059f24d7eb3e4a,0xaa9c7312dae0d7bc,2 +np.float64,0x7fe6ae7423ad5ce7,0x5541f9430be53062,2 +np.float64,0xe135ea79c26be,0x2aa350d8f8c526e1,2 +np.float64,0x3fec188ce4f8311a,0x3feea44d21c23f68,2 +np.float64,0x800355688286aad2,0xaa97e6ca51eb8357,2 +np.float64,0xa2d6530b45acb,0x2aa15635bbd366e8,2 +np.float64,0x600e0150c01c1,0x2a9d1456ea6c239c,2 +np.float64,0x8009c30863338611,0xaaa118f94b188bcf,2 +np.float64,0x3fe7e4c0dfefc982,0x3fed07e8480b8c07,2 +np.float64,0xbfddac6407bb58c8,0xbfe8c46f63a50225,2 +np.float64,0xbc85e977790bd,0x2aa2344636ed713d,2 +np.float64,0xfff0000000000000,0xfff0000000000000,2 +np.float64,0xffcd1570303a2ae0,0xd5389a27d5148701,2 +np.float64,0xbf937334d026e660,0xbfd113762e4e29a7,2 +np.float64,0x3fdbfdaa9b37fb55,0x3fe84a425fdff7df,2 +np.float64,0xffc10800f5221000,0xd5349535ffe12030,2 +np.float64,0xaf40f3755e81f,0x2aa1c443af16cd27,2 +np.float64,0x800f7da34f7efb47,0xaaa3f14bf25fc89f,2 +np.float64,0xffe4a60125a94c02,0xd5416b764a294128,2 +np.float64,0xbf8e25aa903c4b40,0xbfcf5ebc275b4789,2 +np.float64,0x3fca681bbb34d038,0x3fe2e882bcaee320,2 +np.float64,0xbfd0f3c9c1a1e794,0xbfe48d0df7b47572,2 +np.float64,0xffeb99b49d373368,0xd5433060dc641910,2 +np.float64,0x3fe554fb916aa9f8,0x3febf437cf30bd67,2 +np.float64,0x80079518d0af2a32,0xaa9f6ee87044745a,2 +np.float64,0x5e01a8a0bc036,0x2a9cdf0badf222c3,2 +np.float64,0xbfea9831b3f53064,0xbfee1601ee953ab3,2 +np.float64,0xbfc369d1a826d3a4,0xbfe110b675c311e0,2 +np.float64,0xa82e640d505cd,0x2aa1863d4e523b9c,2 +np.float64,0x3fe506d70a2a0dae,0x3febd1eba3aa83fa,2 +np.float64,0xcbacba7197598,0x2aa2adeb9927f1f2,2 +np.float64,0xc112d6038225b,0x2aa25978f12038b0,2 +np.float64,0xffa7f5f44c2febf0,0xd52d0ede02d4e18b,2 +np.float64,0x8006f218e34de433,0xaa9e870cf373b4eb,2 +np.float64,0xffe6d9a5d06db34b,0xd54204a4adc608c7,2 +np.float64,0x7fe717210eae2e41,0x554214bf3e2b5228,2 +np.float64,0xbfdd4b45cdba968c,0xbfe8a94c7f225f8e,2 +np.float64,0x883356571066b,0x2aa055ab0b2a8833,2 +np.float64,0x3fe307fc02a60ff8,0x3feae9175053288f,2 +np.float64,0x3fefa985f77f530c,0x3fefe31289446615,2 +np.float64,0x8005698a98aad316,0xaa9c17814ff7d630,2 +np.float64,0x3fea77333c74ee66,0x3fee098ba70e10fd,2 +np.float64,0xbfd1d00b0023a016,0xbfe4e497fd1cbea1,2 +np.float64,0x80009b0c39813619,0xaa8b130a6909cc3f,2 +np.float64,0x3fdbeb896fb7d714,0x3fe84502ba5437f8,2 +np.float64,0x3fb6e7e3562dcfc7,0x3fdca00d35c389ad,2 +np.float64,0xb2d46ebf65a8e,0x2aa1e2fe158d0838,2 +np.float64,0xbfd5453266aa8a64,0xbfe62a6a74c8ef6e,2 +np.float64,0x7fe993aa07732753,0x5542b5438bf31cb7,2 +np.float64,0xbfda5a098cb4b414,0xbfe7ce6d4d606203,2 +np.float64,0xbfe40c3ce068187a,0xbfeb61a32c57a6d0,2 +np.float64,0x3fcf17671d3e2ed0,0x3fe3f753170ab686,2 +np.float64,0xbfe4f814b6e9f02a,0xbfebcb67c60b7b08,2 +np.float64,0x800efedf59fdfdbf,0xaaa3ba4ed44ad45a,2 +np.float64,0x800420b556e8416b,0xaa99aa7fb14edeab,2 +np.float64,0xbf6e4ae6403c9600,0xbfc3cb2b29923989,2 +np.float64,0x3fda5c760a34b8ec,0x3fe7cf2821c52391,2 +np.float64,0x7f898faac0331f55,0x5522b44a01408188,2 +np.float64,0x3fd55af4b7aab5e9,0x3fe631f6d19503b3,2 +np.float64,0xbfa30a255c261450,0xbfd55caf0826361d,2 +np.float64,0x7fdfb801343f7001,0x553fe7ee50b9199a,2 +np.float64,0x7fa89ee91c313dd1,0x552d528ca2a4d659,2 +np.float64,0xffea72921d34e524,0xd542eb01af2e470d,2 +np.float64,0x3feddf0f33fbbe1e,0x3fef462b67fc0a91,2 +np.float64,0x3fe36700b566ce01,0x3feb1596caa8eff7,2 +np.float64,0x7fe6284a25ac5093,0x5541d58be3956601,2 +np.float64,0xffda16f7c8b42df0,0xd53de4f722485205,2 +np.float64,0x7f9355b94026ab72,0x552578cdeb41d2ca,2 +np.float64,0xffd3a9b022275360,0xd53b347b02dcea21,2 +np.float64,0x3fcb7f4f4a36fe9f,0x3fe32a40e9f6c1aa,2 +np.float64,0x7fdb958836372b0f,0x553e746103f92111,2 +np.float64,0x3fd37761c0a6eec4,0x3fe5853c5654027e,2 +np.float64,0x3fe449f1a2e893e4,0x3feb7d9e4eacc356,2 +np.float64,0x80077dfbef0efbf9,0xaa9f4ed788d2fadd,2 +np.float64,0x4823aa7890476,0x2a9a6eb4b653bad5,2 +np.float64,0xbfede01a373bc034,0xbfef468895fbcd29,2 +np.float64,0xbfe2bac5f125758c,0xbfeac4811c4dd66f,2 +np.float64,0x3fec10373af8206e,0x3feea14529e0f178,2 +np.float64,0x3fe305e30ca60bc6,0x3feae81a2f9d0302,2 +np.float64,0xa9668c5f52cd2,0x2aa1910e3a8f2113,2 +np.float64,0xbfd98b1717b3162e,0xbfe78f75995335d2,2 +np.float64,0x800fa649c35f4c94,0xaaa402ae79026a8f,2 +np.float64,0xbfb07dacf620fb58,0xbfd9a7d33d93a30f,2 +np.float64,0x80015812f382b027,0xaa91a843e9c85c0e,2 +np.float64,0x3fc687d96c2d0fb3,0x3fe1ef0ac16319c5,2 +np.float64,0xbfecad2ecd795a5e,0xbfeed9f786697af0,2 +np.float64,0x1608c1242c119,0x2a91cd11e9b4ccd2,2 +np.float64,0x6df775e8dbeef,0x2a9e6ba8c71130eb,2 +np.float64,0xffe96e9332b2dd26,0xd542ac342d06299b,2 +np.float64,0x7fecb6a3b8396d46,0x5543718af8162472,2 +np.float64,0x800d379f893a6f3f,0xaaa2ea36bbcb9308,2 +np.float64,0x3f924cdb202499b6,0x3fd0bb90af8d1f79,2 +np.float64,0x0,0x0,2 +np.float64,0x7feaf3b365f5e766,0x5543099a160e2427,2 +np.float64,0x3fea169ed0742d3e,0x3fede4d526e404f8,2 +np.float64,0x7feaf5f2f775ebe5,0x55430a2196c5f35a,2 +np.float64,0xbfc80d4429301a88,0xbfe2541f2ddd3334,2 +np.float64,0xffc75203b32ea408,0xd536db2837068689,2 +np.float64,0xffed2850e63a50a1,0xd5438b1217b72b8a,2 +np.float64,0x7fc16b0e7f22d61c,0x5534bcd0bfddb6f0,2 +np.float64,0x7feee8ed09fdd1d9,0x5543ed5b3ca483ab,2 +np.float64,0x7fb6c7ee662d8fdc,0x5531fffb5d46dafb,2 +np.float64,0x3fd77cebf8aef9d8,0x3fe6e9242e2bd29d,2 +np.float64,0x3f81c33f70238680,0x3fca4c7f3c9848f7,2 +np.float64,0x3fd59fea92ab3fd5,0x3fe649c1558cadd5,2 +np.float64,0xffeba82d4bf7505a,0xd54333bad387f7bd,2 +np.float64,0xffd37630e1a6ec62,0xd53b1ca62818c670,2 +np.float64,0xffec2c1e70b8583c,0xd5435213dcd27c22,2 +np.float64,0x7fec206971f840d2,0x55434f6660a8ae41,2 +np.float64,0x3fed2964adba52c9,0x3fef0642fe72e894,2 +np.float64,0xffd08e30d6211c62,0xd539b060e0ae02da,2 +np.float64,0x3e5f976c7cbf4,0x2a992e6ff991a122,2 +np.float64,0xffe6eee761adddce,0xd5420a393c67182f,2 +np.float64,0xbfe8ec9a31f1d934,0xbfed714426f58147,2 +np.float64,0x7fefffffffffffff,0x554428a2f98d728b,2 +np.float64,0x3fb3ae8b2c275d16,0x3fdb36b81b18a546,2 +np.float64,0x800f73df4dfee7bf,0xaaa3ed1a3e2cf49c,2 +np.float64,0xffd0c8873b21910e,0xd539ce6a3eab5dfd,2 +np.float64,0x3facd6c49439ad80,0x3fd8886f46335df1,2 +np.float64,0x3935859c726b2,0x2a98775f6438dbb1,2 +np.float64,0x7feed879fbfdb0f3,0x5543e9d1ac239469,2 +np.float64,0xbfe84dd990f09bb3,0xbfed323af09543b1,2 +np.float64,0xbfe767cc5a6ecf98,0xbfecd4f39aedbacb,2 +np.float64,0xffd8bd91d5b17b24,0xd53d5eb3734a2609,2 +np.float64,0xbfe13edeb2a27dbe,0xbfea0a856f0b9656,2 +np.float64,0xd933dd53b267c,0x2aa3158784e428c9,2 +np.float64,0xbfef6fef987edfdf,0xbfefcfb1c160462b,2 +np.float64,0x8009eeda4893ddb5,0xaaa13268a41045b1,2 +np.float64,0xab48c7a156919,0x2aa1a1a9c124c87d,2 +np.float64,0xa997931d532f3,0x2aa192bfe5b7bbb4,2 +np.float64,0xffe39ce8b1e739d1,0xd5411fa1c5c2cbd8,2 +np.float64,0x7e7ac2f6fcf59,0x2a9fdf6f263a9e9f,2 +np.float64,0xbfee1e35a6fc3c6b,0xbfef5c25d32b4047,2 +np.float64,0xffe5589c626ab138,0xd5419d220cc9a6da,2 +np.float64,0x7fe12509bf224a12,0x55405f7036dc5932,2 +np.float64,0xa6f15ba94de2c,0x2aa17b3367b1fc1b,2 +np.float64,0x3fca8adbfa3515b8,0x3fe2f0ca775749e5,2 +np.float64,0xbfcb03aa21360754,0xbfe30d5b90ca41f7,2 +np.float64,0x3fefafb2da7f5f66,0x3fefe5251aead4e7,2 +np.float64,0xffd90a59d23214b4,0xd53d7cf63a644f0e,2 +np.float64,0x3fba499988349333,0x3fddf84154fab7e5,2 +np.float64,0x800a76a0bc54ed42,0xaaa17f68cf67f2fa,2 +np.float64,0x3fea33d15bb467a3,0x3fedeff7f445b2ff,2 +np.float64,0x8005d9b0726bb362,0xaa9cd48624afeca9,2 +np.float64,0x7febf42e9a77e85c,0x55434541d8073376,2 +np.float64,0xbfedfc4469bbf889,0xbfef505989f7ee7d,2 +np.float64,0x8001211f1422423f,0xaa90a9889d865349,2 +np.float64,0x800e852f7fdd0a5f,0xaaa3845f11917f8e,2 +np.float64,0xffefd613c87fac27,0xd5441fd17ec669b4,2 +np.float64,0x7fed2a74543a54e8,0x55438b8c637da8b8,2 +np.float64,0xb83d50ff707aa,0x2aa210b4fc11e4b2,2 +np.float64,0x10000000000000,0x2aa428a2f98d728b,2 +np.float64,0x474ad9208e97,0x2a84e5a31530368a,2 +np.float64,0xffd0c5498ea18a94,0xd539ccc0e5cb425e,2 +np.float64,0x8001a8e9c82351d4,0xaa92f1aee6ca5b7c,2 +np.float64,0xd28db1e5a51b6,0x2aa2e328c0788f4a,2 +np.float64,0x3bf734ac77ee7,0x2a98da65c014b761,2 +np.float64,0x3fe56e17c96adc30,0x3febff2b6b829b7a,2 +np.float64,0x7783113eef063,0x2a9f46c3f09eb42c,2 +np.float64,0x3fd69d4e42ad3a9d,0x3fe69f83a21679f4,2 +np.float64,0x3fd34f4841a69e90,0x3fe5766b3c771616,2 +np.float64,0x3febb49895b76931,0x3fee7fcb603416c9,2 +np.float64,0x7fe8d6cb55f1ad96,0x554286c3b3bf4313,2 +np.float64,0xbfe67c6ba36cf8d8,0xbfec730218f2e284,2 +np.float64,0xffef9d97723f3b2e,0xd54413e38b6c29be,2 +np.float64,0x12d8cd2a25b1b,0x2a90e5ccd37b8563,2 +np.float64,0x81fe019103fc0,0x2aa01524155e73c5,2 +np.float64,0x7fe95d546f72baa8,0x5542a7fabfd425ff,2 +np.float64,0x800e742f1f9ce85e,0xaaa37cbe09e1f874,2 +np.float64,0xffd96bd3a732d7a8,0xd53da3086071264a,2 +np.float64,0x4ef2691e9de4e,0x2a9b3d316047fd6d,2 +np.float64,0x1a91684c3522e,0x2a92f25913c213de,2 +np.float64,0x3d5151b87aa2b,0x2a9909dbd9a44a84,2 +np.float64,0x800d9049435b2093,0xaaa31424e32d94a2,2 +np.float64,0xffe5b25fcc2b64bf,0xd541b5b0416b40b5,2 +np.float64,0xffe0eb784c21d6f0,0xd5404d083c3d6bc6,2 +np.float64,0x8007ceefbf0f9de0,0xaa9fbe0d739368b4,2 +np.float64,0xb78529416f0b,0x2a8ca3b29b5b3f18,2 +np.float64,0x7fba61130034c225,0x5532e6d4ca0f2918,2 +np.float64,0x3fba8d67ae351acf,0x3fde11efd6239b09,2 +np.float64,0x3fe7f24c576fe498,0x3fed0d63947a854d,2 +np.float64,0x2bb58dec576b3,0x2a965de7fca12aff,2 +np.float64,0xbfe86ceec4f0d9de,0xbfed3ea7f1d084e2,2 +np.float64,0x7fd1a7f7bca34fee,0x553a3f01b67fad2a,2 +np.float64,0x3fd9a43acfb34874,0x3fe7972dc5d8dfd6,2 +np.float64,0x7fd9861acdb30c35,0x553dad3b1bbb3b4d,2 +np.float64,0xffecc0c388398186,0xd54373d3b903deec,2 +np.float64,0x3fa6f86e9c2df0e0,0x3fd6bdbe40fcf710,2 +np.float64,0x800ddd99815bbb33,0xaaa33820d2f889bb,2 +np.float64,0x7fe087089b610e10,0x55402c868348a6d3,2 +np.float64,0x3fdf43d249be87a5,0x3fe933d29fbf7c23,2 +np.float64,0x7fe4f734c7a9ee69,0x5541822e56c40725,2 +np.float64,0x3feb39a9d3b67354,0x3fee526bf1f69f0e,2 +np.float64,0x3fe61454a0ec28a9,0x3fec46d7c36f7566,2 +np.float64,0xbfeafaa0a375f541,0xbfee3af2e49d457a,2 +np.float64,0x3fda7378e1b4e6f0,0x3fe7d613a3f92c40,2 +np.float64,0xe3e31c5fc7c64,0x2aa3645c12e26171,2 +np.float64,0xbfe97a556df2f4ab,0xbfeda8aa84cf3544,2 +np.float64,0xff612f9c80225f00,0xd514a51e5a2a8a97,2 +np.float64,0x800c51c8a0f8a391,0xaaa279fe7d40b50b,2 +np.float64,0xffd6f9d2312df3a4,0xd53ca783a5f8d110,2 +np.float64,0xbfead48bd7f5a918,0xbfee2cb2f89c5e57,2 +np.float64,0x800f5949e89eb294,0xaaa3e1a67a10cfef,2 +np.float64,0x800faf292b7f5e52,0xaaa40675e0c96cfd,2 +np.float64,0xbfedc238453b8470,0xbfef3c179d2d0209,2 +np.float64,0x3feb0443c5760888,0x3fee3e8bf29089c2,2 +np.float64,0xb26f69e164ded,0x2aa1df9f3dd7d765,2 +np.float64,0x3fcacdc053359b80,0x3fe300a67765b667,2 +np.float64,0x3fe8b274647164e8,0x3fed5a4cd4da8155,2 +np.float64,0x291e6782523ce,0x2a95ea7ac1b13a68,2 +np.float64,0xbfc4fc094e29f814,0xbfe1838671fc8513,2 +np.float64,0x3fbf1301f23e2600,0x3fdfb03a6f13e597,2 +np.float64,0xffeb36554ab66caa,0xd543193d8181e4f9,2 +np.float64,0xbfd969a52db2d34a,0xbfe78528ae61f16d,2 +np.float64,0x800cccd04d3999a1,0xaaa2b6b7a2d2d2d6,2 +np.float64,0x808eb4cb011d7,0x2aa005effecb2b4a,2 +np.float64,0x7fe839b3f9b07367,0x55425f61e344cd6d,2 +np.float64,0xbfeb25b6ed764b6e,0xbfee4b0234fee365,2 +np.float64,0xffefffffffffffff,0xd54428a2f98d728b,2 +np.float64,0xbfe01305da60260c,0xbfe9700b784af7e9,2 +np.float64,0xffcbf36b0a37e6d8,0xd538474b1d74ffe1,2 +np.float64,0xffaeebe3e83dd7c0,0xd52fa2e8dabf7209,2 +np.float64,0xbfd9913bf0b32278,0xbfe7915907aab13c,2 +np.float64,0xbfe7d125d9efa24c,0xbfecfff563177706,2 +np.float64,0xbfee98d23cbd31a4,0xbfef867ae393e446,2 +np.float64,0x3fe30efb67e61df6,0x3feaec6344633d11,2 +np.float64,0x1,0x2990000000000000,2 +np.float64,0x7fd5524fd3aaa49f,0x553bf30d18ab877e,2 +np.float64,0xc98b403f93168,0x2aa29d2fadb13c07,2 +np.float64,0xffe57080046ae100,0xd541a3b1b687360e,2 +np.float64,0x7fe20bade5e4175b,0x5540a79b94294f40,2 +np.float64,0x3fe155400a22aa80,0x3fea15c45f5b5837,2 +np.float64,0x7fe428dc8f6851b8,0x554147fd2ce93cc1,2 +np.float64,0xffefb77eb67f6efc,0xd544195dcaff4980,2 +np.float64,0x3fe49e733b293ce6,0x3feba394b833452a,2 +np.float64,0x38e01e3e71c05,0x2a986b2c955bad21,2 +np.float64,0x7fe735eb376e6bd5,0x55421cc51290d92d,2 +np.float64,0xbfd81d8644b03b0c,0xbfe71ce6d6fbd51a,2 +np.float64,0x8009a32325134647,0xaaa10645d0e6b0d7,2 +np.float64,0x56031ab8ac064,0x2a9c074be40b1f80,2 +np.float64,0xff8989aa30331340,0xd522b2d319a0ac6e,2 +np.float64,0xbfd6c183082d8306,0xbfe6ab8ffb3a8293,2 +np.float64,0x7ff8000000000000,0x7ff8000000000000,2 +np.float64,0xbfe17b68b1e2f6d2,0xbfea28dac8e0c457,2 +np.float64,0x3fbb50e42236a1c8,0x3fde5b090d51e3bd,2 +np.float64,0xffc2bb7cbf2576f8,0xd5353f1b3571c17f,2 +np.float64,0xbfe7576bca6eaed8,0xbfecce388241f47c,2 +np.float64,0x3fe7b52b04ef6a56,0x3fecf495bef99e7e,2 +np.float64,0xffe5511af82aa236,0xd5419b11524e8350,2 +np.float64,0xbfe66d5edf2cdabe,0xbfec6ca7d7b5be8c,2 +np.float64,0xc84a0ba790942,0x2aa29346f16a2cb4,2 +np.float64,0x6db5e7a0db6be,0x2a9e659c0e8244a0,2 +np.float64,0x7fef8f7b647f1ef6,0x554410e67af75d27,2 +np.float64,0xbfe2b4ada7e5695c,0xbfeac1997ec5a064,2 +np.float64,0xbfe99372e03326e6,0xbfedb2662b287543,2 +np.float64,0x3fa45d352428ba6a,0x3fd5d8a895423abb,2 +np.float64,0x3fa029695c2052d3,0x3fd439f858998886,2 +np.float64,0xffe0a9bd3261537a,0xd54037d0cd8bfcda,2 +np.float64,0xbfef83e09a7f07c1,0xbfefd66a4070ce73,2 +np.float64,0x7fee3dcc31fc7b97,0x5543c8503869407e,2 +np.float64,0xffbd16f1603a2de0,0xd533872fa5be978b,2 +np.float64,0xbfe8173141b02e62,0xbfed1c478614c6f4,2 +np.float64,0xbfef57aa277eaf54,0xbfefc77fdab27771,2 +np.float64,0x7fe883a02f31073f,0x554271ff0e3208da,2 +np.float64,0xe3adb63bc75b7,0x2aa362d833d0e41c,2 +np.float64,0x8001c430bac38862,0xaa93575026d26510,2 +np.float64,0x12fb347225f67,0x2a90f00eb9edb3fe,2 +np.float64,0x3fe53f83cbaa7f08,0x3febead40de452c2,2 +np.float64,0xbfe7f67227efece4,0xbfed0f10e32ad220,2 +np.float64,0xb8c5b45d718b7,0x2aa2152912cda86d,2 +np.float64,0x3fd23bb734a4776e,0x3fe50e5d3008c095,2 +np.float64,0x8001fd558ee3faac,0xaa941faa1f7ed450,2 +np.float64,0xffe6bbeda9ed77db,0xd541fcd185a63afa,2 +np.float64,0x4361d79086c3c,0x2a99d692237c30b7,2 +np.float64,0xbfd012f004a025e0,0xbfe43093e290fd0d,2 +np.float64,0xffe1d8850423b10a,0xd54097cf79d8d01e,2 +np.float64,0x3fccf4df7939e9bf,0x3fe37f8cf8be6436,2 +np.float64,0x8000546bc6c0a8d8,0xaa861bb3588556f2,2 +np.float64,0xbfecb4d6ba7969ae,0xbfeedcb6239135fe,2 +np.float64,0xbfaeb425cc3d6850,0xbfd90cfc103bb896,2 +np.float64,0x800ec037ec7d8070,0xaaa39eae8bde9774,2 +np.float64,0xbfeeaf863dfd5f0c,0xbfef8e4514772a8a,2 +np.float64,0xffec67c6c4b8cf8d,0xd5435fad89f900cf,2 +np.float64,0x3fda4498da348932,0x3fe7c7f6b3f84048,2 +np.float64,0xbfd05fd3dea0bfa8,0xbfe4509265a9b65f,2 +np.float64,0x3fe42cc713a8598e,0x3feb706ba9cd533c,2 +np.float64,0xec22d4d7d845b,0x2aa39f8cccb9711c,2 +np.float64,0x7fda30606c3460c0,0x553deea865065196,2 +np.float64,0xbfd58cba8bab1976,0xbfe64327ce32d611,2 +np.float64,0xadd521c75baa4,0x2aa1b7efce201a98,2 +np.float64,0x7fed43c1027a8781,0x55439131832b6429,2 +np.float64,0x800bee278fb7dc4f,0xaaa247a71e776db4,2 +np.float64,0xbfe9be5dd2737cbc,0xbfedc2f9501755b0,2 +np.float64,0x8003f4854447e90b,0xaa994d9b5372b13b,2 +np.float64,0xbfe5d0f867eba1f1,0xbfec29f8dd8b33a4,2 +np.float64,0x3fd79102d5af2206,0x3fe6efaa7a1efddb,2 +np.float64,0xbfeae783c835cf08,0xbfee33cdb4a44e81,2 +np.float64,0x3fcf1713e83e2e28,0x3fe3f7414753ddfb,2 +np.float64,0xffe5ab3cff2b567a,0xd541b3bf0213274a,2 +np.float64,0x7fe0fc65d8a1f8cb,0x554052761ac96386,2 +np.float64,0x7e81292efd026,0x2a9fdff8c01ae86f,2 +np.float64,0x80091176039222ec,0xaaa0aebf0565dfa6,2 +np.float64,0x800d2bf5ab5a57ec,0xaaa2e4a4c31e7e29,2 +np.float64,0xffd1912ea923225e,0xd53a33b2856726ab,2 +np.float64,0x800869918ed0d323,0xaaa0453408e1295d,2 +np.float64,0xffba0898fa341130,0xd532d19b202a9646,2 +np.float64,0xbfe09fac29613f58,0xbfe9b9687b5811a1,2 +np.float64,0xbfbd4ae82e3a95d0,0xbfdf1220f6f0fdfa,2 +np.float64,0xffea11d27bb423a4,0xd542d3d3e1522474,2 +np.float64,0xbfe6b05705ad60ae,0xbfec88d6bcab2683,2 +np.float64,0x3fe624a3f2ec4948,0x3fec4dcc78ddf871,2 +np.float64,0x53483018a6907,0x2a9bba8f92006b69,2 +np.float64,0xbfec0a6eeb7814de,0xbfee9f2a741248d7,2 +np.float64,0x3fe8c8ce6371919d,0x3fed63250c643482,2 +np.float64,0xbfe26b0ef964d61e,0xbfea9e511db83437,2 +np.float64,0xffa0408784208110,0xd52987f62c369ae9,2 +np.float64,0xffc153abc322a758,0xd534b384b5c5fe63,2 +np.float64,0xbfbdce88a63b9d10,0xbfdf4065ef0b01d4,2 +np.float64,0xffed4a4136fa9482,0xd54392a450f8b0af,2 +np.float64,0x8007aa18748f5432,0xaa9f8bd2226d4299,2 +np.float64,0xbfdab4d3e8b569a8,0xbfe7e9a5402540e5,2 +np.float64,0x7fe68914f92d1229,0x5541ef5e78fa35de,2 +np.float64,0x800a538bb1b4a718,0xaaa16bc487711295,2 +np.float64,0xffe02edbc8605db7,0xd5400f8f713df890,2 +np.float64,0xffe8968053712d00,0xd54276b9cc7f460a,2 +np.float64,0x800a4ce211d499c5,0xaaa1680491deb40c,2 +np.float64,0x3f988080f8310102,0x3fd2713691e99329,2 +np.float64,0xf64e42a7ec9c9,0x2aa3e6a7af780878,2 +np.float64,0xff73cc7100279900,0xd51b4478c3409618,2 +np.float64,0x71e6722ce3ccf,0x2a9ec76ddf296ce0,2 +np.float64,0x8006ca16ab0d942e,0xaa9e4bfd862af570,2 +np.float64,0x8000000000000000,0x8000000000000000,2 +np.float64,0xbfed373e02ba6e7c,0xbfef0b2b7bb767b3,2 +np.float64,0xa6cb0f694d962,0x2aa179dd16b0242b,2 +np.float64,0x7fec14626cf828c4,0x55434ca55b7c85d5,2 +np.float64,0x3fcda404513b4808,0x3fe3a68e8d977752,2 +np.float64,0xbfeb94995f772933,0xbfee74091d288b81,2 +np.float64,0x3fce2299a13c4530,0x3fe3c2603f28d23b,2 +np.float64,0xffd07f4534a0fe8a,0xd539a8a6ebc5a603,2 +np.float64,0x7fdb1c651e3638c9,0x553e478a6385c86b,2 +np.float64,0x3fec758336f8eb06,0x3feec5f3b92c8b28,2 +np.float64,0x796fc87cf2dfa,0x2a9f7184a4ad8c49,2 +np.float64,0x3fef9ba866ff3750,0x3fefde6a446fc2cd,2 +np.float64,0x964d26c72c9a5,0x2aa0e143f1820179,2 +np.float64,0xbfef6af750bed5ef,0xbfefce04870a97bd,2 +np.float64,0x3fe2f3961aa5e72c,0x3feadf769321a3ff,2 +np.float64,0xbfd6b706e9ad6e0e,0xbfe6a8141c5c3b5d,2 +np.float64,0x7fe0ecc40a21d987,0x55404d72c2b46a82,2 +np.float64,0xbfe560d19deac1a3,0xbfebf962681a42a4,2 +np.float64,0xbfea37170ab46e2e,0xbfedf136ee9df02b,2 +np.float64,0xbfebf78947b7ef12,0xbfee9847ef160257,2 +np.float64,0x800551f8312aa3f1,0xaa9bee7d3aa5491b,2 +np.float64,0xffed2513897a4a26,0xd5438a58c4ae28ec,2 +np.float64,0x7fd962d75cb2c5ae,0x553d9f8a0c2016f3,2 +np.float64,0x3fefdd8512bfbb0a,0x3feff47d8da7424d,2 +np.float64,0xbfefa5b43bff4b68,0xbfefe1ca42867af0,2 +np.float64,0xbfc8a2853531450c,0xbfe279bb7b965729,2 +np.float64,0x800c8843bc391088,0xaaa2951344e7b29b,2 +np.float64,0x7fe22587bae44b0e,0x5540af8bb58cfe86,2 +np.float64,0xbfe159fae822b3f6,0xbfea182394eafd8d,2 +np.float64,0xbfe6fdfd50edfbfa,0xbfeca93f2a3597d0,2 +np.float64,0xbfe5cd5afaeb9ab6,0xbfec286a8ce0470f,2 +np.float64,0xbfc84bb97f309774,0xbfe263ef0f8f1f6e,2 +np.float64,0x7fd9c1e548b383ca,0x553dc4556874ecb9,2 +np.float64,0x7fda43d33bb487a5,0x553df60f61532fc0,2 +np.float64,0xbfe774bd25eee97a,0xbfecda42e8578c1f,2 +np.float64,0x800df1f5ab9be3ec,0xaaa34184712e69db,2 +np.float64,0xbff0000000000000,0xbff0000000000000,2 +np.float64,0x3fe14ec21b629d84,0x3fea128244215713,2 +np.float64,0x7fc1ce7843239cf0,0x5534e3fa8285b7b8,2 +np.float64,0xbfe922b204724564,0xbfed86818687d649,2 +np.float64,0x3fc58924fb2b1248,0x3fe1aa715ff6ebbf,2 +np.float64,0x8008b637e4d16c70,0xaaa0760b53abcf46,2 +np.float64,0xffbf55bd4c3eab78,0xd53404a23091a842,2 +np.float64,0x9f6b4a753ed6a,0x2aa136ef9fef9596,2 +np.float64,0xbfd11da7f8a23b50,0xbfe49deb493710d8,2 +np.float64,0x800a2f07fcd45e10,0xaaa157237c98b4f6,2 +np.float64,0x3fdd4defa4ba9bdf,0x3fe8aa0bcf895f4f,2 +np.float64,0x7fe9b0ab05f36155,0x5542bc5335414473,2 +np.float64,0x3fe89c97de313930,0x3fed51a1189b8982,2 +np.float64,0x3fdd45c8773a8b91,0x3fe8a7c2096fbf5a,2 +np.float64,0xbfeb6f64daf6deca,0xbfee665167ef43ad,2 +np.float64,0xffdf9da1c4bf3b44,0xd53fdf141944a983,2 +np.float64,0x3fde092ed0bc125c,0x3fe8de25bfbfc2db,2 +np.float64,0xbfcb21f96b3643f4,0xbfe3147904c258cf,2 +np.float64,0x800c9c934f993927,0xaaa29f17c43f021b,2 +np.float64,0x9b91814d37230,0x2aa11329e59bf6b0,2 +np.float64,0x3fe28a7e0b6514fc,0x3feaad6d23e2eadd,2 +np.float64,0xffecf38395f9e706,0xd5437f3ee1cd61e4,2 +np.float64,0x3fcade92a935bd25,0x3fe3049f4c1da1d0,2 +np.float64,0x800ab25d95d564bc,0xaaa1a076d7c66e04,2 +np.float64,0xffc0989e1e21313c,0xd53467f3b8158298,2 +np.float64,0x3fd81523eeb02a48,0x3fe71a38d2da8a82,2 +np.float64,0x7fe5b9dd402b73ba,0x5541b7b9b8631010,2 +np.float64,0x2c160d94582c3,0x2a966e51b503a3d1,2 +np.float64,0x2c416ffa5882f,0x2a9675aaef8b29c4,2 +np.float64,0x7fefe2ff01bfc5fd,0x55442289faf22b86,2 +np.float64,0xbfd469bf5d28d37e,0xbfe5dd239ffdc7eb,2 +np.float64,0xbfdd56f3eabaade8,0xbfe8ac93244ca17b,2 +np.float64,0xbfe057b89160af71,0xbfe9941557340bb3,2 +np.float64,0x800c50e140b8a1c3,0xaaa2798ace9097ee,2 +np.float64,0xbfda5a8984b4b514,0xbfe7ce93d65a56b0,2 +np.float64,0xbfcd6458323ac8b0,0xbfe39872514127bf,2 +np.float64,0x3fefb1f5ebff63ec,0x3fefe5e761b49b89,2 +np.float64,0x3fea3abc1df47578,0x3fedf29a1c997863,2 +np.float64,0x7fcb4a528e3694a4,0x553815f169667213,2 +np.float64,0x8c77da7b18efc,0x2aa080e52bdedb54,2 +np.float64,0x800e5dde4c5cbbbd,0xaaa372b16fd8b1ad,2 +np.float64,0x3fd2976038a52ec0,0x3fe5316b4f79fdbc,2 +np.float64,0x69413a0ed2828,0x2a9dfacd9cb44286,2 +np.float64,0xbfebbac0bdb77582,0xbfee820d9288b631,2 +np.float64,0x1a12aa7c34256,0x2a92d407e073bbfe,2 +np.float64,0xbfc41a27c3283450,0xbfe143c8665b0d3c,2 +np.float64,0xffe4faa41369f548,0xd54183230e0ce613,2 +np.float64,0xbfdeae81f23d5d04,0xbfe90b734bf35b68,2 +np.float64,0x3fc984ba58330975,0x3fe2b19e9052008e,2 +np.float64,0x7fe6e51b8d2dca36,0x554207a74ae2bb39,2 +np.float64,0x80081a58a81034b2,0xaaa0117d4aff11c8,2 +np.float64,0x7fde3fddfe3c7fbb,0x553f67d0082acc67,2 +np.float64,0x3fac7c999038f933,0x3fd86ec2f5dc3aa4,2 +np.float64,0x7fa26b4c4c24d698,0x552a9e6ea8545c18,2 +np.float64,0x3fdacd06e6b59a0e,0x3fe7f0dc0e8f9c6d,2 +np.float64,0x80064b62cbec96c6,0xaa9d8ac0506fdd05,2 +np.float64,0xb858116170b1,0x2a8caea703d9ccc8,2 +np.float64,0xbfe8d94ccef1b29a,0xbfed69a8782cbf3d,2 +np.float64,0x8005607d6a6ac0fc,0xaa9c07cf8620b037,2 +np.float64,0xbfe66a52daacd4a6,0xbfec6b5e403e6864,2 +np.float64,0x7fc398c2e0273185,0x5535918245894606,2 +np.float64,0x74b2d7dce965c,0x2a9f077020defdbc,2 +np.float64,0x7fe8f7a4d9b1ef49,0x55428eeae210e8eb,2 +np.float64,0x80027deddc84fbdc,0xaa95b11ff9089745,2 +np.float64,0xffeba2a94e774552,0xd5433273f6568902,2 +np.float64,0x80002f8259405f05,0xaa8240b68d7b9dc4,2 +np.float64,0xbfdf0d84883e1b0a,0xbfe92532c69c5802,2 +np.float64,0xbfcdfa7b6b3bf4f8,0xbfe3b997a84d0914,2 +np.float64,0x800c18b04e183161,0xaaa25d46d60b15c6,2 +np.float64,0xffeaf1e37c35e3c6,0xd543092cd929ac19,2 +np.float64,0xbfc5aa07752b5410,0xbfe1b36ab5ec741f,2 +np.float64,0x3fe5c491d1eb8924,0x3fec24a1c3f6a178,2 +np.float64,0xbfeb736937f6e6d2,0xbfee67cd296e6fa9,2 +np.float64,0xffec3d5718787aad,0xd5435602e1a2cc43,2 +np.float64,0x7fe71e1da86e3c3a,0x55421691ead882cb,2 +np.float64,0x3fdd6ed0c93adda2,0x3fe8b341d066c43c,2 +np.float64,0x7fbe3d7a203c7af3,0x5533c83e53283430,2 +np.float64,0x3fdc20cb56384197,0x3fe854676360aba9,2 +np.float64,0xb7a1ac636f436,0x2aa20b9d40d66e78,2 +np.float64,0x3fb1491bb8229237,0x3fda0fabad1738ee,2 +np.float64,0xbfdf9c0ce73f381a,0xbfe94b716dbe35ee,2 +np.float64,0xbfbd4f0ad23a9e18,0xbfdf1397329a2dce,2 +np.float64,0xbfe4e0caac69c196,0xbfebc119b8a181cd,2 +np.float64,0x5753641aaea6d,0x2a9c2ba3e92b0cd2,2 +np.float64,0x72bb814ae5771,0x2a9eda92fada66de,2 +np.float64,0x57ed8f5aafdb3,0x2a9c3c2e1d42e609,2 +np.float64,0xffec33359c38666a,0xd54353b2acd0daf1,2 +np.float64,0x3fa5fe6e8c2bfce0,0x3fd66a0b3bf2720a,2 +np.float64,0xffe2dc8d7ca5b91a,0xd540e6ebc097d601,2 +np.float64,0x7fd99d260eb33a4b,0x553db626c9c75f78,2 +np.float64,0xbfe2dd73e425bae8,0xbfead4fc4b93a727,2 +np.float64,0xdcd4a583b9a95,0x2aa33094c9a17ad7,2 +np.float64,0x7fb0af6422215ec7,0x553039a606e8e64f,2 +np.float64,0x7fdfab6227bf56c3,0x553fe3b26164aeda,2 +np.float64,0x1e4d265e3c9a6,0x2a93cba8a1a8ae6d,2 +np.float64,0xbfdc7d097238fa12,0xbfe86ee2f24fd473,2 +np.float64,0x7fe5d35d29eba6b9,0x5541bea5878bce2b,2 +np.float64,0xffcb886a903710d4,0xd53828281710aab5,2 +np.float64,0xffe058c7ffe0b190,0xd5401d61e9a7cbcf,2 +np.float64,0x3ff0000000000000,0x3ff0000000000000,2 +np.float64,0xffd5b1c1132b6382,0xd53c1c839c098340,2 +np.float64,0x3fe2e7956725cf2b,0x3fead9c907b9d041,2 +np.float64,0x800a8ee293951dc6,0xaaa18ce3f079f118,2 +np.float64,0x7febcd3085b79a60,0x55433c47e1f822ad,2 +np.float64,0x3feb0e14cd761c2a,0x3fee423542102546,2 +np.float64,0x3fb45e6d0628bcda,0x3fdb86db67d0c992,2 +np.float64,0x7fa836e740306dce,0x552d2907cb8118b2,2 +np.float64,0x3fd15ba25b22b745,0x3fe4b6b018409d78,2 +np.float64,0xbfb59980ce2b3300,0xbfdc1206274cb51d,2 +np.float64,0x3fdef1b87fbde371,0x3fe91dafc62124a1,2 +np.float64,0x7fed37a4337a6f47,0x55438e7e0b50ae37,2 +np.float64,0xffe6c87633ad90ec,0xd542001f216ab448,2 +np.float64,0x8008d2548ab1a4a9,0xaaa087ad272d8e17,2 +np.float64,0xbfd1d6744da3ace8,0xbfe4e71965adda74,2 +np.float64,0xbfb27f751224fee8,0xbfdaa82132775406,2 +np.float64,0x3fe2b336ae65666d,0x3feac0e6b13ec2d2,2 +np.float64,0xffc6bac2262d7584,0xd536a951a2eecb49,2 +np.float64,0x7fdb661321b6cc25,0x553e62dfd7fcd3f3,2 +np.float64,0xffe83567d5706acf,0xd5425e4bb5027568,2 +np.float64,0xbf7f0693e03e0d00,0xbfc9235314d53f82,2 +np.float64,0x3feb32b218766564,0x3fee4fd5847f3722,2 +np.float64,0x3fec25d33df84ba6,0x3feea91fcd4aebab,2 +np.float64,0x7fe17abecb22f57d,0x55407a8ba661207c,2 +np.float64,0xbfe5674b1eeace96,0xbfebfc351708dc70,2 +np.float64,0xbfe51a2d2f6a345a,0xbfebda702c9d302a,2 +np.float64,0x3fec05584af80ab0,0x3fee9d502a7bf54d,2 +np.float64,0xffda8871dcb510e4,0xd53e10105f0365b5,2 +np.float64,0xbfc279c31824f388,0xbfe0c9354d871484,2 +np.float64,0x1cbed61e397dc,0x2a937364712cd518,2 +np.float64,0x800787d198af0fa4,0xaa9f5c847affa1d2,2 +np.float64,0x80079f6d65af3edc,0xaa9f7d2863368bbd,2 +np.float64,0xb942f1e97285e,0x2aa2193e0c513b7f,2 +np.float64,0x7fe9078263320f04,0x554292d85dee2c18,2 +np.float64,0xbfe4de0761a9bc0f,0xbfebbfe04116b829,2 +np.float64,0xbfdbe6f3fc37cde8,0xbfe843aea59a0749,2 +np.float64,0xffcb6c0de136d81c,0xd5381fd9c525b813,2 +np.float64,0x9b6bda9336d7c,0x2aa111c924c35386,2 +np.float64,0x3fe17eece422fdda,0x3fea2a9bacd78607,2 +np.float64,0xd8011c49b0024,0x2aa30c87574fc0c6,2 +np.float64,0xbfc0a08b3f214118,0xbfe034d48f0d8dc0,2 +np.float64,0x3fd60adb1eac15b8,0x3fe66e42e4e7e6b5,2 +np.float64,0x80011d68ea023ad3,0xaa909733befbb962,2 +np.float64,0xffb35ac32426b588,0xd5310c4be1c37270,2 +np.float64,0x3fee8b56c9bd16ae,0x3fef81d8d15f6939,2 +np.float64,0x3fdc10a45e382149,0x3fe84fbe4cf11e68,2 +np.float64,0xbfc85dc45e30bb88,0xbfe2687b5518abde,2 +np.float64,0x3fd53b85212a770a,0x3fe6270d6d920d0f,2 +np.float64,0x800fc158927f82b1,0xaaa40e303239586f,2 +np.float64,0x11af5e98235ed,0x2a908b04a790083f,2 +np.float64,0xbfe2a097afe54130,0xbfeab80269eece99,2 +np.float64,0xbfd74ac588ae958c,0xbfe6d8ca3828d0b8,2 +np.float64,0xffea18ab2ef43156,0xd542d579ab31df1e,2 +np.float64,0xbfecda7058f9b4e1,0xbfeeea29c33b7913,2 +np.float64,0x3fc4ac56ed2958b0,0x3fe16d3e2bd7806d,2 +np.float64,0x3feccc898cb99913,0x3feee531f217dcfa,2 +np.float64,0xffeb3a64c5b674c9,0xd5431a30a41f0905,2 +np.float64,0x3fe5a7ee212b4fdc,0x3fec1844af9076fc,2 +np.float64,0x80080fdb52301fb7,0xaaa00a8b4274db67,2 +np.float64,0x800b3e7e47d67cfd,0xaaa1ec2876959852,2 +np.float64,0x80063fb8ee2c7f73,0xaa9d7875c9f20d6f,2 +np.float64,0x7fdacf80d0b59f01,0x553e2acede4c62a8,2 +np.float64,0x401e9b24803d4,0x2a996a0a75d0e093,2 +np.float64,0x3fe6c29505ed852a,0x3fec907a6d8c10af,2 +np.float64,0x8005c04ee2cb809f,0xaa9caa9813faef46,2 +np.float64,0xbfe1360f21e26c1e,0xbfea06155d6985b6,2 +np.float64,0xffc70606682e0c0c,0xd536c239b9d4be0a,2 +np.float64,0x800e639afefcc736,0xaaa37547d0229a26,2 +np.float64,0x3fe5589290aab125,0x3febf5c925c4e6db,2 +np.float64,0x8003b59330276b27,0xaa98c47e44524335,2 +np.float64,0x800d67ec22dacfd8,0xaaa301251b6a730a,2 +np.float64,0x7fdaeb5025b5d69f,0x553e35397dfe87eb,2 +np.float64,0x3fdae32a24b5c654,0x3fe7f771bc108f6c,2 +np.float64,0xffe6c1fc93ad83f8,0xd541fe6a6a716756,2 +np.float64,0xbfd7b9c1d32f7384,0xbfe6fcdae563d638,2 +np.float64,0x800e1bea06fc37d4,0xaaa354c0bf61449c,2 +np.float64,0xbfd78f097aaf1e12,0xbfe6ef068329bdf4,2 +np.float64,0x7fea6a400874d47f,0x5542e905978ad722,2 +np.float64,0x8008b4377cb1686f,0xaaa074c87eee29f9,2 +np.float64,0x8002f3fb8d45e7f8,0xaa96f47ac539b614,2 +np.float64,0xbfcf2b3fd13e5680,0xbfe3fb91c0cc66ad,2 +np.float64,0xffecca2f5279945e,0xd54375f361075927,2 +np.float64,0x7ff0000000000000,0x7ff0000000000000,2 +np.float64,0x7f84d5a5a029ab4a,0x552178d1d4e8640e,2 +np.float64,0x3fea8a4b64351497,0x3fee10c332440eb2,2 +np.float64,0x800fe01ac1dfc036,0xaaa41b34d91a4bee,2 +np.float64,0x3fc0b3d8872167b1,0x3fe03b178d354f8d,2 +np.float64,0x5ee8b0acbdd17,0x2a9cf69f2e317729,2 +np.float64,0x8006ef0407adde09,0xaa9e82888f3dd83e,2 +np.float64,0x7fdbb08a07b76113,0x553e7e4e35b938b9,2 +np.float64,0x49663f9c92cc9,0x2a9a95e0affe5108,2 +np.float64,0x7fd9b87e79b370fc,0x553dc0b5cff3dc7d,2 +np.float64,0xbfd86ae657b0d5cc,0xbfe73584d02bdd2b,2 +np.float64,0x3fd4d4a13729a942,0x3fe6030a962aaaf8,2 +np.float64,0x7fcc246bcb3848d7,0x5538557309449bba,2 +np.float64,0xbfdc86a7d5b90d50,0xbfe871a2983c2a29,2 +np.float64,0xd2a6e995a54dd,0x2aa2e3e9c0fdd6c0,2 +np.float64,0x3f92eb447825d680,0x3fd0eb4fd2ba16d2,2 +np.float64,0x800d4001697a8003,0xaaa2ee358661b75c,2 +np.float64,0x3fd3705fd1a6e0c0,0x3fe582a6f321d7d6,2 +np.float64,0xbfcfdf51533fbea4,0xbfe421c3bdd9f2a3,2 +np.float64,0x3fe268e87964d1d1,0x3fea9d47e08aad8a,2 +np.float64,0x24b8901e49713,0x2a951adeefe7b31b,2 +np.float64,0x3fedb35d687b66bb,0x3fef36e440850bf8,2 +np.float64,0x3fb7ab5cbe2f56c0,0x3fdcf097380721c6,2 +np.float64,0x3f8c4eaa10389d54,0x3fceb7ecb605b73b,2 +np.float64,0xbfed831ed6fb063e,0xbfef25f462a336f1,2 +np.float64,0x7fd8c52112318a41,0x553d61b0ee609f58,2 +np.float64,0xbfe71c4ff76e38a0,0xbfecb5d32e789771,2 +np.float64,0xbfe35fb7b166bf70,0xbfeb12328e75ee6b,2 +np.float64,0x458e1a3a8b1c4,0x2a9a1cebadc81342,2 +np.float64,0x8003c1b3ad478368,0xaa98df5ed060b28c,2 +np.float64,0x7ff4000000000000,0x7ffc000000000000,2 +np.float64,0x7fe17098c162e131,0x5540775a9a3a104f,2 +np.float64,0xbfd95cb71732b96e,0xbfe7812acf7ea511,2 +np.float64,0x8000000000000001,0xa990000000000000,2 +np.float64,0xbfde0e7d9ebc1cfc,0xbfe8df9ca9e49a5b,2 +np.float64,0xffef4f67143e9ecd,0xd5440348a6a2f231,2 +np.float64,0x7fe37d23c826fa47,0x5541165de17caa03,2 +np.float64,0xbfcc0e5f85381cc0,0xbfe34b44b0deefe9,2 +np.float64,0x3fe858f1c470b1e4,0x3fed36ab90557d89,2 +np.float64,0x800e857278fd0ae5,0xaaa3847d13220545,2 +np.float64,0x3febd31a66f7a635,0x3fee8af90e66b043,2 +np.float64,0x7fd3fde1b127fbc2,0x553b5b186a49b968,2 +np.float64,0x3fd3dabb8b27b577,0x3fe5a99b446bed26,2 +np.float64,0xffeb4500f1768a01,0xd5431cab828e254a,2 +np.float64,0xffccca8fc6399520,0xd53884f8b505e79e,2 +np.float64,0xffeee9406b7dd280,0xd543ed6d27a1a899,2 +np.float64,0xffecdde0f0f9bbc1,0xd5437a6258b14092,2 +np.float64,0xe6b54005cd6a8,0x2aa378c25938dfda,2 +np.float64,0x7fe610f1022c21e1,0x5541cf460b972925,2 +np.float64,0xbfe5a170ec6b42e2,0xbfec1576081e3232,2 diff --git a/local_packages/numpy/_core/tests/data/umath-validation-set-cos.csv b/local_packages/numpy/_core/tests/data/umath-validation-set-cos.csv new file mode 100644 index 0000000..258ae48 --- /dev/null +++ b/local_packages/numpy/_core/tests/data/umath-validation-set-cos.csv @@ -0,0 +1,1375 @@ +dtype,input,output,ulperrortol +## +ve denormals ## +np.float32,0x004b4716,0x3f800000,2 +np.float32,0x007b2490,0x3f800000,2 +np.float32,0x007c99fa,0x3f800000,2 +np.float32,0x00734a0c,0x3f800000,2 +np.float32,0x0070de24,0x3f800000,2 +np.float32,0x007fffff,0x3f800000,2 +np.float32,0x00000001,0x3f800000,2 +## -ve denormals ## +np.float32,0x80495d65,0x3f800000,2 +np.float32,0x806894f6,0x3f800000,2 +np.float32,0x80555a76,0x3f800000,2 +np.float32,0x804e1fb8,0x3f800000,2 +np.float32,0x80687de9,0x3f800000,2 +np.float32,0x807fffff,0x3f800000,2 +np.float32,0x80000001,0x3f800000,2 +## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ## +np.float32,0x00000000,0x3f800000,2 +np.float32,0x80000000,0x3f800000,2 +np.float32,0x00800000,0x3f800000,2 +np.float32,0x80800000,0x3f800000,2 +## 1.00f + 0x00000001 ## +np.float32,0x3f800000,0x3f0a5140,2 +np.float32,0x3f800001,0x3f0a513f,2 +np.float32,0x3f800002,0x3f0a513d,2 +np.float32,0xc090a8b0,0xbe4332ce,2 +np.float32,0x41ce3184,0x3f4d1de1,2 +np.float32,0xc1d85848,0xbeaa8980,2 +np.float32,0x402b8820,0xbf653aa3,2 +np.float32,0x42b4e454,0xbf4a338b,2 +np.float32,0x42a67a60,0x3c58202e,2 +np.float32,0x41d92388,0xbed987c7,2 +np.float32,0x422dd66c,0x3f5dcab3,2 +np.float32,0xc28f5be6,0xbf5688d8,2 +np.float32,0x41ab2674,0xbf53aa3b,2 +np.float32,0x3f490fdb,0x3f3504f3,2 +np.float32,0xbf490fdb,0x3f3504f3,2 +np.float32,0x3fc90fdb,0xb33bbd2e,2 +np.float32,0xbfc90fdb,0xb33bbd2e,2 +np.float32,0x40490fdb,0xbf800000,2 +np.float32,0xc0490fdb,0xbf800000,2 +np.float32,0x3fc90fdb,0xb33bbd2e,2 +np.float32,0xbfc90fdb,0xb33bbd2e,2 +np.float32,0x40490fdb,0xbf800000,2 +np.float32,0xc0490fdb,0xbf800000,2 +np.float32,0x40c90fdb,0x3f800000,2 +np.float32,0xc0c90fdb,0x3f800000,2 +np.float32,0x4016cbe4,0xbf3504f3,2 +np.float32,0xc016cbe4,0xbf3504f3,2 +np.float32,0x4096cbe4,0x324cde2e,2 +np.float32,0xc096cbe4,0x324cde2e,2 +np.float32,0x4116cbe4,0xbf800000,2 +np.float32,0xc116cbe4,0xbf800000,2 +np.float32,0x40490fdb,0xbf800000,2 +np.float32,0xc0490fdb,0xbf800000,2 +np.float32,0x40c90fdb,0x3f800000,2 +np.float32,0xc0c90fdb,0x3f800000,2 +np.float32,0x41490fdb,0x3f800000,2 +np.float32,0xc1490fdb,0x3f800000,2 +np.float32,0x407b53d2,0xbf3504f1,2 +np.float32,0xc07b53d2,0xbf3504f1,2 +np.float32,0x40fb53d2,0xb4b5563d,2 +np.float32,0xc0fb53d2,0xb4b5563d,2 +np.float32,0x417b53d2,0xbf800000,2 +np.float32,0xc17b53d2,0xbf800000,2 +np.float32,0x4096cbe4,0x324cde2e,2 +np.float32,0xc096cbe4,0x324cde2e,2 +np.float32,0x4116cbe4,0xbf800000,2 +np.float32,0xc116cbe4,0xbf800000,2 +np.float32,0x4196cbe4,0x3f800000,2 +np.float32,0xc196cbe4,0x3f800000,2 +np.float32,0x40afede0,0x3f3504f7,2 +np.float32,0xc0afede0,0x3f3504f7,2 +np.float32,0x412fede0,0x353222c4,2 +np.float32,0xc12fede0,0x353222c4,2 +np.float32,0x41afede0,0xbf800000,2 +np.float32,0xc1afede0,0xbf800000,2 +np.float32,0x40c90fdb,0x3f800000,2 +np.float32,0xc0c90fdb,0x3f800000,2 +np.float32,0x41490fdb,0x3f800000,2 +np.float32,0xc1490fdb,0x3f800000,2 +np.float32,0x41c90fdb,0x3f800000,2 +np.float32,0xc1c90fdb,0x3f800000,2 +np.float32,0x40e231d6,0x3f3504f3,2 +np.float32,0xc0e231d6,0x3f3504f3,2 +np.float32,0x416231d6,0xb319a6a2,2 +np.float32,0xc16231d6,0xb319a6a2,2 +np.float32,0x41e231d6,0xbf800000,2 +np.float32,0xc1e231d6,0xbf800000,2 +np.float32,0x40fb53d2,0xb4b5563d,2 +np.float32,0xc0fb53d2,0xb4b5563d,2 +np.float32,0x417b53d2,0xbf800000,2 +np.float32,0xc17b53d2,0xbf800000,2 +np.float32,0x41fb53d2,0x3f800000,2 +np.float32,0xc1fb53d2,0x3f800000,2 +np.float32,0x410a3ae7,0xbf3504fb,2 +np.float32,0xc10a3ae7,0xbf3504fb,2 +np.float32,0x418a3ae7,0x35b08908,2 +np.float32,0xc18a3ae7,0x35b08908,2 +np.float32,0x420a3ae7,0xbf800000,2 +np.float32,0xc20a3ae7,0xbf800000,2 +np.float32,0x4116cbe4,0xbf800000,2 +np.float32,0xc116cbe4,0xbf800000,2 +np.float32,0x4196cbe4,0x3f800000,2 +np.float32,0xc196cbe4,0x3f800000,2 +np.float32,0x4216cbe4,0x3f800000,2 +np.float32,0xc216cbe4,0x3f800000,2 +np.float32,0x41235ce2,0xbf3504ef,2 +np.float32,0xc1235ce2,0xbf3504ef,2 +np.float32,0x41a35ce2,0xb53889b6,2 +np.float32,0xc1a35ce2,0xb53889b6,2 +np.float32,0x42235ce2,0xbf800000,2 +np.float32,0xc2235ce2,0xbf800000,2 +np.float32,0x412fede0,0x353222c4,2 +np.float32,0xc12fede0,0x353222c4,2 +np.float32,0x41afede0,0xbf800000,2 +np.float32,0xc1afede0,0xbf800000,2 +np.float32,0x422fede0,0x3f800000,2 +np.float32,0xc22fede0,0x3f800000,2 +np.float32,0x413c7edd,0x3f3504f4,2 +np.float32,0xc13c7edd,0x3f3504f4,2 +np.float32,0x41bc7edd,0x33800add,2 +np.float32,0xc1bc7edd,0x33800add,2 +np.float32,0x423c7edd,0xbf800000,2 +np.float32,0xc23c7edd,0xbf800000,2 +np.float32,0x41490fdb,0x3f800000,2 +np.float32,0xc1490fdb,0x3f800000,2 +np.float32,0x41c90fdb,0x3f800000,2 +np.float32,0xc1c90fdb,0x3f800000,2 +np.float32,0x42490fdb,0x3f800000,2 +np.float32,0xc2490fdb,0x3f800000,2 +np.float32,0x4155a0d9,0x3f3504eb,2 +np.float32,0xc155a0d9,0x3f3504eb,2 +np.float32,0x41d5a0d9,0xb5b3bc81,2 +np.float32,0xc1d5a0d9,0xb5b3bc81,2 +np.float32,0x4255a0d9,0xbf800000,2 +np.float32,0xc255a0d9,0xbf800000,2 +np.float32,0x416231d6,0xb319a6a2,2 +np.float32,0xc16231d6,0xb319a6a2,2 +np.float32,0x41e231d6,0xbf800000,2 +np.float32,0xc1e231d6,0xbf800000,2 +np.float32,0x426231d6,0x3f800000,2 +np.float32,0xc26231d6,0x3f800000,2 +np.float32,0x416ec2d4,0xbf3504f7,2 +np.float32,0xc16ec2d4,0xbf3504f7,2 +np.float32,0x41eec2d4,0x353ef0a7,2 +np.float32,0xc1eec2d4,0x353ef0a7,2 +np.float32,0x426ec2d4,0xbf800000,2 +np.float32,0xc26ec2d4,0xbf800000,2 +np.float32,0x417b53d2,0xbf800000,2 +np.float32,0xc17b53d2,0xbf800000,2 +np.float32,0x41fb53d2,0x3f800000,2 +np.float32,0xc1fb53d2,0x3f800000,2 +np.float32,0x427b53d2,0x3f800000,2 +np.float32,0xc27b53d2,0x3f800000,2 +np.float32,0x4183f268,0xbf3504e7,2 +np.float32,0xc183f268,0xbf3504e7,2 +np.float32,0x4203f268,0xb6059a13,2 +np.float32,0xc203f268,0xb6059a13,2 +np.float32,0x4283f268,0xbf800000,2 +np.float32,0xc283f268,0xbf800000,2 +np.float32,0x418a3ae7,0x35b08908,2 +np.float32,0xc18a3ae7,0x35b08908,2 +np.float32,0x420a3ae7,0xbf800000,2 +np.float32,0xc20a3ae7,0xbf800000,2 +np.float32,0x428a3ae7,0x3f800000,2 +np.float32,0xc28a3ae7,0x3f800000,2 +np.float32,0x41908365,0x3f3504f0,2 +np.float32,0xc1908365,0x3f3504f0,2 +np.float32,0x42108365,0xb512200d,2 +np.float32,0xc2108365,0xb512200d,2 +np.float32,0x42908365,0xbf800000,2 +np.float32,0xc2908365,0xbf800000,2 +np.float32,0x4196cbe4,0x3f800000,2 +np.float32,0xc196cbe4,0x3f800000,2 +np.float32,0x4216cbe4,0x3f800000,2 +np.float32,0xc216cbe4,0x3f800000,2 +np.float32,0x4296cbe4,0x3f800000,2 +np.float32,0xc296cbe4,0x3f800000,2 +np.float32,0x419d1463,0x3f3504ef,2 +np.float32,0xc19d1463,0x3f3504ef,2 +np.float32,0x421d1463,0xb5455799,2 +np.float32,0xc21d1463,0xb5455799,2 +np.float32,0x429d1463,0xbf800000,2 +np.float32,0xc29d1463,0xbf800000,2 +np.float32,0x41a35ce2,0xb53889b6,2 +np.float32,0xc1a35ce2,0xb53889b6,2 +np.float32,0x42235ce2,0xbf800000,2 +np.float32,0xc2235ce2,0xbf800000,2 +np.float32,0x42a35ce2,0x3f800000,2 +np.float32,0xc2a35ce2,0x3f800000,2 +np.float32,0x41a9a561,0xbf3504ff,2 +np.float32,0xc1a9a561,0xbf3504ff,2 +np.float32,0x4229a561,0x360733d0,2 +np.float32,0xc229a561,0x360733d0,2 +np.float32,0x42a9a561,0xbf800000,2 +np.float32,0xc2a9a561,0xbf800000,2 +np.float32,0x41afede0,0xbf800000,2 +np.float32,0xc1afede0,0xbf800000,2 +np.float32,0x422fede0,0x3f800000,2 +np.float32,0xc22fede0,0x3f800000,2 +np.float32,0x42afede0,0x3f800000,2 +np.float32,0xc2afede0,0x3f800000,2 +np.float32,0x41b6365e,0xbf3504f6,2 +np.float32,0xc1b6365e,0xbf3504f6,2 +np.float32,0x4236365e,0x350bb91c,2 +np.float32,0xc236365e,0x350bb91c,2 +np.float32,0x42b6365e,0xbf800000,2 +np.float32,0xc2b6365e,0xbf800000,2 +np.float32,0x41bc7edd,0x33800add,2 +np.float32,0xc1bc7edd,0x33800add,2 +np.float32,0x423c7edd,0xbf800000,2 +np.float32,0xc23c7edd,0xbf800000,2 +np.float32,0x42bc7edd,0x3f800000,2 +np.float32,0xc2bc7edd,0x3f800000,2 +np.float32,0x41c2c75c,0x3f3504f8,2 +np.float32,0xc1c2c75c,0x3f3504f8,2 +np.float32,0x4242c75c,0x354bbe8a,2 +np.float32,0xc242c75c,0x354bbe8a,2 +np.float32,0x42c2c75c,0xbf800000,2 +np.float32,0xc2c2c75c,0xbf800000,2 +np.float32,0x41c90fdb,0x3f800000,2 +np.float32,0xc1c90fdb,0x3f800000,2 +np.float32,0x42490fdb,0x3f800000,2 +np.float32,0xc2490fdb,0x3f800000,2 +np.float32,0x42c90fdb,0x3f800000,2 +np.float32,0xc2c90fdb,0x3f800000,2 +np.float32,0x41cf585a,0x3f3504e7,2 +np.float32,0xc1cf585a,0x3f3504e7,2 +np.float32,0x424f585a,0xb608cd8c,2 +np.float32,0xc24f585a,0xb608cd8c,2 +np.float32,0x42cf585a,0xbf800000,2 +np.float32,0xc2cf585a,0xbf800000,2 +np.float32,0x41d5a0d9,0xb5b3bc81,2 +np.float32,0xc1d5a0d9,0xb5b3bc81,2 +np.float32,0x4255a0d9,0xbf800000,2 +np.float32,0xc255a0d9,0xbf800000,2 +np.float32,0x42d5a0d9,0x3f800000,2 +np.float32,0xc2d5a0d9,0x3f800000,2 +np.float32,0x41dbe958,0xbf350507,2 +np.float32,0xc1dbe958,0xbf350507,2 +np.float32,0x425be958,0x365eab75,2 +np.float32,0xc25be958,0x365eab75,2 +np.float32,0x42dbe958,0xbf800000,2 +np.float32,0xc2dbe958,0xbf800000,2 +np.float32,0x41e231d6,0xbf800000,2 +np.float32,0xc1e231d6,0xbf800000,2 +np.float32,0x426231d6,0x3f800000,2 +np.float32,0xc26231d6,0x3f800000,2 +np.float32,0x42e231d6,0x3f800000,2 +np.float32,0xc2e231d6,0x3f800000,2 +np.float32,0x41e87a55,0xbf3504ef,2 +np.float32,0xc1e87a55,0xbf3504ef,2 +np.float32,0x42687a55,0xb552257b,2 +np.float32,0xc2687a55,0xb552257b,2 +np.float32,0x42e87a55,0xbf800000,2 +np.float32,0xc2e87a55,0xbf800000,2 +np.float32,0x41eec2d4,0x353ef0a7,2 +np.float32,0xc1eec2d4,0x353ef0a7,2 +np.float32,0x426ec2d4,0xbf800000,2 +np.float32,0xc26ec2d4,0xbf800000,2 +np.float32,0x42eec2d4,0x3f800000,2 +np.float32,0xc2eec2d4,0x3f800000,2 +np.float32,0x41f50b53,0x3f3504ff,2 +np.float32,0xc1f50b53,0x3f3504ff,2 +np.float32,0x42750b53,0x360a6748,2 +np.float32,0xc2750b53,0x360a6748,2 +np.float32,0x42f50b53,0xbf800000,2 +np.float32,0xc2f50b53,0xbf800000,2 +np.float32,0x41fb53d2,0x3f800000,2 +np.float32,0xc1fb53d2,0x3f800000,2 +np.float32,0x427b53d2,0x3f800000,2 +np.float32,0xc27b53d2,0x3f800000,2 +np.float32,0x42fb53d2,0x3f800000,2 +np.float32,0xc2fb53d2,0x3f800000,2 +np.float32,0x4200ce28,0x3f3504f6,2 +np.float32,0xc200ce28,0x3f3504f6,2 +np.float32,0x4280ce28,0x34fdd672,2 +np.float32,0xc280ce28,0x34fdd672,2 +np.float32,0x4300ce28,0xbf800000,2 +np.float32,0xc300ce28,0xbf800000,2 +np.float32,0x4203f268,0xb6059a13,2 +np.float32,0xc203f268,0xb6059a13,2 +np.float32,0x4283f268,0xbf800000,2 +np.float32,0xc283f268,0xbf800000,2 +np.float32,0x4303f268,0x3f800000,2 +np.float32,0xc303f268,0x3f800000,2 +np.float32,0x420716a7,0xbf3504f8,2 +np.float32,0xc20716a7,0xbf3504f8,2 +np.float32,0x428716a7,0x35588c6d,2 +np.float32,0xc28716a7,0x35588c6d,2 +np.float32,0x430716a7,0xbf800000,2 +np.float32,0xc30716a7,0xbf800000,2 +np.float32,0x420a3ae7,0xbf800000,2 +np.float32,0xc20a3ae7,0xbf800000,2 +np.float32,0x428a3ae7,0x3f800000,2 +np.float32,0xc28a3ae7,0x3f800000,2 +np.float32,0x430a3ae7,0x3f800000,2 +np.float32,0xc30a3ae7,0x3f800000,2 +np.float32,0x420d5f26,0xbf3504e7,2 +np.float32,0xc20d5f26,0xbf3504e7,2 +np.float32,0x428d5f26,0xb60c0105,2 +np.float32,0xc28d5f26,0xb60c0105,2 +np.float32,0x430d5f26,0xbf800000,2 +np.float32,0xc30d5f26,0xbf800000,2 +np.float32,0x42108365,0xb512200d,2 +np.float32,0xc2108365,0xb512200d,2 +np.float32,0x42908365,0xbf800000,2 +np.float32,0xc2908365,0xbf800000,2 +np.float32,0x43108365,0x3f800000,2 +np.float32,0xc3108365,0x3f800000,2 +np.float32,0x4213a7a5,0x3f350507,2 +np.float32,0xc213a7a5,0x3f350507,2 +np.float32,0x4293a7a5,0x3661deee,2 +np.float32,0xc293a7a5,0x3661deee,2 +np.float32,0x4313a7a5,0xbf800000,2 +np.float32,0xc313a7a5,0xbf800000,2 +np.float32,0x4216cbe4,0x3f800000,2 +np.float32,0xc216cbe4,0x3f800000,2 +np.float32,0x4296cbe4,0x3f800000,2 +np.float32,0xc296cbe4,0x3f800000,2 +np.float32,0x4316cbe4,0x3f800000,2 +np.float32,0xc316cbe4,0x3f800000,2 +np.float32,0x4219f024,0x3f3504d8,2 +np.float32,0xc219f024,0x3f3504d8,2 +np.float32,0x4299f024,0xb69bde6c,2 +np.float32,0xc299f024,0xb69bde6c,2 +np.float32,0x4319f024,0xbf800000,2 +np.float32,0xc319f024,0xbf800000,2 +np.float32,0x421d1463,0xb5455799,2 +np.float32,0xc21d1463,0xb5455799,2 +np.float32,0x429d1463,0xbf800000,2 +np.float32,0xc29d1463,0xbf800000,2 +np.float32,0x431d1463,0x3f800000,2 +np.float32,0xc31d1463,0x3f800000,2 +np.float32,0x422038a3,0xbf350516,2 +np.float32,0xc22038a3,0xbf350516,2 +np.float32,0x42a038a3,0x36c6cd61,2 +np.float32,0xc2a038a3,0x36c6cd61,2 +np.float32,0x432038a3,0xbf800000,2 +np.float32,0xc32038a3,0xbf800000,2 +np.float32,0x42235ce2,0xbf800000,2 +np.float32,0xc2235ce2,0xbf800000,2 +np.float32,0x42a35ce2,0x3f800000,2 +np.float32,0xc2a35ce2,0x3f800000,2 +np.float32,0x43235ce2,0x3f800000,2 +np.float32,0xc3235ce2,0x3f800000,2 +np.float32,0x42268121,0xbf3504f6,2 +np.float32,0xc2268121,0xbf3504f6,2 +np.float32,0x42a68121,0x34e43aac,2 +np.float32,0xc2a68121,0x34e43aac,2 +np.float32,0x43268121,0xbf800000,2 +np.float32,0xc3268121,0xbf800000,2 +np.float32,0x4229a561,0x360733d0,2 +np.float32,0xc229a561,0x360733d0,2 +np.float32,0x42a9a561,0xbf800000,2 +np.float32,0xc2a9a561,0xbf800000,2 +np.float32,0x4329a561,0x3f800000,2 +np.float32,0xc329a561,0x3f800000,2 +np.float32,0x422cc9a0,0x3f3504f8,2 +np.float32,0xc22cc9a0,0x3f3504f8,2 +np.float32,0x42acc9a0,0x35655a50,2 +np.float32,0xc2acc9a0,0x35655a50,2 +np.float32,0x432cc9a0,0xbf800000,2 +np.float32,0xc32cc9a0,0xbf800000,2 +np.float32,0x422fede0,0x3f800000,2 +np.float32,0xc22fede0,0x3f800000,2 +np.float32,0x42afede0,0x3f800000,2 +np.float32,0xc2afede0,0x3f800000,2 +np.float32,0x432fede0,0x3f800000,2 +np.float32,0xc32fede0,0x3f800000,2 +np.float32,0x4233121f,0x3f3504e7,2 +np.float32,0xc233121f,0x3f3504e7,2 +np.float32,0x42b3121f,0xb60f347d,2 +np.float32,0xc2b3121f,0xb60f347d,2 +np.float32,0x4333121f,0xbf800000,2 +np.float32,0xc333121f,0xbf800000,2 +np.float32,0x4236365e,0x350bb91c,2 +np.float32,0xc236365e,0x350bb91c,2 +np.float32,0x42b6365e,0xbf800000,2 +np.float32,0xc2b6365e,0xbf800000,2 +np.float32,0x4336365e,0x3f800000,2 +np.float32,0xc336365e,0x3f800000,2 +np.float32,0x42395a9e,0xbf350507,2 +np.float32,0xc2395a9e,0xbf350507,2 +np.float32,0x42b95a9e,0x36651267,2 +np.float32,0xc2b95a9e,0x36651267,2 +np.float32,0x43395a9e,0xbf800000,2 +np.float32,0xc3395a9e,0xbf800000,2 +np.float32,0x423c7edd,0xbf800000,2 +np.float32,0xc23c7edd,0xbf800000,2 +np.float32,0x42bc7edd,0x3f800000,2 +np.float32,0xc2bc7edd,0x3f800000,2 +np.float32,0x433c7edd,0x3f800000,2 +np.float32,0xc33c7edd,0x3f800000,2 +np.float32,0x423fa31d,0xbf3504d7,2 +np.float32,0xc23fa31d,0xbf3504d7,2 +np.float32,0x42bfa31d,0xb69d7828,2 +np.float32,0xc2bfa31d,0xb69d7828,2 +np.float32,0x433fa31d,0xbf800000,2 +np.float32,0xc33fa31d,0xbf800000,2 +np.float32,0x4242c75c,0x354bbe8a,2 +np.float32,0xc242c75c,0x354bbe8a,2 +np.float32,0x42c2c75c,0xbf800000,2 +np.float32,0xc2c2c75c,0xbf800000,2 +np.float32,0x4342c75c,0x3f800000,2 +np.float32,0xc342c75c,0x3f800000,2 +np.float32,0x4245eb9c,0x3f350517,2 +np.float32,0xc245eb9c,0x3f350517,2 +np.float32,0x42c5eb9c,0x36c8671d,2 +np.float32,0xc2c5eb9c,0x36c8671d,2 +np.float32,0x4345eb9c,0xbf800000,2 +np.float32,0xc345eb9c,0xbf800000,2 +np.float32,0x42490fdb,0x3f800000,2 +np.float32,0xc2490fdb,0x3f800000,2 +np.float32,0x42c90fdb,0x3f800000,2 +np.float32,0xc2c90fdb,0x3f800000,2 +np.float32,0x43490fdb,0x3f800000,2 +np.float32,0xc3490fdb,0x3f800000,2 +np.float32,0x424c341a,0x3f3504f5,2 +np.float32,0xc24c341a,0x3f3504f5,2 +np.float32,0x42cc341a,0x34ca9ee6,2 +np.float32,0xc2cc341a,0x34ca9ee6,2 +np.float32,0x434c341a,0xbf800000,2 +np.float32,0xc34c341a,0xbf800000,2 +np.float32,0x424f585a,0xb608cd8c,2 +np.float32,0xc24f585a,0xb608cd8c,2 +np.float32,0x42cf585a,0xbf800000,2 +np.float32,0xc2cf585a,0xbf800000,2 +np.float32,0x434f585a,0x3f800000,2 +np.float32,0xc34f585a,0x3f800000,2 +np.float32,0x42527c99,0xbf3504f9,2 +np.float32,0xc2527c99,0xbf3504f9,2 +np.float32,0x42d27c99,0x35722833,2 +np.float32,0xc2d27c99,0x35722833,2 +np.float32,0x43527c99,0xbf800000,2 +np.float32,0xc3527c99,0xbf800000,2 +np.float32,0x4255a0d9,0xbf800000,2 +np.float32,0xc255a0d9,0xbf800000,2 +np.float32,0x42d5a0d9,0x3f800000,2 +np.float32,0xc2d5a0d9,0x3f800000,2 +np.float32,0x4355a0d9,0x3f800000,2 +np.float32,0xc355a0d9,0x3f800000,2 +np.float32,0x4258c518,0xbf3504e6,2 +np.float32,0xc258c518,0xbf3504e6,2 +np.float32,0x42d8c518,0xb61267f6,2 +np.float32,0xc2d8c518,0xb61267f6,2 +np.float32,0x4358c518,0xbf800000,2 +np.float32,0xc358c518,0xbf800000,2 +np.float32,0x425be958,0x365eab75,2 +np.float32,0xc25be958,0x365eab75,2 +np.float32,0x42dbe958,0xbf800000,2 +np.float32,0xc2dbe958,0xbf800000,2 +np.float32,0x435be958,0x3f800000,2 +np.float32,0xc35be958,0x3f800000,2 +np.float32,0x425f0d97,0x3f350508,2 +np.float32,0xc25f0d97,0x3f350508,2 +np.float32,0x42df0d97,0x366845e0,2 +np.float32,0xc2df0d97,0x366845e0,2 +np.float32,0x435f0d97,0xbf800000,2 +np.float32,0xc35f0d97,0xbf800000,2 +np.float32,0x426231d6,0x3f800000,2 +np.float32,0xc26231d6,0x3f800000,2 +np.float32,0x42e231d6,0x3f800000,2 +np.float32,0xc2e231d6,0x3f800000,2 +np.float32,0x436231d6,0x3f800000,2 +np.float32,0xc36231d6,0x3f800000,2 +np.float32,0x42655616,0x3f3504d7,2 +np.float32,0xc2655616,0x3f3504d7,2 +np.float32,0x42e55616,0xb69f11e5,2 +np.float32,0xc2e55616,0xb69f11e5,2 +np.float32,0x43655616,0xbf800000,2 +np.float32,0xc3655616,0xbf800000,2 +np.float32,0x42687a55,0xb552257b,2 +np.float32,0xc2687a55,0xb552257b,2 +np.float32,0x42e87a55,0xbf800000,2 +np.float32,0xc2e87a55,0xbf800000,2 +np.float32,0x43687a55,0x3f800000,2 +np.float32,0xc3687a55,0x3f800000,2 +np.float32,0x426b9e95,0xbf350517,2 +np.float32,0xc26b9e95,0xbf350517,2 +np.float32,0x42eb9e95,0x36ca00d9,2 +np.float32,0xc2eb9e95,0x36ca00d9,2 +np.float32,0x436b9e95,0xbf800000,2 +np.float32,0xc36b9e95,0xbf800000,2 +np.float32,0x426ec2d4,0xbf800000,2 +np.float32,0xc26ec2d4,0xbf800000,2 +np.float32,0x42eec2d4,0x3f800000,2 +np.float32,0xc2eec2d4,0x3f800000,2 +np.float32,0x436ec2d4,0x3f800000,2 +np.float32,0xc36ec2d4,0x3f800000,2 +np.float32,0x4271e713,0xbf3504f5,2 +np.float32,0xc271e713,0xbf3504f5,2 +np.float32,0x42f1e713,0x34b10321,2 +np.float32,0xc2f1e713,0x34b10321,2 +np.float32,0x4371e713,0xbf800000,2 +np.float32,0xc371e713,0xbf800000,2 +np.float32,0x42750b53,0x360a6748,2 +np.float32,0xc2750b53,0x360a6748,2 +np.float32,0x42f50b53,0xbf800000,2 +np.float32,0xc2f50b53,0xbf800000,2 +np.float32,0x43750b53,0x3f800000,2 +np.float32,0xc3750b53,0x3f800000,2 +np.float32,0x42782f92,0x3f3504f9,2 +np.float32,0xc2782f92,0x3f3504f9,2 +np.float32,0x42f82f92,0x357ef616,2 +np.float32,0xc2f82f92,0x357ef616,2 +np.float32,0x43782f92,0xbf800000,2 +np.float32,0xc3782f92,0xbf800000,2 +np.float32,0x427b53d2,0x3f800000,2 +np.float32,0xc27b53d2,0x3f800000,2 +np.float32,0x42fb53d2,0x3f800000,2 +np.float32,0xc2fb53d2,0x3f800000,2 +np.float32,0x437b53d2,0x3f800000,2 +np.float32,0xc37b53d2,0x3f800000,2 +np.float32,0x427e7811,0x3f3504e6,2 +np.float32,0xc27e7811,0x3f3504e6,2 +np.float32,0x42fe7811,0xb6159b6f,2 +np.float32,0xc2fe7811,0xb6159b6f,2 +np.float32,0x437e7811,0xbf800000,2 +np.float32,0xc37e7811,0xbf800000,2 +np.float32,0x4280ce28,0x34fdd672,2 +np.float32,0xc280ce28,0x34fdd672,2 +np.float32,0x4300ce28,0xbf800000,2 +np.float32,0xc300ce28,0xbf800000,2 +np.float32,0x4380ce28,0x3f800000,2 +np.float32,0xc380ce28,0x3f800000,2 +np.float32,0x42826048,0xbf350508,2 +np.float32,0xc2826048,0xbf350508,2 +np.float32,0x43026048,0x366b7958,2 +np.float32,0xc3026048,0x366b7958,2 +np.float32,0x43826048,0xbf800000,2 +np.float32,0xc3826048,0xbf800000,2 +np.float32,0x4283f268,0xbf800000,2 +np.float32,0xc283f268,0xbf800000,2 +np.float32,0x4303f268,0x3f800000,2 +np.float32,0xc303f268,0x3f800000,2 +np.float32,0x4383f268,0x3f800000,2 +np.float32,0xc383f268,0x3f800000,2 +np.float32,0x42858487,0xbf350504,2 +np.float32,0xc2858487,0xbf350504,2 +np.float32,0x43058487,0x363ea8be,2 +np.float32,0xc3058487,0x363ea8be,2 +np.float32,0x43858487,0xbf800000,2 +np.float32,0xc3858487,0xbf800000,2 +np.float32,0x428716a7,0x35588c6d,2 +np.float32,0xc28716a7,0x35588c6d,2 +np.float32,0x430716a7,0xbf800000,2 +np.float32,0xc30716a7,0xbf800000,2 +np.float32,0x438716a7,0x3f800000,2 +np.float32,0xc38716a7,0x3f800000,2 +np.float32,0x4288a8c7,0x3f350517,2 +np.float32,0xc288a8c7,0x3f350517,2 +np.float32,0x4308a8c7,0x36cb9a96,2 +np.float32,0xc308a8c7,0x36cb9a96,2 +np.float32,0x4388a8c7,0xbf800000,2 +np.float32,0xc388a8c7,0xbf800000,2 +np.float32,0x428a3ae7,0x3f800000,2 +np.float32,0xc28a3ae7,0x3f800000,2 +np.float32,0x430a3ae7,0x3f800000,2 +np.float32,0xc30a3ae7,0x3f800000,2 +np.float32,0x438a3ae7,0x3f800000,2 +np.float32,0xc38a3ae7,0x3f800000,2 +np.float32,0x428bcd06,0x3f3504f5,2 +np.float32,0xc28bcd06,0x3f3504f5,2 +np.float32,0x430bcd06,0x3497675b,2 +np.float32,0xc30bcd06,0x3497675b,2 +np.float32,0x438bcd06,0xbf800000,2 +np.float32,0xc38bcd06,0xbf800000,2 +np.float32,0x428d5f26,0xb60c0105,2 +np.float32,0xc28d5f26,0xb60c0105,2 +np.float32,0x430d5f26,0xbf800000,2 +np.float32,0xc30d5f26,0xbf800000,2 +np.float32,0x438d5f26,0x3f800000,2 +np.float32,0xc38d5f26,0x3f800000,2 +np.float32,0x428ef146,0xbf350526,2 +np.float32,0xc28ef146,0xbf350526,2 +np.float32,0x430ef146,0x3710bc40,2 +np.float32,0xc30ef146,0x3710bc40,2 +np.float32,0x438ef146,0xbf800000,2 +np.float32,0xc38ef146,0xbf800000,2 +np.float32,0x42908365,0xbf800000,2 +np.float32,0xc2908365,0xbf800000,2 +np.float32,0x43108365,0x3f800000,2 +np.float32,0xc3108365,0x3f800000,2 +np.float32,0x43908365,0x3f800000,2 +np.float32,0xc3908365,0x3f800000,2 +np.float32,0x42921585,0xbf3504e6,2 +np.float32,0xc2921585,0xbf3504e6,2 +np.float32,0x43121585,0xb618cee8,2 +np.float32,0xc3121585,0xb618cee8,2 +np.float32,0x43921585,0xbf800000,2 +np.float32,0xc3921585,0xbf800000,2 +np.float32,0x4293a7a5,0x3661deee,2 +np.float32,0xc293a7a5,0x3661deee,2 +np.float32,0x4313a7a5,0xbf800000,2 +np.float32,0xc313a7a5,0xbf800000,2 +np.float32,0x4393a7a5,0x3f800000,2 +np.float32,0xc393a7a5,0x3f800000,2 +np.float32,0x429539c5,0x3f350536,2 +np.float32,0xc29539c5,0x3f350536,2 +np.float32,0x431539c5,0x373bab34,2 +np.float32,0xc31539c5,0x373bab34,2 +np.float32,0x439539c5,0xbf800000,2 +np.float32,0xc39539c5,0xbf800000,2 +np.float32,0x4296cbe4,0x3f800000,2 +np.float32,0xc296cbe4,0x3f800000,2 +np.float32,0x4316cbe4,0x3f800000,2 +np.float32,0xc316cbe4,0x3f800000,2 +np.float32,0x4396cbe4,0x3f800000,2 +np.float32,0xc396cbe4,0x3f800000,2 +np.float32,0x42985e04,0x3f3504d7,2 +np.float32,0xc2985e04,0x3f3504d7,2 +np.float32,0x43185e04,0xb6a2455d,2 +np.float32,0xc3185e04,0xb6a2455d,2 +np.float32,0x43985e04,0xbf800000,2 +np.float32,0xc3985e04,0xbf800000,2 +np.float32,0x4299f024,0xb69bde6c,2 +np.float32,0xc299f024,0xb69bde6c,2 +np.float32,0x4319f024,0xbf800000,2 +np.float32,0xc319f024,0xbf800000,2 +np.float32,0x4399f024,0x3f800000,2 +np.float32,0xc399f024,0x3f800000,2 +np.float32,0x429b8243,0xbf3504ea,2 +np.float32,0xc29b8243,0xbf3504ea,2 +np.float32,0x431b8243,0xb5cb2eb8,2 +np.float32,0xc31b8243,0xb5cb2eb8,2 +np.float32,0x439b8243,0xbf800000,2 +np.float32,0xc39b8243,0xbf800000,2 +np.float32,0x435b2047,0x3f3504c1,2 +np.float32,0x42a038a2,0xb5e4ca7e,2 +np.float32,0x432038a2,0xbf800000,2 +np.float32,0x4345eb9b,0xbf800000,2 +np.float32,0x42c5eb9b,0xb5de638c,2 +np.float32,0x42eb9e94,0xb5d7fc9b,2 +np.float32,0x4350ea79,0x3631dadb,2 +np.float32,0x42dbe957,0xbf800000,2 +np.float32,0x425be957,0xb505522a,2 +np.float32,0x435be957,0x3f800000,2 +np.float32,0x46027eb2,0x3e7d94c9,2 +np.float32,0x4477baed,0xbe7f1824,2 +np.float32,0x454b8024,0x3e7f5268,2 +np.float32,0x455d2c09,0x3e7f40cb,2 +np.float32,0x4768d3de,0xba14b4af,2 +np.float32,0x46c1e7cd,0x3e7fb102,2 +np.float32,0x44a52949,0xbe7dc9d5,2 +np.float32,0x4454633a,0x3e7dbc7d,2 +np.float32,0x4689810b,0x3e7eb02b,2 +np.float32,0x473473cd,0xbe7eef6f,2 +np.float32,0x44a5193f,0x3e7e1b1f,2 +np.float32,0x46004b36,0x3e7dac59,2 +np.float32,0x467f604b,0x3d7ffd3a,2 +np.float32,0x45ea1805,0x3dffd2e0,2 +np.float32,0x457b6af3,0x3dff7831,2 +np.float32,0x44996159,0xbe7d85f4,2 +np.float32,0x47883553,0xbb80584e,2 +np.float32,0x44e19f0c,0xbdffcfe6,2 +np.float32,0x472b3bf6,0xbe7f7a82,2 +np.float32,0x4600bb4e,0x3a135e33,2 +np.float32,0x449f4556,0x3e7e42e5,2 +np.float32,0x474e9420,0x3dff77b2,2 +np.float32,0x45cbdb23,0x3dff7240,2 +np.float32,0x44222747,0x3dffb039,2 +np.float32,0x4772e419,0xbdff74b8,2 +np.float64,0x1,0x3ff0000000000000,1 +np.float64,0x8000000000000001,0x3ff0000000000000,1 +np.float64,0x10000000000000,0x3ff0000000000000,1 +np.float64,0x8010000000000000,0x3ff0000000000000,1 +np.float64,0x7fefffffffffffff,0xbfefffe62ecfab75,1 +np.float64,0xffefffffffffffff,0xbfefffe62ecfab75,1 +np.float64,0x7ff0000000000000,0xfff8000000000000,1 +np.float64,0xfff0000000000000,0xfff8000000000000,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0xbfc28bd9dd2517b4,0x3fefaa28ba13a702,1 +np.float64,0x3fb673c62e2ce790,0x3fefe083847a717f,1 +np.float64,0xbfe3e1dac7e7c3b6,0x3fea0500ba099f3a,1 +np.float64,0xbfbe462caa3c8c58,0x3fefc6c8b9c1c87c,1 +np.float64,0xbfb9353576326a68,0x3fefd8513e50e6b1,1 +np.float64,0xbfc05e798520bcf4,0x3fefbd1ad81cf089,1 +np.float64,0xbfe3ca3be2e79478,0x3fea12b995ea6574,1 +np.float64,0xbfde875d46bd0eba,0x3fec6d888662a824,1 +np.float64,0x3fafc4e02c3f89c0,0x3feff03c34bffd69,1 +np.float64,0xbf98855848310ac0,0x3feffda6c1588bdb,1 +np.float64,0x3fe66c51186cd8a2,0x3fe875c61c630ecb,1 +np.float64,0xbfedff1c3b7bfe38,0x3fe2f0c8c9e8fa39,1 +np.float64,0x3fd6082267ac1044,0x3fee1f6023695050,1 +np.float64,0xbfe78449b06f0894,0x3fe7bda2b223850e,1 +np.float64,0x3feedb8e63fdb71c,0x3fe23d5dfd2dd33f,1 +np.float64,0xbfc0a9de3d2153bc,0x3fefbaadf5e5285e,1 +np.float64,0x3fc04c67432098d0,0x3fefbdae07b7de8d,1 +np.float64,0xbfeeef84c4fddf0a,0x3fe22cf37f309d88,1 +np.float64,0x3fc04bb025209760,0x3fefbdb3d7d34ecf,1 +np.float64,0x3fd6b84d48ad709c,0x3fee013403da6e2a,1 +np.float64,0x3fec1ae25d7835c4,0x3fe46e62195cf274,1 +np.float64,0xbfdc6fdf9bb8dfc0,0x3fece48dc78bbb2e,1 +np.float64,0x3fb4db2c9229b660,0x3fefe4d42f79bf49,1 +np.float64,0xbfc0ed698521dad4,0x3fefb8785ea658c9,1 +np.float64,0xbfee82772b7d04ee,0x3fe2864a80efe8e9,1 +np.float64,0x3fd575b664aaeb6c,0x3fee37c669a12879,1 +np.float64,0x3fe4afb1c5e95f64,0x3fe98b177194439c,1 +np.float64,0x3fd93962f9b272c4,0x3fed8bef61876294,1 +np.float64,0x3fd97ae025b2f5c0,0x3fed7f4cfbf4d300,1 +np.float64,0xbfd9afdb1bb35fb6,0x3fed74fdc44dabb1,1 +np.float64,0x3f8ae65e3035cc80,0x3fefff4b1a0ea62b,1 +np.float64,0xbfe7e58664efcb0d,0x3fe77c02a1cbb670,1 +np.float64,0x3fe5f68b37ebed16,0x3fe8c10f849a5d4d,1 +np.float64,0x3fd9137d61b226fc,0x3fed9330eb4815a1,1 +np.float64,0x3fc146d019228da0,0x3fefb57e2d4d52f8,1 +np.float64,0xbfda6036edb4c06e,0x3fed521b2b578679,1 +np.float64,0xbfe78ddfb0ef1bc0,0x3fe7b734319a77e4,1 +np.float64,0x3fe0877823610ef0,0x3febd33a993dd786,1 +np.float64,0x3fbc61af2e38c360,0x3fefcdb4f889756d,1 +np.float64,0x3fd4dcdca4a9b9b8,0x3fee50962ffea5ae,1 +np.float64,0xbfe03cb29f607965,0x3febf7dbf640a75a,1 +np.float64,0xbfc81de407303bc8,0x3fef6f066cef64bc,1 +np.float64,0x3fd8dea42db1bd48,0x3fed9d3e00dbe0b3,1 +np.float64,0x3feac75e94f58ebe,0x3fe56f1f47f97896,1 +np.float64,0x3fb3a1ea6e2743d0,0x3fefe7ec1247cdaa,1 +np.float64,0x3fd695c0f4ad2b80,0x3fee0730bd40883d,1 +np.float64,0xbfd2c631f5a58c64,0x3feea20cbd1105d7,1 +np.float64,0xbfe978a8e1f2f152,0x3fe663014d40ad7a,1 +np.float64,0x3fd8b6b76ab16d70,0x3feda4c879aacc19,1 +np.float64,0x3feaafd30e755fa6,0x3fe5809514c28453,1 +np.float64,0x3fe1e37dc263c6fc,0x3feb20f9ad1f3f5c,1 +np.float64,0x3fd0ec7c24a1d8f8,0x3feee34048f43b75,1 +np.float64,0xbfe3881cbf67103a,0x3fea38d7886e6f53,1 +np.float64,0xbfd7023957ae0472,0x3fedf4471c765a1c,1 +np.float64,0xbfebc51c4ef78a38,0x3fe4b01c424e297b,1 +np.float64,0xbfe20a93eae41528,0x3feb0c2aa321d2e0,1 +np.float64,0x3fef39be867e737e,0x3fe1efaba9164d27,1 +np.float64,0x3fe8ea9576f1d52a,0x3fe6c7a8826ce1be,1 +np.float64,0x3fea921d91f5243c,0x3fe5968c6cf78963,1 +np.float64,0x3fd7ee5d31afdcbc,0x3fedc9f19d43fe61,1 +np.float64,0xbfe3ed581767dab0,0x3fe9fe4ee2f2b1cd,1 +np.float64,0xbfc40923d5281248,0x3fef9bd8ee9f6e68,1 +np.float64,0x3fe411a834682350,0x3fe9e9103854f057,1 +np.float64,0xbfedf6ccdf7bed9a,0x3fe2f77ad6543246,1 +np.float64,0xbfe8788a44f0f114,0x3fe7172f3aa0c742,1 +np.float64,0xbfce728f173ce520,0x3fef1954083bea04,1 +np.float64,0xbfd64dd0acac9ba2,0x3fee138c3293c246,1 +np.float64,0xbfe00669f5600cd4,0x3fec121443945350,1 +np.float64,0xbfe7152ba2ee2a58,0x3fe8079465d09846,1 +np.float64,0x3fe8654d8f70ca9c,0x3fe7247c94f09596,1 +np.float64,0x3fea68045cf4d008,0x3fe5b58cfe81a243,1 +np.float64,0xbfcd4779073a8ef4,0x3fef2a9d78153fa5,1 +np.float64,0xbfdb4456e5b688ae,0x3fed23b11614203f,1 +np.float64,0x3fcb5d59cd36bab0,0x3fef45818216a515,1 +np.float64,0xbfd914ff5ab229fe,0x3fed92e73746fea8,1 +np.float64,0x3fe4d211db69a424,0x3fe97653f433d15f,1 +np.float64,0xbfdbbb9224b77724,0x3fed0adb593dde80,1 +np.float64,0x3fd424ceafa8499c,0x3fee6d9124795d33,1 +np.float64,0x3feb5968f976b2d2,0x3fe501d116efbf54,1 +np.float64,0x3fee7d92a2fcfb26,0x3fe28a479b6a9dcf,1 +np.float64,0x3fc308e9972611d0,0x3fefa595f4df0c89,1 +np.float64,0x3fda79cd77b4f39c,0x3fed4cf8e69ba1f8,1 +np.float64,0x3fcbcf42d5379e88,0x3fef3f6a6a77c187,1 +np.float64,0x3fe13a1da662743c,0x3feb79504faea888,1 +np.float64,0xbfee4435f07c886c,0x3fe2b8ea98d2fc29,1 +np.float64,0x3fd65d68ccacbad0,0x3fee10e1ac7ada89,1 +np.float64,0x3fef2f89bb7e5f14,0x3fe1f81e882cc3f4,1 +np.float64,0xbfef0a7769fe14ef,0x3fe216bf384fc646,1 +np.float64,0x3fc065277320ca50,0x3fefbce44835c193,1 +np.float64,0x3fe9c1a74d73834e,0x3fe62e9ee0c2f2bf,1 +np.float64,0x3fd9d96e5db3b2dc,0x3fed6cd88eb51f6a,1 +np.float64,0x3fe02bf1c56057e4,0x3febfffc24b5a7ba,1 +np.float64,0xbfd6814350ad0286,0x3fee0ab9ad318b84,1 +np.float64,0x3f9fcbec583f97c0,0x3feffc0d0f1d8e75,1 +np.float64,0x3fe23524e5e46a4a,0x3feaf55372949a06,1 +np.float64,0xbfbdc95f6a3b92c0,0x3fefc89c21d44995,1 +np.float64,0x3fe961bb9cf2c378,0x3fe6735d6e1cca58,1 +np.float64,0xbfe8f1c370f1e387,0x3fe6c29d1be8bee9,1 +np.float64,0x3fd880d43ab101a8,0x3fedaee3c7ccfc96,1 +np.float64,0xbfedb37005fb66e0,0x3fe32d91ef2e3bd3,1 +np.float64,0xfdce287bfb9c5,0x3ff0000000000000,1 +np.float64,0x9aa1b9e735437,0x3ff0000000000000,1 +np.float64,0x6beac6e0d7d59,0x3ff0000000000000,1 +np.float64,0x47457aae8e8b0,0x3ff0000000000000,1 +np.float64,0x35ff13b46bfe3,0x3ff0000000000000,1 +np.float64,0xb9c0c82b73819,0x3ff0000000000000,1 +np.float64,0x1a8dc21a351b9,0x3ff0000000000000,1 +np.float64,0x7e87ef6afd0ff,0x3ff0000000000000,1 +np.float64,0x620a6588c414d,0x3ff0000000000000,1 +np.float64,0x7f366000fe6e,0x3ff0000000000000,1 +np.float64,0x787e39f4f0fc8,0x3ff0000000000000,1 +np.float64,0xf5134f1fea26a,0x3ff0000000000000,1 +np.float64,0xbce700ef79ce0,0x3ff0000000000000,1 +np.float64,0x144d7cc8289b1,0x3ff0000000000000,1 +np.float64,0xb9fbc5b973f79,0x3ff0000000000000,1 +np.float64,0xc3d6292d87ac5,0x3ff0000000000000,1 +np.float64,0xc1084e618210a,0x3ff0000000000000,1 +np.float64,0xb6b9eca56d73e,0x3ff0000000000000,1 +np.float64,0xc7ac4b858f58a,0x3ff0000000000000,1 +np.float64,0x516d75d2a2daf,0x3ff0000000000000,1 +np.float64,0x9dc089d93b811,0x3ff0000000000000,1 +np.float64,0x7b5f2840f6be6,0x3ff0000000000000,1 +np.float64,0x121d3ce8243a9,0x3ff0000000000000,1 +np.float64,0xf0be0337e17c1,0x3ff0000000000000,1 +np.float64,0xff58a5cbfeb15,0x3ff0000000000000,1 +np.float64,0xdaf1d07fb5e3a,0x3ff0000000000000,1 +np.float64,0x61d95382c3b2b,0x3ff0000000000000,1 +np.float64,0xe4df943fc9bf3,0x3ff0000000000000,1 +np.float64,0xf72ac2bdee559,0x3ff0000000000000,1 +np.float64,0x12dafbf625b60,0x3ff0000000000000,1 +np.float64,0xee11d427dc23b,0x3ff0000000000000,1 +np.float64,0xf4f8eb37e9f1e,0x3ff0000000000000,1 +np.float64,0xad7cb5df5af97,0x3ff0000000000000,1 +np.float64,0x59fc9b06b3f94,0x3ff0000000000000,1 +np.float64,0x3c3e65e4787ce,0x3ff0000000000000,1 +np.float64,0xe37bc993c6f79,0x3ff0000000000000,1 +np.float64,0x13bd6330277ad,0x3ff0000000000000,1 +np.float64,0x56cc2800ad986,0x3ff0000000000000,1 +np.float64,0x6203b8fcc4078,0x3ff0000000000000,1 +np.float64,0x75c7c8b8eb8fa,0x3ff0000000000000,1 +np.float64,0x5ebf8e00bd7f2,0x3ff0000000000000,1 +np.float64,0xda81f2f1b503f,0x3ff0000000000000,1 +np.float64,0x6adb17d6d5b64,0x3ff0000000000000,1 +np.float64,0x1ba68eee374d3,0x3ff0000000000000,1 +np.float64,0xeecf6fbbdd9ee,0x3ff0000000000000,1 +np.float64,0x24d6dd8e49add,0x3ff0000000000000,1 +np.float64,0xdf7cb81bbef97,0x3ff0000000000000,1 +np.float64,0xafd7be1b5faf8,0x3ff0000000000000,1 +np.float64,0xdb90ca35b721a,0x3ff0000000000000,1 +np.float64,0xa72903a14e521,0x3ff0000000000000,1 +np.float64,0x14533ee028a7,0x3ff0000000000000,1 +np.float64,0x7951540cf2a2b,0x3ff0000000000000,1 +np.float64,0x22882be045106,0x3ff0000000000000,1 +np.float64,0x136270d626c4f,0x3ff0000000000000,1 +np.float64,0x6a0f5744d41ec,0x3ff0000000000000,1 +np.float64,0x21e0d1aa43c1b,0x3ff0000000000000,1 +np.float64,0xee544155dca88,0x3ff0000000000000,1 +np.float64,0xcbe8aac797d16,0x3ff0000000000000,1 +np.float64,0x6c065e80d80e,0x3ff0000000000000,1 +np.float64,0xe57f0411cafe1,0x3ff0000000000000,1 +np.float64,0xdec3a6bdbd875,0x3ff0000000000000,1 +np.float64,0xf4d23a0fe9a48,0x3ff0000000000000,1 +np.float64,0xda77ef47b4efe,0x3ff0000000000000,1 +np.float64,0x8c405c9b1880c,0x3ff0000000000000,1 +np.float64,0x4eced5149d9db,0x3ff0000000000000,1 +np.float64,0x16b6552c2d6cc,0x3ff0000000000000,1 +np.float64,0x6fbc262cdf785,0x3ff0000000000000,1 +np.float64,0x628c3844c5188,0x3ff0000000000000,1 +np.float64,0x6d827d2cdb050,0x3ff0000000000000,1 +np.float64,0xd1bfdf29a37fc,0x3ff0000000000000,1 +np.float64,0xd85400fdb0a80,0x3ff0000000000000,1 +np.float64,0xcc420b2d98842,0x3ff0000000000000,1 +np.float64,0xac41d21b5883b,0x3ff0000000000000,1 +np.float64,0x432f18d4865e4,0x3ff0000000000000,1 +np.float64,0xe7e89a1bcfd14,0x3ff0000000000000,1 +np.float64,0x9b1141d536228,0x3ff0000000000000,1 +np.float64,0x6805f662d00bf,0x3ff0000000000000,1 +np.float64,0xc76552358ecab,0x3ff0000000000000,1 +np.float64,0x4ae8ffee95d21,0x3ff0000000000000,1 +np.float64,0x4396c096872d9,0x3ff0000000000000,1 +np.float64,0x6e8e55d4dd1cb,0x3ff0000000000000,1 +np.float64,0x4c2e33dc985c7,0x3ff0000000000000,1 +np.float64,0xbce814a579d03,0x3ff0000000000000,1 +np.float64,0x911681b5222d0,0x3ff0000000000000,1 +np.float64,0x5f90a4b2bf215,0x3ff0000000000000,1 +np.float64,0x26f76be84deee,0x3ff0000000000000,1 +np.float64,0xb2f7536165eeb,0x3ff0000000000000,1 +np.float64,0x4de4e6089bc9d,0x3ff0000000000000,1 +np.float64,0xf2e016afe5c03,0x3ff0000000000000,1 +np.float64,0xb9b7b949736f7,0x3ff0000000000000,1 +np.float64,0x3363ea1866c7e,0x3ff0000000000000,1 +np.float64,0xd1a3bd6ba3478,0x3ff0000000000000,1 +np.float64,0xae89f3595d13f,0x3ff0000000000000,1 +np.float64,0xddbd9601bb7c,0x3ff0000000000000,1 +np.float64,0x5de41a06bbc84,0x3ff0000000000000,1 +np.float64,0xfd58c86dfab19,0x3ff0000000000000,1 +np.float64,0x24922e8c49247,0x3ff0000000000000,1 +np.float64,0xcda040339b408,0x3ff0000000000000,1 +np.float64,0x5fe500b2bfca1,0x3ff0000000000000,1 +np.float64,0x9214abb924296,0x3ff0000000000000,1 +np.float64,0x800609fe0a2c13fd,0x3ff0000000000000,1 +np.float64,0x800c7c6fe518f8e0,0x3ff0000000000000,1 +np.float64,0x800a1a9491b4352a,0x3ff0000000000000,1 +np.float64,0x800b45e0e8968bc2,0x3ff0000000000000,1 +np.float64,0x8008497e57d092fd,0x3ff0000000000000,1 +np.float64,0x800b9c0af0173816,0x3ff0000000000000,1 +np.float64,0x800194cccb43299a,0x3ff0000000000000,1 +np.float64,0x8001c91ef183923f,0x3ff0000000000000,1 +np.float64,0x800f25b5ccde4b6c,0x3ff0000000000000,1 +np.float64,0x800ce63ccc79cc7a,0x3ff0000000000000,1 +np.float64,0x800d8fb2e83b1f66,0x3ff0000000000000,1 +np.float64,0x80083cd06f7079a1,0x3ff0000000000000,1 +np.float64,0x800823598e9046b3,0x3ff0000000000000,1 +np.float64,0x8001c1319de38264,0x3ff0000000000000,1 +np.float64,0x800f2b68543e56d1,0x3ff0000000000000,1 +np.float64,0x80022a4f4364549f,0x3ff0000000000000,1 +np.float64,0x800f51badf7ea376,0x3ff0000000000000,1 +np.float64,0x8003fbf31e27f7e7,0x3ff0000000000000,1 +np.float64,0x800d4c00e2fa9802,0x3ff0000000000000,1 +np.float64,0x800023b974804774,0x3ff0000000000000,1 +np.float64,0x800860778990c0ef,0x3ff0000000000000,1 +np.float64,0x800a15c241542b85,0x3ff0000000000000,1 +np.float64,0x8003097d9dc612fc,0x3ff0000000000000,1 +np.float64,0x800d77d8541aefb1,0x3ff0000000000000,1 +np.float64,0x80093804ab52700a,0x3ff0000000000000,1 +np.float64,0x800d2b3bfd7a5678,0x3ff0000000000000,1 +np.float64,0x800da24bcd5b4498,0x3ff0000000000000,1 +np.float64,0x8006eee1c28dddc4,0x3ff0000000000000,1 +np.float64,0x80005137fa40a271,0x3ff0000000000000,1 +np.float64,0x8007a3fbc22f47f8,0x3ff0000000000000,1 +np.float64,0x800dcd97071b9b2e,0x3ff0000000000000,1 +np.float64,0x80065b36048cb66d,0x3ff0000000000000,1 +np.float64,0x8004206ba72840d8,0x3ff0000000000000,1 +np.float64,0x8007e82b98cfd058,0x3ff0000000000000,1 +np.float64,0x8001a116ed23422f,0x3ff0000000000000,1 +np.float64,0x800c69e9ff18d3d4,0x3ff0000000000000,1 +np.float64,0x8003843688e7086e,0x3ff0000000000000,1 +np.float64,0x800335e3b8866bc8,0x3ff0000000000000,1 +np.float64,0x800e3308f0bc6612,0x3ff0000000000000,1 +np.float64,0x8002a9ec55c553d9,0x3ff0000000000000,1 +np.float64,0x80001c2084e03842,0x3ff0000000000000,1 +np.float64,0x800bc2bbd8d78578,0x3ff0000000000000,1 +np.float64,0x800ae6bcc555cd7a,0x3ff0000000000000,1 +np.float64,0x80083f7a13907ef5,0x3ff0000000000000,1 +np.float64,0x800d83ed76db07db,0x3ff0000000000000,1 +np.float64,0x800a12251974244b,0x3ff0000000000000,1 +np.float64,0x800a69c95714d393,0x3ff0000000000000,1 +np.float64,0x800cd5a85639ab51,0x3ff0000000000000,1 +np.float64,0x800e0e1837bc1c31,0x3ff0000000000000,1 +np.float64,0x8007b5ca39ef6b95,0x3ff0000000000000,1 +np.float64,0x800cf961cad9f2c4,0x3ff0000000000000,1 +np.float64,0x80066e8fc14cdd20,0x3ff0000000000000,1 +np.float64,0x8001cb8c7b43971a,0x3ff0000000000000,1 +np.float64,0x800002df68a005c0,0x3ff0000000000000,1 +np.float64,0x8003e6681567ccd1,0x3ff0000000000000,1 +np.float64,0x800b039126b60723,0x3ff0000000000000,1 +np.float64,0x800d2e1b663a5c37,0x3ff0000000000000,1 +np.float64,0x800188b3e2a31169,0x3ff0000000000000,1 +np.float64,0x8001f272e943e4e7,0x3ff0000000000000,1 +np.float64,0x800d7f53607afea7,0x3ff0000000000000,1 +np.float64,0x80092cafa4f25960,0x3ff0000000000000,1 +np.float64,0x800fc009f07f8014,0x3ff0000000000000,1 +np.float64,0x8003da896507b514,0x3ff0000000000000,1 +np.float64,0x800d4d1b4c3a9a37,0x3ff0000000000000,1 +np.float64,0x8007a835894f506c,0x3ff0000000000000,1 +np.float64,0x80057ba0522af741,0x3ff0000000000000,1 +np.float64,0x8009b7054b336e0b,0x3ff0000000000000,1 +np.float64,0x800b2c6c125658d9,0x3ff0000000000000,1 +np.float64,0x8008b1840ad16308,0x3ff0000000000000,1 +np.float64,0x8007ea0e3befd41d,0x3ff0000000000000,1 +np.float64,0x800dd658683bacb1,0x3ff0000000000000,1 +np.float64,0x8008cda48fd19b49,0x3ff0000000000000,1 +np.float64,0x8003acca14c75995,0x3ff0000000000000,1 +np.float64,0x8008bd152d717a2b,0x3ff0000000000000,1 +np.float64,0x80010d1ea3621a3e,0x3ff0000000000000,1 +np.float64,0x800130b78b826170,0x3ff0000000000000,1 +np.float64,0x8002cf3a46e59e75,0x3ff0000000000000,1 +np.float64,0x800b76e7fa76edd0,0x3ff0000000000000,1 +np.float64,0x800e065fe1dc0cc0,0x3ff0000000000000,1 +np.float64,0x8000dd527ea1baa6,0x3ff0000000000000,1 +np.float64,0x80032cb234665965,0x3ff0000000000000,1 +np.float64,0x800affc1acb5ff84,0x3ff0000000000000,1 +np.float64,0x80074be23fee97c5,0x3ff0000000000000,1 +np.float64,0x8004f83eafc9f07e,0x3ff0000000000000,1 +np.float64,0x800b02a115560543,0x3ff0000000000000,1 +np.float64,0x800b324a55766495,0x3ff0000000000000,1 +np.float64,0x800ffbcfd69ff7a0,0x3ff0000000000000,1 +np.float64,0x800830bc7b906179,0x3ff0000000000000,1 +np.float64,0x800cbafe383975fd,0x3ff0000000000000,1 +np.float64,0x8001ee42bfe3dc86,0x3ff0000000000000,1 +np.float64,0x8005b00fdc0b6020,0x3ff0000000000000,1 +np.float64,0x8005e7addd0bcf5c,0x3ff0000000000000,1 +np.float64,0x8001ae4cb0635c9a,0x3ff0000000000000,1 +np.float64,0x80098a9941131533,0x3ff0000000000000,1 +np.float64,0x800334c929466993,0x3ff0000000000000,1 +np.float64,0x8009568239d2ad05,0x3ff0000000000000,1 +np.float64,0x800f0639935e0c73,0x3ff0000000000000,1 +np.float64,0x800cebce7499d79d,0x3ff0000000000000,1 +np.float64,0x800482ee4c2905dd,0x3ff0000000000000,1 +np.float64,0x8007b7bd9e2f6f7c,0x3ff0000000000000,1 +np.float64,0x3fe654469f2ca88d,0x3fe8853f6c01ffb3,1 +np.float64,0x3feb4d7297369ae5,0x3fe50ad5bb621408,1 +np.float64,0x3feef53ba43dea77,0x3fe2283f356f8658,1 +np.float64,0x3fddf564eabbeaca,0x3fec8ec0e0dead9c,1 +np.float64,0x3fd3a69078274d21,0x3fee80e05c320000,1 +np.float64,0x3fecdafe5d39b5fd,0x3fe3d91a5d440fd9,1 +np.float64,0x3fd93286bc32650d,0x3fed8d40696cd10e,1 +np.float64,0x3fc0d34eb821a69d,0x3fefb954023d4284,1 +np.float64,0x3fc7b4b9a02f6973,0x3fef73e8739787ce,1 +np.float64,0x3fe08c839a611907,0x3febd0bc6f5641cd,1 +np.float64,0x3fb3d1758627a2eb,0x3fefe776f6183f96,1 +np.float64,0x3fef93c9ff3f2794,0x3fe1a4d2f622627d,1 +np.float64,0x3fea8d0041351a01,0x3fe59a52a1c78c9e,1 +np.float64,0x3fe3e26a30e7c4d4,0x3fea04ad3e0bbf8d,1 +np.float64,0x3fe5a34c9f6b4699,0x3fe8f57c5ccd1eab,1 +np.float64,0x3fc21ef859243df1,0x3fefae0b68a3a2e7,1 +np.float64,0x3fed7dd585fafbab,0x3fe35860041e5b0d,1 +np.float64,0x3fe5abacf22b575a,0x3fe8f03d8b6ef0f2,1 +np.float64,0x3fe426451f284c8a,0x3fe9dcf21f13205b,1 +np.float64,0x3fc01f6456203ec9,0x3fefbf19e2a8e522,1 +np.float64,0x3fe1cf2772239e4f,0x3feb2bbd645c7697,1 +np.float64,0x3fd18c4ace231896,0x3feecdfdd086c110,1 +np.float64,0x3fe8387d5b7070fb,0x3fe74358f2ec4910,1 +np.float64,0x3fdce51c2239ca38,0x3feccb2ae5459632,1 +np.float64,0x3fe5b0f2e4eb61e6,0x3fe8ecef4dbe4277,1 +np.float64,0x3fe1ceeb08a39dd6,0x3feb2bdd4dcfb3df,1 +np.float64,0x3febc5899d778b13,0x3fe4afc8dd8ad228,1 +np.float64,0x3fe7a47fbe2f48ff,0x3fe7a7fd9b352ea5,1 +np.float64,0x3fe7f74e1fafee9c,0x3fe76feb2755b247,1 +np.float64,0x3fe2bfad04e57f5a,0x3feaa9b46adddaeb,1 +np.float64,0x3fd06a090320d412,0x3feef40c334f8fba,1 +np.float64,0x3fdc97297d392e53,0x3fecdc16a3e22fcb,1 +np.float64,0x3fdc1a3f3838347e,0x3fecf6db2769d404,1 +np.float64,0x3fcca90096395201,0x3fef338156fcd218,1 +np.float64,0x3fed464733fa8c8e,0x3fe38483f0465d91,1 +np.float64,0x3fe7e067d82fc0d0,0x3fe77f7c8c9de896,1 +np.float64,0x3fc014fa0b2029f4,0x3fefbf6d84c933f8,1 +np.float64,0x3fd3bf1524277e2a,0x3fee7d2997b74dec,1 +np.float64,0x3fec153b86782a77,0x3fe472bb5497bb2a,1 +np.float64,0x3fd3e4d9d5a7c9b4,0x3fee776842691902,1 +np.float64,0x3fea6c0e2c74d81c,0x3fe5b2954cb458d9,1 +np.float64,0x3fee8f6a373d1ed4,0x3fe27bb9e348125b,1 +np.float64,0x3fd30c6dd42618dc,0x3fee97d2cab2b0bc,1 +np.float64,0x3fe4f90e6d69f21d,0x3fe95ea3dd4007f2,1 +np.float64,0x3fe271d467e4e3a9,0x3fead470d6d4008b,1 +np.float64,0x3fef2983897e5307,0x3fe1fd1a4debe33b,1 +np.float64,0x3fe980cc83b30199,0x3fe65d2fb8a0eb46,1 +np.float64,0x3fdfdf53db3fbea8,0x3fec1cf95b2a1cc7,1 +np.float64,0x3fe4d5307ba9aa61,0x3fe974701b4156cb,1 +np.float64,0x3fdb4e2345b69c47,0x3fed21aa6c146512,1 +np.float64,0x3fe3f7830327ef06,0x3fe9f85f6c88c2a8,1 +np.float64,0x3fca915fb63522bf,0x3fef502b73a52ecf,1 +np.float64,0x3fe66d3709ecda6e,0x3fe87531d7372d7a,1 +np.float64,0x3fd86000bcb0c001,0x3fedb5018dd684ca,1 +np.float64,0x3fe516e5feea2dcc,0x3fe94c68b111404e,1 +np.float64,0x3fd83c53dd3078a8,0x3fedbb9e5dd9e165,1 +np.float64,0x3fedfeeb673bfdd7,0x3fe2f0f0253c5d5d,1 +np.float64,0x3fe0dc6f9c21b8df,0x3feba8e2452410c2,1 +np.float64,0x3fbe154d643c2a9b,0x3fefc780a9357457,1 +np.float64,0x3fe5f63986abec73,0x3fe8c1434951a40a,1 +np.float64,0x3fbce0e50839c1ca,0x3fefcbeeaa27de75,1 +np.float64,0x3fd7ef5c5c2fdeb9,0x3fedc9c3022495b3,1 +np.float64,0x3fc1073914220e72,0x3fefb79de80fc0fd,1 +np.float64,0x3fe1a93c3d235278,0x3feb3fb21f86ac67,1 +np.float64,0x3fe321ee53e643dd,0x3fea72e2999f1e22,1 +np.float64,0x3fa881578c3102af,0x3feff69e6e51e0d6,1 +np.float64,0x3fd313482a262690,0x3fee96d161199495,1 +np.float64,0x3fe7272cd6ae4e5a,0x3fe7fbacbd0d8f43,1 +np.float64,0x3fd6cf4015ad9e80,0x3fedfd3513d544b8,1 +np.float64,0x3fc67b7e6d2cf6fd,0x3fef81f5c16923a4,1 +np.float64,0x3fa1999c14233338,0x3feffb2913a14184,1 +np.float64,0x3fc74eb8dd2e9d72,0x3fef78909a138e3c,1 +np.float64,0x3fc0b9274921724f,0x3fefba2ebd5f3e1c,1 +np.float64,0x3fd53fa156aa7f43,0x3fee40a18e952e88,1 +np.float64,0x3feaccbca4b59979,0x3fe56b22b33eb713,1 +np.float64,0x3fe6a01e3a2d403c,0x3fe8543fbd820ecc,1 +np.float64,0x3fd392a869a72551,0x3fee83e0ffe0e8de,1 +np.float64,0x3fe44d8928689b12,0x3fe9c5bf3c8fffdb,1 +np.float64,0x3fca3f209f347e41,0x3fef5461b6fa0924,1 +np.float64,0x3fee9e84b07d3d09,0x3fe26f638f733549,1 +np.float64,0x3faf49acb03e9359,0x3feff0b583cd8c48,1 +np.float64,0x3fea874b2af50e96,0x3fe59e882fa6febf,1 +np.float64,0x3fc50b72772a16e5,0x3fef918777dc41be,1 +np.float64,0x3fe861d1d4f0c3a4,0x3fe726e44d9d42c2,1 +np.float64,0x3fcadd2e2535ba5c,0x3fef4c3e2b56da38,1 +np.float64,0x3fea59c29cb4b385,0x3fe5c0043e586439,1 +np.float64,0x3fc1ffef0d23ffde,0x3fefaf22be452d13,1 +np.float64,0x3fc2d8dbc125b1b8,0x3fefa75b646d8e4e,1 +np.float64,0x3fd66c6471acd8c9,0x3fee0e5038b895c0,1 +np.float64,0x3fd0854adfa10a96,0x3feef0945bcc5c99,1 +np.float64,0x3feaac7076f558e1,0x3fe58316c23a82ad,1 +np.float64,0x3fdda49db3bb493b,0x3feca0e347c0ad6f,1 +np.float64,0x3fe43a539de874a7,0x3fe9d11d722d4822,1 +np.float64,0x3feeee3ebbfddc7d,0x3fe22dffd251e9af,1 +np.float64,0x3f8ee2c5b03dc58b,0x3fefff11855a7b6c,1 +np.float64,0x3fcd7107c63ae210,0x3fef2840bb55ca52,1 +np.float64,0x3f8d950d203b2a1a,0x3fefff253a08e40e,1 +np.float64,0x3fd40a5e57a814bd,0x3fee71a633c761fc,1 +np.float64,0x3fee836ec83d06de,0x3fe28580975be2fd,1 +np.float64,0x3fd7bbe87f2f77d1,0x3fedd31f661890cc,1 +np.float64,0xbfe05bf138a0b7e2,0x3febe8a000d96e47,1 +np.float64,0xbf88bddd90317bc0,0x3fefff66f6e2ff26,1 +np.float64,0xbfdc9cbb12393976,0x3fecdae2982335db,1 +np.float64,0xbfd85b4eccb0b69e,0x3fedb5e0dd87f702,1 +np.float64,0xbfe5c326cb2b864e,0x3fe8e180f525fa12,1 +np.float64,0xbfe381a0e4a70342,0x3fea3c8e5e3ab78e,1 +np.float64,0xbfe58d892c2b1b12,0x3fe9031551617aed,1 +np.float64,0xbfd7f3a52cafe74a,0x3fedc8fa97edd080,1 +np.float64,0xbfef3417bc7e682f,0x3fe1f45989f6a009,1 +np.float64,0xbfddfb8208bbf704,0x3fec8d5fa9970773,1 +np.float64,0xbfdab69bcc356d38,0x3fed40b2f6c347c6,1 +np.float64,0xbfed3f7cf17a7efa,0x3fe389e4ff4d9235,1 +np.float64,0xbfe47675d9a8ecec,0x3fe9ad6829a69e94,1 +np.float64,0xbfd030e2902061c6,0x3feefb3f811e024f,1 +np.float64,0xbfc376ac7226ed58,0x3fefa1798712b37e,1 +np.float64,0xbfdb7e54a0b6fcaa,0x3fed17a974c4bc28,1 +np.float64,0xbfdb7d5d5736faba,0x3fed17dcf31a8d84,1 +np.float64,0xbf876bd6502ed7c0,0x3fefff76dce6232c,1 +np.float64,0xbfd211e6c02423ce,0x3feebba41f0a1764,1 +np.float64,0xbfb443e3962887c8,0x3fefe658953629d4,1 +np.float64,0xbfe81b09e9b03614,0x3fe757882e4fdbae,1 +np.float64,0xbfdcb905d2b9720c,0x3fecd4c22cfe84e5,1 +np.float64,0xbfe3b62d99276c5b,0x3fea1e5520b3098d,1 +np.float64,0xbfbf05b25c3e0b68,0x3fefc3ecc04bca8e,1 +np.float64,0xbfdedc885b3db910,0x3fec59e22feb49f3,1 +np.float64,0xbfe33aa282667545,0x3fea64f2d55ec471,1 +np.float64,0xbfec84745a3908e9,0x3fe41cb3214e7044,1 +np.float64,0xbfddefdff1bbdfc0,0x3fec8fff88d4d0ec,1 +np.float64,0xbfd26ae6aca4d5ce,0x3feeaf208c7fedf6,1 +np.float64,0xbfee010591fc020b,0x3fe2ef3e57211a5e,1 +np.float64,0xbfb8cfddca319fb8,0x3fefd98d8f7918ed,1 +np.float64,0xbfe991648f3322c9,0x3fe6514e54670bae,1 +np.float64,0xbfee63fd087cc7fa,0x3fe29f1bfa3297cc,1 +np.float64,0xbfe1685942a2d0b2,0x3feb617f5f839eee,1 +np.float64,0xbfc6fc2fd62df860,0x3fef7c4698fd58cf,1 +np.float64,0xbfe42723d3a84e48,0x3fe9dc6ef7243e90,1 +np.float64,0xbfc3a7e89d274fd0,0x3fef9f99e3314e77,1 +np.float64,0xbfeb4c9521f6992a,0x3fe50b7c919bc6d8,1 +np.float64,0xbf707b34e020f680,0x3fefffef05e30264,1 +np.float64,0xbfc078478e20f090,0x3fefbc479305d5aa,1 +np.float64,0xbfd494ac4ca92958,0x3fee5c11f1cd8269,1 +np.float64,0xbfdaf888a035f112,0x3fed3346ae600469,1 +np.float64,0xbfa5d8ed502bb1e0,0x3feff88b0f262609,1 +np.float64,0xbfeec0cbfffd8198,0x3fe253543b2371cb,1 +np.float64,0xbfe594b5986b296b,0x3fe8fe9b39fb3940,1 +np.float64,0xbfc8ece7c631d9d0,0x3fef652bd0611ac7,1 +np.float64,0xbfd8ffeca0b1ffda,0x3fed96ebdf9b65cb,1 +np.float64,0xbfba9b221e353648,0x3fefd3cc21e2f15c,1 +np.float64,0xbfca63a52c34c74c,0x3fef52848eb9ed3b,1 +np.float64,0xbfe588e9b06b11d4,0x3fe905f7403e8881,1 +np.float64,0xbfc76f82db2edf04,0x3fef77138fe9bbc2,1 +np.float64,0xbfeeb3f334bd67e6,0x3fe25ddadb1096d6,1 +np.float64,0xbfbf2b64ce3e56c8,0x3fefc35a9555f6df,1 +np.float64,0xbfe9920e4ff3241c,0x3fe650d4ab8f5c42,1 +np.float64,0xbfb4a54c02294a98,0x3fefe55fc85ae5e9,1 +np.float64,0xbfe353b0c766a762,0x3fea56c02d17e4b7,1 +np.float64,0xbfd99961a4b332c4,0x3fed795fcd00dbf9,1 +np.float64,0xbfef191ddabe323c,0x3fe20aa79524f636,1 +np.float64,0xbfb25d060224ba10,0x3fefeaeee5cc8c0b,1 +np.float64,0xbfe6022428ec0448,0x3fe8b9b46e776194,1 +np.float64,0xbfed1a236cba3447,0x3fe3a76bee0d9861,1 +np.float64,0xbfc59671e72b2ce4,0x3fef8bc4daef6f14,1 +np.float64,0xbfdf2711703e4e22,0x3fec4886a8c9ceb5,1 +np.float64,0xbfeb7e207536fc41,0x3fe4e610c783f168,1 +np.float64,0xbfe6cdf5bcad9bec,0x3fe8365f8a59bc81,1 +np.float64,0xbfe55294adaaa52a,0x3fe927b0af5ccd09,1 +np.float64,0xbfdf4a88913e9512,0x3fec4036df58ba74,1 +np.float64,0xbfebb7efe4376fe0,0x3fe4ba276006992d,1 +np.float64,0xbfe09f29cfa13e54,0x3febc77f4f9c95e7,1 +np.float64,0xbfdf8c75653f18ea,0x3fec30ac924e4f46,1 +np.float64,0xbfefd601c7ffac04,0x3fe16d6f21bcb9c1,1 +np.float64,0xbfeae97ff5f5d300,0x3fe555bb5b87efe9,1 +np.float64,0xbfed427f02fa84fe,0x3fe387830db093bc,1 +np.float64,0xbfa33909cc267210,0x3feffa3a1bcb50dd,1 +np.float64,0xbfe9aa4bf5f35498,0x3fe63f6e98f6aa0f,1 +np.float64,0xbfe2d7349b25ae69,0x3fea9caa7c331e7e,1 +np.float64,0xbfcdbb2a3a3b7654,0x3fef2401c9659e4b,1 +np.float64,0xbfc8a90919315214,0x3fef686fe7fc0513,1 +np.float64,0xbfe62a98df2c5532,0x3fe89ff22a02cc6b,1 +np.float64,0xbfdc0f67b3b81ed0,0x3fecf928b637798f,1 +np.float64,0xbfebb32bf6f76658,0x3fe4bdc893c09698,1 +np.float64,0xbfec067996380cf3,0x3fe47e132741db97,1 +np.float64,0xbfd9774e1d32ee9c,0x3fed7ffe1e87c434,1 +np.float64,0xbfef989890bf3131,0x3fe1a0d025c80cf4,1 +np.float64,0xbfe59887e62b3110,0x3fe8fc382a3d4197,1 +np.float64,0xbfdea0a11e3d4142,0x3fec67b987e236ec,1 +np.float64,0xbfe2ec495825d892,0x3fea90efb231602d,1 +np.float64,0xbfb329c5c2265388,0x3fefe90f1b8209c3,1 +np.float64,0xbfdcd2dcd339a5ba,0x3feccf24c60b1478,1 +np.float64,0xbfe537ea18aa6fd4,0x3fe938237e217fe0,1 +np.float64,0xbfe8675ce170ceba,0x3fe723105925ce3a,1 +np.float64,0xbfd70723acae0e48,0x3fedf369ac070e65,1 +np.float64,0xbfea9d8692b53b0d,0x3fe58e1ee42e3fdb,1 +np.float64,0xbfcfeb96653fd72c,0x3fef029770033bdc,1 +np.float64,0xbfcc06c92d380d94,0x3fef3c69797d9b0a,1 +np.float64,0xbfe16b7c4f62d6f8,0x3feb5fdf9f0a9a07,1 +np.float64,0xbfed4d7a473a9af4,0x3fe37ecee27b1eb7,1 +np.float64,0xbfe6a6f6942d4ded,0x3fe84fccdf762b19,1 +np.float64,0xbfda46d867348db0,0x3fed572d928fa657,1 +np.float64,0xbfdbd9482db7b290,0x3fed049b5f907b52,1 +np.float64,0x7fe992ceb933259c,0xbfeb15af92aad70e,1 +np.float64,0x7fe3069204a60d23,0xbfe5eeff454240e9,1 +np.float64,0x7fe729dbf32e53b7,0xbfefe0528a330e4c,1 +np.float64,0x7fec504fb638a09e,0x3fd288e95dbedf65,1 +np.float64,0x7fe1d30167a3a602,0xbfeffc41f946fd02,1 +np.float64,0x7fed7f8ffd3aff1f,0x3fefe68ec604a19d,1 +np.float64,0x7fd2f23635a5e46b,0x3fea63032efbb447,1 +np.float64,0x7fd4c86db1a990da,0x3fdf6b9f7888db5d,1 +np.float64,0x7fe7554db6eeaa9a,0x3fe1b41476861bb0,1 +np.float64,0x7fe34e823ba69d03,0x3fefc435532e6294,1 +np.float64,0x7fec5c82fef8b905,0x3fef8f0c6473034f,1 +np.float64,0x7feba221bff74442,0xbfea95b81eb19b47,1 +np.float64,0x7fe74808a5ae9010,0xbfd3aa322917c3e5,1 +np.float64,0x7fdf41b7e0be836f,0x3fd14283c7147282,1 +np.float64,0x7fec09892f381311,0x3fe5240376ae484b,1 +np.float64,0x7faaf80bf435f017,0x3fe20227fa811423,1 +np.float64,0x7f8422d8402845b0,0x3fe911714593b8a0,1 +np.float64,0x7fd23a7fada474fe,0x3feff9f40aa37e9c,1 +np.float64,0x7fef4a4806fe948f,0x3fec6eca89cb4a62,1 +np.float64,0x7fe1e71cf763ce39,0xbfea6ac63f9ba457,1 +np.float64,0x7fe3e555be27caaa,0xbfe75b305d0dbbfd,1 +np.float64,0x7fcb8bac96371758,0xbfe8b126077f9d4c,1 +np.float64,0x7fc98e2c84331c58,0x3fef9092eb0bc85a,1 +np.float64,0x7fe947cf2b728f9d,0xbfebfff2c5b7d198,1 +np.float64,0x7feee8058c3dd00a,0xbfef21ebaae2eb17,1 +np.float64,0x7fef61d8d5bec3b1,0xbfdf1a032fb1c864,1 +np.float64,0x7fcf714b6f3ee296,0x3fe6fc89a8084098,1 +np.float64,0x7fa9a8b44c335168,0xbfeb16c149cea943,1 +np.float64,0x7fd175c482a2eb88,0xbfef64d341e73f88,1 +np.float64,0x7feab8e6a87571cc,0x3feb10069c397464,1 +np.float64,0x7fe3ade72de75bcd,0x3fd1753e333d5790,1 +np.float64,0x7fb26d87d224db0f,0xbfe753d36b18f4ca,1 +np.float64,0x7fdb7ef159b6fde2,0x3fe5c0a6044d3607,1 +np.float64,0x7fd5af86422b5f0c,0x3fe77193c95f6484,1 +np.float64,0x7fee9e00b07d3c00,0x3fe864d494596845,1 +np.float64,0x7fef927a147f24f3,0xbfe673b14715693d,1 +np.float64,0x7fd0aea63c215d4b,0xbfeff435f119fce9,1 +np.float64,0x7fd02e3796a05c6e,0x3fe4f7e3706e9a3d,1 +np.float64,0x7fd3ed61da27dac3,0xbfefef2f057f168c,1 +np.float64,0x7fefaca0d4ff5941,0x3fd3e8ad205cd4ab,1 +np.float64,0x7feb659e06f6cb3b,0x3fd64d803203e027,1 +np.float64,0x7fc94ccfaf32999e,0x3fee04922209369a,1 +np.float64,0x7feb4ec294f69d84,0xbfd102763a056c89,1 +np.float64,0x7fe2ada6ac655b4c,0x3fef4f6792aa6093,1 +np.float64,0x7fe5f40fdc2be81f,0xbfb4a6327186eee8,1 +np.float64,0x7fe7584bc3eeb097,0xbfd685b8ff94651d,1 +np.float64,0x7fe45d276be8ba4e,0x3fee53b13f7e442f,1 +np.float64,0x7fe6449b3d6c8935,0xbfe7e08bafa75251,1 +np.float64,0x7f8d62e6b03ac5cc,0x3fe73d30762f38fd,1 +np.float64,0x7fe3a76f72a74ede,0xbfeb48a28bc60968,1 +np.float64,0x7fd057706920aee0,0x3fdece8fa06f626c,1 +np.float64,0x7fe45ae158e8b5c2,0x3fe7a70f47b4d349,1 +np.float64,0x7fea8a5a983514b4,0x3fefb053d5f9ddd7,1 +np.float64,0x7fdd1e86ab3a3d0c,0x3fe3cded1b93816b,1 +np.float64,0x7fdb456108b68ac1,0xbfe37574c0b9bf8f,1 +np.float64,0x7fe972602432e4bf,0x3fef9a26e65ec01c,1 +np.float64,0x7fdbe2385637c470,0x3fed541df57969e1,1 +np.float64,0x7fe57f03602afe06,0x3fbd90f595cbbd94,1 +np.float64,0x7feb0ceb68f619d6,0xbfeae9cb8ee5261f,1 +np.float64,0x7fe6abfe6c6d57fc,0xbfef40a6edaca26f,1 +np.float64,0x7fe037ea08606fd3,0xbfda817d75858597,1 +np.float64,0x7fdd75a52dbaeb49,0x3feef2a0d91d6aa1,1 +np.float64,0x7fe8f9af66b1f35e,0xbfedfceef2a3bfc9,1 +np.float64,0x7fedf762b53beec4,0x3fd8b4f21ef69ee3,1 +np.float64,0x7fe99295b7f3252a,0x3feffc24d970383e,1 +np.float64,0x7fe797b0172f2f5f,0x3fee089aa56f7ce8,1 +np.float64,0x7fed89dcc97b13b9,0xbfcfa2bb0c3ea41f,1 +np.float64,0x7fae9e8d5c3d3d1a,0xbfe512ffe16c6b08,1 +np.float64,0x7fefaecbe27f5d97,0x3fbfc718a5e972f1,1 +np.float64,0x7fce0236d93c046d,0xbfa9b7cd790db256,1 +np.float64,0x7fa9689aac32d134,0x3feced501946628a,1 +np.float64,0x7feb1469e93628d3,0x3fef2a988e7673ed,1 +np.float64,0x7fdba78344b74f06,0xbfe092e78965b30c,1 +np.float64,0x7fece54c3fb9ca97,0x3fd3cfd184bed2e6,1 +np.float64,0x7fdb84212b370841,0xbfe25ebf2db6ee55,1 +np.float64,0x7fbe3e8bf23c7d17,0x3fe2ee72df573345,1 +np.float64,0x7fe43d9803687b2f,0xbfed2eff6a9e66a0,1 +np.float64,0x7fb0f9c00a21f37f,0x3feff70f3276fdb7,1 +np.float64,0x7fea0c6cbbb418d8,0xbfefa612494798b2,1 +np.float64,0x7fe4b3239e296646,0xbfe74dd959af8cdc,1 +np.float64,0x7fe5c6a773eb8d4e,0xbfd06944048f8d2b,1 +np.float64,0x7fb1c1278223824e,0xbfeb533a34655bde,1 +np.float64,0x7fd21c09ee243813,0xbfe921ccbc9255c3,1 +np.float64,0x7fe051020c20a203,0x3fbd519d700c1f2f,1 +np.float64,0x7fe0c76845e18ed0,0x3fefb9595191a31b,1 +np.float64,0x7fe6b0b57b6d616a,0xbf8c59a8ba5fcd9a,1 +np.float64,0x7fd386c460270d88,0x3fe8ffea5d1a5c46,1 +np.float64,0x7feeb884713d7108,0x3fee9b2247ef6c0d,1 +np.float64,0x7fd85f71b6b0bee2,0xbfefc30ec3e28f07,1 +np.float64,0x7fc341366426826c,0x3fd4234d35386d3b,1 +np.float64,0x7fe56482dd6ac905,0x3fe7189de6a50668,1 +np.float64,0x7fec67a2e3f8cf45,0xbfef86d0b940f37f,1 +np.float64,0x7fe38b202fe7163f,0x3feb90b75caa2030,1 +np.float64,0x7fdcbc64883978c8,0x3fed4f758fbf64d4,1 +np.float64,0x7fea5f0598f4be0a,0x3fdd503a417b3d4d,1 +np.float64,0x7fda3b6bcf3476d7,0x3fea6e9af3f7f9f5,1 +np.float64,0x7fc7d7896c2faf12,0x3fda2bebc36a2363,1 +np.float64,0x7fe7e8e2626fd1c4,0xbfe7d5e390c4cc3f,1 +np.float64,0x7fde0f3d7abc1e7a,0xbfede7a0ecfa3606,1 +np.float64,0x7fc692b8f52d2571,0x3feff0cd7ab6f61b,1 +np.float64,0xff92d1fce825a400,0xbfc921c36fc014fa,1 +np.float64,0xffdec3af2fbd875e,0xbfed6a77e6a0364e,1 +np.float64,0xffef46e7d9be8dcf,0xbfed7d39476f7e27,1 +np.float64,0xffe2c2ce4525859c,0x3fe1757261316bc9,1 +np.float64,0xffe27c8b5864f916,0xbfefe017c0d43457,1 +np.float64,0xffe184d7442309ae,0x3fa1fb8c49dba596,1 +np.float64,0xffddf5f98d3bebf4,0x3fee4f8eaa5f847e,1 +np.float64,0xffee3ef354fc7de6,0xbfebfd60fa51b2ba,1 +np.float64,0xffdecb3e85bd967e,0x3fbfad2667a8b468,1 +np.float64,0xffe4ee900b29dd20,0xbfdc02dc626f91cd,1 +np.float64,0xffd3179f6da62f3e,0xbfe2cfe442511776,1 +np.float64,0xffe99ef7cef33def,0x3f50994542a7f303,1 +np.float64,0xffe2b66b1ae56cd6,0xbfefe3e066eb6329,1 +np.float64,0xff8f72aff03ee540,0x3fe9c46224cf5003,1 +np.float64,0xffd29beb85a537d8,0x3fefcb0b6166be71,1 +np.float64,0xffaef02d4c3de060,0xbfef5fb71028fc72,1 +np.float64,0xffd39a2a89273456,0x3fe6d4b183205dca,1 +np.float64,0xffef8a9392ff1526,0x3fedb99fbf402468,1 +np.float64,0xffb9b3f31e3367e8,0x3fee1005270fcf80,1 +np.float64,0xffed9d5c693b3ab8,0x3fd110f4b02365d5,1 +np.float64,0xffeaba45f9f5748b,0x3fe499e0a6f4afb2,1 +np.float64,0xffdba3f70d3747ee,0xbfca0c30493ae519,1 +np.float64,0xffa35b985426b730,0xbfdb625df56bcf45,1 +np.float64,0xffccbc9728397930,0x3fc53cbc59020704,1 +np.float64,0xffef73c942bee792,0xbfdc647a7a5e08be,1 +np.float64,0xffcb5acfb236b5a0,0x3feeb4ec038c39fc,1 +np.float64,0xffea116fe2b422df,0x3fefe03b6ae0b435,1 +np.float64,0xffe97de6e7b2fbcd,0xbfd2025698fab9eb,1 +np.float64,0xffdddba314bbb746,0x3fd31f0fdb8f93be,1 +np.float64,0xffd613a24a2c2744,0xbfebbb1efae884b3,1 +np.float64,0xffe3d938aa67b271,0xbfc2099cead3d3be,1 +np.float64,0xffdf08c2e33e1186,0xbfefd236839b900d,1 +np.float64,0xffea6ba8bd34d751,0x3fe8dfc032114719,1 +np.float64,0xffe3202083e64040,0x3fed513b81432a22,1 +np.float64,0xffb2397db62472f8,0xbfee7d7fe1c3f76c,1 +np.float64,0xffd9d0682ab3a0d0,0x3fe0bcf9e531ad79,1 +np.float64,0xffc293df202527c0,0xbfe58d0bdece5e64,1 +np.float64,0xffe1422c7da28458,0xbf81bd72595f2341,1 +np.float64,0xffd64e4ed4ac9c9e,0x3fa4334cc011c703,1 +np.float64,0xffe40a970ae8152e,0x3fead3d258b55b7d,1 +np.float64,0xffc8c2f2223185e4,0xbfef685f07c8b9fd,1 +np.float64,0xffe4b2f7216965ee,0x3fe3861d3d896a83,1 +np.float64,0xffdb531db3b6a63c,0x3fe18cb8332dd59d,1 +np.float64,0xffe8e727a3b1ce4e,0xbfe57b15abb677b9,1 +np.float64,0xffe530c1e12a6184,0xbfb973ea5535e48f,1 +np.float64,0xffe6f7849cedef08,0x3fd39a37ec5af4b6,1 +np.float64,0xffead62a78b5ac54,0x3fe69b3f6c7aa24b,1 +np.float64,0xffeefdd725fdfbad,0xbfc08a456111fdd5,1 +np.float64,0xffe682182fed0430,0x3fecc7c1292761d2,1 +np.float64,0xffee0ca8dcbc1951,0x3fef6cc361ef2c19,1 +np.float64,0xffec9b338f393666,0x3fefa9ab8e0471b5,1 +np.float64,0xffe13c5e29a278bc,0xbfef8da74ad83398,1 +np.float64,0xffd7bd48c62f7a92,0x3fe3468cd4ac9d34,1 +np.float64,0xffedd0ed14bba1d9,0xbfd563a83477077b,1 +np.float64,0xffe86b83f3f0d707,0x3fe9eb3c658e4b2d,1 +np.float64,0xffd6a4db4bad49b6,0xbfc7e11276166e17,1 +np.float64,0xffc29e8404253d08,0x3fd35971961c789f,1 +np.float64,0xffe27cf3d664f9e7,0xbfeca0f73c72f810,1 +np.float64,0xffc34152352682a4,0x3fef384e564c002c,1 +np.float64,0xffe395728ba72ae4,0x3f8fe18c2de86eba,1 +np.float64,0xffed86c4fbbb0d89,0x3fef709db881c672,1 +np.float64,0xffe8a98d37f1531a,0x3fd4879c8f73c3dc,1 +np.float64,0xffb8ce9fea319d40,0xbfb853c8fe46b08d,1 +np.float64,0xffe7f26db8efe4db,0xbfec1cfd3e5c2ac1,1 +np.float64,0xffd7935b77af26b6,0x3fb7368c89b2a460,1 +np.float64,0xffc5840ed02b081c,0x3fd92220b56631f3,1 +np.float64,0xffc36a873926d510,0x3fa84d61baf61811,1 +np.float64,0xffe06ea583e0dd4a,0x3feb647e348b9e39,1 +np.float64,0xffe6a33031ed4660,0xbfe096b851dc1a0a,1 +np.float64,0xffe001c938e00392,0x3fe4eece77623e7a,1 +np.float64,0xffc1e4f23b23c9e4,0xbfdb9bb1f83f6ac4,1 +np.float64,0xffecd3ecbab9a7d9,0x3fbafb1f800f177d,1 +np.float64,0xffc2d3016825a604,0xbfef650e8b0d6afb,1 +np.float64,0xffe222cb68e44596,0x3fde3690e44de5bd,1 +np.float64,0xffe5bb145e2b7628,0x3fedbb98e23c9dc1,1 +np.float64,0xffe9e5823b73cb04,0xbfee41661016c03c,1 +np.float64,0xffd234a00ba46940,0x3fda0312cda580c2,1 +np.float64,0xffe0913ed6e1227d,0xbfed508bb529bd23,1 +np.float64,0xffe8e3596171c6b2,0xbfdc33e1c1d0310e,1 +np.float64,0xffef9c6835ff38cf,0x3fea8ce6d27dfba3,1 +np.float64,0xffdd3bcf66ba779e,0x3fe50523d2b6470e,1 +np.float64,0xffe57e8cf06afd1a,0xbfee600933347247,1 +np.float64,0xffe0d8c65fa1b18c,0x3fe75091f93d5e4c,1 +np.float64,0xffea7c8c16b4f918,0x3fee681724795198,1 +np.float64,0xffe34f7a05269ef4,0xbfe3c3e179676f13,1 +np.float64,0xffd28894a6a5112a,0xbfe5d1027aee615d,1 +np.float64,0xffc73be6f22e77cc,0x3fe469bbc08b472a,1 +np.float64,0xffe7f71b066fee36,0x3fe7ed136c8fdfaa,1 +np.float64,0xffebc13e29f7827c,0x3fefcdc6e677d314,1 +np.float64,0xffd53e9c942a7d3a,0x3fea5a02c7341749,1 +np.float64,0xffd7191b23ae3236,0x3fea419b66023443,1 +np.float64,0xffe9480325b29006,0xbfefeaff5fa38cd5,1 +np.float64,0xffba46dc0e348db8,0xbfefa54f4de28eba,1 +np.float64,0xffdd4cc31eba9986,0x3fe60bb41fe1c4da,1 +np.float64,0xffe13a70dea274e1,0xbfaa9192f7bd6c9b,1 +np.float64,0xffde25127bbc4a24,0x3f7c75f45e29be7d,1 +np.float64,0xffe4076543a80eca,0x3fea5aad50d2f687,1 +np.float64,0xffe61512acec2a25,0xbfefffeb67401649,1 +np.float64,0xffef812ec1ff025d,0xbfe919c7c073c766,1 +np.float64,0xffd5552aeaaaaa56,0x3fc89d38ab047396,1 diff --git a/local_packages/numpy/_core/tests/data/umath-validation-set-cosh.csv b/local_packages/numpy/_core/tests/data/umath-validation-set-cosh.csv new file mode 100644 index 0000000..af14d84 --- /dev/null +++ b/local_packages/numpy/_core/tests/data/umath-validation-set-cosh.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0xfe0ac238,0x7f800000,3 +np.float32,0xbf553b86,0x3faf079b,3 +np.float32,0xff4457da,0x7f800000,3 +np.float32,0xff7253f3,0x7f800000,3 +np.float32,0x5a5802,0x3f800000,3 +np.float32,0x3db03413,0x3f80795b,3 +np.float32,0x7f6795c9,0x7f800000,3 +np.float32,0x805b9142,0x3f800000,3 +np.float32,0xfeea581a,0x7f800000,3 +np.float32,0x3f7e2dba,0x3fc472f6,3 +np.float32,0x3d9c4d74,0x3f805f7a,3 +np.float32,0x7f18c665,0x7f800000,3 +np.float32,0x7f003e23,0x7f800000,3 +np.float32,0x3d936fa0,0x3f8054f3,3 +np.float32,0x3f32034f,0x3fa0368e,3 +np.float32,0xff087604,0x7f800000,3 +np.float32,0x380a5,0x3f800000,3 +np.float32,0x3f59694e,0x3fb10077,3 +np.float32,0x3e63e648,0x3f832ee4,3 +np.float32,0x80712f42,0x3f800000,3 +np.float32,0x3e169908,0x3f816302,3 +np.float32,0x3f2d766e,0x3f9e8692,3 +np.float32,0x3d6412e0,0x3f8032d0,3 +np.float32,0xbde689e8,0x3f80cfd4,3 +np.float32,0x483e2e,0x3f800000,3 +np.float32,0xff1ba2d0,0x7f800000,3 +np.float32,0x80136bff,0x3f800000,3 +np.float32,0x3f72534c,0x3fbdc1d4,3 +np.float32,0x3e9eb381,0x3f8632c6,3 +np.float32,0x3e142892,0x3f815795,3 +np.float32,0x0,0x3f800000,3 +np.float32,0x2f2528,0x3f800000,3 +np.float32,0x7f38be13,0x7f800000,3 +np.float32,0xfeee6896,0x7f800000,3 +np.float32,0x7f09095d,0x7f800000,3 +np.float32,0xbe94d,0x3f800000,3 +np.float32,0xbedcf8d4,0x3f8c1b74,3 +np.float32,0xbf694c02,0x3fb8ef07,3 +np.float32,0x3e2261f8,0x3f819cde,3 +np.float32,0xbf01d3ce,0x3f90d0e0,3 +np.float32,0xbeb7b3a2,0x3f8853de,3 +np.float32,0x8046de7b,0x3f800000,3 +np.float32,0xbcb45ea0,0x3f8007f1,3 +np.float32,0x3eef14af,0x3f8e35dd,3 +np.float32,0xbf047316,0x3f91846e,3 +np.float32,0x801cef45,0x3f800000,3 +np.float32,0x3e9ad891,0x3f85e609,3 +np.float32,0xff20e9cf,0x7f800000,3 +np.float32,0x80068434,0x3f800000,3 +np.float32,0xbe253020,0x3f81ab49,3 +np.float32,0x3f13f4b8,0x3f95fac9,3 +np.float32,0x804accd1,0x3f800000,3 +np.float32,0x3dee3e10,0x3f80ddf7,3 +np.float32,0xbe6c4690,0x3f836c29,3 +np.float32,0xff30d431,0x7f800000,3 +np.float32,0xbec82416,0x3f89e791,3 +np.float32,0x3f30bbcb,0x3f9fbbcc,3 +np.float32,0x3f5620a2,0x3faf72b8,3 +np.float32,0x807a8130,0x3f800000,3 +np.float32,0x3e3cb02d,0x3f822de0,3 +np.float32,0xff4839ac,0x7f800000,3 +np.float32,0x800a3e9c,0x3f800000,3 +np.float32,0x3dffd65b,0x3f810002,3 +np.float32,0xbf2b1492,0x3f9da987,3 +np.float32,0xbf21602c,0x3f9a48fe,3 +np.float32,0x512531,0x3f800000,3 +np.float32,0x24b99a,0x3f800000,3 +np.float32,0xbf53e345,0x3fae67b1,3 +np.float32,0xff2126ec,0x7f800000,3 +np.float32,0x7e79b49d,0x7f800000,3 +np.float32,0x3ea3cf04,0x3f869b6f,3 +np.float32,0x7f270059,0x7f800000,3 +np.float32,0x3f625b2f,0x3fb561e1,3 +np.float32,0xbf59947e,0x3fb11519,3 +np.float32,0xfe0d1c64,0x7f800000,3 +np.float32,0xbf3f3eae,0x3fa568e2,3 +np.float32,0x7c04d1,0x3f800000,3 +np.float32,0x7e66bd,0x3f800000,3 +np.float32,0x8011880d,0x3f800000,3 +np.float32,0x3f302f07,0x3f9f8759,3 +np.float32,0x4e3375,0x3f800000,3 +np.float32,0xfe67a134,0x7f800000,3 +np.float32,0xff670249,0x7f800000,3 +np.float32,0x7e19f27d,0x7f800000,3 +np.float32,0xbf36ce12,0x3fa20b81,3 +np.float32,0xbe6bcfc4,0x3f8368b5,3 +np.float32,0x76fcba,0x3f800000,3 +np.float32,0x7f30abaf,0x7f800000,3 +np.float32,0x3f4c1f6d,0x3faae43c,3 +np.float32,0x7f61f44a,0x7f800000,3 +np.float32,0xbf4bb3c9,0x3faab4af,3 +np.float32,0xbda15ee0,0x3f8065c6,3 +np.float32,0xfbb4e800,0x7f800000,3 +np.float32,0x7fa00000,0x7fe00000,3 +np.float32,0x80568501,0x3f800000,3 +np.float32,0xfeb285e4,0x7f800000,3 +np.float32,0x804423a7,0x3f800000,3 +np.float32,0x7e6c0f21,0x7f800000,3 +np.float32,0x7f136b3c,0x7f800000,3 +np.float32,0x3f2d08e6,0x3f9e5e9c,3 +np.float32,0xbf6b454e,0x3fb9f7e6,3 +np.float32,0x3e6bceb0,0x3f8368ad,3 +np.float32,0xff1ad16a,0x7f800000,3 +np.float32,0x7cce1a04,0x7f800000,3 +np.float32,0xff7bcf95,0x7f800000,3 +np.float32,0x8049788d,0x3f800000,3 +np.float32,0x7ec45918,0x7f800000,3 +np.float32,0xff7fffff,0x7f800000,3 +np.float32,0x8039a1a0,0x3f800000,3 +np.float32,0x7e90cd72,0x7f800000,3 +np.float32,0xbf7dfd53,0x3fc456cc,3 +np.float32,0x3eeeb664,0x3f8e2a76,3 +np.float32,0x8055ef9b,0x3f800000,3 +np.float32,0x7ee06ddd,0x7f800000,3 +np.float32,0xba2cc000,0x3f800002,3 +np.float32,0x806da632,0x3f800000,3 +np.float32,0x7ecfaaf5,0x7f800000,3 +np.float32,0x3ddd12e6,0x3f80bf19,3 +np.float32,0xbf754394,0x3fbf60b1,3 +np.float32,0x6f3f19,0x3f800000,3 +np.float32,0x800a9af0,0x3f800000,3 +np.float32,0xfeef13ea,0x7f800000,3 +np.float32,0x7f74841f,0x7f800000,3 +np.float32,0xbeb9a2f0,0x3f888181,3 +np.float32,0x77cbb,0x3f800000,3 +np.float32,0xbf587f84,0x3fb0911b,3 +np.float32,0x210ba5,0x3f800000,3 +np.float32,0x3ee60a28,0x3f8d2367,3 +np.float32,0xbe3731ac,0x3f820dc7,3 +np.float32,0xbee8cfee,0x3f8d765e,3 +np.float32,0x7b2ef179,0x7f800000,3 +np.float32,0xfe81377c,0x7f800000,3 +np.float32,0x6ac98c,0x3f800000,3 +np.float32,0x3f51f144,0x3fad8288,3 +np.float32,0x80785750,0x3f800000,3 +np.float32,0x3f46615a,0x3fa864ff,3 +np.float32,0xbf35ac9e,0x3fa19b8e,3 +np.float32,0x7f0982ac,0x7f800000,3 +np.float32,0x1b2610,0x3f800000,3 +np.float32,0x3ed8bb25,0x3f8ba3df,3 +np.float32,0xbeb41bac,0x3f88006d,3 +np.float32,0xff48e89d,0x7f800000,3 +np.float32,0x3ed0ab8c,0x3f8ac755,3 +np.float32,0xbe64671c,0x3f833282,3 +np.float32,0x64bce4,0x3f800000,3 +np.float32,0x284f79,0x3f800000,3 +np.float32,0x7e09faa7,0x7f800000,3 +np.float32,0x4376c1,0x3f800000,3 +np.float32,0x805ca8c0,0x3f800000,3 +np.float32,0xff0859d5,0x7f800000,3 +np.float32,0xbed2f3b2,0x3f8b04dd,3 +np.float32,0x8045bd0c,0x3f800000,3 +np.float32,0x3f0e6216,0x3f94503f,3 +np.float32,0x3f41e3ae,0x3fa68035,3 +np.float32,0x80088ccc,0x3f800000,3 +np.float32,0x3f37fc19,0x3fa2812f,3 +np.float32,0x71c87d,0x3f800000,3 +np.float32,0x8024f4b2,0x3f800000,3 +np.float32,0xff78dd88,0x7f800000,3 +np.float32,0xbda66c90,0x3f806c40,3 +np.float32,0x7f33ef0d,0x7f800000,3 +np.float32,0x46a343,0x3f800000,3 +np.float32,0xff1dce38,0x7f800000,3 +np.float32,0x1b935d,0x3f800000,3 +np.float32,0x3ebec598,0x3f88fd0e,3 +np.float32,0xff115530,0x7f800000,3 +np.float32,0x803916aa,0x3f800000,3 +np.float32,0xff60a3e2,0x7f800000,3 +np.float32,0x3b8ddd48,0x3f80004f,3 +np.float32,0x3f761b6e,0x3fbfd8ea,3 +np.float32,0xbdf55b88,0x3f80eb70,3 +np.float32,0x37374,0x3f800000,3 +np.float32,0x3de150e0,0x3f80c682,3 +np.float32,0x3f343278,0x3fa10a83,3 +np.float32,0xbe9baefa,0x3f85f68b,3 +np.float32,0x3d8d43,0x3f800000,3 +np.float32,0x3e80994b,0x3f840f0c,3 +np.float32,0xbe573c6c,0x3f82d685,3 +np.float32,0x805b83b4,0x3f800000,3 +np.float32,0x683d88,0x3f800000,3 +np.float32,0x692465,0x3f800000,3 +np.float32,0xbdc345f8,0x3f809511,3 +np.float32,0x3f7c1c5a,0x3fc3406f,3 +np.float32,0xbf40bef3,0x3fa606df,3 +np.float32,0xff1e25b9,0x7f800000,3 +np.float32,0x3e4481e0,0x3f825d37,3 +np.float32,0x75d188,0x3f800000,3 +np.float32,0x3ea53cec,0x3f86b956,3 +np.float32,0xff105a54,0x7f800000,3 +np.float32,0x7f800000,0x7f800000,3 +np.float32,0x7f11f0b0,0x7f800000,3 +np.float32,0xbf58a57d,0x3fb0a328,3 +np.float32,0xbdd11e38,0x3f80aaf8,3 +np.float32,0xbea94adc,0x3f870fa0,3 +np.float32,0x3e9dd780,0x3f862180,3 +np.float32,0xff1786b9,0x7f800000,3 +np.float32,0xfec46aa2,0x7f800000,3 +np.float32,0x7f4300c1,0x7f800000,3 +np.float32,0x29ba2b,0x3f800000,3 +np.float32,0x3f4112e2,0x3fa62993,3 +np.float32,0xbe6c9224,0x3f836e5d,3 +np.float32,0x7f0e42a3,0x7f800000,3 +np.float32,0xff6390ad,0x7f800000,3 +np.float32,0x3f54e374,0x3faede94,3 +np.float32,0x7f2642a2,0x7f800000,3 +np.float32,0x7f46b2be,0x7f800000,3 +np.float32,0xfe59095c,0x7f800000,3 +np.float32,0x7146a0,0x3f800000,3 +np.float32,0x3f07763d,0x3f925786,3 +np.float32,0x3d172780,0x3f801651,3 +np.float32,0xff66f1c5,0x7f800000,3 +np.float32,0xff025349,0x7f800000,3 +np.float32,0x6ce99d,0x3f800000,3 +np.float32,0xbf7e4f50,0x3fc48685,3 +np.float32,0xbeff8ca2,0x3f904708,3 +np.float32,0x3e6c8,0x3f800000,3 +np.float32,0x7f7153dc,0x7f800000,3 +np.float32,0xbedcf612,0x3f8c1b26,3 +np.float32,0xbbc2f180,0x3f800094,3 +np.float32,0xbf397399,0x3fa314b8,3 +np.float32,0x6c6e35,0x3f800000,3 +np.float32,0x7f50a88b,0x7f800000,3 +np.float32,0xfe84093e,0x7f800000,3 +np.float32,0x3f737b9d,0x3fbe6478,3 +np.float32,0x7f6a5340,0x7f800000,3 +np.float32,0xbde83c20,0x3f80d2e7,3 +np.float32,0xff769ce9,0x7f800000,3 +np.float32,0xfdd33c30,0x7f800000,3 +np.float32,0xbc95cb60,0x3f80057a,3 +np.float32,0x8007a40d,0x3f800000,3 +np.float32,0x3f55d90c,0x3faf5132,3 +np.float32,0x80282082,0x3f800000,3 +np.float32,0xbf43b1f2,0x3fa7418c,3 +np.float32,0x3f1dc7cb,0x3f991731,3 +np.float32,0xbd4346a0,0x3f80253f,3 +np.float32,0xbf5aa82a,0x3fb19946,3 +np.float32,0x3f4b8c22,0x3faaa333,3 +np.float32,0x3d13468c,0x3f80152f,3 +np.float32,0x7db77097,0x7f800000,3 +np.float32,0x4a00df,0x3f800000,3 +np.float32,0xbedea5e0,0x3f8c4b64,3 +np.float32,0x80482543,0x3f800000,3 +np.float32,0xbef344fe,0x3f8eb8dd,3 +np.float32,0x7ebd4044,0x7f800000,3 +np.float32,0xbf512c0e,0x3fad287e,3 +np.float32,0x3db28cce,0x3f807c9c,3 +np.float32,0xbd0f5ae0,0x3f801412,3 +np.float32,0xfe7ed9ac,0x7f800000,3 +np.float32,0x3eb1aa82,0x3f87c8b4,3 +np.float32,0xfef1679e,0x7f800000,3 +np.float32,0xff3629f2,0x7f800000,3 +np.float32,0xff3562b4,0x7f800000,3 +np.float32,0x3dcafe1d,0x3f80a118,3 +np.float32,0xfedf242a,0x7f800000,3 +np.float32,0xbf43102a,0x3fa6fda4,3 +np.float32,0x8028834e,0x3f800000,3 +np.float32,0x805c8513,0x3f800000,3 +np.float32,0x3f59306a,0x3fb0e550,3 +np.float32,0x3eda2c9c,0x3f8bcc4a,3 +np.float32,0x80023524,0x3f800000,3 +np.float32,0x7ef72879,0x7f800000,3 +np.float32,0x661c8a,0x3f800000,3 +np.float32,0xfec3ba6c,0x7f800000,3 +np.float32,0x805aaca6,0x3f800000,3 +np.float32,0xff5c1f13,0x7f800000,3 +np.float32,0x3f6ab3f4,0x3fb9ab6b,3 +np.float32,0x3f014896,0x3f90ac20,3 +np.float32,0x3f030584,0x3f91222a,3 +np.float32,0xbf74853d,0x3fbef71d,3 +np.float32,0xbf534ee0,0x3fae2323,3 +np.float32,0x2c90c3,0x3f800000,3 +np.float32,0x7f62ad25,0x7f800000,3 +np.float32,0x1c8847,0x3f800000,3 +np.float32,0x7e2a8d43,0x7f800000,3 +np.float32,0x807a09cd,0x3f800000,3 +np.float32,0x413871,0x3f800000,3 +np.float32,0x80063692,0x3f800000,3 +np.float32,0x3edaf29b,0x3f8be211,3 +np.float32,0xbf64a7ab,0x3fb68b2d,3 +np.float32,0xfe56a720,0x7f800000,3 +np.float32,0xbf54a8d4,0x3faec350,3 +np.float32,0x3ecbaef7,0x3f8a4350,3 +np.float32,0x3f413714,0x3fa63890,3 +np.float32,0x7d3aa8,0x3f800000,3 +np.float32,0xbea9a13c,0x3f8716e7,3 +np.float32,0x7ef7553e,0x7f800000,3 +np.float32,0x8056f29f,0x3f800000,3 +np.float32,0xff1f7ffe,0x7f800000,3 +np.float32,0x3f41953b,0x3fa65f9c,3 +np.float32,0x3daa2f,0x3f800000,3 +np.float32,0xff0893e4,0x7f800000,3 +np.float32,0xbefc7ec6,0x3f8fe207,3 +np.float32,0xbb026800,0x3f800011,3 +np.float32,0x341e4f,0x3f800000,3 +np.float32,0x3e7b708a,0x3f83e0d1,3 +np.float32,0xa18cb,0x3f800000,3 +np.float32,0x7e290239,0x7f800000,3 +np.float32,0xbf4254f2,0x3fa6af62,3 +np.float32,0x80000000,0x3f800000,3 +np.float32,0x3f0a6c,0x3f800000,3 +np.float32,0xbec44d28,0x3f898609,3 +np.float32,0xf841f,0x3f800000,3 +np.float32,0x7f01a693,0x7f800000,3 +np.float32,0x8053340b,0x3f800000,3 +np.float32,0xfd4e7990,0x7f800000,3 +np.float32,0xbf782f1f,0x3fc10356,3 +np.float32,0xbe962118,0x3f858acc,3 +np.float32,0xfe8cd702,0x7f800000,3 +np.float32,0x7ecd986f,0x7f800000,3 +np.float32,0x3ebe775f,0x3f88f59b,3 +np.float32,0x8065524f,0x3f800000,3 +np.float32,0x3ede7fc4,0x3f8c471e,3 +np.float32,0x7f5e15ea,0x7f800000,3 +np.float32,0xbe871ada,0x3f847b78,3 +np.float32,0x3f21958b,0x3f9a5af7,3 +np.float32,0x3f64d480,0x3fb6a1fa,3 +np.float32,0xff18b0e9,0x7f800000,3 +np.float32,0xbf0840dd,0x3f928fd9,3 +np.float32,0x80104f5d,0x3f800000,3 +np.float32,0x643b94,0x3f800000,3 +np.float32,0xbc560a80,0x3f8002cc,3 +np.float32,0x3f5c75d6,0x3fb2786e,3 +np.float32,0x7f365fc9,0x7f800000,3 +np.float32,0x54e965,0x3f800000,3 +np.float32,0x6dcd4d,0x3f800000,3 +np.float32,0x3f2057a0,0x3f99f04d,3 +np.float32,0x272fa3,0x3f800000,3 +np.float32,0xff423dc9,0x7f800000,3 +np.float32,0x80273463,0x3f800000,3 +np.float32,0xfe21cc78,0x7f800000,3 +np.float32,0x7fc00000,0x7fc00000,3 +np.float32,0x802feb65,0x3f800000,3 +np.float32,0x3dc733d0,0x3f809b21,3 +np.float32,0x65d56b,0x3f800000,3 +np.float32,0x80351d8e,0x3f800000,3 +np.float32,0xbf244247,0x3f9b43dd,3 +np.float32,0x7f328e7e,0x7f800000,3 +np.float32,0x7f4d9712,0x7f800000,3 +np.float32,0x2c505d,0x3f800000,3 +np.float32,0xbf232ebe,0x3f9ae5a0,3 +np.float32,0x804a363a,0x3f800000,3 +np.float32,0x80417102,0x3f800000,3 +np.float32,0xbf48b170,0x3fa963d4,3 +np.float32,0x7ea3e3b6,0x7f800000,3 +np.float32,0xbf41415b,0x3fa63cd2,3 +np.float32,0xfe3af7c8,0x7f800000,3 +np.float32,0x7f478010,0x7f800000,3 +np.float32,0x80143113,0x3f800000,3 +np.float32,0x3f7626a7,0x3fbfdf2e,3 +np.float32,0xfea20b0a,0x7f800000,3 +np.float32,0x80144d64,0x3f800000,3 +np.float32,0x7db9ba47,0x7f800000,3 +np.float32,0x7f7fffff,0x7f800000,3 +np.float32,0xbe410834,0x3f8247ef,3 +np.float32,0x14a7af,0x3f800000,3 +np.float32,0x7eaebf9e,0x7f800000,3 +np.float32,0xff800000,0x7f800000,3 +np.float32,0x3f0a7d8e,0x3f9330fd,3 +np.float32,0x3ef780,0x3f800000,3 +np.float32,0x3f62253e,0x3fb546d1,3 +np.float32,0x3f4cbeac,0x3fab2acc,3 +np.float32,0x25db1,0x3f800000,3 +np.float32,0x65c54a,0x3f800000,3 +np.float32,0x800f0645,0x3f800000,3 +np.float32,0x3ed28c78,0x3f8af9f0,3 +np.float32,0x8040c6ce,0x3f800000,3 +np.float32,0x5e4e9a,0x3f800000,3 +np.float32,0xbd3fd2b0,0x3f8023f1,3 +np.float32,0xbf5d2d3f,0x3fb2d1b6,3 +np.float32,0x7ead999f,0x7f800000,3 +np.float32,0xbf30dc86,0x3f9fc805,3 +np.float32,0xff2b0a62,0x7f800000,3 +np.float32,0x3d5180e9,0x3f802adf,3 +np.float32,0x3f62716f,0x3fb56d0d,3 +np.float32,0x7e82ae9c,0x7f800000,3 +np.float32,0xfe2d4bdc,0x7f800000,3 +np.float32,0x805cc7d4,0x3f800000,3 +np.float32,0xfb50f700,0x7f800000,3 +np.float32,0xff57b684,0x7f800000,3 +np.float32,0x80344f01,0x3f800000,3 +np.float32,0x7f2af372,0x7f800000,3 +np.float32,0xfeab6204,0x7f800000,3 +np.float32,0x30b251,0x3f800000,3 +np.float32,0x3eed8cc4,0x3f8e0698,3 +np.float32,0x7eeb1c6a,0x7f800000,3 +np.float32,0x3f17ece6,0x3f9735b0,3 +np.float32,0x21e985,0x3f800000,3 +np.float32,0x3f3a7df3,0x3fa37e34,3 +np.float32,0x802a14a2,0x3f800000,3 +np.float32,0x807d4d5b,0x3f800000,3 +np.float32,0x7f6093ce,0x7f800000,3 +np.float32,0x3f800000,0x3fc583ab,3 +np.float32,0x3da2c26e,0x3f806789,3 +np.float32,0xfe05f278,0x7f800000,3 +np.float32,0x800000,0x3f800000,3 +np.float32,0xbee63342,0x3f8d282e,3 +np.float32,0xbf225586,0x3f9a9bd4,3 +np.float32,0xbed60e86,0x3f8b59ba,3 +np.float32,0xbec99484,0x3f8a0ca3,3 +np.float32,0x3e967c71,0x3f859199,3 +np.float32,0x7f26ab62,0x7f800000,3 +np.float32,0xca7f4,0x3f800000,3 +np.float32,0xbf543790,0x3fae8ebc,3 +np.float32,0x3e4c1ed9,0x3f828d2d,3 +np.float32,0xbdf37f88,0x3f80e7e1,3 +np.float32,0xff0cc44e,0x7f800000,3 +np.float32,0x5dea48,0x3f800000,3 +np.float32,0x31023c,0x3f800000,3 +np.float32,0x3ea10733,0x3f866208,3 +np.float32,0x3e11e6f2,0x3f814d2e,3 +np.float32,0x80641960,0x3f800000,3 +np.float32,0x3ef779a8,0x3f8f3edb,3 +np.float32,0x3f2a5062,0x3f9d632a,3 +np.float32,0x2b7d34,0x3f800000,3 +np.float32,0x3eeb95c5,0x3f8dca67,3 +np.float32,0x805c1357,0x3f800000,3 +np.float32,0x3db3a79d,0x3f807e29,3 +np.float32,0xfded1900,0x7f800000,3 +np.float32,0x45f362,0x3f800000,3 +np.float32,0x451f38,0x3f800000,3 +np.float32,0x801d3ae5,0x3f800000,3 +np.float32,0x458d45,0x3f800000,3 +np.float32,0xfda9d298,0x7f800000,3 +np.float32,0x467439,0x3f800000,3 +np.float32,0x7f66554a,0x7f800000,3 +np.float32,0xfef2375a,0x7f800000,3 +np.float32,0xbf33fc47,0x3fa0f5d7,3 +np.float32,0x3f75ba69,0x3fbfa2d0,3 +np.float32,0xfeb625b2,0x7f800000,3 +np.float32,0x8066b371,0x3f800000,3 +np.float32,0x3f5cb4e9,0x3fb29718,3 +np.float32,0x7f3b6a58,0x7f800000,3 +np.float32,0x7f6b35ea,0x7f800000,3 +np.float32,0xbf6ee555,0x3fbbe5be,3 +np.float32,0x3d836e21,0x3f804380,3 +np.float32,0xff43cd0c,0x7f800000,3 +np.float32,0xff55c1fa,0x7f800000,3 +np.float32,0xbf0dfccc,0x3f9432a6,3 +np.float32,0x3ed92121,0x3f8baf00,3 +np.float32,0x80068cc1,0x3f800000,3 +np.float32,0xff0103f9,0x7f800000,3 +np.float32,0x7e51b175,0x7f800000,3 +np.float32,0x8012f214,0x3f800000,3 +np.float32,0x62d298,0x3f800000,3 +np.float32,0xbf3e1525,0x3fa4ef8d,3 +np.float32,0x806b4882,0x3f800000,3 +np.float32,0xbf38c146,0x3fa2ce7c,3 +np.float32,0xbed59c30,0x3f8b4d70,3 +np.float32,0x3d1910c0,0x3f8016e2,3 +np.float32,0x7f33d55b,0x7f800000,3 +np.float32,0x7f5800e3,0x7f800000,3 +np.float32,0x5b2c5d,0x3f800000,3 +np.float32,0x807be750,0x3f800000,3 +np.float32,0x7eb297c1,0x7f800000,3 +np.float32,0x7dafee62,0x7f800000,3 +np.float32,0x7d9e23f0,0x7f800000,3 +np.float32,0x3e580537,0x3f82dbd8,3 +np.float32,0xbf800000,0x3fc583ab,3 +np.float32,0x7f40f880,0x7f800000,3 +np.float32,0x775ad3,0x3f800000,3 +np.float32,0xbedacd36,0x3f8bddf3,3 +np.float32,0x2138f6,0x3f800000,3 +np.float32,0x52c3b7,0x3f800000,3 +np.float32,0x8041cfdd,0x3f800000,3 +np.float32,0x7bf16791,0x7f800000,3 +np.float32,0xbe95869c,0x3f857f55,3 +np.float32,0xbf199796,0x3f97bcaf,3 +np.float32,0x3ef8da38,0x3f8f6b45,3 +np.float32,0x803f3648,0x3f800000,3 +np.float32,0x80026fd2,0x3f800000,3 +np.float32,0x7eb3ac26,0x7f800000,3 +np.float32,0x3e49921b,0x3f827ce8,3 +np.float32,0xbf689aed,0x3fb892de,3 +np.float32,0x3f253509,0x3f9b9779,3 +np.float32,0xff17894a,0x7f800000,3 +np.float32,0x3cd12639,0x3f800aae,3 +np.float32,0x1db14b,0x3f800000,3 +np.float32,0x39a0bf,0x3f800000,3 +np.float32,0xfdfe1d08,0x7f800000,3 +np.float32,0xff416cd2,0x7f800000,3 +np.float32,0x8070d818,0x3f800000,3 +np.float32,0x3e516e12,0x3f82afb8,3 +np.float32,0x80536651,0x3f800000,3 +np.float32,0xbf2903d2,0x3f9cecb7,3 +np.float32,0x3e896ae4,0x3f84a353,3 +np.float32,0xbd6ba2c0,0x3f80363d,3 +np.float32,0x80126d3e,0x3f800000,3 +np.float32,0xfd9d43d0,0x7f800000,3 +np.float32,0x7b56b6,0x3f800000,3 +np.float32,0xff04718e,0x7f800000,3 +np.float32,0x31440f,0x3f800000,3 +np.float32,0xbf7a1313,0x3fc215c9,3 +np.float32,0x7f43d6a0,0x7f800000,3 +np.float32,0x3f566503,0x3faf92cc,3 +np.float32,0xbf39eb0e,0x3fa343f1,3 +np.float32,0xbe35fd70,0x3f8206df,3 +np.float32,0x800c36ac,0x3f800000,3 +np.float32,0x60d061,0x3f800000,3 +np.float32,0x80453e12,0x3f800000,3 +np.float32,0xfe17c36c,0x7f800000,3 +np.float32,0x3d8c72,0x3f800000,3 +np.float32,0xfe8e9134,0x7f800000,3 +np.float32,0xff5d89de,0x7f800000,3 +np.float32,0x7f45020e,0x7f800000,3 +np.float32,0x3f28225e,0x3f9c9d01,3 +np.float32,0xbf3b6900,0x3fa3dbdd,3 +np.float32,0x80349023,0x3f800000,3 +np.float32,0xbf14d780,0x3f964042,3 +np.float32,0x3f56b5d2,0x3fafb8c3,3 +np.float32,0x800c639c,0x3f800000,3 +np.float32,0x7f7a19c8,0x7f800000,3 +np.float32,0xbf7a0815,0x3fc20f86,3 +np.float32,0xbec55926,0x3f89a06e,3 +np.float32,0x4b2cd2,0x3f800000,3 +np.float32,0xbf271eb2,0x3f9c41c8,3 +np.float32,0xff26e168,0x7f800000,3 +np.float32,0x800166b2,0x3f800000,3 +np.float32,0xbde97e38,0x3f80d532,3 +np.float32,0xbf1f93ec,0x3f99af1a,3 +np.float32,0x7f2896ed,0x7f800000,3 +np.float32,0x3da7d96d,0x3f806e1d,3 +np.float32,0x802b7237,0x3f800000,3 +np.float32,0xfdca6bc0,0x7f800000,3 +np.float32,0xbed2e300,0x3f8b0318,3 +np.float32,0x8079d9e8,0x3f800000,3 +np.float32,0x3f388c81,0x3fa2b9c2,3 +np.float32,0x3ed2607c,0x3f8af54a,3 +np.float32,0xff287de6,0x7f800000,3 +np.float32,0x3f55ed89,0x3faf5ac9,3 +np.float32,0x7f5b6af7,0x7f800000,3 +np.float32,0xbeb24730,0x3f87d698,3 +np.float32,0x1,0x3f800000,3 +np.float32,0x3f3a2350,0x3fa35a3b,3 +np.float32,0x8013b422,0x3f800000,3 +np.float32,0x3e9a6560,0x3f85dd35,3 +np.float32,0x80510631,0x3f800000,3 +np.float32,0xfeae39d6,0x7f800000,3 +np.float32,0x7eb437ad,0x7f800000,3 +np.float32,0x8047545b,0x3f800000,3 +np.float32,0x806a1c71,0x3f800000,3 +np.float32,0xbe5543f0,0x3f82c93b,3 +np.float32,0x40e8d,0x3f800000,3 +np.float32,0x63d18b,0x3f800000,3 +np.float32,0x1fa1ea,0x3f800000,3 +np.float32,0x801944e0,0x3f800000,3 +np.float32,0xbf4c7ac6,0x3fab0cae,3 +np.float32,0x7f2679d4,0x7f800000,3 +np.float32,0x3f0102fc,0x3f9099d0,3 +np.float32,0x7e44bdc1,0x7f800000,3 +np.float32,0xbf2072f6,0x3f99f970,3 +np.float32,0x5c7d38,0x3f800000,3 +np.float32,0x30a2e6,0x3f800000,3 +np.float32,0x805b9ca3,0x3f800000,3 +np.float32,0x7cc24ad5,0x7f800000,3 +np.float32,0x3f4f7920,0x3fac6357,3 +np.float32,0x111d62,0x3f800000,3 +np.float32,0xbf4de40a,0x3fabad77,3 +np.float32,0x805d0354,0x3f800000,3 +np.float32,0xbb3d2b00,0x3f800023,3 +np.float32,0x3ef229e7,0x3f8e960b,3 +np.float32,0x3f15754e,0x3f9670e0,3 +np.float32,0xbf689c6b,0x3fb893a5,3 +np.float32,0xbf3796c6,0x3fa2599b,3 +np.float32,0xbe95303c,0x3f8578f2,3 +np.float32,0xfee330de,0x7f800000,3 +np.float32,0xff0d9705,0x7f800000,3 +np.float32,0xbeb0ebd0,0x3f87b7dd,3 +np.float32,0xbf4d5a13,0x3fab6fe7,3 +np.float32,0x80142f5a,0x3f800000,3 +np.float32,0x7e01a87b,0x7f800000,3 +np.float32,0xbe45e5ec,0x3f8265d7,3 +np.float32,0x7f4ac255,0x7f800000,3 +np.float32,0x3ebf6a60,0x3f890ccb,3 +np.float32,0x7f771e16,0x7f800000,3 +np.float32,0x3f41834e,0x3fa6582b,3 +np.float32,0x3f7f6f98,0x3fc52ef0,3 +np.float32,0x7e4ad775,0x7f800000,3 +np.float32,0x3eb39991,0x3f87f4c4,3 +np.float32,0x1e3f4,0x3f800000,3 +np.float32,0x7e84ba19,0x7f800000,3 +np.float32,0x80640be4,0x3f800000,3 +np.float32,0x3f459fc8,0x3fa81272,3 +np.float32,0x3f554ed0,0x3faf109b,3 +np.float32,0x3c6617,0x3f800000,3 +np.float32,0x7f441158,0x7f800000,3 +np.float32,0x7f66e6d8,0x7f800000,3 +np.float32,0x7f565152,0x7f800000,3 +np.float32,0x7f16d550,0x7f800000,3 +np.float32,0xbd4f1950,0x3f8029e5,3 +np.float32,0xcf722,0x3f800000,3 +np.float32,0x3f37d6fd,0x3fa272ad,3 +np.float32,0xff7324ea,0x7f800000,3 +np.float32,0x804bc246,0x3f800000,3 +np.float32,0x7f099ef8,0x7f800000,3 +np.float32,0x5f838b,0x3f800000,3 +np.float32,0x80523534,0x3f800000,3 +np.float32,0x3f595e84,0x3fb0fb50,3 +np.float32,0xfdef8ac8,0x7f800000,3 +np.float32,0x3d9a07,0x3f800000,3 +np.float32,0x410f61,0x3f800000,3 +np.float32,0xbf715dbb,0x3fbd3bcb,3 +np.float32,0xbedd4734,0x3f8c242f,3 +np.float32,0x7e86739a,0x7f800000,3 +np.float32,0x3e81f144,0x3f8424fe,3 +np.float32,0x7f6342d1,0x7f800000,3 +np.float32,0xff6919a3,0x7f800000,3 +np.float32,0xff051878,0x7f800000,3 +np.float32,0x800ba28f,0x3f800000,3 +np.float32,0xfefab3d8,0x7f800000,3 +np.float32,0xff612a84,0x7f800000,3 +np.float32,0x800cd5ab,0x3f800000,3 +np.float32,0x802a07ae,0x3f800000,3 +np.float32,0xfef6ee3a,0x7f800000,3 +np.float32,0x8037e896,0x3f800000,3 +np.float32,0x3ef2d86f,0x3f8eab7d,3 +np.float32,0x3eafe53d,0x3f87a0cb,3 +np.float32,0xba591c00,0x3f800003,3 +np.float32,0x3e9ed028,0x3f863508,3 +np.float32,0x4a12a8,0x3f800000,3 +np.float32,0xbee55c84,0x3f8d0f45,3 +np.float32,0x8038a8d3,0x3f800000,3 +np.float32,0xff055243,0x7f800000,3 +np.float32,0xbf659067,0x3fb701ca,3 +np.float32,0xbee36a86,0x3f8cd5e0,3 +np.float32,0x7f1d74c1,0x7f800000,3 +np.float32,0xbf7657df,0x3fbffaad,3 +np.float32,0x7e37ee34,0x7f800000,3 +np.float32,0xff04bc74,0x7f800000,3 +np.float32,0x806d194e,0x3f800000,3 +np.float32,0x7f5596c3,0x7f800000,3 +np.float32,0xbe09d268,0x3f81293e,3 +np.float32,0x79ff75,0x3f800000,3 +np.float32,0xbf55479c,0x3faf0d3e,3 +np.float32,0xbe5428ec,0x3f82c1d4,3 +np.float32,0x3f624134,0x3fb554d7,3 +np.float32,0x2ccb8a,0x3f800000,3 +np.float32,0xfc082040,0x7f800000,3 +np.float32,0xff315467,0x7f800000,3 +np.float32,0x3e6ea2d2,0x3f837dd5,3 +np.float32,0x8020fdd1,0x3f800000,3 +np.float32,0x7f0416a1,0x7f800000,3 +np.float32,0x710a1b,0x3f800000,3 +np.float32,0x3dfcd050,0x3f80f9fc,3 +np.float32,0xfe995e96,0x7f800000,3 +np.float32,0x3f020d00,0x3f90e006,3 +np.float32,0x8064263e,0x3f800000,3 +np.float32,0xfcee4160,0x7f800000,3 +np.float32,0x801b3a18,0x3f800000,3 +np.float32,0x3f62c984,0x3fb59955,3 +np.float32,0x806e8355,0x3f800000,3 +np.float32,0x7e94f65d,0x7f800000,3 +np.float32,0x1173de,0x3f800000,3 +np.float32,0x3e3ff3b7,0x3f824166,3 +np.float32,0x803b4aea,0x3f800000,3 +np.float32,0x804c5bcc,0x3f800000,3 +np.float32,0x509fe5,0x3f800000,3 +np.float32,0xbf33b5ee,0x3fa0db0b,3 +np.float32,0x3f2ac15c,0x3f9d8ba4,3 +np.float32,0x7f2c54f8,0x7f800000,3 +np.float32,0x7f33d933,0x7f800000,3 +np.float32,0xbf09b2b4,0x3f92f795,3 +np.float32,0x805db8d6,0x3f800000,3 +np.float32,0x6d6e66,0x3f800000,3 +np.float32,0x3ddfea92,0x3f80c40c,3 +np.float32,0xfda719b8,0x7f800000,3 +np.float32,0x5d657f,0x3f800000,3 +np.float32,0xbf005ba3,0x3f906df6,3 +np.float32,0xbf45e606,0x3fa8305c,3 +np.float32,0x5e9fd1,0x3f800000,3 +np.float32,0x8079dc45,0x3f800000,3 +np.float32,0x7e9c40e3,0x7f800000,3 +np.float32,0x6bd5f6,0x3f800000,3 +np.float32,0xbea14a0e,0x3f866761,3 +np.float32,0x7e7323f3,0x7f800000,3 +np.float32,0x7f0c0a79,0x7f800000,3 +np.float32,0xbf7d7aeb,0x3fc40b0f,3 +np.float32,0x437588,0x3f800000,3 +np.float32,0xbf356376,0x3fa17f63,3 +np.float32,0x7f129921,0x7f800000,3 +np.float32,0x7f47a52e,0x7f800000,3 +np.float32,0xba8cb400,0x3f800005,3 +np.float32,0x802284e0,0x3f800000,3 +np.float32,0xbe820f56,0x3f8426ec,3 +np.float32,0x7f2ef6cf,0x7f800000,3 +np.float32,0xbf70a090,0x3fbcd501,3 +np.float32,0xbf173fea,0x3f96ff6d,3 +np.float32,0x3e19c489,0x3f817224,3 +np.float32,0x7f429b30,0x7f800000,3 +np.float32,0xbdae4118,0x3f8076af,3 +np.float32,0x3e70ad30,0x3f838d41,3 +np.float32,0x335fed,0x3f800000,3 +np.float32,0xff5359cf,0x7f800000,3 +np.float32,0xbf17e42b,0x3f9732f1,3 +np.float32,0xff3a950b,0x7f800000,3 +np.float32,0xbcca70c0,0x3f800a02,3 +np.float32,0x3f2cda62,0x3f9e4dad,3 +np.float32,0x3f50c185,0x3facf805,3 +np.float32,0x80000001,0x3f800000,3 +np.float32,0x807b86d2,0x3f800000,3 +np.float32,0x8010c2cf,0x3f800000,3 +np.float32,0x3f130fb8,0x3f95b519,3 +np.float32,0x807dc546,0x3f800000,3 +np.float32,0xbee20740,0x3f8cad3f,3 +np.float32,0x80800000,0x3f800000,3 +np.float32,0x3cbd90c0,0x3f8008c6,3 +np.float32,0x3e693488,0x3f835571,3 +np.float32,0xbe70cd44,0x3f838e35,3 +np.float32,0xbe348dc8,0x3f81feb1,3 +np.float32,0x3f31ea90,0x3fa02d3f,3 +np.float32,0xfcd7e180,0x7f800000,3 +np.float32,0xbe30a75c,0x3f81e8d0,3 +np.float32,0x3e552c5a,0x3f82c89d,3 +np.float32,0xff513f74,0x7f800000,3 +np.float32,0xbdb16248,0x3f807afd,3 +np.float64,0x7fbbf954e437f2a9,0x7ff0000000000000,1 +np.float64,0x581bbf0cb0379,0x3ff0000000000000,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0xffb959a2a632b348,0x7ff0000000000000,1 +np.float64,0xbfdbd6baebb7ad76,0x3ff189a5ca25a6e1,1 +np.float64,0xbfd094ec9aa129da,0x3ff08a3f6b918065,1 +np.float64,0x3fe236753f646cea,0x3ff2a982660b8b43,1 +np.float64,0xbfe537fadfaa6ff6,0x3ff3a5f1c49c31bf,1 +np.float64,0xbfe31fa7dc663f50,0x3ff2f175374aef0e,1 +np.float64,0x3fc4b6569f296cb0,0x3ff035bde801bb53,1 +np.float64,0x800ce3c00f99c780,0x3ff0000000000000,1 +np.float64,0xbfebcde33e779bc6,0x3ff66de82cd30fc5,1 +np.float64,0x800dc09d3b7b813b,0x3ff0000000000000,1 +np.float64,0x80067d4c450cfa99,0x3ff0000000000000,1 +np.float64,0x1f6ade203ed7,0x3ff0000000000000,1 +np.float64,0xbfd4e311eca9c624,0x3ff0dc1383d6c3db,1 +np.float64,0x800649b3a54c9368,0x3ff0000000000000,1 +np.float64,0xcc14d1ab9829a,0x3ff0000000000000,1 +np.float64,0x3fc290c5bb25218b,0x3ff02b290f46dd6d,1 +np.float64,0x3fe78eb8376f1d70,0x3ff488f3bc259537,1 +np.float64,0xffc60f58e82c1eb0,0x7ff0000000000000,1 +np.float64,0x3fd35666ad26accd,0x3ff0bc6573da6bcd,1 +np.float64,0x7fc20257a62404ae,0x7ff0000000000000,1 +np.float64,0x80076d842e0edb09,0x3ff0000000000000,1 +np.float64,0x3fd8e44b08b1c898,0x3ff139b9a1f8428e,1 +np.float64,0x7fd6f6fc7a2dedf8,0x7ff0000000000000,1 +np.float64,0x3fa01b9f0820373e,0x3ff00206f8ad0f1b,1 +np.float64,0x69ed190ed3da4,0x3ff0000000000000,1 +np.float64,0xbfd997eb34b32fd6,0x3ff14be65a5db4a0,1 +np.float64,0x7feada2d0935b459,0x7ff0000000000000,1 +np.float64,0xbf80987120213100,0x3ff000226d29a9fc,1 +np.float64,0xbfef203e37fe407c,0x3ff82f51f04e8821,1 +np.float64,0xffe3dcf91fa7b9f2,0x7ff0000000000000,1 +np.float64,0x9a367283346cf,0x3ff0000000000000,1 +np.float64,0x800feb09f7bfd614,0x3ff0000000000000,1 +np.float64,0xbfe0319f9520633f,0x3ff217c5205c403f,1 +np.float64,0xbfa91eabd4323d50,0x3ff004ee4347f627,1 +np.float64,0x3fd19cbf7d23397f,0x3ff09c13e8e43571,1 +np.float64,0xffeb8945f0b7128b,0x7ff0000000000000,1 +np.float64,0x800a0eb4f2141d6a,0x3ff0000000000000,1 +np.float64,0xffe83e7312f07ce6,0x7ff0000000000000,1 +np.float64,0xffca53fee834a7fc,0x7ff0000000000000,1 +np.float64,0x800881cbf1710398,0x3ff0000000000000,1 +np.float64,0x80003e6abbe07cd6,0x3ff0000000000000,1 +np.float64,0xbfef6a998afed533,0x3ff859b7852d1b4d,1 +np.float64,0x3fd4eb7577a9d6eb,0x3ff0dcc601261aab,1 +np.float64,0xbfc9c12811338250,0x3ff05331268b05c8,1 +np.float64,0x7fddf84e5e3bf09c,0x7ff0000000000000,1 +np.float64,0xbfd4d6fbbc29adf8,0x3ff0db12db19d187,1 +np.float64,0x80077892bfaef126,0x3ff0000000000000,1 +np.float64,0xffae9d49543d3a90,0x7ff0000000000000,1 +np.float64,0xbfd8bef219317de4,0x3ff136034e5d2f1b,1 +np.float64,0xffe89c74ddb138e9,0x7ff0000000000000,1 +np.float64,0x8003b6bbb7e76d78,0x3ff0000000000000,1 +np.float64,0x315a4e8462b4b,0x3ff0000000000000,1 +np.float64,0x800ee616edddcc2e,0x3ff0000000000000,1 +np.float64,0xdfb27f97bf650,0x3ff0000000000000,1 +np.float64,0x8004723dc328e47c,0x3ff0000000000000,1 +np.float64,0xbfe529500daa52a0,0x3ff3a0b9b33fc84c,1 +np.float64,0xbfe4e46a7ce9c8d5,0x3ff3886ce0f92612,1 +np.float64,0xbf52003680240000,0x3ff00000a203d61a,1 +np.float64,0xffd3400458268008,0x7ff0000000000000,1 +np.float64,0x80076deb444edbd7,0x3ff0000000000000,1 +np.float64,0xa612f6c14c27,0x3ff0000000000000,1 +np.float64,0xbfd41c74c9a838ea,0x3ff0cbe61e16aecf,1 +np.float64,0x43f464a887e8d,0x3ff0000000000000,1 +np.float64,0x800976e748b2edcf,0x3ff0000000000000,1 +np.float64,0xffc79d6ba12f3ad8,0x7ff0000000000000,1 +np.float64,0xffd6dbcb022db796,0x7ff0000000000000,1 +np.float64,0xffd6a9672a2d52ce,0x7ff0000000000000,1 +np.float64,0x3fe95dcfa632bb9f,0x3ff54bbad2ee919e,1 +np.float64,0x3febadd2e1375ba6,0x3ff65e336c47c018,1 +np.float64,0x7fd47c37d828f86f,0x7ff0000000000000,1 +np.float64,0xbfd4ea59e0a9d4b4,0x3ff0dcae6af3e443,1 +np.float64,0x2c112afc58226,0x3ff0000000000000,1 +np.float64,0x8008122bced02458,0x3ff0000000000000,1 +np.float64,0x7fe7105ab3ee20b4,0x7ff0000000000000,1 +np.float64,0x80089634df312c6a,0x3ff0000000000000,1 +np.float64,0x68e9fbc8d1d40,0x3ff0000000000000,1 +np.float64,0xbfec1e1032f83c20,0x3ff69590b9f18ea8,1 +np.float64,0xbfedf181623be303,0x3ff787ef48935dc6,1 +np.float64,0xffe8600457f0c008,0x7ff0000000000000,1 +np.float64,0x7a841ec6f5084,0x3ff0000000000000,1 +np.float64,0x459a572e8b34c,0x3ff0000000000000,1 +np.float64,0x3fe8a232bef14465,0x3ff4fac1780f731e,1 +np.float64,0x3fcb37597d366eb3,0x3ff05cf08ab14ebd,1 +np.float64,0xbfb0261d00204c38,0x3ff00826fb86ca8a,1 +np.float64,0x3fc6e7a6dd2dcf4e,0x3ff041c1222ffa79,1 +np.float64,0xee65dd03dccbc,0x3ff0000000000000,1 +np.float64,0xffe26fdc23e4dfb8,0x7ff0000000000000,1 +np.float64,0x7fe8d6c8cab1ad91,0x7ff0000000000000,1 +np.float64,0xbfeb64bf2676c97e,0x3ff63abb8607828c,1 +np.float64,0x3fd28417b425082f,0x3ff0ac9eb22a732b,1 +np.float64,0xbfd26835b3a4d06c,0x3ff0aa94c48fb6d2,1 +np.float64,0xffec617a01b8c2f3,0x7ff0000000000000,1 +np.float64,0xe1bfff01c3800,0x3ff0000000000000,1 +np.float64,0x3fd4def913a9bdf4,0x3ff0dbbc7271046f,1 +np.float64,0x94f4c17129e98,0x3ff0000000000000,1 +np.float64,0x8009b2eaa33365d6,0x3ff0000000000000,1 +np.float64,0x3fd9633b41b2c678,0x3ff1468388bdfb65,1 +np.float64,0xffe0ae5c80e15cb8,0x7ff0000000000000,1 +np.float64,0x7fdfc35996bf86b2,0x7ff0000000000000,1 +np.float64,0x3fcfc5bdc23f8b7c,0x3ff07ed5caa4545c,1 +np.float64,0xd48b4907a9169,0x3ff0000000000000,1 +np.float64,0xbfe0a2cc52614598,0x3ff2361665895d95,1 +np.float64,0xbfe9068f90720d1f,0x3ff525b82491a1a5,1 +np.float64,0x4238b9208472,0x3ff0000000000000,1 +np.float64,0x800e6b2bf69cd658,0x3ff0000000000000,1 +np.float64,0x7fb638b6ae2c716c,0x7ff0000000000000,1 +np.float64,0x7fe267641764cec7,0x7ff0000000000000,1 +np.float64,0xffc0933d3521267c,0x7ff0000000000000,1 +np.float64,0x7fddfdfb533bfbf6,0x7ff0000000000000,1 +np.float64,0xced2a8e99da55,0x3ff0000000000000,1 +np.float64,0x2a80d5165501b,0x3ff0000000000000,1 +np.float64,0xbfeead2ab63d5a55,0x3ff7eeb5cbcfdcab,1 +np.float64,0x80097f6f92f2fee0,0x3ff0000000000000,1 +np.float64,0x3fee1f29b77c3e54,0x3ff7a0a58c13df62,1 +np.float64,0x3f9d06b8383a0d70,0x3ff001a54a2d8cf8,1 +np.float64,0xbfc8b41d3f31683c,0x3ff04c85379dd6b0,1 +np.float64,0xffd2a04c1e254098,0x7ff0000000000000,1 +np.float64,0xbfb71c01e02e3800,0x3ff010b34220e838,1 +np.float64,0xbfe69249ef6d2494,0x3ff425e48d1e938b,1 +np.float64,0xffefffffffffffff,0x7ff0000000000000,1 +np.float64,0x3feb1d52fbf63aa6,0x3ff618813ae922d7,1 +np.float64,0x7fb8d1a77e31a34e,0x7ff0000000000000,1 +np.float64,0xffc3cfc4ed279f88,0x7ff0000000000000,1 +np.float64,0x2164b9fc42c98,0x3ff0000000000000,1 +np.float64,0x3fbb868cee370d1a,0x3ff017b31b0d4d27,1 +np.float64,0x3fcd6dea583adbd5,0x3ff06cbd16bf44a0,1 +np.float64,0xbfecd041d479a084,0x3ff6efb25f61012d,1 +np.float64,0xbfb0552e6e20aa60,0x3ff00856ca83834a,1 +np.float64,0xe6293cbfcc528,0x3ff0000000000000,1 +np.float64,0x7fba58394034b072,0x7ff0000000000000,1 +np.float64,0x33bc96d467794,0x3ff0000000000000,1 +np.float64,0xffe90ea86bf21d50,0x7ff0000000000000,1 +np.float64,0xbfc626ea6d2c4dd4,0x3ff03d7e01ec3849,1 +np.float64,0x65b56fe4cb6af,0x3ff0000000000000,1 +np.float64,0x3fea409fb7f4813f,0x3ff5b171deab0ebd,1 +np.float64,0x3fe849c1df709384,0x3ff4d59063ff98c4,1 +np.float64,0x169073082d20f,0x3ff0000000000000,1 +np.float64,0xcc8b6add9916e,0x3ff0000000000000,1 +np.float64,0xbfef3d78d5fe7af2,0x3ff83fecc26abeea,1 +np.float64,0x3fe8c65a4a718cb4,0x3ff50a23bfeac7df,1 +np.float64,0x3fde9fa5c8bd3f4c,0x3ff1ddeb12b9d623,1 +np.float64,0xffe2af536da55ea6,0x7ff0000000000000,1 +np.float64,0x800186d0b0c30da2,0x3ff0000000000000,1 +np.float64,0x3fe9ba3c1d737478,0x3ff574ab2bf3a560,1 +np.float64,0xbfe1489c46a29138,0x3ff2641d36b30e21,1 +np.float64,0xbfe4b6b7c0e96d70,0x3ff37880ac8b0540,1 +np.float64,0x800e66ad82fccd5b,0x3ff0000000000000,1 +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 +np.float64,0x7febb0fd477761fa,0x7ff0000000000000,1 +np.float64,0xbfdc433f2eb8867e,0x3ff195ec2a6cce27,1 +np.float64,0x3fe12c5a172258b4,0x3ff25c225b8a34bb,1 +np.float64,0xbfef6f116c3ede23,0x3ff85c47eaed49a0,1 +np.float64,0x800af6f60f35edec,0x3ff0000000000000,1 +np.float64,0xffe567999a2acf32,0x7ff0000000000000,1 +np.float64,0xbfc5ac5ae72b58b4,0x3ff03adb50ec04f3,1 +np.float64,0x3fea1b57e23436b0,0x3ff5a06f98541767,1 +np.float64,0x7fcc3e36fb387c6d,0x7ff0000000000000,1 +np.float64,0x8000c8dc698191ba,0x3ff0000000000000,1 +np.float64,0x3fee5085ed7ca10c,0x3ff7bb92f61245b8,1 +np.float64,0x7fbb9f803a373eff,0x7ff0000000000000,1 +np.float64,0xbfe1e5e806e3cbd0,0x3ff2918f2d773007,1 +np.float64,0x8008f8c3f3b1f188,0x3ff0000000000000,1 +np.float64,0x7fe53df515ea7be9,0x7ff0000000000000,1 +np.float64,0x7fdbb87fb3b770fe,0x7ff0000000000000,1 +np.float64,0x3fefcc0f50ff981f,0x3ff89210a6a04e6b,1 +np.float64,0x3fe33f87d0267f10,0x3ff2fb989ea4f2bc,1 +np.float64,0x1173992022e8,0x3ff0000000000000,1 +np.float64,0x3fef534632bea68c,0x3ff84c5ca9713ff9,1 +np.float64,0x3fc5991d552b3238,0x3ff03a72bfdb6e5f,1 +np.float64,0x3fdad90dc1b5b21c,0x3ff16db868180034,1 +np.float64,0xffe20b8078e41700,0x7ff0000000000000,1 +np.float64,0x7fdf409a82be8134,0x7ff0000000000000,1 +np.float64,0x3fccb7e691396fcd,0x3ff06786b6ccdbcb,1 +np.float64,0xffe416e0b7282dc1,0x7ff0000000000000,1 +np.float64,0xffe3a8a981275152,0x7ff0000000000000,1 +np.float64,0x3fd9c8bd31b3917c,0x3ff150ee6f5f692f,1 +np.float64,0xffeab6fef6356dfd,0x7ff0000000000000,1 +np.float64,0x3fe9c5e3faf38bc8,0x3ff579e18c9bd548,1 +np.float64,0x800b173e44762e7d,0x3ff0000000000000,1 +np.float64,0xffe2719db764e33b,0x7ff0000000000000,1 +np.float64,0x3fd1fcf31223f9e6,0x3ff0a2da7ad99856,1 +np.float64,0x80082c4afcd05896,0x3ff0000000000000,1 +np.float64,0xa56e5e4b4adcc,0x3ff0000000000000,1 +np.float64,0xffbbbddab2377bb8,0x7ff0000000000000,1 +np.float64,0x3b3927c076726,0x3ff0000000000000,1 +np.float64,0x3fec03fd58f807fb,0x3ff6889b8a774728,1 +np.float64,0xbfaa891fb4351240,0x3ff00580987bd914,1 +np.float64,0x7fb4800c4a290018,0x7ff0000000000000,1 +np.float64,0xffbb5d2b6036ba58,0x7ff0000000000000,1 +np.float64,0x7fd6608076acc100,0x7ff0000000000000,1 +np.float64,0x31267e4c624d1,0x3ff0000000000000,1 +np.float64,0x33272266664e5,0x3ff0000000000000,1 +np.float64,0x47bb37f28f768,0x3ff0000000000000,1 +np.float64,0x3fe134bb4ee26977,0x3ff25e7ea647a928,1 +np.float64,0xbfe2b5f42ba56be8,0x3ff2d05cbdc7344b,1 +np.float64,0xbfe0e013fd61c028,0x3ff246dfce572914,1 +np.float64,0x7fecedcda4f9db9a,0x7ff0000000000000,1 +np.float64,0x8001816c2da302d9,0x3ff0000000000000,1 +np.float64,0xffced8b65b3db16c,0x7ff0000000000000,1 +np.float64,0xffdc1d4a0b383a94,0x7ff0000000000000,1 +np.float64,0x7fe94e7339f29ce5,0x7ff0000000000000,1 +np.float64,0x33fb846667f71,0x3ff0000000000000,1 +np.float64,0x800a1380e9542702,0x3ff0000000000000,1 +np.float64,0x800b74eaa776e9d6,0x3ff0000000000000,1 +np.float64,0x5681784aad030,0x3ff0000000000000,1 +np.float64,0xbfee0eb7917c1d6f,0x3ff797b949f7f6b4,1 +np.float64,0xffe4ec5fd2a9d8bf,0x7ff0000000000000,1 +np.float64,0xbfcd7401dd3ae804,0x3ff06cea52c792c0,1 +np.float64,0x800587563beb0ead,0x3ff0000000000000,1 +np.float64,0x3fc15c6f3322b8de,0x3ff025bbd030166d,1 +np.float64,0x7feb6b4caf76d698,0x7ff0000000000000,1 +np.float64,0x7fe136ef82a26dde,0x7ff0000000000000,1 +np.float64,0xf592dac3eb25c,0x3ff0000000000000,1 +np.float64,0x7fd300baf6a60175,0x7ff0000000000000,1 +np.float64,0x7fc880de9e3101bc,0x7ff0000000000000,1 +np.float64,0x7fe7a1aa5caf4354,0x7ff0000000000000,1 +np.float64,0x2f9b8e0e5f373,0x3ff0000000000000,1 +np.float64,0xffcc9071993920e4,0x7ff0000000000000,1 +np.float64,0x8009e151b313c2a4,0x3ff0000000000000,1 +np.float64,0xbfd46e2d18a8dc5a,0x3ff0d27a7b37c1ae,1 +np.float64,0x3fe65c7961acb8f3,0x3ff4116946062a4c,1 +np.float64,0x7fd31b371626366d,0x7ff0000000000000,1 +np.float64,0x98dc924d31b93,0x3ff0000000000000,1 +np.float64,0x268bef364d17f,0x3ff0000000000000,1 +np.float64,0x7fd883ba56310774,0x7ff0000000000000,1 +np.float64,0x3fc53f01a32a7e03,0x3ff0388dea9cd63e,1 +np.float64,0xffe1ea8c0563d518,0x7ff0000000000000,1 +np.float64,0x3fd0bf0e63a17e1d,0x3ff08d0577f5ffa6,1 +np.float64,0x7fef42418f7e8482,0x7ff0000000000000,1 +np.float64,0x8000bccd38c1799b,0x3ff0000000000000,1 +np.float64,0xbfe6c48766ed890f,0x3ff43936fa4048c8,1 +np.float64,0xbfb2a38f3a254720,0x3ff00adc7f7b2822,1 +np.float64,0x3fd5262b2eaa4c56,0x3ff0e1af492c08f5,1 +np.float64,0x80065b4691ecb68e,0x3ff0000000000000,1 +np.float64,0xfb6b9e9ff6d74,0x3ff0000000000000,1 +np.float64,0x8006c71e6ecd8e3e,0x3ff0000000000000,1 +np.float64,0x3fd0a3e43ca147c8,0x3ff08b3ad7b42485,1 +np.float64,0xbfc82d8607305b0c,0x3ff04949d6733ef6,1 +np.float64,0xde048c61bc092,0x3ff0000000000000,1 +np.float64,0xffcf73e0fa3ee7c0,0x7ff0000000000000,1 +np.float64,0xbfe8639d7830c73b,0x3ff4e05f97948376,1 +np.float64,0x8010000000000000,0x3ff0000000000000,1 +np.float64,0x67f01a2acfe04,0x3ff0000000000000,1 +np.float64,0x3fe222e803e445d0,0x3ff2a3a75e5f29d8,1 +np.float64,0xffef84c6387f098b,0x7ff0000000000000,1 +np.float64,0x3fe5969c1e6b2d38,0x3ff3c80130462bb2,1 +np.float64,0x8009f56953d3ead3,0x3ff0000000000000,1 +np.float64,0x3fe05c9b6360b937,0x3ff2232e1cba5617,1 +np.float64,0x3fd8888d63b1111b,0x3ff130a5b788d52f,1 +np.float64,0xffe3a9e6f26753ce,0x7ff0000000000000,1 +np.float64,0x800e2aaa287c5554,0x3ff0000000000000,1 +np.float64,0x3fea8d6c82351ad9,0x3ff5d4d8cde9a11d,1 +np.float64,0x7feef700723dee00,0x7ff0000000000000,1 +np.float64,0x3fa5cb77242b96e0,0x3ff003b62b3e50f1,1 +np.float64,0x7fb68f0a862d1e14,0x7ff0000000000000,1 +np.float64,0x7fb97ee83432fdcf,0x7ff0000000000000,1 +np.float64,0x7fd74a78632e94f0,0x7ff0000000000000,1 +np.float64,0x7fcfe577713fcaee,0x7ff0000000000000,1 +np.float64,0xffe192ee5ea325dc,0x7ff0000000000000,1 +np.float64,0x477d6ae48efae,0x3ff0000000000000,1 +np.float64,0xffe34d5237669aa4,0x7ff0000000000000,1 +np.float64,0x7fe3ce8395a79d06,0x7ff0000000000000,1 +np.float64,0x80019c01ffa33805,0x3ff0000000000000,1 +np.float64,0x74b5b56ce96b7,0x3ff0000000000000,1 +np.float64,0x7fe05ecdeda0bd9b,0x7ff0000000000000,1 +np.float64,0xffe9693eb232d27d,0x7ff0000000000000,1 +np.float64,0xffd2be2c7da57c58,0x7ff0000000000000,1 +np.float64,0x800dbd5cbc1b7aba,0x3ff0000000000000,1 +np.float64,0xbfa36105d426c210,0x3ff002ef2e3a87f7,1 +np.float64,0x800b2d69fb765ad4,0x3ff0000000000000,1 +np.float64,0xbfdb81c9a9370394,0x3ff1802d409cbf7a,1 +np.float64,0x7fd481d014a9039f,0x7ff0000000000000,1 +np.float64,0xffe66c3c1fecd878,0x7ff0000000000000,1 +np.float64,0x3fc55865192ab0c8,0x3ff03915b51e8839,1 +np.float64,0xd6a78987ad4f1,0x3ff0000000000000,1 +np.float64,0x800c6cc80d58d990,0x3ff0000000000000,1 +np.float64,0x979435a12f29,0x3ff0000000000000,1 +np.float64,0xbfbd971e7a3b2e40,0x3ff01b647e45f5a6,1 +np.float64,0x80067565bfeceacc,0x3ff0000000000000,1 +np.float64,0x8001ad689ce35ad2,0x3ff0000000000000,1 +np.float64,0x7fa43253dc2864a7,0x7ff0000000000000,1 +np.float64,0xbfe3dda307e7bb46,0x3ff32ef99a2efe1d,1 +np.float64,0x3fe5d7b395ebaf68,0x3ff3dfd33cdc8ef4,1 +np.float64,0xd94cc9c3b2999,0x3ff0000000000000,1 +np.float64,0x3fee5a513fbcb4a2,0x3ff7c0f17b876ce5,1 +np.float64,0xffe27761fa64eec4,0x7ff0000000000000,1 +np.float64,0x3feb788119b6f102,0x3ff64446f67f4efa,1 +np.float64,0xbfed6e10dffadc22,0x3ff741d5ef610ca0,1 +np.float64,0x7fe73cf98b2e79f2,0x7ff0000000000000,1 +np.float64,0x7847d09af08fb,0x3ff0000000000000,1 +np.float64,0x29ded2da53bdb,0x3ff0000000000000,1 +np.float64,0xbfe51c1ec1aa383e,0x3ff39c0b7cf832e2,1 +np.float64,0xbfeafd5e65f5fabd,0x3ff609548a787f57,1 +np.float64,0x3fd872a26fb0e545,0x3ff12e7fbd95505c,1 +np.float64,0x7fed6b7c1b7ad6f7,0x7ff0000000000000,1 +np.float64,0xffe7ba9ec16f753d,0x7ff0000000000000,1 +np.float64,0x7f89b322f0336645,0x7ff0000000000000,1 +np.float64,0xbfad1677383a2cf0,0x3ff0069ca67e7baa,1 +np.float64,0x3fe0906d04a120da,0x3ff2311b04b7bfef,1 +np.float64,0xffe4b3c9d4296793,0x7ff0000000000000,1 +np.float64,0xbfe476bb0ce8ed76,0x3ff36277d2921a74,1 +np.float64,0x7fc35655cf26acab,0x7ff0000000000000,1 +np.float64,0x7fe9980f0373301d,0x7ff0000000000000,1 +np.float64,0x9e6e04cb3cdc1,0x3ff0000000000000,1 +np.float64,0x800b89e0afb713c2,0x3ff0000000000000,1 +np.float64,0x800bd951a3f7b2a4,0x3ff0000000000000,1 +np.float64,0x29644a9e52c8a,0x3ff0000000000000,1 +np.float64,0x3fe1be2843637c51,0x3ff285e90d8387e4,1 +np.float64,0x7fa233cce4246799,0x7ff0000000000000,1 +np.float64,0xbfcfb7bc2d3f6f78,0x3ff07e657de3e2ed,1 +np.float64,0xffd7c953e7af92a8,0x7ff0000000000000,1 +np.float64,0xbfc5bbaf772b7760,0x3ff03b2ee4febb1e,1 +np.float64,0x8007b7315a6f6e63,0x3ff0000000000000,1 +np.float64,0xbfe906d902320db2,0x3ff525d7e16acfe0,1 +np.float64,0x3fde33d8553c67b1,0x3ff1d09faa19aa53,1 +np.float64,0x61fe76a0c3fcf,0x3ff0000000000000,1 +np.float64,0xa75e355b4ebc7,0x3ff0000000000000,1 +np.float64,0x3fc9e6d86033cdb1,0x3ff05426299c7064,1 +np.float64,0x7fd83f489eb07e90,0x7ff0000000000000,1 +np.float64,0x8000000000000001,0x3ff0000000000000,1 +np.float64,0x80014434ae62886a,0x3ff0000000000000,1 +np.float64,0xbfe21af9686435f3,0x3ff2a149338bdefe,1 +np.float64,0x9354e6cd26a9d,0x3ff0000000000000,1 +np.float64,0xb42b95f768573,0x3ff0000000000000,1 +np.float64,0xbfecb4481bb96890,0x3ff6e15d269dd651,1 +np.float64,0x3f97842ae82f0840,0x3ff0011485156f28,1 +np.float64,0xffdef63d90bdec7c,0x7ff0000000000000,1 +np.float64,0x7fe511a8d36a2351,0x7ff0000000000000,1 +np.float64,0xbf8cb638a0396c80,0x3ff000670c318fb6,1 +np.float64,0x3fe467e1f668cfc4,0x3ff35d65f93ccac6,1 +np.float64,0xbfce7d88f03cfb10,0x3ff074c22475fe5b,1 +np.float64,0x6d0a4994da14a,0x3ff0000000000000,1 +np.float64,0xbfb3072580260e48,0x3ff00b51d3913e9f,1 +np.float64,0x8008fcde36b1f9bd,0x3ff0000000000000,1 +np.float64,0x3fd984df66b309c0,0x3ff149f29125eca4,1 +np.float64,0xffee2a10fe7c5421,0x7ff0000000000000,1 +np.float64,0x80039168ace722d2,0x3ff0000000000000,1 +np.float64,0xffda604379b4c086,0x7ff0000000000000,1 +np.float64,0xffdc6a405bb8d480,0x7ff0000000000000,1 +np.float64,0x3fe62888b26c5111,0x3ff3fdda754c4372,1 +np.float64,0x8008b452cb5168a6,0x3ff0000000000000,1 +np.float64,0x6165d540c2cbb,0x3ff0000000000000,1 +np.float64,0xbfee0c04d17c180a,0x3ff796431c64bcbe,1 +np.float64,0x800609b8448c1371,0x3ff0000000000000,1 +np.float64,0x800fc3fca59f87f9,0x3ff0000000000000,1 +np.float64,0x77f64848efeca,0x3ff0000000000000,1 +np.float64,0x8007cf522d8f9ea5,0x3ff0000000000000,1 +np.float64,0xbfe9fb0b93f3f617,0x3ff591cb0052e22c,1 +np.float64,0x7fd569d5f0aad3ab,0x7ff0000000000000,1 +np.float64,0x7fe5cf489d6b9e90,0x7ff0000000000000,1 +np.float64,0x7fd6e193e92dc327,0x7ff0000000000000,1 +np.float64,0xf78988a5ef131,0x3ff0000000000000,1 +np.float64,0x3fe8f97562b1f2eb,0x3ff5201080fbc12d,1 +np.float64,0x7febfd69d7b7fad3,0x7ff0000000000000,1 +np.float64,0xffc07b5c1720f6b8,0x7ff0000000000000,1 +np.float64,0xbfd966926832cd24,0x3ff146da9adf492e,1 +np.float64,0x7fef5bd9edfeb7b3,0x7ff0000000000000,1 +np.float64,0xbfd2afbc96255f7a,0x3ff0afd601febf44,1 +np.float64,0x7fdd4ea6293a9d4b,0x7ff0000000000000,1 +np.float64,0xbfe8a1e916b143d2,0x3ff4faa23c2793e5,1 +np.float64,0x800188fcd8c311fa,0x3ff0000000000000,1 +np.float64,0xbfe30803f1661008,0x3ff2e9fc729baaee,1 +np.float64,0x7fefffffffffffff,0x7ff0000000000000,1 +np.float64,0x3fd287bec3250f7e,0x3ff0ace34d3102f6,1 +np.float64,0x1f0ee9443e1de,0x3ff0000000000000,1 +np.float64,0xbfd92f73da325ee8,0x3ff14143e4fa2c5a,1 +np.float64,0x3fed7c9bdffaf938,0x3ff74984168734d3,1 +np.float64,0x8002c4d1696589a4,0x3ff0000000000000,1 +np.float64,0xfe03011bfc060,0x3ff0000000000000,1 +np.float64,0x7f7a391e6034723c,0x7ff0000000000000,1 +np.float64,0xffd6fd46f82dfa8e,0x7ff0000000000000,1 +np.float64,0xbfd7520a742ea414,0x3ff112f1ba5d4f91,1 +np.float64,0x8009389d8812713b,0x3ff0000000000000,1 +np.float64,0x7fefb846aaff708c,0x7ff0000000000000,1 +np.float64,0x3fd98a0983331413,0x3ff14a79efb8adbf,1 +np.float64,0xbfd897158db12e2c,0x3ff132137902cf3e,1 +np.float64,0xffc4048d5928091c,0x7ff0000000000000,1 +np.float64,0x80036ae46046d5ca,0x3ff0000000000000,1 +np.float64,0x7faba7ed3c374fd9,0x7ff0000000000000,1 +np.float64,0xbfec4265e1f884cc,0x3ff6a7b8602422c9,1 +np.float64,0xaa195e0b5432c,0x3ff0000000000000,1 +np.float64,0x3feac15d317582ba,0x3ff5ed115758145f,1 +np.float64,0x6c13a5bcd8275,0x3ff0000000000000,1 +np.float64,0xbfed20b8883a4171,0x3ff7194dbd0dc988,1 +np.float64,0x800cde65c899bccc,0x3ff0000000000000,1 +np.float64,0x7c72912af8e53,0x3ff0000000000000,1 +np.float64,0x3fe49d2bb4e93a57,0x3ff36fab3aba15d4,1 +np.float64,0xbfd598fa02ab31f4,0x3ff0eb72fc472025,1 +np.float64,0x8007a191712f4324,0x3ff0000000000000,1 +np.float64,0xbfdeb14872bd6290,0x3ff1e01ca83f35fd,1 +np.float64,0xbfe1da46b3e3b48e,0x3ff28e23ad2f5615,1 +np.float64,0x800a2f348e745e69,0x3ff0000000000000,1 +np.float64,0xbfee66928afccd25,0x3ff7c7ac7dbb3273,1 +np.float64,0xffd78a0a2b2f1414,0x7ff0000000000000,1 +np.float64,0x7fc5fa80b82bf500,0x7ff0000000000000,1 +np.float64,0x800e6d7260dcdae5,0x3ff0000000000000,1 +np.float64,0xbfd6cff2aaad9fe6,0x3ff106f78ee61642,1 +np.float64,0x7fe1041d1d220839,0x7ff0000000000000,1 +np.float64,0xbfdf75586cbeeab0,0x3ff1f8dbaa7e57f0,1 +np.float64,0xffdcaae410b955c8,0x7ff0000000000000,1 +np.float64,0x800fe5e0d1ffcbc2,0x3ff0000000000000,1 +np.float64,0x800d7999527af333,0x3ff0000000000000,1 +np.float64,0xbfe62c233bac5846,0x3ff3ff34220a204c,1 +np.float64,0x7fe99bbff8f3377f,0x7ff0000000000000,1 +np.float64,0x7feeaf471d3d5e8d,0x7ff0000000000000,1 +np.float64,0xd5904ff5ab20a,0x3ff0000000000000,1 +np.float64,0x3fd07aae3320f55c,0x3ff08888c227c968,1 +np.float64,0x7fea82b8dff50571,0x7ff0000000000000,1 +np.float64,0xffef2db9057e5b71,0x7ff0000000000000,1 +np.float64,0xbfe2077fef640f00,0x3ff29b7dd0d39d36,1 +np.float64,0xbfe09a4d7c61349b,0x3ff233c7e88881f4,1 +np.float64,0x3fda50c4cbb4a188,0x3ff15f28a71deee7,1 +np.float64,0x7fe7d9ee6b2fb3dc,0x7ff0000000000000,1 +np.float64,0x3febbf6faeb77edf,0x3ff666d13682ea93,1 +np.float64,0xc401a32988035,0x3ff0000000000000,1 +np.float64,0xbfeab30aa8f56615,0x3ff5e65dcc6603f8,1 +np.float64,0x92c8cea32591a,0x3ff0000000000000,1 +np.float64,0xbff0000000000000,0x3ff8b07551d9f550,1 +np.float64,0xbfbddfb4dc3bbf68,0x3ff01bebaec38faa,1 +np.float64,0xbfd8de3e2a31bc7c,0x3ff1391f4830d20b,1 +np.float64,0xffc83a8f8a307520,0x7ff0000000000000,1 +np.float64,0x3fee026ef53c04de,0x3ff7911337085827,1 +np.float64,0x7fbaf380b235e700,0x7ff0000000000000,1 +np.float64,0xffe5b89fa62b713f,0x7ff0000000000000,1 +np.float64,0xbfdc1ff54ab83fea,0x3ff191e8c0b60bb2,1 +np.float64,0x6ae3534cd5c6b,0x3ff0000000000000,1 +np.float64,0xbfea87e558750fcb,0x3ff5d24846013794,1 +np.float64,0xffe0f467bee1e8cf,0x7ff0000000000000,1 +np.float64,0x7fee3b0dc7bc761b,0x7ff0000000000000,1 +np.float64,0x3fed87521afb0ea4,0x3ff74f2f5cd36a5c,1 +np.float64,0x7b3c9882f6794,0x3ff0000000000000,1 +np.float64,0x7fdd1a62243a34c3,0x7ff0000000000000,1 +np.float64,0x800f1dc88d3e3b91,0x3ff0000000000000,1 +np.float64,0x7fc3213cfa264279,0x7ff0000000000000,1 +np.float64,0x3fe40e0f3d681c1e,0x3ff33f135e9d5ded,1 +np.float64,0x7febf14e51f7e29c,0x7ff0000000000000,1 +np.float64,0xffe96c630c72d8c5,0x7ff0000000000000,1 +np.float64,0x7fdd82fbe7bb05f7,0x7ff0000000000000,1 +np.float64,0xbf9a6a0b1034d420,0x3ff0015ce009f7d8,1 +np.float64,0xbfceb4f8153d69f0,0x3ff0766e3ecc77df,1 +np.float64,0x3fd9de31e633bc64,0x3ff15327b794a16e,1 +np.float64,0x3faa902a30352054,0x3ff00583848d1969,1 +np.float64,0x0,0x3ff0000000000000,1 +np.float64,0x3fbe3459c43c68b4,0x3ff01c8af6710ef6,1 +np.float64,0xbfa8df010031be00,0x3ff004d5632dc9f5,1 +np.float64,0x7fbcf6cf2a39ed9d,0x7ff0000000000000,1 +np.float64,0xffe4236202a846c4,0x7ff0000000000000,1 +np.float64,0x3fd35ed52e26bdaa,0x3ff0bd0b231f11f7,1 +np.float64,0x7fe7a2df532f45be,0x7ff0000000000000,1 +np.float64,0xffe32f8315665f06,0x7ff0000000000000,1 +np.float64,0x7fe1a69f03e34d3d,0x7ff0000000000000,1 +np.float64,0x7fa5542b742aa856,0x7ff0000000000000,1 +np.float64,0x3fe84e9f8ef09d3f,0x3ff4d79816359765,1 +np.float64,0x29076fe6520ef,0x3ff0000000000000,1 +np.float64,0xffd70894f7ae112a,0x7ff0000000000000,1 +np.float64,0x800188edcbe311dc,0x3ff0000000000000,1 +np.float64,0x3fe2c7acda258f5a,0x3ff2d5dad4617703,1 +np.float64,0x3f775d41a02ebb00,0x3ff000110f212445,1 +np.float64,0x7fe8a084d1714109,0x7ff0000000000000,1 +np.float64,0x3fe31562d8a62ac6,0x3ff2ee35055741cd,1 +np.float64,0xbfd195d4d1a32baa,0x3ff09b98a50c151b,1 +np.float64,0xffaae9ff0c35d400,0x7ff0000000000000,1 +np.float64,0xff819866502330c0,0x7ff0000000000000,1 +np.float64,0x7fddc64815bb8c8f,0x7ff0000000000000,1 +np.float64,0xbfd442b428288568,0x3ff0cef70aa73ae6,1 +np.float64,0x8002e7625aa5cec5,0x3ff0000000000000,1 +np.float64,0x7fe8d4f70e71a9ed,0x7ff0000000000000,1 +np.float64,0xbfc3bd015f277a04,0x3ff030cbf16f29d9,1 +np.float64,0x3fd315d5baa62bab,0x3ff0b77a551a5335,1 +np.float64,0x7fa638b4642c7168,0x7ff0000000000000,1 +np.float64,0x3fdea8b795bd516f,0x3ff1df0bb70cdb79,1 +np.float64,0xbfd78754762f0ea8,0x3ff117ee0f29abed,1 +np.float64,0x8009f6a37633ed47,0x3ff0000000000000,1 +np.float64,0x3fea1daf75343b5f,0x3ff5a1804789bf13,1 +np.float64,0x3fd044b6c0a0896e,0x3ff0850b7297d02f,1 +np.float64,0x8003547a9c86a8f6,0x3ff0000000000000,1 +np.float64,0x3fa6c2cd782d859b,0x3ff0040c4ac8f44a,1 +np.float64,0x3fe225baaae44b76,0x3ff2a47f5e1f5e85,1 +np.float64,0x8000000000000000,0x3ff0000000000000,1 +np.float64,0x3fcb53da8736a7b8,0x3ff05db45af470ac,1 +np.float64,0x80079f8f140f3f1f,0x3ff0000000000000,1 +np.float64,0xbfcd1d7e2b3a3afc,0x3ff06a6b6845d05f,1 +np.float64,0x96df93672dbf3,0x3ff0000000000000,1 +np.float64,0xdef86e43bdf0e,0x3ff0000000000000,1 +np.float64,0xbfec05a09db80b41,0x3ff6896b768eea08,1 +np.float64,0x7fe3ff91d267ff23,0x7ff0000000000000,1 +np.float64,0xffea3eaa07347d53,0x7ff0000000000000,1 +np.float64,0xbfebde1cc1f7bc3a,0x3ff675e34ac2afc2,1 +np.float64,0x629bcde8c537a,0x3ff0000000000000,1 +np.float64,0xbfdde4fcff3bc9fa,0x3ff1c7061d21f0fe,1 +np.float64,0x3fee60fd003cc1fa,0x3ff7c49af3878a51,1 +np.float64,0x3fe5c92ac32b9256,0x3ff3da7a7929588b,1 +np.float64,0xbfe249c78f64938f,0x3ff2af52a06f1a50,1 +np.float64,0xbfc6de9dbe2dbd3c,0x3ff0418d284ee29f,1 +np.float64,0xffc8ef094631de14,0x7ff0000000000000,1 +np.float64,0x3fdef05f423de0bf,0x3ff1e800caba8ab5,1 +np.float64,0xffc1090731221210,0x7ff0000000000000,1 +np.float64,0xbfedec9b5fbbd937,0x3ff7854b6792a24a,1 +np.float64,0xbfb873507630e6a0,0x3ff012b23b3b7a67,1 +np.float64,0xbfe3cd6692679acd,0x3ff3299d6936ec4b,1 +np.float64,0xbfb107c890220f90,0x3ff0091122162472,1 +np.float64,0xbfe4e6ee48e9cddc,0x3ff3894e5a5e70a6,1 +np.float64,0xffe6fa3413edf468,0x7ff0000000000000,1 +np.float64,0x3fe2faf79b65f5ef,0x3ff2e5e11fae8b54,1 +np.float64,0xbfdfeb8df9bfd71c,0x3ff208189691b15f,1 +np.float64,0x75d2d03ceba5b,0x3ff0000000000000,1 +np.float64,0x3feb48c182b69183,0x3ff62d4462eba6cb,1 +np.float64,0xffcda9f7ff3b53f0,0x7ff0000000000000,1 +np.float64,0x7fcafbdcbd35f7b8,0x7ff0000000000000,1 +np.float64,0xbfd1895523a312aa,0x3ff09aba642a78d9,1 +np.float64,0x3fe3129c3f662538,0x3ff2ed546bbfafcf,1 +np.float64,0x3fb444dee02889be,0x3ff00cd86273b964,1 +np.float64,0xbf73b32d7ee77,0x3ff0000000000000,1 +np.float64,0x3fae19904c3c3321,0x3ff00714865c498a,1 +np.float64,0x7fefbfaef5bf7f5d,0x7ff0000000000000,1 +np.float64,0x8000dc3816e1b871,0x3ff0000000000000,1 +np.float64,0x8003f957ba47f2b0,0x3ff0000000000000,1 +np.float64,0xbfe3563c7ea6ac79,0x3ff302dcebc92856,1 +np.float64,0xbfdc80fbae3901f8,0x3ff19cfe73e58092,1 +np.float64,0x8009223b04524476,0x3ff0000000000000,1 +np.float64,0x3fd95f431c32be86,0x3ff1461c21cb03f0,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0xbfe7c12ed3ef825e,0x3ff49d59c265efcd,1 +np.float64,0x10000000000000,0x3ff0000000000000,1 +np.float64,0x7fc5e2632f2bc4c5,0x7ff0000000000000,1 +np.float64,0xffd8f6b4c7b1ed6a,0x7ff0000000000000,1 +np.float64,0x80034b93d4069728,0x3ff0000000000000,1 +np.float64,0xffdf5d4c1dbeba98,0x7ff0000000000000,1 +np.float64,0x800bc63d70178c7b,0x3ff0000000000000,1 +np.float64,0xbfeba31ea0f7463d,0x3ff658fa27073d2b,1 +np.float64,0xbfeebeede97d7ddc,0x3ff7f89a8e80dec4,1 +np.float64,0x7feb0f1f91361e3e,0x7ff0000000000000,1 +np.float64,0xffec3158d0b862b1,0x7ff0000000000000,1 +np.float64,0x3fde51cbfbbca398,0x3ff1d44c2ff15b3d,1 +np.float64,0xd58fb2b3ab1f7,0x3ff0000000000000,1 +np.float64,0x80028b9e32e5173d,0x3ff0000000000000,1 +np.float64,0x7fea77a56c74ef4a,0x7ff0000000000000,1 +np.float64,0x3fdaabbd4a35577b,0x3ff168d82edf2fe0,1 +np.float64,0xbfe69c39cc2d3874,0x3ff429b2f4cdb362,1 +np.float64,0x3b78f5d876f20,0x3ff0000000000000,1 +np.float64,0x7fa47d116428fa22,0x7ff0000000000000,1 +np.float64,0xbfe4118b0ce82316,0x3ff3403d989f780f,1 +np.float64,0x800482e793c905d0,0x3ff0000000000000,1 +np.float64,0xbfe48e5728e91cae,0x3ff36a9020bf9d20,1 +np.float64,0x7fe078ba8860f174,0x7ff0000000000000,1 +np.float64,0x3fd80843e5b01088,0x3ff1242f401e67da,1 +np.float64,0x3feb1f6965f63ed3,0x3ff6197fc590e143,1 +np.float64,0xffa41946d8283290,0x7ff0000000000000,1 +np.float64,0xffe30de129661bc2,0x7ff0000000000000,1 +np.float64,0x3fec9c8e1ab9391c,0x3ff6d542ea2f49b4,1 +np.float64,0x3fdc3e4490387c89,0x3ff1955ae18cac37,1 +np.float64,0xffef49d9c77e93b3,0x7ff0000000000000,1 +np.float64,0xfff0000000000000,0x7ff0000000000000,1 +np.float64,0x3fe0442455608849,0x3ff21cab90067d5c,1 +np.float64,0xbfed86aebd3b0d5e,0x3ff74ed8d4b75f50,1 +np.float64,0xffe4600d2b28c01a,0x7ff0000000000000,1 +np.float64,0x7fc1e8ccff23d199,0x7ff0000000000000,1 +np.float64,0x8008d49b0091a936,0x3ff0000000000000,1 +np.float64,0xbfe4139df028273c,0x3ff340ef3c86227c,1 +np.float64,0xbfe9ab4542b3568a,0x3ff56dfe32061247,1 +np.float64,0xbfd76dd365aedba6,0x3ff11589bab5fe71,1 +np.float64,0x3fd42cf829a859f0,0x3ff0cd3844bb0e11,1 +np.float64,0x7fd077cf2e20ef9d,0x7ff0000000000000,1 +np.float64,0x3fd7505760aea0b0,0x3ff112c937b3f088,1 +np.float64,0x1f93341a3f267,0x3ff0000000000000,1 +np.float64,0x7fe3c3c1b0678782,0x7ff0000000000000,1 +np.float64,0x800f85cec97f0b9e,0x3ff0000000000000,1 +np.float64,0xd93ab121b2756,0x3ff0000000000000,1 +np.float64,0xbfef8066fd7f00ce,0x3ff8663ed7d15189,1 +np.float64,0xffe31dd4af663ba9,0x7ff0000000000000,1 +np.float64,0xbfd7ff05a6affe0c,0x3ff1234c09bb686d,1 +np.float64,0xbfe718c31fee3186,0x3ff45a0c2d0ef7b0,1 +np.float64,0x800484bf33e9097f,0x3ff0000000000000,1 +np.float64,0xffd409dad02813b6,0x7ff0000000000000,1 +np.float64,0x3fe59679896b2cf4,0x3ff3c7f49e4fbbd3,1 +np.float64,0xbfd830c54d30618a,0x3ff1281729861390,1 +np.float64,0x1d4fc81c3a9fa,0x3ff0000000000000,1 +np.float64,0x3fd334e4272669c8,0x3ff0b9d5d82894f0,1 +np.float64,0xffc827e65c304fcc,0x7ff0000000000000,1 +np.float64,0xffe2d1814aa5a302,0x7ff0000000000000,1 +np.float64,0xffd7b5b8d32f6b72,0x7ff0000000000000,1 +np.float64,0xbfdbc9f077b793e0,0x3ff18836b9106ad0,1 +np.float64,0x7fc724c2082e4983,0x7ff0000000000000,1 +np.float64,0x3fa39ed72c273da0,0x3ff00302051ce17e,1 +np.float64,0xbfe3c4c209678984,0x3ff326c4fd16b5cd,1 +np.float64,0x7fe91f6d00f23ed9,0x7ff0000000000000,1 +np.float64,0x8004ee93fea9dd29,0x3ff0000000000000,1 +np.float64,0xbfe7c32d0eaf865a,0x3ff49e290ed2ca0e,1 +np.float64,0x800ea996b29d532d,0x3ff0000000000000,1 +np.float64,0x2df9ec1c5bf3e,0x3ff0000000000000,1 +np.float64,0xabb175df5762f,0x3ff0000000000000,1 +np.float64,0xffe3fc9c8e27f938,0x7ff0000000000000,1 +np.float64,0x7fb358a62826b14b,0x7ff0000000000000,1 +np.float64,0x800aedcccaf5db9a,0x3ff0000000000000,1 +np.float64,0xffca530c5234a618,0x7ff0000000000000,1 +np.float64,0x40f91e9681f24,0x3ff0000000000000,1 +np.float64,0x80098f4572f31e8b,0x3ff0000000000000,1 +np.float64,0xbfdc58c21fb8b184,0x3ff1986115f8fe92,1 +np.float64,0xbfebeafd40b7d5fa,0x3ff67c3cf34036e3,1 +np.float64,0x7fd108861a22110b,0x7ff0000000000000,1 +np.float64,0xff8e499ae03c9340,0x7ff0000000000000,1 +np.float64,0xbfd2f58caa25eb1a,0x3ff0b50b1bffafdf,1 +np.float64,0x3fa040c9bc208193,0x3ff002105e95aefa,1 +np.float64,0xbfd2ebc0a5a5d782,0x3ff0b44ed5a11584,1 +np.float64,0xffe237bc93a46f78,0x7ff0000000000000,1 +np.float64,0x3fd557c5eeaaaf8c,0x3ff0e5e0a575e1ba,1 +np.float64,0x7abb419ef5769,0x3ff0000000000000,1 +np.float64,0xffefa1fe353f43fb,0x7ff0000000000000,1 +np.float64,0x3fa6f80ba02df017,0x3ff0041f51fa0d76,1 +np.float64,0xbfdce79488b9cf2a,0x3ff1a8e32877beb4,1 +np.float64,0x2285f3e4450bf,0x3ff0000000000000,1 +np.float64,0x3bf7eb7277efe,0x3ff0000000000000,1 +np.float64,0xbfd5925fd3ab24c0,0x3ff0eae1c2ac2e78,1 +np.float64,0xbfed6325227ac64a,0x3ff73c14a2ad5bfe,1 +np.float64,0x8000429c02408539,0x3ff0000000000000,1 +np.float64,0xb67c21e76cf84,0x3ff0000000000000,1 +np.float64,0x3fec3d3462f87a69,0x3ff6a51e4c027eb7,1 +np.float64,0x3feae69cbcf5cd3a,0x3ff5fe9387314afd,1 +np.float64,0x7fd0c9a0ec219341,0x7ff0000000000000,1 +np.float64,0x8004adb7f6295b71,0x3ff0000000000000,1 +np.float64,0xffd61fe8bb2c3fd2,0x7ff0000000000000,1 +np.float64,0xffe7fb3834aff670,0x7ff0000000000000,1 +np.float64,0x7fd1eef163a3dde2,0x7ff0000000000000,1 +np.float64,0x2e84547a5d08b,0x3ff0000000000000,1 +np.float64,0x8002d8875ee5b10f,0x3ff0000000000000,1 +np.float64,0x3fe1d1c5f763a38c,0x3ff28ba524fb6de8,1 +np.float64,0x8001dea0bc43bd42,0x3ff0000000000000,1 +np.float64,0xfecfad91fd9f6,0x3ff0000000000000,1 +np.float64,0xffed7965fa3af2cb,0x7ff0000000000000,1 +np.float64,0xbfe6102ccc2c205a,0x3ff3f4c082506686,1 +np.float64,0x3feff75b777feeb6,0x3ff8ab6222578e0c,1 +np.float64,0x3fb8a97bd43152f8,0x3ff013057f0a9d89,1 +np.float64,0xffe234b5e964696c,0x7ff0000000000000,1 +np.float64,0x984d9137309b2,0x3ff0000000000000,1 +np.float64,0xbfe42e9230e85d24,0x3ff349fb7d1a7560,1 +np.float64,0xbfecc8b249f99165,0x3ff6ebd0fea0ea72,1 +np.float64,0x8000840910410813,0x3ff0000000000000,1 +np.float64,0xbfd81db9e7303b74,0x3ff126402d3539ec,1 +np.float64,0x800548eb7fea91d8,0x3ff0000000000000,1 +np.float64,0xbfe4679ad0e8cf36,0x3ff35d4db89296a3,1 +np.float64,0x3fd4c55b5a298ab7,0x3ff0d99da31081f9,1 +np.float64,0xbfa8f5b38c31eb60,0x3ff004de3a23b32d,1 +np.float64,0x80005d348e80ba6a,0x3ff0000000000000,1 +np.float64,0x800c348d6118691b,0x3ff0000000000000,1 +np.float64,0xffd6b88f84ad7120,0x7ff0000000000000,1 +np.float64,0x3fc1aaaa82235555,0x3ff027136afd08e0,1 +np.float64,0x7fca7d081b34fa0f,0x7ff0000000000000,1 +np.float64,0x1,0x3ff0000000000000,1 +np.float64,0xbfdc810d1139021a,0x3ff19d007408cfe3,1 +np.float64,0xbfe5dce05f2bb9c0,0x3ff3e1bb9234617b,1 +np.float64,0xffecfe2c32b9fc58,0x7ff0000000000000,1 +np.float64,0x95b2891b2b651,0x3ff0000000000000,1 +np.float64,0x8000b60c6c616c1a,0x3ff0000000000000,1 +np.float64,0x4944f0889289f,0x3ff0000000000000,1 +np.float64,0x3fe6e508696dca10,0x3ff445d1b94863e9,1 +np.float64,0xbfe63355d0ec66ac,0x3ff401e74f16d16f,1 +np.float64,0xbfe9b9595af372b3,0x3ff57445e1b4d670,1 +np.float64,0x800e16f7313c2dee,0x3ff0000000000000,1 +np.float64,0xffe898f5f0b131eb,0x7ff0000000000000,1 +np.float64,0x3fe91ac651f2358d,0x3ff52e787c21c004,1 +np.float64,0x7fbfaac6783f558c,0x7ff0000000000000,1 +np.float64,0xd8ef3dfbb1de8,0x3ff0000000000000,1 +np.float64,0xbfc58c13a52b1828,0x3ff03a2c19d65019,1 +np.float64,0xbfbde55e8a3bcac0,0x3ff01bf648a3e0a7,1 +np.float64,0xffc3034930260694,0x7ff0000000000000,1 +np.float64,0xea77a64dd4ef5,0x3ff0000000000000,1 +np.float64,0x800cfe7e7739fcfd,0x3ff0000000000000,1 +np.float64,0x4960f31a92c1f,0x3ff0000000000000,1 +np.float64,0x3fd9552c94b2aa58,0x3ff14515a29add09,1 +np.float64,0xffe8b3244c316648,0x7ff0000000000000,1 +np.float64,0x3fe8201e6a70403d,0x3ff4c444fa679cce,1 +np.float64,0xffe9ab7c20f356f8,0x7ff0000000000000,1 +np.float64,0x3fed8bba5f7b1774,0x3ff751853c4c95c5,1 +np.float64,0x8007639cb76ec73a,0x3ff0000000000000,1 +np.float64,0xbfe396db89672db7,0x3ff317bfd1d6fa8c,1 +np.float64,0xbfeb42f888f685f1,0x3ff62a7e0eee56b1,1 +np.float64,0x3fe894827c712904,0x3ff4f4f561d9ea13,1 +np.float64,0xb66b3caf6cd68,0x3ff0000000000000,1 +np.float64,0x800f8907fdbf1210,0x3ff0000000000000,1 +np.float64,0x7fe9b0cddb73619b,0x7ff0000000000000,1 +np.float64,0xbfda70c0e634e182,0x3ff1628c6fdffc53,1 +np.float64,0x3fe0b5f534a16bea,0x3ff23b4ed4c2b48e,1 +np.float64,0xbfe8eee93671ddd2,0x3ff51b85b3c50ae4,1 +np.float64,0xbfe8c22627f1844c,0x3ff50858787a3bfe,1 +np.float64,0x37bb83c86f771,0x3ff0000000000000,1 +np.float64,0xffb7827ffe2f0500,0x7ff0000000000000,1 +np.float64,0x64317940c864,0x3ff0000000000000,1 +np.float64,0x800430ecee6861db,0x3ff0000000000000,1 +np.float64,0x3fa4291fbc285240,0x3ff0032d0204f6dd,1 +np.float64,0xffec69f76af8d3ee,0x7ff0000000000000,1 +np.float64,0x3ff0000000000000,0x3ff8b07551d9f550,1 +np.float64,0x3fc4cf3c42299e79,0x3ff0363fb1d3c254,1 +np.float64,0x7fe0223a77e04474,0x7ff0000000000000,1 +np.float64,0x800a3d4fa4347aa0,0x3ff0000000000000,1 +np.float64,0x3fdd273f94ba4e7f,0x3ff1b05b686e6879,1 +np.float64,0x3feca79052f94f20,0x3ff6dadedfa283aa,1 +np.float64,0x5e7f6f80bcfef,0x3ff0000000000000,1 +np.float64,0xbfef035892fe06b1,0x3ff81efb39cbeba2,1 +np.float64,0x3fee6c08e07cd812,0x3ff7caad952860a1,1 +np.float64,0xffeda715877b4e2a,0x7ff0000000000000,1 +np.float64,0x800580286b0b0052,0x3ff0000000000000,1 +np.float64,0x800703a73fee074f,0x3ff0000000000000,1 +np.float64,0xbfccf96a6639f2d4,0x3ff0696330a60832,1 +np.float64,0x7feb408442368108,0x7ff0000000000000,1 +np.float64,0x3fedc87a46fb90f5,0x3ff771e3635649a9,1 +np.float64,0x3fd8297b773052f7,0x3ff12762bc0cea76,1 +np.float64,0x3fee41bb03fc8376,0x3ff7b37b2da48ab4,1 +np.float64,0xbfe2b05a226560b4,0x3ff2cea17ae7c528,1 +np.float64,0xbfd2e92cf2a5d25a,0x3ff0b41d605ced61,1 +np.float64,0x4817f03a902ff,0x3ff0000000000000,1 +np.float64,0x8c9d4f0d193aa,0x3ff0000000000000,1 diff --git a/local_packages/numpy/_core/tests/data/umath-validation-set-exp.csv b/local_packages/numpy/_core/tests/data/umath-validation-set-exp.csv new file mode 100644 index 0000000..7c5ef3b --- /dev/null +++ b/local_packages/numpy/_core/tests/data/umath-validation-set-exp.csv @@ -0,0 +1,412 @@ +dtype,input,output,ulperrortol +## +ve denormals ## +np.float32,0x004b4716,0x3f800000,3 +np.float32,0x007b2490,0x3f800000,3 +np.float32,0x007c99fa,0x3f800000,3 +np.float32,0x00734a0c,0x3f800000,3 +np.float32,0x0070de24,0x3f800000,3 +np.float32,0x00495d65,0x3f800000,3 +np.float32,0x006894f6,0x3f800000,3 +np.float32,0x00555a76,0x3f800000,3 +np.float32,0x004e1fb8,0x3f800000,3 +np.float32,0x00687de9,0x3f800000,3 +## -ve denormals ## +np.float32,0x805b59af,0x3f800000,3 +np.float32,0x807ed8ed,0x3f800000,3 +np.float32,0x807142ad,0x3f800000,3 +np.float32,0x80772002,0x3f800000,3 +np.float32,0x8062abcb,0x3f800000,3 +np.float32,0x8045e31c,0x3f800000,3 +np.float32,0x805f01c2,0x3f800000,3 +np.float32,0x80506432,0x3f800000,3 +np.float32,0x8060089d,0x3f800000,3 +np.float32,0x8071292f,0x3f800000,3 +## floats that output a denormal ## +np.float32,0xc2cf3fc1,0x00000001,3 +np.float32,0xc2c79726,0x00000021,3 +np.float32,0xc2cb295d,0x00000005,3 +np.float32,0xc2b49e6b,0x00068c4c,3 +np.float32,0xc2ca8116,0x00000008,3 +np.float32,0xc2c23f82,0x000001d7,3 +np.float32,0xc2cb69c0,0x00000005,3 +np.float32,0xc2cc1f4d,0x00000003,3 +np.float32,0xc2ae094e,0x00affc4c,3 +np.float32,0xc2c86c44,0x00000015,3 +## random floats between -87.0f and 88.0f ## +np.float32,0x4030d7e0,0x417d9a05,3 +np.float32,0x426f60e8,0x6aa1be2c,3 +np.float32,0x41a1b220,0x4e0efc11,3 +np.float32,0xc20cc722,0x26159da7,3 +np.float32,0x41c492bc,0x512ec79d,3 +np.float32,0x40980210,0x42e73a0e,3 +np.float32,0xbf1f7b80,0x3f094de3,3 +np.float32,0x42a678a4,0x7b87a383,3 +np.float32,0xc20f3cfd,0x25a1c304,3 +np.float32,0x423ff34c,0x6216467f,3 +np.float32,0x00000000,0x3f800000,3 +## floats that cause an overflow ## +np.float32,0x7f06d8c1,0x7f800000,3 +np.float32,0x7f451912,0x7f800000,3 +np.float32,0x7ecceac3,0x7f800000,3 +np.float32,0x7f643b45,0x7f800000,3 +np.float32,0x7e910ea0,0x7f800000,3 +np.float32,0x7eb4756b,0x7f800000,3 +np.float32,0x7f4ec708,0x7f800000,3 +np.float32,0x7f6b4551,0x7f800000,3 +np.float32,0x7d8edbda,0x7f800000,3 +np.float32,0x7f730718,0x7f800000,3 +np.float32,0x42b17217,0x7f7fff84,3 +np.float32,0x42b17218,0x7f800000,3 +np.float32,0x42b17219,0x7f800000,3 +np.float32,0xfef2b0bc,0x00000000,3 +np.float32,0xff69f83e,0x00000000,3 +np.float32,0xff4ecb12,0x00000000,3 +np.float32,0xfeac6d86,0x00000000,3 +np.float32,0xfde0cdb8,0x00000000,3 +np.float32,0xff26aef4,0x00000000,3 +np.float32,0xff6f9277,0x00000000,3 +np.float32,0xff7adfc4,0x00000000,3 +np.float32,0xff0ad40e,0x00000000,3 +np.float32,0xff6fd8f3,0x00000000,3 +np.float32,0xc2cff1b4,0x00000001,3 +np.float32,0xc2cff1b5,0x00000000,3 +np.float32,0xc2cff1b6,0x00000000,3 +np.float32,0x7f800000,0x7f800000,3 +np.float32,0xff800000,0x00000000,3 +np.float32,0x4292f27c,0x7480000a,3 +np.float32,0x42a920be,0x7c7fff94,3 +np.float32,0x41c214c9,0x50ffffd9,3 +np.float32,0x41abe686,0x4effffd9,3 +np.float32,0x4287db5a,0x707fffd3,3 +np.float32,0x41902cbb,0x4c800078,3 +np.float32,0x42609466,0x67ffffeb,3 +np.float32,0x41a65af5,0x4e7fffd1,3 +np.float32,0x417f13ff,0x4affffc9,3 +np.float32,0x426d0e6c,0x6a3504f2,3 +np.float32,0x41bc8934,0x507fff51,3 +np.float32,0x42a7bdde,0x7c0000d6,3 +np.float32,0x4120cf66,0x46b504f6,3 +np.float32,0x4244da8f,0x62ffff1a,3 +np.float32,0x41a0cf69,0x4e000034,3 +np.float32,0x41cd2bec,0x52000005,3 +np.float32,0x42893e41,0x7100009e,3 +np.float32,0x41b437e1,0x4fb50502,3 +np.float32,0x41d8430f,0x5300001d,3 +np.float32,0x4244da92,0x62ffffda,3 +np.float32,0x41a0cf63,0x4dffffa9,3 +np.float32,0x3eb17218,0x3fb504f3,3 +np.float32,0x428729e8,0x703504dc,3 +np.float32,0x41a0cf67,0x4e000014,3 +np.float32,0x4252b77d,0x65800011,3 +np.float32,0x41902cb9,0x4c800058,3 +np.float32,0x42a0cf67,0x79800052,3 +np.float32,0x4152b77b,0x48ffffe9,3 +np.float32,0x41265af3,0x46ffffc8,3 +np.float32,0x42187e0b,0x5affff9a,3 +np.float32,0xc0d2b77c,0x3ab504f6,3 +np.float32,0xc283b2ac,0x10000072,3 +np.float32,0xc1cff1b4,0x2cb504f5,3 +np.float32,0xc05dce9e,0x3d000000,3 +np.float32,0xc28ec9d2,0x0bfffea5,3 +np.float32,0xc23c893a,0x1d7fffde,3 +np.float32,0xc2a920c0,0x027fff6c,3 +np.float32,0xc1f9886f,0x2900002b,3 +np.float32,0xc2c42920,0x000000b5,3 +np.float32,0xc2893e41,0x0dfffec5,3 +np.float32,0xc2c4da93,0x00000080,3 +np.float32,0xc17f1401,0x3400000c,3 +np.float32,0xc1902cb6,0x327fffaf,3 +np.float32,0xc27c4e3b,0x11ffffc5,3 +np.float32,0xc268e5c5,0x157ffe9d,3 +np.float32,0xc2b4e953,0x0005a826,3 +np.float32,0xc287db5a,0x0e800016,3 +np.float32,0xc207db5a,0x2700000b,3 +np.float32,0xc2b2d4fe,0x000ffff1,3 +np.float32,0xc268e5c0,0x157fffdd,3 +np.float32,0xc22920bd,0x2100003b,3 +np.float32,0xc2902caf,0x0b80011e,3 +np.float32,0xc1902cba,0x327fff2f,3 +np.float32,0xc2ca6625,0x00000008,3 +np.float32,0xc280ece8,0x10fffeb5,3 +np.float32,0xc2918f94,0x0b0000ea,3 +np.float32,0xc29b43d5,0x077ffffc,3 +np.float32,0xc1e61ff7,0x2ab504f5,3 +np.float32,0xc2867878,0x0effff15,3 +np.float32,0xc2a2324a,0x04fffff4,3 +#float64 +## near zero ## +np.float64,0x8000000000000000,0x3ff0000000000000,1 +np.float64,0x8010000000000000,0x3ff0000000000000,1 +np.float64,0x8000000000000001,0x3ff0000000000000,1 +np.float64,0x8360000000000000,0x3ff0000000000000,1 +np.float64,0x9a70000000000000,0x3ff0000000000000,1 +np.float64,0xb9b0000000000000,0x3ff0000000000000,1 +np.float64,0xb810000000000000,0x3ff0000000000000,1 +np.float64,0xbc30000000000000,0x3ff0000000000000,1 +np.float64,0xb6a0000000000000,0x3ff0000000000000,1 +np.float64,0x0000000000000000,0x3ff0000000000000,1 +np.float64,0x0010000000000000,0x3ff0000000000000,1 +np.float64,0x0000000000000001,0x3ff0000000000000,1 +np.float64,0x0360000000000000,0x3ff0000000000000,1 +np.float64,0x1a70000000000000,0x3ff0000000000000,1 +np.float64,0x3c30000000000000,0x3ff0000000000000,1 +np.float64,0x36a0000000000000,0x3ff0000000000000,1 +np.float64,0x39b0000000000000,0x3ff0000000000000,1 +np.float64,0x3810000000000000,0x3ff0000000000000,1 +## underflow ## +np.float64,0xc0c6276800000000,0x0000000000000000,1 +np.float64,0xc0c62d918ce2421d,0x0000000000000000,1 +np.float64,0xc0c62d918ce2421e,0x0000000000000000,1 +np.float64,0xc0c62d91a0000000,0x0000000000000000,1 +np.float64,0xc0c62d9180000000,0x0000000000000000,1 +np.float64,0xc0c62dea45ee3e06,0x0000000000000000,1 +np.float64,0xc0c62dea45ee3e07,0x0000000000000000,1 +np.float64,0xc0c62dea40000000,0x0000000000000000,1 +np.float64,0xc0c62dea60000000,0x0000000000000000,1 +np.float64,0xc0875f1120000000,0x0000000000000000,1 +np.float64,0xc0875f113c30b1c8,0x0000000000000000,1 +np.float64,0xc0875f1140000000,0x0000000000000000,1 +np.float64,0xc093480000000000,0x0000000000000000,1 +np.float64,0xffefffffffffffff,0x0000000000000000,1 +np.float64,0xc7efffffe0000000,0x0000000000000000,1 +## overflow ## +np.float64,0x40862e52fefa39ef,0x7ff0000000000000,1 +np.float64,0x40872e42fefa39ef,0x7ff0000000000000,1 +## +/- INF, +/- NAN ## +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 +np.float64,0xfff0000000000000,0x0000000000000000,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0xfff8000000000000,0xfff8000000000000,1 +## output denormal ## +np.float64,0xc087438520000000,0x0000000000000001,1 +np.float64,0xc08743853f2f4461,0x0000000000000001,1 +np.float64,0xc08743853f2f4460,0x0000000000000001,1 +np.float64,0xc087438540000000,0x0000000000000001,1 +## between -745.13321910 and 709.78271289 ## +np.float64,0xbff760cd14774bd9,0x3fcdb14ced00ceb6,1 +np.float64,0xbff760cd20000000,0x3fcdb14cd7993879,1 +np.float64,0xbff760cd00000000,0x3fcdb14d12fbd264,1 +np.float64,0xc07f1cf360000000,0x130c1b369af14fda,1 +np.float64,0xbeb0000000000000,0x3feffffe00001000,1 +np.float64,0xbd70000000000000,0x3fefffffffffe000,1 +np.float64,0xc084fd46e5c84952,0x0360000000000139,1 +np.float64,0xc084fd46e5c84953,0x035ffffffffffe71,1 +np.float64,0xc084fd46e0000000,0x0360000b9096d32c,1 +np.float64,0xc084fd4700000000,0x035fff9721d12104,1 +np.float64,0xc086232bc0000000,0x0010003af5e64635,1 +np.float64,0xc086232bdd7abcd2,0x001000000000007c,1 +np.float64,0xc086232bdd7abcd3,0x000ffffffffffe7c,1 +np.float64,0xc086232be0000000,0x000ffffaf57a6fc9,1 +np.float64,0xc086233920000000,0x000fe590e3b45eb0,1 +np.float64,0xc086233938000000,0x000fe56133493c57,1 +np.float64,0xc086233940000000,0x000fe5514deffbbc,1 +np.float64,0xc086234c98000000,0x000fbf1024c32ccb,1 +np.float64,0xc086234ca0000000,0x000fbf0065bae78d,1 +np.float64,0xc086234c80000000,0x000fbf3f623a7724,1 +np.float64,0xc086234ec0000000,0x000fbad237c846f9,1 +np.float64,0xc086234ec8000000,0x000fbac27cfdec97,1 +np.float64,0xc086234ee0000000,0x000fba934cfd3dc2,1 +np.float64,0xc086234ef0000000,0x000fba73d7f618d9,1 +np.float64,0xc086234f00000000,0x000fba54632dddc0,1 +np.float64,0xc0862356e0000000,0x000faae0945b761a,1 +np.float64,0xc0862356f0000000,0x000faac13eb9a310,1 +np.float64,0xc086235700000000,0x000faaa1e9567b0a,1 +np.float64,0xc086236020000000,0x000f98cd75c11ed7,1 +np.float64,0xc086236ca0000000,0x000f8081b4d93f89,1 +np.float64,0xc086236cb0000000,0x000f8062b3f4d6c5,1 +np.float64,0xc086236cc0000000,0x000f8043b34e6f8c,1 +np.float64,0xc086238d98000000,0x000f41220d9b0d2c,1 +np.float64,0xc086238da0000000,0x000f4112cc80a01f,1 +np.float64,0xc086238d80000000,0x000f414fd145db5b,1 +np.float64,0xc08624fd00000000,0x000cbfce8ea1e6c4,1 +np.float64,0xc086256080000000,0x000c250747fcd46e,1 +np.float64,0xc08626c480000000,0x000a34f4bd975193,1 +np.float64,0xbf50000000000000,0x3feff800ffeaac00,1 +np.float64,0xbe10000000000000,0x3fefffffff800000,1 +np.float64,0xbcd0000000000000,0x3feffffffffffff8,1 +np.float64,0xc055d589e0000000,0x38100004bf94f63e,1 +np.float64,0xc055d58a00000000,0x380ffff97f292ce8,1 +np.float64,0xbfd962d900000000,0x3fe585a4b00110e1,1 +np.float64,0x3ff4bed280000000,0x400d411e7a58a303,1 +np.float64,0x3fff0b3620000000,0x401bd7737ffffcf3,1 +np.float64,0x3ff0000000000000,0x4005bf0a8b145769,1 +np.float64,0x3eb0000000000000,0x3ff0000100000800,1 +np.float64,0x3d70000000000000,0x3ff0000000001000,1 +np.float64,0x40862e42e0000000,0x7fefff841808287f,1 +np.float64,0x40862e42fefa39ef,0x7fefffffffffff2a,1 +np.float64,0x40862e0000000000,0x7feef85a11e73f2d,1 +np.float64,0x4000000000000000,0x401d8e64b8d4ddae,1 +np.float64,0x4009242920000000,0x40372a52c383a488,1 +np.float64,0x4049000000000000,0x44719103e4080b45,1 +np.float64,0x4008000000000000,0x403415e5bf6fb106,1 +np.float64,0x3f50000000000000,0x3ff00400800aab55,1 +np.float64,0x3e10000000000000,0x3ff0000000400000,1 +np.float64,0x3cd0000000000000,0x3ff0000000000004,1 +np.float64,0x40562e40a0000000,0x47effed088821c3f,1 +np.float64,0x40562e42e0000000,0x47effff082e6c7ff,1 +np.float64,0x40562e4300000000,0x47f00000417184b8,1 +np.float64,0x3fe8000000000000,0x4000ef9db467dcf8,1 +np.float64,0x402b12e8d4f33589,0x412718f68c71a6fe,1 +np.float64,0x402b12e8d4f3358a,0x412718f68c71a70a,1 +np.float64,0x402b12e8c0000000,0x412718f59a7f472e,1 +np.float64,0x402b12e8e0000000,0x412718f70c0eac62,1 +##use 1th entry +np.float64,0x40631659AE147CB4,0x4db3a95025a4890f,1 +np.float64,0xC061B87D2E85A4E2,0x332640c8e2de2c51,1 +np.float64,0x405A4A50BE243AF4,0x496a45e4b7f0339a,1 +np.float64,0xC0839898B98EC5C6,0x0764027828830df4,1 +#use 2th entry +np.float64,0xC072428C44B6537C,0x2596ade838b96f3e,1 +np.float64,0xC053057C5E1AE9BF,0x3912c8fad18fdadf,1 +np.float64,0x407E89C78328BAA3,0x6bfe35d5b9a1a194,1 +np.float64,0x4083501B6DD87112,0x77a855503a38924e,1 +#use 3th entry +np.float64,0x40832C6195F24540,0x7741e73c80e5eb2f,1 +np.float64,0xC083D4CD557C2EC9,0x06b61727c2d2508e,1 +np.float64,0x400C48F5F67C99BD,0x404128820f02b92e,1 +np.float64,0x4056E36D9B2DF26A,0x4830f52ff34a8242,1 +#use 4th entry +np.float64,0x4080FF700D8CBD06,0x70fa70df9bc30f20,1 +np.float64,0x406C276D39E53328,0x543eb8e20a8f4741,1 +np.float64,0xC070D6159BBD8716,0x27a4a0548c904a75,1 +np.float64,0xC052EBCF8ED61F83,0x391c0e92368d15e4,1 +#use 5th entry +np.float64,0xC061F892A8AC5FBE,0x32f807a89efd3869,1 +np.float64,0x4021D885D2DBA085,0x40bd4dc86d3e3270,1 +np.float64,0x40767AEEEE7D4FCF,0x605e22851ee2afb7,1 +np.float64,0xC0757C5D75D08C80,0x20f0751599b992a2,1 +#use 6th entry +np.float64,0x405ACF7A284C4CE3,0x499a4e0b7a27027c,1 +np.float64,0xC085A6C9E80D7AF5,0x0175914009d62ec2,1 +np.float64,0xC07E4C02F86F1DAE,0x1439269b29a9231e,1 +np.float64,0x4080D80F9691CC87,0x7088a6cdafb041de,1 +#use 7th entry +np.float64,0x407FDFD84FBA0AC1,0x6deb1ae6f9bc4767,1 +np.float64,0x40630C06A1A2213D,0x4dac7a9d51a838b7,1 +np.float64,0x40685FDB30BB8B4F,0x5183f5cc2cac9e79,1 +np.float64,0x408045A2208F77F4,0x6ee299e08e2aa2f0,1 +#use 8th entry +np.float64,0xC08104E391F5078B,0x0ed397b7cbfbd230,1 +np.float64,0xC031501CAEFAE395,0x3e6040fd1ea35085,1 +np.float64,0xC079229124F6247C,0x1babf4f923306b1e,1 +np.float64,0x407FB65F44600435,0x6db03beaf2512b8a,1 +#use 9th entry +np.float64,0xC07EDEE8E8E8A5AC,0x136536cec9cbef48,1 +np.float64,0x4072BB4086099A14,0x5af4d3c3008b56cc,1 +np.float64,0x4050442A2EC42CB4,0x45cd393bd8fad357,1 +np.float64,0xC06AC28FB3D419B4,0x2ca1b9d3437df85f,1 +#use 10th entry +np.float64,0x40567FC6F0A68076,0x480c977fd5f3122e,1 +np.float64,0x40620A2F7EDA59BB,0x4cf278e96f4ce4d7,1 +np.float64,0xC085044707CD557C,0x034aad6c968a045a,1 +np.float64,0xC07374EA5AC516AA,0x23dd6afdc03e83d5,1 +#use 11th entry +np.float64,0x4073CC95332619C1,0x5c804b1498bbaa54,1 +np.float64,0xC0799FEBBE257F31,0x1af6a954c43b87d2,1 +np.float64,0x408159F19EA424F6,0x7200858efcbfc84d,1 +np.float64,0x404A81F6F24C0792,0x44b664a07ce5bbfa,1 +#use 12th entry +np.float64,0x40295FF1EFB9A741,0x4113c0e74c52d7b0,1 +np.float64,0x4073975F4CC411DA,0x5c32be40b4fec2c1,1 +np.float64,0x406E9DE52E82A77E,0x56049c9a3f1ae089,1 +np.float64,0x40748C2F52560ED9,0x5d93bc14fd4cd23b,1 +#use 13th entry +np.float64,0x4062A553CDC4D04C,0x4d6266bfde301318,1 +np.float64,0xC079EC1D63598AB7,0x1a88cb184dab224c,1 +np.float64,0xC0725C1CB3167427,0x25725b46f8a081f6,1 +np.float64,0x407888771D9B45F9,0x6353b1ec6bd7ce80,1 +#use 14th entry +np.float64,0xC082CBA03AA89807,0x09b383723831ce56,1 +np.float64,0xC083A8961BB67DD7,0x0735b118d5275552,1 +np.float64,0xC076BC6ECA12E7E3,0x1f2222679eaef615,1 +np.float64,0xC072752503AA1A5B,0x254eb832242c77e1,1 +#use 15th entry +np.float64,0xC058800792125DEC,0x371882372a0b48d4,1 +np.float64,0x4082909FD863E81C,0x7580d5f386920142,1 +np.float64,0xC071616F8FB534F9,0x26dbe20ef64a412b,1 +np.float64,0x406D1AB571CAA747,0x54ee0d55cb38ac20,1 +#use 16th entry +np.float64,0x406956428B7DAD09,0x52358682c271237f,1 +np.float64,0xC07EFC2D9D17B621,0x133b3e77c27a4d45,1 +np.float64,0xC08469BAC5BA3CCA,0x050863e5f42cc52f,1 +np.float64,0x407189D9626386A5,0x593cb1c0b3b5c1d3,1 +#use 17th entry +np.float64,0x4077E652E3DEB8C6,0x6269a10dcbd3c752,1 +np.float64,0x407674C97DB06878,0x605485dcc2426ec2,1 +np.float64,0xC07CE9969CF4268D,0x16386cf8996669f2,1 +np.float64,0x40780EE32D5847C4,0x62a436bd1abe108d,1 +#use 18th entry +np.float64,0x4076C3AA5E1E8DA1,0x60c62f56a5e72e24,1 +np.float64,0xC0730AFC7239B9BE,0x24758ead095cec1e,1 +np.float64,0xC085CC2B9C420DDB,0x0109cdaa2e5694c1,1 +np.float64,0x406D0765CB6D7AA4,0x54e06f8dd91bd945,1 +#use 19th entry +np.float64,0xC082D011F3B495E7,0x09a6647661d279c2,1 +np.float64,0xC072826AF8F6AFBC,0x253acd3cd224507e,1 +np.float64,0x404EB9C4810CEA09,0x457933dbf07e8133,1 +np.float64,0x408284FBC97C58CE,0x755f6eb234aa4b98,1 +#use 20th entry +np.float64,0x40856008CF6EDC63,0x7d9c0b3c03f4f73c,1 +np.float64,0xC077CB2E9F013B17,0x1d9b3d3a166a55db,1 +np.float64,0xC0479CA3C20AD057,0x3bad40e081555b99,1 +np.float64,0x40844CD31107332A,0x7a821d70aea478e2,1 +#use 21th entry +np.float64,0xC07C8FCC0BFCC844,0x16ba1cc8c539d19b,1 +np.float64,0xC085C4E9A3ABA488,0x011ff675ba1a2217,1 +np.float64,0x4074D538B32966E5,0x5dfd9d78043c6ad9,1 +np.float64,0xC0630CA16902AD46,0x3231a446074cede6,1 +#use 22th entry +np.float64,0xC06C826733D7D0B7,0x2b5f1078314d41e1,1 +np.float64,0xC0520DF55B2B907F,0x396c13a6ce8e833e,1 +np.float64,0xC080712072B0F437,0x107eae02d11d98ea,1 +np.float64,0x40528A6150E19EFB,0x469fdabda02228c5,1 +#use 23th entry +np.float64,0xC07B1D74B6586451,0x18d1253883ae3b48,1 +np.float64,0x4045AFD7867DAEC0,0x43d7d634fc4c5d98,1 +np.float64,0xC07A08B91F9ED3E2,0x1a60973e6397fc37,1 +np.float64,0x407B3ECF0AE21C8C,0x673e03e9d98d7235,1 +#use 24th entry +np.float64,0xC078AEB6F30CEABF,0x1c530b93ab54a1b3,1 +np.float64,0x4084495006A41672,0x7a775b6dc7e63064,1 +np.float64,0x40830B1C0EBF95DD,0x76e1e6eed77cfb89,1 +np.float64,0x407D93E8F33D8470,0x6a9adbc9e1e4f1e5,1 +#use 25th entry +np.float64,0x4066B11A09EFD9E8,0x504dd528065c28a7,1 +np.float64,0x408545823723AEEB,0x7d504a9b1844f594,1 +np.float64,0xC068C711F2CA3362,0x2e104f3496ea118e,1 +np.float64,0x407F317FCC3CA873,0x6cf0732c9948ebf4,1 +#use 26th entry +np.float64,0x407AFB3EBA2ED50F,0x66dc28a129c868d5,1 +np.float64,0xC075377037708ADE,0x21531a329f3d793e,1 +np.float64,0xC07C30066A1F3246,0x174448baa16ded2b,1 +np.float64,0xC06689A75DE2ABD3,0x2fad70662fae230b,1 +#use 27th entry +np.float64,0x4081514E9FCCF1E0,0x71e673b9efd15f44,1 +np.float64,0xC0762C710AF68460,0x1ff1ed7d8947fe43,1 +np.float64,0xC0468102FF70D9C4,0x3be0c3a8ff3419a3,1 +np.float64,0xC07EA4CEEF02A83E,0x13b908f085102c61,1 +#use 28th entry +np.float64,0xC06290B04AE823C4,0x328a83da3c2e3351,1 +np.float64,0xC0770EB1D1C395FB,0x1eab281c1f1db5fe,1 +np.float64,0xC06F5D4D838A5BAE,0x29500ea32fb474ea,1 +np.float64,0x40723B3133B54C5D,0x5a3c82c7c3a2b848,1 +#use 29th entry +np.float64,0x4085E6454CE3B4AA,0x7f20319b9638d06a,1 +np.float64,0x408389F2A0585D4B,0x7850667c58aab3d0,1 +np.float64,0xC0382798F9C8AE69,0x3dc1c79fe8739d6d,1 +np.float64,0xC08299D827608418,0x0a4335f76cdbaeb5,1 +#use 30th entry +np.float64,0xC06F3DED43301BF1,0x2965670ae46750a8,1 +np.float64,0xC070CAF6BDD577D9,0x27b4aa4ffdd29981,1 +np.float64,0x4078529AD4B2D9F2,0x6305c12755d5e0a6,1 +np.float64,0xC055B14E75A31B96,0x381c2eda6d111e5d,1 +#use 31th entry +np.float64,0x407B13EE414FA931,0x6700772c7544564d,1 +np.float64,0x407EAFDE9DE3EC54,0x6c346a0e49724a3c,1 +np.float64,0xC08362F398B9530D,0x07ffeddbadf980cb,1 +np.float64,0x407E865CDD9EEB86,0x6bf866cac5e0d126,1 +#use 32th entry +np.float64,0x407FB62DBC794C86,0x6db009f708ac62cb,1 +np.float64,0xC063D0BAA68CDDDE,0x31a3b2a51ce50430,1 +np.float64,0xC05E7706A2231394,0x34f24bead6fab5c9,1 +np.float64,0x4083E3A06FDE444E,0x79527b7a386d1937,1 diff --git a/local_packages/numpy/_core/tests/data/umath-validation-set-exp2.csv b/local_packages/numpy/_core/tests/data/umath-validation-set-exp2.csv new file mode 100644 index 0000000..4e0a63e --- /dev/null +++ b/local_packages/numpy/_core/tests/data/umath-validation-set-exp2.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0xbdfe94b0,0x3f6adda6,2 +np.float32,0x3f20f8f8,0x3fc5ec69,2 +np.float32,0x7040b5,0x3f800000,2 +np.float32,0x30ec5,0x3f800000,2 +np.float32,0x3eb63070,0x3fa3ce29,2 +np.float32,0xff4dda3d,0x0,2 +np.float32,0x805b832f,0x3f800000,2 +np.float32,0x3e883fb7,0x3f99ed8c,2 +np.float32,0x3f14d71f,0x3fbf8708,2 +np.float32,0xff7b1e55,0x0,2 +np.float32,0xbf691ac6,0x3f082fa2,2 +np.float32,0x7ee3e6ab,0x7f800000,2 +np.float32,0xbec6e2b4,0x3f439248,2 +np.float32,0xbf5f5ec2,0x3f0bd2c0,2 +np.float32,0x8025cc2c,0x3f800000,2 +np.float32,0x7e0d7672,0x7f800000,2 +np.float32,0xff4bbc5c,0x0,2 +np.float32,0xbd94fb30,0x3f73696b,2 +np.float32,0x6cc079,0x3f800000,2 +np.float32,0x803cf080,0x3f800000,2 +np.float32,0x71d418,0x3f800000,2 +np.float32,0xbf24a442,0x3f23ec1e,2 +np.float32,0xbe6c9510,0x3f5a1e1d,2 +np.float32,0xbe8fb284,0x3f52be38,2 +np.float32,0x7ea64754,0x7f800000,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0x80620cfd,0x3f800000,2 +np.float32,0x3f3e20e8,0x3fd62e72,2 +np.float32,0x3f384600,0x3fd2d00e,2 +np.float32,0xff362150,0x0,2 +np.float32,0xbf349fa8,0x3f1cfaef,2 +np.float32,0xbf776cf2,0x3f0301a6,2 +np.float32,0x8021fc60,0x3f800000,2 +np.float32,0xbdb75280,0x3f70995c,2 +np.float32,0x7e9363a6,0x7f800000,2 +np.float32,0x7e728422,0x7f800000,2 +np.float32,0xfe91edc2,0x0,2 +np.float32,0x3f5f438c,0x3fea491d,2 +np.float32,0x3f2afae9,0x3fcb5c1f,2 +np.float32,0xbef8e766,0x3f36c448,2 +np.float32,0xba522c00,0x3f7fdb97,2 +np.float32,0xff18ee8c,0x0,2 +np.float32,0xbee8c5f4,0x3f3acd44,2 +np.float32,0x3e790448,0x3f97802c,2 +np.float32,0x3e8c9541,0x3f9ad571,2 +np.float32,0xbf03fa9f,0x3f331460,2 +np.float32,0x801ee053,0x3f800000,2 +np.float32,0xbf773230,0x3f03167f,2 +np.float32,0x356fd9,0x3f800000,2 +np.float32,0x8009cd88,0x3f800000,2 +np.float32,0x7f2bac51,0x7f800000,2 +np.float32,0x4d9eeb,0x3f800000,2 +np.float32,0x3133,0x3f800000,2 +np.float32,0x7f4290e0,0x7f800000,2 +np.float32,0xbf5e6523,0x3f0c3161,2 +np.float32,0x3f19182e,0x3fc1bf10,2 +np.float32,0x7e1248bb,0x7f800000,2 +np.float32,0xff5f7aae,0x0,2 +np.float32,0x7e8557b5,0x7f800000,2 +np.float32,0x26fc7f,0x3f800000,2 +np.float32,0x80397d61,0x3f800000,2 +np.float32,0x3cb1825d,0x3f81efe0,2 +np.float32,0x3ed808d0,0x3fab7c45,2 +np.float32,0xbf6f668a,0x3f05e259,2 +np.float32,0x3e3c7802,0x3f916abd,2 +np.float32,0xbd5ac5a0,0x3f76b21b,2 +np.float32,0x805aa6c9,0x3f800000,2 +np.float32,0xbe4d6f68,0x3f5ec3e1,2 +np.float32,0x3f3108b2,0x3fceb87f,2 +np.float32,0x3ec385cc,0x3fa6c9fb,2 +np.float32,0xbe9fc1ce,0x3f4e35e8,2 +np.float32,0x43b68,0x3f800000,2 +np.float32,0x3ef0cdcc,0x3fb15557,2 +np.float32,0x3e3f729b,0x3f91b5e1,2 +np.float32,0x7f52a4df,0x7f800000,2 +np.float32,0xbf56da96,0x3f0f15b9,2 +np.float32,0xbf161d2b,0x3f2a7faf,2 +np.float32,0x3e8df763,0x3f9b1fbe,2 +np.float32,0xff4f0780,0x0,2 +np.float32,0x8048f594,0x3f800000,2 +np.float32,0x3e62bb1d,0x3f953b7e,2 +np.float32,0xfe58e764,0x0,2 +np.float32,0x3dd2c922,0x3f897718,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0xff07b4b2,0x0,2 +np.float32,0x7f6231a0,0x7f800000,2 +np.float32,0xb8d1d,0x3f800000,2 +np.float32,0x3ee01d24,0x3fad5f16,2 +np.float32,0xbf43f59f,0x3f169869,2 +np.float32,0x801f5257,0x3f800000,2 +np.float32,0x803c15d8,0x3f800000,2 +np.float32,0x3f171a08,0x3fc0b42a,2 +np.float32,0x127aef,0x3f800000,2 +np.float32,0xfd1c6,0x3f800000,2 +np.float32,0x3f1ed13e,0x3fc4c59a,2 +np.float32,0x57fd4f,0x3f800000,2 +np.float32,0x6e8c61,0x3f800000,2 +np.float32,0x804019ab,0x3f800000,2 +np.float32,0x3ef4e5c6,0x3fb251a1,2 +np.float32,0x5044c3,0x3f800000,2 +np.float32,0x3f04460f,0x3fb7204b,2 +np.float32,0x7e326b47,0x7f800000,2 +np.float32,0x800a7e4c,0x3f800000,2 +np.float32,0xbf47ec82,0x3f14fccc,2 +np.float32,0xbedb1b3e,0x3f3e4a4d,2 +np.float32,0x3f741d86,0x3ff7e4b0,2 +np.float32,0xbe249d20,0x3f6501a6,2 +np.float32,0xbf2ea152,0x3f1f8c68,2 +np.float32,0x3ec6dbcc,0x3fa78b3f,2 +np.float32,0x7ebd9bb4,0x7f800000,2 +np.float32,0x3f61b574,0x3febd77a,2 +np.float32,0x3f3dfb2b,0x3fd61891,2 +np.float32,0x3c7d95,0x3f800000,2 +np.float32,0x8071e840,0x3f800000,2 +np.float32,0x15c6fe,0x3f800000,2 +np.float32,0xbf096601,0x3f307893,2 +np.float32,0x7f5c2ef9,0x7f800000,2 +np.float32,0xbe79f750,0x3f582689,2 +np.float32,0x1eb692,0x3f800000,2 +np.float32,0xbd8024f0,0x3f75226d,2 +np.float32,0xbf5a8be8,0x3f0da950,2 +np.float32,0xbf4d28f3,0x3f12e3e1,2 +np.float32,0x7f800000,0x7f800000,2 +np.float32,0xfea8a758,0x0,2 +np.float32,0x8075d2cf,0x3f800000,2 +np.float32,0xfd99af58,0x0,2 +np.float32,0x9e6a,0x3f800000,2 +np.float32,0x2fa19f,0x3f800000,2 +np.float32,0x3e9f4206,0x3f9ecc56,2 +np.float32,0xbee0b666,0x3f3cd9fc,2 +np.float32,0xbec558c4,0x3f43fab1,2 +np.float32,0x7e9a77df,0x7f800000,2 +np.float32,0xff3a9694,0x0,2 +np.float32,0x3f3b3708,0x3fd47f9a,2 +np.float32,0x807cd6d4,0x3f800000,2 +np.float32,0x804aa422,0x3f800000,2 +np.float32,0xfead7a70,0x0,2 +np.float32,0x3f08c610,0x3fb95efe,2 +np.float32,0xff390126,0x0,2 +np.float32,0x5d2d47,0x3f800000,2 +np.float32,0x8006849c,0x3f800000,2 +np.float32,0x654f6e,0x3f800000,2 +np.float32,0xff478a16,0x0,2 +np.float32,0x3f480b0c,0x3fdc024c,2 +np.float32,0xbc3b96c0,0x3f7df9f4,2 +np.float32,0xbcc96460,0x3f7bacb5,2 +np.float32,0x7f349f30,0x7f800000,2 +np.float32,0xbe08fa98,0x3f6954a1,2 +np.float32,0x4f3a13,0x3f800000,2 +np.float32,0x7f6a5ab4,0x7f800000,2 +np.float32,0x7eb85247,0x7f800000,2 +np.float32,0xbf287246,0x3f223e08,2 +np.float32,0x801584d0,0x3f800000,2 +np.float32,0x7ec25371,0x7f800000,2 +np.float32,0x3f002165,0x3fb51552,2 +np.float32,0x3e1108a8,0x3f8d3429,2 +np.float32,0x4f0f88,0x3f800000,2 +np.float32,0x7f67c1ce,0x7f800000,2 +np.float32,0xbf4348f8,0x3f16dedf,2 +np.float32,0xbe292b64,0x3f644d24,2 +np.float32,0xbf2bfa36,0x3f20b2d6,2 +np.float32,0xbf2a6e58,0x3f215f71,2 +np.float32,0x3e97d5d3,0x3f9d35df,2 +np.float32,0x31f597,0x3f800000,2 +np.float32,0x100544,0x3f800000,2 +np.float32,0x10a197,0x3f800000,2 +np.float32,0x3f44df50,0x3fda20d2,2 +np.float32,0x59916d,0x3f800000,2 +np.float32,0x707472,0x3f800000,2 +np.float32,0x8054194e,0x3f800000,2 +np.float32,0x80627b01,0x3f800000,2 +np.float32,0x7f4d5a5b,0x7f800000,2 +np.float32,0xbcecad00,0x3f7aeca5,2 +np.float32,0xff69c541,0x0,2 +np.float32,0xbe164e20,0x3f673c3a,2 +np.float32,0x3dd321de,0x3f897b39,2 +np.float32,0x3c9c4900,0x3f81b431,2 +np.float32,0x7f0efae3,0x7f800000,2 +np.float32,0xbf1b3ee6,0x3f282567,2 +np.float32,0x3ee858ac,0x3faf5083,2 +np.float32,0x3f0e6a39,0x3fbc3965,2 +np.float32,0x7f0c06d8,0x7f800000,2 +np.float32,0x801dd236,0x3f800000,2 +np.float32,0x564245,0x3f800000,2 +np.float32,0x7e99d3ad,0x7f800000,2 +np.float32,0xff3b0164,0x0,2 +np.float32,0x3f386f18,0x3fd2e785,2 +np.float32,0x7f603c39,0x7f800000,2 +np.float32,0x3cbd9b00,0x3f8211f0,2 +np.float32,0x2178e2,0x3f800000,2 +np.float32,0x5db226,0x3f800000,2 +np.float32,0xfec78d62,0x0,2 +np.float32,0x7f40bc1e,0x7f800000,2 +np.float32,0x80325064,0x3f800000,2 +np.float32,0x3f6068dc,0x3feb0377,2 +np.float32,0xfe8b95c6,0x0,2 +np.float32,0xbe496894,0x3f5f5f87,2 +np.float32,0xbf18722a,0x3f296cf4,2 +np.float32,0x332d0e,0x3f800000,2 +np.float32,0x3f6329dc,0x3fecc5c0,2 +np.float32,0x807d1802,0x3f800000,2 +np.float32,0x3e8afcee,0x3f9a7ff1,2 +np.float32,0x26a0a7,0x3f800000,2 +np.float32,0x7f13085d,0x7f800000,2 +np.float32,0x68d547,0x3f800000,2 +np.float32,0x7e9b04ae,0x7f800000,2 +np.float32,0x3f3ecdfe,0x3fd692ea,2 +np.float32,0x805256f4,0x3f800000,2 +np.float32,0x3f312dc8,0x3fcecd42,2 +np.float32,0x23ca15,0x3f800000,2 +np.float32,0x3f53c455,0x3fe31ad6,2 +np.float32,0xbf21186c,0x3f2580fd,2 +np.float32,0x803b9bb1,0x3f800000,2 +np.float32,0xff6ae1fc,0x0,2 +np.float32,0x2103cf,0x3f800000,2 +np.float32,0xbedcec6c,0x3f3dd29d,2 +np.float32,0x7f520afa,0x7f800000,2 +np.float32,0x7e8b44f2,0x7f800000,2 +np.float32,0xfef7f6ce,0x0,2 +np.float32,0xbd5e7c30,0x3f768a6f,2 +np.float32,0xfeb36848,0x0,2 +np.float32,0xff49effb,0x0,2 +np.float32,0xbec207c0,0x3f44dc74,2 +np.float32,0x3e91147f,0x3f9bc77f,2 +np.float32,0xfe784cd4,0x0,2 +np.float32,0xfd1a7250,0x0,2 +np.float32,0xff3b3f48,0x0,2 +np.float32,0x3f685db5,0x3ff0219f,2 +np.float32,0x3f370976,0x3fd21bae,2 +np.float32,0xfed4cc20,0x0,2 +np.float32,0xbf41e337,0x3f17714a,2 +np.float32,0xbf4e8638,0x3f12593a,2 +np.float32,0x3edaf0f1,0x3fac295e,2 +np.float32,0x803cbb4f,0x3f800000,2 +np.float32,0x7f492043,0x7f800000,2 +np.float32,0x2cabcf,0x3f800000,2 +np.float32,0x17f8ac,0x3f800000,2 +np.float32,0x3e846478,0x3f99205a,2 +np.float32,0x76948f,0x3f800000,2 +np.float32,0x1,0x3f800000,2 +np.float32,0x7ea6419e,0x7f800000,2 +np.float32,0xa5315,0x3f800000,2 +np.float32,0xff3a8e32,0x0,2 +np.float32,0xbe5714e8,0x3f5d50b7,2 +np.float32,0xfeadf960,0x0,2 +np.float32,0x3ebbd1a8,0x3fa50efc,2 +np.float32,0x7f31dce7,0x7f800000,2 +np.float32,0x80314999,0x3f800000,2 +np.float32,0x8017f41b,0x3f800000,2 +np.float32,0x7ed6d051,0x7f800000,2 +np.float32,0x7f525688,0x7f800000,2 +np.float32,0x7f7fffff,0x7f800000,2 +np.float32,0x3e8b0461,0x3f9a8180,2 +np.float32,0x3d9fe46e,0x3f871e1f,2 +np.float32,0x5e6d8f,0x3f800000,2 +np.float32,0xbf09ae55,0x3f305608,2 +np.float32,0xfe7028c4,0x0,2 +np.float32,0x7f3ade56,0x7f800000,2 +np.float32,0xff4c9ef9,0x0,2 +np.float32,0x7e3199cf,0x7f800000,2 +np.float32,0x8048652f,0x3f800000,2 +np.float32,0x805e1237,0x3f800000,2 +np.float32,0x189ed8,0x3f800000,2 +np.float32,0xbea7c094,0x3f4bfd98,2 +np.float32,0xbf2f109c,0x3f1f5c5c,2 +np.float32,0xbf0e7f4c,0x3f2e0d2c,2 +np.float32,0x8005981f,0x3f800000,2 +np.float32,0xbf762005,0x3f0377f3,2 +np.float32,0xbf0f60ab,0x3f2da317,2 +np.float32,0xbf4aa3e7,0x3f13e54e,2 +np.float32,0xbf348fd2,0x3f1d01aa,2 +np.float32,0x3e530b50,0x3f93a7fb,2 +np.float32,0xbf0b05a4,0x3f2fb26a,2 +np.float32,0x3eea416c,0x3fafc4aa,2 +np.float32,0x805ad04d,0x3f800000,2 +np.float32,0xbf6328d8,0x3f0a655e,2 +np.float32,0x3f7347b9,0x3ff75558,2 +np.float32,0xfda3ca68,0x0,2 +np.float32,0x80497d21,0x3f800000,2 +np.float32,0x3e740452,0x3f96fd22,2 +np.float32,0x3e528e57,0x3f939b7e,2 +np.float32,0x3e9e19fa,0x3f9e8cbd,2 +np.float32,0x8078060b,0x3f800000,2 +np.float32,0x3f3fea7a,0x3fd73872,2 +np.float32,0xfcfa30a0,0x0,2 +np.float32,0x7f4eb4bf,0x7f800000,2 +np.float32,0x3f712618,0x3ff5e900,2 +np.float32,0xbf668f0e,0x3f0920c6,2 +np.float32,0x3f3001e9,0x3fce259d,2 +np.float32,0xbe9b6fac,0x3f4f6b9c,2 +np.float32,0xbf61fcf3,0x3f0ad5ec,2 +np.float32,0xff08a55c,0x0,2 +np.float32,0x3e805014,0x3f984872,2 +np.float32,0x6ce04c,0x3f800000,2 +np.float32,0x7f7cbc07,0x7f800000,2 +np.float32,0x3c87dc,0x3f800000,2 +np.float32,0x3f2ee498,0x3fcd869a,2 +np.float32,0x4b1116,0x3f800000,2 +np.float32,0x3d382d06,0x3f840d5f,2 +np.float32,0xff7de21e,0x0,2 +np.float32,0x3f2f1d6d,0x3fcda63c,2 +np.float32,0xbf1c1618,0x3f27c38a,2 +np.float32,0xff4264b1,0x0,2 +np.float32,0x8026e5e7,0x3f800000,2 +np.float32,0xbe6fa180,0x3f59ab02,2 +np.float32,0xbe923c02,0x3f52053b,2 +np.float32,0xff3aa453,0x0,2 +np.float32,0x3f77a7ac,0x3ffa47d0,2 +np.float32,0xbed15f36,0x3f40d08a,2 +np.float32,0xa62d,0x3f800000,2 +np.float32,0xbf342038,0x3f1d3123,2 +np.float32,0x7f2f7f80,0x7f800000,2 +np.float32,0x7f2b6fc1,0x7f800000,2 +np.float32,0xff323540,0x0,2 +np.float32,0x3f1a2b6e,0x3fc24faa,2 +np.float32,0x800cc1d2,0x3f800000,2 +np.float32,0xff38fa01,0x0,2 +np.float32,0x80800000,0x3f800000,2 +np.float32,0xbf3d22e0,0x3f196745,2 +np.float32,0x7f40fd62,0x7f800000,2 +np.float32,0x7e1785c7,0x7f800000,2 +np.float32,0x807408c4,0x3f800000,2 +np.float32,0xbf300192,0x3f1ef485,2 +np.float32,0x351e3d,0x3f800000,2 +np.float32,0x7f5ab736,0x7f800000,2 +np.float32,0x2f1696,0x3f800000,2 +np.float32,0x806ac5d7,0x3f800000,2 +np.float32,0x42ec59,0x3f800000,2 +np.float32,0x7f79f52d,0x7f800000,2 +np.float32,0x44ad28,0x3f800000,2 +np.float32,0xbf49dc9c,0x3f143532,2 +np.float32,0x3f6c1f1f,0x3ff295e7,2 +np.float32,0x1589b3,0x3f800000,2 +np.float32,0x3f49b44e,0x3fdd0031,2 +np.float32,0x7f5942c9,0x7f800000,2 +np.float32,0x3f2dab28,0x3fccd877,2 +np.float32,0xff7fffff,0x0,2 +np.float32,0x80578eb2,0x3f800000,2 +np.float32,0x3f39ba67,0x3fd3a50b,2 +np.float32,0x8020340d,0x3f800000,2 +np.float32,0xbf6025b2,0x3f0b8783,2 +np.float32,0x8015ccfe,0x3f800000,2 +np.float32,0x3f6b9762,0x3ff23cd0,2 +np.float32,0xfeeb0c86,0x0,2 +np.float32,0x802779bc,0x3f800000,2 +np.float32,0xbf32bf64,0x3f1dc796,2 +np.float32,0xbf577eb6,0x3f0ed631,2 +np.float32,0x0,0x3f800000,2 +np.float32,0xfe99de6c,0x0,2 +np.float32,0x7a4e53,0x3f800000,2 +np.float32,0x1a15d3,0x3f800000,2 +np.float32,0x8035fe16,0x3f800000,2 +np.float32,0x3e845784,0x3f991dab,2 +np.float32,0x43d688,0x3f800000,2 +np.float32,0xbd447cc0,0x3f77a0b7,2 +np.float32,0x3f83fa,0x3f800000,2 +np.float32,0x3f141df2,0x3fbf2719,2 +np.float32,0x805c586a,0x3f800000,2 +np.float32,0x14c47e,0x3f800000,2 +np.float32,0x3d3bed00,0x3f8422d4,2 +np.float32,0x7f6f4ecd,0x7f800000,2 +np.float32,0x3f0a5e5a,0x3fba2c5c,2 +np.float32,0x523ecf,0x3f800000,2 +np.float32,0xbef4a6e8,0x3f37d262,2 +np.float32,0xff54eb58,0x0,2 +np.float32,0xff3fc875,0x0,2 +np.float32,0x8067c392,0x3f800000,2 +np.float32,0xfedae910,0x0,2 +np.float32,0x80595979,0x3f800000,2 +np.float32,0x3ee87d1d,0x3faf5929,2 +np.float32,0x7f5bad33,0x7f800000,2 +np.float32,0xbf45b868,0x3f15e109,2 +np.float32,0x3ef2277d,0x3fb1a868,2 +np.float32,0x3ca5a950,0x3f81ce8c,2 +np.float32,0x3e70f4e6,0x3f96ad25,2 +np.float32,0xfe3515bc,0x0,2 +np.float32,0xfe4af088,0x0,2 +np.float32,0xff3c78b2,0x0,2 +np.float32,0x7f50f51a,0x7f800000,2 +np.float32,0x3e3a232a,0x3f913009,2 +np.float32,0x7dfec6ff,0x7f800000,2 +np.float32,0x3e1bbaec,0x3f8e3ad6,2 +np.float32,0xbd658fa0,0x3f763ee7,2 +np.float32,0xfe958684,0x0,2 +np.float32,0x503670,0x3f800000,2 +np.float32,0x3f800000,0x40000000,2 +np.float32,0x1bbec6,0x3f800000,2 +np.float32,0xbea7bb7c,0x3f4bff00,2 +np.float32,0xff3a24a2,0x0,2 +np.float32,0xbf416240,0x3f17a635,2 +np.float32,0xbf800000,0x3f000000,2 +np.float32,0xff0c965c,0x0,2 +np.float32,0x80000000,0x3f800000,2 +np.float32,0xbec2c69a,0x3f44a99e,2 +np.float32,0x5b68d4,0x3f800000,2 +np.float32,0xb9a93000,0x3f7ff158,2 +np.float32,0x3d5a0dd8,0x3f84cfbc,2 +np.float32,0xbeaf7a28,0x3f49de4e,2 +np.float32,0x3ee83555,0x3faf4820,2 +np.float32,0xfd320330,0x0,2 +np.float32,0xe1af2,0x3f800000,2 +np.float32,0x7cf28caf,0x7f800000,2 +np.float32,0x80781009,0x3f800000,2 +np.float32,0xbf1e0baf,0x3f26e04d,2 +np.float32,0x7edb05b1,0x7f800000,2 +np.float32,0x3de004,0x3f800000,2 +np.float32,0xff436af6,0x0,2 +np.float32,0x802a9408,0x3f800000,2 +np.float32,0x7ed82205,0x7f800000,2 +np.float32,0x3e3f8212,0x3f91b767,2 +np.float32,0x16a2b2,0x3f800000,2 +np.float32,0xff1e5af3,0x0,2 +np.float32,0xbf1c860c,0x3f2790b7,2 +np.float32,0x3f3bc5da,0x3fd4d1d6,2 +np.float32,0x7f5f7085,0x7f800000,2 +np.float32,0x7f68e409,0x7f800000,2 +np.float32,0x7f4b3388,0x7f800000,2 +np.float32,0x7ecaf440,0x7f800000,2 +np.float32,0x80078785,0x3f800000,2 +np.float32,0x3ebd800d,0x3fa56f45,2 +np.float32,0xbe39a140,0x3f61c58e,2 +np.float32,0x803b587e,0x3f800000,2 +np.float32,0xbeaaa418,0x3f4b31c4,2 +np.float32,0xff7e2b9f,0x0,2 +np.float32,0xff5180a3,0x0,2 +np.float32,0xbf291394,0x3f21f73c,2 +np.float32,0x7f7b9698,0x7f800000,2 +np.float32,0x4218da,0x3f800000,2 +np.float32,0x7f135262,0x7f800000,2 +np.float32,0x804c10e8,0x3f800000,2 +np.float32,0xbf1c2a54,0x3f27ba5a,2 +np.float32,0x7f41fd32,0x7f800000,2 +np.float32,0x3e5cc464,0x3f94a195,2 +np.float32,0xff7a2fa7,0x0,2 +np.float32,0x3e05dc30,0x3f8c23c9,2 +np.float32,0x7f206d99,0x7f800000,2 +np.float32,0xbe9ae520,0x3f4f9287,2 +np.float32,0xfe4f4d58,0x0,2 +np.float32,0xbf44db42,0x3f163ae3,2 +np.float32,0x3f65ac48,0x3fee6300,2 +np.float32,0x3ebfaf36,0x3fa5ecb0,2 +np.float32,0x3f466719,0x3fdb08b0,2 +np.float32,0x80000001,0x3f800000,2 +np.float32,0xff4b3c7b,0x0,2 +np.float32,0x3df44374,0x3f8b0819,2 +np.float32,0xfea4b540,0x0,2 +np.float32,0x7f358e3d,0x7f800000,2 +np.float32,0x801f5e63,0x3f800000,2 +np.float32,0x804ae77e,0x3f800000,2 +np.float32,0xdbb5,0x3f800000,2 +np.float32,0x7f0a7e3b,0x7f800000,2 +np.float32,0xbe4152e4,0x3f609953,2 +np.float32,0x4b9579,0x3f800000,2 +np.float32,0x3ece0bd4,0x3fa92ea5,2 +np.float32,0x7e499d9a,0x7f800000,2 +np.float32,0x80637d8a,0x3f800000,2 +np.float32,0x3e50a425,0x3f936a8b,2 +np.float32,0xbf0e8cb0,0x3f2e06dd,2 +np.float32,0x802763e2,0x3f800000,2 +np.float32,0xff73041b,0x0,2 +np.float32,0xfea466da,0x0,2 +np.float32,0x80064c73,0x3f800000,2 +np.float32,0xbef29222,0x3f385728,2 +np.float32,0x8029c215,0x3f800000,2 +np.float32,0xbd3994e0,0x3f7815d1,2 +np.float32,0xbe6ac9e4,0x3f5a61f3,2 +np.float32,0x804b58b0,0x3f800000,2 +np.float32,0xbdb83be0,0x3f70865c,2 +np.float32,0x7ee18da2,0x7f800000,2 +np.float32,0xfd4ca010,0x0,2 +np.float32,0x807c668b,0x3f800000,2 +np.float32,0xbd40ed90,0x3f77c6e9,2 +np.float32,0x7efc6881,0x7f800000,2 +np.float32,0xfe633bfc,0x0,2 +np.float32,0x803ce363,0x3f800000,2 +np.float32,0x7ecba81e,0x7f800000,2 +np.float32,0xfdcb2378,0x0,2 +np.float32,0xbebc5524,0x3f4662b2,2 +np.float32,0xfaa30000,0x0,2 +np.float32,0x805d451b,0x3f800000,2 +np.float32,0xbee85600,0x3f3ae996,2 +np.float32,0xfefb0a54,0x0,2 +np.float32,0xbdfc6690,0x3f6b0a08,2 +np.float32,0x58a57,0x3f800000,2 +np.float32,0x3b41b7,0x3f800000,2 +np.float32,0x7c99812d,0x7f800000,2 +np.float32,0xbd3ae740,0x3f78079d,2 +np.float32,0xbf4a48a7,0x3f1409dd,2 +np.float32,0xfdeaad58,0x0,2 +np.float32,0xbe9aa65a,0x3f4fa42c,2 +np.float32,0x3f79d78c,0x3ffbc458,2 +np.float32,0x805e7389,0x3f800000,2 +np.float32,0x7ebb3612,0x7f800000,2 +np.float32,0x2e27dc,0x3f800000,2 +np.float32,0x80726dec,0x3f800000,2 +np.float32,0xfe8fb738,0x0,2 +np.float32,0xff1ff3bd,0x0,2 +np.float32,0x7f5264a2,0x7f800000,2 +np.float32,0x3f5a6893,0x3fe739ca,2 +np.float32,0xbec4029c,0x3f44558d,2 +np.float32,0xbef65cfa,0x3f37657e,2 +np.float32,0x63aba1,0x3f800000,2 +np.float32,0xfbb6e200,0x0,2 +np.float32,0xbf3466fc,0x3f1d1307,2 +np.float32,0x3f258844,0x3fc861d7,2 +np.float32,0xbf5f29a7,0x3f0be6dc,2 +np.float32,0x802b51cd,0x3f800000,2 +np.float32,0xbe9094dc,0x3f527dae,2 +np.float32,0xfec2e68c,0x0,2 +np.float32,0x807b38bd,0x3f800000,2 +np.float32,0xbf594662,0x3f0e2663,2 +np.float32,0x7cbcf747,0x7f800000,2 +np.float32,0xbe4b88f0,0x3f5f0d47,2 +np.float32,0x3c53c4,0x3f800000,2 +np.float32,0xbe883562,0x3f54e3f7,2 +np.float32,0xbf1efaf0,0x3f267456,2 +np.float32,0x3e22cd3e,0x3f8ee98b,2 +np.float32,0x80434875,0x3f800000,2 +np.float32,0xbf000b44,0x3f34ff6e,2 +np.float32,0x7f311c3a,0x7f800000,2 +np.float32,0x802f7f3f,0x3f800000,2 +np.float32,0x805155fe,0x3f800000,2 +np.float32,0x7f5d7485,0x7f800000,2 +np.float32,0x80119197,0x3f800000,2 +np.float32,0x3f445b8b,0x3fd9d30d,2 +np.float32,0xbf638eb3,0x3f0a3f38,2 +np.float32,0x402410,0x3f800000,2 +np.float32,0xbc578a40,0x3f7dad1d,2 +np.float32,0xbeecbf8a,0x3f39cc9e,2 +np.float32,0x7f2935a4,0x7f800000,2 +np.float32,0x3f570fea,0x3fe523e2,2 +np.float32,0xbf06bffa,0x3f31bdb6,2 +np.float32,0xbf2afdfd,0x3f2120ba,2 +np.float32,0x7f76f7ab,0x7f800000,2 +np.float32,0xfee2d1e8,0x0,2 +np.float32,0x800b026d,0x3f800000,2 +np.float32,0xff0eda75,0x0,2 +np.float32,0x3d4c,0x3f800000,2 +np.float32,0xbed538a2,0x3f3fcffb,2 +np.float32,0x3f73f4f9,0x3ff7c979,2 +np.float32,0x2aa9fc,0x3f800000,2 +np.float32,0x806a45b3,0x3f800000,2 +np.float32,0xff770d35,0x0,2 +np.float32,0x7e999be3,0x7f800000,2 +np.float32,0x80741128,0x3f800000,2 +np.float32,0xff6aac34,0x0,2 +np.float32,0x470f74,0x3f800000,2 +np.float32,0xff423b7b,0x0,2 +np.float32,0x17dfdd,0x3f800000,2 +np.float32,0x7f029e12,0x7f800000,2 +np.float32,0x803fcb9d,0x3f800000,2 +np.float32,0x3f3dc3,0x3f800000,2 +np.float32,0x7f3a27bc,0x7f800000,2 +np.float32,0x3e473108,0x3f9279ec,2 +np.float32,0x7f4add5d,0x7f800000,2 +np.float32,0xfd9736e0,0x0,2 +np.float32,0x805f1df2,0x3f800000,2 +np.float32,0x6c49c1,0x3f800000,2 +np.float32,0x7ec733c7,0x7f800000,2 +np.float32,0x804c1abf,0x3f800000,2 +np.float32,0x3de2e887,0x3f8a37a5,2 +np.float32,0x3f51630a,0x3fe1a561,2 +np.float32,0x3de686a8,0x3f8a62ff,2 +np.float32,0xbedb3538,0x3f3e439c,2 +np.float32,0xbf3aa892,0x3f1a6f9e,2 +np.float32,0x7ee5fb32,0x7f800000,2 +np.float32,0x7e916c9b,0x7f800000,2 +np.float32,0x3f033f1c,0x3fb69e19,2 +np.float32,0x25324b,0x3f800000,2 +np.float32,0x3f348d1d,0x3fd0b2e2,2 +np.float32,0x3f5797e8,0x3fe57851,2 +np.float32,0xbf69c316,0x3f07f1a0,2 +np.float32,0xbe8b7fb0,0x3f53f1bf,2 +np.float32,0xbdbbc190,0x3f703d00,2 +np.float32,0xff6c4fc0,0x0,2 +np.float32,0x7f29fcbe,0x7f800000,2 +np.float32,0x3f678d19,0x3fef9a23,2 +np.float32,0x73d140,0x3f800000,2 +np.float32,0x3e25bdd2,0x3f8f326b,2 +np.float32,0xbeb775ec,0x3f47b2c6,2 +np.float32,0xff451c4d,0x0,2 +np.float32,0x8072c466,0x3f800000,2 +np.float32,0x3f65e836,0x3fee89b2,2 +np.float32,0x52ca7a,0x3f800000,2 +np.float32,0x62cfed,0x3f800000,2 +np.float32,0xbf583dd0,0x3f0e8c5c,2 +np.float32,0xbf683842,0x3f088342,2 +np.float32,0x3f1a7828,0x3fc2780c,2 +np.float32,0x800ea979,0x3f800000,2 +np.float32,0xbeb9133c,0x3f474328,2 +np.float32,0x3ef09fc7,0x3fb14a4b,2 +np.float32,0x7ebbcb75,0x7f800000,2 +np.float32,0xff316c0e,0x0,2 +np.float32,0x805b84e3,0x3f800000,2 +np.float32,0x3d6a55e0,0x3f852d8a,2 +np.float32,0x3e755788,0x3f971fd1,2 +np.float32,0x3ee7aacb,0x3faf2743,2 +np.float32,0x7f714039,0x7f800000,2 +np.float32,0xff70bad8,0x0,2 +np.float32,0xbe0b74c8,0x3f68f08c,2 +np.float32,0xbf6cb170,0x3f06de86,2 +np.float32,0x7ec1fbff,0x7f800000,2 +np.float32,0x8014b1f6,0x3f800000,2 +np.float32,0xfe8b45fe,0x0,2 +np.float32,0x6e2220,0x3f800000,2 +np.float32,0x3ed1777d,0x3fa9f7ab,2 +np.float32,0xff48e467,0x0,2 +np.float32,0xff76c5aa,0x0,2 +np.float32,0x3e9bd330,0x3f9e0fd7,2 +np.float32,0x3f17de4f,0x3fc11aae,2 +np.float32,0x7eeaa2fd,0x7f800000,2 +np.float32,0xbf572746,0x3f0ef806,2 +np.float32,0x7e235554,0x7f800000,2 +np.float32,0xfe24fc1c,0x0,2 +np.float32,0x7daf71ad,0x7f800000,2 +np.float32,0x800d4a6b,0x3f800000,2 +np.float32,0xbf6fc31d,0x3f05c0ce,2 +np.float32,0x1c4d93,0x3f800000,2 +np.float32,0x7ee9200c,0x7f800000,2 +np.float32,0x3f54b4da,0x3fe3aeec,2 +np.float32,0x2b37b1,0x3f800000,2 +np.float32,0x3f7468bd,0x3ff81731,2 +np.float32,0x3f2850ea,0x3fc9e5f4,2 +np.float32,0xbe0d47ac,0x3f68a6f9,2 +np.float32,0x314877,0x3f800000,2 +np.float32,0x802700c3,0x3f800000,2 +np.float32,0x7e2c915f,0x7f800000,2 +np.float32,0x800d0059,0x3f800000,2 +np.float32,0x3f7f3c25,0x3fff7862,2 +np.float32,0xff735d31,0x0,2 +np.float32,0xff7e339e,0x0,2 +np.float32,0xbef96cf0,0x3f36a340,2 +np.float32,0x3db6ea21,0x3f882cb2,2 +np.float32,0x67cb3d,0x3f800000,2 +np.float32,0x801f349d,0x3f800000,2 +np.float32,0x3f1390ec,0x3fbede29,2 +np.float32,0x7f13644a,0x7f800000,2 +np.float32,0x804a369b,0x3f800000,2 +np.float32,0x80262666,0x3f800000,2 +np.float32,0x7e850fbc,0x7f800000,2 +np.float32,0x18b002,0x3f800000,2 +np.float32,0x8051f1ed,0x3f800000,2 +np.float32,0x3eba48f6,0x3fa4b753,2 +np.float32,0xbf3f4130,0x3f1886a9,2 +np.float32,0xbedac006,0x3f3e61cf,2 +np.float32,0xbf097c70,0x3f306ddc,2 +np.float32,0x4aba6d,0x3f800000,2 +np.float32,0x580078,0x3f800000,2 +np.float32,0x3f64d82e,0x3fedda40,2 +np.float32,0x7f781fd6,0x7f800000,2 +np.float32,0x6aff3d,0x3f800000,2 +np.float32,0xff25e074,0x0,2 +np.float32,0x7ea9ec89,0x7f800000,2 +np.float32,0xbf63b816,0x3f0a2fbb,2 +np.float32,0x133f07,0x3f800000,2 +np.float32,0xff800000,0x0,2 +np.float32,0x8013dde7,0x3f800000,2 +np.float32,0xff770b95,0x0,2 +np.float32,0x806154e8,0x3f800000,2 +np.float32,0x3f1e7bce,0x3fc4981a,2 +np.float32,0xff262c78,0x0,2 +np.float32,0x3f59a652,0x3fe6c04c,2 +np.float32,0x7f220166,0x7f800000,2 +np.float32,0x7eb24939,0x7f800000,2 +np.float32,0xbed58bb0,0x3f3fba6a,2 +np.float32,0x3c2ad000,0x3f80eda7,2 +np.float32,0x2adb2e,0x3f800000,2 +np.float32,0xfe8b213e,0x0,2 +np.float32,0xbf2e0c1e,0x3f1fccea,2 +np.float32,0x7e1716be,0x7f800000,2 +np.float32,0x80184e73,0x3f800000,2 +np.float32,0xbf254743,0x3f23a3d5,2 +np.float32,0x8063a722,0x3f800000,2 +np.float32,0xbe50adf0,0x3f5e46c7,2 +np.float32,0x3f614158,0x3feb8d60,2 +np.float32,0x8014bbc8,0x3f800000,2 +np.float32,0x283bc7,0x3f800000,2 +np.float32,0x3ffb5c,0x3f800000,2 +np.float32,0xfe8de6bc,0x0,2 +np.float32,0xbea6e086,0x3f4c3b82,2 +np.float32,0xfee64b92,0x0,2 +np.float32,0x506c1a,0x3f800000,2 +np.float32,0xff342af8,0x0,2 +np.float32,0x6b6f4c,0x3f800000,2 +np.float32,0xfeb42b1e,0x0,2 +np.float32,0x3e49384a,0x3f92ad71,2 +np.float32,0x152d08,0x3f800000,2 +np.float32,0x804c8f09,0x3f800000,2 +np.float32,0xff5e927d,0x0,2 +np.float32,0x6374da,0x3f800000,2 +np.float32,0x3f48f011,0x3fdc8ae4,2 +np.float32,0xbf446a30,0x3f1668e8,2 +np.float32,0x3ee77073,0x3faf196e,2 +np.float32,0xff4caa40,0x0,2 +np.float32,0x7efc9363,0x7f800000,2 +np.float32,0xbf706dcc,0x3f05830d,2 +np.float32,0xfe29c7e8,0x0,2 +np.float32,0x803cfe58,0x3f800000,2 +np.float32,0x3ec34c7c,0x3fa6bd0a,2 +np.float32,0x3eb85b62,0x3fa44968,2 +np.float32,0xfda1b9d8,0x0,2 +np.float32,0x802932cd,0x3f800000,2 +np.float32,0xbf5cde78,0x3f0cc5fa,2 +np.float32,0x3f31bf44,0x3fcf1ec8,2 +np.float32,0x803a0882,0x3f800000,2 +np.float32,0x800000,0x3f800000,2 +np.float32,0x3f54110e,0x3fe34a08,2 +np.float32,0x80645ea9,0x3f800000,2 +np.float32,0xbd8c1070,0x3f7425c3,2 +np.float32,0x801a006a,0x3f800000,2 +np.float32,0x7f5d161e,0x7f800000,2 +np.float32,0x805b5df3,0x3f800000,2 +np.float32,0xbf71a7c0,0x3f0511be,2 +np.float32,0xbe9a55c0,0x3f4fbad6,2 +np.float64,0xde7e2fd9bcfc6,0x3ff0000000000000,1 +np.float64,0xbfd8cd88eb319b12,0x3fe876349efbfa2b,1 +np.float64,0x3fe4fa13ace9f428,0x3ff933fbb117d196,1 +np.float64,0x475b3d048eb68,0x3ff0000000000000,1 +np.float64,0x7fef39ed07be73d9,0x7ff0000000000000,1 +np.float64,0x80026b84d904d70a,0x3ff0000000000000,1 +np.float64,0xebd60627d7ac1,0x3ff0000000000000,1 +np.float64,0xbfd7cbefdbaf97e0,0x3fe8bad30f6cf8e1,1 +np.float64,0x7fc17c605a22f8c0,0x7ff0000000000000,1 +np.float64,0x8cdac05119b58,0x3ff0000000000000,1 +np.float64,0x3fc45cd60a28b9ac,0x3ff1dd8028ec3f41,1 +np.float64,0x7fef4fce137e9f9b,0x7ff0000000000000,1 +np.float64,0xe5a2b819cb457,0x3ff0000000000000,1 +np.float64,0xe3bcfd4dc77a0,0x3ff0000000000000,1 +np.float64,0x68f0b670d1e17,0x3ff0000000000000,1 +np.float64,0xae69a6455cd35,0x3ff0000000000000,1 +np.float64,0xffe7007a0c6e00f4,0x0,1 +np.float64,0x59fc57a8b3f8c,0x3ff0000000000000,1 +np.float64,0xbfeee429c0bdc854,0x3fe0638fa62bed9f,1 +np.float64,0x80030bb6e206176f,0x3ff0000000000000,1 +np.float64,0x8006967a36ad2cf5,0x3ff0000000000000,1 +np.float64,0x3fe128176a22502f,0x3ff73393301e5dc8,1 +np.float64,0x218de20c431bd,0x3ff0000000000000,1 +np.float64,0x3fe7dbc48aafb789,0x3ffad38989b5955c,1 +np.float64,0xffda1ef411343de8,0x0,1 +np.float64,0xc6b392838d673,0x3ff0000000000000,1 +np.float64,0x7fe6d080c1ada101,0x7ff0000000000000,1 +np.float64,0xbfed36dd67fa6dbb,0x3fe0fec342c4ee89,1 +np.float64,0x3fee2bb6a3fc576e,0x3ffec1c149f1f092,1 +np.float64,0xbfd1f785eb23ef0c,0x3fea576eb01233cb,1 +np.float64,0x7fdad29a1f35a533,0x7ff0000000000000,1 +np.float64,0xffe8928c4fb12518,0x0,1 +np.float64,0x7fb123160022462b,0x7ff0000000000000,1 +np.float64,0x8007ab56cfaf56ae,0x3ff0000000000000,1 +np.float64,0x7fda342d6634685a,0x7ff0000000000000,1 +np.float64,0xbfe3b7e42c676fc8,0x3fe4e05cf8685b8a,1 +np.float64,0xffa708be7c2e1180,0x0,1 +np.float64,0xbfe8ffbece31ff7e,0x3fe29eb84077a34a,1 +np.float64,0xbf91002008220040,0x3fefa245058f05cb,1 +np.float64,0x8000281f0ee0503f,0x3ff0000000000000,1 +np.float64,0x8005617adc2ac2f6,0x3ff0000000000000,1 +np.float64,0x7fa84fec60309fd8,0x7ff0000000000000,1 +np.float64,0x8d00c0231a018,0x3ff0000000000000,1 +np.float64,0xbfdfe52ca63fca5a,0x3fe6a7324cc00d57,1 +np.float64,0x7fcc81073d39020d,0x7ff0000000000000,1 +np.float64,0x800134ff5a6269ff,0x3ff0000000000000,1 +np.float64,0xffc7fff98d2ffff4,0x0,1 +np.float64,0x8000925ce50124bb,0x3ff0000000000000,1 +np.float64,0xffe2530c66a4a618,0x0,1 +np.float64,0x7fc99070673320e0,0x7ff0000000000000,1 +np.float64,0xbfddd5c1f13bab84,0x3fe72a0c80f8df39,1 +np.float64,0x3fe1c220fee38442,0x3ff7817ec66aa55b,1 +np.float64,0x3fb9a1e1043343c2,0x3ff1265e575e6404,1 +np.float64,0xffef72e0833ee5c0,0x0,1 +np.float64,0x3fe710c0416e2181,0x3ffa5e93588aaa69,1 +np.float64,0xbfd8d23cbab1a47a,0x3fe874f5b9d99885,1 +np.float64,0x7fe9628ebd72c51c,0x7ff0000000000000,1 +np.float64,0xdd5fa611babf5,0x3ff0000000000000,1 +np.float64,0x8002bafac86575f6,0x3ff0000000000000,1 +np.float64,0x68acea44d159e,0x3ff0000000000000,1 +np.float64,0xffd776695eaeecd2,0x0,1 +np.float64,0x80059b59bb4b36b4,0x3ff0000000000000,1 +np.float64,0xbdcdd2af7b9bb,0x3ff0000000000000,1 +np.float64,0x8002b432ee856867,0x3ff0000000000000,1 +np.float64,0xcbc72f09978e6,0x3ff0000000000000,1 +np.float64,0xbfee8f4bf6fd1e98,0x3fe081cc0318b170,1 +np.float64,0xffc6e2892d2dc514,0x0,1 +np.float64,0x7feb682e4db6d05c,0x7ff0000000000000,1 +np.float64,0x8004b70a04296e15,0x3ff0000000000000,1 +np.float64,0x42408a4284812,0x3ff0000000000000,1 +np.float64,0xbfe9b8b197f37163,0x3fe254b4c003ce0a,1 +np.float64,0x3fcaadf5f5355bec,0x3ff27ca7876a8d20,1 +np.float64,0xfff0000000000000,0x0,1 +np.float64,0x7fea8376d33506ed,0x7ff0000000000000,1 +np.float64,0xffef73c2d63ee785,0x0,1 +np.float64,0xffe68b2bae2d1657,0x0,1 +np.float64,0x3fd8339cb2306739,0x3ff4cb774d616f90,1 +np.float64,0xbfc6d1db4d2da3b8,0x3fec47bb873a309c,1 +np.float64,0x7fe858016230b002,0x7ff0000000000000,1 +np.float64,0x7fe74cb99d2e9972,0x7ff0000000000000,1 +np.float64,0xffec2e96dc385d2d,0x0,1 +np.float64,0xb762a9876ec55,0x3ff0000000000000,1 +np.float64,0x3feca230c5794462,0x3ffdbfe62a572f52,1 +np.float64,0xbfb5ebad3a2bd758,0x3fee27eed86dcc39,1 +np.float64,0x471c705a8e38f,0x3ff0000000000000,1 +np.float64,0x7fc79bb5cf2f376b,0x7ff0000000000000,1 +np.float64,0xbfe53d6164ea7ac3,0x3fe4331b3beb73bd,1 +np.float64,0xbfe375a3f766eb48,0x3fe4fe67edb516e6,1 +np.float64,0x3fe1c7686ca38ed1,0x3ff7842f04770ba9,1 +np.float64,0x242e74dc485cf,0x3ff0000000000000,1 +np.float64,0x8009c06ab71380d6,0x3ff0000000000000,1 +np.float64,0x3fd08505efa10a0c,0x3ff3227b735b956d,1 +np.float64,0xffe3dfcecda7bf9d,0x0,1 +np.float64,0x8001f079bbc3e0f4,0x3ff0000000000000,1 +np.float64,0x3fddc706b6bb8e0c,0x3ff616d927987363,1 +np.float64,0xbfd151373ea2a26e,0x3fea870ba53ec126,1 +np.float64,0x7fe89533bfb12a66,0x7ff0000000000000,1 +np.float64,0xffed302cbc3a6059,0x0,1 +np.float64,0x3fd871cc28b0e398,0x3ff4d97d58c16ae2,1 +np.float64,0x7fbe9239683d2472,0x7ff0000000000000,1 +np.float64,0x848a445909149,0x3ff0000000000000,1 +np.float64,0x8007b104ce2f620a,0x3ff0000000000000,1 +np.float64,0x7fc2cd6259259ac4,0x7ff0000000000000,1 +np.float64,0xbfeadb640df5b6c8,0x3fe1e2b068de10af,1 +np.float64,0x800033b2f1a06767,0x3ff0000000000000,1 +np.float64,0x7fe54e5b7caa9cb6,0x7ff0000000000000,1 +np.float64,0x4f928f209f26,0x3ff0000000000000,1 +np.float64,0x8003c3dc6f2787ba,0x3ff0000000000000,1 +np.float64,0xbfd55a59daaab4b4,0x3fe9649d57b32b5d,1 +np.float64,0xffe3e2968d67c52c,0x0,1 +np.float64,0x80087434d550e86a,0x3ff0000000000000,1 +np.float64,0xffdde800083bd000,0x0,1 +np.float64,0xffe291f0542523e0,0x0,1 +np.float64,0xbfe1419bc3e28338,0x3fe6051d4f95a34a,1 +np.float64,0x3fd9d00ee1b3a01e,0x3ff5292bb8d5f753,1 +np.float64,0x3fdb720b60b6e417,0x3ff589d133625374,1 +np.float64,0xbfe3e21f0967c43e,0x3fe4cd4d02e3ef9a,1 +np.float64,0x7fd7e27f3dafc4fd,0x7ff0000000000000,1 +np.float64,0x3fd1cc2620a3984c,0x3ff366befbc38e3e,1 +np.float64,0x3fe78d05436f1a0b,0x3ffaa5ee4ea54b79,1 +np.float64,0x7e2acc84fc55a,0x3ff0000000000000,1 +np.float64,0x800ffb861c5ff70c,0x3ff0000000000000,1 +np.float64,0xffb2b0db1a2561b8,0x0,1 +np.float64,0xbfe80c2363701847,0x3fe301fdfe789576,1 +np.float64,0x7fe383c1c3e70783,0x7ff0000000000000,1 +np.float64,0xbfeefc02e6fdf806,0x3fe05b1a8528bf6c,1 +np.float64,0xbfe42c9268285925,0x3fe4abdc14793cb8,1 +np.float64,0x1,0x3ff0000000000000,1 +np.float64,0xa71c7ce94e390,0x3ff0000000000000,1 +np.float64,0x800ed4e6777da9cd,0x3ff0000000000000,1 +np.float64,0x3fde11b35d3c2367,0x3ff628bdc6dd1b78,1 +np.float64,0x3fef3964dbfe72ca,0x3fff777cae357608,1 +np.float64,0x3fefe369b7ffc6d4,0x3fffec357be508a3,1 +np.float64,0xbfdef1855f3de30a,0x3fe6e348c58e3fed,1 +np.float64,0x3fee0e2bc13c1c58,0x3ffeae1909c1b973,1 +np.float64,0xbfd31554ffa62aaa,0x3fea06628b2f048a,1 +np.float64,0x800dc56bcc7b8ad8,0x3ff0000000000000,1 +np.float64,0x7fbba01b8e374036,0x7ff0000000000000,1 +np.float64,0x7fd9737a92b2e6f4,0x7ff0000000000000,1 +np.float64,0x3feeae0fac3d5c1f,0x3fff1913705f1f07,1 +np.float64,0x3fdcc64fcdb98ca0,0x3ff5d9c3e5862972,1 +np.float64,0x3fdad9f83db5b3f0,0x3ff56674e81c1bd1,1 +np.float64,0x32b8797065710,0x3ff0000000000000,1 +np.float64,0x3fd20deae6241bd6,0x3ff37495bc057394,1 +np.float64,0x7fc899f0763133e0,0x7ff0000000000000,1 +np.float64,0x80045805fc08b00d,0x3ff0000000000000,1 +np.float64,0xbfcd8304cb3b0608,0x3feb4611f1eaa30c,1 +np.float64,0x3fd632a2fcac6544,0x3ff4592e1ea14fb0,1 +np.float64,0xffeeb066007d60cb,0x0,1 +np.float64,0x800bb12a42b76255,0x3ff0000000000000,1 +np.float64,0xbfe060fe1760c1fc,0x3fe6714640ab2574,1 +np.float64,0x80067ed737acfdaf,0x3ff0000000000000,1 +np.float64,0x3fd5ec3211abd864,0x3ff449adea82e73e,1 +np.float64,0x7fc4b2fdc22965fb,0x7ff0000000000000,1 +np.float64,0xff656afd002ad600,0x0,1 +np.float64,0xffeadefcdcb5bdf9,0x0,1 +np.float64,0x80052f18610a5e32,0x3ff0000000000000,1 +np.float64,0xbfd5b75c78ab6eb8,0x3fe94b15e0f39194,1 +np.float64,0xa4d3de2b49a7c,0x3ff0000000000000,1 +np.float64,0xbfe321c93de64392,0x3fe524ac7bbee401,1 +np.float64,0x3feb32f5def665ec,0x3ffcd6e4e5f9c271,1 +np.float64,0x7fe6b07e4ced60fc,0x7ff0000000000000,1 +np.float64,0x3fe013bb2de02776,0x3ff6aa4c32ab5ba4,1 +np.float64,0xbfeadd81d375bb04,0x3fe1e1de89b4aebf,1 +np.float64,0xffece7678079cece,0x0,1 +np.float64,0x3fe3d87b8467b0f8,0x3ff897cf22505e4d,1 +np.float64,0xffc4e3a05129c740,0x0,1 +np.float64,0xbfddee6b03bbdcd6,0x3fe723dd83ab49bd,1 +np.float64,0x3fcc4e2672389c4d,0x3ff2a680db769116,1 +np.float64,0x3fd8ed221ab1da44,0x3ff4f569aec8b850,1 +np.float64,0x80000a3538a0146b,0x3ff0000000000000,1 +np.float64,0x8004832eb109065e,0x3ff0000000000000,1 +np.float64,0xffdca83c60395078,0x0,1 +np.float64,0xffef551cda3eaa39,0x0,1 +np.float64,0x800fd95dd65fb2bc,0x3ff0000000000000,1 +np.float64,0x3ff0000000000000,0x4000000000000000,1 +np.float64,0xbfc06f5c4f20deb8,0x3fed466c17305ad8,1 +np.float64,0xbfeb01b5f476036c,0x3fe1d3de0f4211f4,1 +np.float64,0xbfdb2b9284365726,0x3fe7d7b02f790b05,1 +np.float64,0xff76ba83202d7500,0x0,1 +np.float64,0x3fd3f1c59ea7e38c,0x3ff3db96b3a0aaad,1 +np.float64,0x8b99ff6d17340,0x3ff0000000000000,1 +np.float64,0xbfeb383aa0f67075,0x3fe1bedcf2531c08,1 +np.float64,0x3fe321e35fa643c7,0x3ff83749a5d686ee,1 +np.float64,0xbfd863eb2130c7d6,0x3fe8923fcc39bac7,1 +np.float64,0x9e71dd333ce3c,0x3ff0000000000000,1 +np.float64,0x9542962b2a853,0x3ff0000000000000,1 +np.float64,0xba2c963b74593,0x3ff0000000000000,1 +np.float64,0x80019f4d0ca33e9b,0x3ff0000000000000,1 +np.float64,0xffde3e39a73c7c74,0x0,1 +np.float64,0x800258ae02c4b15d,0x3ff0000000000000,1 +np.float64,0xbfd99a535a3334a6,0x3fe8402f3a0662a5,1 +np.float64,0xe6c62143cd8c4,0x3ff0000000000000,1 +np.float64,0x7fbcc828f0399051,0x7ff0000000000000,1 +np.float64,0xbfe42e3596285c6b,0x3fe4ab2066d66071,1 +np.float64,0xffe2ee42d365dc85,0x0,1 +np.float64,0x3fe1f98abea3f315,0x3ff79dc68002a80b,1 +np.float64,0x7fd7225891ae44b0,0x7ff0000000000000,1 +np.float64,0x477177408ee30,0x3ff0000000000000,1 +np.float64,0xbfe16a7e2162d4fc,0x3fe5f1a5c745385d,1 +np.float64,0xbf98aaee283155e0,0x3fef785952e9c089,1 +np.float64,0x7fd7c14a8daf8294,0x7ff0000000000000,1 +np.float64,0xf7e7713defcee,0x3ff0000000000000,1 +np.float64,0x800769aa11aed355,0x3ff0000000000000,1 +np.float64,0xbfed30385e3a6071,0x3fe10135a3bd9ae6,1 +np.float64,0x3fe6dd7205edbae4,0x3ffa4155899efd70,1 +np.float64,0x800d705d26bae0ba,0x3ff0000000000000,1 +np.float64,0xa443ac1f48876,0x3ff0000000000000,1 +np.float64,0xbfec8cfec43919fe,0x3fe13dbf966e6633,1 +np.float64,0x7fd246efaa248dde,0x7ff0000000000000,1 +np.float64,0x800f2ad14afe55a3,0x3ff0000000000000,1 +np.float64,0x800487a894c90f52,0x3ff0000000000000,1 +np.float64,0x80014c4f19e2989f,0x3ff0000000000000,1 +np.float64,0x3fc11f265f223e4d,0x3ff18def05c971e5,1 +np.float64,0xffeb6d565776daac,0x0,1 +np.float64,0x7fd5ca5df8ab94bb,0x7ff0000000000000,1 +np.float64,0xbfe33de4fde67bca,0x3fe517d0e212cd1c,1 +np.float64,0xbfd1c738e5a38e72,0x3fea6539e9491693,1 +np.float64,0xbfec1d8c33b83b18,0x3fe16790fbca0c65,1 +np.float64,0xbfeecb464b7d968d,0x3fe06c67e2aefa55,1 +np.float64,0xbfd621dbf1ac43b8,0x3fe92dfa32d93846,1 +np.float64,0x80069a02860d3406,0x3ff0000000000000,1 +np.float64,0xbfe84f650e309eca,0x3fe2e661300f1975,1 +np.float64,0x7fc1d2cec523a59d,0x7ff0000000000000,1 +np.float64,0x3fd7706d79aee0db,0x3ff49fb033353dfe,1 +np.float64,0xffd94ba458329748,0x0,1 +np.float64,0x7fea98ba1a753173,0x7ff0000000000000,1 +np.float64,0xbfe756ba092ead74,0x3fe34d428d1857bc,1 +np.float64,0xffecfbd836b9f7b0,0x0,1 +np.float64,0x3fd211fbe5a423f8,0x3ff375711a3641e0,1 +np.float64,0x7fee24f7793c49ee,0x7ff0000000000000,1 +np.float64,0x7fe6a098886d4130,0x7ff0000000000000,1 +np.float64,0xbfd4ade909a95bd2,0x3fe99436524db1f4,1 +np.float64,0xbfeb704e6476e09d,0x3fe1a95be4a21bc6,1 +np.float64,0xffefc0f6627f81ec,0x0,1 +np.float64,0x7feff3f896ffe7f0,0x7ff0000000000000,1 +np.float64,0xa3f74edb47eea,0x3ff0000000000000,1 +np.float64,0xbfe0a551cf214aa4,0x3fe65027a7ff42e3,1 +np.float64,0x3fe164b23622c964,0x3ff7521c6225f51d,1 +np.float64,0x7fc258752324b0e9,0x7ff0000000000000,1 +np.float64,0x4739b3348e737,0x3ff0000000000000,1 +np.float64,0xb0392b1d60726,0x3ff0000000000000,1 +np.float64,0x7fe26f42e5e4de85,0x7ff0000000000000,1 +np.float64,0x8004601f87e8c040,0x3ff0000000000000,1 +np.float64,0xffe92ce37b3259c6,0x0,1 +np.float64,0x3fe620da3a6c41b4,0x3ff9d6ee3d005466,1 +np.float64,0x3fd850cfa2b0a1a0,0x3ff4d20bd249d411,1 +np.float64,0xffdcdfdfb5b9bfc0,0x0,1 +np.float64,0x800390297d672054,0x3ff0000000000000,1 +np.float64,0x3fde5864f6bcb0ca,0x3ff639bb9321f5ef,1 +np.float64,0x3fee484cec7c909a,0x3ffed4d2c6274219,1 +np.float64,0x7fe9b9a064b37340,0x7ff0000000000000,1 +np.float64,0xffe50028b8aa0051,0x0,1 +np.float64,0x3fe37774ade6eee9,0x3ff864558498a9a8,1 +np.float64,0x7fef83c724bf078d,0x7ff0000000000000,1 +np.float64,0xbfeb58450fb6b08a,0x3fe1b290556be73d,1 +np.float64,0x7fd7161475ae2c28,0x7ff0000000000000,1 +np.float64,0x3fece09621f9c12c,0x3ffde836a583bbdd,1 +np.float64,0x3fd045790ea08af2,0x3ff31554778fd4e2,1 +np.float64,0xbfe7c7dd6cef8fbb,0x3fe31e2eeda857fc,1 +np.float64,0xffe9632f5372c65e,0x0,1 +np.float64,0x800d4f3a703a9e75,0x3ff0000000000000,1 +np.float64,0xffea880e4df5101c,0x0,1 +np.float64,0xbfeb7edc4ff6fdb8,0x3fe1a3cb5dc33594,1 +np.float64,0xbfcaae4bab355c98,0x3febb1ee65e16b58,1 +np.float64,0xbfde598a19bcb314,0x3fe709145eafaaf8,1 +np.float64,0x3feefb6d78fdf6db,0x3fff4d5c8c68e39a,1 +np.float64,0x13efc75427dfa,0x3ff0000000000000,1 +np.float64,0xffe26f65c064decb,0x0,1 +np.float64,0xbfed5c1addfab836,0x3fe0f1133bd2189a,1 +np.float64,0x7fe7a7cf756f4f9e,0x7ff0000000000000,1 +np.float64,0xffc681702e2d02e0,0x0,1 +np.float64,0x8003d6ab5067ad57,0x3ff0000000000000,1 +np.float64,0xffa695f1342d2be0,0x0,1 +np.float64,0xbfcf8857db3f10b0,0x3feafa14da8c29a4,1 +np.float64,0xbfe8ca06be71940e,0x3fe2b46f6d2c64b4,1 +np.float64,0x3451c74468a3a,0x3ff0000000000000,1 +np.float64,0x3fde47d5f6bc8fac,0x3ff635bf8e024716,1 +np.float64,0xffda159d5db42b3a,0x0,1 +np.float64,0x7fef9fecaa3f3fd8,0x7ff0000000000000,1 +np.float64,0x3fd4e745e3a9ce8c,0x3ff410a9cb6fd8bf,1 +np.float64,0xffef57019b3eae02,0x0,1 +np.float64,0xbfe6604f4f6cc09e,0x3fe3b55de43c626d,1 +np.float64,0xffe066a424a0cd48,0x0,1 +np.float64,0x3fd547de85aa8fbc,0x3ff425b2a7a16675,1 +np.float64,0xffb3c69280278d28,0x0,1 +np.float64,0xffebe0b759f7c16e,0x0,1 +np.float64,0x3fefc84106ff9082,0x3fffd973687337d8,1 +np.float64,0x501c42a4a0389,0x3ff0000000000000,1 +np.float64,0x7feb45d13eb68ba1,0x7ff0000000000000,1 +np.float64,0xbfb16a8c2e22d518,0x3fee86a9c0f9291a,1 +np.float64,0x3be327b877c66,0x3ff0000000000000,1 +np.float64,0x7fe4a58220694b03,0x7ff0000000000000,1 +np.float64,0x3fe0286220a050c4,0x3ff6b472157ab8f2,1 +np.float64,0x3fc9381825327030,0x3ff2575fbea2bf5d,1 +np.float64,0xbfd1af7ee8a35efe,0x3fea6c032cf7e669,1 +np.float64,0xbfea9b0f39b5361e,0x3fe1fbae14b40b4d,1 +np.float64,0x39efe4aa73dfd,0x3ff0000000000000,1 +np.float64,0xffeb06fdc8360dfb,0x0,1 +np.float64,0xbfda481e72b4903c,0x3fe812b4b08d4884,1 +np.float64,0xbfd414ba5ba82974,0x3fe9bec9474bdfe6,1 +np.float64,0x7fe707177b6e0e2e,0x7ff0000000000000,1 +np.float64,0x8000000000000001,0x3ff0000000000000,1 +np.float64,0xbfede6a75bbbcd4f,0x3fe0be874cccd399,1 +np.float64,0x8006cdb577cd9b6c,0x3ff0000000000000,1 +np.float64,0x800051374f20a26f,0x3ff0000000000000,1 +np.float64,0x3fe5cba8c96b9752,0x3ff9a76b3adcc122,1 +np.float64,0xbfee3933487c7267,0x3fe0a0b190f9609a,1 +np.float64,0x3fd574b8d8aae970,0x3ff42f7e83de1af9,1 +np.float64,0xba5db72b74bb7,0x3ff0000000000000,1 +np.float64,0x3fa9bf512c337ea0,0x3ff0914a7f743a94,1 +np.float64,0xffe8cb736c3196e6,0x0,1 +np.float64,0x3761b2f06ec37,0x3ff0000000000000,1 +np.float64,0x8b4d4433169a9,0x3ff0000000000000,1 +np.float64,0x800f0245503e048b,0x3ff0000000000000,1 +np.float64,0x7fb20d54ac241aa8,0x7ff0000000000000,1 +np.float64,0x3fdf26666b3e4ccd,0x3ff66b8995142017,1 +np.float64,0xbfcbf2a83737e550,0x3feb8173a7b9d6b5,1 +np.float64,0x3fd31572a0a62ae5,0x3ff3ac6c94313dcd,1 +np.float64,0x7fb6c2807a2d8500,0x7ff0000000000000,1 +np.float64,0x800799758f2f32ec,0x3ff0000000000000,1 +np.float64,0xe72f1f6bce5e4,0x3ff0000000000000,1 +np.float64,0x3fe0e0f223a1c1e4,0x3ff70fed5b761673,1 +np.float64,0x3fe6d4f133eda9e2,0x3ffa3c8000c169eb,1 +np.float64,0xbfe1ccc3d8639988,0x3fe5c32148bedbda,1 +np.float64,0x3fea71c53574e38a,0x3ffc5f31201fe9be,1 +np.float64,0x9e0323eb3c065,0x3ff0000000000000,1 +np.float64,0x8005cc79a5cb98f4,0x3ff0000000000000,1 +np.float64,0x1dace1f83b59d,0x3ff0000000000000,1 +np.float64,0x10000000000000,0x3ff0000000000000,1 +np.float64,0xbfdef50830bdea10,0x3fe6e269fc17ebef,1 +np.float64,0x8010000000000000,0x3ff0000000000000,1 +np.float64,0xbfdfa82192bf5044,0x3fe6b6313ee0a095,1 +np.float64,0x3fd9398fe2b27320,0x3ff506ca2093c060,1 +np.float64,0x8002721fe664e441,0x3ff0000000000000,1 +np.float64,0x800c04166ad8082d,0x3ff0000000000000,1 +np.float64,0xffec3918b3387230,0x0,1 +np.float64,0x3fec62d5dfb8c5ac,0x3ffd972ea4a54b32,1 +np.float64,0x3fe7e42a0b6fc854,0x3ffad86b0443181d,1 +np.float64,0x3fc0aff5f3215fec,0x3ff1836058d4d210,1 +np.float64,0xbf82ff68a025fec0,0x3fefcb7f06862dce,1 +np.float64,0xae2e35195c5c7,0x3ff0000000000000,1 +np.float64,0x3fece3bddf79c77c,0x3ffdea41fb1ba8fa,1 +np.float64,0xbfa97b947832f730,0x3feeea34ebedbbd2,1 +np.float64,0xbfdfb1b1ce3f6364,0x3fe6b3d72871335c,1 +np.float64,0xbfe61a4f24ac349e,0x3fe3d356bf991b06,1 +np.float64,0x7fe23117a5e4622e,0x7ff0000000000000,1 +np.float64,0x800552a8cccaa552,0x3ff0000000000000,1 +np.float64,0x625b4d0ac4b6a,0x3ff0000000000000,1 +np.float64,0x3f86cf15702d9e00,0x3ff01fbe0381676d,1 +np.float64,0x800d7d1b685afa37,0x3ff0000000000000,1 +np.float64,0x3fe2cb6e40a596dd,0x3ff80a1a562f7fc9,1 +np.float64,0x3fe756eb8e2eadd7,0x3ffa86c638aad07d,1 +np.float64,0x800dc9a5513b934b,0x3ff0000000000000,1 +np.float64,0xbfbbdd118a37ba20,0x3fedacb4624f3cee,1 +np.float64,0x800de01f8efbc03f,0x3ff0000000000000,1 +np.float64,0x800da1a3fe9b4348,0x3ff0000000000000,1 +np.float64,0xbf87d8c7602fb180,0x3fefbe2614998ab6,1 +np.float64,0xbfdfff6141bffec2,0x3fe6a0c54d9f1bc8,1 +np.float64,0xee8fbba5dd1f8,0x3ff0000000000000,1 +np.float64,0x3fe79dc93e6f3b92,0x3ffaaf9d7d955b2c,1 +np.float64,0xffedd4b3d07ba967,0x0,1 +np.float64,0x800905dfc1720bc0,0x3ff0000000000000,1 +np.float64,0x3fd9e483b8b3c907,0x3ff52ddc6c950e7f,1 +np.float64,0xe34ffefdc6a00,0x3ff0000000000000,1 +np.float64,0x2168e62242d1e,0x3ff0000000000000,1 +np.float64,0x800349950e26932b,0x3ff0000000000000,1 +np.float64,0x7fc50da8532a1b50,0x7ff0000000000000,1 +np.float64,0xae1a4d115c34a,0x3ff0000000000000,1 +np.float64,0xa020f0b74041e,0x3ff0000000000000,1 +np.float64,0x3fd2aa2f77a5545f,0x3ff3959f09519a25,1 +np.float64,0x3fbfefc3223fdf86,0x3ff171f3df2d408b,1 +np.float64,0xbfea9fc340b53f86,0x3fe1f9d92b712654,1 +np.float64,0xffe9b920a5337240,0x0,1 +np.float64,0xbfe2eb0265e5d605,0x3fe53dd195782de3,1 +np.float64,0x7fb932c70e32658d,0x7ff0000000000000,1 +np.float64,0x3fda816bfcb502d8,0x3ff551f8d5c84c82,1 +np.float64,0x3fed68cbe9fad198,0x3ffe40f6692d5693,1 +np.float64,0x32df077665be2,0x3ff0000000000000,1 +np.float64,0x7fdc9c2f3539385d,0x7ff0000000000000,1 +np.float64,0x7fe71091a2ee2122,0x7ff0000000000000,1 +np.float64,0xbfe68106c46d020e,0x3fe3a76b56024c2c,1 +np.float64,0xffcf0572823e0ae4,0x0,1 +np.float64,0xbfeeab341fbd5668,0x3fe077d496941cda,1 +np.float64,0x7fe7ada0d2af5b41,0x7ff0000000000000,1 +np.float64,0xffacdef2a439bde0,0x0,1 +np.float64,0x3fe4200f3128401e,0x3ff8be0ddf30fd1e,1 +np.float64,0xffd9022a69320454,0x0,1 +np.float64,0xbfe8e06914f1c0d2,0x3fe2ab5fe7fffb5a,1 +np.float64,0x3fc4b976602972ed,0x3ff1e6786fa7a890,1 +np.float64,0xbfd784c105af0982,0x3fe8cdeb1cdbd57e,1 +np.float64,0x7feb20a20eb64143,0x7ff0000000000000,1 +np.float64,0xbfc87dd83630fbb0,0x3fec067c1e7e6983,1 +np.float64,0x7fe5400cbe6a8018,0x7ff0000000000000,1 +np.float64,0xbfb4a1f5e22943e8,0x3fee42e6c81559a9,1 +np.float64,0x3fe967c575f2cf8a,0x3ffbbd8bc0d5c50d,1 +np.float64,0xbfeb059cf4760b3a,0x3fe1d25c592c4dab,1 +np.float64,0xbfeef536d5bdea6e,0x3fe05d832c15c64a,1 +np.float64,0x3fa90b3f6432167f,0x3ff08d410dd732cc,1 +np.float64,0xbfeaff265e75fe4d,0x3fe1d4db3fb3208d,1 +np.float64,0x6d93d688db27b,0x3ff0000000000000,1 +np.float64,0x800ab9b4ea55736a,0x3ff0000000000000,1 +np.float64,0x3fd444b39d288967,0x3ff3ed749d48d444,1 +np.float64,0xbfd5f2c0d0abe582,0x3fe93ad6124d88e7,1 +np.float64,0x3fea8fd915f51fb2,0x3ffc71b32cb92d60,1 +np.float64,0xbfd23d6491a47aca,0x3fea43875709b0f0,1 +np.float64,0xffe76f75ce6edeeb,0x0,1 +np.float64,0x1f5670da3eacf,0x3ff0000000000000,1 +np.float64,0x8000d89c9621b13a,0x3ff0000000000000,1 +np.float64,0x3fedb51c52bb6a39,0x3ffe732279c228ff,1 +np.float64,0x7f99215ac83242b5,0x7ff0000000000000,1 +np.float64,0x742a6864e854e,0x3ff0000000000000,1 +np.float64,0xbfe02fb340205f66,0x3fe689495f9164e3,1 +np.float64,0x7fef4c12b0fe9824,0x7ff0000000000000,1 +np.float64,0x3fd40e17c2a81c30,0x3ff3e1aee8ed972f,1 +np.float64,0x7fdcd264e939a4c9,0x7ff0000000000000,1 +np.float64,0x3fdb675838b6ceb0,0x3ff587526241c550,1 +np.float64,0x3fdf1a4081be3480,0x3ff66896a18c2385,1 +np.float64,0xbfea5082b874a106,0x3fe218cf8f11be13,1 +np.float64,0xffe1a0ebf7e341d8,0x0,1 +np.float64,0x3fed0a2222ba1444,0x3ffe032ce928ae7d,1 +np.float64,0xffeae036da75c06d,0x0,1 +np.float64,0x5b05fc8ab60c0,0x3ff0000000000000,1 +np.float64,0x7fd8aae5f03155cb,0x7ff0000000000000,1 +np.float64,0xbfd0b4d9fda169b4,0x3feab41e58b6ccb7,1 +np.float64,0xffdcaffa57395ff4,0x0,1 +np.float64,0xbfcbf1455437e28c,0x3feb81a884182c5d,1 +np.float64,0x3f9d6700b83ace01,0x3ff0525657db35d4,1 +np.float64,0x4fd5b0b29fab7,0x3ff0000000000000,1 +np.float64,0x3fe9af2df5b35e5c,0x3ffbe895684df916,1 +np.float64,0x800dfd41f9dbfa84,0x3ff0000000000000,1 +np.float64,0xbf2a30457e546,0x3ff0000000000000,1 +np.float64,0x7fc6be37182d7c6d,0x7ff0000000000000,1 +np.float64,0x800e0f9788dc1f2f,0x3ff0000000000000,1 +np.float64,0x8006890c704d121a,0x3ff0000000000000,1 +np.float64,0xffecb1a7cbb9634f,0x0,1 +np.float64,0xffb35c330426b868,0x0,1 +np.float64,0x7fe8f2ba8a71e574,0x7ff0000000000000,1 +np.float64,0xf3ccff8fe79a0,0x3ff0000000000000,1 +np.float64,0x3fdf19a84e3e3351,0x3ff66871b17474c1,1 +np.float64,0x80049a662d0934cd,0x3ff0000000000000,1 +np.float64,0xdf5bb4bbbeb77,0x3ff0000000000000,1 +np.float64,0x8005eca030cbd941,0x3ff0000000000000,1 +np.float64,0xffe5f239586be472,0x0,1 +np.float64,0xbfc4526a0728a4d4,0x3fecaa52fbf5345e,1 +np.float64,0xbfe8f1ecda31e3da,0x3fe2a44c080848b3,1 +np.float64,0x3feebd32f4bd7a66,0x3fff234788938c3e,1 +np.float64,0xffd6ca04e9ad940a,0x0,1 +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 +np.float64,0xbfd4c560a9a98ac2,0x3fe98db6d97442fc,1 +np.float64,0x8005723471cae46a,0x3ff0000000000000,1 +np.float64,0xbfeb278299764f05,0x3fe1c54b48f8ba4b,1 +np.float64,0x8007907b376f20f7,0x3ff0000000000000,1 +np.float64,0x7fe9c2fd01b385f9,0x7ff0000000000000,1 +np.float64,0x7fdaa37368b546e6,0x7ff0000000000000,1 +np.float64,0xbfe6d0f3786da1e7,0x3fe38582271cada7,1 +np.float64,0xbfea9b77823536ef,0x3fe1fb8575cd1b7d,1 +np.float64,0xbfe90ac38bf21587,0x3fe29a471b47a2e8,1 +np.float64,0xbfe9c51844738a30,0x3fe24fc8de03ea84,1 +np.float64,0x3fe45a9013a8b520,0x3ff8dd7c80f1cf75,1 +np.float64,0xbfe5780551eaf00a,0x3fe419832a6a4c56,1 +np.float64,0xffefffffffffffff,0x0,1 +np.float64,0x7fe3778c84a6ef18,0x7ff0000000000000,1 +np.float64,0xbfdc8a60413914c0,0x3fe77dc55b85028f,1 +np.float64,0xef47ae2fde8f6,0x3ff0000000000000,1 +np.float64,0x8001269fa4c24d40,0x3ff0000000000000,1 +np.float64,0x3fe9d2d39e73a5a7,0x3ffbfe2a66c4148e,1 +np.float64,0xffee61f528fcc3e9,0x0,1 +np.float64,0x3fe8a259ab7144b3,0x3ffb47e797a34bd2,1 +np.float64,0x3f906d610820dac0,0x3ff02dccda8e1a75,1 +np.float64,0x3fe70739f32e0e74,0x3ffa59232f4fcd07,1 +np.float64,0x3fe6b7f5e6ad6fec,0x3ffa2c0cc54f2c16,1 +np.float64,0x95a91a792b524,0x3ff0000000000000,1 +np.float64,0xbfedf6fcf57bedfa,0x3fe0b89bb40081cc,1 +np.float64,0xbfa4d2de9c29a5c0,0x3fef1c485678d657,1 +np.float64,0x3fe130470d22608e,0x3ff737b0be409a38,1 +np.float64,0x3fcf8035423f006b,0x3ff2f9d7c3c6a302,1 +np.float64,0xffe5995a3eab32b4,0x0,1 +np.float64,0xffca68c63034d18c,0x0,1 +np.float64,0xff9d53af903aa760,0x0,1 +np.float64,0x800563f1de6ac7e4,0x3ff0000000000000,1 +np.float64,0x7fce284fa63c509e,0x7ff0000000000000,1 +np.float64,0x7fb2a3959a25472a,0x7ff0000000000000,1 +np.float64,0x7fdbe2652f37c4c9,0x7ff0000000000000,1 +np.float64,0x800d705bbc1ae0b8,0x3ff0000000000000,1 +np.float64,0x7fd9bd2347b37a46,0x7ff0000000000000,1 +np.float64,0x3fcac3c0fb358782,0x3ff27ed62d6c8221,1 +np.float64,0x800110691ec220d3,0x3ff0000000000000,1 +np.float64,0x3fef79a8157ef350,0x3fffa368513eb909,1 +np.float64,0x7fe8bd2f0e317a5d,0x7ff0000000000000,1 +np.float64,0x7fd3040e60a6081c,0x7ff0000000000000,1 +np.float64,0xffea50723234a0e4,0x0,1 +np.float64,0xbfe6220054ac4400,0x3fe3d00961238a93,1 +np.float64,0x3f9eddd8c83dbbc0,0x3ff0567b0c73005a,1 +np.float64,0xbfa4a062c42940c0,0x3fef1e68badde324,1 +np.float64,0xbfd077ad4720ef5a,0x3feac5d577581d07,1 +np.float64,0x7fdfd4b025bfa95f,0x7ff0000000000000,1 +np.float64,0xd00d3cf3a01a8,0x3ff0000000000000,1 +np.float64,0x7fe3010427260207,0x7ff0000000000000,1 +np.float64,0x22ea196645d44,0x3ff0000000000000,1 +np.float64,0x7fd747e8cd2e8fd1,0x7ff0000000000000,1 +np.float64,0xd50665e7aa0cd,0x3ff0000000000000,1 +np.float64,0x7fe1da580ae3b4af,0x7ff0000000000000,1 +np.float64,0xffeb218ecfb6431d,0x0,1 +np.float64,0xbf887d0dd030fa00,0x3fefbc6252c8b354,1 +np.float64,0x3fcaa31067354621,0x3ff27b904c07e07f,1 +np.float64,0x7fe698cc4ded3198,0x7ff0000000000000,1 +np.float64,0x1c40191a38804,0x3ff0000000000000,1 +np.float64,0x80086fd20e30dfa4,0x3ff0000000000000,1 +np.float64,0x7fed34d5eaba69ab,0x7ff0000000000000,1 +np.float64,0xffd00b52622016a4,0x0,1 +np.float64,0x3f80abcdb021579b,0x3ff0172d27945851,1 +np.float64,0x3fe614cfd66c29a0,0x3ff9d031e1839191,1 +np.float64,0x80021d71c8843ae4,0x3ff0000000000000,1 +np.float64,0x800bc2adc657855c,0x3ff0000000000000,1 +np.float64,0x6b9fec1cd73fe,0x3ff0000000000000,1 +np.float64,0xffd9093b5f321276,0x0,1 +np.float64,0x800d3c6c77fa78d9,0x3ff0000000000000,1 +np.float64,0xffe80fc1cbf01f83,0x0,1 +np.float64,0xffbffbaf2a3ff760,0x0,1 +np.float64,0x3fea1ed29eb43da5,0x3ffc2c64ec0e17a3,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0x3fd944a052328941,0x3ff5094f4c43ecca,1 +np.float64,0x800b1f9416163f29,0x3ff0000000000000,1 +np.float64,0x800f06bf33de0d7e,0x3ff0000000000000,1 +np.float64,0xbfdbf0d226b7e1a4,0x3fe7a4f73793d95b,1 +np.float64,0xffe7306c30ae60d8,0x0,1 +np.float64,0x7fe991accfb32359,0x7ff0000000000000,1 +np.float64,0x3fcc0040d2380082,0x3ff29ea47e4f07d4,1 +np.float64,0x7fefffffffffffff,0x7ff0000000000000,1 +np.float64,0x0,0x3ff0000000000000,1 +np.float64,0x3fe1423f7be2847e,0x3ff740bc1d3b20f8,1 +np.float64,0xbfeae3a3cab5c748,0x3fe1df7e936f8504,1 +np.float64,0x800b2da7d6165b50,0x3ff0000000000000,1 +np.float64,0x800b2404fcd6480a,0x3ff0000000000000,1 +np.float64,0x6fcbcf88df97b,0x3ff0000000000000,1 +np.float64,0xa248c0e14492,0x3ff0000000000000,1 +np.float64,0xffd255776824aaee,0x0,1 +np.float64,0x80057b3effeaf67f,0x3ff0000000000000,1 +np.float64,0x3feb0b07d7761610,0x3ffcbdfe1be5a594,1 +np.float64,0x924e1019249c2,0x3ff0000000000000,1 +np.float64,0x80074307e80e8611,0x3ff0000000000000,1 +np.float64,0xffb207fa46240ff8,0x0,1 +np.float64,0x95ac388d2b587,0x3ff0000000000000,1 +np.float64,0xbff0000000000000,0x3fe0000000000000,1 +np.float64,0x3fd38b6a492716d5,0x3ff3c59f62b5add5,1 +np.float64,0x7fe49362c3e926c5,0x7ff0000000000000,1 +np.float64,0x7fe842889db08510,0x7ff0000000000000,1 +np.float64,0xbfba6003e834c008,0x3fedcb620a2d9856,1 +np.float64,0xffe7e782bd6fcf05,0x0,1 +np.float64,0x7fd9b93d9433727a,0x7ff0000000000000,1 +np.float64,0x7fc8fcb61d31f96b,0x7ff0000000000000,1 +np.float64,0xbfef9be8db3f37d2,0x3fe022d603b81dc2,1 +np.float64,0x6f4fc766de9fa,0x3ff0000000000000,1 +np.float64,0xbfe93016f132602e,0x3fe28b42d782d949,1 +np.float64,0x3fe10e52b8e21ca5,0x3ff726a38b0bb895,1 +np.float64,0x3fbbba0ae6377416,0x3ff13f56084a9da3,1 +np.float64,0x3fe09e42ece13c86,0x3ff6eeb57e775e24,1 +np.float64,0x800942e39fb285c8,0x3ff0000000000000,1 +np.float64,0xffe5964370eb2c86,0x0,1 +np.float64,0x3fde479f32bc8f3e,0x3ff635b2619ba53a,1 +np.float64,0x3fe826e187f04dc3,0x3ffaff52b79c3a08,1 +np.float64,0x3febcbf1eab797e4,0x3ffd37152e5e2598,1 +np.float64,0x3fa0816a202102d4,0x3ff05c8e6a8b00d5,1 +np.float64,0xbd005ccb7a00c,0x3ff0000000000000,1 +np.float64,0x44c12fdc89827,0x3ff0000000000000,1 +np.float64,0xffc8fdffa431fc00,0x0,1 +np.float64,0xffeb4f5a87b69eb4,0x0,1 +np.float64,0xbfb07e7f8420fd00,0x3fee9a32924fe6a0,1 +np.float64,0xbfbd9d1bb63b3a38,0x3fed88ca81e5771c,1 +np.float64,0x8008682a74f0d055,0x3ff0000000000000,1 +np.float64,0x3fdeedbc7b3ddb79,0x3ff65dcb7c55f4dc,1 +np.float64,0x8009e889c613d114,0x3ff0000000000000,1 +np.float64,0x3faea831f43d5064,0x3ff0ad935e890e49,1 +np.float64,0xf0af1703e15e3,0x3ff0000000000000,1 +np.float64,0xffec06c4a5f80d88,0x0,1 +np.float64,0x53a1cc0ca743a,0x3ff0000000000000,1 +np.float64,0x7fd10c9eea22193d,0x7ff0000000000000,1 +np.float64,0xbfd48a6bf0a914d8,0x3fe99e0d109f2bac,1 +np.float64,0x3fd6dfe931adbfd4,0x3ff47f81c2dfc5d3,1 +np.float64,0x3fed20e86b7a41d0,0x3ffe11fecc7bc686,1 +np.float64,0xbfea586818b4b0d0,0x3fe215b7747d5cb8,1 +np.float64,0xbfd4ad3e20295a7c,0x3fe99465ab8c3275,1 +np.float64,0x3fd6619ee4acc33e,0x3ff4638b7b80c08a,1 +np.float64,0x3fdf6fcb63bedf97,0x3ff67d62fd3d560c,1 +np.float64,0x800a9191e7152324,0x3ff0000000000000,1 +np.float64,0x3fd2ff3c0da5fe78,0x3ff3a7b17e892a28,1 +np.float64,0x8003dbf1f327b7e5,0x3ff0000000000000,1 +np.float64,0xffea6b89a934d712,0x0,1 +np.float64,0x7fcfb879043f70f1,0x7ff0000000000000,1 +np.float64,0xea6a84dbd4d51,0x3ff0000000000000,1 +np.float64,0x800ec97a815d92f5,0x3ff0000000000000,1 +np.float64,0xffe304c3a8660987,0x0,1 +np.float64,0xbfefe24dd3ffc49c,0x3fe00a4e065be96d,1 +np.float64,0xffd3cc8c00a79918,0x0,1 +np.float64,0x95be8b7b2b7d2,0x3ff0000000000000,1 +np.float64,0x7fe20570cba40ae1,0x7ff0000000000000,1 +np.float64,0x7f97a06da02f40da,0x7ff0000000000000,1 +np.float64,0xffe702b9522e0572,0x0,1 +np.float64,0x3fada2d8543b45b1,0x3ff0a7adc4201e08,1 +np.float64,0x235e6acc46bce,0x3ff0000000000000,1 +np.float64,0x3fea6bc28ef4d786,0x3ffc5b7fc68fddac,1 +np.float64,0xffdbc9f505b793ea,0x0,1 +np.float64,0xffe98b137ff31626,0x0,1 +np.float64,0x800e26c6721c4d8d,0x3ff0000000000000,1 +np.float64,0x80080de445301bc9,0x3ff0000000000000,1 +np.float64,0x37e504a86fca1,0x3ff0000000000000,1 +np.float64,0x8002f5f60325ebed,0x3ff0000000000000,1 +np.float64,0x5c8772feb90ef,0x3ff0000000000000,1 +np.float64,0xbfe021abb4604358,0x3fe69023a51d22b8,1 +np.float64,0x3fde744f8fbce8a0,0x3ff64074dc84edd7,1 +np.float64,0xbfdd92899f3b2514,0x3fe73aefd9701858,1 +np.float64,0x7fc1ad5c51235ab8,0x7ff0000000000000,1 +np.float64,0xaae2f98955c5f,0x3ff0000000000000,1 +np.float64,0x7f9123d5782247aa,0x7ff0000000000000,1 +np.float64,0xbfe3f8e94b67f1d2,0x3fe4c30ab28e9cb7,1 +np.float64,0x7fdaba8b4cb57516,0x7ff0000000000000,1 +np.float64,0x7fefc85cfeff90b9,0x7ff0000000000000,1 +np.float64,0xffb83b4f523076a0,0x0,1 +np.float64,0xbfe888a68c71114d,0x3fe2ceff17c203d1,1 +np.float64,0x800de1dac4bbc3b6,0x3ff0000000000000,1 +np.float64,0xbfe4f27f09e9e4fe,0x3fe453f9af407eac,1 +np.float64,0xffe3d2713467a4e2,0x0,1 +np.float64,0xbfebaab840375570,0x3fe1931131b98842,1 +np.float64,0x93892a1b27126,0x3ff0000000000000,1 +np.float64,0x1e8e7f983d1d1,0x3ff0000000000000,1 +np.float64,0x3fecc950627992a0,0x3ffdd926f036add0,1 +np.float64,0xbfd41dfb1aa83bf6,0x3fe9bc34ece35b94,1 +np.float64,0x800aebfc6555d7f9,0x3ff0000000000000,1 +np.float64,0x7fe33ba52ca67749,0x7ff0000000000000,1 +np.float64,0xffe57c9b3feaf936,0x0,1 +np.float64,0x3fdd12464fba248c,0x3ff5ebc5598e6bd0,1 +np.float64,0xffe06d7f0fe0dafe,0x0,1 +np.float64,0x800e55b7fe9cab70,0x3ff0000000000000,1 +np.float64,0x3fd33803c8267008,0x3ff3b3cb78b2d642,1 +np.float64,0xe9cab8a1d3957,0x3ff0000000000000,1 +np.float64,0x3fb38ac166271580,0x3ff0de906947c0f0,1 +np.float64,0xbfd67aa552acf54a,0x3fe915cf64a389fd,1 +np.float64,0x1db96daa3b72f,0x3ff0000000000000,1 +np.float64,0xbfee9f08f4fd3e12,0x3fe07c2c615add3c,1 +np.float64,0xf14f6d65e29ee,0x3ff0000000000000,1 +np.float64,0x800bce089e179c12,0x3ff0000000000000,1 +np.float64,0xffc42dcc37285b98,0x0,1 +np.float64,0x7fd5f37063abe6e0,0x7ff0000000000000,1 +np.float64,0xbfd943c2cbb28786,0x3fe856f6452ec753,1 +np.float64,0x8ddfbc091bbf8,0x3ff0000000000000,1 +np.float64,0xbfe153491e22a692,0x3fe5fcb075dbbd5d,1 +np.float64,0xffe7933999ef2672,0x0,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x8000000000000000,0x3ff0000000000000,1 +np.float64,0xbfe9154580b22a8b,0x3fe2960bac3a8220,1 +np.float64,0x800dc6dda21b8dbb,0x3ff0000000000000,1 +np.float64,0xbfb26225a824c448,0x3fee7239a457df81,1 +np.float64,0xbfd7b68c83af6d1a,0x3fe8c08e351ab468,1 +np.float64,0xffde01f7213c03ee,0x0,1 +np.float64,0x3fe54cbe0faa997c,0x3ff9614527191d72,1 +np.float64,0xbfd6bec3732d7d86,0x3fe90354909493de,1 +np.float64,0xbfef3c85bd7e790b,0x3fe0444f8c489ca6,1 +np.float64,0x899501b7132a0,0x3ff0000000000000,1 +np.float64,0xbfe17a456462f48b,0x3fe5ea2719a9a84b,1 +np.float64,0xffe34003b8668007,0x0,1 +np.float64,0x7feff6a3633fed46,0x7ff0000000000000,1 +np.float64,0x3fba597ecc34b2fe,0x3ff12ee72e4de474,1 +np.float64,0x4084c7b68109a,0x3ff0000000000000,1 +np.float64,0x3fad23bf4c3a4780,0x3ff0a4d06193ff6d,1 +np.float64,0xffd0fe2707a1fc4e,0x0,1 +np.float64,0xb96cb43f72d97,0x3ff0000000000000,1 +np.float64,0x7fc4d684d829ad09,0x7ff0000000000000,1 +np.float64,0x7fdc349226b86923,0x7ff0000000000000,1 +np.float64,0x7fd82851cd3050a3,0x7ff0000000000000,1 +np.float64,0x800cde0041b9bc01,0x3ff0000000000000,1 +np.float64,0x4e8caa1e9d196,0x3ff0000000000000,1 +np.float64,0xbfed06a6d2fa0d4e,0x3fe1108c3682b05a,1 +np.float64,0xffe8908122312102,0x0,1 +np.float64,0xffe56ed6d9aaddad,0x0,1 +np.float64,0x3fedd6db00fbadb6,0x3ffe896c68c4b26e,1 +np.float64,0x3fde31f9b4bc63f4,0x3ff6307e08f8b6ba,1 +np.float64,0x6bb963c2d772d,0x3ff0000000000000,1 +np.float64,0x787b7142f0f6f,0x3ff0000000000000,1 +np.float64,0x3fe6e4147c6dc829,0x3ffa451bbdece240,1 +np.float64,0x8003857401470ae9,0x3ff0000000000000,1 +np.float64,0xbfeae82c3c75d058,0x3fe1ddbd66e65aab,1 +np.float64,0x7fe174707c62e8e0,0x7ff0000000000000,1 +np.float64,0x80008d2545e11a4b,0x3ff0000000000000,1 +np.float64,0xbfecc2dce17985ba,0x3fe129ad4325985a,1 +np.float64,0xbfe1fa1daf63f43c,0x3fe5adcb0731a44b,1 +np.float64,0x7fcf2530203e4a5f,0x7ff0000000000000,1 +np.float64,0xbfea5cefe874b9e0,0x3fe213f134b61f4a,1 +np.float64,0x800103729f2206e6,0x3ff0000000000000,1 +np.float64,0xbfe8442ff7708860,0x3fe2eaf850faa169,1 +np.float64,0x8006c78e19ed8f1d,0x3ff0000000000000,1 +np.float64,0x3fc259589c24b2b1,0x3ff1abe6a4d28816,1 +np.float64,0xffed02b7b5ba056e,0x0,1 +np.float64,0xbfce0aa4fe3c1548,0x3feb32115d92103e,1 +np.float64,0x7fec06e78bf80dce,0x7ff0000000000000,1 +np.float64,0xbfe0960bbc612c18,0x3fe6578ab29b70d4,1 +np.float64,0x3fee45841cbc8b08,0x3ffed2f6ca808ad3,1 +np.float64,0xbfeb0f8ebef61f1e,0x3fe1ce86003044cd,1 +np.float64,0x8002c357358586af,0x3ff0000000000000,1 +np.float64,0x3fe9aa10cc735422,0x3ffbe57e294ce68b,1 +np.float64,0x800256c0a544ad82,0x3ff0000000000000,1 +np.float64,0x4de6e1449bcdd,0x3ff0000000000000,1 +np.float64,0x65e9bc9ccbd38,0x3ff0000000000000,1 +np.float64,0xbfe53b0fa9aa7620,0x3fe4341f0aa29bbc,1 +np.float64,0xbfcdd94cd13bb298,0x3feb3956acd2e2dd,1 +np.float64,0x8004a49b65a94938,0x3ff0000000000000,1 +np.float64,0x800d3d05deba7a0c,0x3ff0000000000000,1 +np.float64,0x3fe4e05bce69c0b8,0x3ff925f55602a7e0,1 +np.float64,0xffe391e3256723c6,0x0,1 +np.float64,0xbfe92f0f37b25e1e,0x3fe28bacc76ae753,1 +np.float64,0x3f990238d8320472,0x3ff045edd36e2d62,1 +np.float64,0xffed8d15307b1a2a,0x0,1 +np.float64,0x3fee82e01afd05c0,0x3ffefc09e8b9c2b7,1 +np.float64,0xffb2d94b2225b298,0x0,1 diff --git a/local_packages/numpy/_core/tests/data/umath-validation-set-expm1.csv b/local_packages/numpy/_core/tests/data/umath-validation-set-expm1.csv new file mode 100644 index 0000000..dcbc7cd --- /dev/null +++ b/local_packages/numpy/_core/tests/data/umath-validation-set-expm1.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0x80606724,0x80606724,3 +np.float32,0xbf16790f,0xbee38e14,3 +np.float32,0xbf1778a1,0xbee4a97f,3 +np.float32,0x7d4fc610,0x7f800000,3 +np.float32,0xbec30a20,0xbea230d5,3 +np.float32,0x3eae8a36,0x3ecffac5,3 +np.float32,0xbf1f08f1,0xbeece93c,3 +np.float32,0x80374376,0x80374376,3 +np.float32,0x3f2e04ca,0x3f793115,3 +np.float32,0x7e2c7e36,0x7f800000,3 +np.float32,0xbf686cae,0xbf18bcf0,3 +np.float32,0xbf5518cd,0xbf10a3da,3 +np.float32,0x807e233c,0x807e233c,3 +np.float32,0x7f4edd54,0x7f800000,3 +np.float32,0x7ed70088,0x7f800000,3 +np.float32,0x801675da,0x801675da,3 +np.float32,0x806735d5,0x806735d5,3 +np.float32,0xfe635fec,0xbf800000,3 +np.float32,0xfed88a0a,0xbf800000,3 +np.float32,0xff52c052,0xbf800000,3 +np.float32,0x7fc00000,0x7fc00000,3 +np.float32,0xff4f65f9,0xbf800000,3 +np.float32,0xfe0f6c20,0xbf800000,3 +np.float32,0x80322b30,0x80322b30,3 +np.float32,0xfb757000,0xbf800000,3 +np.float32,0x3c81e0,0x3c81e0,3 +np.float32,0x79d56a,0x79d56a,3 +np.float32,0x8029d7af,0x8029d7af,3 +np.float32,0x8058a593,0x8058a593,3 +np.float32,0x3f3a13c7,0x3f88c75c,3 +np.float32,0x2a6b05,0x2a6b05,3 +np.float32,0xbd64c960,0xbd5e83ae,3 +np.float32,0x80471052,0x80471052,3 +np.float32,0xbe5dd950,0xbe47766c,3 +np.float32,0xfd8f88f0,0xbf800000,3 +np.float32,0x75a4b7,0x75a4b7,3 +np.float32,0x3f726f2e,0x3fc9fb7d,3 +np.float32,0x3ed6795c,0x3f053115,3 +np.float32,0x17d7f5,0x17d7f5,3 +np.float32,0xbf4cf19b,0xbf0d094f,3 +np.float32,0x3e0ec532,0x3e1933c6,3 +np.float32,0xff084016,0xbf800000,3 +np.float32,0x800829aa,0x800829aa,3 +np.float32,0x806d7302,0x806d7302,3 +np.float32,0x7f59d9da,0x7f800000,3 +np.float32,0x15f8b9,0x15f8b9,3 +np.float32,0x803befb3,0x803befb3,3 +np.float32,0x525043,0x525043,3 +np.float32,0x51a647,0x51a647,3 +np.float32,0xbf1cfce4,0xbeeab3d9,3 +np.float32,0x3f1f27a4,0x3f5cb1d2,3 +np.float32,0xbebc3a04,0xbe9d8142,3 +np.float32,0xbeea548c,0xbebc07e5,3 +np.float32,0x3f47401c,0x3f96c2a3,3 +np.float32,0x806b1ea3,0x806b1ea3,3 +np.float32,0x3ea56bb8,0x3ec3450c,3 +np.float32,0x3f7b4963,0x3fd597b5,3 +np.float32,0x7f051fa0,0x7f800000,3 +np.float32,0x1d411c,0x1d411c,3 +np.float32,0xff0b6a35,0xbf800000,3 +np.float32,0xbead63c0,0xbe9314f7,3 +np.float32,0x3738be,0x3738be,3 +np.float32,0x3f138cc8,0x3f479155,3 +np.float32,0x800a539f,0x800a539f,3 +np.float32,0x801b0ebd,0x801b0ebd,3 +np.float32,0x318fcd,0x318fcd,3 +np.float32,0x3ed67556,0x3f052e06,3 +np.float32,0x702886,0x702886,3 +np.float32,0x80000001,0x80000001,3 +np.float32,0x70a174,0x70a174,3 +np.float32,0x4f9c66,0x4f9c66,3 +np.float32,0x3e3e1927,0x3e50e351,3 +np.float32,0x7eac9a4d,0x7f800000,3 +np.float32,0x4b7407,0x4b7407,3 +np.float32,0x7f5bd2fd,0x7f800000,3 +np.float32,0x3eaafc58,0x3ecaffbd,3 +np.float32,0xbc989360,0xbc9729e2,3 +np.float32,0x3f470e5c,0x3f968c7b,3 +np.float32,0x4c5672,0x4c5672,3 +np.float32,0xff2b2ee2,0xbf800000,3 +np.float32,0xbf28a104,0xbef7079b,3 +np.float32,0x2c6175,0x2c6175,3 +np.float32,0x3d7e4fb0,0x3d832f9f,3 +np.float32,0x763276,0x763276,3 +np.float32,0x3cf364,0x3cf364,3 +np.float32,0xbf7ace75,0xbf1fe48c,3 +np.float32,0xff19e858,0xbf800000,3 +np.float32,0x80504c70,0x80504c70,3 +np.float32,0xff390210,0xbf800000,3 +np.float32,0x8046a743,0x8046a743,3 +np.float32,0x80000000,0x80000000,3 +np.float32,0x806c51da,0x806c51da,3 +np.float32,0x806ab38f,0x806ab38f,3 +np.float32,0x3f3de863,0x3f8cc538,3 +np.float32,0x7f6d45bb,0x7f800000,3 +np.float32,0xfd16ec60,0xbf800000,3 +np.float32,0x80513cba,0x80513cba,3 +np.float32,0xbf68996b,0xbf18cefa,3 +np.float32,0xfe039f2c,0xbf800000,3 +np.float32,0x3f013207,0x3f280c55,3 +np.float32,0x7ef4bc07,0x7f800000,3 +np.float32,0xbe8b65ac,0xbe741069,3 +np.float32,0xbf7a8186,0xbf1fc7a6,3 +np.float32,0x802532e5,0x802532e5,3 +np.float32,0x32c7df,0x32c7df,3 +np.float32,0x3ce4dceb,0x3ce81701,3 +np.float32,0xfe801118,0xbf800000,3 +np.float32,0x3d905f20,0x3d9594fb,3 +np.float32,0xbe11ed28,0xbe080168,3 +np.float32,0x59e773,0x59e773,3 +np.float32,0x3e9a2547,0x3eb3dd57,3 +np.float32,0x7ecb7c67,0x7f800000,3 +np.float32,0x7f69a67e,0x7f800000,3 +np.float32,0xff121e11,0xbf800000,3 +np.float32,0x3f7917cb,0x3fd2ad8c,3 +np.float32,0xbf1a7da8,0xbee7fc0c,3 +np.float32,0x3f077e66,0x3f329c40,3 +np.float32,0x3ce8e040,0x3cec37b3,3 +np.float32,0xbf3f0b8e,0xbf069f4d,3 +np.float32,0x3f52f194,0x3fa3c9d6,3 +np.float32,0xbf0e7422,0xbeda80f2,3 +np.float32,0xfd67e230,0xbf800000,3 +np.float32,0xff14d9a9,0xbf800000,3 +np.float32,0x3f3546e3,0x3f83dc2b,3 +np.float32,0x3e152e3a,0x3e20983d,3 +np.float32,0x4a89a3,0x4a89a3,3 +np.float32,0x63217,0x63217,3 +np.float32,0xbeb9e2a8,0xbe9be153,3 +np.float32,0x7e9fa049,0x7f800000,3 +np.float32,0x7f58110c,0x7f800000,3 +np.float32,0x3e88290c,0x3e9bfba9,3 +np.float32,0xbf2cb206,0xbefb3494,3 +np.float32,0xff5880c4,0xbf800000,3 +np.float32,0x7ecff3ac,0x7f800000,3 +np.float32,0x3f4b3de6,0x3f9b23fd,3 +np.float32,0xbebd2048,0xbe9e208c,3 +np.float32,0xff08f7a2,0xbf800000,3 +np.float32,0xff473330,0xbf800000,3 +np.float32,0x1,0x1,3 +np.float32,0xbf5dc239,0xbf14584b,3 +np.float32,0x458e3f,0x458e3f,3 +np.float32,0xbdb8a650,0xbdb091f8,3 +np.float32,0xff336ffc,0xbf800000,3 +np.float32,0x3c60bd00,0x3c624966,3 +np.float32,0xbe16a4f8,0xbe0c1664,3 +np.float32,0x3f214246,0x3f60a0f0,3 +np.float32,0x7fa00000,0x7fe00000,3 +np.float32,0x7e08737e,0x7f800000,3 +np.float32,0x3f70574c,0x3fc74b8e,3 +np.float32,0xbed5745c,0xbeae8c77,3 +np.float32,0x361752,0x361752,3 +np.float32,0x3eb276d6,0x3ed584ea,3 +np.float32,0x3f03fc1e,0x3f2cb1a5,3 +np.float32,0x3fafd1,0x3fafd1,3 +np.float32,0x7e50d74c,0x7f800000,3 +np.float32,0x3eeca5,0x3eeca5,3 +np.float32,0x5dc963,0x5dc963,3 +np.float32,0x7f0e63ae,0x7f800000,3 +np.float32,0x8021745f,0x8021745f,3 +np.float32,0xbf5881a9,0xbf121d07,3 +np.float32,0x7dadc7fd,0x7f800000,3 +np.float32,0xbf2c0798,0xbefa86bb,3 +np.float32,0x3e635f50,0x3e7e97a9,3 +np.float32,0xbf2053fa,0xbeee4c0e,3 +np.float32,0x3e8eee2b,0x3ea4dfcc,3 +np.float32,0xfc8a03c0,0xbf800000,3 +np.float32,0xfd9e4948,0xbf800000,3 +np.float32,0x801e817e,0x801e817e,3 +np.float32,0xbf603a27,0xbf1560c3,3 +np.float32,0x7f729809,0x7f800000,3 +np.float32,0x3f5a1864,0x3fac0e04,3 +np.float32,0x3e7648b8,0x3e8b3677,3 +np.float32,0x3edade24,0x3f088bc1,3 +np.float32,0x65e16e,0x65e16e,3 +np.float32,0x3f24aa50,0x3f671117,3 +np.float32,0x803cb1d0,0x803cb1d0,3 +np.float32,0xbe7b1858,0xbe5eadcc,3 +np.float32,0xbf19bb27,0xbee726fb,3 +np.float32,0xfd1f6e60,0xbf800000,3 +np.float32,0xfeb0de60,0xbf800000,3 +np.float32,0xff511a52,0xbf800000,3 +np.float32,0xff7757f7,0xbf800000,3 +np.float32,0x463ff5,0x463ff5,3 +np.float32,0x3f770d12,0x3fcffcc2,3 +np.float32,0xbf208562,0xbeee80dc,3 +np.float32,0x6df204,0x6df204,3 +np.float32,0xbf62d24f,0xbf1673fb,3 +np.float32,0x3dfcf210,0x3e069d5f,3 +np.float32,0xbef26002,0xbec114d7,3 +np.float32,0x7f800000,0x7f800000,3 +np.float32,0x7f30fb85,0x7f800000,3 +np.float32,0x7ee5dfef,0x7f800000,3 +np.float32,0x3f317829,0x3f800611,3 +np.float32,0x3f4b0bbd,0x3f9aec88,3 +np.float32,0x7edf708c,0x7f800000,3 +np.float32,0xff071260,0xbf800000,3 +np.float32,0x3e7b8c30,0x3e8e9198,3 +np.float32,0x3f33778b,0x3f82077f,3 +np.float32,0x3e8cd11d,0x3ea215fd,3 +np.float32,0x8004483d,0x8004483d,3 +np.float32,0x801633e3,0x801633e3,3 +np.float32,0x7e76eb15,0x7f800000,3 +np.float32,0x3c1571,0x3c1571,3 +np.float32,0x7de3de52,0x7f800000,3 +np.float32,0x804ae906,0x804ae906,3 +np.float32,0x7f3a2616,0x7f800000,3 +np.float32,0xff7fffff,0xbf800000,3 +np.float32,0xff5d17e4,0xbf800000,3 +np.float32,0xbeaa6704,0xbe90f252,3 +np.float32,0x7e6a43af,0x7f800000,3 +np.float32,0x2a0f35,0x2a0f35,3 +np.float32,0xfd8fece0,0xbf800000,3 +np.float32,0xfeef2e2a,0xbf800000,3 +np.float32,0xff800000,0xbf800000,3 +np.float32,0xbeefcc52,0xbebf78e4,3 +np.float32,0x3db6c490,0x3dbf2bd5,3 +np.float32,0x8290f,0x8290f,3 +np.float32,0xbeace648,0xbe92bb7f,3 +np.float32,0x801fea79,0x801fea79,3 +np.float32,0x3ea6c230,0x3ec51ebf,3 +np.float32,0x3e5f2ca3,0x3e795c8a,3 +np.float32,0x3eb6f634,0x3edbeb9f,3 +np.float32,0xff790b45,0xbf800000,3 +np.float32,0x3d82e240,0x3d872816,3 +np.float32,0x3f0d6a57,0x3f3cc7db,3 +np.float32,0x7f08531a,0x7f800000,3 +np.float32,0x702b6d,0x702b6d,3 +np.float32,0x7d3a3c38,0x7f800000,3 +np.float32,0x3d0a7fb3,0x3d0cddf3,3 +np.float32,0xff28084c,0xbf800000,3 +np.float32,0xfeee8804,0xbf800000,3 +np.float32,0x804094eb,0x804094eb,3 +np.float32,0x7acb39,0x7acb39,3 +np.float32,0x3f01c07a,0x3f28f88c,3 +np.float32,0x3e05c500,0x3e0ee674,3 +np.float32,0xbe6f7c38,0xbe558ac1,3 +np.float32,0x803b1f4b,0x803b1f4b,3 +np.float32,0xbf76561f,0xbf1e332b,3 +np.float32,0xff30d368,0xbf800000,3 +np.float32,0x7e2e1f38,0x7f800000,3 +np.float32,0x3ee085b8,0x3f0ce7c0,3 +np.float32,0x8064c4a7,0x8064c4a7,3 +np.float32,0xa7c1d,0xa7c1d,3 +np.float32,0x3f27498a,0x3f6c14bc,3 +np.float32,0x137ca,0x137ca,3 +np.float32,0x3d0a5c60,0x3d0cb969,3 +np.float32,0x80765f1f,0x80765f1f,3 +np.float32,0x80230a71,0x80230a71,3 +np.float32,0x3f321ed2,0x3f80acf4,3 +np.float32,0x7d61e7f4,0x7f800000,3 +np.float32,0xbf39f7f2,0xbf0430f7,3 +np.float32,0xbe2503f8,0xbe1867e8,3 +np.float32,0x29333d,0x29333d,3 +np.float32,0x7edc5a0e,0x7f800000,3 +np.float32,0xbe81a8a2,0xbe651663,3 +np.float32,0x7f76ab6d,0x7f800000,3 +np.float32,0x7f46111f,0x7f800000,3 +np.float32,0xff0fc888,0xbf800000,3 +np.float32,0x805ece89,0x805ece89,3 +np.float32,0xc390b,0xc390b,3 +np.float32,0xff64bdee,0xbf800000,3 +np.float32,0x3dd07e4e,0x3ddb79bd,3 +np.float32,0xfecc1f10,0xbf800000,3 +np.float32,0x803f5177,0x803f5177,3 +np.float32,0x802a24d2,0x802a24d2,3 +np.float32,0x7f27d0cc,0x7f800000,3 +np.float32,0x3ef57c98,0x3f1d7e88,3 +np.float32,0x7b848d,0x7b848d,3 +np.float32,0x7f7fffff,0x7f800000,3 +np.float32,0xfe889c46,0xbf800000,3 +np.float32,0xff2d6dc5,0xbf800000,3 +np.float32,0x3f53a186,0x3fa492a6,3 +np.float32,0xbf239c94,0xbef1c90c,3 +np.float32,0xff7c0f4e,0xbf800000,3 +np.float32,0x3e7c69a9,0x3e8f1f3a,3 +np.float32,0xbf47c9e9,0xbf0ab2a9,3 +np.float32,0xbc1eaf00,0xbc1deae9,3 +np.float32,0x3f4a6d39,0x3f9a3d8e,3 +np.float32,0x3f677930,0x3fbc26eb,3 +np.float32,0x3f45eea1,0x3f955418,3 +np.float32,0x7f61a1f8,0x7f800000,3 +np.float32,0xff58c7c6,0xbf800000,3 +np.float32,0x80239801,0x80239801,3 +np.float32,0xff56e616,0xbf800000,3 +np.float32,0xff62052c,0xbf800000,3 +np.float32,0x8009b615,0x8009b615,3 +np.float32,0x293d6b,0x293d6b,3 +np.float32,0xfe9e585c,0xbf800000,3 +np.float32,0x7f58ff4b,0x7f800000,3 +np.float32,0x10937c,0x10937c,3 +np.float32,0x7f5cc13f,0x7f800000,3 +np.float32,0x110c5d,0x110c5d,3 +np.float32,0x805e51fc,0x805e51fc,3 +np.float32,0xbedcf70a,0xbeb3766c,3 +np.float32,0x3f4d5e42,0x3f9d8091,3 +np.float32,0xff5925a0,0xbf800000,3 +np.float32,0x7e87cafa,0x7f800000,3 +np.float32,0xbf6474b2,0xbf171fee,3 +np.float32,0x4b39b2,0x4b39b2,3 +np.float32,0x8020cc28,0x8020cc28,3 +np.float32,0xff004ed8,0xbf800000,3 +np.float32,0xbf204cf5,0xbeee448d,3 +np.float32,0x3e30cf10,0x3e40fdb1,3 +np.float32,0x80202bee,0x80202bee,3 +np.float32,0xbf55a985,0xbf10e2bc,3 +np.float32,0xbe297dd8,0xbe1c351c,3 +np.float32,0x5780d9,0x5780d9,3 +np.float32,0x7ef729fa,0x7f800000,3 +np.float32,0x8039a3b5,0x8039a3b5,3 +np.float32,0x7cdd3f,0x7cdd3f,3 +np.float32,0x7ef0145a,0x7f800000,3 +np.float32,0x807ad7ae,0x807ad7ae,3 +np.float32,0x7f6c2643,0x7f800000,3 +np.float32,0xbec56124,0xbea3c929,3 +np.float32,0x512c3b,0x512c3b,3 +np.float32,0xbed3effe,0xbead8c1e,3 +np.float32,0x7f5e0a4d,0x7f800000,3 +np.float32,0x3f315316,0x3f7fc200,3 +np.float32,0x7eca5727,0x7f800000,3 +np.float32,0x7f4834f3,0x7f800000,3 +np.float32,0x8004af6d,0x8004af6d,3 +np.float32,0x3f223ca4,0x3f6277e3,3 +np.float32,0x7eea4fdd,0x7f800000,3 +np.float32,0x3e7143e8,0x3e880763,3 +np.float32,0xbf737008,0xbf1d160e,3 +np.float32,0xfc408b00,0xbf800000,3 +np.float32,0x803912ca,0x803912ca,3 +np.float32,0x7db31f4e,0x7f800000,3 +np.float32,0xff578b54,0xbf800000,3 +np.float32,0x3f068ec4,0x3f31062b,3 +np.float32,0x35f64f,0x35f64f,3 +np.float32,0x80437df4,0x80437df4,3 +np.float32,0x568059,0x568059,3 +np.float32,0x8005f8ba,0x8005f8ba,3 +np.float32,0x6824ad,0x6824ad,3 +np.float32,0xff3fdf30,0xbf800000,3 +np.float32,0xbf6f7682,0xbf1b89d6,3 +np.float32,0x3dcea8a0,0x3dd971f5,3 +np.float32,0x3ee32a62,0x3f0ef5a9,3 +np.float32,0xbf735bcd,0xbf1d0e3d,3 +np.float32,0x7e8c7c28,0x7f800000,3 +np.float32,0x3ed552bc,0x3f045161,3 +np.float32,0xfed90a8a,0xbf800000,3 +np.float32,0xbe454368,0xbe336d2a,3 +np.float32,0xbf171d26,0xbee4442d,3 +np.float32,0x80652bf9,0x80652bf9,3 +np.float32,0xbdbaaa20,0xbdb26914,3 +np.float32,0x3f56063d,0x3fa7522e,3 +np.float32,0x3d3d4fd3,0x3d41c13f,3 +np.float32,0x80456040,0x80456040,3 +np.float32,0x3dc15586,0x3dcac0ef,3 +np.float32,0x7f753060,0x7f800000,3 +np.float32,0x7f7d8039,0x7f800000,3 +np.float32,0xfdebf280,0xbf800000,3 +np.float32,0xbf1892c3,0xbee5e116,3 +np.float32,0xbf0f1468,0xbedb3878,3 +np.float32,0x40d85c,0x40d85c,3 +np.float32,0x3f93dd,0x3f93dd,3 +np.float32,0xbf5730fd,0xbf118c24,3 +np.float32,0xfe17aa44,0xbf800000,3 +np.float32,0x3dc0baf4,0x3dca1716,3 +np.float32,0xbf3433d8,0xbf015efb,3 +np.float32,0x1c59f5,0x1c59f5,3 +np.float32,0x802b1540,0x802b1540,3 +np.float32,0xbe47df6c,0xbe35936e,3 +np.float32,0xbe8e7070,0xbe78af32,3 +np.float32,0xfe7057f4,0xbf800000,3 +np.float32,0x80668b69,0x80668b69,3 +np.float32,0xbe677810,0xbe4f2c2d,3 +np.float32,0xbe7a2f1c,0xbe5df733,3 +np.float32,0xfeb79e3c,0xbf800000,3 +np.float32,0xbeb6e320,0xbe99c9e8,3 +np.float32,0xfea188f2,0xbf800000,3 +np.float32,0x7dcaeb15,0x7f800000,3 +np.float32,0x1be567,0x1be567,3 +np.float32,0xbf4041cc,0xbf07320d,3 +np.float32,0x3f721aa7,0x3fc98e9a,3 +np.float32,0x7f5aa835,0x7f800000,3 +np.float32,0x15180e,0x15180e,3 +np.float32,0x3f73d739,0x3fcbccdb,3 +np.float32,0xbeecd380,0xbebd9b36,3 +np.float32,0x3f2caec7,0x3f768fea,3 +np.float32,0xbeaf65f2,0xbe9482bb,3 +np.float32,0xfe6aa384,0xbf800000,3 +np.float32,0xbf4f2c0a,0xbf0e085e,3 +np.float32,0xbf2b5907,0xbef9d431,3 +np.float32,0x3e855e0d,0x3e985960,3 +np.float32,0x8056cc64,0x8056cc64,3 +np.float32,0xff746bb5,0xbf800000,3 +np.float32,0x3e0332f6,0x3e0bf986,3 +np.float32,0xff637720,0xbf800000,3 +np.float32,0xbf330676,0xbf00c990,3 +np.float32,0x3ec449a1,0x3eef3862,3 +np.float32,0x766541,0x766541,3 +np.float32,0xfe2edf6c,0xbf800000,3 +np.float32,0xbebb28ca,0xbe9cc3e2,3 +np.float32,0x3f16c930,0x3f4d5ce4,3 +np.float32,0x7f1a9a4a,0x7f800000,3 +np.float32,0x3e9ba1,0x3e9ba1,3 +np.float32,0xbf73d5f6,0xbf1d3d69,3 +np.float32,0xfdc8a8b0,0xbf800000,3 +np.float32,0x50f051,0x50f051,3 +np.float32,0xff0add02,0xbf800000,3 +np.float32,0x1e50bf,0x1e50bf,3 +np.float32,0x3f04d287,0x3f2e1948,3 +np.float32,0x7f1e50,0x7f1e50,3 +np.float32,0x2affb3,0x2affb3,3 +np.float32,0x80039f07,0x80039f07,3 +np.float32,0x804ba79e,0x804ba79e,3 +np.float32,0x7b5a8eed,0x7f800000,3 +np.float32,0x3e1a8b28,0x3e26d0a7,3 +np.float32,0x3ea95f29,0x3ec8bfa4,3 +np.float32,0x7e09fa55,0x7f800000,3 +np.float32,0x7eacb1b3,0x7f800000,3 +np.float32,0x3e8ad7c0,0x3e9f7dec,3 +np.float32,0x7e0e997c,0x7f800000,3 +np.float32,0x3f4422b4,0x3f936398,3 +np.float32,0x806bd222,0x806bd222,3 +np.float32,0x677ae6,0x677ae6,3 +np.float32,0x62cf68,0x62cf68,3 +np.float32,0x7e4e594e,0x7f800000,3 +np.float32,0x80445fd1,0x80445fd1,3 +np.float32,0xff3a0d04,0xbf800000,3 +np.float32,0x8052b256,0x8052b256,3 +np.float32,0x3cb34440,0x3cb53e11,3 +np.float32,0xbf0e3865,0xbeda3c6d,3 +np.float32,0x3f49f5df,0x3f99ba17,3 +np.float32,0xbed75a22,0xbeafcc09,3 +np.float32,0xbf7aec64,0xbf1fefc8,3 +np.float32,0x7f35a62d,0x7f800000,3 +np.float32,0xbf787b03,0xbf1f03fc,3 +np.float32,0x8006a62a,0x8006a62a,3 +np.float32,0x3f6419e7,0x3fb803c7,3 +np.float32,0x3ecea2e5,0x3efe8f01,3 +np.float32,0x80603577,0x80603577,3 +np.float32,0xff73198c,0xbf800000,3 +np.float32,0x7def110a,0x7f800000,3 +np.float32,0x544efd,0x544efd,3 +np.float32,0x3f052340,0x3f2ea0fc,3 +np.float32,0xff306666,0xbf800000,3 +np.float32,0xbf800000,0xbf21d2a7,3 +np.float32,0xbed3e150,0xbead826a,3 +np.float32,0x3f430c99,0x3f92390f,3 +np.float32,0xbf4bffa4,0xbf0c9c73,3 +np.float32,0xfd97a710,0xbf800000,3 +np.float32,0x3cadf0fe,0x3cafcd1a,3 +np.float32,0x807af7b4,0x807af7b4,3 +np.float32,0xbc508600,0xbc4f33bc,3 +np.float32,0x7f3e0ec7,0x7f800000,3 +np.float32,0xbe51334c,0xbe3d36f7,3 +np.float32,0xfe7b7fb4,0xbf800000,3 +np.float32,0xfed9c45e,0xbf800000,3 +np.float32,0x3da024eb,0x3da6926a,3 +np.float32,0x7eed9e76,0x7f800000,3 +np.float32,0xbf2b8f1f,0xbefa0b91,3 +np.float32,0x3f2b9286,0x3f746318,3 +np.float32,0xfe8af49c,0xbf800000,3 +np.float32,0x9c4f7,0x9c4f7,3 +np.float32,0x801d7543,0x801d7543,3 +np.float32,0xbf66474a,0xbf17de66,3 +np.float32,0xbf562155,0xbf1116b1,3 +np.float32,0x46a8de,0x46a8de,3 +np.float32,0x8053fe6b,0x8053fe6b,3 +np.float32,0xbf6ee842,0xbf1b51f3,3 +np.float32,0xbf6ad78e,0xbf19b565,3 +np.float32,0xbf012574,0xbecad7ff,3 +np.float32,0x748364,0x748364,3 +np.float32,0x8073f59b,0x8073f59b,3 +np.float32,0xff526825,0xbf800000,3 +np.float32,0xfeb02dc4,0xbf800000,3 +np.float32,0x8033eb1c,0x8033eb1c,3 +np.float32,0x3f3685ea,0x3f8520cc,3 +np.float32,0x7f657902,0x7f800000,3 +np.float32,0xbf75eac4,0xbf1e0a1f,3 +np.float32,0xfe67f384,0xbf800000,3 +np.float32,0x3f56d3cc,0x3fa83faf,3 +np.float32,0x44a4ce,0x44a4ce,3 +np.float32,0x1dc4b3,0x1dc4b3,3 +np.float32,0x4fb3b2,0x4fb3b2,3 +np.float32,0xbea904a4,0xbe8ff3ed,3 +np.float32,0x7e668f16,0x7f800000,3 +np.float32,0x7f538378,0x7f800000,3 +np.float32,0x80541709,0x80541709,3 +np.float32,0x80228040,0x80228040,3 +np.float32,0x7ef9694e,0x7f800000,3 +np.float32,0x3f5fca9b,0x3fb2ce54,3 +np.float32,0xbe9c43c2,0xbe86ab84,3 +np.float32,0xfecee000,0xbf800000,3 +np.float32,0x5a65c2,0x5a65c2,3 +np.float32,0x3f736572,0x3fcb3985,3 +np.float32,0xbf2a03f7,0xbef87600,3 +np.float32,0xfe96b488,0xbf800000,3 +np.float32,0xfedd8800,0xbf800000,3 +np.float32,0x80411804,0x80411804,3 +np.float32,0x7edcb0a6,0x7f800000,3 +np.float32,0x2bb882,0x2bb882,3 +np.float32,0x3f800000,0x3fdbf0a9,3 +np.float32,0x764b27,0x764b27,3 +np.float32,0x7e92035d,0x7f800000,3 +np.float32,0x3e80facb,0x3e92ae1d,3 +np.float32,0x8040b81a,0x8040b81a,3 +np.float32,0x7f487fe4,0x7f800000,3 +np.float32,0xbc641780,0xbc6282ed,3 +np.float32,0x804b0bb9,0x804b0bb9,3 +np.float32,0x7d0b7c39,0x7f800000,3 +np.float32,0xff072080,0xbf800000,3 +np.float32,0xbed7aff8,0xbeb00462,3 +np.float32,0x35e247,0x35e247,3 +np.float32,0xbf7edd19,0xbf216766,3 +np.float32,0x8004a539,0x8004a539,3 +np.float32,0xfdfc1790,0xbf800000,3 +np.float32,0x8037a841,0x8037a841,3 +np.float32,0xfed0a8a8,0xbf800000,3 +np.float32,0x7f1f1697,0x7f800000,3 +np.float32,0x3f2ccc6e,0x3f76ca23,3 +np.float32,0x35eada,0x35eada,3 +np.float32,0xff111f42,0xbf800000,3 +np.float32,0x3ee1ab7f,0x3f0dcbbe,3 +np.float32,0xbf6e89ee,0xbf1b2cd4,3 +np.float32,0x3f58611c,0x3faa0cdc,3 +np.float32,0x1ac6a6,0x1ac6a6,3 +np.float32,0xbf1286fa,0xbedf2312,3 +np.float32,0x7e451137,0x7f800000,3 +np.float32,0xbe92c326,0xbe7f3405,3 +np.float32,0x3f2fdd16,0x3f7cd87b,3 +np.float32,0xbe5c0ea0,0xbe4604c2,3 +np.float32,0xbdb29968,0xbdab0883,3 +np.float32,0x3964,0x3964,3 +np.float32,0x3f0dc236,0x3f3d60a0,3 +np.float32,0x7c3faf06,0x7f800000,3 +np.float32,0xbef41f7a,0xbec22b16,3 +np.float32,0x3f4c0289,0x3f9bfdcc,3 +np.float32,0x806084e9,0x806084e9,3 +np.float32,0x3ed1d8dd,0x3f01b0c1,3 +np.float32,0x806d8d8b,0x806d8d8b,3 +np.float32,0x3f052180,0x3f2e9e0a,3 +np.float32,0x803d85d5,0x803d85d5,3 +np.float32,0x3e0afd70,0x3e14dd48,3 +np.float32,0x2fbc63,0x2fbc63,3 +np.float32,0x2e436f,0x2e436f,3 +np.float32,0xbf7b19e6,0xbf2000da,3 +np.float32,0x3f34022e,0x3f829362,3 +np.float32,0x3d2b40e0,0x3d2ee246,3 +np.float32,0x3f5298b4,0x3fa3649b,3 +np.float32,0xbdb01328,0xbda8b7de,3 +np.float32,0x7f693c81,0x7f800000,3 +np.float32,0xbeb1abc0,0xbe961edc,3 +np.float32,0x801d9b5d,0x801d9b5d,3 +np.float32,0x80628668,0x80628668,3 +np.float32,0x800f57dd,0x800f57dd,3 +np.float32,0x8017c94f,0x8017c94f,3 +np.float32,0xbf16f5f4,0xbee418b8,3 +np.float32,0x3e686476,0x3e827022,3 +np.float32,0xbf256796,0xbef3abd9,3 +np.float32,0x7f1b4485,0x7f800000,3 +np.float32,0xbea0b3cc,0xbe89ed21,3 +np.float32,0xfee08b2e,0xbf800000,3 +np.float32,0x523cb4,0x523cb4,3 +np.float32,0x3daf2cb2,0x3db6e273,3 +np.float32,0xbd531c40,0xbd4dc323,3 +np.float32,0x80078fe5,0x80078fe5,3 +np.float32,0x80800000,0x80800000,3 +np.float32,0x3f232438,0x3f642d1a,3 +np.float32,0x3ec29446,0x3eecb7c0,3 +np.float32,0x3dbcd2a4,0x3dc5cd1d,3 +np.float32,0x7f045b0d,0x7f800000,3 +np.float32,0x7f22e6d1,0x7f800000,3 +np.float32,0xbf5d3430,0xbf141c80,3 +np.float32,0xbe03ec70,0xbdf78ee6,3 +np.float32,0x3e93ec9a,0x3eab822f,3 +np.float32,0x7f3b9262,0x7f800000,3 +np.float32,0x65ac6a,0x65ac6a,3 +np.float32,0x3db9a8,0x3db9a8,3 +np.float32,0xbf37ab59,0xbf031306,3 +np.float32,0x33c40e,0x33c40e,3 +np.float32,0x7f7a478f,0x7f800000,3 +np.float32,0xbe8532d0,0xbe6a906f,3 +np.float32,0x801c081d,0x801c081d,3 +np.float32,0xbe4212a0,0xbe30ca73,3 +np.float32,0xff0b603e,0xbf800000,3 +np.float32,0x4554dc,0x4554dc,3 +np.float32,0x3dd324be,0x3dde695e,3 +np.float32,0x3f224c44,0x3f629557,3 +np.float32,0x8003cd79,0x8003cd79,3 +np.float32,0xbf31351c,0xbeffc2fd,3 +np.float32,0x8034603a,0x8034603a,3 +np.float32,0xbf6fcb70,0xbf1bab24,3 +np.float32,0x804eb67e,0x804eb67e,3 +np.float32,0xff05c00e,0xbf800000,3 +np.float32,0x3eb5b36f,0x3eda1ec7,3 +np.float32,0x3f1ed7f9,0x3f5c1d90,3 +np.float32,0x3f052d8a,0x3f2eb24b,3 +np.float32,0x5ddf51,0x5ddf51,3 +np.float32,0x7e50c11c,0x7f800000,3 +np.float32,0xff74f55a,0xbf800000,3 +np.float32,0x4322d,0x4322d,3 +np.float32,0x3f16f8a9,0x3f4db27a,3 +np.float32,0x3f4f23d6,0x3f9f7c2c,3 +np.float32,0xbf706c1e,0xbf1bea0a,3 +np.float32,0x3f2cbd52,0x3f76ac77,3 +np.float32,0xf3043,0xf3043,3 +np.float32,0xfee79de0,0xbf800000,3 +np.float32,0x7e942f69,0x7f800000,3 +np.float32,0x180139,0x180139,3 +np.float32,0xff69c678,0xbf800000,3 +np.float32,0x3f46773f,0x3f95e840,3 +np.float32,0x804aae1c,0x804aae1c,3 +np.float32,0x3eb383b4,0x3ed7024c,3 +np.float32,0x8032624e,0x8032624e,3 +np.float32,0xbd0a0f80,0xbd07c27d,3 +np.float32,0xbf1c9b98,0xbeea4a61,3 +np.float32,0x7f370999,0x7f800000,3 +np.float32,0x801931f9,0x801931f9,3 +np.float32,0x3f6f45ce,0x3fc5eea0,3 +np.float32,0xff0ab4cc,0xbf800000,3 +np.float32,0x4c043d,0x4c043d,3 +np.float32,0x8002a599,0x8002a599,3 +np.float32,0xbc4a6080,0xbc4921d7,3 +np.float32,0x3f008d14,0x3f26fb72,3 +np.float32,0x7f48b3d9,0x7f800000,3 +np.float32,0x7cb2ec7e,0x7f800000,3 +np.float32,0xbf1338bd,0xbedfeb61,3 +np.float32,0x0,0x0,3 +np.float32,0xbf2f5b64,0xbefde71c,3 +np.float32,0xbe422974,0xbe30dd56,3 +np.float32,0x3f776be8,0x3fd07950,3 +np.float32,0xbf3e97a1,0xbf06684a,3 +np.float32,0x7d28cb26,0x7f800000,3 +np.float32,0x801618d2,0x801618d2,3 +np.float32,0x807e4f83,0x807e4f83,3 +np.float32,0x8006b07d,0x8006b07d,3 +np.float32,0xfea1c042,0xbf800000,3 +np.float32,0xff24ef74,0xbf800000,3 +np.float32,0xfef7ab16,0xbf800000,3 +np.float32,0x70b771,0x70b771,3 +np.float32,0x7daeb64e,0x7f800000,3 +np.float32,0xbe66e378,0xbe4eb59c,3 +np.float32,0xbead1534,0xbe92dcf7,3 +np.float32,0x7e6769b8,0x7f800000,3 +np.float32,0x7ecd0890,0x7f800000,3 +np.float32,0xbe7380d8,0xbe58b747,3 +np.float32,0x3efa6f2f,0x3f218265,3 +np.float32,0x3f59dada,0x3fabc5eb,3 +np.float32,0xff0f2d20,0xbf800000,3 +np.float32,0x8060210e,0x8060210e,3 +np.float32,0x3ef681e8,0x3f1e51c8,3 +np.float32,0x77a6dd,0x77a6dd,3 +np.float32,0xbebfdd0e,0xbea00399,3 +np.float32,0xfe889b72,0xbf800000,3 +np.float32,0x8049ed2c,0x8049ed2c,3 +np.float32,0x3b089dc4,0x3b08c23e,3 +np.float32,0xbf13c7c4,0xbee08c28,3 +np.float32,0x3efa13b9,0x3f2137d7,3 +np.float32,0x3e9385dc,0x3eaaf914,3 +np.float32,0x7e0e6a43,0x7f800000,3 +np.float32,0x7df6d63f,0x7f800000,3 +np.float32,0x3f3efead,0x3f8dea03,3 +np.float32,0xff52548c,0xbf800000,3 +np.float32,0x803ff9d8,0x803ff9d8,3 +np.float32,0x3c825823,0x3c836303,3 +np.float32,0xfc9e97a0,0xbf800000,3 +np.float32,0xfe644f48,0xbf800000,3 +np.float32,0x802f5017,0x802f5017,3 +np.float32,0x3d5753b9,0x3d5d1661,3 +np.float32,0x7f2a55d2,0x7f800000,3 +np.float32,0x7f4dabfe,0x7f800000,3 +np.float32,0x3f49492a,0x3f98fc47,3 +np.float32,0x3f4d1589,0x3f9d2f82,3 +np.float32,0xff016208,0xbf800000,3 +np.float32,0xbf571cb7,0xbf118365,3 +np.float32,0xbf1ef297,0xbeecd136,3 +np.float32,0x36266b,0x36266b,3 +np.float32,0xbed07b0e,0xbeab4129,3 +np.float32,0x7f553365,0x7f800000,3 +np.float32,0xfe9bb8c6,0xbf800000,3 +np.float32,0xbeb497d6,0xbe982e19,3 +np.float32,0xbf27af6c,0xbef60d16,3 +np.float32,0x55cf51,0x55cf51,3 +np.float32,0x3eab1db0,0x3ecb2e4f,3 +np.float32,0x3e777603,0x3e8bf62f,3 +np.float32,0x7f10e374,0x7f800000,3 +np.float32,0xbf1f6480,0xbeed4b8d,3 +np.float32,0x40479d,0x40479d,3 +np.float32,0x156259,0x156259,3 +np.float32,0x3d852e30,0x3d899b2d,3 +np.float32,0x80014ff3,0x80014ff3,3 +np.float32,0xbd812fa8,0xbd7a645c,3 +np.float32,0x800ab780,0x800ab780,3 +np.float32,0x3ea02ff4,0x3ebc13bd,3 +np.float32,0x7e858b8e,0x7f800000,3 +np.float32,0x75d63b,0x75d63b,3 +np.float32,0xbeb15c94,0xbe95e6e3,3 +np.float32,0x3da0cee0,0x3da74a39,3 +np.float32,0xff21c01c,0xbf800000,3 +np.float32,0x8049b5eb,0x8049b5eb,3 +np.float32,0x80177ab0,0x80177ab0,3 +np.float32,0xff137a50,0xbf800000,3 +np.float32,0x3f7febba,0x3fdbd51c,3 +np.float32,0x8041e4dd,0x8041e4dd,3 +np.float32,0x99b8c,0x99b8c,3 +np.float32,0x5621ba,0x5621ba,3 +np.float32,0x14b534,0x14b534,3 +np.float32,0xbe2eb3a8,0xbe209c95,3 +np.float32,0x7e510c28,0x7f800000,3 +np.float32,0x804ec2f2,0x804ec2f2,3 +np.float32,0x3f662406,0x3fba82b0,3 +np.float32,0x800000,0x800000,3 +np.float32,0x3f3120d6,0x3f7f5d96,3 +np.float32,0x7f179b8e,0x7f800000,3 +np.float32,0x7f65278e,0x7f800000,3 +np.float32,0xfeb50f52,0xbf800000,3 +np.float32,0x7f051bd1,0x7f800000,3 +np.float32,0x7ea0558d,0x7f800000,3 +np.float32,0xbd0a96c0,0xbd08453f,3 +np.float64,0xee82da5ddd05c,0xee82da5ddd05c,1 +np.float64,0x800c3a22d7f87446,0x800c3a22d7f87446,1 +np.float64,0xbfd34b20eaa69642,0xbfd0a825e7688d3e,1 +np.float64,0x3fd6a0f2492d41e5,0x3fdb253b906057b3,1 +np.float64,0xbfda13d8783427b0,0xbfd56b1d76684332,1 +np.float64,0xbfe50b5a99ea16b5,0xbfded7dd82c6f746,1 +np.float64,0x3f82468fc0248d20,0x3f825b7fa9378ee9,1 +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 +np.float64,0x856e50290adca,0x856e50290adca,1 +np.float64,0x7fde55a5fa3cab4b,0x7ff0000000000000,1 +np.float64,0x7fcf2c8dd93e591b,0x7ff0000000000000,1 +np.float64,0x8001b3a0e3236743,0x8001b3a0e3236743,1 +np.float64,0x8000fdb14821fb63,0x8000fdb14821fb63,1 +np.float64,0xbfe3645e08e6c8bc,0xbfdd161362a5e9ef,1 +np.float64,0x7feb34d28b3669a4,0x7ff0000000000000,1 +np.float64,0x80099dd810933bb1,0x80099dd810933bb1,1 +np.float64,0xbfedbcc1097b7982,0xbfe35d86414d53dc,1 +np.float64,0x7fdc406fbdb880de,0x7ff0000000000000,1 +np.float64,0x800c4bf85ab897f1,0x800c4bf85ab897f1,1 +np.float64,0x3fd8f7b0e0b1ef60,0x3fde89b497ae20d8,1 +np.float64,0xffe4fced5c69f9da,0xbff0000000000000,1 +np.float64,0xbfe54d421fea9a84,0xbfdf1be0cbfbfcba,1 +np.float64,0x800af72f3535ee5f,0x800af72f3535ee5f,1 +np.float64,0x3fe24e6570e49ccb,0x3fe8b3a86d970411,1 +np.float64,0xbfdd7b22d0baf646,0xbfd79fac2e4f7558,1 +np.float64,0xbfe6a7654c6d4eca,0xbfe03c1f13f3b409,1 +np.float64,0x3fe2c3eb662587d7,0x3fe98566e625d4f5,1 +np.float64,0x3b1ef71e763e0,0x3b1ef71e763e0,1 +np.float64,0xffed03c6baba078d,0xbff0000000000000,1 +np.float64,0x3febac19d0b75834,0x3ff5fdacc9d51bcd,1 +np.float64,0x800635d6794c6bae,0x800635d6794c6bae,1 +np.float64,0xbfe8cafc827195f9,0xbfe1411438608ae1,1 +np.float64,0x7feeb616a83d6c2c,0x7ff0000000000000,1 +np.float64,0x3fd52d62a2aa5ac5,0x3fd91a07a7f18f44,1 +np.float64,0x80036996b8a6d32e,0x80036996b8a6d32e,1 +np.float64,0x2b1945965632a,0x2b1945965632a,1 +np.float64,0xbfecb5e8c9796bd2,0xbfe2f40fca276aa2,1 +np.float64,0x3fe8669ed4f0cd3e,0x3ff24c89fc9cdbff,1 +np.float64,0x71e9f65ee3d3f,0x71e9f65ee3d3f,1 +np.float64,0xbfd5ab262bab564c,0xbfd261ae108ef79e,1 +np.float64,0xbfe7091342ee1226,0xbfe06bf5622d75f6,1 +np.float64,0x49e888d093d12,0x49e888d093d12,1 +np.float64,0x2272f3dc44e5f,0x2272f3dc44e5f,1 +np.float64,0x7fe98736e0b30e6d,0x7ff0000000000000,1 +np.float64,0x30fa9cde61f54,0x30fa9cde61f54,1 +np.float64,0x7fdc163fc0382c7f,0x7ff0000000000000,1 +np.float64,0xffb40d04ee281a08,0xbff0000000000000,1 +np.float64,0xffe624617f2c48c2,0xbff0000000000000,1 +np.float64,0x3febb582bd376b05,0x3ff608da584d1716,1 +np.float64,0xfc30a5a5f8615,0xfc30a5a5f8615,1 +np.float64,0x3fef202efd7e405e,0x3ffa52009319b069,1 +np.float64,0x8004d0259829a04c,0x8004d0259829a04c,1 +np.float64,0x800622dc71ec45ba,0x800622dc71ec45ba,1 +np.float64,0xffefffffffffffff,0xbff0000000000000,1 +np.float64,0x800e89113c9d1223,0x800e89113c9d1223,1 +np.float64,0x7fba7fde3034ffbb,0x7ff0000000000000,1 +np.float64,0xbfeea31e807d463d,0xbfe3b7369b725915,1 +np.float64,0x3feb7c9589f6f92c,0x3ff5c56cf71b0dff,1 +np.float64,0x3fd52d3b59aa5a77,0x3fd919d0f683fd07,1 +np.float64,0x800de90a43fbd215,0x800de90a43fbd215,1 +np.float64,0x3fe7eb35a9efd66b,0x3ff1c940dbfc6ef9,1 +np.float64,0xbda0adcb7b416,0xbda0adcb7b416,1 +np.float64,0x7fc5753e3a2aea7b,0x7ff0000000000000,1 +np.float64,0xffdd101d103a203a,0xbff0000000000000,1 +np.float64,0x7fcb54f56836a9ea,0x7ff0000000000000,1 +np.float64,0xbfd61c8d6eac391a,0xbfd2b23bc0a2cef4,1 +np.float64,0x3feef55de37deabc,0x3ffa198639a0161d,1 +np.float64,0x7fe4ffbfaea9ff7e,0x7ff0000000000000,1 +np.float64,0x9d1071873a20e,0x9d1071873a20e,1 +np.float64,0x3fef1ecb863e3d97,0x3ffa502a81e09cfc,1 +np.float64,0xad2da12b5a5b4,0xad2da12b5a5b4,1 +np.float64,0xffe614b74c6c296e,0xbff0000000000000,1 +np.float64,0xffe60d3f286c1a7e,0xbff0000000000000,1 +np.float64,0x7fda7d91f4b4fb23,0x7ff0000000000000,1 +np.float64,0x800023f266a047e6,0x800023f266a047e6,1 +np.float64,0x7fdf5f9ad23ebf35,0x7ff0000000000000,1 +np.float64,0x3fa7459f002e8b3e,0x3fa7cf178dcf0af6,1 +np.float64,0x3fe9938d61f3271b,0x3ff39516a13caec3,1 +np.float64,0xbfd59314c3ab262a,0xbfd250830f73efd2,1 +np.float64,0xbfc7e193f72fc328,0xbfc5c924339dd7a8,1 +np.float64,0x7fec1965f17832cb,0x7ff0000000000000,1 +np.float64,0xbfd932908eb26522,0xbfd4d4312d272580,1 +np.float64,0xbfdf2d08e2be5a12,0xbfd8add1413b0b1b,1 +np.float64,0x7fdcf7cc74b9ef98,0x7ff0000000000000,1 +np.float64,0x7fc79300912f2600,0x7ff0000000000000,1 +np.float64,0xffd4bd8f23297b1e,0xbff0000000000000,1 +np.float64,0x41869ce0830e,0x41869ce0830e,1 +np.float64,0x3fe5dcec91ebb9da,0x3fef5e213598cbd4,1 +np.float64,0x800815d9c2902bb4,0x800815d9c2902bb4,1 +np.float64,0x800ba1a4b877434a,0x800ba1a4b877434a,1 +np.float64,0x80069d7bdc4d3af8,0x80069d7bdc4d3af8,1 +np.float64,0xcf00d4339e01b,0xcf00d4339e01b,1 +np.float64,0x80072b71bd4e56e4,0x80072b71bd4e56e4,1 +np.float64,0x80059ca6fbab394f,0x80059ca6fbab394f,1 +np.float64,0x3fe522fc092a45f8,0x3fedf212682bf894,1 +np.float64,0x7fe17f384ea2fe70,0x7ff0000000000000,1 +np.float64,0x0,0x0,1 +np.float64,0x3f72bb4c20257698,0x3f72c64766b52069,1 +np.float64,0x7fbc97c940392f92,0x7ff0000000000000,1 +np.float64,0xffc5904ebd2b209c,0xbff0000000000000,1 +np.float64,0xbfe34fb55b669f6a,0xbfdcff81dd30a49d,1 +np.float64,0x8007ccda006f99b5,0x8007ccda006f99b5,1 +np.float64,0x3fee50e4c8fca1ca,0x3ff9434c7750ad0f,1 +np.float64,0x7fee7b07c67cf60f,0x7ff0000000000000,1 +np.float64,0x3fdcce4a5a399c95,0x3fe230c83f28218a,1 +np.float64,0x7fee5187b37ca30e,0x7ff0000000000000,1 +np.float64,0x3fc48f6a97291ed8,0x3fc64db6200a9833,1 +np.float64,0xc7fec3498ffd9,0xc7fec3498ffd9,1 +np.float64,0x800769c59d2ed38c,0x800769c59d2ed38c,1 +np.float64,0xffe69ede782d3dbc,0xbff0000000000000,1 +np.float64,0x3fecd9770979b2ee,0x3ff76a1f2f0f08f2,1 +np.float64,0x5aa358a8b546c,0x5aa358a8b546c,1 +np.float64,0xbfe795a0506f2b40,0xbfe0afcc52c0166b,1 +np.float64,0xffd4ada1e8a95b44,0xbff0000000000000,1 +np.float64,0xffcac1dc213583b8,0xbff0000000000000,1 +np.float64,0xffe393c15fa72782,0xbff0000000000000,1 +np.float64,0xbfcd6a3c113ad478,0xbfca47a2157b9cdd,1 +np.float64,0xffedde20647bbc40,0xbff0000000000000,1 +np.float64,0x3fd0d011b1a1a024,0x3fd33a57945559f4,1 +np.float64,0x3fef27e29f7e4fc6,0x3ffa5c314e0e3d69,1 +np.float64,0xffe96ff71f72dfee,0xbff0000000000000,1 +np.float64,0xffe762414f2ec482,0xbff0000000000000,1 +np.float64,0x3fc2dcfd3d25b9fa,0x3fc452f41682a12e,1 +np.float64,0xbfbdb125b63b6248,0xbfbc08e6553296d4,1 +np.float64,0x7b915740f724,0x7b915740f724,1 +np.float64,0x60b502b2c16a1,0x60b502b2c16a1,1 +np.float64,0xbfeb38b0be367162,0xbfe254f6782cfc47,1 +np.float64,0x800dc39a3edb8735,0x800dc39a3edb8735,1 +np.float64,0x3fea4fb433349f68,0x3ff468b97cf699f5,1 +np.float64,0xbfd49967962932d0,0xbfd19ceb41ff4cd0,1 +np.float64,0xbfebf75cd377eeba,0xbfe2a576bdbccccc,1 +np.float64,0xbfb653d65c2ca7b0,0xbfb561ab8fcb3f26,1 +np.float64,0xffe3f34b8727e696,0xbff0000000000000,1 +np.float64,0x3fdd798064baf301,0x3fe2b7c130a6fc63,1 +np.float64,0x3febe027e6b7c050,0x3ff63bac1b22e12d,1 +np.float64,0x7fcaa371af3546e2,0x7ff0000000000000,1 +np.float64,0xbfe6ee980a2ddd30,0xbfe05f0bc5dc80d2,1 +np.float64,0xc559c33f8ab39,0xc559c33f8ab39,1 +np.float64,0x84542c2b08a86,0x84542c2b08a86,1 +np.float64,0xbfe5645e046ac8bc,0xbfdf3398dc3cc1bd,1 +np.float64,0x3fee8c48ae7d1892,0x3ff9902899480526,1 +np.float64,0x3fb706471c2e0c8e,0x3fb817787aace8db,1 +np.float64,0x7fefe78f91ffcf1e,0x7ff0000000000000,1 +np.float64,0xbfcf6d560b3edaac,0xbfcbddc72a2130df,1 +np.float64,0x7fd282bfd925057f,0x7ff0000000000000,1 +np.float64,0x3fb973dbee32e7b8,0x3fbac2c87cbd0215,1 +np.float64,0x3fd1ce38ff239c72,0x3fd4876de5164420,1 +np.float64,0x8008ac2e3c31585d,0x8008ac2e3c31585d,1 +np.float64,0x3fa05e06dc20bc00,0x3fa0a1b7de904dce,1 +np.float64,0x7fd925f215324be3,0x7ff0000000000000,1 +np.float64,0x3f949d95d0293b2c,0x3f94d31197d51874,1 +np.float64,0xffdded9e67bbdb3c,0xbff0000000000000,1 +np.float64,0x3fed390dcfba721c,0x3ff7e08c7a709240,1 +np.float64,0x7fe6e62300adcc45,0x7ff0000000000000,1 +np.float64,0xbfd779bc312ef378,0xbfd3a6cb64bb0181,1 +np.float64,0x3fe43e9877287d31,0x3fec3e100ef935fd,1 +np.float64,0x210b68e44216e,0x210b68e44216e,1 +np.float64,0x3fcdffc1e73bff84,0x3fd0e729d02ec539,1 +np.float64,0xcea10c0f9d422,0xcea10c0f9d422,1 +np.float64,0x7feb97a82d772f4f,0x7ff0000000000000,1 +np.float64,0x9b4b4d953696a,0x9b4b4d953696a,1 +np.float64,0x3fd1bd8e95237b1d,0x3fd4716dd34cf828,1 +np.float64,0x800fc273841f84e7,0x800fc273841f84e7,1 +np.float64,0xbfd2aef167255de2,0xbfd0340f30d82f18,1 +np.float64,0x800d021a551a0435,0x800d021a551a0435,1 +np.float64,0xffebf934a8b7f268,0xbff0000000000000,1 +np.float64,0x3fd819849fb03308,0x3fdd43bca0aac749,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x27c34b064f86a,0x27c34b064f86a,1 +np.float64,0x7fef4f5a373e9eb3,0x7ff0000000000000,1 +np.float64,0x7fd92fccce325f99,0x7ff0000000000000,1 +np.float64,0x800520869d6a410e,0x800520869d6a410e,1 +np.float64,0x3fccbcaddf397958,0x3fd01bf6b0c4d97f,1 +np.float64,0x80039ebfc4273d80,0x80039ebfc4273d80,1 +np.float64,0xbfed1f0b3c7a3e16,0xbfe31ea6e4c69141,1 +np.float64,0x7fee1bb7c4bc376f,0x7ff0000000000000,1 +np.float64,0xbfa8bee1d8317dc0,0xbfa8283b7dbf95a9,1 +np.float64,0x3fe797db606f2fb6,0x3ff171b1c2bc8fe5,1 +np.float64,0xbfee2ecfdbbc5da0,0xbfe38a3f0a43d14e,1 +np.float64,0x3fe815c7f1302b90,0x3ff1f65165c45d71,1 +np.float64,0xbfbb265c94364cb8,0xbfb9c27ec61a9a1d,1 +np.float64,0x3fcf1cab5d3e3957,0x3fd19c07444642f9,1 +np.float64,0xbfe6ae753f6d5cea,0xbfe03f99666dbe17,1 +np.float64,0xbfd18a2a73a31454,0xbfceaee204aca016,1 +np.float64,0x3fb8a1dffc3143c0,0x3fb9db38341ab1a3,1 +np.float64,0x7fd2a0376025406e,0x7ff0000000000000,1 +np.float64,0x7fe718c0e3ae3181,0x7ff0000000000000,1 +np.float64,0x3fb264d42424c9a8,0x3fb3121f071d4db4,1 +np.float64,0xd27190a7a4e32,0xd27190a7a4e32,1 +np.float64,0xbfe467668c68cecd,0xbfde2c4616738d5e,1 +np.float64,0x800ab9a2b9357346,0x800ab9a2b9357346,1 +np.float64,0x7fcbd108d537a211,0x7ff0000000000000,1 +np.float64,0x3fb79bba6e2f3770,0x3fb8bb2c140d3445,1 +np.float64,0xffefa7165e3f4e2c,0xbff0000000000000,1 +np.float64,0x7fb40185a428030a,0x7ff0000000000000,1 +np.float64,0xbfe9e3d58e73c7ab,0xbfe1c04d51c83d69,1 +np.float64,0x7fef5b97b17eb72e,0x7ff0000000000000,1 +np.float64,0x800a2957683452af,0x800a2957683452af,1 +np.float64,0x800f54f1925ea9e3,0x800f54f1925ea9e3,1 +np.float64,0xeffa4e77dff4a,0xeffa4e77dff4a,1 +np.float64,0xffbe501aa03ca038,0xbff0000000000000,1 +np.float64,0x8006c651bced8ca4,0x8006c651bced8ca4,1 +np.float64,0x3fe159faff22b3f6,0x3fe708f78efbdbed,1 +np.float64,0x800e7d59a31cfab3,0x800e7d59a31cfab3,1 +np.float64,0x3fe6ac2f272d585e,0x3ff07ee5305385c3,1 +np.float64,0x7fd014c054202980,0x7ff0000000000000,1 +np.float64,0xbfe4800b11e90016,0xbfde4648c6f29ce5,1 +np.float64,0xbfe6738470ece709,0xbfe0227b5b42b713,1 +np.float64,0x3fed052add3a0a56,0x3ff7a01819e65c6e,1 +np.float64,0xffe03106f120620e,0xbff0000000000000,1 +np.float64,0x7fe11df4d4e23be9,0x7ff0000000000000,1 +np.float64,0xbfcea25d7b3d44bc,0xbfcb3e808e7ce852,1 +np.float64,0xd0807b03a1010,0xd0807b03a1010,1 +np.float64,0x8004eda4fec9db4b,0x8004eda4fec9db4b,1 +np.float64,0x3fceb5c98d3d6b90,0x3fd15a894b15dd9f,1 +np.float64,0xbfee27228afc4e45,0xbfe38741702f3c0b,1 +np.float64,0xbfe606278c6c0c4f,0xbfdfd7cb6093652d,1 +np.float64,0xbfd66f59bc2cdeb4,0xbfd2ecb2297f6afc,1 +np.float64,0x4aee390095dc8,0x4aee390095dc8,1 +np.float64,0xbfe391355d67226a,0xbfdd46ddc0997014,1 +np.float64,0xffd27765e7a4eecc,0xbff0000000000000,1 +np.float64,0xbfe795e20a2f2bc4,0xbfe0afebc66c4dbd,1 +np.float64,0x7fc9a62e81334c5c,0x7ff0000000000000,1 +np.float64,0xffe4e57e52a9cafc,0xbff0000000000000,1 +np.float64,0x7fac326c8c3864d8,0x7ff0000000000000,1 +np.float64,0x3fe8675f6370cebf,0x3ff24d5863029c15,1 +np.float64,0x7fcf4745e73e8e8b,0x7ff0000000000000,1 +np.float64,0x7fcc9aec9f3935d8,0x7ff0000000000000,1 +np.float64,0x3fec2e8fcab85d20,0x3ff699ccd0b2fed6,1 +np.float64,0x3fd110a968222153,0x3fd38e81a88c2d13,1 +np.float64,0xffb3a68532274d08,0xbff0000000000000,1 +np.float64,0xf0e562bbe1cad,0xf0e562bbe1cad,1 +np.float64,0xbfe815b9e5f02b74,0xbfe0ec9f5023aebc,1 +np.float64,0xbf5151d88022a400,0xbf514f80c465feea,1 +np.float64,0x2547e3144a8fd,0x2547e3144a8fd,1 +np.float64,0x3fedcc0c28fb9818,0x3ff899612fbeb4c5,1 +np.float64,0x3fdc3d1c0f387a38,0x3fe1bf6e2d39bd75,1 +np.float64,0x7fe544dbe62a89b7,0x7ff0000000000000,1 +np.float64,0x8001500e48e2a01d,0x8001500e48e2a01d,1 +np.float64,0xbfed3b2b09fa7656,0xbfe329f3e7bada64,1 +np.float64,0xbfe76a943aeed528,0xbfe09b24e3aa3f79,1 +np.float64,0x3fe944330e328866,0x3ff33d472dee70c5,1 +np.float64,0x8004bbbd6cc9777c,0x8004bbbd6cc9777c,1 +np.float64,0xbfe28133fb650268,0xbfdc1ac230ac4ef5,1 +np.float64,0xc1370af7826e2,0xc1370af7826e2,1 +np.float64,0x7fcfa47f5f3f48fe,0x7ff0000000000000,1 +np.float64,0xbfa3002a04260050,0xbfa2a703a538b54e,1 +np.float64,0xffef44f3903e89e6,0xbff0000000000000,1 +np.float64,0xc32cce298659a,0xc32cce298659a,1 +np.float64,0x7b477cc2f68f0,0x7b477cc2f68f0,1 +np.float64,0x40a7f4ec814ff,0x40a7f4ec814ff,1 +np.float64,0xffee38edf67c71db,0xbff0000000000000,1 +np.float64,0x3fe23f6f1ce47ede,0x3fe8992b8bb03499,1 +np.float64,0x7fc8edfe7f31dbfc,0x7ff0000000000000,1 +np.float64,0x800bb8e6fb3771ce,0x800bb8e6fb3771ce,1 +np.float64,0xbfe11d364ee23a6c,0xbfda82a0c2ef9e46,1 +np.float64,0xbfeb993cb4b7327a,0xbfe27df565da85dc,1 +np.float64,0x10000000000000,0x10000000000000,1 +np.float64,0x3fc1f997d723f330,0x3fc34c5cff060af1,1 +np.float64,0x6e326fa0dc64f,0x6e326fa0dc64f,1 +np.float64,0x800fa30c2c5f4618,0x800fa30c2c5f4618,1 +np.float64,0x7fed16ad603a2d5a,0x7ff0000000000000,1 +np.float64,0x9411cf172823a,0x9411cf172823a,1 +np.float64,0xffece51d4cb9ca3a,0xbff0000000000000,1 +np.float64,0x3fdda3d1453b47a3,0x3fe2d954f7849890,1 +np.float64,0xffd58330172b0660,0xbff0000000000000,1 +np.float64,0xbfc6962ae52d2c54,0xbfc4b4bdf0069f17,1 +np.float64,0xbfb4010a8e280218,0xbfb33e1236f7efa0,1 +np.float64,0x7fd0444909208891,0x7ff0000000000000,1 +np.float64,0xbfe027a24de04f44,0xbfd95e9064101e7c,1 +np.float64,0xa6f3f3214de9,0xa6f3f3214de9,1 +np.float64,0xbfe112eb0fe225d6,0xbfda768f7cbdf346,1 +np.float64,0xbfe99e90d4b33d22,0xbfe1a153e45a382a,1 +np.float64,0xffecb34f8e79669e,0xbff0000000000000,1 +np.float64,0xbfdf32c9653e6592,0xbfd8b159caf5633d,1 +np.float64,0x3fe9519829b2a330,0x3ff34c0a8152e20f,1 +np.float64,0xffd08ec8a7a11d92,0xbff0000000000000,1 +np.float64,0xffd19b71b6a336e4,0xbff0000000000000,1 +np.float64,0x7feda6b9377b4d71,0x7ff0000000000000,1 +np.float64,0x800fda2956bfb453,0x800fda2956bfb453,1 +np.float64,0x3fe54f601bea9ec0,0x3fee483cb03cbde4,1 +np.float64,0xbfe2a8ad5ee5515a,0xbfdc46ee7a10bf0d,1 +np.float64,0xbfd336c8bd266d92,0xbfd09916d432274a,1 +np.float64,0xfff0000000000000,0xbff0000000000000,1 +np.float64,0x3fd9a811a9b35024,0x3fdf8fa68cc048e3,1 +np.float64,0x3fe078c68520f18d,0x3fe58aecc1f9649b,1 +np.float64,0xbfc6d5aa3a2dab54,0xbfc4e9ea84f3d73c,1 +np.float64,0xf9682007f2d04,0xf9682007f2d04,1 +np.float64,0x3fee54523dbca8a4,0x3ff947b826de81f4,1 +np.float64,0x80461e5d008c4,0x80461e5d008c4,1 +np.float64,0x3fdd6d12d5bada26,0x3fe2ade8dee2fa02,1 +np.float64,0x3fcd5f0dfd3abe18,0x3fd081d6cd25731d,1 +np.float64,0x7fa36475c826c8eb,0x7ff0000000000000,1 +np.float64,0xbfdf3ce052be79c0,0xbfd8b78baccfb908,1 +np.float64,0x7fcd890dd13b121b,0x7ff0000000000000,1 +np.float64,0x8000000000000001,0x8000000000000001,1 +np.float64,0x800ec0f4281d81e8,0x800ec0f4281d81e8,1 +np.float64,0xbfba960116352c00,0xbfb94085424496d9,1 +np.float64,0x3fdddedc9bbbbdb8,0x3fe30853fe4ef5ce,1 +np.float64,0x238092a847013,0x238092a847013,1 +np.float64,0xbfe38d4803271a90,0xbfdd429a955c46af,1 +np.float64,0xbfd4c9067329920c,0xbfd1bf6255ed91a4,1 +np.float64,0xbfbee213923dc428,0xbfbd17ce1bda6088,1 +np.float64,0xffd5a2d337ab45a6,0xbff0000000000000,1 +np.float64,0x7fe21bfcf82437f9,0x7ff0000000000000,1 +np.float64,0x3fe2a2714da544e3,0x3fe949594a74ea25,1 +np.float64,0x800e05cf8ebc0b9f,0x800e05cf8ebc0b9f,1 +np.float64,0x559a1526ab343,0x559a1526ab343,1 +np.float64,0xffe6a1b7906d436e,0xbff0000000000000,1 +np.float64,0xffef27d6253e4fab,0xbff0000000000000,1 +np.float64,0xbfe0f90ab0a1f216,0xbfda5828a1edde48,1 +np.float64,0x9675d2ab2cebb,0x9675d2ab2cebb,1 +np.float64,0xffee0f7eecfc1efd,0xbff0000000000000,1 +np.float64,0x2ec005625d801,0x2ec005625d801,1 +np.float64,0x7fde35ff14bc6bfd,0x7ff0000000000000,1 +np.float64,0xffe03f36d9e07e6d,0xbff0000000000000,1 +np.float64,0x7fe09ff7c4213fef,0x7ff0000000000000,1 +np.float64,0xffeac29dd1b5853b,0xbff0000000000000,1 +np.float64,0x3fb63120aa2c6241,0x3fb72ea3de98a853,1 +np.float64,0xffd079eb84a0f3d8,0xbff0000000000000,1 +np.float64,0xbfd3c2cc75a78598,0xbfd1005996880b3f,1 +np.float64,0x7fb80507ee300a0f,0x7ff0000000000000,1 +np.float64,0xffe8006105f000c1,0xbff0000000000000,1 +np.float64,0x8009138b0ab22716,0x8009138b0ab22716,1 +np.float64,0xbfd6dfb40b2dbf68,0xbfd33b8e4008e3b0,1 +np.float64,0xbfe7c2cf9bef859f,0xbfe0c55c807460df,1 +np.float64,0xbfe75fe4da6ebfca,0xbfe09600256d3b81,1 +np.float64,0xffd662fc73acc5f8,0xbff0000000000000,1 +np.float64,0x20b99dbc41735,0x20b99dbc41735,1 +np.float64,0x3fe10b38ade21671,0x3fe68229a9bbeefc,1 +np.float64,0x3743b99c6e878,0x3743b99c6e878,1 +np.float64,0xff9eb5ed903d6be0,0xbff0000000000000,1 +np.float64,0x3ff0000000000000,0x3ffb7e151628aed3,1 +np.float64,0xffb9e0569e33c0b0,0xbff0000000000000,1 +np.float64,0x7fd39c804fa73900,0x7ff0000000000000,1 +np.float64,0x3fe881ef67f103df,0x3ff269dd704b7129,1 +np.float64,0x1b6eb40236dd7,0x1b6eb40236dd7,1 +np.float64,0xbfe734ea432e69d4,0xbfe0813e6355d02f,1 +np.float64,0xffcf48f3743e91e8,0xbff0000000000000,1 +np.float64,0xffed10bcf6fa2179,0xbff0000000000000,1 +np.float64,0x3fef07723b7e0ee4,0x3ffa3156123f3c15,1 +np.float64,0xffe45c704aa8b8e0,0xbff0000000000000,1 +np.float64,0xb7b818d96f703,0xb7b818d96f703,1 +np.float64,0x42fcc04085f99,0x42fcc04085f99,1 +np.float64,0xbfda7ced01b4f9da,0xbfd5b0ce1e5524ae,1 +np.float64,0xbfe1e5963d63cb2c,0xbfdb6a87b6c09185,1 +np.float64,0x7fdfa18003bf42ff,0x7ff0000000000000,1 +np.float64,0xbfe3790a43e6f214,0xbfdd2c9a38b4f089,1 +np.float64,0xffe0ff5b9ae1feb6,0xbff0000000000000,1 +np.float64,0x80085a7d3110b4fb,0x80085a7d3110b4fb,1 +np.float64,0xffd6bfa6622d7f4c,0xbff0000000000000,1 +np.float64,0xbfef5ddc7cfebbb9,0xbfe3fe170521593e,1 +np.float64,0x3fc21773fa242ee8,0x3fc36ebda1f91a72,1 +np.float64,0x7fc04d98da209b31,0x7ff0000000000000,1 +np.float64,0xbfeba3b535b7476a,0xbfe282602e3c322e,1 +np.float64,0xffd41fb5c1a83f6c,0xbff0000000000000,1 +np.float64,0xf87d206df0fa4,0xf87d206df0fa4,1 +np.float64,0x800060946fc0c12a,0x800060946fc0c12a,1 +np.float64,0x3fe69d5f166d3abe,0x3ff06fdddcf4ca93,1 +np.float64,0x7fe9b5793b336af1,0x7ff0000000000000,1 +np.float64,0x7fe0dd4143e1ba82,0x7ff0000000000000,1 +np.float64,0xbfa8eaea3c31d5d0,0xbfa8522e397da3bd,1 +np.float64,0x119f0078233e1,0x119f0078233e1,1 +np.float64,0xbfd78a207aaf1440,0xbfd3b225bbf2ab4f,1 +np.float64,0xc66a6d4d8cd4e,0xc66a6d4d8cd4e,1 +np.float64,0xe7fc4b57cff8a,0xe7fc4b57cff8a,1 +np.float64,0x800883e8091107d0,0x800883e8091107d0,1 +np.float64,0x3fa6520c842ca419,0x3fa6d06e1041743a,1 +np.float64,0x3fa563182c2ac630,0x3fa5d70e27a84c97,1 +np.float64,0xe6a30b61cd462,0xe6a30b61cd462,1 +np.float64,0x3fee85dac37d0bb6,0x3ff987cfa41a9778,1 +np.float64,0x3fe8f621db71ec44,0x3ff2e7b768a2e9d0,1 +np.float64,0x800f231d861e463b,0x800f231d861e463b,1 +np.float64,0xbfe22eb07c645d61,0xbfdbbdbb853ab4c6,1 +np.float64,0x7fd2dda2dea5bb45,0x7ff0000000000000,1 +np.float64,0xbfd09b79a0a136f4,0xbfcd4147606ffd27,1 +np.float64,0xca039cc394074,0xca039cc394074,1 +np.float64,0x8000000000000000,0x8000000000000000,1 +np.float64,0xcb34575d9668b,0xcb34575d9668b,1 +np.float64,0x3fea62c1f3f4c584,0x3ff47e6dc67ec89f,1 +np.float64,0x7fe544c8606a8990,0x7ff0000000000000,1 +np.float64,0xffe0a980c4615301,0xbff0000000000000,1 +np.float64,0x3fdd67d5f8bacfac,0x3fe2a9c3421830f1,1 +np.float64,0xffe41d3dda283a7b,0xbff0000000000000,1 +np.float64,0xffeed59e5ffdab3c,0xbff0000000000000,1 +np.float64,0xffeeae8326fd5d05,0xbff0000000000000,1 +np.float64,0x800d70b4fa7ae16a,0x800d70b4fa7ae16a,1 +np.float64,0xffec932e6839265c,0xbff0000000000000,1 +np.float64,0xee30b185dc616,0xee30b185dc616,1 +np.float64,0x7fc3cf4397279e86,0x7ff0000000000000,1 +np.float64,0xbfeab34f1875669e,0xbfe21b868229de7d,1 +np.float64,0xf45f5f7de8bec,0xf45f5f7de8bec,1 +np.float64,0x3fad2c4b203a5896,0x3fae0528b568f3cf,1 +np.float64,0xbfe2479543e48f2a,0xbfdbd9e57cf64028,1 +np.float64,0x3fd41a1473283429,0x3fd79df2bc60debb,1 +np.float64,0x3febb5155ef76a2a,0x3ff608585afd698b,1 +np.float64,0xffe21f5303e43ea6,0xbff0000000000000,1 +np.float64,0x7fe9ef390833de71,0x7ff0000000000000,1 +np.float64,0xffe8ee873d71dd0e,0xbff0000000000000,1 +np.float64,0x7fd7cbc55e2f978a,0x7ff0000000000000,1 +np.float64,0x80081f9080d03f21,0x80081f9080d03f21,1 +np.float64,0x7fecbafc8b3975f8,0x7ff0000000000000,1 +np.float64,0x800b6c4b0b16d896,0x800b6c4b0b16d896,1 +np.float64,0xbfaa0fc2d4341f80,0xbfa968cdf32b98ad,1 +np.float64,0x3fec79fe4078f3fc,0x3ff6f5361a4a5d93,1 +np.float64,0xbfb14b79de2296f0,0xbfb0b93b75ecec11,1 +np.float64,0x800009d084c013a2,0x800009d084c013a2,1 +np.float64,0x4a4cdfe29499d,0x4a4cdfe29499d,1 +np.float64,0xbfe721c2d56e4386,0xbfe077f541987d76,1 +np.float64,0x3e5f539e7cbeb,0x3e5f539e7cbeb,1 +np.float64,0x3fd23f044c247e09,0x3fd51ceafcdd64aa,1 +np.float64,0x3fc70785b02e0f0b,0x3fc93b2a37eb342a,1 +np.float64,0xbfe7ab4ec7af569e,0xbfe0ba28eecbf6b0,1 +np.float64,0x800c1d4134583a83,0x800c1d4134583a83,1 +np.float64,0xffd9a73070334e60,0xbff0000000000000,1 +np.float64,0x68a4bf24d1499,0x68a4bf24d1499,1 +np.float64,0x7feba9d9507753b2,0x7ff0000000000000,1 +np.float64,0xbfe9d747db73ae90,0xbfe1bab53d932010,1 +np.float64,0x800a9a4aed953496,0x800a9a4aed953496,1 +np.float64,0xffcb89b0ad371360,0xbff0000000000000,1 +np.float64,0xbfc62388b82c4710,0xbfc4547be442a38c,1 +np.float64,0x800a006d187400db,0x800a006d187400db,1 +np.float64,0x3fcef2fbd33de5f8,0x3fd18177b2150148,1 +np.float64,0x8000b74e3da16e9d,0x8000b74e3da16e9d,1 +np.float64,0x25be536e4b7cb,0x25be536e4b7cb,1 +np.float64,0x3fa86e189430dc31,0x3fa905b4684c9f01,1 +np.float64,0xa7584b114eb0a,0xa7584b114eb0a,1 +np.float64,0x800331133c866227,0x800331133c866227,1 +np.float64,0x3fb52b48142a5690,0x3fb611a6f6e7c664,1 +np.float64,0x3fe825797cf04af2,0x3ff206fd60e98116,1 +np.float64,0x3fd0bec4e5217d8a,0x3fd323db3ffd59b2,1 +np.float64,0x907b43a120f7,0x907b43a120f7,1 +np.float64,0x3fed31eb1d3a63d6,0x3ff7d7a91c6930a4,1 +np.float64,0x7f97a13d782f427a,0x7ff0000000000000,1 +np.float64,0xffc7121a702e2434,0xbff0000000000000,1 +np.float64,0xbfe8bb4cbbf1769a,0xbfe139d7f46f1fb1,1 +np.float64,0xbfe3593cc5a6b27a,0xbfdd09ec91d6cd48,1 +np.float64,0x7fcff218ff9ff,0x7fcff218ff9ff,1 +np.float64,0x3fe73651d4ae6ca4,0x3ff10c5c1d21d127,1 +np.float64,0x80054e396eaa9c74,0x80054e396eaa9c74,1 +np.float64,0x3fe527d5f9aa4fac,0x3fedfb7743db9b53,1 +np.float64,0x7fec6f28c5f8de51,0x7ff0000000000000,1 +np.float64,0x3fcd2bbff53a5780,0x3fd061987416b49b,1 +np.float64,0xffd1f0046423e008,0xbff0000000000000,1 +np.float64,0x80034d97fac69b31,0x80034d97fac69b31,1 +np.float64,0x3faa803f14350080,0x3fab32e3f8073be4,1 +np.float64,0x3fcf8da0163f1b40,0x3fd1e42ba2354c8e,1 +np.float64,0x3fd573c2632ae785,0x3fd97c37609d18d7,1 +np.float64,0x7f922960482452c0,0x7ff0000000000000,1 +np.float64,0x800ebd0c5d3d7a19,0x800ebd0c5d3d7a19,1 +np.float64,0xbfee63b7807cc76f,0xbfe39ec7981035db,1 +np.float64,0xffdc023f8e380480,0xbff0000000000000,1 +np.float64,0x3fe3ffa02c67ff40,0x3febc7f8b900ceba,1 +np.float64,0x36c508b86d8a2,0x36c508b86d8a2,1 +np.float64,0x3fc9fbb0f133f760,0x3fcccee9f6ba801c,1 +np.float64,0x3fd75c1d5faeb83b,0x3fdc3150f9eff99e,1 +np.float64,0x3fe9a8d907b351b2,0x3ff3accc78a31df8,1 +np.float64,0x3fdd8fdcafbb1fb8,0x3fe2c97c97757994,1 +np.float64,0x3fb10c34ca22186a,0x3fb1a0cc42c76b86,1 +np.float64,0xbff0000000000000,0xbfe43a54e4e98864,1 +np.float64,0xffd046aefda08d5e,0xbff0000000000000,1 +np.float64,0x80067989758cf314,0x80067989758cf314,1 +np.float64,0x3fee9d77763d3aef,0x3ff9a67ff0841ba5,1 +np.float64,0xffe4d3cbf8e9a798,0xbff0000000000000,1 +np.float64,0x800f9cab273f3956,0x800f9cab273f3956,1 +np.float64,0x800a5c84f9f4b90a,0x800a5c84f9f4b90a,1 +np.float64,0x4fd377009fa8,0x4fd377009fa8,1 +np.float64,0xbfe7ba26af6f744e,0xbfe0c13ce45d6f95,1 +np.float64,0x609c8a86c1392,0x609c8a86c1392,1 +np.float64,0x7fe4d0296ea9a052,0x7ff0000000000000,1 +np.float64,0x59847bccb3090,0x59847bccb3090,1 +np.float64,0xbfdf944157bf2882,0xbfd8ed092bacad43,1 +np.float64,0xbfe7560a632eac15,0xbfe091405ec34973,1 +np.float64,0x3fea0699f4340d34,0x3ff415eb72089230,1 +np.float64,0x800a5533f374aa68,0x800a5533f374aa68,1 +np.float64,0xbf8e8cdb103d19c0,0xbf8e52cffcb83774,1 +np.float64,0x3fe87d9e52f0fb3d,0x3ff2653952344b81,1 +np.float64,0x7fca3950f73472a1,0x7ff0000000000000,1 +np.float64,0xffd5d1068aaba20e,0xbff0000000000000,1 +np.float64,0x3fd1a5f169a34be4,0x3fd4524b6ef17f91,1 +np.float64,0x3fdc4b95a8b8972c,0x3fe1caafd8652bf7,1 +np.float64,0x3fe333f65a6667ed,0x3fea502fb1f8a578,1 +np.float64,0xbfc117aaac222f54,0xbfc00018a4b84b6e,1 +np.float64,0x7fecf2efdf39e5df,0x7ff0000000000000,1 +np.float64,0x4e99d83e9d33c,0x4e99d83e9d33c,1 +np.float64,0x800d18937bda3127,0x800d18937bda3127,1 +np.float64,0x3fd6c67778ad8cef,0x3fdb5aba70a3ea9e,1 +np.float64,0x3fdbb71770b76e2f,0x3fe157ae8da20bc5,1 +np.float64,0xbfe9faf6ebf3f5ee,0xbfe1ca963d83f17f,1 +np.float64,0x80038850ac0710a2,0x80038850ac0710a2,1 +np.float64,0x8006beb72f8d7d6f,0x8006beb72f8d7d6f,1 +np.float64,0x3feead67bffd5acf,0x3ff9bb43e8b15e2f,1 +np.float64,0xbfd1174b89222e98,0xbfcdff9972799907,1 +np.float64,0x7fee2c077cfc580e,0x7ff0000000000000,1 +np.float64,0xbfbdbd904e3b7b20,0xbfbc13f4916ed466,1 +np.float64,0xffee47b8fe3c8f71,0xbff0000000000000,1 +np.float64,0xffd161884222c310,0xbff0000000000000,1 +np.float64,0xbfd42f27c4a85e50,0xbfd14fa8d67ba5ee,1 +np.float64,0x7fefffffffffffff,0x7ff0000000000000,1 +np.float64,0x8008151791b02a30,0x8008151791b02a30,1 +np.float64,0xbfba79029234f208,0xbfb926616cf41755,1 +np.float64,0x8004c486be29890e,0x8004c486be29890e,1 +np.float64,0x7fe5325a252a64b3,0x7ff0000000000000,1 +np.float64,0x5a880f04b5103,0x5a880f04b5103,1 +np.float64,0xbfe6f4b7702de96f,0xbfe06209002dd72c,1 +np.float64,0xbfdf8b3739bf166e,0xbfd8e783efe3c30f,1 +np.float64,0xbfe32571c8e64ae4,0xbfdcd128b9aa49a1,1 +np.float64,0xbfe97c98c172f932,0xbfe1920ac0fc040f,1 +np.float64,0x3fd0b513a2a16a28,0x3fd31744e3a1bf0a,1 +np.float64,0xffe3ab70832756e0,0xbff0000000000000,1 +np.float64,0x80030f055ce61e0b,0x80030f055ce61e0b,1 +np.float64,0xffd5f3b21b2be764,0xbff0000000000000,1 +np.float64,0x800c1f2d6c783e5b,0x800c1f2d6c783e5b,1 +np.float64,0x80075f4f148ebe9f,0x80075f4f148ebe9f,1 +np.float64,0xbfa5a046f42b4090,0xbfa52cfbf8992256,1 +np.float64,0xffd6702583ace04c,0xbff0000000000000,1 +np.float64,0x800dc0a5cf1b814c,0x800dc0a5cf1b814c,1 +np.float64,0x14f2203a29e45,0x14f2203a29e45,1 +np.float64,0x800421a40ee84349,0x800421a40ee84349,1 +np.float64,0xbfea7c279df4f84f,0xbfe2037fff3ed877,1 +np.float64,0xbfe9b41ddcf3683c,0xbfe1aafe18a44bf8,1 +np.float64,0xffe7b037022f606e,0xbff0000000000000,1 +np.float64,0x800bafb648775f6d,0x800bafb648775f6d,1 +np.float64,0x800b81681d5702d1,0x800b81681d5702d1,1 +np.float64,0x3fe29f8dc8653f1c,0x3fe9442da1c32c6b,1 +np.float64,0xffef9a05dc7f340b,0xbff0000000000000,1 +np.float64,0x800c8c65a65918cb,0x800c8c65a65918cb,1 +np.float64,0xffe99df0d5f33be1,0xbff0000000000000,1 +np.float64,0x9afeb22535fd7,0x9afeb22535fd7,1 +np.float64,0x7fc620dd822c41ba,0x7ff0000000000000,1 +np.float64,0x29c2cdf25385b,0x29c2cdf25385b,1 +np.float64,0x2d92284e5b246,0x2d92284e5b246,1 +np.float64,0xffc794aa942f2954,0xbff0000000000000,1 +np.float64,0xbfe7ed907eafdb21,0xbfe0d9a7b1442497,1 +np.float64,0xbfd4e0d4aea9c1aa,0xbfd1d09366dba2a7,1 +np.float64,0xa70412c34e083,0xa70412c34e083,1 +np.float64,0x41dc0ee083b9,0x41dc0ee083b9,1 +np.float64,0x8000ece20da1d9c5,0x8000ece20da1d9c5,1 +np.float64,0x3fdf3dae103e7b5c,0x3fe42314bf826bc5,1 +np.float64,0x3fe972533c72e4a6,0x3ff3703761e70f04,1 +np.float64,0xffba1d2b82343a58,0xbff0000000000000,1 +np.float64,0xe0086c83c010e,0xe0086c83c010e,1 +np.float64,0x3fe6fb0dde6df61c,0x3ff0cf5fae01aa08,1 +np.float64,0x3fcfaf057e3f5e0b,0x3fd1f98c1fd20139,1 +np.float64,0xbfdca19d9239433c,0xbfd7158745192ca9,1 +np.float64,0xffb17f394e22fe70,0xbff0000000000000,1 +np.float64,0x7fe40f05c7681e0b,0x7ff0000000000000,1 +np.float64,0x800b3c575d5678af,0x800b3c575d5678af,1 +np.float64,0x7fa4ab20ac295640,0x7ff0000000000000,1 +np.float64,0xbfd2fff4f6a5ffea,0xbfd07069bb50e1a6,1 +np.float64,0xbfef81b9147f0372,0xbfe40b845a749787,1 +np.float64,0x7fd7400e54ae801c,0x7ff0000000000000,1 +np.float64,0x3fd4401a17a88034,0x3fd7d20fb76a4f3d,1 +np.float64,0xbfd3e907fd27d210,0xbfd11c64b7577fc5,1 +np.float64,0x7fe34bed9ae697da,0x7ff0000000000000,1 +np.float64,0x80039119c0472234,0x80039119c0472234,1 +np.float64,0xbfe2e36ac565c6d6,0xbfdc88454ee997b3,1 +np.float64,0xbfec57204478ae40,0xbfe2cd3183de1d2d,1 +np.float64,0x7fed7e2a12fafc53,0x7ff0000000000000,1 +np.float64,0x7fd5c5fa7d2b8bf4,0x7ff0000000000000,1 +np.float64,0x3fdcf368d6b9e6d0,0x3fe24decce1ebd35,1 +np.float64,0xbfe0ebfcf2e1d7fa,0xbfda48c9247ae8cf,1 +np.float64,0xbfe10dbea2e21b7e,0xbfda707d68b59674,1 +np.float64,0xbfdf201b6ebe4036,0xbfd8a5df27742fdf,1 +np.float64,0xffe16555be62caab,0xbff0000000000000,1 +np.float64,0xffc23a5db22474bc,0xbff0000000000000,1 +np.float64,0xffe1cbb3f8a39768,0xbff0000000000000,1 +np.float64,0x8007b823be0f7048,0x8007b823be0f7048,1 +np.float64,0xbfa5d1f3042ba3e0,0xbfa55c97cd77bf6e,1 +np.float64,0xbfe316a074662d41,0xbfdcc0da4e7334d0,1 +np.float64,0xbfdfab2bf2bf5658,0xbfd8fb046b88b51f,1 +np.float64,0xfacc9dabf5994,0xfacc9dabf5994,1 +np.float64,0xffe7e420a4efc841,0xbff0000000000000,1 +np.float64,0x800bb986cd57730e,0x800bb986cd57730e,1 +np.float64,0xbfe314fa38e629f4,0xbfdcbf09302c3bf5,1 +np.float64,0x7fc56b17772ad62e,0x7ff0000000000000,1 +np.float64,0x8006a87d54ad50fb,0x8006a87d54ad50fb,1 +np.float64,0xbfe6633e4a6cc67c,0xbfe01a67c3b3ff32,1 +np.float64,0x3fe0ff56eb21feae,0x3fe66df01defb0fb,1 +np.float64,0xffc369cfc126d3a0,0xbff0000000000000,1 +np.float64,0x7fe8775d9a30eeba,0x7ff0000000000000,1 +np.float64,0x3fb53db13e2a7b60,0x3fb625a7279cdac3,1 +np.float64,0xffee76e7e6fcedcf,0xbff0000000000000,1 +np.float64,0xb45595b568ab3,0xb45595b568ab3,1 +np.float64,0xffa09a1d50213440,0xbff0000000000000,1 +np.float64,0x7d11dc16fa23c,0x7d11dc16fa23c,1 +np.float64,0x7fd4cc2928299851,0x7ff0000000000000,1 +np.float64,0x6a30e0ead461d,0x6a30e0ead461d,1 +np.float64,0x7fd3ee735a27dce6,0x7ff0000000000000,1 +np.float64,0x8008d7084b31ae11,0x8008d7084b31ae11,1 +np.float64,0x3fe469353fe8d26a,0x3fec8e7e2df38590,1 +np.float64,0x3fcecef2743d9de5,0x3fd16a888b715dfd,1 +np.float64,0x460130d68c027,0x460130d68c027,1 +np.float64,0xbfd76510c62eca22,0xbfd398766b741d6e,1 +np.float64,0x800ec88c2a5d9118,0x800ec88c2a5d9118,1 +np.float64,0x3fac969c6c392d40,0x3fad66ca6a1e583c,1 +np.float64,0x3fe5c616bf6b8c2e,0x3fef30f931e8dde5,1 +np.float64,0xb4cb6cd56996e,0xb4cb6cd56996e,1 +np.float64,0xffc3eacf8827d5a0,0xbff0000000000000,1 +np.float64,0x3fe1ceaf60e39d5f,0x3fe7d31e0a627cf9,1 +np.float64,0xffea69b42ff4d368,0xbff0000000000000,1 +np.float64,0x800ff8aef99ff15e,0x800ff8aef99ff15e,1 +np.float64,0x6c3953f0d872b,0x6c3953f0d872b,1 +np.float64,0x8007ca5a0d0f94b5,0x8007ca5a0d0f94b5,1 +np.float64,0x800993ce3ad3279d,0x800993ce3ad3279d,1 +np.float64,0x3fe5a4d1516b49a2,0x3feeef67b22ac65b,1 +np.float64,0x8003d7512a67aea3,0x8003d7512a67aea3,1 +np.float64,0x33864430670c9,0x33864430670c9,1 +np.float64,0xbfdbf477e3b7e8f0,0xbfd6a63f1b36f424,1 +np.float64,0x3fb5da92582bb525,0x3fb6d04ef1a1d31a,1 +np.float64,0xe38aae71c7156,0xe38aae71c7156,1 +np.float64,0x3fcaf5590a35eab2,0x3fce01ed6eb6188e,1 +np.float64,0x800deba9b05bd754,0x800deba9b05bd754,1 +np.float64,0x7fee0cde287c19bb,0x7ff0000000000000,1 +np.float64,0xbfe0c2ae70e1855d,0xbfda17fa64d84fcf,1 +np.float64,0x518618faa30c4,0x518618faa30c4,1 +np.float64,0xbfeb4c49b8769894,0xbfe25d52cd7e529f,1 +np.float64,0xbfeb3aa21b367544,0xbfe255cae1df4cfd,1 +np.float64,0xffd23f1c5d247e38,0xbff0000000000000,1 +np.float64,0xff9a75132034ea20,0xbff0000000000000,1 +np.float64,0xbfef9d96307f3b2c,0xbfe415e8b6ce0e50,1 +np.float64,0x8004046f2f0808df,0x8004046f2f0808df,1 +np.float64,0x3fe15871aea2b0e3,0x3fe706532ea5c770,1 +np.float64,0x7fd86b1576b0d62a,0x7ff0000000000000,1 +np.float64,0xbfc240a5c724814c,0xbfc102c7971ca455,1 +np.float64,0xffd8ea670bb1d4ce,0xbff0000000000000,1 +np.float64,0xbfeb1ddd1ff63bba,0xbfe2497c4e27bb8e,1 +np.float64,0x3fcd47e0a33a8fc1,0x3fd0734444150d83,1 +np.float64,0xe00b6a65c016e,0xe00b6a65c016e,1 +np.float64,0xbfc7d582142fab04,0xbfc5bf1fbe755a4c,1 +np.float64,0x8cc91ca11993,0x8cc91ca11993,1 +np.float64,0x7fdbc530e3b78a61,0x7ff0000000000000,1 +np.float64,0x7fee437522bc86e9,0x7ff0000000000000,1 +np.float64,0xffe9e09ae2b3c135,0xbff0000000000000,1 +np.float64,0x8002841cada5083a,0x8002841cada5083a,1 +np.float64,0x3fd6b485f8ad690c,0x3fdb412135932699,1 +np.float64,0x80070e8d0b0e1d1b,0x80070e8d0b0e1d1b,1 +np.float64,0x7fed5df165babbe2,0x7ff0000000000000,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0x7fe99d08cd333a11,0x7ff0000000000000,1 +np.float64,0xdfff4201bfff,0xdfff4201bfff,1 +np.float64,0x800ccf7aaf999ef6,0x800ccf7aaf999ef6,1 +np.float64,0x3fddb05aad3b60b5,0x3fe2e34bdd1dd9d5,1 +np.float64,0xbfe5e1c60e6bc38c,0xbfdfb3275cc1675f,1 +np.float64,0x8004fe674269fccf,0x8004fe674269fccf,1 +np.float64,0x7fe9280363325006,0x7ff0000000000000,1 +np.float64,0xf605b9f1ec0b7,0xf605b9f1ec0b7,1 +np.float64,0x800c7c214018f843,0x800c7c214018f843,1 +np.float64,0x7fd97eb6b9b2fd6c,0x7ff0000000000000,1 +np.float64,0x7fd03f8fb6207f1e,0x7ff0000000000000,1 +np.float64,0x7fc526b64d2a4d6c,0x7ff0000000000000,1 +np.float64,0xbfef1a7c42fe34f9,0xbfe3e4b4399e0fcf,1 +np.float64,0xffdde10a2fbbc214,0xbff0000000000000,1 +np.float64,0xbfdd274f72ba4e9e,0xbfd76aa73788863c,1 +np.float64,0xbfecf7f77af9efef,0xbfe30ee2ae03fed1,1 +np.float64,0xffde709322bce126,0xbff0000000000000,1 +np.float64,0x268b5dac4d16d,0x268b5dac4d16d,1 +np.float64,0x8005c099606b8134,0x8005c099606b8134,1 +np.float64,0xffcf54c1593ea984,0xbff0000000000000,1 +np.float64,0xbfee9b8ebabd371d,0xbfe3b44f2663139d,1 +np.float64,0x3faf0330643e0661,0x3faff88fab74b447,1 +np.float64,0x7fe1c6011be38c01,0x7ff0000000000000,1 +np.float64,0xbfe9d58053b3ab01,0xbfe1b9ea12242485,1 +np.float64,0xbfe15a80fee2b502,0xbfdaca2aa7d1231a,1 +np.float64,0x7fe0d766d8a1aecd,0x7ff0000000000000,1 +np.float64,0x800f65e6a21ecbcd,0x800f65e6a21ecbcd,1 +np.float64,0x7fc85e45a530bc8a,0x7ff0000000000000,1 +np.float64,0x3fcc240e5438481d,0x3fcf7954fc080ac3,1 +np.float64,0xffddd49da2bba93c,0xbff0000000000000,1 +np.float64,0x1376f36c26edf,0x1376f36c26edf,1 +np.float64,0x3feffb7af17ff6f6,0x3ffb77f0ead2f881,1 +np.float64,0x3fd9354ea9b26a9d,0x3fdee4e4c8db8239,1 +np.float64,0xffdf7beed4bef7de,0xbff0000000000000,1 +np.float64,0xbfdef256ecbde4ae,0xbfd889b0e213a019,1 +np.float64,0x800d78bd1e7af17a,0x800d78bd1e7af17a,1 +np.float64,0xb66d66276cdad,0xb66d66276cdad,1 +np.float64,0x7fd8f51138b1ea21,0x7ff0000000000000,1 +np.float64,0xffe8c9c302b19385,0xbff0000000000000,1 +np.float64,0x8000be4cf5417c9b,0x8000be4cf5417c9b,1 +np.float64,0xbfe2293a25645274,0xbfdbb78a8c547c68,1 +np.float64,0xce8392c19d08,0xce8392c19d08,1 +np.float64,0xbfe075736b60eae7,0xbfd9bc0f6e34a283,1 +np.float64,0xbfe8d6fe6a71adfd,0xbfe1469ba80b4915,1 +np.float64,0xffe0c7993fa18f32,0xbff0000000000000,1 +np.float64,0x3fce5210fd3ca422,0x3fd11b40a1270a95,1 +np.float64,0x6c0534a8d80a7,0x6c0534a8d80a7,1 +np.float64,0x23c1823647831,0x23c1823647831,1 +np.float64,0x3fc901253732024a,0x3fcb9d264accb07c,1 +np.float64,0x3fe42b8997685714,0x3fec1a39e207b6e4,1 +np.float64,0x3fec4fd00fb89fa0,0x3ff6c1fdd0c262c8,1 +np.float64,0x8007b333caaf6668,0x8007b333caaf6668,1 +np.float64,0x800f9275141f24ea,0x800f9275141f24ea,1 +np.float64,0xffbba361a23746c0,0xbff0000000000000,1 +np.float64,0xbfee4effa9fc9dff,0xbfe396c11d0cd524,1 +np.float64,0x3e47e84c7c8fe,0x3e47e84c7c8fe,1 +np.float64,0x3fe80eb7b1301d6f,0x3ff1eed318a00153,1 +np.float64,0x7fd3f4c5b4a7e98a,0x7ff0000000000000,1 +np.float64,0x158abab02b158,0x158abab02b158,1 +np.float64,0x1,0x1,1 +np.float64,0x1f1797883e2f4,0x1f1797883e2f4,1 +np.float64,0x3feec055d03d80ac,0x3ff9d3fb0394de33,1 +np.float64,0x8010000000000000,0x8010000000000000,1 +np.float64,0xbfd070860ea0e10c,0xbfccfeec2828efef,1 +np.float64,0x80015c8b3e82b917,0x80015c8b3e82b917,1 +np.float64,0xffef9956d9ff32ad,0xbff0000000000000,1 +np.float64,0x7fe7f087dd2fe10f,0x7ff0000000000000,1 +np.float64,0x8002e7718665cee4,0x8002e7718665cee4,1 +np.float64,0x3fdfb9adb2bf735c,0x3fe4887a86214c1e,1 +np.float64,0xffc7747dfb2ee8fc,0xbff0000000000000,1 +np.float64,0x3fec309bb5386137,0x3ff69c44e1738547,1 +np.float64,0xffdbe2bf9ab7c580,0xbff0000000000000,1 +np.float64,0xbfe6a274daed44ea,0xbfe039aff2be9d48,1 +np.float64,0x7fd5a4e4efab49c9,0x7ff0000000000000,1 +np.float64,0xffbe6aaeb03cd560,0xbff0000000000000,1 diff --git a/local_packages/numpy/_core/tests/data/umath-validation-set-log.csv b/local_packages/numpy/_core/tests/data/umath-validation-set-log.csv new file mode 100644 index 0000000..b8f6b08 --- /dev/null +++ b/local_packages/numpy/_core/tests/data/umath-validation-set-log.csv @@ -0,0 +1,271 @@ +dtype,input,output,ulperrortol +## +ve denormals ## +np.float32,0x004b4716,0xc2afbc1b,4 +np.float32,0x007b2490,0xc2aec01e,4 +np.float32,0x007c99fa,0xc2aeba17,4 +np.float32,0x00734a0c,0xc2aee1dc,4 +np.float32,0x0070de24,0xc2aeecba,4 +np.float32,0x007fffff,0xc2aeac50,4 +np.float32,0x00000001,0xc2ce8ed0,4 +## -ve denormals ## +np.float32,0x80495d65,0xffc00000,4 +np.float32,0x806894f6,0xffc00000,4 +np.float32,0x80555a76,0xffc00000,4 +np.float32,0x804e1fb8,0xffc00000,4 +np.float32,0x80687de9,0xffc00000,4 +np.float32,0x807fffff,0xffc00000,4 +np.float32,0x80000001,0xffc00000,4 +## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ## +np.float32,0x00000000,0xff800000,4 +np.float32,0x80000000,0xff800000,4 +np.float32,0x7f7fffff,0x42b17218,4 +np.float32,0x80800000,0xffc00000,4 +np.float32,0xff7fffff,0xffc00000,4 +## 1.00f + 0x00000001 ## +np.float32,0x3f800000,0x00000000,4 +np.float32,0x3f800001,0x33ffffff,4 +np.float32,0x3f800002,0x347ffffe,4 +np.float32,0x3f7fffff,0xb3800000,4 +np.float32,0x3f7ffffe,0xb4000000,4 +np.float32,0x3f7ffffd,0xb4400001,4 +np.float32,0x402df853,0x3f7ffffe,4 +np.float32,0x402df854,0x3f7fffff,4 +np.float32,0x402df855,0x3f800000,4 +np.float32,0x402df856,0x3f800001,4 +np.float32,0x3ebc5ab0,0xbf800001,4 +np.float32,0x3ebc5ab1,0xbf800000,4 +np.float32,0x3ebc5ab2,0xbf800000,4 +np.float32,0x3ebc5ab3,0xbf7ffffe,4 +np.float32,0x423ef575,0x407768ab,4 +np.float32,0x427b8c61,0x408485dd,4 +np.float32,0x4211e9ee,0x406630b0,4 +np.float32,0x424d5c41,0x407c0fed,4 +np.float32,0x42be722a,0x4091cc91,4 +np.float32,0x42b73d30,0x4090908b,4 +np.float32,0x427e48e2,0x4084de7f,4 +np.float32,0x428f759b,0x4088bba3,4 +np.float32,0x41629069,0x4029a0cc,4 +np.float32,0x4272c99d,0x40836379,4 +np.float32,0x4d1b7458,0x4197463d,4 +np.float32,0x4f10c594,0x41ace2b2,4 +np.float32,0x4ea397c2,0x41a85171,4 +np.float32,0x4fefa9d1,0x41b6769c,4 +np.float32,0x4ebac6ab,0x41a960dc,4 +np.float32,0x4f6efb42,0x41b0e535,4 +np.float32,0x4e9ab8e7,0x41a7df44,4 +np.float32,0x4e81b5d1,0x41a67625,4 +np.float32,0x5014d9f2,0x41b832bd,4 +np.float32,0x4f02175c,0x41ac07b8,4 +np.float32,0x7f034f89,0x42b01c47,4 +np.float32,0x7f56d00e,0x42b11849,4 +np.float32,0x7f1cd5f6,0x42b0773a,4 +np.float32,0x7e979174,0x42af02d7,4 +np.float32,0x7f23369f,0x42b08ba2,4 +np.float32,0x7f0637ae,0x42b0277d,4 +np.float32,0x7efcb6e8,0x42b00897,4 +np.float32,0x7f7907c8,0x42b163f6,4 +np.float32,0x7e95c4c2,0x42aefcba,4 +np.float32,0x7f4577b2,0x42b0ed2d,4 +np.float32,0x3f49c92e,0xbe73ae84,4 +np.float32,0x3f4a23d1,0xbe71e2f8,4 +np.float32,0x3f4abb67,0xbe6ee430,4 +np.float32,0x3f48169a,0xbe7c5532,4 +np.float32,0x3f47f5fa,0xbe7cfc37,4 +np.float32,0x3f488309,0xbe7a2ad8,4 +np.float32,0x3f479df4,0xbe7ebf5f,4 +np.float32,0x3f47cfff,0xbe7dbec9,4 +np.float32,0x3f496704,0xbe75a125,4 +np.float32,0x3f478ee8,0xbe7f0c92,4 +np.float32,0x3f4a763b,0xbe7041ce,4 +np.float32,0x3f47a108,0xbe7eaf94,4 +np.float32,0x3f48136c,0xbe7c6578,4 +np.float32,0x3f481c17,0xbe7c391c,4 +np.float32,0x3f47cd28,0xbe7dcd56,4 +np.float32,0x3f478be8,0xbe7f1bf7,4 +np.float32,0x3f4c1f8e,0xbe67e367,4 +np.float32,0x3f489b0c,0xbe79b03f,4 +np.float32,0x3f4934cf,0xbe76a08a,4 +np.float32,0x3f4954df,0xbe75fd6a,4 +np.float32,0x3f47a3f5,0xbe7ea093,4 +np.float32,0x3f4ba4fc,0xbe6a4b02,4 +np.float32,0x3f47a0e1,0xbe7eb05c,4 +np.float32,0x3f48c30a,0xbe78e42f,4 +np.float32,0x3f48cab8,0xbe78bd05,4 +np.float32,0x3f4b0569,0xbe6d6ea4,4 +np.float32,0x3f47de32,0xbe7d7607,4 +np.float32,0x3f477328,0xbe7f9b00,4 +np.float32,0x3f496dab,0xbe757f52,4 +np.float32,0x3f47662c,0xbe7fddac,4 +np.float32,0x3f48ddd8,0xbe785b80,4 +np.float32,0x3f481866,0xbe7c4bff,4 +np.float32,0x3f48b119,0xbe793fb6,4 +np.float32,0x3f48c7e8,0xbe78cb5c,4 +np.float32,0x3f4985f6,0xbe7503da,4 +np.float32,0x3f483fdf,0xbe7b8212,4 +np.float32,0x3f4b1c76,0xbe6cfa67,4 +np.float32,0x3f480b2e,0xbe7c8fa8,4 +np.float32,0x3f48745f,0xbe7a75bf,4 +np.float32,0x3f485bda,0xbe7af308,4 +np.float32,0x3f47a660,0xbe7e942c,4 +np.float32,0x3f47d4d5,0xbe7da600,4 +np.float32,0x3f4b0a26,0xbe6d56be,4 +np.float32,0x3f4a4883,0xbe712924,4 +np.float32,0x3f4769e7,0xbe7fca84,4 +np.float32,0x3f499702,0xbe74ad3f,4 +np.float32,0x3f494ab1,0xbe763131,4 +np.float32,0x3f476b69,0xbe7fc2c6,4 +np.float32,0x3f4884e8,0xbe7a214a,4 +np.float32,0x3f486945,0xbe7aae76,4 +#float64 +## +ve denormal ## +np.float64,0x0000000000000001,0xc0874385446d71c3,1 +np.float64,0x0001000000000000,0xc086395a2079b70c,1 +np.float64,0x000fffffffffffff,0xc086232bdd7abcd2,1 +np.float64,0x0007ad63e2168cb6,0xc086290bc0b2980f,1 +## -ve denormal ## +np.float64,0x8000000000000001,0xfff8000000000001,1 +np.float64,0x8001000000000000,0xfff8000000000001,1 +np.float64,0x800fffffffffffff,0xfff8000000000001,1 +np.float64,0x8007ad63e2168cb6,0xfff8000000000001,1 +## +/-0.0f, MAX, MIN## +np.float64,0x0000000000000000,0xfff0000000000000,1 +np.float64,0x8000000000000000,0xfff0000000000000,1 +np.float64,0x7fefffffffffffff,0x40862e42fefa39ef,1 +np.float64,0xffefffffffffffff,0xfff8000000000001,1 +## near 1.0f ## +np.float64,0x3ff0000000000000,0x0000000000000000,1 +np.float64,0x3fe8000000000000,0xbfd269621134db92,1 +np.float64,0x3ff0000000000001,0x3cafffffffffffff,1 +np.float64,0x3ff0000020000000,0x3e7fffffe000002b,1 +np.float64,0x3ff0000000000001,0x3cafffffffffffff,1 +np.float64,0x3fefffffe0000000,0xbe70000008000005,1 +np.float64,0x3fefffffffffffff,0xbca0000000000000,1 +## random numbers ## +np.float64,0x02500186f3d9da56,0xc0855b8abf135773,1 +np.float64,0x09200815a3951173,0xc082ff1ad7131bdc,1 +np.float64,0x0da029623b0243d4,0xc0816fc994695bb5,1 +np.float64,0x48703b8ac483a382,0x40579213a313490b,1 +np.float64,0x09207b74c87c9860,0xc082fee20ff349ef,1 +np.float64,0x62c077698e8df947,0x407821c996d110f0,1 +np.float64,0x2350b45e87c3cfb0,0xc073d6b16b51d072,1 +np.float64,0x3990a23f9ff2b623,0xc051aa60eadd8c61,1 +np.float64,0x0d011386a116c348,0xc081a6cc7ea3b8fb,1 +np.float64,0x1fe0f0303ebe273a,0xc0763870b78a81ca,1 +np.float64,0x0cd1260121d387da,0xc081b7668d61a9d1,1 +np.float64,0x1e6135a8f581d422,0xc077425ac10f08c2,1 +np.float64,0x622168db5fe52d30,0x4077b3c669b9fadb,1 +np.float64,0x69f188e1ec6d1718,0x407d1e2f18c63889,1 +np.float64,0x3aa1bf1d9c4dd1a3,0xc04d682e24bde479,1 +np.float64,0x6c81c4011ce4f683,0x407ee5190e8a8e6a,1 +np.float64,0x2191fa55aa5a5095,0xc0750c0c318b5e2d,1 +np.float64,0x32a1f602a32bf360,0xc06270caa493fc17,1 +np.float64,0x16023c90ba93249b,0xc07d0f88e0801638,1 +np.float64,0x1c525fe6d71fa9ff,0xc078af49c66a5d63,1 +np.float64,0x1a927675815d65b7,0xc079e5bdd7fe376e,1 +np.float64,0x41227b8fe70da028,0x402aa0c9f9a84c71,1 +np.float64,0x4962bb6e853fe87d,0x405a34aa04c83747,1 +np.float64,0x23d2cda00b26b5a4,0xc0737c13a06d00ea,1 +np.float64,0x2d13083fd62987fa,0xc06a25055aeb474e,1 +np.float64,0x10e31e4c9b4579a1,0xc0804e181929418e,1 +np.float64,0x26d3247d556a86a9,0xc0716774171da7e8,1 +np.float64,0x6603379398d0d4ac,0x407a64f51f8a887b,1 +np.float64,0x02d38af17d9442ba,0xc0852d955ac9dd68,1 +np.float64,0x6a2382b4818dd967,0x407d4129d688e5d4,1 +np.float64,0x2ee3c403c79b3934,0xc067a091fefaf8b6,1 +np.float64,0x6493a699acdbf1a4,0x4079663c8602bfc5,1 +np.float64,0x1c8413c4f0de3100,0xc0788c99697059b6,1 +np.float64,0x4573f1ed350d9622,0x404e9bd1e4c08920,1 +np.float64,0x2f34265c9200b69c,0xc067310cfea4e986,1 +np.float64,0x19b43e65fa22029b,0xc07a7f8877de22d6,1 +np.float64,0x0af48ab7925ed6bc,0xc0825c4fbc0e5ade,1 +np.float64,0x4fa49699cad82542,0x4065c76d2a318235,1 +np.float64,0x7204a15e56ade492,0x40815bb87484dffb,1 +np.float64,0x4734aa08a230982d,0x40542a4bf7a361a9,1 +np.float64,0x1ae4ed296c2fd749,0xc079ac4921f20abb,1 +np.float64,0x472514ea4370289c,0x4053ff372bd8f18f,1 +np.float64,0x53a54b3f73820430,0x406b5411fc5f2e33,1 +np.float64,0x64754de5a15684fa,0x407951592e99a5ab,1 +np.float64,0x69358e279868a7c3,0x407c9c671a882c31,1 +np.float64,0x284579ec61215945,0xc0706688e55f0927,1 +np.float64,0x68b5c58806447adc,0x407c43d6f4eff760,1 +np.float64,0x1945a83f98b0e65d,0xc07acc15eeb032cc,1 +np.float64,0x0fc5eb98a16578bf,0xc080b0d02eddca0e,1 +np.float64,0x6a75e208f5784250,0x407d7a7383bf8f05,1 +np.float64,0x0fe63a029c47645d,0xc080a59ca1e98866,1 +np.float64,0x37963ac53f065510,0xc057236281f7bdb6,1 +np.float64,0x135661bb07067ff7,0xc07ee924930c21e4,1 +np.float64,0x4b4699469d458422,0x405f73843756e887,1 +np.float64,0x1a66d73e4bf4881b,0xc07a039ba1c63adf,1 +np.float64,0x12a6b9b119a7da59,0xc07f62e49c6431f3,1 +np.float64,0x24c719aa8fd1bdb5,0xc072d26da4bf84d3,1 +np.float64,0x0fa6ff524ffef314,0xc080bb8514662e77,1 +np.float64,0x1db751d66fdd4a9a,0xc077b77cb50d7c92,1 +np.float64,0x4947374c516da82c,0x4059e9acfc7105bf,1 +np.float64,0x1b1771ab98f3afc8,0xc07989326b8e1f66,1 +np.float64,0x25e78805baac8070,0xc0720a818e6ef080,1 +np.float64,0x4bd7a148225d3687,0x406082d004ea3ee7,1 +np.float64,0x53d7d6b2bbbda00a,0x406b9a398967cbd5,1 +np.float64,0x6997fb9f4e1c685f,0x407ce0a703413eba,1 +np.float64,0x069802c2ff71b951,0xc083df39bf7acddc,1 +np.float64,0x4d683ac9890f66d8,0x4062ae21d8c2acf0,1 +np.float64,0x5a2825863ec14f4c,0x40722d718d549552,1 +np.float64,0x0398799a88f4db80,0xc084e93dab8e2158,1 +np.float64,0x5ed87a8b77e135a5,0x40756d7051777b33,1 +np.float64,0x5828cd6d79b9bede,0x4070cafb22fc6ca1,1 +np.float64,0x7b18ba2a5ec6f068,0x408481386b3ed6fe,1 +np.float64,0x4938fd60922198fe,0x4059c206b762ea7e,1 +np.float64,0x31b8f44fcdd1a46e,0xc063b2faa8b6434e,1 +np.float64,0x5729341c0d918464,0x407019cac0c4a7d7,1 +np.float64,0x13595e9228ee878e,0xc07ee7235a7d8088,1 +np.float64,0x17698b0dc9dd4135,0xc07c1627e3a5ad5f,1 +np.float64,0x63b977c283abb0cc,0x4078cf1ec6ed65be,1 +np.float64,0x7349cc0d4dc16943,0x4081cc697ce4cb53,1 +np.float64,0x4e49a80b732fb28d,0x4063e67e3c5cbe90,1 +np.float64,0x07ba14b848a8ae02,0xc0837ac032a094e0,1 +np.float64,0x3da9f17b691bfddc,0xc03929c25366acda,1 +np.float64,0x02ea39aa6c3ac007,0xc08525af6f21e1c4,1 +np.float64,0x3a6a42f04ed9563d,0xc04e98e825dca46b,1 +np.float64,0x1afa877cd7900be7,0xc0799d6648cb34a9,1 +np.float64,0x58ea986649e052c6,0x4071512e939ad790,1 +np.float64,0x691abbc04647f536,0x407c89aaae0fcb83,1 +np.float64,0x43aabc5063e6f284,0x4044b45d18106fd2,1 +np.float64,0x488b003c893e0bea,0x4057df012a2dafbe,1 +np.float64,0x77eb076ed67caee5,0x40836720de94769e,1 +np.float64,0x5c1b46974aba46f4,0x40738731ba256007,1 +np.float64,0x1a5b29ecb5d3c261,0xc07a0becc77040d6,1 +np.float64,0x5d8b6ccf868c6032,0x4074865c1865e2db,1 +np.float64,0x4cfb6690b4aaf5af,0x406216cd8c7e8ddb,1 +np.float64,0x76cbd8eb5c5fc39e,0x4083038dc66d682b,1 +np.float64,0x28bbd1fec5012814,0xc07014c2dd1b9711,1 +np.float64,0x33dc1b3a4fd6bf7a,0xc060bd0756e07d8a,1 +np.float64,0x52bbe89b37de99f3,0x406a10041aa7d343,1 +np.float64,0x07bc479d15eb2dd3,0xc0837a1a6e3a3b61,1 +np.float64,0x18fc5275711a901d,0xc07aff3e9d62bc93,1 +np.float64,0x114c9758e247dc71,0xc080299a7cf15b05,1 +np.float64,0x25ac8f6d60755148,0xc07233c4c0c511d4,1 +np.float64,0x260cae2bb9e9fd7e,0xc071f128c7e82eac,1 +np.float64,0x572ccdfe0241de82,0x40701bedc84bb504,1 +np.float64,0x0ddcef6c8d41f5ee,0xc0815a7e16d07084,1 +np.float64,0x6dad1d59c988af68,0x407fb4a0bc0142b1,1 +np.float64,0x025d200580d8b6d1,0xc08556c0bc32b1b2,1 +np.float64,0x7aad344b6aa74c18,0x40845bbc453f22be,1 +np.float64,0x5b5d9d6ad9d14429,0x4073036d2d21f382,1 +np.float64,0x49cd8d8dcdf19954,0x405b5c034f5c7353,1 +np.float64,0x63edb9483335c1e6,0x4078f2dd21378786,1 +np.float64,0x7b1dd64c9d2c26bd,0x408482b922017bc9,1 +np.float64,0x782e13e0b574be5f,0x40837e2a0090a5ad,1 +np.float64,0x592dfe18b9d6db2f,0x40717f777fbcb1ec,1 +np.float64,0x654e3232ac60d72c,0x4079e71a95a70446,1 +np.float64,0x7b8e42ad22091456,0x4084a9a6f1e61722,1 +np.float64,0x570e88dfd5860ae6,0x407006ae6c0d137a,1 +np.float64,0x294e98346cb98ef1,0xc06f5edaac12bd44,1 +np.float64,0x1adeaa4ab792e642,0xc079b1431d5e2633,1 +np.float64,0x7b6ead3377529ac8,0x40849eabc8c7683c,1 +np.float64,0x2b8eedae8a9b2928,0xc06c400054deef11,1 +np.float64,0x65defb45b2dcf660,0x407a4b53f181c05a,1 +np.float64,0x1baf582d475e7701,0xc07920bcad4a502c,1 +np.float64,0x461f39cf05a0f15a,0x405126368f984fa1,1 +np.float64,0x7e5f6f5dcfff005b,0x4085a37d610439b4,1 +np.float64,0x136f66e4d09bd662,0xc07ed8a2719f2511,1 +np.float64,0x65afd8983fb6ca1f,0x407a2a7f48bf7fc1,1 +np.float64,0x572fa7f95ed22319,0x40701d706cf82e6f,1 diff --git a/local_packages/numpy/_core/tests/data/umath-validation-set-log10.csv b/local_packages/numpy/_core/tests/data/umath-validation-set-log10.csv new file mode 100644 index 0000000..c765777 --- /dev/null +++ b/local_packages/numpy/_core/tests/data/umath-validation-set-log10.csv @@ -0,0 +1,1629 @@ +dtype,input,output,ulperrortol +np.float32,0x3f6fd5c8,0xbce80e8e,4 +np.float32,0x3ea4ab17,0xbefc3deb,4 +np.float32,0x3e87a133,0xbf13b0b7,4 +np.float32,0x3f0d9069,0xbe83bb19,4 +np.float32,0x3f7b9269,0xbbf84f47,4 +np.float32,0x3f7a9ffa,0xbc16fd97,4 +np.float32,0x7f535d34,0x4219cb66,4 +np.float32,0x3e79ad7c,0xbf1ce857,4 +np.float32,0x7e8bfd3b,0x4217dfe9,4 +np.float32,0x3f2d2ee9,0xbe2dcec6,4 +np.float32,0x572e04,0xc21862e4,4 +np.float32,0x7f36f8,0xc217bad5,4 +np.float32,0x3f7982fb,0xbc36aaed,4 +np.float32,0x45b019,0xc218c67c,4 +np.float32,0x3f521c46,0xbdafb3e3,4 +np.float32,0x80000001,0x7fc00000,4 +np.float32,0x3f336c81,0xbe1e107f,4 +np.float32,0x3eac92d7,0xbef1d0bb,4 +np.float32,0x47bdfc,0xc218b990,4 +np.float32,0x7f2d94c8,0x421973d1,4 +np.float32,0x7d53ff8d,0x4214fbb6,4 +np.float32,0x3f581e4e,0xbd96a079,4 +np.float32,0x7ddaf20d,0x42163e4e,4 +np.float32,0x3f341d3c,0xbe1c5b4c,4 +np.float32,0x7ef04ba9,0x4218d032,4 +np.float32,0x620ed2,0xc2182e99,4 +np.float32,0x507850,0xc2188682,4 +np.float32,0x7d08f9,0xc217c284,4 +np.float32,0x7f0cf2aa,0x42191734,4 +np.float32,0x3f109a17,0xbe7e04fe,4 +np.float32,0x7f426152,0x4219a625,4 +np.float32,0x7f32d5a3,0x42198113,4 +np.float32,0x2e14b2,0xc2197e6f,4 +np.float32,0x3a5acd,0xc219156a,4 +np.float32,0x50a565,0xc2188589,4 +np.float32,0x5b751c,0xc2184d97,4 +np.float32,0x7e4149f6,0x42173b22,4 +np.float32,0x3dc34bf9,0xbf82a42a,4 +np.float32,0x3d12bc28,0xbfb910d6,4 +np.float32,0x7ebd2584,0x421865c1,4 +np.float32,0x7f6b3375,0x4219faeb,4 +np.float32,0x7fa00000,0x7fe00000,4 +np.float32,0x3f35fe7d,0xbe17bd33,4 +np.float32,0x7db45c87,0x4215e818,4 +np.float32,0x3efff366,0xbe9a2b8d,4 +np.float32,0x3eb331d0,0xbee971a3,4 +np.float32,0x3f259d5f,0xbe41ae2e,4 +np.float32,0x3eab85ec,0xbef32c4a,4 +np.float32,0x7f194b8a,0x42193c8c,4 +np.float32,0x3f11a614,0xbe7acfc7,4 +np.float32,0x5b17,0xc221f16b,4 +np.float32,0x3f33dadc,0xbe1cff4d,4 +np.float32,0x3cda1506,0xbfc9920f,4 +np.float32,0x3f6856f1,0xbd2c8290,4 +np.float32,0x7f3357fb,0x42198257,4 +np.float32,0x7f56f329,0x4219d2e1,4 +np.float32,0x3ef84108,0xbea0f595,4 +np.float32,0x3f72340f,0xbcc51916,4 +np.float32,0x3daf28,0xc218fcbd,4 +np.float32,0x131035,0xc21b06f4,4 +np.float32,0x3f275c3b,0xbe3d0487,4 +np.float32,0x3ef06130,0xbea82069,4 +np.float32,0x3f57f3b0,0xbd974fef,4 +np.float32,0x7f6c4a78,0x4219fcfa,4 +np.float32,0x7e8421d0,0x4217c639,4 +np.float32,0x3f17a479,0xbe68e08e,4 +np.float32,0x7f03774e,0x4218f83b,4 +np.float32,0x441a33,0xc218d0b8,4 +np.float32,0x539158,0xc21875b6,4 +np.float32,0x3e8fcc75,0xbf0d3018,4 +np.float32,0x7ef74130,0x4218dce4,4 +np.float32,0x3ea6f4fa,0xbef92c38,4 +np.float32,0x7f3948ab,0x421990d5,4 +np.float32,0x7db6f8f5,0x4215ee7c,4 +np.float32,0x3ee44a2f,0xbeb399e5,4 +np.float32,0x156c59,0xc21ad30d,4 +np.float32,0x3f21ee53,0xbe4baf16,4 +np.float32,0x3f2c08f4,0xbe30c424,4 +np.float32,0x3f49885c,0xbdd4c6a9,4 +np.float32,0x3eae0b9c,0xbeefed54,4 +np.float32,0x1b5c1f,0xc21a6646,4 +np.float32,0x3e7330e2,0xbf1fd592,4 +np.float32,0x3ebbeb4c,0xbededf82,4 +np.float32,0x427154,0xc218dbb1,4 +np.float32,0x3f6b8b4b,0xbd142498,4 +np.float32,0x8e769,0xc21c5981,4 +np.float32,0x3e9db557,0xbf02ec1c,4 +np.float32,0x3f001bef,0xbe99f019,4 +np.float32,0x3e58b48c,0xbf2ca77a,4 +np.float32,0x3d46c16b,0xbfa8327c,4 +np.float32,0x7eeeb305,0x4218cd3b,4 +np.float32,0x3e3f163d,0xbf3aa446,4 +np.float32,0x3f66c872,0xbd3877d9,4 +np.float32,0x7f7162f8,0x421a0677,4 +np.float32,0x3edca3bc,0xbebb2e28,4 +np.float32,0x3dc1055b,0xbf834afa,4 +np.float32,0x12b16f,0xc21b0fad,4 +np.float32,0x3f733898,0xbcb62e16,4 +np.float32,0x3e617af8,0xbf283db0,4 +np.float32,0x7e86577a,0x4217cd99,4 +np.float32,0x3f0ba3c7,0xbe86c633,4 +np.float32,0x3f4cad25,0xbdc70247,4 +np.float32,0xb6cdf,0xc21bea9f,4 +np.float32,0x3f42971a,0xbdf3f49e,4 +np.float32,0x3e6ccad2,0xbf22cc78,4 +np.float32,0x7f2121b2,0x421952b8,4 +np.float32,0x3f6d3f55,0xbd075366,4 +np.float32,0x3f524f,0xc218f117,4 +np.float32,0x3e95b5d9,0xbf08b56a,4 +np.float32,0x7f6ae47d,0x4219fa56,4 +np.float32,0x267539,0xc219ceda,4 +np.float32,0x3ef72f6d,0xbea1eb2e,4 +np.float32,0x2100b2,0xc21a12e2,4 +np.float32,0x3d9777d1,0xbf90c4e7,4 +np.float32,0x44c6f5,0xc218cc56,4 +np.float32,0x7f2a613d,0x42196b8a,4 +np.float32,0x390a25,0xc2191f8d,4 +np.float32,0x3f1de5ad,0xbe56e703,4 +np.float32,0x2f59ce,0xc2197258,4 +np.float32,0x7f3b12a1,0x4219951b,4 +np.float32,0x3ecb66d4,0xbecd44ca,4 +np.float32,0x7e74ff,0xc217bd7d,4 +np.float32,0x7ed83f78,0x4218a14d,4 +np.float32,0x685994,0xc21812f1,4 +np.float32,0xbf800000,0x7fc00000,4 +np.float32,0x736f47,0xc217e60b,4 +np.float32,0x7f09c371,0x42190d0a,4 +np.float32,0x3f7ca51d,0xbbbbbce0,4 +np.float32,0x7f4b4d3b,0x4219ba1a,4 +np.float32,0x3f6c4471,0xbd0eb076,4 +np.float32,0xd944e,0xc21b9dcf,4 +np.float32,0x7cb06ffc,0x421375cd,4 +np.float32,0x586187,0xc2185cce,4 +np.float32,0x3f3cbf5b,0xbe078911,4 +np.float32,0x3f30b504,0xbe24d983,4 +np.float32,0x3f0a16ba,0xbe8941fd,4 +np.float32,0x5c43b0,0xc21849af,4 +np.float32,0x3dad74f6,0xbf893bd5,4 +np.float32,0x3c586958,0xbff087a6,4 +np.float32,0x3e8307a8,0xbf1786ba,4 +np.float32,0x7dcd1776,0x4216213d,4 +np.float32,0x3f44d107,0xbde9d662,4 +np.float32,0x3e2e6823,0xbf44cbec,4 +np.float32,0x3d87ea27,0xbf96caca,4 +np.float32,0x3e0c715b,0xbf5ce07e,4 +np.float32,0x7ec9cd5a,0x4218828e,4 +np.float32,0x3e26c0b4,0xbf49c93e,4 +np.float32,0x75b94e,0xc217dd50,4 +np.float32,0x3df7b9f5,0xbf6ad7f4,4 +np.float32,0x0,0xff800000,4 +np.float32,0x3f284795,0xbe3a94da,4 +np.float32,0x7ee49092,0x4218b9f0,4 +np.float32,0x7f4c20e0,0x4219bbe8,4 +np.float32,0x3efbbce8,0xbe9ddc4b,4 +np.float32,0x12274a,0xc21b1cb4,4 +np.float32,0x5fa1b1,0xc21839be,4 +np.float32,0x7f0b210e,0x4219116d,4 +np.float32,0x3f67092a,0xbd368545,4 +np.float32,0x3d572721,0xbfa3ca5b,4 +np.float32,0x3f7913ce,0xbc431028,4 +np.float32,0x3b0613,0xc2191059,4 +np.float32,0x3e1d16c0,0xbf506c6f,4 +np.float32,0xab130,0xc21c081d,4 +np.float32,0x3e23ac97,0xbf4bdb9d,4 +np.float32,0x7ef52368,0x4218d911,4 +np.float32,0x7f38e686,0x42198fe9,4 +np.float32,0x3f106a21,0xbe7e9897,4 +np.float32,0x3ecef8d5,0xbec96644,4 +np.float32,0x3ec37e02,0xbed61683,4 +np.float32,0x3efbd063,0xbe9dcb17,4 +np.float32,0x3f318fe3,0xbe22b402,4 +np.float32,0x7e5e5228,0x4217795d,4 +np.float32,0x72a046,0xc217e92c,4 +np.float32,0x7f6f970b,0x421a0324,4 +np.float32,0x3ed871b4,0xbebf72fb,4 +np.float32,0x7a2eaa,0xc217ccc8,4 +np.float32,0x3e819655,0xbf18c1d7,4 +np.float32,0x80800000,0x7fc00000,4 +np.float32,0x7eab0719,0x421838f9,4 +np.float32,0x7f0763cb,0x4219054f,4 +np.float32,0x3f191672,0xbe64a8af,4 +np.float32,0x7d4327,0xc217c1b6,4 +np.float32,0x3f724ba6,0xbcc3bea3,4 +np.float32,0x60fe06,0xc2183375,4 +np.float32,0x48cd59,0xc218b30b,4 +np.float32,0x3f7fec2b,0xb909d3f3,4 +np.float32,0x1c7bb9,0xc21a5460,4 +np.float32,0x24d8a8,0xc219e1e4,4 +np.float32,0x3e727c52,0xbf20283c,4 +np.float32,0x4bc460,0xc218a14a,4 +np.float32,0x63e313,0xc2182661,4 +np.float32,0x7f625581,0x4219e9d4,4 +np.float32,0x3eeb3e77,0xbeacedc0,4 +np.float32,0x7ef27a47,0x4218d437,4 +np.float32,0x27105a,0xc219c7e6,4 +np.float32,0x22a10b,0xc219fd7d,4 +np.float32,0x3f41e907,0xbdf711ab,4 +np.float32,0x7c1fbf95,0x4212155b,4 +np.float32,0x7e5acceb,0x42177244,4 +np.float32,0x3e0892fa,0xbf5ffb83,4 +np.float32,0x3ea0e51d,0xbf00b2c0,4 +np.float32,0x3e56fc29,0xbf2d8a51,4 +np.float32,0x7ee724ed,0x4218beed,4 +np.float32,0x7ebf142b,0x42186a46,4 +np.float32,0x7f6cf35c,0x4219fe37,4 +np.float32,0x3f11abf7,0xbe7abdcd,4 +np.float32,0x588d7a,0xc2185bf1,4 +np.float32,0x3f6e81d2,0xbcfbcf97,4 +np.float32,0x3f1b6be8,0xbe5dee2b,4 +np.float32,0x7f3815e0,0x42198df2,4 +np.float32,0x3f5bfc88,0xbd86d93d,4 +np.float32,0x3f3775d0,0xbe142bbc,4 +np.float32,0x78a958,0xc217d25a,4 +np.float32,0x2ff7c3,0xc2196c96,4 +np.float32,0x4b9c0,0xc21d733c,4 +np.float32,0x3ec025af,0xbed9ecf3,4 +np.float32,0x6443f0,0xc21824b3,4 +np.float32,0x3f754e28,0xbc97d299,4 +np.float32,0x3eaa91d3,0xbef4699d,4 +np.float32,0x3e5f2837,0xbf296478,4 +np.float32,0xe5676,0xc21b85a4,4 +np.float32,0x3f6859f2,0xbd2c6b90,4 +np.float32,0x3f68686b,0xbd2bfcc6,4 +np.float32,0x4b39b8,0xc218a47b,4 +np.float32,0x630ac4,0xc2182a28,4 +np.float32,0x160980,0xc21ac67d,4 +np.float32,0x3ed91c4d,0xbebec3fd,4 +np.float32,0x7ec27b0d,0x4218721f,4 +np.float32,0x3f3c0a5f,0xbe09344b,4 +np.float32,0x3dbff9c1,0xbf839841,4 +np.float32,0x7f0e8ea7,0x42191c40,4 +np.float32,0x3f36b162,0xbe1608e4,4 +np.float32,0x228bb3,0xc219fe90,4 +np.float32,0x2fdd30,0xc2196d8c,4 +np.float32,0x3e8fce8e,0xbf0d2e79,4 +np.float32,0x3f36acc7,0xbe16141a,4 +np.float32,0x7f44b51c,0x4219ab70,4 +np.float32,0x3ec3371c,0xbed66736,4 +np.float32,0x4388a2,0xc218d473,4 +np.float32,0x3f5aa6c3,0xbd8c4344,4 +np.float32,0x7f09fce4,0x42190dc3,4 +np.float32,0x7ed7854a,0x42189fce,4 +np.float32,0x7f4da83a,0x4219bf3a,4 +np.float32,0x3db8da28,0xbf85b25a,4 +np.float32,0x7f449686,0x4219ab2b,4 +np.float32,0x2eb25,0xc21e498c,4 +np.float32,0x3f2bcc08,0xbe3161bd,4 +np.float32,0x36c923,0xc219317b,4 +np.float32,0x3d52a866,0xbfa4f6d2,4 +np.float32,0x3f7d6688,0xbb913e4e,4 +np.float32,0x3f5a6ba4,0xbd8d33e3,4 +np.float32,0x719740,0xc217ed35,4 +np.float32,0x78a472,0xc217d26c,4 +np.float32,0x7ee33d0c,0x4218b759,4 +np.float32,0x7f668c1d,0x4219f208,4 +np.float32,0x3e29c600,0xbf47ca46,4 +np.float32,0x3f3cefc3,0xbe071712,4 +np.float32,0x3e224ebd,0xbf4cca41,4 +np.float32,0x7f1417be,0x42192d31,4 +np.float32,0x7f29d7d5,0x42196a23,4 +np.float32,0x3338ce,0xc2194f65,4 +np.float32,0x2a7897,0xc219a2b6,4 +np.float32,0x3d6bc3d8,0xbf9eb468,4 +np.float32,0x3f6bd7bf,0xbd11e392,4 +np.float32,0x7f6d26bf,0x4219fe98,4 +np.float32,0x3f52d378,0xbdacadb5,4 +np.float32,0x3efac453,0xbe9eb84a,4 +np.float32,0x3f692eb7,0xbd261184,4 +np.float32,0x3f6a0bb5,0xbd1f7ec1,4 +np.float32,0x3f037a49,0xbe942aa8,4 +np.float32,0x3f465bd4,0xbde2e530,4 +np.float32,0x7ef0f47b,0x4218d16a,4 +np.float32,0x637127,0xc218285e,4 +np.float32,0x3f41e511,0xbdf723d7,4 +np.float32,0x7f800000,0x7f800000,4 +np.float32,0x3f3342d5,0xbe1e77d5,4 +np.float32,0x7f57cfe6,0x4219d4a9,4 +np.float32,0x3e4358ed,0xbf3830a7,4 +np.float32,0x3ce25f15,0xbfc77f2b,4 +np.float32,0x7ed057e7,0x421890be,4 +np.float32,0x7ce154d9,0x4213e295,4 +np.float32,0x3ee91984,0xbeaef703,4 +np.float32,0x7e4e919c,0x421758af,4 +np.float32,0x6830e7,0xc218139e,4 +np.float32,0x3f12f08e,0xbe76e328,4 +np.float32,0x7f0a7a32,0x42190f56,4 +np.float32,0x7f38e,0xc21c8bd3,4 +np.float32,0x3e01def9,0xbf6593e3,4 +np.float32,0x3f5c8c6d,0xbd849432,4 +np.float32,0x3eed8747,0xbeaac7a3,4 +np.float32,0x3cadaa0e,0xbfd63b21,4 +np.float32,0x3f7532a9,0xbc996178,4 +np.float32,0x31f3ac,0xc2195a8f,4 +np.float32,0x3f0e0f97,0xbe82f3af,4 +np.float32,0x3f2a1f35,0xbe35bd3f,4 +np.float32,0x3f4547b2,0xbde7bebd,4 +np.float32,0x3f7988a6,0xbc36094c,4 +np.float32,0x74464c,0xc217e2d2,4 +np.float32,0x7f7518be,0x421a0d3f,4 +np.float32,0x7e97fa0a,0x42180473,4 +np.float32,0x584e3a,0xc2185d2f,4 +np.float32,0x3e7291f3,0xbf201e52,4 +np.float32,0xc0a05,0xc21bd359,4 +np.float32,0x3a3177,0xc21916a6,4 +np.float32,0x4f417f,0xc2188d45,4 +np.float32,0x263fce,0xc219d145,4 +np.float32,0x7e1d58,0xc217beb1,4 +np.float32,0x7f056af3,0x4218fec9,4 +np.float32,0x3f21c181,0xbe4c2a3f,4 +np.float32,0x7eca4956,0x4218839f,4 +np.float32,0x3e58afa8,0xbf2ca9fd,4 +np.float32,0x3f40d583,0xbdfc04ef,4 +np.float32,0x7f432fbb,0x4219a7fc,4 +np.float32,0x43aaa4,0xc218d393,4 +np.float32,0x7f2c9b62,0x42197150,4 +np.float32,0x5c3876,0xc21849e5,4 +np.float32,0x7f2034e8,0x42195029,4 +np.float32,0x7e5be772,0x42177481,4 +np.float32,0x80000000,0xff800000,4 +np.float32,0x3f5be03b,0xbd874bb0,4 +np.float32,0x3e32494f,0xbf4259be,4 +np.float32,0x3e1f4671,0xbf4ee30b,4 +np.float32,0x4606cc,0xc218c454,4 +np.float32,0x425cbc,0xc218dc3b,4 +np.float32,0x7dd9b8bf,0x42163bd0,4 +np.float32,0x3f0465d0,0xbe929db7,4 +np.float32,0x3f735077,0xbcb4d0fa,4 +np.float32,0x4d6a43,0xc21897b8,4 +np.float32,0x3e27d600,0xbf4910f5,4 +np.float32,0x3f06e0cc,0xbe8e7d24,4 +np.float32,0x3f3fd064,0xbe005e45,4 +np.float32,0x176f1,0xc21f7c2d,4 +np.float32,0x3eb64e6f,0xbee59d9c,4 +np.float32,0x7f0f075d,0x42191db8,4 +np.float32,0x3f718cbe,0xbcceb621,4 +np.float32,0x3ead7bda,0xbef0a54a,4 +np.float32,0x7f77c1a8,0x421a120c,4 +np.float32,0x3f6a79c5,0xbd1c3afd,4 +np.float32,0x3e992d1f,0xbf062a02,4 +np.float32,0x3e6f6335,0xbf219639,4 +np.float32,0x7f6d9a3e,0x4219ff70,4 +np.float32,0x557ed1,0xc2186b91,4 +np.float32,0x3f13a456,0xbe74c457,4 +np.float32,0x15c2dc,0xc21acc17,4 +np.float32,0x71f36f,0xc217ebcc,4 +np.float32,0x748dea,0xc217e1c1,4 +np.float32,0x7f0f32e0,0x42191e3f,4 +np.float32,0x5b1da8,0xc2184f41,4 +np.float32,0x3d865d3a,0xbf976e11,4 +np.float32,0x3f800000,0x0,4 +np.float32,0x7f67b56d,0x4219f444,4 +np.float32,0x6266a1,0xc2182d0c,4 +np.float32,0x3ec9c5e4,0xbecf0e6b,4 +np.float32,0x6a6a0e,0xc2180a3b,4 +np.float32,0x7e9db6fd,0x421814ef,4 +np.float32,0x3e7458f7,0xbf1f4e88,4 +np.float32,0x3ead8016,0xbef09fdc,4 +np.float32,0x3e263d1c,0xbf4a211e,4 +np.float32,0x7f6b3329,0x4219faeb,4 +np.float32,0x800000,0xc217b818,4 +np.float32,0x3f0654c7,0xbe8f6471,4 +np.float32,0x3f281b71,0xbe3b0990,4 +np.float32,0x7c4c8e,0xc217c524,4 +np.float32,0x7d113a87,0x4214537d,4 +np.float32,0x734b5f,0xc217e696,4 +np.float32,0x7f079d05,0x4219060b,4 +np.float32,0x3ee830b1,0xbeafd58b,4 +np.float32,0x3f1c3b8b,0xbe5b9d96,4 +np.float32,0x3f2bf0c6,0xbe3102aa,4 +np.float32,0x7ddffe22,0x42164871,4 +np.float32,0x3f1e58b4,0xbe55a37f,4 +np.float32,0x5f3edf,0xc2183b8a,4 +np.float32,0x7f1fb6ec,0x42194eca,4 +np.float32,0x3f78718e,0xbc55311e,4 +np.float32,0x3e574b7d,0xbf2d6152,4 +np.float32,0x7eab27c6,0x4218394e,4 +np.float32,0x7f34603c,0x421984e5,4 +np.float32,0x3f3a8b57,0xbe0cc1ca,4 +np.float32,0x3f744181,0xbca7134e,4 +np.float32,0x3f7e3bc4,0xbb45156b,4 +np.float32,0x93ab4,0xc21c498b,4 +np.float32,0x7ed5541e,0x42189b42,4 +np.float32,0x6bf8ec,0xc21803c4,4 +np.float32,0x757395,0xc217de58,4 +np.float32,0x7f177214,0x42193726,4 +np.float32,0x59935f,0xc21856d6,4 +np.float32,0x2cd9ba,0xc2198a78,4 +np.float32,0x3ef6fd5c,0xbea2183c,4 +np.float32,0x3ebb6c63,0xbedf75e0,4 +np.float32,0x7f43272c,0x4219a7e9,4 +np.float32,0x7f42e67d,0x4219a755,4 +np.float32,0x3f3f744f,0xbe0133f6,4 +np.float32,0x7f5fddaa,0x4219e4f4,4 +np.float32,0x3dc9874f,0xbf80e529,4 +np.float32,0x3f2efe64,0xbe292ec8,4 +np.float32,0x3e0406a6,0xbf63bf7c,4 +np.float32,0x3cdbb0aa,0xbfc92984,4 +np.float32,0x3e6597e7,0xbf263b30,4 +np.float32,0x3f0c1153,0xbe861807,4 +np.float32,0x7fce16,0xc217b8c6,4 +np.float32,0x3f5f4e5f,0xbd730dc6,4 +np.float32,0x3ed41ffa,0xbec3ee69,4 +np.float32,0x3f216c78,0xbe4d1446,4 +np.float32,0x3f123ed7,0xbe78fe4b,4 +np.float32,0x7f7e0ca9,0x421a1d34,4 +np.float32,0x7e318af4,0x42171558,4 +np.float32,0x7f1e1659,0x42194a3d,4 +np.float32,0x34d12a,0xc21941c2,4 +np.float32,0x3d9566ad,0xbf918870,4 +np.float32,0x3e799a47,0xbf1cf0e5,4 +np.float32,0x3e89dd6f,0xbf11df76,4 +np.float32,0x32f0d3,0xc21951d8,4 +np.float32,0x7e89d17e,0x4217d8f6,4 +np.float32,0x1f3b38,0xc21a2b6b,4 +np.float32,0x7ee9e060,0x4218c427,4 +np.float32,0x31a673,0xc2195d41,4 +np.float32,0x5180f1,0xc21880d5,4 +np.float32,0x3cd36f,0xc21902f8,4 +np.float32,0x3bb63004,0xc01050cb,4 +np.float32,0x3e8ee9d1,0xbf0ddfde,4 +np.float32,0x3d2a7da3,0xbfb0b970,4 +np.float32,0x3ea58107,0xbefb1dc3,4 +np.float32,0x7f6760b0,0x4219f3a2,4 +np.float32,0x7f7f9e08,0x421a1ff0,4 +np.float32,0x37e7f1,0xc219287b,4 +np.float32,0x3ef7eb53,0xbea14267,4 +np.float32,0x3e2eb581,0xbf449aa5,4 +np.float32,0x3da7671c,0xbf8b3568,4 +np.float32,0x7af36f7b,0x420f33ee,4 +np.float32,0x3eb3602c,0xbee93823,4 +np.float32,0x3f68bcff,0xbd2975de,4 +np.float32,0x3ea7cefb,0xbef80a9d,4 +np.float32,0x3f329689,0xbe202414,4 +np.float32,0x7f0c7c80,0x421915be,4 +np.float32,0x7f4739b8,0x4219b118,4 +np.float32,0x73af58,0xc217e515,4 +np.float32,0x7f13eb2a,0x42192cab,4 +np.float32,0x30f2d9,0xc2196395,4 +np.float32,0x7ea7066c,0x42182e71,4 +np.float32,0x669fec,0xc2181a5b,4 +np.float32,0x3f7d6876,0xbb90d1ef,4 +np.float32,0x3f08a4ef,0xbe8b9897,4 +np.float32,0x7f2a906c,0x42196c05,4 +np.float32,0x3ed3ca42,0xbec44856,4 +np.float32,0x9d27,0xc220fee2,4 +np.float32,0x3e4508a1,0xbf373c03,4 +np.float32,0x3e41f8de,0xbf38f9bb,4 +np.float32,0x3e912714,0xbf0c255b,4 +np.float32,0xff800000,0x7fc00000,4 +np.float32,0x7eefd13d,0x4218cf4f,4 +np.float32,0x3f491674,0xbdd6bded,4 +np.float32,0x3ef49512,0xbea445c9,4 +np.float32,0x3f045b79,0xbe92af15,4 +np.float32,0x3ef6c412,0xbea24bd5,4 +np.float32,0x3e6f3c28,0xbf21a85d,4 +np.float32,0x3ef71839,0xbea2000e,4 +np.float32,0x1,0xc23369f4,4 +np.float32,0x3e3fcfe4,0xbf3a3876,4 +np.float32,0x3e9d7a65,0xbf0315b2,4 +np.float32,0x20b7c4,0xc21a16bd,4 +np.float32,0x7f707b10,0x421a04cb,4 +np.float32,0x7fc00000,0x7fc00000,4 +np.float32,0x3f285ebd,0xbe3a57ac,4 +np.float32,0x74c9ea,0xc217e0dc,4 +np.float32,0x3f6501f2,0xbd4634ab,4 +np.float32,0x3f248959,0xbe4495cc,4 +np.float32,0x7e915ff0,0x4217f0b3,4 +np.float32,0x7edbb910,0x4218a864,4 +np.float32,0x3f7042dd,0xbce1bddb,4 +np.float32,0x6f08c9,0xc217f754,4 +np.float32,0x7f423993,0x4219a5ca,4 +np.float32,0x3f125704,0xbe78b4cd,4 +np.float32,0x7ef7f5ae,0x4218de28,4 +np.float32,0x3f2dd940,0xbe2c1a33,4 +np.float32,0x3f1ca78e,0xbe5a6a8b,4 +np.float32,0x244863,0xc219e8be,4 +np.float32,0x3f2614fe,0xbe406d6b,4 +np.float32,0x3e75e7a3,0xbf1e99b5,4 +np.float32,0x2bdd6e,0xc2199459,4 +np.float32,0x7e49e279,0x42174e7b,4 +np.float32,0x3e3bb09a,0xbf3ca2cd,4 +np.float32,0x649f06,0xc2182320,4 +np.float32,0x7f4a44e1,0x4219b7d6,4 +np.float32,0x400473,0xc218ec3a,4 +np.float32,0x3edb19ad,0xbebcbcad,4 +np.float32,0x3d8ee956,0xbf94006c,4 +np.float32,0x7e91c603,0x4217f1eb,4 +np.float32,0x221384,0xc21a04a6,4 +np.float32,0x7f7dd660,0x421a1cd5,4 +np.float32,0x7ef34609,0x4218d5ac,4 +np.float32,0x7f5ed529,0x4219e2e5,4 +np.float32,0x7f1bf685,0x42194438,4 +np.float32,0x3cdd094a,0xbfc8d294,4 +np.float32,0x7e87fc8e,0x4217d303,4 +np.float32,0x7f53d971,0x4219cc6b,4 +np.float32,0xabc8b,0xc21c0646,4 +np.float32,0x7f5011e6,0x4219c46a,4 +np.float32,0x7e460638,0x421745e5,4 +np.float32,0xa8126,0xc21c0ffd,4 +np.float32,0x3eec2a66,0xbeac0f2d,4 +np.float32,0x3f3a1213,0xbe0de340,4 +np.float32,0x7f5908db,0x4219d72c,4 +np.float32,0x7e0ad3c5,0x4216a7f3,4 +np.float32,0x3f2de40e,0xbe2bfe90,4 +np.float32,0x3d0463c5,0xbfbec8e4,4 +np.float32,0x7c7cde0b,0x4212e19a,4 +np.float32,0x74c24f,0xc217e0f9,4 +np.float32,0x3f14b4cb,0xbe71929b,4 +np.float32,0x3e94e192,0xbf09537f,4 +np.float32,0x3eebde71,0xbeac56bd,4 +np.float32,0x3f65e413,0xbd3f5b8a,4 +np.float32,0x7e109199,0x4216b9f9,4 +np.float32,0x3f22f5d0,0xbe48ddc0,4 +np.float32,0x3e22d3bc,0xbf4c6f4d,4 +np.float32,0x3f7a812f,0xbc1a680b,4 +np.float32,0x3f67f361,0xbd2f7d7c,4 +np.float32,0x3f1caa63,0xbe5a6281,4 +np.float32,0x3f306fde,0xbe2587ab,4 +np.float32,0x3e8df9d3,0xbf0e9b2f,4 +np.float32,0x3eaaccc4,0xbef41cd4,4 +np.float32,0x7f3f65ec,0x42199f45,4 +np.float32,0x3dc706e0,0xbf8196ec,4 +np.float32,0x3e14eaba,0xbf565cf6,4 +np.float32,0xcc60,0xc2208a09,4 +np.float32,0x358447,0xc2193be7,4 +np.float32,0x3dcecade,0xbf7eec70,4 +np.float32,0x3f20b4f8,0xbe4f0ef0,4 +np.float32,0x7e7c979f,0x4217b222,4 +np.float32,0x7f2387b9,0x4219594a,4 +np.float32,0x3f6f6e5c,0xbcee0e05,4 +np.float32,0x7f19ad81,0x42193da8,4 +np.float32,0x5635e1,0xc21867dd,4 +np.float32,0x4c5e97,0xc2189dc4,4 +np.float32,0x7f35f97f,0x421988d1,4 +np.float32,0x7f685224,0x4219f571,4 +np.float32,0x3eca0616,0xbecec7b8,4 +np.float32,0x3f436d0d,0xbdf024ca,4 +np.float32,0x12a97d,0xc21b106a,4 +np.float32,0x7f0fdc93,0x4219204d,4 +np.float32,0x3debfb42,0xbf703e65,4 +np.float32,0x3c6c54d2,0xbfeba291,4 +np.float32,0x7e5d7491,0x421777a1,4 +np.float32,0x3f4bd2f0,0xbdcab87d,4 +np.float32,0x3f7517f4,0xbc9ae510,4 +np.float32,0x3f71a59a,0xbccd480d,4 +np.float32,0x3f514653,0xbdb33f61,4 +np.float32,0x3f4e6ea4,0xbdbf694b,4 +np.float32,0x3eadadec,0xbef06526,4 +np.float32,0x3f3b41c1,0xbe0b0fbf,4 +np.float32,0xc35a,0xc2209e1e,4 +np.float32,0x384982,0xc2192575,4 +np.float32,0x3464c3,0xc2194556,4 +np.float32,0x7f5e20d9,0x4219e17d,4 +np.float32,0x3ea18b62,0xbf004016,4 +np.float32,0x63a02b,0xc218278c,4 +np.float32,0x7ef547ba,0x4218d953,4 +np.float32,0x3f2496fb,0xbe4470f4,4 +np.float32,0x7ea0c8c6,0x42181d81,4 +np.float32,0x3f42ba60,0xbdf35372,4 +np.float32,0x7e40d9,0xc217be34,4 +np.float32,0x3e95883b,0xbf08d750,4 +np.float32,0x3e0cddf3,0xbf5c8aa8,4 +np.float32,0x3f2305d5,0xbe48b20a,4 +np.float32,0x7f0d0941,0x4219177b,4 +np.float32,0x3f7b98d3,0xbbf6e477,4 +np.float32,0x3f687cdc,0xbd2b6057,4 +np.float32,0x3f42ce91,0xbdf2f73d,4 +np.float32,0x3ee00fc0,0xbeb7c217,4 +np.float32,0x7f3d483a,0x42199a53,4 +np.float32,0x3e1e08eb,0xbf4fc18d,4 +np.float32,0x7e202ff5,0x4216e798,4 +np.float32,0x582898,0xc2185ded,4 +np.float32,0x3e3552b1,0xbf40790c,4 +np.float32,0x3d3f7c87,0xbfaa44b6,4 +np.float32,0x669d8e,0xc2181a65,4 +np.float32,0x3f0e21b4,0xbe82d757,4 +np.float32,0x686f95,0xc2181293,4 +np.float32,0x3f48367f,0xbdda9ead,4 +np.float32,0x3dc27802,0xbf82e0a0,4 +np.float32,0x3f6ac40c,0xbd1a07d4,4 +np.float32,0x3bba6d,0xc2190b12,4 +np.float32,0x3ec7b6b0,0xbed15665,4 +np.float32,0x3f1f9ca4,0xbe521955,4 +np.float32,0x3ef2f147,0xbea5c4b8,4 +np.float32,0x7c65f769,0x4212b762,4 +np.float32,0x7e98e162,0x42180716,4 +np.float32,0x3f0f0c09,0xbe8169ea,4 +np.float32,0x3d67f03b,0xbf9f9d48,4 +np.float32,0x7f3751e4,0x42198c18,4 +np.float32,0x7f1fac61,0x42194ead,4 +np.float32,0x3e9b698b,0xbf048d89,4 +np.float32,0x7e66507b,0x42178913,4 +np.float32,0x7f5cb680,0x4219dea5,4 +np.float32,0x234700,0xc219f53e,4 +np.float32,0x3d9984ad,0xbf900591,4 +np.float32,0x3f33a3f2,0xbe1d872a,4 +np.float32,0x3eaf52b6,0xbeee4cf4,4 +np.float32,0x7f078930,0x421905ca,4 +np.float32,0x3f083b39,0xbe8c44df,4 +np.float32,0x3e3823f8,0xbf3ec231,4 +np.float32,0x3eef6f5d,0xbea9008c,4 +np.float32,0x6145e1,0xc218322c,4 +np.float32,0x16d9ae,0xc21ab65f,4 +np.float32,0x7e543376,0x421764a5,4 +np.float32,0x3ef77ccb,0xbea1a5a0,4 +np.float32,0x3f4a443f,0xbdd18af5,4 +np.float32,0x8f209,0xc21c5770,4 +np.float32,0x3ecac126,0xbecdfa33,4 +np.float32,0x3e8662f9,0xbf14b6c7,4 +np.float32,0x23759a,0xc219f2f4,4 +np.float32,0xf256d,0xc21b6d3f,4 +np.float32,0x3f579f93,0xbd98aaa2,4 +np.float32,0x3ed4cc8e,0xbec339cb,4 +np.float32,0x3ed25400,0xbec5d2a1,4 +np.float32,0x3ed6f8ba,0xbec0f795,4 +np.float32,0x7f36efd9,0x42198b2a,4 +np.float32,0x7f5169dd,0x4219c746,4 +np.float32,0x7de18a20,0x42164b80,4 +np.float32,0x3e8de526,0xbf0eab61,4 +np.float32,0x3de0cbcd,0xbf75a47e,4 +np.float32,0xe265f,0xc21b8b82,4 +np.float32,0x3df3cdbd,0xbf6c9e40,4 +np.float32,0x3f38a25a,0xbe115589,4 +np.float32,0x7f01f2c0,0x4218f311,4 +np.float32,0x3da7d5f4,0xbf8b10a5,4 +np.float32,0x4d4fe8,0xc2189850,4 +np.float32,0x3cc96d9d,0xbfcdfc8d,4 +np.float32,0x259a88,0xc219d8d7,4 +np.float32,0x7f1d5102,0x42194810,4 +np.float32,0x7e17ca91,0x4216cfa7,4 +np.float32,0x3f73d110,0xbcad7a8f,4 +np.float32,0x3f009383,0xbe9920ed,4 +np.float32,0x7e22af,0xc217be9f,4 +np.float32,0x3f7de2ce,0xbb6c0394,4 +np.float32,0x3edd0cd2,0xbebac45a,4 +np.float32,0x3ec9b5c1,0xbecf2035,4 +np.float32,0x3168c5,0xc2195f6b,4 +np.float32,0x3e935522,0xbf0a7d18,4 +np.float32,0x3e494077,0xbf34e120,4 +np.float32,0x3f52ed06,0xbdac41ec,4 +np.float32,0x3f73d51e,0xbcad3f65,4 +np.float32,0x3f03d453,0xbe939295,4 +np.float32,0x7ef4ee68,0x4218d8b1,4 +np.float32,0x3ed0e2,0xc218f4a7,4 +np.float32,0x4efab8,0xc2188ed3,4 +np.float32,0x3dbd5632,0xbf845d3b,4 +np.float32,0x7eecad4f,0x4218c972,4 +np.float32,0x9d636,0xc21c2d32,4 +np.float32,0x3e5f3b6b,0xbf295ae7,4 +np.float32,0x7f4932df,0x4219b57a,4 +np.float32,0x4b59b5,0xc218a3be,4 +np.float32,0x3e5de97f,0xbf2a03b4,4 +np.float32,0x3f1c479d,0xbe5b7b3c,4 +np.float32,0x3f42e7e4,0xbdf283a5,4 +np.float32,0x2445,0xc2238af2,4 +np.float32,0x7aa71b43,0x420e8c9e,4 +np.float32,0x3ede6e4e,0xbeb961e1,4 +np.float32,0x7f05dd3b,0x42190045,4 +np.float32,0x3ef5b55c,0xbea3404b,4 +np.float32,0x7f738624,0x421a0a62,4 +np.float32,0x3e7d50a1,0xbf1b4cb4,4 +np.float32,0x3f44cc4a,0xbde9ebcc,4 +np.float32,0x7e1a7b0b,0x4216d777,4 +np.float32,0x3f1d9868,0xbe57c0da,4 +np.float32,0x1ebee2,0xc21a3263,4 +np.float32,0x31685f,0xc2195f6e,4 +np.float32,0x368a8e,0xc2193379,4 +np.float32,0xa9847,0xc21c0c2e,4 +np.float32,0x3bd3b3,0xc2190a56,4 +np.float32,0x3961e4,0xc2191ce3,4 +np.float32,0x7e13a243,0x4216c34e,4 +np.float32,0x7f7b1790,0x421a17ff,4 +np.float32,0x3e55f020,0xbf2e1545,4 +np.float32,0x3f513861,0xbdb37aa8,4 +np.float32,0x3dd9e754,0xbf791ad2,4 +np.float32,0x5e8d86,0xc2183ec9,4 +np.float32,0x26b796,0xc219cbdd,4 +np.float32,0x429daa,0xc218da89,4 +np.float32,0x3f477caa,0xbdddd9ba,4 +np.float32,0x3f0e5114,0xbe828d45,4 +np.float32,0x3f54f362,0xbda3c286,4 +np.float32,0x6eac1c,0xc217f8c8,4 +np.float32,0x3f04c479,0xbe91fef5,4 +np.float32,0x3e993765,0xbf06228e,4 +np.float32,0x3eafd99f,0xbeeda21b,4 +np.float32,0x3f2a759e,0xbe34db96,4 +np.float32,0x3f05adfb,0xbe907937,4 +np.float32,0x3f6e2dfc,0xbd005980,4 +np.float32,0x3f2f2daa,0xbe28b6b5,4 +np.float32,0x15e746,0xc21ac931,4 +np.float32,0x7d34ca26,0x4214b4e5,4 +np.float32,0x7ebd175c,0x4218659f,4 +np.float32,0x7f1ed26b,0x42194c4c,4 +np.float32,0x2588b,0xc21eaab0,4 +np.float32,0x3f0065e3,0xbe996fe2,4 +np.float32,0x3f610376,0xbd658122,4 +np.float32,0x451995,0xc218ca41,4 +np.float32,0x70e083,0xc217f002,4 +np.float32,0x7e19821a,0x4216d4a8,4 +np.float32,0x3e7cd9a0,0xbf1b80fb,4 +np.float32,0x7f1a8f18,0x42194033,4 +np.float32,0x3f008fee,0xbe99271f,4 +np.float32,0xff7fffff,0x7fc00000,4 +np.float32,0x7f31d826,0x42197e9b,4 +np.float32,0x3f18cf12,0xbe657838,4 +np.float32,0x3e5c1bc7,0xbf2aebf9,4 +np.float32,0x3e3d3993,0xbf3bbaf8,4 +np.float32,0x68457a,0xc2181347,4 +np.float32,0x7ddf7561,0x42164761,4 +np.float32,0x7f47341b,0x4219b10c,4 +np.float32,0x4d3ecd,0xc21898b2,4 +np.float32,0x7f43dee8,0x4219a98b,4 +np.float32,0x3f0def7c,0xbe8325f5,4 +np.float32,0x3d5a551f,0xbfa2f994,4 +np.float32,0x7ed26602,0x4218951b,4 +np.float32,0x3ee7fa5b,0xbeb0099a,4 +np.float32,0x7ef74ea8,0x4218dcfc,4 +np.float32,0x6a3bb2,0xc2180afd,4 +np.float32,0x7f4c1e6e,0x4219bbe3,4 +np.float32,0x3e26f625,0xbf49a5a2,4 +np.float32,0xb8482,0xc21be70b,4 +np.float32,0x3f32f077,0xbe1f445b,4 +np.float32,0x7dd694b6,0x4216355a,4 +np.float32,0x7f3d62fd,0x42199a92,4 +np.float32,0x3f48e41a,0xbdd79cbf,4 +np.float32,0x338fc3,0xc2194c75,4 +np.float32,0x3e8355f0,0xbf174462,4 +np.float32,0x7f487e83,0x4219b3eb,4 +np.float32,0x2227f7,0xc21a039b,4 +np.float32,0x7e4383dd,0x4217403a,4 +np.float32,0x52d28b,0xc21879b2,4 +np.float32,0x12472c,0xc21b19a9,4 +np.float32,0x353530,0xc2193e7b,4 +np.float32,0x3f4e4728,0xbdc0137a,4 +np.float32,0x3bf169,0xc2190979,4 +np.float32,0x3eb3ee2e,0xbee8885f,4 +np.float32,0x3f03e3c0,0xbe937892,4 +np.float32,0x3c9f8408,0xbfdaf47f,4 +np.float32,0x40e792,0xc218e61b,4 +np.float32,0x5a6b29,0xc21852ab,4 +np.float32,0x7f268b83,0x4219616a,4 +np.float32,0x3ee25997,0xbeb57fa7,4 +np.float32,0x3f175324,0xbe69cf53,4 +np.float32,0x3f781d91,0xbc5e9827,4 +np.float32,0x7dba5210,0x4215f68c,4 +np.float32,0x7f1e66,0xc217bb2b,4 +np.float32,0x7f7fffff,0x421a209b,4 +np.float32,0x3f646202,0xbd4b10b8,4 +np.float32,0x575248,0xc218622b,4 +np.float32,0x7c67faa1,0x4212bb42,4 +np.float32,0x7f1683f2,0x42193469,4 +np.float32,0x1a3864,0xc21a7931,4 +np.float32,0x7f30ad75,0x42197bae,4 +np.float32,0x7f1c9d05,0x42194612,4 +np.float32,0x3e791795,0xbf1d2b2c,4 +np.float32,0x7e9ebc19,0x421817cd,4 +np.float32,0x4999b7,0xc218ae31,4 +np.float32,0x3d130e2c,0xbfb8f1cc,4 +np.float32,0x3f7e436f,0xbb41bb07,4 +np.float32,0x3ee00241,0xbeb7cf7d,4 +np.float32,0x7e496181,0x42174d5f,4 +np.float32,0x7efe58be,0x4218e978,4 +np.float32,0x3f5e5b0c,0xbd7aa43f,4 +np.float32,0x7ee4c6ab,0x4218ba59,4 +np.float32,0x3f6da8c6,0xbd043d7e,4 +np.float32,0x3e3e6e0f,0xbf3b064b,4 +np.float32,0x3f0143b3,0xbe97f10a,4 +np.float32,0x79170f,0xc217d0c6,4 +np.float32,0x517645,0xc218810f,4 +np.float32,0x3f1f9960,0xbe52226e,4 +np.float32,0x2a8df9,0xc219a1d6,4 +np.float32,0x2300a6,0xc219f8b8,4 +np.float32,0x3ee31355,0xbeb4c97a,4 +np.float32,0x3f20b05f,0xbe4f1ba9,4 +np.float32,0x3ee64249,0xbeb1b0ff,4 +np.float32,0x3a94b7,0xc21913b2,4 +np.float32,0x7ef7ef43,0x4218de1d,4 +np.float32,0x3f1abb5d,0xbe5fe872,4 +np.float32,0x7f65360b,0x4219ef72,4 +np.float32,0x3d315d,0xc219004c,4 +np.float32,0x3f26bbc4,0xbe3eafb9,4 +np.float32,0x3ee8c6e9,0xbeaf45de,4 +np.float32,0x7e5f1452,0x42177ae1,4 +np.float32,0x3f32e777,0xbe1f5aba,4 +np.float32,0x4d39a1,0xc21898d0,4 +np.float32,0x3e59ad15,0xbf2c2841,4 +np.float32,0x3f4be746,0xbdca5fc4,4 +np.float32,0x72e4fd,0xc217e821,4 +np.float32,0x1af0b8,0xc21a6d25,4 +np.float32,0x3f311147,0xbe23f18d,4 +np.float32,0x3f1ecebb,0xbe545880,4 +np.float32,0x7e90d293,0x4217ef02,4 +np.float32,0x3e3b366a,0xbf3ceb46,4 +np.float32,0x3f133239,0xbe761c96,4 +np.float32,0x7541ab,0xc217df15,4 +np.float32,0x3d8c8275,0xbf94f1a1,4 +np.float32,0x483b92,0xc218b689,4 +np.float32,0x3eb0dbed,0xbeec5c6b,4 +np.float32,0x3f00c676,0xbe98c8e2,4 +np.float32,0x3f445ac2,0xbdebed7c,4 +np.float32,0x3d2af4,0xc219007a,4 +np.float32,0x7f196ee1,0x42193cf2,4 +np.float32,0x290c94,0xc219b1db,4 +np.float32,0x3f5dbdc9,0xbd7f9019,4 +np.float32,0x3e80c62e,0xbf1974fc,4 +np.float32,0x3ec9ed2c,0xbecee326,4 +np.float32,0x7f469d60,0x4219afbb,4 +np.float32,0x3f698413,0xbd2386ce,4 +np.float32,0x42163f,0xc218de14,4 +np.float32,0x67a554,0xc21815f4,4 +np.float32,0x3f4bff74,0xbdc9f651,4 +np.float32,0x16a743,0xc21aba39,4 +np.float32,0x2eb8b0,0xc219784b,4 +np.float32,0x3eed9be1,0xbeaab45b,4 +np.float64,0x7fe0d76873e1aed0,0x40733f9d783bad7a,1 +np.float64,0x3fe22626bb244c4d,0xbfcf86a59864eea2,1 +np.float64,0x7f874113d02e8227,0x407324f54c4015b8,1 +np.float64,0x3fe40a46a9e8148d,0xbfca0411f533fcb9,1 +np.float64,0x3fd03932eea07266,0xbfe312bc9cf5649e,1 +np.float64,0x7fee5d2a1b3cba53,0x407343b5f56367a0,1 +np.float64,0x3feb7bda4a76f7b5,0xbfb0ea2c6edc784a,1 +np.float64,0x3fd6cd831a2d9b06,0xbfdcaf2e1a5faf51,1 +np.float64,0x98324e273064a,0xc0733e0e4c6d11c6,1 +np.float64,0x7fe1dd63b363bac6,0x4073400667c405c3,1 +np.float64,0x3fec5971f178b2e4,0xbfaaef32a7d94563,1 +np.float64,0x17abc07e2f579,0xc0734afca4da721e,1 +np.float64,0x3feec6ab5cfd8d57,0xbf9157f3545a8235,1 +np.float64,0x3fe3ae9622a75d2c,0xbfcb04b5ad254581,1 +np.float64,0x7fea73d854b4e7b0,0x407342c0a548f4c5,1 +np.float64,0x7fe29babf4653757,0x4073404eeb5fe714,1 +np.float64,0x7fd3a55d85a74aba,0x40733bde72e86c27,1 +np.float64,0x3fe83ce305f079c6,0xbfbee3511e85e0f1,1 +np.float64,0x3fd72087ea2e4110,0xbfdc4ab30802d7c2,1 +np.float64,0x7feb54ddab76a9ba,0x407342facb6f3ede,1 +np.float64,0xc57e34a18afd,0xc0734f82ec815baa,1 +np.float64,0x7a8cb97ef5198,0xc0733f8fb3777a67,1 +np.float64,0x7fe801032c300205,0x40734213dbe4eda9,1 +np.float64,0x3aefb1f475df7,0xc07344a5f08a0584,1 +np.float64,0x7fee85f1dd3d0be3,0x407343bf4441c2a7,1 +np.float64,0x3fdc7f1055b8fe21,0xbfd67d300630e893,1 +np.float64,0xe8ecddb3d1d9c,0xc0733b194f18f466,1 +np.float64,0x3fdf2b23c73e5648,0xbfd3ff6872c1f887,1 +np.float64,0x3fdba4aef2b7495e,0xbfd7557205e18b7b,1 +np.float64,0x3fe2ac34c6e5586a,0xbfcdf1dac69bfa08,1 +np.float64,0x3fc9852628330a4c,0xbfe66914f0fb9b0a,1 +np.float64,0x7fda211acf344235,0x40733dd9c2177aeb,1 +np.float64,0x3fe9420eb432841d,0xbfba4dd969a32575,1 +np.float64,0xb2f9d1ed65f3a,0xc0733cedfb6527ff,1 +np.float64,0x3fe9768a68f2ed15,0xbfb967c39c35c435,1 +np.float64,0x7fe8268462b04d08,0x4073421eaed32734,1 +np.float64,0x3fcf331f063e663e,0xbfe39e2f4b427ca9,1 +np.float64,0x7fd4eb9e2b29d73b,0x40733c4e4141418d,1 +np.float64,0x7fd2bba658a5774c,0x40733b89cd53d5b1,1 +np.float64,0x3fdfdf04913fbe09,0xbfd360c7fd9d251b,1 +np.float64,0x3fca5bfd0534b7fa,0xbfe5f5f844b2b20c,1 +np.float64,0x3feacd5032f59aa0,0xbfb3b5234ba8bf7b,1 +np.float64,0x7fe9241cec724839,0x4073426631362cec,1 +np.float64,0x3fe57aca20eaf594,0xbfc628e3ac2c6387,1 +np.float64,0x3fec6553ca38caa8,0xbfaa921368d3b222,1 +np.float64,0x3fe1e9676563d2cf,0xbfd020f866ba9b24,1 +np.float64,0x3fd5590667aab20d,0xbfde8458af5a4fd6,1 +np.float64,0x3fdf7528f43eea52,0xbfd3bdb438d6ba5e,1 +np.float64,0xb8dddc5571bbc,0xc0733cb4601e5bb2,1 +np.float64,0xe6d4e1fbcda9c,0xc0733b295ef4a4ba,1 +np.float64,0x3fe7019d962e033b,0xbfc257c0a6e8de16,1 +np.float64,0x3f94ef585029deb1,0xbffb07e5dfb0e936,1 +np.float64,0x7fc863b08030c760,0x4073388e28d7b354,1 +np.float64,0xf684443bed089,0xc0733ab46cfbff9a,1 +np.float64,0x7fe00e901d201d1f,0x40733f489c05a0f0,1 +np.float64,0x9e5c0a273cb82,0xc0733dc7af797e19,1 +np.float64,0x7fe49734f0692e69,0x4073410303680df0,1 +np.float64,0x7fb7b584442f6b08,0x4073338acff72502,1 +np.float64,0x3f99984c30333098,0xbff9a2642a6ed8cc,1 +np.float64,0x7fea2fcda8745f9a,0x407342aeae7f5e64,1 +np.float64,0xe580caadcb01a,0xc0733b33a3639217,1 +np.float64,0x1899ab3831336,0xc0734ab823729417,1 +np.float64,0x39bd4c76737aa,0xc07344ca6fac6d21,1 +np.float64,0xd755b2dbaeab7,0xc0733ba4fe19f2cc,1 +np.float64,0x3f952bebf82a57d8,0xbffaf3e7749c2512,1 +np.float64,0x3fe62ee5d72c5dcc,0xbfc45e3cb5baad08,1 +np.float64,0xb1264a7d624ca,0xc0733d003a1d0a66,1 +np.float64,0x3fc4bd1bcd297a38,0xbfe94b3058345c46,1 +np.float64,0x7fc5758bb32aeb16,0x407337aa7805497f,1 +np.float64,0x3fb0edcaf421db96,0xbff2dfb09c405294,1 +np.float64,0x3fd240fceaa481fa,0xbfe16f356bb36134,1 +np.float64,0x38c0c62a7181a,0xc07344e916d1e9b7,1 +np.float64,0x3fe98f2b3bf31e56,0xbfb8fc6eb622a820,1 +np.float64,0x3fe2bdf99c257bf3,0xbfcdbd0dbbae4d0b,1 +np.float64,0xce4b390d9c967,0xc0733bf14ada3134,1 +np.float64,0x3fd2ad607ba55ac1,0xbfe11da15167b37b,1 +np.float64,0x3fd8154f11b02a9e,0xbfdb2a6fabb9a026,1 +np.float64,0xf37849fde6f09,0xc0733aca8c64344c,1 +np.float64,0x3fcbae43b2375c87,0xbfe547f267c8e570,1 +np.float64,0x3fcd46fd7d3a8dfb,0xbfe48070f7232929,1 +np.float64,0x7fcdd245273ba489,0x407339f3d907b101,1 +np.float64,0x3fac75cd0838eb9a,0xbff4149d177b057b,1 +np.float64,0x7fe8ff3fd7f1fe7f,0x4073425bf968ba6f,1 +np.float64,0x7febadaa4df75b54,0x407343113a91f0e9,1 +np.float64,0x7fd5e4649c2bc8c8,0x40733c9f0620b065,1 +np.float64,0x903429812069,0xc07351b255e27887,1 +np.float64,0x3fe1d8c51c63b18a,0xbfd03ad448c1f1ee,1 +np.float64,0x3fe573ea646ae7d5,0xbfc63ab0bfd0e601,1 +np.float64,0x3f83b3f3c02767e8,0xc00022677e310649,1 +np.float64,0x7fd15d1582a2ba2a,0x40733b02c469c1d6,1 +np.float64,0x3fe63d3dabec7a7b,0xbfc43a56ee97b27e,1 +np.float64,0x7fe3a452fb2748a5,0x407340af1973c228,1 +np.float64,0x3fafac6b303f58d6,0xbff35651703ae9f2,1 +np.float64,0x513ddd24a27bc,0xc073426af96aaebb,1 +np.float64,0x3fef152246be2a45,0xbf89df79d7719282,1 +np.float64,0x3fe8c923e9f19248,0xbfbc67228e8db5f6,1 +np.float64,0x3fd6e2325fadc465,0xbfdc9602fb0b950f,1 +np.float64,0x3fe9616815f2c2d0,0xbfb9c4311a3b415b,1 +np.float64,0x2fe4e4005fc9d,0xc0734616fe294395,1 +np.float64,0x3fbceb02dc39d606,0xbfee4e68f1c7886f,1 +np.float64,0x7fe35e843d66bd07,0x407340963b066ad6,1 +np.float64,0x7fecd6c648f9ad8c,0x4073435a4c176e94,1 +np.float64,0x7fcbd72bf437ae57,0x4073397994b85665,1 +np.float64,0x3feff6443b3fec88,0xbf40eb380d5318ae,1 +np.float64,0x7fb9373cf6326e79,0x407333f869edef08,1 +np.float64,0x63790d9cc6f22,0xc0734102d4793cda,1 +np.float64,0x3f9de6efe83bcde0,0xbff88db6f0a6b56e,1 +np.float64,0xe00f2dc1c01f,0xc0734ea26ab84ff2,1 +np.float64,0xd7a9aa8baf536,0xc0733ba248fa33ab,1 +np.float64,0x3fee0089ea7c0114,0xbf9cab936ac31c4b,1 +np.float64,0x3fdec0d51cbd81aa,0xbfd45ed8878c5860,1 +np.float64,0x7fe91bf5e9f237eb,0x40734263f005081d,1 +np.float64,0x34ea7d1e69d50,0xc07345659dde7444,1 +np.float64,0x7fe67321a3ace642,0x4073419cc8130d95,1 +np.float64,0x9d1aeb2f3a35e,0xc0733dd5d506425c,1 +np.float64,0x7fbb01df003603bd,0x4073347282f1391d,1 +np.float64,0x42b945b285729,0xc07343c92d1bbef9,1 +np.float64,0x7fc92799b8324f32,0x407338c51e3f0733,1 +np.float64,0x3fe119c19b223383,0xbfd16ab707f65686,1 +np.float64,0x3fc9f9ac5333f359,0xbfe62a2f91ec0dff,1 +np.float64,0x3fd820d5a8b041ab,0xbfdb1d2586fe7b18,1 +np.float64,0x10000000000000,0xc0733a7146f72a42,1 +np.float64,0x3fe7e1543eafc2a8,0xbfc045362889592d,1 +np.float64,0xcbc0e1819783,0xc0734f4b68e05b1c,1 +np.float64,0xeb57e411d6afd,0xc0733b06efec001a,1 +np.float64,0xa9b74b47536ea,0xc0733d4c7bd06ddc,1 +np.float64,0x3fe56d4022eada80,0xbfc64bf8c7e3dd59,1 +np.float64,0x3fd445ca27288b94,0xbfdff40aecd0f882,1 +np.float64,0x3fe5af1cf5ab5e3a,0xbfc5a21d83699a04,1 +np.float64,0x7fed3431eb7a6863,0x40734370aa6131e1,1 +np.float64,0x3fd878dea1b0f1bd,0xbfdab8730dc00517,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x3feba9fcc1f753fa,0xbfb03027dcecbf65,1 +np.float64,0x7fca4feed6349fdd,0x4073391526327eb0,1 +np.float64,0x3fe7748ddbaee91c,0xbfc144b438218065,1 +np.float64,0x3fb5fbd94c2bf7b3,0xbff10ee6342c21a0,1 +np.float64,0x3feb603b97f6c077,0xbfb15a1f99d6d25e,1 +np.float64,0x3fe2e6fc8ce5cdf9,0xbfcd43edd7f3b4e6,1 +np.float64,0x7feb2b31f7765663,0x407342f02b306688,1 +np.float64,0x3fe290e2282521c4,0xbfce436deb8dbcf3,1 +np.float64,0x3fe3d5adf9e7ab5c,0xbfca96b8aa55d942,1 +np.float64,0x691899f2d2314,0xc07340a1026897c8,1 +np.float64,0x7fe468b008e8d15f,0x407340f33eadc628,1 +np.float64,0x3fb3a4c416274988,0xbff1d71da539a56e,1 +np.float64,0x3fe2442b29e48856,0xbfcf2b0037322661,1 +np.float64,0x3f376fbc7e6ef,0xc073442939a84643,1 +np.float64,0x3fe7c78d65ef8f1b,0xbfc08157cff411de,1 +np.float64,0xd4f27acba9e50,0xc0733bb8d38daa50,1 +np.float64,0x5198919ea3313,0xc07342633ba7cbea,1 +np.float64,0x7fd09f66f0a13ecd,0x40733ab5310b4385,1 +np.float64,0x3fdfe5531dbfcaa6,0xbfd35b487c7e739f,1 +np.float64,0x3fc4b0fecc2961fe,0xbfe95350c38c1640,1 +np.float64,0x7fd5ae21962b5c42,0x40733c8db78b7250,1 +np.float64,0x3fa4a8fcd42951fa,0xbff64e62fe602b72,1 +np.float64,0x7fc8e0e25831c1c4,0x407338b179b91223,1 +np.float64,0x7fdde1df6f3bc3be,0x40733ec87f9f027e,1 +np.float64,0x3fd8b9ad86b1735b,0xbfda6f385532c41b,1 +np.float64,0x3fd9f20ee933e41e,0xbfd91872fd858597,1 +np.float64,0x7feb35332df66a65,0x407342f2b9c715f0,1 +np.float64,0x7fe783dc7eaf07b8,0x407341ef41873706,1 +np.float64,0x7fceee929f3ddd24,0x40733a34e3c660fd,1 +np.float64,0x985b58d730b6b,0xc0733e0c6cfbb6f8,1 +np.float64,0x3fef4bb55cfe976b,0xbf83cb246c6f2a78,1 +np.float64,0x3fe218014f243003,0xbfcfb20ac683e1f6,1 +np.float64,0x7fe43b9fbea8773e,0x407340e3d5d5d29e,1 +np.float64,0x7fe148c74c62918e,0x40733fcba4367b8b,1 +np.float64,0x3feea4ad083d495a,0xbf93443917f3c991,1 +np.float64,0x8bcf6311179ed,0xc0733ea54d59dd31,1 +np.float64,0xf4b7a2dbe96f5,0xc0733ac175182401,1 +np.float64,0x543338baa8668,0xc073422b59165fe4,1 +np.float64,0x3fdb467317368ce6,0xbfd7b4d515929635,1 +np.float64,0x7fe3bbbc89e77778,0x407340b75cdf3de7,1 +np.float64,0x7fe693377aad266e,0x407341a6af60a0f1,1 +np.float64,0x3fc66210502cc421,0xbfe83bb940610a24,1 +np.float64,0x7fa75638982eac70,0x40732e9da476b816,1 +np.float64,0x3fe0d72a4761ae55,0xbfd1d7c82c479fab,1 +np.float64,0x97dec0dd2fbd8,0xc0733e121e072804,1 +np.float64,0x3fef33ec8c7e67d9,0xbf86701be6be8df1,1 +np.float64,0x7fcfca9b423f9536,0x40733a65a51efb94,1 +np.float64,0x9f2215633e443,0xc0733dbf043de9ed,1 +np.float64,0x2469373e48d28,0xc07347fe9e904b77,1 +np.float64,0x7fecc2e18cb985c2,0x407343557f58dfa2,1 +np.float64,0x3fde4acbfdbc9598,0xbfd4ca559e575e74,1 +np.float64,0x3fd6b11cf1ad623a,0xbfdcd1e17ef36114,1 +np.float64,0x3fc19ec494233d89,0xbfeb8ef228e8826a,1 +np.float64,0x4c89ee389913e,0xc07342d50c904f61,1 +np.float64,0x88c2046f11841,0xc0733ecc91369431,1 +np.float64,0x7fc88c13fd311827,0x40733899a125b392,1 +np.float64,0x3fcebd893a3d7b12,0xbfe3d2f35ab93765,1 +np.float64,0x3feb582a1476b054,0xbfb17ae8ec6a0465,1 +np.float64,0x7fd4369e5da86d3c,0x40733c1118b8cd67,1 +np.float64,0x3fda013fc1340280,0xbfd90831b85e98b2,1 +np.float64,0x7fed33d73fba67ad,0x4073437094ce1bd9,1 +np.float64,0x3fed3191053a6322,0xbfa468cc26a8f685,1 +np.float64,0x3fc04ed51c209daa,0xbfeca24a6f093bca,1 +np.float64,0x3fee4ac8763c9591,0xbf986458abbb90b5,1 +np.float64,0xa2d39dd145a74,0xc0733d9633651fbc,1 +np.float64,0x3fe7d9f86f2fb3f1,0xbfc0565a0b059f1c,1 +np.float64,0x3fe3250144e64a03,0xbfcc8eb2b9ae494b,1 +np.float64,0x7fe2b29507a56529,0x4073405774492075,1 +np.float64,0x7fdcdfcbe2b9bf97,0x40733e8b736b1bd8,1 +np.float64,0x3fc832730f3064e6,0xbfe7267ac9b2e7c3,1 +np.float64,0x3fc7e912e52fd226,0xbfe750dfc0aeae57,1 +np.float64,0x7fc960472f32c08d,0x407338d4b4cb3957,1 +np.float64,0x3fbdf182ea3be306,0xbfedd27150283ffb,1 +np.float64,0x3fd1e9359823d26b,0xbfe1b2ac7fd25f8d,1 +np.float64,0x7fbcf75f6039eebe,0x407334ef13eb16f8,1 +np.float64,0x3fe5a3c910eb4792,0xbfc5bf2f57c5d643,1 +np.float64,0x3fcf4f2a6e3e9e55,0xbfe391b6f065c4b8,1 +np.float64,0x3fee067873fc0cf1,0xbf9c53af0373fc0e,1 +np.float64,0xd3f08b85a7e12,0xc0733bc14357e686,1 +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 +np.float64,0x3fc8635f6430c6bf,0xbfe70a7dc77749a7,1 +np.float64,0x3fe3ff5c52a7feb9,0xbfca22617c6636d5,1 +np.float64,0x3fbbae91fa375d24,0xbfeee9d4c300543f,1 +np.float64,0xe3f71b59c7ee4,0xc0733b3f99187375,1 +np.float64,0x7fca93d3be3527a6,0x40733926fd48ecd6,1 +np.float64,0x3fcd29f7223a53ee,0xbfe48e3edf32fe57,1 +np.float64,0x7fdc4ef6f8389ded,0x40733e68401cf2a6,1 +np.float64,0xe009bc81c014,0xc0734ea295ee3e5b,1 +np.float64,0x61f56c78c3eae,0xc073411e1dbd7c54,1 +np.float64,0x3fde131928bc2632,0xbfd4fda024f6927c,1 +np.float64,0x3fb21ee530243dca,0xbff266aaf0358129,1 +np.float64,0x7feaac82a4f55904,0x407342cf7809d9f9,1 +np.float64,0x3fe66ab177ecd563,0xbfc3c92d4d522819,1 +np.float64,0xfe9f9c2bfd3f4,0xc0733a7ade3a88a7,1 +np.float64,0x7fd0c5217c218a42,0x40733ac4e4c6dfa5,1 +np.float64,0x430f4ae6861ea,0xc07343c03d8a9442,1 +np.float64,0x494bff2a92981,0xc073432209d2fd16,1 +np.float64,0x3f8860e9d030c1d4,0xbffeca059ebf5e89,1 +np.float64,0x3fe43732dc286e66,0xbfc98800388bad2e,1 +np.float64,0x6443b60ec8877,0xc07340f4bab11827,1 +np.float64,0x3feda9be6d7b537d,0xbfa0dcb9a6914069,1 +np.float64,0x3fc5ceb6772b9d6d,0xbfe89868c881db70,1 +np.float64,0x3fbdf153023be2a6,0xbfedd2878c3b4949,1 +np.float64,0x7fe8f6b8e8f1ed71,0x407342599a30b273,1 +np.float64,0x3fea6fbdb8b4df7b,0xbfb53bf66f71ee96,1 +np.float64,0xc7ac3dbb8f588,0xc0733c2b525b7963,1 +np.float64,0x3fef3a91f77e7524,0xbf85b2bd3adbbe31,1 +np.float64,0x3f887cb97030f973,0xbffec21ccbb5d22a,1 +np.float64,0x8b2f1c9f165e4,0xc0733ead49300951,1 +np.float64,0x2c1cb32058397,0xc07346a951bd8d2b,1 +np.float64,0x3fe057edd620afdc,0xbfd2acf1881b7e99,1 +np.float64,0x7f82e9530025d2a5,0x4073238591dd52ce,1 +np.float64,0x3fe4e03dff69c07c,0xbfc7be96c5c006fc,1 +np.float64,0x52727b4aa4e50,0xc0734250c58ebbc1,1 +np.float64,0x3f99a62160334c43,0xbff99ea3ca09d8f9,1 +np.float64,0x3fd5314b4faa6297,0xbfdeb843daf01e03,1 +np.float64,0x3fefde89e13fbd14,0xbf5d1facb7a1e9de,1 +np.float64,0x7fb460f1a228c1e2,0x4073327d8cbc5f86,1 +np.float64,0xeb93efb3d727e,0xc0733b052a4990e4,1 +np.float64,0x3fe884baecf10976,0xbfbd9ba9cfe23713,1 +np.float64,0x7fefffffffffffff,0x40734413509f79ff,1 +np.float64,0x149dc7c6293ba,0xc0734bf26b1df025,1 +np.float64,0x64188f88c8313,0xc07340f7b8e6f4b5,1 +np.float64,0x3fdfac314abf5863,0xbfd38d3e9dba1b0e,1 +np.float64,0x3fd72052a42e40a5,0xbfdc4af30ee0b245,1 +np.float64,0x7fdd951f743b2a3e,0x40733eb68fafa838,1 +np.float64,0x65a2dd5acb45c,0xc07340dc8ed625e1,1 +np.float64,0x7fe89a79997134f2,0x4073423fbceb1cbe,1 +np.float64,0x3fe70a000d6e1400,0xbfc24381e09d02f7,1 +np.float64,0x3fe2cec160259d83,0xbfcd8b5e92354129,1 +np.float64,0x3feb9ef77a773def,0xbfb05c7b2ee6f388,1 +np.float64,0xe0d66689c1acd,0xc0733b582c779620,1 +np.float64,0x3fee86bd0ffd0d7a,0xbf94f7870502c325,1 +np.float64,0x186afc6230d60,0xc0734ac55fb66d5d,1 +np.float64,0xc0631f4b80c64,0xc0733c6d7149d373,1 +np.float64,0x3fdad1b87735a371,0xbfd82cca73ec663b,1 +np.float64,0x7fe7f6d313efeda5,0x40734210e84576ab,1 +np.float64,0x7fd7b7fce6af6ff9,0x40733d2d92ffdaaf,1 +np.float64,0x3fe6f35a28ade6b4,0xbfc27a4239b540c3,1 +np.float64,0x7fdb0b834eb61706,0x40733e17073a61f3,1 +np.float64,0x82f4661105e8d,0xc0733f19b34adeed,1 +np.float64,0x3fc77230112ee460,0xbfe796a7603c0d16,1 +np.float64,0x8000000000000000,0xfff0000000000000,1 +np.float64,0x7fb8317bc63062f7,0x407333aec761a739,1 +np.float64,0x7fd165609a22cac0,0x40733b061541ff15,1 +np.float64,0x3fed394768fa728f,0xbfa42e1596e1faf6,1 +np.float64,0x7febab693d7756d1,0x40734310a9ac828e,1 +np.float64,0x7fe809a69230134c,0x407342165b9acb69,1 +np.float64,0x3fc091d38f2123a7,0xbfec69a70fc23548,1 +np.float64,0x3fb2a8f5dc2551ec,0xbff2327f2641dd0d,1 +np.float64,0x7fc60b6fe02c16df,0x407337da5adc342c,1 +np.float64,0x3fefa53c3bbf4a78,0xbf73d1be15b73b00,1 +np.float64,0x7fee09c1717c1382,0x407343a2c479e1cb,1 +np.float64,0x8000000000000001,0x7ff8000000000000,1 +np.float64,0x3fede0b2733bc165,0xbf9e848ac2ecf604,1 +np.float64,0x3fee2ac331bc5586,0xbf9a3b699b721c9a,1 +np.float64,0x3fd4db12d829b626,0xbfdf2a413d1e453a,1 +np.float64,0x7fe605230dec0a45,0x4073417a67db06be,1 +np.float64,0x3fe378b2bf26f165,0xbfcb9dbb2b6d6832,1 +np.float64,0xc1d4c1ab83a98,0xc0733c60244cadbf,1 +np.float64,0x3feb15500e762aa0,0xbfb28c071d5efc22,1 +np.float64,0x3fe36225a626c44b,0xbfcbde4259e9047e,1 +np.float64,0x3fe7c586a72f8b0d,0xbfc08614b13ed4b2,1 +np.float64,0x7fb0f2d8cc21e5b1,0x40733135b2c7dd99,1 +np.float64,0x5957f3feb2aff,0xc07341c1df75638c,1 +np.float64,0x3fca4851bd3490a3,0xbfe6005ae5279485,1 +np.float64,0x824217d904843,0xc0733f232fd58f0f,1 +np.float64,0x4f9332269f267,0xc073428fd8e9cb32,1 +np.float64,0x3fea6f087374de11,0xbfb53ef0d03918b2,1 +np.float64,0x3fd9409ab4328135,0xbfd9d9231381e2b8,1 +np.float64,0x3fdba03b00374076,0xbfd759ec94a7ab5b,1 +np.float64,0x3fe0ce3766619c6f,0xbfd1e6912582ccf0,1 +np.float64,0x3fabd45ddc37a8bc,0xbff43c78d3188423,1 +np.float64,0x3fc3cadd592795bb,0xbfe9f1576c9b2c79,1 +np.float64,0x3fe10df049621be1,0xbfd17df2f2c28022,1 +np.float64,0x945b5d1328b6c,0xc0733e3bc06f1e75,1 +np.float64,0x7fc1c3742b2386e7,0x4073365a403d1051,1 +np.float64,0x7fdc957138b92ae1,0x40733e7977717586,1 +np.float64,0x7f943fa1a0287f42,0x407328d01de143f5,1 +np.float64,0x3fec9631c4392c64,0xbfa914b176d8f9d2,1 +np.float64,0x3fd8e7c008b1cf80,0xbfda3b9d9b6da8f4,1 +np.float64,0x7222f9fee4460,0xc073400e371516cc,1 +np.float64,0x3fe890e43eb121c8,0xbfbd64921462e823,1 +np.float64,0x3fcfd7fe2a3faffc,0xbfe3557e2f207800,1 +np.float64,0x3fed5dd1c1babba4,0xbfa318bb20db64e6,1 +np.float64,0x3fe6aa34c66d546a,0xbfc32c8a8991c11e,1 +np.float64,0x8ca79801196,0xc0736522bd5adf6a,1 +np.float64,0x3feb274079364e81,0xbfb2427b24b0ca20,1 +np.float64,0x7fe04927e4a0924f,0x40733f61c96f7f89,1 +np.float64,0x7c05f656f80bf,0xc0733f7a70555b4e,1 +np.float64,0x7fe97819eff2f033,0x4073427d4169b0f8,1 +np.float64,0x9def86e33bdf1,0xc0733dcc740b7175,1 +np.float64,0x7fedd1ef3f3ba3dd,0x40734395ceab8238,1 +np.float64,0x77bed86cef7dc,0xc0733fb8e0e9bf73,1 +np.float64,0x9274b41b24e97,0xc0733e52b16dff71,1 +np.float64,0x8010000000000000,0x7ff8000000000000,1 +np.float64,0x9c977855392ef,0xc0733ddba7d421d9,1 +np.float64,0xfb4560a3f68ac,0xc0733a9271e6a118,1 +np.float64,0xa67d9f394cfb4,0xc0733d6e9d58cc94,1 +np.float64,0x3fbfa766b03f4ecd,0xbfed0cccfecfc900,1 +np.float64,0x3fe177417522ee83,0xbfd0d45803bff01a,1 +np.float64,0x7fe85e077bb0bc0e,0x4073422e957a4aa3,1 +np.float64,0x7feeb0a6883d614c,0x407343c8f6568f7c,1 +np.float64,0xbab82edb75706,0xc0733ca2a2b20094,1 +np.float64,0xfadb44bdf5b69,0xc0733a9561b7ec04,1 +np.float64,0x3fefb9b82b3f7370,0xbf6ea776b2dcc3a9,1 +np.float64,0x7fe080ba8a610174,0x40733f795779b220,1 +np.float64,0x3f87faa1c02ff544,0xbffee76acafc92b7,1 +np.float64,0x7fed474108fa8e81,0x4073437531d4313e,1 +np.float64,0x3fdb7b229336f645,0xbfd77f583a4a067f,1 +np.float64,0x256dbf0c4adb9,0xc07347cd94e6fa81,1 +np.float64,0x3fd034ae25a0695c,0xbfe3169c15decdac,1 +np.float64,0x3a72177274e44,0xc07344b4cf7d68cd,1 +np.float64,0x7fa2522d5c24a45a,0x40732cef2f793470,1 +np.float64,0x3fb052bdde20a57c,0xbff3207fd413c848,1 +np.float64,0x3fdccfecbbb99fd9,0xbfd62ec04a1a687a,1 +np.float64,0x3fd403ac53280759,0xbfe027a31df2c8cc,1 +np.float64,0x3fab708e4036e11d,0xbff45591df4f2e8b,1 +np.float64,0x7fcfc001993f8002,0x40733a63539acf9d,1 +np.float64,0x3fd2b295dfa5652c,0xbfe119c1b476c536,1 +np.float64,0x7fe8061262b00c24,0x4073421552ae4538,1 +np.float64,0xffefffffffffffff,0x7ff8000000000000,1 +np.float64,0x7fed52093ffaa411,0x40734377c072a7e8,1 +np.float64,0xf3df902fe7bf2,0xc0733ac79a75ff7a,1 +np.float64,0x7fe13d382e227a6f,0x40733fc6fd0486bd,1 +np.float64,0x3621d5086c43b,0xc073453d31effbcd,1 +np.float64,0x3ff0000000000000,0x0,1 +np.float64,0x3fdaffea27b5ffd4,0xbfd7fd139dc1c2c5,1 +np.float64,0x7fea6536dc34ca6d,0x407342bccc564fdd,1 +np.float64,0x7fd478f00c28f1df,0x40733c27c0072fde,1 +np.float64,0x7fa72ef0502e5de0,0x40732e91e83db75c,1 +np.float64,0x7fd302970626052d,0x40733ba3ec6775f6,1 +np.float64,0x7fbb57ab0036af55,0x407334887348e613,1 +np.float64,0x3fda0ff722b41fee,0xbfd8f87b77930330,1 +np.float64,0x1e983ce23d309,0xc073493438f57e61,1 +np.float64,0x7fc90de97c321bd2,0x407338be01ffd4bd,1 +np.float64,0x7fe074b09c20e960,0x40733f7443f0dbe1,1 +np.float64,0x3fed5dec9fbabbd9,0xbfa317efb1fe8a95,1 +np.float64,0x7fdb877632b70eeb,0x40733e3697c88ba8,1 +np.float64,0x7fe4fb0067e9f600,0x40734124604b99e8,1 +np.float64,0x7fd447dc96288fb8,0x40733c1703ab2cce,1 +np.float64,0x3feb2d1e64f65a3d,0xbfb22a781df61c05,1 +np.float64,0xb6c8e6676d91d,0xc0733cc8859a0b91,1 +np.float64,0x3fdc3c2418387848,0xbfd6bec3a3c3cdb5,1 +np.float64,0x3fdecb9ccdbd973a,0xbfd4551c05721a8e,1 +np.float64,0x3feb1100e7762202,0xbfb29db911fe6768,1 +np.float64,0x3fe0444bc2a08898,0xbfd2ce69582e78c1,1 +np.float64,0x7fda403218b48063,0x40733de201d8340c,1 +np.float64,0x3fdc70421238e084,0xbfd68ba4bd48322b,1 +np.float64,0x3fe06e747c60dce9,0xbfd286bcac34a981,1 +np.float64,0x7fc1931d9623263a,0x407336473da54de4,1 +np.float64,0x229914da45323,0xc073485979ff141c,1 +np.float64,0x3fe142f92da285f2,0xbfd1280909992cb6,1 +np.float64,0xf1d02fa9e3a06,0xc0733ad6b19d71a0,1 +np.float64,0x3fb1fe9b0023fd36,0xbff27317d8252c16,1 +np.float64,0x3fa544b9242a8972,0xbff61ac38569bcfc,1 +np.float64,0x3feeb129d4fd6254,0xbf928f23ad20c1ee,1 +np.float64,0xa2510b7f44a22,0xc0733d9bc81ea0a1,1 +np.float64,0x3fca75694d34ead3,0xbfe5e8975b3646c2,1 +np.float64,0x7fece10621b9c20b,0x4073435cc3dd9a1b,1 +np.float64,0x7fe98a57d3b314af,0x4073428239b6a135,1 +np.float64,0x3fe259c62a64b38c,0xbfcee96682a0f355,1 +np.float64,0x3feaaa9b9d755537,0xbfb445779f3359af,1 +np.float64,0xdaadecfdb55be,0xc0733b899338432a,1 +np.float64,0x3fed00eae4fa01d6,0xbfa5dc8d77be5991,1 +np.float64,0x7fcc96c773392d8e,0x407339a8c5cd786e,1 +np.float64,0x3fef7b8b203ef716,0xbf7cff655ecb6424,1 +np.float64,0x7fd4008113a80101,0x40733bfe6552acb7,1 +np.float64,0x7fe99ff035b33fdf,0x407342881753ee2e,1 +np.float64,0x3ee031e87dc07,0xc0734432d736e492,1 +np.float64,0x3fddfe390f3bfc72,0xbfd510f1d9ec3e36,1 +np.float64,0x3fd9ddce74b3bb9d,0xbfd92e2d75a061bb,1 +np.float64,0x7fe5f742edebee85,0x40734176058e3a77,1 +np.float64,0x3fdb04185b360831,0xbfd7f8c63aa5e1c4,1 +np.float64,0xea2b0f43d4562,0xc0733b0fd77c8118,1 +np.float64,0x7fc3f4973527e92d,0x407337293bbb22c4,1 +np.float64,0x3fb9adfb38335bf6,0xbfeff4f3ea85821a,1 +np.float64,0x87fb98750ff73,0xc0733ed6ad83c269,1 +np.float64,0x3fe005721a200ae4,0xbfd33a9f1ebfb0ac,1 +np.float64,0xd9e04fe7b3c0a,0xc0733b901ee257f3,1 +np.float64,0x2c39102658723,0xc07346a4db63bf55,1 +np.float64,0x3f7dc28e003b851c,0xc0011c1d1233d948,1 +np.float64,0x3430fd3868620,0xc073457e24e0b70d,1 +np.float64,0xbff0000000000000,0x7ff8000000000000,1 +np.float64,0x3fd23e45e0247c8c,0xbfe17146bcf87b57,1 +np.float64,0x6599df3ecb33d,0xc07340dd2c41644c,1 +np.float64,0x3fdf074f31be0e9e,0xbfd41f6e9dbb68a5,1 +np.float64,0x7fdd6233f3bac467,0x40733eaa8f674b72,1 +np.float64,0x7fe03e8481607d08,0x40733f5d3df3b087,1 +np.float64,0x3fcc3b79f13876f4,0xbfe501bf3b379b77,1 +np.float64,0xe5d97ae3cbb30,0xc0733b30f47cbd12,1 +np.float64,0x8acbc4a115979,0xc0733eb240a4d2c6,1 +np.float64,0x3fedbdbc48bb7b79,0xbfa0470fd70c4359,1 +np.float64,0x3fde1611103c2c22,0xbfd4fae1fa8e7e5e,1 +np.float64,0x3fe09478bd2128f1,0xbfd246b7e85711dc,1 +np.float64,0x3fd6dfe8f3adbfd2,0xbfdc98ca2f32c1ad,1 +np.float64,0x72ccf274e599f,0xc0734003e5b0da63,1 +np.float64,0xe27c7265c4f8f,0xc0733b4b2d808566,1 +np.float64,0x7fee3161703c62c2,0x407343abe90f5649,1 +np.float64,0xf54fb5c1eaa0,0xc0734e01384fcf78,1 +np.float64,0xcde5924d9bcb3,0xc0733bf4b83c66c2,1 +np.float64,0x3fc46fdbe528dfb8,0xbfe97f55ef5e9683,1 +np.float64,0x7fe513528a2a26a4,0x4073412c69baceca,1 +np.float64,0x3fd29eca4aa53d95,0xbfe128801cd33ed0,1 +np.float64,0x7febb21718b7642d,0x4073431256def857,1 +np.float64,0x3fcab536c0356a6e,0xbfe5c73c59f41578,1 +np.float64,0x7fc7e9f0d82fd3e1,0x4073386b213e5dfe,1 +np.float64,0xb5b121276b624,0xc0733cd33083941c,1 +np.float64,0x7e0dd9bcfc1bc,0xc0733f5d8bf35050,1 +np.float64,0x3fd1c75106238ea2,0xbfe1cd11cccda0f4,1 +np.float64,0x9f060e673e0c2,0xc0733dc03da71909,1 +np.float64,0x7fd915a2f3322b45,0x40733d912af07189,1 +np.float64,0x3fd8cbae4431975d,0xbfda5b02ca661139,1 +np.float64,0x3fde8b411f3d1682,0xbfd48f6f710a53b6,1 +np.float64,0x3fc17a780622f4f0,0xbfebabb10c55255f,1 +np.float64,0x3fde5cbe5f3cb97d,0xbfd4b9e2e0101fb1,1 +np.float64,0x7fd859036530b206,0x40733d5c2252ff81,1 +np.float64,0xb0f5040f61ea1,0xc0733d02292f527b,1 +np.float64,0x3fde5c49ae3cb893,0xbfd4ba4db3ce2cf3,1 +np.float64,0x3fecc4518df988a3,0xbfa7af0bfc98bc65,1 +np.float64,0x3feffee03cbffdc0,0xbf0f3ede6ca7d695,1 +np.float64,0xbc5eac9b78bd6,0xc0733c92fb51c8ae,1 +np.float64,0x3fe2bb4ef765769e,0xbfcdc4f70a65dadc,1 +np.float64,0x5089443ca1129,0xc073427a7d0cde4a,1 +np.float64,0x3fd0d6e29121adc5,0xbfe28e28ece1db86,1 +np.float64,0xbe171e397c2e4,0xc0733c82cede5d02,1 +np.float64,0x4ede27be9dbc6,0xc073429fba1a4af1,1 +np.float64,0x3fe2aff3af655fe7,0xbfcde6b52a8ed3c1,1 +np.float64,0x7fd85ca295b0b944,0x40733d5d2adcccf1,1 +np.float64,0x24919bba49234,0xc07347f6ed704a6f,1 +np.float64,0x7fd74bc1eeae9783,0x40733d0d94a89011,1 +np.float64,0x3fc1cd12cb239a26,0xbfeb6a9c25c2a11d,1 +np.float64,0x3fdafbc0ac35f781,0xbfd8015ccf1f1b51,1 +np.float64,0x3fee01327c3c0265,0xbf9ca1d0d762dc18,1 +np.float64,0x3fe65bd7702cb7af,0xbfc3ee0de5c36b8d,1 +np.float64,0x7349c82ee693a,0xc0733ffc5b6eccf2,1 +np.float64,0x3fdc5906f738b20e,0xbfd6a26288eb5933,1 +np.float64,0x1,0xc07434e6420f4374,1 +np.float64,0x3fb966128a32cc25,0xbff00e0aa7273838,1 +np.float64,0x3fd501ff9a2a03ff,0xbfdef69133482121,1 +np.float64,0x194d4f3c329ab,0xc0734a861b44cfbe,1 +np.float64,0x3fec5d34f8f8ba6a,0xbfaad1b31510e70b,1 +np.float64,0x1635e4c22c6be,0xc0734b6dec650943,1 +np.float64,0x3fead2f8edb5a5f2,0xbfb39dac30a962cf,1 +np.float64,0x3f7dfa4ce03bf49a,0xc00115a112141aa7,1 +np.float64,0x3fef6827223ed04e,0xbf80a42c9edebfe9,1 +np.float64,0xe771f303cee3f,0xc0733b24a6269fe4,1 +np.float64,0x1160ccc622c1b,0xc0734d22604eacb9,1 +np.float64,0x3fc485cd08290b9a,0xbfe970723008c8c9,1 +np.float64,0x7fef99c518bf3389,0x407343fcf9ed202f,1 +np.float64,0x7fd8c1447a318288,0x40733d79a440b44d,1 +np.float64,0xaf219f955e434,0xc0733d149c13f440,1 +np.float64,0xcf45f6239e8bf,0xc0733be8ddda045d,1 +np.float64,0x7599394aeb328,0xc0733fd90fdbb0ea,1 +np.float64,0xc7f6390f8fec7,0xc0733c28bfbc66a3,1 +np.float64,0x3fd39ae96c2735d3,0xbfe0712274a8742b,1 +np.float64,0xa4d6c18f49ad8,0xc0733d805a0528f7,1 +np.float64,0x7fd9ea78d7b3d4f1,0x40733dcb2b74802a,1 +np.float64,0x3fecd251cb39a4a4,0xbfa742ed41d4ae57,1 +np.float64,0x7fed7a07cd7af40f,0x407343813476027e,1 +np.float64,0x3fd328ae7f26515d,0xbfe0c30b56a83c64,1 +np.float64,0x7fc937ff7a326ffe,0x407338c9a45b9140,1 +np.float64,0x3fcf1d31143e3a62,0xbfe3a7f760fbd6a8,1 +np.float64,0x7fb911dcbc3223b8,0x407333ee158cccc7,1 +np.float64,0x3fd352fc83a6a5f9,0xbfe0a47d2f74d283,1 +np.float64,0x7fd310753fa620e9,0x40733ba8fc4300dd,1 +np.float64,0x3febd64b4577ac97,0xbfaefd4a79f95c4b,1 +np.float64,0x6a6961a4d4d2d,0xc073408ae1687943,1 +np.float64,0x3fe4ba73d16974e8,0xbfc8239341b9e457,1 +np.float64,0x3fed8e7cac3b1cf9,0xbfa1a96a0cc5fcdc,1 +np.float64,0x7fd505ec04aa0bd7,0x40733c56f86e3531,1 +np.float64,0x3fdf166e9abe2cdd,0xbfd411e5f8569d70,1 +np.float64,0x7fe1bc6434e378c7,0x40733ff9861bdabb,1 +np.float64,0x3fd3b0b175a76163,0xbfe061ba5703f3c8,1 +np.float64,0x7fed75d7ffbaebaf,0x4073438037ba6f19,1 +np.float64,0x5a9e109cb53c3,0xc07341a8b04819c8,1 +np.float64,0x3fe14786b4e28f0d,0xbfd120b541bb880e,1 +np.float64,0x3fed4948573a9291,0xbfa3b471ff91614b,1 +np.float64,0x66aac5d8cd559,0xc07340ca9b18af46,1 +np.float64,0x3fdb48efd23691e0,0xbfd7b24c5694838b,1 +np.float64,0x7fe6da7d1eadb4f9,0x407341bc7d1fae43,1 +np.float64,0x7feb702cf336e059,0x40734301b96cc3c0,1 +np.float64,0x3fd1e60987a3cc13,0xbfe1b522cfcc3d0e,1 +np.float64,0x3feca57f50794aff,0xbfa89dc90625d39c,1 +np.float64,0x7fdc46dc56b88db8,0x40733e664294a0f9,1 +np.float64,0x8dc8fd811b920,0xc0733e8c5955df06,1 +np.float64,0xf01634abe02c7,0xc0733ae370a76d0c,1 +np.float64,0x3fc6f8d8ab2df1b1,0xbfe7df5093829464,1 +np.float64,0xda3d7597b47af,0xc0733b8d2702727a,1 +np.float64,0x7feefd53227dfaa5,0x407343da3d04db28,1 +np.float64,0x3fe2fbca3525f794,0xbfcd06e134417c08,1 +np.float64,0x7fd36d3ce226da79,0x40733bca7c322df1,1 +np.float64,0x7fec37e00b786fbf,0x4073433397b48a5b,1 +np.float64,0x3fbf133f163e267e,0xbfed4e72f1362a77,1 +np.float64,0x3fc11efbb9223df7,0xbfebf53002a561fe,1 +np.float64,0x3fc89c0e5431381d,0xbfe6ea562364bf81,1 +np.float64,0x3f9cd45da839a8bb,0xbff8ceb14669ee4b,1 +np.float64,0x23dc8fa647b93,0xc0734819aaa9b0ee,1 +np.float64,0x3fe829110d305222,0xbfbf3e60c45e2399,1 +np.float64,0x7fed8144e57b0289,0x40734382e917a02a,1 +np.float64,0x7fe033fbf7a067f7,0x40733f58bb00b20f,1 +np.float64,0xe3807f45c7010,0xc0733b43379415d1,1 +np.float64,0x3fd708fb342e11f6,0xbfdc670ef9793782,1 +np.float64,0x3fe88c924b311925,0xbfbd78210d9e7164,1 +np.float64,0x3fe0a2a7c7614550,0xbfd22efaf0472c4a,1 +np.float64,0x7fe3a37501a746e9,0x407340aecaeade41,1 +np.float64,0x3fd05077ec20a0f0,0xbfe2fedbf07a5302,1 +np.float64,0x7fd33bf61da677eb,0x40733bb8c58912aa,1 +np.float64,0x3feb29bdae76537b,0xbfb2384a8f61b5f9,1 +np.float64,0x3fec0fc14ff81f83,0xbfad3423e7ade174,1 +np.float64,0x3fd0f8b1a1a1f163,0xbfe2725dd4ccea8b,1 +np.float64,0x3fe382d26a6705a5,0xbfcb80dba4218bdf,1 +np.float64,0x3fa873f2cc30e7e6,0xbff522911cb34279,1 +np.float64,0x7fed7fd7377affad,0x4073438292f6829b,1 +np.float64,0x3feeacd8067d59b0,0xbf92cdbeda94b35e,1 +np.float64,0x7fe464d62228c9ab,0x407340f1eee19aa9,1 +np.float64,0xe997648bd32ed,0xc0733b143aa0fad3,1 +np.float64,0x7fea4869f13490d3,0x407342b5333b54f7,1 +np.float64,0x935b871926b71,0xc0733e47c6683319,1 +np.float64,0x28a9d0c05155,0xc0735a7e3532af83,1 +np.float64,0x79026548f204d,0xc0733fa6339ffa2f,1 +np.float64,0x3fdb1daaabb63b55,0xbfd7de839c240ace,1 +np.float64,0x3fc0db73b421b6e7,0xbfec2c6e36c4f416,1 +np.float64,0xb8b50ac1716b,0xc0734ff9fc60ebce,1 +np.float64,0x7fdf13e0c6be27c1,0x40733f0e44f69437,1 +np.float64,0x3fcd0cb97b3a1973,0xbfe49c34ff531273,1 +np.float64,0x3fcbac034b375807,0xbfe54913d73f180d,1 +np.float64,0x3fe091d2a2e123a5,0xbfd24b290a9218de,1 +np.float64,0xede43627dbc87,0xc0733af3c7c7f716,1 +np.float64,0x7fc037e7ed206fcf,0x407335b85fb0fedb,1 +np.float64,0x3fce7ae4c63cf5ca,0xbfe3f1350fe03f28,1 +np.float64,0x7fcdd862263bb0c3,0x407339f5458bb20e,1 +np.float64,0x4d7adf709af5d,0xc07342bf4edfadb2,1 +np.float64,0xdc6c03f3b8d81,0xc0733b7b74d6a635,1 +np.float64,0x3fe72ae0a4ee55c1,0xbfc1f4665608b21f,1 +np.float64,0xcd62f19d9ac5e,0xc0733bf92235e4d8,1 +np.float64,0xe3a7b8fdc74f7,0xc0733b4204f8e166,1 +np.float64,0x3fdafd35adb5fa6b,0xbfd7ffdca0753b36,1 +np.float64,0x3fa023e8702047d1,0xbff8059150ea1464,1 +np.float64,0x99ff336933fe7,0xc0733df961197517,1 +np.float64,0x7feeb365b9bd66ca,0x407343c995864091,1 +np.float64,0x7fe449b49f689368,0x407340e8aa3369e3,1 +np.float64,0x7faf5843043eb085,0x407330aa700136ca,1 +np.float64,0x3fd47b2922a8f652,0xbfdfab3de86f09ee,1 +np.float64,0x7fd9fc3248b3f864,0x40733dcfea6f9b3e,1 +np.float64,0xe20b0d8dc4162,0xc0733b4ea8fe7b3f,1 +np.float64,0x7feff8e0e23ff1c1,0x40734411c490ed70,1 +np.float64,0x7fa58382d02b0705,0x40732e0cf28e14fe,1 +np.float64,0xb8ad9a1b715b4,0xc0733cb630b8f2d4,1 +np.float64,0xe90abcf1d2158,0xc0733b186b04eeee,1 +np.float64,0x7fd6aa6f32ad54dd,0x40733cdccc636604,1 +np.float64,0x3fd8f84eedb1f09e,0xbfda292909a5298a,1 +np.float64,0x7fecd6b1d9f9ad63,0x4073435a472b05b5,1 +np.float64,0x3fd9f47604b3e8ec,0xbfd915e028cbf4a6,1 +np.float64,0x3fd20d9398241b27,0xbfe19691363dd508,1 +np.float64,0x3fe5ed09bbabda13,0xbfc5043dfc9c8081,1 +np.float64,0x7fbe5265363ca4c9,0x407335406f8e4fac,1 +np.float64,0xac2878af5850f,0xc0733d3311be9786,1 +np.float64,0xac2074555840f,0xc0733d3364970018,1 +np.float64,0x3fcd49b96b3a9373,0xbfe47f24c8181d9c,1 +np.float64,0x3fd10caca6a21959,0xbfe2620ae5594f9a,1 +np.float64,0xec5b87e9d8b71,0xc0733aff499e72ca,1 +np.float64,0x9d5e9fad3abd4,0xc0733dd2d70eeb4a,1 +np.float64,0x7fe3d3a24227a744,0x407340bfc2072fdb,1 +np.float64,0x3fc5f7a77c2bef4f,0xbfe87e69d502d784,1 +np.float64,0x33161a66662c4,0xc07345a436308244,1 +np.float64,0xa27acdc744f5a,0xc0733d99feb3d8ea,1 +np.float64,0x3fe2d9301565b260,0xbfcd6c914e204437,1 +np.float64,0x7fd5d111e12ba223,0x40733c98e14a6fd0,1 +np.float64,0x6c3387bed8672,0xc073406d3648171a,1 +np.float64,0x24d89fe849b15,0xc07347e97bec008c,1 +np.float64,0x3fefd763677faec7,0xbf61ae69caa9cad9,1 +np.float64,0x7fe0a4684ba148d0,0x40733f884d32c464,1 +np.float64,0x3fd5c3c939ab8792,0xbfddfaaefc1c7fca,1 +np.float64,0x3fec9b87a6b9370f,0xbfa8eb34efcc6b9b,1 +np.float64,0x3feb062431f60c48,0xbfb2ca6036698877,1 +np.float64,0x3fef97f6633f2fed,0xbf76bc742860a340,1 +np.float64,0x74477490e88ef,0xc0733fed220986bc,1 +np.float64,0x3fe4bea67ce97d4d,0xbfc818525292b0f6,1 +np.float64,0x3fc6add3a92d5ba7,0xbfe80cfdc9a90bda,1 +np.float64,0x847c9ce308f94,0xc0733f05026f5965,1 +np.float64,0x7fea53fd2eb4a7f9,0x407342b841fc4723,1 +np.float64,0x3fc55a16fc2ab42e,0xbfe8e3849130da34,1 +np.float64,0x3fbdf7d07c3befa1,0xbfedcf84b9c6c161,1 +np.float64,0x3fe5fb25aa6bf64b,0xbfc4e083ff96b116,1 +np.float64,0x61c776a8c38ef,0xc0734121611d84d7,1 +np.float64,0x3fec413164f88263,0xbfabadbd05131546,1 +np.float64,0x9bf06fe137e0e,0xc0733de315469ee0,1 +np.float64,0x2075eefc40ebf,0xc07348cae84de924,1 +np.float64,0x3fdd42e0143a85c0,0xbfd5c0b6f60b3cea,1 +np.float64,0xdbb1ab45b7636,0xc0733b8157329daf,1 +np.float64,0x3feac6d56bf58dab,0xbfb3d00771b28621,1 +np.float64,0x7fb2dc825025b904,0x407331f3e950751a,1 +np.float64,0x3fecea6efd79d4de,0xbfa689309cc0e3fe,1 +np.float64,0x3fd83abec7b0757e,0xbfdaff5c674a9c59,1 +np.float64,0x3fd396f7c0272df0,0xbfe073ee75c414ba,1 +np.float64,0x3fe10036c162006e,0xbfd1945a38342ae1,1 +np.float64,0x3fd5bbded52b77be,0xbfde04cca40d4156,1 +np.float64,0x3fe870945ab0e129,0xbfbdf72f0e6206fa,1 +np.float64,0x3fef72fddcbee5fc,0xbf7ee2dba88b1bad,1 +np.float64,0x4e111aa09c224,0xc07342b1e2b29643,1 +np.float64,0x3fd926d8b5b24db1,0xbfd9f58b78d6b061,1 +np.float64,0x3fc55679172aacf2,0xbfe8e5df687842e2,1 +np.float64,0x7f5f1749803e2e92,0x40731886e16cfc4d,1 +np.float64,0x7fea082b53b41056,0x407342a42227700e,1 +np.float64,0x3fece1d1d039c3a4,0xbfa6cb780988a469,1 +np.float64,0x3b2721d8764e5,0xc073449f6a5a4832,1 +np.float64,0x365cb7006cba,0xc0735879ba5f0b6e,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0x7fe606ce92ac0d9c,0x4073417aeebe97e8,1 +np.float64,0x3fe237b544a46f6b,0xbfcf50f8f76d7df9,1 +np.float64,0x3fe7265e5eee4cbd,0xbfc1ff39089ec8d0,1 +np.float64,0x7fe2bb3c5ea57678,0x4073405aaad81cf2,1 +np.float64,0x3fd811df84b023bf,0xbfdb2e670ea8d8de,1 +np.float64,0x3f6a0efd00341dfa,0xc003fac1ae831241,1 +np.float64,0x3fd0d214afa1a429,0xbfe2922080a91c72,1 +np.float64,0x3feca6a350b94d47,0xbfa894eea3a96809,1 +np.float64,0x7fe23e5c76247cb8,0x4073402bbaaf71c7,1 +np.float64,0x3fe739a1fdae7344,0xbfc1d109f66efb5d,1 +np.float64,0x3fdf4b8e283e971c,0xbfd3e28f46169cc5,1 +np.float64,0x38f2535271e4b,0xc07344e3085219fa,1 +np.float64,0x7fd263a0f9a4c741,0x40733b68d945dae0,1 +np.float64,0x7fdd941863bb2830,0x40733eb651e3dca9,1 +np.float64,0xace7279159ce5,0xc0733d2b63b5947e,1 +np.float64,0x7fe34670b2268ce0,0x4073408d92770cb5,1 +np.float64,0x7fd11fa6dfa23f4d,0x40733aea02e76ea3,1 +np.float64,0x3fe6d9cbca6db398,0xbfc2b84b5c8c7eab,1 +np.float64,0x3fd69a0274ad3405,0xbfdcee3c7e52c463,1 +np.float64,0x3feb5af671f6b5ed,0xbfb16f88d739477f,1 +np.float64,0x3feea400163d4800,0xbf934e071c64fd0b,1 +np.float64,0x3fefd6bcf17fad7a,0xbf61f711c392b119,1 +np.float64,0x3fe148d43da291a8,0xbfd11e9cd3f91cd3,1 +np.float64,0x7fedf1308b7be260,0x4073439d135656da,1 +np.float64,0x3fe614c99c6c2993,0xbfc49fd1984dfd6d,1 +np.float64,0xd6e8d4e5add1b,0xc0733ba88256026e,1 +np.float64,0xfff0000000000000,0x7ff8000000000000,1 +np.float64,0x3fb530b5562a616b,0xbff1504bcc5c8f73,1 +np.float64,0xb7da68396fb4d,0xc0733cbe2790f52e,1 +np.float64,0x7fad78e26c3af1c4,0x4073303cdbfb0a15,1 +np.float64,0x7fee5698447cad30,0x407343b474573a8b,1 +np.float64,0x3fd488325c291065,0xbfdf999296d901e7,1 +np.float64,0x2669283a4cd26,0xc073479f823109a4,1 +np.float64,0x7fef3b090afe7611,0x407343e805a3b264,1 +np.float64,0x7fe8b96ae0f172d5,0x4073424874a342ab,1 +np.float64,0x7fef409f56fe813e,0x407343e943c3cd44,1 +np.float64,0x3fed28073dfa500e,0xbfa4b17e4cd31a3a,1 +np.float64,0x7f87ecc4802fd988,0x40732527e027b24b,1 +np.float64,0x3fdda24da0bb449b,0xbfd566a43ac035af,1 +np.float64,0x179fc9e62f3fa,0xc0734b0028c80fc1,1 +np.float64,0x3fef85b0927f0b61,0xbf7ac27565d5ab4f,1 +np.float64,0x5631501aac62b,0xc0734201be12c5d4,1 +np.float64,0x3fd782e424af05c8,0xbfdbd57544f8a7c3,1 +np.float64,0x3fe603a9a6ac0753,0xbfc4caff04dc3caf,1 +np.float64,0x7fbd5225163aa449,0x40733504b88f0a56,1 +np.float64,0x3fecd27506b9a4ea,0xbfa741dd70e6b08c,1 +np.float64,0x9c99603b3932c,0xc0733ddb922dc5db,1 +np.float64,0x3fbeb57f1a3d6afe,0xbfed789ff217aa08,1 +np.float64,0x3fef9c0f85bf381f,0xbf75d5c3d6cb281a,1 +np.float64,0x3fde4afb613c95f7,0xbfd4ca2a231c9005,1 +np.float64,0x396233d472c47,0xc07344d56ee70631,1 +np.float64,0x3fb31ea1c6263d44,0xbff207356152138d,1 +np.float64,0x3fe50bdf78aa17bf,0xbfc74ae0cbffb735,1 +np.float64,0xef74c701dee99,0xc0733ae81e4bb443,1 +np.float64,0x9a3e13a1347c3,0xc0733df68b60afc7,1 +np.float64,0x33ba4f886774b,0xc073458e03f0c13e,1 +np.float64,0x3fe8ba0e9931741d,0xbfbcaadf974e8f64,1 +np.float64,0x3fe090a4cd61214a,0xbfd24d236cf365d6,1 +np.float64,0x7fd87d992930fb31,0x40733d668b73b820,1 +np.float64,0x3fe6422b296c8456,0xbfc42e070b695d01,1 +np.float64,0x3febe9334677d267,0xbfae667864606cfe,1 +np.float64,0x771a3ce4ee348,0xc0733fc274d12c97,1 +np.float64,0x3fe0413542e0826b,0xbfd2d3b08fb5b8a6,1 +np.float64,0x3fd00870ea2010e2,0xbfe33cc04cbd42e0,1 +np.float64,0x3fe74fb817ae9f70,0xbfc19c45dbf919e1,1 +np.float64,0x40382fa08071,0xc07357514ced5577,1 +np.float64,0xa14968474292d,0xc0733da71a990f3a,1 +np.float64,0x5487c740a90fa,0xc0734224622d5801,1 +np.float64,0x3fed7d8d14fafb1a,0xbfa228f7ecc2ac03,1 +np.float64,0x3fe39bb485e73769,0xbfcb3a235a722960,1 +np.float64,0x3fd01090b2202121,0xbfe335b752589a22,1 +np.float64,0x3fd21a3e7da4347d,0xbfe18cd435a7c582,1 +np.float64,0x3fe7fa855a2ff50b,0xbfc00ab0665709fe,1 +np.float64,0x3fedc0d4577b81a9,0xbfa02fef3ff553fc,1 +np.float64,0x3fe99d4906333a92,0xbfb8bf18220e5e8e,1 +np.float64,0x3fd944ee3c3289dc,0xbfd9d46071675e73,1 +np.float64,0x3fe3ed8d52e7db1b,0xbfca53f8d4aef484,1 +np.float64,0x7fe748623a6e90c3,0x407341dd97c9dd79,1 +np.float64,0x3fea1b4b98343697,0xbfb6a1560a56927f,1 +np.float64,0xe1215715c242b,0xc0733b55dbf1f0a8,1 +np.float64,0x3fd0d5bccca1ab7a,0xbfe28f1b66d7a470,1 +np.float64,0x881a962710353,0xc0733ed51848a30d,1 +np.float64,0x3fcf022afe3e0456,0xbfe3b40eabf24501,1 +np.float64,0x3fdf1ac6bbbe358d,0xbfd40e03e888288d,1 +np.float64,0x3fa51a5eac2a34bd,0xbff628a7c34d51b3,1 +np.float64,0x3fdbaf408d375e81,0xbfd74ad39d97c92a,1 +np.float64,0x3fcd2418ea3a4832,0xbfe4910b009d8b11,1 +np.float64,0x3fc7b3062a2f660c,0xbfe7706dc47993e1,1 +np.float64,0x7fb8232218304643,0x407333aaa7041a9f,1 +np.float64,0x7fd5f186362be30b,0x40733ca32fdf9cc6,1 +np.float64,0x3fe57ef1d6aafde4,0xbfc61e23d00210c7,1 +np.float64,0x7c6830baf8d07,0xc0733f74f19e9dad,1 +np.float64,0xcacbfd5595980,0xc0733c0fb49edca7,1 +np.float64,0x3fdfdeac873fbd59,0xbfd36114c56bed03,1 +np.float64,0x3fd31f0889263e11,0xbfe0ca0cc1250169,1 +np.float64,0x3fe839fbe47073f8,0xbfbef0a2abc3d63f,1 +np.float64,0x3fc36af57e26d5eb,0xbfea3553f38770b7,1 +np.float64,0x3fe73dbc44ee7b79,0xbfc1c738f8fa6b3d,1 +np.float64,0x3fd3760e4da6ec1d,0xbfe08b5b609d11e5,1 +np.float64,0x3fee1cfa297c39f4,0xbf9b06d081bc9d5b,1 +np.float64,0xdfb01561bf61,0xc0734ea55e559888,1 +np.float64,0x687bd01cd0f7b,0xc07340ab67fe1816,1 +np.float64,0x3fefc88f4cbf911f,0xbf6828c359cf19dc,1 +np.float64,0x8ad34adb15a6a,0xc0733eb1e03811e5,1 +np.float64,0x3fe2b49c12e56938,0xbfcdd8dbdbc0ce59,1 +np.float64,0x6e05037adc0a1,0xc073404f91261635,1 +np.float64,0x3fe2fd737fe5fae7,0xbfcd020407ef4d78,1 +np.float64,0x3fd0f3c0dc21e782,0xbfe2766a1ab02eae,1 +np.float64,0x28564d9850acb,0xc073474875f87c5e,1 +np.float64,0x3fe4758015a8eb00,0xbfc8ddb45134a1bd,1 +np.float64,0x7fe7f19306efe325,0x4073420f626141a7,1 +np.float64,0x7fd27f34c0a4fe69,0x40733b733d2a5b50,1 +np.float64,0x92c2366325847,0xc0733e4f04f8195a,1 +np.float64,0x3fc21f8441243f09,0xbfeb2ad23bc1ab0b,1 +np.float64,0x3fc721d3e42e43a8,0xbfe7c69bb47b40c2,1 +np.float64,0x3fe2f11a1625e234,0xbfcd26363b9c36c3,1 +np.float64,0x3fdcb585acb96b0b,0xbfd648446237cb55,1 +np.float64,0x3fd4060bf2280c18,0xbfe025fd4c8a658b,1 +np.float64,0x7fb8ae2750315c4e,0x407333d23b025d08,1 +np.float64,0x3fe3a03119a74062,0xbfcb2d6c91b38552,1 +np.float64,0x7fdd2af92bba55f1,0x40733e9d737e16e6,1 +np.float64,0x3fe50b05862a160b,0xbfc74d20815fe36b,1 +np.float64,0x164409f82c882,0xc0734b6980e19c03,1 +np.float64,0x3fe4093712a8126e,0xbfca070367fda5e3,1 +np.float64,0xae3049935c609,0xc0733d1e3608797b,1 +np.float64,0x3fd71df4b4ae3be9,0xbfdc4dcb7637600d,1 +np.float64,0x7fca01e8023403cf,0x407339006c521c49,1 +np.float64,0x3fb0c5c43e218b88,0xbff2f03211c63f25,1 +np.float64,0x3fee757af83ceaf6,0xbf95f33a6e56b454,1 +np.float64,0x3f865f1f402cbe3f,0xbfff62d9c9072bd7,1 +np.float64,0x89864e95130ca,0xc0733ec29f1e32c6,1 +np.float64,0x3fe51482bcea2905,0xbfc73414ddc8f1b7,1 +np.float64,0x7fd802f8fa3005f1,0x40733d43684e460a,1 +np.float64,0x3fbeb86ca63d70d9,0xbfed774ccca9b8f5,1 +np.float64,0x3fb355dcc826abba,0xbff1f33f9339e7a3,1 +np.float64,0x3fe506c61eaa0d8c,0xbfc7585a3f7565a6,1 +np.float64,0x7fe393f25ba727e4,0x407340a94bcea73b,1 +np.float64,0xf66f532decdeb,0xc0733ab5041feb0f,1 +np.float64,0x3fe26e872be4dd0e,0xbfceaaab466f32e0,1 +np.float64,0x3fefd9e290bfb3c5,0xbf60977d24496295,1 +np.float64,0x7fe19c5f692338be,0x40733fecef53ad95,1 +np.float64,0x3fe80365ab3006cb,0xbfbfec4090ef76ec,1 +np.float64,0x3fe88ab39eb11567,0xbfbd8099388d054d,1 +np.float64,0x3fe68fb09fad1f61,0xbfc36db9de38c2c0,1 +np.float64,0x3fe9051883b20a31,0xbfbb5b75b8cb8f24,1 +np.float64,0x3fd4708683a8e10d,0xbfdfb9b085dd8a83,1 +np.float64,0x3fe00ac11a601582,0xbfd3316af3e43500,1 +np.float64,0xd16af30ba2d5f,0xc0733bd68e8252f9,1 +np.float64,0x3fb97d654632facb,0xbff007ac1257f575,1 +np.float64,0x7fd637c10fac6f81,0x40733cb949d76546,1 +np.float64,0x7fed2cab6dba5956,0x4073436edfc3764e,1 +np.float64,0x3fed04afbbba095f,0xbfa5bfaa5074b7f4,1 +np.float64,0x0,0xfff0000000000000,1 +np.float64,0x389a1dc671345,0xc07344edd4206338,1 +np.float64,0x3fbc9ba25a393745,0xbfee74c34f49b921,1 +np.float64,0x3feee749947dce93,0xbf8f032d9cf6b5ae,1 +np.float64,0xedc4cf89db89a,0xc0733af4b2a57920,1 +np.float64,0x3fe41629eba82c54,0xbfc9e321faf79e1c,1 +np.float64,0x3feb0bcbf7b61798,0xbfb2b31e5d952869,1 +np.float64,0xad60654b5ac0d,0xc0733d26860df676,1 +np.float64,0x3fe154e1ff22a9c4,0xbfd10b416e58c867,1 +np.float64,0x7fb20e9c8a241d38,0x407331a66453b8bc,1 +np.float64,0x7fcbbaaf7d37755e,0x4073397274f28008,1 +np.float64,0x187d0fbc30fa3,0xc0734ac03cc98cc9,1 +np.float64,0x7fd153afeaa2a75f,0x40733aff00b4311d,1 +np.float64,0x3fe05310a5e0a621,0xbfd2b5386aeecaac,1 +np.float64,0x7fea863b2b750c75,0x407342c57807f700,1 +np.float64,0x3fed5f0c633abe19,0xbfa30f6cfbc4bf94,1 +np.float64,0xf227c8b3e44f9,0xc0733ad42daaec9f,1 +np.float64,0x3fe956524772aca5,0xbfb9f4cabed7081d,1 +np.float64,0xefd11af7dfa24,0xc0733ae570ed2552,1 +np.float64,0x1690fff02d221,0xc0734b51a56c2980,1 +np.float64,0x7fd2e547a825ca8e,0x40733b992d6d9635,1 diff --git a/local_packages/numpy/_core/tests/data/umath-validation-set-log1p.csv b/local_packages/numpy/_core/tests/data/umath-validation-set-log1p.csv new file mode 100644 index 0000000..094e052 --- /dev/null +++ b/local_packages/numpy/_core/tests/data/umath-validation-set-log1p.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0x3e10aca8,0x3e075347,2 +np.float32,0x3f776e66,0x3f2d2003,2 +np.float32,0xbf34e8ce,0xbf9cfd5c,2 +np.float32,0xbf0260ee,0xbf363f69,2 +np.float32,0x3ed285e8,0x3eb05870,2 +np.float32,0x262b88,0x262b88,2 +np.float32,0x3eeffd6c,0x3ec4cfdb,2 +np.float32,0x3ee86808,0x3ebf9f54,2 +np.float32,0x3f36eba8,0x3f0a0524,2 +np.float32,0xbf1c047a,0xbf70afc7,2 +np.float32,0x3ead2916,0x3e952902,2 +np.float32,0x61c9c9,0x61c9c9,2 +np.float32,0xff7fffff,0xffc00000,2 +np.float32,0x7f64ee52,0x42b138e0,2 +np.float32,0x7ed00b1e,0x42afa4ff,2 +np.float32,0x3db53340,0x3dada0b2,2 +np.float32,0x3e6b0a4a,0x3e5397a4,2 +np.float32,0x7ed5d64f,0x42afb310,2 +np.float32,0xbf12bc5f,0xbf59f5ee,2 +np.float32,0xbda12710,0xbda7d8b5,2 +np.float32,0xbe2e89d8,0xbe3f5a9f,2 +np.float32,0x3f5bee75,0x3f1ebea4,2 +np.float32,0x9317a,0x9317a,2 +np.float32,0x7ee00130,0x42afcad8,2 +np.float32,0x7ef0d16d,0x42afefe7,2 +np.float32,0xbec7463a,0xbefc6a44,2 +np.float32,0xbf760ecc,0xc04fe59c,2 +np.float32,0xbecacb3c,0xbf011ae3,2 +np.float32,0x3ead92be,0x3e9577f0,2 +np.float32,0xbf41510d,0xbfb41b3a,2 +np.float32,0x7f71d489,0x42b154f1,2 +np.float32,0x8023bcd5,0x8023bcd5,2 +np.float32,0x801d33d8,0x801d33d8,2 +np.float32,0x3f3f545d,0x3f0ee0d4,2 +np.float32,0xbf700682,0xc0318c25,2 +np.float32,0xbe54e990,0xbe6eb0a3,2 +np.float32,0x7f0289bf,0x42b01941,2 +np.float32,0xbd61ac90,0xbd682113,2 +np.float32,0xbf2ff310,0xbf94cd6f,2 +np.float32,0x7f10064a,0x42b04b98,2 +np.float32,0x804d0d6d,0x804d0d6d,2 +np.float32,0x80317b0a,0x80317b0a,2 +np.float32,0xbddfef18,0xbded2640,2 +np.float32,0x3f00c9ab,0x3ed0a5bd,2 +np.float32,0x7f04b905,0x42b021c1,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0x6524c4,0x6524c4,2 +np.float32,0x3da08ae0,0x3d9a8f88,2 +np.float32,0x293ea9,0x293ea9,2 +np.float32,0x71499e,0x71499e,2 +np.float32,0xbf14f54d,0xbf5f38a5,2 +np.float32,0x806e60f5,0x806e60f5,2 +np.float32,0x3f5f34bb,0x3f207fff,2 +np.float32,0x80513427,0x80513427,2 +np.float32,0x7f379670,0x42b0c7dc,2 +np.float32,0x3efba888,0x3eccb20b,2 +np.float32,0x3eeadd1b,0x3ec14f4b,2 +np.float32,0x7ec5a27f,0x42af8ab8,2 +np.float32,0x3f2afe4e,0x3f02f7a2,2 +np.float32,0x5591c8,0x5591c8,2 +np.float32,0x3dbb7240,0x3db35bab,2 +np.float32,0x805b911b,0x805b911b,2 +np.float32,0x800000,0x800000,2 +np.float32,0x7e784c04,0x42ae9cab,2 +np.float32,0x7ebaae14,0x42af6d86,2 +np.float32,0xbec84f7a,0xbefe1d42,2 +np.float32,0x7cea8281,0x42aa56bf,2 +np.float32,0xbf542cf6,0xbfe1eb1b,2 +np.float32,0xbf6bfb13,0xc0231a5b,2 +np.float32,0x7d6eeaef,0x42abc32c,2 +np.float32,0xbf062f6b,0xbf3e2000,2 +np.float32,0x8073d8e9,0x8073d8e9,2 +np.float32,0xbea4db14,0xbec6f485,2 +np.float32,0x7d7e8d62,0x42abe3a0,2 +np.float32,0x7e8fc34e,0x42aee7c6,2 +np.float32,0x7dcbb0c3,0x42acd464,2 +np.float32,0x7e123c,0x7e123c,2 +np.float32,0x3d77af62,0x3d707c34,2 +np.float32,0x498cc8,0x498cc8,2 +np.float32,0x7f4e2206,0x42b1032a,2 +np.float32,0x3f734e0a,0x3f2b04a1,2 +np.float32,0x8053a9d0,0x8053a9d0,2 +np.float32,0xbe8a67e0,0xbea15be9,2 +np.float32,0xbf78e0ea,0xc065409e,2 +np.float32,0x352bdd,0x352bdd,2 +np.float32,0x3ee42be7,0x3ebcb38a,2 +np.float32,0x7f482d10,0x42b0f427,2 +np.float32,0xbf23155e,0xbf81b993,2 +np.float32,0x594920,0x594920,2 +np.float32,0x63f53f,0x63f53f,2 +np.float32,0x363592,0x363592,2 +np.float32,0x7dafbb78,0x42ac88cc,2 +np.float32,0x7f69516c,0x42b14298,2 +np.float32,0x3e1d5be2,0x3e126131,2 +np.float32,0x410c23,0x410c23,2 +np.float32,0x7ec9563c,0x42af9439,2 +np.float32,0xbedd3a0e,0xbf10d705,2 +np.float32,0x7f7c4f1f,0x42b16aa8,2 +np.float32,0xbe99b34e,0xbeb6c2d3,2 +np.float32,0x6cdc84,0x6cdc84,2 +np.float32,0x5b3bbe,0x5b3bbe,2 +np.float32,0x252178,0x252178,2 +np.float32,0x7d531865,0x42ab83c8,2 +np.float32,0xbf565b44,0xbfe873bf,2 +np.float32,0x5977ce,0x5977ce,2 +np.float32,0x588a58,0x588a58,2 +np.float32,0x3eae7054,0x3e961d51,2 +np.float32,0x725049,0x725049,2 +np.float32,0x7f2b9386,0x42b0a538,2 +np.float32,0xbe674714,0xbe831245,2 +np.float32,0x8044f0d8,0x8044f0d8,2 +np.float32,0x800a3c21,0x800a3c21,2 +np.float32,0x807b275b,0x807b275b,2 +np.float32,0xbf2463b6,0xbf83896e,2 +np.float32,0x801cca42,0x801cca42,2 +np.float32,0xbf28f2d0,0xbf8a121a,2 +np.float32,0x3f4168c2,0x3f1010ce,2 +np.float32,0x6f91a1,0x6f91a1,2 +np.float32,0xbf2b9eeb,0xbf8e0fc5,2 +np.float32,0xbea4c858,0xbec6d8e4,2 +np.float32,0xbf7abba0,0xc0788e88,2 +np.float32,0x802f18f7,0x802f18f7,2 +np.float32,0xbf7f6c75,0xc0c3145c,2 +np.float32,0xbe988210,0xbeb50f5e,2 +np.float32,0xbf219b7e,0xbf7f6a3b,2 +np.float32,0x7f800000,0x7f800000,2 +np.float32,0x7f7fffff,0x42b17218,2 +np.float32,0xbdca8d90,0xbdd5487e,2 +np.float32,0xbef683b0,0xbf2821b0,2 +np.float32,0x8043e648,0x8043e648,2 +np.float32,0xbf4319a4,0xbfb7cd1b,2 +np.float32,0x62c2b2,0x62c2b2,2 +np.float32,0xbf479ccd,0xbfc1a7b1,2 +np.float32,0x806c8a32,0x806c8a32,2 +np.float32,0x7f004447,0x42b01045,2 +np.float32,0x3f737d36,0x3f2b1ccf,2 +np.float32,0x3ee71f24,0x3ebebced,2 +np.float32,0x3ea0b6b4,0x3e8bc606,2 +np.float32,0x358fd7,0x358fd7,2 +np.float32,0xbe69780c,0xbe847d17,2 +np.float32,0x7f6bed18,0x42b14849,2 +np.float32,0xbf6a5113,0xc01dfe1d,2 +np.float32,0xbf255693,0xbf84de88,2 +np.float32,0x7f34acac,0x42b0bfac,2 +np.float32,0xbe8a3b6a,0xbea11efe,2 +np.float32,0x3f470d84,0x3f1342ab,2 +np.float32,0xbf2cbde3,0xbf8fc602,2 +np.float32,0x47c103,0x47c103,2 +np.float32,0xe3c94,0xe3c94,2 +np.float32,0xbec07afa,0xbef1693a,2 +np.float32,0x6a9cfe,0x6a9cfe,2 +np.float32,0xbe4339e0,0xbe5899da,2 +np.float32,0x7ea9bf1e,0x42af3cd6,2 +np.float32,0x3f6378b4,0x3f22c4c4,2 +np.float32,0xbd989ff0,0xbd9e9c77,2 +np.float32,0xbe6f2f50,0xbe88343d,2 +np.float32,0x3f7f2ac5,0x3f310764,2 +np.float32,0x3f256704,0x3eff2fb2,2 +np.float32,0x80786aca,0x80786aca,2 +np.float32,0x65d02f,0x65d02f,2 +np.float32,0x50d1c3,0x50d1c3,2 +np.float32,0x3f4a9d76,0x3f1541b4,2 +np.float32,0x802cf491,0x802cf491,2 +np.float32,0x3e935cec,0x3e81829b,2 +np.float32,0x3e2ad478,0x3e1dfd81,2 +np.float32,0xbf107cbd,0xbf54bef2,2 +np.float32,0xbf58c02e,0xbff007fe,2 +np.float32,0x80090808,0x80090808,2 +np.float32,0x805d1f66,0x805d1f66,2 +np.float32,0x6aec95,0x6aec95,2 +np.float32,0xbee3fc6e,0xbf16dc73,2 +np.float32,0x7f63314b,0x42b134f9,2 +np.float32,0x550443,0x550443,2 +np.float32,0xbefa8174,0xbf2c026e,2 +np.float32,0x3f7fb380,0x3f314bd5,2 +np.float32,0x80171f2c,0x80171f2c,2 +np.float32,0x3f2f56ae,0x3f058f2d,2 +np.float32,0x3eacaecb,0x3e94cd97,2 +np.float32,0xbe0c4f0c,0xbe16e69d,2 +np.float32,0x3f48e4cb,0x3f144b42,2 +np.float32,0x7f03efe2,0x42b01eb7,2 +np.float32,0xbf1019ac,0xbf53dbe9,2 +np.float32,0x3e958524,0x3e832eb5,2 +np.float32,0xbf1b23c6,0xbf6e72f2,2 +np.float32,0x12c554,0x12c554,2 +np.float32,0x7dee588c,0x42ad24d6,2 +np.float32,0xbe8c216c,0xbea3ba70,2 +np.float32,0x804553cb,0x804553cb,2 +np.float32,0xbe446324,0xbe5a0966,2 +np.float32,0xbef7150a,0xbf28adff,2 +np.float32,0xbf087282,0xbf42ec6e,2 +np.float32,0x3eeef15c,0x3ec41937,2 +np.float32,0x61bbd2,0x61bbd2,2 +np.float32,0x3e51b28d,0x3e3ec538,2 +np.float32,0x57e869,0x57e869,2 +np.float32,0x7e5e7711,0x42ae646c,2 +np.float32,0x8050b173,0x8050b173,2 +np.float32,0xbf63c90c,0xc00d2438,2 +np.float32,0xbeba774c,0xbee7dcf8,2 +np.float32,0x8016faac,0x8016faac,2 +np.float32,0xbe8b448c,0xbea28aaf,2 +np.float32,0x3e8cd448,0x3e78d29e,2 +np.float32,0x80484e02,0x80484e02,2 +np.float32,0x3f63ba68,0x3f22e78c,2 +np.float32,0x2e87bb,0x2e87bb,2 +np.float32,0x230496,0x230496,2 +np.float32,0x1327b2,0x1327b2,2 +np.float32,0xbf046c56,0xbf3a72d2,2 +np.float32,0x3ecefe60,0x3eadd69a,2 +np.float32,0x49c56e,0x49c56e,2 +np.float32,0x3df22d60,0x3de4e550,2 +np.float32,0x3f67c19d,0x3f250707,2 +np.float32,0x3f20eb9c,0x3ef9b624,2 +np.float32,0x3f05ca75,0x3ed742fa,2 +np.float32,0xbe8514f8,0xbe9a1d45,2 +np.float32,0x8070a003,0x8070a003,2 +np.float32,0x7e49650e,0x42ae317a,2 +np.float32,0x3de16ce9,0x3dd5dc3e,2 +np.float32,0xbf4ae952,0xbfc95f1f,2 +np.float32,0xbe44dd84,0xbe5aa0db,2 +np.float32,0x803c3bc0,0x803c3bc0,2 +np.float32,0x3eebb9e8,0x3ec1e692,2 +np.float32,0x80588275,0x80588275,2 +np.float32,0xbea1e69a,0xbec29d86,2 +np.float32,0x3f7b4bf8,0x3f2f154c,2 +np.float32,0x7eb47ecc,0x42af5c46,2 +np.float32,0x3d441e00,0x3d3f911a,2 +np.float32,0x7f54d40e,0x42b11388,2 +np.float32,0xbf47f17e,0xbfc26882,2 +np.float32,0x3ea7da57,0x3e912db4,2 +np.float32,0x3f59cc7b,0x3f1d984e,2 +np.float32,0x570e08,0x570e08,2 +np.float32,0x3e99560c,0x3e8620a2,2 +np.float32,0x3ecfbd14,0x3eae5e55,2 +np.float32,0x7e86be08,0x42aec698,2 +np.float32,0x3f10f28a,0x3ee5b5d3,2 +np.float32,0x7f228722,0x42b0897a,2 +np.float32,0x3f4b979b,0x3f15cd30,2 +np.float32,0xbf134283,0xbf5b30f9,2 +np.float32,0x3f2ae16a,0x3f02e64f,2 +np.float32,0x3e98e158,0x3e85c6cc,2 +np.float32,0x7ec39f27,0x42af857a,2 +np.float32,0x3effedb0,0x3ecf8cea,2 +np.float32,0xbd545620,0xbd5a09c1,2 +np.float32,0x503a28,0x503a28,2 +np.float32,0x3f712744,0x3f29e9a1,2 +np.float32,0x3edc6194,0x3eb748b1,2 +np.float32,0xbf4ec1e5,0xbfd2ff5f,2 +np.float32,0x3f46669e,0x3f12e4b5,2 +np.float32,0xabad3,0xabad3,2 +np.float32,0x80000000,0x80000000,2 +np.float32,0x803f2e6d,0x803f2e6d,2 +np.float32,0xbf431542,0xbfb7c3e6,2 +np.float32,0x3f6f2d53,0x3f28e496,2 +np.float32,0x546bd8,0x546bd8,2 +np.float32,0x25c80a,0x25c80a,2 +np.float32,0x3e50883c,0x3e3dcd7e,2 +np.float32,0xbf5fa2ba,0xc0045c14,2 +np.float32,0x80271c07,0x80271c07,2 +np.float32,0x8043755d,0x8043755d,2 +np.float32,0xbf3c5cea,0xbfaa5ee9,2 +np.float32,0x3f2fea38,0x3f05e6af,2 +np.float32,0x6da3dc,0x6da3dc,2 +np.float32,0xbf095945,0xbf44dc70,2 +np.float32,0xbe33d584,0xbe45c1f5,2 +np.float32,0x7eb41b2e,0x42af5b2b,2 +np.float32,0xbf0feb74,0xbf537242,2 +np.float32,0xbe96225a,0xbeb1b0b1,2 +np.float32,0x3f63b95f,0x3f22e700,2 +np.float32,0x0,0x0,2 +np.float32,0x3e20b0cc,0x3e154374,2 +np.float32,0xbf79880c,0xc06b6801,2 +np.float32,0xbea690b6,0xbec97b93,2 +np.float32,0xbf3e11ca,0xbfada449,2 +np.float32,0x7e7e6292,0x42aea912,2 +np.float32,0x3e793350,0x3e5f0b7b,2 +np.float32,0x802e7183,0x802e7183,2 +np.float32,0x3f1b3695,0x3ef2a788,2 +np.float32,0x801efa20,0x801efa20,2 +np.float32,0x3f1ec43a,0x3ef70f42,2 +np.float32,0xbf12c5ed,0xbf5a0c52,2 +np.float32,0x8005e99c,0x8005e99c,2 +np.float32,0xbf79f5e7,0xc06fcca5,2 +np.float32,0x3ecbaf50,0x3eab7a03,2 +np.float32,0x46b0fd,0x46b0fd,2 +np.float32,0x3edb9023,0x3eb6b631,2 +np.float32,0x7f24bc41,0x42b09063,2 +np.float32,0xbd8d9328,0xbd92b4c6,2 +np.float32,0x3f2c5d7f,0x3f03c9d9,2 +np.float32,0x807bebc9,0x807bebc9,2 +np.float32,0x7f797a99,0x42b164e2,2 +np.float32,0x756e3c,0x756e3c,2 +np.float32,0x80416f8a,0x80416f8a,2 +np.float32,0x3e0d512a,0x3e04611a,2 +np.float32,0x3f7be3e6,0x3f2f61ec,2 +np.float32,0x80075c41,0x80075c41,2 +np.float32,0xbe850294,0xbe9a046c,2 +np.float32,0x684679,0x684679,2 +np.float32,0x3eb393c4,0x3e99eed2,2 +np.float32,0x3f4177c6,0x3f10195b,2 +np.float32,0x3dd1f402,0x3dc7dfe5,2 +np.float32,0x3ef484d4,0x3ec7e2e1,2 +np.float32,0x53eb8f,0x53eb8f,2 +np.float32,0x7f072cb6,0x42b02b20,2 +np.float32,0xbf1b6b55,0xbf6f28d4,2 +np.float32,0xbd8a98d8,0xbd8f827d,2 +np.float32,0x3eafb418,0x3e970e96,2 +np.float32,0x6555af,0x6555af,2 +np.float32,0x7dd5118e,0x42aceb6f,2 +np.float32,0x800a13f7,0x800a13f7,2 +np.float32,0x331a9d,0x331a9d,2 +np.float32,0x8063773f,0x8063773f,2 +np.float32,0x3e95e068,0x3e837553,2 +np.float32,0x80654b32,0x80654b32,2 +np.float32,0x3dabe0e0,0x3da50bb3,2 +np.float32,0xbf6283c3,0xc00a5280,2 +np.float32,0x80751cc5,0x80751cc5,2 +np.float32,0x3f668eb6,0x3f2465c0,2 +np.float32,0x3e13c058,0x3e0a048c,2 +np.float32,0x77780c,0x77780c,2 +np.float32,0x3f7d6e48,0x3f302868,2 +np.float32,0x7e31f9e3,0x42adf22f,2 +np.float32,0x246c7b,0x246c7b,2 +np.float32,0xbe915bf0,0xbeaafa6c,2 +np.float32,0xbf800000,0xff800000,2 +np.float32,0x3f698f42,0x3f25f8e0,2 +np.float32,0x7e698885,0x42ae7d48,2 +np.float32,0x3f5bbd42,0x3f1ea42c,2 +np.float32,0x5b8444,0x5b8444,2 +np.float32,0xbf6065f6,0xc005e2c6,2 +np.float32,0xbeb95036,0xbee60dad,2 +np.float32,0xbf44f846,0xbfbbcade,2 +np.float32,0xc96e5,0xc96e5,2 +np.float32,0xbf213e90,0xbf7e6eae,2 +np.float32,0xbeb309cc,0xbedc4fe6,2 +np.float32,0xbe781cf4,0xbe8e0fe6,2 +np.float32,0x7f0cf0db,0x42b04083,2 +np.float32,0xbf7b6143,0xc08078f9,2 +np.float32,0x80526fc6,0x80526fc6,2 +np.float32,0x3f092bf3,0x3edbaeec,2 +np.float32,0x3ecdf154,0x3ead16df,2 +np.float32,0x2fe85b,0x2fe85b,2 +np.float32,0xbf5100a0,0xbfd8f871,2 +np.float32,0xbec09d40,0xbef1a028,2 +np.float32,0x5e6a85,0x5e6a85,2 +np.float32,0xbec0e2a0,0xbef20f6b,2 +np.float32,0x3f72e788,0x3f2ad00d,2 +np.float32,0x880a6,0x880a6,2 +np.float32,0x3d9e90bf,0x3d98b9fc,2 +np.float32,0x15cf25,0x15cf25,2 +np.float32,0x10171b,0x10171b,2 +np.float32,0x805cf1aa,0x805cf1aa,2 +np.float32,0x3f19bd36,0x3ef0d0d2,2 +np.float32,0x3ebe2bda,0x3ea1b774,2 +np.float32,0xbecd8192,0xbf035c49,2 +np.float32,0x3e2ce508,0x3e1fc21b,2 +np.float32,0x290f,0x290f,2 +np.float32,0x803b679f,0x803b679f,2 +np.float32,0x1,0x1,2 +np.float32,0x807a9c76,0x807a9c76,2 +np.float32,0xbf65fced,0xc01257f8,2 +np.float32,0x3f783414,0x3f2d8475,2 +np.float32,0x3f2d9d92,0x3f0488da,2 +np.float32,0xbddb5798,0xbde80018,2 +np.float32,0x3e91afb8,0x3e8034e7,2 +np.float32,0xbf1b775a,0xbf6f476d,2 +np.float32,0xbf73a32c,0xc041f3ba,2 +np.float32,0xbea39364,0xbec5121b,2 +np.float32,0x80375b94,0x80375b94,2 +np.float32,0x3f331252,0x3f07c3e9,2 +np.float32,0xbf285774,0xbf892e74,2 +np.float32,0x3e699bb8,0x3e526d55,2 +np.float32,0x3f08208a,0x3eda523a,2 +np.float32,0xbf42fb4a,0xbfb78d60,2 +np.float32,0x8029c894,0x8029c894,2 +np.float32,0x3e926c0c,0x3e80c76e,2 +np.float32,0x801e4715,0x801e4715,2 +np.float32,0x3e4b36d8,0x3e395ffd,2 +np.float32,0x8041556b,0x8041556b,2 +np.float32,0xbf2d99ba,0xbf9119bd,2 +np.float32,0x3ed83ea8,0x3eb46250,2 +np.float32,0xbe94a280,0xbeaf92b4,2 +np.float32,0x7f4c7a64,0x42b0ff0a,2 +np.float32,0x806d4022,0x806d4022,2 +np.float32,0xbed382f8,0xbf086d26,2 +np.float32,0x1846fe,0x1846fe,2 +np.float32,0xbe702558,0xbe88d4d8,2 +np.float32,0xbe650ee0,0xbe81a3cc,2 +np.float32,0x3ee9d088,0x3ec0970c,2 +np.float32,0x7f6d4498,0x42b14b30,2 +np.float32,0xbef9f9e6,0xbf2b7ddb,2 +np.float32,0xbf70c384,0xc0349370,2 +np.float32,0xbeff9e9e,0xbf3110c8,2 +np.float32,0xbef06372,0xbf224aa9,2 +np.float32,0xbf15a692,0xbf60e1fa,2 +np.float32,0x8058c117,0x8058c117,2 +np.float32,0xbd9f74b8,0xbda6017b,2 +np.float32,0x801bf130,0x801bf130,2 +np.float32,0x805da84c,0x805da84c,2 +np.float32,0xff800000,0xffc00000,2 +np.float32,0xbeb01de2,0xbed7d6d6,2 +np.float32,0x8077de08,0x8077de08,2 +np.float32,0x3e327668,0x3e2482c1,2 +np.float32,0xbe7add88,0xbe8fe1ab,2 +np.float32,0x805a3c2e,0x805a3c2e,2 +np.float32,0x80326a73,0x80326a73,2 +np.float32,0x800b8a34,0x800b8a34,2 +np.float32,0x8048c83a,0x8048c83a,2 +np.float32,0xbf3799d6,0xbfa1a975,2 +np.float32,0x807649c7,0x807649c7,2 +np.float32,0x3dfdbf90,0x3def3798,2 +np.float32,0xbf1b538a,0xbf6eec4c,2 +np.float32,0xbf1e5989,0xbf76baa0,2 +np.float32,0xc7a80,0xc7a80,2 +np.float32,0x8001be54,0x8001be54,2 +np.float32,0x3f435bbc,0x3f112c6d,2 +np.float32,0xbeabcff8,0xbed151d1,2 +np.float32,0x7de20c78,0x42ad09b7,2 +np.float32,0x3f0e6d2e,0x3ee27b1e,2 +np.float32,0xbf0cb352,0xbf4c3267,2 +np.float32,0x7f6ec06f,0x42b14e61,2 +np.float32,0x7f6fa8ef,0x42b15053,2 +np.float32,0xbf3d2a6a,0xbfabe623,2 +np.float32,0x7f077a4c,0x42b02c46,2 +np.float32,0xbf2a68dc,0xbf8c3cc4,2 +np.float32,0x802a5dbe,0x802a5dbe,2 +np.float32,0x807f631c,0x807f631c,2 +np.float32,0x3dc9b8,0x3dc9b8,2 +np.float32,0x3ebdc1b7,0x3ea16a0a,2 +np.float32,0x7ef29dab,0x42aff3b5,2 +np.float32,0x3e8ab1cc,0x3e757806,2 +np.float32,0x3f27e88e,0x3f011c6d,2 +np.float32,0x3cfd1455,0x3cf93fb5,2 +np.float32,0x7f7eebf5,0x42b16fef,2 +np.float32,0x3c9b2140,0x3c99ade9,2 +np.float32,0x7e928601,0x42aef183,2 +np.float32,0xbd7d2db0,0xbd82abae,2 +np.float32,0x3e6f0df3,0x3e56da20,2 +np.float32,0x7d36a2fc,0x42ab39a3,2 +np.float32,0xbf49d3a2,0xbfc6c859,2 +np.float32,0x7ee541d3,0x42afd6b6,2 +np.float32,0x80753dc0,0x80753dc0,2 +np.float32,0x3f4ce486,0x3f16865d,2 +np.float32,0x39e701,0x39e701,2 +np.float32,0x3f3d9ede,0x3f0de5fa,2 +np.float32,0x7fafb2,0x7fafb2,2 +np.float32,0x3e013fdc,0x3df37090,2 +np.float32,0x807b6a2c,0x807b6a2c,2 +np.float32,0xbe86800a,0xbe9c08c7,2 +np.float32,0x7f40f080,0x42b0e14d,2 +np.float32,0x7eef5afe,0x42afecc8,2 +np.float32,0x7ec30052,0x42af83da,2 +np.float32,0x3eacf768,0x3e9503e1,2 +np.float32,0x7f13ef0e,0x42b0594e,2 +np.float32,0x80419f4a,0x80419f4a,2 +np.float32,0xbf485932,0xbfc3562a,2 +np.float32,0xbe8a24d6,0xbea10011,2 +np.float32,0xbda791c0,0xbdaed2bc,2 +np.float32,0x3e9b5169,0x3e87a67d,2 +np.float32,0x807dd882,0x807dd882,2 +np.float32,0x7f40170e,0x42b0df0a,2 +np.float32,0x7f02f7f9,0x42b01af1,2 +np.float32,0x3ea38bf9,0x3e8decde,2 +np.float32,0x3e2e7ce8,0x3e211ed4,2 +np.float32,0x70a7a6,0x70a7a6,2 +np.float32,0x7d978592,0x42ac3ce7,2 +np.float32,0x804d12d0,0x804d12d0,2 +np.float32,0x80165dc8,0x80165dc8,2 +np.float32,0x80000001,0x80000001,2 +np.float32,0x3e325da0,0x3e246da6,2 +np.float32,0xbe063bb8,0xbe0fe281,2 +np.float32,0x160b8,0x160b8,2 +np.float32,0xbe5687a4,0xbe70bbef,2 +np.float32,0x7f11ab34,0x42b05168,2 +np.float32,0xc955c,0xc955c,2 +np.float32,0xbea0003a,0xbebfd826,2 +np.float32,0x3f7fbdd9,0x3f315102,2 +np.float32,0xbe61aefc,0xbe7ef121,2 +np.float32,0xbf1b9873,0xbf6f9bc3,2 +np.float32,0x3a6d14,0x3a6d14,2 +np.float32,0xbf1ad3b4,0xbf6da808,2 +np.float32,0x3ed2dd24,0x3eb0963d,2 +np.float32,0xbe81a4ca,0xbe957d52,2 +np.float32,0x7f1be3e9,0x42b07421,2 +np.float32,0x7f5ce943,0x42b1269e,2 +np.float32,0x7eebcbdf,0x42afe51d,2 +np.float32,0x807181b5,0x807181b5,2 +np.float32,0xbecb03ba,0xbf0149ad,2 +np.float32,0x42edb8,0x42edb8,2 +np.float32,0xbf3aeec8,0xbfa7b13f,2 +np.float32,0xbd0c4f00,0xbd0ec4a0,2 +np.float32,0x3e48d260,0x3e376070,2 +np.float32,0x1a9731,0x1a9731,2 +np.float32,0x7f323be4,0x42b0b8b5,2 +np.float32,0x1a327f,0x1a327f,2 +np.float32,0x17f1fc,0x17f1fc,2 +np.float32,0xbf2f4f9b,0xbf93c91a,2 +np.float32,0x3ede8934,0x3eb8c9c3,2 +np.float32,0xbf56aaac,0xbfe968bb,2 +np.float32,0x3e22cb5a,0x3e17148c,2 +np.float32,0x7d9def,0x7d9def,2 +np.float32,0x8045b963,0x8045b963,2 +np.float32,0x77404f,0x77404f,2 +np.float32,0x7e2c9efb,0x42ade28b,2 +np.float32,0x8058ad89,0x8058ad89,2 +np.float32,0x7f4139,0x7f4139,2 +np.float32,0x8020e12a,0x8020e12a,2 +np.float32,0x800c9daa,0x800c9daa,2 +np.float32,0x7f2c5ac5,0x42b0a789,2 +np.float32,0x3f04a47b,0x3ed5c043,2 +np.float32,0x804692d5,0x804692d5,2 +np.float32,0xbf6e7fa4,0xc02bb493,2 +np.float32,0x80330756,0x80330756,2 +np.float32,0x7f3e29ad,0x42b0d9e1,2 +np.float32,0xbebf689a,0xbeefb24d,2 +np.float32,0x3f29a86c,0x3f022a56,2 +np.float32,0x3e3bd1c0,0x3e2c72b3,2 +np.float32,0x3f78f2e8,0x3f2de546,2 +np.float32,0x3f3709be,0x3f0a16af,2 +np.float32,0x3e11f150,0x3e086f97,2 +np.float32,0xbf5867ad,0xbfeee8a0,2 +np.float32,0xbebfb328,0xbef0296c,2 +np.float32,0x2f7f15,0x2f7f15,2 +np.float32,0x805cfe84,0x805cfe84,2 +np.float32,0xbf504e01,0xbfd71589,2 +np.float32,0x3ee0903c,0x3eba330c,2 +np.float32,0xbd838990,0xbd87f399,2 +np.float32,0x3f14444e,0x3ee9ee7d,2 +np.float32,0x7e352583,0x42adfb3a,2 +np.float32,0x7e76f824,0x42ae99ec,2 +np.float32,0x3f772d00,0x3f2cfebf,2 +np.float32,0x801f7763,0x801f7763,2 +np.float32,0x3f760bf5,0x3f2c6b87,2 +np.float32,0xbf0bb696,0xbf4a03a5,2 +np.float32,0x3f175d2c,0x3eedd6d2,2 +np.float32,0xbf5723f8,0xbfeae288,2 +np.float32,0x24de0a,0x24de0a,2 +np.float32,0x3cd73f80,0x3cd47801,2 +np.float32,0x7f013305,0x42b013fa,2 +np.float32,0x3e3ad425,0x3e2b9c50,2 +np.float32,0x7d3d16,0x7d3d16,2 +np.float32,0x3ef49738,0x3ec7ef54,2 +np.float32,0x3f5b8612,0x3f1e8678,2 +np.float32,0x7f0eeb5c,0x42b047a7,2 +np.float32,0x7e9d7cb0,0x42af1675,2 +np.float32,0xbdd1cfb0,0xbddd5aa0,2 +np.float32,0xbf645dba,0xc00e78fe,2 +np.float32,0x3f511174,0x3f18d56c,2 +np.float32,0x3d91ad00,0x3d8cba62,2 +np.float32,0x805298da,0x805298da,2 +np.float32,0xbedb6af4,0xbf0f4090,2 +np.float32,0x3d23b1ba,0x3d208205,2 +np.float32,0xbea5783e,0xbec7dc87,2 +np.float32,0x79d191,0x79d191,2 +np.float32,0x3e894413,0x3e7337da,2 +np.float32,0x80800000,0x80800000,2 +np.float32,0xbf34a8d3,0xbf9c907b,2 +np.float32,0x3bae779a,0x3bae011f,2 +np.float32,0x8049284d,0x8049284d,2 +np.float32,0x3eb42cc4,0x3e9a600b,2 +np.float32,0x3da1e2d0,0x3d9bce5f,2 +np.float32,0x3f364b8a,0x3f09a7af,2 +np.float32,0x3d930b10,0x3d8e0118,2 +np.float32,0x8061f8d7,0x8061f8d7,2 +np.float32,0x3f473213,0x3f13573b,2 +np.float32,0x3f1e2a38,0x3ef65102,2 +np.float32,0x8068f7d9,0x8068f7d9,2 +np.float32,0x3f181ef8,0x3eeeca2c,2 +np.float32,0x3eeb6168,0x3ec1a9f5,2 +np.float32,0xc2db6,0xc2db6,2 +np.float32,0x3ef7b578,0x3eca0a69,2 +np.float32,0xbf5b5a84,0xbff8d075,2 +np.float32,0x7f479d5f,0x42b0f2b7,2 +np.float32,0x3e6f3c24,0x3e56ff92,2 +np.float32,0x3f45543a,0x3f1249f0,2 +np.float32,0xbea7c1fa,0xbecb40d2,2 +np.float32,0x7de082,0x7de082,2 +np.float32,0x383729,0x383729,2 +np.float32,0xbd91cb90,0xbd973eb3,2 +np.float32,0x7f320218,0x42b0b80f,2 +np.float32,0x5547f2,0x5547f2,2 +np.float32,0x291fe4,0x291fe4,2 +np.float32,0xbe078ba0,0xbe11655f,2 +np.float32,0x7e0c0658,0x42ad7764,2 +np.float32,0x7e129a2b,0x42ad8ee5,2 +np.float32,0x3f7c96d4,0x3f2fbc0c,2 +np.float32,0x3f800000,0x3f317218,2 +np.float32,0x7f131754,0x42b05662,2 +np.float32,0x15f833,0x15f833,2 +np.float32,0x80392ced,0x80392ced,2 +np.float32,0x3f7c141a,0x3f2f7a36,2 +np.float32,0xbf71c03f,0xc038dcfd,2 +np.float32,0xbe14fb2c,0xbe20fff3,2 +np.float32,0xbee0bac6,0xbf13f14c,2 +np.float32,0x801a32dd,0x801a32dd,2 +np.float32,0x8e12d,0x8e12d,2 +np.float32,0x3f48c606,0x3f143a04,2 +np.float32,0x7f418af5,0x42b0e2e6,2 +np.float32,0x3f1f2918,0x3ef78bb7,2 +np.float32,0x11141b,0x11141b,2 +np.float32,0x3e9fc9e8,0x3e8b11ad,2 +np.float32,0xbea5447a,0xbec79010,2 +np.float32,0xbe31d904,0xbe4359db,2 +np.float32,0x80184667,0x80184667,2 +np.float32,0xbf00503c,0xbf3212c2,2 +np.float32,0x3e0328cf,0x3df6d425,2 +np.float32,0x7ee8e1b7,0x42afdebe,2 +np.float32,0xbef95e24,0xbf2ae5db,2 +np.float32,0x7f3e4eed,0x42b0da45,2 +np.float32,0x3f43ee85,0x3f117fa0,2 +np.float32,0xbcfa2ac0,0xbcfe10fe,2 +np.float32,0x80162774,0x80162774,2 +np.float32,0x372e8b,0x372e8b,2 +np.float32,0x3f263802,0x3f0016b0,2 +np.float32,0x8008725f,0x8008725f,2 +np.float32,0x800beb40,0x800beb40,2 +np.float32,0xbe93308e,0xbead8a77,2 +np.float32,0x3d8a4240,0x3d85cab8,2 +np.float32,0x80179de0,0x80179de0,2 +np.float32,0x7f4a98f2,0x42b0fa4f,2 +np.float32,0x3f0d214e,0x3ee0cff1,2 +np.float32,0x80536c2c,0x80536c2c,2 +np.float32,0x7e7038ed,0x42ae8bbe,2 +np.float32,0x7f345af9,0x42b0bec4,2 +np.float32,0xbf243219,0xbf83442f,2 +np.float32,0x7e0d5555,0x42ad7c27,2 +np.float32,0x762e95,0x762e95,2 +np.float32,0x7ebf4548,0x42af79f6,2 +np.float32,0x8079639e,0x8079639e,2 +np.float32,0x3ef925c0,0x3ecb0260,2 +np.float32,0x3f708695,0x3f2996d6,2 +np.float32,0xfca9f,0xfca9f,2 +np.float32,0x8060dbf4,0x8060dbf4,2 +np.float32,0x4c8840,0x4c8840,2 +np.float32,0xbea922ee,0xbecd4ed5,2 +np.float32,0xbf4f28a9,0xbfd40b98,2 +np.float32,0xbe25ad48,0xbe34ba1b,2 +np.float32,0x3f2fb254,0x3f05c58c,2 +np.float32,0x3f73bcc2,0x3f2b3d5f,2 +np.float32,0xbf479a07,0xbfc1a165,2 +np.float32,0xbeb9a808,0xbee69763,2 +np.float32,0x7eb16a65,0x42af5376,2 +np.float32,0xbeb3e442,0xbedda042,2 +np.float32,0x3d8f439c,0x3d8a79ac,2 +np.float32,0x80347516,0x80347516,2 +np.float32,0x3e8a0c5d,0x3e74738c,2 +np.float32,0xbf0383a4,0xbf389289,2 +np.float32,0x806be8f5,0x806be8f5,2 +np.float32,0x8023f0c5,0x8023f0c5,2 +np.float32,0x2060e9,0x2060e9,2 +np.float32,0xbf759eba,0xc04d239f,2 +np.float32,0x3d84cc5a,0x3d80ab96,2 +np.float32,0xbf57746b,0xbfebdf87,2 +np.float32,0x3e418417,0x3e31401f,2 +np.float32,0xaecce,0xaecce,2 +np.float32,0x3cd1766f,0x3cced45c,2 +np.float32,0x53724a,0x53724a,2 +np.float32,0x3f773710,0x3f2d03de,2 +np.float32,0x8013d040,0x8013d040,2 +np.float32,0x4d0eb2,0x4d0eb2,2 +np.float32,0x8014364a,0x8014364a,2 +np.float32,0x7f3c56c9,0x42b0d4f2,2 +np.float32,0x3eee1e1c,0x3ec3891a,2 +np.float32,0xbdda3eb8,0xbde6c5a0,2 +np.float32,0x26ef4a,0x26ef4a,2 +np.float32,0x7ed3370c,0x42afacbf,2 +np.float32,0xbf06e31b,0xbf3f9ab7,2 +np.float32,0xbe3185f0,0xbe42f556,2 +np.float32,0x3dcf9abe,0x3dc5be41,2 +np.float32,0xbf3696d9,0xbf9fe2bd,2 +np.float32,0x3e68ee50,0x3e51e01a,2 +np.float32,0x3f3d4cc2,0x3f0db6ca,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0xbf03070c,0xbf3792d0,2 +np.float32,0x3ea79e6c,0x3e910092,2 +np.float32,0xbf1a393a,0xbf6c2251,2 +np.float32,0x3f41eb0e,0x3f105afc,2 +np.float32,0x3ceadb2f,0x3ce78d79,2 +np.float32,0xbf5dc105,0xc000be2c,2 +np.float32,0x7ebb5a0e,0x42af6f5c,2 +np.float32,0xbf7c44eb,0xc0875058,2 +np.float32,0x6aaaf4,0x6aaaf4,2 +np.float32,0x807d8f23,0x807d8f23,2 +np.float32,0xbee6b142,0xbf194fef,2 +np.float32,0xbe83f256,0xbe989526,2 +np.float32,0x7d588e,0x7d588e,2 +np.float32,0x7cc80131,0x42aa0542,2 +np.float32,0x3e0ab198,0x3e02124f,2 +np.float32,0xbf6e64db,0xc02b52eb,2 +np.float32,0x3d238b56,0x3d205d1b,2 +np.float32,0xbeb408e2,0xbeddd8bc,2 +np.float32,0x3f78340d,0x3f2d8471,2 +np.float32,0x806162a3,0x806162a3,2 +np.float32,0x804e484f,0x804e484f,2 +np.float32,0xbeb8c576,0xbee53466,2 +np.float32,0x807aab15,0x807aab15,2 +np.float32,0x3f523e20,0x3f197ab8,2 +np.float32,0xbf009190,0xbf3295de,2 +np.float32,0x3df43da5,0x3de6bd82,2 +np.float32,0x7f639aea,0x42b135e6,2 +np.float32,0x3f1e638a,0x3ef697da,2 +np.float32,0xbf4884de,0xbfc3bac3,2 +np.float32,0xbe9336b6,0xbead931b,2 +np.float32,0x6daf7f,0x6daf7f,2 +np.float32,0xbf1fc152,0xbf7a70b1,2 +np.float32,0x3f103720,0x3ee4c649,2 +np.float32,0x3eeaa227,0x3ec126df,2 +np.float32,0x7f7ea945,0x42b16f69,2 +np.float32,0x3d3cd800,0x3d389ead,2 +np.float32,0x3f3d7268,0x3f0dcc6e,2 +np.float32,0xbf3c1b41,0xbfa9e2e3,2 +np.float32,0x3ecf3818,0x3eadffb2,2 +np.float32,0x3f1af312,0x3ef25372,2 +np.float32,0x48fae4,0x48fae4,2 +np.float64,0x7fedaa1ee4fb543d,0x40862da7ca7c308e,1 +np.float64,0x8007d2d810efa5b1,0x8007d2d810efa5b1,1 +np.float64,0x3fc385e069270bc0,0x3fc22b8884cf2c3b,1 +np.float64,0x68ed4130d1da9,0x68ed4130d1da9,1 +np.float64,0x8008e93e58d1d27d,0x8008e93e58d1d27d,1 +np.float64,0xbfd3d62852a7ac50,0xbfd7be3a7ad1af02,1 +np.float64,0xbfc1fa0ba923f418,0xbfc35f0f19447df7,1 +np.float64,0xbfe01b8cec20371a,0xbfe6658c7e6c8e50,1 +np.float64,0xbfeda81a147b5034,0xc004e9c94f2b91c1,1 +np.float64,0xbfe1c36a97e386d5,0xbfe9ead4d6beaa92,1 +np.float64,0x3fe50be51f2a17ca,0x3fe02c8067d9e5c5,1 +np.float64,0x3febed4d3337da9a,0x3fe413956466134f,1 +np.float64,0x80068ea59ced1d4c,0x80068ea59ced1d4c,1 +np.float64,0x3febe77d5877cefb,0x3fe4107ac088bc71,1 +np.float64,0x800ae77617d5ceed,0x800ae77617d5ceed,1 +np.float64,0x3fd0546b60a0a8d7,0x3fcd16c2e995ab23,1 +np.float64,0xbfe33e1476667c29,0xbfed6d7faec4db2f,1 +np.float64,0x3fe9d2fd51b3a5fb,0x3fe2eef834310219,1 +np.float64,0x8004249878284932,0x8004249878284932,1 +np.float64,0xbfd5b485c72b690c,0xbfda828ccc6a7a5c,1 +np.float64,0x7fcd6e6b6b3adcd6,0x408622807f04768e,1 +np.float64,0x3fd7f9c32caff386,0x3fd45d024514b8da,1 +np.float64,0x7f87eb9d702fd73a,0x40860aa99fcff27f,1 +np.float64,0xbfc5d1f6fb2ba3ec,0xbfc7ec367cb3fecc,1 +np.float64,0x8008316a44d062d5,0x8008316a44d062d5,1 +np.float64,0xbfd54e4358aa9c86,0xbfd9e889d2998a4a,1 +np.float64,0xda65facdb4cc0,0xda65facdb4cc0,1 +np.float64,0x3fc5b4f6f32b69f0,0x3fc40d13aa8e248b,1 +np.float64,0x3fd825a5d5b04b4c,0x3fd47ce73e04d3ff,1 +np.float64,0x7ac9d56ef593b,0x7ac9d56ef593b,1 +np.float64,0xbfd0a51977214a32,0xbfd34702071428be,1 +np.float64,0x3fd21f620b243ec4,0x3fcfea0c02193640,1 +np.float64,0x3fe6fb3f1b2df67e,0x3fe151ffb18c983b,1 +np.float64,0x700de022e01bd,0x700de022e01bd,1 +np.float64,0xbfbb76b81236ed70,0xbfbd0d31deea1ec7,1 +np.float64,0x3fecfc3856f9f870,0x3fe4a2fcadf221e0,1 +np.float64,0x3fede286517bc50c,0x3fe51af2fbd6ef63,1 +np.float64,0x7fdc8da96c391b52,0x408627ce09cfef2b,1 +np.float64,0x8000edfcfb81dbfb,0x8000edfcfb81dbfb,1 +np.float64,0x8009ebc42af3d789,0x8009ebc42af3d789,1 +np.float64,0x7fd658aaf8acb155,0x408625d80cd1ccc9,1 +np.float64,0x3feea584a37d4b09,0x3fe57f29a73729cd,1 +np.float64,0x4cfe494699fca,0x4cfe494699fca,1 +np.float64,0xbfe9d96460b3b2c9,0xbffa62ecfa026c77,1 +np.float64,0x7fdb3852c3b670a5,0x4086276c191dc9b1,1 +np.float64,0xbfe4d1fc9ee9a3f9,0xbff0d37ce37cf479,1 +np.float64,0xffefffffffffffff,0xfff8000000000000,1 +np.float64,0xbfd1c43d7fa3887a,0xbfd4cfbefb5f2c43,1 +np.float64,0x3fec4a8e0d78951c,0x3fe4453a82ca2570,1 +np.float64,0x7fafed74583fdae8,0x4086181017b8dac9,1 +np.float64,0x80076c4ebcced89e,0x80076c4ebcced89e,1 +np.float64,0x8001a9aa7b235356,0x8001a9aa7b235356,1 +np.float64,0x121260fe2424d,0x121260fe2424d,1 +np.float64,0x3fddd028e3bba052,0x3fd87998c4c43c5b,1 +np.float64,0x800ed1cf4a9da39f,0x800ed1cf4a9da39f,1 +np.float64,0xbfef2e63d7fe5cc8,0xc00d53480b16971b,1 +np.float64,0xbfedde3309fbbc66,0xc005ab55b7a7c127,1 +np.float64,0x3fda3e1e85b47c3d,0x3fd5fddafd8d6729,1 +np.float64,0x8007c6443c6f8c89,0x8007c6443c6f8c89,1 +np.float64,0xbfe101705f2202e0,0xbfe8420817665121,1 +np.float64,0x7fe0bff3c1e17fe7,0x4086291539c56d80,1 +np.float64,0x7fe6001dab6c003a,0x40862b43aa7cb060,1 +np.float64,0x7fbdecf7de3bd9ef,0x40861d170b1c51a5,1 +np.float64,0xbfc0fd508c21faa0,0xbfc23a5876e99fa3,1 +np.float64,0xbfcf6eb14f3edd64,0xbfd208cbf742c8ea,1 +np.float64,0x3f6d40ea403a81d5,0x3f6d33934ab8e799,1 +np.float64,0x7fc32600b6264c00,0x40861f10302357e0,1 +np.float64,0x3fd05870baa0b0e0,0x3fcd1d2af420fac7,1 +np.float64,0x80051d5120aa3aa3,0x80051d5120aa3aa3,1 +np.float64,0x3fdb783fcfb6f080,0x3fd6db229658c083,1 +np.float64,0x3fe0b61199e16c24,0x3fdae41e277be2eb,1 +np.float64,0x3daf62167b5ed,0x3daf62167b5ed,1 +np.float64,0xbfec3c53b6f878a7,0xc0011f0ce7a78a2a,1 +np.float64,0x800fc905161f920a,0x800fc905161f920a,1 +np.float64,0x3fdc7b9cc138f73a,0x3fd78f9c2360e661,1 +np.float64,0x7fe4079e97a80f3c,0x40862a83795f2443,1 +np.float64,0x8010000000000000,0x8010000000000000,1 +np.float64,0x7fe6da5345adb4a6,0x40862b9183c1e4b0,1 +np.float64,0xbfd0a76667214ecc,0xbfd34a1e0c1f6186,1 +np.float64,0x37fb0b906ff62,0x37fb0b906ff62,1 +np.float64,0x7fe170e59fa2e1ca,0x408629680a55e5c5,1 +np.float64,0x3fea900c77752019,0x3fe356eec75aa345,1 +np.float64,0x3fc575c63a2aeb8c,0x3fc3d701167d76b5,1 +np.float64,0x3fe8b45da87168bc,0x3fe24ecbb778fd44,1 +np.float64,0xbfcb990ab5373214,0xbfcf1596c076813c,1 +np.float64,0xf146fdfbe28e0,0xf146fdfbe28e0,1 +np.float64,0x8001fcd474c3f9aa,0x8001fcd474c3f9aa,1 +np.float64,0xbfe9b555eeb36aac,0xbffa0630c3bb485b,1 +np.float64,0x800f950be83f2a18,0x800f950be83f2a18,1 +np.float64,0x7feb0e03ab761c06,0x40862ceb30e36887,1 +np.float64,0x7fca51bd4a34a37a,0x4086219b9dfd35c9,1 +np.float64,0xbfdc27c34cb84f86,0xbfe28ccde8d6bc08,1 +np.float64,0x80009ce1714139c4,0x80009ce1714139c4,1 +np.float64,0x8005290fb1ea5220,0x8005290fb1ea5220,1 +np.float64,0xbfee81e6473d03cd,0xc00885972ca1699b,1 +np.float64,0x7fcfb11a373f6233,0x408623180b8f75d9,1 +np.float64,0xbfcb9c4bfd373898,0xbfcf19bd25881928,1 +np.float64,0x7feaec5885f5d8b0,0x40862ce136050e6c,1 +np.float64,0x8009e17a4a53c2f5,0x8009e17a4a53c2f5,1 +np.float64,0xbfe1cceb9e6399d7,0xbfea0038bd3def20,1 +np.float64,0x8009170bd7122e18,0x8009170bd7122e18,1 +np.float64,0xb2b6f7f1656df,0xb2b6f7f1656df,1 +np.float64,0x3fc75bfd1f2eb7f8,0x3fc574c858332265,1 +np.float64,0x3fa24c06ec249800,0x3fa1fa462ffcb8ec,1 +np.float64,0xaa9a4d2d5534a,0xaa9a4d2d5534a,1 +np.float64,0xbfd7b76208af6ec4,0xbfdda0c3200dcc9f,1 +np.float64,0x7f8cbab73039756d,0x40860c20cba57a94,1 +np.float64,0x3fdbcf9f48b79f3f,0x3fd71827a60e8b6d,1 +np.float64,0xbfdd60f71a3ac1ee,0xbfe3a94bc8cf134d,1 +np.float64,0xb9253589724a7,0xb9253589724a7,1 +np.float64,0xbfcf28e37e3e51c8,0xbfd1da9977b741e3,1 +np.float64,0x80011457f7e228b1,0x80011457f7e228b1,1 +np.float64,0x7fec33df737867be,0x40862d404a897122,1 +np.float64,0xae55f8f95cabf,0xae55f8f95cabf,1 +np.float64,0xbfc1ab9397235728,0xbfc303e5533d4a5f,1 +np.float64,0x7fef0f84b3be1f08,0x40862e05f9ba7118,1 +np.float64,0x7fdc94f328b929e5,0x408627d01449d825,1 +np.float64,0x3fee1b598c7c36b3,0x3fe53847be166834,1 +np.float64,0x3fee8326f37d064e,0x3fe56d96f3fbcf43,1 +np.float64,0x3fe7b18a83ef6316,0x3fe1bb6a6d48c675,1 +np.float64,0x3fe5db969c6bb72e,0x3fe0a8d7d151996c,1 +np.float64,0x3e3391d27c673,0x3e3391d27c673,1 +np.float64,0x3fe79a46d76f348e,0x3fe1ae09a96ea628,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0x7fe57d6505aafac9,0x40862b13925547f1,1 +np.float64,0x3fc433371d28666e,0x3fc2c196a764c47b,1 +np.float64,0x8008dbf69cd1b7ee,0x8008dbf69cd1b7ee,1 +np.float64,0xbfe744f459ee89e8,0xbff4c847ad3ee152,1 +np.float64,0x80098aa245331545,0x80098aa245331545,1 +np.float64,0x6747112ece8e3,0x6747112ece8e3,1 +np.float64,0x5d342a40ba69,0x5d342a40ba69,1 +np.float64,0xf7a17739ef42f,0xf7a17739ef42f,1 +np.float64,0x3fe1b34a9d236695,0x3fdc2d7c4e2c347a,1 +np.float64,0x7fb53bf5ec2a77eb,0x40861a585ec8f7ff,1 +np.float64,0xbfe6256f1cec4ade,0xbff2d89a36be65ae,1 +np.float64,0xb783bc9b6f078,0xb783bc9b6f078,1 +np.float64,0xbfedf74a3bfbee94,0xc0060bb6f2bc11ef,1 +np.float64,0x3fda2a5eccb454be,0x3fd5efd7f18b8e81,1 +np.float64,0xbfb3838ab2270718,0xbfb44c337fbca3c3,1 +np.float64,0x3fb4ac6dc22958e0,0x3fb3e194ca01a502,1 +np.float64,0x76c11aaaed824,0x76c11aaaed824,1 +np.float64,0x80025bb1af04b764,0x80025bb1af04b764,1 +np.float64,0x3fdc02740ab804e8,0x3fd73b8cd6f95f19,1 +np.float64,0x3fe71856f5ee30ae,0x3fe162e9fafb4428,1 +np.float64,0x800236f332646de7,0x800236f332646de7,1 +np.float64,0x7fe13fd9d2e27fb3,0x408629516b42a317,1 +np.float64,0x7fdf6bbd34bed779,0x40862892069d805c,1 +np.float64,0x3fd4727beba8e4f8,0x3fd1be5b48d9e282,1 +np.float64,0x800e0fac9e5c1f59,0x800e0fac9e5c1f59,1 +np.float64,0xfb54423ff6a89,0xfb54423ff6a89,1 +np.float64,0x800fbf7ed47f7efe,0x800fbf7ed47f7efe,1 +np.float64,0x3fe9d41fa2f3a840,0x3fe2ef98dc1fd463,1 +np.float64,0x800d733e805ae67d,0x800d733e805ae67d,1 +np.float64,0x3feebe4c46fd7c98,0x3fe58bcf7f47264e,1 +np.float64,0x7fe1ab77b5e356ee,0x40862982bb3dce34,1 +np.float64,0xbfdddac05abbb580,0xbfe41aa45f72d5a2,1 +np.float64,0x3fe14219dee28434,0x3fdb9b137d1f1220,1 +np.float64,0x3fe25d3d5a24ba7b,0x3fdd06e1cf32d35a,1 +np.float64,0x8000fa4fbe81f4a0,0x8000fa4fbe81f4a0,1 +np.float64,0x3fe303e23e6607c4,0x3fddd94982efa9f1,1 +np.float64,0x3fe89cf5d83139ec,0x3fe24193a2e12f75,1 +np.float64,0x3fe9b36ef87366de,0x3fe2dd7cdc25a4a5,1 +np.float64,0xbfdb8b38f8371672,0xbfe2023ba7e002bb,1 +np.float64,0xafc354955f86b,0xafc354955f86b,1 +np.float64,0xbfe2f3d49e65e7a9,0xbfecb557a94123d3,1 +np.float64,0x800496617c092cc4,0x800496617c092cc4,1 +np.float64,0x32db0cfa65b62,0x32db0cfa65b62,1 +np.float64,0xbfd893bfa2b12780,0xbfdf02a8c1e545aa,1 +np.float64,0x7fd5ac927d2b5924,0x408625997e7c1f9b,1 +np.float64,0x3fde9defb8bd3be0,0x3fd9056190986349,1 +np.float64,0x80030cfeb54619fe,0x80030cfeb54619fe,1 +np.float64,0x3fcba85b273750b8,0x3fc90a5ca976594f,1 +np.float64,0x3fe98f6f5cf31edf,0x3fe2c97fcb4eca25,1 +np.float64,0x3fe33dbf90667b80,0x3fde21b83321b993,1 +np.float64,0x3fe4686636e8d0cc,0x3fdf928cdca751b3,1 +np.float64,0x80018ade6ce315be,0x80018ade6ce315be,1 +np.float64,0x7fa9af70c8335ee1,0x408616528cd5a906,1 +np.float64,0x3fbeb460aa3d68c0,0x3fbcff96b00a2193,1 +np.float64,0x7fa82c869830590c,0x408615d6598d9368,1 +np.float64,0xd08c0e6fa1182,0xd08c0e6fa1182,1 +np.float64,0x3fef4eb750fe9d6f,0x3fe5d522fd4e7f64,1 +np.float64,0xbfc586f5492b0dec,0xbfc791eaae92aad1,1 +np.float64,0x7fede64ac7bbcc95,0x40862db7f444fa7b,1 +np.float64,0x3fe540003d6a8000,0x3fe04bdfc2916a0b,1 +np.float64,0x8009417fe6f28300,0x8009417fe6f28300,1 +np.float64,0x3fe6959cf16d2b3a,0x3fe116a1ce01887b,1 +np.float64,0x3fb0a40036214800,0x3fb01f447778219a,1 +np.float64,0x3feff26e91ffe4dd,0x3fe627798fc859a7,1 +np.float64,0x7fed8e46cd7b1c8d,0x40862da044a1d102,1 +np.float64,0x7fec4eb774f89d6e,0x40862d47e43edb53,1 +np.float64,0x3fe800e5e07001cc,0x3fe1e8e2b9105fc2,1 +np.float64,0x800f4eb2f9be9d66,0x800f4eb2f9be9d66,1 +np.float64,0x800611659bcc22cc,0x800611659bcc22cc,1 +np.float64,0x3fd66e65d2acdccc,0x3fd33ad63a5e1000,1 +np.float64,0x800a9085b7f5210c,0x800a9085b7f5210c,1 +np.float64,0x7fdf933a3fbf2673,0x4086289c0e292f2b,1 +np.float64,0x1cd1ba7a39a38,0x1cd1ba7a39a38,1 +np.float64,0xbfefd0b10fffa162,0xc0149ded900ed851,1 +np.float64,0xbfe8c63485b18c69,0xbff7cf3078b1574f,1 +np.float64,0x3fecde56ca79bcae,0x3fe4934afbd7dda9,1 +np.float64,0x8006cd6888cd9ad2,0x8006cd6888cd9ad2,1 +np.float64,0x3fd7a391c2af4724,0x3fd41e2f74df2329,1 +np.float64,0x3fe6a8ad58ed515a,0x3fe121ccfb28e6f5,1 +np.float64,0x7fe18a80dd631501,0x40862973c09086b9,1 +np.float64,0xbf74fd6d8029fb00,0xbf750b3e368ebe6b,1 +np.float64,0x3fdd35e93dba6bd4,0x3fd810071faaffad,1 +np.float64,0x3feb0d8f57361b1f,0x3fe39b3abdef8b7a,1 +np.float64,0xbfd5ec7288abd8e6,0xbfdad764df0d2ca1,1 +np.float64,0x7fdc848272b90904,0x408627cb78f3fb9e,1 +np.float64,0x800ed3eda91da7db,0x800ed3eda91da7db,1 +np.float64,0x3fefac64857f58c9,0x3fe60459dbaad1ba,1 +np.float64,0x3fd1df7a5ba3bef4,0x3fcf864a39b926ff,1 +np.float64,0xfe26ca4bfc4da,0xfe26ca4bfc4da,1 +np.float64,0xbfd1099f8da21340,0xbfd3cf6e6efe934b,1 +np.float64,0xbfe15de9a7a2bbd4,0xbfe909cc895f8795,1 +np.float64,0x3fe89714ed712e2a,0x3fe23e40d31242a4,1 +np.float64,0x800387113e470e23,0x800387113e470e23,1 +np.float64,0x3fe4f80730e9f00e,0x3fe0208219314cf1,1 +np.float64,0x2f95a97c5f2b6,0x2f95a97c5f2b6,1 +np.float64,0x800ea7cdd87d4f9c,0x800ea7cdd87d4f9c,1 +np.float64,0xbf64b967c0297300,0xbf64c020a145b7a5,1 +np.float64,0xbfc5a91a342b5234,0xbfc7bafd77a61d81,1 +np.float64,0xbfe2226fe76444e0,0xbfeac33eb1d1b398,1 +np.float64,0x3fc6aaa8d42d5552,0x3fc4de79f5c68cd4,1 +np.float64,0x3fe54fd4c1ea9faa,0x3fe05561a9a5922b,1 +np.float64,0x80029c1f75653840,0x80029c1f75653840,1 +np.float64,0xbfcb4a84a2369508,0xbfceb1a23bac3995,1 +np.float64,0x80010abeff02157f,0x80010abeff02157f,1 +np.float64,0x7f92d12cf825a259,0x40860e49bde3a5b6,1 +np.float64,0x800933e7027267ce,0x800933e7027267ce,1 +np.float64,0x3fc022b12e204562,0x3fbe64acc53ed887,1 +np.float64,0xbfe35f938de6bf27,0xbfedc1f3e443c016,1 +np.float64,0x1f8d9bae3f1b4,0x1f8d9bae3f1b4,1 +np.float64,0x3fe552f22ceaa5e4,0x3fe057404072350f,1 +np.float64,0xbfa73753442e6ea0,0xbfa7c24a100190f1,1 +np.float64,0x7fb3e2982827c52f,0x408619d1efa676b6,1 +np.float64,0xbfd80cb7a5301970,0xbfde28e65f344f33,1 +np.float64,0xbfcde835973bd06c,0xbfd10806fba46c8f,1 +np.float64,0xbfd4e3c749a9c78e,0xbfd949aff65de39c,1 +np.float64,0x3fcb4b9d6f36973b,0x3fc8be02ad6dc0d3,1 +np.float64,0x1a63000034c7,0x1a63000034c7,1 +np.float64,0x7fdc9c751e3938e9,0x408627d22df71959,1 +np.float64,0x3fd74f3f712e9e7f,0x3fd3e07df0c37ec1,1 +np.float64,0xbfceab74d33d56e8,0xbfd187e99bf82903,1 +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 +np.float64,0xbfb2cca466259948,0xbfb3868208e8de30,1 +np.float64,0x800204688b8408d2,0x800204688b8408d2,1 +np.float64,0x3e4547407c8aa,0x3e4547407c8aa,1 +np.float64,0xbfe4668846e8cd10,0xbff03c85189f3818,1 +np.float64,0x800dd350245ba6a0,0x800dd350245ba6a0,1 +np.float64,0xbfbc13c160382780,0xbfbdbd56ce996d16,1 +np.float64,0x7fe25a628a24b4c4,0x408629d06eb2d64d,1 +np.float64,0x3fd19dabbc233b57,0x3fcf1f3ed1d34c8c,1 +np.float64,0x547e20faa8fc5,0x547e20faa8fc5,1 +np.float64,0xbfe19392c6232726,0xbfe97ffe4f303335,1 +np.float64,0x3f87f9f6702ff400,0x3f87d64fb471bb04,1 +np.float64,0x9dfc52db3bf8b,0x9dfc52db3bf8b,1 +np.float64,0x800e1f5a9adc3eb5,0x800e1f5a9adc3eb5,1 +np.float64,0xbfddbd09c8bb7a14,0xbfe3fed7d7cffc70,1 +np.float64,0xbfeda71af87b4e36,0xc004e6631c514544,1 +np.float64,0xbfdbfcfe1bb7f9fc,0xbfe266b5d4a56265,1 +np.float64,0x3fe4ee78cd69dcf2,0x3fe01abba4e81fc9,1 +np.float64,0x800f13b820de2770,0x800f13b820de2770,1 +np.float64,0x3f861e09702c3c00,0x3f85ffae83b02c4f,1 +np.float64,0xbfc0972479212e48,0xbfc1c4bf70b30cbc,1 +np.float64,0x7fef057ef57e0afd,0x40862e036479f6a9,1 +np.float64,0x8bdbabe517b76,0x8bdbabe517b76,1 +np.float64,0xbfec495417f892a8,0xc0013ade88746d18,1 +np.float64,0x3fec680ab3f8d015,0x3fe454dd304b560d,1 +np.float64,0xbfae7ce60c3cf9d0,0xbfaf6eef15bbe56b,1 +np.float64,0x3fec314124786282,0x3fe437ca06294f5a,1 +np.float64,0x7fd5ed05b82bda0a,0x408625b125518e58,1 +np.float64,0x3feac9f02f3593e0,0x3fe3768104dd5cb7,1 +np.float64,0x0,0x0,1 +np.float64,0xbfddd2abd5bba558,0xbfe41312b8ea20de,1 +np.float64,0xbfedf9558c7bf2ab,0xc00613c53e0bb33a,1 +np.float64,0x3fef245ffefe48c0,0x3fe5bfb4dfe3b7a5,1 +np.float64,0x7fe178604922f0c0,0x4086296b77d5eaef,1 +np.float64,0x10000000000000,0x10000000000000,1 +np.float64,0x7fed026766ba04ce,0x40862d7a0dc45643,1 +np.float64,0xbfde27d8c3bc4fb2,0xbfe46336b6447697,1 +np.float64,0x3fe9485d9cb290bb,0x3fe2a1e4b6419423,1 +np.float64,0xbfe27b8a7464f715,0xbfeb9382f5b16f65,1 +np.float64,0x5c34d274b869b,0x5c34d274b869b,1 +np.float64,0xbfeee0b7453dc16f,0xc00acdb46459b6e6,1 +np.float64,0x7fe3dfb4d4e7bf69,0x40862a73785fdf12,1 +np.float64,0xb4635eef68c6c,0xb4635eef68c6c,1 +np.float64,0xbfe522a2c82a4546,0xbff148912a59a1d6,1 +np.float64,0x8009ba38a9737472,0x8009ba38a9737472,1 +np.float64,0xbfc056ff3820ae00,0xbfc17b2205fa180d,1 +np.float64,0x7fe1c8b8a0239170,0x4086298feeee6133,1 +np.float64,0x3fe2d2c6b9e5a58e,0x3fdd9b907471031b,1 +np.float64,0x3fa0a161bc2142c0,0x3fa05db36f6a073b,1 +np.float64,0x3fdef4268ebde84c,0x3fd93f980794d1e7,1 +np.float64,0x800ecd9fe2fd9b40,0x800ecd9fe2fd9b40,1 +np.float64,0xbfc9fbd45e33f7a8,0xbfcd0afc47c340f6,1 +np.float64,0x3fe8c3035b718606,0x3fe2570eb65551a1,1 +np.float64,0xbfe78c4ad2ef1896,0xbff54d25b3328742,1 +np.float64,0x8006f5dcf8adebbb,0x8006f5dcf8adebbb,1 +np.float64,0x800301dca2a603ba,0x800301dca2a603ba,1 +np.float64,0xad4289e55a851,0xad4289e55a851,1 +np.float64,0x80037764f9e6eecb,0x80037764f9e6eecb,1 +np.float64,0xbfe73575b26e6aec,0xbff4abfb5e985c62,1 +np.float64,0xbfc6cb91652d9724,0xbfc91a8001b33ec2,1 +np.float64,0xbfe3a918ffe75232,0xbfee7e6e4fd34c53,1 +np.float64,0x9bc84e2b3790a,0x9bc84e2b3790a,1 +np.float64,0x7fdeec303cbdd85f,0x408628714a49d996,1 +np.float64,0x3fe1d1dcb763a3ba,0x3fdc54ce060dc7f4,1 +np.float64,0x8008ae6432b15cc9,0x8008ae6432b15cc9,1 +np.float64,0x3fd8022fa2b00460,0x3fd46322bf02a609,1 +np.float64,0xbfc55b64472ab6c8,0xbfc75d9568f462e0,1 +np.float64,0xbfe8b165437162ca,0xbff7a15e2ead645f,1 +np.float64,0x7f759330feeb3,0x7f759330feeb3,1 +np.float64,0xbfd504f68eaa09ee,0xbfd97b06c01d7473,1 +np.float64,0x54702d5aa8e06,0x54702d5aa8e06,1 +np.float64,0xbfed1779337a2ef2,0xc0032f7109ef5a51,1 +np.float64,0xe248bd4dc4918,0xe248bd4dc4918,1 +np.float64,0xbfd8c59150318b22,0xbfdf53bca6ca8b1e,1 +np.float64,0xbfe3b9d942e773b2,0xbfeea9fcad277ba7,1 +np.float64,0x800934ec127269d9,0x800934ec127269d9,1 +np.float64,0xbfbb7f535a36fea8,0xbfbd16d61b6c52b8,1 +np.float64,0xccb185a199631,0xccb185a199631,1 +np.float64,0x3fe3dda76fe7bb4e,0x3fdee83bc6094301,1 +np.float64,0xbfe0c902f5e19206,0xbfe7ca7c0e888006,1 +np.float64,0xbfefeed08cbfdda1,0xc018aadc483c8724,1 +np.float64,0x7fd0c05c52a180b8,0x40862389daf64aac,1 +np.float64,0xbfd28e3323a51c66,0xbfd5e9ba278fb685,1 +np.float64,0xbef4103b7de82,0xbef4103b7de82,1 +np.float64,0x3fe7661fd12ecc40,0x3fe18ff7dfb696e2,1 +np.float64,0x3fddd5f2f0bbabe4,0x3fd87d8bb6719c3b,1 +np.float64,0x800b3914cfd6722a,0x800b3914cfd6722a,1 +np.float64,0xf3f09a97e7e14,0xf3f09a97e7e14,1 +np.float64,0x7f97092b502e1256,0x40860fe8054cf54e,1 +np.float64,0xbfdbec7917b7d8f2,0xbfe2580b4b792c79,1 +np.float64,0x7fe7ff215aaffe42,0x40862bf5887fa062,1 +np.float64,0x80080186e570030e,0x80080186e570030e,1 +np.float64,0xbfc27f05e624fe0c,0xbfc3fa214be4adc4,1 +np.float64,0x3fe4481be1689038,0x3fdf6b11e9c4ca72,1 +np.float64,0x3fd642cc9cac8598,0x3fd31a857fe70227,1 +np.float64,0xbef8782d7df0f,0xbef8782d7df0f,1 +np.float64,0x8003077dc2e60efc,0x8003077dc2e60efc,1 +np.float64,0x80083eb5a2507d6c,0x80083eb5a2507d6c,1 +np.float64,0x800e8d1eb77d1a3e,0x800e8d1eb77d1a3e,1 +np.float64,0xbfc7737cd22ee6f8,0xbfc9e7716f03f1fc,1 +np.float64,0xbfe9a2b4ddf3456a,0xbff9d71664a8fc78,1 +np.float64,0x7fe67c7d322cf8f9,0x40862b7066465194,1 +np.float64,0x3fec080ce2b8101a,0x3fe421dac225be46,1 +np.float64,0xbfe6d27beb6da4f8,0xbff3fbb1add521f7,1 +np.float64,0x3fdd4f96ceba9f2e,0x3fd821a638986dbe,1 +np.float64,0x3fbd89f1303b13e2,0x3fbbf49223a9d002,1 +np.float64,0xbfe94e2b9d329c57,0xbff907e549c534f5,1 +np.float64,0x3fe2f2cc51e5e599,0x3fddc3d6b4a834a1,1 +np.float64,0xfdcb5b49fb96c,0xfdcb5b49fb96c,1 +np.float64,0xbfea7108fa74e212,0xbffc01b392f4897b,1 +np.float64,0x3fd38baef7a7175c,0x3fd10e7fd3b958dd,1 +np.float64,0x3fa75bf9cc2eb800,0x3fa6d792ecdedb8e,1 +np.float64,0x7fd19fd20aa33fa3,0x408623f1e2cd04c3,1 +np.float64,0x3fd62c708dac58e0,0x3fd309ec7818d16e,1 +np.float64,0x3fdf489047be9120,0x3fd978640617c758,1 +np.float64,0x1,0x1,1 +np.float64,0xbfe21e7c3ea43cf8,0xbfeaba21320697d3,1 +np.float64,0xbfd3649047a6c920,0xbfd71a6f14223744,1 +np.float64,0xbfd68ca68c2d194e,0xbfdbcce6784e5d44,1 +np.float64,0x3fdb26b0ea364d62,0x3fd6a1f86f64ff74,1 +np.float64,0xbfd843821cb08704,0xbfde80e90805ab3f,1 +np.float64,0x3fd508a27aaa1144,0x3fd22fc203a7b9d8,1 +np.float64,0xbfdb951c7eb72a38,0xbfe20aeaec13699b,1 +np.float64,0x3fef556ba57eaad7,0x3fe5d8865cce0a6d,1 +np.float64,0x3fd0d224b3a1a448,0x3fcdde7be5d7e21e,1 +np.float64,0x8007ff272baffe4f,0x8007ff272baffe4f,1 +np.float64,0x3fe1c7bddf638f7c,0x3fdc47cc6cf2f5cd,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x2016d560402f,0x2016d560402f,1 +np.float64,0xbfcca10be9394218,0xbfd033f36b94fc54,1 +np.float64,0xbfdb833628b7066c,0xbfe1fb344b840c70,1 +np.float64,0x3fd8529cb3b0a539,0x3fd49d847fe77218,1 +np.float64,0xbfc0b0ebab2161d8,0xbfc1e260c60ffd1b,1 +np.float64,0xbfea8b9a79f51735,0xbffc4ee6be8a0fa2,1 +np.float64,0x7feca8fab7f951f4,0x40862d613e454646,1 +np.float64,0x7fd8c52d82318a5a,0x408626aaf37423a3,1 +np.float64,0xbfe364ad4526c95a,0xbfedcee39bc93ff5,1 +np.float64,0x800b78161256f02d,0x800b78161256f02d,1 +np.float64,0xbfd55f0153aabe02,0xbfda01a78f72d494,1 +np.float64,0x800315a5f0662b4d,0x800315a5f0662b4d,1 +np.float64,0x7fe4c0dca02981b8,0x40862acc27e4819f,1 +np.float64,0x8009825c703304b9,0x8009825c703304b9,1 +np.float64,0x3fe6e94e1cadd29c,0x3fe1478ccc634f49,1 +np.float64,0x7fe622d8586c45b0,0x40862b504177827e,1 +np.float64,0x3fe4458600688b0c,0x3fdf67e79a84b953,1 +np.float64,0xbfdd75d8a1baebb2,0xbfe3bc9e6ca1bbb5,1 +np.float64,0x3fde789c6bbcf138,0x3fd8ec1d435531b3,1 +np.float64,0x3fe7052b94ee0a58,0x3fe157c5c4418dc1,1 +np.float64,0x7fef31652abe62c9,0x40862e0eaeabcfc0,1 +np.float64,0x3fe279691ee4f2d2,0x3fdd2aa41eb43cd4,1 +np.float64,0xbfd533fa95aa67f6,0xbfd9c12f516d29d7,1 +np.float64,0x3fe6d057f96da0b0,0x3fe138fd96693a6a,1 +np.float64,0x800bad984f775b31,0x800bad984f775b31,1 +np.float64,0x7fdd6fdba4badfb6,0x4086280c73d8ef97,1 +np.float64,0x7fe9b5c0eef36b81,0x40862c82c6f57a53,1 +np.float64,0x8000bc02ece17807,0x8000bc02ece17807,1 +np.float64,0xbff0000000000000,0xfff0000000000000,1 +np.float64,0xbfed430be3fa8618,0xc003aaf338c75b3c,1 +np.float64,0x3fee17b759fc2f6f,0x3fe53668696bf48b,1 +np.float64,0x3f8d4cf9d03a9a00,0x3f8d17d2f532afdc,1 +np.float64,0x8005d6257b8bac4c,0x8005d6257b8bac4c,1 +np.float64,0xbfd17a6df9a2f4dc,0xbfd469e3848adc6e,1 +np.float64,0xb28a293965145,0xb28a293965145,1 +np.float64,0xbfe7d011e42fa024,0xbff5cf818998c8ec,1 +np.float64,0xbfe74f0f136e9e1e,0xbff4dad6ebb0443c,1 +np.float64,0x800f249fc9be4940,0x800f249fc9be4940,1 +np.float64,0x2542f8fe4a860,0x2542f8fe4a860,1 +np.float64,0xc48d40cd891a8,0xc48d40cd891a8,1 +np.float64,0x3fe4e64bc8e9cc98,0x3fe015c9eb3caa53,1 +np.float64,0x3fd33881eca67104,0x3fd0cea886be2457,1 +np.float64,0xbfd01748fba02e92,0xbfd28875959e6901,1 +np.float64,0x7fb7ab01f22f5603,0x40861b369927bf53,1 +np.float64,0xbfe340274ce6804e,0xbfed72b39f0ebb24,1 +np.float64,0x7fc16c0c3422d817,0x40861e4eaf1a286c,1 +np.float64,0x3fc26944a324d288,0x3fc133a77b356ac4,1 +np.float64,0xa149d7134293b,0xa149d7134293b,1 +np.float64,0x800837382d106e71,0x800837382d106e71,1 +np.float64,0x797d1740f2fa4,0x797d1740f2fa4,1 +np.float64,0xc3f15b7787e2c,0xc3f15b7787e2c,1 +np.float64,0x80cad1b90195a,0x80cad1b90195a,1 +np.float64,0x3fdd8f1142bb1e23,0x3fd84d21490d1ce6,1 +np.float64,0xbfbde6c9123bcd90,0xbfbfcc030a86836a,1 +np.float64,0x8007f77e032feefd,0x8007f77e032feefd,1 +np.float64,0x3fe74fed1c6e9fda,0x3fe18322cf19cb61,1 +np.float64,0xbfd8a40bbcb14818,0xbfdf1d23520ba74b,1 +np.float64,0xbfeb7a0e6076f41d,0xbfff4ddfb926efa5,1 +np.float64,0xbfcb8c5f663718c0,0xbfcf0570f702bda9,1 +np.float64,0xf668cd97ecd1a,0xf668cd97ecd1a,1 +np.float64,0xbfe92accf572559a,0xbff8b4393878ffdb,1 +np.float64,0xbfeaa955567552ab,0xbffca70c7d73eee5,1 +np.float64,0xbfe083a14f610742,0xbfe739d84bc35077,1 +np.float64,0x78290568f0521,0x78290568f0521,1 +np.float64,0x3fe94bae2372975c,0x3fe2a3beac5c9858,1 +np.float64,0x3fca4fbab9349f78,0x3fc7edbca2492acb,1 +np.float64,0x8000000000000000,0x8000000000000000,1 +np.float64,0x7fb9eb505433d6a0,0x40861bf0adedb74d,1 +np.float64,0x7fdc66f72a38cded,0x408627c32aeecf0f,1 +np.float64,0x2e8e6f445d1cf,0x2e8e6f445d1cf,1 +np.float64,0xbfec43195af88633,0xc0012d7e3f91b7e8,1 +np.float64,0x7fcdb971e93b72e3,0x40862294c9e3a7bc,1 +np.float64,0x800cabc461195789,0x800cabc461195789,1 +np.float64,0x2c79709c58f2f,0x2c79709c58f2f,1 +np.float64,0x8005d772d3cbaee6,0x8005d772d3cbaee6,1 +np.float64,0x3fe84d8c03709b18,0x3fe21490ce3673dd,1 +np.float64,0x7fe5578adc2aaf15,0x40862b056e8437d4,1 +np.float64,0xbf91298c58225320,0xbf914ec86c32d11f,1 +np.float64,0xc7ed2b6d8fda6,0xc7ed2b6d8fda6,1 +np.float64,0x2761404c4ec29,0x2761404c4ec29,1 +np.float64,0x3fbad3c48835a789,0x3fb9833c02385305,1 +np.float64,0x3fa46fee5428dfe0,0x3fa40a357fb24c23,1 +np.float64,0xbfe3900c6fe72019,0xbfee3dba29dd9d43,1 +np.float64,0x3fe7a9e41a6f53c8,0x3fe1b704dfb9884b,1 +np.float64,0xbfe74a7a1eee94f4,0xbff4d269cacb1f29,1 +np.float64,0xbfee609c72fcc139,0xc007da8499d34123,1 +np.float64,0x3fef2d5fc23e5ac0,0x3fe5c44414e59cb4,1 +np.float64,0xbfd7bdc0402f7b80,0xbfddaae1e7bb78fb,1 +np.float64,0xd71ee01dae3dc,0xd71ee01dae3dc,1 +np.float64,0x3fe98cbcdef3197a,0x3fe2c7ffe33c4541,1 +np.float64,0x8000f8dbb3a1f1b8,0x8000f8dbb3a1f1b8,1 +np.float64,0x3fe3e98ad567d316,0x3fdef6e58058313f,1 +np.float64,0x41ad0bfc835a2,0x41ad0bfc835a2,1 +np.float64,0x7fdcc2dc0d3985b7,0x408627dce39f77af,1 +np.float64,0xbfe47b980de8f730,0xbff059acdccd6e2b,1 +np.float64,0xbfef49b6577e936d,0xc00e714f46b2ccc1,1 +np.float64,0x3fac31816c386300,0x3fab71cb92b0db8f,1 +np.float64,0x3fe59097e76b2130,0x3fe07c299fd1127c,1 +np.float64,0xbfecf0df5cf9e1bf,0xc002c7ebdd65039c,1 +np.float64,0x3fd2b7d0b6a56fa1,0x3fd06b638990ae02,1 +np.float64,0xbfeb68deecf6d1be,0xbfff1187e042d3e4,1 +np.float64,0x3fd44a9771a8952f,0x3fd1a01867c5e302,1 +np.float64,0xf79a9dedef354,0xf79a9dedef354,1 +np.float64,0x800c25a170d84b43,0x800c25a170d84b43,1 +np.float64,0x3ff0000000000000,0x3fe62e42fefa39ef,1 +np.float64,0x3fbff4f7623fe9f0,0x3fbe1d3878f4c417,1 +np.float64,0xd284c845a5099,0xd284c845a5099,1 +np.float64,0xbfe3c7815f678f02,0xbfeecdab5ca2e651,1 +np.float64,0x3fc19c934e233927,0x3fc08036104b1f23,1 +np.float64,0x800b6096de16c12e,0x800b6096de16c12e,1 +np.float64,0xbfe962a67e32c54d,0xbff9392313a112a1,1 +np.float64,0x2b9d0116573a1,0x2b9d0116573a1,1 +np.float64,0x3fcab269ed3564d4,0x3fc83f7e1c3095b7,1 +np.float64,0x3fc8c78d86318f1b,0x3fc6a6cde5696f99,1 +np.float64,0xd5b1e9b5ab63d,0xd5b1e9b5ab63d,1 +np.float64,0xbfed802a47fb0054,0xc00465cad3b5b0ef,1 +np.float64,0xbfd73aaf08ae755e,0xbfdcdbd62b8af271,1 +np.float64,0xbfd4f13c0229e278,0xbfd95dacff79e570,1 +np.float64,0xbfe9622808f2c450,0xbff937f13c397e8d,1 +np.float64,0xbfeddfa62efbbf4c,0xc005b0c835eed829,1 +np.float64,0x3fd65663d4acacc8,0x3fd3290cd0e675dc,1 +np.float64,0x8005e890f1abd123,0x8005e890f1abd123,1 +np.float64,0xbfe924919fb24923,0xbff8a5a827a28756,1 +np.float64,0x3fe8cdf490719be9,0x3fe25d39535e8366,1 +np.float64,0x7fc229e6ff2453cd,0x40861ea40ef87a5a,1 +np.float64,0x3fe5cf53ceeb9ea8,0x3fe0a18e0b65f27e,1 +np.float64,0xa79cf6fb4f39f,0xa79cf6fb4f39f,1 +np.float64,0x7fddbb3c0f3b7677,0x40862820d5edf310,1 +np.float64,0x3e1011de7c203,0x3e1011de7c203,1 +np.float64,0x3fc0b59a83216b38,0x3fbf6916510ff411,1 +np.float64,0x8647f98d0c8ff,0x8647f98d0c8ff,1 +np.float64,0x8005dad33ecbb5a7,0x8005dad33ecbb5a7,1 +np.float64,0x8a80d0631501a,0x8a80d0631501a,1 +np.float64,0xbfe18f7d6ee31efb,0xbfe976f06713afc1,1 +np.float64,0xbfe06eaed560dd5e,0xbfe70eac696933e6,1 +np.float64,0xbfed8ef93c7b1df2,0xc00495bfa3195b53,1 +np.float64,0x3febe9c24677d385,0x3fe411b10db16c42,1 +np.float64,0x7fd5d80c1fabb017,0x408625a97a7787ba,1 +np.float64,0x3fca79b59334f368,0x3fc8108a521341dc,1 +np.float64,0xbfccf8db4339f1b8,0xbfd06c9a5424aadb,1 +np.float64,0xbfea5ac5a574b58b,0xbffbc21d1405d840,1 +np.float64,0x800ce2bf4b19c57f,0x800ce2bf4b19c57f,1 +np.float64,0xbfe8df896d31bf13,0xbff807ab38ac41ab,1 +np.float64,0x3feab83da9f5707c,0x3fe36cdd827c0eff,1 +np.float64,0x3fee717683bce2ed,0x3fe564879171719b,1 +np.float64,0x80025e5577c4bcac,0x80025e5577c4bcac,1 +np.float64,0x3fe3e5378e67ca70,0x3fdef1902c5d1efd,1 +np.float64,0x3fa014bb7c202980,0x3f9faacf9238d499,1 +np.float64,0x3fddbf5e16bb7ebc,0x3fd86e2311cb0f6d,1 +np.float64,0x3fd24e50e6a49ca0,0x3fd0198f04f82186,1 +np.float64,0x656b5214cad6b,0x656b5214cad6b,1 +np.float64,0x8b0a4bfd1614a,0x8b0a4bfd1614a,1 +np.float64,0xbfeeb6bd9e7d6d7b,0xc009b669285e319e,1 +np.float64,0x8000000000000001,0x8000000000000001,1 +np.float64,0xbfe719feceee33fe,0xbff47a4c8cbf0cca,1 +np.float64,0xbfd14fa8c8a29f52,0xbfd42f27b1aced39,1 +np.float64,0x7fec9dcb80f93b96,0x40862d5e1e70bbb9,1 +np.float64,0x7fecacb826f9596f,0x40862d6249746915,1 +np.float64,0x973459f52e68b,0x973459f52e68b,1 +np.float64,0x7f40a59e00214b3b,0x4085f194f45f82b1,1 +np.float64,0x7fc5dbaec32bb75d,0x4086201f3e7065d9,1 +np.float64,0x82d0801305a10,0x82d0801305a10,1 +np.float64,0x7fec81c0f4790381,0x40862d5643c0fc85,1 +np.float64,0xbfe2d81e9ee5b03d,0xbfec71a8e864ea40,1 +np.float64,0x6c545c9ad8a8c,0x6c545c9ad8a8c,1 +np.float64,0x3f9be95a5037d2b5,0x3f9b89b48ac8f5d8,1 +np.float64,0x8000cae9702195d4,0x8000cae9702195d4,1 +np.float64,0xbfd375f45126ebe8,0xbfd733677e54a80d,1 +np.float64,0x3fd29a5b81a534b7,0x3fd05494bf200278,1 +np.float64,0xfff0000000000000,0xfff8000000000000,1 +np.float64,0x7fca8fc195351f82,0x408621ae61aa6c13,1 +np.float64,0x1b28e2ae3651d,0x1b28e2ae3651d,1 +np.float64,0x3fe7fdbd14effb7a,0x3fe1e714884b46a8,1 +np.float64,0x3fdf1ce068be39c0,0x3fd95b054e0fad3d,1 +np.float64,0x3fe79f9a636f3f34,0x3fe1b11a40c00b3e,1 +np.float64,0x3fe60eb7036c1d6e,0x3fe0c72a02176874,1 +np.float64,0x229da17e453b5,0x229da17e453b5,1 +np.float64,0x3fc1a921b5235240,0x3fc08b3f35e47fb1,1 +np.float64,0xbb92d2af7725b,0xbb92d2af7725b,1 +np.float64,0x3fe4110cb1e8221a,0x3fdf2787de6c73f7,1 +np.float64,0xbfbc87771a390ef0,0xbfbe3f6e95622363,1 +np.float64,0xbfe74025dfee804c,0xbff4bf7b1895e697,1 +np.float64,0x964eb6592c9d7,0x964eb6592c9d7,1 +np.float64,0x3f951689b82a2d00,0x3f94dfb38d746fdf,1 +np.float64,0x800356271be6ac4f,0x800356271be6ac4f,1 +np.float64,0x7fefffffffffffff,0x40862e42fefa39ef,1 +np.float64,0xbfed5ce250fab9c5,0xc003f7ddfeb94345,1 +np.float64,0x3fec3d5dc1387abc,0x3fe43e39c02d86f4,1 +np.float64,0x3999897e73332,0x3999897e73332,1 +np.float64,0xbfdcb57744b96aee,0xbfe30c4b98f3d088,1 +np.float64,0x7f961fb0b82c3f60,0x40860f9549c3a380,1 +np.float64,0x67d6efcacfadf,0x67d6efcacfadf,1 +np.float64,0x8002c9498f859294,0x8002c9498f859294,1 +np.float64,0xbfa3033800260670,0xbfa35fe3bf43e188,1 +np.float64,0xbfeab2fc157565f8,0xbffcc413c486b4eb,1 +np.float64,0x3fe25e62f364bcc6,0x3fdd0856e19e3430,1 +np.float64,0x7fb2f42dda25e85b,0x4086196fb34a65fd,1 +np.float64,0x3fe0f1a5af61e34c,0x3fdb3235a1786efb,1 +np.float64,0x800a340ca1f4681a,0x800a340ca1f4681a,1 +np.float64,0x7c20b9def8418,0x7c20b9def8418,1 +np.float64,0xdf0842a1be109,0xdf0842a1be109,1 +np.float64,0x3fe9f22cc2f3e45a,0x3fe300359b842bf0,1 +np.float64,0x3fe389ed73e713da,0x3fde809780fe4432,1 +np.float64,0x9500fb932a020,0x9500fb932a020,1 +np.float64,0x3fd8a21ffdb14440,0x3fd4d70862345d86,1 +np.float64,0x800d99c15cbb3383,0x800d99c15cbb3383,1 +np.float64,0x3fd96c98c932d932,0x3fd568959c9b028f,1 +np.float64,0x7fc228483a24508f,0x40861ea358420976,1 +np.float64,0x7fc6737bef2ce6f7,0x408620560ffc6a98,1 +np.float64,0xbfb2c27cee2584f8,0xbfb37b8cc7774b5f,1 +np.float64,0xbfd18409f9230814,0xbfd4771d1a9a24fb,1 +np.float64,0x3fb53cb3f42a7968,0x3fb466f06f88044b,1 +np.float64,0x3fef61d0187ec3a0,0x3fe5dec8a9d13dd9,1 +np.float64,0x3fe59a6ffd2b34e0,0x3fe0820a99c6143d,1 +np.float64,0x3fce18aff43c3160,0x3fcb07c7b523f0d1,1 +np.float64,0xbfb1319a62226338,0xbfb1cc62f31b2b40,1 +np.float64,0xa00cce6d4019a,0xa00cce6d4019a,1 +np.float64,0x80068ae8e0ed15d3,0x80068ae8e0ed15d3,1 +np.float64,0x3fecef353239de6a,0x3fe49c280adc607b,1 +np.float64,0x3fdf1a7fb0be34ff,0x3fd9596bafe2d766,1 +np.float64,0x3feb5e12eeb6bc26,0x3fe3c6be3ede8d07,1 +np.float64,0x3fdeff5cd43dfeba,0x3fd947262ec96b05,1 +np.float64,0x3f995e75e832bd00,0x3f990f511f4c7f1c,1 +np.float64,0xbfeb5b3ed0b6b67e,0xbffee24fc0fc2881,1 +np.float64,0x7fb82aad0a305559,0x40861b614d901182,1 +np.float64,0xbfe5c3a4926b8749,0xbff23cd0ad144fe6,1 +np.float64,0x3fef47da373e8fb4,0x3fe5d1aaa4031993,1 +np.float64,0x7fc6a8c3872d5186,0x40862068f5ca84be,1 +np.float64,0x7fc0c2276221844e,0x40861dff2566d001,1 +np.float64,0x7fc9ce7d28339cf9,0x40862173541f84d1,1 +np.float64,0x3fce2c34933c5869,0x3fcb179428ad241d,1 +np.float64,0xbfcf864c293f0c98,0xbfd21872c4821cfc,1 +np.float64,0x3fc51fd1f82a3fa4,0x3fc38d4f1685c166,1 +np.float64,0xbfe2707b70a4e0f7,0xbfeb795fbd5bb444,1 +np.float64,0x46629b568cc54,0x46629b568cc54,1 +np.float64,0x7fe5f821f32bf043,0x40862b40c2cdea3f,1 +np.float64,0x3fedd2c9457ba592,0x3fe512ce92394526,1 +np.float64,0x7fe6dcb8ceadb971,0x40862b925a7dc05d,1 +np.float64,0x3fd1b983b4a37307,0x3fcf4ae2545cf64e,1 +np.float64,0xbfe1c93104639262,0xbfe9f7d28e4c0c82,1 +np.float64,0x995ebc2932bd8,0x995ebc2932bd8,1 +np.float64,0x800a4c3ee614987e,0x800a4c3ee614987e,1 +np.float64,0x3fbb58766e36b0f0,0x3fb9fb3b9810ec16,1 +np.float64,0xbfe36d636666dac7,0xbfede5080f69053c,1 +np.float64,0x3f4feee1003fddc2,0x3f4feae5f05443d1,1 +np.float64,0x3fed0b772ffa16ee,0x3fe4aafb924903c6,1 +np.float64,0x800bb3faef3767f6,0x800bb3faef3767f6,1 +np.float64,0x3fe285cda5e50b9c,0x3fdd3a58df06c427,1 +np.float64,0x7feb9d560bb73aab,0x40862d152362bb94,1 +np.float64,0x3fecd1f447f9a3e9,0x3fe48cc78288cb3f,1 +np.float64,0x3fca927b0c3524f6,0x3fc8250f49ba28df,1 +np.float64,0x7fcc19944e383328,0x40862221b02fcf43,1 +np.float64,0xbfd8ddf41db1bbe8,0xbfdf7b92073ff2fd,1 +np.float64,0x80006fe736e0dfcf,0x80006fe736e0dfcf,1 +np.float64,0x800bbeb66d577d6d,0x800bbeb66d577d6d,1 +np.float64,0xbfe4329353e86526,0xbfefeaf19ab92b42,1 +np.float64,0x2fad72805f5af,0x2fad72805f5af,1 +np.float64,0x3fe1b827aa637050,0x3fdc33bf46012c0d,1 +np.float64,0x3fc3f3f8e227e7f2,0x3fc28aeb86d65278,1 +np.float64,0x3fec018933780312,0x3fe41e619aa4285c,1 +np.float64,0xbfd92428e0b24852,0xbfdfeecb08d154df,1 +np.float64,0x2d7046845ae0a,0x2d7046845ae0a,1 +np.float64,0x7fde7fd2233cffa3,0x408628550f8a948f,1 +np.float64,0x8000a32cd241465a,0x8000a32cd241465a,1 +np.float64,0x8004267a45084cf5,0x8004267a45084cf5,1 +np.float64,0xbfe6b422556d6844,0xbff3c71f67661e6e,1 +np.float64,0x3fe3a37d922746fb,0x3fdea04e04d6195c,1 +np.float64,0xbfddcc54b53b98aa,0xbfe40d2389cdb848,1 +np.float64,0x3fe18b4b92a31697,0x3fdbf9e68cbf5794,1 +np.float64,0x7fc9c5b2ee338b65,0x408621709a17a47a,1 +np.float64,0x1ebd1ce03d7b,0x1ebd1ce03d7b,1 +np.float64,0x8008a6fc39d14df9,0x8008a6fc39d14df9,1 +np.float64,0x3fec11384c782270,0x3fe426bdaedd2965,1 +np.float64,0x3fefc28344ff8507,0x3fe60f75d34fc3d2,1 +np.float64,0xc35f379786be7,0xc35f379786be7,1 +np.float64,0x3feef51f4a7dea3e,0x3fe5a7b95d7786b5,1 +np.float64,0x3fec9b9f0379373e,0x3fe4702477abbb63,1 +np.float64,0x3fde94f8cdbd29f0,0x3fd8ff50f7df0a6f,1 +np.float64,0xbfed32d1cdfa65a4,0xc0037c1470f6f979,1 +np.float64,0x800d3ba44f5a7749,0x800d3ba44f5a7749,1 +np.float64,0x3fe3c56c8fe78ad9,0x3fdeca4eb9bb8918,1 +np.float64,0xbfe7c97242ef92e4,0xbff5c2950dfd6f69,1 +np.float64,0xbd9440057b288,0xbd9440057b288,1 +np.float64,0x7feb2fc111f65f81,0x40862cf524bd2001,1 +np.float64,0x800a431e2df4863d,0x800a431e2df4863d,1 +np.float64,0x80038a3b79e71478,0x80038a3b79e71478,1 +np.float64,0x80000c93d4601928,0x80000c93d4601928,1 +np.float64,0x7fe9fec022f3fd7f,0x40862c995db8ada0,1 +np.float64,0x3fead0129c35a025,0x3fe379d7a92c8f79,1 +np.float64,0x3fdd8cbaf7bb1974,0x3fd84b87ff0c26c7,1 +np.float64,0x3fe8fb7c60b1f6f9,0x3fe276d5339e7135,1 +np.float64,0x85a255e10b44b,0x85a255e10b44b,1 +np.float64,0xbfe507c23fea0f84,0xbff1212d2260022a,1 +np.float64,0x3fc5487c7b2a90f9,0x3fc3b03222d3d148,1 +np.float64,0x7fec0bdcb8f817b8,0x40862d34e8fd11e7,1 +np.float64,0xbfc5f34b4f2be698,0xbfc8146a899c7a0c,1 +np.float64,0xbfa2a49c14254940,0xbfa2fdab2eae3826,1 +np.float64,0x800ec52f15dd8a5e,0x800ec52f15dd8a5e,1 +np.float64,0xbfe3ba4b12a77496,0xbfeeab256b3e9422,1 +np.float64,0x80034d6c7ba69ada,0x80034d6c7ba69ada,1 +np.float64,0x7fd394d4202729a7,0x408624c98a216742,1 +np.float64,0xbfd4493a38289274,0xbfd865d67af2de91,1 +np.float64,0xe47d6203c8fad,0xe47d6203c8fad,1 +np.float64,0x98eb4e4b31d6a,0x98eb4e4b31d6a,1 +np.float64,0x4507fb128a100,0x4507fb128a100,1 +np.float64,0xbfc77032e42ee064,0xbfc9e36ab747a14d,1 +np.float64,0xa1f8a03b43f14,0xa1f8a03b43f14,1 +np.float64,0xbfc3d4da8527a9b4,0xbfc58c27af2476b0,1 +np.float64,0x3fc0eb7d6921d6fb,0x3fbfc858a077ed61,1 +np.float64,0x7fddb2e9403b65d2,0x4086281e98443709,1 +np.float64,0xbfa7ea62942fd4c0,0xbfa87dfd06b05d2a,1 +np.float64,0xbfe7d5c5426fab8a,0xbff5daa969c6d9e5,1 +np.float64,0x3fbf7cba0c3ef974,0x3fbdb23cd8fe875b,1 +np.float64,0x7fe92021eb324043,0x40862c53aee8b154,1 +np.float64,0x7fefbaa1827f7542,0x40862e3194737072,1 +np.float64,0x3fc6f82c402df059,0x3fc520432cbc533f,1 +np.float64,0x7fb37679a826ecf2,0x408619a5f857e27f,1 +np.float64,0x79ec1528f3d83,0x79ec1528f3d83,1 +np.float64,0x3fbefe1d0c3dfc3a,0x3fbd41650ba2c893,1 +np.float64,0x3fc3e5e11827cbc2,0x3fc27eb9b47c9c42,1 +np.float64,0x16aed1922d5db,0x16aed1922d5db,1 +np.float64,0x800124f7e58249f1,0x800124f7e58249f1,1 +np.float64,0x8004f7d12489efa3,0x8004f7d12489efa3,1 +np.float64,0x3fef80b8e27f0172,0x3fe5ee5fd43322c6,1 +np.float64,0xbfe7740c88eee819,0xbff51f823c8da14d,1 +np.float64,0xbfe6e1f1f6edc3e4,0xbff416bcb1302e7c,1 +np.float64,0x8001a2c4a7e3458a,0x8001a2c4a7e3458a,1 +np.float64,0x3fe861e155f0c3c2,0x3fe2201d3000c329,1 +np.float64,0x3fd00a101a201420,0x3fcca01087dbd728,1 +np.float64,0x7fdf0eb1133e1d61,0x4086287a327839b8,1 +np.float64,0x95e3ffdb2bc80,0x95e3ffdb2bc80,1 +np.float64,0x3fd87a1e8230f43d,0x3fd4ba1eb9be1270,1 +np.float64,0x3fedc4792afb88f2,0x3fe50b6529080f73,1 +np.float64,0x7fc9e81fa833d03e,0x4086217b428cc6ff,1 +np.float64,0xbfd21f1ba5a43e38,0xbfd54e048b988e09,1 +np.float64,0xbfbf52af5a3ea560,0xbfc0b4ab3b81fafc,1 +np.float64,0x7fe475f8e268ebf1,0x40862aaf14fee029,1 +np.float64,0x3fcf56899f3ead10,0x3fcc081de28ae9cf,1 +np.float64,0x917d407122fa8,0x917d407122fa8,1 +np.float64,0x22e23e3245c49,0x22e23e3245c49,1 +np.float64,0xbfeec2814f3d8503,0xc00a00ecca27b426,1 +np.float64,0xbfd97fee1c32ffdc,0xbfe04351dfe306ec,1 diff --git a/local_packages/numpy/_core/tests/data/umath-validation-set-log2.csv b/local_packages/numpy/_core/tests/data/umath-validation-set-log2.csv new file mode 100644 index 0000000..26921ef --- /dev/null +++ b/local_packages/numpy/_core/tests/data/umath-validation-set-log2.csv @@ -0,0 +1,1629 @@ +dtype,input,output,ulperrortol +np.float32,0x80000000,0xff800000,3 +np.float32,0x7f12870a,0x42fe63db,3 +np.float32,0x3ef29cf5,0xbf89eb12,3 +np.float32,0x3d6ba8fb,0xc083d26c,3 +np.float32,0x3d9907e8,0xc06f8230,3 +np.float32,0x4ee592,0xc2fd656e,3 +np.float32,0x58d8b1,0xc2fd0db3,3 +np.float32,0x7ba103,0xc2fc19aa,3 +np.float32,0x7f52e90e,0x42ff70e4,3 +np.float32,0x7fcb15,0xc2fc0132,3 +np.float32,0x7cb7129f,0x42f50855,3 +np.float32,0x9faba,0xc301ae59,3 +np.float32,0x7f300a,0xc2fc04b4,3 +np.float32,0x3f0bf047,0xbf5f10cb,3 +np.float32,0x2fb1fb,0xc2fed934,3 +np.float32,0x3eedb0d1,0xbf8db417,3 +np.float32,0x3d7a0b40,0xc0811638,3 +np.float32,0x2e0bac,0xc2fef334,3 +np.float32,0x6278c1,0xc2fcc1b9,3 +np.float32,0x7f61ab2e,0x42ffa2d9,3 +np.float32,0x8fe7c,0xc301d4be,3 +np.float32,0x3f25e6ee,0xbf203536,3 +np.float32,0x7efc78f0,0x42fdf5c0,3 +np.float32,0x6d7304,0xc2fc73a7,3 +np.float32,0x7f1a472a,0x42fe89ed,3 +np.float32,0x7dd029a6,0x42f96734,3 +np.float32,0x3e9b9327,0xbfdbf8f7,3 +np.float32,0x3f4eefc1,0xbe9d2942,3 +np.float32,0x7f5b9b64,0x42ff8ebc,3 +np.float32,0x3e458ee1,0xc017ed6e,3 +np.float32,0x3f7b766b,0xbcd35acf,3 +np.float32,0x3e616070,0xc00bc378,3 +np.float32,0x7f20e633,0x42fea8f8,3 +np.float32,0x3ee3b461,0xbf95a126,3 +np.float32,0x7e7722ba,0x42fbe5f8,3 +np.float32,0x3f0873d7,0xbf6861fa,3 +np.float32,0x7b4cb2,0xc2fc1ba3,3 +np.float32,0x3f0b6b02,0xbf60712e,3 +np.float32,0x9bff4,0xc301b6f2,3 +np.float32,0x3f07be25,0xbf6a4f0c,3 +np.float32,0x3ef10e57,0xbf8b1b75,3 +np.float32,0x46ad75,0xc2fdb6b1,3 +np.float32,0x3f7bc542,0xbcc4e3a9,3 +np.float32,0x3f6673d4,0xbe1b509c,3 +np.float32,0x7f19fe59,0x42fe8890,3 +np.float32,0x7f800000,0x7f800000,3 +np.float32,0x7f2fe696,0x42feead0,3 +np.float32,0x3dc9432d,0xc0563655,3 +np.float32,0x3ee47623,0xbf950446,3 +np.float32,0x3f1f8817,0xbf2eab51,3 +np.float32,0x7f220ec5,0x42feae44,3 +np.float32,0x2325e3,0xc2ffbab1,3 +np.float32,0x29dfc8,0xc2ff395a,3 +np.float32,0x7f524950,0x42ff6eb3,3 +np.float32,0x3e2234e0,0xc02a21c8,3 +np.float32,0x7f1c6f5a,0x42fe942f,3 +np.float32,0x3b6a61,0xc2fe36e7,3 +np.float32,0x3f1df90e,0xbf324ba9,3 +np.float32,0xb57f0,0xc3017f07,3 +np.float32,0x7d0eba,0xc2fc112e,3 +np.float32,0x403aa9,0xc2fdfd5c,3 +np.float32,0x3e74ecc7,0xc004155f,3 +np.float32,0x17509c,0xc30074f2,3 +np.float32,0x7f62196b,0x42ffa442,3 +np.float32,0x3ecef9a9,0xbfa7417a,3 +np.float32,0x7f14b158,0x42fe6eb1,3 +np.float32,0x3ede12be,0xbf9a40fe,3 +np.float32,0x42cfaa,0xc2fde03f,3 +np.float32,0x3f407b0f,0xbed2a6f5,3 +np.float32,0x7f7fffff,0x43000000,3 +np.float32,0x5467c6,0xc2fd3394,3 +np.float32,0x7ea6b80f,0x42fcc336,3 +np.float32,0x3f21e7b2,0xbf293704,3 +np.float32,0x3dc7e9eb,0xc056d542,3 +np.float32,0x7f3e6e67,0x42ff2571,3 +np.float32,0x3e3e809d,0xc01b4911,3 +np.float32,0x3f800000,0x0,3 +np.float32,0x3d8fd238,0xc0753d52,3 +np.float32,0x3f74aa65,0xbd85cd0e,3 +np.float32,0x7ec30305,0x42fd36ff,3 +np.float32,0x3e97bb93,0xbfe0971d,3 +np.float32,0x3e109d9c,0xc034bb1b,3 +np.float32,0x3f4a0b67,0xbeaed537,3 +np.float32,0x3f25a7aa,0xbf20c228,3 +np.float32,0x3ebc05eb,0xbfb8fd6b,3 +np.float32,0x3eebe749,0xbf8f18e5,3 +np.float32,0x3e9dc479,0xbfd96356,3 +np.float32,0x7f245200,0x42feb882,3 +np.float32,0x1573a8,0xc30093b5,3 +np.float32,0x3e66c4b9,0xc00994a6,3 +np.float32,0x3e73bffc,0xc0048709,3 +np.float32,0x3dfef8e5,0xc0405f16,3 +np.float32,0x403750,0xc2fdfd83,3 +np.float32,0x3ebedf17,0xbfb636a4,3 +np.float32,0x15cae6,0xc3008de2,3 +np.float32,0x3edf4d4e,0xbf993c24,3 +np.float32,0x3f7cc41e,0xbc963fb3,3 +np.float32,0x3e9e12a4,0xbfd907ee,3 +np.float32,0x7ded7b59,0x42f9c889,3 +np.float32,0x7f034878,0x42fe12b5,3 +np.float32,0x7ddce43f,0x42f9930b,3 +np.float32,0x3d82b257,0xc07e1333,3 +np.float32,0x3dae89c1,0xc0635dd4,3 +np.float32,0x6b1d00,0xc2fc8396,3 +np.float32,0x449a5a,0xc2fdccb3,3 +np.float32,0x4e89d2,0xc2fd68cb,3 +np.float32,0x7e1ae83f,0x42fa8cef,3 +np.float32,0x7e4bb22c,0x42fb572e,3 +np.float32,0x3de308ea,0xc04b1634,3 +np.float32,0x7f238c7a,0x42feb508,3 +np.float32,0x3f6c62a3,0xbdeb86f3,3 +np.float32,0x3e58cba6,0xc00f5908,3 +np.float32,0x7f7dd91f,0x42fff9c4,3 +np.float32,0x3d989376,0xc06fc88d,3 +np.float32,0x3dd013c5,0xc0532339,3 +np.float32,0x4b17e6,0xc2fd89ed,3 +np.float32,0x7f67f287,0x42ffb71e,3 +np.float32,0x3f69365e,0xbe09ba3c,3 +np.float32,0x3e4b8b21,0xc0152bf1,3 +np.float32,0x3a75b,0xc3032171,3 +np.float32,0x7f303676,0x42feec1f,3 +np.float32,0x7f6570e5,0x42ffaf18,3 +np.float32,0x3f5ed61e,0xbe4cf676,3 +np.float32,0x3e9b22f9,0xbfdc7e4f,3 +np.float32,0x2c095e,0xc2ff1428,3 +np.float32,0x3f1b17c1,0xbf391754,3 +np.float32,0x422dc6,0xc2fde746,3 +np.float32,0x3f677c8d,0xbe14b365,3 +np.float32,0x3ef85d0c,0xbf8597a9,3 +np.float32,0x3ecaaa6b,0xbfab2430,3 +np.float32,0x3f0607d1,0xbf6eff3d,3 +np.float32,0x3f011fdb,0xbf7cc50d,3 +np.float32,0x6ed7c1,0xc2fc6a4e,3 +np.float32,0x7ec2d1a2,0x42fd3644,3 +np.float32,0x3f75b7fe,0xbd7238a2,3 +np.float32,0x3ef2d146,0xbf89c344,3 +np.float32,0x7ec2cd27,0x42fd3633,3 +np.float32,0x7ee1e55a,0x42fda397,3 +np.float32,0x7f464d6a,0x42ff435c,3 +np.float32,0x7f469a93,0x42ff447b,3 +np.float32,0x7ece752f,0x42fd6121,3 +np.float32,0x2ed878,0xc2fee67b,3 +np.float32,0x75b23,0xc3021eff,3 +np.float32,0x3e0f4be4,0xc03593b8,3 +np.float32,0x2778e1,0xc2ff64fc,3 +np.float32,0x5fe2b7,0xc2fcd561,3 +np.float32,0x19b8a9,0xc30050ab,3 +np.float32,0x7df303e5,0x42f9d98d,3 +np.float32,0x608b8d,0xc2fcd051,3 +np.float32,0x588f46,0xc2fd1017,3 +np.float32,0x3eec6a11,0xbf8eb2a1,3 +np.float32,0x3f714121,0xbdaf4906,3 +np.float32,0x7f4f7b9e,0x42ff64c9,3 +np.float32,0x3c271606,0xc0d3b29c,3 +np.float32,0x3f002fe0,0xbf7f75f6,3 +np.float32,0x7efa4798,0x42fdef4f,3 +np.float32,0x3f61a865,0xbe3a601a,3 +np.float32,0x7e8087aa,0x42fc030d,3 +np.float32,0x3f70f0c7,0xbdb321ba,3 +np.float32,0x5db898,0xc2fce63f,3 +np.float32,0x7a965f,0xc2fc1fea,3 +np.float32,0x7f68b112,0x42ffb97c,3 +np.float32,0x7ef0ed3d,0x42fdd32d,3 +np.float32,0x7f3156a1,0x42fef0d3,3 +np.float32,0x3f1d405f,0xbf33fc6e,3 +np.float32,0x3e3494cf,0xc0203945,3 +np.float32,0x6018de,0xc2fcd3c1,3 +np.float32,0x623e49,0xc2fcc370,3 +np.float32,0x3ea29f0f,0xbfd3cad4,3 +np.float32,0xa514,0xc305a20c,3 +np.float32,0x3e1b2ab1,0xc02e3a8f,3 +np.float32,0x3f450b6f,0xbec1578f,3 +np.float32,0x7eb12908,0x42fcf015,3 +np.float32,0x3f10b720,0xbf52ab48,3 +np.float32,0x3e0a93,0xc2fe16f6,3 +np.float32,0x93845,0xc301cb96,3 +np.float32,0x7f4e9ce3,0x42ff61af,3 +np.float32,0x3f6d4296,0xbde09ceb,3 +np.float32,0x6ddede,0xc2fc70d0,3 +np.float32,0x3f4fb6fd,0xbe9a636d,3 +np.float32,0x3f6d08de,0xbde36c0b,3 +np.float32,0x3f56f057,0xbe8122ad,3 +np.float32,0x334e95,0xc2fea349,3 +np.float32,0x7efadbcd,0x42fdf104,3 +np.float32,0x3db02e88,0xc0628046,3 +np.float32,0x3f3309d1,0xbf041066,3 +np.float32,0x2d8722,0xc2fefb8f,3 +np.float32,0x7e926cac,0x42fc6356,3 +np.float32,0x3e3674ab,0xc01f452e,3 +np.float32,0x1b46ce,0xc3003afc,3 +np.float32,0x3f06a338,0xbf6d53fc,3 +np.float32,0x1b1ba7,0xc3003d46,3 +np.float32,0x319dfb,0xc2febc06,3 +np.float32,0x3e2f126a,0xc02315a5,3 +np.float32,0x3f40fe65,0xbed0af9e,3 +np.float32,0x3f1d842f,0xbf335d4b,3 +np.float32,0x3d044e4f,0xc09e78f8,3 +np.float32,0x7f272674,0x42fec51f,3 +np.float32,0x3cda6d8f,0xc0a753db,3 +np.float32,0x3eb92f12,0xbfbbccbb,3 +np.float32,0x7e4318f4,0x42fb3752,3 +np.float32,0x3c5890,0xc2fe2b6d,3 +np.float32,0x3d1993c9,0xc09796f8,3 +np.float32,0x7f18ef24,0x42fe8377,3 +np.float32,0x3e30c3a0,0xc0223244,3 +np.float32,0x3f27cd27,0xbf1c00ef,3 +np.float32,0x3f150957,0xbf47cd6c,3 +np.float32,0x7e7178a3,0x42fbd4d8,3 +np.float32,0x3f298db8,0xbf182ac3,3 +np.float32,0x7cb3be,0xc2fc1348,3 +np.float32,0x3ef64266,0xbf8729de,3 +np.float32,0x3eeb06ce,0xbf8fc8f2,3 +np.float32,0x3f406e36,0xbed2d845,3 +np.float32,0x7f1e1bd3,0x42fe9c0b,3 +np.float32,0x478dcc,0xc2fdad97,3 +np.float32,0x7f7937b5,0x42ffec2b,3 +np.float32,0x3f20f350,0xbf2b6624,3 +np.float32,0x7f13661a,0x42fe683c,3 +np.float32,0x208177,0xc2fff46b,3 +np.float32,0x263cfb,0xc2ff7c72,3 +np.float32,0x7f0bd28c,0x42fe4141,3 +np.float32,0x7230d8,0xc2fc5453,3 +np.float32,0x3f261bbf,0xbf1fbfb4,3 +np.float32,0x737b56,0xc2fc4c05,3 +np.float32,0x3ef88f33,0xbf857263,3 +np.float32,0x7e036464,0x42fa1352,3 +np.float32,0x4b5c4f,0xc2fd874d,3 +np.float32,0x3f77984d,0xbd454596,3 +np.float32,0x3f674202,0xbe162932,3 +np.float32,0x3e7157d9,0xc0057197,3 +np.float32,0x3f3f21da,0xbed7d861,3 +np.float32,0x7f1fb40f,0x42fea375,3 +np.float32,0x7ef0157f,0x42fdd096,3 +np.float32,0x3f71e88d,0xbda74962,3 +np.float32,0x3f174855,0xbf424728,3 +np.float32,0x3f3fdd2c,0xbed505d5,3 +np.float32,0x7b95d1,0xc2fc19ed,3 +np.float32,0x7f23f4e5,0x42feb6df,3 +np.float32,0x7d741925,0x42f7dcd6,3 +np.float32,0x60f81d,0xc2fccd14,3 +np.float32,0x3f17d267,0xbf40f6ae,3 +np.float32,0x3f036fc8,0xbf7636f8,3 +np.float32,0x167653,0xc30082b5,3 +np.float32,0x256d05,0xc2ff8c4f,3 +np.float32,0x3eccc63d,0xbfa93adb,3 +np.float32,0x7f6c91ea,0x42ffc5b2,3 +np.float32,0x2ee52a,0xc2fee5b3,3 +np.float32,0x3dc3579e,0xc058f80d,3 +np.float32,0x4c7170,0xc2fd7cc4,3 +np.float32,0x7f737f20,0x42ffdb03,3 +np.float32,0x3f2f9dbf,0xbf0b3119,3 +np.float32,0x3f4d0c54,0xbea3eec5,3 +np.float32,0x7e380862,0x42fb0c32,3 +np.float32,0x5d637f,0xc2fce8df,3 +np.float32,0x3f0aa623,0xbf627c27,3 +np.float32,0x3e4d5896,0xc0145b88,3 +np.float32,0x3f6cacdc,0xbde7e7ca,3 +np.float32,0x63a2c3,0xc2fcb90a,3 +np.float32,0x6c138c,0xc2fc7cfa,3 +np.float32,0x2063c,0xc303fb88,3 +np.float32,0x7e9e5a3e,0x42fc9d2f,3 +np.float32,0x56ec64,0xc2fd1ddd,3 +np.float32,0x7f1d6a35,0x42fe98cc,3 +np.float32,0x73dc96,0xc2fc4998,3 +np.float32,0x3e5d74e5,0xc00d6238,3 +np.float32,0x7f033cbb,0x42fe1273,3 +np.float32,0x3f5143fc,0xbe94e4e7,3 +np.float32,0x1d56d9,0xc3002010,3 +np.float32,0x2bf3e4,0xc2ff1591,3 +np.float32,0x3f2a6ef1,0xbf164170,3 +np.float32,0x3f33238b,0xbf03db58,3 +np.float32,0x22780e,0xc2ffc91a,3 +np.float32,0x7f00b873,0x42fe0425,3 +np.float32,0x3f7f6145,0xbb654706,3 +np.float32,0x7fc00000,0x7fc00000,3 +np.float32,0x63895a,0xc2fcb9c7,3 +np.float32,0x18a1b2,0xc30060a8,3 +np.float32,0x7e43c6a6,0x42fb39e3,3 +np.float32,0x78676e,0xc2fc2d30,3 +np.float32,0x3f16d839,0xbf435940,3 +np.float32,0x7eff78ba,0x42fdfe79,3 +np.float32,0x3f2e152c,0xbf0e6e54,3 +np.float32,0x3db20ced,0xc06186e1,3 +np.float32,0x3f0cd1d8,0xbf5cbf57,3 +np.float32,0x3fd7a8,0xc2fe01d2,3 +np.float32,0x3ebb075e,0xbfb9f816,3 +np.float32,0x7f94ef,0xc2fc026b,3 +np.float32,0x3d80ba0e,0xc07f7a2b,3 +np.float32,0x7f227e15,0x42feb03f,3 +np.float32,0x792264bf,0x42e6afcc,3 +np.float32,0x7f501576,0x42ff66ec,3 +np.float32,0x223629,0xc2ffcea3,3 +np.float32,0x40a79e,0xc2fdf87b,3 +np.float32,0x449483,0xc2fdccf2,3 +np.float32,0x3f4fa978,0xbe9a9382,3 +np.float32,0x7f148c53,0x42fe6df9,3 +np.float32,0x3ec98b3c,0xbfac2a98,3 +np.float32,0x3e4da320,0xc0143a0a,3 +np.float32,0x3d1d94bb,0xc09666d0,3 +np.float32,0x3c8e624e,0xc0bb155b,3 +np.float32,0x66a9af,0xc2fca2ef,3 +np.float32,0x3ec76ed7,0xbfae1c57,3 +np.float32,0x3f4b52f3,0xbeaa2b81,3 +np.float32,0x7e99bbb5,0x42fc8750,3 +np.float32,0x3f69a46b,0xbe0701be,3 +np.float32,0x3f775400,0xbd4ba495,3 +np.float32,0x131e56,0xc300be3c,3 +np.float32,0x3f30abb4,0xbf08fb10,3 +np.float32,0x7f7e528c,0x42fffb25,3 +np.float32,0x3eb89515,0xbfbc668a,3 +np.float32,0x7e9191b6,0x42fc5f02,3 +np.float32,0x7e80c7e9,0x42fc047e,3 +np.float32,0x3f77ef58,0xbd3d2995,3 +np.float32,0x7ddb1f8a,0x42f98d1b,3 +np.float32,0x7ebc6c4f,0x42fd1d9c,3 +np.float32,0x3f6638e0,0xbe1ccab8,3 +np.float32,0x7f4c45,0xc2fc0410,3 +np.float32,0x3e7d8aad,0xc000e414,3 +np.float32,0x3f4d148b,0xbea3d12e,3 +np.float32,0x3e98c45c,0xbfdf55f4,3 +np.float32,0x3d754c78,0xc081f8a9,3 +np.float32,0x17e4cf,0xc3006be3,3 +np.float32,0x7eb65814,0x42fd0563,3 +np.float32,0x3f65e0d8,0xbe1f0008,3 +np.float32,0x3e99541f,0xbfdea87e,3 +np.float32,0x3f3cb80e,0xbee13b27,3 +np.float32,0x3e99f0c0,0xbfddec3b,3 +np.float32,0x3f43903e,0xbec6ea66,3 +np.float32,0x7e211cd4,0x42faa9f2,3 +np.float32,0x824af,0xc301f971,3 +np.float32,0x3e16a56e,0xc030f56c,3 +np.float32,0x542b3b,0xc2fd35a6,3 +np.float32,0x3eeea2d1,0xbf8cf873,3 +np.float32,0x232e93,0xc2ffb9fa,3 +np.float32,0x3e8c52b9,0xbfef06aa,3 +np.float32,0x7f69c7e3,0x42ffbcef,3 +np.float32,0x3f573e43,0xbe801714,3 +np.float32,0x43b009,0xc2fdd69f,3 +np.float32,0x3ee571ab,0xbf943966,3 +np.float32,0x3ee3d5d8,0xbf958604,3 +np.float32,0x338b12,0xc2fe9fe4,3 +np.float32,0x29cb1f,0xc2ff3ac6,3 +np.float32,0x3f0892b4,0xbf680e7a,3 +np.float32,0x3e8c4f7f,0xbfef0ae9,3 +np.float32,0x7c9d3963,0x42f497e6,3 +np.float32,0x3f26ba84,0xbf1e5f59,3 +np.float32,0x3dd0acc0,0xc052df6f,3 +np.float32,0x3e43fbda,0xc018aa8c,3 +np.float32,0x3ec4fd0f,0xbfb0635d,3 +np.float32,0x3f52c8c6,0xbe8f8d85,3 +np.float32,0x3f5fdc5d,0xbe462fdb,3 +np.float32,0x3f461920,0xbebd6743,3 +np.float32,0x6161ff,0xc2fcc9ef,3 +np.float32,0x7f7ed306,0x42fffc9a,3 +np.float32,0x3d212263,0xc0955f46,3 +np.float32,0x3eca5826,0xbfab6f36,3 +np.float32,0x7d6317ac,0x42f7a77e,3 +np.float32,0x3eb02063,0xbfc50f60,3 +np.float32,0x7f71a6f8,0x42ffd565,3 +np.float32,0x1a3efe,0xc3004935,3 +np.float32,0x3dc599c9,0xc057e856,3 +np.float32,0x3f3e1301,0xbedbf205,3 +np.float32,0xf17d4,0xc301158d,3 +np.float32,0x3f615f84,0xbe3c3d85,3 +np.float32,0x3de63be1,0xc049cb77,3 +np.float32,0x3e8d2f51,0xbfede541,3 +np.float32,0x3a5cdd,0xc2fe441c,3 +np.float32,0x3f443ec0,0xbec4586a,3 +np.float32,0x3eacbd00,0xbfc8a5ad,3 +np.float32,0x3f600f6a,0xbe44df1b,3 +np.float32,0x5f77a6,0xc2fcd89c,3 +np.float32,0x476706,0xc2fdaf28,3 +np.float32,0x2f469,0xc3036fde,3 +np.float32,0x7dc4ba24,0x42f93d77,3 +np.float32,0x3e2d6080,0xc023fb9b,3 +np.float32,0x7e8d7135,0x42fc49c3,3 +np.float32,0x3f589065,0xbe77247b,3 +np.float32,0x3f59e210,0xbe6e2c05,3 +np.float32,0x7f51d388,0x42ff6d15,3 +np.float32,0x7d9a5fda,0x42f88a63,3 +np.float32,0x3e67d5bc,0xc00927ab,3 +np.float32,0x61d72c,0xc2fcc679,3 +np.float32,0x3ef3351d,0xbf897766,3 +np.float32,0x1,0xc3150000,3 +np.float32,0x7f653429,0x42ffae54,3 +np.float32,0x7e1ad3e5,0x42fa8c8e,3 +np.float32,0x3f4ca01d,0xbea57500,3 +np.float32,0x3f7606db,0xbd6ad13e,3 +np.float32,0x7ec4a27d,0x42fd3d1f,3 +np.float32,0x3efe4fd5,0xbf8138c7,3 +np.float32,0x77c2f1,0xc2fc3124,3 +np.float32,0x7e4d3251,0x42fb5c9a,3 +np.float32,0x3f543ac7,0xbe8a8154,3 +np.float32,0x7c3dbe29,0x42f322c4,3 +np.float32,0x408e01,0xc2fdf9a0,3 +np.float32,0x45069b,0xc2fdc829,3 +np.float32,0x3d7ecab7,0xc08037e8,3 +np.float32,0xf8c22,0xc3010a99,3 +np.float32,0x7f69af63,0x42ffbca2,3 +np.float32,0x7ec7d228,0x42fd48fe,3 +np.float32,0xff800000,0xffc00000,3 +np.float32,0xdd7c5,0xc301357c,3 +np.float32,0x143f38,0xc300a90e,3 +np.float32,0x7e65c176,0x42fbb01b,3 +np.float32,0x2c1a9e,0xc2ff1307,3 +np.float32,0x7f6e9224,0x42ffcbeb,3 +np.float32,0x3d32ab39,0xc0909a77,3 +np.float32,0x3e150b42,0xc031f22b,3 +np.float32,0x1f84b4,0xc300059a,3 +np.float32,0x3f71ce21,0xbda88c2a,3 +np.float32,0x2625c4,0xc2ff7e33,3 +np.float32,0x3dd0b293,0xc052dcdc,3 +np.float32,0x625c11,0xc2fcc290,3 +np.float32,0x3f610297,0xbe3e9f24,3 +np.float32,0x7ebdd5e5,0x42fd2320,3 +np.float32,0x3e883458,0xbff486ff,3 +np.float32,0x782313,0xc2fc2ed4,3 +np.float32,0x7f39c843,0x42ff132f,3 +np.float32,0x7f326aa7,0x42fef54d,3 +np.float32,0x4d2c71,0xc2fd75be,3 +np.float32,0x3f55747c,0xbe86409e,3 +np.float32,0x7f7f0867,0x42fffd34,3 +np.float32,0x321316,0xc2feb53f,3 +np.float32,0x3e1b37ed,0xc02e32b0,3 +np.float32,0x80edf,0xc301fd54,3 +np.float32,0x3f0b08ad,0xbf617607,3 +np.float32,0x7f3f4174,0x42ff28a2,3 +np.float32,0x3d79306d,0xc0813eb0,3 +np.float32,0x3f5f657a,0xbe49413d,3 +np.float32,0x3f56c63a,0xbe81b376,3 +np.float32,0x7f667123,0x42ffb24f,3 +np.float32,0x3f71021b,0xbdb24d43,3 +np.float32,0x7f434ab1,0x42ff380f,3 +np.float32,0x3dcae496,0xc055779c,3 +np.float32,0x3f5a7d88,0xbe6a0f5b,3 +np.float32,0x3cdf5c32,0xc0a64bf5,3 +np.float32,0x3e56222c,0xc0107d11,3 +np.float32,0x561a3a,0xc2fd24df,3 +np.float32,0x7ddd953c,0x42f9955a,3 +np.float32,0x7e35d839,0x42fb035c,3 +np.float32,0x3ec1816c,0xbfb3aeb2,3 +np.float32,0x7c87cfcd,0x42f42bc2,3 +np.float32,0xd9cd,0xc3053baf,3 +np.float32,0x3f388234,0xbef1e5b7,3 +np.float32,0x3edfcaca,0xbf98d47b,3 +np.float32,0x3ef28852,0xbf89fac8,3 +np.float32,0x7f7525df,0x42ffe001,3 +np.float32,0x7f6c33ef,0x42ffc48c,3 +np.float32,0x3ea4a881,0xbfd17e61,3 +np.float32,0x3f3e379f,0xbedb63c6,3 +np.float32,0x3f0524c1,0xbf717301,3 +np.float32,0x3db3e7f0,0xc06091d3,3 +np.float32,0x800000,0xc2fc0000,3 +np.float32,0x3f2f2897,0xbf0c27ce,3 +np.float32,0x7eb1776d,0x42fcf15c,3 +np.float32,0x3f039018,0xbf75dc37,3 +np.float32,0x3c4055,0xc2fe2c96,3 +np.float32,0x3f603653,0xbe43dea5,3 +np.float32,0x7f700d24,0x42ffd07c,3 +np.float32,0x3f4741a3,0xbeb918dc,3 +np.float32,0x3f5fe959,0xbe45da2d,3 +np.float32,0x3f3e4401,0xbedb33b1,3 +np.float32,0x7f0705ff,0x42fe2775,3 +np.float32,0x3ea85662,0xbfcd69b0,3 +np.float32,0x3f15f49f,0xbf458829,3 +np.float32,0x3f17c50e,0xbf411728,3 +np.float32,0x3e483f60,0xc016add2,3 +np.float32,0x3f1ab9e5,0xbf39f71b,3 +np.float32,0x3de0b6fb,0xc04c08fe,3 +np.float32,0x7e671225,0x42fbb452,3 +np.float32,0x80800000,0xffc00000,3 +np.float32,0xe2df3,0xc3012c9d,3 +np.float32,0x3ede1e3c,0xbf9a3770,3 +np.float32,0x3df2ffde,0xc044cfec,3 +np.float32,0x3eed8da5,0xbf8dcf6c,3 +np.float32,0x3ead15c3,0xbfc846e1,3 +np.float32,0x7ef3750a,0x42fddae4,3 +np.float32,0x7e6ab7c0,0x42fbbfe4,3 +np.float32,0x7ea4bbe5,0x42fcba5d,3 +np.float32,0x3f227706,0xbf27f0a1,3 +np.float32,0x3ef39bfd,0xbf89295a,3 +np.float32,0x3f289a20,0xbf1a3edd,3 +np.float32,0x7f225f82,0x42feafb4,3 +np.float32,0x768963,0xc2fc38bc,3 +np.float32,0x3f493c00,0xbeb1ccfc,3 +np.float32,0x3f4e7249,0xbe9ee9a7,3 +np.float32,0x1d0c3a,0xc30023c0,3 +np.float32,0x7f3c5f78,0x42ff1d6a,3 +np.float32,0xff7fffff,0xffc00000,3 +np.float32,0x3ee7896a,0xbf928c2a,3 +np.float32,0x3e788479,0xc002bd2e,3 +np.float32,0x3ee4df17,0xbf94af84,3 +np.float32,0x5e06d7,0xc2fce3d7,3 +np.float32,0x3d7b2776,0xc080e1dc,3 +np.float32,0x3e3d39d3,0xc01be7fd,3 +np.float32,0x7c81dece,0x42f40ab7,3 +np.float32,0x3f7d2085,0xbc856255,3 +np.float32,0x7f7f6627,0x42fffe44,3 +np.float32,0x7f5f2e94,0x42ff9aaa,3 +np.float32,0x7f5835f2,0x42ff8339,3 +np.float32,0x3f6a0e32,0xbe046580,3 +np.float32,0x7e16f586,0x42fa79dd,3 +np.float32,0x3f04a2f2,0xbf72dbc5,3 +np.float32,0x3f35e334,0xbefc7740,3 +np.float32,0x3f0d056e,0xbf5c3824,3 +np.float32,0x7ebeb95e,0x42fd2693,3 +np.float32,0x3c6192,0xc2fe2aff,3 +np.float32,0x3e892b4f,0xbff33958,3 +np.float32,0x3f61d694,0xbe3931df,3 +np.float32,0x29d183,0xc2ff3a56,3 +np.float32,0x7f0b0598,0x42fe3d04,3 +np.float32,0x7f743b28,0x42ffdd3d,3 +np.float32,0x3a2ed6,0xc2fe4663,3 +np.float32,0x3e27403a,0xc0274de8,3 +np.float32,0x3f58ee78,0xbe74a349,3 +np.float32,0x3eaa4b,0xc2fe0f92,3 +np.float32,0x3ecb613b,0xbfaa7de8,3 +np.float32,0x7f637d81,0x42ffa8c9,3 +np.float32,0x3f026e96,0xbf790c73,3 +np.float32,0x386cdf,0xc2fe5d0c,3 +np.float32,0x35abd1,0xc2fe8202,3 +np.float32,0x3eac3cd1,0xbfc92ee8,3 +np.float32,0x3f567869,0xbe82bf47,3 +np.float32,0x3f65c643,0xbe1faae6,3 +np.float32,0x7f5422b9,0x42ff752b,3 +np.float32,0x7c26e9,0xc2fc168c,3 +np.float32,0x7eff5cfd,0x42fdfe29,3 +np.float32,0x3f728e7f,0xbd9f6142,3 +np.float32,0x3f10fd43,0xbf51f874,3 +np.float32,0x7e7ada08,0x42fbf0fe,3 +np.float32,0x3e82a611,0xbffc37be,3 +np.float32,0xbf800000,0xffc00000,3 +np.float32,0x3dbe2e12,0xc05b711c,3 +np.float32,0x7e768fa9,0x42fbe440,3 +np.float32,0x5e44e8,0xc2fce1f0,3 +np.float32,0x7f25071a,0x42febbae,3 +np.float32,0x3f54db5e,0xbe885339,3 +np.float32,0x3f0f2c26,0xbf56a0b8,3 +np.float32,0x22f9a7,0xc2ffbe55,3 +np.float32,0x7ed63dcb,0x42fd7c77,3 +np.float32,0x7ea4fae2,0x42fcbb78,3 +np.float32,0x3f1d7766,0xbf337b47,3 +np.float32,0x7f16d59f,0x42fe7941,3 +np.float32,0x3f3a1bb6,0xbeeb855c,3 +np.float32,0x3ef57128,0xbf87c709,3 +np.float32,0xb24ff,0xc3018591,3 +np.float32,0x3ef99e27,0xbf84a983,3 +np.float32,0x3eac2ccf,0xbfc94013,3 +np.float32,0x3e9d3e1e,0xbfda00dc,3 +np.float32,0x718213,0xc2fc58c1,3 +np.float32,0x7edbf509,0x42fd8fea,3 +np.float32,0x70c7f1,0xc2fc5d80,3 +np.float32,0x3f7012f5,0xbdbdc6cd,3 +np.float32,0x12cba,0xc304c487,3 +np.float32,0x7f5d445d,0x42ff944c,3 +np.float32,0x7f3e30bd,0x42ff2481,3 +np.float32,0x63b110,0xc2fcb8a0,3 +np.float32,0x3f39f728,0xbeec1680,3 +np.float32,0x3f5bea58,0xbe6074b1,3 +np.float32,0x3f350749,0xbefff679,3 +np.float32,0x3e91ab2c,0xbfe81f3e,3 +np.float32,0x7ec53fe0,0x42fd3f6d,3 +np.float32,0x3f6cbbdc,0xbde72c8e,3 +np.float32,0x3f4df49f,0xbea0abcf,3 +np.float32,0x3e9c9638,0xbfdac674,3 +np.float32,0x7f3b82ec,0x42ff1a07,3 +np.float32,0x7f612a09,0x42ffa132,3 +np.float32,0x7ea26650,0x42fcafd3,3 +np.float32,0x3a615138,0xc122f26d,3 +np.float32,0x3f1108bd,0xbf51db39,3 +np.float32,0x6f80f6,0xc2fc65ea,3 +np.float32,0x3f7cb578,0xbc98ecb1,3 +np.float32,0x7f54d31a,0x42ff7790,3 +np.float32,0x196868,0xc3005532,3 +np.float32,0x3f01ee0a,0xbf7a7925,3 +np.float32,0x3e184013,0xc02ffb11,3 +np.float32,0xadde3,0xc3018ee3,3 +np.float32,0x252a91,0xc2ff9173,3 +np.float32,0x3f0382c2,0xbf7601a9,3 +np.float32,0x6d818c,0xc2fc7345,3 +np.float32,0x3bfbfd,0xc2fe2fdd,3 +np.float32,0x7f3cad19,0x42ff1e9a,3 +np.float32,0x4169a7,0xc2fdefdf,3 +np.float32,0x3f615d96,0xbe3c4a2b,3 +np.float32,0x3f036480,0xbf7656ac,3 +np.float32,0x7f5fbda3,0x42ff9c83,3 +np.float32,0x3d202d,0xc2fe21f1,3 +np.float32,0x3d0f5e5d,0xc09ac3e9,3 +np.float32,0x3f0fff6e,0xbf548142,3 +np.float32,0x7f11ed32,0x42fe60d2,3 +np.float32,0x3e6f856b,0xc00624b6,3 +np.float32,0x7f7c4dd7,0x42fff542,3 +np.float32,0x3e76fb86,0xc0034fa0,3 +np.float32,0x3e8a0d6e,0xbff209e7,3 +np.float32,0x3eacad19,0xbfc8b6ad,3 +np.float32,0xa7776,0xc3019cbe,3 +np.float32,0x3dc84d74,0xc056a754,3 +np.float32,0x3efb8052,0xbf834626,3 +np.float32,0x3f0e55fc,0xbf58cacc,3 +np.float32,0x7e0e71e3,0x42fa4efb,3 +np.float32,0x3ed5a800,0xbfa1639c,3 +np.float32,0x3f33335b,0xbf03babf,3 +np.float32,0x38cad7,0xc2fe5842,3 +np.float32,0x3bc21256,0xc0ecc927,3 +np.float32,0x3f09522d,0xbf660a19,3 +np.float32,0xcbd5d,0xc3015428,3 +np.float32,0x492752,0xc2fd9d42,3 +np.float32,0x3f2b9b32,0xbf13b904,3 +np.float32,0x6544ac,0xc2fcad09,3 +np.float32,0x52eb12,0xc2fd40b5,3 +np.float32,0x3f66a7c0,0xbe1a03e8,3 +np.float32,0x7ab289,0xc2fc1f41,3 +np.float32,0x62af5e,0xc2fcc020,3 +np.float32,0x7f73e9cf,0x42ffdc46,3 +np.float32,0x3e5eca,0xc2fe130e,3 +np.float32,0x3e3a10f4,0xc01d7602,3 +np.float32,0x3f04db46,0xbf723f0d,3 +np.float32,0x18fc4a,0xc3005b63,3 +np.float32,0x525bcb,0xc2fd45b6,3 +np.float32,0x3f6b9108,0xbdf5c769,3 +np.float32,0x3e992e8c,0xbfded5c5,3 +np.float32,0x7efea647,0x42fdfc18,3 +np.float32,0x7e8371db,0x42fc139e,3 +np.float32,0x3f397cfb,0xbeedfc69,3 +np.float32,0x7e46d233,0x42fb454a,3 +np.float32,0x7d5281ad,0x42f76f79,3 +np.float32,0x7f4c1878,0x42ff58a1,3 +np.float32,0x3e96ca5e,0xbfe1bd97,3 +np.float32,0x6a2743,0xc2fc8a3d,3 +np.float32,0x7f688781,0x42ffb8f8,3 +np.float32,0x7814b7,0xc2fc2f2d,3 +np.float32,0x3f2ffdc9,0xbf0a6756,3 +np.float32,0x3f766fa8,0xbd60fe24,3 +np.float32,0x4dc64e,0xc2fd7003,3 +np.float32,0x3a296f,0xc2fe46a8,3 +np.float32,0x3f2af942,0xbf15162e,3 +np.float32,0x7f702c32,0x42ffd0dc,3 +np.float32,0x7e61e318,0x42fba390,3 +np.float32,0x7f7d3bdb,0x42fff7fa,3 +np.float32,0x3ee87f3f,0xbf91c881,3 +np.float32,0x2bbc28,0xc2ff193c,3 +np.float32,0x3e01f918,0xc03e966e,3 +np.float32,0x7f0b39f4,0x42fe3e1a,3 +np.float32,0x3eaa4d64,0xbfcb4516,3 +np.float32,0x3e53901e,0xc0119a88,3 +np.float32,0x603cb,0xc3026957,3 +np.float32,0x7e81f926,0x42fc0b4d,3 +np.float32,0x5dab7c,0xc2fce6a6,3 +np.float32,0x3f46fefd,0xbeba1018,3 +np.float32,0x648448,0xc2fcb28a,3 +np.float32,0x3ec49470,0xbfb0c58b,3 +np.float32,0x3e8a5393,0xbff1ac2b,3 +np.float32,0x3f27ccfc,0xbf1c014e,3 +np.float32,0x3ed886e6,0xbf9eeca8,3 +np.float32,0x7cfbe06e,0x42f5f401,3 +np.float32,0x3f5aa7ba,0xbe68f229,3 +np.float32,0x9500d,0xc301c7e3,3 +np.float32,0x3f4861,0xc2fe0853,3 +np.float32,0x3e5ae104,0xc00e76f5,3 +np.float32,0x71253a,0xc2fc5b1e,3 +np.float32,0xcf7b8,0xc3014d9c,3 +np.float32,0x7f7edd2d,0x42fffcb7,3 +np.float32,0x3e9039ee,0xbfe9f5ab,3 +np.float32,0x2fd54e,0xc2fed712,3 +np.float32,0x3f600752,0xbe45147a,3 +np.float32,0x3f4da8f6,0xbea1bb5c,3 +np.float32,0x3f2d34a9,0xbf104bd9,3 +np.float32,0x3e1e66dd,0xc02c52d2,3 +np.float32,0x798276,0xc2fc2670,3 +np.float32,0xd55e2,0xc3014347,3 +np.float32,0x80000001,0xffc00000,3 +np.float32,0x3e7a5ead,0xc0020da6,3 +np.float32,0x7ec4c744,0x42fd3da9,3 +np.float32,0x597e00,0xc2fd085a,3 +np.float32,0x3dff6bf4,0xc0403575,3 +np.float32,0x5d6f1a,0xc2fce883,3 +np.float32,0x7e21faff,0x42faadea,3 +np.float32,0x3e570fea,0xc01016c6,3 +np.float32,0x28e6b6,0xc2ff4ab7,3 +np.float32,0x7e77062d,0x42fbe5a3,3 +np.float32,0x74cac4,0xc2fc43b0,3 +np.float32,0x3f707273,0xbdb93078,3 +np.float32,0x228e96,0xc2ffc737,3 +np.float32,0x686ac1,0xc2fc966b,3 +np.float32,0x3d76400d,0xc081cae8,3 +np.float32,0x3e9f502f,0xbfd7966b,3 +np.float32,0x3f6bc656,0xbdf32b1f,3 +np.float32,0x3edb828b,0xbf9c65d4,3 +np.float32,0x6c6e56,0xc2fc7a8e,3 +np.float32,0x3f04552e,0xbf73b48f,3 +np.float32,0x3f39cb69,0xbeecc457,3 +np.float32,0x7f681c44,0x42ffb7a3,3 +np.float32,0x7f5b44ee,0x42ff8d99,3 +np.float32,0x3e71430a,0xc005798d,3 +np.float32,0x3edcfde3,0xbf9b27c6,3 +np.float32,0x3f616a5a,0xbe3bf67f,3 +np.float32,0x3f523936,0xbe918548,3 +np.float32,0x3f39ce3a,0xbeecb925,3 +np.float32,0x3eac589a,0xbfc91120,3 +np.float32,0x7efc8d3d,0x42fdf5fc,3 +np.float32,0x5704b0,0xc2fd1d0f,3 +np.float32,0x7e7972e9,0x42fbecda,3 +np.float32,0x3eb0811c,0xbfc4aa13,3 +np.float32,0x7f1efcbb,0x42fea023,3 +np.float32,0x3e0b9e32,0xc037fa6b,3 +np.float32,0x7eef6a48,0x42fdce87,3 +np.float32,0x3cc0a373,0xc0ad20c0,3 +np.float32,0x3f2a75bb,0xbf1632ba,3 +np.float32,0x0,0xff800000,3 +np.float32,0x7ecdb6f4,0x42fd5e77,3 +np.float32,0x7f2e2dfd,0x42fee38d,3 +np.float32,0x3ee17f6e,0xbf976d8c,3 +np.float32,0x3f51e7ee,0xbe92a319,3 +np.float32,0x3f06942f,0xbf6d7d3c,3 +np.float32,0x3f7ba528,0xbccac6f1,3 +np.float32,0x3f413787,0xbecfd513,3 +np.float32,0x3e085e48,0xc03a2716,3 +np.float32,0x7e4c5e0e,0x42fb599c,3 +np.float32,0x306f76,0xc2fecdd4,3 +np.float32,0x7f5c2203,0x42ff9081,3 +np.float32,0x3d5355b4,0xc088da05,3 +np.float32,0x9a2a,0xc305bb4f,3 +np.float32,0x3db93a1f,0xc05de0db,3 +np.float32,0x4e50c6,0xc2fd6ae4,3 +np.float32,0x7ec4afed,0x42fd3d51,3 +np.float32,0x3a8f27,0xc2fe41a0,3 +np.float32,0x7f213caf,0x42feaa84,3 +np.float32,0x7e7b5f00,0x42fbf286,3 +np.float32,0x7e367194,0x42fb05ca,3 +np.float32,0x7f56e6de,0x42ff7ebd,3 +np.float32,0x3ed7383e,0xbfa00aef,3 +np.float32,0x7e844752,0x42fc184a,3 +np.float32,0x15157,0xc3049a19,3 +np.float32,0x3f78cd92,0xbd28824a,3 +np.float32,0x7ecddb16,0x42fd5ef9,3 +np.float32,0x3e479f16,0xc016f7d8,3 +np.float32,0x3f5cb418,0xbe5b2bd3,3 +np.float32,0x7c0934cb,0x42f2334e,3 +np.float32,0x3ebe5505,0xbfb6bc69,3 +np.float32,0x3eb1335a,0xbfc3eff5,3 +np.float32,0x3f2488a3,0xbf234444,3 +np.float32,0x642906,0xc2fcb52a,3 +np.float32,0x3da635fa,0xc067e15a,3 +np.float32,0x7e0d80db,0x42fa4a15,3 +np.float32,0x4f0b9d,0xc2fd640a,3 +np.float32,0x7e083806,0x42fa2df8,3 +np.float32,0x7f77f8c6,0x42ffe877,3 +np.float32,0x3e7bb46a,0xc0018ff5,3 +np.float32,0x3f06eb2e,0xbf6c8eca,3 +np.float32,0x7eae8f7c,0x42fce52a,3 +np.float32,0x3de481a0,0xc04a7d7f,3 +np.float32,0x3eed4311,0xbf8e096f,3 +np.float32,0x3f7b0300,0xbce8903d,3 +np.float32,0x3811b,0xc30330dd,3 +np.float32,0x3eb6f8e1,0xbfbe04bc,3 +np.float32,0x3ec35210,0xbfb1f55a,3 +np.float32,0x3d386916,0xc08f24a5,3 +np.float32,0x3f1fa197,0xbf2e704d,3 +np.float32,0x7f2020a5,0x42fea56a,3 +np.float32,0x7e1ea53f,0x42fa9e8c,3 +np.float32,0x3f148903,0xbf490bf9,3 +np.float32,0x3f2f56a0,0xbf0bc6c9,3 +np.float32,0x7da9fc,0xc2fc0d9b,3 +np.float32,0x3d802134,0xc07fe810,3 +np.float32,0x3f6cb927,0xbde74e57,3 +np.float32,0x7e05b125,0x42fa2023,3 +np.float32,0x3f3307f9,0xbf041433,3 +np.float32,0x5666bf,0xc2fd2250,3 +np.float32,0x3f51c93b,0xbe930f28,3 +np.float32,0x3eb5dcfe,0xbfbf241e,3 +np.float32,0xb2773,0xc301853f,3 +np.float32,0x7f4dee96,0x42ff5f3f,3 +np.float32,0x3e3f5c33,0xc01adee1,3 +np.float32,0x3f2ed29a,0xbf0cdd4a,3 +np.float32,0x3e3c01ef,0xc01c80ab,3 +np.float32,0x3ec2236e,0xbfb31458,3 +np.float32,0x7e841dc4,0x42fc1761,3 +np.float32,0x3df2cd8e,0xc044e30c,3 +np.float32,0x3f010901,0xbf7d0670,3 +np.float32,0x3c05ceaa,0xc0ddf39b,3 +np.float32,0x3f517226,0xbe944206,3 +np.float32,0x3f23c83d,0xbf24f522,3 +np.float32,0x7fc9da,0xc2fc0139,3 +np.float32,0x7f1bde53,0x42fe9181,3 +np.float32,0x3ea3786c,0xbfd2d4a5,3 +np.float32,0x3e83a71b,0xbffacdd2,3 +np.float32,0x3f6f0d4f,0xbdca61d5,3 +np.float32,0x7f5ab613,0x42ff8bb7,3 +np.float32,0x3ab1ec,0xc2fe3fea,3 +np.float32,0x4fbf58,0xc2fd5d82,3 +np.float32,0x3dea141b,0xc0484403,3 +np.float32,0x7d86ad3b,0x42f8258f,3 +np.float32,0x7f345315,0x42fefd29,3 +np.float32,0x3f3752fe,0xbef6a780,3 +np.float32,0x64830d,0xc2fcb293,3 +np.float32,0x3d9dc1eb,0xc06cb32a,3 +np.float32,0x3f2f935a,0xbf0b46f6,3 +np.float32,0xb90a4,0xc30177e3,3 +np.float32,0x4111dd,0xc2fdf3c1,3 +np.float32,0x3d4cd078,0xc08a4c68,3 +np.float32,0x3e95c3f1,0xbfe30011,3 +np.float32,0x3ec9f356,0xbfabcb4e,3 +np.float32,0x1b90d5,0xc3003717,3 +np.float32,0xee70f,0xc3011a3e,3 +np.float32,0x7fa00000,0x7fe00000,3 +np.float32,0x3f74cdb6,0xbd8422af,3 +np.float32,0x3d9b56fe,0xc06e2037,3 +np.float32,0x3f1853df,0xbf3fbc40,3 +np.float32,0x7d86a011,0x42f82547,3 +np.float32,0x3dff9629,0xc0402634,3 +np.float32,0x46f8c9,0xc2fdb39f,3 +np.float32,0x3e9b410b,0xbfdc5a87,3 +np.float32,0x3f5aed42,0xbe671cac,3 +np.float32,0x3b739886,0xc101257f,3 +np.float64,0x3fe2f58d6565eb1b,0xbfe82a641138e19a,1 +np.float64,0x3fee7f0642fcfe0d,0xbfb1c702f6974932,1 +np.float64,0x25b71f244b6e5,0xc090030d3b3c5d2b,1 +np.float64,0x8c9cc8e1193b,0xc0900b752a678fa8,1 +np.float64,0x3fd329b5d326536c,0xbffbd607f6db945c,1 +np.float64,0x3fb5109b3a2a2136,0xc00cd36bd15dfb18,1 +np.float64,0x3fd5393ae12a7276,0xbff97a7e4a157154,1 +np.float64,0x3fd374d1b926e9a3,0xbffb7c3e1a3a7ed3,1 +np.float64,0x3fe2c7f4e2658fea,0xbfe899f15ca78fcb,1 +np.float64,0x7fe3d6b81ee7ad6f,0x408ffa7b63d407ee,1 +np.float64,0x3fe086d097e10da1,0xbfee81456ce8dd03,1 +np.float64,0x7fd374a64ca6e94c,0x408ff241c7306d39,1 +np.float64,0x3fc0709a5b20e135,0xc007afdede31b29c,1 +np.float64,0x3fd4218f4b28431f,0xbffab2c696966e2d,1 +np.float64,0x143134c828628,0xc09006a8372c4d8a,1 +np.float64,0x3f8bd0aa0037a154,0xc018cf0e8b9c3107,1 +np.float64,0x7fe0ce905ee19d20,0x408ff8915e71bd67,1 +np.float64,0x3fda0f5f32b41ebe,0xbff4bd5e0869e820,1 +np.float64,0x7fe9ae63d0b35cc7,0x408ffd760ca4f292,1 +np.float64,0x3fe75abd9eeeb57b,0xbfdd1476fc8b3089,1 +np.float64,0x786c3110f0d87,0xc08ff8b44cedbeea,1 +np.float64,0x22c5fe80458d,0xc09013853591c2f2,1 +np.float64,0x3fdc250797384a0f,0xbff2f6a02c961f0b,1 +np.float64,0x3fa2b367b02566cf,0xc013199238485054,1 +np.float64,0x3fd26a910ca4d522,0xbffcc0e2089b1c0c,1 +np.float64,0x8068d3b300d1b,0xc08ff7f690210aac,1 +np.float64,0x3fe663bfa9ecc77f,0xbfe07cd95a43a5ce,1 +np.float64,0x3fd0ddb07321bb61,0xbffec886665e895e,1 +np.float64,0x3f91c730b0238e61,0xc0176452badc8d22,1 +np.float64,0x4dd10d309ba22,0xc08ffdbe738b1d8d,1 +np.float64,0x7fe322afa4a6455e,0x408ffa10c038f9de,1 +np.float64,0x7fdf7f7c42befef8,0x408ff7d147ddaad5,1 +np.float64,0x7fd673f386ace7e6,0x408ff3e920d00eef,1 +np.float64,0x3feaebfcadb5d7f9,0xbfcfe8ec27083478,1 +np.float64,0x3fdc6dc23738db84,0xbff2bb46794f07b8,1 +np.float64,0xcd8819599b103,0xc08ff288c5b2cf0f,1 +np.float64,0xfda00e77fb402,0xc08ff01b895d2236,1 +np.float64,0x840b02ff08161,0xc08ff7a41e41114c,1 +np.float64,0x3fbdce3a383b9c74,0xc008d1e61903a289,1 +np.float64,0x3fd24ed3c4a49da8,0xbffce3c12136b6d3,1 +np.float64,0x3fe8d0834131a107,0xbfd77b194e7051d4,1 +np.float64,0x3fdd0cb11aba1962,0xbff23b9dbd554455,1 +np.float64,0x1a32d97e3465c,0xc090052781a37271,1 +np.float64,0x3fdb09d2b1b613a5,0xbff3e396b862bd83,1 +np.float64,0x3fe04c848aa09909,0xbfef2540dd90103a,1 +np.float64,0x3fce0c48613c1891,0xc000b9f76877d744,1 +np.float64,0x3fc37109a226e213,0xc005c05d8b2b9a2f,1 +np.float64,0x81cf3837039e7,0xc08ff7d686517dff,1 +np.float64,0xd9342c29b2686,0xc08ff1e591c9a895,1 +np.float64,0x7fec731b0638e635,0x408ffea4884550a9,1 +np.float64,0x3fba0fc138341f82,0xc00a5e839b085f64,1 +np.float64,0x7fdda893b03b5126,0x408ff71f7c5a2797,1 +np.float64,0xd2a4bb03a5498,0xc08ff2402f7a907c,1 +np.float64,0x3fea61fb0d34c3f6,0xbfd1d293fbe76183,1 +np.float64,0x3fed5cf486fab9e9,0xbfbfc2e01a7ffff1,1 +np.float64,0x3fcbabc2bf375785,0xc001ad7750c9dbdf,1 +np.float64,0x3fdb5fff53b6bfff,0xbff39a7973a0c6a5,1 +np.float64,0x7feef05a00bde0b3,0x408fff9c5cbc8651,1 +np.float64,0xb1cf24f1639e5,0xc08ff434de10fffb,1 +np.float64,0x3fa583989c2b0731,0xc0124a8a3bbf18ce,1 +np.float64,0x7feae90bf9f5d217,0x408ffe002e7bbbea,1 +np.float64,0x3fe9ef41c4b3de84,0xbfd367878ae4528e,1 +np.float64,0x9be24ce337c4a,0xc08ff5b9b1c31cf9,1 +np.float64,0x3fe916894cb22d13,0xbfd677f915d58503,1 +np.float64,0x3fec1bab20f83756,0xbfc7f2777aabe8ee,1 +np.float64,0x3feaabf2873557e5,0xbfd0d11f28341233,1 +np.float64,0x3fd4d3c3b529a787,0xbff9e9e47acc8ca9,1 +np.float64,0x3fe4cfe96c699fd3,0xbfe3dc53fa739169,1 +np.float64,0xccfdb97399fb7,0xc08ff2908d893400,1 +np.float64,0x3fec7598be78eb31,0xbfc5a750f8f3441a,1 +np.float64,0x355be5fc6ab7e,0xc090010ca315b50b,1 +np.float64,0x3fba9f9074353f21,0xc00a1f80eaf5e581,1 +np.float64,0x7fdcaff189395fe2,0x408ff6bd1c5b90d9,1 +np.float64,0x3fd94d3b64b29a77,0xbff56be1b43d25f3,1 +np.float64,0x4e5f29949cbe6,0xc08ffda972da1d73,1 +np.float64,0x3fe654e2d9aca9c6,0xbfe09b88dcd8f15d,1 +np.float64,0x7fdc130190b82602,0x408ff67d496c1a27,1 +np.float64,0x3fbcd4701e39a8e0,0xc009343e36627e80,1 +np.float64,0x7fdaa4d38f3549a6,0x408ff5e2c6d8678f,1 +np.float64,0x3febe95e5237d2bd,0xbfc93e16d453fe3a,1 +np.float64,0x9ef5ca553deba,0xc08ff57ff4f7883d,1 +np.float64,0x7fe878e91170f1d1,0x408ffce795868fc8,1 +np.float64,0x3fe63dff466c7bff,0xbfe0caf2b79c9e5f,1 +np.float64,0x6561446ccac29,0xc08ffab0e383834c,1 +np.float64,0x30c6c2ae618d9,0xc09001914b30381b,1 +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 +np.float64,0x3fe5c9daf1ab93b6,0xbfe1be81baf4dbdb,1 +np.float64,0x3fe0a03e24a1407c,0xbfee3a73c4c0e8f8,1 +np.float64,0xff2a2cf3fe546,0xc08ff009a7e6e782,1 +np.float64,0x7fcf0332213e0663,0x408fefa36235e210,1 +np.float64,0x3fb612affc2c2560,0xc00c494be9c8c33b,1 +np.float64,0x3fd2b259702564b3,0xbffc67967f077e75,1 +np.float64,0x7fcb63685d36c6d0,0x408fee343343f913,1 +np.float64,0x3fe369f1d5a6d3e4,0xbfe71251139939ad,1 +np.float64,0x3fdd17c618ba2f8c,0xbff232d11c986251,1 +np.float64,0x3f92cc8040259901,0xc01711d8e06b52ee,1 +np.float64,0x69a81dc2d3504,0xc08ffa36cdaf1141,1 +np.float64,0x3fea0fad99b41f5b,0xbfd2f4625a652645,1 +np.float64,0xd1cd5799a39ab,0xc08ff24c02b90d26,1 +np.float64,0x324e59ce649cc,0xc0900163ad091c76,1 +np.float64,0x3fc3d460a227a8c1,0xc00585f903dc7a7f,1 +np.float64,0xa7185ec74e30c,0xc08ff4ec7d65ccd9,1 +np.float64,0x3fa254eaac24a9d5,0xc01337053963321a,1 +np.float64,0x3feaeb112435d622,0xbfcfef3be17f81f6,1 +np.float64,0x60144c3ac028a,0xc08ffb4f8eb94595,1 +np.float64,0x7fa4d2ec6829a5d8,0x408fdb0a9670ab83,1 +np.float64,0x3fed1372f97a26e6,0xbfc1b1fe50d48a55,1 +np.float64,0x3fd5ade5972b5bcb,0xbff8fcf28f525031,1 +np.float64,0x7fe72e335bee5c66,0x408ffc4759236437,1 +np.float64,0x7fdfafab143f5f55,0x408ff7e2e22a8129,1 +np.float64,0x3fe90d0db9321a1b,0xbfd69ae5fe10eb9e,1 +np.float64,0x7fe20a59072414b1,0x408ff962a2492484,1 +np.float64,0x3fed853690bb0a6d,0xbfbdc9dc5f199d2b,1 +np.float64,0x3fd709d469ae13a9,0xbff795a218deb700,1 +np.float64,0x3fe21c35f5e4386c,0xbfea47d71789329b,1 +np.float64,0x9ea5ec053d4be,0xc08ff585c2f6b7a3,1 +np.float64,0x3fc0580f9e20b01f,0xc007c1268f49d037,1 +np.float64,0xd99127abb3225,0xc08ff1e0a1ff339d,1 +np.float64,0x3fdc8c9bbfb91937,0xbff2a2478354effb,1 +np.float64,0x3fe15fc6b162bf8d,0xbfec323ac358e008,1 +np.float64,0xffefffffffffffff,0x7ff8000000000000,1 +np.float64,0x3fee341afb3c6836,0xbfb556b6faee9a84,1 +np.float64,0x3fe4b64c56296c99,0xbfe4154835ad2afe,1 +np.float64,0x85de22810bbc5,0xc08ff77b914fe5b5,1 +np.float64,0x3fd22c72e3a458e6,0xbffd0f4269d20bb9,1 +np.float64,0xc090e5218123,0xc09009a4a65a8a8f,1 +np.float64,0x7fd9641692b2c82c,0x408ff5547782bdfc,1 +np.float64,0x3fd9b9cb28b37396,0xbff509a8fb59a9f1,1 +np.float64,0x3fcd2726f93a4e4e,0xc001135059a22117,1 +np.float64,0x3fa4b493d4296928,0xc0128323c7a55f4a,1 +np.float64,0x47455e788e8ac,0xc08ffec2101c1e82,1 +np.float64,0x3fe0d7e2e261afc6,0xbfeda0f1e2d0f4bd,1 +np.float64,0x3fe860fc5b70c1f9,0xbfd91dc42eaf72c2,1 +np.float64,0xa5d7805b4baf0,0xc08ff502bc819ff6,1 +np.float64,0xd83395b1b0673,0xc08ff1f33c3f94c2,1 +np.float64,0x3f865972e02cb2e6,0xc01a1243651565c8,1 +np.float64,0x52fc6952a5f8e,0xc08ffd006b158179,1 +np.float64,0x7fecac6c793958d8,0x408ffebbb1c09a70,1 +np.float64,0x7fe621ff606c43fe,0x408ffbbeb2b1473a,1 +np.float64,0x3fdb9f3f9db73e7f,0xbff365610c52bda7,1 +np.float64,0x7feab92992757252,0x408ffdeb92a04813,1 +np.float64,0xcc46c79f988d9,0xc08ff29adf03fb7c,1 +np.float64,0x3fe3156a03262ad4,0xbfe7dd0f598781c7,1 +np.float64,0x3fc00e3a61201c75,0xc007f5c121a87302,1 +np.float64,0x3fdce8e9f739d1d4,0xbff2581d41ef50ef,1 +np.float64,0x0,0xfff0000000000000,1 +np.float64,0x7d373ac4fa6e8,0xc08ff840fa8beaec,1 +np.float64,0x3fee41e0653c83c1,0xbfb4ae786f2a0d54,1 +np.float64,0x3ff0000000000000,0x0,1 +np.float64,0x7feca6fff9794dff,0x408ffeb982a70556,1 +np.float64,0x7fc532716d2a64e2,0x408feb3f0f6c095b,1 +np.float64,0x3fe4ec2954a9d853,0xbfe39dd44aa5a040,1 +np.float64,0x7fd3321d52a6643a,0x408ff21a0ab9cd85,1 +np.float64,0x7fd8f1b2dfb1e365,0x408ff52001fa7922,1 +np.float64,0x3fee5e58cabcbcb2,0xbfb3539734a24d8b,1 +np.float64,0x3feebf6e7dfd7edd,0xbfad7c648f025102,1 +np.float64,0x6008026ec0101,0xc08ffb5108b54a93,1 +np.float64,0x3fea06f5e2340dec,0xbfd3134a48283360,1 +np.float64,0x41cad13c8395b,0xc08fffae654b2426,1 +np.float64,0x7fedb5c9353b6b91,0x408fff249f1f32b6,1 +np.float64,0xe00c5af9c018c,0xc08ff189e68c655f,1 +np.float64,0x7feac398ddf58731,0x408ffdf01374de9f,1 +np.float64,0x3fed21127c7a4225,0xbfc15b8cf55628fa,1 +np.float64,0x3fd3446711a688ce,0xbffbb5f7252a9fa3,1 +np.float64,0x7fe75fa07a6ebf40,0x408ffc5fdb096018,1 +np.float64,0x3feeb1618cbd62c3,0xbfaece3bd0863070,1 +np.float64,0x7f5226e180244dc2,0x408fb174d506e52f,1 +np.float64,0x3fcd67deca3acfbe,0xc000f9cd7a490749,1 +np.float64,0xdc6f30efb8de6,0xc08ff1b9f2a22d2e,1 +np.float64,0x9c14931338293,0xc08ff5b5f975ec5d,1 +np.float64,0x7fe93e802df27cff,0x408ffd4354eba0e0,1 +np.float64,0x3feb92ae5077255d,0xbfcb7f2084e44dbb,1 +np.float64,0xd78dbfddaf1b8,0xc08ff1fc19fa5a13,1 +np.float64,0x7fe14c301fa2985f,0x408ff8e666cb6592,1 +np.float64,0xbda3d8b77b47b,0xc08ff37689f4b2e5,1 +np.float64,0x8a42953b14853,0xc08ff71c2db3b8cf,1 +np.float64,0x7fe4ca7e186994fb,0x408ffb05e94254a7,1 +np.float64,0x7fe92ffc5e325ff8,0x408ffd3cb0265b12,1 +np.float64,0x91b262912364d,0xc08ff681619be214,1 +np.float64,0x33fe2b0667fc6,0xc0900132f3fab55e,1 +np.float64,0x3fde10e9183c21d2,0xbff17060fb4416c7,1 +np.float64,0xb6b811cb6d702,0xc08ff3e46303b541,1 +np.float64,0x3fe4a7bda0a94f7b,0xbfe435c6481cd0e3,1 +np.float64,0x7fd9fe6057b3fcc0,0x408ff599c79a822c,1 +np.float64,0x3fef44bf917e897f,0xbfa11484e351a6e9,1 +np.float64,0x3fe57d701daafae0,0xbfe2618ab40fc01b,1 +np.float64,0x7fe52d2adbaa5a55,0x408ffb3c2fb1c99d,1 +np.float64,0xb432f66d6865f,0xc08ff40d6b4084fe,1 +np.float64,0xbff0000000000000,0x7ff8000000000000,1 +np.float64,0x7fecd2292bf9a451,0x408ffecad860de6f,1 +np.float64,0x3fddd2ae153ba55c,0xbff1a059adaca33e,1 +np.float64,0x3fee55d6e5bcabae,0xbfb3bb1c6179d820,1 +np.float64,0x7fc1d0085623a010,0x408fe93d16ada7a7,1 +np.float64,0x829b000105360,0xc08ff7c47629a68f,1 +np.float64,0x7fe1e0257523c04a,0x408ff94782cf0717,1 +np.float64,0x7fd652f9ad2ca5f2,0x408ff3d820ec892e,1 +np.float64,0x3fef2246203e448c,0xbfa444ab6209d8cd,1 +np.float64,0x3fec6c0ae178d816,0xbfc5e559ebd4e790,1 +np.float64,0x3fe6ddfee92dbbfe,0xbfdf06dd7d3fa7a8,1 +np.float64,0x3fb7fbcbea2ff798,0xc00b5404d859d148,1 +np.float64,0x7feb9a154d37342a,0x408ffe4b26c29e55,1 +np.float64,0x3fe4db717aa9b6e3,0xbfe3c2c6b3ef13bc,1 +np.float64,0x3fbae17dda35c2fc,0xc00a030f7f4b37e7,1 +np.float64,0x7fd632b9082c6571,0x408ff3c76826ef19,1 +np.float64,0x7fc4184a15283093,0x408feaa14adf00be,1 +np.float64,0x3fe052d19920a5a3,0xbfef136b5df81a3e,1 +np.float64,0x7fe38b872b67170d,0x408ffa4f51aafc86,1 +np.float64,0x3fef9842d03f3086,0xbf92d3d2a21d4be2,1 +np.float64,0x9cea662139d4d,0xc08ff5a634810daa,1 +np.float64,0x3fe35f0855e6be11,0xbfe72c4b564e62aa,1 +np.float64,0x3fecee3d3779dc7a,0xbfc29ee942f8729e,1 +np.float64,0x3fe7903fd72f2080,0xbfdc41db9b5f4048,1 +np.float64,0xb958889572b11,0xc08ff3ba366cf84b,1 +np.float64,0x3fcb3a67c53674d0,0xc001dd21081ad1ea,1 +np.float64,0xe3b1b53fc7637,0xc08ff15a3505e1ce,1 +np.float64,0xe5954ae9cb2aa,0xc08ff141cbbf0ae4,1 +np.float64,0x3fe394af74e7295f,0xbfe6ad1d13f206e8,1 +np.float64,0x7fe21dd704643bad,0x408ff96f13f80c1a,1 +np.float64,0x3fd23a7cf02474fa,0xbffcfd7454117a05,1 +np.float64,0x7fe257515e24aea2,0x408ff99378764d52,1 +np.float64,0x7fe4c5d0a6e98ba0,0x408ffb03503cf939,1 +np.float64,0x3fadc2c1603b8583,0xc0106b2c17550e3a,1 +np.float64,0x3fc0f7f02421efe0,0xc007525ac446864c,1 +np.float64,0x3feaf0b27275e165,0xbfcfc8a03eaa32ad,1 +np.float64,0x5ce7503cb9ceb,0xc08ffbb2de365fa8,1 +np.float64,0x2a0014f654003,0xc090026e41761a0d,1 +np.float64,0x7fe2c848a8e59090,0x408ff9d9b723ee89,1 +np.float64,0x7f66f54bc02dea97,0x408fbc2ae0ec5623,1 +np.float64,0xa35a890146b6,0xc0900a97b358ddbd,1 +np.float64,0x7fee267ded7c4cfb,0x408fff501560c9f5,1 +np.float64,0x3fe07c328520f865,0xbfee9ef7c3435b58,1 +np.float64,0x3fe67122cf6ce246,0xbfe06147001932ba,1 +np.float64,0x3fdacc8925359912,0xbff41824cece219e,1 +np.float64,0xffa3047fff461,0xc08ff00431ec9be3,1 +np.float64,0x3e1af43e7c35f,0xc090002c6573d29b,1 +np.float64,0x86fa94590df53,0xc08ff7632525ed92,1 +np.float64,0x7fec4c76227898eb,0x408ffe94d032c657,1 +np.float64,0x7fe2274ce1e44e99,0x408ff975194cfdff,1 +np.float64,0x7fe670e1b4ace1c2,0x408ffbe78cc451de,1 +np.float64,0x7fe853871db0a70d,0x408ffcd5e6a6ff47,1 +np.float64,0x3fcbf265db37e4cc,0xc0019026336e1176,1 +np.float64,0x3fef033cef3e067a,0xbfa726712eaae7f0,1 +np.float64,0x5d74973abae94,0xc08ffba15e6bb992,1 +np.float64,0x7fdd9c99b6bb3932,0x408ff71ad24a7ae0,1 +np.float64,0xbdc8e09b7b91c,0xc08ff3744939e9a3,1 +np.float64,0xdbfcff71b7fa0,0xc08ff1bfeecc9dfb,1 +np.float64,0xf9b38cf5f3672,0xc08ff0499af34a43,1 +np.float64,0x3fea820aa6b50415,0xbfd162a38e1927b1,1 +np.float64,0x3fe67f59a12cfeb3,0xbfe04412adca49dc,1 +np.float64,0x3feb301d9c76603b,0xbfce17e6edeb92d5,1 +np.float64,0x828ce00b0519c,0xc08ff7c5b5c57cde,1 +np.float64,0x4f935e229f26c,0xc08ffd7c67c1c54f,1 +np.float64,0x7fcd139e023a273b,0x408feee4f12ff11e,1 +np.float64,0x666a9944ccd54,0xc08ffa92d5e5cd64,1 +np.float64,0x3fe792f0fa6f25e2,0xbfdc374fda28f470,1 +np.float64,0xe996029bd32c1,0xc08ff10eb9b47a11,1 +np.float64,0x3fe7b0dd1eef61ba,0xbfdbc2676dc77db0,1 +np.float64,0x7fd3ec0127a7d801,0x408ff287bf47e27d,1 +np.float64,0x3fe793a8ea6f2752,0xbfdc347f7717e48d,1 +np.float64,0x7fdb89d15e3713a2,0x408ff64457a13ea2,1 +np.float64,0x3fe35b3cbbe6b679,0xbfe73557c8321b70,1 +np.float64,0x66573c94ccae8,0xc08ffa9504af7eb5,1 +np.float64,0x3fc620a2302c4144,0xc00442036b944a67,1 +np.float64,0x49b2fe0693660,0xc08ffe5f131c3c7e,1 +np.float64,0x7fda936cdfb526d9,0x408ff5db3ab3f701,1 +np.float64,0xc774ceef8ee9a,0xc08ff2e16d082fa1,1 +np.float64,0x4da9f8a09b55,0xc0900ee2206d0c88,1 +np.float64,0x3fe2ca5d5ae594bb,0xbfe89406611a5f1a,1 +np.float64,0x7fe0832497e10648,0x408ff85d1de6056e,1 +np.float64,0x3fe6a9e3222d53c6,0xbfdfda35a9bc2de1,1 +np.float64,0x3fed3d92c8ba7b26,0xbfc0a73620db8b98,1 +np.float64,0x3fdd2ec093ba5d81,0xbff2209cf78ce3f1,1 +np.float64,0x62fcb968c5f98,0xc08ffaf775a593c7,1 +np.float64,0xfcfb019ff9f60,0xc08ff0230e95bd16,1 +np.float64,0x3fd7a63e8f2f4c7d,0xbff6faf4fff7dbe0,1 +np.float64,0x3fef23b0ec3e4762,0xbfa4230cb176f917,1 +np.float64,0x340d1e6a681a5,0xc09001314b68a0a2,1 +np.float64,0x7fc0b85ba02170b6,0x408fe8821487b802,1 +np.float64,0x7fe9976e84f32edc,0x408ffd6bb6aaf467,1 +np.float64,0x329a0e9e65343,0xc090015b044e3270,1 +np.float64,0x3fea4928d3f49252,0xbfd2299b05546eab,1 +np.float64,0x3f188c70003118e0,0xc02ac3ce23bc5d5a,1 +np.float64,0x3fecce5020b99ca0,0xbfc36b23153d5f50,1 +np.float64,0x3fe203873e24070e,0xbfea86edb3690830,1 +np.float64,0x3fe02d9eaa205b3d,0xbfef7d18c54a76d2,1 +np.float64,0xef7537ebdeea7,0xc08ff0c55e9d89e7,1 +np.float64,0x3fedf7572efbeeae,0xbfb840af357cf07c,1 +np.float64,0xd1a97a61a354,0xc0900926fdfb96cc,1 +np.float64,0x7fe6a0daeced41b5,0x408ffc001edf1407,1 +np.float64,0x3fe5063625aa0c6c,0xbfe3647cfb949d62,1 +np.float64,0x7fe9b28d31736519,0x408ffd77eb4a922b,1 +np.float64,0x7feea90d033d5219,0x408fff81a4bbff62,1 +np.float64,0x3fe9494d17f2929a,0xbfd5bde02eb5287a,1 +np.float64,0x7feee17a8cbdc2f4,0x408fff96cf0dc16a,1 +np.float64,0xb2ad18ef655a3,0xc08ff4267eda8af8,1 +np.float64,0x3fad3b52683a76a5,0xc01085ab75b797ce,1 +np.float64,0x2300a65846016,0xc090037b81ce9500,1 +np.float64,0x3feb1041f9b62084,0xbfcef0c87d8b3249,1 +np.float64,0x3fdd887d3e3b10fa,0xbff1da0e1ede6db2,1 +np.float64,0x3fd3e410eb27c822,0xbffaf9b5fc9cc8cc,1 +np.float64,0x3fe0aa53e3e154a8,0xbfee1e7b5c486578,1 +np.float64,0x7fe33e389aa67c70,0x408ffa214fe50961,1 +np.float64,0x3fd27e3a43a4fc75,0xbffca84a79e8adeb,1 +np.float64,0x3fb309e0082613c0,0xc00dfe407b77a508,1 +np.float64,0x7feaf2ed8cf5e5da,0x408ffe046a9d1ba9,1 +np.float64,0x1e76167a3cec4,0xc0900448cd35ec67,1 +np.float64,0x3fe0a18e1721431c,0xbfee36cf1165a0d4,1 +np.float64,0x3fa73b78c02e76f2,0xc011d9069823b172,1 +np.float64,0x3fef6d48287eda90,0xbf9ab2d08722c101,1 +np.float64,0x8fdf0da31fbe2,0xc08ff6a6a2accaa1,1 +np.float64,0x3fc3638db826c71b,0xc005c86191688826,1 +np.float64,0xaa9c09c555381,0xc08ff4aefe1d9473,1 +np.float64,0x7fccb0f4523961e8,0x408feebd84773f23,1 +np.float64,0xede75dcfdbcec,0xc08ff0d89ba887d1,1 +np.float64,0x7f8a051520340a29,0x408fcd9cc17f0d95,1 +np.float64,0x3fef5ca2babeb945,0xbf9dc221f3618e6a,1 +np.float64,0x7fea0ff4bcf41fe8,0x408ffda193359f22,1 +np.float64,0x7fe05c53fd20b8a7,0x408ff841dc7123e8,1 +np.float64,0x3fc625664b2c4acd,0xc0043f8749b9a1d8,1 +np.float64,0x7fed58f98f7ab1f2,0x408fff00585f48c2,1 +np.float64,0x3fb3e5e51427cbca,0xc00d7bcb6528cafe,1 +np.float64,0x3fe728bd3d6e517a,0xbfdddafa72bd0f60,1 +np.float64,0x3fe3f005dd27e00c,0xbfe5d7b3ec93bca0,1 +np.float64,0x3fd74fbd1a2e9f7a,0xbff750001b63ce81,1 +np.float64,0x3fd3af6d85a75edb,0xbffb371d678d11b4,1 +np.float64,0x7fa690ad8c2d215a,0x408fdbf7db9c7640,1 +np.float64,0x3fbdfd38e23bfa72,0xc008bfc1c5c9b89e,1 +np.float64,0x3fe2374684a46e8d,0xbfea030c4595dfba,1 +np.float64,0x7fc0806c372100d7,0x408fe85b36fee334,1 +np.float64,0x3fef3ac47b7e7589,0xbfa2007195c5213f,1 +np.float64,0x3fb55473922aa8e7,0xc00cae7af8230e0c,1 +np.float64,0x7fe018dc152031b7,0x408ff811e0d712fa,1 +np.float64,0x3fe3b3fca56767f9,0xbfe6638ae2c99c62,1 +np.float64,0x7fac79818c38f302,0x408fdea720b39c3c,1 +np.float64,0x7fefffffffffffff,0x4090000000000000,1 +np.float64,0xd2b290cba5652,0xc08ff23f6d7152a6,1 +np.float64,0x7fc5848eb52b091c,0x408feb6b6f8b77d0,1 +np.float64,0xf399f62de733f,0xc08ff092ae319ad8,1 +np.float64,0x7fdec56c12bd8ad7,0x408ff78c4ddbc667,1 +np.float64,0x3fca640f1e34c81e,0xc0023969c5cbfa4c,1 +np.float64,0x3fd55225db2aa44c,0xbff95f7442a2189e,1 +np.float64,0x7fefa009a97f4012,0x408fffdd2f42ef9f,1 +np.float64,0x4a3b70609478,0xc0900f24e449bc3d,1 +np.float64,0x7fe3738b1ba6e715,0x408ffa411f2cb5e7,1 +np.float64,0x7fe5e53f0b6bca7d,0x408ffb9ed8d95cea,1 +np.float64,0x3fe274dd24a4e9ba,0xbfe967fb114b2a83,1 +np.float64,0x3fcbc58b8c378b17,0xc001a2bb1e158bcc,1 +np.float64,0x3fefc2c0043f8580,0xbf862c9b464dcf38,1 +np.float64,0xc2c4fafd858a0,0xc08ff327aecc409b,1 +np.float64,0x3fd8bc39a9b17873,0xbff5f1ad46e5a51c,1 +np.float64,0x3fdf341656be682d,0xbff094f41e7cb4c4,1 +np.float64,0x3fef8495c13f092c,0xbf966cf6313bae4c,1 +np.float64,0x3fe14e0f05229c1e,0xbfec6166f26b7161,1 +np.float64,0x3fed42d3b2ba85a7,0xbfc0860b773d35d8,1 +np.float64,0x7fd92bbac5b25775,0x408ff53abcb3fe0c,1 +np.float64,0xb1635b6f62c6c,0xc08ff43bdf47accf,1 +np.float64,0x4a3a2dbc94746,0xc08ffe49fabddb36,1 +np.float64,0x87d831290fb06,0xc08ff750419dc6fb,1 +np.float64,0x3fec4713f7f88e28,0xbfc6d6217c9f5cf9,1 +np.float64,0x7fed43ba2d3a8773,0x408ffef7fa2fc303,1 +np.float64,0x7fd1ec5b56a3d8b6,0x408ff14f62615f1e,1 +np.float64,0x3fee534b6c7ca697,0xbfb3da1951aa3e68,1 +np.float64,0x3febb564c2b76aca,0xbfca9737062e55e7,1 +np.float64,0x943e6b0f287ce,0xc08ff64e2d09335c,1 +np.float64,0xf177d957e2efb,0xc08ff0acab2999fa,1 +np.float64,0x7fb5b881a82b7102,0x408fe3872b4fde5e,1 +np.float64,0x3fdb2b4a97b65695,0xbff3c715c91359bc,1 +np.float64,0x3fac0a17e4381430,0xc010c330967309fb,1 +np.float64,0x7fd8057990b00af2,0x408ff4b0a287a348,1 +np.float64,0x1f9026a23f206,0xc09004144f3a19dd,1 +np.float64,0x3fdb2977243652ee,0xbff3c8a2fd05803d,1 +np.float64,0x3fe0f6e74b21edcf,0xbfed4c3bb956bae0,1 +np.float64,0xde9cc3bbbd399,0xc08ff19ce5c1e762,1 +np.float64,0x3fe72ce106ae59c2,0xbfddca7ab14ceba2,1 +np.float64,0x3fa8ee14e031dc2a,0xc01170d54ca88e86,1 +np.float64,0x3fe0b09bbb216137,0xbfee0d189a95b877,1 +np.float64,0x7fdfdcb157bfb962,0x408ff7f33cf2afea,1 +np.float64,0x3fef84d5f53f09ac,0xbf966134e2a154f4,1 +np.float64,0x3fea0e0b1bb41c16,0xbfd2fa2d36637d19,1 +np.float64,0x1ab76fd6356ef,0xc090050a9616ffbd,1 +np.float64,0x7fd0ccf79a2199ee,0x408ff09045af2dee,1 +np.float64,0x7fea929345f52526,0x408ffddadc322b07,1 +np.float64,0x3fe9ef629cf3dec5,0xbfd367129c166838,1 +np.float64,0x3feedf0ea2fdbe1d,0xbfaa862afca44c00,1 +np.float64,0x7fce725f723ce4be,0x408fef6cfd2769a8,1 +np.float64,0x7fe4313b3ca86275,0x408ffaaf9557ef8c,1 +np.float64,0xe2d46463c5a8d,0xc08ff165725c6b08,1 +np.float64,0x7fbacb4ace359695,0x408fe5f3647bd0d5,1 +np.float64,0x3fbafd009635fa01,0xc009f745a7a5c5d5,1 +np.float64,0x3fe3cea66ce79d4d,0xbfe6253b895e2838,1 +np.float64,0x7feaa71484354e28,0x408ffde3c0bad2a6,1 +np.float64,0x3fd755b8b42eab71,0xbff74a1444c6e654,1 +np.float64,0x3fc313e2172627c4,0xc005f830e77940c3,1 +np.float64,0x12d699a225ad4,0xc090070ec00f2338,1 +np.float64,0x3fa975fe8432ebfd,0xc01151b3da48b3f9,1 +np.float64,0x7fdce3103b39c61f,0x408ff6d19b3326fa,1 +np.float64,0x7fd341cbba268396,0x408ff2237490fdca,1 +np.float64,0x3fd8405885b080b1,0xbff6666d8802a7d5,1 +np.float64,0x3fe0f0cca3a1e199,0xbfed5cdb3e600791,1 +np.float64,0x7fbd56680c3aaccf,0x408fe6ff55bf378d,1 +np.float64,0x3f939c4f3027389e,0xc016d364dd6313fb,1 +np.float64,0x3fe9e87fac73d0ff,0xbfd37f9a2be4fe38,1 +np.float64,0x7fc93c6a883278d4,0x408fed4260e614f1,1 +np.float64,0x7fa88c0ff031181f,0x408fdcf09a46bd3a,1 +np.float64,0xd5487f99aa910,0xc08ff21b6390ab3b,1 +np.float64,0x3fe34acc96e69599,0xbfe75c9d290428fb,1 +np.float64,0x3fd17f5964a2feb3,0xbffdef50b524137b,1 +np.float64,0xe23dec0dc47be,0xc08ff16d1ce61dcb,1 +np.float64,0x3fec8bd64fb917ad,0xbfc5173941614b8f,1 +np.float64,0x3fc81d97d7303b30,0xc00343ccb791401d,1 +np.float64,0x7fe79ad18e2f35a2,0x408ffc7cf0ab0f2a,1 +np.float64,0x3f96306b402c60d7,0xc0161ce54754cac1,1 +np.float64,0xfb09fc97f6140,0xc08ff039d1d30123,1 +np.float64,0x3fec9c4afa793896,0xbfc4ace43ee46079,1 +np.float64,0x3f9262dac824c5b6,0xc01732a3a7eeb598,1 +np.float64,0x3fa5cd33f42b9a68,0xc01236ed4d315a3a,1 +np.float64,0x3fe7bb336caf7667,0xbfdb9a268a82e267,1 +np.float64,0xc6c338f98d867,0xc08ff2ebb8475bbc,1 +np.float64,0x3fd50714482a0e29,0xbff9b14a9f84f2c2,1 +np.float64,0xfff0000000000000,0x7ff8000000000000,1 +np.float64,0x3fde2cd0f93c59a2,0xbff15afe35a43a37,1 +np.float64,0xf1719cb9e2e34,0xc08ff0acf77b06d3,1 +np.float64,0xfd3caaf9fa796,0xc08ff020101771bd,1 +np.float64,0x7f750d63a02a1ac6,0x408fc32ad0caa362,1 +np.float64,0x7fcc50f4e238a1e9,0x408fee96a5622f1a,1 +np.float64,0x421d1da0843a4,0xc08fff9ffe62d869,1 +np.float64,0x3fd9e17023b3c2e0,0xbff4e631d687ee8e,1 +np.float64,0x3fe4999a09693334,0xbfe4556b3734c215,1 +np.float64,0xd619ef03ac33e,0xc08ff21013c85529,1 +np.float64,0x3fc4da522229b4a4,0xc004f150b2c573aa,1 +np.float64,0x3feb04b053b60961,0xbfcf3fc9e00ebc40,1 +np.float64,0x3fbedec5ea3dbd8c,0xc0086a33dc22fab5,1 +np.float64,0x7fec3b217ab87642,0x408ffe8dbc8ca041,1 +np.float64,0xdb257d33b64b0,0xc08ff1cb42d3c182,1 +np.float64,0x7fa2d92ec025b25d,0x408fd9e414d11cb0,1 +np.float64,0x3fa425c550284b8b,0xc012ab7cbf83be12,1 +np.float64,0x10b4869021692,0xc09007c0487d648a,1 +np.float64,0x7f97918c902f2318,0x408fd47867806574,1 +np.float64,0x3fe4f91238e9f224,0xbfe38160b4e99919,1 +np.float64,0x3fc2b1af6125635f,0xc00634343bc58461,1 +np.float64,0x3fc2a98071255301,0xc0063942bc8301be,1 +np.float64,0x3fe4cfc585299f8b,0xbfe3dca39f114f34,1 +np.float64,0x3fd1ea75b3a3d4eb,0xbffd63acd02c5406,1 +np.float64,0x3fd6bf48492d7e91,0xbff7e0cd249f80f9,1 +np.float64,0x76643d36ecc88,0xc08ff8e68f13b38c,1 +np.float64,0x7feeabab3e7d5755,0x408fff82a0fd4501,1 +np.float64,0x46c0d4a68d81b,0xc08ffed79abaddc9,1 +np.float64,0x3fd088d57ca111ab,0xbfff3dd0ed7128ea,1 +np.float64,0x3fed25887cba4b11,0xbfc13f47639bd645,1 +np.float64,0x7fd90984b4b21308,0x408ff52b022c7fb4,1 +np.float64,0x3fe6ef31daadde64,0xbfdec185760cbf21,1 +np.float64,0x3fe48dbe83291b7d,0xbfe47005b99920bd,1 +np.float64,0x3fdce8422f39d084,0xbff258a33a96cc8e,1 +np.float64,0xb8ecdef771d9c,0xc08ff3c0eca61b10,1 +np.float64,0x3fe9bbf9a03377f3,0xbfd41ecfdcc336b9,1 +np.float64,0x7fe2565339a4aca5,0x408ff992d8851eaf,1 +np.float64,0x3fe1693e3822d27c,0xbfec1919da2ca697,1 +np.float64,0x3fd3680488a6d009,0xbffb8b7330275947,1 +np.float64,0x7fbe4f3d2c3c9e79,0x408fe75fa3f4e600,1 +np.float64,0x7fd4cfef3ca99fdd,0x408ff308ee3ab50f,1 +np.float64,0x3fd9c9a51cb3934a,0xbff4fb7440055ce6,1 +np.float64,0x3fe08a9640a1152d,0xbfee76bd1bfbf5c2,1 +np.float64,0x3fef012c41fe0259,0xbfa757a2da7f9707,1 +np.float64,0x3fee653fe2fcca80,0xbfb2ffae0c95025c,1 +np.float64,0x7fd0776933a0eed1,0x408ff054e7b43d41,1 +np.float64,0x4c94e5c09929d,0xc08ffdedb7f49e5e,1 +np.float64,0xca3e3d17947c8,0xc08ff2b86dce2f7a,1 +np.float64,0x3fb528e1342a51c2,0xc00cc626c8e2d9ba,1 +np.float64,0xd774df81aee9c,0xc08ff1fd6f0a7548,1 +np.float64,0x3fc47a9b6128f537,0xc00526c577b80849,1 +np.float64,0x3fe29a6f6a6534df,0xbfe90a5f83644911,1 +np.float64,0x3fecda4f59f9b49f,0xbfc31e4a80c4cbb6,1 +np.float64,0x7fe51d44f5aa3a89,0x408ffb3382437426,1 +np.float64,0x3fd677fc412ceff9,0xbff82999086977e7,1 +np.float64,0x3fe2a3c7e7254790,0xbfe8f33415cdba9d,1 +np.float64,0x3fe6d8d1dc6db1a4,0xbfdf1bc61bc24dff,1 +np.float64,0x7febb32d8ef7665a,0x408ffe55a043ded1,1 +np.float64,0x60677860c0d0,0xc0900da2caa7d571,1 +np.float64,0x7390c2e0e7219,0xc08ff92df18bb5d2,1 +np.float64,0x3fca53711b34a6e2,0xc00240b07a9b529b,1 +np.float64,0x7fe7ce6dd8ef9cdb,0x408ffc961164ead9,1 +np.float64,0x7fc0c9de0d2193bb,0x408fe88e245767f6,1 +np.float64,0xc0ee217981dc4,0xc08ff343b77ea770,1 +np.float64,0x72bd4668e57a9,0xc08ff94323fd74fc,1 +np.float64,0x7fd6970e252d2e1b,0x408ff3fb1e2fead2,1 +np.float64,0x7fdcb61040396c20,0x408ff6bf926bc98f,1 +np.float64,0xda4faa25b49f6,0xc08ff1d68b3877f0,1 +np.float64,0x3feb344749f6688f,0xbfcdfba2d66c72c5,1 +np.float64,0x3fe2aa4284e55485,0xbfe8e32ae0683f57,1 +np.float64,0x3f8e8fcfd03d1fa0,0xc01843efb2129908,1 +np.float64,0x8000000000000000,0xfff0000000000000,1 +np.float64,0x3fd8e01155b1c023,0xbff5d0529dae9515,1 +np.float64,0x3fe8033f3370067e,0xbfda837c80b87e7c,1 +np.float64,0x7fc5bf831e2b7f05,0x408feb8ae3b039a0,1 +np.float64,0x3fd8dcdf5331b9bf,0xbff5d349e1ed422a,1 +np.float64,0x3fe58b4e302b169c,0xbfe243c9cbccde44,1 +np.float64,0x3fea8a2e47b5145d,0xbfd1464e37221894,1 +np.float64,0x75cd1e88eb9a4,0xc08ff8f553ef0475,1 +np.float64,0x7fcfc876e23f90ed,0x408fefebe6cc95e6,1 +np.float64,0x7f51aceb002359d5,0x408fb1263f9003fb,1 +np.float64,0x7fc2a1b877254370,0x408fe9c1ec52f8b9,1 +np.float64,0x7fd495810e292b01,0x408ff2e859414d31,1 +np.float64,0x7fd72048632e4090,0x408ff440690cebdb,1 +np.float64,0x7fd7aafaffaf6,0xc08ff803a390779f,1 +np.float64,0x7fe18067d4a300cf,0x408ff9090a02693f,1 +np.float64,0x3fdc1080f8b82102,0xbff3077bf44a89bd,1 +np.float64,0x3fc34a462f26948c,0xc005d777b3cdf139,1 +np.float64,0x3fe21e4a1fe43c94,0xbfea428acfbc6ea9,1 +np.float64,0x1f0d79083e1b0,0xc090042c65a7abf2,1 +np.float64,0x3fe8d0d15931a1a3,0xbfd779f6bbd4db78,1 +np.float64,0x3fe74578022e8af0,0xbfdd68b6c15e9f5e,1 +np.float64,0x50995dd0a132c,0xc08ffd56a5c8accf,1 +np.float64,0x3f9a6342b034c685,0xc0151ce1973c62bd,1 +np.float64,0x3f30856a00210ad4,0xc027e852f4d1fcbc,1 +np.float64,0x3febcf7646b79eed,0xbfc9e9cc9d12425c,1 +np.float64,0x8010000000000000,0x7ff8000000000000,1 +np.float64,0x3fdf520c02bea418,0xbff07ed5013f3062,1 +np.float64,0x3fe5433ecbea867e,0xbfe2df38968b6d14,1 +np.float64,0x3fb933a84e326751,0xc00ac1a144ad26c5,1 +np.float64,0x7b6d72c2f6daf,0xc08ff86b7a67f962,1 +np.float64,0xaef5dae75debc,0xc08ff46496bb2932,1 +np.float64,0x522d869aa45b1,0xc08ffd1d55281e98,1 +np.float64,0xa2462b05448c6,0xc08ff542fe0ac5fd,1 +np.float64,0x3fe2b71dd6e56e3c,0xbfe8c3690cf15415,1 +np.float64,0x3fe5778231aaef04,0xbfe26e495d09b783,1 +np.float64,0x3fe9b8d564f371ab,0xbfd42a161132970d,1 +np.float64,0x3f89ebc34033d787,0xc019373f90bfc7f1,1 +np.float64,0x3fe438ddc6e871bc,0xbfe53039341b0a93,1 +np.float64,0x873c75250e78f,0xc08ff75d8478dccd,1 +np.float64,0x807134cb00e27,0xc08ff7f5cf59c57a,1 +np.float64,0x3fac459878388b31,0xc010b6fe803bcdc2,1 +np.float64,0xca9dc7eb953b9,0xc08ff2b2fb480784,1 +np.float64,0x7feb38587bb670b0,0x408ffe21ff6d521e,1 +np.float64,0x7fd70e9b782e1d36,0x408ff437936b393a,1 +np.float64,0x3fa4037bbc2806f7,0xc012b55744c65ab2,1 +np.float64,0x3fd3d4637427a8c7,0xbffb0beebf4311ef,1 +np.float64,0x7fdabbda5db577b4,0x408ff5ecbc0d4428,1 +np.float64,0x7fda9be0a2b537c0,0x408ff5dee5d03d5a,1 +np.float64,0x7fe9c74396338e86,0x408ffd813506a18a,1 +np.float64,0x3fd058243e20b048,0xbfff822ffd8a7f21,1 +np.float64,0x3fe6aa6ca9ed54d9,0xbfdfd805629ff49e,1 +np.float64,0x3fd91431d5322864,0xbff5a025eea8c78b,1 +np.float64,0x7fe4d7f02329afdf,0x408ffb0d5d9b7878,1 +np.float64,0x3fe2954a12252a94,0xbfe917266e3e22d5,1 +np.float64,0x3fb25f7c8224bef9,0xc00e6764c81b3718,1 +np.float64,0x3fda4bddeeb497bc,0xbff4880638908c81,1 +np.float64,0x55dfd12eabbfb,0xc08ffc9b54ff4002,1 +np.float64,0x3fe8f399e031e734,0xbfd6f8e5c4dcd93f,1 +np.float64,0x3fd954a24832a945,0xbff56521f4707a06,1 +np.float64,0x3fdea911f2bd5224,0xbff0fcb2d0c2b2e2,1 +np.float64,0x3fe6b4ff8a2d69ff,0xbfdfacfc85cafeab,1 +np.float64,0x3fc7fa02042ff404,0xc00354e13b0767ad,1 +np.float64,0x3fe955088c72aa11,0xbfd593130f29949e,1 +np.float64,0xd7e74ec1afcea,0xc08ff1f74f61721c,1 +np.float64,0x3fe9d69c1ab3ad38,0xbfd3bf710a337e06,1 +np.float64,0x3fd85669a2b0acd3,0xbff65176143ccc1e,1 +np.float64,0x3fea99b285353365,0xbfd11062744783f2,1 +np.float64,0x3fe2c79f80a58f3f,0xbfe89ac33f990289,1 +np.float64,0x3f8332ba30266574,0xc01af2cb7b635783,1 +np.float64,0x30d0150061a1,0xc090119030f74c5d,1 +np.float64,0x3fdbf4cb06b7e996,0xbff31e5207aaa754,1 +np.float64,0x3fe6b56c216d6ad8,0xbfdfab42fb2941c5,1 +np.float64,0x7fc4dc239829b846,0x408feb0fb0e13fbe,1 +np.float64,0x3fd0ab85ef21570c,0xbfff0d95d6c7a35c,1 +np.float64,0x7fe13d75e5e27aeb,0x408ff8dc8efa476b,1 +np.float64,0x3fece3b832f9c770,0xbfc2e21b165d583f,1 +np.float64,0x3fe3a279c4e744f4,0xbfe68ca4fbb55dbf,1 +np.float64,0x3feb64659ef6c8cb,0xbfccb6204b6bf724,1 +np.float64,0x2279a6bc44f36,0xc0900391eeeb3e7c,1 +np.float64,0xb88046d571009,0xc08ff3c7b5b45300,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0x3fe49af059a935e1,0xbfe4526c294f248f,1 +np.float64,0xa3e5508147cc,0xc0900a92ce5924b1,1 +np.float64,0x7fc56def3d2adbdd,0x408feb5f46c360e8,1 +np.float64,0x7fd99f3574333e6a,0x408ff56f3807987c,1 +np.float64,0x3fdc38d56fb871ab,0xbff2e667cad8f36a,1 +np.float64,0xd0b03507a1607,0xc08ff25bbcf8aa9d,1 +np.float64,0xc493f9078927f,0xc08ff30c5fa4e759,1 +np.float64,0x3fc86ddbcb30dbb8,0xc0031da1fcb56d75,1 +np.float64,0x7fe75dc395aebb86,0x408ffc5eef841491,1 +np.float64,0x1647618a2c8ed,0xc0900616ef9479c1,1 +np.float64,0xdf144763be289,0xc08ff196b527f3c9,1 +np.float64,0x3fe0b29da6a1653b,0xbfee078b5f4d7744,1 +np.float64,0x3feb055852b60ab1,0xbfcf3b4db5779a7a,1 +np.float64,0x3fe8bc1625f1782c,0xbfd7c739ade904bc,1 +np.float64,0x7fd19bfb8ea337f6,0x408ff11b2b55699c,1 +np.float64,0x3fed1d80d1ba3b02,0xbfc1722e8d3ce094,1 +np.float64,0x2d9c65925b38e,0xc09001f46bcd3bc5,1 +np.float64,0x7fed6f4d857ade9a,0x408fff091cf6a3b4,1 +np.float64,0x3fd070cd6ba0e19b,0xbfff5f7609ca29e8,1 +np.float64,0x7fea3508b8f46a10,0x408ffdb1f30bd6be,1 +np.float64,0x508b897ca1172,0xc08ffd58a0eb3583,1 +np.float64,0x7feba367b07746ce,0x408ffe4f0bf4bd4e,1 +np.float64,0x3fefebd5c4bfd7ac,0xbf6d20b4fcf21b69,1 +np.float64,0x3fd8ef07b8b1de0f,0xbff5c2745c0795a5,1 +np.float64,0x3fd38ed518271daa,0xbffb5d75f00f6900,1 +np.float64,0x6de0fecedbc20,0xc08ff9c307bbc647,1 +np.float64,0xafc0ffc35f820,0xc08ff45737e5d6b4,1 +np.float64,0x7fd282097ca50412,0x408ff1ae3b27bf3b,1 +np.float64,0x3fe2f2d50b65e5aa,0xbfe831042e6a1e99,1 +np.float64,0x3faa437bac3486f7,0xc01123d8d962205a,1 +np.float64,0x3feea54434fd4a88,0xbfaff202cc456647,1 +np.float64,0x3fc9e65b8633ccb7,0xc00270e77ffd19da,1 +np.float64,0x7fee15af61fc2b5e,0x408fff49a49154a3,1 +np.float64,0x7fefe670a73fcce0,0x408ffff6c44c1005,1 +np.float64,0x3fc0832d0f21065a,0xc007a2dc2f25384a,1 +np.float64,0x3fecfc96bcb9f92d,0xbfc24367c3912620,1 +np.float64,0x3feb705682b6e0ad,0xbfcc65b1bb16f9c5,1 +np.float64,0x3fe185c4f9630b8a,0xbfebcdb401af67a4,1 +np.float64,0x3fb0a5a9f6214b54,0xc00f8ada2566a047,1 +np.float64,0x7fe2908cdda52119,0x408ff9b744861fb1,1 +np.float64,0x7fee776e183ceedb,0x408fff6ee7c2f86e,1 +np.float64,0x3fce1d608f3c3ac1,0xc000b3685d006474,1 +np.float64,0x7fecf92aa339f254,0x408ffeda6c998267,1 +np.float64,0xce13cb519c27a,0xc08ff280f02882a9,1 +np.float64,0x1,0xc090c80000000000,1 +np.float64,0x3fe485a8afa90b51,0xbfe4823265d5a50a,1 +np.float64,0x3feea60908bd4c12,0xbfafdf7ad7fe203f,1 +np.float64,0x3fd2253033a44a60,0xbffd187d0ec8d5b9,1 +np.float64,0x435338fc86a68,0xc08fff6a591059dd,1 +np.float64,0x7fce8763a73d0ec6,0x408fef74f1e715ff,1 +np.float64,0x3fbe5ddb783cbbb7,0xc0089acc5afa794b,1 +np.float64,0x7fe4cf19ada99e32,0x408ffb0877ca302b,1 +np.float64,0x3fe94c9ea1b2993d,0xbfd5b1c2e867b911,1 +np.float64,0x3fe75541c72eaa84,0xbfdd2a27aa117699,1 +np.float64,0x8000000000000001,0x7ff8000000000000,1 +np.float64,0x7fdbec7f2c37d8fd,0x408ff66d69a7f818,1 +np.float64,0x8ef10d091de22,0xc08ff6b9ca5094f8,1 +np.float64,0x3fea69025b74d205,0xbfd1b9fe2c252c70,1 +np.float64,0x562376d0ac46f,0xc08ffc924111cd31,1 +np.float64,0x8e8097ab1d013,0xc08ff6c2e2706f67,1 +np.float64,0x3fca6803ed34d008,0xc00237aef808825b,1 +np.float64,0x7fe8fe9067b1fd20,0x408ffd25f459a7d1,1 +np.float64,0x3f918e8c7f233,0xc0900009fe011d54,1 +np.float64,0x3fdfe773833fcee7,0xbff011bc1af87bb9,1 +np.float64,0xefffef6fdfffe,0xc08ff0beb0f09eb0,1 +np.float64,0x7fe64610282c8c1f,0x408ffbd17209db18,1 +np.float64,0xe66be8c1ccd7d,0xc08ff13706c056e1,1 +np.float64,0x2837e570506fd,0xc09002ae4dae0c1a,1 +np.float64,0x3febe3a081f7c741,0xbfc964171f2a5a47,1 +np.float64,0x3fe21ed09a243da1,0xbfea41342d29c3ff,1 +np.float64,0x3fe1596c8162b2d9,0xbfec431eee30823a,1 +np.float64,0x8f2b9a131e574,0xc08ff6b51104ed4e,1 +np.float64,0x3fe88ed179711da3,0xbfd870d08a4a4b0c,1 +np.float64,0x34159bc2682b4,0xc09001305a885f94,1 +np.float64,0x1ed31e543da65,0xc0900437481577f8,1 +np.float64,0x3feafbe9de75f7d4,0xbfcf7bcdbacf1c61,1 +np.float64,0xfb16fb27f62e0,0xc08ff03938e682a2,1 +np.float64,0x3fe5cd5ba7eb9ab7,0xbfe1b7165771af3c,1 +np.float64,0x7fe72905e76e520b,0x408ffc44c4e7e80c,1 +np.float64,0x7fb7136e2e2e26db,0x408fe439fd383fb7,1 +np.float64,0x8fa585e11f4c,0xc0900b55a08a486b,1 +np.float64,0x7fed985ce47b30b9,0x408fff192b596821,1 +np.float64,0x3feaaf0869755e11,0xbfd0c671571b3764,1 +np.float64,0x3fa40fd4ec281faa,0xc012b1c8dc0b9e5f,1 +np.float64,0x7fda2a70993454e0,0x408ff5ad47b0c68a,1 +np.float64,0x3fe5f7e931abefd2,0xbfe15d52b3605abf,1 +np.float64,0x3fe9fc6d3533f8da,0xbfd338b06a790994,1 +np.float64,0x3fe060649420c0c9,0xbfeeed1756111891,1 +np.float64,0x3fce8435e33d086c,0xc0008c41cea9ed40,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x617820aec2f05,0xc08ffb251e9af0f0,1 +np.float64,0x7fcc4ab6ee38956d,0x408fee9419c8f77d,1 +np.float64,0x7fdefda2fc3dfb45,0x408ff7a15063bc05,1 +np.float64,0x7fe5138ccaaa2719,0x408ffb2e30f3a46e,1 +np.float64,0x3fe3817a836702f5,0xbfe6da7c2b25e35a,1 +np.float64,0x3fb8a7dafa314fb6,0xc00b025bc0784ebe,1 +np.float64,0x349dc420693d,0xc09011215825d2c8,1 +np.float64,0x6b0e504ad61cb,0xc08ffa0fee9c5cd6,1 +np.float64,0x273987644e732,0xc09002d34294ed79,1 +np.float64,0x3fc0bd8a6e217b15,0xc0077a5828b4d2f5,1 +np.float64,0x758b48c4eb16a,0xc08ff8fbc8fbe46a,1 +np.float64,0x3fc8a9a52631534a,0xc00301854ec0ef81,1 +np.float64,0x7fe79d29a76f3a52,0x408ffc7e1607a4c1,1 +np.float64,0x3fd7d3ebce2fa7d8,0xbff6ce8a94aebcda,1 +np.float64,0x7fd1cb68a52396d0,0x408ff13a17533b2b,1 +np.float64,0x7fda514a5d34a294,0x408ff5be5e081578,1 +np.float64,0x3fc40b4382281687,0xc0056632c8067228,1 +np.float64,0x7feff1208c3fe240,0x408ffffaa180fa0d,1 +np.float64,0x8f58739f1eb0f,0xc08ff6b17402689d,1 +np.float64,0x1fdbe9a23fb7e,0xc090040685b2d24f,1 +np.float64,0xcb1d0e87963a2,0xc08ff2abbd903b82,1 +np.float64,0x3fc45a6a1a28b4d4,0xc00538f86c4aeaee,1 +np.float64,0x3fe61885b1ac310b,0xbfe118fd2251d2ec,1 +np.float64,0x3fedf584c8fbeb0a,0xbfb8572433ff67a9,1 +np.float64,0x7fb0bddd1a217bb9,0x408fe085e0d621db,1 +np.float64,0x72d8d3e0e5b3,0xc0900ca02f68c7a1,1 +np.float64,0x5cca6ff6b994f,0xc08ffbb6751fda01,1 +np.float64,0x7fe3197839a632ef,0x408ffa0b2fccfb68,1 +np.float64,0x3fcce4d9c139c9b4,0xc0012dae05baa91b,1 +np.float64,0x3fe76d00f62eda02,0xbfdccc5f12799be1,1 +np.float64,0x3fc53c22f72a7846,0xc004bbaa9cbc7958,1 +np.float64,0x7fdda02f1ebb405d,0x408ff71c37c71659,1 +np.float64,0x3fe0844eaba1089d,0xbfee884722762583,1 +np.float64,0x3febb438dc776872,0xbfca9f05e1c691f1,1 +np.float64,0x3fdf4170cdbe82e2,0xbff08b1561c8d848,1 +np.float64,0x3fce1b8d6f3c371b,0xc000b41b69507671,1 +np.float64,0x8370e60706e1d,0xc08ff7b19ea0b4ca,1 +np.float64,0x7fa5bf92382b7f23,0x408fdb8aebb3df87,1 +np.float64,0x7fe4a59979a94b32,0x408ffaf15c1358cd,1 +np.float64,0x3faa66086034cc11,0xc0111c466b7835d6,1 +np.float64,0x7fb7a958262f52af,0x408fe48408b1e093,1 +np.float64,0x3fdaacc5f635598c,0xbff43390d06b5614,1 +np.float64,0x3fd2825b9e2504b7,0xbffca3234264f109,1 +np.float64,0x3fcede160a3dbc2c,0xc0006a759e29060c,1 +np.float64,0x7fd3b19603a7632b,0x408ff265b528371c,1 +np.float64,0x7fcf8a86ea3f150d,0x408fefd552e7f3b2,1 +np.float64,0xedbcc0f7db798,0xc08ff0daad12096b,1 +np.float64,0xf1e1683de3c2d,0xc08ff0a7a0a37e00,1 +np.float64,0xb6ebd9bf6dd7b,0xc08ff3e11e28378d,1 +np.float64,0x3fec8090d6f90122,0xbfc56031b72194cc,1 +np.float64,0x3fd3e10e37a7c21c,0xbffafd34a3ebc933,1 +np.float64,0x7fbb1c96aa36392c,0x408fe616347b3342,1 +np.float64,0x3fe2f3996f25e733,0xbfe82f25bc5d1bbd,1 +np.float64,0x7fe8709da870e13a,0x408ffce3ab6ce59a,1 +np.float64,0x7fea3233d1b46467,0x408ffdb0b3bbc6de,1 +np.float64,0x65fa4112cbf49,0xc08ffa9f85eb72b9,1 +np.float64,0x3fca2cae9f34595d,0xc00251bb275afb87,1 +np.float64,0x8135fd9f026c0,0xc08ff7e42e14dce7,1 +np.float64,0x7fe0a6f057e14de0,0x408ff876081a4bfe,1 +np.float64,0x10000000000000,0xc08ff00000000000,1 +np.float64,0x3fe1fd506263faa1,0xbfea96dd8c543b72,1 +np.float64,0xa5532c554aa66,0xc08ff50bf5bfc66d,1 +np.float64,0xc239d00b8473a,0xc08ff32ff0ea3f92,1 +np.float64,0x7fdb5314e336a629,0x408ff62d4ff60d82,1 +np.float64,0x3fe5f506e2abea0e,0xbfe16362a4682120,1 +np.float64,0x3fa20c60202418c0,0xc0134e08d82608b6,1 +np.float64,0x7fe03864b22070c8,0x408ff82866d65e9a,1 +np.float64,0x3fe72cf5656e59eb,0xbfddca298969effa,1 +np.float64,0x5c295386b852b,0xc08ffbca90b136c9,1 +np.float64,0x7fd71e5020ae3c9f,0x408ff43f6d58eb7c,1 +np.float64,0x3fd1905a842320b5,0xbffdd8ecd288159c,1 +np.float64,0x3fe6bddb256d7bb6,0xbfdf88fee1a820bb,1 +np.float64,0xe061b967c0c37,0xc08ff18581951561,1 +np.float64,0x3fe534f65cea69ed,0xbfe2fe45fe7d3040,1 +np.float64,0xdc7dae07b8fb6,0xc08ff1b93074ea76,1 +np.float64,0x3fd0425082a084a1,0xbfffa11838b21633,1 +np.float64,0xba723fc974e48,0xc08ff3a8b8d01c58,1 +np.float64,0x3fce42ffc73c8600,0xc000a5062678406e,1 +np.float64,0x3f2e6d3c7e5ce,0xc090001304cfd1c7,1 +np.float64,0x3fd4b2e5f7a965cc,0xbffa0e6e6bae0a68,1 +np.float64,0x3fe6db1d18edb63a,0xbfdf128158ee92d9,1 +np.float64,0x7fe4e5792f29caf1,0x408ffb14d9dbf133,1 +np.float64,0x3fc11cdf992239bf,0xc00739569619cd77,1 +np.float64,0x3fc05ea11220bd42,0xc007bc841b48a890,1 +np.float64,0x4bd592d497ab3,0xc08ffe0ab1c962e2,1 +np.float64,0x280068fc5000e,0xc09002b64955e865,1 +np.float64,0x7fe2f2637065e4c6,0x408ff9f379c1253a,1 +np.float64,0x3fefc38467ff8709,0xbf85e53e64b9a424,1 +np.float64,0x2d78ec5a5af1e,0xc09001f8ea8601e0,1 +np.float64,0x7feeef2b957dde56,0x408fff9bebe995f7,1 +np.float64,0x2639baf44c738,0xc09002f9618d623b,1 +np.float64,0x3fc562964d2ac52d,0xc004a6d76959ef78,1 +np.float64,0x3fe21b071fe4360e,0xbfea4adb2cd96ade,1 +np.float64,0x7fe56aa6802ad54c,0x408ffb5d81d1a898,1 +np.float64,0x4296b452852d7,0xc08fff8ad7fbcbe1,1 +np.float64,0x7fe3fac4ff27f589,0x408ffa9049eec479,1 +np.float64,0x7fe7a83e6caf507c,0x408ffc837f436604,1 +np.float64,0x3fc4ac5b872958b7,0xc0050add72381ac3,1 +np.float64,0x3fd6d697c02dad30,0xbff7c931a3eefb01,1 +np.float64,0x3f61e391c023c724,0xc021ad91e754f94b,1 +np.float64,0x10817f9c21031,0xc09007d20434d7bc,1 +np.float64,0x3fdb9c4c4cb73899,0xbff367d8615c5ece,1 +np.float64,0x3fe26ead6b64dd5b,0xbfe977771def5989,1 +np.float64,0x3fc43ea5c3287d4c,0xc00548c2163ae631,1 +np.float64,0x3fe05bd8bba0b7b1,0xbfeef9ea0db91abc,1 +np.float64,0x3feac78369358f07,0xbfd071e2b0aeab39,1 +np.float64,0x7fe254922ca4a923,0x408ff991bdd4e5d3,1 +np.float64,0x3fe5a2f5842b45eb,0xbfe21135c9a71666,1 +np.float64,0x3fd5daf98c2bb5f3,0xbff8cd24f7c07003,1 +np.float64,0x3fcb2a1384365427,0xc001e40f0d04299a,1 +np.float64,0x3fe073974360e72f,0xbfeeb7183a9930b7,1 +np.float64,0xcf3440819e688,0xc08ff270d3a71001,1 +np.float64,0x3fd35656cda6acae,0xbffba083fba4939d,1 +np.float64,0x7fe6c59b4ded8b36,0x408ffc12ce725425,1 +np.float64,0x3fba896f943512df,0xc00a291cb6947701,1 +np.float64,0x7fe54917e86a922f,0x408ffb4b5e0fb848,1 +np.float64,0x7fed2a3f51ba547e,0x408ffeede945a948,1 +np.float64,0x3fdc72bd5038e57b,0xbff2b73b7e93e209,1 +np.float64,0x7fefdb3f9f3fb67e,0x408ffff2b702a768,1 +np.float64,0x3fb0184430203088,0xc00fee8c1351763c,1 +np.float64,0x7d6c3668fad87,0xc08ff83c195f2cca,1 +np.float64,0x3fd5aa254aab544b,0xbff900f16365991b,1 +np.float64,0x3f963daab02c7b55,0xc0161974495b1b71,1 +np.float64,0x3fa7a9c5982f538b,0xc011bde0f6052a89,1 +np.float64,0xb3a5a74b674b5,0xc08ff4167bc97c81,1 +np.float64,0x7fad0c14503a1828,0x408fdee1f2d56cd7,1 +np.float64,0x43e0e9d887c1e,0xc08fff522837b13b,1 +np.float64,0x3fe513b20aea2764,0xbfe346ea994100e6,1 +np.float64,0x7fe4e10393e9c206,0x408ffb12630f6a06,1 +np.float64,0x68b286e2d1651,0xc08ffa51c0d795d4,1 +np.float64,0x7fe8de453331bc89,0x408ffd17012b75ac,1 +np.float64,0x1b3d77d4367b0,0xc09004edea60aa36,1 +np.float64,0x3fd351cbc326a398,0xbffba5f0f4d5fdba,1 +np.float64,0x3fd264951b24c92a,0xbffcc8636788b9bf,1 +np.float64,0xd2465761a48cb,0xc08ff2455c9c53e5,1 +np.float64,0x7fe46a0ef028d41d,0x408ffacfe32c6f5d,1 +np.float64,0x3fafd8ac4c3fb159,0xc010071bf33195d0,1 +np.float64,0x902aec5d2055e,0xc08ff6a08e28aabc,1 +np.float64,0x3fcea61bb03d4c37,0xc0007f76e509b657,1 +np.float64,0x7fe8d90f9571b21e,0x408ffd1495f952e7,1 +np.float64,0x7fa650c9442ca192,0x408fdbd6ff22fdd8,1 +np.float64,0x3fe8ecfdf171d9fc,0xbfd7115df40e8580,1 +np.float64,0x7fd4e6fe7f29cdfc,0x408ff315b0dae183,1 +np.float64,0x77df4c52efbea,0xc08ff8c1d5c1df33,1 +np.float64,0xe200b0cfc4016,0xc08ff1703cfb8e79,1 +np.float64,0x3fe230ea7e2461d5,0xbfea132d2385160e,1 +np.float64,0x7fd1f7ced723ef9d,0x408ff156bfbf92a4,1 +np.float64,0x3fea762818f4ec50,0xbfd18c12a88e5f79,1 +np.float64,0x7feea4ba7c7d4974,0x408fff8004164054,1 +np.float64,0x833ec605067d9,0xc08ff7b606383841,1 +np.float64,0x7fd0c2d7fea185af,0x408ff0894f3a0cf4,1 +np.float64,0x3fe1d7d61d23afac,0xbfeaf76fee875d3e,1 +np.float64,0x65adecb0cb5be,0xc08ffaa82cb09d68,1 diff --git a/local_packages/numpy/_core/tests/data/umath-validation-set-sin.csv b/local_packages/numpy/_core/tests/data/umath-validation-set-sin.csv new file mode 100644 index 0000000..03e76ff --- /dev/null +++ b/local_packages/numpy/_core/tests/data/umath-validation-set-sin.csv @@ -0,0 +1,1370 @@ +dtype,input,output,ulperrortol +## +ve denormals ## +np.float32,0x004b4716,0x004b4716,2 +np.float32,0x007b2490,0x007b2490,2 +np.float32,0x007c99fa,0x007c99fa,2 +np.float32,0x00734a0c,0x00734a0c,2 +np.float32,0x0070de24,0x0070de24,2 +np.float32,0x007fffff,0x007fffff,2 +np.float32,0x00000001,0x00000001,2 +## -ve denormals ## +np.float32,0x80495d65,0x80495d65,2 +np.float32,0x806894f6,0x806894f6,2 +np.float32,0x80555a76,0x80555a76,2 +np.float32,0x804e1fb8,0x804e1fb8,2 +np.float32,0x80687de9,0x80687de9,2 +np.float32,0x807fffff,0x807fffff,2 +np.float32,0x80000001,0x80000001,2 +## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ## +np.float32,0x00000000,0x00000000,2 +np.float32,0x80000000,0x80000000,2 +np.float32,0x00800000,0x00800000,2 +np.float32,0x80800000,0x80800000,2 +## 1.00f ## +np.float32,0x3f800000,0x3f576aa4,2 +np.float32,0x3f800001,0x3f576aa6,2 +np.float32,0x3f800002,0x3f576aa7,2 +np.float32,0xc090a8b0,0x3f7b4e48,2 +np.float32,0x41ce3184,0x3f192d43,2 +np.float32,0xc1d85848,0xbf7161cb,2 +np.float32,0x402b8820,0x3ee3f29f,2 +np.float32,0x42b4e454,0x3f1d0151,2 +np.float32,0x42a67a60,0x3f7ffa4c,2 +np.float32,0x41d92388,0x3f67beef,2 +np.float32,0x422dd66c,0xbeffb0c1,2 +np.float32,0xc28f5be6,0xbf0bae79,2 +np.float32,0x41ab2674,0x3f0ffe2b,2 +np.float32,0x3f490fdb,0x3f3504f3,2 +np.float32,0xbf490fdb,0xbf3504f3,2 +np.float32,0x3fc90fdb,0x3f800000,2 +np.float32,0xbfc90fdb,0xbf800000,2 +np.float32,0x40490fdb,0xb3bbbd2e,2 +np.float32,0xc0490fdb,0x33bbbd2e,2 +np.float32,0x3fc90fdb,0x3f800000,2 +np.float32,0xbfc90fdb,0xbf800000,2 +np.float32,0x40490fdb,0xb3bbbd2e,2 +np.float32,0xc0490fdb,0x33bbbd2e,2 +np.float32,0x40c90fdb,0x343bbd2e,2 +np.float32,0xc0c90fdb,0xb43bbd2e,2 +np.float32,0x4016cbe4,0x3f3504f3,2 +np.float32,0xc016cbe4,0xbf3504f3,2 +np.float32,0x4096cbe4,0xbf800000,2 +np.float32,0xc096cbe4,0x3f800000,2 +np.float32,0x4116cbe4,0xb2ccde2e,2 +np.float32,0xc116cbe4,0x32ccde2e,2 +np.float32,0x40490fdb,0xb3bbbd2e,2 +np.float32,0xc0490fdb,0x33bbbd2e,2 +np.float32,0x40c90fdb,0x343bbd2e,2 +np.float32,0xc0c90fdb,0xb43bbd2e,2 +np.float32,0x41490fdb,0x34bbbd2e,2 +np.float32,0xc1490fdb,0xb4bbbd2e,2 +np.float32,0x407b53d2,0xbf3504f5,2 +np.float32,0xc07b53d2,0x3f3504f5,2 +np.float32,0x40fb53d2,0x3f800000,2 +np.float32,0xc0fb53d2,0xbf800000,2 +np.float32,0x417b53d2,0xb535563d,2 +np.float32,0xc17b53d2,0x3535563d,2 +np.float32,0x4096cbe4,0xbf800000,2 +np.float32,0xc096cbe4,0x3f800000,2 +np.float32,0x4116cbe4,0xb2ccde2e,2 +np.float32,0xc116cbe4,0x32ccde2e,2 +np.float32,0x4196cbe4,0x334cde2e,2 +np.float32,0xc196cbe4,0xb34cde2e,2 +np.float32,0x40afede0,0xbf3504ef,2 +np.float32,0xc0afede0,0x3f3504ef,2 +np.float32,0x412fede0,0xbf800000,2 +np.float32,0xc12fede0,0x3f800000,2 +np.float32,0x41afede0,0xb5b222c4,2 +np.float32,0xc1afede0,0x35b222c4,2 +np.float32,0x40c90fdb,0x343bbd2e,2 +np.float32,0xc0c90fdb,0xb43bbd2e,2 +np.float32,0x41490fdb,0x34bbbd2e,2 +np.float32,0xc1490fdb,0xb4bbbd2e,2 +np.float32,0x41c90fdb,0x353bbd2e,2 +np.float32,0xc1c90fdb,0xb53bbd2e,2 +np.float32,0x40e231d6,0x3f3504f3,2 +np.float32,0xc0e231d6,0xbf3504f3,2 +np.float32,0x416231d6,0x3f800000,2 +np.float32,0xc16231d6,0xbf800000,2 +np.float32,0x41e231d6,0xb399a6a2,2 +np.float32,0xc1e231d6,0x3399a6a2,2 +np.float32,0x40fb53d2,0x3f800000,2 +np.float32,0xc0fb53d2,0xbf800000,2 +np.float32,0x417b53d2,0xb535563d,2 +np.float32,0xc17b53d2,0x3535563d,2 +np.float32,0x41fb53d2,0x35b5563d,2 +np.float32,0xc1fb53d2,0xb5b5563d,2 +np.float32,0x410a3ae7,0x3f3504eb,2 +np.float32,0xc10a3ae7,0xbf3504eb,2 +np.float32,0x418a3ae7,0xbf800000,2 +np.float32,0xc18a3ae7,0x3f800000,2 +np.float32,0x420a3ae7,0xb6308908,2 +np.float32,0xc20a3ae7,0x36308908,2 +np.float32,0x4116cbe4,0xb2ccde2e,2 +np.float32,0xc116cbe4,0x32ccde2e,2 +np.float32,0x4196cbe4,0x334cde2e,2 +np.float32,0xc196cbe4,0xb34cde2e,2 +np.float32,0x4216cbe4,0x33ccde2e,2 +np.float32,0xc216cbe4,0xb3ccde2e,2 +np.float32,0x41235ce2,0xbf3504f7,2 +np.float32,0xc1235ce2,0x3f3504f7,2 +np.float32,0x41a35ce2,0x3f800000,2 +np.float32,0xc1a35ce2,0xbf800000,2 +np.float32,0x42235ce2,0xb5b889b6,2 +np.float32,0xc2235ce2,0x35b889b6,2 +np.float32,0x412fede0,0xbf800000,2 +np.float32,0xc12fede0,0x3f800000,2 +np.float32,0x41afede0,0xb5b222c4,2 +np.float32,0xc1afede0,0x35b222c4,2 +np.float32,0x422fede0,0x363222c4,2 +np.float32,0xc22fede0,0xb63222c4,2 +np.float32,0x413c7edd,0xbf3504f3,2 +np.float32,0xc13c7edd,0x3f3504f3,2 +np.float32,0x41bc7edd,0xbf800000,2 +np.float32,0xc1bc7edd,0x3f800000,2 +np.float32,0x423c7edd,0xb4000add,2 +np.float32,0xc23c7edd,0x34000add,2 +np.float32,0x41490fdb,0x34bbbd2e,2 +np.float32,0xc1490fdb,0xb4bbbd2e,2 +np.float32,0x41c90fdb,0x353bbd2e,2 +np.float32,0xc1c90fdb,0xb53bbd2e,2 +np.float32,0x42490fdb,0x35bbbd2e,2 +np.float32,0xc2490fdb,0xb5bbbd2e,2 +np.float32,0x4155a0d9,0x3f3504fb,2 +np.float32,0xc155a0d9,0xbf3504fb,2 +np.float32,0x41d5a0d9,0x3f800000,2 +np.float32,0xc1d5a0d9,0xbf800000,2 +np.float32,0x4255a0d9,0xb633bc81,2 +np.float32,0xc255a0d9,0x3633bc81,2 +np.float32,0x416231d6,0x3f800000,2 +np.float32,0xc16231d6,0xbf800000,2 +np.float32,0x41e231d6,0xb399a6a2,2 +np.float32,0xc1e231d6,0x3399a6a2,2 +np.float32,0x426231d6,0x3419a6a2,2 +np.float32,0xc26231d6,0xb419a6a2,2 +np.float32,0x416ec2d4,0x3f3504ef,2 +np.float32,0xc16ec2d4,0xbf3504ef,2 +np.float32,0x41eec2d4,0xbf800000,2 +np.float32,0xc1eec2d4,0x3f800000,2 +np.float32,0x426ec2d4,0xb5bef0a7,2 +np.float32,0xc26ec2d4,0x35bef0a7,2 +np.float32,0x417b53d2,0xb535563d,2 +np.float32,0xc17b53d2,0x3535563d,2 +np.float32,0x41fb53d2,0x35b5563d,2 +np.float32,0xc1fb53d2,0xb5b5563d,2 +np.float32,0x427b53d2,0x3635563d,2 +np.float32,0xc27b53d2,0xb635563d,2 +np.float32,0x4183f268,0xbf3504ff,2 +np.float32,0xc183f268,0x3f3504ff,2 +np.float32,0x4203f268,0x3f800000,2 +np.float32,0xc203f268,0xbf800000,2 +np.float32,0x4283f268,0xb6859a13,2 +np.float32,0xc283f268,0x36859a13,2 +np.float32,0x418a3ae7,0xbf800000,2 +np.float32,0xc18a3ae7,0x3f800000,2 +np.float32,0x420a3ae7,0xb6308908,2 +np.float32,0xc20a3ae7,0x36308908,2 +np.float32,0x428a3ae7,0x36b08908,2 +np.float32,0xc28a3ae7,0xb6b08908,2 +np.float32,0x41908365,0xbf3504f6,2 +np.float32,0xc1908365,0x3f3504f6,2 +np.float32,0x42108365,0xbf800000,2 +np.float32,0xc2108365,0x3f800000,2 +np.float32,0x42908365,0x3592200d,2 +np.float32,0xc2908365,0xb592200d,2 +np.float32,0x4196cbe4,0x334cde2e,2 +np.float32,0xc196cbe4,0xb34cde2e,2 +np.float32,0x4216cbe4,0x33ccde2e,2 +np.float32,0xc216cbe4,0xb3ccde2e,2 +np.float32,0x4296cbe4,0x344cde2e,2 +np.float32,0xc296cbe4,0xb44cde2e,2 +np.float32,0x419d1463,0x3f3504f8,2 +np.float32,0xc19d1463,0xbf3504f8,2 +np.float32,0x421d1463,0x3f800000,2 +np.float32,0xc21d1463,0xbf800000,2 +np.float32,0x429d1463,0xb5c55799,2 +np.float32,0xc29d1463,0x35c55799,2 +np.float32,0x41a35ce2,0x3f800000,2 +np.float32,0xc1a35ce2,0xbf800000,2 +np.float32,0x42235ce2,0xb5b889b6,2 +np.float32,0xc2235ce2,0x35b889b6,2 +np.float32,0x42a35ce2,0x363889b6,2 +np.float32,0xc2a35ce2,0xb63889b6,2 +np.float32,0x41a9a561,0x3f3504e7,2 +np.float32,0xc1a9a561,0xbf3504e7,2 +np.float32,0x4229a561,0xbf800000,2 +np.float32,0xc229a561,0x3f800000,2 +np.float32,0x42a9a561,0xb68733d0,2 +np.float32,0xc2a9a561,0x368733d0,2 +np.float32,0x41afede0,0xb5b222c4,2 +np.float32,0xc1afede0,0x35b222c4,2 +np.float32,0x422fede0,0x363222c4,2 +np.float32,0xc22fede0,0xb63222c4,2 +np.float32,0x42afede0,0x36b222c4,2 +np.float32,0xc2afede0,0xb6b222c4,2 +np.float32,0x41b6365e,0xbf3504f0,2 +np.float32,0xc1b6365e,0x3f3504f0,2 +np.float32,0x4236365e,0x3f800000,2 +np.float32,0xc236365e,0xbf800000,2 +np.float32,0x42b6365e,0x358bb91c,2 +np.float32,0xc2b6365e,0xb58bb91c,2 +np.float32,0x41bc7edd,0xbf800000,2 +np.float32,0xc1bc7edd,0x3f800000,2 +np.float32,0x423c7edd,0xb4000add,2 +np.float32,0xc23c7edd,0x34000add,2 +np.float32,0x42bc7edd,0x34800add,2 +np.float32,0xc2bc7edd,0xb4800add,2 +np.float32,0x41c2c75c,0xbf3504ef,2 +np.float32,0xc1c2c75c,0x3f3504ef,2 +np.float32,0x4242c75c,0xbf800000,2 +np.float32,0xc242c75c,0x3f800000,2 +np.float32,0x42c2c75c,0xb5cbbe8a,2 +np.float32,0xc2c2c75c,0x35cbbe8a,2 +np.float32,0x41c90fdb,0x353bbd2e,2 +np.float32,0xc1c90fdb,0xb53bbd2e,2 +np.float32,0x42490fdb,0x35bbbd2e,2 +np.float32,0xc2490fdb,0xb5bbbd2e,2 +np.float32,0x42c90fdb,0x363bbd2e,2 +np.float32,0xc2c90fdb,0xb63bbd2e,2 +np.float32,0x41cf585a,0x3f3504ff,2 +np.float32,0xc1cf585a,0xbf3504ff,2 +np.float32,0x424f585a,0x3f800000,2 +np.float32,0xc24f585a,0xbf800000,2 +np.float32,0x42cf585a,0xb688cd8c,2 +np.float32,0xc2cf585a,0x3688cd8c,2 +np.float32,0x41d5a0d9,0x3f800000,2 +np.float32,0xc1d5a0d9,0xbf800000,2 +np.float32,0x4255a0d9,0xb633bc81,2 +np.float32,0xc255a0d9,0x3633bc81,2 +np.float32,0x42d5a0d9,0x36b3bc81,2 +np.float32,0xc2d5a0d9,0xb6b3bc81,2 +np.float32,0x41dbe958,0x3f3504e0,2 +np.float32,0xc1dbe958,0xbf3504e0,2 +np.float32,0x425be958,0xbf800000,2 +np.float32,0xc25be958,0x3f800000,2 +np.float32,0x42dbe958,0xb6deab75,2 +np.float32,0xc2dbe958,0x36deab75,2 +np.float32,0x41e231d6,0xb399a6a2,2 +np.float32,0xc1e231d6,0x3399a6a2,2 +np.float32,0x426231d6,0x3419a6a2,2 +np.float32,0xc26231d6,0xb419a6a2,2 +np.float32,0x42e231d6,0x3499a6a2,2 +np.float32,0xc2e231d6,0xb499a6a2,2 +np.float32,0x41e87a55,0xbf3504f8,2 +np.float32,0xc1e87a55,0x3f3504f8,2 +np.float32,0x42687a55,0x3f800000,2 +np.float32,0xc2687a55,0xbf800000,2 +np.float32,0x42e87a55,0xb5d2257b,2 +np.float32,0xc2e87a55,0x35d2257b,2 +np.float32,0x41eec2d4,0xbf800000,2 +np.float32,0xc1eec2d4,0x3f800000,2 +np.float32,0x426ec2d4,0xb5bef0a7,2 +np.float32,0xc26ec2d4,0x35bef0a7,2 +np.float32,0x42eec2d4,0x363ef0a7,2 +np.float32,0xc2eec2d4,0xb63ef0a7,2 +np.float32,0x41f50b53,0xbf3504e7,2 +np.float32,0xc1f50b53,0x3f3504e7,2 +np.float32,0x42750b53,0xbf800000,2 +np.float32,0xc2750b53,0x3f800000,2 +np.float32,0x42f50b53,0xb68a6748,2 +np.float32,0xc2f50b53,0x368a6748,2 +np.float32,0x41fb53d2,0x35b5563d,2 +np.float32,0xc1fb53d2,0xb5b5563d,2 +np.float32,0x427b53d2,0x3635563d,2 +np.float32,0xc27b53d2,0xb635563d,2 +np.float32,0x42fb53d2,0x36b5563d,2 +np.float32,0xc2fb53d2,0xb6b5563d,2 +np.float32,0x4200ce28,0x3f3504f0,2 +np.float32,0xc200ce28,0xbf3504f0,2 +np.float32,0x4280ce28,0x3f800000,2 +np.float32,0xc280ce28,0xbf800000,2 +np.float32,0x4300ce28,0x357dd672,2 +np.float32,0xc300ce28,0xb57dd672,2 +np.float32,0x4203f268,0x3f800000,2 +np.float32,0xc203f268,0xbf800000,2 +np.float32,0x4283f268,0xb6859a13,2 +np.float32,0xc283f268,0x36859a13,2 +np.float32,0x4303f268,0x37059a13,2 +np.float32,0xc303f268,0xb7059a13,2 +np.float32,0x420716a7,0x3f3504ee,2 +np.float32,0xc20716a7,0xbf3504ee,2 +np.float32,0x428716a7,0xbf800000,2 +np.float32,0xc28716a7,0x3f800000,2 +np.float32,0x430716a7,0xb5d88c6d,2 +np.float32,0xc30716a7,0x35d88c6d,2 +np.float32,0x420a3ae7,0xb6308908,2 +np.float32,0xc20a3ae7,0x36308908,2 +np.float32,0x428a3ae7,0x36b08908,2 +np.float32,0xc28a3ae7,0xb6b08908,2 +np.float32,0x430a3ae7,0x37308908,2 +np.float32,0xc30a3ae7,0xb7308908,2 +np.float32,0x420d5f26,0xbf350500,2 +np.float32,0xc20d5f26,0x3f350500,2 +np.float32,0x428d5f26,0x3f800000,2 +np.float32,0xc28d5f26,0xbf800000,2 +np.float32,0x430d5f26,0xb68c0105,2 +np.float32,0xc30d5f26,0x368c0105,2 +np.float32,0x42108365,0xbf800000,2 +np.float32,0xc2108365,0x3f800000,2 +np.float32,0x42908365,0x3592200d,2 +np.float32,0xc2908365,0xb592200d,2 +np.float32,0x43108365,0xb612200d,2 +np.float32,0xc3108365,0x3612200d,2 +np.float32,0x4213a7a5,0xbf3504df,2 +np.float32,0xc213a7a5,0x3f3504df,2 +np.float32,0x4293a7a5,0xbf800000,2 +np.float32,0xc293a7a5,0x3f800000,2 +np.float32,0x4313a7a5,0xb6e1deee,2 +np.float32,0xc313a7a5,0x36e1deee,2 +np.float32,0x4216cbe4,0x33ccde2e,2 +np.float32,0xc216cbe4,0xb3ccde2e,2 +np.float32,0x4296cbe4,0x344cde2e,2 +np.float32,0xc296cbe4,0xb44cde2e,2 +np.float32,0x4316cbe4,0x34ccde2e,2 +np.float32,0xc316cbe4,0xb4ccde2e,2 +np.float32,0x4219f024,0x3f35050f,2 +np.float32,0xc219f024,0xbf35050f,2 +np.float32,0x4299f024,0x3f800000,2 +np.float32,0xc299f024,0xbf800000,2 +np.float32,0x4319f024,0xb71bde6c,2 +np.float32,0xc319f024,0x371bde6c,2 +np.float32,0x421d1463,0x3f800000,2 +np.float32,0xc21d1463,0xbf800000,2 +np.float32,0x429d1463,0xb5c55799,2 +np.float32,0xc29d1463,0x35c55799,2 +np.float32,0x431d1463,0x36455799,2 +np.float32,0xc31d1463,0xb6455799,2 +np.float32,0x422038a3,0x3f3504d0,2 +np.float32,0xc22038a3,0xbf3504d0,2 +np.float32,0x42a038a3,0xbf800000,2 +np.float32,0xc2a038a3,0x3f800000,2 +np.float32,0x432038a3,0xb746cd61,2 +np.float32,0xc32038a3,0x3746cd61,2 +np.float32,0x42235ce2,0xb5b889b6,2 +np.float32,0xc2235ce2,0x35b889b6,2 +np.float32,0x42a35ce2,0x363889b6,2 +np.float32,0xc2a35ce2,0xb63889b6,2 +np.float32,0x43235ce2,0x36b889b6,2 +np.float32,0xc3235ce2,0xb6b889b6,2 +np.float32,0x42268121,0xbf3504f1,2 +np.float32,0xc2268121,0x3f3504f1,2 +np.float32,0x42a68121,0x3f800000,2 +np.float32,0xc2a68121,0xbf800000,2 +np.float32,0x43268121,0x35643aac,2 +np.float32,0xc3268121,0xb5643aac,2 +np.float32,0x4229a561,0xbf800000,2 +np.float32,0xc229a561,0x3f800000,2 +np.float32,0x42a9a561,0xb68733d0,2 +np.float32,0xc2a9a561,0x368733d0,2 +np.float32,0x4329a561,0x370733d0,2 +np.float32,0xc329a561,0xb70733d0,2 +np.float32,0x422cc9a0,0xbf3504ee,2 +np.float32,0xc22cc9a0,0x3f3504ee,2 +np.float32,0x42acc9a0,0xbf800000,2 +np.float32,0xc2acc9a0,0x3f800000,2 +np.float32,0x432cc9a0,0xb5e55a50,2 +np.float32,0xc32cc9a0,0x35e55a50,2 +np.float32,0x422fede0,0x363222c4,2 +np.float32,0xc22fede0,0xb63222c4,2 +np.float32,0x42afede0,0x36b222c4,2 +np.float32,0xc2afede0,0xb6b222c4,2 +np.float32,0x432fede0,0x373222c4,2 +np.float32,0xc32fede0,0xb73222c4,2 +np.float32,0x4233121f,0x3f350500,2 +np.float32,0xc233121f,0xbf350500,2 +np.float32,0x42b3121f,0x3f800000,2 +np.float32,0xc2b3121f,0xbf800000,2 +np.float32,0x4333121f,0xb68f347d,2 +np.float32,0xc333121f,0x368f347d,2 +np.float32,0x4236365e,0x3f800000,2 +np.float32,0xc236365e,0xbf800000,2 +np.float32,0x42b6365e,0x358bb91c,2 +np.float32,0xc2b6365e,0xb58bb91c,2 +np.float32,0x4336365e,0xb60bb91c,2 +np.float32,0xc336365e,0x360bb91c,2 +np.float32,0x42395a9e,0x3f3504df,2 +np.float32,0xc2395a9e,0xbf3504df,2 +np.float32,0x42b95a9e,0xbf800000,2 +np.float32,0xc2b95a9e,0x3f800000,2 +np.float32,0x43395a9e,0xb6e51267,2 +np.float32,0xc3395a9e,0x36e51267,2 +np.float32,0x423c7edd,0xb4000add,2 +np.float32,0xc23c7edd,0x34000add,2 +np.float32,0x42bc7edd,0x34800add,2 +np.float32,0xc2bc7edd,0xb4800add,2 +np.float32,0x433c7edd,0x35000add,2 +np.float32,0xc33c7edd,0xb5000add,2 +np.float32,0x423fa31d,0xbf35050f,2 +np.float32,0xc23fa31d,0x3f35050f,2 +np.float32,0x42bfa31d,0x3f800000,2 +np.float32,0xc2bfa31d,0xbf800000,2 +np.float32,0x433fa31d,0xb71d7828,2 +np.float32,0xc33fa31d,0x371d7828,2 +np.float32,0x4242c75c,0xbf800000,2 +np.float32,0xc242c75c,0x3f800000,2 +np.float32,0x42c2c75c,0xb5cbbe8a,2 +np.float32,0xc2c2c75c,0x35cbbe8a,2 +np.float32,0x4342c75c,0x364bbe8a,2 +np.float32,0xc342c75c,0xb64bbe8a,2 +np.float32,0x4245eb9c,0xbf3504d0,2 +np.float32,0xc245eb9c,0x3f3504d0,2 +np.float32,0x42c5eb9c,0xbf800000,2 +np.float32,0xc2c5eb9c,0x3f800000,2 +np.float32,0x4345eb9c,0xb748671d,2 +np.float32,0xc345eb9c,0x3748671d,2 +np.float32,0x42490fdb,0x35bbbd2e,2 +np.float32,0xc2490fdb,0xb5bbbd2e,2 +np.float32,0x42c90fdb,0x363bbd2e,2 +np.float32,0xc2c90fdb,0xb63bbd2e,2 +np.float32,0x43490fdb,0x36bbbd2e,2 +np.float32,0xc3490fdb,0xb6bbbd2e,2 +np.float32,0x424c341a,0x3f3504f1,2 +np.float32,0xc24c341a,0xbf3504f1,2 +np.float32,0x42cc341a,0x3f800000,2 +np.float32,0xc2cc341a,0xbf800000,2 +np.float32,0x434c341a,0x354a9ee6,2 +np.float32,0xc34c341a,0xb54a9ee6,2 +np.float32,0x424f585a,0x3f800000,2 +np.float32,0xc24f585a,0xbf800000,2 +np.float32,0x42cf585a,0xb688cd8c,2 +np.float32,0xc2cf585a,0x3688cd8c,2 +np.float32,0x434f585a,0x3708cd8c,2 +np.float32,0xc34f585a,0xb708cd8c,2 +np.float32,0x42527c99,0x3f3504ee,2 +np.float32,0xc2527c99,0xbf3504ee,2 +np.float32,0x42d27c99,0xbf800000,2 +np.float32,0xc2d27c99,0x3f800000,2 +np.float32,0x43527c99,0xb5f22833,2 +np.float32,0xc3527c99,0x35f22833,2 +np.float32,0x4255a0d9,0xb633bc81,2 +np.float32,0xc255a0d9,0x3633bc81,2 +np.float32,0x42d5a0d9,0x36b3bc81,2 +np.float32,0xc2d5a0d9,0xb6b3bc81,2 +np.float32,0x4355a0d9,0x3733bc81,2 +np.float32,0xc355a0d9,0xb733bc81,2 +np.float32,0x4258c518,0xbf350500,2 +np.float32,0xc258c518,0x3f350500,2 +np.float32,0x42d8c518,0x3f800000,2 +np.float32,0xc2d8c518,0xbf800000,2 +np.float32,0x4358c518,0xb69267f6,2 +np.float32,0xc358c518,0x369267f6,2 +np.float32,0x425be958,0xbf800000,2 +np.float32,0xc25be958,0x3f800000,2 +np.float32,0x42dbe958,0xb6deab75,2 +np.float32,0xc2dbe958,0x36deab75,2 +np.float32,0x435be958,0x375eab75,2 +np.float32,0xc35be958,0xb75eab75,2 +np.float32,0x425f0d97,0xbf3504df,2 +np.float32,0xc25f0d97,0x3f3504df,2 +np.float32,0x42df0d97,0xbf800000,2 +np.float32,0xc2df0d97,0x3f800000,2 +np.float32,0x435f0d97,0xb6e845e0,2 +np.float32,0xc35f0d97,0x36e845e0,2 +np.float32,0x426231d6,0x3419a6a2,2 +np.float32,0xc26231d6,0xb419a6a2,2 +np.float32,0x42e231d6,0x3499a6a2,2 +np.float32,0xc2e231d6,0xb499a6a2,2 +np.float32,0x436231d6,0x3519a6a2,2 +np.float32,0xc36231d6,0xb519a6a2,2 +np.float32,0x42655616,0x3f35050f,2 +np.float32,0xc2655616,0xbf35050f,2 +np.float32,0x42e55616,0x3f800000,2 +np.float32,0xc2e55616,0xbf800000,2 +np.float32,0x43655616,0xb71f11e5,2 +np.float32,0xc3655616,0x371f11e5,2 +np.float32,0x42687a55,0x3f800000,2 +np.float32,0xc2687a55,0xbf800000,2 +np.float32,0x42e87a55,0xb5d2257b,2 +np.float32,0xc2e87a55,0x35d2257b,2 +np.float32,0x43687a55,0x3652257b,2 +np.float32,0xc3687a55,0xb652257b,2 +np.float32,0x426b9e95,0x3f3504cf,2 +np.float32,0xc26b9e95,0xbf3504cf,2 +np.float32,0x42eb9e95,0xbf800000,2 +np.float32,0xc2eb9e95,0x3f800000,2 +np.float32,0x436b9e95,0xb74a00d9,2 +np.float32,0xc36b9e95,0x374a00d9,2 +np.float32,0x426ec2d4,0xb5bef0a7,2 +np.float32,0xc26ec2d4,0x35bef0a7,2 +np.float32,0x42eec2d4,0x363ef0a7,2 +np.float32,0xc2eec2d4,0xb63ef0a7,2 +np.float32,0x436ec2d4,0x36bef0a7,2 +np.float32,0xc36ec2d4,0xb6bef0a7,2 +np.float32,0x4271e713,0xbf3504f1,2 +np.float32,0xc271e713,0x3f3504f1,2 +np.float32,0x42f1e713,0x3f800000,2 +np.float32,0xc2f1e713,0xbf800000,2 +np.float32,0x4371e713,0x35310321,2 +np.float32,0xc371e713,0xb5310321,2 +np.float32,0x42750b53,0xbf800000,2 +np.float32,0xc2750b53,0x3f800000,2 +np.float32,0x42f50b53,0xb68a6748,2 +np.float32,0xc2f50b53,0x368a6748,2 +np.float32,0x43750b53,0x370a6748,2 +np.float32,0xc3750b53,0xb70a6748,2 +np.float32,0x42782f92,0xbf3504ee,2 +np.float32,0xc2782f92,0x3f3504ee,2 +np.float32,0x42f82f92,0xbf800000,2 +np.float32,0xc2f82f92,0x3f800000,2 +np.float32,0x43782f92,0xb5fef616,2 +np.float32,0xc3782f92,0x35fef616,2 +np.float32,0x427b53d2,0x3635563d,2 +np.float32,0xc27b53d2,0xb635563d,2 +np.float32,0x42fb53d2,0x36b5563d,2 +np.float32,0xc2fb53d2,0xb6b5563d,2 +np.float32,0x437b53d2,0x3735563d,2 +np.float32,0xc37b53d2,0xb735563d,2 +np.float32,0x427e7811,0x3f350500,2 +np.float32,0xc27e7811,0xbf350500,2 +np.float32,0x42fe7811,0x3f800000,2 +np.float32,0xc2fe7811,0xbf800000,2 +np.float32,0x437e7811,0xb6959b6f,2 +np.float32,0xc37e7811,0x36959b6f,2 +np.float32,0x4280ce28,0x3f800000,2 +np.float32,0xc280ce28,0xbf800000,2 +np.float32,0x4300ce28,0x357dd672,2 +np.float32,0xc300ce28,0xb57dd672,2 +np.float32,0x4380ce28,0xb5fdd672,2 +np.float32,0xc380ce28,0x35fdd672,2 +np.float32,0x42826048,0x3f3504de,2 +np.float32,0xc2826048,0xbf3504de,2 +np.float32,0x43026048,0xbf800000,2 +np.float32,0xc3026048,0x3f800000,2 +np.float32,0x43826048,0xb6eb7958,2 +np.float32,0xc3826048,0x36eb7958,2 +np.float32,0x4283f268,0xb6859a13,2 +np.float32,0xc283f268,0x36859a13,2 +np.float32,0x4303f268,0x37059a13,2 +np.float32,0xc303f268,0xb7059a13,2 +np.float32,0x4383f268,0x37859a13,2 +np.float32,0xc383f268,0xb7859a13,2 +np.float32,0x42858487,0xbf3504e2,2 +np.float32,0xc2858487,0x3f3504e2,2 +np.float32,0x43058487,0x3f800000,2 +np.float32,0xc3058487,0xbf800000,2 +np.float32,0x43858487,0x36bea8be,2 +np.float32,0xc3858487,0xb6bea8be,2 +np.float32,0x428716a7,0xbf800000,2 +np.float32,0xc28716a7,0x3f800000,2 +np.float32,0x430716a7,0xb5d88c6d,2 +np.float32,0xc30716a7,0x35d88c6d,2 +np.float32,0x438716a7,0x36588c6d,2 +np.float32,0xc38716a7,0xb6588c6d,2 +np.float32,0x4288a8c7,0xbf3504cf,2 +np.float32,0xc288a8c7,0x3f3504cf,2 +np.float32,0x4308a8c7,0xbf800000,2 +np.float32,0xc308a8c7,0x3f800000,2 +np.float32,0x4388a8c7,0xb74b9a96,2 +np.float32,0xc388a8c7,0x374b9a96,2 +np.float32,0x428a3ae7,0x36b08908,2 +np.float32,0xc28a3ae7,0xb6b08908,2 +np.float32,0x430a3ae7,0x37308908,2 +np.float32,0xc30a3ae7,0xb7308908,2 +np.float32,0x438a3ae7,0x37b08908,2 +np.float32,0xc38a3ae7,0xb7b08908,2 +np.float32,0x428bcd06,0x3f3504f2,2 +np.float32,0xc28bcd06,0xbf3504f2,2 +np.float32,0x430bcd06,0x3f800000,2 +np.float32,0xc30bcd06,0xbf800000,2 +np.float32,0x438bcd06,0x3517675b,2 +np.float32,0xc38bcd06,0xb517675b,2 +np.float32,0x428d5f26,0x3f800000,2 +np.float32,0xc28d5f26,0xbf800000,2 +np.float32,0x430d5f26,0xb68c0105,2 +np.float32,0xc30d5f26,0x368c0105,2 +np.float32,0x438d5f26,0x370c0105,2 +np.float32,0xc38d5f26,0xb70c0105,2 +np.float32,0x428ef146,0x3f3504c0,2 +np.float32,0xc28ef146,0xbf3504c0,2 +np.float32,0x430ef146,0xbf800000,2 +np.float32,0xc30ef146,0x3f800000,2 +np.float32,0x438ef146,0xb790bc40,2 +np.float32,0xc38ef146,0x3790bc40,2 +np.float32,0x42908365,0x3592200d,2 +np.float32,0xc2908365,0xb592200d,2 +np.float32,0x43108365,0xb612200d,2 +np.float32,0xc3108365,0x3612200d,2 +np.float32,0x43908365,0xb692200d,2 +np.float32,0xc3908365,0x3692200d,2 +np.float32,0x42921585,0xbf350501,2 +np.float32,0xc2921585,0x3f350501,2 +np.float32,0x43121585,0x3f800000,2 +np.float32,0xc3121585,0xbf800000,2 +np.float32,0x43921585,0xb698cee8,2 +np.float32,0xc3921585,0x3698cee8,2 +np.float32,0x4293a7a5,0xbf800000,2 +np.float32,0xc293a7a5,0x3f800000,2 +np.float32,0x4313a7a5,0xb6e1deee,2 +np.float32,0xc313a7a5,0x36e1deee,2 +np.float32,0x4393a7a5,0x3761deee,2 +np.float32,0xc393a7a5,0xb761deee,2 +np.float32,0x429539c5,0xbf3504b1,2 +np.float32,0xc29539c5,0x3f3504b1,2 +np.float32,0x431539c5,0xbf800000,2 +np.float32,0xc31539c5,0x3f800000,2 +np.float32,0x439539c5,0xb7bbab34,2 +np.float32,0xc39539c5,0x37bbab34,2 +np.float32,0x4296cbe4,0x344cde2e,2 +np.float32,0xc296cbe4,0xb44cde2e,2 +np.float32,0x4316cbe4,0x34ccde2e,2 +np.float32,0xc316cbe4,0xb4ccde2e,2 +np.float32,0x4396cbe4,0x354cde2e,2 +np.float32,0xc396cbe4,0xb54cde2e,2 +np.float32,0x42985e04,0x3f350510,2 +np.float32,0xc2985e04,0xbf350510,2 +np.float32,0x43185e04,0x3f800000,2 +np.float32,0xc3185e04,0xbf800000,2 +np.float32,0x43985e04,0xb722455d,2 +np.float32,0xc3985e04,0x3722455d,2 +np.float32,0x4299f024,0x3f800000,2 +np.float32,0xc299f024,0xbf800000,2 +np.float32,0x4319f024,0xb71bde6c,2 +np.float32,0xc319f024,0x371bde6c,2 +np.float32,0x4399f024,0x379bde6c,2 +np.float32,0xc399f024,0xb79bde6c,2 +np.float32,0x429b8243,0x3f3504fc,2 +np.float32,0xc29b8243,0xbf3504fc,2 +np.float32,0x431b8243,0xbf800000,2 +np.float32,0xc31b8243,0x3f800000,2 +np.float32,0x439b8243,0x364b2eb8,2 +np.float32,0xc39b8243,0xb64b2eb8,2 +np.float32,0x435b2047,0xbf350525,2 +np.float32,0x42a038a2,0xbf800000,2 +np.float32,0x432038a2,0x3664ca7e,2 +np.float32,0x4345eb9b,0x365e638c,2 +np.float32,0x42c5eb9b,0xbf800000,2 +np.float32,0x42eb9e94,0xbf800000,2 +np.float32,0x4350ea79,0x3f800000,2 +np.float32,0x42dbe957,0x3585522a,2 +np.float32,0x425be957,0xbf800000,2 +np.float32,0x435be957,0xb605522a,2 +np.float32,0x476362a2,0xbd7ff911,2 +np.float32,0x464c99a4,0x3e7f4d41,2 +np.float32,0x4471f73d,0x3e7fe1b0,2 +np.float32,0x445a6752,0x3e7ef367,2 +np.float32,0x474fa400,0x3e7f9fcd,2 +np.float32,0x45c1e72f,0xbe7fc7af,2 +np.float32,0x4558c91d,0x3e7e9f31,2 +np.float32,0x43784f94,0xbdff6654,2 +np.float32,0x466e8500,0xbe7ea0a3,2 +np.float32,0x468e1c25,0x3e7e22fb,2 +np.float32,0x44ea6cfc,0x3dff70c3,2 +np.float32,0x4605126c,0x3e7f89ef,2 +np.float32,0x4788b3c6,0xbb87d853,2 +np.float32,0x4531b042,0x3dffd163,2 +np.float32,0x43f1f71d,0x3dfff387,2 +np.float32,0x462c3fa5,0xbd7fe13d,2 +np.float32,0x441c5354,0xbdff76b4,2 +np.float32,0x44908b69,0x3e7dcf0d,2 +np.float32,0x478813ad,0xbe7e9d80,2 +np.float32,0x441c4351,0x3dff937b,2 +np.float64,0x1,0x1,1 +np.float64,0x8000000000000001,0x8000000000000001,1 +np.float64,0x10000000000000,0x10000000000000,1 +np.float64,0x8010000000000000,0x8010000000000000,1 +np.float64,0x7fefffffffffffff,0x3f7452fc98b34e97,1 +np.float64,0xffefffffffffffff,0xbf7452fc98b34e97,1 +np.float64,0x7ff0000000000000,0xfff8000000000000,1 +np.float64,0xfff0000000000000,0xfff8000000000000,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0xbfda51b226b4a364,0xbfd9956328ff876c,1 +np.float64,0xbfb4a65aee294cb8,0xbfb4a09fd744f8a5,1 +np.float64,0xbfd73b914fae7722,0xbfd6b9cce55af379,1 +np.float64,0xbfd90c12b4b21826,0xbfd869a3867b51c2,1 +np.float64,0x3fe649bb3d6c9376,0x3fe48778d9b48a21,1 +np.float64,0xbfd5944532ab288a,0xbfd52c30e1951b42,1 +np.float64,0x3fb150c45222a190,0x3fb14d633eb8275d,1 +np.float64,0x3fe4a6ffa9e94e00,0x3fe33f8a95c33299,1 +np.float64,0x3fe8d2157171a42a,0x3fe667d904ac95a6,1 +np.float64,0xbfa889f52c3113f0,0xbfa8878d90a23fa5,1 +np.float64,0x3feb3234bef6646a,0x3fe809d541d9017a,1 +np.float64,0x3fc6de266f2dbc50,0x3fc6bf0ee80a0d86,1 +np.float64,0x3fe8455368f08aa6,0x3fe6028254338ed5,1 +np.float64,0xbfe5576079eaaec1,0xbfe3cb4a8f6bc3f5,1 +np.float64,0xbfe9f822ff73f046,0xbfe7360d7d5cb887,1 +np.float64,0xbfb1960e7e232c20,0xbfb1928438258602,1 +np.float64,0xbfca75938d34eb28,0xbfca4570979bf2fa,1 +np.float64,0x3fd767dd15aecfbc,0x3fd6e33039018bab,1 +np.float64,0xbfe987750ef30eea,0xbfe6e7ed30ce77f0,1 +np.float64,0xbfe87f95a1f0ff2b,0xbfe62ca7e928bb2a,1 +np.float64,0xbfd2465301a48ca6,0xbfd2070245775d76,1 +np.float64,0xbfb1306ed22260e0,0xbfb12d2088eaa4f9,1 +np.float64,0xbfd8089010b01120,0xbfd778f9db77f2f3,1 +np.float64,0x3fbf9cf4ee3f39f0,0x3fbf88674fde1ca2,1 +np.float64,0x3fe6d8468a6db08e,0x3fe4f403f38b7bec,1 +np.float64,0xbfd9e5deefb3cbbe,0xbfd932692c722351,1 +np.float64,0x3fd1584d55a2b09c,0x3fd122253eeecc2e,1 +np.float64,0x3fe857979cf0af30,0x3fe60fc12b5ba8db,1 +np.float64,0x3fe3644149e6c882,0x3fe239f47013cfe6,1 +np.float64,0xbfe22ea62be45d4c,0xbfe13834c17d56fe,1 +np.float64,0xbfe8d93e1df1b27c,0xbfe66cf4ee467fd2,1 +np.float64,0xbfe9c497c9f38930,0xbfe7127417da4204,1 +np.float64,0x3fd6791cecacf238,0x3fd6039ccb5a7fde,1 +np.float64,0xbfc1dc1b1523b838,0xbfc1cd48edd9ae19,1 +np.float64,0xbfc92a8491325508,0xbfc901176e0158a5,1 +np.float64,0x3fa8649b3430c940,0x3fa8623e82d9504f,1 +np.float64,0x3fe0bed6a1617dae,0x3fdffbb307fb1abe,1 +np.float64,0x3febdf7765f7beee,0x3fe87ad01a89b74a,1 +np.float64,0xbfd3a56d46a74ada,0xbfd356cf41bf83cd,1 +np.float64,0x3fd321d824a643b0,0x3fd2d93846a224b3,1 +np.float64,0xbfc6a49fb52d4940,0xbfc686704906e7d3,1 +np.float64,0xbfdd4103c9ba8208,0xbfdc3ef0c03615b4,1 +np.float64,0xbfe0b78a51e16f14,0xbfdfef0d9ffc38b5,1 +np.float64,0xbfdac7a908b58f52,0xbfda0158956ceecf,1 +np.float64,0xbfbfbf12f23f7e28,0xbfbfaa428989258c,1 +np.float64,0xbfd55f5aa2aabeb6,0xbfd4fa39de65f33a,1 +np.float64,0x3fe06969abe0d2d4,0x3fdf6744fafdd9cf,1 +np.float64,0x3fe56ab8be6ad572,0x3fe3da7a1986d543,1 +np.float64,0xbfeefbbec67df77e,0xbfea5d426132f4aa,1 +np.float64,0x3fe6e1f49cedc3ea,0x3fe4fb53f3d8e3d5,1 +np.float64,0x3feceb231c79d646,0x3fe923d3efa55414,1 +np.float64,0xbfd03dd08ea07ba2,0xbfd011549aa1998a,1 +np.float64,0xbfd688327aad1064,0xbfd611c61b56adbe,1 +np.float64,0xbfde3249d8bc6494,0xbfdd16a7237a39d5,1 +np.float64,0x3febd4b65677a96c,0x3fe873e1a401ef03,1 +np.float64,0xbfe46bd2b368d7a6,0xbfe31023c2467749,1 +np.float64,0x3fbf9f5cde3f3ec0,0x3fbf8aca8ec53c45,1 +np.float64,0x3fc20374032406e8,0x3fc1f43f1f2f4d5e,1 +np.float64,0xbfec143b16f82876,0xbfe89caa42582381,1 +np.float64,0xbfd14fa635a29f4c,0xbfd119ced11da669,1 +np.float64,0x3fe25236d4e4a46e,0x3fe156242d644b7a,1 +np.float64,0xbfe4ed793469daf2,0xbfe377a88928fd77,1 +np.float64,0xbfb363572626c6b0,0xbfb35e98d8fe87ae,1 +np.float64,0xbfb389d5aa2713a8,0xbfb384fae55565a7,1 +np.float64,0x3fca6e001934dc00,0x3fca3e0661eaca84,1 +np.float64,0x3fe748f3f76e91e8,0x3fe548ab2168aea6,1 +np.float64,0x3fef150efdfe2a1e,0x3fea6b92d74f60d3,1 +np.float64,0xbfd14b52b1a296a6,0xbfd115a387c0fa93,1 +np.float64,0x3fe3286b5ce650d6,0x3fe208a6469a7527,1 +np.float64,0xbfd57b4f4baaf69e,0xbfd514a12a9f7ab0,1 +np.float64,0xbfef14bd467e297b,0xbfea6b64bbfd42ce,1 +np.float64,0xbfe280bc90650179,0xbfe17d2c49955dba,1 +np.float64,0x3fca8759d7350eb0,0x3fca56d5c17bbc14,1 +np.float64,0xbfdf988f30bf311e,0xbfde53f96f69b05f,1 +np.float64,0x3f6b6eeb4036de00,0x3f6b6ee7e3f86f9a,1 +np.float64,0xbfed560be8faac18,0xbfe9656c5cf973d8,1 +np.float64,0x3fc6102c592c2058,0x3fc5f43efad5396d,1 +np.float64,0xbfdef64ed2bdec9e,0xbfddc4b7fbd45aea,1 +np.float64,0x3fe814acd570295a,0x3fe5df183d543bfe,1 +np.float64,0x3fca21313f344260,0x3fc9f2d47f64fbe2,1 +np.float64,0xbfe89932cc713266,0xbfe63f186a2f60ce,1 +np.float64,0x3fe4ffcff169ffa0,0x3fe386336115ee21,1 +np.float64,0x3fee6964087cd2c8,0x3fea093d31e2c2c5,1 +np.float64,0xbfbeea604e3dd4c0,0xbfbed72734852669,1 +np.float64,0xbfea1954fb7432aa,0xbfe74cdad8720032,1 +np.float64,0x3fea3e1a5ef47c34,0x3fe765ffba65a11d,1 +np.float64,0x3fcedb850b3db708,0x3fce8f39d92f00ba,1 +np.float64,0x3fd3b52d41a76a5c,0x3fd365d22b0003f9,1 +np.float64,0xbfa4108a0c282110,0xbfa40f397fcd844f,1 +np.float64,0x3fd7454c57ae8a98,0x3fd6c2e5542c6c83,1 +np.float64,0xbfeecd3c7a7d9a79,0xbfea42ca943a1695,1 +np.float64,0xbfdddda397bbbb48,0xbfdccb27283d4c4c,1 +np.float64,0x3fe6b52cf76d6a5a,0x3fe4d96ff32925ff,1 +np.float64,0xbfa39a75ec2734f0,0xbfa3993c0da84f87,1 +np.float64,0x3fdd3fe6fdba7fcc,0x3fdc3df12fe9e525,1 +np.float64,0xbfb57a98162af530,0xbfb5742525d5fbe2,1 +np.float64,0xbfd3e166cfa7c2ce,0xbfd38ff2891be9b0,1 +np.float64,0x3fdb6a04f9b6d408,0x3fda955e5018e9dc,1 +np.float64,0x3fe4ab03a4e95608,0x3fe342bfa76e1aa8,1 +np.float64,0xbfe6c8480b6d9090,0xbfe4e7eaa935b3f5,1 +np.float64,0xbdd6b5a17bae,0xbdd6b5a17bae,1 +np.float64,0xd6591979acb23,0xd6591979acb23,1 +np.float64,0x5adbed90b5b7e,0x5adbed90b5b7e,1 +np.float64,0xa664c5314cc99,0xa664c5314cc99,1 +np.float64,0x1727fb162e500,0x1727fb162e500,1 +np.float64,0xdb49a93db6935,0xdb49a93db6935,1 +np.float64,0xb10c958d62193,0xb10c958d62193,1 +np.float64,0xad38276f5a705,0xad38276f5a705,1 +np.float64,0x1d5d0b983aba2,0x1d5d0b983aba2,1 +np.float64,0x915f48e122be9,0x915f48e122be9,1 +np.float64,0x475958ae8eb2c,0x475958ae8eb2c,1 +np.float64,0x3af8406675f09,0x3af8406675f09,1 +np.float64,0x655e88a4cabd2,0x655e88a4cabd2,1 +np.float64,0x40fee8ce81fde,0x40fee8ce81fde,1 +np.float64,0xab83103f57062,0xab83103f57062,1 +np.float64,0x7cf934b8f9f27,0x7cf934b8f9f27,1 +np.float64,0x29f7524853eeb,0x29f7524853eeb,1 +np.float64,0x4a5e954894bd3,0x4a5e954894bd3,1 +np.float64,0x24638f3a48c73,0x24638f3a48c73,1 +np.float64,0xa4f32fc749e66,0xa4f32fc749e66,1 +np.float64,0xf8e92df7f1d26,0xf8e92df7f1d26,1 +np.float64,0x292e9d50525d4,0x292e9d50525d4,1 +np.float64,0xe937e897d26fd,0xe937e897d26fd,1 +np.float64,0xd3bde1d5a77bc,0xd3bde1d5a77bc,1 +np.float64,0xa447ffd548900,0xa447ffd548900,1 +np.float64,0xa3b7b691476f7,0xa3b7b691476f7,1 +np.float64,0x490095c892013,0x490095c892013,1 +np.float64,0xfc853235f90a7,0xfc853235f90a7,1 +np.float64,0x5a8bc082b5179,0x5a8bc082b5179,1 +np.float64,0x1baca45a37595,0x1baca45a37595,1 +np.float64,0x2164120842c83,0x2164120842c83,1 +np.float64,0x66692bdeccd26,0x66692bdeccd26,1 +np.float64,0xf205bdd3e40b8,0xf205bdd3e40b8,1 +np.float64,0x7c3fff98f8801,0x7c3fff98f8801,1 +np.float64,0xccdf10e199bf,0xccdf10e199bf,1 +np.float64,0x92db8e8125b8,0x92db8e8125b8,1 +np.float64,0x5789a8d6af136,0x5789a8d6af136,1 +np.float64,0xbdda869d7bb51,0xbdda869d7bb51,1 +np.float64,0xb665e0596ccbc,0xb665e0596ccbc,1 +np.float64,0x74e6b46ee9cd7,0x74e6b46ee9cd7,1 +np.float64,0x4f39cf7c9e73b,0x4f39cf7c9e73b,1 +np.float64,0xfdbf3907fb7e7,0xfdbf3907fb7e7,1 +np.float64,0xafdef4d55fbdf,0xafdef4d55fbdf,1 +np.float64,0xb49858236930b,0xb49858236930b,1 +np.float64,0x3ebe21d47d7c5,0x3ebe21d47d7c5,1 +np.float64,0x5b620512b6c41,0x5b620512b6c41,1 +np.float64,0x31918cda63232,0x31918cda63232,1 +np.float64,0x68b5741ed16af,0x68b5741ed16af,1 +np.float64,0xa5c09a5b4b814,0xa5c09a5b4b814,1 +np.float64,0x55f51c14abea4,0x55f51c14abea4,1 +np.float64,0xda8a3e41b515,0xda8a3e41b515,1 +np.float64,0x9ea9c8513d539,0x9ea9c8513d539,1 +np.float64,0x7f23b964fe478,0x7f23b964fe478,1 +np.float64,0xf6e08c7bedc12,0xf6e08c7bedc12,1 +np.float64,0x7267aa24e4cf6,0x7267aa24e4cf6,1 +np.float64,0x236bb93a46d78,0x236bb93a46d78,1 +np.float64,0x9a98430b35309,0x9a98430b35309,1 +np.float64,0xbb683fef76d08,0xbb683fef76d08,1 +np.float64,0x1ff0eb6e3fe1e,0x1ff0eb6e3fe1e,1 +np.float64,0xf524038fea481,0xf524038fea481,1 +np.float64,0xd714e449ae29d,0xd714e449ae29d,1 +np.float64,0x4154fd7682aa0,0x4154fd7682aa0,1 +np.float64,0x5b8d2f6cb71a7,0x5b8d2f6cb71a7,1 +np.float64,0xc91aa21d92355,0xc91aa21d92355,1 +np.float64,0xbd94fd117b2a0,0xbd94fd117b2a0,1 +np.float64,0x685b207ad0b65,0x685b207ad0b65,1 +np.float64,0xd2485b05a490c,0xd2485b05a490c,1 +np.float64,0x151ea5e62a3d6,0x151ea5e62a3d6,1 +np.float64,0x2635a7164c6b6,0x2635a7164c6b6,1 +np.float64,0x88ae3b5d115c8,0x88ae3b5d115c8,1 +np.float64,0x8a055a55140ac,0x8a055a55140ac,1 +np.float64,0x756f7694eadef,0x756f7694eadef,1 +np.float64,0x866d74630cdaf,0x866d74630cdaf,1 +np.float64,0x39e44f2873c8b,0x39e44f2873c8b,1 +np.float64,0x2a07ceb6540fb,0x2a07ceb6540fb,1 +np.float64,0xc52b96398a573,0xc52b96398a573,1 +np.float64,0x9546543b2a8cb,0x9546543b2a8cb,1 +np.float64,0x5b995b90b732c,0x5b995b90b732c,1 +np.float64,0x2de10a565bc22,0x2de10a565bc22,1 +np.float64,0x3b06ee94760df,0x3b06ee94760df,1 +np.float64,0xb18e77a5631cf,0xb18e77a5631cf,1 +np.float64,0x3b89ae3a77137,0x3b89ae3a77137,1 +np.float64,0xd9b0b6e5b3617,0xd9b0b6e5b3617,1 +np.float64,0x30b2310861647,0x30b2310861647,1 +np.float64,0x326a3ab464d48,0x326a3ab464d48,1 +np.float64,0x4c18610a9830d,0x4c18610a9830d,1 +np.float64,0x541dea42a83be,0x541dea42a83be,1 +np.float64,0xcd027dbf9a050,0xcd027dbf9a050,1 +np.float64,0x780a0f80f015,0x780a0f80f015,1 +np.float64,0x740ed5b2e81db,0x740ed5b2e81db,1 +np.float64,0xc226814d844d0,0xc226814d844d0,1 +np.float64,0xde958541bd2b1,0xde958541bd2b1,1 +np.float64,0xb563d3296ac7b,0xb563d3296ac7b,1 +np.float64,0x1db3b0b83b677,0x1db3b0b83b677,1 +np.float64,0xa7b0275d4f605,0xa7b0275d4f605,1 +np.float64,0x72f8d038e5f1b,0x72f8d038e5f1b,1 +np.float64,0x860ed1350c1da,0x860ed1350c1da,1 +np.float64,0x79f88262f3f11,0x79f88262f3f11,1 +np.float64,0x8817761f102ef,0x8817761f102ef,1 +np.float64,0xac44784b5888f,0xac44784b5888f,1 +np.float64,0x800fd594241fab28,0x800fd594241fab28,1 +np.float64,0x800ede32f8ddbc66,0x800ede32f8ddbc66,1 +np.float64,0x800de4c1121bc982,0x800de4c1121bc982,1 +np.float64,0x80076ebcddcedd7a,0x80076ebcddcedd7a,1 +np.float64,0x800b3fee06567fdc,0x800b3fee06567fdc,1 +np.float64,0x800b444426b68889,0x800b444426b68889,1 +np.float64,0x800b1c037a563807,0x800b1c037a563807,1 +np.float64,0x8001eb88c2a3d712,0x8001eb88c2a3d712,1 +np.float64,0x80058aae6dab155e,0x80058aae6dab155e,1 +np.float64,0x80083df2d4f07be6,0x80083df2d4f07be6,1 +np.float64,0x800e3b19d97c7634,0x800e3b19d97c7634,1 +np.float64,0x800a71c6f374e38e,0x800a71c6f374e38e,1 +np.float64,0x80048557f1490ab1,0x80048557f1490ab1,1 +np.float64,0x8000a00e6b01401e,0x8000a00e6b01401e,1 +np.float64,0x800766a3e2cecd49,0x800766a3e2cecd49,1 +np.float64,0x80015eb44602bd69,0x80015eb44602bd69,1 +np.float64,0x800bde885a77bd11,0x800bde885a77bd11,1 +np.float64,0x800224c53ea4498b,0x800224c53ea4498b,1 +np.float64,0x80048e8c6a291d1a,0x80048e8c6a291d1a,1 +np.float64,0x800b667e4af6ccfd,0x800b667e4af6ccfd,1 +np.float64,0x800ae3d7e395c7b0,0x800ae3d7e395c7b0,1 +np.float64,0x80086c245550d849,0x80086c245550d849,1 +np.float64,0x800d7d25f6fafa4c,0x800d7d25f6fafa4c,1 +np.float64,0x800f8d9ab0ff1b35,0x800f8d9ab0ff1b35,1 +np.float64,0x800690e949cd21d3,0x800690e949cd21d3,1 +np.float64,0x8003022381060448,0x8003022381060448,1 +np.float64,0x80085e0dad70bc1c,0x80085e0dad70bc1c,1 +np.float64,0x800e2ffc369c5ff9,0x800e2ffc369c5ff9,1 +np.float64,0x800b629b5af6c537,0x800b629b5af6c537,1 +np.float64,0x800fdc964b7fb92d,0x800fdc964b7fb92d,1 +np.float64,0x80036bb4b1c6d76a,0x80036bb4b1c6d76a,1 +np.float64,0x800b382f7f16705f,0x800b382f7f16705f,1 +np.float64,0x800ebac9445d7593,0x800ebac9445d7593,1 +np.float64,0x80015075c3e2a0ec,0x80015075c3e2a0ec,1 +np.float64,0x8002a6ec5ce54dd9,0x8002a6ec5ce54dd9,1 +np.float64,0x8009fab74a93f56f,0x8009fab74a93f56f,1 +np.float64,0x800c94b9ea992974,0x800c94b9ea992974,1 +np.float64,0x800dc2efd75b85e0,0x800dc2efd75b85e0,1 +np.float64,0x800be6400d57cc80,0x800be6400d57cc80,1 +np.float64,0x80021f6858443ed1,0x80021f6858443ed1,1 +np.float64,0x800600e2ac4c01c6,0x800600e2ac4c01c6,1 +np.float64,0x800a2159e6b442b4,0x800a2159e6b442b4,1 +np.float64,0x800c912f4bb9225f,0x800c912f4bb9225f,1 +np.float64,0x800a863a9db50c76,0x800a863a9db50c76,1 +np.float64,0x800ac16851d582d1,0x800ac16851d582d1,1 +np.float64,0x8003f7d32e87efa7,0x8003f7d32e87efa7,1 +np.float64,0x800be4eee3d7c9de,0x800be4eee3d7c9de,1 +np.float64,0x80069ff0ac4d3fe2,0x80069ff0ac4d3fe2,1 +np.float64,0x80061c986d4c3932,0x80061c986d4c3932,1 +np.float64,0x8000737b4de0e6f7,0x8000737b4de0e6f7,1 +np.float64,0x8002066ef7440cdf,0x8002066ef7440cdf,1 +np.float64,0x8001007050c200e1,0x8001007050c200e1,1 +np.float64,0x8008df9fa351bf40,0x8008df9fa351bf40,1 +np.float64,0x800f8394ee5f072a,0x800f8394ee5f072a,1 +np.float64,0x80008e0b01c11c17,0x80008e0b01c11c17,1 +np.float64,0x800f7088ed3ee112,0x800f7088ed3ee112,1 +np.float64,0x800285b86f650b72,0x800285b86f650b72,1 +np.float64,0x8008ec18af51d832,0x8008ec18af51d832,1 +np.float64,0x800da08523bb410a,0x800da08523bb410a,1 +np.float64,0x800de853ca7bd0a8,0x800de853ca7bd0a8,1 +np.float64,0x8008c8aefad1915e,0x8008c8aefad1915e,1 +np.float64,0x80010c39d5821874,0x80010c39d5821874,1 +np.float64,0x8009208349724107,0x8009208349724107,1 +np.float64,0x800783783f0f06f1,0x800783783f0f06f1,1 +np.float64,0x80025caf9984b960,0x80025caf9984b960,1 +np.float64,0x800bc76fa6778ee0,0x800bc76fa6778ee0,1 +np.float64,0x80017e2f89a2fc60,0x80017e2f89a2fc60,1 +np.float64,0x800ef169843de2d3,0x800ef169843de2d3,1 +np.float64,0x80098a5f7db314bf,0x80098a5f7db314bf,1 +np.float64,0x800d646f971ac8df,0x800d646f971ac8df,1 +np.float64,0x800110d1dc6221a4,0x800110d1dc6221a4,1 +np.float64,0x800f8b422a1f1684,0x800f8b422a1f1684,1 +np.float64,0x800785c97dcf0b94,0x800785c97dcf0b94,1 +np.float64,0x800da201283b4403,0x800da201283b4403,1 +np.float64,0x800a117cc7b422fa,0x800a117cc7b422fa,1 +np.float64,0x80024731cfa48e64,0x80024731cfa48e64,1 +np.float64,0x800199d456c333a9,0x800199d456c333a9,1 +np.float64,0x8005f66bab8becd8,0x8005f66bab8becd8,1 +np.float64,0x8008e7227c11ce45,0x8008e7227c11ce45,1 +np.float64,0x8007b66cc42f6cda,0x8007b66cc42f6cda,1 +np.float64,0x800669e6f98cd3cf,0x800669e6f98cd3cf,1 +np.float64,0x800aed917375db23,0x800aed917375db23,1 +np.float64,0x8008b6dd15116dbb,0x8008b6dd15116dbb,1 +np.float64,0x800f49869cfe930d,0x800f49869cfe930d,1 +np.float64,0x800a712661b4e24d,0x800a712661b4e24d,1 +np.float64,0x800944e816f289d1,0x800944e816f289d1,1 +np.float64,0x800eba0f8a1d741f,0x800eba0f8a1d741f,1 +np.float64,0x800cf6ded139edbe,0x800cf6ded139edbe,1 +np.float64,0x80023100c6246202,0x80023100c6246202,1 +np.float64,0x800c5a94add8b52a,0x800c5a94add8b52a,1 +np.float64,0x800adf329b95be66,0x800adf329b95be66,1 +np.float64,0x800af9afc115f360,0x800af9afc115f360,1 +np.float64,0x800d66ce837acd9d,0x800d66ce837acd9d,1 +np.float64,0x8003ffb5e507ff6d,0x8003ffb5e507ff6d,1 +np.float64,0x80027d280024fa51,0x80027d280024fa51,1 +np.float64,0x800fc37e1d1f86fc,0x800fc37e1d1f86fc,1 +np.float64,0x800fc7258b9f8e4b,0x800fc7258b9f8e4b,1 +np.float64,0x8003fb5789e7f6b0,0x8003fb5789e7f6b0,1 +np.float64,0x800eb4e7a13d69cf,0x800eb4e7a13d69cf,1 +np.float64,0x800951850952a30a,0x800951850952a30a,1 +np.float64,0x3fed4071be3a80e3,0x3fe95842074431df,1 +np.float64,0x3f8d2341203a4682,0x3f8d2300b453bd9f,1 +np.float64,0x3fdc8ce332b919c6,0x3fdb9cdf1440c28f,1 +np.float64,0x3fdc69bd84b8d37b,0x3fdb7d25c8166b7b,1 +np.float64,0x3fc4c22ad0298456,0x3fc4aae73e231b4f,1 +np.float64,0x3fea237809f446f0,0x3fe753cc6ca96193,1 +np.float64,0x3fd34cf6462699ed,0x3fd30268909bb47e,1 +np.float64,0x3fafce20643f9c41,0x3fafc8e41a240e35,1 +np.float64,0x3fdc6d416538da83,0x3fdb805262292863,1 +np.float64,0x3fe7d8362aefb06c,0x3fe5b2ce659db7fd,1 +np.float64,0x3fe290087de52011,0x3fe189f9a3eb123d,1 +np.float64,0x3fa62d2bf82c5a58,0x3fa62b65958ca2b8,1 +np.float64,0x3fafd134403fa269,0x3fafcbf670f8a6f3,1 +np.float64,0x3fa224e53c2449ca,0x3fa223ec5de1631b,1 +np.float64,0x3fb67e2c2c2cfc58,0x3fb676c445fb70a0,1 +np.float64,0x3fda358d01346b1a,0x3fd97b9441666eb2,1 +np.float64,0x3fdd30fc4bba61f9,0x3fdc308da423778d,1 +np.float64,0x3fc56e99c52add34,0x3fc5550004492621,1 +np.float64,0x3fe32d08de265a12,0x3fe20c761a73cec2,1 +np.float64,0x3fd46cf932a8d9f2,0x3fd414a7f3db03df,1 +np.float64,0x3fd94cfa2b3299f4,0x3fd8a5961b3e4bdd,1 +np.float64,0x3fed6ea3a6fadd47,0x3fe9745b2f6c9204,1 +np.float64,0x3fe4431d1768863a,0x3fe2ef61d0481de0,1 +np.float64,0x3fe1d8e00ea3b1c0,0x3fe0efab5050ee78,1 +np.float64,0x3fe56f37dcaade70,0x3fe3de00b0f392e0,1 +np.float64,0x3fde919a2dbd2334,0x3fdd6b6d2dcf2396,1 +np.float64,0x3fe251e3d4a4a3c8,0x3fe155de69605d60,1 +np.float64,0x3fe5e0ecc5abc1da,0x3fe436a5de5516cf,1 +np.float64,0x3fcd48780c3a90f0,0x3fcd073fa907ba9b,1 +np.float64,0x3fe4e8149229d029,0x3fe37360801d5b66,1 +np.float64,0x3fb9ef159633de2b,0x3fb9e3bc05a15d1d,1 +np.float64,0x3fc24a3f0424947e,0x3fc23a5432ca0e7c,1 +np.float64,0x3fe55ca196aab943,0x3fe3cf6b3143435a,1 +np.float64,0x3fe184544c2308a9,0x3fe0a7b49fa80aec,1 +np.float64,0x3fe2c76e83658edd,0x3fe1b8355c1ea771,1 +np.float64,0x3fea8d2c4ab51a59,0x3fe79ba85aabc099,1 +np.float64,0x3fd74f98abae9f31,0x3fd6cc85005d0593,1 +np.float64,0x3fec6de9a678dbd3,0x3fe8d59a1d23cdd1,1 +np.float64,0x3fec8a0e50f9141d,0x3fe8e7500f6f6a00,1 +np.float64,0x3fe9de6d08b3bcda,0x3fe7245319508767,1 +np.float64,0x3fe4461fd1688c40,0x3fe2f1cf0b93aba6,1 +np.float64,0x3fde342d9d3c685b,0x3fdd185609d5719d,1 +np.float64,0x3feb413fc8368280,0x3fe813c091d2519a,1 +np.float64,0x3fe64333156c8666,0x3fe48275b9a6a358,1 +np.float64,0x3fe03c65226078ca,0x3fdf18b26786be35,1 +np.float64,0x3fee11054dbc220b,0x3fe9d579a1cfa7ad,1 +np.float64,0x3fbaefccae35df99,0x3fbae314fef7c7ea,1 +np.float64,0x3feed4e3487da9c7,0x3fea4729241c8811,1 +np.float64,0x3fbb655df836cabc,0x3fbb57fcf9a097be,1 +np.float64,0x3fe68b0273ed1605,0x3fe4b96109afdf76,1 +np.float64,0x3fd216bfc3242d80,0x3fd1d957363f6a43,1 +np.float64,0x3fe01328d4a02652,0x3fded083bbf94aba,1 +np.float64,0x3fe3f9a61ae7f34c,0x3fe2b3f701b79028,1 +np.float64,0x3fed4e7cf8fa9cfa,0x3fe960d27084fb40,1 +np.float64,0x3faec08e343d811c,0x3faebbd2aa07ac1f,1 +np.float64,0x3fd2d1bbeea5a378,0x3fd28c9aefcf48ad,1 +np.float64,0x3fd92e941fb25d28,0x3fd889857f88410d,1 +np.float64,0x3fe43decb7e87bd9,0x3fe2eb32b4ee4667,1 +np.float64,0x3fef49cabcfe9395,0x3fea892f9a233f76,1 +np.float64,0x3fe3e96812e7d2d0,0x3fe2a6c6b45dd6ee,1 +np.float64,0x3fd24c0293a49805,0x3fd20c76d54473cb,1 +np.float64,0x3fb43d6b7e287ad7,0x3fb438060772795a,1 +np.float64,0x3fe87bf7d3f0f7f0,0x3fe62a0c47411c62,1 +np.float64,0x3fee82a2e07d0546,0x3fea17e27e752b7b,1 +np.float64,0x3fe40c01bbe81803,0x3fe2c2d9483f44d8,1 +np.float64,0x3fd686ccae2d0d99,0x3fd610763fb61097,1 +np.float64,0x3fe90fcf2af21f9e,0x3fe693c12df59ba9,1 +np.float64,0x3fefb3ce11ff679c,0x3feac3dd4787529d,1 +np.float64,0x3fcec53ff63d8a80,0x3fce79992af00c58,1 +np.float64,0x3fe599dd7bab33bb,0x3fe3ff5da7575d85,1 +np.float64,0x3fe9923b1a732476,0x3fe6ef71d13db456,1 +np.float64,0x3febf76fcef7eee0,0x3fe88a3952e11373,1 +np.float64,0x3fc2cfd128259fa2,0x3fc2be7fd47fd811,1 +np.float64,0x3fe4d37ae269a6f6,0x3fe36300d45e3745,1 +np.float64,0x3fe23aa2e4247546,0x3fe1424e172f756f,1 +np.float64,0x3fe4f0596ca9e0b3,0x3fe379f0c49de7ef,1 +np.float64,0x3fe2e4802fe5c900,0x3fe1d062a8812601,1 +np.float64,0x3fe5989c79eb3139,0x3fe3fe6308552dec,1 +np.float64,0x3fe3c53cb4e78a79,0x3fe28956e573aca4,1 +np.float64,0x3fe6512beeeca258,0x3fe48d2d5ece979f,1 +np.float64,0x3fd8473ddb308e7c,0x3fd7b33e38adc6ad,1 +np.float64,0x3fecd09c9679a139,0x3fe91361fa0c5bcb,1 +np.float64,0x3fc991530e3322a6,0x3fc965e2c514a9e9,1 +np.float64,0x3f6d4508403a8a11,0x3f6d45042b68acc5,1 +np.float64,0x3fea1f198f743e33,0x3fe750ce918d9330,1 +np.float64,0x3fd0a0bb4da14177,0x3fd07100f9c71e1c,1 +np.float64,0x3fd30c45ffa6188c,0x3fd2c499f9961f66,1 +np.float64,0x3fcad98e7c35b31d,0x3fcaa74293cbc52e,1 +np.float64,0x3fec8e4a5eb91c95,0x3fe8e9f898d118db,1 +np.float64,0x3fd19fdb79233fb7,0x3fd1670c00febd24,1 +np.float64,0x3fea9fcbb1f53f97,0x3fe7a836b29c4075,1 +np.float64,0x3fc6d12ea12da25d,0x3fc6b24bd2f89f59,1 +np.float64,0x3fd6af3658ad5e6d,0x3fd636613e08df3f,1 +np.float64,0x3fe31bc385a63787,0x3fe1fe3081621213,1 +np.float64,0x3fc0dbba2221b774,0x3fc0cf42c9313dba,1 +np.float64,0x3fef639ce87ec73a,0x3fea9795454f1036,1 +np.float64,0x3fee5f29dcbcbe54,0x3fea0349b288f355,1 +np.float64,0x3fed46bdb37a8d7b,0x3fe95c199f5aa569,1 +np.float64,0x3fef176afa3e2ed6,0x3fea6ce78b2aa3aa,1 +np.float64,0x3fc841e7683083cf,0x3fc81cccb84848cc,1 +np.float64,0xbfda3ec9a2347d94,0xbfd9840d180e9de3,1 +np.float64,0xbfcd5967ae3ab2d0,0xbfcd17be13142bb9,1 +np.float64,0xbfedf816573bf02d,0xbfe9c6bb06476c60,1 +np.float64,0xbfd0d6e10e21adc2,0xbfd0a54f99d2f3dc,1 +np.float64,0xbfe282df096505be,0xbfe17ef5e2e80760,1 +np.float64,0xbfd77ae6e62ef5ce,0xbfd6f4f6b603ad8a,1 +np.float64,0xbfe37b171aa6f62e,0xbfe24cb4b2d0ade4,1 +np.float64,0xbfef9e5ed9bf3cbe,0xbfeab817b41000bd,1 +np.float64,0xbfe624d6f96c49ae,0xbfe46b1e9c9aff86,1 +np.float64,0xbfefb5da65ff6bb5,0xbfeac4fc9c982772,1 +np.float64,0xbfd29a65d52534cc,0xbfd2579df8ff87b9,1 +np.float64,0xbfd40270172804e0,0xbfd3af6471104aef,1 +np.float64,0xbfb729ee7a2e53e0,0xbfb721d7dbd2705e,1 +np.float64,0xbfb746f1382e8de0,0xbfb73ebc1207f8e3,1 +np.float64,0xbfd3c7e606a78fcc,0xbfd377a8aa1b0dd9,1 +np.float64,0xbfd18c4880231892,0xbfd1543506584ad5,1 +np.float64,0xbfea988080753101,0xbfe7a34cba0d0fa1,1 +np.float64,0xbf877400e02ee800,0xbf8773df47fa7e35,1 +np.float64,0xbfb07e050820fc08,0xbfb07b198d4a52c9,1 +np.float64,0xbfee0a3621fc146c,0xbfe9d1745a05ba77,1 +np.float64,0xbfe78de246ef1bc4,0xbfe57bf2baab91c8,1 +np.float64,0xbfcdbfd3bd3b7fa8,0xbfcd7b728a955a06,1 +np.float64,0xbfe855ea79b0abd5,0xbfe60e8a4a17b921,1 +np.float64,0xbfd86c8e3530d91c,0xbfd7d5e36c918dc1,1 +np.float64,0xbfe4543169e8a863,0xbfe2fd23d42f552e,1 +np.float64,0xbfe41efbf1283df8,0xbfe2d235a2faed1a,1 +np.float64,0xbfd9a55464b34aa8,0xbfd8f7083f7281e5,1 +np.float64,0xbfe5f5078d6bea0f,0xbfe44637d910c270,1 +np.float64,0xbfe6d83e3dedb07c,0xbfe4f3fdadd10552,1 +np.float64,0xbfdb767e70b6ecfc,0xbfdaa0b6c17f3fb1,1 +np.float64,0xbfdfc91b663f9236,0xbfde7eb0dfbeaa26,1 +np.float64,0xbfbfbd18783f7a30,0xbfbfa84bf2fa1c8d,1 +np.float64,0xbfe51199242a2332,0xbfe39447dbe066ae,1 +np.float64,0xbfdbb94814b77290,0xbfdadd63bd796972,1 +np.float64,0xbfd8c6272cb18c4e,0xbfd828f2d9e8607e,1 +np.float64,0xbfce51e0b63ca3c0,0xbfce097ee908083a,1 +np.float64,0xbfe99a177d73342f,0xbfe6f4ec776a57ae,1 +np.float64,0xbfefde2ab0ffbc55,0xbfeadafdcbf54733,1 +np.float64,0xbfcccb5c1c3996b8,0xbfcc8d586a73d126,1 +np.float64,0xbfdf7ddcedbefbba,0xbfde3c749a906de7,1 +np.float64,0xbfef940516ff280a,0xbfeab26429e89f4b,1 +np.float64,0xbfe08009f1e10014,0xbfdf8eab352997eb,1 +np.float64,0xbfe9c02682b3804d,0xbfe70f5fd05f79ee,1 +np.float64,0xbfb3ca1732279430,0xbfb3c50bec5b453a,1 +np.float64,0xbfe368e81926d1d0,0xbfe23dc704d0887c,1 +np.float64,0xbfbd20cc2e3a4198,0xbfbd10b7e6d81c6c,1 +np.float64,0xbfd67ece4d2cfd9c,0xbfd608f527dcc5e7,1 +np.float64,0xbfdc02d1333805a2,0xbfdb20104454b79f,1 +np.float64,0xbfc007a626200f4c,0xbfbff9dc9dc70193,1 +np.float64,0xbfda9e4f8fb53ca0,0xbfd9db8af35dc630,1 +np.float64,0xbfd8173d77302e7a,0xbfd786a0cf3e2914,1 +np.float64,0xbfeb8fcbd0b71f98,0xbfe84734debc10fb,1 +np.float64,0xbfe4bf1cb7697e3a,0xbfe352c891113f29,1 +np.float64,0xbfc18624d5230c48,0xbfc178248e863b64,1 +np.float64,0xbfcf184bac3e3098,0xbfceca3b19be1ebe,1 +np.float64,0xbfd2269c42a44d38,0xbfd1e8920d72b694,1 +np.float64,0xbfe8808526b1010a,0xbfe62d5497292495,1 +np.float64,0xbfe498bd1da9317a,0xbfe334245eadea93,1 +np.float64,0xbfef0855aebe10ab,0xbfea6462f29aeaf9,1 +np.float64,0xbfdeb186c93d630e,0xbfdd87c37943c602,1 +np.float64,0xbfb29fe2ae253fc8,0xbfb29bae3c87efe4,1 +np.float64,0xbfddd9c6c3bbb38e,0xbfdcc7b400bf384b,1 +np.float64,0xbfe3506673e6a0cd,0xbfe2299f26295553,1 +np.float64,0xbfe765957a2ecb2b,0xbfe55e03cf22edab,1 +np.float64,0xbfecc9876c79930f,0xbfe90efaf15b6207,1 +np.float64,0xbfefb37a0a7f66f4,0xbfeac3af3898e7c2,1 +np.float64,0xbfeefa0da7bdf41b,0xbfea5c4cde53c1c3,1 +np.float64,0xbfe6639ee9ecc73e,0xbfe49b4e28a72482,1 +np.float64,0xbfef91a4bb7f2349,0xbfeab114ac9e25dd,1 +np.float64,0xbfc8b392bb316724,0xbfc88c657f4441a3,1 +np.float64,0xbfc88a358231146c,0xbfc863cb900970fe,1 +np.float64,0xbfef25a9d23e4b54,0xbfea74eda432aabe,1 +np.float64,0xbfe6aceea0ed59de,0xbfe4d32e54a3fd01,1 +np.float64,0xbfefe2b3e37fc568,0xbfeadd74f4605835,1 +np.float64,0xbfa9eecb8833dd90,0xbfa9ebf4f4cb2591,1 +np.float64,0xbfd42bad7428575a,0xbfd3d69de8e52d0a,1 +np.float64,0xbfbc366b4a386cd8,0xbfbc27ceee8f3019,1 +np.float64,0xbfd9bca7be337950,0xbfd90c80e6204e57,1 +np.float64,0xbfe8173f53f02e7f,0xbfe5e0f8d8ed329c,1 +np.float64,0xbfce22dbcb3c45b8,0xbfcddbc8159b63af,1 +np.float64,0xbfea2d7ba7345af7,0xbfe75aa62ad5b80a,1 +np.float64,0xbfc08b783e2116f0,0xbfc07faf8d501558,1 +np.float64,0xbfb8c4161c318830,0xbfb8ba33950748ec,1 +np.float64,0xbfddd930bcbbb262,0xbfdcc72dffdf51bb,1 +np.float64,0xbfd108ce8a22119e,0xbfd0d5801e7698bd,1 +np.float64,0xbfd5bd2b5dab7a56,0xbfd552c52c468c76,1 +np.float64,0xbfe7ffe67fefffcd,0xbfe5cfe96e35e6e5,1 +np.float64,0xbfa04ec6bc209d90,0xbfa04e120a2c25cc,1 +np.float64,0xbfef7752cc7eeea6,0xbfeaa28715addc4f,1 +np.float64,0xbfe7083c2eae1078,0xbfe5182bf8ddfc8e,1 +np.float64,0xbfe05dafd0a0bb60,0xbfdf52d397cfe5f6,1 +np.float64,0xbfacb4f2243969e0,0xbfacb118991ea235,1 +np.float64,0xbfc7d47e422fa8fc,0xbfc7b1504714a4fd,1 +np.float64,0xbfbd70b2243ae168,0xbfbd60182efb61de,1 +np.float64,0xbfe930e49cb261c9,0xbfe6ab272b3f9cfc,1 +np.float64,0xbfb5f537e62bea70,0xbfb5ee540dcdc635,1 +np.float64,0xbfbb0c8278361908,0xbfbaffa1f7642a87,1 +np.float64,0xbfe82af2447055e4,0xbfe5ef54ca8db9e8,1 +np.float64,0xbfe92245e6f2448c,0xbfe6a0d32168040b,1 +np.float64,0xbfb799a8522f3350,0xbfb7911a7ada3640,1 +np.float64,0x7faa8290c8350521,0x3fe5916f67209cd6,1 +np.float64,0x7f976597082ecb2d,0x3fcf94dce396bd37,1 +np.float64,0x7fede721237bce41,0x3fe3e7b1575b005f,1 +np.float64,0x7fd5f674d72bece9,0x3fe3210628eba199,1 +np.float64,0x7f9b0f1aa0361e34,0x3feffd34d15d1da7,1 +np.float64,0x7fec48346ab89068,0x3fe93dd84253d9a2,1 +np.float64,0x7f9cac76283958eb,0xbfec4cd999653868,1 +np.float64,0x7fed51ab6bbaa356,0x3fecc27fb5f37bca,1 +np.float64,0x7fded3c116bda781,0xbfda473efee47cf1,1 +np.float64,0x7fd19c48baa33890,0xbfe25700cbfc0326,1 +np.float64,0x7fe5c8f478ab91e8,0xbfee4ab6d84806be,1 +np.float64,0x7fe53c64e46a78c9,0x3fee19c3f227f4e1,1 +np.float64,0x7fc2ad1936255a31,0xbfe56db9b877f807,1 +np.float64,0x7fe2b071b52560e2,0xbfce3990a8d390a9,1 +np.float64,0x7fc93f3217327e63,0xbfd1f6d7ef838d2b,1 +np.float64,0x7fec26df08784dbd,0x3fd5397be41c93d9,1 +np.float64,0x7fcf4770183e8edf,0x3fe6354f5a785016,1 +np.float64,0x7fdc9fcc0bb93f97,0xbfeeeae952e8267d,1 +np.float64,0x7feb21f29c7643e4,0x3fec20122e33f1bf,1 +np.float64,0x7fd0b51273216a24,0x3fefb09f8daba00b,1 +np.float64,0x7fe747a9d76e8f53,0x3feb46a3232842a4,1 +np.float64,0x7fd58885972b110a,0xbfce5ea57c186221,1 +np.float64,0x7fca3ce85c3479d0,0x3fef93a24548e8ca,1 +np.float64,0x7fe1528a46a2a514,0xbfb54bb578d9da91,1 +np.float64,0x7fcc58b21b38b163,0x3feffb5b741ffc2d,1 +np.float64,0x7fdabcaaf5357955,0x3fecbf855db524d1,1 +np.float64,0x7fdd27c6933a4f8c,0xbfef2f41bb80144b,1 +np.float64,0x7fbda4e1be3b49c2,0x3fdb9b33f84f5381,1 +np.float64,0x7fe53363362a66c5,0x3fe4daff3a6a4ed0,1 +np.float64,0x7fe5719d62eae33a,0xbfef761d98f625d5,1 +np.float64,0x7f982ce5a83059ca,0x3fd0b27c3365f0a8,1 +np.float64,0x7fe6db8c42edb718,0x3fe786f4b1fe11a6,1 +np.float64,0x7fe62cca1b2c5993,0x3fd425b6c4c9714a,1 +np.float64,0x7feea88850bd5110,0xbfd7bbb432017175,1 +np.float64,0x7fad6c6ae43ad8d5,0x3fe82e49098bc6de,1 +np.float64,0x7fe70542f02e0a85,0x3fec3017960b4822,1 +np.float64,0x7feaf0bcbb35e178,0xbfc3aac74dd322d5,1 +np.float64,0x7fb5e152fe2bc2a5,0x3fd4b27a4720614c,1 +np.float64,0x7fe456ee5be8addc,0xbfe9e15ab5cff229,1 +np.float64,0x7fd4b53a8d296a74,0xbfefff450f503326,1 +np.float64,0x7fd7149d7a2e293a,0x3fef4ef0a9009096,1 +np.float64,0x7fd43fc5a8a87f8a,0x3fe0c929fee9dce7,1 +np.float64,0x7fef97022aff2e03,0x3fd4ea52a813da20,1 +np.float64,0x7fe035950ae06b29,0x3fef4e125394fb05,1 +np.float64,0x7fecd0548979a0a8,0x3fe89d226244037b,1 +np.float64,0x7fc79b3ac22f3675,0xbfee9c9cf78c8270,1 +np.float64,0x7fd8b8e8263171cf,0x3fe8e24437961db0,1 +np.float64,0x7fc288c23e251183,0xbfbaf8eca50986ca,1 +np.float64,0x7fe436b4b6686d68,0xbfecd661741931c4,1 +np.float64,0x7fcdf99abe3bf334,0x3feaa75c90830b92,1 +np.float64,0x7fd9f9739233f2e6,0xbfebbfcb301b0da5,1 +np.float64,0x7fd6fcbd1b2df979,0xbfccf2c77cb65f56,1 +np.float64,0x7fe242a97b248552,0xbfe5b0f13bcbabc8,1 +np.float64,0x7fe38bf3e06717e7,0x3fbc8fa9004d2668,1 +np.float64,0x7fecd0e8d479a1d1,0xbfe886a6b4f73a4a,1 +np.float64,0x7fe958d60232b1ab,0xbfeb7c4cf0cee2dd,1 +np.float64,0x7f9d492b583a9256,0xbfebe975d00221cb,1 +np.float64,0x7fd6c9983bad932f,0xbfefe817621a31f6,1 +np.float64,0x7fed0d7239fa1ae3,0x3feac7e1b6455b4b,1 +np.float64,0x7fe61dac90ec3b58,0x3fef845b9efe8421,1 +np.float64,0x7f9acd3010359a5f,0xbfe460d376200130,1 +np.float64,0x7fedced9673b9db2,0xbfeeaf23445e1944,1 +np.float64,0x7fd9f271a733e4e2,0xbfd41544535ecb78,1 +np.float64,0x7fe703339bee0666,0x3fef93334626b56c,1 +np.float64,0x7fec7761b7b8eec2,0xbfe6da9179e8e714,1 +np.float64,0x7fdd9fff043b3ffd,0xbfc0761dfb8d94f9,1 +np.float64,0x7fdc10ed17b821d9,0x3fe1481e2a26c77f,1 +np.float64,0x7fe7681e72aed03c,0x3fefff94a6d47c84,1 +np.float64,0x7fe18c29e1e31853,0x3fe86ebd2fd89456,1 +np.float64,0x7fb2fb273c25f64d,0xbfefc136f57e06de,1 +np.float64,0x7fac2bbb90385776,0x3fe25d8e3cdae7e3,1 +np.float64,0x7fed16789efa2cf0,0x3fe94555091fdfd9,1 +np.float64,0x7fd8fe8f7831fd1e,0xbfed58d520361902,1 +np.float64,0x7fa59bde3c2b37bb,0x3fef585391c077ff,1 +np.float64,0x7fda981b53353036,0x3fde02ca08737b5f,1 +np.float64,0x7fd29f388aa53e70,0xbfe04f5499246df2,1 +np.float64,0x7fcd0232513a0464,0xbfd9737f2f565829,1 +np.float64,0x7fe9a881bcf35102,0xbfe079cf285b35dd,1 +np.float64,0x7fdbe399a9b7c732,0x3fe965bc4220f340,1 +np.float64,0x7feb77414af6ee82,0xbfb7df2fcd491f55,1 +np.float64,0x7fa26e86c424dd0d,0xbfea474c3d65b9be,1 +np.float64,0x7feaee869e35dd0c,0xbfd7b333a888cd14,1 +np.float64,0x7fcbd67f6137acfe,0xbfe15a7a15dfcee6,1 +np.float64,0x7fe36991e766d323,0xbfeb288077c4ed9f,1 +np.float64,0x7fdcf4f4fcb9e9e9,0xbfea331ef7a75e7b,1 +np.float64,0x7fbe3445643c688a,0x3fedf21b94ae8e37,1 +np.float64,0x7fd984cfd2b3099f,0x3fc0d3ade71c395e,1 +np.float64,0x7fdec987b23d930e,0x3fe4af5e48f6c26e,1 +np.float64,0x7fde56a9953cad52,0x3fc8e7762cefb8b0,1 +np.float64,0x7fd39fb446273f68,0xbfe6c3443208f44d,1 +np.float64,0x7fc609c1a72c1382,0x3fe884e639571baa,1 +np.float64,0x7fe001be4b20037c,0xbfed0d90cbcb6010,1 +np.float64,0x7fce7ace283cf59b,0xbfd0303792e51f49,1 +np.float64,0x7fe27ba93da4f751,0x3fe548b5ce740d71,1 +np.float64,0x7fcc13c79b38278e,0xbfe2e14f5b64a1e9,1 +np.float64,0x7fc058550620b0a9,0x3fe44bb55ebd0590,1 +np.float64,0x7fa4ba8bf8297517,0x3fee59b39f9d08c4,1 +np.float64,0x7fe50d6872ea1ad0,0xbfea1eaa2d059e13,1 +np.float64,0x7feb7e33b476fc66,0xbfeff28a4424dd3e,1 +np.float64,0x7fe2d7d2a165afa4,0xbfdbaff0ba1ea460,1 +np.float64,0xffd126654b224cca,0xbfef0cd3031fb97c,1 +np.float64,0xffb5f884942bf108,0x3fe0de589bea2e4c,1 +np.float64,0xffe011b4bfe02369,0xbfe805a0edf1e1f2,1 +np.float64,0xffec13eae9b827d5,0x3fb5f30347d78447,1 +np.float64,0xffa6552ae82caa50,0x3fb1ecee60135f2f,1 +np.float64,0xffb62d38b02c5a70,0x3fbd35903148fd12,1 +np.float64,0xffe2c44ea425889d,0xbfd7616547f99a7d,1 +np.float64,0xffea24c61a74498c,0x3fef4a1b15ae9005,1 +np.float64,0xffd23a4ab2a47496,0x3fe933bfaa569ae9,1 +np.float64,0xffc34a073d269410,0xbfeec0f510bb7474,1 +np.float64,0xffeead84cfbd5b09,0x3feb2d635e5a78bd,1 +np.float64,0xffcfd8f3b43fb1e8,0xbfdd59625801771b,1 +np.float64,0xffd3c7f662a78fec,0x3f9cf3209edfbc4e,1 +np.float64,0xffe7b7e4f72f6fca,0xbfefdcff4925632c,1 +np.float64,0xffe48cab05e91956,0x3fe6b41217948423,1 +np.float64,0xffeb6980b336d301,0xbfca5de148f69324,1 +np.float64,0xffe3f15c4aa7e2b8,0xbfeb18efae892081,1 +np.float64,0xffcf290c713e5218,0x3fefe6f1a513ed26,1 +np.float64,0xffd80979b43012f4,0xbfde6c8df91af976,1 +np.float64,0xffc3181e0026303c,0x3fe7448f681def38,1 +np.float64,0xffedfa68f97bf4d1,0xbfeca6efb802d109,1 +np.float64,0xffca0931c0341264,0x3fe31b9f073b08cd,1 +np.float64,0xffe4c44934e98892,0x3feda393a2e8a0f7,1 +np.float64,0xffe65bb56f2cb76a,0xbfeffaf638a4b73e,1 +np.float64,0xffe406a332a80d46,0x3fe8151dadb853c1,1 +np.float64,0xffdb7eae9c36fd5e,0xbfeff89abf5ab16e,1 +np.float64,0xffe245a02da48b40,0x3fef1fb43e85f4b8,1 +np.float64,0xffe2bafa732575f4,0x3fcbab115c6fd86e,1 +np.float64,0xffe8b1eedb7163dd,0x3feff263df6f6b12,1 +np.float64,0xffe6c76c796d8ed8,0xbfe61a8668511293,1 +np.float64,0xffefe327d1ffc64f,0xbfd9b92887a84827,1 +np.float64,0xffa452180c28a430,0xbfa9b9e578a4e52f,1 +np.float64,0xffe9867d0bf30cf9,0xbfca577867588408,1 +np.float64,0xffdfe9b923bfd372,0x3fdab5c15f085c2d,1 +np.float64,0xffed590c6abab218,0xbfd7e7b6c5a120e6,1 +np.float64,0xffeaebcfbab5d79f,0x3fed58be8a9e2c3b,1 +np.float64,0xffe2ba83a8257507,0x3fe6c42a4ac1d4d9,1 +np.float64,0xffe01d5b0ee03ab6,0xbfe5dad6c9247db7,1 +np.float64,0xffe51095d52a212b,0x3fef822cebc32d8e,1 +np.float64,0xffebd7a901b7af51,0xbfe5e63f3e3b1185,1 +np.float64,0xffe4efdcde29dfb9,0xbfe811294dfa758f,1 +np.float64,0xffe3be1aa4a77c35,0x3fdd8dcfcd409bb1,1 +np.float64,0xffbe6f2f763cde60,0x3fd13766e43bd622,1 +np.float64,0xffeed3d80fbda7af,0x3fec10a23c1b7a4a,1 +np.float64,0xffd6ebff37add7fe,0xbfe6177411607c86,1 +np.float64,0xffe85a90f4b0b521,0x3fc09fdd66c8fde9,1 +np.float64,0xffea3d58c2b47ab1,0x3feb5bd4a04b3562,1 +np.float64,0xffef675be6beceb7,0x3fecd840683d1044,1 +np.float64,0xff726a088024d400,0x3feff2b4f47b5214,1 +np.float64,0xffc90856733210ac,0xbfe3c6ffbf6840a5,1 +np.float64,0xffc0b58d9a216b1c,0xbfe10314267d0611,1 +np.float64,0xffee1f3d0abc3e79,0xbfd12ea7efea9067,1 +np.float64,0xffd988c41a331188,0x3febe83802d8a32e,1 +np.float64,0xffe8f1ac9bb1e358,0xbfdbf5fa7e84f2f2,1 +np.float64,0xffe47af279e8f5e4,0x3fef11e339e5fa78,1 +np.float64,0xff9960a7f832c140,0xbfa150363f8ec5b2,1 +np.float64,0xffcac40fa7358820,0xbfec3d5847a3df1d,1 +np.float64,0xffcb024a9d360494,0xbfd060fa31fd6b6a,1 +np.float64,0xffe385ffb3270bff,0xbfee6859e8dcd9e8,1 +np.float64,0xffef62f2c53ec5e5,0x3fe0a71ffddfc718,1 +np.float64,0xffed87ff20fb0ffd,0xbfe661db7c4098e3,1 +np.float64,0xffe369278526d24e,0x3fd64d89a41822fc,1 +np.float64,0xff950288c02a0520,0x3fe1df91d1ad7d5c,1 +np.float64,0xffe70e7c2cee1cf8,0x3fc9fece08df2fd8,1 +np.float64,0xffbaf020b635e040,0xbfc68c43ff9911a7,1 +np.float64,0xffee0120b0fc0240,0x3f9f792e17b490b0,1 +np.float64,0xffe1fa4be7a3f498,0xbfef4b18ab4b319e,1 +np.float64,0xffe61887bf2c310f,0x3fe846714826cb32,1 +np.float64,0xffdc3cf77f3879ee,0x3fe033b948a36125,1 +np.float64,0xffcc2b86f238570c,0xbfefdcceac3f220f,1 +np.float64,0xffe1f030c0a3e061,0x3fef502a808c359a,1 +np.float64,0xffb872c4ee30e588,0x3fef66ed8d3e6175,1 +np.float64,0xffeac8fc617591f8,0xbfe5d8448602aac9,1 +np.float64,0xffe5be16afab7c2d,0x3fee75ccde3cd14d,1 +np.float64,0xffae230ad83c4610,0xbfe49bbe6074d459,1 +np.float64,0xffc8fbeff531f7e0,0x3f77201e0c927f97,1 +np.float64,0xffdc314f48b8629e,0x3fef810dfc5db118,1 +np.float64,0xffec1f8970783f12,0x3fe15567102e042a,1 +np.float64,0xffc6995f902d32c0,0xbfecd5d2eedf342c,1 +np.float64,0xffdc7af76b38f5ee,0xbfd6e754476ab320,1 +np.float64,0xffb30cf8682619f0,0x3fd5ac3dfc4048d0,1 +np.float64,0xffd3a77695a74eee,0xbfefb5d6889e36e9,1 +np.float64,0xffd8b971803172e4,0xbfeb7f62f0b6c70b,1 +np.float64,0xffde4c0234bc9804,0xbfed50ba9e16d5e0,1 +np.float64,0xffb62b3f342c5680,0xbfeabc0de4069b84,1 +np.float64,0xff9af5674035eac0,0xbfed6c198b6b1bd8,1 +np.float64,0xffdfe20cb43fc41a,0x3fb11f8238f66306,1 +np.float64,0xffd2ecd7a0a5d9b0,0xbfec17ef1a62b1e3,1 +np.float64,0xffce60f7863cc1f0,0x3fe6dbcad3e3a006,1 +np.float64,0xffbbb8306a377060,0xbfbfd0fbef485c4c,1 +np.float64,0xffd1b2bd2b23657a,0xbfda3e046d987b99,1 +np.float64,0xffc480f4092901e8,0xbfeeff0427f6897b,1 +np.float64,0xffe6e02d926dc05a,0xbfcd59552778890b,1 +np.float64,0xffd302e5b7a605cc,0xbfee7c08641366b0,1 +np.float64,0xffec2eb92f785d72,0xbfef5c9c7f771050,1 +np.float64,0xffea3e31a9747c62,0xbfc49cd54755faf0,1 +np.float64,0xffce0a4e333c149c,0x3feeb9a6d0db4aee,1 +np.float64,0xffdc520a2db8a414,0x3fefc7b72613dcd0,1 +np.float64,0xffe056b968a0ad72,0xbfe47a9fe1f827fb,1 +np.float64,0xffe5a10f4cab421e,0x3fec2b1f74b73dec,1 diff --git a/local_packages/numpy/_core/tests/data/umath-validation-set-sinh.csv b/local_packages/numpy/_core/tests/data/umath-validation-set-sinh.csv new file mode 100644 index 0000000..1ef7b6e --- /dev/null +++ b/local_packages/numpy/_core/tests/data/umath-validation-set-sinh.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0xfee27582,0xff800000,2 +np.float32,0xff19f092,0xff800000,2 +np.float32,0xbf393576,0xbf49cb31,2 +np.float32,0x8020fdea,0x8020fdea,2 +np.float32,0x455f4e,0x455f4e,2 +np.float32,0xff718c35,0xff800000,2 +np.float32,0x3f3215e3,0x3f40cce5,2 +np.float32,0x19e833,0x19e833,2 +np.float32,0xff2dcd49,0xff800000,2 +np.float32,0x7e8f6c95,0x7f800000,2 +np.float32,0xbf159dac,0xbf1e47a5,2 +np.float32,0x100d3d,0x100d3d,2 +np.float32,0xff673441,0xff800000,2 +np.float32,0x80275355,0x80275355,2 +np.float32,0x4812d0,0x4812d0,2 +np.float32,0x8072b956,0x8072b956,2 +np.float32,0xff3bb918,0xff800000,2 +np.float32,0x0,0x0,2 +np.float32,0xfe327798,0xff800000,2 +np.float32,0x41d4e2,0x41d4e2,2 +np.float32,0xfe34b1b8,0xff800000,2 +np.float32,0x80199f72,0x80199f72,2 +np.float32,0x807242ce,0x807242ce,2 +np.float32,0x3ef4202d,0x3efd7b48,2 +np.float32,0x763529,0x763529,2 +np.float32,0x4f6662,0x4f6662,2 +np.float32,0x3f18efe9,0x3f2232b5,2 +np.float32,0x80701846,0x80701846,2 +np.float32,0x3f599948,0x3f74c393,2 +np.float32,0x5a3d69,0x5a3d69,2 +np.float32,0xbf4a7e65,0xbf6047a3,2 +np.float32,0xff0d4c82,0xff800000,2 +np.float32,0x7a74db,0x7a74db,2 +np.float32,0x803388e6,0x803388e6,2 +np.float32,0x7f4430bb,0x7f800000,2 +np.float32,0x14c5b1,0x14c5b1,2 +np.float32,0xfa113400,0xff800000,2 +np.float32,0x7f4b3209,0x7f800000,2 +np.float32,0x8038d88c,0x8038d88c,2 +np.float32,0xbef2f9de,0xbefc330b,2 +np.float32,0xbe147b38,0xbe15008f,2 +np.float32,0x2b61e6,0x2b61e6,2 +np.float32,0x80000001,0x80000001,2 +np.float32,0x8060456c,0x8060456c,2 +np.float32,0x3f30fa82,0x3f3f6a99,2 +np.float32,0xfd1f0220,0xff800000,2 +np.float32,0xbf2b7555,0xbf389151,2 +np.float32,0xff100b7a,0xff800000,2 +np.float32,0x70d3cd,0x70d3cd,2 +np.float32,0x2a8d4a,0x2a8d4a,2 +np.float32,0xbf7b733f,0xbf92f05f,2 +np.float32,0x3f7106dc,0x3f8b1fc6,2 +np.float32,0x3f39da7a,0x3f4a9d79,2 +np.float32,0x3f5dd73f,0x3f7aaab5,2 +np.float32,0xbe8c8754,0xbe8e4cba,2 +np.float32,0xbf6c74c9,0xbf87c556,2 +np.float32,0x800efbbb,0x800efbbb,2 +np.float32,0xff054ab5,0xff800000,2 +np.float32,0x800b4b46,0x800b4b46,2 +np.float32,0xff77fd74,0xff800000,2 +np.float32,0x257d0,0x257d0,2 +np.float32,0x7caa0c,0x7caa0c,2 +np.float32,0x8025d24d,0x8025d24d,2 +np.float32,0x3d9f1b60,0x3d9f445c,2 +np.float32,0xbe3bf6e8,0xbe3d0595,2 +np.float32,0x54bb93,0x54bb93,2 +np.float32,0xbf3e6a45,0xbf507716,2 +np.float32,0x3f4bb26e,0x3f61e1cd,2 +np.float32,0x3f698edc,0x3f85aac5,2 +np.float32,0xff7bd0ef,0xff800000,2 +np.float32,0xbed07b68,0xbed64a8e,2 +np.float32,0xbf237c72,0xbf2ed3d2,2 +np.float32,0x27b0fa,0x27b0fa,2 +np.float32,0x3f7606d1,0x3f8ed7d6,2 +np.float32,0x790dc0,0x790dc0,2 +np.float32,0x7f68f3ac,0x7f800000,2 +np.float32,0xbed39288,0xbed9a52f,2 +np.float32,0x3f6f8266,0x3f8a0187,2 +np.float32,0x3fbdca,0x3fbdca,2 +np.float32,0xbf7c3e5d,0xbf938b2c,2 +np.float32,0x802321a8,0x802321a8,2 +np.float32,0x3eecab66,0x3ef53031,2 +np.float32,0x62b324,0x62b324,2 +np.float32,0x3f13afac,0x3f1c03fe,2 +np.float32,0xff315ad7,0xff800000,2 +np.float32,0xbf1fac0d,0xbf2a3a63,2 +np.float32,0xbf543984,0xbf6d61d6,2 +np.float32,0x71a212,0x71a212,2 +np.float32,0x114fbe,0x114fbe,2 +np.float32,0x3f5b6ff2,0x3f77505f,2 +np.float32,0xff6ff89e,0xff800000,2 +np.float32,0xff4527a1,0xff800000,2 +np.float32,0x22cb3,0x22cb3,2 +np.float32,0x7f53bb6b,0x7f800000,2 +np.float32,0xff3d2dea,0xff800000,2 +np.float32,0xfd21dac0,0xff800000,2 +np.float32,0xfc486140,0xff800000,2 +np.float32,0x7e2b693a,0x7f800000,2 +np.float32,0x8022a9fb,0x8022a9fb,2 +np.float32,0x80765de0,0x80765de0,2 +np.float32,0x13d299,0x13d299,2 +np.float32,0x7ee53713,0x7f800000,2 +np.float32,0xbde1c770,0xbde23c96,2 +np.float32,0xbd473fc0,0xbd4753de,2 +np.float32,0x3f1cb455,0x3f26acf3,2 +np.float32,0x683e49,0x683e49,2 +np.float32,0x3ed5a9fc,0x3edbeb79,2 +np.float32,0x3f4fe3f6,0x3f67814f,2 +np.float32,0x802a2bce,0x802a2bce,2 +np.float32,0x7e951b4c,0x7f800000,2 +np.float32,0xbe6eb260,0xbe70dd44,2 +np.float32,0xbe3daca8,0xbe3ec2cb,2 +np.float32,0xbe9c38b2,0xbe9ea822,2 +np.float32,0xff2e29dc,0xff800000,2 +np.float32,0x7f62c7cc,0x7f800000,2 +np.float32,0xbf6799a4,0xbf84416c,2 +np.float32,0xbe30a7f0,0xbe318898,2 +np.float32,0xc83d9,0xc83d9,2 +np.float32,0x3f05abf4,0x3f0bd447,2 +np.float32,0x7e9b018a,0x7f800000,2 +np.float32,0xbf0ed72e,0xbf165e5b,2 +np.float32,0x8011ac8c,0x8011ac8c,2 +np.float32,0xbeb7c706,0xbebbbfcb,2 +np.float32,0x803637f9,0x803637f9,2 +np.float32,0xfe787cc8,0xff800000,2 +np.float32,0x3f533d4b,0x3f6c0a50,2 +np.float32,0x3f5c0f1c,0x3f782dde,2 +np.float32,0x3f301f36,0x3f3e590d,2 +np.float32,0x2dc929,0x2dc929,2 +np.float32,0xff15018a,0xff800000,2 +np.float32,0x3f4d0c56,0x3f63afeb,2 +np.float32,0xbf7a2ae3,0xbf91f6e4,2 +np.float32,0xbe771b84,0xbe798346,2 +np.float32,0x80800000,0x80800000,2 +np.float32,0x7f5689ba,0x7f800000,2 +np.float32,0x3f1c3177,0x3f2610df,2 +np.float32,0x3f1b9664,0x3f255825,2 +np.float32,0x3f7e5066,0x3f9520d4,2 +np.float32,0xbf1935f8,0xbf2285ab,2 +np.float32,0x3f096cc7,0x3f101ef9,2 +np.float32,0x8030c180,0x8030c180,2 +np.float32,0x6627ed,0x6627ed,2 +np.float32,0x454595,0x454595,2 +np.float32,0x7de66a33,0x7f800000,2 +np.float32,0xbf800000,0xbf966cfe,2 +np.float32,0xbf35c0a8,0xbf456939,2 +np.float32,0x3f6a6266,0x3f8643e0,2 +np.float32,0x3f0cbcee,0x3f13ef6a,2 +np.float32,0x7efd1e58,0x7f800000,2 +np.float32,0xfe9a74c6,0xff800000,2 +np.float32,0x807ebe6c,0x807ebe6c,2 +np.float32,0x80656736,0x80656736,2 +np.float32,0x800e0608,0x800e0608,2 +np.float32,0xbf30e39a,0xbf3f4e00,2 +np.float32,0x802015fd,0x802015fd,2 +np.float32,0x3e3ce26d,0x3e3df519,2 +np.float32,0x7ec142ac,0x7f800000,2 +np.float32,0xbf68c9ce,0xbf851c78,2 +np.float32,0xfede8356,0xff800000,2 +np.float32,0xbf1507ce,0xbf1d978d,2 +np.float32,0x3e53914c,0x3e551374,2 +np.float32,0x7f3e1c14,0x7f800000,2 +np.float32,0x8070d2ba,0x8070d2ba,2 +np.float32,0xbf4eb793,0xbf65ecee,2 +np.float32,0x7365a6,0x7365a6,2 +np.float32,0x8045cba2,0x8045cba2,2 +np.float32,0x7e4af521,0x7f800000,2 +np.float32,0xbf228625,0xbf2da9e1,2 +np.float32,0x7ee0536c,0x7f800000,2 +np.float32,0x3e126607,0x3e12e5d5,2 +np.float32,0x80311d92,0x80311d92,2 +np.float32,0xbf386b8b,0xbf48ca54,2 +np.float32,0x7f800000,0x7f800000,2 +np.float32,0x8049ec7a,0x8049ec7a,2 +np.float32,0xbf1dfde4,0xbf2836be,2 +np.float32,0x7e719a8c,0x7f800000,2 +np.float32,0x3eb9c856,0x3ebde2e6,2 +np.float32,0xfe3efda8,0xff800000,2 +np.float32,0xbe89d60c,0xbe8b81d1,2 +np.float32,0x3eaad338,0x3eae0317,2 +np.float32,0x7f4e5217,0x7f800000,2 +np.float32,0x3e9d0f40,0x3e9f88ce,2 +np.float32,0xbe026708,0xbe02c155,2 +np.float32,0x5fc22f,0x5fc22f,2 +np.float32,0x1c4572,0x1c4572,2 +np.float32,0xbed89d96,0xbedf22c5,2 +np.float32,0xbf3debee,0xbf4fd441,2 +np.float32,0xbf465520,0xbf5ac6e5,2 +np.float32,0x3f797081,0x3f9169b3,2 +np.float32,0xbf250734,0xbf30b2aa,2 +np.float32,0x7f5068e9,0x7f800000,2 +np.float32,0x3f1b814e,0x3f253f0c,2 +np.float32,0xbf27c5d3,0xbf340b05,2 +np.float32,0x3f1b78ae,0x3f2534c8,2 +np.float32,0x8059b51a,0x8059b51a,2 +np.float32,0x8059f182,0x8059f182,2 +np.float32,0xbf1bb36e,0xbf257ab8,2 +np.float32,0x41ac35,0x41ac35,2 +np.float32,0x68f41f,0x68f41f,2 +np.float32,0xbea504dc,0xbea7e40f,2 +np.float32,0x1,0x1,2 +np.float32,0x3e96b5b0,0x3e98e542,2 +np.float32,0x7f7fffff,0x7f800000,2 +np.float32,0x3c557a80,0x3c557c0c,2 +np.float32,0x800ca3ec,0x800ca3ec,2 +np.float32,0x8077d4aa,0x8077d4aa,2 +np.float32,0x3f000af0,0x3f0572d6,2 +np.float32,0x3e0434dd,0x3e0492f8,2 +np.float32,0x7d1a710a,0x7f800000,2 +np.float32,0x3f70f996,0x3f8b15f8,2 +np.float32,0x8033391d,0x8033391d,2 +np.float32,0x11927c,0x11927c,2 +np.float32,0x7f7784be,0x7f800000,2 +np.float32,0x7acb22af,0x7f800000,2 +np.float32,0x7e8b153c,0x7f800000,2 +np.float32,0x66d402,0x66d402,2 +np.float32,0xfed6e7b0,0xff800000,2 +np.float32,0x7f6872d3,0x7f800000,2 +np.float32,0x1bd49c,0x1bd49c,2 +np.float32,0xfdc4f1b8,0xff800000,2 +np.float32,0xbed8a466,0xbedf2a33,2 +np.float32,0x7ee789,0x7ee789,2 +np.float32,0xbece94b4,0xbed43b52,2 +np.float32,0x3cf3f734,0x3cf4006f,2 +np.float32,0x7e44aa00,0x7f800000,2 +np.float32,0x7f19e99c,0x7f800000,2 +np.float32,0x806ff1bc,0x806ff1bc,2 +np.float32,0x80296934,0x80296934,2 +np.float32,0x7f463363,0x7f800000,2 +np.float32,0xbf212ac3,0xbf2c06bb,2 +np.float32,0x3dc63778,0x3dc686ba,2 +np.float32,0x7f1b4328,0x7f800000,2 +np.float32,0x6311f6,0x6311f6,2 +np.float32,0xbf6b6fb6,0xbf870751,2 +np.float32,0xbf2c44cf,0xbf399155,2 +np.float32,0x3e7a67bc,0x3e7ce887,2 +np.float32,0x7f57c5f7,0x7f800000,2 +np.float32,0x7f2bb4ff,0x7f800000,2 +np.float32,0xbe9d448e,0xbe9fc0a4,2 +np.float32,0xbf4840f0,0xbf5d4f6b,2 +np.float32,0x7f1e1176,0x7f800000,2 +np.float32,0xff76638e,0xff800000,2 +np.float32,0xff055555,0xff800000,2 +np.float32,0x3f32b82b,0x3f419834,2 +np.float32,0xff363aa8,0xff800000,2 +np.float32,0x7f737fd0,0x7f800000,2 +np.float32,0x3da5d798,0x3da60602,2 +np.float32,0x3f1cc126,0x3f26bc3e,2 +np.float32,0x7eb07541,0x7f800000,2 +np.float32,0x3f7b2ff2,0x3f92bd2a,2 +np.float32,0x474f7,0x474f7,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0xff2b0a4e,0xff800000,2 +np.float32,0xfeb24f16,0xff800000,2 +np.float32,0x2cb9fc,0x2cb9fc,2 +np.float32,0x67189d,0x67189d,2 +np.float32,0x8033d854,0x8033d854,2 +np.float32,0xbe85e94c,0xbe87717a,2 +np.float32,0x80767c6c,0x80767c6c,2 +np.float32,0x7ea84d65,0x7f800000,2 +np.float32,0x3f024bc7,0x3f07fead,2 +np.float32,0xbdcb0100,0xbdcb5625,2 +np.float32,0x3f160a9e,0x3f1ec7c9,2 +np.float32,0xff1734c8,0xff800000,2 +np.float32,0x7f424d5e,0x7f800000,2 +np.float32,0xbf75b215,0xbf8e9862,2 +np.float32,0x3f262a42,0x3f3214c4,2 +np.float32,0xbf4cfb53,0xbf639927,2 +np.float32,0x3f4ac8b8,0x3f60aa7c,2 +np.float32,0x3e90e593,0x3e92d6b3,2 +np.float32,0xbf66bccf,0xbf83a2d8,2 +np.float32,0x7d3d851a,0x7f800000,2 +np.float32,0x7bac783c,0x7f800000,2 +np.float32,0x8001c626,0x8001c626,2 +np.float32,0xbdffd480,0xbe003f7b,2 +np.float32,0x7f6680bf,0x7f800000,2 +np.float32,0xbecf448e,0xbed4f9bb,2 +np.float32,0x584c7,0x584c7,2 +np.float32,0x3f3e8ea0,0x3f50a5fb,2 +np.float32,0xbf5a5f04,0xbf75d56e,2 +np.float32,0x8065ae47,0x8065ae47,2 +np.float32,0xbf48dce3,0xbf5e1dba,2 +np.float32,0xbe8dae2e,0xbe8f7ed8,2 +np.float32,0x3f7ca6ab,0x3f93dace,2 +np.float32,0x4c3e81,0x4c3e81,2 +np.float32,0x80000000,0x80000000,2 +np.float32,0x3ee1f7d9,0x3ee96033,2 +np.float32,0x80588c6f,0x80588c6f,2 +np.float32,0x5ba34e,0x5ba34e,2 +np.float32,0x80095d28,0x80095d28,2 +np.float32,0xbe7ba198,0xbe7e2bdd,2 +np.float32,0xbe0bdcb4,0xbe0c4c22,2 +np.float32,0x1776f7,0x1776f7,2 +np.float32,0x80328b2a,0x80328b2a,2 +np.float32,0x3e978d37,0x3e99c63e,2 +np.float32,0x7ed50906,0x7f800000,2 +np.float32,0x3f776a54,0x3f8fe2bd,2 +np.float32,0xbed624c4,0xbedc7120,2 +np.float32,0x7f0b6a31,0x7f800000,2 +np.float32,0x7eb13913,0x7f800000,2 +np.float32,0xbe733684,0xbe758190,2 +np.float32,0x80016474,0x80016474,2 +np.float32,0x7a51ee,0x7a51ee,2 +np.float32,0x3f6cb91e,0x3f87f729,2 +np.float32,0xbd99b050,0xbd99d540,2 +np.float32,0x7c6e3cba,0x7f800000,2 +np.float32,0xbf00179a,0xbf05811e,2 +np.float32,0x3e609b29,0x3e626954,2 +np.float32,0xff3fd71a,0xff800000,2 +np.float32,0x5d8c2,0x5d8c2,2 +np.float32,0x7ee93662,0x7f800000,2 +np.float32,0x4b0b31,0x4b0b31,2 +np.float32,0x3ec243b7,0x3ec6f594,2 +np.float32,0x804d60f1,0x804d60f1,2 +np.float32,0xbf0cb784,0xbf13e929,2 +np.float32,0x3f13b74d,0x3f1c0cee,2 +np.float32,0xfe37cb64,0xff800000,2 +np.float32,0x1a88,0x1a88,2 +np.float32,0x3e22a472,0x3e2353ba,2 +np.float32,0x7f07d6a0,0x7f800000,2 +np.float32,0x3f78f435,0x3f910bb5,2 +np.float32,0x555a4a,0x555a4a,2 +np.float32,0x3e306c1f,0x3e314be3,2 +np.float32,0x8005877c,0x8005877c,2 +np.float32,0x4df389,0x4df389,2 +np.float32,0x8069ffc7,0x8069ffc7,2 +np.float32,0x3f328f24,0x3f4164c6,2 +np.float32,0x53a31b,0x53a31b,2 +np.float32,0xbe4d6768,0xbe4ec8be,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0x3f484c1b,0x3f5d5e2f,2 +np.float32,0x8038be05,0x8038be05,2 +np.float32,0x58ac0f,0x58ac0f,2 +np.float32,0x7ed7fb72,0x7f800000,2 +np.float32,0x5a22e1,0x5a22e1,2 +np.float32,0xbebb7394,0xbebfaad6,2 +np.float32,0xbda98160,0xbda9b2ef,2 +np.float32,0x7f3e5c42,0x7f800000,2 +np.float32,0xfed204ae,0xff800000,2 +np.float32,0xbf5ef782,0xbf7c3ec5,2 +np.float32,0xbef7a0a8,0xbf00b292,2 +np.float32,0xfee6e176,0xff800000,2 +np.float32,0xfe121140,0xff800000,2 +np.float32,0xfe9e13be,0xff800000,2 +np.float32,0xbf3c98b1,0xbf4e2003,2 +np.float32,0x77520d,0x77520d,2 +np.float32,0xf17b2,0xf17b2,2 +np.float32,0x724d2f,0x724d2f,2 +np.float32,0x7eb326f5,0x7f800000,2 +np.float32,0x3edd6bf2,0x3ee4636e,2 +np.float32,0x350f57,0x350f57,2 +np.float32,0xff7d4435,0xff800000,2 +np.float32,0x802b2b9d,0x802b2b9d,2 +np.float32,0xbf7fbeee,0xbf963acf,2 +np.float32,0x804f3100,0x804f3100,2 +np.float32,0x7c594a71,0x7f800000,2 +np.float32,0x3ef49340,0x3efdfbb6,2 +np.float32,0x2e0659,0x2e0659,2 +np.float32,0x8006d5fe,0x8006d5fe,2 +np.float32,0xfd2a00b0,0xff800000,2 +np.float32,0xbee1c016,0xbee922ed,2 +np.float32,0x3e3b7de8,0x3e3c8a8b,2 +np.float32,0x805e6bba,0x805e6bba,2 +np.float32,0x1a7da2,0x1a7da2,2 +np.float32,0x6caba4,0x6caba4,2 +np.float32,0x802f7eab,0x802f7eab,2 +np.float32,0xff68b16b,0xff800000,2 +np.float32,0x8064f5e5,0x8064f5e5,2 +np.float32,0x2e39b4,0x2e39b4,2 +np.float32,0x800000,0x800000,2 +np.float32,0xfd0334c0,0xff800000,2 +np.float32,0x3e952fc4,0x3e974e7e,2 +np.float32,0x80057d33,0x80057d33,2 +np.float32,0x3ed3ddc4,0x3ed9f6f1,2 +np.float32,0x3f74ce18,0x3f8dedf4,2 +np.float32,0xff6bb7c0,0xff800000,2 +np.float32,0xff43bc21,0xff800000,2 +np.float32,0x80207570,0x80207570,2 +np.float32,0x7e1dda75,0x7f800000,2 +np.float32,0x3efe335c,0x3f0462ff,2 +np.float32,0xbf252c0c,0xbf30df70,2 +np.float32,0x3ef4b8e3,0x3efe25ba,2 +np.float32,0x7c33938d,0x7f800000,2 +np.float32,0x3eb1593c,0x3eb4ea95,2 +np.float32,0xfe1d0068,0xff800000,2 +np.float32,0xbf10da9b,0xbf18b551,2 +np.float32,0xfeb65748,0xff800000,2 +np.float32,0xfe8c6014,0xff800000,2 +np.float32,0x3f0503e2,0x3f0b14e3,2 +np.float32,0xfe5e5248,0xff800000,2 +np.float32,0xbd10afa0,0xbd10b754,2 +np.float32,0xff64b609,0xff800000,2 +np.float32,0xbf674a96,0xbf84089c,2 +np.float32,0x7f5d200d,0x7f800000,2 +np.float32,0x3cf44900,0x3cf45245,2 +np.float32,0x8044445a,0x8044445a,2 +np.float32,0xff35b676,0xff800000,2 +np.float32,0x806452cd,0x806452cd,2 +np.float32,0xbf2930fb,0xbf35c7b4,2 +np.float32,0x7e500617,0x7f800000,2 +np.float32,0x543719,0x543719,2 +np.float32,0x3ed11068,0x3ed6ec1d,2 +np.float32,0xbd8db068,0xbd8dcd59,2 +np.float32,0x3ede62c8,0x3ee571d0,2 +np.float32,0xbf00a410,0xbf061f9c,2 +np.float32,0xbf44fa39,0xbf58ff5b,2 +np.float32,0x3f1c3114,0x3f261069,2 +np.float32,0xbdea6210,0xbdeae521,2 +np.float32,0x80059f6d,0x80059f6d,2 +np.float32,0xbdba15f8,0xbdba578c,2 +np.float32,0x6d8a61,0x6d8a61,2 +np.float32,0x6f5428,0x6f5428,2 +np.float32,0x18d0e,0x18d0e,2 +np.float32,0x50e131,0x50e131,2 +np.float32,0x3f2f52be,0x3f3d5a7e,2 +np.float32,0x7399d8,0x7399d8,2 +np.float32,0x106524,0x106524,2 +np.float32,0x7ebf1c53,0x7f800000,2 +np.float32,0x80276458,0x80276458,2 +np.float32,0x3ebbde67,0x3ec01ceb,2 +np.float32,0x80144d9d,0x80144d9d,2 +np.float32,0x8017ea6b,0x8017ea6b,2 +np.float32,0xff38f201,0xff800000,2 +np.float32,0x7f2daa82,0x7f800000,2 +np.float32,0x3f3cb7c7,0x3f4e47ed,2 +np.float32,0x7f08c779,0x7f800000,2 +np.float32,0xbecc907a,0xbed20cec,2 +np.float32,0x7d440002,0x7f800000,2 +np.float32,0xbd410d80,0xbd411fcd,2 +np.float32,0x3d63ae07,0x3d63cc0c,2 +np.float32,0x805a9c13,0x805a9c13,2 +np.float32,0x803bdcdc,0x803bdcdc,2 +np.float32,0xbe88b354,0xbe8a5497,2 +np.float32,0x3f4eaf43,0x3f65e1c2,2 +np.float32,0x3f15e5b8,0x3f1e9c60,2 +np.float32,0x3e8a870c,0x3e8c394e,2 +np.float32,0x7e113de9,0x7f800000,2 +np.float32,0x7ee5ba41,0x7f800000,2 +np.float32,0xbe73d178,0xbe7620eb,2 +np.float32,0xfe972e6a,0xff800000,2 +np.float32,0xbf65567d,0xbf82a25a,2 +np.float32,0x3f38247e,0x3f487010,2 +np.float32,0xbece1c62,0xbed3b918,2 +np.float32,0x442c8d,0x442c8d,2 +np.float32,0x2dc52,0x2dc52,2 +np.float32,0x802ed923,0x802ed923,2 +np.float32,0x788cf8,0x788cf8,2 +np.float32,0x8024888e,0x8024888e,2 +np.float32,0x3f789bde,0x3f90c8fc,2 +np.float32,0x3f5de620,0x3f7abf88,2 +np.float32,0x3f0ffc45,0x3f17b2a7,2 +np.float32,0xbf709678,0xbf8accd4,2 +np.float32,0x12181f,0x12181f,2 +np.float32,0xfe54bbe4,0xff800000,2 +np.float32,0x7f1daba0,0x7f800000,2 +np.float32,0xbf6226df,0xbf805e3c,2 +np.float32,0xbd120610,0xbd120dfb,2 +np.float32,0x7f75e951,0x7f800000,2 +np.float32,0x80068048,0x80068048,2 +np.float32,0x45f04a,0x45f04a,2 +np.float32,0xff4c4f58,0xff800000,2 +np.float32,0x311604,0x311604,2 +np.float32,0x805e809c,0x805e809c,2 +np.float32,0x3d1d62c0,0x3d1d6caa,2 +np.float32,0x7f14ccf9,0x7f800000,2 +np.float32,0xff10017c,0xff800000,2 +np.float32,0xbf43ec48,0xbf579df4,2 +np.float32,0xff64da57,0xff800000,2 +np.float32,0x7f0622c5,0x7f800000,2 +np.float32,0x7f5460cd,0x7f800000,2 +np.float32,0xff0ef1c6,0xff800000,2 +np.float32,0xbece1146,0xbed3ad13,2 +np.float32,0x3f4d457f,0x3f63fc70,2 +np.float32,0xbdc1da28,0xbdc2244b,2 +np.float32,0xbe46d3f4,0xbe481463,2 +np.float32,0xff36b3d6,0xff800000,2 +np.float32,0xbec2e76c,0xbec7a540,2 +np.float32,0x8078fb81,0x8078fb81,2 +np.float32,0x7ec819cb,0x7f800000,2 +np.float32,0x39c4d,0x39c4d,2 +np.float32,0xbe8cddc2,0xbe8ea670,2 +np.float32,0xbf36dffb,0xbf46d48b,2 +np.float32,0xbf2302a3,0xbf2e4065,2 +np.float32,0x3e7b34a2,0x3e7dbb9a,2 +np.float32,0x3e3d87e1,0x3e3e9d62,2 +np.float32,0x7f3c94b1,0x7f800000,2 +np.float32,0x80455a85,0x80455a85,2 +np.float32,0xfd875568,0xff800000,2 +np.float32,0xbf618103,0xbf7fd1c8,2 +np.float32,0xbe332e3c,0xbe3418ac,2 +np.float32,0x80736b79,0x80736b79,2 +np.float32,0x3f705d9a,0x3f8aa2e6,2 +np.float32,0xbf3a36d2,0xbf4b134b,2 +np.float32,0xfddc55c0,0xff800000,2 +np.float32,0x805606fd,0x805606fd,2 +np.float32,0x3f4f0bc4,0x3f665e25,2 +np.float32,0xfebe7494,0xff800000,2 +np.float32,0xff0c541b,0xff800000,2 +np.float32,0xff0b8e7f,0xff800000,2 +np.float32,0xbcc51640,0xbcc51b1e,2 +np.float32,0x7ec1c4d0,0x7f800000,2 +np.float32,0xfc5c8e00,0xff800000,2 +np.float32,0x7f48d682,0x7f800000,2 +np.float32,0x7d5c7d8d,0x7f800000,2 +np.float32,0x8052ed03,0x8052ed03,2 +np.float32,0x7d4db058,0x7f800000,2 +np.float32,0xff3a65ee,0xff800000,2 +np.float32,0x806eeb93,0x806eeb93,2 +np.float32,0x803f9733,0x803f9733,2 +np.float32,0xbf2d1388,0xbf3a90e3,2 +np.float32,0x68e260,0x68e260,2 +np.float32,0x3e47a69f,0x3e48eb0e,2 +np.float32,0x3f0c4623,0x3f136646,2 +np.float32,0x3f37a831,0x3f47d249,2 +np.float32,0xff153a0c,0xff800000,2 +np.float32,0x2e8086,0x2e8086,2 +np.float32,0xc3f5e,0xc3f5e,2 +np.float32,0x7f31dc14,0x7f800000,2 +np.float32,0xfee37d68,0xff800000,2 +np.float32,0x711d4,0x711d4,2 +np.float32,0x7ede2ce4,0x7f800000,2 +np.float32,0xbf5d76d0,0xbf7a23d0,2 +np.float32,0xbe2b9eb4,0xbe2c6cac,2 +np.float32,0x2b14d7,0x2b14d7,2 +np.float32,0x3ea1db72,0x3ea4910e,2 +np.float32,0x7f3f03f7,0x7f800000,2 +np.float32,0x92de5,0x92de5,2 +np.float32,0x80322e1b,0x80322e1b,2 +np.float32,0xbf5eb214,0xbf7bdd55,2 +np.float32,0xbf21bf87,0xbf2cba14,2 +np.float32,0xbf5d4b78,0xbf79e73a,2 +np.float32,0xbc302840,0xbc30291e,2 +np.float32,0xfee567c6,0xff800000,2 +np.float32,0x7f70ee14,0x7f800000,2 +np.float32,0x7e5c4b33,0x7f800000,2 +np.float32,0x3f1e7b64,0x3f28ccfd,2 +np.float32,0xbf6309f7,0xbf80ff3e,2 +np.float32,0x1c2fe3,0x1c2fe3,2 +np.float32,0x8e78d,0x8e78d,2 +np.float32,0x7f2fce73,0x7f800000,2 +np.float32,0x7f25f690,0x7f800000,2 +np.float32,0x8074cba5,0x8074cba5,2 +np.float32,0x16975f,0x16975f,2 +np.float32,0x8012cf5c,0x8012cf5c,2 +np.float32,0x7da72138,0x7f800000,2 +np.float32,0xbf563f35,0xbf7025be,2 +np.float32,0x3f69d3f5,0x3f85dcbe,2 +np.float32,0xbf15c148,0xbf1e7184,2 +np.float32,0xbe7a077c,0xbe7c8564,2 +np.float32,0x3ebb6ef1,0x3ebfa5e3,2 +np.float32,0xbe41fde4,0xbe43277b,2 +np.float32,0x7f10b479,0x7f800000,2 +np.float32,0x3e021ace,0x3e02747d,2 +np.float32,0x3e93d984,0x3e95e9be,2 +np.float32,0xfe17e924,0xff800000,2 +np.float32,0xfe21a7cc,0xff800000,2 +np.float32,0x8019b660,0x8019b660,2 +np.float32,0x7e954631,0x7f800000,2 +np.float32,0x7e7330d1,0x7f800000,2 +np.float32,0xbe007d98,0xbe00d3fb,2 +np.float32,0x3ef3870e,0x3efcd077,2 +np.float32,0x7f5bbde8,0x7f800000,2 +np.float32,0x14a5b3,0x14a5b3,2 +np.float32,0x3e84d23f,0x3e8650e8,2 +np.float32,0x80763017,0x80763017,2 +np.float32,0xfe871f36,0xff800000,2 +np.float32,0x7ed43150,0x7f800000,2 +np.float32,0x3cc44547,0x3cc44a16,2 +np.float32,0x3ef0c0fa,0x3ef9b97d,2 +np.float32,0xbede9944,0xbee5ad86,2 +np.float32,0xbf10f0b2,0xbf18cf0a,2 +np.float32,0x3ecdaa78,0x3ed33dd9,2 +np.float32,0x3f7cc058,0x3f93ee6b,2 +np.float32,0x2d952f,0x2d952f,2 +np.float32,0x3f2cf2de,0x3f3a687a,2 +np.float32,0x8029b33c,0x8029b33c,2 +np.float32,0xbf22c737,0xbf2df888,2 +np.float32,0xff53c84a,0xff800000,2 +np.float32,0x40a509,0x40a509,2 +np.float32,0x56abce,0x56abce,2 +np.float32,0xff7fffff,0xff800000,2 +np.float32,0xbf3e67f6,0xbf50741c,2 +np.float32,0xfde67580,0xff800000,2 +np.float32,0x3f103e9b,0x3f17ffc7,2 +np.float32,0x3f3f7232,0x3f51cbe2,2 +np.float32,0x803e6d78,0x803e6d78,2 +np.float32,0x3a61da,0x3a61da,2 +np.float32,0xbc04de80,0xbc04dedf,2 +np.float32,0x7f1e7c52,0x7f800000,2 +np.float32,0x8058ee88,0x8058ee88,2 +np.float32,0x806dd660,0x806dd660,2 +np.float32,0x7e4af9,0x7e4af9,2 +np.float32,0x80702d27,0x80702d27,2 +np.float32,0x802cdad1,0x802cdad1,2 +np.float32,0x3e9b5c23,0x3e9dc149,2 +np.float32,0x7f076e89,0x7f800000,2 +np.float32,0x7f129d68,0x7f800000,2 +np.float32,0x7f6f0b0a,0x7f800000,2 +np.float32,0x7eafafb5,0x7f800000,2 +np.float32,0xbf2ef2ca,0xbf3ce332,2 +np.float32,0xff34c000,0xff800000,2 +np.float32,0x7f559274,0x7f800000,2 +np.float32,0xfed08556,0xff800000,2 +np.float32,0xbf014621,0xbf06d6ad,2 +np.float32,0xff23086a,0xff800000,2 +np.float32,0x6cb33f,0x6cb33f,2 +np.float32,0xfe6e3ffc,0xff800000,2 +np.float32,0x3e6bbec0,0x3e6dd546,2 +np.float32,0x8036afa6,0x8036afa6,2 +np.float32,0xff800000,0xff800000,2 +np.float32,0x3e0ed05c,0x3e0f46ff,2 +np.float32,0x3ec9215c,0x3ece57e6,2 +np.float32,0xbf449fa4,0xbf5888aa,2 +np.float32,0xff2c6640,0xff800000,2 +np.float32,0x7f08f4a7,0x7f800000,2 +np.float32,0xbf4f63e5,0xbf66d4c1,2 +np.float32,0x3f800000,0x3f966cfe,2 +np.float32,0xfe86c7d2,0xff800000,2 +np.float32,0x3f63f969,0x3f81a970,2 +np.float32,0xbd7022d0,0xbd704609,2 +np.float32,0xbead906c,0xbeb0e853,2 +np.float32,0x7ef149ee,0x7f800000,2 +np.float32,0xff0b9ff7,0xff800000,2 +np.float32,0x3f38380d,0x3f4888e7,2 +np.float32,0x3ef3a3e2,0x3efcf09e,2 +np.float32,0xff616477,0xff800000,2 +np.float32,0x3f3f83e4,0x3f51e2c3,2 +np.float32,0xbf79963c,0xbf918642,2 +np.float32,0x801416f4,0x801416f4,2 +np.float32,0xff75ce6d,0xff800000,2 +np.float32,0xbdbf3588,0xbdbf7cad,2 +np.float32,0xbe6ea938,0xbe70d3dc,2 +np.float32,0x8066f977,0x8066f977,2 +np.float32,0x3f5b5362,0x3f7728aa,2 +np.float32,0xbf72052c,0xbf8bdbd8,2 +np.float32,0xbe21ed74,0xbe229a6f,2 +np.float32,0x8062d19c,0x8062d19c,2 +np.float32,0x3ed8d01f,0x3edf59e6,2 +np.float32,0x803ed42b,0x803ed42b,2 +np.float32,0xbe099a64,0xbe0a0481,2 +np.float32,0xbe173eb4,0xbe17cba2,2 +np.float32,0xbebdcf02,0xbec22faf,2 +np.float32,0x7e3ff29e,0x7f800000,2 +np.float32,0x367c92,0x367c92,2 +np.float32,0xbf5c9db8,0xbf78f4a4,2 +np.float32,0xff0b49ea,0xff800000,2 +np.float32,0x3f4f9bc4,0x3f672001,2 +np.float32,0x85d4a,0x85d4a,2 +np.float32,0x80643e33,0x80643e33,2 +np.float32,0x8013aabd,0x8013aabd,2 +np.float32,0xff6997c3,0xff800000,2 +np.float32,0x3f4dd43c,0x3f64bbb6,2 +np.float32,0xff13bbb9,0xff800000,2 +np.float32,0x3f34efa2,0x3f446187,2 +np.float32,0x3e4b2f10,0x3e4c850d,2 +np.float32,0xfef695c6,0xff800000,2 +np.float32,0x7f7e0057,0x7f800000,2 +np.float32,0x3f6e1b9c,0x3f88fa40,2 +np.float32,0x806e46cf,0x806e46cf,2 +np.float32,0x3f15a88a,0x3f1e546c,2 +np.float32,0xbd2de7d0,0xbd2df530,2 +np.float32,0xbf63cae0,0xbf818854,2 +np.float32,0xbdc3e1a0,0xbdc42e1e,2 +np.float32,0xbf11a038,0xbf199b98,2 +np.float32,0xbec13706,0xbec5d56b,2 +np.float32,0x3f1c5f54,0x3f26478d,2 +np.float32,0x3e9ea97e,0x3ea136b4,2 +np.float32,0xfeb5a508,0xff800000,2 +np.float32,0x7f4698f4,0x7f800000,2 +np.float32,0xff51ee2c,0xff800000,2 +np.float32,0xff5994df,0xff800000,2 +np.float32,0x4b9fb9,0x4b9fb9,2 +np.float32,0xfda10d98,0xff800000,2 +np.float32,0x525555,0x525555,2 +np.float32,0x7ed571ef,0x7f800000,2 +np.float32,0xbf600d18,0xbf7dc50c,2 +np.float32,0x3ec674ca,0x3ecb768b,2 +np.float32,0x3cb69115,0x3cb694f3,2 +np.float32,0x7eac75f2,0x7f800000,2 +np.float32,0x804d4d75,0x804d4d75,2 +np.float32,0xfed5292e,0xff800000,2 +np.float32,0x800ed06a,0x800ed06a,2 +np.float32,0xfec37584,0xff800000,2 +np.float32,0x3ef96ac7,0x3f01b326,2 +np.float32,0x42f743,0x42f743,2 +np.float32,0x3f56f442,0x3f711e39,2 +np.float32,0xbf7ea726,0xbf956375,2 +np.float32,0x806c7202,0x806c7202,2 +np.float32,0xbd8ee980,0xbd8f0733,2 +np.float32,0xbdf2e930,0xbdf37b18,2 +np.float32,0x3f103910,0x3f17f955,2 +np.float32,0xff123e8f,0xff800000,2 +np.float32,0x806e4b5d,0x806e4b5d,2 +np.float32,0xbf4f3bfc,0xbf669f07,2 +np.float32,0xbf070c16,0xbf0d6609,2 +np.float32,0xff00e0ba,0xff800000,2 +np.float32,0xff49d828,0xff800000,2 +np.float32,0x7e47f04a,0x7f800000,2 +np.float32,0x7e984dac,0x7f800000,2 +np.float32,0x3f77473c,0x3f8fc858,2 +np.float32,0x3f017439,0x3f070ac8,2 +np.float32,0x118417,0x118417,2 +np.float32,0xbcf7a2c0,0xbcf7ac68,2 +np.float32,0xfee46fee,0xff800000,2 +np.float32,0x3e42a648,0x3e43d2e9,2 +np.float32,0x80131916,0x80131916,2 +np.float32,0x806209d3,0x806209d3,2 +np.float32,0x807c1f12,0x807c1f12,2 +np.float32,0x2f3696,0x2f3696,2 +np.float32,0xff28722b,0xff800000,2 +np.float32,0x7f1416a1,0x7f800000,2 +np.float32,0x8054e7a1,0x8054e7a1,2 +np.float32,0xbddc39a0,0xbddca656,2 +np.float32,0x7dc60175,0x7f800000,2 +np.float64,0x7fd0ae584da15cb0,0x7ff0000000000000,1 +np.float64,0x7fd41d68e5283ad1,0x7ff0000000000000,1 +np.float64,0x7fe93073bb7260e6,0x7ff0000000000000,1 +np.float64,0x3fb4fd19d229fa34,0x3fb5031f57dbac0f,1 +np.float64,0x85609ce10ac2,0x85609ce10ac2,1 +np.float64,0xbfd7aa12ccaf5426,0xbfd8351003a320e2,1 +np.float64,0x8004487c9b4890fa,0x8004487c9b4890fa,1 +np.float64,0x7fe7584cfd2eb099,0x7ff0000000000000,1 +np.float64,0x800ea8edc6dd51dc,0x800ea8edc6dd51dc,1 +np.float64,0x3fe0924aa5a12495,0x3fe15276e271c6dc,1 +np.float64,0x3feb1abf6d36357f,0x3fee76b4d3d06964,1 +np.float64,0x3fa8c14534318280,0x3fa8c3bd5ce5923c,1 +np.float64,0x800b9f5915d73eb3,0x800b9f5915d73eb3,1 +np.float64,0xffc05aaa7820b554,0xfff0000000000000,1 +np.float64,0x800157eda8c2afdc,0x800157eda8c2afdc,1 +np.float64,0xffe8d90042b1b200,0xfff0000000000000,1 +np.float64,0x3feda02ea93b405d,0x3ff1057e61d08d59,1 +np.float64,0xffd03b7361a076e6,0xfff0000000000000,1 +np.float64,0x3fe1a8ecd7e351da,0x3fe291eda9080847,1 +np.float64,0xffc5bfdff82b7fc0,0xfff0000000000000,1 +np.float64,0xbfe6fb3d386df67a,0xbfe9022c05df0565,1 +np.float64,0x7fefffffffffffff,0x7ff0000000000000,1 +np.float64,0x7fa10c340c221867,0x7ff0000000000000,1 +np.float64,0x3fe55cbf1daab97e,0x3fe6fc1648258b75,1 +np.float64,0xbfddeb5f60bbd6be,0xbfdf056d4fb5825f,1 +np.float64,0xffddb1a8213b6350,0xfff0000000000000,1 +np.float64,0xbfb20545e4240a88,0xbfb2091579375176,1 +np.float64,0x3f735ded2026bbda,0x3f735df1dad4ee3a,1 +np.float64,0xbfd1eb91efa3d724,0xbfd227c044dead61,1 +np.float64,0xffd737c588ae6f8c,0xfff0000000000000,1 +np.float64,0x3fc46818ec28d032,0x3fc47e416c4237a6,1 +np.float64,0x0,0x0,1 +np.float64,0xffb632097a2c6410,0xfff0000000000000,1 +np.float64,0xbfcb5ae84b36b5d0,0xbfcb905613af55b8,1 +np.float64,0xbfe7b926402f724c,0xbfe9f4f0be6aacc3,1 +np.float64,0x80081840b3f03082,0x80081840b3f03082,1 +np.float64,0x3fe767a656eecf4d,0x3fe98c53b4779de7,1 +np.float64,0x8005834c088b0699,0x8005834c088b0699,1 +np.float64,0x80074e92658e9d26,0x80074e92658e9d26,1 +np.float64,0x80045d60c268bac2,0x80045d60c268bac2,1 +np.float64,0xffb9aecfe8335da0,0xfff0000000000000,1 +np.float64,0x7fcad3e1cd35a7c3,0x7ff0000000000000,1 +np.float64,0xbf881853d03030c0,0xbf8818783e28fc87,1 +np.float64,0xe18c6d23c318e,0xe18c6d23c318e,1 +np.float64,0x7fcb367b8f366cf6,0x7ff0000000000000,1 +np.float64,0x5c13436cb8269,0x5c13436cb8269,1 +np.float64,0xffe5399938aa7332,0xfff0000000000000,1 +np.float64,0xbfdc45dbc3b88bb8,0xbfdd33958222c27e,1 +np.float64,0xbfd714691bae28d2,0xbfd7954edbef810b,1 +np.float64,0xbfdf18b02b3e3160,0xbfe02ad13634c651,1 +np.float64,0x8003e6f276e7cde6,0x8003e6f276e7cde6,1 +np.float64,0x3febb6b412776d68,0x3fef4f753def31f9,1 +np.float64,0x7fe016a3b4a02d46,0x7ff0000000000000,1 +np.float64,0x3fdc899ac7b91336,0x3fdd7e1cee1cdfc8,1 +np.float64,0x800219271e24324f,0x800219271e24324f,1 +np.float64,0x1529d93e2a53c,0x1529d93e2a53c,1 +np.float64,0x800d5bc827fab790,0x800d5bc827fab790,1 +np.float64,0x3e1495107c293,0x3e1495107c293,1 +np.float64,0x3fe89da0f2b13b42,0x3feb1dc1f3015ad7,1 +np.float64,0x800ba8c17b975183,0x800ba8c17b975183,1 +np.float64,0x8002dacf0265b59f,0x8002dacf0265b59f,1 +np.float64,0xffe6d0a4cc2da149,0xfff0000000000000,1 +np.float64,0x3fdf23fe82be47fc,0x3fe03126d8e2b309,1 +np.float64,0xffe41b1f1c28363e,0xfff0000000000000,1 +np.float64,0xbfd635c634ac6b8c,0xbfd6a8966da6adaa,1 +np.float64,0x800755bc08eeab79,0x800755bc08eeab79,1 +np.float64,0x800ba4c47c374989,0x800ba4c47c374989,1 +np.float64,0x7fec9f7649793eec,0x7ff0000000000000,1 +np.float64,0x7fdbf45738b7e8ad,0x7ff0000000000000,1 +np.float64,0x3f5597f07eab4,0x3f5597f07eab4,1 +np.float64,0xbfbf4599183e8b30,0xbfbf5985d8c65097,1 +np.float64,0xbf5b200580364000,0xbf5b2006501b21ae,1 +np.float64,0x7f91868370230d06,0x7ff0000000000000,1 +np.float64,0x3838e2a67071d,0x3838e2a67071d,1 +np.float64,0xffefe3ff5d3fc7fe,0xfff0000000000000,1 +np.float64,0xffe66b26d06cd64d,0xfff0000000000000,1 +np.float64,0xbfd830a571b0614a,0xbfd8c526927c742c,1 +np.float64,0x7fe8442122f08841,0x7ff0000000000000,1 +np.float64,0x800efa8c637df519,0x800efa8c637df519,1 +np.float64,0xf0026835e004d,0xf0026835e004d,1 +np.float64,0xffb11beefe2237e0,0xfff0000000000000,1 +np.float64,0x3fef9bbb327f3776,0x3ff2809f10641c32,1 +np.float64,0x350595306a0b3,0x350595306a0b3,1 +np.float64,0xf7f6538befecb,0xf7f6538befecb,1 +np.float64,0xffe36379c4a6c6f3,0xfff0000000000000,1 +np.float64,0x28b1d82e5163c,0x28b1d82e5163c,1 +np.float64,0x70a3d804e147c,0x70a3d804e147c,1 +np.float64,0xffd96c1bc9b2d838,0xfff0000000000000,1 +np.float64,0xffce8e00893d1c00,0xfff0000000000000,1 +np.float64,0x800f2bdcb25e57b9,0x800f2bdcb25e57b9,1 +np.float64,0xbfe0d9c63361b38c,0xbfe1a3eb02192b76,1 +np.float64,0xbfdc7b8711b8f70e,0xbfdd6e9db3a01e51,1 +np.float64,0x99e22ec133c46,0x99e22ec133c46,1 +np.float64,0xffeaef6ddab5dedb,0xfff0000000000000,1 +np.float64,0x7fe89c22c0f13845,0x7ff0000000000000,1 +np.float64,0x8002d5207de5aa42,0x8002d5207de5aa42,1 +np.float64,0x3fd1b13353236267,0x3fd1eb1b9345dfca,1 +np.float64,0x800ccae0a41995c1,0x800ccae0a41995c1,1 +np.float64,0x3fdbdaba38b7b574,0x3fdcbdfcbca37ce6,1 +np.float64,0x5b06d12cb60db,0x5b06d12cb60db,1 +np.float64,0xffd52262752a44c4,0xfff0000000000000,1 +np.float64,0x5a17f050b42ff,0x5a17f050b42ff,1 +np.float64,0x3d24205e7a485,0x3d24205e7a485,1 +np.float64,0x7fbed4dec63da9bd,0x7ff0000000000000,1 +np.float64,0xbfe56e9776aadd2f,0xbfe71212863c284f,1 +np.float64,0x7fea0bc952341792,0x7ff0000000000000,1 +np.float64,0x800f692d139ed25a,0x800f692d139ed25a,1 +np.float64,0xffdb63feab36c7fe,0xfff0000000000000,1 +np.float64,0x3fe1c2297fe38452,0x3fe2af21293c9571,1 +np.float64,0x7fede384747bc708,0x7ff0000000000000,1 +np.float64,0x800440169288802e,0x800440169288802e,1 +np.float64,0xffe3241eeb26483e,0xfff0000000000000,1 +np.float64,0xffe28f3879651e70,0xfff0000000000000,1 +np.float64,0xa435cbc1486d,0xa435cbc1486d,1 +np.float64,0x7fe55e08db6abc11,0x7ff0000000000000,1 +np.float64,0x1405e624280be,0x1405e624280be,1 +np.float64,0x3fd861bdf0b0c37c,0x3fd8f9d2e33e45e5,1 +np.float64,0x3feeb67cdc3d6cfa,0x3ff1d337d81d1c14,1 +np.float64,0x3fd159a10e22b342,0x3fd1903be7c2ea0c,1 +np.float64,0x3fd84626bc308c4d,0x3fd8dc373645e65b,1 +np.float64,0xffd3da81d9a7b504,0xfff0000000000000,1 +np.float64,0xbfd4a768b8294ed2,0xbfd503aa7c240051,1 +np.float64,0x3fe3059f2a660b3e,0x3fe42983e0c6bb2e,1 +np.float64,0x3fe3b8353827706a,0x3fe4fdd635c7269b,1 +np.float64,0xbfe4af0399695e07,0xbfe6277d9002b46c,1 +np.float64,0xbfd7e18a92afc316,0xbfd87066b54c4fe6,1 +np.float64,0x800432bcab48657a,0x800432bcab48657a,1 +np.float64,0x80033d609d267ac2,0x80033d609d267ac2,1 +np.float64,0x7fef5f758e7ebeea,0x7ff0000000000000,1 +np.float64,0xbfed7833dbfaf068,0xbff0e85bf45a5ebc,1 +np.float64,0x3fe2283985a45073,0x3fe325b0a9099c74,1 +np.float64,0xe820b4b3d0417,0xe820b4b3d0417,1 +np.float64,0x8003ecb72aa7d96f,0x8003ecb72aa7d96f,1 +np.float64,0xbfeab2c755b5658f,0xbfede7c83e92a625,1 +np.float64,0xbfc7b287f72f6510,0xbfc7d53ef2ffe9dc,1 +np.float64,0xffd9a41d0f33483a,0xfff0000000000000,1 +np.float64,0x3fd3a5b6e3a74b6c,0x3fd3f516f39a4725,1 +np.float64,0x800bc72091578e42,0x800bc72091578e42,1 +np.float64,0x800ff405ce9fe80c,0x800ff405ce9fe80c,1 +np.float64,0x57918600af24,0x57918600af24,1 +np.float64,0x2a5be7fa54b7e,0x2a5be7fa54b7e,1 +np.float64,0xbfdca7886bb94f10,0xbfdd9f142b5b43e4,1 +np.float64,0xbfe216993ee42d32,0xbfe3112936590995,1 +np.float64,0xbfe06bd9cf20d7b4,0xbfe126cd353ab42f,1 +np.float64,0x8003e6c31827cd87,0x8003e6c31827cd87,1 +np.float64,0x8005f37d810be6fc,0x8005f37d810be6fc,1 +np.float64,0x800715b081ae2b62,0x800715b081ae2b62,1 +np.float64,0x3fef94c35bff2986,0x3ff27b4bed2f4051,1 +np.float64,0x6f5798e0deb0,0x6f5798e0deb0,1 +np.float64,0x3fcef1f05c3de3e1,0x3fcf3f557550598f,1 +np.float64,0xbf9a91c400352380,0xbf9a92876273b85c,1 +np.float64,0x3fc9143f7f322880,0x3fc93d678c05d26b,1 +np.float64,0x78ad847af15b1,0x78ad847af15b1,1 +np.float64,0x8000fdc088c1fb82,0x8000fdc088c1fb82,1 +np.float64,0x800200fd304401fb,0x800200fd304401fb,1 +np.float64,0x7fb8ab09dc315613,0x7ff0000000000000,1 +np.float64,0x3fe949771b7292ee,0x3fec00891c3fc5a2,1 +np.float64,0xbfc54cae0e2a995c,0xbfc565e0f3d0e3af,1 +np.float64,0xffd546161e2a8c2c,0xfff0000000000000,1 +np.float64,0x800fe1d1279fc3a2,0x800fe1d1279fc3a2,1 +np.float64,0x3fd9c45301b388a8,0x3fda77fa1f4c79bf,1 +np.float64,0x7fe10ff238221fe3,0x7ff0000000000000,1 +np.float64,0xbfbc2181ae384300,0xbfbc3002229155c4,1 +np.float64,0xbfe7bbfae4ef77f6,0xbfe9f895e91f468d,1 +np.float64,0x800d3d994f7a7b33,0x800d3d994f7a7b33,1 +np.float64,0xffe6e15a896dc2b4,0xfff0000000000000,1 +np.float64,0x800e6b6c8abcd6d9,0x800e6b6c8abcd6d9,1 +np.float64,0xbfd862c938b0c592,0xbfd8faf1cdcb09db,1 +np.float64,0xffe2411f8464823e,0xfff0000000000000,1 +np.float64,0xffd0b32efaa1665e,0xfff0000000000000,1 +np.float64,0x3ac4ace475896,0x3ac4ace475896,1 +np.float64,0xf9c3a7ebf3875,0xf9c3a7ebf3875,1 +np.float64,0xdb998ba5b7332,0xdb998ba5b7332,1 +np.float64,0xbfe438a14fe87142,0xbfe5981751e4c5cd,1 +np.float64,0xbfbcf48cbc39e918,0xbfbd045d60e65d3a,1 +np.float64,0x7fde499615bc932b,0x7ff0000000000000,1 +np.float64,0x800bba269057744e,0x800bba269057744e,1 +np.float64,0x3fc9bb1ba3337638,0x3fc9e78fdb6799c1,1 +np.float64,0xffd9f974fbb3f2ea,0xfff0000000000000,1 +np.float64,0x7fcf1ad1693e35a2,0x7ff0000000000000,1 +np.float64,0x7fe5dcedd32bb9db,0x7ff0000000000000,1 +np.float64,0xeb06500bd60ca,0xeb06500bd60ca,1 +np.float64,0x7fd73e7b592e7cf6,0x7ff0000000000000,1 +np.float64,0xbfe9d91ae873b236,0xbfecc08482849bcd,1 +np.float64,0xffc85338b730a670,0xfff0000000000000,1 +np.float64,0x7fbba41eee37483d,0x7ff0000000000000,1 +np.float64,0x3fed5624fb7aac4a,0x3ff0cf9f0de1fd54,1 +np.float64,0xffe566d80d6acdb0,0xfff0000000000000,1 +np.float64,0x3fd4477884a88ef1,0x3fd49ec7acdd25a0,1 +np.float64,0x3fcb98c5fd37318c,0x3fcbcfa20e2c2712,1 +np.float64,0xffdeba71d5bd74e4,0xfff0000000000000,1 +np.float64,0x8001edc59dc3db8c,0x8001edc59dc3db8c,1 +np.float64,0x3fe6b09e896d613e,0x3fe8a3bb541ec0e3,1 +np.float64,0x3fe8694b4970d296,0x3fead94d271d05cf,1 +np.float64,0xb52c27bf6a585,0xb52c27bf6a585,1 +np.float64,0x7fcb0a21d9361443,0x7ff0000000000000,1 +np.float64,0xbfd9efc68cb3df8e,0xbfdaa7058c0ccbd1,1 +np.float64,0x8007cd170fef9a2f,0x8007cd170fef9a2f,1 +np.float64,0x3fe83325e770664c,0x3fea92c55c9d567e,1 +np.float64,0x800bd0085537a011,0x800bd0085537a011,1 +np.float64,0xffe05b9e7820b73c,0xfff0000000000000,1 +np.float64,0x3fea4ce4347499c8,0x3fed5cea9fdc541b,1 +np.float64,0x7fe08aae1921155b,0x7ff0000000000000,1 +np.float64,0x3fe7a5e7deef4bd0,0x3fe9dc2e20cfb61c,1 +np.float64,0xbfe0ccc8e6e19992,0xbfe195175f32ee3f,1 +np.float64,0xbfe8649717f0c92e,0xbfead3298974dcf0,1 +np.float64,0x7fed6c5308bad8a5,0x7ff0000000000000,1 +np.float64,0xffdbd8c7af37b190,0xfff0000000000000,1 +np.float64,0xbfb2bc4d06257898,0xbfb2c09569912839,1 +np.float64,0x3fc62eca512c5d95,0x3fc64b4251bce8f9,1 +np.float64,0xbfcae2ddbd35c5bc,0xbfcb15971fc61312,1 +np.float64,0x18d26ce831a4f,0x18d26ce831a4f,1 +np.float64,0x7fe38b279267164e,0x7ff0000000000000,1 +np.float64,0x97e1d9ab2fc3b,0x97e1d9ab2fc3b,1 +np.float64,0xbfee8e4785fd1c8f,0xbff1b52d16807627,1 +np.float64,0xbfb189b4a6231368,0xbfb18d37e83860ee,1 +np.float64,0xffd435761ea86aec,0xfff0000000000000,1 +np.float64,0x3fe6c48ebced891e,0x3fe8bcea189c3867,1 +np.float64,0x7fdadd3678b5ba6c,0x7ff0000000000000,1 +np.float64,0x7fea8f15b7b51e2a,0x7ff0000000000000,1 +np.float64,0xbff0000000000000,0xbff2cd9fc44eb982,1 +np.float64,0x80004c071120980f,0x80004c071120980f,1 +np.float64,0x8005367adfea6cf6,0x8005367adfea6cf6,1 +np.float64,0x3fbdc9139a3b9220,0x3fbdda4aba667ce5,1 +np.float64,0x7fed5ee3ad7abdc6,0x7ff0000000000000,1 +np.float64,0x51563fb2a2ac9,0x51563fb2a2ac9,1 +np.float64,0xbfba7d26ce34fa50,0xbfba894229c50ea1,1 +np.float64,0x6c10db36d821c,0x6c10db36d821c,1 +np.float64,0xbfbdaec0d03b5d80,0xbfbdbfca6ede64f4,1 +np.float64,0x800a1cbe7414397d,0x800a1cbe7414397d,1 +np.float64,0x800ae6e7f2d5cdd0,0x800ae6e7f2d5cdd0,1 +np.float64,0x3fea63d3fef4c7a8,0x3fed7c1356688ddc,1 +np.float64,0xbfde1e3a88bc3c76,0xbfdf3dfb09cc2260,1 +np.float64,0xbfd082d75a2105ae,0xbfd0b1e28c84877b,1 +np.float64,0x7fea1e5e85f43cbc,0x7ff0000000000000,1 +np.float64,0xffe2237a1a6446f4,0xfff0000000000000,1 +np.float64,0x3fd1e2be8523c57d,0x3fd21e93dfd1bbc4,1 +np.float64,0x3fd1acd428a359a8,0x3fd1e6916a42bc3a,1 +np.float64,0x61a152f0c342b,0x61a152f0c342b,1 +np.float64,0xbfc61a6b902c34d8,0xbfc6369557690ba0,1 +np.float64,0x7fd1a84b1f235095,0x7ff0000000000000,1 +np.float64,0x1c5cc7e638b9a,0x1c5cc7e638b9a,1 +np.float64,0x8008039755f0072f,0x8008039755f0072f,1 +np.float64,0x80097532d6f2ea66,0x80097532d6f2ea66,1 +np.float64,0xbfc6d979a12db2f4,0xbfc6f89777c53f8f,1 +np.float64,0x8004293ab1085276,0x8004293ab1085276,1 +np.float64,0x3fc2af5c21255eb8,0x3fc2c05dc0652554,1 +np.float64,0xbfd9a5ab87b34b58,0xbfda56d1076abc98,1 +np.float64,0xbfebd360ba77a6c2,0xbfef779fd6595f9b,1 +np.float64,0xffd5313c43aa6278,0xfff0000000000000,1 +np.float64,0xbfe994a262b32945,0xbfec64b969852ed5,1 +np.float64,0x3fce01a52e3c034a,0x3fce48324eb29c31,1 +np.float64,0x56bd74b2ad7af,0x56bd74b2ad7af,1 +np.float64,0xb84093ff70813,0xb84093ff70813,1 +np.float64,0x7fe776df946eedbe,0x7ff0000000000000,1 +np.float64,0xbfe294ac2e652958,0xbfe3a480938afa26,1 +np.float64,0x7fe741b4d0ee8369,0x7ff0000000000000,1 +np.float64,0x800b7e8a1056fd15,0x800b7e8a1056fd15,1 +np.float64,0x7fd28f1269251e24,0x7ff0000000000000,1 +np.float64,0x8009d4492e73a893,0x8009d4492e73a893,1 +np.float64,0x3fe3f27fca67e500,0x3fe543aff825e244,1 +np.float64,0x3fd12447e5a24890,0x3fd158efe43c0452,1 +np.float64,0xbfd58df0f2ab1be2,0xbfd5f6d908e3ebce,1 +np.float64,0xffc0a8e4642151c8,0xfff0000000000000,1 +np.float64,0xbfedb197787b632f,0xbff112367ec9d3e7,1 +np.float64,0xffdde07a7f3bc0f4,0xfff0000000000000,1 +np.float64,0x3fe91f3e5b723e7d,0x3febc886a1d48364,1 +np.float64,0x3fe50415236a082a,0x3fe68f43a5468d8c,1 +np.float64,0xd9a0c875b3419,0xd9a0c875b3419,1 +np.float64,0xbfee04ccf4bc099a,0xbff14f4740a114cf,1 +np.float64,0xbfd2bcc6a125798e,0xbfd30198b1e7d7ed,1 +np.float64,0xbfeb3c16f8f6782e,0xbfeea4ce47d09f58,1 +np.float64,0xffd3ba19e4a77434,0xfff0000000000000,1 +np.float64,0x8010000000000000,0x8010000000000000,1 +np.float64,0x3fdef0a642bde14d,0x3fe0146677b3a488,1 +np.float64,0x3fdc3dd0a2b87ba0,0x3fdd2abe65651487,1 +np.float64,0x3fdbb1fd47b763fb,0x3fdc915a2fd19f4b,1 +np.float64,0x7fbaa375e63546eb,0x7ff0000000000000,1 +np.float64,0x433ef8ee867e0,0x433ef8ee867e0,1 +np.float64,0xf5345475ea68b,0xf5345475ea68b,1 +np.float64,0xa126419b424c8,0xa126419b424c8,1 +np.float64,0x3fe0057248200ae5,0x3fe0b2f488339709,1 +np.float64,0xffc5e3b82f2bc770,0xfff0000000000000,1 +np.float64,0xffb215c910242b90,0xfff0000000000000,1 +np.float64,0xbfeba4ae0837495c,0xbfef3642e4b54aac,1 +np.float64,0xffbb187ebe363100,0xfff0000000000000,1 +np.float64,0x3fe4c6a496a98d49,0x3fe64440cdf06aab,1 +np.float64,0x800767a28f6ecf46,0x800767a28f6ecf46,1 +np.float64,0x3fdbed63b1b7dac8,0x3fdcd27318c0b683,1 +np.float64,0x80006d8339e0db07,0x80006d8339e0db07,1 +np.float64,0x8000b504f0416a0b,0x8000b504f0416a0b,1 +np.float64,0xbfe88055bfb100ac,0xbfeaf767bd2767b9,1 +np.float64,0x3fefe503317fca06,0x3ff2b8d4057240c8,1 +np.float64,0x7fe307538b660ea6,0x7ff0000000000000,1 +np.float64,0x944963c12892d,0x944963c12892d,1 +np.float64,0xbfd2c20b38a58416,0xbfd30717900f8233,1 +np.float64,0x7feed04e3e3da09b,0x7ff0000000000000,1 +np.float64,0x3fe639619cac72c3,0x3fe80de7b8560a8d,1 +np.float64,0x3fde066c66bc0cd9,0x3fdf237fb759a652,1 +np.float64,0xbfc56b22b52ad644,0xbfc584c267a47ebd,1 +np.float64,0x3fc710d5b12e21ab,0x3fc730d817ba0d0c,1 +np.float64,0x3fee1dfc347c3bf8,0x3ff161d9c3e15f68,1 +np.float64,0x3fde400954bc8013,0x3fdf639e5cc9e7a9,1 +np.float64,0x56e701f8adce1,0x56e701f8adce1,1 +np.float64,0xbfe33bbc89e67779,0xbfe46996b39381fe,1 +np.float64,0x7fec89e2f87913c5,0x7ff0000000000000,1 +np.float64,0xbfdad58b40b5ab16,0xbfdba098cc0ad5d3,1 +np.float64,0x3fe99c76a13338ed,0x3fec6f31bae613e7,1 +np.float64,0x3fe4242a29a84854,0x3fe57f6b45e5c0ef,1 +np.float64,0xbfe79d3199ef3a63,0xbfe9d0fb96c846ba,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0xbfeb35a6cf766b4e,0xbfee9be4e7e943f7,1 +np.float64,0x3e047f267c091,0x3e047f267c091,1 +np.float64,0x4bf1376a97e28,0x4bf1376a97e28,1 +np.float64,0x800ef419685de833,0x800ef419685de833,1 +np.float64,0x3fe0efa61a21df4c,0x3fe1bce98baf2f0f,1 +np.float64,0x3fcc13c4d738278a,0x3fcc4d8c778bcaf7,1 +np.float64,0x800f1d291afe3a52,0x800f1d291afe3a52,1 +np.float64,0x3fd3f10e6da7e21d,0x3fd444106761ea1d,1 +np.float64,0x800706d6d76e0dae,0x800706d6d76e0dae,1 +np.float64,0xffa1ffbc9023ff80,0xfff0000000000000,1 +np.float64,0xbfe098f26d6131e5,0xbfe15a08a5f3eac0,1 +np.float64,0x3fe984f9cc7309f4,0x3fec4fcdbdb1cb9b,1 +np.float64,0x7fd7c2f1eaaf85e3,0x7ff0000000000000,1 +np.float64,0x800a8adb64f515b7,0x800a8adb64f515b7,1 +np.float64,0x80060d3ffc8c1a81,0x80060d3ffc8c1a81,1 +np.float64,0xbfec37e4aef86fc9,0xbff0029a6a1d61e2,1 +np.float64,0x800b21bcfcf6437a,0x800b21bcfcf6437a,1 +np.float64,0xbfc08facc1211f58,0xbfc09b8380ea8032,1 +np.float64,0xffebb4b52577696a,0xfff0000000000000,1 +np.float64,0x800b08096df61013,0x800b08096df61013,1 +np.float64,0x8000000000000000,0x8000000000000000,1 +np.float64,0xffd2f0c9c8a5e194,0xfff0000000000000,1 +np.float64,0xffe78b2299af1644,0xfff0000000000000,1 +np.float64,0x7fd0444794a0888e,0x7ff0000000000000,1 +np.float64,0x307c47b460f8a,0x307c47b460f8a,1 +np.float64,0xffe6b4c851ad6990,0xfff0000000000000,1 +np.float64,0xffe1877224a30ee4,0xfff0000000000000,1 +np.float64,0x48d7b5c091af7,0x48d7b5c091af7,1 +np.float64,0xbfa1dc6b1c23b8d0,0xbfa1dd5889e1b7da,1 +np.float64,0x3fe5004737ea008e,0x3fe68a9c310b08c1,1 +np.float64,0x7fec5f0742b8be0e,0x7ff0000000000000,1 +np.float64,0x3fd0a86285a150c5,0x3fd0d8b238d557fa,1 +np.float64,0x7fed60380efac06f,0x7ff0000000000000,1 +np.float64,0xeeca74dfdd94f,0xeeca74dfdd94f,1 +np.float64,0x3fda05aaa8b40b54,0x3fdabebdbf405e84,1 +np.float64,0x800e530ceb1ca61a,0x800e530ceb1ca61a,1 +np.float64,0x800b3866379670cd,0x800b3866379670cd,1 +np.float64,0xffedb3e7fa3b67cf,0xfff0000000000000,1 +np.float64,0xffdfa4c0713f4980,0xfff0000000000000,1 +np.float64,0x7fe4679e0728cf3b,0x7ff0000000000000,1 +np.float64,0xffe978611ef2f0c2,0xfff0000000000000,1 +np.float64,0x7fc9f4601f33e8bf,0x7ff0000000000000,1 +np.float64,0x3fd4942de6a9285c,0x3fd4ef6e089357dd,1 +np.float64,0x3faafe064435fc00,0x3fab0139cd6564dc,1 +np.float64,0x800d145a519a28b5,0x800d145a519a28b5,1 +np.float64,0xbfd82636f2304c6e,0xbfd8b9f75ddd2f02,1 +np.float64,0xbfdf2e975e3e5d2e,0xbfe037174280788c,1 +np.float64,0x7fd7051d7c2e0a3a,0x7ff0000000000000,1 +np.float64,0x8007933d452f267b,0x8007933d452f267b,1 +np.float64,0xb2043beb64088,0xb2043beb64088,1 +np.float64,0x3febfd9708f7fb2e,0x3fefb2ef090f18d2,1 +np.float64,0xffd9bc6bc83378d8,0xfff0000000000000,1 +np.float64,0xc10f9fd3821f4,0xc10f9fd3821f4,1 +np.float64,0x3fe3c83413a79068,0x3fe510fa1dd8edf7,1 +np.float64,0x3fbe26ccda3c4da0,0x3fbe38a892279975,1 +np.float64,0x3fcc1873103830e6,0x3fcc5257a6ae168d,1 +np.float64,0xe7e000e9cfc00,0xe7e000e9cfc00,1 +np.float64,0xffda73852bb4e70a,0xfff0000000000000,1 +np.float64,0xbfe831be19f0637c,0xbfea90f1b34da3e5,1 +np.float64,0xbfeb568f3076ad1e,0xbfeec97eebfde862,1 +np.float64,0x510a6ad0a214e,0x510a6ad0a214e,1 +np.float64,0x3fe6ba7e35ed74fc,0x3fe8b032a9a28c6a,1 +np.float64,0xffeb5cdcff76b9b9,0xfff0000000000000,1 +np.float64,0x4f0a23e89e145,0x4f0a23e89e145,1 +np.float64,0x446ec20288dd9,0x446ec20288dd9,1 +np.float64,0x7fe2521b02e4a435,0x7ff0000000000000,1 +np.float64,0x8001cd2969e39a54,0x8001cd2969e39a54,1 +np.float64,0x3fdfe90600bfd20c,0x3fe09fdcca10001c,1 +np.float64,0x7fd660c5762cc18a,0x7ff0000000000000,1 +np.float64,0xbfb11b23aa223648,0xbfb11e661949b377,1 +np.float64,0x800e025285fc04a5,0x800e025285fc04a5,1 +np.float64,0xffb180bb18230178,0xfff0000000000000,1 +np.float64,0xaaf590df55eb2,0xaaf590df55eb2,1 +np.float64,0xbfe8637d9df0c6fb,0xbfead1ba429462ec,1 +np.float64,0x7fd2577866a4aef0,0x7ff0000000000000,1 +np.float64,0xbfcfb2ab5a3f6558,0xbfd002ee87f272b9,1 +np.float64,0x7fdd64ae2f3ac95b,0x7ff0000000000000,1 +np.float64,0xffd1a502c9234a06,0xfff0000000000000,1 +np.float64,0x7fc4be4b60297c96,0x7ff0000000000000,1 +np.float64,0xbfb46b712a28d6e0,0xbfb470fca9919172,1 +np.float64,0xffdef913033df226,0xfff0000000000000,1 +np.float64,0x3fd94a3545b2946b,0x3fd9f40431ce9f9c,1 +np.float64,0x7fef88a0b6ff1140,0x7ff0000000000000,1 +np.float64,0xbfbcc81876399030,0xbfbcd7a0ab6cb388,1 +np.float64,0x800a4acfdd9495a0,0x800a4acfdd9495a0,1 +np.float64,0xffe270b3d5e4e167,0xfff0000000000000,1 +np.float64,0xbfd23f601e247ec0,0xbfd27eeca50a49eb,1 +np.float64,0x7fec6e796a78dcf2,0x7ff0000000000000,1 +np.float64,0x3fb85e0c9630bc19,0x3fb867791ccd6c72,1 +np.float64,0x7fe49fc424a93f87,0x7ff0000000000000,1 +np.float64,0xbfe75a99fbaeb534,0xbfe97ba37663de4c,1 +np.float64,0xffe85011b630a023,0xfff0000000000000,1 +np.float64,0xffe5962e492b2c5c,0xfff0000000000000,1 +np.float64,0x6f36ed4cde6de,0x6f36ed4cde6de,1 +np.float64,0x3feb72170af6e42e,0x3feeefbe6f1a2084,1 +np.float64,0x80014d8d60629b1c,0x80014d8d60629b1c,1 +np.float64,0xbfe0eb40d321d682,0xbfe1b7e31f252bf1,1 +np.float64,0x31fe305663fc7,0x31fe305663fc7,1 +np.float64,0x3fd2cd6381a59ac7,0x3fd312edc9868a4d,1 +np.float64,0xffcf0720793e0e40,0xfff0000000000000,1 +np.float64,0xbfeef1ef133de3de,0xbff1ffd5e1a3b648,1 +np.float64,0xbfd01c787aa038f0,0xbfd0482be3158a01,1 +np.float64,0x3fda3607c5b46c10,0x3fdaf3301e217301,1 +np.float64,0xffda9a9911b53532,0xfff0000000000000,1 +np.float64,0x3fc0b37c392166f8,0x3fc0bfa076f3c43e,1 +np.float64,0xbfe06591c760cb24,0xbfe11fad179ea12c,1 +np.float64,0x8006e369c20dc6d4,0x8006e369c20dc6d4,1 +np.float64,0x3fdf2912a8be5224,0x3fe033ff74b92f4d,1 +np.float64,0xffc0feb07821fd60,0xfff0000000000000,1 +np.float64,0xa4b938c949727,0xa4b938c949727,1 +np.float64,0x8008fe676571fccf,0x8008fe676571fccf,1 +np.float64,0xbfdda68459bb4d08,0xbfdeb8faab34fcbc,1 +np.float64,0xbfda18b419343168,0xbfdad360ca52ec7c,1 +np.float64,0x3febcbae35b7975c,0x3fef6cd51c9ebc15,1 +np.float64,0x3fbec615f63d8c30,0x3fbed912ba729926,1 +np.float64,0x7f99a831c8335063,0x7ff0000000000000,1 +np.float64,0x3fe663e8826cc7d1,0x3fe84330bd9aada8,1 +np.float64,0x70a9f9e6e1540,0x70a9f9e6e1540,1 +np.float64,0x8a13a5db14275,0x8a13a5db14275,1 +np.float64,0x7fc4330a3b286613,0x7ff0000000000000,1 +np.float64,0xbfe580c6136b018c,0xbfe728806cc7a99a,1 +np.float64,0x8000000000000001,0x8000000000000001,1 +np.float64,0xffec079d5df80f3a,0xfff0000000000000,1 +np.float64,0x8e1173c31c22f,0x8e1173c31c22f,1 +np.float64,0x3fe088456d21108b,0x3fe14712ca414103,1 +np.float64,0x3fe1b76f73636edf,0x3fe2a2b658557112,1 +np.float64,0xbfd4a1dd162943ba,0xbfd4fdd45cae8fb8,1 +np.float64,0x7fd60b46c8ac168d,0x7ff0000000000000,1 +np.float64,0xffe36cc3b166d987,0xfff0000000000000,1 +np.float64,0x3fdc2ae0cfb855c0,0x3fdd15f026773151,1 +np.float64,0xbfc41aa203283544,0xbfc42fd1b145fdd5,1 +np.float64,0xffed90c55fbb218a,0xfff0000000000000,1 +np.float64,0x3fe67e3a9aecfc75,0x3fe86440db65b4f6,1 +np.float64,0x7fd12dbeaba25b7c,0x7ff0000000000000,1 +np.float64,0xbfe1267c0de24cf8,0xbfe1fbb611bdf1e9,1 +np.float64,0x22e5619645cad,0x22e5619645cad,1 +np.float64,0x7fe327c72ea64f8d,0x7ff0000000000000,1 +np.float64,0x7fd2c3f545a587ea,0x7ff0000000000000,1 +np.float64,0x7fc7b689372f6d11,0x7ff0000000000000,1 +np.float64,0xc5e140bd8bc28,0xc5e140bd8bc28,1 +np.float64,0x3fccb3627a3966c5,0x3fccf11b44fa4102,1 +np.float64,0xbfd2cf725c259ee4,0xbfd315138d0e5dca,1 +np.float64,0x10000000000000,0x10000000000000,1 +np.float64,0xbfd3dfa8b627bf52,0xbfd431d17b235477,1 +np.float64,0xbfb82124e6304248,0xbfb82a4b6d9c2663,1 +np.float64,0x3fdcd590d9b9ab22,0x3fddd1d548806347,1 +np.float64,0x7fdee0cd1b3dc199,0x7ff0000000000000,1 +np.float64,0x8004ebfc60a9d7fa,0x8004ebfc60a9d7fa,1 +np.float64,0x3fe8eb818b71d704,0x3feb842679806108,1 +np.float64,0xffdd5e8fe63abd20,0xfff0000000000000,1 +np.float64,0xbfe3efcbd9e7df98,0xbfe54071436645ee,1 +np.float64,0x3fd5102557aa204b,0x3fd57203d31a05b8,1 +np.float64,0x3fe6318af7ec6316,0x3fe8041a177cbf96,1 +np.float64,0x3fdf3cecdabe79da,0x3fe03f2084ffbc78,1 +np.float64,0x7fe0ab6673a156cc,0x7ff0000000000000,1 +np.float64,0x800037d5c6c06fac,0x800037d5c6c06fac,1 +np.float64,0xffce58b86a3cb170,0xfff0000000000000,1 +np.float64,0xbfe3455d6ce68abb,0xbfe475034cecb2b8,1 +np.float64,0x991b663d3236d,0x991b663d3236d,1 +np.float64,0x3fda82d37c3505a7,0x3fdb46973da05c12,1 +np.float64,0x3f9b736fa036e6df,0x3f9b74471c234411,1 +np.float64,0x8001c96525e392cb,0x8001c96525e392cb,1 +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 +np.float64,0xbfaf59122c3eb220,0xbfaf5e15f8b272b0,1 +np.float64,0xbf9aa7d288354fa0,0xbf9aa897d2a40cb5,1 +np.float64,0x8004a43428694869,0x8004a43428694869,1 +np.float64,0x7feead476dbd5a8e,0x7ff0000000000000,1 +np.float64,0xffca150f81342a20,0xfff0000000000000,1 +np.float64,0x80047ec3bc88fd88,0x80047ec3bc88fd88,1 +np.float64,0xbfee3e5b123c7cb6,0xbff179c8b8334278,1 +np.float64,0x3fd172359f22e46b,0x3fd1a9ba6b1420a1,1 +np.float64,0x3fe8e5e242f1cbc5,0x3feb7cbcaefc4d5c,1 +np.float64,0x8007fb059a6ff60c,0x8007fb059a6ff60c,1 +np.float64,0xe3899e71c7134,0xe3899e71c7134,1 +np.float64,0x7fe3b98326a77305,0x7ff0000000000000,1 +np.float64,0x7fec4e206cb89c40,0x7ff0000000000000,1 +np.float64,0xbfa3b012c4276020,0xbfa3b150c13b3cc5,1 +np.float64,0xffefffffffffffff,0xfff0000000000000,1 +np.float64,0xffe28a5b9aa514b6,0xfff0000000000000,1 +np.float64,0xbfd76a6cc2aed4da,0xbfd7f10f4d04e7f6,1 +np.float64,0xbc2b1c0178564,0xbc2b1c0178564,1 +np.float64,0x6d9d444adb3a9,0x6d9d444adb3a9,1 +np.float64,0xbfdcadd368395ba6,0xbfdda6037b5c429c,1 +np.float64,0x3fe11891fde23124,0x3fe1ebc1c204b14b,1 +np.float64,0x3fdd66c3eebacd88,0x3fde72526b5304c4,1 +np.float64,0xbfe79d85612f3b0b,0xbfe9d1673bd1f6d6,1 +np.float64,0x3fed60abdabac158,0x3ff0d7426b3800a2,1 +np.float64,0xbfb0ffa54021ff48,0xbfb102d81073a9f0,1 +np.float64,0xd2452af5a48a6,0xd2452af5a48a6,1 +np.float64,0xf4b835c1e971,0xf4b835c1e971,1 +np.float64,0x7e269cdafc4d4,0x7e269cdafc4d4,1 +np.float64,0x800097a21d812f45,0x800097a21d812f45,1 +np.float64,0x3fdfcc85e8bf990c,0x3fe08fcf770fd456,1 +np.float64,0xd8d53155b1aa6,0xd8d53155b1aa6,1 +np.float64,0x7fb8ed658831daca,0x7ff0000000000000,1 +np.float64,0xbfec865415b90ca8,0xbff03a4584d719f9,1 +np.float64,0xffd8cda62a319b4c,0xfff0000000000000,1 +np.float64,0x273598d84e6b4,0x273598d84e6b4,1 +np.float64,0x7fd566b5c32acd6b,0x7ff0000000000000,1 +np.float64,0xff61d9d48023b400,0xfff0000000000000,1 +np.float64,0xbfec5c3bf4f8b878,0xbff01c594243337c,1 +np.float64,0x7fd1be0561a37c0a,0x7ff0000000000000,1 +np.float64,0xffeaee3271b5dc64,0xfff0000000000000,1 +np.float64,0x800c0e1931b81c33,0x800c0e1931b81c33,1 +np.float64,0xbfad1171583a22e0,0xbfad1570e5c466d2,1 +np.float64,0x7fd783b0fe2f0761,0x7ff0000000000000,1 +np.float64,0x7fc39903e6273207,0x7ff0000000000000,1 +np.float64,0xffe00003c5600007,0xfff0000000000000,1 +np.float64,0x35a7b9c06b50,0x35a7b9c06b50,1 +np.float64,0x7fee441a22bc8833,0x7ff0000000000000,1 +np.float64,0xff6e47fbc03c9000,0xfff0000000000000,1 +np.float64,0xbfd3c3c9c8a78794,0xbfd41499b1912534,1 +np.float64,0x82c9c87f05939,0x82c9c87f05939,1 +np.float64,0xbfedeb0fe4fbd620,0xbff13c573ce9d3d0,1 +np.float64,0x2b79298656f26,0x2b79298656f26,1 +np.float64,0xbf5ee44f003dc800,0xbf5ee4503353c0ba,1 +np.float64,0xbfe1dd264e63ba4c,0xbfe2ce68116c7bf6,1 +np.float64,0x3fece10b7579c217,0x3ff07b21b11799c6,1 +np.float64,0x3fba47143a348e28,0x3fba52e601adf24c,1 +np.float64,0xffe9816e7a7302dc,0xfff0000000000000,1 +np.float64,0x8009a8047fd35009,0x8009a8047fd35009,1 +np.float64,0x800ac28e4e95851d,0x800ac28e4e95851d,1 +np.float64,0x80093facf4f27f5a,0x80093facf4f27f5a,1 +np.float64,0x3ff0000000000000,0x3ff2cd9fc44eb982,1 +np.float64,0x3fe76a9857eed530,0x3fe99018a5895a4f,1 +np.float64,0xbfd13c59a3a278b4,0xbfd171e133df0b16,1 +np.float64,0x7feb43bc83368778,0x7ff0000000000000,1 +np.float64,0xbfe2970c5fa52e18,0xbfe3a74a434c6efe,1 +np.float64,0xffd091c380212388,0xfff0000000000000,1 +np.float64,0x3febb3b9d2f76774,0x3fef4b4af2bd8580,1 +np.float64,0x7fec66787ef8ccf0,0x7ff0000000000000,1 +np.float64,0xbf935e185826bc40,0xbf935e640557a354,1 +np.float64,0x979df1552f3be,0x979df1552f3be,1 +np.float64,0x7fc096ee73212ddc,0x7ff0000000000000,1 +np.float64,0xbfe9de88faf3bd12,0xbfecc7d1ae691d1b,1 +np.float64,0x7fdc733f06b8e67d,0x7ff0000000000000,1 +np.float64,0xffd71be1a0ae37c4,0xfff0000000000000,1 +np.float64,0xb50dabd36a1b6,0xb50dabd36a1b6,1 +np.float64,0x7fce3d94d63c7b29,0x7ff0000000000000,1 +np.float64,0x7fbaf95e4435f2bc,0x7ff0000000000000,1 +np.float64,0x81a32a6f03466,0x81a32a6f03466,1 +np.float64,0xa99b5b4d5336c,0xa99b5b4d5336c,1 +np.float64,0x7f97c1eeb82f83dc,0x7ff0000000000000,1 +np.float64,0x3fe761636d6ec2c6,0x3fe98451160d2ffb,1 +np.float64,0xbfe3224ef5e6449e,0xbfe44b73eeadac52,1 +np.float64,0x7fde6feb0dbcdfd5,0x7ff0000000000000,1 +np.float64,0xbfee87f9ca7d0ff4,0xbff1b079e9d7f706,1 +np.float64,0x3fe46f4c9828de99,0x3fe5da2ab9609ea5,1 +np.float64,0xffb92fe882325fd0,0xfff0000000000000,1 +np.float64,0x80054bc63cea978d,0x80054bc63cea978d,1 +np.float64,0x3d988bea7b312,0x3d988bea7b312,1 +np.float64,0x3fe6468e1d6c8d1c,0x3fe81e64d37d39a8,1 +np.float64,0x3fd68eefc22d1de0,0x3fd7074264faeead,1 +np.float64,0xffb218a074243140,0xfff0000000000000,1 +np.float64,0x3fdbcb3b6cb79678,0x3fdcad011de40b7d,1 +np.float64,0x7fe3c161772782c2,0x7ff0000000000000,1 +np.float64,0x25575c904aaec,0x25575c904aaec,1 +np.float64,0x800fa43a8f5f4875,0x800fa43a8f5f4875,1 +np.float64,0x3fe41fc9e1e83f94,0x3fe57a25dd1a37f1,1 +np.float64,0x3fd895f4a7b12be9,0x3fd931e7b721a08a,1 +np.float64,0xce31469f9c629,0xce31469f9c629,1 +np.float64,0xffea0f55ca341eab,0xfff0000000000000,1 +np.float64,0xffe831c9ba306393,0xfff0000000000000,1 +np.float64,0x7fe2056f03a40add,0x7ff0000000000000,1 +np.float64,0x7fd6b075e02d60eb,0x7ff0000000000000,1 +np.float64,0x3fdfbef4273f7de8,0x3fe0882c1f59efc0,1 +np.float64,0x8005b9e094ab73c2,0x8005b9e094ab73c2,1 +np.float64,0x3fea881ac6351036,0x3fedad7a319b887c,1 +np.float64,0xbfe2c61c7ee58c39,0xbfe3de9a99d8a9c6,1 +np.float64,0x30b0d3786161b,0x30b0d3786161b,1 +np.float64,0x3fa51d56a02a3aad,0x3fa51edee2d2ecef,1 +np.float64,0x79745732f2e8c,0x79745732f2e8c,1 +np.float64,0x800d55b4907aab69,0x800d55b4907aab69,1 +np.float64,0xbfbe8fcf0a3d1fa0,0xbfbea267fbb5bfdf,1 +np.float64,0xbfd04e2756a09c4e,0xbfd07b74d079f9a2,1 +np.float64,0x3fc65170552ca2e1,0x3fc66e6eb00c82ed,1 +np.float64,0xbfb0674b8020ce98,0xbfb06a2b4771b64c,1 +np.float64,0x2059975840b34,0x2059975840b34,1 +np.float64,0x33d1385467a28,0x33d1385467a28,1 +np.float64,0x3fea41b74ff4836f,0x3fed4dc1a09e53cc,1 +np.float64,0xbfe8e08c9d71c119,0xbfeb75b4c59a6bec,1 +np.float64,0x7fdbbf14d6377e29,0x7ff0000000000000,1 +np.float64,0x3fcd8b71513b16e0,0x3fcdcec80174f9ad,1 +np.float64,0x5c50bc94b8a18,0x5c50bc94b8a18,1 +np.float64,0x969a18f52d343,0x969a18f52d343,1 +np.float64,0x3fd7ae44462f5c89,0x3fd8398bc34e395c,1 +np.float64,0xffdd0f8617ba1f0c,0xfff0000000000000,1 +np.float64,0xfff0000000000000,0xfff0000000000000,1 +np.float64,0xbfe2f9badb65f376,0xbfe41b771320ece8,1 +np.float64,0x3fd140bc7fa29,0x3fd140bc7fa29,1 +np.float64,0xbfe14523b5628a48,0xbfe21ee850972043,1 +np.float64,0x3feedd0336bdba06,0x3ff1f01afc1f3a06,1 +np.float64,0x800de423ad7bc848,0x800de423ad7bc848,1 +np.float64,0x4cef857c99df1,0x4cef857c99df1,1 +np.float64,0xbfea55e0e374abc2,0xbfed691e41d648dd,1 +np.float64,0x3fe70d7a18ae1af4,0x3fe91955a34d8094,1 +np.float64,0xbfc62fc3032c5f88,0xbfc64c3ec25decb8,1 +np.float64,0x3fc915abb5322b58,0x3fc93edac5cc73fe,1 +np.float64,0x69aaff66d3561,0x69aaff66d3561,1 +np.float64,0x5c6a90f2b8d53,0x5c6a90f2b8d53,1 +np.float64,0x3fefe30dc1bfc61c,0x3ff2b752257bdacd,1 +np.float64,0x3fef15db15fe2bb6,0x3ff21aea05601396,1 +np.float64,0xbfe353e5ac66a7cc,0xbfe48644e6553d1a,1 +np.float64,0x3fe6d30cffada61a,0x3fe8cf3e4c61ddac,1 +np.float64,0x7fb7857eb62f0afc,0x7ff0000000000000,1 +np.float64,0xbfdd9b53d23b36a8,0xbfdeac91a7af1340,1 +np.float64,0x3fd1456357228ac7,0x3fd17b3f7d39b27a,1 +np.float64,0x3fb57d10ae2afa21,0x3fb5838702b806f4,1 +np.float64,0x800c59c96c98b393,0x800c59c96c98b393,1 +np.float64,0x7fc1f2413823e481,0x7ff0000000000000,1 +np.float64,0xbfa3983624273070,0xbfa3996fa26c419a,1 +np.float64,0x7fb28874ae2510e8,0x7ff0000000000000,1 +np.float64,0x3fe826d02a304da0,0x3fea82bec50bc0b6,1 +np.float64,0x8008d6f0d3d1ade2,0x8008d6f0d3d1ade2,1 +np.float64,0xffe7c970ca2f92e1,0xfff0000000000000,1 +np.float64,0x7fcf42bcaa3e8578,0x7ff0000000000000,1 +np.float64,0x7fda1ab517343569,0x7ff0000000000000,1 +np.float64,0xbfe7926a65ef24d5,0xbfe9c323dd890d5b,1 +np.float64,0xbfcaf6282d35ec50,0xbfcb294f36a0a33d,1 +np.float64,0x800ca49df8d9493c,0x800ca49df8d9493c,1 +np.float64,0xffea18d26af431a4,0xfff0000000000000,1 +np.float64,0x3fb72f276e2e5e50,0x3fb7374539fd1221,1 +np.float64,0xffa6b613842d6c20,0xfff0000000000000,1 +np.float64,0xbfeb3c7263f678e5,0xbfeea54cdb60b54c,1 +np.float64,0x3fc976d2ba32eda5,0x3fc9a1e83a058de4,1 +np.float64,0xbfe4acd4b0e959aa,0xbfe624d5d4f9b9a6,1 +np.float64,0x7fca410a0f348213,0x7ff0000000000000,1 +np.float64,0xbfde368f77bc6d1e,0xbfdf5910c8c8bcb0,1 +np.float64,0xbfed7412937ae825,0xbff0e55afc428453,1 +np.float64,0xffef6b7b607ed6f6,0xfff0000000000000,1 +np.float64,0xbfb936f17e326de0,0xbfb941629a53c694,1 +np.float64,0x800dbb0c469b7619,0x800dbb0c469b7619,1 +np.float64,0x800f68b0581ed161,0x800f68b0581ed161,1 +np.float64,0x3fe25b2aad64b656,0x3fe361266fa9c5eb,1 +np.float64,0xbfb87e445a30fc88,0xbfb887d676910c3f,1 +np.float64,0x6e6ba9b6dcd76,0x6e6ba9b6dcd76,1 +np.float64,0x3fad27ce583a4f9d,0x3fad2bd72782ffdb,1 +np.float64,0xbfec0bc5d638178c,0xbfefc6e8c8f9095f,1 +np.float64,0x7fcba4a296374944,0x7ff0000000000000,1 +np.float64,0x8004ca237cc99448,0x8004ca237cc99448,1 +np.float64,0xffe85b8c3270b718,0xfff0000000000000,1 +np.float64,0x7fe7ee3eddafdc7d,0x7ff0000000000000,1 +np.float64,0xffd275967ca4eb2c,0xfff0000000000000,1 +np.float64,0xbfa95bc3a032b780,0xbfa95e6b288ecf43,1 +np.float64,0x3fc9e3214b33c643,0x3fca10667e7e7ff4,1 +np.float64,0x8001b89c5d837139,0x8001b89c5d837139,1 +np.float64,0xbf8807dfc0300fc0,0xbf880803e3badfbd,1 +np.float64,0x800aca94b895952a,0x800aca94b895952a,1 +np.float64,0x7fd79534a02f2a68,0x7ff0000000000000,1 +np.float64,0x3fe1b81179e37023,0x3fe2a371d8cc26f0,1 +np.float64,0x800699539d6d32a8,0x800699539d6d32a8,1 +np.float64,0xffe51dfbb3aa3bf7,0xfff0000000000000,1 +np.float64,0xbfdfb775abbf6eec,0xbfe083f48be2f98f,1 +np.float64,0x3fe87979d7b0f2f4,0x3feaee701d959079,1 +np.float64,0x3fd8e4e6a731c9cd,0x3fd986d29f25f982,1 +np.float64,0x3fe3dadaaf67b5b6,0x3fe527520fb02920,1 +np.float64,0x8003c2262bc7844d,0x8003c2262bc7844d,1 +np.float64,0x800c930add392616,0x800c930add392616,1 +np.float64,0xffb7a152a22f42a8,0xfff0000000000000,1 +np.float64,0x80028fe03dc51fc1,0x80028fe03dc51fc1,1 +np.float64,0xffe32ae60c6655cc,0xfff0000000000000,1 +np.float64,0x3fea3527e4746a50,0x3fed3cbbf47f18eb,1 +np.float64,0x800a53059e14a60c,0x800a53059e14a60c,1 +np.float64,0xbfd79e3b202f3c76,0xbfd828672381207b,1 +np.float64,0xffeed7e2eb7dafc5,0xfff0000000000000,1 +np.float64,0x3fec51ed6778a3db,0x3ff01509e34df61d,1 +np.float64,0xbfd84bc577b0978a,0xbfd8e23ec55e42e8,1 +np.float64,0x2483aff849077,0x2483aff849077,1 +np.float64,0x6f57883adeaf2,0x6f57883adeaf2,1 +np.float64,0xffd3fd74d927faea,0xfff0000000000000,1 +np.float64,0x7fca49ec773493d8,0x7ff0000000000000,1 +np.float64,0x7fd08fe2e8211fc5,0x7ff0000000000000,1 +np.float64,0x800852086db0a411,0x800852086db0a411,1 +np.float64,0x3fe5b1f2c9eb63e6,0x3fe7654f511bafc6,1 +np.float64,0xbfe01e2a58e03c54,0xbfe0cedb68f021e6,1 +np.float64,0x800988421d331085,0x800988421d331085,1 +np.float64,0xffd5038b18aa0716,0xfff0000000000000,1 +np.float64,0x8002c9264c85924d,0x8002c9264c85924d,1 +np.float64,0x3fd21ca302243946,0x3fd25ac653a71aab,1 +np.float64,0xbfea60d6e6f4c1ae,0xbfed78031d9dfa2b,1 +np.float64,0xffef97b6263f2f6b,0xfff0000000000000,1 +np.float64,0xbfd524732faa48e6,0xbfd5876ecc415dcc,1 +np.float64,0x660387e8cc072,0x660387e8cc072,1 +np.float64,0x7fcfc108a33f8210,0x7ff0000000000000,1 +np.float64,0x7febe5b0f877cb61,0x7ff0000000000000,1 +np.float64,0xbfa55fdfac2abfc0,0xbfa56176991851a8,1 +np.float64,0x25250f4c4a4a3,0x25250f4c4a4a3,1 +np.float64,0xffe2f6a2f2a5ed46,0xfff0000000000000,1 +np.float64,0x7fa754fcc02ea9f9,0x7ff0000000000000,1 +np.float64,0x3febd19dea37a33c,0x3fef75279f75d3b8,1 +np.float64,0xc5ed55218bdab,0xc5ed55218bdab,1 +np.float64,0x3fe72ff6b3ee5fed,0x3fe945388b979882,1 +np.float64,0xbfe16b854e22d70a,0xbfe24b10fc0dff14,1 +np.float64,0xffb22cbe10245980,0xfff0000000000000,1 +np.float64,0xa54246b54a849,0xa54246b54a849,1 +np.float64,0x3fe7f4cda76fe99c,0x3fea41edc74888b6,1 +np.float64,0x1,0x1,1 +np.float64,0x800d84acce9b095a,0x800d84acce9b095a,1 +np.float64,0xb0eef04761dde,0xb0eef04761dde,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0xffecaf1dbb795e3b,0xfff0000000000000,1 +np.float64,0x90dbab8d21b76,0x90dbab8d21b76,1 +np.float64,0x3fe79584a9ef2b09,0x3fe9c71fa9e40eb5,1 diff --git a/local_packages/numpy/_core/tests/data/umath-validation-set-tan.csv b/local_packages/numpy/_core/tests/data/umath-validation-set-tan.csv new file mode 100644 index 0000000..ac97624 --- /dev/null +++ b/local_packages/numpy/_core/tests/data/umath-validation-set-tan.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0xfd97ece0,0xc11186e9,4 +np.float32,0x8013bb34,0x8013bb34,4 +np.float32,0x316389,0x316389,4 +np.float32,0x7f7fffff,0xbf1c9eca,4 +np.float32,0x3f7674bb,0x3fb7e450,4 +np.float32,0x80800000,0x80800000,4 +np.float32,0x7f5995e8,0xbf94106c,4 +np.float32,0x74527,0x74527,4 +np.float32,0x7f08caea,0xbeceddb6,4 +np.float32,0x2d49b2,0x2d49b2,4 +np.float32,0x3f74e5e4,0x3fb58695,4 +np.float32,0x3f3fcd51,0x3f6e1e81,4 +np.float32,0xbf4f3608,0xbf864d3d,4 +np.float32,0xbed974a0,0xbee78c70,4 +np.float32,0xff5f483c,0x3ecf3cb2,4 +np.float32,0x7f4532f4,0xc0b96f7b,4 +np.float32,0x3f0a4f7c,0x3f198cc0,4 +np.float32,0x210193,0x210193,4 +np.float32,0xfeebad7a,0xbf92eba8,4 +np.float32,0xfed29f74,0xc134cab6,4 +np.float32,0x803433a0,0x803433a0,4 +np.float32,0x64eb46,0x64eb46,4 +np.float32,0xbf54ef22,0xbf8c757b,4 +np.float32,0x3f3d5fdd,0x3f69a17b,4 +np.float32,0x80000001,0x80000001,4 +np.float32,0x800a837a,0x800a837a,4 +np.float32,0x6ff0be,0x6ff0be,4 +np.float32,0xfe8f1186,0x3f518820,4 +np.float32,0x804963e5,0x804963e5,4 +np.float32,0xfebaa59a,0x3fa1dbb0,4 +np.float32,0x637970,0x637970,4 +np.float32,0x3e722a6b,0x3e76c89a,4 +np.float32,0xff2b0478,0xbddccb5f,4 +np.float32,0xbf7bd85b,0xbfc06821,4 +np.float32,0x3ec33600,0x3ecd4126,4 +np.float32,0x3e0a43b9,0x3e0b1c69,4 +np.float32,0x7f7511b6,0xbe427083,4 +np.float32,0x3f28c114,0x3f465a73,4 +np.float32,0x3f179e1c,0x3f2c3e7c,4 +np.float32,0x7b2963,0x7b2963,4 +np.float32,0x3f423d06,0x3f72b442,4 +np.float32,0x3f5a24c6,0x3f925508,4 +np.float32,0xff18c834,0xbf79b5c8,4 +np.float32,0x3f401ece,0x3f6eb6ac,4 +np.float32,0x7b8a3013,0xbffab968,4 +np.float32,0x80091ff0,0x80091ff0,4 +np.float32,0x3f389c51,0x3f610b47,4 +np.float32,0x5ea174,0x5ea174,4 +np.float32,0x807a9eb2,0x807a9eb2,4 +np.float32,0x806ce61e,0x806ce61e,4 +np.float32,0xbe956acc,0xbe99cefc,4 +np.float32,0x7e60e247,0xbf5e64a5,4 +np.float32,0x7f398e24,0x404d12ed,4 +np.float32,0x3d9049f8,0x3d908735,4 +np.float32,0x7db17ffc,0xbf5b3d87,4 +np.float32,0xff453f78,0xc0239c9f,4 +np.float32,0x3f024aac,0x3f0ed802,4 +np.float32,0xbe781c30,0xbe7d1508,4 +np.float32,0x3f77962a,0x3fb9a28e,4 +np.float32,0xff7fffff,0x3f1c9eca,4 +np.float32,0x3f7152e3,0x3fb03f9d,4 +np.float32,0xff7cb167,0x3f9ce831,4 +np.float32,0x3e763e30,0x3e7b1a10,4 +np.float32,0xbf126527,0xbf24c253,4 +np.float32,0x803f6660,0x803f6660,4 +np.float32,0xbf79de38,0xbfbd38b1,4 +np.float32,0x8046c2f0,0x8046c2f0,4 +np.float32,0x6dc74e,0x6dc74e,4 +np.float32,0xbec9c45e,0xbed4e768,4 +np.float32,0x3f0eedb6,0x3f1fe610,4 +np.float32,0x7e031999,0xbcc13026,4 +np.float32,0x7efc2fd7,0x41e4b284,4 +np.float32,0xbeab7454,0xbeb22a1b,4 +np.float32,0x805ee67b,0x805ee67b,4 +np.float32,0x7f76e58e,0xc2436659,4 +np.float32,0xbe62b024,0xbe667718,4 +np.float32,0x3eea0808,0x3efbd182,4 +np.float32,0xbf7fd00c,0xbfc70719,4 +np.float32,0x7f27b640,0xbf0d97e0,4 +np.float32,0x3f1b58a4,0x3f31b6f4,4 +np.float32,0x252a9f,0x252a9f,4 +np.float32,0x7f65f95a,0xbead5de3,4 +np.float32,0xfc6ea780,0x42d15801,4 +np.float32,0x7eac4c52,0xc0682424,4 +np.float32,0xbe8a3f5a,0xbe8db54d,4 +np.float32,0xbf1644e2,0xbf2a4abd,4 +np.float32,0x3fc96a,0x3fc96a,4 +np.float32,0x7f38c0e4,0x3cc04af8,4 +np.float32,0x3f623d75,0x3f9c065d,4 +np.float32,0x3ee6a51a,0x3ef7a058,4 +np.float32,0x3dd11020,0x3dd1cacf,4 +np.float32,0xb6918,0xb6918,4 +np.float32,0xfdd7a540,0x3f22f081,4 +np.float32,0x80798563,0x80798563,4 +np.float32,0x3e9a8b7a,0x3e9f6a7e,4 +np.float32,0xbea515d4,0xbeab0df5,4 +np.float32,0xbea9b9f4,0xbeb03abe,4 +np.float32,0xbf11a5fa,0xbf23b478,4 +np.float32,0xfd6cadf0,0xbfa2a878,4 +np.float32,0xbf6edd07,0xbfacbb78,4 +np.float32,0xff5c5328,0x3e2d1552,4 +np.float32,0xbea2f788,0xbea8b3f5,4 +np.float32,0x802efaeb,0x802efaeb,4 +np.float32,0xff1c85e5,0x41f8560e,4 +np.float32,0x3f53b123,0x3f8b18e1,4 +np.float32,0xff798c4a,0x4092e66f,4 +np.float32,0x7f2e6fe7,0xbdcbd58f,4 +np.float32,0xfe8a8196,0x3fd7fc56,4 +np.float32,0x5e7ad4,0x5e7ad4,4 +np.float32,0xbf23a02d,0xbf3e4533,4 +np.float32,0x3f31c55c,0x3f5531bf,4 +np.float32,0x80331be3,0x80331be3,4 +np.float32,0x8056960a,0x8056960a,4 +np.float32,0xff1c06ae,0xbfd26992,4 +np.float32,0xbe0cc4b0,0xbe0da96c,4 +np.float32,0x7e925ad5,0xbf8dba54,4 +np.float32,0x2c8cec,0x2c8cec,4 +np.float32,0x8011951e,0x8011951e,4 +np.float32,0x3f2caf84,0x3f4cb89f,4 +np.float32,0xbd32c220,0xbd32df33,4 +np.float32,0xbec358d6,0xbecd6996,4 +np.float32,0x3f6e4930,0x3fabeb92,4 +np.float32,0xbf6a3afd,0xbfa65a3a,4 +np.float32,0x80067764,0x80067764,4 +np.float32,0x3d8df1,0x3d8df1,4 +np.float32,0x7ee51cf2,0x409e4061,4 +np.float32,0x435f5d,0x435f5d,4 +np.float32,0xbf5b17f7,0xbf936ebe,4 +np.float32,0x3ecaacb5,0x3ed5f81f,4 +np.float32,0x807b0aa5,0x807b0aa5,4 +np.float32,0x52b40b,0x52b40b,4 +np.float32,0x146a97,0x146a97,4 +np.float32,0x7f42b952,0xbfdcb413,4 +np.float32,0xbf1a1af2,0xbf2fe1bb,4 +np.float32,0x3f312034,0x3f541aa2,4 +np.float32,0x3f281d60,0x3f4554f9,4 +np.float32,0x50e451,0x50e451,4 +np.float32,0xbe45838c,0xbe480016,4 +np.float32,0xff7d0aeb,0x3eb0746e,4 +np.float32,0x7f32a489,0xbf96af6d,4 +np.float32,0xbf1b4e27,0xbf31a769,4 +np.float32,0x3f242936,0x3f3f1a44,4 +np.float32,0xbf7482ff,0xbfb4f201,4 +np.float32,0x4bda38,0x4bda38,4 +np.float32,0xbf022208,0xbf0ea2bb,4 +np.float32,0x7d08ca95,0xbe904602,4 +np.float32,0x7ed2f356,0xc02b55ad,4 +np.float32,0xbf131204,0xbf25b734,4 +np.float32,0xff3464b4,0x3fb23706,4 +np.float32,0x5a97cf,0x5a97cf,4 +np.float32,0xbe52db70,0xbe55e388,4 +np.float32,0x3f52934f,0x3f89e2aa,4 +np.float32,0xfeea866a,0x40a2b33f,4 +np.float32,0x80333925,0x80333925,4 +np.float32,0xfef5d13e,0xc00139ec,4 +np.float32,0x3f4750ab,0x3f7c87ad,4 +np.float32,0x3e41bfdd,0x3e44185a,4 +np.float32,0xbf5b0572,0xbf935935,4 +np.float32,0xbe93c9da,0xbe9808d8,4 +np.float32,0x7f501f33,0xc0f9973c,4 +np.float32,0x800af035,0x800af035,4 +np.float32,0x3f29faf8,0x3f4852a8,4 +np.float32,0xbe1e4c20,0xbe1f920c,4 +np.float32,0xbf7e8616,0xbfc4d79d,4 +np.float32,0x43ffbf,0x43ffbf,4 +np.float32,0x7f28e8a9,0xbfa1ac24,4 +np.float32,0xbf1f9f92,0xbf3820bc,4 +np.float32,0x3f07e004,0x3f1641c4,4 +np.float32,0x3ef7ea7f,0x3f06a64a,4 +np.float32,0x7e013101,0x3f6080e6,4 +np.float32,0x7f122a4f,0xbf0a796f,4 +np.float32,0xfe096960,0x3ed7273a,4 +np.float32,0x3f06abf1,0x3f14a4b2,4 +np.float32,0x3e50ded3,0x3e53d0f1,4 +np.float32,0x7f50b346,0x3eabb536,4 +np.float32,0xff5adb0f,0xbd441972,4 +np.float32,0xbecefe46,0xbedb0f66,4 +np.float32,0x7da70bd4,0xbec66273,4 +np.float32,0x169811,0x169811,4 +np.float32,0xbee4dfee,0xbef5721a,4 +np.float32,0x3efbeae3,0x3f0936e6,4 +np.float32,0x8031bd61,0x8031bd61,4 +np.float32,0x8048e443,0x8048e443,4 +np.float32,0xff209aa6,0xbeb364cb,4 +np.float32,0xff477499,0x3c1b0041,4 +np.float32,0x803fe929,0x803fe929,4 +np.float32,0x3f70158b,0x3fae7725,4 +np.float32,0x7f795723,0x3e8e850a,4 +np.float32,0x3cba99,0x3cba99,4 +np.float32,0x80588d2a,0x80588d2a,4 +np.float32,0x805d1f05,0x805d1f05,4 +np.float32,0xff4ac09a,0xbefe614d,4 +np.float32,0x804af084,0x804af084,4 +np.float32,0x7c64ae63,0xc1a8b563,4 +np.float32,0x8078d793,0x8078d793,4 +np.float32,0x7f3e2436,0xbf8bf9d3,4 +np.float32,0x7ccec1,0x7ccec1,4 +np.float32,0xbf6462c7,0xbf9eb830,4 +np.float32,0x3f1002ca,0x3f216843,4 +np.float32,0xfe878ca6,0x409e73a5,4 +np.float32,0x3bd841d9,0x3bd842a7,4 +np.float32,0x7d406f41,0xbd9dcfa3,4 +np.float32,0x7c6d6,0x7c6d6,4 +np.float32,0x3f4ef360,0x3f86074b,4 +np.float32,0x805f534a,0x805f534a,4 +np.float32,0x1,0x1,4 +np.float32,0x3f739ee2,0x3fb39db2,4 +np.float32,0x3d0c2352,0x3d0c3153,4 +np.float32,0xfe8a4f2c,0x3edd8add,4 +np.float32,0x3e52eaa0,0x3e55f362,4 +np.float32,0x7bde9758,0xbf5ba5cf,4 +np.float32,0xff422654,0xbf41e487,4 +np.float32,0x385e5b,0x385e5b,4 +np.float32,0x5751dd,0x5751dd,4 +np.float32,0xff6c671c,0xc03e2d6d,4 +np.float32,0x1458be,0x1458be,4 +np.float32,0x80153d4d,0x80153d4d,4 +np.float32,0x7efd2adb,0x3e25458f,4 +np.float32,0xbe161880,0xbe172e12,4 +np.float32,0x7ecea1aa,0x40a66d79,4 +np.float32,0xbf5b02a2,0xbf9355f0,4 +np.float32,0x15d9ab,0x15d9ab,4 +np.float32,0x2dc7c7,0x2dc7c7,4 +np.float32,0xfebbf81a,0x4193f6e6,4 +np.float32,0xfe8e3594,0xc00a6695,4 +np.float32,0x185aa8,0x185aa8,4 +np.float32,0x3daea156,0x3daf0e00,4 +np.float32,0x3e071688,0x3e07e08e,4 +np.float32,0x802db9e6,0x802db9e6,4 +np.float32,0x7f7be2c4,0x3f1363dd,4 +np.float32,0x7eba3f5e,0xc13eb497,4 +np.float32,0x3de04a00,0x3de130a9,4 +np.float32,0xbf1022bc,0xbf2194eb,4 +np.float32,0xbf5b547e,0xbf93b53b,4 +np.float32,0x3e867bd6,0x3e89aa10,4 +np.float32,0xbea5eb5c,0xbeabfb73,4 +np.float32,0x7f1efae9,0x3ffca038,4 +np.float32,0xff5d0344,0xbe55dbbb,4 +np.float32,0x805167e7,0x805167e7,4 +np.float32,0xbdb3a020,0xbdb41667,4 +np.float32,0xbedea6b4,0xbeedd5fd,4 +np.float32,0x8053b45c,0x8053b45c,4 +np.float32,0x7ed370e9,0x3d90eba5,4 +np.float32,0xbefcd7da,0xbf09cf91,4 +np.float32,0x78b9ac,0x78b9ac,4 +np.float32,0xbf2f6dc0,0xbf5141ef,4 +np.float32,0x802d3a7b,0x802d3a7b,4 +np.float32,0xfd45d120,0x3fec31cc,4 +np.float32,0xbf7e7020,0xbfc4b2af,4 +np.float32,0xf04da,0xf04da,4 +np.float32,0xbe9819d4,0xbe9cbd35,4 +np.float32,0x8075ab35,0x8075ab35,4 +np.float32,0xbf052fdc,0xbf12aa2c,4 +np.float32,0x3f1530d0,0x3f28bd9f,4 +np.float32,0x80791881,0x80791881,4 +np.float32,0x67f309,0x67f309,4 +np.float32,0x3f12f16a,0x3f2588f5,4 +np.float32,0x3ecdac47,0x3ed97ff8,4 +np.float32,0xbf297fb7,0xbf478c39,4 +np.float32,0x8069fa80,0x8069fa80,4 +np.float32,0x807f940e,0x807f940e,4 +np.float32,0xbf648dc8,0xbf9eeecb,4 +np.float32,0x3de873b0,0x3de9748d,4 +np.float32,0x3f1aa645,0x3f30af1f,4 +np.float32,0xff227a62,0x3d8283cc,4 +np.float32,0xbf37187d,0xbf5e5f4c,4 +np.float32,0x803b1b1f,0x803b1b1f,4 +np.float32,0x3f58142a,0x3f8ff8da,4 +np.float32,0x8004339e,0x8004339e,4 +np.float32,0xbf0f5654,0xbf2077a4,4 +np.float32,0x3f17e509,0x3f2ca598,4 +np.float32,0x3f800000,0x3fc75923,4 +np.float32,0xfdf79980,0x42f13047,4 +np.float32,0x7f111381,0x3f13c4c9,4 +np.float32,0xbea40c70,0xbea9e724,4 +np.float32,0x110520,0x110520,4 +np.float32,0x60490d,0x60490d,4 +np.float32,0x3f6703ec,0x3fa21951,4 +np.float32,0xbf098256,0xbf187652,4 +np.float32,0x658951,0x658951,4 +np.float32,0x3f53bf16,0x3f8b2818,4 +np.float32,0xff451811,0xc0026068,4 +np.float32,0x80777ee0,0x80777ee0,4 +np.float32,0x3e4fcc19,0x3e52b286,4 +np.float32,0x7f387ee0,0x3ce93eb6,4 +np.float32,0xff51181f,0xbfca3ee4,4 +np.float32,0xbf5655ae,0xbf8e0304,4 +np.float32,0xff2f1dcd,0x40025471,4 +np.float32,0x7f6e58e5,0xbe9930d5,4 +np.float32,0x7adf11,0x7adf11,4 +np.float32,0xbe9a2bc2,0xbe9f0185,4 +np.float32,0x8065d3a0,0x8065d3a0,4 +np.float32,0x3ed6e826,0x3ee47c45,4 +np.float32,0x80598ea0,0x80598ea0,4 +np.float32,0x7f10b90a,0x40437bd0,4 +np.float32,0x27b447,0x27b447,4 +np.float32,0x7ecd861c,0x3fce250f,4 +np.float32,0x0,0x0,4 +np.float32,0xbeba82d6,0xbec3394c,4 +np.float32,0xbf4958b0,0xbf8048ea,4 +np.float32,0x7c643e,0x7c643e,4 +np.float32,0x580770,0x580770,4 +np.float32,0x805bf54a,0x805bf54a,4 +np.float32,0x7f1f3cee,0xbe1a54d6,4 +np.float32,0xfefefdea,0x3fa84576,4 +np.float32,0x7f007b7a,0x3e8a6d25,4 +np.float32,0xbf177959,0xbf2c0919,4 +np.float32,0xbf30fda0,0xbf53e058,4 +np.float32,0x3f0576be,0x3f130861,4 +np.float32,0x3f49380e,0x3f80283a,4 +np.float32,0xebc56,0xebc56,4 +np.float32,0x654e3b,0x654e3b,4 +np.float32,0x14a4d8,0x14a4d8,4 +np.float32,0xff69b3cb,0xbf822a88,4 +np.float32,0xbe9b6c1c,0xbea06109,4 +np.float32,0xbefddd7e,0xbf0a787b,4 +np.float32,0x4c4ebb,0x4c4ebb,4 +np.float32,0x7d0a74,0x7d0a74,4 +np.float32,0xbebb5f80,0xbec43635,4 +np.float32,0x7ee79723,0xc1c7f3f3,4 +np.float32,0x7f2be4c7,0xbfa6c693,4 +np.float32,0x805bc7d5,0x805bc7d5,4 +np.float32,0x8042f12c,0x8042f12c,4 +np.float32,0x3ef91be8,0x3f07697b,4 +np.float32,0x3cf37ac0,0x3cf38d1c,4 +np.float32,0x800000,0x800000,4 +np.float32,0xbe1ebf4c,0xbe200806,4 +np.float32,0x7f380862,0xbeb512e8,4 +np.float32,0xbe320064,0xbe33d0fc,4 +np.float32,0xff300b0c,0xbfadb805,4 +np.float32,0x308a06,0x308a06,4 +np.float32,0xbf084f6e,0xbf16d7b6,4 +np.float32,0xff47cab6,0x3f892b65,4 +np.float32,0xbed99f4a,0xbee7bfd5,4 +np.float32,0xff7d74c0,0x3ee88c9a,4 +np.float32,0x3c3d23,0x3c3d23,4 +np.float32,0x8074bde8,0x8074bde8,4 +np.float32,0x80042164,0x80042164,4 +np.float32,0x3e97c92a,0x3e9c6500,4 +np.float32,0x3b80e0,0x3b80e0,4 +np.float32,0xbf16646a,0xbf2a783d,4 +np.float32,0x7f3b4cb1,0xc01339be,4 +np.float32,0xbf31f36e,0xbf557fd0,4 +np.float32,0x7f540618,0xbe5f6fc1,4 +np.float32,0x7eee47d0,0x40a27e94,4 +np.float32,0x7f12f389,0xbebed654,4 +np.float32,0x56cff5,0x56cff5,4 +np.float32,0x8056032b,0x8056032b,4 +np.float32,0x3ed34e40,0x3ee02e38,4 +np.float32,0x7d51a908,0xbf19a90e,4 +np.float32,0x80000000,0x80000000,4 +np.float32,0xfdf73fd0,0xbf0f8cad,4 +np.float32,0x7ee4fe6d,0xbf1ea7e4,4 +np.float32,0x1f15ba,0x1f15ba,4 +np.float32,0xd18c3,0xd18c3,4 +np.float32,0x80797705,0x80797705,4 +np.float32,0x7ef07091,0x3f2f3b9a,4 +np.float32,0x7f552f41,0x3faf608c,4 +np.float32,0x3f779977,0x3fb9a7ad,4 +np.float32,0xfe1a7a50,0xbdadc4d1,4 +np.float32,0xbf449cf0,0xbf7740db,4 +np.float32,0xbe44e620,0xbe475cad,4 +np.float32,0x3f63a098,0x3f9dc2b5,4 +np.float32,0xfed40a12,0x4164533a,4 +np.float32,0x7a2bbb,0x7a2bbb,4 +np.float32,0xff7f7b9e,0xbeee8740,4 +np.float32,0x7ee27f8b,0x4233f53b,4 +np.float32,0xbf044c06,0xbf117c28,4 +np.float32,0xbeffde54,0xbf0bc49f,4 +np.float32,0xfeaef2e8,0x3ff258fe,4 +np.float32,0x527451,0x527451,4 +np.float32,0xbcef8d00,0xbcef9e7c,4 +np.float32,0xbf0e20c0,0xbf1ec9b2,4 +np.float32,0x8024afda,0x8024afda,4 +np.float32,0x7ef6cb3e,0x422cad0b,4 +np.float32,0x3c120,0x3c120,4 +np.float32,0xbf125c8f,0xbf24b62c,4 +np.float32,0x7e770a93,0x402c9d86,4 +np.float32,0xbd30a4e0,0xbd30c0ee,4 +np.float32,0xbf4d3388,0xbf843530,4 +np.float32,0x3f529072,0x3f89df92,4 +np.float32,0xff0270b1,0xbf81be9a,4 +np.float32,0x5e07e7,0x5e07e7,4 +np.float32,0x7bec32,0x7bec32,4 +np.float32,0x7fc00000,0x7fc00000,4 +np.float32,0x3e3ba5e0,0x3e3dc6e9,4 +np.float32,0x3ecb62d4,0x3ed6ce2c,4 +np.float32,0x3eb3dde8,0x3ebba68f,4 +np.float32,0x8063f952,0x8063f952,4 +np.float32,0x7f204aeb,0x3e88614e,4 +np.float32,0xbeae1ddc,0xbeb5278e,4 +np.float32,0x6829e9,0x6829e9,4 +np.float32,0xbf361a99,0xbf5ca354,4 +np.float32,0xbf24fbe6,0xbf406326,4 +np.float32,0x3f329d41,0x3f56a061,4 +np.float32,0xfed6d666,0x3e8f71a5,4 +np.float32,0x337f92,0x337f92,4 +np.float32,0xbe1c4970,0xbe1d8305,4 +np.float32,0xbe6b7e18,0xbe6fbbde,4 +np.float32,0x3f2267b9,0x3f3c61da,4 +np.float32,0xbee1ee94,0xbef1d628,4 +np.float32,0x7ecffc1a,0x3f02987e,4 +np.float32,0xbe9b1306,0xbe9fff3b,4 +np.float32,0xbeffacae,0xbf0ba468,4 +np.float32,0x7f800000,0xffc00000,4 +np.float32,0xfefc9aa8,0xc19de2a3,4 +np.float32,0x7d7185bb,0xbf9090ec,4 +np.float32,0x7edfbafd,0x3fe9352f,4 +np.float32,0x4ef2ec,0x4ef2ec,4 +np.float32,0x7f4cab2e,0xbff4e5dd,4 +np.float32,0xff3b1788,0x3e3c22e9,4 +np.float32,0x4e15ee,0x4e15ee,4 +np.float32,0xbf5451e6,0xbf8bc8a7,4 +np.float32,0x3f7f6d2e,0x3fc65e8b,4 +np.float32,0xbf1d9184,0xbf35071b,4 +np.float32,0xbf3a81cf,0xbf646d9b,4 +np.float32,0xbe71acc4,0xbe7643ab,4 +np.float32,0x528b7d,0x528b7d,4 +np.float32,0x2cb1d0,0x2cb1d0,4 +np.float32,0x3f324bf8,0x3f56161a,4 +np.float32,0x80709a21,0x80709a21,4 +np.float32,0x4bc448,0x4bc448,4 +np.float32,0x3e8bd600,0x3e8f6b7a,4 +np.float32,0xbeb97d30,0xbec20dd6,4 +np.float32,0x2a5669,0x2a5669,4 +np.float32,0x805f2689,0x805f2689,4 +np.float32,0xfe569f50,0x3fc51952,4 +np.float32,0x1de44c,0x1de44c,4 +np.float32,0x3ec7036c,0x3ed1ae67,4 +np.float32,0x8052b8e5,0x8052b8e5,4 +np.float32,0xff740a6b,0x3f4981a8,4 +np.float32,0xfee9bb70,0xc05e23be,4 +np.float32,0xff4e12c9,0x4002b4ad,4 +np.float32,0x803de0c2,0x803de0c2,4 +np.float32,0xbf433a07,0xbf74966f,4 +np.float32,0x803e60ca,0x803e60ca,4 +np.float32,0xbf19ee98,0xbf2fa07a,4 +np.float32,0x92929,0x92929,4 +np.float32,0x7f709c27,0x4257ba2d,4 +np.float32,0x803167c6,0x803167c6,4 +np.float32,0xbf095ead,0xbf184607,4 +np.float32,0x617060,0x617060,4 +np.float32,0x2d85b3,0x2d85b3,4 +np.float32,0x53d20b,0x53d20b,4 +np.float32,0x3e046838,0x3e052666,4 +np.float32,0xbe7c5fdc,0xbe80ce4b,4 +np.float32,0x3d18d060,0x3d18e289,4 +np.float32,0x804dc031,0x804dc031,4 +np.float32,0x3f224166,0x3f3c26cd,4 +np.float32,0x7d683e3c,0xbea24f25,4 +np.float32,0xbf3a92aa,0xbf648be4,4 +np.float32,0x8072670b,0x8072670b,4 +np.float32,0xbe281aec,0xbe29a1bc,4 +np.float32,0x7f09d918,0xc0942490,4 +np.float32,0x7ca9fd07,0x4018b990,4 +np.float32,0x7d36ac5d,0x3cf57184,4 +np.float32,0x8039b62f,0x8039b62f,4 +np.float32,0x6cad7b,0x6cad7b,4 +np.float32,0x3c0fd9ab,0x3c0fda9d,4 +np.float32,0x80299883,0x80299883,4 +np.float32,0x3c2d0e3e,0x3c2d0fe4,4 +np.float32,0x8002cf62,0x8002cf62,4 +np.float32,0x801dde97,0x801dde97,4 +np.float32,0x80411856,0x80411856,4 +np.float32,0x6ebce8,0x6ebce8,4 +np.float32,0x7b7d1a,0x7b7d1a,4 +np.float32,0x8031d3de,0x8031d3de,4 +np.float32,0x8005c4ab,0x8005c4ab,4 +np.float32,0xbf7dd803,0xbfc3b3ef,4 +np.float32,0x8017ae60,0x8017ae60,4 +np.float32,0xfe9316ce,0xbfe0544a,4 +np.float32,0x3f136bfe,0x3f2636ff,4 +np.float32,0x3df87b80,0x3df9b57d,4 +np.float32,0xff44c356,0xbf11c7ad,4 +np.float32,0x4914ae,0x4914ae,4 +np.float32,0x80524c21,0x80524c21,4 +np.float32,0x805c7dc8,0x805c7dc8,4 +np.float32,0xfed3c0aa,0xbff0c0ab,4 +np.float32,0x7eb2bfbb,0xbf4600bc,4 +np.float32,0xfec8df84,0x3f5bd350,4 +np.float32,0x3e5431a4,0x3e5748c3,4 +np.float32,0xbee6a3a0,0xbef79e86,4 +np.float32,0xbf6cc9b2,0xbfa9d61a,4 +np.float32,0x3f132bd5,0x3f25dbd9,4 +np.float32,0x7e6d2e48,0x3f9d025b,4 +np.float32,0x3edf430c,0x3eee942d,4 +np.float32,0x3f0d1b8a,0x3f1d60e1,4 +np.float32,0xbdf2f688,0xbdf41bfb,4 +np.float32,0xbe47a284,0xbe4a33ff,4 +np.float32,0x3eaa9fbc,0x3eb13be7,4 +np.float32,0xfe98d45e,0x3eb84517,4 +np.float32,0x7efc23b3,0x3dcc1c99,4 +np.float32,0x3ca36242,0x3ca367ce,4 +np.float32,0x3f76a944,0x3fb834e3,4 +np.float32,0xbf45207c,0xbf783f9b,4 +np.float32,0x3e7c1220,0x3e80a4f8,4 +np.float32,0x3f018200,0x3f0dd14e,4 +np.float32,0x3f53cdde,0x3f8b3839,4 +np.float32,0xbdbacb58,0xbdbb5063,4 +np.float32,0x804af68d,0x804af68d,4 +np.float32,0x3e2c12fc,0x3e2db65b,4 +np.float32,0x3f039433,0x3f10895a,4 +np.float32,0x7ef5193d,0x3f4115f7,4 +np.float32,0x8030afbe,0x8030afbe,4 +np.float32,0x3f06fa2a,0x3f150d5d,4 +np.float32,0x3f124442,0x3f2493d2,4 +np.float32,0xbeb5b792,0xbebdc090,4 +np.float32,0xbedc90a4,0xbeeb4de9,4 +np.float32,0x3f3ff8,0x3f3ff8,4 +np.float32,0x3ee75bc5,0x3ef881e4,4 +np.float32,0xfe80e3de,0xbf5cd535,4 +np.float32,0xf52eb,0xf52eb,4 +np.float32,0x80660ee8,0x80660ee8,4 +np.float32,0x3e173a58,0x3e185648,4 +np.float32,0xfe49520c,0xbf728d7c,4 +np.float32,0xbecbb8ec,0xbed73373,4 +np.float32,0xbf027ae0,0xbf0f173e,4 +np.float32,0xbcab6740,0xbcab6da8,4 +np.float32,0xbf2a15e2,0xbf487e11,4 +np.float32,0x3b781b,0x3b781b,4 +np.float32,0x44f559,0x44f559,4 +np.float32,0xff6a0ca6,0xc174d7c3,4 +np.float32,0x6460ef,0x6460ef,4 +np.float32,0xfe58009c,0x3ee2bb30,4 +np.float32,0xfec3c038,0x3e30d617,4 +np.float32,0x7f0687c0,0xbf62c820,4 +np.float32,0xbf44655e,0xbf76d589,4 +np.float32,0xbf42968c,0xbf735e78,4 +np.float32,0x80385503,0x80385503,4 +np.float32,0xbea7e3a2,0xbeae2d59,4 +np.float32,0x3dd0b770,0x3dd17131,4 +np.float32,0xbf4bc185,0xbf82b907,4 +np.float32,0xfefd7d64,0xbee05650,4 +np.float32,0xfaac3c00,0xbff23bc9,4 +np.float32,0xbf562f0d,0xbf8dd7f4,4 +np.float32,0x7fa00000,0x7fe00000,4 +np.float32,0x3e01bdb8,0x3e027098,4 +np.float32,0x3e2868ab,0x3e29f19e,4 +np.float32,0xfec55f2e,0x3f39f304,4 +np.float32,0xed4e,0xed4e,4 +np.float32,0x3e2b7330,0x3e2d11fa,4 +np.float32,0x7f738542,0x40cbbe16,4 +np.float32,0x3f123521,0x3f247e71,4 +np.float32,0x73572c,0x73572c,4 +np.float32,0x804936c8,0x804936c8,4 +np.float32,0x803b80d8,0x803b80d8,4 +np.float32,0x7f566c57,0xbee2855a,4 +np.float32,0xff0e3bd8,0xbff0543f,4 +np.float32,0x7d2b2fe7,0xbf94ba4c,4 +np.float32,0xbf0da470,0xbf1e1dc2,4 +np.float32,0xbd276500,0xbd277ce0,4 +np.float32,0xfcd15dc0,0x403ccc2a,4 +np.float32,0x80071e59,0x80071e59,4 +np.float32,0xbe9b0c34,0xbe9ff7be,4 +np.float32,0x3f4f9069,0x3f86ac50,4 +np.float32,0x80042a95,0x80042a95,4 +np.float32,0x7de28e39,0x3bc9b7f4,4 +np.float32,0xbf641935,0xbf9e5af8,4 +np.float32,0x8034f068,0x8034f068,4 +np.float32,0xff33a3d2,0xbf408e75,4 +np.float32,0xbcc51540,0xbcc51efc,4 +np.float32,0xff6d1ddf,0x3ef58f0e,4 +np.float32,0xbf64dfc4,0xbf9f5725,4 +np.float32,0xff068a06,0x3eea8987,4 +np.float32,0xff01c0af,0x3f24cdfe,4 +np.float32,0x3f4def7e,0x3f84f802,4 +np.float32,0xbf1b4ae7,0xbf31a299,4 +np.float32,0x8077df2d,0x8077df2d,4 +np.float32,0x3f0155c5,0x3f0d9785,4 +np.float32,0x5a54b2,0x5a54b2,4 +np.float32,0x7f271f9e,0x3efb2ef3,4 +np.float32,0xbf0ff2ec,0xbf215217,4 +np.float32,0x7f500130,0xbf8a7fdd,4 +np.float32,0xfed9891c,0xbf65c872,4 +np.float32,0xfecbfaae,0x403bdbc2,4 +np.float32,0x3f3a5aba,0x3f642772,4 +np.float32,0x7ebc681e,0xbd8df059,4 +np.float32,0xfe05e400,0xbfe35d74,4 +np.float32,0xbf295ace,0xbf4750ea,4 +np.float32,0x7ea055b2,0x3f62d6be,4 +np.float32,0xbd00b520,0xbd00bff9,4 +np.float32,0xbf7677aa,0xbfb7e8cf,4 +np.float32,0x3e83f788,0x3e86f816,4 +np.float32,0x801f6710,0x801f6710,4 +np.float32,0x801133cc,0x801133cc,4 +np.float32,0x41da2a,0x41da2a,4 +np.float32,0xff1622fd,0x3f023650,4 +np.float32,0x806c7a72,0x806c7a72,4 +np.float32,0x3f10779c,0x3f220bb4,4 +np.float32,0xbf08cf94,0xbf17848d,4 +np.float32,0xbecb55b4,0xbed6bebd,4 +np.float32,0xbf0a1528,0xbf193d7b,4 +np.float32,0x806a16bd,0x806a16bd,4 +np.float32,0xc222a,0xc222a,4 +np.float32,0x3930de,0x3930de,4 +np.float32,0x3f5c3588,0x3f94bca2,4 +np.float32,0x1215ad,0x1215ad,4 +np.float32,0x3ed15030,0x3eddcf67,4 +np.float32,0x7da83b2e,0x3fce0d39,4 +np.float32,0x32b0a8,0x32b0a8,4 +np.float32,0x805aed6b,0x805aed6b,4 +np.float32,0x3ef8e02f,0x3f074346,4 +np.float32,0xbdeb6780,0xbdec7250,4 +np.float32,0x3f6e3cec,0x3fabda61,4 +np.float32,0xfefd467a,0x3ef7821a,4 +np.float32,0xfef090fe,0x3bb752a2,4 +np.float32,0x8019c538,0x8019c538,4 +np.float32,0x3e8cf284,0x3e909e81,4 +np.float32,0xbe6c6618,0xbe70b0a2,4 +np.float32,0x7f50a539,0x3f367be1,4 +np.float32,0x8019fe2f,0x8019fe2f,4 +np.float32,0x800c3f48,0x800c3f48,4 +np.float32,0xfd054cc0,0xc0f52802,4 +np.float32,0x3d0cca20,0x3d0cd853,4 +np.float32,0xbf4a7c44,0xbf816e74,4 +np.float32,0x3f46fc40,0x3f7be153,4 +np.float32,0x807c5849,0x807c5849,4 +np.float32,0xd7e41,0xd7e41,4 +np.float32,0x70589b,0x70589b,4 +np.float32,0x80357b95,0x80357b95,4 +np.float32,0x3de239f0,0x3de326a5,4 +np.float32,0x800b08e3,0x800b08e3,4 +np.float32,0x807ec946,0x807ec946,4 +np.float32,0x3e2e4b83,0x3e2fff76,4 +np.float32,0x3f198e0f,0x3f2f12a6,4 +np.float32,0xbecb1aca,0xbed67979,4 +np.float32,0x80134082,0x80134082,4 +np.float32,0x3f3a269f,0x3f63ca05,4 +np.float32,0x3f1381e4,0x3f265622,4 +np.float32,0xff293080,0xbf10be6f,4 +np.float32,0xff800000,0xffc00000,4 +np.float32,0x37d196,0x37d196,4 +np.float32,0x7e57eea7,0x3e7d8138,4 +np.float32,0x804b1dae,0x804b1dae,4 +np.float32,0x7d9508f9,0xc1075b35,4 +np.float32,0x3f7bf468,0x3fc095e0,4 +np.float32,0x55472c,0x55472c,4 +np.float32,0x3ecdcd86,0x3ed9a738,4 +np.float32,0x3ed9be0f,0x3ee7e4e9,4 +np.float32,0x3e7e0ddb,0x3e81b2fe,4 +np.float32,0x7ee6c1d3,0x3f850634,4 +np.float32,0x800f6fad,0x800f6fad,4 +np.float32,0xfefb3bd6,0xbff68ecc,4 +np.float32,0x8013d6e2,0x8013d6e2,4 +np.float32,0x3f3a2cb6,0x3f63d4ee,4 +np.float32,0xff383c84,0x3e7854bb,4 +np.float32,0x3f21946e,0x3f3b1cea,4 +np.float32,0xff322ea2,0x3fb22f31,4 +np.float32,0x8065a024,0x8065a024,4 +np.float32,0x7f395e30,0xbefe0de1,4 +np.float32,0x5b52db,0x5b52db,4 +np.float32,0x7f7caea7,0x3dac8ded,4 +np.float32,0xbf0431f8,0xbf1159b2,4 +np.float32,0x7f15b25b,0xc02a3833,4 +np.float32,0x80131abc,0x80131abc,4 +np.float32,0x7e829d81,0xbeb2e93d,4 +np.float32,0x3f2c64d7,0x3f4c3e4d,4 +np.float32,0x7f228d48,0xc1518c74,4 +np.float32,0xfc3c6f40,0xbf00d585,4 +np.float32,0x7f754f0f,0x3e2152f5,4 +np.float32,0xff65d32b,0xbe8bd56c,4 +np.float32,0xfea6b8c0,0x41608655,4 +np.float32,0x3f7d4b05,0x3fc2c96a,4 +np.float32,0x3f463230,0x3f7a54da,4 +np.float32,0x805117bb,0x805117bb,4 +np.float32,0xbf2ad4f7,0xbf49b30e,4 +np.float32,0x3eaa01ff,0x3eb08b56,4 +np.float32,0xff7a02bb,0x3f095f73,4 +np.float32,0x759176,0x759176,4 +np.float32,0x803c18d5,0x803c18d5,4 +np.float32,0xbe0722d8,0xbe07ed16,4 +np.float32,0x3f4b4a99,0x3f823fc6,4 +np.float32,0x3f7d0451,0x3fc25463,4 +np.float32,0xfee31e40,0xbfb41091,4 +np.float32,0xbf733d2c,0xbfb30cf1,4 +np.float32,0x7ed81015,0x417c380c,4 +np.float32,0x7daafc3e,0xbe2a37ed,4 +np.float32,0x3e44f82b,0x3e476f67,4 +np.float32,0x7c8d99,0x7c8d99,4 +np.float32,0x3f7aec5a,0x3fbee991,4 +np.float32,0xff09fd55,0x3e0709d3,4 +np.float32,0xff4ba4df,0x4173c01f,4 +np.float32,0x3f43d944,0x3f75c7bd,4 +np.float32,0xff6a9106,0x40a10eff,4 +np.float32,0x3bc8341c,0x3bc834bf,4 +np.float32,0x3eea82,0x3eea82,4 +np.float32,0xfea36a3c,0x435729b2,4 +np.float32,0x7dcc1fb0,0x3e330053,4 +np.float32,0x3f616ae6,0x3f9b01ae,4 +np.float32,0x8030963f,0x8030963f,4 +np.float32,0x10d1e2,0x10d1e2,4 +np.float32,0xfeb9a8a6,0x40e6daac,4 +np.float32,0xbe1aba00,0xbe1bea3a,4 +np.float32,0x3cb6b4ea,0x3cb6bcac,4 +np.float32,0x3d8b0b64,0x3d8b422f,4 +np.float32,0x7b6894,0x7b6894,4 +np.float32,0x3e89dcde,0x3e8d4b4b,4 +np.float32,0x3f12b952,0x3f253974,4 +np.float32,0x1c316c,0x1c316c,4 +np.float32,0x7e2da535,0x3f95fe6b,4 +np.float32,0x3ae9a494,0x3ae9a4a4,4 +np.float32,0xbc5f5500,0xbc5f588b,4 +np.float32,0x3e7850fc,0x3e7d4d0e,4 +np.float32,0xbf800000,0xbfc75923,4 +np.float32,0x3e652d69,0x3e691502,4 +np.float32,0xbf6bdd26,0xbfa89129,4 +np.float32,0x3f441cfc,0x3f764a02,4 +np.float32,0x7f5445ff,0xc0906191,4 +np.float32,0x807b2ee3,0x807b2ee3,4 +np.float32,0xbeb6cab8,0xbebef9c0,4 +np.float32,0xff737277,0xbf327011,4 +np.float32,0xfc832aa0,0x402fd52e,4 +np.float32,0xbf0c7538,0xbf1c7c0f,4 +np.float32,0x7e1301c7,0xbf0ee63e,4 +np.float64,0xbfe0ef7df7a1defc,0xbfe2b76a8d8aeb35,1 +np.float64,0x7fdd9c2eae3b385c,0xbfc00d6885485039,1 +np.float64,0xbfb484c710290990,0xbfb4900e0a527555,1 +np.float64,0x7fe73e5d6cee7cba,0x3fefbf70a56b60d3,1 +np.float64,0x800a110aa8d42216,0x800a110aa8d42216,1 +np.float64,0xffedd4f3f3bba9e7,0xbff076f8c4124919,1 +np.float64,0x800093407f812682,0x800093407f812682,1 +np.float64,0x800a23150e54462a,0x800a23150e54462a,1 +np.float64,0xbfb1076864220ed0,0xbfb10dd95a74b733,1 +np.float64,0x3fed1f8b37fa3f16,0x3ff496100985211f,1 +np.float64,0x3fdf762f84beec5f,0x3fe1223eb04a17e0,1 +np.float64,0x53fd4e0aa7faa,0x53fd4e0aa7faa,1 +np.float64,0x3fdbd283bdb7a507,0x3fddb7ec9856a546,1 +np.float64,0xbfe43f449d687e89,0xbfe77724a0d3072b,1 +np.float64,0x618b73bcc316f,0x618b73bcc316f,1 +np.float64,0x67759424ceeb3,0x67759424ceeb3,1 +np.float64,0xbfe4b6f7d9a96df0,0xbfe831371f3bd7a8,1 +np.float64,0x800a531b8b74a637,0x800a531b8b74a637,1 +np.float64,0xffeeffd5c37dffab,0x3fea140cbc2c3726,1 +np.float64,0x3fe648e2002c91c4,0x3feac1b8816f972a,1 +np.float64,0x800f16242a1e2c48,0x800f16242a1e2c48,1 +np.float64,0xffeeff8e1dbdff1b,0xc000b555f117dce7,1 +np.float64,0x3fdf1cf73fbe39f0,0x3fe0e9032401135b,1 +np.float64,0x7fe19c388b633870,0x3fd5271b69317d5b,1 +np.float64,0x918f226d231e5,0x918f226d231e5,1 +np.float64,0x4cc19ab499834,0x4cc19ab499834,1 +np.float64,0xbd3121d57a624,0xbd3121d57a624,1 +np.float64,0xbfd145d334a28ba6,0xbfd1b468866124d6,1 +np.float64,0x8bdbf41517b7f,0x8bdbf41517b7f,1 +np.float64,0x3fd1b8cb3ea37198,0x3fd2306b13396cae,1 +np.float64,0xbfd632a959ac6552,0xbfd7220fcfb5ef78,1 +np.float64,0x1cdaafc639b57,0x1cdaafc639b57,1 +np.float64,0x3febdcce1577b99c,0x3ff2fe076195a2bc,1 +np.float64,0x7fca6e945934dd28,0x3ff43040df7024e8,1 +np.float64,0x3fbe08e78e3c11cf,0x3fbe2c60e6b48f75,1 +np.float64,0x7fc1ed0d0523da19,0x3ff55f8dcad9440f,1 +np.float64,0xbfdc729b8cb8e538,0xbfde7b6e15dd60c4,1 +np.float64,0x3fd219404f243281,0x3fd298d7b3546531,1 +np.float64,0x3fe715c3f56e2b88,0x3fec255b5a59456e,1 +np.float64,0x7fe8b88e74b1711c,0x3ff60efd2c81d13d,1 +np.float64,0xa1d2b9fd43a57,0xa1d2b9fd43a57,1 +np.float64,0xffc1818223230304,0xbfb85c6c1e8018e7,1 +np.float64,0x3fde38ac8b3c7159,0x3fe0580c7e228576,1 +np.float64,0x8008faf7b491f5f0,0x8008faf7b491f5f0,1 +np.float64,0xffe7a1d751af43ae,0xbf7114cd7bbcd981,1 +np.float64,0xffec2db1b4b85b62,0xbff5cae759667f83,1 +np.float64,0x7fefce1ae27f9c35,0x3ff4b8b88f4876cf,1 +np.float64,0x7fd1ff56a523feac,0xbff342ce192f14dd,1 +np.float64,0x80026b3e3f84d67d,0x80026b3e3f84d67d,1 +np.float64,0xffedee5879bbdcb0,0xc02fae11508b2be0,1 +np.float64,0x8003c0dc822781ba,0x8003c0dc822781ba,1 +np.float64,0xffe38a79eca714f4,0xc008aa23b7a63980,1 +np.float64,0xbfda70411eb4e082,0xbfdc0d7e29c89010,1 +np.float64,0x800a5e34f574bc6a,0x800a5e34f574bc6a,1 +np.float64,0x3fc19fac6e233f59,0x3fc1bc66ac0d73d4,1 +np.float64,0x3a8a61ea7514d,0x3a8a61ea7514d,1 +np.float64,0x3fb57b536e2af6a0,0x3fb588451f72f44c,1 +np.float64,0x7fd68c6d082d18d9,0xc032ac926b665c9a,1 +np.float64,0xd5b87cfdab710,0xd5b87cfdab710,1 +np.float64,0xfe80b20bfd017,0xfe80b20bfd017,1 +np.float64,0x3fef8781e37f0f04,0x3ff8215fe2c1315a,1 +np.float64,0xffedddbb9c3bbb76,0x3fd959b82258a32a,1 +np.float64,0x3fc7d41f382fa83e,0x3fc81b94c3a091ba,1 +np.float64,0xffc3275dcf264ebc,0x3fb2b3d4985c6078,1 +np.float64,0x7fe34d2b7ba69a56,0x40001f3618e3c7c9,1 +np.float64,0x3fd64ae35fac95c7,0x3fd73d77e0b730f8,1 +np.float64,0x800e53bf6b3ca77f,0x800e53bf6b3ca77f,1 +np.float64,0xbfddf7c9083bef92,0xbfe02f392744d2d1,1 +np.float64,0x1c237cc038471,0x1c237cc038471,1 +np.float64,0x3fe4172beea82e58,0x3fe739b4bf16bc7e,1 +np.float64,0xfa950523f52a1,0xfa950523f52a1,1 +np.float64,0xffc839a2c5307344,0xbff70ff8a3c9247f,1 +np.float64,0x264f828c4c9f1,0x264f828c4c9f1,1 +np.float64,0x148a650a2914e,0x148a650a2914e,1 +np.float64,0x3fe8d255c0b1a4ac,0x3fef623c3ea8d6e3,1 +np.float64,0x800f4fbb28be9f76,0x800f4fbb28be9f76,1 +np.float64,0x7fdca57bcfb94af7,0x3ff51207563fb6cb,1 +np.float64,0x3fe4944107692882,0x3fe7fad593235364,1 +np.float64,0x800119b4f1a2336b,0x800119b4f1a2336b,1 +np.float64,0xbfe734075e6e680e,0xbfec5b35381069f2,1 +np.float64,0xffeb3c00db767801,0xbfbbd7d22df7b4b3,1 +np.float64,0xbfe95c658cb2b8cb,0xbff03ad5e0bc888a,1 +np.float64,0xffeefeb58fbdfd6a,0xbfd5c9264deb0e11,1 +np.float64,0x7fccc80fde39901f,0xc012c60f914f3ca2,1 +np.float64,0x3fe5da289c2bb451,0x3fea07ad00a0ca63,1 +np.float64,0x800e364b0a5c6c96,0x800e364b0a5c6c96,1 +np.float64,0x3fcf9ea7d23f3d50,0x3fd023b72e8c9dcf,1 +np.float64,0x800a475cfc948eba,0x800a475cfc948eba,1 +np.float64,0xffd4e0d757a9c1ae,0xbfa89d573352e011,1 +np.float64,0xbfd4dbec8229b7da,0xbfd5a165f12c7c40,1 +np.float64,0xffe307ab51260f56,0x3fe6b1639da58c3f,1 +np.float64,0xbfe6955a546d2ab4,0xbfeb44ae2183fee9,1 +np.float64,0xbfca1f18f5343e30,0xbfca7d804ccccdf4,1 +np.float64,0xe9f4dfebd3e9c,0xe9f4dfebd3e9c,1 +np.float64,0xfff0000000000000,0xfff8000000000000,1 +np.float64,0x8008e69c0fb1cd38,0x8008e69c0fb1cd38,1 +np.float64,0xbfead1ccf975a39a,0xbff1c84b3db8ca93,1 +np.float64,0x25a982424b531,0x25a982424b531,1 +np.float64,0x8010000000000000,0x8010000000000000,1 +np.float64,0x80056204ea0ac40b,0x80056204ea0ac40b,1 +np.float64,0x800d1442d07a2886,0x800d1442d07a2886,1 +np.float64,0xbfaef3dadc3de7b0,0xbfaefd85ae6205f0,1 +np.float64,0x7fe969ce4b32d39c,0xbff3c4364fc6778f,1 +np.float64,0x7fe418bac0a83175,0x402167d16b1efe0b,1 +np.float64,0x3fd7c82a25af9054,0x3fd8f0c701315672,1 +np.float64,0x80013782a7826f06,0x80013782a7826f06,1 +np.float64,0x7fc031c7ee20638f,0x400747ab705e6904,1 +np.float64,0x3fe8cf327ff19e65,0x3fef5c14f8aafa89,1 +np.float64,0xbfe331a416a66348,0xbfe5e2290a098dd4,1 +np.float64,0x800607b2116c0f65,0x800607b2116c0f65,1 +np.float64,0x7fb40448f0280891,0xbfd43d4f0ffa1d64,1 +np.float64,0x7fefffffffffffff,0xbf74530cfe729484,1 +np.float64,0x3fe39b5444a736a9,0x3fe67eaa0b6acf27,1 +np.float64,0x3fee4733c4fc8e68,0x3ff631eabeef9696,1 +np.float64,0xbfec840f3b79081e,0xbff3cc8563ab2e74,1 +np.float64,0xbfc8f6854c31ed0c,0xbfc948caacb3bba0,1 +np.float64,0xffbcf754a639eea8,0xbfc88d17cad3992b,1 +np.float64,0x8000bd3163417a64,0x8000bd3163417a64,1 +np.float64,0x3fe766d0eaeecda2,0x3fecb660882f7024,1 +np.float64,0xb6cc30156d986,0xb6cc30156d986,1 +np.float64,0xffc0161f9f202c40,0x3fe19bdefe5cf8b1,1 +np.float64,0xffe1e462caa3c8c5,0x3fe392c47feea17b,1 +np.float64,0x30a36a566146e,0x30a36a566146e,1 +np.float64,0x3fa996f580332deb,0x3fa99c6b4f2abebe,1 +np.float64,0x3fba71716e34e2e0,0x3fba899f35edba1d,1 +np.float64,0xbfe8f7e5e971efcc,0xbfefac431a0e3d55,1 +np.float64,0xf48f1803e91e3,0xf48f1803e91e3,1 +np.float64,0x7fe3edc0a127db80,0xc03d1a579a5d74a8,1 +np.float64,0xffeba82056375040,0x3fdfd701308700db,1 +np.float64,0xbfeb5a924cf6b524,0xbff2640de7cd107f,1 +np.float64,0xfa4cd1a9f499a,0xfa4cd1a9f499a,1 +np.float64,0x800de1be7b9bc37d,0x800de1be7b9bc37d,1 +np.float64,0xffd44e56ad289cae,0x3fdf4b8085db9b67,1 +np.float64,0xbfe4fb3aea69f676,0xbfe89d2cc46fcc50,1 +np.float64,0xbfe596495d6b2c92,0xbfe997a589a1f632,1 +np.float64,0x6f55a2b8deab5,0x6f55a2b8deab5,1 +np.float64,0x7fe72dc4712e5b88,0x4039c4586b28c2bc,1 +np.float64,0x89348bd712692,0x89348bd712692,1 +np.float64,0xffe062156120c42a,0x4005f0580973bc77,1 +np.float64,0xbfeabc714d7578e2,0xbff1b07e2fa57dc0,1 +np.float64,0x8003a56b3e874ad7,0x8003a56b3e874ad7,1 +np.float64,0x800eeadfb85dd5c0,0x800eeadfb85dd5c0,1 +np.float64,0x46d77a4c8daf0,0x46d77a4c8daf0,1 +np.float64,0x8000c06e7dc180de,0x8000c06e7dc180de,1 +np.float64,0x3fe428d211e851a4,0x3fe754b1c00a89bc,1 +np.float64,0xc5be11818b7c2,0xc5be11818b7c2,1 +np.float64,0x7fefc244893f8488,0x401133dc54f52de5,1 +np.float64,0x3fde30eee93c61de,0x3fe0532b827543a6,1 +np.float64,0xbfd447f48b288fea,0xbfd4fd0654f90718,1 +np.float64,0xbfde98dc7b3d31b8,0xbfe094df12f84a06,1 +np.float64,0x3fed2c1a1dfa5834,0x3ff4a6c4f3470a65,1 +np.float64,0xbfe992165073242d,0xbff071ab039c9177,1 +np.float64,0x3fd0145d1b2028ba,0x3fd06d3867b703dc,1 +np.float64,0x3fe179457362f28b,0x3fe3722f1d045fda,1 +np.float64,0x800e28964fbc512d,0x800e28964fbc512d,1 +np.float64,0x8004a5d785294bb0,0x8004a5d785294bb0,1 +np.float64,0xbfd652f2272ca5e4,0xbfd7469713125120,1 +np.float64,0x7fe61f49036c3e91,0xbf9b6ccdf2d87e70,1 +np.float64,0xffb7d47dd02fa8f8,0xc004449a82320b13,1 +np.float64,0x3feb82f996b705f3,0x3ff29336c738a4c5,1 +np.float64,0x3fbb7fceea36ffa0,0x3fbb9b02c8ad7f93,1 +np.float64,0x80004519fb208a35,0x80004519fb208a35,1 +np.float64,0xbfe0539114e0a722,0xbfe1e86dc5aa039c,1 +np.float64,0x0,0x0,1 +np.float64,0xbfe99d1125f33a22,0xbff07cf8ec04300f,1 +np.float64,0xffd4fbeecc29f7de,0x3ffab76775a8455f,1 +np.float64,0xbfbf1c618e3e38c0,0xbfbf43d2764a8333,1 +np.float64,0x800cae02a9d95c06,0x800cae02a9d95c06,1 +np.float64,0x3febc47d3bf788fa,0x3ff2e0d7cf8ef509,1 +np.float64,0x3fef838f767f071f,0x3ff81aeac309bca0,1 +np.float64,0xbfd5e70716abce0e,0xbfd6ccb033ef7a35,1 +np.float64,0x3f9116fa60222df5,0x3f9117625f008e0b,1 +np.float64,0xffe02b1e5f20563c,0xbfe6b2ec293520b7,1 +np.float64,0xbf9b5aec3036b5e0,0xbf9b5c96c4c7f951,1 +np.float64,0xfdb0169bfb603,0xfdb0169bfb603,1 +np.float64,0x7fcdd1d51c3ba3a9,0x401f0e12fa0b7570,1 +np.float64,0xbfd088103fa11020,0xbfd0e8c4a333ffb2,1 +np.float64,0x3fe22df82ee45bf0,0x3fe46d03a7c14de2,1 +np.float64,0xbfd57b0c28aaf618,0xbfd65349a6191de5,1 +np.float64,0x3fe0a42f50a1485f,0x3fe252e26775d9a4,1 +np.float64,0x800fab4e363f569c,0x800fab4e363f569c,1 +np.float64,0xffe9f0ed63f3e1da,0xbfe278c341b171d5,1 +np.float64,0x7fe26c244664d848,0xbfb325269dad1996,1 +np.float64,0xffe830410bf06081,0xc00181a39f606e96,1 +np.float64,0x800c548a0c78a914,0x800c548a0c78a914,1 +np.float64,0x800f94761ebf28ec,0x800f94761ebf28ec,1 +np.float64,0x3fe5984845eb3091,0x3fe99aeb653c666d,1 +np.float64,0x7fe93e5bf8f27cb7,0xc010d159fa27396a,1 +np.float64,0xffefffffffffffff,0x3f74530cfe729484,1 +np.float64,0x4c83f1269907f,0x4c83f1269907f,1 +np.float64,0x3fde0065a8bc00cc,0x3fe034a1cdf026d4,1 +np.float64,0x800743810d6e8703,0x800743810d6e8703,1 +np.float64,0x80040662d5280cc6,0x80040662d5280cc6,1 +np.float64,0x3fed20b2c5ba4166,0x3ff497988519d7aa,1 +np.float64,0xffe8fa15e5f1f42b,0x3fff82ca76d797b4,1 +np.float64,0xbb72e22f76e5d,0xbb72e22f76e5d,1 +np.float64,0x7fc18ffa7c231ff4,0xbff4b8b4c3315026,1 +np.float64,0xbfe8d1ac44f1a358,0xbfef60efc4f821e3,1 +np.float64,0x3fd38c1fe8271840,0x3fd42dc37ff7262b,1 +np.float64,0xe577bee5caef8,0xe577bee5caef8,1 +np.float64,0xbff0000000000000,0xbff8eb245cbee3a6,1 +np.float64,0xffcb3a9dd436753c,0x3fcd1a3aff1c3fc7,1 +np.float64,0x7fe44bf2172897e3,0x3ff60bfe82a379f4,1 +np.float64,0x8009203823924071,0x8009203823924071,1 +np.float64,0x7fef8e0abc7f1c14,0x3fe90e4962d47ce5,1 +np.float64,0xffda50004434a000,0x3fb50dee03e1418b,1 +np.float64,0x7fe2ff276ea5fe4e,0xc0355b7d2a0a8d9d,1 +np.float64,0x3fd0711ba5a0e238,0x3fd0d03823d2d259,1 +np.float64,0xe7625b03cec4c,0xe7625b03cec4c,1 +np.float64,0xbfd492c8d7a92592,0xbfd55006cde8d300,1 +np.float64,0x8001fee99f23fdd4,0x8001fee99f23fdd4,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0xfa15df97f42bc,0xfa15df97f42bc,1 +np.float64,0xbfec3fdca9787fb9,0xbff377164b13c7a9,1 +np.float64,0xbcec10e579d82,0xbcec10e579d82,1 +np.float64,0xbfc3b4e2132769c4,0xbfc3dd1fcc7150a6,1 +np.float64,0x80045b149ee8b62a,0x80045b149ee8b62a,1 +np.float64,0xffe044554c2088aa,0xbff741436d558785,1 +np.float64,0xffcc65f09f38cbe0,0xc0172b4adc2d317d,1 +np.float64,0xf68b2d3bed166,0xf68b2d3bed166,1 +np.float64,0x7fc7f44c572fe898,0x3fec69f3b1eca790,1 +np.float64,0x3fac51f61438a3ec,0x3fac595d34156002,1 +np.float64,0xbfeaa9f256f553e5,0xbff19bfdf5984326,1 +np.float64,0x800e4742149c8e84,0x800e4742149c8e84,1 +np.float64,0xbfc493df132927c0,0xbfc4c1ba4268ead9,1 +np.float64,0xbfbf0c56383e18b0,0xbfbf3389fcf50c72,1 +np.float64,0xbf978a0e082f1420,0xbf978b1dd1da3d3c,1 +np.float64,0xbfe04375356086ea,0xbfe1d34c57314dd1,1 +np.float64,0x3feaeeb29b75dd65,0x3ff1e8b772374979,1 +np.float64,0xbfe15e42c3a2bc86,0xbfe34d45d56c5c15,1 +np.float64,0x3fe507429a6a0e85,0x3fe8b058176b3225,1 +np.float64,0x3feee2b26c3dc565,0x3ff71b73203de921,1 +np.float64,0xbfd496577aa92cae,0xbfd553fa7fe15a5f,1 +np.float64,0x7fe2c10953e58212,0x3fc8ead6a0d14bbf,1 +np.float64,0x800035b77aa06b70,0x800035b77aa06b70,1 +np.float64,0x2329201e46525,0x2329201e46525,1 +np.float64,0xbfe6225c9a6c44b9,0xbfea80861590fa02,1 +np.float64,0xbfd6925030ad24a0,0xbfd78e70b1c2215d,1 +np.float64,0xbfd82225c4b0444c,0xbfd958a60f845b39,1 +np.float64,0xbb03d8a17609,0xbb03d8a17609,1 +np.float64,0x7fc33967b12672ce,0x40001e00c9af4002,1 +np.float64,0xff9373c6d026e780,0xbff308654a459d3d,1 +np.float64,0x3feab1f9c5f563f4,0x3ff1a4e0fd2f093d,1 +np.float64,0xbf993ef768327de0,0xbf994046b64e308b,1 +np.float64,0xffb87382fc30e708,0xbfde0accb83c891b,1 +np.float64,0x800bb3a118176743,0x800bb3a118176743,1 +np.float64,0x800c810250d90205,0x800c810250d90205,1 +np.float64,0xbfd2c4eb9ba589d8,0xbfd3539508b4a4a8,1 +np.float64,0xbee1f5437dc3f,0xbee1f5437dc3f,1 +np.float64,0x3fc07aeab520f5d8,0x3fc0926272f9d8e2,1 +np.float64,0xbfe23747a3246e90,0xbfe47a20a6e98687,1 +np.float64,0x3fde1296debc252c,0x3fe0401143ff6b5c,1 +np.float64,0xbfcec8c2f73d9184,0xbfcf644e25ed3b74,1 +np.float64,0xff9314f2c82629e0,0x40559a0f9099dfd1,1 +np.float64,0xbfe27487afa4e910,0xbfe4d0e01200bde6,1 +np.float64,0xffb3d6637627acc8,0x3fe326d4b1e1834f,1 +np.float64,0xffe6f84d642df09a,0x3fc73fa9f57c3acb,1 +np.float64,0xffe67cf76fecf9ee,0xc01cf48c97937ef9,1 +np.float64,0x7fdc73fc12b8e7f7,0xbfcfcecde9331104,1 +np.float64,0xffdcf8789239f0f2,0x3fe345e3b8e28776,1 +np.float64,0x800a70af5314e15f,0x800a70af5314e15f,1 +np.float64,0xffc862300730c460,0x3fc4e9ea813beca7,1 +np.float64,0xbfcc6961bd38d2c4,0xbfcce33bfa6c6bd1,1 +np.float64,0xbfc9b76bbf336ed8,0xbfca117456ac37e5,1 +np.float64,0x7fb86e829430dd04,0x400a5bd7a18e302d,1 +np.float64,0x7fb9813ef833027d,0xbfe5a6494f143625,1 +np.float64,0x8005085e2c2a10bd,0x8005085e2c2a10bd,1 +np.float64,0xffe5af099d6b5e12,0x40369bbe31e03e06,1 +np.float64,0xffde03b1fd3c0764,0x3ff061120aa1f52a,1 +np.float64,0x7fa4eb6cdc29d6d9,0x3fe9defbe9010322,1 +np.float64,0x800803f4b11007ea,0x800803f4b11007ea,1 +np.float64,0x7febd50f6df7aa1e,0xbffcf540ccf220dd,1 +np.float64,0x7fed454f08fa8a9d,0xbffc2a8b81079403,1 +np.float64,0xbfed7e8c69bafd19,0xbff5161e51ba6634,1 +np.float64,0xffef92e78eff25ce,0xbffefeecddae0ad3,1 +np.float64,0x7fe5b9b413ab7367,0xbfc681ba29704176,1 +np.float64,0x29284e805252,0x29284e805252,1 +np.float64,0xffed3955bcfa72ab,0xbfc695acb5f468de,1 +np.float64,0x3fe464ee1ca8c9dc,0x3fe7b140ce50fdca,1 +np.float64,0xffe522ae4bea455c,0x3feb957c146e66ef,1 +np.float64,0x8000000000000000,0x8000000000000000,1 +np.float64,0x3fd0c353a2a186a8,0x3fd1283aaa43a411,1 +np.float64,0x3fdb30a749b6614f,0x3fdcf40df006ed10,1 +np.float64,0x800109213cc21243,0x800109213cc21243,1 +np.float64,0xbfe72aa0c5ee5542,0xbfec4a713f513bc5,1 +np.float64,0x800865344ad0ca69,0x800865344ad0ca69,1 +np.float64,0x7feb7df60eb6fbeb,0x3fb1df06a67aa22f,1 +np.float64,0x3fe83a5dd93074bc,0x3fee3d63cda72636,1 +np.float64,0xbfde70e548bce1ca,0xbfe07b8e19c9dac6,1 +np.float64,0xbfeea38d537d471b,0xbff6bb18c230c0be,1 +np.float64,0x3fefeebbc47fdd78,0x3ff8cdaa53b7c7b4,1 +np.float64,0x7fe6512e20eca25b,0xbff623cee44a22b5,1 +np.float64,0xf8fa5ca3f1f4c,0xf8fa5ca3f1f4c,1 +np.float64,0x7fd12d00ed225a01,0xbfe90d518ea61faf,1 +np.float64,0x80027db43504fb69,0x80027db43504fb69,1 +np.float64,0xffc10a01aa221404,0x3fcc2065b3d0157b,1 +np.float64,0xbfef8286e87f050e,0xbff8193a54449b59,1 +np.float64,0xbfc73178092e62f0,0xbfc7735072ba4593,1 +np.float64,0x3fc859d70630b3ae,0x3fc8a626522af1c0,1 +np.float64,0x3fe4654c4268ca99,0x3fe7b1d2913eda1a,1 +np.float64,0xbfce93cd843d279c,0xbfcf2c2ef16a0957,1 +np.float64,0xffbcaa16d4395430,0xbfd511ced032d784,1 +np.float64,0xbfe91f980e723f30,0xbfeffb39cf8c7746,1 +np.float64,0x800556fb6f0aadf8,0x800556fb6f0aadf8,1 +np.float64,0xffd009cde520139c,0x3fe4fa83b1e93d28,1 +np.float64,0x7febc0675e3780ce,0x3feb53930c004dae,1 +np.float64,0xbfe7f975bdeff2ec,0xbfedc36e6729b010,1 +np.float64,0x45aff57c8b5ff,0x45aff57c8b5ff,1 +np.float64,0xbfec7ebd0138fd7a,0xbff3c5cab680aae0,1 +np.float64,0x8009448003b28900,0x8009448003b28900,1 +np.float64,0x3fca4b992d349732,0x3fcaabebcc86aa9c,1 +np.float64,0x3fca069161340d20,0x3fca63ecc742ff3a,1 +np.float64,0x80063bc80bec7791,0x80063bc80bec7791,1 +np.float64,0xbfe1764bffe2ec98,0xbfe36e1cb30cec94,1 +np.float64,0xffd0dba72f21b74e,0x3fb1834964d57ef6,1 +np.float64,0xbfe31848fc263092,0xbfe5bd066445cbc3,1 +np.float64,0xbfd1fb227323f644,0xbfd278334e27f02d,1 +np.float64,0xffdc59069fb8b20e,0xbfdfc363f559ea2c,1 +np.float64,0x3fdea52a52bd4a55,0x3fe09cada4e5344c,1 +np.float64,0x3f715e55a022bd00,0x3f715e5c72a2809e,1 +np.float64,0x1d1ac6023a35a,0x1d1ac6023a35a,1 +np.float64,0x7feacc71627598e2,0x400486b82121da19,1 +np.float64,0xa0287fa340510,0xa0287fa340510,1 +np.float64,0xffe352c5abe6a58b,0xc002623346060543,1 +np.float64,0x7fed577a23baaef3,0x3fda19bc8fa3b21f,1 +np.float64,0x3fde8dd5263d1baa,0x3fe08de0fedf7029,1 +np.float64,0x3feddd3be2bbba78,0x3ff599b2f3e018cc,1 +np.float64,0xc7a009f58f401,0xc7a009f58f401,1 +np.float64,0xbfef03d5a4fe07ab,0xbff74ee08681f47b,1 +np.float64,0x7fe2cf60eea59ec1,0x3fe905fb44f8cc60,1 +np.float64,0xbfe498fcab6931fa,0xbfe8023a6ff8becf,1 +np.float64,0xbfef7142acfee285,0xbff7fd196133a595,1 +np.float64,0xd214ffdba42a0,0xd214ffdba42a0,1 +np.float64,0x8006de7d78cdbcfc,0x8006de7d78cdbcfc,1 +np.float64,0xb247d34f648fb,0xb247d34f648fb,1 +np.float64,0xbfdd5bece6bab7da,0xbfdf9ba63ca2c5b2,1 +np.float64,0x7fe874650af0e8c9,0x3fe74204e122c10f,1 +np.float64,0x800768c49baed18a,0x800768c49baed18a,1 +np.float64,0x3fb4c0a192298140,0x3fb4cc4c8aa43300,1 +np.float64,0xbfa740531c2e80a0,0xbfa7446b7c74ae8e,1 +np.float64,0x7fe10d6edf221add,0x3fedbcd2eae26657,1 +np.float64,0xbfe9175d0f722eba,0xbfefeaca7f32c6e3,1 +np.float64,0x953e11d32a7c2,0x953e11d32a7c2,1 +np.float64,0x80032df90c465bf3,0x80032df90c465bf3,1 +np.float64,0xffec5b799638b6f2,0xbfe95cd2c69be12c,1 +np.float64,0xffe0c3cfa9a1879f,0x3fe20b99b0c108ce,1 +np.float64,0x3fb610d8e22c21b2,0x3fb61ee0d6c16df8,1 +np.float64,0xffe16bb39962d766,0xc016d370381b6b42,1 +np.float64,0xbfdc72edb238e5dc,0xbfde7bd2de10717a,1 +np.float64,0xffed52dee3baa5bd,0xc01994c08899129a,1 +np.float64,0xffa92aab08325550,0xbff2b881ce363cbd,1 +np.float64,0x7fe028282de0504f,0xc0157ff96c69a9c7,1 +np.float64,0xbfdb2151bf3642a4,0xbfdce196fcc35857,1 +np.float64,0x3fcffbd13c3ff7a2,0x3fd0554b5f0371ac,1 +np.float64,0x800d206bff1a40d8,0x800d206bff1a40d8,1 +np.float64,0x458f818c8b1f1,0x458f818c8b1f1,1 +np.float64,0x800a7b56a234f6ae,0x800a7b56a234f6ae,1 +np.float64,0xffe3d86161e7b0c2,0xbff58d0dbde9f188,1 +np.float64,0xe8ed82e3d1db1,0xe8ed82e3d1db1,1 +np.float64,0x3fe234e0176469c0,0x3fe476bd36b96a75,1 +np.float64,0xbfc7cb9c132f9738,0xbfc812c46e185e0b,1 +np.float64,0xbfeba116c1f7422e,0xbff2b6b7563ad854,1 +np.float64,0x7fe7041de62e083b,0x3f5d2b42aca47274,1 +np.float64,0xbfcf60f4ff3ec1e8,0xbfd002eb83406436,1 +np.float64,0xbfc06067a520c0d0,0xbfc0776e5839ecda,1 +np.float64,0x4384965a87093,0x4384965a87093,1 +np.float64,0xd2ed9d01a5db4,0xd2ed9d01a5db4,1 +np.float64,0x3fbea88cb63d5119,0x3fbece49cc34a379,1 +np.float64,0x3fe7e982ebefd306,0x3feda5bd4c435d43,1 +np.float64,0xffdb60a3e036c148,0xbfcb7ed21e7a8f49,1 +np.float64,0x7fdba9231eb75245,0xbfd750cab1536398,1 +np.float64,0x800d593534dab26b,0x800d593534dab26b,1 +np.float64,0xffdf15fb683e2bf6,0x3fb3aaea23357f06,1 +np.float64,0xbfd6f8a2e5adf146,0xbfd802e509d67c67,1 +np.float64,0x3feeaa31513d5463,0x3ff6c52147dc053c,1 +np.float64,0xf2f6dfd3e5edc,0xf2f6dfd3e5edc,1 +np.float64,0x7fd58d8279ab1b04,0x403243f23d02af2a,1 +np.float64,0x8000000000000001,0x8000000000000001,1 +np.float64,0x3fdffb8e0ebff71c,0x3fe1786cb0a6b0f3,1 +np.float64,0xc999826b93331,0xc999826b93331,1 +np.float64,0xffc4966f19292ce0,0x3ff0836c75c56cc7,1 +np.float64,0x7fef95a4b2ff2b48,0xbfbbe2c27c78154f,1 +np.float64,0xb8f1307f71e26,0xb8f1307f71e26,1 +np.float64,0x3fe807bc7eb00f79,0x3fedde19f2d3c42d,1 +np.float64,0x5e4b6580bc98,0x5e4b6580bc98,1 +np.float64,0xffe19353576326a6,0xc0278c51fee07d36,1 +np.float64,0xbfb0ca6f3e2194e0,0xbfb0d09be673fa72,1 +np.float64,0x3fea724211b4e484,0x3ff15ee06f0a0a13,1 +np.float64,0xbfda21e1c4b443c4,0xbfdbb041f3c86832,1 +np.float64,0x8008082b24901057,0x8008082b24901057,1 +np.float64,0xbfd031aa4ea06354,0xbfd08c77729634bb,1 +np.float64,0xbfc407e153280fc4,0xbfc432275711df5f,1 +np.float64,0xbb4fa4b5769f5,0xbb4fa4b5769f5,1 +np.float64,0x7fed6d1daffada3a,0xc037a14bc7b41fab,1 +np.float64,0xffeee589943dcb12,0x3ff2abfe47037778,1 +np.float64,0x301379d260270,0x301379d260270,1 +np.float64,0xbfec2fefc2b85fe0,0xbff36362c0363e06,1 +np.float64,0xbfe0b1c82e216390,0xbfe264f503f7c22c,1 +np.float64,0xbfea2bce78f4579d,0xbff112d6f07935ea,1 +np.float64,0x18508ef230a13,0x18508ef230a13,1 +np.float64,0x800667a74d6ccf4f,0x800667a74d6ccf4f,1 +np.float64,0x79ce5c8cf39cc,0x79ce5c8cf39cc,1 +np.float64,0x3feda61c8efb4c39,0x3ff54c9ade076f54,1 +np.float64,0x3fe27e06b0e4fc0d,0x3fe4de665c1dc3ca,1 +np.float64,0xbfd15fea2722bfd4,0xbfd1d081c55813b0,1 +np.float64,0xbfe5222c4cea4458,0xbfe8db62deb7d2ad,1 +np.float64,0xbfe8a16c33b142d8,0xbfef02d5831592a8,1 +np.float64,0x3fdb60e7c4b6c1d0,0x3fdd2e4265c4c3b6,1 +np.float64,0x800076d62b60edad,0x800076d62b60edad,1 +np.float64,0xbfec8f1527791e2a,0xbff3da7ed3641e8d,1 +np.float64,0x2af03bfe55e08,0x2af03bfe55e08,1 +np.float64,0xa862ee0950c5e,0xa862ee0950c5e,1 +np.float64,0x7fea5a7c1eb4b4f7,0xbffa6f07d28ef211,1 +np.float64,0x90e118fb21c23,0x90e118fb21c23,1 +np.float64,0xbfead0721bf5a0e4,0xbff1c6c7a771a128,1 +np.float64,0x3f63f4a4c027e94a,0x3f63f4a75665da67,1 +np.float64,0x3fece0efa579c1e0,0x3ff443bec52f021e,1 +np.float64,0xbfdbe743b737ce88,0xbfddd129bff89c15,1 +np.float64,0x3fd48c9b8fa91938,0x3fd5492a630a8cb5,1 +np.float64,0x3ff0000000000000,0x3ff8eb245cbee3a6,1 +np.float64,0xbfd51ea33baa3d46,0xbfd5ebd5dc710204,1 +np.float64,0x3fcfbab0183f7560,0x3fd032a054580b00,1 +np.float64,0x8007abce13cf579d,0x8007abce13cf579d,1 +np.float64,0xbfef0f4723be1e8e,0xbff760c7008e8913,1 +np.float64,0x8006340f524c681f,0x8006340f524c681f,1 +np.float64,0x87b7d7010f71,0x87b7d7010f71,1 +np.float64,0x3fe9422da9b2845b,0x3ff02052e6148c45,1 +np.float64,0x7fddd259b93ba4b2,0xc000731aa33d84b6,1 +np.float64,0x3fe0156d12202ada,0x3fe1972ba309cb29,1 +np.float64,0x8004f1264b89e24d,0x8004f1264b89e24d,1 +np.float64,0x3fececdcacb9d9b9,0x3ff4534d5861f731,1 +np.float64,0x3fd1790ab822f215,0x3fd1eb97b1bb6fb4,1 +np.float64,0xffce5d11863cba24,0xbfcb4f38c17210da,1 +np.float64,0x800a30c32a546187,0x800a30c32a546187,1 +np.float64,0x3fa58cc61c2b198c,0x3fa59008add7233e,1 +np.float64,0xbfe0ac77d62158f0,0xbfe25de3dba0bc4a,1 +np.float64,0xeb8c5753d718b,0xeb8c5753d718b,1 +np.float64,0x3fee5438dafca872,0x3ff644fef7e7adb5,1 +np.float64,0x3faad1eb2c35a3e0,0x3faad83499f94057,1 +np.float64,0x3fe39152c46722a6,0x3fe66fba0b96ab6e,1 +np.float64,0xffd6fd17712dfa2e,0xc010d697d1ab8731,1 +np.float64,0x5214a888a4296,0x5214a888a4296,1 +np.float64,0x8000127a5da024f5,0x8000127a5da024f5,1 +np.float64,0x7feb3a366cb6746c,0x3fbe49bd8d5f213a,1 +np.float64,0xca479501948f3,0xca479501948f3,1 +np.float64,0x7fe7c799ce6f8f33,0xbfd796cd98dc620c,1 +np.float64,0xffe20bcf30a4179e,0xbff8ca5453fa088f,1 +np.float64,0x3fe624638a6c48c7,0x3fea83f123832c3c,1 +np.float64,0xbfe5f1377c6be26f,0xbfea2e143a2d522c,1 +np.float64,0x7fd193f9f8a327f3,0xbfb04ee2602574d4,1 +np.float64,0xbfe7419d2fee833a,0xbfec737f140d363d,1 +np.float64,0x1,0x1,1 +np.float64,0x7fe2ac246c655848,0x3fd14fee3237727a,1 +np.float64,0xa459b42948b37,0xa459b42948b37,1 +np.float64,0x3fb26155ae24c2ab,0x3fb2696fc446d4c6,1 +np.float64,0xbfdd7b332e3af666,0xbfdfc296c21f1aa8,1 +np.float64,0xbfe00dbda4a01b7c,0xbfe18d2b060f0506,1 +np.float64,0x8003bb22d3e77646,0x8003bb22d3e77646,1 +np.float64,0x3fee21b0a57c4361,0x3ff5fb6a21dc911c,1 +np.float64,0x80ca69270194d,0x80ca69270194d,1 +np.float64,0xbfd6d80350adb006,0xbfd7ddb501edbde0,1 +np.float64,0xd2f8b801a5f2,0xd2f8b801a5f2,1 +np.float64,0xbfe856b3f170ad68,0xbfee7334fdc49296,1 +np.float64,0x3fed5c1b20bab836,0x3ff4e73ee5d5c7f3,1 +np.float64,0xbfd58085a5ab010c,0xbfd6596ddc381ffa,1 +np.float64,0x3fe4f0134b29e027,0x3fe88b70602fbd21,1 +np.float64,0xffc9098fdc321320,0x4011c334a74a92cf,1 +np.float64,0x794749bef28ea,0x794749bef28ea,1 +np.float64,0xbfc86b547f30d6a8,0xbfc8b84a4fafe0af,1 +np.float64,0x7fe1356b9da26ad6,0x3fd270bca208d899,1 +np.float64,0x7fca0ef1aa341de2,0xbff851044c0734fa,1 +np.float64,0x80064cb8b62c9972,0x80064cb8b62c9972,1 +np.float64,0xffd3a09a83a74136,0x3ffb66dae0accdf5,1 +np.float64,0x800e301aa15c6035,0x800e301aa15c6035,1 +np.float64,0x800e51f323bca3e6,0x800e51f323bca3e6,1 +np.float64,0x7ff0000000000000,0xfff8000000000000,1 +np.float64,0x800c4278c87884f2,0x800c4278c87884f2,1 +np.float64,0xbfe8481649f0902c,0xbfee576772695096,1 +np.float64,0xffe2344e3fa4689c,0x3fb10442ec0888de,1 +np.float64,0xbfeada313d75b462,0xbff1d1aee3fab3a9,1 +np.float64,0x8009ddfb1333bbf7,0x8009ddfb1333bbf7,1 +np.float64,0x7fed3314c93a6629,0x3ff7a9b12dc1cd37,1 +np.float64,0x3fd55c26da2ab84e,0x3fd630a7b8aac78a,1 +np.float64,0x800cdb5203f9b6a4,0x800cdb5203f9b6a4,1 +np.float64,0xffd04a875da0950e,0x4009a13810ab121d,1 +np.float64,0x800f1acb527e3597,0x800f1acb527e3597,1 +np.float64,0xbf9519bf282a3380,0xbf951a82e9b955ff,1 +np.float64,0x3fcd7a42fa3af486,0x3fce028f3c51072d,1 +np.float64,0xbfdd3e21b73a7c44,0xbfdf769f2ff2480b,1 +np.float64,0xffd4361e2aa86c3c,0xbfc211ce8e9f792c,1 +np.float64,0x7fccf97f6939f2fe,0xbff8464bad830f06,1 +np.float64,0x800ce47fb939c900,0x800ce47fb939c900,1 +np.float64,0xffe9e51df173ca3b,0xbfceaf990d652c4e,1 +np.float64,0x3fe05bba5b20b775,0x3fe1f326e4455442,1 +np.float64,0x800a29b4b134536a,0x800a29b4b134536a,1 +np.float64,0xe6f794b7cdef3,0xe6f794b7cdef3,1 +np.float64,0xffb5b688ce2b6d10,0x3ff924bb97ae2f6d,1 +np.float64,0x7fa74105d82e820b,0x3fd49643aaa9eee4,1 +np.float64,0x80020d15f7a41a2d,0x80020d15f7a41a2d,1 +np.float64,0x3fd6a983d5ad5308,0x3fd7a8cc8835b5b8,1 +np.float64,0x7fcd9798f03b2f31,0x3fc534c2f7bf4721,1 +np.float64,0xffdd31873a3a630e,0xbfe3171fcdffb3f7,1 +np.float64,0x80075183234ea307,0x80075183234ea307,1 +np.float64,0x82f3132505e63,0x82f3132505e63,1 +np.float64,0x3febfd9cb837fb39,0x3ff325bbf812515d,1 +np.float64,0xbfb4630fda28c620,0xbfb46e1f802ec278,1 +np.float64,0x3feeed7c89fddafa,0x3ff72c20ce5a9ee4,1 +np.float64,0x7fd3dcb3c127b967,0x40123d27ec9ec31d,1 +np.float64,0xbfe923450c72468a,0xbff00149c5742725,1 +np.float64,0x7fdef7f91abdeff1,0xbfe02ceb21f7923d,1 +np.float64,0x7fdd70d28fbae1a4,0xbfefcc5c9d10cdfd,1 +np.float64,0x800ca445a8d9488c,0x800ca445a8d9488c,1 +np.float64,0x7fec2754e1f84ea9,0x40173f6c1c97f825,1 +np.float64,0x7fcbca31f7379463,0x401e26bd2667075b,1 +np.float64,0x8003fa1d0847f43b,0x8003fa1d0847f43b,1 +np.float64,0xffe95cf85932b9f0,0xc01308e60278aa11,1 +np.float64,0x8009c53948f38a73,0x8009c53948f38a73,1 +np.float64,0x3fdcca9226b99524,0x3fdee7a008f75d41,1 +np.float64,0xbfe9ee241f33dc48,0xbff0d16bfff6c8e9,1 +np.float64,0xbfb3365058266ca0,0xbfb33f9176ebb51d,1 +np.float64,0x7fa98e10f4331c21,0x3fdee04ffd31314e,1 +np.float64,0xbfe1a11aea634236,0xbfe3a8e3d84fda38,1 +np.float64,0xbfd8df051131be0a,0xbfda342805d1948b,1 +np.float64,0x3d49a2407a935,0x3d49a2407a935,1 +np.float64,0xfc51eefff8a3e,0xfc51eefff8a3e,1 +np.float64,0xda63950bb4c73,0xda63950bb4c73,1 +np.float64,0x80050f3d4fea1e7b,0x80050f3d4fea1e7b,1 +np.float64,0x3fcdbd6e453b7ae0,0x3fce497478c28e77,1 +np.float64,0x7ebd4932fd7aa,0x7ebd4932fd7aa,1 +np.float64,0x7fa3904eac27209c,0xc0015f3125efc151,1 +np.float64,0x7fc59f956b2b3f2a,0xc00c012e7a2c281f,1 +np.float64,0xbfd436d716a86dae,0xbfd4ea13533a942b,1 +np.float64,0x9347ae3d268f6,0x9347ae3d268f6,1 +np.float64,0xffd001764d2002ec,0xbffab3462e515623,1 +np.float64,0x3fe6f406662de80d,0x3febe9bac3954999,1 +np.float64,0x3f943ecaf8287d96,0x3f943f77dee5e77f,1 +np.float64,0x3fd6250efcac4a1c,0x3fd712afa947d56f,1 +np.float64,0xbfe849ff777093ff,0xbfee5b089d03391f,1 +np.float64,0xffd3b8ef8f2771e0,0x4000463ff7f29214,1 +np.float64,0xbfc3bae9252775d4,0xbfc3e34c133f1933,1 +np.float64,0xbfea93943df52728,0xbff18355e4fc341d,1 +np.float64,0x3fc4d922ad29b245,0x3fc508d66869ef29,1 +np.float64,0x4329694a8652e,0x4329694a8652e,1 +np.float64,0x8834f1a71069e,0x8834f1a71069e,1 +np.float64,0xe0e5be8dc1cb8,0xe0e5be8dc1cb8,1 +np.float64,0x7fef4d103afe9a1f,0xc0047b88b94554fe,1 +np.float64,0x3fe9b57af4f36af6,0x3ff0963831d51c3f,1 +np.float64,0x3fe081e2fa6103c6,0x3fe22572e41be655,1 +np.float64,0x3fd78cf7b42f19ef,0x3fd8acafa1ad776a,1 +np.float64,0x7fbffd58d43ffab1,0x3fb16092c7de6036,1 +np.float64,0xbfe1e8bfae23d180,0xbfe40c1c6277dd52,1 +np.float64,0x800a9f59fb153eb4,0x800a9f59fb153eb4,1 +np.float64,0xffebe14e33b7c29c,0x3fe0ec532f4deedd,1 +np.float64,0xffc36ca00426d940,0xc000806a712d6e83,1 +np.float64,0xbfcc2be82d3857d0,0xbfcca2a7d372ec64,1 +np.float64,0x800c03b908780772,0x800c03b908780772,1 +np.float64,0xf315a64be62b5,0xf315a64be62b5,1 +np.float64,0xbfe644043cec8808,0xbfeab974d3dc6d80,1 +np.float64,0x3fedb7de3cbb6fbc,0x3ff56549a5acd324,1 +np.float64,0xbfb1a875522350e8,0xbfb1afa41dee338d,1 +np.float64,0xffee8d4a407d1a94,0x3fead1749a636ff6,1 +np.float64,0x8004061c13080c39,0x8004061c13080c39,1 +np.float64,0x3fe650ae7feca15c,0x3feacefb8bc25f64,1 +np.float64,0x3fda8340e6b50682,0x3fdc24275cab1df8,1 +np.float64,0x8009084344321087,0x8009084344321087,1 +np.float64,0x7fdd19cb823a3396,0xbfd1d8fb35d89e3f,1 +np.float64,0xbfe893172571262e,0xbfeee716b592b93c,1 +np.float64,0x8ff5acc11fec,0x8ff5acc11fec,1 +np.float64,0xbfdca0c57cb9418a,0xbfdeb42465a1b59e,1 +np.float64,0xffd77bd2a3aef7a6,0x4012cd69e85b82d8,1 +np.float64,0xbfe6ea78982dd4f1,0xbfebd8ec61fb9e1f,1 +np.float64,0x7fe14b1d80a2963a,0xc02241642102cf71,1 +np.float64,0x3fe712bf286e257e,0x3fec20012329a7fb,1 +np.float64,0x7fcb6fa4d636df49,0x400b899d14a886b3,1 +np.float64,0x3fb82cb39a305960,0x3fb83f29c5f0822e,1 +np.float64,0x7fed694c8b3ad298,0xbfe2724373c69808,1 +np.float64,0xbfcd21229f3a4244,0xbfcda497fc3e1245,1 +np.float64,0x564d3770ac9a8,0x564d3770ac9a8,1 +np.float64,0xf4409e13e8814,0xf4409e13e8814,1 +np.float64,0x80068dca9a8d1b96,0x80068dca9a8d1b96,1 +np.float64,0xbfe13f82afe27f06,0xbfe3236ddded353f,1 +np.float64,0x80023f8114647f03,0x80023f8114647f03,1 +np.float64,0xeafba7dfd5f75,0xeafba7dfd5f75,1 +np.float64,0x3feca74ddeb94e9c,0x3ff3f95dcce5a227,1 +np.float64,0x10000000000000,0x10000000000000,1 +np.float64,0xbfebdb4141f7b682,0xbff2fc29823ac64a,1 +np.float64,0xbfcd75ee2f3aebdc,0xbfcdfdfd87cc6a29,1 +np.float64,0x7fc010cda420219a,0x3fae4ca2cf1f2657,1 +np.float64,0x1a90209e35205,0x1a90209e35205,1 +np.float64,0x8008057d01900afa,0x8008057d01900afa,1 +np.float64,0x3f9cb5f280396be5,0x3f9cb7dfb4e4be4e,1 +np.float64,0xffe1bbb60b63776c,0xc00011b1ffcb2561,1 +np.float64,0xffda883f6fb5107e,0x4044238ef4e2a198,1 +np.float64,0x3fc07c0b4a20f817,0x3fc09387de9eebcf,1 +np.float64,0x8003a9ebc0c753d8,0x8003a9ebc0c753d8,1 +np.float64,0x1d7fd5923affc,0x1d7fd5923affc,1 +np.float64,0xbfe9cd8cf9b39b1a,0xbff0af43e567ba4a,1 +np.float64,0x11285cb42250c,0x11285cb42250c,1 +np.float64,0xffe81ae1ccb035c3,0xbfe038be7eb563a6,1 +np.float64,0xbfe56473b1eac8e8,0xbfe94654d8ab9e75,1 +np.float64,0x3fee904619fd208c,0x3ff69e198152fe17,1 +np.float64,0xbfeeb9a2cbfd7346,0xbff6dc8d96da78cd,1 +np.float64,0x8006cdfa59ed9bf5,0x8006cdfa59ed9bf5,1 +np.float64,0x8008f2366d31e46d,0x8008f2366d31e46d,1 +np.float64,0x8008d5f91e31abf3,0x8008d5f91e31abf3,1 +np.float64,0x3fe85886f8b0b10e,0x3fee76af16f5a126,1 +np.float64,0x3fefb9b2b73f7365,0x3ff8745128fa3e3b,1 +np.float64,0x7fdf3e721f3e7ce3,0xbfb19381541ca2a8,1 +np.float64,0x3fd2768c41a4ed18,0x3fd2fe2f85a3f3a6,1 +np.float64,0xbfcabe3c6a357c78,0xbfcb239fb88bc260,1 +np.float64,0xffdffb6a3dbff6d4,0xbff7af4759fd557c,1 +np.float64,0x800817f75f302fef,0x800817f75f302fef,1 +np.float64,0xbfe6a1d1762d43a3,0xbfeb5a399a095ef3,1 +np.float64,0x7fd6f32f912de65e,0x40016dedc51aabd0,1 +np.float64,0x3fc6cb26652d964d,0x3fc7099f047d924a,1 +np.float64,0x3fe8b975d67172ec,0x3fef31946123c0e7,1 +np.float64,0xffe44a09d1e89413,0x3fdee9e5eac6e540,1 +np.float64,0xbfece76d4cb9cedb,0xbff44c34849d07ba,1 +np.float64,0x7feb76027036ec04,0x3fe08595a5e263ac,1 +np.float64,0xffe194f591a329ea,0x3fbe5bd626400a70,1 +np.float64,0xbfc170698122e0d4,0xbfc18c3de8b63565,1 +np.float64,0x3fc82b2c0f305658,0x3fc875c3b5fbcd08,1 +np.float64,0x3fd5015634aa02ac,0x3fd5cb1df07213c3,1 +np.float64,0x7fe640884b6c8110,0xbff66255a420abb5,1 +np.float64,0x5a245206b448b,0x5a245206b448b,1 +np.float64,0xffe9d9fa2f73b3f4,0xc0272b0dd34ab9bf,1 +np.float64,0x3fd990e8aab321d0,0x3fdb04cd3a29bcc3,1 +np.float64,0xde9dda8bbd3bc,0xde9dda8bbd3bc,1 +np.float64,0xbfe81b32b4703666,0xbfee029937fa9f5a,1 +np.float64,0xbfe68116886d022d,0xbfeb21c62081cb73,1 +np.float64,0x3fb8da191231b432,0x3fb8ee28c71507d3,1 +np.float64,0x3fb111395a222273,0x3fb117b57de3dea4,1 +np.float64,0xffbafadc6a35f5b8,0x3ffcc6d2370297b9,1 +np.float64,0x8002ca475b05948f,0x8002ca475b05948f,1 +np.float64,0xbfeafef57875fdeb,0xbff1fb1315676f24,1 +np.float64,0x7fcda427d73b484f,0xbff9f70212694d17,1 +np.float64,0xffe2517b3ba4a2f6,0xc029ca6707305bf4,1 +np.float64,0x7fc5ee156b2bdc2a,0xbff8384b59e9056e,1 +np.float64,0xbfec22af3278455e,0xbff3530fe25816b4,1 +np.float64,0x6b5a8c2cd6b52,0x6b5a8c2cd6b52,1 +np.float64,0xffdaf6c4b935ed8a,0x4002f00ce58affcf,1 +np.float64,0x800a41813c748303,0x800a41813c748303,1 +np.float64,0xbfd09a1269213424,0xbfd0fc0a0c5de8eb,1 +np.float64,0x7fa2cb74d42596e9,0x3fc3d40e000fa69d,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x3fbfbf8ed63f7f1e,0x3fbfe97bcad9f53a,1 +np.float64,0x7fe0ebba65a1d774,0x401b0f17b28618df,1 +np.float64,0x3fd02c3a25a05874,0x3fd086aa55b19c9c,1 +np.float64,0xec628f95d8c52,0xec628f95d8c52,1 +np.float64,0x3fd319329fa63264,0x3fd3afb04e0dec63,1 +np.float64,0x180e0ade301c2,0x180e0ade301c2,1 +np.float64,0xbfe8d78324f1af06,0xbfef6c66153064ee,1 +np.float64,0xffb89fa200313f48,0xbfeb96ff2d9358dc,1 +np.float64,0x7fe6abcf86ed579e,0xc0269f4de86365ec,1 +np.float64,0x7fdff8cd65bff19a,0xbfd0f7c6b9052c9a,1 +np.float64,0xbfd2e3a53d25c74a,0xbfd37520cda5f6b2,1 +np.float64,0x7fe844b096708960,0x3ff696a6182e5a7a,1 +np.float64,0x7fdce0c7a3b9c18e,0x3fd42875d69ed379,1 +np.float64,0xffba5a91cc34b520,0x4001b571e8991951,1 +np.float64,0xffe78fe4a6ef1fc9,0x3ff4507b31f5b3bc,1 +np.float64,0xbfd7047493ae08ea,0xbfd810618a53fffb,1 +np.float64,0xc6559def8cab4,0xc6559def8cab4,1 +np.float64,0x3fe75d67a76ebacf,0x3feca56817de65e4,1 +np.float64,0xffd24adbd6a495b8,0xc012c491addf2df5,1 +np.float64,0x7fed35e28dba6bc4,0x403a0fa555ff7ec6,1 +np.float64,0x80078c4afa0f1897,0x80078c4afa0f1897,1 +np.float64,0xa6ec39114dd87,0xa6ec39114dd87,1 +np.float64,0x7fb1bd33ba237a66,0x4010092bb6810fd4,1 +np.float64,0x800ecf215edd9e43,0x800ecf215edd9e43,1 +np.float64,0x3fb7c169242f82d2,0x3fb7d2ed30c462e6,1 +np.float64,0xbf71b46d60236900,0xbf71b4749a10c112,1 +np.float64,0x800d7851787af0a3,0x800d7851787af0a3,1 +np.float64,0x3fcb4a45e7369488,0x3fcbb61701a1bcec,1 +np.float64,0x3fd4e3682429c6d0,0x3fd5a9bcb916eb94,1 +np.float64,0x800497564c292ead,0x800497564c292ead,1 +np.float64,0xbfca3737a1346e70,0xbfca96a86ae5d687,1 +np.float64,0x19aa87e03356,0x19aa87e03356,1 +np.float64,0xffb2593fe624b280,0xc05fedb99b467ced,1 +np.float64,0xbfdd8748fbbb0e92,0xbfdfd1a7df17252c,1 +np.float64,0x8004c7afc7098f60,0x8004c7afc7098f60,1 +np.float64,0x7fde48b2bf3c9164,0xbfe36ef1158ed420,1 +np.float64,0xbfec8e0eb0f91c1d,0xbff3d9319705a602,1 +np.float64,0xffea1be204f437c3,0xc0144f67298c3e6f,1 +np.float64,0x7fdb906b593720d6,0xbfce99233396eda7,1 +np.float64,0x3fef0f114ffe1e22,0x3ff76072a258a51b,1 +np.float64,0x3fe3e284c8e7c50a,0x3fe6e9b05e17c999,1 +np.float64,0xbfbda9eef23b53e0,0xbfbdcc1abb443597,1 +np.float64,0x3feb6454d4f6c8aa,0x3ff26f65a85baba4,1 +np.float64,0x3fea317439f462e8,0x3ff118e2187ef33f,1 +np.float64,0x376ad0646ed5b,0x376ad0646ed5b,1 +np.float64,0x7fdd461a1c3a8c33,0x3f7ba20fb79e785f,1 +np.float64,0xebc520a3d78a4,0xebc520a3d78a4,1 +np.float64,0x3fca90fe53352200,0x3fcaf45c7fae234d,1 +np.float64,0xbfe80dd1de701ba4,0xbfede97e12cde9de,1 +np.float64,0x3fd242b00ea48560,0x3fd2c5cf9bf69a31,1 +np.float64,0x7fe46c057828d80a,0xbfe2f76837488f94,1 +np.float64,0x3fc162bea322c580,0x3fc17e517c958867,1 +np.float64,0xffebf0452ff7e08a,0x3ffc3fd95c257b54,1 +np.float64,0xffd88043c6310088,0x4008b05598d0d95f,1 +np.float64,0x800d8c49da5b1894,0x800d8c49da5b1894,1 +np.float64,0xbfed33b487ba6769,0xbff4b0ea941f8a6a,1 +np.float64,0x16b881e22d711,0x16b881e22d711,1 +np.float64,0x288bae0051177,0x288bae0051177,1 +np.float64,0xffc83a0fe8307420,0x4006eff03da17f86,1 +np.float64,0x3fc7868b252f0d18,0x3fc7cb4954290324,1 +np.float64,0xbfe195514b232aa2,0xbfe398aae6c8ed76,1 +np.float64,0x800c001ae7f80036,0x800c001ae7f80036,1 +np.float64,0x7feb82abe7370557,0xbff1e13fe6fad23c,1 +np.float64,0xffecf609cdf9ec13,0xc0112aa1805ae59e,1 +np.float64,0xffddd654f63bacaa,0x3fe46cce899f710d,1 +np.float64,0x3fe2163138642c62,0x3fe44b9c760acd4c,1 +np.float64,0x4e570dc09cae2,0x4e570dc09cae2,1 +np.float64,0x7fe9e8d091f3d1a0,0xc000fe20f8e9a4b5,1 +np.float64,0x7fe60042952c0084,0x3fd0aa740f394c2a,1 diff --git a/local_packages/numpy/_core/tests/data/umath-validation-set-tanh.csv b/local_packages/numpy/_core/tests/data/umath-validation-set-tanh.csv new file mode 100644 index 0000000..9e3ddc6 --- /dev/null +++ b/local_packages/numpy/_core/tests/data/umath-validation-set-tanh.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0xbe26ebb0,0xbe25752f,2 +np.float32,0xbe22ecc0,0xbe219054,2 +np.float32,0x8010a6b3,0x8010a6b3,2 +np.float32,0x3135da,0x3135da,2 +np.float32,0xbe982afc,0xbe93d727,2 +np.float32,0x16a51f,0x16a51f,2 +np.float32,0x491e56,0x491e56,2 +np.float32,0x4bf7ca,0x4bf7ca,2 +np.float32,0x3eebc21c,0x3edc65b2,2 +np.float32,0x80155c94,0x80155c94,2 +np.float32,0x3e14f626,0x3e13eb6a,2 +np.float32,0x801a238f,0x801a238f,2 +np.float32,0xbde33a80,0xbde24cf9,2 +np.float32,0xbef8439c,0xbee67a51,2 +np.float32,0x7f60d0a5,0x3f800000,2 +np.float32,0x190ee3,0x190ee3,2 +np.float32,0x80759113,0x80759113,2 +np.float32,0x800afa9f,0x800afa9f,2 +np.float32,0x7110cf,0x7110cf,2 +np.float32,0x3cf709f0,0x3cf6f6c6,2 +np.float32,0x3ef58da4,0x3ee44fa7,2 +np.float32,0xbf220ff2,0xbf0f662c,2 +np.float32,0xfd888078,0xbf800000,2 +np.float32,0xbe324734,0xbe307f9b,2 +np.float32,0x3eb5cb4f,0x3eae8560,2 +np.float32,0xbf7e7d02,0xbf425493,2 +np.float32,0x3ddcdcf0,0x3ddc02c2,2 +np.float32,0x8026d27a,0x8026d27a,2 +np.float32,0x3d4c0fb1,0x3d4be484,2 +np.float32,0xbf27d2c9,0xbf134d7c,2 +np.float32,0x8029ff80,0x8029ff80,2 +np.float32,0x7f046d2c,0x3f800000,2 +np.float32,0x13f94b,0x13f94b,2 +np.float32,0x7f4ff922,0x3f800000,2 +np.float32,0x3f4ea2ed,0x3f2b03e4,2 +np.float32,0x3e7211f0,0x3e6da8cf,2 +np.float32,0x7f39d0cf,0x3f800000,2 +np.float32,0xfee57fc6,0xbf800000,2 +np.float32,0xff6fb326,0xbf800000,2 +np.float32,0xff800000,0xbf800000,2 +np.float32,0x3f0437a4,0x3ef32fcd,2 +np.float32,0xff546d1e,0xbf800000,2 +np.float32,0x3eb5645b,0x3eae2a5c,2 +np.float32,0x3f08a6e5,0x3ef9ff8f,2 +np.float32,0x80800000,0x80800000,2 +np.float32,0x7f3413da,0x3f800000,2 +np.float32,0xfd760140,0xbf800000,2 +np.float32,0x7f3ad24a,0x3f800000,2 +np.float32,0xbf56e812,0xbf2f7f14,2 +np.float32,0xbece0338,0xbec3920a,2 +np.float32,0xbeede54a,0xbede22ae,2 +np.float32,0x7eaeb215,0x3f800000,2 +np.float32,0x3c213c00,0x3c213aab,2 +np.float32,0x7eaac217,0x3f800000,2 +np.float32,0xbf2f740e,0xbf1851a6,2 +np.float32,0x7f6ca5b8,0x3f800000,2 +np.float32,0xff42ce95,0xbf800000,2 +np.float32,0x802e4189,0x802e4189,2 +np.float32,0x80000001,0x80000001,2 +np.float32,0xbf31f298,0xbf19ebbe,2 +np.float32,0x3dcb0e6c,0x3dca64c1,2 +np.float32,0xbf29599c,0xbf145204,2 +np.float32,0x2e33f2,0x2e33f2,2 +np.float32,0x1c11e7,0x1c11e7,2 +np.float32,0x3f3b188d,0x3f1fa302,2 +np.float32,0x113300,0x113300,2 +np.float32,0x8054589e,0x8054589e,2 +np.float32,0x2a9e69,0x2a9e69,2 +np.float32,0xff513af7,0xbf800000,2 +np.float32,0x7f2e987a,0x3f800000,2 +np.float32,0x807cd426,0x807cd426,2 +np.float32,0x7f0dc4e4,0x3f800000,2 +np.float32,0x7e7c0d56,0x3f800000,2 +np.float32,0x5cb076,0x5cb076,2 +np.float32,0x80576426,0x80576426,2 +np.float32,0xff616222,0xbf800000,2 +np.float32,0xbf7accb5,0xbf40c005,2 +np.float32,0xfe4118c8,0xbf800000,2 +np.float32,0x804b9327,0x804b9327,2 +np.float32,0x3ed2b428,0x3ec79026,2 +np.float32,0x3f4a048f,0x3f286d41,2 +np.float32,0x800000,0x800000,2 +np.float32,0x7efceb9f,0x3f800000,2 +np.float32,0xbf5fe2d3,0xbf34246f,2 +np.float32,0x807e086a,0x807e086a,2 +np.float32,0x7ef5e856,0x3f800000,2 +np.float32,0xfc546f00,0xbf800000,2 +np.float32,0x3a65b890,0x3a65b88c,2 +np.float32,0x800cfa70,0x800cfa70,2 +np.float32,0x80672ea7,0x80672ea7,2 +np.float32,0x3f2bf3f2,0x3f160a12,2 +np.float32,0xbf0ab67e,0xbefd2004,2 +np.float32,0x3f2a0bb4,0x3f14c824,2 +np.float32,0xbeff5374,0xbeec12d7,2 +np.float32,0xbf221b58,0xbf0f6dff,2 +np.float32,0x7cc1f3,0x7cc1f3,2 +np.float32,0x7f234e3c,0x3f800000,2 +np.float32,0x3f60ff10,0x3f34b37d,2 +np.float32,0xbdd957f0,0xbdd887fe,2 +np.float32,0x801ce048,0x801ce048,2 +np.float32,0x7f3a8f76,0x3f800000,2 +np.float32,0xfdd13d08,0xbf800000,2 +np.float32,0x3e9af4a4,0x3e966445,2 +np.float32,0x1e55f3,0x1e55f3,2 +np.float32,0x327905,0x327905,2 +np.float32,0xbf03cf0b,0xbef28dad,2 +np.float32,0x3f0223d3,0x3eeff4f4,2 +np.float32,0xfdd96ff8,0xbf800000,2 +np.float32,0x428db8,0x428db8,2 +np.float32,0xbd74a200,0xbd7457a5,2 +np.float32,0x2a63a3,0x2a63a3,2 +np.float32,0x7e8aa9d7,0x3f800000,2 +np.float32,0x7f50b810,0x3f800000,2 +np.float32,0xbce5ec80,0xbce5dd0d,2 +np.float32,0x54711,0x54711,2 +np.float32,0x8074212a,0x8074212a,2 +np.float32,0xbf13d0ec,0xbf0551b5,2 +np.float32,0x80217f89,0x80217f89,2 +np.float32,0x3f300824,0x3f18b12f,2 +np.float32,0x7d252462,0x3f800000,2 +np.float32,0x807a154c,0x807a154c,2 +np.float32,0x8064d4b9,0x8064d4b9,2 +np.float32,0x804543b4,0x804543b4,2 +np.float32,0x4c269e,0x4c269e,2 +np.float32,0xff39823b,0xbf800000,2 +np.float32,0x3f5040b1,0x3f2be80b,2 +np.float32,0xbf7028c1,0xbf3bfee5,2 +np.float32,0x3e94eb78,0x3e90db93,2 +np.float32,0x3ccc1b40,0x3ccc1071,2 +np.float32,0xbe8796f0,0xbe8481a1,2 +np.float32,0xfc767bc0,0xbf800000,2 +np.float32,0xbdd81ed0,0xbdd75259,2 +np.float32,0xbed31bfc,0xbec7e82d,2 +np.float32,0xbf350a9e,0xbf1be1c6,2 +np.float32,0x33d41f,0x33d41f,2 +np.float32,0x3f73e076,0x3f3db0b5,2 +np.float32,0x3f800000,0x3f42f7d6,2 +np.float32,0xfee27c14,0xbf800000,2 +np.float32,0x7f6e4388,0x3f800000,2 +np.float32,0x4ea19b,0x4ea19b,2 +np.float32,0xff2d75f2,0xbf800000,2 +np.float32,0x7ee225ca,0x3f800000,2 +np.float32,0x3f31cb4b,0x3f19d2a4,2 +np.float32,0x80554a9d,0x80554a9d,2 +np.float32,0x3f4d57fa,0x3f2a4c03,2 +np.float32,0x3eac6a88,0x3ea62e72,2 +np.float32,0x773520,0x773520,2 +np.float32,0x8079c20a,0x8079c20a,2 +np.float32,0xfeb1eb94,0xbf800000,2 +np.float32,0xfe8d81c0,0xbf800000,2 +np.float32,0xfeed6902,0xbf800000,2 +np.float32,0x8066bb65,0x8066bb65,2 +np.float32,0x7f800000,0x3f800000,2 +np.float32,0x1,0x1,2 +np.float32,0x3f2c66a4,0x3f16554a,2 +np.float32,0x3cd231,0x3cd231,2 +np.float32,0x3e932a64,0x3e8f3e0c,2 +np.float32,0xbf3ab1c3,0xbf1f6420,2 +np.float32,0xbc902b20,0xbc902751,2 +np.float32,0x7dac0a5b,0x3f800000,2 +np.float32,0x3f2b7e06,0x3f15bc93,2 +np.float32,0x75de0,0x75de0,2 +np.float32,0x8020b7bc,0x8020b7bc,2 +np.float32,0x3f257cda,0x3f11bb6b,2 +np.float32,0x807480e5,0x807480e5,2 +np.float32,0xfe00d758,0xbf800000,2 +np.float32,0xbd9b54e0,0xbd9b08cd,2 +np.float32,0x4dfbe3,0x4dfbe3,2 +np.float32,0xff645788,0xbf800000,2 +np.float32,0xbe92c80a,0xbe8ee360,2 +np.float32,0x3eb9b400,0x3eb1f77c,2 +np.float32,0xff20b69c,0xbf800000,2 +np.float32,0x623c28,0x623c28,2 +np.float32,0xff235748,0xbf800000,2 +np.float32,0xbf3bbc56,0xbf2006f3,2 +np.float32,0x7e6f78b1,0x3f800000,2 +np.float32,0x7e1584e9,0x3f800000,2 +np.float32,0xff463423,0xbf800000,2 +np.float32,0x8002861e,0x8002861e,2 +np.float32,0xbf0491d8,0xbef3bb6a,2 +np.float32,0x7ea3bc17,0x3f800000,2 +np.float32,0xbedde7ea,0xbed0fb49,2 +np.float32,0xbf4bac48,0xbf295c8b,2 +np.float32,0xff28e276,0xbf800000,2 +np.float32,0x7e8f3bf5,0x3f800000,2 +np.float32,0xbf0a4a73,0xbefc7c9d,2 +np.float32,0x7ec5bd96,0x3f800000,2 +np.float32,0xbf4c22e8,0xbf299f2c,2 +np.float32,0x3e3970a0,0x3e377064,2 +np.float32,0x3ecb1118,0x3ec10c88,2 +np.float32,0xff548a7a,0xbf800000,2 +np.float32,0xfe8ec550,0xbf800000,2 +np.float32,0x3e158985,0x3e147bb2,2 +np.float32,0x7eb79ad7,0x3f800000,2 +np.float32,0xbe811384,0xbe7cd1ab,2 +np.float32,0xbdc4b9e8,0xbdc41f94,2 +np.float32,0xe0fd5,0xe0fd5,2 +np.float32,0x3f2485f2,0x3f11142b,2 +np.float32,0xfdd3c3d8,0xbf800000,2 +np.float32,0xfe8458e6,0xbf800000,2 +np.float32,0x3f06e398,0x3ef74dd8,2 +np.float32,0xff4752cf,0xbf800000,2 +np.float32,0x6998e3,0x6998e3,2 +np.float32,0x626751,0x626751,2 +np.float32,0x806631d6,0x806631d6,2 +np.float32,0xbf0c3cf4,0xbeff6c54,2 +np.float32,0x802860f8,0x802860f8,2 +np.float32,0xff2952cb,0xbf800000,2 +np.float32,0xff31d40b,0xbf800000,2 +np.float32,0x7c389473,0x3f800000,2 +np.float32,0x3dcd2f1b,0x3dcc8010,2 +np.float32,0x3d70c29f,0x3d707bbc,2 +np.float32,0x3f6bd386,0x3f39f979,2 +np.float32,0x1efec9,0x1efec9,2 +np.float32,0x3f675518,0x3f37d338,2 +np.float32,0x5fdbe3,0x5fdbe3,2 +np.float32,0x5d684e,0x5d684e,2 +np.float32,0xbedfe748,0xbed2a4c7,2 +np.float32,0x3f0cb07a,0x3f000cdc,2 +np.float32,0xbf77151e,0xbf3f1f5d,2 +np.float32,0x7f038ea0,0x3f800000,2 +np.float32,0x3ea91be9,0x3ea3376f,2 +np.float32,0xbdf20738,0xbdf0e861,2 +np.float32,0x807ea380,0x807ea380,2 +np.float32,0x2760ca,0x2760ca,2 +np.float32,0x7f20a544,0x3f800000,2 +np.float32,0x76ed83,0x76ed83,2 +np.float32,0x15a441,0x15a441,2 +np.float32,0x74c76d,0x74c76d,2 +np.float32,0xff3d5c2a,0xbf800000,2 +np.float32,0x7f6a76a6,0x3f800000,2 +np.float32,0x3eb87067,0x3eb0dabe,2 +np.float32,0xbf515cfa,0xbf2c83af,2 +np.float32,0xbdececc0,0xbdebdf9d,2 +np.float32,0x7f51b7c2,0x3f800000,2 +np.float32,0x3eb867ac,0x3eb0d30d,2 +np.float32,0xff50fd84,0xbf800000,2 +np.float32,0x806945e9,0x806945e9,2 +np.float32,0x298eed,0x298eed,2 +np.float32,0x441f53,0x441f53,2 +np.float32,0x8066d4b0,0x8066d4b0,2 +np.float32,0x3f6a479c,0x3f393dae,2 +np.float32,0xbf6ce2a7,0xbf3a7921,2 +np.float32,0x8064c3cf,0x8064c3cf,2 +np.float32,0xbf2d8146,0xbf170dfd,2 +np.float32,0x3b0e82,0x3b0e82,2 +np.float32,0xbea97574,0xbea387dc,2 +np.float32,0x67ad15,0x67ad15,2 +np.float32,0xbf68478f,0xbf38485a,2 +np.float32,0xff6f593b,0xbf800000,2 +np.float32,0xbeda26f2,0xbecdd806,2 +np.float32,0xbd216d50,0xbd2157ee,2 +np.float32,0x7a8544db,0x3f800000,2 +np.float32,0x801df20b,0x801df20b,2 +np.float32,0xbe14ba24,0xbe13b0a8,2 +np.float32,0xfdc6d8a8,0xbf800000,2 +np.float32,0x1d6b49,0x1d6b49,2 +np.float32,0x7f5ff1b8,0x3f800000,2 +np.float32,0x3f75e032,0x3f3e9625,2 +np.float32,0x7f2c5687,0x3f800000,2 +np.float32,0x3d95fb6c,0x3d95b6ee,2 +np.float32,0xbea515e4,0xbe9f97c8,2 +np.float32,0x7f2b2cd7,0x3f800000,2 +np.float32,0x3f076f7a,0x3ef8241e,2 +np.float32,0x5178ca,0x5178ca,2 +np.float32,0xbeb5976a,0xbeae5781,2 +np.float32,0x3e3c3563,0x3e3a1e13,2 +np.float32,0xbd208530,0xbd20702a,2 +np.float32,0x3eb03b04,0x3ea995ef,2 +np.float32,0x17fb9c,0x17fb9c,2 +np.float32,0xfca68e40,0xbf800000,2 +np.float32,0xbf5e7433,0xbf336a9f,2 +np.float32,0xff5b8d3d,0xbf800000,2 +np.float32,0x8003121d,0x8003121d,2 +np.float32,0xbe6dd344,0xbe69a3b0,2 +np.float32,0x67cc4,0x67cc4,2 +np.float32,0x9b01d,0x9b01d,2 +np.float32,0x127c13,0x127c13,2 +np.float32,0xfea5e3d6,0xbf800000,2 +np.float32,0xbdf5c610,0xbdf499c1,2 +np.float32,0x3aff4c00,0x3aff4beb,2 +np.float32,0x3b00afd0,0x3b00afc5,2 +np.float32,0x479618,0x479618,2 +np.float32,0x801cbd05,0x801cbd05,2 +np.float32,0x3ec9249f,0x3ebf6579,2 +np.float32,0x3535c4,0x3535c4,2 +np.float32,0xbeb4f662,0xbeadc915,2 +np.float32,0x8006fda6,0x8006fda6,2 +np.float32,0xbf4f3097,0xbf2b5239,2 +np.float32,0xbf3cb9a8,0xbf20a0e9,2 +np.float32,0x32ced0,0x32ced0,2 +np.float32,0x7ea34e76,0x3f800000,2 +np.float32,0x80063046,0x80063046,2 +np.float32,0x80727e8b,0x80727e8b,2 +np.float32,0xfd6b5780,0xbf800000,2 +np.float32,0x80109815,0x80109815,2 +np.float32,0xfdcc8a78,0xbf800000,2 +np.float32,0x81562,0x81562,2 +np.float32,0x803dfacc,0x803dfacc,2 +np.float32,0xbe204318,0xbe1ef75f,2 +np.float32,0xbf745d34,0xbf3de8e2,2 +np.float32,0xff13fdcc,0xbf800000,2 +np.float32,0x7f75ba8c,0x3f800000,2 +np.float32,0x806c04b4,0x806c04b4,2 +np.float32,0x3ec61ca6,0x3ebcc877,2 +np.float32,0xbeaea984,0xbea8301f,2 +np.float32,0xbf4dcd0e,0xbf2a8d34,2 +np.float32,0x802a01d3,0x802a01d3,2 +np.float32,0xbf747be5,0xbf3df6ad,2 +np.float32,0xbf75cbd2,0xbf3e8d0f,2 +np.float32,0x7db86576,0x3f800000,2 +np.float32,0xff49a2c3,0xbf800000,2 +np.float32,0xbedc5314,0xbecfa978,2 +np.float32,0x8078877b,0x8078877b,2 +np.float32,0xbead4824,0xbea6f499,2 +np.float32,0xbf3926e3,0xbf1e716c,2 +np.float32,0x807f4a1c,0x807f4a1c,2 +np.float32,0x7f2cd8fd,0x3f800000,2 +np.float32,0x806cfcca,0x806cfcca,2 +np.float32,0xff1aa048,0xbf800000,2 +np.float32,0x7eb9ea08,0x3f800000,2 +np.float32,0xbf1034bc,0xbf02ab3a,2 +np.float32,0xbd087830,0xbd086b44,2 +np.float32,0x7e071034,0x3f800000,2 +np.float32,0xbefcc9de,0xbeea122f,2 +np.float32,0x80796d7a,0x80796d7a,2 +np.float32,0x33ce46,0x33ce46,2 +np.float32,0x8074a783,0x8074a783,2 +np.float32,0xbe95a56a,0xbe918691,2 +np.float32,0xbf2ff3f4,0xbf18a42d,2 +np.float32,0x1633e9,0x1633e9,2 +np.float32,0x7f0f104b,0x3f800000,2 +np.float32,0xbf800000,0xbf42f7d6,2 +np.float32,0x3d2cd6,0x3d2cd6,2 +np.float32,0xfed43e16,0xbf800000,2 +np.float32,0x3ee6faec,0x3ed87d2c,2 +np.float32,0x3f2c32d0,0x3f163352,2 +np.float32,0xff4290c0,0xbf800000,2 +np.float32,0xbf66500e,0xbf37546a,2 +np.float32,0x7dfb8fe3,0x3f800000,2 +np.float32,0x3f20ba5d,0x3f0e7b16,2 +np.float32,0xff30c7ae,0xbf800000,2 +np.float32,0x1728a4,0x1728a4,2 +np.float32,0x340d82,0x340d82,2 +np.float32,0xff7870b7,0xbf800000,2 +np.float32,0xbeac6ac4,0xbea62ea7,2 +np.float32,0xbef936fc,0xbee73c36,2 +np.float32,0x3ec7e12c,0x3ebe4ef8,2 +np.float32,0x80673488,0x80673488,2 +np.float32,0xfdf14c90,0xbf800000,2 +np.float32,0x3f182568,0x3f08726e,2 +np.float32,0x7ed7dcd0,0x3f800000,2 +np.float32,0x3de4da34,0x3de3e790,2 +np.float32,0xff7fffff,0xbf800000,2 +np.float32,0x4ff90c,0x4ff90c,2 +np.float32,0x3efb0d1c,0x3ee8b1d6,2 +np.float32,0xbf66e952,0xbf379ef4,2 +np.float32,0xba9dc,0xba9dc,2 +np.float32,0xff67c766,0xbf800000,2 +np.float32,0x7f1ffc29,0x3f800000,2 +np.float32,0x3f51c906,0x3f2cbe99,2 +np.float32,0x3f2e5792,0x3f179968,2 +np.float32,0x3ecb9750,0x3ec17fa0,2 +np.float32,0x7f3fcefc,0x3f800000,2 +np.float32,0xbe4e30fc,0xbe4b72f9,2 +np.float32,0x7e9bc4ce,0x3f800000,2 +np.float32,0x7e70aa1f,0x3f800000,2 +np.float32,0x14c6e9,0x14c6e9,2 +np.float32,0xbcf327c0,0xbcf3157a,2 +np.float32,0xff1fd204,0xbf800000,2 +np.float32,0x7d934a03,0x3f800000,2 +np.float32,0x8028bf1e,0x8028bf1e,2 +np.float32,0x7f0800b7,0x3f800000,2 +np.float32,0xfe04825c,0xbf800000,2 +np.float32,0x807210ac,0x807210ac,2 +np.float32,0x3f7faf7c,0x3f42d5fd,2 +np.float32,0x3e04a543,0x3e03e899,2 +np.float32,0x3e98ea15,0x3e94863e,2 +np.float32,0x3d2a2e48,0x3d2a153b,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0x20a488,0x20a488,2 +np.float32,0x3f6ba86a,0x3f39e51a,2 +np.float32,0x0,0x0,2 +np.float32,0x3e892ddd,0x3e85fcfe,2 +np.float32,0x3e2da627,0x3e2c00e0,2 +np.float32,0xff000a50,0xbf800000,2 +np.float32,0x3eb749f4,0x3eafd739,2 +np.float32,0x8024c0ae,0x8024c0ae,2 +np.float32,0xfc8f3b40,0xbf800000,2 +np.float32,0xbf685fc7,0xbf385405,2 +np.float32,0x3f1510e6,0x3f063a4f,2 +np.float32,0x3f68e8ad,0x3f3895d8,2 +np.float32,0x3dba8608,0x3dba0271,2 +np.float32,0xbf16ea10,0xbf079017,2 +np.float32,0xb3928,0xb3928,2 +np.float32,0xfe447c00,0xbf800000,2 +np.float32,0x3db9cd57,0x3db94b45,2 +np.float32,0x803b66b0,0x803b66b0,2 +np.float32,0x805b5e02,0x805b5e02,2 +np.float32,0x7ec93f61,0x3f800000,2 +np.float32,0x8005a126,0x8005a126,2 +np.float32,0x6d8888,0x6d8888,2 +np.float32,0x3e21b7de,0x3e206314,2 +np.float32,0xbec9c31e,0xbebfedc2,2 +np.float32,0xbea88aa8,0xbea2b4e5,2 +np.float32,0x3d8fc310,0x3d8f86bb,2 +np.float32,0xbf3cc68a,0xbf20a8b8,2 +np.float32,0x432690,0x432690,2 +np.float32,0xbe51d514,0xbe4ef1a3,2 +np.float32,0xbcda6d20,0xbcda5fe1,2 +np.float32,0xfe24e458,0xbf800000,2 +np.float32,0xfedc8c14,0xbf800000,2 +np.float32,0x7f7e9bd4,0x3f800000,2 +np.float32,0x3ebcc880,0x3eb4ab44,2 +np.float32,0xbe0aa490,0xbe09cd44,2 +np.float32,0x3dc9158c,0x3dc870c3,2 +np.float32,0x3e5c319e,0x3e58dc90,2 +np.float32,0x1d4527,0x1d4527,2 +np.float32,0x2dbf5,0x2dbf5,2 +np.float32,0xbf1f121f,0xbf0d5534,2 +np.float32,0x7e3e9ab5,0x3f800000,2 +np.float32,0x7f74b5c1,0x3f800000,2 +np.float32,0xbf6321ba,0xbf35c42b,2 +np.float32,0xbe5c7488,0xbe591c79,2 +np.float32,0x7e7b02cd,0x3f800000,2 +np.float32,0xfe7cbfa4,0xbf800000,2 +np.float32,0xbeace360,0xbea69a86,2 +np.float32,0x7e149b00,0x3f800000,2 +np.float32,0xbf61a700,0xbf35079a,2 +np.float32,0x7eb592a7,0x3f800000,2 +np.float32,0x3f2105e6,0x3f0eaf30,2 +np.float32,0xfd997a88,0xbf800000,2 +np.float32,0xff5d093b,0xbf800000,2 +np.float32,0x63aede,0x63aede,2 +np.float32,0x6907ee,0x6907ee,2 +np.float32,0xbf7578ee,0xbf3e680f,2 +np.float32,0xfea971e8,0xbf800000,2 +np.float32,0x3f21d0f5,0x3f0f3aed,2 +np.float32,0x3a50e2,0x3a50e2,2 +np.float32,0x7f0f5b1e,0x3f800000,2 +np.float32,0x805b9765,0x805b9765,2 +np.float32,0xbe764ab8,0xbe71a664,2 +np.float32,0x3eafac7f,0x3ea91701,2 +np.float32,0x807f4130,0x807f4130,2 +np.float32,0x7c5f31,0x7c5f31,2 +np.float32,0xbdbe0e30,0xbdbd8300,2 +np.float32,0x7ecfe4e0,0x3f800000,2 +np.float32,0xff7cb628,0xbf800000,2 +np.float32,0xff1842bc,0xbf800000,2 +np.float32,0xfd4163c0,0xbf800000,2 +np.float32,0x800e11f7,0x800e11f7,2 +np.float32,0x7f3adec8,0x3f800000,2 +np.float32,0x7f597514,0x3f800000,2 +np.float32,0xbe986e14,0xbe9414a4,2 +np.float32,0x800fa9d7,0x800fa9d7,2 +np.float32,0xff5b79c4,0xbf800000,2 +np.float32,0x80070565,0x80070565,2 +np.float32,0xbee5628e,0xbed72d60,2 +np.float32,0x3f438ef2,0x3f24b3ca,2 +np.float32,0xcda91,0xcda91,2 +np.float32,0x7e64151a,0x3f800000,2 +np.float32,0xbe95d584,0xbe91b2c7,2 +np.float32,0x8022c2a1,0x8022c2a1,2 +np.float32,0x7e7097bf,0x3f800000,2 +np.float32,0x80139035,0x80139035,2 +np.float32,0x804de2cb,0x804de2cb,2 +np.float32,0xfde5d178,0xbf800000,2 +np.float32,0x6d238,0x6d238,2 +np.float32,0x807abedc,0x807abedc,2 +np.float32,0x3f450a12,0x3f259129,2 +np.float32,0x3ef1c120,0x3ee141f2,2 +np.float32,0xfeb64dae,0xbf800000,2 +np.float32,0x8001732c,0x8001732c,2 +np.float32,0x3f76062e,0x3f3ea711,2 +np.float32,0x3eddd550,0x3ed0ebc8,2 +np.float32,0xff5ca1d4,0xbf800000,2 +np.float32,0xbf49dc5e,0xbf285673,2 +np.float32,0x7e9e5438,0x3f800000,2 +np.float32,0x7e83625e,0x3f800000,2 +np.float32,0x3f5dc41c,0x3f3310da,2 +np.float32,0x3f583efa,0x3f30342f,2 +np.float32,0xbe26bf88,0xbe254a2d,2 +np.float32,0xff1e0beb,0xbf800000,2 +np.float32,0xbe2244c8,0xbe20ec86,2 +np.float32,0xff0b1630,0xbf800000,2 +np.float32,0xff338dd6,0xbf800000,2 +np.float32,0x3eafc22c,0x3ea92a51,2 +np.float32,0x800ea07f,0x800ea07f,2 +np.float32,0x3f46f006,0x3f26aa7e,2 +np.float32,0x3e5f57cd,0x3e5bde16,2 +np.float32,0xbf1b2d8e,0xbf0a9a93,2 +np.float32,0xfeacdbe0,0xbf800000,2 +np.float32,0x7e5ea4bc,0x3f800000,2 +np.float32,0xbf51cbe2,0xbf2cc027,2 +np.float32,0x8073644c,0x8073644c,2 +np.float32,0xff2d6bfe,0xbf800000,2 +np.float32,0x3f65f0f6,0x3f37260a,2 +np.float32,0xff4b37a6,0xbf800000,2 +np.float32,0x712df7,0x712df7,2 +np.float32,0x7f71ef17,0x3f800000,2 +np.float32,0x8042245c,0x8042245c,2 +np.float32,0x3e5dde7b,0x3e5a760d,2 +np.float32,0x8069317d,0x8069317d,2 +np.float32,0x807932dd,0x807932dd,2 +np.float32,0x802f847e,0x802f847e,2 +np.float32,0x7e9300,0x7e9300,2 +np.float32,0x8040b4ab,0x8040b4ab,2 +np.float32,0xff76ef8e,0xbf800000,2 +np.float32,0x4aae3a,0x4aae3a,2 +np.float32,0x8058de73,0x8058de73,2 +np.float32,0x7e4d58c0,0x3f800000,2 +np.float32,0x3d811b30,0x3d80ef79,2 +np.float32,0x7ec952cc,0x3f800000,2 +np.float32,0xfe162b1c,0xbf800000,2 +np.float32,0x3f0f1187,0x3f01d367,2 +np.float32,0xbf2f3458,0xbf182878,2 +np.float32,0x5ceb14,0x5ceb14,2 +np.float32,0xbec29476,0xbeb9b939,2 +np.float32,0x3e71f943,0x3e6d9176,2 +np.float32,0x3ededefc,0x3ed1c909,2 +np.float32,0x805df6ac,0x805df6ac,2 +np.float32,0x3e5ae2c8,0x3e579ca8,2 +np.float32,0x3f6ad2c3,0x3f397fdf,2 +np.float32,0x7d5f94d3,0x3f800000,2 +np.float32,0xbeec7fe4,0xbedd0037,2 +np.float32,0x3f645304,0x3f365b0d,2 +np.float32,0xbf69a087,0xbf38edef,2 +np.float32,0x8025102e,0x8025102e,2 +np.float32,0x800db486,0x800db486,2 +np.float32,0x4df6c7,0x4df6c7,2 +np.float32,0x806d8cdd,0x806d8cdd,2 +np.float32,0x7f0c78cc,0x3f800000,2 +np.float32,0x7e1cf70b,0x3f800000,2 +np.float32,0x3e0ae570,0x3e0a0cf7,2 +np.float32,0x80176ef8,0x80176ef8,2 +np.float32,0x3f38b60c,0x3f1e2bbb,2 +np.float32,0x3d3071e0,0x3d3055f5,2 +np.float32,0x3ebfcfdd,0x3eb750a9,2 +np.float32,0xfe2cdec0,0xbf800000,2 +np.float32,0x7eeb2eed,0x3f800000,2 +np.float32,0x8026c904,0x8026c904,2 +np.float32,0xbec79bde,0xbebe133a,2 +np.float32,0xbf7dfab6,0xbf421d47,2 +np.float32,0x805b3cfd,0x805b3cfd,2 +np.float32,0xfdfcfb68,0xbf800000,2 +np.float32,0xbd537ec0,0xbd534eaf,2 +np.float32,0x52ce73,0x52ce73,2 +np.float32,0xfeac6ea6,0xbf800000,2 +np.float32,0x3f2c2990,0x3f162d41,2 +np.float32,0x3e3354e0,0x3e318539,2 +np.float32,0x802db22b,0x802db22b,2 +np.float32,0x7f0faa83,0x3f800000,2 +np.float32,0x7f10e161,0x3f800000,2 +np.float32,0x7f165c60,0x3f800000,2 +np.float32,0xbf5a756f,0xbf315c82,2 +np.float32,0x7f5a4b68,0x3f800000,2 +np.float32,0xbd77fbf0,0xbd77ae7c,2 +np.float32,0x65d83c,0x65d83c,2 +np.float32,0x3e5f28,0x3e5f28,2 +np.float32,0x8040ec92,0x8040ec92,2 +np.float32,0xbf2b41a6,0xbf1594d5,2 +np.float32,0x7f2f88f1,0x3f800000,2 +np.float32,0xfdb64ab8,0xbf800000,2 +np.float32,0xbf7a3ff1,0xbf4082f5,2 +np.float32,0x1948fc,0x1948fc,2 +np.float32,0x802c1039,0x802c1039,2 +np.float32,0x80119274,0x80119274,2 +np.float32,0x7e885d7b,0x3f800000,2 +np.float32,0xfaf6a,0xfaf6a,2 +np.float32,0x3eba28c4,0x3eb25e1d,2 +np.float32,0x3e4df370,0x3e4b37da,2 +np.float32,0xbf19eff6,0xbf09b97d,2 +np.float32,0xbeddd3c6,0xbed0ea7f,2 +np.float32,0xff6fc971,0xbf800000,2 +np.float32,0x7e93de29,0x3f800000,2 +np.float32,0x3eb12332,0x3eaa6485,2 +np.float32,0x3eb7c6e4,0x3eb04563,2 +np.float32,0x4a67ee,0x4a67ee,2 +np.float32,0xff1cafde,0xbf800000,2 +np.float32,0x3f5e2812,0x3f3343da,2 +np.float32,0x3f060e04,0x3ef605d4,2 +np.float32,0x3e9027d8,0x3e8c76a6,2 +np.float32,0xe2d33,0xe2d33,2 +np.float32,0xff4c94fc,0xbf800000,2 +np.float32,0xbf574908,0xbf2fb26b,2 +np.float32,0xbf786c08,0xbf3fb68e,2 +np.float32,0x8011ecab,0x8011ecab,2 +np.float32,0xbf061c6a,0xbef61bfa,2 +np.float32,0x7eea5f9d,0x3f800000,2 +np.float32,0x3ea2e19c,0x3e9d99a5,2 +np.float32,0x8071550c,0x8071550c,2 +np.float32,0x41c70b,0x41c70b,2 +np.float32,0x80291fc8,0x80291fc8,2 +np.float32,0x43b1ec,0x43b1ec,2 +np.float32,0x32f5a,0x32f5a,2 +np.float32,0xbe9310ec,0xbe8f2692,2 +np.float32,0x7f75f6bf,0x3f800000,2 +np.float32,0x3e6642a6,0x3e6274d2,2 +np.float32,0x3ecb88e0,0x3ec1733f,2 +np.float32,0x804011b6,0x804011b6,2 +np.float32,0x80629cca,0x80629cca,2 +np.float32,0x8016b914,0x8016b914,2 +np.float32,0xbdd05fc0,0xbdcfa870,2 +np.float32,0x807b824d,0x807b824d,2 +np.float32,0xfeec2576,0xbf800000,2 +np.float32,0xbf54bf22,0xbf2e584c,2 +np.float32,0xbf185eb0,0xbf089b6b,2 +np.float32,0xfbc09480,0xbf800000,2 +np.float32,0x3f413054,0x3f234e25,2 +np.float32,0x7e9e32b8,0x3f800000,2 +np.float32,0x266296,0x266296,2 +np.float32,0x460284,0x460284,2 +np.float32,0x3eb0b056,0x3ea9fe5a,2 +np.float32,0x1a7be5,0x1a7be5,2 +np.float32,0x7f099895,0x3f800000,2 +np.float32,0x3f3614f0,0x3f1c88ef,2 +np.float32,0x7e757dc2,0x3f800000,2 +np.float32,0x801fc91e,0x801fc91e,2 +np.float32,0x3f5ce37d,0x3f329ddb,2 +np.float32,0x3e664d70,0x3e627f15,2 +np.float32,0xbf38ed78,0xbf1e4dfa,2 +np.float32,0xbf5c563d,0xbf325543,2 +np.float32,0xbe91cc54,0xbe8dfb24,2 +np.float32,0x3d767fbe,0x3d7633ac,2 +np.float32,0xbf6aeb40,0xbf398b7f,2 +np.float32,0x7f40508b,0x3f800000,2 +np.float32,0x2650df,0x2650df,2 +np.float32,0xbe8cea3c,0xbe897628,2 +np.float32,0x80515af8,0x80515af8,2 +np.float32,0x7f423986,0x3f800000,2 +np.float32,0xbdf250e8,0xbdf1310c,2 +np.float32,0xfe89288a,0xbf800000,2 +np.float32,0x397b3b,0x397b3b,2 +np.float32,0x7e5e91b0,0x3f800000,2 +np.float32,0x6866e2,0x6866e2,2 +np.float32,0x7f4d8877,0x3f800000,2 +np.float32,0x3e6c4a21,0x3e682ee3,2 +np.float32,0xfc3d5980,0xbf800000,2 +np.float32,0x7eae2cd0,0x3f800000,2 +np.float32,0xbf241222,0xbf10c579,2 +np.float32,0xfebc02de,0xbf800000,2 +np.float32,0xff6e0645,0xbf800000,2 +np.float32,0x802030b6,0x802030b6,2 +np.float32,0x7ef9a441,0x3f800000,2 +np.float32,0x3fcf9f,0x3fcf9f,2 +np.float32,0xbf0ccf13,0xbf0023cc,2 +np.float32,0xfefee688,0xbf800000,2 +np.float32,0xbf6c8e0c,0xbf3a5160,2 +np.float32,0xfe749c28,0xbf800000,2 +np.float32,0x7f7fffff,0x3f800000,2 +np.float32,0x58c1a0,0x58c1a0,2 +np.float32,0x3f2de0a1,0x3f174c17,2 +np.float32,0xbf5f7138,0xbf33eb03,2 +np.float32,0x3da15270,0x3da0fd3c,2 +np.float32,0x3da66560,0x3da607e4,2 +np.float32,0xbf306f9a,0xbf18f3c6,2 +np.float32,0x3e81a4de,0x3e7de293,2 +np.float32,0xbebb5fb8,0xbeb36f1a,2 +np.float32,0x14bf64,0x14bf64,2 +np.float32,0xbeac46c6,0xbea60e73,2 +np.float32,0xbdcdf210,0xbdcd4111,2 +np.float32,0x3f7e3cd9,0x3f42395e,2 +np.float32,0xbc4be640,0xbc4be38e,2 +np.float32,0xff5f53b4,0xbf800000,2 +np.float32,0xbf1315ae,0xbf04c90b,2 +np.float32,0x80000000,0x80000000,2 +np.float32,0xbf6a4149,0xbf393aaa,2 +np.float32,0x3f66b8ee,0x3f378772,2 +np.float32,0xff29293e,0xbf800000,2 +np.float32,0xbcc989c0,0xbcc97f58,2 +np.float32,0xbd9a1b70,0xbd99d125,2 +np.float32,0xfef353cc,0xbf800000,2 +np.float32,0xbdc30cf0,0xbdc27683,2 +np.float32,0xfdfd6768,0xbf800000,2 +np.float32,0x7ebac44c,0x3f800000,2 +np.float32,0xff453cd6,0xbf800000,2 +np.float32,0x3ef07720,0x3ee03787,2 +np.float32,0x80219c14,0x80219c14,2 +np.float32,0x805553a8,0x805553a8,2 +np.float32,0x80703928,0x80703928,2 +np.float32,0xff16d3a7,0xbf800000,2 +np.float32,0x3f1472bc,0x3f05c77b,2 +np.float32,0x3eeea37a,0x3edebcf9,2 +np.float32,0x3db801e6,0x3db7838d,2 +np.float32,0x800870d2,0x800870d2,2 +np.float32,0xbea1172c,0xbe9bfa32,2 +np.float32,0x3f1f5e7c,0x3f0d8a42,2 +np.float32,0x123cdb,0x123cdb,2 +np.float32,0x7f6e6b06,0x3f800000,2 +np.float32,0x3ed80573,0x3ecc0def,2 +np.float32,0xfea31b82,0xbf800000,2 +np.float32,0x6744e0,0x6744e0,2 +np.float32,0x695e8b,0x695e8b,2 +np.float32,0xbee3888a,0xbed5a67d,2 +np.float32,0x7f64bc2a,0x3f800000,2 +np.float32,0x7f204244,0x3f800000,2 +np.float32,0x7f647102,0x3f800000,2 +np.float32,0x3dd8ebc0,0x3dd81d03,2 +np.float32,0x801e7ab1,0x801e7ab1,2 +np.float32,0x7d034b56,0x3f800000,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0x80194193,0x80194193,2 +np.float32,0xfe31c8d4,0xbf800000,2 +np.float32,0x7fc0c4,0x7fc0c4,2 +np.float32,0xd95bf,0xd95bf,2 +np.float32,0x7e4f991d,0x3f800000,2 +np.float32,0x7fc563,0x7fc563,2 +np.float32,0xbe3fcccc,0xbe3d968a,2 +np.float32,0xfdaaa1c8,0xbf800000,2 +np.float32,0xbf48e449,0xbf27c949,2 +np.float32,0x3eb6c584,0x3eaf625e,2 +np.float32,0xbea35a74,0xbe9e0702,2 +np.float32,0x3eeab47a,0x3edb89d5,2 +np.float32,0xbed99556,0xbecd5de5,2 +np.float64,0xbfb94a81e0329500,0xbfb935867ba761fe,2 +np.float64,0xbfec132f1678265e,0xbfe6900eb097abc3,2 +np.float64,0x5685ea72ad0be,0x5685ea72ad0be,2 +np.float64,0xbfd74d3169ae9a62,0xbfd652e09b9daf32,2 +np.float64,0xbfe28df53d651bea,0xbfe0b8a7f50ab433,2 +np.float64,0x0,0x0,2 +np.float64,0xbfed912738bb224e,0xbfe749e3732831ae,2 +np.float64,0x7fcc6faed838df5d,0x3ff0000000000000,2 +np.float64,0xbfe95fe9a432bfd3,0xbfe51f6349919910,2 +np.float64,0xbfc4d5900b29ab20,0xbfc4a6f496179b8b,2 +np.float64,0xbfcd6025033ac04c,0xbfccded7b34b49b0,2 +np.float64,0xbfdfa655b43f4cac,0xbfdd4ca1e5bb9db8,2 +np.float64,0xe7ea5c7fcfd4c,0xe7ea5c7fcfd4c,2 +np.float64,0xffa5449ca42a8940,0xbff0000000000000,2 +np.float64,0xffe63294c1ac6529,0xbff0000000000000,2 +np.float64,0x7feb9cbae7f73975,0x3ff0000000000000,2 +np.float64,0x800eb07c3e3d60f9,0x800eb07c3e3d60f9,2 +np.float64,0x3fc95777e932aef0,0x3fc9040391e20c00,2 +np.float64,0x800736052dee6c0b,0x800736052dee6c0b,2 +np.float64,0x3fe9ae4afd335c96,0x3fe54b569bab45c7,2 +np.float64,0x7fee4c94217c9927,0x3ff0000000000000,2 +np.float64,0x80094b594bd296b3,0x80094b594bd296b3,2 +np.float64,0xffe5adbcee6b5b7a,0xbff0000000000000,2 +np.float64,0x3fecb8eab47971d5,0x3fe6e236be6f27e9,2 +np.float64,0x44956914892ae,0x44956914892ae,2 +np.float64,0xbfe3bd18ef677a32,0xbfe190bf1e07200c,2 +np.float64,0x800104e5b46209cc,0x800104e5b46209cc,2 +np.float64,0x8008fbcecf71f79e,0x8008fbcecf71f79e,2 +np.float64,0x800f0a46a0be148d,0x800f0a46a0be148d,2 +np.float64,0x7fe657a0702caf40,0x3ff0000000000000,2 +np.float64,0xffd3ff1a9027fe36,0xbff0000000000000,2 +np.float64,0x3fe78bc87bef1790,0x3fe40d2e63aaf029,2 +np.float64,0x7feeabdc4c7d57b8,0x3ff0000000000000,2 +np.float64,0xbfabd28d8437a520,0xbfabcb8ce03a0e56,2 +np.float64,0xbfddc3a133bb8742,0xbfdbc9fdb2594451,2 +np.float64,0x7fec911565b9222a,0x3ff0000000000000,2 +np.float64,0x71302604e2605,0x71302604e2605,2 +np.float64,0xee919d2bdd234,0xee919d2bdd234,2 +np.float64,0xbfc04fcff3209fa0,0xbfc0395a739a2ce4,2 +np.float64,0xffe4668a36e8cd14,0xbff0000000000000,2 +np.float64,0xbfeeafeebefd5fde,0xbfe7cd5f3d61a3ec,2 +np.float64,0x7fddb34219bb6683,0x3ff0000000000000,2 +np.float64,0xbfd2cac6cba5958e,0xbfd24520abb2ff36,2 +np.float64,0xbfb857e49630afc8,0xbfb8452d5064dec2,2 +np.float64,0x3fd2dbf90b25b7f2,0x3fd254eaf48484c2,2 +np.float64,0x800af65c94f5ecba,0x800af65c94f5ecba,2 +np.float64,0xa0eef4bf41ddf,0xa0eef4bf41ddf,2 +np.float64,0xffd8e0a4adb1c14a,0xbff0000000000000,2 +np.float64,0xffe858f6e870b1ed,0xbff0000000000000,2 +np.float64,0x3f94c2c308298580,0x3f94c208a4bb006d,2 +np.float64,0xffb45f0d7428be18,0xbff0000000000000,2 +np.float64,0x800ed4f43dbda9e9,0x800ed4f43dbda9e9,2 +np.float64,0x8002dd697e85bad4,0x8002dd697e85bad4,2 +np.float64,0x787ceab2f0f9e,0x787ceab2f0f9e,2 +np.float64,0xbfdff5fcc2bfebfa,0xbfdd8b736b128589,2 +np.float64,0x7fdb2b4294365684,0x3ff0000000000000,2 +np.float64,0xffe711e5e92e23cc,0xbff0000000000000,2 +np.float64,0x800b1c93f1163928,0x800b1c93f1163928,2 +np.float64,0x7fc524d2f22a49a5,0x3ff0000000000000,2 +np.float64,0x7fc88013b5310026,0x3ff0000000000000,2 +np.float64,0x3fe1a910c5e35222,0x3fe00fd779ebaa2a,2 +np.float64,0xbfb57ec9ca2afd90,0xbfb571e47ecb9335,2 +np.float64,0x7fd7594b20aeb295,0x3ff0000000000000,2 +np.float64,0x7fba4641ca348c83,0x3ff0000000000000,2 +np.float64,0xffe61393706c2726,0xbff0000000000000,2 +np.float64,0x7fd54f3c7baa9e78,0x3ff0000000000000,2 +np.float64,0xffe65ffb12ecbff6,0xbff0000000000000,2 +np.float64,0xbfba3b0376347608,0xbfba239cbbbd1b11,2 +np.float64,0x800200886d640112,0x800200886d640112,2 +np.float64,0xbfecf0ba4679e174,0xbfe6fd59de44a3ec,2 +np.float64,0xffe5c57e122b8afc,0xbff0000000000000,2 +np.float64,0x7fdaad0143355a02,0x3ff0000000000000,2 +np.float64,0x46ab32c08d567,0x46ab32c08d567,2 +np.float64,0x7ff8000000000000,0x7ff8000000000000,2 +np.float64,0xbfda7980fdb4f302,0xbfd90fa9c8066109,2 +np.float64,0x3fe237703c646ee0,0x3fe07969f8d8805a,2 +np.float64,0x8000e9fcfc21d3fb,0x8000e9fcfc21d3fb,2 +np.float64,0xbfdfe6e958bfcdd2,0xbfdd7f952fe87770,2 +np.float64,0xbd7baf217af8,0xbd7baf217af8,2 +np.float64,0xbfceba9e4b3d753c,0xbfce26e54359869a,2 +np.float64,0xb95a2caf72b46,0xb95a2caf72b46,2 +np.float64,0x3fb407e25a280fc5,0x3fb3fd71e457b628,2 +np.float64,0xa1da09d943b41,0xa1da09d943b41,2 +np.float64,0xbfe9c7271cf38e4e,0xbfe559296b471738,2 +np.float64,0x3fefae6170ff5cc3,0x3fe83c70ba82f0e1,2 +np.float64,0x7fe7375348ae6ea6,0x3ff0000000000000,2 +np.float64,0xffe18c9cc6e31939,0xbff0000000000000,2 +np.float64,0x800483d13a6907a3,0x800483d13a6907a3,2 +np.float64,0x7fe772a18caee542,0x3ff0000000000000,2 +np.float64,0xffefff64e7bffec9,0xbff0000000000000,2 +np.float64,0x7fcffc31113ff861,0x3ff0000000000000,2 +np.float64,0x3fd91e067e323c0d,0x3fd7e70bf365a7b3,2 +np.float64,0xb0a6673d614cd,0xb0a6673d614cd,2 +np.float64,0xffef9a297e3f3452,0xbff0000000000000,2 +np.float64,0xffe87cc15e70f982,0xbff0000000000000,2 +np.float64,0xffefd6ad8e7fad5a,0xbff0000000000000,2 +np.float64,0x7fe3aaa3a8a75546,0x3ff0000000000000,2 +np.float64,0xddab0341bb561,0xddab0341bb561,2 +np.float64,0x3fe996d6d7332dae,0x3fe53e3ed5be2922,2 +np.float64,0x3fdbe66a18b7ccd4,0x3fda41e6053c1512,2 +np.float64,0x8914775d1228f,0x8914775d1228f,2 +np.float64,0x3fe44621d4688c44,0x3fe1ef9c7225f8bd,2 +np.float64,0xffab29a2a4365340,0xbff0000000000000,2 +np.float64,0xffc8d4a0c431a940,0xbff0000000000000,2 +np.float64,0xbfd426e085284dc2,0xbfd382e2a9617b87,2 +np.float64,0xbfd3b0a525a7614a,0xbfd3176856faccf1,2 +np.float64,0x80036dedcb06dbdc,0x80036dedcb06dbdc,2 +np.float64,0x3feb13823b762704,0x3fe60ca3facdb696,2 +np.float64,0x3fd7246b7bae48d8,0x3fd62f08afded155,2 +np.float64,0x1,0x1,2 +np.float64,0x3fe8ade4b9715bc9,0x3fe4b97cc1387d27,2 +np.float64,0x3fdf2dbec53e5b7e,0x3fdcecfeee33de95,2 +np.float64,0x3fe4292bf9685258,0x3fe1dbb5a6704090,2 +np.float64,0xbfd21acbb8243598,0xbfd1a2ff42174cae,2 +np.float64,0xdd0d2d01ba1a6,0xdd0d2d01ba1a6,2 +np.float64,0x3fa3f3d2f427e7a0,0x3fa3f13d6f101555,2 +np.float64,0x7fdabf4aceb57e95,0x3ff0000000000000,2 +np.float64,0xd4d9e39ba9b3d,0xd4d9e39ba9b3d,2 +np.float64,0xffec773396f8ee66,0xbff0000000000000,2 +np.float64,0x3fa88cc79031198f,0x3fa887f7ade722ba,2 +np.float64,0xffe63a92066c7524,0xbff0000000000000,2 +np.float64,0xbfcf514e2e3ea29c,0xbfceb510e99aaa19,2 +np.float64,0x9d78c19d3af18,0x9d78c19d3af18,2 +np.float64,0x7fdd748bfbbae917,0x3ff0000000000000,2 +np.float64,0xffb3594c4626b298,0xbff0000000000000,2 +np.float64,0x80068ce5b32d19cc,0x80068ce5b32d19cc,2 +np.float64,0x3fec63d60e78c7ac,0x3fe6b85536e44217,2 +np.float64,0x80080bad4dd0175b,0x80080bad4dd0175b,2 +np.float64,0xbfec6807baf8d010,0xbfe6ba69740f9687,2 +np.float64,0x7fedbae0bbfb75c0,0x3ff0000000000000,2 +np.float64,0x8001cb7aa3c396f6,0x8001cb7aa3c396f6,2 +np.float64,0x7fe1f1f03563e3df,0x3ff0000000000000,2 +np.float64,0x7fd83d3978307a72,0x3ff0000000000000,2 +np.float64,0xbfc05ffe9d20bffc,0xbfc049464e3f0af2,2 +np.float64,0xfe6e053ffcdc1,0xfe6e053ffcdc1,2 +np.float64,0xbfd3bdf39d277be8,0xbfd32386edf12726,2 +np.float64,0x800f41b27bde8365,0x800f41b27bde8365,2 +np.float64,0xbfe2c98390e59307,0xbfe0e3c9260fe798,2 +np.float64,0xffdd6206bcbac40e,0xbff0000000000000,2 +np.float64,0x67f35ef4cfe6c,0x67f35ef4cfe6c,2 +np.float64,0x800337e02ae66fc1,0x800337e02ae66fc1,2 +np.float64,0x3fe0ff70afe1fee1,0x3fdf1f46434330df,2 +np.float64,0x3fd7e0a1df2fc144,0x3fd6d3f82c8031e4,2 +np.float64,0x8008da5cd1b1b4ba,0x8008da5cd1b1b4ba,2 +np.float64,0x80065ec9e4ccbd95,0x80065ec9e4ccbd95,2 +np.float64,0x3fe1d1e559a3a3cb,0x3fe02e4f146aa1ab,2 +np.float64,0x7feb7d2f0836fa5d,0x3ff0000000000000,2 +np.float64,0xbfcb33ce9736679c,0xbfcaccd431b205bb,2 +np.float64,0x800e6d0adf5cda16,0x800e6d0adf5cda16,2 +np.float64,0x7fe46f272ca8de4d,0x3ff0000000000000,2 +np.float64,0x4fdfc73e9fbfa,0x4fdfc73e9fbfa,2 +np.float64,0x800958a13112b143,0x800958a13112b143,2 +np.float64,0xbfea01f877f403f1,0xbfe579a541594247,2 +np.float64,0xeefaf599ddf5f,0xeefaf599ddf5f,2 +np.float64,0x80038766c5e70ece,0x80038766c5e70ece,2 +np.float64,0x7fd31bc28ba63784,0x3ff0000000000000,2 +np.float64,0xbfe4df77eee9bef0,0xbfe257abe7083b77,2 +np.float64,0x7fe6790c78acf218,0x3ff0000000000000,2 +np.float64,0xffe7c66884af8cd0,0xbff0000000000000,2 +np.float64,0x800115e36f422bc8,0x800115e36f422bc8,2 +np.float64,0x3fc601945d2c0329,0x3fc5cab917bb20bc,2 +np.float64,0x3fd6ac9546ad592b,0x3fd5c55437ec3508,2 +np.float64,0xa7bd59294f7ab,0xa7bd59294f7ab,2 +np.float64,0x8005c26c8b8b84da,0x8005c26c8b8b84da,2 +np.float64,0x8257501704aea,0x8257501704aea,2 +np.float64,0x5b12aae0b6256,0x5b12aae0b6256,2 +np.float64,0x800232fe02c465fd,0x800232fe02c465fd,2 +np.float64,0x800dae28f85b5c52,0x800dae28f85b5c52,2 +np.float64,0x3fdade1ac135bc36,0x3fd964a2000ace25,2 +np.float64,0x3fed72ca04fae594,0x3fe73b9170d809f9,2 +np.float64,0x7fc6397e2b2c72fb,0x3ff0000000000000,2 +np.float64,0x3fe1f5296d23ea53,0x3fe048802d17621e,2 +np.float64,0xffe05544b920aa89,0xbff0000000000000,2 +np.float64,0xbfdb2e1588365c2c,0xbfd9a7e4113c713e,2 +np.float64,0xbfed6a06fa3ad40e,0xbfe7376be60535f8,2 +np.float64,0xbfe31dcaf5e63b96,0xbfe120417c46cac1,2 +np.float64,0xbfb7ed67ae2fdad0,0xbfb7dba14af33b00,2 +np.float64,0xffd32bb7eb265770,0xbff0000000000000,2 +np.float64,0x80039877b04730f0,0x80039877b04730f0,2 +np.float64,0x3f832e5630265cac,0x3f832e316f47f218,2 +np.float64,0xffe7fa7f732ff4fe,0xbff0000000000000,2 +np.float64,0x9649b87f2c937,0x9649b87f2c937,2 +np.float64,0xffaee447183dc890,0xbff0000000000000,2 +np.float64,0x7fe4e02dd869c05b,0x3ff0000000000000,2 +np.float64,0x3fe1d35e7463a6bd,0x3fe02f67bd21e86e,2 +np.float64,0xffe57f40fe2afe82,0xbff0000000000000,2 +np.float64,0xbfea1362b93426c6,0xbfe5833421dba8fc,2 +np.float64,0xffe9c689fe338d13,0xbff0000000000000,2 +np.float64,0xffc592dd102b25bc,0xbff0000000000000,2 +np.float64,0x3fd283c7aba5078f,0x3fd203d61d1398c3,2 +np.float64,0x8001d6820243ad05,0x8001d6820243ad05,2 +np.float64,0x3fe0ad5991e15ab4,0x3fdea14ef0d47fbd,2 +np.float64,0x3fe3916f2ee722de,0x3fe1722684a9ffb1,2 +np.float64,0xffef9e54e03f3ca9,0xbff0000000000000,2 +np.float64,0x7fe864faebb0c9f5,0x3ff0000000000000,2 +np.float64,0xbfed3587c3fa6b10,0xbfe71e7112df8a68,2 +np.float64,0xbfdd9efc643b3df8,0xbfdbac3a16caf208,2 +np.float64,0xbfd5ac08feab5812,0xbfd4e14575a6e41b,2 +np.float64,0xffda90fae6b521f6,0xbff0000000000000,2 +np.float64,0x8001380ecf22701e,0x8001380ecf22701e,2 +np.float64,0x7fed266fa5fa4cde,0x3ff0000000000000,2 +np.float64,0xffec6c0ac3b8d815,0xbff0000000000000,2 +np.float64,0x3fe7de43c32fbc88,0x3fe43ef62821a5a6,2 +np.float64,0x800bf4ffc357ea00,0x800bf4ffc357ea00,2 +np.float64,0x3fe125c975624b93,0x3fdf59b2de3eff5d,2 +np.float64,0x8004714c1028e299,0x8004714c1028e299,2 +np.float64,0x3fef1bfbf5fe37f8,0x3fe7fd2ba1b63c8a,2 +np.float64,0x800cae15c3195c2c,0x800cae15c3195c2c,2 +np.float64,0x7fde708e083ce11b,0x3ff0000000000000,2 +np.float64,0x7fbcee5df639dcbb,0x3ff0000000000000,2 +np.float64,0x800b1467141628cf,0x800b1467141628cf,2 +np.float64,0x3fe525e0d36a4bc2,0x3fe286b6e59e30f5,2 +np.float64,0xffe987f8b8330ff1,0xbff0000000000000,2 +np.float64,0x7e0a8284fc151,0x7e0a8284fc151,2 +np.float64,0x8006f982442df305,0x8006f982442df305,2 +np.float64,0xbfd75a3cb62eb47a,0xbfd65e54cee981c9,2 +np.float64,0x258e91104b1d3,0x258e91104b1d3,2 +np.float64,0xbfecd0056779a00b,0xbfe6ed7ae97fff1b,2 +np.float64,0x7fc3a4f9122749f1,0x3ff0000000000000,2 +np.float64,0x6e2b1024dc563,0x6e2b1024dc563,2 +np.float64,0x800d575ad4daaeb6,0x800d575ad4daaeb6,2 +np.float64,0xbfceafb1073d5f64,0xbfce1c93023d8414,2 +np.float64,0xffe895cb5f312b96,0xbff0000000000000,2 +np.float64,0x7fe7811ed4ef023d,0x3ff0000000000000,2 +np.float64,0xbfd93f952f327f2a,0xbfd803e6b5576b99,2 +np.float64,0xffdd883a3fbb1074,0xbff0000000000000,2 +np.float64,0x7fee5624eefcac49,0x3ff0000000000000,2 +np.float64,0xbfe264bb2624c976,0xbfe09a9b7cc896e7,2 +np.float64,0xffef14b417be2967,0xbff0000000000000,2 +np.float64,0xbfecbd0d94397a1b,0xbfe6e43bef852d9f,2 +np.float64,0xbfe20d9e4ba41b3c,0xbfe05a98e05846d9,2 +np.float64,0x10000000000000,0x10000000000000,2 +np.float64,0x7fefde93f7bfbd27,0x3ff0000000000000,2 +np.float64,0x80076b9e232ed73d,0x80076b9e232ed73d,2 +np.float64,0xbfe80df52c701bea,0xbfe45b754b433792,2 +np.float64,0x7fe3b5a637676b4b,0x3ff0000000000000,2 +np.float64,0x2c81d14c5903b,0x2c81d14c5903b,2 +np.float64,0x80038945c767128c,0x80038945c767128c,2 +np.float64,0xffeebaf544bd75ea,0xbff0000000000000,2 +np.float64,0xffdb1867d2b630d0,0xbff0000000000000,2 +np.float64,0x3fe3376eaee66ede,0x3fe13285579763d8,2 +np.float64,0xffddf65ca43becba,0xbff0000000000000,2 +np.float64,0xffec8e3e04791c7b,0xbff0000000000000,2 +np.float64,0x80064f4bde2c9e98,0x80064f4bde2c9e98,2 +np.float64,0x7fe534a085ea6940,0x3ff0000000000000,2 +np.float64,0xbfcbabe31d3757c8,0xbfcb3f8e70adf7e7,2 +np.float64,0xbfe45ca11e28b942,0xbfe1ff04515ef809,2 +np.float64,0x65f4df02cbe9d,0x65f4df02cbe9d,2 +np.float64,0xb08b0cbb61162,0xb08b0cbb61162,2 +np.float64,0x3feae2e8b975c5d1,0x3fe5f302b5e8eda2,2 +np.float64,0x7fcf277ff93e4eff,0x3ff0000000000000,2 +np.float64,0x80010999c4821334,0x80010999c4821334,2 +np.float64,0xbfd7f65911afecb2,0xbfd6e6e9cd098f8b,2 +np.float64,0x800e0560ec3c0ac2,0x800e0560ec3c0ac2,2 +np.float64,0x7fec4152ba3882a4,0x3ff0000000000000,2 +np.float64,0xbfb5c77cd42b8ef8,0xbfb5ba1336084908,2 +np.float64,0x457ff1b68afff,0x457ff1b68afff,2 +np.float64,0x5323ec56a647e,0x5323ec56a647e,2 +np.float64,0xbfeed16cf8bda2da,0xbfe7dc49fc9ae549,2 +np.float64,0xffe8446106b088c1,0xbff0000000000000,2 +np.float64,0xffb93cd13c3279a0,0xbff0000000000000,2 +np.float64,0x7fe515c2aeea2b84,0x3ff0000000000000,2 +np.float64,0x80099df83f933bf1,0x80099df83f933bf1,2 +np.float64,0x7fb3a375562746ea,0x3ff0000000000000,2 +np.float64,0x7fcd7efa243afdf3,0x3ff0000000000000,2 +np.float64,0xffe40cddb12819bb,0xbff0000000000000,2 +np.float64,0x8008b68eecd16d1e,0x8008b68eecd16d1e,2 +np.float64,0x2aec688055d8e,0x2aec688055d8e,2 +np.float64,0xffe23750bc646ea1,0xbff0000000000000,2 +np.float64,0x5adacf60b5b7,0x5adacf60b5b7,2 +np.float64,0x7fefb29b1cbf6535,0x3ff0000000000000,2 +np.float64,0xbfeadbf90175b7f2,0xbfe5ef55e2194794,2 +np.float64,0xeaad2885d55a5,0xeaad2885d55a5,2 +np.float64,0xffd7939fba2f2740,0xbff0000000000000,2 +np.float64,0x3fd187ea3aa30fd4,0x3fd11af023472386,2 +np.float64,0xbf6eb579c03d6b00,0xbf6eb57052f47019,2 +np.float64,0x3fefb67b3bff6cf6,0x3fe83fe4499969ac,2 +np.float64,0xbfe5183aacea3076,0xbfe27da1aa0b61a0,2 +np.float64,0xbfb83e47a2307c90,0xbfb82bcb0e12db42,2 +np.float64,0x80088849b1b11094,0x80088849b1b11094,2 +np.float64,0x800ceeed7399dddb,0x800ceeed7399dddb,2 +np.float64,0x80097cd90892f9b2,0x80097cd90892f9b2,2 +np.float64,0x7ec73feefd8e9,0x7ec73feefd8e9,2 +np.float64,0x7fe3291de5a6523b,0x3ff0000000000000,2 +np.float64,0xbfd537086daa6e10,0xbfd4787af5f60653,2 +np.float64,0x800e8ed4455d1da9,0x800e8ed4455d1da9,2 +np.float64,0x800ef8d19cbdf1a3,0x800ef8d19cbdf1a3,2 +np.float64,0x800dc4fa3a5b89f5,0x800dc4fa3a5b89f5,2 +np.float64,0xaa8b85cd55171,0xaa8b85cd55171,2 +np.float64,0xffd67a5f40acf4be,0xbff0000000000000,2 +np.float64,0xbfb7496db22e92d8,0xbfb7390a48130861,2 +np.float64,0x3fd86a8e7ab0d51d,0x3fd74bfba0f72616,2 +np.float64,0xffb7f5b7fc2feb70,0xbff0000000000000,2 +np.float64,0xbfea0960a7f412c1,0xbfe57db6d0ff4191,2 +np.float64,0x375f4fc26ebeb,0x375f4fc26ebeb,2 +np.float64,0x800c537e70b8a6fd,0x800c537e70b8a6fd,2 +np.float64,0x800b3f4506d67e8a,0x800b3f4506d67e8a,2 +np.float64,0x7fe61f2d592c3e5a,0x3ff0000000000000,2 +np.float64,0xffefffffffffffff,0xbff0000000000000,2 +np.float64,0x8005d0bb84eba178,0x8005d0bb84eba178,2 +np.float64,0x800c78b0ec18f162,0x800c78b0ec18f162,2 +np.float64,0xbfc42cccfb285998,0xbfc4027392f66b0d,2 +np.float64,0x3fd8fdc73fb1fb8e,0x3fd7cb46f928153f,2 +np.float64,0x800c71754298e2eb,0x800c71754298e2eb,2 +np.float64,0x3fe4aa7a96a954f5,0x3fe233f5d3bc1352,2 +np.float64,0x7fd53841f6aa7083,0x3ff0000000000000,2 +np.float64,0x3fd0a887b8a15110,0x3fd04ac3b9c0d1ca,2 +np.float64,0x8007b8e164cf71c4,0x8007b8e164cf71c4,2 +np.float64,0xbfddc35c66bb86b8,0xbfdbc9c5dddfb014,2 +np.float64,0x6a3756fed46eb,0x6a3756fed46eb,2 +np.float64,0xffd3dcd05527b9a0,0xbff0000000000000,2 +np.float64,0xbfd7dc75632fb8ea,0xbfd6d0538b340a98,2 +np.float64,0x17501f822ea05,0x17501f822ea05,2 +np.float64,0xbfe1f98b99a3f317,0xbfe04bbf8f8b6cb3,2 +np.float64,0x66ea65d2cdd4d,0x66ea65d2cdd4d,2 +np.float64,0xbfd12241e2224484,0xbfd0bc62f46ea5e1,2 +np.float64,0x3fed6e6fb3fadcdf,0x3fe7398249097285,2 +np.float64,0x3fe0b5ebeba16bd8,0x3fdeae84b3000a47,2 +np.float64,0x66d1bce8cda38,0x66d1bce8cda38,2 +np.float64,0x3fdd728db3bae51b,0x3fdb880f28c52713,2 +np.float64,0xffb45dbe5228bb80,0xbff0000000000000,2 +np.float64,0x1ff8990c3ff14,0x1ff8990c3ff14,2 +np.float64,0x800a68e8f294d1d2,0x800a68e8f294d1d2,2 +np.float64,0xbfe4d08b84a9a117,0xbfe24da40bff6be7,2 +np.float64,0x3fe0177f0ee02efe,0x3fddb83c5971df51,2 +np.float64,0xffc56893692ad128,0xbff0000000000000,2 +np.float64,0x51b44f6aa368b,0x51b44f6aa368b,2 +np.float64,0x2258ff4e44b21,0x2258ff4e44b21,2 +np.float64,0x3fe913649e7226c9,0x3fe4f3f119530f53,2 +np.float64,0xffe3767df766ecfc,0xbff0000000000000,2 +np.float64,0xbfe62ae12fec55c2,0xbfe33108f1f22a94,2 +np.float64,0x7fb6a6308e2d4c60,0x3ff0000000000000,2 +np.float64,0xbfe00f2085e01e41,0xbfddab19b6fc77d1,2 +np.float64,0x3fb66447dc2cc890,0x3fb655b4f46844f0,2 +np.float64,0x3fd80238f6b00470,0x3fd6f143be1617d6,2 +np.float64,0xbfd05bfeb3a0b7fe,0xbfd0031ab3455e15,2 +np.float64,0xffc3a50351274a08,0xbff0000000000000,2 +np.float64,0xffd8f4241cb1e848,0xbff0000000000000,2 +np.float64,0xbfca72a88c34e550,0xbfca13ebe85f2aca,2 +np.float64,0x3fd47d683ba8fad0,0x3fd3d13f1176ed8c,2 +np.float64,0x3fb6418e642c831d,0x3fb6333ebe479ff2,2 +np.float64,0x800fde8e023fbd1c,0x800fde8e023fbd1c,2 +np.float64,0x8001fb01e323f605,0x8001fb01e323f605,2 +np.float64,0x3febb21ff9f76440,0x3fe65ed788d52fee,2 +np.float64,0x3fe47553ffe8eaa8,0x3fe20fe01f853603,2 +np.float64,0x7fca20b3f9344167,0x3ff0000000000000,2 +np.float64,0x3fe704f4ec6e09ea,0x3fe3ba7277201805,2 +np.float64,0xf864359df0c87,0xf864359df0c87,2 +np.float64,0x4d96b01c9b2d7,0x4d96b01c9b2d7,2 +np.float64,0x3fe8a09fe9f14140,0x3fe4b1c6a2d2e095,2 +np.float64,0xffc46c61b228d8c4,0xbff0000000000000,2 +np.float64,0x3fe680a837ed0150,0x3fe3679d6eeb6485,2 +np.float64,0xbfecedc20f39db84,0xbfe6fbe9ee978bf6,2 +np.float64,0x3fb2314eae24629d,0x3fb2297ba6d55d2d,2 +np.float64,0x3fe9f0b8e7b3e172,0x3fe57026eae36db3,2 +np.float64,0x80097a132ed2f427,0x80097a132ed2f427,2 +np.float64,0x800ae5a41955cb49,0x800ae5a41955cb49,2 +np.float64,0xbfd7527279aea4e4,0xbfd6577de356e1bd,2 +np.float64,0x3fe27d3e01e4fa7c,0x3fe0ac7dd96f9179,2 +np.float64,0x7fedd8cb01bbb195,0x3ff0000000000000,2 +np.float64,0x78f8695af1f0e,0x78f8695af1f0e,2 +np.float64,0x800d2d0e927a5a1d,0x800d2d0e927a5a1d,2 +np.float64,0xffe74b46fb2e968e,0xbff0000000000000,2 +np.float64,0xbfdd12d4c8ba25aa,0xbfdb39dae49e1c10,2 +np.float64,0xbfd6c14710ad828e,0xbfd5d79ef5a8d921,2 +np.float64,0x921f4e55243ea,0x921f4e55243ea,2 +np.float64,0x800b4e4c80969c99,0x800b4e4c80969c99,2 +np.float64,0x7fe08c6ab7e118d4,0x3ff0000000000000,2 +np.float64,0xbfed290014fa5200,0xbfe71871f7e859ed,2 +np.float64,0x8008c1d5c59183ac,0x8008c1d5c59183ac,2 +np.float64,0x3fd339e68c2673cd,0x3fd2aaff3f165a9d,2 +np.float64,0xbfdd20d8113a41b0,0xbfdb4553ea2cb2fb,2 +np.float64,0x3fe52a25deea544c,0x3fe2898d5bf4442c,2 +np.float64,0x498602d4930c1,0x498602d4930c1,2 +np.float64,0x3fd8c450113188a0,0x3fd799b0b2a6c43c,2 +np.float64,0xbfd72bc2f2ae5786,0xbfd6357e15ba7f70,2 +np.float64,0xbfd076188ea0ec32,0xbfd01b8fce44d1af,2 +np.float64,0x9aace1713559c,0x9aace1713559c,2 +np.float64,0x8008a730e8914e62,0x8008a730e8914e62,2 +np.float64,0x7fe9e9a3d833d347,0x3ff0000000000000,2 +np.float64,0x800d3a0d69da741b,0x800d3a0d69da741b,2 +np.float64,0xbfe3e28a29e7c514,0xbfe1aad7643a2d19,2 +np.float64,0x7fe9894c71331298,0x3ff0000000000000,2 +np.float64,0xbfe7c6acb5ef8d5a,0xbfe430c9e258ce62,2 +np.float64,0xffb5a520a62b4a40,0xbff0000000000000,2 +np.float64,0x7fc02109ae204212,0x3ff0000000000000,2 +np.float64,0xb5c58f196b8b2,0xb5c58f196b8b2,2 +np.float64,0x3feb4ee82e769dd0,0x3fe62bae9a39d8b1,2 +np.float64,0x3fec5c3cf278b87a,0x3fe6b49000f12441,2 +np.float64,0x81f64b8103eca,0x81f64b8103eca,2 +np.float64,0xbfeab00d73f5601b,0xbfe5d7f755ab73d9,2 +np.float64,0x3fd016bf28a02d7e,0x3fcf843ea23bcd3c,2 +np.float64,0xbfa1db617423b6c0,0xbfa1d9872ddeb5a8,2 +np.float64,0x3fe83c879d70790f,0x3fe4771502d8f012,2 +np.float64,0x6b267586d64cf,0x6b267586d64cf,2 +np.float64,0x3fc91b6d3f3236d8,0x3fc8ca3eb4da25a9,2 +np.float64,0x7fd4e3f8f3a9c7f1,0x3ff0000000000000,2 +np.float64,0x800a75899214eb14,0x800a75899214eb14,2 +np.float64,0x7fdb1f2e07b63e5b,0x3ff0000000000000,2 +np.float64,0xffe7805a11ef00b4,0xbff0000000000000,2 +np.float64,0x3fc8e1b88a31c371,0x3fc892af45330818,2 +np.float64,0xbfe809fe447013fc,0xbfe45918f07da4d9,2 +np.float64,0xbfeb9d7f2ab73afe,0xbfe65446bfddc792,2 +np.float64,0x3fb47f0a5c28fe15,0x3fb473db9113e880,2 +np.float64,0x800a17ae3cb42f5d,0x800a17ae3cb42f5d,2 +np.float64,0xf5540945eaa81,0xf5540945eaa81,2 +np.float64,0xbfe577fc26aaeff8,0xbfe2bcfbf2cf69ff,2 +np.float64,0xbfb99b3e06333680,0xbfb98577b88e0515,2 +np.float64,0x7fd9290391b25206,0x3ff0000000000000,2 +np.float64,0x7fe1aa62ffa354c5,0x3ff0000000000000,2 +np.float64,0x7b0189a0f604,0x7b0189a0f604,2 +np.float64,0x3f9000ed602001db,0x3f900097fe168105,2 +np.float64,0x3fd576128d2aec25,0x3fd4b1002c92286f,2 +np.float64,0xffecc98ece79931d,0xbff0000000000000,2 +np.float64,0x800a1736c7f42e6e,0x800a1736c7f42e6e,2 +np.float64,0xbfed947548bb28eb,0xbfe74b71479ae739,2 +np.float64,0xa45c032148b9,0xa45c032148b9,2 +np.float64,0xbfc13d011c227a04,0xbfc1228447de5e9f,2 +np.float64,0xffed8baa6ebb1754,0xbff0000000000000,2 +np.float64,0x800ea2de243d45bc,0x800ea2de243d45bc,2 +np.float64,0x8001396be52272d9,0x8001396be52272d9,2 +np.float64,0xd018d1cda031a,0xd018d1cda031a,2 +np.float64,0x7fe1fece1fe3fd9b,0x3ff0000000000000,2 +np.float64,0x8009ac484c135891,0x8009ac484c135891,2 +np.float64,0x3fc560ad132ac15a,0x3fc52e5a9479f08e,2 +np.float64,0x3fd6f80ebe2df01d,0x3fd607f70ce8e3f4,2 +np.float64,0xbfd3e69e82a7cd3e,0xbfd34887c2a40699,2 +np.float64,0x3fe232d9baa465b3,0x3fe0760a822ada0c,2 +np.float64,0x3fe769bbc6eed378,0x3fe3f872680f6631,2 +np.float64,0xffe63dbd952c7b7a,0xbff0000000000000,2 +np.float64,0x4e0c00da9c181,0x4e0c00da9c181,2 +np.float64,0xffeae4d89735c9b0,0xbff0000000000000,2 +np.float64,0x3fe030bcbb606179,0x3fdddfc66660bfce,2 +np.float64,0x7fe35ca40d66b947,0x3ff0000000000000,2 +np.float64,0xbfd45bd66628b7ac,0xbfd3b2e04bfe7866,2 +np.float64,0x3fd1f0be2323e17c,0x3fd17c1c340d7a48,2 +np.float64,0x3fd7123b6cae2478,0x3fd61f0675aa9ae1,2 +np.float64,0xbfe918a377723147,0xbfe4f6efe66f5714,2 +np.float64,0x7fc400356f28006a,0x3ff0000000000000,2 +np.float64,0x7fd2dead70a5bd5a,0x3ff0000000000000,2 +np.float64,0xffe9c28f81f3851e,0xbff0000000000000,2 +np.float64,0x3fd09b1ec7a1363e,0x3fd03e3894320140,2 +np.float64,0x7fe6e80c646dd018,0x3ff0000000000000,2 +np.float64,0x7fec3760a4786ec0,0x3ff0000000000000,2 +np.float64,0x309eb6ee613d8,0x309eb6ee613d8,2 +np.float64,0x800731cb0ece6397,0x800731cb0ece6397,2 +np.float64,0xbfdb0c553db618aa,0xbfd98b8a4680ee60,2 +np.float64,0x3fd603a52eac074c,0x3fd52f6b53de7455,2 +np.float64,0x9ecb821b3d971,0x9ecb821b3d971,2 +np.float64,0x3feb7d64dc36faca,0x3fe643c2754bb7f4,2 +np.float64,0xffeb94825ef72904,0xbff0000000000000,2 +np.float64,0x24267418484cf,0x24267418484cf,2 +np.float64,0xbfa6b2fbac2d65f0,0xbfa6af2dca5bfa6f,2 +np.float64,0x8010000000000000,0x8010000000000000,2 +np.float64,0xffe6873978ed0e72,0xbff0000000000000,2 +np.float64,0x800447934ba88f27,0x800447934ba88f27,2 +np.float64,0x3fef305f09fe60be,0x3fe806156b8ca47c,2 +np.float64,0xffd441c697a8838e,0xbff0000000000000,2 +np.float64,0xbfa7684f6c2ed0a0,0xbfa764238d34830c,2 +np.float64,0xffb2c976142592f0,0xbff0000000000000,2 +np.float64,0xbfcc9d1585393a2c,0xbfcc25756bcbca1f,2 +np.float64,0xbfd477bb1ba8ef76,0xbfd3cc1d2114e77e,2 +np.float64,0xbfed1559983a2ab3,0xbfe70f03afd994ee,2 +np.float64,0xbfeb51139036a227,0xbfe62ccf56bc7fff,2 +np.float64,0x7d802890fb006,0x7d802890fb006,2 +np.float64,0x800e00af777c015f,0x800e00af777c015f,2 +np.float64,0x800647ce128c8f9d,0x800647ce128c8f9d,2 +np.float64,0x800a26da91d44db6,0x800a26da91d44db6,2 +np.float64,0x3fdc727eddb8e4fe,0x3fdab5fd9db630b3,2 +np.float64,0x7fd06def2ba0dbdd,0x3ff0000000000000,2 +np.float64,0xffe23678c4a46cf1,0xbff0000000000000,2 +np.float64,0xbfe7198e42ee331c,0xbfe3c7326c9c7553,2 +np.float64,0xffae465f3c3c8cc0,0xbff0000000000000,2 +np.float64,0xff9aea7c5035d500,0xbff0000000000000,2 +np.float64,0xbfeae49c0f35c938,0xbfe5f3e9326cb08b,2 +np.float64,0x3f9a16f300342de6,0x3f9a1581212be50f,2 +np.float64,0x8d99e2c31b33d,0x8d99e2c31b33d,2 +np.float64,0xffd58af253ab15e4,0xbff0000000000000,2 +np.float64,0xbfd205cd25a40b9a,0xbfd18f97155f8b25,2 +np.float64,0xbfebe839bbf7d074,0xbfe67a6024e8fefe,2 +np.float64,0xbfe4fb3595a9f66b,0xbfe26a42f99819ea,2 +np.float64,0x800e867c739d0cf9,0x800e867c739d0cf9,2 +np.float64,0x8bc4274f17885,0x8bc4274f17885,2 +np.float64,0xaec8914b5d912,0xaec8914b5d912,2 +np.float64,0x7fd1d64473a3ac88,0x3ff0000000000000,2 +np.float64,0xbfe6d6f69cedaded,0xbfe39dd61bc7e23e,2 +np.float64,0x7fed05039d7a0a06,0x3ff0000000000000,2 +np.float64,0xbfc40eab0f281d58,0xbfc3e50d14b79265,2 +np.float64,0x45179aec8a2f4,0x45179aec8a2f4,2 +np.float64,0xbfe717e362ee2fc7,0xbfe3c62a95b07d13,2 +np.float64,0xbfe5b8df0d6b71be,0xbfe2e76c7ec5013d,2 +np.float64,0x5c67ba6eb8cf8,0x5c67ba6eb8cf8,2 +np.float64,0xbfda72ce4cb4e59c,0xbfd909fdc7ecfe20,2 +np.float64,0x7fdf59a1e2beb343,0x3ff0000000000000,2 +np.float64,0xc4f7897f89ef1,0xc4f7897f89ef1,2 +np.float64,0x8fcd0a351f9a2,0x8fcd0a351f9a2,2 +np.float64,0x3fb161761022c2ec,0x3fb15aa31c464de2,2 +np.float64,0x8008a985be71530c,0x8008a985be71530c,2 +np.float64,0x3fca4ddb5e349bb7,0x3fc9f0a3b60e49c6,2 +np.float64,0x7fcc10a2d9382145,0x3ff0000000000000,2 +np.float64,0x78902b3af1206,0x78902b3af1206,2 +np.float64,0x7fe1e2765f23c4ec,0x3ff0000000000000,2 +np.float64,0xc1d288cf83a51,0xc1d288cf83a51,2 +np.float64,0x7fe8af692bb15ed1,0x3ff0000000000000,2 +np.float64,0x80057d90fb8afb23,0x80057d90fb8afb23,2 +np.float64,0x3fdc136b8fb826d8,0x3fda6749582b2115,2 +np.float64,0x800ec8ea477d91d5,0x800ec8ea477d91d5,2 +np.float64,0x4c0f4796981ea,0x4c0f4796981ea,2 +np.float64,0xec34c4a5d8699,0xec34c4a5d8699,2 +np.float64,0x7fce343dfb3c687b,0x3ff0000000000000,2 +np.float64,0xbfc95a98a332b530,0xbfc90705b2cc2fec,2 +np.float64,0x800d118e1dba231c,0x800d118e1dba231c,2 +np.float64,0x3fd354f310a6a9e8,0x3fd2c3bb90054154,2 +np.float64,0xbfdac0d4fab581aa,0xbfd94bf37424928e,2 +np.float64,0x3fe7f5391fefea72,0x3fe44cb49d51985b,2 +np.float64,0xd4c3c329a9879,0xd4c3c329a9879,2 +np.float64,0x3fc53977692a72f0,0x3fc50835d85c9ed1,2 +np.float64,0xbfd6989538ad312a,0xbfd5b3a2c08511fe,2 +np.float64,0xbfe329f2906653e5,0xbfe128ec1525a1c0,2 +np.float64,0x7ff0000000000000,0x3ff0000000000000,2 +np.float64,0xbfea57c90974af92,0xbfe5a87b04aa3116,2 +np.float64,0x7fdfba94043f7527,0x3ff0000000000000,2 +np.float64,0x3feedabddafdb57c,0x3fe7e06c0661978d,2 +np.float64,0x4bd9f3b697b3f,0x4bd9f3b697b3f,2 +np.float64,0x3fdd15bbfc3a2b78,0x3fdb3c3b8d070f7e,2 +np.float64,0x3fbd89ccd23b13a0,0x3fbd686b825cff80,2 +np.float64,0x7ff4000000000000,0x7ffc000000000000,2 +np.float64,0x3f9baa8928375512,0x3f9ba8d01ddd5300,2 +np.float64,0x4a3ebdf2947d8,0x4a3ebdf2947d8,2 +np.float64,0x3fe698d5c06d31ac,0x3fe376dff48312c8,2 +np.float64,0xffd5323df12a647c,0xbff0000000000000,2 +np.float64,0xffea7f111174fe22,0xbff0000000000000,2 +np.float64,0x3feb4656a9b68cad,0x3fe627392eb2156f,2 +np.float64,0x7fc1260e9c224c1c,0x3ff0000000000000,2 +np.float64,0x80056e45e5eadc8d,0x80056e45e5eadc8d,2 +np.float64,0x7fd0958ef6a12b1d,0x3ff0000000000000,2 +np.float64,0x8001f85664e3f0ae,0x8001f85664e3f0ae,2 +np.float64,0x3fe553853beaa70a,0x3fe2a4f5e7c83558,2 +np.float64,0xbfeb33ce6276679d,0xbfe61d8ec9e5ff8c,2 +np.float64,0xbfd1b24e21a3649c,0xbfd14245df6065e9,2 +np.float64,0x3fe286fc40650df9,0x3fe0b395c8059429,2 +np.float64,0xffed378058fa6f00,0xbff0000000000000,2 +np.float64,0xbfd0c4a2d7a18946,0xbfd06509a434d6a0,2 +np.float64,0xbfea31d581f463ab,0xbfe593d976139f94,2 +np.float64,0xbfe0705c85e0e0b9,0xbfde42efa978eb0c,2 +np.float64,0xe4c4c339c9899,0xe4c4c339c9899,2 +np.float64,0x3fd68befa9ad17df,0x3fd5a870b3f1f83e,2 +np.float64,0x8000000000000001,0x8000000000000001,2 +np.float64,0x3fe294256965284b,0x3fe0bd271e22d86b,2 +np.float64,0x8005327a862a64f6,0x8005327a862a64f6,2 +np.float64,0xbfdb8155ce3702ac,0xbfd9ed9ef97920f8,2 +np.float64,0xbff0000000000000,0xbfe85efab514f394,2 +np.float64,0xffe66988f1ecd312,0xbff0000000000000,2 +np.float64,0x3fb178a85e22f150,0x3fb171b9fbf95f1d,2 +np.float64,0x7f829b900025371f,0x3ff0000000000000,2 +np.float64,0x8000000000000000,0x8000000000000000,2 +np.float64,0x8006cb77f60d96f1,0x8006cb77f60d96f1,2 +np.float64,0x3fe0c5d53aa18baa,0x3fdec7012ab92b42,2 +np.float64,0x77266426ee4cd,0x77266426ee4cd,2 +np.float64,0xbfec95f468392be9,0xbfe6d11428f60136,2 +np.float64,0x3fedbf532dfb7ea6,0x3fe75f8436dd1d58,2 +np.float64,0x8002fadd3f85f5bb,0x8002fadd3f85f5bb,2 +np.float64,0xbfefebaa8d3fd755,0xbfe8566c6aa90fba,2 +np.float64,0xffc7dd2b712fba58,0xbff0000000000000,2 +np.float64,0x7fe5d3a6e8aba74d,0x3ff0000000000000,2 +np.float64,0x2da061525b40d,0x2da061525b40d,2 +np.float64,0x7fcb9b9953373732,0x3ff0000000000000,2 +np.float64,0x2ca2f6fc59460,0x2ca2f6fc59460,2 +np.float64,0xffeb84b05af70960,0xbff0000000000000,2 +np.float64,0xffe551e86c6aa3d0,0xbff0000000000000,2 +np.float64,0xbfdb311311366226,0xbfd9aa6688faafb9,2 +np.float64,0xbfd4f3875629e70e,0xbfd43bcd73534c66,2 +np.float64,0x7fe95666f932accd,0x3ff0000000000000,2 +np.float64,0x3fc73dfb482e7bf7,0x3fc6fd70c20ebf60,2 +np.float64,0x800cd9e40939b3c8,0x800cd9e40939b3c8,2 +np.float64,0x3fb0c9fa422193f0,0x3fb0c3d38879a2ac,2 +np.float64,0xffd59a38372b3470,0xbff0000000000000,2 +np.float64,0x3fa8320ef4306420,0x3fa82d739e937d35,2 +np.float64,0x3fd517f16caa2fe4,0x3fd45c8de1e93b37,2 +np.float64,0xaed921655db24,0xaed921655db24,2 +np.float64,0x93478fb9268f2,0x93478fb9268f2,2 +np.float64,0x1615e28a2c2bd,0x1615e28a2c2bd,2 +np.float64,0xbfead23010f5a460,0xbfe5ea24d5d8f820,2 +np.float64,0x774a6070ee94d,0x774a6070ee94d,2 +np.float64,0x3fdf5874bd3eb0e9,0x3fdd0ef121dd915c,2 +np.float64,0x8004b25f53a964bf,0x8004b25f53a964bf,2 +np.float64,0xbfddacdd2ebb59ba,0xbfdbb78198fab36b,2 +np.float64,0x8008a3acf271475a,0x8008a3acf271475a,2 +np.float64,0xbfdb537c8736a6fa,0xbfd9c741038bb8f0,2 +np.float64,0xbfe56a133f6ad426,0xbfe2b3d5b8d259a1,2 +np.float64,0xffda1db531343b6a,0xbff0000000000000,2 +np.float64,0x3fcbe05f3a37c0be,0x3fcb71a54a64ddfb,2 +np.float64,0x7fe1ccaa7da39954,0x3ff0000000000000,2 +np.float64,0x3faeadd8343d5bb0,0x3faea475608860e6,2 +np.float64,0x3fe662ba1c2cc574,0x3fe354a6176e90df,2 +np.float64,0xffe4d49f4e69a93e,0xbff0000000000000,2 +np.float64,0xbfeadbc424f5b788,0xbfe5ef39dbe66343,2 +np.float64,0x99cf66f1339ed,0x99cf66f1339ed,2 +np.float64,0x33af77a2675f0,0x33af77a2675f0,2 +np.float64,0x7fec7b32ecf8f665,0x3ff0000000000000,2 +np.float64,0xffef3e44993e7c88,0xbff0000000000000,2 +np.float64,0xffe8f8ceac31f19c,0xbff0000000000000,2 +np.float64,0x7fe0d15b6da1a2b6,0x3ff0000000000000,2 +np.float64,0x4ba795c2974f3,0x4ba795c2974f3,2 +np.float64,0x3fe361aa37a6c354,0x3fe15079021d6b15,2 +np.float64,0xffe709714f6e12e2,0xbff0000000000000,2 +np.float64,0xffe7ea6a872fd4d4,0xbff0000000000000,2 +np.float64,0xffdb9441c8b72884,0xbff0000000000000,2 +np.float64,0xffd5e11ae9abc236,0xbff0000000000000,2 +np.float64,0xffe092a08b612540,0xbff0000000000000,2 +np.float64,0x3fe1f27e1ca3e4fc,0x3fe04685b5131207,2 +np.float64,0xbfe71ce1bdee39c4,0xbfe3c940809a7081,2 +np.float64,0xffe8c3aa68318754,0xbff0000000000000,2 +np.float64,0x800d4e2919da9c52,0x800d4e2919da9c52,2 +np.float64,0x7fe6c8bca76d9178,0x3ff0000000000000,2 +np.float64,0x7fced8751e3db0e9,0x3ff0000000000000,2 +np.float64,0xd61d0c8bac3a2,0xd61d0c8bac3a2,2 +np.float64,0x3fec57732938aee6,0x3fe6b22f15f38352,2 +np.float64,0xff9251cc7024a3a0,0xbff0000000000000,2 +np.float64,0xf4a68cb9e94d2,0xf4a68cb9e94d2,2 +np.float64,0x3feed76703bdaece,0x3fe7def0fc9a080c,2 +np.float64,0xbfe8971ff7712e40,0xbfe4ac3eb8ebff07,2 +np.float64,0x3fe4825f682904bf,0x3fe218c1952fe67d,2 +np.float64,0xbfd60f7698ac1eee,0xbfd539f0979b4b0c,2 +np.float64,0x3fcf0845993e1088,0x3fce7032f7180144,2 +np.float64,0x7fc83443f3306887,0x3ff0000000000000,2 +np.float64,0x3fe93123ae726247,0x3fe504e4fc437e89,2 +np.float64,0x3fbf9eb8363f3d70,0x3fbf75cdfa6828d5,2 +np.float64,0xbf8b45e5d0368bc0,0xbf8b457c29dfe1a9,2 +np.float64,0x8006c2853d0d850b,0x8006c2853d0d850b,2 +np.float64,0xffef26e25ffe4dc4,0xbff0000000000000,2 +np.float64,0x7fefffffffffffff,0x3ff0000000000000,2 +np.float64,0xbfde98f2c2bd31e6,0xbfdc761bfab1c4cb,2 +np.float64,0xffb725e6222e4bd0,0xbff0000000000000,2 +np.float64,0x800c63ead5d8c7d6,0x800c63ead5d8c7d6,2 +np.float64,0x3fea087e95f410fd,0x3fe57d3ab440706c,2 +np.float64,0xbfdf9f8a603f3f14,0xbfdd4742d77dfa57,2 +np.float64,0xfff0000000000000,0xbff0000000000000,2 +np.float64,0xbfcdc0841d3b8108,0xbfcd3a401debba9a,2 +np.float64,0x800f0c8f4f7e191f,0x800f0c8f4f7e191f,2 +np.float64,0x800ba6e75fd74dcf,0x800ba6e75fd74dcf,2 +np.float64,0x7fee4927e8bc924f,0x3ff0000000000000,2 +np.float64,0x3fadf141903be283,0x3fade8878d9d3551,2 +np.float64,0x3efb1a267df64,0x3efb1a267df64,2 +np.float64,0xffebf55f22b7eabe,0xbff0000000000000,2 +np.float64,0x7fbe8045663d008a,0x3ff0000000000000,2 +np.float64,0x3fefc0129f7f8026,0x3fe843f8b7d6cf38,2 +np.float64,0xbfe846b420f08d68,0xbfe47d1709e43937,2 +np.float64,0x7fe8e87043f1d0e0,0x3ff0000000000000,2 +np.float64,0x3fcfb718453f6e31,0x3fcf14ecee7b32b4,2 +np.float64,0x7fe4306b71a860d6,0x3ff0000000000000,2 +np.float64,0x7fee08459f7c108a,0x3ff0000000000000,2 +np.float64,0x3fed705165fae0a3,0x3fe73a66369c5700,2 +np.float64,0x7fd0e63f4da1cc7e,0x3ff0000000000000,2 +np.float64,0xffd1a40c2ea34818,0xbff0000000000000,2 +np.float64,0xbfa369795c26d2f0,0xbfa36718218d46b3,2 +np.float64,0xef70b9f5dee17,0xef70b9f5dee17,2 +np.float64,0x3fb50a0a6e2a1410,0x3fb4fdf27724560a,2 +np.float64,0x7fe30a0f6166141e,0x3ff0000000000000,2 +np.float64,0xbfd7b3ca7daf6794,0xbfd6accb81032b2d,2 +np.float64,0x3fc21dceb3243b9d,0x3fc1ff15d5d277a3,2 +np.float64,0x3fe483e445a907c9,0x3fe219ca0e269552,2 +np.float64,0x3fb2b1e2a22563c0,0x3fb2a96554900eaf,2 +np.float64,0x4b1ff6409641,0x4b1ff6409641,2 +np.float64,0xbfd92eabc9b25d58,0xbfd7f55d7776d64e,2 +np.float64,0x8003b8604c8770c1,0x8003b8604c8770c1,2 +np.float64,0x800d20a9df1a4154,0x800d20a9df1a4154,2 +np.float64,0xecf8a535d9f15,0xecf8a535d9f15,2 +np.float64,0x3fe92d15bab25a2b,0x3fe50296aa15ae85,2 +np.float64,0x800239c205a47385,0x800239c205a47385,2 +np.float64,0x3fc48664a9290cc8,0x3fc459d126320ef6,2 +np.float64,0x3fe7620625eec40c,0x3fe3f3bcbee3e8c6,2 +np.float64,0x3fd242ff4ca48600,0x3fd1c81ed7a971c8,2 +np.float64,0xbfe39bafcfa73760,0xbfe17959c7a279db,2 +np.float64,0x7fdcd2567239a4ac,0x3ff0000000000000,2 +np.float64,0x3fe5f2f292ebe5e6,0x3fe30d12f05e2752,2 +np.float64,0x7fda3819d1347033,0x3ff0000000000000,2 +np.float64,0xffca5b4d4334b69c,0xbff0000000000000,2 +np.float64,0xb8a2b7cd71457,0xb8a2b7cd71457,2 +np.float64,0x3fee689603fcd12c,0x3fe7ad4ace26d6dd,2 +np.float64,0x7fe26541a564ca82,0x3ff0000000000000,2 +np.float64,0x3fe6912ee66d225e,0x3fe3720d242c4d82,2 +np.float64,0xffe6580c75ecb018,0xbff0000000000000,2 +np.float64,0x7fe01a3370603466,0x3ff0000000000000,2 +np.float64,0xffe84e3f84b09c7e,0xbff0000000000000,2 +np.float64,0x3ff0000000000000,0x3fe85efab514f394,2 +np.float64,0x3fe214d4266429a8,0x3fe05fec03a3c247,2 +np.float64,0x3fd00aec5da015d8,0x3fcf6e070ad4ad62,2 +np.float64,0x800aac8631f5590d,0x800aac8631f5590d,2 +np.float64,0xbfe7c4f5f76f89ec,0xbfe42fc1c57b4a13,2 +np.float64,0xaf146c7d5e28e,0xaf146c7d5e28e,2 +np.float64,0xbfe57188b66ae312,0xbfe2b8be4615ef75,2 +np.float64,0xffef8cb8e1ff1971,0xbff0000000000000,2 +np.float64,0x8001daf8aa63b5f2,0x8001daf8aa63b5f2,2 +np.float64,0x3fdddcc339bbb986,0x3fdbde5f3783538b,2 +np.float64,0xdd8c92c3bb193,0xdd8c92c3bb193,2 +np.float64,0xbfe861a148f0c342,0xbfe48cf1d228a336,2 +np.float64,0xffe260a32e24c146,0xbff0000000000000,2 +np.float64,0x1f7474b43ee8f,0x1f7474b43ee8f,2 +np.float64,0x3fe81dbd89703b7c,0x3fe464d78df92b7b,2 +np.float64,0x7fed0101177a0201,0x3ff0000000000000,2 +np.float64,0x7fd8b419a8316832,0x3ff0000000000000,2 +np.float64,0x3fe93debccf27bd8,0x3fe50c27727917f0,2 +np.float64,0xe5ead05bcbd5a,0xe5ead05bcbd5a,2 +np.float64,0xbfebbbc4cff7778a,0xbfe663c4ca003bbf,2 +np.float64,0xbfea343eb474687e,0xbfe59529f73ea151,2 +np.float64,0x3fbe74a5963ce94b,0x3fbe50123ed05d8d,2 +np.float64,0x3fd31d3a5d263a75,0x3fd290c026cb38a5,2 +np.float64,0xbfd79908acaf3212,0xbfd695620e31c3c6,2 +np.float64,0xbfc26a350324d46c,0xbfc249f335f3e465,2 +np.float64,0xbfac38d5583871b0,0xbfac31866d12a45e,2 +np.float64,0x3fe40cea672819d5,0x3fe1c83754e72c92,2 +np.float64,0xbfa74770642e8ee0,0xbfa74355fcf67332,2 +np.float64,0x7fc60942d32c1285,0x3ff0000000000000,2 diff --git a/local_packages/numpy/_core/tests/examples/cython/checks.pyx b/local_packages/numpy/_core/tests/examples/cython/checks.pyx new file mode 100644 index 0000000..a263795 --- /dev/null +++ b/local_packages/numpy/_core/tests/examples/cython/checks.pyx @@ -0,0 +1,381 @@ +#cython: language_level=3 + +""" +Functions in this module give python-space wrappers for cython functions +exposed in numpy/__init__.pxd, so they can be tested in test_cython.py +""" +import numpy as np +cimport numpy as cnp +cnp.import_array() + + +def is_td64(obj): + return cnp.is_timedelta64_object(obj) + + +def is_dt64(obj): + return cnp.is_datetime64_object(obj) + + +def get_dt64_value(obj): + return cnp.get_datetime64_value(obj) + + +def get_td64_value(obj): + return cnp.get_timedelta64_value(obj) + + +def get_dt64_unit(obj): + return cnp.get_datetime64_unit(obj) + + +def is_integer(obj): + return isinstance(obj, (cnp.integer, int)) + + +def get_datetime_iso_8601_strlen(): + return cnp.get_datetime_iso_8601_strlen(0, cnp.NPY_FR_ns) + + +def convert_datetime64_to_datetimestruct(): + cdef: + cnp.npy_datetimestruct dts + cnp.PyArray_DatetimeMetaData meta + cnp.int64_t value = 1647374515260292 + # i.e. (time.time() * 10**6) at 2022-03-15 20:01:55.260292 UTC + + meta.base = cnp.NPY_FR_us + meta.num = 1 + cnp.convert_datetime64_to_datetimestruct(&meta, value, &dts) + return dts + + +def make_iso_8601_datetime(dt: "datetime"): + cdef: + cnp.npy_datetimestruct dts + char result[36] # 36 corresponds to NPY_FR_s passed below + int local = 0 + int utc = 0 + int tzoffset = 0 + + dts.year = dt.year + dts.month = dt.month + dts.day = dt.day + dts.hour = dt.hour + dts.min = dt.minute + dts.sec = dt.second + dts.us = dt.microsecond + dts.ps = dts.as = 0 + + cnp.make_iso_8601_datetime( + &dts, + result, + sizeof(result), + local, + utc, + cnp.NPY_FR_s, + tzoffset, + cnp.NPY_NO_CASTING, + ) + return result + + +cdef cnp.broadcast multiiter_from_broadcast_obj(object bcast): + cdef dict iter_map = { + 1: cnp.PyArray_MultiIterNew1, + 2: cnp.PyArray_MultiIterNew2, + 3: cnp.PyArray_MultiIterNew3, + 4: cnp.PyArray_MultiIterNew4, + 5: cnp.PyArray_MultiIterNew5, + } + arrays = [x.base for x in bcast.iters] + cdef cnp.broadcast result = iter_map[len(arrays)](*arrays) + return result + + +def get_multiiter_size(bcast: "broadcast"): + cdef cnp.broadcast multi = multiiter_from_broadcast_obj(bcast) + return multi.size + + +def get_multiiter_number_of_dims(bcast: "broadcast"): + cdef cnp.broadcast multi = multiiter_from_broadcast_obj(bcast) + return multi.nd + + +def get_multiiter_current_index(bcast: "broadcast"): + cdef cnp.broadcast multi = multiiter_from_broadcast_obj(bcast) + return multi.index + + +def get_multiiter_num_of_iterators(bcast: "broadcast"): + cdef cnp.broadcast multi = multiiter_from_broadcast_obj(bcast) + return multi.numiter + + +def get_multiiter_shape(bcast: "broadcast"): + cdef cnp.broadcast multi = multiiter_from_broadcast_obj(bcast) + return tuple([multi.dimensions[i] for i in range(bcast.nd)]) + + +def get_multiiter_iters(bcast: "broadcast"): + cdef cnp.broadcast multi = multiiter_from_broadcast_obj(bcast) + return tuple([multi.iters[i] for i in range(bcast.numiter)]) + + +def get_default_integer(): + if cnp.NPY_DEFAULT_INT == cnp.NPY_LONG: + return cnp.dtype("long") + if cnp.NPY_DEFAULT_INT == cnp.NPY_INTP: + return cnp.dtype("intp") + return None + +def get_ravel_axis(): + return cnp.NPY_RAVEL_AXIS + + +def conv_intp(cnp.intp_t val): + return val + + +def get_dtype_flags(cnp.dtype dtype): + return dtype.flags + + +cdef cnp.NpyIter* npyiter_from_nditer_obj(object it): + """A function to create a NpyIter struct from a nditer object. + + This function is only meant for testing purposes and only extracts the + necessary info from nditer to test the functionality of NpyIter methods + """ + cdef: + cnp.NpyIter* cit + cnp.PyArray_Descr* op_dtypes[3] + cnp.npy_uint32 op_flags[3] + cnp.PyArrayObject* ops[3] + cnp.npy_uint32 flags = 0 + + if it.has_index: + flags |= cnp.NPY_ITER_C_INDEX + if it.has_delayed_bufalloc: + flags |= cnp.NPY_ITER_BUFFERED | cnp.NPY_ITER_DELAY_BUFALLOC + if it.has_multi_index: + flags |= cnp.NPY_ITER_MULTI_INDEX + + # one of READWRITE, READONLY and WRTIEONLY at the minimum must be specified for op_flags + for i in range(it.nop): + op_flags[i] = cnp.NPY_ITER_READONLY + + for i in range(it.nop): + op_dtypes[i] = cnp.PyArray_DESCR(it.operands[i]) + ops[i] = it.operands[i] + + cit = cnp.NpyIter_MultiNew(it.nop, &ops[0], flags, cnp.NPY_KEEPORDER, + cnp.NPY_NO_CASTING, &op_flags[0], + NULL) + return cit + + +def get_npyiter_size(it: "nditer"): + cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it) + result = cnp.NpyIter_GetIterSize(cit) + cnp.NpyIter_Deallocate(cit) + return result + + +def get_npyiter_ndim(it: "nditer"): + cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it) + result = cnp.NpyIter_GetNDim(cit) + cnp.NpyIter_Deallocate(cit) + return result + + +def get_npyiter_nop(it: "nditer"): + cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it) + result = cnp.NpyIter_GetNOp(cit) + cnp.NpyIter_Deallocate(cit) + return result + + +def get_npyiter_operands(it: "nditer"): + cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it) + try: + arr = cnp.NpyIter_GetOperandArray(cit) + return tuple([arr[i] for i in range(it.nop)]) + finally: + cnp.NpyIter_Deallocate(cit) + + +def get_npyiter_itviews(it: "nditer"): + cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it) + result = tuple([cnp.NpyIter_GetIterView(cit, i) for i in range(it.nop)]) + cnp.NpyIter_Deallocate(cit) + return result + + +def get_npyiter_dtypes(it: "nditer"): + cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it) + try: + arr = cnp.NpyIter_GetDescrArray(cit) + return tuple([arr[i] for i in range(it.nop)]) + finally: + cnp.NpyIter_Deallocate(cit) + + +def npyiter_has_delayed_bufalloc(it: "nditer"): + cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it) + result = cnp.NpyIter_HasDelayedBufAlloc(cit) + cnp.NpyIter_Deallocate(cit) + return result + + +def npyiter_has_index(it: "nditer"): + cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it) + result = cnp.NpyIter_HasIndex(cit) + cnp.NpyIter_Deallocate(cit) + return result + + +def npyiter_has_multi_index(it: "nditer"): + cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it) + result = cnp.NpyIter_HasMultiIndex(cit) + cnp.NpyIter_Deallocate(cit) + return result + + +def test_get_multi_index_iter_next(it: "nditer", cnp.ndarray[cnp.float64_t, ndim=2] arr): + cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it) + cdef cnp.NpyIter_GetMultiIndexFunc _get_multi_index = \ + cnp.NpyIter_GetGetMultiIndex(cit, NULL) + cdef cnp.NpyIter_IterNextFunc _iternext = \ + cnp.NpyIter_GetIterNext(cit, NULL) + cnp.NpyIter_Deallocate(cit) + return 1 + + +def npyiter_has_finished(it: "nditer"): + cdef cnp.NpyIter* cit + try: + cit = npyiter_from_nditer_obj(it) + cnp.NpyIter_GotoIterIndex(cit, it.index) + return not (cnp.NpyIter_GetIterIndex(cit) < cnp.NpyIter_GetIterSize(cit)) + finally: + cnp.NpyIter_Deallocate(cit) + +def compile_fillwithbyte(): + # Regression test for gh-25878, mostly checks it compiles. + cdef cnp.npy_intp dims[2] + dims = (1, 2) + pos = cnp.PyArray_ZEROS(2, dims, cnp.NPY_UINT8, 0) + cnp.PyArray_FILLWBYTE(pos, 1) + return pos + +def inc2_cfloat_struct(cnp.ndarray[cnp.cfloat_t] arr): + # This works since we compile in C mode, it will fail in cpp mode + arr[1].real += 1 + arr[1].imag += 1 + # This works in both modes + arr[1].real = arr[1].real + 1 + arr[1].imag = arr[1].imag + 1 + + +def npystring_pack(arr): + cdef char *string = "Hello world" + cdef size_t size = 11 + + allocator = cnp.NpyString_acquire_allocator( + cnp.PyArray_DESCR(arr) + ) + + # copy string->packed_string, the pointer to the underlying array buffer + ret = cnp.NpyString_pack( + allocator, cnp.PyArray_DATA(arr), string, size, + ) + + cnp.NpyString_release_allocator(allocator) + return ret + + +def npystring_load(arr): + allocator = cnp.NpyString_acquire_allocator( + cnp.PyArray_DESCR(arr) + ) + + cdef cnp.npy_static_string sdata + sdata.size = 0 + sdata.buf = NULL + + cdef cnp.npy_packed_static_string *packed_string = cnp.PyArray_DATA(arr) + cdef int is_null = cnp.NpyString_load(allocator, packed_string, &sdata) + cnp.NpyString_release_allocator(allocator) + if is_null == -1: + raise ValueError("String unpacking failed.") + elif is_null == 1: + # String in the array buffer is the null string + return "" + else: + # Cython syntax for copying a c string to python bytestring: + # slice the char * by the length of the string + return sdata.buf[:sdata.size].decode('utf-8') + + +def npystring_pack_multiple(arr1, arr2): + cdef cnp.npy_string_allocator *allocators[2] + cdef cnp.PyArray_Descr *descrs[2] + descrs[0] = cnp.PyArray_DESCR(arr1) + descrs[1] = cnp.PyArray_DESCR(arr2) + + cnp.NpyString_acquire_allocators(2, descrs, allocators) + + # Write into the first element of each array + cdef int ret1 = cnp.NpyString_pack( + allocators[0], cnp.PyArray_DATA(arr1), "Hello world", 11, + ) + cdef int ret2 = cnp.NpyString_pack( + allocators[1], cnp.PyArray_DATA(arr2), "test this", 9, + ) + + # Write a null string into the last element + cdef cnp.npy_intp elsize = cnp.PyArray_ITEMSIZE(arr1) + cdef int ret3 = cnp.NpyString_pack_null( + allocators[0], + (cnp.PyArray_DATA(arr1) + 2*elsize), + ) + + cnp.NpyString_release_allocators(2, allocators) + if ret1 == -1 or ret2 == -1 or ret3 == -1: + return -1 + + return 0 + + +def npystring_allocators_other_types(arr1, arr2): + cdef cnp.npy_string_allocator *allocators[2] + cdef cnp.PyArray_Descr *descrs[2] + descrs[0] = cnp.PyArray_DESCR(arr1) + descrs[1] = cnp.PyArray_DESCR(arr2) + + cnp.NpyString_acquire_allocators(2, descrs, allocators) + + # None of the dtypes here are StringDType, so every allocator + # should be NULL upon acquisition. + cdef int ret = 0 + for allocator in allocators: + if allocator != NULL: + ret = -1 + break + + cnp.NpyString_release_allocators(2, allocators) + return ret + + +def check_npy_uintp_type_enum(): + # Regression test for gh-27890: cnp.NPY_UINTP was not defined. + # Cython would fail to compile this before gh-27890 was fixed. + return cnp.NPY_UINTP > 0 + + +def resize_refcheck_test(): + # see gh-30991 + a = np.array([[0, 1], [2, 3]], order='C') + a.resize((2, 1)) diff --git a/local_packages/numpy/_core/tests/examples/cython/meson.build b/local_packages/numpy/_core/tests/examples/cython/meson.build new file mode 100644 index 0000000..8362c33 --- /dev/null +++ b/local_packages/numpy/_core/tests/examples/cython/meson.build @@ -0,0 +1,43 @@ +project('checks', 'c', 'cython') + +py = import('python').find_installation(pure: false) + +cc = meson.get_compiler('c') +cy = meson.get_compiler('cython') + +# Keep synced with pyproject.toml +if not cy.version().version_compare('>=3.0.6') + error('tests requires Cython >= 3.0.6') +endif + +cython_args = [] +if cy.version().version_compare('>=3.1.0') + cython_args += ['-Xfreethreading_compatible=True'] +endif + +npy_include_path = run_command(py, [ + '-c', + 'import os; os.chdir(".."); import numpy; print(os.path.abspath(numpy.get_include()))' + ], check: true).stdout().strip() + +npy_path = run_command(py, [ + '-c', + 'import os; os.chdir(".."); import numpy; print(os.path.dirname(numpy.__file__).removesuffix("numpy"))' + ], check: true).stdout().strip() + +# TODO: This is a hack due to gh-25135, where cython may not find the right +# __init__.pyd file. +add_project_arguments('-I', npy_path, language : 'cython') + +py.extension_module( + 'checks', + 'checks.pyx', + install: false, + c_args: [ + '-DNPY_NO_DEPRECATED_API=0', # Cython still uses old NumPy C API + # Require 1.25+ to test datetime additions + '-DNPY_TARGET_VERSION=NPY_2_0_API_VERSION', + ], + include_directories: [npy_include_path], + cython_args: cython_args, +) diff --git a/local_packages/numpy/_core/tests/examples/cython/setup.py b/local_packages/numpy/_core/tests/examples/cython/setup.py new file mode 100644 index 0000000..eb57477 --- /dev/null +++ b/local_packages/numpy/_core/tests/examples/cython/setup.py @@ -0,0 +1,39 @@ +""" +Provide python-space access to the functions exposed in numpy/__init__.pxd +for testing. +""" + +import os +from distutils.core import setup + +import Cython +from Cython.Build import cythonize +from setuptools.extension import Extension + +import numpy as np +from numpy._utils import _pep440 + +macros = [ + ("NPY_NO_DEPRECATED_API", 0), + # Require 1.25+ to test datetime additions + ("NPY_TARGET_VERSION", "NPY_2_0_API_VERSION"), +] + +checks = Extension( + "checks", + sources=[os.path.join('.', "checks.pyx")], + include_dirs=[np.get_include()], + define_macros=macros, +) + +extensions = [checks] + +compiler_directives = {} +if _pep440.parse(Cython.__version__) >= _pep440.parse("3.1.0a0"): + compiler_directives['freethreading_compatible'] = True + +setup( + ext_modules=cythonize( + extensions, + compiler_directives=compiler_directives) +) diff --git a/local_packages/numpy/_core/tests/examples/limited_api/limited_api1.c b/local_packages/numpy/_core/tests/examples/limited_api/limited_api1.c new file mode 100644 index 0000000..115a3f3 --- /dev/null +++ b/local_packages/numpy/_core/tests/examples/limited_api/limited_api1.c @@ -0,0 +1,15 @@ +#include +#include +#include + +static PyModuleDef moduledef = { + .m_base = PyModuleDef_HEAD_INIT, + .m_name = "limited_api1" +}; + +PyMODINIT_FUNC PyInit_limited_api1(void) +{ + import_array(); + import_umath(); + return PyModule_Create(&moduledef); +} diff --git a/local_packages/numpy/_core/tests/examples/limited_api/limited_api2.pyx b/local_packages/numpy/_core/tests/examples/limited_api/limited_api2.pyx new file mode 100644 index 0000000..327d5b0 --- /dev/null +++ b/local_packages/numpy/_core/tests/examples/limited_api/limited_api2.pyx @@ -0,0 +1,11 @@ +#cython: language_level=3 + +""" +Make sure cython can compile in limited API mode (see meson.build) +""" + +cdef extern from "numpy/arrayobject.h": + pass +cdef extern from "numpy/arrayscalars.h": + pass + diff --git a/local_packages/numpy/_core/tests/examples/limited_api/limited_api_latest.c b/local_packages/numpy/_core/tests/examples/limited_api/limited_api_latest.c new file mode 100644 index 0000000..92d83ea --- /dev/null +++ b/local_packages/numpy/_core/tests/examples/limited_api/limited_api_latest.c @@ -0,0 +1,19 @@ +#include +#include +#include + +#if Py_LIMITED_API != PY_VERSION_HEX & 0xffff0000 + # error "Py_LIMITED_API not defined to Python major+minor version" +#endif + +static PyModuleDef moduledef = { + .m_base = PyModuleDef_HEAD_INIT, + .m_name = "limited_api_latest" +}; + +PyMODINIT_FUNC PyInit_limited_api_latest(void) +{ + import_array(); + import_umath(); + return PyModule_Create(&moduledef); +} diff --git a/local_packages/numpy/_core/tests/examples/limited_api/meson.build b/local_packages/numpy/_core/tests/examples/limited_api/meson.build new file mode 100644 index 0000000..2348b08 --- /dev/null +++ b/local_packages/numpy/_core/tests/examples/limited_api/meson.build @@ -0,0 +1,63 @@ +project( + 'checks', + 'c', 'cython', + meson_version: '>=1.8.3', +) + +py = import('python').find_installation(pure: false) + +cc = meson.get_compiler('c') +cy = meson.get_compiler('cython') + +# Keep synced with pyproject.toml +if not cy.version().version_compare('>=3.0.6') + error('tests requires Cython >= 3.0.6') +endif + +npy_include_path = run_command(py, [ + '-c', + 'import os; os.chdir(".."); import numpy; print(os.path.abspath(numpy.get_include()))' + ], check: true).stdout().strip() + +npy_path = run_command(py, [ + '-c', + 'import os; os.chdir(".."); import numpy; print(os.path.dirname(numpy.__file__).removesuffix("numpy"))' + ], check: true).stdout().strip() + +# TODO: This is a hack due to https://github.com/cython/cython/issues/5820, +# where cython may not find the right __init__.pyd file. +add_project_arguments('-I', npy_path, language : 'cython') + +py.extension_module( + 'limited_api1', + 'limited_api1.c', + c_args: [ + '-DNPY_NO_DEPRECATED_API=NPY_1_21_API_VERSION', + ], + include_directories: [npy_include_path], + limited_api: '3.9', +) + +py.extension_module( + 'limited_api_latest', + 'limited_api_latest.c', + c_args: [ + '-DNPY_NO_DEPRECATED_API=NPY_1_21_API_VERSION', + ], + include_directories: [npy_include_path], + limited_api: py.language_version(), +) + +py.extension_module( + 'limited_api2', + 'limited_api2.pyx', + install: false, + c_args: [ + '-DNPY_NO_DEPRECATED_API=0', + # Require 1.25+ to test datetime additions + '-DNPY_TARGET_VERSION=NPY_2_0_API_VERSION', + '-DCYTHON_LIMITED_API=1', + ], + include_directories: [npy_include_path], + limited_api: '3.9', +) diff --git a/local_packages/numpy/_core/tests/examples/limited_api/setup.py b/local_packages/numpy/_core/tests/examples/limited_api/setup.py new file mode 100644 index 0000000..16adcd1 --- /dev/null +++ b/local_packages/numpy/_core/tests/examples/limited_api/setup.py @@ -0,0 +1,24 @@ +""" +Build an example package using the limited Python C API. +""" + +import os + +from setuptools import Extension, setup + +import numpy as np + +macros = [("NPY_NO_DEPRECATED_API", 0), ("Py_LIMITED_API", "0x03060000")] + +limited_api = Extension( + "limited_api", + sources=[os.path.join('.', "limited_api.c")], + include_dirs=[np.get_include()], + define_macros=macros, +) + +extensions = [limited_api] + +setup( + ext_modules=extensions +) diff --git a/local_packages/numpy/_core/tests/test__exceptions.py b/local_packages/numpy/_core/tests/test__exceptions.py new file mode 100644 index 0000000..35782e7 --- /dev/null +++ b/local_packages/numpy/_core/tests/test__exceptions.py @@ -0,0 +1,90 @@ +""" +Tests of the ._exceptions module. Primarily for exercising the __str__ methods. +""" + +import pickle + +import pytest + +import numpy as np +from numpy.exceptions import AxisError + +_ArrayMemoryError = np._core._exceptions._ArrayMemoryError +_UFuncNoLoopError = np._core._exceptions._UFuncNoLoopError + +class TestArrayMemoryError: + def test_pickling(self): + """ Test that _ArrayMemoryError can be pickled """ + error = _ArrayMemoryError((1023,), np.dtype(np.uint8)) + res = pickle.loads(pickle.dumps(error)) + assert res._total_size == error._total_size + + def test_str(self): + e = _ArrayMemoryError((1023,), np.dtype(np.uint8)) + str(e) # not crashing is enough + + # testing these properties is easier than testing the full string repr + def test__size_to_string(self): + """ Test e._size_to_string """ + f = _ArrayMemoryError._size_to_string + Ki = 1024 + assert f(0) == '0 bytes' + assert f(1) == '1 bytes' + assert f(1023) == '1023 bytes' + assert f(Ki) == '1.00 KiB' + assert f(Ki + 1) == '1.00 KiB' + assert f(10 * Ki) == '10.0 KiB' + assert f(int(999.4 * Ki)) == '999. KiB' + assert f(int(1023.4 * Ki)) == '1023. KiB' + assert f(int(1023.5 * Ki)) == '1.00 MiB' + assert f(Ki * Ki) == '1.00 MiB' + + # 1023.9999 Mib should round to 1 GiB + assert f(int(Ki * Ki * Ki * 0.9999)) == '1.00 GiB' + assert f(Ki * Ki * Ki * Ki * Ki * Ki) == '1.00 EiB' + # larger than sys.maxsize, adding larger prefixes isn't going to help + # anyway. + assert f(Ki * Ki * Ki * Ki * Ki * Ki * 123456) == '123456. EiB' + + def test__total_size(self): + """ Test e._total_size """ + e = _ArrayMemoryError((1,), np.dtype(np.uint8)) + assert e._total_size == 1 + + e = _ArrayMemoryError((2, 4), np.dtype((np.uint64, 16))) + assert e._total_size == 1024 + + +class TestUFuncNoLoopError: + def test_pickling(self): + """ Test that _UFuncNoLoopError can be pickled """ + assert isinstance(pickle.dumps(_UFuncNoLoopError), bytes) + + +@pytest.mark.parametrize("args", [ + (2, 1, None), + (2, 1, "test_prefix"), + ("test message",), +]) +class TestAxisError: + def test_attr(self, args): + """Validate attribute types.""" + exc = AxisError(*args) + if len(args) == 1: + assert exc.axis is None + assert exc.ndim is None + else: + axis, ndim, *_ = args + assert exc.axis == axis + assert exc.ndim == ndim + + def test_pickling(self, args): + """Test that `AxisError` can be pickled.""" + exc = AxisError(*args) + exc2 = pickle.loads(pickle.dumps(exc)) + + assert type(exc) is type(exc2) + for name in ("axis", "ndim", "args"): + attr1 = getattr(exc, name) + attr2 = getattr(exc2, name) + assert attr1 == attr2, name diff --git a/local_packages/numpy/_core/tests/test_abc.py b/local_packages/numpy/_core/tests/test_abc.py new file mode 100644 index 0000000..aee1904 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_abc.py @@ -0,0 +1,54 @@ +import numbers + +import numpy as np +from numpy._core.numerictypes import sctypes +from numpy.testing import assert_ + + +class TestABC: + def test_abstract(self): + assert_(issubclass(np.number, numbers.Number)) + + assert_(issubclass(np.inexact, numbers.Complex)) + assert_(issubclass(np.complexfloating, numbers.Complex)) + assert_(issubclass(np.floating, numbers.Real)) + + assert_(issubclass(np.integer, numbers.Integral)) + assert_(issubclass(np.signedinteger, numbers.Integral)) + assert_(issubclass(np.unsignedinteger, numbers.Integral)) + + def test_floats(self): + for t in sctypes['float']: + assert_(isinstance(t(), numbers.Real), + f"{t.__name__} is not instance of Real") + assert_(issubclass(t, numbers.Real), + f"{t.__name__} is not subclass of Real") + assert_(not isinstance(t(), numbers.Rational), + f"{t.__name__} is instance of Rational") + assert_(not issubclass(t, numbers.Rational), + f"{t.__name__} is subclass of Rational") + + def test_complex(self): + for t in sctypes['complex']: + assert_(isinstance(t(), numbers.Complex), + f"{t.__name__} is not instance of Complex") + assert_(issubclass(t, numbers.Complex), + f"{t.__name__} is not subclass of Complex") + assert_(not isinstance(t(), numbers.Real), + f"{t.__name__} is instance of Real") + assert_(not issubclass(t, numbers.Real), + f"{t.__name__} is subclass of Real") + + def test_int(self): + for t in sctypes['int']: + assert_(isinstance(t(), numbers.Integral), + f"{t.__name__} is not instance of Integral") + assert_(issubclass(t, numbers.Integral), + f"{t.__name__} is not subclass of Integral") + + def test_uint(self): + for t in sctypes['uint']: + assert_(isinstance(t(), numbers.Integral), + f"{t.__name__} is not instance of Integral") + assert_(issubclass(t, numbers.Integral), + f"{t.__name__} is not subclass of Integral") diff --git a/local_packages/numpy/_core/tests/test_api.py b/local_packages/numpy/_core/tests/test_api.py new file mode 100644 index 0000000..216a2c7 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_api.py @@ -0,0 +1,655 @@ +import sys + +import pytest + +import numpy as np +import numpy._core.umath as ncu +from numpy._core._rational_tests import rational +from numpy.lib import stride_tricks +from numpy.testing import ( + HAS_REFCOUNT, + assert_, + assert_array_equal, + assert_equal, + assert_raises, +) + + +def test_array_array(): + tobj = type(object) + ones11 = np.ones((1, 1), np.float64) + tndarray = type(ones11) + # Test is_ndarray + assert_equal(np.array(ones11, dtype=np.float64), ones11) + if HAS_REFCOUNT: + old_refcount = sys.getrefcount(tndarray) + np.array(ones11) + assert_equal(old_refcount, sys.getrefcount(tndarray)) + + # test None + assert_equal(np.array(None, dtype=np.float64), + np.array(np.nan, dtype=np.float64)) + if HAS_REFCOUNT: + old_refcount = sys.getrefcount(tobj) + np.array(None, dtype=np.float64) + assert_equal(old_refcount, sys.getrefcount(tobj)) + + # test scalar + assert_equal(np.array(1.0, dtype=np.float64), + np.ones((), dtype=np.float64)) + if HAS_REFCOUNT: + old_refcount = sys.getrefcount(np.float64) + np.array(np.array(1.0, dtype=np.float64), dtype=np.float64) + assert_equal(old_refcount, sys.getrefcount(np.float64)) + + # test string + S2 = np.dtype((bytes, 2)) + S3 = np.dtype((bytes, 3)) + S5 = np.dtype((bytes, 5)) + assert_equal(np.array(b"1.0", dtype=np.float64), + np.ones((), dtype=np.float64)) + assert_equal(np.array(b"1.0").dtype, S3) + assert_equal(np.array(b"1.0", dtype=bytes).dtype, S3) + assert_equal(np.array(b"1.0", dtype=S2), np.array(b"1.")) + assert_equal(np.array(b"1", dtype=S5), np.ones((), dtype=S5)) + + # test string + U2 = np.dtype((str, 2)) + U3 = np.dtype((str, 3)) + U5 = np.dtype((str, 5)) + assert_equal(np.array("1.0", dtype=np.float64), + np.ones((), dtype=np.float64)) + assert_equal(np.array("1.0").dtype, U3) + assert_equal(np.array("1.0", dtype=str).dtype, U3) + assert_equal(np.array("1.0", dtype=U2), np.array("1.")) + assert_equal(np.array("1", dtype=U5), np.ones((), dtype=U5)) + + builtins = getattr(__builtins__, '__dict__', __builtins__) + assert_(hasattr(builtins, 'get')) + + # test memoryview + dat = np.array(memoryview(b'1.0'), dtype=np.float64) + assert_equal(dat, [49.0, 46.0, 48.0]) + assert_(dat.dtype.type is np.float64) + + dat = np.array(memoryview(b'1.0')) + assert_equal(dat, [49, 46, 48]) + assert_(dat.dtype.type is np.uint8) + + # test array interface + a = np.array(100.0, dtype=np.float64) + o = type("o", (object,), + {"__array_interface__": a.__array_interface__}) + assert_equal(np.array(o, dtype=np.float64), a) + + # test array_struct interface + a = np.array([(1, 4.0, 'Hello'), (2, 6.0, 'World')], + dtype=[('f0', int), ('f1', float), ('f2', str)]) + o = type("o", (object,), + {"__array_struct__": a.__array_struct__}) + # wasn't what I expected... is np.array(o) supposed to equal a ? + # instead we get an array([...], dtype=">V18") + assert_equal(bytes(np.array(o).data), bytes(a.data)) + + # test __array__ + def custom__array__(self, dtype=None, copy=None): + return np.array(100.0, dtype=dtype, copy=copy) + + o = type("o", (object,), {"__array__": custom__array__})() + assert_equal(np.array(o, dtype=np.float64), np.array(100.0, np.float64)) + + # test recursion + nested = 1.5 + for i in range(ncu.MAXDIMS): + nested = [nested] + + # no error + np.array(nested) + + # Exceeds recursion limit + assert_raises(ValueError, np.array, [nested], dtype=np.float64) + + # Try with lists... + # float32 + assert_equal(np.array([None] * 10, dtype=np.float32), + np.full((10,), np.nan, dtype=np.float32)) + assert_equal(np.array([[None]] * 10, dtype=np.float32), + np.full((10, 1), np.nan, dtype=np.float32)) + assert_equal(np.array([[None] * 10], dtype=np.float32), + np.full((1, 10), np.nan, dtype=np.float32)) + assert_equal(np.array([[None] * 10] * 10, dtype=np.float32), + np.full((10, 10), np.nan, dtype=np.float32)) + # float64 + assert_equal(np.array([None] * 10, dtype=np.float64), + np.full((10,), np.nan, dtype=np.float64)) + assert_equal(np.array([[None]] * 10, dtype=np.float64), + np.full((10, 1), np.nan, dtype=np.float64)) + assert_equal(np.array([[None] * 10], dtype=np.float64), + np.full((1, 10), np.nan, dtype=np.float64)) + assert_equal(np.array([[None] * 10] * 10, dtype=np.float64), + np.full((10, 10), np.nan, dtype=np.float64)) + + assert_equal(np.array([1.0] * 10, dtype=np.float64), + np.ones((10,), dtype=np.float64)) + assert_equal(np.array([[1.0]] * 10, dtype=np.float64), + np.ones((10, 1), dtype=np.float64)) + assert_equal(np.array([[1.0] * 10], dtype=np.float64), + np.ones((1, 10), dtype=np.float64)) + assert_equal(np.array([[1.0] * 10] * 10, dtype=np.float64), + np.ones((10, 10), dtype=np.float64)) + + # Try with tuples + assert_equal(np.array((None,) * 10, dtype=np.float64), + np.full((10,), np.nan, dtype=np.float64)) + assert_equal(np.array([(None,)] * 10, dtype=np.float64), + np.full((10, 1), np.nan, dtype=np.float64)) + assert_equal(np.array([(None,) * 10], dtype=np.float64), + np.full((1, 10), np.nan, dtype=np.float64)) + assert_equal(np.array([(None,) * 10] * 10, dtype=np.float64), + np.full((10, 10), np.nan, dtype=np.float64)) + + assert_equal(np.array((1.0,) * 10, dtype=np.float64), + np.ones((10,), dtype=np.float64)) + assert_equal(np.array([(1.0,)] * 10, dtype=np.float64), + np.ones((10, 1), dtype=np.float64)) + assert_equal(np.array([(1.0,) * 10], dtype=np.float64), + np.ones((1, 10), dtype=np.float64)) + assert_equal(np.array([(1.0,) * 10] * 10, dtype=np.float64), + np.ones((10, 10), dtype=np.float64)) + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test___array___refcount(): + class MyArray: + def __init__(self, dtype): + self.val = np.array(-1, dtype=dtype) + + def __array__(self, dtype=None, copy=None): + return self.val.__array__(dtype=dtype, copy=copy) + + # test all possible scenarios: + # dtype(none | same | different) x copy(true | false | none) + dt = np.dtype(np.int32) + old_refcount = sys.getrefcount(dt) + np.array(MyArray(dt)) + assert_equal(old_refcount, sys.getrefcount(dt)) + np.array(MyArray(dt), dtype=dt) + assert_equal(old_refcount, sys.getrefcount(dt)) + np.array(MyArray(dt), copy=None) + assert_equal(old_refcount, sys.getrefcount(dt)) + np.array(MyArray(dt), dtype=dt, copy=None) + assert_equal(old_refcount, sys.getrefcount(dt)) + dt2 = np.dtype(np.int16) + old_refcount2 = sys.getrefcount(dt2) + np.array(MyArray(dt), dtype=dt2) + assert_equal(old_refcount2, sys.getrefcount(dt2)) + np.array(MyArray(dt), dtype=dt2, copy=None) + assert_equal(old_refcount2, sys.getrefcount(dt2)) + with pytest.raises(ValueError): + np.array(MyArray(dt), dtype=dt2, copy=False) + assert_equal(old_refcount2, sys.getrefcount(dt2)) + + +@pytest.mark.parametrize("array", [True, False]) +def test_array_impossible_casts(array): + # All builtin types can be forcibly cast, at least theoretically, + # but user dtypes cannot necessarily. + rt = rational(1, 2) + if array: + rt = np.array(rt) + with assert_raises(TypeError): + np.array(rt, dtype="M8") + + +def test_array_astype(): + a = np.arange(6, dtype='f4').reshape(2, 3) + # Default behavior: allows unsafe casts, keeps memory layout, + # always copies. + b = a.astype('i4') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('i4')) + assert_equal(a.strides, b.strides) + b = a.T.astype('i4') + assert_equal(a.T, b) + assert_equal(b.dtype, np.dtype('i4')) + assert_equal(a.T.strides, b.strides) + b = a.astype('f4') + assert_equal(a, b) + assert_(not (a is b)) + + # copy=False parameter skips a copy + b = a.astype('f4', copy=False) + assert_(a is b) + + # order parameter allows overriding of the memory layout, + # forcing a copy if the layout is wrong + b = a.astype('f4', order='F', copy=False) + assert_equal(a, b) + assert_(not (a is b)) + assert_(b.flags.f_contiguous) + + b = a.astype('f4', order='C', copy=False) + assert_equal(a, b) + assert_(a is b) + assert_(b.flags.c_contiguous) + + # casting parameter allows catching bad casts + b = a.astype('c8', casting='safe') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('c8')) + + assert_raises(TypeError, a.astype, 'i4', casting='safe') + + # subok=False passes through a non-subclassed array + b = a.astype('f4', subok=0, copy=False) + assert_(a is b) + + class MyNDArray(np.ndarray): + pass + + a = np.array([[0, 1, 2], [3, 4, 5]], dtype='f4').view(MyNDArray) + + # subok=True passes through a subclass + b = a.astype('f4', subok=True, copy=False) + assert_(a is b) + + # subok=True is default, and creates a subtype on a cast + b = a.astype('i4', copy=False) + assert_equal(a, b) + assert_equal(type(b), MyNDArray) + + # subok=False never returns a subclass + b = a.astype('f4', subok=False, copy=False) + assert_equal(a, b) + assert_(not (a is b)) + assert_(type(b) is not MyNDArray) + + # Make sure converting from string object to fixed length string + # does not truncate. + a = np.array([b'a' * 100], dtype='O') + b = a.astype('S') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('S100')) + a = np.array(['a' * 100], dtype='O') + b = a.astype('U') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('U100')) + + # Same test as above but for strings shorter than 64 characters + a = np.array([b'a' * 10], dtype='O') + b = a.astype('S') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('S10')) + a = np.array(['a' * 10], dtype='O') + b = a.astype('U') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('U10')) + + a = np.array(123456789012345678901234567890, dtype='O').astype('S') + assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) + a = np.array(123456789012345678901234567890, dtype='O').astype('U') + assert_array_equal(a, np.array('1234567890' * 3, dtype='U30')) + + a = np.array([123456789012345678901234567890], dtype='O').astype('S') + assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) + a = np.array([123456789012345678901234567890], dtype='O').astype('U') + assert_array_equal(a, np.array('1234567890' * 3, dtype='U30')) + + a = np.array(123456789012345678901234567890, dtype='S') + assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) + a = np.array(123456789012345678901234567890, dtype='U') + assert_array_equal(a, np.array('1234567890' * 3, dtype='U30')) + + a = np.array('a\u0140', dtype='U') + b = np.ndarray(buffer=a, dtype='uint32', shape=2) + assert_(b.size == 2) + + a = np.array([1000], dtype='i4') + assert_raises(TypeError, a.astype, 'S1', casting='safe') + + a = np.array(1000, dtype='i4') + assert_raises(TypeError, a.astype, 'U1', casting='safe') + + # gh-24023 + assert_raises(TypeError, a.astype) + +@pytest.mark.parametrize("dt", ["S", "U"]) +def test_array_astype_to_string_discovery_empty(dt): + # See also gh-19085 + arr = np.array([""], dtype=object) + # Note, the itemsize is the `0 -> 1` logic, which should change. + # The important part the test is rather that it does not error. + assert arr.astype(dt).dtype.itemsize == np.dtype(f"{dt}1").itemsize + + # check the same thing for `np.can_cast` (since it accepts arrays) + assert np.can_cast(arr, dt, casting="unsafe") + assert not np.can_cast(arr, dt, casting="same_kind") + # as well as for the object as a descriptor: + assert np.can_cast("O", dt, casting="unsafe") + +@pytest.mark.parametrize("dt", ["d", "f", "S13", "U32"]) +def test_array_astype_to_void(dt): + dt = np.dtype(dt) + arr = np.array([], dtype=dt) + assert arr.astype("V").dtype.itemsize == dt.itemsize + +def test_object_array_astype_to_void(): + # This is different to `test_array_astype_to_void` as object arrays + # are inspected. The default void is "V8" (8 is the length of double) + arr = np.array([], dtype="O").astype("V") + assert arr.dtype == "V8" + +@pytest.mark.parametrize("t", + np._core.sctypes['uint'] + + np._core.sctypes['int'] + + np._core.sctypes['float'] +) +def test_array_astype_warning(t): + # test ComplexWarning when casting from complex to float or int + a = np.array(10, dtype=np.complex128) + pytest.warns(np.exceptions.ComplexWarning, a.astype, t) + +@pytest.mark.parametrize(["dtype", "out_dtype"], + [(np.bytes_, np.bool), + (np.str_, np.bool), + (np.dtype("S10,S9"), np.dtype("?,?")), + # The following also checks unaligned unicode access: + (np.dtype("S7,U9"), np.dtype("?,?"))]) +def test_string_to_boolean_cast(dtype, out_dtype): + # Only the last two (empty) strings are falsy (the `\0` is stripped): + arr = np.array( + ["10", "10\0\0\0", "0\0\0", "0", "False", " ", "", "\0"], + dtype=dtype) + expected = np.array( + [True, True, True, True, True, True, False, False], + dtype=out_dtype) + assert_array_equal(arr.astype(out_dtype), expected) + # As it's similar, check that nonzero behaves the same (structs are + # nonzero if all entries are) + assert_array_equal(np.nonzero(arr), np.nonzero(expected)) + +@pytest.mark.parametrize("str_type", [str, bytes, np.str_]) +@pytest.mark.parametrize("scalar_type", + [np.complex64, np.complex128, np.clongdouble]) +def test_string_to_complex_cast(str_type, scalar_type): + value = scalar_type(b"1+3j") + assert scalar_type(value) == 1 + 3j + assert np.array([value], dtype=object).astype(scalar_type)[()] == 1 + 3j + assert np.array(value).astype(scalar_type)[()] == 1 + 3j + arr = np.zeros(1, dtype=scalar_type) + arr[0] = value + assert arr[0] == 1 + 3j + +@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) +def test_none_to_nan_cast(dtype): + # Note that at the time of writing this test, the scalar constructors + # reject None + arr = np.zeros(1, dtype=dtype) + arr[0] = None + assert np.isnan(arr)[0] + assert np.isnan(np.array(None, dtype=dtype))[()] + assert np.isnan(np.array([None], dtype=dtype))[0] + assert np.isnan(np.array(None).astype(dtype))[()] + +def test_copyto_fromscalar(): + a = np.arange(6, dtype='f4').reshape(2, 3) + + # Simple copy + np.copyto(a, 1.5) + assert_equal(a, 1.5) + np.copyto(a.T, 2.5) + assert_equal(a, 2.5) + + # Where-masked copy + mask = np.array([[0, 1, 0], [0, 0, 1]], dtype='?') + np.copyto(a, 3.5, where=mask) + assert_equal(a, [[2.5, 3.5, 2.5], [2.5, 2.5, 3.5]]) + mask = np.array([[0, 1], [1, 1], [1, 0]], dtype='?') + np.copyto(a.T, 4.5, where=mask) + assert_equal(a, [[2.5, 4.5, 4.5], [4.5, 4.5, 3.5]]) + +def test_copyto(): + a = np.arange(6, dtype='i4').reshape(2, 3) + + # Simple copy + np.copyto(a, [[3, 1, 5], [6, 2, 1]]) + assert_equal(a, [[3, 1, 5], [6, 2, 1]]) + + # Overlapping copy should work + np.copyto(a[:, :2], a[::-1, 1::-1]) + assert_equal(a, [[2, 6, 5], [1, 3, 1]]) + + # Defaults to 'same_kind' casting + assert_raises(TypeError, np.copyto, a, 1.5) + + # Force a copy with 'unsafe' casting, truncating 1.5 to 1 + np.copyto(a, 1.5, casting='unsafe') + assert_equal(a, 1) + + # Copying with a mask + np.copyto(a, 3, where=[True, False, True]) + assert_equal(a, [[3, 1, 3], [3, 1, 3]]) + + # Casting rule still applies with a mask + assert_raises(TypeError, np.copyto, a, 3.5, where=[True, False, True]) + + # Lists of integer 0's and 1's is ok too + np.copyto(a, 4.0, casting='unsafe', where=[[0, 1, 1], [1, 0, 0]]) + assert_equal(a, [[3, 4, 4], [4, 1, 3]]) + + # Overlapping copy with mask should work + np.copyto(a[:, :2], a[::-1, 1::-1], where=[[0, 1], [1, 1]]) + assert_equal(a, [[3, 4, 4], [4, 3, 3]]) + + # 'dst' must be an array + assert_raises(TypeError, np.copyto, [1, 2, 3], [2, 3, 4]) + + +def test_copyto_cast_safety(): + with pytest.raises(TypeError): + np.copyto(np.arange(3), 3., casting="safe") + + # Can put integer and float scalars safely (and equiv): + np.copyto(np.arange(3), 3, casting="equiv") + np.copyto(np.arange(3.), 3., casting="equiv") + # And also with less precision safely: + np.copyto(np.arange(3, dtype="uint8"), 3, casting="safe") + np.copyto(np.arange(3., dtype="float32"), 3., casting="safe") + + # But not equiv: + with pytest.raises(TypeError): + np.copyto(np.arange(3, dtype="uint8"), 3, casting="equiv") + + with pytest.raises(TypeError): + np.copyto(np.arange(3., dtype="float32"), 3., casting="equiv") + + # As a special thing, object is equiv currently: + np.copyto(np.arange(3, dtype=object), 3, casting="equiv") + + # The following raises an overflow error/gives a warning but not + # type error (due to casting), though: + with pytest.raises(OverflowError): + np.copyto(np.arange(3), 2**80, casting="safe") + + with pytest.warns(RuntimeWarning): + np.copyto(np.arange(3, dtype=np.float32), 2e300, casting="safe") + + +def test_copyto_permut(): + # test explicit overflow case + pad = 500 + l = [True] * pad + [True, True, True, True] + r = np.zeros(len(l) - pad) + d = np.ones(len(l) - pad) + mask = np.array(l)[pad:] + np.copyto(r, d, where=mask[::-1]) + + # test all permutation of possible masks, 9 should be sufficient for + # current 4 byte unrolled code + power = 9 + d = np.ones(power) + for i in range(2**power): + r = np.zeros(power) + l = [(i & x) != 0 for x in range(power)] + mask = np.array(l) + np.copyto(r, d, where=mask) + assert_array_equal(r == 1, l) + assert_equal(r.sum(), sum(l)) + + r = np.zeros(power) + np.copyto(r, d, where=mask[::-1]) + assert_array_equal(r == 1, l[::-1]) + assert_equal(r.sum(), sum(l)) + + r = np.zeros(power) + np.copyto(r[::2], d[::2], where=mask[::2]) + assert_array_equal(r[::2] == 1, l[::2]) + assert_equal(r[::2].sum(), sum(l[::2])) + + r = np.zeros(power) + np.copyto(r[::2], d[::2], where=mask[::-2]) + assert_array_equal(r[::2] == 1, l[::-2]) + assert_equal(r[::2].sum(), sum(l[::-2])) + + for c in [0xFF, 0x7F, 0x02, 0x10]: + r = np.zeros(power) + mask = np.array(l) + imask = np.array(l).view(np.uint8) + imask[mask != 0] = c + np.copyto(r, d, where=mask) + assert_array_equal(r == 1, l) + assert_equal(r.sum(), sum(l)) + + r = np.zeros(power) + np.copyto(r, d, where=True) + assert_equal(r.sum(), r.size) + r = np.ones(power) + d = np.zeros(power) + np.copyto(r, d, where=False) + assert_equal(r.sum(), r.size) + +def test_copy_order(): + a = np.arange(24).reshape(2, 1, 3, 4) + b = a.copy(order='F') + c = np.arange(24).reshape(2, 1, 4, 3).swapaxes(2, 3) + + def check_copy_result(x, y, ccontig, fcontig, strides=False): + assert_(not (x is y)) + assert_equal(x, y) + assert_equal(res.flags.c_contiguous, ccontig) + assert_equal(res.flags.f_contiguous, fcontig) + + # Validate the initial state of a, b, and c + assert_(a.flags.c_contiguous) + assert_(not a.flags.f_contiguous) + assert_(not b.flags.c_contiguous) + assert_(b.flags.f_contiguous) + assert_(not c.flags.c_contiguous) + assert_(not c.flags.f_contiguous) + + # Copy with order='C' + res = a.copy(order='C') + check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) + res = b.copy(order='C') + check_copy_result(res, b, ccontig=True, fcontig=False, strides=False) + res = c.copy(order='C') + check_copy_result(res, c, ccontig=True, fcontig=False, strides=False) + res = np.copy(a, order='C') + check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) + res = np.copy(b, order='C') + check_copy_result(res, b, ccontig=True, fcontig=False, strides=False) + res = np.copy(c, order='C') + check_copy_result(res, c, ccontig=True, fcontig=False, strides=False) + + # Copy with order='F' + res = a.copy(order='F') + check_copy_result(res, a, ccontig=False, fcontig=True, strides=False) + res = b.copy(order='F') + check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) + res = c.copy(order='F') + check_copy_result(res, c, ccontig=False, fcontig=True, strides=False) + res = np.copy(a, order='F') + check_copy_result(res, a, ccontig=False, fcontig=True, strides=False) + res = np.copy(b, order='F') + check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) + res = np.copy(c, order='F') + check_copy_result(res, c, ccontig=False, fcontig=True, strides=False) + + # Copy with order='K' + res = a.copy(order='K') + check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) + res = b.copy(order='K') + check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) + res = c.copy(order='K') + check_copy_result(res, c, ccontig=False, fcontig=False, strides=True) + res = np.copy(a, order='K') + check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) + res = np.copy(b, order='K') + check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) + res = np.copy(c, order='K') + check_copy_result(res, c, ccontig=False, fcontig=False, strides=True) + +def test_contiguous_flags(): + a = np.ones((4, 4, 1))[::2, :, :] + a = stride_tricks.as_strided(a, strides=a.strides[:2] + (-123,)) + b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4) + + def check_contig(a, ccontig, fcontig): + assert_(a.flags.c_contiguous == ccontig) + assert_(a.flags.f_contiguous == fcontig) + + # Check if new arrays are correct: + check_contig(a, False, False) + check_contig(b, False, False) + check_contig(np.empty((2, 2, 0, 2, 2)), True, True) + check_contig(np.array([[[1], [2]]], order='F'), True, True) + check_contig(np.empty((2, 2)), True, False) + check_contig(np.empty((2, 2), order='F'), False, True) + + # Check that np.array creates correct contiguous flags: + check_contig(np.array(a, copy=None), False, False) + check_contig(np.array(a, copy=None, order='C'), True, False) + check_contig(np.array(a, ndmin=4, copy=None, order='F'), False, True) + + # Check slicing update of flags and : + check_contig(a[0], True, True) + check_contig(a[None, ::4, ..., None], True, True) + check_contig(b[0, 0, ...], False, True) + check_contig(b[:, :, 0:0, :, :], True, True) + + # Test ravel and squeeze. + check_contig(a.ravel(), True, True) + check_contig(np.ones((1, 3, 1)).squeeze(), True, True) + +def test_broadcast_arrays(): + # Test user defined dtypes + dtype = 'u4,u4,u4' + a = np.array([(1, 2, 3)], dtype=dtype) + b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype=dtype) + result = np.broadcast_arrays(a, b) + assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype=dtype)) + assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype=dtype)) + +@pytest.mark.parametrize(["shape", "fill_value", "expected_output"], + [((2, 2), [5.0, 6.0], np.array([[5.0, 6.0], [5.0, 6.0]])), + ((3, 2), [1.0, 2.0], np.array([[1.0, 2.0], [1.0, 2.0], [1.0, 2.0]]))]) +def test_full_from_list(shape, fill_value, expected_output): + output = np.full(shape, fill_value) + assert_equal(output, expected_output) + +def test_astype_copyflag(): + # test the various copyflag options + arr = np.arange(10, dtype=np.intp) + + res_true = arr.astype(np.intp, copy=True) + assert not np.shares_memory(arr, res_true) + + res_false = arr.astype(np.intp, copy=False) + assert np.shares_memory(arr, res_false) + + res_false_float = arr.astype(np.float64, copy=False) + assert not np.shares_memory(arr, res_false_float) + + # _CopyMode enum isn't allowed + assert_raises(ValueError, arr.astype, np.float64, + copy=np._CopyMode.NEVER) diff --git a/local_packages/numpy/_core/tests/test_argparse.py b/local_packages/numpy/_core/tests/test_argparse.py new file mode 100644 index 0000000..ededced --- /dev/null +++ b/local_packages/numpy/_core/tests/test_argparse.py @@ -0,0 +1,90 @@ +""" +Tests for the private NumPy argument parsing functionality. +They mainly exists to ensure good test coverage without having to try the +weirder cases on actual numpy functions but test them in one place. + +The test function is defined in C to be equivalent to (errors may not always +match exactly, and could be adjusted): + + def func(arg1, /, arg2, *, arg3): + i = integer(arg1) # reproducing the 'i' parsing in Python. + return None +""" + +import threading + +import pytest + +import numpy as np +from numpy._core._multiarray_tests import ( + argparse_example_function as func, + threaded_argparse_example_function as thread_func, +) +from numpy.testing import IS_WASM + + +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for threads") +def test_thread_safe_argparse_cache(): + b = threading.Barrier(8) + + def call_thread_func(): + b.wait() + thread_func(arg1=3, arg2=None) + + tasks = [threading.Thread(target=call_thread_func) for _ in range(8)] + [t.start() for t in tasks] + [t.join() for t in tasks] + + +def test_invalid_integers(): + with pytest.raises(TypeError, + match="integer argument expected, got float"): + func(1.) + with pytest.raises(OverflowError): + func(2**100) + + +def test_missing_arguments(): + with pytest.raises(TypeError, + match="missing required positional argument 0"): + func() + with pytest.raises(TypeError, + match="missing required positional argument 0"): + func(arg2=1, arg3=4) + with pytest.raises(TypeError, + match=r"missing required argument \'arg2\' \(pos 1\)"): + func(1, arg3=5) + + +def test_too_many_positional(): + # the second argument is positional but can be passed as keyword. + with pytest.raises(TypeError, + match="takes from 2 to 3 positional arguments but 4 were given"): + func(1, 2, 3, 4) + + +def test_multiple_values(): + with pytest.raises(TypeError, + match=r"given by name \('arg2'\) and position \(position 1\)"): + func(1, 2, arg2=3) + + +def test_string_fallbacks(): + # We can (currently?) use numpy strings to test the "slow" fallbacks + # that should normally not be taken due to string interning. + arg2 = np.str_("arg2") + missing_arg = np.str_("missing_arg") + func(1, **{arg2: 3}) + with pytest.raises(TypeError, + match="got an unexpected keyword argument 'missing_arg'"): + func(2, **{missing_arg: 3}) + + +def test_too_many_arguments_method_forwarding(): + # Not directly related to the standard argument parsing, but we sometimes + # forward methods to Python: arr.mean() calls np._core._methods._mean() + # This adds code coverage for this `npy_forward_method`. + arr = np.arange(3) + args = range(1000) + with pytest.raises(TypeError): + arr.mean(*args) diff --git a/local_packages/numpy/_core/tests/test_array_api_info.py b/local_packages/numpy/_core/tests/test_array_api_info.py new file mode 100644 index 0000000..4842dbf --- /dev/null +++ b/local_packages/numpy/_core/tests/test_array_api_info.py @@ -0,0 +1,113 @@ +import pytest + +import numpy as np + +info = np.__array_namespace_info__() + + +def test_capabilities(): + caps = info.capabilities() + assert caps["boolean indexing"] is True + assert caps["data-dependent shapes"] is True + + # This will be added in the 2024.12 release of the array API standard. + + # assert caps["max rank"] == 64 + # np.zeros((1,)*64) + # with pytest.raises(ValueError): + # np.zeros((1,)*65) + + +def test_default_device(): + assert info.default_device() == "cpu" == np.asarray(0).device + + +def test_default_dtypes(): + dtypes = info.default_dtypes() + assert dtypes["real floating"] == np.float64 == np.asarray(0.0).dtype + assert dtypes["complex floating"] == np.complex128 == \ + np.asarray(0.0j).dtype + assert dtypes["integral"] == np.intp == np.asarray(0).dtype + assert dtypes["indexing"] == np.intp == np.argmax(np.zeros(10)).dtype + + with pytest.raises(ValueError, match="Device not understood"): + info.default_dtypes(device="gpu") + + +def test_dtypes_all(): + dtypes = info.dtypes() + assert dtypes == { + "bool": np.bool_, + "int8": np.int8, + "int16": np.int16, + "int32": np.int32, + "int64": np.int64, + "uint8": np.uint8, + "uint16": np.uint16, + "uint32": np.uint32, + "uint64": np.uint64, + "float32": np.float32, + "float64": np.float64, + "complex64": np.complex64, + "complex128": np.complex128, + } + + +dtype_categories = { + "bool": {"bool": np.bool_}, + "signed integer": { + "int8": np.int8, + "int16": np.int16, + "int32": np.int32, + "int64": np.int64, + }, + "unsigned integer": { + "uint8": np.uint8, + "uint16": np.uint16, + "uint32": np.uint32, + "uint64": np.uint64, + }, + "integral": ("signed integer", "unsigned integer"), + "real floating": {"float32": np.float32, "float64": np.float64}, + "complex floating": {"complex64": np.complex64, "complex128": + np.complex128}, + "numeric": ("integral", "real floating", "complex floating"), +} + + +@pytest.mark.parametrize("kind", dtype_categories) +def test_dtypes_kind(kind): + expected = dtype_categories[kind] + if isinstance(expected, tuple): + assert info.dtypes(kind=kind) == info.dtypes(kind=expected) + else: + assert info.dtypes(kind=kind) == expected + + +def test_dtypes_tuple(): + dtypes = info.dtypes(kind=("bool", "integral")) + assert dtypes == { + "bool": np.bool_, + "int8": np.int8, + "int16": np.int16, + "int32": np.int32, + "int64": np.int64, + "uint8": np.uint8, + "uint16": np.uint16, + "uint32": np.uint32, + "uint64": np.uint64, + } + + +def test_dtypes_invalid_kind(): + with pytest.raises(ValueError, match="unsupported kind"): + info.dtypes(kind="invalid") + + +def test_dtypes_invalid_device(): + with pytest.raises(ValueError, match="Device not understood"): + info.dtypes(device="gpu") + + +def test_devices(): + assert info.devices() == ["cpu"] diff --git a/local_packages/numpy/_core/tests/test_array_coercion.py b/local_packages/numpy/_core/tests/test_array_coercion.py new file mode 100644 index 0000000..ba28e88 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_array_coercion.py @@ -0,0 +1,928 @@ +""" +Tests for array coercion, mainly through testing `np.array` results directly. +Note that other such tests exist, e.g., in `test_api.py` and many corner-cases +are tested (sometimes indirectly) elsewhere. +""" + +from itertools import permutations, product + +import pytest +from pytest import param + +import numpy as np +import numpy._core._multiarray_umath as ncu +from numpy._core._rational_tests import rational +from numpy.testing import IS_64BIT, IS_PYPY, assert_array_equal + + +def arraylikes(): + """ + Generator for functions converting an array into various array-likes. + If full is True (default) it includes array-likes not capable of handling + all dtypes. + """ + # base array: + def ndarray(a): + return a + + yield param(ndarray, id="ndarray") + + # subclass: + class MyArr(np.ndarray): + pass + + def subclass(a): + return a.view(MyArr) + + yield subclass + + class _SequenceLike: + # Older NumPy versions, sometimes cared whether a protocol array was + # also _SequenceLike. This shouldn't matter, but keep it for now + # for __array__ and not the others. + def __len__(self): + raise TypeError + + def __getitem__(self, _, /): + raise TypeError + + # Array-interface + class ArrayDunder(_SequenceLike): + def __init__(self, a): + self.a = a + + def __array__(self, dtype=None, copy=None): + if dtype is None: + return self.a + return self.a.astype(dtype) + + yield param(ArrayDunder, id="__array__") + + # memory-view + yield param(memoryview, id="memoryview") + + # Array-interface + class ArrayInterface: + def __init__(self, a): + self.a = a # need to hold on to keep interface valid + self.__array_interface__ = a.__array_interface__ + + yield param(ArrayInterface, id="__array_interface__") + + # Array-Struct + class ArrayStruct: + def __init__(self, a): + self.a = a # need to hold on to keep struct valid + self.__array_struct__ = a.__array_struct__ + + yield param(ArrayStruct, id="__array_struct__") + + +def scalar_instances(times=True, extended_precision=True, user_dtype=True): + # Hard-coded list of scalar instances. + # Floats: + yield param(np.sqrt(np.float16(5)), id="float16") + yield param(np.sqrt(np.float32(5)), id="float32") + yield param(np.sqrt(np.float64(5)), id="float64") + if extended_precision: + yield param(np.sqrt(np.longdouble(5)), id="longdouble") + + # Complex: + yield param(np.sqrt(np.complex64(2 + 3j)), id="complex64") + yield param(np.sqrt(np.complex128(2 + 3j)), id="complex128") + if extended_precision: + yield param(np.sqrt(np.clongdouble(2 + 3j)), id="clongdouble") + + # Bool: + # XFAIL: Bool should be added, but has some bad properties when it + # comes to strings, see also gh-9875 + # yield param(np.bool(0), id="bool") + + # Integers: + yield param(np.int8(2), id="int8") + yield param(np.int16(2), id="int16") + yield param(np.int32(2), id="int32") + yield param(np.int64(2), id="int64") + + yield param(np.uint8(2), id="uint8") + yield param(np.uint16(2), id="uint16") + yield param(np.uint32(2), id="uint32") + yield param(np.uint64(2), id="uint64") + + # Rational: + if user_dtype: + yield param(rational(1, 2), id="rational") + + # Cannot create a structured void scalar directly: + structured = np.array([(1, 3)], "i,i")[0] + assert isinstance(structured, np.void) + assert structured.dtype == np.dtype("i,i") + yield param(structured, id="structured") + + if times: + # Datetimes and timedelta + yield param(np.timedelta64(2), id="timedelta64[generic]") + yield param(np.timedelta64(23, "s"), id="timedelta64[s]") + yield param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)") + + yield param(np.datetime64("NaT"), id="datetime64[generic](NaT)") + yield param(np.datetime64("2020-06-07 12:43", "ms"), id="datetime64[ms]") + + # Strings and unstructured void: + yield param(np.bytes_(b"1234"), id="bytes") + yield param(np.str_("2345"), id="unicode") + yield param(np.void(b"4321"), id="unstructured_void") + + +def is_parametric_dtype(dtype): + """Returns True if the dtype is a parametric legacy dtype (itemsize + is 0, or a datetime without units) + """ + if dtype.itemsize == 0: + return True + if issubclass(dtype.type, (np.datetime64, np.timedelta64)): + if dtype.name.endswith("64"): + # Generic time units + return True + return False + + +class TestStringDiscovery: + @pytest.mark.parametrize("obj", + [object(), 1.2, 10**43, None, "string"], + ids=["object", "1.2", "10**43", "None", "string"]) + def test_basic_stringlength(self, obj): + length = len(str(obj)) + expected = np.dtype(f"S{length}") + + assert np.array(obj, dtype="S").dtype == expected + assert np.array([obj], dtype="S").dtype == expected + + # A nested array is also discovered correctly + arr = np.array(obj, dtype="O") + assert np.array(arr, dtype="S").dtype == expected + # Also if we use the dtype class + assert np.array(arr, dtype=type(expected)).dtype == expected + # Check that .astype() behaves identical + assert arr.astype("S").dtype == expected + # The DType class is accepted by `.astype()` + assert arr.astype(type(np.dtype("S"))).dtype == expected + + @pytest.mark.parametrize("obj", + [object(), 1.2, 10**43, None, "string"], + ids=["object", "1.2", "10**43", "None", "string"]) + def test_nested_arrays_stringlength(self, obj): + length = len(str(obj)) + expected = np.dtype(f"S{length}") + arr = np.array(obj, dtype="O") + assert np.array([arr, arr], dtype="S").dtype == expected + + @pytest.mark.parametrize("arraylike", arraylikes()) + def test_unpack_first_level(self, arraylike): + # We unpack exactly one level of array likes + obj = np.array([None]) + obj[0] = np.array(1.2) + # the length of the included item, not of the float dtype + length = len(str(obj[0])) + expected = np.dtype(f"S{length}") + + obj = arraylike(obj) + # casting to string usually calls str(obj) + arr = np.array([obj], dtype="S") + assert arr.shape == (1, 1) + assert arr.dtype == expected + + +class TestScalarDiscovery: + def test_void_special_case(self): + # Void dtypes with structures discover tuples as elements + arr = np.array((1, 2, 3), dtype="i,i,i") + assert arr.shape == () + arr = np.array([(1, 2, 3)], dtype="i,i,i") + assert arr.shape == (1,) + + def test_char_special_case(self): + arr = np.array("string", dtype="c") + assert arr.shape == (6,) + assert arr.dtype.char == "c" + arr = np.array(["string"], dtype="c") + assert arr.shape == (1, 6) + assert arr.dtype.char == "c" + + def test_char_special_case_deep(self): + # Check that the character special case errors correctly if the + # array is too deep: + nested = ["string"] # 2 dimensions (due to string being sequence) + for i in range(ncu.MAXDIMS - 2): + nested = [nested] + + arr = np.array(nested, dtype='c') + assert arr.shape == (1,) * (ncu.MAXDIMS - 1) + (6,) + with pytest.raises(ValueError): + np.array([nested], dtype="c") + + def test_unknown_object(self): + arr = np.array(object()) + assert arr.shape == () + assert arr.dtype == np.dtype("O") + + @pytest.mark.parametrize("scalar", scalar_instances()) + def test_scalar(self, scalar): + arr = np.array(scalar) + assert arr.shape == () + assert arr.dtype == scalar.dtype + + arr = np.array([[scalar, scalar]]) + assert arr.shape == (1, 2) + assert arr.dtype == scalar.dtype + + # Additionally to string this test also runs into a corner case + # with datetime promotion (the difference is the promotion order). + @pytest.mark.filterwarnings("ignore:Promotion of numbers:FutureWarning") + def test_scalar_promotion(self): + for sc1, sc2 in product(scalar_instances(), scalar_instances()): + sc1, sc2 = sc1.values[0], sc2.values[0] + # test all combinations: + try: + arr = np.array([sc1, sc2]) + except (TypeError, ValueError): + # The promotion between two times can fail + # XFAIL (ValueError): Some object casts are currently undefined + continue + assert arr.shape == (2,) + try: + dt1, dt2 = sc1.dtype, sc2.dtype + expected_dtype = np.promote_types(dt1, dt2) + assert arr.dtype == expected_dtype + except TypeError as e: + # Will currently always go to object dtype + assert arr.dtype == np.dtype("O") + + @pytest.mark.parametrize("scalar", scalar_instances()) + def test_scalar_coercion(self, scalar): + # This tests various scalar coercion paths, mainly for the numerical + # types. It includes some paths not directly related to `np.array`. + if isinstance(scalar, np.inexact): + # Ensure we have a full-precision number if available + scalar = type(scalar)((scalar * 2)**0.5) + + # Use casting from object: + arr = np.array(scalar, dtype=object).astype(scalar.dtype) + + # Test various ways to create an array containing this scalar: + arr1 = np.array(scalar).reshape(1) + arr2 = np.array([scalar]) + arr3 = np.empty(1, dtype=scalar.dtype) + arr3[0] = scalar + arr4 = np.empty(1, dtype=scalar.dtype) + arr4[:] = [scalar] + # All of these methods should yield the same results + assert_array_equal(arr, arr1) + assert_array_equal(arr, arr2) + assert_array_equal(arr, arr3) + assert_array_equal(arr, arr4) + + @pytest.mark.xfail(IS_PYPY, reason="`int(np.complex128(3))` fails on PyPy") + @pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") + @pytest.mark.parametrize("cast_to", scalar_instances()) + def test_scalar_coercion_same_as_cast_and_assignment(self, cast_to): + """ + Test that in most cases: + * `np.array(scalar, dtype=dtype)` + * `np.empty((), dtype=dtype)[()] = scalar` + * `np.array(scalar).astype(dtype)` + should behave the same. The only exceptions are parametric dtypes + (mainly datetime/timedelta without unit) and void without fields. + """ + dtype = cast_to.dtype # use to parametrize only the target dtype + + for scalar in scalar_instances(times=False): + scalar = scalar.values[0] + + if dtype.type == np.void: + if scalar.dtype.fields is not None and dtype.fields is None: + # Here, coercion to "V6" works, but the cast fails. + # Since the types are identical, SETITEM takes care of + # this, but has different rules than the cast. + with pytest.raises(TypeError): + np.array(scalar).astype(dtype) + np.array(scalar, dtype=dtype) + np.array([scalar], dtype=dtype) + continue + + # The main test, we first try to use casting and if it succeeds + # continue below testing that things are the same, otherwise + # test that the alternative paths at least also fail. + try: + cast = np.array(scalar).astype(dtype) + except (TypeError, ValueError, RuntimeError): + # coercion should also raise (error type may change) + with pytest.raises(Exception): # noqa: B017 + np.array(scalar, dtype=dtype) + + if (isinstance(scalar, rational) and + np.issubdtype(dtype, np.signedinteger)): + return + + with pytest.raises(Exception): # noqa: B017 + np.array([scalar], dtype=dtype) + # assignment should also raise + res = np.zeros((), dtype=dtype) + with pytest.raises(Exception): # noqa: B017 + res[()] = scalar + + return + + # Non error path: + arr = np.array(scalar, dtype=dtype) + assert_array_equal(arr, cast) + # assignment behaves the same + ass = np.zeros((), dtype=dtype) + ass[()] = scalar + assert_array_equal(ass, cast) + + @pytest.mark.parametrize("pyscalar", [10, 10.32, 10.14j, 10**100]) + def test_pyscalar_subclasses(self, pyscalar): + """NumPy arrays are read/write which means that anything but invariant + behaviour is on thin ice. However, we currently are happy to discover + subclasses of Python float, int, complex the same as the base classes. + This should potentially be deprecated. + """ + class MyScalar(type(pyscalar)): + pass + + res = np.array(MyScalar(pyscalar)) + expected = np.array(pyscalar) + assert_array_equal(res, expected) + + @pytest.mark.parametrize("dtype_char", np.typecodes["All"]) + def test_default_dtype_instance(self, dtype_char): + if dtype_char in "SU": + dtype = np.dtype(dtype_char + "1") + elif dtype_char == "V": + # Legacy behaviour was to use V8. The reason was float64 being the + # default dtype and that having 8 bytes. + dtype = np.dtype("V8") + else: + dtype = np.dtype(dtype_char) + + discovered_dtype, _ = ncu._discover_array_parameters([], type(dtype)) + + assert discovered_dtype == dtype + assert discovered_dtype.itemsize == dtype.itemsize + + @pytest.mark.parametrize("dtype", np.typecodes["Integer"]) + @pytest.mark.parametrize(["scalar", "error"], + [(np.float64(np.nan), ValueError), + (np.array(-1).astype(np.ulonglong)[()], OverflowError)]) + def test_scalar_to_int_coerce_does_not_cast(self, dtype, scalar, error): + """ + Signed integers are currently different in that they do not cast other + NumPy scalar, but instead use scalar.__int__(). The hardcoded + exception to this rule is `np.array(scalar, dtype=integer)`. + """ + dtype = np.dtype(dtype) + + # This is a special case using casting logic. It warns for the NaN + # but allows the cast (giving undefined behaviour). + with np.errstate(invalid="ignore"): + coerced = np.array(scalar, dtype=dtype) + cast = np.array(scalar).astype(dtype) + assert_array_equal(coerced, cast) + + # However these fail: + with pytest.raises(error): + np.array([scalar], dtype=dtype) + with pytest.raises(error): + cast[()] = scalar + + +class TestTimeScalars: + @pytest.mark.parametrize("dtype", [np.int64, np.float32]) + @pytest.mark.parametrize("scalar", + [param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)"), + param(np.timedelta64(123, "s"), id="timedelta64[s]"), + param(np.datetime64("NaT", "generic"), id="datetime64[generic](NaT)"), + param(np.datetime64(1, "D"), id="datetime64[D]")],) + def test_coercion_basic(self, dtype, scalar): + # Note the `[scalar]` is there because np.array(scalar) uses stricter + # `scalar.__int__()` rules for backward compatibility right now. + arr = np.array(scalar, dtype=dtype) + cast = np.array(scalar).astype(dtype) + assert_array_equal(arr, cast) + + ass = np.ones((), dtype=dtype) + if issubclass(dtype, np.integer): + with pytest.raises(TypeError): + # raises, as would np.array([scalar], dtype=dtype), this is + # conversion from times, but behaviour of integers. + ass[()] = scalar + else: + ass[()] = scalar + assert_array_equal(ass, cast) + + @pytest.mark.parametrize("dtype", [np.int64, np.float32]) + @pytest.mark.parametrize("scalar", + [param(np.timedelta64(123, "ns"), id="timedelta64[ns]"), + param(np.timedelta64(12, "generic"), id="timedelta64[generic]")]) + def test_coercion_timedelta_convert_to_number(self, dtype, scalar): + # Only "ns" and "generic" timedeltas can be converted to numbers + # so these are slightly special. + arr = np.array(scalar, dtype=dtype) + cast = np.array(scalar).astype(dtype) + ass = np.ones((), dtype=dtype) + ass[()] = scalar # raises, as would np.array([scalar], dtype=dtype) + + assert_array_equal(arr, cast) + assert_array_equal(cast, cast) + + @pytest.mark.parametrize("dtype", ["S6", "U6"]) + @pytest.mark.parametrize(["val", "unit"], + [param(123, "s", id="[s]"), param(123, "D", id="[D]")]) + def test_coercion_assignment_datetime(self, val, unit, dtype): + # String from datetime64 assignment is currently special cased to + # never use casting. This is because casting will error in this + # case, and traditionally in most cases the behaviour is maintained + # like this. (`np.array(scalar, dtype="U6")` would have failed before) + # TODO: This discrepancy _should_ be resolved, either by relaxing the + # cast, or by deprecating the first part. + scalar = np.datetime64(val, unit) + dtype = np.dtype(dtype) + cut_string = dtype.type(str(scalar)[:6]) + + arr = np.array(scalar, dtype=dtype) + assert arr[()] == cut_string + ass = np.ones((), dtype=dtype) + ass[()] = scalar + assert ass[()] == cut_string + + with pytest.raises(RuntimeError): + # However, unlike the above assignment using `str(scalar)[:6]` + # due to being handled by the string DType and not be casting + # the explicit cast fails: + np.array(scalar).astype(dtype) + + @pytest.mark.parametrize(["val", "unit"], + [param(123, "s", id="[s]"), param(123, "D", id="[D]")]) + def test_coercion_assignment_timedelta(self, val, unit): + scalar = np.timedelta64(val, unit) + + # Unlike datetime64, timedelta allows the unsafe cast: + np.array(scalar, dtype="S6") + cast = np.array(scalar).astype("S6") + ass = np.ones((), dtype="S6") + ass[()] = scalar + expected = scalar.astype("S")[:6] + assert cast[()] == expected + assert ass[()] == expected + +class TestNested: + def test_nested_simple(self): + initial = [1.2] + nested = initial + for i in range(ncu.MAXDIMS - 1): + nested = [nested] + + arr = np.array(nested, dtype="float64") + assert arr.shape == (1,) * ncu.MAXDIMS + with pytest.raises(ValueError): + np.array([nested], dtype="float64") + + with pytest.raises(ValueError, match=".*would exceed the maximum"): + np.array([nested]) # user must ask for `object` explicitly + + arr = np.array([nested], dtype=object) + assert arr.dtype == np.dtype("O") + assert arr.shape == (1,) * ncu.MAXDIMS + assert arr.item() is initial + + def test_pathological_self_containing(self): + # Test that this also works for two nested sequences + l = [] + l.append(l) + arr = np.array([l, l, l], dtype=object) + assert arr.shape == (3,) + (1,) * (ncu.MAXDIMS - 1) + + # Also check a ragged case: + arr = np.array([l, [None], l], dtype=object) + assert arr.shape == (3, 1) + + @pytest.mark.parametrize("arraylike", arraylikes()) + def test_nested_arraylikes(self, arraylike): + # We try storing an array like into an array, but the array-like + # will have too many dimensions. This means the shape discovery + # decides that the array-like must be treated as an object (a special + # case of ragged discovery). The result will be an array with one + # dimension less than the maximum dimensions, and the array being + # assigned to it (which does work for object or if `float(arraylike)` + # works). + initial = arraylike(np.ones((1, 1))) + + nested = initial + for i in range(ncu.MAXDIMS - 1): + nested = [nested] + + with pytest.raises(ValueError, match=".*would exceed the maximum"): + # It will refuse to assign the array into + np.array(nested, dtype="float64") + + # If this is object, we end up assigning a (1, 1) array into (1,) + # (due to running out of dimensions), this is currently supported but + # a special case which is not ideal. + arr = np.array(nested, dtype=object) + assert arr.shape == (1,) * ncu.MAXDIMS + assert arr.item() == np.array(initial).item() + + @pytest.mark.parametrize("arraylike", arraylikes()) + def test_uneven_depth_ragged(self, arraylike): + arr = np.arange(4).reshape((2, 2)) + arr = arraylike(arr) + + # Array is ragged in the second dimension already: + out = np.array([arr, [arr]], dtype=object) + assert out.shape == (2,) + assert out[0] is arr + assert type(out[1]) is list + + # Array is ragged in the third dimension: + with pytest.raises(ValueError): + # This is a broadcast error during assignment, because + # the array shape would be (2, 2, 2) but `arr[0, 0] = arr` fails. + np.array([arr, [arr, arr]], dtype=object) + + def test_empty_sequence(self): + arr = np.array([[], [1], [[1]]], dtype=object) + assert arr.shape == (3,) + + # The empty sequence stops further dimension discovery, so the + # result shape will be (0,) which leads to an error during: + with pytest.raises(ValueError): + np.array([[], np.empty((0, 1))], dtype=object) + + def test_array_of_different_depths(self): + # When multiple arrays (or array-likes) are included in a + # sequences and have different depth, we currently discover + # as many dimensions as they share. (see also gh-17224) + arr = np.zeros((3, 2)) + mismatch_first_dim = np.zeros((1, 2)) + mismatch_second_dim = np.zeros((3, 3)) + + dtype, shape = ncu._discover_array_parameters( + [arr, mismatch_second_dim], dtype=np.dtype("O")) + assert shape == (2, 3) + + dtype, shape = ncu._discover_array_parameters( + [arr, mismatch_first_dim], dtype=np.dtype("O")) + assert shape == (2,) + # The second case is currently supported because the arrays + # can be stored as objects: + res = np.asarray([arr, mismatch_first_dim], dtype=np.dtype("O")) + assert res[0] is arr + assert res[1] is mismatch_first_dim + + +class TestBadSequences: + # These are tests for bad objects passed into `np.array`, in general + # these have undefined behaviour. In the old code they partially worked + # when now they will fail. We could (and maybe should) create a copy + # of all sequences to be safe against bad-actors. + + def test_growing_list(self): + # List to coerce, `mylist` will append to it during coercion + obj = [] + + class mylist(list): + def __len__(self): + obj.append([1, 2]) + return super().__len__() + + obj.append(mylist([1, 2])) + + with pytest.raises(RuntimeError): + np.array(obj) + + # Note: We do not test a shrinking list. These do very evil things + # and the only way to fix them would be to copy all sequences. + # (which may be a real option in the future). + + def test_mutated_list(self): + # List to coerce, `mylist` will mutate the first element + obj = [] + + class mylist(list): + def __len__(self): + obj[0] = [2, 3] # replace with a different list. + return super().__len__() + + obj.append([2, 3]) + obj.append(mylist([1, 2])) + # Does not crash: + np.array(obj) + + def test_replace_0d_array(self): + # List to coerce, `mylist` will mutate the first element + obj = [] + + class baditem: + def __len__(self): + obj[0][0] = 2 # replace with a different list. + raise ValueError("not actually a sequence!") + + def __getitem__(self, _, /): + pass + + # Runs into a corner case in the new code, the `array(2)` is cached + # so replacing it invalidates the cache. + obj.append([np.array(2), baditem()]) + with pytest.raises(RuntimeError): + np.array(obj) + + +class TestArrayLikes: + @pytest.mark.parametrize("arraylike", arraylikes()) + def test_0d_object_special_case(self, arraylike): + arr = np.array(0.) + obj = arraylike(arr) + # A single array-like is always converted: + res = np.array(obj, dtype=object) + assert_array_equal(arr, res) + + # But a single 0-D nested array-like never: + res = np.array([obj], dtype=object) + assert res[0] is obj + + @pytest.mark.parametrize("arraylike", arraylikes()) + @pytest.mark.parametrize("arr", [np.array(0.), np.arange(4)]) + def test_object_assignment_special_case(self, arraylike, arr): + obj = arraylike(arr) + empty = np.arange(1, dtype=object) + empty[:] = [obj] + assert empty[0] is obj + + def test_0d_generic_special_case(self): + class ArraySubclass(np.ndarray): + def __float__(self): + raise TypeError("e.g. quantities raise on this") + + arr = np.array(0.) + obj = arr.view(ArraySubclass) + res = np.array(obj) + # The subclass is simply cast: + assert_array_equal(arr, res) + + # If the 0-D array-like is included, __float__ is currently + # guaranteed to be used. We may want to change that, quantities + # and masked arrays half make use of this. + with pytest.raises(TypeError): + np.array([obj]) + + # The same holds for memoryview: + obj = memoryview(arr) + res = np.array(obj) + assert_array_equal(arr, res) + with pytest.raises(ValueError): + # The error type does not matter much here. + np.array([obj]) + + def test_arraylike_classes(self): + # The classes of array-likes should generally be acceptable to be + # stored inside a numpy (object) array. This tests all of the + # special attributes (since all are checked during coercion). + arr = np.array(np.int64) + assert arr[()] is np.int64 + arr = np.array([np.int64]) + assert arr[0] is np.int64 + + # This also works for properties/unbound methods: + class ArrayLike: + @property + def __array_interface__(self): + pass + + @property + def __array_struct__(self): + pass + + def __array__(self, dtype=None, copy=None): + pass + + arr = np.array(ArrayLike) + assert arr[()] is ArrayLike + arr = np.array([ArrayLike]) + assert arr[0] is ArrayLike + + @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") + @pytest.mark.thread_unsafe(reason="large slow test in parallel") + def test_too_large_array_error_paths(self): + """Test the error paths, including for memory leaks""" + arr = np.array(0, dtype="uint8") + # Guarantees that a contiguous copy won't work: + arr = np.broadcast_to(arr, 2**62) + + for i in range(5): + # repeat, to ensure caching cannot have an effect: + with pytest.raises(MemoryError): + np.array(arr) + with pytest.raises(MemoryError): + np.array([arr]) + + @pytest.mark.parametrize("attribute", + ["__array_interface__", "__array__", "__array_struct__"]) + @pytest.mark.parametrize("error", [RecursionError, MemoryError]) + def test_bad_array_like_attributes(self, attribute, error): + # RecursionError and MemoryError are considered fatal. All errors + # (except AttributeError) should probably be raised in the future, + # but shapely made use of it, so it will require a deprecation. + + class BadInterface: + def __getattr__(self, attr): + if attr == attribute: + raise error + super().__getattr__(attr) + + with pytest.raises(error): + np.array(BadInterface()) + + @pytest.mark.parametrize("error", [RecursionError, MemoryError]) + def test_bad_array_like_bad_length(self, error): + # RecursionError and MemoryError are considered "critical" in + # sequences. We could expand this more generally though. (NumPy 1.20) + class BadSequence: + def __len__(self): + raise error + + def __getitem__(self, _, /): + # must have getitem to be a Sequence + return 1 + + with pytest.raises(error): + np.array(BadSequence()) + + def test_array_interface_descr_optional(self): + # The descr should be optional regression test for gh-27249 + arr = np.ones(10, dtype="V10") + iface = arr.__array_interface__ + iface.pop("descr") + + class MyClass: + __array_interface__ = iface + + assert_array_equal(np.asarray(MyClass), arr) + + +class TestAsArray: + """Test expected behaviors of ``asarray``.""" + + def test_dtype_identity(self): + """Confirm the intended behavior for *dtype* kwarg. + + The result of ``asarray()`` should have the dtype provided through the + keyword argument, when used. This forces unique array handles to be + produced for unique np.dtype objects, but (for equivalent dtypes), the + underlying data (the base object) is shared with the original array + object. + + Ref https://github.com/numpy/numpy/issues/1468 + """ + int_array = np.array([1, 2, 3], dtype='i') + assert np.asarray(int_array) is int_array + + # The character code resolves to the singleton dtype object provided + # by the numpy package. + assert np.asarray(int_array, dtype='i') is int_array + + # Derive a dtype from n.dtype('i'), but add a metadata object to force + # the dtype to be distinct. + unequal_type = np.dtype('i', metadata={'spam': True}) + annotated_int_array = np.asarray(int_array, dtype=unequal_type) + assert annotated_int_array is not int_array + assert annotated_int_array.base is int_array + # Create an equivalent descriptor with a new and distinct dtype + # instance. + equivalent_requirement = np.dtype('i', metadata={'spam': True}) + annotated_int_array_alt = np.asarray(annotated_int_array, + dtype=equivalent_requirement) + assert unequal_type == equivalent_requirement + assert unequal_type is not equivalent_requirement + assert annotated_int_array_alt is not annotated_int_array + assert annotated_int_array_alt.dtype is equivalent_requirement + + # Check the same logic for a pair of C types whose equivalence may vary + # between computing environments. + # Find an equivalent pair. + integer_type_codes = ('i', 'l', 'q') + integer_dtypes = [np.dtype(code) for code in integer_type_codes] + typeA = None + typeB = None + for typeA, typeB in permutations(integer_dtypes, r=2): + if typeA == typeB: + assert typeA is not typeB + break + assert isinstance(typeA, np.dtype) and isinstance(typeB, np.dtype) + + # These ``asarray()`` calls may produce a new view or a copy, + # but never the same object. + long_int_array = np.asarray(int_array, dtype='l') + long_long_int_array = np.asarray(int_array, dtype='q') + assert long_int_array is not int_array + assert long_long_int_array is not int_array + assert np.asarray(long_int_array, dtype='q') is not long_int_array + array_a = np.asarray(int_array, dtype=typeA) + assert typeA == typeB + assert typeA is not typeB + assert array_a.dtype is typeA + assert array_a is not np.asarray(array_a, dtype=typeB) + assert np.asarray(array_a, dtype=typeB).dtype is typeB + assert array_a is np.asarray(array_a, dtype=typeB).base + + +class TestSpecialAttributeLookupFailure: + # An exception was raised while fetching the attribute + + class WeirdArrayLike: + @property + def __array__(self, dtype=None, copy=None): # noqa: PLR0206 + raise RuntimeError("oops!") + + class WeirdArrayInterface: + @property + def __array_interface__(self): + raise RuntimeError("oops!") + + def test_deprecated(self): + with pytest.raises(RuntimeError): + np.array(self.WeirdArrayLike()) + with pytest.raises(RuntimeError): + np.array(self.WeirdArrayInterface()) + + +def test_subarray_from_array_construction(): + # Arrays are more complex, since they "broadcast" on success: + arr = np.array([1, 2]) + + res = arr.astype("2i") + assert_array_equal(res, [[1, 1], [2, 2]]) + + res = np.array(arr, dtype="(2,)i") + + assert_array_equal(res, [[1, 1], [2, 2]]) + + res = np.array([[(1,), (2,)], arr], dtype="2i") + assert_array_equal(res, [[[1, 1], [2, 2]], [[1, 1], [2, 2]]]) + + # Also try a multi-dimensional example: + arr = np.arange(5 * 2).reshape(5, 2) + expected = np.broadcast_to(arr[:, :, np.newaxis, np.newaxis], (5, 2, 2, 2)) + + res = arr.astype("(2,2)f") + assert_array_equal(res, expected) + + res = np.array(arr, dtype="(2,2)f") + assert_array_equal(res, expected) + + +def test_empty_string(): + # Empty strings are unfortunately often converted to S1 and we need to + # make sure we are filling the S1 and not the (possibly) detected S0 + # result. This should likely just return S0 and if not maybe the decision + # to return S1 should be moved. + res = np.array([""] * 10, dtype="S") + assert_array_equal(res, np.array("\0", "S1")) + assert res.dtype == "S1" + + arr = np.array([""] * 10, dtype=object) + + res = arr.astype("S") + assert_array_equal(res, b"") + assert res.dtype == "S1" + + res = np.array(arr, dtype="S") + assert_array_equal(res, b"") + # TODO: This is arguably weird/wrong, but seems old: + assert res.dtype == f"S{np.dtype('O').itemsize}" + + res = np.array([[""] * 10, arr], dtype="S") + assert_array_equal(res, b"") + assert res.shape == (2, 10) + assert res.dtype == "S1" + + +@pytest.mark.parametrize("dtype", ["S", "U", object]) +@pytest.mark.parametrize("res_dt,hug_val", + [("float16", "1e30"), ("float32", "1e200")]) +def test_string_to_float_coercion_errors(dtype, res_dt, hug_val): + # This test primarly tests setitem + val = np.array(["3M"], dtype=dtype)[0] # use the scalar + + with pytest.raises(ValueError): + np.array(val, dtype=res_dt) + + val = np.array([hug_val], dtype=dtype)[0] # use the scalar + + with np.errstate(all="warn"): + with pytest.warns(RuntimeWarning): + np.array(val, dtype=res_dt) + + with np.errstate(all="raise"): + with pytest.raises(FloatingPointError): + np.array(val, dtype=res_dt) diff --git a/local_packages/numpy/_core/tests/test_array_interface.py b/local_packages/numpy/_core/tests/test_array_interface.py new file mode 100644 index 0000000..afb19f4 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_array_interface.py @@ -0,0 +1,222 @@ +import sys +import sysconfig + +import pytest + +import numpy as np +from numpy.testing import IS_EDITABLE, IS_WASM, extbuild + + +@pytest.fixture +def get_module(tmp_path): + """ Some codes to generate data and manage temporary buffers use when + sharing with numpy via the array interface protocol. + """ + if sys.platform.startswith('cygwin'): + pytest.skip('link fails on cygwin') + if IS_WASM: + pytest.skip("Can't build module inside Wasm") + if IS_EDITABLE: + pytest.skip("Can't build module for editable install") + + prologue = ''' + #include + #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + #include + #include + #include + + NPY_NO_EXPORT + void delete_array_struct(PyObject *cap) { + + /* get the array interface structure */ + PyArrayInterface *inter = (PyArrayInterface*) + PyCapsule_GetPointer(cap, NULL); + + /* get the buffer by which data was shared */ + double *ptr = (double*)PyCapsule_GetContext(cap); + + /* for the purposes of the regression test set the elements + to nan */ + for (npy_intp i = 0; i < inter->shape[0]; ++i) + ptr[i] = nan(""); + + /* free the shared buffer */ + free(ptr); + + /* free the array interface structure */ + free(inter->shape); + free(inter); + + fprintf(stderr, "delete_array_struct\\ncap = %ld inter = %ld" + " ptr = %ld\\n", (long)cap, (long)inter, (long)ptr); + } + ''' + + functions = [ + ("new_array_struct", "METH_VARARGS", """ + + long long n_elem = 0; + double value = 0.0; + + if (!PyArg_ParseTuple(args, "Ld", &n_elem, &value)) { + Py_RETURN_NONE; + } + + /* allocate and initialize the data to share with numpy */ + long long n_bytes = n_elem*sizeof(double); + double *data = (double*)malloc(n_bytes); + + if (!data) { + PyErr_Format(PyExc_MemoryError, + "Failed to malloc %lld bytes", n_bytes); + + Py_RETURN_NONE; + } + + for (long long i = 0; i < n_elem; ++i) { + data[i] = value; + } + + /* calculate the shape and stride */ + int nd = 1; + + npy_intp *ss = (npy_intp*)malloc(2*nd*sizeof(npy_intp)); + npy_intp *shape = ss; + npy_intp *stride = ss + nd; + + shape[0] = n_elem; + stride[0] = sizeof(double); + + /* construct the array interface */ + PyArrayInterface *inter = (PyArrayInterface*) + malloc(sizeof(PyArrayInterface)); + + memset(inter, 0, sizeof(PyArrayInterface)); + + inter->two = 2; + inter->nd = nd; + inter->typekind = 'f'; + inter->itemsize = sizeof(double); + inter->shape = shape; + inter->strides = stride; + inter->data = data; + inter->flags = NPY_ARRAY_WRITEABLE | NPY_ARRAY_NOTSWAPPED | + NPY_ARRAY_ALIGNED | NPY_ARRAY_C_CONTIGUOUS; + + /* package into a capsule */ + PyObject *cap = PyCapsule_New(inter, NULL, delete_array_struct); + + /* save the pointer to the data */ + PyCapsule_SetContext(cap, data); + + fprintf(stderr, "new_array_struct\\ncap = %ld inter = %ld" + " ptr = %ld\\n", (long)cap, (long)inter, (long)data); + + return cap; + """) + ] + + more_init = "import_array();" + + try: + import array_interface_testing + return array_interface_testing + except ImportError: + pass + + # if it does not exist, build and load it + if sysconfig.get_platform() == "win-arm64": + pytest.skip("Meson unable to find MSVC linker on win-arm64") + return extbuild.build_and_import_extension('array_interface_testing', + functions, + prologue=prologue, + include_dirs=[np.get_include()], + build_dir=tmp_path, + more_init=more_init) + + +@pytest.mark.slow +def test_cstruct(get_module): + + class data_source: + """ + This class is for testing the timing of the PyCapsule destructor + invoked when numpy release its reference to the shared data as part of + the numpy array interface protocol. If the PyCapsule destructor is + called early the shared data is freed and invalid memory accesses will + occur. + """ + + def __init__(self, size, value): + self.size = size + self.value = value + + @property + def __array_struct__(self): + return get_module.new_array_struct(self.size, self.value) + + # write to the same stream as the C code + stderr = sys.__stderr__ + + # used to validate the shared data. + expected_value = -3.1415 + multiplier = -10000.0 + + # create some data to share with numpy via the array interface + # assign the data an expected value. + stderr.write(' ---- create an object to share data ---- \n') + buf = data_source(256, expected_value) + stderr.write(' ---- OK!\n\n') + + # share the data + stderr.write(' ---- share data via the array interface protocol ---- \n') + arr = np.array(buf, copy=False) + stderr.write(f'arr.__array_interface___ = {str(arr.__array_interface__)}\n') + stderr.write(f'arr.base = {str(arr.base)}\n') + stderr.write(' ---- OK!\n\n') + + # release the source of the shared data. this will not release the data + # that was shared with numpy, that is done in the PyCapsule destructor. + stderr.write(' ---- destroy the object that shared data ---- \n') + buf = None + stderr.write(' ---- OK!\n\n') + + # check that we got the expected data. If the PyCapsule destructor we + # defined was prematurely called then this test will fail because our + # destructor sets the elements of the array to NaN before free'ing the + # buffer. Reading the values here may also cause a SEGV + assert np.allclose(arr, expected_value) + + # read the data. If the PyCapsule destructor we defined was prematurely + # called then reading the values here may cause a SEGV and will be reported + # as invalid reads by valgrind + stderr.write(' ---- read shared data ---- \n') + stderr.write(f'arr = {str(arr)}\n') + stderr.write(' ---- OK!\n\n') + + # write to the shared buffer. If the shared data was prematurely deleted + # this will may cause a SEGV and valgrind will report invalid writes + stderr.write(' ---- modify shared data ---- \n') + arr *= multiplier + expected_value *= multiplier + stderr.write(f'arr.__array_interface___ = {str(arr.__array_interface__)}\n') + stderr.write(f'arr.base = {str(arr.base)}\n') + stderr.write(' ---- OK!\n\n') + + # read the data. If the shared data was prematurely deleted this + # will may cause a SEGV and valgrind will report invalid reads + stderr.write(' ---- read modified shared data ---- \n') + stderr.write(f'arr = {str(arr)}\n') + stderr.write(' ---- OK!\n\n') + + # check that we got the expected data. If the PyCapsule destructor we + # defined was prematurely called then this test will fail because our + # destructor sets the elements of the array to NaN before free'ing the + # buffer. Reading the values here may also cause a SEGV + assert np.allclose(arr, expected_value) + + # free the shared data, the PyCapsule destructor should run here + stderr.write(' ---- free shared data ---- \n') + arr = None + stderr.write(' ---- OK!\n\n') diff --git a/local_packages/numpy/_core/tests/test_arraymethod.py b/local_packages/numpy/_core/tests/test_arraymethod.py new file mode 100644 index 0000000..5b3d515 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_arraymethod.py @@ -0,0 +1,84 @@ +""" +This file tests the generic aspects of ArrayMethod. At the time of writing +this is private API, but when added, public API may be added here. +""" + +import types +from typing import Any + +import pytest + +import numpy as np +from numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl + + +class TestResolveDescriptors: + # Test mainly error paths of the resolve_descriptors function, + # note that the `casting_unittests` tests exercise this non-error paths. + + # Casting implementations are the main/only current user: + method = get_castingimpl(type(np.dtype("d")), type(np.dtype("f"))) + + @pytest.mark.parametrize("args", [ + (True,), # Not a tuple. + ((None,)), # Too few elements + ((None, None, None),), # Too many + ((None, None),), # Input dtype is None, which is invalid. + ((np.dtype("d"), True),), # Output dtype is not a dtype + ((np.dtype("f"), None),), # Input dtype does not match method + ]) + def test_invalid_arguments(self, args): + with pytest.raises(TypeError): + self.method._resolve_descriptors(*args) + + +class TestSimpleStridedCall: + # Test mainly error paths of the resolve_descriptors function, + # note that the `casting_unittests` tests exercise this non-error paths. + + # Casting implementations are the main/only current user: + method = get_castingimpl(type(np.dtype("d")), type(np.dtype("f"))) + + @pytest.mark.parametrize(["args", "error"], [ + ((True,), TypeError), # Not a tuple + (((None,),), TypeError), # Too few elements + ((None, None), TypeError), # Inputs are not arrays. + (((None, None, None),), TypeError), # Too many + (((np.arange(3), np.arange(3)),), TypeError), # Incorrect dtypes + (((np.ones(3, dtype=">d"), np.ones(3, dtype=" None: + """Test `ndarray.__class_getitem__`.""" + alias = cls[Any, Any] + assert isinstance(alias, types.GenericAlias) + assert alias.__origin__ is cls + + @pytest.mark.parametrize("arg_len", range(4)) + def test_subscript_tup(self, cls: type[np.ndarray], arg_len: int) -> None: + arg_tup = (Any,) * arg_len + if arg_len in (1, 2): + assert cls[arg_tup] + else: + match = f"Too {'few' if arg_len == 0 else 'many'} arguments" + with pytest.raises(TypeError, match=match): + cls[arg_tup] diff --git a/local_packages/numpy/_core/tests/test_arrayobject.py b/local_packages/numpy/_core/tests/test_arrayobject.py new file mode 100644 index 0000000..f4e268b --- /dev/null +++ b/local_packages/numpy/_core/tests/test_arrayobject.py @@ -0,0 +1,95 @@ +import sys + +import pytest + +import numpy as np +from numpy.testing import HAS_REFCOUNT, assert_array_equal + + +def test_matrix_transpose_raises_error_for_1d(): + msg = "matrix transpose with ndim < 2 is undefined" + arr = np.arange(48) + with pytest.raises(ValueError, match=msg): + arr.mT + + +def test_matrix_transpose_equals_transpose_2d(): + arr = np.arange(48).reshape((6, 8)) + assert_array_equal(arr.T, arr.mT) + + +ARRAY_SHAPES_TO_TEST = ( + (5, 2), + (5, 2, 3), + (5, 2, 3, 4), +) + + +@pytest.mark.parametrize("shape", ARRAY_SHAPES_TO_TEST) +def test_matrix_transpose_equals_swapaxes(shape): + num_of_axes = len(shape) + vec = np.arange(shape[-1]) + arr = np.broadcast_to(vec, shape) + tgt = np.swapaxes(arr, num_of_axes - 2, num_of_axes - 1) + mT = arr.mT + assert_array_equal(tgt, mT) + + +class MyArr(np.ndarray): + def __array_wrap__(self, arr, context=None, return_scalar=None): + return super().__array_wrap__(arr, context, return_scalar) + + +class MyArrNoWrap(np.ndarray): + pass + + +@pytest.mark.parametrize("subclass_self", [np.ndarray, MyArr, MyArrNoWrap]) +@pytest.mark.parametrize("subclass_arr", [np.ndarray, MyArr, MyArrNoWrap]) +def test_array_wrap(subclass_self, subclass_arr): + # NumPy should allow `__array_wrap__` to be called on arrays, it's logic + # is designed in a way that: + # + # * Subclasses never return scalars by default (to preserve their + # information). They can choose to if they wish. + # * NumPy returns scalars, if `return_scalar` is passed as True to allow + # manual calls to `arr.__array_wrap__` to do the right thing. + # * The type of the input should be ignored (it should be a base-class + # array, but I am not sure this is guaranteed). + + arr = np.arange(3).view(subclass_self) + + arr0d = np.array(3, dtype=np.int8).view(subclass_arr) + # With third argument True, ndarray allows "decay" to scalar. + # (I don't think NumPy would pass `None`, but it seems clear to support) + if subclass_self is np.ndarray: + assert type(arr.__array_wrap__(arr0d, None, True)) is np.int8 + else: + assert type(arr.__array_wrap__(arr0d, None, True)) is type(arr) + + # Otherwise, result should be viewed as the subclass + assert type(arr.__array_wrap__(arr0d)) is type(arr) + assert type(arr.__array_wrap__(arr0d, None, None)) is type(arr) + assert type(arr.__array_wrap__(arr0d, None, False)) is type(arr) + + # Non 0-D array can't be converted to scalar, so we ignore that + arr1d = np.array([3], dtype=np.int8).view(subclass_arr) + assert type(arr.__array_wrap__(arr1d, None, True)) is type(arr) + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_cleanup_with_refs_non_contig(): + # Regression test, leaked the dtype (but also good for rest) + dtype = np.dtype("O,i") + obj = object() + expected_ref_dtype = sys.getrefcount(dtype) + expected_ref_obj = sys.getrefcount(obj) + proto = np.full((3, 4, 5, 6, 7), np.array((obj, 2), dtype=dtype)) + # Give array a non-trivial order to exercise more cleanup paths. + arr = proto.transpose((2, 0, 3, 1, 4)).copy("K") + del proto, arr + + actual_ref_dtype = sys.getrefcount(dtype) + actual_ref_obj = sys.getrefcount(obj) + assert actual_ref_dtype == expected_ref_dtype + assert actual_ref_obj == actual_ref_dtype diff --git a/local_packages/numpy/_core/tests/test_arrayprint.py b/local_packages/numpy/_core/tests/test_arrayprint.py new file mode 100644 index 0000000..06d1306 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_arrayprint.py @@ -0,0 +1,1324 @@ +import gc +import sys +import textwrap + +import pytest +from hypothesis import given +from hypothesis.extra import numpy as hynp + +import numpy as np +from numpy._core.arrayprint import _typelessdata +from numpy.testing import ( + HAS_REFCOUNT, + IS_WASM, + assert_, + assert_equal, + assert_raises, + assert_raises_regex, +) +from numpy.testing._private.utils import run_threaded + + +class TestArrayRepr: + def test_nan_inf(self): + x = np.array([np.nan, np.inf]) + assert_equal(repr(x), 'array([nan, inf])') + + def test_subclass(self): + class sub(np.ndarray): + pass + + # one dimensional + x1d = np.array([1, 2]).view(sub) + assert_equal(repr(x1d), 'sub([1, 2])') + + # two dimensional + x2d = np.array([[1, 2], [3, 4]]).view(sub) + assert_equal(repr(x2d), + 'sub([[1, 2],\n' + ' [3, 4]])') + + # two dimensional with flexible dtype + xstruct = np.ones((2, 2), dtype=[('a', ' 1) + y = sub(None) + x[()] = y + y[()] = x + assert_equal(repr(x), + 'sub(sub(sub(..., dtype=object), dtype=object), dtype=object)') + assert_equal(str(x), '...') + x[()] = 0 # resolve circular references for garbage collector + + # nested 0d-subclass-object + x = sub(None) + x[()] = sub(None) + assert_equal(repr(x), 'sub(sub(None, dtype=object), dtype=object)') + assert_equal(str(x), 'None') + + # gh-10663 + class DuckCounter(np.ndarray): + def __getitem__(self, item): + result = super().__getitem__(item) + if not isinstance(result, DuckCounter): + result = result[...].view(DuckCounter) + return result + + def to_string(self): + return {0: 'zero', 1: 'one', 2: 'two'}.get(self.item(), 'many') + + def __str__(self): + if self.shape == (): + return self.to_string() + else: + fmt = {'all': lambda x: x.to_string()} + return np.array2string(self, formatter=fmt) + + dc = np.arange(5).view(DuckCounter) + assert_equal(str(dc), "[zero one two many many]") + assert_equal(str(dc[0]), "zero") + + def test_self_containing(self): + arr0d = np.array(None) + arr0d[()] = arr0d + assert_equal(repr(arr0d), + 'array(array(..., dtype=object), dtype=object)') + arr0d[()] = 0 # resolve recursion for garbage collector + + arr1d = np.array([None, None]) + arr1d[1] = arr1d + assert_equal(repr(arr1d), + 'array([None, array(..., dtype=object)], dtype=object)') + arr1d[1] = 0 # resolve recursion for garbage collector + + first = np.array(None) + second = np.array(None) + first[()] = second + second[()] = first + assert_equal(repr(first), + 'array(array(array(..., dtype=object), dtype=object), dtype=object)') + first[()] = 0 # resolve circular references for garbage collector + + def test_containing_list(self): + # printing square brackets directly would be ambiguous + arr1d = np.array([None, None]) + arr1d[0] = [1, 2] + arr1d[1] = [3] + assert_equal(repr(arr1d), + 'array([list([1, 2]), list([3])], dtype=object)') + + def test_void_scalar_recursion(self): + # gh-9345 + repr(np.void(b'test')) # RecursionError ? + + def test_fieldless_structured(self): + # gh-10366 + no_fields = np.dtype([]) + arr_no_fields = np.empty(4, dtype=no_fields) + assert_equal(repr(arr_no_fields), 'array([(), (), (), ()], dtype=[])') + + +class TestComplexArray: + def test_str(self): + rvals = [0, 1, -1, np.inf, -np.inf, np.nan] + cvals = [complex(rp, ip) for rp in rvals for ip in rvals] + dtypes = [np.complex64, np.cdouble, np.clongdouble] + actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes] + wanted = [ + '[0.+0.j]', '[0.+0.j]', '[0.+0.j]', + '[0.+1.j]', '[0.+1.j]', '[0.+1.j]', + '[0.-1.j]', '[0.-1.j]', '[0.-1.j]', + '[0.+infj]', '[0.+infj]', '[0.+infj]', + '[0.-infj]', '[0.-infj]', '[0.-infj]', + '[0.+nanj]', '[0.+nanj]', '[0.+nanj]', + '[1.+0.j]', '[1.+0.j]', '[1.+0.j]', + '[1.+1.j]', '[1.+1.j]', '[1.+1.j]', + '[1.-1.j]', '[1.-1.j]', '[1.-1.j]', + '[1.+infj]', '[1.+infj]', '[1.+infj]', + '[1.-infj]', '[1.-infj]', '[1.-infj]', + '[1.+nanj]', '[1.+nanj]', '[1.+nanj]', + '[-1.+0.j]', '[-1.+0.j]', '[-1.+0.j]', + '[-1.+1.j]', '[-1.+1.j]', '[-1.+1.j]', + '[-1.-1.j]', '[-1.-1.j]', '[-1.-1.j]', + '[-1.+infj]', '[-1.+infj]', '[-1.+infj]', + '[-1.-infj]', '[-1.-infj]', '[-1.-infj]', + '[-1.+nanj]', '[-1.+nanj]', '[-1.+nanj]', + '[inf+0.j]', '[inf+0.j]', '[inf+0.j]', + '[inf+1.j]', '[inf+1.j]', '[inf+1.j]', + '[inf-1.j]', '[inf-1.j]', '[inf-1.j]', + '[inf+infj]', '[inf+infj]', '[inf+infj]', + '[inf-infj]', '[inf-infj]', '[inf-infj]', + '[inf+nanj]', '[inf+nanj]', '[inf+nanj]', + '[-inf+0.j]', '[-inf+0.j]', '[-inf+0.j]', + '[-inf+1.j]', '[-inf+1.j]', '[-inf+1.j]', + '[-inf-1.j]', '[-inf-1.j]', '[-inf-1.j]', + '[-inf+infj]', '[-inf+infj]', '[-inf+infj]', + '[-inf-infj]', '[-inf-infj]', '[-inf-infj]', + '[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]', + '[nan+0.j]', '[nan+0.j]', '[nan+0.j]', + '[nan+1.j]', '[nan+1.j]', '[nan+1.j]', + '[nan-1.j]', '[nan-1.j]', '[nan-1.j]', + '[nan+infj]', '[nan+infj]', '[nan+infj]', + '[nan-infj]', '[nan-infj]', '[nan-infj]', + '[nan+nanj]', '[nan+nanj]', '[nan+nanj]'] + + for res, val in zip(actual, wanted): + assert_equal(res, val) + +class TestArray2String: + def test_basic(self): + """Basic test of array2string.""" + a = np.arange(3) + assert_(np.array2string(a) == '[0 1 2]') + assert_(np.array2string(a, max_line_width=4, legacy='1.13') == '[0 1\n 2]') + assert_(np.array2string(a, max_line_width=4) == '[0\n 1\n 2]') + + def test_unexpected_kwarg(self): + # ensure than an appropriate TypeError + # is raised when array2string receives + # an unexpected kwarg + + with assert_raises_regex(TypeError, 'nonsense'): + np.array2string(np.array([1, 2, 3]), + nonsense=None) + + def test_format_function(self): + """Test custom format function for each element in array.""" + def _format_function(x): + if np.abs(x) < 1: + return '.' + elif np.abs(x) < 2: + return 'o' + else: + return 'O' + + x = np.arange(3) + x_hex = "[0x0 0x1 0x2]" + x_oct = "[0o0 0o1 0o2]" + assert_(np.array2string(x, formatter={'all': _format_function}) == + "[. o O]") + assert_(np.array2string(x, formatter={'int_kind': _format_function}) == + "[. o O]") + assert_(np.array2string(x, formatter={'all': lambda x: f"{x:.4f}"}) == + "[0.0000 1.0000 2.0000]") + assert_equal(np.array2string(x, formatter={'int': hex}), + x_hex) + assert_equal(np.array2string(x, formatter={'int': oct}), + x_oct) + + x = np.arange(3.) + assert_(np.array2string(x, formatter={'float_kind': lambda x: f"{x:.2f}"}) == + "[0.00 1.00 2.00]") + assert_(np.array2string(x, formatter={'float': lambda x: f"{x:.2f}"}) == + "[0.00 1.00 2.00]") + + s = np.array(['abc', 'def']) + assert_(np.array2string(s, formatter={'numpystr': lambda s: s * 2}) == + '[abcabc defdef]') + + def test_structure_format_mixed(self): + dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt) + assert_equal(np.array2string(x), + "[('Sarah', [8., 7.]) ('John', [6., 7.])]") + + np.set_printoptions(legacy='1.13') + try: + # for issue #5692 + A = np.zeros(shape=10, dtype=[("A", "M8[s]")]) + A[5:].fill(np.datetime64('NaT')) + assert_equal( + np.array2string(A), + textwrap.dedent("""\ + [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) + ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) ('NaT',) + ('NaT',) ('NaT',) ('NaT',)]""") + ) + finally: + np.set_printoptions(legacy=False) + + # same again, but with non-legacy behavior + assert_equal( + np.array2string(A), + textwrap.dedent("""\ + [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) + ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) + ('1970-01-01T00:00:00',) ( 'NaT',) + ( 'NaT',) ( 'NaT',) + ( 'NaT',) ( 'NaT',)]""") + ) + + # and again, with timedeltas + A = np.full(10, 123456, dtype=[("A", "m8[s]")]) + A[5:].fill(np.datetime64('NaT')) + assert_equal( + np.array2string(A), + textwrap.dedent("""\ + [(123456,) (123456,) (123456,) (123456,) (123456,) ( 'NaT',) ( 'NaT',) + ( 'NaT',) ( 'NaT',) ( 'NaT',)]""") + ) + + def test_structure_format_int(self): + # See #8160 + struct_int = np.array([([1, -1],), ([123, 1],)], + dtype=[('B', 'i4', 2)]) + assert_equal(np.array2string(struct_int), + "[([ 1, -1],) ([123, 1],)]") + struct_2dint = np.array([([[0, 1], [2, 3]],), ([[12, 0], [0, 0]],)], + dtype=[('B', 'i4', (2, 2))]) + assert_equal(np.array2string(struct_2dint), + "[([[ 0, 1], [ 2, 3]],) ([[12, 0], [ 0, 0]],)]") + + def test_structure_format_float(self): + # See #8172 + array_scalar = np.array( + (1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8')) + assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)") + + def test_unstructured_void_repr(self): + a = np.array([27, 91, 50, 75, 7, 65, 10, 8, 27, 91, 51, 49, 109, 82, 101, 100], + dtype='u1').view('V8') + assert_equal(repr(a[0]), + r"np.void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')") + assert_equal(str(a[0]), r"b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'") + assert_equal(repr(a), + r"array([b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'," + "\n" + r" b'\x1B\x5B\x33\x31\x6D\x52\x65\x64'], dtype='|V8')") + + assert_equal(eval(repr(a), vars(np)), a) + assert_equal(eval(repr(a[0]), {'np': np}), a[0]) + + def test_edgeitems_kwarg(self): + # previously the global print options would be taken over the kwarg + arr = np.zeros(3, int) + assert_equal( + np.array2string(arr, edgeitems=1, threshold=0), + "[0 ... 0]" + ) + + def test_summarize_1d(self): + A = np.arange(1001) + strA = '[ 0 1 2 ... 998 999 1000]' + assert_equal(str(A), strA) + + reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])' + try: + np.set_printoptions(legacy='2.1') + assert_equal(repr(A), reprA) + finally: + np.set_printoptions(legacy=False) + + assert_equal(repr(A), reprA.replace(')', ', shape=(1001,))')) + + def test_summarize_2d(self): + A = np.arange(1002).reshape(2, 501) + strA = '[[ 0 1 2 ... 498 499 500]\n' \ + ' [ 501 502 503 ... 999 1000 1001]]' + assert_equal(str(A), strA) + + reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \ + ' [ 501, 502, 503, ..., 999, 1000, 1001]])' + try: + np.set_printoptions(legacy='2.1') + assert_equal(repr(A), reprA) + finally: + np.set_printoptions(legacy=False) + + assert_equal(repr(A), reprA.replace(')', ', shape=(2, 501))')) + + def test_summarize_2d_dtype(self): + A = np.arange(1002, dtype='i2').reshape(2, 501) + strA = '[[ 0 1 2 ... 498 499 500]\n' \ + ' [ 501 502 503 ... 999 1000 1001]]' + assert_equal(str(A), strA) + + reprA = ('array([[ 0, 1, 2, ..., 498, 499, 500],\n' + ' [ 501, 502, 503, ..., 999, 1000, 1001]],\n' + ' shape=(2, 501), dtype=int16)') + assert_equal(repr(A), reprA) + + def test_summarize_structure(self): + A = (np.arange(2002, dtype="i8", (2, 1001))]) + strB = "[([[1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1]],)]" + assert_equal(str(B), strB) + + reprB = ( + "array([([[1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1]],)],\n" + " dtype=[('i', '>i8', (2, 1001))])" + ) + assert_equal(repr(B), reprB) + + C = (np.arange(22, dtype=" 1: + # if the type is >1 byte, the non-native endian version + # must show endianness. + assert non_native_repr != native_repr + assert f"dtype='{non_native_dtype.byteorder}" in non_native_repr + + def test_linewidth_repr(self): + a = np.full(7, fill_value=2) + np.set_printoptions(linewidth=17) + assert_equal( + repr(a), + textwrap.dedent("""\ + array([2, 2, 2, + 2, 2, 2, + 2])""") + ) + np.set_printoptions(linewidth=17, legacy='1.13') + assert_equal( + repr(a), + textwrap.dedent("""\ + array([2, 2, 2, + 2, 2, 2, 2])""") + ) + + a = np.full(8, fill_value=2) + + np.set_printoptions(linewidth=18, legacy=False) + assert_equal( + repr(a), + textwrap.dedent("""\ + array([2, 2, 2, + 2, 2, 2, + 2, 2])""") + ) + + np.set_printoptions(linewidth=18, legacy='1.13') + assert_equal( + repr(a), + textwrap.dedent("""\ + array([2, 2, 2, 2, + 2, 2, 2, 2])""") + ) + + def test_linewidth_str(self): + a = np.full(18, fill_value=2) + np.set_printoptions(linewidth=18) + assert_equal( + str(a), + textwrap.dedent("""\ + [2 2 2 2 2 2 2 2 + 2 2 2 2 2 2 2 2 + 2 2]""") + ) + np.set_printoptions(linewidth=18, legacy='1.13') + assert_equal( + str(a), + textwrap.dedent("""\ + [2 2 2 2 2 2 2 2 2 + 2 2 2 2 2 2 2 2 2]""") + ) + + def test_edgeitems(self): + np.set_printoptions(edgeitems=1, threshold=1) + a = np.arange(27).reshape((3, 3, 3)) + assert_equal( + repr(a), + textwrap.dedent("""\ + array([[[ 0, ..., 2], + ..., + [ 6, ..., 8]], + + ..., + + [[18, ..., 20], + ..., + [24, ..., 26]]], shape=(3, 3, 3))""") + ) + + b = np.zeros((3, 3, 1, 1)) + assert_equal( + repr(b), + textwrap.dedent("""\ + array([[[[0.]], + + ..., + + [[0.]]], + + + ..., + + + [[[0.]], + + ..., + + [[0.]]]], shape=(3, 3, 1, 1))""") + ) + + # 1.13 had extra trailing spaces, and was missing newlines + try: + np.set_printoptions(legacy='1.13') + assert_equal(repr(a), ( + "array([[[ 0, ..., 2],\n" + " ..., \n" + " [ 6, ..., 8]],\n" + "\n" + " ..., \n" + " [[18, ..., 20],\n" + " ..., \n" + " [24, ..., 26]]])") + ) + assert_equal(repr(b), ( + "array([[[[ 0.]],\n" + "\n" + " ..., \n" + " [[ 0.]]],\n" + "\n" + "\n" + " ..., \n" + " [[[ 0.]],\n" + "\n" + " ..., \n" + " [[ 0.]]]])") + ) + finally: + np.set_printoptions(legacy=False) + + def test_edgeitems_structured(self): + np.set_printoptions(edgeitems=1, threshold=1) + A = np.arange(5 * 2 * 3, dtype="f4')])"), + (np.void(b'a'), r"void(b'\x61')", r"np.void(b'\x61')"), + ]) +def test_scalar_repr_special(scalar, legacy_repr, representation): + # Test NEP 51 scalar repr (and legacy option) for numeric types + assert repr(scalar) == representation + + with np.printoptions(legacy="1.25"): + assert repr(scalar) == legacy_repr + +def test_scalar_void_float_str(): + # Note that based on this currently we do not print the same as a tuple + # would, since the tuple would include the repr() inside for floats, but + # we do not do that. + scalar = np.void((1.0, 2.0), dtype=[('f0', 'f4')]) + assert str(scalar) == "(1.0, 2.0)" + +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't support asyncio") +@pytest.mark.skipif(sys.version_info < (3, 11), + reason="asyncio.barrier was added in Python 3.11") +def test_printoptions_asyncio_safe(): + asyncio = pytest.importorskip("asyncio") + + b = asyncio.Barrier(2) + + async def legacy_113(): + np.set_printoptions(legacy='1.13', precision=12) + await b.wait() + po = np.get_printoptions() + assert po['legacy'] == '1.13' + assert po['precision'] == 12 + orig_linewidth = po['linewidth'] + with np.printoptions(linewidth=34, legacy='1.21'): + po = np.get_printoptions() + assert po['legacy'] == '1.21' + assert po['precision'] == 12 + assert po['linewidth'] == 34 + po = np.get_printoptions() + assert po['linewidth'] == orig_linewidth + assert po['legacy'] == '1.13' + assert po['precision'] == 12 + + async def legacy_125(): + np.set_printoptions(legacy='1.25', precision=7) + await b.wait() + po = np.get_printoptions() + assert po['legacy'] == '1.25' + assert po['precision'] == 7 + orig_linewidth = po['linewidth'] + with np.printoptions(linewidth=6, legacy='1.13'): + po = np.get_printoptions() + assert po['legacy'] == '1.13' + assert po['precision'] == 7 + assert po['linewidth'] == 6 + po = np.get_printoptions() + assert po['linewidth'] == orig_linewidth + assert po['legacy'] == '1.25' + assert po['precision'] == 7 + + async def main(): + await asyncio.gather(legacy_125(), legacy_125()) + + loop = asyncio.new_event_loop() + asyncio.run(main()) + loop.close() + +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't support threads") +@pytest.mark.thread_unsafe(reason="test is already explicitly multi-threaded") +def test_multithreaded_array_printing(): + # the dragon4 implementation uses a static scratch space for performance + # reasons this test makes sure it is set up in a thread-safe manner + + run_threaded(TestPrintOptions().test_floatmode, 500) diff --git a/local_packages/numpy/_core/tests/test_casting_floatingpoint_errors.py b/local_packages/numpy/_core/tests/test_casting_floatingpoint_errors.py new file mode 100644 index 0000000..2f9c01f --- /dev/null +++ b/local_packages/numpy/_core/tests/test_casting_floatingpoint_errors.py @@ -0,0 +1,154 @@ +import pytest +from pytest import param + +import numpy as np +from numpy.testing import IS_WASM + + +def values_and_dtypes(): + """ + Generate value+dtype pairs that generate floating point errors during + casts. The invalid casts to integers will generate "invalid" value + warnings, the float casts all generate "overflow". + + (The Python int/float paths don't need to get tested in all the same + situations, but it does not hurt.) + """ + # Casting to float16: + yield param(70000, "float16", id="int-to-f2") + yield param("70000", "float16", id="str-to-f2") + yield param(70000.0, "float16", id="float-to-f2") + yield param(np.longdouble(70000.), "float16", id="longdouble-to-f2") + yield param(np.float64(70000.), "float16", id="double-to-f2") + yield param(np.float32(70000.), "float16", id="float-to-f2") + # Casting to float32: + yield param(10**100, "float32", id="int-to-f4") + yield param(1e100, "float32", id="float-to-f2") + yield param(np.longdouble(1e300), "float32", id="longdouble-to-f2") + yield param(np.float64(1e300), "float32", id="double-to-f2") + # Casting to float64: + # If longdouble is double-double, its max can be rounded down to the double + # max. So we correct the double spacing (a bit weird, admittedly): + max_ld = np.finfo(np.longdouble).max + spacing = np.spacing(np.nextafter(np.finfo("f8").max, 0)) + if max_ld - spacing > np.finfo("f8").max: + yield param(np.finfo(np.longdouble).max, "float64", + id="longdouble-to-f8") + + # Cast to complex32: + yield param(2e300, "complex64", id="float-to-c8") + yield param(2e300 + 0j, "complex64", id="complex-to-c8") + yield param(2e300j, "complex64", id="complex-to-c8") + yield param(np.longdouble(2e300), "complex64", id="longdouble-to-c8") + + # Invalid float to integer casts: + with np.errstate(over="ignore"): + for to_dt in np.typecodes["AllInteger"]: + for value in [np.inf, np.nan]: + for from_dt in np.typecodes["AllFloat"]: + from_dt = np.dtype(from_dt) + from_val = from_dt.type(value) + + yield param(from_val, to_dt, id=f"{from_val}-to-{to_dt}") + + +def check_operations(dtype, value): + """ + There are many dedicated paths in NumPy which cast and should check for + floating point errors which occurred during those casts. + """ + if dtype.kind != 'i': + # These assignments use the stricter setitem logic: + def assignment(): + arr = np.empty(3, dtype=dtype) + arr[0] = value + + yield assignment + + def fill(): + arr = np.empty(3, dtype=dtype) + arr.fill(value) + + yield fill + + def copyto_scalar(): + arr = np.empty(3, dtype=dtype) + np.copyto(arr, value, casting="unsafe") + + yield copyto_scalar + + def copyto(): + arr = np.empty(3, dtype=dtype) + np.copyto(arr, np.array([value, value, value]), casting="unsafe") + + yield copyto + + def copyto_scalar_masked(): + arr = np.empty(3, dtype=dtype) + np.copyto(arr, value, casting="unsafe", + where=[True, False, True]) + + yield copyto_scalar_masked + + def copyto_masked(): + arr = np.empty(3, dtype=dtype) + np.copyto(arr, np.array([value, value, value]), casting="unsafe", + where=[True, False, True]) + + yield copyto_masked + + def direct_cast(): + np.array([value, value, value]).astype(dtype) + + yield direct_cast + + def direct_cast_nd_strided(): + arr = np.full((5, 5, 5), fill_value=value)[:, ::2, :] + arr.astype(dtype) + + yield direct_cast_nd_strided + + def boolean_array_assignment(): + arr = np.empty(3, dtype=dtype) + arr[[True, False, True]] = np.array([value, value]) + + yield boolean_array_assignment + + def integer_array_assignment(): + arr = np.empty(3, dtype=dtype) + values = np.array([value, value]) + + arr[[0, 1]] = values + + yield integer_array_assignment + + def integer_array_assignment_with_subspace(): + arr = np.empty((5, 3), dtype=dtype) + values = np.array([value, value, value]) + + arr[[0, 2]] = values + + yield integer_array_assignment_with_subspace + + def flat_assignment(): + arr = np.empty((3,), dtype=dtype) + values = np.array([value, value, value]) + arr.flat[:] = values + + yield flat_assignment + +@pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support") +@pytest.mark.parametrize(["value", "dtype"], values_and_dtypes()) +@pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") +def test_floatingpoint_errors_casting(dtype, value): + dtype = np.dtype(dtype) + for operation in check_operations(dtype, value): + dtype = np.dtype(dtype) + + match = "invalid" if dtype.kind in 'iu' else "overflow" + with pytest.warns(RuntimeWarning, match=match): + operation() + + with np.errstate(all="raise"): + with pytest.raises(FloatingPointError, match=match): + operation() diff --git a/local_packages/numpy/_core/tests/test_casting_unittests.py b/local_packages/numpy/_core/tests/test_casting_unittests.py new file mode 100644 index 0000000..5f643f8 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_casting_unittests.py @@ -0,0 +1,955 @@ +""" +The tests exercise the casting machinery in a more low-level manner. +The reason is mostly to test a new implementation of the casting machinery. + +Unlike most tests in NumPy, these are closer to unit-tests rather +than integration tests. +""" + +import ctypes +import enum +import random +import textwrap +import warnings + +import pytest + +import numpy as np +from numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl +from numpy.lib.stride_tricks import as_strided +from numpy.testing import assert_array_equal, assert_equal + +# Simple skips object, parametric and long double (unsupported by struct) +simple_dtypes = "?bhilqBHILQefdFD" +if np.dtype("l").itemsize != np.dtype("q").itemsize: + # Remove l and L, the table was generated with 64bit linux in mind. + simple_dtypes = simple_dtypes.replace("l", "").replace("L", "") +simple_dtypes = [type(np.dtype(c)) for c in simple_dtypes] + + +def simple_dtype_instances(): + for dtype_class in simple_dtypes: + dt = dtype_class() + yield pytest.param(dt, id=str(dt)) + if dt.byteorder != "|": + dt = dt.newbyteorder() + yield pytest.param(dt, id=str(dt)) + + +def get_expected_stringlength(dtype): + """Returns the string length when casting the basic dtypes to strings. + """ + if dtype == np.bool: + return 5 + if dtype.kind in "iu": + if dtype.itemsize == 1: + length = 3 + elif dtype.itemsize == 2: + length = 5 + elif dtype.itemsize == 4: + length = 10 + elif dtype.itemsize == 8: + length = 20 + else: + raise AssertionError(f"did not find expected length for {dtype}") + + if dtype.kind == "i": + length += 1 # adds one character for the sign + + return length + + # Note: Can't do dtype comparison for longdouble on windows + if dtype.char == "g": + return 48 + elif dtype.char == "G": + return 48 * 2 + elif dtype.kind == "f": + return 32 # also for half apparently. + elif dtype.kind == "c": + return 32 * 2 + + raise AssertionError(f"did not find expected length for {dtype}") + + +class Casting(enum.IntEnum): + no = 0 + equiv = 1 + safe = 2 + same_kind = 3 + unsafe = 4 + same_value = 64 + + +same_value_dtypes = tuple(type(np.dtype(c)) for c in "?bhilqBHILQefdgFDG") + +def _get_cancast_table(): + table = textwrap.dedent(""" + X ? b h i l q B H I L Q e f d g F D G S U V O M m + ? # = = = = = = = = = = = = = = = = = = = = = . = + b . # = = = = . . . . . = = = = = = = = = = = . = + h . ~ # = = = . . . . . ~ = = = = = = = = = = . = + i . ~ ~ # = = . . . . . ~ ~ = = ~ = = = = = = . = + l . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . = + q . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . = + B . ~ = = = = # = = = = = = = = = = = = = = = . = + H . ~ ~ = = = ~ # = = = ~ = = = = = = = = = = . = + I . ~ ~ ~ = = ~ ~ # = = ~ ~ = = ~ = = = = = = . = + L . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~ + Q . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~ + e . . . . . . . . . . . # = = = = = = = = = = . . + f . . . . . . . . . . . ~ # = = = = = = = = = . . + d . . . . . . . . . . . ~ ~ # = ~ = = = = = = . . + g . . . . . . . . . . . ~ ~ ~ # ~ ~ = = = = = . . + F . . . . . . . . . . . . . . . # = = = = = = . . + D . . . . . . . . . . . . . . . ~ # = = = = = . . + G . . . . . . . . . . . . . . . ~ ~ # = = = = . . + S . . . . . . . . . . . . . . . . . . # = = = . . + U . . . . . . . . . . . . . . . . . . . # = = . . + V . . . . . . . . . . . . . . . . . . . . # = . . + O . . . . . . . . . . . . . . . . . . . . = # . . + M . . . . . . . . . . . . . . . . . . . . = = # . + m . . . . . . . . . . . . . . . . . . . . = = . # + """).strip().split("\n") + dtypes = [type(np.dtype(c)) for c in table[0][2::2]] + + convert_cast = {".": Casting.unsafe, "~": Casting.same_kind, + "=": Casting.safe, "#": Casting.equiv, + " ": -1} + + cancast = {} + for from_dt, row in zip(dtypes, table[1:]): + cancast[from_dt] = {} + for to_dt, c in zip(dtypes, row[2::2]): + cancast[from_dt][to_dt] = convert_cast[c] + # Of the types checked, numeric cast support same-value + if from_dt in same_value_dtypes and to_dt in same_value_dtypes: + cancast[from_dt][to_dt] |= Casting.same_value + + return cancast + + +CAST_TABLE = _get_cancast_table() + + +class TestChanges: + """ + These test cases exercise some behaviour changes + """ + @pytest.mark.parametrize("string", ["S", "U"]) + @pytest.mark.parametrize("floating", ["e", "f", "d", "g"]) + def test_float_to_string(self, floating, string): + assert np.can_cast(floating, string) + # 100 is long enough to hold any formatted floating + assert np.can_cast(floating, f"{string}100") + + def test_to_void(self): + # But in general, we do consider these safe: + assert np.can_cast("d", "V") + assert np.can_cast("S20", "V") + + # Do not consider it a safe cast if the void is too smaller: + assert not np.can_cast("d", "V1") + assert not np.can_cast("S20", "V1") + assert not np.can_cast("U1", "V1") + # Structured to unstructured is just like any other: + assert np.can_cast("d,i", "V", casting="same_kind") + # Unstructured void to unstructured is actually no cast at all: + assert np.can_cast("V3", "V", casting="no") + assert np.can_cast("V0", "V", casting="no") + + +class TestCasting: + size = 1500 # Best larger than NPY_LOWLEVEL_BUFFER_BLOCKSIZE * itemsize + + def get_data(self, dtype1, dtype2): + if dtype2 is None or dtype1.itemsize >= dtype2.itemsize: + length = self.size // dtype1.itemsize + else: + length = self.size // dtype2.itemsize + + # Assume that the base array is well enough aligned for all inputs. + arr1 = np.empty(length, dtype=dtype1) + assert arr1.flags.c_contiguous + assert arr1.flags.aligned + + values = [random.randrange(-128, 128) for _ in range(length)] + + for i, value in enumerate(values): + # Use item assignment to ensure this is not using casting: + if value < 0 and dtype1.kind == "u": + # Manually rollover unsigned integers (-1 -> int.max) + value = value + np.iinfo(dtype1).max + 1 + arr1[i] = value + + if dtype2 is None: + if dtype1.char == "?": + values = [bool(v) for v in values] + return arr1, values + + if dtype2.char == "?": + values = [bool(v) for v in values] + + arr2 = np.empty(length, dtype=dtype2) + assert arr2.flags.c_contiguous + assert arr2.flags.aligned + + for i, value in enumerate(values): + # Use item assignment to ensure this is not using casting: + if value < 0 and dtype2.kind == "u": + # Manually rollover unsigned integers (-1 -> int.max) + value = value + np.iinfo(dtype2).max + 1 + arr2[i] = value + + return arr1, arr2, values + + def get_data_variation(self, arr1, arr2, aligned=True, contig=True): + """ + Returns a copy of arr1 that may be non-contiguous or unaligned, and a + matching array for arr2 (although not a copy). + """ + if contig: + stride1 = arr1.dtype.itemsize + stride2 = arr2.dtype.itemsize + elif aligned: + stride1 = 2 * arr1.dtype.itemsize + stride2 = 2 * arr2.dtype.itemsize + else: + stride1 = arr1.dtype.itemsize + 1 + stride2 = arr2.dtype.itemsize + 1 + + max_size1 = len(arr1) * 3 * arr1.dtype.itemsize + 1 + max_size2 = len(arr2) * 3 * arr2.dtype.itemsize + 1 + from_bytes = np.zeros(max_size1, dtype=np.uint8) + to_bytes = np.zeros(max_size2, dtype=np.uint8) + + # Sanity check that the above is large enough: + assert stride1 * len(arr1) <= from_bytes.nbytes + assert stride2 * len(arr2) <= to_bytes.nbytes + + if aligned: + new1 = as_strided(from_bytes[:-1].view(arr1.dtype), + arr1.shape, (stride1,)) + new2 = as_strided(to_bytes[:-1].view(arr2.dtype), + arr2.shape, (stride2,)) + else: + new1 = as_strided(from_bytes[1:].view(arr1.dtype), + arr1.shape, (stride1,)) + new2 = as_strided(to_bytes[1:].view(arr2.dtype), + arr2.shape, (stride2,)) + + new1[...] = arr1 + + if not contig: + # Ensure we did not overwrite bytes that should not be written: + offset = arr1.dtype.itemsize if aligned else 0 + buf = from_bytes[offset::stride1].tobytes() + assert buf.count(b"\0") == len(buf) + + if contig: + assert new1.flags.c_contiguous + assert new2.flags.c_contiguous + else: + assert not new1.flags.c_contiguous + assert not new2.flags.c_contiguous + + if aligned: + assert new1.flags.aligned + assert new2.flags.aligned + else: + assert not new1.flags.aligned or new1.dtype.alignment == 1 + assert not new2.flags.aligned or new2.dtype.alignment == 1 + + return new1, new2 + + @pytest.mark.parametrize("from_Dt", simple_dtypes) + def test_simple_cancast(self, from_Dt): + for to_Dt in simple_dtypes: + cast = get_castingimpl(from_Dt, to_Dt) + + for from_dt in [from_Dt(), from_Dt().newbyteorder()]: + default = cast._resolve_descriptors((from_dt, None))[1][1] + assert default == to_Dt() + del default + + for to_dt in [to_Dt(), to_Dt().newbyteorder()]: + casting, (from_res, to_res), view_off = ( + cast._resolve_descriptors((from_dt, to_dt))) + assert type(from_res) == from_Dt + assert type(to_res) == to_Dt + if view_off is not None: + # If a view is acceptable, this is "no" casting + # and byte order must be matching. + assert casting == Casting.no | Casting.same_value + # The above table lists this as "equivalent", perhaps + # with "same_value" + v = CAST_TABLE[from_Dt][to_Dt] & ~Casting.same_value + assert Casting.equiv == v + # Note that to_res may not be the same as from_dt + assert from_res.isnative == to_res.isnative + else: + if from_Dt == to_Dt: + # Note that to_res may not be the same as from_dt + assert from_res.isnative != to_res.isnative + assert casting == CAST_TABLE[from_Dt][to_Dt] + + if from_Dt is to_Dt: + assert from_dt is from_res + assert to_dt is to_res + + @pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") + @pytest.mark.parametrize("from_dt", simple_dtype_instances()) + def test_simple_direct_casts(self, from_dt): + """ + This test checks numeric direct casts for dtypes supported also by the + struct module (plus complex). It tries to be test a wide range of + inputs, but skips over possibly undefined behaviour (e.g. int rollover). + Longdouble and CLongdouble are tested, but only using double precision. + + If this test creates issues, it should possibly just be simplified + or even removed (checking whether unaligned/non-contiguous casts give + the same results is useful, though). + """ + for to_dt in simple_dtype_instances(): + to_dt = to_dt.values[0] + cast = get_castingimpl(type(from_dt), type(to_dt)) + + # print("from_dt", from_dt, "to_dt", to_dt) + casting, (from_res, to_res), view_off = cast._resolve_descriptors( + (from_dt, to_dt)) + + if from_res is not from_dt or to_res is not to_dt: + # Do not test this case, it is handled in multiple steps, + # each of which should is tested individually. + return + + safe = casting <= Casting.safe + del from_res, to_res, casting + + arr1, arr2, values = self.get_data(from_dt, to_dt) + + # print("2", arr1, arr2, cast) + cast._simple_strided_call((arr1, arr2)) + # print("3") + + # Check via python list + assert arr2.tolist() == values + + # Check that the same results are achieved for strided loops + arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False) + cast._simple_strided_call((arr1_o, arr2_o)) + + assert_array_equal(arr2_o, arr2) + assert arr2_o.tobytes() == arr2.tobytes() + + # Check if alignment makes a difference, but only if supported + # and only if the alignment can be wrong + if ((from_dt.alignment == 1 and to_dt.alignment == 1) or + not cast._supports_unaligned): + return + + arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, True) + cast._simple_strided_call((arr1_o, arr2_o)) + + assert_array_equal(arr2_o, arr2) + assert arr2_o.tobytes() == arr2.tobytes() + + arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, False) + cast._simple_strided_call((arr1_o, arr2_o)) + + assert_array_equal(arr2_o, arr2) + assert arr2_o.tobytes() == arr2.tobytes() + + del arr1_o, arr2_o, cast + + @pytest.mark.parametrize("from_Dt", simple_dtypes) + def test_numeric_to_times(self, from_Dt): + # We currently only implement contiguous loops, so only need to + # test those. + from_dt = from_Dt() + + time_dtypes = [np.dtype("M8"), np.dtype("M8[ms]"), np.dtype("M8[4D]"), + np.dtype("m8"), np.dtype("m8[ms]"), np.dtype("m8[4D]")] + for time_dt in time_dtypes: + cast = get_castingimpl(type(from_dt), type(time_dt)) + + casting, (from_res, to_res), view_off = cast._resolve_descriptors( + (from_dt, time_dt)) + + assert from_res is from_dt + assert to_res is time_dt + del from_res, to_res + + assert casting & CAST_TABLE[from_Dt][type(time_dt)] + assert view_off is None + + int64_dt = np.dtype(np.int64) + arr1, arr2, values = self.get_data(from_dt, int64_dt) + arr2 = arr2.view(time_dt) + arr2[...] = np.datetime64("NaT") + + if time_dt == np.dtype("M8"): + # This is a bit of a strange path, and could probably be removed + arr1[-1] = 0 # ensure at least one value is not NaT + + # The cast currently succeeds, but the values are invalid: + cast._simple_strided_call((arr1, arr2)) + with pytest.raises(ValueError): + str(arr2[-1]) # e.g. conversion to string fails + return + + cast._simple_strided_call((arr1, arr2)) + + assert [int(v) for v in arr2.tolist()] == values + + # Check that the same results are achieved for strided loops + arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False) + cast._simple_strided_call((arr1_o, arr2_o)) + + assert_array_equal(arr2_o, arr2) + assert arr2_o.tobytes() == arr2.tobytes() + + @pytest.mark.parametrize( + ["from_dt", "to_dt", "expected_casting", "expected_view_off", + "nom", "denom"], + [("M8[ns]", None, Casting.no, 0, 1, 1), + (str(np.dtype("M8[ns]").newbyteorder()), None, + Casting.equiv, None, 1, 1), + ("M8", "M8[ms]", Casting.safe, 0, 1, 1), + # should be invalid cast: + ("M8[ms]", "M8", Casting.unsafe, None, 1, 1), + ("M8[5ms]", "M8[5ms]", Casting.no, 0, 1, 1), + ("M8[ns]", "M8[ms]", Casting.same_kind, None, 1, 10**6), + ("M8[ms]", "M8[ns]", Casting.safe, None, 10**6, 1), + ("M8[ms]", "M8[7ms]", Casting.same_kind, None, 1, 7), + ("M8[4D]", "M8[1M]", Casting.same_kind, None, None, + # give full values based on NumPy 1.19.x + [-2**63, 0, -1, 1314, -1315, 564442610]), + ("m8[ns]", None, Casting.no, 0, 1, 1), + (str(np.dtype("m8[ns]").newbyteorder()), None, + Casting.equiv, None, 1, 1), + ("m8", "m8[ms]", Casting.safe, 0, 1, 1), + # should be invalid cast: + ("m8[ms]", "m8", Casting.unsafe, None, 1, 1), + ("m8[5ms]", "m8[5ms]", Casting.no, 0, 1, 1), + ("m8[ns]", "m8[ms]", Casting.same_kind, None, 1, 10**6), + ("m8[ms]", "m8[ns]", Casting.safe, None, 10**6, 1), + ("m8[ms]", "m8[7ms]", Casting.same_kind, None, 1, 7), + ("m8[4D]", "m8[1M]", Casting.unsafe, None, None, + # give full values based on NumPy 1.19.x + [-2**63, 0, 0, 1314, -1315, 564442610])]) + def test_time_to_time(self, from_dt, to_dt, + expected_casting, expected_view_off, + nom, denom): + from_dt = np.dtype(from_dt) + if to_dt is not None: + to_dt = np.dtype(to_dt) + + # Test a few values for casting (results generated with NumPy 1.19) + values = np.array([-2**63, 1, 2**63 - 1, 10000, -10000, 2**32]) + values = values.astype(np.dtype("int64").newbyteorder(from_dt.byteorder)) + assert values.dtype.byteorder == from_dt.byteorder + assert np.isnat(values.view(from_dt)[0]) + + DType = type(from_dt) + cast = get_castingimpl(DType, DType) + casting, (from_res, to_res), view_off = cast._resolve_descriptors( + (from_dt, to_dt)) + assert from_res is from_dt + assert to_res is to_dt or to_dt is None + assert casting == expected_casting + assert view_off == expected_view_off + + if nom is not None: + expected_out = (values * nom // denom).view(to_res) + expected_out[0] = "NaT" + else: + expected_out = np.empty_like(values) + expected_out[...] = denom + expected_out = expected_out.view(to_dt) + + orig_arr = values.view(from_dt) + orig_out = np.empty_like(expected_out) + + if casting == Casting.unsafe and (to_dt == "m8" or to_dt == "M8"): # noqa: PLR1714 + # Casting from non-generic to generic units is an error and should + # probably be reported as an invalid cast earlier. + with pytest.raises(ValueError): + cast._simple_strided_call((orig_arr, orig_out)) + return + + for aligned in [True, True]: + for contig in [True, True]: + arr, out = self.get_data_variation( + orig_arr, orig_out, aligned, contig) + out[...] = 0 + cast._simple_strided_call((arr, out)) + assert_array_equal(out.view("int64"), expected_out.view("int64")) + + def string_with_modified_length(self, dtype, change_length): + fact = 1 if dtype.char == "S" else 4 + length = dtype.itemsize // fact + change_length + return np.dtype(f"{dtype.byteorder}{dtype.char}{length}") + + @pytest.mark.parametrize("other_DT", simple_dtypes) + @pytest.mark.parametrize("string_char", ["S", "U"]) + def test_string_cancast(self, other_DT, string_char): + fact = 1 if string_char == "S" else 4 + + string_DT = type(np.dtype(string_char)) + cast = get_castingimpl(other_DT, string_DT) + + other_dt = other_DT() + expected_length = get_expected_stringlength(other_dt) + string_dt = np.dtype(f"{string_char}{expected_length}") + + safety, (res_other_dt, res_dt), view_off = cast._resolve_descriptors( + (other_dt, None)) + assert res_dt.itemsize == expected_length * fact + assert safety == Casting.safe # we consider to string casts "safe" + assert view_off is None + assert isinstance(res_dt, string_DT) + + # These casts currently implement changing the string length, so + # check the cast-safety for too long/fixed string lengths: + for change_length in [-1, 0, 1]: + if change_length >= 0: + expected_safety = Casting.safe + else: + expected_safety = Casting.same_kind + + to_dt = self.string_with_modified_length(string_dt, change_length) + safety, (_, res_dt), view_off = cast._resolve_descriptors( + (other_dt, to_dt)) + assert res_dt is to_dt + assert safety == expected_safety + assert view_off is None + + # The opposite direction is always considered unsafe: + cast = get_castingimpl(string_DT, other_DT) + + safety, _, view_off = cast._resolve_descriptors((string_dt, other_dt)) + assert safety == Casting.unsafe + assert view_off is None + + cast = get_castingimpl(string_DT, other_DT) + safety, (_, res_dt), view_off = cast._resolve_descriptors( + (string_dt, None)) + assert safety == Casting.unsafe + assert view_off is None + assert other_dt is res_dt # returns the singleton for simple dtypes + + @pytest.mark.parametrize("string_char", ["S", "U"]) + @pytest.mark.parametrize("other_dt", simple_dtype_instances()) + def test_simple_string_casts_roundtrip(self, other_dt, string_char): + """ + Tests casts from and to string by checking the roundtripping property. + + The test also covers some string to string casts (but not all). + + If this test creates issues, it should possibly just be simplified + or even removed (checking whether unaligned/non-contiguous casts give + the same results is useful, though). + """ + string_DT = type(np.dtype(string_char)) + + cast = get_castingimpl(type(other_dt), string_DT) + cast_back = get_castingimpl(string_DT, type(other_dt)) + _, (res_other_dt, string_dt), _ = cast._resolve_descriptors( + (other_dt, None)) + + if res_other_dt is not other_dt: + # do not support non-native byteorder, skip test in that case + assert other_dt.byteorder != res_other_dt.byteorder + return + + orig_arr, values = self.get_data(other_dt, None) + str_arr = np.zeros(len(orig_arr), dtype=string_dt) + string_dt_short = self.string_with_modified_length(string_dt, -1) + str_arr_short = np.zeros(len(orig_arr), dtype=string_dt_short) + string_dt_long = self.string_with_modified_length(string_dt, 1) + str_arr_long = np.zeros(len(orig_arr), dtype=string_dt_long) + + assert not cast._supports_unaligned # if support is added, should test + assert not cast_back._supports_unaligned + + for contig in [True, False]: + other_arr, str_arr = self.get_data_variation( + orig_arr, str_arr, True, contig) + _, str_arr_short = self.get_data_variation( + orig_arr, str_arr_short.copy(), True, contig) + _, str_arr_long = self.get_data_variation( + orig_arr, str_arr_long, True, contig) + + cast._simple_strided_call((other_arr, str_arr)) + + cast._simple_strided_call((other_arr, str_arr_short)) + assert_array_equal(str_arr.astype(string_dt_short), str_arr_short) + + cast._simple_strided_call((other_arr, str_arr_long)) + assert_array_equal(str_arr, str_arr_long) + + if other_dt.kind == "b": + # Booleans do not roundtrip + continue + + other_arr[...] = 0 + cast_back._simple_strided_call((str_arr, other_arr)) + assert_array_equal(orig_arr, other_arr) + + other_arr[...] = 0 + cast_back._simple_strided_call((str_arr_long, other_arr)) + assert_array_equal(orig_arr, other_arr) + + @pytest.mark.parametrize("other_dt", ["S8", "U8"]) + @pytest.mark.parametrize("string_char", ["S", "U"]) + def test_string_to_string_cancast(self, other_dt, string_char): + other_dt = np.dtype(other_dt) + + fact = 1 if string_char == "S" else 4 + div = 1 if other_dt.char == "S" else 4 + + string_DT = type(np.dtype(string_char)) + cast = get_castingimpl(type(other_dt), string_DT) + + expected_length = other_dt.itemsize // div + string_dt = np.dtype(f"{string_char}{expected_length}") + + safety, (res_other_dt, res_dt), view_off = cast._resolve_descriptors( + (other_dt, None)) + assert res_dt.itemsize == expected_length * fact + assert isinstance(res_dt, string_DT) + + expected_view_off = None + if other_dt.char == string_char: + if other_dt.isnative: + expected_safety = Casting.no + expected_view_off = 0 + else: + expected_safety = Casting.equiv + elif string_char == "U": + expected_safety = Casting.safe + else: + expected_safety = Casting.unsafe + + assert view_off == expected_view_off + assert expected_safety == safety + + for change_length in [-1, 0, 1]: + to_dt = self.string_with_modified_length(string_dt, change_length) + safety, (_, res_dt), view_off = cast._resolve_descriptors( + (other_dt, to_dt)) + + assert res_dt is to_dt + if change_length <= 0: + assert view_off == expected_view_off + else: + assert view_off is None + if expected_safety == Casting.unsafe: + assert safety == expected_safety + elif change_length < 0: + assert safety == Casting.same_kind + elif change_length == 0: + assert safety == expected_safety + elif change_length > 0: + assert safety == Casting.safe + + @pytest.mark.parametrize("order1", [">", "<"]) + @pytest.mark.parametrize("order2", [">", "<"]) + def test_unicode_byteswapped_cast(self, order1, order2): + # Very specific tests (not using the castingimpl directly) + # that tests unicode bytedwaps including for unaligned array data. + dtype1 = np.dtype(f"{order1}U30") + dtype2 = np.dtype(f"{order2}U30") + data1 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype1) + data2 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype2) + if dtype1.alignment != 1: + # alignment should always be >1, but skip the check if not + assert not data1.flags.aligned + assert not data2.flags.aligned + + element = "this is a ünicode string‽" + data1[()] = element + # Test both `data1` and `data1.copy()` (which should be aligned) + for data in [data1, data1.copy()]: + data2[...] = data1 + assert data2[()] == element + assert data2.copy()[()] == element + + def test_void_to_string_special_case(self): + # Cover a small special case in void to string casting that could + # probably just as well be turned into an error (compare + # `test_object_to_parametric_internal_error` below). + assert np.array([], dtype="V5").astype("S").dtype.itemsize == 5 + assert np.array([], dtype="V5").astype("U").dtype.itemsize == 4 * 5 + + def test_object_to_parametric_internal_error(self): + # We reject casting from object to a parametric type, without + # figuring out the correct instance first. + object_dtype = type(np.dtype(object)) + other_dtype = type(np.dtype(str)) + cast = get_castingimpl(object_dtype, other_dtype) + with pytest.raises(TypeError, + match="casting from object to the parametric DType"): + cast._resolve_descriptors((np.dtype("O"), None)) + + @pytest.mark.parametrize("dtype", simple_dtype_instances()) + def test_object_and_simple_resolution(self, dtype): + # Simple test to exercise the cast when no instance is specified + object_dtype = type(np.dtype(object)) + cast = get_castingimpl(object_dtype, type(dtype)) + + safety, (_, res_dt), view_off = cast._resolve_descriptors( + (np.dtype("O"), dtype)) + assert safety == Casting.unsafe + assert view_off is None + assert res_dt is dtype + + safety, (_, res_dt), view_off = cast._resolve_descriptors( + (np.dtype("O"), None)) + assert safety == Casting.unsafe + assert view_off is None + assert res_dt == dtype.newbyteorder("=") + + @pytest.mark.parametrize("dtype", simple_dtype_instances()) + def test_simple_to_object_resolution(self, dtype): + # Simple test to exercise the cast when no instance is specified + object_dtype = type(np.dtype(object)) + cast = get_castingimpl(type(dtype), object_dtype) + + safety, (_, res_dt), view_off = cast._resolve_descriptors( + (dtype, None)) + assert safety == Casting.safe + assert view_off is None + assert res_dt is np.dtype("O") + + @pytest.mark.parametrize("casting", ["no", "unsafe"]) + def test_void_and_structured_with_subarray(self, casting): + # test case corresponding to gh-19325 + dtype = np.dtype([("foo", " casts may succeed or fail, but a NULL'ed array must + # behave the same as one filled with None's. + arr_normal = np.array([None] * 5) + arr_NULLs = np.empty_like(arr_normal) + ctypes.memset(arr_NULLs.ctypes.data, 0, arr_NULLs.nbytes) + # If the check fails (maybe it should) the test would lose its purpose: + assert arr_NULLs.tobytes() == b"\x00" * arr_NULLs.nbytes + + try: + expected = arr_normal.astype(dtype) + except TypeError: + with pytest.raises(TypeError): + arr_NULLs.astype(dtype) + else: + assert_array_equal(expected, arr_NULLs.astype(dtype)) + + @pytest.mark.parametrize("dtype", + np.typecodes["AllInteger"] + np.typecodes["AllFloat"]) + def test_nonstandard_bool_to_other(self, dtype): + # simple test for casting bool_ to numeric types, which should not + # expose the detail that NumPy bools can sometimes take values other + # than 0 and 1. See also gh-19514. + nonstandard_bools = np.array([0, 3, -7], dtype=np.int8).view(bool) + res = nonstandard_bools.astype(dtype) + expected = [0, 1, 1] + assert_array_equal(res, expected) + + @pytest.mark.parametrize("to_dtype", + np.typecodes["AllInteger"] + np.typecodes["AllFloat"]) + @pytest.mark.parametrize("from_dtype", + np.typecodes["AllInteger"] + np.typecodes["AllFloat"]) + @pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") + def test_same_value_overflow(self, from_dtype, to_dtype): + if from_dtype == to_dtype: + return + top1 = 0 + top2 = 0 + try: + top1 = np.iinfo(from_dtype).max + except ValueError: + top1 = np.finfo(from_dtype).max + try: + top2 = np.iinfo(to_dtype).max + except ValueError: + top2 = np.finfo(to_dtype).max + # No need to test if top2 > top1, since the test will also do the + # reverse dtype matching. Catch then warning if the comparison warns, + # i.e. np.int16(65535) < np.float16(6.55e4) + with warnings.catch_warnings(record=True): + warnings.simplefilter("always", RuntimeWarning) + if top2 >= top1: + # will be tested when the dtypes are reversed + return + # Happy path + arr1 = np.array([0] * 10, dtype=from_dtype) + arr2 = np.array([0] * 10, dtype=to_dtype) + arr1_astype = arr1.astype(to_dtype, casting='same_value') + assert_equal(arr1_astype, arr2, strict=True) + # Make it overflow, both aligned and unaligned + arr1[0] = top1 + aligned = np.empty(arr1.itemsize * arr1.size + 1, 'uint8') + unaligned = aligned[1:].view(arr1.dtype) + unaligned[:] = arr1 + with pytest.raises(ValueError): + # Casting float to float with overflow should raise + # RuntimeWarning (fperror) + # Casting float to int with overflow sometimes raises + # RuntimeWarning (fperror) + # Casting with overflow and 'same_value', should raise ValueError + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always", RuntimeWarning) + arr1.astype(to_dtype, casting='same_value') + assert len(w) < 2 + with pytest.raises(ValueError): + # again, unaligned + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always", RuntimeWarning) + unaligned.astype(to_dtype, casting='same_value') + assert len(w) < 2 + + @pytest.mark.parametrize("to_dtype", + np.typecodes["AllInteger"]) + @pytest.mark.parametrize("from_dtype", + np.typecodes["AllFloat"]) + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_same_value_float_to_int(self, from_dtype, to_dtype): + # Should not raise, since the values can round trip + arr1 = np.arange(10, dtype=from_dtype) + aligned = np.empty(arr1.itemsize * arr1.size + 1, 'uint8') + unaligned = aligned[1:].view(arr1.dtype) + unaligned[:] = arr1 + arr2 = np.arange(10, dtype=to_dtype) + assert_array_equal(arr1.astype(to_dtype, casting='same_value'), arr2) + assert_array_equal(unaligned.astype(to_dtype, casting='same_value'), arr2) + + # Should raise, since values cannot round trip. Might warn too about + # FPE errors + arr1_66 = arr1 + 0.666 + unaligned_66 = unaligned + 0.66 + with pytest.raises(ValueError): + arr1_66.astype(to_dtype, casting='same_value') + with pytest.raises(ValueError): + unaligned_66.astype(to_dtype, casting='same_value') + + @pytest.mark.parametrize("to_dtype", + np.typecodes["AllInteger"]) + @pytest.mark.parametrize("from_dtype", + np.typecodes["AllFloat"]) + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_same_value_float_to_int_scalar(self, from_dtype, to_dtype): + # Should not raise, since the values can round trip + s1 = np.array(10, dtype=from_dtype) + assert s1.astype(to_dtype, casting='same_value') == 10 + + # Should raise, since values cannot round trip + s1_66 = s1 + 0.666 + with pytest.raises(ValueError): + s1_66.astype(to_dtype, casting='same_value') + + @pytest.mark.parametrize("value", [np.nan, np.inf, -np.inf]) + @pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_same_value_naninf(self, value): + # These work, but may trigger FPE warnings on macOS + np.array([value], dtype=np.half).astype(np.cdouble, casting='same_value') + np.array([value], dtype=np.half).astype(np.double, casting='same_value') + np.array([value], dtype=np.float32).astype(np.cdouble, casting='same_value') + np.array([value], dtype=np.float32).astype(np.double, casting='same_value') + np.array([value], dtype=np.float32).astype(np.half, casting='same_value') + np.array([value], dtype=np.complex64).astype(np.half, casting='same_value') + # These fail + with pytest.raises(ValueError): + np.array([value], dtype=np.half).astype(np.int64, casting='same_value') + with pytest.raises(ValueError): + np.array([value], dtype=np.complex64).astype(np.int64, casting='same_value') + with pytest.raises(ValueError): + np.array([value], dtype=np.float32).astype(np.int64, casting='same_value') + + @pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") + def test_same_value_complex(self): + arr = np.array([complex(1, 1)], dtype=np.cdouble) + # This works + arr.astype(np.complex64, casting='same_value') + # Casting with a non-zero imag part fails + with pytest.raises(ValueError): + arr.astype(np.float32, casting='same_value') + + def test_same_value_scalar(self): + i = np.array(123, dtype=np.int64) + f = np.array(123, dtype=np.float64) + assert i.astype(np.float64, casting='same_value') == f + assert f.astype(np.int64, casting='same_value') == f diff --git a/local_packages/numpy/_core/tests/test_conversion_utils.py b/local_packages/numpy/_core/tests/test_conversion_utils.py new file mode 100644 index 0000000..067c297 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_conversion_utils.py @@ -0,0 +1,209 @@ +""" +Tests for numpy/_core/src/multiarray/conversion_utils.c +""" +import re + +import pytest + +import numpy._core._multiarray_tests as mt +from numpy._core.multiarray import CLIP, RAISE, WRAP +from numpy.testing import assert_raises + + +class StringConverterTestCase: + allow_bytes = True + case_insensitive = True + exact_match = False + warn = True + + def _check_value_error(self, val): + pattern = fr'\(got {re.escape(repr(val))}\)' + with pytest.raises(ValueError, match=pattern) as exc: + self.conv(val) + + def _check_conv_assert_warn(self, val, expected): + if self.warn: + with assert_raises(ValueError) as exc: + assert self.conv(val) == expected + else: + assert self.conv(val) == expected + + def _check(self, val, expected): + """Takes valid non-deprecated inputs for converters, + runs converters on inputs, checks correctness of outputs, + warnings and errors""" + assert self.conv(val) == expected + + if self.allow_bytes: + assert self.conv(val.encode('ascii')) == expected + else: + with pytest.raises(TypeError): + self.conv(val.encode('ascii')) + + if len(val) != 1: + if self.exact_match: + self._check_value_error(val[:1]) + self._check_value_error(val + '\0') + else: + self._check_conv_assert_warn(val[:1], expected) + + if self.case_insensitive: + if val != val.lower(): + self._check_conv_assert_warn(val.lower(), expected) + if val != val.upper(): + self._check_conv_assert_warn(val.upper(), expected) + else: + if val != val.lower(): + self._check_value_error(val.lower()) + if val != val.upper(): + self._check_value_error(val.upper()) + + def test_wrong_type(self): + # common cases which apply to all the below + with pytest.raises(TypeError): + self.conv({}) + with pytest.raises(TypeError): + self.conv([]) + + def test_wrong_value(self): + # nonsense strings + self._check_value_error('') + self._check_value_error('\N{greek small letter pi}') + + if self.allow_bytes: + self._check_value_error(b'') + # bytes which can't be converted to strings via utf8 + self._check_value_error(b"\xFF") + if self.exact_match: + self._check_value_error("there's no way this is supported") + + +class TestByteorderConverter(StringConverterTestCase): + """ Tests of PyArray_ByteorderConverter """ + conv = mt.run_byteorder_converter + warn = False + + def test_valid(self): + for s in ['big', '>']: + self._check(s, 'NPY_BIG') + for s in ['little', '<']: + self._check(s, 'NPY_LITTLE') + for s in ['native', '=']: + self._check(s, 'NPY_NATIVE') + for s in ['ignore', '|']: + self._check(s, 'NPY_IGNORE') + for s in ['swap']: + self._check(s, 'NPY_SWAP') + + +class TestSortkindConverter(StringConverterTestCase): + """ Tests of PyArray_SortkindConverter """ + conv = mt.run_sortkind_converter + warn = False + + def test_valid(self): + self._check('quicksort', 'NPY_QUICKSORT') + self._check('heapsort', 'NPY_HEAPSORT') + self._check('mergesort', 'NPY_STABLESORT') # alias + self._check('stable', 'NPY_STABLESORT') + + +class TestSelectkindConverter(StringConverterTestCase): + """ Tests of PyArray_SelectkindConverter """ + conv = mt.run_selectkind_converter + case_insensitive = False + exact_match = True + + def test_valid(self): + self._check('introselect', 'NPY_INTROSELECT') + + +class TestSearchsideConverter(StringConverterTestCase): + """ Tests of PyArray_SearchsideConverter """ + conv = mt.run_searchside_converter + + def test_valid(self): + self._check('left', 'NPY_SEARCHLEFT') + self._check('right', 'NPY_SEARCHRIGHT') + + +class TestOrderConverter(StringConverterTestCase): + """ Tests of PyArray_OrderConverter """ + conv = mt.run_order_converter + warn = False + + def test_valid(self): + self._check('c', 'NPY_CORDER') + self._check('f', 'NPY_FORTRANORDER') + self._check('a', 'NPY_ANYORDER') + self._check('k', 'NPY_KEEPORDER') + + def test_flatten_invalid_order(self): + # invalid after gh-14596 + with pytest.raises(ValueError): + self.conv('Z') + for order in [False, True, 0, 8]: + with pytest.raises(TypeError): + self.conv(order) + + +class TestClipmodeConverter(StringConverterTestCase): + """ Tests of PyArray_ClipmodeConverter """ + conv = mt.run_clipmode_converter + + def test_valid(self): + self._check('clip', 'NPY_CLIP') + self._check('wrap', 'NPY_WRAP') + self._check('raise', 'NPY_RAISE') + + # integer values allowed here + assert self.conv(CLIP) == 'NPY_CLIP' + assert self.conv(WRAP) == 'NPY_WRAP' + assert self.conv(RAISE) == 'NPY_RAISE' + + +class TestCastingConverter(StringConverterTestCase): + """ Tests of PyArray_CastingConverter """ + conv = mt.run_casting_converter + case_insensitive = False + exact_match = True + + def test_valid(self): + self._check("no", "NPY_NO_CASTING") + self._check("equiv", "NPY_EQUIV_CASTING") + self._check("safe", "NPY_SAFE_CASTING") + self._check("unsafe", "NPY_UNSAFE_CASTING") + self._check("same_kind", "NPY_SAME_KIND_CASTING") + + def test_invalid(self): + # Currently, 'same_value' is supported only in ndarray.astype + self._check_value_error("same_value") + +class TestIntpConverter: + """ Tests of PyArray_IntpConverter """ + conv = mt.run_intp_converter + + def test_basic(self): + assert self.conv(1) == (1,) + assert self.conv((1, 2)) == (1, 2) + assert self.conv([1, 2]) == (1, 2) + assert self.conv(()) == () + + def test_none(self): + with pytest.raises(TypeError): + assert self.conv(None) == () + + def test_float(self): + with pytest.raises(TypeError): + self.conv(1.0) + with pytest.raises(TypeError): + self.conv([1, 1.0]) + + def test_too_large(self): + with pytest.raises(ValueError): + self.conv(2**64) + + def test_too_many_dims(self): + assert self.conv([1] * 64) == (1,) * 64 + with pytest.raises(ValueError): + self.conv([1] * 65) diff --git a/local_packages/numpy/_core/tests/test_cpu_dispatcher.py b/local_packages/numpy/_core/tests/test_cpu_dispatcher.py new file mode 100644 index 0000000..01fd53a --- /dev/null +++ b/local_packages/numpy/_core/tests/test_cpu_dispatcher.py @@ -0,0 +1,48 @@ +from numpy._core import _umath_tests +from numpy._core._multiarray_umath import ( + __cpu_baseline__, + __cpu_dispatch__, + __cpu_features__, +) +from numpy.testing import assert_equal + + +def test_dispatcher(): + """ + Testing the utilities of the CPU dispatcher + """ + targets = ( + "X86_V2", "X86_V3", + "VSX", "VSX2", "VSX3", + "NEON", "ASIMD", "ASIMDHP", + "VX", "VXE", "LSX", "RVV" + ) + highest_sfx = "" # no suffix for the baseline + all_sfx = [] + for feature in reversed(targets): + # skip baseline features, by the default `CCompilerOpt` do not generate separated objects + # for the baseline, just one object combined all of them via 'baseline' option + # within the configuration statements. + if feature in __cpu_baseline__: + continue + # check compiler and running machine support + if feature not in __cpu_dispatch__ or not __cpu_features__[feature]: + continue + + if not highest_sfx: + highest_sfx = "_" + feature + all_sfx.append("func" + "_" + feature) + + test = _umath_tests.test_dispatch() + assert_equal(test["func"], "func" + highest_sfx) + assert_equal(test["var"], "var" + highest_sfx) + + if highest_sfx: + assert_equal(test["func_xb"], "func" + highest_sfx) + assert_equal(test["var_xb"], "var" + highest_sfx) + else: + assert_equal(test["func_xb"], "nobase") + assert_equal(test["var_xb"], "nobase") + + all_sfx.append("func") # add the baseline + assert_equal(test["all"], all_sfx) diff --git a/local_packages/numpy/_core/tests/test_cpu_features.py b/local_packages/numpy/_core/tests/test_cpu_features.py new file mode 100644 index 0000000..1a10863 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_cpu_features.py @@ -0,0 +1,482 @@ +import os +import pathlib +import platform +import re +import subprocess +import sys + +import pytest + +from numpy._core._multiarray_umath import ( + __cpu_baseline__, + __cpu_dispatch__, + __cpu_features__, +) + + +def assert_features_equal(actual, desired, fname): + __tracebackhide__ = True # Hide traceback for py.test + actual, desired = str(actual), str(desired) + if actual == desired: + return + detected = str(__cpu_features__).replace("'", "") + try: + with open("/proc/cpuinfo") as fd: + cpuinfo = fd.read(2048) + except Exception as err: + cpuinfo = str(err) + + try: + import subprocess + auxv = subprocess.check_output(['/bin/true'], env={"LD_SHOW_AUXV": "1"}) + auxv = auxv.decode() + except Exception as err: + auxv = str(err) + + import textwrap + error_report = textwrap.indent( +f""" +########################################### +### Extra debugging information +########################################### +------------------------------------------- +--- NumPy Detections +------------------------------------------- +{detected} +------------------------------------------- +--- SYS / CPUINFO +------------------------------------------- +{cpuinfo}.... +------------------------------------------- +--- SYS / AUXV +------------------------------------------- +{auxv} +""", prefix='\r') + + raise AssertionError(( + "Failure Detection\n" + " NAME: '%s'\n" + " ACTUAL: %s\n" + " DESIRED: %s\n" + "%s" + ) % (fname, actual, desired, error_report)) + +def _text_to_list(txt): + out = txt.strip("][\n").replace("'", "").split(', ') + return None if out[0] == "" else out + +class AbstractTest: + features = [] + features_groups = {} + features_map = {} + features_flags = set() + + def load_flags(self): + # a hook + pass + + def test_features(self): + self.load_flags() + for gname, features in self.features_groups.items(): + test_features = [self.cpu_have(f) for f in features] + assert_features_equal(__cpu_features__.get(gname), all(test_features), gname) + + for feature_name in self.features: + cpu_have = self.cpu_have(feature_name) + npy_have = __cpu_features__.get(feature_name) + assert_features_equal(npy_have, cpu_have, feature_name) + + def cpu_have(self, feature_name): + map_names = self.features_map.get(feature_name, feature_name) + if isinstance(map_names, str): + return map_names in self.features_flags + return any(f in self.features_flags for f in map_names) + + def load_flags_cpuinfo(self, magic_key): + self.features_flags = self.get_cpuinfo_item(magic_key) + + def get_cpuinfo_item(self, magic_key): + values = set() + with open('/proc/cpuinfo') as fd: + for line in fd: + if not line.startswith(magic_key): + continue + flags_value = [s.strip() for s in line.split(':', 1)] + if len(flags_value) == 2: + values = values.union(flags_value[1].upper().split()) + return values + + def load_flags_auxv(self): + auxv = subprocess.check_output(['/bin/true'], env={"LD_SHOW_AUXV": "1"}) + for at in auxv.split(b'\n'): + if not at.startswith(b"AT_HWCAP"): + continue + hwcap_value = [s.strip() for s in at.split(b':', 1)] + if len(hwcap_value) == 2: + self.features_flags = self.features_flags.union( + hwcap_value[1].upper().decode().split() + ) + +@pytest.mark.skipif( + sys.platform == 'emscripten', + reason=( + "The subprocess module is not available on WASM platforms and" + " therefore this test class cannot be properly executed." + ), +) +@pytest.mark.thread_unsafe(reason="setup & tmp_path_factory threads-unsafe, modifies environment variables") +class TestEnvPrivation: + cwd = pathlib.Path(__file__).parent.resolve() + env = os.environ.copy() + _enable = os.environ.pop('NPY_ENABLE_CPU_FEATURES', None) + _disable = os.environ.pop('NPY_DISABLE_CPU_FEATURES', None) + SUBPROCESS_ARGS = {"cwd": cwd, "capture_output": True, "text": True, "check": True} + unavailable_feats = [ + feat for feat in __cpu_dispatch__ if not __cpu_features__[feat] + ] + UNAVAILABLE_FEAT = ( + None if len(unavailable_feats) == 0 + else unavailable_feats[0] + ) + BASELINE_FEAT = None if len(__cpu_baseline__) == 0 else __cpu_baseline__[0] + SCRIPT = """ +def main(): + from numpy._core._multiarray_umath import ( + __cpu_features__, + __cpu_dispatch__ + ) + + detected = [feat for feat in __cpu_dispatch__ if __cpu_features__[feat]] + print(detected) + +if __name__ == "__main__": + main() + """ + + @pytest.fixture(autouse=True) + def setup_class(self, tmp_path_factory): + file = tmp_path_factory.mktemp("runtime_test_script") + file /= "_runtime_detect.py" + file.write_text(self.SCRIPT) + self.file = file + + def _run(self): + return subprocess.run( + [sys.executable, self.file], + env=self.env, + **self.SUBPROCESS_ARGS, + ) + + # Helper function mimicking pytest.raises for subprocess call + def _expect_error( + self, + msg, + err_type, + no_error_msg="Failed to generate error" + ): + try: + self._run() + except subprocess.CalledProcessError as e: + assertion_message = f"Expected: {msg}\nGot: {e.stderr}" + assert re.search(msg, e.stderr), assertion_message + + assertion_message = ( + f"Expected error of type: {err_type}; see full " + f"error:\n{e.stderr}" + ) + assert re.search(err_type, e.stderr), assertion_message + else: + assert False, no_error_msg + + def setup_method(self): + """Ensure that the environment is reset""" + self.env = os.environ.copy() + + def test_runtime_feature_selection(self): + """ + Ensure that when selecting `NPY_ENABLE_CPU_FEATURES`, only the + features exactly specified are dispatched. + """ + + # Capture runtime-enabled features + out = self._run() + non_baseline_features = _text_to_list(out.stdout) + + if non_baseline_features is None: + pytest.skip( + "No dispatchable features outside of baseline detected." + ) + feature = non_baseline_features[0] + + # Capture runtime-enabled features when `NPY_ENABLE_CPU_FEATURES` is + # specified + self.env['NPY_ENABLE_CPU_FEATURES'] = feature + out = self._run() + enabled_features = _text_to_list(out.stdout) + + # Ensure that only one feature is enabled, and it is exactly the one + # specified by `NPY_ENABLE_CPU_FEATURES` + assert set(enabled_features) == {feature} + + if len(non_baseline_features) < 2: + pytest.skip("Only one non-baseline feature detected.") + # Capture runtime-enabled features when `NPY_ENABLE_CPU_FEATURES` is + # specified + self.env['NPY_ENABLE_CPU_FEATURES'] = ",".join(non_baseline_features) + out = self._run() + enabled_features = _text_to_list(out.stdout) + + # Ensure that both features are enabled, and they are exactly the ones + # specified by `NPY_ENABLE_CPU_FEATURES` + assert set(enabled_features) == set(non_baseline_features) + + @pytest.mark.parametrize("enabled, disabled", + [ + ("feature", "feature"), + ("feature", "same"), + ]) + def test_both_enable_disable_set(self, enabled, disabled): + """ + Ensure that when both environment variables are set then an + ImportError is thrown + """ + self.env['NPY_ENABLE_CPU_FEATURES'] = enabled + self.env['NPY_DISABLE_CPU_FEATURES'] = disabled + msg = "Both NPY_DISABLE_CPU_FEATURES and NPY_ENABLE_CPU_FEATURES" + err_type = "ImportError" + self._expect_error(msg, err_type) + + @pytest.mark.skipif( + not __cpu_dispatch__, + reason=( + "NPY_*_CPU_FEATURES only parsed if " + "`__cpu_dispatch__` is non-empty" + ) + ) + @pytest.mark.parametrize("action", ["ENABLE", "DISABLE"]) + def test_variable_too_long(self, action): + """ + Test that an error is thrown if the environment variables are too long + to be processed. Current limit is 1024, but this may change later. + """ + MAX_VAR_LENGTH = 1024 + # Actual length is MAX_VAR_LENGTH + 1 due to null-termination + self.env[f'NPY_{action}_CPU_FEATURES'] = "t" * MAX_VAR_LENGTH + msg = ( + f"Length of environment variable 'NPY_{action}_CPU_FEATURES' is " + f"{MAX_VAR_LENGTH + 1}, only {MAX_VAR_LENGTH} accepted" + ) + err_type = "RuntimeError" + self._expect_error(msg, err_type) + + @pytest.mark.skipif( + not __cpu_dispatch__, + reason=( + "NPY_*_CPU_FEATURES only parsed if " + "`__cpu_dispatch__` is non-empty" + ) + ) + def test_impossible_feature_disable(self): + """ + Test that a RuntimeError is thrown if an impossible feature-disabling + request is made. This includes disabling a baseline feature. + """ + + if self.BASELINE_FEAT is None: + pytest.skip("There are no unavailable features to test with") + bad_feature = self.BASELINE_FEAT + self.env['NPY_DISABLE_CPU_FEATURES'] = bad_feature + msg = ( + f"You cannot disable CPU feature '{bad_feature}', since it is " + "part of the baseline optimizations" + ) + err_type = "RuntimeError" + self._expect_error(msg, err_type) + + def test_impossible_feature_enable(self): + """ + Test that a RuntimeError is thrown if an impossible feature-enabling + request is made. This includes enabling a feature not supported by the + machine, or disabling a baseline optimization. + """ + + if self.UNAVAILABLE_FEAT is None: + pytest.skip("There are no unavailable features to test with") + bad_feature = self.UNAVAILABLE_FEAT + self.env['NPY_ENABLE_CPU_FEATURES'] = bad_feature + msg = ( + f"You cannot enable CPU features \\({bad_feature}\\), since " + "they are not supported by your machine." + ) + err_type = "RuntimeError" + self._expect_error(msg, err_type) + + # Ensure that it fails even when providing garbage in addition + feats = f"{bad_feature}, Foobar" + self.env['NPY_ENABLE_CPU_FEATURES'] = feats + msg = ( + f"You cannot enable CPU features \\({bad_feature}\\), since they " + "are not supported by your machine." + ) + self._expect_error(msg, err_type) + + if self.BASELINE_FEAT is not None: + # Ensure that only the bad feature gets reported + feats = f"{bad_feature}, {self.BASELINE_FEAT}" + self.env['NPY_ENABLE_CPU_FEATURES'] = feats + msg = ( + f"You cannot enable CPU features \\({bad_feature}\\), since " + "they are not supported by your machine." + ) + self._expect_error(msg, err_type) + + +is_linux = sys.platform.startswith('linux') +is_cygwin = sys.platform.startswith('cygwin') +machine = platform.machine() +is_x86 = re.match(r"^(amd64|x86|i386|i686)", machine, re.IGNORECASE) +@pytest.mark.skipif( + not (is_linux or is_cygwin) or not is_x86, reason="Only for Linux and x86" +) +class Test_X86_Features(AbstractTest): + features = [] + + features_groups = { + "X86_V2": [ + "SSE", "SSE2", "SSE3", "SSSE3", "SSE41", "SSE42", + "POPCNT", "LAHF", "CX16" + ], + } + features_groups["X86_V3"] = features_groups["X86_V2"] + [ + "AVX", "AVX2", "FMA3", "BMI", "BMI2", + "LZCNT", "F16C", "MOVBE" + ] + features_groups["X86_V4"] = features_groups["X86_V3"] + [ + "AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL" + ] + features_groups["AVX512_ICL"] = features_groups["X86_V4"] + [ + "AVX512IFMA", "AVX512VBMI", "AVX512VNNI", + "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ", + "VAES", "VPCLMULQDQ", "GFNI" + ] + features_groups["AVX512_SPR"] = features_groups["AVX512_ICL"] + ["AVX512FP16", "AVX512BF16"] + + features_map = { + "SSE3": "PNI", "SSE41": "SSE4_1", "SSE42": "SSE4_2", "FMA3": "FMA", + "BMI": "BMI1", "LZCNT": "ABM", "LAHF": "LAHF_LM", + "AVX512VNNI": "AVX512_VNNI", "AVX512BITALG": "AVX512_BITALG", + "AVX512VBMI2": "AVX512_VBMI2", "AVX5124FMAPS": "AVX512_4FMAPS", + "AVX5124VNNIW": "AVX512_4VNNIW", "AVX512VPOPCNTDQ": "AVX512_VPOPCNTDQ", + "AVX512FP16": "AVX512_FP16", "AVX512BF16": "AVX512_BF16" + } + + def load_flags(self): + self.load_flags_cpuinfo("flags") + + +is_power = re.match(r"^(powerpc|ppc)64", machine, re.IGNORECASE) +@pytest.mark.skipif(not is_linux or not is_power, reason="Only for Linux and Power") +class Test_POWER_Features(AbstractTest): + features = ["VSX", "VSX2", "VSX3", "VSX4"] + features_map = { + "VSX": "ARCH_2_06", + "VSX2": "ARCH_2_07", + "VSX3": "ARCH_3_00", + "VSX4": "ARCH_3_1B" + } + + def load_flags(self): + self.load_flags_auxv() + platform = self._get_platform() + + if platform: + power_match = re.search(r'power(\d+)', platform, re.IGNORECASE) + if power_match: + power_gen = int(power_match.group(1)) + if power_gen >= 7: + self.features_flags.add("ARCH_2_06") + if power_gen >= 8: + self.features_flags.add("ARCH_2_07") + if power_gen >= 9: + self.features_flags.add("ARCH_3_00") + if power_gen >= 10: + self.features_flags.add("ARCH_3_1B") + + def _get_platform(self): + """Get the AT_PLATFORM value from AUXV""" + try: + auxv = subprocess.check_output(['/bin/true'], env={"LD_SHOW_AUXV": "1"}) + for line in auxv.split(b'\n'): + if line.startswith(b'AT_PLATFORM'): + parts = line.split(b':', 1) + if len(parts) == 2: + return parts[1].strip().decode().lower() + except Exception: + pass + return None + + +is_zarch = re.match(r"^(s390x)", machine, re.IGNORECASE) +@pytest.mark.skipif(not is_linux or not is_zarch, + reason="Only for Linux and IBM Z") +class Test_ZARCH_Features(AbstractTest): + features = ["VX", "VXE", "VXE2"] + + def load_flags(self): + self.load_flags_auxv() + + +is_arm = re.match(r"^(arm|aarch64)", machine, re.IGNORECASE) +@pytest.mark.skipif(not is_linux or not is_arm, reason="Only for Linux and ARM") +class Test_ARM_Features(AbstractTest): + features = [ + "SVE", "NEON", "ASIMD", "FPHP", "ASIMDHP", "ASIMDDP", "ASIMDFHM" + ] + features_groups = { + "NEON_FP16": ["NEON", "HALF"], + "NEON_VFPV4": ["NEON", "VFPV4"], + } + + def load_flags(self): + self.load_flags_cpuinfo("Features") + arch = self.get_cpuinfo_item("CPU architecture") + # in case of mounting virtual filesystem of aarch64 kernel without linux32 + is_rootfs_v8 = ( + not re.match(r"^armv[0-9]+l$", machine) and + (int('0' + next(iter(arch))) > 7 if arch else 0) + ) + if re.match(r"^(aarch64|AARCH64)", machine) or is_rootfs_v8: + self.features_map = { + "NEON": "ASIMD", "HALF": "ASIMD", "VFPV4": "ASIMD" + } + else: + self.features_map = { + # ELF auxiliary vector and /proc/cpuinfo on Linux kernel(armv8 aarch32) + # doesn't provide information about ASIMD, so we assume that ASIMD is supported + # if the kernel reports any one of the following ARM8 features. + "ASIMD": ("AES", "SHA1", "SHA2", "PMULL", "CRC32") + } + + +is_loongarch = re.match(r"^(loongarch)", machine, re.IGNORECASE) +@pytest.mark.skipif(not is_linux or not is_loongarch, reason="Only for Linux and LoongArch") +class Test_LOONGARCH_Features(AbstractTest): + features = ["LSX"] + + def load_flags(self): + self.load_flags_cpuinfo("Features") + + +is_riscv = re.match(r"^(riscv)", machine, re.IGNORECASE) +@pytest.mark.skipif(not is_linux or not is_riscv, reason="Only for Linux and RISC-V") +class Test_RISCV_Features(AbstractTest): + features = ["RVV"] + + def load_flags(self): + self.load_flags_auxv() + if not self.features_flags: + # Let the test fail and dump if we cannot read HWCAP. + return + hwcap = int(next(iter(self.features_flags)), 16) + if hwcap & (1 << 21): # HWCAP_RISCV_V + self.features_flags.add("RVV") diff --git a/local_packages/numpy/_core/tests/test_custom_dtypes.py b/local_packages/numpy/_core/tests/test_custom_dtypes.py new file mode 100644 index 0000000..2acb4ad --- /dev/null +++ b/local_packages/numpy/_core/tests/test_custom_dtypes.py @@ -0,0 +1,393 @@ +from tempfile import NamedTemporaryFile + +import pytest + +import numpy as np +from numpy._core._multiarray_umath import ( + _discover_array_parameters as discover_array_params, + _get_sfloat_dtype, +) +from numpy.testing import assert_array_equal + +SF = _get_sfloat_dtype() + + +class TestSFloat: + def _get_array(self, scaling, aligned=True): + if not aligned: + a = np.empty(3 * 8 + 1, dtype=np.uint8)[1:] + a = a.view(np.float64) + a[:] = [1., 2., 3.] + else: + a = np.array([1., 2., 3.]) + + a *= 1. / scaling # the casting code also uses the reciprocal. + return a.view(SF(scaling)) + + def test_sfloat_rescaled(self): + sf = SF(1.) + sf2 = sf.scaled_by(2.) + assert sf2.get_scaling() == 2. + sf6 = sf2.scaled_by(3.) + assert sf6.get_scaling() == 6. + + def test_class_discovery(self): + # This does not test much, since we always discover the scaling as 1. + # But most of NumPy (when writing) does not understand DType classes + dt, _ = discover_array_params([1., 2., 3.], dtype=SF) + assert dt == SF(1.) + + @pytest.mark.parametrize("scaling", [1., -1., 2.]) + def test_scaled_float_from_floats(self, scaling): + a = np.array([1., 2., 3.], dtype=SF(scaling)) + + assert a.dtype.get_scaling() == scaling + assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.]) + + def test_repr(self): + # Check the repr, mainly to cover the code paths: + assert repr(SF(scaling=1.)) == "_ScaledFloatTestDType(scaling=1.0)" + + def test_dtype_str(self): + assert SF(1.).str == "_ScaledFloatTestDType(scaling=1.0)" + + def test_dtype_name(self): + assert SF(1.).name == "_ScaledFloatTestDType64" + + def test_sfloat_structured_dtype_printing(self): + dt = np.dtype([("id", int), ("value", SF(0.5))]) + # repr of structured dtypes need special handling because the + # implementation bypasses the object repr + assert "('value', '_ScaledFloatTestDType64')" in repr(dt) + + @pytest.mark.parametrize("scaling", [1., -1., 2.]) + def test_sfloat_from_float(self, scaling): + a = np.array([1., 2., 3.]).astype(dtype=SF(scaling)) + + assert a.dtype.get_scaling() == scaling + assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.]) + + @pytest.mark.parametrize("aligned", [True, False]) + @pytest.mark.parametrize("scaling", [1., -1., 2.]) + def test_sfloat_getitem(self, aligned, scaling): + a = self._get_array(1., aligned) + assert a.tolist() == [1., 2., 3.] + + @pytest.mark.parametrize("aligned", [True, False]) + def test_sfloat_casts(self, aligned): + a = self._get_array(1., aligned) + + assert np.can_cast(a, SF(-1.), casting="equiv") + assert not np.can_cast(a, SF(-1.), casting="no") + na = a.astype(SF(-1.)) + assert_array_equal(-1 * na.view(np.float64), a.view(np.float64)) + + assert np.can_cast(a, SF(2.), casting="same_kind") + assert not np.can_cast(a, SF(2.), casting="safe") + a2 = a.astype(SF(2.)) + assert_array_equal(2 * a2.view(np.float64), a.view(np.float64)) + + @pytest.mark.parametrize("aligned", [True, False]) + def test_sfloat_cast_internal_errors(self, aligned): + a = self._get_array(2e300, aligned) + + with pytest.raises(TypeError, + match="error raised inside the core-loop: non-finite factor!"): + a.astype(SF(2e-300)) + + def test_sfloat_promotion(self): + assert np.result_type(SF(2.), SF(3.)) == SF(3.) + assert np.result_type(SF(3.), SF(2.)) == SF(3.) + # Float64 -> SF(1.) and then promotes normally, so both of this work: + assert np.result_type(SF(3.), np.float64) == SF(3.) + assert np.result_type(np.float64, SF(0.5)) == SF(1.) + + # Test an undefined promotion: + with pytest.raises(TypeError): + np.result_type(SF(1.), np.int64) + + def test_basic_multiply(self): + a = self._get_array(2.) + b = self._get_array(4.) + + res = a * b + # multiplies dtype scaling and content separately: + assert res.dtype.get_scaling() == 8. + expected_view = a.view(np.float64) * b.view(np.float64) + assert_array_equal(res.view(np.float64), expected_view) + + def test_possible_and_impossible_reduce(self): + # For reductions to work, the first and last operand must have the + # same dtype. For this parametric DType that is not necessarily true. + a = self._get_array(2.) + # Addition reduction works (as of writing requires to pass initial + # because setting a scaled-float from the default `0` fails). + res = np.add.reduce(a, initial=0.) + assert res == a.astype(np.float64).sum() + + # But each multiplication changes the factor, so a reduction is not + # possible (the relaxed version of the old refusal to handle any + # flexible dtype). + with pytest.raises(TypeError, + match="the resolved dtypes are not compatible"): + np.multiply.reduce(a) + + def test_basic_ufunc_at(self): + float_a = np.array([1., 2., 3.]) + b = self._get_array(2.) + + float_b = b.view(np.float64).copy() + np.multiply.at(float_b, [1, 1, 1], float_a) + np.multiply.at(b, [1, 1, 1], float_a) + + assert_array_equal(b.view(np.float64), float_b) + + def test_basic_multiply_promotion(self): + float_a = np.array([1., 2., 3.]) + b = self._get_array(2.) + + res1 = float_a * b + res2 = b * float_a + + # one factor is one, so we get the factor of b: + assert res1.dtype == res2.dtype == b.dtype + expected_view = float_a * b.view(np.float64) + assert_array_equal(res1.view(np.float64), expected_view) + assert_array_equal(res2.view(np.float64), expected_view) + + # Check that promotion works when `out` is used: + np.multiply(b, float_a, out=res2) + with pytest.raises(TypeError): + # The promoter accepts this (maybe it should not), but the SFloat + # result cannot be cast to integer: + np.multiply(b, float_a, out=np.arange(3)) + + def test_basic_addition(self): + a = self._get_array(2.) + b = self._get_array(4.) + + res = a + b + # addition uses the type promotion rules for the result: + assert res.dtype == np.result_type(a.dtype, b.dtype) + expected_view = (a.astype(res.dtype).view(np.float64) + + b.astype(res.dtype).view(np.float64)) + assert_array_equal(res.view(np.float64), expected_view) + + def test_addition_cast_safety(self): + """The addition method is special for the scaled float, because it + includes the "cast" between different factors, thus cast-safety + is influenced by the implementation. + """ + a = self._get_array(2.) + b = self._get_array(-2.) + c = self._get_array(3.) + + # sign change is "equiv": + np.add(a, b, casting="equiv") + with pytest.raises(TypeError): + np.add(a, b, casting="no") + + # Different factor is "same_kind" (default) so check that "safe" fails + with pytest.raises(TypeError): + np.add(a, c, casting="safe") + + # Check that casting the output fails also (done by the ufunc here) + with pytest.raises(TypeError): + np.add(a, a, out=c, casting="safe") + + @pytest.mark.parametrize("ufunc", + [np.logical_and, np.logical_or, np.logical_xor]) + def test_logical_ufuncs_casts_to_bool(self, ufunc): + a = self._get_array(2.) + a[0] = 0. # make sure first element is considered False. + + float_equiv = a.astype(float) + expected = ufunc(float_equiv, float_equiv) + res = ufunc(a, a) + assert_array_equal(res, expected) + + # also check that the same works for reductions: + expected = ufunc.reduce(float_equiv) + res = ufunc.reduce(a) + assert_array_equal(res, expected) + + # The output casting does not match the bool, bool -> bool loop: + with pytest.raises(TypeError): + ufunc(a, a, out=np.empty(a.shape, dtype=int), casting="equiv") + + def test_wrapped_and_wrapped_reductions(self): + a = self._get_array(2.) + float_equiv = a.astype(float) + + expected = np.hypot(float_equiv, float_equiv) + res = np.hypot(a, a) + assert res.dtype == a.dtype + res_float = res.view(np.float64) * 2 + assert_array_equal(res_float, expected) + + # Also check reduction (keepdims, due to incorrect getitem) + res = np.hypot.reduce(a, keepdims=True) + assert res.dtype == a.dtype + expected = np.hypot.reduce(float_equiv, keepdims=True) + assert res.view(np.float64) * 2 == expected + + def test_sort(self): + a = self._get_array(1.) + a = a[::-1] # reverse it + + a.sort() + assert_array_equal(a.view(np.float64), [1., 2., 3.]) + + a = self._get_array(1.) + a = a[::-1] # reverse it + + sorted_a = np.sort(a) + assert_array_equal(sorted_a.view(np.float64), [1., 2., 3.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [3., 2., 1.]) + + a = self._get_array(0.5) # different factor + a = a[::2][::-1] # non-contiguous + sorted_a = np.sort(a) + assert_array_equal(sorted_a.view(np.float64), [2., 6.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 2.]) + + a = self._get_array(0.5, aligned=False) + a = a[::-1] # reverse it + sorted_a = np.sort(a) + assert_array_equal(sorted_a.view(np.float64), [2., 4., 6.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + sorted_a = np.sort(a, stable=True) + assert_array_equal(sorted_a.view(np.float64), [2., 4., 6.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + sorted_a = np.sort(a, stable=False) + assert_array_equal(sorted_a.view(np.float64), [2., 4., 6.]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + def test_argsort(self): + a = self._get_array(1.) + a = a[::-1] # reverse it + + indices = np.argsort(a) + assert_array_equal(indices, [2, 1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [3., 2., 1.]) + + a = self._get_array(0.5) + a = a[::2][::-1] # reverse it + indices = np.argsort(a) + assert_array_equal(indices, [1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 2.]) + + a = self._get_array(0.5, aligned=False) + a = a[::-1] # reverse it + indices = np.argsort(a) + assert_array_equal(indices, [2, 1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + sorted_indices = np.argsort(a, stable=True) + assert_array_equal(sorted_indices, [2, 1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + sorted_indices = np.argsort(a, stable=False) + assert_array_equal(sorted_indices, [2, 1, 0]) + # original is unchanged + assert_array_equal(a.view(np.float64), [6., 4., 2.]) + + def test_astype_class(self): + # Very simple test that we accept `.astype()` also on the class. + # ScaledFloat always returns the default descriptor, but it does + # check the relevant code paths. + arr = np.array([1., 2., 3.], dtype=object) + + res = arr.astype(SF) # passing the class class + expected = arr.astype(SF(1.)) # above will have discovered 1. scaling + assert_array_equal(res.view(np.float64), expected.view(np.float64)) + + def test_creation_class(self): + # passing in a dtype class should return + # the default descriptor + arr1 = np.array([1., 2., 3.], dtype=SF) + assert arr1.dtype == SF(1.) + arr2 = np.array([1., 2., 3.], dtype=SF(1.)) + assert_array_equal(arr1.view(np.float64), arr2.view(np.float64)) + assert arr1.dtype == arr2.dtype + + assert np.empty(3, dtype=SF).dtype == SF(1.) + assert np.empty_like(arr1, dtype=SF).dtype == SF(1.) + assert np.zeros(3, dtype=SF).dtype == SF(1.) + assert np.zeros_like(arr1, dtype=SF).dtype == SF(1.) + + @pytest.mark.thread_unsafe( + reason="_ScaledFloatTestDType setup is thread-unsafe (gh-29850)" + ) + def test_np_save_load(self): + # this monkeypatch is needed because pickle + # uses the repr of a type to reconstruct it + np._ScaledFloatTestDType = SF + + arr = np.array([1.0, 2.0, 3.0], dtype=SF(1.0)) + + # adapted from RoundtripTest.roundtrip in np.save tests + with NamedTemporaryFile("wb", delete=False, suffix=".npz") as f: + with pytest.warns(UserWarning) as record: + np.savez(f.name, arr) + + assert len(record) == 1 + + with np.load(f.name, allow_pickle=True) as data: + larr = data["arr_0"] + assert_array_equal(arr.view(np.float64), larr.view(np.float64)) + assert larr.dtype == arr.dtype == SF(1.0) + + del np._ScaledFloatTestDType + + def test_flatiter(self): + arr = np.array([1.0, 2.0, 3.0], dtype=SF(1.0)) + + for i, val in enumerate(arr.flat): + assert arr[i] == val + + @pytest.mark.parametrize( + "index", [ + [1, 2], ..., slice(None, 2, None), + np.array([True, True, False]), np.array([0, 1]) + ], ids=["int_list", "ellipsis", "slice", "bool_array", "int_array"]) + def test_flatiter_index(self, index): + arr = np.array([1.0, 2.0, 3.0], dtype=SF(1.0)) + np.testing.assert_array_equal( + arr[index].view(np.float64), arr.flat[index].view(np.float64)) + + arr2 = arr.copy() + arr[index] = 5.0 + arr2.flat[index] = 5.0 + np.testing.assert_array_equal( + arr.view(np.float64), arr2.view(np.float64)) + +@pytest.mark.thread_unsafe( + reason="_ScaledFloatTestDType setup is thread-unsafe (gh-29850)" +) +def test_type_pickle(): + # can't actually unpickle, but we can pickle (if in namespace) + import pickle + + np._ScaledFloatTestDType = SF + + s = pickle.dumps(SF) + res = pickle.loads(s) + assert res is SF + + del np._ScaledFloatTestDType + + +def test_is_numeric(): + assert SF._is_numeric diff --git a/local_packages/numpy/_core/tests/test_cython.py b/local_packages/numpy/_core/tests/test_cython.py new file mode 100644 index 0000000..7e26805 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_cython.py @@ -0,0 +1,367 @@ +import os +import subprocess +import sys +import sysconfig +from datetime import datetime + +import pytest + +import numpy as np +from numpy.testing import IS_EDITABLE, IS_WASM, assert_array_equal + +# This import is copied from random.tests.test_extending +try: + import cython + from Cython.Compiler.Version import version as cython_version +except ImportError: + cython = None +else: + from numpy._utils import _pep440 + + # Note: keep in sync with the one in pyproject.toml + required_version = "3.0.6" + if _pep440.parse(cython_version) < _pep440.Version(required_version): + # too old or wrong cython, skip the test + cython = None + +pytestmark = pytest.mark.skipif(cython is None, reason="requires cython") + + +if IS_EDITABLE: + pytest.skip( + "Editable install doesn't support tests with a compile step", + allow_module_level=True + ) + + +@pytest.fixture(scope='module') +def install_temp(tmpdir_factory): + # Based in part on test_cython from random.tests.test_extending + if IS_WASM: + pytest.skip("No subprocess") + + srcdir = os.path.join(os.path.dirname(__file__), 'examples', 'cython') + build_dir = tmpdir_factory.mktemp("cython_test") / "build" + os.makedirs(build_dir, exist_ok=True) + # Ensure we use the correct Python interpreter even when `meson` is + # installed in a different Python environment (see gh-24956) + native_file = str(build_dir / 'interpreter-native-file.ini') + with open(native_file, 'w') as f: + f.write("[binaries]\n") + f.write(f"python = '{sys.executable}'\n") + f.write(f"python3 = '{sys.executable}'") + + try: + subprocess.check_call(["meson", "--version"]) + except FileNotFoundError: + pytest.skip("No usable 'meson' found") + if sysconfig.get_platform() == "win-arm64": + pytest.skip("Meson unable to find MSVC linker on win-arm64") + if sys.platform == "win32": + subprocess.check_call(["meson", "setup", + "--buildtype=release", + "--vsenv", "--native-file", native_file, + str(srcdir)], + cwd=build_dir, + ) + else: + subprocess.check_call(["meson", "setup", + "--native-file", native_file, str(srcdir)], + cwd=build_dir + ) + try: + subprocess.check_call(["meson", "compile", "-vv"], cwd=build_dir) + except subprocess.CalledProcessError: + print("----------------") + print("meson build failed when doing") + print(f"'meson setup --native-file {native_file} {srcdir}'") + print("'meson compile -vv'") + print(f"in {build_dir}") + print("----------------") + raise + + sys.path.append(str(build_dir)) + + +def test_is_timedelta64_object(install_temp): + import checks + + assert checks.is_td64(np.timedelta64(1234)) + assert checks.is_td64(np.timedelta64(1234, "ns")) + assert checks.is_td64(np.timedelta64("NaT", "ns")) + + assert not checks.is_td64(1) + assert not checks.is_td64(None) + assert not checks.is_td64("foo") + assert not checks.is_td64(np.datetime64("now", "s")) + + +def test_is_datetime64_object(install_temp): + import checks + + assert checks.is_dt64(np.datetime64(1234, "ns")) + assert checks.is_dt64(np.datetime64("NaT", "ns")) + + assert not checks.is_dt64(1) + assert not checks.is_dt64(None) + assert not checks.is_dt64("foo") + assert not checks.is_dt64(np.timedelta64(1234)) + + +def test_get_datetime64_value(install_temp): + import checks + + dt64 = np.datetime64("2016-01-01", "ns") + + result = checks.get_dt64_value(dt64) + expected = dt64.view("i8") + + assert result == expected + + +def test_get_timedelta64_value(install_temp): + import checks + + td64 = np.timedelta64(12345, "h") + + result = checks.get_td64_value(td64) + expected = td64.view("i8") + + assert result == expected + + +def test_get_datetime64_unit(install_temp): + import checks + + dt64 = np.datetime64("2016-01-01", "ns") + result = checks.get_dt64_unit(dt64) + expected = 10 + assert result == expected + + td64 = np.timedelta64(12345, "h") + result = checks.get_dt64_unit(td64) + expected = 5 + assert result == expected + + +def test_abstract_scalars(install_temp): + import checks + + assert checks.is_integer(1) + assert checks.is_integer(np.int8(1)) + assert checks.is_integer(np.uint64(1)) + +def test_default_int(install_temp): + import checks + + assert checks.get_default_integer() is np.dtype(int) + + +def test_ravel_axis(install_temp): + import checks + + assert checks.get_ravel_axis() == np.iinfo("intc").min + + +def test_convert_datetime64_to_datetimestruct(install_temp): + # GH#21199 + import checks + + res = checks.convert_datetime64_to_datetimestruct() + + exp = { + "year": 2022, + "month": 3, + "day": 15, + "hour": 20, + "min": 1, + "sec": 55, + "us": 260292, + "ps": 0, + "as": 0, + } + + assert res == exp + + +class TestDatetimeStrings: + def test_make_iso_8601_datetime(self, install_temp): + # GH#21199 + import checks + dt = datetime(2016, 6, 2, 10, 45, 19) + # uses NPY_FR_s + result = checks.make_iso_8601_datetime(dt) + assert result == b"2016-06-02T10:45:19" + + def test_get_datetime_iso_8601_strlen(self, install_temp): + # GH#21199 + import checks + # uses NPY_FR_ns + res = checks.get_datetime_iso_8601_strlen() + assert res == 48 + + +@pytest.mark.parametrize( + "arrays", + [ + [np.random.rand(2)], + [np.random.rand(2), np.random.rand(3, 1)], + [np.random.rand(2), np.random.rand(2, 3, 2), np.random.rand(1, 3, 2)], + [np.random.rand(2, 1)] * 4 + [np.random.rand(1, 1, 1)], + ] +) +def test_multiiter_fields(install_temp, arrays): + import checks + bcast = np.broadcast(*arrays) + + assert bcast.ndim == checks.get_multiiter_number_of_dims(bcast) + assert bcast.size == checks.get_multiiter_size(bcast) + assert bcast.numiter == checks.get_multiiter_num_of_iterators(bcast) + assert bcast.shape == checks.get_multiiter_shape(bcast) + assert bcast.index == checks.get_multiiter_current_index(bcast) + assert all( + x.base is y.base + for x, y in zip(bcast.iters, checks.get_multiiter_iters(bcast)) + ) + + +def test_dtype_flags(install_temp): + import checks + dtype = np.dtype("i,O") # dtype with somewhat interesting flags + assert dtype.flags == checks.get_dtype_flags(dtype) + + +def test_conv_intp(install_temp): + import checks + + class myint: + def __int__(self): + return 3 + + # These conversion passes via `__int__`, not `__index__`: + assert checks.conv_intp(3.) == 3 + assert checks.conv_intp(myint()) == 3 + + +def test_npyiter_api(install_temp): + import checks + arr = np.random.rand(3, 2) + + it = np.nditer(arr) + assert checks.get_npyiter_size(it) == it.itersize == np.prod(arr.shape) + assert checks.get_npyiter_ndim(it) == it.ndim == 1 + assert checks.npyiter_has_index(it) == it.has_index == False + + it = np.nditer(arr, flags=["c_index"]) + assert checks.npyiter_has_index(it) == it.has_index == True + assert ( + checks.npyiter_has_delayed_bufalloc(it) + == it.has_delayed_bufalloc + == False + ) + + it = np.nditer(arr, flags=["buffered", "delay_bufalloc"]) + assert ( + checks.npyiter_has_delayed_bufalloc(it) + == it.has_delayed_bufalloc + == True + ) + + it = np.nditer(arr, flags=["multi_index"]) + assert checks.get_npyiter_size(it) == it.itersize == np.prod(arr.shape) + assert checks.npyiter_has_multi_index(it) == it.has_multi_index == True + assert checks.get_npyiter_ndim(it) == it.ndim == 2 + assert checks.test_get_multi_index_iter_next(it, arr) + + arr2 = np.random.rand(2, 1, 2) + it = np.nditer([arr, arr2]) + assert checks.get_npyiter_nop(it) == it.nop == 2 + assert checks.get_npyiter_size(it) == it.itersize == 12 + assert checks.get_npyiter_ndim(it) == it.ndim == 3 + assert all( + x is y for x, y in zip(checks.get_npyiter_operands(it), it.operands) + ) + assert all( + np.allclose(x, y) + for x, y in zip(checks.get_npyiter_itviews(it), it.itviews) + ) + + +def test_fillwithbytes(install_temp): + import checks + + arr = checks.compile_fillwithbyte() + assert_array_equal(arr, np.ones((1, 2))) + + +def test_complex(install_temp): + from checks import inc2_cfloat_struct + + arr = np.array([0, 10 + 10j], dtype="F") + inc2_cfloat_struct(arr) + assert arr[1] == (12 + 12j) + + +def test_npystring_pack(install_temp): + """Check that the cython API can write to a vstring array.""" + import checks + + arr = np.array(['a', 'b', 'c'], dtype='T') + assert checks.npystring_pack(arr) == 0 + + # checks.npystring_pack writes to the beginning of the array + assert arr[0] == "Hello world" + +def test_npystring_load(install_temp): + """Check that the cython API can load strings from a vstring array.""" + import checks + + arr = np.array(['abcd', 'b', 'c'], dtype='T') + result = checks.npystring_load(arr) + assert result == 'abcd' + + +def test_npystring_multiple_allocators(install_temp): + """Check that the cython API can acquire/release multiple vstring allocators.""" + import checks + + dt = np.dtypes.StringDType(na_object=None) + arr1 = np.array(['abcd', 'b', 'c'], dtype=dt) + arr2 = np.array(['a', 'b', 'c'], dtype=dt) + + assert checks.npystring_pack_multiple(arr1, arr2) == 0 + assert arr1[0] == "Hello world" + assert arr1[-1] is None + assert arr2[0] == "test this" + + +def test_npystring_allocators_other_dtype(install_temp): + """Check that allocators for non-StringDType arrays is NULL.""" + import checks + + arr1 = np.array([1, 2, 3], dtype='i') + arr2 = np.array([4, 5, 6], dtype='i') + + assert checks.npystring_allocators_other_types(arr1, arr2) == 0 + + +@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', + reason='no checks module on win-arm64') +def test_npy_uintp_type_enum(install_temp): + import checks + assert checks.check_npy_uintp_type_enum() + + +@pytest.mark.skipif( + sys.version_info < (3, 14), + reason="Tests behavior that happens on Python 3.14 and newer" +) +@pytest.mark.skipif( + sysconfig.get_platform() == 'win-arm64', + reason='no checks module on win-arm64' +) +def test_resize_refcheck(install_temp): + import checks + msg = "It is possible that this is a false positive." + with pytest.raises(ValueError, match=msg): + checks.resize_refcheck_test() diff --git a/local_packages/numpy/_core/tests/test_datetime.py b/local_packages/numpy/_core/tests/test_datetime.py new file mode 100644 index 0000000..029110c --- /dev/null +++ b/local_packages/numpy/_core/tests/test_datetime.py @@ -0,0 +1,2797 @@ +import datetime +import pickle +import warnings +from zoneinfo import ZoneInfo, ZoneInfoNotFoundError + +import pytest + +import numpy +import numpy as np +from numpy.testing import ( + IS_WASM, + assert_, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) + +try: + RecursionError +except NameError: + RecursionError = RuntimeError # python < 3.5 + +try: + ZoneInfo("US/Central") + _has_tz = True +except ZoneInfoNotFoundError: + _has_tz = False + +def _assert_equal_hash(v1, v2): + assert v1 == v2 + assert hash(v1) == hash(v2) + assert v2 in {v1} + + +class TestDateTime: + + def test_string(self): + msg = "no explicit representation of timezones available for " \ + "np.datetime64" + with pytest.warns(UserWarning, match=msg): + np.datetime64('2000-01-01T00+01') + + def test_datetime(self): + msg = "no explicit representation of timezones available for " \ + "np.datetime64" + with pytest.warns(UserWarning, match=msg): + t0 = np.datetime64('2023-06-09T12:18:40Z', 'ns') + + t0 = np.datetime64('2023-06-09T12:18:40', 'ns') + + def test_datetime_dtype_creation(self): + for unit in ['Y', 'M', 'W', 'D', + 'h', 'm', 's', 'ms', 'us', + 'μs', # alias for us + 'ns', 'ps', 'fs', 'as']: + dt1 = np.dtype(f'M8[750{unit}]') + assert_(dt1 == np.dtype(f'datetime64[750{unit}]')) + dt2 = np.dtype(f'm8[{unit}]') + assert_(dt2 == np.dtype(f'timedelta64[{unit}]')) + + # Generic units shouldn't add [] to the end + assert_equal(str(np.dtype("M8")), "datetime64") + + # Should be possible to specify the endianness + assert_equal(np.dtype("=M8"), np.dtype("M8")) + assert_equal(np.dtype("=M8[s]"), np.dtype("M8[s]")) + assert_(np.dtype(">M8") == np.dtype("M8") or + np.dtype("M8[D]") == np.dtype("M8[D]") or + np.dtype("M8") != np.dtype("m8") == np.dtype("m8") or + np.dtype("m8[D]") == np.dtype("m8[D]") or + np.dtype("m8") != np.dtype(" Scalars + assert_equal(np.datetime64(b, '[s]'), np.datetime64('NaT', '[s]')) + assert_equal(np.datetime64(b, '[ms]'), np.datetime64('NaT', '[ms]')) + assert_equal(np.datetime64(b, '[M]'), np.datetime64('NaT', '[M]')) + assert_equal(np.datetime64(b, '[Y]'), np.datetime64('NaT', '[Y]')) + assert_equal(np.datetime64(b, '[W]'), np.datetime64('NaT', '[W]')) + + # Arrays -> Scalars + assert_equal(np.datetime64(a, '[s]'), np.datetime64('NaT', '[s]')) + assert_equal(np.datetime64(a, '[ms]'), np.datetime64('NaT', '[ms]')) + assert_equal(np.datetime64(a, '[M]'), np.datetime64('NaT', '[M]')) + assert_equal(np.datetime64(a, '[Y]'), np.datetime64('NaT', '[Y]')) + assert_equal(np.datetime64(a, '[W]'), np.datetime64('NaT', '[W]')) + + # NaN -> NaT + nan = np.array([np.nan] * 8 + [0]) + fnan = nan.astype('f') + lnan = nan.astype('g') + cnan = nan.astype('D') + cfnan = nan.astype('F') + clnan = nan.astype('G') + hnan = nan.astype(np.half) + + nat = np.array([np.datetime64('NaT')] * 8 + [np.datetime64(0, 'D')]) + assert_equal(nan.astype('M8[ns]'), nat) + assert_equal(fnan.astype('M8[ns]'), nat) + assert_equal(lnan.astype('M8[ns]'), nat) + assert_equal(cnan.astype('M8[ns]'), nat) + assert_equal(cfnan.astype('M8[ns]'), nat) + assert_equal(clnan.astype('M8[ns]'), nat) + assert_equal(hnan.astype('M8[ns]'), nat) + + nat = np.array([np.timedelta64('NaT')] * 8 + [np.timedelta64(0)]) + assert_equal(nan.astype('timedelta64[ns]'), nat) + assert_equal(fnan.astype('timedelta64[ns]'), nat) + assert_equal(lnan.astype('timedelta64[ns]'), nat) + assert_equal(cnan.astype('timedelta64[ns]'), nat) + assert_equal(cfnan.astype('timedelta64[ns]'), nat) + assert_equal(clnan.astype('timedelta64[ns]'), nat) + assert_equal(hnan.astype('timedelta64[ns]'), nat) + + def test_days_creation(self): + assert_equal(np.array('1599', dtype='M8[D]').astype('i8'), + (1600 - 1970) * 365 - (1972 - 1600) / 4 + 3 - 365) + assert_equal(np.array('1600', dtype='M8[D]').astype('i8'), + (1600 - 1970) * 365 - (1972 - 1600) / 4 + 3) + assert_equal(np.array('1601', dtype='M8[D]').astype('i8'), + (1600 - 1970) * 365 - (1972 - 1600) / 4 + 3 + 366) + assert_equal(np.array('1900', dtype='M8[D]').astype('i8'), + (1900 - 1970) * 365 - (1970 - 1900) // 4) + assert_equal(np.array('1901', dtype='M8[D]').astype('i8'), + (1900 - 1970) * 365 - (1970 - 1900) // 4 + 365) + assert_equal(np.array('1967', dtype='M8[D]').astype('i8'), -3 * 365 - 1) + assert_equal(np.array('1968', dtype='M8[D]').astype('i8'), -2 * 365 - 1) + assert_equal(np.array('1969', dtype='M8[D]').astype('i8'), -1 * 365) + assert_equal(np.array('1970', dtype='M8[D]').astype('i8'), 0 * 365) + assert_equal(np.array('1971', dtype='M8[D]').astype('i8'), 1 * 365) + assert_equal(np.array('1972', dtype='M8[D]').astype('i8'), 2 * 365) + assert_equal(np.array('1973', dtype='M8[D]').astype('i8'), 3 * 365 + 1) + assert_equal(np.array('1974', dtype='M8[D]').astype('i8'), 4 * 365 + 1) + assert_equal(np.array('2000', dtype='M8[D]').astype('i8'), + (2000 - 1970) * 365 + (2000 - 1972) // 4) + assert_equal(np.array('2001', dtype='M8[D]').astype('i8'), + (2000 - 1970) * 365 + (2000 - 1972) // 4 + 366) + assert_equal(np.array('2400', dtype='M8[D]').astype('i8'), + (2400 - 1970) * 365 + (2400 - 1972) // 4 - 3) + assert_equal(np.array('2401', dtype='M8[D]').astype('i8'), + (2400 - 1970) * 365 + (2400 - 1972) // 4 - 3 + 366) + + assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('i8'), + (1600 - 1970) * 365 - (1972 - 1600) // 4 + 3 + 31 + 28) + assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('i8'), + (1600 - 1970) * 365 - (1972 - 1600) // 4 + 3 + 31 + 29) + assert_equal(np.array('2000-02-29', dtype='M8[D]').astype('i8'), + (2000 - 1970) * 365 + (2000 - 1972) // 4 + 31 + 28) + assert_equal(np.array('2000-03-01', dtype='M8[D]').astype('i8'), + (2000 - 1970) * 365 + (2000 - 1972) // 4 + 31 + 29) + assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('i8'), + (2000 - 1970) * 365 + (2000 - 1972) // 4 + 366 + 31 + 28 + 21) + + def test_days_to_pydate(self): + assert_equal(np.array('1599', dtype='M8[D]').astype('O'), + datetime.date(1599, 1, 1)) + assert_equal(np.array('1600', dtype='M8[D]').astype('O'), + datetime.date(1600, 1, 1)) + assert_equal(np.array('1601', dtype='M8[D]').astype('O'), + datetime.date(1601, 1, 1)) + assert_equal(np.array('1900', dtype='M8[D]').astype('O'), + datetime.date(1900, 1, 1)) + assert_equal(np.array('1901', dtype='M8[D]').astype('O'), + datetime.date(1901, 1, 1)) + assert_equal(np.array('2000', dtype='M8[D]').astype('O'), + datetime.date(2000, 1, 1)) + assert_equal(np.array('2001', dtype='M8[D]').astype('O'), + datetime.date(2001, 1, 1)) + assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('O'), + datetime.date(1600, 2, 29)) + assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('O'), + datetime.date(1600, 3, 1)) + assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('O'), + datetime.date(2001, 3, 22)) + + def test_dtype_comparison(self): + assert_(not (np.dtype('M8[us]') == np.dtype('M8[ms]'))) + assert_(np.dtype('M8[us]') != np.dtype('M8[ms]')) + assert_(np.dtype('M8[2D]') != np.dtype('M8[D]')) + assert_(np.dtype('M8[D]') != np.dtype('M8[2D]')) + + def test_pydatetime_creation(self): + a = np.array(['1960-03-12', datetime.date(1960, 3, 12)], dtype='M8[D]') + assert_equal(a[0], a[1]) + a = np.array(['1999-12-31', datetime.date(1999, 12, 31)], dtype='M8[D]') + assert_equal(a[0], a[1]) + a = np.array(['2000-01-01', datetime.date(2000, 1, 1)], dtype='M8[D]') + assert_equal(a[0], a[1]) + # Will fail if the date changes during the exact right moment + a = np.array(['today', datetime.date.today()], dtype='M8[D]') + assert_equal(a[0], a[1]) + # datetime.datetime.now() returns local time, not UTC + #a = np.array(['now', datetime.datetime.now()], dtype='M8[s]') + #assert_equal(a[0], a[1]) + + # we can give a datetime.date time units + assert_equal(np.array(datetime.date(1960, 3, 12), dtype='M8[s]'), + np.array(np.datetime64('1960-03-12T00:00:00'))) + + def test_datetime_string_conversion(self): + a = ['2011-03-16', '1920-01-01', '2013-05-19'] + str_a = np.array(a, dtype='S') + uni_a = np.array(a, dtype='U') + dt_a = np.array(a, dtype='M') + + # String to datetime + assert_equal(dt_a, str_a.astype('M')) + assert_equal(dt_a.dtype, str_a.astype('M').dtype) + dt_b = np.empty_like(dt_a) + dt_b[...] = str_a + assert_equal(dt_a, dt_b) + + # Datetime to string + assert_equal(str_a, dt_a.astype('S0')) + str_b = np.empty_like(str_a) + str_b[...] = dt_a + assert_equal(str_a, str_b) + + # Unicode to datetime + assert_equal(dt_a, uni_a.astype('M')) + assert_equal(dt_a.dtype, uni_a.astype('M').dtype) + dt_b = np.empty_like(dt_a) + dt_b[...] = uni_a + assert_equal(dt_a, dt_b) + + # Datetime to unicode + assert_equal(uni_a, dt_a.astype('U')) + uni_b = np.empty_like(uni_a) + uni_b[...] = dt_a + assert_equal(uni_a, uni_b) + + # Datetime to long string - gh-9712 + assert_equal(str_a, dt_a.astype((np.bytes_, 128))) + str_b = np.empty(str_a.shape, dtype=(np.bytes_, 128)) + str_b[...] = dt_a + assert_equal(str_a, str_b) + + @pytest.mark.parametrize("time_dtype", ["m8[D]", "M8[Y]"]) + def test_time_byteswapping(self, time_dtype): + times = np.array(["2017", "NaT"], dtype=time_dtype) + times_swapped = times.astype(times.dtype.newbyteorder()) + assert_array_equal(times, times_swapped) + + unswapped = times_swapped.view(np.dtype("int64").newbyteorder()) + assert_array_equal(unswapped, times.view(np.int64)) + + @pytest.mark.parametrize(["time1", "time2"], + [("M8[s]", "M8[D]"), ("m8[s]", "m8[ns]")]) + def test_time_byteswapped_cast(self, time1, time2): + dtype1 = np.dtype(time1) + dtype2 = np.dtype(time2) + times = np.array(["2017", "NaT"], dtype=dtype1) + expected = times.astype(dtype2) + + # Test that every byte-swapping combination also returns the same + # results (previous tests check that this comparison works fine). + res = times.astype(dtype1.newbyteorder()).astype(dtype2) + assert_array_equal(res, expected) + res = times.astype(dtype2.newbyteorder()) + assert_array_equal(res, expected) + res = times.astype(dtype1.newbyteorder()).astype(dtype2.newbyteorder()) + assert_array_equal(res, expected) + + @pytest.mark.parametrize("time_dtype", ["m8[D]", "M8[Y]"]) + @pytest.mark.parametrize("str_dtype", ["U", "S"]) + def test_datetime_conversions_byteorders(self, str_dtype, time_dtype): + times = np.array(["2017", "NaT"], dtype=time_dtype) + # Unfortunately, timedelta does not roundtrip: + from_strings = np.array(["2017", "NaT"], dtype=str_dtype) + to_strings = times.astype(str_dtype) # assume this is correct + + # Check that conversion from times to string works if src is swapped: + times_swapped = times.astype(times.dtype.newbyteorder()) + res = times_swapped.astype(str_dtype) + assert_array_equal(res, to_strings) + # And also if both are swapped: + res = times_swapped.astype(to_strings.dtype.newbyteorder()) + assert_array_equal(res, to_strings) + # only destination is swapped: + res = times.astype(to_strings.dtype.newbyteorder()) + assert_array_equal(res, to_strings) + + # Check that conversion from string to times works if src is swapped: + from_strings_swapped = from_strings.astype( + from_strings.dtype.newbyteorder()) + res = from_strings_swapped.astype(time_dtype) + assert_array_equal(res, times) + # And if both are swapped: + res = from_strings_swapped.astype(times.dtype.newbyteorder()) + assert_array_equal(res, times) + # Only destination is swapped: + res = from_strings.astype(times.dtype.newbyteorder()) + assert_array_equal(res, times) + + def test_datetime_array_str(self): + a = np.array(['2011-03-16', '1920-01-01', '2013-05-19'], dtype='M') + assert_equal(str(a), "['2011-03-16' '1920-01-01' '2013-05-19']") + + a = np.array(['2011-03-16T13:55', '1920-01-01T03:12'], dtype='M') + assert_equal(np.array2string(a, separator=', ', + formatter={'datetime': lambda x: + f"'{np.datetime_as_string(x, timezone='UTC')}'"}), + "['2011-03-16T13:55Z', '1920-01-01T03:12Z']") + + # Check that one NaT doesn't corrupt subsequent entries + a = np.array(['2010', 'NaT', '2030']).astype('M') + assert_equal(str(a), "['2010' 'NaT' '2030']") + + def test_timedelta_array_str(self): + a = np.array([-1, 0, 100], dtype='m') + assert_equal(str(a), "[ -1 0 100]") + a = np.array(['NaT', 'NaT'], dtype='m') + assert_equal(str(a), "['NaT' 'NaT']") + # Check right-alignment with NaTs + a = np.array([-1, 'NaT', 0], dtype='m') + assert_equal(str(a), "[ -1 'NaT' 0]") + a = np.array([-1, 'NaT', 1234567], dtype='m') + assert_equal(str(a), "[ -1 'NaT' 1234567]") + + # Test with other byteorder: + a = np.array([-1, 'NaT', 1234567], dtype='>m') + assert_equal(str(a), "[ -1 'NaT' 1234567]") + a = np.array([-1, 'NaT', 1234567], dtype=''\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n"\ + b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb." + assert_equal(pickle.loads(pkl), np.dtype('>M8[us]')) + + def test_gh_29555(self): + # check that dtype metadata round-trips when none + dt = np.dtype('>M8[us]') + assert dt.metadata is None + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + res = pickle.loads(pickle.dumps(dt, protocol=proto)) + assert_equal(res, dt) + assert res.metadata is None + + def test_setstate(self): + "Verify that datetime dtype __setstate__ can handle bad arguments" + dt = np.dtype('>M8[us]') + assert_raises(ValueError, dt.__setstate__, + (4, '>', None, None, None, -1, -1, 0, 1)) + assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) + assert_raises(TypeError, dt.__setstate__, + (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx'))) + assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2]) + + def test_dtype_promotion(self): + # datetime datetime computes the metadata gcd + # timedelta timedelta computes the metadata gcd + for mM in ['m', 'M']: + assert_equal( + np.promote_types(np.dtype(mM + '8[2Y]'), np.dtype(mM + '8[2Y]')), + np.dtype(mM + '8[2Y]')) + assert_equal( + np.promote_types(np.dtype(mM + '8[12Y]'), np.dtype(mM + '8[15Y]')), + np.dtype(mM + '8[3Y]')) + assert_equal( + np.promote_types(np.dtype(mM + '8[62M]'), np.dtype(mM + '8[24M]')), + np.dtype(mM + '8[2M]')) + assert_equal( + np.promote_types(np.dtype(mM + '8[1W]'), np.dtype(mM + '8[2D]')), + np.dtype(mM + '8[1D]')) + assert_equal( + np.promote_types(np.dtype(mM + '8[W]'), np.dtype(mM + '8[13s]')), + np.dtype(mM + '8[s]')) + assert_equal( + np.promote_types(np.dtype(mM + '8[13W]'), np.dtype(mM + '8[49s]')), + np.dtype(mM + '8[7s]')) + # timedelta timedelta raises when there is no reasonable gcd + assert_raises(TypeError, np.promote_types, + np.dtype('m8[Y]'), np.dtype('m8[D]')) + assert_raises(TypeError, np.promote_types, + np.dtype('m8[M]'), np.dtype('m8[W]')) + # timedelta and float cannot be safely cast with each other + assert_raises(TypeError, np.promote_types, "float32", "m8") + assert_raises(TypeError, np.promote_types, "m8", "float32") + assert_raises(TypeError, np.promote_types, "uint64", "m8") + assert_raises(TypeError, np.promote_types, "m8", "uint64") + + # timedelta timedelta may overflow with big unit ranges + assert_raises(OverflowError, np.promote_types, + np.dtype('m8[W]'), np.dtype('m8[fs]')) + assert_raises(OverflowError, np.promote_types, + np.dtype('m8[s]'), np.dtype('m8[as]')) + + def test_cast_overflow(self): + # gh-4486 + def cast(): + numpy.datetime64("1971-01-01 00:00:00.000000000000000").astype("datetime64[{unit}]') + assert_equal(np.isnat(arr), res) + arr = np.array([123, -321, "NaT"], dtype=f'timedelta64[{unit}]') + assert_equal(np.isnat(arr), res) + + def test_isnat_error(self): + # Test that only datetime dtype arrays are accepted + for t in np.typecodes["All"]: + if t in np.typecodes["Datetime"]: + continue + assert_raises(TypeError, np.isnat, np.zeros(10, t)) + + def test_isfinite_scalar(self): + assert_(not np.isfinite(np.datetime64('NaT', 'ms'))) + assert_(not np.isfinite(np.datetime64('NaT', 'ns'))) + assert_(np.isfinite(np.datetime64('2038-01-19T03:14:07'))) + + assert_(not np.isfinite(np.timedelta64('NaT', "ms"))) + assert_(np.isfinite(np.timedelta64(34, "ms"))) + + @pytest.mark.parametrize('unit', ['Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', + 'us', 'ns', 'ps', 'fs', 'as']) + @pytest.mark.parametrize('dstr', ['datetime64[%s]', + 'timedelta64[%s]']) + def test_isfinite_isinf_isnan_units(self, unit, dstr): + '''check isfinite, isinf, isnan for all units of M, m dtypes + ''' + arr_val = [123, -321, "NaT"] + arr = np.array(arr_val, dtype=(dstr % unit)) + pos = np.array([True, True, False]) + neg = np.array([False, False, True]) + false = np.array([False, False, False]) + assert_equal(np.isfinite(arr), pos) + assert_equal(np.isinf(arr), false) + assert_equal(np.isnan(arr), neg) + + def test_assert_equal(self): + assert_raises(AssertionError, assert_equal, + np.datetime64('nat'), np.timedelta64('nat')) + + def test_corecursive_input(self): + # construct a co-recursive list + a, b = [], [] + a.append(b) + b.append(a) + obj_arr = np.array([None]) + obj_arr[0] = a + + # At some point this caused a stack overflow (gh-11154). Now raises + # ValueError since the nested list cannot be converted to a datetime. + assert_raises(ValueError, obj_arr.astype, 'M8') + assert_raises(ValueError, obj_arr.astype, 'm8') + + @pytest.mark.parametrize("shape", [(), (1,)]) + def test_discovery_from_object_array(self, shape): + arr = np.array("2020-10-10", dtype=object).reshape(shape) + res = np.array("2020-10-10", dtype="M8").reshape(shape) + assert res.dtype == np.dtype("M8[D]") + assert_equal(arr.astype("M8"), res) + arr[...] = np.bytes_("2020-10-10") # try a numpy string type + assert_equal(arr.astype("M8"), res) + arr = arr.astype("S") + assert_equal(arr.astype("S").astype("M8"), res) + + @pytest.mark.parametrize("time_unit", [ + "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as", + # compound units + "10D", "2M", + ]) + def test_limit_symmetry(self, time_unit): + """ + Dates should have symmetric limits around the unix epoch at +/-np.int64 + """ + epoch = np.datetime64(0, time_unit) + latest = np.datetime64(np.iinfo(np.int64).max, time_unit) + earliest = np.datetime64(-np.iinfo(np.int64).max, time_unit) + + # above should not have overflowed + assert earliest < epoch < latest + + @pytest.mark.parametrize("time_unit", [ + "Y", "M", + pytest.param("W", marks=pytest.mark.xfail(reason="gh-13197")), + "D", "h", "m", + "s", "ms", "us", "ns", "ps", "fs", "as", + pytest.param("10D", marks=pytest.mark.xfail(reason="similar to gh-13197")), + ]) + @pytest.mark.parametrize("sign", [-1, 1]) + def test_limit_str_roundtrip(self, time_unit, sign): + """ + Limits should roundtrip when converted to strings. + + This tests the conversion to and from npy_datetimestruct. + """ + # TODO: add absolute (gold standard) time span limit strings + limit = np.datetime64(np.iinfo(np.int64).max * sign, time_unit) + + # Convert to string and back. Explicit unit needed since the day and + # week reprs are not distinguishable. + limit_via_str = np.datetime64(str(limit), time_unit) + assert limit_via_str == limit + + def test_datetime_hash_nat(self): + nat1 = np.datetime64() + nat2 = np.datetime64() + assert nat1 is not nat2 + assert nat1 != nat2 + assert hash(nat1) != hash(nat2) + + @pytest.mark.parametrize('unit', ('Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_datetime_hash_weeks(self, unit): + dt = np.datetime64(2348, 'W') # 2015-01-01 + dt2 = np.datetime64(dt, unit) + _assert_equal_hash(dt, dt2) + + dt3 = np.datetime64(int(dt2.astype(int)) + 1, unit) + assert hash(dt) != hash(dt3) # doesn't collide + + @pytest.mark.parametrize('unit', ('h', 'm', 's', 'ms', 'us')) + def test_datetime_hash_weeks_vs_pydatetime(self, unit): + dt = np.datetime64(2348, 'W') # 2015-01-01 + dt2 = np.datetime64(dt, unit) + pydt = dt2.astype(datetime.datetime) + assert isinstance(pydt, datetime.datetime) + _assert_equal_hash(pydt, dt2) + + @pytest.mark.parametrize('unit', ('Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_datetime_hash_big_negative(self, unit): + dt = np.datetime64(-102894, 'W') # -002-01-01 + dt2 = np.datetime64(dt, unit) + _assert_equal_hash(dt, dt2) + + # can only go down to "fs" before integer overflow + @pytest.mark.parametrize('unit', ('m', 's', 'ms', 'us', 'ns', 'ps', 'fs')) + def test_datetime_hash_minutes(self, unit): + dt = np.datetime64(3, 'm') + dt2 = np.datetime64(dt, unit) + _assert_equal_hash(dt, dt2) + + @pytest.mark.parametrize('unit', ('ns', 'ps', 'fs', 'as')) + def test_datetime_hash_ns(self, unit): + dt = np.datetime64(3, 'ns') + dt2 = np.datetime64(dt, unit) + _assert_equal_hash(dt, dt2) + + dt3 = np.datetime64(int(dt2.astype(int)) + 1, unit) + assert hash(dt) != hash(dt3) # doesn't collide + + @pytest.mark.parametrize('wk', range(500000, 500010)) # 11552-09-04 + @pytest.mark.parametrize('unit', ('W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_datetime_hash_big_positive(self, wk, unit): + dt = np.datetime64(wk, 'W') + dt2 = np.datetime64(dt, unit) + _assert_equal_hash(dt, dt2) + + def test_timedelta_hash_generic(self): + assert_raises(ValueError, hash, np.timedelta64(123)) # generic + + @pytest.mark.parametrize('unit', ('Y', 'M')) + def test_timedelta_hash_year_month(self, unit): + td = np.timedelta64(45, 'Y') + td2 = np.timedelta64(td, unit) + _assert_equal_hash(td, td2) + + @pytest.mark.parametrize('unit', ('W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_timedelta_hash_weeks(self, unit): + td = np.timedelta64(10, 'W') + td2 = np.timedelta64(td, unit) + _assert_equal_hash(td, td2) + + td3 = np.timedelta64(int(td2.astype(int)) + 1, unit) + assert hash(td) != hash(td3) # doesn't collide + + @pytest.mark.parametrize('unit', ('W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_timedelta_hash_weeks_vs_pydelta(self, unit): + td = np.timedelta64(10, 'W') + td2 = np.timedelta64(td, unit) + pytd = td2.astype(datetime.timedelta) + assert isinstance(pytd, datetime.timedelta) + _assert_equal_hash(pytd, td2) + + @pytest.mark.parametrize('unit', ('ms', 'us', 'ns', 'ps', 'fs', 'as')) + def test_timedelta_hash_ms(self, unit): + td = np.timedelta64(3, 'ms') + td2 = np.timedelta64(td, unit) + _assert_equal_hash(td, td2) + + td3 = np.timedelta64(int(td2.astype(int)) + 1, unit) + assert hash(td) != hash(td3) # doesn't collide + + @pytest.mark.parametrize('wk', range(500000, 500010)) + @pytest.mark.parametrize('unit', ('W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_timedelta_hash_big_positive(self, wk, unit): + td = np.timedelta64(wk, 'W') + td2 = np.timedelta64(td, unit) + _assert_equal_hash(td, td2) + + @pytest.mark.parametrize( + "inputs, divisor, expected", + [ + ( + np.array( + [datetime.timedelta(seconds=20), datetime.timedelta(days=2)], + dtype="object", + ), + np.int64(2), + np.array( + [datetime.timedelta(seconds=10), datetime.timedelta(days=1)], + dtype="object", + ), + ), + ( + np.array( + [datetime.timedelta(seconds=20), datetime.timedelta(days=2)], + dtype="object", + ), + np.timedelta64(2, "s"), + np.array( + [10.0, 24.0 * 60.0 * 60.0], + dtype="object", + ), + ), + ( + datetime.timedelta(seconds=2), + np.array( + [datetime.timedelta(seconds=20), datetime.timedelta(days=2)], + dtype="object", + ), + np.array( + [1.0 / 10.0, 1.0 / (24.0 * 60.0 * 60.0)], + dtype="object", + ), + ), + ], + ) + def test_true_divide_object_by_timedelta( + self, + inputs: np.ndarray | type[np.generic], + divisor: np.ndarray | type[np.generic], + expected: np.ndarray, + ): + # gh-30025 + results = inputs / divisor + assert_array_equal(results, expected) + + +class TestDateTimeData: + + def test_basic(self): + a = np.array(['1980-03-23'], dtype=np.datetime64) + assert_equal(np.datetime_data(a.dtype), ('D', 1)) + + def test_bytes(self): + # byte units are converted to unicode + dt = np.datetime64('2000', (b'ms', 5)) + assert np.datetime_data(dt.dtype) == ('ms', 5) + + dt = np.datetime64('2000', b'5ms') + assert np.datetime_data(dt.dtype) == ('ms', 5) + + def test_non_ascii(self): + # μs is normalized to μ + dt = np.datetime64('2000', ('μs', 5)) + assert np.datetime_data(dt.dtype) == ('us', 5) + + dt = np.datetime64('2000', '5μs') + assert np.datetime_data(dt.dtype) == ('us', 5) + + +def test_comparisons_return_not_implemented(): + # GH#17017 + + class custom: + __array_priority__ = 10000 + + obj = custom() + + dt = np.datetime64('2000', 'ns') + td = dt - dt + + for item in [dt, td]: + assert item.__eq__(obj) is NotImplemented + assert item.__ne__(obj) is NotImplemented + assert item.__le__(obj) is NotImplemented + assert item.__lt__(obj) is NotImplemented + assert item.__ge__(obj) is NotImplemented + assert item.__gt__(obj) is NotImplemented diff --git a/local_packages/numpy/_core/tests/test_defchararray.py b/local_packages/numpy/_core/tests/test_defchararray.py new file mode 100644 index 0000000..e98632b --- /dev/null +++ b/local_packages/numpy/_core/tests/test_defchararray.py @@ -0,0 +1,858 @@ +import pytest + +import numpy as np +from numpy._core.multiarray import _vec_string +from numpy.testing import ( + assert_, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) + +kw_unicode_true = {'unicode': True} # make 2to3 work properly +kw_unicode_false = {'unicode': False} + +class TestBasic: + def test_from_object_array(self): + A = np.array([['abc', 2], + ['long ', '0123456789']], dtype='O') + B = np.char.array(A) + assert_equal(B.dtype.itemsize, 10) + assert_array_equal(B, [[b'abc', b'2'], + [b'long', b'0123456789']]) + + def test_from_object_array_unicode(self): + A = np.array([['abc', 'Sigma \u03a3'], + ['long ', '0123456789']], dtype='O') + assert_raises(ValueError, np.char.array, (A,)) + B = np.char.array(A, **kw_unicode_true) + assert_equal(B.dtype.itemsize, 10 * np.array('a', 'U').dtype.itemsize) + assert_array_equal(B, [['abc', 'Sigma \u03a3'], + ['long', '0123456789']]) + + def test_from_string_array(self): + A = np.array([[b'abc', b'foo'], + [b'long ', b'0123456789']]) + assert_equal(A.dtype.type, np.bytes_) + B = np.char.array(A) + assert_array_equal(B, A) + assert_equal(B.dtype, A.dtype) + assert_equal(B.shape, A.shape) + B[0, 0] = 'changed' + assert_(B[0, 0] != A[0, 0]) + C = np.char.asarray(A) + assert_array_equal(C, A) + assert_equal(C.dtype, A.dtype) + C[0, 0] = 'changed again' + assert_(C[0, 0] != B[0, 0]) + assert_(C[0, 0] == A[0, 0]) + + def test_from_unicode_array(self): + A = np.array([['abc', 'Sigma \u03a3'], + ['long ', '0123456789']]) + assert_equal(A.dtype.type, np.str_) + B = np.char.array(A) + assert_array_equal(B, A) + assert_equal(B.dtype, A.dtype) + assert_equal(B.shape, A.shape) + B = np.char.array(A, **kw_unicode_true) + assert_array_equal(B, A) + assert_equal(B.dtype, A.dtype) + assert_equal(B.shape, A.shape) + + def fail(): + np.char.array(A, **kw_unicode_false) + + assert_raises(UnicodeEncodeError, fail) + + def test_unicode_upconvert(self): + A = np.char.array(['abc']) + B = np.char.array(['\u03a3']) + assert_(issubclass((A + B).dtype.type, np.str_)) + + def test_from_string(self): + A = np.char.array(b'abc') + assert_equal(len(A), 1) + assert_equal(len(A[0]), 3) + assert_(issubclass(A.dtype.type, np.bytes_)) + + def test_from_unicode(self): + A = np.char.array('\u03a3') + assert_equal(len(A), 1) + assert_equal(len(A[0]), 1) + assert_equal(A.itemsize, 4) + assert_(issubclass(A.dtype.type, np.str_)) + +class TestVecString: + def test_non_existent_method(self): + + def fail(): + _vec_string('a', np.bytes_, 'bogus') + + assert_raises(AttributeError, fail) + + def test_non_string_array(self): + + def fail(): + _vec_string(1, np.bytes_, 'strip') + + assert_raises(TypeError, fail) + + def test_invalid_args_tuple(self): + + def fail(): + _vec_string(['a'], np.bytes_, 'strip', 1) + + assert_raises(TypeError, fail) + + def test_invalid_type_descr(self): + + def fail(): + _vec_string(['a'], 'BOGUS', 'strip') + + assert_raises(TypeError, fail) + + def test_invalid_function_args(self): + + def fail(): + _vec_string(['a'], np.bytes_, 'strip', (1,)) + + assert_raises(TypeError, fail) + + def test_invalid_result_type(self): + + def fail(): + _vec_string(['a'], np.int_, 'strip') + + assert_raises(TypeError, fail) + + def test_broadcast_error(self): + + def fail(): + _vec_string([['abc', 'def']], np.int_, 'find', (['a', 'd', 'j'],)) + + assert_raises(ValueError, fail) + +class TestWhitespace: + def test1(self): + A = np.array([['abc ', '123 '], + ['789 ', 'xyz ']]).view(np.char.chararray) + B = np.array([['abc', '123'], + ['789', 'xyz']]).view(np.char.chararray) + assert_(np.all(A == B)) + assert_(np.all(A >= B)) + assert_(np.all(A <= B)) + assert_(not np.any(A > B)) + assert_(not np.any(A < B)) + assert_(not np.any(A != B)) + +class TestChar: + def test_it(self): + A = np.array('abc1', dtype='c').view(np.char.chararray) + assert_equal(A.shape, (4,)) + assert_equal(A.upper()[:2].tobytes(), b'AB') + +class TestComparisons: + def A(self): + return np.array([['abc', 'abcc', '123'], + ['789', 'abc', 'xyz']]).view(np.char.chararray) + + def B(self): + return np.array([['efg', 'efg', '123 '], + ['051', 'efgg', 'tuv']]).view(np.char.chararray) + + def test_not_equal(self): + A, B = self.A(), self.B() + assert_array_equal((A != B), + [[True, True, False], [True, True, True]]) + + def test_equal(self): + A, B = self.A(), self.B() + assert_array_equal((A == B), + [[False, False, True], [False, False, False]]) + + def test_greater_equal(self): + A, B = self.A(), self.B() + assert_array_equal((A >= B), + [[False, False, True], [True, False, True]]) + + def test_less_equal(self): + A, B = self.A(), self.B() + assert_array_equal((A <= B), + [[True, True, True], [False, True, False]]) + + def test_greater(self): + A, B = self.A(), self.B() + assert_array_equal((A > B), + [[False, False, False], [True, False, True]]) + + def test_less(self): + A, B = self.A(), self.B() + assert_array_equal((A < B), + [[True, True, False], [False, True, False]]) + + def test_type(self): + A, B = self.A(), self.B() + out1 = np.char.equal(A, B) + out2 = np.char.equal('a', 'a') + assert_(isinstance(out1, np.ndarray)) + assert_(isinstance(out2, np.ndarray)) + +class TestComparisonsMixed1(TestComparisons): + """Ticket #1276""" + + def B(self): + return np.array( + [['efg', 'efg', '123 '], + ['051', 'efgg', 'tuv']], np.str_).view(np.char.chararray) + +class TestComparisonsMixed2(TestComparisons): + """Ticket #1276""" + + def A(self): + return np.array( + [['abc', 'abcc', '123'], + ['789', 'abc', 'xyz']], np.str_).view(np.char.chararray) + +class TestInformation: + def A(self): + return np.array([[' abc ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]) \ + .view(np.char.chararray) + + def B(self): + return np.array([[' \u03a3 ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]) \ + .view(np.char.chararray) + + def test_len(self): + A, B = self.A(), self.B() + assert_(issubclass(np.char.str_len(A).dtype.type, np.integer)) + assert_array_equal(np.char.str_len(A), [[5, 0], [5, 9], [12, 5]]) + assert_array_equal(np.char.str_len(B), [[3, 0], [5, 9], [12, 5]]) + + def test_count(self): + A, B = self.A(), self.B() + assert_(issubclass(A.count('').dtype.type, np.integer)) + assert_array_equal(A.count('a'), [[1, 0], [0, 1], [0, 0]]) + assert_array_equal(A.count('123'), [[0, 0], [1, 0], [1, 0]]) + # Python doesn't seem to like counting NULL characters + assert_array_equal(A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]]) + assert_array_equal(B.count('a'), [[0, 0], [0, 1], [0, 0]]) + assert_array_equal(B.count('123'), [[0, 0], [1, 0], [1, 0]]) + + def test_endswith(self): + A = self.A() + assert_(issubclass(A.endswith('').dtype.type, np.bool)) + assert_array_equal(A.endswith(' '), [[1, 0], [0, 0], [1, 0]]) + assert_array_equal(A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]]) + + def fail(): + A.endswith('3', 'fdjk') + + assert_raises(TypeError, fail) + + @pytest.mark.parametrize( + "dtype, encode", + [("U", str), + ("S", lambda x: x.encode('ascii')), + ]) + def test_find(self, dtype, encode): + A = self.A().astype(dtype) + assert_(issubclass(A.find(encode('a')).dtype.type, np.integer)) + assert_array_equal(A.find(encode('a')), + [[1, -1], [-1, 6], [-1, -1]]) + assert_array_equal(A.find(encode('3')), + [[-1, -1], [2, -1], [2, -1]]) + assert_array_equal(A.find(encode('a'), 0, 2), + [[1, -1], [-1, -1], [-1, -1]]) + assert_array_equal(A.find([encode('1'), encode('P')]), + [[-1, -1], [0, -1], [0, 1]]) + C = (np.array(['ABCDEFGHIJKLMNOPQRSTUVWXYZ', + '01234567890123456789012345']) + .view(np.char.chararray)).astype(dtype) + assert_array_equal(C.find(encode('M')), [12, -1]) + + def test_index(self): + A = self.A() + + def fail(): + A.index('a') + + assert_raises(ValueError, fail) + assert_(np.char.index('abcba', 'b') == 1) + assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer)) + + def test_isalnum(self): + A = self.A() + assert_(issubclass(A.isalnum().dtype.type, np.bool)) + assert_array_equal(A.isalnum(), [[False, False], [True, True], [False, True]]) + + def test_isalpha(self): + A = self.A() + assert_(issubclass(A.isalpha().dtype.type, np.bool)) + assert_array_equal(A.isalpha(), [[False, False], [False, True], [False, True]]) + + def test_isdigit(self): + A = self.A() + assert_(issubclass(A.isdigit().dtype.type, np.bool)) + assert_array_equal(A.isdigit(), [[False, False], [True, False], [False, False]]) + + def test_islower(self): + A = self.A() + assert_(issubclass(A.islower().dtype.type, np.bool)) + assert_array_equal(A.islower(), [[True, False], [False, False], [False, False]]) + + def test_isspace(self): + A = self.A() + assert_(issubclass(A.isspace().dtype.type, np.bool)) + assert_array_equal(A.isspace(), [[False, False], [False, False], [False, False]]) + + def test_istitle(self): + A = self.A() + assert_(issubclass(A.istitle().dtype.type, np.bool)) + assert_array_equal(A.istitle(), [[False, False], [False, False], [False, False]]) + + def test_isupper(self): + A = self.A() + assert_(issubclass(A.isupper().dtype.type, np.bool)) + assert_array_equal(A.isupper(), [[False, False], [False, False], [False, True]]) + + def test_rfind(self): + A = self.A() + assert_(issubclass(A.rfind('a').dtype.type, np.integer)) + assert_array_equal(A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]]) + assert_array_equal(A.rfind('3'), [[-1, -1], [2, -1], [6, -1]]) + assert_array_equal(A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]]) + assert_array_equal(A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]]) + + def test_rindex(self): + A = self.A() + + def fail(): + A.rindex('a') + + assert_raises(ValueError, fail) + assert_(np.char.rindex('abcba', 'b') == 3) + assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer)) + + def test_startswith(self): + A = self.A() + assert_(issubclass(A.startswith('').dtype.type, np.bool)) + assert_array_equal(A.startswith(' '), [[1, 0], [0, 0], [0, 0]]) + assert_array_equal(A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]]) + + def fail(): + A.startswith('3', 'fdjk') + + assert_raises(TypeError, fail) + +class TestMethods: + def A(self): + return np.array([[' abc ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']], + dtype='S').view(np.char.chararray) + + def B(self): + return np.array([[' \u03a3 ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]) \ + .view(np.char.chararray) + + def test_capitalize(self): + A, B = self.A(), self.B() + tgt = [[b' abc ', b''], + [b'12345', b'Mixedcase'], + [b'123 \t 345 \0 ', b'Upper']] + assert_(issubclass(A.capitalize().dtype.type, np.bytes_)) + assert_array_equal(A.capitalize(), tgt) + + tgt = [[' \u03c3 ', ''], + ['12345', 'Mixedcase'], + ['123 \t 345 \0 ', 'Upper']] + assert_(issubclass(B.capitalize().dtype.type, np.str_)) + assert_array_equal(B.capitalize(), tgt) + + def test_center(self): + A = self.A() + assert_(issubclass(A.center(10).dtype.type, np.bytes_)) + C = A.center([10, 20]) + assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) + + C = A.center(20, b'#') + assert_(np.all(C.startswith(b'#'))) + assert_(np.all(C.endswith(b'#'))) + + C = np.char.center(b'FOO', [[10, 20], [15, 8]]) + tgt = [[b' FOO ', b' FOO '], + [b' FOO ', b' FOO ']] + assert_(issubclass(C.dtype.type, np.bytes_)) + assert_array_equal(C, tgt) + + def test_decode(self): + A = np.char.array([b'\\u03a3']) + assert_(A.decode('unicode-escape')[0] == '\u03a3') + + def test_encode(self): + B = self.B().encode('unicode_escape') + assert_(B[0][0] == ' \\u03a3 '.encode('latin1')) + + def test_expandtabs(self): + T = self.A().expandtabs() + assert_(T[2, 0] == b'123 345 \0') + + def test_join(self): + # NOTE: list(b'123') == [49, 50, 51] + # so that b','.join(b'123') results to an error on Py3 + A0 = self.A().decode('ascii') + + A = np.char.join([',', '#'], A0) + assert_(issubclass(A.dtype.type, np.str_)) + tgt = np.array([[' ,a,b,c, ', ''], + ['1,2,3,4,5', 'M#i#x#e#d#C#a#s#e'], + ['1,2,3, ,\t, ,3,4,5, ,\x00, ', 'U#P#P#E#R']]) + assert_array_equal(np.char.join([',', '#'], A0), tgt) + + def test_ljust(self): + A = self.A() + assert_(issubclass(A.ljust(10).dtype.type, np.bytes_)) + + C = A.ljust([10, 20]) + assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) + + C = A.ljust(20, b'#') + assert_array_equal(C.startswith(b'#'), [ + [False, True], [False, False], [False, False]]) + assert_(np.all(C.endswith(b'#'))) + + C = np.char.ljust(b'FOO', [[10, 20], [15, 8]]) + tgt = [[b'FOO ', b'FOO '], + [b'FOO ', b'FOO ']] + assert_(issubclass(C.dtype.type, np.bytes_)) + assert_array_equal(C, tgt) + + def test_lower(self): + A, B = self.A(), self.B() + tgt = [[b' abc ', b''], + [b'12345', b'mixedcase'], + [b'123 \t 345 \0 ', b'upper']] + assert_(issubclass(A.lower().dtype.type, np.bytes_)) + assert_array_equal(A.lower(), tgt) + + tgt = [[' \u03c3 ', ''], + ['12345', 'mixedcase'], + ['123 \t 345 \0 ', 'upper']] + assert_(issubclass(B.lower().dtype.type, np.str_)) + assert_array_equal(B.lower(), tgt) + + def test_lstrip(self): + A, B = self.A(), self.B() + tgt = [[b'abc ', b''], + [b'12345', b'MixedCase'], + [b'123 \t 345 \0 ', b'UPPER']] + assert_(issubclass(A.lstrip().dtype.type, np.bytes_)) + assert_array_equal(A.lstrip(), tgt) + + tgt = [[b' abc', b''], + [b'2345', b'ixedCase'], + [b'23 \t 345 \x00', b'UPPER']] + assert_array_equal(A.lstrip([b'1', b'M']), tgt) + + tgt = [['\u03a3 ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']] + assert_(issubclass(B.lstrip().dtype.type, np.str_)) + assert_array_equal(B.lstrip(), tgt) + + def test_partition(self): + A = self.A() + P = A.partition([b'3', b'M']) + tgt = [[(b' abc ', b'', b''), (b'', b'', b'')], + [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')], + [(b'12', b'3', b' \t 345 \0 '), (b'UPPER', b'', b'')]] + assert_(issubclass(P.dtype.type, np.bytes_)) + assert_array_equal(P, tgt) + + def test_replace(self): + A = self.A() + R = A.replace([b'3', b'a'], + [b'##########', b'@']) + tgt = [[b' abc ', b''], + [b'12##########45', b'MixedC@se'], + [b'12########## \t ##########45 \x00 ', b'UPPER']] + assert_(issubclass(R.dtype.type, np.bytes_)) + assert_array_equal(R, tgt) + # Test special cases that should just return the input array, + # since replacements are not possible or do nothing. + S1 = A.replace(b'A very long byte string, longer than A', b'') + assert_array_equal(S1, A) + S2 = A.replace(b'', b'') + assert_array_equal(S2, A) + S3 = A.replace(b'3', b'3') + assert_array_equal(S3, A) + S4 = A.replace(b'3', b'', count=0) + assert_array_equal(S4, A) + + def test_replace_count_and_size(self): + a = np.array(['0123456789' * i for i in range(4)] + ).view(np.char.chararray) + r1 = a.replace('5', 'ABCDE') + assert r1.dtype.itemsize == (3 * 10 + 3 * 4) * 4 + assert_array_equal(r1, np.array(['01234ABCDE6789' * i + for i in range(4)])) + r2 = a.replace('5', 'ABCDE', count=1) + assert r2.dtype.itemsize == (3 * 10 + 4) * 4 + r3 = a.replace('5', 'ABCDE', count=0) + assert r3.dtype.itemsize == a.dtype.itemsize + assert_array_equal(r3, a) + # Negative values mean to replace all. + r4 = a.replace('5', 'ABCDE', count=-1) + assert r4.dtype.itemsize == (3 * 10 + 3 * 4) * 4 + assert_array_equal(r4, r1) + # We can do count on an element-by-element basis. + r5 = a.replace('5', 'ABCDE', count=[-1, -1, -1, 1]) + assert r5.dtype.itemsize == (3 * 10 + 4) * 4 + assert_array_equal(r5, np.array( + ['01234ABCDE6789' * i for i in range(3)] + + ['01234ABCDE6789' + '0123456789' * 2])) + + def test_replace_broadcasting(self): + a = np.array('0,0,0').view(np.char.chararray) + r1 = a.replace('0', '1', count=np.arange(3)) + assert r1.dtype == a.dtype + assert_array_equal(r1, np.array(['0,0,0', '1,0,0', '1,1,0'])) + r2 = a.replace('0', [['1'], ['2']], count=np.arange(1, 4)) + assert_array_equal(r2, np.array([['1,0,0', '1,1,0', '1,1,1'], + ['2,0,0', '2,2,0', '2,2,2']])) + r3 = a.replace(['0', '0,0', '0,0,0'], 'X') + assert_array_equal(r3, np.array(['X,X,X', 'X,0', 'X'])) + + def test_rjust(self): + A = self.A() + assert_(issubclass(A.rjust(10).dtype.type, np.bytes_)) + + C = A.rjust([10, 20]) + assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) + + C = A.rjust(20, b'#') + assert_(np.all(C.startswith(b'#'))) + assert_array_equal(C.endswith(b'#'), + [[False, True], [False, False], [False, False]]) + + C = np.char.rjust(b'FOO', [[10, 20], [15, 8]]) + tgt = [[b' FOO', b' FOO'], + [b' FOO', b' FOO']] + assert_(issubclass(C.dtype.type, np.bytes_)) + assert_array_equal(C, tgt) + + def test_rpartition(self): + A = self.A() + P = A.rpartition([b'3', b'M']) + tgt = [[(b'', b'', b' abc '), (b'', b'', b'')], + [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')], + [(b'123 \t ', b'3', b'45 \0 '), (b'', b'', b'UPPER')]] + assert_(issubclass(P.dtype.type, np.bytes_)) + assert_array_equal(P, tgt) + + def test_rsplit(self): + A = self.A().rsplit(b'3') + tgt = [[[b' abc '], [b'']], + [[b'12', b'45'], [b'MixedCase']], + [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]] + assert_(issubclass(A.dtype.type, np.object_)) + assert_equal(A.tolist(), tgt) + + def test_rstrip(self): + A, B = self.A(), self.B() + assert_(issubclass(A.rstrip().dtype.type, np.bytes_)) + + tgt = [[b' abc', b''], + [b'12345', b'MixedCase'], + [b'123 \t 345', b'UPPER']] + assert_array_equal(A.rstrip(), tgt) + + tgt = [[b' abc ', b''], + [b'1234', b'MixedCase'], + [b'123 \t 345 \x00', b'UPP'] + ] + assert_array_equal(A.rstrip([b'5', b'ER']), tgt) + + tgt = [[' \u03a3', ''], + ['12345', 'MixedCase'], + ['123 \t 345', 'UPPER']] + assert_(issubclass(B.rstrip().dtype.type, np.str_)) + assert_array_equal(B.rstrip(), tgt) + + def test_strip(self): + A, B = self.A(), self.B() + tgt = [[b'abc', b''], + [b'12345', b'MixedCase'], + [b'123 \t 345', b'UPPER']] + assert_(issubclass(A.strip().dtype.type, np.bytes_)) + assert_array_equal(A.strip(), tgt) + + tgt = [[b' abc ', b''], + [b'234', b'ixedCas'], + [b'23 \t 345 \x00', b'UPP']] + assert_array_equal(A.strip([b'15', b'EReM']), tgt) + + tgt = [['\u03a3', ''], + ['12345', 'MixedCase'], + ['123 \t 345', 'UPPER']] + assert_(issubclass(B.strip().dtype.type, np.str_)) + assert_array_equal(B.strip(), tgt) + + def test_split(self): + A = self.A().split(b'3') + tgt = [ + [[b' abc '], [b'']], + [[b'12', b'45'], [b'MixedCase']], + [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]] + assert_(issubclass(A.dtype.type, np.object_)) + assert_equal(A.tolist(), tgt) + + def test_splitlines(self): + A = np.char.array(['abc\nfds\nwer']).splitlines() + assert_(issubclass(A.dtype.type, np.object_)) + assert_(A.shape == (1,)) + assert_(len(A[0]) == 3) + + def test_swapcase(self): + A, B = self.A(), self.B() + tgt = [[b' ABC ', b''], + [b'12345', b'mIXEDcASE'], + [b'123 \t 345 \0 ', b'upper']] + assert_(issubclass(A.swapcase().dtype.type, np.bytes_)) + assert_array_equal(A.swapcase(), tgt) + + tgt = [[' \u03c3 ', ''], + ['12345', 'mIXEDcASE'], + ['123 \t 345 \0 ', 'upper']] + assert_(issubclass(B.swapcase().dtype.type, np.str_)) + assert_array_equal(B.swapcase(), tgt) + + def test_title(self): + A, B = self.A(), self.B() + tgt = [[b' Abc ', b''], + [b'12345', b'Mixedcase'], + [b'123 \t 345 \0 ', b'Upper']] + assert_(issubclass(A.title().dtype.type, np.bytes_)) + assert_array_equal(A.title(), tgt) + + tgt = [[' \u03a3 ', ''], + ['12345', 'Mixedcase'], + ['123 \t 345 \0 ', 'Upper']] + assert_(issubclass(B.title().dtype.type, np.str_)) + assert_array_equal(B.title(), tgt) + + def test_upper(self): + A, B = self.A(), self.B() + tgt = [[b' ABC ', b''], + [b'12345', b'MIXEDCASE'], + [b'123 \t 345 \0 ', b'UPPER']] + assert_(issubclass(A.upper().dtype.type, np.bytes_)) + assert_array_equal(A.upper(), tgt) + + tgt = [[' \u03a3 ', ''], + ['12345', 'MIXEDCASE'], + ['123 \t 345 \0 ', 'UPPER']] + assert_(issubclass(B.upper().dtype.type, np.str_)) + assert_array_equal(B.upper(), tgt) + + def test_isnumeric(self): + A, B = self.A(), self.B() + + def fail(): + A.isnumeric() + + assert_raises(TypeError, fail) + assert_(issubclass(B.isnumeric().dtype.type, np.bool)) + assert_array_equal(B.isnumeric(), [ + [False, False], [True, False], [False, False]]) + + def test_isdecimal(self): + A, B = self.A(), self.B() + + def fail(): + A.isdecimal() + + assert_raises(TypeError, fail) + assert_(issubclass(B.isdecimal().dtype.type, np.bool)) + assert_array_equal(B.isdecimal(), [ + [False, False], [True, False], [False, False]]) + +class TestOperations: + def A(self): + return np.array([['abc', '123'], + ['789', 'xyz']]).view(np.char.chararray) + + def B(self): + return np.array([['efg', '456'], + ['051', 'tuv']]).view(np.char.chararray) + + def test_argsort(self): + arr = np.array(['abc'] * 4).view(np.char.chararray) + actual = arr.argsort(stable=True) + assert_array_equal(actual, [0, 1, 2, 3]) + + def test_add(self): + A, B = self.A(), self.B() + AB = np.array([['abcefg', '123456'], + ['789051', 'xyztuv']]).view(np.char.chararray) + assert_array_equal(AB, (A + B)) + assert_(len((A + B)[0][0]) == 6) + + def test_radd(self): + A = self.A() + QA = np.array([['qabc', 'q123'], + ['q789', 'qxyz']]).view(np.char.chararray) + assert_array_equal(QA, ('q' + A)) + + def test_mul(self): + A = self.A() + for r in (2, 3, 5, 7, 197): + Ar = np.array([[A[0, 0] * r, A[0, 1] * r], + [A[1, 0] * r, A[1, 1] * r]]).view(np.char.chararray) + + assert_array_equal(Ar, (A * r)) + + for ob in [object(), 'qrs']: + with assert_raises_regex(ValueError, + 'Can only multiply by integers'): + A * ob + + def test_rmul(self): + A = self.A() + for r in (2, 3, 5, 7, 197): + Ar = np.array([[A[0, 0] * r, A[0, 1] * r], + [A[1, 0] * r, A[1, 1] * r]]).view(np.char.chararray) + assert_array_equal(Ar, (r * A)) + + for ob in [object(), 'qrs']: + with assert_raises_regex(ValueError, + 'Can only multiply by integers'): + ob * A + + def test_mod(self): + """Ticket #856""" + F = np.array([['%d', '%f'], ['%s', '%r']]).view(np.char.chararray) + C = np.array([[3, 7], [19, 1]], dtype=np.int64) + FC = np.array([['3', '7.000000'], + ['19', 'np.int64(1)']]).view(np.char.chararray) + assert_array_equal(FC, F % C) + + A = np.array([['%.3f', '%d'], ['%s', '%r']]).view(np.char.chararray) + A1 = np.array([['1.000', '1'], + ['1', repr(np.array(1)[()])]]).view(np.char.chararray) + assert_array_equal(A1, (A % 1)) + + A2 = np.array([['1.000', '2'], + ['3', repr(np.array(4)[()])]]).view(np.char.chararray) + assert_array_equal(A2, (A % [[1, 2], [3, 4]])) + + def test_rmod(self): + A = self.A() + assert_(f"{A}" == str(A)) + assert_(f"{A!r}" == repr(A)) + + for ob in [42, object()]: + with assert_raises_regex( + TypeError, "unsupported operand type.* and 'chararray'"): + ob % A + + def test_slice(self): + """Regression test for https://github.com/numpy/numpy/issues/5982""" + + arr = np.array([['abc ', 'def '], ['geh ', 'ijk ']], + dtype='S4').view(np.char.chararray) + sl1 = arr[:] + assert_array_equal(sl1, arr) + assert_(sl1.base is arr) + assert_(sl1.base.base is arr.base) + + sl2 = arr[:, :] + assert_array_equal(sl2, arr) + assert_(sl2.base is arr) + assert_(sl2.base.base is arr.base) + + assert_(arr[0, 0] == b'abc') + + @pytest.mark.parametrize('data', [['plate', ' ', 'shrimp'], + [b'retro', b' ', b'encabulator']]) + def test_getitem_length_zero_item(self, data): + # Regression test for gh-26375. + a = np.char.array(data) + # a.dtype.type() will be an empty string or bytes instance. + # The equality test will fail if a[1] has the wrong type + # or does not have length 0. + assert_equal(a[1], a.dtype.type()) + +class TestMethodsEmptyArray: + def test_encode(self): + res = np.char.encode(np.array([], dtype='U')) + assert_array_equal(res, []) + assert_(res.dtype.char == 'S') + + def test_decode(self): + res = np.char.decode(np.array([], dtype='S')) + assert_array_equal(res, []) + assert_(res.dtype.char == 'U') + + def test_decode_with_reshape(self): + res = np.char.decode(np.array([], dtype='S').reshape((1, 0, 1))) + assert_(res.shape == (1, 0, 1)) + +class TestMethodsScalarValues: + def test_mod(self): + A = np.array([[' abc ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']], dtype='S') + tgt = [[b'123 abc ', b'123'], + [b'12312345', b'123MixedCase'], + [b'123123 \t 345 \0 ', b'123UPPER']] + assert_array_equal(np.char.mod(b"123%s", A), tgt) + + def test_decode(self): + bytestring = b'\x81\xc1\x81\xc1\x81\xc1' + assert_equal(np.char.decode(bytestring, encoding='cp037'), + 'aAaAaA') + + def test_encode(self): + unicode = 'aAaAaA' + assert_equal(np.char.encode(unicode, encoding='cp037'), + b'\x81\xc1\x81\xc1\x81\xc1') + + def test_expandtabs(self): + s = "\tone level of indentation\n\t\ttwo levels of indentation" + assert_equal( + np.char.expandtabs(s, tabsize=2), + " one level of indentation\n two levels of indentation" + ) + + def test_join(self): + seps = np.array(['-', '_']) + assert_array_equal(np.char.join(seps, 'hello'), + ['h-e-l-l-o', 'h_e_l_l_o']) + + def test_partition(self): + assert_equal(np.char.partition('This string', ' '), + ['This', ' ', 'string']) + + def test_rpartition(self): + assert_equal(np.char.rpartition('This string here', ' '), + ['This string', ' ', 'here']) + + def test_replace(self): + assert_equal(np.char.replace('Python is good', 'good', 'great'), + 'Python is great') + +def test_empty_indexing(): + """Regression test for ticket 1948.""" + # Check that indexing a chararray with an empty list/array returns an + # empty chararray instead of a chararray with a single empty string in it. + s = np.char.chararray((4,)) + assert_(s[[]].size == 0) diff --git a/local_packages/numpy/_core/tests/test_deprecations.py b/local_packages/numpy/_core/tests/test_deprecations.py new file mode 100644 index 0000000..7cb1fee --- /dev/null +++ b/local_packages/numpy/_core/tests/test_deprecations.py @@ -0,0 +1,460 @@ +""" +Tests related to deprecation warnings. Also a convenient place +to document how deprecations should eventually be turned into errors. + +""" +import contextlib +import warnings + +import pytest + +import numpy as np +import numpy._core._struct_ufunc_tests as struct_ufunc +from numpy._core._multiarray_tests import fromstring_null_term_c_api # noqa: F401 +from numpy.testing import assert_raises + + +class _DeprecationTestCase: + # Just as warning: warnings uses re.match, so the start of this message + # must match. + message = '' + warning_cls = DeprecationWarning + + @contextlib.contextmanager + def filter_warnings(self): + with warnings.catch_warnings(record=True) as w: + # Do *not* ignore other DeprecationWarnings. Ignoring warnings + # can give very confusing results because of + # https://bugs.python.org/issue4180 and it is probably simplest to + # try to keep the tests cleanly giving only the right warning type. + # (While checking them set to "error" those are ignored anyway) + # We still have them show up, because otherwise they would be raised + warnings.filterwarnings("always", category=self.warning_cls) + warnings.filterwarnings("always", message=self.message, + category=self.warning_cls) + yield w + return + + def assert_deprecated(self, function, num=1, ignore_others=False, + function_fails=False, + exceptions=np._NoValue, + args=(), kwargs={}): + """Test if DeprecationWarnings are given and raised. + + This first checks if the function when called gives `num` + DeprecationWarnings, after that it tries to raise these + DeprecationWarnings and compares them with `exceptions`. + The exceptions can be different for cases where this code path + is simply not anticipated and the exception is replaced. + + Parameters + ---------- + function : callable + The function to test + num : int + Number of DeprecationWarnings to expect. This should normally be 1. + ignore_others : bool + Whether warnings of the wrong type should be ignored (note that + the message is not checked) + function_fails : bool + If the function would normally fail, setting this will check for + warnings inside a try/except block. + exceptions : Exception or tuple of Exceptions + Exception to expect when turning the warnings into an error. + The default checks for DeprecationWarnings. If exceptions is + empty the function is expected to run successfully. + args : tuple + Arguments for `function` + kwargs : dict + Keyword arguments for `function` + """ + __tracebackhide__ = True # Hide traceback for py.test + + if exceptions is np._NoValue: + exceptions = (self.warning_cls,) + + if function_fails: + context_manager = contextlib.suppress(Exception) + else: + context_manager = contextlib.nullcontext() + with context_manager: + with self.filter_warnings() as w_context: + function(*args, **kwargs) + + # just in case, clear the registry + num_found = 0 + for warning in w_context: + if warning.category is self.warning_cls: + num_found += 1 + elif not ignore_others: + raise AssertionError( + "expected %s but got: %s" % + (self.warning_cls.__name__, warning.category)) + if num is not None and num_found != num: + msg = f"{len(w_context)} warnings found but {num} expected." + lst = [str(w) for w in w_context] + raise AssertionError("\n".join([msg] + lst)) + + with warnings.catch_warnings(): + warnings.filterwarnings("error", message=self.message, + category=self.warning_cls) + try: + function(*args, **kwargs) + if exceptions != (): + raise AssertionError( + "No error raised during function call") + except exceptions: + if exceptions == (): + raise AssertionError( + "Error raised during function call") + + def assert_not_deprecated(self, function, args=(), kwargs={}): + """Test that warnings are not raised. + + This is just a shorthand for: + + self.assert_deprecated(function, num=0, ignore_others=True, + exceptions=tuple(), args=args, kwargs=kwargs) + """ + self.assert_deprecated(function, num=0, ignore_others=True, + exceptions=(), args=args, kwargs=kwargs) + + +class _VisibleDeprecationTestCase(_DeprecationTestCase): + warning_cls = np.exceptions.VisibleDeprecationWarning + + +class TestTestDeprecated: + def test_assert_deprecated(self): + test_case_instance = _DeprecationTestCase() + assert_raises(AssertionError, + test_case_instance.assert_deprecated, + lambda: None) + + def foo(): + warnings.warn("foo", category=DeprecationWarning, stacklevel=2) + + test_case_instance.assert_deprecated(foo) + + +class TestBincount(_DeprecationTestCase): + # 2024-07-29, 2.1.0 + @pytest.mark.parametrize('badlist', [[0.5, 1.2, 1.5], + ['0', '1', '1']]) + def test_bincount_bad_list(self, badlist): + self.assert_deprecated(lambda: np.bincount(badlist)) + + +class BuiltInRoundComplexDType(_DeprecationTestCase): + # 2020-03-31 1.19.0 + deprecated_types = [np.csingle, np.cdouble, np.clongdouble] + not_deprecated_types = [ + np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64, + np.float16, np.float32, np.float64, + ] + + def test_deprecated(self): + for scalar_type in self.deprecated_types: + scalar = scalar_type(0) + self.assert_deprecated(round, args=(scalar,)) + self.assert_deprecated(round, args=(scalar, 0)) + self.assert_deprecated(round, args=(scalar,), kwargs={'ndigits': 0}) + + def test_not_deprecated(self): + for scalar_type in self.not_deprecated_types: + scalar = scalar_type(0) + self.assert_not_deprecated(round, args=(scalar,)) + self.assert_not_deprecated(round, args=(scalar, 0)) + self.assert_not_deprecated(round, args=(scalar,), kwargs={'ndigits': 0}) + + +class FlatteningConcatenateUnsafeCast(_DeprecationTestCase): + # NumPy 1.20, 2020-09-03 + message = "concatenate with `axis=None` will use same-kind casting" + + def test_deprecated(self): + self.assert_deprecated(np.concatenate, + args=(([0.], [1.]),), + kwargs={'axis': None, 'out': np.empty(2, dtype=np.int64)}) + + def test_not_deprecated(self): + self.assert_not_deprecated(np.concatenate, + args=(([0.], [1.]),), + kwargs={'axis': None, 'out': np.empty(2, dtype=np.int64), + 'casting': "unsafe"}) + + with assert_raises(TypeError): + # Tests should notice if the deprecation warning is given first... + np.concatenate(([0.], [1.]), out=np.empty(2, dtype=np.int64), + casting="same_kind") + + +class TestCtypesGetter(_DeprecationTestCase): + ctypes = np.array([1]).ctypes + + @pytest.mark.parametrize("name", ["data", "shape", "strides", "_as_parameter_"]) + def test_not_deprecated(self, name: str) -> None: + self.assert_not_deprecated(lambda: getattr(self.ctypes, name)) + + +class TestPyIntConversion(_DeprecationTestCase): + message = r".*stop allowing conversion of out-of-bound.*" + + @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) + def test_deprecated_scalar(self, dtype): + dtype = np.dtype(dtype) + info = np.iinfo(dtype) + + # Cover the most common creation paths (all end up in the + # same place): + def scalar(value, dtype): + dtype.type(value) + + def assign(value, dtype): + arr = np.array([0, 0, 0], dtype=dtype) + arr[2] = value + + def create(value, dtype): + np.array([value], dtype=dtype) + + for creation_func in [scalar, assign, create]: + try: + self.assert_deprecated( + lambda: creation_func(info.min - 1, dtype)) + except OverflowError: + pass # OverflowErrors always happened also before and are OK. + + try: + self.assert_deprecated( + lambda: creation_func(info.max + 1, dtype)) + except OverflowError: + pass # OverflowErrors always happened also before and are OK. + + +@pytest.mark.parametrize("name", ["str", "bytes", "object"]) +def test_future_scalar_attributes(name): + # FutureWarning added 2022-11-17, NumPy 1.24, + assert name not in dir(np) # we may want to not add them + with pytest.warns(FutureWarning, + match=f"In the future .*{name}"): + assert not hasattr(np, name) + + # Unfortunately, they are currently still valid via `np.dtype()` + np.dtype(name) + name in np._core.sctypeDict + + +# Ignore the above future attribute warning for this test. +@pytest.mark.filterwarnings("ignore:In the future:FutureWarning") +class TestRemovedGlobals: + # Removed 2023-01-12, NumPy 1.24.0 + # Not a deprecation, but the large error was added to aid those who missed + # the previous deprecation, and should be removed similarly to one + # (or faster). + @pytest.mark.parametrize("name", + ["object", "float", "complex", "str", "int"]) + def test_attributeerror_includes_info(self, name): + msg = f".*\n`np.{name}` was a deprecated alias for the builtin" + with pytest.raises(AttributeError, match=msg): + getattr(np, name) + + +class TestDeprecatedFinfo(_DeprecationTestCase): + # Deprecated in NumPy 1.25, 2023-01-16 + def test_deprecated_none(self): + self.assert_deprecated(np.finfo, args=(None,)) + + +class TestMathAlias(_DeprecationTestCase): + def test_deprecated_np_lib_math(self): + self.assert_deprecated(lambda: np.lib.math) + + +class TestLibImports(_DeprecationTestCase): + # Deprecated in Numpy 1.26.0, 2023-09 + def test_lib_functions_deprecation_call(self): + from numpy import row_stack + from numpy._core.numerictypes import maximum_sctype + from numpy.lib._npyio_impl import recfromcsv, recfromtxt + from numpy.lib._shape_base_impl import get_array_wrap + from numpy.lib._utils_impl import safe_eval + from numpy.lib.tests.test_io import TextIO + + self.assert_deprecated(lambda: safe_eval("None")) + + data_gen = lambda: TextIO('A,B\n0,1\n2,3') + kwargs = {'delimiter': ",", 'missing_values': "N/A", 'names': True} + self.assert_deprecated(lambda: recfromcsv(data_gen())) + self.assert_deprecated(lambda: recfromtxt(data_gen(), **kwargs)) + + self.assert_deprecated(get_array_wrap) + self.assert_deprecated(lambda: maximum_sctype(int)) + + self.assert_deprecated(lambda: row_stack([[]])) + self.assert_deprecated(lambda: np.chararray) + + +class TestDeprecatedDTypeAliases(_DeprecationTestCase): + + def _check_for_warning(self, func): + with pytest.warns(DeprecationWarning, + match="alias 'a' was deprecated in NumPy 2.0") as w: + func() + assert len(w) == 1 + + def test_a_dtype_alias(self): + for dtype in ["a", "a10"]: + f = lambda: np.dtype(dtype) + self._check_for_warning(f) + self.assert_deprecated(f) + f = lambda: np.array(["hello", "world"]).astype("a10") + self._check_for_warning(f) + self.assert_deprecated(f) + + +class TestDeprecatedArrayWrap(_DeprecationTestCase): + message = "__array_wrap__.*" + + def test_deprecated(self): + class Test1: + def __array__(self, dtype=None, copy=None): + return np.arange(4) + + def __array_wrap__(self, arr, context=None): + self.called = True + return 'pass context' + + class Test2(Test1): + def __array_wrap__(self, arr): + self.called = True + return 'pass' + + test1 = Test1() + test2 = Test2() + self.assert_deprecated(lambda: np.negative(test1)) + assert test1.called + self.assert_deprecated(lambda: np.negative(test2)) + assert test2.called + +class TestDeprecatedArrayAttributeSetting(_DeprecationTestCase): + message = "Setting the .*on a NumPy array has been deprecated.*" + + def test_deprecated_strides_set(self): + x = np.eye(2) + self.assert_deprecated(setattr, args=(x, 'strides', x.strides)) + + +class TestDeprecatedDTypeParenthesizedRepeatCount(_DeprecationTestCase): + message = "Passing in a parenthesized single number" + + @pytest.mark.parametrize("string", ["(2)i,", "(3)3S,", "f,(2)f"]) + def test_parenthesized_repeat_count(self, string): + self.assert_deprecated(np.dtype, args=(string,)) + + +class TestAddNewdocUFunc(_DeprecationTestCase): + # Deprecated in Numpy 2.2, 2024-11 + @pytest.mark.thread_unsafe( + reason="modifies and checks docstring which is global state" + ) + def test_deprecated(self): + doc = struct_ufunc.add_triplet.__doc__ + # gh-26718 + # This test mutates the C-level docstring pointer for add_triplet, + # which is permanent once set. Skip when re-running tests. + if doc is not None and "new docs" in doc: + pytest.skip("Cannot retest deprecation, otherwise ValueError: " + "Cannot change docstring of ufunc with non-NULL docstring") + self.assert_deprecated( + lambda: np._core.umath._add_newdoc_ufunc( + struct_ufunc.add_triplet, "new docs" + ) + ) + + +class TestDTypeAlignBool(_VisibleDeprecationTestCase): + # Deprecated in Numpy 2.4, 2025-07 + # NOTE: As you can see, finalizing this deprecation breaks some (very) old + # pickle files. This may be fine, but needs to be done with some care since + # it breaks all of them and not just some. + # (Maybe it should be a 3.0 or only after warning more explicitly around pickles.) + message = r"dtype\(\): align should be passed as Python or NumPy boolean but got " + + def test_deprecated(self): + # in particular integers should be rejected because one may think they mean + # alignment, or pass them accidentally as a subarray shape (meaning to pass + # a tuple). + self.assert_deprecated(lambda: np.dtype("f8", align=3)) + + @pytest.mark.parametrize("align", [True, False, np.True_, np.False_]) + def test_not_deprecated(self, align): + # if the user passes a bool, it is accepted. + self.assert_not_deprecated(lambda: np.dtype("f8", align=align)) + + +class TestFlatiterIndexing0dBoolIndex(_DeprecationTestCase): + # Deprecated in Numpy 2.4, 2025-07 + message = r"Indexing flat iterators with a 0-dimensional boolean index" + + def test_0d_boolean_index_deprecated(self): + arr = np.arange(3) + # 0d boolean indices on flat iterators are deprecated + self.assert_deprecated(lambda: arr.flat[True]) + + def test_0d_boolean_assign_index_deprecated(self): + arr = np.arange(3) + + def assign_to_index(): + arr.flat[True] = 10 + + self.assert_deprecated(assign_to_index) + + +class TestFlatiterIndexingFloatIndex(_DeprecationTestCase): + # Deprecated in NumPy 2.4, 2025-07 + message = r"Invalid non-array indices for iterator objects" + + def test_float_index_deprecated(self): + arr = np.arange(3) + # float indices on flat iterators are deprecated + self.assert_deprecated(lambda: arr.flat[[1.]]) + + def test_float_assign_index_deprecated(self): + arr = np.arange(3) + + def assign_to_index(): + arr.flat[[1.]] = 10 + + self.assert_deprecated(assign_to_index) + + +@pytest.mark.thread_unsafe( + reason="warning control utilities are deprecated due to being thread-unsafe" +) +class TestWarningUtilityDeprecations(_DeprecationTestCase): + # Deprecation in NumPy 2.4, 2025-08 + message = r"NumPy warning suppression and assertion utilities are deprecated." + + def test_assert_warns_deprecated(self): + def use_assert_warns(): + with np.testing.assert_warns(RuntimeWarning): + warnings.warn("foo", RuntimeWarning, stacklevel=1) + + self.assert_deprecated(use_assert_warns) + + def test_suppress_warnings_deprecated(self): + def use_suppress_warnings(): + with np.testing.suppress_warnings() as sup: + sup.filter(RuntimeWarning, 'invalid value encountered in divide') + + self.assert_deprecated(use_suppress_warnings) + + +class TestTooManyArgsExtremum(_DeprecationTestCase): + # Deprecated in Numpy 2.4, 2025-08, gh-27639 + message = "Passing more than 2 positional arguments to np.maximum and np.minimum " + + @pytest.mark.parametrize("ufunc", [np.minimum, np.maximum]) + def test_extremem_3_args(self, ufunc): + self.assert_deprecated(ufunc, args=(np.ones(1), np.zeros(1), np.empty(1))) diff --git a/local_packages/numpy/_core/tests/test_dlpack.py b/local_packages/numpy/_core/tests/test_dlpack.py new file mode 100644 index 0000000..e8198ac --- /dev/null +++ b/local_packages/numpy/_core/tests/test_dlpack.py @@ -0,0 +1,190 @@ +import sys + +import pytest + +import numpy as np +from numpy.testing import IS_PYPY, assert_array_equal + + +def new_and_old_dlpack(): + yield np.arange(5) + + class OldDLPack(np.ndarray): + # Support only the "old" version + def __dlpack__(self, stream=None): + return super().__dlpack__(stream=None) + + yield np.arange(5).view(OldDLPack) + + +class TestDLPack: + @pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.") + @pytest.mark.parametrize("max_version", [(0, 0), None, (1, 0), (100, 3)]) + def test_dunder_dlpack_refcount(self, max_version): + x = np.arange(5) + y = x.__dlpack__(max_version=max_version) + startcount = sys.getrefcount(x) + del y + assert startcount - sys.getrefcount(x) == 1 + + def test_dunder_dlpack_stream(self): + x = np.arange(5) + x.__dlpack__(stream=None) + + with pytest.raises(RuntimeError): + x.__dlpack__(stream=1) + + def test_dunder_dlpack_copy(self): + # Checks the argument parsing of __dlpack__ explicitly. + # Honoring the flag is tested in the from_dlpack round-tripping test. + x = np.arange(5) + x.__dlpack__(copy=True) + x.__dlpack__(copy=None) + x.__dlpack__(copy=False) + + with pytest.raises(ValueError): + # NOTE: The copy converter should be stricter, but not just here. + x.__dlpack__(copy=np.array([1, 2, 3])) + + def test_strides_not_multiple_of_itemsize(self): + dt = np.dtype([('int', np.int32), ('char', np.int8)]) + y = np.zeros((5,), dtype=dt) + z = y['int'] + + with pytest.raises(BufferError): + np.from_dlpack(z) + + @pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.") + @pytest.mark.parametrize("arr", new_and_old_dlpack()) + def test_from_dlpack_refcount(self, arr): + arr = arr.copy() + y = np.from_dlpack(arr) + startcount = sys.getrefcount(arr) + del y + assert startcount - sys.getrefcount(arr) == 1 + + @pytest.mark.parametrize("dtype", [ + np.bool, + np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64, + np.float16, np.float32, np.float64, + np.complex64, np.complex128 + ]) + @pytest.mark.parametrize("arr", new_and_old_dlpack()) + def test_dtype_passthrough(self, arr, dtype): + x = arr.astype(dtype) + y = np.from_dlpack(x) + + assert y.dtype == x.dtype + assert_array_equal(x, y) + + def test_invalid_dtype(self): + x = np.asarray(np.datetime64('2021-05-27')) + + with pytest.raises(BufferError): + np.from_dlpack(x) + + def test_invalid_byte_swapping(self): + dt = np.dtype('=i8').newbyteorder() + x = np.arange(5, dtype=dt) + + with pytest.raises(BufferError): + np.from_dlpack(x) + + def test_non_contiguous(self): + x = np.arange(25).reshape((5, 5)) + + y1 = x[0] + assert_array_equal(y1, np.from_dlpack(y1)) + + y2 = x[:, 0] + assert_array_equal(y2, np.from_dlpack(y2)) + + y3 = x[1, :] + assert_array_equal(y3, np.from_dlpack(y3)) + + y4 = x[1] + assert_array_equal(y4, np.from_dlpack(y4)) + + y5 = np.diagonal(x).copy() + assert_array_equal(y5, np.from_dlpack(y5)) + + @pytest.mark.parametrize("ndim", range(33)) + def test_higher_dims(self, ndim): + shape = (1,) * ndim + x = np.zeros(shape, dtype=np.float64) + + assert shape == np.from_dlpack(x).shape + + def test_dlpack_device(self): + x = np.arange(5) + assert x.__dlpack_device__() == (1, 0) + y = np.from_dlpack(x) + assert y.__dlpack_device__() == (1, 0) + z = y[::2] + assert z.__dlpack_device__() == (1, 0) + + def dlpack_deleter_exception(self, max_version): + x = np.arange(5) + _ = x.__dlpack__(max_version=max_version) + raise RuntimeError + + @pytest.mark.parametrize("max_version", [None, (1, 0)]) + def test_dlpack_destructor_exception(self, max_version): + with pytest.raises(RuntimeError): + self.dlpack_deleter_exception(max_version=max_version) + + def test_readonly(self): + x = np.arange(5) + x.flags.writeable = False + # Raises without max_version + with pytest.raises(BufferError): + x.__dlpack__() + + # But works fine if we try with version + y = np.from_dlpack(x) + assert not y.flags.writeable + + def test_writeable(self): + x_new, x_old = new_and_old_dlpack() + + # new dlpacks respect writeability + y = np.from_dlpack(x_new) + assert y.flags.writeable + + # old dlpacks are not writeable for backwards compatibility + y = np.from_dlpack(x_old) + assert not y.flags.writeable + + def test_ndim0(self): + x = np.array(1.0) + y = np.from_dlpack(x) + assert_array_equal(x, y) + + def test_size1dims_arrays(self): + x = np.ndarray(dtype='f8', shape=(10, 5, 1), strides=(8, 80, 4), + buffer=np.ones(1000, dtype=np.uint8), order='F') + y = np.from_dlpack(x) + assert_array_equal(x, y) + + def test_copy(self): + x = np.arange(5) + + y = np.from_dlpack(x) + assert np.may_share_memory(x, y) + y = np.from_dlpack(x, copy=False) + assert np.may_share_memory(x, y) + y = np.from_dlpack(x, copy=True) + assert not np.may_share_memory(x, y) + + def test_device(self): + x = np.arange(5) + # requesting (1, 0), i.e. CPU device works in both calls: + x.__dlpack__(dl_device=(1, 0)) + np.from_dlpack(x, device="cpu") + np.from_dlpack(x, device=None) + + with pytest.raises(BufferError): + x.__dlpack__(dl_device=(10, 0)) + with pytest.raises(ValueError): + np.from_dlpack(x, device="gpu") diff --git a/local_packages/numpy/_core/tests/test_dtype.py b/local_packages/numpy/_core/tests/test_dtype.py new file mode 100644 index 0000000..157d3e4 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_dtype.py @@ -0,0 +1,2110 @@ +import contextlib +import ctypes +import gc +import inspect +import operator +import pickle +import sys +import types +from itertools import permutations +from typing import Any + +import hypothesis +import pytest +from hypothesis.extra import numpy as hynp + +import numpy as np +import numpy.dtypes +from numpy._core._multiarray_tests import create_custom_field_dtype +from numpy._core._rational_tests import rational +from numpy.testing import ( + HAS_REFCOUNT, + IS_64BIT, + IS_PYPY, + IS_PYSTON, + IS_WASM, + assert_, + assert_array_equal, + assert_equal, + assert_raises, +) + + +def assert_dtype_equal(a, b): + assert_equal(a, b) + assert_equal(hash(a), hash(b), + "two equivalent types do not hash to the same value !") + +def assert_dtype_not_equal(a, b): + assert_(a != b) + assert_(hash(a) != hash(b), + "two different types hash to the same value !") + +class TestBuiltin: + @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object]) + def test_run(self, t): + """Only test hash runs at all.""" + dt = np.dtype(t) + hash(dt) + + @pytest.mark.parametrize('t', [int, float]) + def test_dtype(self, t): + # Make sure equivalent byte order char hash the same (e.g. < and = on + # little endian) + dt = np.dtype(t) + dt2 = dt.newbyteorder("<") + dt3 = dt.newbyteorder(">") + if dt == dt2: + assert_(dt.byteorder != dt2.byteorder, "bogus test") + assert_dtype_equal(dt, dt2) + else: + assert_(dt.byteorder != dt3.byteorder, "bogus test") + assert_dtype_equal(dt, dt3) + + def test_equivalent_dtype_hashing(self): + # Make sure equivalent dtypes with different type num hash equal + uintp = np.dtype(np.uintp) + if uintp.itemsize == 4: + left = uintp + right = np.dtype(np.uint32) + else: + left = uintp + right = np.dtype(np.ulonglong) + assert_(left == right) + assert_(hash(left) == hash(right)) + + def test_invalid_types(self): + # Make sure invalid type strings raise an error + + assert_raises(TypeError, np.dtype, 'O3') + assert_raises(TypeError, np.dtype, 'O5') + assert_raises(TypeError, np.dtype, 'O7') + assert_raises(TypeError, np.dtype, 'b3') + assert_raises(TypeError, np.dtype, 'h4') + assert_raises(TypeError, np.dtype, 'I5') + assert_raises(TypeError, np.dtype, 'e3') + assert_raises(TypeError, np.dtype, 'f5') + + if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16: + assert_raises(TypeError, np.dtype, 'g12') + elif np.dtype('g').itemsize == 12: + assert_raises(TypeError, np.dtype, 'g16') + + if np.dtype('l').itemsize == 8: + assert_raises(TypeError, np.dtype, 'l4') + assert_raises(TypeError, np.dtype, 'L4') + else: + assert_raises(TypeError, np.dtype, 'l8') + assert_raises(TypeError, np.dtype, 'L8') + + if np.dtype('q').itemsize == 8: + assert_raises(TypeError, np.dtype, 'q4') + assert_raises(TypeError, np.dtype, 'Q4') + else: + assert_raises(TypeError, np.dtype, 'q8') + assert_raises(TypeError, np.dtype, 'Q8') + + # Make sure negative-sized dtype raises an error + assert_raises(TypeError, np.dtype, 'S-1') + assert_raises(TypeError, np.dtype, 'U-1') + assert_raises(TypeError, np.dtype, 'V-1') + + def test_richcompare_invalid_dtype_equality(self): + # Make sure objects that cannot be converted to valid + # dtypes results in False/True when compared to valid dtypes. + # Here 7 cannot be converted to dtype. No exceptions should be raised + + assert not np.dtype(np.int32) == 7, "dtype richcompare failed for ==" + assert np.dtype(np.int32) != 7, "dtype richcompare failed for !=" + + @pytest.mark.parametrize( + 'operation', + [operator.le, operator.lt, operator.ge, operator.gt]) + def test_richcompare_invalid_dtype_comparison(self, operation): + # Make sure TypeError is raised for comparison operators + # for invalid dtypes. Here 7 is an invalid dtype. + + with pytest.raises(TypeError): + operation(np.dtype(np.int32), 7) + + @pytest.mark.parametrize("dtype", + ['Bool', 'Bytes0', 'Complex32', 'Complex64', + 'Datetime64', 'Float16', 'Float32', 'Float64', + 'Int8', 'Int16', 'Int32', 'Int64', + 'Object0', 'Str0', 'Timedelta64', + 'UInt8', 'UInt16', 'Uint32', 'UInt32', + 'Uint64', 'UInt64', 'Void0', + "Float128", "Complex128"]) + def test_numeric_style_types_are_invalid(self, dtype): + with assert_raises(TypeError): + np.dtype(dtype) + + def test_expired_dtypes_with_bad_bytesize(self): + match: str = r".*removed in NumPy 2.0.*" + with pytest.raises(TypeError, match=match): + np.dtype("int0") + with pytest.raises(TypeError, match=match): + np.dtype("uint0") + with pytest.raises(TypeError, match=match): + np.dtype("bool8") + with pytest.raises(TypeError, match=match): + np.dtype("bytes0") + with pytest.raises(TypeError, match=match): + np.dtype("str0") + with pytest.raises(TypeError, match=match): + np.dtype("object0") + with pytest.raises(TypeError, match=match): + np.dtype("void0") + + @pytest.mark.parametrize( + 'value', + ['m8', 'M8', 'datetime64', 'timedelta64', + 'i4, (2,3)f8, f4', 'S3, 3u8, (3,4)S10', + '>f', '= (3, 12), + reason="Python 3.12 has immortal refcounts, this test will no longer " + "work. See gh-23986" +) +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +class TestStructuredObjectRefcounting: + """These tests cover various uses of complicated structured types which + include objects and thus require reference counting. + """ + @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'], + iter_struct_object_dtypes()) + @pytest.mark.parametrize(["creation_func", "creation_obj"], [ + pytest.param(np.empty, None, + # None is probably used for too many things + marks=pytest.mark.skip("unreliable due to python's behaviour")), + (np.ones, 1), + (np.zeros, 0)]) + def test_structured_object_create_delete(self, dt, pat, count, singleton, + creation_func, creation_obj): + """Structured object reference counting in creation and deletion""" + # The test assumes that 0, 1, and None are singletons. + gc.collect() + before = sys.getrefcount(creation_obj) + arr = creation_func(3, dt) + + now = sys.getrefcount(creation_obj) + assert now - before == count * 3 + del arr + now = sys.getrefcount(creation_obj) + assert now == before + + @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'], + iter_struct_object_dtypes()) + def test_structured_object_item_setting(self, dt, pat, count, singleton): + """Structured object reference counting for simple item setting""" + one = 1 + + gc.collect() + before = sys.getrefcount(singleton) + arr = np.array([pat] * 3, dt) + assert sys.getrefcount(singleton) - before == count * 3 + # Fill with `1` and check that it was replaced correctly: + before2 = sys.getrefcount(one) + arr[...] = one + after2 = sys.getrefcount(one) + assert after2 - before2 == count * 3 + del arr + gc.collect() + assert sys.getrefcount(one) == before2 + assert sys.getrefcount(singleton) == before + + @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'], + iter_struct_object_dtypes()) + @pytest.mark.parametrize( + ['shape', 'index', 'items_changed'], + [((3,), ([0, 2],), 2), + ((3, 2), ([0, 2], slice(None)), 4), + ((3, 2), ([0, 2], [1]), 2), + ((3,), ([True, False, True]), 2)]) + def test_structured_object_indexing(self, shape, index, items_changed, + dt, pat, count, singleton): + """Structured object reference counting for advanced indexing.""" + # Use two small negative values (should be singletons, but less likely + # to run into race-conditions). This failed in some threaded envs + # When using 0 and 1. If it fails again, should remove all explicit + # checks, and rely on `pytest-leaks` reference count checker only. + val0 = -4 + val1 = -5 + + arr = np.full(shape, val0, dt) + + gc.collect() + before_val0 = sys.getrefcount(val0) + before_val1 = sys.getrefcount(val1) + # Test item getting: + part = arr[index] + after_val0 = sys.getrefcount(val0) + assert after_val0 - before_val0 == count * items_changed + del part + # Test item setting: + arr[index] = val1 + gc.collect() + after_val0 = sys.getrefcount(val0) + after_val1 = sys.getrefcount(val1) + assert before_val0 - after_val0 == count * items_changed + assert after_val1 - before_val1 == count * items_changed + + @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'], + iter_struct_object_dtypes()) + def test_structured_object_take_and_repeat(self, dt, pat, count, singleton): + """Structured object reference counting for specialized functions. + The older functions such as take and repeat use different code paths + then item setting (when writing this). + """ + indices = [0, 1] + + arr = np.array([pat] * 3, dt) + gc.collect() + before = sys.getrefcount(singleton) + res = arr.take(indices) + after = sys.getrefcount(singleton) + assert after - before == count * 2 + new = res.repeat(10) + gc.collect() + after_repeat = sys.getrefcount(singleton) + assert after_repeat - after == count * 2 * 10 + + +class TestStructuredDtypeSparseFields: + """Tests subarray fields which contain sparse dtypes so that + not all memory is used by the dtype work. Such dtype's should + leave the underlying memory unchanged. + """ + dtype = np.dtype([('a', {'names': ['aa', 'ab'], 'formats': ['f', 'f'], + 'offsets': [0, 4]}, (2, 3))]) + sparse_dtype = np.dtype([('a', {'names': ['ab'], 'formats': ['f'], + 'offsets': [4]}, (2, 3))]) + + def test_sparse_field_assignment(self): + arr = np.zeros(3, self.dtype) + sparse_arr = arr.view(self.sparse_dtype) + + sparse_arr[...] = np.finfo(np.float32).max + # dtype is reduced when accessing the field, so shape is (3, 2, 3): + assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3))) + + def test_sparse_field_assignment_fancy(self): + # Fancy assignment goes to the copyswap function for complex types: + arr = np.zeros(3, self.dtype) + sparse_arr = arr.view(self.sparse_dtype) + + sparse_arr[[0, 1, 2]] = np.finfo(np.float32).max + # dtype is reduced when accessing the field, so shape is (3, 2, 3): + assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3))) + + +class TestMonsterType: + """Test deeply nested subtypes.""" + + def test1(self): + simple1 = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'], + 'titles': ['Red pixel', 'Blue pixel']}) + a = np.dtype([('yo', int), ('ye', simple1), + ('yi', np.dtype((int, (3, 2))))]) + b = np.dtype([('yo', int), ('ye', simple1), + ('yi', np.dtype((int, (3, 2))))]) + assert_dtype_equal(a, b) + + c = np.dtype([('yo', int), ('ye', simple1), + ('yi', np.dtype((a, (3, 2))))]) + d = np.dtype([('yo', int), ('ye', simple1), + ('yi', np.dtype((a, (3, 2))))]) + assert_dtype_equal(c, d) + + @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") + @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + def test_list_recursion(self): + l = [] + l.append(('f', l)) + with pytest.raises(RecursionError): + np.dtype(l) + + @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") + @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + def test_tuple_recursion(self): + d = np.int32 + for i in range(100000): + d = (d, (1,)) + # depending on OS and Python version, this might succeed + # see gh-30370 and cpython issue #142253 + with contextlib.suppress(RecursionError): + np.dtype(d) + + @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") + @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + def test_dict_recursion(self): + d = {"names": ['self'], "formats": [None], "offsets": [0]} + d['formats'][0] = d + with pytest.raises(RecursionError): + np.dtype(d) + + +class TestMetadata: + def test_no_metadata(self): + d = np.dtype(int) + assert_(d.metadata is None) + + def test_metadata_takes_dict(self): + d = np.dtype(int, metadata={'datum': 1}) + assert_(d.metadata == {'datum': 1}) + + def test_metadata_rejects_nondict(self): + assert_raises(TypeError, np.dtype, int, metadata='datum') + assert_raises(TypeError, np.dtype, int, metadata=1) + assert_raises(TypeError, np.dtype, int, metadata=None) + + def test_nested_metadata(self): + d = np.dtype([('a', np.dtype(int, metadata={'datum': 1}))]) + assert_(d['a'].metadata == {'datum': 1}) + + def test_base_metadata_copied(self): + d = np.dtype((np.void, np.dtype('i4,i4', metadata={'datum': 1}))) + assert_(d.metadata == {'datum': 1}) + +class TestString: + def test_complex_dtype_str(self): + dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), + ('rtile', '>f4', (64, 36))], (3,)), + ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), + ('bright', '>f4', (8, 36))])]) + assert_equal(str(dt), + "[('top', [('tiles', ('>f4', (64, 64)), (1,)), " + "('rtile', '>f4', (64, 36))], (3,)), " + "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), " + "('bright', '>f4', (8, 36))])]") + + # If the sticky aligned flag is set to True, it makes the + # str() function use a dict representation with an 'aligned' flag + dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), + ('rtile', '>f4', (64, 36))], + (3,)), + ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), + ('bright', '>f4', (8, 36))])], + align=True) + assert_equal(str(dt), + "{'names': ['top', 'bottom']," + " 'formats': [([('tiles', ('>f4', (64, 64)), (1,)), " + "('rtile', '>f4', (64, 36))], (3,)), " + "[('bleft', ('>f4', (8, 64)), (1,)), " + "('bright', '>f4', (8, 36))]]," + " 'offsets': [0, 76800]," + " 'itemsize': 80000," + " 'aligned': True}") + with np.printoptions(legacy='1.21'): + assert_equal(str(dt), + "{'names':['top','bottom'], " + "'formats':[([('tiles', ('>f4', (64, 64)), (1,)), " + "('rtile', '>f4', (64, 36))], (3,))," + "[('bleft', ('>f4', (8, 64)), (1,)), " + "('bright', '>f4', (8, 36))]], " + "'offsets':[0,76800], " + "'itemsize':80000, " + "'aligned':True}") + assert_equal(np.dtype(eval(str(dt))), dt) + + dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'], + 'offsets': [0, 1, 2], + 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']}) + assert_equal(str(dt), + "[(('Red pixel', 'r'), 'u1'), " + "(('Green pixel', 'g'), 'u1'), " + "(('Blue pixel', 'b'), 'u1')]") + + dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'], + 'formats': ['f4', (64, 64)), (1,)), + ('rtile', '>f4', (64, 36))], (3,)), + ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), + ('bright', '>f4', (8, 36))])]) + assert_equal(repr(dt), + "dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), " + "('rtile', '>f4', (64, 36))], (3,)), " + "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), " + "('bright', '>f4', (8, 36))])])") + + dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'], + 'offsets': [0, 1, 2], + 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']}, + align=True) + assert_equal(repr(dt), + "dtype([(('Red pixel', 'r'), 'u1'), " + "(('Green pixel', 'g'), 'u1'), " + "(('Blue pixel', 'b'), 'u1')], align=True)") + + def test_repr_structured_not_packed(self): + dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'], + 'formats': ['i4") + assert np.result_type(dt).isnative + assert np.result_type(dt).num == dt.num + + # dtype with empty space: + struct_dt = np.dtype(">i4,i1,f4', (2, 1)), ('b', 'u4')]) + self.check(BigEndStruct, expected) + + def test_little_endian_structure_packed(self): + class LittleEndStruct(ctypes.LittleEndianStructure): + _fields_ = [ + ('one', ctypes.c_uint8), + ('two', ctypes.c_uint32) + ] + _pack_ = 1 + expected = np.dtype([('one', 'u1'), ('two', 'B'), + ('b', '>H') + ], align=True) + self.check(PaddedStruct, expected) + + def test_simple_endian_types(self): + self.check(ctypes.c_uint16.__ctype_le__, np.dtype('u2')) + self.check(ctypes.c_uint8.__ctype_le__, np.dtype('u1')) + self.check(ctypes.c_uint8.__ctype_be__, np.dtype('u1')) + + all_types = set(np.typecodes['All']) + all_pairs = permutations(all_types, 2) + + @pytest.mark.parametrize("pair", all_pairs) + def test_pairs(self, pair): + """ + Check that np.dtype('x,y') matches [np.dtype('x'), np.dtype('y')] + Example: np.dtype('d,I') -> dtype([('f0', ' None: + alias = np.dtype[Any] + assert isinstance(alias, types.GenericAlias) + assert alias.__origin__ is np.dtype + + @pytest.mark.parametrize("code", np.typecodes["All"]) + def test_dtype_subclass(self, code: str) -> None: + cls = type(np.dtype(code)) + alias = cls[Any] + assert isinstance(alias, types.GenericAlias) + assert alias.__origin__ is cls + + @pytest.mark.parametrize("arg_len", range(4)) + def test_subscript_tuple(self, arg_len: int) -> None: + arg_tup = (Any,) * arg_len + if arg_len == 1: + assert np.dtype[arg_tup] + else: + with pytest.raises(TypeError): + np.dtype[arg_tup] + + def test_subscript_scalar(self) -> None: + assert np.dtype[Any] + + +def test_result_type_integers_and_unitless_timedelta64(): + # Regression test for gh-20077. The following call of `result_type` + # would cause a seg. fault. + td = np.timedelta64(4) + result = np.result_type(0, td) + assert_dtype_equal(result, td.dtype) + + +def test_creating_dtype_with_dtype_class_errors(): + # Regression test for #25031, calling `np.dtype` with itself segfaulted. + with pytest.raises(TypeError, match="Cannot convert np.dtype into a"): + np.array(np.ones(10), dtype=np.dtype) + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") +class TestDTypeSignatures: + def test_signature_dtype(self): + sig = inspect.signature(np.dtype) + + assert len(sig.parameters) == 4 + + assert "dtype" in sig.parameters + assert sig.parameters["dtype"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert sig.parameters["dtype"].default is inspect.Parameter.empty + + assert "align" in sig.parameters + assert sig.parameters["align"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert sig.parameters["align"].default is False + + assert "copy" in sig.parameters + assert sig.parameters["copy"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert sig.parameters["copy"].default is False + + # the optional `metadata` parameter has no default, so `**kwargs` must be used + assert "kwargs" in sig.parameters + assert sig.parameters["kwargs"].kind is inspect.Parameter.VAR_KEYWORD + assert sig.parameters["kwargs"].default is inspect.Parameter.empty + + def test_signature_dtype_newbyteorder(self): + sig = inspect.signature(np.dtype.newbyteorder) + + assert len(sig.parameters) == 2 + + assert "self" in sig.parameters + assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY + assert sig.parameters["self"].default is inspect.Parameter.empty + + assert "new_order" in sig.parameters + assert sig.parameters["new_order"].kind is inspect.Parameter.POSITIONAL_ONLY + assert sig.parameters["new_order"].default == "S" + + @pytest.mark.parametrize("typename", np.dtypes.__all__) + def test_signature_dtypes_classes(self, typename: str): + dtype_type = getattr(np.dtypes, typename) + sig = inspect.signature(dtype_type) + + match typename.lower().removesuffix("dtype"): + case "bytes" | "str": + params_expect = {"size"} + case "void": + params_expect = {"length"} + case "datetime64" | "timedelta64": + params_expect = {"unit"} + case "string": + # `na_object` cannot be used in the text signature because of its + # `np._NoValue` default, which isn't supported by `inspect.signature`, + # so `**kwargs` is used instead. + params_expect = {"coerce", "kwargs"} + case _: + params_expect = set() + + params_actual = set(sig.parameters) + assert params_actual == params_expect diff --git a/local_packages/numpy/_core/tests/test_einsum.py b/local_packages/numpy/_core/tests/test_einsum.py new file mode 100644 index 0000000..375ef03 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_einsum.py @@ -0,0 +1,1351 @@ +import itertools +import warnings + +import pytest + +import numpy as np +from numpy.testing import ( + assert_, + assert_allclose, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) + +# Setup for optimize einsum +chars = 'abcdefghij' +sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) +global_size_dict = dict(zip(chars, sizes)) + + +class TestEinsum: + @pytest.mark.parametrize("do_opt", [True, False]) + @pytest.mark.parametrize("einsum_fn", [np.einsum, np.einsum_path]) + def test_einsum_errors(self, do_opt, einsum_fn): + # Need enough arguments + assert_raises(ValueError, einsum_fn, optimize=do_opt) + assert_raises(ValueError, einsum_fn, "", optimize=do_opt) + + # subscripts must be a string + assert_raises(TypeError, einsum_fn, 0, 0, optimize=do_opt) + + # issue 4528 revealed a segfault with this call + assert_raises(TypeError, einsum_fn, *(None,) * 63, optimize=do_opt) + + # number of operands must match count in subscripts string + assert_raises(ValueError, einsum_fn, "", 0, 0, optimize=do_opt) + assert_raises(ValueError, einsum_fn, ",", 0, [0], [0], + optimize=do_opt) + assert_raises(ValueError, einsum_fn, ",", [0], optimize=do_opt) + + # can't have more subscripts than dimensions in the operand + assert_raises(ValueError, einsum_fn, "i", 0, optimize=do_opt) + assert_raises(ValueError, einsum_fn, "ij", [0, 0], optimize=do_opt) + assert_raises(ValueError, einsum_fn, "...i", 0, optimize=do_opt) + assert_raises(ValueError, einsum_fn, "i...j", [0, 0], optimize=do_opt) + assert_raises(ValueError, einsum_fn, "i...", 0, optimize=do_opt) + assert_raises(ValueError, einsum_fn, "ij...", [0, 0], optimize=do_opt) + + # invalid ellipsis + assert_raises(ValueError, einsum_fn, "i..", [0, 0], optimize=do_opt) + assert_raises(ValueError, einsum_fn, ".i...", [0, 0], optimize=do_opt) + assert_raises(ValueError, einsum_fn, "j->..j", [0, 0], optimize=do_opt) + assert_raises(ValueError, einsum_fn, "j->.j...", [0, 0], + optimize=do_opt) + + # invalid subscript character + assert_raises(ValueError, einsum_fn, "i%...", [0, 0], optimize=do_opt) + assert_raises(ValueError, einsum_fn, "...j$", [0, 0], optimize=do_opt) + assert_raises(ValueError, einsum_fn, "i->&", [0, 0], optimize=do_opt) + + # output subscripts must appear in input + assert_raises(ValueError, einsum_fn, "i->ij", [0, 0], optimize=do_opt) + + # output subscripts may only be specified once + assert_raises(ValueError, einsum_fn, "ij->jij", [[0, 0], [0, 0]], + optimize=do_opt) + + # dimensions must match when being collapsed + assert_raises(ValueError, einsum_fn, "ii", + np.arange(6).reshape(2, 3), optimize=do_opt) + assert_raises(ValueError, einsum_fn, "ii->i", + np.arange(6).reshape(2, 3), optimize=do_opt) + + with assert_raises_regex(ValueError, "'b'"): + # gh-11221 - 'c' erroneously appeared in the error message + a = np.ones((3, 3, 4, 5, 6)) + b = np.ones((3, 4, 5)) + einsum_fn('aabcb,abc', a, b) + + with pytest.raises(ValueError): + a = np.arange(3) + # einsum_path does not yet accept kwarg 'casting' + np.einsum('ij->j', [a, a], casting='same_value') + + def test_einsum_sorting_behavior(self): + # Case 1: 26 dimensions (all lowercase indices) + n1 = 26 + x1 = np.random.random((1,) * n1) + path1 = np.einsum_path(x1, range(n1))[1] # Get einsum path details + output_indices1 = path1.split("->")[-1].strip() # Extract output indices + # Assert indices are only uppercase letters and sorted correctly + assert all(c.isupper() for c in output_indices1), ( + "Output indices for n=26 should use uppercase letters only: " + f"{output_indices1}" + ) + assert_equal( + output_indices1, + ''.join(sorted(output_indices1)), + err_msg=( + "Output indices for n=26 are not lexicographically sorted: " + f"{output_indices1}" + ) + ) + + # Case 2: 27 dimensions (includes uppercase indices) + n2 = 27 + x2 = np.random.random((1,) * n2) + path2 = np.einsum_path(x2, range(n2))[1] + output_indices2 = path2.split("->")[-1].strip() + # Assert indices include both uppercase and lowercase letters + assert any(c.islower() for c in output_indices2), ( + "Output indices for n=27 should include uppercase letters: " + f"{output_indices2}" + ) + # Assert output indices are sorted uppercase before lowercase + assert_equal( + output_indices2, + ''.join(sorted(output_indices2)), + err_msg=( + "Output indices for n=27 are not lexicographically sorted: " + f"{output_indices2}" + ) + ) + + # Additional Check: Ensure dimensions correspond correctly to indices + # Generate expected mapping of dimensions to indices + expected_indices = [ + chr(i + ord('A')) if i < 26 else chr(i - 26 + ord('a')) + for i in range(n2) + ] + assert_equal( + output_indices2, + ''.join(expected_indices), + err_msg=( + "Output indices do not map to the correct dimensions. Expected: " + f"{''.join(expected_indices)}, Got: {output_indices2}" + ) + ) + + @pytest.mark.parametrize("do_opt", [True, False]) + def test_einsum_specific_errors(self, do_opt): + # out parameter must be an array + assert_raises(TypeError, np.einsum, "", 0, out='test', + optimize=do_opt) + + # order parameter must be a valid order + assert_raises(ValueError, np.einsum, "", 0, order='W', + optimize=do_opt) + + # casting parameter must be a valid casting + assert_raises(ValueError, np.einsum, "", 0, casting='blah', + optimize=do_opt) + + # dtype parameter must be a valid dtype + assert_raises(TypeError, np.einsum, "", 0, dtype='bad_data_type', + optimize=do_opt) + + # other keyword arguments are rejected + assert_raises(TypeError, np.einsum, "", 0, bad_arg=0, optimize=do_opt) + + # broadcasting to new dimensions must be enabled explicitly + assert_raises(ValueError, np.einsum, "i", np.arange(6).reshape(2, 3), + optimize=do_opt) + assert_raises(ValueError, np.einsum, "i->i", [[0, 1], [0, 1]], + out=np.arange(4).reshape(2, 2), optimize=do_opt) + + # Check order kwarg, asanyarray allows 1d to pass through + assert_raises(ValueError, np.einsum, "i->i", + np.arange(6).reshape(-1, 1), optimize=do_opt, order='d') + + def test_einsum_object_errors(self): + # Exceptions created by object arithmetic should + # successfully propagate + + class CustomException(Exception): + pass + + class DestructoBox: + + def __init__(self, value, destruct): + self._val = value + self._destruct = destruct + + def __add__(self, other): + tmp = self._val + other._val + if tmp >= self._destruct: + raise CustomException + else: + self._val = tmp + return self + + def __radd__(self, other): + if other == 0: + return self + else: + return self.__add__(other) + + def __mul__(self, other): + tmp = self._val * other._val + if tmp >= self._destruct: + raise CustomException + else: + self._val = tmp + return self + + def __rmul__(self, other): + if other == 0: + return self + else: + return self.__mul__(other) + + a = np.array([DestructoBox(i, 5) for i in range(1, 10)], + dtype='object').reshape(3, 3) + + # raised from unbuffered_loop_nop1_ndim2 + assert_raises(CustomException, np.einsum, "ij->i", a) + + # raised from unbuffered_loop_nop1_ndim3 + b = np.array([DestructoBox(i, 100) for i in range(27)], + dtype='object').reshape(3, 3, 3) + assert_raises(CustomException, np.einsum, "i...k->...", b) + + # raised from unbuffered_loop_nop2_ndim2 + b = np.array([DestructoBox(i, 55) for i in range(1, 4)], + dtype='object') + assert_raises(CustomException, np.einsum, "ij, j", a, b) + + # raised from unbuffered_loop_nop2_ndim3 + assert_raises(CustomException, np.einsum, "ij, jh", a, a) + + # raised from PyArray_EinsteinSum + assert_raises(CustomException, np.einsum, "ij->", a) + + def test_einsum_views(self): + # pass-through + for do_opt in [True, False]: + a = np.arange(6).reshape((2, 3)) + + b = np.einsum("...", a, optimize=do_opt) + assert_(b.base is a.base) + + b = np.einsum(a, [Ellipsis], optimize=do_opt) + assert_(b.base is a.base) + + b = np.einsum("ij", a, optimize=do_opt) + assert_(b.base is a.base) + assert_equal(b, a) + + b = np.einsum(a, [0, 1], optimize=do_opt) + assert_(b.base is a.base) + assert_equal(b, a) + + # output is writeable whenever input is writeable + b = np.einsum("...", a, optimize=do_opt) + assert_(b.flags['WRITEABLE']) + a.flags['WRITEABLE'] = False + b = np.einsum("...", a, optimize=do_opt) + assert_(not b.flags['WRITEABLE']) + + # transpose + a = np.arange(6).reshape((2, 3)) + + b = np.einsum("ji", a, optimize=do_opt) + assert_(b.base is a.base) + assert_equal(b, a.T) + + b = np.einsum(a, [1, 0], optimize=do_opt) + assert_(b.base is a.base) + assert_equal(b, a.T) + + # diagonal + a = np.arange(9).reshape((3, 3)) + + b = np.einsum("ii->i", a, optimize=do_opt) + assert_(b.base is a.base) + assert_equal(b, [a[i, i] for i in range(3)]) + + b = np.einsum(a, [0, 0], [0], optimize=do_opt) + assert_(b.base is a.base) + assert_equal(b, [a[i, i] for i in range(3)]) + + # diagonal with various ways of broadcasting an additional dimension + a = np.arange(27).reshape((3, 3, 3)) + + b = np.einsum("...ii->...i", a, optimize=do_opt) + assert_(b.base is a.base) + assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) + + b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0], optimize=do_opt) + assert_(b.base is a.base) + assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) + + b = np.einsum("ii...->...i", a, optimize=do_opt) + assert_(b.base is a.base) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(2, 0, 1)]) + + b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0], optimize=do_opt) + assert_(b.base is a.base) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(2, 0, 1)]) + + b = np.einsum("...ii->i...", a, optimize=do_opt) + assert_(b.base is a.base) + assert_equal(b, [a[:, i, i] for i in range(3)]) + + b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis], optimize=do_opt) + assert_(b.base is a.base) + assert_equal(b, [a[:, i, i] for i in range(3)]) + + b = np.einsum("jii->ij", a, optimize=do_opt) + assert_(b.base is a.base) + assert_equal(b, [a[:, i, i] for i in range(3)]) + + b = np.einsum(a, [1, 0, 0], [0, 1], optimize=do_opt) + assert_(b.base is a.base) + assert_equal(b, [a[:, i, i] for i in range(3)]) + + b = np.einsum("ii...->i...", a, optimize=do_opt) + assert_(b.base is a.base) + assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) + + b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis], optimize=do_opt) + assert_(b.base is a.base) + assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) + + b = np.einsum("i...i->i...", a, optimize=do_opt) + assert_(b.base is a.base) + assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) + + b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis], optimize=do_opt) + assert_(b.base is a.base) + assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) + + b = np.einsum("i...i->...i", a, optimize=do_opt) + assert_(b.base is a.base) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(1, 0, 2)]) + + b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0], optimize=do_opt) + assert_(b.base is a.base) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(1, 0, 2)]) + + # triple diagonal + a = np.arange(27).reshape((3, 3, 3)) + + b = np.einsum("iii->i", a, optimize=do_opt) + assert_(b.base is a.base) + assert_equal(b, [a[i, i, i] for i in range(3)]) + + b = np.einsum(a, [0, 0, 0], [0], optimize=do_opt) + assert_(b.base is a.base) + assert_equal(b, [a[i, i, i] for i in range(3)]) + + # swap axes + a = np.arange(24).reshape((2, 3, 4)) + + b = np.einsum("ijk->jik", a, optimize=do_opt) + assert_(b.base is a.base) + assert_equal(b, a.swapaxes(0, 1)) + + b = np.einsum(a, [0, 1, 2], [1, 0, 2], optimize=do_opt) + assert_(b.base is a.base) + assert_equal(b, a.swapaxes(0, 1)) + + def check_einsum_sums(self, dtype, do_opt=False): + dtype = np.dtype(dtype) + # Check various sums. Does many sizes to exercise unrolled loops. + + # sum(a, axis=-1) + for n in range(1, 17): + a = np.arange(n, dtype=dtype) + b = np.sum(a, axis=-1) + if hasattr(b, 'astype'): + b = b.astype(dtype) + assert_equal(np.einsum("i->", a, optimize=do_opt), b) + assert_equal(np.einsum(a, [0], [], optimize=do_opt), b) + + for n in range(1, 17): + a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) + b = np.sum(a, axis=-1) + if hasattr(b, 'astype'): + b = b.astype(dtype) + assert_equal(np.einsum("...i->...", a, optimize=do_opt), b) + assert_equal(np.einsum(a, [Ellipsis, 0], [Ellipsis], optimize=do_opt), b) + + # sum(a, axis=0) + for n in range(1, 17): + a = np.arange(2 * n, dtype=dtype).reshape(2, n) + b = np.sum(a, axis=0) + if hasattr(b, 'astype'): + b = b.astype(dtype) + assert_equal(np.einsum("i...->...", a, optimize=do_opt), b) + assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), b) + + for n in range(1, 17): + a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) + b = np.sum(a, axis=0) + if hasattr(b, 'astype'): + b = b.astype(dtype) + assert_equal(np.einsum("i...->...", a, optimize=do_opt), b) + assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), b) + + # trace(a) + for n in range(1, 17): + a = np.arange(n * n, dtype=dtype).reshape(n, n) + b = np.trace(a) + if hasattr(b, 'astype'): + b = b.astype(dtype) + assert_equal(np.einsum("ii", a, optimize=do_opt), b) + assert_equal(np.einsum(a, [0, 0], optimize=do_opt), b) + + # gh-15961: should accept numpy int64 type in subscript list + np_array = np.asarray([0, 0]) + assert_equal(np.einsum(a, np_array, optimize=do_opt), b) + assert_equal(np.einsum(a, list(np_array), optimize=do_opt), b) + + # multiply(a, b) + assert_equal(np.einsum("..., ...", 3, 4), 12) # scalar case + for n in range(1, 17): + a = np.arange(3 * n, dtype=dtype).reshape(3, n) + b = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) + assert_equal(np.einsum("..., ...", a, b, optimize=do_opt), + np.multiply(a, b)) + assert_equal(np.einsum(a, [Ellipsis], b, [Ellipsis], optimize=do_opt), + np.multiply(a, b)) + + # inner(a,b) + for n in range(1, 17): + a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) + b = np.arange(n, dtype=dtype) + assert_equal(np.einsum("...i, ...i", a, b, optimize=do_opt), np.inner(a, b)) + assert_equal(np.einsum(a, [Ellipsis, 0], b, [Ellipsis, 0], optimize=do_opt), + np.inner(a, b)) + + for n in range(1, 11): + a = np.arange(n * 3 * 2, dtype=dtype).reshape(n, 3, 2) + b = np.arange(n, dtype=dtype) + assert_equal(np.einsum("i..., i...", a, b, optimize=do_opt), + np.inner(a.T, b.T).T) + assert_equal(np.einsum(a, [0, Ellipsis], b, [0, Ellipsis], optimize=do_opt), + np.inner(a.T, b.T).T) + + # outer(a,b) + for n in range(1, 17): + a = np.arange(3, dtype=dtype) + 1 + b = np.arange(n, dtype=dtype) + 1 + assert_equal(np.einsum("i,j", a, b, optimize=do_opt), + np.outer(a, b)) + assert_equal(np.einsum(a, [0], b, [1], optimize=do_opt), + np.outer(a, b)) + + # Suppress the complex warnings for the 'as f8' tests + with warnings.catch_warnings(): + warnings.simplefilter('ignore', np.exceptions.ComplexWarning) + + # matvec(a,b) / a.dot(b) where a is matrix, b is vector + for n in range(1, 17): + a = np.arange(4 * n, dtype=dtype).reshape(4, n) + b = np.arange(n, dtype=dtype) + assert_equal(np.einsum("ij, j", a, b, optimize=do_opt), + np.dot(a, b)) + assert_equal(np.einsum(a, [0, 1], b, [1], optimize=do_opt), + np.dot(a, b)) + + c = np.arange(4, dtype=dtype) + np.einsum("ij,j", a, b, out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(a.astype('f8'), + b.astype('f8')).astype(dtype)) + c[...] = 0 + np.einsum(a, [0, 1], b, [1], out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(a.astype('f8'), + b.astype('f8')).astype(dtype)) + + for n in range(1, 17): + a = np.arange(4 * n, dtype=dtype).reshape(4, n) + b = np.arange(n, dtype=dtype) + assert_equal(np.einsum("ji,j", a.T, b.T, optimize=do_opt), + np.dot(b.T, a.T)) + assert_equal(np.einsum(a.T, [1, 0], b.T, [1], optimize=do_opt), + np.dot(b.T, a.T)) + + c = np.arange(4, dtype=dtype) + np.einsum("ji,j", a.T, b.T, out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(b.T.astype('f8'), + a.T.astype('f8')).astype(dtype)) + c[...] = 0 + np.einsum(a.T, [1, 0], b.T, [1], out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(b.T.astype('f8'), + a.T.astype('f8')).astype(dtype)) + + # matmat(a,b) / a.dot(b) where a is matrix, b is matrix + for n in range(1, 17): + if n < 8 or dtype != 'f2': + a = np.arange(4 * n, dtype=dtype).reshape(4, n) + b = np.arange(n * 6, dtype=dtype).reshape(n, 6) + assert_equal(np.einsum("ij,jk", a, b, optimize=do_opt), + np.dot(a, b)) + assert_equal(np.einsum(a, [0, 1], b, [1, 2], optimize=do_opt), + np.dot(a, b)) + + for n in range(1, 17): + a = np.arange(4 * n, dtype=dtype).reshape(4, n) + b = np.arange(n * 6, dtype=dtype).reshape(n, 6) + c = np.arange(24, dtype=dtype).reshape(4, 6) + np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe', + optimize=do_opt) + assert_equal(c, + np.dot(a.astype('f8'), + b.astype('f8')).astype(dtype)) + c[...] = 0 + np.einsum(a, [0, 1], b, [1, 2], out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(a.astype('f8'), + b.astype('f8')).astype(dtype)) + + # matrix triple product (note this is not currently an efficient + # way to multiply 3 matrices) + a = np.arange(12, dtype=dtype).reshape(3, 4) + b = np.arange(20, dtype=dtype).reshape(4, 5) + c = np.arange(30, dtype=dtype).reshape(5, 6) + if dtype != 'f2': + assert_equal(np.einsum("ij,jk,kl", a, b, c, optimize=do_opt), + a.dot(b).dot(c)) + assert_equal(np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], + optimize=do_opt), a.dot(b).dot(c)) + + d = np.arange(18, dtype=dtype).reshape(3, 6) + np.einsum("ij,jk,kl", a, b, c, out=d, + dtype='f8', casting='unsafe', optimize=do_opt) + tgt = a.astype('f8').dot(b.astype('f8')) + tgt = tgt.dot(c.astype('f8')).astype(dtype) + assert_equal(d, tgt) + + d[...] = 0 + np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], out=d, + dtype='f8', casting='unsafe', optimize=do_opt) + tgt = a.astype('f8').dot(b.astype('f8')) + tgt = tgt.dot(c.astype('f8')).astype(dtype) + assert_equal(d, tgt) + + # tensordot(a, b) + if np.dtype(dtype) != np.dtype('f2'): + a = np.arange(60, dtype=dtype).reshape(3, 4, 5) + b = np.arange(24, dtype=dtype).reshape(4, 3, 2) + assert_equal(np.einsum("ijk, jil -> kl", a, b), + np.tensordot(a, b, axes=([1, 0], [0, 1]))) + assert_equal(np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3]), + np.tensordot(a, b, axes=([1, 0], [0, 1]))) + + c = np.arange(10, dtype=dtype).reshape(5, 2) + np.einsum("ijk,jil->kl", a, b, out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'), + axes=([1, 0], [0, 1])).astype(dtype)) + c[...] = 0 + np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3], out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'), + axes=([1, 0], [0, 1])).astype(dtype)) + + # logical_and(logical_and(a!=0, b!=0), c!=0) + neg_val = -2 if dtype.kind != "u" else np.iinfo(dtype).max - 1 + a = np.array([1, 3, neg_val, 0, 12, 13, 0, 1], dtype=dtype) + b = np.array([0, 3.5, 0., neg_val, 0, 1, 3, 12], dtype=dtype) + c = np.array([True, True, False, True, True, False, True, True]) + + assert_equal(np.einsum("i,i,i->i", a, b, c, + dtype='?', casting='unsafe', optimize=do_opt), + np.logical_and(np.logical_and(a != 0, b != 0), c != 0)) + assert_equal(np.einsum(a, [0], b, [0], c, [0], [0], + dtype='?', casting='unsafe'), + np.logical_and(np.logical_and(a != 0, b != 0), c != 0)) + + a = np.arange(9, dtype=dtype) + assert_equal(np.einsum(",i->", 3, a), 3 * np.sum(a)) + assert_equal(np.einsum(3, [], a, [0], []), 3 * np.sum(a)) + assert_equal(np.einsum("i,->", a, 3), 3 * np.sum(a)) + assert_equal(np.einsum(a, [0], 3, [], []), 3 * np.sum(a)) + + # Various stride0, contiguous, and SSE aligned variants + for n in range(1, 25): + a = np.arange(n, dtype=dtype) + if np.dtype(dtype).itemsize > 1: + assert_equal(np.einsum("...,...", a, a, optimize=do_opt), + np.multiply(a, a)) + assert_equal(np.einsum("i,i", a, a, optimize=do_opt), np.dot(a, a)) + assert_equal(np.einsum("i,->i", a, 2, optimize=do_opt), 2 * a) + assert_equal(np.einsum(",i->i", 2, a, optimize=do_opt), 2 * a) + assert_equal(np.einsum("i,->", a, 2, optimize=do_opt), 2 * np.sum(a)) + assert_equal(np.einsum(",i->", 2, a, optimize=do_opt), 2 * np.sum(a)) + + assert_equal(np.einsum("...,...", a[1:], a[:-1], optimize=do_opt), + np.multiply(a[1:], a[:-1])) + assert_equal(np.einsum("i,i", a[1:], a[:-1], optimize=do_opt), + np.dot(a[1:], a[:-1])) + assert_equal(np.einsum("i,->i", a[1:], 2, optimize=do_opt), 2 * a[1:]) + assert_equal(np.einsum(",i->i", 2, a[1:], optimize=do_opt), 2 * a[1:]) + assert_equal(np.einsum("i,->", a[1:], 2, optimize=do_opt), + 2 * np.sum(a[1:])) + assert_equal(np.einsum(",i->", 2, a[1:], optimize=do_opt), + 2 * np.sum(a[1:])) + + # An object array, summed as the data type + a = np.arange(9, dtype=object) + + b = np.einsum("i->", a, dtype=dtype, casting='unsafe') + assert_equal(b, np.sum(a)) + if hasattr(b, "dtype"): + # Can be a python object when dtype is object + assert_equal(b.dtype, np.dtype(dtype)) + + b = np.einsum(a, [0], [], dtype=dtype, casting='unsafe') + assert_equal(b, np.sum(a)) + if hasattr(b, "dtype"): + # Can be a python object when dtype is object + assert_equal(b.dtype, np.dtype(dtype)) + + # A case which was failing (ticket #1885) + p = np.arange(2) + 1 + q = np.arange(4).reshape(2, 2) + 3 + r = np.arange(4).reshape(2, 2) + 7 + assert_equal(np.einsum('z,mz,zm->', p, q, r), 253) + + # singleton dimensions broadcast (gh-10343) + p = np.ones((10, 2)) + q = np.ones((1, 2)) + assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True), + np.einsum('ij,ij->j', p, q, optimize=False)) + assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True), + [10.] * 2) + + # a blas-compatible contraction broadcasting case which was failing + # for optimize=True (ticket #10930) + x = np.array([2., 3.]) + y = np.array([4.]) + assert_array_equal(np.einsum("i, i", x, y, optimize=False), 20.) + assert_array_equal(np.einsum("i, i", x, y, optimize=True), 20.) + + # all-ones array was bypassing bug (ticket #10930) + p = np.ones((1, 5)) / 2 + q = np.ones((5, 5)) / 2 + for optimize in (True, False): + assert_array_equal(np.einsum("...ij,...jk->...ik", p, p, + optimize=optimize), + np.einsum("...ij,...jk->...ik", p, q, + optimize=optimize)) + assert_array_equal(np.einsum("...ij,...jk->...ik", p, q, + optimize=optimize), + np.full((1, 5), 1.25)) + + # Cases which were failing (gh-10899) + x = np.eye(2, dtype=dtype) + y = np.ones(2, dtype=dtype) + assert_array_equal(np.einsum("ji,i->", x, y, optimize=optimize), + [2.]) # contig_contig_outstride0_two + assert_array_equal(np.einsum("i,ij->", y, x, optimize=optimize), + [2.]) # stride0_contig_outstride0_two + assert_array_equal(np.einsum("ij,i->", x, y, optimize=optimize), + [2.]) # contig_stride0_outstride0_two + + def test_einsum_sums_int8(self): + self.check_einsum_sums('i1') + + def test_einsum_sums_uint8(self): + self.check_einsum_sums('u1') + + def test_einsum_sums_int16(self): + self.check_einsum_sums('i2') + + def test_einsum_sums_uint16(self): + self.check_einsum_sums('u2') + + def test_einsum_sums_int32(self): + self.check_einsum_sums('i4') + self.check_einsum_sums('i4', True) + + def test_einsum_sums_uint32(self): + self.check_einsum_sums('u4') + self.check_einsum_sums('u4', True) + + def test_einsum_sums_int64(self): + self.check_einsum_sums('i8') + + def test_einsum_sums_uint64(self): + self.check_einsum_sums('u8') + + def test_einsum_sums_float16(self): + self.check_einsum_sums('f2') + + def test_einsum_sums_float32(self): + self.check_einsum_sums('f4') + + def test_einsum_sums_float64(self): + self.check_einsum_sums('f8') + self.check_einsum_sums('f8', True) + + def test_einsum_sums_longdouble(self): + self.check_einsum_sums(np.longdouble) + + def test_einsum_sums_cfloat64(self): + self.check_einsum_sums('c8') + self.check_einsum_sums('c8', True) + + def test_einsum_sums_cfloat128(self): + self.check_einsum_sums('c16') + + def test_einsum_sums_clongdouble(self): + self.check_einsum_sums(np.clongdouble) + + def test_einsum_sums_object(self): + self.check_einsum_sums('object') + self.check_einsum_sums('object', True) + + def test_einsum_misc(self): + # This call used to crash because of a bug in + # PyArray_AssignZero + a = np.ones((1, 2)) + b = np.ones((2, 2, 1)) + assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]]) + assert_equal(np.einsum('ij...,j...->i...', a, b, optimize=True), [[[2], [2]]]) + + # Regression test for issue #10369 (test unicode inputs with Python 2) + assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]]) + assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4]), 20) + assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4], + optimize='greedy'), 20) + + # The iterator had an issue with buffering this reduction + a = np.ones((5, 12, 4, 2, 3), np.int64) + b = np.ones((5, 12, 11), np.int64) + assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b), + np.einsum('ijklm,ijn->', a, b)) + assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b, optimize=True), + np.einsum('ijklm,ijn->', a, b, optimize=True)) + + # Issue #2027, was a problem in the contiguous 3-argument + # inner loop implementation + a = np.arange(1, 3) + b = np.arange(1, 5).reshape(2, 2) + c = np.arange(1, 9).reshape(4, 2) + assert_equal(np.einsum('x,yx,zx->xzy', a, b, c), + [[[1, 3], [3, 9], [5, 15], [7, 21]], + [[8, 16], [16, 32], [24, 48], [32, 64]]]) + assert_equal(np.einsum('x,yx,zx->xzy', a, b, c, optimize=True), + [[[1, 3], [3, 9], [5, 15], [7, 21]], + [[8, 16], [16, 32], [24, 48], [32, 64]]]) + + # Ensure explicitly setting out=None does not cause an error + # see issue gh-15776 and issue gh-15256 + assert_equal(np.einsum('i,j', [1], [2], out=None), [[2]]) + + def test_object_loop(self): + + class Mult: + def __mul__(self, other): + return 42 + + objMult = np.array([Mult()]) + objNULL = np.ndarray(buffer=b'\0' * np.intp(0).itemsize, shape=1, dtype=object) + + with pytest.raises(TypeError): + np.einsum("i,j", [1], objNULL) + with pytest.raises(TypeError): + np.einsum("i,j", objNULL, [1]) + assert np.einsum("i,j", objMult, objMult) == 42 + + def test_subscript_range(self): + # Issue #7741, make sure that all letters of Latin alphabet (both uppercase & lowercase) can be used + # when creating a subscript from arrays + a = np.ones((2, 3)) + b = np.ones((3, 4)) + np.einsum(a, [0, 20], b, [20, 2], [0, 2], optimize=False) + np.einsum(a, [0, 27], b, [27, 2], [0, 2], optimize=False) + np.einsum(a, [0, 51], b, [51, 2], [0, 2], optimize=False) + assert_raises(ValueError, lambda: np.einsum(a, [0, 52], b, [52, 2], [0, 2], optimize=False)) + assert_raises(ValueError, lambda: np.einsum(a, [-1, 5], b, [5, 2], [-1, 2], optimize=False)) + + def test_einsum_broadcast(self): + # Issue #2455 change in handling ellipsis + # remove the 'middle broadcast' error + # only use the 'RIGHT' iteration in prepare_op_axes + # adds auto broadcast on left where it belongs + # broadcast on right has to be explicit + # We need to test the optimized parsing as well + + A = np.arange(2 * 3 * 4).reshape(2, 3, 4) + B = np.arange(3) + ref = np.einsum('ijk,j->ijk', A, B, optimize=False) + for opt in [True, False]: + assert_equal(np.einsum('ij...,j...->ij...', A, B, optimize=opt), ref) + assert_equal(np.einsum('ij...,...j->ij...', A, B, optimize=opt), ref) + assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=opt), ref) # used to raise error + + A = np.arange(12).reshape((4, 3)) + B = np.arange(6).reshape((3, 2)) + ref = np.einsum('ik,kj->ij', A, B, optimize=False) + for opt in [True, False]: + assert_equal(np.einsum('ik...,k...->i...', A, B, optimize=opt), ref) + assert_equal(np.einsum('ik...,...kj->i...j', A, B, optimize=opt), ref) + assert_equal(np.einsum('...k,kj', A, B, optimize=opt), ref) # used to raise error + assert_equal(np.einsum('ik,k...->i...', A, B, optimize=opt), ref) # used to raise error + + dims = [2, 3, 4, 5] + a = np.arange(np.prod(dims)).reshape(dims) + v = np.arange(dims[2]) + ref = np.einsum('ijkl,k->ijl', a, v, optimize=False) + for opt in [True, False]: + assert_equal(np.einsum('ijkl,k', a, v, optimize=opt), ref) + assert_equal(np.einsum('...kl,k', a, v, optimize=opt), ref) # used to raise error + assert_equal(np.einsum('...kl,k...', a, v, optimize=opt), ref) + + J, K, M = 160, 160, 120 + A = np.arange(J * K * M).reshape(1, 1, 1, J, K, M) + B = np.arange(J * K * M * 3).reshape(J, K, M, 3) + ref = np.einsum('...lmn,...lmno->...o', A, B, optimize=False) + for opt in [True, False]: + assert_equal(np.einsum('...lmn,lmno->...o', A, B, + optimize=opt), ref) # used to raise error + + def test_einsum_fixedstridebug(self): + # Issue #4485 obscure einsum bug + # This case revealed a bug in nditer where it reported a stride + # as 'fixed' (0) when it was in fact not fixed during processing + # (0 or 4). The reason for the bug was that the check for a fixed + # stride was using the information from the 2D inner loop reuse + # to restrict the iteration dimensions it had to validate to be + # the same, but that 2D inner loop reuse logic is only triggered + # during the buffer copying step, and hence it was invalid to + # rely on those values. The fix is to check all the dimensions + # of the stride in question, which in the test case reveals that + # the stride is not fixed. + # + # NOTE: This test is triggered by the fact that the default buffersize, + # used by einsum, is 8192, and 3*2731 = 8193, is larger than that + # and results in a mismatch between the buffering and the + # striding for operand A. + A = np.arange(2 * 3).reshape(2, 3).astype(np.float32) + B = np.arange(2 * 3 * 2731).reshape(2, 3, 2731).astype(np.int16) + es = np.einsum('cl, cpx->lpx', A, B) + tp = np.tensordot(A, B, axes=(0, 0)) + assert_equal(es, tp) + # The following is the original test case from the bug report, + # made repeatable by changing random arrays to aranges. + A = np.arange(3 * 3).reshape(3, 3).astype(np.float64) + B = np.arange(3 * 3 * 64 * 64).reshape(3, 3, 64, 64).astype(np.float32) + es = np.einsum('cl, cpxy->lpxy', A, B) + tp = np.tensordot(A, B, axes=(0, 0)) + assert_equal(es, tp) + + def test_einsum_fixed_collapsingbug(self): + # Issue #5147. + # The bug only occurred when output argument of einssum was used. + x = np.random.normal(0, 1, (5, 5, 5, 5)) + y1 = np.zeros((5, 5)) + np.einsum('aabb->ab', x, out=y1) + idx = np.arange(5) + y2 = x[idx[:, None], idx[:, None], idx, idx] + assert_equal(y1, y2) + + def test_einsum_failed_on_p9_and_s390x(self): + # Issues gh-14692 and gh-12689 + # Bug with signed vs unsigned char errored on power9 and s390x Linux + tensor = np.random.random_sample((10, 10, 10, 10)) + x = np.einsum('ijij->', tensor) + y = tensor.trace(axis1=0, axis2=2).trace() + assert_allclose(x, y) + + def test_einsum_all_contig_non_contig_output(self): + # Issue gh-5907, tests that the all contiguous special case + # actually checks the contiguity of the output + x = np.ones((5, 5)) + out = np.ones(10)[::2] + correct_base = np.ones(10) + correct_base[::2] = 5 + # Always worked (inner iteration is done with 0-stride): + np.einsum('mi,mi,mi->m', x, x, x, out=out) + assert_array_equal(out.base, correct_base) + # Example 1: + out = np.ones(10)[::2] + np.einsum('im,im,im->m', x, x, x, out=out) + assert_array_equal(out.base, correct_base) + # Example 2, buffering causes x to be contiguous but + # special cases do not catch the operation before: + out = np.ones((2, 2, 2))[..., 0] + correct_base = np.ones((2, 2, 2)) + correct_base[..., 0] = 2 + x = np.ones((2, 2), np.float32) + np.einsum('ij,jk->ik', x, x, out=out) + assert_array_equal(out.base, correct_base) + + @pytest.mark.parametrize("dtype", + np.typecodes["AllFloat"] + np.typecodes["AllInteger"]) + def test_different_paths(self, dtype): + # Test originally added to cover broken float16 path: gh-20305 + # Likely most are covered elsewhere, at least partially. + dtype = np.dtype(dtype) + # Simple test, designed to exercise most specialized code paths, + # note the +0.5 for floats. This makes sure we use a float value + # where the results must be exact. + arr = (np.arange(7) + 0.5).astype(dtype) + scalar = np.array(2, dtype=dtype) + + # contig -> scalar: + res = np.einsum('i->', arr) + assert res == arr.sum() + # contig, contig -> contig: + res = np.einsum('i,i->i', arr, arr) + assert_array_equal(res, arr * arr) + # noncontig, noncontig -> contig: + res = np.einsum('i,i->i', arr.repeat(2)[::2], arr.repeat(2)[::2]) + assert_array_equal(res, arr * arr) + # contig + contig -> scalar + assert np.einsum('i,i->', arr, arr) == (arr * arr).sum() + # contig + scalar -> contig (with out) + out = np.ones(7, dtype=dtype) + res = np.einsum('i,->i', arr, dtype.type(2), out=out) + assert_array_equal(res, arr * dtype.type(2)) + # scalar + contig -> contig (with out) + res = np.einsum(',i->i', scalar, arr) + assert_array_equal(res, arr * dtype.type(2)) + # scalar + contig -> scalar + res = np.einsum(',i->', scalar, arr) + # Use einsum to compare to not have difference due to sum round-offs: + assert res == np.einsum('i->', scalar * arr) + # contig + scalar -> scalar + res = np.einsum('i,->', arr, scalar) + # Use einsum to compare to not have difference due to sum round-offs: + assert res == np.einsum('i->', scalar * arr) + # contig + contig + contig -> scalar + arr = np.array([0.5, 0.5, 0.25, 4.5, 3.], dtype=dtype) + res = np.einsum('i,i,i->', arr, arr, arr) + assert_array_equal(res, (arr * arr * arr).sum()) + # four arrays: + res = np.einsum('i,i,i,i->', arr, arr, arr, arr) + assert_array_equal(res, (arr * arr * arr * arr).sum()) + + def test_small_boolean_arrays(self): + # See gh-5946. + # Use array of True embedded in False. + a = np.zeros((16, 1, 1), dtype=np.bool)[:2] + a[...] = True + out = np.zeros((16, 1, 1), dtype=np.bool)[:2] + tgt = np.ones((2, 1, 1), dtype=np.bool) + res = np.einsum('...ij,...jk->...ik', a, a, out=out) + assert_equal(res, tgt) + + def test_out_is_res(self): + a = np.arange(9).reshape(3, 3) + res = np.einsum('...ij,...jk->...ik', a, a, out=a) + assert res is a + + def optimize_compare(self, subscripts, operands=None): + # Tests all paths of the optimization function against + # conventional einsum + if operands is None: + args = [subscripts] + terms = subscripts.split('->')[0].split(',') + for term in terms: + dims = [global_size_dict[x] for x in term] + args.append(np.random.rand(*dims)) + else: + args = [subscripts] + operands + + noopt = np.einsum(*args, optimize=False) + opt = np.einsum(*args, optimize='greedy') + assert_almost_equal(opt, noopt) + opt = np.einsum(*args, optimize='optimal') + assert_almost_equal(opt, noopt) + + def test_hadamard_like_products(self): + # Hadamard outer products + self.optimize_compare('a,ab,abc->abc') + self.optimize_compare('a,b,ab->ab') + + def test_index_transformations(self): + # Simple index transformation cases + self.optimize_compare('ea,fb,gc,hd,abcd->efgh') + self.optimize_compare('ea,fb,abcd,gc,hd->efgh') + self.optimize_compare('abcd,ea,fb,gc,hd->efgh') + + def test_complex(self): + # Long test cases + self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac') + self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac') + self.optimize_compare('cd,bdhe,aidb,hgca,gc,hgibcd,hgac') + self.optimize_compare('abhe,hidj,jgba,hiab,gab') + self.optimize_compare('bde,cdh,agdb,hica,ibd,hgicd,hiac') + self.optimize_compare('chd,bde,agbc,hiad,hgc,hgi,hiad') + self.optimize_compare('chd,bde,agbc,hiad,bdi,cgh,agdb') + self.optimize_compare('bdhe,acad,hiab,agac,hibd') + + def test_collapse(self): + # Inner products + self.optimize_compare('ab,ab,c->') + self.optimize_compare('ab,ab,c->c') + self.optimize_compare('ab,ab,cd,cd->') + self.optimize_compare('ab,ab,cd,cd->ac') + self.optimize_compare('ab,ab,cd,cd->cd') + self.optimize_compare('ab,ab,cd,cd,ef,ef->') + + def test_expand(self): + # Outer products + self.optimize_compare('ab,cd,ef->abcdef') + self.optimize_compare('ab,cd,ef->acdf') + self.optimize_compare('ab,cd,de->abcde') + self.optimize_compare('ab,cd,de->be') + self.optimize_compare('ab,bcd,cd->abcd') + self.optimize_compare('ab,bcd,cd->abd') + + def test_edge_cases(self): + # Difficult edge cases for optimization + self.optimize_compare('eb,cb,fb->cef') + self.optimize_compare('dd,fb,be,cdb->cef') + self.optimize_compare('bca,cdb,dbf,afc->') + self.optimize_compare('dcc,fce,ea,dbf->ab') + self.optimize_compare('fdf,cdd,ccd,afe->ae') + self.optimize_compare('abcd,ad') + self.optimize_compare('ed,fcd,ff,bcf->be') + self.optimize_compare('baa,dcf,af,cde->be') + self.optimize_compare('bd,db,eac->ace') + self.optimize_compare('fff,fae,bef,def->abd') + self.optimize_compare('efc,dbc,acf,fd->abe') + self.optimize_compare('ba,ac,da->bcd') + + def test_inner_product(self): + # Inner products + self.optimize_compare('ab,ab') + self.optimize_compare('ab,ba') + self.optimize_compare('abc,abc') + self.optimize_compare('abc,bac') + self.optimize_compare('abc,cba') + + def test_random_cases(self): + # Randomly built test cases + self.optimize_compare('aab,fa,df,ecc->bde') + self.optimize_compare('ecb,fef,bad,ed->ac') + self.optimize_compare('bcf,bbb,fbf,fc->') + self.optimize_compare('bb,ff,be->e') + self.optimize_compare('bcb,bb,fc,fff->') + self.optimize_compare('fbb,dfd,fc,fc->') + self.optimize_compare('afd,ba,cc,dc->bf') + self.optimize_compare('adb,bc,fa,cfc->d') + self.optimize_compare('bbd,bda,fc,db->acf') + self.optimize_compare('dba,ead,cad->bce') + self.optimize_compare('aef,fbc,dca->bde') + + def test_combined_views_mapping(self): + # gh-10792 + a = np.arange(9).reshape(1, 1, 3, 1, 3) + b = np.einsum('bbcdc->d', a) + assert_equal(b, [12]) + + def test_broadcasting_dot_cases(self): + # Ensures broadcasting cases are not mistaken for GEMM + + a = np.random.rand(1, 5, 4) + b = np.random.rand(4, 6) + c = np.random.rand(5, 6) + d = np.random.rand(10) + + self.optimize_compare('ijk,kl,jl', operands=[a, b, c]) + self.optimize_compare('ijk,kl,jl,i->i', operands=[a, b, c, d]) + + e = np.random.rand(1, 1, 5, 4) + f = np.random.rand(7, 7) + self.optimize_compare('abjk,kl,jl', operands=[e, b, c]) + self.optimize_compare('abjk,kl,jl,ab->ab', operands=[e, b, c, f]) + + # Edge case found in gh-11308 + g = np.arange(64).reshape(2, 4, 8) + self.optimize_compare('obk,ijk->ioj', operands=[g, g]) + + def test_output_order(self): + # Ensure output order is respected for optimize cases, the below + # contraction should yield a reshaped tensor view + # gh-16415 + + a = np.ones((2, 3, 5), order='F') + b = np.ones((4, 3), order='F') + + for opt in [True, False]: + tmp = np.einsum('...ft,mf->...mt', a, b, order='a', optimize=opt) + assert_(tmp.flags.f_contiguous) + + tmp = np.einsum('...ft,mf->...mt', a, b, order='f', optimize=opt) + assert_(tmp.flags.f_contiguous) + + tmp = np.einsum('...ft,mf->...mt', a, b, order='c', optimize=opt) + assert_(tmp.flags.c_contiguous) + + tmp = np.einsum('...ft,mf->...mt', a, b, order='k', optimize=opt) + assert_(tmp.flags.c_contiguous is False) + assert_(tmp.flags.f_contiguous is False) + + tmp = np.einsum('...ft,mf->...mt', a, b, optimize=opt) + assert_(tmp.flags.c_contiguous is False) + assert_(tmp.flags.f_contiguous is False) + + c = np.ones((4, 3), order='C') + for opt in [True, False]: + tmp = np.einsum('...ft,mf->...mt', a, c, order='a', optimize=opt) + assert_(tmp.flags.c_contiguous) + + d = np.ones((2, 3, 5), order='C') + for opt in [True, False]: + tmp = np.einsum('...ft,mf->...mt', d, c, order='a', optimize=opt) + assert_(tmp.flags.c_contiguous) + + def test_singleton_broadcasting(self): + eq = "ijp,ipq,ikq->ijk" + shapes = ((3, 1, 1), (3, 1, 3), (1, 3, 3)) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + eq = "jhcabhijaci,dfijejgh->fgje" + shapes = ( + (1, 1, 1, 1, 3, 1, 1, 1, 1, 1, 1), + (3, 1, 3, 1, 1, 1, 1, 2), + ) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + eq = "baegffahgc,hdggeff->dhg" + shapes = ((2, 1, 4, 1, 1, 1, 1, 2, 1, 1), (1, 1, 1, 1, 4, 1, 1)) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + eq = "cehgbaifff,fhhdegih->cdghbi" + shapes = ((1, 1, 1, 1, 1, 1, 1, 1, 1, 1), (2, 1, 1, 2, 4, 1, 1, 1)) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + eq = "gah,cdbcghefg->ef" + shapes = ((2, 3, 1), (1, 3, 1, 1, 1, 2, 1, 4, 1)) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + eq = "cacc,bcb->" + shapes = ((1, 1, 1, 1), (1, 4, 1)) + arrays = [np.random.rand(*shape) for shape in shapes] + self.optimize_compare(eq, operands=arrays) + + +class TestEinsumPath: + def build_operands(self, string, size_dict=global_size_dict): + + # Builds views based off initial operands + operands = [string] + terms = string.split('->')[0].split(',') + for term in terms: + dims = [size_dict[x] for x in term] + operands.append(np.random.rand(*dims)) + + return operands + + def assert_path_equal(self, comp, benchmark): + # Checks if list of tuples are equivalent + ret = (len(comp) == len(benchmark)) + assert_(ret) + for pos in range(len(comp) - 1): + ret &= isinstance(comp[pos + 1], tuple) + ret &= (comp[pos + 1] == benchmark[pos + 1]) + assert_(ret) + + def test_memory_contraints(self): + # Ensure memory constraints are satisfied + + outer_test = self.build_operands('a,b,c->abc') + + path, path_str = np.einsum_path(*outer_test, optimize=('greedy', 0)) + self.assert_path_equal(path, ['einsum_path', (0, 1, 2)]) + + path, path_str = np.einsum_path(*outer_test, optimize=('optimal', 0)) + self.assert_path_equal(path, ['einsum_path', (0, 1, 2)]) + + long_test = self.build_operands('acdf,jbje,gihb,hfac') + path, path_str = np.einsum_path(*long_test, optimize=('greedy', 0)) + self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)]) + + path, path_str = np.einsum_path(*long_test, optimize=('optimal', 0)) + self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)]) + + def test_long_paths(self): + # Long complex cases + + # Long test 1 + long_test1 = self.build_operands('acdf,jbje,gihb,hfac,gfac,gifabc,hfac') + path, path_str = np.einsum_path(*long_test1, optimize='greedy') + self.assert_path_equal(path, ['einsum_path', + (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)]) + + path, path_str = np.einsum_path(*long_test1, optimize='optimal') + self.assert_path_equal(path, ['einsum_path', + (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)]) + + # Long test 2 + long_test2 = self.build_operands('chd,bde,agbc,hiad,bdi,cgh,agdb') + path, path_str = np.einsum_path(*long_test2, optimize='greedy') + self.assert_path_equal(path, ['einsum_path', + (3, 4), (0, 3), (3, 4), (1, 3), (1, 2), (0, 1)]) + + path, path_str = np.einsum_path(*long_test2, optimize='optimal') + self.assert_path_equal(path, ['einsum_path', + (0, 5), (1, 4), (3, 4), (1, 3), (1, 2), (0, 1)]) + + def test_edge_paths(self): + # Difficult edge cases + + # Edge test1 + edge_test1 = self.build_operands('eb,cb,fb->cef') + path, path_str = np.einsum_path(*edge_test1, optimize='greedy') + self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)]) + + path, path_str = np.einsum_path(*edge_test1, optimize='optimal') + self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)]) + + # Edge test2 + edge_test2 = self.build_operands('dd,fb,be,cdb->cef') + path, path_str = np.einsum_path(*edge_test2, optimize='greedy') + self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)]) + + path, path_str = np.einsum_path(*edge_test2, optimize='optimal') + self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)]) + + # Edge test3 + edge_test3 = self.build_operands('bca,cdb,dbf,afc->') + path, path_str = np.einsum_path(*edge_test3, optimize='greedy') + self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)]) + + path, path_str = np.einsum_path(*edge_test3, optimize='optimal') + self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)]) + + # Edge test4 + edge_test4 = self.build_operands('dcc,fce,ea,dbf->ab') + path, path_str = np.einsum_path(*edge_test4, optimize='greedy') + self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)]) + + path, path_str = np.einsum_path(*edge_test4, optimize='optimal') + self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)]) + + # Edge test5 + edge_test4 = self.build_operands('a,ac,ab,ad,cd,bd,bc->', + size_dict={"a": 20, "b": 20, "c": 20, "d": 20}) + path, path_str = np.einsum_path(*edge_test4, optimize='greedy') + self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)]) + + path, path_str = np.einsum_path(*edge_test4, optimize='optimal') + self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)]) + + def test_path_type_input(self): + # Test explicit path handling + path_test = self.build_operands('dcc,fce,ea,dbf->ab') + + path, path_str = np.einsum_path(*path_test, optimize=False) + self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)]) + + path, path_str = np.einsum_path(*path_test, optimize=True) + self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)]) + + exp_path = ['einsum_path', (0, 2), (0, 2), (0, 1)] + path, path_str = np.einsum_path(*path_test, optimize=exp_path) + self.assert_path_equal(path, exp_path) + + # Double check einsum works on the input path + noopt = np.einsum(*path_test, optimize=False) + opt = np.einsum(*path_test, optimize=exp_path) + assert_almost_equal(noopt, opt) + + def test_path_type_input_internal_trace(self): + # gh-20962 + path_test = self.build_operands('cab,cdd->ab') + exp_path = ['einsum_path', (1,), (0, 1)] + + path, path_str = np.einsum_path(*path_test, optimize=exp_path) + self.assert_path_equal(path, exp_path) + + # Double check einsum works on the input path + noopt = np.einsum(*path_test, optimize=False) + opt = np.einsum(*path_test, optimize=exp_path) + assert_almost_equal(noopt, opt) + + def test_path_type_input_invalid(self): + path_test = self.build_operands('ab,bc,cd,de->ae') + exp_path = ['einsum_path', (2, 3), (0, 1)] + assert_raises(RuntimeError, np.einsum, *path_test, optimize=exp_path) + assert_raises( + RuntimeError, np.einsum_path, *path_test, optimize=exp_path) + + path_test = self.build_operands('a,a,a->a') + exp_path = ['einsum_path', (1,), (0, 1)] + assert_raises(RuntimeError, np.einsum, *path_test, optimize=exp_path) + assert_raises( + RuntimeError, np.einsum_path, *path_test, optimize=exp_path) + + def test_spaces(self): + # gh-10794 + arr = np.array([[1]]) + for sp in itertools.product(['', ' '], repeat=4): + # no error for any spacing + np.einsum('{}...a{}->{}...a{}'.format(*sp), arr) + +def test_overlap(): + a = np.arange(9, dtype=int).reshape(3, 3) + b = np.arange(9, dtype=int).reshape(3, 3) + d = np.dot(a, b) + # sanity check + c = np.einsum('ij,jk->ik', a, b) + assert_equal(c, d) + # gh-10080, out overlaps one of the operands + c = np.einsum('ij,jk->ik', a, b, out=b) + assert_equal(c, d) + +def test_einsum_chunking_precision(): + """Most einsum operations are reductions and until NumPy 2.3 reductions + never (or almost never?) used the `GROWINNER` mechanism to increase the + inner loop size when no buffers are needed. + Because einsum reductions work roughly: + + def inner(*inputs, out): + accumulate = 0 + for vals in zip(*inputs): + accumulate += prod(vals) + out[0] += accumulate + + Calling the inner-loop more often actually improves accuracy slightly + (same effect as pairwise summation but much less). + Without adding pairwise summation to the inner-loop it seems best to just + not use GROWINNER, a quick tests suggest that is maybe 1% slowdown for + the simplest `einsum("i,i->i", x, x)` case. + + (It is not clear that we should guarantee precision to this extend.) + """ + num = 1_000_000 + value = 1. + np.finfo(np.float64).eps * 8196 + res = np.einsum("i->", np.broadcast_to(np.array(value), num)) / num + + # At with GROWINNER 11 decimals succeed (larger will be less) + assert_almost_equal(res, value, decimal=15) diff --git a/local_packages/numpy/_core/tests/test_errstate.py b/local_packages/numpy/_core/tests/test_errstate.py new file mode 100644 index 0000000..f0735a0 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_errstate.py @@ -0,0 +1,131 @@ +import sysconfig + +import pytest + +import numpy as np +from numpy.testing import IS_WASM, assert_raises + +# The floating point emulation on ARM EABI systems lacking a hardware FPU is +# known to be buggy. This is an attempt to identify these hosts. It may not +# catch all possible cases, but it catches the known cases of gh-413 and +# gh-15562. +hosttype = sysconfig.get_config_var('HOST_GNU_TYPE') +arm_softfloat = False if hosttype is None else hosttype.endswith('gnueabi') + +class TestErrstate: + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.skipif(arm_softfloat, + reason='platform/cpu issue with FPU (gh-413,-15562)') + def test_invalid(self): + with np.errstate(all='raise', under='ignore'): + a = -np.arange(3) + # This should work + with np.errstate(invalid='ignore'): + np.sqrt(a) + # While this should fail! + with assert_raises(FloatingPointError): + np.sqrt(a) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.skipif(arm_softfloat, + reason='platform/cpu issue with FPU (gh-15562)') + def test_divide(self): + with np.errstate(all='raise', under='ignore'): + a = -np.arange(3) + # This should work + with np.errstate(divide='ignore'): + a // 0 + # While this should fail! + with assert_raises(FloatingPointError): + a // 0 + # As should this, see gh-15562 + with assert_raises(FloatingPointError): + a // a + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.skipif(arm_softfloat, + reason='platform/cpu issue with FPU (gh-15562)') + def test_errcall(self): + count = 0 + + def foo(*args): + nonlocal count + count += 1 + + olderrcall = np.geterrcall() + with np.errstate(call=foo): + assert np.geterrcall() is foo + with np.errstate(call=None): + assert np.geterrcall() is None + assert np.geterrcall() is olderrcall + assert count == 0 + + with np.errstate(call=foo, invalid="call"): + np.array(np.inf) - np.array(np.inf) + + assert count == 1 + + def test_errstate_decorator(self): + @np.errstate(all='ignore') + def foo(): + a = -np.arange(3) + a // 0 + + foo() + + def test_errstate_enter_once(self): + errstate = np.errstate(invalid="warn") + with errstate: + pass + + # The errstate context cannot be entered twice as that would not be + # thread-safe + with pytest.raises(TypeError, + match="Cannot enter `np.errstate` twice"): + with errstate: + pass + + @pytest.mark.skipif(IS_WASM, reason="wasm doesn't support asyncio") + def test_asyncio_safe(self): + # asyncio may not always work, let's assume its fine if missing + # Pyodide/wasm doesn't support it. If this test makes problems, + # it should just be skipped liberally (or run differently). + asyncio = pytest.importorskip("asyncio") + + @np.errstate(invalid="ignore") + def decorated(): + # Decorated non-async function (it is not safe to decorate an + # async one) + assert np.geterr()["invalid"] == "ignore" + + async def func1(): + decorated() + await asyncio.sleep(0.1) + decorated() + + async def func2(): + with np.errstate(invalid="raise"): + assert np.geterr()["invalid"] == "raise" + await asyncio.sleep(0.125) + assert np.geterr()["invalid"] == "raise" + + # for good sport, a third one with yet another state: + async def func3(): + with np.errstate(invalid="print"): + assert np.geterr()["invalid"] == "print" + await asyncio.sleep(0.11) + assert np.geterr()["invalid"] == "print" + + async def main(): + # simply run all three function multiple times: + await asyncio.gather( + func1(), func2(), func3(), func1(), func2(), func3(), + func1(), func2(), func3(), func1(), func2(), func3()) + + loop = asyncio.new_event_loop() + with np.errstate(invalid="warn"): + asyncio.run(main()) + assert np.geterr()["invalid"] == "warn" + + assert np.geterr()["invalid"] == "warn" # the default + loop.close() diff --git a/local_packages/numpy/_core/tests/test_extint128.py b/local_packages/numpy/_core/tests/test_extint128.py new file mode 100644 index 0000000..6e4d74b --- /dev/null +++ b/local_packages/numpy/_core/tests/test_extint128.py @@ -0,0 +1,217 @@ +import contextlib +import itertools +import operator + +import pytest + +import numpy as np +import numpy._core._multiarray_tests as mt +from numpy.testing import assert_equal, assert_raises + +INT64_MAX = np.iinfo(np.int64).max +INT64_MIN = np.iinfo(np.int64).min +INT64_MID = 2**32 + +# int128 is not two's complement, the sign bit is separate +INT128_MAX = 2**128 - 1 +INT128_MIN = -INT128_MAX +INT128_MID = 2**64 + +INT64_VALUES = ( + [INT64_MIN + j for j in range(20)] + + [INT64_MAX - j for j in range(20)] + + [INT64_MID + j for j in range(-20, 20)] + + [2 * INT64_MID + j for j in range(-20, 20)] + + [INT64_MID // 2 + j for j in range(-20, 20)] + + list(range(-70, 70)) +) + +INT128_VALUES = ( + [INT128_MIN + j for j in range(20)] + + [INT128_MAX - j for j in range(20)] + + [INT128_MID + j for j in range(-20, 20)] + + [2 * INT128_MID + j for j in range(-20, 20)] + + [INT128_MID // 2 + j for j in range(-20, 20)] + + list(range(-70, 70)) + + [False] # negative zero +) + +INT64_POS_VALUES = [x for x in INT64_VALUES if x > 0] + + +@contextlib.contextmanager +def exc_iter(*args): + """ + Iterate over Cartesian product of *args, and if an exception is raised, + add information of the current iterate. + """ + + value = [None] + + def iterate(): + for v in itertools.product(*args): + value[0] = v + yield v + + try: + yield iterate() + except Exception: + import traceback + msg = f"At: {repr(value[0])!r}\n{traceback.format_exc()}" + raise AssertionError(msg) + + +def test_safe_binop(): + # Test checked arithmetic routines + + ops = [ + (operator.add, 1), + (operator.sub, 2), + (operator.mul, 3) + ] + + with exc_iter(ops, INT64_VALUES, INT64_VALUES) as it: + for xop, a, b in it: + pyop, op = xop + c = pyop(a, b) + + if not (INT64_MIN <= c <= INT64_MAX): + assert_raises(OverflowError, mt.extint_safe_binop, a, b, op) + else: + d = mt.extint_safe_binop(a, b, op) + if c != d: + # assert_equal is slow + assert_equal(d, c) + + +def test_to_128(): + with exc_iter(INT64_VALUES) as it: + for a, in it: + b = mt.extint_to_128(a) + if a != b: + assert_equal(b, a) + + +def test_to_64(): + with exc_iter(INT128_VALUES) as it: + for a, in it: + if not (INT64_MIN <= a <= INT64_MAX): + assert_raises(OverflowError, mt.extint_to_64, a) + else: + b = mt.extint_to_64(a) + if a != b: + assert_equal(b, a) + + +def test_mul_64_64(): + with exc_iter(INT64_VALUES, INT64_VALUES) as it: + for a, b in it: + c = a * b + d = mt.extint_mul_64_64(a, b) + if c != d: + assert_equal(d, c) + + +def test_add_128(): + with exc_iter(INT128_VALUES, INT128_VALUES) as it: + for a, b in it: + c = a + b + if not (INT128_MIN <= c <= INT128_MAX): + assert_raises(OverflowError, mt.extint_add_128, a, b) + else: + d = mt.extint_add_128(a, b) + if c != d: + assert_equal(d, c) + + +def test_sub_128(): + with exc_iter(INT128_VALUES, INT128_VALUES) as it: + for a, b in it: + c = a - b + if not (INT128_MIN <= c <= INT128_MAX): + assert_raises(OverflowError, mt.extint_sub_128, a, b) + else: + d = mt.extint_sub_128(a, b) + if c != d: + assert_equal(d, c) + + +def test_neg_128(): + with exc_iter(INT128_VALUES) as it: + for a, in it: + b = -a + c = mt.extint_neg_128(a) + if b != c: + assert_equal(c, b) + + +def test_shl_128(): + with exc_iter(INT128_VALUES) as it: + for a, in it: + if a < 0: + b = -(((-a) << 1) & (2**128 - 1)) + else: + b = (a << 1) & (2**128 - 1) + c = mt.extint_shl_128(a) + if b != c: + assert_equal(c, b) + + +def test_shr_128(): + with exc_iter(INT128_VALUES) as it: + for a, in it: + if a < 0: + b = -((-a) >> 1) + else: + b = a >> 1 + c = mt.extint_shr_128(a) + if b != c: + assert_equal(c, b) + + +def test_gt_128(): + with exc_iter(INT128_VALUES, INT128_VALUES) as it: + for a, b in it: + c = a > b + d = mt.extint_gt_128(a, b) + if c != d: + assert_equal(d, c) + + +@pytest.mark.slow +def test_divmod_128_64(): + with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it: + for a, b in it: + if a >= 0: + c, cr = divmod(a, b) + else: + c, cr = divmod(-a, b) + c = -c + cr = -cr + + d, dr = mt.extint_divmod_128_64(a, b) + + if c != d or d != dr or b * d + dr != a: + assert_equal(d, c) + assert_equal(dr, cr) + assert_equal(b * d + dr, a) + + +def test_floordiv_128_64(): + with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it: + for a, b in it: + c = a // b + d = mt.extint_floordiv_128_64(a, b) + + if c != d: + assert_equal(d, c) + + +def test_ceildiv_128_64(): + with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it: + for a, b in it: + c = (a + b - 1) // b + d = mt.extint_ceildiv_128_64(a, b) + + if c != d: + assert_equal(d, c) diff --git a/local_packages/numpy/_core/tests/test_finfo.py b/local_packages/numpy/_core/tests/test_finfo.py new file mode 100644 index 0000000..5703b8d --- /dev/null +++ b/local_packages/numpy/_core/tests/test_finfo.py @@ -0,0 +1,86 @@ +import pytest + +import numpy as np +from numpy import exp2, log10 +from numpy._core import numerictypes as ntypes + + +class MachArLike: + """Minimal class to simulate machine arithmetic parameters.""" + def __init__(self, dtype, machep, negep, minexp, maxexp, nmant, iexp): + self.dtype = dtype + self.machep = machep + self.negep = negep + self.minexp = minexp + self.maxexp = maxexp + self.nmant = nmant + self.iexp = iexp + self.eps = exp2(dtype(-nmant)) + self.epsneg = exp2(dtype(negep)) + self.precision = int(-log10(self.eps)) + self.resolution = dtype(10) ** (-self.precision) + + +@pytest.fixture +def float16_ma(): + """Machine arithmetic parameters for float16.""" + f16 = ntypes.float16 + return MachArLike(f16, + machep=-10, + negep=-11, + minexp=-14, + maxexp=16, + nmant=10, + iexp=5) + + +@pytest.fixture +def float32_ma(): + """Machine arithmetic parameters for float32.""" + f32 = ntypes.float32 + return MachArLike(f32, + machep=-23, + negep=-24, + minexp=-126, + maxexp=128, + nmant=23, + iexp=8) + + +@pytest.fixture +def float64_ma(): + """Machine arithmetic parameters for float64.""" + f64 = ntypes.float64 + return MachArLike(f64, + machep=-52, + negep=-53, + minexp=-1022, + maxexp=1024, + nmant=52, + iexp=11) + + +@pytest.mark.parametrize("dtype,ma_fixture", [ + (np.half, "float16_ma"), + (np.float32, "float32_ma"), + (np.float64, "float64_ma"), +]) +@pytest.mark.parametrize("prop", [ + 'machep', 'negep', 'minexp', 'maxexp', 'nmant', 'iexp', + 'eps', 'epsneg', 'precision', 'resolution' +]) +@pytest.mark.thread_unsafe( + reason="complex fixture setup is thread-unsafe (pytest-dev/pytest#13768.)" +) +def test_finfo_properties(dtype, ma_fixture, prop, request): + """Test that finfo properties match expected machine arithmetic values.""" + ma = request.getfixturevalue(ma_fixture) + finfo = np.finfo(dtype) + + actual = getattr(finfo, prop) + expected = getattr(ma, prop) + + assert actual == expected, ( + f"finfo({dtype}) property '{prop}' mismatch: " + f"expected {expected}, got {actual}" + ) diff --git a/local_packages/numpy/_core/tests/test_function_base.py b/local_packages/numpy/_core/tests/test_function_base.py new file mode 100644 index 0000000..c6e1039 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_function_base.py @@ -0,0 +1,504 @@ +import platform +import sys + +import pytest + +import numpy as np +from numpy import ( + arange, + array, + dtype, + errstate, + geomspace, + isnan, + linspace, + logspace, + ndarray, + nextafter, + sqrt, + stack, +) +from numpy._core import sctypes +from numpy._core.function_base import add_newdoc +from numpy.testing import ( + IS_PYPY, + assert_, + assert_allclose, + assert_array_equal, + assert_equal, + assert_raises, +) + + +def _is_armhf(): + # Check if the current platform is ARMHF (32-bit ARM architecture) + architecture = platform.architecture() + return platform.machine().startswith('arm') and architecture[0] == '32bit' + +class PhysicalQuantity(float): + def __new__(cls, value): + return float.__new__(cls, value) + + def __add__(self, x): + assert_(isinstance(x, PhysicalQuantity)) + return PhysicalQuantity(float(x) + float(self)) + __radd__ = __add__ + + def __sub__(self, x): + assert_(isinstance(x, PhysicalQuantity)) + return PhysicalQuantity(float(self) - float(x)) + + def __rsub__(self, x): + assert_(isinstance(x, PhysicalQuantity)) + return PhysicalQuantity(float(x) - float(self)) + + def __mul__(self, x): + return PhysicalQuantity(float(x) * float(self)) + __rmul__ = __mul__ + + def __truediv__(self, x): + return PhysicalQuantity(float(self) / float(x)) + + def __rtruediv__(self, x): + return PhysicalQuantity(float(x) / float(self)) + + +class PhysicalQuantity2(ndarray): + __array_priority__ = 10 + + +class TestLogspace: + + def test_basic(self): + y = logspace(0, 6) + assert_(len(y) == 50) + y = logspace(0, 6, num=100) + assert_(y[-1] == 10 ** 6) + y = logspace(0, 6, endpoint=False) + assert_(y[-1] < 10 ** 6) + y = logspace(0, 6, num=7) + assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6]) + + def test_start_stop_array(self): + start = array([0., 1.]) + stop = array([6., 7.]) + t1 = logspace(start, stop, 6) + t2 = stack([logspace(_start, _stop, 6) + for _start, _stop in zip(start, stop)], axis=1) + assert_equal(t1, t2) + t3 = logspace(start, stop[0], 6) + t4 = stack([logspace(_start, stop[0], 6) + for _start in start], axis=1) + assert_equal(t3, t4) + t5 = logspace(start, stop, 6, axis=-1) + assert_equal(t5, t2.T) + + @pytest.mark.parametrize("axis", [0, 1, -1]) + def test_base_array(self, axis: int): + start = 1 + stop = 2 + num = 6 + base = array([1, 2]) + t1 = logspace(start, stop, num=num, base=base, axis=axis) + t2 = stack( + [logspace(start, stop, num=num, base=_base) for _base in base], + axis=(axis + 1) % t1.ndim, + ) + assert_equal(t1, t2) + + @pytest.mark.parametrize("axis", [0, 1, -1]) + def test_stop_base_array(self, axis: int): + start = 1 + stop = array([2, 3]) + num = 6 + base = array([1, 2]) + t1 = logspace(start, stop, num=num, base=base, axis=axis) + t2 = stack( + [logspace(start, _stop, num=num, base=_base) + for _stop, _base in zip(stop, base)], + axis=(axis + 1) % t1.ndim, + ) + assert_equal(t1, t2) + + def test_dtype(self): + y = logspace(0, 6, dtype='float32') + assert_equal(y.dtype, dtype('float32')) + y = logspace(0, 6, dtype='float64') + assert_equal(y.dtype, dtype('float64')) + y = logspace(0, 6, dtype='int32') + assert_equal(y.dtype, dtype('int32')) + + def test_physical_quantities(self): + a = PhysicalQuantity(1.0) + b = PhysicalQuantity(5.0) + assert_equal(logspace(a, b), logspace(1.0, 5.0)) + + def test_subclass(self): + a = array(1).view(PhysicalQuantity2) + b = array(7).view(PhysicalQuantity2) + ls = logspace(a, b) + assert type(ls) is PhysicalQuantity2 + assert_equal(ls, logspace(1.0, 7.0)) + ls = logspace(a, b, 1) + assert type(ls) is PhysicalQuantity2 + assert_equal(ls, logspace(1.0, 7.0, 1)) + + +class TestGeomspace: + + def test_basic(self): + y = geomspace(1, 1e6) + assert_(len(y) == 50) + y = geomspace(1, 1e6, num=100) + assert_(y[-1] == 10 ** 6) + y = geomspace(1, 1e6, endpoint=False) + assert_(y[-1] < 10 ** 6) + y = geomspace(1, 1e6, num=7) + assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6]) + + y = geomspace(8, 2, num=3) + assert_allclose(y, [8, 4, 2]) + assert_array_equal(y.imag, 0) + + y = geomspace(-1, -100, num=3) + assert_array_equal(y, [-1, -10, -100]) + assert_array_equal(y.imag, 0) + + y = geomspace(-100, -1, num=3) + assert_array_equal(y, [-100, -10, -1]) + assert_array_equal(y.imag, 0) + + def test_boundaries_match_start_and_stop_exactly(self): + # make sure that the boundaries of the returned array exactly + # equal 'start' and 'stop' - this isn't obvious because + # np.exp(np.log(x)) isn't necessarily exactly equal to x + start = 0.3 + stop = 20.3 + + y = geomspace(start, stop, num=1) + assert_equal(y[0], start) + + y = geomspace(start, stop, num=1, endpoint=False) + assert_equal(y[0], start) + + y = geomspace(start, stop, num=3) + assert_equal(y[0], start) + assert_equal(y[-1], stop) + + y = geomspace(start, stop, num=3, endpoint=False) + assert_equal(y[0], start) + + def test_nan_interior(self): + with errstate(invalid='ignore'): + y = geomspace(-3, 3, num=4) + + assert_equal(y[0], -3.0) + assert_(isnan(y[1:-1]).all()) + assert_equal(y[3], 3.0) + + with errstate(invalid='ignore'): + y = geomspace(-3, 3, num=4, endpoint=False) + + assert_equal(y[0], -3.0) + assert_(isnan(y[1:]).all()) + + def test_complex(self): + # Purely imaginary + y = geomspace(1j, 16j, num=5) + assert_allclose(y, [1j, 2j, 4j, 8j, 16j]) + assert_array_equal(y.real, 0) + + y = geomspace(-4j, -324j, num=5) + assert_allclose(y, [-4j, -12j, -36j, -108j, -324j]) + assert_array_equal(y.real, 0) + + y = geomspace(1 + 1j, 1000 + 1000j, num=4) + assert_allclose(y, [1 + 1j, 10 + 10j, 100 + 100j, 1000 + 1000j]) + + y = geomspace(-1 + 1j, -1000 + 1000j, num=4) + assert_allclose(y, [-1 + 1j, -10 + 10j, -100 + 100j, -1000 + 1000j]) + + # Logarithmic spirals + y = geomspace(-1, 1, num=3, dtype=complex) + assert_allclose(y, [-1, 1j, +1]) + + y = geomspace(0 + 3j, -3 + 0j, 3) + assert_allclose(y, [0 + 3j, -3 / sqrt(2) + 3j / sqrt(2), -3 + 0j]) + y = geomspace(0 + 3j, 3 + 0j, 3) + assert_allclose(y, [0 + 3j, 3 / sqrt(2) + 3j / sqrt(2), 3 + 0j]) + y = geomspace(-3 + 0j, 0 - 3j, 3) + assert_allclose(y, [-3 + 0j, -3 / sqrt(2) - 3j / sqrt(2), 0 - 3j]) + y = geomspace(0 + 3j, -3 + 0j, 3) + assert_allclose(y, [0 + 3j, -3 / sqrt(2) + 3j / sqrt(2), -3 + 0j]) + y = geomspace(-2 - 3j, 5 + 7j, 7) + assert_allclose(y, [-2 - 3j, -0.29058977 - 4.15771027j, + 2.08885354 - 4.34146838j, 4.58345529 - 3.16355218j, + 6.41401745 - 0.55233457j, 6.75707386 + 3.11795092j, + 5 + 7j]) + + # Type promotion should prevent the -5 from becoming a NaN + y = geomspace(3j, -5, 2) + assert_allclose(y, [3j, -5]) + y = geomspace(-5, 3j, 2) + assert_allclose(y, [-5, 3j]) + + def test_complex_shortest_path(self): + # test the shortest logarithmic spiral is used, see gh-25644 + x = 1.2 + 3.4j + y = np.exp(1j * (np.pi - .1)) * x + z = np.geomspace(x, y, 5) + expected = np.array([1.2 + 3.4j, -1.47384 + 3.2905616j, + -3.33577588 + 1.36842949j, -3.36011056 - 1.30753855j, + -1.53343861 - 3.26321406j]) + np.testing.assert_array_almost_equal(z, expected) + + def test_dtype(self): + y = geomspace(1, 1e6, dtype='float32') + assert_equal(y.dtype, dtype('float32')) + y = geomspace(1, 1e6, dtype='float64') + assert_equal(y.dtype, dtype('float64')) + y = geomspace(1, 1e6, dtype='int32') + assert_equal(y.dtype, dtype('int32')) + + # Native types + y = geomspace(1, 1e6, dtype=float) + assert_equal(y.dtype, dtype('float64')) + y = geomspace(1, 1e6, dtype=complex) + assert_equal(y.dtype, dtype('complex128')) + + def test_start_stop_array_scalar(self): + lim1 = array([120, 100], dtype="int8") + lim2 = array([-120, -100], dtype="int8") + lim3 = array([1200, 1000], dtype="uint16") + t1 = geomspace(lim1[0], lim1[1], 5) + t2 = geomspace(lim2[0], lim2[1], 5) + t3 = geomspace(lim3[0], lim3[1], 5) + t4 = geomspace(120.0, 100.0, 5) + t5 = geomspace(-120.0, -100.0, 5) + t6 = geomspace(1200.0, 1000.0, 5) + + # t3 uses float32, t6 uses float64 + assert_allclose(t1, t4, rtol=1e-2) + assert_allclose(t2, t5, rtol=1e-2) + assert_allclose(t3, t6, rtol=1e-5) + + def test_start_stop_array(self): + # Try to use all special cases. + start = array([1.e0, 32., 1j, -4j, 1 + 1j, -1]) + stop = array([1.e4, 2., 16j, -324j, 10000 + 10000j, 1]) + t1 = geomspace(start, stop, 5) + t2 = stack([geomspace(_start, _stop, 5) + for _start, _stop in zip(start, stop)], axis=1) + assert_equal(t1, t2) + t3 = geomspace(start, stop[0], 5) + t4 = stack([geomspace(_start, stop[0], 5) + for _start in start], axis=1) + assert_equal(t3, t4) + t5 = geomspace(start, stop, 5, axis=-1) + assert_equal(t5, t2.T) + + def test_physical_quantities(self): + a = PhysicalQuantity(1.0) + b = PhysicalQuantity(5.0) + assert_equal(geomspace(a, b), geomspace(1.0, 5.0)) + + def test_subclass(self): + a = array(1).view(PhysicalQuantity2) + b = array(7).view(PhysicalQuantity2) + gs = geomspace(a, b) + assert type(gs) is PhysicalQuantity2 + assert_equal(gs, geomspace(1.0, 7.0)) + gs = geomspace(a, b, 1) + assert type(gs) is PhysicalQuantity2 + assert_equal(gs, geomspace(1.0, 7.0, 1)) + + def test_bounds(self): + assert_raises(ValueError, geomspace, 0, 10) + assert_raises(ValueError, geomspace, 10, 0) + assert_raises(ValueError, geomspace, 0, 0) + + +class TestLinspace: + + def test_basic(self): + y = linspace(0, 10) + assert_(len(y) == 50) + y = linspace(2, 10, num=100) + assert_(y[-1] == 10) + y = linspace(2, 10, endpoint=False) + assert_(y[-1] < 10) + assert_raises(ValueError, linspace, 0, 10, num=-1) + + def test_corner(self): + y = list(linspace(0, 1, 1)) + assert_(y == [0.0], y) + assert_raises(TypeError, linspace, 0, 1, num=2.5) + + def test_type(self): + t1 = linspace(0, 1, 0).dtype + t2 = linspace(0, 1, 1).dtype + t3 = linspace(0, 1, 2).dtype + assert_equal(t1, t2) + assert_equal(t2, t3) + + def test_dtype(self): + y = linspace(0, 6, dtype='float32') + assert_equal(y.dtype, dtype('float32')) + y = linspace(0, 6, dtype='float64') + assert_equal(y.dtype, dtype('float64')) + y = linspace(0, 6, dtype='int32') + assert_equal(y.dtype, dtype('int32')) + + def test_start_stop_array_scalar(self): + lim1 = array([-120, 100], dtype="int8") + lim2 = array([120, -100], dtype="int8") + lim3 = array([1200, 1000], dtype="uint16") + t1 = linspace(lim1[0], lim1[1], 5) + t2 = linspace(lim2[0], lim2[1], 5) + t3 = linspace(lim3[0], lim3[1], 5) + t4 = linspace(-120.0, 100.0, 5) + t5 = linspace(120.0, -100.0, 5) + t6 = linspace(1200.0, 1000.0, 5) + assert_equal(t1, t4) + assert_equal(t2, t5) + assert_equal(t3, t6) + + def test_start_stop_array(self): + start = array([-120, 120], dtype="int8") + stop = array([100, -100], dtype="int8") + t1 = linspace(start, stop, 5) + t2 = stack([linspace(_start, _stop, 5) + for _start, _stop in zip(start, stop)], axis=1) + assert_equal(t1, t2) + t3 = linspace(start, stop[0], 5) + t4 = stack([linspace(_start, stop[0], 5) + for _start in start], axis=1) + assert_equal(t3, t4) + t5 = linspace(start, stop, 5, axis=-1) + assert_equal(t5, t2.T) + + def test_complex(self): + lim1 = linspace(1 + 2j, 3 + 4j, 5) + t1 = array([1.0 + 2.j, 1.5 + 2.5j, 2.0 + 3j, 2.5 + 3.5j, 3.0 + 4j]) + lim2 = linspace(1j, 10, 5) + t2 = array([0.0 + 1.j, 2.5 + 0.75j, 5.0 + 0.5j, 7.5 + 0.25j, 10.0 + 0j]) + assert_equal(lim1, t1) + assert_equal(lim2, t2) + + def test_physical_quantities(self): + a = PhysicalQuantity(0.0) + b = PhysicalQuantity(1.0) + assert_equal(linspace(a, b), linspace(0.0, 1.0)) + + def test_subclass(self): + a = array(0).view(PhysicalQuantity2) + b = array(1).view(PhysicalQuantity2) + ls = linspace(a, b) + assert type(ls) is PhysicalQuantity2 + assert_equal(ls, linspace(0.0, 1.0)) + ls = linspace(a, b, 1) + assert type(ls) is PhysicalQuantity2 + assert_equal(ls, linspace(0.0, 1.0, 1)) + + def test_array_interface(self): + # Regression test for https://github.com/numpy/numpy/pull/6659 + # Ensure that start/stop can be objects that implement + # __array_interface__ and are convertible to numeric scalars + + class Arrayish: + """ + A generic object that supports the __array_interface__ and hence + can in principle be converted to a numeric scalar, but is not + otherwise recognized as numeric, but also happens to support + multiplication by floats. + + Data should be an object that implements the buffer interface, + and contains at least 4 bytes. + """ + + def __init__(self, data): + self._data = data + + @property + def __array_interface__(self): + return {'shape': (), 'typestr': ' 250) + assert_(len(np.lib._index_tricks_impl.mgrid.__doc__) > 300) + + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + def test_errors_are_ignored(self): + prev_doc = np._core.flatiter.index.__doc__ + # nothing changed, but error ignored, this should probably + # give a warning (or even error) in the future. + add_newdoc("numpy._core", "flatiter", ("index", "bad docstring")) + assert prev_doc == np._core.flatiter.index.__doc__ diff --git a/local_packages/numpy/_core/tests/test_getlimits.py b/local_packages/numpy/_core/tests/test_getlimits.py new file mode 100644 index 0000000..4e911b8 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_getlimits.py @@ -0,0 +1,171 @@ +""" Test functions for limits module. + +""" +import types +import warnings + +import pytest + +import numpy as np +from numpy import double, half, longdouble, single +from numpy._core import finfo, iinfo +from numpy.testing import assert_, assert_equal, assert_raises + +################################################## + +class TestPythonFloat: + def test_singleton(self): + ftype = finfo(float) + ftype2 = finfo(float) + assert_equal(id(ftype), id(ftype2)) + +class TestHalf: + def test_singleton(self): + ftype = finfo(half) + ftype2 = finfo(half) + assert_equal(id(ftype), id(ftype2)) + +class TestSingle: + def test_singleton(self): + ftype = finfo(single) + ftype2 = finfo(single) + assert_equal(id(ftype), id(ftype2)) + +class TestDouble: + def test_singleton(self): + ftype = finfo(double) + ftype2 = finfo(double) + assert_equal(id(ftype), id(ftype2)) + +class TestLongdouble: + def test_singleton(self): + ftype = finfo(longdouble) + ftype2 = finfo(longdouble) + assert_equal(id(ftype), id(ftype2)) + +def assert_finfo_equal(f1, f2): + # assert two finfo instances have the same attributes + for attr in ('bits', 'eps', 'epsneg', 'iexp', 'machep', + 'max', 'maxexp', 'min', 'minexp', 'negep', 'nexp', + 'nmant', 'precision', 'resolution', 'tiny', + 'smallest_normal', 'smallest_subnormal'): + assert_equal(getattr(f1, attr), getattr(f2, attr), + f'finfo instances {f1} and {f2} differ on {attr}') + +def assert_iinfo_equal(i1, i2): + # assert two iinfo instances have the same attributes + for attr in ('bits', 'min', 'max'): + assert_equal(getattr(i1, attr), getattr(i2, attr), + f'iinfo instances {i1} and {i2} differ on {attr}') + +class TestFinfo: + def test_basic(self): + dts = list(zip(['f2', 'f4', 'f8', 'c8', 'c16'], + [np.float16, np.float32, np.float64, np.complex64, + np.complex128])) + for dt1, dt2 in dts: + assert_finfo_equal(finfo(dt1), finfo(dt2)) + + assert_raises(ValueError, finfo, 'i4') + + def test_regression_gh23108(self): + # np.float32(1.0) and np.float64(1.0) have the same hash and are + # equal under the == operator + f1 = np.finfo(np.float32(1.0)) + f2 = np.finfo(np.float64(1.0)) + assert f1 != f2 + + def test_regression_gh23867(self): + class NonHashableWithDtype: + __hash__ = None + dtype = np.dtype('float32') + + x = NonHashableWithDtype() + assert np.finfo(x) == np.finfo(x.dtype) + + +class TestIinfo: + def test_basic(self): + dts = list(zip(['i1', 'i2', 'i4', 'i8', + 'u1', 'u2', 'u4', 'u8'], + [np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64])) + for dt1, dt2 in dts: + assert_iinfo_equal(iinfo(dt1), iinfo(dt2)) + + assert_raises(ValueError, iinfo, 'f4') + + def test_unsigned_max(self): + types = np._core.sctypes['uint'] + for T in types: + with np.errstate(over="ignore"): + max_calculated = T(0) - T(1) + assert_equal(iinfo(T).max, max_calculated) + +class TestRepr: + def test_iinfo_repr(self): + expected = "iinfo(min=-32768, max=32767, dtype=int16)" + assert_equal(repr(np.iinfo(np.int16)), expected) + + def test_finfo_repr(self): + expected = "finfo(resolution=1e-06, min=-3.4028235e+38,"\ + " max=3.4028235e+38, dtype=float32)" + assert_equal(repr(np.finfo(np.float32)), expected) + + +def test_instances(): + # Test the finfo and iinfo results on numeric instances agree with + # the results on the corresponding types + + for c in [int, np.int16, np.int32, np.int64]: + class_iinfo = iinfo(c) + instance_iinfo = iinfo(c(12)) + + assert_iinfo_equal(class_iinfo, instance_iinfo) + + for c in [float, np.float16, np.float32, np.float64]: + class_finfo = finfo(c) + instance_finfo = finfo(c(1.2)) + assert_finfo_equal(class_finfo, instance_finfo) + + with pytest.raises(ValueError): + iinfo(10.) + + with pytest.raises(ValueError): + iinfo('hi') + + with pytest.raises(ValueError): + finfo(np.int64(1)) + + +def test_subnormal_warning(): + """Test that the subnormal is zero warning is not being raised.""" + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + # Test for common float types + for dtype in [np.float16, np.float32, np.float64]: + f = finfo(dtype) + _ = f.smallest_subnormal + # Also test longdouble + with np.errstate(all='ignore'): + fld = finfo(np.longdouble) + _ = fld.smallest_subnormal + # Check no warnings were raised + assert len(w) == 0 + + +def test_plausible_finfo(): + # Assert that finfo returns reasonable results for all types + for ftype in np._core.sctypes['float'] + np._core.sctypes['complex']: + info = np.finfo(ftype) + assert_(info.nmant > 1) + assert_(info.minexp < -1) + assert_(info.maxexp > 1) + + +class TestRuntimeSubscriptable: + def test_finfo_generic(self): + assert isinstance(np.finfo[np.float64], types.GenericAlias) + + def test_iinfo_generic(self): + assert isinstance(np.iinfo[np.int_], types.GenericAlias) diff --git a/local_packages/numpy/_core/tests/test_half.py b/local_packages/numpy/_core/tests/test_half.py new file mode 100644 index 0000000..3ced5b4 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_half.py @@ -0,0 +1,593 @@ +import platform + +import pytest + +import numpy as np +from numpy import float16, float32, float64, uint16 +from numpy.testing import IS_WASM, assert_, assert_equal + + +def assert_raises_fpe(strmatch, callable, *args, **kwargs): + try: + callable(*args, **kwargs) + except FloatingPointError as exc: + assert_(str(exc).find(strmatch) >= 0, + f"Did not raise floating point {strmatch} error") + else: + assert_(False, + f"Did not raise floating point {strmatch} error") + +class TestHalf: + def _create_arrays_all(self): + # An array of all possible float16 values + all_f16 = np.arange(0x10000, dtype=uint16) + all_f16 = all_f16.view(float16) + + # NaN value can cause an invalid FP exception if HW is being used + with np.errstate(invalid='ignore'): + all_f32 = np.array(all_f16, dtype=float32) + all_f64 = np.array(all_f16, dtype=float64) + return all_f16, all_f32, all_f64 + + def _create_arrays_nonan(self): + # An array of all non-NaN float16 values, in sorted order + nonan_f16 = np.concatenate( + (np.arange(0xfc00, 0x7fff, -1, dtype=uint16), + np.arange(0x0000, 0x7c01, 1, dtype=uint16))) + nonan_f16 = nonan_f16.view(float16) + nonan_f32 = np.array(nonan_f16, dtype=float32) + nonan_f64 = np.array(nonan_f16, dtype=float64) + return nonan_f16, nonan_f32, nonan_f64 + + def _create_arrays_finite(self): + nonan_f16, nonan_f32, nonan_f64 = self._create_arrays_nonan() + finite_f16 = nonan_f16[1:-1] + finite_f32 = nonan_f32[1:-1] + finite_f64 = nonan_f64[1:-1] + return finite_f16, finite_f32, finite_f64 + + def test_half_conversions(self): + """Checks that all 16-bit values survive conversion + to/from 32-bit and 64-bit float""" + # Because the underlying routines preserve the NaN bits, every + # value is preserved when converting to/from other floats. + all_f16, all_f32, all_f64 = self._create_arrays_all() + nonan_f16, _, _ = self._create_arrays_nonan() + + # Convert from float32 back to float16 + with np.errstate(invalid='ignore'): + b = np.array(all_f32, dtype=float16) + # avoid testing NaNs due to differing bit patterns in Q/S NaNs + b_nn = b == b + assert_equal(all_f16[b_nn].view(dtype=uint16), + b[b_nn].view(dtype=uint16)) + + # Convert from float64 back to float16 + with np.errstate(invalid='ignore'): + b = np.array(all_f64, dtype=float16) + b_nn = b == b + assert_equal(all_f16[b_nn].view(dtype=uint16), + b[b_nn].view(dtype=uint16)) + + # Convert float16 to longdouble and back + # This doesn't necessarily preserve the extra NaN bits, + # so exclude NaNs. + a_ld = np.array(nonan_f16, dtype=np.longdouble) + b = np.array(a_ld, dtype=float16) + assert_equal(nonan_f16.view(dtype=uint16), + b.view(dtype=uint16)) + + # Check the range for which all integers can be represented + i_int = np.arange(-2048, 2049) + i_f16 = np.array(i_int, dtype=float16) + j = np.array(i_f16, dtype=int) + assert_equal(i_int, j) + + @pytest.mark.parametrize("string_dt", ["S", "U"]) + def test_half_conversion_to_string(self, string_dt): + # Currently uses S/U32 (which is sufficient for float32) + expected_dt = np.dtype(f"{string_dt}32") + assert np.promote_types(np.float16, string_dt) == expected_dt + assert np.promote_types(string_dt, np.float16) == expected_dt + + arr = np.ones(3, dtype=np.float16).astype(string_dt) + assert arr.dtype == expected_dt + + @pytest.mark.parametrize("dtype", ["S", "U", object]) + def test_to_half_cast_error(self, dtype): + arr = np.array(["3M"], dtype=dtype) + with pytest.raises(ValueError): + arr.astype(np.float16) + + arr = np.array(["23490349034"], dtype=dtype) + with np.errstate(all="warn"): + with pytest.warns(RuntimeWarning): + arr.astype(np.float16) + + with np.errstate(all="raise"): + with pytest.raises(FloatingPointError): + arr.astype(np.float16) + + @pytest.mark.parametrize("string_dt", ["S", "U"]) + def test_half_conversion_from_string(self, string_dt): + string = np.array("3.1416", dtype=string_dt) + assert string.astype(np.float16) == np.array(3.1416, dtype=np.float16) + + @pytest.mark.parametrize("offset", [None, "up", "down"]) + @pytest.mark.parametrize("shift", [None, "up", "down"]) + @pytest.mark.parametrize("float_t", [np.float32, np.float64]) + def test_half_conversion_rounding(self, float_t, shift, offset): + # Assumes that round to even is used during casting. + max_pattern = np.float16(np.finfo(np.float16).max).view(np.uint16) + + # Test all (positive) finite numbers, denormals are most interesting + # however: + f16s_patterns = np.arange(0, max_pattern + 1, dtype=np.uint16) + f16s_float = f16s_patterns.view(np.float16).astype(float_t) + + # Shift the values by half a bit up or a down (or do not shift), + if shift == "up": + f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[1:] + elif shift == "down": + f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[:-1] + else: + f16s_float = f16s_float[1:-1] + + # Increase the float by a minimal value: + if offset == "up": + f16s_float = np.nextafter(f16s_float, float_t(np.inf)) + elif offset == "down": + f16s_float = np.nextafter(f16s_float, float_t(-np.inf)) + + # Convert back to float16 and its bit pattern: + res_patterns = f16s_float.astype(np.float16).view(np.uint16) + + # The above calculation tries the original values, or the exact + # midpoints between the float16 values. It then further offsets them + # by as little as possible. If no offset occurs, "round to even" + # logic will be necessary, an arbitrarily small offset should cause + # normal up/down rounding always. + + # Calculate the expected pattern: + cmp_patterns = f16s_patterns[1:-1].copy() + + if shift == "down" and offset != "up": + shift_pattern = -1 + elif shift == "up" and offset != "down": + shift_pattern = 1 + else: + # There cannot be a shift, either shift is None, so all rounding + # will go back to original, or shift is reduced by offset too much. + shift_pattern = 0 + + # If rounding occurs, is it normal rounding or round to even? + if offset is None: + # Round to even occurs, modify only non-even, cast to allow + (-1) + cmp_patterns[0::2].view(np.int16)[...] += shift_pattern + else: + cmp_patterns.view(np.int16)[...] += shift_pattern + + assert_equal(res_patterns, cmp_patterns) + + @pytest.mark.parametrize(["float_t", "uint_t", "bits"], + [(np.float32, np.uint32, 23), + (np.float64, np.uint64, 52)]) + def test_half_conversion_denormal_round_even(self, float_t, uint_t, bits): + # Test specifically that all bits are considered when deciding + # whether round to even should occur (i.e. no bits are lost at the + # end. Compare also gh-12721. The most bits can get lost for the + # smallest denormal: + smallest_value = np.uint16(1).view(np.float16).astype(float_t) + assert smallest_value == 2**-24 + + # Will be rounded to zero based on round to even rule: + rounded_to_zero = smallest_value / float_t(2) + assert rounded_to_zero.astype(np.float16) == 0 + + # The significand will be all 0 for the float_t, test that we do not + # lose the lower ones of these: + for i in range(bits): + # slightly increasing the value should make it round up: + larger_pattern = rounded_to_zero.view(uint_t) | uint_t(1 << i) + larger_value = larger_pattern.view(float_t) + assert larger_value.astype(np.float16) == smallest_value + + def test_nans_infs(self): + all_f16, all_f32, _ = self._create_arrays_all() + with np.errstate(all='ignore'): + # Check some of the ufuncs + assert_equal(np.isnan(all_f16), np.isnan(all_f32)) + assert_equal(np.isinf(all_f16), np.isinf(all_f32)) + assert_equal(np.isfinite(all_f16), np.isfinite(all_f32)) + assert_equal(np.signbit(all_f16), np.signbit(all_f32)) + assert_equal(np.spacing(float16(65504)), np.inf) + + # Check comparisons of all values with NaN + nan = float16(np.nan) + + assert_(not (all_f16 == nan).any()) + assert_(not (nan == all_f16).any()) + + assert_((all_f16 != nan).all()) + assert_((nan != all_f16).all()) + + assert_(not (all_f16 < nan).any()) + assert_(not (nan < all_f16).any()) + + assert_(not (all_f16 <= nan).any()) + assert_(not (nan <= all_f16).any()) + + assert_(not (all_f16 > nan).any()) + assert_(not (nan > all_f16).any()) + + assert_(not (all_f16 >= nan).any()) + assert_(not (nan >= all_f16).any()) + + def test_half_values(self): + """Confirms a small number of known half values""" + a = np.array([1.0, -1.0, + 2.0, -2.0, + 0.0999755859375, 0.333251953125, # 1/10, 1/3 + 65504, -65504, # Maximum magnitude + 2.0**(-14), -2.0**(-14), # Minimum normal + 2.0**(-24), -2.0**(-24), # Minimum subnormal + 0, -1 / 1e1000, # Signed zeros + np.inf, -np.inf]) + b = np.array([0x3c00, 0xbc00, + 0x4000, 0xc000, + 0x2e66, 0x3555, + 0x7bff, 0xfbff, + 0x0400, 0x8400, + 0x0001, 0x8001, + 0x0000, 0x8000, + 0x7c00, 0xfc00], dtype=uint16) + b = b.view(dtype=float16) + assert_equal(a, b) + + def test_half_rounding(self): + """Checks that rounding when converting to half is correct""" + a = np.array([2.0**-25 + 2.0**-35, # Rounds to minimum subnormal + 2.0**-25, # Underflows to zero (nearest even mode) + 2.0**-26, # Underflows to zero + 1.0 + 2.0**-11 + 2.0**-16, # rounds to 1.0+2**(-10) + 1.0 + 2.0**-11, # rounds to 1.0 (nearest even mode) + 1.0 + 2.0**-12, # rounds to 1.0 + 65519, # rounds to 65504 + 65520], # rounds to inf + dtype=float64) + rounded = [2.0**-24, + 0.0, + 0.0, + 1.0 + 2.0**(-10), + 1.0, + 1.0, + 65504, + np.inf] + + # Check float64->float16 rounding + with np.errstate(over="ignore"): + b = np.array(a, dtype=float16) + assert_equal(b, rounded) + + # Check float32->float16 rounding + a = np.array(a, dtype=float32) + with np.errstate(over="ignore"): + b = np.array(a, dtype=float16) + assert_equal(b, rounded) + + def test_half_correctness(self): + """Take every finite float16, and check the casting functions with + a manual conversion.""" + finite_f16, finite_f32, finite_f64 = self._create_arrays_finite() + + # Create an array of all finite float16s + a_bits = finite_f16.view(dtype=uint16) + + # Convert to 64-bit float manually + a_sgn = (-1.0)**((a_bits & 0x8000) >> 15) + a_exp = np.array((a_bits & 0x7c00) >> 10, dtype=np.int32) - 15 + a_man = (a_bits & 0x03ff) * 2.0**(-10) + # Implicit bit of normalized floats + a_man[a_exp != -15] += 1 + # Denormalized exponent is -14 + a_exp[a_exp == -15] = -14 + + a_manual = a_sgn * a_man * 2.0**a_exp + + a32_fail = np.nonzero(finite_f32 != a_manual)[0] + if len(a32_fail) != 0: + bad_index = a32_fail[0] + assert_equal(finite_f32, a_manual, + "First non-equal is half value 0x%x -> %g != %g" % + (a_bits[bad_index], + finite_f32[bad_index], + a_manual[bad_index])) + + a64_fail = np.nonzero(finite_f64 != a_manual)[0] + if len(a64_fail) != 0: + bad_index = a64_fail[0] + assert_equal(finite_f64, a_manual, + "First non-equal is half value 0x%x -> %g != %g" % + (a_bits[bad_index], + finite_f64[bad_index], + a_manual[bad_index])) + + def test_half_ordering(self): + """Make sure comparisons are working right""" + nonan_f16, _, _ = self._create_arrays_nonan() + + # All non-NaN float16 values in reverse order + a = nonan_f16[::-1].copy() + + # 32-bit float copy + b = np.array(a, dtype=float32) + + # Should sort the same + a.sort() + b.sort() + assert_equal(a, b) + + # Comparisons should work + assert_((a[:-1] <= a[1:]).all()) + assert_(not (a[:-1] > a[1:]).any()) + assert_((a[1:] >= a[:-1]).all()) + assert_(not (a[1:] < a[:-1]).any()) + # All != except for +/-0 + assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size - 2) + assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size - 2) + + def test_half_funcs(self): + """Test the various ArrFuncs""" + + # fill + assert_equal(np.arange(10, dtype=float16), + np.arange(10, dtype=float32)) + + # fillwithscalar + a = np.zeros((5,), dtype=float16) + a.fill(1) + assert_equal(a, np.ones((5,), dtype=float16)) + + # nonzero and copyswap + a = np.array([0, 0, -1, -1 / 1e20, 0, 2.0**-24, 7.629e-6], dtype=float16) + assert_equal(a.nonzero()[0], + [2, 5, 6]) + a = a.byteswap() + a = a.view(a.dtype.newbyteorder()) + assert_equal(a.nonzero()[0], + [2, 5, 6]) + + # dot + a = np.arange(0, 10, 0.5, dtype=float16) + b = np.ones((20,), dtype=float16) + assert_equal(np.dot(a, b), + 95) + + # argmax + a = np.array([0, -np.inf, -2, 0.5, 12.55, 7.3, 2.1, 12.4], dtype=float16) + assert_equal(a.argmax(), + 4) + a = np.array([0, -np.inf, -2, np.inf, 12.55, np.nan, 2.1, 12.4], dtype=float16) + assert_equal(a.argmax(), + 5) + + # getitem + a = np.arange(10, dtype=float16) + for i in range(10): + assert_equal(a.item(i), i) + + def test_spacing_nextafter(self): + """Test np.spacing and np.nextafter""" + # All non-negative finite #'s + a = np.arange(0x7c00, dtype=uint16) + hinf = np.array((np.inf,), dtype=float16) + hnan = np.array((np.nan,), dtype=float16) + a_f16 = a.view(dtype=float16) + + assert_equal(np.spacing(a_f16[:-1]), a_f16[1:] - a_f16[:-1]) + + assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:]) + assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1]) + assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1]) + + assert_equal(np.nextafter(hinf, a_f16), a_f16[-1]) + assert_equal(np.nextafter(-hinf, a_f16), -a_f16[-1]) + + assert_equal(np.nextafter(hinf, hinf), hinf) + assert_equal(np.nextafter(hinf, -hinf), a_f16[-1]) + assert_equal(np.nextafter(-hinf, hinf), -a_f16[-1]) + assert_equal(np.nextafter(-hinf, -hinf), -hinf) + + assert_equal(np.nextafter(a_f16, hnan), hnan[0]) + assert_equal(np.nextafter(hnan, a_f16), hnan[0]) + + assert_equal(np.nextafter(hnan, hnan), hnan) + assert_equal(np.nextafter(hinf, hnan), hnan) + assert_equal(np.nextafter(hnan, hinf), hnan) + + # switch to negatives + a |= 0x8000 + + assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1])) + assert_equal(np.spacing(a_f16[1:]), a_f16[:-1] - a_f16[1:]) + + assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1]) + assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1]) + assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:]) + + assert_equal(np.nextafter(hinf, a_f16), -a_f16[-1]) + assert_equal(np.nextafter(-hinf, a_f16), a_f16[-1]) + + assert_equal(np.nextafter(a_f16, hnan), hnan[0]) + assert_equal(np.nextafter(hnan, a_f16), hnan[0]) + + def test_half_ufuncs(self): + """Test the various ufuncs""" + + a = np.array([0, 1, 2, 4, 2], dtype=float16) + b = np.array([-2, 5, 1, 4, 3], dtype=float16) + c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16) + + assert_equal(np.add(a, b), [-2, 6, 3, 8, 5]) + assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1]) + assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6]) + assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625]) + + assert_equal(np.equal(a, b), [False, False, False, True, False]) + assert_equal(np.not_equal(a, b), [True, True, True, False, True]) + assert_equal(np.less(a, b), [False, True, False, False, True]) + assert_equal(np.less_equal(a, b), [False, True, False, True, True]) + assert_equal(np.greater(a, b), [True, False, True, False, False]) + assert_equal(np.greater_equal(a, b), [True, False, True, True, False]) + assert_equal(np.logical_and(a, b), [False, True, True, True, True]) + assert_equal(np.logical_or(a, b), [True, True, True, True, True]) + assert_equal(np.logical_xor(a, b), [True, False, False, False, False]) + assert_equal(np.logical_not(a), [True, False, False, False, False]) + + assert_equal(np.isnan(c), [False, False, False, True, False]) + assert_equal(np.isinf(c), [False, False, True, False, False]) + assert_equal(np.isfinite(c), [True, True, False, False, True]) + assert_equal(np.signbit(b), [True, False, False, False, False]) + + assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3]) + + assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3]) + + x = np.maximum(b, c) + assert_(np.isnan(x[3])) + x[3] = 0 + assert_equal(x, [0, 5, 1, 0, 6]) + + assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2]) + + x = np.minimum(b, c) + assert_(np.isnan(x[3])) + x[3] = 0 + assert_equal(x, [-2, -1, -np.inf, 0, 3]) + + assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3]) + assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6]) + assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2]) + assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3]) + + assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0]) + assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2]) + assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2])) + assert_equal(np.square(b), [4, 25, 1, 16, 9]) + assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125]) + assert_equal(np.ones_like(b), [1, 1, 1, 1, 1]) + assert_equal(np.conjugate(b), b) + assert_equal(np.absolute(b), [2, 5, 1, 4, 3]) + assert_equal(np.negative(b), [2, -5, -1, -4, -3]) + assert_equal(np.positive(b), b) + assert_equal(np.sign(b), [-1, 1, 1, 1, 1]) + assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b)) + assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2])) + assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12]) + + def test_half_coercion(self): + """Test that half gets coerced properly with the other types""" + a16 = np.array((1,), dtype=float16) + a32 = np.array((1,), dtype=float32) + b16 = float16(1) + b32 = float32(1) + + assert np.power(a16, 2).dtype == float16 + assert np.power(a16, 2.0).dtype == float16 + assert np.power(a16, b16).dtype == float16 + assert np.power(a16, b32).dtype == float32 + assert np.power(a16, a16).dtype == float16 + assert np.power(a16, a32).dtype == float32 + + assert np.power(b16, 2).dtype == float16 + assert np.power(b16, 2.0).dtype == float16 + assert np.power(b16, b16).dtype, float16 + assert np.power(b16, b32).dtype, float32 + assert np.power(b16, a16).dtype, float16 + assert np.power(b16, a32).dtype, float32 + + assert np.power(a32, a16).dtype == float32 + assert np.power(a32, b16).dtype == float32 + assert np.power(b32, a16).dtype == float32 + assert np.power(b32, b16).dtype == float32 + + @pytest.mark.skipif(platform.machine() == "armv5tel", + reason="See gh-413.") + @pytest.mark.skipif(IS_WASM, + reason="fp exceptions don't work in wasm.") + def test_half_fpe(self): + with np.errstate(all='raise'): + sx16 = np.array((1e-4,), dtype=float16) + bx16 = np.array((1e4,), dtype=float16) + sy16 = float16(1e-4) + by16 = float16(1e4) + + # Underflow errors + assert_raises_fpe('underflow', lambda a, b: a * b, sx16, sx16) + assert_raises_fpe('underflow', lambda a, b: a * b, sx16, sy16) + assert_raises_fpe('underflow', lambda a, b: a * b, sy16, sx16) + assert_raises_fpe('underflow', lambda a, b: a * b, sy16, sy16) + assert_raises_fpe('underflow', lambda a, b: a / b, sx16, bx16) + assert_raises_fpe('underflow', lambda a, b: a / b, sx16, by16) + assert_raises_fpe('underflow', lambda a, b: a / b, sy16, bx16) + assert_raises_fpe('underflow', lambda a, b: a / b, sy16, by16) + assert_raises_fpe('underflow', lambda a, b: a / b, + float16(2.**-14), float16(2**11)) + assert_raises_fpe('underflow', lambda a, b: a / b, + float16(-2.**-14), float16(2**11)) + assert_raises_fpe('underflow', lambda a, b: a / b, + float16(2.**-14 + 2**-24), float16(2)) + assert_raises_fpe('underflow', lambda a, b: a / b, + float16(-2.**-14 - 2**-24), float16(2)) + assert_raises_fpe('underflow', lambda a, b: a / b, + float16(2.**-14 + 2**-23), float16(4)) + + # Overflow errors + assert_raises_fpe('overflow', lambda a, b: a * b, bx16, bx16) + assert_raises_fpe('overflow', lambda a, b: a * b, bx16, by16) + assert_raises_fpe('overflow', lambda a, b: a * b, by16, bx16) + assert_raises_fpe('overflow', lambda a, b: a * b, by16, by16) + assert_raises_fpe('overflow', lambda a, b: a / b, bx16, sx16) + assert_raises_fpe('overflow', lambda a, b: a / b, bx16, sy16) + assert_raises_fpe('overflow', lambda a, b: a / b, by16, sx16) + assert_raises_fpe('overflow', lambda a, b: a / b, by16, sy16) + assert_raises_fpe('overflow', lambda a, b: a + b, + float16(65504), float16(17)) + assert_raises_fpe('overflow', lambda a, b: a - b, + float16(-65504), float16(17)) + assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf)) + assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf)) # noqa: E501 + assert_raises_fpe('overflow', np.spacing, float16(65504)) + + # Invalid value errors + assert_raises_fpe('invalid', np.divide, float16(np.inf), float16(np.inf)) + assert_raises_fpe('invalid', np.spacing, float16(np.inf)) + assert_raises_fpe('invalid', np.spacing, float16(np.nan)) + + # These should not raise + float16(65472) + float16(32) + float16(2**-13) / float16(2) + float16(2**-14) / float16(2**10) + np.spacing(float16(-65504)) + np.nextafter(float16(65504), float16(-np.inf)) + np.nextafter(float16(-65504), float16(np.inf)) + np.nextafter(float16(np.inf), float16(0)) + np.nextafter(float16(-np.inf), float16(0)) + np.nextafter(float16(0), float16(np.nan)) + np.nextafter(float16(np.nan), float16(0)) + float16(2**-14) / float16(2**10) + float16(-2**-14) / float16(2**10) + float16(2**-14 + 2**-23) / float16(2) + float16(-2**-14 - 2**-23) / float16(2) + + def test_half_array_interface(self): + """Test that half is compatible with __array_interface__""" + class Dummy: + pass + + a = np.ones((1,), dtype=float16) + b = Dummy() + b.__array_interface__ = a.__array_interface__ + c = np.array(b) + assert_(c.dtype == float16) + assert_equal(a, c) diff --git a/local_packages/numpy/_core/tests/test_hashtable.py b/local_packages/numpy/_core/tests/test_hashtable.py new file mode 100644 index 0000000..25a7158 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_hashtable.py @@ -0,0 +1,36 @@ +import random + +import pytest + +from numpy._core._multiarray_tests import identityhash_tester + + +@pytest.mark.parametrize("key_length", [1, 3, 6]) +@pytest.mark.parametrize("length", [1, 16, 2000]) +def test_identity_hashtable(key_length, length): + # use a 30 object pool for everything (duplicates will happen) + pool = [object() for i in range(20)] + keys_vals = [] + for i in range(length): + keys = tuple(random.choices(pool, k=key_length)) + keys_vals.append((keys, random.choice(pool))) + + dictionary = dict(keys_vals) + + # add a random item at the end: + keys_vals.append(random.choice(keys_vals)) + # the expected one could be different with duplicates: + expected = dictionary[keys_vals[-1][0]] + + res = identityhash_tester(key_length, keys_vals, replace=True) + assert res is expected + + if length == 1: + return + + # add a new item with a key that is already used and a new value, this + # should error if replace is False, see gh-26690 + new_key = (keys_vals[1][0], object()) + keys_vals[0] = new_key + with pytest.raises(RuntimeError): + identityhash_tester(key_length, keys_vals) diff --git a/local_packages/numpy/_core/tests/test_indexerrors.py b/local_packages/numpy/_core/tests/test_indexerrors.py new file mode 100644 index 0000000..70e97dd --- /dev/null +++ b/local_packages/numpy/_core/tests/test_indexerrors.py @@ -0,0 +1,122 @@ +import numpy as np +from numpy.testing import assert_raises, assert_raises_regex + + +class TestIndexErrors: + '''Tests to exercise indexerrors not covered by other tests.''' + + def test_arraytypes_fasttake(self): + 'take from a 0-length dimension' + x = np.empty((2, 3, 0, 4)) + assert_raises(IndexError, x.take, [0], axis=2) + assert_raises(IndexError, x.take, [1], axis=2) + assert_raises(IndexError, x.take, [0], axis=2, mode='wrap') + assert_raises(IndexError, x.take, [0], axis=2, mode='clip') + + def test_take_from_object(self): + # Check exception taking from object array + d = np.zeros(5, dtype=object) + assert_raises(IndexError, d.take, [6]) + + # Check exception taking from 0-d array + d = np.zeros((5, 0), dtype=object) + assert_raises(IndexError, d.take, [1], axis=1) + assert_raises(IndexError, d.take, [0], axis=1) + assert_raises(IndexError, d.take, [0]) + assert_raises(IndexError, d.take, [0], mode='wrap') + assert_raises(IndexError, d.take, [0], mode='clip') + + def test_multiindex_exceptions(self): + a = np.empty(5, dtype=object) + assert_raises(IndexError, a.item, 20) + a = np.empty((5, 0), dtype=object) + assert_raises(IndexError, a.item, (0, 0)) + + def test_put_exceptions(self): + a = np.zeros((5, 5)) + assert_raises(IndexError, a.put, 100, 0) + a = np.zeros((5, 5), dtype=object) + assert_raises(IndexError, a.put, 100, 0) + a = np.zeros((5, 5, 0)) + assert_raises(IndexError, a.put, 100, 0) + a = np.zeros((5, 5, 0), dtype=object) + assert_raises(IndexError, a.put, 100, 0) + + def test_iterators_exceptions(self): + "cases in iterators.c" + def assign(obj, ind, val): + obj[ind] = val + + a = np.zeros([1, 2, 3]) + assert_raises(IndexError, lambda: a[0, 5, None, 2]) + assert_raises(IndexError, lambda: a[0, 5, 0, 2]) + assert_raises(IndexError, lambda: assign(a, (0, 5, None, 2), 1)) + assert_raises(IndexError, lambda: assign(a, (0, 5, 0, 2), 1)) + + a = np.zeros([1, 0, 3]) + assert_raises(IndexError, lambda: a[0, 0, None, 2]) + assert_raises(IndexError, lambda: assign(a, (0, 0, None, 2), 1)) + + a = np.zeros([1, 2, 3]) + assert_raises(IndexError, lambda: a.flat[10]) + assert_raises(IndexError, lambda: assign(a.flat, 10, 5)) + a = np.zeros([1, 0, 3]) + assert_raises(IndexError, lambda: a.flat[10]) + assert_raises(IndexError, lambda: assign(a.flat, 10, 5)) + + a = np.zeros([1, 2, 3]) + assert_raises(IndexError, lambda: a.flat[np.array(10)]) + assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5)) + a = np.zeros([1, 0, 3]) + assert_raises(IndexError, lambda: a.flat[np.array(10)]) + assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5)) + + a = np.zeros([1, 2, 3]) + assert_raises(IndexError, lambda: a.flat[np.array([10])]) + assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5)) + a = np.zeros([1, 0, 3]) + assert_raises(IndexError, lambda: a.flat[np.array([10])]) + assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5)) + + def test_mapping(self): + "cases from mapping.c" + + def assign(obj, ind, val): + obj[ind] = val + + a = np.zeros((0, 10)) + assert_raises(IndexError, lambda: a[12]) + + a = np.zeros((3, 5)) + assert_raises(IndexError, lambda: a[(10, 20)]) + assert_raises(IndexError, lambda: assign(a, (10, 20), 1)) + a = np.zeros((3, 0)) + assert_raises(IndexError, lambda: a[(1, 0)]) + assert_raises(IndexError, lambda: assign(a, (1, 0), 1)) + + a = np.zeros((10,)) + assert_raises(IndexError, lambda: assign(a, 10, 1)) + a = np.zeros((0,)) + assert_raises(IndexError, lambda: assign(a, 10, 1)) + + a = np.zeros((3, 5)) + assert_raises(IndexError, lambda: a[(1, [1, 20])]) + assert_raises(IndexError, lambda: assign(a, (1, [1, 20]), 1)) + a = np.zeros((3, 0)) + assert_raises(IndexError, lambda: a[(1, [0, 1])]) + assert_raises(IndexError, lambda: assign(a, (1, [0, 1]), 1)) + + def test_mapping_error_message(self): + a = np.zeros((3, 5)) + index = (1, 2, 3, 4, 5) + assert_raises_regex( + IndexError, + "too many indices for array: " + "array is 2-dimensional, but 5 were indexed", + lambda: a[index]) + + def test_methods(self): + "cases from methods.c" + + a = np.zeros((3, 3)) + assert_raises(IndexError, lambda: a.item(100)) diff --git a/local_packages/numpy/_core/tests/test_indexing.py b/local_packages/numpy/_core/tests/test_indexing.py new file mode 100644 index 0000000..65d42d6 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_indexing.py @@ -0,0 +1,1692 @@ +import functools +import inspect +import operator +import sys +import warnings +from itertools import product + +import pytest + +import numpy as np +from numpy._core._multiarray_tests import array_indexing +from numpy.exceptions import ComplexWarning, VisibleDeprecationWarning +from numpy.testing import ( + HAS_REFCOUNT, + IS_PYPY, + assert_, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) + + +class TestIndexing: + def test_index_no_floats(self): + a = np.array([[[5]]]) + + assert_raises(IndexError, lambda: a[0.0]) + assert_raises(IndexError, lambda: a[0, 0.0]) + assert_raises(IndexError, lambda: a[0.0, 0]) + assert_raises(IndexError, lambda: a[0.0, :]) + assert_raises(IndexError, lambda: a[:, 0.0]) + assert_raises(IndexError, lambda: a[:, 0.0, :]) + assert_raises(IndexError, lambda: a[0.0, :, :]) + assert_raises(IndexError, lambda: a[0, 0, 0.0]) + assert_raises(IndexError, lambda: a[0.0, 0, 0]) + assert_raises(IndexError, lambda: a[0, 0.0, 0]) + assert_raises(IndexError, lambda: a[-1.4]) + assert_raises(IndexError, lambda: a[0, -1.4]) + assert_raises(IndexError, lambda: a[-1.4, 0]) + assert_raises(IndexError, lambda: a[-1.4, :]) + assert_raises(IndexError, lambda: a[:, -1.4]) + assert_raises(IndexError, lambda: a[:, -1.4, :]) + assert_raises(IndexError, lambda: a[-1.4, :, :]) + assert_raises(IndexError, lambda: a[0, 0, -1.4]) + assert_raises(IndexError, lambda: a[-1.4, 0, 0]) + assert_raises(IndexError, lambda: a[0, -1.4, 0]) + assert_raises(IndexError, lambda: a[0.0:, 0.0]) + assert_raises(IndexError, lambda: a[0.0:, 0.0, :]) + + def test_slicing_no_floats(self): + a = np.array([[5]]) + + # start as float. + assert_raises(TypeError, lambda: a[0.0:]) + assert_raises(TypeError, lambda: a[0:, 0.0:2]) + assert_raises(TypeError, lambda: a[0.0::2, :0]) + assert_raises(TypeError, lambda: a[0.0:1:2, :]) + assert_raises(TypeError, lambda: a[:, 0.0:]) + # stop as float. + assert_raises(TypeError, lambda: a[:0.0]) + assert_raises(TypeError, lambda: a[:0, 1:2.0]) + assert_raises(TypeError, lambda: a[:0.0:2, :0]) + assert_raises(TypeError, lambda: a[:0.0, :]) + assert_raises(TypeError, lambda: a[:, 0:4.0:2]) + # step as float. + assert_raises(TypeError, lambda: a[::1.0]) + assert_raises(TypeError, lambda: a[0:, :2:2.0]) + assert_raises(TypeError, lambda: a[1::4.0, :0]) + assert_raises(TypeError, lambda: a[::5.0, :]) + assert_raises(TypeError, lambda: a[:, 0:4:2.0]) + # mixed. + assert_raises(TypeError, lambda: a[1.0:2:2.0]) + assert_raises(TypeError, lambda: a[1.0::2.0]) + assert_raises(TypeError, lambda: a[0:, :2.0:2.0]) + assert_raises(TypeError, lambda: a[1.0:1:4.0, :0]) + assert_raises(TypeError, lambda: a[1.0:5.0:5.0, :]) + assert_raises(TypeError, lambda: a[:, 0.4:4.0:2.0]) + # should still get the DeprecationWarning if step = 0. + assert_raises(TypeError, lambda: a[::0.0]) + + def test_index_no_array_to_index(self): + # No non-scalar arrays. + a = np.array([[[1]]]) + + assert_raises(TypeError, lambda: a[a:a:a]) + + def test_none_index(self): + # `None` index adds newaxis + a = np.array([1, 2, 3]) + assert_equal(a[None], a[np.newaxis]) + assert_equal(a[None].ndim, a.ndim + 1) + + def test_empty_tuple_index(self): + # Empty tuple index creates a view + a = np.array([1, 2, 3]) + assert_equal(a[()], a) + assert_(a[()].base is a) + a = np.array(0) + assert_(isinstance(a[()], np.int_)) + + def test_void_scalar_empty_tuple(self): + s = np.zeros((), dtype='V4') + assert_equal(s[()].dtype, s.dtype) + assert_equal(s[()], s) + assert_equal(type(s[...]), np.ndarray) + + def test_same_kind_index_casting(self): + # Indexes should be cast with same-kind and not safe, even if that + # is somewhat unsafe. So test various different code paths. + index = np.arange(5) + u_index = index.astype(np.uintp) + arr = np.arange(10) + + assert_array_equal(arr[index], arr[u_index]) + arr[u_index] = np.arange(5) + assert_array_equal(arr, np.arange(10)) + + arr = np.arange(10).reshape(5, 2) + assert_array_equal(arr[index], arr[u_index]) + + arr[u_index] = np.arange(5)[:, None] + assert_array_equal(arr, np.arange(5)[:, None].repeat(2, axis=1)) + + arr = np.arange(25).reshape(5, 5) + assert_array_equal(arr[u_index, u_index], arr[index, index]) + + def test_empty_fancy_index(self): + # Empty list index creates an empty array + # with the same dtype (but with weird shape) + a = np.array([1, 2, 3]) + assert_equal(a[[]], []) + assert_equal(a[[]].dtype, a.dtype) + + b = np.array([], dtype=np.intp) + assert_equal(a[[]], []) + assert_equal(a[[]].dtype, a.dtype) + + b = np.array([]) + assert_raises(IndexError, a.__getitem__, b) + + def test_gh_26542(self): + a = np.array([0, 1, 2]) + idx = np.array([2, 1, 0]) + a[idx] = a + expected = np.array([2, 1, 0]) + assert_equal(a, expected) + + def test_gh_26542_2d(self): + a = np.array([[0, 1, 2]]) + idx_row = np.zeros(3, dtype=int) + idx_col = np.array([2, 1, 0]) + a[idx_row, idx_col] = a + expected = np.array([[2, 1, 0]]) + assert_equal(a, expected) + + def test_gh_26542_index_overlap(self): + arr = np.arange(100) + expected_vals = np.copy(arr[:-10]) + arr[10:] = arr[:-10] + actual_vals = arr[10:] + assert_equal(actual_vals, expected_vals) + + def test_gh_26844(self): + expected = [0, 1, 3, 3, 3] + a = np.arange(5) + a[2:][a[:-2]] = 3 + assert_equal(a, expected) + + def test_gh_26844_segfault(self): + # check for absence of segfault for: + # https://github.com/numpy/numpy/pull/26958/files#r1854589178 + a = np.arange(5) + expected = [0, 1, 3, 3, 3] + a[2:][None, a[:-2]] = 3 + assert_equal(a, expected) + + def test_ellipsis_index(self): + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + assert_(a[...] is not a) + assert_equal(a[...], a) + # `a[...]` was `a` in numpy <1.9. + assert_(a[...].base is a) + + # Slicing with ellipsis can skip an + # arbitrary number of dimensions + assert_equal(a[0, ...], a[0]) + assert_equal(a[0, ...], a[0, :]) + assert_equal(a[..., 0], a[:, 0]) + + # Slicing with ellipsis always results + # in an array, not a scalar + assert_equal(a[0, ..., 1], np.array(2)) + + # Assignment with `(Ellipsis,)` on 0-d arrays + b = np.array(1) + b[(Ellipsis,)] = 2 + assert_equal(b, 2) + + def test_single_int_index(self): + # Single integer index selects one row + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + + assert_equal(a[0], [1, 2, 3]) + assert_equal(a[-1], [7, 8, 9]) + + # Index out of bounds produces IndexError + assert_raises(IndexError, a.__getitem__, 1 << 30) + # Index overflow produces IndexError + assert_raises(IndexError, a.__getitem__, 1 << 64) + + def test_single_bool_index(self): + # Single boolean index + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + + assert_equal(a[np.array(True)], a[None]) + assert_equal(a[np.array(False)], a[None][0:0]) + + def test_boolean_shape_mismatch(self): + arr = np.ones((5, 4, 3)) + + index = np.array([True]) + assert_raises(IndexError, arr.__getitem__, index) + + index = np.array([False] * 6) + assert_raises(IndexError, arr.__getitem__, index) + + index = np.zeros((4, 4), dtype=bool) + assert_raises(IndexError, arr.__getitem__, index) + + assert_raises(IndexError, arr.__getitem__, (slice(None), index)) + + def test_boolean_indexing_onedim(self): + # Indexing a 2-dimensional array with + # boolean array of length one + a = np.array([[0., 0., 0.]]) + b = np.array([True], dtype=bool) + assert_equal(a[b], a) + # boolean assignment + a[b] = 1. + assert_equal(a, [[1., 1., 1.]]) + + def test_boolean_assignment_value_mismatch(self): + # A boolean assignment should fail when the shape of the values + # cannot be broadcast to the subscription. (see also gh-3458) + a = np.arange(4) + + def f(a, v): + a[a > -1] = v + + assert_raises(ValueError, f, a, []) + assert_raises(ValueError, f, a, [1, 2, 3]) + assert_raises(ValueError, f, a[:1], [1, 2, 3]) + + def test_boolean_assignment_needs_api(self): + # See also gh-7666 + # This caused a segfault on Python 2 due to the GIL not being + # held when the iterator does not need it, but the transfer function + # does + arr = np.zeros(1000) + indx = np.zeros(1000, dtype=bool) + indx[:100] = True + arr[indx] = np.ones(100, dtype=object) + + expected = np.zeros(1000) + expected[:100] = 1 + assert_array_equal(arr, expected) + + def test_boolean_indexing_twodim(self): + # Indexing a 2-dimensional array with + # 2-dimensional boolean array + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + b = np.array([[ True, False, True], + [False, True, False], + [ True, False, True]]) + assert_equal(a[b], [1, 3, 5, 7, 9]) + assert_equal(a[b[1]], [[4, 5, 6]]) + assert_equal(a[b[0]], a[b[2]]) + + # boolean assignment + a[b] = 0 + assert_equal(a, [[0, 2, 0], + [4, 0, 6], + [0, 8, 0]]) + + def test_boolean_indexing_list(self): + # Regression test for #13715. It's a use-after-free bug which the + # test won't directly catch, but it will show up in valgrind. + a = np.array([1, 2, 3]) + b = [True, False, True] + # Two variants of the test because the first takes a fast path + assert_equal(a[b], [1, 3]) + assert_equal(a[None, b], [[1, 3]]) + + def test_reverse_strides_and_subspace_bufferinit(self): + # This tests that the strides are not reversed for simple and + # subspace fancy indexing. + a = np.ones(5) + b = np.zeros(5, dtype=np.intp)[::-1] + c = np.arange(5)[::-1] + + a[b] = c + # If the strides are not reversed, the 0 in the arange comes last. + assert_equal(a[0], 0) + + # This also tests that the subspace buffer is initialized: + a = np.ones((5, 2)) + c = np.arange(10).reshape(5, 2)[::-1] + a[b, :] = c + assert_equal(a[0], [0, 1]) + + def test_reversed_strides_result_allocation(self): + # Test a bug when calculating the output strides for a result array + # when the subspace size was 1 (and test other cases as well) + a = np.arange(10)[:, None] + i = np.arange(10)[::-1] + assert_array_equal(a[i], a[i.copy('C')]) + + a = np.arange(20).reshape(-1, 2) + + def test_uncontiguous_subspace_assignment(self): + # During development there was a bug activating a skip logic + # based on ndim instead of size. + a = np.full((3, 4, 2), -1) + b = np.full((3, 4, 2), -1) + + a[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T + b[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T.copy() + + assert_equal(a, b) + + def test_too_many_fancy_indices_special_case(self): + # Just documents behaviour, this is a small limitation. + a = np.ones((1,) * 64) # 64 is NPY_MAXDIMS + assert_raises(IndexError, a.__getitem__, (np.array([0]),) * 64) + + def test_scalar_array_bool(self): + # NumPy bools can be used as boolean index (python ones as of yet not) + a = np.array(1) + assert_equal(a[np.bool(True)], a[np.array(True)]) + assert_equal(a[np.bool(False)], a[np.array(False)]) + + # After deprecating bools as integers: + #a = np.array([0,1,2]) + #assert_equal(a[True, :], a[None, :]) + #assert_equal(a[:, True], a[:, None]) + # + #assert_(not np.may_share_memory(a, a[True, :])) + + def test_everything_returns_views(self): + # Before `...` would return a itself. + a = np.arange(5) + + assert_(a is not a[()]) + assert_(a is not a[...]) + assert_(a is not a[:]) + + def test_broaderrors_indexing(self): + a = np.zeros((5, 5)) + assert_raises(IndexError, a.__getitem__, ([0, 1], [0, 1, 2])) + assert_raises(IndexError, a.__setitem__, ([0, 1], [0, 1, 2]), 0) + + def test_trivial_fancy_out_of_bounds(self): + a = np.zeros(5) + ind = np.ones(20, dtype=np.intp) + ind[-1] = 10 + assert_raises(IndexError, a.__getitem__, ind) + assert_raises(IndexError, a.__setitem__, ind, 0) + ind = np.ones(20, dtype=np.intp) + ind[0] = 11 + assert_raises(IndexError, a.__getitem__, ind) + assert_raises(IndexError, a.__setitem__, ind, 0) + + def test_trivial_fancy_not_possible(self): + # Test that the fast path for trivial assignment is not incorrectly + # used when the index is not contiguous or 1D, see also gh-11467. + a = np.arange(6) + idx = np.arange(6, dtype=np.intp).reshape(2, 1, 3)[:, :, 0] + assert_array_equal(a[idx], idx) + + # this case must not go into the fast path, note that idx is + # a non-contiguous none 1D array here. + a[idx] = -1 + res = np.arange(6) + res[0] = -1 + res[3] = -1 + assert_array_equal(a, res) + + def test_nonbaseclass_values(self): + class SubClass(np.ndarray): + def __array_finalize__(self, old): + # Have array finalize do funny things + self.fill(99) + + a = np.zeros((5, 5)) + s = a.copy().view(type=SubClass) + s.fill(1) + + a[[0, 1, 2, 3, 4], :] = s + assert_((a == 1).all()) + + # Subspace is last, so transposing might want to finalize + a[:, [0, 1, 2, 3, 4]] = s + assert_((a == 1).all()) + + a.fill(0) + a[...] = s + assert_((a == 1).all()) + + def test_array_like_values(self): + # Similar to the above test, but use a memoryview instead + a = np.zeros((5, 5)) + s = np.arange(25, dtype=np.float64).reshape(5, 5) + + a[[0, 1, 2, 3, 4], :] = memoryview(s) + assert_array_equal(a, s) + + a[:, [0, 1, 2, 3, 4]] = memoryview(s) + assert_array_equal(a, s) + + a[...] = memoryview(s) + assert_array_equal(a, s) + + @pytest.mark.parametrize("writeable", [True, False]) + def test_subclass_writeable(self, writeable): + d = np.rec.array([('NGC1001', 11), ('NGC1002', 1.), ('NGC1003', 1.)], + dtype=[('target', 'S20'), ('V_mag', '>f4')]) + d.flags.writeable = writeable + # Advanced indexing results are always writeable: + ind = np.array([False, True, True], dtype=bool) + assert d[ind].flags.writeable + ind = np.array([0, 1]) + assert d[ind].flags.writeable + # Views should be writeable if the original array is: + assert d[...].flags.writeable == writeable + assert d[0].flags.writeable == writeable + + def test_memory_order(self): + # This is not necessary to preserve. Memory layouts for + # more complex indices are not as simple. + a = np.arange(10) + b = np.arange(10).reshape(5, 2).T + assert_(a[b].flags.f_contiguous) + + # Takes a different implementation branch: + a = a.reshape(-1, 1) + assert_(a[b, 0].flags.f_contiguous) + + def test_scalar_return_type(self): + # Full scalar indices should return scalars and object + # arrays should not call PyArray_Return on their items + class Zero: + # The most basic valid indexing + def __index__(self): + return 0 + + z = Zero() + + class ArrayLike: + # Simple array, should behave like the array + def __array__(self, dtype=None, copy=None): + return np.array(0) + + a = np.zeros(()) + assert_(isinstance(a[()], np.float64)) + a = np.zeros(1) + assert_(isinstance(a[z], np.float64)) + a = np.zeros((1, 1)) + assert_(isinstance(a[z, np.array(0)], np.float64)) + assert_(isinstance(a[z, ArrayLike()], np.float64)) + + # And object arrays do not call it too often: + b = np.array(0) + a = np.array(0, dtype=object) + a[()] = b + assert_(isinstance(a[()], np.ndarray)) + a = np.array([b, None]) + assert_(isinstance(a[z], np.ndarray)) + a = np.array([[b, None]]) + assert_(isinstance(a[z, np.array(0)], np.ndarray)) + assert_(isinstance(a[z, ArrayLike()], np.ndarray)) + + def test_small_regressions(self): + # Reference count of intp for index checks + a = np.array([0]) + if HAS_REFCOUNT: + refcount = sys.getrefcount(np.dtype(np.intp)) + # item setting always checks indices in separate function: + a[np.array([0], dtype=np.intp)] = 1 + a[np.array([0], dtype=np.uint8)] = 1 + assert_raises(IndexError, a.__setitem__, + np.array([1], dtype=np.intp), 1) + assert_raises(IndexError, a.__setitem__, + np.array([1], dtype=np.uint8), 1) + + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount) + + def test_unaligned(self): + v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7] + d = v.view(np.dtype("S8")) + # unaligned source + x = (np.zeros(16, dtype=np.int8) + ord('a'))[1:-7] + x = x.view(np.dtype("S8")) + x[...] = np.array("b" * 8, dtype="S") + b = np.arange(d.size) + # trivial + assert_equal(d[b], d) + d[b] = x + # nontrivial + # unaligned index array + b = np.zeros(d.size + 1).view(np.int8)[1:-(np.intp(0).itemsize - 1)] + b = b.view(np.intp)[:d.size] + b[...] = np.arange(d.size) + assert_equal(d[b.astype(np.int16)], d) + d[b.astype(np.int16)] = x + # boolean + d[b % 2 == 0] + d[b % 2 == 0] = x[::2] + + def test_tuple_subclass(self): + arr = np.ones((5, 5)) + + # A tuple subclass should also be an nd-index + class TupleSubclass(tuple): + pass + index = ([1], [1]) + index = TupleSubclass(index) + assert_(arr[index].shape == (1,)) + # Unlike the non nd-index: + assert_(arr[index,].shape != (1,)) + + def test_broken_sequence_not_nd_index(self): + # See gh-5063: + # If we have an object which claims to be a sequence, but fails + # on item getting, this should not be converted to an nd-index (tuple) + # If this object happens to be a valid index otherwise, it should work + # This object here is very dubious and probably bad though: + class SequenceLike: + def __index__(self): + return 0 + + def __len__(self): + return 1 + + def __getitem__(self, item): + raise IndexError('Not possible') + + arr = np.arange(10) + assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),]) + + # also test that field indexing does not segfault + # for a similar reason, by indexing a structured array + arr = np.zeros((1,), dtype=[('f1', 'i8'), ('f2', 'i8')]) + assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),]) + + def test_indexing_array_weird_strides(self): + # See also gh-6221 + # the shapes used here come from the issue and create the correct + # size for the iterator buffering size. + x = np.ones(10) + x2 = np.ones((10, 2)) + ind = np.arange(10)[:, None, None, None] + ind = np.broadcast_to(ind, (10, 55, 4, 4)) + + # single advanced index case + assert_array_equal(x[ind], x[ind.copy()]) + # higher dimensional advanced index + zind = np.zeros(4, dtype=np.intp) + assert_array_equal(x2[ind, zind], x2[ind.copy(), zind]) + + def test_indexing_array_negative_strides(self): + # From gh-8264, + # core dumps if negative strides are used in iteration + arro = np.zeros((4, 4)) + arr = arro[::-1, ::-1] + + slices = (slice(None), [0, 1, 2, 3]) + arr[slices] = 10 + assert_array_equal(arr, 10.) + + def test_character_assignment(self): + # This is an example a function going through CopyObject which + # used to have an untested special path for scalars + # (the character special dtype case, should be deprecated probably) + arr = np.zeros((1, 5), dtype="c") + arr[0] = np.str_("asdfg") # must assign as a sequence + assert_array_equal(arr[0], np.array("asdfg", dtype="c")) + assert arr[0, 1] == b"s" # make sure not all were set to "a" for both + + @pytest.mark.parametrize("index", + [True, False, np.array([0])]) + @pytest.mark.parametrize("num", [64, 80]) + @pytest.mark.parametrize("original_ndim", [1, 64]) + def test_too_many_advanced_indices(self, index, num, original_ndim): + # These are limitations based on the number of arguments we can process. + # For `num=32` (and all boolean cases), the result is actually define; + # but the use of NpyIter (NPY_MAXARGS) limits it for technical reasons. + arr = np.ones((1,) * original_ndim) + with pytest.raises(IndexError): + arr[(index,) * num] + with pytest.raises(IndexError): + arr[(index,) * num] = 1. + + def test_nontuple_ndindex(self): + a = np.arange(25).reshape((5, 5)) + assert_equal(a[[0, 1]], np.array([a[0], a[1]])) + assert_equal(a[[0, 1], [0, 1]], np.array([0, 6])) + assert_raises(IndexError, a.__getitem__, [slice(None)]) + + +class TestFieldIndexing: + def test_scalar_return_type(self): + # Field access on an array should return an array, even if it + # is 0-d. + a = np.zeros((), [('a', 'f8')]) + assert_(isinstance(a['a'], np.ndarray)) + assert_(isinstance(a[['a']], np.ndarray)) + + +class TestBroadcastedAssignments: + def assign(self, a, ind, val): + a[ind] = val + return a + + def test_prepending_ones(self): + a = np.zeros((3, 2)) + + a[...] = np.ones((1, 3, 2)) + # Fancy with subspace with and without transpose + a[[0, 1, 2], :] = np.ones((1, 3, 2)) + a[:, [0, 1]] = np.ones((1, 3, 2)) + # Fancy without subspace (with broadcasting) + a[[[0], [1], [2]], [0, 1]] = np.ones((1, 3, 2)) + + def test_prepend_not_one(self): + assign = self.assign + s_ = np.s_ + a = np.zeros(5) + + # Too large and not only ones. + assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1))) + assert_raises(ValueError, assign, a, s_[[1, 2, 3],], np.ones((2, 1))) + assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2, 2, 1))) + + def test_simple_broadcasting_errors(self): + assign = self.assign + s_ = np.s_ + a = np.zeros((5, 1)) + + assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 2))) + assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 0))) + assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 2))) + assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 0))) + assert_raises(ValueError, assign, a, s_[[0], :], np.zeros((2, 1))) + + @pytest.mark.parametrize("index", [ + (..., [1, 2], slice(None)), + ([0, 1], ..., 0), + (..., [1, 2], [1, 2])]) + def test_broadcast_error_reports_correct_shape(self, index): + values = np.zeros((100, 100)) # will never broadcast below + + arr = np.zeros((3, 4, 5, 6, 7)) + # We currently report without any spaces (could be changed) + shape_str = str(arr[index].shape).replace(" ", "") + + with pytest.raises(ValueError) as e: + arr[index] = values + + assert str(e.value).endswith(shape_str) + + def test_index_is_larger(self): + # Simple case of fancy index broadcasting of the index. + a = np.zeros((5, 5)) + a[[[0], [1], [2]], [0, 1, 2]] = [2, 3, 4] + + assert_((a[:3, :3] == [2, 3, 4]).all()) + + def test_broadcast_subspace(self): + a = np.zeros((100, 100)) + v = np.arange(100)[:, None] + b = np.arange(100)[::-1] + a[b] = v + assert_((a[::-1] == v).all()) + + +class TestSubclasses: + def test_basic(self): + # Test that indexing in various ways produces SubClass instances, + # and that the base is set up correctly: the original subclass + # instance for views, and a new ndarray for advanced/boolean indexing + # where a copy was made (latter a regression test for gh-11983). + class SubClass(np.ndarray): + pass + + a = np.arange(5) + s = a.view(SubClass) + s_slice = s[:3] + assert_(type(s_slice) is SubClass) + assert_(s_slice.base is s) + assert_array_equal(s_slice, a[:3]) + + s_fancy = s[[0, 1, 2]] + assert_(type(s_fancy) is SubClass) + assert_(s_fancy.base is not s) + assert_(type(s_fancy.base) is np.ndarray) + assert_array_equal(s_fancy, a[[0, 1, 2]]) + assert_array_equal(s_fancy.base, a[[0, 1, 2]]) + + s_bool = s[s > 0] + assert_(type(s_bool) is SubClass) + assert_(s_bool.base is not s) + assert_(type(s_bool.base) is np.ndarray) + assert_array_equal(s_bool, a[a > 0]) + assert_array_equal(s_bool.base, a[a > 0]) + + def test_fancy_on_read_only(self): + # Test that fancy indexing on read-only SubClass does not make a + # read-only copy (gh-14132) + class SubClass(np.ndarray): + pass + + a = np.arange(5) + s = a.view(SubClass) + s.flags.writeable = False + s_fancy = s[[0, 1, 2]] + assert_(s_fancy.flags.writeable) + + def test_finalize_gets_full_info(self): + # Array finalize should be called on the filled array. + class SubClass(np.ndarray): + def __array_finalize__(self, old): + self.finalize_status = np.array(self) + self.old = old + + s = np.arange(10).view(SubClass) + new_s = s[:3] + assert_array_equal(new_s.finalize_status, new_s) + assert_array_equal(new_s.old, s) + + new_s = s[[0, 1, 2, 3]] + assert_array_equal(new_s.finalize_status, new_s) + assert_array_equal(new_s.old, s) + + new_s = s[s > 0] + assert_array_equal(new_s.finalize_status, new_s) + assert_array_equal(new_s.old, s) + + +class TestFancyIndexingCast: + def test_boolean_index_cast_assign(self): + # Setup the boolean index and float arrays. + shape = (8, 63) + bool_index = np.zeros(shape).astype(bool) + bool_index[0, 1] = True + zero_array = np.zeros(shape) + + # Assigning float is fine. + zero_array[bool_index] = np.array([1]) + assert_equal(zero_array[0, 1], 1) + + # Fancy indexing works, although we get a cast warning. + pytest.warns(ComplexWarning, + zero_array.__setitem__, ([0], [1]), np.array([2 + 1j])) + assert_equal(zero_array[0, 1], 2) # No complex part + + # Cast complex to float, throwing away the imaginary portion. + pytest.warns(ComplexWarning, + zero_array.__setitem__, bool_index, np.array([1j])) + assert_equal(zero_array[0, 1], 0) + + +class TestFancyIndexingEquivalence: + def test_object_assign(self): + # Check that the field and object special case using copyto is active. + # The right hand side cannot be converted to an array here. + a = np.arange(5, dtype=object) + b = a.copy() + a[:3] = [1, (1, 2), 3] + b[[0, 1, 2]] = [1, (1, 2), 3] + assert_array_equal(a, b) + + # test same for subspace fancy indexing + b = np.arange(5, dtype=object)[None, :] + b[[0], :3] = [[1, (1, 2), 3]] + assert_array_equal(a, b[0]) + + # Check that swapping of axes works. + # There was a bug that made the later assignment throw a ValueError + # do to an incorrectly transposed temporary right hand side (gh-5714) + b = b.T + b[:3, [0]] = [[1], [(1, 2)], [3]] + assert_array_equal(a, b[:, 0]) + + # Another test for the memory order of the subspace + arr = np.ones((3, 4, 5), dtype=object) + # Equivalent slicing assignment for comparison + cmp_arr = arr.copy() + cmp_arr[:1, ...] = [[[1], [2], [3], [4]]] + arr[[0], ...] = [[[1], [2], [3], [4]]] + assert_array_equal(arr, cmp_arr) + arr = arr.copy('F') + arr[[0], ...] = [[[1], [2], [3], [4]]] + assert_array_equal(arr, cmp_arr) + + def test_cast_equivalence(self): + # Yes, normal slicing uses unsafe casting. + a = np.arange(5) + b = a.copy() + + a[:3] = np.array(['2', '-3', '-1']) + b[[0, 2, 1]] = np.array(['2', '-1', '-3']) + assert_array_equal(a, b) + + # test the same for subspace fancy indexing + b = np.arange(5)[None, :] + b[[0], :3] = np.array([['2', '-3', '-1']]) + assert_array_equal(a, b[0]) + + +class TestMultiIndexingAutomated: + """ + These tests use code to mimic the C-Code indexing for selection. + + NOTE: + + * This still lacks tests for complex item setting. + * If you change behavior of indexing, you might want to modify + these tests to try more combinations. + * Behavior was written to match numpy version 1.8. (though a + first version matched 1.7.) + * Only tuple indices are supported by the mimicking code. + (and tested as of writing this) + * Error types should match most of the time as long as there + is only one error. For multiple errors, what gets raised + will usually not be the same one. They are *not* tested. + + Update 2016-11-30: It is probably not worth maintaining this test + indefinitely and it can be dropped if maintenance becomes a burden. + + """ + + def _create_array(self): + return np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6) + + def _create_complex_indices(self): + return ['skip', Ellipsis, + 0, + # Boolean indices, up to 3-d for some special cases of eating up + # dimensions, also need to test all False + np.array([True, False, False]), + np.array([[True, False], [False, True]]), + np.array([[[False, False], [False, False]]]), + # Some slices: + slice(-5, 5, 2), + slice(1, 1, 100), + slice(4, -1, -2), + slice(None, None, -3), + # Some Fancy indexes: + np.empty((0, 1, 1), dtype=np.intp), # empty and can be broadcast + np.array([0, 1, -2]), + np.array([[2], [0], [1]]), + np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()), + np.array([2, -1], dtype=np.int8), + np.zeros([1] * 31, dtype=int), # trigger too large array. + np.array([0., 1.])] # invalid datatype + + def _get_multi_index(self, arr, indices): + """Mimic multi dimensional indexing. + + Parameters + ---------- + arr : ndarray + Array to be indexed. + indices : tuple of index objects + + Returns + ------- + out : ndarray + An array equivalent to the indexing operation (but always a copy). + `arr[indices]` should be identical. + no_copy : bool + Whether the indexing operation requires a copy. If this is `True`, + `np.may_share_memory(arr, arr[indices])` should be `True` (with + some exceptions for scalars and possibly 0-d arrays). + + Notes + ----- + While the function may mostly match the errors of normal indexing this + is generally not the case. + """ + in_indices = list(indices) + indices = [] + # if False, this is a fancy or boolean index + no_copy = True + # number of fancy/scalar indexes that are not consecutive + num_fancy = 0 + # number of dimensions indexed by a "fancy" index + fancy_dim = 0 + # NOTE: This is a funny twist (and probably OK to change). + # The boolean array has illegal indexes, but this is + # allowed if the broadcast fancy-indices are 0-sized. + # This variable is to catch that case. + error_unless_broadcast_to_empty = False + + # We need to handle Ellipsis and make arrays from indices, also + # check if this is fancy indexing (set no_copy). + ndim = 0 + ellipsis_pos = None # define here mostly to replace all but first. + for i, indx in enumerate(in_indices): + if indx is None: + continue + if isinstance(indx, np.ndarray) and indx.dtype == bool: + no_copy = False + if indx.ndim == 0: + raise IndexError + # boolean indices can have higher dimensions + ndim += indx.ndim + fancy_dim += indx.ndim + continue + if indx is Ellipsis: + if ellipsis_pos is None: + ellipsis_pos = i + continue # do not increment ndim counter + raise IndexError + if isinstance(indx, slice): + ndim += 1 + continue + if not isinstance(indx, np.ndarray): + # This could be open for changes in numpy. + # numpy should maybe raise an error if casting to intp + # is not safe. It rejects np.array([1., 2.]) but not + # [1., 2.] as index (same for ie. np.take). + # (Note the importance of empty lists if changing this here) + try: + indx = np.array(indx, dtype=np.intp) + except ValueError: + raise IndexError + in_indices[i] = indx + elif indx.dtype.kind not in 'bi': + raise IndexError('arrays used as indices must be of ' + 'integer (or boolean) type') + if indx.ndim != 0: + no_copy = False + ndim += 1 + fancy_dim += 1 + + if arr.ndim - ndim < 0: + # we can't take more dimensions then we have, not even for 0-d + # arrays. since a[()] makes sense, but not a[(),]. We will + # raise an error later on, unless a broadcasting error occurs + # first. + raise IndexError + + if ndim == 0 and None not in in_indices: + # Well we have no indexes or one Ellipsis. This is legal. + return arr.copy(), no_copy + + if ellipsis_pos is not None: + in_indices[ellipsis_pos:ellipsis_pos + 1] = ([slice(None, None)] * + (arr.ndim - ndim)) + + for ax, indx in enumerate(in_indices): + if isinstance(indx, slice): + # convert to an index array + indx = np.arange(*indx.indices(arr.shape[ax])) + indices.append(['s', indx]) + continue + elif indx is None: + # this is like taking a slice with one element from a new axis: + indices.append(['n', np.array([0], dtype=np.intp)]) + arr = arr.reshape(arr.shape[:ax] + (1,) + arr.shape[ax:]) + continue + if isinstance(indx, np.ndarray) and indx.dtype == bool: + if indx.shape != arr.shape[ax:ax + indx.ndim]: + raise IndexError + + try: + flat_indx = np.ravel_multi_index(np.nonzero(indx), + arr.shape[ax:ax + indx.ndim], mode='raise') + except Exception: + error_unless_broadcast_to_empty = True + # fill with 0s instead, and raise error later + flat_indx = np.array([0] * indx.sum(), dtype=np.intp) + # concatenate axis into a single one: + if indx.ndim != 0: + arr = arr.reshape(arr.shape[:ax] + + (np.prod(arr.shape[ax:ax + indx.ndim]),) + + arr.shape[ax + indx.ndim:]) + indx = flat_indx + else: + # This could be changed, a 0-d boolean index can + # make sense (even outside the 0-d indexed array case) + # Note that originally this is could be interpreted as + # integer in the full integer special case. + raise IndexError + # If the index is a singleton, the bounds check is done + # before the broadcasting. This used to be different in <1.9 + elif indx.ndim == 0 and not ( + -arr.shape[ax] <= indx < arr.shape[ax] + ): + raise IndexError + if indx.ndim == 0: + # The index is a scalar. This used to be two fold, but if + # fancy indexing was active, the check was done later, + # possibly after broadcasting it away (1.7. or earlier). + # Now it is always done. + if indx >= arr.shape[ax] or indx < - arr.shape[ax]: + raise IndexError + if (len(indices) > 0 and + indices[-1][0] == 'f' and + ax != ellipsis_pos): + # NOTE: There could still have been a 0-sized Ellipsis + # between them. Checked that with ellipsis_pos. + indices[-1].append(indx) + else: + # We have a fancy index that is not after an existing one. + # NOTE: A 0-d array triggers this as well, while one may + # expect it to not trigger it, since a scalar would not be + # considered fancy indexing. + num_fancy += 1 + indices.append(['f', indx]) + + if num_fancy > 1 and not no_copy: + # We have to flush the fancy indexes left + new_indices = indices[:] + axes = list(range(arr.ndim)) + fancy_axes = [] + new_indices.insert(0, ['f']) + ni = 0 + ai = 0 + for indx in indices: + ni += 1 + if indx[0] == 'f': + new_indices[0].extend(indx[1:]) + del new_indices[ni] + ni -= 1 + for ax in range(ai, ai + len(indx[1:])): + fancy_axes.append(ax) + axes.remove(ax) + ai += len(indx) - 1 # axis we are at + indices = new_indices + # and now we need to transpose arr: + arr = arr.transpose(*(fancy_axes + axes)) + + # We only have one 'f' index now and arr is transposed accordingly. + # Now handle newaxis by reshaping... + ax = 0 + for indx in indices: + if indx[0] == 'f': + if len(indx) == 1: + continue + # First of all, reshape arr to combine fancy axes into one: + orig_shape = arr.shape + orig_slice = orig_shape[ax:ax + len(indx[1:])] + arr = arr.reshape(arr.shape[:ax] + + (np.prod(orig_slice).astype(int),) + + arr.shape[ax + len(indx[1:]):]) + + # Check if broadcasting works + res = np.broadcast(*indx[1:]) + # unfortunately the indices might be out of bounds. So check + # that first, and use mode='wrap' then. However only if + # there are any indices... + if res.size != 0: + if error_unless_broadcast_to_empty: + raise IndexError + for _indx, _size in zip(indx[1:], orig_slice): + if _indx.size == 0: + continue + if np.any(_indx >= _size) or np.any(_indx < -_size): + raise IndexError + if len(indx[1:]) == len(orig_slice): + if np.prod(orig_slice) == 0: + # Work around for a crash or IndexError with 'wrap' + # in some 0-sized cases. + try: + mi = np.ravel_multi_index(indx[1:], orig_slice, + mode='raise') + except Exception: + # This happens with 0-sized orig_slice (sometimes?) + # here it is a ValueError, but indexing gives a: + raise IndexError('invalid index into 0-sized') + else: + mi = np.ravel_multi_index(indx[1:], orig_slice, + mode='wrap') + else: + # Maybe never happens... + raise ValueError + arr = arr.take(mi.ravel(), axis=ax) + try: + arr = arr.reshape(arr.shape[:ax] + + mi.shape + + arr.shape[ax + 1:]) + except ValueError: + # too many dimensions, probably + raise IndexError + ax += mi.ndim + continue + + # If we are here, we have a 1D array for take: + arr = arr.take(indx[1], axis=ax) + ax += 1 + + return arr, no_copy + + def _check_multi_index(self, arr, index): + """Check a multi index item getting and simple setting. + + Parameters + ---------- + arr : ndarray + Array to be indexed, must be a reshaped arange. + index : tuple of indexing objects + Index being tested. + """ + # Test item getting + try: + mimic_get, no_copy = self._get_multi_index(arr, index) + except Exception as e: + if HAS_REFCOUNT: + prev_refcount = sys.getrefcount(arr) + assert_raises(type(e), arr.__getitem__, index) + assert_raises(type(e), arr.__setitem__, index, 0) + if HAS_REFCOUNT: + assert_equal(prev_refcount, sys.getrefcount(arr)) + return + + self._compare_index_result(arr, index, mimic_get, no_copy) + + def _check_single_index(self, arr, index): + """Check a single index item getting and simple setting. + + Parameters + ---------- + arr : ndarray + Array to be indexed, must be an arange. + index : indexing object + Index being tested. Must be a single index and not a tuple + of indexing objects (see also `_check_multi_index`). + """ + try: + mimic_get, no_copy = self._get_multi_index(arr, (index,)) + except Exception as e: + if HAS_REFCOUNT: + prev_refcount = sys.getrefcount(arr) + assert_raises(type(e), arr.__getitem__, index) + assert_raises(type(e), arr.__setitem__, index, 0) + if HAS_REFCOUNT: + assert_equal(prev_refcount, sys.getrefcount(arr)) + return + + self._compare_index_result(arr, index, mimic_get, no_copy) + + def _compare_index_result(self, arr, index, mimic_get, no_copy): + """Compare mimicked result to indexing result. + """ + arr = arr.copy() + if HAS_REFCOUNT: + startcount = sys.getrefcount(arr) + indexed_arr = arr[index] + assert_array_equal(indexed_arr, mimic_get) + # Check if we got a view, unless its a 0-sized or 0-d array. + # (then its not a view, and that does not matter) + if indexed_arr.size != 0 and indexed_arr.ndim != 0: + assert_(np.may_share_memory(indexed_arr, arr) == no_copy) + # Check reference count of the original array + if HAS_REFCOUNT: + if no_copy: + # refcount increases by one: + assert_equal(sys.getrefcount(arr), startcount + 1) + else: + assert_equal(sys.getrefcount(arr), startcount) + + # Test non-broadcast setitem: + b = arr.copy() + b[index] = mimic_get + 1000 + if b.size == 0: + return # nothing to compare here... + if no_copy and indexed_arr.ndim != 0: + # change indexed_arr in-place to manipulate original: + indexed_arr += 1000 + assert_array_equal(arr, b) + return + # Use the fact that the array is originally an arange: + arr.flat[indexed_arr.ravel()] += 1000 + assert_array_equal(arr, b) + + def test_boolean(self): + a = np.array(5) + assert_equal(a[np.array(True)], 5) + a[np.array(True)] = 1 + assert_equal(a, 1) + # NOTE: This is different from normal broadcasting, as + # arr[boolean_array] works like in a multi index. Which means + # it is aligned to the left. This is probably correct for + # consistency with arr[boolean_array,] also no broadcasting + # is done at all + a = self._create_array() + self._check_multi_index( + a, (np.zeros_like(a, dtype=bool),)) + self._check_multi_index( + a, (np.zeros_like(a, dtype=bool)[..., 0],)) + self._check_multi_index( + a, (np.zeros_like(a, dtype=bool)[None, ...],)) + + def test_multidim(self): + # Automatically test combinations with complex indexes on 2nd (or 1st) + # spot and the simple ones in one other spot. + a = self._create_array() + b = np.empty((3, 0, 5, 6)) + complex_indices = self._create_complex_indices() + simple_indices = [Ellipsis, None, -1, [1], np.array([True]), 'skip'] + fill_indices = [slice(None, None), 0] + + with warnings.catch_warnings(): + # This is so that np.array(True) is not accepted in a full integer + # index, when running the file separately. + warnings.filterwarnings('error', '', DeprecationWarning) + warnings.filterwarnings('error', '', VisibleDeprecationWarning) + + def isskip(idx): + return isinstance(idx, str) and idx == "skip" + + for simple_pos in [0, 2, 3]: + tocheck = [fill_indices, complex_indices, + fill_indices, fill_indices] + tocheck[simple_pos] = simple_indices + for index in product(*tocheck): + index = tuple(i for i in index if not isskip(i)) + self._check_multi_index(a, index) + self._check_multi_index(b, index) + + # Check very simple item getting: + self._check_multi_index(a, (0, 0, 0, 0)) + self._check_multi_index(b, (0, 0, 0, 0)) + # Also check (simple cases of) too many indices: + assert_raises(IndexError, a.__getitem__, (0, 0, 0, 0, 0)) + assert_raises(IndexError, a.__setitem__, (0, 0, 0, 0, 0), 0) + assert_raises(IndexError, a.__getitem__, (0, 0, [1], 0, 0)) + assert_raises(IndexError, a.__setitem__, (0, 0, [1], 0, 0), 0) + + def test_1d(self): + a = np.arange(10) + complex_indices = self._create_complex_indices() + for index in complex_indices: + self._check_single_index(a, index) + + +class TestFloatNonIntegerArgument: + """ + These test that ``TypeError`` is raised when you try to use + non-integers as arguments to for indexing and slicing e.g. ``a[0.0:5]`` + and ``a[0.5]``, or other functions like ``array.reshape(1., -1)``. + + """ + def test_valid_indexing(self): + # These should raise no errors. + a = np.array([[[5]]]) + + a[np.array([0])] + a[[0, 0]] + a[:, [0, 0]] + a[:, 0, :] + a[:, :, :] + + def test_valid_slicing(self): + # These should raise no errors. + a = np.array([[[5]]]) + + a[::] + a[0:] + a[:2] + a[0:2] + a[::2] + a[1::2] + a[:2:2] + a[1:2:2] + + def test_non_integer_argument_errors(self): + a = np.array([[5]]) + + assert_raises(TypeError, np.reshape, a, (1., 1., -1)) + assert_raises(TypeError, np.reshape, a, (np.array(1.), -1)) + assert_raises(TypeError, np.take, a, [0], 1.) + assert_raises(TypeError, np.take, a, [0], np.float64(1.)) + + def test_non_integer_sequence_multiplication(self): + # NumPy scalar sequence multiply should not work with non-integers + def mult(a, b): + return a * b + + assert_raises(TypeError, mult, [1], np.float64(3)) + # following should be OK + mult([1], np.int_(3)) + + def test_reduce_axis_float_index(self): + d = np.zeros((3, 3, 3)) + assert_raises(TypeError, np.min, d, 0.5) + assert_raises(TypeError, np.min, d, (0.5, 1)) + assert_raises(TypeError, np.min, d, (1, 2.2)) + assert_raises(TypeError, np.min, d, (.2, 1.2)) + + +class TestBooleanIndexing: + # Using a boolean as integer argument/indexing is an error. + def test_bool_as_int_argument_errors(self): + a = np.array([[[1]]]) + + assert_raises(TypeError, np.reshape, a, (True, -1)) + assert_raises(TypeError, np.reshape, a, (np.bool(True), -1)) + # Note that operator.index(np.array(True)) does not work, a boolean + # array is thus also deprecated, but not with the same message: + assert_raises(TypeError, operator.index, np.array(True)) + assert_raises(TypeError, operator.index, np.True_) + assert_raises(TypeError, np.take, args=(a, [0], False)) + + def test_boolean_indexing_weirdness(self): + # Weird boolean indexing things + a = np.ones((2, 3, 4)) + assert a[False, True, ...].shape == (0, 2, 3, 4) + assert a[True, [0, 1], True, True, [1], [[2]]].shape == (1, 2) + assert_raises(IndexError, lambda: a[False, [0, 1], ...]) + + def test_boolean_indexing_fast_path(self): + # These used to either give the wrong error, or incorrectly give no + # error. + a = np.ones((3, 3)) + + # This used to incorrectly work (and give an array of shape (0,)) + idx1 = np.array([[False] * 9]) + assert_raises_regex(IndexError, + "boolean index did not match indexed array along axis 0; " + "size of axis is 3 but size of corresponding boolean axis is 1", + lambda: a[idx1]) + + # This used to incorrectly give a ValueError: operands could not be + # broadcast together + idx2 = np.array([[False] * 8 + [True]]) + assert_raises_regex(IndexError, + "boolean index did not match indexed array along axis 0; " + "size of axis is 3 but size of corresponding boolean axis is 1", + lambda: a[idx2]) + + # This is the same as it used to be. The above two should work like this. + idx3 = np.array([[False] * 10]) + assert_raises_regex(IndexError, + "boolean index did not match indexed array along axis 0; " + "size of axis is 3 but size of corresponding boolean axis is 1", + lambda: a[idx3]) + + # This used to give ValueError: non-broadcastable operand + a = np.ones((1, 1, 2)) + idx = np.array([[[True], [False]]]) + assert_raises_regex(IndexError, + "boolean index did not match indexed array along axis 1; " + "size of axis is 1 but size of corresponding boolean axis is 2", + lambda: a[idx]) + + +class TestArrayToIndexDeprecation: + """Creating an index from array not 0-D is an error. + + """ + def test_array_to_index_error(self): + # so no exception is expected. The raising is effectively tested above. + a = np.array([[[1]]]) + + assert_raises(TypeError, operator.index, np.array([1])) + assert_raises(TypeError, np.reshape, a, (a, -1)) + assert_raises(TypeError, np.take, a, [0], a) + + +class TestNonIntegerArrayLike: + """Tests that array_likes only valid if can safely cast to integer. + + For instance, lists give IndexError when they cannot be safely cast to + an integer. + + """ + def test_basic(self): + a = np.arange(10) + + assert_raises(IndexError, a.__getitem__, [0.5, 1.5]) + assert_raises(IndexError, a.__getitem__, (['1', '2'],)) + + # The following is valid + a.__getitem__([]) + + +class TestMultipleEllipsisError: + """An index can only have a single ellipsis. + + """ + def test_basic(self): + a = np.arange(10) + assert_raises(IndexError, lambda: a[..., ...]) + assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 2,)) + assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,)) + + +class TestCApiAccess: + def test_getitem(self): + subscript = functools.partial(array_indexing, 0) + + # 0-d arrays don't work: + assert_raises(IndexError, subscript, np.ones(()), 0) + # Out of bound values: + assert_raises(IndexError, subscript, np.ones(10), 11) + assert_raises(IndexError, subscript, np.ones(10), -11) + assert_raises(IndexError, subscript, np.ones((10, 10)), 11) + assert_raises(IndexError, subscript, np.ones((10, 10)), -11) + + a = np.arange(10) + assert_array_equal(a[4], subscript(a, 4)) + a = a.reshape(5, 2) + assert_array_equal(a[-4], subscript(a, -4)) + + def test_setitem(self): + assign = functools.partial(array_indexing, 1) + + # Deletion is impossible: + assert_raises(ValueError, assign, np.ones(10), 0) + # 0-d arrays don't work: + assert_raises(IndexError, assign, np.ones(()), 0, 0) + # Out of bound values: + assert_raises(IndexError, assign, np.ones(10), 11, 0) + assert_raises(IndexError, assign, np.ones(10), -11, 0) + assert_raises(IndexError, assign, np.ones((10, 10)), 11, 0) + assert_raises(IndexError, assign, np.ones((10, 10)), -11, 0) + + a = np.arange(10) + assign(a, 4, 10) + assert_(a[4] == 10) + + a = a.reshape(5, 2) + assign(a, 4, 10) + assert_array_equal(a[-1], [10, 10]) + + +class TestFlatiterIndexing: + def test_flatiter_indexing_single_integer(self): + a = np.arange(9).reshape((3, 3)) + assert_array_equal(a.flat[0], 0) + assert_array_equal(a.flat[4], 4) + assert_array_equal(a.flat[-1], 8) + + with pytest.raises(IndexError, match="index 9 is out of bounds"): + a.flat[9] + + def test_flatiter_indexing_slice(self): + a = np.arange(9).reshape((3, 3)) + assert_array_equal(a.flat[:], np.arange(9)) + assert_array_equal(a.flat[:5], np.arange(5)) + assert_array_equal(a.flat[5:10], np.arange(5, 9)) + assert_array_equal(a.flat[::2], np.arange(0, 9, 2)) + assert_array_equal(a.flat[::-1], np.arange(8, -1, -1)) + assert_array_equal(a.flat[10:5], np.array([])) + + assert_array_equal(a.flat[()], np.arange(9)) + assert_array_equal(a.flat[...], np.arange(9)) + + def test_flatiter_indexing_boolean(self): + a = np.arange(9).reshape((3, 3)) + + with pytest.warns(DeprecationWarning, match="0-dimensional boolean index"): + assert_array_equal(a.flat[True], 0) + with pytest.warns(DeprecationWarning, match="0-dimensional boolean index"): + assert_array_equal(a.flat[False], np.array([])) + + mask = np.zeros(len(a.flat), dtype=bool) + mask[::2] = True + assert_array_equal(a.flat[mask], np.arange(0, 9, 2)) + + wrong_mask = np.zeros(len(a.flat) + 1, dtype=bool) + with pytest.raises(IndexError, + match="boolean index did not match indexed flat iterator"): + a.flat[wrong_mask] + + def test_flatiter_indexing_fancy(self): + a = np.arange(9).reshape((3, 3)) + + indices = np.array([1, 3, 5]) + assert_array_equal(a.flat[indices], indices) + + assert_array_equal(a.flat[[-1, -2]], np.array([8, 7])) + + indices_2d = np.array([[1, 2], [3, 4]]) + assert_array_equal(a.flat[indices_2d], indices_2d) + + assert_array_equal(a.flat[[True, 1]], np.array([1, 1])) + + assert_array_equal(a.flat[[]], np.array([], dtype=a.dtype)) + + with pytest.raises(IndexError, + match="boolean indices for iterators are not supported"): + a.flat[[True, True]] + + a = np.arange(3) + with pytest.raises(IndexError, + match="boolean indices for iterators are not supported"): + a.flat[[True, False, True]] + assert_array_equal(a.flat[np.asarray([True, False, True])], np.array([0, 2])) + + def test_flatiter_indexing_not_supported_newaxis_mutlidimensional_float(self): + a = np.arange(9).reshape((3, 3)) + with pytest.raises(IndexError, + match=r"only integers, slices \(`:`\), " + r"ellipsis \(`\.\.\.`\) and " + r"integer or boolean arrays are valid indices"): + a.flat[None] + + with pytest.raises(IndexError, + match=r"too many indices for flat iterator: flat iterator " + r"is 1-dimensional, but 2 were indexed"): + a.flat[1, 2] + + with pytest.warns(DeprecationWarning, + match="Invalid non-array indices for iterator objects are " + "deprecated"): + assert_array_equal(a.flat[[1.0, 2.0]], np.array([1, 2])) + + def test_flatiter_assign_single_integer(self): + a = np.arange(9).reshape((3, 3)) + + a.flat[0] = 10 + assert_array_equal(a, np.array([[10, 1, 2], [3, 4, 5], [6, 7, 8]])) + + a.flat[4] = 20 + assert_array_equal(a, np.array([[10, 1, 2], [3, 20, 5], [6, 7, 8]])) + + a.flat[-1] = 30 + assert_array_equal(a, np.array([[10, 1, 2], [3, 20, 5], [6, 7, 30]])) + + with pytest.raises(IndexError, match="index 9 is out of bounds"): + a.flat[9] = 40 + + def test_flatiter_indexing_slice_assign(self): + a = np.arange(9).reshape((3, 3)) + a.flat[:] = 10 + assert_array_equal(a, np.full((3, 3), 10)) + + a = np.arange(9).reshape((3, 3)) + a.flat[:5] = 20 + assert_array_equal(a, np.array([[20, 20, 20], [20, 20, 5], [6, 7, 8]])) + + a = np.arange(9).reshape((3, 3)) + a.flat[5:10] = 30 + assert_array_equal(a, np.array([[0, 1, 2], [3, 4, 30], [30, 30, 30]])) + + a = np.arange(9).reshape((3, 3)) + a.flat[::2] = 40 + assert_array_equal(a, np.array([[40, 1, 40], [3, 40, 5], [40, 7, 40]])) + + a = np.arange(9).reshape((3, 3)) + a.flat[::-1] = 50 + assert_array_equal(a, np.full((3, 3), 50)) + + a = np.arange(9).reshape((3, 3)) + a.flat[10:5] = 60 + assert_array_equal(a, np.arange(9).reshape((3, 3))) + + a = np.arange(9).reshape((3, 3)) + with pytest.raises(IndexError, + match="Assigning to a flat iterator with a 0-D index"): + a.flat[()] = 70 + + a = np.arange(9).reshape((3, 3)) + a.flat[...] = 80 + assert_array_equal(a, np.full((3, 3), 80)) + + def test_flatiter_indexing_boolean_assign(self): + a = np.arange(9).reshape((3, 3)) + with pytest.warns(DeprecationWarning, match="0-dimensional boolean index"): + a.flat[True] = 10 + assert_array_equal(a, np.array([[10, 1, 2], [3, 4, 5], [6, 7, 8]])) + + a = np.arange(9).reshape((3, 3)) + with pytest.warns(DeprecationWarning, match="0-dimensional boolean index"): + a.flat[False] = 20 + assert_array_equal(a, np.arange(9).reshape((3, 3))) + + a = np.arange(9).reshape((3, 3)) + mask = np.zeros(len(a.flat), dtype=bool) + mask[::2] = True + a.flat[mask] = 30 + assert_array_equal(a, np.array([[30, 1, 30], [3, 30, 5], [30, 7, 30]])) + + wrong_mask = np.zeros(len(a.flat) + 1, dtype=bool) + with pytest.raises(IndexError, + match="boolean index did not match indexed flat iterator"): + a.flat[wrong_mask] = 40 + + def test_flatiter_indexing_fancy_assign(self): + a = np.arange(9).reshape((3, 3)) + indices = np.array([1, 3, 5]) + a.flat[indices] = 10 + assert_array_equal(a, np.array([[0, 10, 2], [10, 4, 10], [6, 7, 8]])) + + a.flat[[-1, -2]] = 20 + assert_array_equal(a, np.array([[0, 10, 2], [10, 4, 10], [6, 20, 20]])) + + a = np.arange(9).reshape((3, 3)) + indices_2d = np.array([[1, 2], [3, 4]]) + a.flat[indices_2d] = 30 + assert_array_equal(a, np.array([[0, 30, 30], [30, 30, 5], [6, 7, 8]])) + + a.flat[[True, 1]] = 40 + assert_array_equal(a, np.array([[0, 40, 30], [30, 30, 5], [6, 7, 8]])) + + with pytest.raises(IndexError, + match="boolean indices for iterators are not supported"): + a.flat[[True, True]] = 50 + + a = np.arange(3) + with pytest.raises(IndexError, + match="boolean indices for iterators are not supported"): + a.flat[[True, False, True]] = 20 + a.flat[np.asarray([True, False, True])] = 20 + assert_array_equal(a, np.array([20, 1, 20])) + + def test_flatiter_indexing_fancy_int16_dtype(self): + a = np.arange(9).reshape((3, 3)) + indices = np.array([1, 3, 5], dtype=np.int16) + assert_array_equal(a.flat[indices], np.array([1, 3, 5])) + + a.flat[indices] = 10 + assert_array_equal(a, np.array([[0, 10, 2], [10, 4, 10], [6, 7, 8]])) + + def test_flatiter_indexing_not_supported_newaxis_mutlid_float_assign(self): + a = np.arange(9).reshape((3, 3)) + with pytest.raises(IndexError, + match=r"only integers, slices \(`:`\), " + r"ellipsis \(`\.\.\.`\) and " + r"integer or boolean arrays are valid indices"): + a.flat[None] = 10 + + a.flat[[1, 2]] = 10 + assert_array_equal(a, np.array([[0, 10, 10], [3, 4, 5], [6, 7, 8]])) + + with pytest.warns(DeprecationWarning, + match="Invalid non-array indices for iterator objects are " + "deprecated"): + a.flat[[1.0, 2.0]] = 20 + assert_array_equal(a, np.array([[0, 20, 20], [3, 4, 5], [6, 7, 8]])) + + def test_flat_index_on_flatiter(self): + a = np.arange(9).reshape((3, 3)) + b = np.array([0, 5, 6]) + assert_equal(a.flat[b.flat], np.array([0, 5, 6])) + + def test_empty_string_flat_index_on_flatiter(self): + a = np.arange(9).reshape((3, 3)) + b = np.array([], dtype="S") + # This is arguably incorrect, and should be removed (ideally with + # deprecation). But it matches the array path and comes from not + # distinguishing `arr[np.array([]).flat]` and `arr[[]]` and the latter + # must pass. + assert_equal(a.flat[b.flat], np.array([])) + + def test_nonempty_string_flat_index_on_flatiter(self): + a = np.arange(9).reshape((3, 3)) + b = np.array(["a"], dtype="S") + with pytest.raises(IndexError, + match=r"only integers, slices \(`:`\), ellipsis \(`\.\.\.`\) " + r"and integer or boolean arrays are valid indices"): + a.flat[b.flat] + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +@pytest.mark.parametrize("methodname", ["__array__", "copy"]) +def test_flatiter_method_signatures(methodname: str): + method = getattr(np.flatiter, methodname) + assert callable(method) + + try: + sig = inspect.signature(method) + except ValueError as e: + pytest.fail(f"Could not get signature for np.flatiter.{methodname}: {e}") + + assert "self" in sig.parameters + assert sig.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY diff --git a/local_packages/numpy/_core/tests/test_item_selection.py b/local_packages/numpy/_core/tests/test_item_selection.py new file mode 100644 index 0000000..79fb82d --- /dev/null +++ b/local_packages/numpy/_core/tests/test_item_selection.py @@ -0,0 +1,167 @@ +import sys + +import pytest + +import numpy as np +from numpy.testing import HAS_REFCOUNT, assert_, assert_array_equal, assert_raises + + +class TestTake: + def test_simple(self): + a = [[1, 2], [3, 4]] + a_str = [[b'1', b'2'], [b'3', b'4']] + modes = ['raise', 'wrap', 'clip'] + indices = [-1, 4] + index_arrays = [np.empty(0, dtype=np.intp), + np.empty((), dtype=np.intp), + np.empty((1, 1), dtype=np.intp)] + real_indices = {'raise': {-1: 1, 4: IndexError}, + 'wrap': {-1: 1, 4: 0}, + 'clip': {-1: 0, 4: 1}} + # Currently all types but object, use the same function generation. + # So it should not be necessary to test all. However test also a non + # refcounted struct on top of object, which has a size that hits the + # default (non-specialized) path. + types = int, object, np.dtype([('', 'i2', 3)]) + for t in types: + # ta works, even if the array may be odd if buffer interface is used + ta = np.array(a if np.issubdtype(t, np.number) else a_str, dtype=t) + tresult = list(ta.T.copy()) + for index_array in index_arrays: + if index_array.size != 0: + tresult[0].shape = (2,) + index_array.shape + tresult[1].shape = (2,) + index_array.shape + for mode in modes: + for index in indices: + real_index = real_indices[mode][index] + if real_index is IndexError and index_array.size != 0: + index_array.put(0, index) + assert_raises(IndexError, ta.take, index_array, + mode=mode, axis=1) + elif index_array.size != 0: + index_array.put(0, index) + res = ta.take(index_array, mode=mode, axis=1) + assert_array_equal(res, tresult[real_index]) + else: + res = ta.take(index_array, mode=mode, axis=1) + assert_(res.shape == (2,) + index_array.shape) + + def test_refcounting(self): + objects = [object() for i in range(10)] + if HAS_REFCOUNT: + orig_rcs = [sys.getrefcount(o) for o in objects] + for mode in ('raise', 'clip', 'wrap'): + a = np.array(objects) + b = np.array([2, 2, 4, 5, 3, 5]) + a.take(b, out=a[:6], mode=mode) + del a + if HAS_REFCOUNT: + assert_(all(sys.getrefcount(o) == rc + 1 + for o, rc in zip(objects, orig_rcs))) + # not contiguous, example: + a = np.array(objects * 2)[::2] + a.take(b, out=a[:6], mode=mode) + del a + if HAS_REFCOUNT: + assert_(all(sys.getrefcount(o) == rc + 1 + for o, rc in zip(objects, orig_rcs))) + + def test_unicode_mode(self): + d = np.arange(10) + k = b'\xc3\xa4'.decode("UTF8") + assert_raises(ValueError, d.take, 5, mode=k) + + def test_empty_partition(self): + # In reference to github issue #6530 + a_original = np.array([0, 2, 4, 6, 8, 10]) + a = a_original.copy() + + # An empty partition should be a successful no-op + a.partition(np.array([], dtype=np.int16)) + + assert_array_equal(a, a_original) + + def test_empty_argpartition(self): + # In reference to github issue #6530 + a = np.array([0, 2, 4, 6, 8, 10]) + a = a.argpartition(np.array([], dtype=np.int16)) + + b = np.array([0, 1, 2, 3, 4, 5]) + assert_array_equal(a, b) + + +class TestPutMask: + @pytest.mark.parametrize("dtype", list(np.typecodes["All"]) + ["i,O"]) + def test_simple(self, dtype): + if dtype.lower() == "m": + dtype += "8[ns]" + + # putmask is weird and doesn't care about value length (even shorter) + vals = np.arange(1001).astype(dtype=dtype) + + mask = np.random.randint(2, size=1000).astype(bool) + # Use vals.dtype in case of flexible dtype (i.e. string) + arr = np.zeros(1000, dtype=vals.dtype) + zeros = arr.copy() + + np.putmask(arr, mask, vals) + assert_array_equal(arr[mask], vals[:len(mask)][mask]) + assert_array_equal(arr[~mask], zeros[~mask]) + + @pytest.mark.parametrize("dtype", list(np.typecodes["All"])[1:] + ["i,O"]) + @pytest.mark.parametrize("mode", ["raise", "wrap", "clip"]) + def test_empty(self, dtype, mode): + arr = np.zeros(1000, dtype=dtype) + arr_copy = arr.copy() + mask = np.random.randint(2, size=1000).astype(bool) + + # Allowing empty values like this is weird... + np.put(arr, mask, []) + assert_array_equal(arr, arr_copy) + + +class TestPut: + @pytest.mark.parametrize("dtype", list(np.typecodes["All"])[1:] + ["i,O"]) + @pytest.mark.parametrize("mode", ["raise", "wrap", "clip"]) + def test_simple(self, dtype, mode): + if dtype.lower() == "m": + dtype += "8[ns]" + + # put is weird and doesn't care about value length (even shorter) + vals = np.arange(1001).astype(dtype=dtype) + + # Use vals.dtype in case of flexible dtype (i.e. string) + arr = np.zeros(1000, dtype=vals.dtype) + zeros = arr.copy() + + if mode == "clip": + # Special because 0 and -1 value are "reserved" for clip test + indx = np.random.permutation(len(arr) - 2)[:-500] + 1 + + indx[-1] = 0 + indx[-2] = len(arr) - 1 + indx_put = indx.copy() + indx_put[-1] = -1389 + indx_put[-2] = 1321 + else: + # Avoid duplicates (for simplicity) and fill half only + indx = np.random.permutation(len(arr) - 3)[:-500] + indx_put = indx + if mode == "wrap": + indx_put = indx_put + len(arr) + + np.put(arr, indx_put, vals, mode=mode) + assert_array_equal(arr[indx], vals[:len(indx)]) + untouched = np.ones(len(arr), dtype=bool) + untouched[indx] = False + assert_array_equal(arr[untouched], zeros[:untouched.sum()]) + + @pytest.mark.parametrize("dtype", list(np.typecodes["All"])[1:] + ["i,O"]) + @pytest.mark.parametrize("mode", ["raise", "wrap", "clip"]) + def test_empty(self, dtype, mode): + arr = np.zeros(1000, dtype=dtype) + arr_copy = arr.copy() + + # Allowing empty values like this is weird... + np.put(arr, [1, 2, 3], []) + assert_array_equal(arr, arr_copy) diff --git a/local_packages/numpy/_core/tests/test_limited_api.py b/local_packages/numpy/_core/tests/test_limited_api.py new file mode 100644 index 0000000..984210e --- /dev/null +++ b/local_packages/numpy/_core/tests/test_limited_api.py @@ -0,0 +1,102 @@ +import os +import subprocess +import sys +import sysconfig + +import pytest + +from numpy.testing import IS_EDITABLE, IS_PYPY, IS_WASM, NOGIL_BUILD + +# This import is copied from random.tests.test_extending +try: + import cython + from Cython.Compiler.Version import version as cython_version +except ImportError: + cython = None +else: + from numpy._utils import _pep440 + + # Note: keep in sync with the one in pyproject.toml + required_version = "3.0.6" + if _pep440.parse(cython_version) < _pep440.Version(required_version): + # too old or wrong cython, skip the test + cython = None + +pytestmark = pytest.mark.skipif(cython is None, reason="requires cython") + + +if IS_EDITABLE: + pytest.skip( + "Editable install doesn't support tests with a compile step", + allow_module_level=True + ) + + +@pytest.fixture(scope='module') +def install_temp(tmpdir_factory): + # Based in part on test_cython from random.tests.test_extending + if IS_WASM: + pytest.skip("No subprocess") + + srcdir = os.path.join(os.path.dirname(__file__), 'examples', 'limited_api') + build_dir = tmpdir_factory.mktemp("limited_api") / "build" + os.makedirs(build_dir, exist_ok=True) + # Ensure we use the correct Python interpreter even when `meson` is + # installed in a different Python environment (see gh-24956) + native_file = str(build_dir / 'interpreter-native-file.ini') + with open(native_file, 'w') as f: + f.write("[binaries]\n") + f.write(f"python = '{sys.executable}'\n") + f.write(f"python3 = '{sys.executable}'") + + try: + subprocess.check_call(["meson", "--version"]) + except FileNotFoundError: + pytest.skip("No usable 'meson' found") + if sysconfig.get_platform() == "win-arm64": + pytest.skip("Meson unable to find MSVC linker on win-arm64") + if sys.platform == "win32": + subprocess.check_call(["meson", "setup", + "--werror", + "--buildtype=release", + "--vsenv", "--native-file", native_file, + str(srcdir)], + cwd=build_dir, + ) + else: + subprocess.check_call(["meson", "setup", "--werror", + "--native-file", native_file, str(srcdir)], + cwd=build_dir + ) + try: + subprocess.check_call( + ["meson", "compile", "-vv"], cwd=build_dir) + except subprocess.CalledProcessError as p: + print(f"{p.stdout=}") + print(f"{p.stderr=}") + raise + + sys.path.append(str(build_dir)) + + +@pytest.mark.skipif(IS_WASM, reason="Can't start subprocess") +@pytest.mark.xfail( + sysconfig.get_config_var("Py_DEBUG"), + reason=( + "Py_LIMITED_API is incompatible with Py_DEBUG, Py_TRACE_REFS, " + "and Py_REF_DEBUG" + ), +) +@pytest.mark.xfail( + NOGIL_BUILD, + reason="Py_GIL_DISABLED builds do not currently support the limited API", +) +@pytest.mark.skipif(IS_PYPY, reason="no support for limited API in PyPy") +def test_limited_api(install_temp): + """Test building a third-party C extension with the limited API + and building a cython extension with the limited API + """ + + import limited_api1 # Earliest (3.6) # noqa: F401 + import limited_api2 # cython # noqa: F401 + import limited_api_latest # Latest version (current Python) # noqa: F401 diff --git a/local_packages/numpy/_core/tests/test_longdouble.py b/local_packages/numpy/_core/tests/test_longdouble.py new file mode 100644 index 0000000..a7aa914 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_longdouble.py @@ -0,0 +1,370 @@ +import platform +import warnings + +import pytest + +import numpy as np +from numpy._core.tests._locales import CommaDecimalPointLocale +from numpy.testing import ( + IS_MUSL, + assert_, + assert_array_equal, + assert_equal, + assert_raises, + temppath, +) + +LD_INFO = np.finfo(np.longdouble) +longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps) + + +_o = 1 + LD_INFO.eps +string_to_longdouble_inaccurate = (_o != np.longdouble(str(_o))) +del _o + + +def test_scalar_extraction(): + """Confirm that extracting a value doesn't convert to python float""" + o = 1 + LD_INFO.eps + a = np.array([o, o, o]) + assert_equal(a[1], o) + + +# Conversions string -> long double + +# 0.1 not exactly representable in base 2 floating point. +repr_precision = len(repr(np.longdouble(0.1))) +# +2 from macro block starting around line 842 in scalartypes.c.src. + + +@pytest.mark.skipif(IS_MUSL, + reason="test flaky on musllinux") +@pytest.mark.skipif(LD_INFO.precision + 2 >= repr_precision, + reason="repr precision not enough to show eps") +def test_str_roundtrip(): + # We will only see eps in repr if within printing precision. + o = 1 + LD_INFO.eps + assert_equal(np.longdouble(str(o)), o, f"str was {str(o)}") + + +@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") +def test_str_roundtrip_bytes(): + o = 1 + LD_INFO.eps + assert_equal(np.longdouble(str(o).encode("ascii")), o) + + +@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") +@pytest.mark.parametrize("strtype", (np.str_, np.bytes_, str, bytes)) +def test_array_and_stringlike_roundtrip(strtype): + """ + Test that string representations of long-double roundtrip both + for array casting and scalar coercion, see also gh-15608. + """ + o = 1 + LD_INFO.eps + + if strtype in (np.bytes_, bytes): + o_str = strtype(str(o).encode("ascii")) + else: + o_str = strtype(str(o)) + + # Test that `o` is correctly coerced from the string-like + assert o == np.longdouble(o_str) + + # Test that arrays also roundtrip correctly: + o_strarr = np.asarray([o] * 3, dtype=strtype) + assert (o == o_strarr.astype(np.longdouble)).all() + + # And array coercion and casting to string give the same as scalar repr: + assert (o_strarr == o_str).all() + assert (np.asarray([o] * 3).astype(strtype) == o_str).all() + + +def test_bogus_string(): + assert_raises(ValueError, np.longdouble, "spam") + assert_raises(ValueError, np.longdouble, "1.0 flub") + + +@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") +def test_fromstring(): + o = 1 + LD_INFO.eps + s = (" " + str(o)) * 5 + a = np.array([o] * 5) + assert_equal(np.fromstring(s, sep=" ", dtype=np.longdouble), a, + err_msg=f"reading '{s}'") + + +def test_fromstring_complex(): + for ctype in ["complex", "cdouble"]: + # Check spacing between separator + assert_equal(np.fromstring("1, 2 , 3 ,4", sep=",", dtype=ctype), + np.array([1., 2., 3., 4.])) + # Real component not specified + assert_equal(np.fromstring("1j, -2j, 3j, 4e1j", sep=",", dtype=ctype), + np.array([1.j, -2.j, 3.j, 40.j])) + # Both components specified + assert_equal(np.fromstring("1+1j,2-2j, -3+3j, -4e1+4j", sep=",", dtype=ctype), + np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j])) + # Spaces at wrong places + with assert_raises(ValueError): + np.fromstring("1+2 j,3", dtype=ctype, sep=",") + with assert_raises(ValueError): + np.fromstring("1+ 2j,3", dtype=ctype, sep=",") + with assert_raises(ValueError): + np.fromstring("1 +2j,3", dtype=ctype, sep=",") + with assert_raises(ValueError): + np.fromstring("1+j", dtype=ctype, sep=",") + with assert_raises(ValueError): + np.fromstring("1+", dtype=ctype, sep=",") + with assert_raises(ValueError): + np.fromstring("1j+1", dtype=ctype, sep=",") + + +def test_fromstring_bogus(): + with assert_raises(ValueError): + np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" ") + + +def test_fromstring_empty(): + with assert_raises(ValueError): + np.fromstring("xxxxx", sep="x") + + +def test_fromstring_missing(): + with assert_raises(ValueError): + np.fromstring("1xx3x4x5x6", sep="x") + + +class TestFileBased: + + ldbl = 1 + LD_INFO.eps + tgt = np.array([ldbl] * 5) + out = ''.join([str(t) + '\n' for t in tgt]) + + def test_fromfile_bogus(self): + with temppath() as path: + with open(path, 'w') as f: + f.write("1. 2. 3. flop 4.\n") + + with assert_raises(ValueError): + np.fromfile(path, dtype=float, sep=" ") + + def test_fromfile_complex(self): + for ctype in ["complex", "cdouble"]: + # Check spacing between separator and only real component specified + with temppath() as path: + with open(path, 'w') as f: + f.write("1, 2 , 3 ,4\n") + + res = np.fromfile(path, dtype=ctype, sep=",") + assert_equal(res, np.array([1., 2., 3., 4.])) + + # Real component not specified + with temppath() as path: + with open(path, 'w') as f: + f.write("1j, -2j, 3j, 4e1j\n") + + res = np.fromfile(path, dtype=ctype, sep=",") + assert_equal(res, np.array([1.j, -2.j, 3.j, 40.j])) + + # Both components specified + with temppath() as path: + with open(path, 'w') as f: + f.write("1+1j,2-2j, -3+3j, -4e1+4j\n") + + res = np.fromfile(path, dtype=ctype, sep=",") + assert_equal(res, np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j])) + + # Spaces at wrong places + with temppath() as path: + with open(path, 'w') as f: + f.write("1+2 j,3\n") + + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") + + # Spaces at wrong places + with temppath() as path: + with open(path, 'w') as f: + f.write("1+ 2j,3\n") + + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") + + # Spaces at wrong places + with temppath() as path: + with open(path, 'w') as f: + f.write("1 +2j,3\n") + + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") + + # Wrong sep + with temppath() as path: + with open(path, 'w') as f: + f.write("1+j\n") + + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") + + # Wrong sep + with temppath() as path: + with open(path, 'w') as f: + f.write("1+\n") + + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") + + # Wrong sep + with temppath() as path: + with open(path, 'w') as f: + f.write("1j+1\n") + + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") + + @pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") + def test_fromfile(self): + with temppath() as path: + with open(path, 'w') as f: + f.write(self.out) + res = np.fromfile(path, dtype=np.longdouble, sep="\n") + assert_equal(res, self.tgt) + + @pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") + def test_genfromtxt(self): + with temppath() as path: + with open(path, 'w') as f: + f.write(self.out) + res = np.genfromtxt(path, dtype=np.longdouble) + assert_equal(res, self.tgt) + + @pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") + def test_loadtxt(self): + with temppath() as path: + with open(path, 'w') as f: + f.write(self.out) + res = np.loadtxt(path, dtype=np.longdouble) + assert_equal(res, self.tgt) + + @pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") + def test_tofile_roundtrip(self): + with temppath() as path: + self.tgt.tofile(path, sep=" ") + res = np.fromfile(path, dtype=np.longdouble, sep=" ") + assert_equal(res, self.tgt) + + +# Conversions long double -> string + + +def test_str_exact(): + o = 1 + LD_INFO.eps + assert_(str(o) != '1') + + +@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376") +@pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") +def test_format(): + assert_(f"{1 + LD_INFO.eps:.40g}" != '1') + + +@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376") +@pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") +def test_percent(): + o = 1 + LD_INFO.eps + assert_(f"{o:.40g}" != '1') + + +@pytest.mark.skipif(longdouble_longer_than_double, + reason="array repr problem") +@pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") +def test_array_repr(): + o = 1 + LD_INFO.eps + a = np.array([o]) + b = np.array([1], dtype=np.longdouble) + if not np.all(a != b): + raise ValueError("precision loss creating arrays") + with np.printoptions(precision=LD_INFO.precision + 1): + assert_(repr(a) != repr(b)) + +# +# Locale tests: scalar types formatting should be independent of the locale +# + +class TestCommaDecimalPointLocale(CommaDecimalPointLocale): + + def test_str_roundtrip_foreign(self): + o = 1.5 + assert_equal(o, np.longdouble(str(o))) + + def test_fromstring_foreign_repr(self): + f = 1.234 + a = np.fromstring(repr(f), dtype=float, sep=" ") + assert_equal(a[0], f) + + def test_fromstring_foreign(self): + s = "1.234" + a = np.fromstring(s, dtype=np.longdouble, sep=" ") + assert_equal(a[0], np.longdouble(s)) + + def test_fromstring_foreign_sep(self): + a = np.array([1, 2, 3, 4]) + b = np.fromstring("1,2,3,4,", dtype=np.longdouble, sep=",") + assert_array_equal(a, b) + + def test_fromstring_foreign_value(self): + with assert_raises(ValueError): + np.fromstring("1,234", dtype=np.longdouble, sep=" ") + + +@pytest.mark.parametrize("int_val", [ + # cases discussed in gh-10723 + # and gh-9968 + 2 ** 1024, 0]) +def test_longdouble_from_int(int_val): + # for issue gh-9968 + str_val = str(int_val) + # we'll expect a RuntimeWarning on platforms + # with np.longdouble equivalent to np.double + # for large integer input + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + # can be inf==inf on some platforms + assert np.longdouble(int_val) == np.longdouble(str_val) + # we can't directly compare the int and + # max longdouble value on all platforms + if np.allclose(np.finfo(np.longdouble).max, + np.finfo(np.double).max) and w: + assert w[0].category is RuntimeWarning + +@pytest.mark.parametrize("bool_val", [ + True, False]) +def test_longdouble_from_bool(bool_val): + assert np.longdouble(bool_val) == np.longdouble(int(bool_val)) + + +@pytest.mark.skipif( + not (IS_MUSL and platform.machine() == "x86_64"), + reason="only need to run on musllinux_x86_64" +) +def test_musllinux_x86_64_signature(): + # this test may fail if you're emulating musllinux_x86_64 on a different + # architecture, but should pass natively. + known_sigs = [b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf'] + sig = (np.longdouble(-1.0) / np.longdouble(10.0)) + sig = sig.view(sig.dtype.newbyteorder('<')).tobytes()[:10] + assert sig in known_sigs + + +def test_eps_positive(): + # np.finfo('g').eps should be positive on all platforms. If this isn't true + # then something may have gone wrong with the MachArLike, e.g. if + # np._core.getlimits._discovered_machar didn't work properly + assert np.finfo(np.longdouble).eps > 0. diff --git a/local_packages/numpy/_core/tests/test_mem_overlap.py b/local_packages/numpy/_core/tests/test_mem_overlap.py new file mode 100644 index 0000000..240ea62 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_mem_overlap.py @@ -0,0 +1,933 @@ +import itertools + +import pytest + +import numpy as np +from numpy._core import _umath_tests +from numpy._core._multiarray_tests import internal_overlap, solve_diophantine +from numpy.lib.stride_tricks import as_strided +from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises + +ndims = 2 +size = 10 +shape = tuple([size] * ndims) + +MAY_SHARE_BOUNDS = 0 +MAY_SHARE_EXACT = -1 + + +def _indices_for_nelems(nelems): + """Returns slices of length nelems, from start onwards, in direction sign.""" + + if nelems == 0: + return [size // 2] # int index + + res = [] + for step in (1, 2): + for sign in (-1, 1): + start = size // 2 - nelems * step * sign // 2 + stop = start + nelems * step * sign + res.append(slice(start, stop, step * sign)) + + return res + + +def _indices_for_axis(): + """Returns (src, dst) pairs of indices.""" + + res = [] + for nelems in (0, 2, 3): + ind = _indices_for_nelems(nelems) + res.extend(itertools.product(ind, ind)) # all assignments of size "nelems" + + return res + + +def _indices(ndims): + """Returns ((axis0_src, axis0_dst), (axis1_src, axis1_dst), ... ) index pairs.""" + + ind = _indices_for_axis() + return itertools.product(ind, repeat=ndims) + + +def _check_assignment(srcidx, dstidx): + """Check assignment arr[dstidx] = arr[srcidx] works.""" + + arr = np.arange(np.prod(shape)).reshape(shape) + + cpy = arr.copy() + + cpy[dstidx] = arr[srcidx] + arr[dstidx] = arr[srcidx] + + assert_(np.all(arr == cpy), + f'assigning arr[{dstidx}] = arr[{srcidx}]') + + +def test_overlapping_assignments(): + # Test automatically generated assignments which overlap in memory. + + inds = _indices(ndims) + + for ind in inds: + srcidx = tuple(a[0] for a in ind) + dstidx = tuple(a[1] for a in ind) + + _check_assignment(srcidx, dstidx) + + +@pytest.mark.slow +def test_diophantine_fuzz(): + # Fuzz test the diophantine solver + rng = np.random.RandomState(1234) + + max_int = np.iinfo(np.intp).max + + for ndim in range(10): + feasible_count = 0 + infeasible_count = 0 + + min_count = 500 // (ndim + 1) + + while min(feasible_count, infeasible_count) < min_count: + # Ensure big and small integer problems + A_max = 1 + rng.randint(0, 11, dtype=np.intp)**6 + U_max = rng.randint(0, 11, dtype=np.intp)**6 + + A_max = min(max_int, A_max) + U_max = min(max_int - 1, U_max) + + A = tuple(int(rng.randint(1, A_max + 1, dtype=np.intp)) + for j in range(ndim)) + U = tuple(int(rng.randint(0, U_max + 2, dtype=np.intp)) + for j in range(ndim)) + + b_ub = min(max_int - 2, sum(a * ub for a, ub in zip(A, U))) + b = int(rng.randint(-1, b_ub + 2, dtype=np.intp)) + + if ndim == 0 and feasible_count < min_count: + b = 0 + + X = solve_diophantine(A, U, b) + + if X is None: + # Check the simplified decision problem agrees + X_simplified = solve_diophantine(A, U, b, simplify=1) + assert_(X_simplified is None, (A, U, b, X_simplified)) + + # Check no solution exists (provided the problem is + # small enough so that brute force checking doesn't + # take too long) + ranges = tuple(range(0, a * ub + 1, a) for a, ub in zip(A, U)) + + size = 1 + for r in ranges: + size *= len(r) + if size < 100000: + assert_(not any(sum(w) == b for w in itertools.product(*ranges))) + infeasible_count += 1 + else: + # Check the simplified decision problem agrees + X_simplified = solve_diophantine(A, U, b, simplify=1) + assert_(X_simplified is not None, (A, U, b, X_simplified)) + + # Check validity + assert_(sum(a * x for a, x in zip(A, X)) == b) + assert_(all(0 <= x <= ub for x, ub in zip(X, U))) + feasible_count += 1 + + +def test_diophantine_overflow(): + # Smoke test integer overflow detection + max_intp = np.iinfo(np.intp).max + max_int64 = np.iinfo(np.int64).max + + if max_int64 <= max_intp: + # Check that the algorithm works internally in 128-bit; + # solving this problem requires large intermediate numbers + A = (max_int64 // 2, max_int64 // 2 - 10) + U = (max_int64 // 2, max_int64 // 2 - 10) + b = 2 * (max_int64 // 2) - 10 + + assert_equal(solve_diophantine(A, U, b), (1, 1)) + + +def check_may_share_memory_exact(a, b): + got = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT) + + assert_equal(np.may_share_memory(a, b), + np.may_share_memory(a, b, max_work=MAY_SHARE_BOUNDS)) + + a.fill(0) + b.fill(0) + a.fill(1) + exact = b.any() + + err_msg = "" + if got != exact: + base_delta = a.__array_interface__['data'][0] - b.__array_interface__['data'][0] + err_msg = " " + "\n ".join([ + f"base_a - base_b = {base_delta!r}", + f"shape_a = {a.shape!r}", + f"shape_b = {b.shape!r}", + f"strides_a = {a.strides!r}", + f"strides_b = {b.strides!r}", + f"size_a = {a.size!r}", + f"size_b = {b.size!r}" + ]) + + assert_equal(got, exact, err_msg=err_msg) + + +def test_may_share_memory_manual(): + # Manual test cases for may_share_memory + + # Base arrays + xs0 = [ + np.zeros([13, 21, 23, 22], dtype=np.int8), + np.zeros([13, 21, 23 * 2, 22], dtype=np.int8)[:, :, ::2, :] + ] + + # Generate all negative stride combinations + xs = [] + for x in xs0: + for ss in itertools.product(*(([slice(None), slice(None, None, -1)],) * 4)): + xp = x[ss] + xs.append(xp) + + for x in xs: + # The default is a simple extent check + assert_(np.may_share_memory(x[:, 0, :], x[:, 1, :])) + assert_(np.may_share_memory(x[:, 0, :], x[:, 1, :], max_work=None)) + + # Exact checks + check_may_share_memory_exact(x[:, 0, :], x[:, 1, :]) + check_may_share_memory_exact(x[:, ::7], x[:, 3::3]) + + try: + xp = x.ravel() + if xp.flags.owndata: + continue + xp = xp.view(np.int16) + except ValueError: + continue + + # 0-size arrays cannot overlap + check_may_share_memory_exact(x.ravel()[6:6], + xp.reshape(13, 21, 23, 11)[:, ::7]) + + # Test itemsize is dealt with + check_may_share_memory_exact(x[:, ::7], + xp.reshape(13, 21, 23, 11)) + check_may_share_memory_exact(x[:, ::7], + xp.reshape(13, 21, 23, 11)[:, 3::3]) + check_may_share_memory_exact(x.ravel()[6:7], + xp.reshape(13, 21, 23, 11)[:, ::7]) + + # Check unit size + x = np.zeros([1], dtype=np.int8) + check_may_share_memory_exact(x, x) + check_may_share_memory_exact(x, x.copy()) + + +def iter_random_view_pairs(x, same_steps=True, equal_size=False): + rng = np.random.RandomState(1234) + + if equal_size and same_steps: + raise ValueError + + def random_slice(n, step): + start = rng.randint(0, n + 1, dtype=np.intp) + stop = rng.randint(start, n + 1, dtype=np.intp) + if rng.randint(0, 2, dtype=np.intp) == 0: + stop, start = start, stop + step *= -1 + return slice(start, stop, step) + + def random_slice_fixed_size(n, step, size): + start = rng.randint(0, n + 1 - size * step) + stop = start + (size - 1) * step + 1 + if rng.randint(0, 2) == 0: + stop, start = start - 1, stop - 1 + if stop < 0: + stop = None + step *= -1 + return slice(start, stop, step) + + # First a few regular views + yield x, x + for j in range(1, 7, 3): + yield x[j:], x[:-j] + yield x[..., j:], x[..., :-j] + + # An array with zero stride internal overlap + strides = list(x.strides) + strides[0] = 0 + xp = as_strided(x, shape=x.shape, strides=strides) + yield x, xp + yield xp, xp + + # An array with non-zero stride internal overlap + strides = list(x.strides) + if strides[0] > 1: + strides[0] = 1 + xp = as_strided(x, shape=x.shape, strides=strides) + yield x, xp + yield xp, xp + + # Then discontiguous views + while True: + steps = tuple(rng.randint(1, 11, dtype=np.intp) + if rng.randint(0, 5, dtype=np.intp) == 0 else 1 + for j in range(x.ndim)) + s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps)) + + t1 = np.arange(x.ndim) + rng.shuffle(t1) + + if equal_size: + t2 = t1 + else: + t2 = np.arange(x.ndim) + rng.shuffle(t2) + + a = x[s1] + + if equal_size: + if a.size == 0: + continue + + steps2 = tuple(rng.randint(1, max(2, p // (1 + pa))) + if rng.randint(0, 5) == 0 else 1 + for p, s, pa in zip(x.shape, s1, a.shape)) + s2 = tuple(random_slice_fixed_size(p, s, pa) + for p, s, pa in zip(x.shape, steps2, a.shape)) + elif same_steps: + steps2 = steps + else: + steps2 = tuple(rng.randint(1, 11, dtype=np.intp) + if rng.randint(0, 5, dtype=np.intp) == 0 else 1 + for j in range(x.ndim)) + + if not equal_size: + s2 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps2)) + + a = a.transpose(t1) + b = x[s2].transpose(t2) + + yield a, b + + +def check_may_share_memory_easy_fuzz(get_max_work, same_steps, min_count): + # Check that overlap problems with common strides are solved with + # little work. + x = np.zeros([17, 34, 71, 97], dtype=np.int16) + + feasible = 0 + infeasible = 0 + + pair_iter = iter_random_view_pairs(x, same_steps) + + while min(feasible, infeasible) < min_count: + a, b = next(pair_iter) + + bounds_overlap = np.may_share_memory(a, b) + may_share_answer = np.may_share_memory(a, b) + easy_answer = np.may_share_memory(a, b, max_work=get_max_work(a, b)) + exact_answer = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT) + + if easy_answer != exact_answer: + # assert_equal is slow... + assert_equal(easy_answer, exact_answer) + + if may_share_answer != bounds_overlap: + assert_equal(may_share_answer, bounds_overlap) + + if bounds_overlap: + if exact_answer: + feasible += 1 + else: + infeasible += 1 + + +@pytest.mark.slow +def test_may_share_memory_easy_fuzz(): + # Check that overlap problems with common strides are always + # solved with little work. + + check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: 1, + same_steps=True, + min_count=2000) + + +@pytest.mark.slow +def test_may_share_memory_harder_fuzz(): + # Overlap problems with not necessarily common strides take more + # work. + # + # The work bound below can't be reduced much. Harder problems can + # also exist but not be detected here, as the set of problems + # comes from RNG. + + check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: max(a.size, b.size) // 2, + same_steps=False, + min_count=2000) + + +def test_shares_memory_api(): + x = np.zeros([4, 5, 6], dtype=np.int8) + + assert_equal(np.shares_memory(x, x), True) + assert_equal(np.shares_memory(x, x.copy()), False) + + a = x[:, ::2, ::3] + b = x[:, ::3, ::2] + assert_equal(np.shares_memory(a, b), True) + assert_equal(np.shares_memory(a, b, max_work=None), True) + assert_raises( + np.exceptions.TooHardError, np.shares_memory, a, b, max_work=1 + ) + + +def test_may_share_memory_bad_max_work(): + x = np.zeros([1]) + assert_raises(OverflowError, np.may_share_memory, x, x, max_work=10**100) + assert_raises(OverflowError, np.shares_memory, x, x, max_work=10**100) + + +def test_internal_overlap_diophantine(): + def check(A, U, exists=None): + X = solve_diophantine(A, U, 0, require_ub_nontrivial=1) + + if exists is None: + exists = (X is not None) + + if X is not None: + sum_ax = sum(a * x for a, x in zip(A, X)) + sum_au_half = sum(a * u // 2 for a, u in zip(A, U)) + assert_(sum_ax == sum_au_half) + assert_(all(0 <= x <= u for x, u in zip(X, U))) + assert_(any(x != u // 2 for x, u in zip(X, U))) + + if exists: + assert_(X is not None, repr(X)) + else: + assert_(X is None, repr(X)) + + # Smoke tests + check((3, 2), (2 * 2, 3 * 2), exists=True) + check((3 * 2, 2), (15 * 2, (3 - 1) * 2), exists=False) + + +def test_internal_overlap_slices(): + # Slicing an array never generates internal overlap + + x = np.zeros([17, 34, 71, 97], dtype=np.int16) + + rng = np.random.RandomState(1234) + + def random_slice(n, step): + start = rng.randint(0, n + 1, dtype=np.intp) + stop = rng.randint(start, n + 1, dtype=np.intp) + if rng.randint(0, 2, dtype=np.intp) == 0: + stop, start = start, stop + step *= -1 + return slice(start, stop, step) + + cases = 0 + min_count = 5000 + + while cases < min_count: + steps = tuple(rng.randint(1, 11, dtype=np.intp) + if rng.randint(0, 5, dtype=np.intp) == 0 else 1 + for j in range(x.ndim)) + t1 = np.arange(x.ndim) + rng.shuffle(t1) + s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps)) + a = x[s1].transpose(t1) + + assert_(not internal_overlap(a)) + cases += 1 + + +def check_internal_overlap(a, manual_expected=None): + got = internal_overlap(a) + + # Brute-force check + m = set() + ranges = tuple(range(n) for n in a.shape) + for v in itertools.product(*ranges): + offset = sum(s * w for s, w in zip(a.strides, v)) + if offset in m: + expected = True + break + else: + m.add(offset) + else: + expected = False + + # Compare + if got != expected: + assert_equal(got, expected, err_msg=repr((a.strides, a.shape))) + if manual_expected is not None and expected != manual_expected: + assert_equal(expected, manual_expected) + return got + + +def test_internal_overlap_manual(): + # Stride tricks can construct arrays with internal overlap + + # We don't care about memory bounds, the array is not + # read/write accessed + x = np.arange(1).astype(np.int8) + + # Check low-dimensional special cases + + check_internal_overlap(x, False) # 1-dim + check_internal_overlap(x.reshape([]), False) # 0-dim + + a = as_strided(x, strides=(3, 4), shape=(4, 4)) + check_internal_overlap(a, False) + + a = as_strided(x, strides=(3, 4), shape=(5, 4)) + check_internal_overlap(a, True) + + a = as_strided(x, strides=(0,), shape=(0,)) + check_internal_overlap(a, False) + + a = as_strided(x, strides=(0,), shape=(1,)) + check_internal_overlap(a, False) + + a = as_strided(x, strides=(0,), shape=(2,)) + check_internal_overlap(a, True) + + a = as_strided(x, strides=(0, -9993), shape=(87, 22)) + check_internal_overlap(a, True) + + a = as_strided(x, strides=(0, -9993), shape=(1, 22)) + check_internal_overlap(a, False) + + a = as_strided(x, strides=(0, -9993), shape=(0, 22)) + check_internal_overlap(a, False) + + +def test_internal_overlap_fuzz(): + # Fuzz check; the brute-force check is fairly slow + + x = np.arange(1).astype(np.int8) + + overlap = 0 + no_overlap = 0 + min_count = 100 + + rng = np.random.RandomState(1234) + + while min(overlap, no_overlap) < min_count: + ndim = rng.randint(1, 4, dtype=np.intp) + + strides = tuple(rng.randint(-1000, 1000, dtype=np.intp) + for j in range(ndim)) + shape = tuple(rng.randint(1, 30, dtype=np.intp) + for j in range(ndim)) + + a = as_strided(x, strides=strides, shape=shape) + result = check_internal_overlap(a) + + if result: + overlap += 1 + else: + no_overlap += 1 + + +def test_non_ndarray_inputs(): + # Regression check for gh-5604 + + class MyArray: + def __init__(self, data): + self.data = data + + @property + def __array_interface__(self): + return self.data.__array_interface__ + + class MyArray2: + def __init__(self, data): + self.data = data + + def __array__(self, dtype=None, copy=None): + return self.data + + for cls in [MyArray, MyArray2]: + x = np.arange(5) + + assert_(np.may_share_memory(cls(x[::2]), x[1::2])) + assert_(not np.shares_memory(cls(x[::2]), x[1::2])) + + assert_(np.shares_memory(cls(x[1::3]), x[::2])) + assert_(np.may_share_memory(cls(x[1::3]), x[::2])) + + +def view_element_first_byte(x): + """Construct an array viewing the first byte of each element of `x`""" + from numpy.lib._stride_tricks_impl import DummyArray + interface = dict(x.__array_interface__) + interface['typestr'] = '|b1' + interface['descr'] = [('', '|b1')] + return np.asarray(DummyArray(interface, x)) + + +def assert_copy_equivalent(operation, args, out, **kwargs): + """ + Check that operation(*args, out=out) produces results + equivalent to out[...] = operation(*args, out=out.copy()) + """ + + kwargs['out'] = out + kwargs2 = dict(kwargs) + kwargs2['out'] = out.copy() + + out_orig = out.copy() + out[...] = operation(*args, **kwargs2) + expected = out.copy() + out[...] = out_orig + + got = operation(*args, **kwargs).copy() + + if (got != expected).any(): + assert_equal(got, expected) + + +class TestUFunc: + """ + Test ufunc call memory overlap handling + """ + + def check_unary_fuzz(self, operation, get_out_axis_size, dtype=np.int16, + count=5000): + shapes = [7, 13, 8, 21, 29, 32] + + rng = np.random.RandomState(1234) + + for ndim in range(1, 6): + x = rng.randint(0, 2**16, size=shapes[:ndim]).astype(dtype) + + it = iter_random_view_pairs(x, same_steps=False, equal_size=True) + + min_count = count // (ndim + 1)**2 + + overlapping = 0 + while overlapping < min_count: + a, b = next(it) + + a_orig = a.copy() + b_orig = b.copy() + + if get_out_axis_size is None: + assert_copy_equivalent(operation, [a], out=b) + + if np.shares_memory(a, b): + overlapping += 1 + else: + for axis in itertools.chain(range(ndim), [None]): + a[...] = a_orig + b[...] = b_orig + + # Determine size for reduction axis (None if scalar) + outsize, scalarize = get_out_axis_size(a, b, axis) + if outsize == 'skip': + continue + + # Slice b to get an output array of the correct size + sl = [slice(None)] * ndim + if axis is None: + if outsize is None: + sl = [slice(0, 1)] + [0] * (ndim - 1) + else: + sl = [slice(0, outsize)] + [0] * (ndim - 1) + elif outsize is None: + k = b.shape[axis] // 2 + if ndim == 1: + sl[axis] = slice(k, k + 1) + else: + sl[axis] = k + else: + assert b.shape[axis] >= outsize + sl[axis] = slice(0, outsize) + b_out = b[tuple(sl)] + + if scalarize: + b_out = b_out.reshape([]) + + if np.shares_memory(a, b_out): + overlapping += 1 + + # Check result + assert_copy_equivalent(operation, [a], out=b_out, axis=axis) + + @pytest.mark.slow + def test_unary_ufunc_call_fuzz(self): + self.check_unary_fuzz(np.invert, None, np.int16) + + @pytest.mark.slow + def test_unary_ufunc_call_complex_fuzz(self): + # Complex typically has a smaller alignment than itemsize + self.check_unary_fuzz(np.negative, None, np.complex128, count=500) + + def test_binary_ufunc_accumulate_fuzz(self): + def get_out_axis_size(a, b, axis): + if axis is None: + if a.ndim == 1: + return a.size, False + else: + return 'skip', False # accumulate doesn't support this + else: + return a.shape[axis], False + + self.check_unary_fuzz(np.add.accumulate, get_out_axis_size, + dtype=np.int16, count=500) + + def test_binary_ufunc_reduce_fuzz(self): + def get_out_axis_size(a, b, axis): + return None, (axis is None or a.ndim == 1) + + self.check_unary_fuzz(np.add.reduce, get_out_axis_size, + dtype=np.int16, count=500) + + def test_binary_ufunc_reduceat_fuzz(self): + def get_out_axis_size(a, b, axis): + if axis is None: + if a.ndim == 1: + return a.size, False + else: + return 'skip', False # reduceat doesn't support this + else: + return a.shape[axis], False + + def do_reduceat(a, out, axis): + if axis is None: + size = len(a) + step = size // len(out) + else: + size = a.shape[axis] + step = a.shape[axis] // out.shape[axis] + idx = np.arange(0, size, step) + return np.add.reduceat(a, idx, out=out, axis=axis) + + self.check_unary_fuzz(do_reduceat, get_out_axis_size, + dtype=np.int16, count=500) + + def test_binary_ufunc_reduceat_manual(self): + def check(ufunc, a, ind, out): + c1 = ufunc.reduceat(a.copy(), ind.copy(), out=out.copy()) + c2 = ufunc.reduceat(a, ind, out=out) + assert_array_equal(c1, c2) + + # Exactly same input/output arrays + a = np.arange(10000, dtype=np.int16) + check(np.add, a, a[::-1].copy(), a) + + # Overlap with index + a = np.arange(10000, dtype=np.int16) + check(np.add, a, a[::-1], a) + + @pytest.mark.slow + def test_unary_gufunc_fuzz(self): + shapes = [7, 13, 8, 21, 29, 32] + gufunc = _umath_tests.euclidean_pdist + + rng = np.random.RandomState(1234) + + for ndim in range(2, 6): + x = rng.rand(*shapes[:ndim]) + + it = iter_random_view_pairs(x, same_steps=False, equal_size=True) + + min_count = 500 // (ndim + 1)**2 + + overlapping = 0 + while overlapping < min_count: + a, b = next(it) + + if min(a.shape[-2:]) < 2 or min(b.shape[-2:]) < 2 or a.shape[-1] < 2: + continue + + # Ensure the shapes are so that euclidean_pdist is happy + if b.shape[-1] > b.shape[-2]: + b = b[..., 0, :] + else: + b = b[..., :, 0] + + n = a.shape[-2] + p = n * (n - 1) // 2 + if p <= b.shape[-1] and p > 0: + b = b[..., :p] + else: + n = max(2, int(np.sqrt(b.shape[-1])) // 2) + p = n * (n - 1) // 2 + a = a[..., :n, :] + b = b[..., :p] + + # Call + if np.shares_memory(a, b): + overlapping += 1 + + with np.errstate(over='ignore', invalid='ignore'): + assert_copy_equivalent(gufunc, [a], out=b) + + def test_ufunc_at_manual(self): + def check(ufunc, a, ind, b=None): + a0 = a.copy() + if b is None: + ufunc.at(a0, ind.copy()) + c1 = a0.copy() + ufunc.at(a, ind) + c2 = a.copy() + else: + ufunc.at(a0, ind.copy(), b.copy()) + c1 = a0.copy() + ufunc.at(a, ind, b) + c2 = a.copy() + assert_array_equal(c1, c2) + + # Overlap with index + a = np.arange(10000, dtype=np.int16) + check(np.invert, a[::-1], a) + + # Overlap with second data array + a = np.arange(100, dtype=np.int16) + ind = np.arange(0, 100, 2, dtype=np.int16) + check(np.add, a, ind, a[25:75]) + + def test_unary_ufunc_1d_manual(self): + # Exercise ufunc fast-paths (that avoid creation of an `np.nditer`) + + def check(a, b): + a_orig = a.copy() + b_orig = b.copy() + + b0 = b.copy() + c1 = ufunc(a, out=b0) + c2 = ufunc(a, out=b) + assert_array_equal(c1, c2) + + # Trigger "fancy ufunc loop" code path + mask = view_element_first_byte(b).view(np.bool) + + a[...] = a_orig + b[...] = b_orig + c1 = ufunc(a, out=b.copy(), where=mask.copy()).copy() + + a[...] = a_orig + b[...] = b_orig + c2 = ufunc(a, out=b, where=mask.copy()).copy() + + # Also, mask overlapping with output + a[...] = a_orig + b[...] = b_orig + c3 = ufunc(a, out=b, where=mask).copy() + + assert_array_equal(c1, c2) + assert_array_equal(c1, c3) + + dtypes = [np.int8, np.int16, np.int32, np.int64, np.float32, + np.float64, np.complex64, np.complex128] + dtypes = [np.dtype(x) for x in dtypes] + + for dtype in dtypes: + if np.issubdtype(dtype, np.integer): + ufunc = np.invert + else: + ufunc = np.reciprocal + + n = 1000 + k = 10 + indices = [ + np.index_exp[:n], + np.index_exp[k:k + n], + np.index_exp[n - 1::-1], + np.index_exp[k + n - 1:k - 1:-1], + np.index_exp[:2 * n:2], + np.index_exp[k:k + 2 * n:2], + np.index_exp[2 * n - 1::-2], + np.index_exp[k + 2 * n - 1:k - 1:-2], + ] + + for xi, yi in itertools.product(indices, indices): + v = np.arange(1, 1 + n * 2 + k, dtype=dtype) + x = v[xi] + y = v[yi] + + with np.errstate(all='ignore'): + check(x, y) + + # Scalar cases + check(x[:1], y) + check(x[-1:], y) + check(x[:1].reshape([]), y) + check(x[-1:].reshape([]), y) + + def test_unary_ufunc_where_same(self): + # Check behavior at wheremask overlap + ufunc = np.invert + + def check(a, out, mask): + c1 = ufunc(a, out=out.copy(), where=mask.copy()) + c2 = ufunc(a, out=out, where=mask) + assert_array_equal(c1, c2) + + # Check behavior with same input and output arrays + x = np.arange(100).astype(np.bool) + check(x, x, x) + check(x, x.copy(), x) + check(x, x, x.copy()) + + @pytest.mark.slow + def test_binary_ufunc_1d_manual(self): + ufunc = np.add + + def check(a, b, c): + c0 = c.copy() + c1 = ufunc(a, b, out=c0) + c2 = ufunc(a, b, out=c) + assert_array_equal(c1, c2) + + for dtype in [np.int8, np.int16, np.int32, np.int64, + np.float32, np.float64, np.complex64, np.complex128]: + # Check different data dependency orders + + n = 1000 + k = 10 + + indices = [] + for p in [1, 2]: + indices.extend([ + np.index_exp[:p * n:p], + np.index_exp[k:k + p * n:p], + np.index_exp[p * n - 1::-p], + np.index_exp[k + p * n - 1:k - 1:-p], + ]) + + for x, y, z in itertools.product(indices, indices, indices): + v = np.arange(6 * n).astype(dtype) + x = v[x] + y = v[y] + z = v[z] + + check(x, y, z) + + # Scalar cases + check(x[:1], y, z) + check(x[-1:], y, z) + check(x[:1].reshape([]), y, z) + check(x[-1:].reshape([]), y, z) + check(x, y[:1], z) + check(x, y[-1:], z) + check(x, y[:1].reshape([]), z) + check(x, y[-1:].reshape([]), z) + + def test_inplace_op_simple_manual(self): + rng = np.random.RandomState(1234) + x = rng.rand(200, 200) # bigger than bufsize + + x += x.T + assert_array_equal(x - x.T, 0) diff --git a/local_packages/numpy/_core/tests/test_mem_policy.py b/local_packages/numpy/_core/tests/test_mem_policy.py new file mode 100644 index 0000000..313d3ef --- /dev/null +++ b/local_packages/numpy/_core/tests/test_mem_policy.py @@ -0,0 +1,453 @@ +import asyncio +import gc +import os +import sys +import sysconfig +import threading + +import pytest + +import numpy as np +from numpy._core.multiarray import get_handler_name +from numpy.testing import IS_EDITABLE, IS_WASM, extbuild + + +@pytest.fixture +def get_module(tmp_path): + """ Add a memory policy that returns a false pointer 64 bytes into the + actual allocation, and fill the prefix with some text. Then check at each + memory manipulation that the prefix exists, to make sure all alloc/realloc/ + free/calloc go via the functions here. + """ + if sys.platform.startswith('cygwin'): + pytest.skip('link fails on cygwin') + if IS_WASM: + pytest.skip("Can't build module inside Wasm") + if IS_EDITABLE: + pytest.skip("Can't build module for editable install") + + functions = [ + ("get_default_policy", "METH_NOARGS", """ + Py_INCREF(PyDataMem_DefaultHandler); + return PyDataMem_DefaultHandler; + """), + ("set_secret_data_policy", "METH_NOARGS", """ + PyObject *secret_data = + PyCapsule_New(&secret_data_handler, "mem_handler", NULL); + if (secret_data == NULL) { + return NULL; + } + PyObject *old = PyDataMem_SetHandler(secret_data); + Py_DECREF(secret_data); + return old; + """), + ("set_wrong_capsule_name_data_policy", "METH_NOARGS", """ + PyObject *wrong_name_capsule = + PyCapsule_New(&secret_data_handler, "not_mem_handler", NULL); + if (wrong_name_capsule == NULL) { + return NULL; + } + PyObject *old = PyDataMem_SetHandler(wrong_name_capsule); + Py_DECREF(wrong_name_capsule); + return old; + """), + ("set_old_policy", "METH_O", """ + PyObject *old; + if (args != NULL && PyCapsule_CheckExact(args)) { + old = PyDataMem_SetHandler(args); + } + else { + old = PyDataMem_SetHandler(NULL); + } + return old; + """), + ("get_array", "METH_NOARGS", """ + char *buf = (char *)malloc(20); + npy_intp dims[1]; + dims[0] = 20; + PyArray_Descr *descr = PyArray_DescrNewFromType(NPY_UINT8); + return PyArray_NewFromDescr(&PyArray_Type, descr, 1, dims, NULL, + buf, NPY_ARRAY_WRITEABLE, NULL); + """), + ("set_own", "METH_O", """ + if (!PyArray_Check(args)) { + PyErr_SetString(PyExc_ValueError, + "need an ndarray"); + return NULL; + } + PyArray_ENABLEFLAGS((PyArrayObject*)args, NPY_ARRAY_OWNDATA); + // Maybe try this too? + // PyArray_BASE(PyArrayObject *)args) = NULL; + Py_RETURN_NONE; + """), + ("get_array_with_base", "METH_NOARGS", """ + char *buf = (char *)malloc(20); + npy_intp dims[1]; + dims[0] = 20; + PyArray_Descr *descr = PyArray_DescrNewFromType(NPY_UINT8); + PyObject *arr = PyArray_NewFromDescr(&PyArray_Type, descr, 1, dims, + NULL, buf, + NPY_ARRAY_WRITEABLE, NULL); + if (arr == NULL) return NULL; + PyObject *obj = PyCapsule_New(buf, "buf capsule", + (PyCapsule_Destructor)&warn_on_free); + if (obj == NULL) { + Py_DECREF(arr); + return NULL; + } + if (PyArray_SetBaseObject((PyArrayObject *)arr, obj) < 0) { + Py_DECREF(arr); + Py_DECREF(obj); + return NULL; + } + return arr; + + """), + ] + prologue = ''' + #define NPY_TARGET_VERSION NPY_1_22_API_VERSION + #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + #include + /* + * This struct allows the dynamic configuration of the allocator funcs + * of the `secret_data_allocator`. It is provided here for + * demonstration purposes, as a valid `ctx` use-case scenario. + */ + typedef struct { + void *(*malloc)(size_t); + void *(*calloc)(size_t, size_t); + void *(*realloc)(void *, size_t); + void (*free)(void *); + } SecretDataAllocatorFuncs; + + NPY_NO_EXPORT void * + shift_alloc(void *ctx, size_t sz) { + SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx; + char *real = (char *)funcs->malloc(sz + 64); + if (real == NULL) { + return NULL; + } + snprintf(real, 64, "originally allocated %ld", (unsigned long)sz); + return (void *)(real + 64); + } + NPY_NO_EXPORT void * + shift_zero(void *ctx, size_t sz, size_t cnt) { + SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx; + char *real = (char *)funcs->calloc(sz + 64, cnt); + if (real == NULL) { + return NULL; + } + snprintf(real, 64, "originally allocated %ld via zero", + (unsigned long)sz); + return (void *)(real + 64); + } + NPY_NO_EXPORT void + shift_free(void *ctx, void * p, npy_uintp sz) { + SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx; + if (p == NULL) { + return ; + } + char *real = (char *)p - 64; + if (strncmp(real, "originally allocated", 20) != 0) { + fprintf(stdout, "uh-oh, unmatched shift_free, " + "no appropriate prefix\\n"); + /* Make C runtime crash by calling free on the wrong address */ + funcs->free((char *)p + 10); + /* funcs->free(real); */ + } + else { + npy_uintp i = (npy_uintp)atoi(real +20); + if (i != sz) { + fprintf(stderr, "uh-oh, unmatched shift_free" + "(ptr, %ld) but allocated %ld\\n", sz, i); + /* This happens in some places, only print */ + funcs->free(real); + } + else { + funcs->free(real); + } + } + } + NPY_NO_EXPORT void * + shift_realloc(void *ctx, void * p, npy_uintp sz) { + SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx; + if (p != NULL) { + char *real = (char *)p - 64; + if (strncmp(real, "originally allocated", 20) != 0) { + fprintf(stdout, "uh-oh, unmatched shift_realloc\\n"); + return realloc(p, sz); + } + return (void *)((char *)funcs->realloc(real, sz + 64) + 64); + } + else { + char *real = (char *)funcs->realloc(p, sz + 64); + if (real == NULL) { + return NULL; + } + snprintf(real, 64, "originally allocated " + "%ld via realloc", (unsigned long)sz); + return (void *)(real + 64); + } + } + /* As an example, we use the standard {m|c|re}alloc/free funcs. */ + static SecretDataAllocatorFuncs secret_data_handler_ctx = { + malloc, + calloc, + realloc, + free + }; + static PyDataMem_Handler secret_data_handler = { + "secret_data_allocator", + 1, + { + &secret_data_handler_ctx, /* ctx */ + shift_alloc, /* malloc */ + shift_zero, /* calloc */ + shift_realloc, /* realloc */ + shift_free /* free */ + } + }; + void warn_on_free(void *capsule) { + PyErr_WarnEx(PyExc_UserWarning, "in warn_on_free", 1); + void * obj = PyCapsule_GetPointer(capsule, + PyCapsule_GetName(capsule)); + free(obj); + }; + ''' + more_init = "import_array();" + try: + import mem_policy + return mem_policy + except ImportError: + pass + # if it does not exist, build and load it + if sysconfig.get_platform() == "win-arm64": + pytest.skip("Meson unable to find MSVC linker on win-arm64") + return extbuild.build_and_import_extension('mem_policy', + functions, + prologue=prologue, + include_dirs=[np.get_include()], + build_dir=tmp_path, + more_init=more_init) + + +def test_set_policy(get_module): + + get_handler_name = np._core.multiarray.get_handler_name + get_handler_version = np._core.multiarray.get_handler_version + orig_policy_name = get_handler_name() + + a = np.arange(10).reshape((2, 5)) # a doesn't own its own data + assert get_handler_name(a) is None + assert get_handler_version(a) is None + assert get_handler_name(a.base) == orig_policy_name + assert get_handler_version(a.base) == 1 + + orig_policy = get_module.set_secret_data_policy() + + b = np.arange(10).reshape((2, 5)) # b doesn't own its own data + assert get_handler_name(b) is None + assert get_handler_version(b) is None + assert get_handler_name(b.base) == 'secret_data_allocator' + assert get_handler_version(b.base) == 1 + + if orig_policy_name == 'default_allocator': + get_module.set_old_policy(None) # tests PyDataMem_SetHandler(NULL) + assert get_handler_name() == 'default_allocator' + else: + get_module.set_old_policy(orig_policy) + assert get_handler_name() == orig_policy_name + + with pytest.raises(ValueError, + match="Capsule must be named 'mem_handler'"): + get_module.set_wrong_capsule_name_data_policy() + + +def test_default_policy_singleton(get_module): + get_handler_name = np._core.multiarray.get_handler_name + + # set the policy to default + orig_policy = get_module.set_old_policy(None) + + assert get_handler_name() == 'default_allocator' + + # re-set the policy to default + def_policy_1 = get_module.set_old_policy(None) + + assert get_handler_name() == 'default_allocator' + + # set the policy to original + def_policy_2 = get_module.set_old_policy(orig_policy) + + # since default policy is a singleton, + # these should be the same object + assert def_policy_1 is def_policy_2 is get_module.get_default_policy() + + +def test_policy_propagation(get_module): + # The memory policy goes hand-in-hand with flags.owndata + + class MyArr(np.ndarray): + pass + + get_handler_name = np._core.multiarray.get_handler_name + orig_policy_name = get_handler_name() + a = np.arange(10).view(MyArr).reshape((2, 5)) + assert get_handler_name(a) is None + assert a.flags.owndata is False + + assert get_handler_name(a.base) is None + assert a.base.flags.owndata is False + + assert get_handler_name(a.base.base) == orig_policy_name + assert a.base.base.flags.owndata is True + + +async def concurrent_context1(get_module, orig_policy_name, event): + if orig_policy_name == 'default_allocator': + get_module.set_secret_data_policy() + assert get_handler_name() == 'secret_data_allocator' + else: + get_module.set_old_policy(None) + assert get_handler_name() == 'default_allocator' + event.set() + + +async def concurrent_context2(get_module, orig_policy_name, event): + await event.wait() + # the policy is not affected by changes in parallel contexts + assert get_handler_name() == orig_policy_name + # change policy in the child context + if orig_policy_name == 'default_allocator': + get_module.set_secret_data_policy() + assert get_handler_name() == 'secret_data_allocator' + else: + get_module.set_old_policy(None) + assert get_handler_name() == 'default_allocator' + + +async def async_test_context_locality(get_module): + orig_policy_name = np._core.multiarray.get_handler_name() + + event = asyncio.Event() + # the child contexts inherit the parent policy + concurrent_task1 = asyncio.create_task( + concurrent_context1(get_module, orig_policy_name, event)) + concurrent_task2 = asyncio.create_task( + concurrent_context2(get_module, orig_policy_name, event)) + await concurrent_task1 + await concurrent_task2 + + # the parent context is not affected by child policy changes + assert np._core.multiarray.get_handler_name() == orig_policy_name + + +def test_context_locality(get_module): + if (sys.implementation.name == 'pypy' + and sys.pypy_version_info[:3] < (7, 3, 6)): + pytest.skip('no context-locality support in PyPy < 7.3.6') + asyncio.run(async_test_context_locality(get_module)) + + +def concurrent_thread1(get_module, event): + get_module.set_secret_data_policy() + assert np._core.multiarray.get_handler_name() == 'secret_data_allocator' + event.set() + + +def concurrent_thread2(get_module, event): + event.wait() + # the policy is not affected by changes in parallel threads + assert np._core.multiarray.get_handler_name() == 'default_allocator' + # change policy in the child thread + get_module.set_secret_data_policy() + + +def test_thread_locality(get_module): + orig_policy_name = np._core.multiarray.get_handler_name() + + event = threading.Event() + # the child threads do not inherit the parent policy + concurrent_task1 = threading.Thread(target=concurrent_thread1, + args=(get_module, event)) + concurrent_task2 = threading.Thread(target=concurrent_thread2, + args=(get_module, event)) + concurrent_task1.start() + concurrent_task2.start() + concurrent_task1.join() + concurrent_task2.join() + + # the parent thread is not affected by child policy changes + assert np._core.multiarray.get_handler_name() == orig_policy_name + + +@pytest.mark.skip(reason="too slow, see gh-23975") +def test_new_policy(get_module): + a = np.arange(10) + orig_policy_name = np._core.multiarray.get_handler_name(a) + + orig_policy = get_module.set_secret_data_policy() + + b = np.arange(10) + assert np._core.multiarray.get_handler_name(b) == 'secret_data_allocator' + + # test array manipulation. This is slow + if orig_policy_name == 'default_allocator': + # when the np._core.test tests recurse into this test, the + # policy will be set so this "if" will be false, preventing + # infinite recursion + # + # if needed, debug this by + # - running tests with -- -s (to not capture stdout/stderr + # - setting verbose=2 + # - setting extra_argv=['-vv'] here + assert np._core.test('full', verbose=1, extra_argv=[]) + # also try the ma tests, the pickling test is quite tricky + assert np.ma.test('full', verbose=1, extra_argv=[]) + + get_module.set_old_policy(orig_policy) + + c = np.arange(10) + assert np._core.multiarray.get_handler_name(c) == orig_policy_name + + +@pytest.mark.xfail(sys.implementation.name == "pypy", + reason=("bad interaction between getenv and " + "os.environ inside pytest")) +@pytest.mark.parametrize("policy", ["0", "1", None]) +@pytest.mark.thread_unsafe(reason="modifies environment variables") +def test_switch_owner(get_module, policy): + a = get_module.get_array() + assert np._core.multiarray.get_handler_name(a) is None + get_module.set_own(a) + + if policy is None: + # See what we expect to be set based on the env variable + policy = os.getenv("NUMPY_WARN_IF_NO_MEM_POLICY", "0") == "1" + oldval = None + else: + policy = policy == "1" + oldval = np._core._multiarray_umath._set_numpy_warn_if_no_mem_policy( + policy) + try: + # The policy should be NULL, so we have to assume we can call + # "free". A warning is given if the policy == "1" + if policy: + with pytest.warns(RuntimeWarning) as w: + del a + gc.collect() + else: + del a + gc.collect() + + finally: + if oldval is not None: + np._core._multiarray_umath._set_numpy_warn_if_no_mem_policy(oldval) + + +def test_owner_is_base(get_module): + a = get_module.get_array_with_base() + with pytest.warns(UserWarning, match='warn_on_free'): + del a + gc.collect() + gc.collect() diff --git a/local_packages/numpy/_core/tests/test_memmap.py b/local_packages/numpy/_core/tests/test_memmap.py new file mode 100644 index 0000000..8e2aa0a --- /dev/null +++ b/local_packages/numpy/_core/tests/test_memmap.py @@ -0,0 +1,248 @@ +import mmap +import os +import sys +import warnings +from pathlib import Path +from tempfile import NamedTemporaryFile, TemporaryFile + +import pytest + +from numpy import ( + add, + allclose, + arange, + asarray, + average, + isscalar, + memmap, + multiply, + ndarray, + prod, + subtract, + sum, +) +from numpy.testing import ( + IS_PYPY, + assert_, + assert_array_equal, + assert_equal, + break_cycles, +) + + +@pytest.mark.thread_unsafe(reason="setup & memmap is thread-unsafe (gh-29126)") +class TestMemmap: + def setup_method(self): + self.tmpfp = NamedTemporaryFile(prefix='mmap') + self.shape = (3, 4) + self.dtype = 'float32' + self.data = arange(12, dtype=self.dtype) + self.data.resize(self.shape) + + def teardown_method(self): + self.tmpfp.close() + self.data = None + if IS_PYPY: + break_cycles() + break_cycles() + + def test_roundtrip(self): + # Write data to file + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + fp[:] = self.data[:] + del fp # Test __del__ machinery, which handles cleanup + + # Read data back from file + newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r', + shape=self.shape) + assert_(allclose(self.data, newfp)) + assert_array_equal(self.data, newfp) + assert_equal(newfp.flags.writeable, False) + + def test_open_with_filename(self, tmp_path): + tmpname = tmp_path / 'mmap' + fp = memmap(tmpname, dtype=self.dtype, mode='w+', + shape=self.shape) + fp[:] = self.data[:] + del fp + + def test_unnamed_file(self): + with TemporaryFile() as f: + fp = memmap(f, dtype=self.dtype, shape=self.shape) + del fp + + def test_attributes(self): + offset = 1 + mode = "w+" + fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode, + shape=self.shape, offset=offset) + assert_equal(offset, fp.offset) + assert_equal(mode, fp.mode) + del fp + + def test_filename(self, tmp_path): + tmpname = tmp_path / "mmap" + fp = memmap(tmpname, dtype=self.dtype, mode='w+', + shape=self.shape) + abspath = Path(os.path.abspath(tmpname)) + fp[:] = self.data[:] + assert_equal(abspath, fp.filename) + b = fp[:1] + assert_equal(abspath, b.filename) + del b + del fp + + def test_path(self, tmp_path): + tmpname = tmp_path / "mmap" + fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+', + shape=self.shape) + # os.path.realpath does not resolve symlinks on Windows + # see: https://bugs.python.org/issue9949 + # use Path.resolve, just as memmap class does internally + abspath = str(Path(tmpname).resolve()) + fp[:] = self.data[:] + assert_equal(abspath, str(fp.filename.resolve())) + b = fp[:1] + assert_equal(abspath, str(b.filename.resolve())) + del b + del fp + + def test_filename_fileobj(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+", + shape=self.shape) + assert_equal(fp.filename, self.tmpfp.name) + + @pytest.mark.skipif(sys.platform == 'gnu0', + reason="Known to fail on hurd") + def test_flush(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + fp[:] = self.data[:] + assert_equal(fp[0], self.data[0]) + fp.flush() + + def test_del(self): + # Make sure a view does not delete the underlying mmap + fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + fp_base[0] = 5 + fp_view = fp_base[0:1] + assert_equal(fp_view[0], 5) + del fp_view + # Should still be able to access and assign values after + # deleting the view + assert_equal(fp_base[0], 5) + fp_base[0] = 6 + assert_equal(fp_base[0], 6) + + def test_arithmetic_drops_references(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + tmp = (fp + 10) + if isinstance(tmp, memmap): + assert_(tmp._mmap is not fp._mmap) + + def test_indexing_drops_references(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + tmp = fp[(1, 2), (2, 3)] + if isinstance(tmp, memmap): + assert_(tmp._mmap is not fp._mmap) + + def test_slicing_keeps_references(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + assert_(fp[:2, :2]._mmap is fp._mmap) + + def test_view(self): + fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) + new1 = fp.view() + new2 = new1.view() + assert_(new1.base is fp) + assert_(new2.base is fp) + new_array = asarray(fp) + assert_(new_array.base is fp) + + def test_ufunc_return_ndarray(self): + fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) + fp[:] = self.data + + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "np.average currently does not preserve", FutureWarning) + for unary_op in [sum, average, prod]: + result = unary_op(fp) + assert_(isscalar(result)) + assert_(result.__class__ is self.data[0, 0].__class__) + + assert_(unary_op(fp, axis=0).__class__ is ndarray) + assert_(unary_op(fp, axis=1).__class__ is ndarray) + + for binary_op in [add, subtract, multiply]: + assert_(binary_op(fp, self.data).__class__ is ndarray) + assert_(binary_op(self.data, fp).__class__ is ndarray) + assert_(binary_op(fp, fp).__class__ is ndarray) + + fp += 1 + assert fp.__class__ is memmap + add(fp, 1, out=fp) + assert fp.__class__ is memmap + + def test_getitem(self): + fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) + fp[:] = self.data + + assert_(fp[1:, :-1].__class__ is memmap) + # Fancy indexing returns a copy that is not memmapped + assert_(fp[[0, 1]].__class__ is ndarray) + + def test_memmap_subclass(self): + class MemmapSubClass(memmap): + pass + + fp = MemmapSubClass(self.tmpfp, dtype=self.dtype, shape=self.shape) + fp[:] = self.data + + # We keep previous behavior for subclasses of memmap, i.e. the + # ufunc and __getitem__ output is never turned into a ndarray + assert_(sum(fp, axis=0).__class__ is MemmapSubClass) + assert_(sum(fp).__class__ is MemmapSubClass) + assert_(fp[1:, :-1].__class__ is MemmapSubClass) + assert fp[[0, 1]].__class__ is MemmapSubClass + + def test_mmap_offset_greater_than_allocation_granularity(self): + size = 5 * mmap.ALLOCATIONGRANULARITY + offset = mmap.ALLOCATIONGRANULARITY + 1 + fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset) + assert_(fp.offset == offset) + + def test_empty_array_with_offset_multiple_of_allocation_granularity(self): + self.tmpfp.write(b'a' * mmap.ALLOCATIONGRANULARITY) + size = 0 + offset = mmap.ALLOCATIONGRANULARITY + fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset) + assert_equal(fp.offset, offset) + + def test_no_shape(self): + self.tmpfp.write(b'a' * 16) + mm = memmap(self.tmpfp, dtype='float64') + assert_equal(mm.shape, (2,)) + + def test_empty_array(self): + # gh-12653 + with pytest.raises(ValueError, match='empty file'): + memmap(self.tmpfp, shape=(0, 4), mode='r') + + # gh-27723 + # empty memmap works with mode in ('w+','r+') + memmap(self.tmpfp, shape=(0, 4), mode='w+') + + # ok now the file is not empty + memmap(self.tmpfp, shape=(0, 4), mode='w+') + + def test_shape_type(self): + memmap(self.tmpfp, shape=3, mode='w+') + memmap(self.tmpfp, shape=self.shape, mode='w+') + memmap(self.tmpfp, shape=list(self.shape), mode='w+') + memmap(self.tmpfp, shape=asarray(self.shape), mode='w+') diff --git a/local_packages/numpy/_core/tests/test_multiarray.py b/local_packages/numpy/_core/tests/test_multiarray.py new file mode 100644 index 0000000..fe3bd77 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_multiarray.py @@ -0,0 +1,11031 @@ +import builtins +import collections.abc +import ctypes +import functools +import gc +import importlib +import inspect +import io +import itertools +import mmap +import operator +import os +import pathlib +import pickle +import re +import subprocess +import sys +import tempfile +import textwrap +import warnings +import weakref +from contextlib import contextmanager + +# Need to test an object that does not fully implement math interface +from datetime import datetime, timedelta +from decimal import Decimal + +import pytest + +import numpy as np +import numpy._core._multiarray_tests as _multiarray_tests +from numpy._core._rational_tests import rational +from numpy._core.multiarray import _get_ndarray_c_version, dot +from numpy._core.tests._locales import CommaDecimalPointLocale +from numpy.exceptions import AxisError, ComplexWarning +from numpy.lib import stride_tricks +from numpy.lib.recfunctions import repack_fields +from numpy.testing import ( + BLAS_SUPPORTS_FPE, + HAS_REFCOUNT, + IS_64BIT, + IS_PYPY, + IS_PYSTON, + IS_WASM, + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_compare, + assert_array_equal, + assert_array_less, + assert_equal, + assert_raises, + assert_raises_regex, + break_cycles, + check_support_sve, + runstring, + temppath, +) +from numpy.testing._private.utils import _no_tracing, requires_memory + + +def assert_arg_sorted(arr, arg): + # resulting array should be sorted and arg values should be unique + assert_equal(arr[arg], np.sort(arr)) + assert_equal(np.sort(arg), np.arange(len(arg))) + + +def assert_arr_partitioned(kth, k, arr_part): + assert_equal(arr_part[k], kth) + assert_array_compare(operator.__le__, arr_part[:k], kth) + assert_array_compare(operator.__ge__, arr_part[k:], kth) + + +def _aligned_zeros(shape, dtype=float, order="C", align=None): + """ + Allocate a new ndarray with aligned memory. + + The ndarray is guaranteed *not* aligned to twice the requested alignment. + Eg, if align=4, guarantees it is not aligned to 8. If align=None uses + dtype.alignment.""" + dtype = np.dtype(dtype) + if dtype == np.dtype(object): + # Can't do this, fall back to standard allocation (which + # should always be sufficiently aligned) + if align is not None: + raise ValueError("object array alignment not supported") + return np.zeros(shape, dtype=dtype, order=order) + if align is None: + align = dtype.alignment + if not hasattr(shape, '__len__'): + shape = (shape,) + size = functools.reduce(operator.mul, shape) * dtype.itemsize + buf = np.empty(size + 2 * align + 1, np.uint8) + + ptr = buf.__array_interface__['data'][0] + offset = ptr % align + if offset != 0: + offset = align - offset + if (ptr % (2 * align)) == 0: + offset += align + + # Note: slices producing 0-size arrays do not necessarily change + # data pointer --- so we use and allocate size+1 + buf = buf[offset:offset + size + 1][:-1] + buf.fill(0) + data = np.ndarray(shape, dtype, buf, order=order) + return data + + +class TestFlags: + def test_writeable(self): + arr = np.arange(10) + mydict = locals() + arr.flags.writeable = False + assert_raises(ValueError, runstring, 'arr[0] = 3', mydict) + arr.flags.writeable = True + arr[0] = 5 + arr[0] = 0 + + def test_writeable_any_base(self): + # Ensure that any base being writeable is sufficient to change flag; + # this is especially interesting for arrays from an array interface. + arr = np.arange(10) + + class subclass(np.ndarray): + pass + + # Create subclass so base will not be collapsed, this is OK to change + view1 = arr.view(subclass) + view2 = view1[...] + arr.flags.writeable = False + view2.flags.writeable = False + view2.flags.writeable = True # Can be set to True again. + + arr = np.arange(10) + + class frominterface: + def __init__(self, arr): + self.arr = arr + self.__array_interface__ = arr.__array_interface__ + + view1 = np.asarray(frominterface) + view2 = view1[...] + view2.flags.writeable = False + view2.flags.writeable = True + + view1.flags.writeable = False + view2.flags.writeable = False + with assert_raises(ValueError): + # Must assume not writeable, since only base is not: + view2.flags.writeable = True + + def test_writeable_from_readonly(self): + # gh-9440 - make sure fromstring, from buffer on readonly buffers + # set writeable False + data = b'\x00' * 100 + vals = np.frombuffer(data, 'B') + assert_raises(ValueError, vals.setflags, write=True) + types = np.dtype([('vals', 'u1'), ('res3', 'S4')]) + values = np._core.records.fromstring(data, types) + vals = values['vals'] + assert_raises(ValueError, vals.setflags, write=True) + + def test_writeable_from_buffer(self): + data = bytearray(b'\x00' * 100) + vals = np.frombuffer(data, 'B') + assert_(vals.flags.writeable) + vals.setflags(write=False) + assert_(vals.flags.writeable is False) + vals.setflags(write=True) + assert_(vals.flags.writeable) + types = np.dtype([('vals', 'u1'), ('res3', 'S4')]) + values = np._core.records.fromstring(data, types) + vals = values['vals'] + assert_(vals.flags.writeable) + vals.setflags(write=False) + assert_(vals.flags.writeable is False) + vals.setflags(write=True) + assert_(vals.flags.writeable) + + @pytest.mark.skipif(IS_PYPY, reason="PyPy always copies") + def test_writeable_pickle(self): + import pickle + # Small arrays will be copied without setting base. + # See condition for using PyArray_SetBaseObject in + # array_setstate. + a = np.arange(1000) + for v in range(pickle.HIGHEST_PROTOCOL): + vals = pickle.loads(pickle.dumps(a, v)) + assert_(vals.flags.writeable) + assert_(isinstance(vals.base, bytes)) + + def test_writeable_from_c_data(self): + # Test that the writeable flag can be changed for an array wrapping + # low level C-data, but not owning its data. + # Also see that this is deprecated to change from python. + from numpy._core._multiarray_tests import get_c_wrapping_array + + arr_writeable = get_c_wrapping_array(True) + assert not arr_writeable.flags.owndata + assert arr_writeable.flags.writeable + view = arr_writeable[...] + + # Toggling the writeable flag works on the view: + view.flags.writeable = False + assert not view.flags.writeable + view.flags.writeable = True + assert view.flags.writeable + # Flag can be unset on the arr_writeable: + arr_writeable.flags.writeable = False + + arr_readonly = get_c_wrapping_array(False) + assert not arr_readonly.flags.owndata + assert not arr_readonly.flags.writeable + + for arr in [arr_writeable, arr_readonly]: + view = arr[...] + view.flags.writeable = False # make sure it is readonly + arr.flags.writeable = False + assert not arr.flags.writeable + + with assert_raises(ValueError): + view.flags.writeable = True + + with assert_raises(ValueError): + arr.flags.writeable = True + + def test_warnonwrite(self): + a = np.arange(10) + a.flags._warn_on_write = True + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always') + a[1] = 10 + a[2] = 10 + # only warn once + assert_(len(w) == 1) + + @pytest.mark.parametrize(["flag", "flag_value", "writeable"], + [("writeable", True, True), + # Delete _warn_on_write after deprecation and simplify + # the parameterization: + ("_warn_on_write", True, False), + ("writeable", False, False)]) + def test_readonly_flag_protocols(self, flag, flag_value, writeable): + a = np.arange(10) + setattr(a.flags, flag, flag_value) + + class MyArr: + __array_struct__ = a.__array_struct__ + + assert memoryview(a).readonly is not writeable + assert a.__array_interface__['data'][1] is not writeable + assert np.asarray(MyArr()).flags.writeable is writeable + + def test_otherflags(self): + arr = np.arange(10) + assert_equal(arr.flags.carray, True) + assert_equal(arr.flags['C'], True) + assert_equal(arr.flags.farray, False) + assert_equal(arr.flags.behaved, True) + assert_equal(arr.flags.fnc, False) + assert_equal(arr.flags.forc, True) + assert_equal(arr.flags.owndata, True) + assert_equal(arr.flags.writeable, True) + assert_equal(arr.flags.aligned, True) + assert_equal(arr.flags.writebackifcopy, False) + assert_equal(arr.flags['X'], False) + assert_equal(arr.flags['WRITEBACKIFCOPY'], False) + + def test_string_align(self): + a = np.zeros(4, dtype=np.dtype('|S4')) + assert_(a.flags.aligned) + # not power of two are accessed byte-wise and thus considered aligned + a = np.zeros(5, dtype=np.dtype('|S4')) + assert_(a.flags.aligned) + + def test_void_align(self): + a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")])) + assert_(a.flags.aligned) + + @pytest.mark.parametrize("row_size", [5, 1 << 16]) + @pytest.mark.parametrize("row_count", [1, 5]) + @pytest.mark.parametrize("ndmin", [0, 1, 2]) + def test_xcontiguous_load_txt(self, row_size, row_count, ndmin): + s = io.StringIO('\n'.join(['1.0 ' * row_size] * row_count)) + a = np.loadtxt(s, ndmin=ndmin) + + assert a.flags.c_contiguous + x = [i for i in a.shape if i != 1] + assert a.flags.f_contiguous == (len(x) <= 1) + + +class TestHash: + # see #3793 + def test_int(self): + for st, ut, s in [(np.int8, np.uint8, 8), + (np.int16, np.uint16, 16), + (np.int32, np.uint32, 32), + (np.int64, np.uint64, 64)]: + for i in range(1, s): + assert_equal(hash(st(-2**i)), hash(-2**i), + err_msg="%r: -2**%d" % (st, i)) + assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)), + err_msg="%r: 2**%d" % (st, i - 1)) + assert_equal(hash(st(2**i - 1)), hash(2**i - 1), + err_msg="%r: 2**%d - 1" % (st, i)) + + i = max(i - 1, 1) + assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)), + err_msg="%r: 2**%d" % (ut, i - 1)) + assert_equal(hash(ut(2**i - 1)), hash(2**i - 1), + err_msg="%r: 2**%d - 1" % (ut, i)) + + +class TestAttributes: + def _create_arrays(self): + one = np.arange(10) + two = np.arange(20).reshape(4, 5) + three = np.arange(60, dtype=np.float64).reshape(2, 5, 6) + return one, two, three + + def test_attributes(self): + one, two, three = self._create_arrays() + assert_equal(one.shape, (10,)) + assert_equal(two.shape, (4, 5)) + assert_equal(three.shape, (2, 5, 6)) + three.shape = (10, 3, 2) + assert_equal(three.shape, (10, 3, 2)) + three.shape = (2, 5, 6) + assert_equal(one.strides, (one.itemsize,)) + num = two.itemsize + assert_equal(two.strides, (5 * num, num)) + num = three.itemsize + assert_equal(three.strides, (30 * num, 6 * num, num)) + assert_equal(one.ndim, 1) + assert_equal(two.ndim, 2) + assert_equal(three.ndim, 3) + num = two.itemsize + assert_equal(two.size, 20) + assert_equal(two.nbytes, 20 * num) + assert_equal(two.itemsize, two.dtype.itemsize) + assert_equal(two.base, np.arange(20)) + + def test_dtypeattr(self): + one, _, three = self._create_arrays() + assert_equal(one.dtype, np.dtype(np.int_)) + assert_equal(three.dtype, np.dtype(np.float64)) + assert_equal(one.dtype.char, np.dtype(int).char) + assert one.dtype.char in "lq" + assert_equal(three.dtype.char, 'd') + assert_(three.dtype.str[0] in '<>') + assert_equal(one.dtype.str[1], 'i') + assert_equal(three.dtype.str[1], 'f') + + def test_int_subclassing(self): + # Regression test for https://github.com/numpy/numpy/pull/3526 + + numpy_int = np.int_(0) + + # int_ doesn't inherit from Python int, because it's not fixed-width + assert_(not isinstance(numpy_int, int)) + + def test_stridesattr(self): + x, _, _ = self._create_arrays() + + def make_array(size, offset, strides): + return np.ndarray(size, buffer=x, dtype=int, + offset=offset * x.itemsize, + strides=strides * x.itemsize) + + assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) + assert_raises(ValueError, make_array, 4, 4, -2) + assert_raises(ValueError, make_array, 4, 2, -1) + assert_raises(ValueError, make_array, 8, 3, 1) + assert_equal(make_array(8, 3, 0), np.array([3] * 8)) + # Check behavior reported in gh-2503: + assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3])) + make_array(0, 0, 10) + + def test_set_stridesattr(self): + x, _, _ = self._create_arrays() + + def make_array(size, offset, strides): + try: + r = np.ndarray([size], dtype=int, buffer=x, + offset=offset * x.itemsize) + except Exception as e: + raise RuntimeError(e) + with pytest.warns(DeprecationWarning): + r.strides = strides * x.itemsize + return r + + assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) + assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9])) + assert_raises(ValueError, make_array, 4, 4, -2) + assert_raises(ValueError, make_array, 4, 2, -1) + assert_raises(RuntimeError, make_array, 8, 3, 1) + # Check that the true extent of the array is used. + # Test relies on as_strided base not exposing a buffer. + x = stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0)) + + def set_strides(arr, strides): + with pytest.warns(DeprecationWarning): + arr.strides = strides + + assert_raises(ValueError, set_strides, x, (10 * x.itemsize, x.itemsize)) + + # Test for offset calculations: + x = stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1], + shape=(10,), strides=(-1,)) + assert_raises(ValueError, set_strides, x[::-1], -1) + a = x[::-1] + with pytest.warns(DeprecationWarning): + a.strides = 1 + with pytest.warns(DeprecationWarning): + a[::2].strides = 2 + + # test 0d + arr_0d = np.array(0) + with pytest.warns(DeprecationWarning): + arr_0d.strides = () + assert_raises(TypeError, set_strides, arr_0d, None) + + def test_fill(self): + for t in "?bhilqpBHILQPfdgFDGO": + x = np.empty((3, 2, 1), t) + y = np.empty((3, 2, 1), t) + x.fill(1) + y[...] = 1 + assert_equal(x, y) + + def test_fill_max_uint64(self): + x = np.empty((3, 2, 1), dtype=np.uint64) + y = np.empty((3, 2, 1), dtype=np.uint64) + value = 2**64 - 1 + y[...] = value + x.fill(value) + assert_array_equal(x, y) + + def test_fill_struct_array(self): + # Filling from a scalar + x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8') + x.fill(x[0]) + assert_equal(x['f1'][1], x['f1'][0]) + # Filling from a tuple that can be converted + # to a scalar + x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')]) + x.fill((3.5, -2)) + assert_array_equal(x['a'], [3.5, 3.5]) + assert_array_equal(x['b'], [-2, -2]) + + def test_fill_readonly(self): + # gh-22922 + a = np.zeros(11) + a.setflags(write=False) + with pytest.raises(ValueError, match=".*read-only"): + a.fill(0) + + def test_fill_subarrays(self): + # NOTE: + # This is also a regression test for a crash with PYTHONMALLOC=debug + + dtype = np.dtype("2= 3 + + arg0 = "object" if func is np.array else "a" + assert arg0 in sig.parameters + assert sig.parameters[arg0].default is inspect.Parameter.empty + assert sig.parameters[arg0].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + + assert "dtype" in sig.parameters + assert sig.parameters["dtype"].default is None + assert sig.parameters["dtype"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + + assert "like" in sig.parameters + assert sig.parameters["like"].default is None + assert sig.parameters["like"].kind is inspect.Parameter.KEYWORD_ONLY + + +class TestAssignment: + def test_assignment_broadcasting(self): + a = np.arange(6).reshape(2, 3) + + # Broadcasting the input to the output + a[...] = np.arange(3) + assert_equal(a, [[0, 1, 2], [0, 1, 2]]) + a[...] = np.arange(2).reshape(2, 1) + assert_equal(a, [[0, 0, 0], [1, 1, 1]]) + + # For compatibility with <= 1.5, a limited version of broadcasting + # the output to the input. + # + # This behavior is inconsistent with NumPy broadcasting + # in general, because it only uses one of the two broadcasting + # rules (adding a new "1" dimension to the left of the shape), + # applied to the output instead of an input. In NumPy 2.0, this kind + # of broadcasting assignment will likely be disallowed. + a[...] = np.arange(6)[::-1].reshape(1, 2, 3) + assert_equal(a, [[5, 4, 3], [2, 1, 0]]) + # The other type of broadcasting would require a reduction operation. + + def assign(a, b): + a[...] = b + + assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3)) + + def test_assignment_errors(self): + # Address issue #2276 + class C: + pass + a = np.zeros(1) + + def assign(v): + a[0] = v + + assert_raises((AttributeError, TypeError), assign, C()) + assert_raises(ValueError, assign, [1]) + + @pytest.mark.filterwarnings( + "ignore:.*set_string_function.*:DeprecationWarning" + ) + def test_unicode_assignment(self): + # gh-5049 + from numpy._core.arrayprint import set_printoptions + + @contextmanager + def inject_str(s): + """ replace ndarray.__str__ temporarily """ + set_printoptions(formatter={"all": lambda x: s}) + try: + yield + finally: + set_printoptions() + + a1d = np.array(['test']) + a0d = np.array('done') + with inject_str('bad'): + a1d[0] = a0d # previously this would invoke __str__ + assert_equal(a1d[0], 'done') + + # this would crash for the same reason + np.array([np.array('\xe5\xe4\xf6')]) + + def test_stringlike_empty_list(self): + # gh-8902 + u = np.array(['done']) + b = np.array([b'done']) + + class bad_sequence: + def __getitem__(self, _, /): pass + def __len__(self): raise RuntimeError + + assert_raises(ValueError, operator.setitem, u, 0, []) + assert_raises(ValueError, operator.setitem, b, 0, []) + + assert_raises(ValueError, operator.setitem, u, 0, bad_sequence()) + assert_raises(ValueError, operator.setitem, b, 0, bad_sequence()) + + def test_longdouble_assignment(self): + # only relevant if longdouble is larger than float + # we're looking for loss of precision + + for dtype in (np.longdouble, np.clongdouble): + # gh-8902 + tinyb = np.nextafter(np.longdouble(0), 1).astype(dtype) + tinya = np.nextafter(np.longdouble(0), -1).astype(dtype) + + # construction + tiny1d = np.array([tinya]) + assert_equal(tiny1d[0], tinya) + + # scalar = scalar + tiny1d[0] = tinyb + assert_equal(tiny1d[0], tinyb) + + # 0d = scalar + tiny1d[0, ...] = tinya + assert_equal(tiny1d[0], tinya) + + # 0d = 0d + tiny1d[0, ...] = tinyb[...] + assert_equal(tiny1d[0], tinyb) + + # scalar = 0d + tiny1d[0] = tinyb[...] + assert_equal(tiny1d[0], tinyb) + + arr = np.array([np.array(tinya)]) + assert_equal(arr[0], tinya) + + def test_cast_to_string(self): + # cast to str should do "str(scalar)", not "str(scalar.item())" + # When converting a float to a string via array assignment, we + # want to ensure that the conversion uses str(scalar) to preserve + # the expected precision. + a = np.zeros(1, dtype='S20') + a[:] = np.array(['1.12345678901234567890'], dtype='f8') + assert_equal(a[0], b"1.1234567890123457") + + +class TestDtypedescr: + def test_construction(self): + d1 = np.dtype('i4') + assert_equal(d1, np.dtype(np.int32)) + d2 = np.dtype('f8') + assert_equal(d2, np.dtype(np.float64)) + + def test_byteorders(self): + assert_(np.dtype('i4')) + assert_(np.dtype([('a', 'i4')])) + + def test_structured_non_void(self): + fields = [('a', ' ndmax validation + data = np.array([[1, 2, 3], [4, 5, 6]]) + with pytest.raises(ValueError, match="object too deep for desired array"): + np.array(data, ndmax=1, dtype=object) + + +class TestStructured: + def test_subarray_field_access(self): + a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))]) + a['a'] = np.arange(60).reshape(3, 5, 2, 2) + + # Since the subarray is always in C-order, a transpose + # does not swap the subarray: + assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3)) + + # In Fortran order, the subarray gets appended + # like in all other cases, not prepended as a special case + b = a.copy(order='F') + assert_equal(a['a'].shape, b['a'].shape) + assert_equal(a.T['a'].shape, a.T.copy()['a'].shape) + + def test_subarray_comparison(self): + # Check that comparisons between record arrays with + # multi-dimensional field types work properly + a = np.rec.fromrecords( + [([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])], + dtype=[('a', ('f4', 3)), ('b', object), ('c', ('i4', (2, 2)))]) + b = a.copy() + assert_equal(a == b, [True, True]) + assert_equal(a != b, [False, False]) + b[1].b = 'c' + assert_equal(a == b, [True, False]) + assert_equal(a != b, [False, True]) + for i in range(3): + b[0].a = a[0].a + b[0].a[i] = 5 + assert_equal(a == b, [False, False]) + assert_equal(a != b, [True, True]) + for i in range(2): + for j in range(2): + b = a.copy() + b[0].c[i, j] = 10 + assert_equal(a == b, [False, True]) + assert_equal(a != b, [True, False]) + + # Check that broadcasting with a subarray works, including cases that + # require promotion to work: + a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')]) + b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')]) + assert_equal(a == b, [[True, True, False], [False, False, True]]) + assert_equal(b == a, [[True, True, False], [False, False, True]]) + a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))]) + b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))]) + assert_equal(a == b, [[True, True, False], [False, False, True]]) + assert_equal(b == a, [[True, True, False], [False, False, True]]) + a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))]) + b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))]) + assert_equal(a == b, [[True, False, False], [False, False, True]]) + assert_equal(b == a, [[True, False, False], [False, False, True]]) + + # Check that broadcasting Fortran-style arrays with a subarray work + a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F') + b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))]) + assert_equal(a == b, [[True, False, False], [False, False, True]]) + assert_equal(b == a, [[True, False, False], [False, False, True]]) + + # Check that incompatible sub-array shapes don't result to broadcasting + x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')]) + y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')]) + # The main importance is that it does not return True: + with pytest.raises(TypeError): + x == y + + x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')]) + y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')]) + # The main importance is that it does not return True: + with pytest.raises(TypeError): + x == y + + def test_empty_structured_array_comparison(self): + # Check that comparison works on empty arrays with nontrivially + # shaped fields + a = np.zeros(0, [('a', 'i8'), ('b', 'f8')]) + assert_equal(a == b, [False, True]) + assert_equal(a != b, [True, False]) + + a = np.array([(5, 42), (10, 1)], dtype=[('a', '>f8'), ('b', 'i8')]) + assert_equal(a == b, [False, True]) + assert_equal(a != b, [True, False]) + + # Including with embedded subarray dtype (although subarray comparison + # itself may still be a bit weird and compare the raw data) + a = np.array([(5, 42), (10, 1)], dtype=[('a', '10>f8'), ('b', '5i8')]) + assert_equal(a == b, [False, True]) + assert_equal(a != b, [True, False]) + + @pytest.mark.parametrize("op", [ + operator.eq, lambda x, y: operator.eq(y, x), + operator.ne, lambda x, y: operator.ne(y, x)]) + def test_void_comparison_failures(self, op): + # In principle, one could decide to return an array of False for some + # if comparisons are impossible. But right now we return TypeError + # when "void" dtype are involved. + x = np.zeros(3, dtype=[('a', 'i1')]) + y = np.zeros(3) + # Cannot compare non-structured to structured: + with pytest.raises(TypeError): + op(x, y) + + # Added title prevents promotion, but casts are OK: + y = np.zeros(3, dtype=[(('title', 'a'), 'i1')]) + assert np.can_cast(y.dtype, x.dtype) + with pytest.raises(TypeError): + op(x, y) + + x = np.zeros(3, dtype="V7") + y = np.zeros(3, dtype="V8") + with pytest.raises(TypeError): + op(x, y) + + def test_casting(self): + # Check that casting a structured array to change its byte order + # works + a = np.array([(1,)], dtype=[('a', 'i4')], casting='unsafe')) + b = a.astype([('a', '>i4')]) + a_tmp = a.byteswap() + a_tmp = a_tmp.view(a_tmp.dtype.newbyteorder()) + assert_equal(b, a_tmp) + assert_equal(a['a'][0], b['a'][0]) + + # Check that equality comparison works on structured arrays if + # they are 'equiv'-castable + a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', 'f8')]) + assert_(np.can_cast(a.dtype, b.dtype, casting='equiv')) + assert_equal(a == b, [True, True]) + + # Check that 'equiv' casting can change byte order + assert_(np.can_cast(a.dtype, b.dtype, casting='equiv')) + c = a.astype(b.dtype, casting='equiv') + assert_equal(a == c, [True, True]) + + # Check that 'safe' casting can change byte order and up-cast + # fields + t = [('a', 'f8')] + assert_(np.can_cast(a.dtype, t, casting='safe')) + c = a.astype(t, casting='safe') + assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)), + [True, True]) + + # Check that 'same_kind' casting can change byte order and + # change field widths within a "kind" + t = [('a', 'f4')] + assert_(np.can_cast(a.dtype, t, casting='same_kind')) + c = a.astype(t, casting='same_kind') + assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)), + [True, True]) + + # Check that casting fails if the casting rule should fail on + # any of the fields + t = [('a', '>i8'), ('b', 'i2'), ('b', 'i8'), ('b', 'i4')] + assert_(not np.can_cast(a.dtype, t, casting=casting)) + t = [('a', '>i4'), ('b', 'i8") + ab = np.array([(1, 2)], dtype=[A, B]) + ba = np.array([(1, 2)], dtype=[B, A]) + assert_raises(TypeError, np.concatenate, ab, ba) + assert_raises(TypeError, np.result_type, ab.dtype, ba.dtype) + assert_raises(TypeError, np.promote_types, ab.dtype, ba.dtype) + + # dtypes with same field names/order but different memory offsets + # and byte-order are promotable to packed nbo. + assert_equal(np.promote_types(ab.dtype, ba[['a', 'b']].dtype), + repack_fields(ab.dtype.newbyteorder('N'))) + + # gh-13667 + # dtypes with different fieldnames but castable field types are castable + assert_equal(np.can_cast(ab.dtype, ba.dtype), True) + assert_equal(ab.astype(ba.dtype).dtype, ba.dtype) + assert_equal(np.can_cast('f8,i8', [('f0', 'f8'), ('f1', 'i8')]), True) + assert_equal(np.can_cast('f8,i8', [('f1', 'f8'), ('f0', 'i8')]), True) + assert_equal(np.can_cast('f8,i8', [('f1', 'i8'), ('f0', 'f8')]), False) + assert_equal(np.can_cast('f8,i8', [('f1', 'i8'), ('f0', 'f8')], + casting='unsafe'), True) + + ab[:] = ba # make sure assignment still works + + # tests of type-promotion of corresponding fields + dt1 = np.dtype([("", "i4")]) + dt2 = np.dtype([("", "i8")]) + assert_equal(np.promote_types(dt1, dt2), np.dtype([('f0', 'i8')])) + assert_equal(np.promote_types(dt2, dt1), np.dtype([('f0', 'i8')])) + assert_raises(TypeError, np.promote_types, dt1, np.dtype([("", "V3")])) + assert_equal(np.promote_types('i4,f8', 'i8,f4'), + np.dtype([('f0', 'i8'), ('f1', 'f8')])) + # test nested case + dt1nest = np.dtype([("", dt1)]) + dt2nest = np.dtype([("", dt2)]) + assert_equal(np.promote_types(dt1nest, dt2nest), + np.dtype([('f0', np.dtype([('f0', 'i8')]))])) + + # note that offsets are lost when promoting: + dt = np.dtype({'names': ['x'], 'formats': ['i4'], 'offsets': [8]}) + a = np.ones(3, dtype=dt) + assert_equal(np.concatenate([a, a]).dtype, np.dtype([('x', 'i4')])) + + @pytest.mark.parametrize("dtype_dict", [ + {"names": ["a", "b"], "formats": ["i4", "f"], "itemsize": 100}, + {"names": ["a", "b"], "formats": ["i4", "f"], + "offsets": [0, 12]}]) + @pytest.mark.parametrize("align", [True, False]) + def test_structured_promotion_packs(self, dtype_dict, align): + # Structured dtypes are packed when promoted (we consider the packed + # form to be "canonical"), so tere is no extra padding. + dtype = np.dtype(dtype_dict, align=align) + # Remove non "canonical" dtype options: + dtype_dict.pop("itemsize", None) + dtype_dict.pop("offsets", None) + expected = np.dtype(dtype_dict, align=align) + + res = np.promote_types(dtype, dtype) + assert res.itemsize == expected.itemsize + assert res.fields == expected.fields + + # But the "expected" one, should just be returned unchanged: + res = np.promote_types(expected, expected) + assert res is expected + + def test_structured_asarray_is_view(self): + # A scalar viewing an array preserves its view even when creating a + # new array. This test documents behaviour, it may not be the best + # desired behaviour. + arr = np.array([1], dtype="i,i") + scalar = arr[0] + assert not scalar.flags.owndata # view into the array + assert np.asarray(scalar).base is scalar + # But never when a dtype is passed in: + assert np.asarray(scalar, dtype=scalar.dtype).base is None + # A scalar which owns its data does not have this property. + # It is not easy to create one, one method is to use pickle: + scalar = pickle.loads(pickle.dumps(scalar)) + assert scalar.flags.owndata + assert np.asarray(scalar).base is None + +class TestBool: + def test_test_interning(self): + a0 = np.bool(0) + b0 = np.bool(False) + assert_(a0 is b0) + a1 = np.bool(1) + b1 = np.bool(True) + assert_(a1 is b1) + assert_(np.array([True])[0] is a1) + assert_(np.array(True)[()] is a1) + + def test_sum(self): + d = np.ones(101, dtype=bool) + assert_equal(d.sum(), d.size) + assert_equal(d[::2].sum(), d[::2].size) + assert_equal(d[::-2].sum(), d[::-2].size) + + d = np.frombuffer(b'\xff\xff' * 100, dtype=bool) + assert_equal(d.sum(), d.size) + assert_equal(d[::2].sum(), d[::2].size) + assert_equal(d[::-2].sum(), d[::-2].size) + + def check_count_nonzero(self, power, length): + powers = [2 ** i for i in range(length)] + for i in range(2**power): + l = [(i & x) != 0 for x in powers] + a = np.array(l, dtype=bool) + c = builtins.sum(l) + assert_equal(np.count_nonzero(a), c) + av = a.view(np.uint8) + av *= 3 + assert_equal(np.count_nonzero(a), c) + av *= 4 + assert_equal(np.count_nonzero(a), c) + av[av != 0] = 0xFF + assert_equal(np.count_nonzero(a), c) + + def test_count_nonzero(self): + # check all 12 bit combinations in a length 17 array + # covers most cases of the 16 byte unrolled code + self.check_count_nonzero(12, 17) + + @pytest.mark.slow + def test_count_nonzero_all(self): + # check all combinations in a length 17 array + # covers all cases of the 16 byte unrolled code + self.check_count_nonzero(17, 17) + + def test_count_nonzero_unaligned(self): + # prevent mistakes as e.g. gh-4060 + for o in range(7): + a = np.zeros((18,), dtype=bool)[o + 1:] + a[:o] = True + assert_equal(np.count_nonzero(a), builtins.sum(a.tolist())) + a = np.ones((18,), dtype=bool)[o + 1:] + a[:o] = False + assert_equal(np.count_nonzero(a), builtins.sum(a.tolist())) + + def _test_cast_from_flexible(self, dtype): + # empty string -> false + for n in range(3): + v = np.array(b'', (dtype, n)) + assert_equal(bool(v), False) + assert_equal(bool(v[()]), False) + assert_equal(v.astype(bool), False) + assert_(isinstance(v.astype(bool), np.ndarray)) + assert_(v[()].astype(bool) is np.False_) + + # anything else -> true + for n in range(1, 4): + for val in [b'a', b'0', b' ']: + v = np.array(val, (dtype, n)) + assert_equal(bool(v), True) + assert_equal(bool(v[()]), True) + assert_equal(v.astype(bool), True) + assert_(isinstance(v.astype(bool), np.ndarray)) + assert_(v[()].astype(bool) is np.True_) + + def test_cast_from_void(self): + self._test_cast_from_flexible(np.void) + + def test_cast_from_unicode(self): + self._test_cast_from_flexible(np.str_) + + def test_cast_from_bytes(self): + self._test_cast_from_flexible(np.bytes_) + + +class TestZeroSizeFlexible: + @staticmethod + def _zeros(shape, dtype=str): + dtype = np.dtype(dtype) + if dtype == np.void: + return np.zeros(shape, dtype=(dtype, 0)) + + # not constructable directly + dtype = np.dtype([('x', dtype, 0)]) + return np.zeros(shape, dtype=dtype)['x'] + + def test_create(self): + zs = self._zeros(10, bytes) + assert_equal(zs.itemsize, 0) + zs = self._zeros(10, np.void) + assert_equal(zs.itemsize, 0) + zs = self._zeros(10, str) + assert_equal(zs.itemsize, 0) + + def _test_sort_partition(self, name, kinds, **kwargs): + # Previously, these would all hang + for dt in [bytes, np.void, str]: + zs = self._zeros(10, dt) + sort_method = getattr(zs, name) + sort_func = getattr(np, name) + for kind in kinds: + sort_method(kind=kind, **kwargs) + sort_func(zs, kind=kind, **kwargs) + + def test_sort(self): + self._test_sort_partition('sort', kinds='qhs') + + def test_argsort(self): + self._test_sort_partition('argsort', kinds='qhs') + + def test_partition(self): + self._test_sort_partition('partition', kinds=['introselect'], kth=2) + + def test_argpartition(self): + self._test_sort_partition('argpartition', kinds=['introselect'], kth=2) + + def test_resize(self): + # previously an error + for dt in [bytes, np.void, str]: + zs = self._zeros(10, dt) + zs.resize(25) + zs.resize((10, 10)) + + def test_view(self): + for dt in [bytes, np.void, str]: + zs = self._zeros(10, dt) + + # viewing as itself should be allowed + assert_equal(zs.view(dt).dtype, np.dtype(dt)) + + # viewing as any non-empty type gives an empty result + assert_equal(zs.view((dt, 1)).shape, (0,)) + + def test_dumps(self): + zs = self._zeros(10, int) + assert_equal(zs, pickle.loads(zs.dumps())) + + def test_pickle(self): + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + for dt in [bytes, np.void, str]: + zs = self._zeros(10, dt) + p = pickle.dumps(zs, protocol=proto) + zs2 = pickle.loads(p) + + assert_equal(zs.dtype, zs2.dtype) + + def test_pickle_empty(self): + """Checking if an empty array pickled and un-pickled will not cause a + segmentation fault""" + arr = np.array([]).reshape(999999, 0) + pk_dmp = pickle.dumps(arr) + pk_load = pickle.loads(pk_dmp) + + assert pk_load.size == 0 + + @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, + reason="requires pickle protocol 5") + def test_pickle_with_buffercallback(self): + array = np.arange(10) + buffers = [] + bytes_string = pickle.dumps(array, buffer_callback=buffers.append, + protocol=5) + array_from_buffer = pickle.loads(bytes_string, buffers=buffers) + # when using pickle protocol 5 with buffer callbacks, + # array_from_buffer is reconstructed from a buffer holding a view + # to the initial array's data, so modifying an element in array + # should modify it in array_from_buffer too. + array[0] = -1 + assert array_from_buffer[0] == -1, array_from_buffer[0] + + +class TestMethods: + + sort_kinds = ['quicksort', 'heapsort', 'stable'] + + def test_all_where(self): + a = np.array([[True, False, True], + [False, False, False], + [True, True, True]]) + wh_full = np.array([[True, False, True], + [False, False, False], + [True, False, True]]) + wh_lower = np.array([[False], + [False], + [True]]) + for _ax in [0, None]: + assert_equal(a.all(axis=_ax, where=wh_lower), + np.all(a[wh_lower[:, 0], :], axis=_ax)) + assert_equal(np.all(a, axis=_ax, where=wh_lower), + a[wh_lower[:, 0], :].all(axis=_ax)) + + assert_equal(a.all(where=wh_full), True) + assert_equal(np.all(a, where=wh_full), True) + assert_equal(a.all(where=False), True) + assert_equal(np.all(a, where=False), True) + + def test_any_where(self): + a = np.array([[True, False, True], + [False, False, False], + [True, True, True]]) + wh_full = np.array([[False, True, False], + [True, True, True], + [False, False, False]]) + wh_middle = np.array([[False], + [True], + [False]]) + for _ax in [0, None]: + assert_equal(a.any(axis=_ax, where=wh_middle), + np.any(a[wh_middle[:, 0], :], axis=_ax)) + assert_equal(np.any(a, axis=_ax, where=wh_middle), + a[wh_middle[:, 0], :].any(axis=_ax)) + assert_equal(a.any(where=wh_full), False) + assert_equal(np.any(a, where=wh_full), False) + assert_equal(a.any(where=False), False) + assert_equal(np.any(a, where=False), False) + + @pytest.mark.parametrize("dtype", + ["i8", "U10", "object", "datetime64[ms]"]) + def test_any_and_all_result_dtype(self, dtype): + arr = np.ones(3, dtype=dtype) + assert arr.any().dtype == np.bool + assert arr.all().dtype == np.bool + + def test_any_and_all_object_dtype(self): + # (seberg) Not sure we should even allow dtype here, but it is. + arr = np.ones(3, dtype=object) + # keepdims to prevent getting a scalar. + assert arr.any(dtype=object, keepdims=True).dtype == object + assert arr.all(dtype=object, keepdims=True).dtype == object + + def test_compress(self): + tgt = [[5, 6, 7, 8, 9]] + arr = np.arange(10).reshape(2, 5) + out = arr.compress([0, 1], axis=0) + assert_equal(out, tgt) + + tgt = [[1, 3], [6, 8]] + out = arr.compress([0, 1, 0, 1, 0], axis=1) + assert_equal(out, tgt) + + tgt = [[1], [6]] + arr = np.arange(10).reshape(2, 5) + out = arr.compress([0, 1], axis=1) + assert_equal(out, tgt) + + arr = np.arange(10).reshape(2, 5) + out = arr.compress([0, 1]) + assert_equal(out, 1) + + def test_choose(self): + x = 2 * np.ones((3,), dtype=int) + y = 3 * np.ones((3,), dtype=int) + x2 = 2 * np.ones((2, 3), dtype=int) + y2 = 3 * np.ones((2, 3), dtype=int) + ind = np.array([0, 0, 1]) + + A = ind.choose((x, y)) + assert_equal(A, [2, 2, 3]) + + A = ind.choose((x2, y2)) + assert_equal(A, [[2, 2, 3], [2, 2, 3]]) + + A = ind.choose((x, y2)) + assert_equal(A, [[2, 2, 3], [2, 2, 3]]) + + oned = np.ones(1) + # gh-12031, caused SEGFAULT + assert_raises(TypeError, oned.choose, np.void(0), [oned]) + + out = np.array(0) + ret = np.choose(np.array(1), [10, 20, 30], out=out) + assert out is ret + assert_equal(out[()], 20) + + # gh-6272 check overlap on out + x = np.arange(5) + y = np.choose([0, 0, 0], [x[:3], x[:3], x[:3]], out=x[1:4], mode='wrap') + assert_equal(y, np.array([0, 1, 2])) + + # gh_28206 check fail when out not writeable + x = np.arange(3) + out = np.zeros(3) + out.setflags(write=False) + assert_raises(ValueError, np.choose, [0, 1, 2], [x, x, x], out=out) + + def test_prod(self): + ba = [1, 2, 10, 11, 6, 5, 4] + ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] + + for ctype in [np.int16, np.uint16, np.int32, np.uint32, + np.float32, np.float64, np.complex64, np.complex128]: + a = np.array(ba, ctype) + a2 = np.array(ba2, ctype) + if ctype in ['1', 'b']: + assert_raises(ArithmeticError, a.prod) + assert_raises(ArithmeticError, a2.prod, axis=1) + else: + assert_equal(a.prod(axis=0), 26400) + assert_array_equal(a2.prod(axis=0), + np.array([50, 36, 84, 180], ctype)) + assert_array_equal(a2.prod(axis=-1), + np.array([24, 1890, 600], ctype)) + + @pytest.mark.parametrize('dtype', [None, object]) + def test_repeat(self, dtype): + m = np.array([1, 2, 3, 4, 5, 6], dtype=dtype) + m_rect = m.reshape((2, 3)) + + A = m.repeat([1, 3, 2, 1, 1, 2]) + assert_equal(A, [1, 2, 2, 2, 3, + 3, 4, 5, 6, 6]) + + A = m.repeat(2) + assert_equal(A, [1, 1, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 6]) + + A = m_rect.repeat([2, 1], axis=0) + assert_equal(A, [[1, 2, 3], + [1, 2, 3], + [4, 5, 6]]) + + A = m_rect.repeat([1, 3, 2], axis=1) + assert_equal(A, [[1, 2, 2, 2, 3, 3], + [4, 5, 5, 5, 6, 6]]) + + A = m_rect.repeat(2, axis=0) + assert_equal(A, [[1, 2, 3], + [1, 2, 3], + [4, 5, 6], + [4, 5, 6]]) + + A = m_rect.repeat(2, axis=1) + assert_equal(A, [[1, 1, 2, 2, 3, 3], + [4, 4, 5, 5, 6, 6]]) + + def test_reshape(self): + arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]) + + tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]] + assert_equal(arr.reshape(2, 6), tgt) + + tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] + assert_equal(arr.reshape(3, 4), tgt) + + tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]] + assert_equal(arr.reshape((3, 4), order='F'), tgt) + + tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]] + assert_equal(arr.T.reshape((3, 4), order='C'), tgt) + + def test_round(self): + def check_round(arr, expected, *round_args): + assert_equal(arr.round(*round_args), expected) + # With output array + out = np.zeros_like(arr) + res = arr.round(*round_args, out=out) + assert_equal(out, expected) + assert out is res + + check_round(np.array([1, 2, 3]), [1, 2, 3]) + check_round(np.array([1.2, 1.5]), [1, 2]) + check_round(np.array(1.5), 2) + check_round(np.array([12.2, 15.5]), [10, 20], -1) + check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1) + # Complex rounding + check_round(np.array([4.5 + 1.5j]), [4 + 2j]) + check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1) + + @pytest.mark.parametrize('dt', ['uint8', int, float, complex]) + def test_round_copies(self, dt): + a = np.arange(3, dtype=dt) + assert not np.shares_memory(a.round(), a) + assert not np.shares_memory(a.round(decimals=2), a) + + out = np.empty(3, dtype=dt) + assert not np.shares_memory(a.round(out=out), a) + + a = np.arange(12).astype(dt).reshape(3, 4).T + + assert a.flags.f_contiguous + assert np.round(a).flags.f_contiguous + + def test_squeeze(self): + a = np.array([[[1], [2], [3]]]) + assert_equal(a.squeeze(), [1, 2, 3]) + assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]]) + assert_raises(ValueError, a.squeeze, axis=(1,)) + assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]]) + + def test_transpose(self): + a = np.array([[1, 2], [3, 4]]) + assert_equal(a.transpose(), [[1, 3], [2, 4]]) + assert_raises(ValueError, lambda: a.transpose(0)) + assert_raises(ValueError, lambda: a.transpose(0, 0)) + assert_raises(ValueError, lambda: a.transpose(0, 1, 2)) + + def test_sort(self): + # test ordering for floats and complex containing nans. It is only + # necessary to check the less-than comparison, so sorts that + # only follow the insertion sort path are sufficient. We only + # test doubles and complex doubles as the logic is the same. + + # check doubles + msg = "Test real sort order with nans" + a = np.array([np.nan, 1, 0]) + b = np.sort(a) + assert_equal(b, a[::-1], msg) + # check complex + msg = "Test complex sort order with nans" + a = np.zeros(9, dtype=np.complex128) + a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0] + a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0] + b = np.sort(a) + assert_equal(b, a[::-1], msg) + + with assert_raises_regex( + ValueError, + "`kind` and keyword parameters can't be provided at the same time" + ): + np.sort(a, kind="stable", stable=True) + + # all c scalar sorts use the same code with different types + # so it suffices to run a quick check with one type. The number + # of sorted items must be greater than ~50 to check the actual + # algorithm because quick and merge sort fall over to insertion + # sort for small arrays. + + @pytest.mark.parametrize('dtype', [np.uint8, np.uint16, np.uint32, np.uint64, + np.float16, np.float32, np.float64, + np.longdouble]) + def test_sort_unsigned(self, dtype): + a = np.arange(101, dtype=dtype) + b = a[::-1].copy() + for kind in self.sort_kinds: + msg = f"scalar sort, kind={kind}" + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + @pytest.mark.parametrize('dtype', + [np.int8, np.int16, np.int32, np.int64, np.float16, + np.float32, np.float64, np.longdouble]) + def test_sort_signed(self, dtype): + a = np.arange(-50, 51, dtype=dtype) + b = a[::-1].copy() + for kind in self.sort_kinds: + msg = f"scalar sort, kind={kind}" + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + @pytest.mark.parametrize('dtype', [np.float32, np.float64, np.longdouble]) + @pytest.mark.parametrize('part', ['real', 'imag']) + def test_sort_complex(self, part, dtype): + # test complex sorts. These use the same code as the scalars + # but the compare function differs. + cdtype = { + np.single: np.csingle, + np.double: np.cdouble, + np.longdouble: np.clongdouble, + }[dtype] + a = np.arange(-50, 51, dtype=dtype) + b = a[::-1].copy() + ai = (a * (1 + 1j)).astype(cdtype) + bi = (b * (1 + 1j)).astype(cdtype) + setattr(ai, part, 1) + setattr(bi, part, 1) + for kind in self.sort_kinds: + msg = f"complex sort, {part} part == 1, kind={kind}" + c = ai.copy() + c.sort(kind=kind) + assert_equal(c, ai, msg) + c = bi.copy() + c.sort(kind=kind) + assert_equal(c, ai, msg) + + def test_sort_complex_byte_swapping(self): + # test sorting of complex arrays requiring byte-swapping, gh-5441 + for endianness in '<>': + for dt in np.typecodes['Complex']: + arr = np.array([1 + 3.j, 2 + 2.j, 3 + 1.j], dtype=endianness + dt) + c = arr.copy() + c.sort() + msg = f'byte-swapped complex sort, dtype={dt}' + assert_equal(c, arr, msg) + + @pytest.mark.parametrize('dtype', [np.bytes_, np.str_]) + def test_sort_string(self, dtype): + # np.array will perform the encoding to bytes for us in the bytes test + a = np.array(['aaaaaaaa' + chr(i) for i in range(101)], dtype=dtype) + b = a[::-1].copy() + for kind in self.sort_kinds: + msg = f"kind={kind}" + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + def test_sort_object(self): + # test object array sorts. + a = np.empty((101,), dtype=object) + a[:] = list(range(101)) + b = a[::-1] + for kind in ['q', 'h', 'm']: + msg = f"kind={kind}" + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + @pytest.mark.parametrize("dt", [ + np.dtype([('f', float), ('i', int)]), + np.dtype([('f', float), ('i', object)])]) + @pytest.mark.parametrize("step", [1, 2]) + def test_sort_structured(self, dt, step): + # test record array sorts. + a = np.array([(i, i) for i in range(101 * step)], dtype=dt) + b = a[::-1] + for kind in ['q', 'h', 'm']: + msg = f"kind={kind}" + c = a.copy()[::step] + indx = c.argsort(kind=kind) + c.sort(kind=kind) + assert_equal(c, a[::step], msg) + assert_equal(a[::step][indx], a[::step], msg) + c = b.copy()[::step] + indx = c.argsort(kind=kind) + c.sort(kind=kind) + assert_equal(c, a[step - 1::step], msg) + assert_equal(b[::step][indx], a[step - 1::step], msg) + + @pytest.mark.parametrize('dtype', ['datetime64[D]', 'timedelta64[D]']) + def test_sort_time(self, dtype): + # test datetime64 and timedelta64 sorts. + a = np.arange(0, 101, dtype=dtype) + b = a[::-1] + for kind in ['q', 'h', 'm']: + msg = f"kind={kind}" + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + def test_sort_axis(self): + # check axis handling. This should be the same for all type + # specific sorts, so we only check it for one type and one kind + a = np.array([[3, 2], [1, 0]]) + b = np.array([[1, 0], [3, 2]]) + c = np.array([[2, 3], [0, 1]]) + d = a.copy() + d.sort(axis=0) + assert_equal(d, b, "test sort with axis=0") + d = a.copy() + d.sort(axis=1) + assert_equal(d, c, "test sort with axis=1") + d = a.copy() + d.sort() + assert_equal(d, c, "test sort with default axis") + + def test_sort_size_0(self): + # check axis handling for multidimensional empty arrays + a = np.array([]).reshape((3, 2, 1, 0)) + for axis in range(-a.ndim, a.ndim): + msg = f'test empty array sort with axis={axis}' + assert_equal(np.sort(a, axis=axis), a, msg) + msg = 'test empty array sort with axis=None' + assert_equal(np.sort(a, axis=None), a.ravel(), msg) + + def test_sort_bad_ordering(self): + # test generic class with bogus ordering, + # should not segfault. + class Boom: + def __lt__(self, other): + return True + + a = np.array([Boom()] * 100, dtype=object) + for kind in self.sort_kinds: + msg = f"kind={kind}" + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + def test_void_sort(self): + # gh-8210 - previously segfaulted + for i in range(4): + rand = np.random.randint(256, size=4000, dtype=np.uint8) + arr = rand.view('V4') + arr[::-1].sort() + + dt = np.dtype([('val', 'i4', (1,))]) + for i in range(4): + rand = np.random.randint(256, size=4000, dtype=np.uint8) + arr = rand.view(dt) + arr[::-1].sort() + + def test_sort_raises(self): + # gh-9404 + arr = np.array([0, datetime.now(), 1], dtype=object) + for kind in self.sort_kinds: + assert_raises(TypeError, arr.sort, kind=kind) + # gh-3879 + + class Raiser: + def raises_anything(*args, **kwargs): + raise TypeError("SOMETHING ERRORED") + __eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything + arr = np.array([[Raiser(), n] for n in range(10)]).reshape(-1) + np.random.shuffle(arr) + for kind in self.sort_kinds: + assert_raises(TypeError, arr.sort, kind=kind) + + def test_sort_degraded(self): + # test degraded dataset would take minutes to run with normal qsort + d = np.arange(1000000) + do = d.copy() + x = d + # create a median of 3 killer where each median is the sorted second + # last element of the quicksort partition + while x.size > 3: + mid = x.size // 2 + x[mid], x[-2] = x[-2], x[mid] + x = x[:-2] + + assert_equal(np.sort(d), do) + assert_equal(d[np.argsort(d)], do) + + def test_copy(self): + def assert_fortran(arr): + assert_(arr.flags.fortran) + assert_(arr.flags.f_contiguous) + assert_(not arr.flags.c_contiguous) + + def assert_c(arr): + assert_(not arr.flags.fortran) + assert_(not arr.flags.f_contiguous) + assert_(arr.flags.c_contiguous) + + a = np.empty((2, 2), order='F') + # Test copying a Fortran array + assert_c(a.copy()) + assert_c(a.copy('C')) + assert_fortran(a.copy('F')) + assert_fortran(a.copy('A')) + + # Now test starting with a C array. + a = np.empty((2, 2), order='C') + assert_c(a.copy()) + assert_c(a.copy('C')) + assert_fortran(a.copy('F')) + assert_c(a.copy('A')) + + @pytest.mark.parametrize("dtype", ['O', np.int32, 'i,O']) + def test__deepcopy__(self, dtype): + # Force the entry of NULLs into array + a = np.empty(4, dtype=dtype) + ctypes.memset(a.ctypes.data, 0, a.nbytes) + + # Ensure no error is raised, see gh-21833 + b = a.__deepcopy__({}) + + a[0] = 42 + with pytest.raises(AssertionError): + assert_array_equal(a, b) + + def test__deepcopy___void_scalar(self): + # see comments in gh-29643 + value = np.void('Rex', dtype=[('name', 'U10')]) + value_deepcopy = value.__deepcopy__(None) + value[0] = None + assert value_deepcopy[0] == 'Rex' + + @pytest.mark.parametrize("sctype", [np.int64, np.float32, np.float64]) + def test__deepcopy__scalar(self, sctype): + # test optimization from gh-29656 + value = sctype(1.1) + value_deepcopy = value.__deepcopy__(None) + assert value is value_deepcopy + + def test__deepcopy__catches_failure(self): + class MyObj: + def __deepcopy__(self, *args, **kwargs): + raise RuntimeError + + arr = np.array([1, MyObj(), 3], dtype='O') + with pytest.raises(RuntimeError): + arr.__deepcopy__({}) + + def test_sort_order(self): + # Test sorting an array with fields + x1 = np.array([21, 32, 14]) + x2 = np.array(['my', 'first', 'name']) + x3 = np.array([3.1, 4.5, 6.2]) + r = np.rec.fromarrays([x1, x2, x3], names='id,word,number') + + r.sort(order=['id']) + assert_equal(r.id, np.array([14, 21, 32])) + assert_equal(r.word, np.array(['name', 'my', 'first'])) + assert_equal(r.number, np.array([6.2, 3.1, 4.5])) + + r.sort(order=['word']) + assert_equal(r.id, np.array([32, 21, 14])) + assert_equal(r.word, np.array(['first', 'my', 'name'])) + assert_equal(r.number, np.array([4.5, 3.1, 6.2])) + + r.sort(order=['number']) + assert_equal(r.id, np.array([21, 32, 14])) + assert_equal(r.word, np.array(['my', 'first', 'name'])) + assert_equal(r.number, np.array([3.1, 4.5, 6.2])) + + assert_raises_regex(ValueError, 'duplicate', + lambda: r.sort(order=['id', 'id'])) + + if sys.byteorder == 'little': + strtype = '>i2' + else: + strtype = '': + for dt in np.typecodes['Complex']: + arr = np.array([1 + 3.j, 2 + 2.j, 3 + 1.j], dtype=endianness + dt) + msg = f'byte-swapped complex argsort, dtype={dt}' + assert_equal(arr.argsort(), + np.arange(len(arr), dtype=np.intp), msg) + + # test string argsorts. + s = 'aaaaaaaa' + a = np.array([s + chr(i) for i in range(101)]) + b = a[::-1].copy() + r = np.arange(101) + rr = r[::-1] + for kind in self.sort_kinds: + msg = f"string argsort, kind={kind}" + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # test unicode argsorts. + s = 'aaaaaaaa' + a = np.array([s + chr(i) for i in range(101)], dtype=np.str_) + b = a[::-1] + r = np.arange(101) + rr = r[::-1] + for kind in self.sort_kinds: + msg = f"unicode argsort, kind={kind}" + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # test object array argsorts. + a = np.empty((101,), dtype=object) + a[:] = list(range(101)) + b = a[::-1] + r = np.arange(101) + rr = r[::-1] + for kind in self.sort_kinds: + msg = f"object argsort, kind={kind}" + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # test structured array argsorts. + dt = np.dtype([('f', float), ('i', int)]) + a = np.array([(i, i) for i in range(101)], dtype=dt) + b = a[::-1] + r = np.arange(101) + rr = r[::-1] + for kind in self.sort_kinds: + msg = f"structured array argsort, kind={kind}" + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # test datetime64 argsorts. + a = np.arange(0, 101, dtype='datetime64[D]') + b = a[::-1] + r = np.arange(101) + rr = r[::-1] + for kind in ['q', 'h', 'm']: + msg = f"datetime64 argsort, kind={kind}" + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # test timedelta64 argsorts. + a = np.arange(0, 101, dtype='timedelta64[D]') + b = a[::-1] + r = np.arange(101) + rr = r[::-1] + for kind in ['q', 'h', 'm']: + msg = f"timedelta64 argsort, kind={kind}" + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # check axis handling. This should be the same for all type + # specific argsorts, so we only check it for one type and one kind + a = np.array([[3, 2], [1, 0]]) + b = np.array([[1, 1], [0, 0]]) + c = np.array([[1, 0], [1, 0]]) + assert_equal(a.copy().argsort(axis=0), b) + assert_equal(a.copy().argsort(axis=1), c) + assert_equal(a.copy().argsort(), c) + + # check axis handling for multidimensional empty arrays + a = np.array([]).reshape((3, 2, 1, 0)) + for axis in range(-a.ndim, a.ndim): + msg = f'test empty array argsort with axis={axis}' + assert_equal(np.argsort(a, axis=axis), + np.zeros_like(a, dtype=np.intp), msg) + msg = 'test empty array argsort with axis=None' + assert_equal(np.argsort(a, axis=None), + np.zeros_like(a.ravel(), dtype=np.intp), msg) + + # check that stable argsorts are stable + r = np.arange(100) + # scalars + a = np.zeros(100) + assert_equal(a.argsort(kind='m'), r) + # complex + a = np.zeros(100, dtype=complex) + assert_equal(a.argsort(kind='m'), r) + # string + a = np.array(['aaaaaaaaa' for i in range(100)]) + assert_equal(a.argsort(kind='m'), r) + # unicode + a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.str_) + assert_equal(a.argsort(kind='m'), r) + + with assert_raises_regex( + ValueError, + "`kind` and keyword parameters can't be provided at the same time" + ): + np.argsort(a, kind="stable", stable=True) + + def test_sort_unicode_kind(self): + d = np.arange(10) + k = b'\xc3\xa4'.decode("UTF8") + assert_raises(ValueError, d.sort, kind=k) + assert_raises(ValueError, d.argsort, kind=k) + + @pytest.mark.parametrize('a', [ + np.array([0, 1, np.nan], dtype=np.float16), + np.array([0, 1, np.nan], dtype=np.float32), + np.array([0, 1, np.nan]), + ]) + def test_searchsorted_floats(self, a): + # test for floats arrays containing nans. Explicitly test + # half, single, and double precision floats to verify that + # the NaN-handling is correct. + msg = f"Test real ({a.dtype}) searchsorted with nans, side='l'" + b = a.searchsorted(a, side='left') + assert_equal(b, np.arange(3), msg) + msg = f"Test real ({a.dtype}) searchsorted with nans, side='r'" + b = a.searchsorted(a, side='right') + assert_equal(b, np.arange(1, 4), msg) + # check keyword arguments + a.searchsorted(v=1) + x = np.array([0, 1, np.nan], dtype='float32') + y = np.searchsorted(x, x[-1]) + assert_equal(y, 2) + + def test_searchsorted_complex(self): + # test for complex arrays containing nans. + # The search sorted routines use the compare functions for the + # array type, so this checks if that is consistent with the sort + # order. + # check double complex + a = np.zeros(9, dtype=np.complex128) + a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan] + a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan] + msg = "Test complex searchsorted with nans, side='l'" + b = a.searchsorted(a, side='left') + assert_equal(b, np.arange(9), msg) + msg = "Test complex searchsorted with nans, side='r'" + b = a.searchsorted(a, side='right') + assert_equal(b, np.arange(1, 10), msg) + msg = "Test searchsorted with little endian, side='l'" + a = np.array([0, 128], dtype=' p[:, i]).all(), + msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T)) + for row in range(p.shape[0]): + self.assert_partitioned(p[row], [i]) + self.assert_partitioned(parg[row], [i]) + + p = np.partition(d0, i, axis=0, kind=k) + parg = d0[np.argpartition(d0, i, axis=0, kind=k), + np.arange(d0.shape[1])[None, :]] + aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt)) + # array_less does not seem to work right + at((p[:i, :] <= p[i, :]).all(), + msg="%d: %r <= %r" % (i, p[i, :], p[:i, :])) + at((p[i + 1:, :] > p[i, :]).all(), + msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:])) + for col in range(p.shape[1]): + self.assert_partitioned(p[:, col], [i]) + self.assert_partitioned(parg[:, col], [i]) + + # check inplace + dc = d.copy() + dc.partition(i, kind=k) + assert_equal(dc, np.partition(d, i, kind=k)) + dc = d0.copy() + dc.partition(i, axis=0, kind=k) + assert_equal(dc, np.partition(d0, i, axis=0, kind=k)) + dc = d1.copy() + dc.partition(i, axis=1, kind=k) + assert_equal(dc, np.partition(d1, i, axis=1, kind=k)) + + def assert_partitioned(self, d, kth): + prev = 0 + for k in np.sort(kth): + assert_array_compare(operator.__le__, d[prev:k], d[k], + err_msg='kth %d' % k) + assert_((d[k:] >= d[k]).all(), + msg="kth %d, %r not greater equal %r" % (k, d[k:], d[k])) + prev = k + 1 + + def test_partition_iterative(self): + d = np.arange(17) + kth = (0, 1, 2, 429, 231) + assert_raises(ValueError, d.partition, kth) + assert_raises(ValueError, d.argpartition, kth) + d = np.arange(10).reshape((2, 5)) + assert_raises(ValueError, d.partition, kth, axis=0) + assert_raises(ValueError, d.partition, kth, axis=1) + assert_raises(ValueError, np.partition, d, kth, axis=1) + assert_raises(ValueError, np.partition, d, kth, axis=None) + + d = np.array([3, 4, 2, 1]) + p = np.partition(d, (0, 3)) + self.assert_partitioned(p, (0, 3)) + self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3)) + + assert_array_equal(p, np.partition(d, (-3, -1))) + assert_array_equal(p, d[np.argpartition(d, (-3, -1))]) + + d = np.arange(17) + np.random.shuffle(d) + d.partition(range(d.size)) + assert_array_equal(np.arange(17), d) + np.random.shuffle(d) + assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))]) + + # test unsorted kth + d = np.arange(17) + np.random.shuffle(d) + keys = np.array([1, 3, 8, -2]) + np.random.shuffle(d) + p = np.partition(d, keys) + self.assert_partitioned(p, keys) + p = d[np.argpartition(d, keys)] + self.assert_partitioned(p, keys) + np.random.shuffle(keys) + assert_array_equal(np.partition(d, keys), p) + assert_array_equal(d[np.argpartition(d, keys)], p) + + # equal kth + d = np.arange(20)[::-1] + self.assert_partitioned(np.partition(d, [5] * 4), [5]) + self.assert_partitioned(np.partition(d, [5] * 4 + [6, 13]), + [5] * 4 + [6, 13]) + self.assert_partitioned(d[np.argpartition(d, [5] * 4)], [5]) + self.assert_partitioned(d[np.argpartition(d, [5] * 4 + [6, 13])], + [5] * 4 + [6, 13]) + + d = np.arange(12) + np.random.shuffle(d) + d1 = np.tile(np.arange(12), (4, 1)) + map(np.random.shuffle, d1) + d0 = np.transpose(d1) + + kth = (1, 6, 7, -1) + p = np.partition(d1, kth, axis=1) + pa = d1[np.arange(d1.shape[0])[:, None], + d1.argpartition(kth, axis=1)] + assert_array_equal(p, pa) + for i in range(d1.shape[0]): + self.assert_partitioned(p[i, :], kth) + p = np.partition(d0, kth, axis=0) + pa = d0[np.argpartition(d0, kth, axis=0), + np.arange(d0.shape[1])[None, :]] + assert_array_equal(p, pa) + for i in range(d0.shape[1]): + self.assert_partitioned(p[:, i], kth) + + def test_partition_cdtype(self): + d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41), + ('Lancelot', 1.9, 38)], + dtype=[('name', '|S10'), ('height', ' (numpy ufunc, has_in_place_version, preferred_dtype) + ops = { + 'add': (np.add, True, float), + 'sub': (np.subtract, True, float), + 'mul': (np.multiply, True, float), + 'truediv': (np.true_divide, True, float), + 'floordiv': (np.floor_divide, True, float), + 'mod': (np.remainder, True, float), + 'divmod': (np.divmod, False, float), + 'pow': (np.power, True, int), + 'lshift': (np.left_shift, True, int), + 'rshift': (np.right_shift, True, int), + 'and': (np.bitwise_and, True, int), + 'xor': (np.bitwise_xor, True, int), + 'or': (np.bitwise_or, True, int), + 'matmul': (np.matmul, True, float), + # 'ge': (np.less_equal, False), + # 'gt': (np.less, False), + # 'le': (np.greater_equal, False), + # 'lt': (np.greater, False), + # 'eq': (np.equal, False), + # 'ne': (np.not_equal, False), + } + + class Coerced(Exception): + pass + + def array_impl(self): + raise Coerced + + def op_impl(self, other): + return "forward" + + def rop_impl(self, other): + return "reverse" + + def iop_impl(self, other): + return "in-place" + + def array_ufunc_impl(self, ufunc, method, *args, **kwargs): + return ("__array_ufunc__", ufunc, method, args, kwargs) + + # Create an object with the given base, in the given module, with a + # bunch of placeholder __op__ methods, and optionally a + # __array_ufunc__ and __array_priority__. + def make_obj(base, array_priority=False, array_ufunc=False, + alleged_module="__main__"): + class_namespace = {"__array__": array_impl} + if array_priority is not False: + class_namespace["__array_priority__"] = array_priority + for op in ops: + class_namespace[f"__{op}__"] = op_impl + class_namespace[f"__r{op}__"] = rop_impl + class_namespace[f"__i{op}__"] = iop_impl + if array_ufunc is not False: + class_namespace["__array_ufunc__"] = array_ufunc + eval_namespace = {"base": base, + "class_namespace": class_namespace, + "__name__": alleged_module, + } + MyType = eval("type('MyType', (base,), class_namespace)", + eval_namespace) + if issubclass(MyType, np.ndarray): + # Use this range to avoid special case weirdnesses around + # divide-by-0, pow(x, 2), overflow due to pow(big, big), etc. + return np.arange(3, 7).reshape(2, 2).view(MyType) + else: + return MyType() + + def check(obj, binop_override_expected, ufunc_override_expected, + inplace_override_expected, check_scalar=True): + for op, (ufunc, has_inplace, dtype) in ops.items(): + err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s' + % (op, ufunc, has_inplace, dtype)) + check_objs = [np.arange(3, 7, dtype=dtype).reshape(2, 2)] + if check_scalar: + check_objs.append(check_objs[0][0]) + for arr in check_objs: + arr_method = getattr(arr, f"__{op}__") + + def first_out_arg(result): + if op == "divmod": + assert_(isinstance(result, tuple)) + return result[0] + else: + return result + + # arr __op__ obj + if binop_override_expected: + assert_equal(arr_method(obj), NotImplemented, err_msg) + elif ufunc_override_expected: + assert_equal(arr_method(obj)[0], "__array_ufunc__", + err_msg) + elif (isinstance(obj, np.ndarray) and + (type(obj).__array_ufunc__ is + np.ndarray.__array_ufunc__)): + # __array__ gets ignored + res = first_out_arg(arr_method(obj)) + assert_(res.__class__ is obj.__class__, err_msg) + else: + assert_raises((TypeError, Coerced), + arr_method, obj, err_msg=err_msg) + # obj __op__ arr + arr_rmethod = getattr(arr, f"__r{op}__") + if ufunc_override_expected: + res = arr_rmethod(obj) + assert_equal(res[0], "__array_ufunc__", + err_msg=err_msg) + assert_equal(res[1], ufunc, err_msg=err_msg) + elif (isinstance(obj, np.ndarray) and + (type(obj).__array_ufunc__ is + np.ndarray.__array_ufunc__)): + # __array__ gets ignored + res = first_out_arg(arr_rmethod(obj)) + assert_(res.__class__ is obj.__class__, err_msg) + else: + # __array_ufunc__ = "asdf" creates a TypeError + assert_raises((TypeError, Coerced), + arr_rmethod, obj, err_msg=err_msg) + + # arr __iop__ obj + # array scalars don't have in-place operators + if has_inplace and isinstance(arr, np.ndarray): + arr_imethod = getattr(arr, f"__i{op}__") + if inplace_override_expected: + assert_equal(arr_method(obj), NotImplemented, + err_msg=err_msg) + elif ufunc_override_expected: + res = arr_imethod(obj) + assert_equal(res[0], "__array_ufunc__", err_msg) + assert_equal(res[1], ufunc, err_msg) + assert_(type(res[-1]["out"]) is tuple, err_msg) + assert_(res[-1]["out"][0] is arr, err_msg) + elif (isinstance(obj, np.ndarray) and + (type(obj).__array_ufunc__ is + np.ndarray.__array_ufunc__)): + # __array__ gets ignored + assert_(arr_imethod(obj) is arr, err_msg) + else: + assert_raises((TypeError, Coerced), + arr_imethod, obj, + err_msg=err_msg) + + op_fn = getattr(operator, op, None) + if op_fn is None: + op_fn = getattr(operator, op + "_", None) + if op_fn is None: + op_fn = getattr(builtins, op) + assert_equal(op_fn(obj, arr), "forward", err_msg) + if not isinstance(obj, np.ndarray): + if binop_override_expected: + assert_equal(op_fn(arr, obj), "reverse", err_msg) + elif ufunc_override_expected: + assert_equal(op_fn(arr, obj)[0], "__array_ufunc__", + err_msg) + if ufunc_override_expected: + assert_equal(ufunc(obj, arr)[0], "__array_ufunc__", + err_msg) + + # No array priority, no array_ufunc -> nothing called + check(make_obj(object), False, False, False) + # Negative array priority, no array_ufunc -> nothing called + # (has to be very negative, because scalar priority is -1000000.0) + check(make_obj(object, array_priority=-2**30), False, False, False) + # Positive array priority, no array_ufunc -> binops and iops only + check(make_obj(object, array_priority=1), True, False, True) + # ndarray ignores array_priority for ndarray subclasses + check(make_obj(np.ndarray, array_priority=1), False, False, False, + check_scalar=False) + # Positive array_priority and array_ufunc -> array_ufunc only + check(make_obj(object, array_priority=1, + array_ufunc=array_ufunc_impl), False, True, False) + check(make_obj(np.ndarray, array_priority=1, + array_ufunc=array_ufunc_impl), False, True, False) + # array_ufunc set to None -> defer binops only + check(make_obj(object, array_ufunc=None), True, False, False) + check(make_obj(np.ndarray, array_ufunc=None), True, False, False, + check_scalar=False) + + @pytest.mark.parametrize("priority", [None, "runtime error"]) + def test_ufunc_binop_bad_array_priority(self, priority): + # Mainly checks that this does not crash. The second array has a lower + # priority than -1 ("error value"). If the __radd__ actually exists, + # bad things can happen (I think via the scalar paths). + # In principle both of these can probably just be errors in the future. + class BadPriority: + @property + def __array_priority__(self): + if priority == "runtime error": + raise RuntimeError("RuntimeError in __array_priority__!") + return priority + + def __radd__(self, other): + return "result" + + class LowPriority(np.ndarray): + __array_priority__ = -1000 + + # Priority failure uses the same as scalars (smaller -1000). So the + # LowPriority wins with 'result' for each element (inner operation). + res = np.arange(3).view(LowPriority) + BadPriority() + assert res.shape == (3,) + assert res[0] == 'result' + + @pytest.mark.parametrize("scalar", [ + np.longdouble(1), np.timedelta64(120, 'm')]) + @pytest.mark.parametrize("op", [operator.add, operator.xor]) + def test_scalar_binop_guarantees_ufunc(self, scalar, op): + # Test that __array_ufunc__ will always cause ufunc use even when + # we have to protect some other calls from recursing (see gh-26904). + class SomeClass: + def __array_ufunc__(self, ufunc, method, *inputs, **kw): + return "result" + + assert SomeClass() + scalar == "result" + assert scalar + SomeClass() == "result" + + def test_ufunc_override_normalize_signature(self): + # gh-5674 + class SomeClass: + def __array_ufunc__(self, ufunc, method, *inputs, **kw): + return kw + + a = SomeClass() + kw = np.add(a, [1]) + assert_('sig' not in kw and 'signature' not in kw) + kw = np.add(a, [1], sig='ii->i') + assert_('sig' not in kw and 'signature' in kw) + assert_equal(kw['signature'], 'ii->i') + kw = np.add(a, [1], signature='ii->i') + assert_('sig' not in kw and 'signature' in kw) + assert_equal(kw['signature'], 'ii->i') + + def test_array_ufunc_index(self): + # Check that index is set appropriately, also if only an output + # is passed on (latter is another regression tests for github bug 4753) + # This also checks implicitly that 'out' is always a tuple. + class CheckIndex: + def __array_ufunc__(self, ufunc, method, *inputs, **kw): + for i, a in enumerate(inputs): + if a is self: + return i + # calls below mean we must be in an output. + for j, a in enumerate(kw['out']): + if a is self: + return (j,) + + a = CheckIndex() + dummy = np.arange(2.) + # 1 input, 1 output + assert_equal(np.sin(a), 0) + assert_equal(np.sin(dummy, a), (0,)) + assert_equal(np.sin(dummy, out=a), (0,)) + assert_equal(np.sin(dummy, out=(a,)), (0,)) + assert_equal(np.sin(a, a), 0) + assert_equal(np.sin(a, out=a), 0) + assert_equal(np.sin(a, out=(a,)), 0) + # 1 input, 2 outputs + assert_equal(np.modf(dummy, a), (0,)) + assert_equal(np.modf(dummy, None, a), (1,)) + assert_equal(np.modf(dummy, dummy, a), (1,)) + assert_equal(np.modf(dummy, out=(a, None)), (0,)) + assert_equal(np.modf(dummy, out=(a, dummy)), (0,)) + assert_equal(np.modf(dummy, out=(None, a)), (1,)) + assert_equal(np.modf(dummy, out=(dummy, a)), (1,)) + assert_equal(np.modf(a, out=(dummy, a)), 0) + with assert_raises(TypeError): + # Out argument must be tuple, since there are multiple outputs + np.modf(dummy, out=a) + + assert_raises(ValueError, np.modf, dummy, out=(a,)) + + # 2 inputs, 1 output + assert_equal(np.add(a, dummy), 0) + assert_equal(np.add(dummy, a), 1) + assert_equal(np.add(dummy, dummy, a), (0,)) + assert_equal(np.add(dummy, a, a), 1) + assert_equal(np.add(dummy, dummy, out=a), (0,)) + assert_equal(np.add(dummy, dummy, out=(a,)), (0,)) + assert_equal(np.add(a, dummy, out=a), 0) + + def test_out_override(self): + # regression test for github bug 4753 + class OutClass(np.ndarray): + def __array_ufunc__(self, ufunc, method, *inputs, **kw): + if 'out' in kw: + tmp_kw = kw.copy() + tmp_kw.pop('out') + func = getattr(ufunc, method) + kw['out'][0][...] = func(*inputs, **tmp_kw) + + A = np.array([0]).view(OutClass) + B = np.array([5]) + C = np.array([6]) + np.multiply(C, B, A) + assert_equal(A[0], 30) + assert_(isinstance(A, OutClass)) + A[0] = 0 + np.multiply(C, B, out=A) + assert_equal(A[0], 30) + assert_(isinstance(A, OutClass)) + + def test_pow_array_object_dtype(self): + # test pow on arrays of object dtype + class SomeClass: + def __init__(self, num=None): + self.num = num + + # want to ensure a fast pow path is not taken + def __mul__(self, other): + raise AssertionError('__mul__ should not be called') + + def __truediv__(self, other): + raise AssertionError('__truediv__ should not be called') + + def __pow__(self, exp): + return SomeClass(num=self.num ** exp) + + def __eq__(self, other): + if isinstance(other, SomeClass): + return self.num == other.num + + __rpow__ = __pow__ + + def pow_for(exp, arr): + return np.array([x ** exp for x in arr]) + + obj_arr = np.array([SomeClass(1), SomeClass(2), SomeClass(3)]) + + assert_equal(obj_arr ** 0.5, pow_for(0.5, obj_arr)) + assert_equal(obj_arr ** 0, pow_for(0, obj_arr)) + assert_equal(obj_arr ** 1, pow_for(1, obj_arr)) + assert_equal(obj_arr ** -1, pow_for(-1, obj_arr)) + assert_equal(obj_arr ** 2, pow_for(2, obj_arr)) + + def test_pow_calls_square_structured_dtype(self): + # gh-29388 + dt = np.dtype([('a', 'i4'), ('b', 'i4')]) + a = np.array([(1, 2), (3, 4)], dtype=dt) + with pytest.raises(TypeError, match="ufunc 'square' not supported"): + a ** 2 + + def test_pos_array_ufunc_override(self): + class A(np.ndarray): + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return getattr(ufunc, method)(*[i.view(np.ndarray) for + i in inputs], **kwargs) + tst = np.array('foo').view(A) + with assert_raises(TypeError): + +tst + + +class TestTemporaryElide: + # elision is only triggered on relatively large arrays + + def test_extension_incref_elide(self): + # test extension (e.g. cython) calling PyNumber_* slots without + # increasing the reference counts + # + # def incref_elide(a): + # d = input.copy() # refcount 1 + # return d, d + d # PyNumber_Add without increasing refcount + from numpy._core._multiarray_tests import incref_elide + d = np.ones(100000) + orig, res = incref_elide(d) + d + d + # the return original should not be changed to an inplace operation + assert_array_equal(orig, d) + assert_array_equal(res, d + d) + + def test_extension_incref_elide_stack(self): + # scanning if the refcount == 1 object is on the python stack to check + # that we are called directly from python is flawed as object may still + # be above the stack pointer and we have no access to the top of it + # + # def incref_elide_l(d): + # return l[4] + l[4] # PyNumber_Add without increasing refcount + from numpy._core._multiarray_tests import incref_elide_l + # padding with 1 makes sure the object on the stack is not overwritten + l = [1, 1, 1, 1, np.ones(100000)] + res = incref_elide_l(l) + # the return original should not be changed to an inplace operation + assert_array_equal(l[4], np.ones(100000)) + assert_array_equal(res, l[4] + l[4]) + + def test_temporary_with_cast(self): + # check that we don't elide into a temporary which would need casting + d = np.ones(200000, dtype=np.int64) + r = ((d + d) + np.array(2**222, dtype='O')) + assert_equal(r.dtype, np.dtype('O')) + + r = ((d + d) / 2) + assert_equal(r.dtype, np.dtype('f8')) + + r = np.true_divide((d + d), 2) + assert_equal(r.dtype, np.dtype('f8')) + + r = ((d + d) / 2.) + assert_equal(r.dtype, np.dtype('f8')) + + r = ((d + d) // 2) + assert_equal(r.dtype, np.dtype(np.int64)) + + # commutative elision into the astype result + f = np.ones(100000, dtype=np.float32) + assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8')) + + # no elision into lower type + d = f.astype(np.float64) + assert_equal(((f + f) + d).dtype, d.dtype) + l = np.ones(100000, dtype=np.longdouble) + assert_equal(((d + d) + l).dtype, l.dtype) + + # test unary abs with different output dtype + for dt in (np.complex64, np.complex128, np.clongdouble): + c = np.ones(100000, dtype=dt) + r = abs(c * 2.0) + assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2))) + + def test_elide_broadcast(self): + # test no elision on broadcast to higher dimension + # only triggers elision code path in debug mode as triggering it in + # normal mode needs 256kb large matching dimension, so a lot of memory + d = np.ones((2000, 1), dtype=int) + b = np.ones((2000), dtype=bool) + r = (1 - d) + b + assert_equal(r, 1) + assert_equal(r.shape, (2000, 2000)) + + def test_elide_scalar(self): + # check inplace op does not create ndarray from scalars + a = np.bool() + assert_(type(~(a & a)) is np.bool) + + def test_elide_scalar_readonly(self): + # The imaginary part of a real array is readonly. This needs to go + # through fast_scalar_power which is only called for powers of + # +1, -1, 0, 0.5, and 2, so use 2. Also need valid refcount for + # elision which can be gotten for the imaginary part of a real + # array. Should not error. + a = np.empty(100000, dtype=np.float64) + a.imag ** 2 + + def test_elide_readonly(self): + # don't try to elide readonly temporaries + r = np.asarray(np.broadcast_to(np.zeros(1), 100000).flat) * 0.0 + assert_equal(r, 0) + + def test_elide_updateifcopy(self): + a = np.ones(2**20)[::2] + b = a.flat.__array__() + 1 + del b + assert_equal(a, 1) + + +class TestCAPI: + def test_IsPythonScalar(self): + from numpy._core._multiarray_tests import IsPythonScalar + assert_(IsPythonScalar(b'foobar')) + assert_(IsPythonScalar(1)) + assert_(IsPythonScalar(2**80)) + assert_(IsPythonScalar(2.)) + assert_(IsPythonScalar("a")) + + @pytest.mark.parametrize("converter", + [_multiarray_tests.run_scalar_intp_converter, + _multiarray_tests.run_scalar_intp_from_sequence]) + def test_intp_sequence_converters(self, converter): + # Test simple values (-1 is special for error return paths) + assert converter(10) == (10,) + assert converter(-1) == (-1,) + # A 0-D array looks a bit like a sequence but must take the integer + # path: + assert converter(np.array(123)) == (123,) + # Test simple sequences (intp_from_sequence only supports length 1): + assert converter((10,)) == (10,) + assert converter(np.array([11])) == (11,) + + @pytest.mark.parametrize("converter", + [_multiarray_tests.run_scalar_intp_converter, + _multiarray_tests.run_scalar_intp_from_sequence]) + @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") + def test_intp_sequence_converters_errors(self, converter): + with pytest.raises(TypeError, + match="expected a sequence of integers or a single integer, "): + converter(object()) + with pytest.raises(TypeError, + match="expected a sequence of integers or a single integer, " + "got '32.0'"): + converter(32.) + with pytest.raises(TypeError, + match="'float' object cannot be interpreted as an integer"): + converter([32.]) + with pytest.raises(ValueError, + match="Maximum allowed dimension"): + # These converters currently convert overflows to a ValueError + converter(2**64) + + @pytest.mark.parametrize( + "entry_point", + [ + module + item + for item in ("sin", "strings.str_len", "fft._pocketfft_umath.ifft") + for module in ("", "numpy:") + ] + [ + "numpy.strings:str_len", + "functools:reduce", + "functools:reduce.__doc__" + ] + ) + def test_import_entry_point(self, entry_point): + modname, _, items = entry_point.rpartition(":") + if modname: + module = obj = importlib.import_module(modname) + else: + module = np + exp = functools.reduce(getattr, items.split("."), module) + got = _multiarray_tests.npy_import_entry_point(entry_point) + assert got == exp + + @pytest.mark.parametrize( + "entry_point", + ["sin.", "numpy:", "numpy:sin:__call__", "numpy.sin:__call__."] + ) + def test_import_entry_point_errors(self, entry_point): + # Don't really care about precise error. + with pytest.raises((ImportError, AttributeError)): + _multiarray_tests.npy_import_entry_point(entry_point) + + +class TestSubscripting: + def test_test_zero_rank(self): + x = np.array([1, 2, 3]) + assert_(isinstance(x[0], np.int_)) + assert_(type(x[0, ...]) is np.ndarray) + + +class TestPickling: + @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5, + reason=('this tests the error messages when trying to' + 'protocol 5 although it is not available')) + def test_correct_protocol5_error_message(self): + array = np.arange(10) + + def test_record_array_with_object_dtype(self): + my_object = object() + + arr_with_object = np.array( + [(my_object, 1, 2.0)], + dtype=[('a', object), ('b', int), ('c', float)]) + arr_without_object = np.array( + [('xxx', 1, 2.0)], + dtype=[('a', str), ('b', int), ('c', float)]) + + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + depickled_arr_with_object = pickle.loads( + pickle.dumps(arr_with_object, protocol=proto)) + depickled_arr_without_object = pickle.loads( + pickle.dumps(arr_without_object, protocol=proto)) + + assert_equal(arr_with_object.dtype, + depickled_arr_with_object.dtype) + assert_equal(arr_without_object.dtype, + depickled_arr_without_object.dtype) + + @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, + reason="requires pickle protocol 5") + def test_f_contiguous_array(self): + f_contiguous_array = np.array([[1, 2, 3], [4, 5, 6]], order='F') + buffers = [] + + # When using pickle protocol 5, Fortran-contiguous arrays can be + # serialized using out-of-band buffers + bytes_string = pickle.dumps(f_contiguous_array, protocol=5, + buffer_callback=buffers.append) + + assert len(buffers) > 0 + + depickled_f_contiguous_array = pickle.loads(bytes_string, + buffers=buffers) + + assert_equal(f_contiguous_array, depickled_f_contiguous_array) + + @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, reason="requires pickle protocol 5") + @pytest.mark.parametrize('transposed_contiguous_array', + [np.random.default_rng(42).random((2, 3, 4)).transpose((1, 0, 2)), + np.random.default_rng(42).random((2, 3, 4, 5)).transpose((1, 3, 0, 2))] + + [np.random.default_rng(42).random(np.arange(2, 7)).transpose(np.random.permutation(5)) for _ in range(3)]) + def test_transposed_contiguous_array(self, transposed_contiguous_array): + buffers = [] + # When using pickle protocol 5, arrays which can be transposed to c_contiguous + # can be serialized using out-of-band buffers + bytes_string = pickle.dumps(transposed_contiguous_array, protocol=5, + buffer_callback=buffers.append) + + assert len(buffers) > 0 + + depickled_transposed_contiguous_array = pickle.loads(bytes_string, + buffers=buffers) + + assert_equal(transposed_contiguous_array, depickled_transposed_contiguous_array) + + @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, reason="requires pickle protocol 5") + def test_load_legacy_pkl_protocol5(self): + # legacy byte strs are dumped in 2.2.1 + c_contiguous_dumped = b'\x80\x05\x95\x90\x00\x00\x00\x00\x00\x00\x00\x8c\x13numpy._core.numeric\x94\x8c\x0b_frombuffer\x94\x93\x94(\x96\x18\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x94\x8c\x05numpy\x94\x8c\x05dtype\x94\x93\x94\x8c\x02u1\x94\x89\x88\x87\x94R\x94(K\x03\x8c\x01|\x94NNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00t\x94bK\x03K\x04K\x02\x87\x94\x8c\x01C\x94t\x94R\x94.' # noqa: E501 + f_contiguous_dumped = b'\x80\x05\x95\x90\x00\x00\x00\x00\x00\x00\x00\x8c\x13numpy._core.numeric\x94\x8c\x0b_frombuffer\x94\x93\x94(\x96\x18\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x94\x8c\x05numpy\x94\x8c\x05dtype\x94\x93\x94\x8c\x02u1\x94\x89\x88\x87\x94R\x94(K\x03\x8c\x01|\x94NNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00t\x94bK\x03K\x04K\x02\x87\x94\x8c\x01F\x94t\x94R\x94.' # noqa: E501 + transposed_contiguous_dumped = b'\x80\x05\x95\xa5\x00\x00\x00\x00\x00\x00\x00\x8c\x16numpy._core.multiarray\x94\x8c\x0c_reconstruct\x94\x93\x94\x8c\x05numpy\x94\x8c\x07ndarray\x94\x93\x94K\x00\x85\x94C\x01b\x94\x87\x94R\x94(K\x01K\x04K\x03K\x02\x87\x94h\x03\x8c\x05dtype\x94\x93\x94\x8c\x02u1\x94\x89\x88\x87\x94R\x94(K\x03\x8c\x01|\x94NNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00t\x94b\x89C\x18\x00\x01\x08\t\x10\x11\x02\x03\n\x0b\x12\x13\x04\x05\x0c\r\x14\x15\x06\x07\x0e\x0f\x16\x17\x94t\x94b.' # noqa: E501 + no_contiguous_dumped = b'\x80\x05\x95\x91\x00\x00\x00\x00\x00\x00\x00\x8c\x16numpy._core.multiarray\x94\x8c\x0c_reconstruct\x94\x93\x94\x8c\x05numpy\x94\x8c\x07ndarray\x94\x93\x94K\x00\x85\x94C\x01b\x94\x87\x94R\x94(K\x01K\x03K\x02\x86\x94h\x03\x8c\x05dtype\x94\x93\x94\x8c\x02u1\x94\x89\x88\x87\x94R\x94(K\x03\x8c\x01|\x94NNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00t\x94b\x89C\x06\x00\x01\x04\x05\x08\t\x94t\x94b.' # noqa: E501 + x = np.arange(24, dtype='uint8').reshape(3, 4, 2) + assert_equal(x, pickle.loads(c_contiguous_dumped)) + x = np.arange(24, dtype='uint8').reshape(3, 4, 2, order='F') + assert_equal(x, pickle.loads(f_contiguous_dumped)) + x = np.arange(24, dtype='uint8').reshape(3, 4, 2).transpose((1, 0, 2)) + assert_equal(x, pickle.loads(transposed_contiguous_dumped)) + x = np.arange(12, dtype='uint8').reshape(3, 4)[:, :2] + assert_equal(x, pickle.loads(no_contiguous_dumped)) + + def test_non_contiguous_array(self): + non_contiguous_array = np.arange(12).reshape(3, 4)[:, :2] + assert not non_contiguous_array.flags.c_contiguous + assert not non_contiguous_array.flags.f_contiguous + + # make sure non-contiguous arrays can be pickled-depickled + # using any protocol + buffers = [] + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + depickled_non_contiguous_array = pickle.loads( + pickle.dumps(non_contiguous_array, protocol=proto, + buffer_callback=buffers.append if proto >= 5 else None)) + + assert_equal(len(buffers), 0) + assert_equal(non_contiguous_array, depickled_non_contiguous_array) + + @pytest.mark.thread_unsafe(reason="calls gc.collect()") + def test_roundtrip(self): + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + carray = np.array([[2, 9], [7, 0], [3, 8]]) + DATA = [ + carray, + np.transpose(carray), + np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int), + ('c', float)]) + ] + + refs = [weakref.ref(a) for a in DATA] + for a in DATA: + assert_equal( + a, pickle.loads(pickle.dumps(a, protocol=proto)), + err_msg=f"{a!r}") + del a, DATA, carray + break_cycles() + # check for reference leaks (gh-12793) + for ref in refs: + assert ref() is None + + def _loads(self, obj): + return pickle.loads(obj, encoding='latin1') + + # version 0 pickles, using protocol=2 to pickle + # version 0 doesn't have a version field + @pytest.mark.filterwarnings( + "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning") + def test_version0_int8(self): + s = b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb." + a = np.array([1, 2, 3, 4], dtype=np.int8) + p = self._loads(s) + assert_equal(a, p) + + @pytest.mark.filterwarnings( + "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning") + def test_version0_float32(self): + s = b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01= g2, [g1[i] >= g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]]) + + def test_mixed(self): + g1 = np.array(["spam", "spa", "spammer", "and eggs"]) + g2 = "spam" + assert_array_equal(g1 == g2, [x == g2 for x in g1]) + assert_array_equal(g1 != g2, [x != g2 for x in g1]) + assert_array_equal(g1 < g2, [x < g2 for x in g1]) + assert_array_equal(g1 > g2, [x > g2 for x in g1]) + assert_array_equal(g1 <= g2, [x <= g2 for x in g1]) + assert_array_equal(g1 >= g2, [x >= g2 for x in g1]) + + def test_unicode(self): + g1 = np.array(["This", "is", "example"]) + g2 = np.array(["This", "was", "example"]) + assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]]) + +class TestArgmaxArgminCommon: + + sizes = [(), (3,), (3, 2), (2, 3), + (3, 3), (2, 3, 4), (4, 3, 2), + (1, 2, 3, 4), (2, 3, 4, 1), + (3, 4, 1, 2), (4, 1, 2, 3), + (64,), (128,), (256,)] + + @pytest.mark.parametrize("size, axis", itertools.chain(*[[(size, axis) + for axis in list(range(-len(size), len(size))) + [None]] + for size in sizes])) + @pytest.mark.parametrize('method', [np.argmax, np.argmin]) + def test_np_argmin_argmax_keepdims(self, size, axis, method): + + arr = np.random.normal(size=size) + + # contiguous arrays + if axis is None: + new_shape = [1 for _ in range(len(size))] + else: + new_shape = list(size) + new_shape[axis] = 1 + new_shape = tuple(new_shape) + + _res_orig = method(arr, axis=axis) + res_orig = _res_orig.reshape(new_shape) + res = method(arr, axis=axis, keepdims=True) + assert_equal(res, res_orig) + assert_(res.shape == new_shape) + outarray = np.empty(res.shape, dtype=res.dtype) + res1 = method(arr, axis=axis, out=outarray, + keepdims=True) + assert_(res1 is outarray) + assert_equal(res, outarray) + + if len(size) > 0: + wrong_shape = list(new_shape) + if axis is not None: + wrong_shape[axis] = 2 + else: + wrong_shape[0] = 2 + wrong_outarray = np.empty(wrong_shape, dtype=res.dtype) + with pytest.raises(ValueError): + method(arr.T, axis=axis, + out=wrong_outarray, keepdims=True) + + # non-contiguous arrays + if axis is None: + new_shape = [1 for _ in range(len(size))] + else: + new_shape = list(size)[::-1] + new_shape[axis] = 1 + new_shape = tuple(new_shape) + + _res_orig = method(arr.T, axis=axis) + res_orig = _res_orig.reshape(new_shape) + res = method(arr.T, axis=axis, keepdims=True) + assert_equal(res, res_orig) + assert_(res.shape == new_shape) + outarray = np.empty(new_shape[::-1], dtype=res.dtype) + outarray = outarray.T + res1 = method(arr.T, axis=axis, out=outarray, + keepdims=True) + assert_(res1 is outarray) + assert_equal(res, outarray) + + if len(size) > 0: + # one dimension lesser for non-zero sized + # array should raise an error + with pytest.raises(ValueError): + method(arr[0], axis=axis, + out=outarray, keepdims=True) + + if len(size) > 0: + wrong_shape = list(new_shape) + if axis is not None: + wrong_shape[axis] = 2 + else: + wrong_shape[0] = 2 + wrong_outarray = np.empty(wrong_shape, dtype=res.dtype) + with pytest.raises(ValueError): + method(arr.T, axis=axis, + out=wrong_outarray, keepdims=True) + + @pytest.mark.parametrize('method', ['max', 'min']) + def test_all(self, method): + a = np.random.normal(0, 1, (4, 5, 6, 7, 8)) + arg_method = getattr(a, 'arg' + method) + val_method = getattr(a, method) + for i in range(a.ndim): + a_maxmin = val_method(i) + aarg_maxmin = arg_method(i) + axes = list(range(a.ndim)) + axes.remove(i) + assert_(np.all(a_maxmin == aarg_maxmin.choose( + *a.transpose(i, *axes)))) + + @pytest.mark.parametrize('method', ['argmax', 'argmin']) + def test_output_shape(self, method): + # see also gh-616 + a = np.ones((10, 5)) + arg_method = getattr(a, method) + # Check some simple shape mismatches + out = np.ones(11, dtype=np.int_) + assert_raises(ValueError, arg_method, -1, out) + + out = np.ones((2, 5), dtype=np.int_) + assert_raises(ValueError, arg_method, -1, out) + + # these could be relaxed possibly (used to allow even the previous) + out = np.ones((1, 10), dtype=np.int_) + assert_raises(ValueError, arg_method, -1, out) + + out = np.ones(10, dtype=np.int_) + arg_method(-1, out=out) + assert_equal(out, arg_method(-1)) + + @pytest.mark.parametrize('ndim', [0, 1]) + @pytest.mark.parametrize('method', ['argmax', 'argmin']) + def test_ret_is_out(self, ndim, method): + a = np.ones((4,) + (256,) * ndim) + arg_method = getattr(a, method) + out = np.empty((256,) * ndim, dtype=np.intp) + ret = arg_method(axis=0, out=out) + assert ret is out + + @pytest.mark.parametrize('np_array, method, idx, val', + [(np.zeros, 'argmax', 5942, "as"), + (np.ones, 'argmin', 6001, "0")]) + def test_unicode(self, np_array, method, idx, val): + d = np_array(6031, dtype='= cmin)) + assert_(np.all(x <= cmax)) + + def _clip_type(self, type_group, array_max, + clip_min, clip_max, inplace=False, + expected_min=None, expected_max=None): + if expected_min is None: + expected_min = clip_min + if expected_max is None: + expected_max = clip_max + + for T in np._core.sctypes[type_group]: + if sys.byteorder == 'little': + byte_orders = ['=', '>'] + else: + byte_orders = ['<', '='] + + for byteorder in byte_orders: + dtype = np.dtype(T).newbyteorder(byteorder) + + x = (np.random.random(1000) * array_max).astype(dtype) + if inplace: + # The tests that call us pass clip_min and clip_max that + # might not fit in the destination dtype. They were written + # assuming the previous unsafe casting, which now must be + # passed explicitly to avoid a warning. + x.clip(clip_min, clip_max, x, casting='unsafe') + else: + x = x.clip(clip_min, clip_max) + byteorder = '=' + + if x.dtype.byteorder == '|': + byteorder = '|' + assert_equal(x.dtype.byteorder, byteorder) + self._check_range(x, expected_min, expected_max) + return x + + def test_basic(self): + for inplace in [False, True]: + self._clip_type( + 'float', 1024, -12.8, 100.2, inplace=inplace) + self._clip_type( + 'float', 1024, 0, 0, inplace=inplace) + + self._clip_type( + 'int', 1024, -120, 100, inplace=inplace) + self._clip_type( + 'int', 1024, 0, 0, inplace=inplace) + + self._clip_type( + 'uint', 1024, 0, 0, inplace=inplace) + self._clip_type( + 'uint', 1024, 10, 100, inplace=inplace) + + @pytest.mark.parametrize("inplace", [False, True]) + def test_int_out_of_range(self, inplace): + # Simple check for out-of-bound integers, also testing the in-place + # path. + x = (np.random.random(1000) * 255).astype("uint8") + out = np.empty_like(x) + res = x.clip(-1, 300, out=out if inplace else None) + assert res is out or not inplace + assert (res == x).all() + + res = x.clip(-1, 50, out=out if inplace else None) + assert res is out or not inplace + assert (res <= 50).all() + assert (res[x <= 50] == x[x <= 50]).all() + + res = x.clip(100, 1000, out=out if inplace else None) + assert res is out or not inplace + assert (res >= 100).all() + assert (res[x >= 100] == x[x >= 100]).all() + + def test_record_array(self): + rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], + dtype=[('x', '= 3)) + x = val.clip(min=3) + assert_(np.all(x >= 3)) + x = val.clip(max=4) + assert_(np.all(x <= 4)) + + def test_nan(self): + input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan]) + result = input_arr.clip(-1, 1) + expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan]) + assert_array_equal(result, expected) + + +class TestCompress: + def test_axis(self): + tgt = [[5, 6, 7, 8, 9]] + arr = np.arange(10).reshape(2, 5) + out = np.compress([0, 1], arr, axis=0) + assert_equal(out, tgt) + + tgt = [[1, 3], [6, 8]] + out = np.compress([0, 1, 0, 1, 0], arr, axis=1) + assert_equal(out, tgt) + + def test_truncate(self): + tgt = [[1], [6]] + arr = np.arange(10).reshape(2, 5) + out = np.compress([0, 1], arr, axis=1) + assert_equal(out, tgt) + + def test_flatten(self): + arr = np.arange(10).reshape(2, 5) + out = np.compress([0, 1], arr) + assert_equal(out, 1) + + +class TestPutmask: + def tst_basic(self, x, T, mask, val): + np.putmask(x, mask, val) + assert_equal(x[mask], np.array(val, T)) + + def test_ip_types(self): + unchecked_types = [bytes, str, np.void] + + x = np.random.random(1000) * 100 + mask = x < 40 + + for val in [-100, 0, 15]: + for types in np._core.sctypes.values(): + for T in types: + if T not in unchecked_types: + if val < 0 and np.dtype(T).kind == "u": + val = np.iinfo(T).max - 99 + self.tst_basic(x.copy().astype(T), T, mask, val) + + # Also test string of a length which uses an untypical length + dt = np.dtype("S3") + self.tst_basic(x.astype(dt), dt.type, mask, dt.type(val)[:3]) + + def test_mask_size(self): + assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5) + + @pytest.mark.parametrize('dtype', ('>i4', 'f8'), ('z', '= 2, 3) + + def test_kwargs(self): + x = np.array([0, 0]) + np.putmask(x, [0, 1], [-1, -2]) + assert_array_equal(x, [0, -2]) + + x = np.array([0, 0]) + np.putmask(x, mask=[0, 1], values=[-1, -2]) + assert_array_equal(x, [0, -2]) + + x = np.array([0, 0]) + np.putmask(x, values=[-1, -2], mask=[0, 1]) + assert_array_equal(x, [0, -2]) + + with pytest.raises(TypeError): + np.putmask(a=x, values=[-1, -2], mask=[0, 1]) + + +class TestTake: + def tst_basic(self, x): + ind = list(range(x.shape[0])) + assert_array_equal(x.take(ind, axis=0), x) + + def test_ip_types(self): + unchecked_types = [bytes, str, np.void] + + x = np.random.random(24) * 100 + x = x.reshape((2, 3, 4)) + for types in np._core.sctypes.values(): + for T in types: + if T not in unchecked_types: + self.tst_basic(x.copy().astype(T)) + + # Also test string of a length which uses an untypical length + self.tst_basic(x.astype("S3")) + + def test_raise(self): + x = np.random.random(24) * 100 + x = x.reshape((2, 3, 4)) + assert_raises(IndexError, x.take, [0, 1, 2], axis=0) + assert_raises(IndexError, x.take, [-3], axis=0) + assert_array_equal(x.take([-1], axis=0)[0], x[1]) + + def test_clip(self): + x = np.random.random(24) * 100 + x = x.reshape((2, 3, 4)) + assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0]) + assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1]) + + def test_wrap(self): + x = np.random.random(24) * 100 + x = x.reshape((2, 3, 4)) + assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1]) + assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0]) + assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1]) + + @pytest.mark.parametrize('dtype', ('>i4', 'f8'), ('z', ' 16MB + tmp_filename = normalize_filename(tmp_path, param_filename) + d = np.zeros(4 * 1024 ** 2) + d.tofile(tmp_filename) + assert_equal(os.path.getsize(tmp_filename), d.nbytes) + assert_array_equal(d, np.fromfile(tmp_filename)) + # check offset + with open(tmp_filename, "r+b") as f: + f.seek(d.nbytes) + d.tofile(f) + assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2) + # check append mode (gh-8329) + open(tmp_filename, "w").close() # delete file contents + with open(tmp_filename, "ab") as f: + d.tofile(f) + assert_array_equal(d, np.fromfile(tmp_filename)) + with open(tmp_filename, "ab") as f: + d.tofile(f) + assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2) + + def test_io_open_buffered_fromfile(self, tmp_path, param_filename): + # gh-6632 + tmp_filename = normalize_filename(tmp_path, param_filename) + x = self._create_data() + x.tofile(tmp_filename) + with open(tmp_filename, 'rb', buffering=-1) as f: + y = np.fromfile(f, dtype=x.dtype) + assert_array_equal(y, x.flat) + + def test_file_position_after_fromfile(self, tmp_path, param_filename): + # gh-4118 + sizes = [io.DEFAULT_BUFFER_SIZE // 8, + io.DEFAULT_BUFFER_SIZE, + io.DEFAULT_BUFFER_SIZE * 8] + tmp_filename = normalize_filename(tmp_path, param_filename) + + for size in sizes: + with open(tmp_filename, 'wb') as f: + f.seek(size - 1) + f.write(b'\0') + + for mode in ['rb', 'r+b']: + err_msg = "%d %s" % (size, mode) + + with open(tmp_filename, mode) as f: + f.read(2) + np.fromfile(f, dtype=np.float64, count=1) + pos = f.tell() + assert_equal(pos, 10, err_msg=err_msg) + + def test_file_position_after_tofile(self, tmp_path, param_filename): + # gh-4118 + sizes = [io.DEFAULT_BUFFER_SIZE // 8, + io.DEFAULT_BUFFER_SIZE, + io.DEFAULT_BUFFER_SIZE * 8] + tmp_filename = normalize_filename(tmp_path, param_filename) + + for size in sizes: + err_msg = "%d" % (size,) + + with open(tmp_filename, 'wb') as f: + f.seek(size - 1) + f.write(b'\0') + f.seek(10) + f.write(b'12') + np.array([0], dtype=np.float64).tofile(f) + pos = f.tell() + assert_equal(pos, 10 + 2 + 8, err_msg=err_msg) + + with open(tmp_filename, 'r+b') as f: + f.read(2) + f.seek(0, 1) # seek between read&write required by ANSI C + np.array([0], dtype=np.float64).tofile(f) + pos = f.tell() + assert_equal(pos, 10, err_msg=err_msg) + + def test_load_object_array_fromfile(self, tmp_path, param_filename): + # gh-12300 + tmp_filename = normalize_filename(tmp_path, param_filename) + with open(tmp_filename, 'w') as f: + # Ensure we have a file with consistent contents + pass + + with open(tmp_filename, 'rb') as f: + assert_raises_regex(ValueError, "Cannot read into object array", + np.fromfile, f, dtype=object) + + assert_raises_regex(ValueError, "Cannot read into object array", + np.fromfile, tmp_filename, dtype=object) + + def test_fromfile_offset(self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) + x = self._create_data() + with open(tmp_filename, 'wb') as f: + x.tofile(f) + + with open(tmp_filename, 'rb') as f: + y = np.fromfile(f, dtype=x.dtype, offset=0) + assert_array_equal(y, x.flat) + + with open(tmp_filename, 'rb') as f: + count_items = len(x.flat) // 8 + offset_items = len(x.flat) // 4 + offset_bytes = x.dtype.itemsize * offset_items + y = np.fromfile( + f, dtype=x.dtype, count=count_items, offset=offset_bytes + ) + assert_array_equal( + y, x.flat[offset_items:offset_items + count_items] + ) + + # subsequent seeks should stack + offset_bytes = x.dtype.itemsize + z = np.fromfile(f, dtype=x.dtype, offset=offset_bytes) + assert_array_equal(z, x.flat[offset_items + count_items + 1:]) + + with open(tmp_filename, 'wb') as f: + x.tofile(f, sep=",") + + with open(tmp_filename, 'rb') as f: + assert_raises_regex( + TypeError, + "'offset' argument only permitted for binary files", + np.fromfile, tmp_filename, dtype=x.dtype, + sep=",", offset=1) + + @pytest.mark.skipif(IS_PYPY, reason="bug in PyPy's PyNumber_AsSsize_t") + def test_fromfile_bad_dup(self, tmp_path, param_filename, monkeypatch): + def dup_str(fd): + return 'abc' + + def dup_bigint(fd): + return 2**68 + + tmp_filename = normalize_filename(tmp_path, param_filename) + x = self._create_data() + + with open(tmp_filename, 'wb') as f: + x.tofile(f) + for dup, exc in ((dup_str, TypeError), (dup_bigint, OSError)): + monkeypatch.setattr(os, "dup", dup) + assert_raises(exc, np.fromfile, f) + + def _check_from(self, s, value, filename, **kw): + if 'sep' not in kw: + y = np.frombuffer(s, **kw) + else: + y = np.fromstring(s, **kw) + assert_array_equal(y, value) + + with open(filename, 'wb') as f: + f.write(s) + y = np.fromfile(filename, **kw) + assert_array_equal(y, value) + + @pytest.fixture(params=["period", "comma"]) + def decimal_sep_localization(self, request): + """ + Including this fixture in a test will automatically + execute it with both types of decimal separator. + + So:: + + def test_decimal(decimal_sep_localization): + pass + + is equivalent to the following two tests:: + + def test_decimal_period_separator(): + pass + + def test_decimal_comma_separator(): + with CommaDecimalPointLocale(): + pass + """ + if request.param == "period": + yield + elif request.param == "comma": + with CommaDecimalPointLocale(): + yield + else: + assert False, request.param + + def test_nan(self, tmp_path, param_filename, decimal_sep_localization): + tmp_filename = normalize_filename(tmp_path, param_filename) + self._check_from( + b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)", + [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], + tmp_filename, + sep=' ') + + def test_inf(self, tmp_path, param_filename, decimal_sep_localization): + tmp_filename = normalize_filename(tmp_path, param_filename) + self._check_from( + b"inf +inf -inf infinity -Infinity iNfInItY -inF", + [np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf], + tmp_filename, + sep=' ') + + def test_numbers(self, tmp_path, param_filename, decimal_sep_localization): + tmp_filename = normalize_filename(tmp_path, param_filename) + self._check_from( + b"1.234 -1.234 .3 .3e55 -123133.1231e+133", + [1.234, -1.234, .3, .3e55, -123133.1231e+133], + tmp_filename, + sep=' ') + + def test_binary(self, tmp_path, param_filename): + tmp_filename = normalize_filename(tmp_path, param_filename) + self._check_from( + b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@', + np.array([1, 2, 3, 4]), + tmp_filename, + dtype='']) + @pytest.mark.parametrize('dtype', [float, int, complex]) + def test_basic(self, byteorder, dtype): + dt = np.dtype(dtype).newbyteorder(byteorder) + x = (np.random.random((4, 7)) * 5).astype(dt) + buf = x.tobytes() + assert_array_equal(np.frombuffer(buf, dtype=dt), x.flat) + + @pytest.mark.parametrize("obj", [np.arange(10), b"12345678"]) + def test_array_base(self, obj): + # Objects (including NumPy arrays), which do not use the + # `release_buffer` slot should be directly used as a base object. + # See also gh-21612 + new = np.frombuffer(obj) + assert new.base is obj + + def test_empty(self): + assert_array_equal(np.frombuffer(b''), np.array([])) + + @pytest.mark.skipif(IS_PYPY, + reason="PyPy's memoryview currently does not track exports. See: " + "https://foss.heptapod.net/pypy/pypy/-/issues/3724") + def test_mmap_close(self): + # The old buffer protocol was not safe for some things that the new + # one is. But `frombuffer` always used the old one for a long time. + # Checks that it is safe with the new one (using memoryviews) + with tempfile.TemporaryFile(mode='wb') as tmp: + tmp.write(b"asdf") + tmp.flush() + mm = mmap.mmap(tmp.fileno(), 0) + arr = np.frombuffer(mm, dtype=np.uint8) + with pytest.raises(BufferError): + mm.close() # cannot close while array uses the buffer + del arr + mm.close() + +class TestFlat: + def _create_arrays(self): + a = np.arange(20.0).reshape(4, 5) + a.flags.writeable = False + b = a[::2, ::2] + return a, b + + def test_contiguous(self): + testpassed = False + a, _ = self._create_arrays() + try: + a.flat[12] = 100.0 + except ValueError: + testpassed = True + assert_(testpassed) + assert_(a.flat[12] == 12.0) + + def test_discontiguous(self): + testpassed = False + _, b = self._create_arrays() + try: + b.flat[4] = 100.0 + except ValueError: + testpassed = True + assert_(testpassed) + assert_(b.flat[4] == 12.0) + + def test___array__(self): + a0 = np.arange(20.0) + a = a0.reshape(4, 5) + a0 = a0.reshape((4, 5)) + a.flags.writeable = False + b = a[::2, ::2] + b0 = a0[::2, ::2] + c = a.flat.__array__() + d = b.flat.__array__() + e = a0.flat.__array__() + f = b0.flat.__array__() + + assert_(c.flags.writeable is False) + assert_(d.flags.writeable is False) + assert_(e.flags.writeable is True) + assert_(f.flags.writeable is False) + assert_(c.flags.writebackifcopy is False) + assert_(d.flags.writebackifcopy is False) + assert_(e.flags.writebackifcopy is False) + assert_(f.flags.writebackifcopy is False) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_refcount(self): + # includes regression test for reference count error gh-13165 + a, _ = self._create_arrays() + inds = [np.intp(0), np.array([True] * a.size), np.array([0]), None] + indtype = np.dtype(np.intp) + rc_indtype = sys.getrefcount(indtype) + for ind in inds: + rc_ind = sys.getrefcount(ind) + for _ in range(100): + try: + a.flat[ind] + except IndexError: + pass + assert_(abs(sys.getrefcount(ind) - rc_ind) < 50) + assert_(abs(sys.getrefcount(indtype) - rc_indtype) < 50) + + def test_index_getset(self): + it = np.arange(10).reshape(2, 1, 5).flat + with pytest.raises(AttributeError): + it.index = 10 + + for _ in it: + pass + # Check the value of `.index` is updated correctly (see also gh-19153) + # If the type was incorrect, this would show up on big-endian machines + assert it.index == it.base.size + + def test_maxdims(self): + # The flat iterator and thus attribute is currently unfortunately + # limited to only 32 dimensions (after bumping it to 64 for 2.0) + a = np.ones((1,) * 64) + + with pytest.raises(RuntimeError, + match=".*32 dimensions but the array has 64"): + a.flat + + +class TestResize: + + @_no_tracing + def test_basic(self): + x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + if IS_PYPY: + x.resize((5, 5), refcheck=False) + else: + x.resize((5, 5)) + assert_array_equal(x.flat[:9], + np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat) + assert_array_equal(x[9:].flat, 0) + + def test_check_reference(self): + x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + y = x + assert_raises(ValueError, x.resize, (5, 1)) + + @pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") + @pytest.mark.skipif(IS_PYPY, reason="The method always raises") + def test_check_reference_module_scope(self): + code = textwrap.dedent(""" + import numpy as np + + # See gh-30991 + a = np.array([[0, 1], [2, 3]], order='C') + a.resize((2, 1)) + """) + try: + subprocess.check_output([sys.executable, "-c", code], + stderr=subprocess.STDOUT, text=True) + except subprocess.CalledProcessError as e: + assert sys.version_info >= (3, 14) + assert "ValueError" in e.stdout + assert "It is possible that this is a false positive." in e.stdout + else: + if sys.version_info >= (3, 14): + raise AssertionError("Unexpected success of resize refcheck") + + def test_check_reference_2(self): + # see gh-30265 + x = np.zeros((2, 2)) + y = x + with pytest.raises(ValueError): + x.resize((5, 5)) + + @_no_tracing + def test_int_shape(self): + x = np.eye(3) + if IS_PYPY: + x.resize(3, refcheck=False) + else: + x.resize(3) + assert_array_equal(x, np.eye(3)[0, :]) + + def test_none_shape(self): + x = np.eye(3) + x.resize(None) + assert_array_equal(x, np.eye(3)) + x.resize() + assert_array_equal(x, np.eye(3)) + + def test_0d_shape(self): + # to it multiple times to test it does not break alloc cache gh-9216 + for i in range(10): + x = np.empty((1,)) + x.resize(()) + assert_equal(x.shape, ()) + assert_equal(x.size, 1) + x = np.empty(()) + x.resize((1,)) + assert_equal(x.shape, (1,)) + assert_equal(x.size, 1) + + def test_invalid_arguments(self): + assert_raises(TypeError, np.eye(3).resize, 'hi') + assert_raises(ValueError, np.eye(3).resize, -1) + assert_raises(TypeError, np.eye(3).resize, order=1) + assert_raises(TypeError, np.eye(3).resize, refcheck='hi') + + @_no_tracing + def test_freeform_shape(self): + x = np.eye(3) + if IS_PYPY: + x.resize(3, 2, 1, refcheck=False) + else: + x.resize(3, 2, 1) + assert_(x.shape == (3, 2, 1)) + + @_no_tracing + def test_zeros_appended(self): + x = np.eye(3) + if IS_PYPY: + x.resize(2, 3, 3, refcheck=False) + else: + x.resize(2, 3, 3) + assert_array_equal(x[0], np.eye(3)) + assert_array_equal(x[1], np.zeros((3, 3))) + + @_no_tracing + def test_obj_obj(self): + # check memory is initialized on resize, gh-4857 + a = np.ones(10, dtype=[('k', object, 2)]) + if IS_PYPY: + a.resize(15, refcheck=False) + else: + a.resize(15,) + assert_equal(a.shape, (15,)) + assert_array_equal(a['k'][-5:], 0) + assert_array_equal(a['k'][:-5], 1) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + @pytest.mark.parametrize("dtype", ["O", "O,O"]) + def test_obj_obj_shrinking(self, dtype): + # check that memory is freed when shrinking an array. + test_obj = object() + expected = sys.getrefcount(test_obj) + a = np.array([test_obj, test_obj, test_obj], dtype=dtype) + assert a.size == 3 + a.resize((2, 1)) # two elements, not three! + assert a.size == 2 + del a + # if all is well, then we reclaimed all references + assert sys.getrefcount(test_obj) == expected + + def test_empty_view(self): + # check that sizes containing a zero don't trigger a reallocate for + # already empty arrays + x = np.zeros((10, 0), int) + x_view = x[...] + x_view.resize((0, 10)) + x_view.resize((0, 100)) + + def test_check_weakref(self): + x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + xref = weakref.ref(x) + assert_raises(ValueError, x.resize, (5, 1)) + + +class TestRecord: + def test_field_rename(self): + dt = np.dtype([('f', float), ('i', int)]) + dt.names = ['p', 'q'] + assert_equal(dt.names, ['p', 'q']) + + def test_multiple_field_name_occurrence(self): + def test_dtype_init(): + np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")]) + + # Error raised when multiple fields have the same name + assert_raises(ValueError, test_dtype_init) + + def test_bytes_fields(self): + # Bytes are not allowed in field names and not recognized in titles + # on Py3 + assert_raises(TypeError, np.dtype, [(b'a', int)]) + assert_raises(TypeError, np.dtype, [(('b', b'a'), int)]) + + dt = np.dtype([((b'a', 'b'), int)]) + assert_raises(TypeError, dt.__getitem__, b'a') + + x = np.array([(1,), (2,), (3,)], dtype=dt) + assert_raises(IndexError, x.__getitem__, b'a') + + y = x[0] + assert_raises(IndexError, y.__getitem__, b'a') + + def test_multiple_field_name_unicode(self): + def test_dtype_unicode(): + np.dtype([("\u20B9", "f8"), ("B", "f8"), ("\u20B9", "f8")]) + + # Error raised when multiple fields have the same name(unicode included) + assert_raises(ValueError, test_dtype_unicode) + + def test_fromarrays_unicode(self): + # A single name string provided to fromarrays() is allowed to be unicode + x = np._core.records.fromarrays( + [[0], [1]], names='a,b', formats='i4,i4') + assert_equal(x['a'][0], 0) + assert_equal(x['b'][0], 1) + + def test_unicode_order(self): + # Test that we can sort with order as a unicode field name + name = 'b' + x = np.array([1, 3, 2], dtype=[(name, int)]) + x.sort(order=name) + assert_equal(x['b'], np.array([1, 2, 3])) + + def test_field_names(self): + # Test unicode and 8-bit / byte strings can be used + a = np.zeros((1,), dtype=[('f1', 'i4'), + ('f2', 'i4'), + ('f3', [('sf1', 'i4')])]) + # byte string indexing fails gracefully + assert_raises(IndexError, a.__setitem__, b'f1', 1) + assert_raises(IndexError, a.__getitem__, b'f1') + assert_raises(IndexError, a['f1'].__setitem__, b'sf1', 1) + assert_raises(IndexError, a['f1'].__getitem__, b'sf1') + b = a.copy() + fn1 = 'f1' + b[fn1] = 1 + assert_equal(b[fn1], 1) + fnn = 'not at all' + assert_raises(ValueError, b.__setitem__, fnn, 1) + assert_raises(ValueError, b.__getitem__, fnn) + b[0][fn1] = 2 + assert_equal(b[fn1], 2) + # Subfield + assert_raises(ValueError, b[0].__setitem__, fnn, 1) + assert_raises(ValueError, b[0].__getitem__, fnn) + # Subfield + fn3 = 'f3' + sfn1 = 'sf1' + b[fn3][sfn1] = 1 + assert_equal(b[fn3][sfn1], 1) + assert_raises(ValueError, b[fn3].__setitem__, fnn, 1) + assert_raises(ValueError, b[fn3].__getitem__, fnn) + # multiple subfields + fn2 = 'f2' + b[fn2] = 3 + + assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3)) + assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2)) + assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,))) + + # non-ascii unicode field indexing is well behaved + assert_raises(ValueError, a.__setitem__, '\u03e0', 1) + assert_raises(ValueError, a.__getitem__, '\u03e0') + + def test_record_hash(self): + a = np.array([(1, 2), (1, 2)], dtype='i1,i2') + a.flags.writeable = False + b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')]) + b.flags.writeable = False + c = np.array([(1, 2), (3, 4)], dtype='i1,i2') + c.flags.writeable = False + assert_(hash(a[0]) == hash(a[1])) + assert_(hash(a[0]) == hash(b[0])) + assert_(hash(a[0]) != hash(b[1])) + assert_(hash(c[0]) == hash(a[0]) and c[0] == a[0]) + + def test_record_no_hash(self): + a = np.array([(1, 2), (1, 2)], dtype='i1,i2') + assert_raises(TypeError, hash, a[0]) + + def test_empty_structure_creation(self): + # make sure these do not raise errors (gh-5631) + np.array([()], dtype={'names': [], 'formats': [], + 'offsets': [], 'itemsize': 12}) + np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [], + 'offsets': [], 'itemsize': 12}) + + def test_multifield_indexing_view(self): + a = np.ones(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u4')]) + v = a[['a', 'c']] + assert_(v.base is a) + assert_(v.dtype == np.dtype({'names': ['a', 'c'], + 'formats': ['i4', 'u4'], + 'offsets': [0, 8]})) + v[:] = (4, 5) + assert_equal(a[0].item(), (4, 1, 5)) + +class TestView: + def test_basic(self): + x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], + dtype=[('r', np.int8), ('g', np.int8), + ('b', np.int8), ('a', np.int8)]) + # We must be specific about the endianness here: + y = x.view(dtype=' 0) + assert_(issubclass(w[0].category, RuntimeWarning)) + + def test_empty(self): + A = np.zeros((0, 3)) + for f in self.funcs: + for axis in [0, None]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(f(A, axis=axis)).all()) + assert_(len(w) > 0) + assert_(issubclass(w[0].category, RuntimeWarning)) + for axis in [1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_equal(f(A, axis=axis), np.zeros([])) + + def test_mean_values(self): + rmat, cmat, omat = self._create_data() + for mat in [rmat, cmat, omat]: + for axis in [0, 1]: + tgt = mat.sum(axis=axis) + res = _mean(mat, axis=axis) * mat.shape[axis] + assert_almost_equal(res, tgt) + for axis in [None]: + tgt = mat.sum(axis=axis) + res = _mean(mat, axis=axis) * np.prod(mat.shape) + assert_almost_equal(res, tgt) + + def test_mean_float16(self): + # This fail if the sum inside mean is done in float16 instead + # of float32. + assert_(_mean(np.ones(100000, dtype='float16')) == 1) + + def test_mean_axis_error(self): + # Ensure that AxisError is raised instead of IndexError when axis is + # out of bounds, see gh-15817. + with assert_raises(np.exceptions.AxisError): + np.arange(10).mean(axis=2) + + def test_mean_where(self): + a = np.arange(16).reshape((4, 4)) + wh_full = np.array([[False, True, False, True], + [True, False, True, False], + [True, True, False, False], + [False, False, True, True]]) + wh_partial = np.array([[False], + [True], + [True], + [False]]) + _cases = [(1, True, [1.5, 5.5, 9.5, 13.5]), + (0, wh_full, [6., 5., 10., 9.]), + (1, wh_full, [2., 5., 8.5, 14.5]), + (0, wh_partial, [6., 7., 8., 9.])] + for _ax, _wh, _res in _cases: + assert_allclose(a.mean(axis=_ax, where=_wh), + np.array(_res)) + assert_allclose(np.mean(a, axis=_ax, where=_wh), + np.array(_res)) + + a3d = np.arange(16).reshape((2, 2, 4)) + _wh_partial = np.array([False, True, True, False]) + _res = [[1.5, 5.5], [9.5, 13.5]] + assert_allclose(a3d.mean(axis=2, where=_wh_partial), + np.array(_res)) + assert_allclose(np.mean(a3d, axis=2, where=_wh_partial), + np.array(_res)) + + with pytest.warns(RuntimeWarning) as w: + assert_allclose(a.mean(axis=1, where=wh_partial), + np.array([np.nan, 5.5, 9.5, np.nan])) + with pytest.warns(RuntimeWarning) as w: + assert_equal(a.mean(where=False), np.nan) + with pytest.warns(RuntimeWarning) as w: + assert_equal(np.mean(a, where=False), np.nan) + + def test_var_values(self): + rmat, cmat, omat = self._create_data() + for mat in [rmat, cmat, omat]: + for axis in [0, 1, None]: + msqr = _mean(mat * mat.conj(), axis=axis) + mean = _mean(mat, axis=axis) + tgt = msqr - mean * mean.conjugate() + res = _var(mat, axis=axis) + assert_almost_equal(res, tgt) + + @pytest.mark.parametrize(('complex_dtype', 'ndec'), ( + ('complex64', 6), + ('complex128', 7), + ('clongdouble', 7), + )) + def test_var_complex_values(self, complex_dtype, ndec): + _, cmat, _ = self._create_data() + # Test fast-paths for every builtin complex type + for axis in [0, 1, None]: + mat = cmat.copy().astype(complex_dtype) + msqr = _mean(mat * mat.conj(), axis=axis) + mean = _mean(mat, axis=axis) + tgt = msqr - mean * mean.conjugate() + res = _var(mat, axis=axis) + assert_almost_equal(res, tgt, decimal=ndec) + + def test_var_dimensions(self): + # _var paths for complex number introduce additions on views that + # increase dimensions. Ensure this generalizes to higher dims + _, cmat, _ = self._create_data() + mat = np.stack([cmat] * 3) + for axis in [0, 1, 2, -1, None]: + msqr = _mean(mat * mat.conj(), axis=axis) + mean = _mean(mat, axis=axis) + tgt = msqr - mean * mean.conjugate() + res = _var(mat, axis=axis) + assert_almost_equal(res, tgt) + + def test_var_complex_byteorder(self): + # Test that var fast-path does not cause failures for complex arrays + # with non-native byteorder + _, cmat, _ = self._create_data() + cmat = cmat.copy().astype('complex128') + cmat_swapped = cmat.astype(cmat.dtype.newbyteorder()) + assert_almost_equal(cmat.var(), cmat_swapped.var()) + + def test_var_axis_error(self): + # Ensure that AxisError is raised instead of IndexError when axis is + # out of bounds, see gh-15817. + with assert_raises(np.exceptions.AxisError): + np.arange(10).var(axis=2) + + def test_var_where(self): + a = np.arange(25).reshape((5, 5)) + wh_full = np.array([[False, True, False, True, True], + [True, False, True, True, False], + [True, True, False, False, True], + [False, True, True, False, True], + [True, False, True, True, False]]) + wh_partial = np.array([[False], + [True], + [True], + [False], + [True]]) + _cases = [(0, True, [50., 50., 50., 50., 50.]), + (1, True, [2., 2., 2., 2., 2.])] + for _ax, _wh, _res in _cases: + assert_allclose(a.var(axis=_ax, where=_wh), + np.array(_res)) + assert_allclose(np.var(a, axis=_ax, where=_wh), + np.array(_res)) + + a3d = np.arange(16).reshape((2, 2, 4)) + _wh_partial = np.array([False, True, True, False]) + _res = [[0.25, 0.25], [0.25, 0.25]] + assert_allclose(a3d.var(axis=2, where=_wh_partial), + np.array(_res)) + assert_allclose(np.var(a3d, axis=2, where=_wh_partial), + np.array(_res)) + + assert_allclose(np.var(a, axis=1, where=wh_full), + np.var(a[wh_full].reshape((5, 3)), axis=1)) + assert_allclose(np.var(a, axis=0, where=wh_partial), + np.var(a[wh_partial[:, 0]], axis=0)) + with pytest.warns(RuntimeWarning) as w: + assert_equal(a.var(where=False), np.nan) + with pytest.warns(RuntimeWarning) as w: + assert_equal(np.var(a, where=False), np.nan) + + def test_std_values(self): + rmat, cmat, omat = self._create_data() + for mat in [rmat, cmat, omat]: + for axis in [0, 1, None]: + tgt = np.sqrt(_var(mat, axis=axis)) + res = _std(mat, axis=axis) + assert_almost_equal(res, tgt) + + def test_std_where(self): + a = np.arange(25).reshape((5, 5))[::-1] + whf = np.array([[False, True, False, True, True], + [True, False, True, False, True], + [True, True, False, True, False], + [True, False, True, True, False], + [False, True, False, True, True]]) + whp = np.array([[False], + [False], + [True], + [True], + [False]]) + _cases = [ + (0, True, 7.07106781 * np.ones(5)), + (1, True, 1.41421356 * np.ones(5)), + (0, whf, + np.array([4.0824829, 8.16496581, 5., 7.39509973, 8.49836586])), + (0, whp, 2.5 * np.ones(5)) + ] + for _ax, _wh, _res in _cases: + assert_allclose(a.std(axis=_ax, where=_wh), _res) + assert_allclose(np.std(a, axis=_ax, where=_wh), _res) + + a3d = np.arange(16).reshape((2, 2, 4)) + _wh_partial = np.array([False, True, True, False]) + _res = [[0.5, 0.5], [0.5, 0.5]] + assert_allclose(a3d.std(axis=2, where=_wh_partial), + np.array(_res)) + assert_allclose(np.std(a3d, axis=2, where=_wh_partial), + np.array(_res)) + + assert_allclose(a.std(axis=1, where=whf), + np.std(a[whf].reshape((5, 3)), axis=1)) + assert_allclose(np.std(a, axis=1, where=whf), + (a[whf].reshape((5, 3))).std(axis=1)) + assert_allclose(a.std(axis=0, where=whp), + np.std(a[whp[:, 0]], axis=0)) + assert_allclose(np.std(a, axis=0, where=whp), + (a[whp[:, 0]]).std(axis=0)) + with pytest.warns(RuntimeWarning) as w: + assert_equal(a.std(where=False), np.nan) + with pytest.warns(RuntimeWarning) as w: + assert_equal(np.std(a, where=False), np.nan) + + def test_subclass(self): + class TestArray(np.ndarray): + def __new__(cls, data, info): + result = np.array(data) + result = result.view(cls) + result.info = info + return result + + def __array_finalize__(self, obj): + self.info = getattr(obj, "info", '') + + dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba') + res = dat.mean(1) + assert_(res.info == dat.info) + res = dat.std(1) + assert_(res.info == dat.info) + res = dat.var(1) + assert_(res.info == dat.info) + + +class TestVdot: + def test_basic(self): + dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger'] + dt_complex = np.typecodes['Complex'] + + # test real + a = np.eye(3) + for dt in dt_numeric + 'O': + b = a.astype(dt) + res = np.vdot(b, b) + assert_(np.isscalar(res)) + assert_equal(np.vdot(b, b), 3) + + # test complex + a = np.eye(3) * 1j + for dt in dt_complex + 'O': + b = a.astype(dt) + res = np.vdot(b, b) + assert_(np.isscalar(res)) + assert_equal(np.vdot(b, b), 3) + + # test boolean + b = np.eye(3, dtype=bool) + res = np.vdot(b, b) + assert_(np.isscalar(res)) + assert_equal(np.vdot(b, b), True) + + def test_vdot_array_order(self): + a = np.array([[1, 2], [3, 4]], order='C') + b = np.array([[1, 2], [3, 4]], order='F') + res = np.vdot(a, a) + + # integer arrays are exact + assert_equal(np.vdot(a, b), res) + assert_equal(np.vdot(b, a), res) + assert_equal(np.vdot(b, b), res) + + def test_vdot_uncontiguous(self): + for size in [2, 1000]: + # Different sizes match different branches in vdot. + a = np.zeros((size, 2, 2)) + b = np.zeros((size, 2, 2)) + a[:, 0, 0] = np.arange(size) + b[:, 0, 0] = np.arange(size) + 1 + # Make a and b uncontiguous: + a = a[..., 0] + b = b[..., 0] + + assert_equal(np.vdot(a, b), + np.vdot(a.flatten(), b.flatten())) + assert_equal(np.vdot(a, b.copy()), + np.vdot(a.flatten(), b.flatten())) + assert_equal(np.vdot(a.copy(), b), + np.vdot(a.flatten(), b.flatten())) + assert_equal(np.vdot(a.copy('F'), b), + np.vdot(a.flatten(), b.flatten())) + assert_equal(np.vdot(a, b.copy('F')), + np.vdot(a.flatten(), b.flatten())) + + +class TestDot: + N = 7 + + def _create_data(self): + rng = np.random.RandomState(128) + A = rng.random((4, 2)) + b1 = rng.random((2, 1)) + b2 = rng.random(2) + b3 = rng.random((1, 2)) + b4 = rng.random(4) + return A, b1, b2, b3, b4 + + def test_dotmatmat(self): + A, _, _, _, _ = self._create_data() + res = np.dot(A.transpose(), A) + tgt = np.array([[1.45046013, 0.86323640], + [0.86323640, 0.84934569]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotmatvec(self): + A, b1, _, _, _ = self._create_data() + res = np.dot(A, b1) + tgt = np.array([[0.32114320], [0.04889721], + [0.15696029], [0.33612621]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotmatvec2(self): + A, _, b2, _, _ = self._create_data() + res = np.dot(A, b2) + tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecmat(self): + A, _, _, _, b4 = self._create_data() + res = np.dot(b4, A) + tgt = np.array([1.23495091, 1.12222648]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecmat2(self): + A, _, _, b3, _ = self._create_data() + res = np.dot(b3, A.transpose()) + tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecmat3(self): + A, _, _, _, b4 = self._create_data() + res = np.dot(A.transpose(), b4) + tgt = np.array([1.23495091, 1.12222648]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecvecouter(self): + _, b1, _, b3, _ = self._create_data() + res = np.dot(b1, b3) + tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecvecinner(self): + _, b1, _, b3, _ = self._create_data() + res = np.dot(b3, b1) + tgt = np.array([[0.23129668]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotcolumnvect1(self): + b1 = np.ones((3, 1)) + b2 = [5.3] + res = np.dot(b1, b2) + tgt = np.array([5.3, 5.3, 5.3]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotcolumnvect2(self): + b1 = np.ones((3, 1)).transpose() + b2 = [6.2] + res = np.dot(b2, b1) + tgt = np.array([6.2, 6.2, 6.2]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecscalar(self): + rng = np.random.RandomState(100) + b1 = rng.random((1, 1)) + b2 = rng.random((1, 4)) + res = np.dot(b1, b2) + tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecscalar2(self): + rng = np.random.RandomState(100) + b1 = rng.random((4, 1)) + b2 = rng.random((1, 1)) + res = np.dot(b1, b2) + tgt = np.array([[0.00256425], [0.00131359], [0.00200324], [0.00398638]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_all(self): + dims = [(), (1,), (1, 1)] + dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)] + for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)): + b1 = np.zeros(dim1) + b2 = np.zeros(dim2) + res = np.dot(b1, b2) + tgt = np.zeros(dim) + assert_(res.shape == tgt.shape) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_vecobject(self): + class Vec: + def __init__(self, sequence=None): + if sequence is None: + sequence = [] + self.array = np.array(sequence) + + def __add__(self, other): + out = Vec() + out.array = self.array + other.array + return out + + def __sub__(self, other): + out = Vec() + out.array = self.array - other.array + return out + + def __mul__(self, other): # with scalar + out = Vec(self.array.copy()) + out.array *= other + return out + + def __rmul__(self, other): + return self * other + + U_non_cont = np.transpose([[1., 1.], [1., 2.]]) + U_cont = np.ascontiguousarray(U_non_cont) + x = np.array([Vec([1., 0.]), Vec([0., 1.])]) + zeros = np.array([Vec([0., 0.]), Vec([0., 0.])]) + zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x) + assert_equal(zeros[0].array, zeros_test[0].array) + assert_equal(zeros[1].array, zeros_test[1].array) + + def test_dot_2args(self): + + a = np.array([[1, 2], [3, 4]], dtype=float) + b = np.array([[1, 0], [1, 1]], dtype=float) + c = np.array([[3, 2], [7, 4]], dtype=float) + + d = dot(a, b) + assert_allclose(c, d) + + def test_dot_3args(self): + + np.random.seed(22) + f = np.random.random_sample((1024, 16)) + v = np.random.random_sample((16, 32)) + + r = np.empty((1024, 32)) + if HAS_REFCOUNT: + orig_refcount = sys.getrefcount(r) + for i in range(12): + dot(f, v, r) + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(r), orig_refcount) + r2 = dot(f, v, out=None) + assert_array_equal(r2, r) + assert_(r is dot(f, v, out=r)) + + v = v[:, 0].copy() # v.shape == (16,) + r = r[:, 0].copy() # r.shape == (1024,) + r2 = dot(f, v) + assert_(r is dot(f, v, r)) + assert_array_equal(r2, r) + + def test_dot_3args_errors(self): + + np.random.seed(22) + f = np.random.random_sample((1024, 16)) + v = np.random.random_sample((16, 32)) + + r = np.empty((1024, 31)) + assert_raises(ValueError, dot, f, v, r) + + r = np.empty((1024,)) + assert_raises(ValueError, dot, f, v, r) + + r = np.empty((32,)) + assert_raises(ValueError, dot, f, v, r) + + r = np.empty((32, 1024)) + assert_raises(ValueError, dot, f, v, r) + assert_raises(ValueError, dot, f, v, r.T) + + r = np.empty((1024, 64)) + assert_raises(ValueError, dot, f, v, r[:, ::2]) + assert_raises(ValueError, dot, f, v, r[:, :32]) + + r = np.empty((1024, 32), dtype=np.float32) + assert_raises(ValueError, dot, f, v, r) + + r = np.empty((1024, 32), dtype=int) + assert_raises(ValueError, dot, f, v, r) + + def test_dot_out_result(self): + x = np.ones((), dtype=np.float16) + y = np.ones((5,), dtype=np.float16) + z = np.zeros((5,), dtype=np.float16) + res = x.dot(y, out=z) + assert np.array_equal(res, y) + assert np.array_equal(z, y) + + def test_dot_out_aliasing(self): + x = np.ones((), dtype=np.float16) + y = np.ones((5,), dtype=np.float16) + z = np.zeros((5,), dtype=np.float16) + res = x.dot(y, out=z) + z[0] = 2 + assert np.array_equal(res, z) + + def test_dot_array_order(self): + a = np.array([[1, 2], [3, 4]], order='C') + b = np.array([[1, 2], [3, 4]], order='F') + res = np.dot(a, a) + + # integer arrays are exact + assert_equal(np.dot(a, b), res) + assert_equal(np.dot(b, a), res) + assert_equal(np.dot(b, b), res) + + def test_accelerate_framework_sgemv_fix(self): + + def aligned_array(shape, align, dtype, order='C'): + d = dtype(0) + N = np.prod(shape) + tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8) + address = tmp.__array_interface__["data"][0] + for offset in range(align): + if (address + offset) % align == 0: + break + tmp = tmp[offset:offset + N * d.nbytes].view(dtype=dtype) + return tmp.reshape(shape, order=order) + + def as_aligned(arr, align, dtype, order='C'): + aligned = aligned_array(arr.shape, align, dtype, order) + aligned[:] = arr[:] + return aligned + + def assert_dot_close(A, X, desired): + assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7) + + m = aligned_array(100, 15, np.float32) + s = aligned_array((100, 100), 15, np.float32) + np.dot(s, m) # this will always segfault if the bug is present + + testdata = itertools.product((15, 32), (10000,), (200, 89), ('C', 'F')) + for align, m, n, a_order in testdata: + # Calculation in double precision + A_d = np.random.rand(m, n) + X_d = np.random.rand(n) + desired = np.dot(A_d, X_d) + # Calculation with aligned single precision + A_f = as_aligned(A_d, align, np.float32, order=a_order) + X_f = as_aligned(X_d, align, np.float32) + assert_dot_close(A_f, X_f, desired) + # Strided A rows + A_d_2 = A_d[::2] + desired = np.dot(A_d_2, X_d) + A_f_2 = A_f[::2] + assert_dot_close(A_f_2, X_f, desired) + # Strided A columns, strided X vector + A_d_22 = A_d_2[:, ::2] + X_d_2 = X_d[::2] + desired = np.dot(A_d_22, X_d_2) + A_f_22 = A_f_2[:, ::2] + X_f_2 = X_f[::2] + assert_dot_close(A_f_22, X_f_2, desired) + # Check the strides are as expected + if a_order == 'F': + assert_equal(A_f_22.strides, (8, 8 * m)) + else: + assert_equal(A_f_22.strides, (8 * n, 8)) + assert_equal(X_f_2.strides, (8,)) + # Strides in A rows + cols only + X_f_2c = as_aligned(X_f_2, align, np.float32) + assert_dot_close(A_f_22, X_f_2c, desired) + # Strides just in A cols + A_d_12 = A_d[:, ::2] + desired = np.dot(A_d_12, X_d_2) + A_f_12 = A_f[:, ::2] + assert_dot_close(A_f_12, X_f_2c, desired) + # Strides in A cols and X + assert_dot_close(A_f_12, X_f_2, desired) + + @pytest.mark.slow + @pytest.mark.parametrize("dtype", [np.float64, np.complex128]) + @requires_memory(free_bytes=18e9) # complex case needs 18GiB+ + @pytest.mark.thread_unsafe(reason="crashes with low memory") + def test_huge_vectordot(self, dtype): + # Large vector multiplications are chunked with 32bit BLAS + # Test that the chunking does the right thing, see also gh-22262 + data = np.ones(2**30 + 100, dtype=dtype) + res = np.dot(data, data) + assert res == 2**30 + 100 + + def test_dtype_discovery_fails(self): + # See gh-14247, error checking was missing for failed dtype discovery + class BadObject: + def __array__(self, dtype=None, copy=None): + raise TypeError("just this tiny mint leaf") + + with pytest.raises(TypeError): + np.dot(BadObject(), BadObject()) + + with pytest.raises(TypeError): + np.dot(3.0, BadObject()) + + +class MatmulCommon: + """Common tests for '@' operator and numpy.matmul. + + """ + # Should work with these types. Will want to add + # "O" at some point + types = "?bhilqBHILQefdgFDGO" + + def test_exceptions(self): + dims = [ + ((1,), (2,)), # mismatched vector vector + ((2, 1,), (2,)), # mismatched matrix vector + ((2,), (1, 2)), # mismatched vector matrix + ((1, 2), (3, 1)), # mismatched matrix matrix + ((1,), ()), # vector scalar + ((), (1)), # scalar vector + ((1, 1), ()), # matrix scalar + ((), (1, 1)), # scalar matrix + ((2, 2, 1), (3, 1, 2)), # cannot broadcast + ] + + for dt, (dm1, dm2) in itertools.product(self.types, dims): + a = np.ones(dm1, dtype=dt) + b = np.ones(dm2, dtype=dt) + assert_raises(ValueError, self.matmul, a, b) + + def test_shapes(self): + dims = [ + ((1, 1), (2, 1, 1)), # broadcast first argument + ((2, 1, 1), (1, 1)), # broadcast second argument + ((2, 1, 1), (2, 1, 1)), # matrix stack sizes match + ] + + for dt, (dm1, dm2) in itertools.product(self.types, dims): + a = np.ones(dm1, dtype=dt) + b = np.ones(dm2, dtype=dt) + res = self.matmul(a, b) + assert_(res.shape == (2, 1, 1)) + + # vector vector returns scalars. + for dt in self.types: + a = np.ones((2,), dtype=dt) + b = np.ones((2,), dtype=dt) + c = self.matmul(a, b) + assert_(np.array(c).shape == ()) + + def test_result_types(self): + mat = np.ones((1, 1)) + vec = np.ones((1,)) + for dt in self.types: + m = mat.astype(dt) + v = vec.astype(dt) + for arg in [(m, v), (v, m), (m, m)]: + res = self.matmul(*arg) + assert_(res.dtype == dt) + + # vector vector returns scalars + if dt != "O": + res = self.matmul(v, v) + assert_(type(res) is np.dtype(dt).type) + + def test_scalar_output(self): + vec1 = np.array([2]) + vec2 = np.array([3, 4]).reshape(1, -1) + tgt = np.array([6, 8]) + for dt in self.types[1:]: + v1 = vec1.astype(dt) + v2 = vec2.astype(dt) + res = self.matmul(v1, v2) + assert_equal(res, tgt) + res = self.matmul(v2.T, v1) + assert_equal(res, tgt) + + # boolean type + vec = np.array([True, True], dtype='?').reshape(1, -1) + res = self.matmul(vec[:, 0], vec) + assert_equal(res, True) + + def test_vector_vector_values(self): + vec1 = np.array([1, 2]) + vec2 = np.array([3, 4]).reshape(-1, 1) + tgt1 = np.array([11]) + tgt2 = np.array([[3, 6], [4, 8]]) + for dt in self.types[1:]: + v1 = vec1.astype(dt) + v2 = vec2.astype(dt) + res = self.matmul(v1, v2) + assert_equal(res, tgt1) + # no broadcast, we must make v1 into a 2d ndarray + res = self.matmul(v2, v1.reshape(1, -1)) + assert_equal(res, tgt2) + + # boolean type + vec = np.array([True, True], dtype='?') + res = self.matmul(vec, vec) + assert_equal(res, True) + + def test_vector_matrix_values(self): + vec = np.array([1, 2]) + mat1 = np.array([[1, 2], [3, 4]]) + mat2 = np.stack([mat1] * 2, axis=0) + tgt1 = np.array([7, 10]) + tgt2 = np.stack([tgt1] * 2, axis=0) + for dt in self.types[1:]: + v = vec.astype(dt) + m1 = mat1.astype(dt) + m2 = mat2.astype(dt) + res = self.matmul(v, m1) + assert_equal(res, tgt1) + res = self.matmul(v, m2) + assert_equal(res, tgt2) + + # boolean type + vec = np.array([True, False]) + mat1 = np.array([[True, False], [False, True]]) + mat2 = np.stack([mat1] * 2, axis=0) + tgt1 = np.array([True, False]) + tgt2 = np.stack([tgt1] * 2, axis=0) + + res = self.matmul(vec, mat1) + assert_equal(res, tgt1) + res = self.matmul(vec, mat2) + assert_equal(res, tgt2) + + def test_matrix_vector_values(self): + vec = np.array([1, 2]) + mat1 = np.array([[1, 2], [3, 4]]) + mat2 = np.stack([mat1] * 2, axis=0) + tgt1 = np.array([5, 11]) + tgt2 = np.stack([tgt1] * 2, axis=0) + for dt in self.types[1:]: + v = vec.astype(dt) + m1 = mat1.astype(dt) + m2 = mat2.astype(dt) + res = self.matmul(m1, v) + assert_equal(res, tgt1) + res = self.matmul(m2, v) + assert_equal(res, tgt2) + + # boolean type + vec = np.array([True, False]) + mat1 = np.array([[True, False], [False, True]]) + mat2 = np.stack([mat1] * 2, axis=0) + tgt1 = np.array([True, False]) + tgt2 = np.stack([tgt1] * 2, axis=0) + + res = self.matmul(vec, mat1) + assert_equal(res, tgt1) + res = self.matmul(vec, mat2) + assert_equal(res, tgt2) + + def test_matrix_matrix_values(self): + mat1 = np.array([[1, 2], [3, 4]]) + mat2 = np.array([[1, 0], [1, 1]]) + mat12 = np.stack([mat1, mat2], axis=0) + mat21 = np.stack([mat2, mat1], axis=0) + tgt11 = np.array([[7, 10], [15, 22]]) + tgt12 = np.array([[3, 2], [7, 4]]) + tgt21 = np.array([[1, 2], [4, 6]]) + tgt12_21 = np.stack([tgt12, tgt21], axis=0) + tgt11_12 = np.stack((tgt11, tgt12), axis=0) + tgt11_21 = np.stack((tgt11, tgt21), axis=0) + for dt in self.types[1:]: + m1 = mat1.astype(dt) + m2 = mat2.astype(dt) + m12 = mat12.astype(dt) + m21 = mat21.astype(dt) + + # matrix @ matrix + res = self.matmul(m1, m2) + assert_equal(res, tgt12) + res = self.matmul(m2, m1) + assert_equal(res, tgt21) + + # stacked @ matrix + res = self.matmul(m12, m1) + assert_equal(res, tgt11_21) + + # matrix @ stacked + res = self.matmul(m1, m12) + assert_equal(res, tgt11_12) + + # stacked @ stacked + res = self.matmul(m12, m21) + assert_equal(res, tgt12_21) + + # boolean type + m1 = np.array([[1, 1], [0, 0]], dtype=np.bool) + m2 = np.array([[1, 0], [1, 1]], dtype=np.bool) + m12 = np.stack([m1, m2], axis=0) + m21 = np.stack([m2, m1], axis=0) + tgt11 = m1 + tgt12 = m1 + tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool) + tgt12_21 = np.stack([tgt12, tgt21], axis=0) + tgt11_12 = np.stack((tgt11, tgt12), axis=0) + tgt11_21 = np.stack((tgt11, tgt21), axis=0) + + # matrix @ matrix + res = self.matmul(m1, m2) + assert_equal(res, tgt12) + res = self.matmul(m2, m1) + assert_equal(res, tgt21) + + # stacked @ matrix + res = self.matmul(m12, m1) + assert_equal(res, tgt11_21) + + # matrix @ stacked + res = self.matmul(m1, m12) + assert_equal(res, tgt11_12) + + # stacked @ stacked + res = self.matmul(m12, m21) + assert_equal(res, tgt12_21) + + +class TestMatmul(MatmulCommon): + matmul = np.matmul + + def test_out_arg(self): + a = np.ones((5, 2), dtype=float) + b = np.array([[1, 3], [5, 7]], dtype=float) + tgt = np.dot(a, b) + + # test as positional argument + msg = "out positional argument" + out = np.zeros((5, 2), dtype=float) + self.matmul(a, b, out) + assert_array_equal(out, tgt, err_msg=msg) + + # test as keyword argument + msg = "out keyword argument" + out = np.zeros((5, 2), dtype=float) + self.matmul(a, b, out=out) + assert_array_equal(out, tgt, err_msg=msg) + + # test out with not allowed type cast (safe casting) + msg = "Cannot cast ufunc .* output" + out = np.zeros((5, 2), dtype=np.int32) + assert_raises_regex(TypeError, msg, self.matmul, a, b, out=out) + + # test out with type upcast to complex + out = np.zeros((5, 2), dtype=np.complex128) + c = self.matmul(a, b, out=out) + assert_(c is out) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', ComplexWarning) + c = c.astype(tgt.dtype) + assert_array_equal(c, tgt) + + def test_empty_out(self): + # Check that the output cannot be broadcast, so that it cannot be + # size zero when the outer dimensions (iterator size) has size zero. + arr = np.ones((0, 1, 1)) + out = np.ones((1, 1, 1)) + assert self.matmul(arr, arr).shape == (0, 1, 1) + + with pytest.raises(ValueError, match=r"non-broadcastable"): + self.matmul(arr, arr, out=out) + + def test_out_contiguous(self): + a = np.ones((5, 2), dtype=float) + b = np.array([[1, 3], [5, 7]], dtype=float) + v = np.array([1, 3], dtype=float) + tgt = np.dot(a, b) + tgt_mv = np.dot(a, v) + + # test out non-contiguous + out = np.ones((5, 2, 2), dtype=float) + c = self.matmul(a, b, out=out[..., 0]) + assert c.base is out + assert_array_equal(c, tgt) + c = self.matmul(a, v, out=out[:, 0, 0]) + assert_array_equal(c, tgt_mv) + c = self.matmul(v, a.T, out=out[:, 0, 0]) + assert_array_equal(c, tgt_mv) + + # test out contiguous in only last dim + out = np.ones((10, 2), dtype=float) + c = self.matmul(a, b, out=out[::2, :]) + assert_array_equal(c, tgt) + + # test transposes of out, args + out = np.ones((5, 2), dtype=float) + c = self.matmul(b.T, a.T, out=out.T) + assert_array_equal(out, tgt) + + m1 = np.arange(15.).reshape(5, 3) + m2 = np.arange(21.).reshape(3, 7) + m3 = np.arange(30.).reshape(5, 6)[:, ::2] # non-contiguous + vc = np.arange(10.) + vr = np.arange(6.) + m0 = np.zeros((3, 0)) + + @pytest.mark.parametrize('args', ( + # matrix-matrix + (m1, m2), (m2.T, m1.T), (m2.T.copy(), m1.T), (m2.T, m1.T.copy()), + # matrix-matrix-transpose, contiguous and non + (m1, m1.T), (m1.T, m1), (m1, m3.T), (m3, m1.T), + (m3, m3.T), (m3.T, m3), + # matrix-matrix non-contiguous + (m3, m2), (m2.T, m3.T), (m2.T.copy(), m3.T), + # vector-matrix, matrix-vector, contiguous + (m1, vr[:3]), (vc[:5], m1), (m1.T, vc[:5]), (vr[:3], m1.T), + # vector-matrix, matrix-vector, vector non-contiguous + (m1, vr[::2]), (vc[::2], m1), (m1.T, vc[::2]), (vr[::2], m1.T), + # vector-matrix, matrix-vector, matrix non-contiguous + (m3, vr[:3]), (vc[:5], m3), (m3.T, vc[:5]), (vr[:3], m3.T), + # vector-matrix, matrix-vector, both non-contiguous + (m3, vr[::2]), (vc[::2], m3), (m3.T, vc[::2]), (vr[::2], m3.T), + # size == 0 + (m0, m0.T), (m0.T, m0), (m1, m0), (m0.T, m1.T), + )) + def test_dot_equivalent(self, args): + r1 = np.matmul(*args) + r2 = np.dot(*args) + assert_equal(r1, r2) + + r3 = np.matmul(args[0].copy(), args[1].copy()) + assert_equal(r1, r3) + + # issue 29164 with extra checks + @pytest.mark.parametrize('dtype', ( + np.float32, np.float64, np.complex64, np.complex128 + )) + def test_dot_equivalent_matrix_matrix_blastypes(self, dtype): + modes = list(itertools.product(['C', 'F'], [True, False])) + + def apply_mode(m, mode): + order, is_contiguous = mode + if is_contiguous: + return m.copy() if order == 'C' else m.T.copy().T + + retval = np.zeros( + (m.shape[0] * 2, m.shape[1] * 2), dtype=m.dtype, order=order + )[::2, ::2] + retval[...] = m + return retval + + is_complex = np.issubdtype(dtype, np.complexfloating) + m1 = self.m1.astype(dtype) + (1j if is_complex else 0) + m2 = self.m2.astype(dtype) + (1j if is_complex else 0) + dot_res = np.dot(m1, m2) + mo = np.zeros_like(dot_res) + + for mode in itertools.product(*[modes] * 3): + m1_, m2_, mo_ = [apply_mode(*x) for x in zip([m1, m2, mo], mode)] + assert_equal(np.matmul(m1_, m2_, out=mo_), dot_res) + + def test_matmul_object(self): + import fractions + + f = np.vectorize(fractions.Fraction) + + def random_ints(): + return np.random.randint(1, 1000, size=(10, 3, 3)) + M1 = f(random_ints(), random_ints()) + M2 = f(random_ints(), random_ints()) + + M3 = self.matmul(M1, M2) + + [N1, N2, N3] = [a.astype(float) for a in [M1, M2, M3]] + + assert_allclose(N3, self.matmul(N1, N2)) + + def test_matmul_object_type_scalar(self): + from fractions import Fraction as F + v = np.array([F(2, 3), F(5, 7)]) + res = self.matmul(v, v) + assert_(type(res) is F) + + def test_matmul_empty(self): + a = np.empty((3, 0), dtype=object) + b = np.empty((0, 3), dtype=object) + c = np.zeros((3, 3)) + assert_array_equal(np.matmul(a, b), c) + + def test_matmul_exception_multiply(self): + # test that matmul fails if `__mul__` is missing + class add_not_multiply: + def __add__(self, other): + return self + a = np.full((3, 3), add_not_multiply()) + with assert_raises(TypeError): + b = np.matmul(a, a) + + def test_matmul_exception_add(self): + # test that matmul fails if `__add__` is missing + class multiply_not_add: + def __mul__(self, other): + return self + a = np.full((3, 3), multiply_not_add()) + with assert_raises(TypeError): + b = np.matmul(a, a) + + def test_matmul_bool(self): + # gh-14439 + a = np.array([[1, 0], [1, 1]], dtype=bool) + assert np.max(a.view(np.uint8)) == 1 + b = np.matmul(a, a) + # matmul with boolean output should always be 0, 1 + assert np.max(b.view(np.uint8)) == 1 + + rg = np.random.default_rng(np.random.PCG64(43)) + d = rg.integers(2, size=4 * 5, dtype=np.int8) + d = d.reshape(4, 5) > 0 + out1 = np.matmul(d, d.reshape(5, 4)) + out2 = np.dot(d, d.reshape(5, 4)) + assert_equal(out1, out2) + + c = np.matmul(np.zeros((2, 0), dtype=bool), np.zeros(0, dtype=bool)) + assert not np.any(c) + + +class TestMatmulOperator(MatmulCommon): + import operator + matmul = operator.matmul + + def test_array_priority_override(self): + + class A: + __array_priority__ = 1000 + + def __matmul__(self, other): + return "A" + + def __rmatmul__(self, other): + return "A" + + a = A() + b = np.ones(2) + assert_equal(self.matmul(a, b), "A") + assert_equal(self.matmul(b, a), "A") + + def test_matmul_raises(self): + assert_raises(TypeError, self.matmul, np.int8(5), np.int8(5)) + assert_raises(TypeError, self.matmul, np.void(b'abc'), np.void(b'abc')) + assert_raises(TypeError, self.matmul, np.arange(10), np.void(b'abc')) + + +class TestMatmulInplace: + DTYPES = {} + for i in MatmulCommon.types: + for j in MatmulCommon.types: + if np.can_cast(j, i): + DTYPES[f"{i}-{j}"] = (np.dtype(i), np.dtype(j)) + + @pytest.mark.parametrize("dtype1,dtype2", DTYPES.values(), ids=DTYPES) + def test_basic(self, dtype1: np.dtype, dtype2: np.dtype) -> None: + a = np.arange(10).reshape(5, 2).astype(dtype1) + a_id = id(a) + b = np.ones((2, 2), dtype=dtype2) + + ref = a @ b + a @= b + + assert id(a) == a_id + assert a.dtype == dtype1 + assert a.shape == (5, 2) + if dtype1.kind in "fc": + np.testing.assert_allclose(a, ref) + else: + np.testing.assert_array_equal(a, ref) + + SHAPES = { + "2d_large": ((10**5, 10), (10, 10)), + "3d_large": ((10**4, 10, 10), (1, 10, 10)), + "1d": ((3,), (3,)), + "2d_1d": ((3, 3), (3,)), + "1d_2d": ((3,), (3, 3)), + "2d_broadcast": ((3, 3), (3, 1)), + "2d_broadcast_reverse": ((1, 3), (3, 3)), + "3d_broadcast1": ((3, 3, 3), (1, 3, 1)), + "3d_broadcast2": ((3, 3, 3), (1, 3, 3)), + "3d_broadcast3": ((3, 3, 3), (3, 3, 1)), + "3d_broadcast_reverse1": ((1, 3, 3), (3, 3, 3)), + "3d_broadcast_reverse2": ((3, 1, 3), (3, 3, 3)), + "3d_broadcast_reverse3": ((1, 1, 3), (3, 3, 3)), + } + + @pytest.mark.parametrize("a_shape,b_shape", SHAPES.values(), ids=SHAPES) + def test_shapes(self, a_shape: tuple[int, ...], b_shape: tuple[int, ...]): + a_size = np.prod(a_shape) + a = np.arange(a_size).reshape(a_shape).astype(np.float64) + a_id = id(a) + + b_size = np.prod(b_shape) + b = np.arange(b_size).reshape(b_shape) + + ref = a @ b + if ref.shape != a_shape: + with pytest.raises(ValueError): + a @= b + return + else: + a @= b + + assert id(a) == a_id + assert a.dtype.type == np.float64 + assert a.shape == a_shape + np.testing.assert_allclose(a, ref) + + +def test_matmul_axes(): + a = np.arange(3 * 4 * 5).reshape(3, 4, 5) + c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)]) + assert c.shape == (3, 4, 4) + d = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (0, 1)]) + assert d.shape == (4, 4, 3) + e = np.swapaxes(d, 0, 2) + assert_array_equal(e, c) + f = np.matmul(a, np.arange(3), axes=[(1, 0), (0), (0)]) + assert f.shape == (4, 5) + + +class TestInner: + + def test_inner_type_mismatch(self): + c = 1. + A = np.array((1, 1), dtype='i,i') + + assert_raises(TypeError, np.inner, c, A) + assert_raises(TypeError, np.inner, A, c) + + def test_inner_scalar_and_vector(self): + for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': + sca = np.array(3, dtype=dt)[()] + vec = np.array([1, 2], dtype=dt) + desired = np.array([3, 6], dtype=dt) + assert_equal(np.inner(vec, sca), desired) + assert_equal(np.inner(sca, vec), desired) + + def test_vecself(self): + # Ticket 844. + # Inner product of a vector with itself segfaults or give + # meaningless result + a = np.zeros(shape=(1, 80), dtype=np.float64) + p = np.inner(a, a) + assert_almost_equal(p, 0, decimal=14) + + def test_inner_product_with_various_contiguities(self): + # github issue 6532 + for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': + # check an inner product involving a matrix transpose + A = np.array([[1, 2], [3, 4]], dtype=dt) + B = np.array([[1, 3], [2, 4]], dtype=dt) + C = np.array([1, 1], dtype=dt) + desired = np.array([4, 6], dtype=dt) + assert_equal(np.inner(A.T, C), desired) + assert_equal(np.inner(C, A.T), desired) + assert_equal(np.inner(B, C), desired) + assert_equal(np.inner(C, B), desired) + # check a matrix product + desired = np.array([[7, 10], [15, 22]], dtype=dt) + assert_equal(np.inner(A, B), desired) + # check the syrk vs. gemm paths + desired = np.array([[5, 11], [11, 25]], dtype=dt) + assert_equal(np.inner(A, A), desired) + assert_equal(np.inner(A, A.copy()), desired) + # check an inner product involving an aliased and reversed view + a = np.arange(5).astype(dt) + b = a[::-1] + desired = np.array(10, dtype=dt).item() + assert_equal(np.inner(b, a), desired) + + def test_3d_tensor(self): + for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': + a = np.arange(24).reshape(2, 3, 4).astype(dt) + b = np.arange(24, 48).reshape(2, 3, 4).astype(dt) + desired = np.array( + [[[[ 158, 182, 206], + [ 230, 254, 278]], + + [[ 566, 654, 742], + [ 830, 918, 1006]], + + [[ 974, 1126, 1278], + [1430, 1582, 1734]]], + + [[[1382, 1598, 1814], + [2030, 2246, 2462]], + + [[1790, 2070, 2350], + [2630, 2910, 3190]], + + [[2198, 2542, 2886], + [3230, 3574, 3918]]]] + ).astype(dt) + assert_equal(np.inner(a, b), desired) + assert_equal(np.inner(b, a).transpose(2, 3, 0, 1), desired) + + +class TestChoose: + def _create_data(self): + x = 2 * np.ones((3,), dtype=int) + y = 3 * np.ones((3,), dtype=int) + x2 = 2 * np.ones((2, 3), dtype=int) + y2 = 3 * np.ones((2, 3), dtype=int) + ind = [0, 0, 1] + return x, y, x2, y2, ind + + def test_basic(self): + x, y, _, _, ind = self._create_data() + A = np.choose(ind, (x, y)) + assert_equal(A, [2, 2, 3]) + + def test_broadcast1(self): + _, _, x2, y2, ind = self._create_data() + A = np.choose(ind, (x2, y2)) + assert_equal(A, [[2, 2, 3], [2, 2, 3]]) + + def test_broadcast2(self): + x, _, _, y2, ind = self._create_data() + A = np.choose(ind, (x, y2)) + assert_equal(A, [[2, 2, 3], [2, 2, 3]]) + + @pytest.mark.parametrize("ops", + [(1000, np.array([1], dtype=np.uint8)), + (-1, np.array([1], dtype=np.uint8)), + (1., np.float32(3)), + (1., np.array([3], dtype=np.float32))],) + def test_output_dtype(self, ops): + expected_dt = np.result_type(*ops) + assert np.choose([0], ops).dtype == expected_dt + + def test_dimension_and_args_limit(self): + # Maxdims for the legacy iterator is 32, but the maximum number + # of arguments is actually larger (a itself also counts here) + a = np.ones((1,) * 32, dtype=np.intp) + res = a.choose([0, a] + [2] * 61) + with pytest.raises(ValueError, + match="Need at least 0 and at most 64 array objects"): + a.choose([0, a] + [2] * 62) + + assert_array_equal(res, a) + # Choose is unfortunately limited to 32 dims as of NumPy 2.0 + a = np.ones((1,) * 60, dtype=np.intp) + with pytest.raises(RuntimeError, + match=".*32 dimensions but the array has 60"): + a.choose([a, a]) + + +class TestRepeat: + def _create_data(self): + m = np.array([1, 2, 3, 4, 5, 6]) + m_rect = m.reshape((2, 3)) + return m, m_rect + + def test_basic(self): + m, _ = self._create_data() + A = np.repeat(m, [1, 3, 2, 1, 1, 2]) + assert_equal(A, [1, 2, 2, 2, 3, + 3, 4, 5, 6, 6]) + + def test_broadcast1(self): + m, _ = self._create_data() + A = np.repeat(m, 2) + assert_equal(A, [1, 1, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 6]) + + def test_axis_spec(self): + _, m_rect = self._create_data() + A = np.repeat(m_rect, [2, 1], axis=0) + assert_equal(A, [[1, 2, 3], + [1, 2, 3], + [4, 5, 6]]) + + A = np.repeat(m_rect, [1, 3, 2], axis=1) + assert_equal(A, [[1, 2, 2, 2, 3, 3], + [4, 5, 5, 5, 6, 6]]) + + def test_broadcast2(self): + _, m_rect = self._create_data() + A = np.repeat(m_rect, 2, axis=0) + assert_equal(A, [[1, 2, 3], + [1, 2, 3], + [4, 5, 6], + [4, 5, 6]]) + + A = np.repeat(m_rect, 2, axis=1) + assert_equal(A, [[1, 1, 2, 2, 3, 3], + [4, 4, 5, 5, 6, 6]]) + + +# TODO: test for multidimensional +NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4} + + +@pytest.mark.parametrize('dt', [float, Decimal], ids=['float', 'object']) +class TestNeighborhoodIter: + # Simple, 2d tests + def test_simple2d(self, dt): + # Test zero and one padding for simple data type + x = np.array([[0, 1], [2, 3]], dtype=dt) + r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt), + np.array([[0, 0, 0], [0, 1, 0]], dtype=dt), + np.array([[0, 0, 1], [0, 2, 3]], dtype=dt), + np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 0, -1, 1], x[0], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt), + np.array([[1, 1, 1], [0, 1, 1]], dtype=dt), + np.array([[1, 0, 1], [1, 2, 3]], dtype=dt), + np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 0, -1, 1], x[0], NEIGH_MODE['one']) + assert_array_equal(l, r) + + r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt), + np.array([[4, 4, 4], [0, 1, 4]], dtype=dt), + np.array([[4, 0, 1], [4, 2, 3]], dtype=dt), + np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant']) + assert_array_equal(l, r) + + # Test with start in the middle + r = [np.array([[4, 0, 1], [4, 2, 3]], dtype=dt), + np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant'], 2) + assert_array_equal(l, r) + + def test_mirror2d(self, dt): + x = np.array([[0, 1], [2, 3]], dtype=dt) + r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt), + np.array([[0, 1, 1], [0, 1, 1]], dtype=dt), + np.array([[0, 0, 1], [2, 2, 3]], dtype=dt), + np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 0, -1, 1], x[0], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Simple, 1d tests + def test_simple(self, dt): + # Test padding with constant values + x = np.linspace(1, 5, 5).astype(dt) + r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 1], x[0], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 1], x[0], NEIGH_MODE['one']) + assert_array_equal(l, r) + + r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 1], x[4], NEIGH_MODE['constant']) + assert_array_equal(l, r) + + # Test mirror modes + def test_mirror(self, dt): + x = np.linspace(1, 5, 5).astype(dt) + r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5], + [2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt) + l = _multiarray_tests.test_neighborhood_iterator( + x, [-2, 2], x[1], NEIGH_MODE['mirror']) + assert_([i.dtype == dt for i in l]) + assert_array_equal(l, r) + + # Circular mode + def test_circular(self, dt): + x = np.linspace(1, 5, 5).astype(dt) + r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5], + [2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt) + l = _multiarray_tests.test_neighborhood_iterator( + x, [-2, 2], x[0], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + +# Test stacking neighborhood iterators +class TestStackedNeighborhoodIter: + # Simple, 1d test: stacking 2 constant-padded neigh iterators + def test_simple_const(self): + dt = np.float64 + # Test zero and one padding for simple data type + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0], dtype=dt), + np.array([0], dtype=dt), + np.array([1], dtype=dt), + np.array([2], dtype=dt), + np.array([3], dtype=dt), + np.array([0], dtype=dt), + np.array([0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-2, 4], NEIGH_MODE['zero'], [0, 0], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + r = [np.array([1, 0, 1], dtype=dt), + np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt), + np.array([3, 0, 1], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-1, 1], NEIGH_MODE['one']) + assert_array_equal(l, r) + + # 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and + # mirror padding + def test_simple_mirror(self): + dt = np.float64 + # Stacking zero on top of mirror + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 1, 1], dtype=dt), + np.array([1, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 3], dtype=dt), + np.array([3, 3, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['mirror'], [-1, 1], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 0, 0], dtype=dt), + np.array([0, 0, 1], dtype=dt), + np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 2nd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt), + np.array([3, 0, 0], dtype=dt), + np.array([0, 0, 3], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 3rd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 0, 0, 1, 2], dtype=dt), + np.array([0, 0, 1, 2, 3], dtype=dt), + np.array([0, 1, 2, 3, 0], dtype=dt), + np.array([1, 2, 3, 0, 0], dtype=dt), + np.array([2, 3, 0, 0, 3], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and + # circular padding + def test_simple_circular(self): + dt = np.float64 + # Stacking zero on top of mirror + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 3, 1], dtype=dt), + np.array([3, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 1], dtype=dt), + np.array([3, 1, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['circular'], [-1, 1], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([3, 0, 0], dtype=dt), + np.array([0, 0, 1], dtype=dt), + np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 2nd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt), + np.array([3, 0, 0], dtype=dt), + np.array([0, 0, 1], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 3rd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([3, 0, 0, 1, 2], dtype=dt), + np.array([0, 0, 1, 2, 3], dtype=dt), + np.array([0, 1, 2, 3, 0], dtype=dt), + np.array([1, 2, 3, 0, 0], dtype=dt), + np.array([2, 3, 0, 0, 1], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + # 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator + # being strictly within the array + def test_simple_strict_within(self): + dt = np.float64 + # Stacking zero on top of zero, first neighborhood strictly inside the + # array + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 2, 3, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero, first neighborhood strictly inside the + # array + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 2, 3, 3], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero, first neighborhood strictly inside the + # array + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 2, 3, 1], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['circular']) + assert_array_equal(l, r) + +class TestWarnings: + + def test_complex_warning(self): + x = np.array([1, 2]) + y = np.array([1 - 2j, 1 + 2j]) + + with warnings.catch_warnings(): + warnings.simplefilter("error", ComplexWarning) + assert_raises(ComplexWarning, x.__setitem__, slice(None), y) + assert_equal(x, [1, 2]) + + +class TestMinScalarType: + + def test_usigned_shortshort(self): + dt = np.min_scalar_type(2**8 - 1) + wanted = np.dtype('uint8') + assert_equal(wanted, dt) + + def test_usigned_short(self): + dt = np.min_scalar_type(2**16 - 1) + wanted = np.dtype('uint16') + assert_equal(wanted, dt) + + def test_usigned_int(self): + dt = np.min_scalar_type(2**32 - 1) + wanted = np.dtype('uint32') + assert_equal(wanted, dt) + + def test_usigned_longlong(self): + dt = np.min_scalar_type(2**63 - 1) + wanted = np.dtype('uint64') + assert_equal(wanted, dt) + + def test_object(self): + dt = np.min_scalar_type(2**64) + wanted = np.dtype('O') + assert_equal(wanted, dt) + + +from numpy._core._internal import _dtype_from_pep3118 + + +class TestPEP3118Dtype: + def _check(self, spec, wanted): + dt = np.dtype(wanted) + actual = _dtype_from_pep3118(spec) + assert_equal(actual, dt, + err_msg=f"spec {spec!r} != dtype {wanted!r}") + + def test_native_padding(self): + align = np.dtype('i').alignment + for j in range(8): + if j == 0: + s = 'bi' + else: + s = 'b%dxi' % j + self._check('@' + s, {'f0': ('i1', 0), + 'f1': ('i', align * (1 + j // align))}) + self._check('=' + s, {'f0': ('i1', 0), + 'f1': ('i', 1 + j)}) + + def test_native_padding_2(self): + # Native padding should work also for structs and sub-arrays + self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)}) + self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)}) + + def test_trailing_padding(self): + # Trailing padding should be included, *and*, the item size + # should match the alignment if in aligned mode + align = np.dtype('i').alignment + size = np.dtype('i').itemsize + + def aligned(n): + return align * (1 + (n - 1) // align) + + base = {"formats": ['i'], "names": ['f0']} + + self._check('ix', dict(itemsize=aligned(size + 1), **base)) + self._check('ixx', dict(itemsize=aligned(size + 2), **base)) + self._check('ixxx', dict(itemsize=aligned(size + 3), **base)) + self._check('ixxxx', dict(itemsize=aligned(size + 4), **base)) + self._check('i7x', dict(itemsize=aligned(size + 7), **base)) + + self._check('^ix', dict(itemsize=size + 1, **base)) + self._check('^ixx', dict(itemsize=size + 2, **base)) + self._check('^ixxx', dict(itemsize=size + 3, **base)) + self._check('^ixxxx', dict(itemsize=size + 4, **base)) + self._check('^i7x', dict(itemsize=size + 7, **base)) + + def test_native_padding_3(self): + dt = np.dtype( + [('a', 'b'), ('b', 'i'), + ('sub', np.dtype('b,i')), ('c', 'i')], + align=True) + self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt) + + dt = np.dtype( + [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'), + ('e', 'b'), ('sub', np.dtype('b,i', align=True))]) + self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt) + + def test_padding_with_array_inside_struct(self): + dt = np.dtype( + [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), + ('d', 'i')], + align=True) + self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt) + + def test_byteorder_inside_struct(self): + # The byte order after @T{=i} should be '=', not '@'. + # Check this by noting the absence of native alignment. + self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0), + 'f1': ('i', 5)}) + + def test_intra_padding(self): + # Natively aligned sub-arrays may require some internal padding + align = np.dtype('i').alignment + size = np.dtype('i').itemsize + + def aligned(n): + return (align * (1 + (n - 1) // align)) + + self._check('(3)T{ix}', ({ + "names": ['f0'], + "formats": ['i'], + "offsets": [0], + "itemsize": aligned(size + 1) + }, (3,))) + + def test_char_vs_string(self): + dt = np.dtype('c') + self._check('c', dt) + + dt = np.dtype([('f0', 'S1', (4,)), ('f1', 'S4')]) + self._check('4c4s', dt) + + def test_field_order(self): + # gh-9053 - previously, we relied on dictionary key order + self._check("(0)I:a:f:b:", [('a', 'I', (0,)), ('b', 'f')]) + self._check("(0)I:b:f:a:", [('b', 'I', (0,)), ('a', 'f')]) + + def test_unnamed_fields(self): + self._check('ii', [('f0', 'i'), ('f1', 'i')]) + self._check('ii:f0:', [('f1', 'i'), ('f0', 'i')]) + + self._check('i', 'i') + self._check('i:f0:', [('f0', 'i')]) + + +class TestNewBufferProtocol: + """ Test PEP3118 buffers """ + + def _check_roundtrip(self, obj): + obj = np.asarray(obj) + x = memoryview(obj) + y = np.asarray(x) + y2 = np.array(x) + assert_(not y.flags.owndata) + assert_(y2.flags.owndata) + + assert_equal(y.dtype, obj.dtype) + assert_equal(y.shape, obj.shape) + assert_array_equal(obj, y) + + assert_equal(y2.dtype, obj.dtype) + assert_equal(y2.shape, obj.shape) + assert_array_equal(obj, y2) + + def test_roundtrip(self): + x = np.array([1, 2, 3, 4, 5], dtype='i4') + self._check_roundtrip(x) + + x = np.array([[1, 2], [3, 4]], dtype=np.float64) + self._check_roundtrip(x) + + x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0, :] + self._check_roundtrip(x) + + dt = [('a', 'b'), + ('b', 'h'), + ('c', 'i'), + ('d', 'l'), + ('dx', 'q'), + ('e', 'B'), + ('f', 'H'), + ('g', 'I'), + ('h', 'L'), + ('hx', 'Q'), + ('i', np.single), + ('j', np.double), + ('k', np.longdouble), + ('ix', np.csingle), + ('jx', np.cdouble), + ('kx', np.clongdouble), + ('l', 'S4'), + ('m', 'U4'), + ('n', 'V3'), + ('o', '?'), + ('p', np.half), + ] + x = np.array( + [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + b'aaaa', 'bbbb', b'xxx', True, 1.0)], + dtype=dt) + self._check_roundtrip(x) + + x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))]) + self._check_roundtrip(x) + + x = np.array([1, 2, 3], dtype='>i2') + self._check_roundtrip(x) + + x = np.array([1, 2, 3], dtype='') + x = np.zeros(4, dtype=dt) + self._check_roundtrip(x) + + def test_roundtrip_scalar(self): + # Issue #4015. + self._check_roundtrip(0) + + def test_invalid_buffer_format(self): + # datetime64 cannot be used fully in a buffer yet + # Should be fixed in the next Numpy major release + dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')]) + a = np.empty(3, dt) + assert_raises((ValueError, BufferError), memoryview, a) + assert_raises((ValueError, BufferError), memoryview, np.array((3), 'M8[D]')) + + def test_export_simple_1d(self): + x = np.array([1, 2, 3, 4, 5], dtype='i') + y = memoryview(x) + assert_equal(y.format, 'i') + assert_equal(y.shape, (5,)) + assert_equal(y.ndim, 1) + assert_equal(y.strides, (4,)) + assert_equal(y.suboffsets, ()) + assert_equal(y.itemsize, 4) + + def test_export_simple_nd(self): + x = np.array([[1, 2], [3, 4]], dtype=np.float64) + y = memoryview(x) + assert_equal(y.format, 'd') + assert_equal(y.shape, (2, 2)) + assert_equal(y.ndim, 2) + assert_equal(y.strides, (16, 8)) + assert_equal(y.suboffsets, ()) + assert_equal(y.itemsize, 8) + + def test_export_discontiguous(self): + x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0, :] + y = memoryview(x) + assert_equal(y.format, 'f') + assert_equal(y.shape, (3, 3)) + assert_equal(y.ndim, 2) + assert_equal(y.strides, (36, 4)) + assert_equal(y.suboffsets, ()) + assert_equal(y.itemsize, 4) + + def test_export_record(self): + dt = [('a', 'b'), + ('b', 'h'), + ('c', 'i'), + ('d', 'l'), + ('dx', 'q'), + ('e', 'B'), + ('f', 'H'), + ('g', 'I'), + ('h', 'L'), + ('hx', 'Q'), + ('i', np.single), + ('j', np.double), + ('k', np.longdouble), + ('ix', np.csingle), + ('jx', np.cdouble), + ('kx', np.clongdouble), + ('l', 'S4'), + ('m', 'U4'), + ('n', 'V3'), + ('o', '?'), + ('p', np.half), + ] + x = np.array( + [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + b'aaaa', 'bbbb', b' ', True, 1.0)], + dtype=dt) + y = memoryview(x) + assert_equal(y.shape, (1,)) + assert_equal(y.ndim, 1) + assert_equal(y.suboffsets, ()) + + sz = sum(np.dtype(b).itemsize for a, b in dt) + if np.dtype('l').itemsize == 4: + assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') + else: + assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') + assert_equal(y.strides, (sz,)) + assert_equal(y.itemsize, sz) + + def test_export_subarray(self): + x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))]) + y = memoryview(x) + assert_equal(y.format, 'T{(2,2)i:a:}') + assert_equal(y.shape, ()) + assert_equal(y.ndim, 0) + assert_equal(y.strides, ()) + assert_equal(y.suboffsets, ()) + assert_equal(y.itemsize, 16) + + def test_export_endian(self): + x = np.array([1, 2, 3], dtype='>i') + y = memoryview(x) + if sys.byteorder == 'little': + assert_equal(y.format, '>i') + else: + assert_equal(y.format, 'i') + + x = np.array([1, 2, 3], dtype=' np.array(0, dtype=dt1), f"type {dt1} failed") + assert_(not 1 < np.array(0, dtype=dt1), f"type {dt1} failed") + + for dt2 in np.typecodes['AllInteger']: + assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2), + f"type {dt1} and {dt2} failed") + assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2), + f"type {dt1} and {dt2} failed") + + # Unsigned integers + for dt1 in 'BHILQP': + assert_(-1 < np.array(1, dtype=dt1), f"type {dt1} failed") + assert_(not -1 > np.array(1, dtype=dt1), f"type {dt1} failed") + assert_(-1 != np.array(1, dtype=dt1), f"type {dt1} failed") + + # Unsigned vs signed + for dt2 in 'bhilqp': + assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), + f"type {dt1} and {dt2} failed") + assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), + f"type {dt1} and {dt2} failed") + assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2), + f"type {dt1} and {dt2} failed") + + # Signed integers and floats + for dt1 in 'bhlqp' + np.typecodes['Float']: + assert_(1 > np.array(-1, dtype=dt1), f"type {dt1} failed") + assert_(not 1 < np.array(-1, dtype=dt1), f"type {dt1} failed") + assert_(-1 == np.array(-1, dtype=dt1), f"type {dt1} failed") + + for dt2 in 'bhlqp' + np.typecodes['Float']: + assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), + f"type {dt1} and {dt2} failed") + assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), + f"type {dt1} and {dt2} failed") + assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2), + f"type {dt1} and {dt2} failed") + + def test_to_bool_scalar(self): + assert_equal(bool(np.array([False])), False) + assert_equal(bool(np.array([True])), True) + assert_equal(bool(np.array([[42]])), True) + + def test_to_bool_scalar_not_convertible(self): + + class NotConvertible: + def __bool__(self): + raise NotImplementedError + + assert_raises(NotImplementedError, bool, np.array(NotConvertible())) + assert_raises(NotImplementedError, bool, np.array([NotConvertible()])) + if IS_PYSTON: + pytest.skip("Pyston disables recursion checking") + if IS_WASM: + pytest.skip("Pyodide/WASM has limited stack size") + + self_containing = np.array([None]) + self_containing[0] = self_containing + + Error = RecursionError + + assert_raises(Error, bool, self_containing) # previously stack overflow + self_containing[0] = None # resolve circular reference + + def test_to_bool_scalar_size_errors(self): + with pytest.raises(ValueError, match=".*one element is ambiguous"): + bool(np.array([1, 2])) + + with pytest.raises(ValueError, match=".*empty array is ambiguous"): + bool(np.empty((3, 0))) + + with pytest.raises(ValueError, match=".*empty array is ambiguous"): + bool(np.empty((0,))) + + def test_to_int_scalar(self): + # gh-9972 means that these aren't always the same + int_funcs = (int, lambda x: x.__int__()) + for int_func in int_funcs: + assert_equal(int_func(np.array(0)), 0) + assert_raises(TypeError, int_func, np.array([1])) + assert_raises(TypeError, int_func, np.array([[42]])) + assert_raises(TypeError, int_func, np.array([1, 2])) + + # gh-9972 + assert_equal(4, int_func(np.array('4'))) + assert_equal(5, int_func(np.bytes_(b'5'))) + assert_equal(6, int_func(np.str_('6'))) + + class NotConvertible: + def __int__(self): + raise NotImplementedError + assert_raises(NotImplementedError, + int_func, np.array(NotConvertible())) + assert_raises(TypeError, + int_func, np.array([NotConvertible()])) + + def test_to_float_scalar(self): + float_funcs = (float, lambda x: x.__float__()) + for float_func in float_funcs: + assert_equal(float_func(np.array(0)), 0.0) + assert_equal(float_func(np.array(1.0, np.float64)), 1.0) + assert_raises(TypeError, float_func, np.array([2])) + assert_raises(TypeError, float_func, np.array([3.14])) + assert_raises(TypeError, float_func, np.array([[4.0]])) + + assert_equal(5.0, float_func(np.array('5'))) + assert_equal(5.1, float_func(np.array('5.1'))) + assert_equal(6.0, float_func(np.bytes_(b'6'))) + assert_equal(6.1, float_func(np.bytes_(b'6.1'))) + assert_equal(7.0, float_func(np.str_('7'))) + assert_equal(7.1, float_func(np.str_('7.1'))) + + +class TestWhere: + def test_basic(self): + dts = [bool, np.int16, np.int32, np.int64, np.double, np.complex128, + np.longdouble, np.clongdouble] + for dt in dts: + c = np.ones(53, dtype=bool) + assert_equal(np.where( c, dt(0), dt(1)), dt(0)) + assert_equal(np.where(~c, dt(0), dt(1)), dt(1)) + assert_equal(np.where(True, dt(0), dt(1)), dt(0)) + assert_equal(np.where(False, dt(0), dt(1)), dt(1)) + d = np.ones_like(c).astype(dt) + e = np.zeros_like(d) + r = d.astype(dt) + c[7] = False + r[7] = e[7] + assert_equal(np.where(c, e, e), e) + assert_equal(np.where(c, d, e), r) + assert_equal(np.where(c, d, e[0]), r) + assert_equal(np.where(c, d[0], e), r) + assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2]) + assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2]) + assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3]) + assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3]) + assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2]) + assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3]) + assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3]) + + @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support") + def test_exotic(self): + # object + assert_array_equal(np.where(True, None, None), np.array(None)) + # zero sized + m = np.array([], dtype=bool).reshape(0, 3) + b = np.array([], dtype=np.float64).reshape(0, 3) + assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3)) + + # object cast + d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313, + 0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013, + 1.267, 0.229, -1.39, 0.487]) + nan = float('NaN') + e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan, + 'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'], + dtype=object) + m = np.array([0, 0, 1, 0, 1, 1, 0, 0, 1, 1, + 0, 1, 1, 0, 1, 1, 0, 1, 0, 0], dtype=bool) + + r = e[:] + r[np.where(m)] = d[np.where(m)] + assert_array_equal(np.where(m, d, e), r) + + r = e[:] + r[np.where(~m)] = d[np.where(~m)] + assert_array_equal(np.where(m, e, d), r) + + assert_array_equal(np.where(m, e, e), e) + + # minimal dtype result with NaN scalar (e.g required by pandas) + d = np.array([1., 2.], dtype=np.float32) + e = float('NaN') + assert_equal(np.where(True, d, e).dtype, np.float32) + e = float('Infinity') + assert_equal(np.where(True, d, e).dtype, np.float32) + e = float('-Infinity') + assert_equal(np.where(True, d, e).dtype, np.float32) + # With NEP 50 adopted, the float will overflow here: + e = 1e150 + with pytest.warns(RuntimeWarning, match="overflow"): + res = np.where(True, d, e) + assert res.dtype == np.float32 + + def test_ndim(self): + c = [True, False] + a = np.zeros((2, 25)) + b = np.ones((2, 25)) + r = np.where(np.array(c)[:, np.newaxis], a, b) + assert_array_equal(r[0], a[0]) + assert_array_equal(r[1], b[0]) + + a = a.T + b = b.T + r = np.where(c, a, b) + assert_array_equal(r[:, 0], a[:, 0]) + assert_array_equal(r[:, 1], b[:, 0]) + + def test_dtype_mix(self): + c = np.array([False, True, False, False, False, False, True, False, + False, False, True, False]) + a = np.uint32(1) + b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], + dtype=np.float64) + r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], + dtype=np.float64) + assert_equal(np.where(c, a, b), r) + + a = a.astype(np.float32) + b = b.astype(np.int64) + assert_equal(np.where(c, a, b), r) + + # non bool mask + c = c.astype(int) + c[c != 0] = 34242324 + assert_equal(np.where(c, a, b), r) + # invert + tmpmask = c != 0 + c[c == 0] = 41247212 + c[tmpmask] = 0 + assert_equal(np.where(c, b, a), r) + + def test_foreign(self): + c = np.array([False, True, False, False, False, False, True, False, + False, False, True, False]) + r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], + dtype=np.float64) + a = np.ones(1, dtype='>i4') + b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], + dtype=np.float64) + assert_equal(np.where(c, a, b), r) + + b = b.astype('>f8') + assert_equal(np.where(c, a, b), r) + + a = a.astype('i4') + assert_equal(np.where(c, a, b), r) + + def test_error(self): + c = [True, True] + a = np.ones((4, 5)) + b = np.ones((5, 5)) + assert_raises(ValueError, np.where, c, a, a) + assert_raises(ValueError, np.where, c[0], a, b) + + def test_string(self): + # gh-4778 check strings are properly filled with nulls + a = np.array("abc") + b = np.array("x" * 753) + assert_equal(np.where(True, a, b), "abc") + assert_equal(np.where(False, b, a), "abc") + + # check native datatype sized strings + a = np.array("abcd") + b = np.array("x" * 8) + assert_equal(np.where(True, a, b), "abcd") + assert_equal(np.where(False, b, a), "abcd") + + def test_empty_result(self): + # pass empty where result through an assignment which reads the data of + # empty arrays, error detectable with valgrind, see gh-8922 + x = np.zeros((1, 1)) + ibad = np.vstack(np.where(x == 99.)) + assert_array_equal(ibad, + np.atleast_2d(np.array([[], []], dtype=np.intp))) + + def test_largedim(self): + # invalid read regression gh-9304 + shape = [10, 2, 3, 4, 5, 6] + np.random.seed(2) + array = np.random.rand(*shape) + + for i in range(10): + benchmark = array.nonzero() + result = array.nonzero() + assert_array_equal(benchmark, result) + + def test_kwargs(self): + a = np.zeros(1) + with assert_raises(TypeError): + np.where(a, x=a, y=a) + + +if not IS_PYPY: + # sys.getsizeof() is not valid on PyPy + class TestSizeOf: + + def test_empty_array(self): + x = np.array([]) + assert_(sys.getsizeof(x) > 0) + + def check_array(self, dtype): + elem_size = dtype(0).itemsize + + for length in [10, 50, 100, 500]: + x = np.arange(length, dtype=dtype) + assert_(sys.getsizeof(x) > length * elem_size) + + def test_array_int32(self): + self.check_array(np.int32) + + def test_array_int64(self): + self.check_array(np.int64) + + def test_array_float32(self): + self.check_array(np.float32) + + def test_array_float64(self): + self.check_array(np.float64) + + def test_view(self): + d = np.ones(100) + assert_(sys.getsizeof(d[...]) < sys.getsizeof(d)) + + def test_reshape(self): + d = np.ones(100) + assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy())) + + @_no_tracing + def test_resize(self): + d = np.ones(100) + old = sys.getsizeof(d) + d.resize(50) + assert_(old > sys.getsizeof(d)) + d.resize(150) + assert_(old < sys.getsizeof(d)) + + @pytest.mark.parametrize("dtype", ["u4,f4", "u4,O"]) + def test_resize_structured(self, dtype): + a = np.array([(0, 0.0) for i in range(5)], dtype=dtype) + a.resize(1000) + assert_array_equal(a, np.zeros(1000, dtype=dtype)) + + def test_error(self): + d = np.ones(100) + assert_raises(TypeError, d.__sizeof__, "a") + + +class TestHashing: + + def test_arrays_not_hashable(self): + x = np.ones(3) + assert_raises(TypeError, hash, x) + + def test_collections_hashable(self): + x = np.array([]) + assert_(not isinstance(x, collections.abc.Hashable)) + + +class TestArrayPriority: + # This will go away when __array_priority__ is settled, meanwhile + # it serves to check unintended changes. + op = operator + binary_ops = [ + op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod, + op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt, + op.ge, op.lt, op.le, op.ne, op.eq + ] + + class Foo(np.ndarray): + __array_priority__ = 100. + + def __new__(cls, *args, **kwargs): + return np.array(*args, **kwargs).view(cls) + + class Bar(np.ndarray): + __array_priority__ = 101. + + def __new__(cls, *args, **kwargs): + return np.array(*args, **kwargs).view(cls) + + class Other: + __array_priority__ = 1000. + + def _all(self, other): + return self.__class__() + + __add__ = __radd__ = _all + __sub__ = __rsub__ = _all + __mul__ = __rmul__ = _all + __pow__ = __rpow__ = _all + __mod__ = __rmod__ = _all + __truediv__ = __rtruediv__ = _all + __floordiv__ = __rfloordiv__ = _all + __and__ = __rand__ = _all + __xor__ = __rxor__ = _all + __or__ = __ror__ = _all + __lshift__ = __rlshift__ = _all + __rshift__ = __rrshift__ = _all + __eq__ = _all + __ne__ = _all + __gt__ = _all + __ge__ = _all + __lt__ = _all + __le__ = _all + + def test_ndarray_subclass(self): + a = np.array([1, 2]) + b = self.Bar([1, 2]) + for f in self.binary_ops: + msg = repr(f) + assert_(isinstance(f(a, b), self.Bar), msg) + assert_(isinstance(f(b, a), self.Bar), msg) + + def test_ndarray_other(self): + a = np.array([1, 2]) + b = self.Other() + for f in self.binary_ops: + msg = repr(f) + assert_(isinstance(f(a, b), self.Other), msg) + assert_(isinstance(f(b, a), self.Other), msg) + + def test_subclass_subclass(self): + a = self.Foo([1, 2]) + b = self.Bar([1, 2]) + for f in self.binary_ops: + msg = repr(f) + assert_(isinstance(f(a, b), self.Bar), msg) + assert_(isinstance(f(b, a), self.Bar), msg) + + def test_subclass_other(self): + a = self.Foo([1, 2]) + b = self.Other() + for f in self.binary_ops: + msg = repr(f) + assert_(isinstance(f(a, b), self.Other), msg) + assert_(isinstance(f(b, a), self.Other), msg) + + +class TestBytestringArrayNonzero: + + def test_empty_bstring_array_is_falsey(self): + assert_(not np.array([''], dtype=str)) + + def test_whitespace_bstring_array_is_truthy(self): + a = np.array(['spam'], dtype=str) + a[0] = ' \0\0' + assert_(a) + + def test_all_null_bstring_array_is_falsey(self): + a = np.array(['spam'], dtype=str) + a[0] = '\0\0\0\0' + assert_(not a) + + def test_null_inside_bstring_array_is_truthy(self): + a = np.array(['spam'], dtype=str) + a[0] = ' \0 \0' + assert_(a) + + +class TestUnicodeEncoding: + """ + Tests for encoding related bugs, such as UCS2 vs UCS4, round-tripping + issues, etc + """ + def test_round_trip(self): + """ Tests that GETITEM, SETITEM, and PyArray_Scalar roundtrip """ + # gh-15363 + arr = np.zeros(shape=(), dtype="U1") + for i in range(1, sys.maxunicode + 1): + expected = chr(i) + arr[()] = expected + assert arr[()] == expected + assert arr.item() == expected + + def test_assign_scalar(self): + # gh-3258 + l = np.array(['aa', 'bb']) + l[:] = np.str_('cc') + assert_equal(l, ['cc', 'cc']) + + def test_fill_scalar(self): + # gh-7227 + l = np.array(['aa', 'bb']) + l.fill(np.str_('cc')) + assert_equal(l, ['cc', 'cc']) + + +class TestUnicodeArrayNonzero: + + def test_empty_ustring_array_is_falsey(self): + assert_(not np.array([''], dtype=np.str_)) + + def test_whitespace_ustring_array_is_truthy(self): + a = np.array(['eggs'], dtype=np.str_) + a[0] = ' \0\0' + assert_(a) + + def test_all_null_ustring_array_is_falsey(self): + a = np.array(['eggs'], dtype=np.str_) + a[0] = '\0\0\0\0' + assert_(not a) + + def test_null_inside_ustring_array_is_truthy(self): + a = np.array(['eggs'], dtype=np.str_) + a[0] = ' \0 \0' + assert_(a) + + +class TestFormat: + + def test_0d(self): + a = np.array(np.pi) + assert_equal(f'{a:0.3g}', '3.14') + assert_equal(f'{a[()]:0.3g}', '3.14') + + def test_1d_no_format(self): + a = np.array([np.pi]) + assert_equal(f'{a}', str(a)) + + def test_1d_format(self): + # until gh-5543, ensure that the behaviour matches what it used to be + a = np.array([np.pi]) + assert_raises(TypeError, '{:30}'.format, a) + + +from numpy.testing import IS_PYPY + + +class TestCTypes: + + def test_ctypes_is_available(self): + test_arr = np.array([[1, 2, 3], [4, 5, 6]]) + + assert_equal(ctypes, test_arr.ctypes._ctypes) + assert_equal(tuple(test_arr.ctypes.shape), (2, 3)) + + @pytest.mark.thread_unsafe(reason="modifies global module state") + def test_ctypes_is_not_available(self): + from numpy._core import _internal + _internal.ctypes = None + try: + test_arr = np.array([[1, 2, 3], [4, 5, 6]]) + + assert_(isinstance(test_arr.ctypes._ctypes, + _internal._missing_ctypes)) + assert_equal(tuple(test_arr.ctypes.shape), (2, 3)) + finally: + _internal.ctypes = ctypes + + def _make_readonly(x): + x.flags.writeable = False + return x + + @pytest.mark.thread_unsafe(reason="calls gc.collect()") + @pytest.mark.parametrize('arr', [ + np.array([1, 2, 3]), + np.array([['one', 'two'], ['three', 'four']]), + np.array((1, 2), dtype='i4,i4'), + np.zeros((2,), dtype=np.dtype({ + "formats": [' 2, [44, 55]) + assert_equal(a, np.array([[0, 44], [1, 55], [2, 44]])) + # hit one of the failing paths + assert_raises(ValueError, np.place, a, a > 20, []) + + def test_put_noncontiguous(self): + a = np.arange(6).reshape(2, 3).T # force non-c-contiguous + np.put(a, [0, 2], [44, 55]) + assert_equal(a, np.array([[44, 3], [55, 4], [2, 5]])) + + def test_putmask_noncontiguous(self): + a = np.arange(6).reshape(2, 3).T # force non-c-contiguous + # uses arr_putmask + np.putmask(a, a > 2, a**2) + assert_equal(a, np.array([[0, 9], [1, 16], [2, 25]])) + + def test_take_mode_raise(self): + a = np.arange(6, dtype='int') + out = np.empty(2, dtype='int') + np.take(a, [0, 2], out=out, mode='raise') + assert_equal(out, np.array([0, 2])) + + def test_choose_mod_raise(self): + a = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]]) + out = np.empty((3, 3), dtype='int') + choices = [-10, 10] + np.choose(a, choices, out=out, mode='raise') + assert_equal(out, np.array([[ 10, -10, 10], + [-10, 10, -10], + [ 10, -10, 10]])) + + def test_flatiter__array__(self): + a = np.arange(9).reshape(3, 3) + b = a.T.flat + c = b.__array__() + # triggers the WRITEBACKIFCOPY resolution, assuming refcount semantics + del c + + def test_dot_out(self): + # if HAVE_CBLAS, will use WRITEBACKIFCOPY + a = np.arange(9, dtype=float).reshape(3, 3) + b = np.dot(a, a, out=a) + assert_equal(b, np.array([[15, 18, 21], [42, 54, 66], [69, 90, 111]])) + + def test_view_assign(self): + from numpy._core._multiarray_tests import ( + npy_create_writebackifcopy, + npy_resolve, + ) + + arr = np.arange(9).reshape(3, 3).T + arr_wb = npy_create_writebackifcopy(arr) + assert_(arr_wb.flags.writebackifcopy) + assert_(arr_wb.base is arr) + arr_wb[...] = -100 + npy_resolve(arr_wb) + # arr changes after resolve, even though we assigned to arr_wb + assert_equal(arr, -100) + # after resolve, the two arrays no longer reference each other + assert_(arr_wb.ctypes.data != 0) + assert_equal(arr_wb.base, None) + # assigning to arr_wb does not get transferred to arr + arr_wb[...] = 100 + assert_equal(arr, -100) + + @pytest.mark.leaks_references( + reason="increments self in dealloc; ignore since deprecated path.") + def test_dealloc_warning(self): + arr = np.arange(9).reshape(3, 3) + v = arr.T + with pytest.warns(RuntimeWarning): + _multiarray_tests.npy_abuse_writebackifcopy(v) + + def test_view_discard_refcount(self): + from numpy._core._multiarray_tests import ( + npy_create_writebackifcopy, + npy_discard, + ) + + arr = np.arange(9).reshape(3, 3).T + orig = arr.copy() + if HAS_REFCOUNT: + arr_cnt = sys.getrefcount(arr) + arr_wb = npy_create_writebackifcopy(arr) + assert_(arr_wb.flags.writebackifcopy) + assert_(arr_wb.base is arr) + arr_wb[...] = -100 + npy_discard(arr_wb) + # arr remains unchanged after discard + assert_equal(arr, orig) + # after discard, the two arrays no longer reference each other + assert_(arr_wb.ctypes.data != 0) + assert_equal(arr_wb.base, None) + if HAS_REFCOUNT: + assert_equal(arr_cnt, sys.getrefcount(arr)) + # assigning to arr_wb does not get transferred to arr + arr_wb[...] = 100 + assert_equal(arr, orig) + + +class TestArange: + def test_infinite(self): + assert_raises_regex( + ValueError, "size exceeded", + np.arange, 0, np.inf + ) + + def test_nan_step(self): + assert_raises_regex( + ValueError, "cannot compute length", + np.arange, 0, 1, np.nan + ) + + def test_zero_step(self): + assert_raises(ZeroDivisionError, np.arange, 0, 10, 0) + assert_raises(ZeroDivisionError, np.arange, 0.0, 10.0, 0.0) + + # empty range + assert_raises(ZeroDivisionError, np.arange, 0, 0, 0) + assert_raises(ZeroDivisionError, np.arange, 0.0, 0.0, 0.0) + + def test_require_range(self): + assert_raises(TypeError, np.arange) + assert_raises(TypeError, np.arange, step=3) + assert_raises(TypeError, np.arange, dtype='int64') + assert_raises(TypeError, np.arange, start=4) + + def test_start_stop_kwarg(self): + keyword_stop = np.arange(stop=3) + keyword_zerotostop = np.arange(0, stop=3) + keyword_start_stop = np.arange(start=3, stop=9) + + assert len(keyword_stop) == 3 + assert len(keyword_zerotostop) == 3 + assert len(keyword_start_stop) == 6 + assert_array_equal(keyword_stop, keyword_zerotostop) + + def test_arange_booleans(self): + # Arange makes some sense for booleans and works up to length 2. + # But it is weird since `arange(2, 4, dtype=bool)` works. + # Arguably, much or all of this could be deprecated/removed. + res = np.arange(False, dtype=bool) + assert_array_equal(res, np.array([], dtype="bool")) + + res = np.arange(True, dtype="bool") + assert_array_equal(res, [False]) + + res = np.arange(2, dtype="bool") + assert_array_equal(res, [False, True]) + + # This case is especially weird, but drops out without special case: + res = np.arange(6, 8, dtype="bool") + assert_array_equal(res, [True, True]) + + with pytest.raises(TypeError): + np.arange(3, dtype="bool") + + @pytest.mark.parametrize("dtype", ["S3", "U", "5i"]) + def test_rejects_bad_dtypes(self, dtype): + dtype = np.dtype(dtype) + DType_name = re.escape(str(type(dtype))) + with pytest.raises(TypeError, + match=rf"arange\(\) not supported for inputs .* {DType_name}"): + np.arange(2, dtype=dtype) + + def test_rejects_strings(self): + # Explicitly test error for strings which may call "b" - "a": + DType_name = re.escape(str(type(np.array("a").dtype))) + with pytest.raises(TypeError, + match=rf"arange\(\) not supported for inputs .* {DType_name}"): + np.arange("a", "b") + + def test_byteswapped(self): + res_be = np.arange(1, 1000, dtype=">i4") + res_le = np.arange(1, 1000, dtype="i4" + assert res_le.dtype == " arr2 + + +@pytest.mark.parametrize("op", [ + operator.eq, operator.ne, operator.le, operator.lt, operator.ge, + operator.gt]) +def test_comparisons_forwards_error(op): + class NotArray: + def __array__(self, dtype=None, copy=None): + raise TypeError("run you fools") + + with pytest.raises(TypeError, match="run you fools"): + op(np.arange(2), NotArray()) + + with pytest.raises(TypeError, match="run you fools"): + op(NotArray(), np.arange(2)) + + +def test_richcompare_scalar_boolean_singleton_return(): + # These are currently guaranteed to be the boolean numpy singletons + assert (np.array(0) == "a") is np.bool_(False) + assert (np.array(0) != "a") is np.bool_(True) + assert (np.int16(0) == "a") is np.bool_(False) + assert (np.int16(0) != "a") is np.bool_(True) + + +@pytest.mark.parametrize("op", [ + operator.eq, operator.ne, operator.le, operator.lt, operator.ge, + operator.gt]) +def test_ragged_comparison_fails(op): + # This needs to convert the internal array to True/False, which fails: + a = np.array([1, np.array([1, 2, 3])], dtype=object) + b = np.array([1, np.array([1, 2, 3])], dtype=object) + + with pytest.raises(ValueError, match="The truth value.*ambiguous"): + op(a, b) + + +@pytest.mark.parametrize( + ["fun", "npfun"], + [ + (_multiarray_tests.npy_cabs, np.absolute), + (_multiarray_tests.npy_carg, np.angle) + ] +) +@pytest.mark.parametrize("x", [1, np.inf, -np.inf, np.nan]) +@pytest.mark.parametrize("y", [1, np.inf, -np.inf, np.nan]) +@pytest.mark.parametrize("test_dtype", np.complexfloating.__subclasses__()) +def test_npymath_complex(fun, npfun, x, y, test_dtype): + # Smoketest npymath functions + z = test_dtype(complex(x, y)) + with np.errstate(invalid='ignore'): + # Fallback implementations may emit a warning for +-inf (see gh-24876): + # RuntimeWarning: invalid value encountered in absolute + got = fun(z) + expected = npfun(z) + assert_allclose(got, expected) + + +def test_npymath_real(): + # Smoketest npymath functions + from numpy._core._multiarray_tests import ( + npy_cosh, + npy_log10, + npy_sinh, + npy_tan, + npy_tanh, + ) + + funcs = {npy_log10: np.log10, + npy_cosh: np.cosh, + npy_sinh: np.sinh, + npy_tan: np.tan, + npy_tanh: np.tanh} + vals = (1, np.inf, -np.inf, np.nan) + types = (np.float32, np.float64, np.longdouble) + + with np.errstate(all='ignore'): + for fun, npfun in funcs.items(): + for x, t in itertools.product(vals, types): + z = t(x) + got = fun(z) + expected = npfun(z) + assert_allclose(got, expected) + +def test_uintalignment_and_alignment(): + # alignment code needs to satisfy these requirements: + # 1. numpy structs match C struct layout + # 2. ufuncs/casting is safe wrt to aligned access + # 3. copy code is safe wrt to "uint alidned" access + # + # Complex types are the main problem, whose alignment may not be the same + # as their "uint alignment". + # + # This test might only fail on certain platforms, where uint64 alignment is + # not equal to complex64 alignment. The second 2 tests will only fail + # for DEBUG=1. + + d1 = np.dtype('u1,c8', align=True) + d2 = np.dtype('u4,c8', align=True) + d3 = np.dtype({'names': ['a', 'b'], 'formats': ['u1', d1]}, align=True) + + assert_equal(np.zeros(1, dtype=d1)['f1'].flags['ALIGNED'], True) + assert_equal(np.zeros(1, dtype=d2)['f1'].flags['ALIGNED'], True) + assert_equal(np.zeros(1, dtype='u1,c8')['f1'].flags['ALIGNED'], False) + + # check that C struct matches numpy struct size + s = _multiarray_tests.get_struct_alignments() + for d, (alignment, size) in zip([d1, d2, d3], s): + assert_equal(d.alignment, alignment) + assert_equal(d.itemsize, size) + + # check that ufuncs don't complain in debug mode + # (this is probably OK if the aligned flag is true above) + src = np.zeros((2, 2), dtype=d1)['f1'] # 4-byte aligned, often + np.exp(src) # assert fails? + + # check that copy code doesn't complain in debug mode + dst = np.zeros((2, 2), dtype='c8') + dst[:, 1] = src[:, 1] # assert in lowlevel_strided_loops fails? + +class TestAlignment: + # adapted from scipy._lib.tests.test__util.test__aligned_zeros + # Checks that unusual memory alignments don't trip up numpy. + + def check(self, shape, dtype, order, align): + err_msg = repr((shape, dtype, order, align)) + x = _aligned_zeros(shape, dtype, order, align=align) + if align is None: + align = np.dtype(dtype).alignment + assert_equal(x.__array_interface__['data'][0] % align, 0) + if hasattr(shape, '__len__'): + assert_equal(x.shape, shape, err_msg) + else: + assert_equal(x.shape, (shape,), err_msg) + assert_equal(x.dtype, dtype) + if order == "C": + assert_(x.flags.c_contiguous, err_msg) + elif order == "F": + if x.size > 0: + assert_(x.flags.f_contiguous, err_msg) + elif order is None: + assert_(x.flags.c_contiguous, err_msg) + else: + raise ValueError + + def test_various_alignments(self): + for align in [1, 2, 3, 4, 8, 12, 16, 32, 64, None]: + for n in [0, 1, 3, 11]: + for order in ["C", "F", None]: + for dtype in list(np.typecodes["All"]) + ['i4,i4,i4']: + if dtype == 'O': + # object dtype can't be misaligned + continue + for shape in [n, (1, 2, 3, n)]: + self.check(shape, np.dtype(dtype), order, align) + + def test_strided_loop_alignments(self): + # particularly test that complex64 and float128 use right alignment + # code-paths, since these are particularly problematic. It is useful to + # turn on USE_DEBUG for this test, so lowlevel-loop asserts are run. + for align in [1, 2, 4, 8, 12, 16, None]: + xf64 = _aligned_zeros(3, np.float64) + + xc64 = _aligned_zeros(3, np.complex64, align=align) + xf128 = _aligned_zeros(3, np.longdouble, align=align) + + # test casting, both to and from misaligned + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', "Casting complex values", ComplexWarning) + xc64.astype('f8') + xf64.astype(np.complex64) + test = xc64 + xf64 + + xf128.astype('f8') + xf64.astype(np.longdouble) + test = xf128 + xf64 + + test = xf128 + xc64 + + # test copy, both to and from misaligned + # contig copy + xf64[:] = xf64.copy() + xc64[:] = xc64.copy() + xf128[:] = xf128.copy() + # strided copy + xf64[::2] = xf64[::2].copy() + xc64[::2] = xc64[::2].copy() + xf128[::2] = xf128[::2].copy() + +def test_getfield(): + a = np.arange(32, dtype='uint16') + if sys.byteorder == 'little': + i = 0 + j = 1 + else: + i = 1 + j = 0 + b = a.getfield('int8', i) + assert_equal(b, a) + b = a.getfield('int8', j) + assert_equal(b, 0) + pytest.raises(ValueError, a.getfield, 'uint8', -1) + pytest.raises(ValueError, a.getfield, 'uint8', 16) + pytest.raises(ValueError, a.getfield, 'uint64', 0) + +class TestViewDtype: + """ + Verify that making a view of a non-contiguous array works as expected. + """ + def test_smaller_dtype_multiple(self): + # x is non-contiguous + x = np.arange(10, dtype=' 10: + arrs.pop(0) + elif len(arrs) <= 10: + arrs.extend([np.array([1, 2, 3]) for _ in range(1000)]) + + def replace_list_items(b): + b.wait() + rng = np.random.RandomState() + rng.seed(0x4d3d3d3) + while done < 4: + data = rng.randint(0, 1000, size=4) + arrs[data[0]] = data[1:] + + for mutation_func in (replace_list_items, contract_and_expand_list): + b = threading.Barrier(5) + try: + with concurrent.futures.ThreadPoolExecutor(max_workers=5) as tpe: + tasks = [tpe.submit(read_arrs, b) for _ in range(4)] + tasks.append(tpe.submit(mutation_func, b)) + for t in tasks: + t.result() + except RuntimeError as e: + if outcome == "success": + raise + assert "Inconsistent object during array creation?" in str(e) + msg = "replace_list_items should not raise errors" + assert mutation_func is contract_and_expand_list, msg + finally: + if len(tasks) < 5: + b.abort() + +@pytest.mark.skipif(sys.version_info < (3, 12), reason="Python >= 3.12 required") +def test_array__buffer__thread_safety(): + import inspect + arr = np.arange(1000) + flags = [inspect.BufferFlags.STRIDED, inspect.BufferFlags.READ] + + def func(b): + b.wait() + for i in range(100): + arr.__buffer__(flags[i % 2]) + + run_threaded(func, max_workers=8, pass_barrier=True) + +@pytest.mark.skipif(sys.version_info < (3, 12), reason="Python >= 3.12 required") +def test_void_dtype__buffer__thread_safety(): + import inspect + dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + x = np.array(('ndarray_scalar', (1.2, 3.0)), dtype=dt)[()] + assert isinstance(x, np.void) + flags = [inspect.BufferFlags.STRIDES, inspect.BufferFlags.READ] + + def func(b): + b.wait() + for i in range(100): + x.__buffer__(flags[i % 2]) + + run_threaded(func, max_workers=8, pass_barrier=True) diff --git a/local_packages/numpy/_core/tests/test_nditer.py b/local_packages/numpy/_core/tests/test_nditer.py new file mode 100644 index 0000000..d2fc69a --- /dev/null +++ b/local_packages/numpy/_core/tests/test_nditer.py @@ -0,0 +1,3533 @@ +import inspect +import subprocess +import sys +import textwrap +import warnings + +import pytest + +import numpy as np +import numpy._core._multiarray_tests as _multiarray_tests +import numpy._core.umath as ncu +from numpy import all, arange, array, nditer +from numpy.testing import ( + HAS_REFCOUNT, + IS_64BIT, + IS_PYPY, + IS_WASM, + assert_, + assert_array_equal, + assert_equal, + assert_raises, +) +from numpy.testing._private.utils import requires_memory + + +def iter_multi_index(i): + ret = [] + while not i.finished: + ret.append(i.multi_index) + i.iternext() + return ret + +def iter_indices(i): + ret = [] + while not i.finished: + ret.append(i.index) + i.iternext() + return ret + +def iter_iterindices(i): + ret = [] + while not i.finished: + ret.append(i.iterindex) + i.iternext() + return ret + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_iter_refcount(): + # Make sure the iterator doesn't leak + + # Basic + a = arange(6) + dt = np.dtype('f4').newbyteorder() + rc_a = sys.getrefcount(a) + rc_dt = sys.getrefcount(dt) + with nditer(a, [], + [['readwrite', 'updateifcopy']], + casting='unsafe', + op_dtypes=[dt]) as it: + assert_(not it.iterationneedsapi) + assert_(sys.getrefcount(a) > rc_a) + assert_(sys.getrefcount(dt) > rc_dt) + # del 'it' + it = None + assert_equal(sys.getrefcount(a), rc_a) + assert_equal(sys.getrefcount(dt), rc_dt) + + # With a copy + a = arange(6, dtype='f4') + dt = np.dtype('f4') + rc_a = sys.getrefcount(a) + rc_dt = sys.getrefcount(dt) + it = nditer(a, [], + [['readwrite']], + op_dtypes=[dt]) + rc2_a = sys.getrefcount(a) + rc2_dt = sys.getrefcount(dt) + it2 = it.copy() + assert_(sys.getrefcount(a) > rc2_a) + if sys.version_info < (3, 13): + # np.dtype('f4') is immortal after Python 3.13 + assert_(sys.getrefcount(dt) > rc2_dt) + it = None + assert_equal(sys.getrefcount(a), rc2_a) + assert_equal(sys.getrefcount(dt), rc2_dt) + it2 = None + assert_equal(sys.getrefcount(a), rc_a) + assert_equal(sys.getrefcount(dt), rc_dt) + +def test_iter_best_order(): + # The iterator should always find the iteration order + # with increasing memory addresses + + # Test the ordering for 1-D to 5-D shapes + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + a = arange(np.prod(shape)) + # Test each combination of positive and negative strides + for dirs in range(2**len(shape)): + dirs_index = [slice(None)] * len(shape) + for bit in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, [], [['readonly']]) + assert_equal(list(i), a) + # Fortran-order + i = nditer(aview.T, [], [['readonly']]) + assert_equal(list(i), a) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), [], [['readonly']]) + assert_equal(list(i), a) + +def test_iter_c_order(): + # Test forcing C order + + # Test the ordering for 1-D to 5-D shapes + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + a = arange(np.prod(shape)) + # Test each combination of positive and negative strides + for dirs in range(2**len(shape)): + dirs_index = [slice(None)] * len(shape) + for bit in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, order='C') + assert_equal(list(i), aview.ravel(order='C')) + # Fortran-order + i = nditer(aview.T, order='C') + assert_equal(list(i), aview.T.ravel(order='C')) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), order='C') + assert_equal(list(i), + aview.swapaxes(0, 1).ravel(order='C')) + +def test_iter_f_order(): + # Test forcing F order + + # Test the ordering for 1-D to 5-D shapes + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + a = arange(np.prod(shape)) + # Test each combination of positive and negative strides + for dirs in range(2**len(shape)): + dirs_index = [slice(None)] * len(shape) + for bit in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, order='F') + assert_equal(list(i), aview.ravel(order='F')) + # Fortran-order + i = nditer(aview.T, order='F') + assert_equal(list(i), aview.T.ravel(order='F')) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), order='F') + assert_equal(list(i), + aview.swapaxes(0, 1).ravel(order='F')) + +def test_iter_c_or_f_order(): + # Test forcing any contiguous (C or F) order + + # Test the ordering for 1-D to 5-D shapes + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + a = arange(np.prod(shape)) + # Test each combination of positive and negative strides + for dirs in range(2**len(shape)): + dirs_index = [slice(None)] * len(shape) + for bit in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, order='A') + assert_equal(list(i), aview.ravel(order='A')) + # Fortran-order + i = nditer(aview.T, order='A') + assert_equal(list(i), aview.T.ravel(order='A')) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), order='A') + assert_equal(list(i), + aview.swapaxes(0, 1).ravel(order='A')) + +def test_nditer_multi_index_set(): + # Test the multi_index set + a = np.arange(6).reshape(2, 3) + it = np.nditer(a, flags=['multi_index']) + + # Removes the iteration on two first elements of a[0] + it.multi_index = (0, 2,) + + assert_equal(list(it), [2, 3, 4, 5]) + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_nditer_multi_index_set_refcount(): + # Test if the reference count on index variable is decreased + + index = 0 + i = np.nditer(np.array([111, 222, 333, 444]), flags=['multi_index']) + + start_count = sys.getrefcount(index) + i.multi_index = (index,) + end_count = sys.getrefcount(index) + + assert_equal(start_count, end_count) + +def test_iter_best_order_multi_index_1d(): + # The multi-indices should be correct with any reordering + + a = arange(4) + # 1D order + i = nditer(a, ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0,), (1,), (2,), (3,)]) + # 1D reversed order + i = nditer(a[::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(3,), (2,), (1,), (0,)]) + +def test_iter_best_order_multi_index_2d(): + # The multi-indices should be correct with any reordering + + a = arange(6) + # 2D C-order + i = nditer(a.reshape(2, 3), ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]) + # 2D Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F'), ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 0), (1, 0), (0, 1), (1, 1), (0, 2), (1, 2)]) + # 2D reversed C-order + i = nditer(a.reshape(2, 3)[::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 0), (1, 1), (1, 2), (0, 0), (0, 1), (0, 2)]) + i = nditer(a.reshape(2, 3)[:, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 2), (0, 1), (0, 0), (1, 2), (1, 1), (1, 0)]) + i = nditer(a.reshape(2, 3)[::-1, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 2), (1, 1), (1, 0), (0, 2), (0, 1), (0, 0)]) + # 2D reversed Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F')[::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 0), (0, 0), (1, 1), (0, 1), (1, 2), (0, 2)]) + i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 2), (1, 2), (0, 1), (1, 1), (0, 0), (1, 0)]) + i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 2), (0, 2), (1, 1), (0, 1), (1, 0), (0, 0)]) + +def test_iter_best_order_multi_index_3d(): + # The multi-indices should be correct with any reordering + + a = arange(12) + # 3D C-order + i = nditer(a.reshape(2, 3, 2), ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1), + (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1)]) + # 3D Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0), + (0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1)]) + # 3D reversed C-order + i = nditer(a.reshape(2, 3, 2)[::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1), + (0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1)]) + i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 2, 0), (0, 2, 1), (0, 1, 0), (0, 1, 1), (0, 0, 0), (0, 0, 1), + (1, 2, 0), (1, 2, 1), (1, 1, 0), (1, 1, 1), (1, 0, 0), (1, 0, 1)]) + i = nditer(a.reshape(2, 3, 2)[:, :, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 0, 1), (0, 0, 0), (0, 1, 1), (0, 1, 0), (0, 2, 1), (0, 2, 0), + (1, 0, 1), (1, 0, 0), (1, 1, 1), (1, 1, 0), (1, 2, 1), (1, 2, 0)]) + # 3D reversed Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(1, 0, 0), (0, 0, 0), (1, 1, 0), (0, 1, 0), (1, 2, 0), (0, 2, 0), + (1, 0, 1), (0, 0, 1), (1, 1, 1), (0, 1, 1), (1, 2, 1), (0, 2, 1)]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 2, 0), (1, 2, 0), (0, 1, 0), (1, 1, 0), (0, 0, 0), (1, 0, 0), + (0, 2, 1), (1, 2, 1), (0, 1, 1), (1, 1, 1), (0, 0, 1), (1, 0, 1)]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, :, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1), + (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0)]) + +def test_iter_best_order_c_index_1d(): + # The C index should be correct with any reordering + + a = arange(4) + # 1D order + i = nditer(a, ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3]) + # 1D reversed order + i = nditer(a[::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 2, 1, 0]) + +def test_iter_best_order_c_index_2d(): + # The C index should be correct with any reordering + + a = arange(6) + # 2D C-order + i = nditer(a.reshape(2, 3), ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5]) + # 2D Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F'), + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 3, 1, 4, 2, 5]) + # 2D reversed C-order + i = nditer(a.reshape(2, 3)[::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 4, 5, 0, 1, 2]) + i = nditer(a.reshape(2, 3)[:, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [2, 1, 0, 5, 4, 3]) + i = nditer(a.reshape(2, 3)[::-1, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0]) + # 2D reversed Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F')[::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 0, 4, 1, 5, 2]) + i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [2, 5, 1, 4, 0, 3]) + i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 2, 4, 1, 3, 0]) + +def test_iter_best_order_c_index_3d(): + # The C index should be correct with any reordering + + a = arange(12) + # 3D C-order + i = nditer(a.reshape(2, 3, 2), ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + # 3D Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F'), + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11]) + # 3D reversed C-order + i = nditer(a.reshape(2, 3, 2)[::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) + i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) + i = nditer(a.reshape(2, 3, 2)[:, :, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) + # 3D reversed Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, :, ::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) + +def test_iter_best_order_f_index_1d(): + # The Fortran index should be correct with any reordering + + a = arange(4) + # 1D order + i = nditer(a, ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3]) + # 1D reversed order + i = nditer(a[::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 2, 1, 0]) + +def test_iter_best_order_f_index_2d(): + # The Fortran index should be correct with any reordering + + a = arange(6) + # 2D C-order + i = nditer(a.reshape(2, 3), ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 2, 4, 1, 3, 5]) + # 2D Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F'), + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5]) + # 2D reversed C-order + i = nditer(a.reshape(2, 3)[::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [1, 3, 5, 0, 2, 4]) + i = nditer(a.reshape(2, 3)[:, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [4, 2, 0, 5, 3, 1]) + i = nditer(a.reshape(2, 3)[::-1, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 3, 1, 4, 2, 0]) + # 2D reversed Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F')[::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [1, 0, 3, 2, 5, 4]) + i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1]) + i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0]) + +def test_iter_best_order_f_index_3d(): + # The Fortran index should be correct with any reordering + + a = arange(12) + # 3D C-order + i = nditer(a.reshape(2, 3, 2), ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11]) + # 3D Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F'), + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + # 3D reversed C-order + i = nditer(a.reshape(2, 3, 2)[::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) + i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) + i = nditer(a.reshape(2, 3, 2)[:, :, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) + # 3D reversed Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, :, ::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) + +def test_iter_no_inner_full_coalesce(): + # Check no_inner iterators which coalesce into a single inner loop + + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + size = np.prod(shape) + a = arange(size) + # Test each combination of forward and backwards indexing + for dirs in range(2**len(shape)): + dirs_index = [slice(None)] * len(shape) + for bit in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (size,)) + # Fortran-order + i = nditer(aview.T, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (size,)) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), + ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (size,)) + +def test_iter_no_inner_dim_coalescing(): + # Check no_inner iterators whose dimensions may not coalesce completely + + # Skipping the last element in a dimension prevents coalescing + # with the next-bigger dimension + a = arange(24).reshape(2, 3, 4)[:, :, :-1] + i = nditer(a, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 2) + assert_equal(i[0].shape, (3,)) + a = arange(24).reshape(2, 3, 4)[:, :-1, :] + i = nditer(a, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 2) + assert_equal(i[0].shape, (8,)) + a = arange(24).reshape(2, 3, 4)[:-1, :, :] + i = nditer(a, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (12,)) + + # Even with lots of 1-sized dimensions, should still coalesce + a = arange(24).reshape(1, 1, 2, 1, 1, 3, 1, 1, 4, 1, 1) + i = nditer(a, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (24,)) + +def test_iter_dim_coalescing(): + # Check that the correct number of dimensions are coalesced + + # Tracking a multi-index disables coalescing + a = arange(24).reshape(2, 3, 4) + i = nditer(a, ['multi_index'], [['readonly']]) + assert_equal(i.ndim, 3) + + # A tracked index can allow coalescing if it's compatible with the array + a3d = arange(24).reshape(2, 3, 4) + i = nditer(a3d, ['c_index'], [['readonly']]) + assert_equal(i.ndim, 1) + i = nditer(a3d.swapaxes(0, 1), ['c_index'], [['readonly']]) + assert_equal(i.ndim, 3) + i = nditer(a3d.T, ['c_index'], [['readonly']]) + assert_equal(i.ndim, 3) + i = nditer(a3d.T, ['f_index'], [['readonly']]) + assert_equal(i.ndim, 1) + i = nditer(a3d.T.swapaxes(0, 1), ['f_index'], [['readonly']]) + assert_equal(i.ndim, 3) + + # When C or F order is forced, coalescing may still occur + a3d = arange(24).reshape(2, 3, 4) + i = nditer(a3d, order='C') + assert_equal(i.ndim, 1) + i = nditer(a3d.T, order='C') + assert_equal(i.ndim, 3) + i = nditer(a3d, order='F') + assert_equal(i.ndim, 3) + i = nditer(a3d.T, order='F') + assert_equal(i.ndim, 1) + i = nditer(a3d, order='A') + assert_equal(i.ndim, 1) + i = nditer(a3d.T, order='A') + assert_equal(i.ndim, 1) + +def test_iter_broadcasting(): + # Standard NumPy broadcasting rules + + # 1D with scalar + i = nditer([arange(6), np.int32(2)], ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (6,)) + + # 2D with scalar + i = nditer([arange(6).reshape(2, 3), np.int32(2)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (2, 3)) + # 2D with 1D + i = nditer([arange(6).reshape(2, 3), arange(3)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (2, 3)) + i = nditer([arange(2).reshape(2, 1), arange(3)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (2, 3)) + # 2D with 2D + i = nditer([arange(2).reshape(2, 1), arange(3).reshape(1, 3)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (2, 3)) + + # 3D with scalar + i = nditer([np.int32(2), arange(24).reshape(4, 2, 3)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + # 3D with 1D + i = nditer([arange(3), arange(24).reshape(4, 2, 3)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(3), arange(8).reshape(4, 2, 1)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + # 3D with 2D + i = nditer([arange(6).reshape(2, 3), arange(24).reshape(4, 2, 3)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(2).reshape(2, 1), arange(24).reshape(4, 2, 3)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(3).reshape(1, 3), arange(8).reshape(4, 2, 1)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + # 3D with 3D + i = nditer([arange(2).reshape(1, 2, 1), arange(3).reshape(1, 1, 3), + arange(4).reshape(4, 1, 1)], + ['multi_index'], [['readonly']] * 3) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(6).reshape(1, 2, 3), arange(4).reshape(4, 1, 1)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(24).reshape(4, 2, 3), arange(12).reshape(4, 1, 3)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + +def test_iter_itershape(): + # Check that allocated outputs work with a specified shape + a = np.arange(6, dtype='i2').reshape(2, 3) + i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], + op_axes=[[0, 1, None], None], + itershape=(-1, -1, 4)) + assert_equal(i.operands[1].shape, (2, 3, 4)) + assert_equal(i.operands[1].strides, (24, 8, 2)) + + i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']], + op_axes=[[0, 1, None], None], + itershape=(-1, -1, 4)) + assert_equal(i.operands[1].shape, (3, 2, 4)) + assert_equal(i.operands[1].strides, (8, 24, 2)) + + i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']], + order='F', + op_axes=[[0, 1, None], None], + itershape=(-1, -1, 4)) + assert_equal(i.operands[1].shape, (3, 2, 4)) + assert_equal(i.operands[1].strides, (2, 6, 12)) + + # If we specify 1 in the itershape, it shouldn't allow broadcasting + # of that dimension to a bigger value + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['writeonly', 'allocate']], + op_axes=[[0, 1, None], None], + itershape=(-1, 1, 4)) + # Test bug that for no op_axes but itershape, they are NULLed correctly + i = np.nditer([np.ones(2), None, None], itershape=(2,)) + +def test_iter_broadcasting_errors(): + # Check that errors are thrown for bad broadcasting shapes + + # 1D with 1D + assert_raises(ValueError, nditer, [arange(2), arange(3)], + [], [['readonly']] * 2) + # 2D with 1D + assert_raises(ValueError, nditer, + [arange(6).reshape(2, 3), arange(2)], + [], [['readonly']] * 2) + # 2D with 2D + assert_raises(ValueError, nditer, + [arange(6).reshape(2, 3), arange(9).reshape(3, 3)], + [], [['readonly']] * 2) + assert_raises(ValueError, nditer, + [arange(6).reshape(2, 3), arange(4).reshape(2, 2)], + [], [['readonly']] * 2) + # 3D with 3D + assert_raises(ValueError, nditer, + [arange(36).reshape(3, 3, 4), arange(24).reshape(2, 3, 4)], + [], [['readonly']] * 2) + assert_raises(ValueError, nditer, + [arange(8).reshape(2, 4, 1), arange(24).reshape(2, 3, 4)], + [], [['readonly']] * 2) + + # Verify that the error message mentions the right shapes + try: + nditer([arange(2).reshape(1, 2, 1), + arange(3).reshape(1, 3), + arange(6).reshape(2, 3)], + [], + [['readonly'], ['readonly'], ['writeonly', 'no_broadcast']]) + raise AssertionError('Should have raised a broadcast error') + except ValueError as e: + msg = str(e) + # The message should contain the shape of the 3rd operand + assert_(msg.find('(2,3)') >= 0, + f'Message "{msg}" doesn\'t contain operand shape (2,3)') + # The message should contain the broadcast shape + assert_(msg.find('(1,2,3)') >= 0, + f'Message "{msg}" doesn\'t contain broadcast shape (1,2,3)') + + try: + nditer([arange(6).reshape(2, 3), arange(2)], + [], + [['readonly'], ['readonly']], + op_axes=[[0, 1], [0, np.newaxis]], + itershape=(4, 3)) + raise AssertionError('Should have raised a broadcast error') + except ValueError as e: + msg = str(e) + # The message should contain "shape->remappedshape" for each operand + assert_(msg.find('(2,3)->(2,3)') >= 0, + f'Message "{msg}" doesn\'t contain operand shape (2,3)->(2,3)') + assert_(msg.find('(2,)->(2,newaxis)') >= 0, + ('Message "%s" doesn\'t contain remapped operand shape' + '(2,)->(2,newaxis)') % msg) + # The message should contain the itershape parameter + assert_(msg.find('(4,3)') >= 0, + f'Message "{msg}" doesn\'t contain itershape parameter (4,3)') + + try: + nditer([np.zeros((2, 1, 1)), np.zeros((2,))], + [], + [['writeonly', 'no_broadcast'], ['readonly']]) + raise AssertionError('Should have raised a broadcast error') + except ValueError as e: + msg = str(e) + # The message should contain the shape of the bad operand + assert_(msg.find('(2,1,1)') >= 0, + f'Message "{msg}" doesn\'t contain operand shape (2,1,1)') + # The message should contain the broadcast shape + assert_(msg.find('(2,1,2)') >= 0, + f'Message "{msg}" doesn\'t contain the broadcast shape (2,1,2)') + +def test_iter_flags_errors(): + # Check that bad combinations of flags produce errors + + a = arange(6) + + # Not enough operands + assert_raises(ValueError, nditer, [], [], []) + # Bad global flag + assert_raises(ValueError, nditer, [a], ['bad flag'], [['readonly']]) + # Bad op flag + assert_raises(ValueError, nditer, [a], [], [['readonly', 'bad flag']]) + # Bad order parameter + assert_raises(ValueError, nditer, [a], [], [['readonly']], order='G') + # Bad casting parameter + assert_raises(ValueError, nditer, [a], [], [['readonly']], casting='noon') + # op_flags must match ops + assert_raises(ValueError, nditer, [a] * 3, [], [['readonly']] * 2) + # Cannot track both a C and an F index + assert_raises(ValueError, nditer, a, + ['c_index', 'f_index'], [['readonly']]) + # Inner iteration and multi-indices/indices are incompatible + assert_raises(ValueError, nditer, a, + ['external_loop', 'multi_index'], [['readonly']]) + assert_raises(ValueError, nditer, a, + ['external_loop', 'c_index'], [['readonly']]) + assert_raises(ValueError, nditer, a, + ['external_loop', 'f_index'], [['readonly']]) + # Must specify exactly one of readwrite/readonly/writeonly per operand + assert_raises(ValueError, nditer, a, [], [[]]) + assert_raises(ValueError, nditer, a, [], [['readonly', 'writeonly']]) + assert_raises(ValueError, nditer, a, [], [['readonly', 'readwrite']]) + assert_raises(ValueError, nditer, a, [], [['writeonly', 'readwrite']]) + assert_raises(ValueError, nditer, a, + [], [['readonly', 'writeonly', 'readwrite']]) + # Python scalars are always readonly + assert_raises(TypeError, nditer, 1.5, [], [['writeonly']]) + assert_raises(TypeError, nditer, 1.5, [], [['readwrite']]) + # Array scalars are always readonly + assert_raises(TypeError, nditer, np.int32(1), [], [['writeonly']]) + assert_raises(TypeError, nditer, np.int32(1), [], [['readwrite']]) + # Check readonly array + a.flags.writeable = False + assert_raises(ValueError, nditer, a, [], [['writeonly']]) + assert_raises(ValueError, nditer, a, [], [['readwrite']]) + a.flags.writeable = True + # Multi-indices available only with the multi_index flag + i = nditer(arange(6), [], [['readonly']]) + assert_raises(ValueError, lambda i: i.multi_index, i) + # Index available only with an index flag + assert_raises(ValueError, lambda i: i.index, i) + # GotoCoords and GotoIndex incompatible with buffering or no_inner + + def assign_multi_index(i): + i.multi_index = (0,) + + def assign_index(i): + i.index = 0 + + def assign_iterindex(i): + i.iterindex = 0 + + def assign_iterrange(i): + i.iterrange = (0, 1) + i = nditer(arange(6), ['external_loop']) + assert_raises(ValueError, assign_multi_index, i) + assert_raises(ValueError, assign_index, i) + assert_raises(ValueError, assign_iterindex, i) + assert_raises(ValueError, assign_iterrange, i) + i = nditer(arange(6), ['buffered']) + assert_raises(ValueError, assign_multi_index, i) + assert_raises(ValueError, assign_index, i) + assert_raises(ValueError, assign_iterrange, i) + # Can't iterate if size is zero + assert_raises(ValueError, nditer, np.array([])) + +def test_iter_slice(): + a, b, c = np.arange(3), np.arange(3), np.arange(3.) + i = nditer([a, b, c], [], ['readwrite']) + with i: + i[0:2] = (3, 3) + assert_equal(a, [3, 1, 2]) + assert_equal(b, [3, 1, 2]) + assert_equal(c, [0, 1, 2]) + i[1] = 12 + assert_equal(i[0:2], [3, 12]) + +def test_iter_assign_mapping(): + a = np.arange(24, dtype='f8').reshape(2, 3, 4).T + it = np.nditer(a, [], [['readwrite', 'updateifcopy']], + casting='same_kind', op_dtypes=[np.dtype('f4')]) + with it: + it.operands[0][...] = 3 + it.operands[0][...] = 14 + assert_equal(a, 14) + it = np.nditer(a, [], [['readwrite', 'updateifcopy']], + casting='same_kind', op_dtypes=[np.dtype('f4')]) + with it: + x = it.operands[0][-1:1] + x[...] = 14 + it.operands[0][...] = -1234 + assert_equal(a, -1234) + # check for no warnings on dealloc + x = None + it = None + +def test_iter_nbo_align_contig(): + # Check that byte order, alignment, and contig changes work + + # Byte order change by requesting a specific dtype + a = np.arange(6, dtype='f4') + au = a.byteswap() + au = au.view(au.dtype.newbyteorder()) + assert_(a.dtype.byteorder != au.dtype.byteorder) + i = nditer(au, [], [['readwrite', 'updateifcopy']], + casting='equiv', + op_dtypes=[np.dtype('f4')]) + with i: + # context manager triggers WRITEBACKIFCOPY on i at exit + assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder) + assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder) + assert_equal(i.operands[0], a) + i.operands[0][:] = 2 + assert_equal(au, [2] * 6) + del i # should not raise a warning + # Byte order change by requesting NBO + a = np.arange(6, dtype='f4') + au = a.byteswap() + au = au.view(au.dtype.newbyteorder()) + assert_(a.dtype.byteorder != au.dtype.byteorder) + with nditer(au, [], [['readwrite', 'updateifcopy', 'nbo']], + casting='equiv') as i: + # context manager triggers UPDATEIFCOPY on i at exit + assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder) + assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder) + assert_equal(i.operands[0], a) + i.operands[0][:] = 12345 + i.operands[0][:] = 2 + assert_equal(au, [2] * 6) + + # Unaligned input + a = np.zeros((6 * 4 + 1,), dtype='i1')[1:] + a = a.view('f4') + a[:] = np.arange(6, dtype='f4') + assert_(not a.flags.aligned) + # Without 'aligned', shouldn't copy + i = nditer(a, [], [['readonly']]) + assert_(not i.operands[0].flags.aligned) + assert_equal(i.operands[0], a) + # With 'aligned', should make a copy + with nditer(a, [], [['readwrite', 'updateifcopy', 'aligned']]) as i: + assert_(i.operands[0].flags.aligned) + # context manager triggers UPDATEIFCOPY on i at exit + assert_equal(i.operands[0], a) + i.operands[0][:] = 3 + assert_equal(a, [3] * 6) + + # Discontiguous input + a = arange(12) + # If it is contiguous, shouldn't copy + i = nditer(a[:6], [], [['readonly']]) + assert_(i.operands[0].flags.contiguous) + assert_equal(i.operands[0], a[:6]) + # If it isn't contiguous, should buffer + i = nditer(a[::2], ['buffered', 'external_loop'], + [['readonly', 'contig']], + buffersize=10) + assert_(i[0].flags.contiguous) + assert_equal(i[0], a[::2]) + +def test_iter_array_cast(): + # Check that arrays are cast as requested + + # No cast 'f4' -> 'f4' + a = np.arange(6, dtype='f4').reshape(2, 3) + i = nditer(a, [], [['readwrite']], op_dtypes=[np.dtype('f4')]) + with i: + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('f4')) + + # Byte-order cast ' '>f4' + a = np.arange(6, dtype='f4')]) as i: + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('>f4')) + + # Safe case 'f4' -> 'f8' + a = np.arange(24, dtype='f4').reshape(2, 3, 4).swapaxes(1, 2) + i = nditer(a, [], [['readonly', 'copy']], + casting='safe', + op_dtypes=[np.dtype('f8')]) + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('f8')) + # The memory layout of the temporary should match a (a is (48,4,16)) + # except negative strides get flipped to positive strides. + assert_equal(i.operands[0].strides, (96, 8, 32)) + a = a[::-1, :, ::-1] + i = nditer(a, [], [['readonly', 'copy']], + casting='safe', + op_dtypes=[np.dtype('f8')]) + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('f8')) + assert_equal(i.operands[0].strides, (96, 8, 32)) + + # Same-kind cast 'f8' -> 'f4' -> 'f8' + a = np.arange(24, dtype='f8').reshape(2, 3, 4).T + with nditer(a, [], + [['readwrite', 'updateifcopy']], + casting='same_kind', + op_dtypes=[np.dtype('f4')]) as i: + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('f4')) + assert_equal(i.operands[0].strides, (4, 16, 48)) + # Check that WRITEBACKIFCOPY is activated at exit + i.operands[0][2, 1, 1] = -12.5 + assert_(a[2, 1, 1] != -12.5) + assert_equal(a[2, 1, 1], -12.5) + + a = np.arange(6, dtype='i4')[::-2] + with nditer(a, [], + [['writeonly', 'updateifcopy']], + casting='unsafe', + op_dtypes=[np.dtype('f4')]) as i: + assert_equal(i.operands[0].dtype, np.dtype('f4')) + # Even though the stride was negative in 'a', it + # becomes positive in the temporary + assert_equal(i.operands[0].strides, (4,)) + i.operands[0][:] = [1, 2, 3] + assert_equal(a, [1, 2, 3]) + +def test_iter_array_cast_errors(): + # Check that invalid casts are caught + + # Need to enable copying for casts to occur + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly']], op_dtypes=[np.dtype('f8')]) + # Also need to allow casting for casts to occur + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly', 'copy']], casting='no', + op_dtypes=[np.dtype('f8')]) + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly', 'copy']], casting='equiv', + op_dtypes=[np.dtype('f8')]) + assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], + [['writeonly', 'updateifcopy']], + casting='no', + op_dtypes=[np.dtype('f4')]) + assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], + [['writeonly', 'updateifcopy']], + casting='equiv', + op_dtypes=[np.dtype('f4')]) + # ' '>f4' should not work with casting='no' + assert_raises(TypeError, nditer, arange(2, dtype='f4')]) + # 'f4' -> 'f8' is a safe cast, but 'f8' -> 'f4' isn't + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readwrite', 'updateifcopy']], + casting='safe', + op_dtypes=[np.dtype('f8')]) + assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], + [['readwrite', 'updateifcopy']], + casting='safe', + op_dtypes=[np.dtype('f4')]) + # 'f4' -> 'i4' is neither a safe nor a same-kind cast + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly', 'copy']], + casting='same_kind', + op_dtypes=[np.dtype('i4')]) + assert_raises(TypeError, nditer, arange(2, dtype='i4'), [], + [['writeonly', 'updateifcopy']], + casting='same_kind', + op_dtypes=[np.dtype('f4')]) + +def test_iter_scalar_cast(): + # Check that scalars are cast as requested + + # No cast 'f4' -> 'f4' + i = nditer(np.float32(2.5), [], [['readonly']], + op_dtypes=[np.dtype('f4')]) + assert_equal(i.dtypes[0], np.dtype('f4')) + assert_equal(i.value.dtype, np.dtype('f4')) + assert_equal(i.value, 2.5) + # Safe cast 'f4' -> 'f8' + i = nditer(np.float32(2.5), [], + [['readonly', 'copy']], + casting='safe', + op_dtypes=[np.dtype('f8')]) + assert_equal(i.dtypes[0], np.dtype('f8')) + assert_equal(i.value.dtype, np.dtype('f8')) + assert_equal(i.value, 2.5) + # Same-kind cast 'f8' -> 'f4' + i = nditer(np.float64(2.5), [], + [['readonly', 'copy']], + casting='same_kind', + op_dtypes=[np.dtype('f4')]) + assert_equal(i.dtypes[0], np.dtype('f4')) + assert_equal(i.value.dtype, np.dtype('f4')) + assert_equal(i.value, 2.5) + # Unsafe cast 'f8' -> 'i4' + i = nditer(np.float64(3.0), [], + [['readonly', 'copy']], + casting='unsafe', + op_dtypes=[np.dtype('i4')]) + assert_equal(i.dtypes[0], np.dtype('i4')) + assert_equal(i.value.dtype, np.dtype('i4')) + assert_equal(i.value, 3) + # Readonly scalars may be cast even without setting COPY or BUFFERED + i = nditer(3, [], [['readonly']], op_dtypes=[np.dtype('f8')]) + assert_equal(i[0].dtype, np.dtype('f8')) + assert_equal(i[0], 3.) + +def test_iter_scalar_cast_errors(): + # Check that invalid casts are caught + + # Need to allow copying/buffering for write casts of scalars to occur + assert_raises(TypeError, nditer, np.float32(2), [], + [['readwrite']], op_dtypes=[np.dtype('f8')]) + assert_raises(TypeError, nditer, 2.5, [], + [['readwrite']], op_dtypes=[np.dtype('f4')]) + # 'f8' -> 'f4' isn't a safe cast if the value would overflow + assert_raises(TypeError, nditer, np.float64(1e60), [], + [['readonly']], + casting='safe', + op_dtypes=[np.dtype('f4')]) + # 'f4' -> 'i4' is neither a safe nor a same-kind cast + assert_raises(TypeError, nditer, np.float32(2), [], + [['readonly']], + casting='same_kind', + op_dtypes=[np.dtype('i4')]) + +def test_iter_object_arrays_basic(): + # Check that object arrays work + + obj = {'a': 3, 'b': 'd'} + a = np.array([[1, 2, 3], None, obj, None], dtype='O') + if HAS_REFCOUNT: + rc = sys.getrefcount(obj) + + # Need to allow references for object arrays + assert_raises(TypeError, nditer, a) + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(obj), rc) + + i = nditer(a, ['refs_ok'], ['readonly']) + vals = [x_[()] for x_ in i] + assert_equal(np.array(vals, dtype='O'), a) + vals, i, x = [None] * 3 + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(obj), rc) + + i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'], + ['readonly'], order='C') + assert_(i.iterationneedsapi) + vals = [x_[()] for x_ in i] + assert_equal(np.array(vals, dtype='O'), a.reshape(2, 2).ravel(order='F')) + vals, i, x = [None] * 3 + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(obj), rc) + + i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'], + ['readwrite'], order='C') + with i: + for x in i: + x[...] = None + vals, i, x = [None] * 3 + if HAS_REFCOUNT: + assert_(sys.getrefcount(obj) == rc - 1) + assert_equal(a, np.array([None] * 4, dtype='O')) + +def test_iter_object_arrays_conversions(): + # Conversions to/from objects + a = np.arange(6, dtype='O') + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], + casting='unsafe', op_dtypes='i4') + with i: + for x in i: + x[...] += 1 + assert_equal(a, np.arange(6) + 1) + + a = np.arange(6, dtype='i4') + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], + casting='unsafe', op_dtypes='O') + with i: + for x in i: + x[...] += 1 + assert_equal(a, np.arange(6) + 1) + + # Non-contiguous object array + a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'O')]) + a = a['a'] + a[:] = np.arange(6) + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], + casting='unsafe', op_dtypes='i4') + with i: + for x in i: + x[...] += 1 + assert_equal(a, np.arange(6) + 1) + + # Non-contiguous value array + a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'i4')]) + a = a['a'] + a[:] = np.arange(6) + 98172488 + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], + casting='unsafe', op_dtypes='O') + with i: + ob = i[0][()] + if HAS_REFCOUNT: + rc = sys.getrefcount(ob) + for x in i: + x[...] += 1 + if HAS_REFCOUNT: + newrc = sys.getrefcount(ob) + assert_(newrc == rc - 1) + assert_equal(a, np.arange(6) + 98172489) + +def test_iter_common_dtype(): + # Check that the iterator finds a common data type correctly + # (some checks are somewhat duplicate after adopting NEP 50) + + i = nditer([array([3], dtype='f4'), array([0], dtype='f8')], + ['common_dtype'], + [['readonly', 'copy']] * 2, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('f8')) + assert_equal(i.dtypes[1], np.dtype('f8')) + i = nditer([array([3], dtype='i4'), array([0], dtype='f4')], + ['common_dtype'], + [['readonly', 'copy']] * 2, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('f8')) + assert_equal(i.dtypes[1], np.dtype('f8')) + i = nditer([array([3], dtype='f4'), array(0, dtype='f8')], + ['common_dtype'], + [['readonly', 'copy']] * 2, + casting='same_kind') + assert_equal(i.dtypes[0], np.dtype('f8')) + assert_equal(i.dtypes[1], np.dtype('f8')) + i = nditer([array([3], dtype='u4'), array(0, dtype='i4')], + ['common_dtype'], + [['readonly', 'copy']] * 2, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('i8')) + assert_equal(i.dtypes[1], np.dtype('i8')) + i = nditer([array([3], dtype='u4'), array(-12, dtype='i4')], + ['common_dtype'], + [['readonly', 'copy']] * 2, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('i8')) + assert_equal(i.dtypes[1], np.dtype('i8')) + i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), + array([2j], dtype='c8'), array([9], dtype='f8')], + ['common_dtype'], + [['readonly', 'copy']] * 4, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('c16')) + assert_equal(i.dtypes[1], np.dtype('c16')) + assert_equal(i.dtypes[2], np.dtype('c16')) + assert_equal(i.dtypes[3], np.dtype('c16')) + assert_equal(i.value, (3, -12, 2j, 9)) + + # When allocating outputs, other outputs aren't factored in + i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], [], + [['readonly', 'copy'], + ['writeonly', 'allocate'], + ['writeonly']], + casting='safe') + assert_equal(i.dtypes[0], np.dtype('i4')) + assert_equal(i.dtypes[1], np.dtype('i4')) + assert_equal(i.dtypes[2], np.dtype('c16')) + # But, if common data types are requested, they are + i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], + ['common_dtype'], + [['readonly', 'copy'], + ['writeonly', 'allocate'], + ['writeonly']], + casting='safe') + assert_equal(i.dtypes[0], np.dtype('c16')) + assert_equal(i.dtypes[1], np.dtype('c16')) + assert_equal(i.dtypes[2], np.dtype('c16')) + +def test_iter_copy_if_overlap(): + # Ensure the iterator makes copies on read/write overlap, if requested + + # Copy not needed, 1 op + for flag in ['readonly', 'writeonly', 'readwrite']: + a = arange(10) + i = nditer([a], ['copy_if_overlap'], [[flag]]) + with i: + assert_(i.operands[0] is a) + + # Copy needed, 2 ops, read-write overlap + x = arange(10) + a = x[1:] + b = x[:-1] + with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i: + assert_(not np.shares_memory(*i.operands)) + + # Copy not needed with elementwise, 2 ops, exactly same arrays + x = arange(10) + a = x + b = x + i = nditer([a, b], ['copy_if_overlap'], [['readonly', 'overlap_assume_elementwise'], + ['readwrite', 'overlap_assume_elementwise']]) + with i: + assert_(i.operands[0] is a and i.operands[1] is b) + with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i: + assert_(i.operands[0] is a and not np.shares_memory(i.operands[1], b)) + + # Copy not needed, 2 ops, no overlap + x = arange(10) + a = x[::2] + b = x[1::2] + i = nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']]) + assert_(i.operands[0] is a and i.operands[1] is b) + + # Copy needed, 2 ops, read-write overlap + x = arange(4, dtype=np.int8) + a = x[3:] + b = x.view(np.int32)[:1] + with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']]) as i: + assert_(not np.shares_memory(*i.operands)) + + # Copy needed, 3 ops, read-write overlap + for flag in ['writeonly', 'readwrite']: + x = np.ones([10, 10]) + a = x + b = x.T + c = x + with nditer([a, b, c], ['copy_if_overlap'], + [['readonly'], ['readonly'], [flag]]) as i: + a2, b2, c2 = i.operands + assert_(not np.shares_memory(a2, c2)) + assert_(not np.shares_memory(b2, c2)) + + # Copy not needed, 3 ops, read-only overlap + x = np.ones([10, 10]) + a = x + b = x.T + c = x + i = nditer([a, b, c], ['copy_if_overlap'], + [['readonly'], ['readonly'], ['readonly']]) + a2, b2, c2 = i.operands + assert_(a is a2) + assert_(b is b2) + assert_(c is c2) + + # Copy not needed, 3 ops, read-only overlap + x = np.ones([10, 10]) + a = x + b = np.ones([10, 10]) + c = x.T + i = nditer([a, b, c], ['copy_if_overlap'], + [['readonly'], ['writeonly'], ['readonly']]) + a2, b2, c2 = i.operands + assert_(a is a2) + assert_(b is b2) + assert_(c is c2) + + # Copy not needed, 3 ops, write-only overlap + x = np.arange(7) + a = x[:3] + b = x[3:6] + c = x[4:7] + i = nditer([a, b, c], ['copy_if_overlap'], + [['readonly'], ['writeonly'], ['writeonly']]) + a2, b2, c2 = i.operands + assert_(a is a2) + assert_(b is b2) + assert_(c is c2) + +def test_iter_op_axes(): + # Check that custom axes work + + # Reverse the axes + a = arange(6).reshape(2, 3) + i = nditer([a, a.T], [], [['readonly']] * 2, op_axes=[[0, 1], [1, 0]]) + assert_(all([x == y for (x, y) in i])) + a = arange(24).reshape(2, 3, 4) + i = nditer([a.T, a], [], [['readonly']] * 2, op_axes=[[2, 1, 0], None]) + assert_(all([x == y for (x, y) in i])) + + # Broadcast 1D to any dimension + a = arange(1, 31).reshape(2, 3, 5) + b = arange(1, 3) + i = nditer([a, b], [], [['readonly']] * 2, op_axes=[None, [0, -1, -1]]) + assert_equal([x * y for (x, y) in i], (a * b.reshape(2, 1, 1)).ravel()) + b = arange(1, 4) + i = nditer([a, b], [], [['readonly']] * 2, op_axes=[None, [-1, 0, -1]]) + assert_equal([x * y for (x, y) in i], (a * b.reshape(1, 3, 1)).ravel()) + b = arange(1, 6) + i = nditer([a, b], [], [['readonly']] * 2, + op_axes=[None, [np.newaxis, np.newaxis, 0]]) + assert_equal([x * y for (x, y) in i], (a * b.reshape(1, 1, 5)).ravel()) + + # Inner product-style broadcasting + a = arange(24).reshape(2, 3, 4) + b = arange(40).reshape(5, 2, 4) + i = nditer([a, b], ['multi_index'], [['readonly']] * 2, + op_axes=[[0, 1, -1, -1], [-1, -1, 0, 1]]) + assert_equal(i.shape, (2, 3, 5, 2)) + + # Matrix product-style broadcasting + a = arange(12).reshape(3, 4) + b = arange(20).reshape(4, 5) + i = nditer([a, b], ['multi_index'], [['readonly']] * 2, + op_axes=[[0, -1], [-1, 1]]) + assert_equal(i.shape, (3, 5)) + +def test_iter_op_axes_errors(): + # Check that custom axes throws errors for bad inputs + + # Wrong number of items in op_axes + a = arange(6).reshape(2, 3) + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, + op_axes=[[0], [1], [0]]) + # Out of bounds items in op_axes + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, + op_axes=[[2, 1], [0, 1]]) + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, + op_axes=[[0, 1], [2, -1]]) + # Duplicate items in op_axes + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, + op_axes=[[0, 0], [0, 1]]) + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, + op_axes=[[0, 1], [1, 1]]) + + # Different sized arrays in op_axes + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, + op_axes=[[0, 1], [0, 1, 0]]) + + # Non-broadcastable dimensions in the result + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, + op_axes=[[0, 1], [1, 0]]) + +def test_iter_copy(): + # Check that copying the iterator works correctly + a = arange(24).reshape(2, 3, 4) + + # Simple iterator + i = nditer(a) + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + i.iterindex = 3 + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + # Buffered iterator + i = nditer(a, ['buffered', 'ranged'], order='F', buffersize=3) + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + i.iterindex = 3 + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + i.iterrange = (3, 9) + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + i.iterrange = (2, 18) + next(i) + next(i) + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + # Casting iterator + with nditer(a, ['buffered'], order='F', casting='unsafe', + op_dtypes='f8', buffersize=5) as i: + j = i.copy() + assert_equal([x[()] for x in j], a.ravel(order='F')) + + a = arange(24, dtype=' unstructured (any to object), and many other + # casts, which cause this to require all steps in the casting machinery + # one level down as well as the iterator copy (which uses NpyAuxData clone) + in_dtype = np.dtype([("a", np.dtype("i,")), + ("b", np.dtype(">i,d,S17,>d,3f,O,i1"))]) + out_dtype = np.dtype([("a", np.dtype("O")), + ("b", np.dtype(">i,>i,S17,>d,>U3,3d,i1,O"))]) + arr = np.ones(1000, dtype=in_dtype) + + it = np.nditer((arr,), ["buffered", "external_loop", "refs_ok"], + op_dtypes=[out_dtype], casting="unsafe") + it_copy = it.copy() + + res1 = next(it) + del it + res2 = next(it_copy) + del it_copy + + expected = arr["a"].astype(out_dtype["a"]) + assert_array_equal(res1["a"], expected) + assert_array_equal(res2["a"], expected) + + for field in in_dtype["b"].names: + # Note that the .base avoids the subarray field + expected = arr["b"][field].astype(out_dtype["b"][field].base) + assert_array_equal(res1["b"][field], expected) + assert_array_equal(res2["b"][field], expected) + + +def test_iter_copy_casts_structured2(): + # Similar to the above, this is a fairly arcane test to cover internals + in_dtype = np.dtype([("a", np.dtype("O,O")), + ("b", np.dtype("5O,3O,(1,)O,(1,)i,(1,)O"))]) + out_dtype = np.dtype([("a", np.dtype("O")), + ("b", np.dtype("O,3i,4O,4O,4i"))]) + + arr = np.ones(1, dtype=in_dtype) + it = np.nditer((arr,), ["buffered", "external_loop", "refs_ok"], + op_dtypes=[out_dtype], casting="unsafe") + it_copy = it.copy() + + res1 = next(it) + del it + res2 = next(it_copy) + del it_copy + + # Array of two structured scalars: + for res in res1, res2: + # Cast to tuple by getitem, which may be weird and changeable?: + assert isinstance(res["a"][0], tuple) + assert res["a"][0] == (1, 1) + + for res in res1, res2: + assert_array_equal(res["b"]["f0"][0], np.ones(5, dtype=object)) + assert_array_equal(res["b"]["f1"], np.ones((1, 3), dtype="i")) + assert res["b"]["f2"].shape == (1, 4) + assert_array_equal(res["b"]["f2"][0], np.ones(4, dtype=object)) + assert_array_equal(res["b"]["f3"][0], np.ones(4, dtype=object)) + assert_array_equal(res["b"]["f3"][0], np.ones(4, dtype="i")) + + +def test_iter_allocate_output_simple(): + # Check that the iterator will properly allocate outputs + + # Simple case + a = arange(6) + i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')]) + assert_equal(i.operands[1].shape, a.shape) + assert_equal(i.operands[1].dtype, np.dtype('f4')) + +def test_iter_allocate_output_buffered_readwrite(): + # Allocated output with buffering + delay_bufalloc + + a = arange(6) + i = nditer([a, None], ['buffered', 'delay_bufalloc'], + [['readonly'], ['allocate', 'readwrite']]) + with i: + i.operands[1][:] = 1 + i.reset() + for x in i: + x[1][...] += x[0][...] + assert_equal(i.operands[1], a + 1) + +def test_iter_allocate_output_itorder(): + # The allocated output should match the iteration order + + # C-order input, best iteration order + a = arange(6, dtype='i4').reshape(2, 3) + i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')]) + assert_equal(i.operands[1].shape, a.shape) + assert_equal(i.operands[1].strides, a.strides) + assert_equal(i.operands[1].dtype, np.dtype('f4')) + # F-order input, best iteration order + a = arange(24, dtype='i4').reshape(2, 3, 4).T + i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')]) + assert_equal(i.operands[1].shape, a.shape) + assert_equal(i.operands[1].strides, a.strides) + assert_equal(i.operands[1].dtype, np.dtype('f4')) + # Non-contiguous input, C iteration order + a = arange(24, dtype='i4').reshape(2, 3, 4).swapaxes(0, 1) + i = nditer([a, None], [], + [['readonly'], ['writeonly', 'allocate']], + order='C', + op_dtypes=[None, np.dtype('f4')]) + assert_equal(i.operands[1].shape, a.shape) + assert_equal(i.operands[1].strides, (32, 16, 4)) + assert_equal(i.operands[1].dtype, np.dtype('f4')) + +def test_iter_allocate_output_opaxes(): + # Specifying op_axes should work + + a = arange(24, dtype='i4').reshape(2, 3, 4) + i = nditer([None, a], [], [['writeonly', 'allocate'], ['readonly']], + op_dtypes=[np.dtype('u4'), None], + op_axes=[[1, 2, 0], None]) + assert_equal(i.operands[0].shape, (4, 2, 3)) + assert_equal(i.operands[0].strides, (4, 48, 16)) + assert_equal(i.operands[0].dtype, np.dtype('u4')) + +def test_iter_allocate_output_types_promotion(): + # Check type promotion of automatic outputs (this was more interesting + # before NEP 50...) + + i = nditer([array([3], dtype='f4'), array([0], dtype='f8'), None], [], + [['readonly']] * 2 + [['writeonly', 'allocate']]) + assert_equal(i.dtypes[2], np.dtype('f8')) + i = nditer([array([3], dtype='i4'), array([0], dtype='f4'), None], [], + [['readonly']] * 2 + [['writeonly', 'allocate']]) + assert_equal(i.dtypes[2], np.dtype('f8')) + i = nditer([array([3], dtype='f4'), array(0, dtype='f8'), None], [], + [['readonly']] * 2 + [['writeonly', 'allocate']]) + assert_equal(i.dtypes[2], np.dtype('f8')) + i = nditer([array([3], dtype='u4'), array(0, dtype='i4'), None], [], + [['readonly']] * 2 + [['writeonly', 'allocate']]) + assert_equal(i.dtypes[2], np.dtype('i8')) + i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), None], [], + [['readonly']] * 2 + [['writeonly', 'allocate']]) + assert_equal(i.dtypes[2], np.dtype('i8')) + +def test_iter_allocate_output_types_byte_order(): + # Verify the rules for byte order changes + + # When there's just one input, the output type exactly matches + a = array([3], dtype='u4') + a = a.view(a.dtype.newbyteorder()) + i = nditer([a, None], [], + [['readonly'], ['writeonly', 'allocate']]) + assert_equal(i.dtypes[0], i.dtypes[1]) + # With two or more inputs, the output type is in native byte order + i = nditer([a, a, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) + assert_(i.dtypes[0] != i.dtypes[2]) + assert_equal(i.dtypes[0].newbyteorder('='), i.dtypes[2]) + +def test_iter_allocate_output_types_scalar(): + # If the inputs are all scalars, the output should be a scalar + + i = nditer([None, 1, 2.3, np.float32(12), np.complex128(3)], [], + [['writeonly', 'allocate']] + [['readonly']] * 4) + assert_equal(i.operands[0].dtype, np.dtype('complex128')) + assert_equal(i.operands[0].ndim, 0) + +def test_iter_allocate_output_subtype(): + # Make sure that the subtype with priority wins + class MyNDArray(np.ndarray): + __array_priority__ = 15 + + # subclass vs ndarray + a = np.array([[1, 2], [3, 4]]).view(MyNDArray) + b = np.arange(4).reshape(2, 2).T + i = nditer([a, b, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) + assert_equal(type(a), type(i.operands[2])) + assert_(type(b) is not type(i.operands[2])) + assert_equal(i.operands[2].shape, (2, 2)) + + # If subtypes are disabled, we should get back an ndarray. + i = nditer([a, b, None], [], + [['readonly'], ['readonly'], + ['writeonly', 'allocate', 'no_subtype']]) + assert_equal(type(b), type(i.operands[2])) + assert_(type(a) is not type(i.operands[2])) + assert_equal(i.operands[2].shape, (2, 2)) + +def test_iter_allocate_output_errors(): + # Check that the iterator will throw errors for bad output allocations + + # Need an input if no output data type is specified + a = arange(6) + assert_raises(TypeError, nditer, [a, None], [], + [['writeonly'], ['writeonly', 'allocate']]) + # Allocated output should be flagged for writing + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['allocate', 'readonly']]) + # Allocated output can't have buffering without delayed bufalloc + assert_raises(ValueError, nditer, [a, None], ['buffered'], + ['allocate', 'readwrite']) + # Must specify dtype if there are no inputs (cannot promote existing ones; + # maybe this should use the 'f4' here, but it does not historically.) + assert_raises(TypeError, nditer, [None, None], [], + [['writeonly', 'allocate'], + ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')]) + # If using op_axes, must specify all the axes + a = arange(24, dtype='i4').reshape(2, 3, 4) + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')], + op_axes=[None, [0, np.newaxis, 1]]) + # If using op_axes, the axes must be within bounds + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')], + op_axes=[None, [0, 3, 1]]) + # If using op_axes, there can't be duplicates + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')], + op_axes=[None, [0, 2, 1, 0]]) + # Not all axes may be specified if a reduction. If there is a hole + # in op_axes, this is an error. + a = arange(24, dtype='i4').reshape(2, 3, 4) + assert_raises(ValueError, nditer, [a, None], ["reduce_ok"], + [['readonly'], ['readwrite', 'allocate']], + op_dtypes=[None, np.dtype('f4')], + op_axes=[None, [0, np.newaxis, 2]]) + +def test_all_allocated(): + # When no output and no shape is given, `()` is used as shape. + i = np.nditer([None], op_dtypes=["int64"]) + assert i.operands[0].shape == () + assert i.dtypes == (np.dtype("int64"),) + + i = np.nditer([None], op_dtypes=["int64"], itershape=(2, 3, 4)) + assert i.operands[0].shape == (2, 3, 4) + +def test_iter_remove_axis(): + a = arange(24).reshape(2, 3, 4) + + i = nditer(a, ['multi_index']) + i.remove_axis(1) + assert_equal(list(i), a[:, 0, :].ravel()) + + a = a[::-1, :, :] + i = nditer(a, ['multi_index']) + i.remove_axis(0) + assert_equal(list(i), a[0, :, :].ravel()) + +def test_iter_remove_multi_index_inner_loop(): + # Check that removing multi-index support works + + a = arange(24).reshape(2, 3, 4) + + i = nditer(a, ['multi_index']) + assert_equal(i.ndim, 3) + assert_equal(i.shape, (2, 3, 4)) + assert_equal(i.itviews[0].shape, (2, 3, 4)) + + # Removing the multi-index tracking causes all dimensions to coalesce + before = list(i) + i.remove_multi_index() + after = list(i) + + assert_equal(before, after) + assert_equal(i.ndim, 1) + assert_raises(ValueError, lambda i: i.shape, i) + assert_equal(i.itviews[0].shape, (24,)) + + # Removing the inner loop means there's just one iteration + i.reset() + assert_equal(i.itersize, 24) + assert_equal(i[0].shape, ()) + i.enable_external_loop() + assert_equal(i.itersize, 24) + assert_equal(i[0].shape, (24,)) + assert_equal(i.value, arange(24)) + +def test_iter_iterindex(): + # Make sure iterindex works + + buffersize = 5 + a = arange(24).reshape(4, 3, 2) + for flags in ([], ['buffered']): + i = nditer(a, flags, buffersize=buffersize) + assert_equal(iter_iterindices(i), list(range(24))) + i.iterindex = 2 + assert_equal(iter_iterindices(i), list(range(2, 24))) + + i = nditer(a, flags, order='F', buffersize=buffersize) + assert_equal(iter_iterindices(i), list(range(24))) + i.iterindex = 5 + assert_equal(iter_iterindices(i), list(range(5, 24))) + + i = nditer(a[::-1], flags, order='F', buffersize=buffersize) + assert_equal(iter_iterindices(i), list(range(24))) + i.iterindex = 9 + assert_equal(iter_iterindices(i), list(range(9, 24))) + + i = nditer(a[::-1, ::-1], flags, order='C', buffersize=buffersize) + assert_equal(iter_iterindices(i), list(range(24))) + i.iterindex = 13 + assert_equal(iter_iterindices(i), list(range(13, 24))) + + i = nditer(a[::1, ::-1], flags, buffersize=buffersize) + assert_equal(iter_iterindices(i), list(range(24))) + i.iterindex = 23 + assert_equal(iter_iterindices(i), list(range(23, 24))) + i.reset() + i.iterindex = 2 + assert_equal(iter_iterindices(i), list(range(2, 24))) + +def test_iter_iterrange(): + # Make sure getting and resetting the iterrange works + + buffersize = 5 + a = arange(24, dtype='i4').reshape(4, 3, 2) + a_fort = a.ravel(order='F') + + i = nditer(a, ['ranged'], ['readonly'], order='F', + buffersize=buffersize) + assert_equal(i.iterrange, (0, 24)) + assert_equal([x[()] for x in i], a_fort) + for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]: + i.iterrange = r + assert_equal(i.iterrange, r) + assert_equal([x[()] for x in i], a_fort[r[0]:r[1]]) + + i = nditer(a, ['ranged', 'buffered'], ['readonly'], order='F', + op_dtypes='f8', buffersize=buffersize) + assert_equal(i.iterrange, (0, 24)) + assert_equal([x[()] for x in i], a_fort) + for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]: + i.iterrange = r + assert_equal(i.iterrange, r) + assert_equal([x[()] for x in i], a_fort[r[0]:r[1]]) + + def get_array(i): + val = np.array([], dtype='f8') + for x in i: + val = np.concatenate((val, x)) + return val + + i = nditer(a, ['ranged', 'buffered', 'external_loop'], + ['readonly'], order='F', + op_dtypes='f8', buffersize=buffersize) + assert_equal(i.iterrange, (0, 24)) + assert_equal(get_array(i), a_fort) + for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]: + i.iterrange = r + assert_equal(i.iterrange, r) + assert_equal(get_array(i), a_fort[r[0]:r[1]]) + +def test_iter_buffering(): + # Test buffering with several buffer sizes and types + arrays = [] + # F-order swapped array + _tmp = np.arange(24, dtype='c16').reshape(2, 3, 4).T + _tmp = _tmp.view(_tmp.dtype.newbyteorder()).byteswap() + arrays.append(_tmp) + # Contiguous 1-dimensional array + arrays.append(np.arange(10, dtype='f4')) + # Unaligned array + a = np.zeros((4 * 16 + 1,), dtype='i1')[1:] + a = a.view('i4') + a[:] = np.arange(16, dtype='i4') + arrays.append(a) + # 4-D F-order array + arrays.append(np.arange(120, dtype='i4').reshape(5, 3, 2, 4).T) + for a in arrays: + for buffersize in (1, 2, 3, 5, 8, 11, 16, 1024): + vals = [] + i = nditer(a, ['buffered', 'external_loop'], + [['readonly', 'nbo', 'aligned']], + order='C', + casting='equiv', + buffersize=buffersize) + while not i.finished: + assert_(i[0].size <= buffersize) + vals.append(i[0].copy()) + i.iternext() + assert_equal(np.concatenate(vals), a.ravel(order='C')) + +def test_iter_write_buffering(): + # Test that buffering of writes is working + + # F-order swapped array + a = np.arange(24).reshape(2, 3, 4).T + a = a.view(a.dtype.newbyteorder()).byteswap() + i = nditer(a, ['buffered'], + [['readwrite', 'nbo', 'aligned']], + casting='equiv', + order='C', + buffersize=16) + x = 0 + with i: + while not i.finished: + i[0] = x + x += 1 + i.iternext() + assert_equal(a.ravel(order='C'), np.arange(24)) + +def test_iter_buffering_delayed_alloc(): + # Test that delaying buffer allocation works + + a = np.arange(6) + b = np.arange(1, dtype='f4') + i = nditer([a, b], ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok'], + ['readwrite'], + casting='unsafe', + op_dtypes='f4') + assert_(i.has_delayed_bufalloc) + assert_raises(ValueError, lambda i: i.multi_index, i) + assert_raises(ValueError, lambda i: i[0], i) + assert_raises(ValueError, lambda i: i[0:2], i) + + def assign_iter(i): + i[0] = 0 + assert_raises(ValueError, assign_iter, i) + + i.reset() + assert_(not i.has_delayed_bufalloc) + assert_equal(i.multi_index, (0,)) + with i: + assert_equal(i[0], 0) + i[1] = 1 + assert_equal(i[0:2], [0, 1]) + assert_equal([[x[0][()], x[1][()]] for x in i], list(zip(range(6), [1] * 6))) + +def test_iter_buffered_cast_simple(): + # Test that buffering can handle a simple cast + + a = np.arange(10, dtype='f4') + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('f8')], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + + assert_equal(a, 2 * np.arange(10, dtype='f4')) + +def test_iter_buffered_cast_byteswapped(): + # Test that buffering can handle a cast which requires swap->cast->swap + + a = np.arange(10, dtype='f4') + a = a.view(a.dtype.newbyteorder()).byteswap() + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('f8').newbyteorder()], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + + assert_equal(a, 2 * np.arange(10, dtype='f4')) + + with warnings.catch_warnings(): + warnings.simplefilter('ignore', np.exceptions.ComplexWarning) + + a = np.arange(10, dtype='f8') + a = a.view(a.dtype.newbyteorder()).byteswap() + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='unsafe', + op_dtypes=[np.dtype('c8').newbyteorder()], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + + assert_equal(a, 2 * np.arange(10, dtype='f8')) + +def test_iter_buffered_cast_byteswapped_complex(): + # Test that buffering can handle a cast which requires swap->cast->copy + + a = np.arange(10, dtype='c8') + a = a.view(a.dtype.newbyteorder()).byteswap() + a += 2j + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('c16')], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + assert_equal(a, 2 * np.arange(10, dtype='c8') + 4j) + + a = np.arange(10, dtype='c8') + a += 2j + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('c16').newbyteorder()], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + assert_equal(a, 2 * np.arange(10, dtype='c8') + 4j) + + a = np.arange(10, dtype=np.clongdouble) + a = a.view(a.dtype.newbyteorder()).byteswap() + a += 2j + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('c16')], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + assert_equal(a, 2 * np.arange(10, dtype=np.clongdouble) + 4j) + + a = np.arange(10, dtype=np.longdouble) + a = a.view(a.dtype.newbyteorder()).byteswap() + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('f4')], + buffersize=7) + with i: + for v in i: + v[...] *= 2 + assert_equal(a, 2 * np.arange(10, dtype=np.longdouble)) + +def test_iter_buffered_cast_structured_type(): + # Tests buffering of structured types + + # simple -> struct type (duplicates the value) + sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] + a = np.arange(3, dtype='f4') + 0.5 + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt) + vals = [np.array(x) for x in i] + assert_equal(vals[0]['a'], 0.5) + assert_equal(vals[0]['b'], 0) + assert_equal(vals[0]['c'], [[(0.5)] * 3] * 2) + assert_equal(vals[0]['d'], 0.5) + assert_equal(vals[1]['a'], 1.5) + assert_equal(vals[1]['b'], 1) + assert_equal(vals[1]['c'], [[(1.5)] * 3] * 2) + assert_equal(vals[1]['d'], 1.5) + assert_equal(vals[0].dtype, np.dtype(sdt)) + + # object -> struct type + sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] + a = np.zeros((3,), dtype='O') + a[0] = (0.5, 0.5, [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]], 0.5) + a[1] = (1.5, 1.5, [[1.5, 1.5, 1.5], [1.5, 1.5, 1.5]], 1.5) + a[2] = (2.5, 2.5, [[2.5, 2.5, 2.5], [2.5, 2.5, 2.5]], 2.5) + if HAS_REFCOUNT: + rc = sys.getrefcount(a[0]) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt) + vals = [x.copy() for x in i] + assert_equal(vals[0]['a'], 0.5) + assert_equal(vals[0]['b'], 0) + assert_equal(vals[0]['c'], [[(0.5)] * 3] * 2) + assert_equal(vals[0]['d'], 0.5) + assert_equal(vals[1]['a'], 1.5) + assert_equal(vals[1]['b'], 1) + assert_equal(vals[1]['c'], [[(1.5)] * 3] * 2) + assert_equal(vals[1]['d'], 1.5) + assert_equal(vals[0].dtype, np.dtype(sdt)) + vals, i, x = [None] * 3 + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(a[0]), rc) + + # single-field struct type -> simple + sdt = [('a', 'f4')] + a = np.array([(5.5,), (8,)], dtype=sdt) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes='i4') + assert_equal([x_[()] for x_ in i], [5, 8]) + + # make sure multi-field struct type -> simple doesn't work + sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] + a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt) + assert_raises(TypeError, lambda: ( + nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes='i4'))) + + # struct type -> struct type (field-wise copy) + sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] + sdt2 = [('d', 'u2'), ('a', 'O'), ('b', 'f8')] + a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + assert_equal([np.array(x_) for x_ in i], + [np.array((1, 2, 3), dtype=sdt2), + np.array((4, 5, 6), dtype=sdt2)]) + + +def test_iter_buffered_cast_structured_type_failure_with_cleanup(): + # make sure struct type -> struct type with different + # number of fields fails + sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] + sdt2 = [('b', 'O'), ('a', 'f8')] + a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1) + + for intent in ["readwrite", "readonly", "writeonly"]: + # This test was initially designed to test an error at a different + # place, but will now raise earlier to to the cast not being possible: + # `assert np.can_cast(a.dtype, sdt2, casting="unsafe")` fails. + # Without a faulty DType, there is probably no reliable + # way to get the initial tested behaviour. + simple_arr = np.array([1, 2], dtype="i,i") # requires clean up + with pytest.raises(TypeError): + nditer((simple_arr, a), ['buffered', 'refs_ok'], [intent, intent], + casting='unsafe', op_dtypes=["f,f", sdt2]) + + +def test_buffered_cast_error_paths(): + with pytest.raises(ValueError): + # The input is cast into an `S3` buffer + np.nditer((np.array("a", dtype="S1"),), op_dtypes=["i"], + casting="unsafe", flags=["buffered"]) + + # The `M8[ns]` is cast into the `S3` output + it = np.nditer((np.array(1, dtype="i"),), op_dtypes=["S1"], + op_flags=["writeonly"], casting="unsafe", flags=["buffered"]) + with pytest.raises(ValueError): + with it: + buf = next(it) + buf[...] = "a" # cannot be converted to int. + +@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") +@pytest.mark.skipif(not HAS_REFCOUNT, reason="PyPy seems to not hit this.") +def test_buffered_cast_error_paths_unraisable(): + # The following gives an unraisable error. Pytest sometimes captures that + # (depending python and/or pytest version). So with Python>=3.8 this can + # probably be cleaned out in the future to check for + # pytest.PytestUnraisableExceptionWarning: + code = textwrap.dedent(""" + import numpy as np + + it = np.nditer((np.array(1, dtype="i"),), op_dtypes=["S1"], + op_flags=["writeonly"], casting="unsafe", flags=["buffered"]) + buf = next(it) + buf[...] = "a" + del buf, it # Flushing only happens during deallocate right now. + """) + res = subprocess.check_output([sys.executable, "-c", code], + stderr=subprocess.STDOUT, text=True) + assert "ValueError" in res + + +def test_iter_buffered_cast_subarray(): + # Tests buffering of subarrays + + # one element -> many (copies it to all) + sdt1 = [('a', 'f4')] + sdt2 = [('a', 'f8', (3, 2, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + for x, count in zip(i, list(range(6))): + assert_(np.all(x['a'] == count)) + + # one element -> many -> back (copies it to all) + sdt1 = [('a', 'O', (1, 1))] + sdt2 = [('a', 'O', (3, 2, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'][:, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], + casting='unsafe', + op_dtypes=sdt2) + with i: + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_(np.all(x['a'] == count)) + x['a'][0] += 2 + count += 1 + assert_equal(a['a'], np.arange(6).reshape(6, 1, 1) + 2) + + # many -> one element -> back (copies just element 0) + sdt1 = [('a', 'O', (3, 2, 2))] + sdt2 = [('a', 'O', (1,))] + a = np.zeros((6,), dtype=sdt1) + a['a'][:, 0, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], + casting='unsafe', + op_dtypes=sdt2) + with i: + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], count) + x['a'] += 2 + count += 1 + assert_equal(a['a'], np.arange(6).reshape(6, 1, 1, 1) * np.ones((1, 3, 2, 2)) + 2) + + # many -> one element -> back (copies just element 0) + sdt1 = [('a', 'f8', (3, 2, 2))] + sdt2 = [('a', 'O', (1,))] + a = np.zeros((6,), dtype=sdt1) + a['a'][:, 0, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], count) + count += 1 + + # many -> one element (copies just element 0) + sdt1 = [('a', 'O', (3, 2, 2))] + sdt2 = [('a', 'f4', (1,))] + a = np.zeros((6,), dtype=sdt1) + a['a'][:, 0, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], count) + count += 1 + + # many -> matching shape (straightforward copy) + sdt1 = [('a', 'O', (3, 2, 2))] + sdt2 = [('a', 'f4', (3, 2, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6 * 3 * 2 * 2).reshape(6, 3, 2, 2) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], a[count]['a']) + count += 1 + + # vector -> smaller vector (truncates) + sdt1 = [('a', 'f8', (6,))] + sdt2 = [('a', 'f4', (2,))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6 * 6).reshape(6, 6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], a[count]['a'][:2]) + count += 1 + + # vector -> bigger vector (pads with zeros) + sdt1 = [('a', 'f8', (2,))] + sdt2 = [('a', 'f4', (6,))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6 * 2).reshape(6, 2) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'][:2], a[count]['a']) + assert_equal(x['a'][2:], [0, 0, 0, 0]) + count += 1 + + # vector -> matrix (broadcasts) + sdt1 = [('a', 'f8', (2,))] + sdt2 = [('a', 'f4', (2, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6 * 2).reshape(6, 2) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'][0], a[count]['a']) + assert_equal(x['a'][1], a[count]['a']) + count += 1 + + # vector -> matrix (broadcasts and zero-pads) + sdt1 = [('a', 'f8', (2, 1))] + sdt2 = [('a', 'f4', (3, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6 * 2).reshape(6, 2, 1) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'][:2, 0], a[count]['a'][:, 0]) + assert_equal(x['a'][:2, 1], a[count]['a'][:, 0]) + assert_equal(x['a'][2, :], [0, 0]) + count += 1 + + # matrix -> matrix (truncates and zero-pads) + sdt1 = [('a', 'f8', (2, 3))] + sdt2 = [('a', 'f4', (3, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6 * 2 * 3).reshape(6, 2, 3) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'][:2, 0], a[count]['a'][:, 0]) + assert_equal(x['a'][:2, 1], a[count]['a'][:, 1]) + assert_equal(x['a'][2, :], [0, 0]) + count += 1 + +def test_iter_buffering_badwriteback(): + # Writing back from a buffer cannot combine elements + + # a needs write buffering, but had a broadcast dimension + a = np.arange(6).reshape(2, 3, 1) + b = np.arange(12).reshape(2, 3, 2) + assert_raises(ValueError, nditer, [a, b], + ['buffered', 'external_loop'], + [['readwrite'], ['writeonly']], + order='C') + + # But if a is readonly, it's fine + nditer([a, b], ['buffered', 'external_loop'], + [['readonly'], ['writeonly']], + order='C') + + # If a has just one element, it's fine too (constant 0 stride, a reduction) + a = np.arange(1).reshape(1, 1, 1) + nditer([a, b], ['buffered', 'external_loop', 'reduce_ok'], + [['readwrite'], ['writeonly']], + order='C') + + # check that it fails on other dimensions too + a = np.arange(6).reshape(1, 3, 2) + assert_raises(ValueError, nditer, [a, b], + ['buffered', 'external_loop'], + [['readwrite'], ['writeonly']], + order='C') + a = np.arange(4).reshape(2, 1, 2) + assert_raises(ValueError, nditer, [a, b], + ['buffered', 'external_loop'], + [['readwrite'], ['writeonly']], + order='C') + +def test_iter_buffering_string(): + # Safe casting disallows shrinking strings + a = np.array(['abc', 'a', 'abcd'], dtype=np.bytes_) + assert_equal(a.dtype, np.dtype('S4')) + assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'], + op_dtypes='S2') + i = nditer(a, ['buffered'], ['readonly'], op_dtypes='S6') + assert_equal(i[0], b'abc') + assert_equal(i[0].dtype, np.dtype('S6')) + + a = np.array(['abc', 'a', 'abcd'], dtype=np.str_) + assert_equal(a.dtype, np.dtype('U4')) + assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'], + op_dtypes='U2') + i = nditer(a, ['buffered'], ['readonly'], op_dtypes='U6') + assert_equal(i[0], 'abc') + assert_equal(i[0].dtype, np.dtype('U6')) + +def test_iter_buffering_growinner(): + # Test that the inner loop grows when no buffering is needed + a = np.arange(30) + i = nditer(a, ['buffered', 'growinner', 'external_loop'], + buffersize=5) + # Should end up with just one inner loop here + assert_equal(i[0].size, a.size) + + +@pytest.mark.parametrize("read_or_readwrite", ["readonly", "readwrite"]) +def test_iter_contig_flag_reduce_error(read_or_readwrite): + # Test that a non-contiguous operand is rejected without buffering. + # NOTE: This is true even for a reduction, where we return a 0-stride + # below! + with pytest.raises(TypeError, match="Iterator operand required buffering"): + it = np.nditer( + (np.zeros(()),), flags=["external_loop", "reduce_ok"], + op_flags=[(read_or_readwrite, "contig"),], itershape=(10,)) + + +@pytest.mark.parametrize("arr", [ + lambda: np.zeros(()), + lambda: np.zeros((20, 1))[::20], + lambda: np.zeros((1, 20))[:, ::20] + ]) +def test_iter_contig_flag_single_operand_strides(arr): + """ + Tests the strides with the contig flag for both broadcast and non-broadcast + operands in 3 cases where the logic is needed: + 1. When everything has a zero stride, the broadcast op needs to repeated + 2. When the reduce axis is the last axis (first to iterate). + 3. When the reduce axis is the first axis (last to iterate). + + NOTE: The semantics of the cast flag are not clearly defined when + it comes to reduction. It is unclear that there are any users. + """ + first_op = np.ones((10, 10)) + broadcast_op = arr() + red_op = arr() + # Add a first operand to ensure no axis-reordering and the result shape. + iterator = np.nditer( + (first_op, broadcast_op, red_op), + flags=["external_loop", "reduce_ok", "buffered", "delay_bufalloc"], + op_flags=[("readonly", "contig")] * 2 + [("readwrite", "contig")]) + + with iterator: + iterator.reset() + for f, b, r in iterator: + # The first operand is contigouos, we should have a view + assert np.shares_memory(f, first_op) + # Although broadcast, the second op always has a contiguous stride + assert b.strides[0] == 8 + assert not np.shares_memory(b, broadcast_op) + # The reduction has a contiguous stride or a 0 stride + if red_op.ndim == 0 or red_op.shape[-1] == 1: + assert r.strides[0] == 0 + else: + # The stride is 8, although it was not originally: + assert r.strides[0] == 8 + # If the reduce stride is 0, buffering makes no difference, but we + # do it anyway right now: + assert not np.shares_memory(r, red_op) + + +@pytest.mark.xfail(reason="The contig flag was always buggy.") +def test_iter_contig_flag_incorrect(): + # This case does the wrong thing... + iterator = np.nditer( + (np.ones((10, 10)).T, np.ones((1, 10))), + flags=["external_loop", "reduce_ok", "buffered", "delay_bufalloc"], + op_flags=[("readonly", "contig")] * 2) + + with iterator: + iterator.reset() + for a, b in iterator: + # Remove a and b from locals (pytest may want to format them) + a, b = a.strides, b.strides + assert a == 8 + assert b == 8 # should be 8 but is 0 due to axis reorder + + +@pytest.mark.slow +def test_iter_buffered_reduce_reuse(): + # large enough array for all views, including negative strides. + a = np.arange(2 * 3**5)[3**5:3**5 + 1] + flags = ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok', 'refs_ok'] + op_flags = [('readonly',), ('readwrite', 'allocate')] + op_axes_list = [[(0, 1, 2), (0, 1, -1)], [(0, 1, 2), (0, -1, -1)]] + # wrong dtype to force buffering + op_dtypes = [float, a.dtype] + + def get_params(): + for xs in range(-3**2, 3**2 + 1): + for ys in range(xs, 3**2 + 1): + for op_axes in op_axes_list: + # last stride is reduced and because of that not + # important for this test, as it is the inner stride. + strides = (xs * a.itemsize, ys * a.itemsize, a.itemsize) + arr = np.lib.stride_tricks.as_strided(a, (3, 3, 3), strides) + + for skip in [0, 1]: + yield arr, op_axes, skip + + for arr, op_axes, skip in get_params(): + nditer2 = np.nditer([arr.copy(), None], + op_axes=op_axes, flags=flags, op_flags=op_flags, + op_dtypes=op_dtypes) + with nditer2: + nditer2.operands[-1][...] = 0 + nditer2.reset() + nditer2.iterindex = skip + + for (a2_in, b2_in) in nditer2: + b2_in += a2_in.astype(np.int_) + + comp_res = nditer2.operands[-1] + + for bufsize in range(3**3): + nditer1 = np.nditer([arr, None], + op_axes=op_axes, flags=flags, op_flags=op_flags, + buffersize=bufsize, op_dtypes=op_dtypes) + with nditer1: + nditer1.operands[-1][...] = 0 + nditer1.reset() + nditer1.iterindex = skip + + for (a1_in, b1_in) in nditer1: + b1_in += a1_in.astype(np.int_) + + res = nditer1.operands[-1] + assert_array_equal(res, comp_res) + + +def test_iter_buffered_reduce_reuse_core(): + # NumPy re-uses buffers for broadcast operands (as of writing when reading). + # Test this even if the offset is manually set at some point during + # the iteration. (not a particularly tricky path) + arr = np.empty((1, 6, 4, 1)).reshape(1, 6, 4, 1)[:, ::3, ::2, :] + arr[...] = np.arange(arr.size).reshape(arr.shape) + # First and last dimension are broadcast dimensions. + arr = np.broadcast_to(arr, (100, 2, 2, 2)) + + flags = ['buffered', 'reduce_ok', 'refs_ok', 'multi_index'] + op_flags = [('readonly',)] + + buffersize = 100 # small enough to not fit the whole array + it = np.nditer(arr, flags=flags, op_flags=op_flags, buffersize=100) + + # Iterate a bit (this will cause buffering internally) + expected = [next(it) for i in range(11)] + # Now, manually advance to inside the core (the +1) + it.iterindex = 10 * (2 * 2 * 2) + 1 + result = [next(it) for i in range(10)] + + assert expected[1:] == result + + +def test_iter_no_broadcast(): + # Test that the no_broadcast flag works + a = np.arange(24).reshape(2, 3, 4) + b = np.arange(6).reshape(2, 3, 1) + c = np.arange(12).reshape(3, 4) + + nditer([a, b, c], [], + [['readonly', 'no_broadcast'], + ['readonly'], ['readonly']]) + assert_raises(ValueError, nditer, [a, b, c], [], + [['readonly'], ['readonly', 'no_broadcast'], ['readonly']]) + assert_raises(ValueError, nditer, [a, b, c], [], + [['readonly'], ['readonly'], ['readonly', 'no_broadcast']]) + + +class TestIterNested: + + def test_basic(self): + # Test nested iteration basic usage + a = arange(12).reshape(2, 3, 2) + + i, j = np.nested_iters(a, [[0], [1, 2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[0, 1], [2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + + i, j = np.nested_iters(a, [[0, 2], [1]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + + def test_reorder(self): + # Test nested iteration basic usage + a = arange(12).reshape(2, 3, 2) + + # In 'K' order (default), it gets reordered + i, j = np.nested_iters(a, [[0], [2, 1]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[1, 0], [2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + + i, j = np.nested_iters(a, [[2, 0], [1]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + + # In 'C' order, it doesn't + i, j = np.nested_iters(a, [[0], [2, 1]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]]) + + i, j = np.nested_iters(a, [[1, 0], [2]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 11]]) + + i, j = np.nested_iters(a, [[2, 0], [1]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]]) + + def test_flip_axes(self): + # Test nested iteration with negative axes + a = arange(12).reshape(2, 3, 2)[::-1, ::-1, ::-1] + + # In 'K' order (default), the axes all get flipped + i, j = np.nested_iters(a, [[0], [1, 2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[0, 1], [2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + + i, j = np.nested_iters(a, [[0, 2], [1]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + + # In 'C' order, flipping axes is disabled + i, j = np.nested_iters(a, [[0], [1, 2]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]]) + + i, j = np.nested_iters(a, [[0, 1], [2]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]]) + + i, j = np.nested_iters(a, [[0, 2], [1]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]]) + + def test_broadcast(self): + # Test nested iteration with broadcasting + a = arange(2).reshape(2, 1) + b = arange(3).reshape(1, 3) + + i, j = np.nested_iters([a, b], [[0], [1]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]]) + + i, j = np.nested_iters([a, b], [[1], [0]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]]) + + def test_dtype_copy(self): + # Test nested iteration with a copy to change dtype + + # copy + a = arange(6, dtype='i4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + op_flags=['readonly', 'copy'], + op_dtypes='f8') + assert_equal(j[0].dtype, np.dtype('f8')) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1, 2], [3, 4, 5]]) + vals = None + + # writebackifcopy - using context manager + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + op_flags=['readwrite', 'updateifcopy'], + casting='same_kind', + op_dtypes='f8') + with i, j: + assert_equal(j[0].dtype, np.dtype('f8')) + for x in i: + for y in j: + y[...] += 1 + assert_equal(a, [[0, 1, 2], [3, 4, 5]]) + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) + + # writebackifcopy - using close() + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + op_flags=['readwrite', 'updateifcopy'], + casting='same_kind', + op_dtypes='f8') + assert_equal(j[0].dtype, np.dtype('f8')) + for x in i: + for y in j: + y[...] += 1 + assert_equal(a, [[0, 1, 2], [3, 4, 5]]) + i.close() + j.close() + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) + + def test_dtype_buffered(self): + # Test nested iteration with buffering to change dtype + + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + flags=['buffered'], + op_flags=['readwrite'], + casting='same_kind', + op_dtypes='f8') + assert_equal(j[0].dtype, np.dtype('f8')) + for x in i: + for y in j: + y[...] += 1 + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) + + def test_0d(self): + a = np.arange(12).reshape(2, 3, 2) + i, j = np.nested_iters(a, [[], [1, 0, 2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[1, 0, 2], []]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]]) + + i, j, k = np.nested_iters(a, [[2, 0], [], [1]]) + vals = [] + for x in i: + for y in j: + vals.append(list(k)) + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + + def test_iter_nested_iters_dtype_buffered(self): + # Test nested iteration with buffering to change dtype + + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + flags=['buffered'], + op_flags=['readwrite'], + casting='same_kind', + op_dtypes='f8') + with i, j: + assert_equal(j[0].dtype, np.dtype('f8')) + for x in i: + for y in j: + y[...] += 1 + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) + +def test_iter_reduction_error(): + + a = np.arange(6) + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0], [-1]]) + + a = np.arange(6).reshape(2, 3) + assert_raises(ValueError, nditer, [a, None], ['external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0, 1], [-1, -1]]) + +def test_iter_reduction(): + # Test doing reductions with the iterator + + a = np.arange(6) + i = nditer([a, None], ['reduce_ok'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0], [-1]]) + # Need to initialize the output operand to the addition unit + with i: + i.operands[1][...] = 0 + # Do the reduction + for x, y in i: + y[...] += x + # Since no axes were specified, should have allocated a scalar + assert_equal(i.operands[1].ndim, 0) + assert_equal(i.operands[1], np.sum(a)) + + a = np.arange(6).reshape(2, 3) + i = nditer([a, None], ['reduce_ok', 'external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0, 1], [-1, -1]]) + # Need to initialize the output operand to the addition unit + with i: + i.operands[1][...] = 0 + # Reduction shape/strides for the output + assert_equal(i[1].shape, (6,)) + assert_equal(i[1].strides, (0,)) + # Do the reduction + for x, y in i: + # Use a for loop instead of ``y[...] += x`` + # (equivalent to ``y[...] = y[...].copy() + x``), + # because y has zero strides we use for the reduction + for j in range(len(y)): + y[j] += x[j] + # Since no axes were specified, should have allocated a scalar + assert_equal(i.operands[1].ndim, 0) + assert_equal(i.operands[1], np.sum(a)) + + # This is a tricky reduction case for the buffering double loop + # to handle + a = np.ones((2, 3, 5)) + it1 = nditer([a, None], ['reduce_ok', 'external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0, -1, 1]]) + it2 = nditer([a, None], ['reduce_ok', 'external_loop', + 'buffered', 'delay_bufalloc'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0, -1, 1]], buffersize=10) + with it1, it2: + it1.operands[1].fill(0) + it2.operands[1].fill(0) + it2.reset() + for x in it1: + x[1][...] += x[0] + for x in it2: + x[1][...] += x[0] + assert_equal(it1.operands[1], it2.operands[1]) + assert_equal(it2.operands[1].sum(), a.size) + +def test_iter_buffering_reduction(): + # Test doing buffered reductions with the iterator + + a = np.arange(6) + b = np.array(0., dtype='f8').byteswap() + b = b.view(b.dtype.newbyteorder()) + i = nditer([a, b], ['reduce_ok', 'buffered'], + [['readonly'], ['readwrite', 'nbo']], + op_axes=[[0], [-1]]) + with i: + assert_equal(i[1].dtype, np.dtype('f8')) + assert_(i[1].dtype != b.dtype) + # Do the reduction + for x, y in i: + y[...] += x + # Since no axes were specified, should have allocated a scalar + assert_equal(b, np.sum(a)) + + a = np.arange(6).reshape(2, 3) + b = np.array([0, 0], dtype='f8').byteswap() + b = b.view(b.dtype.newbyteorder()) + i = nditer([a, b], ['reduce_ok', 'external_loop', 'buffered'], + [['readonly'], ['readwrite', 'nbo']], + op_axes=[[0, 1], [0, -1]]) + # Reduction shape/strides for the output + with i: + assert_equal(i[1].shape, (3,)) + assert_equal(i[1].strides, (0,)) + # Do the reduction + for x, y in i: + # Use a for loop instead of ``y[...] += x`` + # (equivalent to ``y[...] = y[...].copy() + x``), + # because y has zero strides we use for the reduction + for j in range(len(y)): + y[j] += x[j] + assert_equal(b, np.sum(a, axis=1)) + + # Iterator inner double loop was wrong on this one + p = np.arange(2) + 1 + it = np.nditer([p, None], + ['delay_bufalloc', 'reduce_ok', 'buffered', 'external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[-1, 0], [-1, -1]], + itershape=(2, 2)) + with it: + it.operands[1].fill(0) + it.reset() + assert_equal(it[0], [1, 2, 1, 2]) + + # Iterator inner loop should take argument contiguity into account + x = np.ones((7, 13, 8), np.int8)[4:6, 1:11:6, 1:5].transpose(1, 2, 0) + x[...] = np.arange(x.size).reshape(x.shape) + y_base = np.arange(4 * 4, dtype=np.int8).reshape(4, 4) + y_base_copy = y_base.copy() + y = y_base[::2, :, None] + + it = np.nditer([y, x], + ['buffered', 'external_loop', 'reduce_ok'], + [['readwrite'], ['readonly']]) + with it: + for a, b in it: + a.fill(2) + + assert_equal(y_base[1::2], y_base_copy[1::2]) + assert_equal(y_base[::2], 2) + +def test_iter_buffering_reduction_reuse_reduce_loops(): + # There was a bug triggering reuse of the reduce loop inappropriately, + # which caused processing to happen in unnecessarily small chunks + # and overran the buffer. + + a = np.zeros((2, 7)) + b = np.zeros((1, 7)) + it = np.nditer([a, b], flags=['reduce_ok', 'external_loop', 'buffered'], + op_flags=[['readonly'], ['readwrite']], + buffersize=5) + + with it: + bufsizes = [x.shape[0] for x, y in it] + assert_equal(bufsizes, [5, 2, 5, 2]) + assert_equal(sum(bufsizes), a.size) + +def test_iter_writemasked_badinput(): + a = np.zeros((2, 3)) + b = np.zeros((3,)) + m = np.array([[True, True, False], [False, True, False]]) + m2 = np.array([True, True, False]) + m3 = np.array([0, 1, 1], dtype='u1') + mbad1 = np.array([0, 1, 1], dtype='i1') + mbad2 = np.array([0, 1, 1], dtype='f4') + + # Need an 'arraymask' if any operand is 'writemasked' + assert_raises(ValueError, nditer, [a, m], [], + [['readwrite', 'writemasked'], ['readonly']]) + + # A 'writemasked' operand must not be readonly + assert_raises(ValueError, nditer, [a, m], [], + [['readonly', 'writemasked'], ['readonly', 'arraymask']]) + + # 'writemasked' and 'arraymask' may not be used together + assert_raises(ValueError, nditer, [a, m], [], + [['readonly'], ['readwrite', 'arraymask', 'writemasked']]) + + # 'arraymask' may only be specified once + assert_raises(ValueError, nditer, [a, m, m2], [], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask'], + ['readonly', 'arraymask']]) + + # An 'arraymask' with nothing 'writemasked' also doesn't make sense + assert_raises(ValueError, nditer, [a, m], [], + [['readwrite'], ['readonly', 'arraymask']]) + + # A writemasked reduction requires a similarly smaller mask + assert_raises(ValueError, nditer, [a, b, m], ['reduce_ok'], + [['readonly'], + ['readwrite', 'writemasked'], + ['readonly', 'arraymask']]) + # But this should work with a smaller/equal mask to the reduction operand + np.nditer([a, b, m2], ['reduce_ok'], + [['readonly'], + ['readwrite', 'writemasked'], + ['readonly', 'arraymask']]) + # The arraymask itself cannot be a reduction + assert_raises(ValueError, nditer, [a, b, m2], ['reduce_ok'], + [['readonly'], + ['readwrite', 'writemasked'], + ['readwrite', 'arraymask']]) + + # A uint8 mask is ok too + np.nditer([a, m3], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=['f4', None], + casting='same_kind') + # An int8 mask isn't ok + assert_raises(TypeError, np.nditer, [a, mbad1], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=['f4', None], + casting='same_kind') + # A float32 mask isn't ok + assert_raises(TypeError, np.nditer, [a, mbad2], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=['f4', None], + casting='same_kind') + + +def _is_buffered(iterator): + try: + iterator.itviews + except ValueError: + return True + return False + +@pytest.mark.parametrize("arrs", + [np.zeros((3,), dtype='f8'), + np.zeros((9876, 3 * 5), dtype='f8')[::2, :], + np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, :], + # Also test with the last dimension strided (so it does not fit if + # there is repeated access) + np.zeros((9,), dtype='f8')[::3], + np.zeros((9876, 3 * 10), dtype='f8')[::2, ::5], + np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, ::-1]]) +def test_iter_writemasked(arrs): + # Note, the slicing above is to ensure that nditer cannot combine multiple + # axes into one. The repetition is just to make things a bit more + # interesting. + a = arrs.copy() + shape = a.shape + reps = shape[-1] // 3 + msk = np.empty(shape, dtype=bool) + msk[...] = [True, True, False] * reps + + # When buffering is unused, 'writemasked' effectively does nothing. + # It's up to the user of the iterator to obey the requested semantics. + it = np.nditer([a, msk], [], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']]) + with it: + for x, m in it: + x[...] = 1 + # Because we violated the semantics, all the values became 1 + assert_equal(a, np.broadcast_to([1, 1, 1] * reps, shape)) + + # Even if buffering is enabled, we still may be accessing the array + # directly. + it = np.nditer([a, msk], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']]) + # @seberg: I honestly don't currently understand why a "buffered" iterator + # would end up not using a buffer for the small array here at least when + # "writemasked" is used, that seems confusing... Check by testing for + # actual memory overlap! + is_buffered = True + with it: + for x, m in it: + x[...] = 2.5 + if np.may_share_memory(x, a): + is_buffered = False + + if not is_buffered: + # Because we violated the semantics, all the values became 2.5 + assert_equal(a, np.broadcast_to([2.5, 2.5, 2.5] * reps, shape)) + else: + # For large sizes, the iterator may be buffered: + assert_equal(a, np.broadcast_to([2.5, 2.5, 1] * reps, shape)) + a[...] = 2.5 + + # If buffering will definitely happening, for instance because of + # a cast, only the items selected by the mask will be copied back from + # the buffer. + it = np.nditer([a, msk], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=['i8', None], + casting='unsafe') + with it: + for x, m in it: + x[...] = 3 + # Even though we violated the semantics, only the selected values + # were copied back + assert_equal(a, np.broadcast_to([3, 3, 2.5] * reps, shape)) + + +@pytest.mark.parametrize(["mask", "mask_axes"], [ + # Allocated operand (only broadcasts with -1) + (None, [-1, 0]), + # Reduction along the first dimension (with and without op_axes) + (np.zeros((1, 4), dtype="bool"), [0, 1]), + (np.zeros((1, 4), dtype="bool"), None), + # Test 0-D and -1 op_axes + (np.zeros(4, dtype="bool"), [-1, 0]), + (np.zeros((), dtype="bool"), [-1, -1]), + (np.zeros((), dtype="bool"), None)]) +def test_iter_writemasked_broadcast_error(mask, mask_axes): + # This assumes that a readwrite mask makes sense. This is likely not the + # case and should simply be deprecated. + arr = np.zeros((3, 4)) + itflags = ["reduce_ok"] + mask_flags = ["arraymask", "readwrite", "allocate"] + a_flags = ["writeonly", "writemasked"] + if mask_axes is None: + op_axes = None + else: + op_axes = [mask_axes, [0, 1]] + + with assert_raises(ValueError): + np.nditer((mask, arr), flags=itflags, op_flags=[mask_flags, a_flags], + op_axes=op_axes) + + +def test_iter_writemasked_decref(): + # force casting (to make it interesting) by using a structured dtype. + arr = np.arange(10000).astype(">i,O") + original = arr.copy() + mask = np.random.randint(0, 2, size=10000).astype(bool) + + it = np.nditer([arr, mask], ['buffered', "refs_ok"], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=[" string -> longdouble` for the + # conversion. But Python may refuse `str(int)` for huge ints. + # In that case, RuntimeWarning would be correct, but conversion + # fails earlier (seems to happen on 32bit linux, possibly only debug). + if dtype in "gG": + try: + str(too_big_int) + except ValueError: + pytest.skip("`huge_int -> string -> longdouble` failed") + + # Otherwise, we overflow to infinity: + with pytest.warns(RuntimeWarning): + res = scalar_type(1) + too_big_int + assert res.dtype == dtype + assert res == np.inf + + with pytest.warns(RuntimeWarning): + # We force the dtype here, since windows may otherwise pick the + # double instead of the longdouble loop. That leads to slightly + # different results (conversion of the int fails as above). + res = np.add(np.array(1, dtype=dtype), too_big_int, dtype=dtype) + assert res.dtype == dtype + assert res == np.inf + + +@pytest.mark.parametrize("op", [operator.add, operator.pow]) +def test_weak_promotion_scalar_path(op): + # Some additional paths exercising the weak scalars. + + # Integer path: + res = op(np.uint8(3), 5) + assert res == op(3, 5) + assert res.dtype == np.uint8 or res.dtype == bool # noqa: PLR1714 + + with pytest.raises(OverflowError): + op(np.uint8(3), 1000) + + # Float path: + res = op(np.float32(3), 5.) + assert res == op(3., 5.) + assert res.dtype == np.float32 or res.dtype == bool # noqa: PLR1714 + + +def test_nep50_complex_promotion(): + with pytest.warns(RuntimeWarning, match=".*overflow"): + res = np.complex64(3) + complex(2**300) + + assert type(res) == np.complex64 + + +def test_nep50_integer_conversion_errors(): + # Implementation for error paths is mostly missing (as of writing) + with pytest.raises(OverflowError, match=".*uint8"): + np.array([1], np.uint8) + 300 + + with pytest.raises(OverflowError, match=".*uint8"): + np.uint8(1) + 300 + + # Error message depends on platform (maybe unsigned int or unsigned long) + with pytest.raises(OverflowError, + match="Python integer -1 out of bounds for uint8"): + np.uint8(1) + -1 + + +def test_nep50_with_axisconcatenator(): + # Concatenate/r_ does not promote, so this has to error: + with pytest.raises(OverflowError): + np.r_[np.arange(5, dtype=np.int8), 255] + + +@pytest.mark.parametrize("ufunc", [np.add, np.power]) +def test_nep50_huge_integers(ufunc): + # Very large integers are complicated, because they go to uint64 or + # object dtype. This tests covers a few possible paths. + with pytest.raises(OverflowError): + ufunc(np.int64(0), 2**63) # 2**63 too large for int64 + + with pytest.raises(OverflowError): + ufunc(np.uint64(0), 2**64) # 2**64 cannot be represented by uint64 + + # However, 2**63 can be represented by the uint64 (and that is used): + res = ufunc(np.uint64(1), 2**63) + + assert res.dtype == np.uint64 + assert res == ufunc(1, 2**63, dtype=object) + + # The following paths fail to warn correctly about the change: + with pytest.raises(OverflowError): + ufunc(np.int64(1), 2**63) # np.array(2**63) would go to uint + + with pytest.raises(OverflowError): + ufunc(np.int64(1), 2**100) # np.array(2**100) would go to object + + # This would go to object and thus a Python float, not a NumPy one: + res = ufunc(1.0, 2**100) + assert isinstance(res, np.float64) + + +def test_nep50_in_concat_and_choose(): + res = np.concatenate([np.float32(1), 1.], axis=None) + assert res.dtype == "float32" + + res = np.choose(1, [np.float32(1), 1.]) + assert res.dtype == "float32" + + +@pytest.mark.parametrize("expected,dtypes,optional_dtypes", [ + (np.float32, [np.float32], + [np.float16, 0.0, np.uint16, np.int16, np.int8, 0]), + (np.complex64, [np.float32, 0j], + [np.float16, 0.0, np.uint16, np.int16, np.int8, 0]), + (np.float32, [np.int16, np.uint16, np.float16], + [np.int8, np.uint8, np.float32, 0., 0]), + (np.int32, [np.int16, np.uint16], + [np.int8, np.uint8, 0, np.bool]), + ]) +@hypothesis.given(data=strategies.data()) +def test_expected_promotion(expected, dtypes, optional_dtypes, data): + # Sample randomly while ensuring "dtypes" is always present: + optional = data.draw(strategies.lists( + strategies.sampled_from(dtypes + optional_dtypes))) + all_dtypes = dtypes + optional + dtypes_sample = data.draw(strategies.permutations(all_dtypes)) + + res = np.result_type(*dtypes_sample) + assert res == expected + + +@pytest.mark.parametrize("sctype", + [np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64]) +@pytest.mark.parametrize("other_val", + [-2 * 100, -1, 0, 9, 10, 11, 2**63, 2 * 100]) +@pytest.mark.parametrize("comp", + [operator.eq, operator.ne, operator.le, operator.lt, + operator.ge, operator.gt]) +def test_integer_comparison(sctype, other_val, comp): + # Test that comparisons with integers (especially out-of-bound) ones + # works correctly. + val_obj = 10 + val = sctype(val_obj) + # Check that the scalar behaves the same as the python int: + assert comp(10, other_val) == comp(val, other_val) + assert comp(val, other_val) == comp(10, other_val) + # Except for the result type: + assert type(comp(val, other_val)) is np.bool + + # Check that the integer array and object array behave the same: + val_obj = np.array([10, 10], dtype=object) + val = val_obj.astype(sctype) + assert_array_equal(comp(val_obj, other_val), comp(val, other_val)) + assert_array_equal(comp(other_val, val_obj), comp(other_val, val)) + + +@pytest.mark.parametrize("arr", [ + np.ones((100, 100), dtype=np.uint8)[::2], # not trivially iterable + np.ones(20000, dtype=">u4"), # cast and >buffersize + np.ones(100, dtype=">u4"), # fast path compatible with cast +]) +def test_integer_comparison_with_cast(arr): + # Similar to above, but mainly test a few cases that cover the slow path + # the test is limited to unsigned ints and -1 for simplicity. + res = arr >= -1 + assert_array_equal(res, np.ones_like(arr, dtype=bool)) + res = arr < -1 + assert_array_equal(res, np.zeros_like(arr, dtype=bool)) + + +@pytest.mark.parametrize("comp", + [np.equal, np.not_equal, np.less_equal, np.less, + np.greater_equal, np.greater]) +def test_integer_integer_comparison(comp): + # Test that the NumPy comparison ufuncs work with large Python integers + assert comp(2**200, -2**200) == comp(2**200, -2**200, dtype=object) + + +def create_with_scalar(sctype, value): + return sctype(value) + + +def create_with_array(sctype, value): + return np.array([value], dtype=sctype) + + +@pytest.mark.parametrize("sctype", + [np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64]) +@pytest.mark.parametrize("create", [create_with_scalar, create_with_array]) +def test_oob_creation(sctype, create): + iinfo = np.iinfo(sctype) + + with pytest.raises(OverflowError): + create(sctype, iinfo.min - 1) + + with pytest.raises(OverflowError): + create(sctype, iinfo.max + 1) + + with pytest.raises(OverflowError): + create(sctype, str(iinfo.min - 1)) + + with pytest.raises(OverflowError): + create(sctype, str(iinfo.max + 1)) + + assert create(sctype, iinfo.min) == iinfo.min + assert create(sctype, iinfo.max) == iinfo.max diff --git a/local_packages/numpy/_core/tests/test_numeric.py b/local_packages/numpy/_core/tests/test_numeric.py new file mode 100644 index 0000000..1047f4c --- /dev/null +++ b/local_packages/numpy/_core/tests/test_numeric.py @@ -0,0 +1,4301 @@ +import inspect +import itertools +import math +import platform +import sys +import warnings +from decimal import Decimal + +import pytest +from hypothesis import given, strategies as st +from hypothesis.extra import numpy as hynp + +import numpy as np +from numpy import ma +from numpy._core import sctypes +from numpy._core._rational_tests import rational +from numpy._core.numerictypes import obj2sctype +from numpy.exceptions import AxisError +from numpy.random import rand, randint, randn +from numpy.testing import ( + HAS_REFCOUNT, + IS_PYPY, + IS_WASM, + assert_, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_array_max_ulp, + assert_equal, + assert_raises, + assert_raises_regex, +) + + +class TestResize: + def test_copies(self): + A = np.array([[1, 2], [3, 4]]) + Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]]) + assert_equal(np.resize(A, (2, 4)), Ar1) + + Ar2 = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) + assert_equal(np.resize(A, (4, 2)), Ar2) + + Ar3 = np.array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]]) + assert_equal(np.resize(A, (4, 3)), Ar3) + + def test_repeats(self): + A = np.array([1, 2, 3]) + Ar1 = np.array([[1, 2, 3, 1], [2, 3, 1, 2]]) + assert_equal(np.resize(A, (2, 4)), Ar1) + + Ar2 = np.array([[1, 2], [3, 1], [2, 3], [1, 2]]) + assert_equal(np.resize(A, (4, 2)), Ar2) + + Ar3 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]]) + assert_equal(np.resize(A, (4, 3)), Ar3) + + def test_zeroresize(self): + A = np.array([[1, 2], [3, 4]]) + Ar = np.resize(A, (0,)) + assert_array_equal(Ar, np.array([])) + assert_equal(A.dtype, Ar.dtype) + + Ar = np.resize(A, (0, 2)) + assert_equal(Ar.shape, (0, 2)) + + Ar = np.resize(A, (2, 0)) + assert_equal(Ar.shape, (2, 0)) + + def test_reshape_from_zero(self): + # See also gh-6740 + A = np.zeros(0, dtype=[('a', np.float32)]) + Ar = np.resize(A, (2, 1)) + assert_array_equal(Ar, np.zeros((2, 1), Ar.dtype)) + assert_equal(A.dtype, Ar.dtype) + + def test_negative_resize(self): + A = np.arange(0, 10, dtype=np.float32) + new_shape = (-10, -1) + with pytest.raises(ValueError, match=r"negative"): + np.resize(A, new_shape=new_shape) + + def test_unsigned_resize(self): + # ensure unsigned integer sizes don't lead to underflows + for dt_pair in [(np.int32, np.uint32), (np.int64, np.uint64)]: + arr = np.array([[23, 95], [66, 37]]) + assert_array_equal(np.resize(arr, dt_pair[0](1)), + np.resize(arr, dt_pair[1](1))) + + def test_subclass(self): + class MyArray(np.ndarray): + __array_priority__ = 1. + + my_arr = np.array([1]).view(MyArray) + assert type(np.resize(my_arr, 5)) is MyArray + assert type(np.resize(my_arr, 0)) is MyArray + + my_arr = np.array([]).view(MyArray) + assert type(np.resize(my_arr, 5)) is MyArray + + +class TestNonarrayArgs: + # check that non-array arguments to functions wrap them in arrays + def test_choose(self): + choices = [[0, 1, 2], + [3, 4, 5], + [5, 6, 7]] + tgt = [5, 1, 5] + a = [2, 0, 1] + + out = np.choose(a, choices) + assert_equal(out, tgt) + + def test_clip(self): + arr = [-1, 5, 2, 3, 10, -4, -9] + out = np.clip(arr, 2, 7) + tgt = [2, 5, 2, 3, 7, 2, 2] + assert_equal(out, tgt) + + def test_compress(self): + arr = [[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]] + tgt = [[5, 6, 7, 8, 9]] + out = np.compress([0, 1], arr, axis=0) + assert_equal(out, tgt) + + def test_count_nonzero(self): + arr = [[0, 1, 7, 0, 0], + [3, 0, 0, 2, 19]] + tgt = np.array([2, 3]) + out = np.count_nonzero(arr, axis=1) + assert_equal(out, tgt) + + def test_diagonal(self): + a = [[0, 1, 2, 3], + [4, 5, 6, 7], + [8, 9, 10, 11]] + out = np.diagonal(a) + tgt = [0, 5, 10] + + assert_equal(out, tgt) + + def test_mean(self): + A = [[1, 2, 3], [4, 5, 6]] + assert_(np.mean(A) == 3.5) + assert_(np.all(np.mean(A, 0) == np.array([2.5, 3.5, 4.5]))) + assert_(np.all(np.mean(A, 1) == np.array([2., 5.]))) + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_(np.isnan(np.mean([]))) + assert_(w[0].category is RuntimeWarning) + + def test_ptp(self): + a = [3, 4, 5, 10, -3, -5, 6.0] + assert_equal(np.ptp(a, axis=0), 15.0) + + def test_prod(self): + arr = [[1, 2, 3, 4], + [5, 6, 7, 9], + [10, 3, 4, 5]] + tgt = [24, 1890, 600] + + assert_equal(np.prod(arr, axis=-1), tgt) + + def test_ravel(self): + a = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]] + tgt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] + assert_equal(np.ravel(a), tgt) + + def test_repeat(self): + a = [1, 2, 3] + tgt = [1, 1, 2, 2, 3, 3] + + out = np.repeat(a, 2) + assert_equal(out, tgt) + + def test_reshape(self): + arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]] + tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]] + assert_equal(np.reshape(arr, (2, 6)), tgt) + + def test_reshape_shape_arg(self): + arr = np.arange(12) + shape = (3, 4) + expected = arr.reshape(shape) + + with pytest.raises( + TypeError, + match=r"reshape\(\) missing 1 required positional " + "argument: 'shape'" + ): + np.reshape(arr) + + assert_equal(np.reshape(arr, shape), expected) + assert_equal(np.reshape(arr, shape, order="C"), expected) + assert_equal(np.reshape(arr, shape, "C"), expected) + assert_equal(np.reshape(arr, shape=shape), expected) + assert_equal(np.reshape(arr, shape=shape, order="C"), expected) + + def test_reshape_copy_arg(self): + arr = np.arange(24).reshape(2, 3, 4) + arr_f_ord = np.array(arr, order="F") + shape = (12, 2) + + assert np.shares_memory(np.reshape(arr, shape), arr) + assert np.shares_memory(np.reshape(arr, shape, order="C"), arr) + assert np.shares_memory( + np.reshape(arr_f_ord, shape, order="F"), arr_f_ord) + assert np.shares_memory(np.reshape(arr, shape, copy=None), arr) + assert np.shares_memory(np.reshape(arr, shape, copy=False), arr) + assert np.shares_memory(arr.reshape(shape, copy=False), arr) + assert not np.shares_memory(np.reshape(arr, shape, copy=True), arr) + assert not np.shares_memory( + np.reshape(arr, shape, order="C", copy=True), arr) + assert not np.shares_memory( + np.reshape(arr, shape, order="F", copy=True), arr) + assert not np.shares_memory( + np.reshape(arr, shape, order="F", copy=None), arr) + + err_msg = "Unable to avoid creating a copy while reshaping." + with pytest.raises(ValueError, match=err_msg): + np.reshape(arr, shape, order="F", copy=False) + with pytest.raises(ValueError, match=err_msg): + np.reshape(arr_f_ord, shape, order="C", copy=False) + + def test_round(self): + arr = [1.56, 72.54, 6.35, 3.25] + tgt = [1.6, 72.5, 6.4, 3.2] + assert_equal(np.around(arr, decimals=1), tgt) + s = np.float64(1.) + assert_(isinstance(s.round(), np.float64)) + assert_equal(s.round(), 1.) + + @pytest.mark.parametrize('dtype', [ + np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64, + np.float16, np.float32, np.float64, + ]) + def test_dunder_round(self, dtype): + s = dtype(1) + assert_(isinstance(round(s), int)) + assert_(isinstance(round(s, None), int)) + assert_(isinstance(round(s, ndigits=None), int)) + assert_equal(round(s), 1) + assert_equal(round(s, None), 1) + assert_equal(round(s, ndigits=None), 1) + + @pytest.mark.parametrize('val, ndigits', [ + pytest.param(2**31 - 1, -1, + marks=pytest.mark.skip(reason="Out of range of int32") + ), + (2**31 - 1, 1 - math.ceil(math.log10(2**31 - 1))), + (2**31 - 1, -math.ceil(math.log10(2**31 - 1))) + ]) + def test_dunder_round_edgecases(self, val, ndigits): + assert_equal(round(val, ndigits), round(np.int32(val), ndigits)) + + def test_dunder_round_accuracy(self): + f = np.float64(5.1 * 10**73) + assert_(isinstance(round(f, -73), np.float64)) + assert_array_max_ulp(round(f, -73), 5.0 * 10**73) + assert_(isinstance(round(f, ndigits=-73), np.float64)) + assert_array_max_ulp(round(f, ndigits=-73), 5.0 * 10**73) + + i = np.int64(501) + assert_(isinstance(round(i, -2), np.int64)) + assert_array_max_ulp(round(i, -2), 500) + assert_(isinstance(round(i, ndigits=-2), np.int64)) + assert_array_max_ulp(round(i, ndigits=-2), 500) + + @pytest.mark.xfail(raises=AssertionError, reason="gh-15896") + def test_round_py_consistency(self): + f = 5.1 * 10**73 + assert_equal(round(np.float64(f), -73), round(f, -73)) + + def test_searchsorted(self): + arr = [-8, -5, -1, 3, 6, 10] + out = np.searchsorted(arr, 0) + assert_equal(out, 3) + + def test_size(self): + A = [[1, 2, 3], [4, 5, 6]] + assert_(np.size(A) == 6) + assert_(np.size(A, 0) == 2) + assert_(np.size(A, 1) == 3) + assert_(np.size(A, ()) == 1) + assert_(np.size(A, (0,)) == 2) + assert_(np.size(A, (1,)) == 3) + assert_(np.size(A, (0, 1)) == 6) + + def test_squeeze(self): + A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]] + assert_equal(np.squeeze(A).shape, (3, 3)) + assert_equal(np.squeeze(np.zeros((1, 3, 1))).shape, (3,)) + assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=0).shape, (3, 1)) + assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=-1).shape, (1, 3)) + assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=2).shape, (1, 3)) + assert_equal(np.squeeze([np.zeros((3, 1))]).shape, (3,)) + assert_equal(np.squeeze([np.zeros((3, 1))], axis=0).shape, (3, 1)) + assert_equal(np.squeeze([np.zeros((3, 1))], axis=2).shape, (1, 3)) + assert_equal(np.squeeze([np.zeros((3, 1))], axis=-1).shape, (1, 3)) + + def test_std(self): + A = [[1, 2, 3], [4, 5, 6]] + assert_almost_equal(np.std(A), 1.707825127659933) + assert_almost_equal(np.std(A, 0), np.array([1.5, 1.5, 1.5])) + assert_almost_equal(np.std(A, 1), np.array([0.81649658, 0.81649658])) + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_(np.isnan(np.std([]))) + assert_(w[0].category is RuntimeWarning) + + def test_swapaxes(self): + tgt = [[[0, 4], [2, 6]], [[1, 5], [3, 7]]] + a = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]] + out = np.swapaxes(a, 0, 2) + assert_equal(out, tgt) + + def test_sum(self): + m = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + tgt = [[6], [15], [24]] + out = np.sum(m, axis=1, keepdims=True) + + assert_equal(tgt, out) + + def test_take(self): + tgt = [2, 3, 5] + indices = [1, 2, 4] + a = [1, 2, 3, 4, 5] + + out = np.take(a, indices) + assert_equal(out, tgt) + + pairs = [ + (np.int32, np.int32), (np.int32, np.int64), + (np.int64, np.int32), (np.int64, np.int64) + ] + for array_type, indices_type in pairs: + x = np.array([1, 2, 3, 4, 5], dtype=array_type) + ind = np.array([0, 2, 2, 3], dtype=indices_type) + tgt = np.array([1, 3, 3, 4], dtype=array_type) + out = np.take(x, ind) + assert_equal(out, tgt) + assert_equal(out.dtype, tgt.dtype) + + def test_trace(self): + c = [[1, 2], [3, 4], [5, 6]] + assert_equal(np.trace(c), 5) + + def test_transpose(self): + arr = [[1, 2], [3, 4], [5, 6]] + tgt = [[1, 3, 5], [2, 4, 6]] + assert_equal(np.transpose(arr, (1, 0)), tgt) + assert_equal(np.transpose(arr, (-1, -2)), tgt) + assert_equal(np.matrix_transpose(arr), tgt) + + def test_var(self): + A = [[1, 2, 3], [4, 5, 6]] + assert_almost_equal(np.var(A), 2.9166666666666665) + assert_almost_equal(np.var(A, 0), np.array([2.25, 2.25, 2.25])) + assert_almost_equal(np.var(A, 1), np.array([0.66666667, 0.66666667])) + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_(np.isnan(np.var([]))) + assert_(w[0].category is RuntimeWarning) + + B = np.array([None, 0]) + B[0] = 1j + assert_almost_equal(np.var(B), 0.25) + + def test_std_with_mean_keyword(self): + # Setting the seed to make the test reproducible + rng = np.random.RandomState(1234) + A = rng.randn(10, 20, 5) + 0.5 + + mean_out = np.zeros((10, 1, 5)) + std_out = np.zeros((10, 1, 5)) + + mean = np.mean(A, + out=mean_out, + axis=1, + keepdims=True) + + # The returned object should be the object specified during calling + assert mean_out is mean + + std = np.std(A, + out=std_out, + axis=1, + keepdims=True, + mean=mean) + + # The returned object should be the object specified during calling + assert std_out is std + + # Shape of returned mean and std should be same + assert std.shape == mean.shape + assert std.shape == (10, 1, 5) + + # Output should be the same as from the individual algorithms + std_old = np.std(A, axis=1, keepdims=True) + + assert std_old.shape == mean.shape + assert_almost_equal(std, std_old) + + def test_var_with_mean_keyword(self): + # Setting the seed to make the test reproducible + rng = np.random.RandomState(1234) + A = rng.randn(10, 20, 5) + 0.5 + + mean_out = np.zeros((10, 1, 5)) + var_out = np.zeros((10, 1, 5)) + + mean = np.mean(A, + out=mean_out, + axis=1, + keepdims=True) + + # The returned object should be the object specified during calling + assert mean_out is mean + + var = np.var(A, + out=var_out, + axis=1, + keepdims=True, + mean=mean) + + # The returned object should be the object specified during calling + assert var_out is var + + # Shape of returned mean and var should be same + assert var.shape == mean.shape + assert var.shape == (10, 1, 5) + + # Output should be the same as from the individual algorithms + var_old = np.var(A, axis=1, keepdims=True) + + assert var_old.shape == mean.shape + assert_almost_equal(var, var_old) + + def test_std_with_mean_keyword_keepdims_false(self): + rng = np.random.RandomState(1234) + A = rng.randn(10, 20, 5) + 0.5 + + mean = np.mean(A, + axis=1, + keepdims=True) + + std = np.std(A, + axis=1, + keepdims=False, + mean=mean) + + # Shape of returned mean and std should be same + assert std.shape == (10, 5) + + # Output should be the same as from the individual algorithms + std_old = np.std(A, axis=1, keepdims=False) + mean_old = np.mean(A, axis=1, keepdims=False) + + assert std_old.shape == mean_old.shape + assert_equal(std, std_old) + + def test_var_with_mean_keyword_keepdims_false(self): + rng = np.random.RandomState(1234) + A = rng.randn(10, 20, 5) + 0.5 + + mean = np.mean(A, + axis=1, + keepdims=True) + + var = np.var(A, + axis=1, + keepdims=False, + mean=mean) + + # Shape of returned mean and var should be same + assert var.shape == (10, 5) + + # Output should be the same as from the individual algorithms + var_old = np.var(A, axis=1, keepdims=False) + mean_old = np.mean(A, axis=1, keepdims=False) + + assert var_old.shape == mean_old.shape + assert_equal(var, var_old) + + def test_std_with_mean_keyword_where_nontrivial(self): + rng = np.random.RandomState(1234) + A = rng.randn(10, 20, 5) + 0.5 + + where = A > 0.5 + + mean = np.mean(A, + axis=1, + keepdims=True, + where=where) + + std = np.std(A, + axis=1, + keepdims=False, + mean=mean, + where=where) + + # Shape of returned mean and std should be same + assert std.shape == (10, 5) + + # Output should be the same as from the individual algorithms + std_old = np.std(A, axis=1, where=where) + mean_old = np.mean(A, axis=1, where=where) + + assert std_old.shape == mean_old.shape + assert_equal(std, std_old) + + def test_var_with_mean_keyword_where_nontrivial(self): + rng = np.random.RandomState(1234) + A = rng.randn(10, 20, 5) + 0.5 + + where = A > 0.5 + + mean = np.mean(A, + axis=1, + keepdims=True, + where=where) + + var = np.var(A, + axis=1, + keepdims=False, + mean=mean, + where=where) + + # Shape of returned mean and var should be same + assert var.shape == (10, 5) + + # Output should be the same as from the individual algorithms + var_old = np.var(A, axis=1, where=where) + mean_old = np.mean(A, axis=1, where=where) + + assert var_old.shape == mean_old.shape + assert_equal(var, var_old) + + def test_std_with_mean_keyword_multiple_axis(self): + # Setting the seed to make the test reproducible + rng = np.random.RandomState(1234) + A = rng.randn(10, 20, 5) + 0.5 + + axis = (0, 2) + + mean = np.mean(A, + out=None, + axis=axis, + keepdims=True) + + std = np.std(A, + out=None, + axis=axis, + keepdims=False, + mean=mean) + + # Shape of returned mean and std should be same + assert std.shape == (20,) + + # Output should be the same as from the individual algorithms + std_old = np.std(A, axis=axis, keepdims=False) + + assert_almost_equal(std, std_old) + + def test_std_with_mean_keyword_axis_None(self): + # Setting the seed to make the test reproducible + rng = np.random.RandomState(1234) + A = rng.randn(10, 20, 5) + 0.5 + + axis = None + + mean = np.mean(A, + out=None, + axis=axis, + keepdims=True) + + std = np.std(A, + out=None, + axis=axis, + keepdims=False, + mean=mean) + + # Shape of returned mean and std should be same + assert std.shape == () + + # Output should be the same as from the individual algorithms + std_old = np.std(A, axis=axis, keepdims=False) + + assert_almost_equal(std, std_old) + + def test_std_with_mean_keyword_keepdims_true_masked(self): + + A = ma.array([[2., 3., 4., 5.], + [1., 2., 3., 4.]], + mask=[[True, False, True, False], + [True, False, True, False]]) + + B = ma.array([[100., 3., 104., 5.], + [101., 2., 103., 4.]], + mask=[[True, False, True, False], + [True, False, True, False]]) + + mean_out = ma.array([[0., 0., 0., 0.]], + mask=[[False, False, False, False]]) + std_out = ma.array([[0., 0., 0., 0.]], + mask=[[False, False, False, False]]) + + axis = 0 + + mean = np.mean(A, out=mean_out, + axis=axis, keepdims=True) + + std = np.std(A, out=std_out, + axis=axis, keepdims=True, + mean=mean) + + # Shape of returned mean and std should be same + assert std.shape == mean.shape + assert std.shape == (1, 4) + + # Output should be the same as from the individual algorithms + std_old = np.std(A, axis=axis, keepdims=True) + mean_old = np.mean(A, axis=axis, keepdims=True) + + assert std_old.shape == mean_old.shape + assert_almost_equal(std, std_old) + assert_almost_equal(mean, mean_old) + + assert mean_out is mean + assert std_out is std + + # masked elements should be ignored + mean_b = np.mean(B, axis=axis, keepdims=True) + std_b = np.std(B, axis=axis, keepdims=True, mean=mean_b) + assert_almost_equal(std, std_b) + assert_almost_equal(mean, mean_b) + + def test_var_with_mean_keyword_keepdims_true_masked(self): + + A = ma.array([[2., 3., 4., 5.], + [1., 2., 3., 4.]], + mask=[[True, False, True, False], + [True, False, True, False]]) + + B = ma.array([[100., 3., 104., 5.], + [101., 2., 103., 4.]], + mask=[[True, False, True, False], + [True, False, True, False]]) + + mean_out = ma.array([[0., 0., 0., 0.]], + mask=[[False, False, False, False]]) + var_out = ma.array([[0., 0., 0., 0.]], + mask=[[False, False, False, False]]) + + axis = 0 + + mean = np.mean(A, out=mean_out, + axis=axis, keepdims=True) + + var = np.var(A, out=var_out, + axis=axis, keepdims=True, + mean=mean) + + # Shape of returned mean and var should be same + assert var.shape == mean.shape + assert var.shape == (1, 4) + + # Output should be the same as from the individual algorithms + var_old = np.var(A, axis=axis, keepdims=True) + mean_old = np.mean(A, axis=axis, keepdims=True) + + assert var_old.shape == mean_old.shape + assert_almost_equal(var, var_old) + assert_almost_equal(mean, mean_old) + + assert mean_out is mean + assert var_out is var + + # masked elements should be ignored + mean_b = np.mean(B, axis=axis, keepdims=True) + var_b = np.var(B, axis=axis, keepdims=True, mean=mean_b) + assert_almost_equal(var, var_b) + assert_almost_equal(mean, mean_b) + + +class TestIsscalar: + def test_isscalar(self): + assert_(np.isscalar(3.1)) + assert_(np.isscalar(np.int16(12345))) + assert_(np.isscalar(False)) + assert_(np.isscalar('numpy')) + assert_(not np.isscalar([3.1])) + assert_(not np.isscalar(None)) + + # PEP 3141 + from fractions import Fraction + assert_(np.isscalar(Fraction(5, 17))) + from numbers import Number + assert_(np.isscalar(Number())) + + +class TestBoolScalar: + def test_logical(self): + f = np.False_ + t = np.True_ + s = "xyz" + assert_((t and s) is s) + assert_((f and s) is f) + + def test_bitwise_or(self): + f = np.False_ + t = np.True_ + assert_((t | t) is t) + assert_((f | t) is t) + assert_((t | f) is t) + assert_((f | f) is f) + + def test_bitwise_and(self): + f = np.False_ + t = np.True_ + assert_((t & t) is t) + assert_((f & t) is f) + assert_((t & f) is f) + assert_((f & f) is f) + + def test_bitwise_xor(self): + f = np.False_ + t = np.True_ + assert_((t ^ t) is f) + assert_((f ^ t) is t) + assert_((t ^ f) is t) + assert_((f ^ f) is f) + + +class TestBoolArray: + def _create_bool_arrays(self): + # offset for simd tests + t = np.array([True] * 41, dtype=bool)[1::] + f = np.array([False] * 41, dtype=bool)[1::] + o = np.array([False] * 42, dtype=bool)[2::] + nm = f.copy() + im = t.copy() + nm[3] = True + nm[-2] = True + im[3] = False + im[-2] = False + return t, f, o, nm, im + + def test_all_any(self): + t, f, _, nm, im = self._create_bool_arrays() + assert_(t.all()) + assert_(t.any()) + assert_(not f.all()) + assert_(not f.any()) + assert_(nm.any()) + assert_(im.any()) + assert_(not nm.all()) + assert_(not im.all()) + # check bad element in all positions + for i in range(256 - 7): + d = np.array([False] * 256, dtype=bool)[7::] + d[i] = True + assert_(np.any(d)) + e = np.array([True] * 256, dtype=bool)[7::] + e[i] = False + assert_(not np.all(e)) + assert_array_equal(e, ~d) + # big array test for blocked libc loops + for i in list(range(9, 6000, 507)) + [7764, 90021, -10]: + d = np.array([False] * 100043, dtype=bool) + d[i] = True + assert_(np.any(d), msg=f"{i!r}") + e = np.array([True] * 100043, dtype=bool) + e[i] = False + assert_(not np.all(e), msg=f"{i!r}") + + def test_logical_not_abs(self): + t, f, o, nm, im = self._create_bool_arrays() + assert_array_equal(~t, f) + assert_array_equal(np.abs(~t), f) + assert_array_equal(np.abs(~f), t) + assert_array_equal(np.abs(f), f) + assert_array_equal(~np.abs(f), t) + assert_array_equal(~np.abs(t), f) + assert_array_equal(np.abs(~nm), im) + np.logical_not(t, out=o) + assert_array_equal(o, f) + np.abs(t, out=o) + assert_array_equal(o, t) + + def test_logical_and_or_xor(self): + t, f, o, nm, im = self._create_bool_arrays() + assert_array_equal(t | t, t) + assert_array_equal(f | f, f) + assert_array_equal(t | f, t) + assert_array_equal(f | t, t) + np.logical_or(t, t, out=o) + assert_array_equal(o, t) + assert_array_equal(t & t, t) + assert_array_equal(f & f, f) + assert_array_equal(t & f, f) + assert_array_equal(f & t, f) + np.logical_and(t, t, out=o) + assert_array_equal(o, t) + assert_array_equal(t ^ t, f) + assert_array_equal(f ^ f, f) + assert_array_equal(t ^ f, t) + assert_array_equal(f ^ t, t) + np.logical_xor(t, t, out=o) + assert_array_equal(o, f) + + assert_array_equal(nm & t, nm) + assert_array_equal(im & f, False) + assert_array_equal(nm & True, nm) + assert_array_equal(im & False, f) + assert_array_equal(nm | t, t) + assert_array_equal(im | f, im) + assert_array_equal(nm | True, t) + assert_array_equal(im | False, im) + assert_array_equal(nm ^ t, im) + assert_array_equal(im ^ f, im) + assert_array_equal(nm ^ True, im) + assert_array_equal(im ^ False, im) + + +class TestBoolCmp: + def _create_data(self, dtype, size): + # generate data using given dtype and num for size of array + a = np.ones(size, dtype=dtype) + e = np.ones(a.size, dtype=bool) + # generate values for all permutation of 256bit simd vectors + s = 0 + r = int(size / 32) + for i in range(int(size / 8)): + a[s:s + r] = [i & 2**x for x in range(r)] + e[s:s + r] = [(i & 2**x) != 0 for x in range(r)] + s += r + n = a.copy() + n[e] = np.nan + + inf = a.copy() + inf[::3][e[::3]] = np.inf + inf[1::3][e[1::3]] = -np.inf + inf[2::3][e[2::3]] = np.nan + enonan = e.copy() + enonan[2::3] = False + + sign = a.copy() + sign[e] *= -1. + sign[1::6][e[1::6]] = -np.inf + # On RISC-V, many operations that produce NaNs, such as converting + # a -NaN from f64 to f32, return a canonical NaN. The canonical + # NaNs are always positive. See section 11.3 NaN Generation and + # Propagation of the RISC-V Unprivileged ISA for more details. + # We disable the float32 sign test on riscv64 for -np.nan as the sign + # of the NaN will be lost when it's converted to a float32. + if not (dtype == np.float32 and platform.machine() == 'riscv64'): + sign[3::6][e[3::6]] = -np.nan + sign[4::6][e[4::6]] = -0. + return a, e, n, inf, enonan, sign + + def test_float(self): + # offset for alignment test + f, ef, nf, inff, efnonan, signf = self._create_data(np.float32, 256) + for i in range(4): + assert_array_equal(f[i:] > 0, ef[i:]) + assert_array_equal(f[i:] - 1 >= 0, ef[i:]) + assert_array_equal(f[i:] == 0, ~ef[i:]) + assert_array_equal(-f[i:] < 0, ef[i:]) + assert_array_equal(-f[i:] + 1 <= 0, ef[i:]) + r = f[i:] != 0 + assert_array_equal(r, ef[i:]) + r2 = f[i:] != np.zeros_like(f[i:]) + r3 = 0 != f[i:] + assert_array_equal(r, r2) + assert_array_equal(r, r3) + # check bool == 0x1 + assert_array_equal(r.view(np.int8), r.astype(np.int8)) + assert_array_equal(r2.view(np.int8), r2.astype(np.int8)) + assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) + + # isnan on amd64 takes the same code path + assert_array_equal(np.isnan(nf[i:]), ef[i:]) + assert_array_equal(np.isfinite(nf[i:]), ~ef[i:]) + assert_array_equal(np.isfinite(inff[i:]), ~ef[i:]) + assert_array_equal(np.isinf(inff[i:]), efnonan[i:]) + assert_array_equal(np.signbit(signf[i:]), ef[i:]) + + def test_double(self): + # offset for alignment test + d, ed, nd, infd, ednonan, signd = self._create_data(np.float64, 128) + for i in range(2): + assert_array_equal(d[i:] > 0, ed[i:]) + assert_array_equal(d[i:] - 1 >= 0, ed[i:]) + assert_array_equal(d[i:] == 0, ~ed[i:]) + assert_array_equal(-d[i:] < 0, ed[i:]) + assert_array_equal(-d[i:] + 1 <= 0, ed[i:]) + r = d[i:] != 0 + assert_array_equal(r, ed[i:]) + r2 = d[i:] != np.zeros_like(d[i:]) + r3 = 0 != d[i:] + assert_array_equal(r, r2) + assert_array_equal(r, r3) + # check bool == 0x1 + assert_array_equal(r.view(np.int8), r.astype(np.int8)) + assert_array_equal(r2.view(np.int8), r2.astype(np.int8)) + assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) + + # isnan on amd64 takes the same code path + assert_array_equal(np.isnan(nd[i:]), ed[i:]) + assert_array_equal(np.isfinite(nd[i:]), ~ed[i:]) + assert_array_equal(np.isfinite(infd[i:]), ~ed[i:]) + assert_array_equal(np.isinf(infd[i:]), ednonan[i:]) + assert_array_equal(np.signbit(signd[i:]), ed[i:]) + + +class TestSeterr: + def test_default(self): + err = np.geterr() + assert_equal(err, + {'divide': 'warn', + 'invalid': 'warn', + 'over': 'warn', + 'under': 'ignore'} + ) + + def test_set(self): + with np.errstate(): + err = np.seterr() + old = np.seterr(divide='print') + assert_(err == old) + new = np.seterr() + assert_(new['divide'] == 'print') + np.seterr(over='raise') + assert_(np.geterr()['over'] == 'raise') + assert_(new['divide'] == 'print') + np.seterr(**old) + assert_(np.geterr() == old) + + @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support") + @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.") + def test_divide_err(self): + with np.errstate(divide='raise'): + with assert_raises(FloatingPointError): + np.array([1.]) / np.array([0.]) + + np.seterr(divide='ignore') + np.array([1.]) / np.array([0.]) + + +class TestFloatExceptions: + def assert_raises_fpe(self, fpeerr, flop, x, y): + ftype = type(x) + try: + flop(x, y) + assert_(False, + f"Type {ftype} did not raise fpe error '{fpeerr}'.") + except FloatingPointError as exc: + assert_(str(exc).find(fpeerr) >= 0, + f"Type {ftype} raised wrong fpe error '{exc}'.") + + def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2): + # Check that fpe exception is raised. + # + # Given a floating operation `flop` and two scalar values, check that + # the operation raises the floating point exception specified by + # `fpeerr`. Tests all variants with 0-d array scalars as well. + + self.assert_raises_fpe(fpeerr, flop, sc1, sc2) + self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2) + self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()]) + self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()]) + + # Test for all real and complex float types + @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support") + @pytest.mark.parametrize("typecode", np.typecodes["AllFloat"]) + def test_floating_exceptions(self, typecode): + if 'bsd' in sys.platform and typecode in 'gG': + pytest.skip(reason="Fallback impl for (c)longdouble may not raise " + "FPE errors as expected on BSD OSes, " + "see gh-24876, gh-23379") + + # Test basic arithmetic function errors + with np.errstate(all='raise'): + ftype = obj2sctype(typecode) + if np.dtype(ftype).kind == 'f': + # Get some extreme values for the type + fi = np.finfo(ftype) + ft_tiny = fi.tiny + ft_max = fi.max + ft_eps = fi.eps + underflow = 'underflow' + divbyzero = 'divide by zero' + else: + # 'c', complex, corresponding real dtype + rtype = type(ftype(0).real) + fi = np.finfo(rtype) + ft_tiny = ftype(fi.tiny) + ft_max = ftype(fi.max) + ft_eps = ftype(fi.eps) + # The complex types raise different exceptions + underflow = '' + divbyzero = '' + overflow = 'overflow' + invalid = 'invalid' + + # The value of tiny for double double is NaN, so we need to + # pass the assert + if not np.isnan(ft_tiny): + self.assert_raises_fpe(underflow, + lambda a, b: a / b, ft_tiny, ft_max) + self.assert_raises_fpe(underflow, + lambda a, b: a * b, ft_tiny, ft_tiny) + self.assert_raises_fpe(overflow, + lambda a, b: a * b, ft_max, ftype(2)) + self.assert_raises_fpe(overflow, + lambda a, b: a / b, ft_max, ftype(0.5)) + self.assert_raises_fpe(overflow, + lambda a, b: a + b, ft_max, ft_max * ft_eps) + self.assert_raises_fpe(overflow, + lambda a, b: a - b, -ft_max, ft_max * ft_eps) + # On AIX, pow() with double does not raise the overflow exception, + # it returns inf. Long double is the same as double. + if sys.platform != 'aix' or typecode not in 'dDgG': + self.assert_raises_fpe(overflow, + np.power, ftype(2), ftype(2**fi.nexp)) + self.assert_raises_fpe(divbyzero, + lambda a, b: a / b, ftype(1), ftype(0)) + self.assert_raises_fpe( + invalid, lambda a, b: a / b, ftype(np.inf), ftype(np.inf) + ) + self.assert_raises_fpe(invalid, + lambda a, b: a / b, ftype(0), ftype(0)) + self.assert_raises_fpe( + invalid, lambda a, b: a - b, ftype(np.inf), ftype(np.inf) + ) + self.assert_raises_fpe( + invalid, lambda a, b: a + b, ftype(np.inf), ftype(-np.inf) + ) + self.assert_raises_fpe(invalid, + lambda a, b: a * b, ftype(0), ftype(np.inf)) + + @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support") + def test_warnings(self): + # test warning code path + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + with np.errstate(all="warn"): + np.divide(1, 0.) + assert_equal(len(w), 1) + assert_("divide by zero" in str(w[0].message)) + np.array(1e300) * np.array(1e300) + assert_equal(len(w), 2) + assert_("overflow" in str(w[-1].message)) + np.array(np.inf) - np.array(np.inf) + assert_equal(len(w), 3) + assert_("invalid value" in str(w[-1].message)) + np.array(1e-300) * np.array(1e-300) + assert_equal(len(w), 4) + assert_("underflow" in str(w[-1].message)) + + +class TestTypes: + def check_promotion_cases(self, promote_func): + # tests that the scalars get coerced correctly. + b = np.bool(0) + i8, i16, i32, i64 = np.int8(0), np.int16(0), np.int32(0), np.int64(0) + u8, u16, u32, u64 = np.uint8(0), np.uint16(0), np.uint32(0), np.uint64(0) + f32, f64, fld = np.float32(0), np.float64(0), np.longdouble(0) + c64, c128, cld = np.complex64(0), np.complex128(0), np.clongdouble(0) + + # coercion within the same kind + assert_equal(promote_func(i8, i16), np.dtype(np.int16)) + assert_equal(promote_func(i32, i8), np.dtype(np.int32)) + assert_equal(promote_func(i16, i64), np.dtype(np.int64)) + assert_equal(promote_func(u8, u32), np.dtype(np.uint32)) + assert_equal(promote_func(f32, f64), np.dtype(np.float64)) + assert_equal(promote_func(fld, f32), np.dtype(np.longdouble)) + assert_equal(promote_func(f64, fld), np.dtype(np.longdouble)) + assert_equal(promote_func(c128, c64), np.dtype(np.complex128)) + assert_equal(promote_func(cld, c128), np.dtype(np.clongdouble)) + assert_equal(promote_func(c64, fld), np.dtype(np.clongdouble)) + + # coercion between kinds + assert_equal(promote_func(b, i32), np.dtype(np.int32)) + assert_equal(promote_func(b, u8), np.dtype(np.uint8)) + assert_equal(promote_func(i8, u8), np.dtype(np.int16)) + assert_equal(promote_func(u8, i32), np.dtype(np.int32)) + assert_equal(promote_func(i64, u32), np.dtype(np.int64)) + assert_equal(promote_func(u64, i32), np.dtype(np.float64)) + assert_equal(promote_func(i32, f32), np.dtype(np.float64)) + assert_equal(promote_func(i64, f32), np.dtype(np.float64)) + assert_equal(promote_func(f32, i16), np.dtype(np.float32)) + assert_equal(promote_func(f32, u32), np.dtype(np.float64)) + assert_equal(promote_func(f32, c64), np.dtype(np.complex64)) + assert_equal(promote_func(c128, f32), np.dtype(np.complex128)) + assert_equal(promote_func(cld, f64), np.dtype(np.clongdouble)) + + # coercion between scalars and 1-D arrays + assert_equal(promote_func(np.array([b]), i8), np.dtype(np.int8)) + assert_equal(promote_func(np.array([b]), u8), np.dtype(np.uint8)) + assert_equal(promote_func(np.array([b]), i32), np.dtype(np.int32)) + assert_equal(promote_func(np.array([b]), u32), np.dtype(np.uint32)) + assert_equal(promote_func(np.array([i8]), i64), np.dtype(np.int64)) + # unsigned and signed unfortunately tend to promote to float64: + assert_equal(promote_func(u64, np.array([i32])), np.dtype(np.float64)) + assert_equal(promote_func(i64, np.array([u32])), np.dtype(np.int64)) + assert_equal(promote_func(np.array([u16]), i32), np.dtype(np.int32)) + assert_equal(promote_func(np.int32(-1), np.array([u64])), + np.dtype(np.float64)) + assert_equal(promote_func(f64, np.array([f32])), np.dtype(np.float64)) + assert_equal(promote_func(fld, np.array([f32])), + np.dtype(np.longdouble)) + assert_equal(promote_func(np.array([f64]), fld), + np.dtype(np.longdouble)) + assert_equal(promote_func(fld, np.array([c64])), + np.dtype(np.clongdouble)) + assert_equal(promote_func(c64, np.array([f64])), + np.dtype(np.complex128)) + assert_equal(promote_func(np.complex64(3j), np.array([f64])), + np.dtype(np.complex128)) + assert_equal(promote_func(np.array([f32]), c128), + np.dtype(np.complex128)) + + # coercion between scalars and 1-D arrays, where + # the scalar has greater kind than the array + assert_equal(promote_func(np.array([b]), f64), np.dtype(np.float64)) + assert_equal(promote_func(np.array([b]), i64), np.dtype(np.int64)) + assert_equal(promote_func(np.array([b]), u64), np.dtype(np.uint64)) + assert_equal(promote_func(np.array([i8]), f64), np.dtype(np.float64)) + assert_equal(promote_func(np.array([u16]), f64), np.dtype(np.float64)) + + def test_coercion(self): + def res_type(a, b): + return np.add(a, b).dtype + + self.check_promotion_cases(res_type) + + # Use-case: float/complex scalar * bool/int8 array + # shouldn't narrow the float/complex type + for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]: + b = 1.234 * a + assert_equal(b.dtype, np.dtype('f8'), f"array type {a.dtype}") + b = np.longdouble(1.234) * a + assert_equal(b.dtype, np.dtype(np.longdouble), + f"array type {a.dtype}") + b = np.float64(1.234) * a + assert_equal(b.dtype, np.dtype('f8'), f"array type {a.dtype}") + b = np.float32(1.234) * a + assert_equal(b.dtype, np.dtype('f4'), f"array type {a.dtype}") + b = np.float16(1.234) * a + assert_equal(b.dtype, np.dtype('f2'), f"array type {a.dtype}") + + b = 1.234j * a + assert_equal(b.dtype, np.dtype('c16'), f"array type {a.dtype}") + b = np.clongdouble(1.234j) * a + assert_equal(b.dtype, np.dtype(np.clongdouble), + f"array type {a.dtype}") + b = np.complex128(1.234j) * a + assert_equal(b.dtype, np.dtype('c16'), f"array type {a.dtype}") + b = np.complex64(1.234j) * a + assert_equal(b.dtype, np.dtype('c8'), f"array type {a.dtype}") + + # The following use-case is problematic, and to resolve its + # tricky side-effects requires more changes. + # + # Use-case: (1-t)*a, where 't' is a boolean array and 'a' is + # a float32, shouldn't promote to float64 + # + # a = np.array([1.0, 1.5], dtype=np.float32) + # t = np.array([True, False]) + # b = t*a + # assert_equal(b, [1.0, 0.0]) + # assert_equal(b.dtype, np.dtype('f4')) + # b = (1-t)*a + # assert_equal(b, [0.0, 1.5]) + # assert_equal(b.dtype, np.dtype('f4')) + # + # Probably ~t (bitwise negation) is more proper to use here, + # but this is arguably less intuitive to understand at a glance, and + # would fail if 't' is actually an integer array instead of boolean: + # + # b = (~t)*a + # assert_equal(b, [0.0, 1.5]) + # assert_equal(b.dtype, np.dtype('f4')) + + def test_result_type(self): + self.check_promotion_cases(np.result_type) + assert_(np.result_type(None) == np.dtype(None)) + + def test_promote_types_endian(self): + # promote_types should always return native-endian types + assert_equal(np.promote_types('i8', '>i8'), np.dtype('i8')) + + assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21')) + assert_equal(np.promote_types('U16', '>i8'), np.dtype('U21')) + assert_equal(np.promote_types('S5', '>U8'), np.dtype('U8')) + assert_equal(np.promote_types('U8', '>S5'), np.dtype('U8')) + assert_equal(np.promote_types('U8', '>U5'), np.dtype('U8')) + + assert_equal(np.promote_types('M8', '>M8'), np.dtype('M8')) + assert_equal(np.promote_types('m8', '>m8'), np.dtype('m8')) + + def test_can_cast_and_promote_usertypes(self): + # The rational type defines safe casting for signed integers, + # boolean. Rational itself *does* cast safely to double. + # (rational does not actually cast to all signed integers, e.g. + # int64 can be both long and longlong and it registers only the first) + valid_types = ["int8", "int16", "int32", "int64", "bool"] + invalid_types = "BHILQP" + "FDG" + "mM" + "f" + "V" + + rational_dt = np.dtype(rational) + for numpy_dtype in valid_types: + numpy_dtype = np.dtype(numpy_dtype) + assert np.can_cast(numpy_dtype, rational_dt) + assert np.promote_types(numpy_dtype, rational_dt) is rational_dt + + for numpy_dtype in invalid_types: + numpy_dtype = np.dtype(numpy_dtype) + assert not np.can_cast(numpy_dtype, rational_dt) + with pytest.raises(TypeError): + np.promote_types(numpy_dtype, rational_dt) + + double_dt = np.dtype("double") + assert np.can_cast(rational_dt, double_dt) + assert np.promote_types(double_dt, rational_dt) is double_dt + + @pytest.mark.parametrize("swap", ["", "swap"]) + @pytest.mark.parametrize("string_dtype", ["U", "S"]) + def test_promote_types_strings(self, swap, string_dtype): + if swap == "swap": + promote_types = lambda a, b: np.promote_types(b, a) + else: + promote_types = np.promote_types + + S = string_dtype + + # Promote numeric with unsized string: + assert_equal(promote_types('bool', S), np.dtype(S + '5')) + assert_equal(promote_types('b', S), np.dtype(S + '4')) + assert_equal(promote_types('u1', S), np.dtype(S + '3')) + assert_equal(promote_types('u2', S), np.dtype(S + '5')) + assert_equal(promote_types('u4', S), np.dtype(S + '10')) + assert_equal(promote_types('u8', S), np.dtype(S + '20')) + assert_equal(promote_types('i1', S), np.dtype(S + '4')) + assert_equal(promote_types('i2', S), np.dtype(S + '6')) + assert_equal(promote_types('i4', S), np.dtype(S + '11')) + assert_equal(promote_types('i8', S), np.dtype(S + '21')) + # Promote numeric with sized string: + assert_equal(promote_types('bool', S + '1'), np.dtype(S + '5')) + assert_equal(promote_types('bool', S + '30'), np.dtype(S + '30')) + assert_equal(promote_types('b', S + '1'), np.dtype(S + '4')) + assert_equal(promote_types('b', S + '30'), np.dtype(S + '30')) + assert_equal(promote_types('u1', S + '1'), np.dtype(S + '3')) + assert_equal(promote_types('u1', S + '30'), np.dtype(S + '30')) + assert_equal(promote_types('u2', S + '1'), np.dtype(S + '5')) + assert_equal(promote_types('u2', S + '30'), np.dtype(S + '30')) + assert_equal(promote_types('u4', S + '1'), np.dtype(S + '10')) + assert_equal(promote_types('u4', S + '30'), np.dtype(S + '30')) + assert_equal(promote_types('u8', S + '1'), np.dtype(S + '20')) + assert_equal(promote_types('u8', S + '30'), np.dtype(S + '30')) + # Promote with object: + assert_equal(promote_types('O', S + '30'), np.dtype('O')) + + @pytest.mark.parametrize(["dtype1", "dtype2"], + [[np.dtype("V6"), np.dtype("V10")], # mismatch shape + # Mismatching names: + [np.dtype([("name1", "i8")]), np.dtype([("name2", "i8")])], + ]) + def test_invalid_void_promotion(self, dtype1, dtype2): + with pytest.raises(TypeError): + np.promote_types(dtype1, dtype2) + + @pytest.mark.parametrize(["dtype1", "dtype2"], + [[np.dtype("V10"), np.dtype("V10")], + [np.dtype([("name1", "i8")]), + np.dtype([("name1", np.dtype("i8").newbyteorder())])], + [np.dtype("i8,i8"), np.dtype("i8,>i8")], + [np.dtype("i8,i8"), np.dtype("i4,i4")], + ]) + def test_valid_void_promotion(self, dtype1, dtype2): + assert np.promote_types(dtype1, dtype2) == dtype1 + + @pytest.mark.parametrize("dtype", + list(np.typecodes["All"]) + + ["i,i", "10i", "S3", "S100", "U3", "U100", rational]) + def test_promote_identical_types_metadata(self, dtype): + # The same type passed in twice to promote types always + # preserves metadata + metadata = {1: 1} + dtype = np.dtype(dtype, metadata=metadata) + + res = np.promote_types(dtype, dtype) + assert res.metadata == dtype.metadata + + # byte-swapping preserves and makes the dtype native: + dtype = dtype.newbyteorder() + if dtype.isnative: + # The type does not have byte swapping + return + + res = np.promote_types(dtype, dtype) + + # Metadata is (currently) generally lost on byte-swapping (except for + # unicode. + if dtype.char != "U": + assert res.metadata is None + else: + assert res.metadata == metadata + assert res.isnative + + @pytest.mark.slow + @pytest.mark.filterwarnings('ignore:Promotion of numbers:FutureWarning') + @pytest.mark.parametrize(["dtype1", "dtype2"], + itertools.product( + list(np.typecodes["All"]) + + ["i,i", "S3", "S100", "U3", "U100", rational], + repeat=2)) + def test_promote_types_metadata(self, dtype1, dtype2): + """Metadata handling in promotion does not appear formalized + right now in NumPy. This test should thus be considered to + document behaviour, rather than test the correct definition of it. + + This test is very ugly, it was useful for rewriting part of the + promotion, but probably should eventually be replaced/deleted + (i.e. when metadata handling in promotion is better defined). + """ + metadata1 = {1: 1} + metadata2 = {2: 2} + dtype1 = np.dtype(dtype1, metadata=metadata1) + dtype2 = np.dtype(dtype2, metadata=metadata2) + + try: + res = np.promote_types(dtype1, dtype2) + except TypeError: + # Promotion failed, this test only checks metadata + return + + if res.char not in "USV" or res.names is not None or res.shape != (): + # All except string dtypes (and unstructured void) lose metadata + # on promotion (unless both dtypes are identical). + # At some point structured ones did not, but were restrictive. + assert res.metadata is None + elif res == dtype1: + # If one result is the result, it is usually returned unchanged: + assert res is dtype1 + elif res == dtype2: + # dtype1 may have been cast to the same type/kind as dtype2. + # If the resulting dtype is identical we currently pick the cast + # version of dtype1, which lost the metadata: + if np.promote_types(dtype1, dtype2.kind) == dtype2: + res.metadata is None + else: + res.metadata == metadata2 + else: + assert res.metadata is None + + # Try again for byteswapped version + dtype1 = dtype1.newbyteorder() + assert dtype1.metadata == metadata1 + res_bs = np.promote_types(dtype1, dtype2) + assert res_bs == res + assert res_bs.metadata == res.metadata + + def test_can_cast(self): + assert_(np.can_cast(np.int32, np.int64)) + assert_(np.can_cast(np.float64, complex)) + assert_(not np.can_cast(complex, float)) + + assert_(np.can_cast('i8', 'f8')) + assert_(not np.can_cast('i8', 'f4')) + assert_(np.can_cast('i4', 'S11')) + + assert_(np.can_cast('i8', 'i8', 'no')) + assert_(not np.can_cast('i8', 'no')) + + assert_(np.can_cast('i8', 'equiv')) + assert_(not np.can_cast('i8', 'equiv')) + + assert_(np.can_cast('i8', 'safe')) + assert_(not np.can_cast('i4', 'safe')) + + assert_(np.can_cast('i4', 'same_kind')) + assert_(not np.can_cast('u4', 'same_kind')) + + assert_(np.can_cast('u4', 'unsafe')) + + assert_(np.can_cast('bool', 'S5')) + assert_(not np.can_cast('bool', 'S4')) + + assert_(np.can_cast('b', 'S4')) + assert_(not np.can_cast('b', 'S3')) + + assert_(np.can_cast('u1', 'S3')) + assert_(not np.can_cast('u1', 'S2')) + assert_(np.can_cast('u2', 'S5')) + assert_(not np.can_cast('u2', 'S4')) + assert_(np.can_cast('u4', 'S10')) + assert_(not np.can_cast('u4', 'S9')) + assert_(np.can_cast('u8', 'S20')) + assert_(not np.can_cast('u8', 'S19')) + + assert_(np.can_cast('i1', 'S4')) + assert_(not np.can_cast('i1', 'S3')) + assert_(np.can_cast('i2', 'S6')) + assert_(not np.can_cast('i2', 'S5')) + assert_(np.can_cast('i4', 'S11')) + assert_(not np.can_cast('i4', 'S10')) + assert_(np.can_cast('i8', 'S21')) + assert_(not np.can_cast('i8', 'S20')) + + assert_(np.can_cast('bool', 'S5')) + assert_(not np.can_cast('bool', 'S4')) + + assert_(np.can_cast('b', 'U4')) + assert_(not np.can_cast('b', 'U3')) + + assert_(np.can_cast('u1', 'U3')) + assert_(not np.can_cast('u1', 'U2')) + assert_(np.can_cast('u2', 'U5')) + assert_(not np.can_cast('u2', 'U4')) + assert_(np.can_cast('u4', 'U10')) + assert_(not np.can_cast('u4', 'U9')) + assert_(np.can_cast('u8', 'U20')) + assert_(not np.can_cast('u8', 'U19')) + + assert_(np.can_cast('i1', 'U4')) + assert_(not np.can_cast('i1', 'U3')) + assert_(np.can_cast('i2', 'U6')) + assert_(not np.can_cast('i2', 'U5')) + assert_(np.can_cast('i4', 'U11')) + assert_(not np.can_cast('i4', 'U10')) + assert_(np.can_cast('i8', 'U21')) + assert_(not np.can_cast('i8', 'U20')) + + assert_raises(TypeError, np.can_cast, 'i4', None) + assert_raises(TypeError, np.can_cast, None, 'i4') + + # Also test keyword arguments + assert_(np.can_cast(from_=np.int32, to=np.int64)) + + def test_can_cast_simple_to_structured(self): + # Non-structured can only be cast to structured in 'unsafe' mode. + assert_(not np.can_cast('i4', 'i4,i4')) + assert_(not np.can_cast('i4', 'i4,i2')) + assert_(np.can_cast('i4', 'i4,i4', casting='unsafe')) + assert_(np.can_cast('i4', 'i4,i2', casting='unsafe')) + # Even if there is just a single field which is OK. + assert_(not np.can_cast('i2', [('f1', 'i4')])) + assert_(not np.can_cast('i2', [('f1', 'i4')], casting='same_kind')) + assert_(np.can_cast('i2', [('f1', 'i4')], casting='unsafe')) + # It should be the same for recursive structured or subarrays. + assert_(not np.can_cast('i2', [('f1', 'i4,i4')])) + assert_(np.can_cast('i2', [('f1', 'i4,i4')], casting='unsafe')) + assert_(not np.can_cast('i2', [('f1', '(2,3)i4')])) + assert_(np.can_cast('i2', [('f1', '(2,3)i4')], casting='unsafe')) + + def test_can_cast_structured_to_simple(self): + # Need unsafe casting for structured to simple. + assert_(not np.can_cast([('f1', 'i4')], 'i4')) + assert_(np.can_cast([('f1', 'i4')], 'i4', casting='unsafe')) + assert_(np.can_cast([('f1', 'i4')], 'i2', casting='unsafe')) + # Since it is unclear what is being cast, multiple fields to + # single should not work even for unsafe casting. + assert_(not np.can_cast('i4,i4', 'i4', casting='unsafe')) + # But a single field inside a single field is OK. + assert_(not np.can_cast([('f1', [('x', 'i4')])], 'i4')) + assert_(np.can_cast([('f1', [('x', 'i4')])], 'i4', casting='unsafe')) + # And a subarray is fine too - it will just take the first element + # (arguably not very consistently; might also take the first field). + assert_(not np.can_cast([('f0', '(3,)i4')], 'i4')) + assert_(np.can_cast([('f0', '(3,)i4')], 'i4', casting='unsafe')) + # But a structured subarray with multiple fields should fail. + assert_(not np.can_cast([('f0', ('i4,i4'), (2,))], 'i4', + casting='unsafe')) + + def test_can_cast_values(self): + # With NumPy 2 and NEP 50, can_cast errors on Python scalars. We could + # define this as (usually safe) at some point, and already do so + # in `copyto` and ufuncs (but there an error is raised if the integer + # is out of bounds and a warning for out-of-bound floats). + # Raises even for unsafe, previously checked within range (for floats + # that was approximately whether it would overflow to inf). + with pytest.raises(TypeError): + np.can_cast(4, "int8", casting="unsafe") + + with pytest.raises(TypeError): + np.can_cast(4.0, "float64", casting="unsafe") + + with pytest.raises(TypeError): + np.can_cast(4j, "complex128", casting="unsafe") + + @pytest.mark.parametrize("dtype", + list("?bhilqBHILQefdgFDG") + [rational]) + def test_can_cast_scalars(self, dtype): + # Basic test to ensure that scalars are supported in can-cast + # (does not check behavior exhaustively). + dtype = np.dtype(dtype) + scalar = dtype.type(0) + + assert np.can_cast(scalar, "int64") == np.can_cast(dtype, "int64") + assert np.can_cast(scalar, "float32", casting="unsafe") + + +# Custom exception class to test exception propagation in fromiter +class NIterError(Exception): + pass + + +class TestFromiter: + def makegen(self): + return (x**2 for x in range(24)) + + def test_types(self): + ai32 = np.fromiter(self.makegen(), np.int32) + ai64 = np.fromiter(self.makegen(), np.int64) + af = np.fromiter(self.makegen(), float) + assert_(ai32.dtype == np.dtype(np.int32)) + assert_(ai64.dtype == np.dtype(np.int64)) + assert_(af.dtype == np.dtype(float)) + + def test_lengths(self): + expected = np.array(list(self.makegen())) + a = np.fromiter(self.makegen(), int) + a20 = np.fromiter(self.makegen(), int, 20) + assert_(len(a) == len(expected)) + assert_(len(a20) == 20) + assert_raises(ValueError, np.fromiter, + self.makegen(), int, len(expected) + 10) + + def test_values(self): + expected = np.array(list(self.makegen())) + a = np.fromiter(self.makegen(), int) + a20 = np.fromiter(self.makegen(), int, 20) + assert_(np.all(a == expected, axis=0)) + assert_(np.all(a20 == expected[:20], axis=0)) + + def load_data(self, n, eindex): + # Utility method for the issue 2592 tests. + # Raise an exception at the desired index in the iterator. + for e in range(n): + if e == eindex: + raise NIterError(f'error at index {eindex}') + yield e + + @pytest.mark.parametrize("dtype", [int, object]) + @pytest.mark.parametrize(["count", "error_index"], [(10, 5), (10, 9)]) + def test_2592(self, count, error_index, dtype): + # Test iteration exceptions are correctly raised. The data/generator + # has `count` elements but errors at `error_index` + iterable = self.load_data(count, error_index) + with pytest.raises(NIterError): + np.fromiter(iterable, dtype=dtype, count=count) + + @pytest.mark.parametrize("dtype", ["S", "S0", "V0", "U0"]) + def test_empty_not_structured(self, dtype): + # Note, "S0" could be allowed at some point, so long "S" (without + # any length) is rejected. + with pytest.raises(ValueError, match="Must specify length"): + np.fromiter([], dtype=dtype) + + @pytest.mark.parametrize(["dtype", "data"], + [("d", [1, 2, 3, 4, 5, 6, 7, 8, 9]), + ("O", [1, 2, 3, 4, 5, 6, 7, 8, 9]), + ("i,O", [(1, 2), (5, 4), (2, 3), (9, 8), (6, 7)]), + # subarray dtypes (important because their dimensions end up + # in the result arrays dimension: + ("2i", [(1, 2), (5, 4), (2, 3), (9, 8), (6, 7)]), + (np.dtype(("O", (2, 3))), + [((1, 2, 3), (3, 4, 5)), ((3, 2, 1), (5, 4, 3))])]) + @pytest.mark.parametrize("length_hint", [0, 1]) + def test_growth_and_complicated_dtypes(self, dtype, data, length_hint): + dtype = np.dtype(dtype) + + data = data * 100 # make sure we realloc a bit + + class MyIter: + # Class/example from gh-15789 + def __length_hint__(self): + # only required to be an estimate, this is legal + return length_hint # 0 or 1 + + def __iter__(self): + return iter(data) + + res = np.fromiter(MyIter(), dtype=dtype) + expected = np.array(data, dtype=dtype) + + assert_array_equal(res, expected) + + def test_empty_result(self): + class MyIter: + def __length_hint__(self): + return 10 + + def __iter__(self): + return iter([]) # actual iterator is empty. + + res = np.fromiter(MyIter(), dtype="d") + assert res.shape == (0,) + assert res.dtype == "d" + + def test_too_few_items(self): + msg = "iterator too short: Expected 10 but iterator had only 3 items." + with pytest.raises(ValueError, match=msg): + np.fromiter([1, 2, 3], count=10, dtype=int) + + def test_failed_itemsetting(self): + with pytest.raises(TypeError): + np.fromiter([1, None, 3], dtype=int) + + # The following manages to hit somewhat trickier code paths: + iterable = ((2, 3, 4) for i in range(5)) + with pytest.raises(ValueError): + np.fromiter(iterable, dtype=np.dtype((int, 2))) + + +class TestNonzero: + def test_nonzero_trivial(self): + assert_equal(np.count_nonzero(np.array([])), 0) + assert_equal(np.count_nonzero(np.array([], dtype='?')), 0) + assert_equal(np.nonzero(np.array([])), ([],)) + + assert_equal(np.count_nonzero(np.array([0])), 0) + assert_equal(np.count_nonzero(np.array([0], dtype='?')), 0) + assert_equal(np.nonzero(np.array([0])), ([],)) + + assert_equal(np.count_nonzero(np.array([1])), 1) + assert_equal(np.count_nonzero(np.array([1], dtype='?')), 1) + assert_equal(np.nonzero(np.array([1])), ([0],)) + + def test_nonzero_zerodim(self): + err_msg = "Calling nonzero on 0d arrays is not allowed" + with assert_raises_regex(ValueError, err_msg): + np.nonzero(np.array(0)) + with assert_raises_regex(ValueError, err_msg): + np.array(1).nonzero() + + def test_nonzero_onedim(self): + x = np.array([1, 0, 2, -1, 0, 0, 8]) + assert_equal(np.count_nonzero(x), 4) + assert_equal(np.count_nonzero(x), 4) + assert_equal(np.nonzero(x), ([0, 2, 3, 6],)) + + # x = np.array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)], + # dtype=[('a', 'i4'), ('b', 'i2')]) + x = np.array( + [(1, 2, -5, -3), (0, 0, 2, 7), (1, 1, 0, 1), (-1, 3, 1, 0), (0, 7, 0, 4)], + dtype=[('a', 'i4'), ('b', 'i2'), ('c', 'i1'), ('d', 'i8')] + ) + assert_equal(np.count_nonzero(x['a']), 3) + assert_equal(np.count_nonzero(x['b']), 4) + assert_equal(np.count_nonzero(x['c']), 3) + assert_equal(np.count_nonzero(x['d']), 4) + assert_equal(np.nonzero(x['a']), ([0, 2, 3],)) + assert_equal(np.nonzero(x['b']), ([0, 2, 3, 4],)) + + def test_nonzero_twodim(self): + x = np.array([[0, 1, 0], [2, 0, 3]]) + assert_equal(np.count_nonzero(x.astype('i1')), 3) + assert_equal(np.count_nonzero(x.astype('i2')), 3) + assert_equal(np.count_nonzero(x.astype('i4')), 3) + assert_equal(np.count_nonzero(x.astype('i8')), 3) + assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2])) + + x = np.eye(3) + assert_equal(np.count_nonzero(x.astype('i1')), 3) + assert_equal(np.count_nonzero(x.astype('i2')), 3) + assert_equal(np.count_nonzero(x.astype('i4')), 3) + assert_equal(np.count_nonzero(x.astype('i8')), 3) + assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2])) + + x = np.array([[(0, 1), (0, 0), (1, 11)], + [(1, 1), (1, 0), (0, 0)], + [(0, 0), (1, 5), (0, 1)]], dtype=[('a', 'f4'), ('b', 'u1')]) + assert_equal(np.count_nonzero(x['a']), 4) + assert_equal(np.count_nonzero(x['b']), 5) + assert_equal(np.nonzero(x['a']), ([0, 1, 1, 2], [2, 0, 1, 1])) + assert_equal(np.nonzero(x['b']), ([0, 0, 1, 2, 2], [0, 2, 0, 1, 2])) + + assert_(not x['a'].T.flags.aligned) + assert_equal(np.count_nonzero(x['a'].T), 4) + assert_equal(np.count_nonzero(x['b'].T), 5) + assert_equal(np.nonzero(x['a'].T), ([0, 1, 1, 2], [1, 1, 2, 0])) + assert_equal(np.nonzero(x['b'].T), ([0, 0, 1, 2, 2], [0, 1, 2, 0, 2])) + + def test_sparse(self): + # test special sparse condition boolean code path + for i in range(20): + c = np.zeros(200, dtype=bool) + c[i::20] = True + assert_equal(np.nonzero(c)[0], np.arange(i, 200 + i, 20)) + + c = np.zeros(400, dtype=bool) + c[10 + i:20 + i] = True + c[20 + i * 2] = True + assert_equal(np.nonzero(c)[0], + np.concatenate((np.arange(10 + i, 20 + i), [20 + i * 2]))) + + @pytest.mark.parametrize('dtype', [np.float32, np.float64]) + def test_nonzero_float_dtypes(self, dtype): + rng = np.random.default_rng(seed=10) + x = ((2**33) * rng.normal(size=100)).astype(dtype) + x[rng.choice(50, size=100)] = 0 + idxs = np.nonzero(x)[0] + assert_equal(np.array_equal(np.where(x != 0)[0], idxs), True) + + @pytest.mark.parametrize('dtype', [bool, np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64]) + def test_nonzero_integer_dtypes(self, dtype): + rng = np.random.default_rng(seed=10) + x = rng.integers(0, 255, size=100).astype(dtype) + x[rng.choice(50, size=100)] = 0 + idxs = np.nonzero(x)[0] + assert_equal(np.array_equal(np.where(x != 0)[0], idxs), True) + + def test_return_type(self): + class C(np.ndarray): + pass + + for view in (C, np.ndarray): + for nd in range(1, 4): + shape = tuple(range(2, 2 + nd)) + x = np.arange(np.prod(shape)).reshape(shape).view(view) + for nzx in (np.nonzero(x), x.nonzero()): + for nzx_i in nzx: + assert_(type(nzx_i) is np.ndarray) + assert_(nzx_i.flags.writeable) + + def test_count_nonzero_axis(self): + # Basic check of functionality + m = np.array([[0, 1, 7, 0, 0], [3, 0, 0, 2, 19]]) + + expected = np.array([1, 1, 1, 1, 1]) + assert_equal(np.count_nonzero(m, axis=0), expected) + + expected = np.array([2, 3]) + assert_equal(np.count_nonzero(m, axis=1), expected) + + assert_raises(ValueError, np.count_nonzero, m, axis=(1, 1)) + assert_raises(TypeError, np.count_nonzero, m, axis='foo') + assert_raises(AxisError, np.count_nonzero, m, axis=3) + assert_raises(TypeError, np.count_nonzero, + m, axis=np.array([[1], [2]])) + + def test_count_nonzero_axis_all_dtypes(self): + # More thorough test that the axis argument is respected + # for all dtypes and responds correctly when presented with + # either integer or tuple arguments for axis + msg = "Mismatch for dtype: %s" + + def assert_equal_w_dt(a, b, err_msg): + assert_equal(a.dtype, b.dtype, err_msg=err_msg) + assert_equal(a, b, err_msg=err_msg) + + for dt in np.typecodes['All']: + err_msg = msg % (np.dtype(dt).name,) + + if dt != 'V': + if dt != 'M': + m = np.zeros((3, 3), dtype=dt) + n = np.ones(1, dtype=dt) + + m[0, 0] = n[0] + m[1, 0] = n[0] + + else: # np.zeros doesn't work for np.datetime64 + m = np.array(['1970-01-01'] * 9) + m = m.reshape((3, 3)) + + m[0, 0] = '1970-01-12' + m[1, 0] = '1970-01-12' + m = m.astype(dt) + + expected = np.array([2, 0, 0], dtype=np.intp) + assert_equal_w_dt(np.count_nonzero(m, axis=0), + expected, err_msg=err_msg) + + expected = np.array([1, 1, 0], dtype=np.intp) + assert_equal_w_dt(np.count_nonzero(m, axis=1), + expected, err_msg=err_msg) + + expected = np.array(2) + assert_equal(np.count_nonzero(m, axis=(0, 1)), + expected, err_msg=err_msg) + assert_equal(np.count_nonzero(m, axis=None), + expected, err_msg=err_msg) + assert_equal(np.count_nonzero(m), + expected, err_msg=err_msg) + + if dt == 'V': + # There are no 'nonzero' objects for np.void, so the testing + # setup is slightly different for this dtype + m = np.array([np.void(1)] * 6).reshape((2, 3)) + + expected = np.array([0, 0, 0], dtype=np.intp) + assert_equal_w_dt(np.count_nonzero(m, axis=0), + expected, err_msg=err_msg) + + expected = np.array([0, 0], dtype=np.intp) + assert_equal_w_dt(np.count_nonzero(m, axis=1), + expected, err_msg=err_msg) + + expected = np.array(0) + assert_equal(np.count_nonzero(m, axis=(0, 1)), + expected, err_msg=err_msg) + assert_equal(np.count_nonzero(m, axis=None), + expected, err_msg=err_msg) + assert_equal(np.count_nonzero(m), + expected, err_msg=err_msg) + + def test_count_nonzero_axis_consistent(self): + # Check that the axis behaviour for valid axes in + # non-special cases is consistent (and therefore + # correct) by checking it against an integer array + # that is then casted to the generic object dtype + from itertools import combinations, permutations + + axis = (0, 1, 2, 3) + size = (5, 5, 5, 5) + msg = "Mismatch for axis: %s" + + rng = np.random.RandomState(1234) + m = rng.randint(-100, 100, size=size) + n = m.astype(object) + + for length in range(len(axis)): + for combo in combinations(axis, length): + for perm in permutations(combo): + assert_equal( + np.count_nonzero(m, axis=perm), + np.count_nonzero(n, axis=perm), + err_msg=msg % (perm,)) + + def test_countnonzero_axis_empty(self): + a = np.array([[0, 0, 1], [1, 0, 1]]) + assert_equal(np.count_nonzero(a, axis=()), a.astype(bool)) + + def test_countnonzero_keepdims(self): + a = np.array([[0, 0, 1, 0], + [0, 3, 5, 0], + [7, 9, 2, 0]]) + assert_equal(np.count_nonzero(a, axis=0, keepdims=True), + [[1, 2, 3, 0]]) + assert_equal(np.count_nonzero(a, axis=1, keepdims=True), + [[1], [2], [3]]) + assert_equal(np.count_nonzero(a, keepdims=True), + [[6]]) + + def test_array_method(self): + # Tests that the array method + # call to nonzero works + m = np.array([[1, 0, 0], [4, 0, 6]]) + tgt = [[0, 1, 1], [0, 0, 2]] + + assert_equal(m.nonzero(), tgt) + + def test_nonzero_invalid_object(self): + # gh-9295 + a = np.array([np.array([1, 2]), 3], dtype=object) + assert_raises(ValueError, np.nonzero, a) + + class BoolErrors: + def __bool__(self): + raise ValueError("Not allowed") + + assert_raises(ValueError, np.nonzero, np.array([BoolErrors()])) + + def test_nonzero_sideeffect_safety(self): + # gh-13631 + class FalseThenTrue: + _val = False + + def __bool__(self): + try: + return self._val + finally: + self._val = True + + class TrueThenFalse: + _val = True + + def __bool__(self): + try: + return self._val + finally: + self._val = False + + # result grows on the second pass + a = np.array([True, FalseThenTrue()]) + assert_raises(RuntimeError, np.nonzero, a) + + a = np.array([[True], [FalseThenTrue()]]) + assert_raises(RuntimeError, np.nonzero, a) + + # result shrinks on the second pass + a = np.array([False, TrueThenFalse()]) + assert_raises(RuntimeError, np.nonzero, a) + + a = np.array([[False], [TrueThenFalse()]]) + assert_raises(RuntimeError, np.nonzero, a) + + def test_nonzero_sideffects_structured_void(self): + # Checks that structured void does not mutate alignment flag of + # original array. + arr = np.zeros(5, dtype="i1,i8,i8") # `ones` may short-circuit + assert arr.flags.aligned # structs are considered "aligned" + assert not arr["f2"].flags.aligned + # make sure that nonzero/count_nonzero do not flip the flag: + np.nonzero(arr) + assert arr.flags.aligned + np.count_nonzero(arr) + assert arr.flags.aligned + + def test_nonzero_exception_safe(self): + # gh-13930 + + class ThrowsAfter: + def __init__(self, iters): + self.iters_left = iters + + def __bool__(self): + if self.iters_left == 0: + raise ValueError("called `iters` times") + + self.iters_left -= 1 + return True + + """ + Test that a ValueError is raised instead of a SystemError + + If the __bool__ function is called after the error state is set, + Python (cpython) will raise a SystemError. + """ + + # assert that an exception in first pass is handled correctly + a = np.array([ThrowsAfter(5)] * 10) + assert_raises(ValueError, np.nonzero, a) + + # raise exception in second pass for 1-dimensional loop + a = np.array([ThrowsAfter(15)] * 10) + assert_raises(ValueError, np.nonzero, a) + + # raise exception in second pass for n-dimensional loop + a = np.array([[ThrowsAfter(15)]] * 10) + assert_raises(ValueError, np.nonzero, a) + + def test_nonzero_byteorder(self): + values = [0., -0., 1, float('nan'), 0, 1, + np.float16(0), np.float16(12.3)] + expected_values = [0, 0, 1, 1, 0, 1, 0, 1] + + for value, expected in zip(values, expected_values): + A = np.array([value]) + A_byteswapped = (A.view(A.dtype.newbyteorder()).byteswap()).copy() + + assert np.count_nonzero(A) == expected + assert np.count_nonzero(A_byteswapped) == expected + + def test_count_nonzero_non_aligned_array(self): + # gh-27523 + b = np.zeros(64 + 1, dtype=np.int8)[1:] + b = b.view(int) + b[:] = np.arange(b.size) + b[::2] = 0 + assert b.flags.aligned is False + assert np.count_nonzero(b) == b.size / 2 + + b = np.zeros(64 + 1, dtype=np.float16)[1:] + b = b.view(float) + b[:] = np.arange(b.size) + b[::2] = 0 + assert b.flags.aligned is False + assert np.count_nonzero(b) == b.size / 2 + + +class TestIndex: + def test_boolean(self): + a = rand(3, 5, 8) + V = rand(5, 8) + g1 = randint(0, 5, size=15) + g2 = randint(0, 8, size=15) + V[g1, g2] = -V[g1, g2] + assert_( + (np.array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all() + ) + + def test_boolean_edgecase(self): + a = np.array([], dtype='int32') + b = np.array([], dtype='bool') + c = a[b] + assert_equal(c, []) + assert_equal(c.dtype, np.dtype('int32')) + + +class TestBinaryRepr: + def test_zero(self): + assert_equal(np.binary_repr(0), '0') + + def test_positive(self): + assert_equal(np.binary_repr(10), '1010') + assert_equal(np.binary_repr(12522), + '11000011101010') + assert_equal(np.binary_repr(10736848), + '101000111101010011010000') + + def test_negative(self): + assert_equal(np.binary_repr(-1), '-1') + assert_equal(np.binary_repr(-10), '-1010') + assert_equal(np.binary_repr(-12522), + '-11000011101010') + assert_equal(np.binary_repr(-10736848), + '-101000111101010011010000') + + def test_sufficient_width(self): + assert_equal(np.binary_repr(0, width=5), '00000') + assert_equal(np.binary_repr(10, width=7), '0001010') + assert_equal(np.binary_repr(-5, width=7), '1111011') + + def test_neg_width_boundaries(self): + # see gh-8670 + + # Ensure that the example in the issue does not + # break before proceeding to a more thorough test. + assert_equal(np.binary_repr(-128, width=8), '10000000') + + for width in range(1, 11): + num = -2**(width - 1) + exp = '1' + (width - 1) * '0' + assert_equal(np.binary_repr(num, width=width), exp) + + def test_large_neg_int64(self): + # See gh-14289. + assert_equal(np.binary_repr(np.int64(-2**62), width=64), + '11' + '0' * 62) + + +class TestBaseRepr: + def test_base3(self): + assert_equal(np.base_repr(3**5, 3), '100000') + + def test_positive(self): + assert_equal(np.base_repr(12, 10), '12') + assert_equal(np.base_repr(12, 10, 4), '000012') + assert_equal(np.base_repr(12, 4), '30') + assert_equal(np.base_repr(3731624803700888, 36), '10QR0ROFCEW') + + def test_negative(self): + assert_equal(np.base_repr(-12, 10), '-12') + assert_equal(np.base_repr(-12, 10, 4), '-000012') + assert_equal(np.base_repr(-12, 4), '-30') + + def test_base_range(self): + with assert_raises(ValueError): + np.base_repr(1, 1) + with assert_raises(ValueError): + np.base_repr(1, 37) + + def test_minimal_signed_int(self): + assert_equal(np.base_repr(np.int8(-128)), '-10000000') + + +def _test_array_equal_parametrizations(): + """ + we pre-create arrays as we sometime want to pass the same instance + and sometime not. Passing the same instances may not mean the array are + equal, especially when containing None + """ + # those are 0-d arrays, it used to be a special case + # where (e0 == e0).all() would raise + e0 = np.array(0, dtype="int") + e1 = np.array(1, dtype="float") + # x,y, nan_equal, expected_result + yield (e0, e0.copy(), None, True) + yield (e0, e0.copy(), False, True) + yield (e0, e0.copy(), True, True) + + # + yield (e1, e1.copy(), None, True) + yield (e1, e1.copy(), False, True) + yield (e1, e1.copy(), True, True) + + # Non-nanable - those cannot hold nans + a12 = np.array([1, 2]) + a12b = a12.copy() + a123 = np.array([1, 2, 3]) + a13 = np.array([1, 3]) + a34 = np.array([3, 4]) + + aS1 = np.array(["a"], dtype="S1") + aS1b = aS1.copy() + aS1u4 = np.array([("a", 1)], dtype="S1,u4") + aS1u4b = aS1u4.copy() + + yield (a12, a12b, None, True) + yield (a12, a12, None, True) + yield (a12, a123, None, False) + yield (a12, a34, None, False) + yield (a12, a13, None, False) + yield (aS1, aS1b, None, True) + yield (aS1, aS1, None, True) + + # Non-float dtype - equal_nan should have no effect, + yield (a123, a123, None, True) + yield (a123, a123, False, True) + yield (a123, a123, True, True) + yield (a123, a123.copy(), None, True) + yield (a123, a123.copy(), False, True) + yield (a123, a123.copy(), True, True) + yield (a123.astype("float"), a123.astype("float"), None, True) + yield (a123.astype("float"), a123.astype("float"), False, True) + yield (a123.astype("float"), a123.astype("float"), True, True) + + # these can hold None + b1 = np.array([1, 2, np.nan]) + b2 = np.array([1, np.nan, 2]) + b3 = np.array([1, 2, np.inf]) + b4 = np.array(np.nan) + + # instances are the same + yield (b1, b1, None, False) + yield (b1, b1, False, False) + yield (b1, b1, True, True) + + # equal but not same instance + yield (b1, b1.copy(), None, False) + yield (b1, b1.copy(), False, False) + yield (b1, b1.copy(), True, True) + + # same once stripped of Nan + yield (b1, b2, None, False) + yield (b1, b2, False, False) + yield (b1, b2, True, False) + + # nan's not conflated with inf's + yield (b1, b3, None, False) + yield (b1, b3, False, False) + yield (b1, b3, True, False) + + # all Nan + yield (b4, b4, None, False) + yield (b4, b4, False, False) + yield (b4, b4, True, True) + yield (b4, b4.copy(), None, False) + yield (b4, b4.copy(), False, False) + yield (b4, b4.copy(), True, True) + + t1 = b1.astype("timedelta64") + t2 = b2.astype("timedelta64") + + # Timedeltas are particular + yield (t1, t1, None, False) + yield (t1, t1, False, False) + yield (t1, t1, True, True) + + yield (t1, t1.copy(), None, False) + yield (t1, t1.copy(), False, False) + yield (t1, t1.copy(), True, True) + + yield (t1, t2, None, False) + yield (t1, t2, False, False) + yield (t1, t2, True, False) + + # Multi-dimensional array + md1 = np.array([[0, 1], [np.nan, 1]]) + + yield (md1, md1, None, False) + yield (md1, md1, False, False) + yield (md1, md1, True, True) + yield (md1, md1.copy(), None, False) + yield (md1, md1.copy(), False, False) + yield (md1, md1.copy(), True, True) + # both complexes are nan+nan.j but the same instance + cplx1, cplx2 = [np.array([np.nan + np.nan * 1j])] * 2 + + # only real or img are nan. + cplx3, cplx4 = np.complex64(1, np.nan), np.complex64(np.nan, 1) + + # Complex values + yield (cplx1, cplx2, None, False) + yield (cplx1, cplx2, False, False) + yield (cplx1, cplx2, True, True) + + # Complex values, 1+nan, nan+1j + yield (cplx3, cplx4, None, False) + yield (cplx3, cplx4, False, False) + yield (cplx3, cplx4, True, True) + + +class TestArrayComparisons: + @pytest.mark.parametrize( + "bx,by,equal_nan,expected", _test_array_equal_parametrizations() + ) + def test_array_equal_equal_nan(self, bx, by, equal_nan, expected): + """ + This test array_equal for a few combinations: + + - are the two inputs the same object or not (same object may not + be equal if contains NaNs) + - Whether we should consider or not, NaNs, being equal. + + """ + if equal_nan is None: + res = np.array_equal(bx, by) + else: + res = np.array_equal(bx, by, equal_nan=equal_nan) + assert_(res is expected) + assert_(type(res) is bool) + + def test_array_equal_different_scalar_types(self): + # https://github.com/numpy/numpy/issues/27271 + a = np.array("foo") + b = np.array(1) + assert not np.array_equal(a, b) + assert not np.array_equiv(a, b) + + def test_none_compares_elementwise(self): + a = np.array([None, 1, None], dtype=object) + assert_equal(a == None, [True, False, True]) # noqa: E711 + assert_equal(a != None, [False, True, False]) # noqa: E711 + + a = np.ones(3) + assert_equal(a == None, [False, False, False]) # noqa: E711 + assert_equal(a != None, [True, True, True]) # noqa: E711 + + def test_array_equiv(self): + res = np.array_equiv(np.array([1, 2]), np.array([1, 2])) + assert_(res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 2]), np.array([1, 2, 3])) + assert_(not res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 2]), np.array([3, 4])) + assert_(not res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 2]), np.array([1, 3])) + assert_(not res) + assert_(type(res) is bool) + + res = np.array_equiv(np.array([1, 1]), np.array([1])) + assert_(res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 1]), np.array([[1], [1]])) + assert_(res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 2]), np.array([2])) + assert_(not res) + assert_(type(res) is bool) + res = np.array_equiv(np.array([1, 2]), np.array([[1], [2]])) + assert_(not res) + assert_(type(res) is bool) + res = np.array_equiv( + np.array([1, 2]), + np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), + ) + assert_(not res) + assert_(type(res) is bool) + + @pytest.mark.parametrize("dtype", ["V0", "V3", "V10"]) + def test_compare_unstructured_voids(self, dtype): + zeros = np.zeros(3, dtype=dtype) + + assert_array_equal(zeros, zeros) + assert not (zeros != zeros).any() + + if dtype == "V0": + # Can't test != of actually different data + return + + nonzeros = np.array([b"1", b"2", b"3"], dtype=dtype) + + assert not (zeros == nonzeros).any() + assert (zeros != nonzeros).all() + + +def assert_array_strict_equal(x, y): + assert_array_equal(x, y) + # Check flags, 32 bit arches typically don't provide 16 byte alignment + if ((x.dtype.alignment <= 8 or + np.intp().dtype.itemsize != 4) and + sys.platform != 'win32'): + assert_(x.flags == y.flags) + else: + assert_(x.flags.owndata == y.flags.owndata) + assert_(x.flags.writeable == y.flags.writeable) + assert_(x.flags.c_contiguous == y.flags.c_contiguous) + assert_(x.flags.f_contiguous == y.flags.f_contiguous) + assert_(x.flags.writebackifcopy == y.flags.writebackifcopy) + # check endianness + assert_(x.dtype.isnative == y.dtype.isnative) + + +class TestClip: + nr = 5 + nc = 3 + + def fastclip(self, a, m, M, out=None, **kwargs): + return a.clip(m, M, out=out, **kwargs) + + def clip(self, a, m, M, out=None): + # use a.choose to verify fastclip result + selector = np.less(a, m) + 2 * np.greater(a, M) + return selector.choose((a, m, M), out=out) + + # Handy functions + def _generate_data(self, n, m): + return randn(n, m) + + def _generate_data_complex(self, n, m): + return randn(n, m) + 1.j * rand(n, m) + + def _generate_flt_data(self, n, m): + return (randn(n, m)).astype(np.float32) + + def _neg_byteorder(self, a): + a = np.asarray(a) + if sys.byteorder == 'little': + a = a.astype(a.dtype.newbyteorder('>')) + else: + a = a.astype(a.dtype.newbyteorder('<')) + return a + + def _generate_non_native_data(self, n, m): + data = randn(n, m) + data = self._neg_byteorder(data) + assert_(not data.dtype.isnative) + return data + + def _generate_int_data(self, n, m): + return (10 * rand(n, m)).astype(np.int64) + + def _generate_int32_data(self, n, m): + return (10 * rand(n, m)).astype(np.int32) + + # Now the real test cases + + @pytest.mark.parametrize("dtype", '?bhilqpBHILQPefdgFDGO') + def test_ones_pathological(self, dtype): + # for preservation of behavior described in + # gh-12519; amin > amax behavior may still change + # in the future + arr = np.ones(10, dtype=dtype) + expected = np.zeros(10, dtype=dtype) + actual = np.clip(arr, 1, 0) + if dtype == 'O': + assert actual.tolist() == expected.tolist() + else: + assert_equal(actual, expected) + + def test_simple_double(self): + # Test native double input with scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = 0.1 + M = 0.6 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_simple_int(self): + # Test native int input with scalar min/max. + a = self._generate_int_data(self.nr, self.nc) + a = a.astype(int) + m = -2 + M = 4 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_array_double(self): + # Test native double input with array min/max. + a = self._generate_data(self.nr, self.nc) + m = np.zeros(a.shape) + M = m + 0.5 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_simple_nonnative(self): + # Test non native double input with scalar min/max. + # Test native double input with non native double scalar min/max. + a = self._generate_non_native_data(self.nr, self.nc) + m = -0.5 + M = 0.6 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_equal(ac, act) + + # Test native double input with non native double scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = self._neg_byteorder(0.6) + assert_(not M.dtype.isnative) + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_equal(ac, act) + + def test_simple_complex(self): + # Test native complex input with native double scalar min/max. + # Test native input with complex double scalar min/max. + a = 3 * self._generate_data_complex(self.nr, self.nc) + m = -0.5 + M = 1. + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + # Test native input with complex double scalar min/max. + a = 3 * self._generate_data(self.nr, self.nc) + m = -0.5 + 1.j + M = 1. + 2.j + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_clip_complex(self): + # Address Issue gh-5354 for clipping complex arrays + # Test native complex input without explicit min/max + # ie, either min=None or max=None + a = np.ones(10, dtype=complex) + m = a.min() + M = a.max() + am = self.fastclip(a, m, None) + aM = self.fastclip(a, None, M) + assert_array_strict_equal(am, a) + assert_array_strict_equal(aM, a) + + def test_clip_non_contig(self): + # Test clip for non contiguous native input and native scalar min/max. + a = self._generate_data(self.nr * 2, self.nc * 3) + a = a[::2, ::3] + assert_(not a.flags['F_CONTIGUOUS']) + assert_(not a.flags['C_CONTIGUOUS']) + ac = self.fastclip(a, -1.6, 1.7) + act = self.clip(a, -1.6, 1.7) + assert_array_strict_equal(ac, act) + + def test_simple_out(self): + # Test native double input with scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = 0.6 + ac = np.zeros(a.shape) + act = np.zeros(a.shape) + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + @pytest.mark.parametrize("casting", [None, "unsafe"]) + def test_simple_int32_inout(self, casting): + # Test native int32 input with double min/max and int32 out. + a = self._generate_int32_data(self.nr, self.nc) + m = np.float64(0) + M = np.float64(2) + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + if casting is None: + with pytest.raises(TypeError): + self.fastclip(a, m, M, ac, casting=casting) + else: + # explicitly passing "unsafe" will silence warning + self.fastclip(a, m, M, ac, casting=casting) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_simple_int64_out(self): + # Test native int32 input with int32 scalar min/max and int64 out. + a = self._generate_int32_data(self.nr, self.nc) + m = np.int32(-1) + M = np.int32(1) + ac = np.zeros(a.shape, dtype=np.int64) + act = ac.copy() + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_simple_int64_inout(self): + # Test native int32 input with double array min/max and int32 out. + a = self._generate_int32_data(self.nr, self.nc) + m = np.zeros(a.shape, np.float64) + M = np.float64(1) + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + self.fastclip(a, m, M, out=ac, casting="unsafe") + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_simple_int32_out(self): + # Test native double input with scalar min/max and int out. + a = self._generate_data(self.nr, self.nc) + m = -1.0 + M = 2.0 + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + self.fastclip(a, m, M, out=ac, casting="unsafe") + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_simple_inplace_01(self): + # Test native double input with array min/max in-place. + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = np.zeros(a.shape) + M = 1.0 + self.fastclip(a, m, M, a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a, ac) + + def test_simple_inplace_02(self): + # Test native double input with scalar min/max in-place. + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = -0.5 + M = 0.6 + self.fastclip(a, m, M, a) + self.clip(ac, m, M, ac) + assert_array_strict_equal(a, ac) + + def test_noncontig_inplace(self): + # Test non contiguous double input with double scalar min/max in-place. + a = self._generate_data(self.nr * 2, self.nc * 3) + a = a[::2, ::3] + assert_(not a.flags['F_CONTIGUOUS']) + assert_(not a.flags['C_CONTIGUOUS']) + ac = a.copy() + m = -0.5 + M = 0.6 + self.fastclip(a, m, M, a) + self.clip(ac, m, M, ac) + assert_array_equal(a, ac) + + def test_type_cast_01(self): + # Test native double input with scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = 0.6 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_02(self): + # Test native int32 input with int32 scalar min/max. + a = self._generate_int_data(self.nr, self.nc) + a = a.astype(np.int32) + m = -2 + M = 4 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_03(self): + # Test native int32 input with float64 scalar min/max. + a = self._generate_int32_data(self.nr, self.nc) + m = -2 + M = 4 + ac = self.fastclip(a, np.float64(m), np.float64(M)) + act = self.clip(a, np.float64(m), np.float64(M)) + assert_array_strict_equal(ac, act) + + def test_type_cast_04(self): + # Test native int32 input with float32 scalar min/max. + a = self._generate_int32_data(self.nr, self.nc) + m = np.float32(-2) + M = np.float32(4) + act = self.fastclip(a, m, M) + ac = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_05(self): + # Test native int32 with double arrays min/max. + a = self._generate_int_data(self.nr, self.nc) + m = -0.5 + M = 1. + ac = self.fastclip(a, m * np.zeros(a.shape), M) + act = self.clip(a, m * np.zeros(a.shape), M) + assert_array_strict_equal(ac, act) + + def test_type_cast_06(self): + # Test native with NON native scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = 0.5 + m_s = self._neg_byteorder(m) + M = 1. + act = self.clip(a, m_s, M) + ac = self.fastclip(a, m_s, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_07(self): + # Test NON native with native array min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 * np.ones(a.shape) + M = 1. + a_s = self._neg_byteorder(a) + assert_(not a_s.dtype.isnative) + act = a_s.clip(m, M) + ac = self.fastclip(a_s, m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_08(self): + # Test NON native with native scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = 1. + a_s = self._neg_byteorder(a) + assert_(not a_s.dtype.isnative) + ac = self.fastclip(a_s, m, M) + act = a_s.clip(m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_09(self): + # Test native with NON native array min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 * np.ones(a.shape) + M = 1. + m_s = self._neg_byteorder(m) + assert_(not m_s.dtype.isnative) + ac = self.fastclip(a, m_s, M) + act = self.clip(a, m_s, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_10(self): + # Test native int32 with float min/max and float out for output argument. + a = self._generate_int_data(self.nr, self.nc) + b = np.zeros(a.shape, dtype=np.float32) + m = np.float32(-0.5) + M = np.float32(1) + act = self.clip(a, m, M, out=b) + ac = self.fastclip(a, m, M, out=b) + assert_array_strict_equal(ac, act) + + def test_type_cast_11(self): + # Test non native with native scalar, min/max, out non native + a = self._generate_non_native_data(self.nr, self.nc) + b = a.copy() + b = b.astype(b.dtype.newbyteorder('>')) + bt = b.copy() + m = -0.5 + M = 1. + self.fastclip(a, m, M, out=b) + self.clip(a, m, M, out=bt) + assert_array_strict_equal(b, bt) + + def test_type_cast_12(self): + # Test native int32 input and min/max and float out + a = self._generate_int_data(self.nr, self.nc) + b = np.zeros(a.shape, dtype=np.float32) + m = np.int32(0) + M = np.int32(1) + act = self.clip(a, m, M, out=b) + ac = self.fastclip(a, m, M, out=b) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_simple(self): + # Test native double input with scalar min/max + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = 0.6 + ac = np.zeros(a.shape) + act = np.zeros(a.shape) + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_simple2(self): + # Test native int32 input with double min/max and int32 out + a = self._generate_int32_data(self.nr, self.nc) + m = np.float64(0) + M = np.float64(2) + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + self.fastclip(a, m, M, out=ac, casting="unsafe") + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_simple_int32(self): + # Test native int32 input with int32 scalar min/max and int64 out + a = self._generate_int32_data(self.nr, self.nc) + m = np.int32(-1) + M = np.int32(1) + ac = np.zeros(a.shape, dtype=np.int64) + act = ac.copy() + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_array_int32(self): + # Test native int32 input with double array min/max and int32 out + a = self._generate_int32_data(self.nr, self.nc) + m = np.zeros(a.shape, np.float64) + M = np.float64(1) + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + self.fastclip(a, m, M, out=ac, casting="unsafe") + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_array_outint32(self): + # Test native double input with scalar min/max and int out + a = self._generate_data(self.nr, self.nc) + m = -1.0 + M = 2.0 + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + self.fastclip(a, m, M, out=ac, casting="unsafe") + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_transposed(self): + # Test that the out argument works when transposed + a = np.arange(16).reshape(4, 4) + out = np.empty_like(a).T + a.clip(4, 10, out=out) + expected = self.clip(a, 4, 10) + assert_array_equal(out, expected) + + def test_clip_with_out_memory_overlap(self): + # Test that the out argument works when it has memory overlap + a = np.arange(16).reshape(4, 4) + ac = a.copy() + a[:-1].clip(4, 10, out=a[1:]) + expected = self.clip(ac[:-1], 4, 10) + assert_array_equal(a[1:], expected) + + def test_clip_inplace_array(self): + # Test native double input with array min/max + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = np.zeros(a.shape) + M = 1.0 + self.fastclip(a, m, M, a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a, ac) + + def test_clip_inplace_simple(self): + # Test native double input with scalar min/max + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = -0.5 + M = 0.6 + self.fastclip(a, m, M, a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a, ac) + + def test_clip_func_takes_out(self): + # Ensure that the clip() function takes an out=argument. + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = -0.5 + M = 0.6 + a2 = np.clip(a, m, M, out=a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a2, ac) + assert_(a2 is a) + + def test_clip_nan(self): + d = np.arange(7.) + assert_equal(d.clip(min=np.nan), np.nan) + assert_equal(d.clip(max=np.nan), np.nan) + assert_equal(d.clip(min=np.nan, max=np.nan), np.nan) + assert_equal(d.clip(min=-2, max=np.nan), np.nan) + assert_equal(d.clip(min=np.nan, max=10), np.nan) + + def test_object_clip(self): + a = np.arange(10, dtype=object) + actual = np.clip(a, 1, 5) + expected = np.array([1, 1, 2, 3, 4, 5, 5, 5, 5, 5]) + assert actual.tolist() == expected.tolist() + + def test_clip_all_none(self): + arr = np.arange(10, dtype=object) + assert_equal(np.clip(arr, None, None), arr) + assert_equal(np.clip(arr), arr) + + def test_clip_invalid_casting(self): + a = np.arange(10, dtype=object) + with assert_raises_regex(ValueError, + 'casting must be one of'): + self.fastclip(a, 1, 8, casting="garbage") + + @pytest.mark.parametrize("amin, amax", [ + # two scalars + (1, 0), + # mix scalar and array + (1, np.zeros(10)), + # two arrays + (np.ones(10), np.zeros(10)), + ]) + def test_clip_value_min_max_flip(self, amin, amax): + a = np.arange(10, dtype=np.int64) + # requirement from ufunc_docstrings.py + expected = np.minimum(np.maximum(a, amin), amax) + actual = np.clip(a, amin, amax) + assert_equal(actual, expected) + + @pytest.mark.parametrize("arr, amin, amax, exp", [ + # for a bug in npy_ObjectClip, based on a + # case produced by hypothesis + (np.zeros(10, dtype=object), + 0, + -2**64 + 1, + np.full(10, -2**64 + 1, dtype=object)), + # for bugs in NPY_TIMEDELTA_MAX, based on a case + # produced by hypothesis + (np.zeros(10, dtype='m8') - 1, + 0, + 0, + np.zeros(10, dtype='m8')), + ]) + def test_clip_problem_cases(self, arr, amin, amax, exp): + actual = np.clip(arr, amin, amax) + assert_equal(actual, exp) + + @pytest.mark.parametrize("arr, amin, amax", [ + # problematic scalar nan case from hypothesis + (np.zeros(10, dtype=np.int64), + np.array(np.nan), + np.zeros(10, dtype=np.int32)), + ]) + def test_clip_scalar_nan_propagation(self, arr, amin, amax): + # enforcement of scalar nan propagation for comparisons + # called through clip() + expected = np.minimum(np.maximum(arr, amin), amax) + actual = np.clip(arr, amin, amax) + assert_equal(actual, expected) + + @pytest.mark.parametrize("arr, amin, amax", [ + (np.array([1] * 10, dtype='m8'), + np.timedelta64('NaT'), + np.zeros(10, dtype=np.int32)), + ]) + def test_NaT_propagation(self, arr, amin, amax): + expected = np.minimum(np.maximum(arr, amin), amax) + actual = np.clip(arr, amin, amax) + assert_equal(actual, expected) + + @given( + data=st.data(), + arr=hynp.arrays( + dtype=hynp.integer_dtypes() | hynp.floating_dtypes(), + shape=hynp.array_shapes() + ) + ) + def test_clip_property(self, data, arr): + """A property-based test using Hypothesis. + + This aims for maximum generality: it could in principle generate *any* + valid inputs to np.clip, and in practice generates much more varied + inputs than human testers come up with. + + Because many of the inputs have tricky dependencies - compatible dtypes + and mutually-broadcastable shapes - we use `st.data()` strategy draw + values *inside* the test function, from strategies we construct based + on previous values. An alternative would be to define a custom strategy + with `@st.composite`, but until we have duplicated code inline is fine. + + That accounts for most of the function; the actual test is just three + lines to calculate and compare actual vs expected results! + """ + numeric_dtypes = hynp.integer_dtypes() | hynp.floating_dtypes() + # Generate shapes for the bounds which can be broadcast with each other + # and with the base shape. Below, we might decide to use scalar bounds, + # but it's clearer to generate these shapes unconditionally in advance. + in_shapes, result_shape = data.draw( + hynp.mutually_broadcastable_shapes( + num_shapes=2, base_shape=arr.shape + ) + ) + # Scalar `nan` is deprecated due to the differing behaviour it shows. + s = numeric_dtypes.flatmap( + lambda x: hynp.from_dtype(x, allow_nan=False)) + amin = data.draw(s | hynp.arrays(dtype=numeric_dtypes, + shape=in_shapes[0], elements={"allow_nan": False})) + amax = data.draw(s | hynp.arrays(dtype=numeric_dtypes, + shape=in_shapes[1], elements={"allow_nan": False})) + + # Then calculate our result and expected result and check that they're + # equal! See gh-12519 and gh-19457 for discussion deciding on this + # property and the result_type argument. + result = np.clip(arr, amin, amax) + t = np.result_type(arr, amin, amax) + expected = np.minimum(amax, np.maximum(arr, amin, dtype=t), dtype=t) + assert result.dtype == t + assert_array_equal(result, expected) + + def test_clip_min_max_args(self): + arr = np.arange(5) + + assert_array_equal(np.clip(arr), arr) + assert_array_equal(np.clip(arr, min=2, max=3), np.clip(arr, 2, 3)) + assert_array_equal(np.clip(arr, min=None, max=2), + np.clip(arr, None, 2)) + + with assert_raises_regex(TypeError, "missing 1 required positional " + "argument: 'a_max'"): + np.clip(arr, 2) + with assert_raises_regex(TypeError, "missing 1 required positional " + "argument: 'a_min'"): + np.clip(arr, a_max=2) + msg = ("Passing `min` or `max` keyword argument when `a_min` and " + "`a_max` are provided is forbidden.") + with assert_raises_regex(ValueError, msg): + np.clip(arr, 2, 3, max=3) + with assert_raises_regex(ValueError, msg): + np.clip(arr, 2, 3, min=2) + + @pytest.mark.parametrize("dtype,min,max", [ + ("int32", -2**32 - 1, 2**32), + ("int32", -2**320, None), + ("int32", None, 2**300), + ("int32", -1000, 2**32), + ("int32", -2**32 - 1, 1000), + ("uint8", -1, 129), + ]) + def test_out_of_bound_pyints(self, dtype, min, max): + a = np.arange(10000).astype(dtype) + # Check min only + c = np.clip(a, min=min, max=max) + assert not np.may_share_memory(a, c) + assert c.dtype == a.dtype + if min is not None: + assert (c >= min).all() + if max is not None: + assert (c <= max).all() + + +class TestAllclose: + rtol = 1e-5 + atol = 1e-8 + + def setup_method(self): + self.olderr = np.seterr(invalid='ignore') + + def teardown_method(self): + np.seterr(**self.olderr) + + def tst_allclose(self, x, y): + assert_(np.allclose(x, y), f"{x} and {y} not close") + + def tst_not_allclose(self, x, y): + assert_(not np.allclose(x, y), f"{x} and {y} shouldn't be close") + + def test_ip_allclose(self): + # Parametric test factory. + arr = np.array([100, 1000]) + aran = np.arange(125).reshape((5, 5, 5)) + + atol = self.atol + rtol = self.rtol + + data = [([1, 0], [1, 0]), + ([atol], [0]), + ([1], [1 + rtol + atol]), + (arr, arr + arr * rtol), + (arr, arr + arr * rtol + atol * 2), + (aran, aran + aran * rtol), + (np.inf, np.inf), + (np.inf, [np.inf])] + + for (x, y) in data: + self.tst_allclose(x, y) + + def test_ip_not_allclose(self): + # Parametric test factory. + aran = np.arange(125).reshape((5, 5, 5)) + + atol = self.atol + rtol = self.rtol + + data = [([np.inf, 0], [1, np.inf]), + ([np.inf, 0], [1, 0]), + ([np.inf, np.inf], [1, np.inf]), + ([np.inf, np.inf], [1, 0]), + ([-np.inf, 0], [np.inf, 0]), + ([np.nan, 0], [np.nan, 0]), + ([atol * 2], [0]), + ([1], [1 + rtol + atol * 2]), + (aran, aran + aran * atol + atol * 2), + (np.array([np.inf, 1]), np.array([0, np.inf]))] + + for (x, y) in data: + self.tst_not_allclose(x, y) + + def test_no_parameter_modification(self): + x = np.array([np.inf, 1]) + y = np.array([0, np.inf]) + np.allclose(x, y) + assert_array_equal(x, np.array([np.inf, 1])) + assert_array_equal(y, np.array([0, np.inf])) + + def test_min_int(self): + # Could make problems because of abs(min_int) == min_int + min_int = np.iinfo(np.int_).min + a = np.array([min_int], dtype=np.int_) + assert_(np.allclose(a, a)) + + def test_equalnan(self): + x = np.array([1.0, np.nan]) + assert_(np.allclose(x, x, equal_nan=True)) + + def test_return_class_is_ndarray(self): + # Issue gh-6475 + # Check that allclose does not preserve subtypes + class Foo(np.ndarray): + def __new__(cls, *args, **kwargs): + return np.array(*args, **kwargs).view(cls) + + a = Foo([1]) + assert_(type(np.allclose(a, a)) is bool) + + +class TestIsclose: + rtol = 1e-5 + atol = 1e-8 + + def _setup(self): + atol = self.atol + rtol = self.rtol + arr = np.array([100, 1000]) + aran = np.arange(125).reshape((5, 5, 5)) + + self.all_close_tests = [ + ([1, 0], [1, 0]), + ([atol], [0]), + ([1], [1 + rtol + atol]), + (arr, arr + arr * rtol), + (arr, arr + arr * rtol + atol), + (aran, aran + aran * rtol), + (np.inf, np.inf), + (np.inf, [np.inf]), + ([np.inf, -np.inf], [np.inf, -np.inf]), + ] + self.none_close_tests = [ + ([np.inf, 0], [1, np.inf]), + ([np.inf, -np.inf], [1, 0]), + ([np.inf, np.inf], [1, -np.inf]), + ([np.inf, np.inf], [1, 0]), + ([np.nan, 0], [np.nan, -np.inf]), + ([atol * 2], [0]), + ([1], [1 + rtol + atol * 2]), + (aran, aran + rtol * 1.1 * aran + atol * 1.1), + (np.array([np.inf, 1]), np.array([0, np.inf])), + ] + self.some_close_tests = [ + ([np.inf, 0], [np.inf, atol * 2]), + ([atol, 1, 1e6 * (1 + 2 * rtol) + atol], [0, np.nan, 1e6]), + (np.arange(3), [0, 1, 2.1]), + (np.nan, [np.nan, np.nan, np.nan]), + ([0], [atol, np.inf, -np.inf, np.nan]), + (0, [atol, np.inf, -np.inf, np.nan]), + ] + self.some_close_results = [ + [True, False], + [True, False, False], + [True, True, False], + [False, False, False], + [True, False, False, False], + [True, False, False, False], + ] + + def test_ip_isclose(self): + self._setup() + tests = self.some_close_tests + results = self.some_close_results + for (x, y), result in zip(tests, results): + assert_array_equal(np.isclose(x, y), result) + + x = np.array([2.1, 2.1, 2.1, 2.1, 5, np.nan]) + y = np.array([2, 2, 2, 2, np.nan, 5]) + atol = [0.11, 0.09, 1e-8, 1e-8, 1, 1] + rtol = [1e-8, 1e-8, 0.06, 0.04, 1, 1] + expected = np.array([True, False, True, False, False, False]) + assert_array_equal(np.isclose(x, y, rtol=rtol, atol=atol), expected) + + message = "operands could not be broadcast together..." + atol = np.array([1e-8, 1e-8]) + with assert_raises(ValueError, msg=message): + np.isclose(x, y, atol=atol) + + rtol = np.array([1e-5, 1e-5]) + with assert_raises(ValueError, msg=message): + np.isclose(x, y, rtol=rtol) + + def test_nep50_isclose(self): + below_one = float(1. - np.finfo('f8').eps) + f32 = np.array(below_one, 'f4') # This is just 1 at float32 precision + assert f32 > np.array(below_one) + # NEP 50 broadcasting of python scalars + assert f32 == below_one + # Test that it works for isclose arguments too (and that those fail if + # one uses a numpy float64). + assert np.isclose(f32, below_one, atol=0, rtol=0) + assert np.isclose(f32, np.float32(0), atol=below_one) + assert np.isclose(f32, 2, atol=0, rtol=below_one / 2) + assert not np.isclose(f32, np.float64(below_one), atol=0, rtol=0) + assert not np.isclose(f32, np.float32(0), atol=np.float64(below_one)) + assert not np.isclose(f32, 2, atol=0, rtol=np.float64(below_one / 2)) + + def tst_all_isclose(self, x, y): + assert_(np.all(np.isclose(x, y)), f"{x} and {y} not close") + + def tst_none_isclose(self, x, y): + msg = "%s and %s shouldn't be close" + assert_(not np.any(np.isclose(x, y)), msg % (x, y)) + + def tst_isclose_allclose(self, x, y): + msg = "isclose.all() and allclose aren't same for %s and %s" + msg2 = "isclose and allclose aren't same for %s and %s" + if np.isscalar(x) and np.isscalar(y): + assert_(np.isclose(x, y) == np.allclose(x, y), msg=msg2 % (x, y)) + else: + assert_array_equal( + np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y) + ) + + def test_ip_all_isclose(self): + self._setup() + for (x, y) in self.all_close_tests: + self.tst_all_isclose(x, y) + + x = np.array([2.3, 3.6, 4.4, np.nan]) + y = np.array([2, 3, 4, np.nan]) + atol = [0.31, 0, 0, 1] + rtol = [0, 0.21, 0.11, 1] + assert np.allclose(x, y, atol=atol, rtol=rtol, equal_nan=True) + assert not np.allclose(x, y, atol=0.1, rtol=0.1, equal_nan=True) + + # Show that gh-14330 is resolved + assert np.allclose([1, 2, float('nan')], [1, 2, float('nan')], + atol=[1, 1, 1], equal_nan=True) + + def test_ip_none_isclose(self): + self._setup() + for (x, y) in self.none_close_tests: + self.tst_none_isclose(x, y) + + def test_ip_isclose_allclose(self): + self._setup() + tests = (self.all_close_tests + self.none_close_tests + + self.some_close_tests) + for (x, y) in tests: + self.tst_isclose_allclose(x, y) + + def test_equal_nan(self): + assert_array_equal(np.isclose(np.nan, np.nan, equal_nan=True), [True]) + arr = np.array([1.0, np.nan]) + assert_array_equal(np.isclose(arr, arr, equal_nan=True), [True, True]) + + def test_masked_arrays(self): + # Make sure to test the output type when arguments are interchanged. + + x = np.ma.masked_where([True, True, False], np.arange(3)) + assert_(type(x) is type(np.isclose(2, x))) + assert_(type(x) is type(np.isclose(x, 2))) + + x = np.ma.masked_where([True, True, False], [np.nan, np.inf, np.nan]) + assert_(type(x) is type(np.isclose(np.inf, x))) + assert_(type(x) is type(np.isclose(x, np.inf))) + + x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan]) + y = np.isclose(np.nan, x, equal_nan=True) + assert_(type(x) is type(y)) + # Ensure that the mask isn't modified... + assert_array_equal([True, True, False], y.mask) + y = np.isclose(x, np.nan, equal_nan=True) + assert_(type(x) is type(y)) + # Ensure that the mask isn't modified... + assert_array_equal([True, True, False], y.mask) + + x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan]) + y = np.isclose(x, x, equal_nan=True) + assert_(type(x) is type(y)) + # Ensure that the mask isn't modified... + assert_array_equal([True, True, False], y.mask) + + def test_scalar_return(self): + assert_(np.isscalar(np.isclose(1, 1))) + + def test_no_parameter_modification(self): + x = np.array([np.inf, 1]) + y = np.array([0, np.inf]) + np.isclose(x, y) + assert_array_equal(x, np.array([np.inf, 1])) + assert_array_equal(y, np.array([0, np.inf])) + + def test_non_finite_scalar(self): + # GH7014, when two scalars are compared the output should also be a + # scalar + assert_(np.isclose(np.inf, -np.inf) is np.False_) + assert_(np.isclose(0, np.inf) is np.False_) + assert_(type(np.isclose(0, np.inf)) is np.bool) + + def test_timedelta(self): + # Allclose currently works for timedelta64 as long as `atol` is + # an integer or also a timedelta64 + a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]") + assert np.isclose(a, a, atol=0, equal_nan=True).all() + assert np.isclose(a, a, atol=np.timedelta64(1, "ns"), equal_nan=True).all() + assert np.allclose(a, a, atol=0, equal_nan=True) + assert np.allclose(a, a, atol=np.timedelta64(1, "ns"), equal_nan=True) + + def test_tol_warnings(self): + a = np.array([1, 2, 3]) + b = np.array([np.inf, np.nan, 1]) + + for i in b: + for j in b: + # Making sure that i and j are not both numbers, + # because that won't create a warning + if (i == 1) and (j == 1): + continue + + with warnings.catch_warnings(record=True) as w: + + warnings.simplefilter("always") + c = np.isclose(a, a, atol=i, rtol=j) + assert len(w) == 1 + assert issubclass(w[-1].category, RuntimeWarning) + expected = f"One of rtol or atol is not valid, atol: {i}, rtol: {j}" + assert expected in str(w[-1].message) + + +class TestStdVar: + def _create_data(self): + A = np.array([1, -1, 1, -1]) + real_var = 1 + return A, real_var + + def test_basic(self): + A, real_var = self._create_data() + assert_almost_equal(np.var(A), real_var) + assert_almost_equal(np.std(A)**2, real_var) + + def test_scalars(self): + assert_equal(np.var(1), 0) + assert_equal(np.std(1), 0) + + def test_ddof1(self): + A, real_var = self._create_data() + assert_almost_equal(np.var(A, ddof=1), + real_var * len(A) / (len(A) - 1)) + assert_almost_equal(np.std(A, ddof=1)**2, + real_var * len(A) / (len(A) - 1)) + + def test_ddof2(self): + A, real_var = self._create_data() + assert_almost_equal(np.var(A, ddof=2), + real_var * len(A) / (len(A) - 2)) + assert_almost_equal(np.std(A, ddof=2)**2, + real_var * len(A) / (len(A) - 2)) + + def test_correction(self): + A, _ = self._create_data() + assert_almost_equal( + np.var(A, correction=1), np.var(A, ddof=1) + ) + assert_almost_equal( + np.std(A, correction=1), np.std(A, ddof=1) + ) + + err_msg = "ddof and correction can't be provided simultaneously." + + with assert_raises_regex(ValueError, err_msg): + np.var(A, ddof=1, correction=0) + + with assert_raises_regex(ValueError, err_msg): + np.std(A, ddof=1, correction=1) + + def test_out_scalar(self): + d = np.arange(10) + out = np.array(0.) + r = np.std(d, out=out) + assert_(r is out) + assert_array_equal(r, out) + r = np.var(d, out=out) + assert_(r is out) + assert_array_equal(r, out) + r = np.mean(d, out=out) + assert_(r is out) + assert_array_equal(r, out) + + +class TestStdVarComplex: + def test_basic(self): + A = np.array([1, 1.j, -1, -1.j]) + real_var = 1 + assert_almost_equal(np.var(A), real_var) + assert_almost_equal(np.std(A)**2, real_var) + + def test_scalars(self): + assert_equal(np.var(1j), 0) + assert_equal(np.std(1j), 0) + + +class TestCreationFuncs: + def check_function(self, func, fill_value=None): + dtypes_info = {np.dtype(tp) for tp in itertools.chain(*sctypes.values())} + keyfunc = lambda dtype: dtype.str + variable_sized = {tp for tp in dtypes_info if tp.str.endswith('0')} + dtypes = sorted(dtypes_info - variable_sized | + {np.dtype(tp.str.replace("0", str(i))) + for tp in variable_sized for i in range(1, 10)}, + key=keyfunc) + dtypes += [type(dt) for dt in sorted(dtypes_info, key=keyfunc)] + orders = {'C': 'c_contiguous', 'F': 'f_contiguous'} + ndims = 10 + + par = ((0, 1, 2), + range(ndims), + orders, + dtypes) + fill_kwarg = {} + if fill_value is not None: + fill_kwarg = {'fill_value': fill_value} + + for size, ndims, order, dtype in itertools.product(*par): + shape = ndims * [size] + + is_void = dtype is np.dtypes.VoidDType or ( + isinstance(dtype, np.dtype) and dtype.str.startswith('|V')) + + # do not fill void type + if fill_kwarg and is_void: + continue + + arr = func(shape, order=order, dtype=dtype, + **fill_kwarg) + + if isinstance(dtype, np.dtype): + assert_equal(arr.dtype, dtype) + elif isinstance(dtype, type(np.dtype)): + if dtype in (np.dtypes.StrDType, np.dtypes.BytesDType): + dtype_str = np.dtype(dtype.type).str.replace('0', '1') + assert_equal(arr.dtype, np.dtype(dtype_str)) + else: + assert_equal(arr.dtype, np.dtype(dtype.type)) + assert_(getattr(arr.flags, orders[order])) + + if fill_value is not None: + if arr.dtype.str.startswith('|S'): + val = str(fill_value) + else: + val = fill_value + assert_equal(arr, dtype.type(val)) + + def test_zeros(self): + self.check_function(np.zeros) + + def test_ones(self): + self.check_function(np.ones) + + def test_empty(self): + self.check_function(np.empty) + + def test_full(self): + self.check_function(np.full, 0) + self.check_function(np.full, 1) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_for_reference_leak(self): + # Make sure we have an object for reference + dim = 1 + beg = sys.getrefcount(dim) + np.zeros([dim] * 10) + assert_(sys.getrefcount(dim) == beg) + np.ones([dim] * 10) + assert_(sys.getrefcount(dim) == beg) + np.empty([dim] * 10) + assert_(sys.getrefcount(dim) == beg) + np.full([dim] * 10, 0) + assert_(sys.getrefcount(dim) == beg) + + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") + @pytest.mark.parametrize("func", [np.empty, np.zeros, np.ones, np.full]) + def test_signatures(self, func): + sig = inspect.signature(func) + params = sig.parameters + + assert len(params) in {5, 6} + + assert 'shape' in params + assert params["shape"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert params["shape"].default is inspect.Parameter.empty + + assert 'dtype' in params + assert params["dtype"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert params["dtype"].default is None + + assert 'order' in params + assert params["order"].kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + assert params["order"].default == "C" + + assert 'device' in params + assert params["device"].kind is inspect.Parameter.KEYWORD_ONLY + assert params["device"].default is None + + assert 'like' in params + assert params["like"].kind is inspect.Parameter.KEYWORD_ONLY + assert params["like"].default is None + + +class TestLikeFuncs: + '''Test ones_like, zeros_like, empty_like and full_like''' + + def compare_array_value(self, dz, value, fill_value): + if value is not None: + if fill_value: + # Conversion is close to what np.full_like uses + # but we may want to convert directly in the future + # which may result in errors (where this does not). + z = np.array(value).astype(dz.dtype) + assert_(np.all(dz == z)) + else: + assert_(np.all(dz == value)) + + def check_like_function(self, like_function, value, fill_value=False): + data = [ + # Array scalars + (np.array(3.), None), + (np.array(3), 'f8'), + # 1D arrays + (np.arange(6, dtype='f4'), None), + (np.arange(6), 'c16'), + # 2D C-layout arrays + (np.arange(6).reshape(2, 3), None), + (np.arange(6).reshape(3, 2), 'i1'), + # 2D F-layout arrays + (np.arange(6).reshape((2, 3), order='F'), None), + (np.arange(6).reshape((3, 2), order='F'), 'i1'), + # 3D C-layout arrays + (np.arange(24).reshape(2, 3, 4), None), + (np.arange(24).reshape(4, 3, 2), 'f4'), + # 3D F-layout arrays + (np.arange(24).reshape((2, 3, 4), order='F'), None), + (np.arange(24).reshape((4, 3, 2), order='F'), 'f4'), + # 3D non-C/F-layout arrays + (np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None), + (np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'), + ] + shapes = [(), (5,), (5, 6,), (5, 6, 7,)] + + if fill_value: + fill_kwarg = {'fill_value': value} + else: + fill_kwarg = {} + for d, dtype in data: + # default (K) order, dtype + dz = like_function(d, dtype=dtype, **fill_kwarg) + assert_equal(dz.shape, d.shape) + assert_equal(np.array(dz.strides) * d.dtype.itemsize, + np.array(d.strides) * dz.dtype.itemsize) + assert_equal(d.flags.c_contiguous, dz.flags.c_contiguous) + assert_equal(d.flags.f_contiguous, dz.flags.f_contiguous) + if dtype is None: + assert_equal(dz.dtype, d.dtype) + else: + assert_equal(dz.dtype, np.dtype(dtype)) + self.compare_array_value(dz, value, fill_value) + + # C order, default dtype + dz = like_function(d, order='C', dtype=dtype, **fill_kwarg) + assert_equal(dz.shape, d.shape) + assert_(dz.flags.c_contiguous) + if dtype is None: + assert_equal(dz.dtype, d.dtype) + else: + assert_equal(dz.dtype, np.dtype(dtype)) + self.compare_array_value(dz, value, fill_value) + + # F order, default dtype + dz = like_function(d, order='F', dtype=dtype, **fill_kwarg) + assert_equal(dz.shape, d.shape) + assert_(dz.flags.f_contiguous) + if dtype is None: + assert_equal(dz.dtype, d.dtype) + else: + assert_equal(dz.dtype, np.dtype(dtype)) + self.compare_array_value(dz, value, fill_value) + + # A order + dz = like_function(d, order='A', dtype=dtype, **fill_kwarg) + assert_equal(dz.shape, d.shape) + if d.flags.f_contiguous: + assert_(dz.flags.f_contiguous) + else: + assert_(dz.flags.c_contiguous) + if dtype is None: + assert_equal(dz.dtype, d.dtype) + else: + assert_equal(dz.dtype, np.dtype(dtype)) + self.compare_array_value(dz, value, fill_value) + + # Test the 'shape' parameter + for s in shapes: + for o in 'CFA': + sz = like_function(d, dtype=dtype, shape=s, order=o, + **fill_kwarg) + assert_equal(sz.shape, s) + if dtype is None: + assert_equal(sz.dtype, d.dtype) + else: + assert_equal(sz.dtype, np.dtype(dtype)) + if o == 'C' or (o == 'A' and d.flags.c_contiguous): + assert_(sz.flags.c_contiguous) + elif o == 'F' or (o == 'A' and d.flags.f_contiguous): + assert_(sz.flags.f_contiguous) + self.compare_array_value(sz, value, fill_value) + + if (d.ndim != len(s)): + assert_equal(np.argsort(like_function(d, dtype=dtype, + shape=s, order='K', + **fill_kwarg).strides), + np.argsort(np.empty(s, dtype=dtype, + order='C').strides)) + else: + assert_equal(np.argsort(like_function(d, dtype=dtype, + shape=s, order='K', + **fill_kwarg).strides), + np.argsort(d.strides)) + + # Test the 'subok' parameter + class MyNDArray(np.ndarray): + pass + + a = np.array([[1, 2], [3, 4]]).view(MyNDArray) + + b = like_function(a, **fill_kwarg) + assert_(type(b) is MyNDArray) + + b = like_function(a, subok=False, **fill_kwarg) + assert_(type(b) is not MyNDArray) + + # Test invalid dtype + with assert_raises(TypeError): + a = np.array(b"abc") + like_function(a, dtype="S-1", **fill_kwarg) + + def test_ones_like(self): + self.check_like_function(np.ones_like, 1) + + def test_zeros_like(self): + self.check_like_function(np.zeros_like, 0) + + def test_empty_like(self): + self.check_like_function(np.empty_like, None) + + def test_filled_like(self): + self.check_like_function(np.full_like, 0, True) + self.check_like_function(np.full_like, 1, True) + # Large integers may overflow, but using int64 is OK (casts) + # see also gh-27075 + with pytest.raises(OverflowError): + np.full_like(np.ones(3, dtype=np.int8), 1000) + self.check_like_function(np.full_like, np.int64(1000), True) + self.check_like_function(np.full_like, 123.456, True) + # Inf to integer casts cause invalid-value errors: ignore them. + with np.errstate(invalid="ignore"): + self.check_like_function(np.full_like, np.inf, True) + + @pytest.mark.parametrize('likefunc', [np.empty_like, np.full_like, + np.zeros_like, np.ones_like]) + @pytest.mark.parametrize('dtype', [str, bytes]) + def test_dtype_str_bytes(self, likefunc, dtype): + # Regression test for gh-19860 + a = np.arange(16).reshape(2, 8) + b = a[:, ::2] # Ensure b is not contiguous. + kwargs = {'fill_value': ''} if likefunc == np.full_like else {} + result = likefunc(b, dtype=dtype, **kwargs) + if dtype == str: + assert result.strides == (16, 4) + else: + # dtype is bytes + assert result.strides == (4, 1) + + +class TestCorrelate: + def _setup(self, dt): + self.x = np.array([1, 2, 3, 4, 5], dtype=dt) + self.xs = np.arange(1, 20)[::3] + self.y = np.array([-1, -2, -3], dtype=dt) + self.z1 = np.array([-3., -8., -14., -20., -26., -14., -5.], dtype=dt) + self.z1_4 = np.array([-2., -5., -8., -11., -14., -5.], dtype=dt) + self.z1r = np.array([-15., -22., -22., -16., -10., -4., -1.], dtype=dt) + self.z2 = np.array([-5., -14., -26., -20., -14., -8., -3.], dtype=dt) + self.z2r = np.array([-1., -4., -10., -16., -22., -22., -15.], dtype=dt) + self.zs = np.array([-3., -14., -30., -48., -66., -84., + -102., -54., -19.], dtype=dt) + + def test_float(self): + self._setup(float) + z = np.correlate(self.x, self.y, 'full') + assert_array_almost_equal(z, self.z1) + z = np.correlate(self.x, self.y[:-1], 'full') + assert_array_almost_equal(z, self.z1_4) + z = np.correlate(self.y, self.x, 'full') + assert_array_almost_equal(z, self.z2) + z = np.correlate(self.x[::-1], self.y, 'full') + assert_array_almost_equal(z, self.z1r) + z = np.correlate(self.y, self.x[::-1], 'full') + assert_array_almost_equal(z, self.z2r) + z = np.correlate(self.xs, self.y, 'full') + assert_array_almost_equal(z, self.zs) + + def test_object(self): + self._setup(Decimal) + z = np.correlate(self.x, self.y, 'full') + assert_array_almost_equal(z, self.z1) + z = np.correlate(self.y, self.x, 'full') + assert_array_almost_equal(z, self.z2) + + def test_no_overwrite(self): + d = np.ones(100) + k = np.ones(3) + np.correlate(d, k) + assert_array_equal(d, np.ones(100)) + assert_array_equal(k, np.ones(3)) + + def test_complex(self): + x = np.array([1, 2, 3, 4 + 1j], dtype=complex) + y = np.array([-1, -2j, 3 + 1j], dtype=complex) + r_z = np.array([3 - 1j, 6, 8 + 1j, 11 + 5j, -5 + 8j, -4 - 1j], dtype=complex) + r_z = r_z[::-1].conjugate() + z = np.correlate(y, x, mode='full') + assert_array_almost_equal(z, r_z) + + def test_zero_size(self): + with pytest.raises(ValueError): + np.correlate(np.array([]), np.ones(1000), mode='full') + with pytest.raises(ValueError): + np.correlate(np.ones(1000), np.array([]), mode='full') + + def test_mode(self): + d = np.ones(100) + k = np.ones(3) + default_mode = np.correlate(d, k, mode='valid') + with assert_raises(ValueError): + np.correlate(d, k, mode='v') + # integer mode + with assert_raises(ValueError): + np.correlate(d, k, mode=-1) + # assert_array_equal(np.correlate(d, k, mode=), default_mode) + # illegal arguments + with assert_raises(TypeError): + np.correlate(d, k, mode=None) + + +class TestConvolve: + def test_object(self): + d = [1.] * 100 + k = [1.] * 3 + assert_array_almost_equal(np.convolve(d, k)[2:-2], np.full(98, 3)) + + def test_no_overwrite(self): + d = np.ones(100) + k = np.ones(3) + np.convolve(d, k) + assert_array_equal(d, np.ones(100)) + assert_array_equal(k, np.ones(3)) + + def test_mode(self): + d = np.ones(100) + k = np.ones(3) + default_mode = np.convolve(d, k, mode='full') + with assert_raises(ValueError): + np.convolve(d, k, mode='f') + # integer mode + with assert_raises(ValueError): + np.convolve(d, k, mode=-1) + assert_array_equal(np.convolve(d, k, mode=2), default_mode) + # illegal arguments + with assert_raises(TypeError): + np.convolve(d, k, mode=None) + + def test_convolve_empty_input_error_message(self): + """ + Test that convolve raises the correct error message when inputs are empty. + Regression test for gh-30272 (variable swapping bug). + """ + with pytest.raises(ValueError, match="a cannot be empty"): + np.convolve(np.array([]), np.array([1, 2])) + + with pytest.raises(ValueError, match="v cannot be empty"): + np.convolve(np.array([1, 2]), np.array([])) + +class TestArgwhere: + + @pytest.mark.parametrize('nd', [0, 1, 2]) + def test_nd(self, nd): + # get an nd array with multiple elements in every dimension + x = np.empty((2,) * nd, bool) + + # none + x[...] = False + assert_equal(np.argwhere(x).shape, (0, nd)) + + # only one + x[...] = False + x.flat[0] = True + assert_equal(np.argwhere(x).shape, (1, nd)) + + # all but one + x[...] = True + x.flat[0] = False + assert_equal(np.argwhere(x).shape, (x.size - 1, nd)) + + # all + x[...] = True + assert_equal(np.argwhere(x).shape, (x.size, nd)) + + def test_2D(self): + x = np.arange(6).reshape((2, 3)) + assert_array_equal(np.argwhere(x > 1), + [[0, 2], + [1, 0], + [1, 1], + [1, 2]]) + + def test_list(self): + assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]]) + + +class TestRoll: + def test_roll1d(self): + x = np.arange(10) + xr = np.roll(x, 2) + assert_equal(xr, np.array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])) + + def test_roll2d(self): + x2 = np.reshape(np.arange(10), (2, 5)) + x2r = np.roll(x2, 1) + assert_equal(x2r, np.array([[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]])) + + x2r = np.roll(x2, 1, axis=0) + assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]])) + + x2r = np.roll(x2, 1, axis=1) + assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]])) + + # Roll multiple axes at once. + x2r = np.roll(x2, 1, axis=(0, 1)) + assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]])) + + x2r = np.roll(x2, (1, 0), axis=(0, 1)) + assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]])) + + x2r = np.roll(x2, (-1, 0), axis=(0, 1)) + assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]])) + + x2r = np.roll(x2, (0, 1), axis=(0, 1)) + assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]])) + + x2r = np.roll(x2, (0, -1), axis=(0, 1)) + assert_equal(x2r, np.array([[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]])) + + x2r = np.roll(x2, (1, 1), axis=(0, 1)) + assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]])) + + x2r = np.roll(x2, (-1, -1), axis=(0, 1)) + assert_equal(x2r, np.array([[6, 7, 8, 9, 5], [1, 2, 3, 4, 0]])) + + # Roll the same axis multiple times. + x2r = np.roll(x2, 1, axis=(0, 0)) + assert_equal(x2r, np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])) + + x2r = np.roll(x2, 1, axis=(1, 1)) + assert_equal(x2r, np.array([[3, 4, 0, 1, 2], [8, 9, 5, 6, 7]])) + + # Roll more than one turn in either direction. + x2r = np.roll(x2, 6, axis=1) + assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]])) + + x2r = np.roll(x2, -4, axis=1) + assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]])) + + def test_roll_empty(self): + x = np.array([]) + assert_equal(np.roll(x, 1), np.array([])) + + def test_roll_unsigned_shift(self): + x = np.arange(4) + shift = np.uint16(2) + assert_equal(np.roll(x, shift), np.roll(x, 2)) + + shift = np.uint64(2**63 + 2) + assert_equal(np.roll(x, shift), np.roll(x, 2)) + + def test_roll_big_int(self): + x = np.arange(4) + assert_equal(np.roll(x, 2**100), x) + + +class TestRollaxis: + + # expected shape indexed by (axis, start) for array of + # shape (1, 2, 3, 4) + tgtshape = {(0, 0): (1, 2, 3, 4), (0, 1): (1, 2, 3, 4), + (0, 2): (2, 1, 3, 4), (0, 3): (2, 3, 1, 4), + (0, 4): (2, 3, 4, 1), + (1, 0): (2, 1, 3, 4), (1, 1): (1, 2, 3, 4), + (1, 2): (1, 2, 3, 4), (1, 3): (1, 3, 2, 4), + (1, 4): (1, 3, 4, 2), + (2, 0): (3, 1, 2, 4), (2, 1): (1, 3, 2, 4), + (2, 2): (1, 2, 3, 4), (2, 3): (1, 2, 3, 4), + (2, 4): (1, 2, 4, 3), + (3, 0): (4, 1, 2, 3), (3, 1): (1, 4, 2, 3), + (3, 2): (1, 2, 4, 3), (3, 3): (1, 2, 3, 4), + (3, 4): (1, 2, 3, 4)} + + def test_exceptions(self): + a = np.arange(1 * 2 * 3 * 4).reshape(1, 2, 3, 4) + assert_raises(AxisError, np.rollaxis, a, -5, 0) + assert_raises(AxisError, np.rollaxis, a, 0, -5) + assert_raises(AxisError, np.rollaxis, a, 4, 0) + assert_raises(AxisError, np.rollaxis, a, 0, 5) + + def test_results(self): + a = np.arange(1 * 2 * 3 * 4).reshape(1, 2, 3, 4).copy() + aind = np.indices(a.shape) + assert_(a.flags['OWNDATA']) + for (i, j) in self.tgtshape: + # positive axis, positive start + res = np.rollaxis(a, axis=i, start=j) + i0, i1, i2, i3 = aind[np.array(res.shape) - 1] + assert_(np.all(res[i0, i1, i2, i3] == a)) + assert_(res.shape == self.tgtshape[(i, j)], str((i, j))) + assert_(not res.flags['OWNDATA']) + + # negative axis, positive start + ip = i + 1 + res = np.rollaxis(a, axis=-ip, start=j) + i0, i1, i2, i3 = aind[np.array(res.shape) - 1] + assert_(np.all(res[i0, i1, i2, i3] == a)) + assert_(res.shape == self.tgtshape[(4 - ip, j)]) + assert_(not res.flags['OWNDATA']) + + # positive axis, negative start + jp = j + 1 if j < 4 else j + res = np.rollaxis(a, axis=i, start=-jp) + i0, i1, i2, i3 = aind[np.array(res.shape) - 1] + assert_(np.all(res[i0, i1, i2, i3] == a)) + assert_(res.shape == self.tgtshape[(i, 4 - jp)]) + assert_(not res.flags['OWNDATA']) + + # negative axis, negative start + ip = i + 1 + jp = j + 1 if j < 4 else j + res = np.rollaxis(a, axis=-ip, start=-jp) + i0, i1, i2, i3 = aind[np.array(res.shape) - 1] + assert_(np.all(res[i0, i1, i2, i3] == a)) + assert_(res.shape == self.tgtshape[(4 - ip, 4 - jp)]) + assert_(not res.flags['OWNDATA']) + + +class TestMoveaxis: + def test_move_to_end(self): + x = np.random.randn(5, 6, 7) + for source, expected in [(0, (6, 7, 5)), + (1, (5, 7, 6)), + (2, (5, 6, 7)), + (-1, (5, 6, 7))]: + actual = np.moveaxis(x, source, -1).shape + assert_(actual, expected) + + def test_move_new_position(self): + x = np.random.randn(1, 2, 3, 4) + for source, destination, expected in [ + (0, 1, (2, 1, 3, 4)), + (1, 2, (1, 3, 2, 4)), + (1, -1, (1, 3, 4, 2)), + ]: + actual = np.moveaxis(x, source, destination).shape + assert_(actual, expected) + + def test_preserve_order(self): + x = np.zeros((1, 2, 3, 4)) + for source, destination in [ + (0, 0), + (3, -1), + (-1, 3), + ([0, -1], [0, -1]), + ([2, 0], [2, 0]), + (range(4), range(4)), + ]: + actual = np.moveaxis(x, source, destination).shape + assert_(actual, (1, 2, 3, 4)) + + def test_move_multiples(self): + x = np.zeros((0, 1, 2, 3)) + for source, destination, expected in [ + ([0, 1], [2, 3], (2, 3, 0, 1)), + ([2, 3], [0, 1], (2, 3, 0, 1)), + ([0, 1, 2], [2, 3, 0], (2, 3, 0, 1)), + ([3, 0], [1, 0], (0, 3, 1, 2)), + ([0, 3], [0, 1], (0, 3, 1, 2)), + ]: + actual = np.moveaxis(x, source, destination).shape + assert_(actual, expected) + + def test_errors(self): + x = np.random.randn(1, 2, 3) + assert_raises_regex(AxisError, 'source.*out of bounds', + np.moveaxis, x, 3, 0) + assert_raises_regex(AxisError, 'source.*out of bounds', + np.moveaxis, x, -4, 0) + assert_raises_regex(AxisError, 'destination.*out of bounds', + np.moveaxis, x, 0, 5) + assert_raises_regex(ValueError, 'repeated axis in `source`', + np.moveaxis, x, [0, 0], [0, 1]) + assert_raises_regex(ValueError, 'repeated axis in `destination`', + np.moveaxis, x, [0, 1], [1, 1]) + assert_raises_regex(ValueError, 'must have the same number', + np.moveaxis, x, 0, [0, 1]) + assert_raises_regex(ValueError, 'must have the same number', + np.moveaxis, x, [0, 1], [0]) + + def test_array_likes(self): + x = np.ma.zeros((1, 2, 3)) + result = np.moveaxis(x, 0, 0) + assert_(x.shape, result.shape) + assert_(isinstance(result, np.ma.MaskedArray)) + + x = [1, 2, 3] + result = np.moveaxis(x, 0, 0) + assert_(x, list(result)) + assert_(isinstance(result, np.ndarray)) + + +class TestCross: + @pytest.mark.filterwarnings( + "ignore:.*2-dimensional vectors.*:DeprecationWarning" + ) + def test_2x2(self): + u = [1, 2] + v = [3, 4] + z = -2 + cp = np.cross(u, v) + assert_equal(cp, z) + cp = np.cross(v, u) + assert_equal(cp, -z) + + @pytest.mark.filterwarnings( + "ignore:.*2-dimensional vectors.*:DeprecationWarning" + ) + def test_2x3(self): + u = [1, 2] + v = [3, 4, 5] + z = np.array([10, -5, -2]) + cp = np.cross(u, v) + assert_equal(cp, z) + cp = np.cross(v, u) + assert_equal(cp, -z) + + def test_3x3(self): + u = [1, 2, 3] + v = [4, 5, 6] + z = np.array([-3, 6, -3]) + cp = np.cross(u, v) + assert_equal(cp, z) + cp = np.cross(v, u) + assert_equal(cp, -z) + + @pytest.mark.filterwarnings( + "ignore:.*2-dimensional vectors.*:DeprecationWarning" + ) + def test_broadcasting(self): + # Ticket #2624 (Trac #2032) + u = np.tile([1, 2], (11, 1)) + v = np.tile([3, 4], (11, 1)) + z = -2 + assert_equal(np.cross(u, v), z) + assert_equal(np.cross(v, u), -z) + assert_equal(np.cross(u, u), 0) + + u = np.tile([1, 2], (11, 1)).T + v = np.tile([3, 4, 5], (11, 1)) + z = np.tile([10, -5, -2], (11, 1)) + assert_equal(np.cross(u, v, axisa=0), z) + assert_equal(np.cross(v, u.T), -z) + assert_equal(np.cross(v, v), 0) + + u = np.tile([1, 2, 3], (11, 1)).T + v = np.tile([3, 4], (11, 1)).T + z = np.tile([-12, 9, -2], (11, 1)) + assert_equal(np.cross(u, v, axisa=0, axisb=0), z) + assert_equal(np.cross(v.T, u.T), -z) + assert_equal(np.cross(u.T, u.T), 0) + + u = np.tile([1, 2, 3], (5, 1)) + v = np.tile([4, 5, 6], (5, 1)).T + z = np.tile([-3, 6, -3], (5, 1)) + assert_equal(np.cross(u, v, axisb=0), z) + assert_equal(np.cross(v.T, u), -z) + assert_equal(np.cross(u, u), 0) + + @pytest.mark.filterwarnings( + "ignore:.*2-dimensional vectors.*:DeprecationWarning" + ) + def test_broadcasting_shapes(self): + u = np.ones((2, 1, 3)) + v = np.ones((5, 3)) + assert_equal(np.cross(u, v).shape, (2, 5, 3)) + u = np.ones((10, 3, 5)) + v = np.ones((2, 5)) + assert_equal(np.cross(u, v, axisa=1, axisb=0).shape, (10, 5, 3)) + assert_raises(AxisError, np.cross, u, v, axisa=1, axisb=2) + assert_raises(AxisError, np.cross, u, v, axisa=3, axisb=0) + u = np.ones((10, 3, 5, 7)) + v = np.ones((5, 7, 2)) + assert_equal(np.cross(u, v, axisa=1, axisc=2).shape, (10, 5, 3, 7)) + assert_raises(AxisError, np.cross, u, v, axisa=-5, axisb=2) + assert_raises(AxisError, np.cross, u, v, axisa=1, axisb=-4) + # gh-5885 + u = np.ones((3, 4, 2)) + for axisc in range(-2, 2): + assert_equal(np.cross(u, u, axisc=axisc).shape, (3, 4)) + + def test_uint8_int32_mixed_dtypes(self): + # regression test for gh-19138 + u = np.array([[195, 8, 9]], np.uint8) + v = np.array([250, 166, 68], np.int32) + z = np.array([[950, 11010, -30370]], dtype=np.int32) + assert_equal(np.cross(v, u), z) + assert_equal(np.cross(u, v), -z) + + @pytest.mark.parametrize("a, b", [(0, [1, 2]), ([1, 2], 3)]) + def test_zero_dimension(self, a, b): + with pytest.raises(ValueError) as exc: + np.cross(a, b) + assert "At least one array has zero dimension" in str(exc.value) + + +def test_outer_out_param(): + arr1 = np.ones((5,)) + arr2 = np.ones((2,)) + arr3 = np.linspace(-2, 2, 5) + out1 = np.ndarray(shape=(5, 5)) + out2 = np.ndarray(shape=(2, 5)) + res1 = np.outer(arr1, arr3, out1) + assert_equal(res1, out1) + assert_equal(np.outer(arr2, arr3, out2), out2) + + +class TestIndices: + + def test_simple(self): + [x, y] = np.indices((4, 3)) + assert_array_equal(x, np.array([[0, 0, 0], + [1, 1, 1], + [2, 2, 2], + [3, 3, 3]])) + assert_array_equal(y, np.array([[0, 1, 2], + [0, 1, 2], + [0, 1, 2], + [0, 1, 2]])) + + def test_single_input(self): + [x] = np.indices((4,)) + assert_array_equal(x, np.array([0, 1, 2, 3])) + + [x] = np.indices((4,), sparse=True) + assert_array_equal(x, np.array([0, 1, 2, 3])) + + def test_scalar_input(self): + assert_array_equal([], np.indices(())) + assert_array_equal([], np.indices((), sparse=True)) + assert_array_equal([[]], np.indices((0,))) + assert_array_equal([[]], np.indices((0,), sparse=True)) + + def test_sparse(self): + [x, y] = np.indices((4, 3), sparse=True) + assert_array_equal(x, np.array([[0], [1], [2], [3]])) + assert_array_equal(y, np.array([[0, 1, 2]])) + + @pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64]) + @pytest.mark.parametrize("dims", [(), (0,), (4, 3)]) + def test_return_type(self, dtype, dims): + inds = np.indices(dims, dtype=dtype) + assert_(inds.dtype == dtype) + + for arr in np.indices(dims, dtype=dtype, sparse=True): + assert_(arr.dtype == dtype) + + +class TestRequire: + flag_names = ['C', 'C_CONTIGUOUS', 'CONTIGUOUS', + 'F', 'F_CONTIGUOUS', 'FORTRAN', + 'A', 'ALIGNED', + 'W', 'WRITEABLE', + 'O', 'OWNDATA'] + + def generate_all_false(self, dtype): + arr = np.zeros((2, 2), [('junk', 'i1'), ('a', dtype)]) + arr.setflags(write=False) + a = arr['a'] + assert_(not a.flags['C']) + assert_(not a.flags['F']) + assert_(not a.flags['O']) + assert_(not a.flags['W']) + assert_(not a.flags['A']) + return a + + def set_and_check_flag(self, flag, dtype, arr): + if dtype is None: + dtype = arr.dtype + b = np.require(arr, dtype, [flag]) + assert_(b.flags[flag]) + assert_(b.dtype == dtype) + + # a further call to np.require ought to return the same array + # unless OWNDATA is specified. + c = np.require(b, None, [flag]) + if flag[0] != 'O': + assert_(c is b) + else: + assert_(c.flags[flag]) + + def test_require_each(self): + + id = ['f8', 'i4'] + fd = [None, 'f8', 'c16'] + for idtype, fdtype, flag in itertools.product(id, fd, self.flag_names): + a = self.generate_all_false(idtype) + self.set_and_check_flag(flag, fdtype, a) + + def test_unknown_requirement(self): + a = self.generate_all_false('f8') + assert_raises(KeyError, np.require, a, None, 'Q') + + def test_non_array_input(self): + a = np.require([1, 2, 3, 4], 'i4', ['C', 'A', 'O']) + assert_(a.flags['O']) + assert_(a.flags['C']) + assert_(a.flags['A']) + assert_(a.dtype == 'i4') + assert_equal(a, [1, 2, 3, 4]) + + def test_C_and_F_simul(self): + a = self.generate_all_false('f8') + assert_raises(ValueError, np.require, a, None, ['C', 'F']) + + def test_ensure_array(self): + class ArraySubclass(np.ndarray): + pass + + a = ArraySubclass((2, 2)) + b = np.require(a, None, ['E']) + assert_(type(b) is np.ndarray) + + def test_preserve_subtype(self): + class ArraySubclass(np.ndarray): + pass + + for flag in self.flag_names: + a = ArraySubclass((2, 2)) + self.set_and_check_flag(flag, None, a) + + +class TestBroadcast: + def test_broadcast_in_args(self): + # gh-5881 + arrs = [np.empty((6, 7)), np.empty((5, 6, 1)), np.empty((7,)), + np.empty((5, 1, 7))] + mits = [np.broadcast(*arrs), + np.broadcast(np.broadcast(*arrs[:0]), np.broadcast(*arrs[0:])), + np.broadcast(np.broadcast(*arrs[:1]), np.broadcast(*arrs[1:])), + np.broadcast(np.broadcast(*arrs[:2]), np.broadcast(*arrs[2:])), + np.broadcast(arrs[0], np.broadcast(*arrs[1:-1]), arrs[-1])] + for mit in mits: + assert_equal(mit.shape, (5, 6, 7)) + assert_equal(mit.ndim, 3) + assert_equal(mit.nd, 3) + assert_equal(mit.numiter, 4) + for a, ia in zip(arrs, mit.iters): + assert_(a is ia.base) + + def test_broadcast_single_arg(self): + # gh-6899 + arrs = [np.empty((5, 6, 7))] + mit = np.broadcast(*arrs) + assert_equal(mit.shape, (5, 6, 7)) + assert_equal(mit.ndim, 3) + assert_equal(mit.nd, 3) + assert_equal(mit.numiter, 1) + assert_(arrs[0] is mit.iters[0].base) + + def test_number_of_arguments(self): + arr = np.empty((5,)) + for j in range(70): + arrs = [arr] * j + if j > 64: + assert_raises(ValueError, np.broadcast, *arrs) + else: + mit = np.broadcast(*arrs) + assert_equal(mit.numiter, j) + + def test_broadcast_error_kwargs(self): + # gh-13455 + arrs = [np.empty((5, 6, 7))] + mit = np.broadcast(*arrs) + mit2 = np.broadcast(*arrs, **{}) # noqa: PIE804 + assert_equal(mit.shape, mit2.shape) + assert_equal(mit.ndim, mit2.ndim) + assert_equal(mit.nd, mit2.nd) + assert_equal(mit.numiter, mit2.numiter) + assert_(mit.iters[0].base is mit2.iters[0].base) + + assert_raises(ValueError, np.broadcast, 1, x=1) + + def test_shape_mismatch_error_message(self): + with pytest.raises(ValueError, match=r"arg 0 with shape \(1, 3\) and " + r"arg 2 with shape \(2,\)"): + np.broadcast([[1, 2, 3]], [[4], [5]], [6, 7]) + + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") + def test_signatures(self): + sig_new = inspect.signature(np.broadcast) + assert len(sig_new.parameters) == 1 + assert "arrays" in sig_new.parameters + assert sig_new.parameters["arrays"].kind == inspect.Parameter.VAR_POSITIONAL + + sig_reset = inspect.signature(np.broadcast.reset) + assert len(sig_reset.parameters) == 1 + assert "self" in sig_reset.parameters + assert sig_reset.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY + + +class TestKeepdims: + + class sub_array(np.ndarray): + def sum(self, axis=None, dtype=None, out=None): + return np.ndarray.sum(self, axis, dtype, out, keepdims=True) + + def test_raise(self): + sub_class = self.sub_array + x = np.arange(30).view(sub_class) + assert_raises(TypeError, np.sum, x, keepdims=True) + + +class TestTensordot: + + def test_rejects_duplicate_axes(self): + a = np.ones((2, 3, 3)) + b = np.ones((3, 3, 4)) + with pytest.raises(ValueError): + np.tensordot(a, b, axes=([1, 1], [0, 0])) + + def test_zero_dimension(self): + # Test resolution to issue #5663 + a = np.ndarray((3, 0)) + b = np.ndarray((0, 4)) + td = np.tensordot(a, b, (1, 0)) + assert_array_equal(td, np.dot(a, b)) + assert_array_equal(td, np.einsum('ij,jk', a, b)) + + def test_zero_dimensional(self): + # gh-12130 + arr_0d = np.array(1) + # contracting no axes is well defined + ret = np.tensordot(arr_0d, arr_0d, ([], [])) + assert_array_equal(ret, arr_0d) + + +class TestAsType: + + def test_astype(self): + data = [[1, 2], [3, 4]] + actual = np.astype( + np.array(data, dtype=np.int64), np.uint32 + ) + expected = np.array(data, dtype=np.uint32) + + assert_array_equal(actual, expected) + assert_equal(actual.dtype, expected.dtype) + + assert np.shares_memory( + actual, np.astype(actual, actual.dtype, copy=False) + ) + + actual = np.astype(np.int64(10), np.float64) + expected = np.float64(10) + assert_equal(actual, expected) + assert_equal(actual.dtype, expected.dtype) + + with pytest.raises(TypeError, match="Input should be a NumPy array"): + np.astype(data, np.float64) diff --git a/local_packages/numpy/_core/tests/test_numerictypes.py b/local_packages/numpy/_core/tests/test_numerictypes.py new file mode 100644 index 0000000..5763a96 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_numerictypes.py @@ -0,0 +1,650 @@ +import itertools +import sys + +import pytest + +import numpy as np +import numpy._core.numerictypes as nt +from numpy._core.numerictypes import issctype, maximum_sctype, sctype2char, sctypes +from numpy.testing import ( + IS_PYPY, + assert_, + assert_equal, + assert_raises, + assert_raises_regex, +) + +# This is the structure of the table used for plain objects: +# +# +-+-+-+ +# |x|y|z| +# +-+-+-+ + +# Structure of a plain array description: +Pdescr = [ + ('x', 'i4', (2,)), + ('y', 'f8', (2, 2)), + ('z', 'u1')] + +# A plain list of tuples with values for testing: +PbufferT = [ + # x y z + ([3, 2], [[6., 4.], [6., 4.]], 8), + ([4, 3], [[7., 5.], [7., 5.]], 9), + ] + + +# This is the structure of the table used for nested objects (DON'T PANIC!): +# +# +-+---------------------------------+-----+----------+-+-+ +# |x|Info |color|info |y|z| +# | +-----+--+----------------+----+--+ +----+-----+ | | +# | |value|y2|Info2 |name|z2| |Name|Value| | | +# | | | +----+-----+--+--+ | | | | | | | +# | | | |name|value|y3|z3| | | | | | | | +# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+ +# + +# The corresponding nested array description: +Ndescr = [ + ('x', 'i4', (2,)), + ('Info', [ + ('value', 'c16'), + ('y2', 'f8'), + ('Info2', [ + ('name', 'S2'), + ('value', 'c16', (2,)), + ('y3', 'f8', (2,)), + ('z3', 'u4', (2,))]), + ('name', 'S2'), + ('z2', 'b1')]), + ('color', 'S2'), + ('info', [ + ('Name', 'U8'), + ('Value', 'c16')]), + ('y', 'f8', (2, 2)), + ('z', 'u1')] + +NbufferT = [ + # x Info color info y z + # value y2 Info2 name z2 Name Value + # name value y3 z3 + ([3, 2], (6j, 6., (b'nn', [6j, 4j], [6., 4.], [1, 2]), b'NN', True), + b'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8), + ([4, 3], (7j, 7., (b'oo', [7j, 5j], [7., 5.], [2, 1]), b'OO', False), + b'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9), + ] + + +byteorder = {'little': '<', 'big': '>'}[sys.byteorder] + +def normalize_descr(descr): + "Normalize a description adding the platform byteorder." + + out = [] + for item in descr: + dtype = item[1] + if isinstance(dtype, str): + if dtype[0] not in ['|', '<', '>']: + onebyte = dtype[1:] == "1" + if onebyte or dtype[0] in ['S', 'V', 'b']: + dtype = "|" + dtype + else: + dtype = byteorder + dtype + if len(item) > 2 and np.prod(item[2]) > 1: + nitem = (item[0], dtype, item[2]) + else: + nitem = (item[0], dtype) + out.append(nitem) + elif isinstance(dtype, list): + l = normalize_descr(dtype) + out.append((item[0], l)) + else: + raise ValueError(f"Expected a str or list and got {type(item)}") + return out + + +############################################################ +# Creation tests +############################################################ + +class CreateZeros: + """Check the creation of heterogeneous arrays zero-valued""" + + def test_zeros0D(self): + """Check creation of 0-dimensional objects""" + h = np.zeros((), dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + assert_(h.dtype.fields['x'][0].name[:4] == 'void') + assert_(h.dtype.fields['x'][0].char == 'V') + assert_(h.dtype.fields['x'][0].type == np.void) + # A small check that data is ok + assert_equal(h['z'], np.zeros((), dtype='u1')) + + def test_zerosSD(self): + """Check creation of single-dimensional objects""" + h = np.zeros((2,), dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + assert_(h.dtype['y'].name[:4] == 'void') + assert_(h.dtype['y'].char == 'V') + assert_(h.dtype['y'].type == np.void) + # A small check that data is ok + assert_equal(h['z'], np.zeros((2,), dtype='u1')) + + def test_zerosMD(self): + """Check creation of multi-dimensional objects""" + h = np.zeros((2, 3), dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + assert_(h.dtype['z'].name == 'uint8') + assert_(h.dtype['z'].char == 'B') + assert_(h.dtype['z'].type == np.uint8) + # A small check that data is ok + assert_equal(h['z'], np.zeros((2, 3), dtype='u1')) + + +class TestCreateZerosPlain(CreateZeros): + """Check the creation of heterogeneous arrays zero-valued (plain)""" + _descr = Pdescr + +class TestCreateZerosNested(CreateZeros): + """Check the creation of heterogeneous arrays zero-valued (nested)""" + _descr = Ndescr + + +class CreateValues: + """Check the creation of heterogeneous arrays with values""" + + def test_tuple(self): + """Check creation from tuples""" + h = np.array(self._buffer, dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + if self.multiple_rows: + assert_(h.shape == (2,)) + else: + assert_(h.shape == ()) + + def test_list_of_tuple(self): + """Check creation from list of tuples""" + h = np.array([self._buffer], dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + if self.multiple_rows: + assert_(h.shape == (1, 2)) + else: + assert_(h.shape == (1,)) + + def test_list_of_list_of_tuple(self): + """Check creation from list of list of tuples""" + h = np.array([[self._buffer]], dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + if self.multiple_rows: + assert_(h.shape == (1, 1, 2)) + else: + assert_(h.shape == (1, 1)) + + +class TestCreateValuesPlainSingle(CreateValues): + """Check the creation of heterogeneous arrays (plain, single row)""" + _descr = Pdescr + multiple_rows = 0 + _buffer = PbufferT[0] + +class TestCreateValuesPlainMultiple(CreateValues): + """Check the creation of heterogeneous arrays (plain, multiple rows)""" + _descr = Pdescr + multiple_rows = 1 + _buffer = PbufferT + +class TestCreateValuesNestedSingle(CreateValues): + """Check the creation of heterogeneous arrays (nested, single row)""" + _descr = Ndescr + multiple_rows = 0 + _buffer = NbufferT[0] + +class TestCreateValuesNestedMultiple(CreateValues): + """Check the creation of heterogeneous arrays (nested, multiple rows)""" + _descr = Ndescr + multiple_rows = 1 + _buffer = NbufferT + + +############################################################ +# Reading tests +############################################################ + +class ReadValuesPlain: + """Check the reading of values in heterogeneous arrays (plain)""" + + def test_access_fields(self): + h = np.array(self._buffer, dtype=self._descr) + if not self.multiple_rows: + assert_(h.shape == ()) + assert_equal(h['x'], np.array(self._buffer[0], dtype='i4')) + assert_equal(h['y'], np.array(self._buffer[1], dtype='f8')) + assert_equal(h['z'], np.array(self._buffer[2], dtype='u1')) + else: + assert_(len(h) == 2) + assert_equal(h['x'], np.array([self._buffer[0][0], + self._buffer[1][0]], dtype='i4')) + assert_equal(h['y'], np.array([self._buffer[0][1], + self._buffer[1][1]], dtype='f8')) + assert_equal(h['z'], np.array([self._buffer[0][2], + self._buffer[1][2]], dtype='u1')) + + +class TestReadValuesPlainSingle(ReadValuesPlain): + """Check the creation of heterogeneous arrays (plain, single row)""" + _descr = Pdescr + multiple_rows = 0 + _buffer = PbufferT[0] + +class TestReadValuesPlainMultiple(ReadValuesPlain): + """Check the values of heterogeneous arrays (plain, multiple rows)""" + _descr = Pdescr + multiple_rows = 1 + _buffer = PbufferT + +class ReadValuesNested: + """Check the reading of values in heterogeneous arrays (nested)""" + + def test_access_top_fields(self): + """Check reading the top fields of a nested array""" + h = np.array(self._buffer, dtype=self._descr) + if not self.multiple_rows: + assert_(h.shape == ()) + assert_equal(h['x'], np.array(self._buffer[0], dtype='i4')) + assert_equal(h['y'], np.array(self._buffer[4], dtype='f8')) + assert_equal(h['z'], np.array(self._buffer[5], dtype='u1')) + else: + assert_(len(h) == 2) + assert_equal(h['x'], np.array([self._buffer[0][0], + self._buffer[1][0]], dtype='i4')) + assert_equal(h['y'], np.array([self._buffer[0][4], + self._buffer[1][4]], dtype='f8')) + assert_equal(h['z'], np.array([self._buffer[0][5], + self._buffer[1][5]], dtype='u1')) + + def test_nested1_acessors(self): + """Check reading the nested fields of a nested array (1st level)""" + h = np.array(self._buffer, dtype=self._descr) + if not self.multiple_rows: + assert_equal(h['Info']['value'], + np.array(self._buffer[1][0], dtype='c16')) + assert_equal(h['Info']['y2'], + np.array(self._buffer[1][1], dtype='f8')) + assert_equal(h['info']['Name'], + np.array(self._buffer[3][0], dtype='U2')) + assert_equal(h['info']['Value'], + np.array(self._buffer[3][1], dtype='c16')) + else: + assert_equal(h['Info']['value'], + np.array([self._buffer[0][1][0], + self._buffer[1][1][0]], + dtype='c16')) + assert_equal(h['Info']['y2'], + np.array([self._buffer[0][1][1], + self._buffer[1][1][1]], + dtype='f8')) + assert_equal(h['info']['Name'], + np.array([self._buffer[0][3][0], + self._buffer[1][3][0]], + dtype='U2')) + assert_equal(h['info']['Value'], + np.array([self._buffer[0][3][1], + self._buffer[1][3][1]], + dtype='c16')) + + def test_nested2_acessors(self): + """Check reading the nested fields of a nested array (2nd level)""" + h = np.array(self._buffer, dtype=self._descr) + if not self.multiple_rows: + assert_equal(h['Info']['Info2']['value'], + np.array(self._buffer[1][2][1], dtype='c16')) + assert_equal(h['Info']['Info2']['z3'], + np.array(self._buffer[1][2][3], dtype='u4')) + else: + assert_equal(h['Info']['Info2']['value'], + np.array([self._buffer[0][1][2][1], + self._buffer[1][1][2][1]], + dtype='c16')) + assert_equal(h['Info']['Info2']['z3'], + np.array([self._buffer[0][1][2][3], + self._buffer[1][1][2][3]], + dtype='u4')) + + def test_nested1_descriptor(self): + """Check access nested descriptors of a nested array (1st level)""" + h = np.array(self._buffer, dtype=self._descr) + assert_(h.dtype['Info']['value'].name == 'complex128') + assert_(h.dtype['Info']['y2'].name == 'float64') + assert_(h.dtype['info']['Name'].name == 'str256') + assert_(h.dtype['info']['Value'].name == 'complex128') + + def test_nested2_descriptor(self): + """Check access nested descriptors of a nested array (2nd level)""" + h = np.array(self._buffer, dtype=self._descr) + assert_(h.dtype['Info']['Info2']['value'].name == 'void256') + assert_(h.dtype['Info']['Info2']['z3'].name == 'void64') + + +class TestReadValuesNestedSingle(ReadValuesNested): + """Check the values of heterogeneous arrays (nested, single row)""" + _descr = Ndescr + multiple_rows = False + _buffer = NbufferT[0] + +class TestReadValuesNestedMultiple(ReadValuesNested): + """Check the values of heterogeneous arrays (nested, multiple rows)""" + _descr = Ndescr + multiple_rows = True + _buffer = NbufferT + +class TestEmptyField: + def test_assign(self): + a = np.arange(10, dtype=np.float32) + a.dtype = [("int", "<0i4"), ("float", "<2f4")] + assert_(a['int'].shape == (5, 0)) + assert_(a['float'].shape == (5, 2)) + + +class TestMultipleFields: + def _bad_call(self): + ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8') + return ary['f0', 'f1'] + + def test_no_tuple(self): + assert_raises(IndexError, self._bad_call) + + def test_return(self): + ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8') + res = ary[['f0', 'f2']].tolist() + assert_(res == [(1, 3), (5, 7)]) + + +class TestIsSubDType: + # scalar types can be promoted into dtypes + wrappers = [np.dtype, lambda x: x] + + def test_both_abstract(self): + assert_(np.issubdtype(np.floating, np.inexact)) + assert_(not np.issubdtype(np.inexact, np.floating)) + + def test_same(self): + for cls in (np.float32, np.int32): + for w1, w2 in itertools.product(self.wrappers, repeat=2): + assert_(np.issubdtype(w1(cls), w2(cls))) + + def test_subclass(self): + # note we cannot promote floating to a dtype, as it would turn into a + # concrete type + for w in self.wrappers: + assert_(np.issubdtype(w(np.float32), np.floating)) + assert_(np.issubdtype(w(np.float64), np.floating)) + + def test_subclass_backwards(self): + for w in self.wrappers: + assert_(not np.issubdtype(np.floating, w(np.float32))) + assert_(not np.issubdtype(np.floating, w(np.float64))) + + def test_sibling_class(self): + for w1, w2 in itertools.product(self.wrappers, repeat=2): + assert_(not np.issubdtype(w1(np.float32), w2(np.float64))) + assert_(not np.issubdtype(w1(np.float64), w2(np.float32))) + + def test_nondtype_nonscalartype(self): + # See gh-14619 and gh-9505 which introduced the deprecation to fix + # this. These tests are directly taken from gh-9505 + assert not np.issubdtype(np.float32, 'float64') + assert not np.issubdtype(np.float32, 'f8') + assert not np.issubdtype(np.int32, str) + assert not np.issubdtype(np.int32, 'int64') + assert not np.issubdtype(np.str_, 'void') + # for the following the correct spellings are + # np.integer, np.floating, or np.complexfloating respectively: + assert not np.issubdtype(np.int8, int) # np.int8 is never np.int_ + assert not np.issubdtype(np.float32, float) + assert not np.issubdtype(np.complex64, complex) + assert not np.issubdtype(np.float32, "float") + assert not np.issubdtype(np.float64, "f") + + # Test the same for the correct first datatype and abstract one + # in the case of int, float, complex: + assert np.issubdtype(np.float64, 'float64') + assert np.issubdtype(np.float64, 'f8') + assert np.issubdtype(np.str_, str) + assert np.issubdtype(np.int64, 'int64') + assert np.issubdtype(np.void, 'void') + assert np.issubdtype(np.int8, np.integer) + assert np.issubdtype(np.float32, np.floating) + assert np.issubdtype(np.complex64, np.complexfloating) + assert np.issubdtype(np.float64, "float") + assert np.issubdtype(np.float32, "f") + + +class TestIsDType: + """ + Check correctness of `np.isdtype`. The test considers different argument + configurations: `np.isdtype(dtype, k1)` and `np.isdtype(dtype, (k1, k2))` + with concrete dtypes and dtype groups. + """ + dtype_group_dict = { + "signed integer": sctypes["int"], + "unsigned integer": sctypes["uint"], + "integral": sctypes["int"] + sctypes["uint"], + "real floating": sctypes["float"], + "complex floating": sctypes["complex"], + "numeric": ( + sctypes["int"] + sctypes["uint"] + sctypes["float"] + + sctypes["complex"] + ) + } + + @pytest.mark.parametrize( + "dtype,close_dtype", + [ + (np.int64, np.int32), (np.uint64, np.uint32), + (np.float64, np.float32), (np.complex128, np.complex64) + ] + ) + @pytest.mark.parametrize( + "dtype_group", + [ + None, "signed integer", "unsigned integer", "integral", + "real floating", "complex floating", "numeric" + ] + ) + def test_isdtype(self, dtype, close_dtype, dtype_group): + # First check if same dtypes return `true` and different ones + # give `false` (even if they're close in the dtype hierarchy!) + if dtype_group is None: + assert np.isdtype(dtype, dtype) + assert not np.isdtype(dtype, close_dtype) + assert np.isdtype(dtype, (dtype, close_dtype)) + + # Check that dtype and a dtype group that it belongs to + # return `true`, and `false` otherwise. + elif dtype in self.dtype_group_dict[dtype_group]: + assert np.isdtype(dtype, dtype_group) + assert np.isdtype(dtype, (close_dtype, dtype_group)) + else: + assert not np.isdtype(dtype, dtype_group) + + def test_isdtype_invalid_args(self): + with assert_raises_regex(TypeError, r".*must be a NumPy dtype.*"): + np.isdtype("int64", np.int64) + with assert_raises_regex(TypeError, r".*kind argument must.*"): + np.isdtype(np.int64, 1) + with assert_raises_regex(ValueError, r".*not a known kind name.*"): + np.isdtype(np.int64, "int64") + + def test_sctypes_complete(self): + # issue 26439: int32/intc were masking each other on 32-bit builds + assert np.int32 in sctypes['int'] + assert np.intc in sctypes['int'] + assert np.int64 in sctypes['int'] + assert np.uint32 in sctypes['uint'] + assert np.uintc in sctypes['uint'] + assert np.uint64 in sctypes['uint'] + +class TestSctypeDict: + def test_longdouble(self): + assert_(np._core.sctypeDict['float64'] is not np.longdouble) + assert_(np._core.sctypeDict['complex128'] is not np.clongdouble) + + def test_ulong(self): + assert np._core.sctypeDict['ulong'] is np.ulong + assert np.dtype(np.ulong) is np.dtype("ulong") + assert np.dtype(np.ulong).itemsize == np.dtype(np.long).itemsize + + +@pytest.mark.filterwarnings("ignore:.*maximum_sctype.*:DeprecationWarning") +class TestMaximumSctype: + + # note that parametrizing with sctype['int'] and similar would skip types + # with the same size (gh-11923) + + @pytest.mark.parametrize( + 't', [np.byte, np.short, np.intc, np.long, np.longlong] + ) + def test_int(self, t): + assert_equal(maximum_sctype(t), np._core.sctypes['int'][-1]) + + @pytest.mark.parametrize( + 't', [np.ubyte, np.ushort, np.uintc, np.ulong, np.ulonglong] + ) + def test_uint(self, t): + assert_equal(maximum_sctype(t), np._core.sctypes['uint'][-1]) + + @pytest.mark.parametrize('t', [np.half, np.single, np.double, np.longdouble]) + def test_float(self, t): + assert_equal(maximum_sctype(t), np._core.sctypes['float'][-1]) + + @pytest.mark.parametrize('t', [np.csingle, np.cdouble, np.clongdouble]) + def test_complex(self, t): + assert_equal(maximum_sctype(t), np._core.sctypes['complex'][-1]) + + @pytest.mark.parametrize('t', [np.bool, np.object_, np.str_, np.bytes_, + np.void]) + def test_other(self, t): + assert_equal(maximum_sctype(t), t) + + +class Test_sctype2char: + # This function is old enough that we're really just documenting the quirks + # at this point. + + def test_scalar_type(self): + assert_equal(sctype2char(np.double), 'd') + assert_equal(sctype2char(np.long), 'l') + assert_equal(sctype2char(np.int_), np.array(0).dtype.char) + assert_equal(sctype2char(np.str_), 'U') + assert_equal(sctype2char(np.bytes_), 'S') + + def test_other_type(self): + assert_equal(sctype2char(float), 'd') + assert_equal(sctype2char(list), 'O') + assert_equal(sctype2char(np.ndarray), 'O') + + def test_third_party_scalar_type(self): + from numpy._core._rational_tests import rational + assert_raises(KeyError, sctype2char, rational) + assert_raises(KeyError, sctype2char, rational(1)) + + def test_array_instance(self): + assert_equal(sctype2char(np.array([1.0, 2.0])), 'd') + + def test_abstract_type(self): + assert_raises(KeyError, sctype2char, np.floating) + + def test_non_type(self): + assert_raises(ValueError, sctype2char, 1) + +@pytest.mark.parametrize("rep, expected", [ + (np.int32, True), + (list, False), + (1.1, False), + (str, True), + (np.dtype(np.float64), True), + (np.dtype((np.int16, (3, 4))), True), + (np.dtype([('a', np.int8)]), True), + ]) +def test_issctype(rep, expected): + # ensure proper identification of scalar + # data-types by issctype() + actual = issctype(rep) + assert type(actual) is bool + assert_equal(actual, expected) + + +@pytest.mark.skipif(sys.flags.optimize > 1, + reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1") +@pytest.mark.xfail(IS_PYPY, + reason="PyPy cannot modify tp_doc after PyType_Ready") +class TestDocStrings: + def test_platform_dependent_aliases(self): + if np.int64 is np.int_: + assert_('int64' in np.int_.__doc__) + elif np.int64 is np.longlong: + assert_('int64' in np.longlong.__doc__) + + +class TestScalarTypeNames: + # gh-9799 + + numeric_types = [ + np.byte, np.short, np.intc, np.long, np.longlong, + np.ubyte, np.ushort, np.uintc, np.ulong, np.ulonglong, + np.half, np.single, np.double, np.longdouble, + np.csingle, np.cdouble, np.clongdouble, + ] + + def test_names_are_unique(self): + # none of the above may be aliases for each other + assert len(set(self.numeric_types)) == len(self.numeric_types) + + # names must be unique + names = [t.__name__ for t in self.numeric_types] + assert len(set(names)) == len(names) + + @pytest.mark.parametrize('t', numeric_types) + def test_names_reflect_attributes(self, t): + """ Test that names correspond to where the type is under ``np.`` """ + assert getattr(np, t.__name__) is t + + @pytest.mark.parametrize('t', numeric_types) + def test_names_are_undersood_by_dtype(self, t): + """ Test the dtype constructor maps names back to the type """ + assert np.dtype(t.__name__).type is t + + +class TestScalarTypeOrder: + @pytest.mark.parametrize(('a', 'b'), [ + # signedinteger + (np.byte, np.short), + (np.short, np.intc), + (np.intc, np.long), + (np.long, np.longlong), + # unsignedinteger + (np.ubyte, np.ushort), + (np.ushort, np.uintc), + (np.uintc, np.ulong), + (np.ulong, np.ulonglong), + # floating + (np.half, np.single), + (np.single, np.double), + (np.double, np.longdouble), + # complexfloating + (np.csingle, np.cdouble), + (np.cdouble, np.clongdouble), + # flexible + (np.bytes_, np.str_), + (np.str_, np.void), + # bouncy castles + (np.datetime64, np.timedelta64), + ]) + def test_stable_ordering(self, a: type[np.generic], b: type[np.generic]): + assert np.ScalarType.index(a) <= np.ScalarType.index(b) + + +class TestBoolDefinition: + def test_bool_definition(self): + assert nt.bool is np.bool diff --git a/local_packages/numpy/_core/tests/test_overrides.py b/local_packages/numpy/_core/tests/test_overrides.py new file mode 100644 index 0000000..ebcf2f0 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_overrides.py @@ -0,0 +1,800 @@ +import inspect +import os +import pickle +import sys +import tempfile +from io import StringIO +from unittest import mock + +import pytest + +import numpy as np +from numpy._core.overrides import ( + _get_implementing_args, + array_function_dispatch, + verify_matching_signatures, +) +from numpy.testing import assert_, assert_equal, assert_raises, assert_raises_regex +from numpy.testing.overrides import get_overridable_numpy_array_functions + + +def _return_not_implemented(self, *args, **kwargs): + return NotImplemented + + +# need to define this at the top level to test pickling +@array_function_dispatch(lambda array: (array,)) +def dispatched_one_arg(array): + """Docstring.""" + return 'original' + + +@array_function_dispatch(lambda array1, array2: (array1, array2)) +def dispatched_two_arg(array1, array2): + """Docstring.""" + return 'original' + + +class TestGetImplementingArgs: + + def test_ndarray(self): + array = np.array(1) + + args = _get_implementing_args([array]) + assert_equal(list(args), [array]) + + args = _get_implementing_args([array, array]) + assert_equal(list(args), [array]) + + args = _get_implementing_args([array, 1]) + assert_equal(list(args), [array]) + + args = _get_implementing_args([1, array]) + assert_equal(list(args), [array]) + + def test_ndarray_subclasses(self): + + class OverrideSub(np.ndarray): + __array_function__ = _return_not_implemented + + class NoOverrideSub(np.ndarray): + pass + + array = np.array(1).view(np.ndarray) + override_sub = np.array(1).view(OverrideSub) + no_override_sub = np.array(1).view(NoOverrideSub) + + args = _get_implementing_args([array, override_sub]) + assert_equal(list(args), [override_sub, array]) + + args = _get_implementing_args([array, no_override_sub]) + assert_equal(list(args), [no_override_sub, array]) + + args = _get_implementing_args( + [override_sub, no_override_sub]) + assert_equal(list(args), [override_sub, no_override_sub]) + + def test_ndarray_and_duck_array(self): + + class Other: + __array_function__ = _return_not_implemented + + array = np.array(1) + other = Other() + + args = _get_implementing_args([other, array]) + assert_equal(list(args), [other, array]) + + args = _get_implementing_args([array, other]) + assert_equal(list(args), [array, other]) + + def test_ndarray_subclass_and_duck_array(self): + + class OverrideSub(np.ndarray): + __array_function__ = _return_not_implemented + + class Other: + __array_function__ = _return_not_implemented + + array = np.array(1) + subarray = np.array(1).view(OverrideSub) + other = Other() + + assert_equal(_get_implementing_args([array, subarray, other]), + [subarray, array, other]) + assert_equal(_get_implementing_args([array, other, subarray]), + [subarray, array, other]) + + def test_many_duck_arrays(self): + + class A: + __array_function__ = _return_not_implemented + + class B(A): + __array_function__ = _return_not_implemented + + class C(A): + __array_function__ = _return_not_implemented + + class D: + __array_function__ = _return_not_implemented + + a = A() + b = B() + c = C() + d = D() + + assert_equal(_get_implementing_args([1]), []) + assert_equal(_get_implementing_args([a]), [a]) + assert_equal(_get_implementing_args([a, 1]), [a]) + assert_equal(_get_implementing_args([a, a, a]), [a]) + assert_equal(_get_implementing_args([a, d, a]), [a, d]) + assert_equal(_get_implementing_args([a, b]), [b, a]) + assert_equal(_get_implementing_args([b, a]), [b, a]) + assert_equal(_get_implementing_args([a, b, c]), [b, c, a]) + assert_equal(_get_implementing_args([a, c, b]), [c, b, a]) + + def test_too_many_duck_arrays(self): + namespace = {'__array_function__': _return_not_implemented} + types = [type('A' + str(i), (object,), namespace) for i in range(65)] + relevant_args = [t() for t in types] + + actual = _get_implementing_args(relevant_args[:64]) + assert_equal(actual, relevant_args[:64]) + + with assert_raises_regex(TypeError, 'distinct argument types'): + _get_implementing_args(relevant_args) + + +class TestNDArrayArrayFunction: + + def test_method(self): + + class Other: + __array_function__ = _return_not_implemented + + class NoOverrideSub(np.ndarray): + pass + + class OverrideSub(np.ndarray): + __array_function__ = _return_not_implemented + + array = np.array([1]) + other = Other() + no_override_sub = array.view(NoOverrideSub) + override_sub = array.view(OverrideSub) + + result = array.__array_function__(func=dispatched_two_arg, + types=(np.ndarray,), + args=(array, 1.), kwargs={}) + assert_equal(result, 'original') + + result = array.__array_function__(func=dispatched_two_arg, + types=(np.ndarray, Other), + args=(array, other), kwargs={}) + assert_(result is NotImplemented) + + result = array.__array_function__(func=dispatched_two_arg, + types=(np.ndarray, NoOverrideSub), + args=(array, no_override_sub), + kwargs={}) + assert_equal(result, 'original') + + result = array.__array_function__(func=dispatched_two_arg, + types=(np.ndarray, OverrideSub), + args=(array, override_sub), + kwargs={}) + assert_equal(result, 'original') + + with assert_raises_regex(TypeError, 'no implementation found'): + np.concatenate((array, other)) + + expected = np.concatenate((array, array)) + result = np.concatenate((array, no_override_sub)) + assert_equal(result, expected.view(NoOverrideSub)) + result = np.concatenate((array, override_sub)) + assert_equal(result, expected.view(OverrideSub)) + + def test_no_wrapper(self): + # Regular numpy functions have wrappers, but do not presume + # all functions do (array creation ones do not): check that + # we just call the function in that case. + array = np.array(1) + func = lambda x: x * 2 + result = array.__array_function__(func=func, types=(np.ndarray,), + args=(array,), kwargs={}) + assert_equal(result, array * 2) + + def test_wrong_arguments(self): + # Check our implementation guards against wrong arguments. + a = np.array([1, 2]) + with pytest.raises(TypeError, match="args must be a tuple"): + a.__array_function__(np.reshape, (np.ndarray,), a, (2, 1)) + with pytest.raises(TypeError, match="kwargs must be a dict"): + a.__array_function__(np.reshape, (np.ndarray,), (a,), (2, 1)) + + +class TestArrayFunctionDispatch: + + def test_pickle(self): + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + roundtripped = pickle.loads( + pickle.dumps(dispatched_one_arg, protocol=proto)) + assert_(roundtripped is dispatched_one_arg) + + def test_name_and_docstring(self): + assert_equal(dispatched_one_arg.__name__, 'dispatched_one_arg') + if sys.flags.optimize < 2: + assert_equal(dispatched_one_arg.__doc__, 'Docstring.') + + def test_interface(self): + + class MyArray: + def __array_function__(self, func, types, args, kwargs): + return (self, func, types, args, kwargs) + + original = MyArray() + (obj, func, types, args, kwargs) = dispatched_one_arg(original) + assert_(obj is original) + assert_(func is dispatched_one_arg) + assert_equal(set(types), {MyArray}) + # assert_equal uses the overloaded np.iscomplexobj() internally + assert_(args == (original,)) + assert_equal(kwargs, {}) + + def test_not_implemented(self): + + class MyArray: + def __array_function__(self, func, types, args, kwargs): + return NotImplemented + + array = MyArray() + with assert_raises_regex(TypeError, 'no implementation found'): + dispatched_one_arg(array) + + def test_where_dispatch(self): + + class DuckArray: + def __array_function__(self, ufunc, method, *inputs, **kwargs): + return "overridden" + + array = np.array(1) + duck_array = DuckArray() + + result = np.std(array, where=duck_array) + + assert_equal(result, "overridden") + + +class TestVerifyMatchingSignatures: + + def test_verify_matching_signatures(self): + + verify_matching_signatures(lambda x: 0, lambda x: 0) + verify_matching_signatures(lambda x=None: 0, lambda x=None: 0) + verify_matching_signatures(lambda x=1: 0, lambda x=None: 0) + + with assert_raises(RuntimeError): + verify_matching_signatures(lambda a: 0, lambda b: 0) + with assert_raises(RuntimeError): + verify_matching_signatures(lambda x: 0, lambda x=None: 0) + with assert_raises(RuntimeError): + verify_matching_signatures(lambda x=None: 0, lambda y=None: 0) + with assert_raises(RuntimeError): + verify_matching_signatures(lambda x=1: 0, lambda y=1: 0) + + def test_array_function_dispatch(self): + + with assert_raises(RuntimeError): + @array_function_dispatch(lambda x: (x,)) + def f(y): + pass + + # should not raise + @array_function_dispatch(lambda x: (x,), verify=False) + def f(y): + pass + + +def _new_duck_type_and_implements(): + """Create a duck array type and implements functions.""" + HANDLED_FUNCTIONS = {} + + class MyArray: + def __array_function__(self, func, types, args, kwargs): + if func not in HANDLED_FUNCTIONS: + return NotImplemented + if not all(issubclass(t, MyArray) for t in types): + return NotImplemented + return HANDLED_FUNCTIONS[func](*args, **kwargs) + + def implements(numpy_function): + """Register an __array_function__ implementations.""" + def decorator(func): + HANDLED_FUNCTIONS[numpy_function] = func + return func + return decorator + + return (MyArray, implements) + + +class TestArrayFunctionImplementation: + + def test_one_arg(self): + MyArray, implements = _new_duck_type_and_implements() + + @implements(dispatched_one_arg) + def _(array): + return 'myarray' + + assert_equal(dispatched_one_arg(1), 'original') + assert_equal(dispatched_one_arg(MyArray()), 'myarray') + + def test_optional_args(self): + MyArray, implements = _new_duck_type_and_implements() + + @array_function_dispatch(lambda array, option=None: (array,)) + def func_with_option(array, option='default'): + return option + + @implements(func_with_option) + def my_array_func_with_option(array, new_option='myarray'): + return new_option + + # we don't need to implement every option on __array_function__ + # implementations + assert_equal(func_with_option(1), 'default') + assert_equal(func_with_option(1, option='extra'), 'extra') + assert_equal(func_with_option(MyArray()), 'myarray') + with assert_raises(TypeError): + func_with_option(MyArray(), option='extra') + + # but new options on implementations can't be used + result = my_array_func_with_option(MyArray(), new_option='yes') + assert_equal(result, 'yes') + with assert_raises(TypeError): + func_with_option(MyArray(), new_option='no') + + def test_not_implemented(self): + MyArray, implements = _new_duck_type_and_implements() + + @array_function_dispatch(lambda array: (array,), module='my') + def func(array): + return array + + array = np.array(1) + assert_(func(array) is array) + assert_equal(func.__module__, 'my') + + with assert_raises_regex( + TypeError, "no implementation found for 'my.func'"): + func(MyArray()) + + @pytest.mark.parametrize("name", ["concatenate", "mean", "asarray"]) + def test_signature_error_message_simple(self, name): + func = getattr(np, name) + try: + # all of these functions need an argument: + func() + except TypeError as e: + exc = e + + assert exc.args[0].startswith(f"{name}()") + + def test_signature_error_message(self): + # The lambda function will be named "", but the TypeError + # should show the name as "func" + def _dispatcher(): + return () + + @array_function_dispatch(_dispatcher) + def func(): + pass + + try: + func._implementation(bad_arg=3) + except TypeError as e: + expected_exception = e + + try: + func(bad_arg=3) + raise AssertionError("must fail") + except TypeError as exc: + if exc.args[0].startswith("_dispatcher"): + # We replace the qualname currently, but it used `__name__` + # (relevant functions have the same name and qualname anyway) + pytest.skip("Python version is not using __qualname__ for " + "TypeError formatting.") + + assert exc.args == expected_exception.args + + @pytest.mark.parametrize("value", [234, "this func is not replaced"]) + def test_dispatcher_error(self, value): + # If the dispatcher raises an error, we must not attempt to mutate it + error = TypeError(value) + + def dispatcher(): + raise error + + @array_function_dispatch(dispatcher) + def func(): + return 3 + + try: + func() + raise AssertionError("must fail") + except TypeError as exc: + assert exc is error # unmodified exception + + def test_properties(self): + # Check that str and repr are sensible + func = dispatched_two_arg + assert str(func) == str(func._implementation) + repr_no_id = repr(func).split("at ")[0] + repr_no_id_impl = repr(func._implementation).split("at ")[0] + assert repr_no_id == repr_no_id_impl + + @pytest.mark.parametrize("func", [ + lambda x, y: 0, # no like argument + lambda like=None: 0, # not keyword only + lambda *, like=None, a=3: 0, # not last (not that it matters) + ]) + def test_bad_like_sig(self, func): + # We sanity check the signature, and these should fail. + with pytest.raises(RuntimeError): + array_function_dispatch()(func) + + def test_bad_like_passing(self): + # Cover internal sanity check for passing like as first positional arg + def func(*, like=None): + pass + + func_with_like = array_function_dispatch()(func) + with pytest.raises(TypeError): + func_with_like() + with pytest.raises(TypeError): + func_with_like(like=234) + + def test_too_many_args(self): + # Mainly a unit-test to increase coverage + objs = [] + for i in range(80): + class MyArr: + def __array_function__(self, *args, **kwargs): + return NotImplemented + + objs.append(MyArr()) + + def _dispatch(*args): + return args + + @array_function_dispatch(_dispatch) + def func(*args): + pass + + with pytest.raises(TypeError, match="maximum number"): + func(*objs) + + +class TestNDArrayMethods: + + def test_repr(self): + # gh-12162: should still be defined even if __array_function__ doesn't + # implement np.array_repr() + + class MyArray(np.ndarray): + def __array_function__(*args, **kwargs): + return NotImplemented + + array = np.array(1).view(MyArray) + assert_equal(repr(array), 'MyArray(1)') + assert_equal(str(array), '1') + + +class TestNumPyFunctions: + + def test_set_module(self): + assert_equal(np.sum.__module__, 'numpy') + assert_equal(np.char.equal.__module__, 'numpy.char') + assert_equal(np.fft.fft.__module__, 'numpy.fft') + assert_equal(np.linalg.solve.__module__, 'numpy.linalg') + + def test_inspect_sum(self): + signature = inspect.signature(np.sum) + assert_('axis' in signature.parameters) + + def test_override_sum(self): + MyArray, implements = _new_duck_type_and_implements() + + @implements(np.sum) + def _(array): + return 'yes' + + assert_equal(np.sum(MyArray()), 'yes') + + def test_sum_on_mock_array(self): + + # We need a proxy for mocks because __array_function__ is only looked + # up in the class dict + class ArrayProxy: + def __init__(self, value): + self.value = value + + def __array_function__(self, *args, **kwargs): + return self.value.__array_function__(*args, **kwargs) + + def __array__(self, *args, **kwargs): + return self.value.__array__(*args, **kwargs) + + proxy = ArrayProxy(mock.Mock(spec=ArrayProxy)) + proxy.value.__array_function__.return_value = 1 + result = np.sum(proxy) + assert_equal(result, 1) + proxy.value.__array_function__.assert_called_once_with( + np.sum, (ArrayProxy,), (proxy,), {}) + proxy.value.__array__.assert_not_called() + + def test_sum_forwarding_implementation(self): + + class MyArray(np.ndarray): + + def sum(self, axis, out): + return 'summed' + + def __array_function__(self, func, types, args, kwargs): + return super().__array_function__(func, types, args, kwargs) + + # note: the internal implementation of np.sum() calls the .sum() method + array = np.array(1).view(MyArray) + assert_equal(np.sum(array), 'summed') + + +class TestArrayLike: + def _create_MyArray(self): + class MyArray: + def __init__(self, function=None): + self.function = function + + def __array_function__(self, func, types, args, kwargs): + assert func is getattr(np, func.__name__) + try: + my_func = getattr(self, func.__name__) + except AttributeError: + return NotImplemented + return my_func(*args, **kwargs) + + return MyArray + + def _create_MyNoArrayFunctionArray(self): + class MyNoArrayFunctionArray: + def __init__(self, function=None): + self.function = function + + return MyNoArrayFunctionArray + + def _create_MySubclass(self): + class MySubclass(np.ndarray): + def __array_function__(self, func, types, args, kwargs): + result = super().__array_function__(func, types, args, kwargs) + return result.view(self.__class__) + + return MySubclass + + def add_method(self, name, arr_class, enable_value_error=False): + def _definition(*args, **kwargs): + # Check that `like=` isn't propagated downstream + assert 'like' not in kwargs + + if enable_value_error and 'value_error' in kwargs: + raise ValueError + + return arr_class(getattr(arr_class, name)) + setattr(arr_class, name, _definition) + + def func_args(*args, **kwargs): + return args, kwargs + + def test_array_like_not_implemented(self): + MyArray = self._create_MyArray() + self.add_method('array', MyArray) + + ref = MyArray.array() + + with assert_raises_regex(TypeError, 'no implementation found'): + array_like = np.asarray(1, like=ref) + + _array_tests = [ + ('array', *func_args((1,))), + ('asarray', *func_args((1,))), + ('asanyarray', *func_args((1,))), + ('ascontiguousarray', *func_args((2, 3))), + ('asfortranarray', *func_args((2, 3))), + ('require', *func_args((np.arange(6).reshape(2, 3),), + requirements=['A', 'F'])), + ('empty', *func_args((1,))), + ('full', *func_args((1,), 2)), + ('ones', *func_args((1,))), + ('zeros', *func_args((1,))), + ('arange', *func_args(3)), + ('frombuffer', *func_args(b'\x00' * 8, dtype=int)), + ('fromiter', *func_args(range(3), dtype=int)), + ('fromstring', *func_args('1,2', dtype=int, sep=',')), + ('loadtxt', *func_args(lambda: StringIO('0 1\n2 3'))), + ('genfromtxt', *func_args(lambda: StringIO('1,2.1'), + dtype=[('int', 'i8'), ('float', 'f8')], + delimiter=',')), + ] + + def test_nep35_functions_as_array_functions(self,): + all_array_functions = get_overridable_numpy_array_functions() + like_array_functions_subset = { + getattr(np, func_name) for func_name, *_ in self.__class__._array_tests + } + assert like_array_functions_subset.issubset(all_array_functions) + + nep35_python_functions = { + np.eye, np.fromfunction, np.full, np.genfromtxt, + np.identity, np.loadtxt, np.ones, np.require, np.tri, + } + assert nep35_python_functions.issubset(all_array_functions) + + nep35_C_functions = { + np.arange, np.array, np.asanyarray, np.asarray, + np.ascontiguousarray, np.asfortranarray, np.empty, + np.frombuffer, np.fromfile, np.fromiter, np.fromstring, + np.zeros, + } + assert nep35_C_functions.issubset(all_array_functions) + + @pytest.mark.parametrize('function, args, kwargs', _array_tests) + @pytest.mark.parametrize('numpy_ref', [True, False]) + def test_array_like(self, function, args, kwargs, numpy_ref): + MyArray = self._create_MyArray() + self.add_method('array', MyArray) + self.add_method(function, MyArray) + np_func = getattr(np, function) + my_func = getattr(MyArray, function) + + if numpy_ref is True: + ref = np.array(1) + else: + ref = MyArray.array() + + like_args = tuple(a() if callable(a) else a for a in args) + array_like = np_func(*like_args, **kwargs, like=ref) + + if numpy_ref is True: + assert type(array_like) is np.ndarray + + np_args = tuple(a() if callable(a) else a for a in args) + np_arr = np_func(*np_args, **kwargs) + + # Special-case np.empty to ensure values match + if function == "empty": + np_arr.fill(1) + array_like.fill(1) + + assert_equal(array_like, np_arr) + else: + assert type(array_like) is MyArray + assert array_like.function is my_func + + @pytest.mark.parametrize('function, args, kwargs', _array_tests) + @pytest.mark.parametrize('ref', [1, [1], "MyNoArrayFunctionArray"]) + def test_no_array_function_like(self, function, args, kwargs, ref): + MyNoArrayFunctionArray = self._create_MyNoArrayFunctionArray() + self.add_method('array', MyNoArrayFunctionArray) + self.add_method(function, MyNoArrayFunctionArray) + np_func = getattr(np, function) + + # Instantiate ref if it's the MyNoArrayFunctionArray class + if ref == "MyNoArrayFunctionArray": + ref = MyNoArrayFunctionArray.array() + + like_args = tuple(a() if callable(a) else a for a in args) + + with assert_raises_regex(TypeError, + 'The `like` argument must be an array-like that implements'): + np_func(*like_args, **kwargs, like=ref) + + @pytest.mark.parametrize('function, args, kwargs', _array_tests) + def test_subclass(self, function, args, kwargs): + MySubclass = self._create_MySubclass() + ref = np.array(1).view(MySubclass) + np_func = getattr(np, function) + like_args = tuple(a() if callable(a) else a for a in args) + array_like = np_func(*like_args, **kwargs, like=ref) + assert type(array_like) is MySubclass + if np_func is np.empty: + return + np_args = tuple(a() if callable(a) else a for a in args) + np_arr = np_func(*np_args, **kwargs) + assert_equal(array_like.view(np.ndarray), np_arr) + + @pytest.mark.parametrize('numpy_ref', [True, False]) + def test_array_like_fromfile(self, numpy_ref): + MyArray = self._create_MyArray() + self.add_method('array', MyArray) + self.add_method("fromfile", MyArray) + + if numpy_ref is True: + ref = np.array(1) + else: + ref = MyArray.array() + + data = np.random.random(5) + + with tempfile.TemporaryDirectory() as tmpdir: + fname = os.path.join(tmpdir, "testfile") + data.tofile(fname) + + array_like = np.fromfile(fname, like=ref) + if numpy_ref is True: + assert type(array_like) is np.ndarray + np_res = np.fromfile(fname, like=ref) + assert_equal(np_res, data) + assert_equal(array_like, np_res) + else: + assert type(array_like) is MyArray + assert array_like.function is MyArray.fromfile + + def test_exception_handling(self): + MyArray = self._create_MyArray() + self.add_method('array', MyArray, enable_value_error=True) + + ref = MyArray.array() + + with assert_raises(TypeError): + # Raises the error about `value_error` being invalid first + np.array(1, value_error=True, like=ref) + + @pytest.mark.parametrize('function, args, kwargs', _array_tests) + def test_like_as_none(self, function, args, kwargs): + MyArray = self._create_MyArray() + self.add_method('array', MyArray) + self.add_method(function, MyArray) + np_func = getattr(np, function) + + like_args = tuple(a() if callable(a) else a for a in args) + # required for loadtxt and genfromtxt to init w/o error. + like_args_exp = tuple(a() if callable(a) else a for a in args) + + array_like = np_func(*like_args, **kwargs, like=None) + expected = np_func(*like_args_exp, **kwargs) + # Special-case np.empty to ensure values match + if function == "empty": + array_like.fill(1) + expected.fill(1) + assert_equal(array_like, expected) + + +def test_function_like(): + # We provide a `__get__` implementation, make sure it works + assert type(np.mean) is np._core._multiarray_umath._ArrayFunctionDispatcher + + class MyClass: + def __array__(self, dtype=None, copy=None): + # valid argument to mean: + return np.arange(3) + + func1 = staticmethod(np.mean) + func2 = np.mean + func3 = classmethod(np.mean) + + m = MyClass() + assert m.func1([10]) == 10 + assert m.func2() == 1 # mean of the arange + with pytest.raises(TypeError, match="unsupported operand type"): + # Tries to operate on the class + m.func3() + + # Manual binding also works (the above may shortcut): + bound = np.mean.__get__(m, MyClass) + assert bound() == 1 + + bound = np.mean.__get__(None, MyClass) # unbound actually + assert bound([10]) == 10 + + bound = np.mean.__get__(MyClass) # classmethod + with pytest.raises(TypeError, match="unsupported operand type"): + bound() diff --git a/local_packages/numpy/_core/tests/test_print.py b/local_packages/numpy/_core/tests/test_print.py new file mode 100644 index 0000000..95a177b --- /dev/null +++ b/local_packages/numpy/_core/tests/test_print.py @@ -0,0 +1,202 @@ +import sys +from io import StringIO + +import pytest + +import numpy as np +from numpy._core.tests._locales import CommaDecimalPointLocale +from numpy.testing import IS_MUSL, assert_, assert_equal + +_REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'} + + +@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) +def test_float_types(tp): + """ Check formatting. + + This is only for the str function, and only for simple types. + The precision of np.float32 and np.longdouble aren't the same as the + python float precision. + + """ + for x in [0, 1, -1, 1e20]: + assert_equal(str(tp(x)), str(float(x)), + err_msg=f'Failed str formatting for type {tp}') + + if tp(1e16).itemsize > 4: + assert_equal(str(tp(1e16)), str(float('1e16')), + err_msg=f'Failed str formatting for type {tp}') + else: + ref = '1e+16' + assert_equal(str(tp(1e16)), ref, + err_msg=f'Failed str formatting for type {tp}') + + +@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) +def test_nan_inf_float(tp): + """ Check formatting of nan & inf. + + This is only for the str function, and only for simple types. + The precision of np.float32 and np.longdouble aren't the same as the + python float precision. + + """ + for x in [np.inf, -np.inf, np.nan]: + assert_equal(str(tp(x)), _REF[x], + err_msg=f'Failed str formatting for type {tp}') + + +@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble]) +def test_complex_types(tp): + """Check formatting of complex types. + + This is only for the str function, and only for simple types. + The precision of np.float32 and np.longdouble aren't the same as the + python float precision. + + """ + for x in [0, 1, -1, 1e20]: + assert_equal(str(tp(x)), str(complex(x)), + err_msg=f'Failed str formatting for type {tp}') + assert_equal(str(tp(x * 1j)), str(complex(x * 1j)), + err_msg=f'Failed str formatting for type {tp}') + assert_equal(str(tp(x + x * 1j)), str(complex(x + x * 1j)), + err_msg=f'Failed str formatting for type {tp}') + + if tp(1e16).itemsize > 8: + assert_equal(str(tp(1e16)), str(complex(1e16)), + err_msg=f'Failed str formatting for type {tp}') + else: + ref = '(1e+16+0j)' + assert_equal(str(tp(1e16)), ref, + err_msg=f'Failed str formatting for type {tp}') + + +@pytest.mark.parametrize('dtype', [np.complex64, np.cdouble, np.clongdouble]) +def test_complex_inf_nan(dtype): + """Check inf/nan formatting of complex types.""" + TESTS = { + complex(np.inf, 0): "(inf+0j)", + complex(0, np.inf): "infj", + complex(-np.inf, 0): "(-inf+0j)", + complex(0, -np.inf): "-infj", + complex(np.inf, 1): "(inf+1j)", + complex(1, np.inf): "(1+infj)", + complex(-np.inf, 1): "(-inf+1j)", + complex(1, -np.inf): "(1-infj)", + complex(np.nan, 0): "(nan+0j)", + complex(0, np.nan): "nanj", + complex(-np.nan, 0): "(nan+0j)", + complex(0, -np.nan): "nanj", + complex(np.nan, 1): "(nan+1j)", + complex(1, np.nan): "(1+nanj)", + complex(-np.nan, 1): "(nan+1j)", + complex(1, -np.nan): "(1+nanj)", + } + for c, s in TESTS.items(): + assert_equal(str(dtype(c)), s) + + +# print tests +def _test_redirected_print(x, tp, ref=None): + file = StringIO() + file_tp = StringIO() + stdout = sys.stdout + try: + sys.stdout = file_tp + print(tp(x)) + sys.stdout = file + if ref: + print(ref) + else: + print(x) + finally: + sys.stdout = stdout + + assert_equal(file.getvalue(), file_tp.getvalue(), + err_msg=f'print failed for type{tp}') + + +@pytest.mark.thread_unsafe(reason="sys.stdout not thread-safe") +@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) +def test_float_type_print(tp): + """Check formatting when using print """ + for x in [0, 1, -1, 1e20]: + _test_redirected_print(float(x), tp) + + for x in [np.inf, -np.inf, np.nan]: + _test_redirected_print(float(x), tp, _REF[x]) + + if tp(1e16).itemsize > 4: + _test_redirected_print(1e16, tp) + else: + ref = '1e+16' + _test_redirected_print(1e16, tp, ref) + + +@pytest.mark.thread_unsafe(reason="sys.stdout not thread-safe") +@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble]) +def test_complex_type_print(tp): + """Check formatting when using print """ + # We do not create complex with inf/nan directly because the feature is + # missing in python < 2.6 + for x in [0, 1, -1, 1e20]: + _test_redirected_print(complex(x), tp) + + if tp(1e16).itemsize > 8: + _test_redirected_print(complex(1e16), tp) + else: + ref = '(1e+16+0j)' + _test_redirected_print(complex(1e16), tp, ref) + + _test_redirected_print(complex(np.inf, 1), tp, '(inf+1j)') + _test_redirected_print(complex(-np.inf, 1), tp, '(-inf+1j)') + _test_redirected_print(complex(-np.nan, 1), tp, '(nan+1j)') + + +def test_scalar_format(): + """Test the str.format method with NumPy scalar types""" + tests = [('{0}', True, np.bool), + ('{0}', False, np.bool), + ('{0:d}', 130, np.uint8), + ('{0:d}', 50000, np.uint16), + ('{0:d}', 3000000000, np.uint32), + ('{0:d}', 15000000000000000000, np.uint64), + ('{0:d}', -120, np.int8), + ('{0:d}', -30000, np.int16), + ('{0:d}', -2000000000, np.int32), + ('{0:d}', -7000000000000000000, np.int64), + ('{0:g}', 1.5, np.float16), + ('{0:g}', 1.5, np.float32), + ('{0:g}', 1.5, np.float64), + ('{0:g}', 1.5, np.longdouble), + ('{0:g}', 1.5 + 0.5j, np.complex64), + ('{0:g}', 1.5 + 0.5j, np.complex128), + ('{0:g}', 1.5 + 0.5j, np.clongdouble)] + + for (fmat, val, valtype) in tests: + try: + assert_equal(fmat.format(val), fmat.format(valtype(val)), + f"failed with val {val}, type {valtype}") + except ValueError as e: + assert_(False, + "format raised exception (fmt='%s', val=%s, type=%s, exc='%s')" % + (fmat, repr(val), repr(valtype), str(e))) + + +# +# Locale tests: scalar types formatting should be independent of the locale +# + +class TestCommaDecimalPointLocale(CommaDecimalPointLocale): + + def test_locale_single(self): + assert_equal(str(np.float32(1.2)), str(1.2)) + + def test_locale_double(self): + assert_equal(str(np.double(1.2)), str(1.2)) + + @pytest.mark.skipif(IS_MUSL, + reason="test flaky on musllinux") + def test_locale_longdouble(self): + assert_equal(str(np.longdouble('1.2')), str(1.2)) diff --git a/local_packages/numpy/_core/tests/test_protocols.py b/local_packages/numpy/_core/tests/test_protocols.py new file mode 100644 index 0000000..96bb600 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_protocols.py @@ -0,0 +1,46 @@ +import warnings + +import pytest + +import numpy as np + + +@pytest.mark.filterwarnings("error") +def test_getattr_warning(): + # issue gh-14735: make sure we clear only getattr errors, and let warnings + # through + class Wrapper: + def __init__(self, array): + self.array = array + + def __len__(self): + return len(self.array) + + def __getitem__(self, item): + return type(self)(self.array[item]) + + def __getattr__(self, name): + if name.startswith("__array_"): + warnings.warn("object got converted", UserWarning, stacklevel=1) + + return getattr(self.array, name) + + def __repr__(self): + return f"" + + array = Wrapper(np.arange(10)) + with pytest.raises(UserWarning, match="object got converted"): + np.asarray(array) + + +def test_array_called(): + class Wrapper: + val = '0' * 100 + + def __array__(self, dtype=None, copy=None): + return np.array([self.val], dtype=dtype, copy=copy) + + wrapped = Wrapper() + arr = np.array(wrapped, dtype=str) + assert arr.dtype == 'U100' + assert arr[0] == Wrapper.val diff --git a/local_packages/numpy/_core/tests/test_records.py b/local_packages/numpy/_core/tests/test_records.py new file mode 100644 index 0000000..7ed6ea7 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_records.py @@ -0,0 +1,544 @@ +import collections.abc +import pickle +import textwrap +from io import BytesIO +from os import path +from pathlib import Path + +import pytest + +import numpy as np +from numpy.testing import ( + assert_, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + temppath, +) + + +class TestFromrecords: + def test_fromrecords(self): + r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]], + names='col1,col2,col3') + assert_equal(r[0].item(), (456, 'dbe', 1.2)) + assert_equal(r['col1'].dtype.kind, 'i') + assert_equal(r['col2'].dtype.kind, 'U') + assert_equal(r['col2'].dtype.itemsize, 12) + assert_equal(r['col3'].dtype.kind, 'f') + + def test_fromrecords_0len(self): + """ Verify fromrecords works with a 0-length input """ + dtype = [('a', float), ('b', float)] + r = np.rec.fromrecords([], dtype=dtype) + assert_equal(r.shape, (0,)) + + def test_fromrecords_2d(self): + data = [ + [(1, 2), (3, 4), (5, 6)], + [(6, 5), (4, 3), (2, 1)] + ] + expected_a = [[1, 3, 5], [6, 4, 2]] + expected_b = [[2, 4, 6], [5, 3, 1]] + + # try with dtype + r1 = np.rec.fromrecords(data, dtype=[('a', int), ('b', int)]) + assert_equal(r1['a'], expected_a) + assert_equal(r1['b'], expected_b) + + # try with names + r2 = np.rec.fromrecords(data, names=['a', 'b']) + assert_equal(r2['a'], expected_a) + assert_equal(r2['b'], expected_b) + + assert_equal(r1, r2) + + def test_method_array(self): + r = np.rec.array( + b'abcdefg' * 100, formats='i2,S3,i4', shape=3, byteorder='big' + ) + assert_equal(r[1].item(), (25444, b'efg', 1633837924)) + + def test_method_array2(self): + r = np.rec.array( + [ + (1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), + (5, 55, 'ex'), (6, 66, 'f'), (7, 77, 'g') + ], + formats='u1,f4,S1' + ) + assert_equal(r[1].item(), (2, 22.0, b'b')) + + def test_recarray_slices(self): + r = np.rec.array( + [ + (1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), + (5, 55, 'ex'), (6, 66, 'f'), (7, 77, 'g') + ], + formats='u1,f4,S1' + ) + assert_equal(r[1::2][1].item(), (4, 44.0, b'd')) + + def test_recarray_fromarrays(self): + x1 = np.array([1, 2, 3, 4]) + x2 = np.array(['a', 'dd', 'xyz', '12']) + x3 = np.array([1.1, 2, 3, 4]) + r = np.rec.fromarrays([x1, x2, x3], names='a,b,c') + assert_equal(r[1].item(), (2, 'dd', 2.0)) + x1[1] = 34 + assert_equal(r.a, np.array([1, 2, 3, 4])) + + def test_recarray_fromfile(self): + data_dir = path.join(path.dirname(__file__), 'data') + filename = path.join(data_dir, 'recarray_from_file.fits') + fd = open(filename, 'rb') + fd.seek(2880 * 2) + r1 = np.rec.fromfile(fd, formats='f8,i4,S5', shape=3, byteorder='big') + fd.seek(2880 * 2) + r2 = np.rec.array(fd, formats='f8,i4,S5', shape=3, byteorder='big') + fd.seek(2880 * 2) + bytes_array = BytesIO() + bytes_array.write(fd.read()) + bytes_array.seek(0) + r3 = np.rec.fromfile( + bytes_array, formats='f8,i4,S5', shape=3, byteorder='big' + ) + fd.close() + assert_equal(r1, r2) + assert_equal(r2, r3) + + def test_recarray_from_obj(self): + count = 10 + a = np.zeros(count, dtype='O') + b = np.zeros(count, dtype='f8') + c = np.zeros(count, dtype='f8') + for i in range(len(a)): + a[i] = list(range(1, 10)) + + mine = np.rec.fromarrays([a, b, c], names='date,data1,data2') + for i in range(len(a)): + assert_(mine.date[i] == list(range(1, 10))) + assert_(mine.data1[i] == 0.0) + assert_(mine.data2[i] == 0.0) + + def test_recarray_repr(self): + a = np.array([(1, 0.1), (2, 0.2)], + dtype=[('foo', ' 2) & (a < 6)) + xb = np.where((b > 2) & (b < 6)) + ya = ((a > 2) & (a < 6)) + yb = ((b > 2) & (b < 6)) + assert_array_almost_equal(xa, ya.nonzero()) + assert_array_almost_equal(xb, yb.nonzero()) + assert_(np.all(a[ya] > 0.5)) + assert_(np.all(b[yb] > 0.5)) + + def test_endian_where(self): + # GitHub issue #369 + net = np.zeros(3, dtype='>f4') + net[1] = 0.00458849 + net[2] = 0.605202 + max_net = net.max() + test = np.where(net <= 0., max_net, net) + correct = np.array([0.60520202, 0.00458849, 0.60520202]) + assert_array_almost_equal(test, correct) + + def test_endian_recarray(self): + # Ticket #2185 + dt = np.dtype([ + ('head', '>u4'), + ('data', '>u4', 2), + ]) + buf = np.recarray(1, dtype=dt) + buf[0]['head'] = 1 + buf[0]['data'][:] = [1, 1] + + h = buf[0]['head'] + d = buf[0]['data'][0] + buf[0]['head'] = h + buf[0]['data'][0] = d + assert_(buf[0]['head'] == 1) + + def test_mem_dot(self): + # Ticket #106 + x = np.random.randn(0, 1) + y = np.random.randn(10, 1) + # Dummy array to detect bad memory access: + _z = np.ones(10) + _dummy = np.empty((0, 10)) + z = as_strided(_z, _dummy.shape, _dummy.strides) + np.dot(x, np.transpose(y), out=z) + assert_equal(_z, np.ones(10)) + # Do the same for the built-in dot: + np._core.multiarray.dot(x, np.transpose(y), out=z) + assert_equal(_z, np.ones(10)) + + def test_arange_endian(self): + # Ticket #111 + ref = np.arange(10) + x = np.arange(10, dtype=' 1 and x['two'] > 2) + + def test_method_args(self): + # Make sure methods and functions have same default axis + # keyword and arguments + funcs1 = ['argmax', 'argmin', 'sum', 'any', 'all', 'cumsum', + 'cumprod', 'prod', 'std', 'var', 'mean', + 'round', 'min', 'max', 'argsort', 'sort'] + funcs2 = ['compress', 'take', 'repeat'] + + for func in funcs1: + arr = np.random.rand(8, 7) + arr2 = arr.copy() + res1 = getattr(arr, func)() + res2 = getattr(np, func)(arr2) + if res1 is None: + res1 = arr + + if res1.dtype.kind in 'uib': + assert_((res1 == res2).all(), func) + else: + assert_(abs(res1 - res2).max() < 1e-8, func) + + for func in funcs2: + arr1 = np.random.rand(8, 7) + arr2 = np.random.rand(8, 7) + res1 = None + if func == 'compress': + arr1 = arr1.ravel() + res1 = getattr(arr2, func)(arr1) + else: + arr2 = (15 * arr2).astype(int).ravel() + if res1 is None: + res1 = getattr(arr1, func)(arr2) + res2 = getattr(np, func)(arr1, arr2) + assert_(abs(res1 - res2).max() < 1e-8, func) + + def test_mem_lexsort_strings(self): + # Ticket #298 + lst = ['abc', 'cde', 'fgh'] + np.lexsort((lst,)) + + def test_fancy_index(self): + # Ticket #302 + x = np.array([1, 2])[np.array([0])] + assert_equal(x.shape, (1,)) + + def test_recarray_copy(self): + # Ticket #312 + dt = [('x', np.int16), ('y', np.float64)] + ra = np.array([(1, 2.3)], dtype=dt) + rb = np.rec.array(ra, dtype=dt) + rb['x'] = 2. + assert_(ra['x'] != rb['x']) + + def test_rec_fromarray(self): + # Ticket #322 + x1 = np.array([[1, 2], [3, 4], [5, 6]]) + x2 = np.array(['a', 'dd', 'xyz']) + x3 = np.array([1.1, 2, 3]) + np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,S3,f8") + + def test_object_array_assign(self): + x = np.empty((2, 2), object) + x.flat[2] = (1, 2, 3) + assert_equal(x.flat[2], (1, 2, 3)) + + def test_ndmin_float64(self): + # Ticket #324 + x = np.array([1, 2, 3], dtype=np.float64) + assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2) + assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2) + + def test_ndmin_order(self): + # Issue #465 and related checks + assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous) + assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous) + assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous) + assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous) + + def test_mem_axis_minimization(self): + # Ticket #327 + data = np.arange(5) + data = np.add.outer(data, data) + + def test_mem_float_imag(self): + # Ticket #330 + np.float64(1.0).imag + + def test_dtype_tuple(self): + # Ticket #334 + assert_(np.dtype('i4') == np.dtype(('i4', ()))) + + def test_dtype_posttuple(self): + # Ticket #335 + np.dtype([('col1', '()i4')]) + + def test_numeric_carray_compare(self): + # Ticket #341 + assert_equal(np.array(['X'], 'c'), b'X') + + def test_string_array_size(self): + # Ticket #342 + assert_raises(ValueError, + np.array, [['X'], ['X', 'X', 'X']], '|S1') + + def test_dtype_repr(self): + # Ticket #344 + dt1 = np.dtype(('uint32', 2)) + dt2 = np.dtype(('uint32', (2,))) + assert_equal(dt1.__repr__(), dt2.__repr__()) + + def test_reshape_order(self): + # Make sure reshape order works. + a = np.arange(6).reshape(2, 3, order='F') + assert_equal(a, [[0, 2, 4], [1, 3, 5]]) + a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) + b = a[:, 1] + assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]]) + + def test_reshape_zero_strides(self): + # Issue #380, test reshaping of zero strided arrays + a = np.ones(1) + a = as_strided(a, shape=(5,), strides=(0,)) + assert_(a.reshape(5, 1).strides[0] == 0) + + def test_reshape_zero_size(self): + # GitHub Issue #2700, setting shape failed for 0-sized arrays + a = np.ones((0, 2)) + a.shape = (-1, 2) + + def test_reshape_trailing_ones_strides(self): + # GitHub issue gh-2949, bad strides for trailing ones of new shape + a = np.zeros(12, dtype=np.int32)[::2] # not contiguous + strides_c = (16, 8, 8, 8) + strides_f = (8, 24, 48, 48) + assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c) + assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f) + assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4)) + + def test_repeat_discont(self): + # Ticket #352 + a = np.arange(12).reshape(4, 3)[:, 2] + assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11]) + + def test_array_index(self): + # Make sure optimization is not called in this case. + a = np.array([1, 2, 3]) + a2 = np.array([[1, 2, 3]]) + assert_equal(a[np.where(a == 3)], a2[np.where(a2 == 3)]) + + def test_object_argmax(self): + a = np.array([1, 2, 3], dtype=object) + assert_(a.argmax() == 2) + + def test_recarray_fields(self): + # Ticket #372 + dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')]) + dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')]) + for a in [np.array([(1, 2), (3, 4)], "i4,i4"), + np.rec.array([(1, 2), (3, 4)], "i4,i4"), + np.rec.array([(1, 2), (3, 4)]), + np.rec.fromarrays([(1, 2), (3, 4)], "i4,i4"), + np.rec.fromarrays([(1, 2), (3, 4)])]: + assert_(a.dtype in [dt0, dt1]) + + def test_random_shuffle(self): + # Ticket #374 + a = np.arange(5).reshape((5, 1)) + b = a.copy() + np.random.shuffle(b) + assert_equal(np.sort(b, axis=0), a) + + def test_refcount_vdot(self): + # Changeset #3443 + _assert_valid_refcount(np.vdot) + + def test_startswith(self): + ca = np.char.array(['Hi', 'There']) + assert_equal(ca.startswith('H'), [True, False]) + + def test_noncommutative_reduce_accumulate(self): + # Ticket #413 + tosubtract = np.arange(5) + todivide = np.array([2.0, 0.5, 0.25]) + assert_equal(np.subtract.reduce(tosubtract), -10) + assert_equal(np.divide.reduce(todivide), 16.0) + assert_array_equal(np.subtract.accumulate(tosubtract), + np.array([0, -1, -3, -6, -10])) + assert_array_equal(np.divide.accumulate(todivide), + np.array([2., 4., 16.])) + + def test_convolve_empty(self): + # Convolve should raise an error for empty input array. + assert_raises(ValueError, np.convolve, [], [1]) + assert_raises(ValueError, np.convolve, [1], []) + + def test_multidim_byteswap(self): + # Ticket #449 + r = np.array([(1, (0, 1, 2))], dtype="i2,3i2") + assert_array_equal(r.byteswap(), + np.array([(256, (0, 256, 512))], r.dtype)) + + def test_string_NULL(self): + # Changeset 3557 + assert_equal(np.array("a\x00\x0b\x0c\x00").item(), + 'a\x00\x0b\x0c') + + def test_junk_in_string_fields_of_recarray(self): + # Ticket #483 + r = np.array([[b'abc']], dtype=[('var1', '|S20')]) + assert_(asbytes(r['var1'][0][0]) == b'abc') + + def test_take_output(self): + # Ensure that 'take' honours output parameter. + x = np.arange(12).reshape((3, 4)) + a = np.take(x, [0, 2], axis=1) + b = np.zeros_like(a) + np.take(x, [0, 2], axis=1, out=b) + assert_array_equal(a, b) + + def test_take_object_fail(self): + # Issue gh-3001 + d = 123. + a = np.array([d, 1], dtype=object) + if HAS_REFCOUNT: + ref_d = sys.getrefcount(d) + try: + a.take([0, 100]) + except IndexError: + pass + if HAS_REFCOUNT: + assert_(ref_d == sys.getrefcount(d)) + + def test_array_str_64bit(self): + # Ticket #501 + s = np.array([1, np.nan], dtype=np.float64) + with np.errstate(all='raise'): + np.array_str(s) # Should succeed + + def test_frompyfunc_endian(self): + # Ticket #503 + from math import radians + uradians = np.frompyfunc(radians, 1, 1) + big_endian = np.array([83.4, 83.5], dtype='>f8') + little_endian = np.array([83.4, 83.5], dtype=' object + # casting succeeds + def rs(): + x = np.ones([484, 286]) + y = np.zeros([484, 286]) + x |= y + + assert_raises(TypeError, rs) + + def test_unicode_scalar(self): + # Ticket #600 + x = np.array(["DROND", "DROND1"], dtype="U6") + el = x[1] + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + new = pickle.loads(pickle.dumps(el, protocol=proto)) + assert_equal(new, el) + + def test_arange_non_native_dtype(self): + # Ticket #616 + for T in ('>f4', ' 0)] = v + + assert_raises(IndexError, ia, x, s, np.zeros(9, dtype=float)) + assert_raises(IndexError, ia, x, s, np.zeros(11, dtype=float)) + + # Old special case (different code path): + assert_raises(IndexError, ia, x.flat, s, np.zeros(9, dtype=float)) + assert_raises(IndexError, ia, x.flat, s, np.zeros(11, dtype=float)) + + def test_mem_scalar_indexing(self): + # Ticket #603 + x = np.array([0], dtype=float) + index = np.array(0, dtype=np.int32) + x[index] + + def test_binary_repr_0_width(self): + assert_equal(np.binary_repr(0, width=3), '000') + + def test_fromstring(self): + assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"), + [12, 9, 9]) + + def test_searchsorted_variable_length(self): + x = np.array(['a', 'aa', 'b']) + y = np.array(['d', 'e']) + assert_equal(x.searchsorted(y), [3, 3]) + + def test_string_argsort_with_zeros(self): + # Check argsort for strings containing zeros. + x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2") + assert_array_equal(x.argsort(kind='m'), np.array([1, 0])) + assert_array_equal(x.argsort(kind='q'), np.array([1, 0])) + + def test_string_sort_with_zeros(self): + # Check sort for strings containing zeros. + x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2") + y = np.frombuffer(b"\x00\x01\x00\x02", dtype="|S2") + assert_array_equal(np.sort(x, kind="q"), y) + + def test_copy_detection_zero_dim(self): + # Ticket #658 + np.indices((0, 3, 4)).T.reshape(-1, 3) + + def test_flat_byteorder(self): + # Ticket #657 + x = np.arange(10) + assert_array_equal(x.astype('>i4'), x.astype('i4').flat[:], x.astype('i4')): + x = np.array([-1, 0, 1], dtype=dt) + assert_equal(x.flat[0].dtype, x[0].dtype) + + def test_copy_detection_corner_case(self): + # Ticket #658 + np.indices((0, 3, 4)).T.reshape(-1, 3) + + def test_object_array_refcounting(self): + # Ticket #633 + if not hasattr(sys, 'getrefcount'): + return + + # NB. this is probably CPython-specific + + cnt = sys.getrefcount + + a = object() + b = object() + c = object() + + cnt0_a = cnt(a) + cnt0_b = cnt(b) + cnt0_c = cnt(c) + + # -- 0d -> 1-d broadcast slice assignment + + arr = np.zeros(5, dtype=np.object_) + + arr[:] = a + assert_equal(cnt(a), cnt0_a + 5) + + arr[:] = b + assert_equal(cnt(a), cnt0_a) + assert_equal(cnt(b), cnt0_b + 5) + + arr[:2] = c + assert_equal(cnt(b), cnt0_b + 3) + assert_equal(cnt(c), cnt0_c + 2) + + del arr + + # -- 1-d -> 2-d broadcast slice assignment + + arr = np.zeros((5, 2), dtype=np.object_) + arr0 = np.zeros(2, dtype=np.object_) + + arr0[0] = a + assert_(cnt(a) == cnt0_a + 1) + arr0[1] = b + assert_(cnt(b) == cnt0_b + 1) + + arr[:, :] = arr0 + assert_(cnt(a) == cnt0_a + 6) + assert_(cnt(b) == cnt0_b + 6) + + arr[:, 0] = None + assert_(cnt(a) == cnt0_a + 1) + + del arr, arr0 + + # -- 2-d copying + flattening + + arr = np.zeros((5, 2), dtype=np.object_) + + arr[:, 0] = a + arr[:, 1] = b + assert_(cnt(a) == cnt0_a + 5) + assert_(cnt(b) == cnt0_b + 5) + + arr2 = arr.copy() + assert_(cnt(a) == cnt0_a + 10) + assert_(cnt(b) == cnt0_b + 10) + + arr2 = arr[:, 0].copy() + assert_(cnt(a) == cnt0_a + 10) + assert_(cnt(b) == cnt0_b + 5) + + arr2 = arr.flatten() + assert_(cnt(a) == cnt0_a + 10) + assert_(cnt(b) == cnt0_b + 10) + + del arr, arr2 + + # -- concatenate, repeat, take, choose + + arr1 = np.zeros((5, 1), dtype=np.object_) + arr2 = np.zeros((5, 1), dtype=np.object_) + + arr1[...] = a + arr2[...] = b + assert_(cnt(a) == cnt0_a + 5) + assert_(cnt(b) == cnt0_b + 5) + + tmp = np.concatenate((arr1, arr2)) + assert_(cnt(a) == cnt0_a + 5 + 5) + assert_(cnt(b) == cnt0_b + 5 + 5) + + tmp = arr1.repeat(3, axis=0) + assert_(cnt(a) == cnt0_a + 5 + 3 * 5) + + tmp = arr1.take([1, 2, 3], axis=0) + assert_(cnt(a) == cnt0_a + 5 + 3) + + x = np.array([[0], [1], [0], [1], [1]], int) + tmp = x.choose(arr1, arr2) + assert_(cnt(a) == cnt0_a + 5 + 2) + assert_(cnt(b) == cnt0_b + 5 + 3) + + def test_mem_custom_float_to_array(self): + # Ticket 702 + class MyFloat: + def __float__(self): + return 1.0 + + tmp = np.atleast_1d([MyFloat()]) + tmp.astype(float) # Should succeed + + def test_object_array_refcount_self_assign(self): + # Ticket #711 + class VictimObject: + deleted = False + + def __del__(self): + self.deleted = True + + d = VictimObject() + arr = np.zeros(5, dtype=np.object_) + arr[:] = d + del d + arr[:] = arr # refcount of 'd' might hit zero here + assert_(not arr[0].deleted) + arr[:] = arr # trying to induce a segfault by doing it again... + assert_(not arr[0].deleted) + + def test_mem_fromiter_invalid_dtype_string(self): + x = [1, 2, 3] + assert_raises(ValueError, + np.fromiter, list(x), dtype='S') + + def test_reduce_big_object_array(self): + # Ticket #713 + oldsize = np.setbufsize(10 * 16) + a = np.array([None] * 161, object) + assert_(not np.any(a)) + np.setbufsize(oldsize) + + def test_mem_0d_array_index(self): + # Ticket #714 + np.zeros(10)[np.array(0)] + + def test_nonnative_endian_fill(self): + # Non-native endian arrays were incorrectly filled with scalars + # before r5034. + if sys.byteorder == 'little': + dtype = np.dtype('>i4') + else: + dtype = np.dtype('data contains non-zero floats + x = np.array([123456789e199], dtype=np.float64) + if IS_PYPY: + x.resize((m, 0), refcheck=False) + else: + x.resize((m, 0)) + y = np.array([123456789e199], dtype=np.float64) + if IS_PYPY: + y.resize((0, n), refcheck=False) + else: + y.resize((0, n)) + + # `dot` should just return zero (m, n) matrix + z = np.dot(x, y) + assert_(np.all(z == 0)) + assert_(z.shape == (m, n)) + + def test_zeros(self): + # Regression test for #1061. + # Set a size which cannot fit into a 64 bits signed integer + sz = 2 ** 64 + with assert_raises_regex(ValueError, + 'Maximum allowed dimension exceeded'): + np.empty(sz) + + def test_huge_arange(self): + # Regression test for #1062. + # Set a size which cannot fit into a 64 bits signed integer + sz = 2 ** 64 + with assert_raises_regex(ValueError, + 'Maximum allowed size exceeded'): + np.arange(sz) + assert_(np.size == sz) + + def test_fromiter_bytes(self): + # Ticket #1058 + a = np.fromiter(list(range(10)), dtype='b') + b = np.fromiter(list(range(10)), dtype='B') + assert_(np.all(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) + assert_(np.all(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) + + def test_array_from_sequence_scalar_array(self): + # Ticket #1078: segfaults when creating an array with a sequence of + # 0d arrays. + a = np.array((np.ones(2), np.array(2)), dtype=object) + assert_equal(a.shape, (2,)) + assert_equal(a.dtype, np.dtype(object)) + assert_equal(a[0], np.ones(2)) + assert_equal(a[1], np.array(2)) + + a = np.array(((1,), np.array(1)), dtype=object) + assert_equal(a.shape, (2,)) + assert_equal(a.dtype, np.dtype(object)) + assert_equal(a[0], (1,)) + assert_equal(a[1], np.array(1)) + + def test_array_from_sequence_scalar_array2(self): + # Ticket #1081: weird array with strange input... + t = np.array([np.array([]), np.array(0, object)], dtype=object) + assert_equal(t.shape, (2,)) + assert_equal(t.dtype, np.dtype(object)) + + def test_array_too_big(self): + # Ticket #1080. + assert_raises(ValueError, np.zeros, [975] * 7, np.int8) + assert_raises(ValueError, np.zeros, [26244] * 5, np.int8) + + def test_dtype_keyerrors_(self): + # Ticket #1106. + dt = np.dtype([('f1', np.uint)]) + assert_raises(KeyError, dt.__getitem__, "f2") + assert_raises(IndexError, dt.__getitem__, 1) + assert_raises(TypeError, dt.__getitem__, 0.0) + + def test_lexsort_buffer_length(self): + # Ticket #1217, don't segfault. + a = np.ones(100, dtype=np.int8) + b = np.ones(100, dtype=np.int32) + i = np.lexsort((a[::-1], b)) + assert_equal(i, np.arange(100, dtype=int)) + + def test_object_array_to_fixed_string(self): + # Ticket #1235. + a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_) + b = np.array(a, dtype=(np.str_, 8)) + assert_equal(a, b) + c = np.array(a, dtype=(np.str_, 5)) + assert_equal(c, np.array(['abcde', 'ijklm'])) + d = np.array(a, dtype=(np.str_, 12)) + assert_equal(a, d) + e = np.empty((2, ), dtype=(np.str_, 8)) + e[:] = a[:] + assert_equal(a, e) + + def test_unicode_to_string_cast(self): + # Ticket #1240. + a = np.array([['abc', '\u03a3'], + ['asdf', 'erw']], + dtype='U') + assert_raises(UnicodeEncodeError, np.array, a, 'S4') + + def test_unicode_to_string_cast_error(self): + # gh-15790 + a = np.array(['\x80'] * 129, dtype='U3') + assert_raises(UnicodeEncodeError, np.array, a, 'S') + b = a.reshape(3, 43)[:-1, :-1] + assert_raises(UnicodeEncodeError, np.array, b, 'S') + + def test_mixed_string_byte_array_creation(self): + a = np.array(['1234', b'123']) + assert_(a.itemsize == 16) + a = np.array([b'123', '1234']) + assert_(a.itemsize == 16) + a = np.array(['1234', b'123', '12345']) + assert_(a.itemsize == 20) + a = np.array([b'123', '1234', b'12345']) + assert_(a.itemsize == 20) + a = np.array([b'123', '1234', b'1234']) + assert_(a.itemsize == 16) + + def test_misaligned_objects_segfault(self): + # Ticket #1198 and #1267 + a1 = np.zeros((10,), dtype='O,c') + a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10') + a1['f0'] = a2 + repr(a1) + np.argmax(a1['f0']) + a1['f0'][1] = "FOO" + a1['f0'] = "FOO" + np.array(a1['f0'], dtype='S') + np.nonzero(a1['f0']) + a1.sort() + copy.deepcopy(a1) + + def test_misaligned_scalars_segfault(self): + # Ticket #1267 + s1 = np.array(('a', 'Foo'), dtype='c,O') + s2 = np.array(('b', 'Bar'), dtype='c,O') + s1['f1'] = s2['f1'] + s1['f1'] = 'Baz' + + def test_misaligned_dot_product_objects(self): + # Ticket #1267 + # This didn't require a fix, but it's worth testing anyway, because + # it may fail if .dot stops enforcing the arrays to be BEHAVED + a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c') + b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c') + np.dot(a['f0'], b['f0']) + + def test_byteswap_complex_scalar(self): + # Ticket #1259 and gh-441 + for dtype in [np.dtype('<' + t) for t in np.typecodes['Complex']]: + z = np.array([2.2 - 1.1j], dtype) + x = z[0] # always native-endian + y = x.byteswap() + if x.dtype.byteorder == z.dtype.byteorder: + # little-endian machine + assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype.newbyteorder())) + else: + # big-endian machine + assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype)) + # double check real and imaginary parts: + assert_equal(x.real, y.real.byteswap()) + assert_equal(x.imag, y.imag.byteswap()) + + def test_structured_arrays_with_objects1(self): + # Ticket #1299 + stra = 'aaaa' + strb = 'bbbb' + x = np.array([[(0, stra), (1, strb)]], 'i8,O') + x[x.nonzero()] = x.ravel()[:1] + assert_(x[0, 1] == x[0, 0]) + + @pytest.mark.skipif( + sys.version_info >= (3, 12), + reason="Python 3.12 has immortal refcounts, this test no longer works." + ) + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_structured_arrays_with_objects2(self): + # Ticket #1299 second test + stra = 'aaaa' + strb = 'bbbb' + numb = sys.getrefcount(strb) + numa = sys.getrefcount(stra) + x = np.array([[(0, stra), (1, strb)]], 'i8,O') + x[x.nonzero()] = x.ravel()[:1] + assert_(sys.getrefcount(strb) == numb) + assert_(sys.getrefcount(stra) == numa + 2) + + def test_duplicate_title_and_name(self): + # Ticket #1254 + dtspec = [(('a', 'a'), 'i'), ('b', 'i')] + assert_raises(ValueError, np.dtype, dtspec) + + def test_signed_integer_division_overflow(self): + # Ticket #1317. + def test_type(t): + min = np.array([np.iinfo(t).min]) + min //= -1 + + with np.errstate(over="ignore"): + for t in (np.int8, np.int16, np.int32, np.int64, int): + test_type(t) + + def test_buffer_hashlib(self): + from hashlib import sha256 + + x = np.array([1, 2, 3], dtype=np.dtype('c') + + def test_log1p_compiler_shenanigans(self): + # Check if log1p is behaving on 32 bit intel systems. + assert_(np.isfinite(np.log1p(np.exp2(-53)))) + + def test_fromiter_comparison(self): + a = np.fromiter(list(range(10)), dtype='b') + b = np.fromiter(list(range(10)), dtype='B') + assert_(np.all(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) + assert_(np.all(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) + + def test_fromstring_crash(self): + with assert_raises(ValueError): + np.fromstring(b'aa, aa, 1.0', sep=',') + + def test_ticket_1539(self): + dtypes = [x for x in np._core.sctypeDict.values() + if (issubclass(x, np.number) + and not issubclass(x, np.timedelta64))] + a = np.array([], np.bool) # not x[0] because it is unordered + failures = [] + + for x in dtypes: + b = a.astype(x) + for y in dtypes: + c = a.astype(y) + try: + d = np.dot(b, c) + except TypeError: + failures.append((x, y)) + else: + if d != 0: + failures.append((x, y)) + if failures: + raise AssertionError(f"Failures: {failures!r}") + + def test_ticket_1538(self): + x = np.finfo(np.float32) + for name in ('eps', 'epsneg', 'max', 'min', 'resolution', 'tiny'): + assert_equal(type(getattr(x, name)), np.float32, + err_msg=name) + + def test_ticket_1434(self): + # Check that the out= argument in var and std has an effect + data = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9))) + out = np.zeros((3,)) + + ret = data.var(axis=1, out=out) + assert_(ret is out) + assert_array_equal(ret, data.var(axis=1)) + + ret = data.std(axis=1, out=out) + assert_(ret is out) + assert_array_equal(ret, data.std(axis=1)) + + def test_complex_nan_maximum(self): + cnan = complex(0, np.nan) + assert_equal(np.maximum(1, cnan), cnan) + + def test_subclass_int_tuple_assignment(self): + # ticket #1563 + class Subclass(np.ndarray): + def __new__(cls, i): + return np.ones((i,)).view(cls) + + x = Subclass(5) + x[(0,)] = 2 # shouldn't raise an exception + assert_equal(x[0], 2) + + def test_ufunc_no_unnecessary_views(self): + # ticket #1548 + class Subclass(np.ndarray): + pass + x = np.array([1, 2, 3]).view(Subclass) + y = np.add(x, x, x) + assert_equal(id(x), id(y)) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_take_refcount(self): + # ticket #939 + a = np.arange(16, dtype=float) + a.shape = (4, 4) + lut = np.ones((5 + 3, 4), float) + rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype) + c1 = sys.getrefcount(rgba) + try: + lut.take(a, axis=0, mode='clip', out=rgba) + except TypeError: + pass + c2 = sys.getrefcount(rgba) + assert_equal(c1, c2) + + def test_fromfile_tofile_seeks(self): + # tofile/fromfile used to get (#1610) the Python file handle out of sync + with tempfile.NamedTemporaryFile() as f: + f.write(np.arange(255, dtype='u1').tobytes()) + + f.seek(20) + ret = np.fromfile(f, count=4, dtype='u1') + assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1')) + assert_equal(f.tell(), 24) + + f.seek(40) + np.array([1, 2, 3], dtype='u1').tofile(f) + assert_equal(f.tell(), 43) + + f.seek(40) + data = f.read(3) + assert_equal(data, b"\x01\x02\x03") + + f.seek(80) + f.read(4) + data = np.fromfile(f, dtype='u1', count=4) + assert_equal(data, np.array([84, 85, 86, 87], dtype='u1')) + + def test_complex_scalar_warning(self): + for tp in [np.csingle, np.cdouble, np.clongdouble]: + x = tp(1 + 2j) + pytest.warns(ComplexWarning, float, x) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', ComplexWarning) + assert_equal(float(x), float(x.real)) + + def test_complex_scalar_complex_cast(self): + for tp in [np.csingle, np.cdouble, np.clongdouble]: + x = tp(1 + 2j) + assert_equal(complex(x), 1 + 2j) + + def test_complex_boolean_cast(self): + # Ticket #2218 + for tp in [np.csingle, np.cdouble, np.clongdouble]: + x = np.array([0, 0 + 0.5j, 0.5 + 0j], dtype=tp) + assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool)) + assert_(np.any(x)) + assert_(np.all(x[1:])) + + def test_uint_int_conversion(self): + x = 2**64 - 1 + assert_equal(int(np.uint64(x)), x) + + def test_duplicate_field_names_assign(self): + ra = np.fromiter(((i * 3, i * 2) for i in range(10)), dtype='i8,f8') + ra.dtype.names = ('f1', 'f2') + repr(ra) # should not cause a segmentation fault + assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1')) + + def test_eq_string_and_object_array(self): + # From e-mail thread "__eq__ with str and object" (Keith Goodman) + a1 = np.array(['a', 'b'], dtype=object) + a2 = np.array(['a', 'c']) + assert_array_equal(a1 == a2, [True, False]) + assert_array_equal(a2 == a1, [True, False]) + + def test_nonzero_byteswap(self): + a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32) + a = a.view(np.float32) + assert_equal(a.nonzero()[0], [1]) + a = a.byteswap() + a = a.view(a.dtype.newbyteorder()) + assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap + + def test_empty_mul(self): + a = np.array([1.]) + a[1:1] *= 2 + assert_equal(a, [1.]) + + def test_array_side_effect(self): + # The second use of itemsize was throwing an exception because in + # ctors.c, discover_itemsize was calling PyObject_Length without + # checking the return code. This failed to get the length of the + # number 2, and the exception hung around until something checked + # PyErr_Occurred() and returned an error. + assert_equal(np.dtype('S10').itemsize, 10) + np.array([['abc', 2], ['long ', '0123456789']], dtype=np.bytes_) + assert_equal(np.dtype('S10').itemsize, 10) + + def test_any_float(self): + # all and any for floats + a = np.array([0.1, 0.9]) + assert_(np.any(a)) + assert_(np.all(a)) + + def test_large_float_sum(self): + a = np.arange(10000, dtype='f') + assert_equal(a.sum(dtype='d'), a.astype('d').sum()) + + def test_ufunc_casting_out(self): + a = np.array(1.0, dtype=np.float32) + b = np.array(1.0, dtype=np.float64) + c = np.array(1.0, dtype=np.float32) + np.add(a, b, out=c) + assert_equal(c, 2.0) + + def test_array_scalar_contiguous(self): + # Array scalars are both C and Fortran contiguous + assert_(np.array(1.0).flags.c_contiguous) + assert_(np.array(1.0).flags.f_contiguous) + assert_(np.array(np.float32(1.0)).flags.c_contiguous) + assert_(np.array(np.float32(1.0)).flags.f_contiguous) + + def test_squeeze_contiguous(self): + # Similar to GitHub issue #387 + a = np.zeros((1, 2)).squeeze() + b = np.zeros((2, 2, 2), order='F')[:, :, ::2].squeeze() + assert_(a.flags.c_contiguous) + assert_(a.flags.f_contiguous) + assert_(b.flags.f_contiguous) + + def test_squeeze_axis_handling(self): + # Issue #10779 + # Ensure proper handling of objects + # that don't support axis specification + # when squeezing + + class OldSqueeze(np.ndarray): + + def __new__(cls, + input_array): + obj = np.asarray(input_array).view(cls) + return obj + + # it is perfectly reasonable that prior + # to numpy version 1.7.0 a subclass of ndarray + # might have been created that did not expect + # squeeze to have an axis argument + # NOTE: this example is somewhat artificial; + # it is designed to simulate an old API + # expectation to guard against regression + def squeeze(self): + return super().squeeze() + + oldsqueeze = OldSqueeze(np.array([[1], [2], [3]])) + + # if no axis argument is specified the old API + # expectation should give the correct result + assert_equal(np.squeeze(oldsqueeze), + np.array([1, 2, 3])) + + # likewise, axis=None should work perfectly well + # with the old API expectation + assert_equal(np.squeeze(oldsqueeze, axis=None), + np.array([1, 2, 3])) + + # however, specification of any particular axis + # should raise a TypeError in the context of the + # old API specification, even when using a valid + # axis specification like 1 for this array + with assert_raises(TypeError): + # this would silently succeed for array + # subclasses / objects that did not support + # squeeze axis argument handling before fixing + # Issue #10779 + np.squeeze(oldsqueeze, axis=1) + + # check for the same behavior when using an invalid + # axis specification -- in this case axis=0 does not + # have size 1, but the priority should be to raise + # a TypeError for the axis argument and NOT a + # ValueError for squeezing a non-empty dimension + with assert_raises(TypeError): + np.squeeze(oldsqueeze, axis=0) + + # the new API knows how to handle the axis + # argument and will return a ValueError if + # attempting to squeeze an axis that is not + # of length 1 + with assert_raises(ValueError): + np.squeeze(np.array([[1], [2], [3]]), axis=0) + + def test_reduce_contiguous(self): + # GitHub issue #387 + a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1)) + b = np.add.reduce(np.zeros((2, 1, 2)), 1) + assert_(a.flags.c_contiguous) + assert_(a.flags.f_contiguous) + assert_(b.flags.c_contiguous) + + @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") + @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + def test_object_array_self_reference(self): + # Object arrays with references to themselves can cause problems + a = np.array(0, dtype=object) + a[()] = a + assert_raises(RecursionError, int, a) + assert_raises(RecursionError, float, a) + a[()] = None + + @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") + @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + def test_object_array_circular_reference(self): + # Test the same for a circular reference. + a = np.array(0, dtype=object) + b = np.array(0, dtype=object) + a[()] = b + b[()] = a + assert_raises(RecursionError, int, a) + # NumPy has no tp_traverse currently, so circular references + # cannot be detected. So resolve it: + a[()] = None + + # This was causing a to become like the above + a = np.array(0, dtype=object) + a[...] += 1 + assert_equal(a, 1) + + def test_object_array_nested(self): + # but is fine with a reference to a different array + a = np.array(0, dtype=object) + b = np.array(0, dtype=object) + a[()] = b + assert_equal(int(a), int(0)) # noqa: UP018 + assert_equal(float(a), float(0)) + + def test_object_array_self_copy(self): + # An object array being copied into itself DECREF'ed before INCREF'ing + # causing segmentation faults (gh-3787) + a = np.array(object(), dtype=object) + np.copyto(a, a) + if HAS_REFCOUNT: + assert_(sys.getrefcount(a[()]) == 2) + a[()].__class__ # will segfault if object was deleted + + def test_zerosize_accumulate(self): + "Ticket #1733" + x = np.array([[42, 0]], dtype=np.uint32) + assert_equal(np.add.accumulate(x[:-1, 0]), []) + + def test_objectarray_setfield(self): + # Setfield should not overwrite Object fields with non-Object data + x = np.array([1, 2, 3], dtype=object) + assert_raises(TypeError, x.setfield, 4, np.int32, 0) + + def test_setting_rank0_string(self): + "Ticket #1736" + s1 = b"hello1" + s2 = b"hello2" + a = np.zeros((), dtype="S10") + a[()] = s1 + assert_equal(a, np.array(s1)) + a[()] = np.array(s2) + assert_equal(a, np.array(s2)) + + a = np.zeros((), dtype='f4') + a[()] = 3 + assert_equal(a, np.array(3)) + a[()] = np.array(4) + assert_equal(a, np.array(4)) + + def test_string_astype(self): + "Ticket #1748" + s1 = b'black' + s2 = b'white' + s3 = b'other' + a = np.array([[s1], [s2], [s3]]) + assert_equal(a.dtype, np.dtype('S5')) + b = a.astype(np.dtype('S0')) + assert_equal(b.dtype, np.dtype('S5')) + + def test_ticket_1756(self): + # Ticket #1756 + s = b'0123456789abcdef' + a = np.array([s] * 5) + for i in range(1, 17): + a1 = np.array(a, "|S%d" % i) + a2 = np.array([s[:i]] * 5) + assert_equal(a1, a2) + + def test_fields_strides(self): + "gh-2355" + r = np.frombuffer(b'abcdefghijklmnop' * 4 * 3, dtype='i4,(2,3)u2') + assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2]) + assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1']) + assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()]) + assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides) + + def test_alignment_update(self): + # Check that alignment flag is updated on stride setting + a = np.arange(10) + assert_(a.flags.aligned) + with pytest.warns(DeprecationWarning): + a.strides = 3 + assert_(not a.flags.aligned) + + def test_ticket_1770(self): + "Should not segfault on python 3k" + import numpy as np + try: + a = np.zeros((1,), dtype=[('f1', 'f')]) + a['f1'] = 1 + a['f2'] = 1 + except ValueError: + pass + except Exception: + raise AssertionError + + def test_ticket_1608(self): + "x.flat shouldn't modify data" + x = np.array([[1, 2], [3, 4]]).T + np.array(x.flat) + assert_equal(x, [[1, 3], [2, 4]]) + + def test_pickle_string_overwrite(self): + import re + + data = np.array([1], dtype='b') + blob = pickle.dumps(data, protocol=1) + data = pickle.loads(blob) + + # Check that loads does not clobber interned strings + s = re.sub(r"a(.)", "\x01\\1", "a_") + assert_equal(s[0], "\x01") + data[0] = 0x6a + s = re.sub(r"a(.)", "\x01\\1", "a_") + assert_equal(s[0], "\x01") + + def test_pickle_bytes_overwrite(self): + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + data = np.array([1], dtype='b') + data = pickle.loads(pickle.dumps(data, protocol=proto)) + data[0] = 0x7d + bytestring = "\x01 ".encode('ascii') + assert_equal(bytestring[0:1], '\x01'.encode('ascii')) + + @pytest.mark.filterwarnings( + "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning") + def test_pickle_py2_array_latin1_hack(self): + # Check that unpickling hacks in Py3 that support + # encoding='latin1' work correctly. + + # Python2 output for pickle.dumps(numpy.array([129], dtype='b')) + data = b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\np13\ntp14\nb." + # This should work: + result = pickle.loads(data, encoding='latin1') + assert_array_equal(result, np.array([129]).astype('b')) + # Should not segfault: + assert_raises(Exception, pickle.loads, data, encoding='koi8-r') + + @pytest.mark.filterwarnings( + "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning") + def test_pickle_py2_scalar_latin1_hack(self): + # Check that scalar unpickling hack in Py3 that supports + # encoding='latin1' work correctly. + + # Python2 output for pickle.dumps(...) + datas = [ + # (original, python2_pickle, koi8r_validity) + (np.str_('\u6bd2'), + b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\nI0\ntp6\nbS'\\xd2k\\x00\\x00'\np7\ntp8\nRp9\n.", + 'invalid'), + + (np.float64(9e123), + b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'f8'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI-1\nI-1\nI0\ntp6\nbS'O\\x81\\xb7Z\\xaa:\\xabY'\np7\ntp8\nRp9\n.", + 'invalid'), + + # different 8-bit code point in KOI8-R vs latin1 + (np.bytes_(b'\x9c'), + b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'S1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'|'\np5\nNNNI1\nI1\nI0\ntp6\nbS'\\x9c'\np7\ntp8\nRp9\n.", + 'different'), + ] + for original, data, koi8r_validity in datas: + result = pickle.loads(data, encoding='latin1') + assert_equal(result, original) + + # Decoding under non-latin1 encoding (e.g.) KOI8-R can + # produce bad results, but should not segfault. + if koi8r_validity == 'different': + # Unicode code points happen to lie within latin1, + # but are different in koi8-r, resulting to silent + # bogus results + result = pickle.loads(data, encoding='koi8-r') + assert_(result != original) + elif koi8r_validity == 'invalid': + # Unicode code points outside latin1, so results + # to an encoding exception + assert_raises( + ValueError, pickle.loads, data, encoding='koi8-r' + ) + else: + raise ValueError(koi8r_validity) + + def test_structured_type_to_object(self): + a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8') + a_obj = np.empty((2,), dtype=object) + a_obj[0] = (0, 1) + a_obj[1] = (3, 2) + # astype records -> object + assert_equal(a_rec.astype(object), a_obj) + # '=' records -> object + b = np.empty_like(a_obj) + b[...] = a_rec + assert_equal(b, a_obj) + # '=' object -> records + b = np.empty_like(a_rec) + b[...] = a_obj + assert_equal(b, a_rec) + + def test_assign_obj_listoflists(self): + # Ticket # 1870 + # The inner list should get assigned to the object elements + a = np.zeros(4, dtype=object) + b = a.copy() + a[0] = [1] + a[1] = [2] + a[2] = [3] + a[3] = [4] + b[...] = [[1], [2], [3], [4]] + assert_equal(a, b) + # The first dimension should get broadcast + a = np.zeros((2, 2), dtype=object) + a[...] = [[1, 2]] + assert_equal(a, [[1, 2], [1, 2]]) + + @pytest.mark.slow_pypy + def test_memoryleak(self): + # Ticket #1917 - ensure that array data doesn't leak + for i in range(1000): + # 100MB times 1000 would give 100GB of memory usage if it leaks + a = np.empty((100000000,), dtype='i1') + del a + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_ufunc_reduce_memoryleak(self): + a = np.arange(6) + acnt = sys.getrefcount(a) + np.add.reduce(a) + assert_equal(sys.getrefcount(a), acnt) + + def test_search_sorted_invalid_arguments(self): + # Ticket #2021, should not segfault. + x = np.arange(0, 4, dtype='datetime64[D]') + assert_raises(TypeError, x.searchsorted, 1) + + def test_string_truncation(self): + # Ticket #1990 - Data can be truncated in creation of an array from a + # mixed sequence of numeric values and strings (gh-2583) + for val in [True, 1234, 123.4, complex(1, 234)]: + for tostr, dtype in [(asunicode, "U"), (asbytes, "S")]: + b = np.array([val, tostr('xx')], dtype=dtype) + assert_equal(tostr(b[0]), tostr(val)) + b = np.array([tostr('xx'), val], dtype=dtype) + assert_equal(tostr(b[1]), tostr(val)) + + # test also with longer strings + b = np.array([val, tostr('xxxxxxxxxx')], dtype=dtype) + assert_equal(tostr(b[0]), tostr(val)) + b = np.array([tostr('xxxxxxxxxx'), val], dtype=dtype) + assert_equal(tostr(b[1]), tostr(val)) + + def test_string_truncation_ucs2(self): + # Ticket #2081. Python compiled with two byte unicode + # can lead to truncation if itemsize is not properly + # adjusted for NumPy's four byte unicode. + a = np.array(['abcd']) + assert_equal(a.dtype.itemsize, 16) + + def test_unique_stable(self): + # Ticket #2063 must always choose stable sort for argsort to + # get consistent results + v = np.array(([0] * 5 + [1] * 6 + [2] * 6) * 4) + res = np.unique(v, return_index=True) + tgt = (np.array([0, 1, 2]), np.array([0, 5, 11])) + assert_equal(res, tgt) + + def test_unicode_alloc_dealloc_match(self): + # Ticket #1578, the mismatch only showed up when running + # python-debug for python versions >= 2.7, and then as + # a core dump and error message. + a = np.array(['abc'], dtype=np.str_)[0] + del a + + def test_refcount_error_in_clip(self): + # Ticket #1588 + a = np.zeros((2,), dtype='>i2').clip(min=0) + x = a + a + # This used to segfault: + y = str(x) + # Check the final string: + assert_(y == "[0 0]") + + def test_searchsorted_wrong_dtype(self): + # Ticket #2189, it used to segfault, so we check that it raises the + # proper exception. + a = np.array([('a', 1)], dtype='S1, int') + assert_raises(TypeError, np.searchsorted, a, 1.2) + # Ticket #2066, similar problem: + dtype = np.rec.format_parser(['i4', 'i4'], [], []) + a = np.recarray((2,), dtype) + a[...] = [(1, 2), (3, 4)] + assert_raises(TypeError, np.searchsorted, a, 1) + + def test_complex64_alignment(self): + # Issue gh-2668 (trac 2076), segfault on sparc due to misalignment + dtt = np.complex64 + arr = np.arange(10, dtype=dtt) + # 2D array + arr2 = np.reshape(arr, (2, 5)) + # Fortran write followed by (C or F) read caused bus error + data_str = arr2.tobytes('F') + data_back = np.ndarray(arr2.shape, + arr2.dtype, + buffer=data_str, + order='F') + assert_array_equal(arr2, data_back) + + def test_structured_count_nonzero(self): + arr = np.array([0, 1]).astype('i4, 2i4')[:1] + count = np.count_nonzero(arr) + assert_equal(count, 0) + + def test_copymodule_preserves_f_contiguity(self): + a = np.empty((2, 2), order='F') + b = copy.copy(a) + c = copy.deepcopy(a) + assert_(b.flags.fortran) + assert_(b.flags.f_contiguous) + assert_(c.flags.fortran) + assert_(c.flags.f_contiguous) + + def test_fortran_order_buffer(self): + import numpy as np + a = np.array([['Hello', 'Foob']], dtype='U5', order='F') + arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a) + arr2 = np.array([[['H', 'e', 'l', 'l', 'o'], + ['F', 'o', 'o', 'b', '']]]) + assert_array_equal(arr, arr2) + + def test_assign_from_sequence_error(self): + # Ticket #4024. + arr = np.array([1, 2, 3]) + assert_raises(ValueError, arr.__setitem__, slice(None), [9, 9]) + arr.__setitem__(slice(None), [9]) + assert_equal(arr, [9, 9, 9]) + + def test_format_on_flex_array_element(self): + # Ticket #4369. + dt = np.dtype([('date', ' 0: + # unpickling ndarray goes through _frombuffer for protocol 5 + assert b'numpy._core.numeric' in s + else: + assert b'numpy._core.multiarray' in s + + def test_object_casting_errors(self): + # gh-11993 update to ValueError (see gh-16909), since strings can in + # principle be converted to complex, but this string cannot. + arr = np.array(['AAAAA', 18465886.0, 18465886.0], dtype=object) + assert_raises(ValueError, arr.astype, 'c8') + + def test_eff1d_casting(self): + # gh-12711 + x = np.array([1, 2, 4, 7, 0], dtype=np.int16) + res = np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99])) + assert_equal(res, [-99, 1, 2, 3, -7, 88, 99]) + + # The use of safe casting means, that 1<<20 is cast unsafely, an + # error may be better, but currently there is no mechanism for it. + res = np.ediff1d(x, to_begin=(1 << 20), to_end=(1 << 20)) + assert_equal(res, [0, 1, 2, 3, -7, 0]) + + def test_pickle_datetime64_array(self): + # gh-12745 (would fail with pickle5 installed) + d = np.datetime64('2015-07-04 12:59:59.50', 'ns') + arr = np.array([d]) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + dumped = pickle.dumps(arr, protocol=proto) + assert_equal(pickle.loads(dumped), arr) + + def test_bad_array_interface(self): + class T: + __array_interface__ = {} + + with assert_raises(ValueError): + np.array([T()]) + + def test_2d__array__shape(self): + class T: + def __array__(self, dtype=None, copy=None): + return np.ndarray(shape=(0, 0)) + + # Make sure __array__ is used instead of Sequence methods. + def __iter__(self): + return iter([]) + + def __getitem__(self, idx): + raise AssertionError("__getitem__ was called") + + def __len__(self): + return 0 + + t = T() + # gh-13659, would raise in broadcasting [x=t for x in result] + arr = np.array([t]) + assert arr.shape == (1, 0, 0) + + @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python') + def test_to_ctypes(self): + # gh-14214 + arr = np.zeros((2 ** 31 + 1,), 'b') + assert arr.size * arr.itemsize > 2 ** 31 + c_arr = np.ctypeslib.as_ctypes(arr) + assert_equal(c_arr._length_, arr.size) + + def test_complex_conversion_error(self): + # gh-17068 + with pytest.raises(TypeError, match=r"Unable to convert dtype.*"): + complex(np.array("now", np.datetime64)) + + def test__array_interface__descr(self): + # gh-17068 + dt = np.dtype({'names': ['a', 'b'], + 'offsets': [0, 0], + 'formats': [np.int64, np.int64]}) + descr = np.array((1, 1), dtype=dt).__array_interface__['descr'] + assert descr == [('', '|V8')] # instead of [(b'', '|V8')] + + @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python') + @requires_memory(free_bytes=9e9) + @pytest.mark.thread_unsafe(reason="crashes with low memory") + def test_dot_big_stride(self): + # gh-17111 + # blas stride = stride//itemsize > int32 max + int32_max = np.iinfo(np.int32).max + n = int32_max + 3 + a = np.empty([n], dtype=np.float32) + b = a[::n - 1] + b[...] = 1 + assert b.strides[0] > int32_max * b.dtype.itemsize + assert np.dot(b, b) == 2.0 + + def test_frompyfunc_name(self): + # name conversion was failing for python 3 strings + # resulting in the default '?' name. Also test utf-8 + # encoding using non-ascii name. + def cassé(x): + return x + + f = np.frompyfunc(cassé, 1, 1) + assert str(f) == "" + + @pytest.mark.parametrize("operation", [ + 'add', 'subtract', 'multiply', 'floor_divide', + 'conjugate', 'fmod', 'square', 'reciprocal', + 'power', 'absolute', 'negative', 'positive', + 'greater', 'greater_equal', 'less', + 'less_equal', 'equal', 'not_equal', 'logical_and', + 'logical_not', 'logical_or', 'bitwise_and', 'bitwise_or', + 'bitwise_xor', 'invert', 'left_shift', 'right_shift', + 'gcd', 'lcm' + ] + ) + @pytest.mark.parametrize("order", [ + ('b->', 'B->'), + ('h->', 'H->'), + ('i->', 'I->'), + ('l->', 'L->'), + ('q->', 'Q->'), + ] + ) + def test_ufunc_order(self, operation, order): + # gh-18075 + # Ensure signed types before unsigned + def get_idx(string, str_lst): + for i, s in enumerate(str_lst): + if string in s: + return i + raise ValueError(f"{string} not in list") + types = getattr(np, operation).types + assert get_idx(order[0], types) < get_idx(order[1], types), ( + f"Unexpected types order of ufunc in {operation}" + f"for {order}. Possible fix: Use signed before unsigned" + "in generate_umath.py") + + def test_nonbool_logical(self): + # gh-22845 + # create two arrays with bit patterns that do not overlap. + # needs to be large enough to test both SIMD and scalar paths + size = 100 + a = np.frombuffer(b'\x01' * size, dtype=np.bool) + b = np.frombuffer(b'\x80' * size, dtype=np.bool) + expected = np.ones(size, dtype=np.bool) + assert_array_equal(np.logical_and(a, b), expected) + + @pytest.mark.skipif(IS_PYPY, reason="PyPy issue 2742") + def test_gh_23737(self): + with pytest.raises(TypeError, match="not an acceptable base type"): + class Y(np.flexible): + pass + + with pytest.raises(TypeError, match="not an acceptable base type"): + class X(np.flexible, np.ma.core.MaskedArray): + pass + + def test_load_ufunc_pickle(self): + # ufuncs are pickled with a semi-private path in + # numpy.core._multiarray_umath and must be loadable without warning + # despite np.core being deprecated. + test_data = b'\x80\x04\x95(\x00\x00\x00\x00\x00\x00\x00\x8c\x1cnumpy.core._multiarray_umath\x94\x8c\x03add\x94\x93\x94.' + result = pickle.loads(test_data, encoding='bytes') + assert result is np.add + + def test__array_namespace__(self): + arr = np.arange(2) + + xp = arr.__array_namespace__() + assert xp is np + xp = arr.__array_namespace__(api_version="2021.12") + assert xp is np + xp = arr.__array_namespace__(api_version="2022.12") + assert xp is np + xp = arr.__array_namespace__(api_version="2023.12") + assert xp is np + xp = arr.__array_namespace__(api_version="2024.12") + assert xp is np + xp = arr.__array_namespace__(api_version=None) + assert xp is np + + with pytest.raises( + ValueError, + match="Version \"2025.12\" of the Array API Standard " + "is not supported." + ): + arr.__array_namespace__(api_version="2025.12") + + with pytest.raises( + ValueError, + match="Only None and strings are allowed as the Array API version" + ): + arr.__array_namespace__(api_version=2024) + + def test_isin_refcnt_bug(self): + # gh-25295 + for _ in range(1000): + np.isclose(np.int64(2), np.int64(2), atol=1e-15, rtol=1e-300) + + def test_replace_regression(self): + # gh-25513 segfault + carr = np.char.chararray((2,), itemsize=25) + test_strings = [b' 4.52173913043478315E+00', + b' 4.95652173913043548E+00'] + carr[:] = test_strings + out = carr.replace(b"E", b"D") + expected = np.char.chararray((2,), itemsize=25) + expected[:] = [s.replace(b"E", b"D") for s in test_strings] + assert_array_equal(out, expected) + + def test_logspace_base_does_not_determine_dtype(self): + # gh-24957 and cupy/cupy/issues/7946 + start = np.array([0, 2], dtype=np.float16) + stop = np.array([2, 0], dtype=np.float16) + out = np.logspace(start, stop, num=5, axis=1, dtype=np.float32) + expected = np.array([[1., 3.1621094, 10., 31.625, 100.], + [100., 31.625, 10., 3.1621094, 1.]], + dtype=np.float32) + assert_almost_equal(out, expected) + # Check test fails if the calculation is done in float64, as happened + # before when a python float base incorrectly influenced the dtype. + out2 = np.logspace(start, stop, num=5, axis=1, dtype=np.float32, + base=np.array([10.0])) + with pytest.raises(AssertionError, match="not almost equal"): + assert_almost_equal(out2, expected) + + def test_vectorize_fixed_width_string(self): + arr = np.array(["SOme wOrd DŽ ß ᾛ ΣΣ ffi⁵Å Ç Ⅰ"]).astype(np.str_) + f = str.casefold + res = np.vectorize(f, otypes=[arr.dtype])(arr) + assert res.dtype == "U30" + + def test_repeated_square_consistency(self): + # gh-26940 + buf = np.array([-5.171866611150749e-07 + 2.5618634555957426e-07j, + 0, 0, 0, 0, 0]) + # Test buffer with regular and reverse strides + for in_vec in [buf[:3], buf[:3][::-1]]: + expected_res = np.square(in_vec) + # Output vector immediately follows input vector + # to reproduce off-by-one in nomemoverlap check. + for res in [buf[3:], buf[3:][::-1]]: + res = buf[3:] + np.square(in_vec, out=res) + assert_equal(res, expected_res) + + def test_sort_unique_crash(self): + # gh-27037 + for _ in range(4): + vals = np.linspace(0, 1, num=128) + data = np.broadcast_to(vals, (128, 128, 128)) + data = data.transpose(0, 2, 1).copy() + np.unique(data) + + def test_sort_overlap(self): + # gh-27273 + size = 100 + inp = np.linspace(0, size, num=size, dtype=np.intc) + out = np.sort(inp) + assert_equal(inp, out) + + def test_searchsorted_structured(self): + # gh-28190 + x = np.array([(0, 1.)], dtype=[('time', ' None: + cls = np.dtype(code).type + value = cls(str_value) + assert not value.is_integer() + + @pytest.mark.parametrize( + "code", np.typecodes["Float"] + np.typecodes["AllInteger"] + ) + def test_true(self, code: str) -> None: + float_array = np.arange(-5, 5).astype(code) + for value in float_array: + assert value.is_integer() + + @pytest.mark.parametrize("code", np.typecodes["Float"]) + def test_false(self, code: str) -> None: + float_array = np.arange(-5, 5).astype(code) + float_array *= 1.1 + for value in float_array: + if value == 0: + continue + assert not value.is_integer() + + +class TestClassGetItem: + @pytest.mark.parametrize("cls", [ + np.number, + np.integer, + np.inexact, + np.unsignedinteger, + np.signedinteger, + np.floating, + ]) + def test_abc(self, cls: type[np.number]) -> None: + alias = cls[Any] + assert isinstance(alias, types.GenericAlias) + assert alias.__origin__ is cls + + def test_abc_complexfloating(self) -> None: + alias = np.complexfloating[Any, Any] + assert isinstance(alias, types.GenericAlias) + assert alias.__origin__ is np.complexfloating + + @pytest.mark.parametrize("arg_len", range(4)) + def test_abc_complexfloating_subscript_tuple(self, arg_len: int) -> None: + arg_tup = (Any,) * arg_len + if arg_len in (1, 2): + assert np.complexfloating[arg_tup] + else: + match = f"Too {'few' if arg_len == 0 else 'many'} arguments" + with pytest.raises(TypeError, match=match): + np.complexfloating[arg_tup] + + @pytest.mark.parametrize("cls", [np.generic, np.flexible, np.character]) + def test_abc_non_numeric(self, cls: type[np.generic]) -> None: + with pytest.raises(TypeError): + cls[Any] + + @pytest.mark.parametrize("code", np.typecodes["All"]) + def test_concrete(self, code: str) -> None: + cls = np.dtype(code).type + if cls in {np.bool, np.datetime64}: + # these are intentionally subscriptable + assert cls[Any] + else: + with pytest.raises(TypeError): + cls[Any] + + @pytest.mark.parametrize("arg_len", range(4)) + def test_subscript_tuple(self, arg_len: int) -> None: + arg_tup = (Any,) * arg_len + if arg_len == 1: + assert np.number[arg_tup] + else: + with pytest.raises(TypeError): + np.number[arg_tup] + + def test_subscript_scalar(self) -> None: + assert np.number[Any] + + @pytest.mark.parametrize("subscript", [Literal[True], Literal[False]]) + def test_subscript_bool(self, subscript: Literal[True, False]) -> None: + assert isinstance(np.bool[subscript], types.GenericAlias) + + +class TestBitCount: + # derived in part from the cpython test "test_bit_count" + + @pytest.mark.parametrize("itype", sctypes['int'] + sctypes['uint']) + def test_small(self, itype): + for a in range(max(np.iinfo(itype).min, 0), 128): + msg = f"Smoke test for {itype}({a}).bit_count()" + assert itype(a).bit_count() == a.bit_count(), msg + + def test_bit_count(self): + for exp in [10, 17, 63]: + a = 2**exp + assert np.uint64(a).bit_count() == 1 + assert np.uint64(a - 1).bit_count() == exp + assert np.uint64(a ^ 63).bit_count() == 7 + assert np.uint64((a - 1) ^ 510).bit_count() == exp - 8 + + +class TestDevice: + """ + Test scalar.device attribute and scalar.to_device() method. + """ + scalars = [np.bool(True), np.int64(1), np.uint64(1), np.float64(1.0), + np.complex128(1 + 1j)] + + @pytest.mark.parametrize("scalar", scalars) + def test_device(self, scalar): + assert scalar.device == "cpu" + + @pytest.mark.parametrize("scalar", scalars) + def test_to_device(self, scalar): + assert scalar.to_device("cpu") is scalar + + @pytest.mark.parametrize("scalar", scalars) + def test___array_namespace__(self, scalar): + assert scalar.__array_namespace__() is np + + +@pytest.mark.parametrize("scalar", [np.bool(True), np.int8(1), np.float64(1)]) +def test_array_wrap(scalar): + # Test scalars array wrap as long as it exists. NumPy itself should + # probably not use it, so it may not be necessary to keep it around. + + arr0d = np.array(3, dtype=np.int8) + # Third argument not passed, None, or True "decays" to scalar. + # (I don't think NumPy would pass `None`, but it seems clear to support) + assert type(scalar.__array_wrap__(arr0d)) is np.int8 + assert type(scalar.__array_wrap__(arr0d, None, None)) is np.int8 + assert type(scalar.__array_wrap__(arr0d, None, True)) is np.int8 + + # Otherwise, result should be the input + assert scalar.__array_wrap__(arr0d, None, False) is arr0d + + # An old bug. A non 0-d array cannot be converted to scalar: + arr1d = np.array([3], dtype=np.int8) + assert scalar.__array_wrap__(arr1d) is arr1d + assert scalar.__array_wrap__(arr1d, None, True) is arr1d + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") +class TestSignature: + # test that scalar types have a valid __text_signature__ or __signature__ set + @pytest.mark.parametrize( + "sctype", + [ + *sctypes["int"], + *sctypes["uint"], + *sctypes["float"], + *sctypes["complex"], + *sctypes["others"], + np.datetime64, + np.timedelta64, + ], + ) + def test_constructor_signatures(self, sctype: type[np.generic]): + try: + sig = inspect.signature(sctype) + except ValueError: + pytest.fail(f"missing signature: {sctype}") + + assert sig.parameters + + @pytest.mark.parametrize( + "sctype", + [np.integer, *sctypes["int"], *sctypes["uint"], *sctypes["float"]], + ) + def test_method_signatures_is_integer(self, sctype: type[np.integer | np.floating]): + try: + sig = inspect.signature(sctype.is_integer) + except ValueError: + pytest.fail(f"missing signature: {sctype.__name__}.is_integer") + + assert len(sig.parameters) == 1 + assert sig.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY + + @pytest.mark.parametrize("sctype", sctypes["float"]) + def test_method_signatures_as_integer_ratio(self, sctype: type[np.floating]): + try: + sig = inspect.signature(sctype.as_integer_ratio) + except ValueError: + pytest.fail(f"missing signature: {sctype.__name__}.as_integer_ratio") + + assert len(sig.parameters) == 1 + assert sig.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY + + @pytest.mark.parametrize( + "method_name", + [ + "__array_namespace__", "__copy__", "__deepcopy__", "all", "any", "argmax", + "argmin", "argsort", "astype", "byteswap", "choose", "clip", "compress", + "conj", "conjugate", "copy", "cumprod", "cumsum", "diagonal", "dump", + "dumps", "fill", "flatten", "getfield", "item", "max", "mean", "min", + "nonzero", "prod", "put", "ravel", "repeat", "reshape", "resize", "round", + "searchsorted", "setfield", "setflags", "sort", "squeeze", "std", "sum", + "swapaxes", "take", "to_device", "tobytes", "tofile", "tolist", "trace", + "transpose", "var", "view", + ], + ) + def test_array_scalar_method_signatures(self, method_name: str): + # methods shared by np.generic and np.ndarray should have the same signature + fn_generic = getattr(np.generic, method_name) + sig_generic = inspect.signature(fn_generic) + assert "self" in sig_generic.parameters + assert sig_generic.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY + + fn_ndarray = getattr(np.ndarray, method_name) + sig_ndarray = inspect.signature(fn_ndarray) + assert sig_generic == sig_ndarray diff --git a/local_packages/numpy/_core/tests/test_scalarbuffer.py b/local_packages/numpy/_core/tests/test_scalarbuffer.py new file mode 100644 index 0000000..4d2744b --- /dev/null +++ b/local_packages/numpy/_core/tests/test_scalarbuffer.py @@ -0,0 +1,153 @@ +""" +Test scalar buffer interface adheres to PEP 3118 +""" +import pytest + +import numpy as np +from numpy._core._multiarray_tests import get_buffer_info +from numpy._core._rational_tests import rational +from numpy.testing import assert_, assert_equal, assert_raises + +# PEP3118 format strings for native (standard alignment and byteorder) types +scalars_and_codes = [ + (np.bool, '?'), + (np.byte, 'b'), + (np.short, 'h'), + (np.intc, 'i'), + (np.long, 'l'), + (np.longlong, 'q'), + (np.ubyte, 'B'), + (np.ushort, 'H'), + (np.uintc, 'I'), + (np.ulong, 'L'), + (np.ulonglong, 'Q'), + (np.half, 'e'), + (np.single, 'f'), + (np.double, 'd'), + (np.longdouble, 'g'), + (np.csingle, 'Zf'), + (np.cdouble, 'Zd'), + (np.clongdouble, 'Zg'), +] +scalars_only, codes_only = zip(*scalars_and_codes) + + +class TestScalarPEP3118: + + @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only) + def test_scalar_match_array(self, scalar): + x = scalar() + a = np.array([], dtype=np.dtype(scalar)) + mv_x = memoryview(x) + mv_a = memoryview(a) + assert_equal(mv_x.format, mv_a.format) + + @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only) + def test_scalar_dim(self, scalar): + x = scalar() + mv_x = memoryview(x) + assert_equal(mv_x.itemsize, np.dtype(scalar).itemsize) + assert_equal(mv_x.ndim, 0) + assert_equal(mv_x.shape, ()) + assert_equal(mv_x.strides, ()) + assert_equal(mv_x.suboffsets, ()) + + @pytest.mark.parametrize('scalar, code', scalars_and_codes, ids=codes_only) + def test_scalar_code_and_properties(self, scalar, code): + x = scalar() + expected = {'strides': (), 'itemsize': x.dtype.itemsize, 'ndim': 0, + 'shape': (), 'format': code, 'readonly': True} + + mv_x = memoryview(x) + assert self._as_dict(mv_x) == expected + + @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only) + def test_scalar_buffers_readonly(self, scalar): + x = scalar() + with pytest.raises(BufferError, match="scalar buffer is readonly"): + get_buffer_info(x, ["WRITABLE"]) + + def test_void_scalar_structured_data(self): + dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + x = np.array(('ndarray_scalar', (1.2, 3.0)), dtype=dt)[()] + assert_(isinstance(x, np.void)) + mv_x = memoryview(x) + expected_size = 16 * np.dtype((np.str_, 1)).itemsize + expected_size += 2 * np.dtype(np.float64).itemsize + assert_equal(mv_x.itemsize, expected_size) + assert_equal(mv_x.ndim, 0) + assert_equal(mv_x.shape, ()) + assert_equal(mv_x.strides, ()) + assert_equal(mv_x.suboffsets, ()) + + # check scalar format string against ndarray format string + a = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt) + assert_(isinstance(a, np.ndarray)) + mv_a = memoryview(a) + assert_equal(mv_x.itemsize, mv_a.itemsize) + assert_equal(mv_x.format, mv_a.format) + + # Check that we do not allow writeable buffer export (technically + # we could allow it sometimes here...) + with pytest.raises(BufferError, match="scalar buffer is readonly"): + get_buffer_info(x, ["WRITABLE"]) + + def _as_dict(self, m): + return {'strides': m.strides, 'shape': m.shape, 'itemsize': m.itemsize, + 'ndim': m.ndim, 'format': m.format, 'readonly': m.readonly} + + def test_datetime_memoryview(self): + # gh-11656 + # Values verified with v1.13.3, shape is not () as in test_scalar_dim + + dt1 = np.datetime64('2016-01-01') + dt2 = np.datetime64('2017-01-01') + expected = {'strides': (1,), 'itemsize': 1, 'ndim': 1, 'shape': (8,), + 'format': 'B', 'readonly': True} + v = memoryview(dt1) + assert self._as_dict(v) == expected + + v = memoryview(dt2 - dt1) + assert self._as_dict(v) == expected + + dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')]) + a = np.empty(1, dt) + # Fails to create a PEP 3118 valid buffer + assert_raises((ValueError, BufferError), memoryview, a[0]) + + # Check that we do not allow writeable buffer export + with pytest.raises(BufferError, match="scalar buffer is readonly"): + get_buffer_info(dt1, ["WRITABLE"]) + + @pytest.mark.parametrize('s', [ + pytest.param("\x32\x32", id="ascii"), + pytest.param("\uFE0F\uFE0F", id="basic multilingual"), + pytest.param("\U0001f4bb\U0001f4bb", id="non-BMP"), + ]) + def test_str_ucs4(self, s): + s = np.str_(s) # only our subclass implements the buffer protocol + + # all the same, characters always encode as ucs4 + expected = {'strides': (), 'itemsize': 8, 'ndim': 0, 'shape': (), + 'format': '2w', 'readonly': True} + + v = memoryview(s) + assert self._as_dict(v) == expected + + # integers of the paltform-appropriate endianness + code_points = np.frombuffer(v, dtype='i4') + + assert_equal(code_points, [ord(c) for c in s]) + + # Check that we do not allow writeable buffer export + with pytest.raises(BufferError, match="scalar buffer is readonly"): + get_buffer_info(s, ["WRITABLE"]) + + def test_user_scalar_fails_buffer(self): + r = rational(1) + with assert_raises(TypeError): + memoryview(r) + + # Check that we do not allow writeable buffer export + with pytest.raises(BufferError, match="scalar buffer is readonly"): + get_buffer_info(r, ["WRITABLE"]) diff --git a/local_packages/numpy/_core/tests/test_scalarinherit.py b/local_packages/numpy/_core/tests/test_scalarinherit.py new file mode 100644 index 0000000..746a157 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_scalarinherit.py @@ -0,0 +1,105 @@ +""" Test printing of scalar types. + +""" +import pytest + +import numpy as np +from numpy.testing import assert_, assert_raises + + +class A: + pass +class B(A, np.float64): + pass + +class C(B): + pass +class D(C, B): + pass + +class B0(np.float64, A): + pass +class C0(B0): + pass + +class HasNew: + def __new__(cls, *args, **kwargs): + return cls, args, kwargs + +class B1(np.float64, HasNew): + pass + + +class TestInherit: + def test_init(self): + x = B(1.0) + assert_(str(x) == '1.0') + y = C(2.0) + assert_(str(y) == '2.0') + z = D(3.0) + assert_(str(z) == '3.0') + + def test_init2(self): + x = B0(1.0) + assert_(str(x) == '1.0') + y = C0(2.0) + assert_(str(y) == '2.0') + + def test_gh_15395(self): + # HasNew is the second base, so `np.float64` should have priority + x = B1(1.0) + assert_(str(x) == '1.0') + + # previously caused RecursionError!? + with pytest.raises(TypeError): + B1(1.0, 2.0) + + def test_int_repr(self): + # Test that integer repr works correctly for subclasses (gh-27106) + class my_int16(np.int16): + pass + + s = repr(my_int16(3)) + assert s == "my_int16(3)" + +class TestCharacter: + def test_char_radd(self): + # GH issue 9620, reached gentype_add and raise TypeError + np_s = np.bytes_('abc') + np_u = np.str_('abc') + s = b'def' + u = 'def' + assert_(np_s.__radd__(np_s) is NotImplemented) + assert_(np_s.__radd__(np_u) is NotImplemented) + assert_(np_s.__radd__(s) is NotImplemented) + assert_(np_s.__radd__(u) is NotImplemented) + assert_(np_u.__radd__(np_s) is NotImplemented) + assert_(np_u.__radd__(np_u) is NotImplemented) + assert_(np_u.__radd__(s) is NotImplemented) + assert_(np_u.__radd__(u) is NotImplemented) + assert_(s + np_s == b'defabc') + assert_(u + np_u == 'defabc') + + class MyStr(str, np.generic): + # would segfault + pass + + with assert_raises(TypeError): + # Previously worked, but gave completely wrong result + ret = s + MyStr('abc') + + class MyBytes(bytes, np.generic): + # would segfault + pass + + ret = s + MyBytes(b'abc') + assert type(ret) is type(s) + assert ret == b"defabc" + + def test_char_repeat(self): + np_s = np.bytes_('abc') + np_u = np.str_('abc') + res_s = b'abc' * 5 + res_u = 'abc' * 5 + assert_(np_s * 5 == res_s) + assert_(np_u * 5 == res_u) diff --git a/local_packages/numpy/_core/tests/test_scalarmath.py b/local_packages/numpy/_core/tests/test_scalarmath.py new file mode 100644 index 0000000..bfbc9a5 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_scalarmath.py @@ -0,0 +1,1168 @@ +import contextlib +import itertools +import operator +import platform +import sys +import warnings + +import pytest +from hypothesis import given, settings +from hypothesis.extra import numpy as hynp +from hypothesis.strategies import sampled_from + +import numpy as np +from numpy._core._rational_tests import rational +from numpy._utils import _pep440 +from numpy.exceptions import ComplexWarning +from numpy.testing import ( + IS_PYPY, + _gen_alignment_data, + assert_, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + check_support_sve, +) + +types = [np.bool, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc, + np.int_, np.uint, np.longlong, np.ulonglong, + np.single, np.double, np.longdouble, np.csingle, + np.cdouble, np.clongdouble] + +floating_types = np.floating.__subclasses__() +complex_floating_types = np.complexfloating.__subclasses__() + +objecty_things = [object(), None, np.array(None, dtype=object)] + +binary_operators_for_scalars = [ + operator.lt, operator.le, operator.eq, operator.ne, operator.ge, + operator.gt, operator.add, operator.floordiv, operator.mod, + operator.mul, operator.pow, operator.sub, operator.truediv +] +binary_operators_for_scalar_ints = binary_operators_for_scalars + [ + operator.xor, operator.or_, operator.and_ +] + + +# This compares scalarmath against ufuncs. + +class TestTypes: + def test_types(self): + for atype in types: + a = atype(1) + assert_(a == 1, f"error with {atype!r}: got {a!r}") + + def test_type_add(self): + # list of types + for k, atype in enumerate(types): + a_scalar = atype(3) + a_array = np.array([3], dtype=atype) + for l, btype in enumerate(types): + b_scalar = btype(1) + b_array = np.array([1], dtype=btype) + c_scalar = a_scalar + b_scalar + c_array = a_array + b_array + # It was comparing the type numbers, but the new ufunc + # function-finding mechanism finds the lowest function + # to which both inputs can be cast - which produces 'l' + # when you do 'q' + 'b'. The old function finding mechanism + # skipped ahead based on the first argument, but that + # does not produce properly symmetric results... + assert_equal(c_scalar.dtype, c_array.dtype, + "error with types (%d/'%c' + %d/'%c')" % + (k, np.dtype(atype).char, l, np.dtype(btype).char)) + + def test_type_create(self): + for atype in types: + a = np.array([1, 2, 3], atype) + b = atype([1, 2, 3]) + assert_equal(a, b) + + def test_leak(self): + # test leak of scalar objects + # a leak would show up in valgrind as still-reachable of ~2.6MB + for i in range(200000): + np.add(1, 1) + + +def check_ufunc_scalar_equivalence(op, arr1, arr2): + scalar1 = arr1[()] + scalar2 = arr2[()] + assert isinstance(scalar1, np.generic) + assert isinstance(scalar2, np.generic) + + if arr1.dtype.kind == "c" or arr2.dtype.kind == "c": + comp_ops = {operator.ge, operator.gt, operator.le, operator.lt} + if op in comp_ops and (np.isnan(scalar1) or np.isnan(scalar2)): + pytest.xfail("complex comp ufuncs use sort-order, scalars do not.") + if op == operator.pow and arr2.item() in [-1, 0, 0.5, 1, 2]: + # array**scalar special case can have different result dtype + # (Other powers may have issues also, but are not hit here.) + # TODO: It would be nice to resolve this issue. + pytest.skip("array**2 can have incorrect/weird result dtype") + + # ignore fpe's since they may just mismatch for integers anyway. + with warnings.catch_warnings(), np.errstate(all="ignore"): + # Comparisons DeprecationWarnings replacing errors (2022-03): + warnings.simplefilter("error", DeprecationWarning) + try: + res = op(arr1, arr2) + except Exception as e: + with pytest.raises(type(e)): + op(scalar1, scalar2) + else: + scalar_res = op(scalar1, scalar2) + assert_array_equal(scalar_res, res, strict=True) + + +@pytest.mark.slow +@settings(max_examples=10000, deadline=2000) +@given(sampled_from(binary_operators_for_scalars), + hynp.arrays(dtype=hynp.scalar_dtypes(), shape=()), + hynp.arrays(dtype=hynp.scalar_dtypes(), shape=())) +def test_array_scalar_ufunc_equivalence(op, arr1, arr2): + """ + This is a thorough test attempting to cover important promotion paths + and ensuring that arrays and scalars stay as aligned as possible. + However, if it creates troubles, it should maybe just be removed. + """ + check_ufunc_scalar_equivalence(op, arr1, arr2) + + +@pytest.mark.slow +@given(sampled_from(binary_operators_for_scalars), + hynp.scalar_dtypes(), hynp.scalar_dtypes()) +def test_array_scalar_ufunc_dtypes(op, dt1, dt2): + # Same as above, but don't worry about sampling weird values so that we + # do not have to sample as much + arr1 = np.array(2, dtype=dt1) + arr2 = np.array(3, dtype=dt2) # some power do weird things. + + check_ufunc_scalar_equivalence(op, arr1, arr2) + + +@pytest.mark.parametrize("fscalar", [np.float16, np.float32]) +def test_int_float_promotion_truediv(fscalar): + # Promotion for mixed int and float32/float16 must not go to float64 + i = np.int8(1) + f = fscalar(1) + expected = np.result_type(i, f) + assert (i / f).dtype == expected + assert (f / i).dtype == expected + # But normal int / int true division goes to float64: + assert (i / i).dtype == np.dtype("float64") + # For int16, result has to be ast least float32 (takes ufunc path): + assert (np.int16(1) / f).dtype == np.dtype("float32") + + +class TestBaseMath: + @pytest.mark.xfail(check_support_sve(), reason="gh-22982") + def test_blocked(self): + # test alignments offsets for simd instructions + # alignments for vz + 2 * (vs - 1) + 1 + for dt, sz in [(np.float32, 11), (np.float64, 7), (np.int32, 11)]: + for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt, + type='binary', + max_size=sz): + exp1 = np.ones_like(inp1) + inp1[...] = np.ones_like(inp1) + inp2[...] = np.zeros_like(inp2) + assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg) + assert_almost_equal(np.add(inp1, 2), exp1 + 2, err_msg=msg) + assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg) + + np.add(inp1, inp2, out=out) + assert_almost_equal(out, exp1, err_msg=msg) + + inp2[...] += np.arange(inp2.size, dtype=dt) + 1 + assert_almost_equal(np.square(inp2), + np.multiply(inp2, inp2), err_msg=msg) + # skip true divide for ints + if dt != np.int32: + assert_almost_equal(np.reciprocal(inp2), + np.divide(1, inp2), err_msg=msg) + + inp1[...] = np.ones_like(inp1) + np.add(inp1, 2, out=out) + assert_almost_equal(out, exp1 + 2, err_msg=msg) + inp2[...] = np.ones_like(inp2) + np.add(2, inp2, out=out) + assert_almost_equal(out, exp1 + 2, err_msg=msg) + + def test_lower_align(self): + # check data that is not aligned to element size + # i.e doubles are aligned to 4 bytes on i386 + d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) + o = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) + assert_almost_equal(d + d, d * 2) + np.add(d, d, out=o) + np.add(np.ones_like(d), d, out=o) + np.add(d, np.ones_like(d), out=o) + np.add(np.ones_like(d), d) + np.add(d, np.ones_like(d)) + + +class TestPower: + def test_small_types(self): + for t in [np.int8, np.int16, np.float16]: + a = t(3) + b = a ** 4 + assert_(b == 81, f"error with {t!r}: got {b!r}") + + def test_large_types(self): + for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]: + a = t(51) + b = a ** 4 + msg = f"error with {t!r}: got {b!r}" + if np.issubdtype(t, np.integer): + assert_(b == 6765201, msg) + else: + assert_almost_equal(b, 6765201, err_msg=msg) + + def test_integers_to_negative_integer_power(self): + # Note that the combination of uint64 with a signed integer + # has common type np.float64. The other combinations should all + # raise a ValueError for integer ** negative integer. + exp = [np.array(-1, dt)[()] for dt in 'bhilq'] + + # 1 ** -1 possible special case + base = [np.array(1, dt)[()] for dt in 'bhilqBHILQ'] + for i1, i2 in itertools.product(base, exp): + if i1.dtype != np.uint64: + assert_raises(ValueError, operator.pow, i1, i2) + else: + res = operator.pow(i1, i2) + assert_(res.dtype.type is np.float64) + assert_almost_equal(res, 1.) + + # -1 ** -1 possible special case + base = [np.array(-1, dt)[()] for dt in 'bhilq'] + for i1, i2 in itertools.product(base, exp): + if i1.dtype != np.uint64: + assert_raises(ValueError, operator.pow, i1, i2) + else: + res = operator.pow(i1, i2) + assert_(res.dtype.type is np.float64) + assert_almost_equal(res, -1.) + + # 2 ** -1 perhaps generic + base = [np.array(2, dt)[()] for dt in 'bhilqBHILQ'] + for i1, i2 in itertools.product(base, exp): + if i1.dtype != np.uint64: + assert_raises(ValueError, operator.pow, i1, i2) + else: + res = operator.pow(i1, i2) + assert_(res.dtype.type is np.float64) + assert_almost_equal(res, .5) + + def test_mixed_types(self): + typelist = [np.int8, np.int16, np.float16, + np.float32, np.float64, np.int8, + np.int16, np.int32, np.int64] + for t1 in typelist: + for t2 in typelist: + a = t1(3) + b = t2(2) + result = a**b + msg = f"error with {t1!r} and {t2!r}:got {result!r}, expected {9!r}" + if np.issubdtype(np.dtype(result), np.integer): + assert_(result == 9, msg) + else: + assert_almost_equal(result, 9, err_msg=msg) + + def test_modular_power(self): + # modular power is not implemented, so ensure it errors + a = 5 + b = 4 + c = 10 + expected = pow(a, b, c) # noqa: F841 + for t in (np.int32, np.float32, np.complex64): + # note that 3-operand power only dispatches on the first argument + assert_raises(TypeError, operator.pow, t(a), b, c) + assert_raises(TypeError, operator.pow, np.array(t(a)), b, c) + + +def floordiv_and_mod(x, y): + return (x // y, x % y) + + +def _signs(dt): + if dt in np.typecodes['UnsignedInteger']: + return (+1,) + else: + return (+1, -1) + + +class TestModulus: + + def test_modulus_basic(self): + dt = np.typecodes['AllInteger'] + np.typecodes['Float'] + for op in [floordiv_and_mod, divmod]: + for dt1, dt2 in itertools.product(dt, dt): + for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)): + fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' + msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) + a = np.array(sg1 * 71, dtype=dt1)[()] + b = np.array(sg2 * 19, dtype=dt2)[()] + div, rem = op(a, b) + assert_equal(div * b + rem, a, err_msg=msg) + if sg2 == -1: + assert_(b < rem <= 0, msg) + else: + assert_(b > rem >= 0, msg) + + def test_float_modulus_exact(self): + # test that float results are exact for small integers. This also + # holds for the same integers scaled by powers of two. + nlst = list(range(-127, 0)) + plst = list(range(1, 128)) + dividend = nlst + [0] + plst + divisor = nlst + plst + arg = list(itertools.product(dividend, divisor)) + tgt = [divmod(*t) for t in arg] + + a, b = np.array(arg, dtype=int).T + # convert exact integer results from Python to float so that + # signed zero can be used, it is checked. + tgtdiv, tgtrem = np.array(tgt, dtype=float).T + tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv) + tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem) + + for op in [floordiv_and_mod, divmod]: + for dt in np.typecodes['Float']: + msg = f'op: {op.__name__}, dtype: {dt}' + fa = a.astype(dt) + fb = b.astype(dt) + # use list comprehension so a_ and b_ are scalars + div, rem = zip(*[op(a_, b_) for a_, b_ in zip(fa, fb)]) + assert_equal(div, tgtdiv, err_msg=msg) + assert_equal(rem, tgtrem, err_msg=msg) + + def test_float_modulus_roundoff(self): + # gh-6127 + dt = np.typecodes['Float'] + for op in [floordiv_and_mod, divmod]: + for dt1, dt2 in itertools.product(dt, dt): + for sg1, sg2 in itertools.product((+1, -1), (+1, -1)): + fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' + msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) + a = np.array(sg1 * 78 * 6e-8, dtype=dt1)[()] + b = np.array(sg2 * 6e-8, dtype=dt2)[()] + div, rem = op(a, b) + # Equal assertion should hold when fmod is used + assert_equal(div * b + rem, a, err_msg=msg) + if sg2 == -1: + assert_(b < rem <= 0, msg) + else: + assert_(b > rem >= 0, msg) + + def test_float_modulus_corner_cases(self): + # Check remainder magnitude. + for dt in np.typecodes['Float']: + b = np.array(1.0, dtype=dt) + a = np.nextafter(np.array(0.0, dtype=dt), -b) + rem = operator.mod(a, b) + assert_(rem <= b, f'dt: {dt}') + rem = operator.mod(-a, -b) + assert_(rem >= -b, f'dt: {dt}') + + # Check nans, inf + with warnings.catch_warnings(), np.errstate(all='ignore'): + for dt in np.typecodes['Float']: + fone = np.array(1.0, dtype=dt) + fzer = np.array(0.0, dtype=dt) + finf = np.array(np.inf, dtype=dt) + fnan = np.array(np.nan, dtype=dt) + rem = operator.mod(fone, fzer) + assert_(np.isnan(rem), f'dt: {dt}') + # MSVC 2008 returns NaN here, so disable the check. + #rem = operator.mod(fone, finf) + #assert_(rem == fone, 'dt: %s' % dt) + rem = operator.mod(fone, fnan) + assert_(np.isnan(rem), f'dt: {dt}') + rem = operator.mod(finf, fone) + assert_(np.isnan(rem), f'dt: {dt}') + for op in [floordiv_and_mod, divmod]: + div, mod = op(fone, fzer) + assert_(np.isinf(div)) and assert_(np.isnan(mod)) + + def test_inplace_floordiv_handling(self): + # issue gh-12927 + # this only applies to in-place floordiv //=, because the output type + # promotes to float which does not fit + a = np.array([1, 2], np.int64) + b = np.array([1, 2], np.uint64) + with pytest.raises(TypeError, + match=r"Cannot cast ufunc 'floor_divide' output from"): + a //= b + +class TestComparison: + def test_comparision_different_types(self): + x = np.array(1) + y = np.array('s') + eq = x == y + neq = x != y + assert eq is np.bool_(False) + assert neq is np.bool_(True) + + +class TestComplexDivision: + def test_zero_division(self): + with np.errstate(all="ignore"): + for t in [np.complex64, np.complex128]: + a = t(0.0) + b = t(1.0) + assert_(np.isinf(b / a)) + b = t(complex(np.inf, np.inf)) + assert_(np.isinf(b / a)) + b = t(complex(np.inf, np.nan)) + assert_(np.isinf(b / a)) + b = t(complex(np.nan, np.inf)) + assert_(np.isinf(b / a)) + b = t(complex(np.nan, np.nan)) + assert_(np.isnan(b / a)) + b = t(0.) + assert_(np.isnan(b / a)) + + def test_signed_zeros(self): + with np.errstate(all="ignore"): + for t in [np.complex64, np.complex128]: + # tupled (numerator, denominator, expected) + # for testing as expected == numerator/denominator + data = ( + (( 0.0, -1.0), ( 0.0, 1.0), (-1.0, -0.0)), + (( 0.0, -1.0), ( 0.0, -1.0), ( 1.0, -0.0)), + (( 0.0, -1.0), (-0.0, -1.0), ( 1.0, 0.0)), + (( 0.0, -1.0), (-0.0, 1.0), (-1.0, 0.0)), + (( 0.0, 1.0), ( 0.0, -1.0), (-1.0, 0.0)), + (( 0.0, -1.0), ( 0.0, -1.0), ( 1.0, -0.0)), + ((-0.0, -1.0), ( 0.0, -1.0), ( 1.0, -0.0)), + ((-0.0, 1.0), ( 0.0, -1.0), (-1.0, -0.0)) + ) + for cases in data: + n = cases[0] + d = cases[1] + ex = cases[2] + result = t(complex(n[0], n[1])) / t(complex(d[0], d[1])) + # check real and imag parts separately to avoid comparison + # in array context, which does not account for signed zeros + assert_equal(result.real, ex[0]) + assert_equal(result.imag, ex[1]) + + def test_branches(self): + with np.errstate(all="ignore"): + for t in [np.complex64, np.complex128]: + # tupled (numerator, denominator, expected) + # for testing as expected == numerator/denominator + data = [] + + # trigger branch: real(fabs(denom)) > imag(fabs(denom)) + # followed by else condition as neither are == 0 + data.append((( 2.0, 1.0), ( 2.0, 1.0), (1.0, 0.0))) + + # trigger branch: real(fabs(denom)) > imag(fabs(denom)) + # followed by if condition as both are == 0 + # is performed in test_zero_division(), so this is skipped + + # trigger else if branch: real(fabs(denom)) < imag(fabs(denom)) + data.append(((1.0, 2.0), (1.0, 2.0), (1.0, 0.0))) + + for cases in data: + n = cases[0] + d = cases[1] + ex = cases[2] + result = t(complex(n[0], n[1])) / t(complex(d[0], d[1])) + # check real and imag parts separately to avoid comparison + # in array context, which does not account for signed zeros + assert_equal(result.real, ex[0]) + assert_equal(result.imag, ex[1]) + + +class TestConversion: + def test_int_from_long(self): + l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18] + li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18] + for T in [None, np.float64, np.int64]: + a = np.array(l, dtype=T) + assert_equal([int(_m) for _m in a], li) + + a = np.array(l[:3], dtype=np.uint64) + assert_equal([int(_m) for _m in a], li[:3]) + + def test_iinfo_long_values(self): + for code in 'bBhH': + with pytest.raises(OverflowError): + np.array(np.iinfo(code).max + 1, dtype=code) + + for code in np.typecodes['AllInteger']: + res = np.array(np.iinfo(code).max, dtype=code) + tgt = np.iinfo(code).max + assert_(res == tgt) + + for code in np.typecodes['AllInteger']: + res = np.dtype(code).type(np.iinfo(code).max) + tgt = np.iinfo(code).max + assert_(res == tgt) + + def test_int_raise_behaviour(self): + def overflow_error_func(dtype): + dtype(np.iinfo(dtype).max + 1) + + for code in [np.int_, np.uint, np.longlong, np.ulonglong]: + assert_raises(OverflowError, overflow_error_func, code) + + def test_int_from_infinite_longdouble(self): + # gh-627 + x = np.longdouble(np.inf) + assert_raises(OverflowError, int, x) + with pytest.warns(ComplexWarning): + x = np.clongdouble(np.inf) + assert_raises(OverflowError, int, x) + + @pytest.mark.skipif(not IS_PYPY, reason="Test is PyPy only (gh-9972)") + def test_int_from_infinite_longdouble___int__(self): + x = np.longdouble(np.inf) + assert_raises(OverflowError, x.__int__) + with pytest.warns(ComplexWarning): + x = np.clongdouble(np.inf) + assert_raises(OverflowError, x.__int__) + + @pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), + reason="long double is same as double") + @pytest.mark.skipif(platform.machine().startswith("ppc"), + reason="IBM double double") + def test_int_from_huge_longdouble(self): + # Produce a longdouble that would overflow a double, + # use exponent that avoids bug in Darwin pow function. + exp = np.finfo(np.double).maxexp - 1 + huge_ld = 2 * 1234 * np.longdouble(2) ** exp + huge_i = 2 * 1234 * 2 ** exp + assert_(huge_ld != np.inf) + assert_equal(int(huge_ld), huge_i) + + def test_int_from_longdouble(self): + x = np.longdouble(1.5) + assert_equal(int(x), 1) + x = np.longdouble(-10.5) + assert_equal(int(x), -10) + + def test_numpy_scalar_relational_operators(self): + # All integer + for dt1 in np.typecodes['AllInteger']: + assert_(1 > np.array(0, dtype=dt1)[()], f"type {dt1} failed") + assert_(not 1 < np.array(0, dtype=dt1)[()], f"type {dt1} failed") + + for dt2 in np.typecodes['AllInteger']: + assert_(np.array(1, dtype=dt1)[()] > np.array(0, dtype=dt2)[()], + f"type {dt1} and {dt2} failed") + assert_(not np.array(1, dtype=dt1)[()] < np.array(0, dtype=dt2)[()], + f"type {dt1} and {dt2} failed") + + # Unsigned integers + for dt1 in 'BHILQP': + assert_(-1 < np.array(1, dtype=dt1)[()], f"type {dt1} failed") + assert_(not -1 > np.array(1, dtype=dt1)[()], f"type {dt1} failed") + assert_(-1 != np.array(1, dtype=dt1)[()], f"type {dt1} failed") + + # unsigned vs signed + for dt2 in 'bhilqp': + assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()], + f"type {dt1} and {dt2} failed") + assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()], + f"type {dt1} and {dt2} failed") + assert_(np.array(1, dtype=dt1)[()] != np.array(-1, dtype=dt2)[()], + f"type {dt1} and {dt2} failed") + + # Signed integers and floats + for dt1 in 'bhlqp' + np.typecodes['Float']: + assert_(1 > np.array(-1, dtype=dt1)[()], f"type {dt1} failed") + assert_(not 1 < np.array(-1, dtype=dt1)[()], f"type {dt1} failed") + assert_(-1 == np.array(-1, dtype=dt1)[()], f"type {dt1} failed") + + for dt2 in 'bhlqp' + np.typecodes['Float']: + assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()], + f"type {dt1} and {dt2} failed") + assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()], + f"type {dt1} and {dt2} failed") + assert_(np.array(-1, dtype=dt1)[()] == np.array(-1, dtype=dt2)[()], + f"type {dt1} and {dt2} failed") + + def test_scalar_comparison_to_none(self): + # Scalars should just return False and not give a warnings. + # The comparisons are flagged by pep8, ignore that. + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', FutureWarning) + assert_(not np.float32(1) == None) # noqa: E711 + assert_(not np.str_('test') == None) # noqa: E711 + # This is dubious (see below): + assert_(not np.datetime64('NaT') == None) # noqa: E711 + + assert_(np.float32(1) != None) # noqa: E711 + assert_(np.str_('test') != None) # noqa: E711 + # This is dubious (see below): + assert_(np.datetime64('NaT') != None) # noqa: E711 + assert_(len(w) == 0) + + # For documentation purposes, this is why the datetime is dubious. + # At the time of deprecation this was no behaviour change, but + # it has to be considered when the deprecations are done. + assert_(np.equal(np.datetime64('NaT'), None)) + + +#class TestRepr: +# def test_repr(self): +# for t in types: +# val = t(1197346475.0137341) +# val_repr = repr(val) +# val2 = eval(val_repr) +# assert_equal( val, val2 ) + + +class TestRepr: + def _test_type_repr(self, t): + finfo = np.finfo(t) + last_fraction_bit_idx = finfo.nexp + finfo.nmant + last_exponent_bit_idx = finfo.nexp + storage_bytes = np.dtype(t).itemsize * 8 + # could add some more types to the list below + for which in ['small denorm', 'small norm']: + # Values from https://en.wikipedia.org/wiki/IEEE_754 + constr = np.array([0x00] * storage_bytes, dtype=np.uint8) + if which == 'small denorm': + byte = last_fraction_bit_idx // 8 + bytebit = 7 - (last_fraction_bit_idx % 8) + constr[byte] = 1 << bytebit + elif which == 'small norm': + byte = last_exponent_bit_idx // 8 + bytebit = 7 - (last_exponent_bit_idx % 8) + constr[byte] = 1 << bytebit + else: + raise ValueError('hmm') + val = constr.view(t)[0] + val_repr = repr(val) + val2 = t(eval(val_repr)) + if not (val2 == 0 and val < 1e-100): + assert_equal(val, val2) + + def test_float_repr(self): + # long double test cannot work, because eval goes through a python + # float + for t in [np.float32, np.float64]: + self._test_type_repr(t) + + +if not IS_PYPY: + # sys.getsizeof() is not valid on PyPy + class TestSizeOf: + + def test_equal_nbytes(self): + for type in types: + x = type(0) + assert_(sys.getsizeof(x) > x.nbytes) + + def test_error(self): + d = np.float32() + assert_raises(TypeError, d.__sizeof__, "a") + + +class TestMultiply: + def test_seq_repeat(self): + # Test that basic sequences get repeated when multiplied with + # numpy integers. And errors are raised when multiplied with others. + # Some of this behaviour may be controversial and could be open for + # change. + accepted_types = set(np.typecodes["AllInteger"]) + deprecated_types = {'?'} + forbidden_types = ( + set(np.typecodes["All"]) - accepted_types - deprecated_types) + forbidden_types -= {'V'} # can't default-construct void scalars + + for seq_type in (list, tuple): + seq = seq_type([1, 2, 3]) + for numpy_type in accepted_types: + i = np.dtype(numpy_type).type(2) + assert_equal(seq * i, seq * int(i)) + assert_equal(i * seq, int(i) * seq) + + for numpy_type in deprecated_types: + i = np.dtype(numpy_type).type() + with assert_raises(TypeError): + operator.mul(seq, i) + + for numpy_type in forbidden_types: + i = np.dtype(numpy_type).type() + assert_raises(TypeError, operator.mul, seq, i) + assert_raises(TypeError, operator.mul, i, seq) + + def test_no_seq_repeat_basic_array_like(self): + # Test that an array-like which does not know how to be multiplied + # does not attempt sequence repeat (raise TypeError). + # See also gh-7428. + class ArrayLike: + def __init__(self, arr): + self.arr = arr + + def __array__(self, dtype=None, copy=None): + return self.arr + + # Test for simple ArrayLike above and memoryviews (original report) + for arr_like in (ArrayLike(np.ones(3)), memoryview(np.ones(3))): + assert_array_equal(arr_like * np.float32(3.), np.full(3, 3.)) + assert_array_equal(np.float32(3.) * arr_like, np.full(3, 3.)) + assert_array_equal(arr_like * np.int_(3), np.full(3, 3)) + assert_array_equal(np.int_(3) * arr_like, np.full(3, 3)) + + +class TestNegative: + def test_exceptions(self): + a = np.ones((), dtype=np.bool)[()] + assert_raises(TypeError, operator.neg, a) + + def test_result(self): + types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) + for dt in types: + a = np.ones((), dtype=dt)[()] + if dt in np.typecodes['UnsignedInteger']: + st = np.dtype(dt).type + max = st(np.iinfo(dt).max) + assert_equal(operator.neg(a), max) + else: + assert_equal(operator.neg(a) + a, 0) + +class TestSubtract: + def test_exceptions(self): + a = np.ones((), dtype=np.bool)[()] + assert_raises(TypeError, operator.sub, a, a) + + def test_result(self): + types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) + for dt in types: + a = np.ones((), dtype=dt)[()] + assert_equal(operator.sub(a, a), 0) + + +class TestAbs: + def _test_abs_func(self, absfunc, test_dtype): + x = test_dtype(-1.5) + assert_equal(absfunc(x), 1.5) + x = test_dtype(0.0) + res = absfunc(x) + # assert_equal() checks zero signedness + assert_equal(res, 0.0) + x = test_dtype(-0.0) + res = absfunc(x) + assert_equal(res, 0.0) + + x = test_dtype(np.finfo(test_dtype).max) + assert_equal(absfunc(x), x.real) + + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) + x = test_dtype(np.finfo(test_dtype).tiny) + assert_equal(absfunc(x), x.real) + + x = test_dtype(np.finfo(test_dtype).min) + assert_equal(absfunc(x), -x.real) + + @pytest.mark.parametrize("dtype", floating_types + complex_floating_types) + def test_builtin_abs(self, dtype): + if ( + sys.platform == "cygwin" and dtype == np.clongdouble and + ( + _pep440.parse(platform.release().split("-")[0]) + < _pep440.Version("3.3.0") + ) + ): + pytest.xfail( + reason="absl is computed in double precision on cygwin < 3.3" + ) + self._test_abs_func(abs, dtype) + + @pytest.mark.parametrize("dtype", floating_types + complex_floating_types) + def test_numpy_abs(self, dtype): + if ( + sys.platform == "cygwin" and dtype == np.clongdouble and + ( + _pep440.parse(platform.release().split("-")[0]) + < _pep440.Version("3.3.0") + ) + ): + pytest.xfail( + reason="absl is computed in double precision on cygwin < 3.3" + ) + self._test_abs_func(np.abs, dtype) + +class TestBitShifts: + + @pytest.mark.parametrize('type_code', np.typecodes['AllInteger']) + @pytest.mark.parametrize('op', + [operator.rshift, operator.lshift], ids=['>>', '<<']) + def test_shift_all_bits(self, type_code, op): + """Shifts where the shift amount is the width of the type or wider """ + # gh-2449 + dt = np.dtype(type_code) + nbits = dt.itemsize * 8 + for val in [5, -5]: + for shift in [nbits, nbits + 4]: + val_scl = np.array(val).astype(dt)[()] + shift_scl = dt.type(shift) + res_scl = op(val_scl, shift_scl) + if val_scl < 0 and op is operator.rshift: + # sign bit is preserved + assert_equal(res_scl, -1) + else: + assert_equal(res_scl, 0) + + # Result on scalars should be the same as on arrays + val_arr = np.array([val_scl] * 32, dtype=dt) + shift_arr = np.array([shift] * 32, dtype=dt) + res_arr = op(val_arr, shift_arr) + assert_equal(res_arr, res_scl) + + +class TestHash: + @pytest.mark.parametrize("type_code", np.typecodes['AllInteger']) + def test_integer_hashes(self, type_code): + scalar = np.dtype(type_code).type + for i in range(128): + assert hash(i) == hash(scalar(i)) + + @pytest.mark.parametrize("type_code", np.typecodes['AllFloat']) + def test_float_and_complex_hashes(self, type_code): + scalar = np.dtype(type_code).type + for val in [np.pi, np.inf, 3, 6.]: + numpy_val = scalar(val) + # Cast back to Python, in case the NumPy scalar has less precision + if numpy_val.dtype.kind == 'c': + val = complex(numpy_val) + else: + val = float(numpy_val) + assert val == numpy_val + assert hash(val) == hash(numpy_val) + + if hash(float(np.nan)) != hash(float(np.nan)): + # If Python distinguishes different NaNs we do so too (gh-18833) + assert hash(scalar(np.nan)) != hash(scalar(np.nan)) + + @pytest.mark.parametrize("type_code", np.typecodes['Complex']) + def test_complex_hashes(self, type_code): + # Test some complex valued hashes specifically: + scalar = np.dtype(type_code).type + for val in [np.pi + 1j, np.inf - 3j, 3j, 6. + 1j]: + numpy_val = scalar(val) + assert hash(complex(numpy_val)) == hash(numpy_val) + + +@contextlib.contextmanager +def recursionlimit(n): + o = sys.getrecursionlimit() + try: + sys.setrecursionlimit(n) + yield + finally: + sys.setrecursionlimit(o) + + +@given(sampled_from(objecty_things), + sampled_from(binary_operators_for_scalar_ints), + sampled_from(types + [rational])) +@pytest.mark.thread_unsafe(reason="sets recursion limit globally") +def test_operator_object_left(o, op, type_): + try: + with recursionlimit(200): + op(o, type_(1)) + except TypeError: + pass + + +@given(sampled_from(objecty_things), + sampled_from(binary_operators_for_scalar_ints), + sampled_from(types + [rational])) +@pytest.mark.thread_unsafe(reason="sets recursion limit globally") +def test_operator_object_right(o, op, type_): + try: + with recursionlimit(200): + op(type_(1), o) + except TypeError: + pass + + +@given(sampled_from(binary_operators_for_scalars), + sampled_from(types), + sampled_from(types)) +def test_operator_scalars(op, type1, type2): + try: + op(type1(1), type2(1)) + except TypeError: + pass + + +@pytest.mark.parametrize("op", binary_operators_for_scalars) +@pytest.mark.parametrize("sctype", [np.longdouble, np.clongdouble]) +def test_longdouble_operators_with_obj(sctype, op): + # This is/used to be tricky, because NumPy generally falls back to + # using the ufunc via `np.asarray()`, this effectively might do: + # longdouble + None + # -> asarray(longdouble) + np.array(None, dtype=object) + # -> asarray(longdouble).astype(object) + np.array(None, dtype=object) + # And after getting the scalars in the inner loop: + # -> longdouble + None + # + # That would recurse infinitely. Other scalars return the python object + # on cast, so this type of things works OK. + # + # As of NumPy 2.1, this has been consolidated into the np.generic binops + # and now checks `.item()`. That also allows the below path to work now. + try: + op(sctype(3), None) + except TypeError: + pass + try: + op(None, sctype(3)) + except TypeError: + pass + + +@pytest.mark.parametrize("op", [operator.add, operator.pow, operator.sub]) +@pytest.mark.parametrize("sctype", [np.longdouble, np.clongdouble]) +def test_longdouble_with_arrlike(sctype, op): + # As of NumPy 2.1, longdouble behaves like other types and can coerce + # e.g. lists. (Not necessarily better, but consistent.) + assert_array_equal(op(sctype(3), [1, 2]), op(3, np.array([1, 2]))) + assert_array_equal(op([1, 2], sctype(3)), op(np.array([1, 2]), 3)) + + +@pytest.mark.parametrize("op", binary_operators_for_scalars) +@pytest.mark.parametrize("sctype", [np.longdouble, np.clongdouble]) +@np.errstate(all="ignore") +def test_longdouble_operators_with_large_int(sctype, op): + # (See `test_longdouble_operators_with_obj` for why longdouble is special) + # NEP 50 means that the result is clearly a (c)longdouble here: + if sctype == np.clongdouble and op in [operator.mod, operator.floordiv]: + # The above operators are not support for complex though... + with pytest.raises(TypeError): + op(sctype(3), 2**64) + with pytest.raises(TypeError): + op(sctype(3), 2**64) + else: + assert op(sctype(3), -2**64) == op(sctype(3), sctype(-2**64)) + assert op(2**64, sctype(3)) == op(sctype(2**64), sctype(3)) + + +@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) +@pytest.mark.parametrize("operation", [ + lambda min, max: max + max, + lambda min, max: min - max, + lambda min, max: max * max], ids=["+", "-", "*"]) +def test_scalar_integer_operation_overflow(dtype, operation): + st = np.dtype(dtype).type + min = st(np.iinfo(dtype).min) + max = st(np.iinfo(dtype).max) + + with pytest.warns(RuntimeWarning, match="overflow encountered"): + operation(min, max) + + +@pytest.mark.parametrize("dtype", np.typecodes["Integer"]) +@pytest.mark.parametrize("operation", [ + lambda min, neg_1: -min, + lambda min, neg_1: abs(min), + lambda min, neg_1: min * neg_1, + pytest.param(lambda min, neg_1: min // neg_1, + marks=pytest.mark.skip(reason="broken on some platforms"))], + ids=["neg", "abs", "*", "//"]) +def test_scalar_signed_integer_overflow(dtype, operation): + # The minimum signed integer can "overflow" for some additional operations + st = np.dtype(dtype).type + min = st(np.iinfo(dtype).min) + neg_1 = st(-1) + + with pytest.warns(RuntimeWarning, match="overflow encountered"): + operation(min, neg_1) + + +@pytest.mark.parametrize("dtype", np.typecodes["UnsignedInteger"]) +def test_scalar_unsigned_integer_overflow(dtype): + val = np.dtype(dtype).type(8) + with pytest.warns(RuntimeWarning, match="overflow encountered"): + -val + + zero = np.dtype(dtype).type(0) + -zero # does not warn + +@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) +@pytest.mark.parametrize("operation", [ + lambda val, zero: val // zero, + lambda val, zero: val % zero, ], ids=["//", "%"]) +def test_scalar_integer_operation_divbyzero(dtype, operation): + st = np.dtype(dtype).type + val = st(100) + zero = st(0) + + with pytest.warns(RuntimeWarning, match="divide by zero"): + operation(val, zero) + + +ops_with_names = [ + ("__lt__", "__gt__", operator.lt, True), + ("__le__", "__ge__", operator.le, True), + ("__eq__", "__eq__", operator.eq, True), + # Note __op__ and __rop__ may be identical here: + ("__ne__", "__ne__", operator.ne, True), + ("__gt__", "__lt__", operator.gt, True), + ("__ge__", "__le__", operator.ge, True), + ("__floordiv__", "__rfloordiv__", operator.floordiv, False), + ("__truediv__", "__rtruediv__", operator.truediv, False), + ("__add__", "__radd__", operator.add, False), + ("__mod__", "__rmod__", operator.mod, False), + ("__mul__", "__rmul__", operator.mul, False), + ("__pow__", "__rpow__", operator.pow, False), + ("__sub__", "__rsub__", operator.sub, False), +] + + +@pytest.mark.parametrize(["__op__", "__rop__", "op", "cmp"], ops_with_names) +@pytest.mark.parametrize("sctype", [np.float32, np.float64, np.longdouble]) +def test_subclass_deferral(sctype, __op__, __rop__, op, cmp): + """ + This test covers scalar subclass deferral. Note that this is exceedingly + complicated, especially since it tends to fall back to the array paths and + these additionally add the "array priority" mechanism. + + The behaviour was modified subtly in 1.22 (to make it closer to how Python + scalars work). Due to its complexity and the fact that subclassing NumPy + scalars is probably a bad idea to begin with. There is probably room + for adjustments here. + """ + class myf_simple1(sctype): + pass + + class myf_simple2(sctype): + pass + + def op_func(self, other): + return __op__ + + def rop_func(self, other): + return __rop__ + + myf_op = type("myf_op", (sctype,), {__op__: op_func, __rop__: rop_func}) + + # inheritance has to override, or this is correctly lost: + res = op(myf_simple1(1), myf_simple2(2)) + assert type(res) == sctype or type(res) == np.bool + assert op(myf_simple1(1), myf_simple2(2)) == op(1, 2) # inherited + + # Two independent subclasses do not really define an order. This could + # be attempted, but we do not since Python's `int` does neither: + assert op(myf_op(1), myf_simple1(2)) == __op__ + assert op(myf_simple1(1), myf_op(2)) == op(1, 2) # inherited + + +def test_longdouble_complex(): + # Simple test to check longdouble and complex combinations, since these + # need to go through promotion, which longdouble needs to be careful about. + x = np.longdouble(1) + assert x + 1j == 1 + 1j + assert 1j + x == 1 + 1j + + +@pytest.mark.parametrize(["__op__", "__rop__", "op", "cmp"], ops_with_names) +@pytest.mark.parametrize("subtype", [float, int, complex, np.float16]) +def test_pyscalar_subclasses(subtype, __op__, __rop__, op, cmp): + # This tests that python scalar subclasses behave like a float64 (if they + # don't override it). + # In an earlier version of NEP 50, they behaved like the Python buildins. + def op_func(self, other): + return __op__ + + def rop_func(self, other): + return __rop__ + + # Check that deferring is indicated using `__array_ufunc__`: + myt = type("myt", (subtype,), + {__op__: op_func, __rop__: rop_func, "__array_ufunc__": None}) + + # Just like normally, we should never presume we can modify the float. + assert op(myt(1), np.float64(2)) == __op__ + assert op(np.float64(1), myt(2)) == __rop__ + + if op in {operator.mod, operator.floordiv} and subtype == complex: + return # module is not support for complex. Do not test. + + if __rop__ == __op__: + return + + # When no deferring is indicated, subclasses are handled normally. + myt = type("myt", (subtype,), {__rop__: rop_func}) + behaves_like = lambda x: np.array(subtype(x))[()] + + # Check for float32, as a float subclass float64 may behave differently + res = op(myt(1), np.float16(2)) + expected = op(behaves_like(1), np.float16(2)) + assert res == expected + assert type(res) == type(expected) + res = op(np.float32(2), myt(1)) + expected = op(np.float32(2), behaves_like(1)) + assert res == expected + assert type(res) == type(expected) + + # Same check for longdouble (compare via dtype to accept float64 when + # longdouble has the identical size), which is currently not perfectly + # consistent. + res = op(myt(1), np.longdouble(2)) + expected = op(behaves_like(1), np.longdouble(2)) + assert res == expected + assert np.dtype(type(res)) == np.dtype(type(expected)) + res = op(np.float32(2), myt(1)) + expected = op(np.float32(2), behaves_like(1)) + assert res == expected + assert np.dtype(type(res)) == np.dtype(type(expected)) + + +def test_truediv_int(): + # This should work, as the result is float: + assert np.uint8(3) / 123454 == np.float64(3) / 123454 + + +@pytest.mark.slow +@pytest.mark.parametrize("op", + # TODO: Power is a bit special, but here mostly bools seem to behave oddly + [op for op in binary_operators_for_scalars if op is not operator.pow]) +@pytest.mark.parametrize("sctype", types) +@pytest.mark.parametrize("other_type", [float, int, complex]) +@pytest.mark.parametrize("rop", [True, False]) +def test_scalar_matches_array_op_with_pyscalar(op, sctype, other_type, rop): + # Check that the ufunc path matches by coercing to an array explicitly + val1 = sctype(2) + val2 = other_type(2) + + if rop: + _op = op + op = lambda x, y: _op(y, x) + + try: + res = op(val1, val2) + except TypeError: + try: + expected = op(np.asarray(val1), val2) + raise AssertionError("ufunc didn't raise.") + except TypeError: + return + else: + expected = op(np.asarray(val1), val2) + + # Note that we only check dtype equivalency, as ufuncs may pick the lower + # dtype if they are equivalent. + assert res == expected + if isinstance(val1, float) and other_type is complex and rop: + # Python complex accepts float subclasses, so we don't get a chance + # and the result may be a Python complex (thus, the `np.array()``) + assert np.array(res).dtype == expected.dtype + else: + assert res.dtype == expected.dtype diff --git a/local_packages/numpy/_core/tests/test_scalarprint.py b/local_packages/numpy/_core/tests/test_scalarprint.py new file mode 100644 index 0000000..38ed778 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_scalarprint.py @@ -0,0 +1,403 @@ +""" Test printing of scalar types. + +""" +import platform + +import pytest + +import numpy as np +from numpy.testing import IS_MUSL, assert_, assert_equal, assert_raises + + +class TestRealScalars: + def test_str(self): + svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan] + styps = [np.float16, np.float32, np.float64, np.longdouble] + wanted = [ + ['0.0', '0.0', '0.0', '0.0' ], # noqa: E202 + ['-0.0', '-0.0', '-0.0', '-0.0'], + ['1.0', '1.0', '1.0', '1.0' ], # noqa: E202 + ['-1.0', '-1.0', '-1.0', '-1.0'], + ['inf', 'inf', 'inf', 'inf' ], # noqa: E202 + ['-inf', '-inf', '-inf', '-inf'], + ['nan', 'nan', 'nan', 'nan' ]] # noqa: E202 + + for wants, val in zip(wanted, svals): + for want, styp in zip(wants, styps): + msg = f'for str({np.dtype(styp).name}({val!r}))' + assert_equal(str(styp(val)), want, err_msg=msg) + + def test_scalar_cutoffs(self): + # test that both the str and repr of np.float64 behaves + # like python floats in python3. + def check(v): + assert_equal(str(np.float64(v)), str(v)) + assert_equal(str(np.float64(v)), repr(v)) + assert_equal(repr(np.float64(v)), f"np.float64({v!r})") + assert_equal(repr(np.float64(v)), f"np.float64({v})") + + # check we use the same number of significant digits + check(1.12345678901234567890) + check(0.0112345678901234567890) + + # check switch from scientific output to positional and back + check(1e-5) + check(1e-4) + check(1e15) + check(1e16) + + test_cases_gh_28679 = [ + (np.half, -0.000099, "-9.9e-05"), + (np.half, 0.0001, "0.0001"), + (np.half, 999, "999.0"), + (np.half, -1000, "-1e+03"), + (np.single, 0.000099, "9.9e-05"), + (np.single, -0.000100001, "-0.000100001"), + (np.single, 999999, "999999.0"), + (np.single, -1000000, "-1e+06") + ] + + @pytest.mark.parametrize("dtype, input_val, expected_str", test_cases_gh_28679) + def test_gh_28679(self, dtype, input_val, expected_str): + # test cutoff to exponent notation for half and single + assert_equal(str(dtype(input_val)), expected_str) + + test_cases_legacy_2_2 = [ + (np.half(65504), "65500.0"), + (np.single(1.e15), "1000000000000000.0"), + (np.single(1.e16), "1e+16"), + ] + + @pytest.mark.parametrize("input_val, expected_str", test_cases_legacy_2_2) + def test_legacy_2_2_mode(self, input_val, expected_str): + # test legacy cutoff to exponent notation for half and single + with np.printoptions(legacy='2.2'): + assert_equal(str(input_val), expected_str) + + def test_dragon4(self): + # these tests are adapted from Ryan Juckett's dragon4 implementation, + # see dragon4.c for details. + + fpos32 = lambda x, **k: np.format_float_positional(np.float32(x), **k) + fsci32 = lambda x, **k: np.format_float_scientific(np.float32(x), **k) + fpos64 = lambda x, **k: np.format_float_positional(np.float64(x), **k) + fsci64 = lambda x, **k: np.format_float_scientific(np.float64(x), **k) + + preckwd = lambda prec: {'unique': False, 'precision': prec} + + assert_equal(fpos32('1.0'), "1.") + assert_equal(fsci32('1.0'), "1.e+00") + assert_equal(fpos32('10.234'), "10.234") + assert_equal(fpos32('-10.234'), "-10.234") + assert_equal(fsci32('10.234'), "1.0234e+01") + assert_equal(fsci32('-10.234'), "-1.0234e+01") + assert_equal(fpos32('1000.0'), "1000.") + assert_equal(fpos32('1.0', precision=0), "1.") + assert_equal(fsci32('1.0', precision=0), "1.e+00") + assert_equal(fpos32('10.234', precision=0), "10.") + assert_equal(fpos32('-10.234', precision=0), "-10.") + assert_equal(fsci32('10.234', precision=0), "1.e+01") + assert_equal(fsci32('-10.234', precision=0), "-1.e+01") + assert_equal(fpos32('10.234', precision=2), "10.23") + assert_equal(fsci32('-10.234', precision=2), "-1.02e+01") + assert_equal(fsci64('9.9999999999999995e-08', **preckwd(16)), + '9.9999999999999995e-08') + assert_equal(fsci64('9.8813129168249309e-324', **preckwd(16)), + '9.8813129168249309e-324') + assert_equal(fsci64('9.9999999999999694e-311', **preckwd(16)), + '9.9999999999999694e-311') + + # test rounding + # 3.1415927410 is closest float32 to np.pi + assert_equal(fpos32('3.14159265358979323846', **preckwd(10)), + "3.1415927410") + assert_equal(fsci32('3.14159265358979323846', **preckwd(10)), + "3.1415927410e+00") + assert_equal(fpos64('3.14159265358979323846', **preckwd(10)), + "3.1415926536") + assert_equal(fsci64('3.14159265358979323846', **preckwd(10)), + "3.1415926536e+00") + # 299792448 is closest float32 to 299792458 + assert_equal(fpos32('299792458.0', **preckwd(5)), "299792448.00000") + assert_equal(fsci32('299792458.0', **preckwd(5)), "2.99792e+08") + assert_equal(fpos64('299792458.0', **preckwd(5)), "299792458.00000") + assert_equal(fsci64('299792458.0', **preckwd(5)), "2.99792e+08") + + assert_equal(fpos32('3.14159265358979323846', **preckwd(25)), + "3.1415927410125732421875000") + assert_equal(fpos64('3.14159265358979323846', **preckwd(50)), + "3.14159265358979311599796346854418516159057617187500") + assert_equal(fpos64('3.14159265358979323846'), "3.141592653589793") + + # smallest numbers + assert_equal(fpos32(0.5**(126 + 23), unique=False, precision=149), + "0.00000000000000000000000000000000000000000000140129846432" + "4817070923729583289916131280261941876515771757068283889791" + "08268586060148663818836212158203125") + + assert_equal(fpos64(5e-324, unique=False, precision=1074), + "0.00000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000049406564584124654417656" + "8792868221372365059802614324764425585682500675507270208751" + "8652998363616359923797965646954457177309266567103559397963" + "9877479601078187812630071319031140452784581716784898210368" + "8718636056998730723050006387409153564984387312473397273169" + "6151400317153853980741262385655911710266585566867681870395" + "6031062493194527159149245532930545654440112748012970999954" + "1931989409080416563324524757147869014726780159355238611550" + "1348035264934720193790268107107491703332226844753335720832" + "4319360923828934583680601060115061698097530783422773183292" + "4790498252473077637592724787465608477820373446969953364701" + "7972677717585125660551199131504891101451037862738167250955" + "8373897335989936648099411642057026370902792427675445652290" + "87538682506419718265533447265625") + + # largest numbers + f32x = np.finfo(np.float32).max + assert_equal(fpos32(f32x, **preckwd(0)), + "340282346638528859811704183484516925440.") + assert_equal(fpos64(np.finfo(np.float64).max, **preckwd(0)), + "1797693134862315708145274237317043567980705675258449965989" + "1747680315726078002853876058955863276687817154045895351438" + "2464234321326889464182768467546703537516986049910576551282" + "0762454900903893289440758685084551339423045832369032229481" + "6580855933212334827479782620414472316873817718091929988125" + "0404026184124858368.") + # Warning: In unique mode only the integer digits necessary for + # uniqueness are computed, the rest are 0. + assert_equal(fpos32(f32x), + "340282350000000000000000000000000000000.") + + # Further tests of zero-padding vs rounding in different combinations + # of unique, fractional, precision, min_digits + # precision can only reduce digits, not add them. + # min_digits can only extend digits, not reduce them. + assert_equal(fpos32(f32x, unique=True, fractional=True, precision=0), + "340282350000000000000000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=True, precision=4), + "340282350000000000000000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=True, min_digits=0), + "340282346638528859811704183484516925440.") + assert_equal(fpos32(f32x, unique=True, fractional=True, min_digits=4), + "340282346638528859811704183484516925440.0000") + assert_equal(fpos32(f32x, unique=True, fractional=True, + min_digits=4, precision=4), + "340282346638528859811704183484516925440.0000") + assert_raises(ValueError, fpos32, f32x, unique=True, fractional=False, + precision=0) + assert_equal(fpos32(f32x, unique=True, fractional=False, precision=4), + "340300000000000000000000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=False, precision=20), + "340282350000000000000000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=False, min_digits=4), + "340282350000000000000000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=False, + min_digits=20), + "340282346638528859810000000000000000000.") + assert_equal(fpos32(f32x, unique=True, fractional=False, + min_digits=15), + "340282346638529000000000000000000000000.") + assert_equal(fpos32(f32x, unique=False, fractional=False, precision=4), + "340300000000000000000000000000000000000.") + # test that unique rounding is preserved when precision is supplied + # but no extra digits need to be printed (gh-18609) + a = np.float64.fromhex('-1p-97') + assert_equal(fsci64(a, unique=True), '-6.310887241768095e-30') + assert_equal(fsci64(a, unique=False, precision=15), + '-6.310887241768094e-30') + assert_equal(fsci64(a, unique=True, precision=15), + '-6.310887241768095e-30') + assert_equal(fsci64(a, unique=True, min_digits=15), + '-6.310887241768095e-30') + assert_equal(fsci64(a, unique=True, precision=15, min_digits=15), + '-6.310887241768095e-30') + # adds/remove digits in unique mode with unbiased rnding + assert_equal(fsci64(a, unique=True, precision=14), + '-6.31088724176809e-30') + assert_equal(fsci64(a, unique=True, min_digits=16), + '-6.3108872417680944e-30') + assert_equal(fsci64(a, unique=True, precision=16), + '-6.310887241768095e-30') + assert_equal(fsci64(a, unique=True, min_digits=14), + '-6.310887241768095e-30') + # test min_digits in unique mode with different rounding cases + assert_equal(fsci64('1e120', min_digits=3), '1.000e+120') + assert_equal(fsci64('1e100', min_digits=3), '1.000e+100') + + # test trailing zeros + assert_equal(fpos32('1.0', unique=False, precision=3), "1.000") + assert_equal(fpos64('1.0', unique=False, precision=3), "1.000") + assert_equal(fsci32('1.0', unique=False, precision=3), "1.000e+00") + assert_equal(fsci64('1.0', unique=False, precision=3), "1.000e+00") + assert_equal(fpos32('1.5', unique=False, precision=3), "1.500") + assert_equal(fpos64('1.5', unique=False, precision=3), "1.500") + assert_equal(fsci32('1.5', unique=False, precision=3), "1.500e+00") + assert_equal(fsci64('1.5', unique=False, precision=3), "1.500e+00") + # gh-10713 + assert_equal(fpos64('324', unique=False, precision=5, + fractional=False), "324.00") + + available_float_dtypes = [np.float16, np.float32, np.float64, np.float128]\ + if hasattr(np, 'float128') else [np.float16, np.float32, np.float64] + + @pytest.mark.parametrize("tp", available_float_dtypes) + def test_dragon4_positional_interface(self, tp): + # test is flaky for musllinux on np.float128 + if IS_MUSL and tp == np.float128: + pytest.skip("Skipping flaky test of float128 on musllinux") + + fpos = np.format_float_positional + + # test padding + assert_equal(fpos(tp('1.0'), pad_left=4, pad_right=4), " 1. ") + assert_equal(fpos(tp('-1.0'), pad_left=4, pad_right=4), " -1. ") + assert_equal(fpos(tp('-10.2'), + pad_left=4, pad_right=4), " -10.2 ") + + # test fixed (non-unique) mode + assert_equal(fpos(tp('1.0'), unique=False, precision=4), "1.0000") + + @pytest.mark.parametrize("tp", available_float_dtypes) + def test_dragon4_positional_interface_trim(self, tp): + # test is flaky for musllinux on np.float128 + if IS_MUSL and tp == np.float128: + pytest.skip("Skipping flaky test of float128 on musllinux") + + fpos = np.format_float_positional + # test trimming + # trim of 'k' or '.' only affects non-unique mode, since unique + # mode will not output trailing 0s. + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='k'), + "1.0000") + + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='.'), + "1.") + assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='.'), + "1.2" if tp != np.float16 else "1.2002") + + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='0'), + "1.0") + assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='0'), + "1.2" if tp != np.float16 else "1.2002") + assert_equal(fpos(tp('1.'), trim='0'), "1.0") + + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='-'), + "1") + assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='-'), + "1.2" if tp != np.float16 else "1.2002") + assert_equal(fpos(tp('1.'), trim='-'), "1") + assert_equal(fpos(tp('1.001'), precision=1, trim='-'), "1") + + @pytest.mark.parametrize("tp", available_float_dtypes) + @pytest.mark.parametrize("pad_val", [10**5, np.iinfo("int32").max]) + def test_dragon4_positional_interface_overflow(self, tp, pad_val): + # test is flaky for musllinux on np.float128 + if IS_MUSL and tp == np.float128: + pytest.skip("Skipping flaky test of float128 on musllinux") + + fpos = np.format_float_positional + + # gh-28068 + with pytest.raises(RuntimeError, + match="Float formatting result too large"): + fpos(tp('1.047'), unique=False, precision=pad_val) + + with pytest.raises(RuntimeError, + match="Float formatting result too large"): + fpos(tp('1.047'), precision=2, pad_left=pad_val) + + with pytest.raises(RuntimeError, + match="Float formatting result too large"): + fpos(tp('1.047'), precision=2, pad_right=pad_val) + + @pytest.mark.parametrize("tp", available_float_dtypes) + def test_dragon4_scientific_interface(self, tp): + # test is flaky for musllinux on np.float128 + if IS_MUSL and tp == np.float128: + pytest.skip("Skipping flaky test of float128 on musllinux") + + fsci = np.format_float_scientific + + # test exp_digits + assert_equal(fsci(tp('1.23e1'), exp_digits=5), "1.23e+00001") + + # test fixed (non-unique) mode + assert_equal(fsci(tp('1.0'), unique=False, precision=4), + "1.0000e+00") + + @pytest.mark.skipif(not platform.machine().startswith("ppc64"), + reason="only applies to ppc float128 values") + def test_ppc64_ibm_double_double128(self): + # check that the precision decreases once we get into the subnormal + # range. Unlike float64, this starts around 1e-292 instead of 1e-308, + # which happens when the first double is normal and the second is + # subnormal. + x = np.float128('2.123123123123123123123123123123123e-286') + got = [str(x / np.float128('2e' + str(i))) for i in range(40)] + expected = [ + "1.06156156156156156156156156156157e-286", + "1.06156156156156156156156156156158e-287", + "1.06156156156156156156156156156159e-288", + "1.0615615615615615615615615615616e-289", + "1.06156156156156156156156156156157e-290", + "1.06156156156156156156156156156156e-291", + "1.0615615615615615615615615615616e-292", + "1.0615615615615615615615615615615e-293", + "1.061561561561561561561561561562e-294", + "1.06156156156156156156156156155e-295", + "1.0615615615615615615615615616e-296", + "1.06156156156156156156156156e-297", + "1.06156156156156156156156157e-298", + "1.0615615615615615615615616e-299", + "1.06156156156156156156156e-300", + "1.06156156156156156156155e-301", + "1.0615615615615615615616e-302", + "1.061561561561561561562e-303", + "1.06156156156156156156e-304", + "1.0615615615615615618e-305", + "1.06156156156156156e-306", + "1.06156156156156157e-307", + "1.0615615615615616e-308", + "1.06156156156156e-309", + "1.06156156156157e-310", + "1.0615615615616e-311", + "1.06156156156e-312", + "1.06156156154e-313", + "1.0615615616e-314", + "1.06156156e-315", + "1.06156155e-316", + "1.061562e-317", + "1.06156e-318", + "1.06155e-319", + "1.0617e-320", + "1.06e-321", + "1.04e-322", + "1e-323", + "0.0", + "0.0"] + assert_equal(got, expected) + + # Note: we follow glibc behavior, but it (or gcc) might not be right. + # In particular we can get two values that print the same but are not + # equal: + a = np.float128('2') / np.float128('3') + b = np.float128(str(a)) + assert_equal(str(a), str(b)) + assert_(a != b) + + def float32_roundtrip(self): + # gh-9360 + x = np.float32(1024 - 2**-14) + y = np.float32(1024 - 2**-13) + assert_(repr(x) != repr(y)) + assert_equal(np.float32(repr(x)), x) + assert_equal(np.float32(repr(y)), y) + + def float64_vs_python(self): + # gh-2643, gh-6136, gh-6908 + assert_equal(repr(np.float64(0.1)), repr(0.1)) + assert_(repr(np.float64(0.20000000000000004)) != repr(0.2)) diff --git a/local_packages/numpy/_core/tests/test_shape_base.py b/local_packages/numpy/_core/tests/test_shape_base.py new file mode 100644 index 0000000..5be3d05 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_shape_base.py @@ -0,0 +1,904 @@ +import sys + +import pytest + +import numpy as np +from numpy._core import ( + arange, + array, + atleast_1d, + atleast_2d, + atleast_3d, + block, + concatenate, + hstack, + newaxis, + stack, + vstack, +) +from numpy._core.shape_base import ( + _block_concatenate, + _block_dispatcher, + _block_setup, + _block_slicing, +) +from numpy.exceptions import AxisError +from numpy.testing import ( + IS_PYPY, + assert_, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) +from numpy.testing._private.utils import requires_memory + + +class TestAtleast1d: + def test_0D_array(self): + a = array(1) + b = array(2) + res = [atleast_1d(a), atleast_1d(b)] + desired = [array([1]), array([2])] + assert_array_equal(res, desired) + + def test_1D_array(self): + a = array([1, 2]) + b = array([2, 3]) + res = [atleast_1d(a), atleast_1d(b)] + desired = [array([1, 2]), array([2, 3])] + assert_array_equal(res, desired) + + def test_2D_array(self): + a = array([[1, 2], [1, 2]]) + b = array([[2, 3], [2, 3]]) + res = [atleast_1d(a), atleast_1d(b)] + desired = [a, b] + assert_array_equal(res, desired) + + def test_3D_array(self): + a = array([[1, 2], [1, 2]]) + b = array([[2, 3], [2, 3]]) + a = array([a, a]) + b = array([b, b]) + res = [atleast_1d(a), atleast_1d(b)] + desired = [a, b] + assert_array_equal(res, desired) + + def test_r1array(self): + """ Test to make sure equivalent Travis O's r1array function + """ + assert_(atleast_1d(3).shape == (1,)) + assert_(atleast_1d(3j).shape == (1,)) + assert_(atleast_1d(3.0).shape == (1,)) + assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2)) + + +class TestAtleast2d: + def test_0D_array(self): + a = array(1) + b = array(2) + res = [atleast_2d(a), atleast_2d(b)] + desired = [array([[1]]), array([[2]])] + assert_array_equal(res, desired) + + def test_1D_array(self): + a = array([1, 2]) + b = array([2, 3]) + res = [atleast_2d(a), atleast_2d(b)] + desired = [array([[1, 2]]), array([[2, 3]])] + assert_array_equal(res, desired) + + def test_2D_array(self): + a = array([[1, 2], [1, 2]]) + b = array([[2, 3], [2, 3]]) + res = [atleast_2d(a), atleast_2d(b)] + desired = [a, b] + assert_array_equal(res, desired) + + def test_3D_array(self): + a = array([[1, 2], [1, 2]]) + b = array([[2, 3], [2, 3]]) + a = array([a, a]) + b = array([b, b]) + res = [atleast_2d(a), atleast_2d(b)] + desired = [a, b] + assert_array_equal(res, desired) + + def test_r2array(self): + """ Test to make sure equivalent Travis O's r2array function + """ + assert_(atleast_2d(3).shape == (1, 1)) + assert_(atleast_2d([3j, 1]).shape == (1, 2)) + assert_(atleast_2d([[[3, 1], [4, 5]], [[3, 5], [1, 2]]]).shape == (2, 2, 2)) + + +class TestAtleast3d: + def test_0D_array(self): + a = array(1) + b = array(2) + res = [atleast_3d(a), atleast_3d(b)] + desired = [array([[[1]]]), array([[[2]]])] + assert_array_equal(res, desired) + + def test_1D_array(self): + a = array([1, 2]) + b = array([2, 3]) + res = [atleast_3d(a), atleast_3d(b)] + desired = [array([[[1], [2]]]), array([[[2], [3]]])] + assert_array_equal(res, desired) + + def test_2D_array(self): + a = array([[1, 2], [1, 2]]) + b = array([[2, 3], [2, 3]]) + res = [atleast_3d(a), atleast_3d(b)] + desired = [a[:, :, newaxis], b[:, :, newaxis]] + assert_array_equal(res, desired) + + def test_3D_array(self): + a = array([[1, 2], [1, 2]]) + b = array([[2, 3], [2, 3]]) + a = array([a, a]) + b = array([b, b]) + res = [atleast_3d(a), atleast_3d(b)] + desired = [a, b] + assert_array_equal(res, desired) + + +class TestHstack: + def test_non_iterable(self): + assert_raises(TypeError, hstack, 1) + + def test_empty_input(self): + assert_raises(ValueError, hstack, ()) + + def test_0D_array(self): + a = array(1) + b = array(2) + res = hstack([a, b]) + desired = array([1, 2]) + assert_array_equal(res, desired) + + def test_1D_array(self): + a = array([1]) + b = array([2]) + res = hstack([a, b]) + desired = array([1, 2]) + assert_array_equal(res, desired) + + def test_2D_array(self): + a = array([[1], [2]]) + b = array([[1], [2]]) + res = hstack([a, b]) + desired = array([[1, 1], [2, 2]]) + assert_array_equal(res, desired) + + def test_generator(self): + with pytest.raises(TypeError, match="arrays to stack must be"): + hstack(np.arange(3) for _ in range(2)) + with pytest.raises(TypeError, match="arrays to stack must be"): + hstack(x for x in np.ones((3, 2))) + + def test_casting_and_dtype(self): + a = np.array([1, 2, 3]) + b = np.array([2.5, 3.5, 4.5]) + res = np.hstack((a, b), casting="unsafe", dtype=np.int64) + expected_res = np.array([1, 2, 3, 2, 3, 4]) + assert_array_equal(res, expected_res) + + def test_casting_and_dtype_type_error(self): + a = np.array([1, 2, 3]) + b = np.array([2.5, 3.5, 4.5]) + with pytest.raises(TypeError): + hstack((a, b), casting="safe", dtype=np.int64) + + +class TestVstack: + def test_non_iterable(self): + assert_raises(TypeError, vstack, 1) + + def test_empty_input(self): + assert_raises(ValueError, vstack, ()) + + def test_0D_array(self): + a = array(1) + b = array(2) + res = vstack([a, b]) + desired = array([[1], [2]]) + assert_array_equal(res, desired) + + def test_1D_array(self): + a = array([1]) + b = array([2]) + res = vstack([a, b]) + desired = array([[1], [2]]) + assert_array_equal(res, desired) + + def test_2D_array(self): + a = array([[1], [2]]) + b = array([[1], [2]]) + res = vstack([a, b]) + desired = array([[1], [2], [1], [2]]) + assert_array_equal(res, desired) + + def test_2D_array2(self): + a = array([1, 2]) + b = array([1, 2]) + res = vstack([a, b]) + desired = array([[1, 2], [1, 2]]) + assert_array_equal(res, desired) + + def test_generator(self): + with pytest.raises(TypeError, match="arrays to stack must be"): + vstack(np.arange(3) for _ in range(2)) + + def test_casting_and_dtype(self): + a = np.array([1, 2, 3]) + b = np.array([2.5, 3.5, 4.5]) + res = np.vstack((a, b), casting="unsafe", dtype=np.int64) + expected_res = np.array([[1, 2, 3], [2, 3, 4]]) + assert_array_equal(res, expected_res) + + def test_casting_and_dtype_type_error(self): + a = np.array([1, 2, 3]) + b = np.array([2.5, 3.5, 4.5]) + with pytest.raises(TypeError): + vstack((a, b), casting="safe", dtype=np.int64) + + +class TestConcatenate: + def test_returns_copy(self): + a = np.eye(3) + b = np.concatenate([a]) + b[0, 0] = 2 + assert b[0, 0] != a[0, 0] + + def test_exceptions(self): + # test axis must be in bounds + for ndim in [1, 2, 3]: + a = np.ones((1,) * ndim) + np.concatenate((a, a), axis=0) # OK + assert_raises(AxisError, np.concatenate, (a, a), axis=ndim) + assert_raises(AxisError, np.concatenate, (a, a), axis=-(ndim + 1)) + + # Scalars cannot be concatenated + assert_raises(ValueError, concatenate, (0,)) + assert_raises(ValueError, concatenate, (np.array(0),)) + + # dimensionality must match + assert_raises_regex( + ValueError, + r"all the input arrays must have same number of dimensions, but " + r"the array at index 0 has 1 dimension\(s\) and the array at " + r"index 1 has 2 dimension\(s\)", + np.concatenate, (np.zeros(1), np.zeros((1, 1)))) + + # test shapes must match except for concatenation axis + a = np.ones((1, 2, 3)) + b = np.ones((2, 2, 3)) + axis = list(range(3)) + for i in range(3): + np.concatenate((a, b), axis=axis[0]) # OK + assert_raises_regex( + ValueError, + "all the input array dimensions except for the concatenation axis " + f"must match exactly, but along dimension {i}, the array at " + "index 0 has size 1 and the array at index 1 has size 2", + np.concatenate, (a, b), axis=axis[1]) + assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2]) + a = np.moveaxis(a, -1, 0) + b = np.moveaxis(b, -1, 0) + axis.append(axis.pop(0)) + + # No arrays to concatenate raises ValueError + assert_raises(ValueError, concatenate, ()) + + @pytest.mark.slow + @pytest.mark.skipif( + sys.maxsize < 2**32, + reason="only problematic on 64bit platforms" + ) + @requires_memory(2 * np.iinfo(np.intc).max) + @pytest.mark.thread_unsafe(reason="crashes with low memory") + def test_huge_list_error(self): + a = np.array([1]) + max_int = np.iinfo(np.intc).max + arrs = (a,) * (max_int + 1) + msg = (fr"concatenate\(\) only supports up to {max_int} arrays" + f" but got {max_int + 1}.") + with pytest.raises(ValueError, match=msg): + np.concatenate(arrs) + + def test_concatenate_axis_None(self): + a = np.arange(4, dtype=np.float64).reshape((2, 2)) + b = list(range(3)) + c = ['x'] + r = np.concatenate((a, a), axis=None) + assert_equal(r.dtype, a.dtype) + assert_equal(r.ndim, 1) + r = np.concatenate((a, b), axis=None) + assert_equal(r.size, a.size + len(b)) + assert_equal(r.dtype, a.dtype) + r = np.concatenate((a, b, c), axis=None, dtype="U") + d = array(['0.0', '1.0', '2.0', '3.0', + '0', '1', '2', 'x']) + assert_array_equal(r, d) + + out = np.zeros(a.size + len(b)) + r = np.concatenate((a, b), axis=None) + rout = np.concatenate((a, b), axis=None, out=out) + assert_(out is rout) + assert_equal(r, rout) + + def test_large_concatenate_axis_None(self): + # When no axis is given, concatenate uses flattened versions. + # This also had a bug with many arrays (see gh-5979). + x = np.arange(1, 100) + r = np.concatenate(x, None) + assert_array_equal(x, r) + + # Once upon a time, this was the same as `axis=None` now it fails + # (with an unspecified error, as multiple things are wrong here) + with pytest.raises(ValueError): + np.concatenate(x, 100) + + def test_concatenate(self): + # Test concatenate function + # One sequence returns unmodified (but as array) + r4 = list(range(4)) + assert_array_equal(concatenate((r4,)), r4) + # Any sequence + assert_array_equal(concatenate((tuple(r4),)), r4) + assert_array_equal(concatenate((array(r4),)), r4) + # 1D default concatenation + r3 = list(range(3)) + assert_array_equal(concatenate((r4, r3)), r4 + r3) + # Mixed sequence types + assert_array_equal(concatenate((tuple(r4), r3)), r4 + r3) + assert_array_equal(concatenate((array(r4), r3)), r4 + r3) + # Explicit axis specification + assert_array_equal(concatenate((r4, r3), 0), r4 + r3) + # Including negative + assert_array_equal(concatenate((r4, r3), -1), r4 + r3) + # 2D + a23 = array([[10, 11, 12], [13, 14, 15]]) + a13 = array([[0, 1, 2]]) + res = array([[10, 11, 12], [13, 14, 15], [0, 1, 2]]) + assert_array_equal(concatenate((a23, a13)), res) + assert_array_equal(concatenate((a23, a13), 0), res) + assert_array_equal(concatenate((a23.T, a13.T), 1), res.T) + assert_array_equal(concatenate((a23.T, a13.T), -1), res.T) + # Arrays much match shape + assert_raises(ValueError, concatenate, (a23.T, a13.T), 0) + # 3D + res = arange(2 * 3 * 7).reshape((2, 3, 7)) + a0 = res[..., :4] + a1 = res[..., 4:6] + a2 = res[..., 6:] + assert_array_equal(concatenate((a0, a1, a2), 2), res) + assert_array_equal(concatenate((a0, a1, a2), -1), res) + assert_array_equal(concatenate((a0.T, a1.T, a2.T), 0), res.T) + + out = res.copy() + rout = concatenate((a0, a1, a2), 2, out=out) + assert_(out is rout) + assert_equal(res, rout) + + def test_concatenate_same_value(self): + r4 = list(range(4)) + with pytest.raises(ValueError, match="^casting must be one of"): + concatenate([r4, r4], casting="same_value") + + @pytest.mark.skipif( + IS_PYPY, + reason="PYPY handles sq_concat, nb_add differently than cpython" + ) + def test_operator_concat(self): + import operator + a = array([1, 2]) + b = array([3, 4]) + n = [1, 2] + res = array([1, 2, 3, 4]) + assert_raises(TypeError, operator.concat, a, b) + assert_raises(TypeError, operator.concat, a, n) + assert_raises(TypeError, operator.concat, n, a) + assert_raises(TypeError, operator.concat, a, 1) + assert_raises(TypeError, operator.concat, 1, a) + + def test_bad_out_shape(self): + a = array([1, 2]) + b = array([3, 4]) + + assert_raises(ValueError, concatenate, (a, b), out=np.empty(5)) + assert_raises(ValueError, concatenate, (a, b), out=np.empty((4, 1))) + assert_raises(ValueError, concatenate, (a, b), out=np.empty((1, 4))) + concatenate((a, b), out=np.empty(4)) + + @pytest.mark.parametrize("axis", [None, 0]) + @pytest.mark.parametrize("out_dtype", ["c8", "f4", "f8", ">f8", "i8", "S4"]) + @pytest.mark.parametrize("casting", + ['no', 'equiv', 'safe', 'same_kind', 'unsafe']) + def test_out_and_dtype(self, axis, out_dtype, casting): + # Compare usage of `out=out` with `dtype=out.dtype` + out = np.empty(4, dtype=out_dtype) + to_concat = (array([1.1, 2.2]), array([3.3, 4.4])) + + if not np.can_cast(to_concat[0], out_dtype, casting=casting): + with assert_raises(TypeError): + concatenate(to_concat, out=out, axis=axis, casting=casting) + with assert_raises(TypeError): + concatenate(to_concat, dtype=out.dtype, + axis=axis, casting=casting) + else: + res_out = concatenate(to_concat, out=out, + axis=axis, casting=casting) + res_dtype = concatenate(to_concat, dtype=out.dtype, + axis=axis, casting=casting) + assert res_out is out + assert_array_equal(out, res_dtype) + assert res_dtype.dtype == out_dtype + + with assert_raises(TypeError): + concatenate(to_concat, out=out, dtype=out_dtype, axis=axis) + + @pytest.mark.parametrize("axis", [None, 0]) + @pytest.mark.parametrize("string_dt", ["S", "U", "S0", "U0"]) + @pytest.mark.parametrize("arrs", + [([0.],), ([0.], [1]), ([0], ["string"], [1.])]) + def test_dtype_with_promotion(self, arrs, string_dt, axis): + # Note that U0 and S0 should be deprecated eventually and changed to + # actually give the empty string result (together with `np.array`) + res = np.concatenate(arrs, axis=axis, dtype=string_dt, casting="unsafe") + # The actual dtype should be identical to a cast (of a double array): + assert res.dtype == np.array(1.).astype(string_dt).dtype + + @pytest.mark.parametrize("axis", [None, 0]) + def test_string_dtype_does_not_inspect(self, axis): + with pytest.raises(TypeError): + np.concatenate(([None], [1]), dtype="S", axis=axis) + with pytest.raises(TypeError): + np.concatenate(([None], [1]), dtype="U", axis=axis) + + @pytest.mark.parametrize("axis", [None, 0]) + def test_subarray_error(self, axis): + with pytest.raises(TypeError, match=".*subarray dtype"): + np.concatenate(([1], [1]), dtype="(2,)i", axis=axis) + + +def test_stack(): + # non-iterable input + assert_raises(TypeError, stack, 1) + + # 0d input + for input_ in [(1, 2, 3), + [np.int32(1), np.int32(2), np.int32(3)], + [np.array(1), np.array(2), np.array(3)]]: + assert_array_equal(stack(input_), [1, 2, 3]) + # 1d input examples + a = np.array([1, 2, 3]) + b = np.array([4, 5, 6]) + r1 = array([[1, 2, 3], [4, 5, 6]]) + assert_array_equal(np.stack((a, b)), r1) + assert_array_equal(np.stack((a, b), axis=1), r1.T) + # all input types + assert_array_equal(np.stack([a, b]), r1) + assert_array_equal(np.stack(array([a, b])), r1) + # all shapes for 1d input + arrays = [np.random.randn(3) for _ in range(10)] + axes = [0, 1, -1, -2] + expected_shapes = [(10, 3), (3, 10), (3, 10), (10, 3)] + for axis, expected_shape in zip(axes, expected_shapes): + assert_equal(np.stack(arrays, axis).shape, expected_shape) + assert_raises_regex(AxisError, 'out of bounds', stack, arrays, axis=2) + assert_raises_regex(AxisError, 'out of bounds', stack, arrays, axis=-3) + # all shapes for 2d input + arrays = [np.random.randn(3, 4) for _ in range(10)] + axes = [0, 1, 2, -1, -2, -3] + expected_shapes = [(10, 3, 4), (3, 10, 4), (3, 4, 10), + (3, 4, 10), (3, 10, 4), (10, 3, 4)] + for axis, expected_shape in zip(axes, expected_shapes): + assert_equal(np.stack(arrays, axis).shape, expected_shape) + # empty arrays + assert_(stack([[], [], []]).shape == (3, 0)) + assert_(stack([[], [], []], axis=1).shape == (0, 3)) + # out + out = np.zeros_like(r1) + np.stack((a, b), out=out) + assert_array_equal(out, r1) + # edge cases + assert_raises_regex(ValueError, 'need at least one array', stack, []) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [1, np.arange(3)]) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [np.arange(3), 1]) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [np.arange(3), 1], axis=1) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [np.zeros((3, 3)), np.zeros(3)], axis=1) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [np.arange(2), np.arange(3)]) + + # do not accept generators + with pytest.raises(TypeError, match="arrays to stack must be"): + stack(x for x in range(3)) + + # casting and dtype test + a = np.array([1, 2, 3]) + b = np.array([2.5, 3.5, 4.5]) + res = np.stack((a, b), axis=1, casting="unsafe", dtype=np.int64) + expected_res = np.array([[1, 2], [2, 3], [3, 4]]) + assert_array_equal(res, expected_res) + # casting and dtype with TypeError + with assert_raises(TypeError): + stack((a, b), dtype=np.int64, axis=1, casting="safe") + + +def test_unstack(): + a = np.arange(24).reshape((2, 3, 4)) + + for stacks in [np.unstack(a), + np.unstack(a, axis=0), + np.unstack(a, axis=-3)]: + assert isinstance(stacks, tuple) + assert len(stacks) == 2 + assert_array_equal(stacks[0], a[0]) + assert_array_equal(stacks[1], a[1]) + + for stacks in [np.unstack(a, axis=1), + np.unstack(a, axis=-2)]: + assert isinstance(stacks, tuple) + assert len(stacks) == 3 + assert_array_equal(stacks[0], a[:, 0]) + assert_array_equal(stacks[1], a[:, 1]) + assert_array_equal(stacks[2], a[:, 2]) + + for stacks in [np.unstack(a, axis=2), + np.unstack(a, axis=-1)]: + assert isinstance(stacks, tuple) + assert len(stacks) == 4 + assert_array_equal(stacks[0], a[:, :, 0]) + assert_array_equal(stacks[1], a[:, :, 1]) + assert_array_equal(stacks[2], a[:, :, 2]) + assert_array_equal(stacks[3], a[:, :, 3]) + + assert_raises(ValueError, np.unstack, a, axis=3) + assert_raises(ValueError, np.unstack, a, axis=-4) + assert_raises(ValueError, np.unstack, np.array(0), axis=0) + + +@pytest.mark.parametrize("axis", [0]) +@pytest.mark.parametrize("out_dtype", ["c8", "f4", "f8", ">f8", "i8"]) +@pytest.mark.parametrize("casting", + ['no', 'equiv', 'safe', 'same_kind', 'unsafe']) +def test_stack_out_and_dtype(axis, out_dtype, casting): + to_concat = (array([1, 2]), array([3, 4])) + res = array([[1, 2], [3, 4]]) + out = np.zeros_like(res) + + if not np.can_cast(to_concat[0], out_dtype, casting=casting): + with assert_raises(TypeError): + stack(to_concat, dtype=out_dtype, + axis=axis, casting=casting) + else: + res_out = stack(to_concat, out=out, + axis=axis, casting=casting) + res_dtype = stack(to_concat, dtype=out_dtype, + axis=axis, casting=casting) + assert res_out is out + assert_array_equal(out, res_dtype) + assert res_dtype.dtype == out_dtype + + with assert_raises(TypeError): + stack(to_concat, out=out, dtype=out_dtype, axis=axis) + + +class TestBlock: + @pytest.fixture(params=['block', 'force_concatenate', 'force_slicing']) + def block(self, request): + # blocking small arrays and large arrays go through different paths. + # the algorithm is triggered depending on the number of element + # copies required. + # We define a test fixture that forces most tests to go through + # both code paths. + # Ultimately, this should be removed if a single algorithm is found + # to be faster for both small and large arrays. + def _block_force_concatenate(arrays): + arrays, list_ndim, result_ndim, _ = _block_setup(arrays) + return _block_concatenate(arrays, list_ndim, result_ndim) + + def _block_force_slicing(arrays): + arrays, list_ndim, result_ndim, _ = _block_setup(arrays) + return _block_slicing(arrays, list_ndim, result_ndim) + + if request.param == 'force_concatenate': + return _block_force_concatenate + elif request.param == 'force_slicing': + return _block_force_slicing + elif request.param == 'block': + return block + else: + raise ValueError('Unknown blocking request. There is a typo in the tests.') + + def test_returns_copy(self, block): + a = np.eye(3) + b = block(a) + b[0, 0] = 2 + assert b[0, 0] != a[0, 0] + + def test_block_total_size_estimate(self, block): + _, _, _, total_size = _block_setup([1]) + assert total_size == 1 + + _, _, _, total_size = _block_setup([[1]]) + assert total_size == 1 + + _, _, _, total_size = _block_setup([[1, 1]]) + assert total_size == 2 + + _, _, _, total_size = _block_setup([[1], [1]]) + assert total_size == 2 + + _, _, _, total_size = _block_setup([[1, 2], [3, 4]]) + assert total_size == 4 + + def test_block_simple_row_wise(self, block): + a_2d = np.ones((2, 2)) + b_2d = 2 * a_2d + desired = np.array([[1, 1, 2, 2], + [1, 1, 2, 2]]) + result = block([a_2d, b_2d]) + assert_equal(desired, result) + + def test_block_simple_column_wise(self, block): + a_2d = np.ones((2, 2)) + b_2d = 2 * a_2d + expected = np.array([[1, 1], + [1, 1], + [2, 2], + [2, 2]]) + result = block([[a_2d], [b_2d]]) + assert_equal(expected, result) + + def test_block_with_1d_arrays_row_wise(self, block): + # # # 1-D vectors are treated as row arrays + a = np.array([1, 2, 3]) + b = np.array([2, 3, 4]) + expected = np.array([1, 2, 3, 2, 3, 4]) + result = block([a, b]) + assert_equal(expected, result) + + def test_block_with_1d_arrays_multiple_rows(self, block): + a = np.array([1, 2, 3]) + b = np.array([2, 3, 4]) + expected = np.array([[1, 2, 3, 2, 3, 4], + [1, 2, 3, 2, 3, 4]]) + result = block([[a, b], [a, b]]) + assert_equal(expected, result) + + def test_block_with_1d_arrays_column_wise(self, block): + # # # 1-D vectors are treated as row arrays + a_1d = np.array([1, 2, 3]) + b_1d = np.array([2, 3, 4]) + expected = np.array([[1, 2, 3], + [2, 3, 4]]) + result = block([[a_1d], [b_1d]]) + assert_equal(expected, result) + + def test_block_mixed_1d_and_2d(self, block): + a_2d = np.ones((2, 2)) + b_1d = np.array([2, 2]) + result = block([[a_2d], [b_1d]]) + expected = np.array([[1, 1], + [1, 1], + [2, 2]]) + assert_equal(expected, result) + + def test_block_complicated(self, block): + # a bit more complicated + one_2d = np.array([[1, 1, 1]]) + two_2d = np.array([[2, 2, 2]]) + three_2d = np.array([[3, 3, 3, 3, 3, 3]]) + four_1d = np.array([4, 4, 4, 4, 4, 4]) + five_0d = np.array(5) + six_1d = np.array([6, 6, 6, 6, 6]) + zero_2d = np.zeros((2, 6)) + + expected = np.array([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 3, 3, 3], + [4, 4, 4, 4, 4, 4], + [5, 6, 6, 6, 6, 6], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0]]) + + result = block([[one_2d, two_2d], + [three_2d], + [four_1d], + [five_0d, six_1d], + [zero_2d]]) + assert_equal(result, expected) + + def test_nested(self, block): + one = np.array([1, 1, 1]) + two = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]]) + three = np.array([3, 3, 3]) + four = np.array([4, 4, 4]) + five = np.array(5) + six = np.array([6, 6, 6, 6, 6]) + zero = np.zeros((2, 6)) + + result = block([ + [ + block([ + [one], + [three], + [four] + ]), + two + ], + [five, six], + [zero] + ]) + expected = np.array([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 2, 2, 2], + [4, 4, 4, 2, 2, 2], + [5, 6, 6, 6, 6, 6], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0]]) + + assert_equal(result, expected) + + def test_3d(self, block): + a000 = np.ones((2, 2, 2), int) * 1 + + a100 = np.ones((3, 2, 2), int) * 2 + a010 = np.ones((2, 3, 2), int) * 3 + a001 = np.ones((2, 2, 3), int) * 4 + + a011 = np.ones((2, 3, 3), int) * 5 + a101 = np.ones((3, 2, 3), int) * 6 + a110 = np.ones((3, 3, 2), int) * 7 + + a111 = np.ones((3, 3, 3), int) * 8 + + result = block([ + [ + [a000, a001], + [a010, a011], + ], + [ + [a100, a101], + [a110, a111], + ] + ]) + expected = array([[[1, 1, 4, 4, 4], + [1, 1, 4, 4, 4], + [3, 3, 5, 5, 5], + [3, 3, 5, 5, 5], + [3, 3, 5, 5, 5]], + + [[1, 1, 4, 4, 4], + [1, 1, 4, 4, 4], + [3, 3, 5, 5, 5], + [3, 3, 5, 5, 5], + [3, 3, 5, 5, 5]], + + [[2, 2, 6, 6, 6], + [2, 2, 6, 6, 6], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8]], + + [[2, 2, 6, 6, 6], + [2, 2, 6, 6, 6], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8]], + + [[2, 2, 6, 6, 6], + [2, 2, 6, 6, 6], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8]]]) + + assert_array_equal(result, expected) + + def test_block_with_mismatched_shape(self, block): + a = np.array([0, 0]) + b = np.eye(2) + assert_raises(ValueError, block, [a, b]) + assert_raises(ValueError, block, [b, a]) + + to_block = [[np.ones((2, 3)), np.ones((2, 2))], + [np.ones((2, 2)), np.ones((2, 2))]] + assert_raises(ValueError, block, to_block) + + def test_no_lists(self, block): + assert_equal(block(1), np.array(1)) + assert_equal(block(np.eye(3)), np.eye(3)) + + def test_invalid_nesting(self, block): + msg = 'depths are mismatched' + assert_raises_regex(ValueError, msg, block, [1, [2]]) + assert_raises_regex(ValueError, msg, block, [1, []]) + assert_raises_regex(ValueError, msg, block, [[1], 2]) + assert_raises_regex(ValueError, msg, block, [[], 2]) + assert_raises_regex(ValueError, msg, block, [ + [[1], [2]], + [[3, 4]], + [5] # missing brackets + ]) + + def test_empty_lists(self, block): + assert_raises_regex(ValueError, 'empty', block, []) + assert_raises_regex(ValueError, 'empty', block, [[]]) + assert_raises_regex(ValueError, 'empty', block, [[1], []]) + + def test_tuple(self, block): + assert_raises_regex(TypeError, 'tuple', block, ([1, 2], [3, 4])) + assert_raises_regex(TypeError, 'tuple', block, [(1, 2), (3, 4)]) + + def test_different_ndims(self, block): + a = 1. + b = 2 * np.ones((1, 2)) + c = 3 * np.ones((1, 1, 3)) + + result = block([a, b, c]) + expected = np.array([[[1., 2., 2., 3., 3., 3.]]]) + + assert_equal(result, expected) + + def test_different_ndims_depths(self, block): + a = 1. + b = 2 * np.ones((1, 2)) + c = 3 * np.ones((1, 2, 3)) + + result = block([[a, b], [c]]) + expected = np.array([[[1., 2., 2.], + [3., 3., 3.], + [3., 3., 3.]]]) + + assert_equal(result, expected) + + def test_block_memory_order(self, block): + # 3D + arr_c = np.zeros((3,) * 3, order='C') + arr_f = np.zeros((3,) * 3, order='F') + + b_c = [[[arr_c, arr_c], + [arr_c, arr_c]], + [[arr_c, arr_c], + [arr_c, arr_c]]] + + b_f = [[[arr_f, arr_f], + [arr_f, arr_f]], + [[arr_f, arr_f], + [arr_f, arr_f]]] + + assert block(b_c).flags['C_CONTIGUOUS'] + assert block(b_f).flags['F_CONTIGUOUS'] + + arr_c = np.zeros((3, 3), order='C') + arr_f = np.zeros((3, 3), order='F') + # 2D + b_c = [[arr_c, arr_c], + [arr_c, arr_c]] + + b_f = [[arr_f, arr_f], + [arr_f, arr_f]] + + assert block(b_c).flags['C_CONTIGUOUS'] + assert block(b_f).flags['F_CONTIGUOUS'] + + +def test_block_dispatcher(): + class ArrayLike: + pass + a = ArrayLike() + b = ArrayLike() + c = ArrayLike() + assert_equal(list(_block_dispatcher(a)), [a]) + assert_equal(list(_block_dispatcher([a])), [a]) + assert_equal(list(_block_dispatcher([a, b])), [a, b]) + assert_equal(list(_block_dispatcher([[a], [b, [c]]])), [a, b, c]) + # don't recurse into non-lists + assert_equal(list(_block_dispatcher((a, b))), [(a, b)]) diff --git a/local_packages/numpy/_core/tests/test_simd.py b/local_packages/numpy/_core/tests/test_simd.py new file mode 100644 index 0000000..335abc9 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_simd.py @@ -0,0 +1,1345 @@ +# NOTE: Please avoid the use of numpy.testing since NPYV intrinsics +# may be involved in their functionality. +import itertools +import math +import operator +import re + +import pytest + +from numpy._core._multiarray_umath import __cpu_baseline__ +from numpy._core._simd import clear_floatstatus, get_floatstatus, targets + + +def check_floatstatus(divbyzero=False, overflow=False, + underflow=False, invalid=False, + all=False): + #define NPY_FPE_DIVIDEBYZERO 1 + #define NPY_FPE_OVERFLOW 2 + #define NPY_FPE_UNDERFLOW 4 + #define NPY_FPE_INVALID 8 + err = get_floatstatus() + ret = (all or divbyzero) and (err & 1) != 0 + ret |= (all or overflow) and (err & 2) != 0 + ret |= (all or underflow) and (err & 4) != 0 + ret |= (all or invalid) and (err & 8) != 0 + return ret + +class _Test_Utility: + # submodule of the desired SIMD extension, e.g. targets["AVX512F"] + npyv = None + # the current data type suffix e.g. 's8' + sfx = None + # target name can be 'baseline' or one or more of CPU features + target_name = None + + def __getattr__(self, attr): + """ + To call NPV intrinsics without the attribute 'npyv' and + auto suffixing intrinsics according to class attribute 'sfx' + """ + return getattr(self.npyv, attr + "_" + self.sfx) + + def _x2(self, intrin_name): + return getattr(self.npyv, f"{intrin_name}_{self.sfx}x2") + + def _data(self, start=None, count=None, reverse=False): + """ + Create list of consecutive numbers according to number of vector's lanes. + """ + if start is None: + start = 1 + if count is None: + count = self.nlanes + rng = range(start, start + count) + if reverse: + rng = reversed(rng) + if self._is_fp(): + return [x / 1.0 for x in rng] + return list(rng) + + def _is_unsigned(self): + return self.sfx[0] == 'u' + + def _is_signed(self): + return self.sfx[0] == 's' + + def _is_fp(self): + return self.sfx[0] == 'f' + + def _scalar_size(self): + return int(self.sfx[1:]) + + def _int_clip(self, seq): + if self._is_fp(): + return seq + max_int = self._int_max() + min_int = self._int_min() + return [min(max(v, min_int), max_int) for v in seq] + + def _int_max(self): + if self._is_fp(): + return None + max_u = self._to_unsigned(self.setall(-1))[0] + if self._is_signed(): + return max_u // 2 + return max_u + + def _int_min(self): + if self._is_fp(): + return None + if self._is_unsigned(): + return 0 + return -(self._int_max() + 1) + + def _true_mask(self): + max_unsig = getattr(self.npyv, "setall_u" + self.sfx[1:])(-1) + return max_unsig[0] + + def _to_unsigned(self, vector): + if isinstance(vector, (list, tuple)): + return getattr(self.npyv, "load_u" + self.sfx[1:])(vector) + else: + sfx = vector.__name__.replace("npyv_", "") + if sfx[0] == "b": + cvt_intrin = "cvt_u{0}_b{0}" + else: + cvt_intrin = "reinterpret_u{0}_{1}" + return getattr(self.npyv, cvt_intrin.format(sfx[1:], sfx))(vector) + + def _pinfinity(self): + return float("inf") + + def _ninfinity(self): + return -float("inf") + + def _nan(self): + return float("nan") + + def _cpu_features(self): + target = self.target_name + if target == "baseline": + target = __cpu_baseline__ + else: + target = target.split('__') # multi-target separator + return ' '.join(target) + +class _SIMD_BOOL(_Test_Utility): + """ + To test all boolean vector types at once + """ + def _nlanes(self): + return getattr(self.npyv, "nlanes_u" + self.sfx[1:]) + + def _data(self, start=None, count=None, reverse=False): + true_mask = self._true_mask() + rng = range(self._nlanes()) + if reverse: + rng = reversed(rng) + return [true_mask if x % 2 else 0 for x in rng] + + def _load_b(self, data): + len_str = self.sfx[1:] + load = getattr(self.npyv, "load_u" + len_str) + cvt = getattr(self.npyv, f"cvt_b{len_str}_u{len_str}") + return cvt(load(data)) + + def test_operators_logical(self): + """ + Logical operations for boolean types. + Test intrinsics: + npyv_xor_##SFX, npyv_and_##SFX, npyv_or_##SFX, npyv_not_##SFX, + npyv_andc_b8, npvy_orc_b8, nvpy_xnor_b8 + """ + data_a = self._data() + data_b = self._data(reverse=True) + vdata_a = self._load_b(data_a) + vdata_b = self._load_b(data_b) + + data_and = [a & b for a, b in zip(data_a, data_b)] + vand = getattr(self, "and")(vdata_a, vdata_b) + assert vand == data_and + + data_or = [a | b for a, b in zip(data_a, data_b)] + vor = getattr(self, "or")(vdata_a, vdata_b) + assert vor == data_or + + data_xor = [a ^ b for a, b in zip(data_a, data_b)] + vxor = self.xor(vdata_a, vdata_b) + assert vxor == data_xor + + vnot = getattr(self, "not")(vdata_a) + assert vnot == data_b + + # among the boolean types, andc, orc and xnor only support b8 + if self.sfx not in ("b8"): + return + + data_andc = [(a & ~b) & 0xFF for a, b in zip(data_a, data_b)] + vandc = self.andc(vdata_a, vdata_b) + assert data_andc == vandc + + data_orc = [(a | ~b) & 0xFF for a, b in zip(data_a, data_b)] + vorc = self.orc(vdata_a, vdata_b) + assert data_orc == vorc + + data_xnor = [~(a ^ b) & 0xFF for a, b in zip(data_a, data_b)] + vxnor = self.xnor(vdata_a, vdata_b) + assert data_xnor == vxnor + + def test_tobits(self): + data2bits = lambda data: sum(int(x != 0) << i for i, x in enumerate(data, 0)) + for data in (self._data(), self._data(reverse=True)): + vdata = self._load_b(data) + data_bits = data2bits(data) + tobits = self.tobits(vdata) + bin_tobits = bin(tobits) + assert bin_tobits == bin(data_bits) + + def test_pack(self): + """ + Pack multiple vectors into one + Test intrinsics: + npyv_pack_b8_b16 + npyv_pack_b8_b32 + npyv_pack_b8_b64 + """ + if self.sfx not in ("b16", "b32", "b64"): + return + # create the vectors + data = self._data() + rdata = self._data(reverse=True) + vdata = self._load_b(data) + vrdata = self._load_b(rdata) + pack_simd = getattr(self.npyv, f"pack_b8_{self.sfx}") + # for scalar execution, concatenate the elements of the multiple lists + # into a single list (spack) and then iterate over the elements of + # the created list applying a mask to capture the first byte of them. + if self.sfx == "b16": + spack = [(i & 0xFF) for i in (list(rdata) + list(data))] + vpack = pack_simd(vrdata, vdata) + elif self.sfx == "b32": + spack = [(i & 0xFF) for i in (2 * list(rdata) + 2 * list(data))] + vpack = pack_simd(vrdata, vrdata, vdata, vdata) + elif self.sfx == "b64": + spack = [(i & 0xFF) for i in (4 * list(rdata) + 4 * list(data))] + vpack = pack_simd(vrdata, vrdata, vrdata, vrdata, + vdata, vdata, vdata, vdata) + assert vpack == spack + + @pytest.mark.parametrize("intrin", ["any", "all"]) + @pytest.mark.parametrize("data", ( + [-1, 0], + [0, -1], + [-1], + [0] + )) + def test_operators_crosstest(self, intrin, data): + """ + Test intrinsics: + npyv_any_##SFX + npyv_all_##SFX + """ + data_a = self._load_b(data * self._nlanes()) + func = eval(intrin) + intrin = getattr(self, intrin) + desired = func(data_a) + simd = intrin(data_a) + assert not not simd == desired + +class _SIMD_INT(_Test_Utility): + """ + To test all integer vector types at once + """ + def test_operators_shift(self): + if self.sfx in ("u8", "s8"): + return + + data_a = self._data(self._int_max() - self.nlanes) + data_b = self._data(self._int_min(), reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + for count in range(self._scalar_size()): + # load to cast + data_shl_a = self.load([a << count for a in data_a]) + # left shift + shl = self.shl(vdata_a, count) + assert shl == data_shl_a + # load to cast + data_shr_a = self.load([a >> count for a in data_a]) + # right shift + shr = self.shr(vdata_a, count) + assert shr == data_shr_a + + # shift by zero or max or out-range immediate constant is not + # applicable and illogical + for count in range(1, self._scalar_size()): + # load to cast + data_shl_a = self.load([a << count for a in data_a]) + # left shift by an immediate constant + shli = self.shli(vdata_a, count) + assert shli == data_shl_a + # load to cast + data_shr_a = self.load([a >> count for a in data_a]) + # right shift by an immediate constant + shri = self.shri(vdata_a, count) + assert shri == data_shr_a + + def test_arithmetic_subadd_saturated(self): + if self.sfx in ("u32", "s32", "u64", "s64"): + return + + data_a = self._data(self._int_max() - self.nlanes) + data_b = self._data(self._int_min(), reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + data_adds = self._int_clip([a + b for a, b in zip(data_a, data_b)]) + adds = self.adds(vdata_a, vdata_b) + assert adds == data_adds + + data_subs = self._int_clip([a - b for a, b in zip(data_a, data_b)]) + subs = self.subs(vdata_a, vdata_b) + assert subs == data_subs + + def test_math_max_min(self): + data_a = self._data() + data_b = self._data(self.nlanes) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + data_max = [max(a, b) for a, b in zip(data_a, data_b)] + simd_max = self.max(vdata_a, vdata_b) + assert simd_max == data_max + + data_min = [min(a, b) for a, b in zip(data_a, data_b)] + simd_min = self.min(vdata_a, vdata_b) + assert simd_min == data_min + + @pytest.mark.parametrize("start", [-100, -10000, 0, 100, 10000]) + def test_reduce_max_min(self, start): + """ + Test intrinsics: + npyv_reduce_max_##sfx + npyv_reduce_min_##sfx + """ + vdata_a = self.load(self._data(start)) + assert self.reduce_max(vdata_a) == max(vdata_a) + assert self.reduce_min(vdata_a) == min(vdata_a) + + +class _SIMD_FP32(_Test_Utility): + """ + To only test single precision + """ + def test_conversions(self): + """ + Round to nearest even integer, assume CPU control register is set to rounding. + Test intrinsics: + npyv_round_s32_##SFX + """ + features = self._cpu_features() + if not self.npyv.simd_f64 and re.match(r".*(NEON|ASIMD)", features): + # very costly to emulate nearest even on Armv7 + # instead we round halves to up. e.g. 0.5 -> 1, -0.5 -> -1 + _round = lambda v: int(v + (0.5 if v >= 0 else -0.5)) + else: + _round = round + vdata_a = self.load(self._data()) + vdata_a = self.sub(vdata_a, self.setall(0.5)) + data_round = [_round(x) for x in vdata_a] + vround = self.round_s32(vdata_a) + assert vround == data_round + +class _SIMD_FP64(_Test_Utility): + """ + To only test double precision + """ + def test_conversions(self): + """ + Round to nearest even integer, assume CPU control register is set to rounding. + Test intrinsics: + npyv_round_s32_##SFX + """ + vdata_a = self.load(self._data()) + vdata_a = self.sub(vdata_a, self.setall(0.5)) + vdata_b = self.mul(vdata_a, self.setall(-1.5)) + data_round = [round(x) for x in list(vdata_a) + list(vdata_b)] + vround = self.round_s32(vdata_a, vdata_b) + assert vround == data_round + +class _SIMD_FP(_Test_Utility): + """ + To test all float vector types at once + """ + def test_arithmetic_fused(self): + vdata_a, vdata_b, vdata_c = [self.load(self._data())] * 3 + vdata_cx2 = self.add(vdata_c, vdata_c) + # multiply and add, a*b + c + data_fma = self.load([a * b + c for a, b, c in zip(vdata_a, vdata_b, vdata_c)]) + fma = self.muladd(vdata_a, vdata_b, vdata_c) + assert fma == data_fma + # multiply and subtract, a*b - c + fms = self.mulsub(vdata_a, vdata_b, vdata_c) + data_fms = self.sub(data_fma, vdata_cx2) + assert fms == data_fms + # negate multiply and add, -(a*b) + c + nfma = self.nmuladd(vdata_a, vdata_b, vdata_c) + data_nfma = self.sub(vdata_cx2, data_fma) + assert nfma == data_nfma + # negate multiply and subtract, -(a*b) - c + nfms = self.nmulsub(vdata_a, vdata_b, vdata_c) + data_nfms = self.mul(data_fma, self.setall(-1)) + assert nfms == data_nfms + # multiply, add for odd elements and subtract even elements. + # (a * b) -+ c + fmas = list(self.muladdsub(vdata_a, vdata_b, vdata_c)) + assert fmas[0::2] == list(data_fms)[0::2] + assert fmas[1::2] == list(data_fma)[1::2] + + def test_abs(self): + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + data = self._data() + vdata = self.load(self._data()) + + abs_cases = ((-0, 0), (ninf, pinf), (pinf, pinf), (nan, nan)) + for case, desired in abs_cases: + data_abs = [desired] * self.nlanes + vabs = self.abs(self.setall(case)) + assert vabs == pytest.approx(data_abs, nan_ok=True) + + vabs = self.abs(self.mul(vdata, self.setall(-1))) + assert vabs == data + + def test_sqrt(self): + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + data = self._data() + vdata = self.load(self._data()) + + sqrt_cases = ((-0.0, -0.0), (0.0, 0.0), (-1.0, nan), (ninf, nan), (pinf, pinf)) + for case, desired in sqrt_cases: + data_sqrt = [desired] * self.nlanes + sqrt = self.sqrt(self.setall(case)) + assert sqrt == pytest.approx(data_sqrt, nan_ok=True) + + # load to truncate precision + data_sqrt = self.load([math.sqrt(x) for x in data]) + sqrt = self.sqrt(vdata) + assert sqrt == data_sqrt + + def test_square(self): + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + data = self._data() + vdata = self.load(self._data()) + # square + square_cases = ((nan, nan), (pinf, pinf), (ninf, pinf)) + for case, desired in square_cases: + data_square = [desired] * self.nlanes + square = self.square(self.setall(case)) + assert square == pytest.approx(data_square, nan_ok=True) + + data_square = [x * x for x in data] + square = self.square(vdata) + assert square == data_square + + @pytest.mark.parametrize("intrin, func", [("ceil", math.ceil), + ("trunc", math.trunc), ("floor", math.floor), ("rint", round)]) + def test_rounding(self, intrin, func): + """ + Test intrinsics: + npyv_rint_##SFX + npyv_ceil_##SFX + npyv_trunc_##SFX + npyv_floor##SFX + """ + intrin_name = intrin + intrin = getattr(self, intrin) + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + # special cases + round_cases = ((nan, nan), (pinf, pinf), (ninf, ninf)) + for case, desired in round_cases: + data_round = [desired] * self.nlanes + _round = intrin(self.setall(case)) + assert _round == pytest.approx(data_round, nan_ok=True) + + for x in range(0, 2**20, 256**2): + for w in (-1.05, -1.10, -1.15, 1.05, 1.10, 1.15): + data = self.load([(x + a) * w for a in range(self.nlanes)]) + data_round = [func(x) for x in data] + _round = intrin(data) + assert _round == data_round + + # test large numbers + for i in ( + 1.1529215045988576e+18, 4.6116860183954304e+18, + 5.902958103546122e+20, 2.3611832414184488e+21 + ): + x = self.setall(i) + y = intrin(x) + data_round = [func(n) for n in x] + assert y == data_round + + # signed zero + if intrin_name == "floor": + data_szero = (-0.0,) + else: + data_szero = (-0.0, -0.25, -0.30, -0.45, -0.5) + + for w in data_szero: + _round = self._to_unsigned(intrin(self.setall(w))) + data_round = self._to_unsigned(self.setall(-0.0)) + assert _round == data_round + + @pytest.mark.parametrize("intrin", [ + "max", "maxp", "maxn", "min", "minp", "minn" + ]) + def test_max_min(self, intrin): + """ + Test intrinsics: + npyv_max_##sfx + npyv_maxp_##sfx + npyv_maxn_##sfx + npyv_min_##sfx + npyv_minp_##sfx + npyv_minn_##sfx + npyv_reduce_max_##sfx + npyv_reduce_maxp_##sfx + npyv_reduce_maxn_##sfx + npyv_reduce_min_##sfx + npyv_reduce_minp_##sfx + npyv_reduce_minn_##sfx + """ + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + chk_nan = {"xp": 1, "np": 1, "nn": 2, "xn": 2}.get(intrin[-2:], 0) + func = eval(intrin[:3]) + reduce_intrin = getattr(self, "reduce_" + intrin) + intrin = getattr(self, intrin) + hf_nlanes = self.nlanes // 2 + + cases = ( + ([0.0, -0.0], [-0.0, 0.0]), + ([10, -10], [10, -10]), + ([pinf, 10], [10, ninf]), + ([10, pinf], [ninf, 10]), + ([10, -10], [10, -10]), + ([-10, 10], [-10, 10]) + ) + for op1, op2 in cases: + vdata_a = self.load(op1 * hf_nlanes) + vdata_b = self.load(op2 * hf_nlanes) + data = func(vdata_a, vdata_b) + simd = intrin(vdata_a, vdata_b) + assert simd == data + data = func(vdata_a) + simd = reduce_intrin(vdata_a) + assert simd == data + + if not chk_nan: + return + if chk_nan == 1: + test_nan = lambda a, b: ( + b if math.isnan(a) else a if math.isnan(b) else b + ) + else: + test_nan = lambda a, b: ( + nan if math.isnan(a) or math.isnan(b) else b + ) + cases = ( + (nan, 10), + (10, nan), + (nan, pinf), + (pinf, nan), + (nan, nan) + ) + for op1, op2 in cases: + vdata_ab = self.load([op1, op2] * hf_nlanes) + data = test_nan(op1, op2) + simd = reduce_intrin(vdata_ab) + assert simd == pytest.approx(data, nan_ok=True) + vdata_a = self.setall(op1) + vdata_b = self.setall(op2) + data = [data] * self.nlanes + simd = intrin(vdata_a, vdata_b) + assert simd == pytest.approx(data, nan_ok=True) + + def test_reciprocal(self): + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + data = self._data() + vdata = self.load(self._data()) + + recip_cases = ((nan, nan), (pinf, 0.0), (ninf, -0.0), (0.0, pinf), (-0.0, ninf)) + for case, desired in recip_cases: + data_recip = [desired] * self.nlanes + recip = self.recip(self.setall(case)) + assert recip == pytest.approx(data_recip, nan_ok=True) + + data_recip = self.load([1 / x for x in data]) # load to truncate precision + recip = self.recip(vdata) + assert recip == data_recip + + def test_special_cases(self): + """ + Compare Not NaN. Test intrinsics: + npyv_notnan_##SFX + """ + nnan = self.notnan(self.setall(self._nan())) + assert nnan == [0] * self.nlanes + + @pytest.mark.parametrize("intrin_name", [ + "rint", "trunc", "ceil", "floor" + ]) + def test_unary_invalid_fpexception(self, intrin_name): + intrin = getattr(self, intrin_name) + for d in [float("nan"), float("inf"), -float("inf")]: + v = self.setall(d) + clear_floatstatus() + intrin(v) + assert check_floatstatus(invalid=True) is False + + @pytest.mark.parametrize('py_comp,np_comp', [ + (operator.lt, "cmplt"), + (operator.le, "cmple"), + (operator.gt, "cmpgt"), + (operator.ge, "cmpge"), + (operator.eq, "cmpeq"), + (operator.ne, "cmpneq") + ]) + def test_comparison_with_nan(self, py_comp, np_comp): + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + mask_true = self._true_mask() + + def to_bool(vector): + return [lane == mask_true for lane in vector] + + intrin = getattr(self, np_comp) + cmp_cases = ((0, nan), (nan, 0), (nan, nan), (pinf, nan), + (ninf, nan), (-0.0, +0.0)) + for case_operand1, case_operand2 in cmp_cases: + data_a = [case_operand1] * self.nlanes + data_b = [case_operand2] * self.nlanes + vdata_a = self.setall(case_operand1) + vdata_b = self.setall(case_operand2) + vcmp = to_bool(intrin(vdata_a, vdata_b)) + data_cmp = [py_comp(a, b) for a, b in zip(data_a, data_b)] + assert vcmp == data_cmp + + @pytest.mark.parametrize("intrin", ["any", "all"]) + @pytest.mark.parametrize("data", ( + [float("nan"), 0], + [0, float("nan")], + [float("nan"), 1], + [1, float("nan")], + [float("nan"), float("nan")], + [0.0, -0.0], + [-0.0, 0.0], + [1.0, -0.0] + )) + def test_operators_crosstest(self, intrin, data): + """ + Test intrinsics: + npyv_any_##SFX + npyv_all_##SFX + """ + data_a = self.load(data * self.nlanes) + func = eval(intrin) + intrin = getattr(self, intrin) + desired = func(data_a) + simd = intrin(data_a) + assert not not simd == desired + +class _SIMD_ALL(_Test_Utility): + """ + To test all vector types at once + """ + def test_memory_load(self): + data = self._data() + # unaligned load + load_data = self.load(data) + assert load_data == data + # aligned load + loada_data = self.loada(data) + assert loada_data == data + # stream load + loads_data = self.loads(data) + assert loads_data == data + # load lower part + loadl = self.loadl(data) + loadl_half = list(loadl)[:self.nlanes // 2] + data_half = data[:self.nlanes // 2] + assert loadl_half == data_half + assert loadl != data # detect overflow + + def test_memory_store(self): + data = self._data() + vdata = self.load(data) + # unaligned store + store = [0] * self.nlanes + self.store(store, vdata) + assert store == data + # aligned store + store_a = [0] * self.nlanes + self.storea(store_a, vdata) + assert store_a == data + # stream store + store_s = [0] * self.nlanes + self.stores(store_s, vdata) + assert store_s == data + # store lower part + store_l = [0] * self.nlanes + self.storel(store_l, vdata) + assert store_l[:self.nlanes // 2] == data[:self.nlanes // 2] + assert store_l != vdata # detect overflow + # store higher part + store_h = [0] * self.nlanes + self.storeh(store_h, vdata) + assert store_h[:self.nlanes // 2] == data[self.nlanes // 2:] + assert store_h != vdata # detect overflow + + @pytest.mark.parametrize("intrin, elsizes, scale, fill", [ + ("self.load_tillz, self.load_till", (32, 64), 1, [0xffff]), + ("self.load2_tillz, self.load2_till", (32, 64), 2, [0xffff, 0x7fff]), + ]) + def test_memory_partial_load(self, intrin, elsizes, scale, fill): + if self._scalar_size() not in elsizes: + return + npyv_load_tillz, npyv_load_till = eval(intrin) + data = self._data() + lanes = list(range(1, self.nlanes + 1)) + lanes += [self.nlanes**2, self.nlanes**4] # test out of range + for n in lanes: + load_till = npyv_load_till(data, n, *fill) + load_tillz = npyv_load_tillz(data, n) + n *= scale + data_till = data[:n] + fill * ((self.nlanes - n) // scale) + assert load_till == data_till + data_tillz = data[:n] + [0] * (self.nlanes - n) + assert load_tillz == data_tillz + + @pytest.mark.parametrize("intrin, elsizes, scale", [ + ("self.store_till", (32, 64), 1), + ("self.store2_till", (32, 64), 2), + ]) + def test_memory_partial_store(self, intrin, elsizes, scale): + if self._scalar_size() not in elsizes: + return + npyv_store_till = eval(intrin) + data = self._data() + data_rev = self._data(reverse=True) + vdata = self.load(data) + lanes = list(range(1, self.nlanes + 1)) + lanes += [self.nlanes**2, self.nlanes**4] + for n in lanes: + data_till = data_rev.copy() + data_till[:n * scale] = data[:n * scale] + store_till = self._data(reverse=True) + npyv_store_till(store_till, n, vdata) + assert store_till == data_till + + @pytest.mark.parametrize("intrin, elsizes, scale", [ + ("self.loadn", (32, 64), 1), + ("self.loadn2", (32, 64), 2), + ]) + def test_memory_noncont_load(self, intrin, elsizes, scale): + if self._scalar_size() not in elsizes: + return + npyv_loadn = eval(intrin) + for stride in range(-64, 64): + if stride < 0: + data = self._data(stride, -stride * self.nlanes) + data_stride = list(itertools.chain( + *zip(*[data[-i::stride] for i in range(scale, 0, -1)]) + )) + elif stride == 0: + data = self._data() + data_stride = data[0:scale] * (self.nlanes // scale) + else: + data = self._data(count=stride * self.nlanes) + data_stride = list(itertools.chain( + *zip(*[data[i::stride] for i in range(scale)])) + ) + data_stride = self.load(data_stride) # cast unsigned + loadn = npyv_loadn(data, stride) + assert loadn == data_stride + + @pytest.mark.parametrize("intrin, elsizes, scale, fill", [ + ("self.loadn_tillz, self.loadn_till", (32, 64), 1, [0xffff]), + ("self.loadn2_tillz, self.loadn2_till", (32, 64), 2, [0xffff, 0x7fff]), + ]) + def test_memory_noncont_partial_load(self, intrin, elsizes, scale, fill): + if self._scalar_size() not in elsizes: + return + npyv_loadn_tillz, npyv_loadn_till = eval(intrin) + lanes = list(range(1, self.nlanes + 1)) + lanes += [self.nlanes**2, self.nlanes**4] + for stride in range(-64, 64): + if stride < 0: + data = self._data(stride, -stride * self.nlanes) + data_stride = list(itertools.chain( + *zip(*[data[-i::stride] for i in range(scale, 0, -1)]) + )) + elif stride == 0: + data = self._data() + data_stride = data[0:scale] * (self.nlanes // scale) + else: + data = self._data(count=stride * self.nlanes) + data_stride = list(itertools.chain( + *zip(*[data[i::stride] for i in range(scale)]) + )) + data_stride = list(self.load(data_stride)) # cast unsigned + for n in lanes: + nscale = n * scale + llanes = self.nlanes - nscale + data_stride_till = ( + data_stride[:nscale] + fill * (llanes // scale) + ) + loadn_till = npyv_loadn_till(data, stride, n, *fill) + assert loadn_till == data_stride_till + data_stride_tillz = data_stride[:nscale] + [0] * llanes + loadn_tillz = npyv_loadn_tillz(data, stride, n) + assert loadn_tillz == data_stride_tillz + + @pytest.mark.parametrize("intrin, elsizes, scale", [ + ("self.storen", (32, 64), 1), + ("self.storen2", (32, 64), 2), + ]) + def test_memory_noncont_store(self, intrin, elsizes, scale): + if self._scalar_size() not in elsizes: + return + npyv_storen = eval(intrin) + data = self._data() + vdata = self.load(data) + hlanes = self.nlanes // scale + for stride in range(1, 64): + data_storen = [0xff] * stride * self.nlanes + for s in range(0, hlanes * stride, stride): + i = (s // stride) * scale + data_storen[s:s + scale] = data[i:i + scale] + storen = [0xff] * stride * self.nlanes + storen += [0x7f] * 64 + npyv_storen(storen, stride, vdata) + assert storen[:-64] == data_storen + assert storen[-64:] == [0x7f] * 64 # detect overflow + + for stride in range(-64, 0): + data_storen = [0xff] * -stride * self.nlanes + for s in range(0, hlanes * stride, stride): + i = (s // stride) * scale + data_storen[s - scale:s or None] = data[i:i + scale] + storen = [0x7f] * 64 + storen += [0xff] * -stride * self.nlanes + npyv_storen(storen, stride, vdata) + assert storen[64:] == data_storen + assert storen[:64] == [0x7f] * 64 # detect overflow + # stride 0 + data_storen = [0x7f] * self.nlanes + storen = data_storen.copy() + data_storen[0:scale] = data[-scale:] + npyv_storen(storen, 0, vdata) + assert storen == data_storen + + @pytest.mark.parametrize("intrin, elsizes, scale", [ + ("self.storen_till", (32, 64), 1), + ("self.storen2_till", (32, 64), 2), + ]) + def test_memory_noncont_partial_store(self, intrin, elsizes, scale): + if self._scalar_size() not in elsizes: + return + npyv_storen_till = eval(intrin) + data = self._data() + vdata = self.load(data) + lanes = list(range(1, self.nlanes + 1)) + lanes += [self.nlanes**2, self.nlanes**4] + hlanes = self.nlanes // scale + for stride in range(1, 64): + for n in lanes: + data_till = [0xff] * stride * self.nlanes + tdata = data[:n * scale] + [0xff] * (self.nlanes - n * scale) + for s in range(0, hlanes * stride, stride)[:n]: + i = (s // stride) * scale + data_till[s:s + scale] = tdata[i:i + scale] + storen_till = [0xff] * stride * self.nlanes + storen_till += [0x7f] * 64 + npyv_storen_till(storen_till, stride, n, vdata) + assert storen_till[:-64] == data_till + assert storen_till[-64:] == [0x7f] * 64 # detect overflow + + for stride in range(-64, 0): + for n in lanes: + data_till = [0xff] * -stride * self.nlanes + tdata = data[:n * scale] + [0xff] * (self.nlanes - n * scale) + for s in range(0, hlanes * stride, stride)[:n]: + i = (s // stride) * scale + data_till[s - scale:s or None] = tdata[i:i + scale] + storen_till = [0x7f] * 64 + storen_till += [0xff] * -stride * self.nlanes + npyv_storen_till(storen_till, stride, n, vdata) + assert storen_till[64:] == data_till + assert storen_till[:64] == [0x7f] * 64 # detect overflow + + # stride 0 + for n in lanes: + data_till = [0x7f] * self.nlanes + storen_till = data_till.copy() + data_till[0:scale] = data[:n * scale][-scale:] + npyv_storen_till(storen_till, 0, n, vdata) + assert storen_till == data_till + + @pytest.mark.parametrize("intrin, table_size, elsize", [ + ("self.lut32", 32, 32), + ("self.lut16", 16, 64) + ]) + def test_lut(self, intrin, table_size, elsize): + """ + Test lookup table intrinsics: + npyv_lut32_##sfx + npyv_lut16_##sfx + """ + if elsize != self._scalar_size(): + return + intrin = eval(intrin) + idx_itrin = getattr(self.npyv, f"setall_u{elsize}") + table = range(table_size) + for i in table: + broadi = self.setall(i) + idx = idx_itrin(i) + lut = intrin(table, idx) + assert lut == broadi + + def test_misc(self): + broadcast_zero = self.zero() + assert broadcast_zero == [0] * self.nlanes + for i in range(1, 10): + broadcasti = self.setall(i) + assert broadcasti == [i] * self.nlanes + + data_a, data_b = self._data(), self._data(reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + # py level of npyv_set_* don't support ignoring the extra specified lanes or + # fill non-specified lanes with zero. + vset = self.set(*data_a) + assert vset == data_a + # py level of npyv_setf_* don't support ignoring the extra specified lanes or + # fill non-specified lanes with the specified scalar. + vsetf = self.setf(10, *data_a) + assert vsetf == data_a + + # We're testing the sanity of _simd's type-vector, + # reinterpret* intrinsics itself are tested via compiler + # during the build of _simd module + sfxes = ["u8", "s8", "u16", "s16", "u32", "s32", "u64", "s64"] + if self.npyv.simd_f64: + sfxes.append("f64") + if self.npyv.simd_f32: + sfxes.append("f32") + for sfx in sfxes: + vec_name = getattr(self, "reinterpret_" + sfx)(vdata_a).__name__ + assert vec_name == "npyv_" + sfx + + # select & mask operations + select_a = self.select(self.cmpeq(self.zero(), self.zero()), vdata_a, vdata_b) + assert select_a == data_a + select_b = self.select(self.cmpneq(self.zero(), self.zero()), vdata_a, vdata_b) + assert select_b == data_b + + # test extract elements + assert self.extract0(vdata_b) == vdata_b[0] + + # cleanup intrinsic is only used with AVX for + # zeroing registers to avoid the AVX-SSE transition penalty, + # so nothing to test here + self.npyv.cleanup() + + def test_reorder(self): + data_a, data_b = self._data(), self._data(reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + # lower half part + data_a_lo = data_a[:self.nlanes // 2] + data_b_lo = data_b[:self.nlanes // 2] + # higher half part + data_a_hi = data_a[self.nlanes // 2:] + data_b_hi = data_b[self.nlanes // 2:] + # combine two lower parts + combinel = self.combinel(vdata_a, vdata_b) + assert combinel == data_a_lo + data_b_lo + # combine two higher parts + combineh = self.combineh(vdata_a, vdata_b) + assert combineh == data_a_hi + data_b_hi + # combine x2 + combine = self.combine(vdata_a, vdata_b) + assert combine == (data_a_lo + data_b_lo, data_a_hi + data_b_hi) + + # zip(interleave) + data_zipl = self.load([ + v for p in zip(data_a_lo, data_b_lo) for v in p + ]) + data_ziph = self.load([ + v for p in zip(data_a_hi, data_b_hi) for v in p + ]) + vzip = self.zip(vdata_a, vdata_b) + assert vzip == (data_zipl, data_ziph) + vzip = [0] * self.nlanes * 2 + self._x2("store")(vzip, (vdata_a, vdata_b)) + assert vzip == list(data_zipl) + list(data_ziph) + + # unzip(deinterleave) + unzip = self.unzip(data_zipl, data_ziph) + assert unzip == (data_a, data_b) + unzip = self._x2("load")(list(data_zipl) + list(data_ziph)) + assert unzip == (data_a, data_b) + + def test_reorder_rev64(self): + # Reverse elements of each 64-bit lane + ssize = self._scalar_size() + if ssize == 64: + return + data_rev64 = [ + y for x in range(0, self.nlanes, 64 // ssize) + for y in reversed(range(x, x + 64 // ssize)) + ] + rev64 = self.rev64(self.load(range(self.nlanes))) + assert rev64 == data_rev64 + + def test_reorder_permi128(self): + """ + Test permuting elements for each 128-bit lane. + npyv_permi128_##sfx + """ + ssize = self._scalar_size() + if ssize < 32: + return + data = self.load(self._data()) + permn = 128 // ssize + permd = permn - 1 + nlane128 = self.nlanes // permn + shfl = [0, 1] if ssize == 64 else [0, 2, 4, 6] + for i in range(permn): + indices = [(i >> shf) & permd for shf in shfl] + vperm = self.permi128(data, *indices) + data_vperm = [ + data[j + (e & -permn)] + for e, j in enumerate(indices * nlane128) + ] + assert vperm == data_vperm + + @pytest.mark.parametrize('func, intrin', [ + (operator.lt, "cmplt"), + (operator.le, "cmple"), + (operator.gt, "cmpgt"), + (operator.ge, "cmpge"), + (operator.eq, "cmpeq") + ]) + def test_operators_comparison(self, func, intrin): + if self._is_fp(): + data_a = self._data() + else: + data_a = self._data(self._int_max() - self.nlanes) + data_b = self._data(self._int_min(), reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + intrin = getattr(self, intrin) + + mask_true = self._true_mask() + + def to_bool(vector): + return [lane == mask_true for lane in vector] + + data_cmp = [func(a, b) for a, b in zip(data_a, data_b)] + cmp = to_bool(intrin(vdata_a, vdata_b)) + assert cmp == data_cmp + + def test_operators_logical(self): + if self._is_fp(): + data_a = self._data() + else: + data_a = self._data(self._int_max() - self.nlanes) + data_b = self._data(self._int_min(), reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + if self._is_fp(): + data_cast_a = self._to_unsigned(vdata_a) + data_cast_b = self._to_unsigned(vdata_b) + cast, cast_data = self._to_unsigned, self._to_unsigned + else: + data_cast_a, data_cast_b = data_a, data_b + cast, cast_data = lambda a: a, self.load + + data_xor = cast_data([a ^ b for a, b in zip(data_cast_a, data_cast_b)]) + vxor = cast(self.xor(vdata_a, vdata_b)) + assert vxor == data_xor + + data_or = cast_data([a | b for a, b in zip(data_cast_a, data_cast_b)]) + vor = cast(getattr(self, "or")(vdata_a, vdata_b)) + assert vor == data_or + + data_and = cast_data([a & b for a, b in zip(data_cast_a, data_cast_b)]) + vand = cast(getattr(self, "and")(vdata_a, vdata_b)) + assert vand == data_and + + data_not = cast_data([~a for a in data_cast_a]) + vnot = cast(getattr(self, "not")(vdata_a)) + assert vnot == data_not + + if self.sfx not in ("u8"): + return + data_andc = [a & ~b for a, b in zip(data_cast_a, data_cast_b)] + vandc = cast(self.andc(vdata_a, vdata_b)) + assert vandc == data_andc + + @pytest.mark.parametrize("intrin", ["any", "all"]) + @pytest.mark.parametrize("data", ( + [1, 2, 3, 4], + [-1, -2, -3, -4], + [0, 1, 2, 3, 4], + [0x7f, 0x7fff, 0x7fffffff, 0x7fffffffffffffff], + [0, -1, -2, -3, 4], + [0], + [1], + [-1] + )) + def test_operators_crosstest(self, intrin, data): + """ + Test intrinsics: + npyv_any_##SFX + npyv_all_##SFX + """ + data_a = self.load(data * self.nlanes) + func = eval(intrin) + intrin = getattr(self, intrin) + desired = func(data_a) + simd = intrin(data_a) + assert not not simd == desired + + def test_conversion_boolean(self): + bsfx = "b" + self.sfx[1:] + to_boolean = getattr(self.npyv, f"cvt_{bsfx}_{self.sfx}") + from_boolean = getattr(self.npyv, f"cvt_{self.sfx}_{bsfx}") + + false_vb = to_boolean(self.setall(0)) + true_vb = self.cmpeq(self.setall(0), self.setall(0)) + assert false_vb != true_vb + + false_vsfx = from_boolean(false_vb) + true_vsfx = from_boolean(true_vb) + assert false_vsfx != true_vsfx + + def test_conversion_expand(self): + """ + Test expand intrinsics: + npyv_expand_u16_u8 + npyv_expand_u32_u16 + """ + if self.sfx not in ("u8", "u16"): + return + totype = self.sfx[0] + str(int(self.sfx[1:]) * 2) + expand = getattr(self.npyv, f"expand_{totype}_{self.sfx}") + # close enough from the edge to detect any deviation + data = self._data(self._int_max() - self.nlanes) + vdata = self.load(data) + edata = expand(vdata) + # lower half part + data_lo = data[:self.nlanes // 2] + # higher half part + data_hi = data[self.nlanes // 2:] + assert edata == (data_lo, data_hi) + + def test_arithmetic_subadd(self): + if self._is_fp(): + data_a = self._data() + else: + data_a = self._data(self._int_max() - self.nlanes) + data_b = self._data(self._int_min(), reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + # non-saturated + data_add = self.load([a + b for a, b in zip(data_a, data_b)]) # load to cast + add = self.add(vdata_a, vdata_b) + assert add == data_add + data_sub = self.load([a - b for a, b in zip(data_a, data_b)]) + sub = self.sub(vdata_a, vdata_b) + assert sub == data_sub + + def test_arithmetic_mul(self): + if self.sfx in ("u64", "s64"): + return + + if self._is_fp(): + data_a = self._data() + else: + data_a = self._data(self._int_max() - self.nlanes) + data_b = self._data(self._int_min(), reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + data_mul = self.load([a * b for a, b in zip(data_a, data_b)]) + mul = self.mul(vdata_a, vdata_b) + assert mul == data_mul + + def test_arithmetic_div(self): + if not self._is_fp(): + return + + data_a, data_b = self._data(), self._data(reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + # load to truncate f64 to precision of f32 + data_div = self.load([a / b for a, b in zip(data_a, data_b)]) + div = self.div(vdata_a, vdata_b) + assert div == data_div + + def test_arithmetic_intdiv(self): + """ + Test integer division intrinsics: + npyv_divisor_##sfx + npyv_divc_##sfx + """ + if self._is_fp(): + return + + int_min = self._int_min() + + def trunc_div(a, d): + """ + Divide towards zero works with large integers > 2^53, + and wrap around overflow similar to what C does. + """ + if d == -1 and a == int_min: + return a + sign_a, sign_d = a < 0, d < 0 + if a == 0 or sign_a == sign_d: + return a // d + return (a + sign_d - sign_a) // d + 1 + + data = [1, -int_min] # to test overflow + data += range(0, 2**8, 2**5) + data += range(0, 2**8, 2**5 - 1) + bsize = self._scalar_size() + if bsize > 8: + data += range(2**8, 2**16, 2**13) + data += range(2**8, 2**16, 2**13 - 1) + if bsize > 16: + data += range(2**16, 2**32, 2**29) + data += range(2**16, 2**32, 2**29 - 1) + if bsize > 32: + data += range(2**32, 2**64, 2**61) + data += range(2**32, 2**64, 2**61 - 1) + # negate + data += [-x for x in data] + for dividend, divisor in itertools.product(data, data): + divisor = self.setall(divisor)[0] # cast + if divisor == 0: + continue + dividend = self.load(self._data(dividend)) + data_divc = [trunc_div(a, divisor) for a in dividend] + divisor_parms = self.divisor(divisor) + divc = self.divc(dividend, divisor_parms) + assert divc == data_divc + + def test_arithmetic_reduce_sum(self): + """ + Test reduce sum intrinsics: + npyv_sum_##sfx + """ + if self.sfx not in ("u32", "u64", "f32", "f64"): + return + # reduce sum + data = self._data() + vdata = self.load(data) + + data_sum = sum(data) + vsum = self.sum(vdata) + assert vsum == data_sum + + def test_arithmetic_reduce_sumup(self): + """ + Test extend reduce sum intrinsics: + npyv_sumup_##sfx + """ + if self.sfx not in ("u8", "u16"): + return + rdata = (0, self.nlanes, self._int_min(), self._int_max() - self.nlanes) + for r in rdata: + data = self._data(r) + vdata = self.load(data) + data_sum = sum(data) + vsum = self.sumup(vdata) + assert vsum == data_sum + + def test_mask_conditional(self): + """ + Conditional addition and subtraction for all supported data types. + Test intrinsics: + npyv_ifadd_##SFX, npyv_ifsub_##SFX + """ + vdata_a = self.load(self._data()) + vdata_b = self.load(self._data(reverse=True)) + true_mask = self.cmpeq(self.zero(), self.zero()) + false_mask = self.cmpneq(self.zero(), self.zero()) + + data_sub = self.sub(vdata_b, vdata_a) + ifsub = self.ifsub(true_mask, vdata_b, vdata_a, vdata_b) + assert ifsub == data_sub + ifsub = self.ifsub(false_mask, vdata_a, vdata_b, vdata_b) + assert ifsub == vdata_b + + data_add = self.add(vdata_b, vdata_a) + ifadd = self.ifadd(true_mask, vdata_b, vdata_a, vdata_b) + assert ifadd == data_add + ifadd = self.ifadd(false_mask, vdata_a, vdata_b, vdata_b) + assert ifadd == vdata_b + + if not self._is_fp(): + return + data_div = self.div(vdata_b, vdata_a) + ifdiv = self.ifdiv(true_mask, vdata_b, vdata_a, vdata_b) + assert ifdiv == data_div + ifdivz = self.ifdivz(true_mask, vdata_b, vdata_a) + assert ifdivz == data_div + ifdiv = self.ifdiv(false_mask, vdata_a, vdata_b, vdata_b) + assert ifdiv == vdata_b + ifdivz = self.ifdivz(false_mask, vdata_a, vdata_b) + assert ifdivz == self.zero() + + +bool_sfx = ("b8", "b16", "b32", "b64") +int_sfx = ("u8", "s8", "u16", "s16", "u32", "s32", "u64", "s64") +fp_sfx = ("f32", "f64") +all_sfx = int_sfx + fp_sfx +tests_registry = { + bool_sfx: _SIMD_BOOL, + int_sfx: _SIMD_INT, + fp_sfx: _SIMD_FP, + ("f32",): _SIMD_FP32, + ("f64",): _SIMD_FP64, + all_sfx: _SIMD_ALL +} +for target_name, npyv in targets.items(): + simd_width = npyv.simd if npyv else '' + pretty_name = target_name.split('__') # multi-target separator + if len(pretty_name) > 1: + # multi-target + pretty_name = f"({' '.join(pretty_name)})" + else: + pretty_name = pretty_name[0] + + skip = "" + skip_sfx = {} + if not npyv: + skip = f"target '{pretty_name}' isn't supported by current machine" + elif not npyv.simd: + skip = f"target '{pretty_name}' isn't supported by NPYV" + else: + if not npyv.simd_f32: + skip_sfx["f32"] = f"target '{pretty_name}' "\ + "doesn't support single-precision" + if not npyv.simd_f64: + skip_sfx["f64"] = f"target '{pretty_name}' doesn't"\ + "support double-precision" + + for sfxes, cls in tests_registry.items(): + for sfx in sfxes: + skip_m = skip_sfx.get(sfx, skip) + inhr = (cls,) + attr = {"npyv": targets[target_name], "sfx": sfx, + "target_name": target_name} + type_name = f"Test{cls.__name__}_{simd_width}_{target_name}_{sfx}" + tcls = type(type_name, inhr, attr) + if skip_m: + pytest.mark.skip(reason=skip_m)(tcls) + globals()[tcls.__name__] = tcls diff --git a/local_packages/numpy/_core/tests/test_simd_module.py b/local_packages/numpy/_core/tests/test_simd_module.py new file mode 100644 index 0000000..3de1596 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_simd_module.py @@ -0,0 +1,105 @@ +import pytest + +from numpy._core._simd import targets + +""" +This testing unit only for checking the sanity of common functionality, +therefore all we need is just to take one submodule that represents any +of enabled SIMD extensions to run the test on it and the second submodule +required to run only one check related to the possibility of mixing +the data types among each submodule. +""" +npyvs = [npyv_mod for npyv_mod in targets.values() if npyv_mod and npyv_mod.simd] +npyv, npyv2 = (npyvs + [None, None])[:2] + +unsigned_sfx = ["u8", "u16", "u32", "u64"] +signed_sfx = ["s8", "s16", "s32", "s64"] +fp_sfx = [] +if npyv and npyv.simd_f32: + fp_sfx.append("f32") +if npyv and npyv.simd_f64: + fp_sfx.append("f64") + +int_sfx = unsigned_sfx + signed_sfx +all_sfx = unsigned_sfx + int_sfx + +@pytest.mark.skipif(not npyv, + reason="could not find any SIMD extension with NPYV support") +class Test_SIMD_MODULE: + + @pytest.mark.parametrize('sfx', all_sfx) + def test_num_lanes(self, sfx): + nlanes = getattr(npyv, "nlanes_" + sfx) + vector = getattr(npyv, "setall_" + sfx)(1) + assert len(vector) == nlanes + + @pytest.mark.parametrize('sfx', all_sfx) + def test_type_name(self, sfx): + vector = getattr(npyv, "setall_" + sfx)(1) + assert vector.__name__ == "npyv_" + sfx + + def test_raises(self): + a, b = [npyv.setall_u32(1)] * 2 + for sfx in all_sfx: + vcb = lambda intrin: getattr(npyv, f"{intrin}_{sfx}") + pytest.raises(TypeError, vcb("add"), a) + pytest.raises(TypeError, vcb("add"), a, b, a) + pytest.raises(TypeError, vcb("setall")) + pytest.raises(TypeError, vcb("setall"), [1]) + pytest.raises(TypeError, vcb("load"), 1) + pytest.raises(ValueError, vcb("load"), [1]) + value = getattr(npyv, f"reinterpret_{sfx}_u32")(a) + pytest.raises(ValueError, vcb("store"), [1], value) + + @pytest.mark.skipif(not npyv2, reason=( + "could not find a second SIMD extension with NPYV support" + )) + def test_nomix(self): + # mix among submodules isn't allowed + a = npyv.setall_u32(1) + a2 = npyv2.setall_u32(1) + pytest.raises(TypeError, npyv.add_u32, a2, a2) + pytest.raises(TypeError, npyv2.add_u32, a, a) + + @pytest.mark.parametrize('sfx', unsigned_sfx) + def test_unsigned_overflow(self, sfx): + nlanes = getattr(npyv, "nlanes_" + sfx) + maxu = (1 << int(sfx[1:])) - 1 + maxu_72 = (1 << 72) - 1 + lane = getattr(npyv, "setall_" + sfx)(maxu_72)[0] + assert lane == maxu + lanes = getattr(npyv, "load_" + sfx)([maxu_72] * nlanes) + assert lanes == [maxu] * nlanes + lane = getattr(npyv, "setall_" + sfx)(-1)[0] + assert lane == maxu + lanes = getattr(npyv, "load_" + sfx)([-1] * nlanes) + assert lanes == [maxu] * nlanes + + @pytest.mark.parametrize('sfx', signed_sfx) + def test_signed_overflow(self, sfx): + nlanes = getattr(npyv, "nlanes_" + sfx) + maxs_72 = (1 << 71) - 1 + lane = getattr(npyv, "setall_" + sfx)(maxs_72)[0] + assert lane == -1 + lanes = getattr(npyv, "load_" + sfx)([maxs_72] * nlanes) + assert lanes == [-1] * nlanes + mins_72 = -1 << 71 + lane = getattr(npyv, "setall_" + sfx)(mins_72)[0] + assert lane == 0 + lanes = getattr(npyv, "load_" + sfx)([mins_72] * nlanes) + assert lanes == [0] * nlanes + + def test_truncate_f32(self): + if not npyv.simd_f32: + pytest.skip("F32 isn't support by the SIMD extension") + f32 = npyv.setall_f32(0.1)[0] + assert f32 != 0.1 + assert round(f32, 1) == 0.1 + + def test_compare(self): + data_range = range(npyv.nlanes_u32) + vdata = npyv.load_u32(data_range) + assert vdata == list(data_range) + assert vdata == tuple(data_range) + for i in data_range: + assert vdata[i] == data_range[i] diff --git a/local_packages/numpy/_core/tests/test_stringdtype.py b/local_packages/numpy/_core/tests/test_stringdtype.py new file mode 100644 index 0000000..4928940 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_stringdtype.py @@ -0,0 +1,1855 @@ +import copy +import itertools +import os +import pickle +import string +import sys +import tempfile + +import pytest + +import numpy as np +from numpy._core.tests._natype import pd_NA +from numpy.dtypes import StringDType +from numpy.testing import IS_PYPY, assert_array_equal + + +def random_unicode_string_list(): + """Returns an array of 10 100-character strings containing random text""" + chars = list(string.ascii_letters + string.digits) + chars = np.array(chars, dtype="U1") + ret = np.random.choice(chars, size=100 * 10, replace=True) + return ret.view("U100") + + +def get_dtype(na_object, coerce=True): + """Helper to work around pd_NA boolean behavior""" + # explicit is check for pd_NA because != with pd_NA returns pd_NA + if na_object is pd_NA or na_object != "unset": + return np.dtypes.StringDType(na_object=na_object, coerce=coerce) + else: + return np.dtypes.StringDType(coerce=coerce) + + +@pytest.fixture(params=[True, False]) +def coerce(request): + """Coerce input to strings or raise an error for non-string input""" + return request.param + + +@pytest.fixture( + params=["unset", None, pd_NA, np.nan, float("nan"), "__nan__"], + ids=["unset", "None", "pandas.NA", "np.nan", "float('nan')", "string nan"], +) +def na_object(request): + """Possible values for the missing data sentinel""" + return request.param + + +@pytest.fixture() +def dtype(na_object, coerce): + """Cartesian project of missing data sentinel and string coercion options""" + return get_dtype(na_object, coerce) + +@pytest.fixture +def string_list(): + """Mix of short and long strings, some with unicode, some without""" + return ["abc", "def", "ghi" * 10, "A¢☃€ 😊" * 100, "Abc" * 1000, "DEF"] + + +@pytest.fixture(params=[True, False]) +def coerce2(request): + """Second copy of the coerce fixture for tests that need two instances""" + return request.param + + +@pytest.fixture( + params=["unset", None, pd_NA, np.nan, float("nan"), "__nan__"], + ids=["unset", "None", "pandas.NA", "np.nan", "float('nan')", "string nan"], +) +def na_object2(request): + """Second copy of the na_object fixture for tests that need two instances""" + return request.param + + +@pytest.fixture() +def dtype2(na_object2, coerce2): + """Second copy of the dtype fixture for tests that need two instances""" + # explicit is check for pd_NA because != with pd_NA returns pd_NA + if na_object2 is pd_NA or na_object2 != "unset": + return StringDType(na_object=na_object2, coerce=coerce2) + else: + return StringDType(coerce=coerce2) + + +def test_dtype_creation(): + hashes = set() + dt = StringDType() + assert not hasattr(dt, "na_object") and dt.coerce is True + hashes.add(hash(dt)) + + dt = StringDType(na_object=None) + assert dt.na_object is None and dt.coerce is True + hashes.add(hash(dt)) + + dt = StringDType(coerce=False) + assert not hasattr(dt, "na_object") and dt.coerce is False + hashes.add(hash(dt)) + + dt = StringDType(na_object=None, coerce=False) + assert dt.na_object is None and dt.coerce is False + hashes.add(hash(dt)) + + assert len(hashes) == 4 + + dt = np.dtype("T") + assert dt == StringDType() + assert dt.kind == "T" + assert dt.char == "T" + + hashes.add(hash(dt)) + assert len(hashes) == 4 + + +def test_dtype_equality(dtype): + assert dtype == dtype + for ch in "SU": + assert dtype != np.dtype(ch) + assert dtype != np.dtype(f"{ch}8") + + +def test_dtype_repr(dtype): + if not hasattr(dtype, "na_object") and dtype.coerce: + assert repr(dtype) == "StringDType()" + elif dtype.coerce: + assert repr(dtype) == f"StringDType(na_object={dtype.na_object!r})" + elif not hasattr(dtype, "na_object"): + assert repr(dtype) == "StringDType(coerce=False)" + else: + assert ( + repr(dtype) + == f"StringDType(na_object={dtype.na_object!r}, coerce=False)" + ) + + +def test_create_with_na(dtype): + if not hasattr(dtype, "na_object"): + pytest.skip("does not have an na object") + na_val = dtype.na_object + string_list = ["hello", na_val, "world"] + arr = np.array(string_list, dtype=dtype) + assert str(arr) == "[" + " ".join([repr(s) for s in string_list]) + "]" + assert arr[1] is dtype.na_object + + +@pytest.mark.parametrize("i", list(range(5))) +def test_set_replace_na(i): + # Test strings of various lengths can be set to NaN and then replaced. + s_empty = "" + s_short = "0123456789" + s_medium = "abcdefghijklmnopqrstuvwxyz" + s_long = "-=+" * 100 + strings = [s_medium, s_empty, s_short, s_medium, s_long] + a = np.array(strings, StringDType(na_object=np.nan)) + for s in [a[i], s_medium + s_short, s_short, s_empty, s_long]: + a[i] = np.nan + assert np.isnan(a[i]) + a[i] = s + assert a[i] == s + assert_array_equal(a, strings[:i] + [s] + strings[i + 1:]) + + +def test_null_roundtripping(): + data = ["hello\0world", "ABC\0DEF\0\0"] + arr = np.array(data, dtype="T") + assert data[0] == arr[0] + assert data[1] == arr[1] + + +def test_string_too_large_error(): + arr = np.array(["a", "b", "c"], dtype=StringDType()) + with pytest.raises(OverflowError): + arr * (sys.maxsize + 1) + + +@pytest.mark.parametrize( + "data", + [ + ["abc", "def", "ghi"], + ["🤣", "📵", "😰"], + ["🚜", "🙃", "😾"], + ["😹", "🚠", "🚌"], + ], +) +def test_array_creation_utf8(dtype, data): + arr = np.array(data, dtype=dtype) + assert str(arr) == "[" + " ".join(["'" + str(d) + "'" for d in data]) + "]" + assert arr.dtype == dtype + + +@pytest.mark.parametrize( + "data", + [ + [1, 2, 3], + [b"abc", b"def", b"ghi"], + [object, object, object], + ], +) +def test_scalars_string_conversion(data, dtype): + try: + str_vals = [str(d.decode('utf-8')) for d in data] + except AttributeError: + str_vals = [str(d) for d in data] + if dtype.coerce: + assert_array_equal( + np.array(data, dtype=dtype), + np.array(str_vals, dtype=dtype), + ) + else: + with pytest.raises(ValueError): + np.array(data, dtype=dtype) + + +@pytest.mark.parametrize( + ("strings"), + [ + ["this", "is", "an", "array"], + ["€", "", "😊"], + ["A¢☃€ 😊", " A☃€¢😊", "☃€😊 A¢", "😊☃A¢ €"], + ], +) +def test_self_casts(dtype, dtype2, strings): + if hasattr(dtype, "na_object"): + strings = strings + [dtype.na_object] + elif hasattr(dtype2, "na_object"): + strings = strings + [""] + arr = np.array(strings, dtype=dtype) + newarr = arr.astype(dtype2) + + if hasattr(dtype, "na_object") and not hasattr(dtype2, "na_object"): + assert newarr[-1] == str(dtype.na_object) + with pytest.raises(TypeError): + arr.astype(dtype2, casting="safe") + elif hasattr(dtype, "na_object") and hasattr(dtype2, "na_object"): + assert newarr[-1] is dtype2.na_object + arr.astype(dtype2, casting="safe") + elif hasattr(dtype2, "na_object"): + assert newarr[-1] == "" + arr.astype(dtype2, casting="safe") + else: + arr.astype(dtype2, casting="safe") + + if hasattr(dtype, "na_object") and hasattr(dtype2, "na_object"): + na1 = dtype.na_object + na2 = dtype2.na_object + if (na1 is not na2 and + # check for pd_NA first because bool(pd_NA) is an error + ((na1 is pd_NA or na2 is pd_NA) or + # the second check is a NaN check, spelled this way + # to avoid errors from math.isnan and np.isnan + (na1 != na2 and not (na1 != na1 and na2 != na2)))): + with pytest.raises(TypeError): + arr[:-1] == newarr[:-1] + return + assert_array_equal(arr[:-1], newarr[:-1]) + + +@pytest.mark.parametrize( + ("strings"), + [ + ["this", "is", "an", "array"], + ["€", "", "😊"], + ["A¢☃€ 😊", " A☃€¢😊", "☃€😊 A¢", "😊☃A¢ €"], + ], +) +class TestStringLikeCasts: + def test_unicode_casts(self, dtype, strings): + arr = np.array(strings, dtype=np.str_).astype(dtype) + expected = np.array(strings, dtype=dtype) + assert_array_equal(arr, expected) + + arr_as_U8 = expected.astype("U8") + assert_array_equal(arr_as_U8, np.array(strings, dtype="U8")) + assert_array_equal(arr_as_U8.astype(dtype), arr) + arr_as_U3 = expected.astype("U3") + assert_array_equal(arr_as_U3, np.array(strings, dtype="U3")) + assert_array_equal( + arr_as_U3.astype(dtype), + np.array([s[:3] for s in strings], dtype=dtype), + ) + + def test_void_casts(self, dtype, strings): + sarr = np.array(strings, dtype=dtype) + utf8_bytes = [s.encode("utf-8") for s in strings] + void_dtype = f"V{max(len(s) for s in utf8_bytes)}" + varr = np.array(utf8_bytes, dtype=void_dtype) + assert_array_equal(varr, sarr.astype(void_dtype)) + assert_array_equal(varr.astype(dtype), sarr) + + def test_bytes_casts(self, dtype, strings): + sarr = np.array(strings, dtype=dtype) + try: + utf8_bytes = [s.encode("ascii") for s in strings] + bytes_dtype = f"S{max(len(s) for s in utf8_bytes)}" + barr = np.array(utf8_bytes, dtype=bytes_dtype) + assert_array_equal(barr, sarr.astype(bytes_dtype)) + assert_array_equal(barr.astype(dtype), sarr) + if dtype.coerce: + barr = np.array(utf8_bytes, dtype=dtype) + assert_array_equal(barr, sarr) + barr = np.array(utf8_bytes, dtype="O") + assert_array_equal(barr.astype(dtype), sarr) + else: + with pytest.raises(ValueError): + np.array(utf8_bytes, dtype=dtype) + except UnicodeEncodeError: + with pytest.raises(UnicodeEncodeError): + sarr.astype("S20") + + +def test_additional_unicode_cast(dtype): + string_list = random_unicode_string_list() + arr = np.array(string_list, dtype=dtype) + # test that this short-circuits correctly + assert_array_equal(arr, arr.astype(arr.dtype)) + # tests the casts via the comparison promoter + assert_array_equal(arr, arr.astype(string_list.dtype)) + + +def test_insert_scalar(dtype, string_list): + """Test that inserting a scalar works.""" + arr = np.array(string_list, dtype=dtype) + scalar_instance = "what" + arr[1] = scalar_instance + assert_array_equal( + arr, + np.array(string_list[:1] + ["what"] + string_list[2:], dtype=dtype), + ) + + +comparison_operators = [ + np.equal, + np.not_equal, + np.greater, + np.greater_equal, + np.less, + np.less_equal, +] + + +@pytest.mark.parametrize("op", comparison_operators) +@pytest.mark.parametrize("o_dtype", [np.str_, object, StringDType()]) +def test_comparisons(string_list, dtype, op, o_dtype): + sarr = np.array(string_list, dtype=dtype) + oarr = np.array(string_list, dtype=o_dtype) + + # test that comparison operators work + res = op(sarr, sarr) + ores = op(oarr, oarr) + # test that promotion works as well + orres = op(sarr, oarr) + olres = op(oarr, sarr) + + assert_array_equal(res, ores) + assert_array_equal(res, orres) + assert_array_equal(res, olres) + + # test we get the correct answer for unequal length strings + sarr2 = np.array([s + "2" for s in string_list], dtype=dtype) + oarr2 = np.array([s + "2" for s in string_list], dtype=o_dtype) + + res = op(sarr, sarr2) + ores = op(oarr, oarr2) + olres = op(oarr, sarr2) + orres = op(sarr, oarr2) + + assert_array_equal(res, ores) + assert_array_equal(res, olres) + assert_array_equal(res, orres) + + res = op(sarr2, sarr) + ores = op(oarr2, oarr) + olres = op(oarr2, sarr) + orres = op(sarr2, oarr) + + assert_array_equal(res, ores) + assert_array_equal(res, olres) + assert_array_equal(res, orres) + + +def test_isnan(dtype, string_list): + if not hasattr(dtype, "na_object"): + pytest.skip("no na support") + sarr = np.array(string_list + [dtype.na_object], dtype=dtype) + is_nan = isinstance(dtype.na_object, float) and np.isnan(dtype.na_object) + bool_errors = 0 + try: + bool(dtype.na_object) + except TypeError: + bool_errors = 1 + if is_nan or bool_errors: + # isnan is only true when na_object is a NaN + assert_array_equal( + np.isnan(sarr), + np.array([0] * len(string_list) + [1], dtype=np.bool), + ) + else: + assert not np.any(np.isnan(sarr)) + + +def test_pickle(dtype, string_list): + arr = np.array(string_list, dtype=dtype) + + with tempfile.NamedTemporaryFile("wb", delete=False) as f: + pickle.dump([arr, dtype], f) + + with open(f.name, "rb") as f: + res = pickle.load(f) + + assert_array_equal(res[0], arr) + assert res[1] == dtype + + os.remove(f.name) + + +def test_stdlib_copy(dtype, string_list): + arr = np.array(string_list, dtype=dtype) + + assert_array_equal(copy.copy(arr), arr) + assert_array_equal(copy.deepcopy(arr), arr) + + +@pytest.mark.parametrize( + "strings", + [ + ["left", "right", "leftovers", "righty", "up", "down"], + [ + "left" * 10, + "right" * 10, + "leftovers" * 10, + "righty" * 10, + "up" * 10, + ], + ["🤣🤣", "🤣", "📵", "😰"], + ["🚜", "🙃", "😾"], + ["😹", "🚠", "🚌"], + ["A¢☃€ 😊", " A☃€¢😊", "☃€😊 A¢", "😊☃A¢ €"], + ], +) +def test_sort(dtype, strings): + """Test that sorting matches python's internal sorting.""" + + def test_sort(strings, arr_sorted): + arr = np.array(strings, dtype=dtype) + na_object = getattr(arr.dtype, "na_object", "") + if na_object is None and None in strings: + with pytest.raises( + ValueError, + match="Cannot compare null that is not a nan-like value", + ): + np.argsort(arr) + argsorted = None + elif na_object is pd_NA or na_object != '': + argsorted = None + else: + argsorted = np.argsort(arr) + np.random.default_rng().shuffle(arr) + if na_object is None and None in strings: + with pytest.raises( + ValueError, + match="Cannot compare null that is not a nan-like value", + ): + arr.sort() + else: + arr.sort() + assert np.array_equal(arr, arr_sorted, equal_nan=True) + if argsorted is not None: + assert np.array_equal(argsorted, np.argsort(strings)) + + # make a copy so we don't mutate the lists in the fixture + strings = strings.copy() + arr_sorted = np.array(sorted(strings), dtype=dtype) + test_sort(strings, arr_sorted) + + if not hasattr(dtype, "na_object"): + return + + # make sure NAs get sorted to the end of the array and string NAs get + # sorted like normal strings + strings.insert(0, dtype.na_object) + strings.insert(2, dtype.na_object) + # can't use append because doing that with NA converts + # the result to object dtype + if not isinstance(dtype.na_object, str): + arr_sorted = np.array( + arr_sorted.tolist() + [dtype.na_object, dtype.na_object], + dtype=dtype, + ) + else: + arr_sorted = np.array(sorted(strings), dtype=dtype) + + test_sort(strings, arr_sorted) + + +@pytest.mark.parametrize( + "strings", + [ + ["A¢☃€ 😊", " A☃€¢😊", "☃€😊 A¢", "😊☃A¢ €"], + ["A¢☃€ 😊", "", " ", " "], + ["", "a", "😸", "ááðfáíóåéë"], + ], +) +def test_nonzero(strings, na_object): + dtype = get_dtype(na_object) + arr = np.array(strings, dtype=dtype) + is_nonzero = np.array( + [i for i, item in enumerate(strings) if len(item) != 0]) + assert_array_equal(arr.nonzero()[0], is_nonzero) + + if na_object is not pd_NA and na_object == 'unset': + return + + strings_with_na = np.array(strings + [na_object], dtype=dtype) + is_nan = np.isnan(np.array([dtype.na_object], dtype=dtype))[0] + + if is_nan: + assert strings_with_na.nonzero()[0][-1] == 4 + else: + assert strings_with_na.nonzero()[0][-1] == 3 + + # check that the casting to bool and nonzero give consistent results + assert_array_equal(strings_with_na[strings_with_na.nonzero()], + strings_with_na[strings_with_na.astype(bool)]) + + +def test_where(string_list, na_object): + dtype = get_dtype(na_object) + a = np.array(string_list, dtype=dtype) + b = a[::-1] + res = np.where([True, False, True, False, True, False], a, b) + assert_array_equal(res, [a[0], b[1], a[2], b[3], a[4], b[5]]) + + +def test_fancy_indexing(string_list): + sarr = np.array(string_list, dtype="T") + assert_array_equal(sarr, sarr[np.arange(sarr.shape[0])]) + + inds = [ + [True, True], + [0, 1], + ..., + np.array([0, 1], dtype='uint8'), + ] + + lops = [ + ['a' * 25, 'b' * 25], + ['', ''], + ['hello', 'world'], + ['hello', 'world' * 25], + ] + + # see gh-27003 and gh-27053 + for ind in inds: + for lop in lops: + a = np.array(lop, dtype="T") + assert_array_equal(a[ind], a) + rop = ['d' * 25, 'e' * 25] + for b in [rop, np.array(rop, dtype="T")]: + a[ind] = b + assert_array_equal(a, b) + assert a[0] == 'd' * 25 + + # see gh-29279 + data = [ + ["AAAAAAAAAAAAAAAAA"], + ["BBBBBBBBBBBBBBBBBBBBBBBBBBBBB"], + ["CCCCCCCCCCCCCCCCC"], + ["DDDDDDDDDDDDDDDDD"], + ] + sarr = np.array(data, dtype=np.dtypes.StringDType()) + uarr = np.array(data, dtype="U30") + for ind in [[0], [1], [2], [3], [[0, 0]], [[1, 1, 3]], [[1, 1]]]: + assert_array_equal(sarr[ind], uarr[ind]) + + +def test_flatiter_indexing(): + # see gh-29659 + arr = np.array(['hello', 'world'], dtype='T') + arr.flat[:] = 9223372036854775 + assert_array_equal(arr, np.array([9223372036854775] * 2, dtype='T')) + + +def test_creation_functions(): + assert_array_equal(np.zeros(3, dtype="T"), ["", "", ""]) + assert_array_equal(np.empty(3, dtype="T"), ["", "", ""]) + + assert np.zeros(3, dtype="T")[0] == "" + assert np.empty(3, dtype="T")[0] == "" + + +def test_concatenate(string_list): + sarr = np.array(string_list, dtype="T") + sarr_cat = np.array(string_list + string_list, dtype="T") + + assert_array_equal(np.concatenate([sarr], axis=0), sarr) + + +def test_resize_method(string_list): + sarr = np.array(string_list, dtype="T") + if IS_PYPY: + sarr.resize(len(string_list) + 3, refcheck=False) + else: + sarr.resize(len(string_list) + 3) + assert_array_equal(sarr, np.array(string_list + [''] * 3, dtype="T")) + + +def test_create_with_copy_none(string_list): + arr = np.array(string_list, dtype=StringDType()) + # create another stringdtype array with an arena that has a different + # in-memory layout than the first array + arr_rev = np.array(string_list[::-1], dtype=StringDType()) + + # this should create a copy and the resulting array + # shouldn't share an allocator or arena with arr_rev, despite + # explicitly passing arr_rev.dtype + arr_copy = np.array(arr, copy=None, dtype=arr_rev.dtype) + np.testing.assert_array_equal(arr, arr_copy) + assert arr_copy.base is None + + with pytest.raises(ValueError, match="Unable to avoid copy"): + np.array(arr, copy=False, dtype=arr_rev.dtype) + + # because we're using arr's dtype instance, the view is safe + arr_view = np.array(arr, copy=None, dtype=arr.dtype) + np.testing.assert_array_equal(arr, arr) + np.testing.assert_array_equal(arr_view[::-1], arr_rev) + assert arr_view is arr + + +def test_astype_copy_false(): + orig_dt = StringDType() + arr = np.array(["hello", "world"], dtype=StringDType()) + assert not arr.astype(StringDType(coerce=False), copy=False).dtype.coerce + + assert arr.astype(orig_dt, copy=False).dtype is orig_dt + +@pytest.mark.parametrize( + "strings", + [ + ["left", "right", "leftovers", "righty", "up", "down"], + ["🤣🤣", "🤣", "📵", "😰"], + ["🚜", "🙃", "😾"], + ["😹", "🚠", "🚌"], + ["A¢☃€ 😊", " A☃€¢😊", "☃€😊 A¢", "😊☃A¢ €"], + ], +) +def test_argmax(strings): + """Test that argmax/argmin matches what python calculates.""" + arr = np.array(strings, dtype="T") + assert np.argmax(arr) == strings.index(max(strings)) + assert np.argmin(arr) == strings.index(min(strings)) + + +@pytest.mark.parametrize( + "arrfunc,expected", + [ + [np.sort, None], + [np.nonzero, (np.array([], dtype=np.int_),)], + [np.argmax, 0], + [np.argmin, 0], + ], +) +def test_arrfuncs_zeros(arrfunc, expected): + arr = np.zeros(10, dtype="T") + result = arrfunc(arr) + if expected is None: + expected = arr + assert_array_equal(result, expected, strict=True) + + +@pytest.mark.parametrize( + ("strings", "cast_answer", "any_answer", "all_answer"), + [ + [["hello", "world"], [True, True], True, True], + [["", ""], [False, False], False, False], + [["hello", ""], [True, False], True, False], + [["", "world"], [False, True], True, False], + ], +) +def test_cast_to_bool(strings, cast_answer, any_answer, all_answer): + sarr = np.array(strings, dtype="T") + assert_array_equal(sarr.astype("bool"), cast_answer) + + assert np.any(sarr) == any_answer + assert np.all(sarr) == all_answer + + +@pytest.mark.parametrize( + ("strings", "cast_answer"), + [ + [[True, True], ["True", "True"]], + [[False, False], ["False", "False"]], + [[True, False], ["True", "False"]], + [[False, True], ["False", "True"]], + ], +) +def test_cast_from_bool(strings, cast_answer): + barr = np.array(strings, dtype=bool) + assert_array_equal(barr.astype("T"), np.array(cast_answer, dtype="T")) + + +@pytest.mark.parametrize("bitsize", [8, 16, 32, 64]) +@pytest.mark.parametrize("signed", [True, False]) +def test_sized_integer_casts(bitsize, signed): + idtype = f"int{bitsize}" + if signed: + inp = [-(2**p - 1) for p in reversed(range(bitsize - 1))] + inp += [2**p - 1 for p in range(1, bitsize - 1)] + else: + idtype = "u" + idtype + inp = [2**p - 1 for p in range(bitsize)] + ainp = np.array(inp, dtype=idtype) + assert_array_equal(ainp, ainp.astype("T").astype(idtype)) + + # safe casting works + ainp.astype("T", casting="safe") + + with pytest.raises(TypeError): + ainp.astype("T").astype(idtype, casting="safe") + + oob = [str(2**bitsize), str(-(2**bitsize))] + with pytest.raises(OverflowError): + np.array(oob, dtype="T").astype(idtype) + + with pytest.raises(ValueError): + np.array(["1", np.nan, "3"], + dtype=StringDType(na_object=np.nan)).astype(idtype) + + +@pytest.mark.parametrize("typename", ["byte", "short", "int", "longlong"]) +@pytest.mark.parametrize("signed", ["", "u"]) +def test_unsized_integer_casts(typename, signed): + idtype = f"{signed}{typename}" + + inp = [1, 2, 3, 4] + ainp = np.array(inp, dtype=idtype) + assert_array_equal(ainp, ainp.astype("T").astype(idtype)) + + +@pytest.mark.parametrize( + "typename", + [ + pytest.param( + "longdouble", + marks=pytest.mark.xfail( + np.dtypes.LongDoubleDType() != np.dtypes.Float64DType(), + reason="numpy lacks an ld2a implementation", + strict=True, + ), + ), + "float64", + "float32", + "float16", + ], +) +def test_float_casts(typename): + inp = [1.1, 2.8, -3.2, 2.7e4] + ainp = np.array(inp, dtype=typename) + assert_array_equal(ainp, ainp.astype("T").astype(typename)) + + inp = [0.1] + sres = np.array(inp, dtype=typename).astype("T") + res = sres.astype(typename) + assert_array_equal(np.array(inp, dtype=typename), res) + assert sres[0] == "0.1" + + if typename == "longdouble": + # let's not worry about platform-dependent rounding of longdouble + return + + fi = np.finfo(typename) + + inp = [1e-324, fi.smallest_subnormal, -1e-324, -fi.smallest_subnormal] + eres = [0, fi.smallest_subnormal, -0, -fi.smallest_subnormal] + res = np.array(inp, dtype=typename).astype("T").astype(typename) + assert_array_equal(eres, res) + + inp = [2e308, fi.max, -2e308, fi.min] + eres = [np.inf, fi.max, -np.inf, fi.min] + res = np.array(inp, dtype=typename).astype("T").astype(typename) + assert_array_equal(eres, res) + + +def test_float_nan_cast_na_object(): + # gh-28157 + dt = np.dtypes.StringDType(na_object=np.nan) + arr1 = np.full((1,), fill_value=np.nan, dtype=dt) + arr2 = np.full_like(arr1, fill_value=np.nan) + + assert arr1.item() is np.nan + assert arr2.item() is np.nan + + inp = [1.2, 2.3, np.nan] + arr = np.array(inp).astype(dt) + assert arr[2] is np.nan + assert arr[0] == '1.2' + + +@pytest.mark.parametrize( + "typename", + [ + "csingle", + "cdouble", + pytest.param( + "clongdouble", + marks=pytest.mark.xfail( + np.dtypes.CLongDoubleDType() != np.dtypes.Complex128DType(), + reason="numpy lacks an ld2a implementation", + strict=True, + ), + ), + ], +) +def test_cfloat_casts(typename): + inp = [1.1 + 1.1j, 2.8 + 2.8j, -3.2 - 3.2j, 2.7e4 + 2.7e4j] + ainp = np.array(inp, dtype=typename) + assert_array_equal(ainp, ainp.astype("T").astype(typename)) + + inp = [0.1 + 0.1j] + sres = np.array(inp, dtype=typename).astype("T") + res = sres.astype(typename) + assert_array_equal(np.array(inp, dtype=typename), res) + assert sres[0] == "(0.1+0.1j)" + + +def test_take(string_list): + sarr = np.array(string_list, dtype="T") + res = sarr.take(np.arange(len(string_list))) + assert_array_equal(sarr, res) + + # make sure it also works for out + out = np.empty(len(string_list), dtype="T") + out[0] = "hello" + res = sarr.take(np.arange(len(string_list)), out=out) + assert res is out + assert_array_equal(sarr, res) + + +@pytest.mark.parametrize("use_out", [True, False]) +@pytest.mark.parametrize( + "ufunc_name,func", + [ + ("min", min), + ("max", max), + ], +) +def test_ufuncs_minmax(string_list, ufunc_name, func, use_out): + """Test that the min/max ufuncs match Python builtin min/max behavior.""" + arr = np.array(string_list, dtype="T") + uarr = np.array(string_list, dtype=str) + res = np.array(func(string_list), dtype="T") + assert_array_equal(getattr(arr, ufunc_name)(), res) + + ufunc = getattr(np, ufunc_name + "imum") + + if use_out: + res = ufunc(arr, arr, out=arr) + else: + res = ufunc(arr, arr) + + assert_array_equal(uarr, res) + assert_array_equal(getattr(arr, ufunc_name)(), func(string_list)) + + +def test_max_regression(): + arr = np.array(['y', 'y', 'z'], dtype="T") + assert arr.max() == 'z' + + +@pytest.mark.parametrize("use_out", [True, False]) +@pytest.mark.parametrize( + "other_strings", + [ + ["abc", "def" * 500, "ghi" * 16, "🤣" * 100, "📵", "😰"], + ["🚜", "🙃", "😾", "😹", "🚠", "🚌"], + ["🥦", "¨", "⨯", "∰ ", "⨌ ", "⎶ "], + ], +) +def test_ufunc_add(dtype, string_list, other_strings, use_out): + arr1 = np.array(string_list, dtype=dtype) + arr2 = np.array(other_strings, dtype=dtype) + result = np.array([a + b for a, b in zip(arr1, arr2)], dtype=dtype) + + if use_out: + res = np.add(arr1, arr2, out=arr1) + else: + res = np.add(arr1, arr2) + + assert_array_equal(res, result) + + if not hasattr(dtype, "na_object"): + return + + is_nan = isinstance(dtype.na_object, float) and np.isnan(dtype.na_object) + is_str = isinstance(dtype.na_object, str) + bool_errors = 0 + try: + bool(dtype.na_object) + except TypeError: + bool_errors = 1 + + arr1 = np.array([dtype.na_object] + string_list, dtype=dtype) + arr2 = np.array(other_strings + [dtype.na_object], dtype=dtype) + + if is_nan or bool_errors or is_str: + res = np.add(arr1, arr2) + assert_array_equal(res[1:-1], arr1[1:-1] + arr2[1:-1]) + if not is_str: + assert res[0] is dtype.na_object and res[-1] is dtype.na_object + else: + assert res[0] == dtype.na_object + arr2[0] + assert res[-1] == arr1[-1] + dtype.na_object + else: + with pytest.raises(ValueError): + np.add(arr1, arr2) + + +def test_ufunc_add_reduce(dtype): + values = ["a", "this is a long string", "c"] + arr = np.array(values, dtype=dtype) + out = np.empty((), dtype=dtype) + + expected = np.array("".join(values), dtype=dtype) + assert_array_equal(np.add.reduce(arr), expected) + + np.add.reduce(arr, out=out) + assert_array_equal(out, expected) + + +def test_add_promoter(string_list): + arr = np.array(string_list, dtype=StringDType()) + lresult = np.array(["hello" + s for s in string_list], dtype=StringDType()) + rresult = np.array([s + "hello" for s in string_list], dtype=StringDType()) + + for op in ["hello", np.str_("hello"), np.array(["hello"])]: + assert_array_equal(op + arr, lresult) + assert_array_equal(arr + op, rresult) + + # The promoter should be able to handle things if users pass `dtype=` + res = np.add("hello", string_list, dtype=StringDType) + assert res.dtype == StringDType() + + # The promoter should not kick in if users override the input, + # which means arr is cast, this fails because of the unknown length. + with pytest.raises(TypeError, match="cannot cast dtype"): + np.add(arr, "add", signature=("U", "U", None), casting="unsafe") + + # But it must simply reject the following: + with pytest.raises(TypeError, match=".*did not contain a loop"): + np.add(arr, "add", signature=(None, "U", None)) + + with pytest.raises(TypeError, match=".*did not contain a loop"): + np.add("a", "b", signature=("U", "U", StringDType)) + + +def test_add_no_legacy_promote_with_signature(): + # Possibly misplaced, but useful to test with string DType. We check that + # if there is clearly no loop found, a stray `dtype=` doesn't break things + # Regression test for the bad error in gh-26735 + # (If legacy promotion is gone, this can be deleted...) + with pytest.raises(TypeError, match=".*did not contain a loop"): + np.add("3", 6, dtype=StringDType) + + +def test_add_promoter_reduce(): + # Exact TypeError could change, but ensure StringDtype doesn't match + with pytest.raises(TypeError, match="the resolved dtypes are not"): + np.add.reduce(np.array(["a", "b"], dtype="U")) + + # On the other hand, using `dtype=T` in the *ufunc* should work. + np.add.reduce(np.array(["a", "b"], dtype="U"), dtype=np.dtypes.StringDType) + + +def test_multiply_reduce(): + # At the time of writing (NumPy 2.0) this is very limited (and rather + # ridiculous anyway). But it works and actually makes some sense... + # (NumPy does not allow non-scalar initial values) + repeats = np.array([2, 3, 4]) + val = "school-🚌" + res = np.multiply.reduce(repeats, initial=val, dtype=np.dtypes.StringDType) + assert res == val * np.prod(repeats) + + +def test_multiply_two_string_raises(): + arr = np.array(["hello", "world"], dtype="T") + with pytest.raises(np._core._exceptions._UFuncNoLoopError): + np.multiply(arr, arr) + + +@pytest.mark.parametrize("use_out", [True, False]) +@pytest.mark.parametrize("other", [2, [2, 1, 3, 4, 1, 3]]) +@pytest.mark.parametrize( + "other_dtype", + [ + None, + "int8", + "int16", + "int32", + "int64", + "uint8", + "uint16", + "uint32", + "uint64", + "short", + "int", + "intp", + "long", + "longlong", + "ushort", + "uint", + "uintp", + "ulong", + "ulonglong", + ], +) +def test_ufunc_multiply(dtype, string_list, other, other_dtype, use_out): + """Test the two-argument ufuncs match python builtin behavior.""" + arr = np.array(string_list, dtype=dtype) + if other_dtype is not None: + other_dtype = np.dtype(other_dtype) + try: + len(other) + result = [s * o for s, o in zip(string_list, other)] + other = np.array(other) + if other_dtype is not None: + other = other.astype(other_dtype) + except TypeError: + if other_dtype is not None: + other = other_dtype.type(other) + result = [s * other for s in string_list] + + if use_out: + arr_cache = arr.copy() + lres = np.multiply(arr, other, out=arr) + assert_array_equal(lres, result) + arr[:] = arr_cache + assert lres is arr + arr *= other + assert_array_equal(arr, result) + arr[:] = arr_cache + rres = np.multiply(other, arr, out=arr) + assert rres is arr + assert_array_equal(rres, result) + else: + lres = arr * other + assert_array_equal(lres, result) + rres = other * arr + assert_array_equal(rres, result) + + if not hasattr(dtype, "na_object"): + return + + is_nan = np.isnan(np.array([dtype.na_object], dtype=dtype))[0] + is_str = isinstance(dtype.na_object, str) + bool_errors = 0 + try: + bool(dtype.na_object) + except TypeError: + bool_errors = 1 + + arr = np.array(string_list + [dtype.na_object], dtype=dtype) + + try: + len(other) + other = np.append(other, 3) + if other_dtype is not None: + other = other.astype(other_dtype) + except TypeError: + pass + + if is_nan or bool_errors or is_str: + for res in [arr * other, other * arr]: + assert_array_equal(res[:-1], result) + if not is_str: + assert res[-1] is dtype.na_object + else: + try: + assert res[-1] == dtype.na_object * other[-1] + except (IndexError, TypeError): + assert res[-1] == dtype.na_object * other + else: + with pytest.raises(TypeError): + arr * other + with pytest.raises(TypeError): + other * arr + + +def test_findlike_promoters(): + r = "Wally" + l = "Where's Wally?" + s = np.int32(3) + e = np.int8(13) + for dtypes in [("T", "U"), ("U", "T")]: + for function, answer in [ + (np.strings.index, 8), + (np.strings.endswith, True), + ]: + assert answer == function( + np.array(l, dtype=dtypes[0]), np.array(r, dtype=dtypes[1]), s, e + ) + + +def test_strip_promoter(): + arg = ["Hello!!!!", "Hello??!!"] + strip_char = "!" + answer = ["Hello", "Hello??"] + for dtypes in [("T", "U"), ("U", "T")]: + result = np.strings.strip( + np.array(arg, dtype=dtypes[0]), + np.array(strip_char, dtype=dtypes[1]) + ) + assert_array_equal(result, answer) + assert result.dtype.char == "T" + + +def test_replace_promoter(): + arg = ["Hello, planet!", "planet, Hello!"] + old = "planet" + new = "world" + answer = ["Hello, world!", "world, Hello!"] + for dtypes in itertools.product("TU", repeat=3): + if dtypes == ("U", "U", "U"): + continue + answer_arr = np.strings.replace( + np.array(arg, dtype=dtypes[0]), + np.array(old, dtype=dtypes[1]), + np.array(new, dtype=dtypes[2]), + ) + assert_array_equal(answer_arr, answer) + assert answer_arr.dtype.char == "T" + + +def test_center_promoter(): + arg = ["Hello", "planet!"] + fillchar = "/" + for dtypes in [("T", "U"), ("U", "T")]: + answer = np.strings.center( + np.array(arg, dtype=dtypes[0]), 9, np.array(fillchar, dtype=dtypes[1]) + ) + assert_array_equal(answer, ["//Hello//", "/planet!/"]) + assert answer.dtype.char == "T" + + +DATETIME_INPUT = [ + np.datetime64("1923-04-14T12:43:12"), + np.datetime64("1994-06-21T14:43:15"), + np.datetime64("2001-10-15T04:10:32"), + np.datetime64("NaT"), + np.datetime64("1995-11-25T16:02:16"), + np.datetime64("2005-01-04T03:14:12"), + np.datetime64("2041-12-03T14:05:03"), +] + + +TIMEDELTA_INPUT = [ + np.timedelta64(12358, "s"), + np.timedelta64(23, "s"), + np.timedelta64(74, "s"), + np.timedelta64("NaT"), + np.timedelta64(23, "s"), + np.timedelta64(73, "s"), + np.timedelta64(7, "s"), +] + + +@pytest.mark.parametrize( + "input_data, input_dtype", + [ + (DATETIME_INPUT, "M8[s]"), + (TIMEDELTA_INPUT, "m8[s]") + ] +) +def test_datetime_timedelta_cast(dtype, input_data, input_dtype): + + a = np.array(input_data, dtype=input_dtype) + + has_na = hasattr(dtype, "na_object") + is_str = isinstance(getattr(dtype, "na_object", None), str) + + if not has_na or is_str: + a = np.delete(a, 3) + + sa = a.astype(dtype) + ra = sa.astype(a.dtype) + + if has_na and not is_str: + assert sa[3] is dtype.na_object + assert np.isnat(ra[3]) + + assert_array_equal(a, ra) + + if has_na and not is_str: + # don't worry about comparing how NaT is converted + sa = np.delete(sa, 3) + a = np.delete(a, 3) + + if input_dtype.startswith("M"): + assert_array_equal(sa, a.astype("U")) + else: + # The timedelta to unicode cast produces strings + # that aren't round-trippable and we don't want to + # reproduce that behavior in stringdtype + assert_array_equal(sa, a.astype("int64").astype("U")) + + +def test_nat_casts(): + s = 'nat' + all_nats = itertools.product(*zip(s.upper(), s.lower())) + all_nats = list(map(''.join, all_nats)) + NaT_dt = np.datetime64('NaT') + NaT_td = np.timedelta64('NaT') + for na_object in [np._NoValue, None, np.nan, 'nat', '']: + # numpy treats empty string and all case combinations of 'nat' as NaT + dtype = StringDType(na_object=na_object) + arr = np.array([''] + all_nats, dtype=dtype) + dt_array = arr.astype('M8[s]') + td_array = arr.astype('m8[s]') + assert_array_equal(dt_array, NaT_dt) + assert_array_equal(td_array, NaT_td) + + if na_object is np._NoValue: + output_object = 'NaT' + else: + output_object = na_object + + for arr in [dt_array, td_array]: + assert_array_equal( + arr.astype(dtype), + np.array([output_object] * arr.size, dtype=dtype)) + + +def test_nat_conversion(): + for nat in [np.datetime64("NaT", "s"), np.timedelta64("NaT", "s")]: + with pytest.raises(ValueError, match="string coercion is disabled"): + np.array(["a", nat], dtype=StringDType(coerce=False)) + + +def test_growing_strings(dtype): + # growing a string leads to a heap allocation, this tests to make sure + # we do that bookkeeping correctly for all possible starting cases + data = [ + "hello", # a short string + "abcdefghijklmnopqestuvwxyz", # a medium heap-allocated string + "hello" * 200, # a long heap-allocated string + ] + + arr = np.array(data, dtype=dtype) + uarr = np.array(data, dtype=str) + + for _ in range(5): + arr = arr + arr + uarr = uarr + uarr + + assert_array_equal(arr, uarr) + + +def test_assign_medium_strings(): + # see gh-29261 + N = 9 + src = np.array( + ( + ['0' * 256] * 3 + ['0' * 255] + ['0' * 256] + ['0' * 255] + + ['0' * 256] * 2 + ['0' * 255] + ), dtype='T') + dst = np.array( + ( + ['0' * 255] + ['0' * 256] * 2 + ['0' * 255] + ['0' * 256] + + ['0' * 255] + [''] * 5 + ), dtype='T') + + dst[1:N + 1] = src + assert_array_equal(dst[1:N + 1], src) + + +UFUNC_TEST_DATA = [ + "hello" * 10, + "Ae¢☃€ 😊" * 20, + "entry\nwith\nnewlines", + "entry\twith\ttabs", +] + + +@pytest.fixture +def string_array(dtype): + return np.array(UFUNC_TEST_DATA, dtype=dtype) + + +@pytest.fixture +def unicode_array(): + return np.array(UFUNC_TEST_DATA, dtype=np.str_) + + +NAN_PRESERVING_FUNCTIONS = [ + "capitalize", + "expandtabs", + "lower", + "lstrip", + "rstrip", + "splitlines", + "strip", + "swapcase", + "title", + "upper", +] + +BOOL_OUTPUT_FUNCTIONS = [ + "isalnum", + "isalpha", + "isdigit", + "islower", + "isspace", + "istitle", + "isupper", + "isnumeric", + "isdecimal", +] + +UNARY_FUNCTIONS = [ + "str_len", + "capitalize", + "expandtabs", + "isalnum", + "isalpha", + "isdigit", + "islower", + "isspace", + "istitle", + "isupper", + "lower", + "lstrip", + "rstrip", + "splitlines", + "strip", + "swapcase", + "title", + "upper", + "isnumeric", + "isdecimal", + "isalnum", + "islower", + "istitle", + "isupper", +] + +UNIMPLEMENTED_VEC_STRING_FUNCTIONS = [ + "capitalize", + "expandtabs", + "lower", + "splitlines", + "swapcase", + "title", + "upper", +] + +ONLY_IN_NP_CHAR = [ + "join", + "split", + "rsplit", + "splitlines" +] + + +@pytest.mark.parametrize("function_name", UNARY_FUNCTIONS) +def test_unary(string_array, unicode_array, function_name): + if function_name in ONLY_IN_NP_CHAR: + func = getattr(np.char, function_name) + else: + func = getattr(np.strings, function_name) + dtype = string_array.dtype + sres = func(string_array) + ures = func(unicode_array) + if sres.dtype == StringDType(): + ures = ures.astype(StringDType()) + assert_array_equal(sres, ures) + + if not hasattr(dtype, "na_object"): + return + + is_nan = np.isnan(np.array([dtype.na_object], dtype=dtype))[0] + is_str = isinstance(dtype.na_object, str) + na_arr = np.insert(string_array, 0, dtype.na_object) + + if function_name in UNIMPLEMENTED_VEC_STRING_FUNCTIONS: + if not is_str: + # to avoid these errors we'd need to add NA support to _vec_string + with pytest.raises((ValueError, TypeError)): + func(na_arr) + elif function_name == "splitlines": + assert func(na_arr)[0] == func(dtype.na_object)[()] + else: + assert func(na_arr)[0] == func(dtype.na_object) + return + if function_name == "str_len" and not is_str: + # str_len always errors for any non-string null, even NA ones because + # it has an integer result + with pytest.raises(ValueError): + func(na_arr) + return + if function_name in BOOL_OUTPUT_FUNCTIONS: + if is_nan: + assert func(na_arr)[0] is np.False_ + elif is_str: + assert func(na_arr)[0] == func(dtype.na_object) + else: + with pytest.raises(ValueError): + func(na_arr) + return + if not (is_nan or is_str): + with pytest.raises(ValueError): + func(na_arr) + return + res = func(na_arr) + if is_nan and function_name in NAN_PRESERVING_FUNCTIONS: + assert res[0] is dtype.na_object + elif is_str: + assert res[0] == func(dtype.na_object) + + +unicode_bug_fail = pytest.mark.xfail( + reason="unicode output width is buggy", strict=True +) + +# None means that the argument is a string array +BINARY_FUNCTIONS = [ + ("add", (None, None)), + ("multiply", (None, 2)), + ("mod", ("format: %s", None)), + ("center", (None, 25)), + ("count", (None, "A")), + ("encode", (None, "UTF-8")), + ("endswith", (None, "lo")), + ("find", (None, "A")), + ("index", (None, "e")), + ("join", ("-", None)), + ("ljust", (None, 12)), + ("lstrip", (None, "A")), + ("partition", (None, "A")), + ("replace", (None, "A", "B")), + ("rfind", (None, "A")), + ("rindex", (None, "e")), + ("rjust", (None, 12)), + ("rsplit", (None, "A")), + ("rstrip", (None, "A")), + ("rpartition", (None, "A")), + ("split", (None, "A")), + ("strip", (None, "A")), + ("startswith", (None, "A")), + ("zfill", (None, 12)), +] + +PASSES_THROUGH_NAN_NULLS = [ + "add", + "center", + "ljust", + "multiply", + "replace", + "rjust", + "strip", + "lstrip", + "rstrip", + "replace" + "zfill", +] + +NULLS_ARE_FALSEY = [ + "startswith", + "endswith", +] + +NULLS_ALWAYS_ERROR = [ + "count", + "find", + "rfind", +] + +SUPPORTS_NULLS = ( + PASSES_THROUGH_NAN_NULLS + + NULLS_ARE_FALSEY + + NULLS_ALWAYS_ERROR +) + + +def call_func(func, args, array, sanitize=True): + if args == (None, None): + return func(array, array) + if args[0] is None: + if sanitize: + san_args = tuple( + np.array(arg, dtype=array.dtype) if isinstance(arg, str) else + arg for arg in args[1:] + ) + else: + san_args = args[1:] + return func(array, *san_args) + if args[1] is None: + return func(args[0], array) + # shouldn't ever happen + assert 0 + + +@pytest.mark.parametrize("function_name, args", BINARY_FUNCTIONS) +def test_binary(string_array, unicode_array, function_name, args): + if function_name in ONLY_IN_NP_CHAR: + func = getattr(np.char, function_name) + else: + func = getattr(np.strings, function_name) + sres = call_func(func, args, string_array) + ures = call_func(func, args, unicode_array, sanitize=False) + if not isinstance(sres, tuple) and sres.dtype == StringDType(): + ures = ures.astype(StringDType()) + assert_array_equal(sres, ures) + + dtype = string_array.dtype + if function_name not in SUPPORTS_NULLS or not hasattr(dtype, "na_object"): + return + + na_arr = np.insert(string_array, 0, dtype.na_object) + is_nan = np.isnan(np.array([dtype.na_object], dtype=dtype))[0] + is_str = isinstance(dtype.na_object, str) + should_error = not (is_nan or is_str) + + if ( + (function_name in NULLS_ALWAYS_ERROR and not is_str) + or (function_name in PASSES_THROUGH_NAN_NULLS and should_error) + or (function_name in NULLS_ARE_FALSEY and should_error) + ): + with pytest.raises((ValueError, TypeError)): + call_func(func, args, na_arr) + return + + res = call_func(func, args, na_arr) + + if is_str: + assert res[0] == call_func(func, args, na_arr[:1]) + elif function_name in NULLS_ARE_FALSEY: + assert res[0] is np.False_ + elif function_name in PASSES_THROUGH_NAN_NULLS: + assert res[0] is dtype.na_object + else: + # shouldn't ever get here + assert 0 + + +@pytest.mark.parametrize("function, expected", [ + (np.strings.find, [[2, -1], [1, -1]]), + (np.strings.startswith, [[False, False], [True, False]])]) +@pytest.mark.parametrize("start, stop", [ + (1, 4), + (np.int8(1), np.int8(4)), + (np.array([1, 1], dtype='u2'), np.array([4, 4], dtype='u2'))]) +def test_non_default_start_stop(function, start, stop, expected): + a = np.array([["--🐍--", "--🦜--"], + ["-🐍---", "-🦜---"]], "T") + indx = function(a, "🐍", start, stop) + assert_array_equal(indx, expected) + + +@pytest.mark.parametrize("count", [2, np.int8(2), np.array([2, 2], 'u2')]) +def test_replace_non_default_repeat(count): + a = np.array(["🐍--", "🦜-🦜-"], "T") + result = np.strings.replace(a, "🦜-", "🦜†", count) + assert_array_equal(result, np.array(["🐍--", "🦜†🦜†"], "T")) + + +def test_strip_ljust_rjust_consistency(string_array, unicode_array): + rjs = np.char.rjust(string_array, 1000) + rju = np.char.rjust(unicode_array, 1000) + + ljs = np.char.ljust(string_array, 1000) + lju = np.char.ljust(unicode_array, 1000) + + assert_array_equal( + np.char.lstrip(rjs), + np.char.lstrip(rju).astype(StringDType()), + ) + + assert_array_equal( + np.char.rstrip(ljs), + np.char.rstrip(lju).astype(StringDType()), + ) + + assert_array_equal( + np.char.strip(ljs), + np.char.strip(lju).astype(StringDType()), + ) + + assert_array_equal( + np.char.strip(rjs), + np.char.strip(rju).astype(StringDType()), + ) + + +def test_unset_na_coercion(): + # a dtype instance with an unset na object is compatible + # with a dtype that has one set + + # this test uses the "add" and "equal" ufunc but all ufuncs that + # accept more than one string argument and produce a string should + # behave this way + # TODO: generalize to more ufuncs + inp = ["hello", "world"] + arr = np.array(inp, dtype=StringDType(na_object=None)) + for op_dtype in [None, StringDType(), StringDType(coerce=False), + StringDType(na_object=None)]: + if op_dtype is None: + op = "2" + else: + op = np.array("2", dtype=op_dtype) + res = arr + op + assert_array_equal(res, ["hello2", "world2"]) + + # dtype instances with distinct explicitly set NA objects are incompatible + for op_dtype in [StringDType(na_object=pd_NA), StringDType(na_object="")]: + op = np.array("2", dtype=op_dtype) + with pytest.raises(TypeError): + arr + op + + # comparisons only consider the na_object + for op_dtype in [None, StringDType(), StringDType(coerce=True), + StringDType(na_object=None)]: + if op_dtype is None: + op = inp + else: + op = np.array(inp, dtype=op_dtype) + assert_array_equal(arr, op) + + for op_dtype in [StringDType(na_object=pd_NA), + StringDType(na_object=np.nan)]: + op = np.array(inp, dtype=op_dtype) + with pytest.raises(TypeError): + arr == op + + +def test_repeat(string_array): + res = string_array.repeat(1000) + # Create an empty array with expanded dimension, and fill it. Then, + # reshape it to the expected result. + expected = np.empty_like(string_array, shape=string_array.shape + (1000,)) + expected[...] = string_array[:, np.newaxis] + expected = expected.reshape(-1) + + assert_array_equal(res, expected, strict=True) + + +@pytest.mark.parametrize("tile", [1, 6, (2, 5)]) +def test_accumulation(string_array, tile): + """Accumulation is odd for StringDType but tests dtypes with references. + """ + # Fill with mostly empty strings to not create absurdly big strings + arr = np.zeros_like(string_array, shape=(100,)) + arr[:len(string_array)] = string_array + arr[-len(string_array):] = string_array + + # Bloat size a bit (get above thresholds and test >1 ndim). + arr = np.tile(string_array, tile) + + res = np.add.accumulate(arr, axis=0) + res_obj = np.add.accumulate(arr.astype(object), axis=0) + assert_array_equal(res, res_obj.astype(arr.dtype), strict=True) + + if arr.ndim > 1: + res = np.add.accumulate(arr, axis=-1) + res_obj = np.add.accumulate(arr.astype(object), axis=-1) + + assert_array_equal(res, res_obj.astype(arr.dtype), strict=True) + + +class TestImplementation: + """Check that strings are stored in the arena when possible. + + This tests implementation details, so should be adjusted if + the implementation changes. + """ + + @classmethod + def setup_class(cls): + cls.MISSING = 0x80 + cls.INITIALIZED = 0x40 + cls.OUTSIDE_ARENA = 0x20 + cls.LONG = 0x10 + cls.dtype = StringDType(na_object=np.nan) + cls.sizeofstr = cls.dtype.itemsize + sp = cls.dtype.itemsize // 2 # pointer size = sizeof(size_t) + # Below, size is not strictly correct, since it really uses + # 7 (or 3) bytes, but good enough for the tests here. + cls.view_dtype = np.dtype([ + ('offset', f'u{sp}'), + ('size', f'u{sp // 2}'), + ('xsiz', f'V{sp // 2 - 1}'), + ('size_and_flags', 'u1'), + ] if sys.byteorder == 'little' else [ + ('size_and_flags', 'u1'), + ('xsiz', f'V{sp // 2 - 1}'), + ('size', f'u{sp // 2}'), + ('offset', f'u{sp}'), + ]) + cls.s_empty = "" + cls.s_short = "01234" + cls.s_medium = "abcdefghijklmnopqrstuvwxyz" + cls.s_long = "-=+" * 100 + cls.a = np.array( + [cls.s_empty, cls.s_short, cls.s_medium, cls.s_long], + cls.dtype) + + def get_view(self, a): + # Cannot view a StringDType as anything else directly, since + # it has references. So, we use a stride trick hack. + from numpy.lib._stride_tricks_impl import DummyArray + interface = dict(a.__array_interface__) + interface['descr'] = self.view_dtype.descr + interface['typestr'] = self.view_dtype.str + return np.asarray(DummyArray(interface, base=a)) + + def get_flags(self, a): + return self.get_view(a)['size_and_flags'] & 0xf0 + + def is_short(self, a): + return self.get_flags(a) == self.INITIALIZED | self.OUTSIDE_ARENA + + def is_on_heap(self, a): + return self.get_flags(a) == (self.INITIALIZED + | self.OUTSIDE_ARENA + | self.LONG) + + def is_missing(self, a): + return self.get_flags(a) & self.MISSING == self.MISSING + + def in_arena(self, a): + return (self.get_flags(a) & (self.INITIALIZED | self.OUTSIDE_ARENA) + == self.INITIALIZED) + + def test_setup(self): + is_short = self.is_short(self.a) + length = np.strings.str_len(self.a) + assert_array_equal(is_short, (length > 0) & (length <= 15)) + assert_array_equal(self.in_arena(self.a), [False, False, True, True]) + assert_array_equal(self.is_on_heap(self.a), False) + assert_array_equal(self.is_missing(self.a), False) + view = self.get_view(self.a) + sizes = np.where(is_short, view['size_and_flags'] & 0xf, + view['size']) + assert_array_equal(sizes, np.strings.str_len(self.a)) + assert_array_equal(view['xsiz'][2:], + np.void(b'\x00' * (self.sizeofstr // 4 - 1))) + # Check that the medium string uses only 1 byte for its length + # in the arena, while the long string takes 8 (or 4). + offsets = view['offset'] + assert offsets[2] == 1 + assert offsets[3] == 1 + len(self.s_medium) + self.sizeofstr // 2 + + def test_empty(self): + e = np.empty((3,), self.dtype) + assert_array_equal(self.get_flags(e), 0) + assert_array_equal(e, "") + + def test_zeros(self): + z = np.zeros((2,), self.dtype) + assert_array_equal(self.get_flags(z), 0) + assert_array_equal(z, "") + + def test_copy(self): + for c in [self.a.copy(), copy.copy(self.a), copy.deepcopy(self.a)]: + assert_array_equal(self.get_flags(c), self.get_flags(self.a)) + assert_array_equal(c, self.a) + offsets = self.get_view(c)['offset'] + assert offsets[2] == 1 + assert offsets[3] == 1 + len(self.s_medium) + self.sizeofstr // 2 + + def test_arena_use_with_setting(self): + c = np.zeros_like(self.a) + assert_array_equal(self.get_flags(c), 0) + c[:] = self.a + assert_array_equal(self.get_flags(c), self.get_flags(self.a)) + assert_array_equal(c, self.a) + + def test_arena_reuse_with_setting(self): + c = self.a.copy() + c[:] = self.a + assert_array_equal(self.get_flags(c), self.get_flags(self.a)) + assert_array_equal(c, self.a) + + def test_arena_reuse_after_missing(self): + c = self.a.copy() + c[:] = np.nan + assert np.all(self.is_missing(c)) + # Replacing with the original strings, the arena should be reused. + c[:] = self.a + assert_array_equal(self.get_flags(c), self.get_flags(self.a)) + assert_array_equal(c, self.a) + + def test_arena_reuse_after_empty(self): + c = self.a.copy() + c[:] = "" + assert_array_equal(c, "") + # Replacing with the original strings, the arena should be reused. + c[:] = self.a + assert_array_equal(self.get_flags(c), self.get_flags(self.a)) + assert_array_equal(c, self.a) + + def test_arena_reuse_for_shorter(self): + c = self.a.copy() + # A string slightly shorter than the shortest in the arena + # should be used for all strings in the arena. + c[:] = self.s_medium[:-1] + assert_array_equal(c, self.s_medium[:-1]) + # first empty string in original was never initialized, so + # filling it in now leaves it initialized inside the arena. + # second string started as a short string so it can never live + # in the arena. + in_arena = np.array([True, False, True, True]) + assert_array_equal(self.in_arena(c), in_arena) + # But when a short string is replaced, it will go on the heap. + assert_array_equal(self.is_short(c), False) + assert_array_equal(self.is_on_heap(c), ~in_arena) + # We can put the originals back, and they'll still fit, + # and short strings are back as short strings + c[:] = self.a + assert_array_equal(c, self.a) + assert_array_equal(self.in_arena(c), in_arena) + assert_array_equal(self.is_short(c), self.is_short(self.a)) + assert_array_equal(self.is_on_heap(c), False) + + def test_arena_reuse_if_possible(self): + c = self.a.copy() + # A slightly longer string will not fit in the arena for + # the medium string, but will fit for the longer one. + c[:] = self.s_medium + "±" + assert_array_equal(c, self.s_medium + "±") + in_arena_exp = np.strings.str_len(self.a) >= len(self.s_medium) + 1 + # first entry started uninitialized and empty, so filling it leaves + # it in the arena + in_arena_exp[0] = True + assert not np.all(in_arena_exp == self.in_arena(self.a)) + assert_array_equal(self.in_arena(c), in_arena_exp) + assert_array_equal(self.is_short(c), False) + assert_array_equal(self.is_on_heap(c), ~in_arena_exp) + # And once outside arena, it stays outside, since offset is lost. + # But short strings are used again. + c[:] = self.a + is_short_exp = self.is_short(self.a) + assert_array_equal(c, self.a) + assert_array_equal(self.in_arena(c), in_arena_exp) + assert_array_equal(self.is_short(c), is_short_exp) + assert_array_equal(self.is_on_heap(c), ~in_arena_exp & ~is_short_exp) + + def test_arena_no_reuse_after_short(self): + c = self.a.copy() + # If we replace a string with a short string, it cannot + # go into the arena after because the offset is lost. + c[:] = self.s_short + assert_array_equal(c, self.s_short) + assert_array_equal(self.in_arena(c), False) + c[:] = self.a + assert_array_equal(c, self.a) + assert_array_equal(self.in_arena(c), False) + assert_array_equal(self.is_on_heap(c), self.in_arena(self.a)) diff --git a/local_packages/numpy/_core/tests/test_strings.py b/local_packages/numpy/_core/tests/test_strings.py new file mode 100644 index 0000000..42e0d30 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_strings.py @@ -0,0 +1,1523 @@ +import operator +import sys + +import pytest + +import numpy as np +from numpy._core._exceptions import _UFuncNoLoopError +from numpy.testing import IS_PYPY, assert_array_equal, assert_raises +from numpy.testing._private.utils import requires_memory + +COMPARISONS = [ + (operator.eq, np.equal, "=="), + (operator.ne, np.not_equal, "!="), + (operator.lt, np.less, "<"), + (operator.le, np.less_equal, "<="), + (operator.gt, np.greater, ">"), + (operator.ge, np.greater_equal, ">="), +] + +MAX = np.iinfo(np.int64).max + +IS_PYPY_LT_7_3_16 = IS_PYPY and sys.implementation.version < (7, 3, 16) + +@pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS) +def test_mixed_string_comparison_ufuncs_fail(op, ufunc, sym): + arr_string = np.array(["a", "b"], dtype="S") + arr_unicode = np.array(["a", "c"], dtype="U") + + with pytest.raises(TypeError, match="did not contain a loop"): + ufunc(arr_string, arr_unicode) + + with pytest.raises(TypeError, match="did not contain a loop"): + ufunc(arr_unicode, arr_string) + +@pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS) +def test_mixed_string_comparisons_ufuncs_with_cast(op, ufunc, sym): + arr_string = np.array(["a", "b"], dtype="S") + arr_unicode = np.array(["a", "c"], dtype="U") + + # While there is no loop, manual casting is acceptable: + res1 = ufunc(arr_string, arr_unicode, signature="UU->?", casting="unsafe") + res2 = ufunc(arr_string, arr_unicode, signature="SS->?", casting="unsafe") + + expected = op(arr_string.astype("U"), arr_unicode) + assert_array_equal(res1, expected) + assert_array_equal(res2, expected) + + +@pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS) +@pytest.mark.parametrize("dtypes", [ + ("S2", "S2"), ("S2", "S10"), + ("U1"), (">U1", ">U1"), + ("U10")]) +@pytest.mark.parametrize("aligned", [True, False]) +def test_string_comparisons(op, ufunc, sym, dtypes, aligned): + # ensure native byte-order for the first view to stay within unicode range + native_dt = np.dtype(dtypes[0]).newbyteorder("=") + arr = np.arange(2**15).view(native_dt).astype(dtypes[0]) + if not aligned: + # Make `arr` unaligned: + new = np.zeros(arr.nbytes + 1, dtype=np.uint8)[1:].view(dtypes[0]) + new[...] = arr + arr = new + + arr2 = arr.astype(dtypes[1], copy=True) + np.random.shuffle(arr2) + arr[0] = arr2[0] # make sure one matches + + expected = [op(d1, d2) for d1, d2 in zip(arr.tolist(), arr2.tolist())] + assert_array_equal(op(arr, arr2), expected) + assert_array_equal(ufunc(arr, arr2), expected) + assert_array_equal( + np.char.compare_chararrays(arr, arr2, sym, False), expected + ) + + expected = [op(d2, d1) for d1, d2 in zip(arr.tolist(), arr2.tolist())] + assert_array_equal(op(arr2, arr), expected) + assert_array_equal(ufunc(arr2, arr), expected) + assert_array_equal( + np.char.compare_chararrays(arr2, arr, sym, False), expected + ) + + +@pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS) +@pytest.mark.parametrize("dtypes", [ + ("S2", "S2"), ("S2", "S10"), ("U10")]) +def test_string_comparisons_empty(op, ufunc, sym, dtypes): + arr = np.empty((1, 0, 1, 5), dtype=dtypes[0]) + arr2 = np.empty((100, 1, 0, 1), dtype=dtypes[1]) + + expected = np.empty(np.broadcast_shapes(arr.shape, arr2.shape), dtype=bool) + assert_array_equal(op(arr, arr2), expected) + assert_array_equal(ufunc(arr, arr2), expected) + assert_array_equal( + np.char.compare_chararrays(arr, arr2, sym, False), expected + ) + + +@pytest.mark.parametrize("str_dt", ["S", "U"]) +@pytest.mark.parametrize("float_dt", np.typecodes["AllFloat"]) +def test_float_to_string_cast(str_dt, float_dt): + float_dt = np.dtype(float_dt) + fi = np.finfo(float_dt) + arr = np.array([np.nan, np.inf, -np.inf, fi.max, fi.min], dtype=float_dt) + expected = ["nan", "inf", "-inf", str(fi.max), str(fi.min)] + if float_dt.kind == "c": + expected = [f"({r}+0j)" for r in expected] + + res = arr.astype(str_dt) + assert_array_equal(res, np.array(expected, dtype=str_dt)) + + +@pytest.mark.parametrize("str_dt", "US") +@pytest.mark.parametrize("size", [-1, np.iinfo(np.intc).max]) +def test_string_size_dtype_errors(str_dt, size): + if size > 0: + size = size // np.dtype(f"{str_dt}1").itemsize + 1 + + with pytest.raises(ValueError): + np.dtype((str_dt, size)) + with pytest.raises(TypeError): + np.dtype(f"{str_dt}{size}") + + +@pytest.mark.parametrize("str_dt", "US") +def test_string_size_dtype_large_repr(str_dt): + size = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize + size_str = str(size) + + dtype = np.dtype((str_dt, size)) + assert size_str in dtype.str + assert size_str in str(dtype) + assert size_str in repr(dtype) + + +@pytest.mark.slow +@requires_memory(2 * np.iinfo(np.intc).max) +@pytest.mark.parametrize("str_dt", "US") +@pytest.mark.thread_unsafe(reason="crashes with low memory") +def test_large_string_coercion_error(str_dt): + very_large = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize + try: + large_string = "A" * (very_large + 1) + except Exception: + # We may not be able to create this Python string on 32bit. + pytest.skip("python failed to create huge string") + + class MyStr: + def __str__(self): + return large_string + + try: + # TypeError from NumPy, or OverflowError from 32bit Python. + with pytest.raises((TypeError, OverflowError)): + np.array([large_string], dtype=str_dt) + + # Same as above, but input has to be converted to a string. + with pytest.raises((TypeError, OverflowError)): + np.array([MyStr()], dtype=str_dt) + except MemoryError: + # Catch memory errors, because `requires_memory` would do so. + raise AssertionError("Ops should raise before any large allocation.") + +@pytest.mark.slow +@requires_memory(2 * np.iinfo(np.intc).max) +@pytest.mark.parametrize("str_dt", "US") +@pytest.mark.thread_unsafe(reason="crashes with low memory") +def test_large_string_addition_error(str_dt): + very_large = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize + + a = np.array(["A" * very_large], dtype=str_dt) + b = np.array("B", dtype=str_dt) + try: + with pytest.raises(TypeError): + np.add(a, b) + with pytest.raises(TypeError): + np.add(a, a) + except MemoryError: + # Catch memory errors, because `requires_memory` would do so. + raise AssertionError("Ops should raise before any large allocation.") + + +def test_large_string_cast(): + very_large = np.iinfo(np.intc).max // 4 + # Could be nice to test very large path, but it makes too many huge + # allocations right now (need non-legacy cast loops for this). + # a = np.array([], dtype=np.dtype(("S", very_large))) + # assert a.astype("U").dtype.itemsize == very_large * 4 + + a = np.array([], dtype=np.dtype(("S", very_large + 1))) + # It is not perfect but OK if this raises a MemoryError during setup + # (this happens due clunky code and/or buffer setup.) + with pytest.raises((TypeError, MemoryError)): + a.astype("U") + + +@pytest.mark.parametrize("dt", ["S1", "U1"]) +def test_in_place_mutiply_no_overflow(dt): + # see gh-30495 + a = np.array("a", dtype=dt) + a *= 20 + assert_array_equal(a, np.array("a", dtype=dt)) + + +@pytest.mark.parametrize("dt", ["S", "U", "T"]) +class TestMethods: + + @pytest.mark.parametrize("in1,in2,out", [ + ("", "", ""), + ("abc", "abc", "abcabc"), + ("12345", "12345", "1234512345"), + ("MixedCase", "MixedCase", "MixedCaseMixedCase"), + ("12345 \0 ", "12345 \0 ", "12345 \0 12345 \0 "), + ("UPPER", "UPPER", "UPPERUPPER"), + (["abc", "def"], ["hello", "world"], ["abchello", "defworld"]), + ]) + def test_add(self, in1, in2, out, dt): + in1 = np.array(in1, dtype=dt) + in2 = np.array(in2, dtype=dt) + out = np.array(out, dtype=dt) + assert_array_equal(np.strings.add(in1, in2), out) + + @pytest.mark.parametrize("in1,in2,out", [ + ("abc", 3, "abcabcabc"), + ("abc", 0, ""), + ("abc", -1, ""), + (["abc", "def"], [1, 4], ["abc", "defdefdefdef"]), + ]) + def test_multiply(self, in1, in2, out, dt): + in1 = np.array(in1, dtype=dt) + out = np.array(out, dtype=dt) + assert_array_equal(np.strings.multiply(in1, in2), out) + + def test_multiply_raises(self, dt): + with pytest.raises(TypeError, match="unsupported type"): + np.strings.multiply(np.array("abc", dtype=dt), 3.14) + + with pytest.raises(OverflowError): + np.strings.multiply(np.array("abc", dtype=dt), sys.maxsize) + + def test_inplace_multiply(self, dt): + arr = np.array(['foo ', 'bar'], dtype=dt) + arr *= 2 + if dt != "T": + assert_array_equal(arr, np.array(['foo ', 'barb'], dtype=dt)) + else: + assert_array_equal(arr, ['foo foo ', 'barbar']) + + with pytest.raises(OverflowError): + arr *= sys.maxsize + + @pytest.mark.parametrize("i_dt", [np.int8, np.int16, np.int32, + np.int64, np.int_]) + def test_multiply_integer_dtypes(self, i_dt, dt): + a = np.array("abc", dtype=dt) + i = np.array(3, dtype=i_dt) + res = np.array("abcabcabc", dtype=dt) + assert_array_equal(np.strings.multiply(a, i), res) + + @pytest.mark.parametrize("in_,out", [ + ("", False), + ("a", True), + ("A", True), + ("\n", False), + ("abc", True), + ("aBc123", False), + ("abc\n", False), + (["abc", "aBc123"], [True, False]), + ]) + def test_isalpha(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.isalpha(in_), out) + + @pytest.mark.parametrize("in_,out", [ + ('', False), + ('a', True), + ('A', True), + ('\n', False), + ('123abc456', True), + ('a1b3c', True), + ('aBc000 ', False), + ('abc\n', False), + ]) + def test_isalnum(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.isalnum(in_), out) + + @pytest.mark.parametrize("in_,out", [ + ("", False), + ("a", False), + ("0", True), + ("012345", True), + ("012345a", False), + (["a", "012345"], [False, True]), + ]) + def test_isdigit(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.isdigit(in_), out) + + @pytest.mark.parametrize("in_,out", [ + ("", False), + ("a", False), + ("1", False), + (" ", True), + ("\t", True), + ("\r", True), + ("\n", True), + (" \t\r \n", True), + (" \t\r\na", False), + (["\t1", " \t\r \n"], [False, True]) + ]) + def test_isspace(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.isspace(in_), out) + + @pytest.mark.parametrize("in_,out", [ + ('', False), + ('a', True), + ('A', False), + ('\n', False), + ('abc', True), + ('aBc', False), + ('abc\n', True), + ]) + def test_islower(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.islower(in_), out) + + @pytest.mark.parametrize("in_,out", [ + ('', False), + ('a', False), + ('A', True), + ('\n', False), + ('ABC', True), + ('AbC', False), + ('ABC\n', True), + ]) + def test_isupper(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.isupper(in_), out) + + @pytest.mark.parametrize("in_,out", [ + ('', False), + ('a', False), + ('A', True), + ('\n', False), + ('A Titlecased Line', True), + ('A\nTitlecased Line', True), + ('A Titlecased, Line', True), + ('Not a capitalized String', False), + ('Not\ta Titlecase String', False), + ('Not--a Titlecase String', False), + ('NOT', False), + ]) + def test_istitle(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.istitle(in_), out) + + @pytest.mark.parametrize("in_,out", [ + ("", 0), + ("abc", 3), + ("12345", 5), + ("MixedCase", 9), + ("12345 \x00 ", 8), + ("UPPER", 5), + (["abc", "12345 \x00 "], [3, 8]), + ]) + def test_str_len(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.str_len(in_), out) + + @pytest.mark.parametrize("a,sub,start,end,out", [ + ("abcdefghiabc", "abc", 0, None, 0), + ("abcdefghiabc", "abc", 1, None, 9), + ("abcdefghiabc", "def", 4, None, -1), + ("abc", "", 0, None, 0), + ("abc", "", 3, None, 3), + ("abc", "", 4, None, -1), + ("rrarrrrrrrrra", "a", 0, None, 2), + ("rrarrrrrrrrra", "a", 4, None, 12), + ("rrarrrrrrrrra", "a", 4, 6, -1), + ("", "", 0, None, 0), + ("", "", 1, 1, -1), + ("", "", MAX, 0, -1), + ("", "xx", 0, None, -1), + ("", "xx", 1, 1, -1), + ("", "xx", MAX, 0, -1), + pytest.param(99 * "a" + "b", "b", 0, None, 99, + id="99*a+b-b-0-None-99"), + pytest.param(98 * "a" + "ba", "ba", 0, None, 98, + id="98*a+ba-ba-0-None-98"), + pytest.param(100 * "a", "b", 0, None, -1, + id="100*a-b-0-None--1"), + pytest.param(30000 * "a" + 100 * "b", 100 * "b", 0, None, 30000, + id="30000*a+100*b-100*b-0-None-30000"), + pytest.param(30000 * "a", 100 * "b", 0, None, -1, + id="30000*a-100*b-0-None--1"), + pytest.param(15000 * "a" + 15000 * "b", 15000 * "b", 0, None, 15000, + id="15000*a+15000*b-15000*b-0-None-15000"), + pytest.param(15000 * "a" + 15000 * "b", 15000 * "c", 0, None, -1, + id="15000*a+15000*b-15000*c-0-None--1"), + (["abcdefghiabc", "rrarrrrrrrrra"], ["def", "arr"], [0, 3], + None, [3, -1]), + ("Ae¢☃€ 😊" * 2, "😊", 0, None, 6), + ("Ae¢☃€ 😊" * 2, "😊", 7, None, 13), + pytest.param("A" * (2 ** 17), r"[\w]+\Z", 0, None, -1, + id=r"A*2**17-[\w]+\Z-0-None--1"), + ]) + def test_find(self, a, sub, start, end, out, dt): + if "😊" in a and dt == "S": + pytest.skip("Bytes dtype does not support non-ascii input") + a = np.array(a, dtype=dt) + sub = np.array(sub, dtype=dt) + assert_array_equal(np.strings.find(a, sub, start, end), out) + + @pytest.mark.parametrize("a,sub,start,end,out", [ + ("abcdefghiabc", "abc", 0, None, 9), + ("abcdefghiabc", "", 0, None, 12), + ("abcdefghiabc", "abcd", 0, None, 0), + ("abcdefghiabc", "abcz", 0, None, -1), + ("abc", "", 0, None, 3), + ("abc", "", 3, None, 3), + ("abc", "", 4, None, -1), + ("rrarrrrrrrrra", "a", 0, None, 12), + ("rrarrrrrrrrra", "a", 4, None, 12), + ("rrarrrrrrrrra", "a", 4, 6, -1), + (["abcdefghiabc", "rrarrrrrrrrra"], ["abc", "a"], [0, 0], + None, [9, 12]), + ("Ae¢☃€ 😊" * 2, "😊", 0, None, 13), + ("Ae¢☃€ 😊" * 2, "😊", 0, 7, 6), + ]) + def test_rfind(self, a, sub, start, end, out, dt): + if "😊" in a and dt == "S": + pytest.skip("Bytes dtype does not support non-ascii input") + a = np.array(a, dtype=dt) + sub = np.array(sub, dtype=dt) + assert_array_equal(np.strings.rfind(a, sub, start, end), out) + + @pytest.mark.parametrize("a,sub,start,end,out", [ + ("aaa", "a", 0, None, 3), + ("aaa", "b", 0, None, 0), + ("aaa", "a", 1, None, 2), + ("aaa", "a", 10, None, 0), + ("aaa", "a", -1, None, 1), + ("aaa", "a", -10, None, 3), + ("aaa", "a", 0, 1, 1), + ("aaa", "a", 0, 10, 3), + ("aaa", "a", 0, -1, 2), + ("aaa", "a", 0, -10, 0), + ("aaa", "", 1, None, 3), + ("aaa", "", 3, None, 1), + ("aaa", "", 10, None, 0), + ("aaa", "", -1, None, 2), + ("aaa", "", -10, None, 4), + ("aaa", "aaaa", 0, None, 0), + pytest.param(98 * "a" + "ba", "ba", 0, None, 1, + id="98*a+ba-ba-0-None-1"), + pytest.param(30000 * "a" + 100 * "b", 100 * "b", 0, None, 1, + id="30000*a+100*b-100*b-0-None-1"), + pytest.param(30000 * "a", 100 * "b", 0, None, 0, + id="30000*a-100*b-0-None-0"), + pytest.param(30000 * "a" + 100 * "ab", "ab", 0, None, 100, + id="30000*a+100*ab-ab-0-None-100"), + pytest.param(15000 * "a" + 15000 * "b", 15000 * "b", 0, None, 1, + id="15000*a+15000*b-15000*b-0-None-1"), + pytest.param(15000 * "a" + 15000 * "b", 15000 * "c", 0, None, 0, + id="15000*a+15000*b-15000*c-0-None-0"), + ("", "", 0, None, 1), + ("", "", 1, 1, 0), + ("", "", MAX, 0, 0), + ("", "xx", 0, None, 0), + ("", "xx", 1, 1, 0), + ("", "xx", MAX, 0, 0), + (["aaa", ""], ["a", ""], [0, 0], None, [3, 1]), + ("Ae¢☃€ 😊" * 100, "😊", 0, None, 100), + ]) + def test_count(self, a, sub, start, end, out, dt): + if "😊" in a and dt == "S": + pytest.skip("Bytes dtype does not support non-ascii input") + a = np.array(a, dtype=dt) + sub = np.array(sub, dtype=dt) + assert_array_equal(np.strings.count(a, sub, start, end), out) + + @pytest.mark.parametrize("a,prefix,start,end,out", [ + ("hello", "he", 0, None, True), + ("hello", "hello", 0, None, True), + ("hello", "hello world", 0, None, False), + ("hello", "", 0, None, True), + ("hello", "ello", 0, None, False), + ("hello", "ello", 1, None, True), + ("hello", "o", 4, None, True), + ("hello", "o", 5, None, False), + ("hello", "", 5, None, True), + ("hello", "lo", 6, None, False), + ("helloworld", "lowo", 3, None, True), + ("helloworld", "lowo", 3, 7, True), + ("helloworld", "lowo", 3, 6, False), + ("", "", 0, 1, True), + ("", "", 0, 0, True), + ("", "", 1, 0, False), + ("hello", "he", 0, -1, True), + ("hello", "he", -53, -1, True), + ("hello", "hello", 0, -1, False), + ("hello", "hello world", -1, -10, False), + ("hello", "ello", -5, None, False), + ("hello", "ello", -4, None, True), + ("hello", "o", -2, None, False), + ("hello", "o", -1, None, True), + ("hello", "", -3, -3, True), + ("hello", "lo", -9, None, False), + (["hello", ""], ["he", ""], [0, 0], None, [True, True]), + ]) + def test_startswith(self, a, prefix, start, end, out, dt): + a = np.array(a, dtype=dt) + prefix = np.array(prefix, dtype=dt) + assert_array_equal(np.strings.startswith(a, prefix, start, end), out) + + @pytest.mark.parametrize("a,suffix,start,end,out", [ + ("hello", "lo", 0, None, True), + ("hello", "he", 0, None, False), + ("hello", "", 0, None, True), + ("hello", "hello world", 0, None, False), + ("helloworld", "worl", 0, None, False), + ("helloworld", "worl", 3, 9, True), + ("helloworld", "world", 3, 12, True), + ("helloworld", "lowo", 1, 7, True), + ("helloworld", "lowo", 2, 7, True), + ("helloworld", "lowo", 3, 7, True), + ("helloworld", "lowo", 4, 7, False), + ("helloworld", "lowo", 3, 8, False), + ("ab", "ab", 0, 1, False), + ("ab", "ab", 0, 0, False), + ("", "", 0, 1, True), + ("", "", 0, 0, True), + ("", "", 1, 0, False), + ("hello", "lo", -2, None, True), + ("hello", "he", -2, None, False), + ("hello", "", -3, -3, True), + ("hello", "hello world", -10, -2, False), + ("helloworld", "worl", -6, None, False), + ("helloworld", "worl", -5, -1, True), + ("helloworld", "worl", -5, 9, True), + ("helloworld", "world", -7, 12, True), + ("helloworld", "lowo", -99, -3, True), + ("helloworld", "lowo", -8, -3, True), + ("helloworld", "lowo", -7, -3, True), + ("helloworld", "lowo", 3, -4, False), + ("helloworld", "lowo", -8, -2, False), + (["hello", "helloworld"], ["lo", "worl"], [0, -6], None, + [True, False]), + ]) + def test_endswith(self, a, suffix, start, end, out, dt): + a = np.array(a, dtype=dt) + suffix = np.array(suffix, dtype=dt) + assert_array_equal(np.strings.endswith(a, suffix, start, end), out) + + @pytest.mark.parametrize("a,chars,out", [ + ("", None, ""), + (" hello ", None, "hello "), + ("hello", None, "hello"), + (" \t\n\r\f\vabc \t\n\r\f\v", None, "abc \t\n\r\f\v"), + ([" hello ", "hello"], None, ["hello ", "hello"]), + ("", "", ""), + ("", "xyz", ""), + ("hello", "", "hello"), + ("xyzzyhelloxyzzy", "xyz", "helloxyzzy"), + ("hello", "xyz", "hello"), + ("xyxz", "xyxz", ""), + ("xyxzx", "x", "yxzx"), + (["xyzzyhelloxyzzy", "hello"], ["xyz", "xyz"], + ["helloxyzzy", "hello"]), + (["ba", "ac", "baa", "bba"], "b", ["a", "ac", "aa", "a"]), + ]) + def test_lstrip(self, a, chars, out, dt): + a = np.array(a, dtype=dt) + out = np.array(out, dtype=dt) + if chars is not None: + chars = np.array(chars, dtype=dt) + assert_array_equal(np.strings.lstrip(a, chars), out) + else: + assert_array_equal(np.strings.lstrip(a), out) + + @pytest.mark.parametrize("a,chars,out", [ + ("", None, ""), + (" hello ", None, " hello"), + ("hello", None, "hello"), + (" \t\n\r\f\vabc \t\n\r\f\v", None, " \t\n\r\f\vabc"), + ([" hello ", "hello"], None, [" hello", "hello"]), + ("", "", ""), + ("", "xyz", ""), + ("hello", "", "hello"), + (["hello ", "abcdefghijklmnop"], None, + ["hello", "abcdefghijklmnop"]), + ("xyzzyhelloxyzzy", "xyz", "xyzzyhello"), + ("hello", "xyz", "hello"), + ("xyxz", "xyxz", ""), + (" ", None, ""), + ("xyxzx", "x", "xyxz"), + (["xyzzyhelloxyzzy", "hello"], ["xyz", "xyz"], + ["xyzzyhello", "hello"]), + (["ab", "ac", "aab", "abb"], "b", ["a", "ac", "aa", "a"]), + ]) + def test_rstrip(self, a, chars, out, dt): + a = np.array(a, dtype=dt) + out = np.array(out, dtype=dt) + if chars is not None: + chars = np.array(chars, dtype=dt) + assert_array_equal(np.strings.rstrip(a, chars), out) + else: + assert_array_equal(np.strings.rstrip(a), out) + + @pytest.mark.parametrize("a,chars,out", [ + ("", None, ""), + (" hello ", None, "hello"), + ("hello", None, "hello"), + (" \t\n\r\f\vabc \t\n\r\f\v", None, "abc"), + ([" hello ", "hello"], None, ["hello", "hello"]), + ("", "", ""), + ("", "xyz", ""), + ("hello", "", "hello"), + ("xyzzyhelloxyzzy", "xyz", "hello"), + ("hello", "xyz", "hello"), + ("xyxz", "xyxz", ""), + ("xyxzx", "x", "yxz"), + (["xyzzyhelloxyzzy", "hello"], ["xyz", "xyz"], + ["hello", "hello"]), + (["bab", "ac", "baab", "bbabb"], "b", ["a", "ac", "aa", "a"]), + ]) + def test_strip(self, a, chars, out, dt): + a = np.array(a, dtype=dt) + if chars is not None: + chars = np.array(chars, dtype=dt) + out = np.array(out, dtype=dt) + assert_array_equal(np.strings.strip(a, chars), out) + + @pytest.mark.parametrize("buf,old,new,count,res", [ + ("", "", "", -1, ""), + ("", "", "A", -1, "A"), + ("", "A", "", -1, ""), + ("", "A", "A", -1, ""), + ("", "", "", 100, ""), + ("", "", "A", 100, "A"), + ("A", "", "", -1, "A"), + ("A", "", "*", -1, "*A*"), + ("A", "", "*1", -1, "*1A*1"), + ("A", "", "*-#", -1, "*-#A*-#"), + ("AA", "", "*-", -1, "*-A*-A*-"), + ("AA", "", "*-", -1, "*-A*-A*-"), + ("AA", "", "*-", 4, "*-A*-A*-"), + ("AA", "", "*-", 3, "*-A*-A*-"), + ("AA", "", "*-", 2, "*-A*-A"), + ("AA", "", "*-", 1, "*-AA"), + ("AA", "", "*-", 0, "AA"), + ("A", "A", "", -1, ""), + ("AAA", "A", "", -1, ""), + ("AAA", "A", "", -1, ""), + ("AAA", "A", "", 4, ""), + ("AAA", "A", "", 3, ""), + ("AAA", "A", "", 2, "A"), + ("AAA", "A", "", 1, "AA"), + ("AAA", "A", "", 0, "AAA"), + ("AAAAAAAAAA", "A", "", -1, ""), + ("ABACADA", "A", "", -1, "BCD"), + ("ABACADA", "A", "", -1, "BCD"), + ("ABACADA", "A", "", 5, "BCD"), + ("ABACADA", "A", "", 4, "BCD"), + ("ABACADA", "A", "", 3, "BCDA"), + ("ABACADA", "A", "", 2, "BCADA"), + ("ABACADA", "A", "", 1, "BACADA"), + ("ABACADA", "A", "", 0, "ABACADA"), + ("ABCAD", "A", "", -1, "BCD"), + ("ABCADAA", "A", "", -1, "BCD"), + ("BCD", "A", "", -1, "BCD"), + ("*************", "A", "", -1, "*************"), + ("^" + "A" * 1000 + "^", "A", "", 999, "^A^"), + ("the", "the", "", -1, ""), + ("theater", "the", "", -1, "ater"), + ("thethe", "the", "", -1, ""), + ("thethethethe", "the", "", -1, ""), + ("theatheatheathea", "the", "", -1, "aaaa"), + ("that", "the", "", -1, "that"), + ("thaet", "the", "", -1, "thaet"), + ("here and there", "the", "", -1, "here and re"), + ("here and there and there", "the", "", -1, "here and re and re"), + ("here and there and there", "the", "", 3, "here and re and re"), + ("here and there and there", "the", "", 2, "here and re and re"), + ("here and there and there", "the", "", 1, "here and re and there"), + ("here and there and there", "the", "", 0, "here and there and there"), + ("here and there and there", "the", "", -1, "here and re and re"), + ("abc", "the", "", -1, "abc"), + ("abcdefg", "the", "", -1, "abcdefg"), + ("bbobob", "bob", "", -1, "bob"), + ("bbobobXbbobob", "bob", "", -1, "bobXbob"), + ("aaaaaaabob", "bob", "", -1, "aaaaaaa"), + ("aaaaaaa", "bob", "", -1, "aaaaaaa"), + ("Who goes there?", "o", "o", -1, "Who goes there?"), + ("Who goes there?", "o", "O", -1, "WhO gOes there?"), + ("Who goes there?", "o", "O", -1, "WhO gOes there?"), + ("Who goes there?", "o", "O", 3, "WhO gOes there?"), + ("Who goes there?", "o", "O", 2, "WhO gOes there?"), + ("Who goes there?", "o", "O", 1, "WhO goes there?"), + ("Who goes there?", "o", "O", 0, "Who goes there?"), + ("Who goes there?", "a", "q", -1, "Who goes there?"), + ("Who goes there?", "W", "w", -1, "who goes there?"), + ("WWho goes there?WW", "W", "w", -1, "wwho goes there?ww"), + ("Who goes there?", "?", "!", -1, "Who goes there!"), + ("Who goes there??", "?", "!", -1, "Who goes there!!"), + ("Who goes there?", ".", "!", -1, "Who goes there?"), + ("This is a tissue", "is", "**", -1, "Th** ** a t**sue"), + ("This is a tissue", "is", "**", -1, "Th** ** a t**sue"), + ("This is a tissue", "is", "**", 4, "Th** ** a t**sue"), + ("This is a tissue", "is", "**", 3, "Th** ** a t**sue"), + ("This is a tissue", "is", "**", 2, "Th** ** a tissue"), + ("This is a tissue", "is", "**", 1, "Th** is a tissue"), + ("This is a tissue", "is", "**", 0, "This is a tissue"), + ("bobob", "bob", "cob", -1, "cobob"), + ("bobobXbobobob", "bob", "cob", -1, "cobobXcobocob"), + ("bobob", "bot", "bot", -1, "bobob"), + ("Reykjavik", "k", "KK", -1, "ReyKKjaviKK"), + ("Reykjavik", "k", "KK", -1, "ReyKKjaviKK"), + ("Reykjavik", "k", "KK", 2, "ReyKKjaviKK"), + ("Reykjavik", "k", "KK", 1, "ReyKKjavik"), + ("Reykjavik", "k", "KK", 0, "Reykjavik"), + ("A.B.C.", ".", "----", -1, "A----B----C----"), + ("Reykjavik", "q", "KK", -1, "Reykjavik"), + ("spam, spam, eggs and spam", "spam", "ham", -1, + "ham, ham, eggs and ham"), + ("spam, spam, eggs and spam", "spam", "ham", -1, + "ham, ham, eggs and ham"), + ("spam, spam, eggs and spam", "spam", "ham", 4, + "ham, ham, eggs and ham"), + ("spam, spam, eggs and spam", "spam", "ham", 3, + "ham, ham, eggs and ham"), + ("spam, spam, eggs and spam", "spam", "ham", 2, + "ham, ham, eggs and spam"), + ("spam, spam, eggs and spam", "spam", "ham", 1, + "ham, spam, eggs and spam"), + ("spam, spam, eggs and spam", "spam", "ham", 0, + "spam, spam, eggs and spam"), + ("bobobob", "bobob", "bob", -1, "bobob"), + ("bobobobXbobobob", "bobob", "bob", -1, "bobobXbobob"), + ("BOBOBOB", "bob", "bobby", -1, "BOBOBOB"), + ("one!two!three!", "!", "@", 1, "one@two!three!"), + ("one!two!three!", "!", "", -1, "onetwothree"), + ("one!two!three!", "!", "@", 2, "one@two@three!"), + ("one!two!three!", "!", "@", 3, "one@two@three@"), + ("one!two!three!", "!", "@", 4, "one@two@three@"), + ("one!two!three!", "!", "@", 0, "one!two!three!"), + ("one!two!three!", "!", "@", -1, "one@two@three@"), + ("one!two!three!", "x", "@", -1, "one!two!three!"), + ("one!two!three!", "x", "@", 2, "one!two!three!"), + ("abc", "", "-", -1, "-a-b-c-"), + ("abc", "", "-", 3, "-a-b-c"), + ("abc", "", "-", 0, "abc"), + ("abc", "ab", "--", 0, "abc"), + ("abc", "xy", "--", -1, "abc"), + (["abbc", "abbd"], "b", "z", [1, 2], ["azbc", "azzd"]), + ]) + def test_replace(self, buf, old, new, count, res, dt): + if "😊" in buf and dt == "S": + pytest.skip("Bytes dtype does not support non-ascii input") + buf = np.array(buf, dtype=dt) + old = np.array(old, dtype=dt) + new = np.array(new, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.replace(buf, old, new, count), res) + + @pytest.mark.parametrize("buf,sub,start,end,res", [ + ("abcdefghiabc", "", 0, None, 0), + ("abcdefghiabc", "def", 0, None, 3), + ("abcdefghiabc", "abc", 0, None, 0), + ("abcdefghiabc", "abc", 1, None, 9), + ]) + def test_index(self, buf, sub, start, end, res, dt): + buf = np.array(buf, dtype=dt) + sub = np.array(sub, dtype=dt) + assert_array_equal(np.strings.index(buf, sub, start, end), res) + + @pytest.mark.parametrize("buf,sub,start,end", [ + ("abcdefghiabc", "hib", 0, None), + ("abcdefghiab", "abc", 1, None), + ("abcdefghi", "ghi", 8, None), + ("abcdefghi", "ghi", -1, None), + ("rrarrrrrrrrra", "a", 4, 6), + ]) + def test_index_raises(self, buf, sub, start, end, dt): + buf = np.array(buf, dtype=dt) + sub = np.array(sub, dtype=dt) + with pytest.raises(ValueError, match="substring not found"): + np.strings.index(buf, sub, start, end) + + @pytest.mark.parametrize("buf,sub,start,end,res", [ + ("abcdefghiabc", "", 0, None, 12), + ("abcdefghiabc", "def", 0, None, 3), + ("abcdefghiabc", "abc", 0, None, 9), + ("abcdefghiabc", "abc", 0, -1, 0), + ]) + def test_rindex(self, buf, sub, start, end, res, dt): + buf = np.array(buf, dtype=dt) + sub = np.array(sub, dtype=dt) + assert_array_equal(np.strings.rindex(buf, sub, start, end), res) + + @pytest.mark.parametrize("buf,sub,start,end", [ + ("abcdefghiabc", "hib", 0, None), + ("defghiabc", "def", 1, None), + ("defghiabc", "abc", 0, -1), + ("abcdefghi", "ghi", 0, 8), + ("abcdefghi", "ghi", 0, -1), + ("rrarrrrrrrrra", "a", 4, 6), + ]) + def test_rindex_raises(self, buf, sub, start, end, dt): + buf = np.array(buf, dtype=dt) + sub = np.array(sub, dtype=dt) + with pytest.raises(ValueError, match="substring not found"): + np.strings.rindex(buf, sub, start, end) + + @pytest.mark.parametrize("buf,tabsize,res", [ + ("abc\rab\tdef\ng\thi", 8, "abc\rab def\ng hi"), + ("abc\rab\tdef\ng\thi", 4, "abc\rab def\ng hi"), + ("abc\r\nab\tdef\ng\thi", 8, "abc\r\nab def\ng hi"), + ("abc\r\nab\tdef\ng\thi", 4, "abc\r\nab def\ng hi"), + ("abc\r\nab\r\ndef\ng\r\nhi", 4, "abc\r\nab\r\ndef\ng\r\nhi"), + (" \ta\n\tb", 1, " a\n b"), + ]) + def test_expandtabs(self, buf, tabsize, res, dt): + buf = np.array(buf, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.expandtabs(buf, tabsize), res) + + def test_expandtabs_raises_overflow(self, dt): + with pytest.raises(OverflowError, match="new string is too long"): + np.strings.expandtabs(np.array("\ta\n\tb", dtype=dt), sys.maxsize) + np.strings.expandtabs(np.array("\ta\n\tb", dtype=dt), 2**61) + + def test_expandtabs_length_not_cause_segfault(self, dt): + # see gh-28829 + with pytest.raises( + _UFuncNoLoopError, + match="did not contain a loop with signature matching types", + ): + np._core.strings._expandtabs_length.reduce(np.zeros(200)) + + with pytest.raises( + _UFuncNoLoopError, + match="did not contain a loop with signature matching types", + ): + np.strings.expandtabs(np.zeros(200)) + + FILL_ERROR = "The fill character must be exactly one character long" + + def test_center_raises_multiple_character_fill(self, dt): + buf = np.array("abc", dtype=dt) + fill = np.array("**", dtype=dt) + with pytest.raises(TypeError, match=self.FILL_ERROR): + np.strings.center(buf, 10, fill) + + def test_ljust_raises_multiple_character_fill(self, dt): + buf = np.array("abc", dtype=dt) + fill = np.array("**", dtype=dt) + with pytest.raises(TypeError, match=self.FILL_ERROR): + np.strings.ljust(buf, 10, fill) + + def test_rjust_raises_multiple_character_fill(self, dt): + buf = np.array("abc", dtype=dt) + fill = np.array("**", dtype=dt) + with pytest.raises(TypeError, match=self.FILL_ERROR): + np.strings.rjust(buf, 10, fill) + + @pytest.mark.parametrize("buf,width,fillchar,res", [ + ('abc', 10, ' ', ' abc '), + ('abc', 6, ' ', ' abc '), + ('abc', 3, ' ', 'abc'), + ('abc', 2, ' ', 'abc'), + ('abc', -2, ' ', 'abc'), + ('abc', 10, '*', '***abc****'), + ]) + def test_center(self, buf, width, fillchar, res, dt): + buf = np.array(buf, dtype=dt) + fillchar = np.array(fillchar, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.center(buf, width, fillchar), res) + + @pytest.mark.parametrize("buf,width,fillchar,res", [ + ('abc', 10, ' ', 'abc '), + ('abc', 6, ' ', 'abc '), + ('abc', 3, ' ', 'abc'), + ('abc', 2, ' ', 'abc'), + ('abc', -2, ' ', 'abc'), + ('abc', 10, '*', 'abc*******'), + ]) + def test_ljust(self, buf, width, fillchar, res, dt): + buf = np.array(buf, dtype=dt) + fillchar = np.array(fillchar, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.ljust(buf, width, fillchar), res) + + @pytest.mark.parametrize("buf,width,fillchar,res", [ + ('abc', 10, ' ', ' abc'), + ('abc', 6, ' ', ' abc'), + ('abc', 3, ' ', 'abc'), + ('abc', 2, ' ', 'abc'), + ('abc', -2, ' ', 'abc'), + ('abc', 10, '*', '*******abc'), + ]) + def test_rjust(self, buf, width, fillchar, res, dt): + buf = np.array(buf, dtype=dt) + fillchar = np.array(fillchar, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.rjust(buf, width, fillchar), res) + + @pytest.mark.parametrize("buf,width,res", [ + ('123', 2, '123'), + ('123', 3, '123'), + ('0123', 4, '0123'), + ('+123', 3, '+123'), + ('+123', 4, '+123'), + ('+123', 5, '+0123'), + ('+0123', 5, '+0123'), + ('-123', 3, '-123'), + ('-123', 4, '-123'), + ('-0123', 5, '-0123'), + ('000', 3, '000'), + ('34', 1, '34'), + ('34', -1, '34'), + ('0034', 4, '0034'), + ]) + def test_zfill(self, buf, width, res, dt): + buf = np.array(buf, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.zfill(buf, width), res) + + @pytest.mark.parametrize("buf,sep,res1,res2,res3", [ + ("this is the partition method", "ti", "this is the par", + "ti", "tion method"), + ("http://www.python.org", "://", "http", "://", "www.python.org"), + ("http://www.python.org", "?", "http://www.python.org", "", ""), + ("http://www.python.org", "http://", "", "http://", "www.python.org"), + ("http://www.python.org", "org", "http://www.python.", "org", ""), + ("http://www.python.org", ["://", "?", "http://", "org"], + ["http", "http://www.python.org", "", "http://www.python."], + ["://", "", "http://", "org"], + ["www.python.org", "", "www.python.org", ""]), + ("mississippi", "ss", "mi", "ss", "issippi"), + ("mississippi", "i", "m", "i", "ssissippi"), + ("mississippi", "w", "mississippi", "", ""), + ]) + def test_partition(self, buf, sep, res1, res2, res3, dt): + buf = np.array(buf, dtype=dt) + sep = np.array(sep, dtype=dt) + res1 = np.array(res1, dtype=dt) + res2 = np.array(res2, dtype=dt) + res3 = np.array(res3, dtype=dt) + act1, act2, act3 = np.strings.partition(buf, sep) + assert_array_equal(act1, res1) + assert_array_equal(act2, res2) + assert_array_equal(act3, res3) + assert_array_equal(act1 + act2 + act3, buf) + + @pytest.mark.parametrize("buf,sep,res1,res2,res3", [ + ("this is the partition method", "ti", "this is the parti", + "ti", "on method"), + ("http://www.python.org", "://", "http", "://", "www.python.org"), + ("http://www.python.org", "?", "", "", "http://www.python.org"), + ("http://www.python.org", "http://", "", "http://", "www.python.org"), + ("http://www.python.org", "org", "http://www.python.", "org", ""), + ("http://www.python.org", ["://", "?", "http://", "org"], + ["http", "", "", "http://www.python."], + ["://", "", "http://", "org"], + ["www.python.org", "http://www.python.org", "www.python.org", ""]), + ("mississippi", "ss", "missi", "ss", "ippi"), + ("mississippi", "i", "mississipp", "i", ""), + ("mississippi", "w", "", "", "mississippi"), + ]) + def test_rpartition(self, buf, sep, res1, res2, res3, dt): + buf = np.array(buf, dtype=dt) + sep = np.array(sep, dtype=dt) + res1 = np.array(res1, dtype=dt) + res2 = np.array(res2, dtype=dt) + res3 = np.array(res3, dtype=dt) + act1, act2, act3 = np.strings.rpartition(buf, sep) + assert_array_equal(act1, res1) + assert_array_equal(act2, res2) + assert_array_equal(act3, res3) + assert_array_equal(act1 + act2 + act3, buf) + + @pytest.mark.parametrize("args", [ + (None,), + (None, None), + (None, None, -1), + (0,), + (0, None), + (0, None, -1), + (1,), + (1, None), + (1, None, -1), + (3,), + (3, None), + (5,), + (5, None), + (5, 5), + (5, 5, -1), + (6,), # test index past the end + (6, None), + (6, None, -1), + (6, 7), # test start and stop index past the end + (4, 3), # test start > stop index + (-1,), + (-1, None), + (-1, None, -1), + (-3,), + (-3, None), + ([3, 4],), + ([3, 4], None), + ([2, 4],), + ([-3, 5],), + ([-3, 5], None), + ([-3, 5], None, -1), + ([0, -5],), + ([0, -5], None), + ([0, -5], None, -1), + (1, 4), + (-3, 5), + (None, -1), + (0, [4, 2]), + ([1, 2], [-1, -2]), + (1, 5, 2), + (None, None, -1), + ([0, 6], [-1, 0], [2, -1]), + ]) + @pytest.mark.parametrize("buf", [ + ["hello", "world"], + ['hello world', 'γεια σου κόσμε', '你好世界', '👋 🌍'], + ]) + def test_slice(self, args, buf, dt): + if dt == "S" and "你好世界" in buf: + pytest.skip("Bytes dtype does not support non-ascii input") + if len(buf) == 4: + args = tuple(s * 2 if isinstance(s, list) else s for s in args) + buf = np.array(buf, dtype=dt) + act = np.strings.slice(buf, *args) + bcast_args = tuple(np.broadcast_to(arg, buf.shape) for arg in args) + res = np.array([s[slice(*arg)] + for s, arg in zip(buf, zip(*bcast_args))], + dtype=dt) + assert_array_equal(act, res) + + def test_slice_unsupported(self, dt): + with pytest.raises(TypeError, match="did not contain a loop"): + np.strings.slice(np.array([1, 2, 3]), 4) + + regexp = (r"Cannot cast ufunc '_slice' input .* " + r"from .* to dtype\('int(64|32)'\)") + with pytest.raises(TypeError, match=regexp): + np.strings.slice(np.array(['foo', 'bar'], dtype=dt), + np.array(['foo', 'bar'], dtype=dt)) + + @pytest.mark.parametrize("int_dt", [np.int8, np.int16, np.int32, + np.int64, np.uint8, np.uint16, + np.uint32, np.uint64]) + def test_slice_int_type_promotion(self, int_dt, dt): + buf = np.array(["hello", "world"], dtype=dt) + np_slice = np.strings.slice + assert_array_equal(np_slice(buf, int_dt(4)), + np.array(["hell", "worl"], dtype=dt)) + assert_array_equal(np_slice(buf, np.array([4, 4], dtype=int_dt)), + np.array(["hell", "worl"], dtype=dt)) + + assert_array_equal(np_slice(buf, int_dt(2), int_dt(4)), + np.array(["ll", "rl"], dtype=dt)) + assert_array_equal(np_slice(buf, np.array([2, 2], dtype=int_dt), + np.array([4, 4], dtype=int_dt)), + np.array(["ll", "rl"], dtype=dt)) + + assert_array_equal(np_slice(buf, int_dt(0), int_dt(4), int_dt(2)), + np.array(["hl", "wr"], dtype=dt)) + assert_array_equal(np_slice(buf, + np.array([0, 0], dtype=int_dt), + np.array([4, 4], dtype=int_dt), + np.array([2, 2], dtype=int_dt)), + np.array(["hl", "wr"], dtype=dt)) + +@pytest.mark.parametrize("dt", ["U", "T"]) +class TestMethodsWithUnicode: + @pytest.mark.parametrize("in_,out", [ + ("", False), + ("a", False), + ("0", True), + ("\u2460", False), # CIRCLED DIGIT 1 + ("\xbc", False), # VULGAR FRACTION ONE QUARTER + ("\u0660", True), # ARABIC_INDIC DIGIT ZERO + ("012345", True), + ("012345a", False), + (["0", "a"], [True, False]), + ]) + def test_isdecimal_unicode(self, in_, out, dt): + buf = np.array(in_, dtype=dt) + assert_array_equal(np.strings.isdecimal(buf), out) + + @pytest.mark.parametrize("in_,out", [ + ("", False), + ("a", False), + ("0", True), + ("\u2460", True), # CIRCLED DIGIT 1 + ("\xbc", True), # VULGAR FRACTION ONE QUARTER + ("\u0660", True), # ARABIC_INDIC DIGIT ZERO + ("012345", True), + ("012345a", False), + (["0", "a"], [True, False]), + ]) + def test_isnumeric_unicode(self, in_, out, dt): + buf = np.array(in_, dtype=dt) + assert_array_equal(np.strings.isnumeric(buf), out) + + @pytest.mark.parametrize("buf,old,new,count,res", [ + ("...\u043c......<", "<", "<", -1, "...\u043c......<"), + ("Ae¢☃€ 😊" * 2, "A", "B", -1, "Be¢☃€ 😊Be¢☃€ 😊"), + ("Ae¢☃€ 😊" * 2, "😊", "B", -1, "Ae¢☃€ BAe¢☃€ B"), + ]) + def test_replace_unicode(self, buf, old, new, count, res, dt): + buf = np.array(buf, dtype=dt) + old = np.array(old, dtype=dt) + new = np.array(new, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.replace(buf, old, new, count), res) + + @pytest.mark.parametrize("in_", [ + '\U00010401', + '\U00010427', + '\U00010429', + '\U0001044E', + '\U0001D7F6', + '\U00011066', + '\U000104A0', + pytest.param('\U0001F107', marks=pytest.mark.xfail( + sys.platform == 'win32' and IS_PYPY_LT_7_3_16, + reason="PYPY bug in Py_UNICODE_ISALNUM", + strict=True)), + ]) + def test_isalnum_unicode(self, in_, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.isalnum(in_), True) + + @pytest.mark.parametrize("in_,out", [ + ('\u1FFc', False), + ('\u2167', False), + ('\U00010401', False), + ('\U00010427', False), + ('\U0001F40D', False), + ('\U0001F46F', False), + ('\u2177', True), + pytest.param('\U00010429', True, marks=pytest.mark.xfail( + sys.platform == 'win32' and IS_PYPY_LT_7_3_16, + reason="PYPY bug in Py_UNICODE_ISLOWER", + strict=True)), + ('\U0001044E', True), + ]) + def test_islower_unicode(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.islower(in_), out) + + @pytest.mark.parametrize("in_,out", [ + ('\u1FFc', False), + ('\u2167', True), + ('\U00010401', True), + ('\U00010427', True), + ('\U0001F40D', False), + ('\U0001F46F', False), + ('\u2177', False), + pytest.param('\U00010429', False, marks=pytest.mark.xfail( + sys.platform == 'win32' and IS_PYPY_LT_7_3_16, + reason="PYPY bug in Py_UNICODE_ISUPPER", + strict=True)), + ('\U0001044E', False), + ]) + def test_isupper_unicode(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.isupper(in_), out) + + @pytest.mark.parametrize("in_,out", [ + ('\u1FFc', True), + ('Greek \u1FFcitlecases ...', True), + pytest.param('\U00010401\U00010429', True, marks=pytest.mark.xfail( + sys.platform == 'win32' and IS_PYPY_LT_7_3_16, + reason="PYPY bug in Py_UNICODE_ISISTITLE", + strict=True)), + ('\U00010427\U0001044E', True), + pytest.param('\U00010429', False, marks=pytest.mark.xfail( + sys.platform == 'win32' and IS_PYPY_LT_7_3_16, + reason="PYPY bug in Py_UNICODE_ISISTITLE", + strict=True)), + ('\U0001044E', False), + ('\U0001F40D', False), + ('\U0001F46F', False), + ]) + def test_istitle_unicode(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.istitle(in_), out) + + @pytest.mark.parametrize("buf,sub,start,end,res", [ + ("Ae¢☃€ 😊" * 2, "😊", 0, None, 6), + ("Ae¢☃€ 😊" * 2, "😊", 7, None, 13), + ]) + def test_index_unicode(self, buf, sub, start, end, res, dt): + buf = np.array(buf, dtype=dt) + sub = np.array(sub, dtype=dt) + assert_array_equal(np.strings.index(buf, sub, start, end), res) + + def test_index_raises_unicode(self, dt): + with pytest.raises(ValueError, match="substring not found"): + np.strings.index("Ae¢☃€ 😊", "😀") + + @pytest.mark.parametrize("buf,res", [ + ("Ae¢☃€ \t 😊", "Ae¢☃€ 😊"), + ("\t\U0001044E", " \U0001044E"), + ]) + def test_expandtabs(self, buf, res, dt): + buf = np.array(buf, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.expandtabs(buf), res) + + @pytest.mark.parametrize("buf,width,fillchar,res", [ + ('x', 2, '\U0001044E', 'x\U0001044E'), + ('x', 3, '\U0001044E', '\U0001044Ex\U0001044E'), + ('x', 4, '\U0001044E', '\U0001044Ex\U0001044E\U0001044E'), + ]) + def test_center(self, buf, width, fillchar, res, dt): + buf = np.array(buf, dtype=dt) + fillchar = np.array(fillchar, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.center(buf, width, fillchar), res) + + @pytest.mark.parametrize("buf,width,fillchar,res", [ + ('x', 2, '\U0001044E', 'x\U0001044E'), + ('x', 3, '\U0001044E', 'x\U0001044E\U0001044E'), + ('x', 4, '\U0001044E', 'x\U0001044E\U0001044E\U0001044E'), + ]) + def test_ljust(self, buf, width, fillchar, res, dt): + buf = np.array(buf, dtype=dt) + fillchar = np.array(fillchar, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.ljust(buf, width, fillchar), res) + + @pytest.mark.parametrize("buf,width,fillchar,res", [ + ('x', 2, '\U0001044E', '\U0001044Ex'), + ('x', 3, '\U0001044E', '\U0001044E\U0001044Ex'), + ('x', 4, '\U0001044E', '\U0001044E\U0001044E\U0001044Ex'), + ]) + def test_rjust(self, buf, width, fillchar, res, dt): + buf = np.array(buf, dtype=dt) + fillchar = np.array(fillchar, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.rjust(buf, width, fillchar), res) + + @pytest.mark.parametrize("buf,sep,res1,res2,res3", [ + ("āāāāĀĀĀĀ", "Ă", "āāāāĀĀĀĀ", "", ""), + ("āāāāĂĀĀĀĀ", "Ă", "āāāā", "Ă", "ĀĀĀĀ"), + ("āāāāĂĂĀĀĀĀ", "ĂĂ", "āāāā", "ĂĂ", "ĀĀĀĀ"), + ("𐌁𐌁𐌁𐌁𐌀𐌀𐌀𐌀", "𐌂", "𐌁𐌁𐌁𐌁𐌀𐌀𐌀𐌀", "", ""), + ("𐌁𐌁𐌁𐌁𐌂𐌀𐌀𐌀𐌀", "𐌂", "𐌁𐌁𐌁𐌁", "𐌂", "𐌀𐌀𐌀𐌀"), + ("𐌁𐌁𐌁𐌁𐌂𐌂𐌀𐌀𐌀𐌀", "𐌂𐌂", "𐌁𐌁𐌁𐌁", "𐌂𐌂", "𐌀𐌀𐌀𐌀"), + ("𐌁𐌁𐌁𐌁𐌂𐌂𐌂𐌂𐌀𐌀𐌀𐌀", "𐌂𐌂𐌂𐌂", "𐌁𐌁𐌁𐌁", "𐌂𐌂𐌂𐌂", "𐌀𐌀𐌀𐌀"), + ]) + def test_partition(self, buf, sep, res1, res2, res3, dt): + buf = np.array(buf, dtype=dt) + sep = np.array(sep, dtype=dt) + res1 = np.array(res1, dtype=dt) + res2 = np.array(res2, dtype=dt) + res3 = np.array(res3, dtype=dt) + act1, act2, act3 = np.strings.partition(buf, sep) + assert_array_equal(act1, res1) + assert_array_equal(act2, res2) + assert_array_equal(act3, res3) + assert_array_equal(act1 + act2 + act3, buf) + + @pytest.mark.parametrize("buf,sep,res1,res2,res3", [ + ("āāāāĀĀĀĀ", "Ă", "", "", "āāāāĀĀĀĀ"), + ("āāāāĂĀĀĀĀ", "Ă", "āāāā", "Ă", "ĀĀĀĀ"), + ("āāāāĂĂĀĀĀĀ", "ĂĂ", "āāāā", "ĂĂ", "ĀĀĀĀ"), + ("𐌁𐌁𐌁𐌁𐌀𐌀𐌀𐌀", "𐌂", "", "", "𐌁𐌁𐌁𐌁𐌀𐌀𐌀𐌀"), + ("𐌁𐌁𐌁𐌁𐌂𐌀𐌀𐌀𐌀", "𐌂", "𐌁𐌁𐌁𐌁", "𐌂", "𐌀𐌀𐌀𐌀"), + ("𐌁𐌁𐌁𐌁𐌂𐌂𐌀𐌀𐌀𐌀", "𐌂𐌂", "𐌁𐌁𐌁𐌁", "𐌂𐌂", "𐌀𐌀𐌀𐌀"), + ]) + def test_rpartition(self, buf, sep, res1, res2, res3, dt): + buf = np.array(buf, dtype=dt) + sep = np.array(sep, dtype=dt) + res1 = np.array(res1, dtype=dt) + res2 = np.array(res2, dtype=dt) + res3 = np.array(res3, dtype=dt) + act1, act2, act3 = np.strings.rpartition(buf, sep) + assert_array_equal(act1, res1) + assert_array_equal(act2, res2) + assert_array_equal(act3, res3) + assert_array_equal(act1 + act2 + act3, buf) + + @pytest.mark.parametrize("method", ["strip", "lstrip", "rstrip"]) + @pytest.mark.parametrize( + "source,strip", + [ + ("λμ", "μ"), + ("λμ", "λ"), + ("λ" * 5 + "μ" * 2, "μ"), + ("λ" * 5 + "μ" * 2, "λ"), + ("λ" * 5 + "A" + "μ" * 2, "μλ"), + ("λμ" * 5, "μ"), + ("λμ" * 5, "λ"), + ]) + def test_strip_functions_unicode(self, source, strip, method, dt): + src_array = np.array([source], dtype=dt) + + npy_func = getattr(np.strings, method) + py_func = getattr(str, method) + + expected = np.array([py_func(source, strip)], dtype=dt) + actual = npy_func(src_array, strip) + + assert_array_equal(actual, expected) + + @pytest.mark.parametrize("args", [ + (None,), + (0,), + (1,), + (5,), + (15,), + (22,), + (-1,), + (-3,), + ([3, 4],), + ([-5, 5],), + ([0, -8],), + (1, 12), + (-12, 15), + (None, -1), + (0, [17, 6]), + ([1, 2], [-1, -2]), + (1, 11, 2), + (None, None, -1), + ([0, 10], [-1, 0], [2, -1]), + ]) + def test_slice(self, args, dt): + buf = np.array(["Приве́т नमस्ते שָׁלוֹם", "😀😃😄😁😆😅🤣😂🙂🙃"], + dtype=dt) + act = np.strings.slice(buf, *args) + bcast_args = tuple(np.broadcast_to(arg, buf.shape) for arg in args) + res = np.array([s[slice(*arg)] + for s, arg in zip(buf, zip(*bcast_args))], + dtype=dt) + assert_array_equal(act, res) + + +class TestMixedTypeMethods: + def test_center(self): + buf = np.array("😊", dtype="U") + fill = np.array("*", dtype="S") + res = np.array("*😊*", dtype="U") + assert_array_equal(np.strings.center(buf, 3, fill), res) + + buf = np.array("s", dtype="S") + fill = np.array("*", dtype="U") + res = np.array("*s*", dtype="S") + assert_array_equal(np.strings.center(buf, 3, fill), res) + + with pytest.raises(ValueError, match="'ascii' codec can't encode"): + buf = np.array("s", dtype="S") + fill = np.array("😊", dtype="U") + np.strings.center(buf, 3, fill) + + def test_ljust(self): + buf = np.array("😊", dtype="U") + fill = np.array("*", dtype="S") + res = np.array("😊**", dtype="U") + assert_array_equal(np.strings.ljust(buf, 3, fill), res) + + buf = np.array("s", dtype="S") + fill = np.array("*", dtype="U") + res = np.array("s**", dtype="S") + assert_array_equal(np.strings.ljust(buf, 3, fill), res) + + with pytest.raises(ValueError, match="'ascii' codec can't encode"): + buf = np.array("s", dtype="S") + fill = np.array("😊", dtype="U") + np.strings.ljust(buf, 3, fill) + + def test_rjust(self): + buf = np.array("😊", dtype="U") + fill = np.array("*", dtype="S") + res = np.array("**😊", dtype="U") + assert_array_equal(np.strings.rjust(buf, 3, fill), res) + + buf = np.array("s", dtype="S") + fill = np.array("*", dtype="U") + res = np.array("**s", dtype="S") + assert_array_equal(np.strings.rjust(buf, 3, fill), res) + + with pytest.raises(ValueError, match="'ascii' codec can't encode"): + buf = np.array("s", dtype="S") + fill = np.array("😊", dtype="U") + np.strings.rjust(buf, 3, fill) + + +class TestUnicodeOnlyMethodsRaiseWithBytes: + def test_isdecimal_raises(self): + in_ = np.array(b"1") + with assert_raises(TypeError): + np.strings.isdecimal(in_) + + def test_isnumeric_bytes(self): + in_ = np.array(b"1") + with assert_raises(TypeError): + np.strings.isnumeric(in_) + + +def check_itemsize(n_elem, dt): + if dt == "T": + return np.dtype(dt).itemsize + if dt == "S": + return n_elem + if dt == "U": + return n_elem * 4 + +@pytest.mark.parametrize("dt", ["S", "U", "T"]) +class TestReplaceOnArrays: + + def test_replace_count_and_size(self, dt): + a = np.array(["0123456789" * i for i in range(4)], dtype=dt) + r1 = np.strings.replace(a, "5", "ABCDE") + assert r1.dtype.itemsize == check_itemsize(3 * 10 + 3 * 4, dt) + r1_res = np.array(["01234ABCDE6789" * i for i in range(4)], dtype=dt) + assert_array_equal(r1, r1_res) + r2 = np.strings.replace(a, "5", "ABCDE", 1) + assert r2.dtype.itemsize == check_itemsize(3 * 10 + 4, dt) + r3 = np.strings.replace(a, "5", "ABCDE", 0) + assert r3.dtype.itemsize == a.dtype.itemsize + assert_array_equal(r3, a) + # Negative values mean to replace all. + r4 = np.strings.replace(a, "5", "ABCDE", -1) + assert r4.dtype.itemsize == check_itemsize(3 * 10 + 3 * 4, dt) + assert_array_equal(r4, r1) + # We can do count on an element-by-element basis. + r5 = np.strings.replace(a, "5", "ABCDE", [-1, -1, -1, 1]) + assert r5.dtype.itemsize == check_itemsize(3 * 10 + 4, dt) + assert_array_equal(r5, np.array( + ["01234ABCDE6789" * i for i in range(3)] + + ["01234ABCDE6789" + "0123456789" * 2], dtype=dt)) + + def test_replace_broadcasting(self, dt): + a = np.array("0,0,0", dtype=dt) + r1 = np.strings.replace(a, "0", "1", np.arange(3)) + assert r1.dtype == a.dtype + assert_array_equal(r1, np.array(["0,0,0", "1,0,0", "1,1,0"], dtype=dt)) + r2 = np.strings.replace(a, "0", [["1"], ["2"]], np.arange(1, 4)) + assert_array_equal(r2, np.array([["1,0,0", "1,1,0", "1,1,1"], + ["2,0,0", "2,2,0", "2,2,2"]], + dtype=dt)) + r3 = np.strings.replace(a, ["0", "0,0", "0,0,0"], "X") + assert_array_equal(r3, np.array(["X,X,X", "X,0", "X"], dtype=dt)) + + +class TestOverride: + @classmethod + def setup_class(cls): + class Override: + + def __array_function__(self, *args, **kwargs): + return "function" + + def __array_ufunc__(self, *args, **kwargs): + return "ufunc" + + cls.override = Override() + + @pytest.mark.parametrize("func, kwargs", [ + (np.strings.center, dict(width=10)), + (np.strings.capitalize, {}), + (np.strings.decode, {}), + (np.strings.encode, {}), + (np.strings.expandtabs, {}), + (np.strings.ljust, dict(width=10)), + (np.strings.lower, {}), + (np.strings.mod, dict(values=2)), + (np.strings.multiply, dict(i=2)), + (np.strings.partition, dict(sep="foo")), + (np.strings.rjust, dict(width=10)), + (np.strings.rpartition, dict(sep="foo")), + (np.strings.swapcase, {}), + (np.strings.title, {}), + (np.strings.translate, dict(table=None)), + (np.strings.upper, {}), + (np.strings.zfill, dict(width=10)), + ]) + def test_override_function(self, func, kwargs): + assert func(self.override, **kwargs) == "function" + + @pytest.mark.parametrize("func, args, kwargs", [ + (np.strings.add, (None, ), {}), + (np.strings.lstrip, (), {}), + (np.strings.rstrip, (), {}), + (np.strings.strip, (), {}), + (np.strings.equal, (None, ), {}), + (np.strings.not_equal, (None, ), {}), + (np.strings.greater_equal, (None, ), {}), + (np.strings.less_equal, (None, ), {}), + (np.strings.greater, (None, ), {}), + (np.strings.less, (None, ), {}), + (np.strings.count, ("foo", ), {}), + (np.strings.endswith, ("foo", ), {}), + (np.strings.find, ("foo", ), {}), + (np.strings.index, ("foo", ), {}), + (np.strings.isalnum, (), {}), + (np.strings.isalpha, (), {}), + (np.strings.isdecimal, (), {}), + (np.strings.isdigit, (), {}), + (np.strings.islower, (), {}), + (np.strings.isnumeric, (), {}), + (np.strings.isspace, (), {}), + (np.strings.istitle, (), {}), + (np.strings.isupper, (), {}), + (np.strings.rfind, ("foo", ), {}), + (np.strings.rindex, ("foo", ), {}), + (np.strings.startswith, ("foo", ), {}), + (np.strings.str_len, (), {}), + ]) + def test_override_ufunc(self, func, args, kwargs): + assert func(self.override, *args, **kwargs) == "ufunc" diff --git a/local_packages/numpy/_core/tests/test_ufunc.py b/local_packages/numpy/_core/tests/test_ufunc.py new file mode 100644 index 0000000..5c507c1 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_ufunc.py @@ -0,0 +1,3409 @@ +import ctypes as ct +import inspect +import itertools +import pickle +import sys +import warnings + +import pytest +from pytest import param + +import numpy as np +import numpy._core._operand_flag_tests as opflag_tests +import numpy._core._rational_tests as _rational_tests +import numpy._core._umath_tests as umt +import numpy._core.umath as ncu +import numpy.linalg._umath_linalg as uml +from numpy.exceptions import AxisError +from numpy.testing import ( + HAS_REFCOUNT, + IS_PYPY, + IS_WASM, + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_no_warnings, + assert_raises, +) +from numpy.testing._private.utils import requires_memory + +UNARY_UFUNCS = [obj for obj in np._core.umath.__dict__.values() + if isinstance(obj, np.ufunc)] +UNARY_OBJECT_UFUNCS = [uf for uf in UNARY_UFUNCS if "O->O" in uf.types] + +# Remove functions that do not support `floats` +UNARY_OBJECT_UFUNCS.remove(np.bitwise_count) + + +class TestUfuncKwargs: + def test_kwarg_exact(self): + assert_raises(TypeError, np.add, 1, 2, castingx='safe') + assert_raises(TypeError, np.add, 1, 2, dtypex=int) + assert_raises(TypeError, np.add, 1, 2, extobjx=[4096]) + assert_raises(TypeError, np.add, 1, 2, outx=None) + assert_raises(TypeError, np.add, 1, 2, sigx='ii->i') + assert_raises(TypeError, np.add, 1, 2, signaturex='ii->i') + assert_raises(TypeError, np.add, 1, 2, subokx=False) + assert_raises(TypeError, np.add, 1, 2, wherex=[True]) + + def test_sig_signature(self): + assert_raises(TypeError, np.add, 1, 2, sig='ii->i', + signature='ii->i') + + def test_sig_dtype(self): + assert_raises(TypeError, np.add, 1, 2, sig='ii->i', + dtype=int) + assert_raises(TypeError, np.add, 1, 2, signature='ii->i', + dtype=int) + + def test_extobj_removed(self): + assert_raises(TypeError, np.add, 1, 2, extobj=[4096]) + + +class TestUfuncGenericLoops: + """Test generic loops. + + The loops to be tested are: + + PyUFunc_ff_f_As_dd_d + PyUFunc_ff_f + PyUFunc_dd_d + PyUFunc_gg_g + PyUFunc_FF_F_As_DD_D + PyUFunc_DD_D + PyUFunc_FF_F + PyUFunc_GG_G + PyUFunc_OO_O + PyUFunc_OO_O_method + PyUFunc_f_f_As_d_d + PyUFunc_d_d + PyUFunc_f_f + PyUFunc_g_g + PyUFunc_F_F_As_D_D + PyUFunc_F_F + PyUFunc_D_D + PyUFunc_G_G + PyUFunc_O_O + PyUFunc_O_O_method + PyUFunc_On_Om + + Where: + + f -- float + d -- double + g -- long double + F -- complex float + D -- complex double + G -- complex long double + O -- python object + + It is difficult to assure that each of these loops is entered from the + Python level as the special cased loops are a moving target and the + corresponding types are architecture dependent. We probably need to + define C level testing ufuncs to get at them. For the time being, I've + just looked at the signatures registered in the build directory to find + relevant functions. + + """ + np_dtypes = [ + (np.single, np.single), (np.single, np.double), + (np.csingle, np.csingle), (np.csingle, np.cdouble), + (np.double, np.double), (np.longdouble, np.longdouble), + (np.cdouble, np.cdouble), (np.clongdouble, np.clongdouble)] + + @pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes) + def test_unary_PyUFunc(self, input_dtype, output_dtype, f=np.exp, x=0, y=1): + xs = np.full(10, input_dtype(x), dtype=output_dtype) + ys = f(xs)[::2] + assert_allclose(ys, y) + assert_equal(ys.dtype, output_dtype) + + def f2(x, y): + return x**y + + @pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes) + def test_binary_PyUFunc(self, input_dtype, output_dtype, f=f2, x=0, y=1): + xs = np.full(10, input_dtype(x), dtype=output_dtype) + ys = f(xs, xs)[::2] + assert_allclose(ys, y) + assert_equal(ys.dtype, output_dtype) + + # class to use in testing object method loops + class foo: + def conjugate(self): + return np.bool(1) + + def logical_xor(self, obj): + return np.bool(1) + + def test_unary_PyUFunc_O_O(self): + x = np.ones(10, dtype=object) + assert_(np.all(np.abs(x) == 1)) + + def test_unary_PyUFunc_O_O_method_simple(self, foo=foo): + x = np.full(10, foo(), dtype=object) + assert_(np.all(np.conjugate(x) == True)) + + def test_binary_PyUFunc_OO_O(self): + x = np.ones(10, dtype=object) + assert_(np.all(np.add(x, x) == 2)) + + def test_binary_PyUFunc_OO_O_method(self, foo=foo): + x = np.full(10, foo(), dtype=object) + assert_(np.all(np.logical_xor(x, x))) + + def test_binary_PyUFunc_On_Om_method(self, foo=foo): + x = np.full((10, 2, 3), foo(), dtype=object) + assert_(np.all(np.logical_xor(x, x))) + + def test_python_complex_conjugate(self): + # The conjugate ufunc should fall back to calling the method: + arr = np.array([1 + 2j, 3 - 4j], dtype="O") + assert isinstance(arr[0], complex) + res = np.conjugate(arr) + assert res.dtype == np.dtype("O") + assert_array_equal(res, np.array([1 - 2j, 3 + 4j], dtype="O")) + + @pytest.mark.parametrize("ufunc", UNARY_OBJECT_UFUNCS) + def test_unary_PyUFunc_O_O_method_full(self, ufunc): + """Compare the result of the object loop with non-object one""" + val = np.float64(np.pi / 4) + + class MyFloat(np.float64): + def __getattr__(self, attr): + try: + return super().__getattr__(attr) + except AttributeError: + return lambda: getattr(np._core.umath, attr)(val) + + # Use 0-D arrays, to ensure the same element call + num_arr = np.array(val, dtype=np.float64) + obj_arr = np.array(MyFloat(val), dtype="O") + + with np.errstate(all="raise"): + try: + res_num = ufunc(num_arr) + except Exception as exc: + with assert_raises(type(exc)): + ufunc(obj_arr) + else: + res_obj = ufunc(obj_arr) + assert_array_almost_equal(res_num.astype("O"), res_obj) + + +def _pickleable_module_global(): + pass + + +class TestUfunc: + def test_pickle(self): + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + assert_(pickle.loads(pickle.dumps(np.sin, + protocol=proto)) is np.sin) + + # Check that ufunc not defined in the top level numpy namespace + # such as numpy._core._rational_tests.test_add can also be pickled + res = pickle.loads(pickle.dumps(_rational_tests.test_add, + protocol=proto)) + assert_(res is _rational_tests.test_add) + + def test_pickle_withstring(self): + astring = (b"cnumpy.core\n_ufunc_reconstruct\np0\n" + b"(S'numpy._core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.") + assert_(pickle.loads(astring) is np.cos) + + @pytest.mark.skipif(IS_PYPY, reason="'is' check does not work on PyPy") + def test_pickle_name_is_qualname(self): + # This tests that a simplification of our ufunc pickle code will + # lead to allowing qualnames as names. Future ufuncs should + # possible add a specific qualname, or a hook into pickling instead + # (dask+numba may benefit). + _pickleable_module_global.ufunc = umt._pickleable_module_global_ufunc + + obj = pickle.loads(pickle.dumps(_pickleable_module_global.ufunc)) + assert obj is umt._pickleable_module_global_ufunc + + def test_reduceat_shifting_sum(self): + L = 6 + x = np.arange(L) + idx = np.array(list(zip(np.arange(L - 2), np.arange(L - 2) + 2))).ravel() + assert_array_equal(np.add.reduceat(x, idx)[::2], [1, 3, 5, 7]) + + def test_all_ufunc(self): + """Try to check presence and results of all ufuncs. + + The list of ufuncs comes from generate_umath.py and is as follows: + + ===== ==== ============= =============== ======================== + done args function types notes + ===== ==== ============= =============== ======================== + n 1 conjugate nums + O + n 1 absolute nums + O complex -> real + n 1 negative nums + O + n 1 sign nums + O -> int + n 1 invert bool + ints + O flts raise an error + n 1 degrees real + M cmplx raise an error + n 1 radians real + M cmplx raise an error + n 1 arccos flts + M + n 1 arccosh flts + M + n 1 arcsin flts + M + n 1 arcsinh flts + M + n 1 arctan flts + M + n 1 arctanh flts + M + n 1 cos flts + M + n 1 sin flts + M + n 1 tan flts + M + n 1 cosh flts + M + n 1 sinh flts + M + n 1 tanh flts + M + n 1 exp flts + M + n 1 expm1 flts + M + n 1 log flts + M + n 1 log10 flts + M + n 1 log1p flts + M + n 1 sqrt flts + M real x < 0 raises error + n 1 ceil real + M + n 1 trunc real + M + n 1 floor real + M + n 1 fabs real + M + n 1 rint flts + M + n 1 isnan flts -> bool + n 1 isinf flts -> bool + n 1 isfinite flts -> bool + n 1 signbit real -> bool + n 1 modf real -> (frac, int) + n 1 logical_not bool + nums + M -> bool + n 2 left_shift ints + O flts raise an error + n 2 right_shift ints + O flts raise an error + n 2 add bool + nums + O boolean + is || + n 2 subtract bool + nums + O boolean - is ^ + n 2 multiply bool + nums + O boolean * is & + n 2 divide nums + O + n 2 floor_divide nums + O + n 2 true_divide nums + O bBhH -> f, iIlLqQ -> d + n 2 fmod nums + M + n 2 power nums + O + n 2 greater bool + nums + O -> bool + n 2 greater_equal bool + nums + O -> bool + n 2 less bool + nums + O -> bool + n 2 less_equal bool + nums + O -> bool + n 2 equal bool + nums + O -> bool + n 2 not_equal bool + nums + O -> bool + n 2 logical_and bool + nums + M -> bool + n 2 logical_or bool + nums + M -> bool + n 2 logical_xor bool + nums + M -> bool + n 2 maximum bool + nums + O + n 2 minimum bool + nums + O + n 2 bitwise_and bool + ints + O flts raise an error + n 2 bitwise_or bool + ints + O flts raise an error + n 2 bitwise_xor bool + ints + O flts raise an error + n 2 arctan2 real + M + n 2 remainder ints + real + O + n 2 hypot real + M + ===== ==== ============= =============== ======================== + + Types other than those listed will be accepted, but they are cast to + the smallest compatible type for which the function is defined. The + casting rules are: + + bool -> int8 -> float32 + ints -> double + + """ + pass + + # from include/numpy/ufuncobject.h + size_inferred = 2 + can_ignore = 4 + + def test_signature0(self): + # the arguments to test_signature are: nin, nout, core_signature + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(i),(i)->()") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 1, 0)) + assert_equal(ixs, (0, 0)) + assert_equal(flags, (self.size_inferred,)) + assert_equal(sizes, (-1,)) + + def test_signature1(self): + # empty core signature; treat as plain ufunc (with trivial core) + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(),()->()") + assert_equal(enabled, 0) + assert_equal(num_dims, (0, 0, 0)) + assert_equal(ixs, ()) + assert_equal(flags, ()) + assert_equal(sizes, ()) + + def test_signature2(self): + # more complicated names for variables + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(i1,i2),(J_1)->(_kAB)") + assert_equal(enabled, 1) + assert_equal(num_dims, (2, 1, 1)) + assert_equal(ixs, (0, 1, 2, 3)) + assert_equal(flags, (self.size_inferred,) * 4) + assert_equal(sizes, (-1, -1, -1, -1)) + + def test_signature3(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(i1, i12), (J_1)->(i12, i2)") + assert_equal(enabled, 1) + assert_equal(num_dims, (2, 1, 2)) + assert_equal(ixs, (0, 1, 2, 1, 3)) + assert_equal(flags, (self.size_inferred,) * 4) + assert_equal(sizes, (-1, -1, -1, -1)) + + def test_signature4(self): + # matrix_multiply signature from _umath_tests + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(n,k),(k,m)->(n,m)") + assert_equal(enabled, 1) + assert_equal(num_dims, (2, 2, 2)) + assert_equal(ixs, (0, 1, 1, 2, 0, 2)) + assert_equal(flags, (self.size_inferred,) * 3) + assert_equal(sizes, (-1, -1, -1)) + + def test_signature5(self): + # matmul signature from _umath_tests + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(n?,k),(k,m?)->(n?,m?)") + assert_equal(enabled, 1) + assert_equal(num_dims, (2, 2, 2)) + assert_equal(ixs, (0, 1, 1, 2, 0, 2)) + assert_equal(flags, (self.size_inferred | self.can_ignore, + self.size_inferred, + self.size_inferred | self.can_ignore)) + assert_equal(sizes, (-1, -1, -1)) + + def test_signature6(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 1, 1, "(3)->()") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 0)) + assert_equal(ixs, (0,)) + assert_equal(flags, (0,)) + assert_equal(sizes, (3,)) + + def test_signature7(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 3, 1, "(3),(03,3),(n)->(9)") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 2, 1, 1)) + assert_equal(ixs, (0, 0, 0, 1, 2)) + assert_equal(flags, (0, self.size_inferred, 0)) + assert_equal(sizes, (3, -1, 9)) + + def test_signature8(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 3, 1, "(3?),(3?,3?),(n)->(9)") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 2, 1, 1)) + assert_equal(ixs, (0, 0, 0, 1, 2)) + assert_equal(flags, (self.can_ignore, self.size_inferred, 0)) + assert_equal(sizes, (3, -1, 9)) + + def test_signature9(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 1, 1, "( 3) -> ( )") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 0)) + assert_equal(ixs, (0,)) + assert_equal(flags, (0,)) + assert_equal(sizes, (3,)) + + def test_signature10(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 3, 1, "( 3? ) , (3? , 3?) ,(n )-> ( 9)") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 2, 1, 1)) + assert_equal(ixs, (0, 0, 0, 1, 2)) + assert_equal(flags, (self.can_ignore, self.size_inferred, 0)) + assert_equal(sizes, (3, -1, 9)) + + def test_signature_failure_extra_parenthesis(self): + with assert_raises(ValueError): + umt.test_signature(2, 1, "((i)),(i)->()") + + def test_signature_failure_mismatching_parenthesis(self): + with assert_raises(ValueError): + umt.test_signature(2, 1, "(i),)i(->()") + + def test_signature_failure_signature_missing_input_arg(self): + with assert_raises(ValueError): + umt.test_signature(2, 1, "(i),->()") + + def test_signature_failure_signature_missing_output_arg(self): + with assert_raises(ValueError): + umt.test_signature(2, 2, "(i),(i)->()") + + def test_get_signature(self): + assert_equal(np.vecdot.signature, "(n),(n)->()") + + def test_forced_sig(self): + a = 0.5 * np.arange(3, dtype='f8') + assert_equal(np.add(a, 0.5), [0.5, 1, 1.5]) + with assert_raises(TypeError): + np.add(a, 0.5, sig='i', casting='unsafe') + assert_equal(np.add(a, 0.5, sig='ii->i', casting='unsafe'), [0, 0, 1]) + with assert_raises(TypeError): + np.add(a, 0.5, sig=('i4',), casting='unsafe') + assert_equal(np.add(a, 0.5, sig=('i4', 'i4', 'i4'), + casting='unsafe'), [0, 0, 1]) + + b = np.zeros((3,), dtype='f8') + np.add(a, 0.5, out=b) + assert_equal(b, [0.5, 1, 1.5]) + b[:] = 0 + with assert_raises(TypeError): + np.add(a, 0.5, sig='i', out=b, casting='unsafe') + assert_equal(b, [0, 0, 0]) + np.add(a, 0.5, sig='ii->i', out=b, casting='unsafe') + assert_equal(b, [0, 0, 1]) + b[:] = 0 + with assert_raises(TypeError): + np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe') + assert_equal(b, [0, 0, 0]) + np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe') + assert_equal(b, [0, 0, 1]) + + def test_signature_all_None(self): + # signature all None, is an acceptable alternative (since 1.21) + # to not providing a signature. + res1 = np.add([3], [4], sig=(None, None, None)) + res2 = np.add([3], [4]) + assert_array_equal(res1, res2) + res1 = np.maximum([3], [4], sig=(None, None, None)) + res2 = np.maximum([3], [4]) + assert_array_equal(res1, res2) + + with pytest.raises(TypeError): + # special case, that would be deprecated anyway, so errors: + np.add(3, 4, signature=(None,)) + + def test_signature_dtype_type(self): + # Since that will be the normal behaviour (past NumPy 1.21) + # we do support the types already: + float_dtype = type(np.dtype(np.float64)) + np.add(3, 4, signature=(float_dtype, float_dtype, None)) + + @pytest.mark.parametrize("get_kwarg", [ + param(lambda dt: {"dtype": dt}, id="dtype"), + param(lambda dt: {"signature": (dt, None, None)}, id="signature")]) + def test_signature_dtype_instances_allowed(self, get_kwarg): + # We allow certain dtype instances when there is a clear singleton + # and the given one is equivalent; mainly for backcompat. + int64 = np.dtype("int64") + int64_2 = pickle.loads(pickle.dumps(int64)) + # Relies on pickling behavior, if assert fails just remove test... + assert int64 is not int64_2 + + assert np.add(1, 2, **get_kwarg(int64_2)).dtype == int64 + td = np.timedelta64(2, "s") + assert np.add(td, td, **get_kwarg("m8")).dtype == "m8[s]" + + msg = "The `dtype` and `signature` arguments to ufuncs" + + with pytest.raises(TypeError, match=msg): + np.add(3, 5, **get_kwarg(np.dtype("int64").newbyteorder())) + with pytest.raises(TypeError, match=msg): + np.add(3, 5, **get_kwarg(np.dtype("m8[ns]"))) + with pytest.raises(TypeError, match=msg): + np.add(3, 5, **get_kwarg("m8[ns]")) + + @pytest.mark.parametrize("casting", ["unsafe", "same_kind", "safe"]) + def test_partial_signature_mismatch(self, casting): + # If the second argument matches already, no need to specify it: + res = np.ldexp(np.float32(1.), np.int_(2), dtype="d") + assert res.dtype == "d" + res = np.ldexp(np.float32(1.), np.int_(2), signature=(None, None, "d")) + assert res.dtype == "d" + + # ldexp only has a loop for long input as second argument, overriding + # the output cannot help with that (no matter the casting) + with pytest.raises(TypeError): + np.ldexp(1., np.uint64(3), dtype="d") + with pytest.raises(TypeError): + np.ldexp(1., np.uint64(3), signature=(None, None, "d")) + + def test_partial_signature_mismatch_with_cache(self): + with pytest.raises(TypeError): + np.add(np.float16(1), np.uint64(2), sig=("e", "d", None)) + # Ensure e,d->None is in the dispatching cache (double loop) + np.add(np.float16(1), np.float64(2)) + # The error must still be raised: + with pytest.raises(TypeError): + np.add(np.float16(1), np.uint64(2), sig=("e", "d", None)) + + def test_use_output_signature_for_all_arguments(self): + # Test that providing only `dtype=` or `signature=(None, None, dtype)` + # is sufficient if falling back to a homogeneous signature works. + # In this case, the `intp, intp -> intp` loop is chosen. + res = np.power(1.5, 2.8, dtype=np.intp, casting="unsafe") + assert res == 1 # the cast happens first. + res = np.power(1.5, 2.8, signature=(None, None, np.intp), + casting="unsafe") + assert res == 1 + with pytest.raises(TypeError): + # the unsafe casting would normally cause errors though: + np.power(1.5, 2.8, dtype=np.intp) + + def test_signature_errors(self): + with pytest.raises(TypeError, + match="the signature object to ufunc must be a string or"): + np.add(3, 4, signature=123.) # neither a string nor a tuple + + with pytest.raises(ValueError): + # bad symbols that do not translate to dtypes + np.add(3, 4, signature="%^->#") + + with pytest.raises(ValueError): + np.add(3, 4, signature=b"ii-i") # incomplete and byte string + + with pytest.raises(ValueError): + np.add(3, 4, signature="ii>i") # incomplete string + + with pytest.raises(ValueError): + np.add(3, 4, signature=(None, "f8")) # bad length + + with pytest.raises(UnicodeDecodeError): + np.add(3, 4, signature=b"\xff\xff->i") + + def test_forced_dtype_times(self): + # Signatures only set the type numbers (not the actual loop dtypes) + # so using `M` in a signature/dtype should generally work: + a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='>M8[D]') + np.maximum(a, a, dtype="M") + np.maximum.reduce(a, dtype="M") + + arr = np.arange(10, dtype="m8[s]") + np.add(arr, arr, dtype="m") + np.maximum(arr, arr, dtype="m") + + @pytest.mark.parametrize("ufunc", [np.add, np.sqrt]) + def test_cast_safety(self, ufunc): + """Basic test for the safest casts, because ufuncs inner loops can + indicate a cast-safety as well (which is normally always "no"). + """ + def call_ufunc(arr, **kwargs): + return ufunc(*(arr,) * ufunc.nin, **kwargs) + + arr = np.array([1., 2., 3.], dtype=np.float32) + arr_bs = arr.astype(arr.dtype.newbyteorder()) + expected = call_ufunc(arr) + # Normally, a "no" cast: + res = call_ufunc(arr, casting="no") + assert_array_equal(expected, res) + # Byte-swapping is not allowed with "no" though: + with pytest.raises(TypeError): + call_ufunc(arr_bs, casting="no") + + # But is allowed with "equiv": + res = call_ufunc(arr_bs, casting="equiv") + assert_array_equal(expected, res) + + # Casting to float64 is safe, but not equiv: + with pytest.raises(TypeError): + call_ufunc(arr_bs, dtype=np.float64, casting="equiv") + + # but it is safe cast: + res = call_ufunc(arr_bs, dtype=np.float64, casting="safe") + expected = call_ufunc(arr.astype(np.float64)) # upcast + assert_array_equal(expected, res) + + @pytest.mark.parametrize("ufunc", [np.add, np.equal]) + def test_cast_safety_scalar(self, ufunc): + # We test add and equal, because equal has special scalar handling + # Note that the "equiv" casting behavior should maybe be considered + # a current implementation detail. + with pytest.raises(TypeError): + # this picks an integer loop, which is not safe + ufunc(3., 4., dtype=int, casting="safe") + + with pytest.raises(TypeError): + # We accept python float as float64 but not float32 for equiv. + ufunc(3., 4., dtype="float32", casting="equiv") + + # Special case for object and equal (note that equiv implies safe) + ufunc(3, 4, dtype=object, casting="equiv") + # Picks a double loop for both, first is equiv, second safe: + ufunc(np.array([3.]), 3., casting="equiv") + ufunc(np.array([3.]), 3, casting="safe") + ufunc(np.array([3]), 3, casting="equiv") + + def test_cast_safety_scalar_special(self): + # We allow this (and it succeeds) via object, although the equiv + # part may not be important. + np.equal(np.array([3]), 2**300, casting="equiv") + + def test_true_divide(self): + a = np.array(10) + b = np.array(20) + tgt = np.array(0.5) + + for tc in 'bhilqBHILQefdgFDG': + dt = np.dtype(tc) + aa = a.astype(dt) + bb = b.astype(dt) + + # Check result value and dtype. + for x, y in itertools.product([aa, -aa], [bb, -bb]): + + # Check with no output type specified + if tc in 'FDG': + tgt = complex(x) / complex(y) + else: + tgt = float(x) / float(y) + + res = np.true_divide(x, y) + rtol = max(np.finfo(res).resolution, 1e-15) + assert_allclose(res, tgt, rtol=rtol) + + if tc in 'bhilqBHILQ': + assert_(res.dtype.name == 'float64') + else: + assert_(res.dtype.name == dt.name) + + # Check with output type specified. This also checks for the + # incorrect casts in issue gh-3484 because the unary '-' does + # not change types, even for unsigned types, Hence casts in the + # ufunc from signed to unsigned and vice versa will lead to + # errors in the values. + for tcout in 'bhilqBHILQ': + dtout = np.dtype(tcout) + assert_raises(TypeError, np.true_divide, x, y, dtype=dtout) + + for tcout in 'efdg': + dtout = np.dtype(tcout) + if tc in 'FDG': + # Casting complex to float is not allowed + assert_raises(TypeError, np.true_divide, x, y, dtype=dtout) + else: + tgt = float(x) / float(y) + rtol = max(np.finfo(dtout).resolution, 1e-15) + # The value of tiny for double double is NaN + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) + if not np.isnan(np.finfo(dtout).tiny): + atol = max(np.finfo(dtout).tiny, 3e-308) + else: + atol = 3e-308 + # Some test values result in invalid for float16 + # and the cast to it may overflow to inf. + with np.errstate(invalid='ignore', over='ignore'): + res = np.true_divide(x, y, dtype=dtout) + if not np.isfinite(res) and tcout == 'e': + continue + assert_allclose(res, tgt, rtol=rtol, atol=atol) + assert_(res.dtype.name == dtout.name) + + for tcout in 'FDG': + dtout = np.dtype(tcout) + tgt = complex(x) / complex(y) + rtol = max(np.finfo(dtout).resolution, 1e-15) + # The value of tiny for double double is NaN + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) + if not np.isnan(np.finfo(dtout).tiny): + atol = max(np.finfo(dtout).tiny, 3e-308) + else: + atol = 3e-308 + res = np.true_divide(x, y, dtype=dtout) + if not np.isfinite(res): + continue + assert_allclose(res, tgt, rtol=rtol, atol=atol) + assert_(res.dtype.name == dtout.name) + + # Check booleans + a = np.ones((), dtype=np.bool) + res = np.true_divide(a, a) + assert_(res == 1.0) + assert_(res.dtype.name == 'float64') + res = np.true_divide(~a, a) + assert_(res == 0.0) + assert_(res.dtype.name == 'float64') + + def test_sum_stability(self): + a = np.ones(500, dtype=np.float32) + assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 4) + + a = np.ones(500, dtype=np.float64) + assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 13) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_sum(self): + for dt in (int, np.float16, np.float32, np.float64, np.longdouble): + for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127, + 128, 1024, 1235): + # warning if sum overflows, which it does in float16 + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always", RuntimeWarning) + + tgt = dt(v * (v + 1) / 2) + overflow = not np.isfinite(tgt) + assert_equal(len(w), 1 * overflow) + + d = np.arange(1, v + 1, dtype=dt) + + assert_almost_equal(np.sum(d), tgt) + assert_equal(len(w), 2 * overflow) + + assert_almost_equal(np.sum(d[::-1]), tgt) + assert_equal(len(w), 3 * overflow) + + d = np.ones(500, dtype=dt) + assert_almost_equal(np.sum(d[::2]), 250.) + assert_almost_equal(np.sum(d[1::2]), 250.) + assert_almost_equal(np.sum(d[::3]), 167.) + assert_almost_equal(np.sum(d[1::3]), 167.) + assert_almost_equal(np.sum(d[::-2]), 250.) + assert_almost_equal(np.sum(d[-1::-2]), 250.) + assert_almost_equal(np.sum(d[::-3]), 167.) + assert_almost_equal(np.sum(d[-1::-3]), 167.) + # sum with first reduction entry != 0 + d = np.ones((1,), dtype=dt) + d += d + assert_almost_equal(d, 2.) + + def test_sum_complex(self): + for dt in (np.complex64, np.complex128, np.clongdouble): + for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127, + 128, 1024, 1235): + tgt = dt(v * (v + 1) / 2) - dt((v * (v + 1) / 2) * 1j) + d = np.empty(v, dtype=dt) + d.real = np.arange(1, v + 1) + d.imag = -np.arange(1, v + 1) + assert_almost_equal(np.sum(d), tgt) + assert_almost_equal(np.sum(d[::-1]), tgt) + + d = np.ones(500, dtype=dt) + 1j + assert_almost_equal(np.sum(d[::2]), 250. + 250j) + assert_almost_equal(np.sum(d[1::2]), 250. + 250j) + assert_almost_equal(np.sum(d[::3]), 167. + 167j) + assert_almost_equal(np.sum(d[1::3]), 167. + 167j) + assert_almost_equal(np.sum(d[::-2]), 250. + 250j) + assert_almost_equal(np.sum(d[-1::-2]), 250. + 250j) + assert_almost_equal(np.sum(d[::-3]), 167. + 167j) + assert_almost_equal(np.sum(d[-1::-3]), 167. + 167j) + # sum with first reduction entry != 0 + d = np.ones((1,), dtype=dt) + 1j + d += d + assert_almost_equal(d, 2. + 2j) + + def test_sum_initial(self): + # Integer, single axis + assert_equal(np.sum([3], initial=2), 5) + + # Floating point + assert_almost_equal(np.sum([0.2], initial=0.1), 0.3) + + # Multiple non-adjacent axes + assert_equal(np.sum(np.ones((2, 3, 5), dtype=np.int64), axis=(0, 2), initial=2), + [12, 12, 12]) + + def test_sum_where(self): + # More extensive tests done in test_reduction_with_where. + assert_equal(np.sum([[1., 2.], [3., 4.]], where=[True, False]), 4.) + assert_equal(np.sum([[1., 2.], [3., 4.]], axis=0, initial=5., + where=[True, False]), [9., 5.]) + + def test_vecdot(self): + arr1 = np.arange(6).reshape((2, 3)) + arr2 = np.arange(3).reshape((1, 3)) + + actual = np.vecdot(arr1, arr2) + expected = np.array([5, 14]) + + assert_array_equal(actual, expected) + + actual2 = np.vecdot(arr1.T, arr2.T, axis=-2) + assert_array_equal(actual2, expected) + + actual3 = np.vecdot(arr1.astype("object"), arr2) + assert_array_equal(actual3, expected.astype("object")) + + def test_matvec(self): + arr1 = np.arange(6).reshape((2, 3)) + arr2 = np.arange(3).reshape((1, 3)) + + actual = np.matvec(arr1, arr2) + expected = np.array([[5, 14]]) + + assert_array_equal(actual, expected) + + actual2 = np.matvec(arr1.T, arr2.T, axes=[(-1, -2), -2, -1]) + assert_array_equal(actual2, expected) + + actual3 = np.matvec(arr1.astype("object"), arr2) + assert_array_equal(actual3, expected.astype("object")) + + @pytest.mark.parametrize("vec", [ + np.array([[1., 2., 3.], [4., 5., 6.]]), + np.array([[1., 2j, 3.], [4., 5., 6j]]), + np.array([[1., 2., 3.], [4., 5., 6.]], dtype=object), + np.array([[1., 2j, 3.], [4., 5., 6j]], dtype=object)]) + @pytest.mark.parametrize("matrix", [ + None, + np.array([[1. + 1j, 0.5, -0.5j], + [0.25, 2j, 0.], + [4., 0., -1j]])]) + def test_vecmatvec_identity(self, matrix, vec): + """Check that (x†A)x equals x†(Ax).""" + mat = matrix if matrix is not None else np.eye(3) + matvec = np.matvec(mat, vec) # Ax + vecmat = np.vecmat(vec, mat) # x†A + if matrix is None: + assert_array_equal(matvec, vec) + assert_array_equal(vecmat.conj(), vec) + assert_array_equal(matvec, (mat @ vec[..., np.newaxis]).squeeze(-1)) + assert_array_equal(vecmat, (vec[..., np.newaxis].mT.conj() + @ mat).squeeze(-2)) + expected = np.einsum('...i,ij,...j', vec.conj(), mat, vec) + vec_matvec = (vec.conj() * matvec).sum(-1) + vecmat_vec = (vecmat * vec).sum(-1) + assert_array_equal(vec_matvec, expected) + assert_array_equal(vecmat_vec, expected) + + @pytest.mark.parametrize("ufunc, shape1, shape2, conj", [ + (np.vecdot, (3,), (3,), True), + (np.vecmat, (3,), (3, 1), True), + (np.matvec, (1, 3), (3,), False), + (np.matmul, (1, 3), (3, 1), False), + ]) + def test_vecdot_matvec_vecmat_complex(self, ufunc, shape1, shape2, conj): + arr1 = np.array([1, 2j, 3]) + arr2 = np.array([1, 2, 3]) + + actual1 = ufunc(arr1.reshape(shape1), arr2.reshape(shape2)) + expected1 = np.array(((arr1.conj() if conj else arr1) * arr2).sum(), + ndmin=min(len(shape1), len(shape2))) + assert_array_equal(actual1, expected1) + # This would fail for conj=True, since matmul omits the conjugate. + if not conj: + assert_array_equal(arr1.reshape(shape1) @ arr2.reshape(shape2), + expected1) + + actual2 = ufunc(arr2.reshape(shape1), arr1.reshape(shape2)) + expected2 = np.array(((arr2.conj() if conj else arr2) * arr1).sum(), + ndmin=min(len(shape1), len(shape2))) + assert_array_equal(actual2, expected2) + + actual3 = ufunc(arr1.reshape(shape1).astype("object"), + arr2.reshape(shape2).astype("object")) + expected3 = expected1.astype(object) + assert_array_equal(actual3, expected3) + + def test_vecdot_subclass(self): + class MySubclass(np.ndarray): + pass + + arr1 = np.arange(6).reshape((2, 3)).view(MySubclass) + arr2 = np.arange(3).reshape((1, 3)).view(MySubclass) + result = np.vecdot(arr1, arr2) + assert isinstance(result, MySubclass) + + def test_vecdot_object_no_conjugate(self): + arr = np.array(["1", "2"], dtype=object) + with pytest.raises(AttributeError, match="conjugate"): + np.vecdot(arr, arr) + + def test_vecdot_object_breaks_outer_loop_on_error(self): + arr1 = np.ones((3, 3)).astype(object) + arr2 = arr1.copy() + arr2[1, 1] = None + out = np.zeros(3).astype(object) + with pytest.raises(TypeError, match=r"\*: 'float' and 'NoneType'"): + np.vecdot(arr1, arr2, out=out) + assert out[0] == 3 + assert out[1] == out[2] == 0 + + def test_broadcast(self): + msg = "broadcast" + a = np.arange(4).reshape((2, 1, 2)) + b = np.arange(4).reshape((1, 2, 2)) + assert_array_equal(np.vecdot(a, b), np.sum(a * b, axis=-1), err_msg=msg) + msg = "extend & broadcast loop dimensions" + b = np.arange(4).reshape((2, 2)) + assert_array_equal(np.vecdot(a, b), np.sum(a * b, axis=-1), err_msg=msg) + # Broadcast in core dimensions should fail + a = np.arange(8).reshape((4, 2)) + b = np.arange(4).reshape((4, 1)) + assert_raises(ValueError, np.vecdot, a, b) + # Extend core dimensions should fail + a = np.arange(8).reshape((4, 2)) + b = np.array(7) + assert_raises(ValueError, np.vecdot, a, b) + # Broadcast should fail + a = np.arange(2).reshape((2, 1, 1)) + b = np.arange(3).reshape((3, 1, 1)) + assert_raises(ValueError, np.vecdot, a, b) + + # Writing to a broadcasted array with overlap should warn, gh-2705 + a = np.arange(2) + b = np.arange(4).reshape((2, 2)) + u, v = np.broadcast_arrays(a, b) + assert_equal(u.strides[0], 0) + x = u + v + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + u += v + assert_equal(len(w), 1) + assert_(x[0, 0] != u[0, 0]) + + # Output reduction should not be allowed. + # See gh-15139 + a = np.arange(6).reshape(3, 2) + b = np.ones(2) + out = np.empty(()) + assert_raises(ValueError, np.vecdot, a, b, out) + out2 = np.empty(3) + c = np.vecdot(a, b, out2) + assert_(c is out2) + + def test_out_broadcasts(self): + # For ufuncs and gufuncs (not for reductions), we currently allow + # the output to cause broadcasting of the input arrays. + # both along dimensions with shape 1 and dimensions which do not + # exist at all in the inputs. + arr = np.arange(3).reshape(1, 3) + out = np.empty((5, 4, 3)) + np.add(arr, arr, out=out) + assert (out == np.arange(3) * 2).all() + + # The same holds for gufuncs (gh-16484) + np.vecdot(arr, arr, out=out) + # the result would be just a scalar `5`, but is broadcast fully: + assert (out == 5).all() + + @pytest.mark.parametrize(["arr", "out"], [ + ([2], np.empty(())), + ([1, 2], np.empty(1)), + (np.ones((4, 3)), np.empty((4, 1)))], + ids=["(1,)->()", "(2,)->(1,)", "(4, 3)->(4, 1)"]) + def test_out_broadcast_errors(self, arr, out): + # Output is (currently) allowed to broadcast inputs, but it cannot be + # smaller than the actual result. + with pytest.raises(ValueError, match="non-broadcastable"): + np.positive(arr, out=out) + + with pytest.raises(ValueError, match="non-broadcastable"): + np.add(np.ones(()), arr, out=out) + + def test_type_cast(self): + msg = "type cast" + a = np.arange(6, dtype='short').reshape((2, 3)) + assert_array_equal(np.vecdot(a, a), np.sum(a * a, axis=-1), + err_msg=msg) + msg = "type cast on one argument" + a = np.arange(6).reshape((2, 3)) + b = a + 0.1 + assert_array_almost_equal(np.vecdot(a, b), np.sum(a * b, axis=-1), + err_msg=msg) + + def test_endian(self): + msg = "big endian" + a = np.arange(6, dtype='>i4').reshape((2, 3)) + assert_array_equal(np.vecdot(a, a), np.sum(a * a, axis=-1), + err_msg=msg) + msg = "little endian" + a = np.arange(6, dtype='()' + a = np.arange(27.).reshape((3, 3, 3)) + b = np.arange(10., 19.).reshape((3, 1, 3)) + # basic tests on inputs (outputs tested below with matrix_multiply). + c = np.vecdot(a, b) + assert_array_equal(c, (a * b).sum(-1)) + # default + c = np.vecdot(a, b, axes=[(-1,), (-1,), ()]) + assert_array_equal(c, (a * b).sum(-1)) + # integers ok for single axis. + c = np.vecdot(a, b, axes=[-1, -1, ()]) + assert_array_equal(c, (a * b).sum(-1)) + # mix fine + c = np.vecdot(a, b, axes=[(-1,), -1, ()]) + assert_array_equal(c, (a * b).sum(-1)) + # can omit last axis. + c = np.vecdot(a, b, axes=[-1, -1]) + assert_array_equal(c, (a * b).sum(-1)) + # can pass in other types of integer (with __index__ protocol) + c = np.vecdot(a, b, axes=[np.int8(-1), np.array(-1, dtype=np.int32)]) + assert_array_equal(c, (a * b).sum(-1)) + # swap some axes + c = np.vecdot(a, b, axes=[0, 0]) + assert_array_equal(c, (a * b).sum(0)) + c = np.vecdot(a, b, axes=[0, 2]) + assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1)) + # Check errors for improperly constructed axes arguments. + # should have list. + assert_raises(TypeError, np.vecdot, a, b, axes=-1) + # needs enough elements + assert_raises(ValueError, np.vecdot, a, b, axes=[-1]) + # should pass in indices. + assert_raises(TypeError, np.vecdot, a, b, axes=[-1.0, -1.0]) + assert_raises(TypeError, np.vecdot, a, b, axes=[(-1.0,), -1]) + assert_raises(TypeError, np.vecdot, a, b, axes=[None, 1]) + # cannot pass an index unless there is only one dimension + # (output is wrong in this case) + assert_raises(AxisError, np.vecdot, a, b, axes=[-1, -1, -1]) + # or pass in generally the wrong number of axes + assert_raises(AxisError, np.vecdot, a, b, axes=[-1, -1, (-1,)]) + assert_raises(AxisError, np.vecdot, a, b, axes=[-1, (-2, -1), ()]) + # axes need to have same length. + assert_raises(ValueError, np.vecdot, a, b, axes=[0, 1]) + + # matrix_multiply signature: '(m,n),(n,p)->(m,p)' + mm = umt.matrix_multiply + a = np.arange(12).reshape((2, 3, 2)) + b = np.arange(8).reshape((2, 2, 2, 1)) + 1 + # Sanity check. + c = mm(a, b) + assert_array_equal(c, np.matmul(a, b)) + # Default axes. + c = mm(a, b, axes=[(-2, -1), (-2, -1), (-2, -1)]) + assert_array_equal(c, np.matmul(a, b)) + # Default with explicit axes. + c = mm(a, b, axes=[(1, 2), (2, 3), (2, 3)]) + assert_array_equal(c, np.matmul(a, b)) + # swap some axes. + c = mm(a, b, axes=[(0, -1), (1, 2), (-2, -1)]) + assert_array_equal(c, np.matmul(a.transpose(1, 0, 2), + b.transpose(0, 3, 1, 2))) + # Default with output array. + c = np.empty((2, 2, 3, 1)) + d = mm(a, b, out=c, axes=[(1, 2), (2, 3), (2, 3)]) + assert_(c is d) + assert_array_equal(c, np.matmul(a, b)) + # Transposed output array + c = np.empty((1, 2, 2, 3)) + d = mm(a, b, out=c, axes=[(-2, -1), (-2, -1), (3, 0)]) + assert_(c is d) + assert_array_equal(c, np.matmul(a, b).transpose(3, 0, 1, 2)) + # Check errors for improperly constructed axes arguments. + # wrong argument + assert_raises(TypeError, mm, a, b, axis=1) + # axes should be list + assert_raises(TypeError, mm, a, b, axes=1) + assert_raises(TypeError, mm, a, b, axes=((-2, -1), (-2, -1), (-2, -1))) + # list needs to have right length + assert_raises(ValueError, mm, a, b, axes=[]) + assert_raises(ValueError, mm, a, b, axes=[(-2, -1)]) + # list should not contain None, or lists + assert_raises(TypeError, mm, a, b, axes=[None, None, None]) + assert_raises(TypeError, + mm, a, b, axes=[[-2, -1], [-2, -1], [-2, -1]]) + assert_raises(TypeError, + mm, a, b, axes=[(-2, -1), (-2, -1), [-2, -1]]) + assert_raises(TypeError, mm, a, b, axes=[(-2, -1), (-2, -1), None]) + # single integers are AxisErrors if more are required + assert_raises(AxisError, mm, a, b, axes=[-1, -1, -1]) + assert_raises(AxisError, mm, a, b, axes=[(-2, -1), (-2, -1), -1]) + # tuples should not have duplicated values + assert_raises(ValueError, mm, a, b, axes=[(-2, -1), (-2, -1), (-2, -2)]) + # arrays should have enough axes. + z = np.zeros((2, 2)) + assert_raises(ValueError, mm, z, z[0]) + assert_raises(ValueError, mm, z, z, out=z[:, 0]) + assert_raises(ValueError, mm, z[1], z, axes=[0, 1]) + assert_raises(ValueError, mm, z, z, out=z[0], axes=[0, 1]) + # Regular ufuncs should not accept axes. + assert_raises(TypeError, np.add, 1., 1., axes=[0]) + # should be able to deal with bad unrelated kwargs. + assert_raises(TypeError, mm, z, z, axes=[0, 1], parrot=True) + + def test_axis_argument(self): + # vecdot signature: '(n),(n)->()' + a = np.arange(27.).reshape((3, 3, 3)) + b = np.arange(10., 19.).reshape((3, 1, 3)) + c = np.vecdot(a, b) + assert_array_equal(c, (a * b).sum(-1)) + c = np.vecdot(a, b, axis=-1) + assert_array_equal(c, (a * b).sum(-1)) + out = np.zeros_like(c) + d = np.vecdot(a, b, axis=-1, out=out) + assert_(d is out) + assert_array_equal(d, c) + c = np.vecdot(a, b, axis=0) + assert_array_equal(c, (a * b).sum(0)) + # Sanity checks on innerwt and cumsum. + a = np.arange(6).reshape((2, 3)) + b = np.arange(10, 16).reshape((2, 3)) + w = np.arange(20, 26).reshape((2, 3)) + assert_array_equal(umt.innerwt(a, b, w, axis=0), + np.sum(a * b * w, axis=0)) + assert_array_equal(umt.cumsum(a, axis=0), np.cumsum(a, axis=0)) + assert_array_equal(umt.cumsum(a, axis=-1), np.cumsum(a, axis=-1)) + out = np.empty_like(a) + b = umt.cumsum(a, out=out, axis=0) + assert_(out is b) + assert_array_equal(b, np.cumsum(a, axis=0)) + b = umt.cumsum(a, out=out, axis=1) + assert_(out is b) + assert_array_equal(b, np.cumsum(a, axis=-1)) + # Check errors. + # Cannot pass in both axis and axes. + assert_raises(TypeError, np.vecdot, a, b, axis=0, axes=[0, 0]) + # Not an integer. + assert_raises(TypeError, np.vecdot, a, b, axis=[0]) + # more than 1 core dimensions. + mm = umt.matrix_multiply + assert_raises(TypeError, mm, a, b, axis=1) + # Output wrong size in axis. + out = np.empty((1, 2, 3), dtype=a.dtype) + assert_raises(ValueError, umt.cumsum, a, out=out, axis=0) + # Regular ufuncs should not accept axis. + assert_raises(TypeError, np.add, 1., 1., axis=0) + + def test_keepdims_argument(self): + # vecdot signature: '(n),(n)->()' + a = np.arange(27.).reshape((3, 3, 3)) + b = np.arange(10., 19.).reshape((3, 1, 3)) + c = np.vecdot(a, b) + assert_array_equal(c, (a * b).sum(-1)) + c = np.vecdot(a, b, keepdims=False) + assert_array_equal(c, (a * b).sum(-1)) + c = np.vecdot(a, b, keepdims=True) + assert_array_equal(c, (a * b).sum(-1, keepdims=True)) + out = np.zeros_like(c) + d = np.vecdot(a, b, keepdims=True, out=out) + assert_(d is out) + assert_array_equal(d, c) + # Now combined with axis and axes. + c = np.vecdot(a, b, axis=-1, keepdims=False) + assert_array_equal(c, (a * b).sum(-1, keepdims=False)) + c = np.vecdot(a, b, axis=-1, keepdims=True) + assert_array_equal(c, (a * b).sum(-1, keepdims=True)) + c = np.vecdot(a, b, axis=0, keepdims=False) + assert_array_equal(c, (a * b).sum(0, keepdims=False)) + c = np.vecdot(a, b, axis=0, keepdims=True) + assert_array_equal(c, (a * b).sum(0, keepdims=True)) + c = np.vecdot(a, b, axes=[(-1,), (-1,), ()], keepdims=False) + assert_array_equal(c, (a * b).sum(-1)) + c = np.vecdot(a, b, axes=[(-1,), (-1,), (-1,)], keepdims=True) + assert_array_equal(c, (a * b).sum(-1, keepdims=True)) + c = np.vecdot(a, b, axes=[0, 0], keepdims=False) + assert_array_equal(c, (a * b).sum(0)) + c = np.vecdot(a, b, axes=[0, 0, 0], keepdims=True) + assert_array_equal(c, (a * b).sum(0, keepdims=True)) + c = np.vecdot(a, b, axes=[0, 2], keepdims=False) + assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1)) + c = np.vecdot(a, b, axes=[0, 2], keepdims=True) + assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1, + keepdims=True)) + c = np.vecdot(a, b, axes=[0, 2, 2], keepdims=True) + assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1, + keepdims=True)) + c = np.vecdot(a, b, axes=[0, 2, 0], keepdims=True) + assert_array_equal(c, (a * b.transpose(2, 0, 1)).sum(0, keepdims=True)) + # Hardly useful, but should work. + c = np.vecdot(a, b, axes=[0, 2, 1], keepdims=True) + assert_array_equal(c, (a.transpose(1, 0, 2) * b.transpose(0, 2, 1)) + .sum(1, keepdims=True)) + # Check with two core dimensions. + a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis] + expected = uml.det(a) + c = uml.det(a, keepdims=False) + assert_array_equal(c, expected) + c = uml.det(a, keepdims=True) + assert_array_equal(c, expected[:, np.newaxis, np.newaxis]) + a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis] + expected_s, expected_l = uml.slogdet(a) + cs, cl = uml.slogdet(a, keepdims=False) + assert_array_equal(cs, expected_s) + assert_array_equal(cl, expected_l) + cs, cl = uml.slogdet(a, keepdims=True) + assert_array_equal(cs, expected_s[:, np.newaxis, np.newaxis]) + assert_array_equal(cl, expected_l[:, np.newaxis, np.newaxis]) + # Sanity check on innerwt. + a = np.arange(6).reshape((2, 3)) + b = np.arange(10, 16).reshape((2, 3)) + w = np.arange(20, 26).reshape((2, 3)) + assert_array_equal(umt.innerwt(a, b, w, keepdims=True), + np.sum(a * b * w, axis=-1, keepdims=True)) + assert_array_equal(umt.innerwt(a, b, w, axis=0, keepdims=True), + np.sum(a * b * w, axis=0, keepdims=True)) + # Check errors. + # Not a boolean + assert_raises(TypeError, np.vecdot, a, b, keepdims='true') + # More than 1 core dimension, and core output dimensions. + mm = umt.matrix_multiply + assert_raises(TypeError, mm, a, b, keepdims=True) + assert_raises(TypeError, mm, a, b, keepdims=False) + # Regular ufuncs should not accept keepdims. + assert_raises(TypeError, np.add, 1., 1., keepdims=False) + + def test_innerwt(self): + a = np.arange(6).reshape((2, 3)) + b = np.arange(10, 16).reshape((2, 3)) + w = np.arange(20, 26).reshape((2, 3)) + assert_array_equal(umt.innerwt(a, b, w), np.sum(a * b * w, axis=-1)) + a = np.arange(100, 124).reshape((2, 3, 4)) + b = np.arange(200, 224).reshape((2, 3, 4)) + w = np.arange(300, 324).reshape((2, 3, 4)) + assert_array_equal(umt.innerwt(a, b, w), np.sum(a * b * w, axis=-1)) + + def test_innerwt_empty(self): + """Test generalized ufunc with zero-sized operands""" + a = np.array([], dtype='f8') + b = np.array([], dtype='f8') + w = np.array([], dtype='f8') + assert_array_equal(umt.innerwt(a, b, w), np.sum(a * b * w, axis=-1)) + + def test_cross1d(self): + """Test with fixed-sized signature.""" + a = np.eye(3) + assert_array_equal(umt.cross1d(a, a), np.zeros((3, 3))) + out = np.zeros((3, 3)) + result = umt.cross1d(a[0], a, out) + assert_(result is out) + assert_array_equal(result, np.vstack((np.zeros(3), a[2], -a[1]))) + assert_raises(ValueError, umt.cross1d, np.eye(4), np.eye(4)) + assert_raises(ValueError, umt.cross1d, a, np.arange(4.)) + # Wrong output core dimension. + assert_raises(ValueError, umt.cross1d, a, np.arange(3.), np.zeros((3, 4))) + # Wrong output broadcast dimension (see gh-15139). + assert_raises(ValueError, umt.cross1d, a, np.arange(3.), np.zeros(3)) + + def test_can_ignore_signature(self): + # Comparing the effects of ? in signature: + # matrix_multiply: (m,n),(n,p)->(m,p) # all must be there. + # matmul: (m?,n),(n,p?)->(m?,p?) # allow missing m, p. + mat = np.arange(12).reshape((2, 3, 2)) + single_vec = np.arange(2) + col_vec = single_vec[:, np.newaxis] + col_vec_array = np.arange(8).reshape((2, 2, 2, 1)) + 1 + # matrix @ single column vector with proper dimension + mm_col_vec = umt.matrix_multiply(mat, col_vec) + # matmul does the same thing + matmul_col_vec = umt.matmul(mat, col_vec) + assert_array_equal(matmul_col_vec, mm_col_vec) + # matrix @ vector without dimension making it a column vector. + # matrix multiply fails -> missing core dim. + assert_raises(ValueError, umt.matrix_multiply, mat, single_vec) + # matmul mimicker passes, and returns a vector. + matmul_col = umt.matmul(mat, single_vec) + assert_array_equal(matmul_col, mm_col_vec.squeeze()) + # Now with a column array: same as for column vector, + # broadcasting sensibly. + mm_col_vec = umt.matrix_multiply(mat, col_vec_array) + matmul_col_vec = umt.matmul(mat, col_vec_array) + assert_array_equal(matmul_col_vec, mm_col_vec) + # As above, but for row vector + single_vec = np.arange(3) + row_vec = single_vec[np.newaxis, :] + row_vec_array = np.arange(24).reshape((4, 2, 1, 1, 3)) + 1 + # row vector @ matrix + mm_row_vec = umt.matrix_multiply(row_vec, mat) + matmul_row_vec = umt.matmul(row_vec, mat) + assert_array_equal(matmul_row_vec, mm_row_vec) + # single row vector @ matrix + assert_raises(ValueError, umt.matrix_multiply, single_vec, mat) + matmul_row = umt.matmul(single_vec, mat) + assert_array_equal(matmul_row, mm_row_vec.squeeze()) + # row vector array @ matrix + mm_row_vec = umt.matrix_multiply(row_vec_array, mat) + matmul_row_vec = umt.matmul(row_vec_array, mat) + assert_array_equal(matmul_row_vec, mm_row_vec) + # Now for vector combinations + # row vector @ column vector + col_vec = row_vec.T + col_vec_array = row_vec_array.swapaxes(-2, -1) + mm_row_col_vec = umt.matrix_multiply(row_vec, col_vec) + matmul_row_col_vec = umt.matmul(row_vec, col_vec) + assert_array_equal(matmul_row_col_vec, mm_row_col_vec) + # single row vector @ single col vector + assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec) + matmul_row_col = umt.matmul(single_vec, single_vec) + assert_array_equal(matmul_row_col, mm_row_col_vec.squeeze()) + # row vector array @ matrix + mm_row_col_array = umt.matrix_multiply(row_vec_array, col_vec_array) + matmul_row_col_array = umt.matmul(row_vec_array, col_vec_array) + assert_array_equal(matmul_row_col_array, mm_row_col_array) + # Finally, check that things are *not* squeezed if one gives an + # output. + out = np.zeros_like(mm_row_col_array) + out = umt.matrix_multiply(row_vec_array, col_vec_array, out=out) + assert_array_equal(out, mm_row_col_array) + out[:] = 0 + out = umt.matmul(row_vec_array, col_vec_array, out=out) + assert_array_equal(out, mm_row_col_array) + # And check one cannot put missing dimensions back. + out = np.zeros_like(mm_row_col_vec) + assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec, + out) + # But fine for matmul, since it is just a broadcast. + out = umt.matmul(single_vec, single_vec, out) + assert_array_equal(out, mm_row_col_vec.squeeze()) + + def test_matrix_multiply(self): + self.compare_matrix_multiply_results(np.int64) + self.compare_matrix_multiply_results(np.double) + + def test_matrix_multiply_umath_empty(self): + res = umt.matrix_multiply(np.ones((0, 10)), np.ones((10, 0))) + assert_array_equal(res, np.zeros((0, 0))) + res = umt.matrix_multiply(np.ones((10, 0)), np.ones((0, 10))) + assert_array_equal(res, np.zeros((10, 10))) + + def compare_matrix_multiply_results(self, tp): + d1 = np.array(np.random.rand(2, 3, 4), dtype=tp) + d2 = np.array(np.random.rand(2, 3, 4), dtype=tp) + msg = f"matrix multiply on type {d1.dtype.name}" + + def permute_n(n): + if n == 1: + return ([0],) + ret = () + base = permute_n(n - 1) + for perm in base: + for i in range(n): + new = perm + [n - 1] + new[n - 1] = new[i] + new[i] = n - 1 + ret += (new,) + return ret + + def slice_n(n): + if n == 0: + return ((),) + ret = () + base = slice_n(n - 1) + for sl in base: + ret += (sl + (slice(None),),) + ret += (sl + (slice(0, 1),),) + return ret + + def broadcastable(s1, s2): + return s1 == s2 or 1 in {s1, s2} + + permute_3 = permute_n(3) + slice_3 = slice_n(3) + ((slice(None, None, -1),) * 3,) + + ref = True + for p1 in permute_3: + for p2 in permute_3: + for s1 in slice_3: + for s2 in slice_3: + a1 = d1.transpose(p1)[s1] + a2 = d2.transpose(p2)[s2] + ref = ref and a1.base is not None + ref = ref and a2.base is not None + if (a1.shape[-1] == a2.shape[-2] and + broadcastable(a1.shape[0], a2.shape[0])): + assert_array_almost_equal( + umt.matrix_multiply(a1, a2), + np.sum(a2[..., np.newaxis].swapaxes(-3, -1) * + a1[..., np.newaxis, :], axis=-1), + err_msg=msg + f' {str(a1.shape)} {str(a2.shape)}') + + assert_equal(ref, True, err_msg="reference check") + + def test_euclidean_pdist(self): + a = np.arange(12, dtype=float).reshape(4, 3) + out = np.empty((a.shape[0] * (a.shape[0] - 1) // 2,), dtype=a.dtype) + umt.euclidean_pdist(a, out) + b = np.sqrt(np.sum((a[:, None] - a)**2, axis=-1)) + b = b[~np.tri(a.shape[0], dtype=bool)] + assert_almost_equal(out, b) + # An output array is required to determine p with signature (n,d)->(p) + assert_raises(ValueError, umt.euclidean_pdist, a) + + def test_cumsum(self): + a = np.arange(10) + result = umt.cumsum(a) + assert_array_equal(result, a.cumsum()) + + def test_object_logical(self): + a = np.array([3, None, True, False, "test", ""], dtype=object) + assert_equal(np.logical_or(a, None), + np.array([x or None for x in a], dtype=object)) + assert_equal(np.logical_or(a, True), + np.array([x or True for x in a], dtype=object)) + assert_equal(np.logical_or(a, 12), + np.array([x or 12 for x in a], dtype=object)) + assert_equal(np.logical_or(a, "blah"), + np.array([x or "blah" for x in a], dtype=object)) + + assert_equal(np.logical_and(a, None), + np.array([x and None for x in a], dtype=object)) + assert_equal(np.logical_and(a, True), + np.array([x and True for x in a], dtype=object)) + assert_equal(np.logical_and(a, 12), + np.array([x and 12 for x in a], dtype=object)) + assert_equal(np.logical_and(a, "blah"), + np.array([x and "blah" for x in a], dtype=object)) + + assert_equal(np.logical_not(a), + np.array([not x for x in a], dtype=object)) + + assert_equal(np.logical_or.reduce(a), 3) + assert_equal(np.logical_and.reduce(a), None) + + def test_object_comparison(self): + class HasComparisons: + def __eq__(self, other): + return '==' + + arr0d = np.array(HasComparisons()) + assert_equal(arr0d == arr0d, True) + assert_equal(np.equal(arr0d, arr0d), True) # normal behavior is a cast + + arr1d = np.array([HasComparisons()]) + assert_equal(arr1d == arr1d, np.array([True])) + # normal behavior is a cast + assert_equal(np.equal(arr1d, arr1d), np.array([True])) + assert_equal(np.equal(arr1d, arr1d, dtype=object), np.array(['=='])) + + def test_object_array_reduction(self): + # Reductions on object arrays + a = np.array(['a', 'b', 'c'], dtype=object) + assert_equal(np.sum(a), 'abc') + assert_equal(np.max(a), 'c') + assert_equal(np.min(a), 'a') + a = np.array([True, False, True], dtype=object) + assert_equal(np.sum(a), 2) + assert_equal(np.prod(a), 0) + assert_equal(np.any(a), True) + assert_equal(np.all(a), False) + assert_equal(np.max(a), True) + assert_equal(np.min(a), False) + assert_equal(np.array([[1]], dtype=object).sum(), 1) + assert_equal(np.array([[[1, 2]]], dtype=object).sum((0, 1)), [1, 2]) + assert_equal(np.array([1], dtype=object).sum(initial=1), 2) + assert_equal(np.array([[1], [2, 3]], dtype=object) + .sum(initial=[0], where=[False, True]), [0, 2, 3]) + + def test_object_array_accumulate_inplace(self): + # Checks that in-place accumulates work, see also gh-7402 + arr = np.ones(4, dtype=object) + arr[:] = [[1] for i in range(4)] + # Twice reproduced also for tuples: + np.add.accumulate(arr, out=arr) + np.add.accumulate(arr, out=arr) + assert_array_equal(arr, + np.array([[1] * i for i in [1, 3, 6, 10]], dtype=object), + ) + + # And the same if the axis argument is used + arr = np.ones((2, 4), dtype=object) + arr[0, :] = [[2] for i in range(4)] + np.add.accumulate(arr, out=arr, axis=-1) + np.add.accumulate(arr, out=arr, axis=-1) + assert_array_equal(arr[0, :], + np.array([[2] * i for i in [1, 3, 6, 10]], dtype=object), + ) + + def test_object_array_accumulate_failure(self): + # Typical accumulation on object works as expected: + res = np.add.accumulate(np.array([1, 0, 2], dtype=object)) + assert_array_equal(res, np.array([1, 1, 3], dtype=object)) + # But errors are propagated from the inner-loop if they occur: + with pytest.raises(TypeError): + np.add.accumulate([1, None, 2]) + + def test_object_array_reduceat_inplace(self): + # Checks that in-place reduceats work, see also gh-7465 + arr = np.empty(4, dtype=object) + arr[:] = [[1] for i in range(4)] + out = np.empty(4, dtype=object) + out[:] = [[1] for i in range(4)] + np.add.reduceat(arr, np.arange(4), out=arr) + np.add.reduceat(arr, np.arange(4), out=arr) + assert_array_equal(arr, out) + + # And the same if the axis argument is used + arr = np.ones((2, 4), dtype=object) + arr[0, :] = [[2] for i in range(4)] + out = np.ones((2, 4), dtype=object) + out[0, :] = [[2] for i in range(4)] + np.add.reduceat(arr, np.arange(4), out=arr, axis=-1) + np.add.reduceat(arr, np.arange(4), out=arr, axis=-1) + assert_array_equal(arr, out) + + def test_object_array_reduceat_failure(self): + # Reduceat works as expected when no invalid operation occurs (None is + # not involved in an operation here) + res = np.add.reduceat(np.array([1, None, 2], dtype=object), [1, 2]) + assert_array_equal(res, np.array([None, 2], dtype=object)) + # But errors when None would be involved in an operation: + with pytest.raises(TypeError): + np.add.reduceat([1, None, 2], [0, 2]) + + def test_zerosize_reduction(self): + # Test with default dtype and object dtype + for a in [[], np.array([], dtype=object)]: + assert_equal(np.sum(a), 0) + assert_equal(np.prod(a), 1) + assert_equal(np.any(a), False) + assert_equal(np.all(a), True) + assert_raises(ValueError, np.max, a) + assert_raises(ValueError, np.min, a) + + def test_axis_out_of_bounds(self): + a = np.array([False, False]) + assert_raises(AxisError, a.all, axis=1) + a = np.array([False, False]) + assert_raises(AxisError, a.all, axis=-2) + + a = np.array([False, False]) + assert_raises(AxisError, a.any, axis=1) + a = np.array([False, False]) + assert_raises(AxisError, a.any, axis=-2) + + def test_scalar_reduction(self): + # The functions 'sum', 'prod', etc allow specifying axis=0 + # even for scalars + assert_equal(np.sum(3, axis=0), 3) + assert_equal(np.prod(3.5, axis=0), 3.5) + assert_equal(np.any(True, axis=0), True) + assert_equal(np.all(False, axis=0), False) + assert_equal(np.max(3, axis=0), 3) + assert_equal(np.min(2.5, axis=0), 2.5) + + # Check scalar behaviour for ufuncs without an identity + assert_equal(np.power.reduce(3), 3) + + # Make sure that scalars are coming out from this operation + assert_(type(np.prod(np.float32(2.5), axis=0)) is np.float32) + assert_(type(np.sum(np.float32(2.5), axis=0)) is np.float32) + assert_(type(np.max(np.float32(2.5), axis=0)) is np.float32) + assert_(type(np.min(np.float32(2.5), axis=0)) is np.float32) + + # check if scalars/0-d arrays get cast + assert_(type(np.any(0, axis=0)) is np.bool) + + # assert that 0-d arrays get wrapped + class MyArray(np.ndarray): + pass + a = np.array(1).view(MyArray) + assert_(type(np.any(a)) is MyArray) + + def test_casting_out_param(self): + # Test that it's possible to do casts on output + a = np.ones((200, 100), np.int64) + b = np.ones((200, 100), np.int64) + c = np.ones((200, 100), np.float64) + np.add(a, b, out=c) + assert_equal(c, 2) + + a = np.zeros(65536) + b = np.zeros(65536, dtype=np.float32) + np.subtract(a, 0, out=b) + assert_equal(b, 0) + + def test_where_param(self): + # Test that the where= ufunc parameter works with regular arrays + a = np.arange(7) + b = np.ones(7) + c = np.zeros(7) + np.add(a, b, out=c, where=(a % 2 == 1)) + assert_equal(c, [0, 2, 0, 4, 0, 6, 0]) + + a = np.arange(4).reshape(2, 2) + 2 + np.power(a, [2, 3], out=a, where=[[0, 1], [1, 0]]) + assert_equal(a, [[2, 27], [16, 5]]) + # Broadcasting the where= parameter + np.subtract(a, 2, out=a, where=[True, False]) + assert_equal(a, [[0, 27], [14, 5]]) + + def test_where_param_buffer_output(self): + # With casting on output + a = np.ones(10, np.int64) + b = np.ones(10, np.int64) + c = 1.5 * np.ones(10, np.float64) + np.add(a, b, out=c, where=[1, 0, 0, 1, 0, 0, 1, 1, 1, 0]) + assert_equal(c, [2, 1.5, 1.5, 2, 1.5, 1.5, 2, 2, 2, 1.5]) + + def test_where_param_alloc(self): + # With casting and allocated output + a = np.array([1], dtype=np.int64) + m = np.array([True], dtype=bool) + assert_equal(np.sqrt(a, where=m, out=None), [1]) + + # No casting and allocated output + a = np.array([1], dtype=np.float64) + m = np.array([True], dtype=bool) + assert_equal(np.sqrt(a, where=m, out=None), [1]) + + def test_where_with_broadcasting(self): + # See gh-17198 + a = np.random.random((5000, 4)) + b = np.random.random((5000, 1)) + + where = a > 0.3 + out = np.full_like(a, 0) + np.less(a, b, where=where, out=out) + b_where = np.broadcast_to(b, a.shape)[where] + assert_array_equal((a[where] < b_where), out[where].astype(bool)) + assert not out[~where].any() # outside mask, out remains all 0 + + def test_where_warns(self): + a = np.arange(7) + mask = a % 2 == 0 + with pytest.warns(UserWarning, match="'where' used without 'out'"): + result1 = np.add(a, a, where=mask) + # Does not warn + result2 = np.add(a, a, where=mask, out=None) + # Sanity check + assert np.all(result1[::2] == [0, 4, 8, 12]) + assert np.all(result2[::2] == [0, 4, 8, 12]) + # Also no warning for where=True + result3 = np.add(a, a, where=True) + # Sanity check + assert_array_equal(result3, a + a) + + @staticmethod + def identityless_reduce_arrs(): + yield np.empty((2, 3, 4), order='C') + yield np.empty((2, 3, 4), order='F') + # Mixed order (reduce order differs outer) + yield np.empty((2, 4, 3), order='C').swapaxes(1, 2) + # Reversed order + yield np.empty((2, 3, 4), order='C')[::-1, ::-1, ::-1] + # Not contiguous + yield np.empty((3, 5, 4), order='C').swapaxes(1, 2)[1:, 1:, 1:] + # Not contiguous and not aligned + a = np.empty((3 * 4 * 5 * 8 + 1,), dtype='i1') + a = a[1:].view(dtype='f8') + a.shape = (3, 4, 5) + a = a[1:, 1:, 1:] + yield a + + @pytest.mark.parametrize("arrs", identityless_reduce_arrs()) + @pytest.mark.parametrize("pos", [(1, 0, 0), (0, 1, 0), (0, 0, 1)]) + def test_identityless_reduction(self, arrs, pos): + # np.minimum.reduce is an identityless reduction + a = arrs.copy() + a[...] = 1 + a[pos] = 0 + + for axis in [None, (0, 1), (0, 2), (1, 2), 0, 1, 2, ()]: + if axis is None: + axes = np.array([], dtype=np.intp) + else: + axes = np.delete(np.arange(a.ndim), axis) + + expected_pos = tuple(np.array(pos)[axes]) + expected = np.ones(np.array(a.shape)[axes]) + expected[expected_pos] = 0 + + res = np.minimum.reduce(a, axis=axis) + assert_equal(res, expected, strict=True) + + res = np.full_like(res, np.nan) + np.minimum.reduce(a, axis=axis, out=res) + assert_equal(res, expected, strict=True) + + @requires_memory(6 * 1024**3) + @pytest.mark.skipif(sys.maxsize < 2**32, + reason="test array too large for 32bit platform") + @pytest.mark.thread_unsafe(reason="crashes with low memory") + def test_identityless_reduction_huge_array(self): + # Regression test for gh-20921 (copying identity incorrectly failed) + arr = np.zeros((2, 2**31), 'uint8') + arr[:, 0] = [1, 3] + arr[:, -1] = [4, 1] + res = np.maximum.reduce(arr, axis=0) + del arr + assert res[0] == 3 + assert res[-1] == 4 + + def test_reduce_identity_depends_on_loop(self): + """ + The type of the result should always depend on the selected loop, not + necessarily the output (only relevant for object arrays). + """ + # For an object loop, the default value 0 with type int is used: + assert type(np.add.reduce([], dtype=object)) is int + out = np.array(None, dtype=object) + # When the loop is float64 but `out` is object this does not happen, + # the result is float64 cast to object (which gives Python `float`). + np.add.reduce([], out=out, dtype=np.float64) + assert type(out[()]) is float + + def test_initial_reduction(self): + # np.minimum.reduce is an identityless reduction + + # For cases like np.maximum(np.abs(...), initial=0) + # More generally, a supremum over non-negative numbers. + assert_equal(np.maximum.reduce([], initial=0), 0) + + # For cases like reduction of an empty array over the reals. + assert_equal(np.minimum.reduce([], initial=np.inf), np.inf) + assert_equal(np.maximum.reduce([], initial=-np.inf), -np.inf) + + # Random tests + assert_equal(np.minimum.reduce([5], initial=4), 4) + assert_equal(np.maximum.reduce([4], initial=5), 5) + assert_equal(np.maximum.reduce([5], initial=4), 5) + assert_equal(np.minimum.reduce([4], initial=5), 4) + + # Check initial=None raises ValueError for both types of ufunc reductions + assert_raises(ValueError, np.minimum.reduce, [], initial=None) + assert_raises(ValueError, np.add.reduce, [], initial=None) + # Also in the somewhat special object case: + with pytest.raises(ValueError): + np.add.reduce([], initial=None, dtype=object) + + # Check that np._NoValue gives default behavior. + assert_equal(np.add.reduce([], initial=np._NoValue), 0) + + # Check that initial kwarg behaves as intended for dtype=object + a = np.array([10], dtype=object) + res = np.add.reduce(a, initial=5) + assert_equal(res, 15) + + def test_empty_reduction_and_identity(self): + arr = np.zeros((0, 5)) + # OK, since the reduction itself is *not* empty, the result is + assert np.true_divide.reduce(arr, axis=1).shape == (0,) + # Not OK, the reduction itself is empty and we have no identity + with pytest.raises(ValueError): + np.true_divide.reduce(arr, axis=0) + + # Test that an empty reduction fails also if the result is empty + arr = np.zeros((0, 0, 5)) + with pytest.raises(ValueError): + np.true_divide.reduce(arr, axis=1) + + # Division reduction makes sense with `initial=1` (empty or not): + res = np.true_divide.reduce(arr, axis=1, initial=1) + assert_array_equal(res, np.ones((0, 5))) + + @pytest.mark.parametrize('axis', (0, 1, None)) + @pytest.mark.parametrize('where', (np.array([False, True, True]), + np.array([[True], [False], [True]]), + np.array([[True, False, False], + [False, True, False], + [False, True, True]]))) + def test_reduction_with_where(self, axis, where): + a = np.arange(9.).reshape(3, 3) + a_copy = a.copy() + a_check = np.zeros_like(a) + np.positive(a, out=a_check, where=where) + + res = np.add.reduce(a, axis=axis, where=where) + check = a_check.sum(axis) + assert_equal(res, check) + # Check we do not overwrite elements of a internally. + assert_array_equal(a, a_copy) + + @pytest.mark.parametrize(('axis', 'where'), + ((0, np.array([True, False, True])), + (1, [True, True, False]), + (None, True))) + @pytest.mark.parametrize('initial', (-np.inf, 5.)) + def test_reduction_with_where_and_initial(self, axis, where, initial): + a = np.arange(9.).reshape(3, 3) + a_copy = a.copy() + a_check = np.full(a.shape, -np.inf) + np.positive(a, out=a_check, where=where) + + res = np.maximum.reduce(a, axis=axis, where=where, initial=initial) + check = a_check.max(axis, initial=initial) + assert_equal(res, check) + + def test_reduction_where_initial_needed(self): + a = np.arange(9.).reshape(3, 3) + m = [False, True, False] + assert_raises(ValueError, np.maximum.reduce, a, where=m) + + def test_identityless_reduction_nonreorderable(self): + a = np.array([[8.0, 2.0, 2.0], [1.0, 0.5, 0.25]]) + + res = np.divide.reduce(a, axis=0) + assert_equal(res, [8.0, 4.0, 8.0]) + + res = np.divide.reduce(a, axis=1) + assert_equal(res, [2.0, 8.0]) + + res = np.divide.reduce(a, axis=()) + assert_equal(res, a) + + assert_raises(ValueError, np.divide.reduce, a, axis=(0, 1)) + + def test_reduce_zero_axis(self): + # If we have an n x m array and do a reduction with axis=1, then we are + # doing n reductions, and each reduction takes an m-element array. For + # a reduction operation without an identity, then: + # n > 0, m > 0: fine + # n = 0, m > 0: fine, doing 0 reductions of m-element arrays + # n > 0, m = 0: can't reduce a 0-element array, ValueError + # n = 0, m = 0: can't reduce a 0-element array, ValueError (for + # consistency with the above case) + # This test doesn't actually look at return values, it just checks to + # make sure that error we get an error in exactly those cases where we + # expect one, and assumes the calculations themselves are done + # correctly. + + def ok(f, *args, **kwargs): + f(*args, **kwargs) + + def err(f, *args, **kwargs): + assert_raises(ValueError, f, *args, **kwargs) + + def t(expect, func, n, m): + expect(func, np.zeros((n, m)), axis=1) + expect(func, np.zeros((m, n)), axis=0) + expect(func, np.zeros((n // 2, n // 2, m)), axis=2) + expect(func, np.zeros((n // 2, m, n // 2)), axis=1) + expect(func, np.zeros((n, m // 2, m // 2)), axis=(1, 2)) + expect(func, np.zeros((m // 2, n, m // 2)), axis=(0, 2)) + expect(func, np.zeros((m // 3, m // 3, m // 3, + n // 2, n // 2)), + axis=(0, 1, 2)) + # Check what happens if the inner (resp. outer) dimensions are a + # mix of zero and non-zero: + expect(func, np.zeros((10, m, n)), axis=(0, 1)) + expect(func, np.zeros((10, n, m)), axis=(0, 2)) + expect(func, np.zeros((m, 10, n)), axis=0) + expect(func, np.zeros((10, m, n)), axis=1) + expect(func, np.zeros((10, n, m)), axis=2) + + # np.maximum is just an arbitrary ufunc with no reduction identity + assert_equal(np.maximum.identity, None) + t(ok, np.maximum.reduce, 30, 30) + t(ok, np.maximum.reduce, 0, 30) + t(err, np.maximum.reduce, 30, 0) + t(err, np.maximum.reduce, 0, 0) + err(np.maximum.reduce, []) + np.maximum.reduce(np.zeros((0, 0)), axis=()) + + # all of the combinations are fine for a reduction that has an + # identity + t(ok, np.add.reduce, 30, 30) + t(ok, np.add.reduce, 0, 30) + t(ok, np.add.reduce, 30, 0) + t(ok, np.add.reduce, 0, 0) + np.add.reduce([]) + np.add.reduce(np.zeros((0, 0)), axis=()) + + # OTOH, accumulate always makes sense for any combination of n and m, + # because it maps an m-element array to an m-element array. These + # tests are simpler because accumulate doesn't accept multiple axes. + for uf in (np.maximum, np.add): + uf.accumulate(np.zeros((30, 0)), axis=0) + uf.accumulate(np.zeros((0, 30)), axis=0) + uf.accumulate(np.zeros((30, 30)), axis=0) + uf.accumulate(np.zeros((0, 0)), axis=0) + + def test_safe_casting(self): + # In old versions of numpy, in-place operations used the 'unsafe' + # casting rules. In versions >= 1.10, 'same_kind' is the + # default and an exception is raised instead of a warning. + # when 'same_kind' is not satisfied. + a = np.array([1, 2, 3], dtype=int) + # Non-in-place addition is fine + assert_array_equal(assert_no_warnings(np.add, a, 1.1), + [2.1, 3.1, 4.1]) + assert_raises(TypeError, np.add, a, 1.1, out=a) + + def add_inplace(a, b): + a += b + + assert_raises(TypeError, add_inplace, a, 1.1) + # Make sure that explicitly overriding the exception is allowed: + assert_no_warnings(np.add, a, 1.1, out=a, casting="unsafe") + assert_array_equal(a, [2, 3, 4]) + + def test_ufunc_custom_out(self): + # Test ufunc with built in input types and custom output type + + a = np.array([0, 1, 2], dtype='i8') + b = np.array([0, 1, 2], dtype='i8') + c = np.empty(3, dtype=_rational_tests.rational) + + # Output must be specified so numpy knows what + # ufunc signature to look for + result = _rational_tests.test_add(a, b, c) + target = np.array([0, 2, 4], dtype=_rational_tests.rational) + assert_equal(result, target) + + # The new resolution means that we can (usually) find custom loops + # as long as they match exactly: + result = _rational_tests.test_add(a, b) + assert_equal(result, target) + + # This works even more generally, so long the default common-dtype + # promoter works out: + result = _rational_tests.test_add(a, b.astype(np.uint16), out=c) + assert_equal(result, target) + + # This scalar path used to go into legacy promotion, but doesn't now: + result = _rational_tests.test_add(a, np.uint16(2)) + target = np.array([2, 3, 4], dtype=_rational_tests.rational) + assert_equal(result, target) + + def test_operand_flags(self): + a = np.arange(16, dtype=int).reshape(4, 4) + b = np.arange(9, dtype=int).reshape(3, 3) + opflag_tests.inplace_add(a[:-1, :-1], b) + assert_equal(a, np.array([[0, 2, 4, 3], [7, 9, 11, 7], + [14, 16, 18, 11], [12, 13, 14, 15]])) + + a = np.array(0) + opflag_tests.inplace_add(a, 3) + assert_equal(a, 3) + opflag_tests.inplace_add(a, [3, 4]) + assert_equal(a, 10) + + def test_struct_ufunc(self): + import numpy._core._struct_ufunc_tests as struct_ufunc + + a = np.array([(1, 2, 3)], dtype='u8,u8,u8') + b = np.array([(1, 2, 3)], dtype='u8,u8,u8') + + result = struct_ufunc.add_triplet(a, b) + assert_equal(result, np.array([(2, 4, 6)], dtype='u8,u8,u8')) + assert_raises(RuntimeError, struct_ufunc.register_fail) + + def test_custom_ufunc(self): + a = np.array( + [_rational_tests.rational(1, 2), + _rational_tests.rational(1, 3), + _rational_tests.rational(1, 4)], + dtype=_rational_tests.rational) + b = np.array( + [_rational_tests.rational(1, 2), + _rational_tests.rational(1, 3), + _rational_tests.rational(1, 4)], + dtype=_rational_tests.rational) + + result = _rational_tests.test_add_rationals(a, b) + expected = np.array( + [_rational_tests.rational(1), + _rational_tests.rational(2, 3), + _rational_tests.rational(1, 2)], + dtype=_rational_tests.rational) + assert_equal(result, expected) + + def test_custom_ufunc_forced_sig(self): + # gh-9351 - looking for a non-first userloop would previously hang + with assert_raises(TypeError): + np.multiply(_rational_tests.rational(1), 1, + signature=(_rational_tests.rational, int, None)) + + def test_custom_array_like(self): + + class MyThing: + __array_priority__ = 1000 + + rmul_count = 0 + getitem_count = 0 + + def __init__(self, shape): + self.shape = shape + + def __len__(self): + return self.shape[0] + + def __getitem__(self, i): + MyThing.getitem_count += 1 + if not isinstance(i, tuple): + i = (i,) + if len(i) > self.ndim: + raise IndexError("boo") + + return MyThing(self.shape[len(i):]) + + def __rmul__(self, other): + MyThing.rmul_count += 1 + return self + + np.float64(5) * MyThing((3, 3)) + assert_(MyThing.rmul_count == 1, MyThing.rmul_count) + assert_(MyThing.getitem_count <= 2, MyThing.getitem_count) + + def test_array_wrap_array_priority(self): + class ArrayPriorityBase(np.ndarray): + @classmethod + def __array_wrap__(cls, array, context=None, return_scalar=False): + return cls + + class ArrayPriorityMinus0(ArrayPriorityBase): + __array_priority__ = 0 + + class ArrayPriorityMinus1000(ArrayPriorityBase): + __array_priority__ = -1000 + + class ArrayPriorityMinus1000b(ArrayPriorityBase): + __array_priority__ = -1000 + + class ArrayPriorityMinus2000(ArrayPriorityBase): + __array_priority__ = -2000 + + x = np.ones(2).view(ArrayPriorityMinus1000) + xb = np.ones(2).view(ArrayPriorityMinus1000b) + y = np.ones(2).view(ArrayPriorityMinus2000) + + assert np.add(x, y) is ArrayPriorityMinus1000 + assert np.add(y, x) is ArrayPriorityMinus1000 + assert np.add(x, xb) is ArrayPriorityMinus1000 + assert np.add(xb, x) is ArrayPriorityMinus1000b + y_minus0 = np.zeros(2).view(ArrayPriorityMinus0) + assert np.add(np.zeros(2), y_minus0) is ArrayPriorityMinus0 + assert type(np.add(xb, x, np.zeros(2))) is np.ndarray + + @pytest.mark.parametrize("a", ( + np.arange(10, dtype=int), + np.arange(10, dtype=_rational_tests.rational), + )) + def test_ufunc_at_basic(self, a): + + aa = a.copy() + np.add.at(aa, [2, 5, 2], 1) + assert_equal(aa, [0, 1, 4, 3, 4, 6, 6, 7, 8, 9]) + + with pytest.raises(ValueError): + # missing second operand + np.add.at(aa, [2, 5, 3]) + + aa = a.copy() + np.negative.at(aa, [2, 5, 3]) + assert_equal(aa, [0, 1, -2, -3, 4, -5, 6, 7, 8, 9]) + + aa = a.copy() + b = np.array([100, 100, 100]) + np.add.at(aa, [2, 5, 2], b) + assert_equal(aa, [0, 1, 202, 3, 4, 105, 6, 7, 8, 9]) + + with pytest.raises(ValueError): + # extraneous second operand + np.negative.at(a, [2, 5, 3], [1, 2, 3]) + + with pytest.raises(ValueError): + # second operand cannot be converted to an array + np.add.at(a, [2, 5, 3], [[1, 2], 1]) + + # ufuncs with indexed loops for performance in ufunc.at + indexed_ufuncs = [np.add, np.subtract, np.multiply, np.floor_divide, + np.maximum, np.minimum, np.fmax, np.fmin] + + @pytest.mark.parametrize( + "typecode", np.typecodes['AllInteger'] + np.typecodes['Float']) + @pytest.mark.parametrize("ufunc", indexed_ufuncs) + def test_ufunc_at_inner_loops(self, typecode, ufunc): + if ufunc is np.divide and typecode in np.typecodes['AllInteger']: + # Avoid divide-by-zero and inf for integer divide + a = np.ones(100, dtype=typecode) + indx = np.random.randint(100, size=30, dtype=np.intp) + vals = np.arange(1, 31, dtype=typecode) + else: + a = np.ones(1000, dtype=typecode) + indx = np.random.randint(1000, size=3000, dtype=np.intp) + vals = np.arange(3000, dtype=typecode) + atag = a.copy() + # Do the calculation twice and compare the answers + with warnings.catch_warnings(record=True) as w_at: + warnings.simplefilter('always') + ufunc.at(a, indx, vals) + with warnings.catch_warnings(record=True) as w_loop: + warnings.simplefilter('always') + for i, v in zip(indx, vals): + # Make sure all the work happens inside the ufunc + # in order to duplicate error/warning handling + ufunc(atag[i], v, out=atag[i:i + 1], casting="unsafe") + assert_equal(atag, a) + # If w_loop warned, make sure w_at warned as well + if len(w_loop) > 0: + # + assert len(w_at) > 0 + assert w_at[0].category == w_loop[0].category + assert str(w_at[0].message)[:10] == str(w_loop[0].message)[:10] + + @pytest.mark.parametrize("typecode", np.typecodes['Complex']) + @pytest.mark.parametrize("ufunc", [np.add, np.subtract, np.multiply]) + def test_ufunc_at_inner_loops_complex(self, typecode, ufunc): + a = np.ones(10, dtype=typecode) + indx = np.concatenate([np.ones(6, dtype=np.intp), + np.full(18, 4, dtype=np.intp)]) + value = a.dtype.type(1j) + ufunc.at(a, indx, value) + expected = np.ones_like(a) + if ufunc is np.multiply: + expected[1] = expected[4] = -1 + else: + expected[1] += 6 * (value if ufunc is np.add else -value) + expected[4] += 18 * (value if ufunc is np.add else -value) + + assert_array_equal(a, expected) + + def test_ufunc_at_ellipsis(self): + # Make sure the indexed loop check does not choke on iters + # with subspaces + arr = np.zeros(5) + np.add.at(arr, slice(None), np.ones(5)) + assert_array_equal(arr, np.ones(5)) + + def test_ufunc_at_negative(self): + arr = np.ones(5, dtype=np.int32) + indx = np.arange(5) + umt.indexed_negative.at(arr, indx) + # If it is [-1, -1, -1, -100, 0] then the regular strided loop was used + assert np.all(arr == [-1, -1, -1, -200, -1]) + + def test_ufunc_at_large(self): + # issue gh-23457 + indices = np.zeros(8195, dtype=np.int16) + b = np.zeros(8195, dtype=float) + b[0] = 10 + b[1] = 5 + b[8192:] = 100 + a = np.zeros(1, dtype=float) + np.add.at(a, indices, b) + assert a[0] == b.sum() + + def test_cast_index_fastpath(self): + arr = np.zeros(10) + values = np.ones(100000) + # index must be cast, which may be buffered in chunks: + index = np.zeros(len(values), dtype=np.uint8) + np.add.at(arr, index, values) + assert arr[0] == len(values) + + @pytest.mark.parametrize("value", [ + np.ones(1), np.ones(()), np.float64(1.), 1.]) + def test_ufunc_at_scalar_value_fastpath(self, value): + arr = np.zeros(1000) + # index must be cast, which may be buffered in chunks: + index = np.repeat(np.arange(1000), 2) + np.add.at(arr, index, value) + assert_array_equal(arr, np.full_like(arr, 2 * value)) + + def test_ufunc_at_multiD(self): + a = np.arange(9).reshape(3, 3) + b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]]) + np.add.at(a, (slice(None), [1, 2, 1]), b) + assert_equal(a, [[0, 201, 102], [3, 404, 205], [6, 607, 308]]) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (slice(None), slice(None), [1, 2, 1]), b) + assert_equal(a, + [[[0, 401, 202], + [3, 404, 205], + [6, 407, 208]], + + [[9, 410, 211], + [12, 413, 214], + [15, 416, 217]], + + [[18, 419, 220], + [21, 422, 223], + [24, 425, 226]]]) + + a = np.arange(9).reshape(3, 3) + b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]]) + np.add.at(a, ([1, 2, 1], slice(None)), b) + assert_equal(a, [[0, 1, 2], [403, 404, 405], [206, 207, 208]]) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (slice(None), [1, 2, 1], slice(None)), b) + assert_equal(a, + [[[0, 1, 2], + [203, 404, 605], + [106, 207, 308]], + + [[9, 10, 11], + [212, 413, 614], + [115, 216, 317]], + + [[18, 19, 20], + [221, 422, 623], + [124, 225, 326]]]) + + a = np.arange(9).reshape(3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (0, [1, 2, 1]), b) + assert_equal(a, [[0, 401, 202], [3, 4, 5], [6, 7, 8]]) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, ([1, 2, 1], 0, slice(None)), b) + assert_equal(a, + [[[0, 1, 2], + [3, 4, 5], + [6, 7, 8]], + + [[209, 410, 611], + [12, 13, 14], + [15, 16, 17]], + + [[118, 219, 320], + [21, 22, 23], + [24, 25, 26]]]) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (slice(None), slice(None), slice(None)), b) + assert_equal(a, + [[[100, 201, 302], + [103, 204, 305], + [106, 207, 308]], + + [[109, 210, 311], + [112, 213, 314], + [115, 216, 317]], + + [[118, 219, 320], + [121, 222, 323], + [124, 225, 326]]]) + + def test_ufunc_at_0D(self): + a = np.array(0) + np.add.at(a, (), 1) + assert_equal(a, 1) + + assert_raises(IndexError, np.add.at, a, 0, 1) + assert_raises(IndexError, np.add.at, a, [], 1) + + def test_ufunc_at_dtypes(self): + # Test mixed dtypes + a = np.arange(10) + np.power.at(a, [1, 2, 3, 2], 3.5) + assert_equal(a, np.array([0, 1, 4414, 46, 4, 5, 6, 7, 8, 9])) + + def test_ufunc_at_boolean(self): + # Test boolean indexing and boolean ufuncs + a = np.arange(10) + index = a % 2 == 0 + np.equal.at(a, index, [0, 2, 4, 6, 8]) + assert_equal(a, [1, 1, 1, 3, 1, 5, 1, 7, 1, 9]) + + # Test unary operator + a = np.arange(10, dtype='u4') + np.invert.at(a, [2, 5, 2]) + assert_equal(a, [0, 1, 2, 3, 4, 5 ^ 0xffffffff, 6, 7, 8, 9]) + + def test_ufunc_at_advanced(self): + # Test empty subspace + orig = np.arange(4) + a = orig[:, None][:, 0:0] + np.add.at(a, [0, 1], 3) + assert_array_equal(orig, np.arange(4)) + + # Test with swapped byte order + index = np.array([1, 2, 1], np.dtype('i').newbyteorder()) + values = np.array([1, 2, 3, 4], np.dtype('f').newbyteorder()) + np.add.at(values, index, 3) + assert_array_equal(values, [1, 8, 6, 4]) + + # Test exception thrown + values = np.array(['a', 1], dtype=object) + assert_raises(TypeError, np.add.at, values, [0, 1], 1) + assert_array_equal(values, np.array(['a', 1], dtype=object)) + + # Test multiple output ufuncs raise error, gh-5665 + assert_raises(ValueError, np.modf.at, np.arange(10), [1]) + + # Test maximum + a = np.array([1, 2, 3]) + np.maximum.at(a, [0], 0) + assert_equal(a, np.array([1, 2, 3])) + + @pytest.mark.parametrize("dtype", + np.typecodes['AllInteger'] + np.typecodes['Float']) + @pytest.mark.parametrize("ufunc", + [np.add, np.subtract, np.divide, np.minimum, np.maximum]) + def test_at_negative_indexes(self, dtype, ufunc): + a = np.arange(0, 10).astype(dtype) + indxs = np.array([-1, 1, -1, 2]).astype(np.intp) + vals = np.array([1, 5, 2, 10], dtype=a.dtype) + + expected = a.copy() + for i, v in zip(indxs, vals): + expected[i] = ufunc(expected[i], v) + + ufunc.at(a, indxs, vals) + assert_array_equal(a, expected) + assert np.all(indxs == [-1, 1, -1, 2]) + + def test_at_not_none_signature(self): + # Test ufuncs with non-trivial signature raise a TypeError + a = np.ones((2, 2, 2)) + b = np.ones((1, 2, 2)) + assert_raises(TypeError, np.matmul.at, a, [0], b) + + a = np.array([[[1, 2], [3, 4]]]) + assert_raises(TypeError, np.linalg._umath_linalg.det.at, a, [0]) + + def test_at_no_loop_for_op(self): + # str dtype does not have a ufunc loop for np.add + arr = np.ones(10, dtype=str) + with pytest.raises(np._core._exceptions._UFuncNoLoopError): + np.add.at(arr, [0, 1], [0, 1]) + + def test_at_output_casting(self): + arr = np.array([-1]) + np.equal.at(arr, [0], [0]) + assert arr[0] == 0 + + def test_at_broadcast_failure(self): + arr = np.arange(5) + with pytest.raises(ValueError): + np.add.at(arr, [0, 1], [1, 2, 3]) + + def test_reduce_arguments(self): + f = np.add.reduce + d = np.ones((5, 2), dtype=int) + o = np.ones((2,), dtype=d.dtype) + r = o * 5 + assert_equal(f(d), r) + # a, axis=0, dtype=None, out=None, keepdims=False + assert_equal(f(d, axis=0), r) + assert_equal(f(d, 0), r) + assert_equal(f(d, 0, dtype=None), r) + assert_equal(f(d, 0, dtype='i'), r) + assert_equal(f(d, 0, 'i'), r) + assert_equal(f(d, 0, None), r) + assert_equal(f(d, 0, None, out=None), r) + assert_equal(f(d, 0, None, out=o), r) + assert_equal(f(d, 0, None, o), r) + assert_equal(f(d, 0, None, None), r) + assert_equal(f(d, 0, None, None, keepdims=False), r) + assert_equal(f(d, 0, None, None, True), r.reshape((1,) + r.shape)) + assert_equal(f(d, 0, None, None, False, 0), r) + assert_equal(f(d, 0, None, None, False, initial=0), r) + assert_equal(f(d, 0, None, None, False, 0, True), r) + assert_equal(f(d, 0, None, None, False, 0, where=True), r) + # multiple keywords + assert_equal(f(d, axis=0, dtype=None, out=None, keepdims=False), r) + assert_equal(f(d, 0, dtype=None, out=None, keepdims=False), r) + assert_equal(f(d, 0, None, out=None, keepdims=False), r) + assert_equal(f(d, 0, None, out=None, keepdims=False, initial=0, + where=True), r) + + # too little + assert_raises(TypeError, f) + # too much + assert_raises(TypeError, f, d, 0, None, None, False, 0, True, 1) + # invalid axis + assert_raises(TypeError, f, d, "invalid") + assert_raises(TypeError, f, d, axis="invalid") + assert_raises(TypeError, f, d, axis="invalid", dtype=None, + keepdims=True) + # invalid dtype + assert_raises(TypeError, f, d, 0, "invalid") + assert_raises(TypeError, f, d, dtype="invalid") + assert_raises(TypeError, f, d, dtype="invalid", out=None) + # invalid out + assert_raises(TypeError, f, d, 0, None, "invalid") + assert_raises(TypeError, f, d, out="invalid") + assert_raises(TypeError, f, d, out="invalid", dtype=None) + # keepdims boolean, no invalid value + # assert_raises(TypeError, f, d, 0, None, None, "invalid") + # assert_raises(TypeError, f, d, keepdims="invalid", axis=0, dtype=None) + # invalid mix + assert_raises(TypeError, f, d, 0, keepdims="invalid", dtype="invalid", + out=None) + + # invalid keyword + assert_raises(TypeError, f, d, axis=0, dtype=None, invalid=0) + assert_raises(TypeError, f, d, invalid=0) + assert_raises(TypeError, f, d, 0, keepdims=True, invalid="invalid", + out=None) + assert_raises(TypeError, f, d, axis=0, dtype=None, keepdims=True, + out=None, invalid=0) + assert_raises(TypeError, f, d, axis=0, dtype=None, + out=None, invalid=0) + + def test_structured_equal(self): + # https://github.com/numpy/numpy/issues/4855 + + class MyA(np.ndarray): + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return getattr(ufunc, method)(*(input.view(np.ndarray) + for input in inputs), **kwargs) + a = np.arange(12.).reshape(4, 3) + ra = a.view(dtype=('f8,f8,f8')).squeeze() + mra = ra.view(MyA) + + target = np.array([True, False, False, False], dtype=bool) + assert_equal(np.all(target == (mra == ra[0])), True) + + def test_scalar_equal(self): + # Scalar comparisons should always work, without deprecation warnings. + # even when the ufunc fails. + a = np.array(0.) + b = np.array('a') + assert_(a != b) + assert_(b != a) + assert_(not (a == b)) + assert_(not (b == a)) + + def test_NotImplemented_not_returned(self): + # See gh-5964 and gh-2091. Some of these functions are not operator + # related and were fixed for other reasons in the past. + binary_funcs = [ + np.power, np.add, np.subtract, np.multiply, np.divide, + np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or, + np.bitwise_xor, np.left_shift, np.right_shift, np.fmax, + np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2, + np.maximum, np.minimum, np.mod, + np.greater, np.greater_equal, np.less, np.less_equal, + np.equal, np.not_equal] + + a = np.array('1') + b = 1 + c = np.array([1., 2.]) + for f in binary_funcs: + assert_raises(TypeError, f, a, b) + assert_raises(TypeError, f, c, a) + + @pytest.mark.parametrize("ufunc", + [np.logical_and, np.logical_or]) # logical_xor object loop is bad + @pytest.mark.parametrize("signature", + [(None, None, object), (object, None, None), + (None, object, None)]) + def test_logical_ufuncs_object_signatures(self, ufunc, signature): + a = np.array([True, None, False], dtype=object) + res = ufunc(a, a, signature=signature) + assert res.dtype == object + + @pytest.mark.parametrize("ufunc", + [np.logical_and, np.logical_or, np.logical_xor]) + @pytest.mark.parametrize("signature", + [(bool, None, object), (object, None, bool), + (None, object, bool)]) + def test_logical_ufuncs_mixed_object_signatures(self, ufunc, signature): + # Most mixed signatures fail (except those with bool out, e.g. `OO->?`) + a = np.array([True, None, False]) + with pytest.raises(TypeError): + ufunc(a, a, signature=signature) + + @pytest.mark.parametrize("ufunc", + [np.logical_and, np.logical_or, np.logical_xor]) + def test_logical_ufuncs_support_anything(self, ufunc): + # The logical ufuncs support even input that can't be promoted: + a = np.array(b'1', dtype="V3") + c = np.array([1., 2.]) + assert_array_equal(ufunc(a, c), ufunc([True, True], True)) + assert ufunc.reduce(a) == True + # check that the output has no effect: + out = np.zeros(2, dtype=np.int32) + expected = ufunc([True, True], True).astype(out.dtype) + assert_array_equal(ufunc(a, c, out=out), expected) + out = np.zeros((), dtype=np.int32) + assert ufunc.reduce(a, out=out) == True + # Last check, test reduction when out and a match (the complexity here + # is that the "i,i->?" may seem right, but should not match. + a = np.array([3], dtype="i") + out = np.zeros((), dtype=a.dtype) + assert ufunc.reduce(a, out=out) == 1 + + @pytest.mark.parametrize("ufunc", + [np.logical_and, np.logical_or, np.logical_xor]) + @pytest.mark.parametrize("dtype", ["S", "U"]) + @pytest.mark.parametrize("values", [["1", "hi", "0"], ["", ""]]) + def test_logical_ufuncs_supports_string(self, ufunc, dtype, values): + # note that values are either all true or all false + arr = np.array(values, dtype=dtype) + obj_arr = np.array(values, dtype=object) + res = ufunc(arr, arr) + expected = ufunc(obj_arr, obj_arr, dtype=bool) + + assert_array_equal(res, expected) + + res = ufunc.reduce(arr) + expected = ufunc.reduce(obj_arr, dtype=bool) + assert_array_equal(res, expected) + + @pytest.mark.parametrize("ufunc", + [np.logical_and, np.logical_or, np.logical_xor]) + def test_logical_ufuncs_out_cast_check(self, ufunc): + a = np.array('1') + c = np.array([1., 2.]) + out = a.copy() + with pytest.raises(TypeError): + # It would be safe, but not equiv casting: + ufunc(a, c, out=out, casting="equiv") + + def test_reducelike_byteorder_resolution(self): + # See gh-20699, byte-order changes need some extra care in the type + # resolution to make the following succeed: + arr_be = np.arange(10, dtype=">i8") + arr_le = np.arange(10, dtype="i + if 'O' in typ or '?' in typ: + continue + inp, out = typ.split('->') + args = [np.ones((3, 3), t) for t in inp] + with warnings.catch_warnings(record=True): + warnings.filterwarnings("always") + res = ufunc(*args) + if isinstance(res, tuple): + outs = tuple(out) + assert len(res) == len(outs) + for r, t in zip(res, outs): + assert r.dtype == np.dtype(t) + else: + assert res.dtype == np.dtype(out) + +@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np) + if isinstance(getattr(np, x), np.ufunc)]) +def test_ufunc_noncontiguous(ufunc): + ''' + Check that contiguous and non-contiguous calls to ufuncs + have the same results for values in range(9) + ''' + for typ in ufunc.types: + # types is a list of strings like ii->i + if any(set('O?mM') & set(typ)): + # bool, object, datetime are too irregular for this simple test + continue + inp, out = typ.split('->') + args_c = [np.empty((6, 6), t) for t in inp] + # non contiguous (2, 3 step on the two dimensions) + args_n = [np.empty((12, 18), t)[::2, ::3] for t in inp] + # alignment != itemsize is possible. So create an array with such + # an odd step manually. + args_o = [] + for t in inp: + orig_dt = np.dtype(t) + off_dt = f"S{orig_dt.alignment}" # offset by alignment + dtype = np.dtype([("_", off_dt), ("t", orig_dt)], align=False) + args_o.append(np.empty((6, 6), dtype=dtype)["t"]) + for a in args_c + args_n + args_o: + a.flat = range(1, 37) + + with warnings.catch_warnings(record=True): + warnings.filterwarnings("always") + res_c = ufunc(*args_c) + res_n = ufunc(*args_n) + res_o = ufunc(*args_o) + if len(out) == 1: + res_c = (res_c,) + res_n = (res_n,) + res_o = (res_o,) + for c_ar, n_ar, o_ar in zip(res_c, res_n, res_o): + dt = c_ar.dtype + if np.issubdtype(dt, np.floating): + # for floating point results allow a small fuss in comparisons + # since different algorithms (libm vs. intrinsics) can be used + # for different input strides + res_eps = np.finfo(dt).eps + tol = 3 * res_eps + assert_allclose(res_c, res_n, atol=tol, rtol=tol) + assert_allclose(res_c, res_o, atol=tol, rtol=tol) + else: + assert_equal(c_ar, n_ar) + assert_equal(c_ar, o_ar) + + +@pytest.mark.parametrize('ufunc', [np.sign, np.equal]) +def test_ufunc_warn_with_nan(ufunc): + # issue gh-15127 + # test that calling certain ufuncs with a non-standard `nan` value does not + # emit a warning + # `b` holds a 64 bit signaling nan: the most significant bit of the + # significand is zero. + b = np.array([0x7ff0000000000001], 'i8').view('f8') + assert np.isnan(b) + if ufunc.nin == 1: + ufunc(b) + elif ufunc.nin == 2: + ufunc(b, b.copy()) + else: + raise ValueError('ufunc with more than 2 inputs') + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_ufunc_out_casterrors(): + # Tests that casting errors are correctly reported and buffers are + # cleared. + # The following array can be added to itself as an object array, but + # the result cannot be cast to an integer output: + value = 123 # relies on python cache (leak-check will still find it) + arr = np.array([value] * int(ncu.BUFSIZE * 1.5) + + ["string"] + + [value] * int(1.5 * ncu.BUFSIZE), dtype=object) + out = np.ones(len(arr), dtype=np.intp) + + count = sys.getrefcount(value) + with pytest.raises(ValueError): + # Output casting failure: + np.add(arr, arr, out=out, casting="unsafe") + + assert count == sys.getrefcount(value) + # output is unchanged after the error, this shows that the iteration + # was aborted (this is not necessarily defined behaviour) + assert out[-1] == 1 + + with pytest.raises(ValueError): + # Input casting failure: + np.add(arr, arr, out=out, dtype=np.intp, casting="unsafe") + + assert count == sys.getrefcount(value) + # output is unchanged after the error, this shows that the iteration + # was aborted (this is not necessarily defined behaviour) + assert out[-1] == 1 + + +@pytest.mark.parametrize("bad_offset", [0, int(ncu.BUFSIZE * 1.5)]) +def test_ufunc_input_casterrors(bad_offset): + value = 123 + arr = np.array([value] * bad_offset + + ["string"] + + [value] * int(1.5 * ncu.BUFSIZE), dtype=object) + with pytest.raises(ValueError): + # Force cast inputs, but the buffered cast of `arr` to intp fails: + np.add(arr, arr, dtype=np.intp, casting="unsafe") + + +@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") +@pytest.mark.parametrize("bad_offset", [0, int(ncu.BUFSIZE * 1.5)]) +def test_ufunc_input_floatingpoint_error(bad_offset): + value = 123 + arr = np.array([value] * bad_offset + + [np.nan] + + [value] * int(1.5 * ncu.BUFSIZE)) + with np.errstate(invalid="raise"), pytest.raises(FloatingPointError): + # Force cast inputs, but the buffered cast of `arr` to intp fails: + np.add(arr, arr, dtype=np.intp, casting="unsafe") + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") +@pytest.mark.parametrize( + "methodname", + ["__call__", "accumulate", "at", "outer", "reduce", "reduceat", "resolve_dtypes"], +) +def test_ufunc_method_signatures(methodname: str): + method = getattr(np.ufunc, methodname) + + try: + _ = inspect.signature(method) + except ValueError as e: + pytest.fail(e.args[0]) + + +def test_trivial_loop_invalid_cast(): + # This tests the fast-path "invalid cast", see gh-19904. + with pytest.raises(TypeError, + match="cast ufunc 'add' input 0"): + # the void dtype definitely cannot cast to double: + np.add(np.array(1, "i,i"), 3, signature="dd->d") + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +@pytest.mark.parametrize("offset", + [0, ncu.BUFSIZE // 2, int(1.5 * ncu.BUFSIZE)]) +def test_reduce_casterrors(offset): + # Test reporting of casting errors in reductions, we test various + # offsets to where the casting error will occur, since these may occur + # at different places during the reduction procedure. For example + # the first item may be special. + value = 123 # relies on python cache (leak-check will still find it) + arr = np.array([value] * offset + + ["string"] + + [value] * int(1.5 * ncu.BUFSIZE), dtype=object) + out = np.array(-1, dtype=np.intp) + + count = sys.getrefcount(value) + with pytest.raises(ValueError, match="invalid literal"): + # This is an unsafe cast, but we currently always allow that. + # Note that the double loop is picked, but the cast fails. + # `initial=None` disables the use of an identity here to test failures + # while copying the first values path (not used when identity exists). + np.add.reduce(arr, dtype=np.intp, out=out, initial=None) + assert count == sys.getrefcount(value) + # If an error occurred during casting, the operation is done at most until + # the error occurs (the result of which would be `value * offset`) and -1 + # if the error happened immediately. + # This does not define behaviour, the output is invalid and thus undefined + assert out[()] < value * offset + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_reduction_no_reference_leak(): + # Test that the generic reduction does not leak references. + # gh-29358 + arr = np.array([1, 2, 3], dtype=np.int32) + count = sys.getrefcount(arr) + + np.add.reduce(arr, dtype=np.int32, initial=0) + assert count == sys.getrefcount(arr) + + np.add.accumulate(arr, dtype=np.int32) + assert count == sys.getrefcount(arr) + + np.add.reduceat(arr, [0, 1], dtype=np.int32) + assert count == sys.getrefcount(arr) + + # with `out=` the reference count is not changed + out = np.empty((), dtype=np.int32) + out_count = sys.getrefcount(out) + + np.add.reduce(arr, dtype=np.int32, out=out, initial=0) + assert count == sys.getrefcount(arr) + assert out_count == sys.getrefcount(out) + + out = np.empty(arr.shape, dtype=np.int32) + out_count = sys.getrefcount(out) + + np.add.accumulate(arr, dtype=np.int32, out=out) + assert count == sys.getrefcount(arr) + assert out_count == sys.getrefcount(out) + + out = np.empty((2,), dtype=np.int32) + out_count = sys.getrefcount(out) + + np.add.reduceat(arr, [0, 1], dtype=np.int32, out=out) + assert count == sys.getrefcount(arr) + assert out_count == sys.getrefcount(out) + + +def test_object_reduce_cleanup_on_failure(): + # Test cleanup, including of the initial value (manually provided or not) + with pytest.raises(TypeError): + np.add.reduce([1, 2, None], initial=4) + + with pytest.raises(TypeError): + np.add.reduce([1, 2, None]) + + +@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") +@pytest.mark.parametrize("method", + [np.add.accumulate, np.add.reduce, + pytest.param(lambda x: np.add.reduceat(x, [0]), id="reduceat"), + pytest.param(lambda x: np.log.at(x, [2]), id="at")]) +def test_ufunc_methods_floaterrors(method): + # adding inf and -inf (or log(-inf) creates an invalid float and warns + arr = np.array([np.inf, 0, -np.inf]) + with np.errstate(all="warn"): + with pytest.warns(RuntimeWarning, match="invalid value"): + method(arr) + + arr = np.array([np.inf, 0, -np.inf]) + with np.errstate(all="raise"): + with pytest.raises(FloatingPointError): + method(arr) + + +def _check_neg_zero(value): + if value != 0.0: + return False + if not np.signbit(value.real): + return False + if value.dtype.kind == "c": + return np.signbit(value.imag) + return True + +@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) +def test_addition_negative_zero(dtype): + dtype = np.dtype(dtype) + if dtype.kind == "c": + neg_zero = dtype.type(complex(-0.0, -0.0)) + else: + neg_zero = dtype.type(-0.0) + + arr = np.array(neg_zero) + arr2 = np.array(neg_zero) + + assert _check_neg_zero(arr + arr2) + # In-place ops may end up on a different path (reduce path) see gh-21211 + arr += arr2 + assert _check_neg_zero(arr) + + +@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) +@pytest.mark.parametrize("use_initial", [True, False]) +def test_addition_reduce_negative_zero(dtype, use_initial): + dtype = np.dtype(dtype) + if dtype.kind == "c": + neg_zero = dtype.type(complex(-0.0, -0.0)) + else: + neg_zero = dtype.type(-0.0) + + kwargs = {} + if use_initial: + kwargs["initial"] = neg_zero + else: + pytest.xfail("-0. propagation in sum currently requires initial") + + # Test various length, in case SIMD paths or chunking play a role. + # 150 extends beyond the pairwise blocksize; probably not important. + for i in range(150): + arr = np.array([neg_zero] * i, dtype=dtype) + res = np.sum(arr, **kwargs) + if i > 0 or use_initial: + assert _check_neg_zero(res) + else: + # `sum([])` should probably be 0.0 and not -0.0 like `sum([-0.0])` + assert not np.signbit(res.real) + assert not np.signbit(res.imag) + + +@pytest.mark.parametrize(["dt1", "dt2"], + [("S", "U"), ("U", "S"), ("S", "d"), ("S", "V"), ("U", "l")]) +def test_addition_string_types(dt1, dt2): + arr1 = np.array([1234234], dtype=dt1) + arr2 = np.array([b"423"], dtype=dt2) + with pytest.raises(np._core._exceptions.UFuncTypeError) as exc: + np.add(arr1, arr2) + + +@pytest.mark.parametrize("order1,order2", + [(">", ">"), ("<", "<"), (">", "<"), ("<", ">")]) +def test_addition_unicode_inverse_byte_order(order1, order2): + element = 'abcd' + arr1 = np.array([element], dtype=f"{order1}U4") + arr2 = np.array([element], dtype=f"{order2}U4") + result = arr1 + arr2 + assert result == 2 * element + + +@pytest.mark.parametrize("dtype", [np.int8, np.int16, np.int32, np.int64]) +def test_find_non_long_args(dtype): + element = 'abcd' + start = dtype(0) + end = dtype(len(element)) + arr = np.array([element]) + result = np._core.umath.find(arr, "a", start, end) + assert result.dtype == np.dtype("intp") + assert result == 0 + + +def test_find_access_past_buffer(): + # This checks that no read past the string buffer occurs in + # string_fastsearch.h. The buffer class makes sure this is checked. + # To see it in action, you can remove the checks in the buffer and + # this test will produce an 'Invalid read' if run under valgrind. + arr = np.array([b'abcd', b'ebcd']) + result = np._core.umath.find(arr, b'cde', 0, np.iinfo(np.int64).max) + assert np.all(result == -1) + + +class TestLowlevelAPIAccess: + def test_resolve_dtypes_basic(self): + # Basic test for dtype resolution: + i4 = np.dtype("i4") + f4 = np.dtype("f4") + f8 = np.dtype("f8") + + r = np.add.resolve_dtypes((i4, f4, None)) + assert r == (f8, f8, f8) + + # Signature uses the same logic to parse as ufunc (less strict) + # the following is "same-kind" casting so works: + r = np.add.resolve_dtypes(( + i4, i4, None), signature=(None, None, "f4")) + assert r == (f4, f4, f4) + + # Check NEP 50 "weak" promotion also: + r = np.add.resolve_dtypes((f4, int, None)) + assert r == (f4, f4, f4) + + with pytest.raises(TypeError): + np.add.resolve_dtypes((i4, f4, None), casting="no") + + def test_resolve_dtypes_comparison(self): + i4 = np.dtype("i4") + i8 = np.dtype("i8") + b = np.dtype("?") + r = np.equal.resolve_dtypes((i4, i8, None)) + assert r == (i8, i8, b) + + def test_weird_dtypes(self): + S0 = np.dtype("S0") + # S0 is often converted by NumPy to S1, but not here: + r = np.equal.resolve_dtypes((S0, S0, None)) + assert r == (S0, S0, np.dtype(bool)) + + # Subarray dtypes are weird and may not work fully, we preserve them + # leading to a TypeError (currently no equal loop for void/structured) + dts = np.dtype("10i") + with pytest.raises(TypeError): + np.equal.resolve_dtypes((dts, dts, None)) + + def test_resolve_dtypes_reduction(self): + i2 = np.dtype("i2") + default_int_ = np.dtype(np.int_) + # Check special addition resolution: + res = np.add.resolve_dtypes((None, i2, None), reduction=True) + assert res == (default_int_, default_int_, default_int_) + + def test_resolve_dtypes_reduction_no_output(self): + i4 = np.dtype("i4") + with pytest.raises(TypeError): + # May be allowable at some point? + np.add.resolve_dtypes((i4, i4, i4), reduction=True) + + @pytest.mark.parametrize("dtypes", [ + (np.dtype("i"), np.dtype("i")), + (None, np.dtype("i"), np.dtype("f")), + (np.dtype("i"), None, np.dtype("f")), + ("i4", "i4", None)]) + def test_resolve_dtypes_errors(self, dtypes): + with pytest.raises(TypeError): + np.add.resolve_dtypes(dtypes) + + def test_resolve_dtypes_reduction_errors(self): + i2 = np.dtype("i2") + + with pytest.raises(TypeError): + np.add.resolve_dtypes((None, i2, i2)) + + with pytest.raises(TypeError): + np.add.signature((None, None, "i4")) + + @pytest.mark.skipif(not hasattr(ct, "pythonapi"), + reason="`ctypes.pythonapi` required for capsule unpacking.") + @pytest.mark.thread_unsafe(reason="modifies global object in the ctypes API") + def test_loop_access(self): + # This is a basic test for the full strided loop access + data_t = ct.c_char_p * 2 + dim_t = ct.c_ssize_t * 1 + strides_t = ct.c_ssize_t * 2 + strided_loop_t = ct.CFUNCTYPE( + ct.c_int, ct.c_void_p, data_t, dim_t, strides_t, ct.c_void_p) + + class call_info_t(ct.Structure): + _fields_ = [ + ("strided_loop", strided_loop_t), + ("context", ct.c_void_p), + ("auxdata", ct.c_void_p), + ("requires_pyapi", ct.c_byte), + ("no_floatingpoint_errors", ct.c_byte), + ] + + i4 = np.dtype("i4") + dt, call_info_obj = np.negative._resolve_dtypes_and_context((i4, i4)) + assert dt == (i4, i4) # can be used without casting + + # Fill in the rest of the information: + np.negative._get_strided_loop(call_info_obj) + + ct.pythonapi.PyCapsule_GetPointer.restype = ct.c_void_p + call_info = ct.pythonapi.PyCapsule_GetPointer( + ct.py_object(call_info_obj), + ct.c_char_p(b"numpy_1.24_ufunc_call_info")) + + call_info = ct.cast(call_info, ct.POINTER(call_info_t)).contents + + arr = np.arange(10, dtype=i4) + call_info.strided_loop( + call_info.context, + data_t(arr.ctypes.data, arr.ctypes.data), + arr.ctypes.shape, # is a C-array with 10 here + strides_t(arr.ctypes.strides[0], arr.ctypes.strides[0]), + call_info.auxdata) + + # We just directly called the negative inner-loop in-place: + assert_array_equal(arr, -np.arange(10, dtype=i4)) + + @pytest.mark.parametrize("strides", [1, (1, 2, 3), (1, "2")]) + def test__get_strided_loop_errors_bad_strides(self, strides): + i4 = np.dtype("i4") + dt, call_info = np.negative._resolve_dtypes_and_context((i4, i4)) + + with pytest.raises(TypeError, match="fixed_strides.*tuple.*or None"): + np.negative._get_strided_loop(call_info, fixed_strides=strides) + + def test__get_strided_loop_errors_bad_call_info(self): + i4 = np.dtype("i4") + dt, call_info = np.negative._resolve_dtypes_and_context((i4, i4)) + + with pytest.raises(ValueError, match="PyCapsule"): + np.negative._get_strided_loop("not the capsule!") + + with pytest.raises(TypeError, match=".*incompatible context"): + np.add._get_strided_loop(call_info) + + np.negative._get_strided_loop(call_info) + with pytest.raises(TypeError): + # cannot call it a second time: + np.negative._get_strided_loop(call_info) + + def test_long_arrays(self): + t = np.zeros((1029, 917), dtype=np.single) + t[0][0] = 1 + t[28][414] = 1 + tc = np.cos(t) + assert_equal(tc[0][0], tc[28][414]) + + +class TestUFuncInspectSignature: + PARAMS_COMMON = { + "casting": "same_kind", + "order": "K", + "dtype": None, + "subok": True, + "signature": None, + } + + PARAMS_UFUNC = { + "where": True, + } | PARAMS_COMMON + + PARAMS_GUFUNC = { + "axes": np._NoValue, + "axis": np._NoValue, + "keepdims": False, + } | PARAMS_COMMON + + @pytest.mark.parametrize("ufunc", [np.log, np.gcd, np.frexp, np.divmod, np.matvec]) + def test_dunder_signature_attr(self, ufunc: np.ufunc): + assert hasattr(ufunc, "__signature__") + assert isinstance(ufunc.__signature__, inspect.Signature) + assert inspect.signature(ufunc) == ufunc.__signature__ + + @pytest.mark.parametrize("ufunc", [np.exp, np.mod, np.frexp, np.divmod, np.vecmat]) + def test_params_common_positional(self, ufunc: np.ufunc): + sig = inspect.signature(ufunc) + + # check positional-only parameters + posonly_params = {name: param.default + for name, param in sig.parameters.items() + if param.kind is param.POSITIONAL_ONLY} + assert len(posonly_params) == ufunc.nin + assert all(default is inspect.Parameter.empty + for default in posonly_params.values()) + + # check 'out' parameter + out_param = sig.parameters.get("out") + assert out_param is not None + assert out_param.kind is inspect.Parameter.POSITIONAL_OR_KEYWORD + + @pytest.mark.parametrize("ufunc", [np.sin, np.add, np.frexp, np.divmod]) + def test_params_common_ufunc(self, ufunc: np.ufunc): + assert ufunc.signature is None # sanity check + + sig = inspect.signature(ufunc) + + # check keyword-only parameters + keyword_params = {name: param.default + for name, param in sig.parameters.items() + if param.kind is param.KEYWORD_ONLY} + assert keyword_params == self.PARAMS_UFUNC + + @pytest.mark.parametrize("gufunc", [np.matmul, np.matvec, np.vecdot, np.vecmat]) + def test_params_common_gufunc(self, gufunc: np.ufunc): + assert gufunc.signature is not None # sanity check + + sig = inspect.signature(gufunc) + + # check keyword-only parameters + keyword_params = {name: param.default + for name, param in sig.parameters.items() + if param.kind is param.KEYWORD_ONLY} + assert keyword_params == self.PARAMS_GUFUNC diff --git a/local_packages/numpy/_core/tests/test_umath.py b/local_packages/numpy/_core/tests/test_umath.py new file mode 100644 index 0000000..ab9c594 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_umath.py @@ -0,0 +1,4975 @@ +import fnmatch +import inspect +import itertools +import operator +import platform +import sys +import warnings +from collections import namedtuple +from fractions import Fraction +from functools import reduce + +import pytest + +import numpy as np +import numpy._core.umath as ncu +from numpy._core import _umath_tests as ncu_tests, sctypes +from numpy.testing import ( + HAS_REFCOUNT, + IS_MUSL, + IS_PYPY, + IS_WASM, + _gen_alignment_data, + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_almost_equal_nulp, + assert_array_equal, + assert_array_max_ulp, + assert_equal, + assert_no_warnings, + assert_raises, + assert_raises_regex, +) +from numpy.testing._private.utils import _glibc_older_than + +UFUNCS = [obj for obj in np._core.umath.__dict__.values() + if isinstance(obj, np.ufunc)] + +UFUNCS_UNARY = [ + uf for uf in UFUNCS if uf.nin == 1 +] +UFUNCS_UNARY_FP = [ + uf for uf in UFUNCS_UNARY if 'f->f' in uf.types +] + +UFUNCS_BINARY = [ + uf for uf in UFUNCS if uf.nin == 2 +] +UFUNCS_BINARY_ACC = [ + uf for uf in UFUNCS_BINARY if hasattr(uf, "accumulate") and uf.nout == 1 +] + +def interesting_binop_operands(val1, val2, dtype): + """ + Helper to create "interesting" operands to cover common code paths: + * scalar inputs + * only first "values" is an array (e.g. scalar division fast-paths) + * Longer array (SIMD) placing the value of interest at different positions + * Oddly strided arrays which may not be SIMD compatible + + It does not attempt to cover unaligned access or mixed dtypes. + These are normally handled by the casting/buffering machinery. + + This is not a fixture (currently), since I believe a fixture normally + only yields once? + """ + fill_value = 1 # could be a parameter, but maybe not an optional one? + + arr1 = np.full(10003, dtype=dtype, fill_value=fill_value) + arr2 = np.full(10003, dtype=dtype, fill_value=fill_value) + + arr1[0] = val1 + arr2[0] = val2 + + extractor = lambda res: res + yield arr1[0], arr2[0], extractor, "scalars" + + extractor = lambda res: res + yield arr1[0, ...], arr2[0, ...], extractor, "scalar-arrays" + + # reset array values to fill_value: + arr1[0] = fill_value + arr2[0] = fill_value + + for pos in [0, 1, 2, 3, 4, 5, -1, -2, -3, -4]: + arr1[pos] = val1 + arr2[pos] = val2 + + extractor = lambda res: res[pos] + yield arr1, arr2, extractor, f"off-{pos}" + yield arr1, arr2[pos], extractor, f"off-{pos}-with-scalar" + + arr1[pos] = fill_value + arr2[pos] = fill_value + + for stride in [-1, 113]: + op1 = arr1[::stride] + op2 = arr2[::stride] + op1[10] = val1 + op2[10] = val2 + + extractor = lambda res: res[10] + yield op1, op2, extractor, f"stride-{stride}" + + op1[10] = fill_value + op2[10] = fill_value + + +def on_powerpc(): + """ True if we are running on a Power PC platform.""" + return platform.processor() == 'powerpc' or \ + platform.machine().startswith('ppc') + + +def bad_arcsinh(): + """The blocklisted trig functions are not accurate on aarch64/PPC for + complex256. Rather than dig through the actual problem skip the + test. This should be fixed when we can move past glibc2.17 + which is the version in manylinux2014 + """ + if platform.machine() == 'aarch64': + x = 1.78e-10 + elif on_powerpc(): + x = 2.16e-10 + else: + return False + v1 = np.arcsinh(np.float128(x)) + v2 = np.arcsinh(np.complex256(x)).real + # The eps for float128 is 1-e33, so this is way bigger + return abs((v1 / v2) - 1.0) > 1e-23 + + +class _FilterInvalids: + def setup_method(self): + self.olderr = np.seterr(invalid='ignore') + + def teardown_method(self): + np.seterr(**self.olderr) + + +class TestConstants: + def test_pi(self): + assert_allclose(ncu.pi, 3.141592653589793, 1e-15) + + def test_e(self): + assert_allclose(ncu.e, 2.718281828459045, 1e-15) + + def test_euler_gamma(self): + assert_allclose(ncu.euler_gamma, 0.5772156649015329, 1e-15) + + +class TestOut: + def test_out_subok(self): + for subok in (True, False): + a = np.array(0.5) + o = np.empty(()) + + r = np.add(a, 2, o, subok=subok) + assert_(r is o) + r = np.add(a, 2, out=o, subok=subok) + assert_(r is o) + r = np.add(a, 2, out=(o,), subok=subok) + assert_(r is o) + + d = np.array(5.7) + o1 = np.empty(()) + o2 = np.empty((), dtype=np.int32) + + r1, r2 = np.frexp(d, o1, None, subok=subok) + assert_(r1 is o1) + r1, r2 = np.frexp(d, None, o2, subok=subok) + assert_(r2 is o2) + r1, r2 = np.frexp(d, o1, o2, subok=subok) + assert_(r1 is o1) + assert_(r2 is o2) + + r1, r2 = np.frexp(d, out=(o1, None), subok=subok) + assert_(r1 is o1) + r1, r2 = np.frexp(d, out=(None, o2), subok=subok) + assert_(r2 is o2) + r1, r2 = np.frexp(d, out=(o1, o2), subok=subok) + assert_(r1 is o1) + assert_(r2 is o2) + + with assert_raises(TypeError): + # Out argument must be tuple, since there are multiple outputs. + r1, r2 = np.frexp(d, out=o1, subok=subok) + + assert_raises(TypeError, np.add, a, 2, o, o, subok=subok) + assert_raises(TypeError, np.add, a, 2, o, out=o, subok=subok) + assert_raises(TypeError, np.add, a, 2, None, out=o, subok=subok) + assert_raises(ValueError, np.add, a, 2, out=(o, o), subok=subok) + assert_raises(ValueError, np.add, a, 2, out=(), subok=subok) + assert_raises(TypeError, np.add, a, 2, [], subok=subok) + assert_raises(TypeError, np.add, a, 2, out=[], subok=subok) + assert_raises(TypeError, np.add, a, 2, out=([],), subok=subok) + o.flags.writeable = False + assert_raises(ValueError, np.add, a, 2, o, subok=subok) + assert_raises(ValueError, np.add, a, 2, out=o, subok=subok) + assert_raises(ValueError, np.add, a, 2, out=(o,), subok=subok) + + def test_out_wrap_subok(self): + class ArrayWrap(np.ndarray): + __array_priority__ = 10 + + def __new__(cls, arr): + return np.asarray(arr).view(cls).copy() + + def __array_wrap__(self, arr, context=None, return_scalar=False): + return arr.view(type(self)) + + for subok in (True, False): + a = ArrayWrap([0.5]) + + r = np.add(a, 2, subok=subok) + if subok: + assert_(isinstance(r, ArrayWrap)) + else: + assert_(type(r) == np.ndarray) + + r = np.add(a, 2, None, subok=subok) + if subok: + assert_(isinstance(r, ArrayWrap)) + else: + assert_(type(r) == np.ndarray) + + r = np.add(a, 2, out=None, subok=subok) + if subok: + assert_(isinstance(r, ArrayWrap)) + else: + assert_(type(r) == np.ndarray) + + r = np.add(a, 2, out=(None,), subok=subok) + if subok: + assert_(isinstance(r, ArrayWrap)) + else: + assert_(type(r) == np.ndarray) + + d = ArrayWrap([5.7]) + o1 = np.empty((1,)) + o2 = np.empty((1,), dtype=np.int32) + + r1, r2 = np.frexp(d, o1, subok=subok) + if subok: + assert_(isinstance(r2, ArrayWrap)) + else: + assert_(type(r2) == np.ndarray) + + r1, r2 = np.frexp(d, o1, None, subok=subok) + if subok: + assert_(isinstance(r2, ArrayWrap)) + else: + assert_(type(r2) == np.ndarray) + + r1, r2 = np.frexp(d, None, o2, subok=subok) + if subok: + assert_(isinstance(r1, ArrayWrap)) + else: + assert_(type(r1) == np.ndarray) + + r1, r2 = np.frexp(d, out=(o1, None), subok=subok) + if subok: + assert_(isinstance(r2, ArrayWrap)) + else: + assert_(type(r2) == np.ndarray) + + r1, r2 = np.frexp(d, out=(None, o2), subok=subok) + if subok: + assert_(isinstance(r1, ArrayWrap)) + else: + assert_(type(r1) == np.ndarray) + + with assert_raises(TypeError): + # Out argument must be tuple, since there are multiple outputs. + r1, r2 = np.frexp(d, out=o1, subok=subok) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_out_wrap_no_leak(self): + # Regression test for gh-26545 + class ArrSubclass(np.ndarray): + pass + + arr = np.arange(10).view(ArrSubclass) + orig_refcount = sys.getrefcount(arr) + arr *= 1 + assert sys.getrefcount(arr) == orig_refcount + + +class TestComparisons: + import operator + + @pytest.mark.parametrize('dtype', sctypes['uint'] + sctypes['int'] + + sctypes['float'] + [np.bool]) + @pytest.mark.parametrize('py_comp,np_comp', [ + (operator.lt, np.less), + (operator.le, np.less_equal), + (operator.gt, np.greater), + (operator.ge, np.greater_equal), + (operator.eq, np.equal), + (operator.ne, np.not_equal) + ]) + def test_comparison_functions(self, dtype, py_comp, np_comp): + # Initialize input arrays + if dtype == np.bool: + a = np.random.choice(a=[False, True], size=1000) + b = np.random.choice(a=[False, True], size=1000) + scalar = True + else: + a = np.random.randint(low=1, high=10, size=1000).astype(dtype) + b = np.random.randint(low=1, high=10, size=1000).astype(dtype) + scalar = 5 + np_scalar = np.dtype(dtype).type(scalar) + a_lst = a.tolist() + b_lst = b.tolist() + + # (Binary) Comparison (x1=array, x2=array) + comp_b = np_comp(a, b).view(np.uint8) + comp_b_list = [int(py_comp(x, y)) for x, y in zip(a_lst, b_lst)] + + # (Scalar1) Comparison (x1=scalar, x2=array) + comp_s1 = np_comp(np_scalar, b).view(np.uint8) + comp_s1_list = [int(py_comp(scalar, x)) for x in b_lst] + + # (Scalar2) Comparison (x1=array, x2=scalar) + comp_s2 = np_comp(a, np_scalar).view(np.uint8) + comp_s2_list = [int(py_comp(x, scalar)) for x in a_lst] + + # Sequence: Binary, Scalar1 and Scalar2 + assert_(comp_b.tolist() == comp_b_list, + f"Failed comparison ({py_comp.__name__})") + assert_(comp_s1.tolist() == comp_s1_list, + f"Failed comparison ({py_comp.__name__})") + assert_(comp_s2.tolist() == comp_s2_list, + f"Failed comparison ({py_comp.__name__})") + + def test_ignore_object_identity_in_equal(self): + # Check comparing identical objects whose comparison + # is not a simple boolean, e.g., arrays that are compared elementwise. + a = np.array([np.array([1, 2, 3]), None], dtype=object) + assert_raises(ValueError, np.equal, a, a) + + # Check error raised when comparing identical non-comparable objects. + class FunkyType: + def __eq__(self, other): + raise TypeError("I won't compare") + + a = np.array([FunkyType()]) + assert_raises(TypeError, np.equal, a, a) + + # Check identity doesn't override comparison mismatch. + a = np.array([np.nan], dtype=object) + assert_equal(np.equal(a, a), [False]) + + def test_ignore_object_identity_in_not_equal(self): + # Check comparing identical objects whose comparison + # is not a simple boolean, e.g., arrays that are compared elementwise. + a = np.array([np.array([1, 2, 3]), None], dtype=object) + assert_raises(ValueError, np.not_equal, a, a) + + # Check error raised when comparing identical non-comparable objects. + class FunkyType: + def __ne__(self, other): + raise TypeError("I won't compare") + + a = np.array([FunkyType()]) + assert_raises(TypeError, np.not_equal, a, a) + + # Check identity doesn't override comparison mismatch. + a = np.array([np.nan], dtype=object) + assert_equal(np.not_equal(a, a), [True]) + + def test_error_in_equal_reduce(self): + # gh-20929 + # make sure np.equal.reduce raises a TypeError if an array is passed + # without specifying the dtype + a = np.array([0, 0]) + assert_equal(np.equal.reduce(a, dtype=bool), True) + assert_raises(TypeError, np.equal.reduce, a) + + def test_object_dtype(self): + assert np.equal(1, [1], dtype=object).dtype == object + assert np.equal(1, [1], signature=(None, None, "O")).dtype == object + + def test_object_nonbool_dtype_error(self): + # bool output dtype is fine of course: + assert np.equal(1, [1], dtype=bool).dtype == bool + + # but the following are examples do not have a loop: + with pytest.raises(TypeError, match="No loop matching"): + np.equal(1, 1, dtype=np.int64) + + with pytest.raises(TypeError, match="No loop matching"): + np.equal(1, 1, sig=(None, None, "l")) + + @pytest.mark.parametrize("dtypes", ["qQ", "Qq"]) + @pytest.mark.parametrize('py_comp, np_comp', [ + (operator.lt, np.less), + (operator.le, np.less_equal), + (operator.gt, np.greater), + (operator.ge, np.greater_equal), + (operator.eq, np.equal), + (operator.ne, np.not_equal) + ]) + @pytest.mark.parametrize("vals", [(2**60, 2**60 + 1), (2**60 + 1, 2**60)]) + def test_large_integer_direct_comparison( + self, dtypes, py_comp, np_comp, vals): + # Note that float(2**60) + 1 == float(2**60). + a1 = np.array([2**60], dtype=dtypes[0]) + a2 = np.array([2**60 + 1], dtype=dtypes[1]) + expected = py_comp(2**60, 2**60 + 1) + + assert py_comp(a1, a2) == expected + assert np_comp(a1, a2) == expected + # Also check the scalars: + s1 = a1[0] + s2 = a2[0] + assert isinstance(s1, np.integer) + assert isinstance(s2, np.integer) + # The Python operator here is mainly interesting: + assert py_comp(s1, s2) == expected + assert np_comp(s1, s2) == expected + + @pytest.mark.parametrize("dtype", np.typecodes['UnsignedInteger']) + @pytest.mark.parametrize('py_comp_func, np_comp_func', [ + (operator.lt, np.less), + (operator.le, np.less_equal), + (operator.gt, np.greater), + (operator.ge, np.greater_equal), + (operator.eq, np.equal), + (operator.ne, np.not_equal) + ]) + @pytest.mark.parametrize("flip", [True, False]) + def test_unsigned_signed_direct_comparison( + self, dtype, py_comp_func, np_comp_func, flip): + if flip: + py_comp = lambda x, y: py_comp_func(y, x) + np_comp = lambda x, y: np_comp_func(y, x) + else: + py_comp = py_comp_func + np_comp = np_comp_func + + arr = np.array([np.iinfo(dtype).max], dtype=dtype) + expected = py_comp(int(arr[0]), -1) + + assert py_comp(arr, -1) == expected + assert np_comp(arr, -1) == expected + + scalar = arr[0] + assert isinstance(scalar, np.integer) + # The Python operator here is mainly interesting: + assert py_comp(scalar, -1) == expected + assert np_comp(scalar, -1) == expected + + +class TestAdd: + def test_reduce_alignment(self): + # gh-9876 + # make sure arrays with weird strides work with the optimizations in + # pairwise_sum_@TYPE@. On x86, the 'b' field will count as aligned at a + # 4 byte offset, even though its itemsize is 8. + a = np.zeros(2, dtype=[('a', np.int32), ('b', np.float64)]) + a['a'] = -1 + assert_equal(a['b'].sum(), 0) + + +class TestDivision: + def test_division_int(self): + # int division should follow Python + x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120]) + if 5 / 10 == 0.5: + assert_equal(x / 100, [0.05, 0.1, 0.9, 1, + -0.05, -0.1, -0.9, -1, -1.2]) + else: + assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2]) + assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2]) + assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80]) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.parametrize("dtype,ex_val", itertools.product( + sctypes['int'] + sctypes['uint'], ( + ( + # dividend + "np.array(range(fo.max-lsize, fo.max)).astype(dtype)," + # divisors + "np.arange(lsize).astype(dtype)," + # scalar divisors + "range(15)" + ), + ( + # dividend + "np.arange(fo.min, fo.min+lsize).astype(dtype)," + # divisors + "np.arange(lsize//-2, lsize//2).astype(dtype)," + # scalar divisors + "range(fo.min, fo.min + 15)" + ), ( + # dividend + "np.array(range(fo.max-lsize, fo.max)).astype(dtype)," + # divisors + "np.arange(lsize).astype(dtype)," + # scalar divisors + "[1,3,9,13,neg, fo.min+1, fo.min//2, fo.max//3, fo.max//4]" + ) + ) + )) + def test_division_int_boundary(self, dtype, ex_val): + fo = np.iinfo(dtype) + neg = -1 if fo.min < 0 else 1 + # Large enough to test SIMD loops and remainder elements + lsize = 512 + 7 + a, b, divisors = eval(ex_val) + a_lst, b_lst = a.tolist(), b.tolist() + + c_div = lambda n, d: ( + 0 if d == 0 else ( + fo.min if (n and n == fo.min and d == -1) else n // d + ) + ) + with np.errstate(divide='ignore'): + ac = a.copy() + ac //= b + div_ab = a // b + div_lst = [c_div(x, y) for x, y in zip(a_lst, b_lst)] + + msg = "Integer arrays floor division check (//)" + assert all(div_ab == div_lst), msg + msg_eq = "Integer arrays floor division check (//=)" + assert all(ac == div_lst), msg_eq + + for divisor in divisors: + ac = a.copy() + with np.errstate(divide='ignore', over='ignore'): + div_a = a // divisor + ac //= divisor + div_lst = [c_div(i, divisor) for i in a_lst] + + assert all(div_a == div_lst), msg + assert all(ac == div_lst), msg_eq + + with np.errstate(divide='raise', over='raise'): + if 0 in b: + # Verify overflow case + with pytest.raises(FloatingPointError, + match="divide by zero encountered in floor_divide"): + a // b + else: + a // b + if fo.min and fo.min in a: + with pytest.raises(FloatingPointError, + match='overflow encountered in floor_divide'): + a // -1 + elif fo.min: + a // -1 + with pytest.raises(FloatingPointError, + match="divide by zero encountered in floor_divide"): + a // 0 + with pytest.raises(FloatingPointError, + match="divide by zero encountered in floor_divide"): + ac = a.copy() + ac //= 0 + + np.array([], dtype=dtype) // 0 + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.parametrize("dtype,ex_val", itertools.product( + sctypes['int'] + sctypes['uint'], ( + "np.array([fo.max, 1, 2, 1, 1, 2, 3], dtype=dtype)", + "np.array([fo.min, 1, -2, 1, 1, 2, -3]).astype(dtype)", + "np.arange(fo.min, fo.min+(100*10), 10, dtype=dtype)", + "np.array(range(fo.max-(100*7), fo.max, 7)).astype(dtype)", + ) + )) + def test_division_int_reduce(self, dtype, ex_val): + fo = np.iinfo(dtype) + a = eval(ex_val) + lst = a.tolist() + c_div = lambda n, d: ( + 0 if d == 0 or (n and n == fo.min and d == -1) else n // d + ) + + with np.errstate(divide='ignore'): + div_a = np.floor_divide.reduce(a) + div_lst = reduce(c_div, lst) + msg = "Reduce floor integer division check" + assert div_a == div_lst, msg + + with np.errstate(divide='raise', over='raise'): + with pytest.raises(FloatingPointError, + match="divide by zero encountered in reduce"): + np.floor_divide.reduce(np.arange(-100, 100).astype(dtype)) + if fo.min: + with pytest.raises(FloatingPointError, + match='overflow encountered in reduce'): + np.floor_divide.reduce( + np.array([fo.min, 1, -1], dtype=dtype) + ) + + @pytest.mark.parametrize( + "dividend,divisor,quotient", + [(np.timedelta64(2, 'Y'), np.timedelta64(2, 'M'), 12), + (np.timedelta64(2, 'Y'), np.timedelta64(-2, 'M'), -12), + (np.timedelta64(-2, 'Y'), np.timedelta64(2, 'M'), -12), + (np.timedelta64(-2, 'Y'), np.timedelta64(-2, 'M'), 12), + (np.timedelta64(2, 'M'), np.timedelta64(-2, 'Y'), -1), + (np.timedelta64(2, 'Y'), np.timedelta64(0, 'M'), 0), + (np.timedelta64(2, 'Y'), 2, np.timedelta64(1, 'Y')), + (np.timedelta64(2, 'Y'), -2, np.timedelta64(-1, 'Y')), + (np.timedelta64(-2, 'Y'), 2, np.timedelta64(-1, 'Y')), + (np.timedelta64(-2, 'Y'), -2, np.timedelta64(1, 'Y')), + (np.timedelta64(-2, 'Y'), -2, np.timedelta64(1, 'Y')), + (np.timedelta64(-2, 'Y'), -3, np.timedelta64(0, 'Y')), + (np.timedelta64(-2, 'Y'), 0, np.timedelta64('Nat', 'Y')), + ]) + def test_division_int_timedelta(self, dividend, divisor, quotient): + # If either divisor is 0 or quotient is Nat, check for division by 0 + if divisor and (isinstance(quotient, int) or not np.isnat(quotient)): + msg = "Timedelta floor division check" + assert dividend // divisor == quotient, msg + + # Test for arrays as well + msg = "Timedelta arrays floor division check" + dividend_array = np.array([dividend] * 5) + quotient_array = np.array([quotient] * 5) + assert all(dividend_array // divisor == quotient_array), msg + else: + if IS_WASM: + pytest.skip("fp errors don't work in wasm") + with np.errstate(divide='raise', invalid='raise'): + with pytest.raises(FloatingPointError): + dividend // divisor + + def test_division_complex(self): + # check that implementation is correct + msg = "Complex division implementation check" + x = np.array([1. + 1. * 1j, 1. + .5 * 1j, 1. + 2. * 1j], dtype=np.complex128) + assert_almost_equal(x**2 / x, x, err_msg=msg) + # check overflow, underflow + msg = "Complex division overflow/underflow check" + x = np.array([1.e+110, 1.e-110], dtype=np.complex128) + y = x**2 / x + assert_almost_equal(y / x, [1, 1], err_msg=msg) + + def test_zero_division_complex(self): + with np.errstate(invalid="ignore", divide="ignore"): + x = np.array([0.0], dtype=np.complex128) + y = 1.0 / x + assert_(np.isinf(y)[0]) + y = complex(np.inf, np.nan) / x + assert_(np.isinf(y)[0]) + y = complex(np.nan, np.inf) / x + assert_(np.isinf(y)[0]) + y = complex(np.inf, np.inf) / x + assert_(np.isinf(y)[0]) + y = 0.0 / x + assert_(np.isnan(y)[0]) + + def test_floor_division_complex(self): + # check that floor division, divmod and remainder raises type errors + x = np.array([.9 + 1j, -.1 + 1j, .9 + .5 * 1j, .9 + 2. * 1j], dtype=np.complex128) + with pytest.raises(TypeError): + x // 7 + with pytest.raises(TypeError): + np.divmod(x, 7) + with pytest.raises(TypeError): + np.remainder(x, 7) + + def test_floor_division_signed_zero(self): + # Check that the sign bit is correctly set when dividing positive and + # negative zero by one. + x = np.zeros(10) + assert_equal(np.signbit(x // 1), 0) + assert_equal(np.signbit((-x) // 1), 1) + + @pytest.mark.skipif(hasattr(np.__config__, "blas_ssl2_info"), + reason="gh-22982") + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.parametrize('dtype', np.typecodes['Float']) + def test_floor_division_errors(self, dtype): + fnan = np.array(np.nan, dtype=dtype) + fone = np.array(1.0, dtype=dtype) + fzer = np.array(0.0, dtype=dtype) + finf = np.array(np.inf, dtype=dtype) + # divide by zero error check + with np.errstate(divide='raise', invalid='ignore'): + assert_raises(FloatingPointError, np.floor_divide, fone, fzer) + with np.errstate(divide='ignore', invalid='raise'): + np.floor_divide(fone, fzer) + + # The following already contain a NaN and should not warn + with np.errstate(all='raise'): + np.floor_divide(fnan, fone) + np.floor_divide(fone, fnan) + np.floor_divide(fnan, fzer) + np.floor_divide(fzer, fnan) + + @pytest.mark.parametrize('dtype', np.typecodes['Float']) + def test_floor_division_corner_cases(self, dtype): + # test corner cases like 1.0//0.0 for errors and return vals + x = np.zeros(10, dtype=dtype) + y = np.ones(10, dtype=dtype) + fnan = np.array(np.nan, dtype=dtype) + fone = np.array(1.0, dtype=dtype) + fzer = np.array(0.0, dtype=dtype) + finf = np.array(np.inf, dtype=dtype) + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', "invalid value encountered in floor_divide", RuntimeWarning) + div = np.floor_divide(fnan, fone) + assert np.isnan(div), f"div: {div}" + div = np.floor_divide(fone, fnan) + assert np.isnan(div), f"div: {div}" + div = np.floor_divide(fnan, fzer) + assert np.isnan(div), f"div: {div}" + # verify 1.0//0.0 computations return inf + with np.errstate(divide='ignore'): + z = np.floor_divide(y, x) + assert_(np.isinf(z).all()) + +def floor_divide_and_remainder(x, y): + return (np.floor_divide(x, y), np.remainder(x, y)) + + +def _signs(dt): + if dt in np.typecodes['UnsignedInteger']: + return (+1,) + else: + return (+1, -1) + + +class TestRemainder: + + def test_remainder_basic(self): + dt = np.typecodes['AllInteger'] + np.typecodes['Float'] + for op in [floor_divide_and_remainder, np.divmod]: + for dt1, dt2 in itertools.product(dt, dt): + for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)): + fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' + msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) + a = np.array(sg1 * 71, dtype=dt1) + b = np.array(sg2 * 19, dtype=dt2) + div, rem = op(a, b) + assert_equal(div * b + rem, a, err_msg=msg) + if sg2 == -1: + assert_(b < rem <= 0, msg) + else: + assert_(b > rem >= 0, msg) + + def test_float_remainder_exact(self): + # test that float results are exact for small integers. This also + # holds for the same integers scaled by powers of two. + nlst = list(range(-127, 0)) + plst = list(range(1, 128)) + dividend = nlst + [0] + plst + divisor = nlst + plst + arg = list(itertools.product(dividend, divisor)) + tgt = [divmod(*t) for t in arg] + + a, b = np.array(arg, dtype=int).T + # convert exact integer results from Python to float so that + # signed zero can be used, it is checked. + tgtdiv, tgtrem = np.array(tgt, dtype=float).T + tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv) + tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem) + + for op in [floor_divide_and_remainder, np.divmod]: + for dt in np.typecodes['Float']: + msg = f'op: {op.__name__}, dtype: {dt}' + fa = a.astype(dt) + fb = b.astype(dt) + div, rem = op(fa, fb) + assert_equal(div, tgtdiv, err_msg=msg) + assert_equal(rem, tgtrem, err_msg=msg) + + def test_float_remainder_roundoff(self): + # gh-6127 + dt = np.typecodes['Float'] + for op in [floor_divide_and_remainder, np.divmod]: + for dt1, dt2 in itertools.product(dt, dt): + for sg1, sg2 in itertools.product((+1, -1), (+1, -1)): + fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' + msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) + a = np.array(sg1 * 78 * 6e-8, dtype=dt1) + b = np.array(sg2 * 6e-8, dtype=dt2) + div, rem = op(a, b) + # Equal assertion should hold when fmod is used + assert_equal(div * b + rem, a, err_msg=msg) + if sg2 == -1: + assert_(b < rem <= 0, msg) + else: + assert_(b > rem >= 0, msg) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.xfail(sys.platform.startswith("darwin"), + reason="MacOS seems to not give the correct 'invalid' warning for " + "`fmod`. Hopefully, others always do.") + @pytest.mark.parametrize('dtype', np.typecodes['Float']) + def test_float_divmod_errors(self, dtype): + # Check valid errors raised for divmod and remainder + fzero = np.array(0.0, dtype=dtype) + fone = np.array(1.0, dtype=dtype) + finf = np.array(np.inf, dtype=dtype) + fnan = np.array(np.nan, dtype=dtype) + # since divmod is combination of both remainder and divide + # ops it will set both dividebyzero and invalid flags + with np.errstate(divide='raise', invalid='ignore'): + assert_raises(FloatingPointError, np.divmod, fone, fzero) + with np.errstate(divide='ignore', invalid='raise'): + assert_raises(FloatingPointError, np.divmod, fone, fzero) + with np.errstate(invalid='raise'): + assert_raises(FloatingPointError, np.divmod, fzero, fzero) + with np.errstate(invalid='raise'): + assert_raises(FloatingPointError, np.divmod, finf, finf) + with np.errstate(divide='ignore', invalid='raise'): + assert_raises(FloatingPointError, np.divmod, finf, fzero) + with np.errstate(divide='raise', invalid='ignore'): + # inf / 0 does not set any flags, only the modulo creates a NaN + np.divmod(finf, fzero) + + @pytest.mark.skipif(hasattr(np.__config__, "blas_ssl2_info"), + reason="gh-22982") + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.xfail(sys.platform.startswith("darwin"), + reason="MacOS seems to not give the correct 'invalid' warning for " + "`fmod`. Hopefully, others always do.") + @pytest.mark.parametrize('dtype', np.typecodes['Float']) + @pytest.mark.parametrize('fn', [np.fmod, np.remainder]) + def test_float_remainder_errors(self, dtype, fn): + fzero = np.array(0.0, dtype=dtype) + fone = np.array(1.0, dtype=dtype) + finf = np.array(np.inf, dtype=dtype) + fnan = np.array(np.nan, dtype=dtype) + + # The following already contain a NaN and should not warn. + with np.errstate(all='raise'): + with pytest.raises(FloatingPointError, + match="invalid value"): + fn(fone, fzero) + fn(fnan, fzero) + fn(fzero, fnan) + fn(fone, fnan) + fn(fnan, fone) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_float_remainder_overflow(self): + a = np.finfo(np.float64).tiny + with np.errstate(over='ignore', invalid='ignore'): + div, mod = np.divmod(4, a) + np.isinf(div) + assert_(mod == 0) + with np.errstate(over='raise', invalid='ignore'): + assert_raises(FloatingPointError, np.divmod, 4, a) + with np.errstate(invalid='raise', over='ignore'): + assert_raises(FloatingPointError, np.divmod, 4, a) + + def test_float_divmod_corner_cases(self): + # check nan cases + for dt in np.typecodes['Float']: + fnan = np.array(np.nan, dtype=dt) + fone = np.array(1.0, dtype=dt) + fzer = np.array(0.0, dtype=dt) + finf = np.array(np.inf, dtype=dt) + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', "invalid value encountered in divmod", RuntimeWarning) + warnings.filterwarnings('ignore', "divide by zero encountered in divmod", RuntimeWarning) + div, rem = np.divmod(fone, fzer) + assert np.isinf(div), f'dt: {dt}, div: {rem}' + assert np.isnan(rem), f'dt: {dt}, rem: {rem}' + div, rem = np.divmod(fzer, fzer) + assert np.isnan(rem), f'dt: {dt}, rem: {rem}' + assert_(np.isnan(div)), f'dt: {dt}, rem: {rem}' + div, rem = np.divmod(finf, finf) + assert np.isnan(div), f'dt: {dt}, rem: {rem}' + assert np.isnan(rem), f'dt: {dt}, rem: {rem}' + div, rem = np.divmod(finf, fzer) + assert np.isinf(div), f'dt: {dt}, rem: {rem}' + assert np.isnan(rem), f'dt: {dt}, rem: {rem}' + div, rem = np.divmod(fnan, fone) + assert np.isnan(rem), f"dt: {dt}, rem: {rem}" + assert np.isnan(div), f"dt: {dt}, rem: {rem}" + div, rem = np.divmod(fone, fnan) + assert np.isnan(rem), f"dt: {dt}, rem: {rem}" + assert np.isnan(div), f"dt: {dt}, rem: {rem}" + div, rem = np.divmod(fnan, fzer) + assert np.isnan(rem), f"dt: {dt}, rem: {rem}" + assert np.isnan(div), f"dt: {dt}, rem: {rem}" + + def test_float_remainder_corner_cases(self): + # Check remainder magnitude. + for dt in np.typecodes['Float']: + fone = np.array(1.0, dtype=dt) + fzer = np.array(0.0, dtype=dt) + fnan = np.array(np.nan, dtype=dt) + b = np.array(1.0, dtype=dt) + a = np.nextafter(np.array(0.0, dtype=dt), -b) + rem = np.remainder(a, b) + assert_(rem <= b, f'dt: {dt}') + rem = np.remainder(-a, -b) + assert_(rem >= -b, f'dt: {dt}') + + # Check nans, inf + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', "invalid value encountered in remainder", RuntimeWarning) + warnings.filterwarnings('ignore', "invalid value encountered in fmod", RuntimeWarning) + for dt in np.typecodes['Float']: + fone = np.array(1.0, dtype=dt) + fzer = np.array(0.0, dtype=dt) + finf = np.array(np.inf, dtype=dt) + fnan = np.array(np.nan, dtype=dt) + rem = np.remainder(fone, fzer) + assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}') + # MSVC 2008 returns NaN here, so disable the check. + #rem = np.remainder(fone, finf) + #assert_(rem == fone, 'dt: %s, rem: %s' % (dt, rem)) + rem = np.remainder(finf, fone) + fmod = np.fmod(finf, fone) + assert_(np.isnan(fmod), f'dt: {dt}, fmod: {fmod}') + assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}') + rem = np.remainder(finf, finf) + fmod = np.fmod(finf, fone) + assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}') + assert_(np.isnan(fmod), f'dt: {dt}, fmod: {fmod}') + rem = np.remainder(finf, fzer) + fmod = np.fmod(finf, fzer) + assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}') + assert_(np.isnan(fmod), f'dt: {dt}, fmod: {fmod}') + rem = np.remainder(fone, fnan) + fmod = np.fmod(fone, fnan) + assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}') + assert_(np.isnan(fmod), f'dt: {dt}, fmod: {fmod}') + rem = np.remainder(fnan, fzer) + fmod = np.fmod(fnan, fzer) + assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}') + assert_(np.isnan(fmod), f'dt: {dt}, fmod: {rem}') + rem = np.remainder(fnan, fone) + fmod = np.fmod(fnan, fone) + assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}') + assert_(np.isnan(fmod), f'dt: {dt}, fmod: {rem}') + + +class TestDivisionIntegerOverflowsAndDivideByZero: + result_type = namedtuple('result_type', + ['nocast', 'casted']) + helper_lambdas = { + 'zero': lambda dtype: 0, + 'min': lambda dtype: np.iinfo(dtype).min, + 'neg_min': lambda dtype: -np.iinfo(dtype).min, + 'min-zero': lambda dtype: (np.iinfo(dtype).min, 0), + 'neg_min-zero': lambda dtype: (-np.iinfo(dtype).min, 0), + } + overflow_results = { + np.remainder: result_type( + helper_lambdas['zero'], helper_lambdas['zero']), + np.fmod: result_type( + helper_lambdas['zero'], helper_lambdas['zero']), + operator.mod: result_type( + helper_lambdas['zero'], helper_lambdas['zero']), + operator.floordiv: result_type( + helper_lambdas['min'], helper_lambdas['neg_min']), + np.floor_divide: result_type( + helper_lambdas['min'], helper_lambdas['neg_min']), + np.divmod: result_type( + helper_lambdas['min-zero'], helper_lambdas['neg_min-zero']) + } + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.parametrize("dtype", np.typecodes["Integer"]) + def test_signed_division_overflow(self, dtype): + to_check = interesting_binop_operands(np.iinfo(dtype).min, -1, dtype) + for op1, op2, extractor, operand_identifier in to_check: + with pytest.warns(RuntimeWarning, match="overflow encountered"): + res = op1 // op2 + + assert res.dtype == op1.dtype + assert extractor(res) == np.iinfo(op1.dtype).min + + # Remainder is well defined though, and does not warn: + res = op1 % op2 + assert res.dtype == op1.dtype + assert extractor(res) == 0 + # Check fmod as well: + res = np.fmod(op1, op2) + assert extractor(res) == 0 + + # Divmod warns for the division part: + with pytest.warns(RuntimeWarning, match="overflow encountered"): + res1, res2 = np.divmod(op1, op2) + + assert res1.dtype == res2.dtype == op1.dtype + assert extractor(res1) == np.iinfo(op1.dtype).min + assert extractor(res2) == 0 + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) + def test_divide_by_zero(self, dtype): + # Note that the return value cannot be well defined here, but NumPy + # currently uses 0 consistently. This could be changed. + to_check = interesting_binop_operands(1, 0, dtype) + for op1, op2, extractor, operand_identifier in to_check: + with pytest.warns(RuntimeWarning, match="divide by zero"): + res = op1 // op2 + + assert res.dtype == op1.dtype + assert extractor(res) == 0 + + with pytest.warns(RuntimeWarning, match="divide by zero"): + res1, res2 = np.divmod(op1, op2) + + assert res1.dtype == res2.dtype == op1.dtype + assert extractor(res1) == 0 + assert extractor(res2) == 0 + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.parametrize("dividend_dtype", sctypes['int']) + @pytest.mark.parametrize("divisor_dtype", sctypes['int']) + @pytest.mark.parametrize("operation", + [np.remainder, np.fmod, np.divmod, np.floor_divide, + operator.mod, operator.floordiv]) + @np.errstate(divide='warn', over='warn') + def test_overflows(self, dividend_dtype, divisor_dtype, operation): + # SIMD tries to perform the operation on as many elements as possible + # that is a multiple of the register's size. We resort to the + # default implementation for the leftover elements. + # We try to cover all paths here. + arrays = [np.array([np.iinfo(dividend_dtype).min] * i, + dtype=dividend_dtype) for i in range(1, 129)] + divisor = np.array([-1], dtype=divisor_dtype) + # If dividend is a larger type than the divisor (`else` case), + # then, result will be a larger type than dividend and will not + # result in an overflow for `divmod` and `floor_divide`. + if np.dtype(dividend_dtype).itemsize >= np.dtype( + divisor_dtype).itemsize and operation in ( + np.divmod, np.floor_divide, operator.floordiv): + with pytest.warns( + RuntimeWarning, + match="overflow encountered in"): + result = operation( + dividend_dtype(np.iinfo(dividend_dtype).min), + divisor_dtype(-1) + ) + assert result == self.overflow_results[operation].nocast( + dividend_dtype) + + # Arrays + for a in arrays: + # In case of divmod, we need to flatten the result + # column first as we get a column vector of quotient and + # remainder and a normal flatten of the expected result. + with pytest.warns( + RuntimeWarning, + match="overflow encountered in"): + result = np.array(operation(a, divisor)).flatten('f') + expected_array = np.array( + [self.overflow_results[operation].nocast( + dividend_dtype)] * len(a)).flatten() + assert_array_equal(result, expected_array) + else: + # Scalars + result = operation( + dividend_dtype(np.iinfo(dividend_dtype).min), + divisor_dtype(-1) + ) + assert result == self.overflow_results[operation].casted( + dividend_dtype) + + # Arrays + for a in arrays: + # See above comment on flatten + result = np.array(operation(a, divisor)).flatten('f') + expected_array = np.array( + [self.overflow_results[operation].casted( + dividend_dtype)] * len(a)).flatten() + assert_array_equal(result, expected_array) + + +class TestCbrt: + def test_cbrt_scalar(self): + assert_almost_equal((np.cbrt(np.float32(-2.5)**3)), -2.5) + + def test_cbrt(self): + x = np.array([1., 2., -3., np.inf, -np.inf]) + assert_almost_equal(np.cbrt(x**3), x) + + assert_(np.isnan(np.cbrt(np.nan))) + assert_equal(np.cbrt(np.inf), np.inf) + assert_equal(np.cbrt(-np.inf), -np.inf) + + +class TestPower: + def test_power_float(self): + x = np.array([1., 2., 3.]) + assert_equal(x**0, [1., 1., 1.]) + assert_equal(x**1, x) + assert_equal(x**2, [1., 4., 9.]) + y = x.copy() + y **= 2 + assert_equal(y, [1., 4., 9.]) + assert_almost_equal(x**(-1), [1., 0.5, 1. / 3]) + assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)]) + + for out, inp, msg in _gen_alignment_data(dtype=np.float32, + type='unary', + max_size=11): + exp = [ncu.sqrt(i) for i in inp] + assert_almost_equal(inp**(0.5), exp, err_msg=msg) + np.sqrt(inp, out=out) + assert_equal(out, exp, err_msg=msg) + + for out, inp, msg in _gen_alignment_data(dtype=np.float64, + type='unary', + max_size=7): + exp = [ncu.sqrt(i) for i in inp] + assert_almost_equal(inp**(0.5), exp, err_msg=msg) + np.sqrt(inp, out=out) + assert_equal(out, exp, err_msg=msg) + + def test_power_complex(self): + x = np.array([1 + 2j, 2 + 3j, 3 + 4j]) + assert_equal(x**0, [1., 1., 1.]) + assert_equal(x**1, x) + assert_almost_equal(x**2, [-3 + 4j, -5 + 12j, -7 + 24j]) + assert_almost_equal(x**3, [(1 + 2j)**3, (2 + 3j)**3, (3 + 4j)**3]) + assert_almost_equal(x**4, [(1 + 2j)**4, (2 + 3j)**4, (3 + 4j)**4]) + assert_almost_equal(x**(-1), [1 / (1 + 2j), 1 / (2 + 3j), 1 / (3 + 4j)]) + assert_almost_equal(x**(-2), [1 / (1 + 2j)**2, 1 / (2 + 3j)**2, 1 / (3 + 4j)**2]) + assert_almost_equal(x**(-3), [(-11 + 2j) / 125, (-46 - 9j) / 2197, + (-117 - 44j) / 15625]) + assert_almost_equal(x**(0.5), [ncu.sqrt(1 + 2j), ncu.sqrt(2 + 3j), + ncu.sqrt(3 + 4j)]) + norm = 1. / ((x**14)[0]) + assert_almost_equal(x**14 * norm, + [i * norm for i in [-76443 + 16124j, 23161315 + 58317492j, + 5583548873 + 2465133864j]]) + + # Ticket #836 + def assert_complex_equal(x, y): + assert_array_equal(x.real, y.real) + assert_array_equal(x.imag, y.imag) + + for z in [complex(0, np.inf), complex(1, np.inf)]: + z = np.array([z], dtype=np.complex128) + with np.errstate(invalid="ignore"): + assert_complex_equal(z**1, z) + assert_complex_equal(z**2, z * z) + assert_complex_equal(z**3, z * z * z) + + def test_power_zero(self): + # ticket #1271 + zero = np.array([0j]) + one = np.array([1 + 0j]) + cnan = np.array([complex(np.nan, np.nan)]) + # FIXME cinf not tested. + #cinf = np.array([complex(np.inf, 0)]) + + def assert_complex_equal(x, y): + x, y = np.asarray(x), np.asarray(y) + assert_array_equal(x.real, y.real) + assert_array_equal(x.imag, y.imag) + + # positive powers + for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]: + assert_complex_equal(np.power(zero, p), zero) + + # zero power + assert_complex_equal(np.power(zero, 0), one) + with np.errstate(invalid="ignore"): + assert_complex_equal(np.power(zero, 0 + 1j), cnan) + + # negative power + for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]: + assert_complex_equal(np.power(zero, -p), cnan) + assert_complex_equal(np.power(zero, -1 + 0.2j), cnan) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_zero_power_nonzero(self): + # Testing 0^{Non-zero} issue 18378 + zero = np.array([0.0 + 0.0j]) + cnan = np.array([complex(np.nan, np.nan)]) + + def assert_complex_equal(x, y): + assert_array_equal(x.real, y.real) + assert_array_equal(x.imag, y.imag) + + # Complex powers with positive real part will not generate a warning + assert_complex_equal(np.power(zero, 1 + 4j), zero) + assert_complex_equal(np.power(zero, 2 - 3j), zero) + # Testing zero values when real part is greater than zero + assert_complex_equal(np.power(zero, 1 + 1j), zero) + assert_complex_equal(np.power(zero, 1 + 0j), zero) + assert_complex_equal(np.power(zero, 1 - 1j), zero) + # Complex powers will negative real part or 0 (provided imaginary + # part is not zero) will generate a NAN and hence a RUNTIME warning + with pytest.warns(expected_warning=RuntimeWarning) as r: + assert_complex_equal(np.power(zero, -1 + 1j), cnan) + assert_complex_equal(np.power(zero, -2 - 3j), cnan) + assert_complex_equal(np.power(zero, -7 + 0j), cnan) + assert_complex_equal(np.power(zero, 0 + 1j), cnan) + assert_complex_equal(np.power(zero, 0 - 1j), cnan) + assert len(r) == 5 + + def test_fast_power(self): + x = np.array([1, 2, 3], np.int16) + res = x**2.0 + assert_((x**2.00001).dtype is res.dtype) + assert_array_equal(res, [1, 4, 9]) + # check the inplace operation on the casted copy doesn't mess with x + assert_(not np.may_share_memory(res, x)) + assert_array_equal(x, [1, 2, 3]) + + # Check that the fast path ignores 1-element not 0-d arrays + res = x ** np.array([[[2]]]) + assert_equal(res.shape, (1, 1, 3)) + + def test_integer_power(self): + a = np.array([15, 15], 'i8') + b = np.power(a, a) + assert_equal(b, [437893890380859375, 437893890380859375]) + + def test_integer_power_with_integer_zero_exponent(self): + dtypes = np.typecodes['Integer'] + for dt in dtypes: + arr = np.arange(-10, 10, dtype=dt) + assert_equal(np.power(arr, 0), np.ones_like(arr)) + + dtypes = np.typecodes['UnsignedInteger'] + for dt in dtypes: + arr = np.arange(10, dtype=dt) + assert_equal(np.power(arr, 0), np.ones_like(arr)) + + def test_integer_power_of_1(self): + dtypes = np.typecodes['AllInteger'] + for dt in dtypes: + arr = np.arange(10, dtype=dt) + assert_equal(np.power(1, arr), np.ones_like(arr)) + + def test_integer_power_of_zero(self): + dtypes = np.typecodes['AllInteger'] + for dt in dtypes: + arr = np.arange(1, 10, dtype=dt) + assert_equal(np.power(0, arr), np.zeros_like(arr)) + + def test_integer_to_negative_power(self): + dtypes = np.typecodes['Integer'] + for dt in dtypes: + a = np.array([0, 1, 2, 3], dtype=dt) + b = np.array([0, 1, 2, -3], dtype=dt) + one = np.array(1, dtype=dt) + minusone = np.array(-1, dtype=dt) + assert_raises(ValueError, np.power, a, b) + assert_raises(ValueError, np.power, a, minusone) + assert_raises(ValueError, np.power, one, b) + assert_raises(ValueError, np.power, one, minusone) + + def test_float_to_inf_power(self): + for dt in [np.float32, np.float64]: + a = np.array([1, 1, 2, 2, -2, -2, np.inf, -np.inf], dt) + b = np.array([np.inf, -np.inf, np.inf, -np.inf, + np.inf, -np.inf, np.inf, -np.inf], dt) + r = np.array([1, 1, np.inf, 0, np.inf, 0, np.inf, 0], dt) + assert_equal(np.power(a, b), r) + + def test_power_fast_paths(self): + # gh-26055 + for dt in [np.float32, np.float64]: + a = np.array([0, 1.1, 2, 12e12, -10., np.inf, -np.inf], dt) + expected = np.array([0.0, 1.21, 4., 1.44e+26, 100, np.inf, np.inf]) + result = np.power(a, 2.) + assert_array_max_ulp(result, expected.astype(dt), maxulp=1) + + a = np.array([0, 1.1, 2, 12e12], dt) + expected = np.sqrt(a).astype(dt) + result = np.power(a, 0.5) + assert_array_max_ulp(result, expected, maxulp=1) + + +class TestFloat_power: + def test_type_conversion(self): + arg_type = '?bhilBHILefdgFDG' + res_type = 'ddddddddddddgDDG' + for dtin, dtout in zip(arg_type, res_type): + msg = f"dtin: {dtin}, dtout: {dtout}" + arg = np.ones(1, dtype=dtin) + res = np.float_power(arg, arg) + assert_(res.dtype.name == np.dtype(dtout).name, msg) + + +class TestLog2: + @pytest.mark.parametrize('dt', ['f', 'd', 'g']) + def test_log2_values(self, dt): + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_almost_equal(np.log2(xf), yf) + + @pytest.mark.parametrize("i", range(1, 65)) + def test_log2_ints(self, i): + # a good log2 implementation should provide this, + # might fail on OS with bad libm + v = np.log2(2.**i) + assert_equal(v, float(i), err_msg='at exponent %d' % i) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_log2_special(self): + assert_equal(np.log2(1.), 0.) + assert_equal(np.log2(np.inf), np.inf) + assert_(np.isnan(np.log2(np.nan))) + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_(np.isnan(np.log2(-1.))) + assert_(np.isnan(np.log2(-np.inf))) + assert_equal(np.log2(0.), -np.inf) + assert_(w[0].category is RuntimeWarning) + assert_(w[1].category is RuntimeWarning) + assert_(w[2].category is RuntimeWarning) + + +class TestExp2: + def test_exp2_values(self): + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for dt in ['f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_almost_equal(np.exp2(yf), xf) + + +class TestLogAddExp2(_FilterInvalids): + # Need test for intermediate precisions + def test_logaddexp2_values(self): + x = [1, 2, 3, 4, 5] + y = [5, 4, 3, 2, 1] + z = [6, 6, 6, 6, 6] + for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]): + xf = np.log2(np.array(x, dtype=dt)) + yf = np.log2(np.array(y, dtype=dt)) + zf = np.log2(np.array(z, dtype=dt)) + assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec_) + + def test_logaddexp2_range(self): + x = [1000000, -1000000, 1000200, -1000200] + y = [1000200, -1000200, 1000000, -1000000] + z = [1000200, -1000000, 1000200, -1000000] + for dt in ['f', 'd', 'g']: + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_almost_equal(np.logaddexp2(logxf, logyf), logzf) + + def test_inf(self): + inf = np.inf + x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] # noqa: E221 + y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] # noqa: E221 + z = [inf, inf, inf, -inf, inf, inf, 1, 1] + with np.errstate(invalid='raise'): + for dt in ['f', 'd', 'g']: + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_equal(np.logaddexp2(logxf, logyf), logzf) + + def test_nan(self): + assert_(np.isnan(np.logaddexp2(np.nan, np.inf))) + assert_(np.isnan(np.logaddexp2(np.inf, np.nan))) + assert_(np.isnan(np.logaddexp2(np.nan, 0))) + assert_(np.isnan(np.logaddexp2(0, np.nan))) + assert_(np.isnan(np.logaddexp2(np.nan, np.nan))) + + def test_reduce(self): + assert_equal(np.logaddexp2.identity, -np.inf) + assert_equal(np.logaddexp2.reduce([]), -np.inf) + assert_equal(np.logaddexp2.reduce([-np.inf]), -np.inf) + assert_equal(np.logaddexp2.reduce([-np.inf, 0]), 0) + + +class TestLog: + def test_log_values(self): + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for dt in ['f', 'd', 'g']: + log2_ = 0.69314718055994530943 + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) * log2_ + assert_almost_equal(np.log(xf), yf) + + # test aliasing(issue #17761) + x = np.array([2, 0.937500, 3, 0.947500, 1.054697]) + xf = np.log(x) + assert_almost_equal(np.log(x, out=x), xf) + + def test_log_values_maxofdtype(self): + # test log() of max for dtype does not raise + dtypes = [np.float32, np.float64] + # This is failing at least on linux aarch64 (see gh-25460), and on most + # other non x86-64 platforms checking `longdouble` isn't too useful as + # it's an alias for float64. + if platform.machine() == 'x86_64': + dtypes += [np.longdouble] + + for dt in dtypes: + with np.errstate(all='raise'): + x = np.finfo(dt).max + np.log(x) + + def test_log_strides(self): + np.random.seed(42) + strides = np.array([-4, -3, -2, -1, 1, 2, 3, 4]) + sizes = np.arange(2, 100) + for ii in sizes: + x_f64 = np.float64(np.random.uniform(low=0.01, high=100.0, size=ii)) + x_special = x_f64.copy() + x_special[3:-1:4] = 1.0 + y_true = np.log(x_f64) + y_special = np.log(x_special) + for jj in strides: + assert_array_almost_equal_nulp(np.log(x_f64[::jj]), y_true[::jj], nulp=2) + assert_array_almost_equal_nulp(np.log(x_special[::jj]), y_special[::jj], nulp=2) + + # Reference values were computed with mpmath, with mp.dps = 200. + @pytest.mark.parametrize( + 'z, wref', + [(1 + 1e-12j, 5e-25 + 1e-12j), + (1.000000000000001 + 3e-08j, + 1.5602230246251546e-15 + 2.999999999999996e-08j), + (0.9999995000000417 + 0.0009999998333333417j, + 7.831475869017683e-18 + 0.001j), + (0.9999999999999996 + 2.999999999999999e-08j, + 5.9107901499372034e-18 + 3e-08j), + (0.99995000042 - 0.009999833j, + -7.015159763822903e-15 - 0.009999999665816696j)], + ) + def test_log_precision_float64(self, z, wref): + w = np.log(z) + assert_allclose(w, wref, rtol=1e-15) + + # Reference values were computed with mpmath, with mp.dps = 200. + @pytest.mark.parametrize( + 'z, wref', + [(np.complex64(1.0 + 3e-6j), np.complex64(4.5e-12 + 3e-06j)), + (np.complex64(1.0 - 2e-5j), np.complex64(1.9999999e-10 - 2e-5j)), + (np.complex64(0.9999999 + 1e-06j), + np.complex64(-1.192088e-07 + 1.0000001e-06j))], + ) + def test_log_precision_float32(self, z, wref): + w = np.log(z) + assert_allclose(w, wref, rtol=1e-6) + + +class TestExp: + def test_exp_values(self): + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for dt in ['f', 'd', 'g']: + log2_ = 0.69314718055994530943 + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) * log2_ + assert_almost_equal(np.exp(yf), xf) + + def test_exp_strides(self): + np.random.seed(42) + strides = np.array([-4, -3, -2, -1, 1, 2, 3, 4]) + sizes = np.arange(2, 100) + for ii in sizes: + x_f64 = np.float64(np.random.uniform(low=0.01, high=709.1, size=ii)) + y_true = np.exp(x_f64) + for jj in strides: + assert_array_almost_equal_nulp(np.exp(x_f64[::jj]), y_true[::jj], nulp=2) + +class TestSpecialFloats: + def test_exp_values(self): + with np.errstate(under='raise', over='raise'): + x = [np.nan, np.nan, np.inf, 0.] + y = [np.nan, -np.nan, np.inf, -np.inf] + for dt in ['e', 'f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_equal(np.exp(yf), xf) + + # See: https://github.com/numpy/numpy/issues/19192 + @pytest.mark.xfail( + _glibc_older_than("2.17"), + reason="Older glibc versions may not raise appropriate FP exceptions" + ) + def test_exp_exceptions(self): + with np.errstate(over='raise'): + assert_raises(FloatingPointError, np.exp, np.float16(11.0899)) + assert_raises(FloatingPointError, np.exp, np.float32(100.)) + assert_raises(FloatingPointError, np.exp, np.float32(1E19)) + assert_raises(FloatingPointError, np.exp, np.float64(800.)) + assert_raises(FloatingPointError, np.exp, np.float64(1E19)) + + with np.errstate(under='raise'): + assert_raises(FloatingPointError, np.exp, np.float16(-17.5)) + assert_raises(FloatingPointError, np.exp, np.float32(-1000.)) + assert_raises(FloatingPointError, np.exp, np.float32(-1E19)) + assert_raises(FloatingPointError, np.exp, np.float64(-1000.)) + assert_raises(FloatingPointError, np.exp, np.float64(-1E19)) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_log_values(self): + with np.errstate(all='ignore'): + x = [np.nan, np.nan, np.inf, np.nan, -np.inf, np.nan] + y = [np.nan, -np.nan, np.inf, -np.inf, 0.0, -1.0] + y1p = [np.nan, -np.nan, np.inf, -np.inf, -1.0, -2.0] + for dt in ['e', 'f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + yf1p = np.array(y1p, dtype=dt) + assert_equal(np.log(yf), xf) + assert_equal(np.log2(yf), xf) + assert_equal(np.log10(yf), xf) + assert_equal(np.log1p(yf1p), xf) + + with np.errstate(divide='raise'): + for dt in ['e', 'f', 'd']: + assert_raises(FloatingPointError, np.log, + np.array(0.0, dtype=dt)) + assert_raises(FloatingPointError, np.log2, + np.array(0.0, dtype=dt)) + assert_raises(FloatingPointError, np.log10, + np.array(0.0, dtype=dt)) + assert_raises(FloatingPointError, np.log1p, + np.array(-1.0, dtype=dt)) + + with np.errstate(invalid='raise'): + for dt in ['e', 'f', 'd']: + assert_raises(FloatingPointError, np.log, + np.array(-np.inf, dtype=dt)) + assert_raises(FloatingPointError, np.log, + np.array(-1.0, dtype=dt)) + assert_raises(FloatingPointError, np.log2, + np.array(-np.inf, dtype=dt)) + assert_raises(FloatingPointError, np.log2, + np.array(-1.0, dtype=dt)) + assert_raises(FloatingPointError, np.log10, + np.array(-np.inf, dtype=dt)) + assert_raises(FloatingPointError, np.log10, + np.array(-1.0, dtype=dt)) + assert_raises(FloatingPointError, np.log1p, + np.array(-np.inf, dtype=dt)) + assert_raises(FloatingPointError, np.log1p, + np.array(-2.0, dtype=dt)) + + # See https://github.com/numpy/numpy/issues/18005 + with assert_no_warnings(): + a = np.array(1e9, dtype='float32') + np.log(a) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.parametrize('dtype', ['e', 'f', 'd', 'g']) + def test_sincos_values(self, dtype): + with np.errstate(all='ignore'): + x = [np.nan, np.nan, np.nan, np.nan] + y = [np.nan, -np.nan, np.inf, -np.inf] + xf = np.array(x, dtype=dtype) + yf = np.array(y, dtype=dtype) + assert_equal(np.sin(yf), xf) + assert_equal(np.cos(yf), xf) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.xfail( + sys.platform.startswith("darwin"), + reason="underflow is triggered for scalar 'sin'" + ) + def test_sincos_underflow(self): + with np.errstate(under='raise'): + underflow_trigger = np.array( + float.fromhex("0x1.f37f47a03f82ap-511"), + dtype=np.float64 + ) + np.sin(underflow_trigger) + np.cos(underflow_trigger) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.parametrize('callable', [np.sin, np.cos]) + @pytest.mark.parametrize('dtype', ['e', 'f', 'd']) + @pytest.mark.parametrize('value', [np.inf, -np.inf]) + def test_sincos_errors(self, callable, dtype, value): + with np.errstate(invalid='raise'): + assert_raises(FloatingPointError, callable, + np.array([value], dtype=dtype)) + + @pytest.mark.parametrize('callable', [np.sin, np.cos]) + @pytest.mark.parametrize('dtype', ['f', 'd']) + @pytest.mark.parametrize('stride', [-1, 1, 2, 4, 5]) + def test_sincos_overlaps(self, callable, dtype, stride): + N = 100 + M = N // abs(stride) + rng = np.random.default_rng(42) + x = rng.standard_normal(N, dtype) + y = callable(x[::stride]) + callable(x[::stride], out=x[:M]) + assert_equal(x[:M], y) + + @pytest.mark.parametrize('dt', ['e', 'f', 'd', 'g']) + def test_sqrt_values(self, dt): + with np.errstate(all='ignore'): + x = [np.nan, np.nan, np.inf, np.nan, 0.] + y = [np.nan, -np.nan, np.inf, -np.inf, 0.] + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_equal(np.sqrt(yf), xf) + + # with np.errstate(invalid='raise'): + # assert_raises( + # FloatingPointError, np.sqrt, np.array(-100., dtype=dt) + # ) + + def test_abs_values(self): + x = [np.nan, np.nan, np.inf, np.inf, 0., 0., 1.0, 1.0] + y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0., -1.0, 1.0] + for dt in ['e', 'f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_equal(np.abs(yf), xf) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_square_values(self): + x = [np.nan, np.nan, np.inf, np.inf] + y = [np.nan, -np.nan, np.inf, -np.inf] + with np.errstate(all='ignore'): + for dt in ['e', 'f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_equal(np.square(yf), xf) + + with np.errstate(over='raise'): + assert_raises(FloatingPointError, np.square, + np.array(1E3, dtype='e')) + assert_raises(FloatingPointError, np.square, + np.array(1E32, dtype='f')) + assert_raises(FloatingPointError, np.square, + np.array(1E200, dtype='d')) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_reciprocal_values(self): + with np.errstate(all='ignore'): + x = [np.nan, np.nan, 0.0, -0.0, np.inf, -np.inf] + y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0.] + for dt in ['e', 'f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_equal(np.reciprocal(yf), xf) + + with np.errstate(divide='raise'): + for dt in ['e', 'f', 'd', 'g']: + assert_raises(FloatingPointError, np.reciprocal, + np.array(-0.0, dtype=dt)) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_tan(self): + with np.errstate(all='ignore'): + in_ = [np.nan, -np.nan, 0.0, -0.0, np.inf, -np.inf] + out = [np.nan, np.nan, 0.0, -0.0, np.nan, np.nan] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.tan(in_arr), out_arr) + + with np.errstate(invalid='raise'): + for dt in ['e', 'f', 'd']: + assert_raises(FloatingPointError, np.tan, + np.array(np.inf, dtype=dt)) + assert_raises(FloatingPointError, np.tan, + np.array(-np.inf, dtype=dt)) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_arcsincos(self): + with np.errstate(all='ignore'): + in_ = [np.nan, -np.nan, np.inf, -np.inf] + out = [np.nan, np.nan, np.nan, np.nan] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.arcsin(in_arr), out_arr) + assert_equal(np.arccos(in_arr), out_arr) + + for callable in [np.arcsin, np.arccos]: + for value in [np.inf, -np.inf, 2.0, -2.0]: + for dt in ['e', 'f', 'd']: + with np.errstate(invalid='raise'): + assert_raises(FloatingPointError, callable, + np.array(value, dtype=dt)) + + def test_arctan(self): + with np.errstate(all='ignore'): + in_ = [np.nan, -np.nan] + out = [np.nan, np.nan] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.arctan(in_arr), out_arr) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_sinh(self): + in_ = [np.nan, -np.nan, np.inf, -np.inf] + out = [np.nan, np.nan, np.inf, -np.inf] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.sinh(in_arr), out_arr) + + with np.errstate(over='raise'): + assert_raises(FloatingPointError, np.sinh, + np.array(12.0, dtype='e')) + assert_raises(FloatingPointError, np.sinh, + np.array(120.0, dtype='f')) + assert_raises(FloatingPointError, np.sinh, + np.array(1200.0, dtype='d')) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.skipif('bsd' in sys.platform, + reason="fallback implementation may not raise, see gh-2487") + def test_cosh(self): + in_ = [np.nan, -np.nan, np.inf, -np.inf] + out = [np.nan, np.nan, np.inf, np.inf] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.cosh(in_arr), out_arr) + + with np.errstate(over='raise'): + assert_raises(FloatingPointError, np.cosh, + np.array(12.0, dtype='e')) + assert_raises(FloatingPointError, np.cosh, + np.array(120.0, dtype='f')) + assert_raises(FloatingPointError, np.cosh, + np.array(1200.0, dtype='d')) + + def test_tanh(self): + in_ = [np.nan, -np.nan, np.inf, -np.inf] + out = [np.nan, np.nan, 1.0, -1.0] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_array_max_ulp(np.tanh(in_arr), out_arr, 3) + + def test_arcsinh(self): + in_ = [np.nan, -np.nan, np.inf, -np.inf] + out = [np.nan, np.nan, np.inf, -np.inf] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.arcsinh(in_arr), out_arr) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_arccosh(self): + with np.errstate(all='ignore'): + in_ = [np.nan, -np.nan, np.inf, -np.inf, 1.0, 0.0] + out = [np.nan, np.nan, np.inf, np.nan, 0.0, np.nan] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.arccosh(in_arr), out_arr) + + for value in [0.0, -np.inf]: + with np.errstate(invalid='raise'): + for dt in ['e', 'f', 'd']: + assert_raises(FloatingPointError, np.arccosh, + np.array(value, dtype=dt)) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_arctanh(self): + with np.errstate(all='ignore'): + in_ = [np.nan, -np.nan, np.inf, -np.inf, 1.0, -1.0, 2.0] + out = [np.nan, np.nan, np.nan, np.nan, np.inf, -np.inf, np.nan] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.arctanh(in_arr), out_arr) + + for value in [1.01, np.inf, -np.inf, 1.0, -1.0]: + with np.errstate(invalid='raise', divide='raise'): + for dt in ['e', 'f', 'd']: + assert_raises(FloatingPointError, np.arctanh, + np.array(value, dtype=dt)) + + # Make sure glibc < 2.18 atanh is not used, issue 25087 + assert np.signbit(np.arctanh(-1j).real) + + # See: https://github.com/numpy/numpy/issues/20448 + @pytest.mark.xfail( + _glibc_older_than("2.17"), + reason="Older glibc versions may not raise appropriate FP exceptions" + ) + def test_exp2(self): + with np.errstate(all='ignore'): + in_ = [np.nan, -np.nan, np.inf, -np.inf] + out = [np.nan, np.nan, np.inf, 0.0] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.exp2(in_arr), out_arr) + + for value in [2000.0, -2000.0]: + with np.errstate(over='raise', under='raise'): + for dt in ['e', 'f', 'd']: + assert_raises(FloatingPointError, np.exp2, + np.array(value, dtype=dt)) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_expm1(self): + with np.errstate(all='ignore'): + in_ = [np.nan, -np.nan, np.inf, -np.inf] + out = [np.nan, np.nan, np.inf, -1.0] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.expm1(in_arr), out_arr) + + for value in [200.0, 2000.0]: + with np.errstate(over='raise'): + for dt in ['e', 'f']: + assert_raises(FloatingPointError, np.expm1, + np.array(value, dtype=dt)) + + # test to ensure no spurious FP exceptions are raised due to SIMD + INF_INVALID_ERR = [ + np.cos, np.sin, np.tan, np.arccos, np.arcsin, np.spacing, np.arctanh + ] + NEG_INVALID_ERR = [ + np.log, np.log2, np.log10, np.log1p, np.sqrt, np.arccosh, + np.arctanh + ] + ONE_INVALID_ERR = [ + np.arctanh, + ] + LTONE_INVALID_ERR = [ + np.arccosh, + ] + BYZERO_ERR = [ + np.log, np.log2, np.log10, np.reciprocal, np.arccosh + ] + + @pytest.mark.parametrize("ufunc", UFUNCS_UNARY_FP) + @pytest.mark.parametrize("dtype", ('e', 'f', 'd')) + @pytest.mark.parametrize("data, escape", ( + ([0.03], LTONE_INVALID_ERR), + ([0.03] * 32, LTONE_INVALID_ERR), + # neg + ([-1.0], NEG_INVALID_ERR), + ([-1.0] * 32, NEG_INVALID_ERR), + # flat + ([1.0], ONE_INVALID_ERR), + ([1.0] * 32, ONE_INVALID_ERR), + # zero + ([0.0], BYZERO_ERR), + ([0.0] * 32, BYZERO_ERR), + ([-0.0], BYZERO_ERR), + ([-0.0] * 32, BYZERO_ERR), + # nan + ([0.5, 0.5, 0.5, np.nan], LTONE_INVALID_ERR), + ([0.5, 0.5, 0.5, np.nan] * 32, LTONE_INVALID_ERR), + ([np.nan, 1.0, 1.0, 1.0], ONE_INVALID_ERR), + ([np.nan, 1.0, 1.0, 1.0] * 32, ONE_INVALID_ERR), + ([np.nan], []), + ([np.nan] * 32, []), + # inf + ([0.5, 0.5, 0.5, np.inf], INF_INVALID_ERR + LTONE_INVALID_ERR), + ([0.5, 0.5, 0.5, np.inf] * 32, INF_INVALID_ERR + LTONE_INVALID_ERR), + ([np.inf, 1.0, 1.0, 1.0], INF_INVALID_ERR), + ([np.inf, 1.0, 1.0, 1.0] * 32, INF_INVALID_ERR), + ([np.inf], INF_INVALID_ERR), + ([np.inf] * 32, INF_INVALID_ERR), + # ninf + ([0.5, 0.5, 0.5, -np.inf], + NEG_INVALID_ERR + INF_INVALID_ERR + LTONE_INVALID_ERR), + ([0.5, 0.5, 0.5, -np.inf] * 32, + NEG_INVALID_ERR + INF_INVALID_ERR + LTONE_INVALID_ERR), + ([-np.inf, 1.0, 1.0, 1.0], NEG_INVALID_ERR + INF_INVALID_ERR), + ([-np.inf, 1.0, 1.0, 1.0] * 32, NEG_INVALID_ERR + INF_INVALID_ERR), + ([-np.inf], NEG_INVALID_ERR + INF_INVALID_ERR), + ([-np.inf] * 32, NEG_INVALID_ERR + INF_INVALID_ERR), + )) + def test_unary_spurious_fpexception(self, ufunc, dtype, data, escape): + if escape and ufunc in escape: + return + # FIXME: NAN raises FP invalid exception: + # - ceil/float16 on MSVC:32-bit + # - spacing/float16 on almost all platforms + # - spacing/float32,float64 on Windows MSVC with VS2022 + if ufunc in (np.spacing, np.ceil) and dtype == 'e': + return + # Skip spacing tests with NaN on Windows MSVC (all dtypes) + import platform + if (ufunc == np.spacing and + platform.system() == 'Windows' and + any(np.isnan(d) if isinstance(d, (int, float)) else False for d in data)): + pytest.skip("spacing with NaN generates warnings on Windows/VS2022") + array = np.array(data, dtype=dtype) + with assert_no_warnings(): + ufunc(array) + + @pytest.mark.parametrize("dtype", ('e', 'f', 'd')) + def test_divide_spurious_fpexception(self, dtype): + dt = np.dtype(dtype) + dt_info = np.finfo(dt) + subnorm = dt_info.smallest_subnormal + # Verify a bug fix caused due to filling the remaining lanes of the + # partially loaded dividend SIMD vector with ones, which leads to + # raising an overflow warning when the divisor is denormal. + # see https://github.com/numpy/numpy/issues/25097 + with assert_no_warnings(): + np.zeros(128 + 1, dtype=dt) / subnorm + +class TestFPClass: + @pytest.mark.parametrize("stride", [-5, -4, -3, -2, -1, 1, + 2, 4, 5, 6, 7, 8, 9, 10]) + def test_fpclass(self, stride): + arr_f64 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 2.2251e-308, -2.2251e-308], dtype='d') + arr_f32 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 1.4013e-045, -1.4013e-045], dtype='f') + nan = np.array([True, True, False, False, False, False, False, False, False, False]) # noqa: E221 + inf = np.array([False, False, True, True, False, False, False, False, False, False]) # noqa: E221 + sign = np.array([False, True, False, True, True, False, True, False, False, True]) # noqa: E221 + finite = np.array([False, False, False, False, True, True, True, True, True, True]) # noqa: E221 + assert_equal(np.isnan(arr_f32[::stride]), nan[::stride]) + assert_equal(np.isnan(arr_f64[::stride]), nan[::stride]) + assert_equal(np.isinf(arr_f32[::stride]), inf[::stride]) + assert_equal(np.isinf(arr_f64[::stride]), inf[::stride]) + if platform.machine() == 'riscv64': + # On RISC-V, many operations that produce NaNs, such as converting + # a -NaN from f64 to f32, return a canonical NaN. The canonical + # NaNs are always positive. See section 11.3 NaN Generation and + # Propagation of the RISC-V Unprivileged ISA for more details. + # We disable the sign test on riscv64 for -np.nan as we + # cannot assume that its sign will be honoured in these tests. + arr_f64_rv = np.copy(arr_f64) + arr_f32_rv = np.copy(arr_f32) + arr_f64_rv[1] = -1.0 + arr_f32_rv[1] = -1.0 + assert_equal(np.signbit(arr_f32_rv[::stride]), sign[::stride]) + assert_equal(np.signbit(arr_f64_rv[::stride]), sign[::stride]) + else: + assert_equal(np.signbit(arr_f32[::stride]), sign[::stride]) + assert_equal(np.signbit(arr_f64[::stride]), sign[::stride]) + assert_equal(np.isfinite(arr_f32[::stride]), finite[::stride]) + assert_equal(np.isfinite(arr_f64[::stride]), finite[::stride]) + + @pytest.mark.parametrize("dtype", ['d', 'f']) + def test_fp_noncontiguous(self, dtype): + data = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, + 1.0, -0.0, 0.0, 2.2251e-308, + -2.2251e-308], dtype=dtype) + nan = np.array([True, True, False, False, False, False, + False, False, False, False]) + inf = np.array([False, False, True, True, False, False, + False, False, False, False]) + sign = np.array([False, True, False, True, True, False, + True, False, False, True]) + finite = np.array([False, False, False, False, True, True, + True, True, True, True]) + out = np.ndarray(data.shape, dtype='bool') + ncontig_in = data[1::3] + ncontig_out = out[1::3] + contig_in = np.array(ncontig_in) + + if platform.machine() == 'riscv64': + # Disable the -np.nan signbit tests on riscv64. See comments in + # test_fpclass for more details. + data_rv = np.copy(data) + data_rv[1] = -1.0 + ncontig_sign_in = data_rv[1::3] + contig_sign_in = np.array(ncontig_sign_in) + else: + ncontig_sign_in = ncontig_in + contig_sign_in = contig_in + + assert_equal(ncontig_in.flags.c_contiguous, False) + assert_equal(ncontig_out.flags.c_contiguous, False) + assert_equal(contig_in.flags.c_contiguous, True) + assert_equal(ncontig_sign_in.flags.c_contiguous, False) + assert_equal(contig_sign_in.flags.c_contiguous, True) + # ncontig in, ncontig out + assert_equal(np.isnan(ncontig_in, out=ncontig_out), nan[1::3]) + assert_equal(np.isinf(ncontig_in, out=ncontig_out), inf[1::3]) + assert_equal(np.signbit(ncontig_sign_in, out=ncontig_out), sign[1::3]) + assert_equal(np.isfinite(ncontig_in, out=ncontig_out), finite[1::3]) + # contig in, ncontig out + assert_equal(np.isnan(contig_in, out=ncontig_out), nan[1::3]) + assert_equal(np.isinf(contig_in, out=ncontig_out), inf[1::3]) + assert_equal(np.signbit(contig_sign_in, out=ncontig_out), sign[1::3]) + assert_equal(np.isfinite(contig_in, out=ncontig_out), finite[1::3]) + # ncontig in, contig out + assert_equal(np.isnan(ncontig_in), nan[1::3]) + assert_equal(np.isinf(ncontig_in), inf[1::3]) + assert_equal(np.signbit(ncontig_sign_in), sign[1::3]) + assert_equal(np.isfinite(ncontig_in), finite[1::3]) + # contig in, contig out, nd stride + data_split = np.array(np.array_split(data, 2)) + nan_split = np.array(np.array_split(nan, 2)) + inf_split = np.array(np.array_split(inf, 2)) + sign_split = np.array(np.array_split(sign, 2)) + finite_split = np.array(np.array_split(finite, 2)) + assert_equal(np.isnan(data_split), nan_split) + assert_equal(np.isinf(data_split), inf_split) + if platform.machine() == 'riscv64': + data_split_rv = np.array(np.array_split(data_rv, 2)) + assert_equal(np.signbit(data_split_rv), sign_split) + else: + assert_equal(np.signbit(data_split), sign_split) + assert_equal(np.isfinite(data_split), finite_split) + +class TestLDExp: + @pytest.mark.parametrize("stride", [-4, -2, -1, 1, 2, 4]) + @pytest.mark.parametrize("dtype", ['f', 'd']) + def test_ldexp(self, dtype, stride): + mant = np.array([0.125, 0.25, 0.5, 1., 1., 2., 4., 8.], dtype=dtype) + exp = np.array([3, 2, 1, 0, 0, -1, -2, -3], dtype='i') + out = np.zeros(8, dtype=dtype) + assert_equal(np.ldexp(mant[::stride], exp[::stride], out=out[::stride]), np.ones(8, dtype=dtype)[::stride]) + assert_equal(out[::stride], np.ones(8, dtype=dtype)[::stride]) + +class TestFRExp: + @pytest.mark.parametrize("stride", [-4, -2, -1, 1, 2, 4]) + @pytest.mark.parametrize("dtype", ['f', 'd']) + @pytest.mark.skipif(not sys.platform.startswith('linux'), + reason="np.frexp gives different answers for NAN/INF on windows and linux") + @pytest.mark.xfail(IS_MUSL, reason="gh23049") + def test_frexp(self, dtype, stride): + arr = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 1.0, -1.0], dtype=dtype) + mant_true = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 0.5, -0.5], dtype=dtype) + exp_true = np.array([0, 0, 0, 0, 0, 0, 1, 1], dtype='i') + out_mant = np.ones(8, dtype=dtype) + out_exp = 2 * np.ones(8, dtype='i') + mant, exp = np.frexp(arr[::stride], out=(out_mant[::stride], out_exp[::stride])) + assert_equal(mant_true[::stride], mant) + assert_equal(exp_true[::stride], exp) + assert_equal(out_mant[::stride], mant_true[::stride]) + assert_equal(out_exp[::stride], exp_true[::stride]) + + +# func : [maxulperror, low, high] +avx_ufuncs = {'sqrt' : [1, 0., 100.], # noqa: E203 + 'absolute' : [0, -100., 100.], # noqa: E203 + 'reciprocal' : [1, 1., 100.], # noqa: E203 + 'square' : [1, -100., 100.], # noqa: E203 + 'rint' : [0, -100., 100.], # noqa: E203 + 'floor' : [0, -100., 100.], # noqa: E203 + 'ceil' : [0, -100., 100.], # noqa: E203 + 'trunc' : [0, -100., 100.]} # noqa: E203 + +class TestAVXUfuncs: + def test_avx_based_ufunc(self): + strides = np.array([-4, -3, -2, -1, 1, 2, 3, 4]) + np.random.seed(42) + for func, prop in avx_ufuncs.items(): + maxulperr = prop[0] + minval = prop[1] + maxval = prop[2] + # various array sizes to ensure masking in AVX is tested + for size in range(1, 32): + myfunc = getattr(np, func) + x_f32 = np.random.uniform(low=minval, high=maxval, + size=size).astype(np.float32) + x_f64 = x_f32.astype(np.float64) + x_f128 = x_f32.astype(np.longdouble) + y_true128 = myfunc(x_f128) + if maxulperr == 0: + assert_equal(myfunc(x_f32), y_true128.astype(np.float32)) + assert_equal(myfunc(x_f64), y_true128.astype(np.float64)) + else: + assert_array_max_ulp(myfunc(x_f32), + y_true128.astype(np.float32), + maxulp=maxulperr) + assert_array_max_ulp(myfunc(x_f64), + y_true128.astype(np.float64), + maxulp=maxulperr) + # various strides to test gather instruction + if size > 1: + y_true32 = myfunc(x_f32) + y_true64 = myfunc(x_f64) + for jj in strides: + assert_equal(myfunc(x_f64[::jj]), y_true64[::jj]) + assert_equal(myfunc(x_f32[::jj]), y_true32[::jj]) + +class TestAVXFloat32Transcendental: + def test_exp_float32(self): + np.random.seed(42) + x_f32 = np.float32(np.random.uniform(low=0.0, high=88.1, size=1000000)) + x_f64 = np.float64(x_f32) + assert_array_max_ulp(np.exp(x_f32), np.float32(np.exp(x_f64)), maxulp=3) + + def test_log_float32(self): + np.random.seed(42) + x_f32 = np.float32(np.random.uniform(low=0.0, high=1000, size=1000000)) + x_f64 = np.float64(x_f32) + assert_array_max_ulp(np.log(x_f32), np.float32(np.log(x_f64)), maxulp=4) + + def test_sincos_float32(self): + np.random.seed(42) + N = 1000000 + M = np.int_(N / 20) + index = np.random.randint(low=0, high=N, size=M) + x_f32 = np.float32(np.random.uniform(low=-100., high=100., size=N)) + if not _glibc_older_than("2.17"): + # test coverage for elements > 117435.992f for which glibc is used + # this is known to be problematic on old glibc, so skip it there + x_f32[index] = np.float32(10E+10 * np.random.rand(M)) + x_f64 = np.float64(x_f32) + assert_array_max_ulp(np.sin(x_f32), np.float32(np.sin(x_f64)), maxulp=2) + assert_array_max_ulp(np.cos(x_f32), np.float32(np.cos(x_f64)), maxulp=2) + # test aliasing(issue #17761) + tx_f32 = x_f32.copy() + assert_array_max_ulp(np.sin(x_f32, out=x_f32), np.float32(np.sin(x_f64)), maxulp=2) + assert_array_max_ulp(np.cos(tx_f32, out=tx_f32), np.float32(np.cos(x_f64)), maxulp=2) + + def test_strided_float32(self): + np.random.seed(42) + strides = np.array([-4, -3, -2, -1, 1, 2, 3, 4]) + sizes = np.arange(2, 100) + for ii in sizes: + x_f32 = np.float32(np.random.uniform(low=0.01, high=88.1, size=ii)) + x_f32_large = x_f32.copy() + x_f32_large[3:-1:4] = 120000.0 + exp_true = np.exp(x_f32) + log_true = np.log(x_f32) + sin_true = np.sin(x_f32_large) + cos_true = np.cos(x_f32_large) + for jj in strides: + assert_array_almost_equal_nulp(np.exp(x_f32[::jj]), exp_true[::jj], nulp=2) + assert_array_almost_equal_nulp(np.log(x_f32[::jj]), log_true[::jj], nulp=2) + assert_array_almost_equal_nulp(np.sin(x_f32_large[::jj]), sin_true[::jj], nulp=2) + assert_array_almost_equal_nulp(np.cos(x_f32_large[::jj]), cos_true[::jj], nulp=2) + +class TestLogAddExp(_FilterInvalids): + def test_logaddexp_values(self): + x = [1, 2, 3, 4, 5] + y = [5, 4, 3, 2, 1] + z = [6, 6, 6, 6, 6] + for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]): + xf = np.log(np.array(x, dtype=dt)) + yf = np.log(np.array(y, dtype=dt)) + zf = np.log(np.array(z, dtype=dt)) + assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec_) + + def test_logaddexp_range(self): + x = [1000000, -1000000, 1000200, -1000200] + y = [1000200, -1000200, 1000000, -1000000] + z = [1000200, -1000000, 1000200, -1000000] + for dt in ['f', 'd', 'g']: + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_almost_equal(np.logaddexp(logxf, logyf), logzf) + + def test_inf(self): + inf = np.inf + x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] # noqa: E221 + y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] # noqa: E221 + z = [inf, inf, inf, -inf, inf, inf, 1, 1] + with np.errstate(invalid='raise'): + for dt in ['f', 'd', 'g']: + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_equal(np.logaddexp(logxf, logyf), logzf) + + def test_nan(self): + assert_(np.isnan(np.logaddexp(np.nan, np.inf))) + assert_(np.isnan(np.logaddexp(np.inf, np.nan))) + assert_(np.isnan(np.logaddexp(np.nan, 0))) + assert_(np.isnan(np.logaddexp(0, np.nan))) + assert_(np.isnan(np.logaddexp(np.nan, np.nan))) + + def test_reduce(self): + assert_equal(np.logaddexp.identity, -np.inf) + assert_equal(np.logaddexp.reduce([]), -np.inf) + + +class TestLog1p: + def test_log1p(self): + assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2)) + assert_almost_equal(ncu.log1p(1e-6), ncu.log(1 + 1e-6)) + + def test_special(self): + with np.errstate(invalid="ignore", divide="ignore"): + assert_equal(ncu.log1p(np.nan), np.nan) + assert_equal(ncu.log1p(np.inf), np.inf) + assert_equal(ncu.log1p(-1.), -np.inf) + assert_equal(ncu.log1p(-2.), np.nan) + assert_equal(ncu.log1p(-np.inf), np.nan) + + +class TestExpm1: + def test_expm1(self): + assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2) - 1) + assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6) - 1) + + def test_special(self): + assert_equal(ncu.expm1(np.inf), np.inf) + assert_equal(ncu.expm1(0.), 0.) + assert_equal(ncu.expm1(-0.), -0.) + assert_equal(ncu.expm1(np.inf), np.inf) + assert_equal(ncu.expm1(-np.inf), -1.) + + def test_complex(self): + x = np.asarray(1e-12) + assert_allclose(x, ncu.expm1(x)) + x = x.astype(np.complex128) + assert_allclose(x, ncu.expm1(x)) + + +class TestHypot: + def test_simple(self): + assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2)) + assert_almost_equal(ncu.hypot(0, 0), 0) + + def test_reduce(self): + assert_almost_equal(ncu.hypot.reduce([3.0, 4.0]), 5.0) + assert_almost_equal(ncu.hypot.reduce([3.0, 4.0, 0]), 5.0) + assert_almost_equal(ncu.hypot.reduce([9.0, 12.0, 20.0]), 25.0) + assert_equal(ncu.hypot.reduce([]), 0.0) + + +def assert_hypot_isnan(x, y): + with np.errstate(invalid='ignore'): + assert_(np.isnan(ncu.hypot(x, y)), + f"hypot({x}, {y}) is {ncu.hypot(x, y)}, not nan") + + +def assert_hypot_isinf(x, y): + with np.errstate(invalid='ignore'): + assert_(np.isinf(ncu.hypot(x, y)), + f"hypot({x}, {y}) is {ncu.hypot(x, y)}, not inf") + + +class TestHypotSpecialValues: + def test_nan_outputs(self): + assert_hypot_isnan(np.nan, np.nan) + assert_hypot_isnan(np.nan, 1) + + def test_nan_outputs2(self): + assert_hypot_isinf(np.nan, np.inf) + assert_hypot_isinf(np.inf, np.nan) + assert_hypot_isinf(np.inf, 0) + assert_hypot_isinf(0, np.inf) + assert_hypot_isinf(np.inf, np.inf) + assert_hypot_isinf(np.inf, 23.0) + + def test_no_fpe(self): + assert_no_warnings(ncu.hypot, np.inf, 0) + + +def assert_arctan2_isnan(x, y): + assert_(np.isnan(ncu.arctan2(x, y)), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not nan") + + +def assert_arctan2_ispinf(x, y): + assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not +inf") + + +def assert_arctan2_isninf(x, y): + assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not -inf") + + +def assert_arctan2_ispzero(x, y): + assert_((ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not +0") + + +def assert_arctan2_isnzero(x, y): + assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not -0") + + +class TestArctan2SpecialValues: + def test_one_one(self): + # atan2(1, 1) returns pi/4. + assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi) + assert_almost_equal(ncu.arctan2(-1, 1), -0.25 * np.pi) + assert_almost_equal(ncu.arctan2(1, -1), 0.75 * np.pi) + + def test_zero_nzero(self): + # atan2(+-0, -0) returns +-pi. + assert_almost_equal(ncu.arctan2(ncu.PZERO, ncu.NZERO), np.pi) + assert_almost_equal(ncu.arctan2(ncu.NZERO, ncu.NZERO), -np.pi) + + def test_zero_pzero(self): + # atan2(+-0, +0) returns +-0. + assert_arctan2_ispzero(ncu.PZERO, ncu.PZERO) + assert_arctan2_isnzero(ncu.NZERO, ncu.PZERO) + + def test_zero_negative(self): + # atan2(+-0, x) returns +-pi for x < 0. + assert_almost_equal(ncu.arctan2(ncu.PZERO, -1), np.pi) + assert_almost_equal(ncu.arctan2(ncu.NZERO, -1), -np.pi) + + def test_zero_positive(self): + # atan2(+-0, x) returns +-0 for x > 0. + assert_arctan2_ispzero(ncu.PZERO, 1) + assert_arctan2_isnzero(ncu.NZERO, 1) + + def test_positive_zero(self): + # atan2(y, +-0) returns +pi/2 for y > 0. + assert_almost_equal(ncu.arctan2(1, ncu.PZERO), 0.5 * np.pi) + assert_almost_equal(ncu.arctan2(1, ncu.NZERO), 0.5 * np.pi) + + def test_negative_zero(self): + # atan2(y, +-0) returns -pi/2 for y < 0. + assert_almost_equal(ncu.arctan2(-1, ncu.PZERO), -0.5 * np.pi) + assert_almost_equal(ncu.arctan2(-1, ncu.NZERO), -0.5 * np.pi) + + def test_any_ninf(self): + # atan2(+-y, -infinity) returns +-pi for finite y > 0. + assert_almost_equal(ncu.arctan2(1, -np.inf), np.pi) + assert_almost_equal(ncu.arctan2(-1, -np.inf), -np.pi) + + def test_any_pinf(self): + # atan2(+-y, +infinity) returns +-0 for finite y > 0. + assert_arctan2_ispzero(1, np.inf) + assert_arctan2_isnzero(-1, np.inf) + + def test_inf_any(self): + # atan2(+-infinity, x) returns +-pi/2 for finite x. + assert_almost_equal(ncu.arctan2( np.inf, 1), 0.5 * np.pi) + assert_almost_equal(ncu.arctan2(-np.inf, 1), -0.5 * np.pi) + + def test_inf_ninf(self): + # atan2(+-infinity, -infinity) returns +-3*pi/4. + assert_almost_equal(ncu.arctan2( np.inf, -np.inf), 0.75 * np.pi) + assert_almost_equal(ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi) + + def test_inf_pinf(self): + # atan2(+-infinity, +infinity) returns +-pi/4. + assert_almost_equal(ncu.arctan2( np.inf, np.inf), 0.25 * np.pi) + assert_almost_equal(ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi) + + def test_nan_any(self): + # atan2(nan, x) returns nan for any x, including inf + assert_arctan2_isnan(np.nan, np.inf) + assert_arctan2_isnan(np.inf, np.nan) + assert_arctan2_isnan(np.nan, np.nan) + + +class TestLdexp: + def _check_ldexp(self, tp): + assert_almost_equal(ncu.ldexp(np.array(2., np.float32), + np.array(3, tp)), 16.) + assert_almost_equal(ncu.ldexp(np.array(2., np.float64), + np.array(3, tp)), 16.) + assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble), + np.array(3, tp)), 16.) + + def test_ldexp(self): + # The default Python int type should work + assert_almost_equal(ncu.ldexp(2., 3), 16.) + # The following int types should all be accepted + self._check_ldexp(np.int8) + self._check_ldexp(np.int16) + self._check_ldexp(np.int32) + self._check_ldexp('i') + self._check_ldexp('l') + + def test_ldexp_overflow(self): + # silence warning emitted on overflow + with np.errstate(over="ignore"): + imax = np.iinfo(np.dtype('l')).max + imin = np.iinfo(np.dtype('l')).min + assert_equal(ncu.ldexp(2., imax), np.inf) + assert_equal(ncu.ldexp(2., imin), 0) + + +class TestMaximum(_FilterInvalids): + def test_reduce(self): + dflt = np.typecodes['AllFloat'] + dint = np.typecodes['AllInteger'] + seq1 = np.arange(11) + seq2 = seq1[::-1] + func = np.maximum.reduce + for dt in dint: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 10) + assert_equal(func(tmp2), 10) + for dt in dflt: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 10) + assert_equal(func(tmp2), 10) + tmp1[::2] = np.nan + tmp2[::2] = np.nan + assert_equal(func(tmp1), np.nan) + assert_equal(func(tmp2), np.nan) + + def test_reduce_complex(self): + assert_equal(np.maximum.reduce([1, 2j]), 1) + assert_equal(np.maximum.reduce([1 + 3j, 2j]), 1 + 3j) + + def test_float_nans(self): + nan = np.nan + arg1 = np.array([0, nan, nan]) + arg2 = np.array([nan, 0, nan]) + out = np.array([nan, nan, nan]) + assert_equal(np.maximum(arg1, arg2), out) + + def test_object_nans(self): + # Multiple checks to give this a chance to + # fail if cmp is used instead of rich compare. + # Failure cannot be guaranteed. + for i in range(1): + x = np.array(float('nan'), object) + y = 1.0 + z = np.array(float('nan'), object) + assert_(np.maximum(x, y) == 1.0) + assert_(np.maximum(z, y) == 1.0) + + def test_complex_nans(self): + nan = np.nan + for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: + arg1 = np.array([0, cnan, cnan], dtype=complex) + arg2 = np.array([cnan, 0, cnan], dtype=complex) + out = np.array([nan, nan, nan], dtype=complex) + assert_equal(np.maximum(arg1, arg2), out) + + def test_object_array(self): + arg1 = np.arange(5, dtype=object) + arg2 = arg1 + 1 + assert_equal(np.maximum(arg1, arg2), arg2) + + def test_strided_array(self): + arr1 = np.array([-4.0, 1.0, 10.0, 0.0, np.nan, -np.nan, np.inf, -np.inf]) + arr2 = np.array([-2.0, -1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0]) # noqa: E221 + maxtrue = np.array([-2.0, 1.0, np.nan, 1.0, np.nan, np.nan, np.inf, -3.0]) + out = np.ones(8) + out_maxtrue = np.array([-2.0, 1.0, 1.0, 10.0, 1.0, 1.0, np.nan, 1.0]) + assert_equal(np.maximum(arr1, arr2), maxtrue) + assert_equal(np.maximum(arr1[::2], arr2[::2]), maxtrue[::2]) + assert_equal(np.maximum(arr1[:4:], arr2[::2]), np.array([-2.0, np.nan, 10.0, 1.0])) + assert_equal(np.maximum(arr1[::3], arr2[:3:]), np.array([-2.0, 0.0, np.nan])) + assert_equal(np.maximum(arr1[:6:2], arr2[::3], out=out[::3]), np.array([-2.0, 10., np.nan])) + assert_equal(out, out_maxtrue) + + def test_precision(self): + dtypes = [np.float16, np.float32, np.float64, np.longdouble] + + for dt in dtypes: + dtmin = np.finfo(dt).min + dtmax = np.finfo(dt).max + d1 = dt(0.1) + d1_next = np.nextafter(d1, np.inf) + + test_cases = [ + # v1 v2 expected + (dtmin, -np.inf, dtmin), + (dtmax, -np.inf, dtmax), + (d1, d1_next, d1_next), + (dtmax, np.nan, np.nan), + ] + + for v1, v2, expected in test_cases: + assert_equal(np.maximum([v1], [v2]), [expected]) + assert_equal(np.maximum.reduce([v1, v2]), expected) + + +class TestMinimum(_FilterInvalids): + def test_reduce(self): + dflt = np.typecodes['AllFloat'] + dint = np.typecodes['AllInteger'] + seq1 = np.arange(11) + seq2 = seq1[::-1] + func = np.minimum.reduce + for dt in dint: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 0) + assert_equal(func(tmp2), 0) + for dt in dflt: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 0) + assert_equal(func(tmp2), 0) + tmp1[::2] = np.nan + tmp2[::2] = np.nan + assert_equal(func(tmp1), np.nan) + assert_equal(func(tmp2), np.nan) + + def test_reduce_complex(self): + assert_equal(np.minimum.reduce([1, 2j]), 2j) + assert_equal(np.minimum.reduce([1 + 3j, 2j]), 2j) + + def test_float_nans(self): + nan = np.nan + arg1 = np.array([0, nan, nan]) + arg2 = np.array([nan, 0, nan]) + out = np.array([nan, nan, nan]) + assert_equal(np.minimum(arg1, arg2), out) + + def test_object_nans(self): + # Multiple checks to give this a chance to + # fail if cmp is used instead of rich compare. + # Failure cannot be guaranteed. + for i in range(1): + x = np.array(float('nan'), object) + y = 1.0 + z = np.array(float('nan'), object) + assert_(np.minimum(x, y) == 1.0) + assert_(np.minimum(z, y) == 1.0) + + def test_complex_nans(self): + nan = np.nan + for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: + arg1 = np.array([0, cnan, cnan], dtype=complex) + arg2 = np.array([cnan, 0, cnan], dtype=complex) + out = np.array([nan, nan, nan], dtype=complex) + assert_equal(np.minimum(arg1, arg2), out) + + def test_object_array(self): + arg1 = np.arange(5, dtype=object) + arg2 = arg1 + 1 + assert_equal(np.minimum(arg1, arg2), arg1) + + def test_strided_array(self): + arr1 = np.array([-4.0, 1.0, 10.0, 0.0, np.nan, -np.nan, np.inf, -np.inf]) + arr2 = np.array([-2.0, -1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0]) + mintrue = np.array([-4.0, -1.0, np.nan, 0.0, np.nan, np.nan, 1.0, -np.inf]) + out = np.ones(8) + out_mintrue = np.array([-4.0, 1.0, 1.0, 1.0, 1.0, 1.0, np.nan, 1.0]) + assert_equal(np.minimum(arr1, arr2), mintrue) + assert_equal(np.minimum(arr1[::2], arr2[::2]), mintrue[::2]) + assert_equal(np.minimum(arr1[:4:], arr2[::2]), np.array([-4.0, np.nan, 0.0, 0.0])) + assert_equal(np.minimum(arr1[::3], arr2[:3:]), np.array([-4.0, -1.0, np.nan])) + assert_equal(np.minimum(arr1[:6:2], arr2[::3], out=out[::3]), np.array([-4.0, 1.0, np.nan])) + assert_equal(out, out_mintrue) + + def test_precision(self): + dtypes = [np.float16, np.float32, np.float64, np.longdouble] + + for dt in dtypes: + dtmin = np.finfo(dt).min + dtmax = np.finfo(dt).max + d1 = dt(0.1) + d1_next = np.nextafter(d1, np.inf) + + test_cases = [ + # v1 v2 expected + (dtmin, np.inf, dtmin), + (dtmax, np.inf, dtmax), + (d1, d1_next, d1), + (dtmin, np.nan, np.nan), + ] + + for v1, v2, expected in test_cases: + assert_equal(np.minimum([v1], [v2]), [expected]) + assert_equal(np.minimum.reduce([v1, v2]), expected) + + +class TestFmax(_FilterInvalids): + def test_reduce(self): + dflt = np.typecodes['AllFloat'] + dint = np.typecodes['AllInteger'] + seq1 = np.arange(11) + seq2 = seq1[::-1] + func = np.fmax.reduce + for dt in dint: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 10) + assert_equal(func(tmp2), 10) + for dt in dflt: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 10) + assert_equal(func(tmp2), 10) + tmp1[::2] = np.nan + tmp2[::2] = np.nan + assert_equal(func(tmp1), 9) + assert_equal(func(tmp2), 9) + + def test_reduce_complex(self): + assert_equal(np.fmax.reduce([1, 2j]), 1) + assert_equal(np.fmax.reduce([1 + 3j, 2j]), 1 + 3j) + + def test_float_nans(self): + nan = np.nan + arg1 = np.array([0, nan, nan]) + arg2 = np.array([nan, 0, nan]) + out = np.array([0, 0, nan]) + assert_equal(np.fmax(arg1, arg2), out) + + def test_complex_nans(self): + nan = np.nan + for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: + arg1 = np.array([0, cnan, cnan], dtype=complex) + arg2 = np.array([cnan, 0, cnan], dtype=complex) + out = np.array([0, 0, nan], dtype=complex) + assert_equal(np.fmax(arg1, arg2), out) + + def test_precision(self): + dtypes = [np.float16, np.float32, np.float64, np.longdouble] + + for dt in dtypes: + dtmin = np.finfo(dt).min + dtmax = np.finfo(dt).max + d1 = dt(0.1) + d1_next = np.nextafter(d1, np.inf) + + test_cases = [ + # v1 v2 expected + (dtmin, -np.inf, dtmin), + (dtmax, -np.inf, dtmax), + (d1, d1_next, d1_next), + (dtmax, np.nan, dtmax), + ] + + for v1, v2, expected in test_cases: + assert_equal(np.fmax([v1], [v2]), [expected]) + assert_equal(np.fmax.reduce([v1, v2]), expected) + + +class TestFmin(_FilterInvalids): + def test_reduce(self): + dflt = np.typecodes['AllFloat'] + dint = np.typecodes['AllInteger'] + seq1 = np.arange(11) + seq2 = seq1[::-1] + func = np.fmin.reduce + for dt in dint: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 0) + assert_equal(func(tmp2), 0) + for dt in dflt: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 0) + assert_equal(func(tmp2), 0) + tmp1[::2] = np.nan + tmp2[::2] = np.nan + assert_equal(func(tmp1), 1) + assert_equal(func(tmp2), 1) + + def test_reduce_complex(self): + assert_equal(np.fmin.reduce([1, 2j]), 2j) + assert_equal(np.fmin.reduce([1 + 3j, 2j]), 2j) + + def test_float_nans(self): + nan = np.nan + arg1 = np.array([0, nan, nan]) + arg2 = np.array([nan, 0, nan]) + out = np.array([0, 0, nan]) + assert_equal(np.fmin(arg1, arg2), out) + + def test_complex_nans(self): + nan = np.nan + for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: + arg1 = np.array([0, cnan, cnan], dtype=complex) + arg2 = np.array([cnan, 0, cnan], dtype=complex) + out = np.array([0, 0, nan], dtype=complex) + assert_equal(np.fmin(arg1, arg2), out) + + def test_precision(self): + dtypes = [np.float16, np.float32, np.float64, np.longdouble] + + for dt in dtypes: + dtmin = np.finfo(dt).min + dtmax = np.finfo(dt).max + d1 = dt(0.1) + d1_next = np.nextafter(d1, np.inf) + + test_cases = [ + # v1 v2 expected + (dtmin, np.inf, dtmin), + (dtmax, np.inf, dtmax), + (d1, d1_next, d1), + (dtmin, np.nan, dtmin), + ] + + for v1, v2, expected in test_cases: + assert_equal(np.fmin([v1], [v2]), [expected]) + assert_equal(np.fmin.reduce([v1, v2]), expected) + + +class TestBool: + def test_exceptions(self): + a = np.ones(1, dtype=np.bool) + assert_raises(TypeError, np.negative, a) + assert_raises(TypeError, np.positive, a) + assert_raises(TypeError, np.subtract, a, a) + + def test_truth_table_logical(self): + # 2, 3 and 4 serves as true values + input1 = [0, 0, 3, 2] + input2 = [0, 4, 0, 2] + + typecodes = (np.typecodes['AllFloat'] + + np.typecodes['AllInteger'] + + '?') # boolean + for dtype in map(np.dtype, typecodes): + arg1 = np.asarray(input1, dtype=dtype) + arg2 = np.asarray(input2, dtype=dtype) + + # OR + out = [False, True, True, True] + for func in (np.logical_or, np.maximum): + assert_equal(func(arg1, arg2).astype(bool), out) + # AND + out = [False, False, False, True] + for func in (np.logical_and, np.minimum): + assert_equal(func(arg1, arg2).astype(bool), out) + # XOR + out = [False, True, True, False] + for func in (np.logical_xor, np.not_equal): + assert_equal(func(arg1, arg2).astype(bool), out) + + def test_truth_table_bitwise(self): + arg1 = [False, False, True, True] + arg2 = [False, True, False, True] + + out = [False, True, True, True] + assert_equal(np.bitwise_or(arg1, arg2), out) + + out = [False, False, False, True] + assert_equal(np.bitwise_and(arg1, arg2), out) + + out = [False, True, True, False] + assert_equal(np.bitwise_xor(arg1, arg2), out) + + def test_reduce(self): + none = np.array([0, 0, 0, 0], bool) + some = np.array([1, 0, 1, 1], bool) + every = np.array([1, 1, 1, 1], bool) + empty = np.array([], bool) + + arrs = [none, some, every, empty] + + for arr in arrs: + assert_equal(np.logical_and.reduce(arr), all(arr)) + + for arr in arrs: + assert_equal(np.logical_or.reduce(arr), any(arr)) + + for arr in arrs: + assert_equal(np.logical_xor.reduce(arr), arr.sum() % 2 == 1) + + +class TestBitwiseUFuncs: + + _all_ints_bits = [ + np.dtype(c).itemsize * 8 for c in np.typecodes["AllInteger"]] + bitwise_types = [ + np.dtype(c) for c in '?' + np.typecodes["AllInteger"] + 'O'] + bitwise_bits = [ + 2, # boolean type + *_all_ints_bits, # All integers + max(_all_ints_bits) + 1, # Object_ type + ] + + def test_values(self): + for dt in self.bitwise_types: + zeros = np.array([0], dtype=dt) + ones = np.array([-1]).astype(dt) + msg = f"dt = '{dt.char}'" + + assert_equal(np.bitwise_not(zeros), ones, err_msg=msg) + assert_equal(np.bitwise_not(ones), zeros, err_msg=msg) + + assert_equal(np.bitwise_or(zeros, zeros), zeros, err_msg=msg) + assert_equal(np.bitwise_or(zeros, ones), ones, err_msg=msg) + assert_equal(np.bitwise_or(ones, zeros), ones, err_msg=msg) + assert_equal(np.bitwise_or(ones, ones), ones, err_msg=msg) + + assert_equal(np.bitwise_xor(zeros, zeros), zeros, err_msg=msg) + assert_equal(np.bitwise_xor(zeros, ones), ones, err_msg=msg) + assert_equal(np.bitwise_xor(ones, zeros), ones, err_msg=msg) + assert_equal(np.bitwise_xor(ones, ones), zeros, err_msg=msg) + + assert_equal(np.bitwise_and(zeros, zeros), zeros, err_msg=msg) + assert_equal(np.bitwise_and(zeros, ones), zeros, err_msg=msg) + assert_equal(np.bitwise_and(ones, zeros), zeros, err_msg=msg) + assert_equal(np.bitwise_and(ones, ones), ones, err_msg=msg) + + def test_types(self): + for dt in self.bitwise_types: + zeros = np.array([0], dtype=dt) + ones = np.array([-1]).astype(dt) + msg = f"dt = '{dt.char}'" + + assert_(np.bitwise_not(zeros).dtype == dt, msg) + assert_(np.bitwise_or(zeros, zeros).dtype == dt, msg) + assert_(np.bitwise_xor(zeros, zeros).dtype == dt, msg) + assert_(np.bitwise_and(zeros, zeros).dtype == dt, msg) + + def test_identity(self): + assert_(np.bitwise_or.identity == 0, 'bitwise_or') + assert_(np.bitwise_xor.identity == 0, 'bitwise_xor') + assert_(np.bitwise_and.identity == -1, 'bitwise_and') + + def test_reduction(self): + binary_funcs = (np.bitwise_or, np.bitwise_xor, np.bitwise_and) + + for dt in self.bitwise_types: + zeros = np.array([0], dtype=dt) + ones = np.array([-1]).astype(dt) + for f in binary_funcs: + msg = f"dt: '{dt}', f: '{f}'" + assert_equal(f.reduce(zeros), zeros, err_msg=msg) + assert_equal(f.reduce(ones), ones, err_msg=msg) + + # Test empty reduction, no object dtype + for dt in self.bitwise_types[:-1]: + # No object array types + empty = np.array([], dtype=dt) + for f in binary_funcs: + msg = f"dt: '{dt}', f: '{f}'" + tgt = np.array(f.identity).astype(dt) + res = f.reduce(empty) + assert_equal(res, tgt, err_msg=msg) + assert_(res.dtype == tgt.dtype, msg) + + # Empty object arrays use the identity. Note that the types may + # differ, the actual type used is determined by the assign_identity + # function and is not the same as the type returned by the identity + # method. + for f in binary_funcs: + msg = f"dt: '{f}'" + empty = np.array([], dtype=object) + tgt = f.identity + res = f.reduce(empty) + assert_equal(res, tgt, err_msg=msg) + + # Non-empty object arrays do not use the identity + for f in binary_funcs: + msg = f"dt: '{f}'" + btype = np.array([True], dtype=object) + assert_(type(f.reduce(btype)) is bool, msg) + + @pytest.mark.parametrize("input_dtype_obj, bitsize", + zip(bitwise_types, bitwise_bits)) + def test_bitwise_count(self, input_dtype_obj, bitsize): + input_dtype = input_dtype_obj.type + + for i in range(1, bitsize): + num = 2**i - 1 + msg = f"bitwise_count for {num}" + assert i == np.bitwise_count(input_dtype(num)), msg + if np.issubdtype( + input_dtype, np.signedinteger) or input_dtype == np.object_: + assert i == np.bitwise_count(input_dtype(-num)), msg + + a = np.array([2**i - 1 for i in range(1, bitsize)], dtype=input_dtype) + bitwise_count_a = np.bitwise_count(a) + expected = np.arange(1, bitsize, dtype=input_dtype) + + msg = f"array bitwise_count for {input_dtype}" + assert all(bitwise_count_a == expected), msg + + +class TestInt: + def test_logical_not(self): + x = np.ones(10, dtype=np.int16) + o = np.ones(10 * 2, dtype=bool) + tgt = o.copy() + tgt[::2] = False + os = o[::2] + assert_array_equal(np.logical_not(x, out=os), False) + assert_array_equal(o, tgt) + + +class TestFloatingPoint: + def test_floating_point(self): + assert_equal(ncu.FLOATING_POINT_SUPPORT, 1) + + +class TestDegrees: + def test_degrees(self): + assert_almost_equal(ncu.degrees(np.pi), 180.0) + assert_almost_equal(ncu.degrees(-0.5 * np.pi), -90.0) + + +class TestRadians: + def test_radians(self): + assert_almost_equal(ncu.radians(180.0), np.pi) + assert_almost_equal(ncu.radians(-90.0), -0.5 * np.pi) + + +class TestHeavside: + def test_heaviside(self): + x = np.array([[-30.0, -0.1, 0.0, 0.2], [7.5, np.nan, np.inf, -np.inf]]) + expectedhalf = np.array([[0.0, 0.0, 0.5, 1.0], [1.0, np.nan, 1.0, 0.0]]) + expected1 = expectedhalf.copy() + expected1[0, 2] = 1 + + h = ncu.heaviside(x, 0.5) + assert_equal(h, expectedhalf) + + h = ncu.heaviside(x, 1.0) + assert_equal(h, expected1) + + x = x.astype(np.float32) + + h = ncu.heaviside(x, np.float32(0.5)) + assert_equal(h, expectedhalf.astype(np.float32)) + + h = ncu.heaviside(x, np.float32(1.0)) + assert_equal(h, expected1.astype(np.float32)) + + +class TestSign: + def test_sign(self): + a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0]) + out = np.zeros(a.shape) + tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0]) + + with np.errstate(invalid='ignore'): + res = ncu.sign(a) + assert_equal(res, tgt) + res = ncu.sign(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + + def test_sign_complex(self): + a = np.array([ + np.inf, -np.inf, complex(0, np.inf), complex(0, -np.inf), + complex(np.inf, np.inf), complex(np.inf, -np.inf), # nan + np.nan, complex(0, np.nan), complex(np.nan, np.nan), # nan + 0.0, # 0. + 3.0, -3.0, -2j, 3.0 + 4.0j, -8.0 + 6.0j + ]) + out = np.zeros(a.shape, a.dtype) + tgt = np.array([ + 1., -1., 1j, -1j, + ] + [complex(np.nan, np.nan)] * 5 + [ + 0.0, + 1.0, -1.0, -1j, 0.6 + 0.8j, -0.8 + 0.6j]) + + with np.errstate(invalid='ignore'): + res = ncu.sign(a) + assert_equal(res, tgt) + res = ncu.sign(a, out) + assert_(res is out) + assert_equal(res, tgt) + + def test_sign_dtype_object(self): + # In reference to github issue #6229 + + foo = np.array([-.1, 0, .1]) + a = np.sign(foo.astype(object)) + b = np.sign(foo) + + assert_array_equal(a, b) + + def test_sign_dtype_nan_object(self): + # In reference to github issue #6229 + def test_nan(): + foo = np.array([np.nan]) + # FIXME: a not used + a = np.sign(foo.astype(object)) + + assert_raises(TypeError, test_nan) + +class TestMinMax: + def test_minmax_blocked(self): + # simd tests on max/min, test all alignments, slow but important + # for 2 * vz + 2 * (vs - 1) + 1 (unrolled once) + for dt, sz in [(np.float32, 15), (np.float64, 7)]: + for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary', + max_size=sz): + for i in range(inp.size): + inp[:] = np.arange(inp.size, dtype=dt) + inp[i] = np.nan + emsg = lambda: f'{inp!r}\n{msg}' + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', + "invalid value encountered in reduce", + RuntimeWarning) + assert_(np.isnan(inp.max()), msg=emsg) + assert_(np.isnan(inp.min()), msg=emsg) + + inp[i] = 1e10 + assert_equal(inp.max(), 1e10, err_msg=msg) + inp[i] = -1e10 + assert_equal(inp.min(), -1e10, err_msg=msg) + + def test_lower_align(self): + # check data that is not aligned to element size + # i.e doubles are aligned to 4 bytes on i386 + d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) + assert_equal(d.max(), d[0]) + assert_equal(d.min(), d[0]) + + def test_reduce_reorder(self): + # gh 10370, 11029 Some compilers reorder the call to npy_getfloatstatus + # and put it before the call to an intrinsic function that causes + # invalid status to be set. Also make sure warnings are not emitted + for n in (2, 4, 8, 16, 32): + for dt in (np.float32, np.float16, np.complex64): + for r in np.diagflat(np.array([np.nan] * n, dtype=dt)): + assert_equal(np.min(r), np.nan) + + def test_minimize_no_warns(self): + a = np.minimum(np.nan, 1) + assert_equal(a, np.nan) + + +class TestAbsoluteNegative: + def test_abs_neg_blocked(self): + # simd tests on abs, test all alignments for vz + 2 * (vs - 1) + 1 + for dt, sz in [(np.float32, 11), (np.float64, 5)]: + for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary', + max_size=sz): + tgt = [ncu.absolute(i) for i in inp] + np.absolute(inp, out=out) + assert_equal(out, tgt, err_msg=msg) + assert_((out >= 0).all()) + + tgt = [-1 * (i) for i in inp] + np.negative(inp, out=out) + assert_equal(out, tgt, err_msg=msg) + + for v in [np.nan, -np.inf, np.inf]: + for i in range(inp.size): + d = np.arange(inp.size, dtype=dt) + inp[:] = -d + inp[i] = v + d[i] = -v if v == -np.inf else v + assert_array_equal(np.abs(inp), d, err_msg=msg) + np.abs(inp, out=out) + assert_array_equal(out, d, err_msg=msg) + + assert_array_equal(-inp, -1 * inp, err_msg=msg) + d = -1 * inp + np.negative(inp, out=out) + assert_array_equal(out, d, err_msg=msg) + + def test_lower_align(self): + # check data that is not aligned to element size + # i.e doubles are aligned to 4 bytes on i386 + d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) + assert_equal(np.abs(d), d) + assert_equal(np.negative(d), -d) + np.negative(d, out=d) + np.negative(np.ones_like(d), out=d) + np.abs(d, out=d) + np.abs(np.ones_like(d), out=d) + + @pytest.mark.parametrize("dtype", ['d', 'f', 'int32', 'int64']) + @pytest.mark.parametrize("big", [True, False]) + def test_noncontiguous(self, dtype, big): + data = np.array([-1.0, 1.0, -0.0, 0.0, 2.2251e-308, -2.5, 2.5, -6, + 6, -2.2251e-308, -8, 10], dtype=dtype) + expect = np.array([1.0, -1.0, 0.0, -0.0, -2.2251e-308, 2.5, -2.5, 6, + -6, 2.2251e-308, 8, -10], dtype=dtype) + if big: + data = np.repeat(data, 10) + expect = np.repeat(expect, 10) + out = np.ndarray(data.shape, dtype=dtype) + ncontig_in = data[1::2] + ncontig_out = out[1::2] + contig_in = np.array(ncontig_in) + # contig in, contig out + assert_array_equal(np.negative(contig_in), expect[1::2]) + # contig in, ncontig out + assert_array_equal(np.negative(contig_in, out=ncontig_out), + expect[1::2]) + # ncontig in, contig out + assert_array_equal(np.negative(ncontig_in), expect[1::2]) + # ncontig in, ncontig out + assert_array_equal(np.negative(ncontig_in, out=ncontig_out), + expect[1::2]) + # contig in, contig out, nd stride + data_split = np.array(np.array_split(data, 2)) + expect_split = np.array(np.array_split(expect, 2)) + assert_equal(np.negative(data_split), expect_split) + + +class TestPositive: + def test_valid(self): + valid_dtypes = [int, float, complex, object] + for dtype in valid_dtypes: + x = np.arange(5, dtype=dtype) + result = np.positive(x) + assert_equal(x, result, err_msg=str(dtype)) + + def test_invalid(self): + with assert_raises(TypeError): + np.positive(True) + with assert_raises(TypeError): + np.positive(np.datetime64('2000-01-01')) + with assert_raises(TypeError): + np.positive(np.array(['foo'], dtype=str)) + with assert_raises(TypeError): + np.positive(np.array(['bar'], dtype=object)) + + +class TestSpecialMethods: + def test_wrap(self): + + class with_wrap: + def __array__(self, dtype=None, copy=None): + return np.zeros(1) + + def __array_wrap__(self, arr, context, return_scalar): + r = with_wrap() + r.arr = arr + r.context = context + return r + + a = with_wrap() + x = ncu.minimum(a, a) + assert_equal(x.arr, np.zeros(1)) + func, args, i = x.context + assert_(func is ncu.minimum) + assert_equal(len(args), 2) + assert_equal(args[0], a) + assert_equal(args[1], a) + assert_equal(i, 0) + + def test_wrap_out(self): + # Calling convention for out should not affect how special methods are + # called + + class StoreArrayPrepareWrap(np.ndarray): + _wrap_args = None + _prepare_args = None + + def __new__(cls): + return np.zeros(()).view(cls) + + def __array_wrap__(self, obj, context, return_scalar): + self._wrap_args = context[1] + return obj + + @property + def args(self): + # We need to ensure these are fetched at the same time, before + # any other ufuncs are called by the assertions + return self._wrap_args + + def __repr__(self): + return "a" # for short test output + + def do_test(f_call, f_expected): + a = StoreArrayPrepareWrap() + + f_call(a) + + w = a.args + expected = f_expected(a) + try: + assert w == expected + except AssertionError as e: + # assert_equal produces truly useless error messages + raise AssertionError("\n".join([ + "Bad arguments passed in ufunc call", + f" expected: {expected}", + f" __array_wrap__ got: {w}" + ])) + + # method not on the out argument + do_test(lambda a: np.add(a, 0), lambda a: (a, 0)) + do_test(lambda a: np.add(a, 0, None), lambda a: (a, 0)) + do_test(lambda a: np.add(a, 0, out=None), lambda a: (a, 0)) + do_test(lambda a: np.add(a, 0, out=(None,)), lambda a: (a, 0)) + + # method on the out argument + do_test(lambda a: np.add(0, 0, a), lambda a: (0, 0, a)) + do_test(lambda a: np.add(0, 0, out=a), lambda a: (0, 0, a)) + do_test(lambda a: np.add(0, 0, out=(a,)), lambda a: (0, 0, a)) + + # Also check the where mask handling: + out = np.zeros([1], dtype=float) + do_test(lambda a: np.add(a, 0, where=False, out=None), lambda a: (a, 0)) + do_test(lambda a: np.add(0, 0, a, where=False), lambda a: (0, 0, a)) + + def test_wrap_with_iterable(self): + # test fix for bug #1026: + + class with_wrap(np.ndarray): + __array_priority__ = 10 + + def __new__(cls): + return np.asarray(1).view(cls).copy() + + def __array_wrap__(self, arr, context, return_scalar): + return arr.view(type(self)) + + a = with_wrap() + x = ncu.multiply(a, (1, 2, 3)) + assert_(isinstance(x, with_wrap)) + assert_array_equal(x, np.array((1, 2, 3))) + + def test_priority_with_scalar(self): + # test fix for bug #826: + + class A(np.ndarray): + __array_priority__ = 10 + + def __new__(cls): + return np.asarray(1.0, 'float64').view(cls).copy() + + a = A() + x = np.float64(1) * a + assert_(isinstance(x, A)) + assert_array_equal(x, np.array(1)) + + def test_priority(self): + + class A: + def __array__(self, dtype=None, copy=None): + return np.zeros(1) + + def __array_wrap__(self, arr, context, return_scalar): + r = type(self)() + r.arr = arr + r.context = context + return r + + class B(A): + __array_priority__ = 20. + + class C(A): + __array_priority__ = 40. + + x = np.zeros(1) + a = A() + b = B() + c = C() + f = ncu.minimum + assert_(type(f(x, x)) is np.ndarray) + assert_(type(f(x, a)) is A) + assert_(type(f(x, b)) is B) + assert_(type(f(x, c)) is C) + assert_(type(f(a, x)) is A) + assert_(type(f(b, x)) is B) + assert_(type(f(c, x)) is C) + + assert_(type(f(a, a)) is A) + assert_(type(f(a, b)) is B) + assert_(type(f(b, a)) is B) + assert_(type(f(b, b)) is B) + assert_(type(f(b, c)) is C) + assert_(type(f(c, b)) is C) + assert_(type(f(c, c)) is C) + + assert_(type(ncu.exp(a) is A)) + assert_(type(ncu.exp(b) is B)) + assert_(type(ncu.exp(c) is C)) + + def test_failing_wrap(self): + + class A: + def __array__(self, dtype=None, copy=None): + return np.zeros(2) + + def __array_wrap__(self, arr, context, return_scalar): + raise RuntimeError + + a = A() + assert_raises(RuntimeError, ncu.maximum, a, a) + assert_raises(RuntimeError, ncu.maximum.reduce, a) + + def test_failing_out_wrap(self): + + singleton = np.array([1.0]) + + class Ok(np.ndarray): + def __array_wrap__(self, obj, context, return_scalar): + return singleton + + class Bad(np.ndarray): + def __array_wrap__(self, obj, context, return_scalar): + raise RuntimeError + + ok = np.empty(1).view(Ok) + bad = np.empty(1).view(Bad) + # double-free (segfault) of "ok" if "bad" raises an exception + for i in range(10): + assert_raises(RuntimeError, ncu.frexp, 1, ok, bad) + + def test_none_wrap(self): + # Tests that issue #8507 is resolved. Previously, this would segfault + + class A: + def __array__(self, dtype=None, copy=None): + return np.zeros(1) + + def __array_wrap__(self, arr, context=None, return_scalar=False): + return None + + a = A() + assert_equal(ncu.maximum(a, a), None) + + def test_default_prepare(self): + + class with_wrap: + __array_priority__ = 10 + + def __array__(self, dtype=None, copy=None): + return np.zeros(1) + + def __array_wrap__(self, arr, context, return_scalar): + return arr + + a = with_wrap() + x = ncu.minimum(a, a) + assert_equal(x, np.zeros(1)) + assert_equal(type(x), np.ndarray) + + def test_array_too_many_args(self): + + class A: + def __array__(self, dtype, context, copy=None): + return np.zeros(1) + + a = A() + assert_raises_regex(TypeError, '2 required positional', np.sum, a) + + def test_ufunc_override(self): + # check override works even with instance with high priority. + class A: + def __array_ufunc__(self, func, method, *inputs, **kwargs): + return self, func, method, inputs, kwargs + + class MyNDArray(np.ndarray): + __array_priority__ = 100 + + a = A() + b = np.array([1]).view(MyNDArray) + res0 = np.multiply(a, b) + res1 = np.multiply(b, b, out=a) + + # self + assert_equal(res0[0], a) + assert_equal(res1[0], a) + assert_equal(res0[1], np.multiply) + assert_equal(res1[1], np.multiply) + assert_equal(res0[2], '__call__') + assert_equal(res1[2], '__call__') + assert_equal(res0[3], (a, b)) + assert_equal(res1[3], (b, b)) + assert_equal(res0[4], {}) + assert_equal(res1[4], {'out': (a,)}) + + def test_ufunc_override_mro(self): + + # Some multi arg functions for testing. + def tres_mul(a, b, c): + return a * b * c + + def quatro_mul(a, b, c, d): + return a * b * c * d + + # Make these into ufuncs. + three_mul_ufunc = np.frompyfunc(tres_mul, 3, 1) + four_mul_ufunc = np.frompyfunc(quatro_mul, 4, 1) + + class A: + def __array_ufunc__(self, func, method, *inputs, **kwargs): + return "A" + + class ASub(A): + def __array_ufunc__(self, func, method, *inputs, **kwargs): + return "ASub" + + class B: + def __array_ufunc__(self, func, method, *inputs, **kwargs): + return "B" + + class C: + def __init__(self): + self.count = 0 + + def __array_ufunc__(self, func, method, *inputs, **kwargs): + self.count += 1 + return NotImplemented + + class CSub(C): + def __array_ufunc__(self, func, method, *inputs, **kwargs): + self.count += 1 + return NotImplemented + + a = A() + a_sub = ASub() + b = B() + c = C() + + # Standard + res = np.multiply(a, a_sub) + assert_equal(res, "ASub") + res = np.multiply(a_sub, b) + assert_equal(res, "ASub") + + # With 1 NotImplemented + res = np.multiply(c, a) + assert_equal(res, "A") + assert_equal(c.count, 1) + # Check our counter works, so we can trust tests below. + res = np.multiply(c, a) + assert_equal(c.count, 2) + + # Both NotImplemented. + c = C() + c_sub = CSub() + assert_raises(TypeError, np.multiply, c, c_sub) + assert_equal(c.count, 1) + assert_equal(c_sub.count, 1) + c.count = c_sub.count = 0 + assert_raises(TypeError, np.multiply, c_sub, c) + assert_equal(c.count, 1) + assert_equal(c_sub.count, 1) + c.count = 0 + assert_raises(TypeError, np.multiply, c, c) + assert_equal(c.count, 1) + c.count = 0 + assert_raises(TypeError, np.multiply, 2, c) + assert_equal(c.count, 1) + + # Ternary testing. + assert_equal(three_mul_ufunc(a, 1, 2), "A") + assert_equal(three_mul_ufunc(1, a, 2), "A") + assert_equal(three_mul_ufunc(1, 2, a), "A") + + assert_equal(three_mul_ufunc(a, a, 6), "A") + assert_equal(three_mul_ufunc(a, 2, a), "A") + assert_equal(three_mul_ufunc(a, 2, b), "A") + assert_equal(three_mul_ufunc(a, 2, a_sub), "ASub") + assert_equal(three_mul_ufunc(a, a_sub, 3), "ASub") + c.count = 0 + assert_equal(three_mul_ufunc(c, a_sub, 3), "ASub") + assert_equal(c.count, 1) + c.count = 0 + assert_equal(three_mul_ufunc(1, a_sub, c), "ASub") + assert_equal(c.count, 0) + + c.count = 0 + assert_equal(three_mul_ufunc(a, b, c), "A") + assert_equal(c.count, 0) + c_sub.count = 0 + assert_equal(three_mul_ufunc(a, b, c_sub), "A") + assert_equal(c_sub.count, 0) + assert_equal(three_mul_ufunc(1, 2, b), "B") + + assert_raises(TypeError, three_mul_ufunc, 1, 2, c) + assert_raises(TypeError, three_mul_ufunc, c_sub, 2, c) + assert_raises(TypeError, three_mul_ufunc, c_sub, 2, 3) + + # Quaternary testing. + assert_equal(four_mul_ufunc(a, 1, 2, 3), "A") + assert_equal(four_mul_ufunc(1, a, 2, 3), "A") + assert_equal(four_mul_ufunc(1, 1, a, 3), "A") + assert_equal(four_mul_ufunc(1, 1, 2, a), "A") + + assert_equal(four_mul_ufunc(a, b, 2, 3), "A") + assert_equal(four_mul_ufunc(1, a, 2, b), "A") + assert_equal(four_mul_ufunc(b, 1, a, 3), "B") + assert_equal(four_mul_ufunc(a_sub, 1, 2, a), "ASub") + assert_equal(four_mul_ufunc(a, 1, 2, a_sub), "ASub") + + c = C() + c_sub = CSub() + assert_raises(TypeError, four_mul_ufunc, 1, 2, 3, c) + assert_equal(c.count, 1) + c.count = 0 + assert_raises(TypeError, four_mul_ufunc, 1, 2, c_sub, c) + assert_equal(c_sub.count, 1) + assert_equal(c.count, 1) + c2 = C() + c.count = c_sub.count = 0 + assert_raises(TypeError, four_mul_ufunc, 1, c, c_sub, c2) + assert_equal(c_sub.count, 1) + assert_equal(c.count, 1) + assert_equal(c2.count, 0) + c.count = c2.count = c_sub.count = 0 + assert_raises(TypeError, four_mul_ufunc, c2, c, c_sub, c) + assert_equal(c_sub.count, 1) + assert_equal(c.count, 0) + assert_equal(c2.count, 1) + + def test_ufunc_override_methods(self): + + class A: + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return self, ufunc, method, inputs, kwargs + + # __call__ + a = A() + with assert_raises(TypeError): + np.multiply.__call__(1, a, foo='bar', answer=42) + res = np.multiply.__call__(1, a, subok='bar', where=42) + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], '__call__') + assert_equal(res[3], (1, a)) + assert_equal(res[4], {'subok': 'bar', 'where': 42}) + + # __call__, wrong args + assert_raises(TypeError, np.multiply, a) + assert_raises(TypeError, np.multiply, a, a, a, a) + assert_raises(TypeError, np.multiply, a, a, sig='a', signature='a') + assert_raises(TypeError, ncu_tests.inner1d, a, a, axis=0, axes=[0, 0]) + + # reduce, positional args + res = np.multiply.reduce(a, 'axis0', 'dtype0', 'out0', 'keep0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduce') + assert_equal(res[3], (a,)) + assert_equal(res[4], {'dtype': 'dtype0', + 'out': ('out0',), + 'keepdims': 'keep0', + 'axis': 'axis0'}) + + # reduce, kwargs + res = np.multiply.reduce(a, axis='axis0', dtype='dtype0', out='out0', + keepdims='keep0', initial='init0', + where='where0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduce') + assert_equal(res[3], (a,)) + assert_equal(res[4], {'dtype': 'dtype0', + 'out': ('out0',), + 'keepdims': 'keep0', + 'axis': 'axis0', + 'initial': 'init0', + 'where': 'where0'}) + # reduce, kwargs, out=None is removed + res = np.multiply.reduce(a, axis='axis0', dtype='dtype0', out=None, + keepdims='keep0', initial='init0', + where='where0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduce') + assert_equal(res[3], (a,)) + assert_equal(res[4], {'dtype': 'dtype0', + 'keepdims': 'keep0', + 'axis': 'axis0', + 'initial': 'init0', + 'where': 'where0'}) + + # reduce, output equal to None removed, but not other explicit ones, + # even if they are at their default value. + res = np.multiply.reduce(a, 0, None, None, False) + assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False}) + res = np.multiply.reduce(a, out=None, axis=0, keepdims=True) + assert_equal(res[4], {'axis': 0, 'keepdims': True}) + res = np.multiply.reduce(a, None, out=(None,), dtype=None) + assert_equal(res[4], {'axis': None, 'dtype': None}) + res = np.multiply.reduce(a, 0, None, None, False, 2, True) + assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, + 'initial': 2, 'where': True}) + # np._NoValue ignored for initial + res = np.multiply.reduce(a, 0, None, None, False, + np._NoValue, True) + assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, + 'where': True}) + # None kept for initial, True for where. + res = np.multiply.reduce(a, 0, None, None, False, None, True) + assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, + 'initial': None, 'where': True}) + + # reduce, wrong args + assert_raises(ValueError, np.multiply.reduce, a, out=()) + assert_raises(ValueError, np.multiply.reduce, a, out=('out0', 'out1')) + assert_raises(TypeError, np.multiply.reduce, a, 'axis0', axis='axis0') + + # accumulate, pos args + res = np.multiply.accumulate(a, 'axis0', 'dtype0', 'out0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'accumulate') + assert_equal(res[3], (a,)) + assert_equal(res[4], {'dtype': 'dtype0', + 'out': ('out0',), + 'axis': 'axis0'}) + + # accumulate, kwargs + res = np.multiply.accumulate(a, axis='axis0', dtype='dtype0', + out='out0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'accumulate') + assert_equal(res[3], (a,)) + assert_equal(res[4], {'dtype': 'dtype0', + 'out': ('out0',), + 'axis': 'axis0'}) + + # accumulate, output equal to None removed. + res = np.multiply.accumulate(a, 0, None, None) + assert_equal(res[4], {'axis': 0, 'dtype': None}) + res = np.multiply.accumulate(a, out=None, axis=0, dtype='dtype1') + assert_equal(res[4], {'axis': 0, 'dtype': 'dtype1'}) + res = np.multiply.accumulate(a, None, out=(None,), dtype=None) + assert_equal(res[4], {'axis': None, 'dtype': None}) + + # accumulate, wrong args + assert_raises(ValueError, np.multiply.accumulate, a, out=()) + assert_raises(ValueError, np.multiply.accumulate, a, + out=('out0', 'out1')) + assert_raises(TypeError, np.multiply.accumulate, a, + 'axis0', axis='axis0') + + # reduceat, pos args + res = np.multiply.reduceat(a, [4, 2], 'axis0', 'dtype0', 'out0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduceat') + assert_equal(res[3], (a, [4, 2])) + assert_equal(res[4], {'dtype': 'dtype0', + 'out': ('out0',), + 'axis': 'axis0'}) + + # reduceat, kwargs + res = np.multiply.reduceat(a, [4, 2], axis='axis0', dtype='dtype0', + out='out0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduceat') + assert_equal(res[3], (a, [4, 2])) + assert_equal(res[4], {'dtype': 'dtype0', + 'out': ('out0',), + 'axis': 'axis0'}) + + # reduceat, output equal to None removed. + res = np.multiply.reduceat(a, [4, 2], 0, None, None) + assert_equal(res[4], {'axis': 0, 'dtype': None}) + res = np.multiply.reduceat(a, [4, 2], axis=None, out=None, dtype='dt') + assert_equal(res[4], {'axis': None, 'dtype': 'dt'}) + res = np.multiply.reduceat(a, [4, 2], None, None, out=(None,)) + assert_equal(res[4], {'axis': None, 'dtype': None}) + + # reduceat, wrong args + assert_raises(ValueError, np.multiply.reduce, a, [4, 2], out=()) + assert_raises(ValueError, np.multiply.reduce, a, [4, 2], + out=('out0', 'out1')) + assert_raises(TypeError, np.multiply.reduce, a, [4, 2], + 'axis0', axis='axis0') + + # outer + res = np.multiply.outer(a, 42) + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'outer') + assert_equal(res[3], (a, 42)) + assert_equal(res[4], {}) + + # outer, wrong args + assert_raises(TypeError, np.multiply.outer, a) + assert_raises(TypeError, np.multiply.outer, a, a, a, a) + assert_raises(TypeError, np.multiply.outer, a, a, sig='a', signature='a') + + # at + res = np.multiply.at(a, [4, 2], 'b0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'at') + assert_equal(res[3], (a, [4, 2], 'b0')) + + # at, wrong args + assert_raises(TypeError, np.multiply.at, a) + assert_raises(TypeError, np.multiply.at, a, a, a, a) + + def test_ufunc_override_out(self): + + class A: + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return kwargs + + class B: + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return kwargs + + a = A() + b = B() + res0 = np.multiply(a, b, 'out_arg') + res1 = np.multiply(a, b, out='out_arg') + res2 = np.multiply(2, b, 'out_arg') + res3 = np.multiply(3, b, out='out_arg') + res4 = np.multiply(a, 4, 'out_arg') + res5 = np.multiply(a, 5, out='out_arg') + + assert_equal(res0['out'][0], 'out_arg') + assert_equal(res1['out'][0], 'out_arg') + assert_equal(res2['out'][0], 'out_arg') + assert_equal(res3['out'][0], 'out_arg') + assert_equal(res4['out'][0], 'out_arg') + assert_equal(res5['out'][0], 'out_arg') + + # ufuncs with multiple output modf and frexp. + res6 = np.modf(a, 'out0', 'out1') + res7 = np.frexp(a, 'out0', 'out1') + assert_equal(res6['out'][0], 'out0') + assert_equal(res6['out'][1], 'out1') + assert_equal(res7['out'][0], 'out0') + assert_equal(res7['out'][1], 'out1') + + # While we're at it, check that default output is never passed on. + assert_(np.sin(a, None) == {}) + assert_(np.sin(a, out=None) == {}) + assert_(np.sin(a, out=(None,)) == {}) + assert_(np.modf(a, None) == {}) + assert_(np.modf(a, None, None) == {}) + assert_(np.modf(a, out=(None, None)) == {}) + with assert_raises(TypeError): + # Out argument must be tuple, since there are multiple outputs. + np.modf(a, out=None) + + # don't give positional and output argument, or too many arguments. + # wrong number of arguments in the tuple is an error too. + assert_raises(TypeError, np.multiply, a, b, 'one', out='two') + assert_raises(TypeError, np.multiply, a, b, 'one', 'two') + assert_raises(ValueError, np.multiply, a, b, out=('one', 'two')) + assert_raises(TypeError, np.multiply, a, out=()) + assert_raises(TypeError, np.modf, a, 'one', out=('two', 'three')) + assert_raises(TypeError, np.modf, a, 'one', 'two', 'three') + assert_raises(ValueError, np.modf, a, out=('one', 'two', 'three')) + assert_raises(ValueError, np.modf, a, out=('one',)) + + def test_ufunc_override_where(self): + + class OverriddenArrayOld(np.ndarray): + + def _unwrap(self, objs): + cls = type(self) + result = [] + for obj in objs: + if isinstance(obj, cls): + obj = np.array(obj) + elif type(obj) != np.ndarray: + return NotImplemented + result.append(obj) + return result + + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + + inputs = self._unwrap(inputs) + if inputs is NotImplemented: + return NotImplemented + + kwargs = kwargs.copy() + if "out" in kwargs: + kwargs["out"] = self._unwrap(kwargs["out"])[0] + if kwargs["out"] is NotImplemented: + return NotImplemented + + r = super().__array_ufunc__(ufunc, method, *inputs, **kwargs) + if r is not NotImplemented: + r = r.view(type(self)) + + return r + + class OverriddenArrayNew(OverriddenArrayOld): + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + + kwargs = kwargs.copy() + if "where" in kwargs: + kwargs["where"] = self._unwrap((kwargs["where"], )) + if kwargs["where"] is NotImplemented: + return NotImplemented + else: + kwargs["where"] = kwargs["where"][0] + + r = super().__array_ufunc__(ufunc, method, *inputs, **kwargs) + if r is not NotImplemented: + r = r.view(type(self)) + + return r + + ufunc = np.negative + + array = np.array([1, 2, 3]) + where = np.array([True, False, True]) + out = np.zeros(3, dtype=array.dtype) + expected = ufunc(array, where=where, out=out) + + with pytest.raises(TypeError): + ufunc( + array, + where=where.view(OverriddenArrayOld), + out=out, + ) + + result_1 = ufunc( + array, + where=where.view(OverriddenArrayNew), + out=out, + ) + assert isinstance(result_1, OverriddenArrayNew) + assert np.all(np.array(result_1) == expected, where=where) + + result_2 = ufunc( + array.view(OverriddenArrayNew), + where=where.view(OverriddenArrayNew), + out=out.view(OverriddenArrayNew), + ) + assert isinstance(result_2, OverriddenArrayNew) + assert np.all(np.array(result_2) == expected, where=where) + + def test_ufunc_override_exception(self): + + class A: + def __array_ufunc__(self, *a, **kwargs): + raise ValueError("oops") + + a = A() + assert_raises(ValueError, np.negative, 1, out=a) + assert_raises(ValueError, np.negative, a) + assert_raises(ValueError, np.divide, 1., a) + + def test_ufunc_override_not_implemented(self): + + class A: + def __array_ufunc__(self, *args, **kwargs): + return NotImplemented + + msg = ("operand type(s) all returned NotImplemented from " + "__array_ufunc__(, '__call__', <*>): 'A'") + with assert_raises_regex(TypeError, fnmatch.translate(msg)): + np.negative(A()) + + msg = ("operand type(s) all returned NotImplemented from " + "__array_ufunc__(, '__call__', <*>, , " + "out=(1,)): 'A', 'object', 'int'") + with assert_raises_regex(TypeError, fnmatch.translate(msg)): + np.add(A(), object(), out=1) + + def test_ufunc_override_disabled(self): + + class OptOut: + __array_ufunc__ = None + + opt_out = OptOut() + + # ufuncs always raise + msg = "operand 'OptOut' does not support ufuncs" + with assert_raises_regex(TypeError, msg): + np.add(opt_out, 1) + with assert_raises_regex(TypeError, msg): + np.add(1, opt_out) + with assert_raises_regex(TypeError, msg): + np.negative(opt_out) + + # opt-outs still hold even when other arguments have pathological + # __array_ufunc__ implementations + + class GreedyArray: + def __array_ufunc__(self, *args, **kwargs): + return self + + greedy = GreedyArray() + assert_(np.negative(greedy) is greedy) + with assert_raises_regex(TypeError, msg): + np.add(greedy, opt_out) + with assert_raises_regex(TypeError, msg): + np.add(greedy, 1, out=opt_out) + + def test_gufunc_override(self): + # gufunc are just ufunc instances, but follow a different path, + # so check __array_ufunc__ overrides them properly. + class A: + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return self, ufunc, method, inputs, kwargs + + inner1d = ncu_tests.inner1d + a = A() + res = inner1d(a, a) + assert_equal(res[0], a) + assert_equal(res[1], inner1d) + assert_equal(res[2], '__call__') + assert_equal(res[3], (a, a)) + assert_equal(res[4], {}) + + res = inner1d(1, 1, out=a) + assert_equal(res[0], a) + assert_equal(res[1], inner1d) + assert_equal(res[2], '__call__') + assert_equal(res[3], (1, 1)) + assert_equal(res[4], {'out': (a,)}) + + # wrong number of arguments in the tuple is an error too. + assert_raises(TypeError, inner1d, a, out='two') + assert_raises(TypeError, inner1d, a, a, 'one', out='two') + assert_raises(TypeError, inner1d, a, a, 'one', 'two') + assert_raises(ValueError, inner1d, a, a, out=('one', 'two')) + assert_raises(ValueError, inner1d, a, a, out=()) + + def test_ufunc_override_with_super(self): + # NOTE: this class is used in doc/source/user/basics.subclassing.rst + # if you make any changes here, do update it there too. + class A(np.ndarray): + def __array_ufunc__(self, ufunc, method, *inputs, out=None, **kwargs): + args = [] + in_no = [] + for i, input_ in enumerate(inputs): + if isinstance(input_, A): + in_no.append(i) + args.append(input_.view(np.ndarray)) + else: + args.append(input_) + + outputs = out + out_no = [] + if outputs: + out_args = [] + for j, output in enumerate(outputs): + if isinstance(output, A): + out_no.append(j) + out_args.append(output.view(np.ndarray)) + else: + out_args.append(output) + kwargs['out'] = tuple(out_args) + else: + outputs = (None,) * ufunc.nout + + info = {} + if in_no: + info['inputs'] = in_no + if out_no: + info['outputs'] = out_no + + results = super().__array_ufunc__(ufunc, method, + *args, **kwargs) + if results is NotImplemented: + return NotImplemented + + if method == 'at': + if isinstance(inputs[0], A): + inputs[0].info = info + return + + if ufunc.nout == 1: + results = (results,) + + results = tuple((np.asarray(result).view(A) + if output is None else output) + for result, output in zip(results, outputs)) + if results and isinstance(results[0], A): + results[0].info = info + + return results[0] if len(results) == 1 else results + + class B: + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + if any(isinstance(input_, A) for input_ in inputs): + return "A!" + else: + return NotImplemented + + d = np.arange(5.) + # 1 input, 1 output + a = np.arange(5.).view(A) + b = np.sin(a) + check = np.sin(d) + assert_(np.all(check == b)) + assert_equal(b.info, {'inputs': [0]}) + b = np.sin(d, out=(a,)) + assert_(np.all(check == b)) + assert_equal(b.info, {'outputs': [0]}) + assert_(b is a) + a = np.arange(5.).view(A) + b = np.sin(a, out=a) + assert_(np.all(check == b)) + assert_equal(b.info, {'inputs': [0], 'outputs': [0]}) + + # 1 input, 2 outputs + a = np.arange(5.).view(A) + b1, b2 = np.modf(a) + assert_equal(b1.info, {'inputs': [0]}) + b1, b2 = np.modf(d, out=(None, a)) + assert_(b2 is a) + assert_equal(b1.info, {'outputs': [1]}) + a = np.arange(5.).view(A) + b = np.arange(5.).view(A) + c1, c2 = np.modf(a, out=(a, b)) + assert_(c1 is a) + assert_(c2 is b) + assert_equal(c1.info, {'inputs': [0], 'outputs': [0, 1]}) + + # 2 input, 1 output + a = np.arange(5.).view(A) + b = np.arange(5.).view(A) + c = np.add(a, b, out=a) + assert_(c is a) + assert_equal(c.info, {'inputs': [0, 1], 'outputs': [0]}) + # some tests with a non-ndarray subclass + a = np.arange(5.) + b = B() + assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented) + assert_(b.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented) + assert_raises(TypeError, np.add, a, b) + a = a.view(A) + assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented) + assert_(b.__array_ufunc__(np.add, '__call__', a, b) == "A!") + assert_(np.add(a, b) == "A!") + # regression check for gh-9102 -- tests ufunc.reduce implicitly. + d = np.array([[1, 2, 3], [1, 2, 3]]) + a = d.view(A) + c = a.any() + check = d.any() + assert_equal(c, check) + assert_(c.info, {'inputs': [0]}) + c = a.max() + check = d.max() + assert_equal(c, check) + assert_(c.info, {'inputs': [0]}) + b = np.array(0).view(A) + c = a.max(out=b) + assert_equal(c, check) + assert_(c is b) + assert_(c.info, {'inputs': [0], 'outputs': [0]}) + check = a.max(axis=0) + b = np.zeros_like(check).view(A) + c = a.max(axis=0, out=b) + assert_equal(c, check) + assert_(c is b) + assert_(c.info, {'inputs': [0], 'outputs': [0]}) + # simple explicit tests of reduce, accumulate, reduceat + check = np.add.reduce(d, axis=1) + c = np.add.reduce(a, axis=1) + assert_equal(c, check) + assert_(c.info, {'inputs': [0]}) + b = np.zeros_like(c) + c = np.add.reduce(a, 1, None, b) + assert_equal(c, check) + assert_(c is b) + assert_(c.info, {'inputs': [0], 'outputs': [0]}) + check = np.add.accumulate(d, axis=0) + c = np.add.accumulate(a, axis=0) + assert_equal(c, check) + assert_(c.info, {'inputs': [0]}) + b = np.zeros_like(c) + c = np.add.accumulate(a, 0, None, b) + assert_equal(c, check) + assert_(c is b) + assert_(c.info, {'inputs': [0], 'outputs': [0]}) + indices = [0, 2, 1] + check = np.add.reduceat(d, indices, axis=1) + c = np.add.reduceat(a, indices, axis=1) + assert_equal(c, check) + assert_(c.info, {'inputs': [0]}) + b = np.zeros_like(c) + c = np.add.reduceat(a, indices, 1, None, b) + assert_equal(c, check) + assert_(c is b) + assert_(c.info, {'inputs': [0], 'outputs': [0]}) + # and a few tests for at + d = np.array([[1, 2, 3], [1, 2, 3]]) + check = d.copy() + a = d.copy().view(A) + np.add.at(check, ([0, 1], [0, 2]), 1.) + np.add.at(a, ([0, 1], [0, 2]), 1.) + assert_equal(a, check) + assert_(a.info, {'inputs': [0]}) + b = np.array(1.).view(A) + a = d.copy().view(A) + np.add.at(a, ([0, 1], [0, 2]), b) + assert_equal(a, check) + assert_(a.info, {'inputs': [0, 2]}) + + def test_array_ufunc_direct_call(self): + # This is mainly a regression test for gh-24023 (shouldn't segfault) + a = np.array(1) + with pytest.raises(TypeError): + a.__array_ufunc__() + + # No kwargs means kwargs may be NULL on the C-level + with pytest.raises(TypeError): + a.__array_ufunc__(1, 2) + + # And the same with a valid call: + res = a.__array_ufunc__(np.add, "__call__", a, a) + assert_array_equal(res, a + a) + + @pytest.mark.thread_unsafe(reason="modifies global module") + @pytest.mark.skipif(IS_PYPY, reason="__signature__ descriptor dance fails") + def test_ufunc_docstring(self): + original_doc = np.add.__doc__ + new_doc = "new docs" + expected_dict = ( + {} if IS_PYPY else {"__module__": "numpy", "__qualname__": "add"} + ) + expected_dict["__signature__"] = inspect.signature(np.add) + + np.add.__doc__ = new_doc + assert np.add.__doc__ == new_doc + assert np.add.__dict__["__doc__"] == new_doc + + del np.add.__doc__ + assert np.add.__doc__ == original_doc + assert np.add.__dict__ == expected_dict + + np.add.__dict__["other"] = 1 + np.add.__dict__["__doc__"] = new_doc + assert np.add.__doc__ == new_doc + + del np.add.__dict__["__doc__"] + assert np.add.__doc__ == original_doc + del np.add.__dict__["other"] + assert np.add.__dict__ == expected_dict + + +class TestChoose: + def test_mixed(self): + c = np.array([True, True]) + a = np.array([True, True]) + assert_equal(np.choose(c, (a, 1)), np.array([1, 1])) + + +class TestRationalFunctions: + def test_lcm(self): + self._test_lcm_inner(np.int16) + self._test_lcm_inner(np.uint16) + + def test_lcm_object(self): + self._test_lcm_inner(np.object_) + + def test_gcd(self): + self._test_gcd_inner(np.int16) + self._test_lcm_inner(np.uint16) + + def test_gcd_object(self): + self._test_gcd_inner(np.object_) + + def _test_lcm_inner(self, dtype): + # basic use + a = np.array([12, 120], dtype=dtype) + b = np.array([20, 200], dtype=dtype) + assert_equal(np.lcm(a, b), [60, 600]) + + if not issubclass(dtype, np.unsignedinteger): + # negatives are ignored + a = np.array([12, -12, 12, -12], dtype=dtype) + b = np.array([20, 20, -20, -20], dtype=dtype) + assert_equal(np.lcm(a, b), [60] * 4) + + # reduce + a = np.array([3, 12, 20], dtype=dtype) + assert_equal(np.lcm.reduce([3, 12, 20]), 60) + + # broadcasting, and a test including 0 + a = np.arange(6).astype(dtype) + b = 20 + assert_equal(np.lcm(a, b), [0, 20, 20, 60, 20, 20]) + + def _test_gcd_inner(self, dtype): + # basic use + a = np.array([12, 120], dtype=dtype) + b = np.array([20, 200], dtype=dtype) + assert_equal(np.gcd(a, b), [4, 40]) + + if not issubclass(dtype, np.unsignedinteger): + # negatives are ignored + a = np.array([12, -12, 12, -12], dtype=dtype) + b = np.array([20, 20, -20, -20], dtype=dtype) + assert_equal(np.gcd(a, b), [4] * 4) + + # reduce + a = np.array([15, 25, 35], dtype=dtype) + assert_equal(np.gcd.reduce(a), 5) + + # broadcasting, and a test including 0 + a = np.arange(6).astype(dtype) + b = 20 + assert_equal(np.gcd(a, b), [20, 1, 2, 1, 4, 5]) + + def test_lcm_overflow(self): + # verify that we don't overflow when a*b does overflow + big = np.int32(np.iinfo(np.int32).max // 11) + a = 2 * big + b = 5 * big + assert_equal(np.lcm(a, b), 10 * big) + + def test_gcd_overflow(self): + for dtype in (np.int32, np.int64): + # verify that we don't overflow when taking abs(x) + # not relevant for lcm, where the result is unrepresentable anyway + a = dtype(np.iinfo(dtype).min) # negative power of two + q = -(a // 4) + assert_equal(np.gcd(a, q * 3), q) + assert_equal(np.gcd(a, -q * 3), q) + + def test_decimal(self): + from decimal import Decimal + a = np.array([1, 1, -1, -1]) * Decimal('0.20') + b = np.array([1, -1, 1, -1]) * Decimal('0.12') + + assert_equal(np.gcd(a, b), 4 * [Decimal('0.04')]) + assert_equal(np.lcm(a, b), 4 * [Decimal('0.60')]) + + def test_float(self): + # not well-defined on float due to rounding errors + assert_raises(TypeError, np.gcd, 0.3, 0.4) + assert_raises(TypeError, np.lcm, 0.3, 0.4) + + def test_huge_integers(self): + # Converting to an array first is a bit different as it means we + # have an explicit object dtype: + assert_equal(np.array(2**200), 2**200) + # Special promotion rules should ensure that this also works for + # two Python integers (even if slow). + # (We do this for comparisons, as the result is always bool and + # we also special case array comparisons with Python integers) + np.equal(2**200, 2**200) + + # But, we cannot do this when it would affect the result dtype: + with pytest.raises(OverflowError): + np.gcd(2**100, 3**100) + + # Asking for `object` explicitly is fine, though: + assert np.gcd(2**100, 3**100, dtype=object) == 1 + + # As of now, the below work, because it is using arrays (which + # will be object arrays) + a = np.array(2**100 * 3**5) + b = np.array([2**100 * 5**7, 2**50 * 3**10]) + assert_equal(np.gcd(a, b), [2**100, 2**50 * 3**5]) + assert_equal(np.lcm(a, b), [2**100 * 3**5 * 5**7, 2**100 * 3**10]) + + def test_inf_and_nan(self): + inf = np.array([np.inf], dtype=np.object_) + assert_raises(ValueError, np.gcd, inf, 1) + assert_raises(ValueError, np.gcd, 1, inf) + assert_raises(ValueError, np.gcd, np.nan, inf) + assert_raises(TypeError, np.gcd, 4, float(np.inf)) + + +class TestRoundingFunctions: + + def test_object_direct(self): + """ test direct implementation of these magic methods """ + class C: + def __floor__(self): + return 1 + + def __ceil__(self): + return 2 + + def __trunc__(self): + return 3 + + arr = np.array([C(), C()]) + assert_equal(np.floor(arr), [1, 1]) + assert_equal(np.ceil(arr), [2, 2]) + assert_equal(np.trunc(arr), [3, 3]) + + def test_object_indirect(self): + """ test implementations via __float__ """ + class C: + def __float__(self): + return -2.5 + + arr = np.array([C(), C()]) + assert_equal(np.floor(arr), [-3, -3]) + assert_equal(np.ceil(arr), [-2, -2]) + with pytest.raises(TypeError): + np.trunc(arr) # consistent with math.trunc + + def test_fraction(self): + f = Fraction(-4, 3) + assert_equal(np.floor(f), -2) + assert_equal(np.ceil(f), -1) + assert_equal(np.trunc(f), -1) + + @pytest.mark.parametrize('func', [np.floor, np.ceil, np.trunc]) + @pytest.mark.parametrize('dtype', [np.bool, np.float64, np.float32, + np.int64, np.uint32]) + def test_output_dtype(self, func, dtype): + arr = np.array([-2, 0, 4, 8]).astype(dtype) + result = func(arr) + assert_equal(arr, result) + assert result.dtype == dtype + + +class TestComplexFunctions: + funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh, + np.arctanh, np.sin, np.cos, np.tan, np.exp, + np.exp2, np.log, np.sqrt, np.log10, np.log2, + np.log1p] + + def test_it(self): + for f in self.funcs: + if f is np.arccosh: + x = 1.5 + else: + x = .5 + fr = f(x) + fz = f(complex(x)) + assert_almost_equal(fz.real, fr, err_msg=f'real part {f}') + assert_almost_equal(fz.imag, 0., err_msg=f'imag part {f}') + + @pytest.mark.xfail(IS_WASM, reason="doesn't work") + def test_precisions_consistent(self): + z = 1 + 1j + for f in self.funcs: + fcf = f(np.csingle(z)) + fcd = f(np.cdouble(z)) + fcl = f(np.clongdouble(z)) + assert_almost_equal(fcf, fcd, decimal=6, err_msg=f'fch-fcd {f}') + assert_almost_equal(fcl, fcd, decimal=15, err_msg=f'fch-fcl {f}') + + @pytest.mark.xfail(IS_WASM, reason="doesn't work") + def test_branch_cuts(self): + # check branch cuts and continuity on them + _check_branch_cut(np.log, -0.5, 1j, 1, -1, True) # noqa: E221 + _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True) # noqa: E221 + _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True) + _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True) + _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True) # noqa: E221 + + _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True) + _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True) + _check_branch_cut(np.arctan, [0 - 2j, 2j], [1, 1], -1, 1, True) + + _check_branch_cut(np.arcsinh, [0 - 2j, 2j], [1, 1], -1, 1, True) + _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True) + _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True) + + # check against bogus branch cuts: assert continuity between quadrants + _check_branch_cut(np.arcsin, [0 - 2j, 2j], [ 1, 1], 1, 1) + _check_branch_cut(np.arccos, [0 - 2j, 2j], [ 1, 1], 1, 1) + _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1) + + _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1) + _check_branch_cut(np.arccosh, [0 - 2j, 2j, 2], [1, 1, 1j], 1, 1) + _check_branch_cut(np.arctanh, [0 - 2j, 2j, 0], [1, 1, 1j], 1, 1) + + @pytest.mark.xfail(IS_WASM, reason="doesn't work") + def test_branch_cuts_complex64(self): + # check branch cuts and continuity on them + _check_branch_cut(np.log, -0.5, 1j, 1, -1, True, np.complex64) # noqa: E221 + _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True, np.complex64) # noqa: E221 + _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True, np.complex64) + _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True, np.complex64) + _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True, np.complex64) # noqa: E221 + + _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) + _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) + _check_branch_cut(np.arctan, [0 - 2j, 2j], [1, 1], -1, 1, True, np.complex64) + + _check_branch_cut(np.arcsinh, [0 - 2j, 2j], [1, 1], -1, 1, True, np.complex64) + _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True, np.complex64) + _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) + + # check against bogus branch cuts: assert continuity between quadrants + _check_branch_cut(np.arcsin, [0 - 2j, 2j], [ 1, 1], 1, 1, False, np.complex64) + _check_branch_cut(np.arccos, [0 - 2j, 2j], [ 1, 1], 1, 1, False, np.complex64) + _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1, False, np.complex64) + + _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1, False, np.complex64) + _check_branch_cut(np.arccosh, [0 - 2j, 2j, 2], [1, 1, 1j], 1, 1, False, np.complex64) + _check_branch_cut(np.arctanh, [0 - 2j, 2j, 0], [1, 1, 1j], 1, 1, False, np.complex64) + + def test_against_cmath(self): + import cmath + + points = [-1 - 1j, -1 + 1j, +1 - 1j, +1 + 1j] + name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan', + 'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'} + atol = 4 * np.finfo(complex).eps + for func in self.funcs: + fname = func.__name__.split('.')[-1] + cname = name_map.get(fname, fname) + try: + cfunc = getattr(cmath, cname) + except AttributeError: + continue + for p in points: + a = complex(func(np.complex128(p))) + b = cfunc(p) + assert_( + abs(a - b) < atol, + f"{fname} {p}: {a}; cmath: {b}" + ) + + @pytest.mark.xfail( + # manylinux2014 uses glibc2.17 + _glibc_older_than("2.18"), + reason="Older glibc versions are imprecise (maybe passes with SIMD?)" + ) + @pytest.mark.xfail(IS_WASM, reason="doesn't work") + @pytest.mark.parametrize('dtype', [ + np.complex64, np.complex128, np.clongdouble + ]) + def test_loss_of_precision(self, dtype): + """Check loss of precision in complex arc* functions""" + if dtype is np.clongdouble and platform.machine() != 'x86_64': + # Failures on musllinux, aarch64, s390x, ppc64le (see gh-17554) + pytest.skip('Only works reliably for x86-64 and recent glibc') + + # Check against known-good functions + + info = np.finfo(dtype) + real_dtype = dtype(0.).real.dtype + eps = info.eps + + def check(x, rtol): + x = x.astype(real_dtype) + + z = x.astype(dtype) + d = np.absolute(np.arcsinh(x) / np.arcsinh(z).real - 1) + assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), + 'arcsinh')) + + z = (1j * x).astype(dtype) + d = np.absolute(np.arcsinh(x) / np.arcsin(z).imag - 1) + assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), + 'arcsin')) + + z = x.astype(dtype) + d = np.absolute(np.arctanh(x) / np.arctanh(z).real - 1) + assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), + 'arctanh')) + + z = (1j * x).astype(dtype) + d = np.absolute(np.arctanh(x) / np.arctan(z).imag - 1) + assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), + 'arctan')) + + # The switchover was chosen as 1e-3; hence there can be up to + # ~eps/1e-3 of relative cancellation error before it + + x_series = np.logspace(-20, -3.001, 200) + x_basic = np.logspace(-2.999, 0, 10, endpoint=False) + + if dtype is np.clongdouble: + if bad_arcsinh(): + pytest.skip("Trig functions of np.clongdouble values known " + "to be inaccurate on aarch64 and PPC for some " + "compilation configurations.") + # It's not guaranteed that the system-provided arc functions + # are accurate down to a few epsilons. (Eg. on Linux 64-bit) + # So, give more leeway for long complex tests here: + check(x_series, 50.0 * eps) + else: + check(x_series, 2.1 * eps) + check(x_basic, 2.0 * eps / 1e-3) + + # Check a few points + + z = np.array([1e-5 * (1 + 1j)], dtype=dtype) + p = 9.999999999333333333e-6 + 1.000000000066666666e-5j + d = np.absolute(1 - np.arctanh(z) / p) + assert_(np.all(d < 1e-15)) + + p = 1.0000000000333333333e-5 + 9.999999999666666667e-6j + d = np.absolute(1 - np.arcsinh(z) / p) + assert_(np.all(d < 1e-15)) + + p = 9.999999999333333333e-6j + 1.000000000066666666e-5 + d = np.absolute(1 - np.arctan(z) / p) + assert_(np.all(d < 1e-15)) + + p = 1.0000000000333333333e-5j + 9.999999999666666667e-6 + d = np.absolute(1 - np.arcsin(z) / p) + assert_(np.all(d < 1e-15)) + + # Check continuity across switchover points + + def check(func, z0, d=1): + z0 = np.asarray(z0, dtype=dtype) + zp = z0 + abs(z0) * d * eps * 2 + zm = z0 - abs(z0) * d * eps * 2 + assert_(np.all(zp != zm), (zp, zm)) + + # NB: the cancellation error at the switchover is at least eps + good = (abs(func(zp) - func(zm)) < 2 * eps) + assert_(np.all(good), (func, z0[~good])) + + for func in (np.arcsinh, np.arcsinh, np.arcsin, np.arctanh, np.arctan): + pts = [rp + 1j * ip for rp in (-1e-3, 0, 1e-3) for ip in (-1e-3, 0, 1e-3) + if rp != 0 or ip != 0] + check(func, pts, 1) + check(func, pts, 1j) + check(func, pts, 1 + 1j) + + @np.errstate(all="ignore") + def test_promotion_corner_cases(self): + for func in self.funcs: + assert func(np.float16(1)).dtype == np.float16 + # Integer to low precision float promotion is a dubious choice: + assert func(np.uint8(1)).dtype == np.float16 + assert func(np.int16(1)).dtype == np.float32 + + +class TestAttributes: + def test_attributes(self): + add = ncu.add + assert_equal(add.__name__, 'add') + assert_(add.ntypes >= 18) # don't fail if types added + assert_('ii->i' in add.types) + assert_equal(add.nin, 2) + assert_equal(add.nout, 1) + assert_equal(add.identity, 0) + + def test_doc(self): + # don't bother checking the long list of kwargs, which are likely to + # change + assert_(ncu.add.__doc__.startswith( + "add(x1, x2, /, out=None, *, where=True")) + assert_(ncu.frexp.__doc__.startswith( + "frexp(x[, out1, out2], / [, out=(None, None)], *, where=True")) + + +class TestSubclass: + + def test_subclass_op(self): + + class simple(np.ndarray): + def __new__(subtype, shape): + self = np.ndarray.__new__(subtype, shape, dtype=object) + self.fill(0) + return self + + a = simple((3, 4)) + assert_equal(a + a, a) + + +class TestFrompyfunc: + + def test_identity(self): + def mul(a, b): + return a * b + + # with identity=value + mul_ufunc = np.frompyfunc(mul, nin=2, nout=1, identity=1) + assert_equal(mul_ufunc.reduce([2, 3, 4]), 24) + assert_equal(mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)), 1) + assert_equal(mul_ufunc.reduce([]), 1) + + # with identity=None (reorderable) + mul_ufunc = np.frompyfunc(mul, nin=2, nout=1, identity=None) + assert_equal(mul_ufunc.reduce([2, 3, 4]), 24) + assert_equal(mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)), 1) + assert_raises(ValueError, lambda: mul_ufunc.reduce([])) + + # with no identity (not reorderable) + mul_ufunc = np.frompyfunc(mul, nin=2, nout=1) + assert_equal(mul_ufunc.reduce([2, 3, 4]), 24) + assert_raises(ValueError, lambda: mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1))) + assert_raises(ValueError, lambda: mul_ufunc.reduce([])) + + +def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False, + dtype=complex): + """ + Check for a branch cut in a function. + + Assert that `x0` lies on a branch cut of function `f` and `f` is + continuous from the direction `dx`. + + Parameters + ---------- + f : func + Function to check + x0 : array-like + Point on branch cut + dx : array-like + Direction to check continuity in + re_sign, im_sign : {1, -1} + Change of sign of the real or imaginary part expected + sig_zero_ok : bool + Whether to check if the branch cut respects signed zero (if applicable) + dtype : dtype + Dtype to check (should be complex) + + """ + x0 = np.atleast_1d(x0).astype(dtype) + dx = np.atleast_1d(dx).astype(dtype) + + if np.dtype(dtype).char == 'F': + scale = np.finfo(dtype).eps * 1e2 + atol = np.float32(1e-2) + else: + scale = np.finfo(dtype).eps * 1e3 + atol = 1e-4 + + y0 = f(x0) + yp = f(x0 + dx * scale * np.absolute(x0) / np.absolute(dx)) + ym = f(x0 - dx * scale * np.absolute(x0) / np.absolute(dx)) + + assert_(np.all(np.absolute(y0.real - yp.real) < atol), (y0, yp)) + assert_(np.all(np.absolute(y0.imag - yp.imag) < atol), (y0, yp)) + assert_(np.all(np.absolute(y0.real - ym.real * re_sign) < atol), (y0, ym)) + assert_(np.all(np.absolute(y0.imag - ym.imag * im_sign) < atol), (y0, ym)) + + if sig_zero_ok: + # check that signed zeros also work as a displacement + jr = (x0.real == 0) & (dx.real != 0) + ji = (x0.imag == 0) & (dx.imag != 0) + if np.any(jr): + x = x0[jr] + x.real = ncu.NZERO + ym = f(x) + assert_(np.all(np.absolute(y0[jr].real - ym.real * re_sign) < atol), (y0[jr], ym)) + assert_(np.all(np.absolute(y0[jr].imag - ym.imag * im_sign) < atol), (y0[jr], ym)) + + if np.any(ji): + x = x0[ji] + x.imag = ncu.NZERO + ym = f(x) + assert_(np.all(np.absolute(y0[ji].real - ym.real * re_sign) < atol), (y0[ji], ym)) + assert_(np.all(np.absolute(y0[ji].imag - ym.imag * im_sign) < atol), (y0[ji], ym)) + +def test_copysign(): + assert_(np.copysign(1, -1) == -1) + with np.errstate(divide="ignore"): + assert_(1 / np.copysign(0, -1) < 0) + assert_(1 / np.copysign(0, 1) > 0) + assert_(np.signbit(np.copysign(np.nan, -1))) + assert_(not np.signbit(np.copysign(np.nan, 1))) + +def _test_nextafter(t): + one = t(1) + two = t(2) + zero = t(0) + eps = np.finfo(t).eps + assert_(np.nextafter(one, two) - one == eps) + assert_(np.nextafter(one, zero) - one < 0) + assert_(np.isnan(np.nextafter(np.nan, one))) + assert_(np.isnan(np.nextafter(one, np.nan))) + assert_(np.nextafter(one, one) == one) + +def test_nextafter(): + return _test_nextafter(np.float64) + + +def test_nextafterf(): + return _test_nextafter(np.float32) + + +@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), + reason="long double is same as double") +@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"), + reason="IBM double double") +def test_nextafterl(): + return _test_nextafter(np.longdouble) + + +def test_nextafter_0(): + for t, direction in itertools.product(np._core.sctypes['float'], (1, -1)): + # The value of tiny for double double is NaN, so we need to pass the + # assert + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) + if not np.isnan(np.finfo(t).tiny): + tiny = np.finfo(t).tiny + assert_( + 0. < direction * np.nextafter(t(0), t(direction)) < tiny) + assert_equal(np.nextafter(t(0), t(direction)) / t(2.1), direction * 0.0) + +def _test_spacing(t): + one = t(1) + eps = np.finfo(t).eps + nan = t(np.nan) + inf = t(np.inf) + with np.errstate(invalid='ignore'): + assert_equal(np.spacing(one), eps) + assert_(np.isnan(np.spacing(nan))) + assert_(np.isnan(np.spacing(inf))) + assert_(np.isnan(np.spacing(-inf))) + assert_(np.spacing(t(1e30)) != 0) + +def test_spacing(): + return _test_spacing(np.float64) + +def test_spacingf(): + return _test_spacing(np.float32) + + +@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), + reason="long double is same as double") +@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"), + reason="IBM double double") +def test_spacingl(): + return _test_spacing(np.longdouble) + +def test_spacing_gfortran(): + # Reference from this fortran file, built with gfortran 4.3.3 on linux + # 32bits: + # PROGRAM test_spacing + # INTEGER, PARAMETER :: SGL = SELECTED_REAL_KIND(p=6, r=37) + # INTEGER, PARAMETER :: DBL = SELECTED_REAL_KIND(p=13, r=200) + # + # WRITE(*,*) spacing(0.00001_DBL) + # WRITE(*,*) spacing(1.0_DBL) + # WRITE(*,*) spacing(1000._DBL) + # WRITE(*,*) spacing(10500._DBL) + # + # WRITE(*,*) spacing(0.00001_SGL) + # WRITE(*,*) spacing(1.0_SGL) + # WRITE(*,*) spacing(1000._SGL) + # WRITE(*,*) spacing(10500._SGL) + # END PROGRAM + ref = {np.float64: [1.69406589450860068E-021, + 2.22044604925031308E-016, + 1.13686837721616030E-013, + 1.81898940354585648E-012], + np.float32: [9.09494702E-13, + 1.19209290E-07, + 6.10351563E-05, + 9.76562500E-04]} + + for dt, dec_ in zip([np.float32, np.float64], (10, 20)): + x = np.array([1e-5, 1, 1000, 10500], dtype=dt) + assert_array_almost_equal(np.spacing(x), ref[dt], decimal=dec_) + +def test_nextafter_vs_spacing(): + # XXX: spacing does not handle long double yet + for t in [np.float32, np.float64]: + for _f in [1, 1e-5, 1000]: + f = t(_f) + f1 = t(_f + 1) + assert_(np.nextafter(f, f1) - f == np.spacing(f)) + +def test_pos_nan(): + """Check np.nan is a positive nan.""" + assert_(np.signbit(np.nan) == 0) + +def test_reduceat(): + """Test bug in reduceat when structured arrays are not copied.""" + db = np.dtype([('name', 'S11'), ('time', np.int64), ('value', np.float32)]) + a = np.empty([100], dtype=db) + a['name'] = 'Simple' + a['time'] = 10 + a['value'] = 100 + indx = [0, 7, 15, 25] + + h2 = [] + val1 = indx[0] + for val2 in indx[1:]: + h2.append(np.add.reduce(a['value'][val1:val2])) + val1 = val2 + h2.append(np.add.reduce(a['value'][val1:])) + h2 = np.array(h2) + + # test buffered -- this should work + h1 = np.add.reduceat(a['value'], indx) + assert_array_almost_equal(h1, h2) + + # This is when the error occurs. + # test no buffer + np.setbufsize(32) + h1 = np.add.reduceat(a['value'], indx) + np.setbufsize(ncu.UFUNC_BUFSIZE_DEFAULT) + assert_array_almost_equal(h1, h2) + +def test_negative_value_raises(): + with pytest.raises(ValueError, match="buffer size must be non-negative"): + np.setbufsize(-5) + + old = np.getbufsize() + try: + prev = np.setbufsize(4096) + assert prev == old + assert np.getbufsize() == 4096 + finally: + np.setbufsize(old) + +def test_reduceat_empty(): + """Reduceat should work with empty arrays""" + indices = np.array([], 'i4') + x = np.array([], 'f8') + result = np.add.reduceat(x, indices) + assert_equal(result.dtype, x.dtype) + assert_equal(result.shape, (0,)) + # Another case with a slightly different zero-sized shape + x = np.ones((5, 2)) + result = np.add.reduceat(x, [], axis=0) + assert_equal(result.dtype, x.dtype) + assert_equal(result.shape, (0, 2)) + result = np.add.reduceat(x, [], axis=1) + assert_equal(result.dtype, x.dtype) + assert_equal(result.shape, (5, 0)) + +def test_complex_nan_comparisons(): + nans = [complex(np.nan, 0), complex(0, np.nan), complex(np.nan, np.nan)] + fins = [complex(1, 0), complex(-1, 0), complex(0, 1), complex(0, -1), + complex(1, 1), complex(-1, -1), complex(0, 0)] + + with np.errstate(invalid='ignore'): + for x in nans + fins: + x = np.array([x]) + for y in nans + fins: + y = np.array([y]) + + if np.isfinite(x) and np.isfinite(y): + continue + + assert_equal(x < y, False, err_msg=f"{x!r} < {y!r}") + assert_equal(x > y, False, err_msg=f"{x!r} > {y!r}") + assert_equal(x <= y, False, err_msg=f"{x!r} <= {y!r}") + assert_equal(x >= y, False, err_msg=f"{x!r} >= {y!r}") + assert_equal(x == y, False, err_msg=f"{x!r} == {y!r}") + + +def test_rint_big_int(): + # np.rint bug for large integer values on Windows 32-bit and MKL + # https://github.com/numpy/numpy/issues/6685 + val = 4607998452777363968 + # This is exactly representable in floating point + assert_equal(val, int(float(val))) + # Rint should not change the value + assert_equal(val, np.rint(val)) + + +@pytest.mark.parametrize('ftype', [np.float32, np.float64]) +def test_memoverlap_accumulate(ftype): + # Reproduces bug https://github.com/numpy/numpy/issues/15597 + arr = np.array([0.61, 0.60, 0.77, 0.41, 0.19], dtype=ftype) + out_max = np.array([0.61, 0.61, 0.77, 0.77, 0.77], dtype=ftype) + out_min = np.array([0.61, 0.60, 0.60, 0.41, 0.19], dtype=ftype) + assert_equal(np.maximum.accumulate(arr), out_max) + assert_equal(np.minimum.accumulate(arr), out_min) + +@pytest.mark.parametrize("ufunc, dtype", [ + (ufunc, t[0]) + for ufunc in UFUNCS_BINARY_ACC + for t in ufunc.types + if t[-1] == '?' and t[0] not in 'DFGMmO' +]) +def test_memoverlap_accumulate_cmp(ufunc, dtype): + if ufunc.signature: + pytest.skip('For generic signatures only') + for size in (2, 8, 32, 64, 128, 256): + arr = np.array([0, 1, 1] * size, dtype=dtype) + acc = ufunc.accumulate(arr, dtype='?') + acc_u8 = acc.view(np.uint8) + exp = np.array(list(itertools.accumulate(arr, ufunc)), dtype=np.uint8) + assert_equal(exp, acc_u8) + +@pytest.mark.parametrize("ufunc, dtype", [ + (ufunc, t[0]) + for ufunc in UFUNCS_BINARY_ACC + for t in ufunc.types + if t[0] == t[1] and t[0] == t[-1] and t[0] not in 'DFGMmO?' +]) +def test_memoverlap_accumulate_symmetric(ufunc, dtype): + if ufunc.signature: + pytest.skip('For generic signatures only') + with np.errstate(all='ignore'): + for size in (2, 8, 32, 64, 128, 256): + arr = np.array([0, 1, 2] * size).astype(dtype) + acc = ufunc.accumulate(arr, dtype=dtype) + exp = np.array(list(itertools.accumulate(arr, ufunc)), dtype=dtype) + assert_equal(exp, acc) + +def test_signaling_nan_exceptions(): + with assert_no_warnings(): + a = np.ndarray(shape=(), dtype='float32', buffer=b'\x00\xe0\xbf\xff') + np.isnan(a) + +@pytest.mark.parametrize("arr", [ + np.arange(2), + np.matrix([0, 1]), + np.matrix([[0, 1], [2, 5]]), + ]) +def test_outer_subclass_preserve(arr): + # for gh-8661 + class foo(np.ndarray): + pass + actual = np.multiply.outer(arr.view(foo), arr.view(foo)) + assert actual.__class__.__name__ == 'foo' + +def test_outer_bad_subclass(): + class BadArr1(np.ndarray): + def __array_finalize__(self, obj): + # The outer call reshapes to 3 dims, try to do a bad reshape. + if self.ndim == 3: + self.shape = self.shape + (1,) + + class BadArr2(np.ndarray): + def __array_finalize__(self, obj): + if isinstance(obj, BadArr2): + # outer inserts 1-sized dims. In that case disturb them. + if self.shape[-1] == 1: + self.shape = self.shape[::-1] + + for cls in [BadArr1, BadArr2]: + arr = np.ones((2, 3)).view(cls) + with assert_raises(TypeError) as a: + # The first array gets reshaped (not the second one) + np.add.outer(arr, [1, 2]) + + # This actually works, since we only see the reshaping error: + arr = np.ones((2, 3)).view(cls) + assert type(np.add.outer([1, 2], arr)) is cls + +def test_outer_exceeds_maxdims(): + deep = np.ones((1,) * 33) + with assert_raises(ValueError): + np.add.outer(deep, deep) + +def test_bad_legacy_ufunc_silent_errors(): + # legacy ufuncs can't report errors and NumPy can't check if the GIL + # is released. So NumPy has to check after the GIL is released just to + # cover all bases. `np.power` uses/used to use this. + arr = np.arange(3).astype(np.float64) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error(arr, arr) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + # not contiguous means the fast-path cannot be taken + non_contig = arr.repeat(20).reshape(-1, 6)[:, ::2] + ncu_tests.always_error(non_contig, arr) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error.outer(arr, arr) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error.reduce(arr) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error.reduceat(arr, [0, 1]) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error.accumulate(arr) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error.at(arr, [0, 1, 2], arr) + + +def test_bad_legacy_unary_ufunc_silent_errors(): + # Unary has a special scalar path right now, so test it explicitly. + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error_unary(np.arange(3).astype(np.float64)) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error_unary(1.5) + + +@pytest.mark.parametrize('x1', [np.arange(3.0), [0.0, 1.0, 2.0]]) +def test_bad_legacy_gufunc_silent_errors(x1): + # Verify that an exception raised in a gufunc loop propagates correctly. + # The signature of always_error_gufunc is '(i),()->()'. + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error_gufunc(x1, 0.0) + + +class TestAddDocstring: + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + @pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") + def test_add_same_docstring(self): + # test for attributes (which are C-level defined) + ncu.add_docstring(np.ndarray.flat, np.ndarray.flat.__doc__) + + # And typical functions: + def func(): + """docstring""" + return + + ncu.add_docstring(func, func.__doc__) + + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + def test_different_docstring_fails(self): + # test for attributes (which are C-level defined) + with assert_raises(RuntimeError): + ncu.add_docstring(np.ndarray.flat, "different docstring") + + # And typical functions: + def func(): + """docstring""" + return + + with assert_raises(RuntimeError): + ncu.add_docstring(func, "different docstring") + + +class TestAdd_newdoc_ufunc: + @pytest.mark.filterwarnings("ignore:_add_newdoc_ufunc:DeprecationWarning") + def test_ufunc_arg(self): + assert_raises(TypeError, ncu._add_newdoc_ufunc, 2, "blah") + assert_raises(ValueError, ncu._add_newdoc_ufunc, np.add, "blah") + + @pytest.mark.filterwarnings("ignore:_add_newdoc_ufunc:DeprecationWarning") + def test_string_arg(self): + assert_raises(TypeError, ncu._add_newdoc_ufunc, np.add, 3) + +class TestHypotErrorMessages: + def test_hypot_error_message_single_arg(self): + with pytest.raises(TypeError, match="hypot\\(\\) takes .* but 1 was given"): + np.hypot(5) + + def test_hypot_error_message_multiple_args(self): + with pytest.raises(TypeError, match="hypot\\(\\) takes .* but 4 were given"): + np.hypot(1, 2, 3, 4) diff --git a/local_packages/numpy/_core/tests/test_umath_accuracy.py b/local_packages/numpy/_core/tests/test_umath_accuracy.py new file mode 100644 index 0000000..3ca2f50 --- /dev/null +++ b/local_packages/numpy/_core/tests/test_umath_accuracy.py @@ -0,0 +1,132 @@ +import os +import sys +from ctypes import POINTER, c_double, c_float, c_int, c_longlong, cast, pointer +from os import path + +import pytest + +import numpy as np +from numpy._core._multiarray_umath import __cpu_features__ +from numpy.testing import assert_array_max_ulp +from numpy.testing._private.utils import _glibc_older_than + +UNARY_UFUNCS = [obj for obj in np._core.umath.__dict__.values() if + isinstance(obj, np.ufunc)] +UNARY_OBJECT_UFUNCS = [uf for uf in UNARY_UFUNCS if "O->O" in uf.types] + +# Remove functions that do not support `floats` +UNARY_OBJECT_UFUNCS.remove(np.invert) +UNARY_OBJECT_UFUNCS.remove(np.bitwise_count) + +IS_AVX = __cpu_features__.get('AVX512F', False) or \ + (__cpu_features__.get('FMA3', False) and __cpu_features__.get('AVX2', False)) + +IS_AVX512FP16 = __cpu_features__.get('AVX512FP16', False) + +# only run on linux with AVX, also avoid old glibc (numpy/numpy#20448). +runtest = (sys.platform.startswith('linux') + and IS_AVX and not _glibc_older_than("2.17")) +platform_skip = pytest.mark.skipif(not runtest, + reason="avoid testing inconsistent platform " + "library implementations") + +# convert string to hex function taken from: +# https://stackoverflow.com/questions/1592158/convert-hex-to-float # +def convert(s, datatype="np.float32"): + i = int(s, 16) # convert from hex to a Python int + if (datatype == "np.float64"): + cp = pointer(c_longlong(i)) # make this into a c long long integer + fp = cast(cp, POINTER(c_double)) # cast the int pointer to a double pointer + else: + cp = pointer(c_int(i)) # make this into a c integer + fp = cast(cp, POINTER(c_float)) # cast the int pointer to a float pointer + + return fp.contents.value # dereference the pointer, get the float + + +str_to_float = np.vectorize(convert) + +class TestAccuracy: + @platform_skip + def test_validate_transcendentals(self): + with np.errstate(all='ignore'): + data_dir = path.join(path.dirname(__file__), 'data') + files = os.listdir(data_dir) + files = list(filter(lambda f: f.endswith('.csv'), files)) + for filename in files: + filepath = path.join(data_dir, filename) + with open(filepath) as fid: + file_without_comments = ( + r for r in fid if r[0] not in ('$', '#') + ) + data = np.genfromtxt(file_without_comments, + dtype=('|S39', '|S39', '|S39', int), + names=('type', 'input', 'output', 'ulperr'), + delimiter=',', + skip_header=1) + npname = path.splitext(filename)[0].split('-')[3] + npfunc = getattr(np, npname) + for datatype in np.unique(data['type']): + data_subset = data[data['type'] == datatype] + data_input_str = data_subset['input'].astype(str) + data_output_str = data_subset['output'].astype(str) + data_type_str = data_subset['type'].astype(str) + + inval = np.array(str_to_float(data_input_str, + data_type_str), + dtype=eval(datatype)) + outval = np.array(str_to_float(data_output_str, + data_type_str), + dtype=eval(datatype)) + perm = np.random.permutation(len(inval)) + inval = inval[perm] + outval = outval[perm] + maxulperr = data_subset['ulperr'].max() + assert_array_max_ulp(npfunc(inval), outval, maxulperr) + + @pytest.mark.skipif(IS_AVX512FP16, + reason="SVML FP16 have slightly higher ULP errors") + @pytest.mark.parametrize("ufunc", UNARY_OBJECT_UFUNCS) + def test_validate_fp16_transcendentals(self, ufunc): + with np.errstate(all='ignore'): + arr = np.arange(65536, dtype=np.int16) + datafp16 = np.frombuffer(arr.tobytes(), dtype=np.float16) + datafp32 = datafp16.astype(np.float32) + assert_array_max_ulp(ufunc(datafp16), ufunc(datafp32), + maxulp=1, dtype=np.float16) + + @pytest.mark.skipif(not IS_AVX512FP16, + reason="lower ULP only apply for SVML FP16") + def test_validate_svml_fp16(self): + max_ulp_err = { + "arccos": 2.54, + "arccosh": 2.09, + "arcsin": 3.06, + "arcsinh": 1.51, + "arctan": 2.61, + "arctanh": 1.88, + "cbrt": 1.57, + "cos": 1.43, + "cosh": 1.33, + "exp2": 1.33, + "exp": 1.27, + "expm1": 0.53, + "log": 1.80, + "log10": 1.27, + "log1p": 1.88, + "log2": 1.80, + "sin": 1.88, + "sinh": 2.05, + "tan": 2.26, + "tanh": 3.00, + } + + with np.errstate(all='ignore'): + arr = np.arange(65536, dtype=np.int16) + datafp16 = np.frombuffer(arr.tobytes(), dtype=np.float16) + datafp32 = datafp16.astype(np.float32) + for func in max_ulp_err: + ufunc = getattr(np, func) + ulp = np.ceil(max_ulp_err[func]) + assert_array_max_ulp(ufunc(datafp16), ufunc(datafp32), + maxulp=ulp, dtype=np.float16) diff --git a/local_packages/numpy/_core/tests/test_umath_complex.py b/local_packages/numpy/_core/tests/test_umath_complex.py new file mode 100644 index 0000000..7012e7e --- /dev/null +++ b/local_packages/numpy/_core/tests/test_umath_complex.py @@ -0,0 +1,631 @@ +import platform +import sys + +import pytest + +import numpy as np + +# import the c-extension module directly since _arg is not exported via umath +import numpy._core._multiarray_umath as ncu +from numpy.testing import ( + assert_almost_equal, + assert_array_equal, + assert_array_max_ulp, + assert_equal, + assert_raises, +) + +# TODO: branch cuts (use Pauli code) +# TODO: conj 'symmetry' +# TODO: FPU exceptions + +# At least on Windows the results of many complex functions are not conforming +# to the C99 standard. See ticket 1574. +# Ditto for Solaris (ticket 1642) and OS X on PowerPC. +# FIXME: this will probably change when we require full C99 compatibility +with np.errstate(all='ignore'): + functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0) + or (np.log(complex(ncu.NZERO, 0)).imag != np.pi)) +# TODO: replace with a check on whether platform-provided C99 funcs are used +xfail_complex_tests = (not sys.platform.startswith('linux') or functions_seem_flaky) + +# TODO This can be xfail when the generator functions are got rid of. +platform_skip = pytest.mark.skipif(xfail_complex_tests, + reason="Inadequate C99 complex support") + + +class TestCexp: + def test_simple(self): + check = check_complex_value + f = np.exp + + check(f, 1, 0, np.exp(1), 0, False) + check(f, 0, 1, np.cos(1), np.sin(1), False) + + ref = np.exp(1) * complex(np.cos(1), np.sin(1)) + check(f, 1, 1, ref.real, ref.imag, False) + + @platform_skip + def test_special_values(self): + # C99: Section G 6.3.1 + + check = check_complex_value + f = np.exp + + # cexp(+-0 + 0i) is 1 + 0i + check(f, ncu.PZERO, 0, 1, 0, False) + check(f, ncu.NZERO, 0, 1, 0, False) + + # cexp(x + infi) is nan + nani for finite x and raises 'invalid' FPU + # exception + check(f, 1, np.inf, np.nan, np.nan) + check(f, -1, np.inf, np.nan, np.nan) + check(f, 0, np.inf, np.nan, np.nan) + + # cexp(inf + 0i) is inf + 0i + check(f, np.inf, 0, np.inf, 0) + + # cexp(-inf + yi) is +0 * (cos(y) + i sin(y)) for finite y + check(f, -np.inf, 1, ncu.PZERO, ncu.PZERO) + check(f, -np.inf, 0.75 * np.pi, ncu.NZERO, ncu.PZERO) + + # cexp(inf + yi) is +inf * (cos(y) + i sin(y)) for finite y + check(f, np.inf, 1, np.inf, np.inf) + check(f, np.inf, 0.75 * np.pi, -np.inf, np.inf) + + # cexp(-inf + inf i) is +-0 +- 0i (signs unspecified) + def _check_ninf_inf(dummy): + msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)" + with np.errstate(invalid='ignore'): + z = f(np.array(complex(-np.inf, np.inf))) + if z.real != 0 or z.imag != 0: + raise AssertionError(msgform % (z.real, z.imag)) + + _check_ninf_inf(None) + + # cexp(inf + inf i) is +-inf + NaNi and raised invalid FPU ex. + def _check_inf_inf(dummy): + msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)" + with np.errstate(invalid='ignore'): + z = f(np.array(complex(np.inf, np.inf))) + if not np.isinf(z.real) or not np.isnan(z.imag): + raise AssertionError(msgform % (z.real, z.imag)) + + _check_inf_inf(None) + + # cexp(-inf + nan i) is +-0 +- 0i + def _check_ninf_nan(dummy): + msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)" + with np.errstate(invalid='ignore'): + z = f(np.array(complex(-np.inf, np.nan))) + if z.real != 0 or z.imag != 0: + raise AssertionError(msgform % (z.real, z.imag)) + + _check_ninf_nan(None) + + # cexp(inf + nan i) is +-inf + nan + def _check_inf_nan(dummy): + msgform = "cexp(-inf, nan) is (%f, %f), expected (+-inf, nan)" + with np.errstate(invalid='ignore'): + z = f(np.array(complex(np.inf, np.nan))) + if not np.isinf(z.real) or not np.isnan(z.imag): + raise AssertionError(msgform % (z.real, z.imag)) + + _check_inf_nan(None) + + # cexp(nan + yi) is nan + nani for y != 0 (optional: raises invalid FPU + # ex) + check(f, np.nan, 1, np.nan, np.nan) + check(f, np.nan, -1, np.nan, np.nan) + + check(f, np.nan, np.inf, np.nan, np.nan) + check(f, np.nan, -np.inf, np.nan, np.nan) + + # cexp(nan + nani) is nan + nani + check(f, np.nan, np.nan, np.nan, np.nan) + + # TODO This can be xfail when the generator functions are got rid of. + @pytest.mark.skip(reason="cexp(nan + 0I) is wrong on most platforms") + def test_special_values2(self): + # XXX: most implementations get it wrong here (including glibc <= 2.10) + # cexp(nan + 0i) is nan + 0i + check = check_complex_value + f = np.exp + + check(f, np.nan, 0, np.nan, 0) + +class TestClog: + def test_simple(self): + x = np.array([1 + 0j, 1 + 2j]) + y_r = np.log(np.abs(x)) + 1j * np.angle(x) + y = np.log(x) + assert_almost_equal(y, y_r) + + @platform_skip + @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.") + def test_special_values(self): + xl = [] + yl = [] + + # From C99 std (Sec 6.3.2) + # XXX: check exceptions raised + # --- raise for invalid fails. + + # clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero' + # floating-point exception. + with np.errstate(divide='raise'): + x = np.array([ncu.NZERO], dtype=complex) + y = complex(-np.inf, np.pi) + assert_raises(FloatingPointError, np.log, x) + with np.errstate(divide='ignore'): + assert_almost_equal(np.log(x), y) + + xl.append(x) + yl.append(y) + + # clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero' + # floating-point exception. + with np.errstate(divide='raise'): + x = np.array([0], dtype=complex) + y = complex(-np.inf, 0) + assert_raises(FloatingPointError, np.log, x) + with np.errstate(divide='ignore'): + assert_almost_equal(np.log(x), y) + + xl.append(x) + yl.append(y) + + # clog(x + i inf returns +inf + i pi /2, for finite x. + x = np.array([complex(1, np.inf)], dtype=complex) + y = complex(np.inf, 0.5 * np.pi) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + x = np.array([complex(-1, np.inf)], dtype=complex) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(x + iNaN) returns NaN + iNaN and optionally raises the + # 'invalid' floating- point exception, for finite x. + with np.errstate(invalid='raise'): + x = np.array([complex(1., np.nan)], dtype=complex) + y = complex(np.nan, np.nan) + #assert_raises(FloatingPointError, np.log, x) + with np.errstate(invalid='ignore'): + assert_almost_equal(np.log(x), y) + + xl.append(x) + yl.append(y) + + with np.errstate(invalid='raise'): + x = np.array([np.inf + 1j * np.nan], dtype=complex) + #assert_raises(FloatingPointError, np.log, x) + with np.errstate(invalid='ignore'): + assert_almost_equal(np.log(x), y) + + xl.append(x) + yl.append(y) + + # clog(- inf + iy) returns +inf + ipi , for finite positive-signed y. + x = np.array([-np.inf + 1j], dtype=complex) + y = complex(np.inf, np.pi) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(+ inf + iy) returns +inf + i0, for finite positive-signed y. + x = np.array([np.inf + 1j], dtype=complex) + y = complex(np.inf, 0) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(- inf + i inf) returns +inf + i3pi /4. + x = np.array([complex(-np.inf, np.inf)], dtype=complex) + y = complex(np.inf, 0.75 * np.pi) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(+ inf + i inf) returns +inf + ipi /4. + x = np.array([complex(np.inf, np.inf)], dtype=complex) + y = complex(np.inf, 0.25 * np.pi) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(+/- inf + iNaN) returns +inf + iNaN. + x = np.array([complex(np.inf, np.nan)], dtype=complex) + y = complex(np.inf, np.nan) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + x = np.array([complex(-np.inf, np.nan)], dtype=complex) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(NaN + iy) returns NaN + iNaN and optionally raises the + # 'invalid' floating-point exception, for finite y. + x = np.array([complex(np.nan, 1)], dtype=complex) + y = complex(np.nan, np.nan) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(NaN + i inf) returns +inf + iNaN. + x = np.array([complex(np.nan, np.inf)], dtype=complex) + y = complex(np.inf, np.nan) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(NaN + iNaN) returns NaN + iNaN. + x = np.array([complex(np.nan, np.nan)], dtype=complex) + y = complex(np.nan, np.nan) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(conj(z)) = conj(clog(z)). + xa = np.array(xl, dtype=complex) + ya = np.array(yl, dtype=complex) + with np.errstate(divide='ignore'): + for i in range(len(xa)): + assert_almost_equal(np.log(xa[i].conj()), ya[i].conj()) + + +class TestCsqrt: + + def test_simple(self): + # sqrt(1) + check_complex_value(np.sqrt, 1, 0, 1, 0) + + # sqrt(1i) + rres = 0.5 * np.sqrt(2) + ires = rres + check_complex_value(np.sqrt, 0, 1, rres, ires, False) + + # sqrt(-1) + check_complex_value(np.sqrt, -1, 0, 0, 1) + + def test_simple_conjugate(self): + ref = np.conj(np.sqrt(complex(1, 1))) + + def f(z): + return np.sqrt(np.conj(z)) + + check_complex_value(f, 1, 1, ref.real, ref.imag, False) + + #def test_branch_cut(self): + # _check_branch_cut(f, -1, 0, 1, -1) + + @platform_skip + def test_special_values(self): + # C99: Sec G 6.4.2 + + check = check_complex_value + f = np.sqrt + + # csqrt(+-0 + 0i) is 0 + 0i + check(f, ncu.PZERO, 0, 0, 0) + check(f, ncu.NZERO, 0, 0, 0) + + # csqrt(x + infi) is inf + infi for any x (including NaN) + check(f, 1, np.inf, np.inf, np.inf) + check(f, -1, np.inf, np.inf, np.inf) + + check(f, ncu.PZERO, np.inf, np.inf, np.inf) + check(f, ncu.NZERO, np.inf, np.inf, np.inf) + check(f, np.inf, np.inf, np.inf, np.inf) + check(f, -np.inf, np.inf, np.inf, np.inf) # noqa: E221 + check(f, -np.nan, np.inf, np.inf, np.inf) # noqa: E221 + + # csqrt(x + nani) is nan + nani for any finite x + check(f, 1, np.nan, np.nan, np.nan) + check(f, -1, np.nan, np.nan, np.nan) + check(f, 0, np.nan, np.nan, np.nan) + + # csqrt(-inf + yi) is +0 + infi for any finite y > 0 + check(f, -np.inf, 1, ncu.PZERO, np.inf) + + # csqrt(inf + yi) is +inf + 0i for any finite y > 0 + check(f, np.inf, 1, np.inf, ncu.PZERO) + + # csqrt(-inf + nani) is nan +- infi (both +i infi are valid) + def _check_ninf_nan(dummy): + msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)" + z = np.sqrt(np.array(complex(-np.inf, np.nan))) + # FIXME: ugly workaround for isinf bug. + with np.errstate(invalid='ignore'): + if not (np.isnan(z.real) and np.isinf(z.imag)): + raise AssertionError(msgform % (z.real, z.imag)) + + _check_ninf_nan(None) + + # csqrt(+inf + nani) is inf + nani + check(f, np.inf, np.nan, np.inf, np.nan) + + # csqrt(nan + yi) is nan + nani for any finite y (infinite handled in x + # + nani) + check(f, np.nan, 0, np.nan, np.nan) + check(f, np.nan, 1, np.nan, np.nan) + check(f, np.nan, np.nan, np.nan, np.nan) + + # XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch + # cuts first) + +class TestCpow: + def setup_method(self): + self.olderr = np.seterr(invalid='ignore') + + def teardown_method(self): + np.seterr(**self.olderr) + + def test_simple(self): + x = np.array([1 + 1j, 0 + 2j, 1 + 2j, np.inf, np.nan]) + y_r = x ** 2 + y = np.power(x, 2) + assert_almost_equal(y, y_r) + + def test_scalar(self): + x = np.array([1, 1j, 2, 2.5 + .37j, np.inf, np.nan]) + y = np.array([1, 1j, -0.5 + 1.5j, -0.5 + 1.5j, 2, 3]) + lx = list(range(len(x))) + + # Hardcode the expected `builtins.complex` values, + # as complex exponentiation is broken as of bpo-44698 + p_r = [ + 1 + 0j, + 0.20787957635076193 + 0j, + 0.35812203996480685 + 0.6097119028618724j, + 0.12659112128185032 + 0.48847676699581527j, + complex(np.inf, np.nan), + complex(np.nan, np.nan), + ] + + n_r = [x[i] ** y[i] for i in lx] + for i in lx: + assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) + + def test_array(self): + x = np.array([1, 1j, 2, 2.5 + .37j, np.inf, np.nan]) + y = np.array([1, 1j, -0.5 + 1.5j, -0.5 + 1.5j, 2, 3]) + lx = list(range(len(x))) + + # Hardcode the expected `builtins.complex` values, + # as complex exponentiation is broken as of bpo-44698 + p_r = [ + 1 + 0j, + 0.20787957635076193 + 0j, + 0.35812203996480685 + 0.6097119028618724j, + 0.12659112128185032 + 0.48847676699581527j, + complex(np.inf, np.nan), + complex(np.nan, np.nan), + ] + + n_r = x ** y + for i in lx: + assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) + +class TestCabs: + def setup_method(self): + self.olderr = np.seterr(invalid='ignore') + + def teardown_method(self): + np.seterr(**self.olderr) + + def test_simple(self): + x = np.array([1 + 1j, 0 + 2j, 1 + 2j, np.inf, np.nan]) + y_r = np.array([np.sqrt(2.), 2, np.sqrt(5), np.inf, np.nan]) + y = np.abs(x) + assert_almost_equal(y, y_r) + + def test_fabs(self): + # Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs) + x = np.array([1 + 0j], dtype=complex) + assert_array_equal(np.abs(x), np.real(x)) + + x = np.array([complex(1, ncu.NZERO)], dtype=complex) + assert_array_equal(np.abs(x), np.real(x)) + + x = np.array([complex(np.inf, ncu.NZERO)], dtype=complex) + assert_array_equal(np.abs(x), np.real(x)) + + x = np.array([complex(np.nan, ncu.NZERO)], dtype=complex) + assert_array_equal(np.abs(x), np.real(x)) + + def test_cabs_inf_nan(self): + x, y = [], [] + + # cabs(+-nan + nani) returns nan + x.append(np.nan) + y.append(np.nan) + check_real_value(np.abs, np.nan, np.nan, np.nan) + + x.append(np.nan) + y.append(-np.nan) + check_real_value(np.abs, -np.nan, np.nan, np.nan) + + # According to C99 standard, if exactly one of the real/part is inf and + # the other nan, then cabs should return inf + x.append(np.inf) + y.append(np.nan) + check_real_value(np.abs, np.inf, np.nan, np.inf) + + x.append(-np.inf) + y.append(np.nan) + check_real_value(np.abs, -np.inf, np.nan, np.inf) + + # cabs(conj(z)) == conj(cabs(z)) (= cabs(z)) + def f(a): + return np.abs(np.conj(a)) + + def g(a, b): + return np.abs(complex(a, b)) + + xa = np.array(x, dtype=complex) + assert len(xa) == len(x) == len(y) + for xi, yi in zip(x, y): + ref = g(xi, yi) + check_real_value(f, xi, yi, ref) + +class TestCarg: + def test_simple(self): + check_real_value(ncu._arg, 1, 0, 0, False) + check_real_value(ncu._arg, 0, 1, 0.5 * np.pi, False) + + check_real_value(ncu._arg, 1, 1, 0.25 * np.pi, False) + check_real_value(ncu._arg, ncu.PZERO, ncu.PZERO, ncu.PZERO) + + # TODO This can be xfail when the generator functions are got rid of. + @pytest.mark.skip( + reason="Complex arithmetic with signed zero fails on most platforms") + def test_zero(self): + # carg(-0 +- 0i) returns +- pi + check_real_value(ncu._arg, ncu.NZERO, ncu.PZERO, np.pi, False) + check_real_value(ncu._arg, ncu.NZERO, ncu.NZERO, -np.pi, False) + + # carg(+0 +- 0i) returns +- 0 + check_real_value(ncu._arg, ncu.PZERO, ncu.PZERO, ncu.PZERO) + check_real_value(ncu._arg, ncu.PZERO, ncu.NZERO, ncu.NZERO) + + # carg(x +- 0i) returns +- 0 for x > 0 + check_real_value(ncu._arg, 1, ncu.PZERO, ncu.PZERO, False) + check_real_value(ncu._arg, 1, ncu.NZERO, ncu.NZERO, False) + + # carg(x +- 0i) returns +- pi for x < 0 + check_real_value(ncu._arg, -1, ncu.PZERO, np.pi, False) + check_real_value(ncu._arg, -1, ncu.NZERO, -np.pi, False) + + # carg(+- 0 + yi) returns pi/2 for y > 0 + check_real_value(ncu._arg, ncu.PZERO, 1, 0.5 * np.pi, False) + check_real_value(ncu._arg, ncu.NZERO, 1, 0.5 * np.pi, False) + + # carg(+- 0 + yi) returns -pi/2 for y < 0 + check_real_value(ncu._arg, ncu.PZERO, -1, 0.5 * np.pi, False) + check_real_value(ncu._arg, ncu.NZERO, -1, -0.5 * np.pi, False) + + #def test_branch_cuts(self): + # _check_branch_cut(ncu._arg, -1, 1j, -1, 1) + + def test_special_values(self): + # carg(-np.inf +- yi) returns +-pi for finite y > 0 + check_real_value(ncu._arg, -np.inf, 1, np.pi, False) + check_real_value(ncu._arg, -np.inf, -1, -np.pi, False) + + # carg(np.inf +- yi) returns +-0 for finite y > 0 + check_real_value(ncu._arg, np.inf, 1, ncu.PZERO, False) + check_real_value(ncu._arg, np.inf, -1, ncu.NZERO, False) + + # carg(x +- np.infi) returns +-pi/2 for finite x + check_real_value(ncu._arg, 1, np.inf, 0.5 * np.pi, False) + check_real_value(ncu._arg, 1, -np.inf, -0.5 * np.pi, False) + + # carg(-np.inf +- np.infi) returns +-3pi/4 + check_real_value(ncu._arg, -np.inf, np.inf, 0.75 * np.pi, False) + check_real_value(ncu._arg, -np.inf, -np.inf, -0.75 * np.pi, False) + + # carg(np.inf +- np.infi) returns +-pi/4 + check_real_value(ncu._arg, np.inf, np.inf, 0.25 * np.pi, False) + check_real_value(ncu._arg, np.inf, -np.inf, -0.25 * np.pi, False) + + # carg(x + yi) returns np.nan if x or y is nan + check_real_value(ncu._arg, np.nan, 0, np.nan, False) + check_real_value(ncu._arg, 0, np.nan, np.nan, False) + + check_real_value(ncu._arg, np.nan, np.inf, np.nan, False) + check_real_value(ncu._arg, np.inf, np.nan, np.nan, False) + + +def check_real_value(f, x1, y1, x, exact=True): + z1 = np.array([complex(x1, y1)]) + if exact: + assert_equal(f(z1), x) + else: + assert_almost_equal(f(z1), x) + + +def check_complex_value(f, x1, y1, x2, y2, exact=True): + z1 = np.array([complex(x1, y1)]) + z2 = complex(x2, y2) + with np.errstate(invalid='ignore'): + if exact: + assert_equal(f(z1), z2) + else: + assert_almost_equal(f(z1), z2) + +class TestSpecialComplexAVX: + @pytest.mark.parametrize("stride", [-4, -2, -1, 1, 2, 4]) + @pytest.mark.parametrize("astype", [np.complex64, np.complex128]) + def test_array(self, stride, astype): + nan = np.nan + inf = np.inf + arr = np.array([complex(nan, nan), + complex(nan, inf), + complex(inf, nan), + complex(inf, inf), + complex(0., inf), + complex(inf, 0.), + complex(0., 0.), + complex(0., nan), + complex(nan, 0.)], dtype=astype) + abs_true = np.array([nan, inf, inf, inf, inf, inf, 0., nan, nan], + dtype=arr.real.dtype) + sq_true = np.array([complex(nan, nan), + complex(nan, nan), + complex(nan, nan), + complex(nan, inf), + complex(-inf, nan), + complex(inf, nan), + complex(0., 0.), + complex(nan, nan), + complex(nan, nan)], dtype=astype) + with np.errstate(invalid='ignore'): + assert_equal(np.abs(arr[::stride]), abs_true[::stride]) + assert_equal(np.square(arr[::stride]), sq_true[::stride]) + +class TestComplexAbsoluteAVX: + @pytest.mark.parametrize("arraysize", + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 18, 19]) + @pytest.mark.parametrize("stride", [-4, -3, -2, -1, 1, 2, 3, 4]) + @pytest.mark.parametrize("astype", [np.complex64, np.complex128]) + # test to ensure masking and strides work as intended in the AVX implementation + def test_array(self, arraysize, stride, astype): + arr = np.ones(arraysize, dtype=astype) + abs_true = np.ones(arraysize, dtype=arr.real.dtype) + assert_equal(np.abs(arr[::stride]), abs_true[::stride]) + +# Testcase taken as is from https://github.com/numpy/numpy/issues/16660 +class TestComplexAbsoluteMixedDTypes: + @pytest.mark.parametrize("stride", [-4, -3, -2, -1, 1, 2, 3, 4]) + @pytest.mark.parametrize("astype", [np.complex64, np.complex128]) + @pytest.mark.parametrize("func", ['abs', 'square', 'conjugate']) + def test_array(self, stride, astype, func): + dtype = [('template_id', 'U') + uni_arr2 = str_arr.astype(' _ExtOjbDict: ... +def _make_extobj(*, all: _ErrKind = ..., **kwargs: Unpack[_ExtOjbDict]) -> CapsuleType: ... diff --git a/local_packages/numpy/_distributor_init.py b/local_packages/numpy/_distributor_init.py new file mode 100644 index 0000000..f608036 --- /dev/null +++ b/local_packages/numpy/_distributor_init.py @@ -0,0 +1,15 @@ +""" Distributor init file + +Distributors: you can add custom code here to support particular distributions +of numpy. + +For example, this is a good place to put any BLAS/LAPACK initialization code. + +The numpy standard source distribution will not put code in this file, so you +can safely replace this file with your own version. +""" + +try: + from . import _distributor_init_local # noqa: F401 +except ImportError: + pass diff --git a/local_packages/numpy/_distributor_init.pyi b/local_packages/numpy/_distributor_init.pyi new file mode 100644 index 0000000..94456ab --- /dev/null +++ b/local_packages/numpy/_distributor_init.pyi @@ -0,0 +1 @@ +# intentionally left blank diff --git a/local_packages/numpy/_expired_attrs_2_0.py b/local_packages/numpy/_expired_attrs_2_0.py new file mode 100644 index 0000000..2eebf95 --- /dev/null +++ b/local_packages/numpy/_expired_attrs_2_0.py @@ -0,0 +1,78 @@ +""" +Dict of expired attributes that are discontinued since 2.0 release. +Each item is associated with a migration note. +""" + +__expired_attributes__ = { + "geterrobj": "Use the np.errstate context manager instead.", + "seterrobj": "Use the np.errstate context manager instead.", + "cast": "Use `np.asarray(arr, dtype=dtype)` instead.", + "source": "Use `inspect.getsource` instead.", + "lookfor": "Search NumPy's documentation directly.", + "who": "Use an IDE variable explorer or `locals()` instead.", + "fastCopyAndTranspose": "Use `arr.T.copy()` instead.", + "set_numeric_ops": + "For the general case, use `PyUFunc_ReplaceLoopBySignature`. " + "For ndarray subclasses, define the ``__array_ufunc__`` method " + "and override the relevant ufunc.", + "NINF": "Use `-np.inf` instead.", + "PINF": "Use `np.inf` instead.", + "NZERO": "Use `-0.0` instead.", + "PZERO": "Use `0.0` instead.", + "add_newdoc": + "It's still available as `np.lib.add_newdoc`.", + "add_docstring": + "It's still available as `np.lib.add_docstring`.", + "add_newdoc_ufunc": + "It's an internal function and doesn't have a replacement.", + "safe_eval": "Use `ast.literal_eval` instead.", + "float_": "Use `np.float64` instead.", + "complex_": "Use `np.complex128` instead.", + "longfloat": "Use `np.longdouble` instead.", + "singlecomplex": "Use `np.complex64` instead.", + "cfloat": "Use `np.complex128` instead.", + "longcomplex": "Use `np.clongdouble` instead.", + "clongfloat": "Use `np.clongdouble` instead.", + "string_": "Use `np.bytes_` instead.", + "unicode_": "Use `np.str_` instead.", + "Inf": "Use `np.inf` instead.", + "Infinity": "Use `np.inf` instead.", + "NaN": "Use `np.nan` instead.", + "infty": "Use `np.inf` instead.", + "issctype": "Use `issubclass(rep, np.generic)` instead.", + "maximum_sctype": + "Use a specific dtype instead. You should avoid relying " + "on any implicit mechanism and select the largest dtype of " + "a kind explicitly in the code.", + "obj2sctype": "Use `np.dtype(obj).type` instead.", + "sctype2char": "Use `np.dtype(obj).char` instead.", + "sctypes": "Access dtypes explicitly instead.", + "issubsctype": "Use `np.issubdtype` instead.", + "set_string_function": + "Use `np.set_printoptions` instead with a formatter for " + "custom printing of NumPy objects.", + "asfarray": "Use `np.asarray` with a proper dtype instead.", + "issubclass_": "Use `issubclass` builtin instead.", + "tracemalloc_domain": "It's now available from `np.lib`.", + "mat": "Use `np.asmatrix` instead.", + "recfromcsv": "Use `np.genfromtxt` with comma delimiter instead.", + "recfromtxt": "Use `np.genfromtxt` instead.", + "deprecate": "Emit `DeprecationWarning` with `warnings.warn` directly, " + "or use `typing.deprecated`.", + "deprecate_with_doc": "Emit `DeprecationWarning` with `warnings.warn` " + "directly, or use `typing.deprecated`.", + "find_common_type": + "Use `numpy.promote_types` or `numpy.result_type` instead. " + "To achieve semantics for the `scalar_types` argument, use " + "`numpy.result_type` and pass the Python values `0`, `0.0`, or `0j`.", + "round_": "Use `np.round` instead.", + "get_array_wrap": "", + "DataSource": "It's still available as `np.lib.npyio.DataSource`.", + "nbytes": "Use `np.dtype().itemsize` instead.", + "byte_bounds": "Now it's available under `np.lib.array_utils.byte_bounds`", + "compare_chararrays": + "It's still available as `np.char.compare_chararrays`.", + "format_parser": "It's still available as `np.rec.format_parser`.", + "alltrue": "Use `np.all` instead.", + "sometrue": "Use `np.any` instead.", +} diff --git a/local_packages/numpy/_expired_attrs_2_0.pyi b/local_packages/numpy/_expired_attrs_2_0.pyi new file mode 100644 index 0000000..de6c2d1 --- /dev/null +++ b/local_packages/numpy/_expired_attrs_2_0.pyi @@ -0,0 +1,61 @@ +from typing import Final, TypedDict, final, type_check_only + +@final +@type_check_only +class _ExpiredAttributesType(TypedDict): + geterrobj: str + seterrobj: str + cast: str + source: str + lookfor: str + who: str + fastCopyAndTranspose: str + set_numeric_ops: str + NINF: str + PINF: str + NZERO: str + PZERO: str + add_newdoc: str + add_docstring: str + add_newdoc_ufunc: str + safe_eval: str + float_: str + complex_: str + longfloat: str + singlecomplex: str + cfloat: str + longcomplex: str + clongfloat: str + string_: str + unicode_: str + Inf: str + Infinity: str + NaN: str + infty: str + issctype: str + maximum_sctype: str + obj2sctype: str + sctype2char: str + sctypes: str + issubsctype: str + set_string_function: str + asfarray: str + issubclass_: str + tracemalloc_domain: str + mat: str + recfromcsv: str + recfromtxt: str + deprecate: str + deprecate_with_doc: str + find_common_type: str + round_: str + get_array_wrap: str + DataSource: str + nbytes: str + byte_bounds: str + compare_chararrays: str + format_parser: str + alltrue: str + sometrue: str + +__expired_attributes__: Final[_ExpiredAttributesType] = ... diff --git a/local_packages/numpy/_globals.py b/local_packages/numpy/_globals.py new file mode 100644 index 0000000..ada8d5c --- /dev/null +++ b/local_packages/numpy/_globals.py @@ -0,0 +1,121 @@ +""" +Module defining global singleton classes. + +This module raises a RuntimeError if an attempt to reload it is made. In that +way the identities of the classes defined here are fixed and will remain so +even if numpy itself is reloaded. In particular, a function like the following +will still work correctly after numpy is reloaded:: + + def foo(arg=np._NoValue): + if arg is np._NoValue: + ... + +That was not the case when the singleton classes were defined in the numpy +``__init__.py`` file. See gh-7844 for a discussion of the reload problem that +motivated this module. + +""" +import enum + +from ._utils import set_module as _set_module + +__all__ = ['_NoValue', '_CopyMode'] + + +# Disallow reloading this module so as to preserve the identities of the +# classes defined here. +if '_is_loaded' in globals(): + raise RuntimeError('Reloading numpy._globals is not allowed') +_is_loaded = True + + +class _NoValueType: + """Special keyword value. + + The instance of this class may be used as the default value assigned to a + keyword if no other obvious default (e.g., `None`) is suitable, + + Common reasons for using this keyword are: + + - A new keyword is added to a function, and that function forwards its + inputs to another function or method which can be defined outside of + NumPy. For example, ``np.std(x)`` calls ``x.std``, so when a ``keepdims`` + keyword was added that could only be forwarded if the user explicitly + specified ``keepdims``; downstream array libraries may not have added + the same keyword, so adding ``x.std(..., keepdims=keepdims)`` + unconditionally could have broken previously working code. + - A keyword is being deprecated, and a deprecation warning must only be + emitted when the keyword is used. + + """ + __instance = None + + def __new__(cls): + # ensure that only one instance exists + if not cls.__instance: + cls.__instance = super().__new__(cls) + return cls.__instance + + def __repr__(self): + return "" + + +_NoValue = _NoValueType() + + +@_set_module("numpy") +class _CopyMode(enum.Enum): + """ + An enumeration for the copy modes supported + by numpy.copy() and numpy.array(). The following three modes are supported, + + - ALWAYS: This means that a deep copy of the input + array will always be taken. + - IF_NEEDED: This means that a deep copy of the input + array will be taken only if necessary. + - NEVER: This means that the deep copy will never be taken. + If a copy cannot be avoided then a `ValueError` will be + raised. + + Note that the buffer-protocol could in theory do copies. NumPy currently + assumes an object exporting the buffer protocol will never do this. + """ + + ALWAYS = True + NEVER = False + IF_NEEDED = 2 + + def __bool__(self): + # For backwards compatibility + if self == _CopyMode.ALWAYS: + return True + + if self == _CopyMode.NEVER: + return False + + raise ValueError(f"{self} is neither True nor False.") + + +class _SignatureDescriptor: + # A descriptor to store on the ufunc __dict__ that avoids definig a + # signature for the ufunc class/type but allows the instance to have one. + # This is needed because inspect.signature() chokes on normal properties + # (as of 3.14 at least). + # We could also set __signature__ on the instance but this allows deferred + # computation of the signature. + def __get__(self, obj, objtype=None): + # Delay import, not a critical path but need to avoid circular import. + from numpy._core._internal import _ufunc_inspect_signature_builder + + if obj is None: + # could also return None, which is accepted as "not set" by + # inspect.signature(). + raise AttributeError( + "type object 'numpy.ufunc' has no attribute '__signature__'") + + # Store on the instance, after this the descriptor won't be used. + obj.__signature__ = _ufunc_inspect_signature_builder(obj) + return obj.__signature__ + + +_signature_descriptor = _SignatureDescriptor() diff --git a/local_packages/numpy/_globals.pyi b/local_packages/numpy/_globals.pyi new file mode 100644 index 0000000..b2231a9 --- /dev/null +++ b/local_packages/numpy/_globals.pyi @@ -0,0 +1,17 @@ +__all__ = ["_CopyMode", "_NoValue"] + +import enum +from typing import Final, final + +@final +class _CopyMode(enum.Enum): + ALWAYS = True + NEVER = False + IF_NEEDED = 2 + + def __bool__(self, /) -> bool: ... + +@final +class _NoValueType: ... + +_NoValue: Final[_NoValueType] = ... diff --git a/local_packages/numpy/_pyinstaller/__init__.py b/local_packages/numpy/_pyinstaller/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/local_packages/numpy/_pyinstaller/__init__.pyi b/local_packages/numpy/_pyinstaller/__init__.pyi new file mode 100644 index 0000000..e69de29 diff --git a/local_packages/numpy/_pyinstaller/hook-numpy.py b/local_packages/numpy/_pyinstaller/hook-numpy.py new file mode 100644 index 0000000..61c224b --- /dev/null +++ b/local_packages/numpy/_pyinstaller/hook-numpy.py @@ -0,0 +1,36 @@ +"""This hook should collect all binary files and any hidden modules that numpy +needs. + +Our (some-what inadequate) docs for writing PyInstaller hooks are kept here: +https://pyinstaller.readthedocs.io/en/stable/hooks.html + +""" +from PyInstaller.compat import is_pure_conda +from PyInstaller.utils.hooks import collect_dynamic_libs + +# Collect all DLLs inside numpy's installation folder, dump them into built +# app's root. +binaries = collect_dynamic_libs("numpy", ".") + +# If using Conda without any non-conda virtual environment manager: +if is_pure_conda: + # Assume running the NumPy from Conda-forge and collect it's DLLs from the + # communal Conda bin directory. DLLs from NumPy's dependencies must also be + # collected to capture MKL, OpenBlas, OpenMP, etc. + from PyInstaller.utils.hooks import conda_support + datas = conda_support.collect_dynamic_libs("numpy", dependencies=True) + +# Submodules PyInstaller cannot detect. `_dtype_ctypes` is only imported +# from C and `_multiarray_tests` is used in tests (which are not packed). +hiddenimports = ['numpy._core._dtype_ctypes', 'numpy._core._multiarray_tests'] + +# Remove testing and building code and packages that are referenced throughout +# NumPy but are not really dependencies. +excludedimports = [ + "scipy", + "pytest", + "f2py", + "setuptools", + "distutils", + "numpy.distutils", +] diff --git a/local_packages/numpy/_pyinstaller/hook-numpy.pyi b/local_packages/numpy/_pyinstaller/hook-numpy.pyi new file mode 100644 index 0000000..6da4914 --- /dev/null +++ b/local_packages/numpy/_pyinstaller/hook-numpy.pyi @@ -0,0 +1,6 @@ +from typing import Final + +binaries: Final[list[tuple[str, str]]] = ... + +hiddenimports: Final[list[str]] = ... +excludedimports: Final[list[str]] = ... diff --git a/local_packages/numpy/_pyinstaller/tests/__init__.py b/local_packages/numpy/_pyinstaller/tests/__init__.py new file mode 100644 index 0000000..4ed8fdd --- /dev/null +++ b/local_packages/numpy/_pyinstaller/tests/__init__.py @@ -0,0 +1,16 @@ +import pytest + +from numpy.testing import IS_EDITABLE, IS_WASM + +if IS_WASM: + pytest.skip( + "WASM/Pyodide does not use or support Fortran", + allow_module_level=True + ) + + +if IS_EDITABLE: + pytest.skip( + "Editable install doesn't support tests with a compile step", + allow_module_level=True + ) diff --git a/local_packages/numpy/_pyinstaller/tests/pyinstaller-smoke.py b/local_packages/numpy/_pyinstaller/tests/pyinstaller-smoke.py new file mode 100644 index 0000000..eb28070 --- /dev/null +++ b/local_packages/numpy/_pyinstaller/tests/pyinstaller-smoke.py @@ -0,0 +1,32 @@ +"""A crude *bit of everything* smoke test to verify PyInstaller compatibility. + +PyInstaller typically goes wrong by forgetting to package modules, extension +modules or shared libraries. This script should aim to touch as many of those +as possible in an attempt to trip a ModuleNotFoundError or a DLL load failure +due to an uncollected resource. Missing resources are unlikely to lead to +arithmetic errors so there's generally no need to verify any calculation's +output - merely that it made it to the end OK. This script should not +explicitly import any of numpy's submodules as that gives PyInstaller undue +hints that those submodules exist and should be collected (accessing implicitly +loaded submodules is OK). + +""" +import numpy as np + +a = np.arange(1., 10.).reshape((3, 3)) % 5 +np.linalg.det(a) +a @ a +a @ a.T +np.linalg.inv(a) +np.sin(np.exp(a)) +np.linalg.svd(a) +np.linalg.eigh(a) + +np.unique(np.random.randint(0, 10, 100)) +np.sort(np.random.uniform(0, 10, 100)) + +np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8)) +np.ma.masked_array(np.arange(10), np.random.rand(10) < .5).sum() +np.polynomial.Legendre([7, 8, 9]).roots() + +print("I made it!") diff --git a/local_packages/numpy/_pyinstaller/tests/test_pyinstaller.py b/local_packages/numpy/_pyinstaller/tests/test_pyinstaller.py new file mode 100644 index 0000000..a9061da --- /dev/null +++ b/local_packages/numpy/_pyinstaller/tests/test_pyinstaller.py @@ -0,0 +1,35 @@ +import subprocess +from pathlib import Path + +import pytest + + +# PyInstaller has been very unproactive about replacing 'imp' with 'importlib'. +@pytest.mark.filterwarnings('ignore::DeprecationWarning') +# It also leaks io.BytesIO()s. +@pytest.mark.filterwarnings('ignore::ResourceWarning') +@pytest.mark.parametrize("mode", ["--onedir", "--onefile"]) +@pytest.mark.slow +def test_pyinstaller(mode, tmp_path): + """Compile and run pyinstaller-smoke.py using PyInstaller.""" + + pyinstaller_cli = pytest.importorskip("PyInstaller.__main__").run + + source = Path(__file__).with_name("pyinstaller-smoke.py").resolve() + args = [ + # Place all generated files in ``tmp_path``. + '--workpath', str(tmp_path / "build"), + '--distpath', str(tmp_path / "dist"), + '--specpath', str(tmp_path), + mode, + str(source), + ] + pyinstaller_cli(args) + + if mode == "--onefile": + exe = tmp_path / "dist" / source.stem + else: + exe = tmp_path / "dist" / source.stem / source.stem + + p = subprocess.run([str(exe)], check=True, stdout=subprocess.PIPE) + assert p.stdout.strip() == b"I made it!" diff --git a/local_packages/numpy/_pytesttester.py b/local_packages/numpy/_pytesttester.py new file mode 100644 index 0000000..77342e4 --- /dev/null +++ b/local_packages/numpy/_pytesttester.py @@ -0,0 +1,201 @@ +""" +Pytest test running. + +This module implements the ``test()`` function for NumPy modules. The usual +boiler plate for doing that is to put the following in the module +``__init__.py`` file:: + + from numpy._pytesttester import PytestTester + test = PytestTester(__name__) + del PytestTester + + +Warnings filtering and other runtime settings should be dealt with in the +``pytest.ini`` file in the numpy repo root. The behavior of the test depends on +whether or not that file is found as follows: + +* ``pytest.ini`` is present (develop mode) + All warnings except those explicitly filtered out are raised as error. +* ``pytest.ini`` is absent (release mode) + DeprecationWarnings and PendingDeprecationWarnings are ignored, other + warnings are passed through. + +In practice, tests run from the numpy repo are run in development mode with +``spin``, through the standard ``spin test`` invocation or from an inplace +build with ``pytest numpy``. + +This module is imported by every numpy subpackage, so lies at the top level to +simplify circular import issues. For the same reason, it contains no numpy +imports at module scope, instead importing numpy within function calls. +""" +import os +import sys + +__all__ = ['PytestTester'] + + +def _show_numpy_info(): + import numpy as np + + print(f"NumPy version {np.__version__}") + info = np.lib._utils_impl._opt_info() + print("NumPy CPU features: ", (info or 'nothing enabled')) + + +class PytestTester: + """ + Pytest test runner. + + A test function is typically added to a package's __init__.py like so:: + + from numpy._pytesttester import PytestTester + test = PytestTester(__name__).test + del PytestTester + + Calling this test function finds and runs all tests associated with the + module and all its sub-modules. + + Attributes + ---------- + module_name : str + Full path to the package to test. + + Parameters + ---------- + module_name : module name + The name of the module to test. + + Notes + ----- + Unlike the previous ``nose``-based implementation, this class is not + publicly exposed as it performs some ``numpy``-specific warning + suppression. + + """ + def __init__(self, module_name): + self.module_name = module_name + self.__module__ = module_name + + def __call__(self, label='fast', verbose=1, extra_argv=None, + doctests=False, coverage=False, durations=-1, tests=None): + """ + Run tests for module using pytest. + + Parameters + ---------- + label : {'fast', 'full'}, optional + Identifies the tests to run. When set to 'fast', tests decorated + with `pytest.mark.slow` are skipped, when 'full', the slow marker + is ignored. + verbose : int, optional + Verbosity value for test outputs, in the range 1-3. Default is 1. + extra_argv : list, optional + List with any extra arguments to pass to pytests. + doctests : bool, optional + .. note:: Not supported + coverage : bool, optional + If True, report coverage of NumPy code. Default is False. + Requires installation of (pip) pytest-cov. + durations : int, optional + If < 0, do nothing, If 0, report time of all tests, if > 0, + report the time of the slowest `timer` tests. Default is -1. + tests : test or list of tests + Tests to be executed with pytest '--pyargs' + + Returns + ------- + result : bool + Return True on success, false otherwise. + + Notes + ----- + Each NumPy module exposes `test` in its namespace to run all tests for + it. For example, to run all tests for numpy.lib: + + >>> np.lib.test() #doctest: +SKIP + + Examples + -------- + >>> result = np.lib.test() #doctest: +SKIP + ... + 1023 passed, 2 skipped, 6 deselected, 1 xfailed in 10.39 seconds + >>> result + True + + """ + import warnings + + import pytest + + module = sys.modules[self.module_name] + module_path = os.path.abspath(module.__path__[0]) + + # setup the pytest arguments + pytest_args = ["-l"] + + # offset verbosity. The "-q" cancels a "-v". + pytest_args += ["-q"] + + if sys.version_info < (3, 12): + with warnings.catch_warnings(): + warnings.simplefilter("always") + # Filter out distutils cpu warnings (could be localized to + # distutils tests). ASV has problems with top level import, + # so fetch module for suppression here. + from numpy.distutils import cpuinfo # noqa: F401 + + # Filter out annoying import messages. Want these in both develop and + # release mode. + pytest_args += [ + "-W ignore:Not importing directory", + "-W ignore:numpy.dtype size changed", + "-W ignore:numpy.ufunc size changed", + "-W ignore::UserWarning:cpuinfo", + ] + + # When testing matrices, ignore their PendingDeprecationWarnings + pytest_args += [ + "-W ignore:the matrix subclass is not", + "-W ignore:Importing from numpy.matlib is", + ] + + if doctests: + pytest_args += ["--doctest-modules"] + + if extra_argv: + pytest_args += list(extra_argv) + + if verbose > 1: + pytest_args += ["-" + "v" * (verbose - 1)] + + if coverage: + pytest_args += ["--cov=" + module_path] + + if label == "fast": + # not importing at the top level to avoid circular import of module + from numpy.testing import IS_PYPY + if IS_PYPY: + pytest_args += ["-m", "not slow and not slow_pypy"] + else: + pytest_args += ["-m", "not slow"] + + elif label != "full": + pytest_args += ["-m", label] + + if durations >= 0: + pytest_args += [f"--durations={durations}"] + + if tests is None: + tests = [self.module_name] + + pytest_args += ["--pyargs"] + list(tests) + + # run tests. + _show_numpy_info() + + try: + code = pytest.main(pytest_args) + except SystemExit as exc: + code = exc.code + + return code == 0 diff --git a/local_packages/numpy/_pytesttester.pyi b/local_packages/numpy/_pytesttester.pyi new file mode 100644 index 0000000..bd71239 --- /dev/null +++ b/local_packages/numpy/_pytesttester.pyi @@ -0,0 +1,18 @@ +from collections.abc import Iterable +from typing import Literal as L + +__all__ = ["PytestTester"] + +class PytestTester: + module_name: str + def __init__(self, module_name: str) -> None: ... + def __call__( + self, + label: L["fast", "full"] = "fast", + verbose: int = 1, + extra_argv: Iterable[str] | None = None, + doctests: L[False] = False, + coverage: bool = False, + durations: int = -1, + tests: Iterable[str] | None = None, + ) -> bool: ... diff --git a/local_packages/numpy/_typing/__init__.py b/local_packages/numpy/_typing/__init__.py new file mode 100644 index 0000000..22eb6e3 --- /dev/null +++ b/local_packages/numpy/_typing/__init__.py @@ -0,0 +1,173 @@ +"""Private counterpart of ``numpy.typing``.""" + +import sys + +from ._array_like import ( + NDArray as NDArray, + _ArrayLike as _ArrayLike, + _ArrayLikeAnyString_co as _ArrayLikeAnyString_co, + _ArrayLikeBool_co as _ArrayLikeBool_co, + _ArrayLikeBytes_co as _ArrayLikeBytes_co, + _ArrayLikeComplex128_co as _ArrayLikeComplex128_co, + _ArrayLikeComplex_co as _ArrayLikeComplex_co, + _ArrayLikeDT64_co as _ArrayLikeDT64_co, + _ArrayLikeFloat64_co as _ArrayLikeFloat64_co, + _ArrayLikeFloat_co as _ArrayLikeFloat_co, + _ArrayLikeInt as _ArrayLikeInt, + _ArrayLikeInt_co as _ArrayLikeInt_co, + _ArrayLikeNumber_co as _ArrayLikeNumber_co, + _ArrayLikeObject_co as _ArrayLikeObject_co, + _ArrayLikeStr_co as _ArrayLikeStr_co, + _ArrayLikeString_co as _ArrayLikeString_co, + _ArrayLikeTD64_co as _ArrayLikeTD64_co, + _ArrayLikeUInt_co as _ArrayLikeUInt_co, + _ArrayLikeVoid_co as _ArrayLikeVoid_co, + _FiniteNestedSequence as _FiniteNestedSequence, + _SupportsArray as _SupportsArray, + _SupportsArrayFunc as _SupportsArrayFunc, +) + +# +from ._char_codes import ( + _BoolCodes as _BoolCodes, + _ByteCodes as _ByteCodes, + _BytesCodes as _BytesCodes, + _CDoubleCodes as _CDoubleCodes, + _CharacterCodes as _CharacterCodes, + _CLongDoubleCodes as _CLongDoubleCodes, + _Complex64Codes as _Complex64Codes, + _Complex128Codes as _Complex128Codes, + _ComplexFloatingCodes as _ComplexFloatingCodes, + _CSingleCodes as _CSingleCodes, + _DoubleCodes as _DoubleCodes, + _DT64Codes as _DT64Codes, + _FlexibleCodes as _FlexibleCodes, + _Float16Codes as _Float16Codes, + _Float32Codes as _Float32Codes, + _Float64Codes as _Float64Codes, + _FloatingCodes as _FloatingCodes, + _GenericCodes as _GenericCodes, + _HalfCodes as _HalfCodes, + _InexactCodes as _InexactCodes, + _Int8Codes as _Int8Codes, + _Int16Codes as _Int16Codes, + _Int32Codes as _Int32Codes, + _Int64Codes as _Int64Codes, + _IntCCodes as _IntCCodes, + _IntCodes as _IntCodes, + _IntegerCodes as _IntegerCodes, + _IntPCodes as _IntPCodes, + _LongCodes as _LongCodes, + _LongDoubleCodes as _LongDoubleCodes, + _LongLongCodes as _LongLongCodes, + _NumberCodes as _NumberCodes, + _ObjectCodes as _ObjectCodes, + _ShortCodes as _ShortCodes, + _SignedIntegerCodes as _SignedIntegerCodes, + _SingleCodes as _SingleCodes, + _StrCodes as _StrCodes, + _StringCodes as _StringCodes, + _TD64Codes as _TD64Codes, + _UByteCodes as _UByteCodes, + _UInt8Codes as _UInt8Codes, + _UInt16Codes as _UInt16Codes, + _UInt32Codes as _UInt32Codes, + _UInt64Codes as _UInt64Codes, + _UIntCCodes as _UIntCCodes, + _UIntCodes as _UIntCodes, + _UIntPCodes as _UIntPCodes, + _ULongCodes as _ULongCodes, + _ULongLongCodes as _ULongLongCodes, + _UnsignedIntegerCodes as _UnsignedIntegerCodes, + _UShortCodes as _UShortCodes, + _VoidCodes as _VoidCodes, +) + +# +from ._dtype_like import ( + _DTypeLike as _DTypeLike, + _DTypeLikeBool as _DTypeLikeBool, + _DTypeLikeBytes as _DTypeLikeBytes, + _DTypeLikeComplex as _DTypeLikeComplex, + _DTypeLikeComplex_co as _DTypeLikeComplex_co, + _DTypeLikeDT64 as _DTypeLikeDT64, + _DTypeLikeFloat as _DTypeLikeFloat, + _DTypeLikeInt as _DTypeLikeInt, + _DTypeLikeObject as _DTypeLikeObject, + _DTypeLikeStr as _DTypeLikeStr, + _DTypeLikeTD64 as _DTypeLikeTD64, + _DTypeLikeUInt as _DTypeLikeUInt, + _DTypeLikeVoid as _DTypeLikeVoid, + _HasDType as _HasDType, + _SupportsDType as _SupportsDType, + _VoidDTypeLike as _VoidDTypeLike, +) + +# +from ._nbit import ( + _NBitByte as _NBitByte, + _NBitDouble as _NBitDouble, + _NBitHalf as _NBitHalf, + _NBitInt as _NBitInt, + _NBitIntC as _NBitIntC, + _NBitIntP as _NBitIntP, + _NBitLong as _NBitLong, + _NBitLongDouble as _NBitLongDouble, + _NBitLongLong as _NBitLongLong, + _NBitShort as _NBitShort, + _NBitSingle as _NBitSingle, +) + +# +from ._nbit_base import ( # type: ignore[deprecated] + NBitBase as NBitBase, # pyright: ignore[reportDeprecated] + _8Bit as _8Bit, + _16Bit as _16Bit, + _32Bit as _32Bit, + _64Bit as _64Bit, + _96Bit as _96Bit, + _128Bit as _128Bit, +) + +# +from ._nested_sequence import _NestedSequence as _NestedSequence + +# +from ._scalars import ( + _BoolLike_co as _BoolLike_co, + _CharLike_co as _CharLike_co, + _ComplexLike_co as _ComplexLike_co, + _FloatLike_co as _FloatLike_co, + _IntLike_co as _IntLike_co, + _NumberLike_co as _NumberLike_co, + _ScalarLike_co as _ScalarLike_co, + _TD64Like_co as _TD64Like_co, + _UIntLike_co as _UIntLike_co, + _VoidLike_co as _VoidLike_co, +) + +# +from ._shape import _AnyShape as _AnyShape, _Shape as _Shape, _ShapeLike as _ShapeLike + +# +from ._ufunc import ( + _GUFunc_Nin2_Nout1 as _GUFunc_Nin2_Nout1, + _UFunc_Nin1_Nout1 as _UFunc_Nin1_Nout1, + _UFunc_Nin1_Nout2 as _UFunc_Nin1_Nout2, + _UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1, + _UFunc_Nin2_Nout2 as _UFunc_Nin2_Nout2, +) + +# wrapping the public aliases in `TypeAliasType` helps with introspection readability +if sys.version_info >= (3, 12): + from typing import TypeAliasType + + from ._array_like import ArrayLike as _ArrayLikeAlias + from ._dtype_like import DTypeLike as _DTypeLikeAlias + + ArrayLike = TypeAliasType("ArrayLike", _ArrayLikeAlias) + DTypeLike = TypeAliasType("DTypeLike", _DTypeLikeAlias) + +else: + from ._array_like import ArrayLike as ArrayLike + from ._dtype_like import DTypeLike as DTypeLike diff --git a/local_packages/numpy/_typing/_add_docstring.py b/local_packages/numpy/_typing/_add_docstring.py new file mode 100644 index 0000000..5330a6b --- /dev/null +++ b/local_packages/numpy/_typing/_add_docstring.py @@ -0,0 +1,153 @@ +"""A module for creating docstrings for sphinx ``data`` domains.""" + +import re +import textwrap + +from ._array_like import NDArray + +_docstrings_list = [] + + +def add_newdoc(name: str, value: str, doc: str) -> None: + """Append ``_docstrings_list`` with a docstring for `name`. + + Parameters + ---------- + name : str + The name of the object. + value : str + A string-representation of the object. + doc : str + The docstring of the object. + + """ + _docstrings_list.append((name, value, doc)) + + +def _parse_docstrings() -> str: + """Convert all docstrings in ``_docstrings_list`` into a single + sphinx-legible text block. + + """ + type_list_ret = [] + for name, value, doc in _docstrings_list: + s = textwrap.dedent(doc).replace("\n", "\n ") + + # Replace sections by rubrics + lines = s.split("\n") + new_lines = [] + indent = "" + for line in lines: + m = re.match(r'^(\s+)[-=]+\s*$', line) + if m and new_lines: + prev = textwrap.dedent(new_lines.pop()) + if prev == "Examples": + indent = "" + new_lines.append(f'{m.group(1)}.. rubric:: {prev}') + else: + indent = 4 * " " + new_lines.append(f'{m.group(1)}.. admonition:: {prev}') + new_lines.append("") + else: + new_lines.append(f"{indent}{line}") + + s = "\n".join(new_lines) + s_block = f""".. data:: {name}\n :value: {value}\n {s}""" + type_list_ret.append(s_block) + return "\n".join(type_list_ret) + + +add_newdoc('ArrayLike', 'typing.Union[...]', + """ + A `~typing.Union` representing objects that can be coerced + into an `~numpy.ndarray`. + + Among others this includes the likes of: + + * Scalars. + * (Nested) sequences. + * Objects implementing the `~class.__array__` protocol. + + .. versionadded:: 1.20 + + See Also + -------- + :term:`array_like`: + Any scalar or sequence that can be interpreted as an ndarray. + + Examples + -------- + .. code-block:: python + + >>> import numpy as np + >>> import numpy.typing as npt + + >>> def as_array(a: npt.ArrayLike) -> np.ndarray: + ... return np.array(a) + + """) + +add_newdoc('DTypeLike', 'typing.Union[...]', + """ + A `~typing.Union` representing objects that can be coerced + into a `~numpy.dtype`. + + Among others this includes the likes of: + + * :class:`type` objects. + * Character codes or the names of :class:`type` objects. + * Objects with the ``.dtype`` attribute. + + .. versionadded:: 1.20 + + See Also + -------- + :ref:`Specifying and constructing data types ` + A comprehensive overview of all objects that can be coerced + into data types. + + Examples + -------- + .. code-block:: python + + >>> import numpy as np + >>> import numpy.typing as npt + + >>> def as_dtype(d: npt.DTypeLike) -> np.dtype: + ... return np.dtype(d) + + """) + +add_newdoc('NDArray', repr(NDArray), + """ + A `np.ndarray[tuple[Any, ...], np.dtype[ScalarT]] ` + type alias :term:`generic ` w.r.t. its + `dtype.type `. + + Can be used during runtime for typing arrays with a given dtype + and unspecified shape. + + .. versionadded:: 1.21 + + Examples + -------- + .. code-block:: python + + >>> import numpy as np + >>> import numpy.typing as npt + + >>> print(npt.NDArray) + numpy.ndarray[tuple[typing.Any, ...], numpy.dtype[~_ScalarT]] + + >>> print(npt.NDArray[np.float64]) + numpy.ndarray[tuple[typing.Any, ...], numpy.dtype[numpy.float64]] + + >>> NDArrayInt = npt.NDArray[np.int_] + >>> a: NDArrayInt = np.arange(10) + + >>> def func(a: npt.ArrayLike) -> npt.NDArray[Any]: + ... return np.array(a) + + """) + +_docstrings = _parse_docstrings() diff --git a/local_packages/numpy/_typing/_array_like.py b/local_packages/numpy/_typing/_array_like.py new file mode 100644 index 0000000..6b071f4 --- /dev/null +++ b/local_packages/numpy/_typing/_array_like.py @@ -0,0 +1,106 @@ +import sys +from collections.abc import Callable, Collection, Sequence +from typing import TYPE_CHECKING, Any, Protocol, TypeAlias, TypeVar, runtime_checkable + +import numpy as np +from numpy import dtype + +from ._nbit_base import _32Bit, _64Bit +from ._nested_sequence import _NestedSequence +from ._shape import _AnyShape + +if TYPE_CHECKING: + StringDType = np.dtypes.StringDType +else: + # at runtime outside of type checking importing this from numpy.dtypes + # would lead to a circular import + from numpy._core.multiarray import StringDType + +_T = TypeVar("_T") +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_DTypeT = TypeVar("_DTypeT", bound=dtype[Any]) +_DTypeT_co = TypeVar("_DTypeT_co", covariant=True, bound=dtype[Any]) + +NDArray: TypeAlias = np.ndarray[_AnyShape, dtype[_ScalarT]] + +# The `_SupportsArray` protocol only cares about the default dtype +# (i.e. `dtype=None` or no `dtype` parameter at all) of the to-be returned +# array. +# Concrete implementations of the protocol are responsible for adding +# any and all remaining overloads +@runtime_checkable +class _SupportsArray(Protocol[_DTypeT_co]): + def __array__(self) -> np.ndarray[Any, _DTypeT_co]: ... + + +@runtime_checkable +class _SupportsArrayFunc(Protocol): + """A protocol class representing `~class.__array_function__`.""" + def __array_function__( + self, + func: Callable[..., Any], + types: Collection[type[Any]], + args: tuple[Any, ...], + kwargs: dict[str, Any], + ) -> object: ... + + +# TODO: Wait until mypy supports recursive objects in combination with typevars +_FiniteNestedSequence: TypeAlias = ( + _T + | Sequence[_T] + | Sequence[Sequence[_T]] + | Sequence[Sequence[Sequence[_T]]] + | Sequence[Sequence[Sequence[Sequence[_T]]]] +) + +# A subset of `npt.ArrayLike` that can be parametrized w.r.t. `np.generic` +_ArrayLike: TypeAlias = ( + _SupportsArray[dtype[_ScalarT]] + | _NestedSequence[_SupportsArray[dtype[_ScalarT]]] +) + +# A union representing array-like objects; consists of two typevars: +# One representing types that can be parametrized w.r.t. `np.dtype` +# and another one for the rest +_DualArrayLike: TypeAlias = ( + _SupportsArray[_DTypeT] + | _NestedSequence[_SupportsArray[_DTypeT]] + | _T + | _NestedSequence[_T] +) + +if sys.version_info >= (3, 12): + from collections.abc import Buffer as _Buffer +else: + @runtime_checkable + class _Buffer(Protocol): + def __buffer__(self, flags: int, /) -> memoryview: ... + +ArrayLike: TypeAlias = _Buffer | _DualArrayLike[dtype[Any], complex | bytes | str] + +# `ArrayLike_co`: array-like objects that can be coerced into `X` +# given the casting rules `same_kind` +_ArrayLikeBool_co: TypeAlias = _DualArrayLike[dtype[np.bool], bool] +_ArrayLikeUInt_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.unsignedinteger], bool] +_ArrayLikeInt_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.integer], int] +_ArrayLikeFloat_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.integer | np.floating], float] +_ArrayLikeComplex_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.number], complex] +_ArrayLikeNumber_co: TypeAlias = _ArrayLikeComplex_co +_ArrayLikeTD64_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.integer | np.timedelta64], int] +_ArrayLikeDT64_co: TypeAlias = _ArrayLike[np.datetime64] +_ArrayLikeObject_co: TypeAlias = _ArrayLike[np.object_] + +_ArrayLikeVoid_co: TypeAlias = _ArrayLike[np.void] +_ArrayLikeBytes_co: TypeAlias = _DualArrayLike[dtype[np.bytes_], bytes] +_ArrayLikeStr_co: TypeAlias = _DualArrayLike[dtype[np.str_], str] +_ArrayLikeString_co: TypeAlias = _DualArrayLike[StringDType, str] +_ArrayLikeAnyString_co: TypeAlias = _DualArrayLike[dtype[np.character] | StringDType, bytes | str] + +__Float64_co: TypeAlias = np.floating[_64Bit] | np.float32 | np.float16 | np.integer | np.bool +__Complex128_co: TypeAlias = np.number[_64Bit] | np.number[_32Bit] | np.float16 | np.integer | np.bool +_ArrayLikeFloat64_co: TypeAlias = _DualArrayLike[dtype[__Float64_co], float] +_ArrayLikeComplex128_co: TypeAlias = _DualArrayLike[dtype[__Complex128_co], complex] + +# NOTE: This includes `builtins.bool`, but not `numpy.bool`. +_ArrayLikeInt: TypeAlias = _DualArrayLike[dtype[np.integer], int] diff --git a/local_packages/numpy/_typing/_char_codes.py b/local_packages/numpy/_typing/_char_codes.py new file mode 100644 index 0000000..7b6fad2 --- /dev/null +++ b/local_packages/numpy/_typing/_char_codes.py @@ -0,0 +1,213 @@ +from typing import Literal + +_BoolCodes = Literal[ + "bool", "bool_", + "?", "|?", "=?", "?", + "b1", "|b1", "=b1", "b1", +] # fmt: skip + +_UInt8Codes = Literal["uint8", "u1", "|u1", "=u1", "u1"] +_UInt16Codes = Literal["uint16", "u2", "|u2", "=u2", "u2"] +_UInt32Codes = Literal["uint32", "u4", "|u4", "=u4", "u4"] +_UInt64Codes = Literal["uint64", "u8", "|u8", "=u8", "u8"] + +_Int8Codes = Literal["int8", "i1", "|i1", "=i1", "i1"] +_Int16Codes = Literal["int16", "i2", "|i2", "=i2", "i2"] +_Int32Codes = Literal["int32", "i4", "|i4", "=i4", "i4"] +_Int64Codes = Literal["int64", "i8", "|i8", "=i8", "i8"] + +_Float16Codes = Literal["float16", "f2", "|f2", "=f2", "f2"] +_Float32Codes = Literal["float32", "f4", "|f4", "=f4", "f4"] +_Float64Codes = Literal["float64", "f8", "|f8", "=f8", "f8"] + +_Complex64Codes = Literal["complex64", "c8", "|c8", "=c8", "c8"] +_Complex128Codes = Literal["complex128", "c16", "|c16", "=c16", "c16"] + +_ByteCodes = Literal["byte", "b", "|b", "=b", "b"] +_ShortCodes = Literal["short", "h", "|h", "=h", "h"] +_IntCCodes = Literal["intc", "i", "|i", "=i", "i"] +_IntPCodes = Literal["intp", "int", "int_", "n", "|n", "=n", "n"] +_LongCodes = Literal["long", "l", "|l", "=l", "l"] +_IntCodes = _IntPCodes +_LongLongCodes = Literal["longlong", "q", "|q", "=q", "q"] + +_UByteCodes = Literal["ubyte", "B", "|B", "=B", "B"] +_UShortCodes = Literal["ushort", "H", "|H", "=H", "H"] +_UIntCCodes = Literal["uintc", "I", "|I", "=I", "I"] +_UIntPCodes = Literal["uintp", "uint", "N", "|N", "=N", "N"] +_ULongCodes = Literal["ulong", "L", "|L", "=L", "L"] +_UIntCodes = _UIntPCodes +_ULongLongCodes = Literal["ulonglong", "Q", "|Q", "=Q", "Q"] + +_HalfCodes = Literal["half", "e", "|e", "=e", "e"] +_SingleCodes = Literal["single", "f", "|f", "=f", "f"] +_DoubleCodes = Literal["double", "float", "d", "|d", "=d", "d"] +_LongDoubleCodes = Literal["longdouble", "g", "|g", "=g", "g"] + +_CSingleCodes = Literal["csingle", "F", "|F", "=F", "F"] +_CDoubleCodes = Literal["cdouble", "complex", "D", "|D", "=D", "D"] +_CLongDoubleCodes = Literal["clongdouble", "G", "|G", "=G", "G"] + +_StrCodes = Literal["str", "str_", "unicode", "U", "|U", "=U", "U"] +_BytesCodes = Literal["bytes", "bytes_", "S", "|S", "=S", "S"] +_VoidCodes = Literal["void", "V", "|V", "=V", "V"] +_ObjectCodes = Literal["object", "object_", "O", "|O", "=O", "O"] + +_DT64Codes = Literal[ + "datetime64", "|datetime64", "=datetime64", + "datetime64", + "datetime64[Y]", "|datetime64[Y]", "=datetime64[Y]", + "datetime64[Y]", + "datetime64[M]", "|datetime64[M]", "=datetime64[M]", + "datetime64[M]", + "datetime64[W]", "|datetime64[W]", "=datetime64[W]", + "datetime64[W]", + "datetime64[D]", "|datetime64[D]", "=datetime64[D]", + "datetime64[D]", + "datetime64[h]", "|datetime64[h]", "=datetime64[h]", + "datetime64[h]", + "datetime64[m]", "|datetime64[m]", "=datetime64[m]", + "datetime64[m]", + "datetime64[s]", "|datetime64[s]", "=datetime64[s]", + "datetime64[s]", + "datetime64[ms]", "|datetime64[ms]", "=datetime64[ms]", + "datetime64[ms]", + "datetime64[us]", "|datetime64[us]", "=datetime64[us]", + "datetime64[us]", + "datetime64[ns]", "|datetime64[ns]", "=datetime64[ns]", + "datetime64[ns]", + "datetime64[ps]", "|datetime64[ps]", "=datetime64[ps]", + "datetime64[ps]", + "datetime64[fs]", "|datetime64[fs]", "=datetime64[fs]", + "datetime64[fs]", + "datetime64[as]", "|datetime64[as]", "=datetime64[as]", + "datetime64[as]", + "M", "|M", "=M", "M", + "M8", "|M8", "=M8", "M8", + "M8[Y]", "|M8[Y]", "=M8[Y]", "M8[Y]", + "M8[M]", "|M8[M]", "=M8[M]", "M8[M]", + "M8[W]", "|M8[W]", "=M8[W]", "M8[W]", + "M8[D]", "|M8[D]", "=M8[D]", "M8[D]", + "M8[h]", "|M8[h]", "=M8[h]", "M8[h]", + "M8[m]", "|M8[m]", "=M8[m]", "M8[m]", + "M8[s]", "|M8[s]", "=M8[s]", "M8[s]", + "M8[ms]", "|M8[ms]", "=M8[ms]", "M8[ms]", + "M8[us]", "|M8[us]", "=M8[us]", "M8[us]", + "M8[ns]", "|M8[ns]", "=M8[ns]", "M8[ns]", + "M8[ps]", "|M8[ps]", "=M8[ps]", "M8[ps]", + "M8[fs]", "|M8[fs]", "=M8[fs]", "M8[fs]", + "M8[as]", "|M8[as]", "=M8[as]", "M8[as]", +] +_TD64Codes = Literal[ + "timedelta64", "|timedelta64", "=timedelta64", + "timedelta64", + "timedelta64[Y]", "|timedelta64[Y]", "=timedelta64[Y]", + "timedelta64[Y]", + "timedelta64[M]", "|timedelta64[M]", "=timedelta64[M]", + "timedelta64[M]", + "timedelta64[W]", "|timedelta64[W]", "=timedelta64[W]", + "timedelta64[W]", + "timedelta64[D]", "|timedelta64[D]", "=timedelta64[D]", + "timedelta64[D]", + "timedelta64[h]", "|timedelta64[h]", "=timedelta64[h]", + "timedelta64[h]", + "timedelta64[m]", "|timedelta64[m]", "=timedelta64[m]", + "timedelta64[m]", + "timedelta64[s]", "|timedelta64[s]", "=timedelta64[s]", + "timedelta64[s]", + "timedelta64[ms]", "|timedelta64[ms]", "=timedelta64[ms]", + "timedelta64[ms]", + "timedelta64[us]", "|timedelta64[us]", "=timedelta64[us]", + "timedelta64[us]", + "timedelta64[ns]", "|timedelta64[ns]", "=timedelta64[ns]", + "timedelta64[ns]", + "timedelta64[ps]", "|timedelta64[ps]", "=timedelta64[ps]", + "timedelta64[ps]", + "timedelta64[fs]", "|timedelta64[fs]", "=timedelta64[fs]", + "timedelta64[fs]", + "timedelta64[as]", "|timedelta64[as]", "=timedelta64[as]", + "timedelta64[as]", + "m", "|m", "=m", "m", + "m8", "|m8", "=m8", "m8", + "m8[Y]", "|m8[Y]", "=m8[Y]", "m8[Y]", + "m8[M]", "|m8[M]", "=m8[M]", "m8[M]", + "m8[W]", "|m8[W]", "=m8[W]", "m8[W]", + "m8[D]", "|m8[D]", "=m8[D]", "m8[D]", + "m8[h]", "|m8[h]", "=m8[h]", "m8[h]", + "m8[m]", "|m8[m]", "=m8[m]", "m8[m]", + "m8[s]", "|m8[s]", "=m8[s]", "m8[s]", + "m8[ms]", "|m8[ms]", "=m8[ms]", "m8[ms]", + "m8[us]", "|m8[us]", "=m8[us]", "m8[us]", + "m8[ns]", "|m8[ns]", "=m8[ns]", "m8[ns]", + "m8[ps]", "|m8[ps]", "=m8[ps]", "m8[ps]", + "m8[fs]", "|m8[fs]", "=m8[fs]", "m8[fs]", + "m8[as]", "|m8[as]", "=m8[as]", "m8[as]", +] + +# NOTE: `StringDType' has no scalar type, and therefore has no name that can +# be passed to the `dtype` constructor +_StringCodes = Literal["T", "|T", "=T", "T"] + +# NOTE: Nested literals get flattened and de-duplicated at runtime, which isn't +# the case for a `Union` of `Literal`s. +# So even though they're equivalent when type-checking, they differ at runtime. +# Another advantage of nesting, is that they always have a "flat" +# `Literal.__args__`, which is a tuple of *literally* all its literal values. + +_UnsignedIntegerCodes = Literal[ + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _UIntCodes, + _UByteCodes, + _UShortCodes, + _UIntCCodes, + _ULongCodes, + _ULongLongCodes, +] +_SignedIntegerCodes = Literal[ + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _IntCodes, + _ByteCodes, + _ShortCodes, + _IntCCodes, + _LongCodes, + _LongLongCodes, +] +_FloatingCodes = Literal[ + _Float16Codes, + _Float32Codes, + _Float64Codes, + _HalfCodes, + _SingleCodes, + _DoubleCodes, + _LongDoubleCodes +] +_ComplexFloatingCodes = Literal[ + _Complex64Codes, + _Complex128Codes, + _CSingleCodes, + _CDoubleCodes, + _CLongDoubleCodes, +] +_IntegerCodes = Literal[_UnsignedIntegerCodes, _SignedIntegerCodes] +_InexactCodes = Literal[_FloatingCodes, _ComplexFloatingCodes] +_NumberCodes = Literal[_IntegerCodes, _InexactCodes] + +_CharacterCodes = Literal[_StrCodes, _BytesCodes] +_FlexibleCodes = Literal[_VoidCodes, _CharacterCodes] + +_GenericCodes = Literal[ + _BoolCodes, + _NumberCodes, + _FlexibleCodes, + _DT64Codes, + _TD64Codes, + _ObjectCodes, + # TODO: add `_StringCodes` once it has a scalar type + # _StringCodes, +] diff --git a/local_packages/numpy/_typing/_dtype_like.py b/local_packages/numpy/_typing/_dtype_like.py new file mode 100644 index 0000000..34c4bd4 --- /dev/null +++ b/local_packages/numpy/_typing/_dtype_like.py @@ -0,0 +1,114 @@ +from collections.abc import Sequence # noqa: F811 +from typing import Any, Protocol, TypeAlias, TypedDict, TypeVar + +import numpy as np + +from ._char_codes import ( + _BoolCodes, + _BytesCodes, + _ComplexFloatingCodes, + _DT64Codes, + _FloatingCodes, + _NumberCodes, + _ObjectCodes, + _SignedIntegerCodes, + _StrCodes, + _TD64Codes, + _UnsignedIntegerCodes, + _VoidCodes, +) + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, covariant=True) + +_DTypeLikeNested: TypeAlias = Any # TODO: wait for support for recursive types + + +# Mandatory keys +class _DTypeDictBase(TypedDict): + names: Sequence[str] + formats: Sequence[_DTypeLikeNested] + + +# Mandatory + optional keys +class _DTypeDict(_DTypeDictBase, total=False): + # Only `str` elements are usable as indexing aliases, + # but `titles` can in principle accept any object + offsets: Sequence[int] + titles: Sequence[Any] + itemsize: int + aligned: bool + + +class _HasDType(Protocol[_DTypeT_co]): + @property + def dtype(self) -> _DTypeT_co: ... + + +class _HasNumPyDType(Protocol[_DTypeT_co]): + @property + def __numpy_dtype__(self, /) -> _DTypeT_co: ... + + +_SupportsDType: TypeAlias = _HasDType[_DTypeT] | _HasNumPyDType[_DTypeT] + + +# A subset of `npt.DTypeLike` that can be parametrized w.r.t. `np.generic` +_DTypeLike: TypeAlias = type[_ScalarT] | np.dtype[_ScalarT] | _SupportsDType[np.dtype[_ScalarT]] + + +# Would create a dtype[np.void] +_VoidDTypeLike: TypeAlias = ( + # If a tuple, then it can be either: + # - (flexible_dtype, itemsize) + # - (fixed_dtype, shape) + # - (base_dtype, new_dtype) + # But because `_DTypeLikeNested = Any`, the first two cases are redundant + + # tuple[_DTypeLikeNested, int] | tuple[_DTypeLikeNested, _ShapeLike] | + tuple[_DTypeLikeNested, _DTypeLikeNested] + + # [(field_name, field_dtype, field_shape), ...] + # The type here is quite broad because NumPy accepts quite a wide + # range of inputs inside the list; see the tests for some examples. + | list[Any] + + # {'names': ..., 'formats': ..., 'offsets': ..., 'titles': ..., 'itemsize': ...} + | _DTypeDict +) + +# Aliases for commonly used dtype-like objects. +# Note that the precision of `np.number` subclasses is ignored herein. +_DTypeLikeBool: TypeAlias = type[bool] | _DTypeLike[np.bool] | _BoolCodes +_DTypeLikeInt: TypeAlias = ( + type[int] | _DTypeLike[np.signedinteger] | _SignedIntegerCodes +) +_DTypeLikeUInt: TypeAlias = _DTypeLike[np.unsignedinteger] | _UnsignedIntegerCodes +_DTypeLikeFloat: TypeAlias = type[float] | _DTypeLike[np.floating] | _FloatingCodes +_DTypeLikeComplex: TypeAlias = ( + type[complex] | _DTypeLike[np.complexfloating] | _ComplexFloatingCodes +) +_DTypeLikeComplex_co: TypeAlias = ( + type[complex] | _DTypeLike[np.bool | np.number] | _BoolCodes | _NumberCodes +) +_DTypeLikeDT64: TypeAlias = _DTypeLike[np.timedelta64] | _TD64Codes +_DTypeLikeTD64: TypeAlias = _DTypeLike[np.datetime64] | _DT64Codes +_DTypeLikeBytes: TypeAlias = type[bytes] | _DTypeLike[np.bytes_] | _BytesCodes +_DTypeLikeStr: TypeAlias = type[str] | _DTypeLike[np.str_] | _StrCodes +_DTypeLikeVoid: TypeAlias = ( + type[memoryview] | _DTypeLike[np.void] | _VoidDTypeLike | _VoidCodes +) +_DTypeLikeObject: TypeAlias = type[object] | _DTypeLike[np.object_] | _ObjectCodes + + +# Anything that can be coerced into numpy.dtype. +# Reference: https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html +DTypeLike: TypeAlias = _DTypeLike[Any] | _VoidDTypeLike | str + +# NOTE: while it is possible to provide the dtype as a dict of +# dtype-like objects (e.g. `{'field1': ..., 'field2': ..., ...}`), +# this syntax is officially discouraged and +# therefore not included in the type-union defining `DTypeLike`. +# +# See https://github.com/numpy/numpy/issues/16891 for more details. diff --git a/local_packages/numpy/_typing/_extended_precision.py b/local_packages/numpy/_typing/_extended_precision.py new file mode 100644 index 0000000..c707e72 --- /dev/null +++ b/local_packages/numpy/_typing/_extended_precision.py @@ -0,0 +1,15 @@ +"""A module with platform-specific extended precision +`numpy.number` subclasses. + +The subclasses are defined here (instead of ``__init__.pyi``) such +that they can be imported conditionally via the numpy's mypy plugin. +""" + +import numpy as np + +from . import _96Bit, _128Bit + +float96 = np.floating[_96Bit] +float128 = np.floating[_128Bit] +complex192 = np.complexfloating[_96Bit, _96Bit] +complex256 = np.complexfloating[_128Bit, _128Bit] diff --git a/local_packages/numpy/_typing/_nbit.py b/local_packages/numpy/_typing/_nbit.py new file mode 100644 index 0000000..60bce32 --- /dev/null +++ b/local_packages/numpy/_typing/_nbit.py @@ -0,0 +1,19 @@ +"""A module with the precisions of platform-specific `~numpy.number`s.""" + +from typing import TypeAlias + +from ._nbit_base import _8Bit, _16Bit, _32Bit, _64Bit, _96Bit, _128Bit + +# To-be replaced with a `npt.NBitBase` subclass by numpy's mypy plugin +_NBitByte: TypeAlias = _8Bit +_NBitShort: TypeAlias = _16Bit +_NBitIntC: TypeAlias = _32Bit +_NBitIntP: TypeAlias = _32Bit | _64Bit +_NBitInt: TypeAlias = _NBitIntP +_NBitLong: TypeAlias = _32Bit | _64Bit +_NBitLongLong: TypeAlias = _64Bit + +_NBitHalf: TypeAlias = _16Bit +_NBitSingle: TypeAlias = _32Bit +_NBitDouble: TypeAlias = _64Bit +_NBitLongDouble: TypeAlias = _64Bit | _96Bit | _128Bit diff --git a/local_packages/numpy/_typing/_nbit_base.py b/local_packages/numpy/_typing/_nbit_base.py new file mode 100644 index 0000000..28d3e63 --- /dev/null +++ b/local_packages/numpy/_typing/_nbit_base.py @@ -0,0 +1,94 @@ +"""A module with the precisions of generic `~numpy.number` types.""" +from typing import final + +from numpy._utils import set_module + + +@final # Disallow the creation of arbitrary `NBitBase` subclasses +@set_module("numpy.typing") +class NBitBase: + """ + A type representing `numpy.number` precision during static type checking. + + Used exclusively for the purpose of static type checking, `NBitBase` + represents the base of a hierarchical set of subclasses. + Each subsequent subclass is herein used for representing a lower level + of precision, *e.g.* ``64Bit > 32Bit > 16Bit``. + + .. versionadded:: 1.20 + + .. deprecated:: 2.3 + Use ``@typing.overload`` or a ``TypeVar`` with a scalar-type as upper + bound, instead. + + Examples + -------- + Below is a typical usage example: `NBitBase` is herein used for annotating + a function that takes a float and integer of arbitrary precision + as arguments and returns a new float of whichever precision is largest + (*e.g.* ``np.float16 + np.int64 -> np.float64``). + + .. code-block:: python + + >>> from typing import TypeVar, TYPE_CHECKING + >>> import numpy as np + >>> import numpy.typing as npt + + >>> S = TypeVar("S", bound=npt.NBitBase) + >>> T = TypeVar("T", bound=npt.NBitBase) + + >>> def add(a: np.floating[S], b: np.integer[T]) -> np.floating[S | T]: + ... return a + b + + >>> a = np.float16() + >>> b = np.int64() + >>> out = add(a, b) + + >>> if TYPE_CHECKING: + ... reveal_locals() + ... # note: Revealed local types are: + ... # note: a: numpy.floating[numpy.typing._16Bit*] + ... # note: b: numpy.signedinteger[numpy.typing._64Bit*] + ... # note: out: numpy.floating[numpy.typing._64Bit*] + + """ + # Deprecated in NumPy 2.3, 2025-05-01 + + def __init_subclass__(cls) -> None: + allowed_names = { + "NBitBase", "_128Bit", "_96Bit", "_64Bit", "_32Bit", "_16Bit", "_8Bit" + } + if cls.__name__ not in allowed_names: + raise TypeError('cannot inherit from final class "NBitBase"') + super().__init_subclass__() + +@final +@set_module("numpy._typing") +# Silence errors about subclassing a `@final`-decorated class +class _128Bit(NBitBase): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] + pass + +@final +@set_module("numpy._typing") +class _96Bit(_128Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] + pass + +@final +@set_module("numpy._typing") +class _64Bit(_96Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] + pass + +@final +@set_module("numpy._typing") +class _32Bit(_64Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] + pass + +@final +@set_module("numpy._typing") +class _16Bit(_32Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] + pass + +@final +@set_module("numpy._typing") +class _8Bit(_16Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] + pass diff --git a/local_packages/numpy/_typing/_nbit_base.pyi b/local_packages/numpy/_typing/_nbit_base.pyi new file mode 100644 index 0000000..d88c9f4 --- /dev/null +++ b/local_packages/numpy/_typing/_nbit_base.pyi @@ -0,0 +1,39 @@ +# pyright: reportDeprecated=false +# pyright: reportGeneralTypeIssues=false +# mypy: disable-error-code=misc + +from typing import final +from typing_extensions import deprecated + +# Deprecated in NumPy 2.3, 2025-05-01 +@deprecated( + "`NBitBase` is deprecated and will be removed from numpy.typing in the " + "future. Use `@typing.overload` or a `TypeVar` with a scalar-type as upper " + "bound, instead. (deprecated in NumPy 2.3)", +) +@final +class NBitBase: ... + +@final +class _256Bit(NBitBase): ... + +@final +class _128Bit(_256Bit): ... + +@final +class _96Bit(_128Bit): ... + +@final +class _80Bit(_96Bit): ... + +@final +class _64Bit(_80Bit): ... + +@final +class _32Bit(_64Bit): ... + +@final +class _16Bit(_32Bit): ... + +@final +class _8Bit(_16Bit): ... diff --git a/local_packages/numpy/_typing/_nested_sequence.py b/local_packages/numpy/_typing/_nested_sequence.py new file mode 100644 index 0000000..e3362a9 --- /dev/null +++ b/local_packages/numpy/_typing/_nested_sequence.py @@ -0,0 +1,79 @@ +"""A module containing the `_NestedSequence` protocol.""" + +from typing import TYPE_CHECKING, Any, Protocol, TypeVar, runtime_checkable + +if TYPE_CHECKING: + from collections.abc import Iterator + +__all__ = ["_NestedSequence"] + +_T_co = TypeVar("_T_co", covariant=True) + + +@runtime_checkable +class _NestedSequence(Protocol[_T_co]): + """A protocol for representing nested sequences. + + Warning + ------- + `_NestedSequence` currently does not work in combination with typevars, + *e.g.* ``def func(a: _NestedSequnce[T]) -> T: ...``. + + See Also + -------- + collections.abc.Sequence + ABCs for read-only and mutable :term:`sequences`. + + Examples + -------- + .. code-block:: python + + >>> from typing import TYPE_CHECKING + >>> import numpy as np + >>> from numpy._typing import _NestedSequence + + >>> def get_dtype(seq: _NestedSequence[float]) -> np.dtype[np.float64]: + ... return np.asarray(seq).dtype + + >>> a = get_dtype([1.0]) + >>> b = get_dtype([[1.0]]) + >>> c = get_dtype([[[1.0]]]) + >>> d = get_dtype([[[[1.0]]]]) + + >>> if TYPE_CHECKING: + ... reveal_locals() + ... # note: Revealed local types are: + ... # note: a: numpy.dtype[numpy.floating[numpy._typing._64Bit]] + ... # note: b: numpy.dtype[numpy.floating[numpy._typing._64Bit]] + ... # note: c: numpy.dtype[numpy.floating[numpy._typing._64Bit]] + ... # note: d: numpy.dtype[numpy.floating[numpy._typing._64Bit]] + + """ + + def __len__(self, /) -> int: + """Implement ``len(self)``.""" + raise NotImplementedError + + def __getitem__(self, index: int, /) -> "_T_co | _NestedSequence[_T_co]": + """Implement ``self[x]``.""" + raise NotImplementedError + + def __contains__(self, x: object, /) -> bool: + """Implement ``x in self``.""" + raise NotImplementedError + + def __iter__(self, /) -> "Iterator[_T_co | _NestedSequence[_T_co]]": + """Implement ``iter(self)``.""" + raise NotImplementedError + + def __reversed__(self, /) -> "Iterator[_T_co | _NestedSequence[_T_co]]": + """Implement ``reversed(self)``.""" + raise NotImplementedError + + def count(self, value: Any, /) -> int: + """Return the number of occurrences of `value`.""" + raise NotImplementedError + + def index(self, value: Any, /) -> int: + """Return the first index of `value`.""" + raise NotImplementedError diff --git a/local_packages/numpy/_typing/_scalars.py b/local_packages/numpy/_typing/_scalars.py new file mode 100644 index 0000000..b0de66d --- /dev/null +++ b/local_packages/numpy/_typing/_scalars.py @@ -0,0 +1,20 @@ +from typing import Any, TypeAlias + +import numpy as np + +# NOTE: `_StrLike_co` and `_BytesLike_co` are pointless, as `np.str_` and +# `np.bytes_` are already subclasses of their builtin counterpart +_CharLike_co: TypeAlias = str | bytes + +# The `Like_co` type-aliases below represent all scalars that can be +# coerced into `` (with the casting rule `same_kind`) +_BoolLike_co: TypeAlias = bool | np.bool +_UIntLike_co: TypeAlias = bool | np.unsignedinteger | np.bool +_IntLike_co: TypeAlias = int | np.integer | np.bool +_FloatLike_co: TypeAlias = float | np.floating | np.integer | np.bool +_ComplexLike_co: TypeAlias = complex | np.number | np.bool +_NumberLike_co: TypeAlias = _ComplexLike_co +_TD64Like_co: TypeAlias = int | np.timedelta64 | np.integer | np.bool +# `_VoidLike_co` is technically not a scalar, but it's close enough +_VoidLike_co: TypeAlias = tuple[Any, ...] | np.void +_ScalarLike_co: TypeAlias = complex | str | bytes | np.generic diff --git a/local_packages/numpy/_typing/_shape.py b/local_packages/numpy/_typing/_shape.py new file mode 100644 index 0000000..e297aef --- /dev/null +++ b/local_packages/numpy/_typing/_shape.py @@ -0,0 +1,8 @@ +from collections.abc import Sequence +from typing import Any, SupportsIndex, TypeAlias + +_Shape: TypeAlias = tuple[int, ...] +_AnyShape: TypeAlias = tuple[Any, ...] + +# Anything that can be coerced to a shape tuple +_ShapeLike: TypeAlias = SupportsIndex | Sequence[SupportsIndex] diff --git a/local_packages/numpy/_typing/_ufunc.py b/local_packages/numpy/_typing/_ufunc.py new file mode 100644 index 0000000..db52a1f --- /dev/null +++ b/local_packages/numpy/_typing/_ufunc.py @@ -0,0 +1,7 @@ +from numpy import ufunc + +_UFunc_Nin1_Nout1 = ufunc +_UFunc_Nin2_Nout1 = ufunc +_UFunc_Nin1_Nout2 = ufunc +_UFunc_Nin2_Nout2 = ufunc +_GUFunc_Nin2_Nout1 = ufunc diff --git a/local_packages/numpy/_typing/_ufunc.pyi b/local_packages/numpy/_typing/_ufunc.pyi new file mode 100644 index 0000000..9d3fa0e --- /dev/null +++ b/local_packages/numpy/_typing/_ufunc.pyi @@ -0,0 +1,975 @@ +"""A module with private type-check-only `numpy.ufunc` subclasses. + +The signatures of the ufuncs are too varied to reasonably type +with a single class. So instead, `ufunc` has been expanded into +four private subclasses, one for each combination of +`~ufunc.nin` and `~ufunc.nout`. +""" # noqa: PYI021 + +from _typeshed import Incomplete +from types import EllipsisType +from typing import ( + Any, + Generic, + Literal, + LiteralString, + Never, + NoReturn, + Protocol, + SupportsIndex, + TypeAlias, + TypedDict, + TypeVar, + Unpack, + overload, + type_check_only, +) + +import numpy as np +from numpy import _CastingKind, _OrderKACF, ufunc +from numpy.typing import NDArray + +from ._array_like import ArrayLike, _ArrayLikeBool_co, _ArrayLikeInt_co +from ._dtype_like import DTypeLike +from ._scalars import _ScalarLike_co +from ._shape import _ShapeLike + +_T = TypeVar("_T") +_2Tuple: TypeAlias = tuple[_T, _T] +_3Tuple: TypeAlias = tuple[_T, _T, _T] +_4Tuple: TypeAlias = tuple[_T, _T, _T, _T] + +_2PTuple: TypeAlias = tuple[_T, _T, *tuple[_T, ...]] +_3PTuple: TypeAlias = tuple[_T, _T, _T, *tuple[_T, ...]] +_4PTuple: TypeAlias = tuple[_T, _T, _T, _T, *tuple[_T, ...]] + +_NTypes = TypeVar("_NTypes", bound=int, covariant=True) +_IDType = TypeVar("_IDType", covariant=True) +_NameType = TypeVar("_NameType", bound=LiteralString, covariant=True) +_Signature = TypeVar("_Signature", bound=LiteralString, covariant=True) + +_NIn = TypeVar("_NIn", bound=int, covariant=True) +_NOut = TypeVar("_NOut", bound=int, covariant=True) +_ReturnType_co = TypeVar("_ReturnType_co", covariant=True) +_ArrayT = TypeVar("_ArrayT", bound=np.ndarray) + +@type_check_only +class _SupportsArrayUFunc(Protocol): + def __array_ufunc__( + self, + ufunc: ufunc, + method: Literal["__call__", "reduce", "reduceat", "accumulate", "outer", "at"], + *inputs: Any, + **kwargs: Any, + ) -> Any: ... + +@type_check_only +class _UFunc3Kwargs(TypedDict, total=False): + where: _ArrayLikeBool_co | None + casting: _CastingKind + order: _OrderKACF + subok: bool + signature: _3Tuple[str | None] | str | None + +@type_check_only +class _ReduceKwargs(TypedDict, total=False): + initial: Incomplete # = + where: _ArrayLikeBool_co | None # = True + +# NOTE: `reduce`, `accumulate`, `reduceat` and `outer` raise a ValueError for +# ufuncs that don't accept two input arguments and return one output argument. +# In such cases the respective methods return `NoReturn` + +# NOTE: Similarly, `at` won't be defined for ufuncs that return +# multiple outputs; in such cases `at` is typed to return `NoReturn` + +# NOTE: If 2 output types are returned then `out` must be a +# 2-tuple of arrays. Otherwise `None` or a plain array are also acceptable + +# pyright: reportIncompatibleMethodOverride=false + +@type_check_only +class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... + @property + def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[1]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[2]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + x1: _ScalarLike_co, + /, + out: None = None, + *, + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, + casting: _CastingKind = ..., + order: _OrderKACF = ..., + subok: bool = ..., + signature: str | _2Tuple[str | None] = ..., + ) -> Incomplete: ... + @overload + def __call__( + self, + x1: ArrayLike, + /, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, + *, + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, + casting: _CastingKind = ..., + order: _OrderKACF = ..., + subok: bool = ..., + signature: str | _2Tuple[str | None] = ..., + ) -> NDArray[Incomplete]: ... + @overload + def __call__( + self, + x1: _SupportsArrayUFunc, + /, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, + *, + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, + casting: _CastingKind = ..., + order: _OrderKACF = ..., + subok: bool = ..., + signature: str | _2Tuple[str | None] = ..., + ) -> Incomplete: ... + + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + + def at(self, a: np.ndarray | _SupportsArrayUFunc, indices: _ArrayLikeInt_co, /) -> None: ... # type: ignore[override] + +@type_check_only +class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... + @property + def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[2]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[3]: ... + @property + def signature(self) -> None: ... + + @overload # (scalar, scalar) -> scalar + def __call__( + self, + x1: _ScalarLike_co, + x2: _ScalarLike_co, + /, + out: EllipsisType | None = None, + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> Incomplete: ... + @overload # (array-like, array) -> array + def __call__( + self, + x1: ArrayLike, + x2: np.ndarray, + /, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Incomplete]: ... + @overload # (array, array-like) -> array + def __call__( + self, + x1: np.ndarray, + x2: ArrayLike, + /, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Incomplete]: ... + @overload # (array-like, array-like, out=array) -> array + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: np.ndarray | tuple[np.ndarray], + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Incomplete]: ... + @overload # (array-like, array-like) -> array | scalar + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Incomplete] | Incomplete: ... + + def accumulate( + self, + array: ArrayLike, + /, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: np.ndarray | EllipsisType | None = None, + ) -> NDArray[Incomplete]: ... + + @overload # type: ignore[override] + def reduce( # out=None (default), keepdims=False (default) + self, + array: ArrayLike, + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + out: None = None, + *, + keepdims: Literal[False] = False, + **kwargs: Unpack[_ReduceKwargs], + ) -> Incomplete: ... + @overload # out=ndarray or out=... + def reduce( + self, + array: ArrayLike, + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + *, + out: np.ndarray | EllipsisType, + keepdims: bool = False, + **kwargs: Unpack[_ReduceKwargs], + ) -> NDArray[Incomplete]: ... + @overload # keepdims=True + def reduce( + self, + array: ArrayLike, + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + out: np.ndarray | EllipsisType | None = None, + *, + keepdims: Literal[True], + **kwargs: Unpack[_ReduceKwargs], + ) -> NDArray[Incomplete]: ... + + def reduceat( + self, + array: ArrayLike, + /, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: np.ndarray | EllipsisType | None = None, + ) -> NDArray[Incomplete]: ... + + @overload # type: ignore[override] + def outer( # (scalar, scalar) -> scalar + self, + A: _ScalarLike_co, + B: _ScalarLike_co, + /, + *, + out: None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> Incomplete: ... + @overload # (array-like, array) -> array + def outer( + self, + A: ArrayLike, + B: np.ndarray, + /, + *, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Incomplete]: ... + @overload # (array, array-like) -> array + def outer( + self, + A: np.ndarray, + B: ArrayLike, + /, + *, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Incomplete]: ... + @overload # (array-like, array-like, out=array) -> array + def outer( + self, + A: ArrayLike, + B: ArrayLike, + /, + *, + out: np.ndarray | tuple[np.ndarray] | EllipsisType, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Incomplete]: ... + @overload # (array-like, array-like) -> array | scalar + def outer( + self, + A: ArrayLike, + B: ArrayLike, + /, + *, + out: None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Incomplete] | Incomplete: ... + + def at( # type: ignore[override] + self, + a: np.ndarray | _SupportsArrayUFunc, + indices: _ArrayLikeInt_co, + b: ArrayLike, + /, + ) -> None: ... + +@type_check_only +class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... + @property + def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[1]: ... + @property + def nout(self) -> Literal[2]: ... + @property + def nargs(self) -> Literal[3]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + x1: _ScalarLike_co, + out1: EllipsisType | None = ..., + out2: None = None, + /, + *, + out: EllipsisType | None = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, + casting: _CastingKind = ..., + order: _OrderKACF = ..., + subok: bool = ..., + signature: str | _3Tuple[str | None] = ..., + ) -> _2Tuple[Incomplete]: ... + @overload + def __call__( + self, + x1: ArrayLike, + out1: np.ndarray | EllipsisType | None = ..., + out2: np.ndarray | None = ..., + /, + *, + out: _2Tuple[np.ndarray] | EllipsisType = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, + casting: _CastingKind = ..., + order: _OrderKACF = ..., + subok: bool = ..., + signature: str | _3Tuple[str | None] = ..., + ) -> _2Tuple[NDArray[Incomplete]]: ... + @overload + def __call__( + self, + x1: _SupportsArrayUFunc, + out1: np.ndarray | EllipsisType | None = ..., + out2: np.ndarray | None = ..., + /, + *, + out: _2Tuple[np.ndarray] | EllipsisType = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, + casting: _CastingKind = ..., + order: _OrderKACF = ..., + subok: bool = ..., + signature: str | _3Tuple[str | None] = ..., + ) -> _2Tuple[Incomplete]: ... + + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + def at(self, a: Never, indices: Never, /) -> NoReturn: ... # type: ignore[override] + +@type_check_only +class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... + @property + def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[2]: ... + @property + def nout(self) -> Literal[2]: ... + @property + def nargs(self) -> Literal[4]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + x1: _ScalarLike_co, + x2: _ScalarLike_co, + out1: EllipsisType | None = ..., + out2: None = None, + /, + *, + out: EllipsisType | None = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, + casting: _CastingKind = ..., + order: _OrderKACF = ..., + subok: bool = ..., + signature: str | _4Tuple[str | None] = ..., + ) -> _2Tuple[Incomplete]: ... + @overload + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + out1: np.ndarray | EllipsisType | None = ..., + out2: np.ndarray | None = ..., + /, + *, + out: _2Tuple[np.ndarray] | EllipsisType = ..., + dtype: DTypeLike | None = None, + where: _ArrayLikeBool_co | None = True, + casting: _CastingKind = ..., + order: _OrderKACF = ..., + subok: bool = ..., + signature: str | _4Tuple[str | None] = ..., + ) -> _2Tuple[NDArray[Incomplete]]: ... + + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + def at(self, a: Never, indices: Never, b: Never, /) -> NoReturn: ... # type: ignore[override] + +@type_check_only +class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... + @property + def __qualname__(self) -> _NameType: ... # pyright: ignore[reportIncompatibleVariableOverride] + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[2]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[3]: ... + @property + def signature(self) -> _Signature: ... + + # Scalar for 1D array-likes; ndarray otherwise + @overload + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: EllipsisType | None = None, + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = ..., + order: _OrderKACF = ..., + subok: bool = ..., + signature: str | _3Tuple[str | None] = ..., + axes: list[_2Tuple[SupportsIndex]] = ..., + ) -> Incomplete: ... + @overload + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: np.ndarray | tuple[np.ndarray] | EllipsisType, + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = ..., + order: _OrderKACF = ..., + subok: bool = ..., + signature: str | _3Tuple[str | None] = ..., + axes: list[_2Tuple[SupportsIndex]] = ..., + ) -> NDArray[Incomplete]: ... + + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + def at(self, a: Never, indices: Never, b: Never, /) -> NoReturn: ... # type: ignore[override] + +@type_check_only +class _PyFunc_Kwargs_Nargs2(TypedDict, total=False): + where: _ArrayLikeBool_co | None + casting: _CastingKind + order: _OrderKACF + dtype: DTypeLike + subok: bool + signature: str | tuple[DTypeLike, DTypeLike] + +@type_check_only +class _PyFunc_Kwargs_Nargs3(TypedDict, total=False): + where: _ArrayLikeBool_co | None + casting: _CastingKind + order: _OrderKACF + dtype: DTypeLike + subok: bool + signature: str | tuple[DTypeLike, DTypeLike, DTypeLike] + +@type_check_only +class _PyFunc_Kwargs_Nargs3P(TypedDict, total=False): + where: _ArrayLikeBool_co | None + casting: _CastingKind + order: _OrderKACF + dtype: DTypeLike + subok: bool + signature: str | _3PTuple[DTypeLike] + +@type_check_only +class _PyFunc_Kwargs_Nargs4P(TypedDict, total=False): + where: _ArrayLikeBool_co | None + casting: _CastingKind + order: _OrderKACF + dtype: DTypeLike + subok: bool + signature: str | _4PTuple[DTypeLike] + +@type_check_only +class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: ignore[misc] + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[1]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[2]: ... + @property + def ntypes(self) -> Literal[1]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + x1: _ScalarLike_co, + /, + out: EllipsisType | None = None, + **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], + ) -> _ReturnType_co: ... + @overload + def __call__( + self, + x1: ArrayLike, + /, + out: EllipsisType | None = None, + **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], + ) -> _ReturnType_co | NDArray[np.object_]: ... + @overload + def __call__( + self, + x1: ArrayLike, + /, + out: _ArrayT | tuple[_ArrayT], + **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], + ) -> _ArrayT: ... + @overload + def __call__( + self, + x1: _SupportsArrayUFunc, + /, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, + **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], + ) -> Incomplete: ... + + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + + def at(self, a: np.ndarray | _SupportsArrayUFunc, indices: _ArrayLikeInt_co, /) -> None: ... # type: ignore[override] + +@type_check_only +class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: ignore[misc] + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[2]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[3]: ... + @property + def ntypes(self) -> Literal[1]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + x1: _ScalarLike_co, + x2: _ScalarLike_co, + /, + out: EllipsisType | None = None, + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ReturnType_co: ... + @overload + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: EllipsisType | None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ReturnType_co | NDArray[np.object_]: ... + @overload + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: _ArrayT | tuple[_ArrayT], + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ArrayT: ... + @overload + def __call__( + self, + x1: _SupportsArrayUFunc, + x2: _SupportsArrayUFunc | ArrayLike, + /, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> Incomplete: ... + @overload + def __call__( + self, + x1: ArrayLike, + x2: _SupportsArrayUFunc, + /, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> Incomplete: ... + + @overload # type: ignore[override] + def accumulate( + self, + array: ArrayLike, + /, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: EllipsisType | None = None, + ) -> NDArray[np.object_]: ... + @overload + def accumulate( + self, + array: ArrayLike, + /, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ) -> _ArrayT: ... + + @overload # type: ignore[override] + def reduce( # out=array + self, + array: ArrayLike, + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + *, + out: _ArrayT | tuple[_ArrayT], + keepdims: bool = False, + **kwargs: Unpack[_ReduceKwargs], + ) -> _ArrayT: ... + @overload # out=... + def reduce( + self, + array: ArrayLike, + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + *, + out: EllipsisType, + keepdims: bool = False, + **kwargs: Unpack[_ReduceKwargs], + ) -> NDArray[np.object_]: ... + @overload # keepdims=True + def reduce( + self, + array: ArrayLike, + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + out: EllipsisType | None = None, + *, + keepdims: Literal[True], + **kwargs: Unpack[_ReduceKwargs], + ) -> NDArray[np.object_]: ... + @overload + def reduce( + self, + array: ArrayLike, + /, + axis: _ShapeLike | None = 0, + dtype: DTypeLike | None = None, + out: EllipsisType | None = None, + keepdims: bool = False, + **kwargs: Unpack[_ReduceKwargs], + ) -> _ReturnType_co | NDArray[np.object_]: ... + + @overload # type: ignore[override] + def reduceat( + self, + array: ArrayLike, + /, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + *, + out: _ArrayT | tuple[_ArrayT], + ) -> _ArrayT: ... + @overload + def reduceat( + self, + array: ArrayLike, + /, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: EllipsisType | None = None, + ) -> NDArray[np.object_]: ... + @overload + def reduceat( + self, + array: _SupportsArrayUFunc, + /, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = 0, + dtype: DTypeLike | None = None, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = None, + ) -> Incomplete: ... + + @overload # type: ignore[override] + def outer( + self, + A: _ScalarLike_co, + B: _ScalarLike_co, + /, + *, + out: EllipsisType | None = None, + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ReturnType_co: ... + @overload + def outer( + self, + A: ArrayLike, + B: ArrayLike, + /, + *, + out: EllipsisType | None = None, + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ReturnType_co | NDArray[np.object_]: ... + @overload + def outer( + self, + A: ArrayLike, + B: ArrayLike, + /, + *, + out: _ArrayT, + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ArrayT: ... + @overload + def outer( + self, + A: _SupportsArrayUFunc, + B: _SupportsArrayUFunc | ArrayLike, + /, + *, + out: EllipsisType | None = None, + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> Incomplete: ... + @overload + def outer( + self, + A: _ScalarLike_co, + B: _SupportsArrayUFunc | ArrayLike, + /, + *, + out: EllipsisType | None = None, + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> Incomplete: ... + + def at( # type: ignore[override] + self, + a: np.ndarray | _SupportsArrayUFunc, + indices: _ArrayLikeInt_co, + b: ArrayLike, + /, + ) -> None: ... + +@type_check_only +class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # type: ignore[misc] + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> _NIn: ... + @property + def nout(self) -> Literal[1]: ... + @property + def ntypes(self) -> Literal[1]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + x1: _ScalarLike_co, + x2: _ScalarLike_co, + x3: _ScalarLike_co, + /, + *xs: _ScalarLike_co, + out: EllipsisType | None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], + ) -> _ReturnType_co: ... + @overload + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + x3: ArrayLike, + /, + *xs: ArrayLike, + out: EllipsisType | None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], + ) -> _ReturnType_co | NDArray[np.object_]: ... + @overload + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + x3: ArrayLike, + /, + *xs: ArrayLike, + out: _ArrayT | tuple[_ArrayT], + **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], + ) -> _ArrayT: ... + @overload + def __call__( + self, + x1: _SupportsArrayUFunc | ArrayLike, + x2: _SupportsArrayUFunc | ArrayLike, + x3: _SupportsArrayUFunc | ArrayLike, + /, + *xs: _SupportsArrayUFunc | ArrayLike, + out: np.ndarray | tuple[np.ndarray] | EllipsisType | None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], + ) -> Incomplete: ... + + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + def at(self, a: Never, indices: Never, /, *args: Never) -> NoReturn: ... # type: ignore[override] + +@type_check_only +class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]): # type: ignore[misc] + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> _NIn: ... + @property + def nout(self) -> _NOut: ... + @property + def ntypes(self) -> Literal[1]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + x1: _ScalarLike_co, + /, + *xs: _ScalarLike_co, + out: EllipsisType | None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], + ) -> _2PTuple[_ReturnType_co]: ... + @overload + def __call__( + self, + x1: ArrayLike, + /, + *xs: ArrayLike, + out: EllipsisType | None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], + ) -> _2PTuple[_ReturnType_co | NDArray[np.object_]]: ... + @overload + def __call__( + self, + x1: ArrayLike, + /, + *xs: ArrayLike, + out: _2PTuple[_ArrayT], + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], + ) -> _2PTuple[_ArrayT]: ... + @overload + def __call__( + self, + x1: _SupportsArrayUFunc | ArrayLike, + /, + *xs: _SupportsArrayUFunc | ArrayLike, + out: _2PTuple[np.ndarray] | EllipsisType | None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], + ) -> Incomplete: ... + + def accumulate(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduce(self, array: Never, /) -> NoReturn: ... # type: ignore[override] + def reduceat(self, array: Never, /, indices: Never) -> NoReturn: ... # type: ignore[override] + def outer(self, A: Never, B: Never, /) -> NoReturn: ... # type: ignore[override] + def at(self, a: Never, indices: Never, /, *args: Never) -> NoReturn: ... # type: ignore[override] diff --git a/local_packages/numpy/_utils/__init__.py b/local_packages/numpy/_utils/__init__.py new file mode 100644 index 0000000..84ee99d --- /dev/null +++ b/local_packages/numpy/_utils/__init__.py @@ -0,0 +1,95 @@ +""" +This is a module for defining private helpers which do not depend on the +rest of NumPy. + +Everything in here must be self-contained so that it can be +imported anywhere else without creating circular imports. +If a utility requires the import of NumPy, it probably belongs +in ``numpy._core``. +""" + +import functools +import warnings + +from ._convertions import asbytes, asunicode + + +def set_module(module): + """Private decorator for overriding __module__ on a function or class. + + Example usage:: + + @set_module('numpy') + def example(): + pass + + assert example.__module__ == 'numpy' + """ + def decorator(func): + if module is not None: + if isinstance(func, type): + try: + func._module_source = func.__module__ + except (AttributeError): + pass + + func.__module__ = module + return func + return decorator + + +def _rename_parameter(old_names, new_names, dep_version=None): + """ + Generate decorator for backward-compatible keyword renaming. + + Apply the decorator generated by `_rename_parameter` to functions with a + renamed parameter to maintain backward-compatibility. + + After decoration, the function behaves as follows: + If only the new parameter is passed into the function, behave as usual. + If only the old parameter is passed into the function (as a keyword), raise + a DeprecationWarning if `dep_version` is provided, and behave as usual + otherwise. + If both old and new parameters are passed into the function, raise a + DeprecationWarning if `dep_version` is provided, and raise the appropriate + TypeError (function got multiple values for argument). + + Parameters + ---------- + old_names : list of str + Old names of parameters + new_name : list of str + New names of parameters + dep_version : str, optional + Version of NumPy in which old parameter was deprecated in the format + 'X.Y.Z'. If supplied, the deprecation message will indicate that + support for the old parameter will be removed in version 'X.Y+2.Z' + + Notes + ----- + Untested with functions that accept *args. Probably won't work as written. + + """ + def decorator(fun): + @functools.wraps(fun) + def wrapper(*args, **kwargs): + __tracebackhide__ = True # Hide traceback for py.test + for old_name, new_name in zip(old_names, new_names): + if old_name in kwargs: + if dep_version: + end_version = dep_version.split('.') + end_version[1] = str(int(end_version[1]) + 2) + end_version = '.'.join(end_version) + msg = (f"Use of keyword argument `{old_name}` is " + f"deprecated and replaced by `{new_name}`. " + f"Support for `{old_name}` will be removed " + f"in NumPy {end_version}.") + warnings.warn(msg, DeprecationWarning, stacklevel=2) + if new_name in kwargs: + msg = (f"{fun.__name__}() got multiple values for " + f"argument now known as `{new_name}`") + raise TypeError(msg) + kwargs[new_name] = kwargs.pop(old_name) + return fun(*args, **kwargs) + return wrapper + return decorator diff --git a/local_packages/numpy/_utils/__init__.pyi b/local_packages/numpy/_utils/__init__.pyi new file mode 100644 index 0000000..b630777 --- /dev/null +++ b/local_packages/numpy/_utils/__init__.pyi @@ -0,0 +1,28 @@ +from _typeshed import IdentityFunction +from collections.abc import Callable, Iterable +from typing import Protocol, TypeVar, overload, type_check_only + +from ._convertions import asbytes as asbytes, asunicode as asunicode + +### + +_T = TypeVar("_T") +_HasModuleT = TypeVar("_HasModuleT", bound=_HasModule) + +@type_check_only +class _HasModule(Protocol): + __module__: str + +### + +@overload +def set_module(module: None) -> IdentityFunction: ... +@overload +def set_module(module: str) -> Callable[[_HasModuleT], _HasModuleT]: ... + +# +def _rename_parameter( + old_names: Iterable[str], + new_names: Iterable[str], + dep_version: str | None = None, +) -> Callable[[Callable[..., _T]], Callable[..., _T]]: ... diff --git a/local_packages/numpy/_utils/_convertions.py b/local_packages/numpy/_utils/_convertions.py new file mode 100644 index 0000000..ab15a8b --- /dev/null +++ b/local_packages/numpy/_utils/_convertions.py @@ -0,0 +1,18 @@ +""" +A set of methods retained from np.compat module that +are still used across codebase. +""" + +__all__ = ["asunicode", "asbytes"] + + +def asunicode(s): + if isinstance(s, bytes): + return s.decode('latin1') + return str(s) + + +def asbytes(s): + if isinstance(s, bytes): + return s + return str(s).encode('latin1') diff --git a/local_packages/numpy/_utils/_convertions.pyi b/local_packages/numpy/_utils/_convertions.pyi new file mode 100644 index 0000000..6cc599a --- /dev/null +++ b/local_packages/numpy/_utils/_convertions.pyi @@ -0,0 +1,4 @@ +__all__ = ["asbytes", "asunicode"] + +def asunicode(s: bytes | str) -> str: ... +def asbytes(s: bytes | str) -> str: ... diff --git a/local_packages/numpy/_utils/_inspect.py b/local_packages/numpy/_utils/_inspect.py new file mode 100644 index 0000000..b499f58 --- /dev/null +++ b/local_packages/numpy/_utils/_inspect.py @@ -0,0 +1,192 @@ +"""Subset of inspect module from upstream python + +We use this instead of upstream because upstream inspect is slow to import, and +significantly contributes to numpy import times. Importing this copy has almost +no overhead. + +""" +import types + +__all__ = ['getargspec', 'formatargspec'] + +# ----------------------------------------------------------- type-checking +def ismethod(object): + """Return true if the object is an instance method. + + Instance method objects provide these attributes: + __doc__ documentation string + __name__ name with which this method was defined + im_class class object in which this method belongs + im_func function object containing implementation of method + im_self instance to which this method is bound, or None + + """ + return isinstance(object, types.MethodType) + +def isfunction(object): + """Return true if the object is a user-defined function. + + Function objects provide these attributes: + __doc__ documentation string + __name__ name with which this function was defined + func_code code object containing compiled function bytecode + func_defaults tuple of any default values for arguments + func_doc (same as __doc__) + func_globals global namespace in which this function was defined + func_name (same as __name__) + + """ + return isinstance(object, types.FunctionType) + +def iscode(object): + """Return true if the object is a code object. + + Code objects provide these attributes: + co_argcount number of arguments (not including * or ** args) + co_code string of raw compiled bytecode + co_consts tuple of constants used in the bytecode + co_filename name of file in which this code object was created + co_firstlineno number of first line in Python source code + co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg + co_lnotab encoded mapping of line numbers to bytecode indices + co_name name with which this code object was defined + co_names tuple of names of local variables + co_nlocals number of local variables + co_stacksize virtual machine stack space required + co_varnames tuple of names of arguments and local variables + + """ + return isinstance(object, types.CodeType) + + +# ------------------------------------------------ argument list extraction +# These constants are from Python's compile.h. +CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8 + +def getargs(co): + """Get information about the arguments accepted by a code object. + + Three things are returned: (args, varargs, varkw), where 'args' is + a list of argument names (possibly containing nested lists), and + 'varargs' and 'varkw' are the names of the * and ** arguments or None. + + """ + + if not iscode(co): + raise TypeError('arg is not a code object') + + nargs = co.co_argcount + names = co.co_varnames + args = list(names[:nargs]) + + # The following acrobatics are for anonymous (tuple) arguments. + # Which we do not need to support, so remove to avoid importing + # the dis module. + for i in range(nargs): + if args[i][:1] in ['', '.']: + raise TypeError("tuple function arguments are not supported") + varargs = None + if co.co_flags & CO_VARARGS: + varargs = co.co_varnames[nargs] + nargs = nargs + 1 + varkw = None + if co.co_flags & CO_VARKEYWORDS: + varkw = co.co_varnames[nargs] + return args, varargs, varkw + +def getargspec(func): + """Get the names and default values of a function's arguments. + + A tuple of four things is returned: (args, varargs, varkw, defaults). + 'args' is a list of the argument names (it may contain nested lists). + 'varargs' and 'varkw' are the names of the * and ** arguments or None. + 'defaults' is an n-tuple of the default values of the last n arguments. + + """ + + if ismethod(func): + func = func.__func__ + if not isfunction(func): + raise TypeError('arg is not a Python function') + args, varargs, varkw = getargs(func.__code__) + return args, varargs, varkw, func.__defaults__ + +def getargvalues(frame): + """Get information about arguments passed into a particular frame. + + A tuple of four things is returned: (args, varargs, varkw, locals). + 'args' is a list of the argument names (it may contain nested lists). + 'varargs' and 'varkw' are the names of the * and ** arguments or None. + 'locals' is the locals dictionary of the given frame. + + """ + args, varargs, varkw = getargs(frame.f_code) + return args, varargs, varkw, frame.f_locals + +def joinseq(seq): + if len(seq) == 1: + return '(' + seq[0] + ',)' + else: + return '(' + ', '.join(seq) + ')' + +def strseq(object, convert, join=joinseq): + """Recursively walk a sequence, stringifying each element. + + """ + if type(object) in [list, tuple]: + return join([strseq(_o, convert, join) for _o in object]) + else: + return convert(object) + +def formatargspec(args, varargs=None, varkw=None, defaults=None, + formatarg=str, + formatvarargs=lambda name: '*' + name, + formatvarkw=lambda name: '**' + name, + formatvalue=lambda value: '=' + repr(value), + join=joinseq): + """Format an argument spec from the 4 values returned by getargspec. + + The first four arguments are (args, varargs, varkw, defaults). The + other four arguments are the corresponding optional formatting functions + that are called to turn names and values into strings. The ninth + argument is an optional function to format the sequence of arguments. + + """ + specs = [] + if defaults: + firstdefault = len(args) - len(defaults) + for i in range(len(args)): + spec = strseq(args[i], formatarg, join) + if defaults and i >= firstdefault: + spec = spec + formatvalue(defaults[i - firstdefault]) + specs.append(spec) + if varargs is not None: + specs.append(formatvarargs(varargs)) + if varkw is not None: + specs.append(formatvarkw(varkw)) + return '(' + ', '.join(specs) + ')' + +def formatargvalues(args, varargs, varkw, locals, + formatarg=str, + formatvarargs=lambda name: '*' + name, + formatvarkw=lambda name: '**' + name, + formatvalue=lambda value: '=' + repr(value), + join=joinseq): + """Format an argument spec from the 4 values returned by getargvalues. + + The first four arguments are (args, varargs, varkw, locals). The + next four arguments are the corresponding optional formatting functions + that are called to turn names and values into strings. The ninth + argument is an optional function to format the sequence of arguments. + + """ + def convert(name, locals=locals, + formatarg=formatarg, formatvalue=formatvalue): + return formatarg(name) + formatvalue(locals[name]) + specs = [strseq(arg, convert, join) for arg in args] + + if varargs: + specs.append(formatvarargs(varargs) + formatvalue(locals[varargs])) + if varkw: + specs.append(formatvarkw(varkw) + formatvalue(locals[varkw])) + return '(' + ', '.join(specs) + ')' diff --git a/local_packages/numpy/_utils/_inspect.pyi b/local_packages/numpy/_utils/_inspect.pyi new file mode 100644 index 0000000..40546d2 --- /dev/null +++ b/local_packages/numpy/_utils/_inspect.pyi @@ -0,0 +1,70 @@ +import types +from _typeshed import SupportsLenAndGetItem +from collections.abc import Callable, Mapping +from typing import Any, Final, TypeAlias, TypeVar, overload +from typing_extensions import TypeIs + +__all__ = ["formatargspec", "getargspec"] + +### + +_T = TypeVar("_T") +_RT = TypeVar("_RT") + +_StrSeq: TypeAlias = SupportsLenAndGetItem[str] +_NestedSeq: TypeAlias = list[_T | _NestedSeq[_T]] | tuple[_T | _NestedSeq[_T], ...] + +_JoinFunc: TypeAlias = Callable[[list[_T]], _T] +_FormatFunc: TypeAlias = Callable[[_T], str] + +### + +CO_OPTIMIZED: Final = 1 +CO_NEWLOCALS: Final = 2 +CO_VARARGS: Final = 4 +CO_VARKEYWORDS: Final = 8 + +### + +def ismethod(object: object) -> TypeIs[types.MethodType]: ... +def isfunction(object: object) -> TypeIs[types.FunctionType]: ... +def iscode(object: object) -> TypeIs[types.CodeType]: ... + +### + +def getargs(co: types.CodeType) -> tuple[list[str], str | None, str | None]: ... +def getargspec(func: types.MethodType | types.FunctionType) -> tuple[list[str], str | None, str | None, tuple[Any, ...]]: ... +def getargvalues(frame: types.FrameType) -> tuple[list[str], str | None, str | None, dict[str, Any]]: ... + +# +def joinseq(seq: _StrSeq) -> str: ... + +# +@overload +def strseq(object: _NestedSeq[str], convert: Callable[[Any], Any], join: _JoinFunc[str] = ...) -> str: ... +@overload +def strseq(object: _NestedSeq[_T], convert: Callable[[_T], _RT], join: _JoinFunc[_RT]) -> _RT: ... + +# +def formatargspec( + args: _StrSeq, + varargs: str | None = None, + varkw: str | None = None, + defaults: SupportsLenAndGetItem[object] | None = None, + formatarg: _FormatFunc[str] = ..., # str + formatvarargs: _FormatFunc[str] = ..., # "*{}".format + formatvarkw: _FormatFunc[str] = ..., # "**{}".format + formatvalue: _FormatFunc[object] = ..., # "={!r}".format + join: _JoinFunc[str] = ..., # joinseq +) -> str: ... +def formatargvalues( + args: _StrSeq, + varargs: str | None, + varkw: str | None, + locals: Mapping[str, object] | None, + formatarg: _FormatFunc[str] = ..., # str + formatvarargs: _FormatFunc[str] = ..., # "*{}".format + formatvarkw: _FormatFunc[str] = ..., # "**{}".format + formatvalue: _FormatFunc[object] = ..., # "={!r}".format + join: _JoinFunc[str] = ..., # joinseq +) -> str: ... diff --git a/local_packages/numpy/_utils/_pep440.py b/local_packages/numpy/_utils/_pep440.py new file mode 100644 index 0000000..035a069 --- /dev/null +++ b/local_packages/numpy/_utils/_pep440.py @@ -0,0 +1,486 @@ +"""Utility to compare pep440 compatible version strings. + +The LooseVersion and StrictVersion classes that distutils provides don't +work; they don't recognize anything like alpha/beta/rc/dev versions. +""" + +# Copyright (c) Donald Stufft and individual contributors. +# All rights reserved. + +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: + +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. + +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +import collections +import itertools +import re + +__all__ = [ + "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN", +] + + +# BEGIN packaging/_structures.py + + +class Infinity: + def __repr__(self): + return "Infinity" + + def __hash__(self): + return hash(repr(self)) + + def __lt__(self, other): + return False + + def __le__(self, other): + return False + + def __eq__(self, other): + return isinstance(other, self.__class__) + + def __ne__(self, other): + return not isinstance(other, self.__class__) + + def __gt__(self, other): + return True + + def __ge__(self, other): + return True + + def __neg__(self): + return NegativeInfinity + + +Infinity = Infinity() + + +class NegativeInfinity: + def __repr__(self): + return "-Infinity" + + def __hash__(self): + return hash(repr(self)) + + def __lt__(self, other): + return True + + def __le__(self, other): + return True + + def __eq__(self, other): + return isinstance(other, self.__class__) + + def __ne__(self, other): + return not isinstance(other, self.__class__) + + def __gt__(self, other): + return False + + def __ge__(self, other): + return False + + def __neg__(self): + return Infinity + + +# BEGIN packaging/version.py + + +NegativeInfinity = NegativeInfinity() + +_Version = collections.namedtuple( + "_Version", + ["epoch", "release", "dev", "pre", "post", "local"], +) + + +def parse(version): + """ + Parse the given version string and return either a :class:`Version` object + or a :class:`LegacyVersion` object depending on if the given version is + a valid PEP 440 version or a legacy version. + """ + try: + return Version(version) + except InvalidVersion: + return LegacyVersion(version) + + +class InvalidVersion(ValueError): + """ + An invalid version was found, users should refer to PEP 440. + """ + + +class _BaseVersion: + + def __hash__(self): + return hash(self._key) + + def __lt__(self, other): + return self._compare(other, lambda s, o: s < o) + + def __le__(self, other): + return self._compare(other, lambda s, o: s <= o) + + def __eq__(self, other): + return self._compare(other, lambda s, o: s == o) + + def __ge__(self, other): + return self._compare(other, lambda s, o: s >= o) + + def __gt__(self, other): + return self._compare(other, lambda s, o: s > o) + + def __ne__(self, other): + return self._compare(other, lambda s, o: s != o) + + def _compare(self, other, method): + if not isinstance(other, _BaseVersion): + return NotImplemented + + return method(self._key, other._key) + + +class LegacyVersion(_BaseVersion): + + def __init__(self, version): + self._version = str(version) + self._key = _legacy_cmpkey(self._version) + + def __str__(self): + return self._version + + def __repr__(self): + return f"" + + @property + def public(self): + return self._version + + @property + def base_version(self): + return self._version + + @property + def local(self): + return None + + @property + def is_prerelease(self): + return False + + @property + def is_postrelease(self): + return False + + +_legacy_version_component_re = re.compile( + r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE, +) + +_legacy_version_replacement_map = { + "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@", +} + + +def _parse_version_parts(s): + for part in _legacy_version_component_re.split(s): + part = _legacy_version_replacement_map.get(part, part) + + if not part or part == ".": + continue + + if part[:1] in "0123456789": + # pad for numeric comparison + yield part.zfill(8) + else: + yield "*" + part + + # ensure that alpha/beta/candidate are before final + yield "*final" + + +def _legacy_cmpkey(version): + # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch + # greater than or equal to 0. This will effectively put the LegacyVersion, + # which uses the defacto standard originally implemented by setuptools, + # as before all PEP 440 versions. + epoch = -1 + + # This scheme is taken from pkg_resources.parse_version setuptools prior to + # its adoption of the packaging library. + parts = [] + for part in _parse_version_parts(version.lower()): + if part.startswith("*"): + # remove "-" before a prerelease tag + if part < "*final": + while parts and parts[-1] == "*final-": + parts.pop() + + # remove trailing zeros from each series of numeric parts + while parts and parts[-1] == "00000000": + parts.pop() + + parts.append(part) + parts = tuple(parts) + + return epoch, parts + + +# Deliberately not anchored to the start and end of the string, to make it +# easier for 3rd party code to reuse +VERSION_PATTERN = r""" + v? + (?: + (?:(?P[0-9]+)!)? # epoch + (?P[0-9]+(?:\.[0-9]+)*) # release segment + (?P
                                          # pre-release
+            [-_\.]?
+            (?P(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P[0-9]+)?
+        )?
+        (?P                                         # post release
+            (?:-(?P[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?Ppost|rev|r)
+                [-_\.]?
+                (?P[0-9]+)?
+            )
+        )?
+        (?P                                          # dev release
+            [-_\.]?
+            (?Pdev)
+            [-_\.]?
+            (?P[0-9]+)?
+        )?
+    )
+    (?:\+(?P[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+
+class Version(_BaseVersion):
+
+    _regex = re.compile(
+        r"^\s*" + VERSION_PATTERN + r"\s*$",
+        re.VERBOSE | re.IGNORECASE,
+    )
+
+    def __init__(self, version):
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion(f"Invalid version: '{version}'")
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(
+                match.group("pre_l"),
+                match.group("pre_n"),
+            ),
+            post=_parse_letter_version(
+                match.group("post_l"),
+                match.group("post_n1") or match.group("post_n2"),
+            ),
+            dev=_parse_letter_version(
+                match.group("dev_l"),
+                match.group("dev_n"),
+            ),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self):
+        return f""
+
+    def __str__(self):
+        parts = []
+
+        # Epoch
+        if self._version.epoch != 0:
+            parts.append(f"{self._version.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self._version.release))
+
+        # Pre-release
+        if self._version.pre is not None:
+            parts.append("".join(str(x) for x in self._version.pre))
+
+        # Post-release
+        if self._version.post is not None:
+            parts.append(f".post{self._version.post[1]}")
+
+        # Development release
+        if self._version.dev is not None:
+            parts.append(f".dev{self._version.dev[1]}")
+
+        # Local version segment
+        if self._version.local is not None:
+            parts.append(
+                f"+{'.'.join(str(x) for x in self._version.local)}"
+            )
+
+        return "".join(parts)
+
+    @property
+    def public(self):
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self):
+        parts = []
+
+        # Epoch
+        if self._version.epoch != 0:
+            parts.append(f"{self._version.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self._version.release))
+
+        return "".join(parts)
+
+    @property
+    def local(self):
+        version_string = str(self)
+        if "+" in version_string:
+            return version_string.split("+", 1)[1]
+
+    @property
+    def is_prerelease(self):
+        return bool(self._version.dev or self._version.pre)
+
+    @property
+    def is_postrelease(self):
+        return bool(self._version.post)
+
+
+def _parse_letter_version(letter, number):
+    if letter:
+        # We assume there is an implicit 0 in a pre-release if there is
+        # no numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower-case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume that if we are given a number but not given a letter,
+        # then this is using the implicit post release syntax (e.g., 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+
+_local_version_seperators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local):
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_seperators.split(local)
+        )
+
+
+def _cmpkey(epoch, release, pre, post, dev, local):
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll use a reverse the list, drop all the now
+    # leading zeros until we come to something non-zero, then take the rest,
+    # re-reverse it back into the correct order, and make it a tuple and use
+    # that for our sorting key.
+    release = tuple(
+        reversed(list(
+            itertools.dropwhile(
+                lambda x: x == 0,
+                reversed(release),
+            )
+        ))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre-segment, but we _only_ want to do this
+    # if there is no pre- or a post-segment. If we have one of those, then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        pre = -Infinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        pre = Infinity
+
+    # Versions without a post-segment should sort before those with one.
+    if post is None:
+        post = -Infinity
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        dev = Infinity
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        local = -Infinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alphanumeric segments sort before numeric segments
+        # - Alphanumeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        local = tuple(
+            (i, "") if isinstance(i, int) else (-Infinity, i)
+            for i in local
+        )
+
+    return epoch, release, pre, post, dev, local
diff --git a/local_packages/numpy/_utils/_pep440.pyi b/local_packages/numpy/_utils/_pep440.pyi
new file mode 100644
index 0000000..11ae02e
--- /dev/null
+++ b/local_packages/numpy/_utils/_pep440.pyi
@@ -0,0 +1,118 @@
+import re
+from collections.abc import Callable
+from typing import (
+    Any,
+    ClassVar,
+    Final,
+    Generic,
+    Literal as L,
+    NamedTuple,
+    TypeVar,
+    final,
+    type_check_only,
+)
+from typing_extensions import TypeIs
+
+__all__ = ["VERSION_PATTERN", "InvalidVersion", "LegacyVersion", "Version", "parse"]
+
+###
+
+_CmpKeyT = TypeVar("_CmpKeyT", bound=tuple[object, ...])
+_CmpKeyT_co = TypeVar("_CmpKeyT_co", bound=tuple[object, ...], default=tuple[Any, ...], covariant=True)
+
+###
+
+VERSION_PATTERN: Final[str] = ...
+
+class InvalidVersion(ValueError): ...
+
+@type_check_only
+@final
+class _InfinityType:
+    def __hash__(self) -> int: ...
+    def __eq__(self, other: object, /) -> TypeIs[_InfinityType]: ...
+    def __ne__(self, other: object, /) -> bool: ...
+    def __lt__(self, other: object, /) -> L[False]: ...
+    def __le__(self, other: object, /) -> L[False]: ...
+    def __gt__(self, other: object, /) -> L[True]: ...
+    def __ge__(self, other: object, /) -> L[True]: ...
+    def __neg__(self) -> _NegativeInfinityType: ...
+
+Infinity: Final[_InfinityType] = ...
+
+@type_check_only
+@final
+class _NegativeInfinityType:
+    def __hash__(self) -> int: ...
+    def __eq__(self, other: object, /) -> TypeIs[_NegativeInfinityType]: ...
+    def __ne__(self, other: object, /) -> bool: ...
+    def __lt__(self, other: object, /) -> L[True]: ...
+    def __le__(self, other: object, /) -> L[True]: ...
+    def __gt__(self, other: object, /) -> L[False]: ...
+    def __ge__(self, other: object, /) -> L[False]: ...
+    def __neg__(self) -> _InfinityType: ...
+
+NegativeInfinity: Final[_NegativeInfinityType] = ...
+
+class _Version(NamedTuple):
+    epoch: int
+    release: tuple[int, ...]
+    dev: tuple[str, int] | None
+    pre: tuple[str, int] | None
+    post: tuple[str, int] | None
+    local: tuple[str | int, ...] | None
+
+class _BaseVersion(Generic[_CmpKeyT_co]):
+    _key: _CmpKeyT_co
+    def __hash__(self) -> int: ...
+    def __eq__(self, other: _BaseVersion, /) -> bool: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
+    def __ne__(self, other: _BaseVersion, /) -> bool: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
+    def __lt__(self, other: _BaseVersion, /) -> bool: ...
+    def __le__(self, other: _BaseVersion, /) -> bool: ...
+    def __ge__(self, other: _BaseVersion, /) -> bool: ...
+    def __gt__(self, other: _BaseVersion, /) -> bool: ...
+    def _compare(self, /, other: _BaseVersion[_CmpKeyT], method: Callable[[_CmpKeyT_co, _CmpKeyT], bool]) -> bool: ...
+
+class LegacyVersion(_BaseVersion[tuple[L[-1], tuple[str, ...]]]):
+    _version: Final[str]
+    def __init__(self, /, version: str) -> None: ...
+    @property
+    def public(self) -> str: ...
+    @property
+    def base_version(self) -> str: ...
+    @property
+    def local(self) -> None: ...
+    @property
+    def is_prerelease(self) -> L[False]: ...
+    @property
+    def is_postrelease(self) -> L[False]: ...
+
+class Version(
+    _BaseVersion[
+        tuple[
+            int,  # epoch
+            tuple[int, ...],  # release
+            tuple[str, int] | _InfinityType | _NegativeInfinityType,  # pre
+            tuple[str, int] | _NegativeInfinityType,  # post
+            tuple[str, int] | _InfinityType,  # dev
+            tuple[tuple[int, L[""]] | tuple[_NegativeInfinityType, str], ...] | _NegativeInfinityType,  # local
+        ],
+    ],
+):
+    _regex: ClassVar[re.Pattern[str]] = ...
+    _version: Final[str]
+
+    def __init__(self, /, version: str) -> None: ...
+    @property
+    def public(self) -> str: ...
+    @property
+    def base_version(self) -> str: ...
+    @property
+    def local(self) -> str | None: ...
+    @property
+    def is_prerelease(self) -> bool: ...
+    @property
+    def is_postrelease(self) -> bool: ...
+
+#
+def parse(version: str) -> Version | LegacyVersion: ...
diff --git a/local_packages/numpy/char/__init__.py b/local_packages/numpy/char/__init__.py
new file mode 100644
index 0000000..d98d38c
--- /dev/null
+++ b/local_packages/numpy/char/__init__.py
@@ -0,0 +1,2 @@
+from numpy._core.defchararray import *
+from numpy._core.defchararray import __all__, __doc__
diff --git a/local_packages/numpy/char/__init__.pyi b/local_packages/numpy/char/__init__.pyi
new file mode 100644
index 0000000..e151f20
--- /dev/null
+++ b/local_packages/numpy/char/__init__.pyi
@@ -0,0 +1,111 @@
+from numpy._core.defchararray import (
+    add,
+    array,
+    asarray,
+    capitalize,
+    center,
+    chararray,
+    compare_chararrays,
+    count,
+    decode,
+    encode,
+    endswith,
+    equal,
+    expandtabs,
+    find,
+    greater,
+    greater_equal,
+    index,
+    isalnum,
+    isalpha,
+    isdecimal,
+    isdigit,
+    islower,
+    isnumeric,
+    isspace,
+    istitle,
+    isupper,
+    join,
+    less,
+    less_equal,
+    ljust,
+    lower,
+    lstrip,
+    mod,
+    multiply,
+    not_equal,
+    partition,
+    replace,
+    rfind,
+    rindex,
+    rjust,
+    rpartition,
+    rsplit,
+    rstrip,
+    split,
+    splitlines,
+    startswith,
+    str_len,
+    strip,
+    swapcase,
+    title,
+    translate,
+    upper,
+    zfill,
+)
+
+__all__ = [
+    "equal",
+    "not_equal",
+    "greater_equal",
+    "less_equal",
+    "greater",
+    "less",
+    "str_len",
+    "add",
+    "multiply",
+    "mod",
+    "capitalize",
+    "center",
+    "count",
+    "decode",
+    "encode",
+    "endswith",
+    "expandtabs",
+    "find",
+    "index",
+    "isalnum",
+    "isalpha",
+    "isdigit",
+    "islower",
+    "isspace",
+    "istitle",
+    "isupper",
+    "join",
+    "ljust",
+    "lower",
+    "lstrip",
+    "partition",
+    "replace",
+    "rfind",
+    "rindex",
+    "rjust",
+    "rpartition",
+    "rsplit",
+    "rstrip",
+    "split",
+    "splitlines",
+    "startswith",
+    "strip",
+    "swapcase",
+    "title",
+    "translate",
+    "upper",
+    "zfill",
+    "isnumeric",
+    "isdecimal",
+    "array",
+    "asarray",
+    "compare_chararrays",
+    "chararray",
+]
diff --git a/local_packages/numpy/conftest.py b/local_packages/numpy/conftest.py
new file mode 100644
index 0000000..c3c96ef
--- /dev/null
+++ b/local_packages/numpy/conftest.py
@@ -0,0 +1,248 @@
+"""
+Pytest configuration and fixtures for the Numpy test suite.
+"""
+import os
+import sys
+import tempfile
+import warnings
+from contextlib import contextmanager
+from pathlib import Path
+
+import hypothesis
+import pytest
+
+import numpy
+from numpy._core._multiarray_tests import get_fpu_mode
+from numpy.testing._private.utils import NOGIL_BUILD
+
+try:
+    from scipy_doctest.conftest import dt_config
+    HAVE_SCPDT = True
+except ModuleNotFoundError:
+    HAVE_SCPDT = False
+
+try:
+    import pytest_run_parallel  # noqa: F401
+    PARALLEL_RUN_AVALIABLE = True
+except ModuleNotFoundError:
+    PARALLEL_RUN_AVALIABLE = False
+
+_old_fpu_mode = None
+_collect_results = {}
+
+# Use a known and persistent tmpdir for hypothesis' caches, which
+# can be automatically cleared by the OS or user.
+hypothesis.configuration.set_hypothesis_home_dir(
+    os.path.join(tempfile.gettempdir(), ".hypothesis")
+)
+
+# We register two custom profiles for Numpy - for details see
+# https://hypothesis.readthedocs.io/en/latest/settings.html
+# The first is designed for our own CI runs; the latter also
+# forces determinism and is designed for use via np.test()
+hypothesis.settings.register_profile(
+    name="numpy-profile", deadline=None, print_blob=True,
+)
+hypothesis.settings.register_profile(
+    name="np.test() profile",
+    deadline=None, print_blob=True, database=None, derandomize=True,
+    suppress_health_check=list(hypothesis.HealthCheck),
+)
+# Note that the default profile is chosen based on the presence
+# of pytest.ini, but can be overridden by passing the
+# --hypothesis-profile=NAME argument to pytest.
+_pytest_ini = os.path.join(os.path.dirname(__file__), "..", "pytest.ini")
+hypothesis.settings.load_profile(
+    "numpy-profile" if os.path.isfile(_pytest_ini) else "np.test() profile"
+)
+
+# The experimentalAPI is used in _umath_tests
+os.environ["NUMPY_EXPERIMENTAL_DTYPE_API"] = "1"
+
+def pytest_configure(config):
+    config.addinivalue_line("markers",
+        "valgrind_error: Tests that are known to error under valgrind.")
+    config.addinivalue_line("markers",
+        "leaks_references: Tests that are known to leak references.")
+    config.addinivalue_line("markers",
+        "slow: Tests that are very slow.")
+    config.addinivalue_line("markers",
+        "slow_pypy: Tests that are very slow on pypy.")
+    if not PARALLEL_RUN_AVALIABLE:
+        config.addinivalue_line("markers",
+            "parallel_threads(n): run the given test function in parallel "
+            "using `n` threads.",
+        )
+        config.addinivalue_line("markers",
+            "iterations(n): run the given test function `n` times in each thread",
+        )
+        config.addinivalue_line("markers",
+            "thread_unsafe: mark the test function as single-threaded",
+        )
+
+
+def pytest_addoption(parser):
+    parser.addoption("--available-memory", action="store", default=None,
+                     help=("Set amount of memory available for running the "
+                           "test suite. This can result to tests requiring "
+                           "especially large amounts of memory to be skipped. "
+                           "Equivalent to setting environment variable "
+                           "NPY_AVAILABLE_MEM. Default: determined"
+                           "automatically."))
+
+
+gil_enabled_at_start = True
+if NOGIL_BUILD:
+    gil_enabled_at_start = sys._is_gil_enabled()
+
+
+def pytest_sessionstart(session):
+    available_mem = session.config.getoption('available_memory')
+    if available_mem is not None:
+        os.environ['NPY_AVAILABLE_MEM'] = available_mem
+
+
+def pytest_terminal_summary(terminalreporter, exitstatus, config):
+    if NOGIL_BUILD and not gil_enabled_at_start and sys._is_gil_enabled():
+        tr = terminalreporter
+        tr.ensure_newline()
+        tr.section("GIL re-enabled", sep="=", red=True, bold=True)
+        tr.line("The GIL was re-enabled at runtime during the tests.")
+        tr.line("This can happen with no test failures if the RuntimeWarning")
+        tr.line("raised by Python when this happens is filtered by a test.")
+        tr.line("")
+        tr.line("Please ensure all new C modules declare support for running")
+        tr.line("without the GIL. Any new tests that intentionally imports ")
+        tr.line("code that re-enables the GIL should do so in a subprocess.")
+        pytest.exit("GIL re-enabled during tests", returncode=1)
+
+# FIXME when yield tests are gone.
+@pytest.hookimpl(tryfirst=True)
+def pytest_itemcollected(item):
+    """
+    Check FPU precision mode was not changed during test collection.
+
+    The clumsy way we do it here is mainly necessary because numpy
+    still uses yield tests, which can execute code at test collection
+    time.
+    """
+    global _old_fpu_mode
+
+    mode = get_fpu_mode()
+
+    if _old_fpu_mode is None:
+        _old_fpu_mode = mode
+    elif mode != _old_fpu_mode:
+        _collect_results[item] = (_old_fpu_mode, mode)
+        _old_fpu_mode = mode
+
+    # mark f2py tests as thread unsafe
+    if Path(item.fspath).parent == Path(__file__).parent / 'f2py' / 'tests':
+        item.add_marker(pytest.mark.thread_unsafe(
+            reason="f2py tests are thread-unsafe"))
+
+
+@pytest.fixture(scope="function", autouse=True)
+def check_fpu_mode(request):
+    """
+    Check FPU precision mode was not changed during the test.
+    """
+    old_mode = get_fpu_mode()
+    yield
+    new_mode = get_fpu_mode()
+
+    if old_mode != new_mode:
+        raise AssertionError(f"FPU precision mode changed from {old_mode:#x} to "
+                             f"{new_mode:#x} during the test")
+
+    collect_result = _collect_results.get(request.node)
+    if collect_result is not None:
+        old_mode, new_mode = collect_result
+        raise AssertionError(f"FPU precision mode changed from {old_mode:#x} to "
+                             f"{new_mode:#x} when collecting the test")
+
+
+@pytest.fixture(autouse=True)
+def add_np(doctest_namespace):
+    doctest_namespace['np'] = numpy
+
+
+if HAVE_SCPDT:
+
+    @contextmanager
+    def warnings_errors_and_rng(test=None):
+        """Filter out the wall of DeprecationWarnings.
+        """
+        msgs = ["The numpy.linalg.linalg",
+                "The numpy.fft.helper",
+                "dep_util",
+                "pkg_resources",
+                "numpy.core.umath",
+                "msvccompiler",
+                "Deprecated call",
+                "numpy.core",
+                "Importing from numpy.matlib",
+                "This function is deprecated.",    # random_integers
+                "Data type alias 'a'",     # numpy.rec.fromfile
+                "Arrays of 2-dimensional vectors",   # matlib.cross
+                "NumPy warning suppression and assertion utilities are deprecated."
+        ]
+        msg = "|".join(msgs)
+
+        msgs_r = [
+            "invalid value encountered",
+            "divide by zero encountered"
+        ]
+        msg_r = "|".join(msgs_r)
+
+        with warnings.catch_warnings():
+            warnings.filterwarnings(
+                'ignore', category=DeprecationWarning, message=msg
+            )
+            warnings.filterwarnings(
+                'ignore', category=RuntimeWarning, message=msg_r
+            )
+            yield
+
+    # find and check doctests under this context manager
+    dt_config.user_context_mgr = warnings_errors_and_rng
+
+    # numpy specific tweaks from refguide-check
+    dt_config.rndm_markers.add('#uninitialized')
+    dt_config.rndm_markers.add('# uninitialized')
+
+    # make the checker pick on mismatched dtypes
+    dt_config.strict_check = True
+
+    import doctest
+    dt_config.optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
+
+    # recognize the StringDType repr
+    dt_config.check_namespace['StringDType'] = numpy.dtypes.StringDType
+
+    # temporary skips
+    dt_config.skiplist = {
+        'numpy.savez',    # unclosed file
+        'numpy.matlib.savez',
+        'numpy.__array_namespace_info__',
+        'numpy.matlib.__array_namespace_info__',
+    }
+
+    # xfail problematic tutorials
+    dt_config.pytest_extra_xfail = {
+        'how-to-verify-bug.rst': '',
+        'c-info.ufunc-tutorial.rst': '',
+        'basics.interoperability.rst': 'needs pandas',
+        'basics.dispatch.rst': 'errors out in /testing/overrides.py',
+        'basics.subclassing.rst': '.. testcode:: admonitions not understood',
+        'misc.rst': 'manipulates warnings',
+    }
+
+    # ignores are for things fail doctest collection (optionals etc)
+    dt_config.pytest_extra_ignore = [
+        'numpy/distutils',
+        'numpy/_core/cversions.py',
+        'numpy/_pyinstaller',
+        'numpy/random/_examples',
+        'numpy/f2py/_backends/_distutils.py',
+    ]
diff --git a/local_packages/numpy/core/__init__.py b/local_packages/numpy/core/__init__.py
new file mode 100644
index 0000000..cfd96ed
--- /dev/null
+++ b/local_packages/numpy/core/__init__.py
@@ -0,0 +1,33 @@
+"""
+The `numpy.core` submodule exists solely for backward compatibility
+purposes. The original `core` was renamed to `_core` and made private.
+`numpy.core` will be removed in the future.
+"""
+from numpy import _core
+
+from ._utils import _raise_warning
+
+
+# We used to use `np.core._ufunc_reconstruct` to unpickle.
+# This is unnecessary, but old pickles saved before 1.20 will be using it,
+# and there is no reason to break loading them.
+def _ufunc_reconstruct(module, name):
+    # The `fromlist` kwarg is required to ensure that `mod` points to the
+    # inner-most module rather than the parent package when module name is
+    # nested. This makes it possible to pickle non-toplevel ufuncs such as
+    # scipy.special.expit for instance.
+    mod = __import__(module, fromlist=[name])
+    return getattr(mod, name)
+
+
+# force lazy-loading of submodules to ensure a warning is printed
+
+__all__ = ["arrayprint", "defchararray", "_dtype_ctypes", "_dtype",  # noqa: F822
+           "einsumfunc", "fromnumeric", "function_base", "getlimits",
+           "_internal", "multiarray", "_multiarray_umath", "numeric",
+           "numerictypes", "overrides", "records", "shape_base", "umath"]
+
+def __getattr__(attr_name):
+    attr = getattr(_core, attr_name)
+    _raise_warning(attr_name)
+    return attr
diff --git a/local_packages/numpy/core/__init__.pyi b/local_packages/numpy/core/__init__.pyi
new file mode 100644
index 0000000..e69de29
diff --git a/local_packages/numpy/core/_dtype.py b/local_packages/numpy/core/_dtype.py
new file mode 100644
index 0000000..5446079
--- /dev/null
+++ b/local_packages/numpy/core/_dtype.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import _dtype
+
+    from ._utils import _raise_warning
+    ret = getattr(_dtype, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core._dtype' has no attribute {attr_name}")
+    _raise_warning(attr_name, "_dtype")
+    return ret
diff --git a/local_packages/numpy/core/_dtype.pyi b/local_packages/numpy/core/_dtype.pyi
new file mode 100644
index 0000000..e69de29
diff --git a/local_packages/numpy/core/_dtype_ctypes.py b/local_packages/numpy/core/_dtype_ctypes.py
new file mode 100644
index 0000000..10cfba2
--- /dev/null
+++ b/local_packages/numpy/core/_dtype_ctypes.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import _dtype_ctypes
+
+    from ._utils import _raise_warning
+    ret = getattr(_dtype_ctypes, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core._dtype_ctypes' has no attribute {attr_name}")
+    _raise_warning(attr_name, "_dtype_ctypes")
+    return ret
diff --git a/local_packages/numpy/core/_dtype_ctypes.pyi b/local_packages/numpy/core/_dtype_ctypes.pyi
new file mode 100644
index 0000000..e69de29
diff --git a/local_packages/numpy/core/_internal.py b/local_packages/numpy/core/_internal.py
new file mode 100644
index 0000000..63a6ccc
--- /dev/null
+++ b/local_packages/numpy/core/_internal.py
@@ -0,0 +1,27 @@
+from numpy._core import _internal
+
+
+# Build a new array from the information in a pickle.
+# Note that the name numpy.core._internal._reconstruct is embedded in
+# pickles of ndarrays made with NumPy before release 1.0
+# so don't remove the name here, or you'll
+# break backward compatibility.
+def _reconstruct(subtype, shape, dtype):
+    from numpy import ndarray
+    return ndarray.__new__(subtype, shape, dtype)
+
+
+# Pybind11 (in versions <= 2.11.1) imports _dtype_from_pep3118 from the
+# _internal submodule, therefore it must be importable without a warning.
+_dtype_from_pep3118 = _internal._dtype_from_pep3118
+
+def __getattr__(attr_name):
+    from numpy._core import _internal
+
+    from ._utils import _raise_warning
+    ret = getattr(_internal, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core._internal' has no attribute {attr_name}")
+    _raise_warning(attr_name, "_internal")
+    return ret
diff --git a/local_packages/numpy/core/_multiarray_umath.py b/local_packages/numpy/core/_multiarray_umath.py
new file mode 100644
index 0000000..c1e6b4e
--- /dev/null
+++ b/local_packages/numpy/core/_multiarray_umath.py
@@ -0,0 +1,57 @@
+from numpy import ufunc
+from numpy._core import _multiarray_umath
+
+for item in _multiarray_umath.__dir__():
+    # ufuncs appear in pickles with a path in numpy.core._multiarray_umath
+    # and so must import from this namespace without warning or error
+    attr = getattr(_multiarray_umath, item)
+    if isinstance(attr, ufunc):
+        globals()[item] = attr
+
+
+def __getattr__(attr_name):
+    from numpy._core import _multiarray_umath
+
+    from ._utils import _raise_warning
+
+    if attr_name in {"_ARRAY_API", "_UFUNC_API"}:
+        import sys
+        import textwrap
+        import traceback
+
+        from numpy.version import short_version
+
+        msg = textwrap.dedent(f"""
+            A module that was compiled using NumPy 1.x cannot be run in
+            NumPy {short_version} as it may crash. To support both 1.x and 2.x
+            versions of NumPy, modules must be compiled with NumPy 2.0.
+            Some module may need to rebuild instead e.g. with 'pybind11>=2.12'.
+
+            If you are a user of the module, the easiest solution will be to
+            downgrade to 'numpy<2' or try to upgrade the affected module.
+            We expect that some modules will need time to support NumPy 2.
+
+            """)
+        tb_msg = "Traceback (most recent call last):"
+        for line in traceback.format_stack()[:-1]:
+            if "frozen importlib" in line:
+                continue
+            tb_msg += line
+
+        # Also print the message (with traceback).  This is because old versions
+        # of NumPy unfortunately set up the import to replace (and hide) the
+        # error.  The traceback shouldn't be needed, but e.g. pytest plugins
+        # seem to swallow it and we should be failing anyway...
+        sys.stderr.write(msg + tb_msg)
+        raise ImportError(msg)
+
+    ret = getattr(_multiarray_umath, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            "module 'numpy.core._multiarray_umath' has no attribute "
+            f"{attr_name}")
+    _raise_warning(attr_name, "_multiarray_umath")
+    return ret
+
+
+del _multiarray_umath, ufunc
diff --git a/local_packages/numpy/core/_utils.py b/local_packages/numpy/core/_utils.py
new file mode 100644
index 0000000..5f47f4b
--- /dev/null
+++ b/local_packages/numpy/core/_utils.py
@@ -0,0 +1,21 @@
+import warnings
+
+
+def _raise_warning(attr: str, submodule: str | None = None) -> None:
+    new_module = "numpy._core"
+    old_module = "numpy.core"
+    if submodule is not None:
+        new_module = f"{new_module}.{submodule}"
+        old_module = f"{old_module}.{submodule}"
+    warnings.warn(
+        f"{old_module} is deprecated and has been renamed to {new_module}. "
+        "The numpy._core namespace contains private NumPy internals and its "
+        "use is discouraged, as NumPy internals can change without warning in "
+        "any release. In practice, most real-world usage of numpy.core is to "
+        "access functionality in the public NumPy API. If that is the case, "
+        "use the public NumPy API. If not, you are using NumPy internals. "
+        "If you would still like to access an internal attribute, "
+        f"use {new_module}.{attr}.",
+        DeprecationWarning,
+        stacklevel=3
+    )
diff --git a/local_packages/numpy/core/arrayprint.py b/local_packages/numpy/core/arrayprint.py
new file mode 100644
index 0000000..8be5c5c
--- /dev/null
+++ b/local_packages/numpy/core/arrayprint.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import arrayprint
+
+    from ._utils import _raise_warning
+    ret = getattr(arrayprint, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.arrayprint' has no attribute {attr_name}")
+    _raise_warning(attr_name, "arrayprint")
+    return ret
diff --git a/local_packages/numpy/core/defchararray.py b/local_packages/numpy/core/defchararray.py
new file mode 100644
index 0000000..1c87068
--- /dev/null
+++ b/local_packages/numpy/core/defchararray.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import defchararray
+
+    from ._utils import _raise_warning
+    ret = getattr(defchararray, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.defchararray' has no attribute {attr_name}")
+    _raise_warning(attr_name, "defchararray")
+    return ret
diff --git a/local_packages/numpy/core/einsumfunc.py b/local_packages/numpy/core/einsumfunc.py
new file mode 100644
index 0000000..fe5aa39
--- /dev/null
+++ b/local_packages/numpy/core/einsumfunc.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import einsumfunc
+
+    from ._utils import _raise_warning
+    ret = getattr(einsumfunc, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.einsumfunc' has no attribute {attr_name}")
+    _raise_warning(attr_name, "einsumfunc")
+    return ret
diff --git a/local_packages/numpy/core/fromnumeric.py b/local_packages/numpy/core/fromnumeric.py
new file mode 100644
index 0000000..fae7a03
--- /dev/null
+++ b/local_packages/numpy/core/fromnumeric.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import fromnumeric
+
+    from ._utils import _raise_warning
+    ret = getattr(fromnumeric, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.fromnumeric' has no attribute {attr_name}")
+    _raise_warning(attr_name, "fromnumeric")
+    return ret
diff --git a/local_packages/numpy/core/function_base.py b/local_packages/numpy/core/function_base.py
new file mode 100644
index 0000000..e15c971
--- /dev/null
+++ b/local_packages/numpy/core/function_base.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import function_base
+
+    from ._utils import _raise_warning
+    ret = getattr(function_base, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.function_base' has no attribute {attr_name}")
+    _raise_warning(attr_name, "function_base")
+    return ret
diff --git a/local_packages/numpy/core/getlimits.py b/local_packages/numpy/core/getlimits.py
new file mode 100644
index 0000000..dc009cb
--- /dev/null
+++ b/local_packages/numpy/core/getlimits.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import getlimits
+
+    from ._utils import _raise_warning
+    ret = getattr(getlimits, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.getlimits' has no attribute {attr_name}")
+    _raise_warning(attr_name, "getlimits")
+    return ret
diff --git a/local_packages/numpy/core/multiarray.py b/local_packages/numpy/core/multiarray.py
new file mode 100644
index 0000000..b226709
--- /dev/null
+++ b/local_packages/numpy/core/multiarray.py
@@ -0,0 +1,25 @@
+from numpy._core import multiarray
+
+# these must import without warning or error from numpy.core.multiarray to
+# support old pickle files
+for item in ["_reconstruct", "scalar"]:
+    globals()[item] = getattr(multiarray, item)
+
+# Pybind11 (in versions <= 2.11.1) imports _ARRAY_API from the multiarray
+# submodule as a part of NumPy initialization, therefore it must be importable
+# without a warning.
+_ARRAY_API = multiarray._ARRAY_API
+
+def __getattr__(attr_name):
+    from numpy._core import multiarray
+
+    from ._utils import _raise_warning
+    ret = getattr(multiarray, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.multiarray' has no attribute {attr_name}")
+    _raise_warning(attr_name, "multiarray")
+    return ret
+
+
+del multiarray
diff --git a/local_packages/numpy/core/numeric.py b/local_packages/numpy/core/numeric.py
new file mode 100644
index 0000000..ddd70b3
--- /dev/null
+++ b/local_packages/numpy/core/numeric.py
@@ -0,0 +1,12 @@
+def __getattr__(attr_name):
+    from numpy._core import numeric
+
+    from ._utils import _raise_warning
+
+    sentinel = object()
+    ret = getattr(numeric, attr_name, sentinel)
+    if ret is sentinel:
+        raise AttributeError(
+            f"module 'numpy.core.numeric' has no attribute {attr_name}")
+    _raise_warning(attr_name, "numeric")
+    return ret
diff --git a/local_packages/numpy/core/numerictypes.py b/local_packages/numpy/core/numerictypes.py
new file mode 100644
index 0000000..cf2ad99
--- /dev/null
+++ b/local_packages/numpy/core/numerictypes.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import numerictypes
+
+    from ._utils import _raise_warning
+    ret = getattr(numerictypes, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.numerictypes' has no attribute {attr_name}")
+    _raise_warning(attr_name, "numerictypes")
+    return ret
diff --git a/local_packages/numpy/core/overrides.py b/local_packages/numpy/core/overrides.py
new file mode 100644
index 0000000..17830ed
--- /dev/null
+++ b/local_packages/numpy/core/overrides.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import overrides
+
+    from ._utils import _raise_warning
+    ret = getattr(overrides, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.overrides' has no attribute {attr_name}")
+    _raise_warning(attr_name, "overrides")
+    return ret
diff --git a/local_packages/numpy/core/overrides.pyi b/local_packages/numpy/core/overrides.pyi
new file mode 100644
index 0000000..fab3512
--- /dev/null
+++ b/local_packages/numpy/core/overrides.pyi
@@ -0,0 +1,7 @@
+# NOTE: At runtime, this submodule dynamically re-exports any `numpy._core.overrides`
+# member, and issues a `DeprecationWarning` when accessed. But since there is no
+# `__dir__` or `__all__` present, these annotations would be unverifiable. Because
+# this module is also deprecated in favor of `numpy._core`, and therefore not part of
+# the public API, we omit the "re-exports", which in practice would require literal
+# duplication of the stubs in order for the `@deprecated` decorator to be understood
+# by type-checkers.
diff --git a/local_packages/numpy/core/records.py b/local_packages/numpy/core/records.py
new file mode 100644
index 0000000..0cc4503
--- /dev/null
+++ b/local_packages/numpy/core/records.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import records
+
+    from ._utils import _raise_warning
+    ret = getattr(records, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.records' has no attribute {attr_name}")
+    _raise_warning(attr_name, "records")
+    return ret
diff --git a/local_packages/numpy/core/shape_base.py b/local_packages/numpy/core/shape_base.py
new file mode 100644
index 0000000..9cffce7
--- /dev/null
+++ b/local_packages/numpy/core/shape_base.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import shape_base
+
+    from ._utils import _raise_warning
+    ret = getattr(shape_base, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.shape_base' has no attribute {attr_name}")
+    _raise_warning(attr_name, "shape_base")
+    return ret
diff --git a/local_packages/numpy/core/umath.py b/local_packages/numpy/core/umath.py
new file mode 100644
index 0000000..25a60cc
--- /dev/null
+++ b/local_packages/numpy/core/umath.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import umath
+
+    from ._utils import _raise_warning
+    ret = getattr(umath, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.umath' has no attribute {attr_name}")
+    _raise_warning(attr_name, "umath")
+    return ret
diff --git a/local_packages/numpy/ctypeslib/__init__.py b/local_packages/numpy/ctypeslib/__init__.py
new file mode 100644
index 0000000..fd3c773
--- /dev/null
+++ b/local_packages/numpy/ctypeslib/__init__.py
@@ -0,0 +1,13 @@
+from ._ctypeslib import (
+    __all__,
+    __doc__,
+    _concrete_ndptr,
+    _ndptr,
+    as_array,
+    as_ctypes,
+    as_ctypes_type,
+    c_intp,
+    ctypes,
+    load_library,
+    ndpointer,
+)
diff --git a/local_packages/numpy/ctypeslib/__init__.pyi b/local_packages/numpy/ctypeslib/__init__.pyi
new file mode 100644
index 0000000..f088d02
--- /dev/null
+++ b/local_packages/numpy/ctypeslib/__init__.pyi
@@ -0,0 +1,15 @@
+import ctypes
+from ctypes import c_int64 as _c_intp
+
+from ._ctypeslib import (
+    __all__ as __all__,
+    __doc__ as __doc__,
+    _concrete_ndptr as _concrete_ndptr,
+    _ndptr as _ndptr,
+    as_array as as_array,
+    as_ctypes as as_ctypes,
+    as_ctypes_type as as_ctypes_type,
+    c_intp as c_intp,
+    load_library as load_library,
+    ndpointer as ndpointer,
+)
diff --git a/local_packages/numpy/ctypeslib/_ctypeslib.py b/local_packages/numpy/ctypeslib/_ctypeslib.py
new file mode 100644
index 0000000..9255603
--- /dev/null
+++ b/local_packages/numpy/ctypeslib/_ctypeslib.py
@@ -0,0 +1,603 @@
+"""
+============================
+``ctypes`` Utility Functions
+============================
+
+See Also
+--------
+load_library : Load a C library.
+ndpointer : Array restype/argtype with verification.
+as_ctypes : Create a ctypes array from an ndarray.
+as_array : Create an ndarray from a ctypes array.
+
+References
+----------
+.. [1] "SciPy Cookbook: ctypes", https://scipy-cookbook.readthedocs.io/items/Ctypes.html
+
+Examples
+--------
+Load the C library:
+
+>>> _lib = np.ctypeslib.load_library('libmystuff', '.')     #doctest: +SKIP
+
+Our result type, an ndarray that must be of type double, be 1-dimensional
+and is C-contiguous in memory:
+
+>>> array_1d_double = np.ctypeslib.ndpointer(
+...                          dtype=np.double,
+...                          ndim=1, flags='CONTIGUOUS')    #doctest: +SKIP
+
+Our C-function typically takes an array and updates its values
+in-place.  For example::
+
+    void foo_func(double* x, int length)
+    {
+        int i;
+        for (i = 0; i < length; i++) {
+            x[i] = i*i;
+        }
+    }
+
+We wrap it using:
+
+>>> _lib.foo_func.restype = None                      #doctest: +SKIP
+>>> _lib.foo_func.argtypes = [array_1d_double, c_int] #doctest: +SKIP
+
+Then, we're ready to call ``foo_func``:
+
+>>> out = np.empty(15, dtype=np.double)
+>>> _lib.foo_func(out, len(out))                #doctest: +SKIP
+
+"""
+__all__ = ['load_library', 'ndpointer', 'c_intp', 'as_ctypes', 'as_array',
+           'as_ctypes_type']
+
+import os
+
+import numpy as np
+import numpy._core.multiarray as mu
+from numpy._utils import set_module
+
+try:
+    import ctypes
+except ImportError:
+    ctypes = None
+
+if ctypes is None:
+    @set_module("numpy.ctypeslib")
+    def _dummy(*args, **kwds):
+        """
+        Dummy object that raises an ImportError if ctypes is not available.
+
+        Raises
+        ------
+        ImportError
+            If ctypes is not available.
+
+        """
+        raise ImportError("ctypes is not available.")
+    load_library = _dummy
+    as_ctypes = _dummy
+    as_ctypes_type = _dummy
+    as_array = _dummy
+    ndpointer = _dummy
+    from numpy import intp as c_intp
+    _ndptr_base = object
+else:
+    import numpy._core._internal as nic
+    c_intp = nic._getintp_ctype()
+    del nic
+    _ndptr_base = ctypes.c_void_p
+
+    # Adapted from Albert Strasheim
+    @set_module("numpy.ctypeslib")
+    def load_library(libname, loader_path):
+        """
+        It is possible to load a library using
+
+        >>> lib = ctypes.cdll[] # doctest: +SKIP
+
+        But there are cross-platform considerations, such as library file extensions,
+        plus the fact Windows will just load the first library it finds with that name.
+        NumPy supplies the load_library function as a convenience.
+
+        .. versionchanged:: 1.20.0
+            Allow libname and loader_path to take any
+            :term:`python:path-like object`.
+
+        Parameters
+        ----------
+        libname : path-like
+            Name of the library, which can have 'lib' as a prefix,
+            but without an extension.
+        loader_path : path-like
+            Where the library can be found.
+
+        Returns
+        -------
+        ctypes.cdll[libpath] : library object
+           A ctypes library object
+
+        Raises
+        ------
+        OSError
+            If there is no library with the expected extension, or the
+            library is defective and cannot be loaded.
+        """
+        # Convert path-like objects into strings
+        libname = os.fsdecode(libname)
+        loader_path = os.fsdecode(loader_path)
+
+        ext = os.path.splitext(libname)[1]
+        if not ext:
+            import sys
+            import sysconfig
+            # Try to load library with platform-specific name, otherwise
+            # default to libname.[so|dll|dylib].  Sometimes, these files are
+            # built erroneously on non-linux platforms.
+            base_ext = ".so"
+            if sys.platform.startswith("darwin"):
+                base_ext = ".dylib"
+            elif sys.platform.startswith("win"):
+                base_ext = ".dll"
+            libname_ext = [libname + base_ext]
+            so_ext = sysconfig.get_config_var("EXT_SUFFIX")
+            if not so_ext == base_ext:
+                libname_ext.insert(0, libname + so_ext)
+        else:
+            libname_ext = [libname]
+
+        loader_path = os.path.abspath(loader_path)
+        if not os.path.isdir(loader_path):
+            libdir = os.path.dirname(loader_path)
+        else:
+            libdir = loader_path
+
+        for ln in libname_ext:
+            libpath = os.path.join(libdir, ln)
+            if os.path.exists(libpath):
+                try:
+                    return ctypes.cdll[libpath]
+                except OSError:
+                    # defective lib file
+                    raise
+        # if no successful return in the libname_ext loop:
+        raise OSError("no file with expected extension")
+
+
+def _num_fromflags(flaglist):
+    num = 0
+    for val in flaglist:
+        num += mu._flagdict[val]
+    return num
+
+
+_flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE',
+              'OWNDATA', 'WRITEBACKIFCOPY']
+def _flags_fromnum(num):
+    res = []
+    for key in _flagnames:
+        value = mu._flagdict[key]
+        if (num & value):
+            res.append(key)
+    return res
+
+
+class _ndptr(_ndptr_base):
+    @classmethod
+    def from_param(cls, obj):
+        if not isinstance(obj, np.ndarray):
+            raise TypeError("argument must be an ndarray")
+        if cls._dtype_ is not None \
+               and obj.dtype != cls._dtype_:
+            raise TypeError(f"array must have data type {cls._dtype_}")
+        if cls._ndim_ is not None \
+               and obj.ndim != cls._ndim_:
+            raise TypeError("array must have %d dimension(s)" % cls._ndim_)
+        if cls._shape_ is not None \
+               and obj.shape != cls._shape_:
+            raise TypeError(f"array must have shape {str(cls._shape_)}")
+        if cls._flags_ is not None \
+               and ((obj.flags.num & cls._flags_) != cls._flags_):
+            raise TypeError(f"array must have flags {_flags_fromnum(cls._flags_)}")
+        return obj.ctypes
+
+
+class _concrete_ndptr(_ndptr):
+    """
+    Like _ndptr, but with `_shape_` and `_dtype_` specified.
+
+    Notably, this means the pointer has enough information to reconstruct
+    the array, which is not generally true.
+    """
+    def _check_retval_(self):
+        """
+        This method is called when this class is used as the .restype
+        attribute for a shared-library function, to automatically wrap the
+        pointer into an array.
+        """
+        return self.contents
+
+    @property
+    def contents(self):
+        """
+        Get an ndarray viewing the data pointed to by this pointer.
+
+        This mirrors the `contents` attribute of a normal ctypes pointer
+        """
+        full_dtype = np.dtype((self._dtype_, self._shape_))
+        full_ctype = ctypes.c_char * full_dtype.itemsize
+        buffer = ctypes.cast(self, ctypes.POINTER(full_ctype)).contents
+        return np.frombuffer(buffer, dtype=full_dtype).squeeze(axis=0)
+
+
+# Factory for an array-checking class with from_param defined for
+# use with ctypes argtypes mechanism
+_pointer_type_cache = {}
+
+@set_module("numpy.ctypeslib")
+def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
+    """
+    Array-checking restype/argtypes.
+
+    An ndpointer instance is used to describe an ndarray in restypes
+    and argtypes specifications.  This approach is more flexible than
+    using, for example, ``POINTER(c_double)``, since several restrictions
+    can be specified, which are verified upon calling the ctypes function.
+    These include data type, number of dimensions, shape and flags.  If a
+    given array does not satisfy the specified restrictions,
+    a ``TypeError`` is raised.
+
+    Parameters
+    ----------
+    dtype : data-type, optional
+        Array data-type.
+    ndim : int, optional
+        Number of array dimensions.
+    shape : tuple of ints, optional
+        Array shape.
+    flags : str or tuple of str
+        Array flags; may be one or more of:
+
+        - C_CONTIGUOUS / C / CONTIGUOUS
+        - F_CONTIGUOUS / F / FORTRAN
+        - OWNDATA / O
+        - WRITEABLE / W
+        - ALIGNED / A
+        - WRITEBACKIFCOPY / X
+
+    Returns
+    -------
+    klass : ndpointer type object
+        A type object, which is an ``_ndtpr`` instance containing
+        dtype, ndim, shape and flags information.
+
+    Raises
+    ------
+    TypeError
+        If a given array does not satisfy the specified restrictions.
+
+    Examples
+    --------
+    >>> clib.somefunc.argtypes = [np.ctypeslib.ndpointer(dtype=np.float64,
+    ...                                                  ndim=1,
+    ...                                                  flags='C_CONTIGUOUS')]
+    ... #doctest: +SKIP
+    >>> clib.somefunc(np.array([1, 2, 3], dtype=np.float64))
+    ... #doctest: +SKIP
+
+    """
+
+    # normalize dtype to dtype | None
+    if dtype is not None:
+        dtype = np.dtype(dtype)
+
+    # normalize flags to int | None
+    num = None
+    if flags is not None:
+        if isinstance(flags, str):
+            flags = flags.split(',')
+        elif isinstance(flags, (int, np.integer)):
+            num = flags
+            flags = _flags_fromnum(num)
+        elif isinstance(flags, mu.flagsobj):
+            num = flags.num
+            flags = _flags_fromnum(num)
+        if num is None:
+            try:
+                flags = [x.strip().upper() for x in flags]
+            except Exception as e:
+                raise TypeError("invalid flags specification") from e
+            num = _num_fromflags(flags)
+
+    # normalize shape to tuple | None
+    if shape is not None:
+        try:
+            shape = tuple(shape)
+        except TypeError:
+            # single integer -> 1-tuple
+            shape = (shape,)
+
+    cache_key = (dtype, ndim, shape, num)
+
+    try:
+        return _pointer_type_cache[cache_key]
+    except KeyError:
+        pass
+
+    # produce a name for the new type
+    if dtype is None:
+        name = 'any'
+    elif dtype.names is not None:
+        name = str(id(dtype))
+    else:
+        name = dtype.str
+    if ndim is not None:
+        name += "_%dd" % ndim
+    if shape is not None:
+        name += "_" + "x".join(str(x) for x in shape)
+    if flags is not None:
+        name += "_" + "_".join(flags)
+
+    if dtype is not None and shape is not None:
+        base = _concrete_ndptr
+    else:
+        base = _ndptr
+
+    klass = type(f"ndpointer_{name}", (base,),
+                 {"_dtype_": dtype,
+                  "_shape_": shape,
+                  "_ndim_": ndim,
+                  "_flags_": num})
+    _pointer_type_cache[cache_key] = klass
+    return klass
+
+
+if ctypes is not None:
+    def _ctype_ndarray(element_type, shape):
+        """ Create an ndarray of the given element type and shape """
+        for dim in shape[::-1]:
+            element_type = dim * element_type
+            # prevent the type name include np.ctypeslib
+            element_type.__module__ = None
+        return element_type
+
+    def _get_scalar_type_map():
+        """
+        Return a dictionary mapping native endian scalar dtype to ctypes types
+        """
+        ct = ctypes
+        simple_types = [
+            ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong,
+            ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong,
+            ct.c_float, ct.c_double,
+            ct.c_bool,
+        ]
+        return {np.dtype(ctype): ctype for ctype in simple_types}
+
+    _scalar_type_map = _get_scalar_type_map()
+
+    def _ctype_from_dtype_scalar(dtype):
+        # swapping twice ensure that `=` is promoted to <, >, or |
+        dtype_with_endian = dtype.newbyteorder('S').newbyteorder('S')
+        dtype_native = dtype.newbyteorder('=')
+        try:
+            ctype = _scalar_type_map[dtype_native]
+        except KeyError as e:
+            raise NotImplementedError(
+                f"Converting {dtype!r} to a ctypes type"
+            ) from None
+
+        if dtype_with_endian.byteorder == '>':
+            ctype = ctype.__ctype_be__
+        elif dtype_with_endian.byteorder == '<':
+            ctype = ctype.__ctype_le__
+
+        return ctype
+
+    def _ctype_from_dtype_subarray(dtype):
+        element_dtype, shape = dtype.subdtype
+        ctype = _ctype_from_dtype(element_dtype)
+        return _ctype_ndarray(ctype, shape)
+
+    def _ctype_from_dtype_structured(dtype):
+        # extract offsets of each field
+        field_data = []
+        for name in dtype.names:
+            field_dtype, offset = dtype.fields[name][:2]
+            field_data.append((offset, name, _ctype_from_dtype(field_dtype)))
+
+        # ctypes doesn't care about field order
+        field_data = sorted(field_data, key=lambda f: f[0])
+
+        if len(field_data) > 1 and all(offset == 0 for offset, _, _ in field_data):
+            # union, if multiple fields all at address 0
+            size = 0
+            _fields_ = []
+            for offset, name, ctype in field_data:
+                _fields_.append((name, ctype))
+                size = max(size, ctypes.sizeof(ctype))
+
+            # pad to the right size
+            if dtype.itemsize != size:
+                _fields_.append(('', ctypes.c_char * dtype.itemsize))
+
+            # we inserted manual padding, so always `_pack_`
+            return type('union', (ctypes.Union,), {
+                '_fields_': _fields_,
+                '_pack_': 1,
+                '__module__': None,
+            })
+        else:
+            last_offset = 0
+            _fields_ = []
+            for offset, name, ctype in field_data:
+                padding = offset - last_offset
+                if padding < 0:
+                    raise NotImplementedError("Overlapping fields")
+                if padding > 0:
+                    _fields_.append(('', ctypes.c_char * padding))
+
+                _fields_.append((name, ctype))
+                last_offset = offset + ctypes.sizeof(ctype)
+
+            padding = dtype.itemsize - last_offset
+            if padding > 0:
+                _fields_.append(('', ctypes.c_char * padding))
+
+            # we inserted manual padding, so always `_pack_`
+            return type('struct', (ctypes.Structure,), {
+                '_fields_': _fields_,
+                '_pack_': 1,
+                '__module__': None,
+            })
+
+    def _ctype_from_dtype(dtype):
+        if dtype.fields is not None:
+            return _ctype_from_dtype_structured(dtype)
+        elif dtype.subdtype is not None:
+            return _ctype_from_dtype_subarray(dtype)
+        else:
+            return _ctype_from_dtype_scalar(dtype)
+
+    @set_module("numpy.ctypeslib")
+    def as_ctypes_type(dtype):
+        r"""
+        Convert a dtype into a ctypes type.
+
+        Parameters
+        ----------
+        dtype : dtype
+            The dtype to convert
+
+        Returns
+        -------
+        ctype
+            A ctype scalar, union, array, or struct
+
+        Raises
+        ------
+        NotImplementedError
+            If the conversion is not possible
+
+        Notes
+        -----
+        This function does not losslessly round-trip in either direction.
+
+        ``np.dtype(as_ctypes_type(dt))`` will:
+
+        - insert padding fields
+        - reorder fields to be sorted by offset
+        - discard field titles
+
+        ``as_ctypes_type(np.dtype(ctype))`` will:
+
+        - discard the class names of `ctypes.Structure`\ s and
+          `ctypes.Union`\ s
+        - convert single-element `ctypes.Union`\ s into single-element
+          `ctypes.Structure`\ s
+        - insert padding fields
+
+        Examples
+        --------
+        Converting a simple dtype:
+
+        >>> dt = np.dtype('int8')
+        >>> ctype = np.ctypeslib.as_ctypes_type(dt)
+        >>> ctype
+        
+
+        Converting a structured dtype:
+
+        >>> dt = np.dtype([('x', 'i4'), ('y', 'f4')])
+        >>> ctype = np.ctypeslib.as_ctypes_type(dt)
+        >>> ctype
+        
+
+        """
+        return _ctype_from_dtype(np.dtype(dtype))
+
+    @set_module("numpy.ctypeslib")
+    def as_array(obj, shape=None):
+        """
+        Create a numpy array from a ctypes array or POINTER.
+
+        The numpy array shares the memory with the ctypes object.
+
+        The shape parameter must be given if converting from a ctypes POINTER.
+        The shape parameter is ignored if converting from a ctypes array
+
+        Examples
+        --------
+        Converting a ctypes integer array:
+
+        >>> import ctypes
+        >>> ctypes_array = (ctypes.c_int * 5)(0, 1, 2, 3, 4)
+        >>> np_array = np.ctypeslib.as_array(ctypes_array)
+        >>> np_array
+        array([0, 1, 2, 3, 4], dtype=int32)
+
+        Converting a ctypes POINTER:
+
+        >>> import ctypes
+        >>> buffer = (ctypes.c_int * 5)(0, 1, 2, 3, 4)
+        >>> pointer = ctypes.cast(buffer, ctypes.POINTER(ctypes.c_int))
+        >>> np_array = np.ctypeslib.as_array(pointer, (5,))
+        >>> np_array
+        array([0, 1, 2, 3, 4], dtype=int32)
+
+        """
+        if isinstance(obj, ctypes._Pointer):
+            # convert pointers to an array of the desired shape
+            if shape is None:
+                raise TypeError(
+                    'as_array() requires a shape argument when called on a '
+                    'pointer')
+            p_arr_type = ctypes.POINTER(_ctype_ndarray(obj._type_, shape))
+            obj = ctypes.cast(obj, p_arr_type).contents
+
+        return np.asarray(obj)
+
+    @set_module("numpy.ctypeslib")
+    def as_ctypes(obj):
+        """
+        Create and return a ctypes object from a numpy array.  Actually
+        anything that exposes the __array_interface__ is accepted.
+
+        Examples
+        --------
+        Create ctypes object from inferred int ``np.array``:
+
+        >>> inferred_int_array = np.array([1, 2, 3])
+        >>> c_int_array = np.ctypeslib.as_ctypes(inferred_int_array)
+        >>> type(c_int_array)
+        
+        >>> c_int_array[:]
+        [1, 2, 3]
+
+        Create ctypes object from explicit 8 bit unsigned int ``np.array`` :
+
+        >>> exp_int_array = np.array([1, 2, 3], dtype=np.uint8)
+        >>> c_int_array = np.ctypeslib.as_ctypes(exp_int_array)
+        >>> type(c_int_array)
+        
+        >>> c_int_array[:]
+        [1, 2, 3]
+
+        """
+        ai = obj.__array_interface__
+        if ai["strides"]:
+            raise TypeError("strided arrays not supported")
+        if ai["version"] != 3:
+            raise TypeError("only __array_interface__ version 3 supported")
+        addr, readonly = ai["data"]
+        if readonly:
+            raise TypeError("readonly arrays unsupported")
+
+        # can't use `_dtype((ai["typestr"], ai["shape"]))` here, as it overflows
+        # dtype.itemsize (gh-14214)
+        ctype_scalar = as_ctypes_type(ai["typestr"])
+        result_type = _ctype_ndarray(ctype_scalar, ai["shape"])
+        result = result_type.from_address(addr)
+        result.__keep = obj
+        return result
diff --git a/local_packages/numpy/ctypeslib/_ctypeslib.pyi b/local_packages/numpy/ctypeslib/_ctypeslib.pyi
new file mode 100644
index 0000000..8881141
--- /dev/null
+++ b/local_packages/numpy/ctypeslib/_ctypeslib.pyi
@@ -0,0 +1,236 @@
+# NOTE: Numpy's mypy plugin is used for importing the correct
+# platform-specific `ctypes._SimpleCData[int]` sub-type
+import ctypes
+from _typeshed import StrOrBytesPath
+from collections.abc import Iterable, Sequence
+from ctypes import c_int64 as _c_intp
+from typing import Any, ClassVar, Generic, Literal as L, TypeAlias, TypeVar, overload
+
+import numpy as np
+from numpy import (
+    byte,
+    double,
+    dtype,
+    generic,
+    intc,
+    long,
+    longdouble,
+    longlong,
+    ndarray,
+    short,
+    single,
+    ubyte,
+    uintc,
+    ulong,
+    ulonglong,
+    ushort,
+    void,
+)
+from numpy._core._internal import _ctypes
+from numpy._core.multiarray import flagsobj
+from numpy._typing import (
+    DTypeLike,
+    NDArray,
+    _AnyShape,
+    _ArrayLike,
+    _BoolCodes,
+    _ByteCodes,
+    _DoubleCodes,
+    _DTypeLike,
+    _IntCCodes,
+    _LongCodes,
+    _LongDoubleCodes,
+    _LongLongCodes,
+    _ShapeLike,
+    _ShortCodes,
+    _SingleCodes,
+    _UByteCodes,
+    _UIntCCodes,
+    _ULongCodes,
+    _ULongLongCodes,
+    _UShortCodes,
+    _VoidDTypeLike,
+)
+
+__all__ = ["load_library", "ndpointer", "c_intp", "as_ctypes", "as_array", "as_ctypes_type"]
+
+# TODO: Add a proper `_Shape` bound once we've got variadic typevars
+_DTypeT = TypeVar("_DTypeT", bound=dtype)
+_DTypeOptionalT = TypeVar("_DTypeOptionalT", bound=dtype | None)
+_ScalarT = TypeVar("_ScalarT", bound=generic)
+
+_FlagsKind: TypeAlias = L[
+    "C_CONTIGUOUS", "CONTIGUOUS", "C",
+    "F_CONTIGUOUS", "FORTRAN", "F",
+    "ALIGNED", "A",
+    "WRITEABLE", "W",
+    "OWNDATA", "O",
+    "WRITEBACKIFCOPY", "X",
+]
+
+# TODO: Add a shape typevar once we have variadic typevars (PEP 646)
+class _ndptr(ctypes.c_void_p, Generic[_DTypeOptionalT]):
+    # In practice these 4 classvars are defined in the dynamic class
+    # returned by `ndpointer`
+    _dtype_: ClassVar[_DTypeOptionalT]
+    _shape_: ClassVar[_AnyShape | None]
+    _ndim_: ClassVar[int | None]
+    _flags_: ClassVar[list[_FlagsKind] | None]
+
+    @overload  # type: ignore[override]
+    @classmethod
+    def from_param(cls: type[_ndptr[None]], obj: NDArray[Any]) -> _ctypes[Any]: ...
+    @overload
+    @classmethod
+    def from_param(cls: type[_ndptr[_DTypeT]], obj: ndarray[Any, _DTypeT]) -> _ctypes[Any]: ...
+
+class _concrete_ndptr(_ndptr[_DTypeT]):
+    _dtype_: ClassVar[_DTypeT]
+    _shape_: ClassVar[_AnyShape]
+    @property
+    def contents(self) -> ndarray[_AnyShape, _DTypeT]: ...
+
+def load_library(libname: StrOrBytesPath, loader_path: StrOrBytesPath) -> ctypes.CDLL: ...
+
+c_intp = _c_intp
+
+@overload
+def ndpointer(
+    dtype: None = None,
+    ndim: int | None = None,
+    shape: _ShapeLike | None = None,
+    flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None,
+) -> type[_ndptr[None]]: ...
+@overload
+def ndpointer(
+    dtype: _DTypeLike[_ScalarT],
+    ndim: int | None = None,
+    *,
+    shape: _ShapeLike,
+    flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None,
+) -> type[_concrete_ndptr[dtype[_ScalarT]]]: ...
+@overload
+def ndpointer(
+    dtype: DTypeLike | None,
+    ndim: int | None = None,
+    *,
+    shape: _ShapeLike,
+    flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None,
+) -> type[_concrete_ndptr[dtype]]: ...
+@overload
+def ndpointer(
+    dtype: _DTypeLike[_ScalarT],
+    ndim: int | None = None,
+    shape: None = None,
+    flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None,
+) -> type[_ndptr[dtype[_ScalarT]]]: ...
+@overload
+def ndpointer(
+    dtype: DTypeLike | None,
+    ndim: int | None = None,
+    shape: None = None,
+    flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = None,
+) -> type[_ndptr[dtype]]: ...
+
+@overload
+def as_ctypes_type(dtype: _BoolCodes | _DTypeLike[np.bool] | type[ctypes.c_bool]) -> type[ctypes.c_bool]: ...
+@overload
+def as_ctypes_type(dtype: _ByteCodes | _DTypeLike[byte] | type[ctypes.c_byte]) -> type[ctypes.c_byte]: ...
+@overload
+def as_ctypes_type(dtype: _ShortCodes | _DTypeLike[short] | type[ctypes.c_short]) -> type[ctypes.c_short]: ...
+@overload
+def as_ctypes_type(dtype: _IntCCodes | _DTypeLike[intc] | type[ctypes.c_int]) -> type[ctypes.c_int]: ...
+@overload
+def as_ctypes_type(dtype: _LongCodes | _DTypeLike[long] | type[ctypes.c_long]) -> type[ctypes.c_long]: ...
+@overload
+def as_ctypes_type(dtype: type[int]) -> type[c_intp]: ...
+@overload
+def as_ctypes_type(dtype: _LongLongCodes | _DTypeLike[longlong] | type[ctypes.c_longlong]) -> type[ctypes.c_longlong]: ...
+@overload
+def as_ctypes_type(dtype: _UByteCodes | _DTypeLike[ubyte] | type[ctypes.c_ubyte]) -> type[ctypes.c_ubyte]: ...
+@overload
+def as_ctypes_type(dtype: _UShortCodes | _DTypeLike[ushort] | type[ctypes.c_ushort]) -> type[ctypes.c_ushort]: ...
+@overload
+def as_ctypes_type(dtype: _UIntCCodes | _DTypeLike[uintc] | type[ctypes.c_uint]) -> type[ctypes.c_uint]: ...
+@overload
+def as_ctypes_type(dtype: _ULongCodes | _DTypeLike[ulong] | type[ctypes.c_ulong]) -> type[ctypes.c_ulong]: ...
+@overload
+def as_ctypes_type(dtype: _ULongLongCodes | _DTypeLike[ulonglong] | type[ctypes.c_ulonglong]) -> type[ctypes.c_ulonglong]: ...
+@overload
+def as_ctypes_type(dtype: _SingleCodes | _DTypeLike[single] | type[ctypes.c_float]) -> type[ctypes.c_float]: ...
+@overload
+def as_ctypes_type(dtype: _DoubleCodes | _DTypeLike[double] | type[float | ctypes.c_double]) -> type[ctypes.c_double]: ...
+@overload
+def as_ctypes_type(dtype: _LongDoubleCodes | _DTypeLike[longdouble] | type[ctypes.c_longdouble]) -> type[ctypes.c_longdouble]: ...
+@overload
+def as_ctypes_type(dtype: _VoidDTypeLike) -> type[Any]: ...  # `ctypes.Union` or `ctypes.Structure`
+@overload
+def as_ctypes_type(dtype: str) -> type[Any]: ...
+
+@overload
+def as_array(obj: ctypes._PointerLike, shape: Sequence[int]) -> NDArray[Any]: ...
+@overload
+def as_array(obj: _ArrayLike[_ScalarT], shape: _ShapeLike | None = None) -> NDArray[_ScalarT]: ...
+@overload
+def as_array(obj: object, shape: _ShapeLike | None = None) -> NDArray[Any]: ...
+
+@overload
+def as_ctypes(obj: np.bool) -> ctypes.c_bool: ...
+@overload
+def as_ctypes(obj: byte) -> ctypes.c_byte: ...
+@overload
+def as_ctypes(obj: short) -> ctypes.c_short: ...
+@overload
+def as_ctypes(obj: intc) -> ctypes.c_int: ...
+@overload
+def as_ctypes(obj: long) -> ctypes.c_long: ...
+@overload
+def as_ctypes(obj: longlong) -> ctypes.c_longlong: ...  # type: ignore[overload-cannot-match]
+@overload
+def as_ctypes(obj: ubyte) -> ctypes.c_ubyte: ...
+@overload
+def as_ctypes(obj: ushort) -> ctypes.c_ushort: ...
+@overload
+def as_ctypes(obj: uintc) -> ctypes.c_uint: ...
+@overload
+def as_ctypes(obj: ulong) -> ctypes.c_ulong: ...
+@overload
+def as_ctypes(obj: ulonglong) -> ctypes.c_ulonglong: ...  # type: ignore[overload-cannot-match]
+@overload
+def as_ctypes(obj: single) -> ctypes.c_float: ...
+@overload
+def as_ctypes(obj: double) -> ctypes.c_double: ...
+@overload
+def as_ctypes(obj: longdouble) -> ctypes.c_longdouble: ...
+@overload
+def as_ctypes(obj: void) -> Any: ...  # `ctypes.Union` or `ctypes.Structure`
+@overload
+def as_ctypes(obj: NDArray[np.bool]) -> ctypes.Array[ctypes.c_bool]: ...
+@overload
+def as_ctypes(obj: NDArray[byte]) -> ctypes.Array[ctypes.c_byte]: ...
+@overload
+def as_ctypes(obj: NDArray[short]) -> ctypes.Array[ctypes.c_short]: ...
+@overload
+def as_ctypes(obj: NDArray[intc]) -> ctypes.Array[ctypes.c_int]: ...
+@overload
+def as_ctypes(obj: NDArray[long]) -> ctypes.Array[ctypes.c_long]: ...
+@overload
+def as_ctypes(obj: NDArray[longlong]) -> ctypes.Array[ctypes.c_longlong]: ...  # type: ignore[overload-cannot-match]
+@overload
+def as_ctypes(obj: NDArray[ubyte]) -> ctypes.Array[ctypes.c_ubyte]: ...
+@overload
+def as_ctypes(obj: NDArray[ushort]) -> ctypes.Array[ctypes.c_ushort]: ...
+@overload
+def as_ctypes(obj: NDArray[uintc]) -> ctypes.Array[ctypes.c_uint]: ...
+@overload
+def as_ctypes(obj: NDArray[ulong]) -> ctypes.Array[ctypes.c_ulong]: ...
+@overload
+def as_ctypes(obj: NDArray[ulonglong]) -> ctypes.Array[ctypes.c_ulonglong]: ...  # type: ignore[overload-cannot-match]
+@overload
+def as_ctypes(obj: NDArray[single]) -> ctypes.Array[ctypes.c_float]: ...
+@overload
+def as_ctypes(obj: NDArray[double]) -> ctypes.Array[ctypes.c_double]: ...
+@overload
+def as_ctypes(obj: NDArray[longdouble]) -> ctypes.Array[ctypes.c_longdouble]: ...
+@overload
+def as_ctypes(obj: NDArray[void]) -> ctypes.Array[Any]: ...  # `ctypes.Union` or `ctypes.Structure`
diff --git a/local_packages/numpy/doc/ufuncs.py b/local_packages/numpy/doc/ufuncs.py
new file mode 100644
index 0000000..7324168
--- /dev/null
+++ b/local_packages/numpy/doc/ufuncs.py
@@ -0,0 +1,138 @@
+"""
+===================
+Universal Functions
+===================
+
+Ufuncs are, generally speaking, mathematical functions or operations that are
+applied element-by-element to the contents of an array. That is, the result
+in each output array element only depends on the value in the corresponding
+input array (or arrays) and on no other array elements. NumPy comes with a
+large suite of ufuncs, and scipy extends that suite substantially. The simplest
+example is the addition operator: ::
+
+ >>> np.array([0,2,3,4]) + np.array([1,1,-1,2])
+ array([1, 3, 2, 6])
+
+The ufunc module lists all the available ufuncs in numpy. Documentation on
+the specific ufuncs may be found in those modules. This documentation is
+intended to address the more general aspects of ufuncs common to most of
+them. All of the ufuncs that make use of Python operators (e.g., +, -, etc.)
+have equivalent functions defined (e.g. add() for +)
+
+Type coercion
+=============
+
+What happens when a binary operator (e.g., +,-,\\*,/, etc) deals with arrays of
+two different types? What is the type of the result? Typically, the result is
+the higher of the two types. For example: ::
+
+ float32 + float64 -> float64
+ int8 + int32 -> int32
+ int16 + float32 -> float32
+ float32 + complex64 -> complex64
+
+There are some less obvious cases generally involving mixes of types
+(e.g. uints, ints and floats) where equal bit sizes for each are not
+capable of saving all the information in a different type of equivalent
+bit size. Some examples are int32 vs float32 or uint32 vs int32.
+Generally, the result is the higher type of larger size than both
+(if available). So: ::
+
+ int32 + float32 -> float64
+ uint32 + int32 -> int64
+
+Finally, the type coercion behavior when expressions involve Python
+scalars is different than that seen for arrays. Since Python has a
+limited number of types, combining a Python int with a dtype=np.int8
+array does not coerce to the higher type but instead, the type of the
+array prevails. So the rules for Python scalars combined with arrays is
+that the result will be that of the array equivalent the Python scalar
+if the Python scalar is of a higher 'kind' than the array (e.g., float
+vs. int), otherwise the resultant type will be that of the array.
+For example: ::
+
+  Python int + int8 -> int8
+  Python float + int8 -> float64
+
+ufunc methods
+=============
+
+Binary ufuncs support 4 methods.
+
+**.reduce(arr)** applies the binary operator to elements of the array in
+  sequence. For example: ::
+
+ >>> np.add.reduce(np.arange(10))  # adds all elements of array
+ 45
+
+For multidimensional arrays, the first dimension is reduced by default: ::
+
+ >>> np.add.reduce(np.arange(10).reshape(2,5))
+     array([ 5,  7,  9, 11, 13])
+
+The axis keyword can be used to specify different axes to reduce: ::
+
+ >>> np.add.reduce(np.arange(10).reshape(2,5),axis=1)
+ array([10, 35])
+
+**.accumulate(arr)** applies the binary operator and generates an
+equivalently shaped array that includes the accumulated amount for each
+element of the array. A couple examples: ::
+
+ >>> np.add.accumulate(np.arange(10))
+ array([ 0,  1,  3,  6, 10, 15, 21, 28, 36, 45])
+ >>> np.multiply.accumulate(np.arange(1,9))
+ array([    1,     2,     6,    24,   120,   720,  5040, 40320])
+
+The behavior for multidimensional arrays is the same as for .reduce(),
+as is the use of the axis keyword).
+
+**.reduceat(arr,indices)** allows one to apply reduce to selected parts
+  of an array. It is a difficult method to understand. See the documentation
+  at:
+
+**.outer(arr1,arr2)** generates an outer operation on the two arrays arr1 and
+  arr2. It will work on multidimensional arrays (the shape of the result is
+  the concatenation of the two input shapes.: ::
+
+ >>> np.multiply.outer(np.arange(3),np.arange(4))
+ array([[0, 0, 0, 0],
+        [0, 1, 2, 3],
+        [0, 2, 4, 6]])
+
+Output arguments
+================
+
+All ufuncs accept an optional output array. The array must be of the expected
+output shape. Beware that if the type of the output array is of a different
+(and lower) type than the output result, the results may be silently truncated
+or otherwise corrupted in the downcast to the lower type. This usage is useful
+when one wants to avoid creating large temporary arrays and instead allows one
+to reuse the same array memory repeatedly (at the expense of not being able to
+use more convenient operator notation in expressions). Note that when the
+output argument is used, the ufunc still returns a reference to the result.
+
+ >>> x = np.arange(2)
+ >>> np.add(np.arange(2, dtype=float), np.arange(2, dtype=float), x,
+ ...        casting='unsafe')
+ array([0, 2])
+ >>> x
+ array([0, 2])
+
+and & or as ufuncs
+==================
+
+Invariably people try to use the python 'and' and 'or' as logical operators
+(and quite understandably). But these operators do not behave as normal
+operators since Python treats these quite differently. They cannot be
+overloaded with array equivalents. Thus using 'and' or 'or' with an array
+results in an error. There are two alternatives:
+
+ 1) use the ufunc functions logical_and() and logical_or().
+ 2) use the bitwise operators & and \\|. The drawback of these is that if
+    the arguments to these operators are not boolean arrays, the result is
+    likely incorrect. On the other hand, most usages of logical_and and
+    logical_or are with boolean arrays. As long as one is careful, this is
+    a convenient way to apply these operators.
+
+"""
diff --git a/local_packages/numpy/dtypes.py b/local_packages/numpy/dtypes.py
new file mode 100644
index 0000000..550a29e
--- /dev/null
+++ b/local_packages/numpy/dtypes.py
@@ -0,0 +1,41 @@
+"""
+This module is home to specific dtypes related functionality and their classes.
+For more general information about dtypes, also see `numpy.dtype` and
+:ref:`arrays.dtypes`.
+
+Similar to the builtin ``types`` module, this submodule defines types (classes)
+that are not widely used directly.
+
+.. versionadded:: NumPy 1.25
+
+    The dtypes module is new in NumPy 1.25.  Previously DType classes were
+    only accessible indirectly.
+
+
+DType classes
+-------------
+
+The following are the classes of the corresponding NumPy dtype instances and
+NumPy scalar types.  The classes can be used in ``isinstance`` checks and can
+also be instantiated or used directly.  Direct use of these classes is not
+typical, since their scalar counterparts (e.g. ``np.float64``) or strings
+like ``"float64"`` can be used.
+"""
+
+# See doc/source/reference/routines.dtypes.rst for module-level docs
+
+__all__ = []
+
+
+def _add_dtype_helper(DType, alias):
+    # Function to add DTypes a bit more conveniently without channeling them
+    # through `numpy._core._multiarray_umath` namespace or similar.
+    from numpy import dtypes
+
+    setattr(dtypes, DType.__name__, DType)
+    __all__.append(DType.__name__)
+
+    if alias:
+        alias = alias.removeprefix("numpy.dtypes.")
+        setattr(dtypes, alias, DType)
+        __all__.append(alias)
diff --git a/local_packages/numpy/dtypes.pyi b/local_packages/numpy/dtypes.pyi
new file mode 100644
index 0000000..3e34113
--- /dev/null
+++ b/local_packages/numpy/dtypes.pyi
@@ -0,0 +1,630 @@
+# ruff: noqa: ANN401
+from typing import (
+    Any,
+    Generic,
+    Literal as L,
+    LiteralString,
+    Never,
+    NoReturn,
+    Self,
+    TypeAlias,
+    final,
+    overload,
+    type_check_only,
+)
+from typing_extensions import TypeVar
+
+import numpy as np
+
+__all__ = [  # noqa: RUF022
+    "BoolDType",
+    "Int8DType",
+    "ByteDType",
+    "UInt8DType",
+    "UByteDType",
+    "Int16DType",
+    "ShortDType",
+    "UInt16DType",
+    "UShortDType",
+    "Int32DType",
+    "IntDType",
+    "UInt32DType",
+    "UIntDType",
+    "Int64DType",
+    "LongDType",
+    "UInt64DType",
+    "ULongDType",
+    "LongLongDType",
+    "ULongLongDType",
+    "Float16DType",
+    "Float32DType",
+    "Float64DType",
+    "LongDoubleDType",
+    "Complex64DType",
+    "Complex128DType",
+    "CLongDoubleDType",
+    "ObjectDType",
+    "BytesDType",
+    "StrDType",
+    "VoidDType",
+    "DateTime64DType",
+    "TimeDelta64DType",
+    "StringDType",
+]
+
+# Helper base classes (typing-only)
+
+_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True)
+
+@type_check_only
+class _SimpleDType(np.dtype[_ScalarT_co], Generic[_ScalarT_co]):  # type: ignore[misc]  # pyright: ignore[reportGeneralTypeIssues]
+    names: None  # pyright: ignore[reportIncompatibleVariableOverride]
+    def __new__(cls, /) -> Self: ...
+    def __getitem__(self, key: Any, /) -> NoReturn: ...
+    @property
+    def base(self) -> np.dtype[_ScalarT_co]: ...
+    @property
+    def fields(self) -> None: ...
+    @property
+    def isalignedstruct(self) -> L[False]: ...
+    @property
+    def isnative(self) -> L[True]: ...
+    @property
+    def ndim(self) -> L[0]: ...
+    @property
+    def shape(self) -> tuple[()]: ...
+    @property
+    def subdtype(self) -> None: ...
+
+@type_check_only
+class _LiteralDType(_SimpleDType[_ScalarT_co], Generic[_ScalarT_co]):  # type: ignore[misc]
+    @property
+    def flags(self) -> L[0]: ...
+    @property
+    def hasobject(self) -> L[False]: ...
+
+# Helper mixins (typing-only):
+
+_KindT_co = TypeVar("_KindT_co", bound=LiteralString, covariant=True)
+_CharT_co = TypeVar("_CharT_co", bound=LiteralString, covariant=True)
+_NumT_co = TypeVar("_NumT_co", bound=int, covariant=True)
+
+@type_check_only
+class _TypeCodes(Generic[_KindT_co, _CharT_co, _NumT_co]):
+    @final
+    @property
+    def kind(self) -> _KindT_co: ...
+    @final
+    @property
+    def char(self) -> _CharT_co: ...
+    @final
+    @property
+    def num(self) -> _NumT_co: ...
+
+@type_check_only
+class _NoOrder:
+    @final
+    @property
+    def byteorder(self) -> L["|"]: ...
+
+@type_check_only
+class _NativeOrder:
+    @final
+    @property
+    def byteorder(self) -> L["="]: ...
+
+_DataSize_co = TypeVar("_DataSize_co", bound=int, covariant=True)
+_ItemSize_co = TypeVar("_ItemSize_co", bound=int, covariant=True, default=int)
+
+@type_check_only
+class _NBit(Generic[_DataSize_co, _ItemSize_co]):
+    @final
+    @property
+    def alignment(self) -> _DataSize_co: ...
+    @final
+    @property
+    def itemsize(self) -> _ItemSize_co: ...
+
+@type_check_only
+class _8Bit(_NoOrder, _NBit[L[1], L[1]]): ...
+
+# Boolean:
+
+@final
+class BoolDType(  # type: ignore[misc]
+    _TypeCodes[L["b"], L["?"], L[0]],
+    _8Bit,
+    _LiteralDType[np.bool],
+):
+    @property
+    def name(self) -> L["bool"]: ...
+    @property
+    def str(self) -> L["|b1"]: ...
+
+# Sized integers:
+
+@final
+class Int8DType(  # type: ignore[misc]
+    _TypeCodes[L["i"], L["b"], L[1]],
+    _8Bit,
+    _LiteralDType[np.int8],
+):
+    @property
+    def name(self) -> L["int8"]: ...
+    @property
+    def str(self) -> L["|i1"]: ...
+
+@final
+class UInt8DType(  # type: ignore[misc]
+    _TypeCodes[L["u"], L["B"], L[2]],
+    _8Bit,
+    _LiteralDType[np.uint8],
+):
+    @property
+    def name(self) -> L["uint8"]: ...
+    @property
+    def str(self) -> L["|u1"]: ...
+
+@final
+class Int16DType(  # type: ignore[misc]
+    _TypeCodes[L["i"], L["h"], L[3]],
+    _NativeOrder,
+    _NBit[L[2], L[2]],
+    _LiteralDType[np.int16],
+):
+    @property
+    def name(self) -> L["int16"]: ...
+    @property
+    def str(self) -> L["i2"]: ...
+
+@final
+class UInt16DType(  # type: ignore[misc]
+    _TypeCodes[L["u"], L["H"], L[4]],
+    _NativeOrder,
+    _NBit[L[2], L[2]],
+    _LiteralDType[np.uint16],
+):
+    @property
+    def name(self) -> L["uint16"]: ...
+    @property
+    def str(self) -> L["u2"]: ...
+
+@final
+class Int32DType(  # type: ignore[misc]
+    _TypeCodes[L["i"], L["i", "l"], L[5, 7]],
+    _NativeOrder,
+    _NBit[L[4], L[4]],
+    _LiteralDType[np.int32],
+):
+    @property
+    def name(self) -> L["int32"]: ...
+    @property
+    def str(self) -> L["i4"]: ...
+
+@final
+class UInt32DType(  # type: ignore[misc]
+    _TypeCodes[L["u"], L["I", "L"], L[6, 8]],
+    _NativeOrder,
+    _NBit[L[4], L[4]],
+    _LiteralDType[np.uint32],
+):
+    @property
+    def name(self) -> L["uint32"]: ...
+    @property
+    def str(self) -> L["u4"]: ...
+
+@final
+class Int64DType(  # type: ignore[misc]
+    _TypeCodes[L["i"], L["l", "q"], L[7, 9]],
+    _NativeOrder,
+    _NBit[L[8], L[8]],
+    _LiteralDType[np.int64],
+):
+    @property
+    def name(self) -> L["int64"]: ...
+    @property
+    def str(self) -> L["i8"]: ...
+
+@final
+class UInt64DType(  # type: ignore[misc]
+    _TypeCodes[L["u"], L["L", "Q"], L[8, 10]],
+    _NativeOrder,
+    _NBit[L[8], L[8]],
+    _LiteralDType[np.uint64],
+):
+    @property
+    def name(self) -> L["uint64"]: ...
+    @property
+    def str(self) -> L["u8"]: ...
+
+# Standard C-named version/alias:
+# NOTE: Don't make these `Final`: it will break stubtest
+ByteDType = Int8DType
+UByteDType = UInt8DType
+ShortDType = Int16DType
+UShortDType = UInt16DType
+
+@final
+class IntDType(  # type: ignore[misc]
+    _TypeCodes[L["i"], L["i"], L[5]],
+    _NativeOrder,
+    _NBit[L[4], L[4]],
+    _LiteralDType[np.intc],
+):
+    @property
+    def name(self) -> L["int32"]: ...
+    @property
+    def str(self) -> L["i4"]: ...
+
+@final
+class UIntDType(  # type: ignore[misc]
+    _TypeCodes[L["u"], L["I"], L[6]],
+    _NativeOrder,
+    _NBit[L[4], L[4]],
+    _LiteralDType[np.uintc],
+):
+    @property
+    def name(self) -> L["uint32"]: ...
+    @property
+    def str(self) -> L["u4"]: ...
+
+@final
+class LongDType(  # type: ignore[misc]
+    _TypeCodes[L["i"], L["l"], L[7]],
+    _NativeOrder,
+    _NBit[L[4, 8], L[4, 8]],
+    _LiteralDType[np.long],
+):
+    @property
+    def name(self) -> L["int32", "int64"]: ...
+    @property
+    def str(self) -> L["i4", "i8"]: ...
+
+@final
+class ULongDType(  # type: ignore[misc]
+    _TypeCodes[L["u"], L["L"], L[8]],
+    _NativeOrder,
+    _NBit[L[4, 8], L[4, 8]],
+    _LiteralDType[np.ulong],
+):
+    @property
+    def name(self) -> L["uint32", "uint64"]: ...
+    @property
+    def str(self) -> L["u4", "u8"]: ...
+
+@final
+class LongLongDType(  # type: ignore[misc]
+    _TypeCodes[L["i"], L["q"], L[9]],
+    _NativeOrder,
+    _NBit[L[8], L[8]],
+    _LiteralDType[np.longlong],
+):
+    @property
+    def name(self) -> L["int64"]: ...
+    @property
+    def str(self) -> L["i8"]: ...
+
+@final
+class ULongLongDType(  # type: ignore[misc]
+    _TypeCodes[L["u"], L["Q"], L[10]],
+    _NativeOrder,
+    _NBit[L[8], L[8]],
+    _LiteralDType[np.ulonglong],
+):
+    @property
+    def name(self) -> L["uint64"]: ...
+    @property
+    def str(self) -> L["u8"]: ...
+
+# Floats:
+
+@final
+class Float16DType(  # type: ignore[misc]
+    _TypeCodes[L["f"], L["e"], L[23]],
+    _NativeOrder,
+    _NBit[L[2], L[2]],
+    _LiteralDType[np.float16],
+):
+    @property
+    def name(self) -> L["float16"]: ...
+    @property
+    def str(self) -> L["f2"]: ...
+
+@final
+class Float32DType(  # type: ignore[misc]
+    _TypeCodes[L["f"], L["f"], L[11]],
+    _NativeOrder,
+    _NBit[L[4], L[4]],
+    _LiteralDType[np.float32],
+):
+    @property
+    def name(self) -> L["float32"]: ...
+    @property
+    def str(self) -> L["f4"]: ...
+
+@final
+class Float64DType(  # type: ignore[misc]
+    _TypeCodes[L["f"], L["d"], L[12]],
+    _NativeOrder,
+    _NBit[L[8], L[8]],
+    _LiteralDType[np.float64],
+):
+    @property
+    def name(self) -> L["float64"]: ...
+    @property
+    def str(self) -> L["f8"]: ...
+
+@final
+class LongDoubleDType(  # type: ignore[misc]
+    _TypeCodes[L["f"], L["g"], L[13]],
+    _NativeOrder,
+    _NBit[L[8, 12, 16], L[8, 12, 16]],
+    _LiteralDType[np.longdouble],
+):
+    @property
+    def name(self) -> L["float64", "float96", "float128"]: ...
+    @property
+    def str(self) -> L["f8", "f12", "f16"]: ...
+
+# Complex:
+
+@final
+class Complex64DType(  # type: ignore[misc]
+    _TypeCodes[L["c"], L["F"], L[14]],
+    _NativeOrder,
+    _NBit[L[4], L[8]],
+    _LiteralDType[np.complex64],
+):
+    @property
+    def name(self) -> L["complex64"]: ...
+    @property
+    def str(self) -> L["c8"]: ...
+
+@final
+class Complex128DType(  # type: ignore[misc]
+    _TypeCodes[L["c"], L["D"], L[15]],
+    _NativeOrder,
+    _NBit[L[8], L[16]],
+    _LiteralDType[np.complex128],
+):
+    @property
+    def name(self) -> L["complex128"]: ...
+    @property
+    def str(self) -> L["c16"]: ...
+
+@final
+class CLongDoubleDType(  # type: ignore[misc]
+    _TypeCodes[L["c"], L["G"], L[16]],
+    _NativeOrder,
+    _NBit[L[8, 12, 16], L[16, 24, 32]],
+    _LiteralDType[np.clongdouble],
+):
+    @property
+    def name(self) -> L["complex128", "complex192", "complex256"]: ...
+    @property
+    def str(self) -> L["c16", "c24", "c32"]: ...
+
+# Python objects:
+
+@final
+class ObjectDType(  # type: ignore[misc]
+    _TypeCodes[L["O"], L["O"], L[17]],
+    _NoOrder,
+    _NBit[L[8], L[8]],
+    _SimpleDType[np.object_],
+):
+    @property
+    def hasobject(self) -> L[True]: ...
+    @property
+    def name(self) -> L["object"]: ...
+    @property
+    def str(self) -> L["|O"]: ...
+
+# Flexible:
+
+@final
+class BytesDType(  # type: ignore[misc]
+    _TypeCodes[L["S"], L["S"], L[18]],
+    _NoOrder,
+    _NBit[L[1], _ItemSize_co],
+    _SimpleDType[np.bytes_],
+    Generic[_ItemSize_co],
+):
+    def __new__(cls, size: _ItemSize_co, /) -> BytesDType[_ItemSize_co]: ...
+    @property
+    def hasobject(self) -> L[False]: ...
+    @property
+    def name(self) -> LiteralString: ...
+    @property
+    def str(self) -> LiteralString: ...
+
+@final
+class StrDType(  # type: ignore[misc]
+    _TypeCodes[L["U"], L["U"], L[19]],
+    _NativeOrder,
+    _NBit[L[4], _ItemSize_co],
+    _SimpleDType[np.str_],
+    Generic[_ItemSize_co],
+):
+    def __new__(cls, size: _ItemSize_co, /) -> StrDType[_ItemSize_co]: ...
+    @property
+    def hasobject(self) -> L[False]: ...
+    @property
+    def name(self) -> LiteralString: ...
+    @property
+    def str(self) -> LiteralString: ...
+
+@final
+class VoidDType(  # type: ignore[misc]
+    _TypeCodes[L["V"], L["V"], L[20]],
+    _NoOrder,
+    _NBit[L[1], _ItemSize_co],
+    np.dtype[np.void],  # pyright: ignore[reportGeneralTypeIssues]
+    Generic[_ItemSize_co],
+):
+    # NOTE: `VoidDType(...)` raises a `TypeError` at the moment
+    def __new__(cls, length: _ItemSize_co, /) -> NoReturn: ...
+    @property
+    def base(self) -> Self: ...
+    @property
+    def isalignedstruct(self) -> L[False]: ...
+    @property
+    def isnative(self) -> L[True]: ...
+    @property
+    def ndim(self) -> L[0]: ...
+    @property
+    def shape(self) -> tuple[()]: ...
+    @property
+    def subdtype(self) -> None: ...
+    @property
+    def name(self) -> LiteralString: ...
+    @property
+    def str(self) -> LiteralString: ...
+
+# Other:
+
+_DateUnit: TypeAlias = L["Y", "M", "W", "D"]
+_TimeUnit: TypeAlias = L["h", "m", "s", "ms", "us", "ns", "ps", "fs", "as"]
+_DateTimeUnit: TypeAlias = _DateUnit | _TimeUnit
+
+@final
+class DateTime64DType(  # type: ignore[misc]
+    _TypeCodes[L["M"], L["M"], L[21]],
+    _NativeOrder,
+    _NBit[L[8], L[8]],
+    _LiteralDType[np.datetime64],
+):
+    # NOTE: `DateTime64DType(...)` raises a `TypeError` at the moment
+    # TODO: Once implemented, don't forget the`unit: L["μs"]` overload.
+    def __new__(cls, unit: _DateTimeUnit, /) -> NoReturn: ...
+    @property
+    def name(self) -> L[
+        "datetime64",
+        "datetime64[Y]",
+        "datetime64[M]",
+        "datetime64[W]",
+        "datetime64[D]",
+        "datetime64[h]",
+        "datetime64[m]",
+        "datetime64[s]",
+        "datetime64[ms]",
+        "datetime64[us]",
+        "datetime64[ns]",
+        "datetime64[ps]",
+        "datetime64[fs]",
+        "datetime64[as]",
+    ]: ...
+    @property
+    def str(self) -> L[
+        "M8",
+        "M8[Y]",
+        "M8[M]",
+        "M8[W]",
+        "M8[D]",
+        "M8[h]",
+        "M8[m]",
+        "M8[s]",
+        "M8[ms]",
+        "M8[us]",
+        "M8[ns]",
+        "M8[ps]",
+        "M8[fs]",
+        "M8[as]",
+    ]: ...
+
+@final
+class TimeDelta64DType(  # type: ignore[misc]
+    _TypeCodes[L["m"], L["m"], L[22]],
+    _NativeOrder,
+    _NBit[L[8], L[8]],
+    _LiteralDType[np.timedelta64],
+):
+    # NOTE: `TimeDelta64DType(...)` raises a `TypeError` at the moment
+    # TODO: Once implemented, don't forget to overload on `unit: L["μs"]`.
+    def __new__(cls, unit: _DateTimeUnit, /) -> NoReturn: ...
+    @property
+    def name(self) -> L[
+        "timedelta64",
+        "timedelta64[Y]",
+        "timedelta64[M]",
+        "timedelta64[W]",
+        "timedelta64[D]",
+        "timedelta64[h]",
+        "timedelta64[m]",
+        "timedelta64[s]",
+        "timedelta64[ms]",
+        "timedelta64[us]",
+        "timedelta64[ns]",
+        "timedelta64[ps]",
+        "timedelta64[fs]",
+        "timedelta64[as]",
+    ]: ...
+    @property
+    def str(self) -> L[
+        "m8",
+        "m8[Y]",
+        "m8[M]",
+        "m8[W]",
+        "m8[D]",
+        "m8[h]",
+        "m8[m]",
+        "m8[s]",
+        "m8[ms]",
+        "m8[us]",
+        "m8[ns]",
+        "m8[ps]",
+        "m8[fs]",
+        "m8[as]",
+    ]: ...
+
+_NaObjectT_co = TypeVar("_NaObjectT_co", default=Never, covariant=True)
+
+@final
+class StringDType(  # type: ignore[misc]
+    _TypeCodes[L["T"], L["T"], L[2056]],
+    _NativeOrder,
+    _NBit[L[8], L[16]],
+    # TODO(jorenham): change once we have a string scalar type:
+    # https://github.com/numpy/numpy/issues/28165
+    np.dtype[str],  # type: ignore[type-var]  # pyright: ignore[reportGeneralTypeIssues, reportInvalidTypeArguments]
+    Generic[_NaObjectT_co],
+):
+    @property
+    def na_object(self) -> _NaObjectT_co: ...
+    @property
+    def coerce(self) -> L[True]: ...
+
+    #
+    @overload
+    def __new__(cls, /, *, coerce: bool = True) -> Self: ...
+    @overload
+    def __new__(cls, /, *, na_object: _NaObjectT_co, coerce: bool = True) -> Self: ...
+
+    #
+    def __getitem__(self, key: Never, /) -> NoReturn: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
+    @property
+    def fields(self) -> None: ...
+    @property
+    def base(self) -> Self: ...
+    @property
+    def ndim(self) -> L[0]: ...
+    @property
+    def shape(self) -> tuple[()]: ...
+
+    #
+    @property
+    def name(self) -> L["StringDType64", "StringDType128"]: ...
+    @property
+    def subdtype(self) -> None: ...
+    @property
+    def type(self) -> type[str]: ...
+    @property
+    def str(self) -> L["|T8", "|T16"]: ...
+
+    #
+    @property
+    def hasobject(self) -> L[True]: ...
+    @property
+    def isalignedstruct(self) -> L[False]: ...
+    @property
+    def isnative(self) -> L[True]: ...
diff --git a/local_packages/numpy/exceptions.py b/local_packages/numpy/exceptions.py
new file mode 100644
index 0000000..cf70b4a
--- /dev/null
+++ b/local_packages/numpy/exceptions.py
@@ -0,0 +1,246 @@
+"""
+Exceptions and Warnings
+=======================
+
+General exceptions used by NumPy.  Note that some exceptions may be module
+specific, such as linear algebra errors.
+
+.. versionadded:: NumPy 1.25
+
+    The exceptions module is new in NumPy 1.25.
+
+.. currentmodule:: numpy.exceptions
+
+Warnings
+--------
+.. autosummary::
+   :toctree: generated/
+
+   ComplexWarning             Given when converting complex to real.
+   VisibleDeprecationWarning  Same as a DeprecationWarning, but more visible.
+   RankWarning                Issued when the design matrix is rank deficient.
+
+Exceptions
+----------
+.. autosummary::
+   :toctree: generated/
+
+    AxisError          Given when an axis was invalid.
+    DTypePromotionError   Given when no common dtype could be found.
+    TooHardError       Error specific to `numpy.shares_memory`.
+
+"""
+
+
+__all__ = [
+    "ComplexWarning", "VisibleDeprecationWarning", "ModuleDeprecationWarning",
+    "TooHardError", "AxisError", "DTypePromotionError"]
+
+
+# Disallow reloading this module so as to preserve the identities of the
+# classes defined here.
+if '_is_loaded' in globals():
+    raise RuntimeError('Reloading numpy._globals is not allowed')
+_is_loaded = True
+
+
+class ComplexWarning(RuntimeWarning):
+    """
+    The warning raised when casting a complex dtype to a real dtype.
+
+    As implemented, casting a complex number to a real discards its imaginary
+    part, but this behavior may not be what the user actually wants.
+
+    """
+    pass
+
+
+class ModuleDeprecationWarning(DeprecationWarning):
+    """Module deprecation warning.
+
+    .. warning::
+
+        This warning should not be used, since nose testing is not relevant
+        anymore.
+
+    The nose tester turns ordinary Deprecation warnings into test failures.
+    That makes it hard to deprecate whole modules, because they get
+    imported by default. So this is a special Deprecation warning that the
+    nose tester will let pass without making tests fail.
+
+    """
+    pass
+
+
+class VisibleDeprecationWarning(UserWarning):
+    """Visible deprecation warning.
+
+    By default, python will not show deprecation warnings, so this class
+    can be used when a very visible warning is helpful, for example because
+    the usage is most likely a user bug.
+
+    """
+    pass
+
+
+class RankWarning(RuntimeWarning):
+    """Matrix rank warning.
+
+    Issued by polynomial functions when the design matrix is rank deficient.
+
+    """
+    pass
+
+
+# Exception used in shares_memory()
+class TooHardError(RuntimeError):
+    """``max_work`` was exceeded.
+
+    This is raised whenever the maximum number of candidate solutions
+    to consider specified by the ``max_work`` parameter is exceeded.
+    Assigning a finite number to ``max_work`` may have caused the operation
+    to fail.
+
+    """
+    pass
+
+
+class AxisError(ValueError, IndexError):
+    """Axis supplied was invalid.
+
+    This is raised whenever an ``axis`` parameter is specified that is larger
+    than the number of array dimensions.
+    For compatibility with code written against older numpy versions, which
+    raised a mixture of :exc:`ValueError` and :exc:`IndexError` for this
+    situation, this exception subclasses both to ensure that
+    ``except ValueError`` and ``except IndexError`` statements continue
+    to catch ``AxisError``.
+
+    Parameters
+    ----------
+    axis : int or str
+        The out of bounds axis or a custom exception message.
+        If an axis is provided, then `ndim` should be specified as well.
+    ndim : int, optional
+        The number of array dimensions.
+    msg_prefix : str, optional
+        A prefix for the exception message.
+
+    Attributes
+    ----------
+    axis : int, optional
+        The out of bounds axis or ``None`` if a custom exception
+        message was provided. This should be the axis as passed by
+        the user, before any normalization to resolve negative indices.
+
+        .. versionadded:: 1.22
+    ndim : int, optional
+        The number of array dimensions or ``None`` if a custom exception
+        message was provided.
+
+        .. versionadded:: 1.22
+
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> array_1d = np.arange(10)
+    >>> np.cumsum(array_1d, axis=1)
+    Traceback (most recent call last):
+      ...
+    numpy.exceptions.AxisError: axis 1 is out of bounds for array of dimension 1
+
+    Negative axes are preserved:
+
+    >>> np.cumsum(array_1d, axis=-2)
+    Traceback (most recent call last):
+      ...
+    numpy.exceptions.AxisError: axis -2 is out of bounds for array of dimension 1
+
+    The class constructor generally takes the axis and arrays'
+    dimensionality as arguments:
+
+    >>> print(np.exceptions.AxisError(2, 1, msg_prefix='error'))
+    error: axis 2 is out of bounds for array of dimension 1
+
+    Alternatively, a custom exception message can be passed:
+
+    >>> print(np.exceptions.AxisError('Custom error message'))
+    Custom error message
+
+    """
+
+    __slots__ = ("_msg", "axis", "ndim")
+
+    def __init__(self, axis, ndim=None, msg_prefix=None):
+        if ndim is msg_prefix is None:
+            # single-argument form: directly set the error message
+            self._msg = axis
+            self.axis = None
+            self.ndim = None
+        else:
+            self._msg = msg_prefix
+            self.axis = axis
+            self.ndim = ndim
+
+    def __str__(self):
+        axis = self.axis
+        ndim = self.ndim
+
+        if axis is ndim is None:
+            return self._msg
+        else:
+            msg = f"axis {axis} is out of bounds for array of dimension {ndim}"
+            if self._msg is not None:
+                msg = f"{self._msg}: {msg}"
+            return msg
+
+
+class DTypePromotionError(TypeError):
+    """Multiple DTypes could not be converted to a common one.
+
+    This exception derives from ``TypeError`` and is raised whenever dtypes
+    cannot be converted to a single common one.  This can be because they
+    are of a different category/class or incompatible instances of the same
+    one (see Examples).
+
+    Notes
+    -----
+    Many functions will use promotion to find the correct result and
+    implementation.  For these functions the error will typically be chained
+    with a more specific error indicating that no implementation was found
+    for the input dtypes.
+
+    Typically promotion should be considered "invalid" between the dtypes of
+    two arrays when `arr1 == arr2` can safely return all ``False`` because the
+    dtypes are fundamentally different.
+
+    Examples
+    --------
+    Datetimes and complex numbers are incompatible classes and cannot be
+    promoted:
+
+    >>> import numpy as np
+    >>> np.result_type(np.dtype("M8[s]"), np.complex128)  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+     ...
+    DTypePromotionError: The DType  could not
+    be promoted by . This means that no common
+    DType exists for the given inputs. For example they cannot be stored in a
+    single array unless the dtype is `object`. The full list of DTypes is:
+    (, )
+
+    For example for structured dtypes, the structure can mismatch and the
+    same ``DTypePromotionError`` is given when two structured dtypes with
+    a mismatch in their number of fields is given:
+
+    >>> dtype1 = np.dtype([("field1", np.float64), ("field2", np.int64)])
+    >>> dtype2 = np.dtype([("field1", np.float64)])
+    >>> np.promote_types(dtype1, dtype2)  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+     ...
+    DTypePromotionError: field names `('field1', 'field2')` and `('field1',)`
+    mismatch.
+
+    """  # noqa: E501
+    pass
diff --git a/local_packages/numpy/exceptions.pyi b/local_packages/numpy/exceptions.pyi
new file mode 100644
index 0000000..4cc4eff
--- /dev/null
+++ b/local_packages/numpy/exceptions.pyi
@@ -0,0 +1,27 @@
+from typing import overload
+
+__all__ = [
+    "ComplexWarning",
+    "VisibleDeprecationWarning",
+    "ModuleDeprecationWarning",
+    "TooHardError",
+    "AxisError",
+    "DTypePromotionError",
+]
+
+class ComplexWarning(RuntimeWarning): ...
+class ModuleDeprecationWarning(DeprecationWarning): ...
+class VisibleDeprecationWarning(UserWarning): ...
+class RankWarning(RuntimeWarning): ...
+class TooHardError(RuntimeError): ...
+class DTypePromotionError(TypeError): ...
+
+class AxisError(ValueError, IndexError):
+    __slots__ = "_msg", "axis", "ndim"
+
+    axis: int | None
+    ndim: int | None
+    @overload
+    def __init__(self, axis: str, ndim: None = None, msg_prefix: None = None) -> None: ...
+    @overload
+    def __init__(self, axis: int, ndim: int, msg_prefix: str | None = None) -> None: ...
diff --git a/local_packages/numpy/f2py/__init__.py b/local_packages/numpy/f2py/__init__.py
new file mode 100644
index 0000000..e34dd99
--- /dev/null
+++ b/local_packages/numpy/f2py/__init__.py
@@ -0,0 +1,86 @@
+"""Fortran to Python Interface Generator.
+
+Copyright 1999 -- 2011 Pearu Peterson all rights reserved.
+Copyright 2011 -- present NumPy Developers.
+Permission to use, modify, and distribute this software is given under the terms
+of the NumPy License.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED.  USE AT YOUR OWN RISK.
+"""
+__all__ = ['run_main', 'get_include']
+
+import os
+import subprocess
+import sys
+import warnings
+
+from numpy.exceptions import VisibleDeprecationWarning
+
+from . import diagnose, f2py2e
+
+run_main = f2py2e.run_main
+main = f2py2e.main
+
+
+def get_include():
+    """
+    Return the directory that contains the ``fortranobject.c`` and ``.h`` files.
+
+    .. note::
+
+        This function is not needed when building an extension with
+        `numpy.distutils` directly from ``.f`` and/or ``.pyf`` files
+        in one go.
+
+    Python extension modules built with f2py-generated code need to use
+    ``fortranobject.c`` as a source file, and include the ``fortranobject.h``
+    header. This function can be used to obtain the directory containing
+    both of these files.
+
+    Returns
+    -------
+    include_path : str
+        Absolute path to the directory containing ``fortranobject.c`` and
+        ``fortranobject.h``.
+
+    Notes
+    -----
+    .. versionadded:: 1.21.1
+
+    Unless the build system you are using has specific support for f2py,
+    building a Python extension using a ``.pyf`` signature file is a two-step
+    process. For a module ``mymod``:
+
+    * Step 1: run ``python -m numpy.f2py mymod.pyf --quiet``. This
+      generates ``mymodmodule.c`` and (if needed)
+      ``mymod-f2pywrappers.f`` files next to ``mymod.pyf``.
+    * Step 2: build your Python extension module. This requires the
+      following source files:
+
+      * ``mymodmodule.c``
+      * ``mymod-f2pywrappers.f`` (if it was generated in Step 1)
+      * ``fortranobject.c``
+
+    See Also
+    --------
+    numpy.get_include : function that returns the numpy include directory
+
+    """
+    return os.path.join(os.path.dirname(__file__), 'src')
+
+
+def __getattr__(attr):
+
+    # Avoid importing things that aren't needed for building
+    # which might import the main numpy module
+    if attr == "test":
+        from numpy._pytesttester import PytestTester
+        test = PytestTester(__name__)
+        return test
+
+    else:
+        raise AttributeError(f"module {__name__!r} has no attribute {attr!r}")
+
+
+def __dir__():
+    return list(globals().keys() | {"test"})
diff --git a/local_packages/numpy/f2py/__init__.pyi b/local_packages/numpy/f2py/__init__.pyi
new file mode 100644
index 0000000..aa7d591
--- /dev/null
+++ b/local_packages/numpy/f2py/__init__.pyi
@@ -0,0 +1,5 @@
+from .f2py2e import main as main, run_main
+
+__all__ = ["get_include", "run_main"]
+
+def get_include() -> str: ...
diff --git a/local_packages/numpy/f2py/__main__.py b/local_packages/numpy/f2py/__main__.py
new file mode 100644
index 0000000..936a753
--- /dev/null
+++ b/local_packages/numpy/f2py/__main__.py
@@ -0,0 +1,5 @@
+# See:
+# https://web.archive.org/web/20140822061353/http://cens.ioc.ee/projects/f2py2e
+from numpy.f2py.f2py2e import main
+
+main()
diff --git a/local_packages/numpy/f2py/__version__.py b/local_packages/numpy/f2py/__version__.py
new file mode 100644
index 0000000..8d12d95
--- /dev/null
+++ b/local_packages/numpy/f2py/__version__.py
@@ -0,0 +1 @@
+from numpy.version import version  # noqa: F401
diff --git a/local_packages/numpy/f2py/__version__.pyi b/local_packages/numpy/f2py/__version__.pyi
new file mode 100644
index 0000000..85b4225
--- /dev/null
+++ b/local_packages/numpy/f2py/__version__.pyi
@@ -0,0 +1 @@
+from numpy.version import version as version
diff --git a/local_packages/numpy/f2py/_backends/__init__.py b/local_packages/numpy/f2py/_backends/__init__.py
new file mode 100644
index 0000000..e91393c
--- /dev/null
+++ b/local_packages/numpy/f2py/_backends/__init__.py
@@ -0,0 +1,9 @@
+def f2py_build_generator(name):
+    if name == "meson":
+        from ._meson import MesonBackend
+        return MesonBackend
+    elif name == "distutils":
+        from ._distutils import DistutilsBackend
+        return DistutilsBackend
+    else:
+        raise ValueError(f"Unknown backend: {name}")
diff --git a/local_packages/numpy/f2py/_backends/__init__.pyi b/local_packages/numpy/f2py/_backends/__init__.pyi
new file mode 100644
index 0000000..43625c6
--- /dev/null
+++ b/local_packages/numpy/f2py/_backends/__init__.pyi
@@ -0,0 +1,5 @@
+from typing import Literal as L
+
+from ._backend import Backend
+
+def f2py_build_generator(name: L["distutils", "meson"]) -> Backend: ...
diff --git a/local_packages/numpy/f2py/_backends/_backend.py b/local_packages/numpy/f2py/_backends/_backend.py
new file mode 100644
index 0000000..5dda400
--- /dev/null
+++ b/local_packages/numpy/f2py/_backends/_backend.py
@@ -0,0 +1,44 @@
+from abc import ABC, abstractmethod
+
+
+class Backend(ABC):
+    def __init__(
+        self,
+        modulename,
+        sources,
+        extra_objects,
+        build_dir,
+        include_dirs,
+        library_dirs,
+        libraries,
+        define_macros,
+        undef_macros,
+        f2py_flags,
+        sysinfo_flags,
+        fc_flags,
+        flib_flags,
+        setup_flags,
+        remove_build_dir,
+        extra_dat,
+    ):
+        self.modulename = modulename
+        self.sources = sources
+        self.extra_objects = extra_objects
+        self.build_dir = build_dir
+        self.include_dirs = include_dirs
+        self.library_dirs = library_dirs
+        self.libraries = libraries
+        self.define_macros = define_macros
+        self.undef_macros = undef_macros
+        self.f2py_flags = f2py_flags
+        self.sysinfo_flags = sysinfo_flags
+        self.fc_flags = fc_flags
+        self.flib_flags = flib_flags
+        self.setup_flags = setup_flags
+        self.remove_build_dir = remove_build_dir
+        self.extra_dat = extra_dat
+
+    @abstractmethod
+    def compile(self) -> None:
+        """Compile the wrapper."""
+        pass
diff --git a/local_packages/numpy/f2py/_backends/_backend.pyi b/local_packages/numpy/f2py/_backends/_backend.pyi
new file mode 100644
index 0000000..ed24519
--- /dev/null
+++ b/local_packages/numpy/f2py/_backends/_backend.pyi
@@ -0,0 +1,46 @@
+import abc
+from pathlib import Path
+from typing import Any, Final
+
+class Backend(abc.ABC):
+    modulename: Final[str]
+    sources: Final[list[str | Path]]
+    extra_objects: Final[list[str]]
+    build_dir: Final[str | Path]
+    include_dirs: Final[list[str | Path]]
+    library_dirs: Final[list[str | Path]]
+    libraries: Final[list[str]]
+    define_macros: Final[list[tuple[str, str | None]]]
+    undef_macros: Final[list[str]]
+    f2py_flags: Final[list[str]]
+    sysinfo_flags: Final[list[str]]
+    fc_flags: Final[list[str]]
+    flib_flags: Final[list[str]]
+    setup_flags: Final[list[str]]
+    remove_build_dir: Final[bool]
+    extra_dat: Final[dict[str, Any]]
+
+    def __init__(
+        self,
+        /,
+        modulename: str,
+        sources: list[str | Path],
+        extra_objects: list[str],
+        build_dir: str | Path,
+        include_dirs: list[str | Path],
+        library_dirs: list[str | Path],
+        libraries: list[str],
+        define_macros: list[tuple[str, str | None]],
+        undef_macros: list[str],
+        f2py_flags: list[str],
+        sysinfo_flags: list[str],
+        fc_flags: list[str],
+        flib_flags: list[str],
+        setup_flags: list[str],
+        remove_build_dir: bool,
+        extra_dat: dict[str, Any],
+    ) -> None: ...
+
+    #
+    @abc.abstractmethod
+    def compile(self) -> None: ...
diff --git a/local_packages/numpy/f2py/_backends/_distutils.py b/local_packages/numpy/f2py/_backends/_distutils.py
new file mode 100644
index 0000000..5c8f109
--- /dev/null
+++ b/local_packages/numpy/f2py/_backends/_distutils.py
@@ -0,0 +1,76 @@
+import os
+import shutil
+import sys
+import warnings
+
+from numpy.distutils.core import Extension, setup
+from numpy.distutils.misc_util import dict_append
+from numpy.distutils.system_info import get_info
+from numpy.exceptions import VisibleDeprecationWarning
+
+from ._backend import Backend
+
+
+class DistutilsBackend(Backend):
+    def __init__(sef, *args, **kwargs):
+        warnings.warn(
+            "\ndistutils has been deprecated since NumPy 1.26.x\n"
+            "Use the Meson backend instead, or generate wrappers"
+            " without -c and use a custom build script",
+            VisibleDeprecationWarning,
+            stacklevel=2,
+        )
+        super().__init__(*args, **kwargs)
+
+    def compile(self):
+        num_info = {}
+        if num_info:
+            self.include_dirs.extend(num_info.get("include_dirs", []))
+        ext_args = {
+            "name": self.modulename,
+            "sources": self.sources,
+            "include_dirs": self.include_dirs,
+            "library_dirs": self.library_dirs,
+            "libraries": self.libraries,
+            "define_macros": self.define_macros,
+            "undef_macros": self.undef_macros,
+            "extra_objects": self.extra_objects,
+            "f2py_options": self.f2py_flags,
+        }
+
+        if self.sysinfo_flags:
+            for n in self.sysinfo_flags:
+                i = get_info(n)
+                if not i:
+                    print(
+                        f"No {n!r} resources found"
+                        "in system (try `f2py --help-link`)"
+                    )
+                dict_append(ext_args, **i)
+
+        ext = Extension(**ext_args)
+
+        sys.argv = [sys.argv[0]] + self.setup_flags
+        sys.argv.extend(
+            [
+                "build",
+                "--build-temp",
+                self.build_dir,
+                "--build-base",
+                self.build_dir,
+                "--build-platlib",
+                ".",
+                "--disable-optimization",
+            ]
+        )
+
+        if self.fc_flags:
+            sys.argv.extend(["config_fc"] + self.fc_flags)
+        if self.flib_flags:
+            sys.argv.extend(["build_ext"] + self.flib_flags)
+
+        setup(ext_modules=[ext])
+
+        if self.remove_build_dir and os.path.exists(self.build_dir):
+            print(f"Removing build directory {self.build_dir}")
+            shutil.rmtree(self.build_dir)
diff --git a/local_packages/numpy/f2py/_backends/_distutils.pyi b/local_packages/numpy/f2py/_backends/_distutils.pyi
new file mode 100644
index 0000000..56bbf7e
--- /dev/null
+++ b/local_packages/numpy/f2py/_backends/_distutils.pyi
@@ -0,0 +1,13 @@
+from typing_extensions import deprecated, override
+
+from ._backend import Backend
+
+class DistutilsBackend(Backend):
+    @deprecated(
+        "distutils has been deprecated since NumPy 1.26.x. Use the Meson backend instead, or generate wrappers without -c and "
+        "use a custom build script"
+    )
+    # NOTE: the `sef` typo matches runtime
+    def __init__(sef, *args: object, **kwargs: object) -> None: ...
+    @override
+    def compile(self) -> None: ...
diff --git a/local_packages/numpy/f2py/_backends/_meson.py b/local_packages/numpy/f2py/_backends/_meson.py
new file mode 100644
index 0000000..4c498ba
--- /dev/null
+++ b/local_packages/numpy/f2py/_backends/_meson.py
@@ -0,0 +1,244 @@
+import errno
+import os
+import re
+import shutil
+import subprocess
+import sys
+from itertools import chain
+from pathlib import Path
+from string import Template
+
+from ._backend import Backend
+
+
+class MesonTemplate:
+    """Template meson build file generation class."""
+
+    def __init__(
+        self,
+        modulename: str,
+        sources: list[Path],
+        deps: list[str],
+        libraries: list[str],
+        library_dirs: list[Path],
+        include_dirs: list[Path],
+        object_files: list[Path],
+        linker_args: list[str],
+        fortran_args: list[str],
+        build_type: str,
+        python_exe: str,
+    ):
+        self.modulename = modulename
+        self.build_template_path = (
+            Path(__file__).parent.absolute() / "meson.build.template"
+        )
+        self.sources = sources
+        self.deps = deps
+        self.libraries = libraries
+        self.library_dirs = library_dirs
+        if include_dirs is not None:
+            self.include_dirs = include_dirs
+        else:
+            self.include_dirs = []
+        self.substitutions = {}
+        self.objects = object_files
+        # Convert args to '' wrapped variant for meson
+        self.fortran_args = [
+            f"'{x}'" if not (x.startswith("'") and x.endswith("'")) else x
+            for x in fortran_args
+        ]
+        self.pipeline = [
+            self.initialize_template,
+            self.sources_substitution,
+            self.objects_substitution,
+            self.deps_substitution,
+            self.include_substitution,
+            self.libraries_substitution,
+            self.fortran_args_substitution,
+        ]
+        self.build_type = build_type
+        self.python_exe = python_exe
+        self.indent = " " * 21
+
+    def meson_build_template(self) -> str:
+        if not self.build_template_path.is_file():
+            raise FileNotFoundError(
+                errno.ENOENT,
+                "Meson build template"
+                f" {self.build_template_path.absolute()}"
+                " does not exist.",
+            )
+        return self.build_template_path.read_text()
+
+    def initialize_template(self) -> None:
+        self.substitutions["modulename"] = self.modulename
+        self.substitutions["buildtype"] = self.build_type
+        self.substitutions["python"] = self.python_exe
+
+    def sources_substitution(self) -> None:
+        self.substitutions["source_list"] = ",\n".join(
+            [f"{self.indent}'''{source}'''," for source in self.sources]
+        )
+
+    def objects_substitution(self) -> None:
+        self.substitutions["obj_list"] = ",\n".join(
+            [f"{self.indent}'''{obj}'''," for obj in self.objects]
+        )
+
+    def deps_substitution(self) -> None:
+        self.substitutions["dep_list"] = f",\n{self.indent}".join(
+            [f"{self.indent}dependency('{dep}')," for dep in self.deps]
+        )
+
+    def libraries_substitution(self) -> None:
+        self.substitutions["lib_dir_declarations"] = "\n".join(
+            [
+                f"lib_dir_{i} = declare_dependency(link_args : ['''-L{lib_dir}'''])"
+                for i, lib_dir in enumerate(self.library_dirs)
+            ]
+        )
+
+        self.substitutions["lib_declarations"] = "\n".join(
+            [
+                f"{lib.replace('.', '_')} = declare_dependency(link_args : ['-l{lib}'])"
+                for lib in self.libraries
+            ]
+        )
+
+        self.substitutions["lib_list"] = f"\n{self.indent}".join(
+            [f"{self.indent}{lib.replace('.', '_')}," for lib in self.libraries]
+        )
+        self.substitutions["lib_dir_list"] = f"\n{self.indent}".join(
+            [f"{self.indent}lib_dir_{i}," for i in range(len(self.library_dirs))]
+        )
+
+    def include_substitution(self) -> None:
+        self.substitutions["inc_list"] = f",\n{self.indent}".join(
+            [f"{self.indent}'''{inc}'''," for inc in self.include_dirs]
+        )
+
+    def fortran_args_substitution(self) -> None:
+        if self.fortran_args:
+            self.substitutions["fortran_args"] = (
+                f"{self.indent}fortran_args: [{', '.join(list(self.fortran_args))}],"
+            )
+        else:
+            self.substitutions["fortran_args"] = ""
+
+    def generate_meson_build(self):
+        for node in self.pipeline:
+            node()
+        template = Template(self.meson_build_template())
+        meson_build = template.substitute(self.substitutions)
+        meson_build = meson_build.replace(",,", ",")
+        return meson_build
+
+
+class MesonBackend(Backend):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.dependencies = self.extra_dat.get("dependencies", [])
+        self.meson_build_dir = "bbdir"
+        self.build_type = (
+            "debug" if any("debug" in flag for flag in self.fc_flags) else "release"
+        )
+        self.fc_flags = _get_flags(self.fc_flags)
+
+    def _move_exec_to_root(self, build_dir: Path):
+        walk_dir = Path(build_dir) / self.meson_build_dir
+        path_objects = chain(
+            walk_dir.glob(f"{self.modulename}*.so"),
+            walk_dir.glob(f"{self.modulename}*.pyd"),
+            walk_dir.glob(f"{self.modulename}*.dll"),
+        )
+        # Same behavior as distutils
+        # https://github.com/numpy/numpy/issues/24874#issuecomment-1835632293
+        for path_object in path_objects:
+            dest_path = Path.cwd() / path_object.name
+            if dest_path.exists():
+                dest_path.unlink()
+            shutil.copy2(path_object, dest_path)
+            os.remove(path_object)
+
+    def write_meson_build(self, build_dir: Path) -> None:
+        """Writes the meson build file at specified location"""
+        meson_template = MesonTemplate(
+            self.modulename,
+            self.sources,
+            self.dependencies,
+            self.libraries,
+            self.library_dirs,
+            self.include_dirs,
+            self.extra_objects,
+            self.flib_flags,
+            self.fc_flags,
+            self.build_type,
+            sys.executable,
+        )
+        src = meson_template.generate_meson_build()
+        Path(build_dir).mkdir(parents=True, exist_ok=True)
+        meson_build_file = Path(build_dir) / "meson.build"
+        meson_build_file.write_text(src)
+        return meson_build_file
+
+    def _run_subprocess_command(self, command, cwd):
+        subprocess.run(command, cwd=cwd, check=True)
+
+    def run_meson(self, build_dir: Path):
+        setup_command = ["meson", "setup", self.meson_build_dir]
+        self._run_subprocess_command(setup_command, build_dir)
+        compile_command = ["meson", "compile", "-C", self.meson_build_dir]
+        self._run_subprocess_command(compile_command, build_dir)
+
+    def compile(self) -> None:
+        self.sources = _prepare_sources(self.modulename, self.sources, self.build_dir)
+        _prepare_objects(self.modulename, self.extra_objects, self.build_dir)
+        self.write_meson_build(self.build_dir)
+        self.run_meson(self.build_dir)
+        self._move_exec_to_root(self.build_dir)
+
+
+def _prepare_sources(mname, sources, bdir):
+    extended_sources = sources.copy()
+    Path(bdir).mkdir(parents=True, exist_ok=True)
+    # Copy sources
+    for source in sources:
+        if Path(source).exists() and Path(source).is_file():
+            shutil.copy(source, bdir)
+    generated_sources = [
+        Path(f"{mname}module.c"),
+        Path(f"{mname}-f2pywrappers2.f90"),
+        Path(f"{mname}-f2pywrappers.f"),
+    ]
+    bdir = Path(bdir)
+    for generated_source in generated_sources:
+        if generated_source.exists():
+            shutil.copy(generated_source, bdir / generated_source.name)
+            extended_sources.append(generated_source.name)
+            generated_source.unlink()
+    extended_sources = [
+        Path(source).name
+        for source in extended_sources
+        if not Path(source).suffix == ".pyf"
+    ]
+    return extended_sources
+
+def _prepare_objects(mname, objects, bdir):
+    Path(bdir).mkdir(parents=True, exist_ok=True)
+    # Copy objects
+    for obj in objects:
+        if Path(obj).exists() and Path(obj).is_file():
+            shutil.copy(obj, bdir)
+
+def _get_flags(fc_flags):
+    flag_values = []
+    flag_pattern = re.compile(r"--f(77|90)flags=(.*)")
+    for flag in fc_flags:
+        match_result = flag_pattern.match(flag)
+        if match_result:
+            values = match_result.group(2).strip().split()
+            values = [val.strip("'\"") for val in values]
+            flag_values.extend(values)
+    # Hacky way to preserve order of flags
+    unique_flags = list(dict.fromkeys(flag_values))
+    return unique_flags
diff --git a/local_packages/numpy/f2py/_backends/_meson.pyi b/local_packages/numpy/f2py/_backends/_meson.pyi
new file mode 100644
index 0000000..5c85c61
--- /dev/null
+++ b/local_packages/numpy/f2py/_backends/_meson.pyi
@@ -0,0 +1,62 @@
+from collections.abc import Callable
+from pathlib import Path
+from typing import Final, Literal as L
+from typing_extensions import override
+
+from ._backend import Backend
+
+class MesonTemplate:
+    modulename: Final[str]
+    build_template_path: Final[Path]
+    sources: Final[list[str | Path]]
+    deps: Final[list[str]]
+    libraries: Final[list[str]]
+    library_dirs: Final[list[str | Path]]
+    include_dirs: Final[list[str | Path]]
+    substitutions: Final[dict[str, str]]
+    objects: Final[list[str | Path]]
+    fortran_args: Final[list[str]]
+    pipeline: Final[list[Callable[[], None]]]
+    build_type: Final[str]
+    python_exe: Final[str]
+    indent: Final[str]
+
+    def __init__(
+        self,
+        /,
+        modulename: str,
+        sources: list[Path],
+        deps: list[str],
+        libraries: list[str],
+        library_dirs: list[str | Path],
+        include_dirs: list[str | Path],
+        object_files: list[str | Path],
+        linker_args: list[str],
+        fortran_args: list[str],
+        build_type: str,
+        python_exe: str,
+    ) -> None: ...
+
+    #
+    def initialize_template(self) -> None: ...
+    def sources_substitution(self) -> None: ...
+    def objects_substitution(self) -> None: ...
+    def deps_substitution(self) -> None: ...
+    def libraries_substitution(self) -> None: ...
+    def include_substitution(self) -> None: ...
+    def fortran_args_substitution(self) -> None: ...
+
+    #
+    def meson_build_template(self) -> str: ...
+    def generate_meson_build(self) -> str: ...
+
+class MesonBackend(Backend):
+    dependencies: list[str]
+    meson_build_dir: L["bdir"]
+    build_type: L["debug", "release"]
+
+    def __init__(self, /, *args: object, **kwargs: object) -> None: ...
+    def write_meson_build(self, /, build_dir: Path) -> None: ...
+    def run_meson(self, /, build_dir: Path) -> None: ...
+    @override
+    def compile(self) -> None: ...
diff --git a/local_packages/numpy/f2py/_backends/meson.build.template b/local_packages/numpy/f2py/_backends/meson.build.template
new file mode 100644
index 0000000..58c6758
--- /dev/null
+++ b/local_packages/numpy/f2py/_backends/meson.build.template
@@ -0,0 +1,58 @@
+project('${modulename}',
+        ['c', 'fortran'],
+        version : '0.1',
+        meson_version: '>= 1.1.0',
+        default_options : [
+                            'warning_level=1',
+                            'buildtype=${buildtype}'
+                          ])
+fc = meson.get_compiler('fortran')
+
+py = import('python').find_installation('''${python}''', pure: false)
+py_dep = py.dependency()
+
+incdir_numpy = run_command(py,
+  ['-c', 'import os; os.chdir(".."); import numpy; print(numpy.get_include())'],
+  check : true
+).stdout().strip()
+
+incdir_f2py = run_command(py,
+    ['-c', 'import os; os.chdir(".."); import numpy.f2py; print(numpy.f2py.get_include())'],
+    check : true
+).stdout().strip()
+
+inc_np = include_directories(incdir_numpy)
+np_dep = declare_dependency(include_directories: inc_np)
+
+incdir_f2py = incdir_numpy / '..' / '..' / 'f2py' / 'src'
+inc_f2py = include_directories(incdir_f2py)
+fortranobject_c = incdir_f2py / 'fortranobject.c'
+
+inc_np = include_directories(incdir_numpy, incdir_f2py)
+# gh-25000
+quadmath_dep = fc.find_library('quadmath', required: false)
+
+${lib_declarations}
+${lib_dir_declarations}
+
+py.extension_module('${modulename}',
+                     [
+${source_list},
+                     fortranobject_c
+                     ],
+                     include_directories: [
+                     inc_np,
+${inc_list}
+                     ],
+                     objects: [
+${obj_list}
+                     ],
+                     dependencies : [
+                     py_dep,
+                     quadmath_dep,
+${dep_list}
+${lib_list}
+${lib_dir_list}
+                     ],
+${fortran_args}
+                     install : true)
diff --git a/local_packages/numpy/f2py/_isocbind.py b/local_packages/numpy/f2py/_isocbind.py
new file mode 100644
index 0000000..3043c5d
--- /dev/null
+++ b/local_packages/numpy/f2py/_isocbind.py
@@ -0,0 +1,62 @@
+"""
+ISO_C_BINDING maps for f2py2e.
+Only required declarations/macros/functions will be used.
+
+Copyright 1999 -- 2011 Pearu Peterson all rights reserved.
+Copyright 2011 -- present NumPy Developers.
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy License.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED.  USE AT YOUR OWN RISK.
+"""
+# These map to keys in c2py_map, via forced casting for now, see gh-25229
+iso_c_binding_map = {
+    'integer': {
+        'c_int': 'int',
+        'c_short': 'short',  # 'short' <=> 'int' for now
+        'c_long': 'long',  # 'long' <=> 'int' for now
+        'c_long_long': 'long_long',
+        'c_signed_char': 'signed_char',
+        'c_size_t': 'unsigned',  # size_t <=> 'unsigned' for now
+        'c_int8_t': 'signed_char',  # int8_t <=> 'signed_char' for now
+        'c_int16_t': 'short',  # int16_t <=> 'short' for now
+        'c_int32_t': 'int',  # int32_t <=> 'int' for now
+        'c_int64_t': 'long_long',
+        'c_int_least8_t': 'signed_char',  # int_least8_t <=> 'signed_char' for now
+        'c_int_least16_t': 'short',  # int_least16_t <=> 'short' for now
+        'c_int_least32_t': 'int',  # int_least32_t <=> 'int' for now
+        'c_int_least64_t': 'long_long',
+        'c_int_fast8_t': 'signed_char',  # int_fast8_t <=> 'signed_char' for now
+        'c_int_fast16_t': 'short',  # int_fast16_t <=> 'short' for now
+        'c_int_fast32_t': 'int',  # int_fast32_t <=> 'int' for now
+        'c_int_fast64_t': 'long_long',
+        'c_intmax_t': 'long_long',  # intmax_t <=> 'long_long' for now
+        'c_intptr_t': 'long',  # intptr_t <=> 'long' for now
+        'c_ptrdiff_t': 'long',  # ptrdiff_t <=> 'long' for now
+    },
+    'real': {
+        'c_float': 'float',
+        'c_double': 'double',
+        'c_long_double': 'long_double'
+    },
+    'complex': {
+        'c_float_complex': 'complex_float',
+        'c_double_complex': 'complex_double',
+        'c_long_double_complex': 'complex_long_double'
+    },
+    'logical': {
+        'c_bool': 'unsigned_char'  # _Bool <=> 'unsigned_char' for now
+    },
+    'character': {
+        'c_char': 'char'
+    }
+}
+
+# TODO: See gh-25229
+isoc_c2pycode_map = {}
+iso_c2py_map = {}
+
+isoc_kindmap = {}
+for fortran_type, c_type_dict in iso_c_binding_map.items():
+    for c_type in c_type_dict.keys():
+        isoc_kindmap[c_type] = fortran_type
diff --git a/local_packages/numpy/f2py/_isocbind.pyi b/local_packages/numpy/f2py/_isocbind.pyi
new file mode 100644
index 0000000..b972f56
--- /dev/null
+++ b/local_packages/numpy/f2py/_isocbind.pyi
@@ -0,0 +1,13 @@
+from typing import Any, Final
+
+iso_c_binding_map: Final[dict[str, dict[str, str]]] = ...
+
+isoc_c2pycode_map: Final[dict[str, Any]] = {}  # not implemented
+iso_c2py_map: Final[dict[str, Any]] = {}  # not implemented
+
+isoc_kindmap: Final[dict[str, str]] = ...
+
+# namespace pollution
+c_type: str
+c_type_dict: dict[str, str]
+fortran_type: str
diff --git a/local_packages/numpy/f2py/_src_pyf.py b/local_packages/numpy/f2py/_src_pyf.py
new file mode 100644
index 0000000..b5c424f
--- /dev/null
+++ b/local_packages/numpy/f2py/_src_pyf.py
@@ -0,0 +1,247 @@
+import os
+import re
+
+# START OF CODE VENDORED FROM `numpy.distutils.from_template`
+#############################################################
+"""
+process_file(filename)
+
+  takes templated file .xxx.src and produces .xxx file where .xxx
+  is .pyf .f90 or .f using the following template rules:
+
+  '<..>' denotes a template.
+
+  All function and subroutine blocks in a source file with names that
+  contain '<..>' will be replicated according to the rules in '<..>'.
+
+  The number of comma-separated words in '<..>' will determine the number of
+  replicates.
+
+  '<..>' may have two different forms, named and short. For example,
+
+  named:
+    where anywhere inside a block '

' will be replaced with + 'd', 's', 'z', and 'c' for each replicate of the block. + + <_c> is already defined: <_c=s,d,c,z> + <_t> is already defined: <_t=real,double precision,complex,double complex> + + short: + , a short form of the named, useful when no

appears inside + a block. + + In general, '<..>' contains a comma separated list of arbitrary + expressions. If these expression must contain a comma|leftarrow|rightarrow, + then prepend the comma|leftarrow|rightarrow with a backslash. + + If an expression matches '\\' then it will be replaced + by -th expression. + + Note that all '<..>' forms in a block must have the same number of + comma-separated entries. + + Predefined named template rules: + + + + + +""" + +routine_start_re = re.compile(r'(\n|\A)(( (\$|\*))|)\s*(subroutine|function)\b', re.I) +routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I) +function_start_re = re.compile(r'\n (\$|\*)\s*function\b', re.I) + +def parse_structure(astr): + """ Return a list of tuples for each function or subroutine each + tuple is the start and end of a subroutine or function to be + expanded. + """ + + spanlist = [] + ind = 0 + while True: + m = routine_start_re.search(astr, ind) + if m is None: + break + start = m.start() + if function_start_re.match(astr, start, m.end()): + while True: + i = astr.rfind('\n', ind, start) + if i == -1: + break + start = i + if astr[i:i + 7] != '\n $': + break + start += 1 + m = routine_end_re.search(astr, m.end()) + ind = end = (m and m.end() - 1) or len(astr) + spanlist.append((start, end)) + return spanlist + + +template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>") +named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>") +list_re = re.compile(r"<\s*((.*?))\s*>") + +def find_repl_patterns(astr): + reps = named_re.findall(astr) + names = {} + for rep in reps: + name = rep[0].strip() or unique_key(names) + repl = rep[1].replace(r'\,', '@comma@') + thelist = conv(repl) + names[name] = thelist + return names + +def find_and_remove_repl_patterns(astr): + names = find_repl_patterns(astr) + astr = re.subn(named_re, '', astr)[0] + return astr, names + + +item_re = re.compile(r"\A\\(?P\d+)\Z") +def conv(astr): + b = astr.split(',') + l = [x.strip() for x in b] + for i in range(len(l)): + m = item_re.match(l[i]) + if m: + j = int(m.group('index')) + l[i] = l[j] + return ','.join(l) + +def unique_key(adict): + """ Obtain a unique key given a dictionary.""" + allkeys = list(adict.keys()) + done = False + n = 1 + while not done: + newkey = f'__l{n}' + if newkey in allkeys: + n += 1 + else: + done = True + return newkey + + +template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z') +def expand_sub(substr, names): + substr = substr.replace(r'\>', '@rightarrow@') + substr = substr.replace(r'\<', '@leftarrow@') + lnames = find_repl_patterns(substr) + substr = named_re.sub(r"<\1>", substr) # get rid of definition templates + + def listrepl(mobj): + thelist = conv(mobj.group(1).replace(r'\,', '@comma@')) + if template_name_re.match(thelist): + return f"<{thelist}>" + name = None + for key in lnames.keys(): # see if list is already in dictionary + if lnames[key] == thelist: + name = key + if name is None: # this list is not in the dictionary yet + name = unique_key(lnames) + lnames[name] = thelist + return f"<{name}>" + + # convert all lists to named templates + # new names are constructed as needed + substr = list_re.sub(listrepl, substr) + + numsubs = None + base_rule = None + rules = {} + for r in template_re.findall(substr): + if r not in rules: + thelist = lnames.get(r, names.get(r, None)) + if thelist is None: + raise ValueError(f'No replicates found for <{r}>') + if r not in names and not thelist.startswith('_'): + names[r] = thelist + rule = [i.replace('@comma@', ',') for i in thelist.split(',')] + num = len(rule) + + if numsubs is None: + numsubs = num + rules[r] = rule + base_rule = r + elif num == numsubs: + rules[r] = rule + else: + rules_base_rule = ','.join(rules[base_rule]) + print("Mismatch in number of replacements " + f"(base <{base_rule}={rules_base_rule}>) " + f"for <{r}={thelist}>. Ignoring.") + if not rules: + return substr + + def namerepl(mobj): + name = mobj.group(1) + return rules.get(name, (k + 1) * [name])[k] + + newstr = '' + for k in range(numsubs): + newstr += template_re.sub(namerepl, substr) + '\n\n' + + newstr = newstr.replace('@rightarrow@', '>') + newstr = newstr.replace('@leftarrow@', '<') + return newstr + +def process_str(allstr): + newstr = allstr + writestr = '' + + struct = parse_structure(newstr) + + oldend = 0 + names = {} + names.update(_special_names) + for sub in struct: + cleanedstr, defs = find_and_remove_repl_patterns(newstr[oldend:sub[0]]) + writestr += cleanedstr + names.update(defs) + writestr += expand_sub(newstr[sub[0]:sub[1]], names) + oldend = sub[1] + writestr += newstr[oldend:] + + return writestr + + +include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P[\w\d./\\]+\.src)['\"]", re.I) + +def resolve_includes(source): + d = os.path.dirname(source) + with open(source) as fid: + lines = [] + for line in fid: + m = include_src_re.match(line) + if m: + fn = m.group('name') + if not os.path.isabs(fn): + fn = os.path.join(d, fn) + if os.path.isfile(fn): + lines.extend(resolve_includes(fn)) + else: + lines.append(line) + else: + lines.append(line) + return lines + +def process_file(source): + lines = resolve_includes(source) + return process_str(''.join(lines)) + + +_special_names = find_repl_patterns(''' +<_c=s,d,c,z> +<_t=real,double precision,complex,double complex> + + + + + +''') + +# END OF CODE VENDORED FROM `numpy.distutils.from_template` +########################################################### diff --git a/local_packages/numpy/f2py/_src_pyf.pyi b/local_packages/numpy/f2py/_src_pyf.pyi new file mode 100644 index 0000000..50ddd07 --- /dev/null +++ b/local_packages/numpy/f2py/_src_pyf.pyi @@ -0,0 +1,28 @@ +import re +from _typeshed import StrOrBytesPath +from collections.abc import Mapping +from typing import Final + +routine_start_re: Final[re.Pattern[str]] = ... +routine_end_re: Final[re.Pattern[str]] = ... +function_start_re: Final[re.Pattern[str]] = ... +template_re: Final[re.Pattern[str]] = ... +named_re: Final[re.Pattern[str]] = ... +list_re: Final[re.Pattern[str]] = ... +item_re: Final[re.Pattern[str]] = ... +template_name_re: Final[re.Pattern[str]] = ... +include_src_re: Final[re.Pattern[str]] = ... + +def parse_structure(astr: str) -> list[tuple[int, int]]: ... +def find_repl_patterns(astr: str) -> dict[str, str]: ... +def find_and_remove_repl_patterns(astr: str) -> tuple[str, dict[str, str]]: ... +def conv(astr: str) -> str: ... + +# +def unique_key(adict: Mapping[str, object]) -> str: ... +def expand_sub(substr: str, names: dict[str, str]) -> str: ... +def process_str(allstr: str) -> str: ... + +# +def resolve_includes(source: StrOrBytesPath) -> list[str]: ... +def process_file(source: StrOrBytesPath) -> str: ... diff --git a/local_packages/numpy/f2py/auxfuncs.py b/local_packages/numpy/f2py/auxfuncs.py new file mode 100644 index 0000000..a5af31d --- /dev/null +++ b/local_packages/numpy/f2py/auxfuncs.py @@ -0,0 +1,1004 @@ +""" +Auxiliary functions for f2py2e. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy (BSD style) LICENSE. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +import pprint +import re +import sys +import types +from functools import reduce + +from . import __version__, cfuncs +from .cfuncs import errmess + +__all__ = [ + 'applyrules', 'debugcapi', 'dictappend', 'errmess', 'gentitle', + 'getargs2', 'getcallprotoargument', 'getcallstatement', + 'getfortranname', 'getpymethoddef', 'getrestdoc', 'getusercode', + 'getusercode1', 'getdimension', 'hasbody', 'hascallstatement', 'hascommon', + 'hasexternals', 'hasinitvalue', 'hasnote', 'hasresultnote', + 'isallocatable', 'isarray', 'isarrayofstrings', + 'ischaracter', 'ischaracterarray', 'ischaracter_or_characterarray', + 'iscomplex', 'iscstyledirective', + 'iscomplexarray', 'iscomplexfunction', 'iscomplexfunction_warn', + 'isdouble', 'isdummyroutine', 'isexternal', 'isfunction', + 'isfunction_wrap', 'isint1', 'isint1array', 'isinteger', 'isintent_aux', + 'isintent_c', 'isintent_callback', 'isintent_copy', 'isintent_dict', + 'isintent_hide', 'isintent_in', 'isintent_inout', 'isintent_inplace', + 'isintent_nothide', 'isintent_out', 'isintent_overwrite', 'islogical', + 'islogicalfunction', 'islong_complex', 'islong_double', + 'islong_doublefunction', 'islong_long', 'islong_longfunction', + 'ismodule', 'ismoduleroutine', 'isoptional', 'isprivate', 'isvariable', + 'isrequired', 'isroutine', 'isscalar', 'issigned_long_longarray', + 'isstring', 'isstringarray', 'isstring_or_stringarray', 'isstringfunction', + 'issubroutine', 'get_f2py_modulename', 'issubroutine_wrap', 'isthreadsafe', + 'isunsigned', 'isunsigned_char', 'isunsigned_chararray', + 'isunsigned_long_long', 'isunsigned_long_longarray', 'isunsigned_short', + 'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess', 'replace', + 'show', 'stripcomma', 'throw_error', 'isattr_value', 'getuseblocks', + 'process_f2cmap_dict', 'containscommon', 'containsderivedtypes' +] + + +f2py_version = __version__.version + + +show = pprint.pprint + +options = {} +debugoptions = [] +wrapfuncs = 1 + + +def outmess(t): + if options.get('verbose', 1): + sys.stdout.write(t) + + +def debugcapi(var): + return 'capi' in debugoptions + + +def _ischaracter(var): + return 'typespec' in var and var['typespec'] == 'character' and \ + not isexternal(var) + + +def _isstring(var): + return 'typespec' in var and var['typespec'] == 'character' and \ + not isexternal(var) + + +def ischaracter_or_characterarray(var): + return _ischaracter(var) and 'charselector' not in var + + +def ischaracter(var): + return ischaracter_or_characterarray(var) and not isarray(var) + + +def ischaracterarray(var): + return ischaracter_or_characterarray(var) and isarray(var) + + +def isstring_or_stringarray(var): + return _ischaracter(var) and 'charselector' in var + + +def isstring(var): + return isstring_or_stringarray(var) and not isarray(var) + + +def isstringarray(var): + return isstring_or_stringarray(var) and isarray(var) + + +def isarrayofstrings(var): # obsolete? + # leaving out '*' for now so that `character*(*) a(m)` and `character + # a(m,*)` are treated differently. Luckily `character**` is illegal. + return isstringarray(var) and var['dimension'][-1] == '(*)' + + +def isarray(var): + return 'dimension' in var and not isexternal(var) + + +def isscalar(var): + return not (isarray(var) or isstring(var) or isexternal(var)) + + +def iscomplex(var): + return isscalar(var) and \ + var.get('typespec') in ['complex', 'double complex'] + + +def islogical(var): + return isscalar(var) and var.get('typespec') == 'logical' + + +def isinteger(var): + return isscalar(var) and var.get('typespec') == 'integer' + + +def isreal(var): + return isscalar(var) and var.get('typespec') == 'real' + + +def get_kind(var): + try: + return var['kindselector']['*'] + except KeyError: + try: + return var['kindselector']['kind'] + except KeyError: + pass + + +def isint1(var): + return var.get('typespec') == 'integer' \ + and get_kind(var) == '1' and not isarray(var) + + +def islong_long(var): + if not isscalar(var): + return 0 + if var.get('typespec') not in ['integer', 'logical']: + return 0 + return get_kind(var) == '8' + + +def isunsigned_char(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var) == '-1' + + +def isunsigned_short(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var) == '-2' + + +def isunsigned(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var) == '-4' + + +def isunsigned_long_long(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var) == '-8' + + +def isdouble(var): + if not isscalar(var): + return 0 + if not var.get('typespec') == 'real': + return 0 + return get_kind(var) == '8' + + +def islong_double(var): + if not isscalar(var): + return 0 + if not var.get('typespec') == 'real': + return 0 + return get_kind(var) == '16' + + +def islong_complex(var): + if not iscomplex(var): + return 0 + return get_kind(var) == '32' + + +def iscomplexarray(var): + return isarray(var) and \ + var.get('typespec') in ['complex', 'double complex'] + + +def isint1array(var): + return isarray(var) and var.get('typespec') == 'integer' \ + and get_kind(var) == '1' + + +def isunsigned_chararray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '-1' + + +def isunsigned_shortarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '-2' + + +def isunsignedarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '-4' + + +def isunsigned_long_longarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '-8' + + +def issigned_chararray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '1' + + +def issigned_shortarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '2' + + +def issigned_array(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '4' + + +def issigned_long_longarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '8' + + +def isallocatable(var): + return 'attrspec' in var and 'allocatable' in var['attrspec'] + + +def ismutable(var): + return not ('dimension' not in var or isstring(var)) + + +def ismoduleroutine(rout): + return 'modulename' in rout + + +def ismodule(rout): + return 'block' in rout and 'module' == rout['block'] + + +def isfunction(rout): + return 'block' in rout and 'function' == rout['block'] + + +def isfunction_wrap(rout): + if isintent_c(rout): + return 0 + return wrapfuncs and isfunction(rout) and (not isexternal(rout)) + + +def issubroutine(rout): + return 'block' in rout and 'subroutine' == rout['block'] + + +def issubroutine_wrap(rout): + if isintent_c(rout): + return 0 + return issubroutine(rout) and hasassumedshape(rout) + +def isattr_value(var): + return 'value' in var.get('attrspec', []) + + +def hasassumedshape(rout): + if rout.get('hasassumedshape'): + return True + for a in rout['args']: + for d in rout['vars'].get(a, {}).get('dimension', []): + if d == ':': + rout['hasassumedshape'] = True + return True + return False + + +def requiresf90wrapper(rout): + return ismoduleroutine(rout) or hasassumedshape(rout) + + +def isroutine(rout): + return isfunction(rout) or issubroutine(rout) + + +def islogicalfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return islogical(rout['vars'][a]) + return 0 + + +def islong_longfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return islong_long(rout['vars'][a]) + return 0 + + +def islong_doublefunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return islong_double(rout['vars'][a]) + return 0 + + +def iscomplexfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return iscomplex(rout['vars'][a]) + return 0 + + +def iscomplexfunction_warn(rout): + if iscomplexfunction(rout): + outmess("""\ + ************************************************************** + Warning: code with a function returning complex value + may not work correctly with your Fortran compiler. + When using GNU gcc/g77 compilers, codes should work + correctly for callbacks with: + f2py -c -DF2PY_CB_RETURNCOMPLEX + **************************************************************\n""") + return 1 + return 0 + + +def isstringfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return isstring(rout['vars'][a]) + return 0 + + +def hasexternals(rout): + return 'externals' in rout and rout['externals'] + + +def isthreadsafe(rout): + return 'f2pyenhancements' in rout and \ + 'threadsafe' in rout['f2pyenhancements'] + + +def hasvariables(rout): + return 'vars' in rout and rout['vars'] + + +def isoptional(var): + return ('attrspec' in var and 'optional' in var['attrspec'] and + 'required' not in var['attrspec']) and isintent_nothide(var) + + +def isexternal(var): + return 'attrspec' in var and 'external' in var['attrspec'] + + +def getdimension(var): + dimpattern = r"\((.*?)\)" + if 'attrspec' in var.keys(): + if any('dimension' in s for s in var['attrspec']): + return next(re.findall(dimpattern, v) for v in var['attrspec']) + + +def isrequired(var): + return not isoptional(var) and isintent_nothide(var) + + +def iscstyledirective(f2py_line): + directives = {"callstatement", "callprotoargument", "pymethoddef"} + return any(directive in f2py_line.lower() for directive in directives) + + +def isintent_in(var): + if 'intent' not in var: + return 1 + if 'hide' in var['intent']: + return 0 + if 'inplace' in var['intent']: + return 0 + if 'in' in var['intent']: + return 1 + if 'out' in var['intent']: + return 0 + if 'inout' in var['intent']: + return 0 + if 'outin' in var['intent']: + return 0 + return 1 + + +def isintent_inout(var): + return ('intent' in var and ('inout' in var['intent'] or + 'outin' in var['intent']) and 'in' not in var['intent'] and + 'hide' not in var['intent'] and 'inplace' not in var['intent']) + + +def isintent_out(var): + return 'out' in var.get('intent', []) + + +def isintent_hide(var): + return ('intent' in var and ('hide' in var['intent'] or + ('out' in var['intent'] and 'in' not in var['intent'] and + (not l_or(isintent_inout, isintent_inplace)(var))))) + + +def isintent_nothide(var): + return not isintent_hide(var) + + +def isintent_c(var): + return 'c' in var.get('intent', []) + + +def isintent_cache(var): + return 'cache' in var.get('intent', []) + + +def isintent_copy(var): + return 'copy' in var.get('intent', []) + + +def isintent_overwrite(var): + return 'overwrite' in var.get('intent', []) + + +def isintent_callback(var): + return 'callback' in var.get('intent', []) + + +def isintent_inplace(var): + return 'inplace' in var.get('intent', []) + + +def isintent_aux(var): + return 'aux' in var.get('intent', []) + + +def isintent_aligned4(var): + return 'aligned4' in var.get('intent', []) + + +def isintent_aligned8(var): + return 'aligned8' in var.get('intent', []) + + +def isintent_aligned16(var): + return 'aligned16' in var.get('intent', []) + + +isintent_dict = {isintent_in: 'INTENT_IN', isintent_inout: 'INTENT_INOUT', + isintent_out: 'INTENT_OUT', isintent_hide: 'INTENT_HIDE', + isintent_cache: 'INTENT_CACHE', + isintent_c: 'INTENT_C', isoptional: 'OPTIONAL', + isintent_inplace: 'INTENT_INPLACE', + isintent_aligned4: 'INTENT_ALIGNED4', + isintent_aligned8: 'INTENT_ALIGNED8', + isintent_aligned16: 'INTENT_ALIGNED16', + } + + +def isprivate(var): + return 'attrspec' in var and 'private' in var['attrspec'] + + +def isvariable(var): + # heuristic to find public/private declarations of filtered subroutines + if len(var) == 1 and 'attrspec' in var and \ + var['attrspec'][0] in ('public', 'private'): + is_var = False + else: + is_var = True + return is_var + +def hasinitvalue(var): + return '=' in var + + +def hasinitvalueasstring(var): + if not hasinitvalue(var): + return 0 + return var['='][0] in ['"', "'"] + + +def hasnote(var): + return 'note' in var + + +def hasresultnote(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return hasnote(rout['vars'][a]) + return 0 + + +def hascommon(rout): + return 'common' in rout + + +def containscommon(rout): + if hascommon(rout): + return 1 + if hasbody(rout): + for b in rout['body']: + if containscommon(b): + return 1 + return 0 + + +def hasderivedtypes(rout): + return ('block' in rout) and rout['block'] == 'type' + + +def containsderivedtypes(rout): + if hasderivedtypes(rout): + return 1 + if hasbody(rout): + for b in rout['body']: + if hasderivedtypes(b): + return 1 + return 0 + + +def containsmodule(block): + if ismodule(block): + return 1 + if not hasbody(block): + return 0 + for b in block['body']: + if containsmodule(b): + return 1 + return 0 + + +def hasbody(rout): + return 'body' in rout + + +def hascallstatement(rout): + return getcallstatement(rout) is not None + + +def istrue(var): + return 1 + + +def isfalse(var): + return 0 + + +class F2PYError(Exception): + pass + + +class throw_error: + + def __init__(self, mess): + self.mess = mess + + def __call__(self, var): + mess = f'\n\n var = {var}\n Message: {self.mess}\n' + raise F2PYError(mess) + + +def l_and(*f): + l1, l2 = 'lambda v', [] + for i in range(len(f)): + l1 = '%s,f%d=f[%d]' % (l1, i, i) + l2.append('f%d(v)' % (i)) + return eval(f"{l1}:{' and '.join(l2)}") + + +def l_or(*f): + l1, l2 = 'lambda v', [] + for i in range(len(f)): + l1 = '%s,f%d=f[%d]' % (l1, i, i) + l2.append('f%d(v)' % (i)) + return eval(f"{l1}:{' or '.join(l2)}") + + +def l_not(f): + return eval('lambda v,f=f:not f(v)') + + +def isdummyroutine(rout): + try: + return rout['f2pyenhancements']['fortranname'] == '' + except KeyError: + return 0 + + +def getfortranname(rout): + try: + name = rout['f2pyenhancements']['fortranname'] + if name == '': + raise KeyError + if not name: + errmess(f"Failed to use fortranname from {rout['f2pyenhancements']}\n") + raise KeyError + except KeyError: + name = rout['name'] + return name + + +def getmultilineblock(rout, blockname, comment=1, counter=0): + try: + r = rout['f2pyenhancements'].get(blockname) + except KeyError: + return + if not r: + return + if counter > 0 and isinstance(r, str): + return + if isinstance(r, list): + if counter >= len(r): + return + r = r[counter] + if r[:3] == "'''": + if comment: + r = '\t/* start ' + blockname + \ + ' multiline (' + repr(counter) + ') */\n' + r[3:] + else: + r = r[3:] + if r[-3:] == "'''": + if comment: + r = r[:-3] + '\n\t/* end multiline (' + repr(counter) + ')*/' + else: + r = r[:-3] + else: + errmess(f"{blockname} multiline block should end with `'''`: {repr(r)}\n") + return r + + +def getcallstatement(rout): + return getmultilineblock(rout, 'callstatement') + + +def getcallprotoargument(rout, cb_map={}): + r = getmultilineblock(rout, 'callprotoargument', comment=0) + if r: + return r + if hascallstatement(rout): + outmess( + 'warning: callstatement is defined without callprotoargument\n') + return + from .capi_maps import getctype + arg_types, arg_types2 = [], [] + if l_and(isstringfunction, l_not(isfunction_wrap))(rout): + arg_types.extend(['char*', 'size_t']) + for n in rout['args']: + var = rout['vars'][n] + if isintent_callback(var): + continue + if n in cb_map: + ctype = cb_map[n] + '_typedef' + else: + ctype = getctype(var) + if l_and(isintent_c, l_or(isscalar, iscomplex))(var): + pass + elif isstring(var): + pass + elif not isattr_value(var): + ctype = ctype + '*' + if (isstring(var) + or isarrayofstrings(var) # obsolete? + or isstringarray(var)): + arg_types2.append('size_t') + arg_types.append(ctype) + + proto_args = ','.join(arg_types + arg_types2) + if not proto_args: + proto_args = 'void' + return proto_args + + +def getusercode(rout): + return getmultilineblock(rout, 'usercode') + + +def getusercode1(rout): + return getmultilineblock(rout, 'usercode', counter=1) + + +def getpymethoddef(rout): + return getmultilineblock(rout, 'pymethoddef') + + +def getargs(rout): + sortargs, args = [], [] + if 'args' in rout: + args = rout['args'] + if 'sortvars' in rout: + for a in rout['sortvars']: + if a in args: + sortargs.append(a) + for a in args: + if a not in sortargs: + sortargs.append(a) + else: + sortargs = rout['args'] + return args, sortargs + + +def getargs2(rout): + sortargs, args = [], rout.get('args', []) + auxvars = [a for a in rout['vars'].keys() if isintent_aux(rout['vars'][a]) + and a not in args] + args = auxvars + args + if 'sortvars' in rout: + for a in rout['sortvars']: + if a in args: + sortargs.append(a) + for a in args: + if a not in sortargs: + sortargs.append(a) + else: + sortargs = auxvars + rout['args'] + return args, sortargs + + +def getrestdoc(rout): + if 'f2pymultilines' not in rout: + return None + k = None + if rout['block'] == 'python module': + k = rout['block'], rout['name'] + return rout['f2pymultilines'].get(k, None) + + +def gentitle(name): + ln = (80 - len(name) - 6) // 2 + return f"/*{ln * '*'} {name} {ln * '*'}*/" + + +def flatlist(lst): + if isinstance(lst, list): + return reduce(lambda x, y, f=flatlist: x + f(y), lst, []) + return [lst] + + +def stripcomma(s): + if s and s[-1] == ',': + return s[:-1] + return s + + +def replace(str, d, defaultsep=''): + if isinstance(d, list): + return [replace(str, _m, defaultsep) for _m in d] + if isinstance(str, list): + return [replace(_m, d, defaultsep) for _m in str] + for k in 2 * list(d.keys()): + if k == 'separatorsfor': + continue + if 'separatorsfor' in d and k in d['separatorsfor']: + sep = d['separatorsfor'][k] + else: + sep = defaultsep + if isinstance(d[k], list): + str = str.replace(f'#{k}#', sep.join(flatlist(d[k]))) + else: + str = str.replace(f'#{k}#', d[k]) + return str + + +def dictappend(rd, ar): + if isinstance(ar, list): + for a in ar: + rd = dictappend(rd, a) + return rd + for k in ar.keys(): + if k[0] == '_': + continue + if k in rd: + if isinstance(rd[k], str): + rd[k] = [rd[k]] + if isinstance(rd[k], list): + if isinstance(ar[k], list): + rd[k] = rd[k] + ar[k] + else: + rd[k].append(ar[k]) + elif isinstance(rd[k], dict): + if isinstance(ar[k], dict): + if k == 'separatorsfor': + for k1 in ar[k].keys(): + if k1 not in rd[k]: + rd[k][k1] = ar[k][k1] + else: + rd[k] = dictappend(rd[k], ar[k]) + else: + rd[k] = ar[k] + return rd + + +def applyrules(rules, d, var={}): + ret = {} + if isinstance(rules, list): + for r in rules: + rr = applyrules(r, d, var) + ret = dictappend(ret, rr) + if '_break' in rr: + break + return ret + if '_check' in rules and (not rules['_check'](var)): + return ret + if 'need' in rules: + res = applyrules({'needs': rules['need']}, d, var) + if 'needs' in res: + cfuncs.append_needs(res['needs']) + + for k in rules.keys(): + if k == 'separatorsfor': + ret[k] = rules[k] + continue + if isinstance(rules[k], str): + ret[k] = replace(rules[k], d) + elif isinstance(rules[k], list): + ret[k] = [] + for i in rules[k]: + ar = applyrules({k: i}, d, var) + if k in ar: + ret[k].append(ar[k]) + elif k[0] == '_': + continue + elif isinstance(rules[k], dict): + ret[k] = [] + for k1 in rules[k].keys(): + if isinstance(k1, types.FunctionType) and k1(var): + if isinstance(rules[k][k1], list): + for i in rules[k][k1]: + if isinstance(i, dict): + res = applyrules({'supertext': i}, d, var) + i = res.get('supertext', '') + ret[k].append(replace(i, d)) + else: + i = rules[k][k1] + if isinstance(i, dict): + res = applyrules({'supertext': i}, d) + i = res.get('supertext', '') + ret[k].append(replace(i, d)) + else: + errmess(f'applyrules: ignoring rule {repr(rules[k])}.\n') + if isinstance(ret[k], list): + if len(ret[k]) == 1: + ret[k] = ret[k][0] + if ret[k] == []: + del ret[k] + return ret + + +_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]+)', + re.I).match +_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]*?' + r'__user__[\w_]*)', re.I).match + +def get_f2py_modulename(source): + name = None + with open(source) as f: + for line in f: + m = _f2py_module_name_match(line) + if m: + if _f2py_user_module_name_match(line): # skip *__user__* names + continue + name = m.group('name') + break + return name + +def getuseblocks(pymod): + all_uses = [] + for inner in pymod['body']: + for modblock in inner['body']: + if modblock.get('use'): + all_uses.extend([x for x in modblock.get("use").keys() if "__" not in x]) + return all_uses + +def process_f2cmap_dict(f2cmap_all, new_map, c2py_map, verbose=False): + """ + Update the Fortran-to-C type mapping dictionary with new mappings and + return a list of successfully mapped C types. + + This function integrates a new mapping dictionary into an existing + Fortran-to-C type mapping dictionary. It ensures that all keys are in + lowercase and validates new entries against a given C-to-Python mapping + dictionary. Redefinitions and invalid entries are reported with a warning. + + Parameters + ---------- + f2cmap_all : dict + The existing Fortran-to-C type mapping dictionary that will be updated. + It should be a dictionary of dictionaries where the main keys represent + Fortran types and the nested dictionaries map Fortran type specifiers + to corresponding C types. + + new_map : dict + A dictionary containing new type mappings to be added to `f2cmap_all`. + The structure should be similar to `f2cmap_all`, with keys representing + Fortran types and values being dictionaries of type specifiers and their + C type equivalents. + + c2py_map : dict + A dictionary used for validating the C types in `new_map`. It maps C + types to corresponding Python types and is used to ensure that the C + types specified in `new_map` are valid. + + verbose : boolean + A flag used to provide information about the types mapped + + Returns + ------- + tuple of (dict, list) + The updated Fortran-to-C type mapping dictionary and a list of + successfully mapped C types. + """ + f2cmap_mapped = [] + + new_map_lower = {} + for k, d1 in new_map.items(): + d1_lower = {k1.lower(): v1 for k1, v1 in d1.items()} + new_map_lower[k.lower()] = d1_lower + + for k, d1 in new_map_lower.items(): + if k not in f2cmap_all: + f2cmap_all[k] = {} + + for k1, v1 in d1.items(): + if v1 in c2py_map: + if k1 in f2cmap_all[k]: + outmess( + "\tWarning: redefinition of {'%s':{'%s':'%s'->'%s'}}\n" + % (k, k1, f2cmap_all[k][k1], v1) + ) + f2cmap_all[k][k1] = v1 + if verbose: + outmess(f'\tMapping "{k}(kind={k1})" to "{v1}\"\n') + f2cmap_mapped.append(v1) + elif verbose: + errmess( + "\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n" + % (k, k1, v1, v1, list(c2py_map.keys())) + ) + + return f2cmap_all, f2cmap_mapped diff --git a/local_packages/numpy/f2py/auxfuncs.pyi b/local_packages/numpy/f2py/auxfuncs.pyi new file mode 100644 index 0000000..32e381c --- /dev/null +++ b/local_packages/numpy/f2py/auxfuncs.pyi @@ -0,0 +1,262 @@ +from _typeshed import FileDescriptorOrPath +from collections.abc import Callable, Mapping +from pprint import pprint as show +from typing import Any, Final, Literal as L, Never, TypeAlias, TypeVar, overload + +from .cfuncs import errmess + +__all__ = [ + "applyrules", + "containscommon", + "containsderivedtypes", + "debugcapi", + "dictappend", + "errmess", + "gentitle", + "get_f2py_modulename", + "getargs2", + "getcallprotoargument", + "getcallstatement", + "getdimension", + "getfortranname", + "getpymethoddef", + "getrestdoc", + "getuseblocks", + "getusercode", + "getusercode1", + "hasbody", + "hascallstatement", + "hascommon", + "hasexternals", + "hasinitvalue", + "hasnote", + "hasresultnote", + "isallocatable", + "isarray", + "isarrayofstrings", + "isattr_value", + "ischaracter", + "ischaracter_or_characterarray", + "ischaracterarray", + "iscomplex", + "iscomplexarray", + "iscomplexfunction", + "iscomplexfunction_warn", + "iscstyledirective", + "isdouble", + "isdummyroutine", + "isexternal", + "isfunction", + "isfunction_wrap", + "isint1", + "isint1array", + "isinteger", + "isintent_aux", + "isintent_c", + "isintent_callback", + "isintent_copy", + "isintent_dict", + "isintent_hide", + "isintent_in", + "isintent_inout", + "isintent_inplace", + "isintent_nothide", + "isintent_out", + "isintent_overwrite", + "islogical", + "islogicalfunction", + "islong_complex", + "islong_double", + "islong_doublefunction", + "islong_long", + "islong_longfunction", + "ismodule", + "ismoduleroutine", + "isoptional", + "isprivate", + "isrequired", + "isroutine", + "isscalar", + "issigned_long_longarray", + "isstring", + "isstring_or_stringarray", + "isstringarray", + "isstringfunction", + "issubroutine", + "issubroutine_wrap", + "isthreadsafe", + "isunsigned", + "isunsigned_char", + "isunsigned_chararray", + "isunsigned_long_long", + "isunsigned_long_longarray", + "isunsigned_short", + "isunsigned_shortarray", + "isvariable", + "l_and", + "l_not", + "l_or", + "outmess", + "process_f2cmap_dict", + "replace", + "show", + "stripcomma", + "throw_error", +] + +### + +_VT = TypeVar("_VT") +_RT = TypeVar("_RT") + +_Var: TypeAlias = Mapping[str, list[str]] +_ROut: TypeAlias = Mapping[str, str] +_F2CMap: TypeAlias = Mapping[str, Mapping[str, str]] + +_Bool: TypeAlias = bool | L[0, 1] +_Intent: TypeAlias = L[ + "INTENT_IN", + "INTENT_OUT", + "INTENT_INOUT", + "INTENT_C", + "INTENT_CACHE", + "INTENT_HIDE", + "INTENT_INPLACE", + "INTENT_ALIGNED4", + "INTENT_ALIGNED8", + "INTENT_ALIGNED16", + "OPTIONAL", +] + +### + +isintent_dict: dict[Callable[[_Var], _Bool], _Intent] + +class F2PYError(Exception): ... + +class throw_error: + mess: Final[str] + def __init__(self, /, mess: str) -> None: ... + def __call__(self, /, var: _Var) -> Never: ... # raises F2PYError + +# +def l_and(*f: tuple[str, Callable[[_VT], _RT]]) -> Callable[[_VT], _RT]: ... +def l_or(*f: tuple[str, Callable[[_VT], _RT]]) -> Callable[[_VT], _RT]: ... +def l_not(f: tuple[str, Callable[[_VT], _RT]]) -> Callable[[_VT], _RT]: ... + +# +def outmess(t: str) -> None: ... +def debugcapi(var: _Var) -> bool: ... + +# +def hasinitvalue(var: _Var | str) -> bool: ... +def hasnote(var: _Var | str) -> bool: ... +def ischaracter(var: _Var) -> bool: ... +def ischaracterarray(var: _Var) -> bool: ... +def ischaracter_or_characterarray(var: _Var) -> bool: ... +def isstring(var: _Var) -> bool: ... +def isstringarray(var: _Var) -> bool: ... +def isstring_or_stringarray(var: _Var) -> bool: ... +def isarray(var: _Var) -> bool: ... +def isarrayofstrings(var: _Var) -> bool: ... +def isscalar(var: _Var) -> bool: ... +def iscomplex(var: _Var) -> bool: ... +def islogical(var: _Var) -> bool: ... +def isinteger(var: _Var) -> bool: ... +def isint1(var: _Var) -> bool: ... +def isint1array(var: _Var) -> bool: ... +def islong_long(var: _Var) -> _Bool: ... +def isunsigned(var: _Var) -> _Bool: ... +def isunsigned_char(var: _Var) -> _Bool: ... +def isunsigned_chararray(var: _Var) -> bool: ... +def isunsigned_short(var: _Var) -> _Bool: ... +def isunsigned_shortarray(var: _Var) -> bool: ... +def isunsigned_long_long(var: _Var) -> _Bool: ... +def isunsigned_long_longarray(var: _Var) -> bool: ... +def issigned_long_longarray(var: _Var) -> bool: ... +def isdouble(var: _Var) -> _Bool: ... +def islong_double(var: _Var) -> _Bool: ... +def islong_complex(var: _Var) -> _Bool: ... +def iscomplexarray(var: _Var) -> bool: ... +def isallocatable(var: _Var) -> bool: ... +def isattr_value(var: _Var) -> bool: ... +def isoptional(var: _Var) -> bool: ... +def isexternal(var: _Var) -> bool: ... +def isrequired(var: _Var) -> bool: ... +def isprivate(var: _Var) -> bool: ... +def isvariable(var: _Var) -> bool: ... +def isintent_in(var: _Var) -> _Bool: ... +def isintent_inout(var: _Var) -> bool: ... +def isintent_out(var: _Var) -> bool: ... +def isintent_hide(var: _Var) -> bool: ... +def isintent_nothide(var: _Var) -> bool: ... +def isintent_c(var: _Var) -> bool: ... +def isintent_cache(var: _Var) -> bool: ... +def isintent_copy(var: _Var) -> bool: ... +def isintent_overwrite(var: _Var) -> bool: ... +def isintent_callback(var: _Var) -> bool: ... +def isintent_inplace(var: _Var) -> bool: ... +def isintent_aux(var: _Var) -> bool: ... + +# +def containsderivedtypes(rout: _ROut) -> L[0, 1]: ... +def containscommon(rout: _ROut) -> _Bool: ... +def hasexternals(rout: _ROut) -> bool: ... +def hasresultnote(rout: _ROut) -> _Bool: ... +def hasbody(rout: _ROut) -> _Bool: ... +def hascommon(rout: _ROut) -> bool: ... +def hasderivedtypes(rout: _ROut) -> bool: ... +def hascallstatement(rout: _ROut) -> bool: ... +def isroutine(rout: _ROut) -> bool: ... +def ismodule(rout: _ROut) -> bool: ... +def ismoduleroutine(rout: _ROut) -> bool: ... +def issubroutine(rout: _ROut) -> bool: ... +def issubroutine_wrap(rout: _ROut) -> _Bool: ... +def isfunction(rout: _ROut) -> bool: ... +def isfunction_wrap(rout: _ROut) -> _Bool: ... +def islogicalfunction(rout: _ROut) -> _Bool: ... +def islong_longfunction(rout: _ROut) -> _Bool: ... +def islong_doublefunction(rout: _ROut) -> _Bool: ... +def iscomplexfunction(rout: _ROut) -> _Bool: ... +def iscomplexfunction_warn(rout: _ROut) -> _Bool: ... +def isstringfunction(rout: _ROut) -> _Bool: ... +def isthreadsafe(rout: _ROut) -> bool: ... +def isdummyroutine(rout: _ROut) -> _Bool: ... +def iscstyledirective(f2py_line: str) -> bool: ... + +# . +def getdimension(var: _Var) -> list[Any] | None: ... +def getfortranname(rout: _ROut) -> str: ... +def getmultilineblock(rout: _ROut, blockname: str, comment: _Bool = 1, counter: int = 0) -> str | None: ... +def getcallstatement(rout: _ROut) -> str | None: ... +def getcallprotoargument(rout: _ROut, cb_map: dict[str, str] = {}) -> str: ... +def getusercode(rout: _ROut) -> str | None: ... +def getusercode1(rout: _ROut) -> str | None: ... +def getpymethoddef(rout: _ROut) -> str | None: ... +def getargs(rout: _ROut) -> tuple[list[str], list[str]]: ... +def getargs2(rout: _ROut) -> tuple[list[str], list[str]]: ... +def getrestdoc(rout: _ROut) -> str | None: ... + +# +def gentitle(name: str) -> str: ... +def stripcomma(s: str) -> str: ... +@overload +def replace(str: str, d: list[str], defaultsep: str = "") -> list[str]: ... +@overload +def replace(str: list[str], d: str, defaultsep: str = "") -> list[str]: ... +@overload +def replace(str: str, d: str, defaultsep: str = "") -> str: ... + +# +def dictappend(rd: Mapping[str, object], ar: Mapping[str, object] | list[Mapping[str, object]]) -> dict[str, Any]: ... +def applyrules(rules: Mapping[str, object], d: Mapping[str, object], var: _Var = {}) -> dict[str, Any]: ... + +# +def get_f2py_modulename(source: FileDescriptorOrPath) -> str: ... +def getuseblocks(pymod: Mapping[str, Mapping[str, Mapping[str, str]]]) -> list[str]: ... +def process_f2cmap_dict( + f2cmap_all: _F2CMap, + new_map: _F2CMap, + c2py_map: _F2CMap, + verbose: bool = False, +) -> tuple[dict[str, dict[str, str]], list[str]]: ... diff --git a/local_packages/numpy/f2py/capi_maps.py b/local_packages/numpy/f2py/capi_maps.py new file mode 100644 index 0000000..290ac2f --- /dev/null +++ b/local_packages/numpy/f2py/capi_maps.py @@ -0,0 +1,811 @@ +""" +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +from . import __version__ + +f2py_version = __version__.version + +import copy +import os +import re + +from . import cb_rules +from ._isocbind import iso_c2py_map, iso_c_binding_map, isoc_c2pycode_map + +# The environment provided by auxfuncs.py is needed for some calls to eval. +# As the needed functions cannot be determined by static inspection of the +# code, it is safest to use import * pending a major refactoring of f2py. +from .auxfuncs import * +from .crackfortran import markoutercomma + +__all__ = [ + 'getctype', 'getstrlength', 'getarrdims', 'getpydocsign', + 'getarrdocsign', 'getinit', 'sign2map', 'routsign2map', 'modsign2map', + 'cb_sign2map', 'cb_routsign2map', 'common_sign2map', 'process_f2cmap_dict' +] + + +depargs = [] +lcb_map = {} +lcb2_map = {} +# forced casting: mainly caused by the fact that Python or Numeric +# C/APIs do not support the corresponding C types. +c2py_map = {'double': 'float', + 'float': 'float', # forced casting + 'long_double': 'float', # forced casting + 'char': 'int', # forced casting + 'signed_char': 'int', # forced casting + 'unsigned_char': 'int', # forced casting + 'short': 'int', # forced casting + 'unsigned_short': 'int', # forced casting + 'int': 'int', # forced casting + 'long': 'int', + 'long_long': 'long', + 'unsigned': 'int', # forced casting + 'complex_float': 'complex', # forced casting + 'complex_double': 'complex', + 'complex_long_double': 'complex', # forced casting + 'string': 'string', + 'character': 'bytes', + } + +c2capi_map = {'double': 'NPY_DOUBLE', + 'float': 'NPY_FLOAT', + 'long_double': 'NPY_LONGDOUBLE', + 'char': 'NPY_BYTE', + 'unsigned_char': 'NPY_UBYTE', + 'signed_char': 'NPY_BYTE', + 'short': 'NPY_SHORT', + 'unsigned_short': 'NPY_USHORT', + 'int': 'NPY_INT', + 'unsigned': 'NPY_UINT', + 'long': 'NPY_LONG', + 'unsigned_long': 'NPY_ULONG', + 'long_long': 'NPY_LONGLONG', + 'unsigned_long_long': 'NPY_ULONGLONG', + 'complex_float': 'NPY_CFLOAT', + 'complex_double': 'NPY_CDOUBLE', + 'complex_long_double': 'NPY_CDOUBLE', + 'string': 'NPY_STRING', + 'character': 'NPY_STRING'} + +c2pycode_map = {'double': 'd', + 'float': 'f', + 'long_double': 'g', + 'char': 'b', + 'unsigned_char': 'B', + 'signed_char': 'b', + 'short': 'h', + 'unsigned_short': 'H', + 'int': 'i', + 'unsigned': 'I', + 'long': 'l', + 'unsigned_long': 'L', + 'long_long': 'q', + 'unsigned_long_long': 'Q', + 'complex_float': 'F', + 'complex_double': 'D', + 'complex_long_double': 'G', + 'string': 'S', + 'character': 'c'} + +# https://docs.python.org/3/c-api/arg.html#building-values +c2buildvalue_map = {'double': 'd', + 'float': 'f', + 'char': 'b', + 'signed_char': 'b', + 'short': 'h', + 'int': 'i', + 'long': 'l', + 'long_long': 'L', + 'complex_float': 'N', + 'complex_double': 'N', + 'complex_long_double': 'N', + 'string': 'y', + 'character': 'c'} + +f2cmap_all = {'real': {'': 'float', '4': 'float', '8': 'double', + '12': 'long_double', '16': 'long_double'}, + 'integer': {'': 'int', '1': 'signed_char', '2': 'short', + '4': 'int', '8': 'long_long', + '-1': 'unsigned_char', '-2': 'unsigned_short', + '-4': 'unsigned', '-8': 'unsigned_long_long'}, + 'complex': {'': 'complex_float', '8': 'complex_float', + '16': 'complex_double', '24': 'complex_long_double', + '32': 'complex_long_double'}, + 'complexkind': {'': 'complex_float', '4': 'complex_float', + '8': 'complex_double', '12': 'complex_long_double', + '16': 'complex_long_double'}, + 'logical': {'': 'int', '1': 'char', '2': 'short', '4': 'int', + '8': 'long_long'}, + 'double complex': {'': 'complex_double'}, + 'double precision': {'': 'double'}, + 'byte': {'': 'char'}, + } + +# Add ISO_C handling +c2pycode_map.update(isoc_c2pycode_map) +c2py_map.update(iso_c2py_map) +f2cmap_all, _ = process_f2cmap_dict(f2cmap_all, iso_c_binding_map, c2py_map) +# End ISO_C handling +f2cmap_default = copy.deepcopy(f2cmap_all) + +f2cmap_mapped = [] + +def load_f2cmap_file(f2cmap_file): + global f2cmap_all, f2cmap_mapped + + f2cmap_all = copy.deepcopy(f2cmap_default) + + if f2cmap_file is None: + # Default value + f2cmap_file = '.f2py_f2cmap' + if not os.path.isfile(f2cmap_file): + return + + # User defined additions to f2cmap_all. + # f2cmap_file must contain a dictionary of dictionaries, only. For + # example, {'real':{'low':'float'}} means that Fortran 'real(low)' is + # interpreted as C 'float'. This feature is useful for F90/95 users if + # they use PARAMETERS in type specifications. + try: + outmess(f'Reading f2cmap from {f2cmap_file!r} ...\n') + with open(f2cmap_file) as f: + d = eval(f.read().lower(), {}, {}) + f2cmap_all, f2cmap_mapped = process_f2cmap_dict(f2cmap_all, d, c2py_map, True) + outmess('Successfully applied user defined f2cmap changes\n') + except Exception as msg: + errmess(f'Failed to apply user defined f2cmap changes: {msg}. Skipping.\n') + + +cformat_map = {'double': '%g', + 'float': '%g', + 'long_double': '%Lg', + 'char': '%d', + 'signed_char': '%d', + 'unsigned_char': '%hhu', + 'short': '%hd', + 'unsigned_short': '%hu', + 'int': '%d', + 'unsigned': '%u', + 'long': '%ld', + 'unsigned_long': '%lu', + 'long_long': '%ld', + 'complex_float': '(%g,%g)', + 'complex_double': '(%g,%g)', + 'complex_long_double': '(%Lg,%Lg)', + 'string': '\\"%s\\"', + 'character': "'%c'", + } + +# Auxiliary functions + + +def getctype(var): + """ + Determines C type + """ + ctype = 'void' + if isfunction(var): + if 'result' in var: + a = var['result'] + else: + a = var['name'] + if a in var['vars']: + return getctype(var['vars'][a]) + else: + errmess(f'getctype: function {a} has no return value?!\n') + elif issubroutine(var): + return ctype + elif ischaracter_or_characterarray(var): + return 'character' + elif isstring_or_stringarray(var): + return 'string' + elif 'typespec' in var and var['typespec'].lower() in f2cmap_all: + typespec = var['typespec'].lower() + f2cmap = f2cmap_all[typespec] + ctype = f2cmap[''] # default type + if 'kindselector' in var: + if '*' in var['kindselector']: + try: + ctype = f2cmap[var['kindselector']['*']] + except KeyError: + errmess('getctype: "%s %s %s" not supported.\n' % + (var['typespec'], '*', var['kindselector']['*'])) + elif 'kind' in var['kindselector']: + if typespec + 'kind' in f2cmap_all: + f2cmap = f2cmap_all[typespec + 'kind'] + try: + ctype = f2cmap[var['kindselector']['kind']] + except KeyError: + if typespec in f2cmap_all: + f2cmap = f2cmap_all[typespec] + try: + ctype = f2cmap[str(var['kindselector']['kind'])] + except KeyError: + errmess('getctype: "%s(kind=%s)" is mapped to C "%s" (to override define dict(%s = dict(%s="")) in %s/.f2py_f2cmap file).\n' + % (typespec, var['kindselector']['kind'], ctype, + typespec, var['kindselector']['kind'], os.getcwd())) + elif not isexternal(var): + errmess(f'getctype: No C-type found in "{var}", assuming void.\n') + return ctype + + +def f2cexpr(expr): + """Rewrite Fortran expression as f2py supported C expression. + + Due to the lack of a proper expression parser in f2py, this + function uses a heuristic approach that assumes that Fortran + arithmetic expressions are valid C arithmetic expressions when + mapping Fortran function calls to the corresponding C function/CPP + macros calls. + + """ + # TODO: support Fortran `len` function with optional kind parameter + expr = re.sub(r'\blen\b', 'f2py_slen', expr) + return expr + + +def getstrlength(var): + if isstringfunction(var): + if 'result' in var: + a = var['result'] + else: + a = var['name'] + if a in var['vars']: + return getstrlength(var['vars'][a]) + else: + errmess(f'getstrlength: function {a} has no return value?!\n') + if not isstring(var): + errmess( + f'getstrlength: expected a signature of a string but got: {repr(var)}\n') + len = '1' + if 'charselector' in var: + a = var['charselector'] + if '*' in a: + len = a['*'] + elif 'len' in a: + len = f2cexpr(a['len']) + if re.match(r'\(\s*(\*|:)\s*\)', len) or re.match(r'(\*|:)', len): + if isintent_hide(var): + errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\n' % ( + repr(var))) + len = '-1' + return len + + +def getarrdims(a, var, verbose=0): + ret = {} + if isstring(var) and not isarray(var): + ret['size'] = getstrlength(var) + ret['rank'] = '0' + ret['dims'] = '' + elif isscalar(var): + ret['size'] = '1' + ret['rank'] = '0' + ret['dims'] = '' + elif isarray(var): + dim = copy.copy(var['dimension']) + ret['size'] = '*'.join(dim) + try: + ret['size'] = repr(eval(ret['size'])) + except Exception: + pass + ret['dims'] = ','.join(dim) + ret['rank'] = repr(len(dim)) + ret['rank*[-1]'] = repr(len(dim) * [-1])[1:-1] + for i in range(len(dim)): # solve dim for dependencies + v = [] + if dim[i] in depargs: + v = [dim[i]] + else: + for va in depargs: + if re.match(r'.*?\b%s\b.*' % va, dim[i]): + v.append(va) + for va in v: + if depargs.index(va) > depargs.index(a): + dim[i] = '*' + break + ret['setdims'], i = '', -1 + for d in dim: + i = i + 1 + if d not in ['*', ':', '(*)', '(:)']: + ret['setdims'] = '%s#varname#_Dims[%d]=%s,' % ( + ret['setdims'], i, d) + if ret['setdims']: + ret['setdims'] = ret['setdims'][:-1] + ret['cbsetdims'], i = '', -1 + for d in var['dimension']: + i = i + 1 + if d not in ['*', ':', '(*)', '(:)']: + ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % ( + ret['cbsetdims'], i, d) + elif isintent_in(var): + outmess('getarrdims:warning: assumed shape array, using 0 instead of %r\n' + % (d)) + ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % ( + ret['cbsetdims'], i, 0) + elif verbose: + errmess( + f'getarrdims: If in call-back function: array argument {repr(a)} must have bounded dimensions: got {repr(d)}\n') + if ret['cbsetdims']: + ret['cbsetdims'] = ret['cbsetdims'][:-1] +# if not isintent_c(var): +# var['dimension'].reverse() + return ret + + +def getpydocsign(a, var): + global lcb_map + if isfunction(var): + if 'result' in var: + af = var['result'] + else: + af = var['name'] + if af in var['vars']: + return getpydocsign(af, var['vars'][af]) + else: + errmess(f'getctype: function {af} has no return value?!\n') + return '', '' + sig, sigout = a, a + opt = '' + if isintent_in(var): + opt = 'input' + elif isintent_inout(var): + opt = 'in/output' + out_a = a + if isintent_out(var): + for k in var['intent']: + if k[:4] == 'out=': + out_a = k[4:] + break + init = '' + ctype = getctype(var) + + if hasinitvalue(var): + init, showinit = getinit(a, var) + init = f', optional\\n Default: {showinit}' + if isscalar(var): + if isintent_inout(var): + sig = '%s : %s rank-0 array(%s,\'%s\')%s' % (a, opt, c2py_map[ctype], + c2pycode_map[ctype], init) + else: + sig = f'{a} : {opt} {c2py_map[ctype]}{init}' + sigout = f'{out_a} : {c2py_map[ctype]}' + elif isstring(var): + if isintent_inout(var): + sig = '%s : %s rank-0 array(string(len=%s),\'c\')%s' % ( + a, opt, getstrlength(var), init) + else: + sig = f'{a} : {opt} string(len={getstrlength(var)}){init}' + sigout = f'{out_a} : string(len={getstrlength(var)})' + elif isarray(var): + dim = var['dimension'] + rank = repr(len(dim)) + sig = '%s : %s rank-%s array(\'%s\') with bounds (%s)%s' % (a, opt, rank, + c2pycode_map[ + ctype], + ','.join(dim), init) + if a == out_a: + sigout = '%s : rank-%s array(\'%s\') with bounds (%s)'\ + % (a, rank, c2pycode_map[ctype], ','.join(dim)) + else: + sigout = '%s : rank-%s array(\'%s\') with bounds (%s) and %s storage'\ + % (out_a, rank, c2pycode_map[ctype], ','.join(dim), a) + elif isexternal(var): + ua = '' + if a in lcb_map and lcb_map[a] in lcb2_map and 'argname' in lcb2_map[lcb_map[a]]: + ua = lcb2_map[lcb_map[a]]['argname'] + if not ua == a: + ua = f' => {ua}' + else: + ua = '' + sig = f'{a} : call-back function{ua}' + sigout = sig + else: + errmess( + f'getpydocsign: Could not resolve docsignature for "{a}".\n') + return sig, sigout + + +def getarrdocsign(a, var): + ctype = getctype(var) + if isstring(var) and (not isarray(var)): + sig = f'{a} : rank-0 array(string(len={getstrlength(var)}),\'c\')' + elif isscalar(var): + sig = f'{a} : rank-0 array({c2py_map[ctype]},\'{c2pycode_map[ctype]}\')' + elif isarray(var): + dim = var['dimension'] + rank = repr(len(dim)) + sig = '%s : rank-%s array(\'%s\') with bounds (%s)' % (a, rank, + c2pycode_map[ + ctype], + ','.join(dim)) + return sig + + +def getinit(a, var): + if isstring(var): + init, showinit = '""', "''" + else: + init, showinit = '', '' + if hasinitvalue(var): + init = var['='] + showinit = init + if iscomplex(var) or iscomplexarray(var): + ret = {} + + try: + v = var["="] + if ',' in v: + ret['init.r'], ret['init.i'] = markoutercomma( + v[1:-1]).split('@,@') + else: + v = eval(v, {}, {}) + ret['init.r'], ret['init.i'] = str(v.real), str(v.imag) + except Exception: + raise ValueError( + f'getinit: expected complex number `(r,i)\' but got `{init}\' as initial value of {a!r}.') + if isarray(var): + init = f"(capi_c.r={ret['init.r']},capi_c.i={ret['init.i']},capi_c)" + elif isstring(var): + if not init: + init, showinit = '""', "''" + if init[0] == "'": + init = '"%s"' % (init[1:-1].replace('"', '\\"')) + if init[0] == '"': + showinit = f"'{init[1:-1]}'" + return init, showinit + + +def get_elsize(var): + if isstring(var) or isstringarray(var): + elsize = getstrlength(var) + # override with user-specified length when available: + elsize = var['charselector'].get('f2py_len', elsize) + return elsize + if ischaracter(var) or ischaracterarray(var): + return '1' + # for numerical types, PyArray_New* functions ignore specified + # elsize, so we just return 1 and let elsize be determined at + # runtime, see fortranobject.c + return '1' + + +def sign2map(a, var): + """ + varname,ctype,atype + init,init.r,init.i,pytype + vardebuginfo,vardebugshowvalue,varshowvalue + varrformat + + intent + """ + out_a = a + if isintent_out(var): + for k in var['intent']: + if k[:4] == 'out=': + out_a = k[4:] + break + ret = {'varname': a, 'outvarname': out_a, 'ctype': getctype(var)} + intent_flags = [] + for f, s in isintent_dict.items(): + if f(var): + intent_flags.append(f'F2PY_{s}') + if intent_flags: + # TODO: Evaluate intent_flags here. + ret['intent'] = '|'.join(intent_flags) + else: + ret['intent'] = 'F2PY_INTENT_IN' + if isarray(var): + ret['varrformat'] = 'N' + elif ret['ctype'] in c2buildvalue_map: + ret['varrformat'] = c2buildvalue_map[ret['ctype']] + else: + ret['varrformat'] = 'O' + ret['init'], ret['showinit'] = getinit(a, var) + if hasinitvalue(var) and iscomplex(var) and not isarray(var): + ret['init.r'], ret['init.i'] = markoutercomma( + ret['init'][1:-1]).split('@,@') + if isexternal(var): + ret['cbnamekey'] = a + if a in lcb_map: + ret['cbname'] = lcb_map[a] + ret['maxnofargs'] = lcb2_map[lcb_map[a]]['maxnofargs'] + ret['nofoptargs'] = lcb2_map[lcb_map[a]]['nofoptargs'] + ret['cbdocstr'] = lcb2_map[lcb_map[a]]['docstr'] + ret['cblatexdocstr'] = lcb2_map[lcb_map[a]]['latexdocstr'] + else: + ret['cbname'] = a + errmess('sign2map: Confused: external %s is not in lcb_map%s.\n' % ( + a, list(lcb_map.keys()))) + if isstring(var): + ret['length'] = getstrlength(var) + if isarray(var): + ret = dictappend(ret, getarrdims(a, var)) + dim = copy.copy(var['dimension']) + if ret['ctype'] in c2capi_map: + ret['atype'] = c2capi_map[ret['ctype']] + ret['elsize'] = get_elsize(var) + # Debug info + if debugcapi(var): + il = [isintent_in, 'input', isintent_out, 'output', + isintent_inout, 'inoutput', isrequired, 'required', + isoptional, 'optional', isintent_hide, 'hidden', + iscomplex, 'complex scalar', + l_and(isscalar, l_not(iscomplex)), 'scalar', + isstring, 'string', isarray, 'array', + iscomplexarray, 'complex array', isstringarray, 'string array', + iscomplexfunction, 'complex function', + l_and(isfunction, l_not(iscomplexfunction)), 'function', + isexternal, 'callback', + isintent_callback, 'callback', + isintent_aux, 'auxiliary', + ] + rl = [] + for i in range(0, len(il), 2): + if il[i](var): + rl.append(il[i + 1]) + if isstring(var): + rl.append(f"slen({a})={ret['length']}") + if isarray(var): + ddim = ','.join( + map(lambda x, y: f'{x}|{y}', var['dimension'], dim)) + rl.append(f'dims({ddim})') + if isexternal(var): + ret['vardebuginfo'] = f"debug-capi:{a}=>{ret['cbname']}:{','.join(rl)}" + else: + ret['vardebuginfo'] = 'debug-capi:%s %s=%s:%s' % ( + ret['ctype'], a, ret['showinit'], ','.join(rl)) + if isscalar(var): + if ret['ctype'] in cformat_map: + ret['vardebugshowvalue'] = f"debug-capi:{a}={cformat_map[ret['ctype']]}" + if isstring(var): + ret['vardebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % ( + a, a) + if isexternal(var): + ret['vardebugshowvalue'] = f'debug-capi:{a}=%p' + if ret['ctype'] in cformat_map: + ret['varshowvalue'] = f"#name#:{a}={cformat_map[ret['ctype']]}" + ret['showvalueformat'] = f"{cformat_map[ret['ctype']]}" + if isstring(var): + ret['varshowvalue'] = '#name#:slen(%s)=%%d %s=\\"%%s\\"' % (a, a) + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) + if hasnote(var): + ret['note'] = var['note'] + return ret + + +def routsign2map(rout): + """ + name,NAME,begintitle,endtitle + rname,ctype,rformat + routdebugshowvalue + """ + global lcb_map + name = rout['name'] + fname = getfortranname(rout) + ret = {'name': name, + 'texname': name.replace('_', '\\_'), + 'name_lower': name.lower(), + 'NAME': name.upper(), + 'begintitle': gentitle(name), + 'endtitle': gentitle(f'end of {name}'), + 'fortranname': fname, + 'FORTRANNAME': fname.upper(), + 'callstatement': getcallstatement(rout) or '', + 'usercode': getusercode(rout) or '', + 'usercode1': getusercode1(rout) or '', + } + if '_' in fname: + ret['F_FUNC'] = 'F_FUNC_US' + else: + ret['F_FUNC'] = 'F_FUNC' + if '_' in name: + ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC_US' + else: + ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC' + lcb_map = {} + if 'use' in rout: + for u in rout['use'].keys(): + if u in cb_rules.cb_map: + for un in cb_rules.cb_map[u]: + ln = un[0] + if 'map' in rout['use'][u]: + for k in rout['use'][u]['map'].keys(): + if rout['use'][u]['map'][k] == un[0]: + ln = k + break + lcb_map[ln] = un[1] + elif rout.get('externals'): + errmess('routsign2map: Confused: function %s has externals %s but no "use" statement.\n' % ( + ret['name'], repr(rout['externals']))) + ret['callprotoargument'] = getcallprotoargument(rout, lcb_map) or '' + if isfunction(rout): + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + ret['rname'] = a + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, rout) + ret['ctype'] = getctype(rout['vars'][a]) + if hasresultnote(rout): + ret['resultnote'] = rout['vars'][a]['note'] + rout['vars'][a]['note'] = ['See elsewhere.'] + if ret['ctype'] in c2buildvalue_map: + ret['rformat'] = c2buildvalue_map[ret['ctype']] + else: + ret['rformat'] = 'O' + errmess('routsign2map: no c2buildvalue key for type %s\n' % + (repr(ret['ctype']))) + if debugcapi(rout): + if ret['ctype'] in cformat_map: + ret['routdebugshowvalue'] = 'debug-capi:%s=%s' % ( + a, cformat_map[ret['ctype']]) + if isstringfunction(rout): + ret['routdebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % ( + a, a) + if isstringfunction(rout): + ret['rlength'] = getstrlength(rout['vars'][a]) + if ret['rlength'] == '-1': + errmess('routsign2map: expected explicit specification of the length of the string returned by the fortran function %s; taking 10.\n' % ( + repr(rout['name']))) + ret['rlength'] = '10' + if hasnote(rout): + ret['note'] = rout['note'] + rout['note'] = ['See elsewhere.'] + return ret + + +def modsign2map(m): + """ + modulename + """ + if ismodule(m): + ret = {'f90modulename': m['name'], + 'F90MODULENAME': m['name'].upper(), + 'texf90modulename': m['name'].replace('_', '\\_')} + else: + ret = {'modulename': m['name'], + 'MODULENAME': m['name'].upper(), + 'texmodulename': m['name'].replace('_', '\\_')} + ret['restdoc'] = getrestdoc(m) or [] + if hasnote(m): + ret['note'] = m['note'] + ret['usercode'] = getusercode(m) or '' + ret['usercode1'] = getusercode1(m) or '' + if m['body']: + ret['interface_usercode'] = getusercode(m['body'][0]) or '' + else: + ret['interface_usercode'] = '' + ret['pymethoddef'] = getpymethoddef(m) or '' + if 'gil_used' in m: + ret['gil_used'] = m['gil_used'] + if 'coutput' in m: + ret['coutput'] = m['coutput'] + if 'f2py_wrapper_output' in m: + ret['f2py_wrapper_output'] = m['f2py_wrapper_output'] + return ret + + +def cb_sign2map(a, var, index=None): + ret = {'varname': a} + ret['varname_i'] = ret['varname'] + ret['ctype'] = getctype(var) + if ret['ctype'] in c2capi_map: + ret['atype'] = c2capi_map[ret['ctype']] + ret['elsize'] = get_elsize(var) + if ret['ctype'] in cformat_map: + ret['showvalueformat'] = f"{cformat_map[ret['ctype']]}" + if isarray(var): + ret = dictappend(ret, getarrdims(a, var)) + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) + if hasnote(var): + ret['note'] = var['note'] + var['note'] = ['See elsewhere.'] + return ret + + +def cb_routsign2map(rout, um): + """ + name,begintitle,endtitle,argname + ctype,rctype,maxnofargs,nofoptargs,returncptr + """ + ret = {'name': f"cb_{rout['name']}_in_{um}", + 'returncptr': ''} + if isintent_callback(rout): + if '_' in rout['name']: + F_FUNC = 'F_FUNC_US' + else: + F_FUNC = 'F_FUNC' + ret['callbackname'] = f"{F_FUNC}({rout['name'].lower()},{rout['name'].upper()})" + ret['static'] = 'extern' + else: + ret['callbackname'] = ret['name'] + ret['static'] = 'static' + ret['argname'] = rout['name'] + ret['begintitle'] = gentitle(ret['name']) + ret['endtitle'] = gentitle(f"end of {ret['name']}") + ret['ctype'] = getctype(rout) + ret['rctype'] = 'void' + if ret['ctype'] == 'string': + ret['rctype'] = 'void' + else: + ret['rctype'] = ret['ctype'] + if ret['rctype'] != 'void': + if iscomplexfunction(rout): + ret['returncptr'] = """ +#ifdef F2PY_CB_RETURNCOMPLEX +return_value= +#endif +""" + else: + ret['returncptr'] = 'return_value=' + if ret['ctype'] in cformat_map: + ret['showvalueformat'] = f"{cformat_map[ret['ctype']]}" + if isstringfunction(rout): + ret['strlength'] = getstrlength(rout) + if isfunction(rout): + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if hasnote(rout['vars'][a]): + ret['note'] = rout['vars'][a]['note'] + rout['vars'][a]['note'] = ['See elsewhere.'] + ret['rname'] = a + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, rout) + if iscomplexfunction(rout): + ret['rctype'] = """ +#ifdef F2PY_CB_RETURNCOMPLEX +#ctype# +#else +void +#endif +""" + elif hasnote(rout): + ret['note'] = rout['note'] + rout['note'] = ['See elsewhere.'] + nofargs = 0 + nofoptargs = 0 + if 'args' in rout and 'vars' in rout: + for a in rout['args']: + var = rout['vars'][a] + if l_or(isintent_in, isintent_inout)(var): + nofargs = nofargs + 1 + if isoptional(var): + nofoptargs = nofoptargs + 1 + ret['maxnofargs'] = repr(nofargs) + ret['nofoptargs'] = repr(nofoptargs) + if hasnote(rout) and isfunction(rout) and 'result' in rout: + ret['routnote'] = rout['note'] + rout['note'] = ['See elsewhere.'] + return ret + + +def common_sign2map(a, var): # obsolete + ret = {'varname': a, 'ctype': getctype(var)} + if isstringarray(var): + ret['ctype'] = 'char' + if ret['ctype'] in c2capi_map: + ret['atype'] = c2capi_map[ret['ctype']] + ret['elsize'] = get_elsize(var) + if ret['ctype'] in cformat_map: + ret['showvalueformat'] = f"{cformat_map[ret['ctype']]}" + if isarray(var): + ret = dictappend(ret, getarrdims(a, var)) + elif isstring(var): + ret['size'] = getstrlength(var) + ret['rank'] = '1' + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) + if hasnote(var): + ret['note'] = var['note'] + var['note'] = ['See elsewhere.'] + # for strings this returns 0-rank but actually is 1-rank + ret['arrdocstr'] = getarrdocsign(a, var) + return ret diff --git a/local_packages/numpy/f2py/capi_maps.pyi b/local_packages/numpy/f2py/capi_maps.pyi new file mode 100644 index 0000000..9266003 --- /dev/null +++ b/local_packages/numpy/f2py/capi_maps.pyi @@ -0,0 +1,33 @@ +from .auxfuncs import _ROut, _Var, process_f2cmap_dict + +__all__ = [ + "cb_routsign2map", + "cb_sign2map", + "common_sign2map", + "getarrdims", + "getarrdocsign", + "getctype", + "getinit", + "getpydocsign", + "getstrlength", + "modsign2map", + "process_f2cmap_dict", + "routsign2map", + "sign2map", +] + +### + +def getctype(var: _Var) -> str: ... +def f2cexpr(expr: str) -> str: ... +def getstrlength(var: _Var) -> str: ... +def getarrdims(a: str, var: _Var, verbose: int = 0) -> dict[str, str]: ... +def getpydocsign(a: str, var: _Var) -> tuple[str, str]: ... +def getarrdocsign(a: str, var: _Var) -> str: ... +def getinit(a: str, var: _Var) -> tuple[str, str]: ... +def sign2map(a: str, var: _Var) -> dict[str, str]: ... +def routsign2map(rout: _ROut) -> dict[str, str]: ... +def modsign2map(m: _ROut) -> dict[str, str]: ... +def cb_sign2map(a: str, var: _Var, index: object | None = None) -> dict[str, str]: ... +def cb_routsign2map(rout: _ROut, um: str) -> dict[str, str]: ... +def common_sign2map(a: str, var: _Var) -> dict[str, str]: ... # obsolete diff --git a/local_packages/numpy/f2py/cb_rules.py b/local_packages/numpy/f2py/cb_rules.py new file mode 100644 index 0000000..238d473 --- /dev/null +++ b/local_packages/numpy/f2py/cb_rules.py @@ -0,0 +1,665 @@ +""" +Build call-back mechanism for f2py2e. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +from . import __version__, cfuncs +from .auxfuncs import ( + applyrules, + debugcapi, + dictappend, + errmess, + getargs, + hasnote, + isarray, + iscomplex, + iscomplexarray, + iscomplexfunction, + isfunction, + isintent_c, + isintent_hide, + isintent_in, + isintent_inout, + isintent_nothide, + isintent_out, + isoptional, + isrequired, + isscalar, + isstring, + isstringfunction, + issubroutine, + l_and, + l_not, + l_or, + outmess, + replace, + stripcomma, + throw_error, +) + +f2py_version = __version__.version + + +################## Rules for callback function ############## + +cb_routine_rules = { + 'cbtypedefs': 'typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);', + 'body': """ +#begintitle# +typedef struct { + PyObject *capi; + PyTupleObject *args_capi; + int nofargs; + jmp_buf jmpbuf; +} #name#_t; + +#if defined(F2PY_THREAD_LOCAL_DECL) && !defined(F2PY_USE_PYTHON_TLS) + +static F2PY_THREAD_LOCAL_DECL #name#_t *_active_#name# = NULL; + +static #name#_t *swap_active_#name#(#name#_t *ptr) { + #name#_t *prev = _active_#name#; + _active_#name# = ptr; + return prev; +} + +static #name#_t *get_active_#name#(void) { + return _active_#name#; +} + +#else + +static #name#_t *swap_active_#name#(#name#_t *ptr) { + char *key = "__f2py_cb_#name#"; + return (#name#_t *)F2PySwapThreadLocalCallbackPtr(key, ptr); +} + +static #name#_t *get_active_#name#(void) { + char *key = "__f2py_cb_#name#"; + return (#name#_t *)F2PyGetThreadLocalCallbackPtr(key); +} + +#endif + +/*typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);*/ +#static# #rctype# #callbackname# (#optargs##args##strarglens##noargs#) { + #name#_t cb_local = { NULL, NULL, 0 }; + #name#_t *cb = NULL; + PyTupleObject *capi_arglist = NULL; + PyObject *capi_return = NULL; + PyObject *capi_tmp = NULL; + PyObject *capi_arglist_list = NULL; + int capi_j,capi_i = 0; + int capi_longjmp_ok = 1; +#decl# +#ifdef F2PY_REPORT_ATEXIT +f2py_cb_start_clock(); +#endif + cb = get_active_#name#(); + if (cb == NULL) { + capi_longjmp_ok = 0; + cb = &cb_local; + } + capi_arglist = cb->args_capi; + CFUNCSMESS(\"cb:Call-back function #name# (maxnofargs=#maxnofargs#(-#nofoptargs#))\\n\"); + CFUNCSMESSPY(\"cb:#name#_capi=\",cb->capi); + if (cb->capi==NULL) { + capi_longjmp_ok = 0; + cb->capi = PyObject_GetAttrString(#modulename#_module,\"#argname#\"); + CFUNCSMESSPY(\"cb:#name#_capi=\",cb->capi); + } + if (cb->capi==NULL) { + PyErr_SetString(#modulename#_error,\"cb: Callback #argname# not defined (as an argument or module #modulename# attribute).\\n\"); + goto capi_fail; + } + if (F2PyCapsule_Check(cb->capi)) { + #name#_typedef #name#_cptr; + #name#_cptr = F2PyCapsule_AsVoidPtr(cb->capi); + #returncptr#(*#name#_cptr)(#optargs_nm##args_nm##strarglens_nm#); + #return# + } + if (capi_arglist==NULL) { + capi_longjmp_ok = 0; + capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#argname#_extra_args\"); + if (capi_tmp) { + capi_arglist = (PyTupleObject *)PySequence_Tuple(capi_tmp); + Py_DECREF(capi_tmp); + if (capi_arglist==NULL) { + PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#argname#_extra_args to tuple.\\n\"); + goto capi_fail; + } + } else { + PyErr_Clear(); + capi_arglist = (PyTupleObject *)Py_BuildValue(\"()\"); + } + } + if (capi_arglist == NULL) { + PyErr_SetString(#modulename#_error,\"Callback #argname# argument list is not set.\\n\"); + goto capi_fail; + } +#setdims# +#ifdef PYPY_VERSION +#define CAPI_ARGLIST_SETITEM(idx, value) PyList_SetItem((PyObject *)capi_arglist_list, idx, value) + capi_arglist_list = PySequence_List((PyObject *)capi_arglist); + if (capi_arglist_list == NULL) goto capi_fail; +#else +#define CAPI_ARGLIST_SETITEM(idx, value) PyTuple_SetItem((PyObject *)capi_arglist, idx, value) +#endif +#pyobjfrom# +#undef CAPI_ARGLIST_SETITEM +#ifdef PYPY_VERSION + CFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist_list); +#else + CFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist); +#endif + CFUNCSMESS(\"cb:Call-back calling Python function #argname#.\\n\"); +#ifdef F2PY_REPORT_ATEXIT +f2py_cb_start_call_clock(); +#endif +#ifdef PYPY_VERSION + capi_return = PyObject_CallObject(cb->capi,(PyObject *)capi_arglist_list); + Py_DECREF(capi_arglist_list); + capi_arglist_list = NULL; +#else + capi_return = PyObject_CallObject(cb->capi,(PyObject *)capi_arglist); +#endif +#ifdef F2PY_REPORT_ATEXIT +f2py_cb_stop_call_clock(); +#endif + CFUNCSMESSPY(\"cb:capi_return=\",capi_return); + if (capi_return == NULL) { + fprintf(stderr,\"capi_return is NULL\\n\"); + goto capi_fail; + } + if (capi_return == Py_None) { + Py_DECREF(capi_return); + capi_return = Py_BuildValue(\"()\"); + } + else if (!PyTuple_Check(capi_return)) { + capi_return = Py_BuildValue(\"(N)\",capi_return); + } + capi_j = PyTuple_Size(capi_return); + capi_i = 0; +#frompyobj# + CFUNCSMESS(\"cb:#name#:successful\\n\"); + Py_DECREF(capi_return); +#ifdef F2PY_REPORT_ATEXIT +f2py_cb_stop_clock(); +#endif + goto capi_return_pt; +capi_fail: + fprintf(stderr,\"Call-back #name# failed.\\n\"); + Py_XDECREF(capi_return); + Py_XDECREF(capi_arglist_list); + if (capi_longjmp_ok) { + longjmp(cb->jmpbuf,-1); + } +capi_return_pt: + ; +#return# +} +#endtitle# +""", + 'need': ['setjmp.h', 'CFUNCSMESS', 'F2PY_THREAD_LOCAL_DECL'], + 'maxnofargs': '#maxnofargs#', + 'nofoptargs': '#nofoptargs#', + 'docstr': """\ + def #argname#(#docsignature#): return #docreturn#\\n\\ +#docstrsigns#""", + 'latexdocstr': """ +{{}\\verb@def #argname#(#latexdocsignature#): return #docreturn#@{}} +#routnote# + +#latexdocstrsigns#""", + 'docstrshort': 'def #argname#(#docsignature#): return #docreturn#' +} +cb_rout_rules = [ + { # Init + 'separatorsfor': {'decl': '\n', + 'args': ',', 'optargs': '', 'pyobjfrom': '\n', 'freemem': '\n', + 'args_td': ',', 'optargs_td': '', + 'args_nm': ',', 'optargs_nm': '', + 'frompyobj': '\n', 'setdims': '\n', + 'docstrsigns': '\\n"\n"', + 'latexdocstrsigns': '\n', + 'latexdocstrreq': '\n', 'latexdocstropt': '\n', + 'latexdocstrout': '\n', 'latexdocstrcbs': '\n', + }, + 'decl': '/*decl*/', 'pyobjfrom': '/*pyobjfrom*/', 'frompyobj': '/*frompyobj*/', + 'args': [], 'optargs': '', 'return': '', 'strarglens': '', 'freemem': '/*freemem*/', + 'args_td': [], 'optargs_td': '', 'strarglens_td': '', + 'args_nm': [], 'optargs_nm': '', 'strarglens_nm': '', + 'noargs': '', + 'setdims': '/*setdims*/', + 'docstrsigns': '', 'latexdocstrsigns': '', + 'docstrreq': ' Required arguments:', + 'docstropt': ' Optional arguments:', + 'docstrout': ' Return objects:', + 'docstrcbs': ' Call-back functions:', + 'docreturn': '', 'docsign': '', 'docsignopt': '', + 'latexdocstrreq': '\\noindent Required arguments:', + 'latexdocstropt': '\\noindent Optional arguments:', + 'latexdocstrout': '\\noindent Return objects:', + 'latexdocstrcbs': '\\noindent Call-back functions:', + 'routnote': {hasnote: '--- #note#', l_not(hasnote): ''}, + }, { # Function + 'decl': ' #ctype# return_value = 0;', + 'frompyobj': [ + {debugcapi: ' CFUNCSMESS("cb:Getting return_value->");'}, + '''\ + if (capi_j>capi_i) { + GETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#, + "#ctype#_from_pyobj failed in converting return_value of" + " call-back function #name# to C #ctype#\\n"); + } else { + fprintf(stderr,"Warning: call-back function #name# did not provide" + " return value (index=%d, type=#ctype#)\\n",capi_i); + }''', + {debugcapi: + ' fprintf(stderr,"#showvalueformat#.\\n",return_value);'} + ], + 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, 'GETSCALARFROMPYTUPLE'], + 'return': ' return return_value;', + '_check': l_and(isfunction, l_not(isstringfunction), l_not(iscomplexfunction)) + }, + { # String function + 'pyobjfrom': {debugcapi: ' fprintf(stderr,"debug-capi:cb:#name#:%d:\\n",return_value_len);'}, + 'args': '#ctype# return_value,int return_value_len', + 'args_nm': 'return_value,&return_value_len', + 'args_td': '#ctype# ,int', + 'frompyobj': [ + {debugcapi: ' CFUNCSMESS("cb:Getting return_value->\\"");'}, + """\ + if (capi_j>capi_i) { + GETSTRFROMPYTUPLE(capi_return,capi_i++,return_value,return_value_len); + } else { + fprintf(stderr,"Warning: call-back function #name# did not provide" + " return value (index=%d, type=#ctype#)\\n",capi_i); + }""", + {debugcapi: + ' fprintf(stderr,"#showvalueformat#\\".\\n",return_value);'} + ], + 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, + 'string.h', 'GETSTRFROMPYTUPLE'], + 'return': 'return;', + '_check': isstringfunction + }, + { # Complex function + 'optargs': """ +#ifndef F2PY_CB_RETURNCOMPLEX +#ctype# *return_value +#endif +""", + 'optargs_nm': """ +#ifndef F2PY_CB_RETURNCOMPLEX +return_value +#endif +""", + 'optargs_td': """ +#ifndef F2PY_CB_RETURNCOMPLEX +#ctype# * +#endif +""", + 'decl': """ +#ifdef F2PY_CB_RETURNCOMPLEX + #ctype# return_value = {0, 0}; +#endif +""", + 'frompyobj': [ + {debugcapi: ' CFUNCSMESS("cb:Getting return_value->");'}, + """\ + if (capi_j>capi_i) { +#ifdef F2PY_CB_RETURNCOMPLEX + GETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#, + \"#ctype#_from_pyobj failed in converting return_value of call-back\" + \" function #name# to C #ctype#\\n\"); +#else + GETSCALARFROMPYTUPLE(capi_return,capi_i++,return_value,#ctype#, + \"#ctype#_from_pyobj failed in converting return_value of call-back\" + \" function #name# to C #ctype#\\n\"); +#endif + } else { + fprintf(stderr, + \"Warning: call-back function #name# did not provide\" + \" return value (index=%d, type=#ctype#)\\n\",capi_i); + }""", + {debugcapi: """\ +#ifdef F2PY_CB_RETURNCOMPLEX + fprintf(stderr,\"#showvalueformat#.\\n\",(return_value).r,(return_value).i); +#else + fprintf(stderr,\"#showvalueformat#.\\n\",(*return_value).r,(*return_value).i); +#endif +"""} + ], + 'return': """ +#ifdef F2PY_CB_RETURNCOMPLEX + return return_value; +#else + return; +#endif +""", + 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, + 'string.h', 'GETSCALARFROMPYTUPLE', '#ctype#'], + '_check': iscomplexfunction + }, + {'docstrout': ' #pydocsignout#', + 'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}', + {hasnote: '--- #note#'}], + 'docreturn': '#rname#,', + '_check': isfunction}, + {'_check': issubroutine, 'return': 'return;'} +] + +cb_arg_rules = [ + { # Doc + 'docstropt': {l_and(isoptional, isintent_nothide): ' #pydocsign#'}, + 'docstrreq': {l_and(isrequired, isintent_nothide): ' #pydocsign#'}, + 'docstrout': {isintent_out: ' #pydocsignout#'}, + 'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote: '--- #note#'}]}, + 'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote: '--- #note#'}]}, + 'latexdocstrout': {isintent_out: ['\\item[]{{}\\verb@#pydocsignout#@{}}', + {l_and(hasnote, isintent_hide): '--- #note#', + l_and(hasnote, isintent_nothide): '--- See above.'}]}, + 'docsign': {l_and(isrequired, isintent_nothide): '#varname#,'}, + 'docsignopt': {l_and(isoptional, isintent_nothide): '#varname#,'}, + 'depend': '' + }, + { + 'args': { + l_and(isscalar, isintent_c): '#ctype# #varname_i#', + l_and(isscalar, l_not(isintent_c)): '#ctype# *#varname_i#_cb_capi', + isarray: '#ctype# *#varname_i#', + isstring: '#ctype# #varname_i#' + }, + 'args_nm': { + l_and(isscalar, isintent_c): '#varname_i#', + l_and(isscalar, l_not(isintent_c)): '#varname_i#_cb_capi', + isarray: '#varname_i#', + isstring: '#varname_i#' + }, + 'args_td': { + l_and(isscalar, isintent_c): '#ctype#', + l_and(isscalar, l_not(isintent_c)): '#ctype# *', + isarray: '#ctype# *', + isstring: '#ctype#' + }, + 'need': {l_or(isscalar, isarray, isstring): '#ctype#'}, + # untested with multiple args + 'strarglens': {isstring: ',int #varname_i#_cb_len'}, + 'strarglens_td': {isstring: ',int'}, # untested with multiple args + # untested with multiple args + 'strarglens_nm': {isstring: ',#varname_i#_cb_len'}, + }, + { # Scalars + 'decl': {l_not(isintent_c): ' #ctype# #varname_i#=(*#varname_i#_cb_capi);'}, + 'error': {l_and(isintent_c, isintent_out, + throw_error('intent(c,out) is forbidden for callback scalar arguments')): + ''}, + 'frompyobj': [{debugcapi: ' CFUNCSMESS("cb:Getting #varname#->");'}, + {isintent_out: + ' if (capi_j>capi_i)\n GETSCALARFROMPYTUPLE(capi_return,capi_i++,#varname_i#_cb_capi,#ctype#,"#ctype#_from_pyobj failed in converting argument #varname# of call-back function #name# to C #ctype#\\n");'}, + {l_and(debugcapi, l_and(l_not(iscomplex), isintent_c)): + ' fprintf(stderr,"#showvalueformat#.\\n",#varname_i#);'}, + {l_and(debugcapi, l_and(l_not(iscomplex), l_not(isintent_c))): + ' fprintf(stderr,"#showvalueformat#.\\n",*#varname_i#_cb_capi);'}, + {l_and(debugcapi, l_and(iscomplex, isintent_c)): + ' fprintf(stderr,"#showvalueformat#.\\n",(#varname_i#).r,(#varname_i#).i);'}, + {l_and(debugcapi, l_and(iscomplex, l_not(isintent_c))): + ' fprintf(stderr,"#showvalueformat#.\\n",(*#varname_i#_cb_capi).r,(*#varname_i#_cb_capi).i);'}, + ], + 'need': [{isintent_out: ['#ctype#_from_pyobj', 'GETSCALARFROMPYTUPLE']}, + {debugcapi: 'CFUNCSMESS'}], + '_check': isscalar + }, { + 'pyobjfrom': [{isintent_in: """\ + if (cb->nofargs>capi_i) + if (CAPI_ARGLIST_SETITEM(capi_i++,pyobj_from_#ctype#1(#varname_i#))) + goto capi_fail;"""}, + {isintent_inout: """\ + if (cb->nofargs>capi_i) + if (CAPI_ARGLIST_SETITEM(capi_i++,pyarr_from_p_#ctype#1(#varname_i#_cb_capi))) + goto capi_fail;"""}], + 'need': [{isintent_in: 'pyobj_from_#ctype#1'}, + {isintent_inout: 'pyarr_from_p_#ctype#1'}, + {iscomplex: '#ctype#'}], + '_check': l_and(isscalar, isintent_nothide), + '_optional': '' + }, { # String + 'frompyobj': [{debugcapi: ' CFUNCSMESS("cb:Getting #varname#->\\"");'}, + """ if (capi_j>capi_i) + GETSTRFROMPYTUPLE(capi_return,capi_i++,#varname_i#,#varname_i#_cb_len);""", + {debugcapi: + ' fprintf(stderr,"#showvalueformat#\\":%d:.\\n",#varname_i#,#varname_i#_cb_len);'}, + ], + 'need': ['#ctype#', 'GETSTRFROMPYTUPLE', + {debugcapi: 'CFUNCSMESS'}, 'string.h'], + '_check': l_and(isstring, isintent_out) + }, { + 'pyobjfrom': [ + {debugcapi: + (' fprintf(stderr,"debug-capi:cb:#varname#=#showvalueformat#:' + '%d:\\n",#varname_i#,#varname_i#_cb_len);')}, + {isintent_in: """\ + if (cb->nofargs>capi_i) + if (CAPI_ARGLIST_SETITEM(capi_i++,pyobj_from_#ctype#1size(#varname_i#,#varname_i#_cb_len))) + goto capi_fail;"""}, + {isintent_inout: """\ + if (cb->nofargs>capi_i) { + int #varname_i#_cb_dims[] = {#varname_i#_cb_len}; + if (CAPI_ARGLIST_SETITEM(capi_i++,pyarr_from_p_#ctype#1(#varname_i#,#varname_i#_cb_dims))) + goto capi_fail; + }"""}], + 'need': [{isintent_in: 'pyobj_from_#ctype#1size'}, + {isintent_inout: 'pyarr_from_p_#ctype#1'}], + '_check': l_and(isstring, isintent_nothide), + '_optional': '' + }, + # Array ... + { + 'decl': ' npy_intp #varname_i#_Dims[#rank#] = {#rank*[-1]#};', + 'setdims': ' #cbsetdims#;', + '_check': isarray, + '_depend': '' + }, + { + 'pyobjfrom': [{debugcapi: ' fprintf(stderr,"debug-capi:cb:#varname#\\n");'}, + {isintent_c: """\ + if (cb->nofargs>capi_i) { + /* tmp_arr will be inserted to capi_arglist_list that will be + destroyed when leaving callback function wrapper together + with tmp_arr. */ + PyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type, + #rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,#elsize#, + NPY_ARRAY_CARRAY,NULL); +""", + l_not(isintent_c): """\ + if (cb->nofargs>capi_i) { + /* tmp_arr will be inserted to capi_arglist_list that will be + destroyed when leaving callback function wrapper together + with tmp_arr. */ + PyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type, + #rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,#elsize#, + NPY_ARRAY_FARRAY,NULL); +""", + }, + """ + if (tmp_arr==NULL) + goto capi_fail; + if (CAPI_ARGLIST_SETITEM(capi_i++,(PyObject *)tmp_arr)) + goto capi_fail; +}"""], + '_check': l_and(isarray, isintent_nothide, l_or(isintent_in, isintent_inout)), + '_optional': '', + }, { + 'frompyobj': [{debugcapi: ' CFUNCSMESS("cb:Getting #varname#->");'}, + """ if (capi_j>capi_i) { + PyArrayObject *rv_cb_arr = NULL; + if ((capi_tmp = PyTuple_GetItem(capi_return,capi_i++))==NULL) goto capi_fail; + rv_cb_arr = array_from_pyobj(#atype#,#varname_i#_Dims,#rank#,F2PY_INTENT_IN""", + {isintent_c: '|F2PY_INTENT_C'}, + """,capi_tmp); + if (rv_cb_arr == NULL) { + fprintf(stderr,\"rv_cb_arr is NULL\\n\"); + goto capi_fail; + } + MEMCOPY(#varname_i#,PyArray_DATA(rv_cb_arr),PyArray_NBYTES(rv_cb_arr)); + if (capi_tmp != (PyObject *)rv_cb_arr) { + Py_DECREF(rv_cb_arr); + } + }""", + {debugcapi: ' fprintf(stderr,"<-.\\n");'}, + ], + 'need': ['MEMCOPY', {iscomplexarray: '#ctype#'}], + '_check': l_and(isarray, isintent_out) + }, { + 'docreturn': '#varname#,', + '_check': isintent_out + } +] + +################## Build call-back module ############# +cb_map = {} + + +def buildcallbacks(m): + cb_map[m['name']] = [] + for bi in m['body']: + if bi['block'] == 'interface': + for b in bi['body']: + if b: + buildcallback(b, m['name']) + else: + errmess(f"warning: empty body for {m['name']}\n") + + +def buildcallback(rout, um): + from . import capi_maps + + outmess(f" Constructing call-back function \"cb_{rout['name']}_in_{um}\"\n") + args, depargs = getargs(rout) + capi_maps.depargs = depargs + var = rout['vars'] + vrd = capi_maps.cb_routsign2map(rout, um) + rd = dictappend({}, vrd) + cb_map[um].append([rout['name'], rd['name']]) + for r in cb_rout_rules: + if ('_check' in r and r['_check'](rout)) or ('_check' not in r): + ar = applyrules(r, vrd, rout) + rd = dictappend(rd, ar) + savevrd = {} + for i, a in enumerate(args): + vrd = capi_maps.cb_sign2map(a, var[a], index=i) + savevrd[a] = vrd + for r in cb_arg_rules: + if '_depend' in r: + continue + if '_optional' in r and isoptional(var[a]): + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + for a in args: + vrd = savevrd[a] + for r in cb_arg_rules: + if '_depend' in r: + continue + if ('_optional' not in r) or ('_optional' in r and isrequired(var[a])): + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + for a in depargs: + vrd = savevrd[a] + for r in cb_arg_rules: + if '_depend' not in r: + continue + if '_optional' in r: + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + if 'args' in rd and 'optargs' in rd: + if isinstance(rd['optargs'], list): + rd['optargs'] = rd['optargs'] + [""" +#ifndef F2PY_CB_RETURNCOMPLEX +, +#endif +"""] + rd['optargs_nm'] = rd['optargs_nm'] + [""" +#ifndef F2PY_CB_RETURNCOMPLEX +, +#endif +"""] + rd['optargs_td'] = rd['optargs_td'] + [""" +#ifndef F2PY_CB_RETURNCOMPLEX +, +#endif +"""] + if isinstance(rd['docreturn'], list): + rd['docreturn'] = stripcomma( + replace('#docreturn#', {'docreturn': rd['docreturn']})) + optargs = stripcomma(replace('#docsignopt#', + {'docsignopt': rd['docsignopt']} + )) + if optargs == '': + rd['docsignature'] = stripcomma( + replace('#docsign#', {'docsign': rd['docsign']})) + else: + rd['docsignature'] = replace('#docsign#[#docsignopt#]', + {'docsign': rd['docsign'], + 'docsignopt': optargs, + }) + rd['latexdocsignature'] = rd['docsignature'].replace('_', '\\_') + rd['latexdocsignature'] = rd['latexdocsignature'].replace(',', ', ') + rd['docstrsigns'] = [] + rd['latexdocstrsigns'] = [] + for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']: + if k in rd and isinstance(rd[k], list): + rd['docstrsigns'] = rd['docstrsigns'] + rd[k] + k = 'latex' + k + if k in rd and isinstance(rd[k], list): + rd['latexdocstrsigns'] = rd['latexdocstrsigns'] + rd[k][0:1] +\ + ['\\begin{description}'] + rd[k][1:] +\ + ['\\end{description}'] + if 'args' not in rd: + rd['args'] = '' + rd['args_td'] = '' + rd['args_nm'] = '' + if not (rd.get('args') or rd.get('optargs') or rd.get('strarglens')): + rd['noargs'] = 'void' + + ar = applyrules(cb_routine_rules, rd) + cfuncs.callbacks[rd['name']] = ar['body'] + if isinstance(ar['need'], str): + ar['need'] = [ar['need']] + + if 'need' in rd: + for t in cfuncs.typedefs.keys(): + if t in rd['need']: + ar['need'].append(t) + + cfuncs.typedefs_generated[rd['name'] + '_typedef'] = ar['cbtypedefs'] + ar['need'].append(rd['name'] + '_typedef') + cfuncs.needs[rd['name']] = ar['need'] + + capi_maps.lcb2_map[rd['name']] = {'maxnofargs': ar['maxnofargs'], + 'nofoptargs': ar['nofoptargs'], + 'docstr': ar['docstr'], + 'latexdocstr': ar['latexdocstr'], + 'argname': rd['argname'] + } + outmess(f" {ar['docstrshort']}\n") +################## Build call-back function ############# diff --git a/local_packages/numpy/f2py/cb_rules.pyi b/local_packages/numpy/f2py/cb_rules.pyi new file mode 100644 index 0000000..b22f544 --- /dev/null +++ b/local_packages/numpy/f2py/cb_rules.pyi @@ -0,0 +1,17 @@ +from collections.abc import Mapping +from typing import Any, Final + +from .__version__ import version + +## + +f2py_version: Final = version + +cb_routine_rules: Final[dict[str, str | list[str]]] = ... +cb_rout_rules: Final[list[dict[str, str | Any]]] = ... +cb_arg_rules: Final[list[dict[str, str | Any]]] = ... + +cb_map: Final[dict[str, list[list[str]]]] = ... + +def buildcallbacks(m: Mapping[str, object]) -> None: ... +def buildcallback(rout: Mapping[str, object], um: Mapping[str, object]) -> None: ... diff --git a/local_packages/numpy/f2py/cfuncs.py b/local_packages/numpy/f2py/cfuncs.py new file mode 100644 index 0000000..b2b1cad --- /dev/null +++ b/local_packages/numpy/f2py/cfuncs.py @@ -0,0 +1,1563 @@ +""" +C declarations, CPP macros, and C functions for f2py2e. +Only required declarations/macros/functions will be used. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +import copy +import sys + +from . import __version__ + +f2py_version = __version__.version + + +def errmess(s: str) -> None: + """ + Write an error message to stderr. + + This indirection is needed because sys.stderr might not always be available (see #26862). + """ + if sys.stderr is not None: + sys.stderr.write(s) + +##################### Definitions ################## + + +outneeds = {'includes0': [], 'includes': [], 'typedefs': [], 'typedefs_generated': [], + 'userincludes': [], + 'cppmacros': [], 'cfuncs': [], 'callbacks': [], 'f90modhooks': [], + 'commonhooks': []} +needs = {} +includes0 = {'includes0': '/*need_includes0*/'} +includes = {'includes': '/*need_includes*/'} +userincludes = {'userincludes': '/*need_userincludes*/'} +typedefs = {'typedefs': '/*need_typedefs*/'} +typedefs_generated = {'typedefs_generated': '/*need_typedefs_generated*/'} +cppmacros = {'cppmacros': '/*need_cppmacros*/'} +cfuncs = {'cfuncs': '/*need_cfuncs*/'} +callbacks = {'callbacks': '/*need_callbacks*/'} +f90modhooks = {'f90modhooks': '/*need_f90modhooks*/', + 'initf90modhooksstatic': '/*initf90modhooksstatic*/', + 'initf90modhooksdynamic': '/*initf90modhooksdynamic*/', + } +commonhooks = {'commonhooks': '/*need_commonhooks*/', + 'initcommonhooks': '/*need_initcommonhooks*/', + } + +############ Includes ################### + +includes0['math.h'] = '#include ' +includes0['string.h'] = '#include ' +includes0['setjmp.h'] = '#include ' + +includes['arrayobject.h'] = '''#define PY_ARRAY_UNIQUE_SYMBOL PyArray_API +#include "arrayobject.h"''' +includes['npy_math.h'] = '#include "numpy/npy_math.h"' + +includes['arrayobject.h'] = '#include "fortranobject.h"' +includes['stdarg.h'] = '#include ' + +############# Type definitions ############### + +typedefs['unsigned_char'] = 'typedef unsigned char unsigned_char;' +typedefs['unsigned_short'] = 'typedef unsigned short unsigned_short;' +typedefs['unsigned_long'] = 'typedef unsigned long unsigned_long;' +typedefs['signed_char'] = 'typedef signed char signed_char;' +typedefs['long_long'] = """ +#if defined(NPY_OS_WIN32) +typedef __int64 long_long; +#else +typedef long long long_long; +typedef unsigned long long unsigned_long_long; +#endif +""" +typedefs['unsigned_long_long'] = """ +#if defined(NPY_OS_WIN32) +typedef __uint64 long_long; +#else +typedef unsigned long long unsigned_long_long; +#endif +""" +typedefs['long_double'] = """ +#ifndef _LONG_DOUBLE +typedef long double long_double; +#endif +""" +typedefs[ + 'complex_long_double'] = 'typedef struct {long double r,i;} complex_long_double;' +typedefs['complex_float'] = 'typedef struct {float r,i;} complex_float;' +typedefs['complex_double'] = 'typedef struct {double r,i;} complex_double;' +typedefs['string'] = """typedef char * string;""" +typedefs['character'] = """typedef char character;""" + + +############### CPP macros #################### +cppmacros['CFUNCSMESS'] = """ +#ifdef DEBUGCFUNCS +#define CFUNCSMESS(mess) fprintf(stderr,\"debug-capi:\"mess); +#define CFUNCSMESSPY(mess,obj) CFUNCSMESS(mess) \\ + PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ + fprintf(stderr,\"\\n\"); +#else +#define CFUNCSMESS(mess) +#define CFUNCSMESSPY(mess,obj) +#endif +""" +cppmacros['F_FUNC'] = """ +#if defined(PREPEND_FORTRAN) +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_FUNC(f,F) _##F +#else +#define F_FUNC(f,F) _##f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_FUNC(f,F) _##F##_ +#else +#define F_FUNC(f,F) _##f##_ +#endif +#endif +#else +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_FUNC(f,F) F +#else +#define F_FUNC(f,F) f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_FUNC(f,F) F##_ +#else +#define F_FUNC(f,F) f##_ +#endif +#endif +#endif +#if defined(UNDERSCORE_G77) +#define F_FUNC_US(f,F) F_FUNC(f##_,F##_) +#else +#define F_FUNC_US(f,F) F_FUNC(f,F) +#endif +""" +cppmacros['F_WRAPPEDFUNC'] = """ +#if defined(PREPEND_FORTRAN) +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F +#else +#define F_WRAPPEDFUNC(f,F) _f2pywrap##f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F##_ +#else +#define F_WRAPPEDFUNC(f,F) _f2pywrap##f##_ +#endif +#endif +#else +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F +#else +#define F_WRAPPEDFUNC(f,F) f2pywrap##f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F##_ +#else +#define F_WRAPPEDFUNC(f,F) f2pywrap##f##_ +#endif +#endif +#endif +#if defined(UNDERSCORE_G77) +#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f##_,F##_) +#else +#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f,F) +#endif +""" +cppmacros['F_MODFUNC'] = """ +#if defined(F90MOD2CCONV1) /*E.g. Compaq Fortran */ +#if defined(NO_APPEND_FORTRAN) +#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f +#else +#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f ## _ +#endif +#endif + +#if defined(F90MOD2CCONV2) /*E.g. IBM XL Fortran, not tested though */ +#if defined(NO_APPEND_FORTRAN) +#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f +#else +#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f ## _ +#endif +#endif + +#if defined(F90MOD2CCONV3) /*E.g. MIPSPro Compilers */ +#if defined(NO_APPEND_FORTRAN) +#define F_MODFUNCNAME(m,f) f ## .in. ## m +#else +#define F_MODFUNCNAME(m,f) f ## .in. ## m ## _ +#endif +#endif +/* +#if defined(UPPERCASE_FORTRAN) +#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(M,F) +#else +#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(m,f) +#endif +*/ + +#define F_MODFUNC(m,f) (*(f2pymodstruct##m##.##f)) +""" +cppmacros['SWAPUNSAFE'] = """ +#define SWAP(a,b) (size_t)(a) = ((size_t)(a) ^ (size_t)(b));\\ + (size_t)(b) = ((size_t)(a) ^ (size_t)(b));\\ + (size_t)(a) = ((size_t)(a) ^ (size_t)(b)) +""" +cppmacros['SWAP'] = """ +#define SWAP(a,b,t) {\\ + t *c;\\ + c = a;\\ + a = b;\\ + b = c;} +""" +# cppmacros['ISCONTIGUOUS']='#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) & +# NPY_ARRAY_C_CONTIGUOUS)' +cppmacros['PRINTPYOBJERR'] = """ +#define PRINTPYOBJERR(obj)\\ + fprintf(stderr,\"#modulename#.error is related to \");\\ + PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ + fprintf(stderr,\"\\n\"); +""" +cppmacros['MINMAX'] = """ +#ifndef max +#define max(a,b) ((a > b) ? (a) : (b)) +#endif +#ifndef min +#define min(a,b) ((a < b) ? (a) : (b)) +#endif +#ifndef MAX +#define MAX(a,b) ((a > b) ? (a) : (b)) +#endif +#ifndef MIN +#define MIN(a,b) ((a < b) ? (a) : (b)) +#endif +""" +cppmacros['len..'] = """ +/* See fortranobject.h for definitions. The macros here are provided for BC. */ +#define rank f2py_rank +#define shape f2py_shape +#define fshape f2py_shape +#define len f2py_len +#define flen f2py_flen +#define slen f2py_slen +#define size f2py_size +""" +cppmacros['pyobj_from_char1'] = r""" +#define pyobj_from_char1(v) (PyLong_FromLong(v)) +""" +cppmacros['pyobj_from_short1'] = r""" +#define pyobj_from_short1(v) (PyLong_FromLong(v)) +""" +needs['pyobj_from_int1'] = ['signed_char'] +cppmacros['pyobj_from_int1'] = r""" +#define pyobj_from_int1(v) (PyLong_FromLong(v)) +""" +cppmacros['pyobj_from_long1'] = r""" +#define pyobj_from_long1(v) (PyLong_FromLong(v)) +""" +needs['pyobj_from_long_long1'] = ['long_long'] +cppmacros['pyobj_from_long_long1'] = """ +#ifdef HAVE_LONG_LONG +#define pyobj_from_long_long1(v) (PyLong_FromLongLong(v)) +#else +#warning HAVE_LONG_LONG is not available. Redefining pyobj_from_long_long. +#define pyobj_from_long_long1(v) (PyLong_FromLong(v)) +#endif +""" +needs['pyobj_from_long_double1'] = ['long_double'] +cppmacros['pyobj_from_long_double1'] = """ +#define pyobj_from_long_double1(v) (PyFloat_FromDouble(v))""" +cppmacros['pyobj_from_double1'] = """ +#define pyobj_from_double1(v) (PyFloat_FromDouble(v))""" +cppmacros['pyobj_from_float1'] = """ +#define pyobj_from_float1(v) (PyFloat_FromDouble(v))""" +needs['pyobj_from_complex_long_double1'] = ['complex_long_double'] +cppmacros['pyobj_from_complex_long_double1'] = """ +#define pyobj_from_complex_long_double1(v) (PyComplex_FromDoubles(v.r,v.i))""" +needs['pyobj_from_complex_double1'] = ['complex_double'] +cppmacros['pyobj_from_complex_double1'] = """ +#define pyobj_from_complex_double1(v) (PyComplex_FromDoubles(v.r,v.i))""" +needs['pyobj_from_complex_float1'] = ['complex_float'] +cppmacros['pyobj_from_complex_float1'] = """ +#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles(v.r,v.i))""" +needs['pyobj_from_string1'] = ['string'] +cppmacros['pyobj_from_string1'] = """ +#define pyobj_from_string1(v) (PyUnicode_FromString((char *)v))""" +needs['pyobj_from_string1size'] = ['string'] +cppmacros['pyobj_from_string1size'] = """ +#define pyobj_from_string1size(v,len) (PyUnicode_FromStringAndSize((char *)v, len))""" +needs['TRYPYARRAYTEMPLATE'] = ['PRINTPYOBJERR'] +cppmacros['TRYPYARRAYTEMPLATE'] = """ +/* New SciPy */ +#define TRYPYARRAYTEMPLATECHAR case NPY_STRING: *(char *)(PyArray_DATA(arr))=*v; break; +#define TRYPYARRAYTEMPLATELONG case NPY_LONG: *(long *)(PyArray_DATA(arr))=*v; break; +#define TRYPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr,PyArray_DATA(arr),pyobj_from_ ## ctype ## 1(*v)); break; + +#define TRYPYARRAYTEMPLATE(ctype,typecode) \\ + PyArrayObject *arr = NULL;\\ + if (!obj) return -2;\\ + if (!PyArray_Check(obj)) return -1;\\ + if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ + if (PyArray_DESCR(arr)->type==typecode) {*(ctype *)(PyArray_DATA(arr))=*v; return 1;}\\ + switch (PyArray_TYPE(arr)) {\\ + case NPY_DOUBLE: *(npy_double *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_INT: *(npy_int *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_LONG: *(npy_long *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_FLOAT: *(npy_float *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_CDOUBLE: *(npy_double *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_CFLOAT: *(npy_float *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=(*v!=0); break;\\ + case NPY_UBYTE: *(npy_ubyte *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_BYTE: *(npy_byte *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_SHORT: *(npy_short *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_ ## ctype ## 1(*v)); break;\\ + default: return -2;\\ + };\\ + return 1 +""" + +needs['TRYCOMPLEXPYARRAYTEMPLATE'] = ['PRINTPYOBJERR'] +cppmacros['TRYCOMPLEXPYARRAYTEMPLATE'] = """ +#define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break; +#define TRYCOMPLEXPYARRAYTEMPLATE(ctype,typecode)\\ + PyArrayObject *arr = NULL;\\ + if (!obj) return -2;\\ + if (!PyArray_Check(obj)) return -1;\\ + if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYCOMPLEXPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ + if (PyArray_DESCR(arr)->type==typecode) {\\ + *(ctype *)(PyArray_DATA(arr))=(*v).r;\\ + *(ctype *)(PyArray_DATA(arr)+sizeof(ctype))=(*v).i;\\ + return 1;\\ + }\\ + switch (PyArray_TYPE(arr)) {\\ + case NPY_CDOUBLE: *(npy_double *)(PyArray_DATA(arr))=(*v).r;\\ + *(npy_double *)(PyArray_DATA(arr)+sizeof(npy_double))=(*v).i;\\ + break;\\ + case NPY_CFLOAT: *(npy_float *)(PyArray_DATA(arr))=(*v).r;\\ + *(npy_float *)(PyArray_DATA(arr)+sizeof(npy_float))=(*v).i;\\ + break;\\ + case NPY_DOUBLE: *(npy_double *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_LONG: *(npy_long *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_FLOAT: *(npy_float *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_INT: *(npy_int *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_SHORT: *(npy_short *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_UBYTE: *(npy_ubyte *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_BYTE: *(npy_byte *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=((*v).r!=0 && (*v).i!=0); break;\\ + case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r;\\ + *(npy_longdouble *)(PyArray_DATA(arr)+sizeof(npy_longdouble))=(*v).i;\\ + break;\\ + case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break;\\ + default: return -2;\\ + };\\ + return -1; +""" +# cppmacros['NUMFROMARROBJ']=""" +# define NUMFROMARROBJ(typenum,ctype) \\ +# if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ +# else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ +# if (arr) {\\ +# if (PyArray_TYPE(arr)==NPY_OBJECT) {\\ +# if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\ +# goto capi_fail;\\ +# } else {\\ +# (PyArray_DESCR(arr)->cast[typenum])(PyArray_DATA(arr),1,(char*)v,1,1);\\ +# }\\ +# if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ +# return 1;\\ +# } +# """ +# XXX: Note that CNUMFROMARROBJ is identical with NUMFROMARROBJ +# cppmacros['CNUMFROMARROBJ']=""" +# define CNUMFROMARROBJ(typenum,ctype) \\ +# if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ +# else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ +# if (arr) {\\ +# if (PyArray_TYPE(arr)==NPY_OBJECT) {\\ +# if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\ +# goto capi_fail;\\ +# } else {\\ +# (PyArray_DESCR(arr)->cast[typenum])((void *)(PyArray_DATA(arr)),1,(void *)(v),1,1);\\ +# }\\ +# if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ +# return 1;\\ +# } +# """ + + +needs['GETSTRFROMPYTUPLE'] = ['STRINGCOPYN', 'PRINTPYOBJERR'] +cppmacros['GETSTRFROMPYTUPLE'] = """ +#define GETSTRFROMPYTUPLE(tuple,index,str,len) {\\ + PyObject *rv_cb_str = PyTuple_GetItem((tuple),(index));\\ + if (rv_cb_str == NULL)\\ + goto capi_fail;\\ + if (PyBytes_Check(rv_cb_str)) {\\ + str[len-1]='\\0';\\ + STRINGCOPYN((str),PyBytes_AS_STRING((PyBytesObject*)rv_cb_str),(len));\\ + } else {\\ + PRINTPYOBJERR(rv_cb_str);\\ + PyErr_SetString(#modulename#_error,\"string object expected\");\\ + goto capi_fail;\\ + }\\ + } +""" +cppmacros['GETSCALARFROMPYTUPLE'] = """ +#define GETSCALARFROMPYTUPLE(tuple,index,var,ctype,mess) {\\ + if ((capi_tmp = PyTuple_GetItem((tuple),(index)))==NULL) goto capi_fail;\\ + if (!(ctype ## _from_pyobj((var),capi_tmp,mess)))\\ + goto capi_fail;\\ + } +""" + +cppmacros['FAILNULL'] = """\ +#define FAILNULL(p) do { \\ + if ((p) == NULL) { \\ + PyErr_SetString(PyExc_MemoryError, "NULL pointer found"); \\ + goto capi_fail; \\ + } \\ +} while (0) +""" +needs['MEMCOPY'] = ['string.h', 'FAILNULL'] +cppmacros['MEMCOPY'] = """ +#define MEMCOPY(to,from,n)\\ + do { FAILNULL(to); FAILNULL(from); (void)memcpy(to,from,n); } while (0) +""" +cppmacros['STRINGMALLOC'] = """ +#define STRINGMALLOC(str,len)\\ + if ((str = (string)malloc(len+1)) == NULL) {\\ + PyErr_SetString(PyExc_MemoryError, \"out of memory\");\\ + goto capi_fail;\\ + } else {\\ + (str)[len] = '\\0';\\ + } +""" +cppmacros['STRINGFREE'] = """ +#define STRINGFREE(str) do {if (!(str == NULL)) free(str);} while (0) +""" +needs['STRINGPADN'] = ['string.h'] +cppmacros['STRINGPADN'] = """ +/* +STRINGPADN replaces null values with padding values from the right. + +`to` must have size of at least N bytes. + +If the `to[N-1]` has null value, then replace it and all the +preceding, nulls with the given padding. + +STRINGPADN(to, N, PADDING, NULLVALUE) is an inverse operation. +*/ +#define STRINGPADN(to, N, NULLVALUE, PADDING) \\ + do { \\ + int _m = (N); \\ + char *_to = (to); \\ + for (_m -= 1; _m >= 0 && _to[_m] == NULLVALUE; _m--) { \\ + _to[_m] = PADDING; \\ + } \\ + } while (0) +""" +needs['STRINGCOPYN'] = ['string.h', 'FAILNULL'] +cppmacros['STRINGCOPYN'] = """ +/* +STRINGCOPYN copies N bytes. + +`to` and `from` buffers must have sizes of at least N bytes. +*/ +#define STRINGCOPYN(to,from,N) \\ + do { \\ + int _m = (N); \\ + char *_to = (to); \\ + char *_from = (from); \\ + FAILNULL(_to); FAILNULL(_from); \\ + (void)strncpy(_to, _from, _m); \\ + } while (0) +""" +needs['STRINGCOPY'] = ['string.h', 'FAILNULL'] +cppmacros['STRINGCOPY'] = """ +#define STRINGCOPY(to,from)\\ + do { FAILNULL(to); FAILNULL(from); (void)strcpy(to,from); } while (0) +""" +cppmacros['CHECKGENERIC'] = """ +#define CHECKGENERIC(check,tcheck,name) \\ + if (!(check)) {\\ + PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ + /*goto capi_fail;*/\\ + } else """ +cppmacros['CHECKARRAY'] = """ +#define CHECKARRAY(check,tcheck,name) \\ + if (!(check)) {\\ + PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ + /*goto capi_fail;*/\\ + } else """ +cppmacros['CHECKSTRING'] = """ +#define CHECKSTRING(check,tcheck,name,show,var)\\ + if (!(check)) {\\ + char errstring[256];\\ + sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\ + PyErr_SetString(#modulename#_error, errstring);\\ + /*goto capi_fail;*/\\ + } else """ +cppmacros['CHECKSCALAR'] = """ +#define CHECKSCALAR(check,tcheck,name,show,var)\\ + if (!(check)) {\\ + char errstring[256];\\ + sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\ + PyErr_SetString(#modulename#_error,errstring);\\ + /*goto capi_fail;*/\\ + } else """ +# cppmacros['CHECKDIMS']=""" +# define CHECKDIMS(dims,rank) \\ +# for (int i=0;i<(rank);i++)\\ +# if (dims[i]<0) {\\ +# fprintf(stderr,\"Unspecified array argument requires a complete dimension specification.\\n\");\\ +# goto capi_fail;\\ +# } +# """ +cppmacros[ + 'ARRSIZE'] = '#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))' +cppmacros['OLDPYNUM'] = """ +#ifdef OLDPYNUM +#error You need to install NumPy version 0.13 or higher. See https://scipy.org/install.html +#endif +""" + +# Defining the correct value to indicate thread-local storage in C without +# running a compile-time check (which we have no control over in generated +# code used outside of NumPy) is hard. Therefore we support overriding this +# via an external define - the f2py-using package can then use the same +# compile-time checks as we use for `NPY_TLS` when building NumPy (see +# scipy#21860 for an example of that). +# +# __STDC_NO_THREADS__ should not be coupled to the availability of _Thread_local. +# In case we get a bug report, guard it with __STDC_NO_THREADS__ after all. +# +# `thread_local` has become a keyword in C23, but don't try to use that yet +# (too new, doing so while C23 support is preliminary will likely cause more +# problems than it solves). +# +# Note: do not try to use `threads.h`, its availability is very low +# *and* threads.h isn't actually used where `F2PY_THREAD_LOCAL_DECL` is +# in the generated code. See gh-27718 for more details. +cppmacros["F2PY_THREAD_LOCAL_DECL"] = """ +#ifndef F2PY_THREAD_LOCAL_DECL +#if defined(_MSC_VER) +#define F2PY_THREAD_LOCAL_DECL __declspec(thread) +#elif defined(NPY_OS_MINGW) +#define F2PY_THREAD_LOCAL_DECL __thread +#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) +#define F2PY_THREAD_LOCAL_DECL _Thread_local +#elif defined(__GNUC__) \\ + && (__GNUC__ > 4 || (__GNUC__ == 4 && (__GNUC_MINOR__ >= 4))) +#define F2PY_THREAD_LOCAL_DECL __thread +#endif +#endif +""" +################# C functions ############### + +cfuncs['calcarrindex'] = """ +static int calcarrindex(int *i,PyArrayObject *arr) { + int k,ii = i[0]; + for (k=1; k < PyArray_NDIM(arr); k++) + ii += (ii*(PyArray_DIM(arr,k) - 1)+i[k]); /* assuming contiguous arr */ + return ii; +}""" +cfuncs['calcarrindextr'] = """ +static int calcarrindextr(int *i,PyArrayObject *arr) { + int k,ii = i[PyArray_NDIM(arr)-1]; + for (k=1; k < PyArray_NDIM(arr); k++) + ii += (ii*(PyArray_DIM(arr,PyArray_NDIM(arr)-k-1) - 1)+i[PyArray_NDIM(arr)-k-1]); /* assuming contiguous arr */ + return ii; +}""" +cfuncs['forcomb'] = """ +struct ForcombCache { int nd;npy_intp *d;int *i,*i_tr,tr; }; +static int initforcomb(struct ForcombCache *cache, npy_intp *dims,int nd,int tr) { + int k; + if (dims==NULL) return 0; + if (nd<0) return 0; + cache->nd = nd; + cache->d = dims; + cache->tr = tr; + + cache->i = (int *)malloc(sizeof(int)*nd); + if (cache->i==NULL) return 0; + cache->i_tr = (int *)malloc(sizeof(int)*nd); + if (cache->i_tr==NULL) {free(cache->i); return 0;}; + + for (k=1;ki[k] = cache->i_tr[nd-k-1] = 0; + } + cache->i[0] = cache->i_tr[nd-1] = -1; + return 1; +} +static int *nextforcomb(struct ForcombCache *cache) { + if (cache==NULL) return NULL; + int j,*i,*i_tr,k; + int nd=cache->nd; + if ((i=cache->i) == NULL) return NULL; + if ((i_tr=cache->i_tr) == NULL) return NULL; + if (cache->d == NULL) return NULL; + i[0]++; + if (i[0]==cache->d[0]) { + j=1; + while ((jd[j]-1)) j++; + if (j==nd) { + free(i); + free(i_tr); + return NULL; + } + for (k=0;ktr) return i_tr; + return i; +}""" +needs['try_pyarr_from_string'] = ['STRINGCOPYN', 'PRINTPYOBJERR', 'string'] +cfuncs['try_pyarr_from_string'] = """ +/* + try_pyarr_from_string copies str[:len(obj)] to the data of an `ndarray`. + + If obj is an `ndarray`, it is assumed to be contiguous. + + If the specified len==-1, str must be null-terminated. +*/ +static int try_pyarr_from_string(PyObject *obj, + const string str, const int len) { +#ifdef DEBUGCFUNCS +fprintf(stderr, "try_pyarr_from_string(str='%s', len=%d, obj=%p)\\n", + (char*)str,len, obj); +#endif + if (!obj) return -2; /* Object missing */ + if (obj == Py_None) return -1; /* None */ + if (!PyArray_Check(obj)) goto capi_fail; /* not an ndarray */ + if (PyArray_Check(obj)) { + PyArrayObject *arr = (PyArrayObject *)obj; + assert(ISCONTIGUOUS(arr)); + string buf = PyArray_DATA(arr); + npy_intp n = len; + if (n == -1) { + /* Assuming null-terminated str. */ + n = strlen(str); + } + if (n > PyArray_NBYTES(arr)) { + n = PyArray_NBYTES(arr); + } + STRINGCOPYN(buf, str, n); + return 1; + } +capi_fail: + PRINTPYOBJERR(obj); + PyErr_SetString(#modulename#_error, \"try_pyarr_from_string failed\"); + return 0; +} +""" +needs['string_from_pyobj'] = ['string', 'STRINGMALLOC', 'STRINGCOPYN'] +cfuncs['string_from_pyobj'] = """ +/* + Create a new string buffer `str` of at most length `len` from a + Python string-like object `obj`. + + The string buffer has given size (len) or the size of inistr when len==-1. + + The string buffer is padded with blanks: in Fortran, trailing blanks + are insignificant contrary to C nulls. + */ +static int +string_from_pyobj(string *str, int *len, const string inistr, PyObject *obj, + const char *errmess) +{ + PyObject *tmp = NULL; + string buf = NULL; + npy_intp n = -1; +#ifdef DEBUGCFUNCS +fprintf(stderr,\"string_from_pyobj(str='%s',len=%d,inistr='%s',obj=%p)\\n\", + (char*)str, *len, (char *)inistr, obj); +#endif + if (obj == Py_None) { + n = strlen(inistr); + buf = inistr; + } + else if (PyArray_Check(obj)) { + PyArrayObject *arr = (PyArrayObject *)obj; + if (!ISCONTIGUOUS(arr)) { + PyErr_SetString(PyExc_ValueError, + \"array object is non-contiguous.\"); + goto capi_fail; + } + n = PyArray_NBYTES(arr); + buf = PyArray_DATA(arr); + n = strnlen(buf, n); + } + else { + if (PyBytes_Check(obj)) { + tmp = obj; + Py_INCREF(tmp); + } + else if (PyUnicode_Check(obj)) { + tmp = PyUnicode_AsASCIIString(obj); + } + else { + PyObject *tmp2; + tmp2 = PyObject_Str(obj); + if (tmp2) { + tmp = PyUnicode_AsASCIIString(tmp2); + Py_DECREF(tmp2); + } + else { + tmp = NULL; + } + } + if (tmp == NULL) goto capi_fail; + n = PyBytes_GET_SIZE(tmp); + buf = PyBytes_AS_STRING(tmp); + } + if (*len == -1) { + /* TODO: change the type of `len` so that we can remove this */ + if (n > NPY_MAX_INT) { + PyErr_SetString(PyExc_OverflowError, + "object too large for a 32-bit int"); + goto capi_fail; + } + *len = n; + } + else if (*len < n) { + /* discard the last (len-n) bytes of input buf */ + n = *len; + } + if (n < 0 || *len < 0 || buf == NULL) { + goto capi_fail; + } + STRINGMALLOC(*str, *len); // *str is allocated with size (*len + 1) + if (n < *len) { + /* + Pad fixed-width string with nulls. The caller will replace + nulls with blanks when the corresponding argument is not + intent(c). + */ + memset(*str + n, '\\0', *len - n); + } + STRINGCOPYN(*str, buf, n); + Py_XDECREF(tmp); + return 1; +capi_fail: + Py_XDECREF(tmp); + { + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = #modulename#_error; + } + PyErr_SetString(err, errmess); + } + return 0; +} +""" + +cfuncs['character_from_pyobj'] = """ +static int +character_from_pyobj(character* v, PyObject *obj, const char *errmess) { + if (PyBytes_Check(obj)) { + /* empty bytes has trailing null, so dereferencing is always safe */ + *v = PyBytes_AS_STRING(obj)[0]; + return 1; + } else if (PyUnicode_Check(obj)) { + PyObject* tmp = PyUnicode_AsASCIIString(obj); + if (tmp != NULL) { + *v = PyBytes_AS_STRING(tmp)[0]; + Py_DECREF(tmp); + return 1; + } + } else if (PyArray_Check(obj)) { + PyArrayObject* arr = (PyArrayObject*)obj; + if (F2PY_ARRAY_IS_CHARACTER_COMPATIBLE(arr)) { + *v = PyArray_BYTES(arr)[0]; + return 1; + } else if (F2PY_IS_UNICODE_ARRAY(arr)) { + // TODO: update when numpy will support 1-byte and + // 2-byte unicode dtypes + PyObject* tmp = PyUnicode_FromKindAndData( + PyUnicode_4BYTE_KIND, + PyArray_BYTES(arr), + (PyArray_NBYTES(arr)>0?1:0)); + if (tmp != NULL) { + if (character_from_pyobj(v, tmp, errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + } + } else if (PySequence_Check(obj)) { + PyObject* tmp = PySequence_GetItem(obj,0); + if (tmp != NULL) { + if (character_from_pyobj(v, tmp, errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + } + { + /* TODO: This error (and most other) error handling needs cleaning. */ + char mess[F2PY_MESSAGE_BUFFER_SIZE]; + strcpy(mess, errmess); + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = PyExc_TypeError; + Py_INCREF(err); + } + else { + Py_INCREF(err); + PyErr_Clear(); + } + sprintf(mess + strlen(mess), + " -- expected str|bytes|sequence-of-str-or-bytes, got "); + f2py_describe(obj, mess + strlen(mess)); + PyErr_SetString(err, mess); + Py_DECREF(err); + } + return 0; +} +""" + +# TODO: These should be dynamically generated, too many mapped to int things, +# see note in _isocbind.py +needs['char_from_pyobj'] = ['int_from_pyobj'] +cfuncs['char_from_pyobj'] = """ +static int +char_from_pyobj(char* v, PyObject *obj, const char *errmess) { + int i = 0; + if (int_from_pyobj(&i, obj, errmess)) { + *v = (char)i; + return 1; + } + return 0; +} +""" + + +needs['signed_char_from_pyobj'] = ['int_from_pyobj', 'signed_char'] +cfuncs['signed_char_from_pyobj'] = """ +static int +signed_char_from_pyobj(signed_char* v, PyObject *obj, const char *errmess) { + int i = 0; + if (int_from_pyobj(&i, obj, errmess)) { + *v = (signed_char)i; + return 1; + } + return 0; +} +""" + + +needs['short_from_pyobj'] = ['int_from_pyobj'] +cfuncs['short_from_pyobj'] = """ +static int +short_from_pyobj(short* v, PyObject *obj, const char *errmess) { + int i = 0; + if (int_from_pyobj(&i, obj, errmess)) { + *v = (short)i; + return 1; + } + return 0; +} +""" + + +cfuncs['int_from_pyobj'] = """ +static int +int_from_pyobj(int* v, PyObject *obj, const char *errmess) +{ + PyObject* tmp = NULL; + + if (PyLong_Check(obj)) { + *v = Npy__PyLong_AsInt(obj); + return !(*v == -1 && PyErr_Occurred()); + } + + tmp = PyNumber_Long(obj); + if (tmp) { + *v = Npy__PyLong_AsInt(tmp); + Py_DECREF(tmp); + return !(*v == -1 && PyErr_Occurred()); + } + + if (PyComplex_Check(obj)) { + PyErr_Clear(); + tmp = PyObject_GetAttrString(obj,\"real\"); + } + else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) { + /*pass*/; + } + else if (PySequence_Check(obj)) { + PyErr_Clear(); + tmp = PySequence_GetItem(obj, 0); + } + + if (tmp) { + if (int_from_pyobj(v, tmp, errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + + { + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = #modulename#_error; + } + PyErr_SetString(err, errmess); + } + return 0; +} +""" + + +cfuncs['long_from_pyobj'] = """ +static int +long_from_pyobj(long* v, PyObject *obj, const char *errmess) { + PyObject* tmp = NULL; + + if (PyLong_Check(obj)) { + *v = PyLong_AsLong(obj); + return !(*v == -1 && PyErr_Occurred()); + } + + tmp = PyNumber_Long(obj); + if (tmp) { + *v = PyLong_AsLong(tmp); + Py_DECREF(tmp); + return !(*v == -1 && PyErr_Occurred()); + } + + if (PyComplex_Check(obj)) { + PyErr_Clear(); + tmp = PyObject_GetAttrString(obj,\"real\"); + } + else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) { + /*pass*/; + } + else if (PySequence_Check(obj)) { + PyErr_Clear(); + tmp = PySequence_GetItem(obj, 0); + } + + if (tmp) { + if (long_from_pyobj(v, tmp, errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + { + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = #modulename#_error; + } + PyErr_SetString(err, errmess); + } + return 0; +} +""" + + +needs['long_long_from_pyobj'] = ['long_long'] +cfuncs['long_long_from_pyobj'] = """ +static int +long_long_from_pyobj(long_long* v, PyObject *obj, const char *errmess) +{ + PyObject* tmp = NULL; + + if (PyLong_Check(obj)) { + *v = PyLong_AsLongLong(obj); + return !(*v == -1 && PyErr_Occurred()); + } + + tmp = PyNumber_Long(obj); + if (tmp) { + *v = PyLong_AsLongLong(tmp); + Py_DECREF(tmp); + return !(*v == -1 && PyErr_Occurred()); + } + + if (PyComplex_Check(obj)) { + PyErr_Clear(); + tmp = PyObject_GetAttrString(obj,\"real\"); + } + else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) { + /*pass*/; + } + else if (PySequence_Check(obj)) { + PyErr_Clear(); + tmp = PySequence_GetItem(obj, 0); + } + + if (tmp) { + if (long_long_from_pyobj(v, tmp, errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + { + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = #modulename#_error; + } + PyErr_SetString(err,errmess); + } + return 0; +} +""" + + +needs['long_double_from_pyobj'] = ['double_from_pyobj', 'long_double'] +cfuncs['long_double_from_pyobj'] = """ +static int +long_double_from_pyobj(long_double* v, PyObject *obj, const char *errmess) +{ + double d=0; + if (PyArray_CheckScalar(obj)){ + if PyArray_IsScalar(obj, LongDouble) { + PyArray_ScalarAsCtype(obj, v); + return 1; + } + else if (PyArray_Check(obj)) { + PyArrayObject *arr = (PyArrayObject *)obj; + if (PyArray_TYPE(arr) == NPY_LONGDOUBLE) { + (*v) = *((npy_longdouble *)PyArray_DATA(arr)); + return 1; + } + } + } + if (double_from_pyobj(&d, obj, errmess)) { + *v = (long_double)d; + return 1; + } + return 0; +} +""" + + +cfuncs['double_from_pyobj'] = """ +static int +double_from_pyobj(double* v, PyObject *obj, const char *errmess) +{ + PyObject* tmp = NULL; + if (PyFloat_Check(obj)) { + *v = PyFloat_AsDouble(obj); + return !(*v == -1.0 && PyErr_Occurred()); + } + + tmp = PyNumber_Float(obj); + if (tmp) { + *v = PyFloat_AsDouble(tmp); + Py_DECREF(tmp); + return !(*v == -1.0 && PyErr_Occurred()); + } + + if (PyComplex_Check(obj)) { + PyErr_Clear(); + tmp = PyObject_GetAttrString(obj,\"real\"); + } + else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) { + /*pass*/; + } + else if (PySequence_Check(obj)) { + PyErr_Clear(); + tmp = PySequence_GetItem(obj, 0); + } + + if (tmp) { + if (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} + Py_DECREF(tmp); + } + { + PyObject* err = PyErr_Occurred(); + if (err==NULL) err = #modulename#_error; + PyErr_SetString(err,errmess); + } + return 0; +} +""" + + +needs['float_from_pyobj'] = ['double_from_pyobj'] +cfuncs['float_from_pyobj'] = """ +static int +float_from_pyobj(float* v, PyObject *obj, const char *errmess) +{ + double d=0.0; + if (double_from_pyobj(&d,obj,errmess)) { + *v = (float)d; + return 1; + } + return 0; +} +""" + + +needs['complex_long_double_from_pyobj'] = ['complex_long_double', 'long_double', + 'complex_double_from_pyobj', 'npy_math.h'] +cfuncs['complex_long_double_from_pyobj'] = """ +static int +complex_long_double_from_pyobj(complex_long_double* v, PyObject *obj, const char *errmess) +{ + complex_double cd = {0.0,0.0}; + if (PyArray_CheckScalar(obj)){ + if PyArray_IsScalar(obj, CLongDouble) { + PyArray_ScalarAsCtype(obj, v); + return 1; + } + else if (PyArray_Check(obj)) { + PyArrayObject *arr = (PyArrayObject *)obj; + if (PyArray_TYPE(arr)==NPY_CLONGDOUBLE) { + (*v).r = npy_creall(*(((npy_clongdouble *)PyArray_DATA(arr)))); + (*v).i = npy_cimagl(*(((npy_clongdouble *)PyArray_DATA(arr)))); + return 1; + } + } + } + if (complex_double_from_pyobj(&cd,obj,errmess)) { + (*v).r = (long_double)cd.r; + (*v).i = (long_double)cd.i; + return 1; + } + return 0; +} +""" + + +needs['complex_double_from_pyobj'] = ['complex_double', 'npy_math.h'] +cfuncs['complex_double_from_pyobj'] = """ +static int +complex_double_from_pyobj(complex_double* v, PyObject *obj, const char *errmess) { + Py_complex c; + if (PyComplex_Check(obj)) { + c = PyComplex_AsCComplex(obj); + (*v).r = c.real; + (*v).i = c.imag; + return 1; + } + if (PyArray_IsScalar(obj, ComplexFloating)) { + if (PyArray_IsScalar(obj, CFloat)) { + npy_cfloat new; + PyArray_ScalarAsCtype(obj, &new); + (*v).r = (double)npy_crealf(new); + (*v).i = (double)npy_cimagf(new); + } + else if (PyArray_IsScalar(obj, CLongDouble)) { + npy_clongdouble new; + PyArray_ScalarAsCtype(obj, &new); + (*v).r = (double)npy_creall(new); + (*v).i = (double)npy_cimagl(new); + } + else { /* if (PyArray_IsScalar(obj, CDouble)) */ + PyArray_ScalarAsCtype(obj, v); + } + return 1; + } + if (PyArray_CheckScalar(obj)) { /* 0-dim array or still array scalar */ + PyArrayObject *arr; + if (PyArray_Check(obj)) { + arr = (PyArrayObject *)PyArray_Cast((PyArrayObject *)obj, NPY_CDOUBLE); + } + else { + arr = (PyArrayObject *)PyArray_FromScalar(obj, PyArray_DescrFromType(NPY_CDOUBLE)); + } + if (arr == NULL) { + return 0; + } + (*v).r = npy_creal(*(((npy_cdouble *)PyArray_DATA(arr)))); + (*v).i = npy_cimag(*(((npy_cdouble *)PyArray_DATA(arr)))); + Py_DECREF(arr); + return 1; + } + /* Python does not provide PyNumber_Complex function :-( */ + (*v).i = 0.0; + if (PyFloat_Check(obj)) { + (*v).r = PyFloat_AsDouble(obj); + return !((*v).r == -1.0 && PyErr_Occurred()); + } + if (PyLong_Check(obj)) { + (*v).r = PyLong_AsDouble(obj); + return !((*v).r == -1.0 && PyErr_Occurred()); + } + if (PySequence_Check(obj) && !(PyBytes_Check(obj) || PyUnicode_Check(obj))) { + PyObject *tmp = PySequence_GetItem(obj,0); + if (tmp) { + if (complex_double_from_pyobj(v,tmp,errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + } + { + PyObject* err = PyErr_Occurred(); + if (err==NULL) + err = PyExc_TypeError; + PyErr_SetString(err,errmess); + } + return 0; +} +""" + + +needs['complex_float_from_pyobj'] = [ + 'complex_float', 'complex_double_from_pyobj'] +cfuncs['complex_float_from_pyobj'] = """ +static int +complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess) +{ + complex_double cd={0.0,0.0}; + if (complex_double_from_pyobj(&cd,obj,errmess)) { + (*v).r = (float)cd.r; + (*v).i = (float)cd.i; + return 1; + } + return 0; +} +""" + + +cfuncs['try_pyarr_from_character'] = """ +static int try_pyarr_from_character(PyObject* obj, character* v) { + PyArrayObject *arr = (PyArrayObject*)obj; + if (!obj) return -2; + if (PyArray_Check(obj)) { + if (F2PY_ARRAY_IS_CHARACTER_COMPATIBLE(arr)) { + *(character *)(PyArray_DATA(arr)) = *v; + return 1; + } + } + { + char mess[F2PY_MESSAGE_BUFFER_SIZE]; + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = PyExc_ValueError; + strcpy(mess, "try_pyarr_from_character failed" + " -- expected bytes array-scalar|array, got "); + f2py_describe(obj, mess + strlen(mess)); + PyErr_SetString(err, mess); + } + } + return 0; +} +""" + +needs['try_pyarr_from_char'] = ['pyobj_from_char1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_char'] = 'static int try_pyarr_from_char(PyObject* obj,char* v) {\n TRYPYARRAYTEMPLATE(char,\'c\');\n}\n' +needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'unsigned_char'] +cfuncs[ + 'try_pyarr_from_unsigned_char'] = 'static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n TRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n' +needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'signed_char'] +cfuncs[ + 'try_pyarr_from_signed_char'] = 'static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n TRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n' +needs['try_pyarr_from_short'] = ['pyobj_from_short1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_short'] = 'static int try_pyarr_from_short(PyObject* obj,short* v) {\n TRYPYARRAYTEMPLATE(short,\'s\');\n}\n' +needs['try_pyarr_from_int'] = ['pyobj_from_int1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_int'] = 'static int try_pyarr_from_int(PyObject* obj,int* v) {\n TRYPYARRAYTEMPLATE(int,\'i\');\n}\n' +needs['try_pyarr_from_long'] = ['pyobj_from_long1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_long'] = 'static int try_pyarr_from_long(PyObject* obj,long* v) {\n TRYPYARRAYTEMPLATE(long,\'l\');\n}\n' +needs['try_pyarr_from_long_long'] = [ + 'pyobj_from_long_long1', 'TRYPYARRAYTEMPLATE', 'long_long'] +cfuncs[ + 'try_pyarr_from_long_long'] = 'static int try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n TRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n' +needs['try_pyarr_from_float'] = ['pyobj_from_float1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_float'] = 'static int try_pyarr_from_float(PyObject* obj,float* v) {\n TRYPYARRAYTEMPLATE(float,\'f\');\n}\n' +needs['try_pyarr_from_double'] = ['pyobj_from_double1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_double'] = 'static int try_pyarr_from_double(PyObject* obj,double* v) {\n TRYPYARRAYTEMPLATE(double,\'d\');\n}\n' +needs['try_pyarr_from_complex_float'] = [ + 'pyobj_from_complex_float1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_float'] +cfuncs[ + 'try_pyarr_from_complex_float'] = 'static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n' +needs['try_pyarr_from_complex_double'] = [ + 'pyobj_from_complex_double1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_double'] +cfuncs[ + 'try_pyarr_from_complex_double'] = 'static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n' + + +needs['create_cb_arglist'] = ['CFUNCSMESS', 'PRINTPYOBJERR', 'MINMAX'] +# create the list of arguments to be used when calling back to python +cfuncs['create_cb_arglist'] = """ +static int +create_cb_arglist(PyObject* fun, PyTupleObject* xa , const int maxnofargs, + const int nofoptargs, int *nofargs, PyTupleObject **args, + const char *errmess) +{ + PyObject *tmp = NULL; + PyObject *tmp_fun = NULL; + Py_ssize_t tot, opt, ext, siz, i, di = 0; + CFUNCSMESS(\"create_cb_arglist\\n\"); + tot=opt=ext=siz=0; + /* Get the total number of arguments */ + if (PyFunction_Check(fun)) { + tmp_fun = fun; + Py_INCREF(tmp_fun); + } + else { + di = 1; + if (PyObject_HasAttrString(fun,\"im_func\")) { + tmp_fun = PyObject_GetAttrString(fun,\"im_func\"); + } + else if (PyObject_HasAttrString(fun,\"__call__\")) { + tmp = PyObject_GetAttrString(fun,\"__call__\"); + if (PyObject_HasAttrString(tmp,\"im_func\")) + tmp_fun = PyObject_GetAttrString(tmp,\"im_func\"); + else { + tmp_fun = fun; /* built-in function */ + Py_INCREF(tmp_fun); + tot = maxnofargs; + if (PyCFunction_Check(fun)) { + /* In case the function has a co_argcount (like on PyPy) */ + di = 0; + } + if (xa != NULL) + tot += PyTuple_Size((PyObject *)xa); + } + Py_XDECREF(tmp); + } + else if (PyFortran_Check(fun) || PyFortran_Check1(fun)) { + tot = maxnofargs; + if (xa != NULL) + tot += PyTuple_Size((PyObject *)xa); + tmp_fun = fun; + Py_INCREF(tmp_fun); + } + else if (F2PyCapsule_Check(fun)) { + tot = maxnofargs; + if (xa != NULL) + ext = PyTuple_Size((PyObject *)xa); + if(ext>0) { + fprintf(stderr,\"extra arguments tuple cannot be used with PyCapsule call-back\\n\"); + goto capi_fail; + } + tmp_fun = fun; + Py_INCREF(tmp_fun); + } + } + + if (tmp_fun == NULL) { + fprintf(stderr, + \"Call-back argument must be function|instance|instance.__call__|f2py-function \" + \"but got %s.\\n\", + ((fun == NULL) ? \"NULL\" : Py_TYPE(fun)->tp_name)); + goto capi_fail; + } + + if (PyObject_HasAttrString(tmp_fun,\"__code__\")) { + if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\")) { + PyObject *tmp_argcount = PyObject_GetAttrString(tmp,\"co_argcount\"); + Py_DECREF(tmp); + if (tmp_argcount == NULL) { + goto capi_fail; + } + tot = PyLong_AsSsize_t(tmp_argcount) - di; + Py_DECREF(tmp_argcount); + } + } + /* Get the number of optional arguments */ + if (PyObject_HasAttrString(tmp_fun,\"__defaults__\")) { + if (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"__defaults__\"))) + opt = PyTuple_Size(tmp); + Py_XDECREF(tmp); + } + /* Get the number of extra arguments */ + if (xa != NULL) + ext = PyTuple_Size((PyObject *)xa); + /* Calculate the size of call-backs argument list */ + siz = MIN(maxnofargs+ext,tot); + *nofargs = MAX(0,siz-ext); + +#ifdef DEBUGCFUNCS + fprintf(stderr, + \"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),\" + \"tot,opt,ext,siz,nofargs = %d(-%d), %zd, %zd, %zd, %zd, %d\\n\", + maxnofargs, nofoptargs, tot, opt, ext, siz, *nofargs); +#endif + + if (siz < tot-opt) { + fprintf(stderr, + \"create_cb_arglist: Failed to build argument list \" + \"(siz) with enough arguments (tot-opt) required by \" + \"user-supplied function (siz,tot,opt=%zd, %zd, %zd).\\n\", + siz, tot, opt); + goto capi_fail; + } + + /* Initialize argument list */ + *args = (PyTupleObject *)PyTuple_New(siz); + for (i=0;i<*nofargs;i++) { + Py_INCREF(Py_None); + PyTuple_SET_ITEM((PyObject *)(*args),i,Py_None); + } + if (xa != NULL) + for (i=(*nofargs);i 0: + if outneeds[n][0] not in needs: + out.append(outneeds[n][0]) + del outneeds[n][0] + else: + flag = 0 + for k in outneeds[n][1:]: + if k in needs[outneeds[n][0]]: + flag = 1 + break + if flag: + outneeds[n] = outneeds[n][1:] + [outneeds[n][0]] + else: + out.append(outneeds[n][0]) + del outneeds[n][0] + if saveout and (0 not in map(lambda x, y: x == y, saveout, outneeds[n])) \ + and outneeds[n] != []: + print(n, saveout) + errmess( + 'get_needs: no progress in sorting needs, probably circular dependence, skipping.\n') + out = out + saveout + break + saveout = copy.copy(outneeds[n]) + if out == []: + out = [n] + res[n] = out + return res diff --git a/local_packages/numpy/f2py/cfuncs.pyi b/local_packages/numpy/f2py/cfuncs.pyi new file mode 100644 index 0000000..5887177 --- /dev/null +++ b/local_packages/numpy/f2py/cfuncs.pyi @@ -0,0 +1,31 @@ +from typing import Final, TypeAlias + +from .__version__ import version + +### + +_NeedListDict: TypeAlias = dict[str, list[str]] +_NeedDict: TypeAlias = dict[str, str] + +### + +f2py_version: Final = version + +outneeds: Final[_NeedListDict] = ... +needs: Final[_NeedListDict] = ... + +includes0: Final[_NeedDict] = ... +includes: Final[_NeedDict] = ... +userincludes: Final[_NeedDict] = ... +typedefs: Final[_NeedDict] = ... +typedefs_generated: Final[_NeedDict] = ... +cppmacros: Final[_NeedDict] = ... +cfuncs: Final[_NeedDict] = ... +callbacks: Final[_NeedDict] = ... +f90modhooks: Final[_NeedDict] = ... +commonhooks: Final[_NeedDict] = ... + +def errmess(s: str) -> None: ... +def buildcfuncs() -> None: ... +def get_needs() -> _NeedListDict: ... +def append_needs(need: str | list[str], flag: int = 1) -> _NeedListDict: ... diff --git a/local_packages/numpy/f2py/common_rules.py b/local_packages/numpy/f2py/common_rules.py new file mode 100644 index 0000000..cef757b --- /dev/null +++ b/local_packages/numpy/f2py/common_rules.py @@ -0,0 +1,143 @@ +""" +Build common block mechanism for f2py2e. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +from . import __version__ + +f2py_version = __version__.version + +from . import capi_maps, func2subr +from .auxfuncs import getuseblocks, hasbody, hascommon, hasnote, isintent_hide, outmess +from .crackfortran import rmbadname + + +def findcommonblocks(block, top=1): + ret = [] + if hascommon(block): + for key, value in block['common'].items(): + vars_ = {v: block['vars'][v] for v in value} + ret.append((key, value, vars_)) + elif hasbody(block): + for b in block['body']: + ret = ret + findcommonblocks(b, 0) + if top: + tret = [] + names = [] + for t in ret: + if t[0] not in names: + names.append(t[0]) + tret.append(t) + return tret + return ret + + +def buildhooks(m): + ret = {'commonhooks': [], 'initcommonhooks': [], + 'docs': ['"COMMON blocks:\\n"']} + fwrap = [''] + + def fadd(line, s=fwrap): + s[0] = f'{s[0]}\n {line}' + chooks = [''] + + def cadd(line, s=chooks): + s[0] = f'{s[0]}\n{line}' + ihooks = [''] + + def iadd(line, s=ihooks): + s[0] = f'{s[0]}\n{line}' + doc = [''] + + def dadd(line, s=doc): + s[0] = f'{s[0]}\n{line}' + for (name, vnames, vars) in findcommonblocks(m): + lower_name = name.lower() + hnames, inames = [], [] + for n in vnames: + if isintent_hide(vars[n]): + hnames.append(n) + else: + inames.append(n) + if hnames: + outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n\t\t Hidden: %s\n' % ( + name, ','.join(inames), ','.join(hnames))) + else: + outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n' % ( + name, ','.join(inames))) + fadd(f'subroutine f2pyinit{name}(setupfunc)') + for usename in getuseblocks(m): + fadd(f'use {usename}') + fadd('external setupfunc') + for n in vnames: + fadd(func2subr.var2fixfortran(vars, n)) + if name == '_BLNK_': + fadd(f"common {','.join(vnames)}") + else: + fadd(f"common /{name}/ {','.join(vnames)}") + fadd(f"call setupfunc({','.join(inames)})") + fadd('end\n') + cadd('static FortranDataDef f2py_%s_def[] = {' % (name)) + idims = [] + for n in inames: + ct = capi_maps.getctype(vars[n]) + elsize = capi_maps.get_elsize(vars[n]) + at = capi_maps.c2capi_map[ct] + dm = capi_maps.getarrdims(n, vars[n]) + if dm['dims']: + idims.append(f"({dm['dims']})") + else: + idims.append('') + dms = dm['dims'].strip() + if not dms: + dms = '-1' + cadd('\t{\"%s\",%s,{{%s}},%s, %s},' + % (n, dm['rank'], dms, at, elsize)) + cadd('\t{NULL}\n};') + inames1 = rmbadname(inames) + inames1_tps = ','.join(['char *' + s for s in inames1]) + cadd('static void f2py_setup_%s(%s) {' % (name, inames1_tps)) + cadd('\tint i_f2py=0;') + for n in inames1: + cadd(f'\tf2py_{name}_def[i_f2py++].data = {n};') + cadd('}') + if '_' in lower_name: + F_FUNC = 'F_FUNC_US' + else: + F_FUNC = 'F_FUNC' + cadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void(*)(%s));' + % (F_FUNC, lower_name, name.upper(), + ','.join(['char*'] * len(inames1)))) + cadd('static void f2py_init_%s(void) {' % name) + cadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);' + % (F_FUNC, lower_name, name.upper(), name)) + cadd('}\n') + iadd(f'\ttmp = PyFortranObject_New(f2py_{name}_def,f2py_init_{name});') + iadd('\tif (tmp == NULL) return NULL;') + iadd(f'\tif (F2PyDict_SetItemString(d, "{name}", tmp) == -1) return NULL;') + iadd('\tPy_DECREF(tmp);') + tname = name.replace('_', '\\_') + dadd('\\subsection{Common block \\texttt{%s}}\n' % (tname)) + dadd('\\begin{description}') + for n in inames: + dadd('\\item[]{{}\\verb@%s@{}}' % + (capi_maps.getarrdocsign(n, vars[n]))) + if hasnote(vars[n]): + note = vars[n]['note'] + if isinstance(note, list): + note = '\n'.join(note) + dadd(f'--- {note}') + dadd('\\end{description}') + ret['docs'].append( + f"\"\t/{name}/ {','.join(map(lambda v, d: v + d, inames, idims))}\\n\"") + ret['commonhooks'] = chooks + ret['initcommonhooks'] = ihooks + ret['latexdoc'] = doc[0] + if len(ret['docs']) <= 1: + ret['docs'] = '' + return ret, fwrap[0] diff --git a/local_packages/numpy/f2py/common_rules.pyi b/local_packages/numpy/f2py/common_rules.pyi new file mode 100644 index 0000000..d840de0 --- /dev/null +++ b/local_packages/numpy/f2py/common_rules.pyi @@ -0,0 +1,9 @@ +from collections.abc import Mapping +from typing import Any, Final + +from .__version__ import version + +f2py_version: Final = version + +def findcommonblocks(block: Mapping[str, object], top: int = 1) -> list[tuple[str, list[str], dict[str, Any]]]: ... +def buildhooks(m: Mapping[str, object]) -> tuple[dict[str, Any], str]: ... diff --git a/local_packages/numpy/f2py/crackfortran.py b/local_packages/numpy/f2py/crackfortran.py new file mode 100644 index 0000000..22d8043 --- /dev/null +++ b/local_packages/numpy/f2py/crackfortran.py @@ -0,0 +1,3725 @@ +""" +crackfortran --- read fortran (77,90) code and extract declaration information. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. + + +Usage of crackfortran: +====================== +Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h + -m ,--ignore-contains +Functions: crackfortran, crack2fortran +The following Fortran statements/constructions are supported +(or will be if needed): + block data,byte,call,character,common,complex,contains,data, + dimension,double complex,double precision,end,external,function, + implicit,integer,intent,interface,intrinsic, + logical,module,optional,parameter,private,public, + program,real,(sequence?),subroutine,type,use,virtual, + include,pythonmodule +Note: 'virtual' is mapped to 'dimension'. +Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is minor bug). +Note: code after 'contains' will be ignored until its scope ends. +Note: 'common' statement is extended: dimensions are moved to variable definitions +Note: f2py directive: f2py is read as +Note: pythonmodule is introduced to represent Python module + +Usage: + `postlist=crackfortran(files)` + `postlist` contains declaration information read from the list of files `files`. + `crack2fortran(postlist)` returns a fortran code to be saved to pyf-file + + `postlist` has the following structure: + *** it is a list of dictionaries containing `blocks': + B = {'block','body','vars','parent_block'[,'name','prefix','args','result', + 'implicit','externals','interfaced','common','sortvars', + 'commonvars','note']} + B['block'] = 'interface' | 'function' | 'subroutine' | 'module' | + 'program' | 'block data' | 'type' | 'pythonmodule' | + 'abstract interface' + B['body'] --- list containing `subblocks' with the same structure as `blocks' + B['parent_block'] --- dictionary of a parent block: + C['body'][]['parent_block'] is C + B['vars'] --- dictionary of variable definitions + B['sortvars'] --- dictionary of variable definitions sorted by dependence (independent first) + B['name'] --- name of the block (not if B['block']=='interface') + B['prefix'] --- prefix string (only if B['block']=='function') + B['args'] --- list of argument names if B['block']== 'function' | 'subroutine' + B['result'] --- name of the return value (only if B['block']=='function') + B['implicit'] --- dictionary {'a':,'b':...} | None + B['externals'] --- list of variables being external + B['interfaced'] --- list of variables being external and defined + B['common'] --- dictionary of common blocks (list of objects) + B['commonvars'] --- list of variables used in common blocks (dimensions are moved to variable definitions) + B['from'] --- string showing the 'parents' of the current block + B['use'] --- dictionary of modules used in current block: + {:{['only':<0|1>],['map':{:,...}]}} + B['note'] --- list of LaTeX comments on the block + B['f2pyenhancements'] --- optional dictionary + {'threadsafe':'','fortranname':, + 'callstatement':|, + 'callprotoargument':, + 'usercode':|, + 'pymethoddef:' + } + B['entry'] --- dictionary {entryname:argslist,..} + B['varnames'] --- list of variable names given in the order of reading the + Fortran code, useful for derived types. + B['saved_interface'] --- a string of scanned routine signature, defines explicit interface + *** Variable definition is a dictionary + D = B['vars'][] = + {'typespec'[,'attrspec','kindselector','charselector','=','typename']} + D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' | + 'double precision' | 'integer' | 'logical' | 'real' | 'type' + D['attrspec'] --- list of attributes (e.g. 'dimension()', + 'external','intent(in|out|inout|hide|c|callback|cache|aligned4|aligned8|aligned16)', + 'optional','required', etc) + K = D['kindselector'] = {['*','kind']} (only if D['typespec'] = + 'complex' | 'integer' | 'logical' | 'real' ) + C = D['charselector'] = {['*','len','kind','f2py_len']} + (only if D['typespec']=='character') + D['='] --- initialization expression string + D['typename'] --- name of the type if D['typespec']=='type' + D['dimension'] --- list of dimension bounds + D['intent'] --- list of intent specifications + D['depend'] --- list of variable names on which current variable depends on + D['check'] --- list of C-expressions; if C-expr returns zero, exception is raised + D['note'] --- list of LaTeX comments on the variable + *** Meaning of kind/char selectors (few examples): + D['typespec>']*K['*'] + D['typespec'](kind=K['kind']) + character*C['*'] + character(len=C['len'],kind=C['kind'], f2py_len=C['f2py_len']) + (see also fortran type declaration statement formats below) + +Fortran 90 type declaration statement format (F77 is subset of F90) +==================================================================== +(Main source: IBM XL Fortran 5.1 Language Reference Manual) +type declaration = [[]::] + = byte | + character[] | + complex[] | + double complex | + double precision | + integer[] | + logical[] | + real[] | + type() + = * | + ([len=][,[kind=]]) | + (kind=[,len=]) + = * | + ([kind=]) + = comma separated list of attributes. + Only the following attributes are used in + building up the interface: + external + (parameter --- affects '=' key) + optional + intent + Other attributes are ignored. + = in | out | inout + = comma separated list of dimension bounds. + = [[*][()] | [()]*] + [// | =] [,] + +In addition, the following attributes are used: check,depend,note + +TODO: + * Apply 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)' + -> 'real x(2)') + The above may be solved by creating appropriate preprocessor program, for example. + +""" +import codecs +import copy +import fileinput +import os +import platform +import re +import string +import sys +from pathlib import Path + +try: + import charset_normalizer +except ImportError: + charset_normalizer = None + +from . import __version__, symbolic + +# The environment provided by auxfuncs.py is needed for some calls to eval. +# As the needed functions cannot be determined by static inspection of the +# code, it is safest to use import * pending a major refactoring of f2py. +from .auxfuncs import * + +f2py_version = __version__.version + +# Global flags: +strictf77 = 1 # Ignore `!' comments unless line[0]=='!' +sourcecodeform = 'fix' # 'fix','free' +quiet = 0 # Be verbose if 0 (Obsolete: not used any more) +verbose = 1 # Be quiet if 0, extra verbose if > 1. +tabchar = 4 * ' ' +pyffilename = '' +f77modulename = '' +skipemptyends = 0 # for old F77 programs without 'program' statement +ignorecontains = 1 +dolowercase = 1 +debug = [] + +# Global variables +beginpattern = '' +currentfilename = '' +expectbegin = 1 +f90modulevars = {} +filepositiontext = '' +gotnextfile = 1 +groupcache = None +groupcounter = 0 +grouplist = {groupcounter: []} +groupname = '' +include_paths = [] +neededmodule = -1 +onlyfuncs = [] +previous_context = None +skipblocksuntil = -1 +skipfuncs = [] +skipfunctions = [] +usermodules = [] + + +def reset_global_f2py_vars(): + global groupcounter, grouplist, neededmodule, expectbegin + global skipblocksuntil, usermodules, f90modulevars, gotnextfile + global filepositiontext, currentfilename, skipfunctions, skipfuncs + global onlyfuncs, include_paths, previous_context + global strictf77, sourcecodeform, quiet, verbose, tabchar, pyffilename + global f77modulename, skipemptyends, ignorecontains, dolowercase, debug + + # flags + strictf77 = 1 + sourcecodeform = 'fix' + quiet = 0 + verbose = 1 + tabchar = 4 * ' ' + pyffilename = '' + f77modulename = '' + skipemptyends = 0 + ignorecontains = 1 + dolowercase = 1 + debug = [] + # variables + groupcounter = 0 + grouplist = {groupcounter: []} + neededmodule = -1 + expectbegin = 1 + skipblocksuntil = -1 + usermodules = [] + f90modulevars = {} + gotnextfile = 1 + filepositiontext = '' + currentfilename = '' + skipfunctions = [] + skipfuncs = [] + onlyfuncs = [] + include_paths = [] + previous_context = None + + +def outmess(line, flag=1): + global filepositiontext + + if not verbose: + return + if not quiet: + if flag: + sys.stdout.write(filepositiontext) + sys.stdout.write(line) + + +re._MAXCACHE = 50 +defaultimplicitrules = {} +for c in "abcdefghopqrstuvwxyz$_": + defaultimplicitrules[c] = {'typespec': 'real'} +for c in "ijklmn": + defaultimplicitrules[c] = {'typespec': 'integer'} +badnames = {} +invbadnames = {} +for n in ['int', 'double', 'float', 'char', 'short', 'long', 'void', 'case', 'while', + 'return', 'signed', 'unsigned', 'if', 'for', 'typedef', 'sizeof', 'union', + 'struct', 'static', 'register', 'new', 'break', 'do', 'goto', 'switch', + 'continue', 'else', 'inline', 'extern', 'delete', 'const', 'auto', + 'len', 'rank', 'shape', 'index', 'slen', 'size', '_i', + 'max', 'min', + 'flen', 'fshape', + 'string', 'complex_double', 'float_double', 'stdin', 'stderr', 'stdout', + 'type', 'default']: + badnames[n] = n + '_bn' + invbadnames[n + '_bn'] = n + + +def rmbadname1(name): + if name in badnames: + errmess(f'rmbadname1: Replacing "{name}" with "{badnames[name]}".\n') + return badnames[name] + return name + + +def rmbadname(names): + return [rmbadname1(_m) for _m in names] + + +def undo_rmbadname1(name): + if name in invbadnames: + errmess(f'undo_rmbadname1: Replacing "{name}" with "{invbadnames[name]}".\n') + return invbadnames[name] + return name + + +def undo_rmbadname(names): + return [undo_rmbadname1(_m) for _m in names] + + +_has_f_header = re.compile(r'-\*-\s*fortran\s*-\*-', re.I).search +_has_f90_header = re.compile(r'-\*-\s*f90\s*-\*-', re.I).search +_has_fix_header = re.compile(r'-\*-\s*fix\s*-\*-', re.I).search +_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]', re.I).match + +# Extensions +COMMON_FREE_EXTENSIONS = ['.f90', '.f95', '.f03', '.f08'] +COMMON_FIXED_EXTENSIONS = ['.for', '.ftn', '.f77', '.f'] + + +def openhook(filename, mode): + """Ensures that filename is opened with correct encoding parameter. + + This function uses charset_normalizer package, when available, for + determining the encoding of the file to be opened. When charset_normalizer + is not available, the function detects only UTF encodings, otherwise, ASCII + encoding is used as fallback. + """ + # Reads in the entire file. Robust detection of encoding. + # Correctly handles comments or late stage unicode characters + # gh-22871 + if charset_normalizer is not None: + encoding = charset_normalizer.from_path(filename).best().encoding + else: + # hint: install charset_normalizer for correct encoding handling + # No need to read the whole file for trying with startswith + nbytes = min(32, os.path.getsize(filename)) + with open(filename, 'rb') as fhandle: + raw = fhandle.read(nbytes) + if raw.startswith(codecs.BOM_UTF8): + encoding = 'UTF-8-SIG' + elif raw.startswith((codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)): + encoding = 'UTF-32' + elif raw.startswith((codecs.BOM_LE, codecs.BOM_BE)): + encoding = 'UTF-16' + else: + # Fallback, without charset_normalizer + encoding = 'ascii' + return open(filename, mode, encoding=encoding) + + +def is_free_format(fname): + """Check if file is in free format Fortran.""" + # f90 allows both fixed and free format, assuming fixed unless + # signs of free format are detected. + result = False + if Path(fname).suffix.lower() in COMMON_FREE_EXTENSIONS: + result = True + with openhook(fname, 'r') as fhandle: + line = fhandle.readline() + n = 15 # the number of non-comment lines to scan for hints + if _has_f_header(line): + n = 0 + elif _has_f90_header(line): + n = 0 + result = True + while n > 0 and line: + if line[0] != '!' and line.strip(): + n -= 1 + if (line[0] != '\t' and _free_f90_start(line[:5])) or line[-2:-1] == '&': + result = True + break + line = fhandle.readline() + return result + + +# Read fortran (77,90) code +def readfortrancode(ffile, dowithline=show, istop=1): + """ + Read fortran codes from files and + 1) Get rid of comments, line continuations, and empty lines; lower cases. + 2) Call dowithline(line) on every line. + 3) Recursively call itself when statement \"include ''\" is met. + """ + global gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77 + global beginpattern, quiet, verbose, dolowercase, include_paths + + if not istop: + saveglobals = gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\ + beginpattern, quiet, verbose, dolowercase + if ffile == []: + return + localdolowercase = dolowercase + # cont: set to True when the content of the last line read + # indicates statement continuation + cont = False + finalline = '' + ll = '' + includeline = re.compile( + r'\s*include\s*(\'|")(?P[^\'"]*)(\'|")', re.I) + cont1 = re.compile(r'(?P.*)&\s*\Z') + cont2 = re.compile(r'(\s*&|)(?P.*)') + mline_mark = re.compile(r".*?'''") + if istop: + dowithline('', -1) + ll, l1 = '', '' + spacedigits = [' '] + [str(_m) for _m in range(10)] + filepositiontext = '' + fin = fileinput.FileInput(ffile, openhook=openhook) + while True: + try: + l = fin.readline() + except UnicodeDecodeError as msg: + raise Exception( + f'readfortrancode: reading {fin.filename()}#{fin.lineno()}' + f' failed with\n{msg}.\nIt is likely that installing charset_normalizer' + ' package will help f2py determine the input file encoding' + ' correctly.') + if not l: + break + if fin.isfirstline(): + filepositiontext = '' + currentfilename = fin.filename() + gotnextfile = 1 + l1 = l + strictf77 = 0 + sourcecodeform = 'fix' + ext = os.path.splitext(currentfilename)[1] + if Path(currentfilename).suffix.lower() in COMMON_FIXED_EXTENSIONS and \ + not (_has_f90_header(l) or _has_fix_header(l)): + strictf77 = 1 + elif is_free_format(currentfilename) and not _has_fix_header(l): + sourcecodeform = 'free' + if strictf77: + beginpattern = beginpattern77 + else: + beginpattern = beginpattern90 + outmess('\tReading file %s (format:%s%s)\n' + % (repr(currentfilename), sourcecodeform, + (strictf77 and ',strict') or '')) + + l = l.expandtabs().replace('\xa0', ' ') + # Get rid of newline characters + while not l == '': + if l[-1] not in "\n\r\f": + break + l = l[:-1] + # Do not lower for directives, gh-2547, gh-27697, gh-26681 + is_f2py_directive = False + # Unconditionally remove comments + (l, rl) = split_by_unquoted(l, '!') + l += ' ' + if rl[:5].lower() == '!f2py': # f2py directive + l, _ = split_by_unquoted(l + 4 * ' ' + rl[5:], '!') + is_f2py_directive = True + if l.strip() == '': # Skip empty line + if sourcecodeform == 'free': + # In free form, a statement continues in the next line + # that is not a comment line [3.3.2.4^1], lines with + # blanks are comment lines [3.3.2.3^1]. Hence, the + # line continuation flag must retain its state. + pass + else: + # In fixed form, statement continuation is determined + # by a non-blank character at the 6-th position. Empty + # line indicates a start of a new statement + # [3.3.3.3^1]. Hence, the line continuation flag must + # be reset. + cont = False + continue + if sourcecodeform == 'fix': + if l[0] in ['*', 'c', '!', 'C', '#']: + if l[1:5].lower() == 'f2py': # f2py directive + l = ' ' + l[5:] + is_f2py_directive = True + else: # Skip comment line + cont = False + is_f2py_directive = False + continue + elif strictf77: + if len(l) > 72: + l = l[:72] + if l[0] not in spacedigits: + raise Exception('readfortrancode: Found non-(space,digit) char ' + 'in the first column.\n\tAre you sure that ' + 'this code is in fix form?\n\tline=%s' % repr(l)) + + if (not cont or strictf77) and (len(l) > 5 and not l[5] == ' '): + # Continuation of a previous line + ll = ll + l[6:] + finalline = '' + origfinalline = '' + else: + r = cont1.match(l) + if r: + l = r.group('line') # Continuation follows .. + if cont: + ll = ll + cont2.match(l).group('line') + finalline = '' + origfinalline = '' + else: + # clean up line beginning from possible digits. + l = ' ' + l[5:] + # f2py directives are already stripped by this point + if localdolowercase: + finalline = ll.lower() + else: + finalline = ll + origfinalline = ll + ll = l + + elif sourcecodeform == 'free': + if not cont and ext == '.pyf' and mline_mark.match(l): + l = l + '\n' + while True: + lc = fin.readline() + if not lc: + errmess( + 'Unexpected end of file when reading multiline\n') + break + l = l + lc + if mline_mark.match(lc): + break + l = l.rstrip() + r = cont1.match(l) + if r: + l = r.group('line') # Continuation follows .. + if cont: + ll = ll + cont2.match(l).group('line') + finalline = '' + origfinalline = '' + else: + if localdolowercase: + # only skip lowering for C style constructs + # gh-2547, gh-27697, gh-26681, gh-28014 + finalline = ll.lower() if not (is_f2py_directive and iscstyledirective(ll)) else ll + else: + finalline = ll + origfinalline = ll + ll = l + cont = (r is not None) + else: + raise ValueError( + f"Flag sourcecodeform must be either 'fix' or 'free': {repr(sourcecodeform)}") + filepositiontext = 'Line #%d in %s:"%s"\n\t' % ( + fin.filelineno() - 1, currentfilename, l1) + m = includeline.match(origfinalline) + if m: + fn = m.group('name') + if os.path.isfile(fn): + readfortrancode(fn, dowithline=dowithline, istop=0) + else: + include_dirs = [ + os.path.dirname(currentfilename)] + include_paths + foundfile = 0 + for inc_dir in include_dirs: + fn1 = os.path.join(inc_dir, fn) + if os.path.isfile(fn1): + foundfile = 1 + readfortrancode(fn1, dowithline=dowithline, istop=0) + break + if not foundfile: + outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % ( + repr(fn), os.pathsep.join(include_dirs))) + else: + dowithline(finalline) + l1 = ll + # Last line should never have an f2py directive anyway + if localdolowercase: + finalline = ll.lower() + else: + finalline = ll + origfinalline = ll + filepositiontext = 'Line #%d in %s:"%s"\n\t' % ( + fin.filelineno() - 1, currentfilename, l1) + m = includeline.match(origfinalline) + if m: + fn = m.group('name') + if os.path.isfile(fn): + readfortrancode(fn, dowithline=dowithline, istop=0) + else: + include_dirs = [os.path.dirname(currentfilename)] + include_paths + foundfile = 0 + for inc_dir in include_dirs: + fn1 = os.path.join(inc_dir, fn) + if os.path.isfile(fn1): + foundfile = 1 + readfortrancode(fn1, dowithline=dowithline, istop=0) + break + if not foundfile: + outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % ( + repr(fn), os.pathsep.join(include_dirs))) + else: + dowithline(finalline) + filepositiontext = '' + fin.close() + if istop: + dowithline('', 1) + else: + gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\ + beginpattern, quiet, verbose, dolowercase = saveglobals + + +# Crack line +beforethisafter = r'\s*(?P%s(?=\s*(\b(%s)\b)))'\ + r'\s*(?P(\b(%s)\b))'\ + r'\s*(?P%s)\s*\Z' +## +fortrantypes = r'character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte' +typespattern = re.compile( + beforethisafter % ('', fortrantypes, fortrantypes, '.*'), re.I), 'type' +typespattern4implicit = re.compile(beforethisafter % ( + '', fortrantypes + '|static|automatic|undefined', fortrantypes + '|static|automatic|undefined', '.*'), re.I) +# +functionpattern = re.compile(beforethisafter % ( + r'([a-z]+[\w\s(=*+-/)]*?|)', 'function', 'function', '.*'), re.I), 'begin' +subroutinepattern = re.compile(beforethisafter % ( + r'[a-z\s]*?', 'subroutine', 'subroutine', '.*'), re.I), 'begin' +# modulepattern=re.compile(beforethisafter%('[a-z\s]*?','module','module','.*'),re.I),'begin' +# +groupbegins77 = r'program|block\s*data' +beginpattern77 = re.compile( + beforethisafter % ('', groupbegins77, groupbegins77, '.*'), re.I), 'begin' +groupbegins90 = groupbegins77 + \ + r'|module(?!\s*procedure)|python\s*module|(abstract|)\s*interface|'\ + r'type(?!\s*\()' +beginpattern90 = re.compile( + beforethisafter % ('', groupbegins90, groupbegins90, '.*'), re.I), 'begin' +groupends = (r'end|endprogram|endblockdata|endmodule|endpythonmodule|' + r'endinterface|endsubroutine|endfunction') +endpattern = re.compile( + beforethisafter % ('', groupends, groupends, '.*'), re.I), 'end' +# block, the Fortran 2008 construct needs special handling in the rest of the file +endifs = r'end\s*(if|do|where|select|while|forall|associate|'\ + r'critical|enum|team)' +endifpattern = re.compile( + beforethisafter % (r'[\w]*?', endifs, endifs, '.*'), re.I), 'endif' +# +moduleprocedures = r'module\s*procedure' +moduleprocedurepattern = re.compile( + beforethisafter % ('', moduleprocedures, moduleprocedures, '.*'), re.I), \ + 'moduleprocedure' +implicitpattern = re.compile( + beforethisafter % ('', 'implicit', 'implicit', '.*'), re.I), 'implicit' +dimensionpattern = re.compile(beforethisafter % ( + '', 'dimension|virtual', 'dimension|virtual', '.*'), re.I), 'dimension' +externalpattern = re.compile( + beforethisafter % ('', 'external', 'external', '.*'), re.I), 'external' +optionalpattern = re.compile( + beforethisafter % ('', 'optional', 'optional', '.*'), re.I), 'optional' +requiredpattern = re.compile( + beforethisafter % ('', 'required', 'required', '.*'), re.I), 'required' +publicpattern = re.compile( + beforethisafter % ('', 'public', 'public', '.*'), re.I), 'public' +privatepattern = re.compile( + beforethisafter % ('', 'private', 'private', '.*'), re.I), 'private' +intrinsicpattern = re.compile( + beforethisafter % ('', 'intrinsic', 'intrinsic', '.*'), re.I), 'intrinsic' +intentpattern = re.compile(beforethisafter % ( + '', 'intent|depend|note|check', 'intent|depend|note|check', r'\s*\(.*?\).*'), re.I), 'intent' +parameterpattern = re.compile( + beforethisafter % ('', 'parameter', 'parameter', r'\s*\(.*'), re.I), 'parameter' +datapattern = re.compile( + beforethisafter % ('', 'data', 'data', '.*'), re.I), 'data' +callpattern = re.compile( + beforethisafter % ('', 'call', 'call', '.*'), re.I), 'call' +entrypattern = re.compile( + beforethisafter % ('', 'entry', 'entry', '.*'), re.I), 'entry' +callfunpattern = re.compile( + beforethisafter % ('', 'callfun', 'callfun', '.*'), re.I), 'callfun' +commonpattern = re.compile( + beforethisafter % ('', 'common', 'common', '.*'), re.I), 'common' +usepattern = re.compile( + beforethisafter % ('', 'use', 'use', '.*'), re.I), 'use' +containspattern = re.compile( + beforethisafter % ('', 'contains', 'contains', ''), re.I), 'contains' +formatpattern = re.compile( + beforethisafter % ('', 'format', 'format', '.*'), re.I), 'format' +# Non-fortran and f2py-specific statements +f2pyenhancementspattern = re.compile(beforethisafter % ('', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', + 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', '.*'), re.I | re.S), 'f2pyenhancements' +multilinepattern = re.compile( + r"\s*(?P''')(?P.*?)(?P''')\s*\Z", re.S), 'multiline' +## + +def split_by_unquoted(line, characters): + """ + Splits the line into (line[:i], line[i:]), + where i is the index of first occurrence of one of the characters + not within quotes, or len(line) if no such index exists + """ + assert not (set('"\'') & set(characters)), "cannot split by unquoted quotes" + r = re.compile( + r"\A(?P({single_quoted}|{double_quoted}|{not_quoted})*)" + r"(?P{char}.*)\Z".format( + not_quoted=f"[^\"'{re.escape(characters)}]", + char=f"[{re.escape(characters)}]", + single_quoted=r"('([^'\\]|(\\.))*')", + double_quoted=r'("([^"\\]|(\\.))*")')) + m = r.match(line) + if m: + d = m.groupdict() + return (d["before"], d["after"]) + return (line, "") + +def _simplifyargs(argsline): + a = [] + for n in markoutercomma(argsline).split('@,@'): + for r in '(),': + n = n.replace(r, '_') + a.append(n) + return ','.join(a) + + +crackline_re_1 = re.compile(r'\s*(?P\b[a-z]+\w*\b)\s*=.*', re.I) +crackline_bind_1 = re.compile(r'\s*(?P\b[a-z]+\w*\b)\s*=.*', re.I) +crackline_bindlang = re.compile(r'\s*bind\(\s*(?P[^,]+)\s*,\s*name\s*=\s*"(?P[^"]+)"\s*\)', re.I) + +def crackline(line, reset=0): + """ + reset=-1 --- initialize + reset=0 --- crack the line + reset=1 --- final check if mismatch of blocks occurred + + Cracked data is saved in grouplist[0]. + """ + global beginpattern, groupcounter, groupname, groupcache, grouplist + global filepositiontext, currentfilename, neededmodule, expectbegin + global skipblocksuntil, skipemptyends, previous_context, gotnextfile + + _, has_semicolon = split_by_unquoted(line, ";") + if has_semicolon and not (f2pyenhancementspattern[0].match(line) or + multilinepattern[0].match(line)): + # XXX: non-zero reset values need testing + assert reset == 0, repr(reset) + # split line on unquoted semicolons + line, semicolon_line = split_by_unquoted(line, ";") + while semicolon_line: + crackline(line, reset) + line, semicolon_line = split_by_unquoted(semicolon_line[1:], ";") + crackline(line, reset) + return + if reset < 0: + groupcounter = 0 + groupname = {groupcounter: ''} + groupcache = {groupcounter: {}} + grouplist = {groupcounter: []} + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['vars'] = {} + groupcache[groupcounter]['block'] = '' + groupcache[groupcounter]['name'] = '' + neededmodule = -1 + skipblocksuntil = -1 + return + if reset > 0: + fl = 0 + if f77modulename and neededmodule == groupcounter: + fl = 2 + while groupcounter > fl: + outmess('crackline: groupcounter=%s groupname=%s\n' % + (repr(groupcounter), repr(groupname))) + outmess( + 'crackline: Mismatch of blocks encountered. Trying to fix it by assuming "end" statement.\n') + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 + if f77modulename and neededmodule == groupcounter: + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 # end interface + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 # end module + neededmodule = -1 + return + if line == '': + return + flag = 0 + for pat in [dimensionpattern, externalpattern, intentpattern, optionalpattern, + requiredpattern, + parameterpattern, datapattern, publicpattern, privatepattern, + intrinsicpattern, + endifpattern, endpattern, + formatpattern, + beginpattern, functionpattern, subroutinepattern, + implicitpattern, typespattern, commonpattern, + callpattern, usepattern, containspattern, + entrypattern, + f2pyenhancementspattern, + multilinepattern, + moduleprocedurepattern + ]: + m = pat[0].match(line) + if m: + break + flag = flag + 1 + if not m: + re_1 = crackline_re_1 + if 0 <= skipblocksuntil <= groupcounter: + return + if 'externals' in groupcache[groupcounter]: + for name in groupcache[groupcounter]['externals']: + if name in invbadnames: + name = invbadnames[name] + if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']: + continue + m1 = re.match( + r'(?P[^"]*)\b%s\b\s*@\(@(?P[^@]*)@\)@.*\Z' % name, markouterparen(line), re.I) + if m1: + m2 = re_1.match(m1.group('before')) + a = _simplifyargs(m1.group('args')) + if m2: + line = f"callfun {name}({a}) result ({m2.group('result')})" + else: + line = f'callfun {name}({a})' + m = callfunpattern[0].match(line) + if not m: + outmess( + f'crackline: could not resolve function call for line={repr(line)}.\n') + return + analyzeline(m, 'callfun', line) + return + if verbose > 1 or (verbose == 1 and currentfilename.lower().endswith('.pyf')): + previous_context = None + outmess('crackline:%d: No pattern for line\n' % (groupcounter)) + return + elif pat[1] == 'end': + if 0 <= skipblocksuntil < groupcounter: + groupcounter = groupcounter - 1 + if skipblocksuntil <= groupcounter: + return + if groupcounter <= 0: + raise Exception('crackline: groupcounter(=%s) is nonpositive. ' + 'Check the blocks.' + % (groupcounter)) + m1 = beginpattern[0].match(line) + if (m1) and (not m1.group('this') == groupname[groupcounter]): + raise Exception('crackline: End group %s does not match with ' + 'previous Begin group %s\n\t%s' % + (repr(m1.group('this')), repr(groupname[groupcounter]), + filepositiontext) + ) + if skipblocksuntil == groupcounter: + skipblocksuntil = -1 + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 + if not skipemptyends: + expectbegin = 1 + elif pat[1] == 'begin': + if 0 <= skipblocksuntil <= groupcounter: + groupcounter = groupcounter + 1 + return + gotnextfile = 0 + analyzeline(m, pat[1], line) + expectbegin = 0 + elif pat[1] == 'endif': + pass + elif pat[1] == 'moduleprocedure': + analyzeline(m, pat[1], line) + elif pat[1] == 'contains': + if ignorecontains: + return + if 0 <= skipblocksuntil <= groupcounter: + return + skipblocksuntil = groupcounter + else: + if 0 <= skipblocksuntil <= groupcounter: + return + analyzeline(m, pat[1], line) + + +def markouterparen(line): + l = '' + f = 0 + for c in line: + if c == '(': + f = f + 1 + if f == 1: + l = l + '@(@' + continue + elif c == ')': + f = f - 1 + if f == 0: + l = l + '@)@' + continue + l = l + c + return l + + +def markoutercomma(line, comma=','): + l = '' + f = 0 + before, after = split_by_unquoted(line, comma + '()') + l += before + while after: + if (after[0] == comma) and (f == 0): + l += '@' + comma + '@' + else: + l += after[0] + if after[0] == '(': + f += 1 + elif after[0] == ')': + f -= 1 + before, after = split_by_unquoted(after[1:], comma + '()') + l += before + assert not f, repr((f, line, l)) + return l + +def unmarkouterparen(line): + r = line.replace('@(@', '(').replace('@)@', ')') + return r + + +def appenddecl(decl, decl2, force=1): + if not decl: + decl = {} + if not decl2: + return decl + if decl is decl2: + return decl + for k in list(decl2.keys()): + if k == 'typespec': + if force or k not in decl: + decl[k] = decl2[k] + elif k == 'attrspec': + for l in decl2[k]: + decl = setattrspec(decl, l, force) + elif k == 'kindselector': + decl = setkindselector(decl, decl2[k], force) + elif k == 'charselector': + decl = setcharselector(decl, decl2[k], force) + elif k in ['=', 'typename']: + if force or k not in decl: + decl[k] = decl2[k] + elif k == 'note': + pass + elif k in ['intent', 'check', 'dimension', 'optional', + 'required', 'depend']: + errmess(f'appenddecl: "{k}" not implemented.\n') + else: + raise Exception('appenddecl: Unknown variable definition key: ' + + str(k)) + return decl + + +selectpattern = re.compile( + r'\s*(?P(@\(@.*?@\)@|\*[\d*]+|\*\s*@\(@.*?@\)@|))(?P.*)\Z', re.I) +typedefpattern = re.compile( + r'(?:,(?P[\w(),]+))?(::)?(?P\b[a-z$_][\w$]*\b)' + r'(?:\((?P[\w,]*)\))?\Z', re.I) +nameargspattern = re.compile( + r'\s*(?P\b[\w$]+\b)\s*(@\(@\s*(?P[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P(?:(?!@\)@).)*)\s*@\)@))*\s*\Z', re.I) +operatorpattern = re.compile( + r'\s*(?P(operator|assignment))' + r'@\(@\s*(?P[^)]+)\s*@\)@\s*\Z', re.I) +callnameargspattern = re.compile( + r'\s*(?P\b[\w$]+\b)\s*@\(@\s*(?P.*)\s*@\)@\s*\Z', re.I) +real16pattern = re.compile( + r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]((?:[-+]?\d+)?)') +real8pattern = re.compile( + r'([-+]?((?:\d+(?:\.\d*)?|\d*\.\d+))[eE]((?:[-+]?\d+)?)|(\d+\.\d*))') + +_intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b', re.I) + + +def _is_intent_callback(vdecl): + for a in vdecl.get('attrspec', []): + if _intentcallbackpattern.match(a): + return 1 + return 0 + + +def _resolvetypedefpattern(line): + line = ''.join(line.split()) # removes whitespace + m1 = typedefpattern.match(line) + print(line, m1) + if m1: + attrs = m1.group('attributes') + attrs = [a.lower() for a in attrs.split(',')] if attrs else [] + return m1.group('name'), attrs, m1.group('params') + return None, [], None + +def parse_name_for_bind(line): + pattern = re.compile(r'bind\(\s*(?P[^,]+)(?:\s*,\s*name\s*=\s*["\'](?P[^"\']+)["\']\s*)?\)', re.I) + match = pattern.search(line) + bind_statement = None + if match: + bind_statement = match.group(0) + # Remove the 'bind' construct from the line. + line = line[:match.start()] + line[match.end():] + return line, bind_statement + +def _resolvenameargspattern(line): + line, bind_cname = parse_name_for_bind(line) + line = markouterparen(line) + m1 = nameargspattern.match(line) + if m1: + return m1.group('name'), m1.group('args'), m1.group('result'), bind_cname + m1 = operatorpattern.match(line) + if m1: + name = m1.group('scheme') + '(' + m1.group('name') + ')' + return name, [], None, None + m1 = callnameargspattern.match(line) + if m1: + return m1.group('name'), m1.group('args'), None, None + return None, [], None, None + + +def analyzeline(m, case, line): + """ + Reads each line in the input file in sequence and updates global vars. + + Effectively reads and collects information from the input file to the + global variable groupcache, a dictionary containing info about each part + of the fortran module. + + At the end of analyzeline, information is filtered into the correct dict + keys, but parameter values and dimensions are not yet interpreted. + """ + global groupcounter, groupname, groupcache, grouplist, filepositiontext + global currentfilename, f77modulename, neededinterface, neededmodule + global expectbegin, gotnextfile, previous_context + + block = m.group('this') + if case != 'multiline': + previous_context = None + if expectbegin and case not in ['begin', 'call', 'callfun', 'type'] \ + and not skipemptyends and groupcounter < 1: + newname = os.path.basename(currentfilename).split('.')[0] + outmess( + f'analyzeline: no group yet. Creating program group with name "{newname}".\n') + gotnextfile = 0 + groupcounter = groupcounter + 1 + groupname[groupcounter] = 'program' + groupcache[groupcounter] = {} + grouplist[groupcounter] = [] + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['vars'] = {} + groupcache[groupcounter]['block'] = 'program' + groupcache[groupcounter]['name'] = newname + groupcache[groupcounter]['from'] = 'fromsky' + expectbegin = 0 + if case in ['begin', 'call', 'callfun']: + # Crack line => block,name,args,result + block = block.lower() + if re.match(r'block\s*data', block, re.I): + block = 'block data' + elif re.match(r'python\s*module', block, re.I): + block = 'python module' + elif re.match(r'abstract\s*interface', block, re.I): + block = 'abstract interface' + if block == 'type': + name, attrs, _ = _resolvetypedefpattern(m.group('after')) + groupcache[groupcounter]['vars'][name] = {'attrspec': attrs} + args = [] + result = None + else: + name, args, result, bindcline = _resolvenameargspattern(m.group('after')) + if name is None: + if block == 'block data': + name = '_BLOCK_DATA_' + else: + name = '' + if block not in ['interface', 'block data', 'abstract interface']: + outmess('analyzeline: No name/args pattern found for line.\n') + + previous_context = (block, name, groupcounter) + if args: + args = rmbadname([x.strip() + for x in markoutercomma(args).split('@,@')]) + else: + args = [] + if '' in args: + while '' in args: + args.remove('') + outmess( + 'analyzeline: argument list is malformed (missing argument).\n') + + # end of crack line => block,name,args,result + needmodule = 0 + needinterface = 0 + + if case in ['call', 'callfun']: + needinterface = 1 + if 'args' not in groupcache[groupcounter]: + return + if name not in groupcache[groupcounter]['args']: + return + for it in grouplist[groupcounter]: + if it['name'] == name: + return + if name in groupcache[groupcounter]['interfaced']: + return + block = {'call': 'subroutine', 'callfun': 'function'}[case] + if f77modulename and neededmodule == -1 and groupcounter <= 1: + neededmodule = groupcounter + 2 + needmodule = 1 + if block not in ['interface', 'abstract interface']: + needinterface = 1 + # Create new block(s) + groupcounter = groupcounter + 1 + groupcache[groupcounter] = {} + grouplist[groupcounter] = [] + if needmodule: + if verbose > 1: + outmess('analyzeline: Creating module block %s\n' % + repr(f77modulename), 0) + groupname[groupcounter] = 'module' + groupcache[groupcounter]['block'] = 'python module' + groupcache[groupcounter]['name'] = f77modulename + groupcache[groupcounter]['from'] = '' + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['interfaced'] = [] + groupcache[groupcounter]['vars'] = {} + groupcounter = groupcounter + 1 + groupcache[groupcounter] = {} + grouplist[groupcounter] = [] + if needinterface: + if verbose > 1: + outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % ( + groupcounter), 0) + groupname[groupcounter] = 'interface' + groupcache[groupcounter]['block'] = 'interface' + groupcache[groupcounter]['name'] = 'unknown_interface' + groupcache[groupcounter]['from'] = '%s:%s' % ( + groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['interfaced'] = [] + groupcache[groupcounter]['vars'] = {} + groupcounter = groupcounter + 1 + groupcache[groupcounter] = {} + grouplist[groupcounter] = [] + groupname[groupcounter] = block + groupcache[groupcounter]['block'] = block + if not name: + name = 'unknown_' + block.replace(' ', '_') + groupcache[groupcounter]['prefix'] = m.group('before') + groupcache[groupcounter]['name'] = rmbadname1(name) + groupcache[groupcounter]['result'] = result + if groupcounter == 1: + groupcache[groupcounter]['from'] = currentfilename + elif f77modulename and groupcounter == 3: + groupcache[groupcounter]['from'] = '%s:%s' % ( + groupcache[groupcounter - 1]['from'], currentfilename) + else: + groupcache[groupcounter]['from'] = '%s:%s' % ( + groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) + for k in list(groupcache[groupcounter].keys()): + if not groupcache[groupcounter][k]: + del groupcache[groupcounter][k] + + groupcache[groupcounter]['args'] = args + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['interfaced'] = [] + groupcache[groupcounter]['vars'] = {} + groupcache[groupcounter]['entry'] = {} + # end of creation + if block == 'type': + groupcache[groupcounter]['varnames'] = [] + + if case in ['call', 'callfun']: # set parents variables + if name not in groupcache[groupcounter - 2]['externals']: + groupcache[groupcounter - 2]['externals'].append(name) + groupcache[groupcounter]['vars'] = copy.deepcopy( + groupcache[groupcounter - 2]['vars']) + try: + del groupcache[groupcounter]['vars'][name][ + groupcache[groupcounter]['vars'][name]['attrspec'].index('external')] + except Exception: + pass + if block in ['function', 'subroutine']: # set global attributes + # name is fortran name + if bindcline: + bindcdat = re.search(crackline_bindlang, bindcline) + if bindcdat: + groupcache[groupcounter]['bindlang'] = {name: {}} + groupcache[groupcounter]['bindlang'][name]["lang"] = bindcdat.group('lang') + if bindcdat.group('lang_name'): + groupcache[groupcounter]['bindlang'][name]["name"] = bindcdat.group('lang_name') + try: + groupcache[groupcounter]['vars'][name] = appenddecl( + groupcache[groupcounter]['vars'][name], groupcache[groupcounter - 2]['vars']['']) + except Exception: + pass + if case == 'callfun': # return type + if result and result in groupcache[groupcounter]['vars']: + if not name == result: + groupcache[groupcounter]['vars'][name] = appenddecl( + groupcache[groupcounter]['vars'][name], groupcache[groupcounter]['vars'][result]) + # if groupcounter>1: # name is interfaced + try: + groupcache[groupcounter - 2]['interfaced'].append(name) + except Exception: + pass + if block == 'function': + t = typespattern[0].match(m.group('before') + ' ' + name) + if t: + typespec, selector, attr, edecl = cracktypespec0( + t.group('this'), t.group('after')) + updatevars(typespec, selector, attr, edecl) + + if case in ['call', 'callfun']: + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 # end routine + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 # end interface + + elif case == 'entry': + name, args, result, _ = _resolvenameargspattern(m.group('after')) + if name is not None: + if args: + args = rmbadname([x.strip() + for x in markoutercomma(args).split('@,@')]) + else: + args = [] + assert result is None, repr(result) + groupcache[groupcounter]['entry'][name] = args + previous_context = ('entry', name, groupcounter) + elif case == 'type': + typespec, selector, attr, edecl = cracktypespec0( + block, m.group('after')) + last_name = updatevars(typespec, selector, attr, edecl) + if last_name is not None: + previous_context = ('variable', last_name, groupcounter) + elif case in ['dimension', 'intent', 'optional', 'required', 'external', 'public', 'private', 'intrinsic']: + edecl = groupcache[groupcounter]['vars'] + ll = m.group('after').strip() + i = ll.find('::') + if i < 0 and case == 'intent': + i = markouterparen(ll).find('@)@') - 2 + ll = ll[:i + 1] + '::' + ll[i + 1:] + i = ll.find('::') + if ll[i:] == '::' and 'args' in groupcache[groupcounter]: + outmess('All arguments will have attribute %s%s\n' % + (m.group('this'), ll[:i])) + ll = ll + ','.join(groupcache[groupcounter]['args']) + if i < 0: + i = 0 + pl = '' + else: + pl = ll[:i].strip() + ll = ll[i + 2:] + ch = markoutercomma(pl).split('@,@') + if len(ch) > 1: + pl = ch[0] + outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % ( + ','.join(ch[1:]))) + last_name = None + + for e in [x.strip() for x in markoutercomma(ll).split('@,@')]: + m1 = namepattern.match(e) + if not m1: + if case in ['public', 'private']: + k = '' + else: + print(m.groupdict()) + outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n' % ( + case, repr(e))) + continue + else: + k = rmbadname1(m1.group('name')) + if case in ['public', 'private'] and k in {'operator', 'assignment'}: + k += m1.group('after') + if k not in edecl: + edecl[k] = {} + if case == 'dimension': + ap = case + m1.group('after') + if case == 'intent': + ap = m.group('this') + pl + if _intentcallbackpattern.match(ap): + if k not in groupcache[groupcounter]['args']: + if groupcounter > 1: + if '__user__' not in groupcache[groupcounter - 2]['name']: + outmess( + 'analyzeline: missing __user__ module (could be nothing)\n') + # fixes ticket 1693 + if k != groupcache[groupcounter]['name']: + outmess('analyzeline: appending intent(callback) %s' + ' to %s arguments\n' % (k, groupcache[groupcounter]['name'])) + groupcache[groupcounter]['args'].append(k) + else: + errmess( + f'analyzeline: intent(callback) {k} is ignored\n') + else: + errmess('analyzeline: intent(callback) %s is already' + ' in argument list\n' % (k)) + if case in ['optional', 'required', 'public', 'external', 'private', 'intrinsic']: + ap = case + if 'attrspec' in edecl[k]: + edecl[k]['attrspec'].append(ap) + else: + edecl[k]['attrspec'] = [ap] + if case == 'external': + if groupcache[groupcounter]['block'] == 'program': + outmess('analyzeline: ignoring program arguments\n') + continue + if k not in groupcache[groupcounter]['args']: + continue + if 'externals' not in groupcache[groupcounter]: + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['externals'].append(k) + last_name = k + groupcache[groupcounter]['vars'] = edecl + if last_name is not None: + previous_context = ('variable', last_name, groupcounter) + elif case == 'moduleprocedure': + groupcache[groupcounter]['implementedby'] = \ + [x.strip() for x in m.group('after').split(',')] + elif case == 'parameter': + edecl = groupcache[groupcounter]['vars'] + ll = m.group('after').strip()[1:-1] + last_name = None + for e in markoutercomma(ll).split('@,@'): + try: + k, initexpr = [x.strip() for x in e.split('=')] + except Exception: + outmess( + f'analyzeline: could not extract name,expr in parameter statement "{e}" of "{ll}\"\n') + continue + params = get_parameters(edecl) + k = rmbadname1(k) + if k not in edecl: + edecl[k] = {} + if '=' in edecl[k] and (not edecl[k]['='] == initexpr): + outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n' % ( + k, edecl[k]['='], initexpr)) + t = determineexprtype(initexpr, params) + if t: + if t.get('typespec') == 'real': + tt = list(initexpr) + for m in real16pattern.finditer(initexpr): + tt[m.start():m.end()] = list( + initexpr[m.start():m.end()].lower().replace('d', 'e')) + initexpr = ''.join(tt) + elif t.get('typespec') == 'complex': + initexpr = initexpr[1:].lower().replace('d', 'e').\ + replace(',', '+1j*(') + try: + v = eval(initexpr, {}, params) + except (SyntaxError, NameError, TypeError) as msg: + errmess('analyzeline: Failed to evaluate %r. Ignoring: %s\n' + % (initexpr, msg)) + continue + edecl[k]['='] = repr(v) + if 'attrspec' in edecl[k]: + edecl[k]['attrspec'].append('parameter') + else: + edecl[k]['attrspec'] = ['parameter'] + last_name = k + groupcache[groupcounter]['vars'] = edecl + if last_name is not None: + previous_context = ('variable', last_name, groupcounter) + elif case == 'implicit': + if m.group('after').strip().lower() == 'none': + groupcache[groupcounter]['implicit'] = None + elif m.group('after'): + impl = groupcache[groupcounter].get('implicit', {}) + if impl is None: + outmess( + 'analyzeline: Overwriting earlier "implicit none" statement.\n') + impl = {} + for e in markoutercomma(m.group('after')).split('@,@'): + decl = {} + m1 = re.match( + r'\s*(?P.*?)\s*(\(\s*(?P[a-z-, ]+)\s*\)\s*|)\Z', e, re.I) + if not m1: + outmess( + f'analyzeline: could not extract info of implicit statement part "{e}\"\n') + continue + m2 = typespattern4implicit.match(m1.group('this')) + if not m2: + outmess( + f'analyzeline: could not extract types pattern of implicit statement part "{e}\"\n') + continue + typespec, selector, attr, edecl = cracktypespec0( + m2.group('this'), m2.group('after')) + kindselect, charselect, typename = cracktypespec( + typespec, selector) + decl['typespec'] = typespec + decl['kindselector'] = kindselect + decl['charselector'] = charselect + decl['typename'] = typename + for k in list(decl.keys()): + if not decl[k]: + del decl[k] + for r in markoutercomma(m1.group('after')).split('@,@'): + if '-' in r: + try: + begc, endc = [x.strip() for x in r.split('-')] + except Exception: + outmess( + f'analyzeline: expected "-" instead of "{r}" in range list of implicit statement\n') + continue + else: + begc = endc = r.strip() + if not len(begc) == len(endc) == 1: + outmess( + f'analyzeline: expected "-" instead of "{r}" in range list of implicit statement (2)\n') + continue + for o in range(ord(begc), ord(endc) + 1): + impl[chr(o)] = decl + groupcache[groupcounter]['implicit'] = impl + elif case == 'data': + ll = [] + dl = '' + il = '' + f = 0 + fc = 1 + inp = 0 + for c in m.group('after'): + if not inp: + if c == "'": + fc = not fc + if c == '/' and fc: + f = f + 1 + continue + if c == '(': + inp = inp + 1 + elif c == ')': + inp = inp - 1 + if f == 0: + dl = dl + c + elif f == 1: + il = il + c + elif f == 2: + dl = dl.strip() + if dl.startswith(','): + dl = dl[1:].strip() + ll.append([dl, il]) + dl = c + il = '' + f = 0 + if f == 2: + dl = dl.strip() + if dl.startswith(','): + dl = dl[1:].strip() + ll.append([dl, il]) + vars = groupcache[groupcounter].get('vars', {}) + last_name = None + for l in ll: + l[0], l[1] = l[0].strip().removeprefix(','), l[1].strip() + if l[0].startswith('('): + outmess(f'analyzeline: implied-DO list "{l[0]}" is not supported. Skipping.\n') + continue + for idx, v in enumerate(rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')])): + if v.startswith('('): + outmess(f'analyzeline: implied-DO list "{v}" is not supported. Skipping.\n') + # XXX: subsequent init expressions may get wrong values. + # Ignoring since data statements are irrelevant for + # wrapping. + continue + if '!' in l[1]: + # Fixes gh-24746 pyf generation + # XXX: This essentially ignores the value for generating the pyf which is fine: + # integer dimension(3) :: mytab + # common /mycom/ mytab + # Since in any case it is initialized in the Fortran code + outmess(f'Comment line in declaration "{l[1]}" is not supported. Skipping.\n') + continue + vars.setdefault(v, {}) + vtype = vars[v].get('typespec') + vdim = getdimension(vars[v]) + matches = re.findall(r"\(.*?\)", l[1]) if vtype == 'complex' else l[1].split(',') + try: + new_val = f"(/{', '.join(matches)}/)" if vdim else matches[idx] + except IndexError: + # gh-24746 + # Runs only if above code fails. Fixes the line + # DATA IVAR1, IVAR2, IVAR3, IVAR4, EVAR5 /4*0,0.0D0/ + # by expanding to ['0', '0', '0', '0', '0.0d0'] + if any("*" in m for m in matches): + expanded_list = [] + for match in matches: + if "*" in match: + try: + multiplier, value = match.split("*") + expanded_list.extend([value.strip()] * int(multiplier)) + except ValueError: # if int(multiplier) fails + expanded_list.append(match.strip()) + else: + expanded_list.append(match.strip()) + matches = expanded_list + new_val = f"(/{', '.join(matches)}/)" if vdim else matches[idx] + current_val = vars[v].get('=') + if current_val and (current_val != new_val): + outmess(f'analyzeline: changing init expression of "{v}" ("{current_val}") to "{new_val}\"\n') + vars[v]['='] = new_val + last_name = v + groupcache[groupcounter]['vars'] = vars + if last_name: + previous_context = ('variable', last_name, groupcounter) + elif case == 'common': + line = m.group('after').strip() + if not line[0] == '/': + line = '//' + line + + cl = [] + [_, bn, ol] = re.split('/', line, maxsplit=2) # noqa: RUF039 + bn = bn.strip() + if not bn: + bn = '_BLNK_' + cl.append([bn, ol]) + commonkey = {} + if 'common' in groupcache[groupcounter]: + commonkey = groupcache[groupcounter]['common'] + for c in cl: + if c[0] not in commonkey: + commonkey[c[0]] = [] + for i in [x.strip() for x in markoutercomma(c[1]).split('@,@')]: + if i: + commonkey[c[0]].append(i) + groupcache[groupcounter]['common'] = commonkey + previous_context = ('common', bn, groupcounter) + elif case == 'use': + m1 = re.match( + r'\A\s*(?P\b\w+\b)\s*((,(\s*\bonly\b\s*:|(?P))\s*(?P.*))|)\s*\Z', m.group('after'), re.I) + if m1: + mm = m1.groupdict() + if 'use' not in groupcache[groupcounter]: + groupcache[groupcounter]['use'] = {} + name = m1.group('name') + groupcache[groupcounter]['use'][name] = {} + isonly = 0 + if 'list' in mm and mm['list'] is not None: + if 'notonly' in mm and mm['notonly'] is None: + isonly = 1 + groupcache[groupcounter]['use'][name]['only'] = isonly + ll = [x.strip() for x in mm['list'].split(',')] + rl = {} + for l in ll: + if '=' in l: + m2 = re.match( + r'\A\s*(?P\b\w+\b)\s*=\s*>\s*(?P\b\w+\b)\s*\Z', l, re.I) + if m2: + rl[m2.group('local').strip()] = m2.group( + 'use').strip() + else: + outmess( + f'analyzeline: Not local=>use pattern found in {repr(l)}\n') + else: + rl[l] = l + groupcache[groupcounter]['use'][name]['map'] = rl + else: + print(m.groupdict()) + outmess('analyzeline: Could not crack the use statement.\n') + elif case in ['f2pyenhancements']: + if 'f2pyenhancements' not in groupcache[groupcounter]: + groupcache[groupcounter]['f2pyenhancements'] = {} + d = groupcache[groupcounter]['f2pyenhancements'] + if m.group('this') == 'usercode' and 'usercode' in d: + if isinstance(d['usercode'], str): + d['usercode'] = [d['usercode']] + d['usercode'].append(m.group('after')) + else: + d[m.group('this')] = m.group('after') + elif case == 'multiline': + if previous_context is None: + if verbose: + outmess('analyzeline: No context for multiline block.\n') + return + gc = groupcounter + appendmultiline(groupcache[gc], + previous_context[:2], + m.group('this')) + elif verbose > 1: + print(m.groupdict()) + outmess('analyzeline: No code implemented for line.\n') + + +def appendmultiline(group, context_name, ml): + if 'f2pymultilines' not in group: + group['f2pymultilines'] = {} + d = group['f2pymultilines'] + if context_name not in d: + d[context_name] = [] + d[context_name].append(ml) + + +def cracktypespec0(typespec, ll): + selector = None + attr = None + if re.match(r'double\s*complex', typespec, re.I): + typespec = 'double complex' + elif re.match(r'double\s*precision', typespec, re.I): + typespec = 'double precision' + else: + typespec = typespec.strip().lower() + m1 = selectpattern.match(markouterparen(ll)) + if not m1: + outmess( + 'cracktypespec0: no kind/char_selector pattern found for line.\n') + return + d = m1.groupdict() + for k in list(d.keys()): + d[k] = unmarkouterparen(d[k]) + if typespec in ['complex', 'integer', 'logical', 'real', 'character', 'type']: + selector = d['this'] + ll = d['after'] + i = ll.find('::') + if i >= 0: + attr = ll[:i].strip() + ll = ll[i + 2:] + return typespec, selector, attr, ll + + +##### +namepattern = re.compile(r'\s*(?P\b\w+\b)\s*(?P.*)\s*\Z', re.I) +kindselector = re.compile( + r'\s*(\(\s*(kind\s*=)?\s*(?P.*)\s*\)|\*\s*(?P.*?))\s*\Z', re.I) +charselector = re.compile( + r'\s*(\((?P.*)\)|\*\s*(?P.*))\s*\Z', re.I) +lenkindpattern = re.compile( + r'\s*(kind\s*=\s*(?P.*?)\s*(@,@\s*len\s*=\s*(?P.*)|)' + r'|(len\s*=\s*|)(?P.*?)\s*(@,@\s*(kind\s*=\s*|)(?P.*)' + r'|(f2py_len\s*=\s*(?P.*))|))\s*\Z', re.I) +lenarraypattern = re.compile( + r'\s*(@\(@\s*(?!/)\s*(?P.*?)\s*@\)@\s*\*\s*(?P.*?)|(\*\s*(?P.*?)|)\s*(@\(@\s*(?!/)\s*(?P.*?)\s*@\)@|))\s*(=\s*(?P.*?)|(@\(@|)/\s*(?P.*?)\s*/(@\)@|)|)\s*\Z', re.I) + + +def removespaces(expr): + expr = expr.strip() + if len(expr) <= 1: + return expr + expr2 = expr[0] + for i in range(1, len(expr) - 1): + if (expr[i] == ' ' and + ((expr[i + 1] in "()[]{}=+-/* ") or + (expr[i - 1] in "()[]{}=+-/* "))): + continue + expr2 = expr2 + expr[i] + expr2 = expr2 + expr[-1] + return expr2 + + +def markinnerspaces(line): + """ + The function replace all spaces in the input variable line which are + surrounded with quotation marks, with the triplet "@_@". + + For instance, for the input "a 'b c'" the function returns "a 'b@_@c'" + + Parameters + ---------- + line : str + + Returns + ------- + str + + """ + fragment = '' + inside = False + current_quote = None + escaped = '' + for c in line: + if escaped == '\\' and c in ['\\', '\'', '"']: + fragment += c + escaped = c + continue + if not inside and c in ['\'', '"']: + current_quote = c + if c == current_quote: + inside = not inside + elif c == ' ' and inside: + fragment += '@_@' + continue + fragment += c + escaped = c # reset to non-backslash + return fragment + + +def updatevars(typespec, selector, attrspec, entitydecl): + """ + Returns last_name, the variable name without special chars, parenthesis + or dimension specifiers. + + Alters groupcache to add the name, typespec, attrspec (and possibly value) + of current variable. + """ + global groupcache, groupcounter + + last_name = None + kindselect, charselect, typename = cracktypespec(typespec, selector) + # Clean up outer commas, whitespace and undesired chars from attrspec + if attrspec: + attrspec = [x.strip() for x in markoutercomma(attrspec).split('@,@')] + l = [] + c = re.compile(r'(?P[a-zA-Z]+)') + for a in attrspec: + if not a: + continue + m = c.match(a) + if m: + s = m.group('start').lower() + a = s + a[len(s):] + l.append(a) + attrspec = l + el = [x.strip() for x in markoutercomma(entitydecl).split('@,@')] + el1 = [] + for e in el: + for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)), comma=' ').split('@ @')]: + if e1: + el1.append(e1.replace('@_@', ' ')) + for e in el1: + m = namepattern.match(e) + if not m: + outmess( + f'updatevars: no name pattern found for entity={repr(e)}. Skipping.\n') + continue + ename = rmbadname1(m.group('name')) + edecl = {} + if ename in groupcache[groupcounter]['vars']: + edecl = groupcache[groupcounter]['vars'][ename].copy() + not_has_typespec = 'typespec' not in edecl + if not_has_typespec: + edecl['typespec'] = typespec + elif typespec and (not typespec == edecl['typespec']): + outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". Ignoring.\n' % ( + ename, edecl['typespec'], typespec)) + if 'kindselector' not in edecl: + edecl['kindselector'] = copy.copy(kindselect) + elif kindselect: + for k in list(kindselect.keys()): + if k in edecl['kindselector'] and (not kindselect[k] == edecl['kindselector'][k]): + outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % ( + k, ename, edecl['kindselector'][k], kindselect[k])) + else: + edecl['kindselector'][k] = copy.copy(kindselect[k]) + if 'charselector' not in edecl and charselect: + if not_has_typespec: + edecl['charselector'] = charselect + else: + errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n' + % (ename, charselect)) + elif charselect: + for k in list(charselect.keys()): + if k in edecl['charselector'] and (not charselect[k] == edecl['charselector'][k]): + outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % ( + k, ename, edecl['charselector'][k], charselect[k])) + else: + edecl['charselector'][k] = copy.copy(charselect[k]) + if 'typename' not in edecl: + edecl['typename'] = typename + elif typename and (not edecl['typename'] == typename): + outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". Ignoring.\n' % ( + ename, edecl['typename'], typename)) + if 'attrspec' not in edecl: + edecl['attrspec'] = copy.copy(attrspec) + elif attrspec: + for a in attrspec: + if a not in edecl['attrspec']: + edecl['attrspec'].append(a) + else: + edecl['typespec'] = copy.copy(typespec) + edecl['kindselector'] = copy.copy(kindselect) + edecl['charselector'] = copy.copy(charselect) + edecl['typename'] = typename + edecl['attrspec'] = copy.copy(attrspec) + if 'external' in (edecl.get('attrspec') or []) and e in groupcache[groupcounter]['args']: + if 'externals' not in groupcache[groupcounter]: + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['externals'].append(e) + if m.group('after'): + m1 = lenarraypattern.match(markouterparen(m.group('after'))) + if m1: + d1 = m1.groupdict() + for lk in ['len', 'array', 'init']: + if d1[lk + '2'] is not None: + d1[lk] = d1[lk + '2'] + del d1[lk + '2'] + for k in list(d1.keys()): + if d1[k] is not None: + d1[k] = unmarkouterparen(d1[k]) + else: + del d1[k] + + if 'len' in d1 and 'array' in d1: + if d1['len'] == '': + d1['len'] = d1['array'] + del d1['array'] + elif typespec == 'character': + if ('charselector' not in edecl) or (not edecl['charselector']): + edecl['charselector'] = {} + if 'len' in edecl['charselector']: + del edecl['charselector']['len'] + edecl['charselector']['*'] = d1['len'] + del d1['len'] + else: + d1['array'] = d1['array'] + ',' + d1['len'] + del d1['len'] + errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n' % ( + typespec, e, typespec, ename, d1['array'])) + + if 'len' in d1: + if typespec in ['complex', 'integer', 'logical', 'real']: + if ('kindselector' not in edecl) or (not edecl['kindselector']): + edecl['kindselector'] = {} + edecl['kindselector']['*'] = d1['len'] + del d1['len'] + elif typespec == 'character': + if ('charselector' not in edecl) or (not edecl['charselector']): + edecl['charselector'] = {} + if 'len' in edecl['charselector']: + del edecl['charselector']['len'] + edecl['charselector']['*'] = d1['len'] + del d1['len'] + + if 'init' in d1: + if '=' in edecl and (not edecl['='] == d1['init']): + outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % ( + ename, edecl['='], d1['init'])) + else: + edecl['='] = d1['init'] + + if 'array' in d1: + dm = f"dimension({d1['array']})" + if 'attrspec' not in edecl or (not edecl['attrspec']): + edecl['attrspec'] = [dm] + else: + edecl['attrspec'].append(dm) + for dm1 in edecl['attrspec']: + if dm1[:9] == 'dimension' and dm1 != dm: + del edecl['attrspec'][-1] + errmess('updatevars:%s: attempt to change %r to %r. Ignoring.\n' + % (ename, dm1, dm)) + break + + else: + outmess('updatevars: could not crack entity declaration "%s". Ignoring.\n' % ( + ename + m.group('after'))) + for k in list(edecl.keys()): + if not edecl[k]: + del edecl[k] + groupcache[groupcounter]['vars'][ename] = edecl + if 'varnames' in groupcache[groupcounter]: + groupcache[groupcounter]['varnames'].append(ename) + last_name = ename + return last_name + + +def cracktypespec(typespec, selector): + kindselect = None + charselect = None + typename = None + if selector: + if typespec in ['complex', 'integer', 'logical', 'real']: + kindselect = kindselector.match(selector) + if not kindselect: + outmess( + f'cracktypespec: no kindselector pattern found for {repr(selector)}\n') + return + kindselect = kindselect.groupdict() + kindselect['*'] = kindselect['kind2'] + del kindselect['kind2'] + for k in list(kindselect.keys()): + if not kindselect[k]: + del kindselect[k] + for k, i in list(kindselect.items()): + kindselect[k] = rmbadname1(i) + elif typespec == 'character': + charselect = charselector.match(selector) + if not charselect: + outmess( + f'cracktypespec: no charselector pattern found for {repr(selector)}\n') + return + charselect = charselect.groupdict() + charselect['*'] = charselect['charlen'] + del charselect['charlen'] + if charselect['lenkind']: + lenkind = lenkindpattern.match( + markoutercomma(charselect['lenkind'])) + lenkind = lenkind.groupdict() + for lk in ['len', 'kind']: + if lenkind[lk + '2']: + lenkind[lk] = lenkind[lk + '2'] + charselect[lk] = lenkind[lk] + del lenkind[lk + '2'] + if lenkind['f2py_len'] is not None: + # used to specify the length of assumed length strings + charselect['f2py_len'] = lenkind['f2py_len'] + del charselect['lenkind'] + for k in list(charselect.keys()): + if not charselect[k]: + del charselect[k] + for k, i in list(charselect.items()): + charselect[k] = rmbadname1(i) + elif typespec == 'type': + typename = re.match(r'\s*\(\s*(?P\w+)\s*\)', selector, re.I) + if typename: + typename = typename.group('name') + else: + outmess('cracktypespec: no typename found in %s\n' % + (repr(typespec + selector))) + else: + outmess(f'cracktypespec: no selector used for {repr(selector)}\n') + return kindselect, charselect, typename +###### + + +def setattrspec(decl, attr, force=0): + if not decl: + decl = {} + if not attr: + return decl + if 'attrspec' not in decl: + decl['attrspec'] = [attr] + return decl + if force: + decl['attrspec'].append(attr) + if attr in decl['attrspec']: + return decl + if attr == 'static' and 'automatic' not in decl['attrspec']: + decl['attrspec'].append(attr) + elif attr == 'automatic' and 'static' not in decl['attrspec']: + decl['attrspec'].append(attr) + elif attr == 'public': + if 'private' not in decl['attrspec']: + decl['attrspec'].append(attr) + elif attr == 'private': + if 'public' not in decl['attrspec']: + decl['attrspec'].append(attr) + else: + decl['attrspec'].append(attr) + return decl + + +def setkindselector(decl, sel, force=0): + if not decl: + decl = {} + if not sel: + return decl + if 'kindselector' not in decl: + decl['kindselector'] = sel + return decl + for k in list(sel.keys()): + if force or k not in decl['kindselector']: + decl['kindselector'][k] = sel[k] + return decl + + +def setcharselector(decl, sel, force=0): + if not decl: + decl = {} + if not sel: + return decl + if 'charselector' not in decl: + decl['charselector'] = sel + return decl + + for k in list(sel.keys()): + if force or k not in decl['charselector']: + decl['charselector'][k] = sel[k] + return decl + + +def getblockname(block, unknown='unknown'): + if 'name' in block: + return block['name'] + return unknown + +# post processing + + +def setmesstext(block): + global filepositiontext + + try: + filepositiontext = f"In: {block['from']}:{block['name']}\n" + except Exception: + pass + + +def get_usedict(block): + usedict = {} + if 'parent_block' in block: + usedict = get_usedict(block['parent_block']) + if 'use' in block: + usedict.update(block['use']) + return usedict + + +def get_useparameters(block, param_map=None): + global f90modulevars + + if param_map is None: + param_map = {} + usedict = get_usedict(block) + if not usedict: + return param_map + for usename, mapping in list(usedict.items()): + usename = usename.lower() + if usename not in f90modulevars: + outmess('get_useparameters: no module %s info used by %s\n' % + (usename, block.get('name'))) + continue + mvars = f90modulevars[usename] + params = get_parameters(mvars) + if not params: + continue + # XXX: apply mapping + if mapping: + errmess(f'get_useparameters: mapping for {mapping} not impl.\n') + for k, v in list(params.items()): + if k in param_map: + outmess('get_useparameters: overriding parameter %s with' + ' value from module %s\n' % (repr(k), repr(usename))) + param_map[k] = v + + return param_map + + +def postcrack2(block, tab='', param_map=None): + global f90modulevars + + if not f90modulevars: + return block + if isinstance(block, list): + ret = [postcrack2(g, tab=tab + '\t', param_map=param_map) + for g in block] + return ret + setmesstext(block) + outmess(f"{tab}Block: {block['name']}\n", 0) + + if param_map is None: + param_map = get_useparameters(block) + + if param_map is not None and 'vars' in block: + vars = block['vars'] + for n in list(vars.keys()): + var = vars[n] + if 'kindselector' in var: + kind = var['kindselector'] + if 'kind' in kind: + val = kind['kind'] + if val in param_map: + kind['kind'] = param_map[val] + new_body = [postcrack2(b, tab=tab + '\t', param_map=param_map) + for b in block['body']] + block['body'] = new_body + + return block + + +def postcrack(block, args=None, tab=''): + """ + TODO: + function return values + determine expression types if in argument list + """ + global usermodules, onlyfunctions + + if isinstance(block, list): + gret = [] + uret = [] + for g in block: + setmesstext(g) + g = postcrack(g, tab=tab + '\t') + # sort user routines to appear first + if 'name' in g and '__user__' in g['name']: + uret.append(g) + else: + gret.append(g) + return uret + gret + setmesstext(block) + if not isinstance(block, dict) and 'block' not in block: + raise Exception('postcrack: Expected block dictionary instead of ' + + str(block)) + if 'name' in block and not block['name'] == 'unknown_interface': + outmess(f"{tab}Block: {block['name']}\n", 0) + block = analyzeargs(block) + block = analyzecommon(block) + block['vars'] = analyzevars(block) + block['sortvars'] = sortvarnames(block['vars']) + if block.get('args'): + args = block['args'] + block['body'] = analyzebody(block, args, tab=tab) + + userisdefined = [] + if 'use' in block: + useblock = block['use'] + for k in list(useblock.keys()): + if '__user__' in k: + userisdefined.append(k) + else: + useblock = {} + name = '' + if 'name' in block: + name = block['name'] + # and not userisdefined: # Build a __user__ module + if block.get('externals'): + interfaced = [] + if 'interfaced' in block: + interfaced = block['interfaced'] + mvars = copy.copy(block['vars']) + if name: + mname = name + '__user__routines' + else: + mname = 'unknown__user__routines' + if mname in userisdefined: + i = 1 + while f"{mname}_{i}" in userisdefined: + i = i + 1 + mname = f"{mname}_{i}" + interface = {'block': 'interface', 'body': [], + 'vars': {}, 'name': name + '_user_interface'} + for e in block['externals']: + if e in interfaced: + edef = [] + j = -1 + for b in block['body']: + j = j + 1 + if b['block'] == 'interface': + i = -1 + for bb in b['body']: + i = i + 1 + if 'name' in bb and bb['name'] == e: + edef = copy.copy(bb) + del b['body'][i] + break + if edef: + if not b['body']: + del block['body'][j] + del interfaced[interfaced.index(e)] + break + interface['body'].append(edef) + elif e in mvars and not isexternal(mvars[e]): + interface['vars'][e] = mvars[e] + if interface['vars'] or interface['body']: + block['interfaced'] = interfaced + mblock = {'block': 'python module', 'body': [ + interface], 'vars': {}, 'name': mname, 'interfaced': block['externals']} + useblock[mname] = {} + usermodules.append(mblock) + if useblock: + block['use'] = useblock + return block + + +def sortvarnames(vars): + indep = [] + dep = [] + for v in list(vars.keys()): + if 'depend' in vars[v] and vars[v]['depend']: + dep.append(v) + else: + indep.append(v) + n = len(dep) + i = 0 + while dep: # XXX: How to catch dependence cycles correctly? + v = dep[0] + fl = 0 + for w in dep[1:]: + if w in vars[v]['depend']: + fl = 1 + break + if fl: + dep = dep[1:] + [v] + i = i + 1 + if i > n: + errmess('sortvarnames: failed to compute dependencies because' + ' of cyclic dependencies between ' + + ', '.join(dep) + '\n') + indep = indep + dep + break + else: + indep.append(v) + dep = dep[1:] + n = len(dep) + i = 0 + return indep + + +def analyzecommon(block): + if not hascommon(block): + return block + commonvars = [] + for k in list(block['common'].keys()): + comvars = [] + for e in block['common'][k]: + m = re.match( + r'\A\s*\b(?P.*?)\b\s*(\((?P.*?)\)|)\s*\Z', e, re.I) + if m: + dims = [] + if m.group('dims'): + dims = [x.strip() + for x in markoutercomma(m.group('dims')).split('@,@')] + n = rmbadname1(m.group('name').strip()) + if n in block['vars']: + if 'attrspec' in block['vars'][n]: + block['vars'][n]['attrspec'].append( + f"dimension({','.join(dims)})") + else: + block['vars'][n]['attrspec'] = [ + f"dimension({','.join(dims)})"] + elif dims: + block['vars'][n] = { + 'attrspec': [f"dimension({','.join(dims)})"]} + else: + block['vars'][n] = {} + if n not in commonvars: + commonvars.append(n) + else: + n = e + errmess( + f'analyzecommon: failed to extract "[()]" from "{e}" in common /{k}/.\n') + comvars.append(n) + block['common'][k] = comvars + if 'commonvars' not in block: + block['commonvars'] = commonvars + else: + block['commonvars'] = block['commonvars'] + commonvars + return block + + +def analyzebody(block, args, tab=''): + global usermodules, skipfuncs, onlyfuncs, f90modulevars + + setmesstext(block) + + maybe_private = { + key: value + for key, value in block['vars'].items() + if 'attrspec' not in value or 'public' not in value['attrspec'] + } + + body = [] + for b in block['body']: + b['parent_block'] = block + if b['block'] in ['function', 'subroutine']: + if args is not None and b['name'] not in args: + continue + else: + as_ = b['args'] + # Add private members to skipfuncs for gh-23879 + if b['name'] in maybe_private.keys(): + skipfuncs.append(b['name']) + if b['name'] in skipfuncs: + continue + if onlyfuncs and b['name'] not in onlyfuncs: + continue + b['saved_interface'] = crack2fortrangen( + b, '\n' + ' ' * 6, as_interface=True) + + else: + as_ = args + b = postcrack(b, as_, tab=tab + '\t') + if b['block'] in ['interface', 'abstract interface'] and \ + not b['body'] and not b.get('implementedby'): + if 'f2pyenhancements' not in b: + continue + if b['block'].replace(' ', '') == 'pythonmodule': + usermodules.append(b) + else: + if b['block'] == 'module': + f90modulevars[b['name']] = b['vars'] + body.append(b) + return body + + +def buildimplicitrules(block): + setmesstext(block) + implicitrules = defaultimplicitrules + attrrules = {} + if 'implicit' in block: + if block['implicit'] is None: + implicitrules = None + if verbose > 1: + outmess( + f"buildimplicitrules: no implicit rules for routine {repr(block['name'])}.\n") + else: + for k in list(block['implicit'].keys()): + if block['implicit'][k].get('typespec') not in ['static', 'automatic']: + implicitrules[k] = block['implicit'][k] + else: + attrrules[k] = block['implicit'][k]['typespec'] + return implicitrules, attrrules + + +def myeval(e, g=None, l=None): + """ Like `eval` but returns only integers and floats """ + r = eval(e, g, l) + if type(r) in [int, float]: + return r + raise ValueError(f'r={r!r}') + + +getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z', re.I) + + +def getlincoef(e, xset): # e = a*x+b ; x in xset + """ + Obtain ``a`` and ``b`` when ``e == "a*x+b"``, where ``x`` is a symbol in + xset. + + >>> getlincoef('2*x + 1', {'x'}) + (2, 1, 'x') + >>> getlincoef('3*x + x*2 + 2 + 1', {'x'}) + (5, 3, 'x') + >>> getlincoef('0', {'x'}) + (0, 0, None) + >>> getlincoef('0*x', {'x'}) + (0, 0, 'x') + >>> getlincoef('x*x', {'x'}) + (None, None, None) + + This can be tricked by sufficiently complex expressions + + >>> getlincoef('(x - 0.5)*(x - 1.5)*(x - 1)*x + 2*x + 3', {'x'}) + (2.0, 3.0, 'x') + """ + try: + c = int(myeval(e, {}, {})) + return 0, c, None + except Exception: + pass + if getlincoef_re_1.match(e): + return 1, 0, e + len_e = len(e) + for x in xset: + if len(x) > len_e: + continue + if re.search(r'\w\s*\([^)]*\b' + x + r'\b', e): + # skip function calls having x as an argument, e.g max(1, x) + continue + re_1 = re.compile(r'(?P.*?)\b' + x + r'\b(?P.*)', re.I) + m = re_1.match(e) + if m: + try: + m1 = re_1.match(e) + while m1: + ee = f"{m1.group('before')}({0}){m1.group('after')}" + m1 = re_1.match(ee) + b = myeval(ee, {}, {}) + m1 = re_1.match(e) + while m1: + ee = f"{m1.group('before')}({1}){m1.group('after')}" + m1 = re_1.match(ee) + a = myeval(ee, {}, {}) - b + m1 = re_1.match(e) + while m1: + ee = f"{m1.group('before')}({0.5}){m1.group('after')}" + m1 = re_1.match(ee) + c = myeval(ee, {}, {}) + # computing another point to be sure that expression is linear + m1 = re_1.match(e) + while m1: + ee = f"{m1.group('before')}({1.5}){m1.group('after')}" + m1 = re_1.match(ee) + c2 = myeval(ee, {}, {}) + if (a * 0.5 + b == c and a * 1.5 + b == c2): + return a, b, x + except Exception: + pass + break + return None, None, None + + +word_pattern = re.compile(r'\b[a-z][\w$]*\b', re.I) + + +def _get_depend_dict(name, vars, deps): + if name in vars: + words = vars[name].get('depend', []) + + if '=' in vars[name] and not isstring(vars[name]): + for word in word_pattern.findall(vars[name]['=']): + # The word_pattern may return values that are not + # only variables, they can be string content for instance + if word not in words and word in vars and word != name: + words.append(word) + for word in words[:]: + for w in deps.get(word, []) \ + or _get_depend_dict(word, vars, deps): + if w not in words: + words.append(w) + else: + outmess(f'_get_depend_dict: no dependence info for {repr(name)}\n') + words = [] + deps[name] = words + return words + + +def _calc_depend_dict(vars): + names = list(vars.keys()) + depend_dict = {} + for n in names: + _get_depend_dict(n, vars, depend_dict) + return depend_dict + + +def get_sorted_names(vars): + depend_dict = _calc_depend_dict(vars) + names = [] + for name in list(depend_dict.keys()): + if not depend_dict[name]: + names.append(name) + del depend_dict[name] + while depend_dict: + for name, lst in list(depend_dict.items()): + new_lst = [n for n in lst if n in depend_dict] + if not new_lst: + names.append(name) + del depend_dict[name] + else: + depend_dict[name] = new_lst + return [name for name in names if name in vars] + + +def _kind_func(string): + # XXX: return something sensible. + if string[0] in "'\"": + string = string[1:-1] + if real16pattern.match(string): + return 8 + elif real8pattern.match(string): + return 4 + return 'kind(' + string + ')' + + +def _selected_int_kind_func(r): + # XXX: This should be processor dependent + m = 10 ** r + if m <= 2 ** 8: + return 1 + if m <= 2 ** 16: + return 2 + if m <= 2 ** 32: + return 4 + if m <= 2 ** 63: + return 8 + if m <= 2 ** 128: + return 16 + return -1 + + +def _selected_real_kind_func(p, r=0, radix=0): + # XXX: This should be processor dependent + # This is only verified for 0 <= p <= 20, possibly good for p <= 33 and above + if p < 7: + return 4 + if p < 16: + return 8 + machine = platform.machine().lower() + if machine.startswith(('aarch64', 'alpha', 'arm64', 'loongarch', 'mips', 'power', 'ppc', 'riscv', 's390x', 'sparc')): + if p <= 33: + return 16 + elif p < 19: + return 10 + elif p <= 33: + return 16 + return -1 + + +def get_parameters(vars, global_params={}): + params = copy.copy(global_params) + g_params = copy.copy(global_params) + for name, func in [('kind', _kind_func), + ('selected_int_kind', _selected_int_kind_func), + ('selected_real_kind', _selected_real_kind_func), ]: + if name not in g_params: + g_params[name] = func + param_names = [] + for n in get_sorted_names(vars): + if 'attrspec' in vars[n] and 'parameter' in vars[n]['attrspec']: + param_names.append(n) + kind_re = re.compile(r'\bkind\s*\(\s*(?P.*)\s*\)', re.I) + selected_int_kind_re = re.compile( + r'\bselected_int_kind\s*\(\s*(?P.*)\s*\)', re.I) + selected_kind_re = re.compile( + r'\bselected_(int|real)_kind\s*\(\s*(?P.*)\s*\)', re.I) + for n in param_names: + if '=' in vars[n]: + v = vars[n]['='] + if islogical(vars[n]): + v = v.lower() + for repl in [ + ('.false.', 'False'), + ('.true.', 'True'), + # TODO: test .eq., .neq., etc replacements. + ]: + v = v.replace(*repl) + + v = kind_re.sub(r'kind("\1")', v) + v = selected_int_kind_re.sub(r'selected_int_kind(\1)', v) + + # We need to act according to the data. + # The easy case is if the data has a kind-specifier, + # then we may easily remove those specifiers. + # However, it may be that the user uses other specifiers...(!) + is_replaced = False + + if 'kindselector' in vars[n]: + # Remove kind specifier (including those defined + # by parameters) + if 'kind' in vars[n]['kindselector']: + orig_v_len = len(v) + v = v.replace('_' + vars[n]['kindselector']['kind'], '') + # Again, this will be true if even a single specifier + # has been replaced, see comment above. + is_replaced = len(v) < orig_v_len + + if not is_replaced: + if not selected_kind_re.match(v): + v_ = v.split('_') + # In case there are additive parameters + if len(v_) > 1: + v = ''.join(v_[:-1]).lower().replace(v_[-1].lower(), '') + + # Currently this will not work for complex numbers. + # There is missing code for extracting a complex number, + # which may be defined in either of these: + # a) (Re, Im) + # b) cmplx(Re, Im) + # c) dcmplx(Re, Im) + # d) cmplx(Re, Im, ) + + if isdouble(vars[n]): + tt = list(v) + for m in real16pattern.finditer(v): + tt[m.start():m.end()] = list( + v[m.start():m.end()].lower().replace('d', 'e')) + v = ''.join(tt) + + elif iscomplex(vars[n]): + outmess(f'get_parameters[TODO]: ' + f'implement evaluation of complex expression {v}\n') + + dimspec = ([s.removeprefix('dimension').strip() + for s in vars[n]['attrspec'] + if s.startswith('dimension')] or [None])[0] + + # Handle _dp for gh-6624 + # Also fixes gh-20460 + if real16pattern.search(v): + v = 8 + elif real8pattern.search(v): + v = 4 + try: + params[n] = param_eval(v, g_params, params, dimspec=dimspec) + except Exception as msg: + params[n] = v + outmess(f'get_parameters: got "{msg}" on {n!r}\n') + + if isstring(vars[n]) and isinstance(params[n], int): + params[n] = chr(params[n]) + nl = n.lower() + if nl != n: + params[nl] = params[n] + else: + print(vars[n]) + outmess(f'get_parameters:parameter {n!r} does not have value?!\n') + return params + + +def _eval_length(length, params): + if length in ['(:)', '(*)', '*']: + return '(*)' + return _eval_scalar(length, params) + + +_is_kind_number = re.compile(r'\d+_').match + + +def _eval_scalar(value, params): + if _is_kind_number(value): + value = value.split('_')[0] + try: + # TODO: use symbolic from PR #19805 + value = eval(value, {}, params) + value = (repr if isinstance(value, str) else str)(value) + except (NameError, SyntaxError, TypeError): + return value + except Exception as msg: + errmess('"%s" in evaluating %r ' + '(available names: %s)\n' + % (msg, value, list(params.keys()))) + return value + + +def analyzevars(block): + """ + Sets correct dimension information for each variable/parameter + """ + + global f90modulevars + + setmesstext(block) + implicitrules, attrrules = buildimplicitrules(block) + vars = copy.copy(block['vars']) + if block['block'] == 'function' and block['name'] not in vars: + vars[block['name']] = {} + if '' in block['vars']: + del vars[''] + if 'attrspec' in block['vars']['']: + gen = block['vars']['']['attrspec'] + for n in set(vars) | {b['name'] for b in block['body']}: + for k in ['public', 'private']: + if k in gen: + vars[n] = setattrspec(vars.get(n, {}), k) + svars = [] + args = block['args'] + for a in args: + try: + vars[a] + svars.append(a) + except KeyError: + pass + for n in list(vars.keys()): + if n not in args: + svars.append(n) + + params = get_parameters(vars, get_useparameters(block)) + # At this point, params are read and interpreted, but + # the params used to define vars are not yet parsed + dep_matches = {} + name_match = re.compile(r'[A-Za-z][\w$]*').match + for v in list(vars.keys()): + m = name_match(v) + if m: + n = v[m.start():m.end()] + try: + dep_matches[n] + except KeyError: + dep_matches[n] = re.compile(r'.*\b%s\b' % (v), re.I).match + for n in svars: + if n[0] in list(attrrules.keys()): + vars[n] = setattrspec(vars[n], attrrules[n[0]]) + if 'typespec' not in vars[n]: + if not ('attrspec' in vars[n] and 'external' in vars[n]['attrspec']): + if implicitrules: + ln0 = n[0].lower() + for k in list(implicitrules[ln0].keys()): + if k == 'typespec' and implicitrules[ln0][k] == 'undefined': + continue + if k not in vars[n]: + vars[n][k] = implicitrules[ln0][k] + elif k == 'attrspec': + for l in implicitrules[ln0][k]: + vars[n] = setattrspec(vars[n], l) + elif n in block['args']: + outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n' % ( + repr(n), block['name'])) + if 'charselector' in vars[n]: + if 'len' in vars[n]['charselector']: + l = vars[n]['charselector']['len'] + try: + l = str(eval(l, {}, params)) + except Exception: + pass + vars[n]['charselector']['len'] = l + + if 'kindselector' in vars[n]: + if 'kind' in vars[n]['kindselector']: + l = vars[n]['kindselector']['kind'] + try: + l = str(eval(l, {}, params)) + except Exception: + pass + vars[n]['kindselector']['kind'] = l + + dimension_exprs = {} + if 'attrspec' in vars[n]: + attr = vars[n]['attrspec'] + attr.reverse() + vars[n]['attrspec'] = [] + dim, intent, depend, check, note = None, None, None, None, None + for a in attr: + if a[:9] == 'dimension': + dim = (a[9:].strip())[1:-1] + elif a[:6] == 'intent': + intent = (a[6:].strip())[1:-1] + elif a[:6] == 'depend': + depend = (a[6:].strip())[1:-1] + elif a[:5] == 'check': + check = (a[5:].strip())[1:-1] + elif a[:4] == 'note': + note = (a[4:].strip())[1:-1] + else: + vars[n] = setattrspec(vars[n], a) + if intent: + if 'intent' not in vars[n]: + vars[n]['intent'] = [] + for c in [x.strip() for x in markoutercomma(intent).split('@,@')]: + # Remove spaces so that 'in out' becomes 'inout' + tmp = c.replace(' ', '') + if tmp not in vars[n]['intent']: + vars[n]['intent'].append(tmp) + intent = None + if note: + note = note.replace('\\n\\n', '\n\n') + note = note.replace('\\n ', '\n') + if 'note' not in vars[n]: + vars[n]['note'] = [note] + else: + vars[n]['note'].append(note) + note = None + if depend is not None: + if 'depend' not in vars[n]: + vars[n]['depend'] = [] + for c in rmbadname([x.strip() for x in markoutercomma(depend).split('@,@')]): + if c not in vars[n]['depend']: + vars[n]['depend'].append(c) + depend = None + if check is not None: + if 'check' not in vars[n]: + vars[n]['check'] = [] + for c in [x.strip() for x in markoutercomma(check).split('@,@')]: + if c not in vars[n]['check']: + vars[n]['check'].append(c) + check = None + if dim and 'dimension' not in vars[n]: + vars[n]['dimension'] = [] + for d in rmbadname( + [x.strip() for x in markoutercomma(dim).split('@,@')] + ): + # d is the expression inside the dimension declaration + # Evaluate `d` with respect to params + try: + # the dimension for this variable depends on a + # previously defined parameter + d = param_parse(d, params) + except (ValueError, IndexError, KeyError): + outmess( + 'analyzevars: could not parse dimension for ' + f'variable {d!r}\n' + ) + + dim_char = ':' if d == ':' else '*' + if d == dim_char: + dl = [dim_char] + else: + dl = markoutercomma(d, ':').split('@:@') + if len(dl) == 2 and '*' in dl: # e.g. dimension(5:*) + dl = ['*'] + d = '*' + if len(dl) == 1 and dl[0] != dim_char: + dl = ['1', dl[0]] + if len(dl) == 2: + d1, d2 = map(symbolic.Expr.parse, dl) + dsize = d2 - d1 + 1 + d = dsize.tostring(language=symbolic.Language.C) + # find variables v that define d as a linear + # function, `d == a * v + b`, and store + # coefficients a and b for further analysis. + solver_and_deps = {} + for v in block['vars']: + s = symbolic.as_symbol(v) + if dsize.contains(s): + try: + a, b = dsize.linear_solve(s) + + def solve_v(s, a=a, b=b): + return (s - b) / a + + all_symbols = set(a.symbols()) + all_symbols.update(b.symbols()) + except RuntimeError as msg: + # d is not a linear function of v, + # however, if v can be determined + # from d using other means, + # implement the corresponding + # solve_v function here. + solve_v = None + all_symbols = set(dsize.symbols()) + v_deps = { + s.data for s in all_symbols + if s.data in vars} + solver_and_deps[v] = solve_v, list(v_deps) + # Note that dsize may contain symbols that are + # not defined in block['vars']. Here we assume + # these correspond to Fortran/C intrinsic + # functions or that are defined by other + # means. We'll let the compiler validate the + # definiteness of such symbols. + dimension_exprs[d] = solver_and_deps + vars[n]['dimension'].append(d) + + if 'check' not in vars[n] and 'args' in block and n in block['args']: + # n is an argument that has no checks defined. Here we + # generate some consistency checks for n, and when n is an + # array, generate checks for its dimensions and construct + # initialization expressions. + n_deps = vars[n].get('depend', []) + n_checks = [] + n_is_input = l_or(isintent_in, isintent_inout, + isintent_inplace)(vars[n]) + if isarray(vars[n]): # n is array + for i, d in enumerate(vars[n]['dimension']): + coeffs_and_deps = dimension_exprs.get(d) + if coeffs_and_deps is None: + # d is `:` or `*` or a constant expression + pass + elif n_is_input: + # n is an input array argument and its shape + # may define variables used in dimension + # specifications. + for v, (solver, deps) in coeffs_and_deps.items(): + def compute_deps(v, deps): + for v1 in coeffs_and_deps.get(v, [None, []])[1]: + if v1 not in deps: + deps.add(v1) + compute_deps(v1, deps) + all_deps = set() + compute_deps(v, all_deps) + if (v in n_deps + or '=' in vars[v] + or 'depend' in vars[v]): + # Skip a variable that + # - n depends on + # - has user-defined initialization expression + # - has user-defined dependencies + continue + if solver is not None and v not in all_deps: + # v can be solved from d, hence, we + # make it an optional argument with + # initialization expression: + is_required = False + init = solver(symbolic.as_symbol( + f'shape({n}, {i})')) + init = init.tostring( + language=symbolic.Language.C) + vars[v]['='] = init + # n needs to be initialized before v. So, + # making v dependent on n and on any + # variables in solver or d. + vars[v]['depend'] = [n] + deps + if 'check' not in vars[v]: + # add check only when no + # user-specified checks exist + vars[v]['check'] = [ + f'shape({n}, {i}) == {d}'] + else: + # d is a non-linear function on v, + # hence, v must be a required input + # argument that n will depend on + is_required = True + if 'intent' not in vars[v]: + vars[v]['intent'] = [] + if 'in' not in vars[v]['intent']: + vars[v]['intent'].append('in') + # v needs to be initialized before n + n_deps.append(v) + n_checks.append( + f'shape({n}, {i}) == {d}') + v_attr = vars[v].get('attrspec', []) + if not ('optional' in v_attr + or 'required' in v_attr): + v_attr.append( + 'required' if is_required else 'optional') + if v_attr: + vars[v]['attrspec'] = v_attr + if coeffs_and_deps is not None: + # extend v dependencies with ones specified in attrspec + for v, (solver, deps) in coeffs_and_deps.items(): + v_deps = vars[v].get('depend', []) + for aa in vars[v].get('attrspec', []): + if aa.startswith('depend'): + aa = ''.join(aa.split()) + v_deps.extend(aa[7:-1].split(',')) + if v_deps: + vars[v]['depend'] = list(set(v_deps)) + if n not in v_deps: + n_deps.append(v) + elif isstring(vars[n]): + if 'charselector' in vars[n]: + if '*' in vars[n]['charselector']: + length = _eval_length(vars[n]['charselector']['*'], + params) + vars[n]['charselector']['*'] = length + elif 'len' in vars[n]['charselector']: + length = _eval_length(vars[n]['charselector']['len'], + params) + del vars[n]['charselector']['len'] + vars[n]['charselector']['*'] = length + if n_checks: + vars[n]['check'] = n_checks + if n_deps: + vars[n]['depend'] = list(set(n_deps)) + + if '=' in vars[n]: + if 'attrspec' not in vars[n]: + vars[n]['attrspec'] = [] + if ('optional' not in vars[n]['attrspec']) and \ + ('required' not in vars[n]['attrspec']): + vars[n]['attrspec'].append('optional') + if 'depend' not in vars[n]: + vars[n]['depend'] = [] + for v, m in list(dep_matches.items()): + if m(vars[n]['=']): + vars[n]['depend'].append(v) + if not vars[n]['depend']: + del vars[n]['depend'] + if isscalar(vars[n]): + vars[n]['='] = _eval_scalar(vars[n]['='], params) + + for n in list(vars.keys()): + if n == block['name']: # n is block name + if 'note' in vars[n]: + block['note'] = vars[n]['note'] + if block['block'] == 'function': + if 'result' in block and block['result'] in vars: + vars[n] = appenddecl(vars[n], vars[block['result']]) + if 'prefix' in block: + pr = block['prefix'] + pr1 = pr.replace('pure', '') + ispure = (not pr == pr1) + pr = pr1.replace('recursive', '') + isrec = (not pr == pr1) + m = typespattern[0].match(pr) + if m: + typespec, selector, attr, edecl = cracktypespec0( + m.group('this'), m.group('after')) + kindselect, charselect, typename = cracktypespec( + typespec, selector) + vars[n]['typespec'] = typespec + try: + if block['result']: + vars[block['result']]['typespec'] = typespec + except Exception: + pass + if kindselect: + if 'kind' in kindselect: + try: + kindselect['kind'] = eval( + kindselect['kind'], {}, params) + except Exception: + pass + vars[n]['kindselector'] = kindselect + if charselect: + vars[n]['charselector'] = charselect + if typename: + vars[n]['typename'] = typename + if ispure: + vars[n] = setattrspec(vars[n], 'pure') + if isrec: + vars[n] = setattrspec(vars[n], 'recursive') + else: + outmess( + f"analyzevars: prefix ({repr(block['prefix'])}) were not used\n") + if block['block'] not in ['module', 'pythonmodule', 'python module', 'block data']: + if 'commonvars' in block: + neededvars = copy.copy(block['args'] + block['commonvars']) + else: + neededvars = copy.copy(block['args']) + for n in list(vars.keys()): + if l_or(isintent_callback, isintent_aux)(vars[n]): + neededvars.append(n) + if 'entry' in block: + neededvars.extend(list(block['entry'].keys())) + for k in list(block['entry'].keys()): + for n in block['entry'][k]: + if n not in neededvars: + neededvars.append(n) + if block['block'] == 'function': + if 'result' in block: + neededvars.append(block['result']) + else: + neededvars.append(block['name']) + if block['block'] in ['subroutine', 'function']: + name = block['name'] + if name in vars and 'intent' in vars[name]: + block['intent'] = vars[name]['intent'] + if block['block'] == 'type': + neededvars.extend(list(vars.keys())) + for n in list(vars.keys()): + if n not in neededvars: + del vars[n] + return vars + + +analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z', re.I) + + +def param_eval(v, g_params, params, dimspec=None): + """ + Creates a dictionary of indices and values for each parameter in a + parameter array to be evaluated later. + + WARNING: It is not possible to initialize multidimensional array + parameters e.g. dimension(-3:1, 4, 3:5) at this point. This is because in + Fortran initialization through array constructor requires the RESHAPE + intrinsic function. Since the right-hand side of the parameter declaration + is not executed in f2py, but rather at the compiled c/fortran extension, + later, it is not possible to execute a reshape of a parameter array. + One issue remains: if the user wants to access the array parameter from + python, we should either + 1) allow them to access the parameter array using python standard indexing + (which is often incompatible with the original fortran indexing) + 2) allow the parameter array to be accessed in python as a dictionary with + fortran indices as keys + We are choosing 2 for now. + """ + if dimspec is None: + try: + p = eval(v, g_params, params) + except Exception as msg: + p = v + outmess(f'param_eval: got "{msg}" on {v!r}\n') + return p + + # This is an array parameter. + # First, we parse the dimension information + if len(dimspec) < 2 or dimspec[::len(dimspec) - 1] != "()": + raise ValueError(f'param_eval: dimension {dimspec} can\'t be parsed') + dimrange = dimspec[1:-1].split(',') + if len(dimrange) == 1: + # e.g. dimension(2) or dimension(-1:1) + dimrange = dimrange[0].split(':') + # now, dimrange is a list of 1 or 2 elements + if len(dimrange) == 1: + bound = param_parse(dimrange[0], params) + dimrange = range(1, int(bound) + 1) + else: + lbound = param_parse(dimrange[0], params) + ubound = param_parse(dimrange[1], params) + dimrange = range(int(lbound), int(ubound) + 1) + else: + raise ValueError('param_eval: multidimensional array parameters ' + f'{dimspec} not supported') + + # Parse parameter value + v = (v[2:-2] if v.startswith('(/') else v).split(',') + v_eval = [] + for item in v: + try: + item = eval(item, g_params, params) + except Exception as msg: + outmess(f'param_eval: got "{msg}" on {item!r}\n') + v_eval.append(item) + + p = dict(zip(dimrange, v_eval)) + + return p + + +def param_parse(d, params): + """Recursively parse array dimensions. + + Parses the declaration of an array variable or parameter + `dimension` keyword, and is called recursively if the + dimension for this array is a previously defined parameter + (found in `params`). + + Parameters + ---------- + d : str + Fortran expression describing the dimension of an array. + params : dict + Previously parsed parameters declared in the Fortran source file. + + Returns + ------- + out : str + Parsed dimension expression. + + Examples + -------- + + * If the line being analyzed is + + `integer, parameter, dimension(2) :: pa = (/ 3, 5 /)` + + then `d = 2` and we return immediately, with + + >>> d = '2' + >>> param_parse(d, params) + 2 + + * If the line being analyzed is + + `integer, parameter, dimension(pa) :: pb = (/1, 2, 3/)` + + then `d = 'pa'`; since `pa` is a previously parsed parameter, + and `pa = 3`, we call `param_parse` recursively, to obtain + + >>> d = 'pa' + >>> params = {'pa': 3} + >>> param_parse(d, params) + 3 + + * If the line being analyzed is + + `integer, parameter, dimension(pa(1)) :: pb = (/1, 2, 3/)` + + then `d = 'pa(1)'`; since `pa` is a previously parsed parameter, + and `pa(1) = 3`, we call `param_parse` recursively, to obtain + + >>> d = 'pa(1)' + >>> params = dict(pa={1: 3, 2: 5}) + >>> param_parse(d, params) + 3 + """ + if "(" in d: + # this dimension expression is an array + dname = d[:d.find("(")] + ddims = d[d.find("(") + 1:d.rfind(")")] + # this dimension expression is also a parameter; + # parse it recursively + index = int(param_parse(ddims, params)) + return str(params[dname][index]) + elif d in params: + return str(params[d]) + else: + for p in params: + re_1 = re.compile( + r'(?P.*?)\b' + p + r'\b(?P.*)', re.I + ) + m = re_1.match(d) + while m: + d = m.group('before') + \ + str(params[p]) + m.group('after') + m = re_1.match(d) + return d + + +def expr2name(a, block, args=[]): + orig_a = a + a_is_expr = not analyzeargs_re_1.match(a) + if a_is_expr: # `a` is an expression + implicitrules, attrrules = buildimplicitrules(block) + at = determineexprtype(a, block['vars'], implicitrules) + na = 'e_' + for c in a: + c = c.lower() + if c not in string.ascii_lowercase + string.digits: + c = '_' + na = na + c + if na[-1] == '_': + na = na + 'e' + else: + na = na + '_e' + a = na + while a in block['vars'] or a in block['args']: + a = a + 'r' + if a in args: + k = 1 + while a + str(k) in args: + k = k + 1 + a = a + str(k) + if a_is_expr: + block['vars'][a] = at + else: + if a not in block['vars']: + block['vars'][a] = block['vars'].get(orig_a, {}) + if 'externals' in block and orig_a in block['externals'] + block['interfaced']: + block['vars'][a] = setattrspec(block['vars'][a], 'external') + return a + + +def analyzeargs(block): + setmesstext(block) + implicitrules, _ = buildimplicitrules(block) + if 'args' not in block: + block['args'] = [] + args = [] + for a in block['args']: + a = expr2name(a, block, args) + args.append(a) + block['args'] = args + if 'entry' in block: + for k, args1 in list(block['entry'].items()): + for a in args1: + if a not in block['vars']: + block['vars'][a] = {} + + for b in block['body']: + if b['name'] in args: + if 'externals' not in block: + block['externals'] = [] + if b['name'] not in block['externals']: + block['externals'].append(b['name']) + if 'result' in block and block['result'] not in block['vars']: + block['vars'][block['result']] = {} + return block + + +determineexprtype_re_1 = re.compile(r'\A\(.+?,.+?\)\Z', re.I) +determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(?P\w+)|)\Z', re.I) +determineexprtype_re_3 = re.compile( + r'\A[+-]?[\d.]+[-\d+de.]*(_(?P\w+)|)\Z', re.I) +determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z', re.I) +determineexprtype_re_5 = re.compile(r'\A(?P\w+)\s*\(.*?\)\s*\Z', re.I) + + +def _ensure_exprdict(r): + if isinstance(r, int): + return {'typespec': 'integer'} + if isinstance(r, float): + return {'typespec': 'real'} + if isinstance(r, complex): + return {'typespec': 'complex'} + if isinstance(r, dict): + return r + raise AssertionError(repr(r)) + + +def determineexprtype(expr, vars, rules={}): + if expr in vars: + return _ensure_exprdict(vars[expr]) + expr = expr.strip() + if determineexprtype_re_1.match(expr): + return {'typespec': 'complex'} + m = determineexprtype_re_2.match(expr) + if m: + if 'name' in m.groupdict() and m.group('name'): + outmess( + f'determineexprtype: selected kind types not supported ({repr(expr)})\n') + return {'typespec': 'integer'} + m = determineexprtype_re_3.match(expr) + if m: + if 'name' in m.groupdict() and m.group('name'): + outmess( + f'determineexprtype: selected kind types not supported ({repr(expr)})\n') + return {'typespec': 'real'} + for op in ['+', '-', '*', '/']: + for e in [x.strip() for x in markoutercomma(expr, comma=op).split('@' + op + '@')]: + if e in vars: + return _ensure_exprdict(vars[e]) + t = {} + if determineexprtype_re_4.match(expr): # in parenthesis + t = determineexprtype(expr[1:-1], vars, rules) + else: + m = determineexprtype_re_5.match(expr) + if m: + rn = m.group('name') + t = determineexprtype(m.group('name'), vars, rules) + if t and 'attrspec' in t: + del t['attrspec'] + if not t: + if rn[0] in rules: + return _ensure_exprdict(rules[rn[0]]) + if expr[0] in '\'"': + return {'typespec': 'character', 'charselector': {'*': '*'}} + if not t: + outmess( + f'determineexprtype: could not determine expressions ({repr(expr)}) type.\n') + return t + +###### + + +def crack2fortrangen(block, tab='\n', as_interface=False): + global skipfuncs, onlyfuncs + + setmesstext(block) + ret = '' + if isinstance(block, list): + for g in block: + if g and g['block'] in ['function', 'subroutine']: + if g['name'] in skipfuncs: + continue + if onlyfuncs and g['name'] not in onlyfuncs: + continue + ret = ret + crack2fortrangen(g, tab, as_interface=as_interface) + return ret + prefix = '' + name = '' + args = '' + blocktype = block['block'] + if blocktype == 'program': + return '' + argsl = [] + if 'name' in block: + name = block['name'] + if 'args' in block: + vars = block['vars'] + for a in block['args']: + a = expr2name(a, block, argsl) + if not isintent_callback(vars[a]): + argsl.append(a) + if block['block'] == 'function' or argsl: + args = f"({','.join(argsl)})" + f2pyenhancements = '' + if 'f2pyenhancements' in block: + for k in list(block['f2pyenhancements'].keys()): + f2pyenhancements = '%s%s%s %s' % ( + f2pyenhancements, tab + tabchar, k, block['f2pyenhancements'][k]) + intent_lst = block.get('intent', [])[:] + if blocktype == 'function' and 'callback' in intent_lst: + intent_lst.remove('callback') + if intent_lst: + f2pyenhancements = '%s%sintent(%s) %s' %\ + (f2pyenhancements, tab + tabchar, + ','.join(intent_lst), name) + use = '' + if 'use' in block: + use = use2fortran(block['use'], tab + tabchar) + common = '' + if 'common' in block: + common = common2fortran(block['common'], tab + tabchar) + if name == 'unknown_interface': + name = '' + result = '' + if 'result' in block: + result = f" result ({block['result']})" + if block['result'] not in argsl: + argsl.append(block['result']) + body = crack2fortrangen(block['body'], tab + tabchar, as_interface=as_interface) + vars = vars2fortran( + block, block['vars'], argsl, tab + tabchar, as_interface=as_interface) + mess = '' + if 'from' in block and not as_interface: + mess = f"! in {block['from']}" + if 'entry' in block: + entry_stmts = '' + for k, i in list(block['entry'].items()): + entry_stmts = f"{entry_stmts}{tab + tabchar}entry {k}({','.join(i)})" + body = body + entry_stmts + if blocktype == 'block data' and name == '_BLOCK_DATA_': + name = '' + ret = '%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s' % ( + tab, prefix, blocktype, name, args, result, mess, f2pyenhancements, use, vars, common, body, tab, blocktype, name) + return ret + + +def common2fortran(common, tab=''): + ret = '' + for k in list(common.keys()): + if k == '_BLNK_': + ret = f"{ret}{tab}common {','.join(common[k])}" + else: + ret = f"{ret}{tab}common /{k}/ {','.join(common[k])}" + return ret + + +def use2fortran(use, tab=''): + ret = '' + for m in list(use.keys()): + ret = f'{ret}{tab}use {m},' + if use[m] == {}: + if ret and ret[-1] == ',': + ret = ret[:-1] + continue + if 'only' in use[m] and use[m]['only']: + ret = f'{ret} only:' + if 'map' in use[m] and use[m]['map']: + c = ' ' + for k in list(use[m]['map'].keys()): + if k == use[m]['map'][k]: + ret = f'{ret}{c}{k}' + c = ',' + else: + ret = f"{ret}{c}{k}=>{use[m]['map'][k]}" + c = ',' + if ret and ret[-1] == ',': + ret = ret[:-1] + return ret + + +def true_intent_list(var): + lst = var['intent'] + ret = [] + for intent in lst: + try: + f = globals()[f'isintent_{intent}'] + except KeyError: + pass + else: + if f(var): + ret.append(intent) + return ret + + +def vars2fortran(block, vars, args, tab='', as_interface=False): + setmesstext(block) + ret = '' + nout = [] + for a in args: + if a in block['vars']: + nout.append(a) + if 'commonvars' in block: + for a in block['commonvars']: + if a in vars: + if a not in nout: + nout.append(a) + else: + errmess( + f'vars2fortran: Confused?!: "{a}" is not defined in vars.\n') + if 'varnames' in block: + nout.extend(block['varnames']) + if not as_interface: + for a in list(vars.keys()): + if a not in nout: + nout.append(a) + for a in nout: + if 'depend' in vars[a]: + for d in vars[a]['depend']: + if d in vars and 'depend' in vars[d] and a in vars[d]['depend']: + errmess( + f'vars2fortran: Warning: cross-dependence between variables "{a}" and "{d}\"\n') + if 'externals' in block and a in block['externals']: + if isintent_callback(vars[a]): + ret = f'{ret}{tab}intent(callback) {a}' + ret = f'{ret}{tab}external {a}' + if isoptional(vars[a]): + ret = f'{ret}{tab}optional {a}' + if a in vars and 'typespec' not in vars[a]: + continue + cont = 1 + for b in block['body']: + if a == b['name'] and b['block'] == 'function': + cont = 0 + break + if cont: + continue + if a not in vars: + show(vars) + outmess(f'vars2fortran: No definition for argument "{a}".\n') + continue + if a == block['name']: + if block['block'] != 'function' or block.get('result'): + # 1) skip declaring a variable that name matches with + # subroutine name + # 2) skip declaring function when its type is + # declared via `result` construction + continue + if 'typespec' not in vars[a]: + if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']: + if a in args: + ret = f'{ret}{tab}external {a}' + continue + show(vars[a]) + outmess(f'vars2fortran: No typespec for argument "{a}".\n') + continue + vardef = vars[a]['typespec'] + if vardef == 'type' and 'typename' in vars[a]: + vardef = f"{vardef}({vars[a]['typename']})" + selector = {} + if 'kindselector' in vars[a]: + selector = vars[a]['kindselector'] + elif 'charselector' in vars[a]: + selector = vars[a]['charselector'] + if '*' in selector: + if selector['*'] in ['*', ':']: + vardef = f"{vardef}*({selector['*']})" + else: + vardef = f"{vardef}*{selector['*']}" + elif 'len' in selector: + vardef = f"{vardef}(len={selector['len']}" + if 'kind' in selector: + vardef = f"{vardef},kind={selector['kind']})" + else: + vardef = f'{vardef})' + elif 'kind' in selector: + vardef = f"{vardef}(kind={selector['kind']})" + c = ' ' + if 'attrspec' in vars[a]: + attr = [l for l in vars[a]['attrspec'] + if l not in ['external']] + if as_interface and 'intent(in)' in attr and 'intent(out)' in attr: + # In Fortran, intent(in, out) are conflicting while + # intent(in, out) can be specified only via + # `!f2py intent(out) ..`. + # So, for the Fortran interface, we'll drop + # intent(out) to resolve the conflict. + attr.remove('intent(out)') + if attr: + vardef = f"{vardef}, {','.join(attr)}" + c = ',' + if 'dimension' in vars[a]: + vardef = f"{vardef}{c}dimension({','.join(vars[a]['dimension'])})" + c = ',' + if 'intent' in vars[a]: + lst = true_intent_list(vars[a]) + if lst: + vardef = f"{vardef}{c}intent({','.join(lst)})" + c = ',' + if 'check' in vars[a]: + vardef = f"{vardef}{c}check({','.join(vars[a]['check'])})" + c = ',' + if 'depend' in vars[a]: + vardef = f"{vardef}{c}depend({','.join(vars[a]['depend'])})" + c = ',' + if '=' in vars[a]: + v = vars[a]['='] + if vars[a]['typespec'] in ['complex', 'double complex']: + try: + v = eval(v) + v = f'({v.real},{v.imag})' + except Exception: + pass + vardef = f'{vardef} :: {a}={v}' + else: + vardef = f'{vardef} :: {a}' + ret = f'{ret}{tab}{vardef}' + return ret +###### + + +# We expose post_processing_hooks as global variable so that +# user-libraries could register their own hooks to f2py. +post_processing_hooks = [] + + +def crackfortran(files): + global usermodules, post_processing_hooks + + outmess('Reading fortran codes...\n', 0) + readfortrancode(files, crackline) + outmess('Post-processing...\n', 0) + usermodules = [] + postlist = postcrack(grouplist[0]) + outmess('Applying post-processing hooks...\n', 0) + for hook in post_processing_hooks: + outmess(f' {hook.__name__}\n', 0) + postlist = traverse(postlist, hook) + outmess('Post-processing (stage 2)...\n', 0) + postlist = postcrack2(postlist) + return usermodules + postlist + + +def crack2fortran(block): + global f2py_version + + pyf = crack2fortrangen(block) + '\n' + header = """! -*- f90 -*- +! Note: the context of this file is case sensitive. +""" + footer = """ +! This file was auto-generated with f2py (version:%s). +! See: +! https://web.archive.org/web/20140822061353/http://cens.ioc.ee/projects/f2py2e +""" % (f2py_version) + return header + pyf + footer + + +def _is_visit_pair(obj): + return (isinstance(obj, tuple) + and len(obj) == 2 + and isinstance(obj[0], (int, str))) + + +def traverse(obj, visit, parents=[], result=None, *args, **kwargs): + '''Traverse f2py data structure with the following visit function: + + def visit(item, parents, result, *args, **kwargs): + """ + + parents is a list of key-"f2py data structure" pairs from which + items are taken from. + + result is a f2py data structure that is filled with the + return value of the visit function. + + item is 2-tuple (index, value) if parents[-1][1] is a list + item is 2-tuple (key, value) if parents[-1][1] is a dict + + The return value of visit must be None, or of the same kind as + item, that is, if parents[-1] is a list, the return value must + be 2-tuple (new_index, new_value), or if parents[-1] is a + dict, the return value must be 2-tuple (new_key, new_value). + + If new_index or new_value is None, the return value of visit + is ignored, that is, it will not be added to the result. + + If the return value is None, the content of obj will be + traversed, otherwise not. + """ + ''' + + if _is_visit_pair(obj): + if obj[0] == 'parent_block': + # avoid infinite recursion + return obj + new_result = visit(obj, parents, result, *args, **kwargs) + if new_result is not None: + assert _is_visit_pair(new_result) + return new_result + parent = obj + result_key, obj = obj + else: + parent = (None, obj) + result_key = None + + if isinstance(obj, list): + new_result = [] + for index, value in enumerate(obj): + new_index, new_item = traverse((index, value), visit, + parents + [parent], result, + *args, **kwargs) + if new_index is not None: + new_result.append(new_item) + elif isinstance(obj, dict): + new_result = {} + for key, value in obj.items(): + new_key, new_value = traverse((key, value), visit, + parents + [parent], result, + *args, **kwargs) + if new_key is not None: + new_result[new_key] = new_value + else: + new_result = obj + + if result_key is None: + return new_result + return result_key, new_result + + +def character_backward_compatibility_hook(item, parents, result, + *args, **kwargs): + """Previously, Fortran character was incorrectly treated as + character*1. This hook fixes the usage of the corresponding + variables in `check`, `dimension`, `=`, and `callstatement` + expressions. + + The usage of `char*` in `callprotoargument` expression can be left + unchanged because C `character` is C typedef of `char`, although, + new implementations should use `character*` in the corresponding + expressions. + + See https://github.com/numpy/numpy/pull/19388 for more information. + + """ + parent_key, parent_value = parents[-1] + key, value = item + + def fix_usage(varname, value): + value = re.sub(r'[*]\s*\b' + varname + r'\b', varname, value) + value = re.sub(r'\b' + varname + r'\b\s*[\[]\s*0\s*[\]]', + varname, value) + return value + + if parent_key in ['dimension', 'check']: + assert parents[-3][0] == 'vars' + vars_dict = parents[-3][1] + elif key == '=': + assert parents[-2][0] == 'vars' + vars_dict = parents[-2][1] + else: + vars_dict = None + + new_value = None + if vars_dict is not None: + new_value = value + for varname, vd in vars_dict.items(): + if ischaracter(vd): + new_value = fix_usage(varname, new_value) + elif key == 'callstatement': + vars_dict = parents[-2][1]['vars'] + new_value = value + for varname, vd in vars_dict.items(): + if ischaracter(vd): + # replace all occurrences of `` with + # `&` in argument passing + new_value = re.sub( + r'(? `{new_value}`\n', 1) + return (key, new_value) + + +post_processing_hooks.append(character_backward_compatibility_hook) + + +if __name__ == "__main__": + files = [] + funcs = [] + f = 1 + f2 = 0 + f3 = 0 + showblocklist = 0 + for l in sys.argv[1:]: + if l == '': + pass + elif l[0] == ':': + f = 0 + elif l == '-quiet': + quiet = 1 + verbose = 0 + elif l == '-verbose': + verbose = 2 + quiet = 0 + elif l == '-fix': + if strictf77: + outmess( + 'Use option -f90 before -fix if Fortran 90 code is in fix form.\n', 0) + skipemptyends = 1 + sourcecodeform = 'fix' + elif l == '-skipemptyends': + skipemptyends = 1 + elif l == '--ignore-contains': + ignorecontains = 1 + elif l == '-f77': + strictf77 = 1 + sourcecodeform = 'fix' + elif l == '-f90': + strictf77 = 0 + sourcecodeform = 'free' + skipemptyends = 1 + elif l == '-h': + f2 = 1 + elif l == '-show': + showblocklist = 1 + elif l == '-m': + f3 = 1 + elif l[0] == '-': + errmess(f'Unknown option {repr(l)}\n') + elif f2: + f2 = 0 + pyffilename = l + elif f3: + f3 = 0 + f77modulename = l + elif f: + try: + open(l).close() + files.append(l) + except OSError as detail: + errmess(f'OSError: {detail!s}\n') + else: + funcs.append(l) + if not strictf77 and f77modulename and not skipemptyends: + outmess("""\ + Warning: You have specified module name for non Fortran 77 code that + should not need one (expect if you are scanning F90 code for non + module blocks but then you should use flag -skipemptyends and also + be sure that the files do not contain programs without program + statement). +""", 0) + + postlist = crackfortran(files) + if pyffilename: + outmess(f'Writing fortran code to file {repr(pyffilename)}\n', 0) + pyf = crack2fortran(postlist) + with open(pyffilename, 'w') as f: + f.write(pyf) + if showblocklist: + show(postlist) diff --git a/local_packages/numpy/f2py/crackfortran.pyi b/local_packages/numpy/f2py/crackfortran.pyi new file mode 100644 index 0000000..742d358 --- /dev/null +++ b/local_packages/numpy/f2py/crackfortran.pyi @@ -0,0 +1,266 @@ +import re +from _typeshed import StrOrBytesPath, StrPath +from collections.abc import Callable, Iterable, Mapping +from typing import ( + IO, + Any, + Concatenate, + Final, + Literal as L, + Never, + ParamSpec, + TypeAlias, + overload, +) + +from .__version__ import version +from .auxfuncs import isintent_dict as isintent_dict + +### + +_Tss = ParamSpec("_Tss") + +_VisitResult: TypeAlias = list[Any] | dict[str, Any] | None +_VisitItem: TypeAlias = tuple[str | None, _VisitResult] +_VisitFunc: TypeAlias = Callable[Concatenate[_VisitItem, list[_VisitItem], _VisitResult, _Tss], _VisitItem | None] + +### + +COMMON_FREE_EXTENSIONS: Final[list[str]] = ... +COMMON_FIXED_EXTENSIONS: Final[list[str]] = ... + +f2py_version: Final = version +tabchar: Final[str] = " " + +f77modulename: str +pyffilename: str +sourcecodeform: L["fix", "gree"] +strictf77: L[0, 1] +quiet: L[0, 1] +verbose: L[0, 1, 2] +skipemptyends: L[0, 1] +ignorecontains: L[1] +dolowercase: L[1] + +beginpattern: str | re.Pattern[str] +currentfilename: str +filepositiontext: str +expectbegin: L[0, 1] +gotnextfile: L[0, 1] +neededmodule: int +skipblocksuntil: int +groupcounter: int +groupname: dict[int, str] | str +groupcache: dict[int, dict[str, Any]] | None +grouplist: dict[int, list[dict[str, Any]]] | None +previous_context: tuple[str, str, int] | None + +f90modulevars: dict[str, dict[str, Any]] = {} +debug: list[Never] = [] +include_paths: list[str] = [] +onlyfuncs: list[str] = [] +skipfuncs: list[str] = [] +skipfunctions: Final[list[str]] = [] +usermodules: Final[list[dict[str, Any]]] = [] + +defaultimplicitrules: Final[dict[str, dict[str, str]]] = {} +badnames: Final[dict[str, str]] = {} +invbadnames: Final[dict[str, str]] = {} + +beforethisafter: Final[str] = ... +fortrantypes: Final[str] = ... +groupbegins77: Final[str] = ... +groupbegins90: Final[str] = ... +groupends: Final[str] = ... +endifs: Final[str] = ... +moduleprocedures: Final[str] = ... + +beginpattern77: Final[tuple[re.Pattern[str], L["begin"]]] = ... +beginpattern90: Final[tuple[re.Pattern[str], L["begin"]]] = ... +callpattern: Final[tuple[re.Pattern[str], L["call"]]] = ... +callfunpattern: Final[tuple[re.Pattern[str], L["callfun"]]] = ... +commonpattern: Final[tuple[re.Pattern[str], L["common"]]] = ... +containspattern: Final[tuple[re.Pattern[str], L["contains"]]] = ... +datapattern: Final[tuple[re.Pattern[str], L["data"]]] = ... +dimensionpattern: Final[tuple[re.Pattern[str], L["dimension"]]] = ... +endifpattern: Final[tuple[re.Pattern[str], L["endif"]]] = ... +endpattern: Final[tuple[re.Pattern[str], L["end"]]] = ... +entrypattern: Final[tuple[re.Pattern[str], L["entry"]]] = ... +externalpattern: Final[tuple[re.Pattern[str], L["external"]]] = ... +f2pyenhancementspattern: Final[tuple[re.Pattern[str], L["f2pyenhancements"]]] = ... +formatpattern: Final[tuple[re.Pattern[str], L["format"]]] = ... +functionpattern: Final[tuple[re.Pattern[str], L["begin"]]] = ... +implicitpattern: Final[tuple[re.Pattern[str], L["implicit"]]] = ... +intentpattern: Final[tuple[re.Pattern[str], L["intent"]]] = ... +intrinsicpattern: Final[tuple[re.Pattern[str], L["intrinsic"]]] = ... +optionalpattern: Final[tuple[re.Pattern[str], L["optional"]]] = ... +moduleprocedurepattern: Final[tuple[re.Pattern[str], L["moduleprocedure"]]] = ... +multilinepattern: Final[tuple[re.Pattern[str], L["multiline"]]] = ... +parameterpattern: Final[tuple[re.Pattern[str], L["parameter"]]] = ... +privatepattern: Final[tuple[re.Pattern[str], L["private"]]] = ... +publicpattern: Final[tuple[re.Pattern[str], L["public"]]] = ... +requiredpattern: Final[tuple[re.Pattern[str], L["required"]]] = ... +subroutinepattern: Final[tuple[re.Pattern[str], L["begin"]]] = ... +typespattern: Final[tuple[re.Pattern[str], L["type"]]] = ... +usepattern: Final[tuple[re.Pattern[str], L["use"]]] = ... + +analyzeargs_re_1: Final[re.Pattern[str]] = ... +callnameargspattern: Final[re.Pattern[str]] = ... +charselector: Final[re.Pattern[str]] = ... +crackline_bind_1: Final[re.Pattern[str]] = ... +crackline_bindlang: Final[re.Pattern[str]] = ... +crackline_re_1: Final[re.Pattern[str]] = ... +determineexprtype_re_1: Final[re.Pattern[str]] = ... +determineexprtype_re_2: Final[re.Pattern[str]] = ... +determineexprtype_re_3: Final[re.Pattern[str]] = ... +determineexprtype_re_4: Final[re.Pattern[str]] = ... +determineexprtype_re_5: Final[re.Pattern[str]] = ... +getlincoef_re_1: Final[re.Pattern[str]] = ... +kindselector: Final[re.Pattern[str]] = ... +lenarraypattern: Final[re.Pattern[str]] = ... +lenkindpattern: Final[re.Pattern[str]] = ... +namepattern: Final[re.Pattern[str]] = ... +nameargspattern: Final[re.Pattern[str]] = ... +operatorpattern: Final[re.Pattern[str]] = ... +real16pattern: Final[re.Pattern[str]] = ... +real8pattern: Final[re.Pattern[str]] = ... +selectpattern: Final[re.Pattern[str]] = ... +typedefpattern: Final[re.Pattern[str]] = ... +typespattern4implicit: Final[re.Pattern[str]] = ... +word_pattern: Final[re.Pattern[str]] = ... + +post_processing_hooks: Final[list[_VisitFunc[...]]] = [] + +# +def outmess(line: str, flag: int = 1) -> None: ... +def reset_global_f2py_vars() -> None: ... + +# +def rmbadname1(name: str) -> str: ... +def undo_rmbadname1(name: str) -> str: ... +def rmbadname(names: Iterable[str]) -> list[str]: ... +def undo_rmbadname(names: Iterable[str]) -> list[str]: ... + +# +def openhook(filename: StrPath, mode: str) -> IO[Any]: ... +def is_free_format(fname: StrPath) -> bool: ... +def readfortrancode( + ffile: StrOrBytesPath | Iterable[StrOrBytesPath], + dowithline: Callable[[str, int], object] = ..., + istop: int = 1, +) -> None: ... + +# +def split_by_unquoted(line: str, characters: str) -> tuple[str, str]: ... + +# +def crackline(line: str, reset: int = 0) -> None: ... +def markouterparen(line: str) -> str: ... +def markoutercomma(line: str, comma: str = ",") -> str: ... +def unmarkouterparen(line: str) -> str: ... +def appenddecl(decl: Mapping[str, object] | None, decl2: Mapping[str, object] | None, force: int = 1) -> dict[str, Any]: ... + +# +def parse_name_for_bind(line: str) -> tuple[str, str | None]: ... +def analyzeline(m: re.Match[str], case: str, line: str) -> None: ... +def appendmultiline(group: dict[str, Any], context_name: str, ml: str) -> None: ... +def cracktypespec0(typespec: str, ll: str | None) -> tuple[str, str | None, str | None, str | None]: ... + +# +def removespaces(expr: str) -> str: ... +def markinnerspaces(line: str) -> str: ... +def updatevars(typespec: str, selector: str | None, attrspec: str, entitydecl: str) -> str: ... +def cracktypespec(typespec: str, selector: str | None) -> tuple[dict[str, str] | None, dict[str, str] | None, str | None]: ... + +# +def setattrspec(decl: dict[str, list[str]], attr: str | None, force: int = 0) -> dict[str, list[str]]: ... +def setkindselector(decl: dict[str, dict[str, str]], sel: dict[str, str], force: int = 0) -> dict[str, dict[str, str]]: ... +def setcharselector(decl: dict[str, dict[str, str]], sel: dict[str, str], force: int = 0) -> dict[str, dict[str, str]]: ... +def getblockname(block: Mapping[str, object], unknown: str = "unknown") -> str: ... +def setmesstext(block: Mapping[str, object]) -> None: ... +def get_usedict(block: Mapping[str, object]) -> dict[str, str]: ... +def get_useparameters(block: Mapping[str, object], param_map: Mapping[str, str] | None = None) -> dict[str, str]: ... + +# +@overload +def postcrack2( + block: dict[str, Any], + tab: str = "", + param_map: Mapping[str, str] | None = None, +) -> dict[str, str | Any]: ... +@overload +def postcrack2( + block: list[dict[str, Any]], + tab: str = "", + param_map: Mapping[str, str] | None = None, +) -> list[dict[str, str | Any]]: ... + +# +@overload +def postcrack(block: dict[str, Any], args: Mapping[str, str] | None = None, tab: str = "") -> dict[str, Any]: ... +@overload +def postcrack(block: list[dict[str, str]], args: Mapping[str, str] | None = None, tab: str = "") -> list[dict[str, Any]]: ... + +# +def sortvarnames(vars: Mapping[str, object]) -> list[str]: ... +def analyzecommon(block: Mapping[str, object]) -> dict[str, Any]: ... +def analyzebody(block: Mapping[str, object], args: Mapping[str, str], tab: str = "") -> list[dict[str, Any]]: ... +def buildimplicitrules(block: Mapping[str, object]) -> tuple[dict[str, dict[str, str]], dict[str, str]]: ... +def myeval(e: str, g: object | None = None, l: object | None = None) -> float: ... + +# +def getlincoef(e: str, xset: set[str]) -> tuple[float | None, float | None, str | None]: ... + +# +def get_sorted_names(vars: Mapping[str, Mapping[str, str]]) -> list[str]: ... +def get_parameters(vars: Mapping[str, Mapping[str, str]], global_params: dict[str, str] = {}) -> dict[str, str]: ... + +# +def analyzevars(block: Mapping[str, Any]) -> dict[str, dict[str, str]]: ... + +# +def param_eval(v: str, g_params: dict[str, Any], params: Mapping[str, object], dimspec: str | None = None) -> dict[str, Any]: ... +def param_parse(d: str, params: Mapping[str, str]) -> str: ... +def expr2name(a: str, block: Mapping[str, object], args: list[str] = []) -> str: ... +def analyzeargs(block: Mapping[str, object]) -> dict[str, Any]: ... + +# +def determineexprtype(expr: str, vars: Mapping[str, object], rules: dict[str, Any] = {}) -> dict[str, Any]: ... +def crack2fortrangen(block: Mapping[str, object], tab: str = "\n", as_interface: bool = False) -> str: ... +def common2fortran(common: Mapping[str, object], tab: str = "") -> str: ... +def use2fortran(use: Mapping[str, object], tab: str = "") -> str: ... +def true_intent_list(var: dict[str, list[str]]) -> list[str]: ... +def vars2fortran( + block: Mapping[str, Mapping[str, object]], + vars: Mapping[str, object], + args: Mapping[str, str], + tab: str = "", + as_interface: bool = False, +) -> str: ... + +# +def crackfortran(files: StrOrBytesPath | Iterable[StrOrBytesPath]) -> list[dict[str, Any]]: ... +def crack2fortran(block: Mapping[str, Any]) -> str: ... + +# +def traverse( + obj: tuple[str | None, _VisitResult], + visit: _VisitFunc[_Tss], + parents: list[tuple[str | None, _VisitResult]] = [], + result: list[Any] | dict[str, Any] | None = None, + *args: _Tss.args, + **kwargs: _Tss.kwargs, +) -> _VisitItem | _VisitResult: ... + +# +def character_backward_compatibility_hook( + item: _VisitItem, + parents: list[_VisitItem], + result: object, # ignored + *args: object, # ignored + **kwargs: object, # ignored +) -> _VisitItem | None: ... + +# namespace pollution +c: str +n: str diff --git a/local_packages/numpy/f2py/diagnose.py b/local_packages/numpy/f2py/diagnose.py new file mode 100644 index 0000000..7eb1697 --- /dev/null +++ b/local_packages/numpy/f2py/diagnose.py @@ -0,0 +1,149 @@ +#!/usr/bin/env python3 +import os +import sys +import tempfile + + +def run(): + _path = os.getcwd() + os.chdir(tempfile.gettempdir()) + print('------') + print(f'os.name={os.name!r}') + print('------') + print(f'sys.platform={sys.platform!r}') + print('------') + print('sys.version:') + print(sys.version) + print('------') + print('sys.prefix:') + print(sys.prefix) + print('------') + print(f"sys.path={':'.join(sys.path)!r}") + print('------') + + try: + import numpy + has_newnumpy = 1 + except ImportError as e: + print('Failed to import new numpy:', e) + has_newnumpy = 0 + + try: + from numpy.f2py import f2py2e + has_f2py2e = 1 + except ImportError as e: + print('Failed to import f2py2e:', e) + has_f2py2e = 0 + + try: + import numpy.distutils + has_numpy_distutils = 2 + except ImportError: + try: + import numpy_distutils + has_numpy_distutils = 1 + except ImportError as e: + print('Failed to import numpy_distutils:', e) + has_numpy_distutils = 0 + + if has_newnumpy: + try: + print(f'Found new numpy version {numpy.__version__!r} in {numpy.__file__}') + except Exception as msg: + print('error:', msg) + print('------') + + if has_f2py2e: + try: + print('Found f2py2e version %r in %s' % + (f2py2e.__version__.version, f2py2e.__file__)) + except Exception as msg: + print('error:', msg) + print('------') + + if has_numpy_distutils: + try: + if has_numpy_distutils == 2: + print('Found numpy.distutils version %r in %r' % ( + numpy.distutils.__version__, + numpy.distutils.__file__)) + else: + print('Found numpy_distutils version %r in %r' % ( + numpy_distutils.numpy_distutils_version.numpy_distutils_version, + numpy_distutils.__file__)) + print('------') + except Exception as msg: + print('error:', msg) + print('------') + try: + if has_numpy_distutils == 1: + print( + 'Importing numpy_distutils.command.build_flib ...', end=' ') + import numpy_distutils.command.build_flib as build_flib + print('ok') + print('------') + try: + print( + 'Checking availability of supported Fortran compilers:') + for compiler_class in build_flib.all_compilers: + compiler_class(verbose=1).is_available() + print('------') + except Exception as msg: + print('error:', msg) + print('------') + except Exception as msg: + print( + 'error:', msg, '(ignore it, build_flib is obsolete for numpy.distutils 0.2.2 and up)') + print('------') + try: + if has_numpy_distutils == 2: + print('Importing numpy.distutils.fcompiler ...', end=' ') + import numpy.distutils.fcompiler as fcompiler + else: + print('Importing numpy_distutils.fcompiler ...', end=' ') + import numpy_distutils.fcompiler as fcompiler + print('ok') + print('------') + try: + print('Checking availability of supported Fortran compilers:') + fcompiler.show_fcompilers() + print('------') + except Exception as msg: + print('error:', msg) + print('------') + except Exception as msg: + print('error:', msg) + print('------') + try: + if has_numpy_distutils == 2: + print('Importing numpy.distutils.cpuinfo ...', end=' ') + from numpy.distutils.cpuinfo import cpuinfo + print('ok') + print('------') + else: + try: + print( + 'Importing numpy_distutils.command.cpuinfo ...', end=' ') + from numpy_distutils.command.cpuinfo import cpuinfo + print('ok') + print('------') + except Exception as msg: + print('error:', msg, '(ignore it)') + print('Importing numpy_distutils.cpuinfo ...', end=' ') + from numpy_distutils.cpuinfo import cpuinfo + print('ok') + print('------') + cpu = cpuinfo() + print('CPU information:', end=' ') + for name in dir(cpuinfo): + if name[0] == '_' and name[1] != '_' and getattr(cpu, name[1:])(): + print(name[1:], end=' ') + print('------') + except Exception as msg: + print('error:', msg) + print('------') + os.chdir(_path) + + +if __name__ == "__main__": + run() diff --git a/local_packages/numpy/f2py/diagnose.pyi b/local_packages/numpy/f2py/diagnose.pyi new file mode 100644 index 0000000..b88194a --- /dev/null +++ b/local_packages/numpy/f2py/diagnose.pyi @@ -0,0 +1 @@ +def run() -> None: ... diff --git a/local_packages/numpy/f2py/f2py2e.py b/local_packages/numpy/f2py/f2py2e.py new file mode 100644 index 0000000..84a5aa3 --- /dev/null +++ b/local_packages/numpy/f2py/f2py2e.py @@ -0,0 +1,788 @@ +""" + +f2py2e - Fortran to Python C/API generator. 2nd Edition. + See __usage__ below. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +import argparse +import os +import pprint +import re +import sys + +from numpy.f2py._backends import f2py_build_generator + +from . import ( + __version__, + auxfuncs, + capi_maps, + cb_rules, + cfuncs, + crackfortran, + f90mod_rules, + rules, +) +from .cfuncs import errmess + +f2py_version = __version__.version +numpy_version = __version__.version + +# outmess=sys.stdout.write +show = pprint.pprint +outmess = auxfuncs.outmess +MESON_ONLY_VER = (sys.version_info >= (3, 12)) + +__usage__ =\ +f"""Usage: + +1) To construct extension module sources: + + f2py [] [[[only:]||[skip:]] \\ + ] \\ + [: ...] + +2) To compile fortran files and build extension modules: + + f2py -c [, , ] + +3) To generate signature files: + + f2py -h ...< same options as in (1) > + +Description: This program generates a Python C/API file (module.c) + that contains wrappers for given fortran functions so that they + can be called from Python. With the -c option the corresponding + extension modules are built. + +Options: + + -h Write signatures of the fortran routines to file + and exit. You can then edit and use it instead + of . If ==stdout then the + signatures are printed to stdout. + Names of fortran routines for which Python C/API + functions will be generated. Default is all that are found + in . + Paths to fortran/signature files that will be scanned for + in order to determine their signatures. + skip: Ignore fortran functions that follow until `:'. + only: Use only fortran functions that follow until `:'. + : Get back to mode. + + -m Name of the module; f2py generates a Python/C API + file module.c or extension module . + Default is 'untitled'. + + '-include

' Writes additional headers in the C wrapper, can be passed + multiple times, generates #include
each time. + + --[no-]lower Do [not] lower the cases in . By default, + --lower is assumed with -h key, and --no-lower without -h key. + + --build-dir All f2py generated files are created in . + Default is tempfile.mkdtemp(). + + --overwrite-signature Overwrite existing signature file. + + --[no-]latex-doc Create (or not) module.tex. + Default is --no-latex-doc. + --short-latex Create 'incomplete' LaTeX document (without commands + \\documentclass, \\tableofcontents, and \\begin{{document}}, + \\end{{document}}). + + --[no-]rest-doc Create (or not) module.rst. + Default is --no-rest-doc. + + --debug-capi Create C/API code that reports the state of the wrappers + during runtime. Useful for debugging. + + --[no-]wrap-functions Create Fortran subroutine wrappers to Fortran 77 + functions. --wrap-functions is default because it ensures + maximum portability/compiler independence. + + --[no-]freethreading-compatible Create a module that declares it does or + doesn't require the GIL. The default is + --freethreading-compatible for backward + compatibility. Inspect the Fortran code you are wrapping for + thread safety issues before passing + --no-freethreading-compatible, as f2py does not analyze + fortran code for thread safety issues. + + --include-paths ::... Search include files from the given + directories. + + --help-link [..] List system resources found by system_info.py. See also + --link- switch below. [..] is optional list + of resources names. E.g. try 'f2py --help-link lapack_opt'. + + --f2cmap Load Fortran-to-Python KIND specification from the given + file. Default: .f2py_f2cmap in current directory. + + --quiet Run quietly. + --verbose Run with extra verbosity. + --skip-empty-wrappers Only generate wrapper files when needed. + -v Print f2py version ID and exit. + + +build backend options (only effective with -c) +[NO_MESON] is used to indicate an option not meant to be used +with the meson backend or above Python 3.12: + + --fcompiler= Specify Fortran compiler type by vendor [NO_MESON] + --compiler= Specify distutils C compiler type [NO_MESON] + + --help-fcompiler List available Fortran compilers and exit [NO_MESON] + --f77exec= Specify the path to F77 compiler [NO_MESON] + --f90exec= Specify the path to F90 compiler [NO_MESON] + --f77flags= Specify F77 compiler flags + --f90flags= Specify F90 compiler flags + --opt= Specify optimization flags [NO_MESON] + --arch= Specify architecture specific optimization flags [NO_MESON] + --noopt Compile without optimization [NO_MESON] + --noarch Compile without arch-dependent optimization [NO_MESON] + --debug Compile with debugging information + + --dep + Specify a meson dependency for the module. This may + be passed multiple times for multiple dependencies. + Dependencies are stored in a list for further processing. + + Example: --dep lapack --dep scalapack + This will identify "lapack" and "scalapack" as dependencies + and remove them from argv, leaving a dependencies list + containing ["lapack", "scalapack"]. + + --backend + Specify the build backend for the compilation process. + The supported backends are 'meson' and 'distutils'. + If not specified, defaults to 'distutils'. On + Python 3.12 or higher, the default is 'meson'. + +Extra options (only effective with -c): + + --link- Link extension module with as defined + by numpy.distutils/system_info.py. E.g. to link + with optimized LAPACK libraries (vecLib on MacOSX, + ATLAS elsewhere), use --link-lapack_opt. + See also --help-link switch. [NO_MESON] + + -L/path/to/lib/ -l + -D -U + -I/path/to/include/ + .o .so .a + + Using the following macros may be required with non-gcc Fortran + compilers: + -DPREPEND_FORTRAN -DNO_APPEND_FORTRAN -DUPPERCASE_FORTRAN + + When using -DF2PY_REPORT_ATEXIT, a performance report of F2PY + interface is printed out at exit (platforms: Linux). + + When using -DF2PY_REPORT_ON_ARRAY_COPY=, a message is + sent to stderr whenever F2PY interface makes a copy of an + array. Integer sets the threshold for array sizes when + a message should be shown. + +Version: {f2py_version} +numpy Version: {numpy_version} +License: NumPy license (see LICENSE.txt in the NumPy source code) +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +https://numpy.org/doc/stable/f2py/index.html\n""" + + +def scaninputline(inputline): + files, skipfuncs, onlyfuncs, debug = [], [], [], [] + f, f2, f3, f5, f6, f8, f9, f10 = 1, 0, 0, 0, 0, 0, 0, 0 + verbose = 1 + emptygen = True + dolc = -1 + dolatexdoc = 0 + dorestdoc = 0 + wrapfuncs = 1 + buildpath = '.' + include_paths, freethreading_compatible, inputline = get_newer_options(inputline) + signsfile, modulename = None, None + options = {'buildpath': buildpath, + 'coutput': None, + 'f2py_wrapper_output': None} + for l in inputline: + if l == '': + pass + elif l == 'only:': + f = 0 + elif l == 'skip:': + f = -1 + elif l == ':': + f = 1 + elif l[:8] == '--debug-': + debug.append(l[8:]) + elif l == '--lower': + dolc = 1 + elif l == '--build-dir': + f6 = 1 + elif l == '--no-lower': + dolc = 0 + elif l == '--quiet': + verbose = 0 + elif l == '--verbose': + verbose += 1 + elif l == '--latex-doc': + dolatexdoc = 1 + elif l == '--no-latex-doc': + dolatexdoc = 0 + elif l == '--rest-doc': + dorestdoc = 1 + elif l == '--no-rest-doc': + dorestdoc = 0 + elif l == '--wrap-functions': + wrapfuncs = 1 + elif l == '--no-wrap-functions': + wrapfuncs = 0 + elif l == '--short-latex': + options['shortlatex'] = 1 + elif l == '--coutput': + f8 = 1 + elif l == '--f2py-wrapper-output': + f9 = 1 + elif l == '--f2cmap': + f10 = 1 + elif l == '--overwrite-signature': + options['h-overwrite'] = 1 + elif l == '-h': + f2 = 1 + elif l == '-m': + f3 = 1 + elif l[:2] == '-v': + print(f2py_version) + sys.exit() + elif l == '--show-compilers': + f5 = 1 + elif l[:8] == '-include': + cfuncs.outneeds['userincludes'].append(l[9:-1]) + cfuncs.userincludes[l[9:-1]] = '#include ' + l[8:] + elif l == '--skip-empty-wrappers': + emptygen = False + elif l[0] == '-': + errmess(f'Unknown option {repr(l)}\n') + sys.exit() + elif f2: + f2 = 0 + signsfile = l + elif f3: + f3 = 0 + modulename = l + elif f6: + f6 = 0 + buildpath = l + elif f8: + f8 = 0 + options["coutput"] = l + elif f9: + f9 = 0 + options["f2py_wrapper_output"] = l + elif f10: + f10 = 0 + options["f2cmap_file"] = l + elif f == 1: + try: + with open(l): + pass + files.append(l) + except OSError as detail: + errmess(f'OSError: {detail!s}. Skipping file "{l!s}".\n') + elif f == -1: + skipfuncs.append(l) + elif f == 0: + onlyfuncs.append(l) + if not f5 and not files and not modulename: + print(__usage__) + sys.exit() + if not os.path.isdir(buildpath): + if not verbose: + outmess(f'Creating build directory {buildpath}\n') + os.mkdir(buildpath) + if signsfile: + signsfile = os.path.join(buildpath, signsfile) + if signsfile and os.path.isfile(signsfile) and 'h-overwrite' not in options: + errmess( + f'Signature file "{signsfile}" exists!!! Use --overwrite-signature to overwrite.\n') + sys.exit() + + options['emptygen'] = emptygen + options['debug'] = debug + options['verbose'] = verbose + if dolc == -1 and not signsfile: + options['do-lower'] = 0 + else: + options['do-lower'] = dolc + if modulename: + options['module'] = modulename + if signsfile: + options['signsfile'] = signsfile + if onlyfuncs: + options['onlyfuncs'] = onlyfuncs + if skipfuncs: + options['skipfuncs'] = skipfuncs + options['dolatexdoc'] = dolatexdoc + options['dorestdoc'] = dorestdoc + options['wrapfuncs'] = wrapfuncs + options['buildpath'] = buildpath + options['include_paths'] = include_paths + options['requires_gil'] = not freethreading_compatible + options.setdefault('f2cmap_file', None) + return files, options + + +def callcrackfortran(files, options): + rules.options = options + crackfortran.debug = options['debug'] + crackfortran.verbose = options['verbose'] + if 'module' in options: + crackfortran.f77modulename = options['module'] + if 'skipfuncs' in options: + crackfortran.skipfuncs = options['skipfuncs'] + if 'onlyfuncs' in options: + crackfortran.onlyfuncs = options['onlyfuncs'] + crackfortran.include_paths[:] = options['include_paths'] + crackfortran.dolowercase = options['do-lower'] + postlist = crackfortran.crackfortran(files) + if 'signsfile' in options: + outmess(f"Saving signatures to file \"{options['signsfile']}\"\n") + pyf = crackfortran.crack2fortran(postlist) + if options['signsfile'][-6:] == 'stdout': + sys.stdout.write(pyf) + else: + with open(options['signsfile'], 'w') as f: + f.write(pyf) + if options["coutput"] is None: + for mod in postlist: + mod["coutput"] = f"{mod['name']}module.c" + else: + for mod in postlist: + mod["coutput"] = options["coutput"] + if options["f2py_wrapper_output"] is None: + for mod in postlist: + mod["f2py_wrapper_output"] = f"{mod['name']}-f2pywrappers.f" + else: + for mod in postlist: + mod["f2py_wrapper_output"] = options["f2py_wrapper_output"] + for mod in postlist: + if options["requires_gil"]: + mod['gil_used'] = 'Py_MOD_GIL_USED' + else: + mod['gil_used'] = 'Py_MOD_GIL_NOT_USED' + # gh-26718 Reset global + crackfortran.f77modulename = '' + return postlist + + +def buildmodules(lst): + cfuncs.buildcfuncs() + outmess('Building modules...\n') + modules, mnames, isusedby = [], [], {} + for item in lst: + if '__user__' in item['name']: + cb_rules.buildcallbacks(item) + else: + if 'use' in item: + for u in item['use'].keys(): + if u not in isusedby: + isusedby[u] = [] + isusedby[u].append(item['name']) + modules.append(item) + mnames.append(item['name']) + ret = {} + for module, name in zip(modules, mnames): + if name in isusedby: + outmess('\tSkipping module "%s" which is used by %s.\n' % ( + name, ','.join('"%s"' % s for s in isusedby[name]))) + else: + um = [] + if 'use' in module: + for u in module['use'].keys(): + if u in isusedby and u in mnames: + um.append(modules[mnames.index(u)]) + else: + outmess( + f'\tModule "{name}" uses nonexisting "{u}" ' + 'which will be ignored.\n') + ret[name] = {} + dict_append(ret[name], rules.buildmodule(module, um)) + return ret + + +def dict_append(d_out, d_in): + for (k, v) in d_in.items(): + if k not in d_out: + d_out[k] = [] + if isinstance(v, list): + d_out[k] = d_out[k] + v + else: + d_out[k].append(v) + + +def run_main(comline_list): + """ + Equivalent to running:: + + f2py + + where ``=string.join(,' ')``, but in Python. Unless + ``-h`` is used, this function returns a dictionary containing + information on generated modules and their dependencies on source + files. + + You cannot build extension modules with this function, that is, + using ``-c`` is not allowed. Use the ``compile`` command instead. + + Examples + -------- + The command ``f2py -m scalar scalar.f`` can be executed from Python as + follows. + + .. literalinclude:: ../../source/f2py/code/results/run_main_session.dat + :language: python + + """ + crackfortran.reset_global_f2py_vars() + f2pydir = os.path.dirname(os.path.abspath(cfuncs.__file__)) + fobjhsrc = os.path.join(f2pydir, 'src', 'fortranobject.h') + fobjcsrc = os.path.join(f2pydir, 'src', 'fortranobject.c') + # gh-22819 -- begin + parser = make_f2py_compile_parser() + args, comline_list = parser.parse_known_args(comline_list) + pyf_files, _ = filter_files("", "[.]pyf([.]src|)", comline_list) + # Checks that no existing modulename is defined in a pyf file + # TODO: Remove all this when scaninputline is replaced + if args.module_name: + if "-h" in comline_list: + modname = ( + args.module_name + ) # Directly use from args when -h is present + else: + modname = validate_modulename( + pyf_files, args.module_name + ) # Validate modname when -h is not present + comline_list += ['-m', modname] # needed for the rest of scaninputline + # gh-22819 -- end + files, options = scaninputline(comline_list) + auxfuncs.options = options + capi_maps.load_f2cmap_file(options['f2cmap_file']) + postlist = callcrackfortran(files, options) + isusedby = {} + for plist in postlist: + if 'use' in plist: + for u in plist['use'].keys(): + if u not in isusedby: + isusedby[u] = [] + isusedby[u].append(plist['name']) + for plist in postlist: + module_name = plist['name'] + if plist['block'] == 'python module' and '__user__' in module_name: + if module_name in isusedby: + # if not quiet: + usedby = ','.join(f'"{s}"' for s in isusedby[module_name]) + outmess( + f'Skipping Makefile build for module "{module_name}" ' + f'which is used by {usedby}\n') + if 'signsfile' in options: + if options['verbose'] > 1: + outmess( + 'Stopping. Edit the signature file and then run f2py on the signature file: ') + outmess(f"{os.path.basename(sys.argv[0])} {options['signsfile']}\n") + return + for plist in postlist: + if plist['block'] != 'python module': + if 'python module' not in options: + errmess( + 'Tip: If your original code is Fortran source then you must use -m option.\n') + raise TypeError('All blocks must be python module blocks but got %s' % ( + repr(plist['block']))) + auxfuncs.debugoptions = options['debug'] + f90mod_rules.options = options + auxfuncs.wrapfuncs = options['wrapfuncs'] + + ret = buildmodules(postlist) + + for mn in ret.keys(): + dict_append(ret[mn], {'csrc': fobjcsrc, 'h': fobjhsrc}) + return ret + + +def filter_files(prefix, suffix, files, remove_prefix=None): + """ + Filter files by prefix and suffix. + """ + filtered, rest = [], [] + match = re.compile(prefix + r'.*' + suffix + r'\Z').match + if remove_prefix: + ind = len(prefix) + else: + ind = 0 + for file in [x.strip() for x in files]: + if match(file): + filtered.append(file[ind:]) + else: + rest.append(file) + return filtered, rest + + +def get_prefix(module): + p = os.path.dirname(os.path.dirname(module.__file__)) + return p + + +class CombineIncludePaths(argparse.Action): + def __call__(self, parser, namespace, values, option_string=None): + include_paths_set = set(getattr(namespace, 'include_paths', []) or []) + if option_string == "--include_paths": + outmess("Use --include-paths or -I instead of --include_paths which will be removed") + if option_string in {"--include-paths", "--include_paths"}: + include_paths_set.update(values.split(':')) + else: + include_paths_set.add(values) + namespace.include_paths = list(include_paths_set) + +def f2py_parser(): + parser = argparse.ArgumentParser(add_help=False) + parser.add_argument("-I", dest="include_paths", action=CombineIncludePaths) + parser.add_argument("--include-paths", dest="include_paths", action=CombineIncludePaths) + parser.add_argument("--include_paths", dest="include_paths", action=CombineIncludePaths) + parser.add_argument("--freethreading-compatible", dest="ftcompat", action=argparse.BooleanOptionalAction) + return parser + +def get_newer_options(iline): + iline = (' '.join(iline)).split() + parser = f2py_parser() + args, remain = parser.parse_known_args(iline) + ipaths = args.include_paths + if args.include_paths is None: + ipaths = [] + return ipaths, args.ftcompat, remain + +def make_f2py_compile_parser(): + parser = argparse.ArgumentParser(add_help=False) + parser.add_argument("--dep", action="append", dest="dependencies") + parser.add_argument("--backend", choices=['meson', 'distutils'], default='distutils') + parser.add_argument("-m", dest="module_name") + return parser + +def preparse_sysargv(): + # To keep backwards bug compatibility, newer flags are handled by argparse, + # and `sys.argv` is passed to the rest of `f2py` as is. + parser = make_f2py_compile_parser() + + args, remaining_argv = parser.parse_known_args() + sys.argv = [sys.argv[0]] + remaining_argv + + backend_key = args.backend + if MESON_ONLY_VER and backend_key == 'distutils': + outmess("Cannot use distutils backend with Python>=3.12," + " using meson backend instead.\n") + backend_key = "meson" + + return { + "dependencies": args.dependencies or [], + "backend": backend_key, + "modulename": args.module_name, + } + +def run_compile(): + """ + Do it all in one call! + """ + import tempfile + + # Collect dependency flags, preprocess sys.argv + argy = preparse_sysargv() + modulename = argy["modulename"] + if modulename is None: + modulename = 'untitled' + dependencies = argy["dependencies"] + backend_key = argy["backend"] + build_backend = f2py_build_generator(backend_key) + + i = sys.argv.index('-c') + del sys.argv[i] + + remove_build_dir = 0 + try: + i = sys.argv.index('--build-dir') + except ValueError: + i = None + if i is not None: + build_dir = sys.argv[i + 1] + del sys.argv[i + 1] + del sys.argv[i] + else: + remove_build_dir = 1 + build_dir = tempfile.mkdtemp() + + _reg1 = re.compile(r'--link-') + sysinfo_flags = [_m for _m in sys.argv[1:] if _reg1.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in sysinfo_flags] + if sysinfo_flags: + sysinfo_flags = [f[7:] for f in sysinfo_flags] + + _reg2 = re.compile( + r'--((no-|)(wrap-functions|lower|freethreading-compatible)|debug-capi|quiet|skip-empty-wrappers)|-include') + f2py_flags = [_m for _m in sys.argv[1:] if _reg2.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in f2py_flags] + f2py_flags2 = [] + fl = 0 + for a in sys.argv[1:]: + if a in ['only:', 'skip:']: + fl = 1 + elif a == ':': + fl = 0 + if fl or a == ':': + f2py_flags2.append(a) + if f2py_flags2 and f2py_flags2[-1] != ':': + f2py_flags2.append(':') + f2py_flags.extend(f2py_flags2) + sys.argv = [_m for _m in sys.argv if _m not in f2py_flags2] + _reg3 = re.compile( + r'--((f(90)?compiler(-exec|)|compiler)=|help-compiler)') + flib_flags = [_m for _m in sys.argv[1:] if _reg3.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in flib_flags] + # TODO: Once distutils is dropped completely, i.e. min_ver >= 3.12, unify into --fflags + reg_f77_f90_flags = re.compile(r'--f(77|90)flags=') + reg_distutils_flags = re.compile(r'--((f(77|90)exec|opt|arch)=|(debug|noopt|noarch|help-fcompiler))') + fc_flags = [_m for _m in sys.argv[1:] if reg_f77_f90_flags.match(_m)] + distutils_flags = [_m for _m in sys.argv[1:] if reg_distutils_flags.match(_m)] + if not (MESON_ONLY_VER or backend_key == 'meson'): + fc_flags.extend(distutils_flags) + sys.argv = [_m for _m in sys.argv if _m not in (fc_flags + distutils_flags)] + + del_list = [] + for s in flib_flags: + v = '--fcompiler=' + if s[:len(v)] == v: + if MESON_ONLY_VER or backend_key == 'meson': + outmess( + "--fcompiler cannot be used with meson," + "set compiler with the FC environment variable\n" + ) + else: + from numpy.distutils import fcompiler + fcompiler.load_all_fcompiler_classes() + allowed_keys = list(fcompiler.fcompiler_class.keys()) + nv = ov = s[len(v):].lower() + if ov not in allowed_keys: + vmap = {} # XXX + try: + nv = vmap[ov] + except KeyError: + if ov not in vmap.values(): + print(f'Unknown vendor: "{s[len(v):]}"') + nv = ov + i = flib_flags.index(s) + flib_flags[i] = '--fcompiler=' + nv # noqa: B909 + continue + for s in del_list: + i = flib_flags.index(s) + del flib_flags[i] + assert len(flib_flags) <= 2, repr(flib_flags) + + _reg5 = re.compile(r'--(verbose)') + setup_flags = [_m for _m in sys.argv[1:] if _reg5.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in setup_flags] + + if '--quiet' in f2py_flags: + setup_flags.append('--quiet') + + # Ugly filter to remove everything but sources + sources = sys.argv[1:] + f2cmapopt = '--f2cmap' + if f2cmapopt in sys.argv: + i = sys.argv.index(f2cmapopt) + f2py_flags.extend(sys.argv[i:i + 2]) + del sys.argv[i + 1], sys.argv[i] + sources = sys.argv[1:] + + pyf_files, _sources = filter_files("", "[.]pyf([.]src|)", sources) + sources = pyf_files + _sources + modulename = validate_modulename(pyf_files, modulename) + extra_objects, sources = filter_files('', '[.](o|a|so|dylib)', sources) + library_dirs, sources = filter_files('-L', '', sources, remove_prefix=1) + libraries, sources = filter_files('-l', '', sources, remove_prefix=1) + undef_macros, sources = filter_files('-U', '', sources, remove_prefix=1) + define_macros, sources = filter_files('-D', '', sources, remove_prefix=1) + for i in range(len(define_macros)): + name_value = define_macros[i].split('=', 1) + if len(name_value) == 1: + name_value.append(None) + if len(name_value) == 2: + define_macros[i] = tuple(name_value) + else: + print('Invalid use of -D:', name_value) + + # Construct wrappers / signatures / things + if backend_key == 'meson': + if not pyf_files: + outmess('Using meson backend\nWill pass --lower to f2py\nSee https://numpy.org/doc/stable/f2py/buildtools/meson.html\n') + f2py_flags.append('--lower') + run_main(f" {' '.join(f2py_flags)} -m {modulename} {' '.join(sources)}".split()) + else: + run_main(f" {' '.join(f2py_flags)} {' '.join(pyf_files)}".split()) + + # Order matters here, includes are needed for run_main above + include_dirs, _, sources = get_newer_options(sources) + # Now use the builder + builder = build_backend( + modulename, + sources, + extra_objects, + build_dir, + include_dirs, + library_dirs, + libraries, + define_macros, + undef_macros, + f2py_flags, + sysinfo_flags, + fc_flags, + flib_flags, + setup_flags, + remove_build_dir, + {"dependencies": dependencies}, + ) + + builder.compile() + + +def validate_modulename(pyf_files, modulename='untitled'): + if len(pyf_files) > 1: + raise ValueError("Only one .pyf file per call") + if pyf_files: + pyff = pyf_files[0] + pyf_modname = auxfuncs.get_f2py_modulename(pyff) + if modulename != pyf_modname: + outmess( + f"Ignoring -m {modulename}.\n" + f"{pyff} defines {pyf_modname} to be the modulename.\n" + ) + modulename = pyf_modname + return modulename + +def main(): + if '--help-link' in sys.argv[1:]: + sys.argv.remove('--help-link') + if MESON_ONLY_VER: + outmess("Use --dep for meson builds\n") + else: + from numpy.distutils.system_info import show_all + show_all() + return + + if '-c' in sys.argv[1:]: + run_compile() + else: + run_main(sys.argv[1:]) diff --git a/local_packages/numpy/f2py/f2py2e.pyi b/local_packages/numpy/f2py/f2py2e.pyi new file mode 100644 index 0000000..46794e5 --- /dev/null +++ b/local_packages/numpy/f2py/f2py2e.pyi @@ -0,0 +1,74 @@ +import argparse +import pprint +from collections.abc import Hashable, Iterable, Mapping, MutableMapping, Sequence +from types import ModuleType +from typing import Any, Final, NotRequired, TypedDict, type_check_only +from typing_extensions import TypeVar, override + +from .__version__ import version +from .auxfuncs import _Bool, outmess as outmess + +### + +_KT = TypeVar("_KT", bound=Hashable) +_VT = TypeVar("_VT") + +@type_check_only +class _F2PyDict(TypedDict): + csrc: list[str] + h: list[str] + fsrc: NotRequired[list[str]] + ltx: NotRequired[list[str]] + +@type_check_only +class _PreparseResult(TypedDict): + dependencies: list[str] + backend: str + modulename: str + +### + +MESON_ONLY_VER: Final[bool] +f2py_version: Final = version +numpy_version: Final = version +__usage__: Final[str] + +show = pprint.pprint + +class CombineIncludePaths(argparse.Action): + @override + def __call__( + self, + /, + parser: argparse.ArgumentParser, + namespace: argparse.Namespace, + values: str | Sequence[str] | None, + option_string: str | None = None, + ) -> None: ... + +# +def run_main(comline_list: Iterable[str]) -> dict[str, _F2PyDict]: ... +def run_compile() -> None: ... +def main() -> None: ... + +# +def scaninputline(inputline: Iterable[str]) -> tuple[list[str], dict[str, _Bool]]: ... +def callcrackfortran(files: list[str], options: dict[str, bool]) -> list[dict[str, Any]]: ... +def buildmodules(lst: Iterable[Mapping[str, object]]) -> dict[str, dict[str, Any]]: ... +def dict_append(d_out: MutableMapping[_KT, _VT], d_in: Mapping[_KT, _VT]) -> None: ... +def filter_files( + prefix: str, + suffix: str, + files: Iterable[str], + remove_prefix: _Bool | None = None, +) -> tuple[list[str], list[str]]: ... +def get_prefix(module: ModuleType) -> str: ... +def get_newer_options(iline: Iterable[str]) -> tuple[list[str], Any, list[str]]: ... + +# +def f2py_parser() -> argparse.ArgumentParser: ... +def make_f2py_compile_parser() -> argparse.ArgumentParser: ... + +# +def preparse_sysargv() -> _PreparseResult: ... +def validate_modulename(pyf_files: Sequence[str], modulename: str = "untitled") -> str: ... diff --git a/local_packages/numpy/f2py/f90mod_rules.py b/local_packages/numpy/f2py/f90mod_rules.py new file mode 100644 index 0000000..d13a42a --- /dev/null +++ b/local_packages/numpy/f2py/f90mod_rules.py @@ -0,0 +1,269 @@ +""" +Build F90 module support for f2py2e. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +__version__ = "$Revision: 1.27 $"[10:-1] + +f2py_version = 'See `f2py -v`' + +import numpy as np + +from . import capi_maps, func2subr + +# The environment provided by auxfuncs.py is needed for some calls to eval. +# As the needed functions cannot be determined by static inspection of the +# code, it is safest to use import * pending a major refactoring of f2py. +from .auxfuncs import * +from .crackfortran import undo_rmbadname, undo_rmbadname1 + +options = {} + + +def findf90modules(m): + if ismodule(m): + return [m] + if not hasbody(m): + return [] + ret = [] + for b in m['body']: + if ismodule(b): + ret.append(b) + else: + ret = ret + findf90modules(b) + return ret + + +fgetdims1 = """\ + external f2pysetdata + logical ns + integer r,i + integer(%d) s(*) + ns = .FALSE. + if (allocated(d)) then + do i=1,r + if ((size(d,i).ne.s(i)).and.(s(i).ge.0)) then + ns = .TRUE. + end if + end do + if (ns) then + deallocate(d) + end if + end if + if ((.not.allocated(d)).and.(s(1).ge.1)) then""" % np.intp().itemsize + +fgetdims2 = """\ + end if + if (allocated(d)) then + do i=1,r + s(i) = size(d,i) + end do + end if + flag = 1 + call f2pysetdata(d,allocated(d))""" + +fgetdims2_sa = """\ + end if + if (allocated(d)) then + do i=1,r + s(i) = size(d,i) + end do + !s(r) must be equal to len(d(1)) + end if + flag = 2 + call f2pysetdata(d,allocated(d))""" + + +def buildhooks(pymod): + from . import rules + ret = {'f90modhooks': [], 'initf90modhooks': [], 'body': [], + 'need': ['F_FUNC', 'arrayobject.h'], + 'separatorsfor': {'includes0': '\n', 'includes': '\n'}, + 'docs': ['"Fortran 90/95 modules:\\n"'], + 'latexdoc': []} + fhooks = [''] + + def fadd(line, s=fhooks): + s[0] = f'{s[0]}\n {line}' + doc = [''] + + def dadd(line, s=doc): + s[0] = f'{s[0]}\n{line}' + + usenames = getuseblocks(pymod) + for m in findf90modules(pymod): + sargs, fargs, efargs, modobjs, notvars, onlyvars = [], [], [], [], [ + m['name']], [] + sargsp = [] + ifargs = [] + mfargs = [] + if hasbody(m): + for b in m['body']: + notvars.append(b['name']) + for n in m['vars'].keys(): + var = m['vars'][n] + + if (n not in notvars and isvariable(var)) and (not l_or(isintent_hide, isprivate)(var)): + onlyvars.append(n) + mfargs.append(n) + outmess(f"\t\tConstructing F90 module support for \"{m['name']}\"...\n") + if len(onlyvars) == 0 and len(notvars) == 1 and m['name'] in notvars: + outmess(f"\t\t\tSkipping {m['name']} since there are no public vars/func in this module...\n") + continue + + # gh-25186 + if m['name'] in usenames and containscommon(m): + outmess(f"\t\t\tSkipping {m['name']} since it is in 'use' and contains a common block...\n") + continue + # skip modules with derived types + if m['name'] in usenames and containsderivedtypes(m): + outmess(f"\t\t\tSkipping {m['name']} since it is in 'use' and contains a derived type...\n") + continue + if onlyvars: + outmess(f"\t\t Variables: {' '.join(onlyvars)}\n") + chooks = [''] + + def cadd(line, s=chooks): + s[0] = f'{s[0]}\n{line}' + ihooks = [''] + + def iadd(line, s=ihooks): + s[0] = f'{s[0]}\n{line}' + + vrd = capi_maps.modsign2map(m) + cadd('static FortranDataDef f2py_%s_def[] = {' % (m['name'])) + dadd('\\subsection{Fortran 90/95 module \\texttt{%s}}\n' % (m['name'])) + if hasnote(m): + note = m['note'] + if isinstance(note, list): + note = '\n'.join(note) + dadd(note) + if onlyvars: + dadd('\\begin{description}') + for n in onlyvars: + var = m['vars'][n] + modobjs.append(n) + ct = capi_maps.getctype(var) + at = capi_maps.c2capi_map[ct] + dm = capi_maps.getarrdims(n, var) + dms = dm['dims'].replace('*', '-1').strip() + dms = dms.replace(':', '-1').strip() + if not dms: + dms = '-1' + use_fgetdims2 = fgetdims2 + cadd('\t{"%s",%s,{{%s}},%s, %s},' % + (undo_rmbadname1(n), dm['rank'], dms, at, + capi_maps.get_elsize(var))) + dadd('\\item[]{{}\\verb@%s@{}}' % + (capi_maps.getarrdocsign(n, var))) + if hasnote(var): + note = var['note'] + if isinstance(note, list): + note = '\n'.join(note) + dadd(f'--- {note}') + if isallocatable(var): + fargs.append(f"f2py_{m['name']}_getdims_{n}") + efargs.append(fargs[-1]) + sargs.append( + f'void (*{n})(int*,npy_intp*,void(*)(char*,npy_intp*),int*)') + sargsp.append('void (*)(int*,npy_intp*,void(*)(char*,npy_intp*),int*)') + iadd(f"\tf2py_{m['name']}_def[i_f2py++].func = {n};") + fadd(f'subroutine {fargs[-1]}(r,s,f2pysetdata,flag)') + fadd(f"use {m['name']}, only: d => {undo_rmbadname1(n)}\n") + fadd('integer flag\n') + fhooks[0] = fhooks[0] + fgetdims1 + dms = range(1, int(dm['rank']) + 1) + fadd(' allocate(d(%s))\n' % + (','.join(['s(%s)' % i for i in dms]))) + fhooks[0] = fhooks[0] + use_fgetdims2 + fadd(f'end subroutine {fargs[-1]}') + else: + fargs.append(n) + sargs.append(f'char *{n}') + sargsp.append('char*') + iadd(f"\tf2py_{m['name']}_def[i_f2py++].data = {n};") + if onlyvars: + dadd('\\end{description}') + if hasbody(m): + for b in m['body']: + if not isroutine(b): + outmess("f90mod_rules.buildhooks:" + f" skipping {b['block']} {b['name']}\n") + continue + modobjs.append(f"{b['name']}()") + b['modulename'] = m['name'] + api, wrap = rules.buildapi(b) + if isfunction(b): + fhooks[0] = fhooks[0] + wrap + fargs.append(f"f2pywrap_{m['name']}_{b['name']}") + ifargs.append(func2subr.createfuncwrapper(b, signature=1)) + elif wrap: + fhooks[0] = fhooks[0] + wrap + fargs.append(f"f2pywrap_{m['name']}_{b['name']}") + ifargs.append( + func2subr.createsubrwrapper(b, signature=1)) + else: + fargs.append(b['name']) + mfargs.append(fargs[-1]) + api['externroutines'] = [] + ar = applyrules(api, vrd) + ar['docs'] = [] + ar['docshort'] = [] + ret = dictappend(ret, ar) + cadd(('\t{"%s",-1,{{-1}},0,0,NULL,(void *)' + 'f2py_rout_#modulename#_%s_%s,' + 'doc_f2py_rout_#modulename#_%s_%s},') + % (b['name'], m['name'], b['name'], m['name'], b['name'])) + sargs.append(f"char *{b['name']}") + sargsp.append('char *') + iadd(f"\tf2py_{m['name']}_def[i_f2py++].data = {b['name']};") + cadd('\t{NULL}\n};\n') + iadd('}') + ihooks[0] = 'static void f2py_setup_%s(%s) {\n\tint i_f2py=0;%s' % ( + m['name'], ','.join(sargs), ihooks[0]) + if '_' in m['name']: + F_FUNC = 'F_FUNC_US' + else: + F_FUNC = 'F_FUNC' + iadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void (*)(%s));' + % (F_FUNC, m['name'], m['name'].upper(), ','.join(sargsp))) + iadd('static void f2py_init_%s(void) {' % (m['name'])) + iadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);' + % (F_FUNC, m['name'], m['name'].upper(), m['name'])) + iadd('}\n') + ret['f90modhooks'] = ret['f90modhooks'] + chooks + ihooks + ret['initf90modhooks'] = ['\tPyDict_SetItemString(d, "%s", PyFortranObject_New(f2py_%s_def,f2py_init_%s));' % ( + m['name'], m['name'], m['name'])] + ret['initf90modhooks'] + fadd('') + fadd(f"subroutine f2pyinit{m['name']}(f2pysetupfunc)") + if mfargs: + for a in undo_rmbadname(mfargs): + fadd(f"use {m['name']}, only : {a}") + if ifargs: + fadd(' '.join(['interface'] + ifargs)) + fadd('end interface') + fadd('external f2pysetupfunc') + if efargs: + for a in undo_rmbadname(efargs): + fadd(f'external {a}') + fadd(f"call f2pysetupfunc({','.join(undo_rmbadname(fargs))})") + fadd(f"end subroutine f2pyinit{m['name']}\n") + + dadd('\n'.join(ret['latexdoc']).replace( + r'\subsection{', r'\subsubsection{')) + + ret['latexdoc'] = [] + ret['docs'].append(f"\"\t{m['name']} --- {','.join(undo_rmbadname(modobjs))}\"") + + ret['routine_defs'] = '' + ret['doc'] = [] + ret['docshort'] = [] + ret['latexdoc'] = doc[0] + if len(ret['docs']) <= 1: + ret['docs'] = '' + return ret, fhooks[0] diff --git a/local_packages/numpy/f2py/f90mod_rules.pyi b/local_packages/numpy/f2py/f90mod_rules.pyi new file mode 100644 index 0000000..4df004e --- /dev/null +++ b/local_packages/numpy/f2py/f90mod_rules.pyi @@ -0,0 +1,16 @@ +from collections.abc import Mapping +from typing import Any, Final + +from .auxfuncs import isintent_dict as isintent_dict + +__version__: Final[str] = ... +f2py_version: Final = "See `f2py -v`" + +options: Final[dict[str, bool]] + +fgetdims1: Final[str] = ... +fgetdims2: Final[str] = ... +fgetdims2_sa: Final[str] = ... + +def findf90modules(m: Mapping[str, object]) -> list[dict[str, Any]]: ... +def buildhooks(pymod: Mapping[str, object]) -> dict[str, Any]: ... diff --git a/local_packages/numpy/f2py/func2subr.py b/local_packages/numpy/f2py/func2subr.py new file mode 100644 index 0000000..09b67f7 --- /dev/null +++ b/local_packages/numpy/f2py/func2subr.py @@ -0,0 +1,329 @@ +""" + +Rules for building C/API module with f2py2e. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +import copy + +from ._isocbind import isoc_kindmap +from .auxfuncs import ( + getfortranname, + isexternal, + isfunction, + isfunction_wrap, + isintent_in, + isintent_out, + islogicalfunction, + ismoduleroutine, + isscalar, + issubroutine, + issubroutine_wrap, + outmess, + show, +) + + +def var2fixfortran(vars, a, fa=None, f90mode=None): + if fa is None: + fa = a + if a not in vars: + show(vars) + outmess(f'var2fixfortran: No definition for argument "{a}".\n') + return '' + if 'typespec' not in vars[a]: + show(vars[a]) + outmess(f'var2fixfortran: No typespec for argument "{a}".\n') + return '' + vardef = vars[a]['typespec'] + if vardef == 'type' and 'typename' in vars[a]: + vardef = f"{vardef}({vars[a]['typename']})" + selector = {} + lk = '' + if 'kindselector' in vars[a]: + selector = vars[a]['kindselector'] + lk = 'kind' + elif 'charselector' in vars[a]: + selector = vars[a]['charselector'] + lk = 'len' + if '*' in selector: + if f90mode: + if selector['*'] in ['*', ':', '(*)']: + vardef = f'{vardef}(len=*)' + else: + vardef = f"{vardef}({lk}={selector['*']})" + elif selector['*'] in ['*', ':']: + vardef = f"{vardef}*({selector['*']})" + else: + vardef = f"{vardef}*{selector['*']}" + elif 'len' in selector: + vardef = f"{vardef}(len={selector['len']}" + if 'kind' in selector: + vardef = f"{vardef},kind={selector['kind']})" + else: + vardef = f'{vardef})' + elif 'kind' in selector: + vardef = f"{vardef}(kind={selector['kind']})" + + vardef = f'{vardef} {fa}' + if 'dimension' in vars[a]: + vardef = f"{vardef}({','.join(vars[a]['dimension'])})" + return vardef + +def useiso_c_binding(rout): + useisoc = False + for value in rout['vars'].values(): + kind_value = value.get('kindselector', {}).get('kind') + if kind_value in isoc_kindmap: + return True + return useisoc + +def createfuncwrapper(rout, signature=0): + assert isfunction(rout) + + extra_args = [] + vars = rout['vars'] + for a in rout['args']: + v = rout['vars'][a] + for i, d in enumerate(v.get('dimension', [])): + if d == ':': + dn = f'f2py_{a}_d{i}' + dv = {'typespec': 'integer', 'intent': ['hide']} + dv['='] = f'shape({a}, {i})' + extra_args.append(dn) + vars[dn] = dv + v['dimension'][i] = dn + rout['args'].extend(extra_args) + need_interface = bool(extra_args) + + ret = [''] + + def add(line, ret=ret): + ret[0] = f'{ret[0]}\n {line}' + name = rout['name'] + fortranname = getfortranname(rout) + f90mode = ismoduleroutine(rout) + newname = f'{name}f2pywrap' + + if newname not in vars: + vars[newname] = vars[name] + args = [newname] + rout['args'][1:] + else: + args = [newname] + rout['args'] + + l_tmpl = var2fixfortran(vars, name, '@@@NAME@@@', f90mode) + if l_tmpl[:13] == 'character*(*)': + if f90mode: + l_tmpl = 'character(len=10)' + l_tmpl[13:] + else: + l_tmpl = 'character*10' + l_tmpl[13:] + charselect = vars[name]['charselector'] + if charselect.get('*', '') == '(*)': + charselect['*'] = '10' + + l1 = l_tmpl.replace('@@@NAME@@@', newname) + rl = None + + useisoc = useiso_c_binding(rout) + sargs = ', '.join(args) + if f90mode: + # gh-23598 fix warning + # Essentially, this gets called again with modules where the name of the + # function is added to the arguments, which is not required, and removed + sargs = sargs.replace(f"{name}, ", '') + args = [arg for arg in args if arg != name] + rout['args'] = args + add(f"subroutine f2pywrap_{rout['modulename']}_{name} ({sargs})") + if not signature: + add(f"use {rout['modulename']}, only : {fortranname}") + if useisoc: + add('use iso_c_binding') + else: + add(f'subroutine f2pywrap{name} ({sargs})') + if useisoc: + add('use iso_c_binding') + if not need_interface: + add(f'external {fortranname}') + rl = l_tmpl.replace('@@@NAME@@@', '') + ' ' + fortranname + + if need_interface: + for line in rout['saved_interface'].split('\n'): + if line.lstrip().startswith('use ') and '__user__' not in line: + add(line) + + args = args[1:] + dumped_args = [] + for a in args: + if isexternal(vars[a]): + add(f'external {a}') + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + if isscalar(vars[a]): + add(var2fixfortran(vars, a, f90mode=f90mode)) + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + if isintent_in(vars[a]): + add(var2fixfortran(vars, a, f90mode=f90mode)) + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + add(var2fixfortran(vars, a, f90mode=f90mode)) + + add(l1) + if rl is not None: + add(rl) + + if need_interface: + if f90mode: + # f90 module already defines needed interface + pass + else: + add('interface') + add(rout['saved_interface'].lstrip()) + add('end interface') + + sargs = ', '.join([a for a in args if a not in extra_args]) + + if not signature: + if islogicalfunction(rout): + add(f'{newname} = .not.(.not.{fortranname}({sargs}))') + else: + add(f'{newname} = {fortranname}({sargs})') + if f90mode: + add(f"end subroutine f2pywrap_{rout['modulename']}_{name}") + else: + add('end') + return ret[0] + + +def createsubrwrapper(rout, signature=0): + assert issubroutine(rout) + + extra_args = [] + vars = rout['vars'] + for a in rout['args']: + v = rout['vars'][a] + for i, d in enumerate(v.get('dimension', [])): + if d == ':': + dn = f'f2py_{a}_d{i}' + dv = {'typespec': 'integer', 'intent': ['hide']} + dv['='] = f'shape({a}, {i})' + extra_args.append(dn) + vars[dn] = dv + v['dimension'][i] = dn + rout['args'].extend(extra_args) + need_interface = bool(extra_args) + + ret = [''] + + def add(line, ret=ret): + ret[0] = f'{ret[0]}\n {line}' + name = rout['name'] + fortranname = getfortranname(rout) + f90mode = ismoduleroutine(rout) + + args = rout['args'] + + useisoc = useiso_c_binding(rout) + sargs = ', '.join(args) + if f90mode: + add(f"subroutine f2pywrap_{rout['modulename']}_{name} ({sargs})") + if useisoc: + add('use iso_c_binding') + if not signature: + add(f"use {rout['modulename']}, only : {fortranname}") + else: + add(f'subroutine f2pywrap{name} ({sargs})') + if useisoc: + add('use iso_c_binding') + if not need_interface: + add(f'external {fortranname}') + + if need_interface: + for line in rout['saved_interface'].split('\n'): + if line.lstrip().startswith('use ') and '__user__' not in line: + add(line) + + dumped_args = [] + for a in args: + if isexternal(vars[a]): + add(f'external {a}') + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + if isscalar(vars[a]): + add(var2fixfortran(vars, a, f90mode=f90mode)) + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + add(var2fixfortran(vars, a, f90mode=f90mode)) + + if need_interface: + if f90mode: + # f90 module already defines needed interface + pass + else: + add('interface') + for line in rout['saved_interface'].split('\n'): + if line.lstrip().startswith('use ') and '__user__' in line: + continue + add(line) + add('end interface') + + sargs = ', '.join([a for a in args if a not in extra_args]) + + if not signature: + add(f'call {fortranname}({sargs})') + if f90mode: + add(f"end subroutine f2pywrap_{rout['modulename']}_{name}") + else: + add('end') + return ret[0] + + +def assubr(rout): + if isfunction_wrap(rout): + fortranname = getfortranname(rout) + name = rout['name'] + outmess('\t\tCreating wrapper for Fortran function "%s"("%s")...\n' % ( + name, fortranname)) + rout = copy.copy(rout) + fname = name + rname = fname + if 'result' in rout: + rname = rout['result'] + rout['vars'][fname] = rout['vars'][rname] + fvar = rout['vars'][fname] + if not isintent_out(fvar): + if 'intent' not in fvar: + fvar['intent'] = [] + fvar['intent'].append('out') + flag = 1 + for i in fvar['intent']: + if i.startswith('out='): + flag = 0 + break + if flag: + fvar['intent'].append(f'out={rname}') + rout['args'][:] = [fname] + rout['args'] + return rout, createfuncwrapper(rout) + if issubroutine_wrap(rout): + fortranname = getfortranname(rout) + name = rout['name'] + outmess('\t\tCreating wrapper for Fortran subroutine "%s"("%s")...\n' + % (name, fortranname)) + rout = copy.copy(rout) + return rout, createsubrwrapper(rout) + return rout, '' diff --git a/local_packages/numpy/f2py/func2subr.pyi b/local_packages/numpy/f2py/func2subr.pyi new file mode 100644 index 0000000..8d2b3db --- /dev/null +++ b/local_packages/numpy/f2py/func2subr.pyi @@ -0,0 +1,7 @@ +from .auxfuncs import _Bool, _ROut, _Var + +def var2fixfortran(vars: _Var, a: str, fa: str | None = None, f90mode: _Bool | None = None) -> str: ... +def useiso_c_binding(rout: _ROut) -> bool: ... +def createfuncwrapper(rout: _ROut, signature: int = 0) -> str: ... +def createsubrwrapper(rout: _ROut, signature: int = 0) -> str: ... +def assubr(rout: _ROut) -> tuple[dict[str, str], str]: ... diff --git a/local_packages/numpy/f2py/rules.py b/local_packages/numpy/f2py/rules.py new file mode 100644 index 0000000..68c49e6 --- /dev/null +++ b/local_packages/numpy/f2py/rules.py @@ -0,0 +1,1629 @@ +""" + +Rules for building C/API module with f2py2e. + +Here is a skeleton of a new wrapper function (13Dec2001): + +wrapper_function(args) + declarations + get_python_arguments, say, `a' and `b' + + get_a_from_python + if (successful) { + + get_b_from_python + if (successful) { + + callfortran + if (successful) { + + put_a_to_python + if (successful) { + + put_b_to_python + if (successful) { + + buildvalue = ... + + } + + } + + } + + } + cleanup_b + + } + cleanup_a + + return buildvalue + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +import copy +import os +import sys +import time +from pathlib import Path + +# __version__.version is now the same as the NumPy version +from . import ( + __version__, + capi_maps, + cfuncs, + common_rules, + f90mod_rules, + func2subr, + use_rules, +) +from .auxfuncs import ( + applyrules, + debugcapi, + dictappend, + errmess, + gentitle, + getargs2, + hascallstatement, + hasexternals, + hasinitvalue, + hasnote, + hasresultnote, + isarray, + isarrayofstrings, + isattr_value, + ischaracter, + ischaracter_or_characterarray, + ischaracterarray, + iscomplex, + iscomplexarray, + iscomplexfunction, + iscomplexfunction_warn, + isdummyroutine, + isexternal, + isfunction, + isfunction_wrap, + isint1, + isint1array, + isintent_aux, + isintent_c, + isintent_callback, + isintent_copy, + isintent_hide, + isintent_inout, + isintent_nothide, + isintent_out, + isintent_overwrite, + islogical, + islong_complex, + islong_double, + islong_doublefunction, + islong_long, + islong_longfunction, + ismoduleroutine, + isoptional, + isrequired, + isscalar, + issigned_long_longarray, + isstring, + isstringarray, + isstringfunction, + issubroutine, + issubroutine_wrap, + isthreadsafe, + isunsigned, + isunsigned_char, + isunsigned_chararray, + isunsigned_long_long, + isunsigned_long_longarray, + isunsigned_short, + isunsigned_shortarray, + l_and, + l_not, + l_or, + outmess, + replace, + requiresf90wrapper, + stripcomma, +) + +f2py_version = __version__.version +numpy_version = __version__.version + +options = {} +sepdict = {} +# for k in ['need_cfuncs']: sepdict[k]=',' +for k in ['decl', + 'frompyobj', + 'cleanupfrompyobj', + 'topyarr', 'method', + 'pyobjfrom', 'closepyobjfrom', + 'freemem', + 'userincludes', + 'includes0', 'includes', 'typedefs', 'typedefs_generated', + 'cppmacros', 'cfuncs', 'callbacks', + 'latexdoc', + 'restdoc', + 'routine_defs', 'externroutines', + 'initf2pywraphooks', + 'commonhooks', 'initcommonhooks', + 'f90modhooks', 'initf90modhooks']: + sepdict[k] = '\n' + +#################### Rules for C/API module ################# + +generationtime = int(os.environ.get('SOURCE_DATE_EPOCH', time.time())) +module_rules = { + 'modulebody': """\ +/* File: #modulename#module.c + * This file is auto-generated with f2py (version:#f2py_version#). + * f2py is a Fortran to Python Interface Generator (FPIG), Second Edition, + * written by Pearu Peterson . + * Generation date: """ + time.asctime(time.gmtime(generationtime)) + """ + * Do not edit this file directly unless you know what you are doing!!! + */ + +#ifdef __cplusplus +extern \"C\" { +#endif + +#ifndef PY_SSIZE_T_CLEAN +#define PY_SSIZE_T_CLEAN +#endif /* PY_SSIZE_T_CLEAN */ + +/* Unconditionally included */ +#include +#include + +""" + gentitle("See f2py2e/cfuncs.py: includes") + """ +#includes# +#includes0# + +""" + gentitle("See f2py2e/rules.py: mod_rules['modulebody']") + """ +static PyObject *#modulename#_error; +static PyObject *#modulename#_module; + +""" + gentitle("See f2py2e/cfuncs.py: typedefs") + """ +#typedefs# + +""" + gentitle("See f2py2e/cfuncs.py: typedefs_generated") + """ +#typedefs_generated# + +""" + gentitle("See f2py2e/cfuncs.py: cppmacros") + """ +#cppmacros# + +""" + gentitle("See f2py2e/cfuncs.py: cfuncs") + """ +#cfuncs# + +""" + gentitle("See f2py2e/cfuncs.py: userincludes") + """ +#userincludes# + +""" + gentitle("See f2py2e/capi_rules.py: usercode") + """ +#usercode# + +/* See f2py2e/rules.py */ +#externroutines# + +""" + gentitle("See f2py2e/capi_rules.py: usercode1") + """ +#usercode1# + +""" + gentitle("See f2py2e/cb_rules.py: buildcallback") + """ +#callbacks# + +""" + gentitle("See f2py2e/rules.py: buildapi") + """ +#body# + +""" + gentitle("See f2py2e/f90mod_rules.py: buildhooks") + """ +#f90modhooks# + +""" + gentitle("See f2py2e/rules.py: module_rules['modulebody']") + """ + +""" + gentitle("See f2py2e/common_rules.py: buildhooks") + """ +#commonhooks# + +""" + gentitle("See f2py2e/rules.py") + """ + +static FortranDataDef f2py_routine_defs[] = { +#routine_defs# + {NULL} +}; + +static PyMethodDef f2py_module_methods[] = { +#pymethoddef# + {NULL,NULL} +}; + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "#modulename#", + NULL, + -1, + f2py_module_methods, + NULL, + NULL, + NULL, + NULL +}; + +PyMODINIT_FUNC PyInit_#modulename#(void) { + int i; + PyObject *m,*d, *s, *tmp; + m = #modulename#_module = PyModule_Create(&moduledef); + Py_SET_TYPE(&PyFortran_Type, &PyType_Type); + import_array(); + if (PyErr_Occurred()) + {PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return m;} + d = PyModule_GetDict(m); + s = PyUnicode_FromString(\"#f2py_version#\"); + PyDict_SetItemString(d, \"__version__\", s); + Py_DECREF(s); + s = PyUnicode_FromString( + \"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\"); + PyDict_SetItemString(d, \"__doc__\", s); + Py_DECREF(s); + s = PyUnicode_FromString(\"""" + numpy_version + """\"); + PyDict_SetItemString(d, \"__f2py_numpy_version__\", s); + Py_DECREF(s); + #modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL); + /* + * Store the error object inside the dict, so that it could get deallocated. + * (in practice, this is a module, so it likely will not and cannot.) + */ + PyDict_SetItemString(d, \"_#modulename#_error\", #modulename#_error); + Py_DECREF(#modulename#_error); + for(i=0;f2py_routine_defs[i].name!=NULL;i++) { + tmp = PyFortranObject_NewAsAttr(&f2py_routine_defs[i]); + PyDict_SetItemString(d, f2py_routine_defs[i].name, tmp); + Py_DECREF(tmp); + } +#initf2pywraphooks# +#initf90modhooks# +#initcommonhooks# +#interface_usercode# + +#ifdef Py_GIL_DISABLED + // signal whether this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m , #gil_used#); +#endif + +#ifdef F2PY_REPORT_ATEXIT + if (! PyErr_Occurred()) + on_exit(f2py_report_on_exit,(void*)\"#modulename#\"); +#endif + + if (PyType_Ready(&PyFortran_Type) < 0) { + return NULL; + } + + return m; +} +#ifdef __cplusplus +} +#endif +""", + 'separatorsfor': {'latexdoc': '\n\n', + 'restdoc': '\n\n'}, + 'latexdoc': ['\\section{Module \\texttt{#texmodulename#}}\n', + '#modnote#\n', + '#latexdoc#'], + 'restdoc': ['Module #modulename#\n' + '=' * 80, + '\n#restdoc#'] +} + +defmod_rules = [ + {'body': '/*eof body*/', + 'method': '/*eof method*/', + 'externroutines': '/*eof externroutines*/', + 'routine_defs': '/*eof routine_defs*/', + 'initf90modhooks': '/*eof initf90modhooks*/', + 'initf2pywraphooks': '/*eof initf2pywraphooks*/', + 'initcommonhooks': '/*eof initcommonhooks*/', + 'latexdoc': '', + 'restdoc': '', + 'modnote': {hasnote: '#note#', l_not(hasnote): ''}, + } +] + +routine_rules = { + 'separatorsfor': sepdict, + 'body': """ +#begintitle# +static char doc_#apiname#[] = \"\\\n#docreturn##name#(#docsignatureshort#)\\n\\nWrapper for ``#name#``.\\\n\\n#docstrsigns#\"; +/* #declfortranroutine# */ +static PyObject *#apiname#(const PyObject *capi_self, + PyObject *capi_args, + PyObject *capi_keywds, + #functype# (*f2py_func)(#callprotoargument#)) { + PyObject * volatile capi_buildvalue = NULL; + volatile int f2py_success = 1; +#decl# + static char *capi_kwlist[] = {#kwlist##kwlistopt##kwlistxa#NULL}; +#usercode# +#routdebugenter# +#ifdef F2PY_REPORT_ATEXIT +f2py_start_clock(); +#endif + if (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\ + \"#argformat#|#keyformat##xaformat#:#pyname#\",\\ + capi_kwlist#args_capi##keys_capi##keys_xa#))\n return NULL; +#frompyobj# +/*end of frompyobj*/ +#ifdef F2PY_REPORT_ATEXIT +f2py_start_call_clock(); +#endif +#callfortranroutine# +if (PyErr_Occurred()) + f2py_success = 0; +#ifdef F2PY_REPORT_ATEXIT +f2py_stop_call_clock(); +#endif +/*end of callfortranroutine*/ + if (f2py_success) { +#pyobjfrom# +/*end of pyobjfrom*/ + CFUNCSMESS(\"Building return value.\\n\"); + capi_buildvalue = Py_BuildValue(\"#returnformat#\"#return#); +/*closepyobjfrom*/ +#closepyobjfrom# + } /*if (f2py_success) after callfortranroutine*/ +/*cleanupfrompyobj*/ +#cleanupfrompyobj# + if (capi_buildvalue == NULL) { +#routdebugfailure# + } else { +#routdebugleave# + } + CFUNCSMESS(\"Freeing memory.\\n\"); +#freemem# +#ifdef F2PY_REPORT_ATEXIT +f2py_stop_clock(); +#endif + return capi_buildvalue; +} +#endtitle# +""", + 'routine_defs': '#routine_def#', + 'initf2pywraphooks': '#initf2pywraphook#', + 'externroutines': '#declfortranroutine#', + 'doc': '#docreturn##name#(#docsignature#)', + 'docshort': '#docreturn##name#(#docsignatureshort#)', + 'docs': '" #docreturn##name#(#docsignature#)\\n"\n', + 'need': ['arrayobject.h', 'CFUNCSMESS', 'MINMAX'], + 'cppmacros': {debugcapi: '#define DEBUGCFUNCS'}, + 'latexdoc': ['\\subsection{Wrapper function \\texttt{#texname#}}\n', + """ +\\noindent{{}\\verb@#docreturn##name#@{}}\\texttt{(#latexdocsignatureshort#)} +#routnote# + +#latexdocstrsigns# +"""], + 'restdoc': ['Wrapped function ``#name#``\n' + '-' * 80, + + ] +} + +################## Rules for C/API function ############## + +rout_rules = [ + { # Init + 'separatorsfor': {'callfortranroutine': '\n', 'routdebugenter': '\n', 'decl': '\n', + 'routdebugleave': '\n', 'routdebugfailure': '\n', + 'setjmpbuf': ' || ', + 'docstrreq': '\n', 'docstropt': '\n', 'docstrout': '\n', + 'docstrcbs': '\n', 'docstrsigns': '\\n"\n"', + 'latexdocstrsigns': '\n', + 'latexdocstrreq': '\n', 'latexdocstropt': '\n', + 'latexdocstrout': '\n', 'latexdocstrcbs': '\n', + }, + 'kwlist': '', 'kwlistopt': '', 'callfortran': '', 'callfortranappend': '', + 'docsign': '', 'docsignopt': '', 'decl': '/*decl*/', + 'freemem': '/*freemem*/', + 'docsignshort': '', 'docsignoptshort': '', + 'docstrsigns': '', 'latexdocstrsigns': '', + 'docstrreq': '\\nParameters\\n----------', + 'docstropt': '\\nOther Parameters\\n----------------', + 'docstrout': '\\nReturns\\n-------', + 'docstrcbs': '\\nNotes\\n-----\\nCall-back functions::\\n', + 'latexdocstrreq': '\\noindent Required arguments:', + 'latexdocstropt': '\\noindent Optional arguments:', + 'latexdocstrout': '\\noindent Return objects:', + 'latexdocstrcbs': '\\noindent Call-back functions:', + 'args_capi': '', 'keys_capi': '', 'functype': '', + 'frompyobj': '/*frompyobj*/', + # this list will be reversed + 'cleanupfrompyobj': ['/*end of cleanupfrompyobj*/'], + 'pyobjfrom': '/*pyobjfrom*/', + # this list will be reversed + 'closepyobjfrom': ['/*end of closepyobjfrom*/'], + 'topyarr': '/*topyarr*/', 'routdebugleave': '/*routdebugleave*/', + 'routdebugenter': '/*routdebugenter*/', + 'routdebugfailure': '/*routdebugfailure*/', + 'callfortranroutine': '/*callfortranroutine*/', + 'argformat': '', 'keyformat': '', 'need_cfuncs': '', + 'docreturn': '', 'return': '', 'returnformat': '', 'rformat': '', + 'kwlistxa': '', 'keys_xa': '', 'xaformat': '', 'docsignxa': '', 'docsignxashort': '', + 'initf2pywraphook': '', + 'routnote': {hasnote: '--- #note#', l_not(hasnote): ''}, + }, { + 'apiname': 'f2py_rout_#modulename#_#name#', + 'pyname': '#modulename#.#name#', + 'decl': '', + '_check': l_not(ismoduleroutine) + }, { + 'apiname': 'f2py_rout_#modulename#_#f90modulename#_#name#', + 'pyname': '#modulename#.#f90modulename#.#name#', + 'decl': '', + '_check': ismoduleroutine + }, { # Subroutine + 'functype': 'void', + 'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern void #fortranname#(#callprotoargument#);', + ismoduleroutine: '', + isdummyroutine: '' + }, + 'routine_def': { + l_not(l_or(ismoduleroutine, isintent_c, isdummyroutine)): + ' {\"#name#\",-1,{{-1}},0,0,(char *)' + ' #F_FUNC#(#fortranname#,#FORTRANNAME#),' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): + ' {\"#name#\",-1,{{-1}},0,0,(char *)#fortranname#,' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isdummyroutine): + ' {\"#name#\",-1,{{-1}},0,0,NULL,' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'need': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'F_FUNC'}, + 'callfortranroutine': [ + {debugcapi: [ + """ fprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]}, + {hasexternals: """\ + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, + {hascallstatement: ''' #callstatement#; + /*(*f2py_func)(#callfortran#);*/'''}, + {l_not(l_or(hascallstatement, isdummyroutine)) + : ' (*f2py_func)(#callfortran#);'}, + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: """ }"""} + ], + '_check': l_and(issubroutine, l_not(issubroutine_wrap)), + }, { # Wrapped function + 'functype': 'void', + 'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);', + isdummyroutine: '', + }, + + 'routine_def': { + l_not(l_or(ismoduleroutine, isdummyroutine)): + ' {\"#name#\",-1,{{-1}},0,0,(char *)' + ' #F_WRAPPEDFUNC#(#name_lower#,#NAME#),' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + isdummyroutine: + ' {\"#name#\",-1,{{-1}},0,0,NULL,' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): ''' + { + extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void); + PyObject* o = PyDict_GetItemString(d,"#name#"); + tmp = F2PyCapsule_FromVoidPtr((void*)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),NULL); + PyObject_SetAttrString(o,"_cpointer", tmp); + Py_DECREF(tmp); + s = PyUnicode_FromString("#name#"); + PyObject_SetAttrString(o,"__name__", s); + Py_DECREF(s); + } + '''}, + 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']}, + 'callfortranroutine': [ + {debugcapi: [ + """ fprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, + {hasexternals: """\ + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, + {l_not(l_or(hascallstatement, isdummyroutine)) + : ' (*f2py_func)(#callfortran#);'}, + {hascallstatement: + ' #callstatement#;\n /*(*f2py_func)(#callfortran#);*/'}, + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: ' }'} + ], + '_check': isfunction_wrap, + }, { # Wrapped subroutine + 'functype': 'void', + 'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);', + isdummyroutine: '', + }, + + 'routine_def': { + l_not(l_or(ismoduleroutine, isdummyroutine)): + ' {\"#name#\",-1,{{-1}},0,0,(char *)' + ' #F_WRAPPEDFUNC#(#name_lower#,#NAME#),' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + isdummyroutine: + ' {\"#name#\",-1,{{-1}},0,0,NULL,' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): ''' + { + extern void #F_FUNC#(#name_lower#,#NAME#)(void); + PyObject* o = PyDict_GetItemString(d,"#name#"); + tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL); + PyObject_SetAttrString(o,"_cpointer", tmp); + Py_DECREF(tmp); + s = PyUnicode_FromString("#name#"); + PyObject_SetAttrString(o,"__name__", s); + Py_DECREF(s); + } + '''}, + 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']}, + 'callfortranroutine': [ + {debugcapi: [ + """ fprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, + {hasexternals: """\ + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, + {l_not(l_or(hascallstatement, isdummyroutine)) + : ' (*f2py_func)(#callfortran#);'}, + {hascallstatement: + ' #callstatement#;\n /*(*f2py_func)(#callfortran#);*/'}, + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: ' }'} + ], + '_check': issubroutine_wrap, + }, { # Function + 'functype': '#ctype#', + 'docreturn': {l_not(isintent_hide): '#rname#,'}, + 'docstrout': '#pydocsignout#', + 'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}', + {hasresultnote: '--- #resultnote#'}], + 'callfortranroutine': [{l_and(debugcapi, isstringfunction): """\ +#ifdef USESCOMPAQFORTRAN + fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\"); +#else + fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); +#endif +"""}, + {l_and(debugcapi, l_not(isstringfunction)): """\ + fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); +"""} + ], + '_check': l_and(isfunction, l_not(isfunction_wrap)) + }, { # Scalar function + 'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'extern #ctype# #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern #ctype# #fortranname#(#callprotoargument#);', + isdummyroutine: '' + }, + 'routine_def': { + l_and(l_not(l_or(ismoduleroutine, isintent_c)), + l_not(isdummyroutine)): + (' {\"#name#\",-1,{{-1}},0,0,(char *)' + ' #F_FUNC#(#fortranname#,#FORTRANNAME#),' + ' (f2py_init_func)#apiname#,doc_#apiname#},'), + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): + (' {\"#name#\",-1,{{-1}},0,0,(char *)#fortranname#,' + ' (f2py_init_func)#apiname#,doc_#apiname#},'), + isdummyroutine: + ' {\"#name#\",-1,{{-1}},0,0,NULL,' + '(f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'decl': [{iscomplexfunction_warn: ' #ctype# #name#_return_value={0,0};', + l_not(iscomplexfunction): ' #ctype# #name#_return_value=0;'}, + {iscomplexfunction: + ' PyObject *#name#_return_value_capi = Py_None;'} + ], + 'callfortranroutine': [ + {hasexternals: """\ + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, + {hascallstatement: ''' #callstatement#; +/* #name#_return_value = (*f2py_func)(#callfortran#);*/ +'''}, + {l_not(l_or(hascallstatement, isdummyroutine)) + : ' #name#_return_value = (*f2py_func)(#callfortran#);'}, + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: ' }'}, + {l_and(debugcapi, iscomplexfunction) + : ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'}, + {l_and(debugcapi, l_not(iscomplexfunction)): ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}], + 'pyobjfrom': {iscomplexfunction: ' #name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'}, + 'need': [{l_not(isdummyroutine): 'F_FUNC'}, + {iscomplexfunction: 'pyobj_from_#ctype#1'}, + {islong_longfunction: 'long_long'}, + {islong_doublefunction: 'long_double'}], + 'returnformat': {l_not(isintent_hide): '#rformat#'}, + 'return': {iscomplexfunction: ',#name#_return_value_capi', + l_not(l_or(iscomplexfunction, isintent_hide)): ',#name#_return_value'}, + '_check': l_and(isfunction, l_not(isstringfunction), l_not(isfunction_wrap)) + }, { # String function # in use for --no-wrap + 'declfortranroutine': 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', + 'routine_def': {l_not(l_or(ismoduleroutine, isintent_c)): + ' {\"#name#\",-1,{{-1}},0,0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isintent_c): + ' {\"#name#\",-1,{{-1}},0,0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},' + }, + 'decl': [' #ctype# #name#_return_value = NULL;', + ' int #name#_return_value_len = 0;'], + 'callfortran': '#name#_return_value,#name#_return_value_len,', + 'callfortranroutine': [' #name#_return_value_len = #rlength#;', + ' if ((#name#_return_value = (string)malloc(#name#_return_value_len+1) == NULL) {', + ' PyErr_SetString(PyExc_MemoryError, \"out of memory\");', + ' f2py_success = 0;', + ' } else {', + " (#name#_return_value)[#name#_return_value_len] = '\\0';", + ' }', + ' if (f2py_success) {', + {hasexternals: """\ + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, + """\ +#ifdef USESCOMPAQFORTRAN + (*f2py_func)(#callcompaqfortran#); +#else + (*f2py_func)(#callfortran#); +#endif +""", + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: ' }'}, + {debugcapi: + ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'}, + ' } /* if (f2py_success) after (string)malloc */', + ], + 'returnformat': '#rformat#', + 'return': ',#name#_return_value', + 'freemem': ' STRINGFREE(#name#_return_value);', + 'need': ['F_FUNC', '#ctype#', 'STRINGFREE'], + '_check': l_and(isstringfunction, l_not(isfunction_wrap)) # ???obsolete + }, + { # Debugging + 'routdebugenter': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");', + 'routdebugleave': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");', + 'routdebugfailure': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");', + '_check': debugcapi + } +] + +################ Rules for arguments ################## + +typedef_need_dict = {islong_long: 'long_long', + islong_double: 'long_double', + islong_complex: 'complex_long_double', + isunsigned_char: 'unsigned_char', + isunsigned_short: 'unsigned_short', + isunsigned: 'unsigned', + isunsigned_long_long: 'unsigned_long_long', + isunsigned_chararray: 'unsigned_char', + isunsigned_shortarray: 'unsigned_short', + isunsigned_long_longarray: 'unsigned_long_long', + issigned_long_longarray: 'long_long', + isint1: 'signed_char', + ischaracter_or_characterarray: 'character', + } + +aux_rules = [ + { + 'separatorsfor': sepdict + }, + { # Common + 'frompyobj': [' /* Processing auxiliary variable #varname# */', + {debugcapi: ' fprintf(stderr,"#vardebuginfo#\\n");'}, ], + 'cleanupfrompyobj': ' /* End of cleaning variable #varname# */', + 'need': typedef_need_dict, + }, + # Scalars (not complex) + { # Common + 'decl': ' #ctype# #varname# = 0;', + 'need': {hasinitvalue: 'math.h'}, + 'frompyobj': {hasinitvalue: ' #varname# = #init#;'}, + '_check': l_and(isscalar, l_not(iscomplex)), + }, + { + 'return': ',#varname#', + 'docstrout': '#pydocsignout#', + 'docreturn': '#outvarname#,', + 'returnformat': '#varrformat#', + '_check': l_and(isscalar, l_not(iscomplex), isintent_out), + }, + # Complex scalars + { # Common + 'decl': ' #ctype# #varname#;', + 'frompyobj': {hasinitvalue: ' #varname#.r = #init.r#, #varname#.i = #init.i#;'}, + '_check': iscomplex + }, + # String + { # Common + 'decl': [' #ctype# #varname# = NULL;', + ' int slen(#varname#);', + ], + 'need': ['len..'], + '_check': isstring + }, + # Array + { # Common + 'decl': [' #ctype# *#varname# = NULL;', + ' npy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', + ' const int #varname#_Rank = #rank#;', + ], + 'need': ['len..', {hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}], + '_check': isarray + }, + # Scalararray + { # Common + '_check': l_and(isarray, l_not(iscomplexarray)) + }, { # Not hidden + '_check': l_and(isarray, l_not(iscomplexarray), isintent_nothide) + }, + # Integer*1 array + {'need': '#ctype#', + '_check': isint1array, + '_depend': '' + }, + # Integer*-1 array + {'need': '#ctype#', + '_check': l_or(isunsigned_chararray, isunsigned_char), + '_depend': '' + }, + # Integer*-2 array + {'need': '#ctype#', + '_check': isunsigned_shortarray, + '_depend': '' + }, + # Integer*-8 array + {'need': '#ctype#', + '_check': isunsigned_long_longarray, + '_depend': '' + }, + # Complexarray + {'need': '#ctype#', + '_check': iscomplexarray, + '_depend': '' + }, + # Stringarray + { + 'callfortranappend': {isarrayofstrings: 'flen(#varname#),'}, + 'need': 'string', + '_check': isstringarray + } +] + +arg_rules = [ + { + 'separatorsfor': sepdict + }, + { # Common + 'frompyobj': [' /* Processing variable #varname# */', + {debugcapi: ' fprintf(stderr,"#vardebuginfo#\\n");'}, ], + 'cleanupfrompyobj': ' /* End of cleaning variable #varname# */', + '_depend': '', + 'need': typedef_need_dict, + }, + # Doc signatures + { + 'docstropt': {l_and(isoptional, isintent_nothide): '#pydocsign#'}, + 'docstrreq': {l_and(isrequired, isintent_nothide): '#pydocsign#'}, + 'docstrout': {isintent_out: '#pydocsignout#'}, + 'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote: '--- #note#'}]}, + 'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote: '--- #note#'}]}, + 'latexdocstrout': {isintent_out: ['\\item[]{{}\\verb@#pydocsignout#@{}}', + {l_and(hasnote, isintent_hide): '--- #note#', + l_and(hasnote, isintent_nothide): '--- See above.'}]}, + 'depend': '' + }, + # Required/Optional arguments + { + 'kwlist': '"#varname#",', + 'docsign': '#varname#,', + '_check': l_and(isintent_nothide, l_not(isoptional)) + }, + { + 'kwlistopt': '"#varname#",', + 'docsignopt': '#varname#=#showinit#,', + 'docsignoptshort': '#varname#,', + '_check': l_and(isintent_nothide, isoptional) + }, + # Docstring/BuildValue + { + 'docreturn': '#outvarname#,', + 'returnformat': '#varrformat#', + '_check': isintent_out + }, + # Externals (call-back functions) + { # Common + 'docsignxa': {isintent_nothide: '#varname#_extra_args=(),'}, + 'docsignxashort': {isintent_nothide: '#varname#_extra_args,'}, + 'docstropt': {isintent_nothide: '#varname#_extra_args : input tuple, optional\\n Default: ()'}, + 'docstrcbs': '#cbdocstr#', + 'latexdocstrcbs': '\\item[] #cblatexdocstr#', + 'latexdocstropt': {isintent_nothide: '\\item[]{{}\\verb@#varname#_extra_args := () input tuple@{}} --- Extra arguments for call-back function {{}\\verb@#varname#@{}}.'}, + 'decl': [' #cbname#_t #varname#_cb = { Py_None, NULL, 0 };', + ' #cbname#_t *#varname#_cb_ptr = &#varname#_cb;', + ' PyTupleObject *#varname#_xa_capi = NULL;', + {l_not(isintent_callback): + ' #cbname#_typedef #varname#_cptr;'} + ], + 'kwlistxa': {isintent_nothide: '"#varname#_extra_args",'}, + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'xaformat': {isintent_nothide: 'O!'}, + 'args_capi': {isrequired: ',&#varname#_cb.capi'}, + 'keys_capi': {isoptional: ',&#varname#_cb.capi'}, + 'keys_xa': ',&PyTuple_Type,&#varname#_xa_capi', + 'setjmpbuf': '(setjmp(#varname#_cb.jmpbuf))', + 'callfortran': {l_not(isintent_callback): '#varname#_cptr,'}, + 'need': ['#cbname#', 'setjmp.h'], + '_check': isexternal + }, + { + 'frompyobj': [{l_not(isintent_callback): """\ +if(F2PyCapsule_Check(#varname#_cb.capi)) { + #varname#_cptr = F2PyCapsule_AsVoidPtr(#varname#_cb.capi); +} else { + #varname#_cptr = #cbname#; +} +"""}, {isintent_callback: """\ +if (#varname#_cb.capi==Py_None) { + #varname#_cb.capi = PyObject_GetAttrString(#modulename#_module,\"#varname#\"); + if (#varname#_cb.capi) { + if (#varname#_xa_capi==NULL) { + if (PyObject_HasAttrString(#modulename#_module,\"#varname#_extra_args\")) { + PyObject* capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#varname#_extra_args\"); + if (capi_tmp) { + #varname#_xa_capi = (PyTupleObject *)PySequence_Tuple(capi_tmp); + Py_DECREF(capi_tmp); + } + else { + #varname#_xa_capi = (PyTupleObject *)Py_BuildValue(\"()\"); + } + if (#varname#_xa_capi==NULL) { + PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#varname#_extra_args to tuple.\\n\"); + return NULL; + } + } + } + } + if (#varname#_cb.capi==NULL) { + PyErr_SetString(#modulename#_error,\"Callback #varname# not defined (as an argument or module #modulename# attribute).\\n\"); + return NULL; + } +} +"""}, + """\ + if (create_cb_arglist(#varname#_cb.capi,#varname#_xa_capi,#maxnofargs#,#nofoptargs#,&#varname#_cb.nofargs,&#varname#_cb.args_capi,\"failed in processing argument list for call-back #varname#.\")) { +""", + {debugcapi: ["""\ + fprintf(stderr,\"debug-capi:Assuming %d arguments; at most #maxnofargs#(-#nofoptargs#) is expected.\\n\",#varname#_cb.nofargs); + CFUNCSMESSPY(\"for #varname#=\",#varname#_cb.capi);""", + {l_not(isintent_callback): """ fprintf(stderr,\"#vardebugshowvalue# (call-back in C).\\n\",#cbname#);"""}]}, + """\ + CFUNCSMESS(\"Saving callback variables for `#varname#`.\\n\"); + #varname#_cb_ptr = swap_active_#cbname#(#varname#_cb_ptr);""", + ], + 'cleanupfrompyobj': + """\ + CFUNCSMESS(\"Restoring callback variables for `#varname#`.\\n\"); + #varname#_cb_ptr = swap_active_#cbname#(#varname#_cb_ptr); + Py_DECREF(#varname#_cb.args_capi); + }""", + 'need': ['SWAP', 'create_cb_arglist'], + '_check': isexternal, + '_depend': '' + }, + # Scalars (not complex) + { # Common + 'decl': ' #ctype# #varname# = 0;', + 'pyobjfrom': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, + 'callfortran': {l_or(isintent_c, isattr_value): '#varname#,', l_not(l_or(isintent_c, isattr_value)): '&#varname#,'}, + 'return': {isintent_out: ',#varname#'}, + '_check': l_and(isscalar, l_not(iscomplex)) + }, { + 'need': {hasinitvalue: 'math.h'}, + '_check': l_and(isscalar, l_not(iscomplex)), + }, { # Not hidden + 'decl': ' PyObject *#varname#_capi = Py_None;', + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'args_capi': {isrequired: ',&#varname#_capi'}, + 'keys_capi': {isoptional: ',&#varname#_capi'}, + 'pyobjfrom': {isintent_inout: """\ + f2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); + if (f2py_success) {"""}, + 'closepyobjfrom': {isintent_inout: " } /*if (f2py_success) of #varname# pyobjfrom*/"}, + 'need': {isintent_inout: 'try_pyarr_from_#ctype#'}, + '_check': l_and(isscalar, l_not(iscomplex), l_not(isstring), + isintent_nothide) + }, { + 'frompyobj': [ + # hasinitvalue... + # if pyobj is None: + # varname = init + # else + # from_pyobj(varname) + # + # isoptional and noinitvalue... + # if pyobj is not None: + # from_pyobj(varname) + # else: + # varname is uninitialized + # + # ... + # from_pyobj(varname) + # + {hasinitvalue: ' if (#varname#_capi == Py_None) #varname# = #init#; else', + '_depend': ''}, + {l_and(isoptional, l_not(hasinitvalue)): ' if (#varname#_capi != Py_None)', + '_depend': ''}, + {l_not(islogical): '''\ + f2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#"); + if (f2py_success) {'''}, + {islogical: '''\ + #varname# = (#ctype#)PyObject_IsTrue(#varname#_capi); + f2py_success = 1; + if (f2py_success) {'''}, + ], + 'cleanupfrompyobj': ' } /*if (f2py_success) of #varname#*/', + 'need': {l_not(islogical): '#ctype#_from_pyobj'}, + '_check': l_and(isscalar, l_not(iscomplex), isintent_nothide), + '_depend': '' + }, { # Hidden + 'frompyobj': {hasinitvalue: ' #varname# = #init#;'}, + 'need': typedef_need_dict, + '_check': l_and(isscalar, l_not(iscomplex), isintent_hide), + '_depend': '' + }, { # Common + 'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, + '_check': l_and(isscalar, l_not(iscomplex)), + '_depend': '' + }, + # Complex scalars + { # Common + 'decl': ' #ctype# #varname#;', + 'callfortran': {isintent_c: '#varname#,', l_not(isintent_c): '&#varname#,'}, + 'pyobjfrom': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, + 'return': {isintent_out: ',#varname#_capi'}, + '_check': iscomplex + }, { # Not hidden + 'decl': ' PyObject *#varname#_capi = Py_None;', + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'args_capi': {isrequired: ',&#varname#_capi'}, + 'keys_capi': {isoptional: ',&#varname#_capi'}, + 'need': {isintent_inout: 'try_pyarr_from_#ctype#'}, + 'pyobjfrom': {isintent_inout: """\ + f2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); + if (f2py_success) {"""}, + 'closepyobjfrom': {isintent_inout: " } /*if (f2py_success) of #varname# pyobjfrom*/"}, + '_check': l_and(iscomplex, isintent_nothide) + }, { + 'frompyobj': [{hasinitvalue: ' if (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'}, + {l_and(isoptional, l_not(hasinitvalue)) + : ' if (#varname#_capi != Py_None)'}, + ' f2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");' + '\n if (f2py_success) {'], + 'cleanupfrompyobj': ' } /*if (f2py_success) of #varname# frompyobj*/', + 'need': ['#ctype#_from_pyobj'], + '_check': l_and(iscomplex, isintent_nothide), + '_depend': '' + }, { # Hidden + 'decl': {isintent_out: ' PyObject *#varname#_capi = Py_None;'}, + '_check': l_and(iscomplex, isintent_hide) + }, { + 'frompyobj': {hasinitvalue: ' #varname#.r = #init.r#, #varname#.i = #init.i#;'}, + '_check': l_and(iscomplex, isintent_hide), + '_depend': '' + }, { # Common + 'pyobjfrom': {isintent_out: ' #varname#_capi = pyobj_from_#ctype#1(#varname#);'}, + 'need': ['pyobj_from_#ctype#1'], + '_check': iscomplex + }, { + 'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, + '_check': iscomplex, + '_depend': '' + }, + # String + { # Common + 'decl': [' #ctype# #varname# = NULL;', + ' int slen(#varname#);', + ' PyObject *#varname#_capi = Py_None;'], + 'callfortran': '#varname#,', + 'callfortranappend': 'slen(#varname#),', + 'pyobjfrom': [ + {debugcapi: + ' fprintf(stderr,' + '"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, + # The trailing null value for Fortran is blank. + {l_and(isintent_out, l_not(isintent_c)): + " STRINGPADN(#varname#, slen(#varname#), ' ', '\\0');"}, + ], + 'return': {isintent_out: ',#varname#'}, + 'need': ['len..', + {l_and(isintent_out, l_not(isintent_c)): 'STRINGPADN'}], + '_check': isstring + }, { # Common + 'frompyobj': [ + """\ + slen(#varname#) = #elsize#; + f2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,""" +"""#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth#""" +"""`#varname#\' of #pyname# to C #ctype#\"); + if (f2py_success) {""", + # The trailing null value for Fortran is blank. + {l_not(isintent_c): + " STRINGPADN(#varname#, slen(#varname#), '\\0', ' ');"}, + ], + 'cleanupfrompyobj': """\ + STRINGFREE(#varname#); + } /*if (f2py_success) of #varname#*/""", + 'need': ['#ctype#_from_pyobj', 'len..', 'STRINGFREE', + {l_not(isintent_c): 'STRINGPADN'}], + '_check': isstring, + '_depend': '' + }, { # Not hidden + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'args_capi': {isrequired: ',&#varname#_capi'}, + 'keys_capi': {isoptional: ',&#varname#_capi'}, + 'pyobjfrom': [ + {l_and(isintent_inout, l_not(isintent_c)): + " STRINGPADN(#varname#, slen(#varname#), ' ', '\\0');"}, + {isintent_inout: '''\ + f2py_success = try_pyarr_from_#ctype#(#varname#_capi, #varname#, + slen(#varname#)); + if (f2py_success) {'''}], + 'closepyobjfrom': {isintent_inout: ' } /*if (f2py_success) of #varname# pyobjfrom*/'}, + 'need': {isintent_inout: 'try_pyarr_from_#ctype#', + l_and(isintent_inout, l_not(isintent_c)): 'STRINGPADN'}, + '_check': l_and(isstring, isintent_nothide) + }, { # Hidden + '_check': l_and(isstring, isintent_hide) + }, { + 'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, + '_check': isstring, + '_depend': '' + }, + # Array + { # Common + 'decl': [' #ctype# *#varname# = NULL;', + ' npy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', + ' const int #varname#_Rank = #rank#;', + ' PyArrayObject *capi_#varname#_as_array = NULL;', + ' int capi_#varname#_intent = 0;', + {isstringarray: ' int slen(#varname#) = 0;'}, + ], + 'callfortran': '#varname#,', + 'callfortranappend': {isstringarray: 'slen(#varname#),'}, + 'return': {isintent_out: ',capi_#varname#_as_array'}, + 'need': 'len..', + '_check': isarray + }, { # intent(overwrite) array + 'decl': ' int capi_overwrite_#varname# = 1;', + 'kwlistxa': '"overwrite_#varname#",', + 'xaformat': 'i', + 'keys_xa': ',&capi_overwrite_#varname#', + 'docsignxa': 'overwrite_#varname#=1,', + 'docsignxashort': 'overwrite_#varname#,', + 'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 1', + '_check': l_and(isarray, isintent_overwrite), + }, { + 'frompyobj': ' capi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', + '_check': l_and(isarray, isintent_overwrite), + '_depend': '', + }, + { # intent(copy) array + 'decl': ' int capi_overwrite_#varname# = 0;', + 'kwlistxa': '"overwrite_#varname#",', + 'xaformat': 'i', + 'keys_xa': ',&capi_overwrite_#varname#', + 'docsignxa': 'overwrite_#varname#=0,', + 'docsignxashort': 'overwrite_#varname#,', + 'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 0', + '_check': l_and(isarray, isintent_copy), + }, { + 'frompyobj': ' capi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', + '_check': l_and(isarray, isintent_copy), + '_depend': '', + }, { + 'need': [{hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}], + '_check': isarray, + '_depend': '' + }, { # Not hidden + 'decl': ' PyObject *#varname#_capi = Py_None;', + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'args_capi': {isrequired: ',&#varname#_capi'}, + 'keys_capi': {isoptional: ',&#varname#_capi'}, + '_check': l_and(isarray, isintent_nothide) + }, { + 'frompyobj': [ + ' #setdims#;', + ' capi_#varname#_intent |= #intent#;', + (' const char capi_errmess[] = "#modulename#.#pyname#:' + ' failed to create array from the #nth# `#varname#`";'), + {isintent_hide: + ' capi_#varname#_as_array = ndarray_from_pyobj(' + ' #atype#,#elsize#,#varname#_Dims,#varname#_Rank,' + ' capi_#varname#_intent,Py_None,capi_errmess);'}, + {isintent_nothide: + ' capi_#varname#_as_array = ndarray_from_pyobj(' + ' #atype#,#elsize#,#varname#_Dims,#varname#_Rank,' + ' capi_#varname#_intent,#varname#_capi,capi_errmess);'}, + """\ + if (capi_#varname#_as_array == NULL) { + PyObject* capi_err = PyErr_Occurred(); + if (capi_err == NULL) { + capi_err = #modulename#_error; + PyErr_SetString(capi_err, capi_errmess); + } + } else { + #varname# = (#ctype# *)(PyArray_DATA(capi_#varname#_as_array)); +""", + {isstringarray: + ' slen(#varname#) = f2py_itemsize(#varname#);'}, + {hasinitvalue: [ + {isintent_nothide: + ' if (#varname#_capi == Py_None) {'}, + {isintent_hide: ' {'}, + {iscomplexarray: ' #ctype# capi_c;'}, + """\ + int *_i,capi_i=0; + CFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\"); + struct ForcombCache cache; + if (initforcomb(&cache, PyArray_DIMS(capi_#varname#_as_array), + PyArray_NDIM(capi_#varname#_as_array),1)) { + while ((_i = nextforcomb(&cache))) + #varname#[capi_i++] = #init#; /* fortran way */ + } else { + PyObject *exc, *val, *tb; + PyErr_Fetch(&exc, &val, &tb); + PyErr_SetString(exc ? exc : #modulename#_error, + \"Initialization of #nth# #varname# failed (initforcomb).\"); + npy_PyErr_ChainExceptionsCause(exc, val, tb); + f2py_success = 0; + } + } + if (f2py_success) {"""]}, + ], + 'cleanupfrompyobj': [ # note that this list will be reversed + ' } ' + '/* if (capi_#varname#_as_array == NULL) ... else of #varname# */', + {l_not(l_or(isintent_out, isintent_hide)): """\ + if((PyObject *)capi_#varname#_as_array!=#varname#_capi) { + Py_XDECREF(capi_#varname#_as_array); }"""}, + {l_and(isintent_hide, l_not(isintent_out)) + : """ Py_XDECREF(capi_#varname#_as_array);"""}, + {hasinitvalue: ' } /*if (f2py_success) of #varname# init*/'}, + ], + '_check': isarray, + '_depend': '' + }, + # Scalararray + { # Common + '_check': l_and(isarray, l_not(iscomplexarray)) + }, { # Not hidden + '_check': l_and(isarray, l_not(iscomplexarray), isintent_nothide) + }, + # Integer*1 array + {'need': '#ctype#', + '_check': isint1array, + '_depend': '' + }, + # Integer*-1 array + {'need': '#ctype#', + '_check': isunsigned_chararray, + '_depend': '' + }, + # Integer*-2 array + {'need': '#ctype#', + '_check': isunsigned_shortarray, + '_depend': '' + }, + # Integer*-8 array + {'need': '#ctype#', + '_check': isunsigned_long_longarray, + '_depend': '' + }, + # Complexarray + {'need': '#ctype#', + '_check': iscomplexarray, + '_depend': '' + }, + # Character + { + 'need': 'string', + '_check': ischaracter, + }, + # Character array + { + 'need': 'string', + '_check': ischaracterarray, + }, + # Stringarray + { + 'callfortranappend': {isarrayofstrings: 'flen(#varname#),'}, + 'need': 'string', + '_check': isstringarray + } +] + +################# Rules for checking ############### + +check_rules = [ + { + 'frompyobj': {debugcapi: ' fprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'}, + 'need': 'len..' + }, { + 'frompyobj': ' CHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', + 'cleanupfrompyobj': ' } /*CHECKSCALAR(#check#)*/', + 'need': 'CHECKSCALAR', + '_check': l_and(isscalar, l_not(iscomplex)), + '_break': '' + }, { + 'frompyobj': ' CHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', + 'cleanupfrompyobj': ' } /*CHECKSTRING(#check#)*/', + 'need': 'CHECKSTRING', + '_check': isstring, + '_break': '' + }, { + 'need': 'CHECKARRAY', + 'frompyobj': ' CHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {', + 'cleanupfrompyobj': ' } /*CHECKARRAY(#check#)*/', + '_check': isarray, + '_break': '' + }, { + 'need': 'CHECKGENERIC', + 'frompyobj': ' CHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {', + 'cleanupfrompyobj': ' } /*CHECKGENERIC(#check#)*/', + } +] + +########## Applying the rules. No need to modify what follows ############# + +#################### Build C/API module ####################### + + +def buildmodule(m, um): + """ + Return + """ + outmess(f" Building module \"{m['name']}\"...\n") + ret = {} + mod_rules = defmod_rules[:] + vrd = capi_maps.modsign2map(m) + rd = dictappend({'f2py_version': f2py_version}, vrd) + funcwrappers = [] + funcwrappers2 = [] # F90 codes + for n in m['interfaced']: + nb = None + for bi in m['body']: + if bi['block'] not in ['interface', 'abstract interface']: + errmess('buildmodule: Expected interface block. Skipping.\n') + continue + for b in bi['body']: + if b['name'] == n: + nb = b + break + + if not nb: + print( + f'buildmodule: Could not find the body of interfaced routine "{n}". Skipping.\n', file=sys.stderr) + continue + nb_list = [nb] + if 'entry' in nb: + for k, a in nb['entry'].items(): + nb1 = copy.deepcopy(nb) + del nb1['entry'] + nb1['name'] = k + nb1['args'] = a + nb_list.append(nb1) + for nb in nb_list: + # requiresf90wrapper must be called before buildapi as it + # rewrites assumed shape arrays as automatic arrays. + isf90 = requiresf90wrapper(nb) + # options is in scope here + if options['emptygen']: + b_path = options['buildpath'] + m_name = vrd['modulename'] + outmess(' Generating possibly empty wrappers"\n') + Path(f"{b_path}/{vrd['coutput']}").touch() + if isf90: + # f77 + f90 wrappers + outmess(f' Maybe empty "{m_name}-f2pywrappers2.f90"\n') + Path(f'{b_path}/{m_name}-f2pywrappers2.f90').touch() + outmess(f' Maybe empty "{m_name}-f2pywrappers.f"\n') + Path(f'{b_path}/{m_name}-f2pywrappers.f').touch() + else: + # only f77 wrappers + outmess(f' Maybe empty "{m_name}-f2pywrappers.f"\n') + Path(f'{b_path}/{m_name}-f2pywrappers.f').touch() + api, wrap = buildapi(nb) + if wrap: + if isf90: + funcwrappers2.append(wrap) + else: + funcwrappers.append(wrap) + ar = applyrules(api, vrd) + rd = dictappend(rd, ar) + + # Construct COMMON block support + cr, wrap = common_rules.buildhooks(m) + if wrap: + funcwrappers.append(wrap) + ar = applyrules(cr, vrd) + rd = dictappend(rd, ar) + + # Construct F90 module support + mr, wrap = f90mod_rules.buildhooks(m) + if wrap: + funcwrappers2.append(wrap) + ar = applyrules(mr, vrd) + rd = dictappend(rd, ar) + + for u in um: + ar = use_rules.buildusevars(u, m['use'][u['name']]) + rd = dictappend(rd, ar) + + needs = cfuncs.get_needs() + # Add mapped definitions + needs['typedefs'] += [cvar for cvar in capi_maps.f2cmap_mapped # + if cvar in typedef_need_dict.values()] + code = {} + for n in needs.keys(): + code[n] = [] + for k in needs[n]: + c = '' + if k in cfuncs.includes0: + c = cfuncs.includes0[k] + elif k in cfuncs.includes: + c = cfuncs.includes[k] + elif k in cfuncs.userincludes: + c = cfuncs.userincludes[k] + elif k in cfuncs.typedefs: + c = cfuncs.typedefs[k] + elif k in cfuncs.typedefs_generated: + c = cfuncs.typedefs_generated[k] + elif k in cfuncs.cppmacros: + c = cfuncs.cppmacros[k] + elif k in cfuncs.cfuncs: + c = cfuncs.cfuncs[k] + elif k in cfuncs.callbacks: + c = cfuncs.callbacks[k] + elif k in cfuncs.f90modhooks: + c = cfuncs.f90modhooks[k] + elif k in cfuncs.commonhooks: + c = cfuncs.commonhooks[k] + else: + errmess(f'buildmodule: unknown need {repr(k)}.\n') + continue + code[n].append(c) + mod_rules.append(code) + for r in mod_rules: + if ('_check' in r and r['_check'](m)) or ('_check' not in r): + ar = applyrules(r, vrd, m) + rd = dictappend(rd, ar) + ar = applyrules(module_rules, rd) + + fn = os.path.join(options['buildpath'], vrd['coutput']) + ret['csrc'] = fn + with open(fn, 'w') as f: + f.write(ar['modulebody'].replace('\t', 2 * ' ')) + outmess(f" Wrote C/API module \"{m['name']}\" to file \"{fn}\"\n") + + if options['dorestdoc']: + fn = os.path.join( + options['buildpath'], vrd['modulename'] + 'module.rest') + with open(fn, 'w') as f: + f.write('.. -*- rest -*-\n') + f.write('\n'.join(ar['restdoc'])) + outmess(' ReST Documentation is saved to file "%s/%smodule.rest"\n' % + (options['buildpath'], vrd['modulename'])) + if options['dolatexdoc']: + fn = os.path.join( + options['buildpath'], vrd['modulename'] + 'module.tex') + ret['ltx'] = fn + with open(fn, 'w') as f: + f.write( + f'% This file is auto-generated with f2py (version:{f2py_version})\n') + if 'shortlatex' not in options: + f.write( + '\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n') + f.write('\n'.join(ar['latexdoc'])) + if 'shortlatex' not in options: + f.write('\\end{document}') + outmess(' Documentation is saved to file "%s/%smodule.tex"\n' % + (options['buildpath'], vrd['modulename'])) + if funcwrappers: + wn = os.path.join(options['buildpath'], vrd['f2py_wrapper_output']) + ret['fsrc'] = wn + with open(wn, 'w') as f: + f.write('C -*- fortran -*-\n') + f.write( + f'C This file is autogenerated with f2py (version:{f2py_version})\n') + f.write( + 'C It contains Fortran 77 wrappers to fortran functions.\n') + lines = [] + for l in ('\n\n'.join(funcwrappers) + '\n').split('\n'): + if 0 <= l.find('!') < 66: + # don't split comment lines + lines.append(l + '\n') + elif l and l[0] == ' ': + while len(l) >= 66: + lines.append(l[:66] + '\n &') + l = l[66:] + lines.append(l + '\n') + else: + lines.append(l + '\n') + lines = ''.join(lines).replace('\n &\n', '\n') + f.write(lines) + outmess(f' Fortran 77 wrappers are saved to "{wn}\"\n') + if funcwrappers2: + wn = os.path.join( + options['buildpath'], f"{vrd['modulename']}-f2pywrappers2.f90") + ret['fsrc'] = wn + with open(wn, 'w') as f: + f.write('! -*- f90 -*-\n') + f.write( + f'! This file is autogenerated with f2py (version:{f2py_version})\n') + f.write( + '! It contains Fortran 90 wrappers to fortran functions.\n') + lines = [] + for l in ('\n\n'.join(funcwrappers2) + '\n').split('\n'): + if 0 <= l.find('!') < 72: + # don't split comment lines + lines.append(l + '\n') + elif len(l) > 72 and l[0] == ' ': + lines.append(l[:72] + '&\n &') + l = l[72:] + while len(l) > 66: + lines.append(l[:66] + '&\n &') + l = l[66:] + lines.append(l + '\n') + else: + lines.append(l + '\n') + lines = ''.join(lines).replace('\n &\n', '\n') + f.write(lines) + outmess(f' Fortran 90 wrappers are saved to "{wn}\"\n') + return ret + +################## Build C/API function ############# + + +stnd = {1: 'st', 2: 'nd', 3: 'rd', 4: 'th', 5: 'th', + 6: 'th', 7: 'th', 8: 'th', 9: 'th', 0: 'th'} + + +def buildapi(rout): + rout, wrap = func2subr.assubr(rout) + args, depargs = getargs2(rout) + capi_maps.depargs = depargs + var = rout['vars'] + + if ismoduleroutine(rout): + outmess(' Constructing wrapper function "%s.%s"...\n' % + (rout['modulename'], rout['name'])) + else: + outmess(f" Constructing wrapper function \"{rout['name']}\"...\n") + # Routine + vrd = capi_maps.routsign2map(rout) + rd = dictappend({}, vrd) + for r in rout_rules: + if ('_check' in r and r['_check'](rout)) or ('_check' not in r): + ar = applyrules(r, vrd, rout) + rd = dictappend(rd, ar) + + # Args + nth, nthk = 0, 0 + savevrd = {} + for a in args: + vrd = capi_maps.sign2map(a, var[a]) + if isintent_aux(var[a]): + _rules = aux_rules + else: + _rules = arg_rules + if not isintent_hide(var[a]): + if not isoptional(var[a]): + nth = nth + 1 + vrd['nth'] = repr(nth) + stnd[nth % 10] + ' argument' + else: + nthk = nthk + 1 + vrd['nth'] = repr(nthk) + stnd[nthk % 10] + ' keyword' + else: + vrd['nth'] = 'hidden' + savevrd[a] = vrd + for r in _rules: + if '_depend' in r: + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + for a in depargs: + if isintent_aux(var[a]): + _rules = aux_rules + else: + _rules = arg_rules + vrd = savevrd[a] + for r in _rules: + if '_depend' not in r: + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + if 'check' in var[a]: + for c in var[a]['check']: + vrd['check'] = c + ar = applyrules(check_rules, vrd, var[a]) + rd = dictappend(rd, ar) + if isinstance(rd['cleanupfrompyobj'], list): + rd['cleanupfrompyobj'].reverse() + if isinstance(rd['closepyobjfrom'], list): + rd['closepyobjfrom'].reverse() + rd['docsignature'] = stripcomma(replace('#docsign##docsignopt##docsignxa#', + {'docsign': rd['docsign'], + 'docsignopt': rd['docsignopt'], + 'docsignxa': rd['docsignxa']})) + optargs = stripcomma(replace('#docsignopt##docsignxa#', + {'docsignxa': rd['docsignxashort'], + 'docsignopt': rd['docsignoptshort']} + )) + if optargs == '': + rd['docsignatureshort'] = stripcomma( + replace('#docsign#', {'docsign': rd['docsign']})) + else: + rd['docsignatureshort'] = replace('#docsign#[#docsignopt#]', + {'docsign': rd['docsign'], + 'docsignopt': optargs, + }) + rd['latexdocsignatureshort'] = rd['docsignatureshort'].replace('_', '\\_') + rd['latexdocsignatureshort'] = rd[ + 'latexdocsignatureshort'].replace(',', ', ') + cfs = stripcomma(replace('#callfortran##callfortranappend#', { + 'callfortran': rd['callfortran'], 'callfortranappend': rd['callfortranappend']})) + if len(rd['callfortranappend']) > 1: + rd['callcompaqfortran'] = stripcomma(replace('#callfortran# 0,#callfortranappend#', { + 'callfortran': rd['callfortran'], 'callfortranappend': rd['callfortranappend']})) + else: + rd['callcompaqfortran'] = cfs + rd['callfortran'] = cfs + if isinstance(rd['docreturn'], list): + rd['docreturn'] = stripcomma( + replace('#docreturn#', {'docreturn': rd['docreturn']})) + ' = ' + rd['docstrsigns'] = [] + rd['latexdocstrsigns'] = [] + for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']: + if k in rd and isinstance(rd[k], list): + rd['docstrsigns'] = rd['docstrsigns'] + rd[k] + k = 'latex' + k + if k in rd and isinstance(rd[k], list): + rd['latexdocstrsigns'] = rd['latexdocstrsigns'] + rd[k][0:1] +\ + ['\\begin{description}'] + rd[k][1:] +\ + ['\\end{description}'] + + ar = applyrules(routine_rules, rd) + if ismoduleroutine(rout): + outmess(f" {ar['docshort']}\n") + else: + outmess(f" {ar['docshort']}\n") + return ar, wrap + + +#################### EOF rules.py ####################### diff --git a/local_packages/numpy/f2py/rules.pyi b/local_packages/numpy/f2py/rules.pyi new file mode 100644 index 0000000..30439f6 --- /dev/null +++ b/local_packages/numpy/f2py/rules.pyi @@ -0,0 +1,41 @@ +from collections.abc import Callable, Iterable, Mapping +from typing import Any, Final, Literal as L, TypeAlias +from typing_extensions import TypeVar + +from .__version__ import version +from .auxfuncs import _Bool, _Var + +### + +_VT = TypeVar("_VT", default=str) + +_Predicate: TypeAlias = Callable[[_Var], _Bool] +_RuleDict: TypeAlias = dict[str, _VT] +_DefDict: TypeAlias = dict[_Predicate, _VT] + +### + +f2py_version: Final = version +numpy_version: Final = version + +options: Final[dict[str, bool]] = ... +sepdict: Final[dict[str, str]] = ... + +generationtime: Final[int] = ... +typedef_need_dict: Final[_DefDict[str]] = ... + +module_rules: Final[_RuleDict[str | list[str] | _RuleDict]] = ... +routine_rules: Final[_RuleDict[str | list[str] | _DefDict | _RuleDict]] = ... +defmod_rules: Final[list[_RuleDict[str | _DefDict]]] = ... +rout_rules: Final[list[_RuleDict[str | Any]]] = ... +aux_rules: Final[list[_RuleDict[str | Any]]] = ... +arg_rules: Final[list[_RuleDict[str | Any]]] = ... +check_rules: Final[list[_RuleDict[str | Any]]] = ... + +stnd: Final[dict[L[1, 2, 3, 4, 5, 6, 7, 8, 9, 0], L["st", "nd", "rd", "th"]]] = ... + +def buildmodule(m: Mapping[str, str | Any], um: Iterable[Mapping[str, str | Any]]) -> _RuleDict: ... +def buildapi(rout: Mapping[str, str]) -> tuple[_RuleDict, str]: ... + +# namespace pollution +k: str diff --git a/local_packages/numpy/f2py/setup.cfg b/local_packages/numpy/f2py/setup.cfg new file mode 100644 index 0000000..1466954 --- /dev/null +++ b/local_packages/numpy/f2py/setup.cfg @@ -0,0 +1,3 @@ +[bdist_rpm] +doc_files = docs/ + tests/ \ No newline at end of file diff --git a/local_packages/numpy/f2py/src/fortranobject.c b/local_packages/numpy/f2py/src/fortranobject.c new file mode 100644 index 0000000..d6664d6 --- /dev/null +++ b/local_packages/numpy/f2py/src/fortranobject.c @@ -0,0 +1,1436 @@ +#define FORTRANOBJECT_C +#include "fortranobject.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include + +/* + This file implements: FortranObject, array_from_pyobj, copy_ND_array + + Author: Pearu Peterson + $Revision: 1.52 $ + $Date: 2005/07/11 07:44:20 $ +*/ + +int +F2PyDict_SetItemString(PyObject *dict, char *name, PyObject *obj) +{ + if (obj == NULL) { + fprintf(stderr, "Error loading %s\n", name); + if (PyErr_Occurred()) { + PyErr_Print(); + PyErr_Clear(); + } + return -1; + } + return PyDict_SetItemString(dict, name, obj); +} + +/* + * Python-only fallback for thread-local callback pointers + */ +void * +F2PySwapThreadLocalCallbackPtr(char *key, void *ptr) +{ + PyObject *local_dict, *value; + void *prev; + + local_dict = PyThreadState_GetDict(); + if (local_dict == NULL) { + Py_FatalError( + "F2PySwapThreadLocalCallbackPtr: PyThreadState_GetDict " + "failed"); + } + + value = PyDict_GetItemString(local_dict, key); // noqa: borrowed-ref OK + if (value != NULL) { + prev = PyLong_AsVoidPtr(value); + if (PyErr_Occurred()) { + Py_FatalError( + "F2PySwapThreadLocalCallbackPtr: PyLong_AsVoidPtr failed"); + } + } + else { + prev = NULL; + } + + value = PyLong_FromVoidPtr((void *)ptr); + if (value == NULL) { + Py_FatalError( + "F2PySwapThreadLocalCallbackPtr: PyLong_FromVoidPtr failed"); + } + + if (PyDict_SetItemString(local_dict, key, value) != 0) { + Py_FatalError( + "F2PySwapThreadLocalCallbackPtr: PyDict_SetItemString failed"); + } + + Py_DECREF(value); + + return prev; +} + +void * +F2PyGetThreadLocalCallbackPtr(char *key) +{ + PyObject *local_dict, *value; + void *prev; + + local_dict = PyThreadState_GetDict(); + if (local_dict == NULL) { + Py_FatalError( + "F2PyGetThreadLocalCallbackPtr: PyThreadState_GetDict failed"); + } + + value = PyDict_GetItemString(local_dict, key); // noqa: borrowed-ref OK + if (value != NULL) { + prev = PyLong_AsVoidPtr(value); + if (PyErr_Occurred()) { + Py_FatalError( + "F2PyGetThreadLocalCallbackPtr: PyLong_AsVoidPtr failed"); + } + } + else { + prev = NULL; + } + + return prev; +} + +static PyArray_Descr * +get_descr_from_type_and_elsize(const int type_num, const int elsize) { + PyArray_Descr * descr = PyArray_DescrFromType(type_num); + if (type_num == NPY_STRING) { + // PyArray_DescrFromType returns descr with elsize = 0. + PyArray_DESCR_REPLACE(descr); + if (descr == NULL) { + return NULL; + } + PyDataType_SET_ELSIZE(descr, elsize); + } + return descr; +} + +/************************* FortranObject *******************************/ + +typedef PyObject *(*fortranfunc)(PyObject *, PyObject *, PyObject *, void *); + +PyObject * +PyFortranObject_New(FortranDataDef *defs, f2py_void_func init) +{ + int i; + PyFortranObject *fp = NULL; + PyObject *v = NULL; + if (init != NULL) { /* Initialize F90 module objects */ + (*(init))(); + } + fp = PyObject_New(PyFortranObject, &PyFortran_Type); + if (fp == NULL) { + return NULL; + } + if ((fp->dict = PyDict_New()) == NULL) { + Py_DECREF(fp); + return NULL; + } + fp->len = 0; + while (defs[fp->len].name != NULL) { + fp->len++; + } + if (fp->len == 0) { + goto fail; + } + fp->defs = defs; + for (i = 0; i < fp->len; i++) { + if (fp->defs[i].rank == -1) { /* Is Fortran routine */ + v = PyFortranObject_NewAsAttr(&(fp->defs[i])); + if (v == NULL) { + goto fail; + } + PyDict_SetItemString(fp->dict, fp->defs[i].name, v); + Py_XDECREF(v); + } + else if ((fp->defs[i].data) != + NULL) { /* Is Fortran variable or array (not allocatable) */ + PyArray_Descr * + descr = get_descr_from_type_and_elsize(fp->defs[i].type, + fp->defs[i].elsize); + if (descr == NULL) { + goto fail; + } + v = PyArray_NewFromDescr(&PyArray_Type, descr, fp->defs[i].rank, + fp->defs[i].dims.d, NULL, fp->defs[i].data, + NPY_ARRAY_FARRAY, NULL); + if (v == NULL) { + Py_DECREF(descr); + goto fail; + } + PyDict_SetItemString(fp->dict, fp->defs[i].name, v); + Py_XDECREF(v); + } + } + return (PyObject *)fp; +fail: + Py_XDECREF(fp); + return NULL; +} + +PyObject * +PyFortranObject_NewAsAttr(FortranDataDef *defs) +{ /* used for calling F90 module routines */ + PyFortranObject *fp = NULL; + fp = PyObject_New(PyFortranObject, &PyFortran_Type); + if (fp == NULL) + return NULL; + if ((fp->dict = PyDict_New()) == NULL) { + PyObject_Del(fp); + return NULL; + } + fp->len = 1; + fp->defs = defs; + if (defs->rank == -1) { + PyDict_SetItemString(fp->dict, "__name__", PyUnicode_FromFormat("function %s", defs->name)); + } else if (defs->rank == 0) { + PyDict_SetItemString(fp->dict, "__name__", PyUnicode_FromFormat("scalar %s", defs->name)); + } else { + PyDict_SetItemString(fp->dict, "__name__", PyUnicode_FromFormat("array %s", defs->name)); + } + return (PyObject *)fp; +} + +/* Fortran methods */ + +static void +fortran_dealloc(PyFortranObject *fp) +{ + Py_XDECREF(fp->dict); + PyObject_Del(fp); +} + +/* Returns number of bytes consumed from buf, or -1 on error. */ +static Py_ssize_t +format_def(char *buf, Py_ssize_t size, FortranDataDef def) +{ + char *p = buf; + int i; + npy_intp n; + + n = PyOS_snprintf(p, size, "array(%" NPY_INTP_FMT, def.dims.d[0]); + if (n < 0 || n >= size) { + return -1; + } + p += n; + size -= n; + + for (i = 1; i < def.rank; i++) { + n = PyOS_snprintf(p, size, ",%" NPY_INTP_FMT, def.dims.d[i]); + if (n < 0 || n >= size) { + return -1; + } + p += n; + size -= n; + } + + if (size <= 0) { + return -1; + } + + *p++ = ')'; + size--; + + if (def.data == NULL) { + static const char notalloc[] = ", not allocated"; + if ((size_t)size < sizeof(notalloc)) { + return -1; + } + memcpy(p, notalloc, sizeof(notalloc)); + p += sizeof(notalloc); + size -= sizeof(notalloc); + } + + return p - buf; +} + +static PyObject * +fortran_doc(FortranDataDef def) +{ + char *buf, *p; + PyObject *s = NULL; + Py_ssize_t n, origsize, size = 100; + + if (def.doc != NULL) { + size += strlen(def.doc); + } + origsize = size; + buf = p = (char *)PyMem_Malloc(size); + if (buf == NULL) { + return PyErr_NoMemory(); + } + + if (def.rank == -1) { + if (def.doc) { + n = strlen(def.doc); + if (n > size) { + goto fail; + } + memcpy(p, def.doc, n); + p += n; + size -= n; + } + else { + n = PyOS_snprintf(p, size, "%s - no docs available", def.name); + if (n < 0 || n >= size) { + goto fail; + } + p += n; + size -= n; + } + } + else { + PyArray_Descr *d = PyArray_DescrFromType(def.type); + n = PyOS_snprintf(p, size, "%s : '%c'-", def.name, d->type); + Py_DECREF(d); + if (n < 0 || n >= size) { + goto fail; + } + p += n; + size -= n; + + if (def.data == NULL) { + n = format_def(p, size, def); + if (n < 0) { + goto fail; + } + p += n; + size -= n; + } + else if (def.rank > 0) { + n = format_def(p, size, def); + if (n < 0) { + goto fail; + } + p += n; + size -= n; + } + else { + n = strlen("scalar"); + if (size < n) { + goto fail; + } + memcpy(p, "scalar", n); + p += n; + size -= n; + } + } + if (size <= 1) { + goto fail; + } + *p++ = '\n'; + size--; + + /* p now points one beyond the last character of the string in buf */ + s = PyUnicode_FromStringAndSize(buf, p - buf); + + PyMem_Free(buf); + return s; + +fail: + fprintf(stderr, + "fortranobject.c: fortran_doc: len(p)=%zd>%zd=size:" + " too long docstring required, increase size\n", + p - buf, origsize); + PyMem_Free(buf); + return NULL; +} + +static FortranDataDef *save_def; /* save pointer of an allocatable array */ +static void +set_data(char *d, npy_intp *f) +{ /* callback from Fortran */ + if (*f) /* In fortran f=allocated(d) */ + save_def->data = d; + else + save_def->data = NULL; + /* printf("set_data: d=%p,f=%d\n",d,*f); */ +} + +static PyObject * +fortran_getattr(PyFortranObject *fp, char *name) +{ + int i, j, k, flag; + if (fp->dict != NULL) { + // python 3.13 added PyDict_GetItemRef +#if PY_VERSION_HEX < 0x030D0000 + PyObject *v = _PyDict_GetItemStringWithError(fp->dict, name); // noqa: borrowed-ref OK + if (v == NULL && PyErr_Occurred()) { + return NULL; + } + else if (v != NULL) { + Py_INCREF(v); + return v; + } +#else + PyObject *v; + int result = PyDict_GetItemStringRef(fp->dict, name, &v); + if (result == -1) { + return NULL; + } + else if (result == 1) { + return v; + } +#endif + + } + for (i = 0, j = 1; i < fp->len && (j = strcmp(name, fp->defs[i].name)); + i++) + ; + if (j == 0) + if (fp->defs[i].rank != -1) { /* F90 allocatable array */ + if (fp->defs[i].func == NULL) + return NULL; + for (k = 0; k < fp->defs[i].rank; ++k) fp->defs[i].dims.d[k] = -1; + save_def = &fp->defs[i]; + (*(fp->defs[i].func))(&fp->defs[i].rank, fp->defs[i].dims.d, + set_data, &flag); + if (flag == 2) + k = fp->defs[i].rank + 1; + else + k = fp->defs[i].rank; + if (fp->defs[i].data != NULL) { /* array is allocated */ + PyObject *v = PyArray_New( + &PyArray_Type, k, fp->defs[i].dims.d, fp->defs[i].type, + NULL, fp->defs[i].data, 0, NPY_ARRAY_FARRAY, NULL); + if (v == NULL) + return NULL; + /* Py_INCREF(v); */ + return v; + } + else { /* array is not allocated */ + Py_RETURN_NONE; + } + } + if (strcmp(name, "__dict__") == 0) { + Py_INCREF(fp->dict); + return fp->dict; + } + if (strcmp(name, "__doc__") == 0) { + PyObject *s = PyUnicode_FromString(""), *s2, *s3; + for (i = 0; i < fp->len; i++) { + s2 = fortran_doc(fp->defs[i]); + s3 = PyUnicode_Concat(s, s2); + Py_DECREF(s2); + Py_DECREF(s); + s = s3; + } + if (PyDict_SetItemString(fp->dict, name, s)) + return NULL; + return s; + } + if ((strcmp(name, "_cpointer") == 0) && (fp->len == 1)) { + PyObject *cobj = + F2PyCapsule_FromVoidPtr((void *)(fp->defs[0].data), NULL); + if (PyDict_SetItemString(fp->dict, name, cobj)) + return NULL; + return cobj; + } + PyObject *str, *ret; + str = PyUnicode_FromString(name); + ret = PyObject_GenericGetAttr((PyObject *)fp, str); + Py_DECREF(str); + return ret; +} + +static int +fortran_setattr(PyFortranObject *fp, char *name, PyObject *v) +{ + int i, j, flag; + PyArrayObject *arr = NULL; + for (i = 0, j = 1; i < fp->len && (j = strcmp(name, fp->defs[i].name)); + i++) + ; + if (j == 0) { + if (fp->defs[i].rank == -1) { + PyErr_SetString(PyExc_AttributeError, + "over-writing fortran routine"); + return -1; + } + if (fp->defs[i].func != NULL) { /* is allocatable array */ + npy_intp dims[F2PY_MAX_DIMS]; + int k; + save_def = &fp->defs[i]; + if (v != Py_None) { /* set new value (reallocate if needed -- + see f2py generated code for more + details ) */ + for (k = 0; k < fp->defs[i].rank; k++) dims[k] = -1; + if ((arr = array_from_pyobj(fp->defs[i].type, dims, + fp->defs[i].rank, F2PY_INTENT_IN, + v)) == NULL) + return -1; + (*(fp->defs[i].func))(&fp->defs[i].rank, PyArray_DIMS(arr), + set_data, &flag); + } + else { /* deallocate */ + for (k = 0; k < fp->defs[i].rank; k++) dims[k] = 0; + (*(fp->defs[i].func))(&fp->defs[i].rank, dims, set_data, + &flag); + for (k = 0; k < fp->defs[i].rank; k++) dims[k] = -1; + } + memcpy(fp->defs[i].dims.d, dims, + fp->defs[i].rank * sizeof(npy_intp)); + } + else { /* not allocatable array */ + if ((arr = array_from_pyobj(fp->defs[i].type, fp->defs[i].dims.d, + fp->defs[i].rank, F2PY_INTENT_IN, + v)) == NULL) + return -1; + } + if (fp->defs[i].data != + NULL) { /* copy Python object to Fortran array */ + npy_intp s = PyArray_MultiplyList(fp->defs[i].dims.d, + PyArray_NDIM(arr)); + if (s == -1) + s = PyArray_MultiplyList(PyArray_DIMS(arr), PyArray_NDIM(arr)); + if (s < 0 || (memcpy(fp->defs[i].data, PyArray_DATA(arr), + s * PyArray_ITEMSIZE(arr))) == NULL) { + if ((PyObject *)arr != v) { + Py_DECREF(arr); + } + return -1; + } + if ((PyObject *)arr != v) { + Py_DECREF(arr); + } + } + else + return (fp->defs[i].func == NULL ? -1 : 0); + return 0; /* successful */ + } + if (fp->dict == NULL) { + fp->dict = PyDict_New(); + if (fp->dict == NULL) + return -1; + } + if (v == NULL) { + int rv = PyDict_DelItemString(fp->dict, name); + if (rv < 0) + PyErr_SetString(PyExc_AttributeError, + "delete non-existing fortran attribute"); + return rv; + } + else + return PyDict_SetItemString(fp->dict, name, v); +} + +static PyObject * +fortran_call(PyFortranObject *fp, PyObject *arg, PyObject *kw) +{ + int i = 0; + /* printf("fortran call + name=%s,func=%p,data=%p,%p\n",fp->defs[i].name, + fp->defs[i].func,fp->defs[i].data,&fp->defs[i].data); */ + if (fp->defs[i].rank == -1) { /* is Fortran routine */ + if (fp->defs[i].func == NULL) { + PyErr_Format(PyExc_RuntimeError, "no function to call"); + return NULL; + } + else if (fp->defs[i].data == NULL) + /* dummy routine */ + return (*((fortranfunc)(fp->defs[i].func)))((PyObject *)fp, arg, + kw, NULL); + else + return (*((fortranfunc)(fp->defs[i].func)))( + (PyObject *)fp, arg, kw, (void *)fp->defs[i].data); + } + PyErr_Format(PyExc_TypeError, "this fortran object is not callable"); + return NULL; +} + +static PyObject * +fortran_repr(PyFortranObject *fp) +{ + PyObject *name = NULL, *repr = NULL; + name = PyObject_GetAttrString((PyObject *)fp, "__name__"); + PyErr_Clear(); + if (name != NULL && PyUnicode_Check(name)) { + repr = PyUnicode_FromFormat("", name); + } + else { + repr = PyUnicode_FromString(""); + } + Py_XDECREF(name); + return repr; +} + +PyTypeObject PyFortran_Type = { + PyVarObject_HEAD_INIT(NULL, 0).tp_name = "fortran", + .tp_basicsize = sizeof(PyFortranObject), + .tp_dealloc = (destructor)fortran_dealloc, + .tp_getattr = (getattrfunc)fortran_getattr, + .tp_setattr = (setattrfunc)fortran_setattr, + .tp_repr = (reprfunc)fortran_repr, + .tp_call = (ternaryfunc)fortran_call, +}; + +/************************* f2py_report_atexit *******************************/ + +#ifdef F2PY_REPORT_ATEXIT +static int passed_time = 0; +static int passed_counter = 0; +static int passed_call_time = 0; +static struct timeb start_time; +static struct timeb stop_time; +static struct timeb start_call_time; +static struct timeb stop_call_time; +static int cb_passed_time = 0; +static int cb_passed_counter = 0; +static int cb_passed_call_time = 0; +static struct timeb cb_start_time; +static struct timeb cb_stop_time; +static struct timeb cb_start_call_time; +static struct timeb cb_stop_call_time; + +extern void +f2py_start_clock(void) +{ + ftime(&start_time); +} +extern void +f2py_start_call_clock(void) +{ + f2py_stop_clock(); + ftime(&start_call_time); +} +extern void +f2py_stop_clock(void) +{ + ftime(&stop_time); + passed_time += 1000 * (stop_time.time - start_time.time); + passed_time += stop_time.millitm - start_time.millitm; +} +extern void +f2py_stop_call_clock(void) +{ + ftime(&stop_call_time); + passed_call_time += 1000 * (stop_call_time.time - start_call_time.time); + passed_call_time += stop_call_time.millitm - start_call_time.millitm; + passed_counter += 1; + f2py_start_clock(); +} + +extern void +f2py_cb_start_clock(void) +{ + ftime(&cb_start_time); +} +extern void +f2py_cb_start_call_clock(void) +{ + f2py_cb_stop_clock(); + ftime(&cb_start_call_time); +} +extern void +f2py_cb_stop_clock(void) +{ + ftime(&cb_stop_time); + cb_passed_time += 1000 * (cb_stop_time.time - cb_start_time.time); + cb_passed_time += cb_stop_time.millitm - cb_start_time.millitm; +} +extern void +f2py_cb_stop_call_clock(void) +{ + ftime(&cb_stop_call_time); + cb_passed_call_time += + 1000 * (cb_stop_call_time.time - cb_start_call_time.time); + cb_passed_call_time += + cb_stop_call_time.millitm - cb_start_call_time.millitm; + cb_passed_counter += 1; + f2py_cb_start_clock(); +} + +static int f2py_report_on_exit_been_here = 0; +extern void +f2py_report_on_exit(int exit_flag, void *name) +{ + if (f2py_report_on_exit_been_here) { + fprintf(stderr, " %s\n", (char *)name); + return; + } + f2py_report_on_exit_been_here = 1; + fprintf(stderr, " /-----------------------\\\n"); + fprintf(stderr, " < F2PY performance report >\n"); + fprintf(stderr, " \\-----------------------/\n"); + fprintf(stderr, "Overall time spent in ...\n"); + fprintf(stderr, "(a) wrapped (Fortran/C) functions : %8d msec\n", + passed_call_time); + fprintf(stderr, "(b) f2py interface, %6d calls : %8d msec\n", + passed_counter, passed_time); + fprintf(stderr, "(c) call-back (Python) functions : %8d msec\n", + cb_passed_call_time); + fprintf(stderr, "(d) f2py call-back interface, %6d calls : %8d msec\n", + cb_passed_counter, cb_passed_time); + + fprintf(stderr, + "(e) wrapped (Fortran/C) functions (actual) : %8d msec\n\n", + passed_call_time - cb_passed_call_time - cb_passed_time); + fprintf(stderr, + "Use -DF2PY_REPORT_ATEXIT_DISABLE to disable this message.\n"); + fprintf(stderr, "Exit status: %d\n", exit_flag); + fprintf(stderr, "Modules : %s\n", (char *)name); +} +#endif + +/********************** report on array copy ****************************/ + +#ifdef F2PY_REPORT_ON_ARRAY_COPY +static void +f2py_report_on_array_copy(PyArrayObject *arr) +{ + const npy_intp arr_size = PyArray_Size((PyObject *)arr); + if (arr_size > F2PY_REPORT_ON_ARRAY_COPY) { + fprintf(stderr, + "copied an array: size=%ld, elsize=%" NPY_INTP_FMT "\n", + arr_size, (npy_intp)PyArray_ITEMSIZE(arr)); + } +} +static void +f2py_report_on_array_copy_fromany(void) +{ + fprintf(stderr, "created an array from object\n"); +} + +#define F2PY_REPORT_ON_ARRAY_COPY_FROMARR \ + f2py_report_on_array_copy((PyArrayObject *)arr) +#define F2PY_REPORT_ON_ARRAY_COPY_FROMANY f2py_report_on_array_copy_fromany() +#else +#define F2PY_REPORT_ON_ARRAY_COPY_FROMARR +#define F2PY_REPORT_ON_ARRAY_COPY_FROMANY +#endif + +/************************* array_from_obj *******************************/ + +/* + * File: array_from_pyobj.c + * + * Description: + * ------------ + * Provides array_from_pyobj function that returns a contiguous array + * object with the given dimensions and required storage order, either + * in row-major (C) or column-major (Fortran) order. The function + * array_from_pyobj is very flexible about its Python object argument + * that can be any number, list, tuple, or array. + * + * array_from_pyobj is used in f2py generated Python extension + * modules. + * + * Author: Pearu Peterson + * Created: 13-16 January 2002 + * $Id: fortranobject.c,v 1.52 2005/07/11 07:44:20 pearu Exp $ + */ + +static int check_and_fix_dimensions(const PyArrayObject* arr, + const int rank, + npy_intp *dims, + const char *errmess); + +static int +find_first_negative_dimension(const int rank, const npy_intp *dims) +{ + int i; + for (i = 0; i < rank; ++i) { + if (dims[i] < 0) { + return i; + } + } + return -1; +} + +#ifdef DEBUG_COPY_ND_ARRAY +void +dump_dims(int rank, npy_intp const *dims) +{ + int i; + printf("["); + for (i = 0; i < rank; ++i) { + printf("%3" NPY_INTP_FMT, dims[i]); + } + printf("]\n"); +} +void +dump_attrs(const PyArrayObject *obj) +{ + const PyArrayObject_fields *arr = (const PyArrayObject_fields *)obj; + int rank = PyArray_NDIM(arr); + npy_intp size = PyArray_Size((PyObject *)arr); + printf("\trank = %d, flags = %d, size = %" NPY_INTP_FMT "\n", rank, + arr->flags, size); + printf("\tstrides = "); + dump_dims(rank, arr->strides); + printf("\tdimensions = "); + dump_dims(rank, arr->dimensions); +} +#endif + +#define SWAPTYPE(a, b, t) \ + { \ + t c; \ + c = (a); \ + (a) = (b); \ + (b) = c; \ + } + +static int +swap_arrays(PyArrayObject *obj1, PyArrayObject *obj2) +{ + PyArrayObject_fields *arr1 = (PyArrayObject_fields *)obj1, + *arr2 = (PyArrayObject_fields *)obj2; + SWAPTYPE(arr1->data, arr2->data, char *); + SWAPTYPE(arr1->nd, arr2->nd, int); + SWAPTYPE(arr1->dimensions, arr2->dimensions, npy_intp *); + SWAPTYPE(arr1->strides, arr2->strides, npy_intp *); + SWAPTYPE(arr1->base, arr2->base, PyObject *); + SWAPTYPE(arr1->descr, arr2->descr, PyArray_Descr *); + SWAPTYPE(arr1->flags, arr2->flags, int); + /* SWAPTYPE(arr1->weakreflist,arr2->weakreflist,PyObject*); */ + return 0; +} + +#define ARRAY_ISCOMPATIBLE(arr,type_num) \ + ((PyArray_ISINTEGER(arr) && PyTypeNum_ISINTEGER(type_num)) || \ + (PyArray_ISFLOAT(arr) && PyTypeNum_ISFLOAT(type_num)) || \ + (PyArray_ISCOMPLEX(arr) && PyTypeNum_ISCOMPLEX(type_num)) || \ + (PyArray_ISBOOL(arr) && PyTypeNum_ISBOOL(type_num)) || \ + (PyArray_ISSTRING(arr) && PyTypeNum_ISSTRING(type_num))) + +static int +get_elsize(PyObject *obj) { + /* + get_elsize determines array itemsize from a Python object. Returns + elsize if successful, -1 otherwise. + + Supported types of the input are: numpy.ndarray, bytes, str, tuple, + list. + */ + + if (PyArray_Check(obj)) { + return PyArray_ITEMSIZE((PyArrayObject *)obj); + } else if (PyBytes_Check(obj)) { + return PyBytes_GET_SIZE(obj); + } else if (PyUnicode_Check(obj)) { + return PyUnicode_GET_LENGTH(obj); + } else if (PySequence_Check(obj)) { + PyObject* fast = PySequence_Fast(obj, "f2py:fortranobject.c:get_elsize"); // noqa: borrowed-ref OK + if (fast != NULL) { + Py_ssize_t i, n = PySequence_Fast_GET_SIZE(fast); + int sz, elsize = 0; + for (i=0; i elsize) { + elsize = sz; + } + } + Py_DECREF(fast); + return elsize; + } + } + return -1; +} + +extern PyArrayObject * +ndarray_from_pyobj(const int type_num, + const int elsize_, + npy_intp *dims, + const int rank, + const int intent, + PyObject *obj, + const char *errmess) { + /* + * Return an array with given element type and shape from a Python + * object while taking into account the usage intent of the array. + * + * - element type is defined by type_num and elsize + * - shape is defined by dims and rank + * + * ndarray_from_pyobj is used to convert Python object arguments + * to numpy ndarrays with given type and shape that data is passed + * to interfaced Fortran or C functions. + * + * errmess (if not NULL), contains a prefix of an error message + * for an exception to be triggered within this function. + * + * Negative elsize value means that elsize is to be determined + * from the Python object in runtime. + * + * Note on strings + * --------------- + * + * String type (type_num == NPY_STRING) does not have fixed + * element size and, by default, the type object sets it to + * 0. Therefore, for string types, one has to use elsize + * argument. For other types, elsize value is ignored. + * + * NumPy defines the type of a fixed-width string as + * dtype('S'). In addition, there is also dtype('c'), that + * appears as dtype('S1') (these have the same type_num value), + * but is actually different (.char attribute is either 'S' or + * 'c', respectively). + * + * In Fortran, character arrays and strings are different + * concepts. The relation between Fortran types, NumPy dtypes, + * and type_num-elsize pairs, is defined as follows: + * + * character*5 foo | dtype('S5') | elsize=5, shape=() + * character(5) foo | dtype('S1') | elsize=1, shape=(5) + * character*5 foo(n) | dtype('S5') | elsize=5, shape=(n,) + * character(5) foo(n) | dtype('S1') | elsize=1, shape=(5, n) + * character*(*) foo | dtype('S') | elsize=-1, shape=() + * + * Note about reference counting + * ----------------------------- + * + * If the caller returns the array to Python, it must be done with + * Py_BuildValue("N",arr). Otherwise, if obj!=arr then the caller + * must call Py_DECREF(arr). + * + * Note on intent(cache,out,..) + * ---------------------------- + * Don't expect correct data when returning intent(cache) array. + * + */ + char mess[F2PY_MESSAGE_BUFFER_SIZE]; + PyArrayObject *arr = NULL; + int elsize = (elsize_ < 0 ? get_elsize(obj) : elsize_); + if (elsize < 0) { + if (errmess != NULL) { + strcpy(mess, errmess); + } + sprintf(mess + strlen(mess), + " -- failed to determine element size from %s", + Py_TYPE(obj)->tp_name); + PyErr_SetString(PyExc_SystemError, mess); + return NULL; + } + PyArray_Descr * descr = get_descr_from_type_and_elsize(type_num, elsize); // new reference + if (descr == NULL) { + return NULL; + } + elsize = PyDataType_ELSIZE(descr); + if ((intent & F2PY_INTENT_HIDE) + || ((intent & F2PY_INTENT_CACHE) && (obj == Py_None)) + || ((intent & F2PY_OPTIONAL) && (obj == Py_None)) + ) { + /* intent(cache), optional, intent(hide) */ + int ineg = find_first_negative_dimension(rank, dims); + if (ineg >= 0) { + int i; + strcpy(mess, "failed to create intent(cache|hide)|optional array" + "-- must have defined dimensions but got ("); + for(i = 0; i < rank; ++i) + sprintf(mess + strlen(mess), "%" NPY_INTP_FMT ",", dims[i]); + strcat(mess, ")"); + PyErr_SetString(PyExc_ValueError, mess); + Py_DECREF(descr); + return NULL; + } + arr = (PyArrayObject *) \ + PyArray_NewFromDescr(&PyArray_Type, descr, rank, dims, + NULL, NULL, !(intent & F2PY_INTENT_C), NULL); + if (arr == NULL) { + Py_DECREF(descr); + return NULL; + } + if (PyArray_ITEMSIZE(arr) != elsize) { + strcpy(mess, "failed to create intent(cache|hide)|optional array"); + sprintf(mess+strlen(mess)," -- expected elsize=%d got %" NPY_INTP_FMT, elsize, (npy_intp)PyArray_ITEMSIZE(arr)); + PyErr_SetString(PyExc_ValueError,mess); + Py_DECREF(arr); + return NULL; + } + if (!(intent & F2PY_INTENT_CACHE)) { + PyArray_FILLWBYTE(arr, 0); + } + return arr; + } + + if (PyArray_Check(obj)) { + arr = (PyArrayObject *)obj; + if (intent & F2PY_INTENT_CACHE) { + /* intent(cache) */ + if (PyArray_ISONESEGMENT(arr) + && PyArray_ITEMSIZE(arr) >= elsize) { + if (check_and_fix_dimensions(arr, rank, dims, errmess)) { + Py_DECREF(descr); + return NULL; + } + if (intent & F2PY_INTENT_OUT) + Py_INCREF(arr); + Py_DECREF(descr); + return arr; + } + strcpy(mess, "failed to initialize intent(cache) array"); + if (!PyArray_ISONESEGMENT(arr)) + strcat(mess, " -- input must be in one segment"); + if (PyArray_ITEMSIZE(arr) < elsize) + sprintf(mess + strlen(mess), + " -- expected at least elsize=%d but got " + "%" NPY_INTP_FMT, + elsize, (npy_intp)PyArray_ITEMSIZE(arr)); + PyErr_SetString(PyExc_ValueError, mess); + Py_DECREF(descr); + return NULL; + } + + /* here we have always intent(in) or intent(inout) or intent(inplace) + */ + + if (check_and_fix_dimensions(arr, rank, dims, errmess)) { + Py_DECREF(descr); + return NULL; + } + /* + printf("intent alignment=%d\n", F2PY_GET_ALIGNMENT(intent)); + printf("alignment check=%d\n", F2PY_CHECK_ALIGNMENT(arr, intent)); + int i; + for (i=1;i<=16;i++) + printf("i=%d isaligned=%d\n", i, ARRAY_ISALIGNED(arr, i)); + */ + if ((! (intent & F2PY_INTENT_COPY)) && + PyArray_ITEMSIZE(arr) == elsize && + ARRAY_ISCOMPATIBLE(arr,type_num) && + F2PY_CHECK_ALIGNMENT(arr, intent)) { + if ((intent & F2PY_INTENT_INOUT || intent & F2PY_INTENT_INPLACE) + ? ((intent & F2PY_INTENT_C) ? PyArray_ISCARRAY(arr) : PyArray_ISFARRAY(arr)) + : ((intent & F2PY_INTENT_C) ? PyArray_ISCARRAY_RO(arr) : PyArray_ISFARRAY_RO(arr))) { + if ((intent & F2PY_INTENT_OUT)) { + Py_INCREF(arr); + } + /* Returning input array */ + Py_DECREF(descr); + return arr; + } + } + if (intent & F2PY_INTENT_INOUT) { + strcpy(mess, "failed to initialize intent(inout) array"); + /* Must use PyArray_IS*ARRAY because intent(inout) requires + * writable input */ + if ((intent & F2PY_INTENT_C) && !PyArray_ISCARRAY(arr)) + strcat(mess, " -- input not contiguous"); + if (!(intent & F2PY_INTENT_C) && !PyArray_ISFARRAY(arr)) + strcat(mess, " -- input not fortran contiguous"); + if (PyArray_ITEMSIZE(arr) != elsize) + sprintf(mess + strlen(mess), + " -- expected elsize=%d but got %" NPY_INTP_FMT, + elsize, + (npy_intp)PyArray_ITEMSIZE(arr) + ); + if (!(ARRAY_ISCOMPATIBLE(arr, type_num))) { + sprintf(mess + strlen(mess), + " -- input '%c' not compatible to '%c'", + PyArray_DESCR(arr)->type, descr->type); + } + if (!(F2PY_CHECK_ALIGNMENT(arr, intent))) + sprintf(mess + strlen(mess), " -- input not %d-aligned", + F2PY_GET_ALIGNMENT(intent)); + PyErr_SetString(PyExc_ValueError, mess); + Py_DECREF(descr); + return NULL; + } + + /* here we have always intent(in) or intent(inplace) */ + + { + PyArrayObject * retarr = (PyArrayObject *) \ + PyArray_NewFromDescr(&PyArray_Type, descr, PyArray_NDIM(arr), PyArray_DIMS(arr), + NULL, NULL, !(intent & F2PY_INTENT_C), NULL); + if (retarr==NULL) { + Py_DECREF(descr); + return NULL; + } + F2PY_REPORT_ON_ARRAY_COPY_FROMARR; + if (PyArray_CopyInto(retarr, arr)) { + Py_DECREF(retarr); + return NULL; + } + if (intent & F2PY_INTENT_INPLACE) { + if (swap_arrays(arr,retarr)) { + Py_DECREF(retarr); + return NULL; /* XXX: set exception */ + } + Py_XDECREF(retarr); + if (intent & F2PY_INTENT_OUT) + Py_INCREF(arr); + } else { + arr = retarr; + } + } + return arr; + } + + if ((intent & F2PY_INTENT_INOUT) || (intent & F2PY_INTENT_INPLACE) || + (intent & F2PY_INTENT_CACHE)) { + PyErr_Format(PyExc_TypeError, + "failed to initialize intent(inout|inplace|cache) " + "array, input '%s' object is not an array", + Py_TYPE(obj)->tp_name); + Py_DECREF(descr); + return NULL; + } + + { + F2PY_REPORT_ON_ARRAY_COPY_FROMANY; + arr = (PyArrayObject *)PyArray_FromAny( + obj, descr, 0, 0, + ((intent & F2PY_INTENT_C) ? NPY_ARRAY_CARRAY + : NPY_ARRAY_FARRAY) | + NPY_ARRAY_FORCECAST, + NULL); + // Warning: in the case of NPY_STRING, PyArray_FromAny may + // reset descr->elsize, e.g. dtype('S0') becomes dtype('S1'). + if (arr == NULL) { + Py_DECREF(descr); + return NULL; + } + if (type_num != NPY_STRING && PyArray_ITEMSIZE(arr) != elsize) { + // This is internal sanity tests: elsize has been set to + // descr->elsize in the beginning of this function. + strcpy(mess, "failed to initialize intent(in) array"); + sprintf(mess + strlen(mess), + " -- expected elsize=%d got %" NPY_INTP_FMT, elsize, + (npy_intp)PyArray_ITEMSIZE(arr)); + PyErr_SetString(PyExc_ValueError, mess); + Py_DECREF(arr); + return NULL; + } + if (check_and_fix_dimensions(arr, rank, dims, errmess)) { + Py_DECREF(arr); + return NULL; + } + return arr; + } +} + +extern PyArrayObject * +array_from_pyobj(const int type_num, + npy_intp *dims, + const int rank, + const int intent, + PyObject *obj) { + /* + Same as ndarray_from_pyobj but with elsize determined from type, + if possible. Provided for backward compatibility. + */ + PyArray_Descr* descr = PyArray_DescrFromType(type_num); + int elsize = PyDataType_ELSIZE(descr); + Py_DECREF(descr); + return ndarray_from_pyobj(type_num, elsize, dims, rank, intent, obj, NULL); +} + +/*****************************************/ +/* Helper functions for array_from_pyobj */ +/*****************************************/ + +static int +check_and_fix_dimensions(const PyArrayObject* arr, const int rank, + npy_intp *dims, const char *errmess) +{ + /* + * This function fills in blanks (that are -1's) in dims list using + * the dimensions from arr. It also checks that non-blank dims will + * match with the corresponding values in arr dimensions. + * + * Returns 0 if the function is successful. + * + * If an error condition is detected, an exception is set and 1 is + * returned. + */ + char mess[F2PY_MESSAGE_BUFFER_SIZE]; + const npy_intp arr_size = + (PyArray_NDIM(arr)) ? PyArray_Size((PyObject *)arr) : 1; +#ifdef DEBUG_COPY_ND_ARRAY + dump_attrs(arr); + printf("check_and_fix_dimensions:init: dims="); + dump_dims(rank, dims); +#endif + if (rank > PyArray_NDIM(arr)) { /* [1,2] -> [[1],[2]]; 1 -> [[1]] */ + npy_intp new_size = 1; + int free_axe = -1; + int i; + npy_intp d; + /* Fill dims where -1 or 0; check dimensions; calc new_size; */ + for (i = 0; i < PyArray_NDIM(arr); ++i) { + d = PyArray_DIM(arr, i); + if (dims[i] >= 0) { + if (d > 1 && dims[i] != d) { + PyErr_Format( + PyExc_ValueError, + "%d-th dimension must be fixed to %" NPY_INTP_FMT + " but got %" NPY_INTP_FMT "\n", + i, dims[i], d); + return 1; + } + if (!dims[i]) + dims[i] = 1; + } + else { + dims[i] = d ? d : 1; + } + new_size *= dims[i]; + } + for (i = PyArray_NDIM(arr); i < rank; ++i) + if (dims[i] > 1) { + PyErr_Format(PyExc_ValueError, + "%d-th dimension must be %" NPY_INTP_FMT + " but got 0 (not defined).\n", + i, dims[i]); + return 1; + } + else if (free_axe < 0) + free_axe = i; + else + dims[i] = 1; + if (free_axe >= 0) { + dims[free_axe] = arr_size / new_size; + new_size *= dims[free_axe]; + } + if (new_size != arr_size) { + PyErr_Format(PyExc_ValueError, + "unexpected array size: new_size=%" NPY_INTP_FMT + ", got array with arr_size=%" NPY_INTP_FMT + " (maybe too many free indices)\n", + new_size, arr_size); + return 1; + } + } + else if (rank == PyArray_NDIM(arr)) { + npy_intp new_size = 1; + int i; + npy_intp d; + for (i = 0; i < rank; ++i) { + d = PyArray_DIM(arr, i); + if (dims[i] >= 0) { + if (d > 1 && d != dims[i]) { + if (errmess != NULL) { + strcpy(mess, errmess); + } + sprintf(mess + strlen(mess), + " -- %d-th dimension must be fixed to %" + NPY_INTP_FMT " but got %" NPY_INTP_FMT, + i, dims[i], d); + PyErr_SetString(PyExc_ValueError, mess); + return 1; + } + if (!dims[i]) + dims[i] = 1; + } + else + dims[i] = d; + new_size *= dims[i]; + } + if (new_size != arr_size) { + PyErr_Format(PyExc_ValueError, + "unexpected array size: new_size=%" NPY_INTP_FMT + ", got array with arr_size=%" NPY_INTP_FMT "\n", + new_size, arr_size); + return 1; + } + } + else { /* [[1,2]] -> [[1],[2]] */ + int i, j; + npy_intp d; + int effrank; + npy_intp size; + for (i = 0, effrank = 0; i < PyArray_NDIM(arr); ++i) + if (PyArray_DIM(arr, i) > 1) + ++effrank; + if (dims[rank - 1] >= 0) + if (effrank > rank) { + PyErr_Format(PyExc_ValueError, + "too many axes: %d (effrank=%d), " + "expected rank=%d\n", + PyArray_NDIM(arr), effrank, rank); + return 1; + } + + for (i = 0, j = 0; i < rank; ++i) { + while (j < PyArray_NDIM(arr) && PyArray_DIM(arr, j) < 2) ++j; + if (j >= PyArray_NDIM(arr)) + d = 1; + else + d = PyArray_DIM(arr, j++); + if (dims[i] >= 0) { + if (d > 1 && d != dims[i]) { + if (errmess != NULL) { + strcpy(mess, errmess); + } + sprintf(mess + strlen(mess), + " -- %d-th dimension must be fixed to %" + NPY_INTP_FMT " but got %" NPY_INTP_FMT + " (real index=%d)\n", + i, dims[i], d, j-1); + PyErr_SetString(PyExc_ValueError, mess); + return 1; + } + if (!dims[i]) + dims[i] = 1; + } + else + dims[i] = d; + } + + for (i = rank; i < PyArray_NDIM(arr); + ++i) { /* [[1,2],[3,4]] -> [1,2,3,4] */ + while (j < PyArray_NDIM(arr) && PyArray_DIM(arr, j) < 2) ++j; + if (j >= PyArray_NDIM(arr)) + d = 1; + else + d = PyArray_DIM(arr, j++); + dims[rank - 1] *= d; + } + for (i = 0, size = 1; i < rank; ++i) size *= dims[i]; + if (size != arr_size) { + char msg[200]; + int len; + snprintf(msg, sizeof(msg), + "unexpected array size: size=%" NPY_INTP_FMT + ", arr_size=%" NPY_INTP_FMT + ", rank=%d, effrank=%d, arr.nd=%d, dims=[", + size, arr_size, rank, effrank, PyArray_NDIM(arr)); + for (i = 0; i < rank; ++i) { + len = strlen(msg); + snprintf(msg + len, sizeof(msg) - len, " %" NPY_INTP_FMT, + dims[i]); + } + len = strlen(msg); + snprintf(msg + len, sizeof(msg) - len, " ], arr.dims=["); + for (i = 0; i < PyArray_NDIM(arr); ++i) { + len = strlen(msg); + snprintf(msg + len, sizeof(msg) - len, " %" NPY_INTP_FMT, + PyArray_DIM(arr, i)); + } + len = strlen(msg); + snprintf(msg + len, sizeof(msg) - len, " ]\n"); + PyErr_SetString(PyExc_ValueError, msg); + return 1; + } + } +#ifdef DEBUG_COPY_ND_ARRAY + printf("check_and_fix_dimensions:end: dims="); + dump_dims(rank, dims); +#endif + return 0; +} + +/* End of file: array_from_pyobj.c */ + +/************************* copy_ND_array *******************************/ + +extern int +copy_ND_array(const PyArrayObject *arr, PyArrayObject *out) +{ + F2PY_REPORT_ON_ARRAY_COPY_FROMARR; + return PyArray_CopyInto(out, (PyArrayObject *)arr); +} + +/********************* Various utility functions ***********************/ + +extern int +f2py_describe(PyObject *obj, char *buf) { + /* + Write the description of a Python object to buf. The caller must + provide buffer with size sufficient to write the description. + + Return 1 on success. + */ + char localbuf[F2PY_MESSAGE_BUFFER_SIZE]; + if (PyBytes_Check(obj)) { + sprintf(localbuf, "%d-%s", (npy_int)PyBytes_GET_SIZE(obj), Py_TYPE(obj)->tp_name); + } else if (PyUnicode_Check(obj)) { + sprintf(localbuf, "%d-%s", (npy_int)PyUnicode_GET_LENGTH(obj), Py_TYPE(obj)->tp_name); + } else if (PyArray_CheckScalar(obj)) { + PyArrayObject* arr = (PyArrayObject*)obj; + sprintf(localbuf, "%c%" NPY_INTP_FMT "-%s-scalar", PyArray_DESCR(arr)->kind, PyArray_ITEMSIZE(arr), Py_TYPE(obj)->tp_name); + } else if (PyArray_Check(obj)) { + int i; + PyArrayObject* arr = (PyArrayObject*)obj; + strcpy(localbuf, "("); + for (i=0; ikind, PyArray_ITEMSIZE(arr), Py_TYPE(obj)->tp_name); + } else if (PySequence_Check(obj)) { + sprintf(localbuf, "%d-%s", (npy_int)PySequence_Length(obj), Py_TYPE(obj)->tp_name); + } else { + sprintf(localbuf, "%s instance", Py_TYPE(obj)->tp_name); + } + // TODO: detect the size of buf and make sure that size(buf) >= size(localbuf). + strcpy(buf, localbuf); + return 1; +} + +extern npy_intp +f2py_size_impl(PyArrayObject* var, ...) +{ + npy_intp sz = 0; + npy_intp dim; + npy_intp rank; + va_list argp; + va_start(argp, var); + dim = va_arg(argp, npy_int); + if (dim==-1) + { + sz = PyArray_SIZE(var); + } + else + { + rank = PyArray_NDIM(var); + if (dim>=1 && dim<=rank) + sz = PyArray_DIM(var, dim-1); + else + fprintf(stderr, "f2py_size: 2nd argument value=%" NPY_INTP_FMT + " fails to satisfy 1<=value<=%" NPY_INTP_FMT + ". Result will be 0.\n", dim, rank); + } + va_end(argp); + return sz; +} + +/*********************************************/ +/* Compatibility functions for Python >= 3.0 */ +/*********************************************/ + +PyObject * +F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)) +{ + PyObject *ret = PyCapsule_New(ptr, NULL, dtor); + if (ret == NULL) { + PyErr_Clear(); + } + return ret; +} + +void * +F2PyCapsule_AsVoidPtr(PyObject *obj) +{ + void *ret = PyCapsule_GetPointer(obj, NULL); + if (ret == NULL) { + PyErr_Clear(); + } + return ret; +} + +int +F2PyCapsule_Check(PyObject *ptr) +{ + return PyCapsule_CheckExact(ptr); +} + +#ifdef __cplusplus +} +#endif +/************************* EOF fortranobject.c *******************************/ diff --git a/local_packages/numpy/f2py/src/fortranobject.h b/local_packages/numpy/f2py/src/fortranobject.h new file mode 100644 index 0000000..4aed2f6 --- /dev/null +++ b/local_packages/numpy/f2py/src/fortranobject.h @@ -0,0 +1,173 @@ +#ifndef Py_FORTRANOBJECT_H +#define Py_FORTRANOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +#include + +#ifndef NPY_NO_DEPRECATED_API +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#endif +#ifdef FORTRANOBJECT_C +#define NO_IMPORT_ARRAY +#endif +#define PY_ARRAY_UNIQUE_SYMBOL _npy_f2py_ARRAY_API +#include "numpy/arrayobject.h" +#include "numpy/npy_3kcompat.h" + +#ifdef F2PY_REPORT_ATEXIT +#include +// clang-format off +extern void f2py_start_clock(void); +extern void f2py_stop_clock(void); +extern void f2py_start_call_clock(void); +extern void f2py_stop_call_clock(void); +extern void f2py_cb_start_clock(void); +extern void f2py_cb_stop_clock(void); +extern void f2py_cb_start_call_clock(void); +extern void f2py_cb_stop_call_clock(void); +extern void f2py_report_on_exit(int, void *); +// clang-format on +#endif + +#ifdef DMALLOC +#include "dmalloc.h" +#endif + +/* Fortran object interface */ + +/* +123456789-123456789-123456789-123456789-123456789-123456789-123456789-12 + +PyFortranObject represents various Fortran objects: +Fortran (module) routines, COMMON blocks, module data. + +Author: Pearu Peterson +*/ + +#define F2PY_MAX_DIMS 40 +#define F2PY_MESSAGE_BUFFER_SIZE 300 // Increase on "stack smashing detected" + +typedef void (*f2py_set_data_func)(char *, npy_intp *); +typedef void (*f2py_void_func)(void); +typedef void (*f2py_init_func)(int *, npy_intp *, f2py_set_data_func, int *); + +/*typedef void* (*f2py_c_func)(void*,...);*/ + +typedef void *(*f2pycfunc)(void); + +typedef struct { + char *name; /* attribute (array||routine) name */ + int rank; /* array rank, 0 for scalar, max is F2PY_MAX_DIMS, + || rank=-1 for Fortran routine */ + struct { + npy_intp d[F2PY_MAX_DIMS]; + } dims; /* dimensions of the array, || not used */ + int type; /* PyArray_ || not used */ + int elsize; /* Element size || not used */ + char *data; /* pointer to array || Fortran routine */ + f2py_init_func func; /* initialization function for + allocatable arrays: + func(&rank,dims,set_ptr_func,name,len(name)) + || C/API wrapper for Fortran routine */ + char *doc; /* documentation string; only recommended + for routines. */ +} FortranDataDef; + +typedef struct { + PyObject_HEAD + int len; /* Number of attributes */ + FortranDataDef *defs; /* An array of FortranDataDef's */ + PyObject *dict; /* Fortran object attribute dictionary */ +} PyFortranObject; + +#define PyFortran_Check(op) (Py_TYPE(op) == &PyFortran_Type) +#define PyFortran_Check1(op) (0 == strcmp(Py_TYPE(op)->tp_name, "fortran")) + +extern PyTypeObject PyFortran_Type; +extern int +F2PyDict_SetItemString(PyObject *dict, char *name, PyObject *obj); +extern PyObject * +PyFortranObject_New(FortranDataDef *defs, f2py_void_func init); +extern PyObject * +PyFortranObject_NewAsAttr(FortranDataDef *defs); + +PyObject * +F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)); +void * +F2PyCapsule_AsVoidPtr(PyObject *obj); +int +F2PyCapsule_Check(PyObject *ptr); + +extern void * +F2PySwapThreadLocalCallbackPtr(char *key, void *ptr); +extern void * +F2PyGetThreadLocalCallbackPtr(char *key); + +#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) & NPY_ARRAY_C_CONTIGUOUS) +#define F2PY_INTENT_IN 1 +#define F2PY_INTENT_INOUT 2 +#define F2PY_INTENT_OUT 4 +#define F2PY_INTENT_HIDE 8 +#define F2PY_INTENT_CACHE 16 +#define F2PY_INTENT_COPY 32 +#define F2PY_INTENT_C 64 +#define F2PY_OPTIONAL 128 +#define F2PY_INTENT_INPLACE 256 +#define F2PY_INTENT_ALIGNED4 512 +#define F2PY_INTENT_ALIGNED8 1024 +#define F2PY_INTENT_ALIGNED16 2048 + +#define ARRAY_ISALIGNED(ARR, SIZE) ((size_t)(PyArray_DATA(ARR)) % (SIZE) == 0) +#define F2PY_ALIGN4(intent) (intent & F2PY_INTENT_ALIGNED4) +#define F2PY_ALIGN8(intent) (intent & F2PY_INTENT_ALIGNED8) +#define F2PY_ALIGN16(intent) (intent & F2PY_INTENT_ALIGNED16) + +#define F2PY_GET_ALIGNMENT(intent) \ + (F2PY_ALIGN4(intent) \ + ? 4 \ + : (F2PY_ALIGN8(intent) ? 8 : (F2PY_ALIGN16(intent) ? 16 : 1))) +#define F2PY_CHECK_ALIGNMENT(arr, intent) \ + ARRAY_ISALIGNED(arr, F2PY_GET_ALIGNMENT(intent)) +#define F2PY_ARRAY_IS_CHARACTER_COMPATIBLE(arr) ((PyArray_DESCR(arr)->type_num == NPY_STRING && PyArray_ITEMSIZE(arr) >= 1) \ + || PyArray_DESCR(arr)->type_num == NPY_UINT8) +#define F2PY_IS_UNICODE_ARRAY(arr) (PyArray_DESCR(arr)->type_num == NPY_UNICODE) + +extern PyArrayObject * +ndarray_from_pyobj(const int type_num, const int elsize_, npy_intp *dims, + const int rank, const int intent, PyObject *obj, + const char *errmess); + +extern PyArrayObject * +array_from_pyobj(const int type_num, npy_intp *dims, const int rank, + const int intent, PyObject *obj); +extern int +copy_ND_array(const PyArrayObject *in, PyArrayObject *out); + +#ifdef DEBUG_COPY_ND_ARRAY +extern void +dump_attrs(const PyArrayObject *arr); +#endif + + extern int f2py_describe(PyObject *obj, char *buf); + + /* Utility CPP macros and functions that can be used in signature file + expressions. See signature-file.rst for documentation. + */ + +#define f2py_itemsize(var) (PyArray_ITEMSIZE(capi_ ## var ## _as_array)) +#define f2py_size(var, ...) f2py_size_impl((PyArrayObject *)(capi_ ## var ## _as_array), ## __VA_ARGS__, -1) +#define f2py_rank(var) var ## _Rank +#define f2py_shape(var,dim) var ## _Dims[dim] +#define f2py_len(var) f2py_shape(var,0) +#define f2py_fshape(var,dim) f2py_shape(var,rank(var)-dim-1) +#define f2py_flen(var) f2py_fshape(var,0) +#define f2py_slen(var) capi_ ## var ## _len + + extern npy_intp f2py_size_impl(PyArrayObject* var, ...); + +#ifdef __cplusplus +} +#endif +#endif /* !Py_FORTRANOBJECT_H */ diff --git a/local_packages/numpy/f2py/symbolic.py b/local_packages/numpy/f2py/symbolic.py new file mode 100644 index 0000000..c768b3c --- /dev/null +++ b/local_packages/numpy/f2py/symbolic.py @@ -0,0 +1,1518 @@ +"""Fortran/C symbolic expressions + +References: +- J3/21-007: Draft Fortran 202x. https://j3-fortran.org/doc/year/21/21-007.pdf + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" + +# To analyze Fortran expressions to solve dimensions specifications, +# for instances, we implement a minimal symbolic engine for parsing +# expressions into a tree of expression instances. As a first +# instance, we care only about arithmetic expressions involving +# integers and operations like addition (+), subtraction (-), +# multiplication (*), division (Fortran / is Python //, Fortran // is +# concatenate), and exponentiation (**). In addition, .pyf files may +# contain C expressions that support here is implemented as well. +# +# TODO: support logical constants (Op.BOOLEAN) +# TODO: support logical operators (.AND., ...) +# TODO: support defined operators (.MYOP., ...) +# +__all__ = ['Expr'] + + +import re +import warnings +from enum import Enum +from math import gcd + + +class Language(Enum): + """ + Used as Expr.tostring language argument. + """ + Python = 0 + Fortran = 1 + C = 2 + + +class Op(Enum): + """ + Used as Expr op attribute. + """ + INTEGER = 10 + REAL = 12 + COMPLEX = 15 + STRING = 20 + ARRAY = 30 + SYMBOL = 40 + TERNARY = 100 + APPLY = 200 + INDEXING = 210 + CONCAT = 220 + RELATIONAL = 300 + TERMS = 1000 + FACTORS = 2000 + REF = 3000 + DEREF = 3001 + + +class RelOp(Enum): + """ + Used in Op.RELATIONAL expression to specify the function part. + """ + EQ = 1 + NE = 2 + LT = 3 + LE = 4 + GT = 5 + GE = 6 + + @classmethod + def fromstring(cls, s, language=Language.C): + if language is Language.Fortran: + return {'.eq.': RelOp.EQ, '.ne.': RelOp.NE, + '.lt.': RelOp.LT, '.le.': RelOp.LE, + '.gt.': RelOp.GT, '.ge.': RelOp.GE}[s.lower()] + return {'==': RelOp.EQ, '!=': RelOp.NE, '<': RelOp.LT, + '<=': RelOp.LE, '>': RelOp.GT, '>=': RelOp.GE}[s] + + def tostring(self, language=Language.C): + if language is Language.Fortran: + return {RelOp.EQ: '.eq.', RelOp.NE: '.ne.', + RelOp.LT: '.lt.', RelOp.LE: '.le.', + RelOp.GT: '.gt.', RelOp.GE: '.ge.'}[self] + return {RelOp.EQ: '==', RelOp.NE: '!=', + RelOp.LT: '<', RelOp.LE: '<=', + RelOp.GT: '>', RelOp.GE: '>='}[self] + + +class ArithOp(Enum): + """ + Used in Op.APPLY expression to specify the function part. + """ + POS = 1 + NEG = 2 + ADD = 3 + SUB = 4 + MUL = 5 + DIV = 6 + POW = 7 + + +class OpError(Exception): + pass + + +class Precedence(Enum): + """ + Used as Expr.tostring precedence argument. + """ + ATOM = 0 + POWER = 1 + UNARY = 2 + PRODUCT = 3 + SUM = 4 + LT = 6 + EQ = 7 + LAND = 11 + LOR = 12 + TERNARY = 13 + ASSIGN = 14 + TUPLE = 15 + NONE = 100 + + +integer_types = (int,) +number_types = (int, float) + + +def _pairs_add(d, k, v): + # Internal utility method for updating terms and factors data. + c = d.get(k) + if c is None: + d[k] = v + else: + c = c + v + if c: + d[k] = c + else: + del d[k] + + +class ExprWarning(UserWarning): + pass + + +def ewarn(message): + warnings.warn(message, ExprWarning, stacklevel=2) + + +class Expr: + """Represents a Fortran expression as an op-data pair. + + Expr instances are hashable and sortable. + """ + + @staticmethod + def parse(s, language=Language.C): + """Parse a Fortran expression to an Expr. + """ + return fromstring(s, language=language) + + def __init__(self, op, data): + assert isinstance(op, Op) + + # sanity checks + if op is Op.INTEGER: + # data is a 2-tuple of numeric object and a kind value + # (default is 4) + assert isinstance(data, tuple) and len(data) == 2 + assert isinstance(data[0], int) + assert isinstance(data[1], (int, str)), data + elif op is Op.REAL: + # data is a 2-tuple of numeric object and a kind value + # (default is 4) + assert isinstance(data, tuple) and len(data) == 2 + assert isinstance(data[0], float) + assert isinstance(data[1], (int, str)), data + elif op is Op.COMPLEX: + # data is a 2-tuple of constant expressions + assert isinstance(data, tuple) and len(data) == 2 + elif op is Op.STRING: + # data is a 2-tuple of quoted string and a kind value + # (default is 1) + assert isinstance(data, tuple) and len(data) == 2 + assert (isinstance(data[0], str) + and data[0][::len(data[0]) - 1] in ('""', "''", '@@')) + assert isinstance(data[1], (int, str)), data + elif op is Op.SYMBOL: + # data is any hashable object + assert hash(data) is not None + elif op in (Op.ARRAY, Op.CONCAT): + # data is a tuple of expressions + assert isinstance(data, tuple) + assert all(isinstance(item, Expr) for item in data), data + elif op in (Op.TERMS, Op.FACTORS): + # data is {:} where dict values + # are nonzero Python integers + assert isinstance(data, dict) + elif op is Op.APPLY: + # data is (, , ) where + # operands are Expr instances + assert isinstance(data, tuple) and len(data) == 3 + # function is any hashable object + assert hash(data[0]) is not None + assert isinstance(data[1], tuple) + assert isinstance(data[2], dict) + elif op is Op.INDEXING: + # data is (, ) + assert isinstance(data, tuple) and len(data) == 2 + # function is any hashable object + assert hash(data[0]) is not None + elif op is Op.TERNARY: + # data is (, , ) + assert isinstance(data, tuple) and len(data) == 3 + elif op in (Op.REF, Op.DEREF): + # data is Expr instance + assert isinstance(data, Expr) + elif op is Op.RELATIONAL: + # data is (, , ) + assert isinstance(data, tuple) and len(data) == 3 + else: + raise NotImplementedError( + f'unknown op or missing sanity check: {op}') + + self.op = op + self.data = data + + def __eq__(self, other): + return (isinstance(other, Expr) + and self.op is other.op + and self.data == other.data) + + def __hash__(self): + if self.op in (Op.TERMS, Op.FACTORS): + data = tuple(sorted(self.data.items())) + elif self.op is Op.APPLY: + data = self.data[:2] + tuple(sorted(self.data[2].items())) + else: + data = self.data + return hash((self.op, data)) + + def __lt__(self, other): + if isinstance(other, Expr): + if self.op is not other.op: + return self.op.value < other.op.value + if self.op in (Op.TERMS, Op.FACTORS): + return (tuple(sorted(self.data.items())) + < tuple(sorted(other.data.items()))) + if self.op is Op.APPLY: + if self.data[:2] != other.data[:2]: + return self.data[:2] < other.data[:2] + return tuple(sorted(self.data[2].items())) < tuple( + sorted(other.data[2].items())) + return self.data < other.data + return NotImplemented + + def __le__(self, other): return self == other or self < other + + def __gt__(self, other): return not (self <= other) + + def __ge__(self, other): return not (self < other) + + def __repr__(self): + return f'{type(self).__name__}({self.op}, {self.data!r})' + + def __str__(self): + return self.tostring() + + def tostring(self, parent_precedence=Precedence.NONE, + language=Language.Fortran): + """Return a string representation of Expr. + """ + if self.op in (Op.INTEGER, Op.REAL): + precedence = (Precedence.SUM if self.data[0] < 0 + else Precedence.ATOM) + r = str(self.data[0]) + (f'_{self.data[1]}' + if self.data[1] != 4 else '') + elif self.op is Op.COMPLEX: + r = ', '.join(item.tostring(Precedence.TUPLE, language=language) + for item in self.data) + r = '(' + r + ')' + precedence = Precedence.ATOM + elif self.op is Op.SYMBOL: + precedence = Precedence.ATOM + r = str(self.data) + elif self.op is Op.STRING: + r = self.data[0] + if self.data[1] != 1: + r = self.data[1] + '_' + r + precedence = Precedence.ATOM + elif self.op is Op.ARRAY: + r = ', '.join(item.tostring(Precedence.TUPLE, language=language) + for item in self.data) + r = '[' + r + ']' + precedence = Precedence.ATOM + elif self.op is Op.TERMS: + terms = [] + for term, coeff in sorted(self.data.items()): + if coeff < 0: + op = ' - ' + coeff = -coeff + else: + op = ' + ' + if coeff == 1: + term = term.tostring(Precedence.SUM, language=language) + elif term == as_number(1): + term = str(coeff) + else: + term = f'{coeff} * ' + term.tostring( + Precedence.PRODUCT, language=language) + if terms: + terms.append(op) + elif op == ' - ': + terms.append('-') + terms.append(term) + r = ''.join(terms) or '0' + precedence = Precedence.SUM if terms else Precedence.ATOM + elif self.op is Op.FACTORS: + factors = [] + tail = [] + for base, exp in sorted(self.data.items()): + op = ' * ' + if exp == 1: + factor = base.tostring(Precedence.PRODUCT, + language=language) + elif language is Language.C: + if exp in range(2, 10): + factor = base.tostring(Precedence.PRODUCT, + language=language) + factor = ' * '.join([factor] * exp) + elif exp in range(-10, 0): + factor = base.tostring(Precedence.PRODUCT, + language=language) + tail += [factor] * -exp + continue + else: + factor = base.tostring(Precedence.TUPLE, + language=language) + factor = f'pow({factor}, {exp})' + else: + factor = base.tostring(Precedence.POWER, + language=language) + f' ** {exp}' + if factors: + factors.append(op) + factors.append(factor) + if tail: + if not factors: + factors += ['1'] + factors += ['/', '(', ' * '.join(tail), ')'] + r = ''.join(factors) or '1' + precedence = Precedence.PRODUCT if factors else Precedence.ATOM + elif self.op is Op.APPLY: + name, args, kwargs = self.data + if name is ArithOp.DIV and language is Language.C: + numer, denom = [arg.tostring(Precedence.PRODUCT, + language=language) + for arg in args] + r = f'{numer} / {denom}' + precedence = Precedence.PRODUCT + else: + args = [arg.tostring(Precedence.TUPLE, language=language) + for arg in args] + args += [k + '=' + v.tostring(Precedence.NONE) + for k, v in kwargs.items()] + r = f'{name}({", ".join(args)})' + precedence = Precedence.ATOM + elif self.op is Op.INDEXING: + name = self.data[0] + args = [arg.tostring(Precedence.TUPLE, language=language) + for arg in self.data[1:]] + r = f'{name}[{", ".join(args)}]' + precedence = Precedence.ATOM + elif self.op is Op.CONCAT: + args = [arg.tostring(Precedence.PRODUCT, language=language) + for arg in self.data] + r = " // ".join(args) + precedence = Precedence.PRODUCT + elif self.op is Op.TERNARY: + cond, expr1, expr2 = [a.tostring(Precedence.TUPLE, + language=language) + for a in self.data] + if language is Language.C: + r = f'({cond}?{expr1}:{expr2})' + elif language is Language.Python: + r = f'({expr1} if {cond} else {expr2})' + elif language is Language.Fortran: + r = f'merge({expr1}, {expr2}, {cond})' + else: + raise NotImplementedError( + f'tostring for {self.op} and {language}') + precedence = Precedence.ATOM + elif self.op is Op.REF: + r = '&' + self.data.tostring(Precedence.UNARY, language=language) + precedence = Precedence.UNARY + elif self.op is Op.DEREF: + r = '*' + self.data.tostring(Precedence.UNARY, language=language) + precedence = Precedence.UNARY + elif self.op is Op.RELATIONAL: + rop, left, right = self.data + precedence = (Precedence.EQ if rop in (RelOp.EQ, RelOp.NE) + else Precedence.LT) + left = left.tostring(precedence, language=language) + right = right.tostring(precedence, language=language) + rop = rop.tostring(language=language) + r = f'{left} {rop} {right}' + else: + raise NotImplementedError(f'tostring for op {self.op}') + if parent_precedence.value < precedence.value: + # If parent precedence is higher than operand precedence, + # operand will be enclosed in parenthesis. + return '(' + r + ')' + return r + + def __pos__(self): + return self + + def __neg__(self): + return self * -1 + + def __add__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + if self.op is other.op: + if self.op in (Op.INTEGER, Op.REAL): + return as_number( + self.data[0] + other.data[0], + max(self.data[1], other.data[1])) + if self.op is Op.COMPLEX: + r1, i1 = self.data + r2, i2 = other.data + return as_complex(r1 + r2, i1 + i2) + if self.op is Op.TERMS: + r = Expr(self.op, dict(self.data)) + for k, v in other.data.items(): + _pairs_add(r.data, k, v) + return normalize(r) + if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL): + return self + as_complex(other) + elif self.op in (Op.INTEGER, Op.REAL) and other.op is Op.COMPLEX: + return as_complex(self) + other + elif self.op is Op.REAL and other.op is Op.INTEGER: + return self + as_real(other, kind=self.data[1]) + elif self.op is Op.INTEGER and other.op is Op.REAL: + return as_real(self, kind=other.data[1]) + other + return as_terms(self) + as_terms(other) + return NotImplemented + + def __radd__(self, other): + if isinstance(other, number_types): + return as_number(other) + self + return NotImplemented + + def __sub__(self, other): + return self + (-other) + + def __rsub__(self, other): + if isinstance(other, number_types): + return as_number(other) - self + return NotImplemented + + def __mul__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + if self.op is other.op: + if self.op in (Op.INTEGER, Op.REAL): + return as_number(self.data[0] * other.data[0], + max(self.data[1], other.data[1])) + elif self.op is Op.COMPLEX: + r1, i1 = self.data + r2, i2 = other.data + return as_complex(r1 * r2 - i1 * i2, r1 * i2 + r2 * i1) + + if self.op is Op.FACTORS: + r = Expr(self.op, dict(self.data)) + for k, v in other.data.items(): + _pairs_add(r.data, k, v) + return normalize(r) + elif self.op is Op.TERMS: + r = Expr(self.op, {}) + for t1, c1 in self.data.items(): + for t2, c2 in other.data.items(): + _pairs_add(r.data, t1 * t2, c1 * c2) + return normalize(r) + + if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL): + return self * as_complex(other) + elif other.op is Op.COMPLEX and self.op in (Op.INTEGER, Op.REAL): + return as_complex(self) * other + elif self.op is Op.REAL and other.op is Op.INTEGER: + return self * as_real(other, kind=self.data[1]) + elif self.op is Op.INTEGER and other.op is Op.REAL: + return as_real(self, kind=other.data[1]) * other + + if self.op is Op.TERMS: + return self * as_terms(other) + elif other.op is Op.TERMS: + return as_terms(self) * other + + return as_factors(self) * as_factors(other) + return NotImplemented + + def __rmul__(self, other): + if isinstance(other, number_types): + return as_number(other) * self + return NotImplemented + + def __pow__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + if other.op is Op.INTEGER: + exponent = other.data[0] + # TODO: other kind not used + if exponent == 0: + return as_number(1) + if exponent == 1: + return self + if exponent > 0: + if self.op is Op.FACTORS: + r = Expr(self.op, {}) + for k, v in self.data.items(): + r.data[k] = v * exponent + return normalize(r) + return self * (self ** (exponent - 1)) + elif exponent != -1: + return (self ** (-exponent)) ** -1 + return Expr(Op.FACTORS, {self: exponent}) + return as_apply(ArithOp.POW, self, other) + return NotImplemented + + def __truediv__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + # Fortran / is different from Python /: + # - `/` is a truncate operation for integer operands + return normalize(as_apply(ArithOp.DIV, self, other)) + return NotImplemented + + def __rtruediv__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + return other / self + return NotImplemented + + def __floordiv__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + # Fortran // is different from Python //: + # - `//` is a concatenate operation for string operands + return normalize(Expr(Op.CONCAT, (self, other))) + return NotImplemented + + def __rfloordiv__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + return other // self + return NotImplemented + + def __call__(self, *args, **kwargs): + # In Fortran, parenthesis () are use for both function call as + # well as indexing operations. + # + # TODO: implement a method for deciding when __call__ should + # return an INDEXING expression. + return as_apply(self, *map(as_expr, args), + **{k: as_expr(v) for k, v in kwargs.items()}) + + def __getitem__(self, index): + # Provided to support C indexing operations that .pyf files + # may contain. + index = as_expr(index) + if not isinstance(index, tuple): + index = index, + if len(index) > 1: + ewarn(f'C-index should be a single expression but got `{index}`') + return Expr(Op.INDEXING, (self,) + index) + + def substitute(self, symbols_map): + """Recursively substitute symbols with values in symbols map. + + Symbols map is a dictionary of symbol-expression pairs. + """ + if self.op is Op.SYMBOL: + value = symbols_map.get(self) + if value is None: + return self + m = re.match(r'\A(@__f2py_PARENTHESIS_(\w+)_\d+@)\Z', self.data) + if m: + # complement to fromstring method + items, paren = m.groups() + if paren in ['ROUNDDIV', 'SQUARE']: + return as_array(value) + assert paren == 'ROUND', (paren, value) + return value + if self.op in (Op.INTEGER, Op.REAL, Op.STRING): + return self + if self.op in (Op.ARRAY, Op.COMPLEX): + return Expr(self.op, tuple(item.substitute(symbols_map) + for item in self.data)) + if self.op is Op.CONCAT: + return normalize(Expr(self.op, tuple(item.substitute(symbols_map) + for item in self.data))) + if self.op is Op.TERMS: + r = None + for term, coeff in self.data.items(): + if r is None: + r = term.substitute(symbols_map) * coeff + else: + r += term.substitute(symbols_map) * coeff + if r is None: + ewarn('substitute: empty TERMS expression interpreted as' + ' int-literal 0') + return as_number(0) + return r + if self.op is Op.FACTORS: + r = None + for base, exponent in self.data.items(): + if r is None: + r = base.substitute(symbols_map) ** exponent + else: + r *= base.substitute(symbols_map) ** exponent + if r is None: + ewarn('substitute: empty FACTORS expression interpreted' + ' as int-literal 1') + return as_number(1) + return r + if self.op is Op.APPLY: + target, args, kwargs = self.data + if isinstance(target, Expr): + target = target.substitute(symbols_map) + args = tuple(a.substitute(symbols_map) for a in args) + kwargs = {k: v.substitute(symbols_map) + for k, v in kwargs.items()} + return normalize(Expr(self.op, (target, args, kwargs))) + if self.op is Op.INDEXING: + func = self.data[0] + if isinstance(func, Expr): + func = func.substitute(symbols_map) + args = tuple(a.substitute(symbols_map) for a in self.data[1:]) + return normalize(Expr(self.op, (func,) + args)) + if self.op is Op.TERNARY: + operands = tuple(a.substitute(symbols_map) for a in self.data) + return normalize(Expr(self.op, operands)) + if self.op in (Op.REF, Op.DEREF): + return normalize(Expr(self.op, self.data.substitute(symbols_map))) + if self.op is Op.RELATIONAL: + rop, left, right = self.data + left = left.substitute(symbols_map) + right = right.substitute(symbols_map) + return normalize(Expr(self.op, (rop, left, right))) + raise NotImplementedError(f'substitute method for {self.op}: {self!r}') + + def traverse(self, visit, *args, **kwargs): + """Traverse expression tree with visit function. + + The visit function is applied to an expression with given args + and kwargs. + + Traverse call returns an expression returned by visit when not + None, otherwise return a new normalized expression with + traverse-visit sub-expressions. + """ + result = visit(self, *args, **kwargs) + if result is not None: + return result + + if self.op in (Op.INTEGER, Op.REAL, Op.STRING, Op.SYMBOL): + return self + elif self.op in (Op.COMPLEX, Op.ARRAY, Op.CONCAT, Op.TERNARY): + return normalize(Expr(self.op, tuple( + item.traverse(visit, *args, **kwargs) + for item in self.data))) + elif self.op in (Op.TERMS, Op.FACTORS): + data = {} + for k, v in self.data.items(): + k = k.traverse(visit, *args, **kwargs) + v = (v.traverse(visit, *args, **kwargs) + if isinstance(v, Expr) else v) + if k in data: + v = data[k] + v + data[k] = v + return normalize(Expr(self.op, data)) + elif self.op is Op.APPLY: + obj = self.data[0] + func = (obj.traverse(visit, *args, **kwargs) + if isinstance(obj, Expr) else obj) + operands = tuple(operand.traverse(visit, *args, **kwargs) + for operand in self.data[1]) + kwoperands = {k: v.traverse(visit, *args, **kwargs) + for k, v in self.data[2].items()} + return normalize(Expr(self.op, (func, operands, kwoperands))) + elif self.op is Op.INDEXING: + obj = self.data[0] + obj = (obj.traverse(visit, *args, **kwargs) + if isinstance(obj, Expr) else obj) + indices = tuple(index.traverse(visit, *args, **kwargs) + for index in self.data[1:]) + return normalize(Expr(self.op, (obj,) + indices)) + elif self.op in (Op.REF, Op.DEREF): + return normalize(Expr(self.op, + self.data.traverse(visit, *args, **kwargs))) + elif self.op is Op.RELATIONAL: + rop, left, right = self.data + left = left.traverse(visit, *args, **kwargs) + right = right.traverse(visit, *args, **kwargs) + return normalize(Expr(self.op, (rop, left, right))) + raise NotImplementedError(f'traverse method for {self.op}') + + def contains(self, other): + """Check if self contains other. + """ + found = [] + + def visit(expr, found=found): + if found: + return expr + elif expr == other: + found.append(1) + return expr + + self.traverse(visit) + + return len(found) != 0 + + def symbols(self): + """Return a set of symbols contained in self. + """ + found = set() + + def visit(expr, found=found): + if expr.op is Op.SYMBOL: + found.add(expr) + + self.traverse(visit) + + return found + + def polynomial_atoms(self): + """Return a set of expressions used as atoms in polynomial self. + """ + found = set() + + def visit(expr, found=found): + if expr.op is Op.FACTORS: + for b in expr.data: + b.traverse(visit) + return expr + if expr.op in (Op.TERMS, Op.COMPLEX): + return + if expr.op is Op.APPLY and isinstance(expr.data[0], ArithOp): + if expr.data[0] is ArithOp.POW: + expr.data[1][0].traverse(visit) + return expr + return + if expr.op in (Op.INTEGER, Op.REAL): + return expr + + found.add(expr) + + if expr.op in (Op.INDEXING, Op.APPLY): + return expr + + self.traverse(visit) + + return found + + def linear_solve(self, symbol): + """Return a, b such that a * symbol + b == self. + + If self is not linear with respect to symbol, raise RuntimeError. + """ + b = self.substitute({symbol: as_number(0)}) + ax = self - b + a = ax.substitute({symbol: as_number(1)}) + + zero, _ = as_numer_denom(a * symbol - ax) + + if zero != as_number(0): + raise RuntimeError(f'not a {symbol}-linear equation:' + f' {a} * {symbol} + {b} == {self}') + return a, b + + +def normalize(obj): + """Normalize Expr and apply basic evaluation methods. + """ + if not isinstance(obj, Expr): + return obj + + if obj.op is Op.TERMS: + d = {} + for t, c in obj.data.items(): + if c == 0: + continue + if t.op is Op.COMPLEX and c != 1: + t = t * c + c = 1 + if t.op is Op.TERMS: + for t1, c1 in t.data.items(): + _pairs_add(d, t1, c1 * c) + else: + _pairs_add(d, t, c) + if len(d) == 0: + # TODO: determine correct kind + return as_number(0) + elif len(d) == 1: + (t, c), = d.items() + if c == 1: + return t + return Expr(Op.TERMS, d) + + if obj.op is Op.FACTORS: + coeff = 1 + d = {} + for b, e in obj.data.items(): + if e == 0: + continue + if b.op is Op.TERMS and isinstance(e, integer_types) and e > 1: + # expand integer powers of sums + b = b * (b ** (e - 1)) + e = 1 + + if b.op in (Op.INTEGER, Op.REAL): + if e == 1: + coeff *= b.data[0] + elif e > 0: + coeff *= b.data[0] ** e + else: + _pairs_add(d, b, e) + elif b.op is Op.FACTORS: + if e > 0 and isinstance(e, integer_types): + for b1, e1 in b.data.items(): + _pairs_add(d, b1, e1 * e) + else: + _pairs_add(d, b, e) + else: + _pairs_add(d, b, e) + if len(d) == 0 or coeff == 0: + # TODO: determine correct kind + assert isinstance(coeff, number_types) + return as_number(coeff) + elif len(d) == 1: + (b, e), = d.items() + if e == 1: + t = b + else: + t = Expr(Op.FACTORS, d) + if coeff == 1: + return t + return Expr(Op.TERMS, {t: coeff}) + elif coeff == 1: + return Expr(Op.FACTORS, d) + else: + return Expr(Op.TERMS, {Expr(Op.FACTORS, d): coeff}) + + if obj.op is Op.APPLY and obj.data[0] is ArithOp.DIV: + dividend, divisor = obj.data[1] + t1, c1 = as_term_coeff(dividend) + t2, c2 = as_term_coeff(divisor) + if isinstance(c1, integer_types) and isinstance(c2, integer_types): + g = gcd(c1, c2) + c1, c2 = c1 // g, c2 // g + else: + c1, c2 = c1 / c2, 1 + + if t1.op is Op.APPLY and t1.data[0] is ArithOp.DIV: + numer = t1.data[1][0] * c1 + denom = t1.data[1][1] * t2 * c2 + return as_apply(ArithOp.DIV, numer, denom) + + if t2.op is Op.APPLY and t2.data[0] is ArithOp.DIV: + numer = t2.data[1][1] * t1 * c1 + denom = t2.data[1][0] * c2 + return as_apply(ArithOp.DIV, numer, denom) + + d = dict(as_factors(t1).data) + for b, e in as_factors(t2).data.items(): + _pairs_add(d, b, -e) + numer, denom = {}, {} + for b, e in d.items(): + if e > 0: + numer[b] = e + else: + denom[b] = -e + numer = normalize(Expr(Op.FACTORS, numer)) * c1 + denom = normalize(Expr(Op.FACTORS, denom)) * c2 + + if denom.op in (Op.INTEGER, Op.REAL) and denom.data[0] == 1: + # TODO: denom kind not used + return numer + return as_apply(ArithOp.DIV, numer, denom) + + if obj.op is Op.CONCAT: + lst = [obj.data[0]] + for s in obj.data[1:]: + last = lst[-1] + if ( + last.op is Op.STRING + and s.op is Op.STRING + and last.data[0][0] in '"\'' + and s.data[0][0] == last.data[0][-1] + ): + new_last = as_string(last.data[0][:-1] + s.data[0][1:], + max(last.data[1], s.data[1])) + lst[-1] = new_last + else: + lst.append(s) + if len(lst) == 1: + return lst[0] + return Expr(Op.CONCAT, tuple(lst)) + + if obj.op is Op.TERNARY: + cond, expr1, expr2 = map(normalize, obj.data) + if cond.op is Op.INTEGER: + return expr1 if cond.data[0] else expr2 + return Expr(Op.TERNARY, (cond, expr1, expr2)) + + return obj + + +def as_expr(obj): + """Convert non-Expr objects to Expr objects. + """ + if isinstance(obj, complex): + return as_complex(obj.real, obj.imag) + if isinstance(obj, number_types): + return as_number(obj) + if isinstance(obj, str): + # STRING expression holds string with boundary quotes, hence + # applying repr: + return as_string(repr(obj)) + if isinstance(obj, tuple): + return tuple(map(as_expr, obj)) + return obj + + +def as_symbol(obj): + """Return object as SYMBOL expression (variable or unparsed expression). + """ + return Expr(Op.SYMBOL, obj) + + +def as_number(obj, kind=4): + """Return object as INTEGER or REAL constant. + """ + if isinstance(obj, int): + return Expr(Op.INTEGER, (obj, kind)) + if isinstance(obj, float): + return Expr(Op.REAL, (obj, kind)) + if isinstance(obj, Expr): + if obj.op in (Op.INTEGER, Op.REAL): + return obj + raise OpError(f'cannot convert {obj} to INTEGER or REAL constant') + + +def as_integer(obj, kind=4): + """Return object as INTEGER constant. + """ + if isinstance(obj, int): + return Expr(Op.INTEGER, (obj, kind)) + if isinstance(obj, Expr): + if obj.op is Op.INTEGER: + return obj + raise OpError(f'cannot convert {obj} to INTEGER constant') + + +def as_real(obj, kind=4): + """Return object as REAL constant. + """ + if isinstance(obj, int): + return Expr(Op.REAL, (float(obj), kind)) + if isinstance(obj, float): + return Expr(Op.REAL, (obj, kind)) + if isinstance(obj, Expr): + if obj.op is Op.REAL: + return obj + elif obj.op is Op.INTEGER: + return Expr(Op.REAL, (float(obj.data[0]), kind)) + raise OpError(f'cannot convert {obj} to REAL constant') + + +def as_string(obj, kind=1): + """Return object as STRING expression (string literal constant). + """ + return Expr(Op.STRING, (obj, kind)) + + +def as_array(obj): + """Return object as ARRAY expression (array constant). + """ + if isinstance(obj, Expr): + obj = obj, + return Expr(Op.ARRAY, obj) + + +def as_complex(real, imag=0): + """Return object as COMPLEX expression (complex literal constant). + """ + return Expr(Op.COMPLEX, (as_expr(real), as_expr(imag))) + + +def as_apply(func, *args, **kwargs): + """Return object as APPLY expression (function call, constructor, etc.) + """ + return Expr(Op.APPLY, + (func, tuple(map(as_expr, args)), + {k: as_expr(v) for k, v in kwargs.items()})) + + +def as_ternary(cond, expr1, expr2): + """Return object as TERNARY expression (cond?expr1:expr2). + """ + return Expr(Op.TERNARY, (cond, expr1, expr2)) + + +def as_ref(expr): + """Return object as referencing expression. + """ + return Expr(Op.REF, expr) + + +def as_deref(expr): + """Return object as dereferencing expression. + """ + return Expr(Op.DEREF, expr) + + +def as_eq(left, right): + return Expr(Op.RELATIONAL, (RelOp.EQ, left, right)) + + +def as_ne(left, right): + return Expr(Op.RELATIONAL, (RelOp.NE, left, right)) + + +def as_lt(left, right): + return Expr(Op.RELATIONAL, (RelOp.LT, left, right)) + + +def as_le(left, right): + return Expr(Op.RELATIONAL, (RelOp.LE, left, right)) + + +def as_gt(left, right): + return Expr(Op.RELATIONAL, (RelOp.GT, left, right)) + + +def as_ge(left, right): + return Expr(Op.RELATIONAL, (RelOp.GE, left, right)) + + +def as_terms(obj): + """Return expression as TERMS expression. + """ + if isinstance(obj, Expr): + obj = normalize(obj) + if obj.op is Op.TERMS: + return obj + if obj.op is Op.INTEGER: + return Expr(Op.TERMS, {as_integer(1, obj.data[1]): obj.data[0]}) + if obj.op is Op.REAL: + return Expr(Op.TERMS, {as_real(1, obj.data[1]): obj.data[0]}) + return Expr(Op.TERMS, {obj: 1}) + raise OpError(f'cannot convert {type(obj)} to terms Expr') + + +def as_factors(obj): + """Return expression as FACTORS expression. + """ + if isinstance(obj, Expr): + obj = normalize(obj) + if obj.op is Op.FACTORS: + return obj + if obj.op is Op.TERMS: + if len(obj.data) == 1: + (term, coeff), = obj.data.items() + if coeff == 1: + return Expr(Op.FACTORS, {term: 1}) + return Expr(Op.FACTORS, {term: 1, Expr.number(coeff): 1}) + if (obj.op is Op.APPLY + and obj.data[0] is ArithOp.DIV + and not obj.data[2]): + return Expr(Op.FACTORS, {obj.data[1][0]: 1, obj.data[1][1]: -1}) + return Expr(Op.FACTORS, {obj: 1}) + raise OpError(f'cannot convert {type(obj)} to terms Expr') + + +def as_term_coeff(obj): + """Return expression as term-coefficient pair. + """ + if isinstance(obj, Expr): + obj = normalize(obj) + if obj.op is Op.INTEGER: + return as_integer(1, obj.data[1]), obj.data[0] + if obj.op is Op.REAL: + return as_real(1, obj.data[1]), obj.data[0] + if obj.op is Op.TERMS: + if len(obj.data) == 1: + (term, coeff), = obj.data.items() + return term, coeff + # TODO: find common divisor of coefficients + if obj.op is Op.APPLY and obj.data[0] is ArithOp.DIV: + t, c = as_term_coeff(obj.data[1][0]) + return as_apply(ArithOp.DIV, t, obj.data[1][1]), c + return obj, 1 + raise OpError(f'cannot convert {type(obj)} to term and coeff') + + +def as_numer_denom(obj): + """Return expression as numer-denom pair. + """ + if isinstance(obj, Expr): + obj = normalize(obj) + if obj.op in (Op.INTEGER, Op.REAL, Op.COMPLEX, Op.SYMBOL, + Op.INDEXING, Op.TERNARY): + return obj, as_number(1) + elif obj.op is Op.APPLY: + if obj.data[0] is ArithOp.DIV and not obj.data[2]: + numers, denoms = map(as_numer_denom, obj.data[1]) + return numers[0] * denoms[1], numers[1] * denoms[0] + return obj, as_number(1) + elif obj.op is Op.TERMS: + numers, denoms = [], [] + for term, coeff in obj.data.items(): + n, d = as_numer_denom(term) + n = n * coeff + numers.append(n) + denoms.append(d) + numer, denom = as_number(0), as_number(1) + for i in range(len(numers)): + n = numers[i] + for j in range(len(numers)): + if i != j: + n *= denoms[j] + numer += n + denom *= denoms[i] + if denom.op in (Op.INTEGER, Op.REAL) and denom.data[0] < 0: + numer, denom = -numer, -denom + return numer, denom + elif obj.op is Op.FACTORS: + numer, denom = as_number(1), as_number(1) + for b, e in obj.data.items(): + bnumer, bdenom = as_numer_denom(b) + if e > 0: + numer *= bnumer ** e + denom *= bdenom ** e + elif e < 0: + numer *= bdenom ** (-e) + denom *= bnumer ** (-e) + return numer, denom + raise OpError(f'cannot convert {type(obj)} to numer and denom') + + +def _counter(): + # Used internally to generate unique dummy symbols + counter = 0 + while True: + counter += 1 + yield counter + + +COUNTER = _counter() + + +def eliminate_quotes(s): + """Replace quoted substrings of input string. + + Return a new string and a mapping of replacements. + """ + d = {} + + def repl(m): + kind, value = m.groups()[:2] + if kind: + # remove trailing underscore + kind = kind[:-1] + p = {"'": "SINGLE", '"': "DOUBLE"}[value[0]] + k = f'{kind}@__f2py_QUOTES_{p}_{COUNTER.__next__()}@' + d[k] = value + return k + + new_s = re.sub(r'({kind}_|)({single_quoted}|{double_quoted})'.format( + kind=r'\w[\w\d_]*', + single_quoted=r"('([^'\\]|(\\.))*')", + double_quoted=r'("([^"\\]|(\\.))*")'), + repl, s) + + assert '"' not in new_s + assert "'" not in new_s + + return new_s, d + + +def insert_quotes(s, d): + """Inverse of eliminate_quotes. + """ + for k, v in d.items(): + kind = k[:k.find('@')] + if kind: + kind += '_' + s = s.replace(k, kind + v) + return s + + +def replace_parenthesis(s): + """Replace substrings of input that are enclosed in parenthesis. + + Return a new string and a mapping of replacements. + """ + # Find a parenthesis pair that appears first. + + # Fortran deliminator are `(`, `)`, `[`, `]`, `(/', '/)`, `/`. + # We don't handle `/` deliminator because it is not a part of an + # expression. + left, right = None, None + mn_i = len(s) + for left_, right_ in (('(/', '/)'), + '()', + '{}', # to support C literal structs + '[]'): + i = s.find(left_) + if i == -1: + continue + if i < mn_i: + mn_i = i + left, right = left_, right_ + + if left is None: + return s, {} + + i = mn_i + j = s.find(right, i) + if j == -1: + raise ValueError(f'Mismatch of {left + right} parenthesis in {s!r}') + + while s.count(left, i + 1, j) != s.count(right, i + 1, j): + j = s.find(right, j + 1) + if j == -1: + raise ValueError(f'Mismatch of {left + right} parenthesis in {s!r}') + + p = {'(': 'ROUND', '[': 'SQUARE', '{': 'CURLY', '(/': 'ROUNDDIV'}[left] + + k = f'@__f2py_PARENTHESIS_{p}_{COUNTER.__next__()}@' + v = s[i + len(left):j] + r, d = replace_parenthesis(s[j + len(right):]) + d[k] = v + return s[:i] + k + r, d + + +def _get_parenthesis_kind(s): + assert s.startswith('@__f2py_PARENTHESIS_'), s + return s.split('_')[4] + + +def unreplace_parenthesis(s, d): + """Inverse of replace_parenthesis. + """ + for k, v in d.items(): + p = _get_parenthesis_kind(k) + left = {'ROUND': '(', 'SQUARE': '[', 'CURLY': '{', 'ROUNDDIV': '(/'}[p] + right = {'ROUND': ')', 'SQUARE': ']', 'CURLY': '}', 'ROUNDDIV': '/)'}[p] + s = s.replace(k, left + v + right) + return s + + +def fromstring(s, language=Language.C): + """Create an expression from a string. + + This is a "lazy" parser, that is, only arithmetic operations are + resolved, non-arithmetic operations are treated as symbols. + """ + r = _FromStringWorker(language=language).parse(s) + if isinstance(r, Expr): + return r + raise ValueError(f'failed to parse `{s}` to Expr instance: got `{r}`') + + +class _Pair: + # Internal class to represent a pair of expressions + + def __init__(self, left, right): + self.left = left + self.right = right + + def substitute(self, symbols_map): + left, right = self.left, self.right + if isinstance(left, Expr): + left = left.substitute(symbols_map) + if isinstance(right, Expr): + right = right.substitute(symbols_map) + return _Pair(left, right) + + def __repr__(self): + return f'{type(self).__name__}({self.left}, {self.right})' + + +class _FromStringWorker: + + def __init__(self, language=Language.C): + self.original = None + self.quotes_map = None + self.language = language + + def finalize_string(self, s): + return insert_quotes(s, self.quotes_map) + + def parse(self, inp): + self.original = inp + unquoted, self.quotes_map = eliminate_quotes(inp) + return self.process(unquoted) + + def process(self, s, context='expr'): + """Parse string within the given context. + + The context may define the result in case of ambiguous + expressions. For instance, consider expressions `f(x, y)` and + `(x, y) + (a, b)` where `f` is a function and pair `(x, y)` + denotes complex number. Specifying context as "args" or + "expr", the subexpression `(x, y)` will be parse to an + argument list or to a complex number, respectively. + """ + if isinstance(s, (list, tuple)): + return type(s)(self.process(s_, context) for s_ in s) + + assert isinstance(s, str), (type(s), s) + + # replace subexpressions in parenthesis with f2py @-names + r, raw_symbols_map = replace_parenthesis(s) + r = r.strip() + + def restore(r): + # restores subexpressions marked with f2py @-names + if isinstance(r, (list, tuple)): + return type(r)(map(restore, r)) + return unreplace_parenthesis(r, raw_symbols_map) + + # comma-separated tuple + if ',' in r: + operands = restore(r.split(',')) + if context == 'args': + return tuple(self.process(operands)) + if context == 'expr': + if len(operands) == 2: + # complex number literal + return as_complex(*self.process(operands)) + raise NotImplementedError( + f'parsing comma-separated list (context={context}): {r}') + + # ternary operation + m = re.match(r'\A([^?]+)[?]([^:]+)[:](.+)\Z', r) + if m: + assert context == 'expr', context + oper, expr1, expr2 = restore(m.groups()) + oper = self.process(oper) + expr1 = self.process(expr1) + expr2 = self.process(expr2) + return as_ternary(oper, expr1, expr2) + + # relational expression + if self.language is Language.Fortran: + m = re.match( + r'\A(.+)\s*[.](eq|ne|lt|le|gt|ge)[.]\s*(.+)\Z', r, re.I) + else: + m = re.match( + r'\A(.+)\s*([=][=]|[!][=]|[<][=]|[<]|[>][=]|[>])\s*(.+)\Z', r) + if m: + left, rop, right = m.groups() + if self.language is Language.Fortran: + rop = '.' + rop + '.' + left, right = self.process(restore((left, right))) + rop = RelOp.fromstring(rop, language=self.language) + return Expr(Op.RELATIONAL, (rop, left, right)) + + # keyword argument + m = re.match(r'\A(\w[\w\d_]*)\s*[=](.*)\Z', r) + if m: + keyname, value = m.groups() + value = restore(value) + return _Pair(keyname, self.process(value)) + + # addition/subtraction operations + operands = re.split(r'((? 1: + result = self.process(restore(operands[0] or '0')) + for op, operand in zip(operands[1::2], operands[2::2]): + operand = self.process(restore(operand)) + op = op.strip() + if op == '+': + result += operand + else: + assert op == '-' + result -= operand + return result + + # string concatenate operation + if self.language is Language.Fortran and '//' in r: + operands = restore(r.split('//')) + return Expr(Op.CONCAT, + tuple(self.process(operands))) + + # multiplication/division operations + operands = re.split(r'(?<=[@\w\d_])\s*([*]|/)', + (r if self.language is Language.C + else r.replace('**', '@__f2py_DOUBLE_STAR@'))) + if len(operands) > 1: + operands = restore(operands) + if self.language is not Language.C: + operands = [operand.replace('@__f2py_DOUBLE_STAR@', '**') + for operand in operands] + # Expression is an arithmetic product + result = self.process(operands[0]) + for op, operand in zip(operands[1::2], operands[2::2]): + operand = self.process(operand) + op = op.strip() + if op == '*': + result *= operand + else: + assert op == '/' + result /= operand + return result + + # referencing/dereferencing + if r.startswith(('*', '&')): + op = {'*': Op.DEREF, '&': Op.REF}[r[0]] + operand = self.process(restore(r[1:])) + return Expr(op, operand) + + # exponentiation operations + if self.language is not Language.C and '**' in r: + operands = list(reversed(restore(r.split('**')))) + result = self.process(operands[0]) + for operand in operands[1:]: + operand = self.process(operand) + result = operand ** result + return result + + # int-literal-constant + m = re.match(r'\A({digit_string})({kind}|)\Z'.format( + digit_string=r'\d+', + kind=r'_(\d+|\w[\w\d_]*)'), r) + if m: + value, _, kind = m.groups() + if kind and kind.isdigit(): + kind = int(kind) + return as_integer(int(value), kind or 4) + + # real-literal-constant + m = re.match(r'\A({significant}({exponent}|)|\d+{exponent})({kind}|)\Z' + .format( + significant=r'[.]\d+|\d+[.]\d*', + exponent=r'[edED][+-]?\d+', + kind=r'_(\d+|\w[\w\d_]*)'), r) + if m: + value, _, _, kind = m.groups() + if kind and kind.isdigit(): + kind = int(kind) + value = value.lower() + if 'd' in value: + return as_real(float(value.replace('d', 'e')), kind or 8) + return as_real(float(value), kind or 4) + + # string-literal-constant with kind parameter specification + if r in self.quotes_map: + kind = r[:r.find('@')] + return as_string(self.quotes_map[r], kind or 1) + + # array constructor or literal complex constant or + # parenthesized expression + if r in raw_symbols_map: + paren = _get_parenthesis_kind(r) + items = self.process(restore(raw_symbols_map[r]), + 'expr' if paren == 'ROUND' else 'args') + if paren == 'ROUND': + if isinstance(items, Expr): + return items + if paren in ['ROUNDDIV', 'SQUARE']: + # Expression is an array constructor + if isinstance(items, Expr): + items = (items,) + return as_array(items) + + # function call/indexing + m = re.match(r'\A(.+)\s*(@__f2py_PARENTHESIS_(ROUND|SQUARE)_\d+@)\Z', + r) + if m: + target, args, paren = m.groups() + target = self.process(restore(target)) + args = self.process(restore(args)[1:-1], 'args') + if not isinstance(args, tuple): + args = args, + if paren == 'ROUND': + kwargs = {a.left: a.right for a in args + if isinstance(a, _Pair)} + args = tuple(a for a in args if not isinstance(a, _Pair)) + # Warning: this could also be Fortran indexing operation.. + return as_apply(target, *args, **kwargs) + else: + # Expression is a C/Python indexing operation + # (e.g. used in .pyf files) + assert paren == 'SQUARE' + return target[args] + + # Fortran standard conforming identifier + m = re.match(r'\A\w[\w\d_]*\Z', r) + if m: + return as_symbol(r) + + # fall-back to symbol + r = self.finalize_string(restore(r)) + ewarn( + f'fromstring: treating {r!r} as symbol (original={self.original})') + return as_symbol(r) diff --git a/local_packages/numpy/f2py/symbolic.pyi b/local_packages/numpy/f2py/symbolic.pyi new file mode 100644 index 0000000..06be2bb --- /dev/null +++ b/local_packages/numpy/f2py/symbolic.pyi @@ -0,0 +1,219 @@ +from collections.abc import Callable, Mapping +from enum import Enum +from typing import Any, Generic, Literal as L, ParamSpec, Self, TypeAlias, overload +from typing_extensions import TypeVar + +__all__ = ["Expr"] + +### + +_Tss = ParamSpec("_Tss") +_ExprT = TypeVar("_ExprT", bound=Expr) +_ExprT1 = TypeVar("_ExprT1", bound=Expr) +_ExprT2 = TypeVar("_ExprT2", bound=Expr) +_OpT_co = TypeVar("_OpT_co", bound=Op, default=Op, covariant=True) +_LanguageT_co = TypeVar("_LanguageT_co", bound=Language, default=Language, covariant=True) +_DataT_co = TypeVar("_DataT_co", default=Any, covariant=True) +_LeftT_co = TypeVar("_LeftT_co", default=Any, covariant=True) +_RightT_co = TypeVar("_RightT_co", default=Any, covariant=True) + +_RelCOrPy: TypeAlias = L["==", "!=", "<", "<=", ">", ">="] +_RelFortran: TypeAlias = L[".eq.", ".ne.", ".lt.", ".le.", ".gt.", ".ge."] + +_ToExpr: TypeAlias = Expr | complex | str +_ToExprN: TypeAlias = _ToExpr | tuple[_ToExprN, ...] +_NestedString: TypeAlias = str | tuple[_NestedString, ...] | list[_NestedString] + +### + +class OpError(Exception): ... +class ExprWarning(UserWarning): ... + +class Language(Enum): + Python = 0 + Fortran = 1 + C = 2 + +class Op(Enum): + INTEGER = 10 + REAL = 12 + COMPLEX = 15 + STRING = 20 + ARRAY = 30 + SYMBOL = 40 + TERNARY = 100 + APPLY = 200 + INDEXING = 210 + CONCAT = 220 + RELATIONAL = 300 + TERMS = 1_000 + FACTORS = 2_000 + REF = 3_000 + DEREF = 3_001 + +class RelOp(Enum): + EQ = 1 + NE = 2 + LT = 3 + LE = 4 + GT = 5 + GE = 6 + + @overload + @classmethod + def fromstring(cls, s: _RelCOrPy, language: L[Language.C, Language.Python] = ...) -> RelOp: ... + @overload + @classmethod + def fromstring(cls, s: _RelFortran, language: L[Language.Fortran]) -> RelOp: ... + + # + @overload + def tostring(self, /, language: L[Language.C, Language.Python] = ...) -> _RelCOrPy: ... + @overload + def tostring(self, /, language: L[Language.Fortran]) -> _RelFortran: ... + +class ArithOp(Enum): + POS = 1 + NEG = 2 + ADD = 3 + SUB = 4 + MUL = 5 + DIV = 6 + POW = 7 + +class Precedence(Enum): + ATOM = 0 + POWER = 1 + UNARY = 2 + PRODUCT = 3 + SUM = 4 + LT = 6 + EQ = 7 + LAND = 11 + LOR = 12 + TERNARY = 13 + ASSIGN = 14 + TUPLE = 15 + NONE = 100 + +class Expr(Generic[_OpT_co, _DataT_co]): + op: _OpT_co + data: _DataT_co + + @staticmethod + def parse(s: str, language: Language = ...) -> Expr: ... + + # + def __init__(self, /, op: Op, data: _DataT_co) -> None: ... + + # + def __lt__(self, other: Expr, /) -> bool: ... + def __le__(self, other: Expr, /) -> bool: ... + def __gt__(self, other: Expr, /) -> bool: ... + def __ge__(self, other: Expr, /) -> bool: ... + + # + def __pos__(self, /) -> Self: ... + def __neg__(self, /) -> Expr: ... + + # + def __add__(self, other: Expr, /) -> Expr: ... + def __radd__(self, other: Expr, /) -> Expr: ... + + # + def __sub__(self, other: Expr, /) -> Expr: ... + def __rsub__(self, other: Expr, /) -> Expr: ... + + # + def __mul__(self, other: Expr, /) -> Expr: ... + def __rmul__(self, other: Expr, /) -> Expr: ... + + # + def __pow__(self, other: Expr, /) -> Expr: ... + + # + def __truediv__(self, other: Expr, /) -> Expr: ... + def __rtruediv__(self, other: Expr, /) -> Expr: ... + + # + def __floordiv__(self, other: Expr, /) -> Expr: ... + def __rfloordiv__(self, other: Expr, /) -> Expr: ... + + # + def __call__( + self, + /, + *args: _ToExprN, + **kwargs: _ToExprN, + ) -> Expr[L[Op.APPLY], tuple[Self, tuple[Expr, ...], dict[str, Expr]]]: ... + + # + @overload + def __getitem__(self, index: _ExprT | tuple[_ExprT], /) -> Expr[L[Op.INDEXING], tuple[Self, _ExprT]]: ... + @overload + def __getitem__(self, index: _ToExpr | tuple[_ToExpr], /) -> Expr[L[Op.INDEXING], tuple[Self, Expr]]: ... + + # + def substitute(self, /, symbols_map: Mapping[Expr, Expr]) -> Expr: ... + + # + @overload + def traverse(self, /, visit: Callable[_Tss, None], *args: _Tss.args, **kwargs: _Tss.kwargs) -> Expr: ... + @overload + def traverse(self, /, visit: Callable[_Tss, _ExprT], *args: _Tss.args, **kwargs: _Tss.kwargs) -> _ExprT: ... + + # + def contains(self, /, other: Expr) -> bool: ... + + # + def symbols(self, /) -> set[Expr]: ... + def polynomial_atoms(self, /) -> set[Expr]: ... + + # + def linear_solve(self, /, symbol: Expr) -> tuple[Expr, Expr]: ... + + # + def tostring(self, /, parent_precedence: Precedence = ..., language: Language = ...) -> str: ... + +class _Pair(Generic[_LeftT_co, _RightT_co]): + left: _LeftT_co + right: _RightT_co + + def __init__(self, /, left: _LeftT_co, right: _RightT_co) -> None: ... + + # + @overload + def substitute(self: _Pair[_ExprT1, _ExprT2], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Expr, Expr]: ... + @overload + def substitute(self: _Pair[_ExprT1, object], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Expr, Any]: ... + @overload + def substitute(self: _Pair[object, _ExprT2], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Any, Expr]: ... + @overload + def substitute(self, /, symbols_map: Mapping[Expr, Expr]) -> _Pair: ... + +class _FromStringWorker(Generic[_LanguageT_co]): + language: _LanguageT_co + + original: str | None + quotes_map: dict[str, str] + + @overload + def __init__(self: _FromStringWorker[L[Language.C]], /, language: L[Language.C] = ...) -> None: ... + @overload + def __init__(self, /, language: _LanguageT_co) -> None: ... + + # + def finalize_string(self, /, s: str) -> str: ... + + # + def parse(self, /, inp: str) -> Expr | _Pair: ... + + # + @overload + def process(self, /, s: str, context: str = "expr") -> Expr | _Pair: ... + @overload + def process(self, /, s: list[str], context: str = "expr") -> list[Expr | _Pair]: ... + @overload + def process(self, /, s: tuple[str, ...], context: str = "expr") -> tuple[Expr | _Pair, ...]: ... + @overload + def process(self, /, s: _NestedString, context: str = "expr") -> Any: ... # noqa: ANN401 diff --git a/local_packages/numpy/f2py/tests/__init__.py b/local_packages/numpy/f2py/tests/__init__.py new file mode 100644 index 0000000..4ed8fdd --- /dev/null +++ b/local_packages/numpy/f2py/tests/__init__.py @@ -0,0 +1,16 @@ +import pytest + +from numpy.testing import IS_EDITABLE, IS_WASM + +if IS_WASM: + pytest.skip( + "WASM/Pyodide does not use or support Fortran", + allow_module_level=True + ) + + +if IS_EDITABLE: + pytest.skip( + "Editable install doesn't support tests with a compile step", + allow_module_level=True + ) diff --git a/local_packages/numpy/f2py/tests/src/abstract_interface/foo.f90 b/local_packages/numpy/f2py/tests/src/abstract_interface/foo.f90 new file mode 100644 index 0000000..76d16aa --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/abstract_interface/foo.f90 @@ -0,0 +1,34 @@ +module ops_module + + abstract interface + subroutine op(x, y, z) + integer, intent(in) :: x, y + integer, intent(out) :: z + end subroutine + end interface + +contains + + subroutine foo(x, y, r1, r2) + integer, intent(in) :: x, y + integer, intent(out) :: r1, r2 + procedure (op) add1, add2 + procedure (op), pointer::p + p=>add1 + call p(x, y, r1) + p=>add2 + call p(x, y, r2) + end subroutine +end module + +subroutine add1(x, y, z) + integer, intent(in) :: x, y + integer, intent(out) :: z + z = x + y +end subroutine + +subroutine add2(x, y, z) + integer, intent(in) :: x, y + integer, intent(out) :: z + z = x + 2 * y +end subroutine diff --git a/local_packages/numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90 b/local_packages/numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90 new file mode 100644 index 0000000..36791e4 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90 @@ -0,0 +1,6 @@ +module test + abstract interface + subroutine foo() + end subroutine + end interface +end module test diff --git a/local_packages/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/local_packages/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c new file mode 100644 index 0000000..25866f1 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c @@ -0,0 +1,235 @@ +/* + * This file was auto-generated with f2py (version:2_1330) and hand edited by + * Pearu for testing purposes. Do not edit this file unless you know what you + * are doing!!! + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/*********************** See f2py2e/cfuncs.py: includes ***********************/ + +#define PY_SSIZE_T_CLEAN +#include +#include "fortranobject.h" +#include + +static PyObject *wrap_error; +static PyObject *wrap_module; + +/************************************ call ************************************/ +static char doc_f2py_rout_wrap_call[] = "\ +Function signature:\n\ + arr = call(type_num,dims,intent,obj)\n\ +Required arguments:\n" +" type_num : input int\n" +" dims : input int-sequence\n" +" intent : input int\n" +" obj : input python object\n" +"Return objects:\n" +" arr : array"; +static PyObject *f2py_rout_wrap_call(PyObject *capi_self, + PyObject *capi_args) { + PyObject * volatile capi_buildvalue = NULL; + int type_num = 0; + int elsize = 0; + npy_intp *dims = NULL; + PyObject *dims_capi = Py_None; + int rank = 0; + int intent = 0; + PyArrayObject *capi_arr_tmp = NULL; + PyObject *arr_capi = Py_None; + int i; + + if (!PyArg_ParseTuple(capi_args,"iiOiO|:wrap.call",\ + &type_num,&elsize,&dims_capi,&intent,&arr_capi)) + return NULL; + rank = PySequence_Length(dims_capi); + dims = malloc(rank*sizeof(npy_intp)); + for (i=0;ikind, + PyArray_DESCR(arr)->type, + PyArray_TYPE(arr), + PyArray_ITEMSIZE(arr), + PyDataType_ALIGNMENT(PyArray_DESCR(arr)), + PyArray_FLAGS(arr), + PyArray_ITEMSIZE(arr)); +} + +static PyMethodDef f2py_module_methods[] = { + + {"call",f2py_rout_wrap_call,METH_VARARGS,doc_f2py_rout_wrap_call}, + {"array_attrs",f2py_rout_wrap_attrs,METH_VARARGS,doc_f2py_rout_wrap_attrs}, + {NULL,NULL} +}; + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "test_array_from_pyobj_ext", + NULL, + -1, + f2py_module_methods, + NULL, + NULL, + NULL, + NULL +}; + +PyMODINIT_FUNC PyInit_test_array_from_pyobj_ext(void) { + PyObject *m,*d, *s; + m = wrap_module = PyModule_Create(&moduledef); + Py_SET_TYPE(&PyFortran_Type, &PyType_Type); + import_array(); + if (PyErr_Occurred()) + Py_FatalError("can't initialize module wrap (failed to import numpy)"); + d = PyModule_GetDict(m); + s = PyUnicode_FromString("This module 'wrap' is auto-generated with f2py (version:2_1330).\nFunctions:\n" + " arr = call(type_num,dims,intent,obj)\n" + "."); + PyDict_SetItemString(d, "__doc__", s); + wrap_error = PyErr_NewException ("wrap.error", NULL, NULL); + Py_DECREF(s); + +#define ADDCONST(NAME, CONST) \ + s = PyLong_FromLong(CONST); \ + PyDict_SetItemString(d, NAME, s); \ + Py_DECREF(s) + + ADDCONST("F2PY_INTENT_IN", F2PY_INTENT_IN); + ADDCONST("F2PY_INTENT_INOUT", F2PY_INTENT_INOUT); + ADDCONST("F2PY_INTENT_OUT", F2PY_INTENT_OUT); + ADDCONST("F2PY_INTENT_HIDE", F2PY_INTENT_HIDE); + ADDCONST("F2PY_INTENT_CACHE", F2PY_INTENT_CACHE); + ADDCONST("F2PY_INTENT_COPY", F2PY_INTENT_COPY); + ADDCONST("F2PY_INTENT_C", F2PY_INTENT_C); + ADDCONST("F2PY_OPTIONAL", F2PY_OPTIONAL); + ADDCONST("F2PY_INTENT_INPLACE", F2PY_INTENT_INPLACE); + ADDCONST("NPY_BOOL", NPY_BOOL); + ADDCONST("NPY_BYTE", NPY_BYTE); + ADDCONST("NPY_UBYTE", NPY_UBYTE); + ADDCONST("NPY_SHORT", NPY_SHORT); + ADDCONST("NPY_USHORT", NPY_USHORT); + ADDCONST("NPY_INT", NPY_INT); + ADDCONST("NPY_UINT", NPY_UINT); + ADDCONST("NPY_INTP", NPY_INTP); + ADDCONST("NPY_UINTP", NPY_UINTP); + ADDCONST("NPY_LONG", NPY_LONG); + ADDCONST("NPY_ULONG", NPY_ULONG); + ADDCONST("NPY_LONGLONG", NPY_LONGLONG); + ADDCONST("NPY_ULONGLONG", NPY_ULONGLONG); + ADDCONST("NPY_FLOAT", NPY_FLOAT); + ADDCONST("NPY_DOUBLE", NPY_DOUBLE); + ADDCONST("NPY_LONGDOUBLE", NPY_LONGDOUBLE); + ADDCONST("NPY_CFLOAT", NPY_CFLOAT); + ADDCONST("NPY_CDOUBLE", NPY_CDOUBLE); + ADDCONST("NPY_CLONGDOUBLE", NPY_CLONGDOUBLE); + ADDCONST("NPY_OBJECT", NPY_OBJECT); + ADDCONST("NPY_STRING", NPY_STRING); + ADDCONST("NPY_UNICODE", NPY_UNICODE); + ADDCONST("NPY_VOID", NPY_VOID); + ADDCONST("NPY_NTYPES_LEGACY", NPY_NTYPES_LEGACY); + ADDCONST("NPY_NOTYPE", NPY_NOTYPE); + ADDCONST("NPY_USERDEF", NPY_USERDEF); + + ADDCONST("CONTIGUOUS", NPY_ARRAY_C_CONTIGUOUS); + ADDCONST("FORTRAN", NPY_ARRAY_F_CONTIGUOUS); + ADDCONST("OWNDATA", NPY_ARRAY_OWNDATA); + ADDCONST("FORCECAST", NPY_ARRAY_FORCECAST); + ADDCONST("ENSURECOPY", NPY_ARRAY_ENSURECOPY); + ADDCONST("ENSUREARRAY", NPY_ARRAY_ENSUREARRAY); + ADDCONST("ALIGNED", NPY_ARRAY_ALIGNED); + ADDCONST("WRITEABLE", NPY_ARRAY_WRITEABLE); + ADDCONST("WRITEBACKIFCOPY", NPY_ARRAY_WRITEBACKIFCOPY); + + ADDCONST("BEHAVED", NPY_ARRAY_BEHAVED); + ADDCONST("BEHAVED_NS", NPY_ARRAY_BEHAVED_NS); + ADDCONST("CARRAY", NPY_ARRAY_CARRAY); + ADDCONST("FARRAY", NPY_ARRAY_FARRAY); + ADDCONST("CARRAY_RO", NPY_ARRAY_CARRAY_RO); + ADDCONST("FARRAY_RO", NPY_ARRAY_FARRAY_RO); + ADDCONST("DEFAULT", NPY_ARRAY_DEFAULT); + ADDCONST("UPDATE_ALL", NPY_ARRAY_UPDATE_ALL); + +#undef ADDCONST + + if (PyErr_Occurred()) + Py_FatalError("can't initialize module wrap"); + +#ifdef F2PY_REPORT_ATEXIT + on_exit(f2py_report_on_exit,(void*)"array_from_pyobj.wrap.call"); +#endif + +#ifdef Py_GIL_DISABLED + // signal whether this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); +#endif + + return m; +} +#ifdef __cplusplus +} +#endif diff --git a/local_packages/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap b/local_packages/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap new file mode 100644 index 0000000..2665f89 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap @@ -0,0 +1 @@ +dict(real=dict(rk="double")) diff --git a/local_packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90 b/local_packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90 new file mode 100644 index 0000000..b301710 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90 @@ -0,0 +1,34 @@ + +subroutine sum(x, res) + implicit none + real, intent(in) :: x(:) + real, intent(out) :: res + + integer :: i + + !print *, "sum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end subroutine sum + +function fsum(x) result (res) + implicit none + real, intent(in) :: x(:) + real :: res + + integer :: i + + !print *, "fsum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end function fsum diff --git a/local_packages/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 b/local_packages/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 new file mode 100644 index 0000000..cbe6317 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 @@ -0,0 +1,41 @@ + +module mod + +contains + +subroutine sum(x, res) + implicit none + real, intent(in) :: x(:) + real, intent(out) :: res + + integer :: i + + !print *, "sum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end subroutine sum + +function fsum(x) result (res) + implicit none + real, intent(in) :: x(:) + real :: res + + integer :: i + + !print *, "fsum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end function fsum + + +end module mod diff --git a/local_packages/numpy/f2py/tests/src/assumed_shape/foo_use.f90 b/local_packages/numpy/f2py/tests/src/assumed_shape/foo_use.f90 new file mode 100644 index 0000000..337465a --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/assumed_shape/foo_use.f90 @@ -0,0 +1,19 @@ +subroutine sum_with_use(x, res) + use precision + + implicit none + + real(kind=rk), intent(in) :: x(:) + real(kind=rk), intent(out) :: res + + integer :: i + + !print *, "size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + + end subroutine diff --git a/local_packages/numpy/f2py/tests/src/assumed_shape/precision.f90 b/local_packages/numpy/f2py/tests/src/assumed_shape/precision.f90 new file mode 100644 index 0000000..ed6c70c --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/assumed_shape/precision.f90 @@ -0,0 +1,4 @@ +module precision + integer, parameter :: rk = selected_real_kind(8) + integer, parameter :: ik = selected_real_kind(4) +end module diff --git a/local_packages/numpy/f2py/tests/src/block_docstring/foo.f b/local_packages/numpy/f2py/tests/src/block_docstring/foo.f new file mode 100644 index 0000000..c8315f1 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/block_docstring/foo.f @@ -0,0 +1,6 @@ + SUBROUTINE FOO() + INTEGER BAR(2, 3) + + COMMON /BLOCK/ BAR + RETURN + END diff --git a/local_packages/numpy/f2py/tests/src/callback/foo.f b/local_packages/numpy/f2py/tests/src/callback/foo.f new file mode 100644 index 0000000..ba397bb --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/callback/foo.f @@ -0,0 +1,62 @@ + subroutine t(fun,a) + integer a +cf2py intent(out) a + external fun + call fun(a) + end + + subroutine func(a) +cf2py intent(in,out) a + integer a + a = a + 11 + end + + subroutine func0(a) +cf2py intent(out) a + integer a + a = 11 + end + + subroutine t2(a) +cf2py intent(callback) fun + integer a +cf2py intent(out) a + external fun + call fun(a) + end + + subroutine string_callback(callback, a) + external callback + double precision callback + double precision a + character*1 r +cf2py intent(out) a + r = 'r' + a = callback(r) + end + + subroutine string_callback_array(callback, cu, lencu, a) + external callback + integer callback + integer lencu + character*8 cu(lencu) + integer a +cf2py intent(out) a + + a = callback(cu, lencu) + end + + subroutine hidden_callback(a, r) + external global_f +cf2py intent(callback, hide) global_f + integer a, r, global_f +cf2py intent(out) r + r = global_f(a) + end + + subroutine hidden_callback2(a, r) + external global_f + integer a, r, global_f +cf2py intent(out) r + r = global_f(a) + end diff --git a/local_packages/numpy/f2py/tests/src/callback/gh17797.f90 b/local_packages/numpy/f2py/tests/src/callback/gh17797.f90 new file mode 100644 index 0000000..49853af --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/callback/gh17797.f90 @@ -0,0 +1,7 @@ +function gh17797(f, y) result(r) + external f + integer(8) :: r, f + integer(8), dimension(:) :: y + r = f(0) + r = r + sum(y) +end function gh17797 diff --git a/local_packages/numpy/f2py/tests/src/callback/gh18335.f90 b/local_packages/numpy/f2py/tests/src/callback/gh18335.f90 new file mode 100644 index 0000000..92b6d75 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/callback/gh18335.f90 @@ -0,0 +1,17 @@ + ! When gh18335_workaround is defined as an extension, + ! the issue cannot be reproduced. + !subroutine gh18335_workaround(f, y) + ! implicit none + ! external f + ! integer(kind=1) :: y(1) + ! call f(y) + !end subroutine gh18335_workaround + + function gh18335(f) result (r) + implicit none + external f + integer(kind=1) :: y(1), r + y(1) = 123 + call f(y) + r = y(1) + end function gh18335 diff --git a/local_packages/numpy/f2py/tests/src/callback/gh25211.f b/local_packages/numpy/f2py/tests/src/callback/gh25211.f new file mode 100644 index 0000000..ba727a1 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/callback/gh25211.f @@ -0,0 +1,10 @@ + SUBROUTINE FOO(FUN,R) + EXTERNAL FUN + INTEGER I + REAL*8 R, FUN +Cf2py intent(out) r + R = 0D0 + DO I=-5,5 + R = R + FUN(I) + ENDDO + END diff --git a/local_packages/numpy/f2py/tests/src/callback/gh25211.pyf b/local_packages/numpy/f2py/tests/src/callback/gh25211.pyf new file mode 100644 index 0000000..f120111 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/callback/gh25211.pyf @@ -0,0 +1,18 @@ +python module __user__routines + interface + function fun(i) result (r) + integer :: i + real*8 :: r + end function fun + end interface +end python module __user__routines + +python module callback2 + interface + subroutine foo(f,r) + use __user__routines, f=>fun + external f + real*8 intent(out) :: r + end subroutine foo + end interface +end python module callback2 diff --git a/local_packages/numpy/f2py/tests/src/callback/gh26681.f90 b/local_packages/numpy/f2py/tests/src/callback/gh26681.f90 new file mode 100644 index 0000000..00c0ec9 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/callback/gh26681.f90 @@ -0,0 +1,18 @@ +module utils + implicit none + contains + subroutine my_abort(message) + implicit none + character(len=*), intent(in) :: message + !f2py callstatement PyErr_SetString(PyExc_ValueError, message);f2py_success = 0; + !f2py callprotoargument char* + write(0,*) "THIS SHOULD NOT APPEAR" + stop 1 + end subroutine my_abort + + subroutine do_something(message) + !f2py intent(callback, hide) mypy_abort + character(len=*), intent(in) :: message + call mypy_abort(message) + end subroutine do_something +end module utils diff --git a/local_packages/numpy/f2py/tests/src/cli/gh_22819.pyf b/local_packages/numpy/f2py/tests/src/cli/gh_22819.pyf new file mode 100644 index 0000000..8eb5bb1 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/cli/gh_22819.pyf @@ -0,0 +1,6 @@ +python module test_22819 + interface + subroutine hello() + end subroutine hello + end interface +end python module test_22819 diff --git a/local_packages/numpy/f2py/tests/src/cli/hi77.f b/local_packages/numpy/f2py/tests/src/cli/hi77.f new file mode 100644 index 0000000..8b916eb --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/cli/hi77.f @@ -0,0 +1,3 @@ + SUBROUTINE HI + PRINT*, "HELLO WORLD" + END SUBROUTINE diff --git a/local_packages/numpy/f2py/tests/src/cli/hiworld.f90 b/local_packages/numpy/f2py/tests/src/cli/hiworld.f90 new file mode 100644 index 0000000..981f877 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/cli/hiworld.f90 @@ -0,0 +1,3 @@ +function hi() + print*, "Hello World" +end function diff --git a/local_packages/numpy/f2py/tests/src/common/block.f b/local_packages/numpy/f2py/tests/src/common/block.f new file mode 100644 index 0000000..7ea7968 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/common/block.f @@ -0,0 +1,11 @@ + SUBROUTINE INITCB + DOUBLE PRECISION LONG + CHARACTER STRING + INTEGER OK + + COMMON /BLOCK/ LONG, STRING, OK + LONG = 1.0 + STRING = '2' + OK = 3 + RETURN + END diff --git a/local_packages/numpy/f2py/tests/src/common/gh19161.f90 b/local_packages/numpy/f2py/tests/src/common/gh19161.f90 new file mode 100644 index 0000000..a2f4073 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/common/gh19161.f90 @@ -0,0 +1,10 @@ +module typedefmod + use iso_fortran_env, only: real32 +end module typedefmod + +module data + use typedefmod, only: real32 + implicit none + real(kind=real32) :: x + common/test/x +end module data diff --git a/local_packages/numpy/f2py/tests/src/crackfortran/accesstype.f90 b/local_packages/numpy/f2py/tests/src/crackfortran/accesstype.f90 new file mode 100644 index 0000000..e2cbd44 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/crackfortran/accesstype.f90 @@ -0,0 +1,13 @@ +module foo + public + type, private, bind(c) :: a + integer :: i + end type a + type, bind(c) :: b_ + integer :: j + end type b_ + public :: b_ + type :: c + integer :: k + end type c +end module foo diff --git a/local_packages/numpy/f2py/tests/src/crackfortran/common_with_division.f b/local_packages/numpy/f2py/tests/src/crackfortran/common_with_division.f new file mode 100644 index 0000000..4aa12cf --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/crackfortran/common_with_division.f @@ -0,0 +1,17 @@ + subroutine common_with_division + integer lmu,lb,lub,lpmin + parameter (lmu=1) + parameter (lb=20) +c crackfortran fails to parse this +c parameter (lub=(lb-1)*lmu+1) +c crackfortran can successfully parse this though + parameter (lub=lb*lmu-lmu+1) + parameter (lpmin=2) + +c crackfortran fails to parse this correctly +c common /mortmp/ ctmp((lub*(lub+1)*(lub+1))/lpmin+1) + + common /mortmp/ ctmp(lub/lpmin+1) + + return + end diff --git a/local_packages/numpy/f2py/tests/src/crackfortran/data_common.f b/local_packages/numpy/f2py/tests/src/crackfortran/data_common.f new file mode 100644 index 0000000..5ffd865 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/crackfortran/data_common.f @@ -0,0 +1,8 @@ + BLOCK DATA PARAM_INI + COMMON /MYCOM/ MYDATA + DATA MYDATA /0/ + END + SUBROUTINE SUB1 + COMMON /MYCOM/ MYDATA + MYDATA = MYDATA + 1 + END diff --git a/local_packages/numpy/f2py/tests/src/crackfortran/data_multiplier.f b/local_packages/numpy/f2py/tests/src/crackfortran/data_multiplier.f new file mode 100644 index 0000000..19ff8a8 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/crackfortran/data_multiplier.f @@ -0,0 +1,5 @@ + BLOCK DATA MYBLK + IMPLICIT DOUBLE PRECISION (A-H,O-Z) + COMMON /MYCOM/ IVAR1, IVAR2, IVAR3, IVAR4, EVAR5 + DATA IVAR1, IVAR2, IVAR3, IVAR4, EVAR5 /2*3,2*2,0.0D0/ + END diff --git a/local_packages/numpy/f2py/tests/src/crackfortran/data_stmts.f90 b/local_packages/numpy/f2py/tests/src/crackfortran/data_stmts.f90 new file mode 100644 index 0000000..576c5e4 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/crackfortran/data_stmts.f90 @@ -0,0 +1,20 @@ +! gh-23276 +module cmplxdat + implicit none + integer :: i, j + real :: x, y + real, dimension(2) :: z + real(kind=8) :: pi + complex(kind=8), target :: medium_ref_index + complex(kind=8), target :: ref_index_one, ref_index_two + complex(kind=8), dimension(2) :: my_array + real(kind=8), dimension(3) :: my_real_array = (/1.0d0, 2.0d0, 3.0d0/) + + data i, j / 2, 3 / + data x, y / 1.5, 2.0 / + data z / 3.5, 7.0 / + data medium_ref_index / (1.d0, 0.d0) / + data ref_index_one, ref_index_two / (13.0d0, 21.0d0), (-30.0d0, 43.0d0) / + data my_array / (1.0d0, 2.0d0), (-3.0d0, 4.0d0) / + data pi / 3.1415926535897932384626433832795028841971693993751058209749445923078164062d0 / +end module cmplxdat diff --git a/local_packages/numpy/f2py/tests/src/crackfortran/data_with_comments.f b/local_packages/numpy/f2py/tests/src/crackfortran/data_with_comments.f new file mode 100644 index 0000000..4128f00 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/crackfortran/data_with_comments.f @@ -0,0 +1,8 @@ + BLOCK DATA PARAM_INI + COMMON /MYCOM/ MYTAB + INTEGER MYTAB(3) + DATA MYTAB/ + * 0, ! 1 and more commenty stuff + * 4, ! 2 + * 0 / + END diff --git a/local_packages/numpy/f2py/tests/src/crackfortran/foo_deps.f90 b/local_packages/numpy/f2py/tests/src/crackfortran/foo_deps.f90 new file mode 100644 index 0000000..e327b25 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/crackfortran/foo_deps.f90 @@ -0,0 +1,6 @@ +module foo + type bar + character(len = 4) :: text + end type bar + type(bar), parameter :: abar = bar('abar') +end module foo diff --git a/local_packages/numpy/f2py/tests/src/crackfortran/gh15035.f b/local_packages/numpy/f2py/tests/src/crackfortran/gh15035.f new file mode 100644 index 0000000..1bb2e67 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/crackfortran/gh15035.f @@ -0,0 +1,16 @@ + subroutine subb(k) + real(8), intent(inout) :: k(:) + k=k+1 + endsubroutine + + subroutine subc(w,k) + real(8), intent(in) :: w(:) + real(8), intent(out) :: k(size(w)) + k=w+1 + endsubroutine + + function t0(value) + character value + character t0 + t0 = value + endfunction diff --git a/local_packages/numpy/f2py/tests/src/crackfortran/gh17859.f b/local_packages/numpy/f2py/tests/src/crackfortran/gh17859.f new file mode 100644 index 0000000..9959538 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/crackfortran/gh17859.f @@ -0,0 +1,12 @@ + integer(8) function external_as_statement(fcn) + implicit none + external fcn + integer(8) :: fcn + external_as_statement = fcn(0) + end + + integer(8) function external_as_attribute(fcn) + implicit none + integer(8), external :: fcn + external_as_attribute = fcn(0) + end diff --git a/local_packages/numpy/f2py/tests/src/crackfortran/gh22648.pyf b/local_packages/numpy/f2py/tests/src/crackfortran/gh22648.pyf new file mode 100644 index 0000000..b3454f1 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/crackfortran/gh22648.pyf @@ -0,0 +1,7 @@ +python module iri16py ! in + interface ! in :iri16py + block data ! in :iri16py:iridreg_modified.for + COMMON /fircom/ eden,tabhe,tabla,tabmo,tabza,tabfl + end block data + end interface +end python module iri16py diff --git a/local_packages/numpy/f2py/tests/src/crackfortran/gh23533.f b/local_packages/numpy/f2py/tests/src/crackfortran/gh23533.f new file mode 100644 index 0000000..db522af --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/crackfortran/gh23533.f @@ -0,0 +1,5 @@ + SUBROUTINE EXAMPLE( ) + IF( .TRUE. ) THEN + CALL DO_SOMETHING() + END IF ! ** .TRUE. ** + END diff --git a/local_packages/numpy/f2py/tests/src/crackfortran/gh23598.f90 b/local_packages/numpy/f2py/tests/src/crackfortran/gh23598.f90 new file mode 100644 index 0000000..e0dffb5 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/crackfortran/gh23598.f90 @@ -0,0 +1,4 @@ +integer function intproduct(a, b) result(res) + integer, intent(in) :: a, b + res = a*b +end function diff --git a/local_packages/numpy/f2py/tests/src/crackfortran/gh23598Warn.f90 b/local_packages/numpy/f2py/tests/src/crackfortran/gh23598Warn.f90 new file mode 100644 index 0000000..3b44efc --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/crackfortran/gh23598Warn.f90 @@ -0,0 +1,11 @@ +module test_bug + implicit none + private + public :: intproduct + +contains + integer function intproduct(a, b) result(res) + integer, intent(in) :: a, b + res = a*b + end function +end module diff --git a/local_packages/numpy/f2py/tests/src/crackfortran/gh23879.f90 b/local_packages/numpy/f2py/tests/src/crackfortran/gh23879.f90 new file mode 100644 index 0000000..fac262d --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/crackfortran/gh23879.f90 @@ -0,0 +1,20 @@ +module gh23879 + implicit none + private + public :: foo + + contains + + subroutine foo(a, b) + integer, intent(in) :: a + integer, intent(out) :: b + b = a + call bar(b) + end subroutine + + subroutine bar(x) + integer, intent(inout) :: x + x = 2*x + end subroutine + + end module gh23879 diff --git a/local_packages/numpy/f2py/tests/src/crackfortran/gh27697.f90 b/local_packages/numpy/f2py/tests/src/crackfortran/gh27697.f90 new file mode 100644 index 0000000..a5eae4e --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/crackfortran/gh27697.f90 @@ -0,0 +1,12 @@ +module utils + implicit none + contains + subroutine my_abort(message) + implicit none + character(len=*), intent(in) :: message + !f2py callstatement PyErr_SetString(PyExc_ValueError, message);f2py_success = 0; + !f2py callprotoargument char* + write(0,*) "THIS SHOULD NOT APPEAR" + stop 1 + end subroutine my_abort +end module utils diff --git a/local_packages/numpy/f2py/tests/src/crackfortran/gh2848.f90 b/local_packages/numpy/f2py/tests/src/crackfortran/gh2848.f90 new file mode 100644 index 0000000..31ea932 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/crackfortran/gh2848.f90 @@ -0,0 +1,13 @@ + subroutine gh2848( & + ! first 2 parameters + par1, par2,& + ! last 2 parameters + par3, par4) + + integer, intent(in) :: par1, par2 + integer, intent(out) :: par3, par4 + + par3 = par1 + par4 = par2 + + end subroutine gh2848 diff --git a/local_packages/numpy/f2py/tests/src/crackfortran/operators.f90 b/local_packages/numpy/f2py/tests/src/crackfortran/operators.f90 new file mode 100644 index 0000000..1d060a3 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/crackfortran/operators.f90 @@ -0,0 +1,49 @@ +module foo + type bar + character(len = 32) :: item + end type bar + interface operator(.item.) + module procedure item_int, item_real + end interface operator(.item.) + interface operator(==) + module procedure items_are_equal + end interface operator(==) + interface assignment(=) + module procedure get_int, get_real + end interface assignment(=) +contains + function item_int(val) result(elem) + integer, intent(in) :: val + type(bar) :: elem + + write(elem%item, "(I32)") val + end function item_int + + function item_real(val) result(elem) + real, intent(in) :: val + type(bar) :: elem + + write(elem%item, "(1PE32.12)") val + end function item_real + + function items_are_equal(val1, val2) result(equal) + type(bar), intent(in) :: val1, val2 + logical :: equal + + equal = (val1%item == val2%item) + end function items_are_equal + + subroutine get_real(rval, item) + real, intent(out) :: rval + type(bar), intent(in) :: item + + read(item%item, *) rval + end subroutine get_real + + subroutine get_int(rval, item) + integer, intent(out) :: rval + type(bar), intent(in) :: item + + read(item%item, *) rval + end subroutine get_int +end module foo diff --git a/local_packages/numpy/f2py/tests/src/crackfortran/privatemod.f90 b/local_packages/numpy/f2py/tests/src/crackfortran/privatemod.f90 new file mode 100644 index 0000000..2674c21 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/crackfortran/privatemod.f90 @@ -0,0 +1,11 @@ +module foo + private + integer :: a + public :: setA + integer :: b +contains + subroutine setA(v) + integer, intent(in) :: v + a = v + end subroutine setA +end module foo diff --git a/local_packages/numpy/f2py/tests/src/crackfortran/publicmod.f90 b/local_packages/numpy/f2py/tests/src/crackfortran/publicmod.f90 new file mode 100644 index 0000000..1db76e3 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/crackfortran/publicmod.f90 @@ -0,0 +1,10 @@ +module foo + public + integer, private :: a + public :: setA +contains + subroutine setA(v) + integer, intent(in) :: v + a = v + end subroutine setA +end module foo diff --git a/local_packages/numpy/f2py/tests/src/crackfortran/pubprivmod.f90 b/local_packages/numpy/f2py/tests/src/crackfortran/pubprivmod.f90 new file mode 100644 index 0000000..46bef7c --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/crackfortran/pubprivmod.f90 @@ -0,0 +1,10 @@ +module foo + public + integer, private :: a + integer :: b +contains + subroutine setA(v) + integer, intent(in) :: v + a = v + end subroutine setA +end module foo diff --git a/local_packages/numpy/f2py/tests/src/crackfortran/unicode_comment.f90 b/local_packages/numpy/f2py/tests/src/crackfortran/unicode_comment.f90 new file mode 100644 index 0000000..13515ce --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/crackfortran/unicode_comment.f90 @@ -0,0 +1,4 @@ +subroutine foo(x) + real(8), intent(in) :: x + ! Écrit à l'écran la valeur de x +end subroutine diff --git a/local_packages/numpy/f2py/tests/src/f2cmap/.f2py_f2cmap b/local_packages/numpy/f2py/tests/src/f2cmap/.f2py_f2cmap new file mode 100644 index 0000000..a4425f8 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/f2cmap/.f2py_f2cmap @@ -0,0 +1 @@ +dict(real=dict(real32='float', real64='double'), integer=dict(int64='long_long')) diff --git a/local_packages/numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90 b/local_packages/numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90 new file mode 100644 index 0000000..1e1dc1d --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90 @@ -0,0 +1,9 @@ + subroutine func1(n, x, res) + use, intrinsic :: iso_fortran_env, only: int64, real64 + implicit none + integer(int64), intent(in) :: n + real(real64), intent(in) :: x(n) + real(real64), intent(out) :: res +!f2py intent(hide) :: n + res = sum(x) + end diff --git a/local_packages/numpy/f2py/tests/src/isocintrin/isoCtests.f90 b/local_packages/numpy/f2py/tests/src/isocintrin/isoCtests.f90 new file mode 100644 index 0000000..765f7c1 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/isocintrin/isoCtests.f90 @@ -0,0 +1,34 @@ + module coddity + use iso_c_binding, only: c_double, c_int, c_int64_t + implicit none + contains + subroutine c_add(a, b, c) bind(c, name="c_add") + real(c_double), intent(in) :: a, b + real(c_double), intent(out) :: c + c = a + b + end subroutine c_add + ! gh-9693 + function wat(x, y) result(z) bind(c) + integer(c_int), intent(in) :: x, y + integer(c_int) :: z + + z = x + 7 + end function wat + ! gh-25207 + subroutine c_add_int64(a, b, c) bind(c) + integer(c_int64_t), intent(in) :: a, b + integer(c_int64_t), intent(out) :: c + c = a + b + end subroutine c_add_int64 + ! gh-25207 + subroutine add_arr(A, B, C) + integer(c_int64_t), intent(in) :: A(3) + integer(c_int64_t), intent(in) :: B(3) + integer(c_int64_t), intent(out) :: C(3) + integer :: j + + do j = 1, 3 + C(j) = A(j)+B(j) + end do + end subroutine + end module coddity diff --git a/local_packages/numpy/f2py/tests/src/kind/foo.f90 b/local_packages/numpy/f2py/tests/src/kind/foo.f90 new file mode 100644 index 0000000..d3d15cf --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/kind/foo.f90 @@ -0,0 +1,20 @@ + + +subroutine selectedrealkind(p, r, res) + implicit none + + integer, intent(in) :: p, r + !f2py integer :: r=0 + integer, intent(out) :: res + res = selected_real_kind(p, r) + +end subroutine + +subroutine selectedintkind(p, res) + implicit none + + integer, intent(in) :: p + integer, intent(out) :: res + res = selected_int_kind(p) + +end subroutine diff --git a/local_packages/numpy/f2py/tests/src/mixed/foo.f b/local_packages/numpy/f2py/tests/src/mixed/foo.f new file mode 100644 index 0000000..c347425 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/mixed/foo.f @@ -0,0 +1,5 @@ + subroutine bar11(a) +cf2py intent(out) a + integer a + a = 11 + end diff --git a/local_packages/numpy/f2py/tests/src/mixed/foo_fixed.f90 b/local_packages/numpy/f2py/tests/src/mixed/foo_fixed.f90 new file mode 100644 index 0000000..7543a6a --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/mixed/foo_fixed.f90 @@ -0,0 +1,8 @@ + module foo_fixed + contains + subroutine bar12(a) +!f2py intent(out) a + integer a + a = 12 + end subroutine bar12 + end module foo_fixed diff --git a/local_packages/numpy/f2py/tests/src/mixed/foo_free.f90 b/local_packages/numpy/f2py/tests/src/mixed/foo_free.f90 new file mode 100644 index 0000000..c1b641f --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/mixed/foo_free.f90 @@ -0,0 +1,8 @@ +module foo_free +contains + subroutine bar13(a) + !f2py intent(out) a + integer a + a = 13 + end subroutine bar13 +end module foo_free diff --git a/local_packages/numpy/f2py/tests/src/modules/gh25337/data.f90 b/local_packages/numpy/f2py/tests/src/modules/gh25337/data.f90 new file mode 100644 index 0000000..483d13c --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/modules/gh25337/data.f90 @@ -0,0 +1,8 @@ +module data + real(8) :: shift +contains + subroutine set_shift(in_shift) + real(8), intent(in) :: in_shift + shift = in_shift + end subroutine set_shift +end module data diff --git a/local_packages/numpy/f2py/tests/src/modules/gh25337/use_data.f90 b/local_packages/numpy/f2py/tests/src/modules/gh25337/use_data.f90 new file mode 100644 index 0000000..b3fae8b --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/modules/gh25337/use_data.f90 @@ -0,0 +1,6 @@ +subroutine shift_a(dim_a, a) + use data, only: shift + integer, intent(in) :: dim_a + real(8), intent(inout), dimension(dim_a) :: a + a = a + shift +end subroutine shift_a diff --git a/local_packages/numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90 b/local_packages/numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90 new file mode 100644 index 0000000..07adce5 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90 @@ -0,0 +1,21 @@ + module mod2 + implicit none + private mod2_func1 + contains + + subroutine mod2_func1() + print*, "mod2_func1" + end subroutine mod2_func1 + + end module mod2 + + module mod1 + implicit none + private :: mod1_func1 + contains + + subroutine mod1_func1() + print*, "mod1_func1" + end subroutine mod1_func1 + + end module mod1 diff --git a/local_packages/numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90 b/local_packages/numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90 new file mode 100644 index 0000000..b7fb95b --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90 @@ -0,0 +1,21 @@ + module mod2 + implicit none + PUBLIC :: mod2_func1 + contains + + subroutine mod2_func1() + print*, "mod2_func1" + end subroutine mod2_func1 + + end module mod2 + + module mod1 + implicit none + PUBLIC :: mod1_func1 + contains + + subroutine mod1_func1() + print*, "mod1_func1" + end subroutine mod1_func1 + + end module mod1 diff --git a/local_packages/numpy/f2py/tests/src/modules/module_data_docstring.f90 b/local_packages/numpy/f2py/tests/src/modules/module_data_docstring.f90 new file mode 100644 index 0000000..4505e0c --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/modules/module_data_docstring.f90 @@ -0,0 +1,12 @@ +module mod + integer :: i + integer :: x(4) + real, dimension(2,3) :: a + real, allocatable, dimension(:,:) :: b +contains + subroutine foo + integer :: k + k = 1 + a(1,2) = a(1,2)+3 + end subroutine foo +end module mod diff --git a/local_packages/numpy/f2py/tests/src/modules/use_modules.f90 b/local_packages/numpy/f2py/tests/src/modules/use_modules.f90 new file mode 100644 index 0000000..aa40c86 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/modules/use_modules.f90 @@ -0,0 +1,20 @@ +module mathops + implicit none +contains + function add(a, b) result(c) + integer, intent(in) :: a, b + integer :: c + c = a + b + end function add +end module mathops + +module useops + use mathops, only: add + implicit none +contains + function sum_and_double(a, b) result(d) + integer, intent(in) :: a, b + integer :: d + d = 2 * add(a, b) + end function sum_and_double +end module useops diff --git a/local_packages/numpy/f2py/tests/src/negative_bounds/issue_20853.f90 b/local_packages/numpy/f2py/tests/src/negative_bounds/issue_20853.f90 new file mode 100644 index 0000000..bf1fa92 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/negative_bounds/issue_20853.f90 @@ -0,0 +1,7 @@ +subroutine foo(is_, ie_, arr, tout) + implicit none + integer :: is_,ie_ + real, intent(in) :: arr(is_:ie_) + real, intent(out) :: tout(is_:ie_) + tout = arr +end diff --git a/local_packages/numpy/f2py/tests/src/parameter/constant_array.f90 b/local_packages/numpy/f2py/tests/src/parameter/constant_array.f90 new file mode 100644 index 0000000..9a6bf81 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/parameter/constant_array.f90 @@ -0,0 +1,45 @@ +! Check that parameter arrays are correctly intercepted. +subroutine foo_array(x, y, z) + implicit none + integer, parameter :: dp = selected_real_kind(15) + integer, parameter :: pa = 2 + integer, parameter :: intparamarray(2) = (/ 3, 5 /) + integer, dimension(pa), parameter :: pb = (/ 2, 10 /) + integer, parameter, dimension(intparamarray(1)) :: pc = (/ 2, 10, 20 /) + real(dp), parameter :: doubleparamarray(3) = (/ 3.14_dp, 4._dp, 6.44_dp /) + real(dp), intent(inout) :: x(intparamarray(1)) + real(dp), intent(inout) :: y(intparamarray(2)) + real(dp), intent(out) :: z + + x = x/pb(2) + y = y*pc(2) + z = doubleparamarray(1)*doubleparamarray(2) + doubleparamarray(3) + + return +end subroutine + +subroutine foo_array_any_index(x, y) + implicit none + integer, parameter :: dp = selected_real_kind(15) + integer, parameter, dimension(-1:1) :: myparamarray = (/ 6, 3, 1 /) + integer, parameter, dimension(2) :: nested = (/ 2, 0 /) + integer, parameter :: dim = 2 + real(dp), intent(in) :: x(myparamarray(-1)) + real(dp), intent(out) :: y(nested(1), myparamarray(nested(dim))) + + y = reshape(x, (/nested(1), myparamarray(nested(2))/)) + + return +end subroutine + +subroutine foo_array_delims(x) + implicit none + integer, parameter :: dp = selected_real_kind(15) + integer, parameter, dimension(2) :: myparamarray = (/ (6), 1 /) + integer, parameter, dimension(3) :: test = (/2, 1, (3)/) + real(dp), intent(out) :: x + + x = myparamarray(1)+test(3) + + return +end subroutine diff --git a/local_packages/numpy/f2py/tests/src/parameter/constant_both.f90 b/local_packages/numpy/f2py/tests/src/parameter/constant_both.f90 new file mode 100644 index 0000000..ac90ced --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/parameter/constant_both.f90 @@ -0,0 +1,57 @@ +! Check that parameters are correct intercepted. +! Constants with comma separations are commonly +! used, for instance Pi = 3._dp +subroutine foo(x) + implicit none + integer, parameter :: sp = selected_real_kind(6) + integer, parameter :: dp = selected_real_kind(15) + integer, parameter :: ii = selected_int_kind(9) + integer, parameter :: il = selected_int_kind(18) + real(dp), intent(inout) :: x + dimension x(3) + real(sp), parameter :: three_s = 3._sp + real(dp), parameter :: three_d = 3._dp + integer(ii), parameter :: three_i = 3_ii + integer(il), parameter :: three_l = 3_il + x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l + x(2) = x(2) * three_s + x(3) = x(3) * three_l + return +end subroutine + + +subroutine foo_no(x) + implicit none + integer, parameter :: sp = selected_real_kind(6) + integer, parameter :: dp = selected_real_kind(15) + integer, parameter :: ii = selected_int_kind(9) + integer, parameter :: il = selected_int_kind(18) + real(dp), intent(inout) :: x + dimension x(3) + real(sp), parameter :: three_s = 3. + real(dp), parameter :: three_d = 3. + integer(ii), parameter :: three_i = 3 + integer(il), parameter :: three_l = 3 + x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l + x(2) = x(2) * three_s + x(3) = x(3) * three_l + return +end subroutine + +subroutine foo_sum(x) + implicit none + integer, parameter :: sp = selected_real_kind(6) + integer, parameter :: dp = selected_real_kind(15) + integer, parameter :: ii = selected_int_kind(9) + integer, parameter :: il = selected_int_kind(18) + real(dp), intent(inout) :: x + dimension x(3) + real(sp), parameter :: three_s = 2._sp + 1._sp + real(dp), parameter :: three_d = 1._dp + 2._dp + integer(ii), parameter :: three_i = 2_ii + 1_ii + integer(il), parameter :: three_l = 1_il + 2_il + x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l + x(2) = x(2) * three_s + x(3) = x(3) * three_l + return +end subroutine diff --git a/local_packages/numpy/f2py/tests/src/parameter/constant_compound.f90 b/local_packages/numpy/f2py/tests/src/parameter/constant_compound.f90 new file mode 100644 index 0000000..e51f5e9 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/parameter/constant_compound.f90 @@ -0,0 +1,15 @@ +! Check that parameters are correct intercepted. +! Constants with comma separations are commonly +! used, for instance Pi = 3._dp +subroutine foo_compound_int(x) + implicit none + integer, parameter :: ii = selected_int_kind(9) + integer(ii), intent(inout) :: x + dimension x(3) + integer(ii), parameter :: three = 3_ii + integer(ii), parameter :: two = 2_ii + integer(ii), parameter :: six = three * 1_ii * two + + x(1) = x(1) + x(2) + x(3) * six + return +end subroutine diff --git a/local_packages/numpy/f2py/tests/src/parameter/constant_integer.f90 b/local_packages/numpy/f2py/tests/src/parameter/constant_integer.f90 new file mode 100644 index 0000000..aaa83d2 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/parameter/constant_integer.f90 @@ -0,0 +1,22 @@ +! Check that parameters are correct intercepted. +! Constants with comma separations are commonly +! used, for instance Pi = 3._dp +subroutine foo_int(x) + implicit none + integer, parameter :: ii = selected_int_kind(9) + integer(ii), intent(inout) :: x + dimension x(3) + integer(ii), parameter :: three = 3_ii + x(1) = x(1) + x(2) + x(3) * three + return +end subroutine + +subroutine foo_long(x) + implicit none + integer, parameter :: ii = selected_int_kind(18) + integer(ii), intent(inout) :: x + dimension x(3) + integer(ii), parameter :: three = 3_ii + x(1) = x(1) + x(2) + x(3) * three + return +end subroutine diff --git a/local_packages/numpy/f2py/tests/src/parameter/constant_non_compound.f90 b/local_packages/numpy/f2py/tests/src/parameter/constant_non_compound.f90 new file mode 100644 index 0000000..62c9a5b --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/parameter/constant_non_compound.f90 @@ -0,0 +1,23 @@ +! Check that parameters are correct intercepted. +! Specifically that types of constants without +! compound kind specs are correctly inferred +! adapted Gibbs iteration code from pymc +! for this test case +subroutine foo_non_compound_int(x) + implicit none + integer, parameter :: ii = selected_int_kind(9) + + integer(ii) maxiterates + parameter (maxiterates=2) + + integer(ii) maxseries + parameter (maxseries=2) + + integer(ii) wasize + parameter (wasize=maxiterates*maxseries) + integer(ii), intent(inout) :: x + dimension x(wasize) + + x(1) = x(1) + x(2) + x(3) + x(4) * wasize + return +end subroutine diff --git a/local_packages/numpy/f2py/tests/src/parameter/constant_real.f90 b/local_packages/numpy/f2py/tests/src/parameter/constant_real.f90 new file mode 100644 index 0000000..02ac9dd --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/parameter/constant_real.f90 @@ -0,0 +1,23 @@ +! Check that parameters are correct intercepted. +! Constants with comma separations are commonly +! used, for instance Pi = 3._dp +subroutine foo_single(x) + implicit none + integer, parameter :: rp = selected_real_kind(6) + real(rp), intent(inout) :: x + dimension x(3) + real(rp), parameter :: three = 3._rp + x(1) = x(1) + x(2) + x(3) * three + return +end subroutine + +subroutine foo_double(x) + implicit none + integer, parameter :: rp = selected_real_kind(15) + real(rp), intent(inout) :: x + dimension x(3) + real(rp), parameter :: three = 3._rp + x(1) = x(1) + x(2) + x(3) * three + return +end subroutine + diff --git a/local_packages/numpy/f2py/tests/src/quoted_character/foo.f b/local_packages/numpy/f2py/tests/src/quoted_character/foo.f new file mode 100644 index 0000000..9dc1cfa --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/quoted_character/foo.f @@ -0,0 +1,14 @@ + SUBROUTINE FOO(OUT1, OUT2, OUT3, OUT4, OUT5, OUT6) + CHARACTER SINGLE, DOUBLE, SEMICOL, EXCLA, OPENPAR, CLOSEPAR + PARAMETER (SINGLE="'", DOUBLE='"', SEMICOL=';', EXCLA="!", + 1 OPENPAR="(", CLOSEPAR=")") + CHARACTER OUT1, OUT2, OUT3, OUT4, OUT5, OUT6 +Cf2py intent(out) OUT1, OUT2, OUT3, OUT4, OUT5, OUT6 + OUT1 = SINGLE + OUT2 = DOUBLE + OUT3 = SEMICOL + OUT4 = EXCLA + OUT5 = OPENPAR + OUT6 = CLOSEPAR + RETURN + END diff --git a/local_packages/numpy/f2py/tests/src/regression/AB.inc b/local_packages/numpy/f2py/tests/src/regression/AB.inc new file mode 100644 index 0000000..8a02f63 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/regression/AB.inc @@ -0,0 +1 @@ +real(8) b, n, m diff --git a/local_packages/numpy/f2py/tests/src/regression/assignOnlyModule.f90 b/local_packages/numpy/f2py/tests/src/regression/assignOnlyModule.f90 new file mode 100644 index 0000000..479ac79 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/regression/assignOnlyModule.f90 @@ -0,0 +1,25 @@ + MODULE MOD_TYPES + INTEGER, PARAMETER :: SP = SELECTED_REAL_KIND(6, 37) + INTEGER, PARAMETER :: DP = SELECTED_REAL_KIND(15, 307) + END MODULE +! + MODULE F_GLOBALS + USE MOD_TYPES + IMPLICIT NONE + INTEGER, PARAMETER :: N_MAX = 16 + INTEGER, PARAMETER :: I_MAX = 18 + INTEGER, PARAMETER :: J_MAX = 72 + REAL(SP) :: XREF + END MODULE F_GLOBALS +! + SUBROUTINE DUMMY () +! + USE F_GLOBALS + USE MOD_TYPES + IMPLICIT NONE +! + REAL(SP) :: MINIMAL + MINIMAL = 0.01*XREF + RETURN +! + END SUBROUTINE DUMMY diff --git a/local_packages/numpy/f2py/tests/src/regression/datonly.f90 b/local_packages/numpy/f2py/tests/src/regression/datonly.f90 new file mode 100644 index 0000000..67fc4ac --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/regression/datonly.f90 @@ -0,0 +1,17 @@ +module datonly + implicit none + integer, parameter :: max_value = 100 + real, dimension(:), allocatable :: data_array +end module datonly + +module dat + implicit none + integer, parameter :: max_= 1009 +end module dat + +subroutine simple_subroutine(ain, aout) + use dat, only: max_ + integer, intent(in) :: ain + integer, intent(out) :: aout + aout = ain + max_ +end subroutine simple_subroutine diff --git a/local_packages/numpy/f2py/tests/src/regression/f77comments.f b/local_packages/numpy/f2py/tests/src/regression/f77comments.f new file mode 100644 index 0000000..452a01a --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/regression/f77comments.f @@ -0,0 +1,26 @@ + SUBROUTINE TESTSUB( + & INPUT1, INPUT2, !Input + & OUTPUT1, OUTPUT2) !Output + + IMPLICIT NONE + INTEGER, INTENT(IN) :: INPUT1, INPUT2 + INTEGER, INTENT(OUT) :: OUTPUT1, OUTPUT2 + + OUTPUT1 = INPUT1 + INPUT2 + OUTPUT2 = INPUT1 * INPUT2 + + RETURN + END SUBROUTINE TESTSUB + + SUBROUTINE TESTSUB2(OUTPUT) + IMPLICIT NONE + INTEGER, PARAMETER :: N = 10 ! Array dimension + REAL, INTENT(OUT) :: OUTPUT(N) + INTEGER :: I + + DO I = 1, N + OUTPUT(I) = I * 2.0 + END DO + + RETURN + END diff --git a/local_packages/numpy/f2py/tests/src/regression/f77fixedform.f95 b/local_packages/numpy/f2py/tests/src/regression/f77fixedform.f95 new file mode 100644 index 0000000..e47a13f --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/regression/f77fixedform.f95 @@ -0,0 +1,5 @@ +C This is an invalid file, but it does compile with -ffixed-form + subroutine mwe( + & x) + real x + end subroutine mwe diff --git a/local_packages/numpy/f2py/tests/src/regression/f90continuation.f90 b/local_packages/numpy/f2py/tests/src/regression/f90continuation.f90 new file mode 100644 index 0000000..879e716 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/regression/f90continuation.f90 @@ -0,0 +1,9 @@ +SUBROUTINE TESTSUB(INPUT1, & ! Hello +! commenty +INPUT2, OUTPUT1, OUTPUT2) ! more comments + INTEGER, INTENT(IN) :: INPUT1, INPUT2 + INTEGER, INTENT(OUT) :: OUTPUT1, OUTPUT2 + OUTPUT1 = INPUT1 + & + INPUT2 + OUTPUT2 = INPUT1 * INPUT2 +END SUBROUTINE TESTSUB diff --git a/local_packages/numpy/f2py/tests/src/regression/incfile.f90 b/local_packages/numpy/f2py/tests/src/regression/incfile.f90 new file mode 100644 index 0000000..276ef3a --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/regression/incfile.f90 @@ -0,0 +1,5 @@ +function add(n,m) result(b) + implicit none + include 'AB.inc' + b = n + m +end function add diff --git a/local_packages/numpy/f2py/tests/src/regression/inout.f90 b/local_packages/numpy/f2py/tests/src/regression/inout.f90 new file mode 100644 index 0000000..80cdad9 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/regression/inout.f90 @@ -0,0 +1,9 @@ +! Check that intent(in out) translates as intent(inout). +! The separation seems to be a common usage. + subroutine foo(x) + implicit none + real(4), intent(in out) :: x + dimension x(3) + x(1) = x(1) + x(2) + x(3) + return + end diff --git a/local_packages/numpy/f2py/tests/src/regression/lower_f2py_fortran.f90 b/local_packages/numpy/f2py/tests/src/regression/lower_f2py_fortran.f90 new file mode 100644 index 0000000..1c4b8c1 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/regression/lower_f2py_fortran.f90 @@ -0,0 +1,5 @@ +subroutine inquire_next(IU) + IMPLICIT NONE + integer :: IU + !f2py intent(in) IU +end subroutine diff --git a/local_packages/numpy/f2py/tests/src/regression/mod_derived_types.f90 b/local_packages/numpy/f2py/tests/src/regression/mod_derived_types.f90 new file mode 100644 index 0000000..7692c82 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/regression/mod_derived_types.f90 @@ -0,0 +1,23 @@ +module mtypes + implicit none + integer, parameter :: value1 = 100 + type :: master_data + integer :: idat = 200 + end type master_data + type(master_data) :: masterdata +end module mtypes + + +subroutine no_type_subroutine(ain, aout) + use mtypes, only: value1 + integer, intent(in) :: ain + integer, intent(out) :: aout + aout = ain + value1 +end subroutine no_type_subroutine + +subroutine type_subroutine(ain, aout) + use mtypes, only: masterdata + integer, intent(in) :: ain + integer, intent(out) :: aout + aout = ain + masterdata%idat +end subroutine type_subroutine \ No newline at end of file diff --git a/local_packages/numpy/f2py/tests/src/return_character/foo77.f b/local_packages/numpy/f2py/tests/src/return_character/foo77.f new file mode 100644 index 0000000..facae10 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/return_character/foo77.f @@ -0,0 +1,45 @@ + function t0(value) + character value + character t0 + t0 = value + end + function t1(value) + character*1 value + character*1 t1 + t1 = value + end + function t5(value) + character*5 value + character*5 t5 + t5 = value + end + function ts(value) + character*(*) value + character*(*) ts + ts = value + end + + subroutine s0(t0,value) + character value + character t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s1(t1,value) + character*1 value + character*1 t1 +cf2py intent(out) t1 + t1 = value + end + subroutine s5(t5,value) + character*5 value + character*5 t5 +cf2py intent(out) t5 + t5 = value + end + subroutine ss(ts,value) + character*(*) value + character*10 ts +cf2py intent(out) ts + ts = value + end diff --git a/local_packages/numpy/f2py/tests/src/return_character/foo90.f90 b/local_packages/numpy/f2py/tests/src/return_character/foo90.f90 new file mode 100644 index 0000000..36182bc --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/return_character/foo90.f90 @@ -0,0 +1,48 @@ +module f90_return_char + contains + function t0(value) + character :: value + character :: t0 + t0 = value + end function t0 + function t1(value) + character(len=1) :: value + character(len=1) :: t1 + t1 = value + end function t1 + function t5(value) + character(len=5) :: value + character(len=5) :: t5 + t5 = value + end function t5 + function ts(value) + character(len=*) :: value + character(len=10) :: ts + ts = value + end function ts + + subroutine s0(t0,value) + character :: value + character :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s1(t1,value) + character(len=1) :: value + character(len=1) :: t1 +!f2py intent(out) t1 + t1 = value + end subroutine s1 + subroutine s5(t5,value) + character(len=5) :: value + character(len=5) :: t5 +!f2py intent(out) t5 + t5 = value + end subroutine s5 + subroutine ss(ts,value) + character(len=*) :: value + character(len=10) :: ts +!f2py intent(out) ts + ts = value + end subroutine ss +end module f90_return_char diff --git a/local_packages/numpy/f2py/tests/src/return_complex/foo77.f b/local_packages/numpy/f2py/tests/src/return_complex/foo77.f new file mode 100644 index 0000000..37a1ec8 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/return_complex/foo77.f @@ -0,0 +1,45 @@ + function t0(value) + complex value + complex t0 + t0 = value + end + function t8(value) + complex*8 value + complex*8 t8 + t8 = value + end + function t16(value) + complex*16 value + complex*16 t16 + t16 = value + end + function td(value) + double complex value + double complex td + td = value + end + + subroutine s0(t0,value) + complex value + complex t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s8(t8,value) + complex*8 value + complex*8 t8 +cf2py intent(out) t8 + t8 = value + end + subroutine s16(t16,value) + complex*16 value + complex*16 t16 +cf2py intent(out) t16 + t16 = value + end + subroutine sd(td,value) + double complex value + double complex td +cf2py intent(out) td + td = value + end diff --git a/local_packages/numpy/f2py/tests/src/return_complex/foo90.f90 b/local_packages/numpy/f2py/tests/src/return_complex/foo90.f90 new file mode 100644 index 0000000..adc27b4 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/return_complex/foo90.f90 @@ -0,0 +1,48 @@ +module f90_return_complex + contains + function t0(value) + complex :: value + complex :: t0 + t0 = value + end function t0 + function t8(value) + complex(kind=4) :: value + complex(kind=4) :: t8 + t8 = value + end function t8 + function t16(value) + complex(kind=8) :: value + complex(kind=8) :: t16 + t16 = value + end function t16 + function td(value) + double complex :: value + double complex :: td + td = value + end function td + + subroutine s0(t0,value) + complex :: value + complex :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s8(t8,value) + complex(kind=4) :: value + complex(kind=4) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 + subroutine s16(t16,value) + complex(kind=8) :: value + complex(kind=8) :: t16 +!f2py intent(out) t16 + t16 = value + end subroutine s16 + subroutine sd(td,value) + double complex :: value + double complex :: td +!f2py intent(out) td + td = value + end subroutine sd +end module f90_return_complex diff --git a/local_packages/numpy/f2py/tests/src/return_integer/foo77.f b/local_packages/numpy/f2py/tests/src/return_integer/foo77.f new file mode 100644 index 0000000..1ab895b --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/return_integer/foo77.f @@ -0,0 +1,56 @@ + function t0(value) + integer value + integer t0 + t0 = value + end + function t1(value) + integer*1 value + integer*1 t1 + t1 = value + end + function t2(value) + integer*2 value + integer*2 t2 + t2 = value + end + function t4(value) + integer*4 value + integer*4 t4 + t4 = value + end + function t8(value) + integer*8 value + integer*8 t8 + t8 = value + end + + subroutine s0(t0,value) + integer value + integer t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s1(t1,value) + integer*1 value + integer*1 t1 +cf2py intent(out) t1 + t1 = value + end + subroutine s2(t2,value) + integer*2 value + integer*2 t2 +cf2py intent(out) t2 + t2 = value + end + subroutine s4(t4,value) + integer*4 value + integer*4 t4 +cf2py intent(out) t4 + t4 = value + end + subroutine s8(t8,value) + integer*8 value + integer*8 t8 +cf2py intent(out) t8 + t8 = value + end diff --git a/local_packages/numpy/f2py/tests/src/return_integer/foo90.f90 b/local_packages/numpy/f2py/tests/src/return_integer/foo90.f90 new file mode 100644 index 0000000..ba9249a --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/return_integer/foo90.f90 @@ -0,0 +1,59 @@ +module f90_return_integer + contains + function t0(value) + integer :: value + integer :: t0 + t0 = value + end function t0 + function t1(value) + integer(kind=1) :: value + integer(kind=1) :: t1 + t1 = value + end function t1 + function t2(value) + integer(kind=2) :: value + integer(kind=2) :: t2 + t2 = value + end function t2 + function t4(value) + integer(kind=4) :: value + integer(kind=4) :: t4 + t4 = value + end function t4 + function t8(value) + integer(kind=8) :: value + integer(kind=8) :: t8 + t8 = value + end function t8 + + subroutine s0(t0,value) + integer :: value + integer :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s1(t1,value) + integer(kind=1) :: value + integer(kind=1) :: t1 +!f2py intent(out) t1 + t1 = value + end subroutine s1 + subroutine s2(t2,value) + integer(kind=2) :: value + integer(kind=2) :: t2 +!f2py intent(out) t2 + t2 = value + end subroutine s2 + subroutine s4(t4,value) + integer(kind=4) :: value + integer(kind=4) :: t4 +!f2py intent(out) t4 + t4 = value + end subroutine s4 + subroutine s8(t8,value) + integer(kind=8) :: value + integer(kind=8) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 +end module f90_return_integer diff --git a/local_packages/numpy/f2py/tests/src/return_logical/foo77.f b/local_packages/numpy/f2py/tests/src/return_logical/foo77.f new file mode 100644 index 0000000..ef53014 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/return_logical/foo77.f @@ -0,0 +1,56 @@ + function t0(value) + logical value + logical t0 + t0 = value + end + function t1(value) + logical*1 value + logical*1 t1 + t1 = value + end + function t2(value) + logical*2 value + logical*2 t2 + t2 = value + end + function t4(value) + logical*4 value + logical*4 t4 + t4 = value + end +c function t8(value) +c logical*8 value +c logical*8 t8 +c t8 = value +c end + + subroutine s0(t0,value) + logical value + logical t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s1(t1,value) + logical*1 value + logical*1 t1 +cf2py intent(out) t1 + t1 = value + end + subroutine s2(t2,value) + logical*2 value + logical*2 t2 +cf2py intent(out) t2 + t2 = value + end + subroutine s4(t4,value) + logical*4 value + logical*4 t4 +cf2py intent(out) t4 + t4 = value + end +c subroutine s8(t8,value) +c logical*8 value +c logical*8 t8 +cf2py intent(out) t8 +c t8 = value +c end diff --git a/local_packages/numpy/f2py/tests/src/return_logical/foo90.f90 b/local_packages/numpy/f2py/tests/src/return_logical/foo90.f90 new file mode 100644 index 0000000..a452646 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/return_logical/foo90.f90 @@ -0,0 +1,59 @@ +module f90_return_logical + contains + function t0(value) + logical :: value + logical :: t0 + t0 = value + end function t0 + function t1(value) + logical(kind=1) :: value + logical(kind=1) :: t1 + t1 = value + end function t1 + function t2(value) + logical(kind=2) :: value + logical(kind=2) :: t2 + t2 = value + end function t2 + function t4(value) + logical(kind=4) :: value + logical(kind=4) :: t4 + t4 = value + end function t4 + function t8(value) + logical(kind=8) :: value + logical(kind=8) :: t8 + t8 = value + end function t8 + + subroutine s0(t0,value) + logical :: value + logical :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s1(t1,value) + logical(kind=1) :: value + logical(kind=1) :: t1 +!f2py intent(out) t1 + t1 = value + end subroutine s1 + subroutine s2(t2,value) + logical(kind=2) :: value + logical(kind=2) :: t2 +!f2py intent(out) t2 + t2 = value + end subroutine s2 + subroutine s4(t4,value) + logical(kind=4) :: value + logical(kind=4) :: t4 +!f2py intent(out) t4 + t4 = value + end subroutine s4 + subroutine s8(t8,value) + logical(kind=8) :: value + logical(kind=8) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 +end module f90_return_logical diff --git a/local_packages/numpy/f2py/tests/src/return_real/foo77.f b/local_packages/numpy/f2py/tests/src/return_real/foo77.f new file mode 100644 index 0000000..bf43dbf --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/return_real/foo77.f @@ -0,0 +1,45 @@ + function t0(value) + real value + real t0 + t0 = value + end + function t4(value) + real*4 value + real*4 t4 + t4 = value + end + function t8(value) + real*8 value + real*8 t8 + t8 = value + end + function td(value) + double precision value + double precision td + td = value + end + + subroutine s0(t0,value) + real value + real t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s4(t4,value) + real*4 value + real*4 t4 +cf2py intent(out) t4 + t4 = value + end + subroutine s8(t8,value) + real*8 value + real*8 t8 +cf2py intent(out) t8 + t8 = value + end + subroutine sd(td,value) + double precision value + double precision td +cf2py intent(out) td + td = value + end diff --git a/local_packages/numpy/f2py/tests/src/return_real/foo90.f90 b/local_packages/numpy/f2py/tests/src/return_real/foo90.f90 new file mode 100644 index 0000000..df97199 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/return_real/foo90.f90 @@ -0,0 +1,48 @@ +module f90_return_real + contains + function t0(value) + real :: value + real :: t0 + t0 = value + end function t0 + function t4(value) + real(kind=4) :: value + real(kind=4) :: t4 + t4 = value + end function t4 + function t8(value) + real(kind=8) :: value + real(kind=8) :: t8 + t8 = value + end function t8 + function td(value) + double precision :: value + double precision :: td + td = value + end function td + + subroutine s0(t0,value) + real :: value + real :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s4(t4,value) + real(kind=4) :: value + real(kind=4) :: t4 +!f2py intent(out) t4 + t4 = value + end subroutine s4 + subroutine s8(t8,value) + real(kind=8) :: value + real(kind=8) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 + subroutine sd(td,value) + double precision :: value + double precision :: td +!f2py intent(out) td + td = value + end subroutine sd +end module f90_return_real diff --git a/local_packages/numpy/f2py/tests/src/routines/funcfortranname.f b/local_packages/numpy/f2py/tests/src/routines/funcfortranname.f new file mode 100644 index 0000000..89be972 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/routines/funcfortranname.f @@ -0,0 +1,5 @@ + REAL*8 FUNCTION FUNCFORTRANNAME(A,B) + REAL*8 A, B + FUNCFORTRANNAME = A + B + RETURN + END FUNCTION diff --git a/local_packages/numpy/f2py/tests/src/routines/funcfortranname.pyf b/local_packages/numpy/f2py/tests/src/routines/funcfortranname.pyf new file mode 100644 index 0000000..8730ca6 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/routines/funcfortranname.pyf @@ -0,0 +1,11 @@ +python module funcfortranname ! in + interface ! in :funcfortranname + function funcfortranname_default(a,b) ! in :funcfortranname:funcfortranname.f + fortranname funcfortranname + real*8 :: a + real*8 :: b + real*8 :: funcfortranname_default + real*8, intent(out) :: funcfortranname + end function funcfortranname_default + end interface +end python module funcfortranname diff --git a/local_packages/numpy/f2py/tests/src/routines/subrout.f b/local_packages/numpy/f2py/tests/src/routines/subrout.f new file mode 100644 index 0000000..1d1eeae --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/routines/subrout.f @@ -0,0 +1,4 @@ + SUBROUTINE SUBROUT(A,B,C) + REAL*8 A, B, C + C = A + B + END SUBROUTINE diff --git a/local_packages/numpy/f2py/tests/src/routines/subrout.pyf b/local_packages/numpy/f2py/tests/src/routines/subrout.pyf new file mode 100644 index 0000000..e27cbe1 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/routines/subrout.pyf @@ -0,0 +1,10 @@ +python module subrout ! in + interface ! in :subrout + subroutine subrout_default(a,b,c) ! in :subrout:subrout.f + fortranname subrout + real*8 :: a + real*8 :: b + real*8, intent(out) :: c + end subroutine subrout_default + end interface +end python module subrout diff --git a/local_packages/numpy/f2py/tests/src/size/foo.f90 b/local_packages/numpy/f2py/tests/src/size/foo.f90 new file mode 100644 index 0000000..5b66f8c --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/size/foo.f90 @@ -0,0 +1,44 @@ + +subroutine foo(a, n, m, b) + implicit none + + real, intent(in) :: a(n, m) + integer, intent(in) :: n, m + real, intent(out) :: b(size(a, 1)) + + integer :: i + + do i = 1, size(b) + b(i) = sum(a(i,:)) + enddo +end subroutine + +subroutine trans(x,y) + implicit none + real, intent(in), dimension(:,:) :: x + real, intent(out), dimension( size(x,2), size(x,1) ) :: y + integer :: N, M, i, j + N = size(x,1) + M = size(x,2) + DO i=1,N + do j=1,M + y(j,i) = x(i,j) + END DO + END DO +end subroutine trans + +subroutine flatten(x,y) + implicit none + real, intent(in), dimension(:,:) :: x + real, intent(out), dimension( size(x) ) :: y + integer :: N, M, i, j, k + N = size(x,1) + M = size(x,2) + k = 1 + DO i=1,N + do j=1,M + y(k) = x(i,j) + k = k + 1 + END DO + END DO +end subroutine flatten diff --git a/local_packages/numpy/f2py/tests/src/string/char.f90 b/local_packages/numpy/f2py/tests/src/string/char.f90 new file mode 100644 index 0000000..bb7985c --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/string/char.f90 @@ -0,0 +1,29 @@ +MODULE char_test + +CONTAINS + +SUBROUTINE change_strings(strings, n_strs, out_strings) + IMPLICIT NONE + + ! Inputs + INTEGER, INTENT(IN) :: n_strs + CHARACTER, INTENT(IN), DIMENSION(2,n_strs) :: strings + CHARACTER, INTENT(OUT), DIMENSION(2,n_strs) :: out_strings + +!f2py INTEGER, INTENT(IN) :: n_strs +!f2py CHARACTER, INTENT(IN), DIMENSION(2,n_strs) :: strings +!f2py CHARACTER, INTENT(OUT), DIMENSION(2,n_strs) :: strings + + ! Misc. + INTEGER*4 :: j + + + DO j=1, n_strs + out_strings(1,j) = strings(1,j) + out_strings(2,j) = 'A' + END DO + +END SUBROUTINE change_strings + +END MODULE char_test + diff --git a/local_packages/numpy/f2py/tests/src/string/fixed_string.f90 b/local_packages/numpy/f2py/tests/src/string/fixed_string.f90 new file mode 100644 index 0000000..7fd1585 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/string/fixed_string.f90 @@ -0,0 +1,34 @@ +function sint(s) result(i) + implicit none + character(len=*) :: s + integer :: j, i + i = 0 + do j=len(s), 1, -1 + if (.not.((i.eq.0).and.(s(j:j).eq.' '))) then + i = i + ichar(s(j:j)) * 10 ** (j - 1) + endif + end do + return + end function sint + + function test_in_bytes4(a) result (i) + implicit none + integer :: sint + character(len=4) :: a + integer :: i + i = sint(a) + a(1:1) = 'A' + return + end function test_in_bytes4 + + function test_inout_bytes4(a) result (i) + implicit none + integer :: sint + character(len=4), intent(inout) :: a + integer :: i + if (a(1:1).ne.' ') then + a(1:1) = 'E' + endif + i = sint(a) + return + end function test_inout_bytes4 diff --git a/local_packages/numpy/f2py/tests/src/string/gh24008.f b/local_packages/numpy/f2py/tests/src/string/gh24008.f new file mode 100644 index 0000000..ab64cf7 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/string/gh24008.f @@ -0,0 +1,8 @@ + SUBROUTINE GREET(NAME, GREETING) + CHARACTER NAME*(*), GREETING*(*) + CHARACTER*(50) MESSAGE + + MESSAGE = 'Hello, ' // NAME // ', ' // GREETING +c$$$ PRINT *, MESSAGE + + END SUBROUTINE GREET diff --git a/local_packages/numpy/f2py/tests/src/string/gh24662.f90 b/local_packages/numpy/f2py/tests/src/string/gh24662.f90 new file mode 100644 index 0000000..ca53413 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/string/gh24662.f90 @@ -0,0 +1,7 @@ +subroutine string_inout_optional(output) + implicit none + character*(32), optional, intent(inout) :: output + if (present(output)) then + output="output string" + endif +end subroutine diff --git a/local_packages/numpy/f2py/tests/src/string/gh25286.f90 b/local_packages/numpy/f2py/tests/src/string/gh25286.f90 new file mode 100644 index 0000000..db1c710 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/string/gh25286.f90 @@ -0,0 +1,14 @@ +subroutine charint(trans, info) + character, intent(in) :: trans + integer, intent(out) :: info + if (trans == 'N') then + info = 1 + else if (trans == 'T') then + info = 2 + else if (trans == 'C') then + info = 3 + else + info = -1 + end if + +end subroutine charint diff --git a/local_packages/numpy/f2py/tests/src/string/gh25286.pyf b/local_packages/numpy/f2py/tests/src/string/gh25286.pyf new file mode 100644 index 0000000..7b96090 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/string/gh25286.pyf @@ -0,0 +1,12 @@ +python module _char_handling_test + interface + subroutine charint(trans, info) + callstatement (*f2py_func)(&trans, &info) + callprotoargument char*, int* + + character, intent(in), check(trans=='N'||trans=='T'||trans=='C') :: trans = 'N' + integer intent(out) :: info + + end subroutine charint + end interface +end python module _char_handling_test diff --git a/local_packages/numpy/f2py/tests/src/string/gh25286_bc.pyf b/local_packages/numpy/f2py/tests/src/string/gh25286_bc.pyf new file mode 100644 index 0000000..e7b10fa --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/string/gh25286_bc.pyf @@ -0,0 +1,12 @@ +python module _char_handling_test + interface + subroutine charint(trans, info) + callstatement (*f2py_func)(&trans, &info) + callprotoargument char*, int* + + character, intent(in), check(*trans=='N'||*trans=='T'||*trans=='C') :: trans = 'N' + integer intent(out) :: info + + end subroutine charint + end interface +end python module _char_handling_test diff --git a/local_packages/numpy/f2py/tests/src/string/scalar_string.f90 b/local_packages/numpy/f2py/tests/src/string/scalar_string.f90 new file mode 100644 index 0000000..f8f0761 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/string/scalar_string.f90 @@ -0,0 +1,9 @@ +MODULE string_test + + character(len=8) :: string + character string77 * 8 + + character(len=12), dimension(5,7) :: strarr + character strarr77(5,7) * 12 + +END MODULE string_test diff --git a/local_packages/numpy/f2py/tests/src/string/string.f b/local_packages/numpy/f2py/tests/src/string/string.f new file mode 100644 index 0000000..5210ca4 --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/string/string.f @@ -0,0 +1,12 @@ +C FILE: STRING.F + SUBROUTINE FOO(A,B,C,D) + CHARACTER*5 A, B + CHARACTER*(*) C,D +Cf2py intent(in) a,c +Cf2py intent(inout) b,d + A(1:1) = 'A' + B(1:1) = 'B' + C(1:1) = 'C' + D(1:1) = 'D' + END +C END OF FILE STRING.F diff --git a/local_packages/numpy/f2py/tests/src/value_attrspec/gh21665.f90 b/local_packages/numpy/f2py/tests/src/value_attrspec/gh21665.f90 new file mode 100644 index 0000000..7d9dc0f --- /dev/null +++ b/local_packages/numpy/f2py/tests/src/value_attrspec/gh21665.f90 @@ -0,0 +1,9 @@ +module fortfuncs + implicit none +contains + subroutine square(x,y) + integer, intent(in), value :: x + integer, intent(out) :: y + y = x*x + end subroutine square +end module fortfuncs diff --git a/local_packages/numpy/f2py/tests/test_abstract_interface.py b/local_packages/numpy/f2py/tests/test_abstract_interface.py new file mode 100644 index 0000000..21e77db --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_abstract_interface.py @@ -0,0 +1,26 @@ +import pytest + +from numpy.f2py import crackfortran +from numpy.testing import IS_WASM + +from . import util + + +@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") +@pytest.mark.slow +class TestAbstractInterface(util.F2PyTest): + sources = [util.getpath("tests", "src", "abstract_interface", "foo.f90")] + + skip = ["add1", "add2"] + + def test_abstract_interface(self): + assert self.module.ops_module.foo(3, 5) == (8, 13) + + def test_parse_abstract_interface(self): + # Test gh18403 + fpath = util.getpath("tests", "src", "abstract_interface", + "gh18403_mod.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + assert len(mod[0]["body"]) == 1 + assert mod[0]["body"][0]["block"] == "abstract interface" diff --git a/local_packages/numpy/f2py/tests/test_array_from_pyobj.py b/local_packages/numpy/f2py/tests/test_array_from_pyobj.py new file mode 100644 index 0000000..15383e9 --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_array_from_pyobj.py @@ -0,0 +1,678 @@ +import copy +import platform +import sys +from pathlib import Path + +import pytest + +import numpy as np +from numpy._core._type_aliases import c_names_dict as _c_names_dict + +from . import util + +wrap = None + +# Extend core typeinfo with CHARACTER to test dtype('c') +c_names_dict = dict( + CHARACTER=np.dtype("c"), + **_c_names_dict +) + + +def get_testdir(): + testroot = Path(__file__).resolve().parent / "src" + return testroot / "array_from_pyobj" + +def setup_module(): + """ + Build the required testing extension module + + """ + global wrap + + if wrap is None: + src = [ + get_testdir() / "wrapmodule.c", + ] + wrap = util.build_meson(src, module_name="test_array_from_pyobj_ext") + + +def flags_info(arr): + flags = wrap.array_attrs(arr)[6] + return flags2names(flags) + + +def flags2names(flags): + info = [] + for flagname in [ + "CONTIGUOUS", + "FORTRAN", + "OWNDATA", + "ENSURECOPY", + "ENSUREARRAY", + "ALIGNED", + "NOTSWAPPED", + "WRITEABLE", + "WRITEBACKIFCOPY", + "UPDATEIFCOPY", + "BEHAVED", + "BEHAVED_RO", + "CARRAY", + "FARRAY", + ]: + if abs(flags) & getattr(wrap, flagname, 0): + info.append(flagname) + return info + + +class Intent: + def __init__(self, intent_list=[]): + self.intent_list = intent_list[:] + flags = 0 + for i in intent_list: + if i == "optional": + flags |= wrap.F2PY_OPTIONAL + else: + flags |= getattr(wrap, "F2PY_INTENT_" + i.upper()) + self.flags = flags + + def __getattr__(self, name): + name = name.lower() + if name == "in_": + name = "in" + return self.__class__(self.intent_list + [name]) + + def __str__(self): + return f"intent({','.join(self.intent_list)})" + + def __repr__(self): + return f"Intent({self.intent_list!r})" + + def is_intent(self, *names): + return all(name in self.intent_list for name in names) + + def is_intent_exact(self, *names): + return len(self.intent_list) == len(names) and self.is_intent(*names) + + +intent = Intent() + +_type_names = [ + "BOOL", + "BYTE", + "UBYTE", + "SHORT", + "USHORT", + "INT", + "UINT", + "LONG", + "ULONG", + "LONGLONG", + "ULONGLONG", + "FLOAT", + "DOUBLE", + "CFLOAT", + "STRING1", + "STRING5", + "CHARACTER", +] + +_cast_dict = {"BOOL": ["BOOL"]} +_cast_dict["BYTE"] = _cast_dict["BOOL"] + ["BYTE"] +_cast_dict["UBYTE"] = _cast_dict["BOOL"] + ["UBYTE"] +_cast_dict["BYTE"] = ["BYTE"] +_cast_dict["UBYTE"] = ["UBYTE"] +_cast_dict["SHORT"] = _cast_dict["BYTE"] + ["UBYTE", "SHORT"] +_cast_dict["USHORT"] = _cast_dict["UBYTE"] + ["BYTE", "USHORT"] +_cast_dict["INT"] = _cast_dict["SHORT"] + ["USHORT", "INT"] +_cast_dict["UINT"] = _cast_dict["USHORT"] + ["SHORT", "UINT"] + +_cast_dict["LONG"] = _cast_dict["INT"] + ["LONG"] +_cast_dict["ULONG"] = _cast_dict["UINT"] + ["ULONG"] + +_cast_dict["LONGLONG"] = _cast_dict["LONG"] + ["LONGLONG"] +_cast_dict["ULONGLONG"] = _cast_dict["ULONG"] + ["ULONGLONG"] + +_cast_dict["FLOAT"] = _cast_dict["SHORT"] + ["USHORT", "FLOAT"] +_cast_dict["DOUBLE"] = _cast_dict["INT"] + ["UINT", "FLOAT", "DOUBLE"] + +_cast_dict["CFLOAT"] = _cast_dict["FLOAT"] + ["CFLOAT"] + +_cast_dict['STRING1'] = ['STRING1'] +_cast_dict['STRING5'] = ['STRING5'] +_cast_dict['CHARACTER'] = ['CHARACTER'] + +# 32 bit system malloc typically does not provide the alignment required by +# 16 byte long double types this means the inout intent cannot be satisfied +# and several tests fail as the alignment flag can be randomly true or false +# when numpy gains an aligned allocator the tests could be enabled again +# +# Furthermore, on macOS ARM64 and AIX, LONGDOUBLE is an alias for DOUBLE. +if ((np.intp().dtype.itemsize != 4 or np.clongdouble().dtype.alignment <= 8) + and sys.platform not in ["win32", "aix"] + and (platform.system(), platform.processor()) != ("Darwin", "arm")): + _type_names.extend(["LONGDOUBLE", "CDOUBLE", "CLONGDOUBLE"]) + _cast_dict["LONGDOUBLE"] = _cast_dict["LONG"] + [ + "ULONG", + "FLOAT", + "DOUBLE", + "LONGDOUBLE", + ] + _cast_dict["CLONGDOUBLE"] = _cast_dict["LONGDOUBLE"] + [ + "CFLOAT", + "CDOUBLE", + "CLONGDOUBLE", + ] + _cast_dict["CDOUBLE"] = _cast_dict["DOUBLE"] + ["CFLOAT", "CDOUBLE"] + + +class Type: + _type_cache = {} + + def __new__(cls, name): + if isinstance(name, np.dtype): + dtype0 = name + name = None + for n, i in c_names_dict.items(): + if not isinstance(i, type) and dtype0.type is i.type: + name = n + break + obj = cls._type_cache.get(name.upper(), None) + if obj is not None: + return obj + obj = object.__new__(cls) + obj._init(name) + cls._type_cache[name.upper()] = obj + return obj + + def _init(self, name): + self.NAME = name.upper() + + if self.NAME == 'CHARACTER': + info = c_names_dict[self.NAME] + self.type_num = wrap.NPY_STRING + self.elsize = 1 + self.dtype = np.dtype('c') + elif self.NAME.startswith('STRING'): + info = c_names_dict[self.NAME[:6]] + self.type_num = wrap.NPY_STRING + self.elsize = int(self.NAME[6:] or 0) + self.dtype = np.dtype(f'S{self.elsize}') + else: + info = c_names_dict[self.NAME] + self.type_num = getattr(wrap, 'NPY_' + self.NAME) + self.elsize = info.itemsize + self.dtype = np.dtype(info.type) + + assert self.type_num == info.num + self.type = info.type + self.dtypechar = info.char + + def __repr__(self): + return (f"Type({self.NAME})|type_num={self.type_num}," + f" dtype={self.dtype}," + f" type={self.type}, elsize={self.elsize}," + f" dtypechar={self.dtypechar}") + + def cast_types(self): + return [self.__class__(_m) for _m in _cast_dict[self.NAME]] + + def all_types(self): + return [self.__class__(_m) for _m in _type_names] + + def smaller_types(self): + bits = c_names_dict[self.NAME].alignment + types = [] + for name in _type_names: + if c_names_dict[name].alignment < bits: + types.append(Type(name)) + return types + + def equal_types(self): + bits = c_names_dict[self.NAME].alignment + types = [] + for name in _type_names: + if name == self.NAME: + continue + if c_names_dict[name].alignment == bits: + types.append(Type(name)) + return types + + def larger_types(self): + bits = c_names_dict[self.NAME].alignment + types = [] + for name in _type_names: + if c_names_dict[name].alignment > bits: + types.append(Type(name)) + return types + + +class Array: + + def __repr__(self): + return (f'Array({self.type}, {self.dims}, {self.intent},' + f' {self.obj})|arr={self.arr}') + + def __init__(self, typ, dims, intent, obj): + self.type = typ + self.dims = dims + self.intent = intent + self.obj_copy = copy.deepcopy(obj) + self.obj = obj + + # arr.dtypechar may be different from typ.dtypechar + self.arr = wrap.call(typ.type_num, + typ.elsize, + dims, intent.flags, obj) + + assert isinstance(self.arr, np.ndarray) + + self.arr_attr = wrap.array_attrs(self.arr) + + if len(dims) > 1: + if self.intent.is_intent("c"): + assert (intent.flags & wrap.F2PY_INTENT_C) + assert not self.arr.flags["FORTRAN"] + assert self.arr.flags["CONTIGUOUS"] + assert (not self.arr_attr[6] & wrap.FORTRAN) + else: + assert (not intent.flags & wrap.F2PY_INTENT_C) + assert self.arr.flags["FORTRAN"] + assert not self.arr.flags["CONTIGUOUS"] + assert (self.arr_attr[6] & wrap.FORTRAN) + + if obj is None: + self.pyarr = None + self.pyarr_attr = None + return + + if intent.is_intent("cache"): + assert isinstance(obj, np.ndarray), repr(type(obj)) + self.pyarr = np.array(obj).reshape(*dims).copy() + else: + self.pyarr = np.array( + np.array(obj, dtype=typ.dtypechar).reshape(*dims), + order=(self.intent.is_intent("c") and "C") or "F", + ) + assert self.pyarr.dtype == typ + self.pyarr.setflags(write=self.arr.flags["WRITEABLE"]) + assert self.pyarr.flags["OWNDATA"], (obj, intent) + self.pyarr_attr = wrap.array_attrs(self.pyarr) + + if len(dims) > 1: + if self.intent.is_intent("c"): + assert not self.pyarr.flags["FORTRAN"] + assert self.pyarr.flags["CONTIGUOUS"] + assert (not self.pyarr_attr[6] & wrap.FORTRAN) + else: + assert self.pyarr.flags["FORTRAN"] + assert not self.pyarr.flags["CONTIGUOUS"] + assert (self.pyarr_attr[6] & wrap.FORTRAN) + + assert self.arr_attr[1] == self.pyarr_attr[1] # nd + assert self.arr_attr[2] == self.pyarr_attr[2] # dimensions + if self.arr_attr[1] <= 1: + assert self.arr_attr[3] == self.pyarr_attr[3], repr(( + self.arr_attr[3], + self.pyarr_attr[3], + self.arr.tobytes(), + self.pyarr.tobytes(), + )) # strides + assert self.arr_attr[5][-2:] == self.pyarr_attr[5][-2:], repr(( + self.arr_attr[5], self.pyarr_attr[5] + )) # descr + assert self.arr_attr[6] == self.pyarr_attr[6], repr(( + self.arr_attr[6], + self.pyarr_attr[6], + flags2names(0 * self.arr_attr[6] - self.pyarr_attr[6]), + flags2names(self.arr_attr[6]), + intent, + )) # flags + + if intent.is_intent("cache"): + assert self.arr_attr[5][3] >= self.type.elsize + else: + assert self.arr_attr[5][3] == self.type.elsize + assert (self.arr_equal(self.pyarr, self.arr)) + + if isinstance(self.obj, np.ndarray): + if typ.elsize == Type(obj.dtype).elsize: + if not intent.is_intent("copy") and self.arr_attr[1] <= 1: + assert self.has_shared_memory() + + def arr_equal(self, arr1, arr2): + if arr1.shape != arr2.shape: + return False + return (arr1 == arr2).all() + + def __str__(self): + return str(self.arr) + + def has_shared_memory(self): + """Check that created array shares data with input array.""" + if self.obj is self.arr: + return True + if not isinstance(self.obj, np.ndarray): + return False + obj_attr = wrap.array_attrs(self.obj) + return obj_attr[0] == self.arr_attr[0] + + +class TestIntent: + def test_in_out(self): + assert str(intent.in_.out) == "intent(in,out)" + assert intent.in_.c.is_intent("c") + assert not intent.in_.c.is_intent_exact("c") + assert intent.in_.c.is_intent_exact("c", "in") + assert intent.in_.c.is_intent_exact("in", "c") + assert not intent.in_.is_intent("c") + + +class TestSharedMemory: + + @pytest.fixture(autouse=True, scope="class", params=_type_names) + def setup_type(self, request): + request.cls.type = Type(request.param) + request.cls.array = lambda self, dims, intent, obj: Array( + Type(request.param), dims, intent, obj) + + @property + def num2seq(self): + if self.type.NAME.startswith('STRING'): + elsize = self.type.elsize + return ['1' * elsize, '2' * elsize] + return [1, 2] + + @property + def num23seq(self): + if self.type.NAME.startswith('STRING'): + elsize = self.type.elsize + return [['1' * elsize, '2' * elsize, '3' * elsize], + ['4' * elsize, '5' * elsize, '6' * elsize]] + return [[1, 2, 3], [4, 5, 6]] + + def test_in_from_2seq(self): + a = self.array([2], intent.in_, self.num2seq) + assert not a.has_shared_memory() + + def test_in_from_2casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num2seq, dtype=t.dtype) + a = self.array([len(self.num2seq)], intent.in_, obj) + if t.elsize == self.type.elsize: + assert a.has_shared_memory(), repr((self.type.dtype, t.dtype)) + else: + assert not a.has_shared_memory() + + @pytest.mark.parametrize("write", ["w", "ro"]) + @pytest.mark.parametrize("order", ["C", "F"]) + @pytest.mark.parametrize("inp", ["2seq", "23seq"]) + def test_in_nocopy(self, write, order, inp): + """Test if intent(in) array can be passed without copies""" + seq = getattr(self, "num" + inp) + obj = np.array(seq, dtype=self.type.dtype, order=order) + obj.setflags(write=(write == 'w')) + a = self.array(obj.shape, + ((order == 'C' and intent.in_.c) or intent.in_), obj) + assert a.has_shared_memory() + + def test_inout_2seq(self): + obj = np.array(self.num2seq, dtype=self.type.dtype) + a = self.array([len(self.num2seq)], intent.inout, obj) + assert a.has_shared_memory() + + try: + a = self.array([2], intent.in_.inout, self.num2seq) + except TypeError as msg: + if not str(msg).startswith( + "failed to initialize intent(inout|inplace|cache) array"): + raise + else: + raise SystemError("intent(inout) should have failed on sequence") + + def test_f_inout_23seq(self): + obj = np.array(self.num23seq, dtype=self.type.dtype, order="F") + shape = (len(self.num23seq), len(self.num23seq[0])) + a = self.array(shape, intent.in_.inout, obj) + assert a.has_shared_memory() + + obj = np.array(self.num23seq, dtype=self.type.dtype, order="C") + shape = (len(self.num23seq), len(self.num23seq[0])) + try: + a = self.array(shape, intent.in_.inout, obj) + except ValueError as msg: + if not str(msg).startswith( + "failed to initialize intent(inout) array"): + raise + else: + raise SystemError( + "intent(inout) should have failed on improper array") + + def test_c_inout_23seq(self): + obj = np.array(self.num23seq, dtype=self.type.dtype) + shape = (len(self.num23seq), len(self.num23seq[0])) + a = self.array(shape, intent.in_.c.inout, obj) + assert a.has_shared_memory() + + def test_in_copy_from_2casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num2seq, dtype=t.dtype) + a = self.array([len(self.num2seq)], intent.in_.copy, obj) + assert not a.has_shared_memory() + + def test_c_in_from_23seq(self): + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_, + self.num23seq) + assert not a.has_shared_memory() + + def test_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype) + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_, obj) + assert not a.has_shared_memory() + + def test_f_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype, order="F") + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_, obj) + if t.elsize == self.type.elsize: + assert a.has_shared_memory() + else: + assert not a.has_shared_memory() + + def test_c_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype) + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_.c, obj) + if t.elsize == self.type.elsize: + assert a.has_shared_memory() + else: + assert not a.has_shared_memory() + + def test_f_copy_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype, order="F") + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_.copy, + obj) + assert not a.has_shared_memory() + + def test_c_copy_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype) + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_.c.copy, + obj) + assert not a.has_shared_memory() + + def test_in_cache_from_2casttype(self): + for t in self.type.all_types(): + if t.elsize != self.type.elsize: + continue + obj = np.array(self.num2seq, dtype=t.dtype) + shape = (len(self.num2seq), ) + a = self.array(shape, intent.in_.c.cache, obj) + assert a.has_shared_memory() + + a = self.array(shape, intent.in_.cache, obj) + assert a.has_shared_memory() + + obj = np.array(self.num2seq, dtype=t.dtype, order="F") + a = self.array(shape, intent.in_.c.cache, obj) + assert a.has_shared_memory() + + a = self.array(shape, intent.in_.cache, obj) + assert a.has_shared_memory(), repr(t.dtype) + + try: + a = self.array(shape, intent.in_.cache, obj[::-1]) + except ValueError as msg: + if not str(msg).startswith( + "failed to initialize intent(cache) array"): + raise + else: + raise SystemError( + "intent(cache) should have failed on multisegmented array") + + def test_in_cache_from_2casttype_failure(self): + for t in self.type.all_types(): + if t.NAME == 'STRING': + # string elsize is 0, so skipping the test + continue + if t.elsize >= self.type.elsize: + continue + is_int = np.issubdtype(t.dtype, np.integer) + if is_int and int(self.num2seq[0]) > np.iinfo(t.dtype).max: + # skip test if num2seq would trigger an overflow error + continue + obj = np.array(self.num2seq, dtype=t.dtype) + shape = (len(self.num2seq), ) + try: + self.array(shape, intent.in_.cache, obj) # Should succeed + except ValueError as msg: + if not str(msg).startswith( + "failed to initialize intent(cache) array"): + raise + else: + raise SystemError( + "intent(cache) should have failed on smaller array") + + def test_cache_hidden(self): + shape = (2, ) + a = self.array(shape, intent.cache.hide, None) + assert a.arr.shape == shape + + shape = (2, 3) + a = self.array(shape, intent.cache.hide, None) + assert a.arr.shape == shape + + shape = (-1, 3) + try: + a = self.array(shape, intent.cache.hide, None) + except ValueError as msg: + if not str(msg).startswith( + "failed to create intent(cache|hide)|optional array"): + raise + else: + raise SystemError( + "intent(cache) should have failed on undefined dimensions") + + def test_hidden(self): + shape = (2, ) + a = self.array(shape, intent.hide, None) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + + shape = (2, 3) + a = self.array(shape, intent.hide, None) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + assert a.arr.flags["FORTRAN"] and not a.arr.flags["CONTIGUOUS"] + + shape = (2, 3) + a = self.array(shape, intent.c.hide, None) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + assert not a.arr.flags["FORTRAN"] and a.arr.flags["CONTIGUOUS"] + + shape = (-1, 3) + try: + a = self.array(shape, intent.hide, None) + except ValueError as msg: + if not str(msg).startswith( + "failed to create intent(cache|hide)|optional array"): + raise + else: + raise SystemError( + "intent(hide) should have failed on undefined dimensions") + + def test_optional_none(self): + shape = (2, ) + a = self.array(shape, intent.optional, None) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + + shape = (2, 3) + a = self.array(shape, intent.optional, None) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + assert a.arr.flags["FORTRAN"] and not a.arr.flags["CONTIGUOUS"] + + shape = (2, 3) + a = self.array(shape, intent.c.optional, None) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + assert not a.arr.flags["FORTRAN"] and a.arr.flags["CONTIGUOUS"] + + def test_optional_from_2seq(self): + obj = self.num2seq + shape = (len(obj), ) + a = self.array(shape, intent.optional, obj) + assert a.arr.shape == shape + assert not a.has_shared_memory() + + def test_optional_from_23seq(self): + obj = self.num23seq + shape = (len(obj), len(obj[0])) + a = self.array(shape, intent.optional, obj) + assert a.arr.shape == shape + assert not a.has_shared_memory() + + a = self.array(shape, intent.optional.c, obj) + assert a.arr.shape == shape + assert not a.has_shared_memory() + + def test_inplace(self): + obj = np.array(self.num23seq, dtype=self.type.dtype) + assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"] + shape = obj.shape + a = self.array(shape, intent.inplace, obj) + assert obj[1][2] == a.arr[1][2], repr((obj, a.arr)) + a.arr[1][2] = 54 + assert obj[1][2] == a.arr[1][2] == np.array(54, dtype=self.type.dtype) + assert a.arr is obj + assert obj.flags["FORTRAN"] # obj attributes are changed inplace! + assert not obj.flags["CONTIGUOUS"] + + def test_inplace_from_casttype(self): + for t in self.type.cast_types(): + if t is self.type: + continue + obj = np.array(self.num23seq, dtype=t.dtype) + assert obj.dtype.type == t.type + assert obj.dtype.type is not self.type.type + assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"] + shape = obj.shape + a = self.array(shape, intent.inplace, obj) + assert obj[1][2] == a.arr[1][2], repr((obj, a.arr)) + a.arr[1][2] = 54 + assert obj[1][2] == a.arr[1][2] == np.array(54, + dtype=self.type.dtype) + assert a.arr is obj + assert obj.flags["FORTRAN"] # obj attributes changed inplace! + assert not obj.flags["CONTIGUOUS"] + assert obj.dtype.type is self.type.type # obj changed inplace! diff --git a/local_packages/numpy/f2py/tests/test_assumed_shape.py b/local_packages/numpy/f2py/tests/test_assumed_shape.py new file mode 100644 index 0000000..cf75644 --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_assumed_shape.py @@ -0,0 +1,50 @@ +import os +import tempfile + +import pytest + +from . import util + + +class TestAssumedShapeSumExample(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "assumed_shape", "foo_free.f90"), + util.getpath("tests", "src", "assumed_shape", "foo_use.f90"), + util.getpath("tests", "src", "assumed_shape", "precision.f90"), + util.getpath("tests", "src", "assumed_shape", "foo_mod.f90"), + util.getpath("tests", "src", "assumed_shape", ".f2py_f2cmap"), + ] + + @pytest.mark.slow + def test_all(self): + r = self.module.fsum([1, 2]) + assert r == 3 + r = self.module.sum([1, 2]) + assert r == 3 + r = self.module.sum_with_use([1, 2]) + assert r == 3 + + r = self.module.mod.sum([1, 2]) + assert r == 3 + r = self.module.mod.fsum([1, 2]) + assert r == 3 + + +class TestF2cmapOption(TestAssumedShapeSumExample): + def setup_method(self): + # Use a custom file name for .f2py_f2cmap + self.sources = list(self.sources) + f2cmap_src = self.sources.pop(-1) + + self.f2cmap_file = tempfile.NamedTemporaryFile(delete=False) + with open(f2cmap_src, "rb") as f: + self.f2cmap_file.write(f.read()) + self.f2cmap_file.close() + + self.sources.append(self.f2cmap_file.name) + self.options = ["--f2cmap", self.f2cmap_file.name] + + super().setup_method() + + def teardown_method(self): + os.unlink(self.f2cmap_file.name) diff --git a/local_packages/numpy/f2py/tests/test_block_docstring.py b/local_packages/numpy/f2py/tests/test_block_docstring.py new file mode 100644 index 0000000..ba255a1 --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_block_docstring.py @@ -0,0 +1,20 @@ +import sys + +import pytest + +from numpy.testing import IS_PYPY + +from . import util + + +@pytest.mark.slow +class TestBlockDocString(util.F2PyTest): + sources = [util.getpath("tests", "src", "block_docstring", "foo.f")] + + @pytest.mark.skipif(sys.platform == "win32", + reason="Fails with MinGW64 Gfortran (Issue #9673)") + @pytest.mark.xfail(IS_PYPY, + reason="PyPy cannot modify tp_doc after PyType_Ready") + def test_block_docstring(self): + expected = "bar : 'i'-array(2,3)\n" + assert self.module.block.__doc__ == expected diff --git a/local_packages/numpy/f2py/tests/test_callback.py b/local_packages/numpy/f2py/tests/test_callback.py new file mode 100644 index 0000000..6614efb --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_callback.py @@ -0,0 +1,263 @@ +import math +import platform +import sys +import textwrap +import threading +import time +import traceback + +import pytest + +import numpy as np +from numpy.testing import IS_PYPY + +from . import util + + +class TestF77Callback(util.F2PyTest): + sources = [util.getpath("tests", "src", "callback", "foo.f")] + + @pytest.mark.parametrize("name", ["t", "t2"]) + @pytest.mark.slow + def test_all(self, name): + self.check_function(name) + + @pytest.mark.xfail(IS_PYPY, + reason="PyPy cannot modify tp_doc after PyType_Ready") + def test_docstring(self): + expected = textwrap.dedent("""\ + a = t(fun,[fun_extra_args]) + + Wrapper for ``t``. + + Parameters + ---------- + fun : call-back function + + Other Parameters + ---------------- + fun_extra_args : input tuple, optional + Default: () + + Returns + ------- + a : int + + Notes + ----- + Call-back functions:: + + def fun(): return a + Return objects: + a : int + """) + assert self.module.t.__doc__ == expected + + def check_function(self, name): + t = getattr(self.module, name) + r = t(lambda: 4) + assert r == 4 + r = t(lambda a: 5, fun_extra_args=(6, )) + assert r == 5 + r = t(lambda a: a, fun_extra_args=(6, )) + assert r == 6 + r = t(lambda a: 5 + a, fun_extra_args=(7, )) + assert r == 12 + r = t(math.degrees, fun_extra_args=(math.pi, )) + assert r == 180 + r = t(math.degrees, fun_extra_args=(math.pi, )) + assert r == 180 + + r = t(self.module.func, fun_extra_args=(6, )) + assert r == 17 + r = t(self.module.func0) + assert r == 11 + r = t(self.module.func0._cpointer) + assert r == 11 + + class A: + def __call__(self): + return 7 + + def mth(self): + return 9 + + a = A() + r = t(a) + assert r == 7 + r = t(a.mth) + assert r == 9 + + @pytest.mark.skipif(sys.platform == 'win32', + reason='Fails with MinGW64 Gfortran (Issue #9673)') + def test_string_callback(self): + def callback(code): + if code == "r": + return 0 + else: + return 1 + + f = self.module.string_callback + r = f(callback) + assert r == 0 + + @pytest.mark.skipif(sys.platform == 'win32', + reason='Fails with MinGW64 Gfortran (Issue #9673)') + def test_string_callback_array(self): + # See gh-10027 + cu1 = np.zeros((1, ), "S8") + cu2 = np.zeros((1, 8), "c") + cu3 = np.array([""], "S8") + + def callback(cu, lencu): + if cu.shape != (lencu,): + return 1 + if cu.dtype != "S8": + return 2 + if not np.all(cu == b""): + return 3 + return 0 + + f = self.module.string_callback_array + for cu in [cu1, cu2, cu3]: + res = f(callback, cu, cu.size) + assert res == 0 + + def test_threadsafety(self): + # Segfaults if the callback handling is not threadsafe + + errors = [] + + def cb(): + # Sleep here to make it more likely for another thread + # to call their callback at the same time. + time.sleep(1e-3) + + # Check reentrancy + r = self.module.t(lambda: 123) + assert r == 123 + + return 42 + + def runner(name): + try: + for j in range(50): + r = self.module.t(cb) + assert r == 42 + self.check_function(name) + except Exception: + errors.append(traceback.format_exc()) + + threads = [ + threading.Thread(target=runner, args=(arg, )) + for arg in ("t", "t2") for n in range(20) + ] + + for t in threads: + t.start() + + for t in threads: + t.join() + + errors = "\n\n".join(errors) + if errors: + raise AssertionError(errors) + + def test_hidden_callback(self): + try: + self.module.hidden_callback(2) + except Exception as msg: + assert str(msg).startswith("Callback global_f not defined") + + try: + self.module.hidden_callback2(2) + except Exception as msg: + assert str(msg).startswith("cb: Callback global_f not defined") + + self.module.global_f = lambda x: x + 1 + r = self.module.hidden_callback(2) + assert r == 3 + + self.module.global_f = lambda x: x + 2 + r = self.module.hidden_callback(2) + assert r == 4 + + del self.module.global_f + try: + self.module.hidden_callback(2) + except Exception as msg: + assert str(msg).startswith("Callback global_f not defined") + + self.module.global_f = lambda x=0: x + 3 + r = self.module.hidden_callback(2) + assert r == 5 + + # reproducer of gh18341 + r = self.module.hidden_callback2(2) + assert r == 3 + + +class TestF77CallbackPythonTLS(TestF77Callback): + """ + Callback tests using Python thread-local storage instead of + compiler-provided + """ + + options = ["-DF2PY_USE_PYTHON_TLS"] + + +class TestF90Callback(util.F2PyTest): + sources = [util.getpath("tests", "src", "callback", "gh17797.f90")] + + @pytest.mark.slow + def test_gh17797(self): + def incr(x): + return x + 123 + + y = np.array([1, 2, 3], dtype=np.int64) + r = self.module.gh17797(incr, y) + assert r == 123 + 1 + 2 + 3 + + +class TestGH18335(util.F2PyTest): + """The reproduction of the reported issue requires specific input that + extensions may break the issue conditions, so the reproducer is + implemented as a separate test class. Do not extend this test with + other tests! + """ + sources = [util.getpath("tests", "src", "callback", "gh18335.f90")] + + @pytest.mark.slow + def test_gh18335(self): + def foo(x): + x[0] += 1 + + r = self.module.gh18335(foo) + assert r == 123 + 1 + + +class TestGH25211(util.F2PyTest): + sources = [util.getpath("tests", "src", "callback", "gh25211.f"), + util.getpath("tests", "src", "callback", "gh25211.pyf")] + module_name = "callback2" + + def test_gh25211(self): + def bar(x): + return x * x + + res = self.module.foo(bar) + assert res == 110 + + +@pytest.mark.slow +@pytest.mark.xfail(condition=(platform.system().lower() == 'darwin'), + run=False, + reason="Callback aborts cause CI failures on macOS") +class TestCBFortranCallstatement(util.F2PyTest): + sources = [util.getpath("tests", "src", "callback", "gh26681.f90")] + options = ['--lower'] + + def test_callstatement_fortran(self): + with pytest.raises(ValueError, match='helpme') as exc: + self.module.mypy_abort = self.module.utils.my_abort + self.module.utils.do_something('helpme') diff --git a/local_packages/numpy/f2py/tests/test_character.py b/local_packages/numpy/f2py/tests/test_character.py new file mode 100644 index 0000000..74868a6 --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_character.py @@ -0,0 +1,641 @@ +import textwrap + +import pytest + +import numpy as np +from numpy.f2py.tests import util +from numpy.testing import assert_array_equal, assert_equal, assert_raises + + +@pytest.mark.slow +class TestCharacterString(util.F2PyTest): + # options = ['--debug-capi', '--build-dir', '/tmp/test-build-f2py'] + suffix = '.f90' + fprefix = 'test_character_string' + length_list = ['1', '3', 'star'] + + code = '' + for length in length_list: + fsuffix = length + clength = {'star': '(*)'}.get(length, length) + + code += textwrap.dedent(f""" + + subroutine {fprefix}_input_{fsuffix}(c, o, n) + character*{clength}, intent(in) :: c + integer n + !f2py integer, depend(c), intent(hide) :: n = slen(c) + integer*1, dimension(n) :: o + !f2py intent(out) o + o = transfer(c, o) + end subroutine {fprefix}_input_{fsuffix} + + subroutine {fprefix}_output_{fsuffix}(c, o, n) + character*{clength}, intent(out) :: c + integer n + integer*1, dimension(n), intent(in) :: o + !f2py integer, depend(o), intent(hide) :: n = len(o) + c = transfer(o, c) + end subroutine {fprefix}_output_{fsuffix} + + subroutine {fprefix}_array_input_{fsuffix}(c, o, m, n) + integer m, i, n + character*{clength}, intent(in), dimension(m) :: c + !f2py integer, depend(c), intent(hide) :: m = len(c) + !f2py integer, depend(c), intent(hide) :: n = f2py_itemsize(c) + integer*1, dimension(m, n), intent(out) :: o + do i=1,m + o(i, :) = transfer(c(i), o(i, :)) + end do + end subroutine {fprefix}_array_input_{fsuffix} + + subroutine {fprefix}_array_output_{fsuffix}(c, o, m, n) + character*{clength}, intent(out), dimension(m) :: c + integer n + integer*1, dimension(m, n), intent(in) :: o + !f2py character(f2py_len=n) :: c + !f2py integer, depend(o), intent(hide) :: m = len(o) + !f2py integer, depend(o), intent(hide) :: n = shape(o, 1) + do i=1,m + c(i) = transfer(o(i, :), c(i)) + end do + end subroutine {fprefix}_array_output_{fsuffix} + + subroutine {fprefix}_2d_array_input_{fsuffix}(c, o, m1, m2, n) + integer m1, m2, i, j, n + character*{clength}, intent(in), dimension(m1, m2) :: c + !f2py integer, depend(c), intent(hide) :: m1 = len(c) + !f2py integer, depend(c), intent(hide) :: m2 = shape(c, 1) + !f2py integer, depend(c), intent(hide) :: n = f2py_itemsize(c) + integer*1, dimension(m1, m2, n), intent(out) :: o + do i=1,m1 + do j=1,m2 + o(i, j, :) = transfer(c(i, j), o(i, j, :)) + end do + end do + end subroutine {fprefix}_2d_array_input_{fsuffix} + """) + + @pytest.mark.parametrize("length", length_list) + def test_input(self, length): + fsuffix = {'(*)': 'star'}.get(length, length) + f = getattr(self.module, self.fprefix + '_input_' + fsuffix) + + a = {'1': 'a', '3': 'abc', 'star': 'abcde' * 3}[length] + + assert_array_equal(f(a), np.array(list(map(ord, a)), dtype='u1')) + + @pytest.mark.parametrize("length", length_list[:-1]) + def test_output(self, length): + fsuffix = length + f = getattr(self.module, self.fprefix + '_output_' + fsuffix) + + a = {'1': 'a', '3': 'abc'}[length] + + assert_array_equal(f(np.array(list(map(ord, a)), dtype='u1')), + a.encode()) + + @pytest.mark.parametrize("length", length_list) + def test_array_input(self, length): + fsuffix = length + f = getattr(self.module, self.fprefix + '_array_input_' + fsuffix) + + a = np.array([{'1': 'a', '3': 'abc', 'star': 'abcde' * 3}[length], + {'1': 'A', '3': 'ABC', 'star': 'ABCDE' * 3}[length], + ], dtype='S') + + expected = np.array([list(s) for s in a], dtype='u1') + assert_array_equal(f(a), expected) + + @pytest.mark.parametrize("length", length_list) + def test_array_output(self, length): + fsuffix = length + f = getattr(self.module, self.fprefix + '_array_output_' + fsuffix) + + expected = np.array( + [{'1': 'a', '3': 'abc', 'star': 'abcde' * 3}[length], + {'1': 'A', '3': 'ABC', 'star': 'ABCDE' * 3}[length]], dtype='S') + + a = np.array([list(s) for s in expected], dtype='u1') + assert_array_equal(f(a), expected) + + @pytest.mark.parametrize("length", length_list) + def test_2d_array_input(self, length): + fsuffix = length + f = getattr(self.module, self.fprefix + '_2d_array_input_' + fsuffix) + + a = np.array([[{'1': 'a', '3': 'abc', 'star': 'abcde' * 3}[length], + {'1': 'A', '3': 'ABC', 'star': 'ABCDE' * 3}[length]], + [{'1': 'f', '3': 'fgh', 'star': 'fghij' * 3}[length], + {'1': 'F', '3': 'FGH', 'star': 'FGHIJ' * 3}[length]]], + dtype='S') + expected = np.array([[list(item) for item in row] for row in a], + dtype='u1', order='F') + assert_array_equal(f(a), expected) + + +class TestCharacter(util.F2PyTest): + # options = ['--debug-capi', '--build-dir', '/tmp/test-build-f2py'] + suffix = '.f90' + fprefix = 'test_character' + + code = textwrap.dedent(f""" + subroutine {fprefix}_input(c, o) + character, intent(in) :: c + integer*1 o + !f2py intent(out) o + o = transfer(c, o) + end subroutine {fprefix}_input + + subroutine {fprefix}_output(c, o) + character :: c + integer*1, intent(in) :: o + !f2py intent(out) c + c = transfer(o, c) + end subroutine {fprefix}_output + + subroutine {fprefix}_input_output(c, o) + character, intent(in) :: c + character o + !f2py intent(out) o + o = c + end subroutine {fprefix}_input_output + + subroutine {fprefix}_inout(c, n) + character :: c, n + !f2py intent(in) n + !f2py intent(inout) c + c = n + end subroutine {fprefix}_inout + + function {fprefix}_return(o) result (c) + character :: c + character, intent(in) :: o + c = transfer(o, c) + end function {fprefix}_return + + subroutine {fprefix}_array_input(c, o) + character, intent(in) :: c(3) + integer*1 o(3) + !f2py intent(out) o + integer i + do i=1,3 + o(i) = transfer(c(i), o(i)) + end do + end subroutine {fprefix}_array_input + + subroutine {fprefix}_2d_array_input(c, o) + character, intent(in) :: c(2, 3) + integer*1 o(2, 3) + !f2py intent(out) o + integer i, j + do i=1,2 + do j=1,3 + o(i, j) = transfer(c(i, j), o(i, j)) + end do + end do + end subroutine {fprefix}_2d_array_input + + subroutine {fprefix}_array_output(c, o) + character :: c(3) + integer*1, intent(in) :: o(3) + !f2py intent(out) c + do i=1,3 + c(i) = transfer(o(i), c(i)) + end do + end subroutine {fprefix}_array_output + + subroutine {fprefix}_array_inout(c, n) + character :: c(3), n(3) + !f2py intent(in) n(3) + !f2py intent(inout) c(3) + do i=1,3 + c(i) = n(i) + end do + end subroutine {fprefix}_array_inout + + subroutine {fprefix}_2d_array_inout(c, n) + character :: c(2, 3), n(2, 3) + !f2py intent(in) n(2, 3) + !f2py intent(inout) c(2. 3) + integer i, j + do i=1,2 + do j=1,3 + c(i, j) = n(i, j) + end do + end do + end subroutine {fprefix}_2d_array_inout + + function {fprefix}_array_return(o) result (c) + character, dimension(3) :: c + character, intent(in) :: o(3) + do i=1,3 + c(i) = o(i) + end do + end function {fprefix}_array_return + + function {fprefix}_optional(o) result (c) + character, intent(in) :: o + !f2py character o = "a" + character :: c + c = o + end function {fprefix}_optional + """) + + @pytest.mark.parametrize("dtype", ['c', 'S1']) + def test_input(self, dtype): + f = getattr(self.module, self.fprefix + '_input') + + assert_equal(f(np.array('a', dtype=dtype)), ord('a')) + assert_equal(f(np.array(b'a', dtype=dtype)), ord('a')) + assert_equal(f(np.array(['a'], dtype=dtype)), ord('a')) + assert_equal(f(np.array('abc', dtype=dtype)), ord('a')) + assert_equal(f(np.array([['a']], dtype=dtype)), ord('a')) + + def test_input_varia(self): + f = getattr(self.module, self.fprefix + '_input') + + assert_equal(f('a'), ord('a')) + assert_equal(f(b'a'), ord(b'a')) + assert_equal(f(''), 0) + assert_equal(f(b''), 0) + assert_equal(f(b'\0'), 0) + assert_equal(f('ab'), ord('a')) + assert_equal(f(b'ab'), ord('a')) + assert_equal(f(['a']), ord('a')) + + assert_equal(f(np.array(b'a')), ord('a')) + assert_equal(f(np.array([b'a'])), ord('a')) + a = np.array('a') + assert_equal(f(a), ord('a')) + a = np.array(['a']) + assert_equal(f(a), ord('a')) + + try: + f([]) + except IndexError as msg: + if not str(msg).endswith(' got 0-list'): + raise + else: + raise SystemError(f'{f.__name__} should have failed on empty list') + + try: + f(97) + except TypeError as msg: + if not str(msg).endswith(' got int instance'): + raise + else: + raise SystemError(f'{f.__name__} should have failed on int value') + + @pytest.mark.parametrize("dtype", ['c', 'S1', 'U1']) + def test_array_input(self, dtype): + f = getattr(self.module, self.fprefix + '_array_input') + + assert_array_equal(f(np.array(['a', 'b', 'c'], dtype=dtype)), + np.array(list(map(ord, 'abc')), dtype='i1')) + assert_array_equal(f(np.array([b'a', b'b', b'c'], dtype=dtype)), + np.array(list(map(ord, 'abc')), dtype='i1')) + + def test_array_input_varia(self): + f = getattr(self.module, self.fprefix + '_array_input') + assert_array_equal(f(['a', 'b', 'c']), + np.array(list(map(ord, 'abc')), dtype='i1')) + assert_array_equal(f([b'a', b'b', b'c']), + np.array(list(map(ord, 'abc')), dtype='i1')) + + try: + f(['a', 'b', 'c', 'd']) + except ValueError as msg: + if not str(msg).endswith( + 'th dimension must be fixed to 3 but got 4'): + raise + else: + raise SystemError( + f'{f.__name__} should have failed on wrong input') + + @pytest.mark.parametrize("dtype", ['c', 'S1', 'U1']) + def test_2d_array_input(self, dtype): + f = getattr(self.module, self.fprefix + '_2d_array_input') + + a = np.array([['a', 'b', 'c'], + ['d', 'e', 'f']], dtype=dtype, order='F') + expected = a.view(np.uint32 if dtype == 'U1' else np.uint8) + assert_array_equal(f(a), expected) + + def test_output(self): + f = getattr(self.module, self.fprefix + '_output') + + assert_equal(f(ord(b'a')), b'a') + assert_equal(f(0), b'\0') + + def test_array_output(self): + f = getattr(self.module, self.fprefix + '_array_output') + + assert_array_equal(f(list(map(ord, 'abc'))), + np.array(list('abc'), dtype='S1')) + + def test_input_output(self): + f = getattr(self.module, self.fprefix + '_input_output') + + assert_equal(f(b'a'), b'a') + assert_equal(f('a'), b'a') + assert_equal(f(''), b'\0') + + @pytest.mark.parametrize("dtype", ['c', 'S1']) + def test_inout(self, dtype): + f = getattr(self.module, self.fprefix + '_inout') + + a = np.array(list('abc'), dtype=dtype) + f(a, 'A') + assert_array_equal(a, np.array(list('Abc'), dtype=a.dtype)) + f(a[1:], 'B') + assert_array_equal(a, np.array(list('ABc'), dtype=a.dtype)) + + a = np.array(['abc'], dtype=dtype) + f(a, 'A') + assert_array_equal(a, np.array(['Abc'], dtype=a.dtype)) + + def test_inout_varia(self): + f = getattr(self.module, self.fprefix + '_inout') + a = np.array('abc', dtype='S3') + f(a, 'A') + assert_array_equal(a, np.array('Abc', dtype=a.dtype)) + + a = np.array(['abc'], dtype='S3') + f(a, 'A') + assert_array_equal(a, np.array(['Abc'], dtype=a.dtype)) + + try: + f('abc', 'A') + except ValueError as msg: + if not str(msg).endswith(' got 3-str'): + raise + else: + raise SystemError(f'{f.__name__} should have failed on str value') + + @pytest.mark.parametrize("dtype", ['c', 'S1']) + def test_array_inout(self, dtype): + f = getattr(self.module, self.fprefix + '_array_inout') + n = np.array(['A', 'B', 'C'], dtype=dtype, order='F') + + a = np.array(['a', 'b', 'c'], dtype=dtype, order='F') + f(a, n) + assert_array_equal(a, n) + + a = np.array(['a', 'b', 'c', 'd'], dtype=dtype) + f(a[1:], n) + assert_array_equal(a, np.array(['a', 'A', 'B', 'C'], dtype=dtype)) + + a = np.array([['a', 'b', 'c']], dtype=dtype, order='F') + f(a, n) + assert_array_equal(a, np.array([['A', 'B', 'C']], dtype=dtype)) + + a = np.array(['a', 'b', 'c', 'd'], dtype=dtype, order='F') + try: + f(a, n) + except ValueError as msg: + if not str(msg).endswith( + 'th dimension must be fixed to 3 but got 4'): + raise + else: + raise SystemError( + f'{f.__name__} should have failed on wrong input') + + @pytest.mark.parametrize("dtype", ['c', 'S1']) + def test_2d_array_inout(self, dtype): + f = getattr(self.module, self.fprefix + '_2d_array_inout') + n = np.array([['A', 'B', 'C'], + ['D', 'E', 'F']], + dtype=dtype, order='F') + a = np.array([['a', 'b', 'c'], + ['d', 'e', 'f']], + dtype=dtype, order='F') + f(a, n) + assert_array_equal(a, n) + + def test_return(self): + f = getattr(self.module, self.fprefix + '_return') + + assert_equal(f('a'), b'a') + + @pytest.mark.skip('fortran function returning array segfaults') + def test_array_return(self): + f = getattr(self.module, self.fprefix + '_array_return') + + a = np.array(list('abc'), dtype='S1') + assert_array_equal(f(a), a) + + def test_optional(self): + f = getattr(self.module, self.fprefix + '_optional') + + assert_equal(f(), b"a") + assert_equal(f(b'B'), b"B") + + +class TestMiscCharacter(util.F2PyTest): + # options = ['--debug-capi', '--build-dir', '/tmp/test-build-f2py'] + suffix = '.f90' + fprefix = 'test_misc_character' + + code = textwrap.dedent(f""" + subroutine {fprefix}_gh18684(x, y, m) + character(len=5), dimension(m), intent(in) :: x + character*5, dimension(m), intent(out) :: y + integer i, m + !f2py integer, intent(hide), depend(x) :: m = f2py_len(x) + do i=1,m + y(i) = x(i) + end do + end subroutine {fprefix}_gh18684 + + subroutine {fprefix}_gh6308(x, i) + integer i + !f2py check(i>=0 && i<12) i + character*5 name, x + common name(12) + name(i + 1) = x + end subroutine {fprefix}_gh6308 + + subroutine {fprefix}_gh4519(x) + character(len=*), intent(in) :: x(:) + !f2py intent(out) x + integer :: i + ! Uncomment for debug printing: + !do i=1, size(x) + ! print*, "x(",i,")=", x(i) + !end do + end subroutine {fprefix}_gh4519 + + pure function {fprefix}_gh3425(x) result (y) + character(len=*), intent(in) :: x + character(len=len(x)) :: y + integer :: i + do i = 1, len(x) + j = iachar(x(i:i)) + if (j>=iachar("a") .and. j<=iachar("z") ) then + y(i:i) = achar(j-32) + else + y(i:i) = x(i:i) + endif + end do + end function {fprefix}_gh3425 + + subroutine {fprefix}_character_bc_new(x, y, z) + character, intent(in) :: x + character, intent(out) :: y + !f2py character, depend(x) :: y = x + !f2py character, dimension((x=='a'?1:2)), depend(x), intent(out) :: z + character, dimension(*) :: z + !f2py character, optional, check(x == 'a' || x == 'b') :: x = 'a' + !f2py callstatement (*f2py_func)(&x, &y, z) + !f2py callprotoargument character*, character*, character* + if (y.eq.x) then + y = x + else + y = 'e' + endif + z(1) = 'c' + end subroutine {fprefix}_character_bc_new + + subroutine {fprefix}_character_bc_old(x, y, z) + character, intent(in) :: x + character, intent(out) :: y + !f2py character, depend(x) :: y = x[0] + !f2py character, dimension((*x=='a'?1:2)), depend(x), intent(out) :: z + character, dimension(*) :: z + !f2py character, optional, check(*x == 'a' || x[0] == 'b') :: x = 'a' + !f2py callstatement (*f2py_func)(x, y, z) + !f2py callprotoargument char*, char*, char* + if (y.eq.x) then + y = x + else + y = 'e' + endif + z(1) = 'c' + end subroutine {fprefix}_character_bc_old + """) + + @pytest.mark.slow + def test_gh18684(self): + # Test character(len=5) and character*5 usages + f = getattr(self.module, self.fprefix + '_gh18684') + x = np.array(["abcde", "fghij"], dtype='S5') + y = f(x) + + assert_array_equal(x, y) + + def test_gh6308(self): + # Test character string array in a common block + f = getattr(self.module, self.fprefix + '_gh6308') + + assert_equal(self.module._BLNK_.name.dtype, np.dtype('S5')) + assert_equal(len(self.module._BLNK_.name), 12) + f("abcde", 0) + assert_equal(self.module._BLNK_.name[0], b"abcde") + f("12345", 5) + assert_equal(self.module._BLNK_.name[5], b"12345") + + def test_gh4519(self): + # Test array of assumed length strings + f = getattr(self.module, self.fprefix + '_gh4519') + + for x, expected in [ + ('a', {'shape': (), 'dtype': np.dtype('S1')}), + ('text', {'shape': (), 'dtype': np.dtype('S4')}), + (np.array(['1', '2', '3'], dtype='S1'), + {'shape': (3,), 'dtype': np.dtype('S1')}), + (['1', '2', '34'], + {'shape': (3,), 'dtype': np.dtype('S2')}), + (['', ''], {'shape': (2,), 'dtype': np.dtype('S1')})]: + r = f(x) + for k, v in expected.items(): + assert_equal(getattr(r, k), v) + + def test_gh3425(self): + # Test returning a copy of assumed length string + f = getattr(self.module, self.fprefix + '_gh3425') + # f is equivalent to bytes.upper + + assert_equal(f('abC'), b'ABC') + assert_equal(f(''), b'') + assert_equal(f('abC12d'), b'ABC12D') + + @pytest.mark.parametrize("state", ['new', 'old']) + def test_character_bc(self, state): + f = getattr(self.module, self.fprefix + '_character_bc_' + state) + + c, a = f() + assert_equal(c, b'a') + assert_equal(len(a), 1) + + c, a = f(b'b') + assert_equal(c, b'b') + assert_equal(len(a), 2) + + assert_raises(Exception, lambda: f(b'c')) + + +class TestStringScalarArr(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "scalar_string.f90")] + + def test_char(self): + for out in (self.module.string_test.string, + self.module.string_test.string77): + expected = () + assert out.shape == expected + expected = '|S8' + assert out.dtype == expected + + def test_char_arr(self): + for out in (self.module.string_test.strarr, + self.module.string_test.strarr77): + expected = (5, 7) + assert out.shape == expected + expected = '|S12' + assert out.dtype == expected + +class TestStringAssumedLength(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "gh24008.f")] + + def test_gh24008(self): + self.module.greet("joe", "bob") + +@pytest.mark.slow +class TestStringOptionalInOut(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "gh24662.f90")] + + def test_gh24662(self): + self.module.string_inout_optional() + a = np.array('hi', dtype='S32') + self.module.string_inout_optional(a) + assert "output string" in a.tobytes().decode() + with pytest.raises(Exception): # noqa: B017 + aa = "Hi" + self.module.string_inout_optional(aa) + + +@pytest.mark.slow +class TestNewCharHandling(util.F2PyTest): + # from v1.24 onwards, gh-19388 + sources = [ + util.getpath("tests", "src", "string", "gh25286.pyf"), + util.getpath("tests", "src", "string", "gh25286.f90") + ] + module_name = "_char_handling_test" + + def test_gh25286(self): + info = self.module.charint('T') + assert info == 2 + +@pytest.mark.slow +class TestBCCharHandling(util.F2PyTest): + # SciPy style, "incorrect" bindings with a hook + sources = [ + util.getpath("tests", "src", "string", "gh25286_bc.pyf"), + util.getpath("tests", "src", "string", "gh25286.f90") + ] + module_name = "_char_handling_test" + + def test_gh25286(self): + info = self.module.charint('T') + assert info == 2 diff --git a/local_packages/numpy/f2py/tests/test_common.py b/local_packages/numpy/f2py/tests/test_common.py new file mode 100644 index 0000000..b9fbd84 --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_common.py @@ -0,0 +1,23 @@ +import pytest + +import numpy as np + +from . import util + + +@pytest.mark.slow +class TestCommonBlock(util.F2PyTest): + sources = [util.getpath("tests", "src", "common", "block.f")] + + def test_common_block(self): + self.module.initcb() + assert self.module.block.long_bn == np.array(1.0, dtype=np.float64) + assert self.module.block.string_bn == np.array("2", dtype="|S1") + assert self.module.block.ok == np.array(3, dtype=np.int32) + + +class TestCommonWithUse(util.F2PyTest): + sources = [util.getpath("tests", "src", "common", "gh19161.f90")] + + def test_common_gh19161(self): + assert self.module.data.x == 0 diff --git a/local_packages/numpy/f2py/tests/test_crackfortran.py b/local_packages/numpy/f2py/tests/test_crackfortran.py new file mode 100644 index 0000000..c3967cf --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_crackfortran.py @@ -0,0 +1,421 @@ +import contextlib +import importlib +import io +import textwrap +import time + +import pytest + +import numpy as np +from numpy.f2py import crackfortran +from numpy.f2py.crackfortran import markinnerspaces, nameargspattern + +from . import util + + +class TestNoSpace(util.F2PyTest): + # issue gh-15035: add handling for endsubroutine, endfunction with no space + # between "end" and the block name + sources = [util.getpath("tests", "src", "crackfortran", "gh15035.f")] + + def test_module(self): + k = np.array([1, 2, 3], dtype=np.float64) + w = np.array([1, 2, 3], dtype=np.float64) + self.module.subb(k) + assert np.allclose(k, w + 1) + self.module.subc([w, k]) + assert np.allclose(k, w + 1) + assert self.module.t0("23") == b"2" + + +class TestPublicPrivate: + def test_defaultPrivate(self): + fpath = util.getpath("tests", "src", "crackfortran", "privatemod.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + mod = mod[0] + assert "private" in mod["vars"]["a"]["attrspec"] + assert "public" not in mod["vars"]["a"]["attrspec"] + assert "private" in mod["vars"]["b"]["attrspec"] + assert "public" not in mod["vars"]["b"]["attrspec"] + assert "private" not in mod["vars"]["seta"]["attrspec"] + assert "public" in mod["vars"]["seta"]["attrspec"] + + def test_defaultPublic(self, tmp_path): + fpath = util.getpath("tests", "src", "crackfortran", "publicmod.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + mod = mod[0] + assert "private" in mod["vars"]["a"]["attrspec"] + assert "public" not in mod["vars"]["a"]["attrspec"] + assert "private" not in mod["vars"]["seta"]["attrspec"] + assert "public" in mod["vars"]["seta"]["attrspec"] + + def test_access_type(self, tmp_path): + fpath = util.getpath("tests", "src", "crackfortran", "accesstype.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + tt = mod[0]['vars'] + assert set(tt['a']['attrspec']) == {'private', 'bind(c)'} + assert set(tt['b_']['attrspec']) == {'public', 'bind(c)'} + assert set(tt['c']['attrspec']) == {'public'} + + def test_nowrap_private_proceedures(self, tmp_path): + fpath = util.getpath("tests", "src", "crackfortran", "gh23879.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + pyf = crackfortran.crack2fortran(mod) + assert 'bar' not in pyf + +class TestModuleProcedure: + def test_moduleOperators(self, tmp_path): + fpath = util.getpath("tests", "src", "crackfortran", "operators.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + mod = mod[0] + assert "body" in mod and len(mod["body"]) == 9 + assert mod["body"][1]["name"] == "operator(.item.)" + assert "implementedby" in mod["body"][1] + assert mod["body"][1]["implementedby"] == \ + ["item_int", "item_real"] + assert mod["body"][2]["name"] == "operator(==)" + assert "implementedby" in mod["body"][2] + assert mod["body"][2]["implementedby"] == ["items_are_equal"] + assert mod["body"][3]["name"] == "assignment(=)" + assert "implementedby" in mod["body"][3] + assert mod["body"][3]["implementedby"] == \ + ["get_int", "get_real"] + + def test_notPublicPrivate(self, tmp_path): + fpath = util.getpath("tests", "src", "crackfortran", "pubprivmod.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + mod = mod[0] + assert mod['vars']['a']['attrspec'] == ['private', ] + assert mod['vars']['b']['attrspec'] == ['public', ] + assert mod['vars']['seta']['attrspec'] == ['public', ] + + +class TestExternal(util.F2PyTest): + # issue gh-17859: add external attribute support + sources = [util.getpath("tests", "src", "crackfortran", "gh17859.f")] + + def test_external_as_statement(self): + def incr(x): + return x + 123 + + r = self.module.external_as_statement(incr) + assert r == 123 + + def test_external_as_attribute(self): + def incr(x): + return x + 123 + + r = self.module.external_as_attribute(incr) + assert r == 123 + + +class TestCrackFortran(util.F2PyTest): + # gh-2848: commented lines between parameters in subroutine parameter lists + sources = [util.getpath("tests", "src", "crackfortran", "gh2848.f90"), + util.getpath("tests", "src", "crackfortran", "common_with_division.f") + ] + + def test_gh2848(self): + r = self.module.gh2848(1, 2) + assert r == (1, 2) + + def test_common_with_division(self): + assert len(self.module.mortmp.ctmp) == 11 + +class TestMarkinnerspaces: + # gh-14118: markinnerspaces does not handle multiple quotations + + def test_do_not_touch_normal_spaces(self): + test_list = ["a ", " a", "a b c", "'abcdefghij'"] + for i in test_list: + assert markinnerspaces(i) == i + + def test_one_relevant_space(self): + assert markinnerspaces("a 'b c' \\' \\'") == "a 'b@_@c' \\' \\'" + assert markinnerspaces(r'a "b c" \" \"') == r'a "b@_@c" \" \"' + + def test_ignore_inner_quotes(self): + assert markinnerspaces("a 'b c\" \" d' e") == "a 'b@_@c\"@_@\"@_@d' e" + assert markinnerspaces("a \"b c' ' d\" e") == "a \"b@_@c'@_@'@_@d\" e" + + def test_multiple_relevant_spaces(self): + assert markinnerspaces("a 'b c' 'd e'") == "a 'b@_@c' 'd@_@e'" + assert markinnerspaces(r'a "b c" "d e"') == r'a "b@_@c" "d@_@e"' + + +class TestDimSpec(util.F2PyTest): + """This test suite tests various expressions that are used as dimension + specifications. + + There exists two usage cases where analyzing dimensions + specifications are important. + + In the first case, the size of output arrays must be defined based + on the inputs to a Fortran function. Because Fortran supports + arbitrary bases for indexing, for instance, `arr(lower:upper)`, + f2py has to evaluate an expression `upper - lower + 1` where + `lower` and `upper` are arbitrary expressions of input parameters. + The evaluation is performed in C, so f2py has to translate Fortran + expressions to valid C expressions (an alternative approach is + that a developer specifies the corresponding C expressions in a + .pyf file). + + In the second case, when user provides an input array with a given + size but some hidden parameters used in dimensions specifications + need to be determined based on the input array size. This is a + harder problem because f2py has to solve the inverse problem: find + a parameter `p` such that `upper(p) - lower(p) + 1` equals to the + size of input array. In the case when this equation cannot be + solved (e.g. because the input array size is wrong), raise an + error before calling the Fortran function (that otherwise would + likely crash Python process when the size of input arrays is + wrong). f2py currently supports this case only when the equation + is linear with respect to unknown parameter. + + """ + + suffix = ".f90" + + code_template = textwrap.dedent(""" + function get_arr_size_{count}(a, n) result (length) + integer, intent(in) :: n + integer, dimension({dimspec}), intent(out) :: a + integer length + length = size(a) + end function + + subroutine get_inv_arr_size_{count}(a, n) + integer :: n + ! the value of n is computed in f2py wrapper + !f2py intent(out) n + integer, dimension({dimspec}), intent(in) :: a + if (a({first}).gt.0) then + ! print*, "a=", a + endif + end subroutine + """) + + linear_dimspecs = [ + "n", "2*n", "2:n", "n/2", "5 - n/2", "3*n:20", "n*(n+1):n*(n+5)", + "2*n, n" + ] + nonlinear_dimspecs = ["2*n:3*n*n+2*n"] + all_dimspecs = linear_dimspecs + nonlinear_dimspecs + + code = "" + for count, dimspec in enumerate(all_dimspecs): + lst = [(d.split(":")[0] if ":" in d else "1") for d in dimspec.split(',')] + code += code_template.format( + count=count, + dimspec=dimspec, + first=", ".join(lst), + ) + + @pytest.mark.parametrize("dimspec", all_dimspecs) + @pytest.mark.slow + def test_array_size(self, dimspec): + + count = self.all_dimspecs.index(dimspec) + get_arr_size = getattr(self.module, f"get_arr_size_{count}") + + for n in [1, 2, 3, 4, 5]: + sz, a = get_arr_size(n) + assert a.size == sz + + @pytest.mark.parametrize("dimspec", all_dimspecs) + def test_inv_array_size(self, dimspec): + + count = self.all_dimspecs.index(dimspec) + get_arr_size = getattr(self.module, f"get_arr_size_{count}") + get_inv_arr_size = getattr(self.module, f"get_inv_arr_size_{count}") + + for n in [1, 2, 3, 4, 5]: + sz, a = get_arr_size(n) + if dimspec in self.nonlinear_dimspecs: + # one must specify n as input, the call we'll ensure + # that a and n are compatible: + n1 = get_inv_arr_size(a, n) + else: + # in case of linear dependence, n can be determined + # from the shape of a: + n1 = get_inv_arr_size(a) + # n1 may be different from n (for instance, when `a` size + # is a function of some `n` fraction) but it must produce + # the same sized array + sz1, _ = get_arr_size(n1) + assert sz == sz1, (n, n1, sz, sz1) + + +class TestModuleDeclaration: + def test_dependencies(self, tmp_path): + fpath = util.getpath("tests", "src", "crackfortran", "foo_deps.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + assert mod[0]["vars"]["abar"]["="] == "bar('abar')" + + +class TestEval(util.F2PyTest): + def test_eval_scalar(self): + eval_scalar = crackfortran._eval_scalar + + assert eval_scalar('123', {}) == '123' + assert eval_scalar('12 + 3', {}) == '15' + assert eval_scalar('a + b', {"a": 1, "b": 2}) == '3' + assert eval_scalar('"123"', {}) == "'123'" + + +class TestFortranReader(util.F2PyTest): + @pytest.mark.parametrize("encoding", + ['ascii', 'utf-8', 'utf-16', 'utf-32']) + def test_input_encoding(self, tmp_path, encoding): + # gh-635 + f_path = tmp_path / f"input_with_{encoding}_encoding.f90" + with f_path.open('w', encoding=encoding) as ff: + ff.write(""" + subroutine foo() + end subroutine foo + """) + mod = crackfortran.crackfortran([str(f_path)]) + assert mod[0]['name'] == 'foo' + + +@pytest.mark.slow +class TestUnicodeComment(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "unicode_comment.f90")] + + @pytest.mark.skipif( + (importlib.util.find_spec("charset_normalizer") is None), + reason="test requires charset_normalizer which is not installed", + ) + def test_encoding_comment(self): + self.module.foo(3) + + +class TestNameArgsPatternBacktracking: + @pytest.mark.parametrize( + ['adversary'], + [ + ('@)@bind@(@',), + ('@)@bind @(@',), + ('@)@bind foo bar baz@(@',) + ] + ) + def test_nameargspattern_backtracking(self, adversary): + '''address ReDOS vulnerability: + https://github.com/numpy/numpy/issues/23338''' + trials_per_batch = 12 + batches_per_regex = 4 + start_reps, end_reps = 15, 25 + for ii in range(start_reps, end_reps): + repeated_adversary = adversary * ii + # test times in small batches. + # this gives us more chances to catch a bad regex + # while still catching it before too long if it is bad + for _ in range(batches_per_regex): + times = [] + for _ in range(trials_per_batch): + t0 = time.perf_counter() + mtch = nameargspattern.search(repeated_adversary) + times.append(time.perf_counter() - t0) + # our pattern should be much faster than 0.2s per search + # it's unlikely that a bad regex will pass even on fast CPUs + assert np.median(times) < 0.2 + assert not mtch + # if the adversary is capped with @)@, it becomes acceptable + # according to the old version of the regex. + # that should still be true. + good_version_of_adversary = repeated_adversary + '@)@' + assert nameargspattern.search(good_version_of_adversary) + +class TestFunctionReturn(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "gh23598.f90")] + + @pytest.mark.slow + def test_function_rettype(self): + # gh-23598 + assert self.module.intproduct(3, 4) == 12 + + +class TestFortranGroupCounters(util.F2PyTest): + def test_end_if_comment(self): + # gh-23533 + fpath = util.getpath("tests", "src", "crackfortran", "gh23533.f") + try: + crackfortran.crackfortran([str(fpath)]) + except Exception as exc: + assert False, f"'crackfortran.crackfortran' raised an exception {exc}" + + +class TestF77CommonBlockReader: + def test_gh22648(self, tmp_path): + fpath = util.getpath("tests", "src", "crackfortran", "gh22648.pyf") + with contextlib.redirect_stdout(io.StringIO()) as stdout_f2py: + mod = crackfortran.crackfortran([str(fpath)]) + assert "Mismatch" not in stdout_f2py.getvalue() + +class TestParamEval: + # issue gh-11612, array parameter parsing + def test_param_eval_nested(self): + v = '(/3.14, 4./)' + g_params = {"kind": crackfortran._kind_func, + "selected_int_kind": crackfortran._selected_int_kind_func, + "selected_real_kind": crackfortran._selected_real_kind_func} + params = {'dp': 8, 'intparamarray': {1: 3, 2: 5}, + 'nested': {1: 1, 2: 2, 3: 3}} + dimspec = '(2)' + ret = crackfortran.param_eval(v, g_params, params, dimspec=dimspec) + assert ret == {1: 3.14, 2: 4.0} + + def test_param_eval_nonstandard_range(self): + v = '(/ 6, 3, 1 /)' + g_params = {"kind": crackfortran._kind_func, + "selected_int_kind": crackfortran._selected_int_kind_func, + "selected_real_kind": crackfortran._selected_real_kind_func} + params = {} + dimspec = '(-1:1)' + ret = crackfortran.param_eval(v, g_params, params, dimspec=dimspec) + assert ret == {-1: 6, 0: 3, 1: 1} + + def test_param_eval_empty_range(self): + v = '6' + g_params = {"kind": crackfortran._kind_func, + "selected_int_kind": crackfortran._selected_int_kind_func, + "selected_real_kind": crackfortran._selected_real_kind_func} + params = {} + dimspec = '' + pytest.raises(ValueError, crackfortran.param_eval, v, g_params, params, + dimspec=dimspec) + + def test_param_eval_non_array_param(self): + v = '3.14_dp' + g_params = {"kind": crackfortran._kind_func, + "selected_int_kind": crackfortran._selected_int_kind_func, + "selected_real_kind": crackfortran._selected_real_kind_func} + params = {} + ret = crackfortran.param_eval(v, g_params, params, dimspec=None) + assert ret == '3.14_dp' + + def test_param_eval_too_many_dims(self): + v = 'reshape((/ (i, i=1, 250) /), (/5, 10, 5/))' + g_params = {"kind": crackfortran._kind_func, + "selected_int_kind": crackfortran._selected_int_kind_func, + "selected_real_kind": crackfortran._selected_real_kind_func} + params = {} + dimspec = '(0:4, 3:12, 5)' + pytest.raises(ValueError, crackfortran.param_eval, v, g_params, params, + dimspec=dimspec) + +@pytest.mark.slow +class TestLowerF2PYDirective(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "gh27697.f90")] + options = ['--lower'] + + def test_no_lower_fail(self): + with pytest.raises(ValueError, match='aborting directly') as exc: + self.module.utils.my_abort('aborting directly') diff --git a/local_packages/numpy/f2py/tests/test_data.py b/local_packages/numpy/f2py/tests/test_data.py new file mode 100644 index 0000000..0cea556 --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_data.py @@ -0,0 +1,71 @@ +import pytest + +import numpy as np +from numpy.f2py.crackfortran import crackfortran + +from . import util + + +class TestData(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "data_stmts.f90")] + + # For gh-23276 + @pytest.mark.slow + def test_data_stmts(self): + assert self.module.cmplxdat.i == 2 + assert self.module.cmplxdat.j == 3 + assert self.module.cmplxdat.x == 1.5 + assert self.module.cmplxdat.y == 2.0 + assert self.module.cmplxdat.pi == 3.1415926535897932384626433832795028841971693993751058209749445923078164062 + assert self.module.cmplxdat.medium_ref_index == np.array(1. + 0.j) + assert np.all(self.module.cmplxdat.z == np.array([3.5, 7.0])) + assert np.all(self.module.cmplxdat.my_array == np.array([ 1. + 2.j, -3. + 4.j])) + assert np.all(self.module.cmplxdat.my_real_array == np.array([ 1., 2., 3.])) + assert np.all(self.module.cmplxdat.ref_index_one == np.array([13.0 + 21.0j])) + assert np.all(self.module.cmplxdat.ref_index_two == np.array([-30.0 + 43.0j])) + + def test_crackedlines(self): + mod = crackfortran(self.sources) + assert mod[0]['vars']['x']['='] == '1.5' + assert mod[0]['vars']['y']['='] == '2.0' + assert mod[0]['vars']['pi']['='] == '3.1415926535897932384626433832795028841971693993751058209749445923078164062d0' + assert mod[0]['vars']['my_real_array']['='] == '(/1.0d0, 2.0d0, 3.0d0/)' + assert mod[0]['vars']['ref_index_one']['='] == '(13.0d0, 21.0d0)' + assert mod[0]['vars']['ref_index_two']['='] == '(-30.0d0, 43.0d0)' + assert mod[0]['vars']['my_array']['='] == '(/(1.0d0, 2.0d0), (-3.0d0, 4.0d0)/)' + assert mod[0]['vars']['z']['='] == '(/3.5, 7.0/)' + +class TestDataF77(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "data_common.f")] + + # For gh-23276 + def test_data_stmts(self): + assert self.module.mycom.mydata == 0 + + def test_crackedlines(self): + mod = crackfortran(str(self.sources[0])) + print(mod[0]['vars']) + assert mod[0]['vars']['mydata']['='] == '0' + + +class TestDataMultiplierF77(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "data_multiplier.f")] + + # For gh-23276 + def test_data_stmts(self): + assert self.module.mycom.ivar1 == 3 + assert self.module.mycom.ivar2 == 3 + assert self.module.mycom.ivar3 == 2 + assert self.module.mycom.ivar4 == 2 + assert self.module.mycom.evar5 == 0 + + +class TestDataWithCommentsF77(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "data_with_comments.f")] + + # For gh-23276 + def test_data_stmts(self): + assert len(self.module.mycom.mytab) == 3 + assert self.module.mycom.mytab[0] == 0 + assert self.module.mycom.mytab[1] == 4 + assert self.module.mycom.mytab[2] == 0 diff --git a/local_packages/numpy/f2py/tests/test_docs.py b/local_packages/numpy/f2py/tests/test_docs.py new file mode 100644 index 0000000..7015af3 --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_docs.py @@ -0,0 +1,66 @@ +from pathlib import Path + +import pytest + +import numpy as np +from numpy.testing import assert_array_equal, assert_equal + +from . import util + + +def get_docdir(): + parents = Path(__file__).resolve().parents + try: + # Assumes that spin is used to run tests + nproot = parents[8] + except IndexError: + docdir = None + else: + docdir = nproot / "doc" / "source" / "f2py" / "code" + if docdir and docdir.is_dir(): + return docdir + # Assumes that an editable install is used to run tests + return parents[3] / "doc" / "source" / "f2py" / "code" + + +pytestmark = pytest.mark.skipif( + not get_docdir().is_dir(), + reason=f"Could not find f2py documentation sources" + f"({get_docdir()} does not exist)", +) + +def _path(*args): + return get_docdir().joinpath(*args) + +@pytest.mark.slow +class TestDocAdvanced(util.F2PyTest): + # options = ['--debug-capi', '--build-dir', '/tmp/build-f2py'] + sources = [_path('asterisk1.f90'), _path('asterisk2.f90'), + _path('ftype.f')] + + def test_asterisk1(self): + foo = self.module.foo1 + assert_equal(foo(), b'123456789A12') + + def test_asterisk2(self): + foo = self.module.foo2 + assert_equal(foo(2), b'12') + assert_equal(foo(12), b'123456789A12') + assert_equal(foo(20), b'123456789A123456789B') + + def test_ftype(self): + ftype = self.module + ftype.foo() + assert_equal(ftype.data.a, 0) + ftype.data.a = 3 + ftype.data.x = [1, 2, 3] + assert_equal(ftype.data.a, 3) + assert_array_equal(ftype.data.x, + np.array([1, 2, 3], dtype=np.float32)) + ftype.data.x[1] = 45 + assert_array_equal(ftype.data.x, + np.array([1, 45, 3], dtype=np.float32)) + # gh-26718 Cleanup for repeated test runs + ftype.data.a = 0 + + # TODO: implement test methods for other example Fortran codes diff --git a/local_packages/numpy/f2py/tests/test_f2cmap.py b/local_packages/numpy/f2py/tests/test_f2cmap.py new file mode 100644 index 0000000..a35320c --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_f2cmap.py @@ -0,0 +1,17 @@ +import numpy as np + +from . import util + + +class TestF2Cmap(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "f2cmap", "isoFortranEnvMap.f90"), + util.getpath("tests", "src", "f2cmap", ".f2py_f2cmap") + ] + + # gh-15095 + def test_gh15095(self): + inp = np.ones(3) + out = self.module.func1(inp) + exp_out = 3 + assert out == exp_out diff --git a/local_packages/numpy/f2py/tests/test_f2py2e.py b/local_packages/numpy/f2py/tests/test_f2py2e.py new file mode 100644 index 0000000..959e152 --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_f2py2e.py @@ -0,0 +1,983 @@ +import platform +import re +import shlex +import subprocess +import sys +import textwrap +from collections import namedtuple +from pathlib import Path + +import pytest + +from numpy.f2py.f2py2e import main as f2pycli +from numpy.testing._private.utils import NOGIL_BUILD + +from . import util + +####################### +# F2PY Test utilities # +###################### + +# Tests for CLI commands which call meson will fail if no compilers are present, these are to be skipped + +def compiler_check_f2pycli(): + if not util.has_fortran_compiler(): + pytest.skip("CLI command needs a Fortran compiler") + else: + f2pycli() + +######################### +# CLI utils and classes # +######################### + + +PPaths = namedtuple("PPaths", "finp, f90inp, pyf, wrap77, wrap90, cmodf") + + +def get_io_paths(fname_inp, mname="untitled"): + """Takes in a temporary file for testing and returns the expected output and input paths + + Here expected output is essentially one of any of the possible generated + files. + + ..note:: + + Since this does not actually run f2py, none of these are guaranteed to + exist, and module names are typically incorrect + + Parameters + ---------- + fname_inp : str + The input filename + mname : str, optional + The name of the module, untitled by default + + Returns + ------- + genp : NamedTuple PPaths + The possible paths which are generated, not all of which exist + """ + bpath = Path(fname_inp) + return PPaths( + finp=bpath.with_suffix(".f"), + f90inp=bpath.with_suffix(".f90"), + pyf=bpath.with_suffix(".pyf"), + wrap77=bpath.with_name(f"{mname}-f2pywrappers.f"), + wrap90=bpath.with_name(f"{mname}-f2pywrappers2.f90"), + cmodf=bpath.with_name(f"{mname}module.c"), + ) + + +################ +# CLI Fixtures # +################ + + +@pytest.fixture(scope="session") +def hello_world_f90(tmpdir_factory): + """Generates a single f90 file for testing""" + fdat = util.getpath("tests", "src", "cli", "hiworld.f90").read_text() + fn = tmpdir_factory.getbasetemp() / "hello.f90" + fn.write_text(fdat, encoding="ascii") + return fn + + +@pytest.fixture(scope="session") +def gh23598_warn(tmpdir_factory): + """F90 file for testing warnings in gh23598""" + fdat = util.getpath("tests", "src", "crackfortran", "gh23598Warn.f90").read_text() + fn = tmpdir_factory.getbasetemp() / "gh23598Warn.f90" + fn.write_text(fdat, encoding="ascii") + return fn + + +@pytest.fixture(scope="session") +def gh22819_cli(tmpdir_factory): + """F90 file for testing disallowed CLI arguments in ghff819""" + fdat = util.getpath("tests", "src", "cli", "gh_22819.pyf").read_text() + fn = tmpdir_factory.getbasetemp() / "gh_22819.pyf" + fn.write_text(fdat, encoding="ascii") + return fn + + +@pytest.fixture(scope="session") +def hello_world_f77(tmpdir_factory): + """Generates a single f77 file for testing""" + fdat = util.getpath("tests", "src", "cli", "hi77.f").read_text() + fn = tmpdir_factory.getbasetemp() / "hello.f" + fn.write_text(fdat, encoding="ascii") + return fn + + +@pytest.fixture(scope="session") +def retreal_f77(tmpdir_factory): + """Generates a single f77 file for testing""" + fdat = util.getpath("tests", "src", "return_real", "foo77.f").read_text() + fn = tmpdir_factory.getbasetemp() / "foo.f" + fn.write_text(fdat, encoding="ascii") + return fn + +@pytest.fixture(scope="session") +def f2cmap_f90(tmpdir_factory): + """Generates a single f90 file for testing""" + fdat = util.getpath("tests", "src", "f2cmap", "isoFortranEnvMap.f90").read_text() + f2cmap = util.getpath("tests", "src", "f2cmap", ".f2py_f2cmap").read_text() + fn = tmpdir_factory.getbasetemp() / "f2cmap.f90" + fmap = tmpdir_factory.getbasetemp() / "mapfile" + fn.write_text(fdat, encoding="ascii") + fmap.write_text(f2cmap, encoding="ascii") + return fn + +######### +# Tests # +######### + +def test_gh22819_cli(capfd, gh22819_cli, monkeypatch): + """Check that module names are handled correctly + gh-22819 + Essentially, the -m name cannot be used to import the module, so the module + named in the .pyf needs to be used instead + + CLI :: -m and a .pyf file + """ + ipath = Path(gh22819_cli) + monkeypatch.setattr(sys, "argv", f"f2py -m blah {ipath}".split()) + with util.switchdir(ipath.parent): + f2pycli() + gen_paths = [item.name for item in ipath.parent.rglob("*") if item.is_file()] + assert "blahmodule.c" not in gen_paths # shouldn't be generated + assert "blah-f2pywrappers.f" not in gen_paths + assert "test_22819-f2pywrappers.f" in gen_paths + assert "test_22819module.c" in gen_paths + + +def test_gh22819_many_pyf(capfd, gh22819_cli, monkeypatch): + """Only one .pyf file allowed + gh-22819 + CLI :: .pyf files + """ + ipath = Path(gh22819_cli) + monkeypatch.setattr(sys, "argv", f"f2py -m blah {ipath} hello.pyf".split()) + with util.switchdir(ipath.parent): + with pytest.raises(ValueError, match="Only one .pyf file per call"): + f2pycli() + + +def test_gh23598_warn(capfd, gh23598_warn, monkeypatch): + foutl = get_io_paths(gh23598_warn, mname="test") + ipath = foutl.f90inp + monkeypatch.setattr( + sys, "argv", + f'f2py {ipath} -m test'.split()) + + with util.switchdir(ipath.parent): + f2pycli() # Generate files + wrapper = foutl.wrap90.read_text() + assert "intproductf2pywrap, intpr" not in wrapper + + +def test_gen_pyf(capfd, hello_world_f90, monkeypatch): + """Ensures that a signature file is generated via the CLI + CLI :: -h + """ + ipath = Path(hello_world_f90) + opath = Path(hello_world_f90).stem + ".pyf" + monkeypatch.setattr(sys, "argv", f'f2py -h {opath} {ipath}'.split()) + + with util.switchdir(ipath.parent): + f2pycli() # Generate wrappers + out, _ = capfd.readouterr() + assert "Saving signatures to file" in out + assert Path(f'{opath}').exists() + + +def test_gen_pyf_stdout(capfd, hello_world_f90, monkeypatch): + """Ensures that a signature file can be dumped to stdout + CLI :: -h + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -h stdout {ipath}'.split()) + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "Saving signatures to file" in out + assert "function hi() ! in " in out + + +def test_gen_pyf_no_overwrite(capfd, hello_world_f90, monkeypatch): + """Ensures that the CLI refuses to overwrite signature files + CLI :: -h without --overwrite-signature + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -h faker.pyf {ipath}'.split()) + + with util.switchdir(ipath.parent): + Path("faker.pyf").write_text("Fake news", encoding="ascii") + with pytest.raises(SystemExit): + f2pycli() # Refuse to overwrite + _, err = capfd.readouterr() + assert "Use --overwrite-signature to overwrite" in err + + +@pytest.mark.skipif(sys.version_info <= (3, 12), reason="Python 3.12 required") +def test_untitled_cli(capfd, hello_world_f90, monkeypatch): + """Check that modules are named correctly + + CLI :: defaults + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f"f2py --backend meson -c {ipath}".split()) + with util.switchdir(ipath.parent): + compiler_check_f2pycli() + out, _ = capfd.readouterr() + assert "untitledmodule.c" in out + + +@pytest.mark.skipif((platform.system() != 'Linux') or (sys.version_info <= (3, 12)), reason='Compiler and 3.12 required') +def test_no_py312_distutils_fcompiler(capfd, hello_world_f90, monkeypatch): + """Check that no distutils imports are performed on 3.12 + CLI :: --fcompiler --help-link --backend distutils + """ + MNAME = "hi" + foutl = get_io_paths(hello_world_f90, mname=MNAME) + ipath = foutl.f90inp + monkeypatch.setattr( + sys, "argv", f"f2py {ipath} -c --fcompiler=gfortran -m {MNAME}".split() + ) + with util.switchdir(ipath.parent): + compiler_check_f2pycli() + out, _ = capfd.readouterr() + assert "--fcompiler cannot be used with meson" in out + monkeypatch.setattr( + sys, "argv", ["f2py", "--help-link"] + ) + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "Use --dep for meson builds" in out + MNAME = "hi2" # Needs to be different for a new -c + monkeypatch.setattr( + sys, "argv", f"f2py {ipath} -c -m {MNAME} --backend distutils".split() + ) + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "Cannot use distutils backend with Python>=3.12" in out + + +@pytest.mark.xfail +def test_f2py_skip(capfd, retreal_f77, monkeypatch): + """Tests that functions can be skipped + CLI :: skip: + """ + foutl = get_io_paths(retreal_f77, mname="test") + ipath = foutl.finp + toskip = "t0 t4 t8 sd s8 s4" + remaining = "td s0" + monkeypatch.setattr( + sys, "argv", + f'f2py {ipath} -m test skip: {toskip}'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, err = capfd.readouterr() + for skey in toskip.split(): + assert ( + f'buildmodule: Could not found the body of interfaced routine "{skey}". Skipping.' + in err) + for rkey in remaining.split(): + assert f'Constructing wrapper function "{rkey}"' in out + + +def test_f2py_only(capfd, retreal_f77, monkeypatch): + """Test that functions can be kept by only: + CLI :: only: + """ + foutl = get_io_paths(retreal_f77, mname="test") + ipath = foutl.finp + toskip = "t0 t4 t8 sd s8 s4" + tokeep = "td s0" + monkeypatch.setattr( + sys, "argv", + f'f2py {ipath} -m test only: {tokeep}'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, err = capfd.readouterr() + for skey in toskip.split(): + assert ( + f'buildmodule: Could not find the body of interfaced routine "{skey}". Skipping.' + in err) + for rkey in tokeep.split(): + assert f'Constructing wrapper function "{rkey}"' in out + + +def test_file_processing_switch(capfd, hello_world_f90, retreal_f77, + monkeypatch): + """Tests that it is possible to return to file processing mode + CLI :: : + BUG: numpy-gh #20520 + """ + foutl = get_io_paths(retreal_f77, mname="test") + ipath = foutl.finp + toskip = "t0 t4 t8 sd s8 s4" + ipath2 = Path(hello_world_f90) + tokeep = "td s0 hi" # hi is in ipath2 + mname = "blah" + monkeypatch.setattr( + sys, + "argv", + f'f2py {ipath} -m {mname} only: {tokeep} : {ipath2}'.split( + ), + ) + + with util.switchdir(ipath.parent): + f2pycli() + out, err = capfd.readouterr() + for skey in toskip.split(): + assert ( + f'buildmodule: Could not find the body of interfaced routine "{skey}". Skipping.' + in err) + for rkey in tokeep.split(): + assert f'Constructing wrapper function "{rkey}"' in out + + +def test_mod_gen_f77(capfd, hello_world_f90, monkeypatch): + """Checks the generation of files based on a module name + CLI :: -m + """ + MNAME = "hi" + foutl = get_io_paths(hello_world_f90, mname=MNAME) + ipath = foutl.f90inp + monkeypatch.setattr(sys, "argv", f'f2py {ipath} -m {MNAME}'.split()) + with util.switchdir(ipath.parent): + f2pycli() + + # Always generate C module + assert Path.exists(foutl.cmodf) + # File contains a function, check for F77 wrappers + assert Path.exists(foutl.wrap77) + + +def test_mod_gen_gh25263(capfd, hello_world_f77, monkeypatch): + """Check that pyf files are correctly generated with module structure + CLI :: -m -h pyf_file + BUG: numpy-gh #20520 + """ + MNAME = "hi" + foutl = get_io_paths(hello_world_f77, mname=MNAME) + ipath = foutl.finp + monkeypatch.setattr(sys, "argv", f'f2py {ipath} -m {MNAME} -h hi.pyf'.split()) + with util.switchdir(ipath.parent): + f2pycli() + with Path('hi.pyf').open() as hipyf: + pyfdat = hipyf.read() + assert "python module hi" in pyfdat + + +def test_lower_cmod(capfd, hello_world_f77, monkeypatch): + """Lowers cases by flag or when -h is present + + CLI :: --[no-]lower + """ + foutl = get_io_paths(hello_world_f77, mname="test") + ipath = foutl.finp + capshi = re.compile(r"HI\(\)") + capslo = re.compile(r"hi\(\)") + # Case I: --lower is passed + monkeypatch.setattr(sys, "argv", f'f2py {ipath} -m test --lower'.split()) + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert capslo.search(out) is not None + assert capshi.search(out) is None + # Case II: --no-lower is passed + monkeypatch.setattr(sys, "argv", + f'f2py {ipath} -m test --no-lower'.split()) + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert capslo.search(out) is None + assert capshi.search(out) is not None + + +def test_lower_sig(capfd, hello_world_f77, monkeypatch): + """Lowers cases in signature files by flag or when -h is present + + CLI :: --[no-]lower -h + """ + foutl = get_io_paths(hello_world_f77, mname="test") + ipath = foutl.finp + # Signature files + capshi = re.compile(r"Block: HI") + capslo = re.compile(r"Block: hi") + # Case I: --lower is implied by -h + # TODO: Clean up to prevent passing --overwrite-signature + monkeypatch.setattr( + sys, + "argv", + f'f2py {ipath} -h {foutl.pyf} -m test --overwrite-signature'.split(), + ) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert capslo.search(out) is not None + assert capshi.search(out) is None + + # Case II: --no-lower overrides -h + monkeypatch.setattr( + sys, + "argv", + f'f2py {ipath} -h {foutl.pyf} -m test --overwrite-signature --no-lower' + .split(), + ) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert capslo.search(out) is None + assert capshi.search(out) is not None + + +def test_build_dir(capfd, hello_world_f90, monkeypatch): + """Ensures that the build directory can be specified + + CLI :: --build-dir + """ + ipath = Path(hello_world_f90) + mname = "blah" + odir = "tttmp" + monkeypatch.setattr(sys, "argv", + f'f2py -m {mname} {ipath} --build-dir {odir}'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert f"Wrote C/API module \"{mname}\"" in out + + +def test_overwrite(capfd, hello_world_f90, monkeypatch): + """Ensures that the build directory can be specified + + CLI :: --overwrite-signature + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr( + sys, "argv", + f'f2py -h faker.pyf {ipath} --overwrite-signature'.split()) + + with util.switchdir(ipath.parent): + Path("faker.pyf").write_text("Fake news", encoding="ascii") + f2pycli() + out, _ = capfd.readouterr() + assert "Saving signatures to file" in out + + +def test_latexdoc(capfd, hello_world_f90, monkeypatch): + """Ensures that TeX documentation is written out + + CLI :: --latex-doc + """ + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr(sys, "argv", + f'f2py -m {mname} {ipath} --latex-doc'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "Documentation is saved to file" in out + with Path(f"{mname}module.tex").open() as otex: + assert "\\documentclass" in otex.read() + + +def test_nolatexdoc(capfd, hello_world_f90, monkeypatch): + """Ensures that TeX documentation is written out + + CLI :: --no-latex-doc + """ + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr(sys, "argv", + f'f2py -m {mname} {ipath} --no-latex-doc'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "Documentation is saved to file" not in out + + +def test_shortlatex(capfd, hello_world_f90, monkeypatch): + """Ensures that truncated documentation is written out + + TODO: Test to ensure this has no effect without --latex-doc + CLI :: --latex-doc --short-latex + """ + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr( + sys, + "argv", + f'f2py -m {mname} {ipath} --latex-doc --short-latex'.split(), + ) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "Documentation is saved to file" in out + with Path(f"./{mname}module.tex").open() as otex: + assert "\\documentclass" not in otex.read() + + +def test_restdoc(capfd, hello_world_f90, monkeypatch): + """Ensures that RsT documentation is written out + + CLI :: --rest-doc + """ + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr(sys, "argv", + f'f2py -m {mname} {ipath} --rest-doc'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "ReST Documentation is saved to file" in out + with Path(f"./{mname}module.rest").open() as orst: + assert r".. -*- rest -*-" in orst.read() + + +def test_norestexdoc(capfd, hello_world_f90, monkeypatch): + """Ensures that TeX documentation is written out + + CLI :: --no-rest-doc + """ + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr(sys, "argv", + f'f2py -m {mname} {ipath} --no-rest-doc'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "ReST Documentation is saved to file" not in out + + +def test_debugcapi(capfd, hello_world_f90, monkeypatch): + """Ensures that debugging wrappers are written + + CLI :: --debug-capi + """ + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr(sys, "argv", + f'f2py -m {mname} {ipath} --debug-capi'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + with Path(f"./{mname}module.c").open() as ocmod: + assert r"#define DEBUGCFUNCS" in ocmod.read() + + +@pytest.mark.skip(reason="Consistently fails on CI; noisy so skip not xfail.") +def test_debugcapi_bld(hello_world_f90, monkeypatch): + """Ensures that debugging wrappers work + + CLI :: --debug-capi -c + """ + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr(sys, "argv", + f'f2py -m {mname} {ipath} -c --debug-capi'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + cmd_run = shlex.split(f"{sys.executable} -c \"import blah; blah.hi()\"") + rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8') + eout = ' Hello World\n' + eerr = textwrap.dedent("""\ +debug-capi:Python C/API function blah.hi() +debug-capi:float hi=:output,hidden,scalar +debug-capi:hi=0 +debug-capi:Fortran subroutine `f2pywraphi(&hi)' +debug-capi:hi=0 +debug-capi:Building return value. +debug-capi:Python C/API function blah.hi: successful. +debug-capi:Freeing memory. + """) + assert rout.stdout == eout + assert rout.stderr == eerr + + +def test_wrapfunc_def(capfd, hello_world_f90, monkeypatch): + """Ensures that fortran subroutine wrappers for F77 are included by default + + CLI :: --[no]-wrap-functions + """ + # Implied + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr(sys, "argv", f'f2py -m {mname} {ipath}'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert r"Fortran 77 wrappers are saved to" in out + + # Explicit + monkeypatch.setattr(sys, "argv", + f'f2py -m {mname} {ipath} --wrap-functions'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert r"Fortran 77 wrappers are saved to" in out + + +def test_nowrapfunc(capfd, hello_world_f90, monkeypatch): + """Ensures that fortran subroutine wrappers for F77 can be disabled + + CLI :: --no-wrap-functions + """ + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr(sys, "argv", + f'f2py -m {mname} {ipath} --no-wrap-functions'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert r"Fortran 77 wrappers are saved to" not in out + + +def test_inclheader(capfd, hello_world_f90, monkeypatch): + """Add to the include directories + + CLI :: -include + TODO: Document this in the help string + """ + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr( + sys, + "argv", + f'f2py -m {mname} {ipath} -include -include '. + split(), + ) + + with util.switchdir(ipath.parent): + f2pycli() + with Path(f"./{mname}module.c").open() as ocmod: + ocmr = ocmod.read() + assert "#include " in ocmr + assert "#include " in ocmr + +@pytest.mark.skipif((platform.system() != 'Linux'), reason='Compiler required') +def test_cli_obj(capfd, hello_world_f90, monkeypatch): + """Ensures that the extra object can be specified when using meson backend + """ + ipath = Path(hello_world_f90) + mname = "blah" + odir = "tttmp" + obj = "extra.o" + monkeypatch.setattr(sys, "argv", + f'f2py --backend meson --build-dir {odir} -m {mname} -c {obj} {ipath}'.split()) + + with util.switchdir(ipath.parent): + Path(obj).touch() + compiler_check_f2pycli() + with Path(f"{odir}/meson.build").open() as mesonbuild: + mbld = mesonbuild.read() + assert "objects:" in mbld + assert f"'''{obj}'''" in mbld + + +def test_inclpath(): + """Add to the include directories + + CLI :: --include-paths + """ + # TODO: populate + pass + + +def test_hlink(): + """Add to the include directories + + CLI :: --help-link + """ + # TODO: populate + pass + + +def test_f2cmap(capfd, f2cmap_f90, monkeypatch): + """Check that Fortran-to-Python KIND specs can be passed + + CLI :: --f2cmap + """ + ipath = Path(f2cmap_f90) + monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} --f2cmap mapfile'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "Reading f2cmap from 'mapfile' ..." in out + assert "Mapping \"real(kind=real32)\" to \"float\"" in out + assert "Mapping \"real(kind=real64)\" to \"double\"" in out + assert "Mapping \"integer(kind=int64)\" to \"long_long\"" in out + assert "Successfully applied user defined f2cmap changes" in out + + +def test_quiet(capfd, hello_world_f90, monkeypatch): + """Reduce verbosity + + CLI :: --quiet + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} --quiet'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert len(out) == 0 + + +def test_verbose(capfd, hello_world_f90, monkeypatch): + """Increase verbosity + + CLI :: --verbose + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} --verbose'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "analyzeline" in out + + +def test_version(capfd, monkeypatch): + """Ensure version + + CLI :: -v + """ + monkeypatch.setattr(sys, "argv", ["f2py", "-v"]) + # TODO: f2py2e should not call sys.exit() after printing the version + with pytest.raises(SystemExit): + f2pycli() + out, _ = capfd.readouterr() + import numpy as np + assert np.__version__ == out.strip() + + +@pytest.mark.skip(reason="Consistently fails on CI; noisy so skip not xfail.") +def test_npdistop(hello_world_f90, monkeypatch): + """ + CLI :: -c + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} -c'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + cmd_run = shlex.split(f"{sys.executable} -c \"import blah; blah.hi()\"") + rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8') + eout = ' Hello World\n' + assert rout.stdout == eout + + +@pytest.mark.skipif((platform.system() != 'Linux') or sys.version_info <= (3, 12), + reason='Compiler and Python 3.12 or newer required') +def test_no_freethreading_compatible(hello_world_f90, monkeypatch): + """ + CLI :: --no-freethreading-compatible + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} -c --no-freethreading-compatible'.split()) + + with util.switchdir(ipath.parent): + compiler_check_f2pycli() + cmd = f"{sys.executable} -c \"import blah; blah.hi();" + if NOGIL_BUILD: + cmd += "import sys; assert sys._is_gil_enabled() is True\"" + else: + cmd += "\"" + cmd_run = shlex.split(cmd) + rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8') + eout = ' Hello World\n' + assert rout.stdout == eout + if NOGIL_BUILD: + assert "The global interpreter lock (GIL) has been enabled to load module 'blah'" in rout.stderr + assert rout.returncode == 0 + + +@pytest.mark.skipif((platform.system() != 'Linux') or sys.version_info <= (3, 12), + reason='Compiler and Python 3.12 or newer required') +def test_freethreading_compatible(hello_world_f90, monkeypatch): + """ + CLI :: --freethreading_compatible + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} -c --freethreading-compatible'.split()) + + with util.switchdir(ipath.parent): + compiler_check_f2pycli() + cmd = f"{sys.executable} -c \"import blah; blah.hi();" + if NOGIL_BUILD: + cmd += "import sys; assert sys._is_gil_enabled() is False\"" + else: + cmd += "\"" + cmd_run = shlex.split(cmd) + rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8') + eout = ' Hello World\n' + assert rout.stdout == eout + assert rout.stderr == "" + assert rout.returncode == 0 + + +# Numpy distutils flags +# TODO: These should be tested separately + +def test_npd_fcompiler(): + """ + CLI :: -c --fcompiler + """ + # TODO: populate + pass + + +def test_npd_compiler(): + """ + CLI :: -c --compiler + """ + # TODO: populate + pass + + +def test_npd_help_fcompiler(): + """ + CLI :: -c --help-fcompiler + """ + # TODO: populate + pass + + +def test_npd_f77exec(): + """ + CLI :: -c --f77exec + """ + # TODO: populate + pass + + +def test_npd_f90exec(): + """ + CLI :: -c --f90exec + """ + # TODO: populate + pass + + +def test_npd_f77flags(): + """ + CLI :: -c --f77flags + """ + # TODO: populate + pass + + +def test_npd_f90flags(): + """ + CLI :: -c --f90flags + """ + # TODO: populate + pass + + +def test_npd_opt(): + """ + CLI :: -c --opt + """ + # TODO: populate + pass + + +def test_npd_arch(): + """ + CLI :: -c --arch + """ + # TODO: populate + pass + + +def test_npd_noopt(): + """ + CLI :: -c --noopt + """ + # TODO: populate + pass + + +def test_npd_noarch(): + """ + CLI :: -c --noarch + """ + # TODO: populate + pass + + +def test_npd_debug(): + """ + CLI :: -c --debug + """ + # TODO: populate + pass + + +def test_npd_link_auto(): + """ + CLI :: -c --link- + """ + # TODO: populate + pass + + +def test_npd_lib(): + """ + CLI :: -c -L/path/to/lib/ -l + """ + # TODO: populate + pass + + +def test_npd_define(): + """ + CLI :: -D + """ + # TODO: populate + pass + + +def test_npd_undefine(): + """ + CLI :: -U + """ + # TODO: populate + pass + + +def test_npd_incl(): + """ + CLI :: -I/path/to/include/ + """ + # TODO: populate + pass + + +def test_npd_linker(): + """ + CLI :: .o .so .a + """ + # TODO: populate + pass diff --git a/local_packages/numpy/f2py/tests/test_isoc.py b/local_packages/numpy/f2py/tests/test_isoc.py new file mode 100644 index 0000000..f3450f1 --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_isoc.py @@ -0,0 +1,56 @@ +import pytest + +import numpy as np +from numpy.testing import assert_allclose + +from . import util + + +class TestISOC(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "isocintrin", "isoCtests.f90"), + ] + + # gh-24553 + @pytest.mark.slow + def test_c_double(self): + out = self.module.coddity.c_add(1, 2) + exp_out = 3 + assert out == exp_out + + # gh-9693 + def test_bindc_function(self): + out = self.module.coddity.wat(1, 20) + exp_out = 8 + assert out == exp_out + + # gh-25207 + def test_bindc_kinds(self): + out = self.module.coddity.c_add_int64(1, 20) + exp_out = 21 + assert out == exp_out + + # gh-25207 + def test_bindc_add_arr(self): + a = np.array([1, 2, 3]) + b = np.array([1, 2, 3]) + out = self.module.coddity.add_arr(a, b) + exp_out = a * 2 + assert_allclose(out, exp_out) + + +def test_process_f2cmap_dict(): + from numpy.f2py.auxfuncs import process_f2cmap_dict + + f2cmap_all = {"integer": {"8": "rubbish_type"}} + new_map = {"INTEGER": {"4": "int"}} + c2py_map = {"int": "int", "rubbish_type": "long"} + + exp_map, exp_maptyp = ({"integer": {"8": "rubbish_type", "4": "int"}}, ["int"]) + + # Call the function + res_map, res_maptyp = process_f2cmap_dict(f2cmap_all, new_map, c2py_map) + + # Assert the result is as expected + assert res_map == exp_map + assert res_maptyp == exp_maptyp diff --git a/local_packages/numpy/f2py/tests/test_kind.py b/local_packages/numpy/f2py/tests/test_kind.py new file mode 100644 index 0000000..c219cc8 --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_kind.py @@ -0,0 +1,52 @@ +import platform +import sys + +import pytest + +from numpy.f2py.crackfortran import ( + _selected_int_kind_func as selected_int_kind, + _selected_real_kind_func as selected_real_kind, +) + +from . import util + +IS_PPC_OR_AIX = platform.machine().lower().startswith("ppc") or platform.system() == 'AIX' + +class TestKind(util.F2PyTest): + sources = [util.getpath("tests", "src", "kind", "foo.f90")] + + @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, + reason="Fails for 32 bit machines") + def test_int(self): + """Test `int` kind_func for integers up to 10**40.""" + selectedintkind = self.module.selectedintkind + + for i in range(40): + assert selectedintkind(i) == selected_int_kind( + i + ), f"selectedintkind({i}): expected {selected_int_kind(i)!r} but got {selectedintkind(i)!r}" + + def test_real(self): + """ + Test (processor-dependent) `real` kind_func for real numbers + of up to 31 digits precision (extended/quadruple). + """ + selectedrealkind = self.module.selectedrealkind + + for i in range(32): + assert selectedrealkind(i) == selected_real_kind( + i + ), f"selectedrealkind({i}): expected {selected_real_kind(i)!r} but got {selectedrealkind(i)!r}" + + @pytest.mark.xfail(IS_PPC_OR_AIX, + reason="Some PowerPC may not support full IEEE 754 precision") + def test_quad_precision(self): + """ + Test kind_func for quadruple precision [`real(16)`] of 32+ digits . + """ + selectedrealkind = self.module.selectedrealkind + + for i in range(32, 40): + assert selectedrealkind(i) == selected_real_kind( + i + ), f"selectedrealkind({i}): expected {selected_real_kind(i)!r} but got {selectedrealkind(i)!r}" diff --git a/local_packages/numpy/f2py/tests/test_mixed.py b/local_packages/numpy/f2py/tests/test_mixed.py new file mode 100644 index 0000000..07f43e2 --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_mixed.py @@ -0,0 +1,35 @@ +import textwrap + +import pytest + +from numpy.testing import IS_PYPY + +from . import util + + +class TestMixed(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "mixed", "foo.f"), + util.getpath("tests", "src", "mixed", "foo_fixed.f90"), + util.getpath("tests", "src", "mixed", "foo_free.f90"), + ] + + @pytest.mark.slow + def test_all(self): + assert self.module.bar11() == 11 + assert self.module.foo_fixed.bar12() == 12 + assert self.module.foo_free.bar13() == 13 + + @pytest.mark.xfail(IS_PYPY, + reason="PyPy cannot modify tp_doc after PyType_Ready") + def test_docstring(self): + expected = textwrap.dedent("""\ + a = bar11() + + Wrapper for ``bar11``. + + Returns + ------- + a : int + """) + assert self.module.bar11.__doc__ == expected diff --git a/local_packages/numpy/f2py/tests/test_modules.py b/local_packages/numpy/f2py/tests/test_modules.py new file mode 100644 index 0000000..96d5ffc --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_modules.py @@ -0,0 +1,83 @@ +import textwrap + +import pytest + +from numpy.testing import IS_PYPY + +from . import util + + +@pytest.mark.slow +class TestModuleFilterPublicEntities(util.F2PyTest): + sources = [ + util.getpath( + "tests", "src", "modules", "gh26920", + "two_mods_with_one_public_routine.f90" + ) + ] + # we filter the only public function mod2 + only = ["mod1_func1", ] + + def test_gh26920(self): + # if it compiles and can be loaded, things are fine + pass + + +@pytest.mark.slow +class TestModuleWithoutPublicEntities(util.F2PyTest): + sources = [ + util.getpath( + "tests", "src", "modules", "gh26920", + "two_mods_with_no_public_entities.f90" + ) + ] + only = ["mod1_func1", ] + + def test_gh26920(self): + # if it compiles and can be loaded, things are fine + pass + + +@pytest.mark.slow +class TestModuleDocString(util.F2PyTest): + sources = [util.getpath("tests", "src", "modules", "module_data_docstring.f90")] + + @pytest.mark.xfail(IS_PYPY, reason="PyPy cannot modify tp_doc after PyType_Ready") + def test_module_docstring(self): + assert self.module.mod.__doc__ == textwrap.dedent( + """\ + i : 'i'-scalar + x : 'i'-array(4) + a : 'f'-array(2,3) + b : 'f'-array(-1,-1), not allocated\x00 + foo()\n + Wrapper for ``foo``.\n\n""" + ) + + +@pytest.mark.slow +class TestModuleAndSubroutine(util.F2PyTest): + module_name = "example" + sources = [ + util.getpath("tests", "src", "modules", "gh25337", "data.f90"), + util.getpath("tests", "src", "modules", "gh25337", "use_data.f90"), + ] + + def test_gh25337(self): + self.module.data.set_shift(3) + assert "data" in dir(self.module) + + +@pytest.mark.slow +class TestUsedModule(util.F2PyTest): + module_name = "fmath" + sources = [ + util.getpath("tests", "src", "modules", "use_modules.f90"), + ] + + def test_gh25867(self): + compiled_mods = [x for x in dir(self.module) if "__" not in x] + assert "useops" in compiled_mods + assert self.module.useops.sum_and_double(3, 7) == 20 + assert "mathops" in compiled_mods + assert self.module.mathops.add(3, 7) == 10 diff --git a/local_packages/numpy/f2py/tests/test_parameter.py b/local_packages/numpy/f2py/tests/test_parameter.py new file mode 100644 index 0000000..513d021 --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_parameter.py @@ -0,0 +1,129 @@ +import pytest + +import numpy as np + +from . import util + + +class TestParameters(util.F2PyTest): + # Check that intent(in out) translates as intent(inout) + sources = [ + util.getpath("tests", "src", "parameter", "constant_real.f90"), + util.getpath("tests", "src", "parameter", "constant_integer.f90"), + util.getpath("tests", "src", "parameter", "constant_both.f90"), + util.getpath("tests", "src", "parameter", "constant_compound.f90"), + util.getpath("tests", "src", "parameter", "constant_non_compound.f90"), + util.getpath("tests", "src", "parameter", "constant_array.f90"), + ] + + @pytest.mark.slow + def test_constant_real_single(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float32)[::2] + pytest.raises(ValueError, self.module.foo_single, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float32) + self.module.foo_single(x) + assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2]) + + @pytest.mark.slow + def test_constant_real_double(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float64)[::2] + pytest.raises(ValueError, self.module.foo_double, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float64) + self.module.foo_double(x) + assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2]) + + @pytest.mark.slow + def test_constant_compound_int(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.int32)[::2] + pytest.raises(ValueError, self.module.foo_compound_int, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.int32) + self.module.foo_compound_int(x) + assert np.allclose(x, [0 + 1 + 2 * 6, 1, 2]) + + @pytest.mark.slow + def test_constant_non_compound_int(self): + # check values + x = np.arange(4, dtype=np.int32) + self.module.foo_non_compound_int(x) + assert np.allclose(x, [0 + 1 + 2 + 3 * 4, 1, 2, 3]) + + @pytest.mark.slow + def test_constant_integer_int(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.int32)[::2] + pytest.raises(ValueError, self.module.foo_int, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.int32) + self.module.foo_int(x) + assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2]) + + @pytest.mark.slow + def test_constant_integer_long(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.int64)[::2] + pytest.raises(ValueError, self.module.foo_long, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.int64) + self.module.foo_long(x) + assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2]) + + @pytest.mark.slow + def test_constant_both(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float64)[::2] + pytest.raises(ValueError, self.module.foo, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float64) + self.module.foo(x) + assert np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3]) + + @pytest.mark.slow + def test_constant_no(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float64)[::2] + pytest.raises(ValueError, self.module.foo_no, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float64) + self.module.foo_no(x) + assert np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3]) + + @pytest.mark.slow + def test_constant_sum(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float64)[::2] + pytest.raises(ValueError, self.module.foo_sum, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float64) + self.module.foo_sum(x) + assert np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3]) + + def test_constant_array(self): + x = np.arange(3, dtype=np.float64) + y = np.arange(5, dtype=np.float64) + z = self.module.foo_array(x, y) + assert np.allclose(x, [0.0, 1. / 10, 2. / 10]) + assert np.allclose(y, [0.0, 1. * 10, 2. * 10, 3. * 10, 4. * 10]) + assert np.allclose(z, 19.0) + + def test_constant_array_any_index(self): + x = np.arange(6, dtype=np.float64) + y = self.module.foo_array_any_index(x) + assert np.allclose(y, x.reshape((2, 3), order='F')) + + def test_constant_array_delims(self): + x = self.module.foo_array_delims() + assert x == 9 diff --git a/local_packages/numpy/f2py/tests/test_pyf_src.py b/local_packages/numpy/f2py/tests/test_pyf_src.py new file mode 100644 index 0000000..2ecb0fb --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_pyf_src.py @@ -0,0 +1,43 @@ +# This test is ported from numpy.distutils +from numpy.f2py._src_pyf import process_str +from numpy.testing import assert_equal + +pyf_src = """ +python module foo + <_rd=real,double precision> + interface + subroutine foosub(tol) + <_rd>, intent(in,out) :: tol + end subroutine foosub + end interface +end python module foo +""" + +expected_pyf = """ +python module foo + interface + subroutine sfoosub(tol) + real, intent(in,out) :: tol + end subroutine sfoosub + subroutine dfoosub(tol) + double precision, intent(in,out) :: tol + end subroutine dfoosub + end interface +end python module foo +""" + + +def normalize_whitespace(s): + """ + Remove leading and trailing whitespace, and convert internal + stretches of whitespace to a single space. + """ + return ' '.join(s.split()) + + +def test_from_template(): + """Regression test for gh-10712.""" + pyf = process_str(pyf_src) + normalized_pyf = normalize_whitespace(pyf) + normalized_expected_pyf = normalize_whitespace(expected_pyf) + assert_equal(normalized_pyf, normalized_expected_pyf) diff --git a/local_packages/numpy/f2py/tests/test_quoted_character.py b/local_packages/numpy/f2py/tests/test_quoted_character.py new file mode 100644 index 0000000..3cbcb3c --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_quoted_character.py @@ -0,0 +1,18 @@ +"""See https://github.com/numpy/numpy/pull/10676. + +""" +import sys + +import pytest + +from . import util + + +class TestQuotedCharacter(util.F2PyTest): + sources = [util.getpath("tests", "src", "quoted_character", "foo.f")] + + @pytest.mark.skipif(sys.platform == "win32", + reason="Fails with MinGW64 Gfortran (Issue #9673)") + @pytest.mark.slow + def test_quoted_character(self): + assert self.module.foo() == (b"'", b'"', b";", b"!", b"(", b")") diff --git a/local_packages/numpy/f2py/tests/test_regression.py b/local_packages/numpy/f2py/tests/test_regression.py new file mode 100644 index 0000000..c4636a7 --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_regression.py @@ -0,0 +1,187 @@ +import os +import platform + +import pytest + +import numpy as np +import numpy.testing as npt + +from . import util + + +class TestIntentInOut(util.F2PyTest): + # Check that intent(in out) translates as intent(inout) + sources = [util.getpath("tests", "src", "regression", "inout.f90")] + + @pytest.mark.slow + def test_inout(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float32)[::2] + pytest.raises(ValueError, self.module.foo, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float32) + self.module.foo(x) + assert np.allclose(x, [3, 1, 2]) + + +class TestDataOnlyMultiModule(util.F2PyTest): + # Check that modules without subroutines work + sources = [util.getpath("tests", "src", "regression", "datonly.f90")] + + @pytest.mark.slow + def test_mdat(self): + assert self.module.datonly.max_value == 100 + assert self.module.dat.max_ == 1009 + int_in = 5 + assert self.module.simple_subroutine(5) == 1014 + + +class TestModuleWithDerivedType(util.F2PyTest): + # Check that modules with derived types work + sources = [util.getpath("tests", "src", "regression", "mod_derived_types.f90")] + + @pytest.mark.slow + def test_mtypes(self): + assert self.module.no_type_subroutine(10) == 110 + assert self.module.type_subroutine(10) == 210 + + +class TestNegativeBounds(util.F2PyTest): + # Check that negative bounds work correctly + sources = [util.getpath("tests", "src", "negative_bounds", "issue_20853.f90")] + + @pytest.mark.slow + def test_negbound(self): + xvec = np.arange(12) + xlow = -6 + xhigh = 4 + + # Calculate the upper bound, + # Keeping the 1 index in mind + + def ubound(xl, xh): + return xh - xl + 1 + rval = self.module.foo(is_=xlow, ie_=xhigh, + arr=xvec[:ubound(xlow, xhigh)]) + expval = np.arange(11, dtype=np.float32) + assert np.allclose(rval, expval) + + +class TestNumpyVersionAttribute(util.F2PyTest): + # Check that th attribute __f2py_numpy_version__ is present + # in the compiled module and that has the value np.__version__. + sources = [util.getpath("tests", "src", "regression", "inout.f90")] + + @pytest.mark.slow + def test_numpy_version_attribute(self): + + # Check that self.module has an attribute named "__f2py_numpy_version__" + assert hasattr(self.module, "__f2py_numpy_version__") + + # Check that the attribute __f2py_numpy_version__ is a string + assert isinstance(self.module.__f2py_numpy_version__, str) + + # Check that __f2py_numpy_version__ has the value numpy.__version__ + assert np.__version__ == self.module.__f2py_numpy_version__ + + +def test_include_path(): + incdir = np.f2py.get_include() + fnames_in_dir = os.listdir(incdir) + for fname in ("fortranobject.c", "fortranobject.h"): + assert fname in fnames_in_dir + + +class TestIncludeFiles(util.F2PyTest): + sources = [util.getpath("tests", "src", "regression", "incfile.f90")] + options = [f"-I{util.getpath('tests', 'src', 'regression')}", + f"--include-paths {util.getpath('tests', 'src', 'regression')}"] + + @pytest.mark.slow + def test_gh25344(self): + exp = 7.0 + res = self.module.add(3.0, 4.0) + assert exp == res + +class TestF77Comments(util.F2PyTest): + # Check that comments are stripped from F77 continuation lines + sources = [util.getpath("tests", "src", "regression", "f77comments.f")] + + @pytest.mark.slow + def test_gh26148(self): + x1 = np.array(3, dtype=np.int32) + x2 = np.array(5, dtype=np.int32) + res = self.module.testsub(x1, x2) + assert res[0] == 8 + assert res[1] == 15 + + @pytest.mark.slow + def test_gh26466(self): + # Check that comments after PARAMETER directions are stripped + expected = np.arange(1, 11, dtype=np.float32) * 2 + res = self.module.testsub2() + npt.assert_allclose(expected, res) + +class TestF90Contiuation(util.F2PyTest): + # Check that comments are stripped from F90 continuation lines + sources = [util.getpath("tests", "src", "regression", "f90continuation.f90")] + + @pytest.mark.slow + def test_gh26148b(self): + x1 = np.array(3, dtype=np.int32) + x2 = np.array(5, dtype=np.int32) + res = self.module.testsub(x1, x2) + assert res[0] == 8 + assert res[1] == 15 + +class TestLowerF2PYDirectives(util.F2PyTest): + # Check variables are cased correctly + sources = [util.getpath("tests", "src", "regression", "lower_f2py_fortran.f90")] + + @pytest.mark.slow + def test_gh28014(self): + self.module.inquire_next(3) + assert True + +@pytest.mark.slow +def test_gh26623(): + # Including libraries with . should not generate an incorrect meson.build + try: + aa = util.build_module( + [util.getpath("tests", "src", "regression", "f90continuation.f90")], + ["-lfoo.bar"], + module_name="Blah", + ) + except RuntimeError as rerr: + assert "lparen got assign" not in str(rerr) + + +@pytest.mark.slow +@pytest.mark.skipif(platform.system() == "Windows", reason='Unsupported on this platform for now') +def test_gh25784(): + # Compile dubious file using passed flags + try: + aa = util.build_module( + [util.getpath("tests", "src", "regression", "f77fixedform.f95")], + options=[ + # Meson will collect and dedup these to pass to fortran_args: + "--f77flags='-ffixed-form -O2'", + "--f90flags=\"-ffixed-form -g\"", + ], + module_name="Blah", + ) + except ImportError as rerr: + assert "unknown_subroutine_" in str(rerr) + + +@pytest.mark.slow +class TestAssignmentOnlyModules(util.F2PyTest): + # Ensure that variables are exposed without functions or subroutines in a module + sources = [util.getpath("tests", "src", "regression", "assignOnlyModule.f90")] + + @pytest.mark.slow + def test_gh27167(self): + assert (self.module.f_globals.n_max == 16) + assert (self.module.f_globals.i_max == 18) + assert (self.module.f_globals.j_max == 72) diff --git a/local_packages/numpy/f2py/tests/test_return_character.py b/local_packages/numpy/f2py/tests/test_return_character.py new file mode 100644 index 0000000..aae3f0f --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_return_character.py @@ -0,0 +1,48 @@ +import platform + +import pytest + +from numpy import array + +from . import util + +IS_S390X = platform.machine() == "s390x" + + +@pytest.mark.slow +class TestReturnCharacter(util.F2PyTest): + def check_function(self, t, tname): + if tname in ["t0", "t1", "s0", "s1"]: + assert t("23") == b"2" + r = t("ab") + assert r == b"a" + r = t(array("ab")) + assert r == b"a" + r = t(array(77, "u1")) + assert r == b"M" + elif tname in ["ts", "ss"]: + assert t(23) == b"23" + assert t("123456789abcdef") == b"123456789a" + elif tname in ["t5", "s5"]: + assert t(23) == b"23" + assert t("ab") == b"ab" + assert t("123456789abcdef") == b"12345" + else: + raise NotImplementedError + + +class TestFReturnCharacter(TestReturnCharacter): + sources = [ + util.getpath("tests", "src", "return_character", "foo77.f"), + util.getpath("tests", "src", "return_character", "foo90.f90"), + ] + + @pytest.mark.xfail(IS_S390X, reason="callback returns ' '") + @pytest.mark.parametrize("name", ["t0", "t1", "t5", "s0", "s1", "s5", "ss"]) + def test_all_f77(self, name): + self.check_function(getattr(self.module, name), name) + + @pytest.mark.xfail(IS_S390X, reason="callback returns ' '") + @pytest.mark.parametrize("name", ["t0", "t1", "t5", "ts", "s0", "s1", "s5", "ss"]) + def test_all_f90(self, name): + self.check_function(getattr(self.module.f90_return_char, name), name) diff --git a/local_packages/numpy/f2py/tests/test_return_complex.py b/local_packages/numpy/f2py/tests/test_return_complex.py new file mode 100644 index 0000000..aa3f28e --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_return_complex.py @@ -0,0 +1,67 @@ +import pytest + +from numpy import array + +from . import util + + +@pytest.mark.slow +class TestReturnComplex(util.F2PyTest): + def check_function(self, t, tname): + if tname in ["t0", "t8", "s0", "s8"]: + err = 1e-5 + else: + err = 0.0 + assert abs(t(234j) - 234.0j) <= err + assert abs(t(234.6) - 234.6) <= err + assert abs(t(234) - 234.0) <= err + assert abs(t(234.6 + 3j) - (234.6 + 3j)) <= err + # assert abs(t('234')-234.)<=err + # assert abs(t('234.6')-234.6)<=err + assert abs(t(-234) + 234.0) <= err + assert abs(t([234]) - 234.0) <= err + assert abs(t((234, )) - 234.0) <= err + assert abs(t(array(234)) - 234.0) <= err + assert abs(t(array(23 + 4j, "F")) - (23 + 4j)) <= err + assert abs(t(array([234])) - 234.0) <= err + assert abs(t(array([[234]])) - 234.0) <= err + assert abs(t(array([234]).astype("b")) + 22.0) <= err + assert abs(t(array([234], "h")) - 234.0) <= err + assert abs(t(array([234], "i")) - 234.0) <= err + assert abs(t(array([234], "l")) - 234.0) <= err + assert abs(t(array([234], "q")) - 234.0) <= err + assert abs(t(array([234], "f")) - 234.0) <= err + assert abs(t(array([234], "d")) - 234.0) <= err + assert abs(t(array([234 + 3j], "F")) - (234 + 3j)) <= err + assert abs(t(array([234], "D")) - 234.0) <= err + + # pytest.raises(TypeError, t, array([234], 'S1')) + pytest.raises(TypeError, t, "abc") + + pytest.raises(IndexError, t, []) + pytest.raises(IndexError, t, ()) + + pytest.raises(TypeError, t, t) + pytest.raises(TypeError, t, {}) + + try: + r = t(10**400) + assert repr(r) in ["(inf+0j)", "(Infinity+0j)"] + except OverflowError: + pass + + +class TestFReturnComplex(TestReturnComplex): + sources = [ + util.getpath("tests", "src", "return_complex", "foo77.f"), + util.getpath("tests", "src", "return_complex", "foo90.f90"), + ] + + @pytest.mark.parametrize("name", ["t0", "t8", "t16", "td", "s0", "s8", "s16", "sd"]) + def test_all_f77(self, name): + self.check_function(getattr(self.module, name), name) + + @pytest.mark.parametrize("name", ["t0", "t8", "t16", "td", "s0", "s8", "s16", "sd"]) + def test_all_f90(self, name): + self.check_function(getattr(self.module.f90_return_complex, name), + name) diff --git a/local_packages/numpy/f2py/tests/test_return_integer.py b/local_packages/numpy/f2py/tests/test_return_integer.py new file mode 100644 index 0000000..50309d5 --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_return_integer.py @@ -0,0 +1,55 @@ +import pytest + +from numpy import array + +from . import util + + +@pytest.mark.slow +class TestReturnInteger(util.F2PyTest): + def check_function(self, t, tname): + assert t(123) == 123 + assert t(123.6) == 123 + assert t("123") == 123 + assert t(-123) == -123 + assert t([123]) == 123 + assert t((123, )) == 123 + assert t(array(123)) == 123 + assert t(array(123, "b")) == 123 + assert t(array(123, "h")) == 123 + assert t(array(123, "i")) == 123 + assert t(array(123, "l")) == 123 + assert t(array(123, "B")) == 123 + assert t(array(123, "f")) == 123 + assert t(array(123, "d")) == 123 + + # pytest.raises(ValueError, t, array([123],'S3')) + pytest.raises(ValueError, t, "abc") + + pytest.raises(IndexError, t, []) + pytest.raises(IndexError, t, ()) + + pytest.raises(TypeError, t, t) + pytest.raises(TypeError, t, {}) + + if tname in ["t8", "s8"]: + pytest.raises(OverflowError, t, 100000000000000000000000) + pytest.raises(OverflowError, t, 10000000011111111111111.23) + + +class TestFReturnInteger(TestReturnInteger): + sources = [ + util.getpath("tests", "src", "return_integer", "foo77.f"), + util.getpath("tests", "src", "return_integer", "foo90.f90"), + ] + + @pytest.mark.parametrize("name", + ["t0", "t1", "t2", "t4", "t8", "s0", "s1", "s2", "s4", "s8"]) + def test_all_f77(self, name): + self.check_function(getattr(self.module, name), name) + + @pytest.mark.parametrize("name", + ["t0", "t1", "t2", "t4", "t8", "s0", "s1", "s2", "s4", "s8"]) + def test_all_f90(self, name): + self.check_function(getattr(self.module.f90_return_integer, name), + name) diff --git a/local_packages/numpy/f2py/tests/test_return_logical.py b/local_packages/numpy/f2py/tests/test_return_logical.py new file mode 100644 index 0000000..a4a3395 --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_return_logical.py @@ -0,0 +1,65 @@ +import pytest + +from numpy import array + +from . import util + + +class TestReturnLogical(util.F2PyTest): + def check_function(self, t): + assert t(True) == 1 + assert t(False) == 0 + assert t(0) == 0 + assert t(None) == 0 + assert t(0.0) == 0 + assert t(0j) == 0 + assert t(1j) == 1 + assert t(234) == 1 + assert t(234.6) == 1 + assert t(234.6 + 3j) == 1 + assert t("234") == 1 + assert t("aaa") == 1 + assert t("") == 0 + assert t([]) == 0 + assert t(()) == 0 + assert t({}) == 0 + assert t(t) == 1 + assert t(-234) == 1 + assert t(10**100) == 1 + assert t([234]) == 1 + assert t((234, )) == 1 + assert t(array(234)) == 1 + assert t(array([234])) == 1 + assert t(array([[234]])) == 1 + assert t(array([127], "b")) == 1 + assert t(array([234], "h")) == 1 + assert t(array([234], "i")) == 1 + assert t(array([234], "l")) == 1 + assert t(array([234], "f")) == 1 + assert t(array([234], "d")) == 1 + assert t(array([234 + 3j], "F")) == 1 + assert t(array([234], "D")) == 1 + assert t(array(0)) == 0 + assert t(array([0])) == 0 + assert t(array([[0]])) == 0 + assert t(array([0j])) == 0 + assert t(array([1])) == 1 + pytest.raises(ValueError, t, array([0, 0])) + + +class TestFReturnLogical(TestReturnLogical): + sources = [ + util.getpath("tests", "src", "return_logical", "foo77.f"), + util.getpath("tests", "src", "return_logical", "foo90.f90"), + ] + + @pytest.mark.slow + @pytest.mark.parametrize("name", ["t0", "t1", "t2", "t4", "s0", "s1", "s2", "s4"]) + def test_all_f77(self, name): + self.check_function(getattr(self.module, name)) + + @pytest.mark.slow + @pytest.mark.parametrize("name", + ["t0", "t1", "t2", "t4", "t8", "s0", "s1", "s2", "s4", "s8"]) + def test_all_f90(self, name): + self.check_function(getattr(self.module.f90_return_logical, name)) diff --git a/local_packages/numpy/f2py/tests/test_return_real.py b/local_packages/numpy/f2py/tests/test_return_real.py new file mode 100644 index 0000000..4339657 --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_return_real.py @@ -0,0 +1,109 @@ +import platform + +import pytest + +from numpy import array +from numpy.testing import IS_64BIT + +from . import util + + +@pytest.mark.slow +class TestReturnReal(util.F2PyTest): + def check_function(self, t, tname): + if tname in ["t0", "t4", "s0", "s4"]: + err = 1e-5 + else: + err = 0.0 + assert abs(t(234) - 234.0) <= err + assert abs(t(234.6) - 234.6) <= err + assert abs(t("234") - 234) <= err + assert abs(t("234.6") - 234.6) <= err + assert abs(t(-234) + 234) <= err + assert abs(t([234]) - 234) <= err + assert abs(t((234, )) - 234.0) <= err + assert abs(t(array(234)) - 234.0) <= err + assert abs(t(array(234).astype("b")) + 22) <= err + assert abs(t(array(234, "h")) - 234.0) <= err + assert abs(t(array(234, "i")) - 234.0) <= err + assert abs(t(array(234, "l")) - 234.0) <= err + assert abs(t(array(234, "B")) - 234.0) <= err + assert abs(t(array(234, "f")) - 234.0) <= err + assert abs(t(array(234, "d")) - 234.0) <= err + if tname in ["t0", "t4", "s0", "s4"]: + assert t(1e200) == t(1e300) # inf + + # pytest.raises(ValueError, t, array([234], 'S1')) + pytest.raises(ValueError, t, "abc") + + pytest.raises(IndexError, t, []) + pytest.raises(IndexError, t, ()) + + pytest.raises(TypeError, t, t) + pytest.raises(TypeError, t, {}) + + try: + r = t(10**400) + assert repr(r) in ["inf", "Infinity"] + except OverflowError: + pass + + +@pytest.mark.skipif( + platform.system() == "Darwin", + reason="Prone to error when run with numpy/f2py/tests on mac os, " + "but not when run in isolation", +) +@pytest.mark.skipif( + not IS_64BIT, reason="32-bit builds are buggy" +) +class TestCReturnReal(TestReturnReal): + suffix = ".pyf" + module_name = "c_ext_return_real" + code = """ +python module c_ext_return_real +usercode \'\'\' +float t4(float value) { return value; } +void s4(float *t4, float value) { *t4 = value; } +double t8(double value) { return value; } +void s8(double *t8, double value) { *t8 = value; } +\'\'\' +interface + function t4(value) + real*4 intent(c) :: t4,value + end + function t8(value) + real*8 intent(c) :: t8,value + end + subroutine s4(t4,value) + intent(c) s4 + real*4 intent(out) :: t4 + real*4 intent(c) :: value + end + subroutine s8(t8,value) + intent(c) s8 + real*8 intent(out) :: t8 + real*8 intent(c) :: value + end +end interface +end python module c_ext_return_real + """ + + @pytest.mark.parametrize("name", ["t4", "t8", "s4", "s8"]) + def test_all(self, name): + self.check_function(getattr(self.module, name), name) + + +class TestFReturnReal(TestReturnReal): + sources = [ + util.getpath("tests", "src", "return_real", "foo77.f"), + util.getpath("tests", "src", "return_real", "foo90.f90"), + ] + + @pytest.mark.parametrize("name", ["t0", "t4", "t8", "td", "s0", "s4", "s8", "sd"]) + def test_all_f77(self, name): + self.check_function(getattr(self.module, name), name) + + @pytest.mark.parametrize("name", ["t0", "t4", "t8", "td", "s0", "s4", "s8", "sd"]) + def test_all_f90(self, name): + self.check_function(getattr(self.module.f90_return_real, name), name) diff --git a/local_packages/numpy/f2py/tests/test_routines.py b/local_packages/numpy/f2py/tests/test_routines.py new file mode 100644 index 0000000..01135dd --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_routines.py @@ -0,0 +1,29 @@ +import pytest + +from . import util + + +@pytest.mark.slow +class TestRenamedFunc(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "routines", "funcfortranname.f"), + util.getpath("tests", "src", "routines", "funcfortranname.pyf"), + ] + module_name = "funcfortranname" + + def test_gh25799(self): + assert dir(self.module) + assert self.module.funcfortranname_default(200, 12) == 212 + + +@pytest.mark.slow +class TestRenamedSubroutine(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "routines", "subrout.f"), + util.getpath("tests", "src", "routines", "subrout.pyf"), + ] + module_name = "subrout" + + def test_renamed_subroutine(self): + assert dir(self.module) + assert self.module.subrout_default(200, 12) == 212 diff --git a/local_packages/numpy/f2py/tests/test_semicolon_split.py b/local_packages/numpy/f2py/tests/test_semicolon_split.py new file mode 100644 index 0000000..2a16b19 --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_semicolon_split.py @@ -0,0 +1,75 @@ +import platform + +import pytest + +from numpy.testing import IS_64BIT + +from . import util + + +@pytest.mark.skipif( + platform.system() == "Darwin", + reason="Prone to error when run with numpy/f2py/tests on mac os, " + "but not when run in isolation", +) +@pytest.mark.skipif( + not IS_64BIT, reason="32-bit builds are buggy" +) +class TestMultiline(util.F2PyTest): + suffix = ".pyf" + module_name = "multiline" + code = f""" +python module {module_name} + usercode ''' +void foo(int* x) {{ + char dummy = ';'; + *x = 42; +}} +''' + interface + subroutine foo(x) + intent(c) foo + integer intent(out) :: x + end subroutine foo + end interface +end python module {module_name} + """ + + def test_multiline(self): + assert self.module.foo() == 42 + + +@pytest.mark.skipif( + platform.system() == "Darwin", + reason="Prone to error when run with numpy/f2py/tests on mac os, " + "but not when run in isolation", +) +@pytest.mark.skipif( + not IS_64BIT, reason="32-bit builds are buggy" +) +@pytest.mark.slow +class TestCallstatement(util.F2PyTest): + suffix = ".pyf" + module_name = "callstatement" + code = f""" +python module {module_name} + usercode ''' +void foo(int* x) {{ +}} +''' + interface + subroutine foo(x) + intent(c) foo + integer intent(out) :: x + callprotoargument int* + callstatement {{ & + ; & + x = 42; & + }} + end subroutine foo + end interface +end python module {module_name} + """ + + def test_callstatement(self): + assert self.module.foo() == 42 diff --git a/local_packages/numpy/f2py/tests/test_size.py b/local_packages/numpy/f2py/tests/test_size.py new file mode 100644 index 0000000..ac2eaf1 --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_size.py @@ -0,0 +1,45 @@ +import pytest + +import numpy as np + +from . import util + + +class TestSizeSumExample(util.F2PyTest): + sources = [util.getpath("tests", "src", "size", "foo.f90")] + + @pytest.mark.slow + def test_all(self): + r = self.module.foo([[]]) + assert r == [0] + + r = self.module.foo([[1, 2]]) + assert r == [3] + + r = self.module.foo([[1, 2], [3, 4]]) + assert np.allclose(r, [3, 7]) + + r = self.module.foo([[1, 2], [3, 4], [5, 6]]) + assert np.allclose(r, [3, 7, 11]) + + @pytest.mark.slow + def test_transpose(self): + r = self.module.trans([[]]) + assert np.allclose(r.T, np.array([[]])) + + r = self.module.trans([[1, 2]]) + assert np.allclose(r, [[1.], [2.]]) + + r = self.module.trans([[1, 2, 3], [4, 5, 6]]) + assert np.allclose(r, [[1, 4], [2, 5], [3, 6]]) + + @pytest.mark.slow + def test_flatten(self): + r = self.module.flatten([[]]) + assert np.allclose(r, []) + + r = self.module.flatten([[1, 2]]) + assert np.allclose(r, [1, 2]) + + r = self.module.flatten([[1, 2, 3], [4, 5, 6]]) + assert np.allclose(r, [1, 2, 3, 4, 5, 6]) diff --git a/local_packages/numpy/f2py/tests/test_string.py b/local_packages/numpy/f2py/tests/test_string.py new file mode 100644 index 0000000..f484ea3 --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_string.py @@ -0,0 +1,100 @@ +import pytest + +import numpy as np + +from . import util + + +class TestString(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "char.f90")] + + @pytest.mark.slow + def test_char(self): + strings = np.array(["ab", "cd", "ef"], dtype="c").T + inp, out = self.module.char_test.change_strings( + strings, strings.shape[1]) + assert inp == pytest.approx(strings) + expected = strings.copy() + expected[1, :] = "AAA" + assert out == pytest.approx(expected) + + +class TestDocStringArguments(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "string.f")] + + def test_example(self): + a = np.array(b"123\0\0") + b = np.array(b"123\0\0") + c = np.array(b"123") + d = np.array(b"123") + + self.module.foo(a, b, c, d) + + assert a.tobytes() == b"123\0\0" + assert b.tobytes() == b"B23\0\0" + assert c.tobytes() == b"123" + assert d.tobytes() == b"D23" + + +class TestFixedString(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "fixed_string.f90")] + + @staticmethod + def _sint(s, start=0, end=None): + """Return the content of a string buffer as integer value. + + For example: + _sint('1234') -> 4321 + _sint('123A') -> 17321 + """ + if isinstance(s, np.ndarray): + s = s.tobytes() + elif isinstance(s, str): + s = s.encode() + assert isinstance(s, bytes) + if end is None: + end = len(s) + i = 0 + for j in range(start, min(end, len(s))): + i += s[j] * 10**j + return i + + def _get_input(self, intent="in"): + if intent in ["in"]: + yield "" + yield "1" + yield "1234" + yield "12345" + yield b"" + yield b"\0" + yield b"1" + yield b"\01" + yield b"1\0" + yield b"1234" + yield b"12345" + yield np.ndarray((), np.bytes_, buffer=b"") # array(b'', dtype='|S0') + yield np.array(b"") # array(b'', dtype='|S1') + yield np.array(b"\0") + yield np.array(b"1") + yield np.array(b"1\0") + yield np.array(b"\01") + yield np.array(b"1234") + yield np.array(b"123\0") + yield np.array(b"12345") + + def test_intent_in(self): + for s in self._get_input(): + r = self.module.test_in_bytes4(s) + # also checks that s is not changed inplace + expected = self._sint(s, end=4) + assert r == expected, s + + def test_intent_inout(self): + for s in self._get_input(intent="inout"): + rest = self._sint(s, start=4) + r = self.module.test_inout_bytes4(s) + expected = self._sint(s, end=4) + assert r == expected + + # check that the rest of input string is preserved + assert rest == self._sint(s, start=4) diff --git a/local_packages/numpy/f2py/tests/test_symbolic.py b/local_packages/numpy/f2py/tests/test_symbolic.py new file mode 100644 index 0000000..fbf5abd --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_symbolic.py @@ -0,0 +1,500 @@ +import pytest + +from numpy.f2py.symbolic import ( + ArithOp, + Expr, + Language, + Op, + as_apply, + as_array, + as_complex, + as_deref, + as_eq, + as_expr, + as_factors, + as_ge, + as_gt, + as_le, + as_lt, + as_ne, + as_number, + as_numer_denom, + as_ref, + as_string, + as_symbol, + as_terms, + as_ternary, + eliminate_quotes, + fromstring, + insert_quotes, + normalize, +) + +from . import util + + +class TestSymbolic(util.F2PyTest): + def test_eliminate_quotes(self): + def worker(s): + r, d = eliminate_quotes(s) + s1 = insert_quotes(r, d) + assert s1 == s + + for kind in ["", "mykind_"]: + worker(kind + '"1234" // "ABCD"') + worker(kind + '"1234" // ' + kind + '"ABCD"') + worker(kind + "\"1234\" // 'ABCD'") + worker(kind + '"1234" // ' + kind + "'ABCD'") + worker(kind + '"1\\"2\'AB\'34"') + worker("a = " + kind + "'1\\'2\"AB\"34'") + + def test_sanity(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + + assert x.op == Op.SYMBOL + assert repr(x) == "Expr(Op.SYMBOL, 'x')" + assert x == x + assert x != y + assert hash(x) is not None + + n = as_number(123) + m = as_number(456) + assert n.op == Op.INTEGER + assert repr(n) == "Expr(Op.INTEGER, (123, 4))" + assert n == n + assert n != m + assert hash(n) is not None + + fn = as_number(12.3) + fm = as_number(45.6) + assert fn.op == Op.REAL + assert repr(fn) == "Expr(Op.REAL, (12.3, 4))" + assert fn == fn + assert fn != fm + assert hash(fn) is not None + + c = as_complex(1, 2) + c2 = as_complex(3, 4) + assert c.op == Op.COMPLEX + assert repr(c) == ("Expr(Op.COMPLEX, (Expr(Op.INTEGER, (1, 4))," + " Expr(Op.INTEGER, (2, 4))))") + assert c == c + assert c != c2 + assert hash(c) is not None + + s = as_string("'123'") + s2 = as_string('"ABC"') + assert s.op == Op.STRING + assert repr(s) == "Expr(Op.STRING, (\"'123'\", 1))", repr(s) + assert s == s + assert s != s2 + + a = as_array((n, m)) + b = as_array((n, )) + assert a.op == Op.ARRAY + assert repr(a) == ("Expr(Op.ARRAY, (Expr(Op.INTEGER, (123, 4))," + " Expr(Op.INTEGER, (456, 4))))") + assert a == a + assert a != b + + t = as_terms(x) + u = as_terms(y) + assert t.op == Op.TERMS + assert repr(t) == "Expr(Op.TERMS, {Expr(Op.SYMBOL, 'x'): 1})" + assert t == t + assert t != u + assert hash(t) is not None + + v = as_factors(x) + w = as_factors(y) + assert v.op == Op.FACTORS + assert repr(v) == "Expr(Op.FACTORS, {Expr(Op.SYMBOL, 'x'): 1})" + assert v == v + assert w != v + assert hash(v) is not None + + t = as_ternary(x, y, z) + u = as_ternary(x, z, y) + assert t.op == Op.TERNARY + assert t == t + assert t != u + assert hash(t) is not None + + e = as_eq(x, y) + f = as_lt(x, y) + assert e.op == Op.RELATIONAL + assert e == e + assert e != f + assert hash(e) is not None + + def test_tostring_fortran(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + n = as_number(123) + m = as_number(456) + a = as_array((n, m)) + c = as_complex(n, m) + + assert str(x) == "x" + assert str(n) == "123" + assert str(a) == "[123, 456]" + assert str(c) == "(123, 456)" + + assert str(Expr(Op.TERMS, {x: 1})) == "x" + assert str(Expr(Op.TERMS, {x: 2})) == "2 * x" + assert str(Expr(Op.TERMS, {x: -1})) == "-x" + assert str(Expr(Op.TERMS, {x: -2})) == "-2 * x" + assert str(Expr(Op.TERMS, {x: 1, y: 1})) == "x + y" + assert str(Expr(Op.TERMS, {x: -1, y: -1})) == "-x - y" + assert str(Expr(Op.TERMS, {x: 2, y: 3})) == "2 * x + 3 * y" + assert str(Expr(Op.TERMS, {x: -2, y: 3})) == "-2 * x + 3 * y" + assert str(Expr(Op.TERMS, {x: 2, y: -3})) == "2 * x - 3 * y" + + assert str(Expr(Op.FACTORS, {x: 1})) == "x" + assert str(Expr(Op.FACTORS, {x: 2})) == "x ** 2" + assert str(Expr(Op.FACTORS, {x: -1})) == "x ** -1" + assert str(Expr(Op.FACTORS, {x: -2})) == "x ** -2" + assert str(Expr(Op.FACTORS, {x: 1, y: 1})) == "x * y" + assert str(Expr(Op.FACTORS, {x: 2, y: 3})) == "x ** 2 * y ** 3" + + v = Expr(Op.FACTORS, {x: 2, Expr(Op.TERMS, {x: 1, y: 1}): 3}) + assert str(v) == "x ** 2 * (x + y) ** 3", str(v) + v = Expr(Op.FACTORS, {x: 2, Expr(Op.FACTORS, {x: 1, y: 1}): 3}) + assert str(v) == "x ** 2 * (x * y) ** 3", str(v) + + assert str(Expr(Op.APPLY, ("f", (), {}))) == "f()" + assert str(Expr(Op.APPLY, ("f", (x, ), {}))) == "f(x)" + assert str(Expr(Op.APPLY, ("f", (x, y), {}))) == "f(x, y)" + assert str(Expr(Op.INDEXING, ("f", x))) == "f[x]" + + assert str(as_ternary(x, y, z)) == "merge(y, z, x)" + assert str(as_eq(x, y)) == "x .eq. y" + assert str(as_ne(x, y)) == "x .ne. y" + assert str(as_lt(x, y)) == "x .lt. y" + assert str(as_le(x, y)) == "x .le. y" + assert str(as_gt(x, y)) == "x .gt. y" + assert str(as_ge(x, y)) == "x .ge. y" + + def test_tostring_c(self): + language = Language.C + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + n = as_number(123) + + assert Expr(Op.FACTORS, {x: 2}).tostring(language=language) == "x * x" + assert (Expr(Op.FACTORS, { + x + y: 2 + }).tostring(language=language) == "(x + y) * (x + y)") + assert Expr(Op.FACTORS, { + x: 12 + }).tostring(language=language) == "pow(x, 12)" + + assert as_apply(ArithOp.DIV, x, + y).tostring(language=language) == "x / y" + assert (as_apply(ArithOp.DIV, x, + x + y).tostring(language=language) == "x / (x + y)") + assert (as_apply(ArithOp.DIV, x - y, x + + y).tostring(language=language) == "(x - y) / (x + y)") + assert (x + (x - y) / (x + y) + + n).tostring(language=language) == "123 + x + (x - y) / (x + y)" + + assert as_ternary(x, y, z).tostring(language=language) == "(x?y:z)" + assert as_eq(x, y).tostring(language=language) == "x == y" + assert as_ne(x, y).tostring(language=language) == "x != y" + assert as_lt(x, y).tostring(language=language) == "x < y" + assert as_le(x, y).tostring(language=language) == "x <= y" + assert as_gt(x, y).tostring(language=language) == "x > y" + assert as_ge(x, y).tostring(language=language) == "x >= y" + + def test_operations(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + + assert x + x == Expr(Op.TERMS, {x: 2}) + assert x - x == Expr(Op.INTEGER, (0, 4)) + assert x + y == Expr(Op.TERMS, {x: 1, y: 1}) + assert x - y == Expr(Op.TERMS, {x: 1, y: -1}) + assert x * x == Expr(Op.FACTORS, {x: 2}) + assert x * y == Expr(Op.FACTORS, {x: 1, y: 1}) + + assert +x == x + assert -x == Expr(Op.TERMS, {x: -1}), repr(-x) + assert 2 * x == Expr(Op.TERMS, {x: 2}) + assert 2 + x == Expr(Op.TERMS, {x: 1, as_number(1): 2}) + assert 2 * x + 3 * y == Expr(Op.TERMS, {x: 2, y: 3}) + assert (x + y) * 2 == Expr(Op.TERMS, {x: 2, y: 2}) + + assert x**2 == Expr(Op.FACTORS, {x: 2}) + assert (x + y)**2 == Expr( + Op.TERMS, + { + Expr(Op.FACTORS, {x: 2}): 1, + Expr(Op.FACTORS, {y: 2}): 1, + Expr(Op.FACTORS, { + x: 1, + y: 1 + }): 2, + }, + ) + assert (x + y) * x == x**2 + x * y + assert (x + y)**2 == x**2 + 2 * x * y + y**2 + assert (x + y)**2 + (x - y)**2 == 2 * x**2 + 2 * y**2 + assert (x + y) * z == x * z + y * z + assert z * (x + y) == x * z + y * z + + assert (x / 2) == as_apply(ArithOp.DIV, x, as_number(2)) + assert (2 * x / 2) == x + assert (3 * x / 2) == as_apply(ArithOp.DIV, 3 * x, as_number(2)) + assert (4 * x / 2) == 2 * x + assert (5 * x / 2) == as_apply(ArithOp.DIV, 5 * x, as_number(2)) + assert (6 * x / 2) == 3 * x + assert ((3 * 5) * x / 6) == as_apply(ArithOp.DIV, 5 * x, as_number(2)) + assert (30 * x**2 * y**4 / (24 * x**3 * y**3)) == as_apply( + ArithOp.DIV, 5 * y, 4 * x) + assert ((15 * x / 6) / 5) == as_apply(ArithOp.DIV, x, + as_number(2)), (15 * x / 6) / 5 + assert (x / (5 / x)) == as_apply(ArithOp.DIV, x**2, as_number(5)) + + assert (x / 2.0) == Expr(Op.TERMS, {x: 0.5}) + + s = as_string('"ABC"') + t = as_string('"123"') + + assert s // t == Expr(Op.STRING, ('"ABC123"', 1)) + assert s // x == Expr(Op.CONCAT, (s, x)) + assert x // s == Expr(Op.CONCAT, (x, s)) + + c = as_complex(1.0, 2.0) + assert -c == as_complex(-1.0, -2.0) + assert c + c == as_expr((1 + 2j) * 2) + assert c * c == as_expr((1 + 2j)**2) + + def test_substitute(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + a = as_array((x, y)) + + assert x.substitute({x: y}) == y + assert (x + y).substitute({x: z}) == y + z + assert (x * y).substitute({x: z}) == y * z + assert (x**4).substitute({x: z}) == z**4 + assert (x / y).substitute({x: z}) == z / y + assert x.substitute({x: y + z}) == y + z + assert a.substitute({x: y + z}) == as_array((y + z, y)) + + assert as_ternary(x, y, + z).substitute({x: y + z}) == as_ternary(y + z, y, z) + assert as_eq(x, y).substitute({x: y + z}) == as_eq(y + z, y) + + def test_fromstring(self): + + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + f = as_symbol("f") + s = as_string('"ABC"') + t = as_string('"123"') + a = as_array((x, y)) + + assert fromstring("x") == x + assert fromstring("+ x") == x + assert fromstring("- x") == -x + assert fromstring("x + y") == x + y + assert fromstring("x + 1") == x + 1 + assert fromstring("x * y") == x * y + assert fromstring("x * 2") == x * 2 + assert fromstring("x / y") == x / y + assert fromstring("x ** 2", language=Language.Python) == x**2 + assert fromstring("x ** 2 ** 3", language=Language.Python) == x**2**3 + assert fromstring("(x + y) * z") == (x + y) * z + + assert fromstring("f(x)") == f(x) + assert fromstring("f(x,y)") == f(x, y) + assert fromstring("f[x]") == f[x] + assert fromstring("f[x][y]") == f[x][y] + + assert fromstring('"ABC"') == s + assert (normalize( + fromstring('"ABC" // "123" ', + language=Language.Fortran)) == s // t) + assert fromstring('f("ABC")') == f(s) + assert fromstring('MYSTRKIND_"ABC"') == as_string('"ABC"', "MYSTRKIND") + + assert fromstring("(/x, y/)") == a, fromstring("(/x, y/)") + assert fromstring("f((/x, y/))") == f(a) + assert fromstring("(/(x+y)*z/)") == as_array(((x + y) * z, )) + + assert fromstring("123") == as_number(123) + assert fromstring("123_2") == as_number(123, 2) + assert fromstring("123_myintkind") == as_number(123, "myintkind") + + assert fromstring("123.0") == as_number(123.0, 4) + assert fromstring("123.0_4") == as_number(123.0, 4) + assert fromstring("123.0_8") == as_number(123.0, 8) + assert fromstring("123.0e0") == as_number(123.0, 4) + assert fromstring("123.0d0") == as_number(123.0, 8) + assert fromstring("123d0") == as_number(123.0, 8) + assert fromstring("123e-0") == as_number(123.0, 4) + assert fromstring("123d+0") == as_number(123.0, 8) + assert fromstring("123.0_myrealkind") == as_number(123.0, "myrealkind") + assert fromstring("3E4") == as_number(30000.0, 4) + + assert fromstring("(1, 2)") == as_complex(1, 2) + assert fromstring("(1e2, PI)") == as_complex(as_number(100.0), + as_symbol("PI")) + + assert fromstring("[1, 2]") == as_array((as_number(1), as_number(2))) + + assert fromstring("POINT(x, y=1)") == as_apply(as_symbol("POINT"), + x, + y=as_number(1)) + assert fromstring( + 'PERSON(name="John", age=50, shape=(/34, 23/))') == as_apply( + as_symbol("PERSON"), + name=as_string('"John"'), + age=as_number(50), + shape=as_array((as_number(34), as_number(23))), + ) + + assert fromstring("x?y:z") == as_ternary(x, y, z) + + assert fromstring("*x") == as_deref(x) + assert fromstring("**x") == as_deref(as_deref(x)) + assert fromstring("&x") == as_ref(x) + assert fromstring("(*x) * (*y)") == as_deref(x) * as_deref(y) + assert fromstring("(*x) * *y") == as_deref(x) * as_deref(y) + assert fromstring("*x * *y") == as_deref(x) * as_deref(y) + assert fromstring("*x**y") == as_deref(x) * as_deref(y) + + assert fromstring("x == y") == as_eq(x, y) + assert fromstring("x != y") == as_ne(x, y) + assert fromstring("x < y") == as_lt(x, y) + assert fromstring("x > y") == as_gt(x, y) + assert fromstring("x <= y") == as_le(x, y) + assert fromstring("x >= y") == as_ge(x, y) + + assert fromstring("x .eq. y", language=Language.Fortran) == as_eq(x, y) + assert fromstring("x .ne. y", language=Language.Fortran) == as_ne(x, y) + assert fromstring("x .lt. y", language=Language.Fortran) == as_lt(x, y) + assert fromstring("x .gt. y", language=Language.Fortran) == as_gt(x, y) + assert fromstring("x .le. y", language=Language.Fortran) == as_le(x, y) + assert fromstring("x .ge. y", language=Language.Fortran) == as_ge(x, y) + + def test_traverse(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + f = as_symbol("f") + + # Use traverse to substitute a symbol + def replace_visit(s, r=z): + if s == x: + return r + + assert x.traverse(replace_visit) == z + assert y.traverse(replace_visit) == y + assert z.traverse(replace_visit) == z + assert (f(y)).traverse(replace_visit) == f(y) + assert (f(x)).traverse(replace_visit) == f(z) + assert (f[y]).traverse(replace_visit) == f[y] + assert (f[z]).traverse(replace_visit) == f[z] + assert (x + y + z).traverse(replace_visit) == (2 * z + y) + assert (x + + f(y, x - z)).traverse(replace_visit) == (z + + f(y, as_number(0))) + assert as_eq(x, y).traverse(replace_visit) == as_eq(z, y) + + # Use traverse to collect symbols, method 1 + function_symbols = set() + symbols = set() + + def collect_symbols(s): + if s.op is Op.APPLY: + oper = s.data[0] + function_symbols.add(oper) + if oper in symbols: + symbols.remove(oper) + elif s.op is Op.SYMBOL and s not in function_symbols: + symbols.add(s) + + (x + f(y, x - z)).traverse(collect_symbols) + assert function_symbols == {f} + assert symbols == {x, y, z} + + # Use traverse to collect symbols, method 2 + def collect_symbols2(expr, symbols): + if expr.op is Op.SYMBOL: + symbols.add(expr) + + symbols = set() + (x + f(y, x - z)).traverse(collect_symbols2, symbols) + assert symbols == {x, y, z, f} + + # Use traverse to partially collect symbols + def collect_symbols3(expr, symbols): + if expr.op is Op.APPLY: + # skip traversing function calls + return expr + if expr.op is Op.SYMBOL: + symbols.add(expr) + + symbols = set() + (x + f(y, x - z)).traverse(collect_symbols3, symbols) + assert symbols == {x} + + def test_linear_solve(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + + assert x.linear_solve(x) == (as_number(1), as_number(0)) + assert (x + 1).linear_solve(x) == (as_number(1), as_number(1)) + assert (2 * x).linear_solve(x) == (as_number(2), as_number(0)) + assert (2 * x + 3).linear_solve(x) == (as_number(2), as_number(3)) + assert as_number(3).linear_solve(x) == (as_number(0), as_number(3)) + assert y.linear_solve(x) == (as_number(0), y) + assert (y * z).linear_solve(x) == (as_number(0), y * z) + + assert (x + y).linear_solve(x) == (as_number(1), y) + assert (z * x + y).linear_solve(x) == (z, y) + assert ((z + y) * x + y).linear_solve(x) == (z + y, y) + assert (z * y * x + y).linear_solve(x) == (z * y, y) + + pytest.raises(RuntimeError, lambda: (x * x).linear_solve(x)) + + def test_as_numer_denom(self): + x = as_symbol("x") + y = as_symbol("y") + n = as_number(123) + + assert as_numer_denom(x) == (x, as_number(1)) + assert as_numer_denom(x / n) == (x, n) + assert as_numer_denom(n / x) == (n, x) + assert as_numer_denom(x / y) == (x, y) + assert as_numer_denom(x * y) == (x * y, as_number(1)) + assert as_numer_denom(n + x / y) == (x + n * y, y) + assert as_numer_denom(n + x / (y - x / n)) == (y * n**2, y * n - x) + + def test_polynomial_atoms(self): + x = as_symbol("x") + y = as_symbol("y") + n = as_number(123) + + assert x.polynomial_atoms() == {x} + assert n.polynomial_atoms() == set() + assert (y[x]).polynomial_atoms() == {y[x]} + assert (y(x)).polynomial_atoms() == {y(x)} + assert (y(x) + x).polynomial_atoms() == {y(x), x} + assert (y(x) * x[y]).polynomial_atoms() == {y(x), x[y]} + assert (y(x)**x).polynomial_atoms() == {y(x)} + + def test_unmatched_parenthesis_gh30268(self): + #gh - 30268 + with pytest.raises(ValueError, match=r"Mismatch of \(\) parenthesis"): + Expr.parse("DATA (A, I=1, N", language=Language.Fortran) diff --git a/local_packages/numpy/f2py/tests/test_value_attrspec.py b/local_packages/numpy/f2py/tests/test_value_attrspec.py new file mode 100644 index 0000000..1afae08 --- /dev/null +++ b/local_packages/numpy/f2py/tests/test_value_attrspec.py @@ -0,0 +1,15 @@ +import pytest + +from . import util + + +class TestValueAttr(util.F2PyTest): + sources = [util.getpath("tests", "src", "value_attrspec", "gh21665.f90")] + + # gh-21665 + @pytest.mark.slow + def test_gh21665(self): + inp = 2 + out = self.module.fortfuncs.square(inp) + exp_out = 4 + assert out == exp_out diff --git a/local_packages/numpy/f2py/tests/util.py b/local_packages/numpy/f2py/tests/util.py new file mode 100644 index 0000000..35e5d3b --- /dev/null +++ b/local_packages/numpy/f2py/tests/util.py @@ -0,0 +1,442 @@ +""" +Utility functions for + +- building and importing modules on test time, using a temporary location +- detecting if compilers are present +- determining paths to tests + +""" +import atexit +import concurrent.futures +import contextlib +import glob +import os +import shutil +import subprocess +import sys +import tempfile +from importlib import import_module +from pathlib import Path + +import pytest + +import numpy +from numpy._utils import asunicode +from numpy.f2py._backends._meson import MesonBackend +from numpy.testing import IS_WASM, temppath + +# +# Check if compilers are available at all... +# + +def check_language(lang, code_snippet=None): + if sys.platform == "win32": + pytest.skip("No Fortran tests on Windows (Issue #25134)", allow_module_level=True) + tmpdir = tempfile.mkdtemp() + try: + meson_file = os.path.join(tmpdir, "meson.build") + with open(meson_file, "w") as f: + f.write("project('check_compilers')\n") + f.write(f"add_languages('{lang}')\n") + if code_snippet: + f.write(f"{lang}_compiler = meson.get_compiler('{lang}')\n") + f.write(f"{lang}_code = '''{code_snippet}'''\n") + f.write( + f"_have_{lang}_feature =" + f"{lang}_compiler.compiles({lang}_code," + f" name: '{lang} feature check')\n" + ) + try: + runmeson = subprocess.run( + ["meson", "setup", "btmp"], + check=False, + cwd=tmpdir, + capture_output=True, + ) + except subprocess.CalledProcessError: + pytest.skip("meson not present, skipping compiler dependent test", allow_module_level=True) + return runmeson.returncode == 0 + finally: + shutil.rmtree(tmpdir) + + +fortran77_code = ''' +C Example Fortran 77 code + PROGRAM HELLO + PRINT *, 'Hello, Fortran 77!' + END +''' + +fortran90_code = ''' +! Example Fortran 90 code +program hello90 + type :: greeting + character(len=20) :: text + end type greeting + + type(greeting) :: greet + greet%text = 'hello, fortran 90!' + print *, greet%text +end program hello90 +''' + +# Dummy class for caching relevant checks +class CompilerChecker: + def __init__(self): + self.compilers_checked = False + self.has_c = False + self.has_f77 = False + self.has_f90 = False + + def check_compilers(self): + if (not self.compilers_checked) and (not sys.platform == "cygwin"): + with concurrent.futures.ThreadPoolExecutor() as executor: + futures = [ + executor.submit(check_language, "c"), + executor.submit(check_language, "fortran", fortran77_code), + executor.submit(check_language, "fortran", fortran90_code) + ] + + self.has_c = futures[0].result() + self.has_f77 = futures[1].result() + self.has_f90 = futures[2].result() + + self.compilers_checked = True + + +if not IS_WASM: + checker = CompilerChecker() + checker.check_compilers() + +def has_c_compiler(): + return checker.has_c + +def has_f77_compiler(): + return checker.has_f77 + +def has_f90_compiler(): + return checker.has_f90 + +def has_fortran_compiler(): + return (checker.has_f90 and checker.has_f77) + + +# +# Maintaining a temporary module directory +# + +_module_dir = None +_module_num = 5403 + +if sys.platform == "cygwin": + NUMPY_INSTALL_ROOT = Path(__file__).parent.parent.parent + _module_list = list(NUMPY_INSTALL_ROOT.glob("**/*.dll")) + + +def _cleanup(): + global _module_dir + if _module_dir is not None: + try: + sys.path.remove(_module_dir) + except ValueError: + pass + try: + shutil.rmtree(_module_dir) + except OSError: + pass + _module_dir = None + + +def get_module_dir(): + global _module_dir + if _module_dir is None: + _module_dir = tempfile.mkdtemp() + atexit.register(_cleanup) + if _module_dir not in sys.path: + sys.path.insert(0, _module_dir) + return _module_dir + + +def get_temp_module_name(): + # Assume single-threaded, and the module dir usable only by this thread + global _module_num + get_module_dir() + name = "_test_ext_module_%d" % _module_num + _module_num += 1 + if name in sys.modules: + # this should not be possible, but check anyway + raise RuntimeError("Temporary module name already in use.") + return name + + +def _memoize(func): + memo = {} + + def wrapper(*a, **kw): + key = repr((a, kw)) + if key not in memo: + try: + memo[key] = func(*a, **kw) + except Exception as e: + memo[key] = e + raise + ret = memo[key] + if isinstance(ret, Exception): + raise ret + return ret + + wrapper.__name__ = func.__name__ + return wrapper + + +# +# Building modules +# + + +@_memoize +def build_module(source_files, options=[], skip=[], only=[], module_name=None): + """ + Compile and import a f2py module, built from the given files. + + """ + + code = f"import sys; sys.path = {sys.path!r}; import numpy.f2py; numpy.f2py.main()" + + d = get_module_dir() + # gh-27045 : Skip if no compilers are found + if not has_fortran_compiler(): + pytest.skip("No Fortran compiler available") + + # Copy files + dst_sources = [] + f2py_sources = [] + for fn in source_files: + if not os.path.isfile(fn): + raise RuntimeError(f"{fn} is not a file") + dst = os.path.join(d, os.path.basename(fn)) + shutil.copyfile(fn, dst) + dst_sources.append(dst) + + base, ext = os.path.splitext(dst) + if ext in (".f90", ".f95", ".f", ".c", ".pyf"): + f2py_sources.append(dst) + + assert f2py_sources + + # Prepare options + if module_name is None: + module_name = get_temp_module_name() + gil_options = [] + if '--freethreading-compatible' not in options and '--no-freethreading-compatible' not in options: + # default to disabling the GIL if unset in options + gil_options = ['--freethreading-compatible'] + f2py_opts = ["-c", "-m", module_name] + options + gil_options + f2py_sources + f2py_opts += ["--backend", "meson"] + if skip: + f2py_opts += ["skip:"] + skip + if only: + f2py_opts += ["only:"] + only + + # Build + cwd = os.getcwd() + try: + os.chdir(d) + cmd = [sys.executable, "-c", code] + f2py_opts + p = subprocess.Popen(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + out, err = p.communicate() + if p.returncode != 0: + raise RuntimeError(f"Running f2py failed: {cmd[4:]}\n{asunicode(out)}") + finally: + os.chdir(cwd) + + # Partial cleanup + for fn in dst_sources: + os.unlink(fn) + + # Rebase (Cygwin-only) + if sys.platform == "cygwin": + # If someone starts deleting modules after import, this will + # need to change to record how big each module is, rather than + # relying on rebase being able to find that from the files. + _module_list.extend( + glob.glob(os.path.join(d, f"{module_name:s}*")) + ) + subprocess.check_call( + ["/usr/bin/rebase", "--database", "--oblivious", "--verbose"] + + _module_list + ) + + # Import + return import_module(module_name) + + +@_memoize +def build_code(source_code, + options=[], + skip=[], + only=[], + suffix=None, + module_name=None): + """ + Compile and import Fortran code using f2py. + + """ + if suffix is None: + suffix = ".f" + with temppath(suffix=suffix) as path: + with open(path, "w") as f: + f.write(source_code) + return build_module([path], + options=options, + skip=skip, + only=only, + module_name=module_name) + + +# +# Building with meson +# + + +class SimplifiedMesonBackend(MesonBackend): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def compile(self): + self.write_meson_build(self.build_dir) + self.run_meson(self.build_dir) + + +def build_meson(source_files, module_name=None, **kwargs): + """ + Build a module via Meson and import it. + """ + + # gh-27045 : Skip if no compilers are found + if not has_fortran_compiler(): + pytest.skip("No Fortran compiler available") + + build_dir = get_module_dir() + if module_name is None: + module_name = get_temp_module_name() + + # Initialize the MesonBackend + backend = SimplifiedMesonBackend( + modulename=module_name, + sources=source_files, + extra_objects=kwargs.get("extra_objects", []), + build_dir=build_dir, + include_dirs=kwargs.get("include_dirs", []), + library_dirs=kwargs.get("library_dirs", []), + libraries=kwargs.get("libraries", []), + define_macros=kwargs.get("define_macros", []), + undef_macros=kwargs.get("undef_macros", []), + f2py_flags=kwargs.get("f2py_flags", []), + sysinfo_flags=kwargs.get("sysinfo_flags", []), + fc_flags=kwargs.get("fc_flags", []), + flib_flags=kwargs.get("flib_flags", []), + setup_flags=kwargs.get("setup_flags", []), + remove_build_dir=kwargs.get("remove_build_dir", False), + extra_dat=kwargs.get("extra_dat", {}), + ) + + backend.compile() + + # Import the compiled module + sys.path.insert(0, f"{build_dir}/{backend.meson_build_dir}") + return import_module(module_name) + + +# +# Unittest convenience +# + + +class F2PyTest: + code = None + sources = None + options = [] + skip = [] + only = [] + suffix = ".f" + module = None + _has_c_compiler = None + _has_f77_compiler = None + _has_f90_compiler = None + + @property + def module_name(self): + cls = type(self) + return f'_{cls.__module__.rsplit(".", 1)[-1]}_{cls.__name__}_ext_module' + + @classmethod + def setup_class(cls): + if sys.platform == "win32": + pytest.skip("Fails with MinGW64 Gfortran (Issue #9673)") + F2PyTest._has_c_compiler = has_c_compiler() + F2PyTest._has_f77_compiler = has_f77_compiler() + F2PyTest._has_f90_compiler = has_f90_compiler() + F2PyTest._has_fortran_compiler = has_fortran_compiler() + + def setup_method(self): + if self.module is not None: + return + + codes = self.sources or [] + if self.code: + codes.append(self.suffix) + + needs_f77 = any(str(fn).endswith(".f") for fn in codes) + needs_f90 = any(str(fn).endswith(".f90") for fn in codes) + needs_pyf = any(str(fn).endswith(".pyf") for fn in codes) + + if needs_f77 and not self._has_f77_compiler: + pytest.skip("No Fortran 77 compiler available") + if needs_f90 and not self._has_f90_compiler: + pytest.skip("No Fortran 90 compiler available") + if needs_pyf and not self._has_fortran_compiler: + pytest.skip("No Fortran compiler available") + + # Build the module + if self.code is not None: + self.module = build_code( + self.code, + options=self.options, + skip=self.skip, + only=self.only, + suffix=self.suffix, + module_name=self.module_name, + ) + + if self.sources is not None: + self.module = build_module( + self.sources, + options=self.options, + skip=self.skip, + only=self.only, + module_name=self.module_name, + ) + + +# +# Helper functions +# + + +def getpath(*a): + # Package root + d = Path(numpy.f2py.__file__).parent.resolve() + return d.joinpath(*a) + + +@contextlib.contextmanager +def switchdir(path): + curpath = Path.cwd() + os.chdir(path) + try: + yield + finally: + os.chdir(curpath) diff --git a/local_packages/numpy/f2py/use_rules.py b/local_packages/numpy/f2py/use_rules.py new file mode 100644 index 0000000..1e06f6c --- /dev/null +++ b/local_packages/numpy/f2py/use_rules.py @@ -0,0 +1,99 @@ +""" +Build 'use others module data' mechanism for f2py2e. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +__version__ = "$Revision: 1.3 $"[10:-1] + +f2py_version = 'See `f2py -v`' + + +from .auxfuncs import applyrules, dictappend, gentitle, hasnote, outmess + +usemodule_rules = { + 'body': """ +#begintitle# +static char doc_#apiname#[] = \"\\\nVariable wrapper signature:\\n\\ +\t #name# = get_#name#()\\n\\ +Arguments:\\n\\ +#docstr#\"; +extern F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#); +static PyObject *#apiname#(PyObject *capi_self, PyObject *capi_args) { +/*#decl#*/ +\tif (!PyArg_ParseTuple(capi_args, \"\")) goto capi_fail; +printf(\"c: %d\\n\",F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#)); +\treturn Py_BuildValue(\"\"); +capi_fail: +\treturn NULL; +} +""", + 'method': '\t{\"get_#name#\",#apiname#,METH_VARARGS|METH_KEYWORDS,doc_#apiname#},', + 'need': ['F_MODFUNC'] +} + +################ + + +def buildusevars(m, r): + ret = {} + outmess( + f"\t\tBuilding use variable hooks for module \"{m['name']}\" (feature only for F90/F95)...\n") + varsmap = {} + revmap = {} + if 'map' in r: + for k in r['map'].keys(): + if r['map'][k] in revmap: + outmess('\t\t\tVariable "%s<=%s" is already mapped by "%s". Skipping.\n' % ( + r['map'][k], k, revmap[r['map'][k]])) + else: + revmap[r['map'][k]] = k + if r.get('only'): + for v in r['map'].keys(): + if r['map'][v] in m['vars']: + + if revmap[r['map'][v]] == v: + varsmap[v] = r['map'][v] + else: + outmess(f"\t\t\tIgnoring map \"{v}=>{r['map'][v]}\". See above.\n") + else: + outmess( + f"\t\t\tNo definition for variable \"{v}=>{r['map'][v]}\". Skipping.\n") + else: + for v in m['vars'].keys(): + varsmap[v] = revmap.get(v, v) + for v in varsmap.keys(): + ret = dictappend(ret, buildusevar(v, varsmap[v], m['vars'], m['name'])) + return ret + + +def buildusevar(name, realname, vars, usemodulename): + outmess('\t\t\tConstructing wrapper function for variable "%s=>%s"...\n' % ( + name, realname)) + ret = {} + vrd = {'name': name, + 'realname': realname, + 'REALNAME': realname.upper(), + 'usemodulename': usemodulename, + 'USEMODULENAME': usemodulename.upper(), + 'texname': name.replace('_', '\\_'), + 'begintitle': gentitle(f'{name}=>{realname}'), + 'endtitle': gentitle(f'end of {name}=>{realname}'), + 'apiname': f'#modulename#_use_{realname}_from_{usemodulename}' + } + nummap = {0: 'Ro', 1: 'Ri', 2: 'Rii', 3: 'Riii', 4: 'Riv', + 5: 'Rv', 6: 'Rvi', 7: 'Rvii', 8: 'Rviii', 9: 'Rix'} + vrd['texnamename'] = name + for i in nummap.keys(): + vrd['texnamename'] = vrd['texnamename'].replace(repr(i), nummap[i]) + if hasnote(vars[realname]): + vrd['note'] = vars[realname]['note'] + rd = dictappend({}, vrd) + + print(name, realname, vars[realname]) + ret = applyrules(usemodule_rules, rd) + return ret diff --git a/local_packages/numpy/f2py/use_rules.pyi b/local_packages/numpy/f2py/use_rules.pyi new file mode 100644 index 0000000..58c7f9b --- /dev/null +++ b/local_packages/numpy/f2py/use_rules.pyi @@ -0,0 +1,9 @@ +from collections.abc import Mapping +from typing import Any, Final + +__version__: Final[str] = ... +f2py_version: Final = "See `f2py -v`" +usemodule_rules: Final[dict[str, str | list[str]]] = ... + +def buildusevars(m: Mapping[str, object], r: Mapping[str, Mapping[str, object]]) -> dict[str, Any]: ... +def buildusevar(name: str, realname: str, vars: Mapping[str, Mapping[str, object]], usemodulename: str) -> dict[str, Any]: ... diff --git a/local_packages/numpy/fft/__init__.py b/local_packages/numpy/fft/__init__.py new file mode 100644 index 0000000..2de162c --- /dev/null +++ b/local_packages/numpy/fft/__init__.py @@ -0,0 +1,213 @@ +""" +Discrete Fourier Transform +========================== + +.. currentmodule:: numpy.fft + +The SciPy module `scipy.fft` is a more comprehensive superset +of `numpy.fft`, which includes only a basic set of routines. + +Standard FFTs +------------- + +.. autosummary:: + :toctree: generated/ + + fft Discrete Fourier transform. + ifft Inverse discrete Fourier transform. + fft2 Discrete Fourier transform in two dimensions. + ifft2 Inverse discrete Fourier transform in two dimensions. + fftn Discrete Fourier transform in N-dimensions. + ifftn Inverse discrete Fourier transform in N dimensions. + +Real FFTs +--------- + +.. autosummary:: + :toctree: generated/ + + rfft Real discrete Fourier transform. + irfft Inverse real discrete Fourier transform. + rfft2 Real discrete Fourier transform in two dimensions. + irfft2 Inverse real discrete Fourier transform in two dimensions. + rfftn Real discrete Fourier transform in N dimensions. + irfftn Inverse real discrete Fourier transform in N dimensions. + +Hermitian FFTs +-------------- + +.. autosummary:: + :toctree: generated/ + + hfft Hermitian discrete Fourier transform. + ihfft Inverse Hermitian discrete Fourier transform. + +Helper routines +--------------- + +.. autosummary:: + :toctree: generated/ + + fftfreq Discrete Fourier Transform sample frequencies. + rfftfreq DFT sample frequencies (for usage with rfft, irfft). + fftshift Shift zero-frequency component to center of spectrum. + ifftshift Inverse of fftshift. + + +Background information +---------------------- + +Fourier analysis is fundamentally a method for expressing a function as a +sum of periodic components, and for recovering the function from those +components. When both the function and its Fourier transform are +replaced with discretized counterparts, it is called the discrete Fourier +transform (DFT). The DFT has become a mainstay of numerical computing in +part because of a very fast algorithm for computing it, called the Fast +Fourier Transform (FFT), which was known to Gauss (1805) and was brought +to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_ +provide an accessible introduction to Fourier analysis and its +applications. + +Because the discrete Fourier transform separates its input into +components that contribute at discrete frequencies, it has a great number +of applications in digital signal processing, e.g., for filtering, and in +this context the discretized input to the transform is customarily +referred to as a *signal*, which exists in the *time domain*. The output +is called a *spectrum* or *transform* and exists in the *frequency +domain*. + +Implementation details +---------------------- + +There are many ways to define the DFT, varying in the sign of the +exponent, normalization, etc. In this implementation, the DFT is defined +as + +.. math:: + A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\} + \\qquad k = 0,\\ldots,n-1. + +The DFT is in general defined for complex inputs and outputs, and a +single-frequency component at linear frequency :math:`f` is +represented by a complex exponential +:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t` +is the sampling interval. + +The values in the result follow so-called "standard" order: If ``A = +fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the sum of +the signal), which is always purely real for real inputs. Then ``A[1:n/2]`` +contains the positive-frequency terms, and ``A[n/2+1:]`` contains the +negative-frequency terms, in order of decreasingly negative frequency. +For an even number of input points, ``A[n/2]`` represents both positive and +negative Nyquist frequency, and is also purely real for real input. For +an odd number of input points, ``A[(n-1)/2]`` contains the largest positive +frequency, while ``A[(n+1)/2]`` contains the largest negative frequency. +The routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies +of corresponding elements in the output. The routine +``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the +zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes +that shift. + +When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)`` +is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum. +The phase spectrum is obtained by ``np.angle(A)``. + +The inverse DFT is defined as + +.. math:: + a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\} + \\qquad m = 0,\\ldots,n-1. + +It differs from the forward transform by the sign of the exponential +argument and the default normalization by :math:`1/n`. + +Type Promotion +-------------- + +`numpy.fft` promotes ``float32`` and ``complex64`` arrays to ``float64`` and +``complex128`` arrays respectively. For an FFT implementation that does not +promote input arrays, see `scipy.fftpack`. + +Normalization +------------- + +The argument ``norm`` indicates which direction of the pair of direct/inverse +transforms is scaled and with what normalization factor. +The default normalization (``"backward"``) has the direct (forward) transforms +unscaled and the inverse (backward) transforms scaled by :math:`1/n`. It is +possible to obtain unitary transforms by setting the keyword argument ``norm`` +to ``"ortho"`` so that both direct and inverse transforms are scaled by +:math:`1/\\sqrt{n}`. Finally, setting the keyword argument ``norm`` to +``"forward"`` has the direct transforms scaled by :math:`1/n` and the inverse +transforms unscaled (i.e. exactly opposite to the default ``"backward"``). +`None` is an alias of the default option ``"backward"`` for backward +compatibility. + +Real and Hermitian transforms +----------------------------- + +When the input is purely real, its transform is Hermitian, i.e., the +component at frequency :math:`f_k` is the complex conjugate of the +component at frequency :math:`-f_k`, which means that for real +inputs there is no information in the negative frequency components that +is not already available from the positive frequency components. +The family of `rfft` functions is +designed to operate on real inputs, and exploits this symmetry by +computing only the positive frequency components, up to and including the +Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex +output points. The inverses of this family assumes the same symmetry of +its input, and for an output of ``n`` points uses ``n/2+1`` input points. + +Correspondingly, when the spectrum is purely real, the signal is +Hermitian. The `hfft` family of functions exploits this symmetry by +using ``n/2+1`` complex points in the input (time) domain for ``n`` real +points in the frequency domain. + +In higher dimensions, FFTs are used, e.g., for image analysis and +filtering. The computational efficiency of the FFT means that it can +also be a faster way to compute large convolutions, using the property +that a convolution in the time domain is equivalent to a point-by-point +multiplication in the frequency domain. + +Higher dimensions +----------------- + +In two dimensions, the DFT is defined as + +.. math:: + A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1} + a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\} + \\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1, + +which extends in the obvious way to higher dimensions, and the inverses +in higher dimensions also extend in the same way. + +References +---------- + +.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the + machine calculation of complex Fourier series," *Math. Comput.* + 19: 297-301. + +.. [NR] Press, W., Teukolsky, S., Vetterline, W.T., and Flannery, B.P., + 2007, *Numerical Recipes: The Art of Scientific Computing*, ch. + 12-13. Cambridge Univ. Press, Cambridge, UK. + +Examples +-------- + +For examples, see the various functions. + +""" + +from . import _helper, _pocketfft +from ._helper import * +from ._pocketfft import * + +__all__ = _pocketfft.__all__.copy() # noqa: PLE0605 +__all__ += _helper.__all__ + +from numpy._pytesttester import PytestTester + +test = PytestTester(__name__) +del PytestTester diff --git a/local_packages/numpy/fft/__init__.pyi b/local_packages/numpy/fft/__init__.pyi new file mode 100644 index 0000000..893a697 --- /dev/null +++ b/local_packages/numpy/fft/__init__.pyi @@ -0,0 +1,38 @@ +from ._helper import fftfreq, fftshift, ifftshift, rfftfreq +from ._pocketfft import ( + fft, + fft2, + fftn, + hfft, + ifft, + ifft2, + ifftn, + ihfft, + irfft, + irfft2, + irfftn, + rfft, + rfft2, + rfftn, +) + +__all__ = [ + "fft", + "ifft", + "rfft", + "irfft", + "hfft", + "ihfft", + "rfftn", + "irfftn", + "rfft2", + "irfft2", + "fft2", + "ifft2", + "fftn", + "ifftn", + "fftshift", + "ifftshift", + "fftfreq", + "rfftfreq", +] diff --git a/local_packages/numpy/fft/_helper.py b/local_packages/numpy/fft/_helper.py new file mode 100644 index 0000000..77adeac --- /dev/null +++ b/local_packages/numpy/fft/_helper.py @@ -0,0 +1,235 @@ +""" +Discrete Fourier Transforms - _helper.py + +""" +from numpy._core import arange, asarray, empty, integer, roll +from numpy._core.overrides import array_function_dispatch, set_module + +# Created by Pearu Peterson, September 2002 + +__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq'] + +integer_types = (int, integer) + + +def _fftshift_dispatcher(x, axes=None): + return (x,) + + +@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft') +def fftshift(x, axes=None): + """ + Shift the zero-frequency component to the center of the spectrum. + + This function swaps half-spaces for all axes listed (defaults to all). + Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even. + + Parameters + ---------- + x : array_like + Input array. + axes : int or shape tuple, optional + Axes over which to shift. Default is None, which shifts all axes. + + Returns + ------- + y : ndarray + The shifted array. + + See Also + -------- + ifftshift : The inverse of `fftshift`. + + Examples + -------- + >>> import numpy as np + >>> freqs = np.fft.fftfreq(10, 0.1) + >>> freqs + array([ 0., 1., 2., ..., -3., -2., -1.]) + >>> np.fft.fftshift(freqs) + array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.]) + + Shift the zero-frequency component only along the second axis: + + >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) + >>> freqs + array([[ 0., 1., 2.], + [ 3., 4., -4.], + [-3., -2., -1.]]) + >>> np.fft.fftshift(freqs, axes=(1,)) + array([[ 2., 0., 1.], + [-4., 3., 4.], + [-1., -3., -2.]]) + + """ + x = asarray(x) + if axes is None: + axes = tuple(range(x.ndim)) + shift = [dim // 2 for dim in x.shape] + elif isinstance(axes, integer_types): + shift = x.shape[axes] // 2 + else: + shift = [x.shape[ax] // 2 for ax in axes] + + return roll(x, shift, axes) + + +@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft') +def ifftshift(x, axes=None): + """ + The inverse of `fftshift`. Although identical for even-length `x`, the + functions differ by one sample for odd-length `x`. + + Parameters + ---------- + x : array_like + Input array. + axes : int or shape tuple, optional + Axes over which to calculate. Defaults to None, which shifts all axes. + + Returns + ------- + y : ndarray + The shifted array. + + See Also + -------- + fftshift : Shift zero-frequency component to the center of the spectrum. + + Examples + -------- + >>> import numpy as np + >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) + >>> freqs + array([[ 0., 1., 2.], + [ 3., 4., -4.], + [-3., -2., -1.]]) + >>> np.fft.ifftshift(np.fft.fftshift(freqs)) + array([[ 0., 1., 2.], + [ 3., 4., -4.], + [-3., -2., -1.]]) + + """ + x = asarray(x) + if axes is None: + axes = tuple(range(x.ndim)) + shift = [-(dim // 2) for dim in x.shape] + elif isinstance(axes, integer_types): + shift = -(x.shape[axes] // 2) + else: + shift = [-(x.shape[ax] // 2) for ax in axes] + + return roll(x, shift, axes) + + +@set_module('numpy.fft') +def fftfreq(n, d=1.0, device=None): + """ + Return the Discrete Fourier Transform sample frequencies. + + The returned float array `f` contains the frequency bin centers in cycles + per unit of the sample spacing (with zero at the start). For instance, if + the sample spacing is in seconds, then the frequency unit is cycles/second. + + Given a window length `n` and a sample spacing `d`:: + + f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even + f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) if n is odd + + Parameters + ---------- + n : int + Window length. + d : scalar, optional + Sample spacing (inverse of the sampling rate). Defaults to 1. + device : str, optional + The device on which to place the created array. Default: ``None``. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + + Returns + ------- + f : ndarray + Array of length `n` containing the sample frequencies. + + Examples + -------- + >>> import numpy as np + >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float) + >>> fourier = np.fft.fft(signal) + >>> n = signal.size + >>> timestep = 0.1 + >>> freq = np.fft.fftfreq(n, d=timestep) + >>> freq + array([ 0. , 1.25, 2.5 , ..., -3.75, -2.5 , -1.25]) + + """ + if not isinstance(n, integer_types): + raise ValueError("n should be an integer") + val = 1.0 / (n * d) + results = empty(n, int, device=device) + N = (n - 1) // 2 + 1 + p1 = arange(0, N, dtype=int, device=device) + results[:N] = p1 + p2 = arange(-(n // 2), 0, dtype=int, device=device) + results[N:] = p2 + return results * val + + +@set_module('numpy.fft') +def rfftfreq(n, d=1.0, device=None): + """ + Return the Discrete Fourier Transform sample frequencies + (for usage with rfft, irfft). + + The returned float array `f` contains the frequency bin centers in cycles + per unit of the sample spacing (with zero at the start). For instance, if + the sample spacing is in seconds, then the frequency unit is cycles/second. + + Given a window length `n` and a sample spacing `d`:: + + f = [0, 1, ..., n/2-1, n/2] / (d*n) if n is even + f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n) if n is odd + + Unlike `fftfreq` (but like `scipy.fftpack.rfftfreq`) + the Nyquist frequency component is considered to be positive. + + Parameters + ---------- + n : int + Window length. + d : scalar, optional + Sample spacing (inverse of the sampling rate). Defaults to 1. + device : str, optional + The device on which to place the created array. Default: ``None``. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + + Returns + ------- + f : ndarray + Array of length ``n//2 + 1`` containing the sample frequencies. + + Examples + -------- + >>> import numpy as np + >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float) + >>> fourier = np.fft.rfft(signal) + >>> n = signal.size + >>> sample_rate = 100 + >>> freq = np.fft.fftfreq(n, d=1./sample_rate) + >>> freq + array([ 0., 10., 20., ..., -30., -20., -10.]) + >>> freq = np.fft.rfftfreq(n, d=1./sample_rate) + >>> freq + array([ 0., 10., 20., 30., 40., 50.]) + + """ + if not isinstance(n, integer_types): + raise ValueError("n should be an integer") + val = 1.0 / (n * d) + N = n // 2 + 1 + results = arange(0, N, dtype=int, device=device) + return results * val diff --git a/local_packages/numpy/fft/_helper.pyi b/local_packages/numpy/fft/_helper.pyi new file mode 100644 index 0000000..1ea451e --- /dev/null +++ b/local_packages/numpy/fft/_helper.pyi @@ -0,0 +1,44 @@ +from typing import Any, Final, Literal as L, TypeVar, overload + +from numpy import complexfloating, floating, generic, integer +from numpy._typing import ( + ArrayLike, + NDArray, + _ArrayLike, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ShapeLike, +) + +__all__ = ["fftfreq", "fftshift", "ifftshift", "rfftfreq"] + +_ScalarT = TypeVar("_ScalarT", bound=generic) + +### + +integer_types: Final[tuple[type[int], type[integer]]] = ... + +### + +@overload +def fftshift(x: _ArrayLike[_ScalarT], axes: _ShapeLike | None = None) -> NDArray[_ScalarT]: ... +@overload +def fftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... + +# +@overload +def ifftshift(x: _ArrayLike[_ScalarT], axes: _ShapeLike | None = None) -> NDArray[_ScalarT]: ... +@overload +def ifftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... + +# +@overload +def fftfreq(n: int | integer, d: _ArrayLikeFloat_co = 1.0, device: L["cpu"] | None = None) -> NDArray[floating]: ... +@overload +def fftfreq(n: int | integer, d: _ArrayLikeComplex_co = 1.0, device: L["cpu"] | None = None) -> NDArray[complexfloating]: ... + +# +@overload +def rfftfreq(n: int | integer, d: _ArrayLikeFloat_co = 1.0, device: L["cpu"] | None = None) -> NDArray[floating]: ... +@overload +def rfftfreq(n: int | integer, d: _ArrayLikeComplex_co = 1.0, device: L["cpu"] | None = None) -> NDArray[complexfloating]: ... diff --git a/local_packages/numpy/fft/_pocketfft.py b/local_packages/numpy/fft/_pocketfft.py new file mode 100644 index 0000000..1ce7c76 --- /dev/null +++ b/local_packages/numpy/fft/_pocketfft.py @@ -0,0 +1,1693 @@ +""" +Discrete Fourier Transforms + +Routines in this module: + +fft(a, n=None, axis=-1, norm="backward") +ifft(a, n=None, axis=-1, norm="backward") +rfft(a, n=None, axis=-1, norm="backward") +irfft(a, n=None, axis=-1, norm="backward") +hfft(a, n=None, axis=-1, norm="backward") +ihfft(a, n=None, axis=-1, norm="backward") +fftn(a, s=None, axes=None, norm="backward") +ifftn(a, s=None, axes=None, norm="backward") +rfftn(a, s=None, axes=None, norm="backward") +irfftn(a, s=None, axes=None, norm="backward") +fft2(a, s=None, axes=(-2,-1), norm="backward") +ifft2(a, s=None, axes=(-2, -1), norm="backward") +rfft2(a, s=None, axes=(-2,-1), norm="backward") +irfft2(a, s=None, axes=(-2, -1), norm="backward") + +i = inverse transform +r = transform of purely real data +h = Hermite transform +n = n-dimensional transform +2 = 2-dimensional transform +(Note: 2D routines are just nD routines with different default +behavior.) + +""" +__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn', + 'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn'] + +import functools +import warnings + +from numpy._core import ( + asarray, + conjugate, + empty_like, + overrides, + reciprocal, + result_type, + sqrt, + take, +) +from numpy.lib.array_utils import normalize_axis_index + +from . import _pocketfft_umath as pfu + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy.fft') + + +# `inv_norm` is a float by which the result of the transform needs to be +# divided. This replaces the original, more intuitive 'fct` parameter to avoid +# divisions by zero (or alternatively additional checks) in the case of +# zero-length axes during its computation. +def _raw_fft(a, n, axis, is_real, is_forward, norm, out=None): + if n < 1: + raise ValueError(f"Invalid number of FFT data points ({n}) specified.") + + # Calculate the normalization factor, passing in the array dtype to + # avoid precision loss in the possible sqrt or reciprocal. + if not is_forward: + norm = _swap_direction(norm) + + real_dtype = result_type(a.real.dtype, 1.0) + if norm is None or norm == "backward": + fct = 1 + elif norm == "ortho": + fct = reciprocal(sqrt(n, dtype=real_dtype)) + elif norm == "forward": + fct = reciprocal(n, dtype=real_dtype) + else: + raise ValueError(f'Invalid norm value {norm}; should be "backward",' + '"ortho" or "forward".') + + n_out = n + if is_real: + if is_forward: + ufunc = pfu.rfft_n_even if n % 2 == 0 else pfu.rfft_n_odd + n_out = n // 2 + 1 + else: + ufunc = pfu.irfft + else: + ufunc = pfu.fft if is_forward else pfu.ifft + + axis = normalize_axis_index(axis, a.ndim) + + if out is None: + if is_real and not is_forward: # irfft, complex in, real output. + out_dtype = real_dtype + else: # Others, complex output. + out_dtype = result_type(a.dtype, 1j) + out = empty_like(a, shape=a.shape[:axis] + (n_out,) + a.shape[axis + 1:], + dtype=out_dtype) + elif ((shape := getattr(out, "shape", None)) is not None + and (len(shape) != a.ndim or shape[axis] != n_out)): + raise ValueError("output array has wrong shape.") + + return ufunc(a, fct, axes=[(axis,), (), (axis,)], out=out) + + +_SWAP_DIRECTION_MAP = {"backward": "forward", None: "forward", + "ortho": "ortho", "forward": "backward"} + + +def _swap_direction(norm): + try: + return _SWAP_DIRECTION_MAP[norm] + except KeyError: + raise ValueError(f'Invalid norm value {norm}; should be "backward", ' + '"ortho" or "forward".') from None + + +def _fft_dispatcher(a, n=None, axis=None, norm=None, out=None): + return (a, out) + + +@array_function_dispatch(_fft_dispatcher) +def fft(a, n=None, axis=-1, norm=None, out=None): + """ + Compute the one-dimensional discrete Fourier Transform. + + This function computes the one-dimensional *n*-point discrete Fourier + Transform (DFT) with the efficient Fast Fourier Transform (FFT) + algorithm [CT]_. + + Parameters + ---------- + a : array_like + Input array, can be complex. + n : int, optional + Length of the transformed axis of the output. + If `n` is smaller than the length of the input, the input is cropped. + If it is larger, the input is padded with zeros. If `n` is not given, + the length of the input along the axis specified by `axis` is used. + axis : int, optional + Axis over which to compute the FFT. If not given, the last axis is + used. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + out : complex ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + + Raises + ------ + IndexError + If `axis` is not a valid axis of `a`. + + See Also + -------- + numpy.fft : for definition of the DFT and conventions used. + ifft : The inverse of `fft`. + fft2 : The two-dimensional FFT. + fftn : The *n*-dimensional FFT. + rfftn : The *n*-dimensional FFT of real input. + fftfreq : Frequency bins for given FFT parameters. + + Notes + ----- + FFT (Fast Fourier Transform) refers to a way the discrete Fourier + Transform (DFT) can be calculated efficiently, by using symmetries in the + calculated terms. The symmetry is highest when `n` is a power of 2, and + the transform is therefore most efficient for these sizes. + + The DFT is defined, with the conventions used in this implementation, in + the documentation for the `numpy.fft` module. + + References + ---------- + .. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the + machine calculation of complex Fourier series," *Math. Comput.* + 19: 297-301. + + Examples + -------- + >>> import numpy as np + >>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8)) + array([-2.33486982e-16+1.14423775e-17j, 8.00000000e+00-1.25557246e-15j, + 2.33486982e-16+2.33486982e-16j, 0.00000000e+00+1.22464680e-16j, + -1.14423775e-17+2.33486982e-16j, 0.00000000e+00+5.20784380e-16j, + 1.14423775e-17+1.14423775e-17j, 0.00000000e+00+1.22464680e-16j]) + + In this example, real input has an FFT which is Hermitian, i.e., symmetric + in the real part and anti-symmetric in the imaginary part, as described in + the `numpy.fft` documentation: + + >>> import matplotlib.pyplot as plt + >>> t = np.arange(256) + >>> sp = np.fft.fft(np.sin(t)) + >>> freq = np.fft.fftfreq(t.shape[-1]) + >>> _ = plt.plot(freq, sp.real, freq, sp.imag) + >>> plt.show() + + """ + a = asarray(a) + if n is None: + n = a.shape[axis] + output = _raw_fft(a, n, axis, False, True, norm, out) + return output + + +@array_function_dispatch(_fft_dispatcher) +def ifft(a, n=None, axis=-1, norm=None, out=None): + """ + Compute the one-dimensional inverse discrete Fourier Transform. + + This function computes the inverse of the one-dimensional *n*-point + discrete Fourier transform computed by `fft`. In other words, + ``ifft(fft(a)) == a`` to within numerical accuracy. + For a general description of the algorithm and definitions, + see `numpy.fft`. + + The input should be ordered in the same way as is returned by `fft`, + i.e., + + * ``a[0]`` should contain the zero frequency term, + * ``a[1:n//2]`` should contain the positive-frequency terms, + * ``a[n//2 + 1:]`` should contain the negative-frequency terms, in + increasing order starting from the most negative frequency. + + For an even number of input points, ``A[n//2]`` represents the sum of + the values at the positive and negative Nyquist frequencies, as the two + are aliased together. See `numpy.fft` for details. + + Parameters + ---------- + a : array_like + Input array, can be complex. + n : int, optional + Length of the transformed axis of the output. + If `n` is smaller than the length of the input, the input is cropped. + If it is larger, the input is padded with zeros. If `n` is not given, + the length of the input along the axis specified by `axis` is used. + See notes about padding issues. + axis : int, optional + Axis over which to compute the inverse DFT. If not given, the last + axis is used. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : complex ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + + Raises + ------ + IndexError + If `axis` is not a valid axis of `a`. + + See Also + -------- + numpy.fft : An introduction, with definitions and general explanations. + fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse + ifft2 : The two-dimensional inverse FFT. + ifftn : The n-dimensional inverse FFT. + + Notes + ----- + If the input parameter `n` is larger than the size of the input, the input + is padded by appending zeros at the end. Even though this is the common + approach, it might lead to surprising results. If a different padding is + desired, it must be performed before calling `ifft`. + + Examples + -------- + >>> import numpy as np + >>> np.fft.ifft([0, 4, 0, 0]) + array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j]) # may vary + + Create and plot a band-limited signal with random phases: + + >>> import matplotlib.pyplot as plt + >>> t = np.arange(400) + >>> n = np.zeros((400,), dtype=complex) + >>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,))) + >>> s = np.fft.ifft(n) + >>> plt.plot(t, s.real, label='real') + [] + >>> plt.plot(t, s.imag, '--', label='imaginary') + [] + >>> plt.legend() + + >>> plt.show() + + """ + a = asarray(a) + if n is None: + n = a.shape[axis] + output = _raw_fft(a, n, axis, False, False, norm, out=out) + return output + + +@array_function_dispatch(_fft_dispatcher) +def rfft(a, n=None, axis=-1, norm=None, out=None): + """ + Compute the one-dimensional discrete Fourier Transform for real input. + + This function computes the one-dimensional *n*-point discrete Fourier + Transform (DFT) of a real-valued array by means of an efficient algorithm + called the Fast Fourier Transform (FFT). + + Parameters + ---------- + a : array_like + Input array + n : int, optional + Number of points along transformation axis in the input to use. + If `n` is smaller than the length of the input, the input is cropped. + If it is larger, the input is padded with zeros. If `n` is not given, + the length of the input along the axis specified by `axis` is used. + axis : int, optional + Axis over which to compute the FFT. If not given, the last axis is + used. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : complex ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + If `n` is even, the length of the transformed axis is ``(n/2)+1``. + If `n` is odd, the length is ``(n+1)/2``. + + Raises + ------ + IndexError + If `axis` is not a valid axis of `a`. + + See Also + -------- + numpy.fft : For definition of the DFT and conventions used. + irfft : The inverse of `rfft`. + fft : The one-dimensional FFT of general (complex) input. + fftn : The *n*-dimensional FFT. + rfftn : The *n*-dimensional FFT of real input. + + Notes + ----- + When the DFT is computed for purely real input, the output is + Hermitian-symmetric, i.e. the negative frequency terms are just the complex + conjugates of the corresponding positive-frequency terms, and the + negative-frequency terms are therefore redundant. This function does not + compute the negative frequency terms, and the length of the transformed + axis of the output is therefore ``n//2 + 1``. + + When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains + the zero-frequency term 0*fs, which is real due to Hermitian symmetry. + + If `n` is even, ``A[-1]`` contains the term representing both positive + and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely + real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains + the largest positive frequency (fs/2*(n-1)/n), and is complex in the + general case. + + If the input `a` contains an imaginary part, it is silently discarded. + + Examples + -------- + >>> import numpy as np + >>> np.fft.fft([0, 1, 0, 0]) + array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j]) # may vary + >>> np.fft.rfft([0, 1, 0, 0]) + array([ 1.+0.j, 0.-1.j, -1.+0.j]) # may vary + + Notice how the final element of the `fft` output is the complex conjugate + of the second element, for real input. For `rfft`, this symmetry is + exploited to compute only the non-negative frequency terms. + + """ + a = asarray(a) + if n is None: + n = a.shape[axis] + output = _raw_fft(a, n, axis, True, True, norm, out=out) + return output + + +@array_function_dispatch(_fft_dispatcher) +def irfft(a, n=None, axis=-1, norm=None, out=None): + """ + Computes the inverse of `rfft`. + + This function computes the inverse of the one-dimensional *n*-point + discrete Fourier Transform of real input computed by `rfft`. + In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical + accuracy. (See Notes below for why ``len(a)`` is necessary here.) + + The input is expected to be in the form returned by `rfft`, i.e. the + real zero-frequency term followed by the complex positive frequency terms + in order of increasing frequency. Since the discrete Fourier Transform of + real input is Hermitian-symmetric, the negative frequency terms are taken + to be the complex conjugates of the corresponding positive frequency terms. + + Parameters + ---------- + a : array_like + The input array. + n : int, optional + Length of the transformed axis of the output. + For `n` output points, ``n//2+1`` input points are necessary. If the + input is longer than this, it is cropped. If it is shorter than this, + it is padded with zeros. If `n` is not given, it is taken to be + ``2*(m-1)`` where ``m`` is the length of the input along the axis + specified by `axis`. + axis : int, optional + Axis over which to compute the inverse FFT. If not given, the last + axis is used. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + The length of the transformed axis is `n`, or, if `n` is not given, + ``2*(m-1)`` where ``m`` is the length of the transformed axis of the + input. To get an odd number of output points, `n` must be specified. + + Raises + ------ + IndexError + If `axis` is not a valid axis of `a`. + + See Also + -------- + numpy.fft : For definition of the DFT and conventions used. + rfft : The one-dimensional FFT of real input, of which `irfft` is inverse. + fft : The one-dimensional FFT. + irfft2 : The inverse of the two-dimensional FFT of real input. + irfftn : The inverse of the *n*-dimensional FFT of real input. + + Notes + ----- + Returns the real valued `n`-point inverse discrete Fourier transform + of `a`, where `a` contains the non-negative frequency terms of a + Hermitian-symmetric sequence. `n` is the length of the result, not the + input. + + If you specify an `n` such that `a` must be zero-padded or truncated, the + extra/removed values will be added/removed at high frequencies. One can + thus resample a series to `m` points via Fourier interpolation by: + ``a_resamp = irfft(rfft(a), m)``. + + The correct interpretation of the hermitian input depends on the length of + the original data, as given by `n`. This is because each input shape could + correspond to either an odd or even length signal. By default, `irfft` + assumes an even output length which puts the last entry at the Nyquist + frequency; aliasing with its symmetric counterpart. By Hermitian symmetry, + the value is thus treated as purely real. To avoid losing information, the + correct length of the real input **must** be given. + + Examples + -------- + >>> import numpy as np + >>> np.fft.ifft([1, -1j, -1, 1j]) + array([0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) # may vary + >>> np.fft.irfft([1, -1j, -1]) + array([0., 1., 0., 0.]) + + Notice how the last term in the input to the ordinary `ifft` is the + complex conjugate of the second term, and the output has zero imaginary + part everywhere. When calling `irfft`, the negative frequencies are not + specified, and the output array is purely real. + + """ + a = asarray(a) + if n is None: + n = (a.shape[axis] - 1) * 2 + output = _raw_fft(a, n, axis, True, False, norm, out=out) + return output + + +@array_function_dispatch(_fft_dispatcher) +def hfft(a, n=None, axis=-1, norm=None, out=None): + """ + Compute the FFT of a signal that has Hermitian symmetry, i.e., a real + spectrum. + + Parameters + ---------- + a : array_like + The input array. + n : int, optional + Length of the transformed axis of the output. For `n` output + points, ``n//2 + 1`` input points are necessary. If the input is + longer than this, it is cropped. If it is shorter than this, it is + padded with zeros. If `n` is not given, it is taken to be ``2*(m-1)`` + where ``m`` is the length of the input along the axis specified by + `axis`. + axis : int, optional + Axis over which to compute the FFT. If not given, the last + axis is used. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + The length of the transformed axis is `n`, or, if `n` is not given, + ``2*m - 2`` where ``m`` is the length of the transformed axis of + the input. To get an odd number of output points, `n` must be + specified, for instance as ``2*m - 1`` in the typical case, + + Raises + ------ + IndexError + If `axis` is not a valid axis of `a`. + + See also + -------- + rfft : Compute the one-dimensional FFT for real input. + ihfft : The inverse of `hfft`. + + Notes + ----- + `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the + opposite case: here the signal has Hermitian symmetry in the time + domain and is real in the frequency domain. So here it's `hfft` for + which you must supply the length of the result if it is to be odd. + + * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error, + * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error. + + The correct interpretation of the hermitian input depends on the length of + the original data, as given by `n`. This is because each input shape could + correspond to either an odd or even length signal. By default, `hfft` + assumes an even output length which puts the last entry at the Nyquist + frequency; aliasing with its symmetric counterpart. By Hermitian symmetry, + the value is thus treated as purely real. To avoid losing information, the + shape of the full signal **must** be given. + + Examples + -------- + >>> import numpy as np + >>> signal = np.array([1, 2, 3, 4, 3, 2]) + >>> np.fft.fft(signal) + array([15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j]) # may vary + >>> np.fft.hfft(signal[:4]) # Input first half of signal + array([15., -4., 0., -1., 0., -4.]) + >>> np.fft.hfft(signal, 6) # Input entire signal and truncate + array([15., -4., 0., -1., 0., -4.]) + + + >>> signal = np.array([[1, 1.j], [-1.j, 2]]) + >>> np.conj(signal.T) - signal # check Hermitian symmetry + array([[ 0.-0.j, -0.+0.j], # may vary + [ 0.+0.j, 0.-0.j]]) + >>> freq_spectrum = np.fft.hfft(signal) + >>> freq_spectrum + array([[ 1., 1.], + [ 2., -2.]]) + + """ + a = asarray(a) + if n is None: + n = (a.shape[axis] - 1) * 2 + new_norm = _swap_direction(norm) + output = irfft(conjugate(a), n, axis, norm=new_norm, out=None) + return output + + +@array_function_dispatch(_fft_dispatcher) +def ihfft(a, n=None, axis=-1, norm=None, out=None): + """ + Compute the inverse FFT of a signal that has Hermitian symmetry. + + Parameters + ---------- + a : array_like + Input array. + n : int, optional + Length of the inverse FFT, the number of points along + transformation axis in the input to use. If `n` is smaller than + the length of the input, the input is cropped. If it is larger, + the input is padded with zeros. If `n` is not given, the length of + the input along the axis specified by `axis` is used. + axis : int, optional + Axis over which to compute the inverse FFT. If not given, the last + axis is used. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : complex ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + The length of the transformed axis is ``n//2 + 1``. + + See also + -------- + hfft, irfft + + Notes + ----- + `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the + opposite case: here the signal has Hermitian symmetry in the time + domain and is real in the frequency domain. So here it's `hfft` for + which you must supply the length of the result if it is to be odd: + + * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error, + * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error. + + Examples + -------- + >>> import numpy as np + >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4]) + >>> np.fft.ifft(spectrum) + array([1.+0.j, 2.+0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.+0.j]) # may vary + >>> np.fft.ihfft(spectrum) + array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j]) # may vary + + """ + a = asarray(a) + if n is None: + n = a.shape[axis] + new_norm = _swap_direction(norm) + out = rfft(a, n, axis, norm=new_norm, out=out) + return conjugate(out, out=out) + + +def _cook_nd_args(a, s=None, axes=None, invreal=0): + if s is None: + shapeless = True + if axes is None: + s = list(a.shape) + else: + s = take(a.shape, axes) + else: + shapeless = False + s = list(s) + if axes is None: + if not shapeless: + msg = ("`axes` should not be `None` if `s` is not `None` " + "(Deprecated in NumPy 2.0). In a future version of NumPy, " + "this will raise an error and `s[i]` will correspond to " + "the size along the transformed axis specified by " + "`axes[i]`. To retain current behaviour, pass a sequence " + "[0, ..., k-1] to `axes` for an array of dimension k.") + warnings.warn(msg, DeprecationWarning, stacklevel=3) + axes = list(range(-len(s), 0)) + if len(s) != len(axes): + raise ValueError("Shape and axes have different lengths.") + if invreal and shapeless: + s[-1] = (a.shape[axes[-1]] - 1) * 2 + if None in s: + msg = ("Passing an array containing `None` values to `s` is " + "deprecated in NumPy 2.0 and will raise an error in " + "a future version of NumPy. To use the default behaviour " + "of the corresponding 1-D transform, pass the value matching " + "the default for its `n` parameter. To use the default " + "behaviour for every axis, the `s` argument can be omitted.") + warnings.warn(msg, DeprecationWarning, stacklevel=3) + # use the whole input array along axis `i` if `s[i] == -1` + s = [a.shape[_a] if _s == -1 else _s for _s, _a in zip(s, axes)] + return s, axes + + +def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None, out=None): + a = asarray(a) + s, axes = _cook_nd_args(a, s, axes) + itl = list(range(len(axes))) + itl.reverse() + for ii in itl: + a = function(a, n=s[ii], axis=axes[ii], norm=norm, out=out) + return a + + +def _fftn_dispatcher(a, s=None, axes=None, norm=None, out=None): + return (a, out) + + +@array_function_dispatch(_fftn_dispatcher) +def fftn(a, s=None, axes=None, norm=None, out=None): + """ + Compute the N-dimensional discrete Fourier Transform. + + This function computes the *N*-dimensional discrete Fourier Transform over + any number of axes in an *M*-dimensional array by means of the Fast Fourier + Transform (FFT). + + Parameters + ---------- + a : array_like + Input array, can be complex. + s : sequence of ints, optional + Shape (length of each transformed axis) of the output + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). + This corresponds to ``n`` for ``fft(x, n)``. + Along any axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + + .. versionchanged:: 2.0 + + If it is ``-1``, the whole input is used (no padding/trimming). + + If `s` is not given, the shape of the input along the axes specified + by `axes` is used. + + .. deprecated:: 2.0 + + If `s` is not ``None``, `axes` must not be ``None`` either. + + .. deprecated:: 2.0 + + `s` must contain only ``int`` s, not ``None`` values. ``None`` + values currently mean that the default value for ``n`` is used + in the corresponding 1-D transform, but this behaviour is + deprecated. + + axes : sequence of ints, optional + Axes over which to compute the FFT. If not given, the last ``len(s)`` + axes are used, or all axes if `s` is also not specified. + Repeated indices in `axes` means that the transform over that axis is + performed multiple times. + + .. deprecated:: 2.0 + + If `s` is specified, the corresponding `axes` to be transformed + must be explicitly specified too. + + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : complex ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype for all axes (and hence is + incompatible with passing in all but the trivial ``s``). + + .. versionadded:: 2.0.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or by a combination of `s` and `a`, + as explained in the parameters section above. + + Raises + ------ + ValueError + If `s` and `axes` have different length. + IndexError + If an element of `axes` is larger than than the number of axes of `a`. + + See Also + -------- + numpy.fft : Overall view of discrete Fourier transforms, with definitions + and conventions used. + ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT. + fft : The one-dimensional FFT, with definitions and conventions used. + rfftn : The *n*-dimensional FFT of real input. + fft2 : The two-dimensional FFT. + fftshift : Shifts zero-frequency terms to centre of array + + Notes + ----- + The output, analogously to `fft`, contains the term for zero frequency in + the low-order corner of all axes, the positive frequency terms in the + first half of all axes, the term for the Nyquist frequency in the middle + of all axes and the negative frequency terms in the second half of all + axes, in order of decreasingly negative frequency. + + See `numpy.fft` for details, definitions and conventions used. + + Examples + -------- + >>> import numpy as np + >>> a = np.mgrid[:3, :3, :3][0] + >>> np.fft.fftn(a, axes=(1, 2)) + array([[[ 0.+0.j, 0.+0.j, 0.+0.j], # may vary + [ 0.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j]], + [[ 9.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j]], + [[18.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j]]]) + >>> np.fft.fftn(a, (2, 2), axes=(0, 1)) + array([[[ 2.+0.j, 2.+0.j, 2.+0.j], # may vary + [ 0.+0.j, 0.+0.j, 0.+0.j]], + [[-2.+0.j, -2.+0.j, -2.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j]]]) + + >>> import matplotlib.pyplot as plt + >>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12, + ... 2 * np.pi * np.arange(200) / 34) + >>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape) + >>> FS = np.fft.fftn(S) + >>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2)) + + >>> plt.show() + + """ + return _raw_fftnd(a, s, axes, fft, norm, out=out) + + +@array_function_dispatch(_fftn_dispatcher) +def ifftn(a, s=None, axes=None, norm=None, out=None): + """ + Compute the N-dimensional inverse discrete Fourier Transform. + + This function computes the inverse of the N-dimensional discrete + Fourier Transform over any number of axes in an M-dimensional array by + means of the Fast Fourier Transform (FFT). In other words, + ``ifftn(fftn(a)) == a`` to within numerical accuracy. + For a description of the definitions and conventions used, see `numpy.fft`. + + The input, analogously to `ifft`, should be ordered in the same way as is + returned by `fftn`, i.e. it should have the term for zero frequency + in all axes in the low-order corner, the positive frequency terms in the + first half of all axes, the term for the Nyquist frequency in the middle + of all axes and the negative frequency terms in the second half of all + axes, in order of decreasingly negative frequency. + + Parameters + ---------- + a : array_like + Input array, can be complex. + s : sequence of ints, optional + Shape (length of each transformed axis) of the output + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). + This corresponds to ``n`` for ``ifft(x, n)``. + Along any axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + + .. versionchanged:: 2.0 + + If it is ``-1``, the whole input is used (no padding/trimming). + + If `s` is not given, the shape of the input along the axes specified + by `axes` is used. See notes for issue on `ifft` zero padding. + + .. deprecated:: 2.0 + + If `s` is not ``None``, `axes` must not be ``None`` either. + + .. deprecated:: 2.0 + + `s` must contain only ``int`` s, not ``None`` values. ``None`` + values currently mean that the default value for ``n`` is used + in the corresponding 1-D transform, but this behaviour is + deprecated. + + axes : sequence of ints, optional + Axes over which to compute the IFFT. If not given, the last ``len(s)`` + axes are used, or all axes if `s` is also not specified. + Repeated indices in `axes` means that the inverse transform over that + axis is performed multiple times. + + .. deprecated:: 2.0 + + If `s` is specified, the corresponding `axes` to be transformed + must be explicitly specified too. + + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : complex ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype for all axes (and hence is + incompatible with passing in all but the trivial ``s``). + + .. versionadded:: 2.0.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or by a combination of `s` or `a`, + as explained in the parameters section above. + + Raises + ------ + ValueError + If `s` and `axes` have different length. + IndexError + If an element of `axes` is larger than than the number of axes of `a`. + + See Also + -------- + numpy.fft : Overall view of discrete Fourier transforms, with definitions + and conventions used. + fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse. + ifft : The one-dimensional inverse FFT. + ifft2 : The two-dimensional inverse FFT. + ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning + of array. + + Notes + ----- + See `numpy.fft` for definitions and conventions used. + + Zero-padding, analogously with `ifft`, is performed by appending zeros to + the input along the specified dimension. Although this is the common + approach, it might lead to surprising results. If another form of zero + padding is desired, it must be performed before `ifftn` is called. + + Examples + -------- + >>> import numpy as np + >>> a = np.eye(4) + >>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,)) + array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary + [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j], + [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j], + [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]]) + + + Create and plot an image with band-limited frequency content: + + >>> import matplotlib.pyplot as plt + >>> n = np.zeros((200,200), dtype=complex) + >>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20))) + >>> im = np.fft.ifftn(n).real + >>> plt.imshow(im) + + >>> plt.show() + + """ + return _raw_fftnd(a, s, axes, ifft, norm, out=out) + + +@array_function_dispatch(_fftn_dispatcher) +def fft2(a, s=None, axes=(-2, -1), norm=None, out=None): + """ + Compute the 2-dimensional discrete Fourier Transform. + + This function computes the *n*-dimensional discrete Fourier Transform + over any axes in an *M*-dimensional array by means of the + Fast Fourier Transform (FFT). By default, the transform is computed over + the last two axes of the input array, i.e., a 2-dimensional FFT. + + Parameters + ---------- + a : array_like + Input array, can be complex + s : sequence of ints, optional + Shape (length of each transformed axis) of the output + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). + This corresponds to ``n`` for ``fft(x, n)``. + Along each axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + + .. versionchanged:: 2.0 + + If it is ``-1``, the whole input is used (no padding/trimming). + + If `s` is not given, the shape of the input along the axes specified + by `axes` is used. + + .. deprecated:: 2.0 + + If `s` is not ``None``, `axes` must not be ``None`` either. + + .. deprecated:: 2.0 + + `s` must contain only ``int`` s, not ``None`` values. ``None`` + values currently mean that the default value for ``n`` is used + in the corresponding 1-D transform, but this behaviour is + deprecated. + + axes : sequence of ints, optional + Axes over which to compute the FFT. If not given, the last two + axes are used. A repeated index in `axes` means the transform over + that axis is performed multiple times. A one-element sequence means + that a one-dimensional FFT is performed. Default: ``(-2, -1)``. + + .. deprecated:: 2.0 + + If `s` is specified, the corresponding `axes` to be transformed + must not be ``None``. + + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : complex ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype for all axes (and hence only the + last axis can have ``s`` not equal to the shape at that axis). + + .. versionadded:: 2.0.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or the last two axes if `axes` is not given. + + Raises + ------ + ValueError + If `s` and `axes` have different length, or `axes` not given and + ``len(s) != 2``. + IndexError + If an element of `axes` is larger than than the number of axes of `a`. + + See Also + -------- + numpy.fft : Overall view of discrete Fourier transforms, with definitions + and conventions used. + ifft2 : The inverse two-dimensional FFT. + fft : The one-dimensional FFT. + fftn : The *n*-dimensional FFT. + fftshift : Shifts zero-frequency terms to the center of the array. + For two-dimensional input, swaps first and third quadrants, and second + and fourth quadrants. + + Notes + ----- + `fft2` is just `fftn` with a different default for `axes`. + + The output, analogously to `fft`, contains the term for zero frequency in + the low-order corner of the transformed axes, the positive frequency terms + in the first half of these axes, the term for the Nyquist frequency in the + middle of the axes and the negative frequency terms in the second half of + the axes, in order of decreasingly negative frequency. + + See `fftn` for details and a plotting example, and `numpy.fft` for + definitions and conventions used. + + + Examples + -------- + >>> import numpy as np + >>> a = np.mgrid[:5, :5][0] + >>> np.fft.fft2(a) + array([[ 50. +0.j , 0. +0.j , 0. +0.j , # may vary + 0. +0.j , 0. +0.j ], + [-12.5+17.20477401j, 0. +0.j , 0. +0.j , + 0. +0.j , 0. +0.j ], + [-12.5 +4.0614962j , 0. +0.j , 0. +0.j , + 0. +0.j , 0. +0.j ], + [-12.5 -4.0614962j , 0. +0.j , 0. +0.j , + 0. +0.j , 0. +0.j ], + [-12.5-17.20477401j, 0. +0.j , 0. +0.j , + 0. +0.j , 0. +0.j ]]) + + """ + return _raw_fftnd(a, s, axes, fft, norm, out=out) + + +@array_function_dispatch(_fftn_dispatcher) +def ifft2(a, s=None, axes=(-2, -1), norm=None, out=None): + """ + Compute the 2-dimensional inverse discrete Fourier Transform. + + This function computes the inverse of the 2-dimensional discrete Fourier + Transform over any number of axes in an M-dimensional array by means of + the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a`` + to within numerical accuracy. By default, the inverse transform is + computed over the last two axes of the input array. + + The input, analogously to `ifft`, should be ordered in the same way as is + returned by `fft2`, i.e. it should have the term for zero frequency + in the low-order corner of the two axes, the positive frequency terms in + the first half of these axes, the term for the Nyquist frequency in the + middle of the axes and the negative frequency terms in the second half of + both axes, in order of decreasingly negative frequency. + + Parameters + ---------- + a : array_like + Input array, can be complex. + s : sequence of ints, optional + Shape (length of each axis) of the output (``s[0]`` refers to axis 0, + ``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``. + Along each axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + + .. versionchanged:: 2.0 + + If it is ``-1``, the whole input is used (no padding/trimming). + + If `s` is not given, the shape of the input along the axes specified + by `axes` is used. See notes for issue on `ifft` zero padding. + + .. deprecated:: 2.0 + + If `s` is not ``None``, `axes` must not be ``None`` either. + + .. deprecated:: 2.0 + + `s` must contain only ``int`` s, not ``None`` values. ``None`` + values currently mean that the default value for ``n`` is used + in the corresponding 1-D transform, but this behaviour is + deprecated. + + axes : sequence of ints, optional + Axes over which to compute the FFT. If not given, the last two + axes are used. A repeated index in `axes` means the transform over + that axis is performed multiple times. A one-element sequence means + that a one-dimensional FFT is performed. Default: ``(-2, -1)``. + + .. deprecated:: 2.0 + + If `s` is specified, the corresponding `axes` to be transformed + must not be ``None``. + + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : complex ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype for all axes (and hence is + incompatible with passing in all but the trivial ``s``). + + .. versionadded:: 2.0.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or the last two axes if `axes` is not given. + + Raises + ------ + ValueError + If `s` and `axes` have different length, or `axes` not given and + ``len(s) != 2``. + IndexError + If an element of `axes` is larger than than the number of axes of `a`. + + See Also + -------- + numpy.fft : Overall view of discrete Fourier transforms, with definitions + and conventions used. + fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse. + ifftn : The inverse of the *n*-dimensional FFT. + fft : The one-dimensional FFT. + ifft : The one-dimensional inverse FFT. + + Notes + ----- + `ifft2` is just `ifftn` with a different default for `axes`. + + See `ifftn` for details and a plotting example, and `numpy.fft` for + definition and conventions used. + + Zero-padding, analogously with `ifft`, is performed by appending zeros to + the input along the specified dimension. Although this is the common + approach, it might lead to surprising results. If another form of zero + padding is desired, it must be performed before `ifft2` is called. + + Examples + -------- + >>> import numpy as np + >>> a = 4 * np.eye(4) + >>> np.fft.ifft2(a) + array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary + [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j], + [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j], + [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]]) + + """ + return _raw_fftnd(a, s, axes, ifft, norm, out=None) + + +@array_function_dispatch(_fftn_dispatcher) +def rfftn(a, s=None, axes=None, norm=None, out=None): + """ + Compute the N-dimensional discrete Fourier Transform for real input. + + This function computes the N-dimensional discrete Fourier Transform over + any number of axes in an M-dimensional real array by means of the Fast + Fourier Transform (FFT). By default, all axes are transformed, with the + real transform performed over the last axis, while the remaining + transforms are complex. + + Parameters + ---------- + a : array_like + Input array, taken to be real. + s : sequence of ints, optional + Shape (length along each transformed axis) to use from the input. + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). + The final element of `s` corresponds to `n` for ``rfft(x, n)``, while + for the remaining axes, it corresponds to `n` for ``fft(x, n)``. + Along any axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + + .. versionchanged:: 2.0 + + If it is ``-1``, the whole input is used (no padding/trimming). + + If `s` is not given, the shape of the input along the axes specified + by `axes` is used. + + .. deprecated:: 2.0 + + If `s` is not ``None``, `axes` must not be ``None`` either. + + .. deprecated:: 2.0 + + `s` must contain only ``int`` s, not ``None`` values. ``None`` + values currently mean that the default value for ``n`` is used + in the corresponding 1-D transform, but this behaviour is + deprecated. + + axes : sequence of ints, optional + Axes over which to compute the FFT. If not given, the last ``len(s)`` + axes are used, or all axes if `s` is also not specified. + + .. deprecated:: 2.0 + + If `s` is specified, the corresponding `axes` to be transformed + must be explicitly specified too. + + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : complex ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype for all axes (and hence is + incompatible with passing in all but the trivial ``s``). + + .. versionadded:: 2.0.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or by a combination of `s` and `a`, + as explained in the parameters section above. + The length of the last axis transformed will be ``s[-1]//2+1``, + while the remaining transformed axes will have lengths according to + `s`, or unchanged from the input. + + Raises + ------ + ValueError + If `s` and `axes` have different length. + IndexError + If an element of `axes` is larger than than the number of axes of `a`. + + See Also + -------- + irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT + of real input. + fft : The one-dimensional FFT, with definitions and conventions used. + rfft : The one-dimensional FFT of real input. + fftn : The n-dimensional FFT. + rfft2 : The two-dimensional FFT of real input. + + Notes + ----- + The transform for real input is performed over the last transformation + axis, as by `rfft`, then the transform over the remaining axes is + performed as by `fftn`. The order of the output is as for `rfft` for the + final transformation axis, and as for `fftn` for the remaining + transformation axes. + + See `fft` for details, definitions and conventions used. + + Examples + -------- + >>> import numpy as np + >>> a = np.ones((2, 2, 2)) + >>> np.fft.rfftn(a) + array([[[8.+0.j, 0.+0.j], # may vary + [0.+0.j, 0.+0.j]], + [[0.+0.j, 0.+0.j], + [0.+0.j, 0.+0.j]]]) + + >>> np.fft.rfftn(a, axes=(2, 0)) + array([[[4.+0.j, 0.+0.j], # may vary + [4.+0.j, 0.+0.j]], + [[0.+0.j, 0.+0.j], + [0.+0.j, 0.+0.j]]]) + + """ + a = asarray(a) + s, axes = _cook_nd_args(a, s, axes) + a = rfft(a, s[-1], axes[-1], norm, out=out) + for ii in range(len(axes) - 2, -1, -1): + a = fft(a, s[ii], axes[ii], norm, out=out) + return a + + +@array_function_dispatch(_fftn_dispatcher) +def rfft2(a, s=None, axes=(-2, -1), norm=None, out=None): + """ + Compute the 2-dimensional FFT of a real array. + + Parameters + ---------- + a : array + Input array, taken to be real. + s : sequence of ints, optional + Shape of the FFT. + + .. versionchanged:: 2.0 + + If it is ``-1``, the whole input is used (no padding/trimming). + + .. deprecated:: 2.0 + + If `s` is not ``None``, `axes` must not be ``None`` either. + + .. deprecated:: 2.0 + + `s` must contain only ``int`` s, not ``None`` values. ``None`` + values currently mean that the default value for ``n`` is used + in the corresponding 1-D transform, but this behaviour is + deprecated. + + axes : sequence of ints, optional + Axes over which to compute the FFT. Default: ``(-2, -1)``. + + .. deprecated:: 2.0 + + If `s` is specified, the corresponding `axes` to be transformed + must not be ``None``. + + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : complex ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype for the last inverse transform. + incompatible with passing in all but the trivial ``s``). + + .. versionadded:: 2.0.0 + + Returns + ------- + out : ndarray + The result of the real 2-D FFT. + + See Also + -------- + rfftn : Compute the N-dimensional discrete Fourier Transform for real + input. + + Notes + ----- + This is really just `rfftn` with different default behavior. + For more details see `rfftn`. + + Examples + -------- + >>> import numpy as np + >>> a = np.mgrid[:5, :5][0] + >>> np.fft.rfft2(a) + array([[ 50. +0.j , 0. +0.j , 0. +0.j ], + [-12.5+17.20477401j, 0. +0.j , 0. +0.j ], + [-12.5 +4.0614962j , 0. +0.j , 0. +0.j ], + [-12.5 -4.0614962j , 0. +0.j , 0. +0.j ], + [-12.5-17.20477401j, 0. +0.j , 0. +0.j ]]) + """ + return rfftn(a, s, axes, norm, out=out) + + +@array_function_dispatch(_fftn_dispatcher) +def irfftn(a, s=None, axes=None, norm=None, out=None): + """ + Computes the inverse of `rfftn`. + + This function computes the inverse of the N-dimensional discrete + Fourier Transform for real input over any number of axes in an + M-dimensional array by means of the Fast Fourier Transform (FFT). In + other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical + accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`, + and for the same reason.) + + The input should be ordered in the same way as is returned by `rfftn`, + i.e. as for `irfft` for the final transformation axis, and as for `ifftn` + along all the other axes. + + Parameters + ---------- + a : array_like + Input array. + s : sequence of ints, optional + Shape (length of each transformed axis) of the output + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the + number of input points used along this axis, except for the last axis, + where ``s[-1]//2+1`` points of the input are used. + Along any axis, if the shape indicated by `s` is smaller than that of + the input, the input is cropped. If it is larger, the input is padded + with zeros. + + .. versionchanged:: 2.0 + + If it is ``-1``, the whole input is used (no padding/trimming). + + If `s` is not given, the shape of the input along the axes + specified by axes is used. Except for the last axis which is taken to + be ``2*(m-1)`` where ``m`` is the length of the input along that axis. + + .. deprecated:: 2.0 + + If `s` is not ``None``, `axes` must not be ``None`` either. + + .. deprecated:: 2.0 + + `s` must contain only ``int`` s, not ``None`` values. ``None`` + values currently mean that the default value for ``n`` is used + in the corresponding 1-D transform, but this behaviour is + deprecated. + + axes : sequence of ints, optional + Axes over which to compute the inverse FFT. If not given, the last + `len(s)` axes are used, or all axes if `s` is also not specified. + Repeated indices in `axes` means that the inverse transform over that + axis is performed multiple times. + + .. deprecated:: 2.0 + + If `s` is specified, the corresponding `axes` to be transformed + must be explicitly specified too. + + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype for the last transformation. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or by a combination of `s` or `a`, + as explained in the parameters section above. + The length of each transformed axis is as given by the corresponding + element of `s`, or the length of the input in every axis except for the + last one if `s` is not given. In the final transformed axis the length + of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the + length of the final transformed axis of the input. To get an odd + number of output points in the final axis, `s` must be specified. + + Raises + ------ + ValueError + If `s` and `axes` have different length. + IndexError + If an element of `axes` is larger than than the number of axes of `a`. + + See Also + -------- + rfftn : The forward n-dimensional FFT of real input, + of which `ifftn` is the inverse. + fft : The one-dimensional FFT, with definitions and conventions used. + irfft : The inverse of the one-dimensional FFT of real input. + irfft2 : The inverse of the two-dimensional FFT of real input. + + Notes + ----- + See `fft` for definitions and conventions used. + + See `rfft` for definitions and conventions used for real input. + + The correct interpretation of the hermitian input depends on the shape of + the original data, as given by `s`. This is because each input shape could + correspond to either an odd or even length signal. By default, `irfftn` + assumes an even output length which puts the last entry at the Nyquist + frequency; aliasing with its symmetric counterpart. When performing the + final complex to real transform, the last value is thus treated as purely + real. To avoid losing information, the correct shape of the real input + **must** be given. + + Examples + -------- + >>> import numpy as np + >>> a = np.zeros((3, 2, 2)) + >>> a[0, 0, 0] = 3 * 2 * 2 + >>> np.fft.irfftn(a) + array([[[1., 1.], + [1., 1.]], + [[1., 1.], + [1., 1.]], + [[1., 1.], + [1., 1.]]]) + + """ + a = asarray(a) + s, axes = _cook_nd_args(a, s, axes, invreal=1) + for ii in range(len(axes) - 1): + a = ifft(a, s[ii], axes[ii], norm) + a = irfft(a, s[-1], axes[-1], norm, out=out) + return a + + +@array_function_dispatch(_fftn_dispatcher) +def irfft2(a, s=None, axes=(-2, -1), norm=None, out=None): + """ + Computes the inverse of `rfft2`. + + Parameters + ---------- + a : array_like + The input array + s : sequence of ints, optional + Shape of the real output to the inverse FFT. + + .. versionchanged:: 2.0 + + If it is ``-1``, the whole input is used (no padding/trimming). + + .. deprecated:: 2.0 + + If `s` is not ``None``, `axes` must not be ``None`` either. + + .. deprecated:: 2.0 + + `s` must contain only ``int`` s, not ``None`` values. ``None`` + values currently mean that the default value for ``n`` is used + in the corresponding 1-D transform, but this behaviour is + deprecated. + + axes : sequence of ints, optional + The axes over which to compute the inverse fft. + Default: ``(-2, -1)``, the last two axes. + + .. deprecated:: 2.0 + + If `s` is specified, the corresponding `axes` to be transformed + must not be ``None``. + + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype for the last transformation. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : ndarray + The result of the inverse real 2-D FFT. + + See Also + -------- + rfft2 : The forward two-dimensional FFT of real input, + of which `irfft2` is the inverse. + rfft : The one-dimensional FFT for real input. + irfft : The inverse of the one-dimensional FFT of real input. + irfftn : Compute the inverse of the N-dimensional FFT of real input. + + Notes + ----- + This is really `irfftn` with different defaults. + For more details see `irfftn`. + + Examples + -------- + >>> import numpy as np + >>> a = np.mgrid[:5, :5][0] + >>> A = np.fft.rfft2(a) + >>> np.fft.irfft2(A, s=a.shape) + array([[0., 0., 0., 0., 0.], + [1., 1., 1., 1., 1.], + [2., 2., 2., 2., 2.], + [3., 3., 3., 3., 3.], + [4., 4., 4., 4., 4.]]) + """ + return irfftn(a, s, axes, norm, out=None) diff --git a/local_packages/numpy/fft/_pocketfft.pyi b/local_packages/numpy/fft/_pocketfft.pyi new file mode 100644 index 0000000..3234c64 --- /dev/null +++ b/local_packages/numpy/fft/_pocketfft.pyi @@ -0,0 +1,137 @@ +from collections.abc import Sequence +from typing import Literal as L, TypeAlias + +from numpy import complex128, float64 +from numpy._typing import ArrayLike, NDArray, _ArrayLikeNumber_co + +__all__ = [ + "fft", + "ifft", + "rfft", + "irfft", + "hfft", + "ihfft", + "rfftn", + "irfftn", + "rfft2", + "irfft2", + "fft2", + "ifft2", + "fftn", + "ifftn", +] + +_NormKind: TypeAlias = L["backward", "ortho", "forward"] | None + +def fft( + a: ArrayLike, + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, +) -> NDArray[complex128]: ... + +def ifft( + a: ArrayLike, + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, +) -> NDArray[complex128]: ... + +def rfft( + a: ArrayLike, + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, +) -> NDArray[complex128]: ... + +def irfft( + a: ArrayLike, + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[float64] | None = None, +) -> NDArray[float64]: ... + +# Input array must be compatible with `np.conjugate` +def hfft( + a: _ArrayLikeNumber_co, + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[float64] | None = None, +) -> NDArray[float64]: ... + +def ihfft( + a: ArrayLike, + n: int | None = None, + axis: int = -1, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, +) -> NDArray[complex128]: ... + +def fftn( + a: ArrayLike, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, +) -> NDArray[complex128]: ... + +def ifftn( + a: ArrayLike, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, +) -> NDArray[complex128]: ... + +def rfftn( + a: ArrayLike, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: NDArray[complex128] | None = None, +) -> NDArray[complex128]: ... + +def irfftn( + a: ArrayLike, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = None, + norm: _NormKind = None, + out: NDArray[float64] | None = None, +) -> NDArray[float64]: ... + +def fft2( + a: ArrayLike, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: NDArray[complex128] | None = None, +) -> NDArray[complex128]: ... + +def ifft2( + a: ArrayLike, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: NDArray[complex128] | None = None, +) -> NDArray[complex128]: ... + +def rfft2( + a: ArrayLike, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: NDArray[complex128] | None = None, +) -> NDArray[complex128]: ... + +def irfft2( + a: ArrayLike, + s: Sequence[int] | None = None, + axes: Sequence[int] | None = (-2, -1), + norm: _NormKind = None, + out: NDArray[float64] | None = None, +) -> NDArray[float64]: ... diff --git a/local_packages/numpy/fft/_pocketfft_umath.cp312-win_amd64.lib b/local_packages/numpy/fft/_pocketfft_umath.cp312-win_amd64.lib new file mode 100644 index 0000000..bed5b38 Binary files /dev/null and b/local_packages/numpy/fft/_pocketfft_umath.cp312-win_amd64.lib differ diff --git a/local_packages/numpy/fft/_pocketfft_umath.cp312-win_amd64.pyd b/local_packages/numpy/fft/_pocketfft_umath.cp312-win_amd64.pyd new file mode 100644 index 0000000..abbd409 Binary files /dev/null and b/local_packages/numpy/fft/_pocketfft_umath.cp312-win_amd64.pyd differ diff --git a/local_packages/numpy/fft/tests/__init__.py b/local_packages/numpy/fft/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/local_packages/numpy/fft/tests/test_helper.py b/local_packages/numpy/fft/tests/test_helper.py new file mode 100644 index 0000000..c02a736 --- /dev/null +++ b/local_packages/numpy/fft/tests/test_helper.py @@ -0,0 +1,167 @@ +"""Test functions for fftpack.helper module + +Copied from fftpack.helper by Pearu Peterson, October 2005 + +""" +import numpy as np +from numpy import fft, pi +from numpy.testing import assert_array_almost_equal + + +class TestFFTShift: + + def test_definition(self): + x = [0, 1, 2, 3, 4, -4, -3, -2, -1] + y = [-4, -3, -2, -1, 0, 1, 2, 3, 4] + assert_array_almost_equal(fft.fftshift(x), y) + assert_array_almost_equal(fft.ifftshift(y), x) + x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1] + y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4] + assert_array_almost_equal(fft.fftshift(x), y) + assert_array_almost_equal(fft.ifftshift(y), x) + + def test_inverse(self): + for n in [1, 4, 9, 100, 211]: + x = np.random.random((n,)) + assert_array_almost_equal(fft.ifftshift(fft.fftshift(x)), x) + + def test_axes_keyword(self): + freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]] + shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]] + assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shifted) + assert_array_almost_equal(fft.fftshift(freqs, axes=0), + fft.fftshift(freqs, axes=(0,))) + assert_array_almost_equal(fft.ifftshift(shifted, axes=(0, 1)), freqs) + assert_array_almost_equal(fft.ifftshift(shifted, axes=0), + fft.ifftshift(shifted, axes=(0,))) + + assert_array_almost_equal(fft.fftshift(freqs), shifted) + assert_array_almost_equal(fft.ifftshift(shifted), freqs) + + def test_uneven_dims(self): + """ Test 2D input, which has uneven dimension sizes """ + freqs = [ + [0, 1], + [2, 3], + [4, 5] + ] + + # shift in dimension 0 + shift_dim0 = [ + [4, 5], + [0, 1], + [2, 3] + ] + assert_array_almost_equal(fft.fftshift(freqs, axes=0), shift_dim0) + assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=0), freqs) + assert_array_almost_equal(fft.fftshift(freqs, axes=(0,)), shift_dim0) + assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=[0]), freqs) + + # shift in dimension 1 + shift_dim1 = [ + [1, 0], + [3, 2], + [5, 4] + ] + assert_array_almost_equal(fft.fftshift(freqs, axes=1), shift_dim1) + assert_array_almost_equal(fft.ifftshift(shift_dim1, axes=1), freqs) + + # shift in both dimensions + shift_dim_both = [ + [5, 4], + [1, 0], + [3, 2] + ] + assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shift_dim_both) + assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=(0, 1)), freqs) + assert_array_almost_equal(fft.fftshift(freqs, axes=[0, 1]), shift_dim_both) + assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=[0, 1]), freqs) + + # axes=None (default) shift in all dimensions + assert_array_almost_equal(fft.fftshift(freqs, axes=None), shift_dim_both) + assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=None), freqs) + assert_array_almost_equal(fft.fftshift(freqs), shift_dim_both) + assert_array_almost_equal(fft.ifftshift(shift_dim_both), freqs) + + def test_equal_to_original(self): + """ Test the new (>=v1.15) and old implementations are equal (see #10073) """ + from numpy._core import arange, asarray, concatenate, take + + def original_fftshift(x, axes=None): + """ How fftshift was implemented in v1.14""" + tmp = asarray(x) + ndim = tmp.ndim + if axes is None: + axes = list(range(ndim)) + elif isinstance(axes, int): + axes = (axes,) + y = tmp + for k in axes: + n = tmp.shape[k] + p2 = (n + 1) // 2 + mylist = concatenate((arange(p2, n), arange(p2))) + y = take(y, mylist, k) + return y + + def original_ifftshift(x, axes=None): + """ How ifftshift was implemented in v1.14 """ + tmp = asarray(x) + ndim = tmp.ndim + if axes is None: + axes = list(range(ndim)) + elif isinstance(axes, int): + axes = (axes,) + y = tmp + for k in axes: + n = tmp.shape[k] + p2 = n - (n + 1) // 2 + mylist = concatenate((arange(p2, n), arange(p2))) + y = take(y, mylist, k) + return y + + # create possible 2d array combinations and try all possible keywords + # compare output to original functions + for i in range(16): + for j in range(16): + for axes_keyword in [0, 1, None, (0,), (0, 1)]: + inp = np.random.rand(i, j) + + assert_array_almost_equal(fft.fftshift(inp, axes_keyword), + original_fftshift(inp, axes_keyword)) + + assert_array_almost_equal(fft.ifftshift(inp, axes_keyword), + original_ifftshift(inp, axes_keyword)) + + +class TestFFTFreq: + + def test_definition(self): + x = [0, 1, 2, 3, 4, -4, -3, -2, -1] + assert_array_almost_equal(9 * fft.fftfreq(9), x) + assert_array_almost_equal(9 * pi * fft.fftfreq(9, pi), x) + x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1] + assert_array_almost_equal(10 * fft.fftfreq(10), x) + assert_array_almost_equal(10 * pi * fft.fftfreq(10, pi), x) + + +class TestRFFTFreq: + + def test_definition(self): + x = [0, 1, 2, 3, 4] + assert_array_almost_equal(9 * fft.rfftfreq(9), x) + assert_array_almost_equal(9 * pi * fft.rfftfreq(9, pi), x) + x = [0, 1, 2, 3, 4, 5] + assert_array_almost_equal(10 * fft.rfftfreq(10), x) + assert_array_almost_equal(10 * pi * fft.rfftfreq(10, pi), x) + + +class TestIRFFTN: + + def test_not_last_axis_success(self): + ar, ai = np.random.random((2, 16, 8, 32)) + a = ar + 1j * ai + + axes = (-2,) + + # Should not raise error + fft.irfftn(a, axes=axes) diff --git a/local_packages/numpy/fft/tests/test_pocketfft.py b/local_packages/numpy/fft/tests/test_pocketfft.py new file mode 100644 index 0000000..6f26ab6 --- /dev/null +++ b/local_packages/numpy/fft/tests/test_pocketfft.py @@ -0,0 +1,589 @@ +import queue +import threading + +import pytest + +import numpy as np +from numpy.random import random +from numpy.testing import IS_WASM, assert_allclose, assert_array_equal, assert_raises + + +def fft1(x): + L = len(x) + phase = -2j * np.pi * (np.arange(L) / L) + phase = np.arange(L).reshape(-1, 1) * phase + return np.sum(x * np.exp(phase), axis=1) + + +class TestFFTShift: + + def test_fft_n(self): + assert_raises(ValueError, np.fft.fft, [1, 2, 3], 0) + + +class TestFFT1D: + + def test_identity(self): + maxlen = 512 + x = random(maxlen) + 1j * random(maxlen) + xr = random(maxlen) + for i in range(1, maxlen): + assert_allclose(np.fft.ifft(np.fft.fft(x[0:i])), x[0:i], + atol=1e-12) + assert_allclose(np.fft.irfft(np.fft.rfft(xr[0:i]), i), + xr[0:i], atol=1e-12) + + @pytest.mark.parametrize("dtype", [np.single, np.double, np.longdouble]) + def test_identity_long_short(self, dtype): + # Test with explicitly given number of points, both for n + # smaller and for n larger than the input size. + maxlen = 16 + atol = 5 * np.spacing(np.array(1., dtype=dtype)) + x = random(maxlen).astype(dtype) + 1j * random(maxlen).astype(dtype) + xx = np.concatenate([x, np.zeros_like(x)]) + xr = random(maxlen).astype(dtype) + xxr = np.concatenate([xr, np.zeros_like(xr)]) + for i in range(1, maxlen * 2): + check_c = np.fft.ifft(np.fft.fft(x, n=i), n=i) + assert check_c.real.dtype == dtype + assert_allclose(check_c, xx[0:i], atol=atol, rtol=0) + check_r = np.fft.irfft(np.fft.rfft(xr, n=i), n=i) + assert check_r.dtype == dtype + assert_allclose(check_r, xxr[0:i], atol=atol, rtol=0) + + @pytest.mark.parametrize("dtype", [np.single, np.double, np.longdouble]) + def test_identity_long_short_reversed(self, dtype): + # Also test explicitly given number of points in reversed order. + maxlen = 16 + atol = 6 * np.spacing(np.array(1., dtype=dtype)) + x = random(maxlen).astype(dtype) + 1j * random(maxlen).astype(dtype) + xx = np.concatenate([x, np.zeros_like(x)]) + for i in range(1, maxlen * 2): + check_via_c = np.fft.fft(np.fft.ifft(x, n=i), n=i) + assert check_via_c.dtype == x.dtype + assert_allclose(check_via_c, xx[0:i], atol=atol, rtol=0) + # For irfft, we can neither recover the imaginary part of + # the first element, nor the imaginary part of the last + # element if npts is even. So, set to 0 for the comparison. + y = x.copy() + n = i // 2 + 1 + y.imag[0] = 0 + if i % 2 == 0: + y.imag[n - 1:] = 0 + yy = np.concatenate([y, np.zeros_like(y)]) + check_via_r = np.fft.rfft(np.fft.irfft(x, n=i), n=i) + assert check_via_r.dtype == x.dtype + assert_allclose(check_via_r, yy[0:n], atol=atol, rtol=0) + + def test_fft(self): + x = random(30) + 1j * random(30) + assert_allclose(fft1(x), np.fft.fft(x), atol=1e-6) + assert_allclose(fft1(x), np.fft.fft(x, norm="backward"), atol=1e-6) + assert_allclose(fft1(x) / np.sqrt(30), + np.fft.fft(x, norm="ortho"), atol=1e-6) + assert_allclose(fft1(x) / 30., + np.fft.fft(x, norm="forward"), atol=1e-6) + + @pytest.mark.parametrize("axis", (0, 1)) + @pytest.mark.parametrize("dtype", (complex, float)) + @pytest.mark.parametrize("transpose", (True, False)) + def test_fft_out_argument(self, dtype, transpose, axis): + def zeros_like(x): + if transpose: + return np.zeros_like(x.T).T + else: + return np.zeros_like(x) + + # tests below only test the out parameter + if dtype is complex: + y = random((10, 20)) + 1j * random((10, 20)) + fft, ifft = np.fft.fft, np.fft.ifft + else: + y = random((10, 20)) + fft, ifft = np.fft.rfft, np.fft.irfft + + expected = fft(y, axis=axis) + out = zeros_like(expected) + result = fft(y, out=out, axis=axis) + assert result is out + assert_array_equal(result, expected) + + expected2 = ifft(expected, axis=axis) + out2 = out if dtype is complex else zeros_like(expected2) + result2 = ifft(out, out=out2, axis=axis) + assert result2 is out2 + assert_array_equal(result2, expected2) + + @pytest.mark.parametrize("axis", [0, 1]) + def test_fft_inplace_out(self, axis): + # Test some weirder in-place combinations + y = random((20, 20)) + 1j * random((20, 20)) + # Fully in-place. + y1 = y.copy() + expected1 = np.fft.fft(y1, axis=axis) + result1 = np.fft.fft(y1, axis=axis, out=y1) + assert result1 is y1 + assert_array_equal(result1, expected1) + # In-place of part of the array; rest should be unchanged. + y2 = y.copy() + out2 = y2[:10] if axis == 0 else y2[:, :10] + expected2 = np.fft.fft(y2, n=10, axis=axis) + result2 = np.fft.fft(y2, n=10, axis=axis, out=out2) + assert result2 is out2 + assert_array_equal(result2, expected2) + if axis == 0: + assert_array_equal(y2[10:], y[10:]) + else: + assert_array_equal(y2[:, 10:], y[:, 10:]) + # In-place of another part of the array. + y3 = y.copy() + y3_sel = y3[5:] if axis == 0 else y3[:, 5:] + out3 = y3[5:15] if axis == 0 else y3[:, 5:15] + expected3 = np.fft.fft(y3_sel, n=10, axis=axis) + result3 = np.fft.fft(y3_sel, n=10, axis=axis, out=out3) + assert result3 is out3 + assert_array_equal(result3, expected3) + if axis == 0: + assert_array_equal(y3[:5], y[:5]) + assert_array_equal(y3[15:], y[15:]) + else: + assert_array_equal(y3[:, :5], y[:, :5]) + assert_array_equal(y3[:, 15:], y[:, 15:]) + # In-place with n > nin; rest should be unchanged. + y4 = y.copy() + y4_sel = y4[:10] if axis == 0 else y4[:, :10] + out4 = y4[:15] if axis == 0 else y4[:, :15] + expected4 = np.fft.fft(y4_sel, n=15, axis=axis) + result4 = np.fft.fft(y4_sel, n=15, axis=axis, out=out4) + assert result4 is out4 + assert_array_equal(result4, expected4) + if axis == 0: + assert_array_equal(y4[15:], y[15:]) + else: + assert_array_equal(y4[:, 15:], y[:, 15:]) + # Overwrite in a transpose. + y5 = y.copy() + out5 = y5.T + result5 = np.fft.fft(y5, axis=axis, out=out5) + assert result5 is out5 + assert_array_equal(result5, expected1) + # Reverse strides. + y6 = y.copy() + out6 = y6[::-1] if axis == 0 else y6[:, ::-1] + result6 = np.fft.fft(y6, axis=axis, out=out6) + assert result6 is out6 + assert_array_equal(result6, expected1) + + def test_fft_bad_out(self): + x = np.arange(30.) + with pytest.raises(TypeError, match="must be of ArrayType"): + np.fft.fft(x, out="") + with pytest.raises(ValueError, match="has wrong shape"): + np.fft.fft(x, out=np.zeros_like(x).reshape(5, -1)) + with pytest.raises(TypeError, match="Cannot cast"): + np.fft.fft(x, out=np.zeros_like(x, dtype=float)) + + @pytest.mark.parametrize('norm', (None, 'backward', 'ortho', 'forward')) + def test_ifft(self, norm): + x = random(30) + 1j * random(30) + assert_allclose( + x, np.fft.ifft(np.fft.fft(x, norm=norm), norm=norm), + atol=1e-6) + # Ensure we get the correct error message + with pytest.raises(ValueError, + match='Invalid number of FFT data points'): + np.fft.ifft([], norm=norm) + + def test_fft2(self): + x = random((30, 20)) + 1j * random((30, 20)) + assert_allclose(np.fft.fft(np.fft.fft(x, axis=1), axis=0), + np.fft.fft2(x), atol=1e-6) + assert_allclose(np.fft.fft2(x), + np.fft.fft2(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.fft2(x) / np.sqrt(30 * 20), + np.fft.fft2(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.fft2(x) / (30. * 20.), + np.fft.fft2(x, norm="forward"), atol=1e-6) + + def test_ifft2(self): + x = random((30, 20)) + 1j * random((30, 20)) + assert_allclose(np.fft.ifft(np.fft.ifft(x, axis=1), axis=0), + np.fft.ifft2(x), atol=1e-6) + assert_allclose(np.fft.ifft2(x), + np.fft.ifft2(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.ifft2(x) * np.sqrt(30 * 20), + np.fft.ifft2(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.ifft2(x) * (30. * 20.), + np.fft.ifft2(x, norm="forward"), atol=1e-6) + + def test_fftn(self): + x = random((30, 20, 10)) + 1j * random((30, 20, 10)) + assert_allclose( + np.fft.fft(np.fft.fft(np.fft.fft(x, axis=2), axis=1), axis=0), + np.fft.fftn(x), atol=1e-6) + assert_allclose(np.fft.fftn(x), + np.fft.fftn(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.fftn(x) / np.sqrt(30 * 20 * 10), + np.fft.fftn(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.fftn(x) / (30. * 20. * 10.), + np.fft.fftn(x, norm="forward"), atol=1e-6) + + def test_ifftn(self): + x = random((30, 20, 10)) + 1j * random((30, 20, 10)) + assert_allclose( + np.fft.ifft(np.fft.ifft(np.fft.ifft(x, axis=2), axis=1), axis=0), + np.fft.ifftn(x), atol=1e-6) + assert_allclose(np.fft.ifftn(x), + np.fft.ifftn(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.ifftn(x) * np.sqrt(30 * 20 * 10), + np.fft.ifftn(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.ifftn(x) * (30. * 20. * 10.), + np.fft.ifftn(x, norm="forward"), atol=1e-6) + + def test_rfft(self): + x = random(30) + for n in [x.size, 2 * x.size]: + for norm in [None, 'backward', 'ortho', 'forward']: + assert_allclose( + np.fft.fft(x, n=n, norm=norm)[:(n // 2 + 1)], + np.fft.rfft(x, n=n, norm=norm), atol=1e-6) + assert_allclose( + np.fft.rfft(x, n=n), + np.fft.rfft(x, n=n, norm="backward"), atol=1e-6) + assert_allclose( + np.fft.rfft(x, n=n) / np.sqrt(n), + np.fft.rfft(x, n=n, norm="ortho"), atol=1e-6) + assert_allclose( + np.fft.rfft(x, n=n) / n, + np.fft.rfft(x, n=n, norm="forward"), atol=1e-6) + + def test_rfft_even(self): + x = np.arange(8) + n = 4 + y = np.fft.rfft(x, n) + assert_allclose(y, np.fft.fft(x[:n])[:n // 2 + 1], rtol=1e-14) + + def test_rfft_odd(self): + x = np.array([1, 0, 2, 3, -3]) + y = np.fft.rfft(x) + assert_allclose(y, np.fft.fft(x)[:3], rtol=1e-14) + + def test_irfft(self): + x = random(30) + assert_allclose(x, np.fft.irfft(np.fft.rfft(x)), atol=1e-6) + assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="backward"), + norm="backward"), atol=1e-6) + assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="ortho"), + norm="ortho"), atol=1e-6) + assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="forward"), + norm="forward"), atol=1e-6) + + def test_rfft2(self): + x = random((30, 20)) + assert_allclose(np.fft.fft2(x)[:, :11], np.fft.rfft2(x), atol=1e-6) + assert_allclose(np.fft.rfft2(x), + np.fft.rfft2(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.rfft2(x) / np.sqrt(30 * 20), + np.fft.rfft2(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.rfft2(x) / (30. * 20.), + np.fft.rfft2(x, norm="forward"), atol=1e-6) + + def test_irfft2(self): + x = random((30, 20)) + assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x)), atol=1e-6) + assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="backward"), + norm="backward"), atol=1e-6) + assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="ortho"), + norm="ortho"), atol=1e-6) + assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="forward"), + norm="forward"), atol=1e-6) + + def test_rfftn(self): + x = random((30, 20, 10)) + assert_allclose(np.fft.fftn(x)[:, :, :6], np.fft.rfftn(x), atol=1e-6) + assert_allclose(np.fft.rfftn(x), + np.fft.rfftn(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.rfftn(x) / np.sqrt(30 * 20 * 10), + np.fft.rfftn(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.rfftn(x) / (30. * 20. * 10.), + np.fft.rfftn(x, norm="forward"), atol=1e-6) + # Regression test for gh-27159 + x = np.ones((2, 3)) + result = np.fft.rfftn(x, axes=(0, 0, 1), s=(10, 20, 40)) + assert result.shape == (10, 21) + expected = np.fft.fft(np.fft.fft(np.fft.rfft(x, axis=1, n=40), + axis=0, n=20), axis=0, n=10) + assert expected.shape == (10, 21) + assert_allclose(result, expected, atol=1e-6) + + def test_irfftn(self): + x = random((30, 20, 10)) + assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x)), atol=1e-6) + assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="backward"), + norm="backward"), atol=1e-6) + assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="ortho"), + norm="ortho"), atol=1e-6) + assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="forward"), + norm="forward"), atol=1e-6) + + def test_hfft(self): + x = random(14) + 1j * random(14) + x_herm = np.concatenate((random(1), x, random(1))) + x = np.concatenate((x_herm, x[::-1].conj())) + assert_allclose(np.fft.fft(x), np.fft.hfft(x_herm), atol=1e-6) + assert_allclose(np.fft.hfft(x_herm), + np.fft.hfft(x_herm, norm="backward"), atol=1e-6) + assert_allclose(np.fft.hfft(x_herm) / np.sqrt(30), + np.fft.hfft(x_herm, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.hfft(x_herm) / 30., + np.fft.hfft(x_herm, norm="forward"), atol=1e-6) + + def test_ihfft(self): + x = random(14) + 1j * random(14) + x_herm = np.concatenate((random(1), x, random(1))) + x = np.concatenate((x_herm, x[::-1].conj())) + assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm)), atol=1e-6) + assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm, + norm="backward"), norm="backward"), atol=1e-6) + assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm, + norm="ortho"), norm="ortho"), atol=1e-6) + assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm, + norm="forward"), norm="forward"), atol=1e-6) + + @pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn, + np.fft.rfftn, np.fft.irfftn]) + def test_axes(self, op): + x = random((30, 20, 10)) + axes = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)] + for a in axes: + op_tr = op(np.transpose(x, a)) + tr_op = np.transpose(op(x, axes=a), a) + assert_allclose(op_tr, tr_op, atol=1e-6) + + @pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn, + np.fft.fft2, np.fft.ifft2]) + def test_s_negative_1(self, op): + x = np.arange(100).reshape(10, 10) + # should use the whole input array along the first axis + assert op(x, s=(-1, 5), axes=(0, 1)).shape == (10, 5) + + @pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn, + np.fft.rfftn, np.fft.irfftn]) + def test_s_axes_none(self, op): + x = np.arange(100).reshape(10, 10) + with pytest.warns(match='`axes` should not be `None` if `s`'): + op(x, s=(-1, 5)) + + @pytest.mark.parametrize("op", [np.fft.fft2, np.fft.ifft2]) + def test_s_axes_none_2D(self, op): + x = np.arange(100).reshape(10, 10) + with pytest.warns(match='`axes` should not be `None` if `s`'): + op(x, s=(-1, 5), axes=None) + + @pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn, + np.fft.rfftn, np.fft.irfftn, + np.fft.fft2, np.fft.ifft2]) + def test_s_contains_none(self, op): + x = random((30, 20, 10)) + with pytest.warns(match='array containing `None` values to `s`'): + op(x, s=(10, None, 10), axes=(0, 1, 2)) + + def test_all_1d_norm_preserving(self): + # verify that round-trip transforms are norm-preserving + x = random(30) + x_norm = np.linalg.norm(x) + n = x.size * 2 + func_pairs = [(np.fft.fft, np.fft.ifft), + (np.fft.rfft, np.fft.irfft), + # hfft: order so the first function takes x.size samples + # (necessary for comparison to x_norm above) + (np.fft.ihfft, np.fft.hfft), + ] + for forw, back in func_pairs: + for n in [x.size, 2 * x.size]: + for norm in [None, 'backward', 'ortho', 'forward']: + tmp = forw(x, n=n, norm=norm) + tmp = back(tmp, n=n, norm=norm) + assert_allclose(x_norm, + np.linalg.norm(tmp), atol=1e-6) + + @pytest.mark.parametrize("axes", [(0, 1), (0, 2), None]) + @pytest.mark.parametrize("dtype", (complex, float)) + @pytest.mark.parametrize("transpose", (True, False)) + def test_fftn_out_argument(self, dtype, transpose, axes): + def zeros_like(x): + if transpose: + return np.zeros_like(x.T).T + else: + return np.zeros_like(x) + + # tests below only test the out parameter + if dtype is complex: + x = random((10, 5, 6)) + 1j * random((10, 5, 6)) + fft, ifft = np.fft.fftn, np.fft.ifftn + else: + x = random((10, 5, 6)) + fft, ifft = np.fft.rfftn, np.fft.irfftn + + expected = fft(x, axes=axes) + out = zeros_like(expected) + result = fft(x, out=out, axes=axes) + assert result is out + assert_array_equal(result, expected) + + expected2 = ifft(expected, axes=axes) + out2 = out if dtype is complex else zeros_like(expected2) + result2 = ifft(out, out=out2, axes=axes) + assert result2 is out2 + assert_array_equal(result2, expected2) + + @pytest.mark.parametrize("fft", [np.fft.fftn, np.fft.ifftn, np.fft.rfftn]) + def test_fftn_out_and_s_interaction(self, fft): + # With s, shape varies, so generally one cannot pass in out. + if fft is np.fft.rfftn: + x = random((10, 5, 6)) + else: + x = random((10, 5, 6)) + 1j * random((10, 5, 6)) + with pytest.raises(ValueError, match="has wrong shape"): + fft(x, out=np.zeros_like(x), s=(3, 3, 3), axes=(0, 1, 2)) + # Except on the first axis done (which is the last of axes). + s = (10, 5, 5) + expected = fft(x, s=s, axes=(0, 1, 2)) + out = np.zeros_like(expected) + result = fft(x, s=s, axes=(0, 1, 2), out=out) + assert result is out + assert_array_equal(result, expected) + + @pytest.mark.parametrize("s", [(9, 5, 5), (3, 3, 3)]) + def test_irfftn_out_and_s_interaction(self, s): + # Since for irfftn, the output is real and thus cannot be used for + # intermediate steps, it should always work. + x = random((9, 5, 6, 2)) + 1j * random((9, 5, 6, 2)) + expected = np.fft.irfftn(x, s=s, axes=(0, 1, 2)) + out = np.zeros_like(expected) + result = np.fft.irfftn(x, s=s, axes=(0, 1, 2), out=out) + assert result is out + assert_array_equal(result, expected) + + +@pytest.mark.parametrize( + "dtype", + [np.float32, np.float64, np.complex64, np.complex128]) +@pytest.mark.parametrize("order", ["F", 'non-contiguous']) +@pytest.mark.parametrize( + "fft", + [np.fft.fft, np.fft.fft2, np.fft.fftn, + np.fft.ifft, np.fft.ifft2, np.fft.ifftn]) +def test_fft_with_order(dtype, order, fft): + # Check that FFT/IFFT produces identical results for C, Fortran and + # non contiguous arrays + rng = np.random.RandomState(42) + X = rng.rand(8, 7, 13).astype(dtype, copy=False) + # See discussion in pull/14178 + _tol = 8.0 * np.sqrt(np.log2(X.size)) * np.finfo(X.dtype).eps + if order == 'F': + Y = np.asfortranarray(X) + else: + # Make a non contiguous array + Y = X[::-1] + X = np.ascontiguousarray(X[::-1]) + + if fft.__name__.endswith('fft'): + for axis in range(3): + X_res = fft(X, axis=axis) + Y_res = fft(Y, axis=axis) + assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol) + elif fft.__name__.endswith(('fft2', 'fftn')): + axes = [(0, 1), (1, 2), (0, 2)] + if fft.__name__.endswith('fftn'): + axes.extend([(0,), (1,), (2,), None]) + for ax in axes: + X_res = fft(X, axes=ax) + Y_res = fft(Y, axes=ax) + assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol) + else: + raise ValueError + + +@pytest.mark.parametrize("order", ["F", "C"]) +@pytest.mark.parametrize("n", [None, 7, 12]) +def test_fft_output_order(order, n): + rng = np.random.RandomState(42) + x = rng.rand(10) + x = np.asarray(x, dtype=np.complex64, order=order) + res = np.fft.fft(x, n=n) + assert res.flags.c_contiguous == x.flags.c_contiguous + assert res.flags.f_contiguous == x.flags.f_contiguous + +@pytest.mark.skipif(IS_WASM, reason="Cannot start thread") +class TestFFTThreadSafe: + threads = 16 + input_shape = (800, 200) + + def _test_mtsame(self, func, *args): + def worker(args, q): + q.put(func(*args)) + + q = queue.Queue() + expected = func(*args) + + # Spin off a bunch of threads to call the same function simultaneously + t = [threading.Thread(target=worker, args=(args, q)) + for i in range(self.threads)] + [x.start() for x in t] + + [x.join() for x in t] + # Make sure all threads returned the correct value + for i in range(self.threads): + assert_array_equal(q.get(timeout=5), expected, + 'Function returned wrong value in multithreaded context') + + def test_fft(self): + a = np.ones(self.input_shape) * 1 + 0j + self._test_mtsame(np.fft.fft, a) + + def test_ifft(self): + a = np.ones(self.input_shape) * 1 + 0j + self._test_mtsame(np.fft.ifft, a) + + def test_rfft(self): + a = np.ones(self.input_shape) + self._test_mtsame(np.fft.rfft, a) + + def test_irfft(self): + a = np.ones(self.input_shape) * 1 + 0j + self._test_mtsame(np.fft.irfft, a) + + +def test_irfft_with_n_1_regression(): + # Regression test for gh-25661 + x = np.arange(10) + np.fft.irfft(x, n=1) + np.fft.hfft(x, n=1) + np.fft.irfft(np.array([0], complex), n=10) + + +def test_irfft_with_n_large_regression(): + # Regression test for gh-25679 + x = np.arange(5) * (1 + 1j) + result = np.fft.hfft(x, n=10) + expected = np.array([20., 9.91628173, -11.8819096, 7.1048486, + -6.62459848, 4., -3.37540152, -0.16057669, + 1.8819096, -20.86055364]) + assert_allclose(result, expected) + + +@pytest.mark.parametrize("fft", [ + np.fft.fft, np.fft.ifft, np.fft.rfft, np.fft.irfft +]) +@pytest.mark.parametrize("data", [ + np.array([False, True, False]), + np.arange(10, dtype=np.uint8), + np.arange(5, dtype=np.int16), +]) +def test_fft_with_integer_or_bool_input(data, fft): + # Regression test for gh-25819 + result = fft(data) + float_data = data.astype(np.result_type(data, 1.)) + expected = fft(float_data) + assert_array_equal(result, expected) diff --git a/local_packages/numpy/lib/__init__.py b/local_packages/numpy/lib/__init__.py new file mode 100644 index 0000000..a248d04 --- /dev/null +++ b/local_packages/numpy/lib/__init__.py @@ -0,0 +1,97 @@ +""" +``numpy.lib`` is mostly a space for implementing functions that don't +belong in core or in another NumPy submodule with a clear purpose +(e.g. ``random``, ``fft``, ``linalg``, ``ma``). + +``numpy.lib``'s private submodules contain basic functions that are used by +other public modules and are useful to have in the main name-space. + +""" + +# Public submodules +# Note: recfunctions is public, but not imported +from numpy._core._multiarray_umath import add_docstring, tracemalloc_domain +from numpy._core.function_base import add_newdoc + +# Private submodules +# load module names. See https://github.com/networkx/networkx/issues/5838 +from . import ( + _arraypad_impl, + _arraysetops_impl, + _arrayterator_impl, + _function_base_impl, + _histograms_impl, + _index_tricks_impl, + _nanfunctions_impl, + _npyio_impl, + _polynomial_impl, + _shape_base_impl, + _stride_tricks_impl, + _twodim_base_impl, + _type_check_impl, + _ufunclike_impl, + _utils_impl, + _version, + array_utils, + format, + introspect, + mixins, + npyio, + scimath, + stride_tricks, +) + +# numpy.lib namespace members +from ._arrayterator_impl import Arrayterator +from ._version import NumpyVersion + +__all__ = [ + "Arrayterator", "add_docstring", "add_newdoc", "array_utils", + "format", "introspect", "mixins", "NumpyVersion", "npyio", "scimath", + "stride_tricks", "tracemalloc_domain", +] + +add_newdoc.__module__ = "numpy.lib" + +from numpy._pytesttester import PytestTester + +test = PytestTester(__name__) +del PytestTester + +def __getattr__(attr): + # Warn for deprecated/removed aliases + import math + import warnings + + if attr == "math": + warnings.warn( + "`np.lib.math` is a deprecated alias for the standard library " + "`math` module (Deprecated Numpy 1.25). Replace usages of " + "`numpy.lib.math` with `math`", DeprecationWarning, stacklevel=2) + return math + elif attr == "emath": + raise AttributeError( + "numpy.lib.emath was an alias for emath module that was removed " + "in NumPy 2.0. Replace usages of numpy.lib.emath with " + "numpy.emath.", + name=None + ) + elif attr in ( + "histograms", "type_check", "nanfunctions", "function_base", + "arraypad", "arraysetops", "ufunclike", "utils", "twodim_base", + "shape_base", "polynomial", "index_tricks", + ): + raise AttributeError( + f"numpy.lib.{attr} is now private. If you are using a public " + "function, it should be available in the main numpy namespace, " + "otherwise check the NumPy 2.0 migration guide.", + name=None + ) + elif attr == "arrayterator": + raise AttributeError( + "numpy.lib.arrayterator submodule is now private. To access " + "Arrayterator class use numpy.lib.Arrayterator.", + name=None + ) + else: + raise AttributeError(f"module {__name__!r} has no attribute {attr!r}") diff --git a/local_packages/numpy/lib/__init__.pyi b/local_packages/numpy/lib/__init__.pyi new file mode 100644 index 0000000..5a85743 --- /dev/null +++ b/local_packages/numpy/lib/__init__.pyi @@ -0,0 +1,52 @@ +from numpy._core.function_base import add_newdoc +from numpy._core.multiarray import add_docstring, tracemalloc_domain + +# all submodules of `lib` are accessible at runtime through `__getattr__`, +# so we implicitly re-export them here +from . import ( + _array_utils_impl as _array_utils_impl, + _arraypad_impl as _arraypad_impl, + _arraysetops_impl as _arraysetops_impl, + _arrayterator_impl as _arrayterator_impl, + _datasource as _datasource, + _format_impl as _format_impl, + _function_base_impl as _function_base_impl, + _histograms_impl as _histograms_impl, + _index_tricks_impl as _index_tricks_impl, + _iotools as _iotools, + _nanfunctions_impl as _nanfunctions_impl, + _npyio_impl as _npyio_impl, + _polynomial_impl as _polynomial_impl, + _scimath_impl as _scimath_impl, + _shape_base_impl as _shape_base_impl, + _stride_tricks_impl as _stride_tricks_impl, + _twodim_base_impl as _twodim_base_impl, + _type_check_impl as _type_check_impl, + _ufunclike_impl as _ufunclike_impl, + _utils_impl as _utils_impl, + _version as _version, + array_utils, + format, + introspect, + mixins, + npyio, + scimath, + stride_tricks, +) +from ._arrayterator_impl import Arrayterator +from ._version import NumpyVersion + +__all__ = [ + "Arrayterator", + "add_docstring", + "add_newdoc", + "array_utils", + "format", + "introspect", + "mixins", + "NumpyVersion", + "npyio", + "scimath", + "stride_tricks", + "tracemalloc_domain", +] diff --git a/local_packages/numpy/lib/_array_utils_impl.py b/local_packages/numpy/lib/_array_utils_impl.py new file mode 100644 index 0000000..c3996e1 --- /dev/null +++ b/local_packages/numpy/lib/_array_utils_impl.py @@ -0,0 +1,62 @@ +""" +Miscellaneous utils. +""" +from numpy._core import asarray +from numpy._core.numeric import normalize_axis_index, normalize_axis_tuple +from numpy._utils import set_module + +__all__ = ["byte_bounds", "normalize_axis_tuple", "normalize_axis_index"] + + +@set_module("numpy.lib.array_utils") +def byte_bounds(a): + """ + Returns pointers to the end-points of an array. + + Parameters + ---------- + a : ndarray + Input array. It must conform to the Python-side of the array + interface. + + Returns + ------- + (low, high) : tuple of 2 integers + The first integer is the first byte of the array, the second + integer is just past the last byte of the array. If `a` is not + contiguous it will not use every byte between the (`low`, `high`) + values. + + Examples + -------- + >>> import numpy as np + >>> I = np.eye(2, dtype='f'); I.dtype + dtype('float32') + >>> low, high = np.lib.array_utils.byte_bounds(I) + >>> high - low == I.size*I.itemsize + True + >>> I = np.eye(2); I.dtype + dtype('float64') + >>> low, high = np.lib.array_utils.byte_bounds(I) + >>> high - low == I.size*I.itemsize + True + + """ + ai = a.__array_interface__ + a_data = ai['data'][0] + astrides = ai['strides'] + ashape = ai['shape'] + bytes_a = asarray(a).dtype.itemsize + + a_low = a_high = a_data + if astrides is None: + # contiguous case + a_high += a.size * bytes_a + else: + for shape, stride in zip(ashape, astrides): + if stride < 0: + a_low += (shape - 1) * stride + else: + a_high += (shape - 1) * stride + a_high += bytes_a + return a_low, a_high diff --git a/local_packages/numpy/lib/_array_utils_impl.pyi b/local_packages/numpy/lib/_array_utils_impl.pyi new file mode 100644 index 0000000..e33507a --- /dev/null +++ b/local_packages/numpy/lib/_array_utils_impl.pyi @@ -0,0 +1,10 @@ +import numpy as np +from numpy._core.numeric import normalize_axis_index, normalize_axis_tuple + +__all__ = ["byte_bounds", "normalize_axis_tuple", "normalize_axis_index"] + +# NOTE: In practice `byte_bounds` can (potentially) take any object +# implementing the `__array_interface__` protocol. The caveat is +# that certain keys, marked as optional in the spec, must be present for +# `byte_bounds`. This concerns `"strides"` and `"data"`. +def byte_bounds(a: np.generic | np.ndarray) -> tuple[int, int]: ... diff --git a/local_packages/numpy/lib/_arraypad_impl.py b/local_packages/numpy/lib/_arraypad_impl.py new file mode 100644 index 0000000..681b92f --- /dev/null +++ b/local_packages/numpy/lib/_arraypad_impl.py @@ -0,0 +1,926 @@ +""" +The arraypad module contains a group of functions to pad values onto the edges +of an n-dimensional array. + +""" +import typing + +import numpy as np +from numpy._core.overrides import array_function_dispatch +from numpy.lib._index_tricks_impl import ndindex + +__all__ = ['pad'] + + +############################################################################### +# Private utility functions. + + +def _round_if_needed(arr, dtype): + """ + Rounds arr inplace if destination dtype is integer. + + Parameters + ---------- + arr : ndarray + Input array. + dtype : dtype + The dtype of the destination array. + """ + if np.issubdtype(dtype, np.integer): + arr.round(out=arr) + + +def _slice_at_axis(sl, axis): + """ + Construct tuple of slices to slice an array in the given dimension. + + Parameters + ---------- + sl : slice + The slice for the given dimension. + axis : int + The axis to which `sl` is applied. All other dimensions are left + "unsliced". + + Returns + ------- + sl : tuple of slices + A tuple with slices matching `shape` in length. + + Examples + -------- + >>> np._slice_at_axis(slice(None, 3, -1), 1) + (slice(None, None, None), slice(None, 3, -1), (...,)) + """ + return (slice(None),) * axis + (sl,) + (...,) + + +def _view_roi(array, original_area_slice, axis): + """ + Get a view of the current region of interest during iterative padding. + + When padding multiple dimensions iteratively corner values are + unnecessarily overwritten multiple times. This function reduces the + working area for the first dimensions so that corners are excluded. + + Parameters + ---------- + array : ndarray + The array with the region of interest. + original_area_slice : tuple of slices + Denotes the area with original values of the unpadded array. + axis : int + The currently padded dimension assuming that `axis` is padded before + `axis` + 1. + + Returns + ------- + roi : ndarray + The region of interest of the original `array`. + """ + axis += 1 + sl = (slice(None),) * axis + original_area_slice[axis:] + return array[sl] + + +def _pad_simple(array, pad_width, fill_value=None): + """ + Pad array on all sides with either a single value or undefined values. + + Parameters + ---------- + array : ndarray + Array to grow. + pad_width : sequence of tuple[int, int] + Pad width on both sides for each dimension in `arr`. + fill_value : scalar, optional + If provided the padded area is filled with this value, otherwise + the pad area left undefined. + + Returns + ------- + padded : ndarray + The padded array with the same dtype as`array`. Its order will default + to C-style if `array` is not F-contiguous. + original_area_slice : tuple + A tuple of slices pointing to the area of the original array. + """ + # Allocate grown array + new_shape = tuple( + left + size + right + for size, (left, right) in zip(array.shape, pad_width) + ) + order = 'F' if array.flags.fnc else 'C' # Fortran and not also C-order + padded = np.empty(new_shape, dtype=array.dtype, order=order) + + if fill_value is not None: + padded.fill(fill_value) + + # Copy old array into correct space + original_area_slice = tuple( + slice(left, left + size) + for size, (left, right) in zip(array.shape, pad_width) + ) + padded[original_area_slice] = array + + return padded, original_area_slice + + +def _set_pad_area(padded, axis, width_pair, value_pair): + """ + Set empty-padded area in given dimension. + + Parameters + ---------- + padded : ndarray + Array with the pad area which is modified inplace. + axis : int + Dimension with the pad area to set. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + value_pair : tuple of scalars or ndarrays + Values inserted into the pad area on each side. It must match or be + broadcastable to the shape of `arr`. + """ + left_slice = _slice_at_axis(slice(None, width_pair[0]), axis) + padded[left_slice] = value_pair[0] + + right_slice = _slice_at_axis( + slice(padded.shape[axis] - width_pair[1], None), axis) + padded[right_slice] = value_pair[1] + + +def _get_edges(padded, axis, width_pair): + """ + Retrieve edge values from empty-padded array in given dimension. + + Parameters + ---------- + padded : ndarray + Empty-padded array. + axis : int + Dimension in which the edges are considered. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + + Returns + ------- + left_edge, right_edge : ndarray + Edge values of the valid area in `padded` in the given dimension. Its + shape will always match `padded` except for the dimension given by + `axis` which will have a length of 1. + """ + left_index = width_pair[0] + left_slice = _slice_at_axis(slice(left_index, left_index + 1), axis) + left_edge = padded[left_slice] + + right_index = padded.shape[axis] - width_pair[1] + right_slice = _slice_at_axis(slice(right_index - 1, right_index), axis) + right_edge = padded[right_slice] + + return left_edge, right_edge + + +def _get_linear_ramps(padded, axis, width_pair, end_value_pair): + """ + Construct linear ramps for empty-padded array in given dimension. + + Parameters + ---------- + padded : ndarray + Empty-padded array. + axis : int + Dimension in which the ramps are constructed. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + end_value_pair : (scalar, scalar) + End values for the linear ramps which form the edge of the fully padded + array. These values are included in the linear ramps. + + Returns + ------- + left_ramp, right_ramp : ndarray + Linear ramps to set on both sides of `padded`. + """ + edge_pair = _get_edges(padded, axis, width_pair) + + left_ramp, right_ramp = ( + np.linspace( + start=end_value, + stop=edge.squeeze(axis), # Dimension is replaced by linspace + num=width, + endpoint=False, + dtype=padded.dtype, + axis=axis + ) + for end_value, edge, width in zip( + end_value_pair, edge_pair, width_pair + ) + ) + + # Reverse linear space in appropriate dimension + right_ramp = right_ramp[_slice_at_axis(slice(None, None, -1), axis)] + + return left_ramp, right_ramp + + +def _get_stats(padded, axis, width_pair, length_pair, stat_func): + """ + Calculate statistic for the empty-padded array in given dimension. + + Parameters + ---------- + padded : ndarray + Empty-padded array. + axis : int + Dimension in which the statistic is calculated. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + length_pair : 2-element sequence of None or int + Gives the number of values in valid area from each side that is + taken into account when calculating the statistic. If None the entire + valid area in `padded` is considered. + stat_func : function + Function to compute statistic. The expected signature is + ``stat_func(x: ndarray, axis: int, keepdims: bool) -> ndarray``. + + Returns + ------- + left_stat, right_stat : ndarray + Calculated statistic for both sides of `padded`. + """ + # Calculate indices of the edges of the area with original values + left_index = width_pair[0] + right_index = padded.shape[axis] - width_pair[1] + # as well as its length + max_length = right_index - left_index + + # Limit stat_lengths to max_length + left_length, right_length = length_pair + if left_length is None or max_length < left_length: + left_length = max_length + if right_length is None or max_length < right_length: + right_length = max_length + + if (left_length == 0 or right_length == 0) \ + and stat_func in {np.amax, np.amin}: + # amax and amin can't operate on an empty array, + # raise a more descriptive warning here instead of the default one + raise ValueError("stat_length of 0 yields no value for padding") + + # Calculate statistic for the left side + left_slice = _slice_at_axis( + slice(left_index, left_index + left_length), axis) + left_chunk = padded[left_slice] + left_stat = stat_func(left_chunk, axis=axis, keepdims=True) + _round_if_needed(left_stat, padded.dtype) + + if left_length == right_length == max_length: + # return early as right_stat must be identical to left_stat + return left_stat, left_stat + + # Calculate statistic for the right side + right_slice = _slice_at_axis( + slice(right_index - right_length, right_index), axis) + right_chunk = padded[right_slice] + right_stat = stat_func(right_chunk, axis=axis, keepdims=True) + _round_if_needed(right_stat, padded.dtype) + + return left_stat, right_stat + + +def _set_reflect_both(padded, axis, width_pair, method, + original_period, include_edge=False): + """ + Pad `axis` of `arr` with reflection. + + Parameters + ---------- + padded : ndarray + Input array of arbitrary shape. + axis : int + Axis along which to pad `arr`. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + method : str + Controls method of reflection; options are 'even' or 'odd'. + original_period : int + Original length of data on `axis` of `arr`. + include_edge : bool + If true, edge value is included in reflection, otherwise the edge + value forms the symmetric axis to the reflection. + + Returns + ------- + pad_amt : tuple of ints, length 2 + New index positions of padding to do along the `axis`. If these are + both 0, padding is done in this dimension. + """ + left_pad, right_pad = width_pair + old_length = padded.shape[axis] - right_pad - left_pad + + if include_edge: + # Avoid wrapping with only a subset of the original area + # by ensuring period can only be a multiple of the original + # area's length. + old_length = old_length // original_period * original_period + # Edge is included, we need to offset the pad amount by 1 + edge_offset = 1 + else: + # Avoid wrapping with only a subset of the original area + # by ensuring period can only be a multiple of the original + # area's length. + old_length = ((old_length - 1) // (original_period - 1) + * (original_period - 1) + 1) + edge_offset = 0 # Edge is not included, no need to offset pad amount + old_length -= 1 # but must be omitted from the chunk + + if left_pad > 0: + # Pad with reflected values on left side: + # First limit chunk size which can't be larger than pad area + chunk_length = min(old_length, left_pad) + # Slice right to left, stop on or next to edge, start relative to stop + stop = left_pad - edge_offset + start = stop + chunk_length + left_slice = _slice_at_axis(slice(start, stop, -1), axis) + left_chunk = padded[left_slice] + + if method == "odd": + # Negate chunk and align with edge + edge_slice = _slice_at_axis(slice(left_pad, left_pad + 1), axis) + left_chunk = 2 * padded[edge_slice] - left_chunk + + # Insert chunk into padded area + start = left_pad - chunk_length + stop = left_pad + pad_area = _slice_at_axis(slice(start, stop), axis) + padded[pad_area] = left_chunk + # Adjust pointer to left edge for next iteration + left_pad -= chunk_length + + if right_pad > 0: + # Pad with reflected values on right side: + # First limit chunk size which can't be larger than pad area + chunk_length = min(old_length, right_pad) + # Slice right to left, start on or next to edge, stop relative to start + start = -right_pad + edge_offset - 2 + stop = start - chunk_length + right_slice = _slice_at_axis(slice(start, stop, -1), axis) + right_chunk = padded[right_slice] + + if method == "odd": + # Negate chunk and align with edge + edge_slice = _slice_at_axis( + slice(-right_pad - 1, -right_pad), axis) + right_chunk = 2 * padded[edge_slice] - right_chunk + + # Insert chunk into padded area + start = padded.shape[axis] - right_pad + stop = start + chunk_length + pad_area = _slice_at_axis(slice(start, stop), axis) + padded[pad_area] = right_chunk + # Adjust pointer to right edge for next iteration + right_pad -= chunk_length + + return left_pad, right_pad + + +def _set_wrap_both(padded, axis, width_pair, original_period): + """ + Pad `axis` of `arr` with wrapped values. + + Parameters + ---------- + padded : ndarray + Input array of arbitrary shape. + axis : int + Axis along which to pad `arr`. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + original_period : int + Original length of data on `axis` of `arr`. + + Returns + ------- + pad_amt : tuple of ints, length 2 + New index positions of padding to do along the `axis`. If these are + both 0, padding is done in this dimension. + """ + left_pad, right_pad = width_pair + period = padded.shape[axis] - right_pad - left_pad + # Avoid wrapping with only a subset of the original area by ensuring period + # can only be a multiple of the original area's length. + period = period // original_period * original_period + + # If the current dimension of `arr` doesn't contain enough valid values + # (not part of the undefined pad area) we need to pad multiple times. + # Each time the pad area shrinks on both sides which is communicated with + # these variables. + new_left_pad = 0 + new_right_pad = 0 + + if left_pad > 0: + # Pad with wrapped values on left side + # First slice chunk from left side of the non-pad area. + # Use min(period, left_pad) to ensure that chunk is not larger than + # pad area. + slice_end = left_pad + period + slice_start = slice_end - min(period, left_pad) + right_slice = _slice_at_axis(slice(slice_start, slice_end), axis) + right_chunk = padded[right_slice] + + if left_pad > period: + # Chunk is smaller than pad area + pad_area = _slice_at_axis(slice(left_pad - period, left_pad), axis) + new_left_pad = left_pad - period + else: + # Chunk matches pad area + pad_area = _slice_at_axis(slice(None, left_pad), axis) + padded[pad_area] = right_chunk + + if right_pad > 0: + # Pad with wrapped values on right side + # First slice chunk from right side of the non-pad area. + # Use min(period, right_pad) to ensure that chunk is not larger than + # pad area. + slice_start = -right_pad - period + slice_end = slice_start + min(period, right_pad) + left_slice = _slice_at_axis(slice(slice_start, slice_end), axis) + left_chunk = padded[left_slice] + + if right_pad > period: + # Chunk is smaller than pad area + pad_area = _slice_at_axis( + slice(-right_pad, -right_pad + period), axis) + new_right_pad = right_pad - period + else: + # Chunk matches pad area + pad_area = _slice_at_axis(slice(-right_pad, None), axis) + padded[pad_area] = left_chunk + + return new_left_pad, new_right_pad + + +def _as_pairs(x, ndim, as_index=False): + """ + Broadcast `x` to an array with the shape (`ndim`, 2). + + A helper function for `pad` that prepares and validates arguments like + `pad_width` for iteration in pairs. + + Parameters + ---------- + x : {None, scalar, array-like} + The object to broadcast to the shape (`ndim`, 2). + ndim : int + Number of pairs the broadcasted `x` will have. + as_index : bool, optional + If `x` is not None, try to round each element of `x` to an integer + (dtype `np.intp`) and ensure every element is positive. + + Returns + ------- + pairs : nested iterables, shape (`ndim`, 2) + The broadcasted version of `x`. + + Raises + ------ + ValueError + If `as_index` is True and `x` contains negative elements. + Or if `x` is not broadcastable to the shape (`ndim`, 2). + """ + if x is None: + # Pass through None as a special case, otherwise np.round(x) fails + # with an AttributeError + return ((None, None),) * ndim + + x = np.array(x) + if as_index: + x = np.round(x).astype(np.intp, copy=False) + + if x.ndim < 3: + # Optimization: Possibly use faster paths for cases where `x` has + # only 1 or 2 elements. `np.broadcast_to` could handle these as well + # but is currently slower + + if x.size == 1: + # x was supplied as a single value + x = x.ravel() # Ensure x[0] works for x.ndim == 0, 1, 2 + if as_index and x < 0: + raise ValueError("index can't contain negative values") + return ((x[0], x[0]),) * ndim + + if x.size == 2 and x.shape != (2, 1): + # x was supplied with a single value for each side + # but except case when each dimension has a single value + # which should be broadcasted to a pair, + # e.g. [[1], [2]] -> [[1, 1], [2, 2]] not [[1, 2], [1, 2]] + x = x.ravel() # Ensure x[0], x[1] works + if as_index and (x[0] < 0 or x[1] < 0): + raise ValueError("index can't contain negative values") + return ((x[0], x[1]),) * ndim + + if as_index and x.min() < 0: + raise ValueError("index can't contain negative values") + + # Converting the array with `tolist` seems to improve performance + # when iterating and indexing the result (see usage in `pad`) + return np.broadcast_to(x, (ndim, 2)).tolist() + + +def _pad_dispatcher(array, pad_width, mode=None, **kwargs): + return (array,) + + +############################################################################### +# Public functions + + +@array_function_dispatch(_pad_dispatcher, module='numpy') +def pad(array, pad_width, mode='constant', **kwargs): + """ + Pad an array. + + Parameters + ---------- + array : array_like of rank N + The array to pad. + pad_width : {sequence, array_like, int, dict} + Number of values padded to the edges of each axis. + ``((before_1, after_1), ... (before_N, after_N))`` unique pad widths + for each axis. + ``(before, after)`` or ``((before, after),)`` yields same before + and after pad for each axis. + ``(pad,)`` or ``int`` is a shortcut for before = after = pad width + for all axes. + If a ``dict``, each key is an axis and its corresponding value is an ``int`` or + ``int`` pair describing the padding ``(before, after)`` or ``pad`` width for + that axis. + mode : str or function, optional + One of the following string values or a user supplied function. + + 'constant' (default) + Pads with a constant value. + 'edge' + Pads with the edge values of array. + 'linear_ramp' + Pads with the linear ramp between end_value and the + array edge value. + 'maximum' + Pads with the maximum value of all or part of the + vector along each axis. + 'mean' + Pads with the mean value of all or part of the + vector along each axis. + 'median' + Pads with the median value of all or part of the + vector along each axis. + 'minimum' + Pads with the minimum value of all or part of the + vector along each axis. + 'reflect' + Pads with the reflection of the vector mirrored on + the first and last values of the vector along each + axis. + 'symmetric' + Pads with the reflection of the vector mirrored + along the edge of the array. + 'wrap' + Pads with the wrap of the vector along the axis. + The first values are used to pad the end and the + end values are used to pad the beginning. + 'empty' + Pads with undefined values. + + + Padding function, see Notes. + stat_length : sequence or int, optional + Used in 'maximum', 'mean', 'median', and 'minimum'. Number of + values at edge of each axis used to calculate the statistic value. + + ``((before_1, after_1), ... (before_N, after_N))`` unique statistic + lengths for each axis. + + ``(before, after)`` or ``((before, after),)`` yields same before + and after statistic lengths for each axis. + + ``(stat_length,)`` or ``int`` is a shortcut for + ``before = after = statistic`` length for all axes. + + Default is ``None``, to use the entire axis. + constant_values : sequence or scalar, optional + Used in 'constant'. The values to set the padded values for each + axis. + + ``((before_1, after_1), ... (before_N, after_N))`` unique pad constants + for each axis. + + ``(before, after)`` or ``((before, after),)`` yields same before + and after constants for each axis. + + ``(constant,)`` or ``constant`` is a shortcut for + ``before = after = constant`` for all axes. + + Default is 0. + end_values : sequence or scalar, optional + Used in 'linear_ramp'. The values used for the ending value of the + linear_ramp and that will form the edge of the padded array. + + ``((before_1, after_1), ... (before_N, after_N))`` unique end values + for each axis. + + ``(before, after)`` or ``((before, after),)`` yields same before + and after end values for each axis. + + ``(constant,)`` or ``constant`` is a shortcut for + ``before = after = constant`` for all axes. + + Default is 0. + reflect_type : {'even', 'odd'}, optional + Used in 'reflect', and 'symmetric'. The 'even' style is the + default with an unaltered reflection around the edge value. For + the 'odd' style, the extended part of the array is created by + subtracting the reflected values from two times the edge value. + + Returns + ------- + pad : ndarray + Padded array of rank equal to `array` with shape increased + according to `pad_width`. + + Notes + ----- + For an array with rank greater than 1, some of the padding of later + axes is calculated from padding of previous axes. This is easiest to + think about with a rank 2 array where the corners of the padded array + are calculated by using padded values from the first axis. + + The padding function, if used, should modify a rank 1 array in-place. It + has the following signature:: + + padding_func(vector, iaxis_pad_width, iaxis, kwargs) + + where + + vector : ndarray + A rank 1 array already padded with zeros. Padded values are + vector[:iaxis_pad_width[0]] and vector[-iaxis_pad_width[1]:]. + iaxis_pad_width : tuple + A 2-tuple of ints, iaxis_pad_width[0] represents the number of + values padded at the beginning of vector where + iaxis_pad_width[1] represents the number of values padded at + the end of vector. + iaxis : int + The axis currently being calculated. + kwargs : dict + Any keyword arguments the function requires. + + Examples + -------- + >>> import numpy as np + >>> a = [1, 2, 3, 4, 5] + >>> np.pad(a, (2, 3), 'constant', constant_values=(4, 6)) + array([4, 4, 1, ..., 6, 6, 6]) + + >>> np.pad(a, (2, 3), 'edge') + array([1, 1, 1, ..., 5, 5, 5]) + + >>> np.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4)) + array([ 5, 3, 1, 2, 3, 4, 5, 2, -1, -4]) + + >>> np.pad(a, (2,), 'maximum') + array([5, 5, 1, 2, 3, 4, 5, 5, 5]) + + >>> np.pad(a, (2,), 'mean') + array([3, 3, 1, 2, 3, 4, 5, 3, 3]) + + >>> np.pad(a, (2,), 'median') + array([3, 3, 1, 2, 3, 4, 5, 3, 3]) + + >>> a = [[1, 2], [3, 4]] + >>> np.pad(a, ((3, 2), (2, 3)), 'minimum') + array([[1, 1, 1, 2, 1, 1, 1], + [1, 1, 1, 2, 1, 1, 1], + [1, 1, 1, 2, 1, 1, 1], + [1, 1, 1, 2, 1, 1, 1], + [3, 3, 3, 4, 3, 3, 3], + [1, 1, 1, 2, 1, 1, 1], + [1, 1, 1, 2, 1, 1, 1]]) + + >>> a = [1, 2, 3, 4, 5] + >>> np.pad(a, (2, 3), 'reflect') + array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2]) + + >>> np.pad(a, (2, 3), 'reflect', reflect_type='odd') + array([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8]) + + >>> np.pad(a, (2, 3), 'symmetric') + array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3]) + + >>> np.pad(a, (2, 3), 'symmetric', reflect_type='odd') + array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7]) + + >>> np.pad(a, (2, 3), 'wrap') + array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3]) + + >>> def pad_with(vector, pad_width, iaxis, kwargs): + ... pad_value = kwargs.get('padder', 10) + ... vector[:pad_width[0]] = pad_value + ... vector[-pad_width[1]:] = pad_value + >>> a = np.arange(6) + >>> a = a.reshape((2, 3)) + >>> np.pad(a, 2, pad_with) + array([[10, 10, 10, 10, 10, 10, 10], + [10, 10, 10, 10, 10, 10, 10], + [10, 10, 0, 1, 2, 10, 10], + [10, 10, 3, 4, 5, 10, 10], + [10, 10, 10, 10, 10, 10, 10], + [10, 10, 10, 10, 10, 10, 10]]) + >>> np.pad(a, 2, pad_with, padder=100) + array([[100, 100, 100, 100, 100, 100, 100], + [100, 100, 100, 100, 100, 100, 100], + [100, 100, 0, 1, 2, 100, 100], + [100, 100, 3, 4, 5, 100, 100], + [100, 100, 100, 100, 100, 100, 100], + [100, 100, 100, 100, 100, 100, 100]]) + + >>> a = np.arange(1, 7).reshape(2, 3) + >>> np.pad(a, {1: (1, 2)}) + array([[0, 1, 2, 3, 0, 0], + [0, 4, 5, 6, 0, 0]]) + >>> np.pad(a, {-1: 2}) + array([[0, 0, 1, 2, 3, 0, 0], + [0, 0, 4, 5, 6, 0, 0]]) + >>> np.pad(a, {0: (3, 0)}) + array([[0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [1, 2, 3], + [4, 5, 6]]) + >>> np.pad(a, {0: (3, 0), 1: 2}) + array([[0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 2, 3, 0, 0], + [0, 0, 4, 5, 6, 0, 0]]) + """ + array = np.asarray(array) + if isinstance(pad_width, dict): + seq = [(0, 0)] * array.ndim + for axis, width in pad_width.items(): + match width: + case int(both): + seq[axis] = both, both + case tuple((int(before), int(after))): + seq[axis] = before, after + case _ as invalid: + typing.assert_never(invalid) + pad_width = seq + pad_width = np.asarray(pad_width) + + if not pad_width.dtype.kind == 'i': + raise TypeError('`pad_width` must be of integral type.') + + # Broadcast to shape (array.ndim, 2) + pad_width = _as_pairs(pad_width, array.ndim, as_index=True) + + if callable(mode): + # Old behavior: Use user-supplied function with np.apply_along_axis + function = mode + # Create a new zero padded array + padded, _ = _pad_simple(array, pad_width, fill_value=0) + # And apply along each axis + + for axis in range(padded.ndim): + # Iterate using ndindex as in apply_along_axis, but assuming that + # function operates inplace on the padded array. + + # view with the iteration axis at the end + view = np.moveaxis(padded, axis, -1) + + # compute indices for the iteration axes, and append a trailing + # ellipsis to prevent 0d arrays decaying to scalars (gh-8642) + inds = ndindex(view.shape[:-1]) + inds = (ind + (Ellipsis,) for ind in inds) + for ind in inds: + function(view[ind], pad_width[axis], axis, kwargs) + + return padded + + # Make sure that no unsupported keywords were passed for the current mode + allowed_kwargs = { + 'empty': [], 'edge': [], 'wrap': [], + 'constant': ['constant_values'], + 'linear_ramp': ['end_values'], + 'maximum': ['stat_length'], + 'mean': ['stat_length'], + 'median': ['stat_length'], + 'minimum': ['stat_length'], + 'reflect': ['reflect_type'], + 'symmetric': ['reflect_type'], + } + try: + unsupported_kwargs = set(kwargs) - set(allowed_kwargs[mode]) + except KeyError: + raise ValueError(f"mode '{mode}' is not supported") from None + if unsupported_kwargs: + raise ValueError("unsupported keyword arguments for mode " + f"'{mode}': {unsupported_kwargs}") + + stat_functions = {"maximum": np.amax, "minimum": np.amin, + "mean": np.mean, "median": np.median} + + # Create array with final shape and original values + # (padded area is undefined) + padded, original_area_slice = _pad_simple(array, pad_width) + # And prepare iteration over all dimensions + # (zipping may be more readable than using enumerate) + axes = range(padded.ndim) + + if mode == "constant": + values = kwargs.get("constant_values", 0) + values = _as_pairs(values, padded.ndim) + for axis, width_pair, value_pair in zip(axes, pad_width, values): + roi = _view_roi(padded, original_area_slice, axis) + _set_pad_area(roi, axis, width_pair, value_pair) + + elif mode == "empty": + pass # Do nothing as _pad_simple already returned the correct result + + elif array.size == 0: + # Only modes "constant" and "empty" can extend empty axes, all other + # modes depend on `array` not being empty + # -> ensure every empty axis is only "padded with 0" + for axis, width_pair in zip(axes, pad_width): + if array.shape[axis] == 0 and any(width_pair): + raise ValueError( + f"can't extend empty axis {axis} using modes other than " + "'constant' or 'empty'" + ) + # passed, don't need to do anything more as _pad_simple already + # returned the correct result + + elif mode == "edge": + for axis, width_pair in zip(axes, pad_width): + roi = _view_roi(padded, original_area_slice, axis) + edge_pair = _get_edges(roi, axis, width_pair) + _set_pad_area(roi, axis, width_pair, edge_pair) + + elif mode == "linear_ramp": + end_values = kwargs.get("end_values", 0) + end_values = _as_pairs(end_values, padded.ndim) + for axis, width_pair, value_pair in zip(axes, pad_width, end_values): + roi = _view_roi(padded, original_area_slice, axis) + ramp_pair = _get_linear_ramps(roi, axis, width_pair, value_pair) + _set_pad_area(roi, axis, width_pair, ramp_pair) + + elif mode in stat_functions: + func = stat_functions[mode] + length = kwargs.get("stat_length") + length = _as_pairs(length, padded.ndim, as_index=True) + for axis, width_pair, length_pair in zip(axes, pad_width, length): + roi = _view_roi(padded, original_area_slice, axis) + stat_pair = _get_stats(roi, axis, width_pair, length_pair, func) + _set_pad_area(roi, axis, width_pair, stat_pair) + + elif mode in {"reflect", "symmetric"}: + method = kwargs.get("reflect_type", "even") + include_edge = mode == "symmetric" + for axis, (left_index, right_index) in zip(axes, pad_width): + if array.shape[axis] == 1 and (left_index > 0 or right_index > 0): + # Extending singleton dimension for 'reflect' is legacy + # behavior; it really should raise an error. + edge_pair = _get_edges(padded, axis, (left_index, right_index)) + _set_pad_area( + padded, axis, (left_index, right_index), edge_pair) + continue + + roi = _view_roi(padded, original_area_slice, axis) + while left_index > 0 or right_index > 0: + # Iteratively pad until dimension is filled with reflected + # values. This is necessary if the pad area is larger than + # the length of the original values in the current dimension. + left_index, right_index = _set_reflect_both( + roi, axis, (left_index, right_index), + method, array.shape[axis], include_edge + ) + + elif mode == "wrap": + for axis, (left_index, right_index) in zip(axes, pad_width): + roi = _view_roi(padded, original_area_slice, axis) + original_period = padded.shape[axis] - right_index - left_index + while left_index > 0 or right_index > 0: + # Iteratively pad until dimension is filled with wrapped + # values. This is necessary if the pad area is larger than + # the length of the original values in the current dimension. + left_index, right_index = _set_wrap_both( + roi, axis, (left_index, right_index), original_period) + + return padded diff --git a/local_packages/numpy/lib/_arraypad_impl.pyi b/local_packages/numpy/lib/_arraypad_impl.pyi new file mode 100644 index 0000000..e7aacea --- /dev/null +++ b/local_packages/numpy/lib/_arraypad_impl.pyi @@ -0,0 +1,88 @@ +from typing import ( + Any, + Literal as L, + Protocol, + TypeAlias, + TypeVar, + overload, + type_check_only, +) + +from numpy import generic +from numpy._typing import ArrayLike, NDArray, _ArrayLike, _ArrayLikeInt + +__all__ = ["pad"] + +_ScalarT = TypeVar("_ScalarT", bound=generic) + +@type_check_only +class _ModeFunc(Protocol): + def __call__( + self, + vector: NDArray[Any], + iaxis_pad_width: tuple[int, int], + iaxis: int, + kwargs: dict[str, Any], + /, + ) -> None: ... + +_ModeKind: TypeAlias = L[ + "constant", + "edge", + "linear_ramp", + "maximum", + "mean", + "median", + "minimum", + "reflect", + "symmetric", + "wrap", + "empty", +] + +# TODO: In practice each keyword argument is exclusive to one or more +# specific modes. Consider adding more overloads to express this in the future. + +_PadWidth: TypeAlias = ( + _ArrayLikeInt + | dict[int, int] + | dict[int, tuple[int, int]] + | dict[int, int | tuple[int, int]] +) +# Expand `**kwargs` into explicit keyword-only arguments +@overload +def pad( + array: _ArrayLike[_ScalarT], + pad_width: _PadWidth, + mode: _ModeKind = "constant", + *, + stat_length: _ArrayLikeInt | None = None, + constant_values: ArrayLike = 0, + end_values: ArrayLike = 0, + reflect_type: L["odd", "even"] = "even", +) -> NDArray[_ScalarT]: ... +@overload +def pad( + array: ArrayLike, + pad_width: _PadWidth, + mode: _ModeKind = "constant", + *, + stat_length: _ArrayLikeInt | None = None, + constant_values: ArrayLike = 0, + end_values: ArrayLike = 0, + reflect_type: L["odd", "even"] = "even", +) -> NDArray[Any]: ... +@overload +def pad( + array: _ArrayLike[_ScalarT], + pad_width: _PadWidth, + mode: _ModeFunc, + **kwargs: Any, +) -> NDArray[_ScalarT]: ... +@overload +def pad( + array: ArrayLike, + pad_width: _PadWidth, + mode: _ModeFunc, + **kwargs: Any, +) -> NDArray[Any]: ... diff --git a/local_packages/numpy/lib/_arraysetops_impl.py b/local_packages/numpy/lib/_arraysetops_impl.py new file mode 100644 index 0000000..5d521b1 --- /dev/null +++ b/local_packages/numpy/lib/_arraysetops_impl.py @@ -0,0 +1,1158 @@ +""" +Set operations for arrays based on sorting. + +Notes +----- + +For floating point arrays, inaccurate results may appear due to usual round-off +and floating point comparison issues. + +Speed could be gained in some operations by an implementation of +`numpy.sort`, that can provide directly the permutation vectors, thus avoiding +calls to `numpy.argsort`. + +Original author: Robert Cimrman + +""" +import functools +from typing import NamedTuple + +import numpy as np +from numpy._core import overrides +from numpy._core._multiarray_umath import _array_converter, _unique_hash +from numpy.lib.array_utils import normalize_axis_index + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = [ + "ediff1d", "intersect1d", "isin", "setdiff1d", "setxor1d", + "union1d", "unique", "unique_all", "unique_counts", "unique_inverse", + "unique_values" +] + + +def _ediff1d_dispatcher(ary, to_end=None, to_begin=None): + return (ary, to_end, to_begin) + + +@array_function_dispatch(_ediff1d_dispatcher) +def ediff1d(ary, to_end=None, to_begin=None): + """ + The differences between consecutive elements of an array. + + Parameters + ---------- + ary : array_like + If necessary, will be flattened before the differences are taken. + to_end : array_like, optional + Number(s) to append at the end of the returned differences. + to_begin : array_like, optional + Number(s) to prepend at the beginning of the returned differences. + + Returns + ------- + ediff1d : ndarray + The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``. + + See Also + -------- + diff, gradient + + Notes + ----- + When applied to masked arrays, this function drops the mask information + if the `to_begin` and/or `to_end` parameters are used. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 4, 7, 0]) + >>> np.ediff1d(x) + array([ 1, 2, 3, -7]) + + >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99])) + array([-99, 1, 2, ..., -7, 88, 99]) + + The returned array is always 1D. + + >>> y = [[1, 2, 4], [1, 6, 24]] + >>> np.ediff1d(y) + array([ 1, 2, -3, 5, 18]) + + """ + conv = _array_converter(ary) + # Convert to (any) array and ravel: + ary = conv[0].ravel() + + # enforce that the dtype of `ary` is used for the output + dtype_req = ary.dtype + + # fast track default case + if to_begin is None and to_end is None: + return ary[1:] - ary[:-1] + + if to_begin is None: + l_begin = 0 + else: + to_begin = np.asanyarray(to_begin) + if not np.can_cast(to_begin, dtype_req, casting="same_kind"): + raise TypeError("dtype of `to_begin` must be compatible " + "with input `ary` under the `same_kind` rule.") + + to_begin = to_begin.ravel() + l_begin = len(to_begin) + + if to_end is None: + l_end = 0 + else: + to_end = np.asanyarray(to_end) + if not np.can_cast(to_end, dtype_req, casting="same_kind"): + raise TypeError("dtype of `to_end` must be compatible " + "with input `ary` under the `same_kind` rule.") + + to_end = to_end.ravel() + l_end = len(to_end) + + # do the calculation in place and copy to_begin and to_end + l_diff = max(len(ary) - 1, 0) + result = np.empty_like(ary, shape=l_diff + l_begin + l_end) + + if l_begin > 0: + result[:l_begin] = to_begin + if l_end > 0: + result[l_begin + l_diff:] = to_end + np.subtract(ary[1:], ary[:-1], result[l_begin:l_begin + l_diff]) + + return conv.wrap(result) + + +def _unpack_tuple(x): + """ Unpacks one-element tuples for use as return values """ + if len(x) == 1: + return x[0] + else: + return x + + +def _unique_dispatcher(ar, return_index=None, return_inverse=None, + return_counts=None, axis=None, *, equal_nan=None, + sorted=True): + return (ar,) + + +@array_function_dispatch(_unique_dispatcher) +def unique(ar, return_index=False, return_inverse=False, + return_counts=False, axis=None, *, equal_nan=True, + sorted=True): + """ + Find the unique elements of an array. + + Returns the sorted unique elements of an array. There are three optional + outputs in addition to the unique elements: + + * the indices of the input array that give the unique values + * the indices of the unique array that reconstruct the input array + * the number of times each unique value comes up in the input array + + Parameters + ---------- + ar : array_like + Input array. Unless `axis` is specified, this will be flattened if it + is not already 1-D. + return_index : bool, optional + If True, also return the indices of `ar` (along the specified axis, + if provided, or in the flattened array) that result in the unique array. + return_inverse : bool, optional + If True, also return the indices of the unique array (for the specified + axis, if provided) that can be used to reconstruct `ar`. + return_counts : bool, optional + If True, also return the number of times each unique item appears + in `ar`. + axis : int or None, optional + The axis to operate on. If None, `ar` will be flattened. If an integer, + the subarrays indexed by the given axis will be flattened and treated + as the elements of a 1-D array with the dimension of the given axis, + see the notes for more details. Object arrays or structured arrays + that contain objects are not supported if the `axis` kwarg is used. The + default is None. + + equal_nan : bool, optional + If True, collapses multiple NaN values in the return array into one. + + .. versionadded:: 1.24 + + sorted : bool, optional + If True, the unique elements are sorted. Elements may be sorted in + practice even if ``sorted=False``, but this could change without + notice. + + .. versionadded:: 2.3 + + Returns + ------- + unique : ndarray + The sorted unique values. + unique_indices : ndarray, optional + The indices of the first occurrences of the unique values in the + original array. Only provided if `return_index` is True. + unique_inverse : ndarray, optional + The indices to reconstruct the original array from the + unique array. Only provided if `return_inverse` is True. + unique_counts : ndarray, optional + The number of times each of the unique values comes up in the + original array. Only provided if `return_counts` is True. + + See Also + -------- + repeat : Repeat elements of an array. + sort : Return a sorted copy of an array. + + Notes + ----- + When an axis is specified the subarrays indexed by the axis are sorted. + This is done by making the specified axis the first dimension of the array + (move the axis to the first dimension to keep the order of the other axes) + and then flattening the subarrays in C order. The flattened subarrays are + then viewed as a structured type with each element given a label, with the + effect that we end up with a 1-D array of structured types that can be + treated in the same way as any other 1-D array. The result is that the + flattened subarrays are sorted in lexicographic order starting with the + first element. + + .. versionchanged:: 1.21 + Like np.sort, NaN will sort to the end of the values. + For complex arrays all NaN values are considered equivalent + (no matter whether the NaN is in the real or imaginary part). + As the representant for the returned array the smallest one in the + lexicographical order is chosen - see np.sort for how the lexicographical + order is defined for complex arrays. + + .. versionchanged:: 2.0 + For multi-dimensional inputs, ``unique_inverse`` is reshaped + such that the input can be reconstructed using + ``np.take(unique, unique_inverse, axis=axis)``. The result is + now not 1-dimensional when ``axis=None``. + + Note that in NumPy 2.0.0 a higher dimensional array was returned also + when ``axis`` was not ``None``. This was reverted, but + ``inverse.reshape(-1)`` can be used to ensure compatibility with both + versions. + + Examples + -------- + >>> import numpy as np + >>> np.unique([1, 1, 2, 2, 3, 3]) + array([1, 2, 3]) + >>> a = np.array([[1, 1], [2, 3]]) + >>> np.unique(a) + array([1, 2, 3]) + + Return the unique rows of a 2D array + + >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]]) + >>> np.unique(a, axis=0) + array([[1, 0, 0], [2, 3, 4]]) + + Return the indices of the original array that give the unique values: + + >>> a = np.array(['a', 'b', 'b', 'c', 'a']) + >>> u, indices = np.unique(a, return_index=True) + >>> u + array(['a', 'b', 'c'], dtype='>> indices + array([0, 1, 3]) + >>> a[indices] + array(['a', 'b', 'c'], dtype='>> a = np.array([1, 2, 6, 4, 2, 3, 2]) + >>> u, indices = np.unique(a, return_inverse=True) + >>> u + array([1, 2, 3, 4, 6]) + >>> indices + array([0, 1, 4, 3, 1, 2, 1]) + >>> u[indices] + array([1, 2, 6, 4, 2, 3, 2]) + + Reconstruct the input values from the unique values and counts: + + >>> a = np.array([1, 2, 6, 4, 2, 3, 2]) + >>> values, counts = np.unique(a, return_counts=True) + >>> values + array([1, 2, 3, 4, 6]) + >>> counts + array([1, 3, 1, 1, 1]) + >>> np.repeat(values, counts) + array([1, 2, 2, 2, 3, 4, 6]) # original order not preserved + + """ + ar = np.asanyarray(ar) + if axis is None or ar.ndim == 1: + if axis is not None: + normalize_axis_index(axis, ar.ndim) + ret = _unique1d(ar, return_index, return_inverse, return_counts, + equal_nan=equal_nan, inverse_shape=ar.shape, axis=None, + sorted=sorted) + return _unpack_tuple(ret) + + # axis was specified and not None + try: + ar = np.moveaxis(ar, axis, 0) + except np.exceptions.AxisError: + # this removes the "axis1" or "axis2" prefix from the error message + raise np.exceptions.AxisError(axis, ar.ndim) from None + inverse_shape = [1] * ar.ndim + inverse_shape[axis] = ar.shape[0] + + # Must reshape to a contiguous 2D array for this to work... + orig_shape, orig_dtype = ar.shape, ar.dtype + ar = ar.reshape(orig_shape[0], np.prod(orig_shape[1:], dtype=np.intp)) + ar = np.ascontiguousarray(ar) + dtype = [(f'f{i}', ar.dtype) for i in range(ar.shape[1])] + + # At this point, `ar` has shape `(n, m)`, and `dtype` is a structured + # data type with `m` fields where each field has the data type of `ar`. + # In the following, we create the array `consolidated`, which has + # shape `(n,)` with data type `dtype`. + try: + if ar.shape[1] > 0: + consolidated = ar.view(dtype) + else: + # If ar.shape[1] == 0, then dtype will be `np.dtype([])`, which is + # a data type with itemsize 0, and the call `ar.view(dtype)` will + # fail. Instead, we'll use `np.empty` to explicitly create the + # array with shape `(len(ar),)`. Because `dtype` in this case has + # itemsize 0, the total size of the result is still 0 bytes. + consolidated = np.empty(len(ar), dtype=dtype) + except TypeError as e: + # There's no good way to do this for object arrays, etc... + msg = 'The axis argument to unique is not supported for dtype {dt}' + raise TypeError(msg.format(dt=ar.dtype)) from e + + def reshape_uniq(uniq): + n = len(uniq) + uniq = uniq.view(orig_dtype) + uniq = uniq.reshape(n, *orig_shape[1:]) + uniq = np.moveaxis(uniq, 0, axis) + return uniq + + output = _unique1d(consolidated, return_index, + return_inverse, return_counts, + equal_nan=equal_nan, inverse_shape=inverse_shape, + axis=axis, sorted=sorted) + output = (reshape_uniq(output[0]),) + output[1:] + return _unpack_tuple(output) + + +def _unique1d(ar, return_index=False, return_inverse=False, + return_counts=False, *, equal_nan=True, inverse_shape=None, + axis=None, sorted=True): + """ + Find the unique elements of an array, ignoring shape. + + Uses a hash table to find the unique elements if possible. + """ + ar = np.asanyarray(ar).flatten() + if len(ar.shape) != 1: + # np.matrix, and maybe some other array subclasses, insist on keeping + # two dimensions for all operations. Coerce to an ndarray in such cases. + ar = np.asarray(ar).flatten() + + optional_indices = return_index or return_inverse + + # masked arrays are not supported yet. + if not optional_indices and not return_counts and not np.ma.is_masked(ar): + # First we convert the array to a numpy array, later we wrap it back + # in case it was a subclass of numpy.ndarray. + conv = _array_converter(ar) + ar_, = conv + + if (hash_unique := _unique_hash(ar_, equal_nan=equal_nan)) \ + is not NotImplemented: + if sorted: + hash_unique.sort() + # We wrap the result back in case it was a subclass of numpy.ndarray. + return (conv.wrap(hash_unique),) + + # If we don't use the hash map, we use the slower sorting method. + if optional_indices: + perm = ar.argsort(kind='mergesort' if return_index else 'quicksort') + aux = ar[perm] + else: + ar.sort() + aux = ar + mask = np.empty(aux.shape, dtype=np.bool) + mask[:1] = True + if (equal_nan and aux.shape[0] > 0 and aux.dtype.kind in "cfmM" and + np.isnan(aux[-1])): + if aux.dtype.kind == "c": # for complex all NaNs are considered equivalent + aux_firstnan = np.searchsorted(np.isnan(aux), True, side='left') + else: + aux_firstnan = np.searchsorted(aux, aux[-1], side='left') + if aux_firstnan > 0: + mask[1:aux_firstnan] = ( + aux[1:aux_firstnan] != aux[:aux_firstnan - 1]) + mask[aux_firstnan] = True + mask[aux_firstnan + 1:] = False + else: + mask[1:] = aux[1:] != aux[:-1] + + ret = (aux[mask],) + if return_index: + ret += (perm[mask],) + if return_inverse: + imask = np.cumsum(mask) - 1 + inv_idx = np.empty(mask.shape, dtype=np.intp) + inv_idx[perm] = imask + ret += (inv_idx.reshape(inverse_shape) if axis is None else inv_idx,) + if return_counts: + idx = np.concatenate(np.nonzero(mask) + ([mask.size],)) + ret += (np.diff(idx),) + return ret + + +# Array API set functions + +class UniqueAllResult(NamedTuple): + values: np.ndarray + indices: np.ndarray + inverse_indices: np.ndarray + counts: np.ndarray + + +class UniqueCountsResult(NamedTuple): + values: np.ndarray + counts: np.ndarray + + +class UniqueInverseResult(NamedTuple): + values: np.ndarray + inverse_indices: np.ndarray + + +def _unique_all_dispatcher(x, /): + return (x,) + + +@array_function_dispatch(_unique_all_dispatcher) +def unique_all(x): + """ + Find the unique elements of an array, and counts, inverse, and indices. + + This function is an Array API compatible alternative to:: + + np.unique(x, return_index=True, return_inverse=True, + return_counts=True, equal_nan=False, sorted=False) + + but returns a namedtuple for easier access to each output. + + .. note:: + This function currently always returns a sorted result, however, + this could change in any NumPy minor release. + + Parameters + ---------- + x : array_like + Input array. It will be flattened if it is not already 1-D. + + Returns + ------- + out : namedtuple + The result containing: + + * values - The unique elements of an input array. + * indices - The first occurring indices for each unique element. + * inverse_indices - The indices from the set of unique elements + that reconstruct `x`. + * counts - The corresponding counts for each unique element. + + See Also + -------- + unique : Find the unique elements of an array. + + Examples + -------- + >>> import numpy as np + >>> x = [1, 1, 2] + >>> uniq = np.unique_all(x) + >>> uniq.values + array([1, 2]) + >>> uniq.indices + array([0, 2]) + >>> uniq.inverse_indices + array([0, 0, 1]) + >>> uniq.counts + array([2, 1]) + """ + result = unique( + x, + return_index=True, + return_inverse=True, + return_counts=True, + equal_nan=False, + ) + return UniqueAllResult(*result) + + +def _unique_counts_dispatcher(x, /): + return (x,) + + +@array_function_dispatch(_unique_counts_dispatcher) +def unique_counts(x): + """ + Find the unique elements and counts of an input array `x`. + + This function is an Array API compatible alternative to:: + + np.unique(x, return_counts=True, equal_nan=False, sorted=False) + + but returns a namedtuple for easier access to each output. + + .. note:: + This function currently always returns a sorted result, however, + this could change in any NumPy minor release. + + Parameters + ---------- + x : array_like + Input array. It will be flattened if it is not already 1-D. + + Returns + ------- + out : namedtuple + The result containing: + + * values - The unique elements of an input array. + * counts - The corresponding counts for each unique element. + + See Also + -------- + unique : Find the unique elements of an array. + + Examples + -------- + >>> import numpy as np + >>> x = [1, 1, 2] + >>> uniq = np.unique_counts(x) + >>> uniq.values + array([1, 2]) + >>> uniq.counts + array([2, 1]) + """ + result = unique( + x, + return_index=False, + return_inverse=False, + return_counts=True, + equal_nan=False, + ) + return UniqueCountsResult(*result) + + +def _unique_inverse_dispatcher(x, /): + return (x,) + + +@array_function_dispatch(_unique_inverse_dispatcher) +def unique_inverse(x): + """ + Find the unique elements of `x` and indices to reconstruct `x`. + + This function is an Array API compatible alternative to:: + + np.unique(x, return_inverse=True, equal_nan=False, sorted=False) + + but returns a namedtuple for easier access to each output. + + .. note:: + This function currently always returns a sorted result, however, + this could change in any NumPy minor release. + + Parameters + ---------- + x : array_like + Input array. It will be flattened if it is not already 1-D. + + Returns + ------- + out : namedtuple + The result containing: + + * values - The unique elements of an input array. + * inverse_indices - The indices from the set of unique elements + that reconstruct `x`. + + See Also + -------- + unique : Find the unique elements of an array. + + Examples + -------- + >>> import numpy as np + >>> x = [1, 1, 2] + >>> uniq = np.unique_inverse(x) + >>> uniq.values + array([1, 2]) + >>> uniq.inverse_indices + array([0, 0, 1]) + """ + result = unique( + x, + return_index=False, + return_inverse=True, + return_counts=False, + equal_nan=False, + ) + return UniqueInverseResult(*result) + + +def _unique_values_dispatcher(x, /): + return (x,) + + +@array_function_dispatch(_unique_values_dispatcher) +def unique_values(x): + """ + Returns the unique elements of an input array `x`. + + This function is an Array API compatible alternative to:: + + np.unique(x, equal_nan=False, sorted=False) + + .. versionchanged:: 2.3 + The algorithm was changed to a faster one that does not rely on + sorting, and hence the results are no longer implicitly sorted. + + Parameters + ---------- + x : array_like + Input array. It will be flattened if it is not already 1-D. + + Returns + ------- + out : ndarray + The unique elements of an input array. + + See Also + -------- + unique : Find the unique elements of an array. + + Examples + -------- + >>> import numpy as np + >>> np.unique_values([1, 1, 2]) + array([1, 2]) # may vary + + """ + return unique( + x, + return_index=False, + return_inverse=False, + return_counts=False, + equal_nan=False, + sorted=False, + ) + + +def _intersect1d_dispatcher( + ar1, ar2, assume_unique=None, return_indices=None): + return (ar1, ar2) + + +@array_function_dispatch(_intersect1d_dispatcher) +def intersect1d(ar1, ar2, assume_unique=False, return_indices=False): + """ + Find the intersection of two arrays. + + Return the sorted, unique values that are in both of the input arrays. + + Parameters + ---------- + ar1, ar2 : array_like + Input arrays. Will be flattened if not already 1D. + assume_unique : bool + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. If True but ``ar1`` or ``ar2`` are not + unique, incorrect results and out-of-bounds indices could result. + Default is False. + return_indices : bool + If True, the indices which correspond to the intersection of the two + arrays are returned. The first instance of a value is used if there are + multiple. Default is False. + + Returns + ------- + intersect1d : ndarray + Sorted 1D array of common and unique elements. + comm1 : ndarray + The indices of the first occurrences of the common values in `ar1`. + Only provided if `return_indices` is True. + comm2 : ndarray + The indices of the first occurrences of the common values in `ar2`. + Only provided if `return_indices` is True. + + Examples + -------- + >>> import numpy as np + >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1]) + array([1, 3]) + + To intersect more than two arrays, use functools.reduce: + + >>> from functools import reduce + >>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) + array([3]) + + To return the indices of the values common to the input arrays + along with the intersected values: + + >>> x = np.array([1, 1, 2, 3, 4]) + >>> y = np.array([2, 1, 4, 6]) + >>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True) + >>> x_ind, y_ind + (array([0, 2, 4]), array([1, 0, 2])) + >>> xy, x[x_ind], y[y_ind] + (array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4])) + + """ + ar1 = np.asanyarray(ar1) + ar2 = np.asanyarray(ar2) + + if not assume_unique: + if return_indices: + ar1, ind1 = unique(ar1, return_index=True) + ar2, ind2 = unique(ar2, return_index=True) + else: + ar1 = unique(ar1) + ar2 = unique(ar2) + else: + ar1 = ar1.ravel() + ar2 = ar2.ravel() + + aux = np.concatenate((ar1, ar2)) + if return_indices: + aux_sort_indices = np.argsort(aux, kind='mergesort') + aux = aux[aux_sort_indices] + else: + aux.sort() + + mask = aux[1:] == aux[:-1] + int1d = aux[:-1][mask] + + if return_indices: + ar1_indices = aux_sort_indices[:-1][mask] + ar2_indices = aux_sort_indices[1:][mask] - ar1.size + if not assume_unique: + ar1_indices = ind1[ar1_indices] + ar2_indices = ind2[ar2_indices] + + return int1d, ar1_indices, ar2_indices + else: + return int1d + + +def _setxor1d_dispatcher(ar1, ar2, assume_unique=None): + return (ar1, ar2) + + +@array_function_dispatch(_setxor1d_dispatcher) +def setxor1d(ar1, ar2, assume_unique=False): + """ + Find the set exclusive-or of two arrays. + + Return the sorted, unique values that are in only one (not both) of the + input arrays. + + Parameters + ---------- + ar1, ar2 : array_like + Input arrays. + assume_unique : bool + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. + + Returns + ------- + setxor1d : ndarray + Sorted 1D array of unique values that are in only one of the input + arrays. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([1, 2, 3, 2, 4]) + >>> b = np.array([2, 3, 5, 7, 5]) + >>> np.setxor1d(a,b) + array([1, 4, 5, 7]) + + """ + if not assume_unique: + ar1 = unique(ar1) + ar2 = unique(ar2) + + aux = np.concatenate((ar1, ar2), axis=None) + if aux.size == 0: + return aux + + aux.sort() + flag = np.concatenate(([True], aux[1:] != aux[:-1], [True])) + return aux[flag[1:] & flag[:-1]] + + +def _isin(ar1, ar2, assume_unique=False, invert=False, *, kind=None): + # Ravel both arrays, behavior for the first array could be different + ar1 = np.asarray(ar1).ravel() + ar2 = np.asarray(ar2).ravel() + + # Ensure that iteration through object arrays yields size-1 arrays + if ar2.dtype == object: + ar2 = ar2.reshape(-1, 1) + + if kind not in {None, 'sort', 'table'}: + raise ValueError( + f"Invalid kind: '{kind}'. Please use None, 'sort' or 'table'.") + + # Can use the table method if all arrays are integers or boolean: + is_int_arrays = all(ar.dtype.kind in ("u", "i", "b") for ar in (ar1, ar2)) + use_table_method = is_int_arrays and kind in {None, 'table'} + + if use_table_method: + if ar2.size == 0: + if invert: + return np.ones_like(ar1, dtype=bool) + else: + return np.zeros_like(ar1, dtype=bool) + + # Convert booleans to uint8 so we can use the fast integer algorithm + if ar1.dtype == bool: + ar1 = ar1.astype(np.uint8) + if ar2.dtype == bool: + ar2 = ar2.astype(np.uint8) + + ar2_min = int(np.min(ar2)) + ar2_max = int(np.max(ar2)) + + ar2_range = ar2_max - ar2_min + + # Constraints on whether we can actually use the table method: + # 1. Assert memory usage is not too large + below_memory_constraint = ar2_range <= 6 * (ar1.size + ar2.size) + # 2. Check overflows for (ar2 - ar2_min); dtype=ar2.dtype + range_safe_from_overflow = ar2_range <= np.iinfo(ar2.dtype).max + + # Optimal performance is for approximately + # log10(size) > (log10(range) - 2.27) / 0.927. + # However, here we set the requirement that by default + # the intermediate array can only be 6x + # the combined memory allocation of the original + # arrays. See discussion on + # https://github.com/numpy/numpy/pull/12065. + + if ( + range_safe_from_overflow and + (below_memory_constraint or kind == 'table') + ): + + if invert: + outgoing_array = np.ones_like(ar1, dtype=bool) + else: + outgoing_array = np.zeros_like(ar1, dtype=bool) + + # Make elements 1 where the integer exists in ar2 + if invert: + isin_helper_ar = np.ones(ar2_range + 1, dtype=bool) + isin_helper_ar[ar2 - ar2_min] = 0 + else: + isin_helper_ar = np.zeros(ar2_range + 1, dtype=bool) + isin_helper_ar[ar2 - ar2_min] = 1 + + # Mask out elements we know won't work + basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min) + in_range_ar1 = ar1[basic_mask] + if in_range_ar1.size == 0: + # Nothing more to do, since all values are out of range. + return outgoing_array + + # Unfortunately, ar2_min can be out of range for `intp` even + # if the calculation result must fit in range (and be positive). + # In that case, use ar2.dtype which must work for all unmasked + # values. + try: + ar2_min = np.array(ar2_min, dtype=np.intp) + dtype = np.intp + except OverflowError: + dtype = ar2.dtype + + out = np.empty_like(in_range_ar1, dtype=np.intp) + outgoing_array[basic_mask] = isin_helper_ar[ + np.subtract(in_range_ar1, ar2_min, dtype=dtype, + out=out, casting="unsafe")] + + return outgoing_array + elif kind == 'table': # not range_safe_from_overflow + raise RuntimeError( + "You have specified kind='table', " + "but the range of values in `ar2` or `ar1` exceed the " + "maximum integer of the datatype. " + "Please set `kind` to None or 'sort'." + ) + elif kind == 'table': + raise ValueError( + "The 'table' method is only " + "supported for boolean or integer arrays. " + "Please select 'sort' or None for kind." + ) + + # Check if one of the arrays may contain arbitrary objects + contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject + + # This code is run when + # a) the first condition is true, making the code significantly faster + # b) the second condition is true (i.e. `ar1` or `ar2` may contain + # arbitrary objects), since then sorting is not guaranteed to work + if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object: + if invert: + mask = np.ones(len(ar1), dtype=bool) + for a in ar2: + mask &= (ar1 != a) + else: + mask = np.zeros(len(ar1), dtype=bool) + for a in ar2: + mask |= (ar1 == a) + return mask + + # Otherwise use sorting + if not assume_unique: + ar1, rev_idx = np.unique(ar1, return_inverse=True) + ar2 = np.unique(ar2) + + ar = np.concatenate((ar1, ar2)) + # We need this to be a stable sort, so always use 'mergesort' + # here. The values from the first array should always come before + # the values from the second array. + order = ar.argsort(kind='mergesort') + sar = ar[order] + if invert: + bool_ar = (sar[1:] != sar[:-1]) + else: + bool_ar = (sar[1:] == sar[:-1]) + flag = np.concatenate((bool_ar, [invert])) + ret = np.empty(ar.shape, dtype=bool) + ret[order] = flag + + if assume_unique: + return ret[:len(ar1)] + else: + return ret[rev_idx] + + +def _isin_dispatcher(element, test_elements, assume_unique=None, invert=None, + *, kind=None): + return (element, test_elements) + + +@array_function_dispatch(_isin_dispatcher) +def isin(element, test_elements, assume_unique=False, invert=False, *, + kind=None): + """ + Calculates ``element in test_elements``, broadcasting over `element` only. + Returns a boolean array of the same shape as `element` that is True + where an element of `element` is in `test_elements` and False otherwise. + + Parameters + ---------- + element : array_like + Input array. + test_elements : array_like + The values against which to test each value of `element`. + This argument is flattened if it is an array or array_like. + See notes for behavior with non-array-like parameters. + assume_unique : bool, optional + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. + invert : bool, optional + If True, the values in the returned array are inverted, as if + calculating `element not in test_elements`. Default is False. + ``np.isin(a, b, invert=True)`` is equivalent to (but faster + than) ``np.invert(np.isin(a, b))``. + kind : {None, 'sort', 'table'}, optional + The algorithm to use. This will not affect the final result, + but will affect the speed and memory use. The default, None, + will select automatically based on memory considerations. + + * If 'sort', will use a mergesort-based approach. This will have + a memory usage of roughly 6 times the sum of the sizes of + `element` and `test_elements`, not accounting for size of dtypes. + * If 'table', will use a lookup table approach similar + to a counting sort. This is only available for boolean and + integer arrays. This will have a memory usage of the + size of `element` plus the max-min value of `test_elements`. + `assume_unique` has no effect when the 'table' option is used. + * If None, will automatically choose 'table' if + the required memory allocation is less than or equal to + 6 times the sum of the sizes of `element` and `test_elements`, + otherwise will use 'sort'. This is done to not use + a large amount of memory by default, even though + 'table' may be faster in most cases. If 'table' is chosen, + `assume_unique` will have no effect. + + + Returns + ------- + isin : ndarray, bool + Has the same shape as `element`. The values `element[isin]` + are in `test_elements`. + + Notes + ----- + `isin` is an element-wise function version of the python keyword `in`. + ``isin(a, b)`` is roughly equivalent to + ``np.array([item in b for item in a])`` if `a` and `b` are 1-D sequences. + + `element` and `test_elements` are converted to arrays if they are not + already. If `test_elements` is a set (or other non-sequence collection) + it will be converted to an object array with one element, rather than an + array of the values contained in `test_elements`. This is a consequence + of the `array` constructor's way of handling non-sequence collections. + Converting the set to a list usually gives the desired behavior. + + Using ``kind='table'`` tends to be faster than `kind='sort'` if the + following relationship is true: + ``log10(len(test_elements)) > + (log10(max(test_elements)-min(test_elements)) - 2.27) / 0.927``, + but may use greater memory. The default value for `kind` will + be automatically selected based only on memory usage, so one may + manually set ``kind='table'`` if memory constraints can be relaxed. + + Examples + -------- + >>> import numpy as np + >>> element = 2*np.arange(4).reshape((2, 2)) + >>> element + array([[0, 2], + [4, 6]]) + >>> test_elements = [1, 2, 4, 8] + >>> mask = np.isin(element, test_elements) + >>> mask + array([[False, True], + [ True, False]]) + >>> element[mask] + array([2, 4]) + + The indices of the matched values can be obtained with `nonzero`: + + >>> np.nonzero(mask) + (array([0, 1]), array([1, 0])) + + The test can also be inverted: + + >>> mask = np.isin(element, test_elements, invert=True) + >>> mask + array([[ True, False], + [False, True]]) + >>> element[mask] + array([0, 6]) + + Because of how `array` handles sets, the following does not + work as expected: + + >>> test_set = {1, 2, 4, 8} + >>> np.isin(element, test_set) + array([[False, False], + [False, False]]) + + Casting the set to a list gives the expected result: + + >>> np.isin(element, list(test_set)) + array([[False, True], + [ True, False]]) + """ + element = np.asarray(element) + return _isin(element, test_elements, assume_unique=assume_unique, + invert=invert, kind=kind).reshape(element.shape) + + +def _union1d_dispatcher(ar1, ar2): + return (ar1, ar2) + + +@array_function_dispatch(_union1d_dispatcher) +def union1d(ar1, ar2): + """ + Find the union of two arrays. + + Return the unique, sorted array of values that are in either of the two + input arrays. + + Parameters + ---------- + ar1, ar2 : array_like + Input arrays. They are flattened if they are not already 1D. + + Returns + ------- + union1d : ndarray + Unique, sorted union of the input arrays. + + Examples + -------- + >>> import numpy as np + >>> np.union1d([-1, 0, 1], [-2, 0, 2]) + array([-2, -1, 0, 1, 2]) + + To find the union of more than two arrays, use functools.reduce: + + >>> from functools import reduce + >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) + array([1, 2, 3, 4, 6]) + """ + return unique(np.concatenate((ar1, ar2), axis=None)) + + +def _setdiff1d_dispatcher(ar1, ar2, assume_unique=None): + return (ar1, ar2) + + +@array_function_dispatch(_setdiff1d_dispatcher) +def setdiff1d(ar1, ar2, assume_unique=False): + """ + Find the set difference of two arrays. + + Return the unique values in `ar1` that are not in `ar2`. + + Parameters + ---------- + ar1 : array_like + Input array. + ar2 : array_like + Input comparison array. + assume_unique : bool + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. + + Returns + ------- + setdiff1d : ndarray + 1D array of values in `ar1` that are not in `ar2`. The result + is sorted when `assume_unique=False`, but otherwise only sorted + if the input is sorted. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([1, 2, 3, 2, 4, 1]) + >>> b = np.array([3, 4, 5, 6]) + >>> np.setdiff1d(a, b) + array([1, 2]) + + """ + if assume_unique: + ar1 = np.asarray(ar1).ravel() + else: + ar1 = unique(ar1) + ar2 = unique(ar2) + return ar1[_isin(ar1, ar2, assume_unique=True, invert=True)] diff --git a/local_packages/numpy/lib/_arraysetops_impl.pyi b/local_packages/numpy/lib/_arraysetops_impl.pyi new file mode 100644 index 0000000..da687da --- /dev/null +++ b/local_packages/numpy/lib/_arraysetops_impl.pyi @@ -0,0 +1,462 @@ +from typing import ( + Any, + Generic, + Literal as L, + NamedTuple, + SupportsIndex, + TypeAlias, + overload, +) +from typing_extensions import TypeVar + +import numpy as np +from numpy._typing import ( + ArrayLike, + NDArray, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeNumber_co, +) + +__all__ = [ + "ediff1d", + "intersect1d", + "isin", + "setdiff1d", + "setxor1d", + "union1d", + "unique", + "unique_all", + "unique_counts", + "unique_inverse", + "unique_values", +] + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_NumericT = TypeVar("_NumericT", bound=np.number | np.timedelta64 | np.object_) + +# Explicitly set all allowed values to prevent accidental castings to +# abstract dtypes (their common super-type). +# Only relevant if two or more arguments are parametrized, (e.g. `setdiff1d`) +# which could result in, for example, `int64` and `float64`producing a +# `number[_64Bit]` array +_EitherSCT = TypeVar( + "_EitherSCT", + np.bool, + np.int8, np.int16, np.int32, np.int64, np.intp, + np.uint8, np.uint16, np.uint32, np.uint64, np.uintp, + np.float16, np.float32, np.float64, np.longdouble, + np.complex64, np.complex128, np.clongdouble, + np.timedelta64, np.datetime64, + np.bytes_, np.str_, np.void, np.object_, + np.integer, np.floating, np.complexfloating, np.character, +) # fmt: skip + +_AnyArray: TypeAlias = NDArray[Any] +_IntArray: TypeAlias = NDArray[np.intp] + +### + +class UniqueAllResult(NamedTuple, Generic[_ScalarT]): + values: NDArray[_ScalarT] + indices: _IntArray + inverse_indices: _IntArray + counts: _IntArray + +class UniqueCountsResult(NamedTuple, Generic[_ScalarT]): + values: NDArray[_ScalarT] + counts: _IntArray + +class UniqueInverseResult(NamedTuple, Generic[_ScalarT]): + values: NDArray[_ScalarT] + inverse_indices: _IntArray + +# +@overload +def ediff1d( + ary: _ArrayLikeBool_co, + to_end: ArrayLike | None = None, + to_begin: ArrayLike | None = None, +) -> NDArray[np.int8]: ... +@overload +def ediff1d( + ary: _ArrayLike[_NumericT], + to_end: ArrayLike | None = None, + to_begin: ArrayLike | None = None, +) -> NDArray[_NumericT]: ... +@overload +def ediff1d( + ary: _ArrayLike[np.datetime64[Any]], + to_end: ArrayLike | None = None, + to_begin: ArrayLike | None = None, +) -> NDArray[np.timedelta64]: ... +@overload +def ediff1d( + ary: _ArrayLikeNumber_co, + to_end: ArrayLike | None = None, + to_begin: ArrayLike | None = None, +) -> _AnyArray: ... + +# +@overload # known scalar-type, FFF +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[False] = False, + return_inverse: L[False] = False, + return_counts: L[False] = False, + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> NDArray[_ScalarT]: ... +@overload # unknown scalar-type, FFF +def unique( + ar: ArrayLike, + return_index: L[False] = False, + return_inverse: L[False] = False, + return_counts: L[False] = False, + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> _AnyArray: ... +@overload # known scalar-type, TFF +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[True], + return_inverse: L[False] = False, + return_counts: L[False] = False, + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray]: ... +@overload # unknown scalar-type, TFF +def unique( + ar: ArrayLike, + return_index: L[True], + return_inverse: L[False] = False, + return_counts: L[False] = False, + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[_AnyArray, _IntArray]: ... +@overload # known scalar-type, FTF (positional) +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[False], + return_inverse: L[True], + return_counts: L[False] = False, + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray]: ... +@overload # known scalar-type, FTF (keyword) +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[False] = False, + *, + return_inverse: L[True], + return_counts: L[False] = False, + axis: SupportsIndex | None = None, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray]: ... +@overload # unknown scalar-type, FTF (positional) +def unique( + ar: ArrayLike, + return_index: L[False], + return_inverse: L[True], + return_counts: L[False] = False, + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[_AnyArray, _IntArray]: ... +@overload # unknown scalar-type, FTF (keyword) +def unique( + ar: ArrayLike, + return_index: L[False] = False, + *, + return_inverse: L[True], + return_counts: L[False] = False, + axis: SupportsIndex | None = None, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[_AnyArray, _IntArray]: ... +@overload # known scalar-type, FFT (positional) +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[False], + return_inverse: L[False], + return_counts: L[True], + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray]: ... +@overload # known scalar-type, FFT (keyword) +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[False] = False, + return_inverse: L[False] = False, + *, + return_counts: L[True], + axis: SupportsIndex | None = None, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray]: ... +@overload # unknown scalar-type, FFT (positional) +def unique( + ar: ArrayLike, + return_index: L[False], + return_inverse: L[False], + return_counts: L[True], + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[_AnyArray, _IntArray]: ... +@overload # unknown scalar-type, FFT (keyword) +def unique( + ar: ArrayLike, + return_index: L[False] = False, + return_inverse: L[False] = False, + *, + return_counts: L[True], + axis: SupportsIndex | None = None, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[_AnyArray, _IntArray]: ... +@overload # known scalar-type, TTF +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[True], + return_inverse: L[True], + return_counts: L[False] = False, + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +@overload # unknown scalar-type, TTF +def unique( + ar: ArrayLike, + return_index: L[True], + return_inverse: L[True], + return_counts: L[False] = False, + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +@overload # known scalar-type, TFT (positional) +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[True], + return_inverse: L[False], + return_counts: L[True], + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +@overload # known scalar-type, TFT (keyword) +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[True], + return_inverse: L[False] = False, + *, + return_counts: L[True], + axis: SupportsIndex | None = None, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +@overload # unknown scalar-type, TFT (positional) +def unique( + ar: ArrayLike, + return_index: L[True], + return_inverse: L[False], + return_counts: L[True], + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +@overload # unknown scalar-type, TFT (keyword) +def unique( + ar: ArrayLike, + return_index: L[True], + return_inverse: L[False] = False, + *, + return_counts: L[True], + axis: SupportsIndex | None = None, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +@overload # known scalar-type, FTT (positional) +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[False], + return_inverse: L[True], + return_counts: L[True], + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +@overload # known scalar-type, FTT (keyword) +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[False] = False, + *, + return_inverse: L[True], + return_counts: L[True], + axis: SupportsIndex | None = None, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +@overload # unknown scalar-type, FTT (positional) +def unique( + ar: ArrayLike, + return_index: L[False], + return_inverse: L[True], + return_counts: L[True], + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +@overload # unknown scalar-type, FTT (keyword) +def unique( + ar: ArrayLike, + return_index: L[False] = False, + *, + return_inverse: L[True], + return_counts: L[True], + axis: SupportsIndex | None = None, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +@overload # known scalar-type, TTT +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[True], + return_inverse: L[True], + return_counts: L[True], + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray, _IntArray]: ... +@overload # unknown scalar-type, TTT +def unique( + ar: ArrayLike, + return_index: L[True], + return_inverse: L[True], + return_counts: L[True], + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[_AnyArray, _IntArray, _IntArray, _IntArray]: ... + +# +@overload +def unique_all(x: _ArrayLike[_ScalarT]) -> UniqueAllResult[_ScalarT]: ... +@overload +def unique_all(x: ArrayLike) -> UniqueAllResult[Any]: ... + +# +@overload +def unique_counts(x: _ArrayLike[_ScalarT]) -> UniqueCountsResult[_ScalarT]: ... +@overload +def unique_counts(x: ArrayLike) -> UniqueCountsResult[Any]: ... + +# +@overload +def unique_inverse(x: _ArrayLike[_ScalarT]) -> UniqueInverseResult[_ScalarT]: ... +@overload +def unique_inverse(x: ArrayLike) -> UniqueInverseResult[Any]: ... + +# +@overload +def unique_values(x: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... +@overload +def unique_values(x: ArrayLike) -> _AnyArray: ... + +# +@overload # known scalar-type, return_indices=False (default) +def intersect1d( + ar1: _ArrayLike[_EitherSCT], + ar2: _ArrayLike[_EitherSCT], + assume_unique: bool = False, + return_indices: L[False] = False, +) -> NDArray[_EitherSCT]: ... +@overload # known scalar-type, return_indices=True (positional) +def intersect1d( + ar1: _ArrayLike[_EitherSCT], + ar2: _ArrayLike[_EitherSCT], + assume_unique: bool, + return_indices: L[True], +) -> tuple[NDArray[_EitherSCT], _IntArray, _IntArray]: ... +@overload # known scalar-type, return_indices=True (keyword) +def intersect1d( + ar1: _ArrayLike[_EitherSCT], + ar2: _ArrayLike[_EitherSCT], + assume_unique: bool = False, + *, + return_indices: L[True], +) -> tuple[NDArray[_EitherSCT], _IntArray, _IntArray]: ... +@overload # unknown scalar-type, return_indices=False (default) +def intersect1d( + ar1: ArrayLike, + ar2: ArrayLike, + assume_unique: bool = False, + return_indices: L[False] = False, +) -> _AnyArray: ... +@overload # unknown scalar-type, return_indices=True (positional) +def intersect1d( + ar1: ArrayLike, + ar2: ArrayLike, + assume_unique: bool, + return_indices: L[True], +) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +@overload # unknown scalar-type, return_indices=True (keyword) +def intersect1d( + ar1: ArrayLike, + ar2: ArrayLike, + assume_unique: bool = False, + *, + return_indices: L[True], +) -> tuple[_AnyArray, _IntArray, _IntArray]: ... + +# +@overload +def setxor1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT], assume_unique: bool = False) -> NDArray[_EitherSCT]: ... +@overload +def setxor1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _AnyArray: ... + +# +@overload +def union1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT]) -> NDArray[_EitherSCT]: ... +@overload +def union1d(ar1: ArrayLike, ar2: ArrayLike) -> _AnyArray: ... + +# +@overload +def setdiff1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT], assume_unique: bool = False) -> NDArray[_EitherSCT]: ... +@overload +def setdiff1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _AnyArray: ... + +# +def isin( + element: ArrayLike, + test_elements: ArrayLike, + assume_unique: bool = False, + invert: bool = False, + *, + kind: L["sort", "table"] | None = None, +) -> NDArray[np.bool]: ... diff --git a/local_packages/numpy/lib/_arrayterator_impl.py b/local_packages/numpy/lib/_arrayterator_impl.py new file mode 100644 index 0000000..5f7c5fc --- /dev/null +++ b/local_packages/numpy/lib/_arrayterator_impl.py @@ -0,0 +1,224 @@ +""" +A buffered iterator for big arrays. + +This module solves the problem of iterating over a big file-based array +without having to read it into memory. The `Arrayterator` class wraps +an array object, and when iterated it will return sub-arrays with at most +a user-specified number of elements. + +""" +from functools import reduce +from operator import mul + +__all__ = ['Arrayterator'] + + +class Arrayterator: + """ + Buffered iterator for big arrays. + + `Arrayterator` creates a buffered iterator for reading big arrays in small + contiguous blocks. The class is useful for objects stored in the + file system. It allows iteration over the object *without* reading + everything in memory; instead, small blocks are read and iterated over. + + `Arrayterator` can be used with any object that supports multidimensional + slices. This includes NumPy arrays, but also variables from + Scientific.IO.NetCDF or pynetcdf for example. + + Parameters + ---------- + var : array_like + The object to iterate over. + buf_size : int, optional + The buffer size. If `buf_size` is supplied, the maximum amount of + data that will be read into memory is `buf_size` elements. + Default is None, which will read as many element as possible + into memory. + + Attributes + ---------- + var + buf_size + start + stop + step + shape + flat + + See Also + -------- + numpy.ndenumerate : Multidimensional array iterator. + numpy.flatiter : Flat array iterator. + numpy.memmap : Create a memory-map to an array stored + in a binary file on disk. + + Notes + ----- + The algorithm works by first finding a "running dimension", along which + the blocks will be extracted. Given an array of dimensions + ``(d1, d2, ..., dn)``, e.g. if `buf_size` is smaller than ``d1``, the + first dimension will be used. If, on the other hand, + ``d1 < buf_size < d1*d2`` the second dimension will be used, and so on. + Blocks are extracted along this dimension, and when the last block is + returned the process continues from the next dimension, until all + elements have been read. + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) + >>> a_itor = np.lib.Arrayterator(a, 2) + >>> a_itor.shape + (3, 4, 5, 6) + + Now we can iterate over ``a_itor``, and it will return arrays of size + two. Since `buf_size` was smaller than any dimension, the first + dimension will be iterated over first: + + >>> for subarr in a_itor: + ... if not subarr.all(): + ... print(subarr, subarr.shape) # doctest: +SKIP + >>> # [[[[0 1]]]] (1, 1, 1, 2) + + """ + + __module__ = "numpy.lib" + + def __init__(self, var, buf_size=None): + self.var = var + self.buf_size = buf_size + + self.start = [0 for dim in var.shape] + self.stop = list(var.shape) + self.step = [1 for dim in var.shape] + + def __getattr__(self, attr): + return getattr(self.var, attr) + + def __getitem__(self, index): + """ + Return a new arrayterator. + + """ + # Fix index, handling ellipsis and incomplete slices. + if not isinstance(index, tuple): + index = (index,) + fixed = [] + length, dims = len(index), self.ndim + for slice_ in index: + if slice_ is Ellipsis: + fixed.extend([slice(None)] * (dims - length + 1)) + length = len(fixed) + elif isinstance(slice_, int): + fixed.append(slice(slice_, slice_ + 1, 1)) + else: + fixed.append(slice_) + index = tuple(fixed) + if len(index) < dims: + index += (slice(None),) * (dims - len(index)) + + # Return a new arrayterator object. + out = self.__class__(self.var, self.buf_size) + for i, (start, stop, step, slice_) in enumerate( + zip(self.start, self.stop, self.step, index)): + out.start[i] = start + (slice_.start or 0) + out.step[i] = step * (slice_.step or 1) + out.stop[i] = start + (slice_.stop or stop - start) + out.stop[i] = min(stop, out.stop[i]) + return out + + def __array__(self, dtype=None, copy=None): + """ + Return corresponding data. + + """ + slice_ = tuple(slice(*t) for t in zip( + self.start, self.stop, self.step)) + return self.var[slice_] + + @property + def flat(self): + """ + A 1-D flat iterator for Arrayterator objects. + + This iterator returns elements of the array to be iterated over in + `~lib.Arrayterator` one by one. + It is similar to `flatiter`. + + See Also + -------- + lib.Arrayterator + flatiter + + Examples + -------- + >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) + >>> a_itor = np.lib.Arrayterator(a, 2) + + >>> for subarr in a_itor.flat: + ... if not subarr: + ... print(subarr, type(subarr)) + ... + 0 + + """ + for block in self: + yield from block.flat + + @property + def shape(self): + """ + The shape of the array to be iterated over. + + For an example, see `Arrayterator`. + + """ + return tuple(((stop - start - 1) // step + 1) for start, stop, step in + zip(self.start, self.stop, self.step)) + + def __iter__(self): + # Skip arrays with degenerate dimensions + if [dim for dim in self.shape if dim <= 0]: + return + + start = self.start[:] + stop = self.stop[:] + step = self.step[:] + ndims = self.var.ndim + + while True: + count = self.buf_size or reduce(mul, self.shape) + + # iterate over each dimension, looking for the + # running dimension (ie, the dimension along which + # the blocks will be built from) + rundim = 0 + for i in range(ndims - 1, -1, -1): + # if count is zero we ran out of elements to read + # along higher dimensions, so we read only a single position + if count == 0: + stop[i] = start[i] + 1 + elif count <= self.shape[i]: + # limit along this dimension + stop[i] = start[i] + count * step[i] + rundim = i + else: + # read everything along this dimension + stop[i] = self.stop[i] + stop[i] = min(self.stop[i], stop[i]) + count = count // self.shape[i] + + # yield a block + slice_ = tuple(slice(*t) for t in zip(start, stop, step)) + yield self.var[slice_] + + # Update start position, taking care of overflow to + # other dimensions + start[rundim] = stop[rundim] # start where we stopped + for i in range(ndims - 1, 0, -1): + if start[i] >= self.stop[i]: + start[i] = self.start[i] + start[i - 1] += self.step[i - 1] + if start[0] >= self.stop[0]: + return diff --git a/local_packages/numpy/lib/_arrayterator_impl.pyi b/local_packages/numpy/lib/_arrayterator_impl.pyi new file mode 100644 index 0000000..5fd589a --- /dev/null +++ b/local_packages/numpy/lib/_arrayterator_impl.pyi @@ -0,0 +1,45 @@ +# pyright: reportIncompatibleMethodOverride=false + +from collections.abc import Generator +from types import EllipsisType +from typing import Any, Final, TypeAlias, overload +from typing_extensions import TypeVar + +import numpy as np +from numpy._typing import _AnyShape, _Shape + +__all__ = ["Arrayterator"] + +_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) + +_AnyIndex: TypeAlias = EllipsisType | int | slice | tuple[EllipsisType | int | slice, ...] + +# NOTE: In reality `Arrayterator` does not actually inherit from `ndarray`, +# but its ``__getattr__` method does wrap around the former and thus has +# access to all its methods + +class Arrayterator(np.ndarray[_ShapeT_co, _DTypeT_co]): + var: np.ndarray[_ShapeT_co, _DTypeT_co] # type: ignore[assignment] + buf_size: Final[int | None] + start: Final[list[int]] + stop: Final[list[int]] + step: Final[list[int]] + + @property # type: ignore[misc] + def shape(self) -> _ShapeT_co: ... + @property + def flat(self: Arrayterator[Any, np.dtype[_ScalarT]]) -> Generator[_ScalarT]: ... # type: ignore[override] + + # + def __init__(self, /, var: np.ndarray[_ShapeT_co, _DTypeT_co], buf_size: int | None = None) -> None: ... + def __getitem__(self, index: _AnyIndex, /) -> Arrayterator[_AnyShape, _DTypeT_co]: ... # type: ignore[override] + def __iter__(self) -> Generator[np.ndarray[_AnyShape, _DTypeT_co]]: ... + + # + @overload # type: ignore[override] + def __array__(self, /, dtype: None = None, copy: bool | None = None) -> np.ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __array__(self, /, dtype: _DTypeT, copy: bool | None = None) -> np.ndarray[_ShapeT_co, _DTypeT]: ... diff --git a/local_packages/numpy/lib/_datasource.py b/local_packages/numpy/lib/_datasource.py new file mode 100644 index 0000000..72398c5 --- /dev/null +++ b/local_packages/numpy/lib/_datasource.py @@ -0,0 +1,700 @@ +"""A file interface for handling local and remote data files. + +The goal of datasource is to abstract some of the file system operations +when dealing with data files so the researcher doesn't have to know all the +low-level details. Through datasource, a researcher can obtain and use a +file with one function call, regardless of location of the file. + +DataSource is meant to augment standard python libraries, not replace them. +It should work seamlessly with standard file IO operations and the os +module. + +DataSource files can originate locally or remotely: + +- local files : '/home/guido/src/local/data.txt' +- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt' + +DataSource files can also be compressed or uncompressed. Currently only +gzip, bz2 and xz are supported. + +Example:: + + >>> # Create a DataSource, use os.curdir (default) for local storage. + >>> from numpy import DataSource + >>> ds = DataSource() + >>> + >>> # Open a remote file. + >>> # DataSource downloads the file, stores it locally in: + >>> # './www.google.com/index.html' + >>> # opens the file and returns a file object. + >>> fp = ds.open('http://www.google.com/') # doctest: +SKIP + >>> + >>> # Use the file as you normally would + >>> fp.read() # doctest: +SKIP + >>> fp.close() # doctest: +SKIP + +""" +import os + +from numpy._utils import set_module + +_open = open + + +def _check_mode(mode, encoding, newline): + """Check mode and that encoding and newline are compatible. + + Parameters + ---------- + mode : str + File open mode. + encoding : str + File encoding. + newline : str + Newline for text files. + + """ + if "t" in mode: + if "b" in mode: + raise ValueError(f"Invalid mode: {mode!r}") + else: + if encoding is not None: + raise ValueError("Argument 'encoding' not supported in binary mode") + if newline is not None: + raise ValueError("Argument 'newline' not supported in binary mode") + + +# Using a class instead of a module-level dictionary +# to reduce the initial 'import numpy' overhead by +# deferring the import of lzma, bz2 and gzip until needed + +# TODO: .zip support, .tar support? +class _FileOpeners: + """ + Container for different methods to open (un-)compressed files. + + `_FileOpeners` contains a dictionary that holds one method for each + supported file format. Attribute lookup is implemented in such a way + that an instance of `_FileOpeners` itself can be indexed with the keys + of that dictionary. Currently uncompressed files as well as files + compressed with ``gzip``, ``bz2`` or ``xz`` compression are supported. + + Notes + ----- + `_file_openers`, an instance of `_FileOpeners`, is made available for + use in the `_datasource` module. + + Examples + -------- + >>> import gzip + >>> np.lib._datasource._file_openers.keys() + [None, '.bz2', '.gz', '.xz', '.lzma'] + >>> np.lib._datasource._file_openers['.gz'] is gzip.open + True + + """ + + def __init__(self): + self._loaded = False + self._file_openers = {None: open} + + def _load(self): + if self._loaded: + return + + try: + import bz2 + self._file_openers[".bz2"] = bz2.open + except ImportError: + pass + + try: + import gzip + self._file_openers[".gz"] = gzip.open + except ImportError: + pass + + try: + import lzma + self._file_openers[".xz"] = lzma.open + self._file_openers[".lzma"] = lzma.open + except (ImportError, AttributeError): + # There are incompatible backports of lzma that do not have the + # lzma.open attribute, so catch that as well as ImportError. + pass + + self._loaded = True + + def keys(self): + """ + Return the keys of currently supported file openers. + + Parameters + ---------- + None + + Returns + ------- + keys : list + The keys are None for uncompressed files and the file extension + strings (i.e. ``'.gz'``, ``'.xz'``) for supported compression + methods. + + """ + self._load() + return list(self._file_openers.keys()) + + def __getitem__(self, key): + self._load() + return self._file_openers[key] + + +_file_openers = _FileOpeners() + +def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None): + """ + Open `path` with `mode` and return the file object. + + If ``path`` is an URL, it will be downloaded, stored in the + `DataSource` `destpath` directory and opened from there. + + Parameters + ---------- + path : str or pathlib.Path + Local file path or URL to open. + mode : str, optional + Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to + append. Available modes depend on the type of object specified by + path. Default is 'r'. + destpath : str, optional + Path to the directory where the source file gets downloaded to for + use. If `destpath` is None, a temporary directory will be created. + The default path is the current directory. + encoding : {None, str}, optional + Open text file with given encoding. The default encoding will be + what `open` uses. + newline : {None, str}, optional + Newline to use when reading text file. + + Returns + ------- + out : file object + The opened file. + + Notes + ----- + This is a convenience function that instantiates a `DataSource` and + returns the file object from ``DataSource.open(path)``. + + """ + + ds = DataSource(destpath) + return ds.open(path, mode, encoding=encoding, newline=newline) + + +@set_module('numpy.lib.npyio') +class DataSource: + """ + DataSource(destpath='.') + + A generic data source file (file, http, ftp, ...). + + DataSources can be local files or remote files/URLs. The files may + also be compressed or uncompressed. DataSource hides some of the + low-level details of downloading the file, allowing you to simply pass + in a valid file path (or URL) and obtain a file object. + + Parameters + ---------- + destpath : str or None, optional + Path to the directory where the source file gets downloaded to for + use. If `destpath` is None, a temporary directory will be created. + The default path is the current directory. + + Notes + ----- + URLs require a scheme string (``http://``) to be used, without it they + will fail:: + + >>> repos = np.lib.npyio.DataSource() + >>> repos.exists('www.google.com/index.html') + False + >>> repos.exists('http://www.google.com/index.html') + True + + Temporary directories are deleted when the DataSource is deleted. + + Examples + -------- + :: + + >>> ds = np.lib.npyio.DataSource('/home/guido') + >>> urlname = 'http://www.google.com/' + >>> gfile = ds.open('http://www.google.com/') + >>> ds.abspath(urlname) + '/home/guido/www.google.com/index.html' + + >>> ds = np.lib.npyio.DataSource(None) # use with temporary file + >>> ds.open('/home/guido/foobar.txt') + + >>> ds.abspath('/home/guido/foobar.txt') + '/tmp/.../home/guido/foobar.txt' + + """ + + def __init__(self, destpath=os.curdir): + """Create a DataSource with a local path at destpath.""" + if destpath: + self._destpath = os.path.abspath(destpath) + self._istmpdest = False + else: + import tempfile # deferring import to improve startup time + self._destpath = tempfile.mkdtemp() + self._istmpdest = True + + def __del__(self): + # Remove temp directories + if hasattr(self, '_istmpdest') and self._istmpdest: + import shutil + + shutil.rmtree(self._destpath) + + def _iszip(self, filename): + """Test if the filename is a zip file by looking at the file extension. + + """ + fname, ext = os.path.splitext(filename) + return ext in _file_openers.keys() + + def _iswritemode(self, mode): + """Test if the given mode will open a file for writing.""" + + # Currently only used to test the bz2 files. + _writemodes = ("w", "+") + return any(c in _writemodes for c in mode) + + def _splitzipext(self, filename): + """Split zip extension from filename and return filename. + + Returns + ------- + base, zip_ext : {tuple} + + """ + + if self._iszip(filename): + return os.path.splitext(filename) + else: + return filename, None + + def _possible_names(self, filename): + """Return a tuple containing compressed filename variations.""" + names = [filename] + if not self._iszip(filename): + for zipext in _file_openers.keys(): + if zipext: + names.append(filename + zipext) + return names + + def _isurl(self, path): + """Test if path is a net location. Tests the scheme and netloc.""" + + # We do this here to reduce the 'import numpy' initial import time. + from urllib.parse import urlparse + + # BUG : URLs require a scheme string ('http://') to be used. + # www.google.com will fail. + # Should we prepend the scheme for those that don't have it and + # test that also? Similar to the way we append .gz and test for + # for compressed versions of files. + + scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path) + return bool(scheme and netloc) + + def _cache(self, path): + """Cache the file specified by path. + + Creates a copy of the file in the datasource cache. + + """ + # We import these here because importing them is slow and + # a significant fraction of numpy's total import time. + import shutil + from urllib.request import urlopen + + upath = self.abspath(path) + + # ensure directory exists + if not os.path.exists(os.path.dirname(upath)): + os.makedirs(os.path.dirname(upath)) + + # TODO: Doesn't handle compressed files! + if self._isurl(path): + with urlopen(path) as openedurl: + with _open(upath, 'wb') as f: + shutil.copyfileobj(openedurl, f) + else: + shutil.copyfile(path, upath) + return upath + + def _findfile(self, path): + """Searches for ``path`` and returns full path if found. + + If path is an URL, _findfile will cache a local copy and return the + path to the cached file. If path is a local file, _findfile will + return a path to that local file. + + The search will include possible compressed versions of the file + and return the first occurrence found. + + """ + + # Build list of possible local file paths + if not self._isurl(path): + # Valid local paths + filelist = self._possible_names(path) + # Paths in self._destpath + filelist += self._possible_names(self.abspath(path)) + else: + # Cached URLs in self._destpath + filelist = self._possible_names(self.abspath(path)) + # Remote URLs + filelist = filelist + self._possible_names(path) + + for name in filelist: + if self.exists(name): + if self._isurl(name): + name = self._cache(name) + return name + return None + + def abspath(self, path): + """ + Return absolute path of file in the DataSource directory. + + If `path` is an URL, then `abspath` will return either the location + the file exists locally or the location it would exist when opened + using the `open` method. + + Parameters + ---------- + path : str or pathlib.Path + Can be a local file or a remote URL. + + Returns + ------- + out : str + Complete path, including the `DataSource` destination directory. + + Notes + ----- + The functionality is based on `os.path.abspath`. + + """ + # We do this here to reduce the 'import numpy' initial import time. + from urllib.parse import urlparse + + # TODO: This should be more robust. Handles case where path includes + # the destpath, but not other sub-paths. Failing case: + # path = /home/guido/datafile.txt + # destpath = /home/alex/ + # upath = self.abspath(path) + # upath == '/home/alex/home/guido/datafile.txt' + + # handle case where path includes self._destpath + splitpath = path.split(self._destpath, 2) + if len(splitpath) > 1: + path = splitpath[1] + scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path) + netloc = self._sanitize_relative_path(netloc) + upath = self._sanitize_relative_path(upath) + return os.path.join(self._destpath, netloc, upath) + + def _sanitize_relative_path(self, path): + """Return a sanitised relative path for which + os.path.abspath(os.path.join(base, path)).startswith(base) + """ + last = None + path = os.path.normpath(path) + while path != last: + last = path + # Note: os.path.join treats '/' as os.sep on Windows + path = path.lstrip(os.sep).lstrip('/') + path = path.lstrip(os.pardir).removeprefix('..') + drive, path = os.path.splitdrive(path) # for Windows + return path + + def exists(self, path): + """ + Test if path exists. + + Test if `path` exists as (and in this order): + + - a local file. + - a remote URL that has been downloaded and stored locally in the + `DataSource` directory. + - a remote URL that has not been downloaded, but is valid and + accessible. + + Parameters + ---------- + path : str or pathlib.Path + Can be a local file or a remote URL. + + Returns + ------- + out : bool + True if `path` exists. + + Notes + ----- + When `path` is an URL, `exists` will return True if it's either + stored locally in the `DataSource` directory, or is a valid remote + URL. `DataSource` does not discriminate between the two, the file + is accessible if it exists in either location. + + """ + + # First test for local path + if os.path.exists(path): + return True + + # We import this here because importing urllib is slow and + # a significant fraction of numpy's total import time. + from urllib.error import URLError + from urllib.request import urlopen + + # Test cached url + upath = self.abspath(path) + if os.path.exists(upath): + return True + + # Test remote url + if self._isurl(path): + try: + netfile = urlopen(path) + netfile.close() + del netfile + return True + except URLError: + return False + return False + + def open(self, path, mode='r', encoding=None, newline=None): + """ + Open and return file-like object. + + If `path` is an URL, it will be downloaded, stored in the + `DataSource` directory and opened from there. + + Parameters + ---------- + path : str or pathlib.Path + Local file path or URL to open. + mode : {'r', 'w', 'a'}, optional + Mode to open `path`. Mode 'r' for reading, 'w' for writing, + 'a' to append. Available modes depend on the type of object + specified by `path`. Default is 'r'. + encoding : {None, str}, optional + Open text file with given encoding. The default encoding will be + what `open` uses. + newline : {None, str}, optional + Newline to use when reading text file. + + Returns + ------- + out : file object + File object. + + """ + + # TODO: There is no support for opening a file for writing which + # doesn't exist yet (creating a file). Should there be? + + # TODO: Add a ``subdir`` parameter for specifying the subdirectory + # used to store URLs in self._destpath. + + if self._isurl(path) and self._iswritemode(mode): + raise ValueError("URLs are not writeable") + + # NOTE: _findfile will fail on a new file opened for writing. + found = self._findfile(path) + if found: + _fname, ext = self._splitzipext(found) + if ext == 'bz2': + mode.replace("+", "") + return _file_openers[ext](found, mode=mode, + encoding=encoding, newline=newline) + else: + raise FileNotFoundError(f"{path} not found.") + + +class Repository (DataSource): + """ + Repository(baseurl, destpath='.') + + A data repository where multiple DataSource's share a base + URL/directory. + + `Repository` extends `DataSource` by prepending a base URL (or + directory) to all the files it handles. Use `Repository` when you will + be working with multiple files from one base URL. Initialize + `Repository` with the base URL, then refer to each file by its filename + only. + + Parameters + ---------- + baseurl : str + Path to the local directory or remote location that contains the + data files. + destpath : str or None, optional + Path to the directory where the source file gets downloaded to for + use. If `destpath` is None, a temporary directory will be created. + The default path is the current directory. + + Examples + -------- + To analyze all files in the repository, do something like this + (note: this is not self-contained code):: + + >>> repos = np.lib._datasource.Repository('/home/user/data/dir/') + >>> for filename in filelist: + ... fp = repos.open(filename) + ... fp.analyze() + ... fp.close() + + Similarly you could use a URL for a repository:: + + >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data') + + """ + + def __init__(self, baseurl, destpath=os.curdir): + """Create a Repository with a shared url or directory of baseurl.""" + DataSource.__init__(self, destpath=destpath) + self._baseurl = baseurl + + def __del__(self): + DataSource.__del__(self) + + def _fullpath(self, path): + """Return complete path for path. Prepends baseurl if necessary.""" + splitpath = path.split(self._baseurl, 2) + if len(splitpath) == 1: + result = os.path.join(self._baseurl, path) + else: + result = path # path contains baseurl already + return result + + def _findfile(self, path): + """Extend DataSource method to prepend baseurl to ``path``.""" + return DataSource._findfile(self, self._fullpath(path)) + + def abspath(self, path): + """ + Return absolute path of file in the Repository directory. + + If `path` is an URL, then `abspath` will return either the location + the file exists locally or the location it would exist when opened + using the `open` method. + + Parameters + ---------- + path : str or pathlib.Path + Can be a local file or a remote URL. This may, but does not + have to, include the `baseurl` with which the `Repository` was + initialized. + + Returns + ------- + out : str + Complete path, including the `DataSource` destination directory. + + """ + return DataSource.abspath(self, self._fullpath(path)) + + def exists(self, path): + """ + Test if path exists prepending Repository base URL to path. + + Test if `path` exists as (and in this order): + + - a local file. + - a remote URL that has been downloaded and stored locally in the + `DataSource` directory. + - a remote URL that has not been downloaded, but is valid and + accessible. + + Parameters + ---------- + path : str or pathlib.Path + Can be a local file or a remote URL. This may, but does not + have to, include the `baseurl` with which the `Repository` was + initialized. + + Returns + ------- + out : bool + True if `path` exists. + + Notes + ----- + When `path` is an URL, `exists` will return True if it's either + stored locally in the `DataSource` directory, or is a valid remote + URL. `DataSource` does not discriminate between the two, the file + is accessible if it exists in either location. + + """ + return DataSource.exists(self, self._fullpath(path)) + + def open(self, path, mode='r', encoding=None, newline=None): + """ + Open and return file-like object prepending Repository base URL. + + If `path` is an URL, it will be downloaded, stored in the + DataSource directory and opened from there. + + Parameters + ---------- + path : str or pathlib.Path + Local file path or URL to open. This may, but does not have to, + include the `baseurl` with which the `Repository` was + initialized. + mode : {'r', 'w', 'a'}, optional + Mode to open `path`. Mode 'r' for reading, 'w' for writing, + 'a' to append. Available modes depend on the type of object + specified by `path`. Default is 'r'. + encoding : {None, str}, optional + Open text file with given encoding. The default encoding will be + what `open` uses. + newline : {None, str}, optional + Newline to use when reading text file. + + Returns + ------- + out : file object + File object. + + """ + return DataSource.open(self, self._fullpath(path), mode, + encoding=encoding, newline=newline) + + def listdir(self): + """ + List files in the source Repository. + + Returns + ------- + files : list of str or pathlib.Path + List of file names (not containing a directory part). + + Notes + ----- + Does not currently work for remote repositories. + + """ + if self._isurl(self._baseurl): + raise NotImplementedError( + "Directory listing of URLs, not supported yet.") + else: + return os.listdir(self._baseurl) diff --git a/local_packages/numpy/lib/_datasource.pyi b/local_packages/numpy/lib/_datasource.pyi new file mode 100644 index 0000000..dba0434 --- /dev/null +++ b/local_packages/numpy/lib/_datasource.pyi @@ -0,0 +1,30 @@ +from _typeshed import OpenBinaryMode, OpenTextMode +from pathlib import Path +from typing import IO, Any, TypeAlias + +_Mode: TypeAlias = OpenBinaryMode | OpenTextMode + +### + +# exported in numpy.lib.nppyio +class DataSource: + def __init__(self, /, destpath: Path | str | None = ".") -> None: ... + def __del__(self, /) -> None: ... + def abspath(self, /, path: str) -> str: ... + def exists(self, /, path: str) -> bool: ... + + # Whether the file-object is opened in string or bytes mode (by default) + # depends on the file-extension of `path` + def open(self, /, path: str, mode: _Mode = "r", encoding: str | None = None, newline: str | None = None) -> IO[Any]: ... + +class Repository(DataSource): + def __init__(self, /, baseurl: str, destpath: str | None = ".") -> None: ... + def listdir(self, /) -> list[str]: ... + +def open( + path: str, + mode: _Mode = "r", + destpath: str | None = ".", + encoding: str | None = None, + newline: str | None = None, +) -> IO[Any]: ... diff --git a/local_packages/numpy/lib/_format_impl.py b/local_packages/numpy/lib/_format_impl.py new file mode 100644 index 0000000..2bb5577 --- /dev/null +++ b/local_packages/numpy/lib/_format_impl.py @@ -0,0 +1,1036 @@ +""" +Binary serialization + +NPY format +========== + +A simple format for saving numpy arrays to disk with the full +information about them. + +The ``.npy`` format is the standard binary file format in NumPy for +persisting a *single* arbitrary NumPy array on disk. The format stores all +of the shape and dtype information necessary to reconstruct the array +correctly even on another machine with a different architecture. +The format is designed to be as simple as possible while achieving +its limited goals. + +The ``.npz`` format is the standard format for persisting *multiple* NumPy +arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy`` +files, one for each array. + +Capabilities +------------ + +- Can represent all NumPy arrays including nested record arrays and + object arrays. + +- Represents the data in its native binary form. + +- Supports Fortran-contiguous arrays directly. + +- Stores all of the necessary information to reconstruct the array + including shape and dtype on a machine of a different + architecture. Both little-endian and big-endian arrays are + supported, and a file with little-endian numbers will yield + a little-endian array on any machine reading the file. The + types are described in terms of their actual sizes. For example, + if a machine with a 64-bit C "long int" writes out an array with + "long ints", a reading machine with 32-bit C "long ints" will yield + an array with 64-bit integers. + +- Is straightforward to reverse engineer. Datasets often live longer than + the programs that created them. A competent developer should be + able to create a solution in their preferred programming language to + read most ``.npy`` files that they have been given without much + documentation. + +- Allows memory-mapping of the data. See `open_memmap`. + +- Can be read from a filelike stream object instead of an actual file. + +- Stores object arrays, i.e. arrays containing elements that are arbitrary + Python objects. Files with object arrays are not to be mmapable, but + can be read and written to disk. + +Limitations +----------- + +- Arbitrary subclasses of numpy.ndarray are not completely preserved. + Subclasses will be accepted for writing, but only the array data will + be written out. A regular numpy.ndarray object will be created + upon reading the file. + +.. warning:: + + Due to limitations in the interpretation of structured dtypes, dtypes + with fields with empty names will have the names replaced by 'f0', 'f1', + etc. Such arrays will not round-trip through the format entirely + accurately. The data is intact; only the field names will differ. We are + working on a fix for this. This fix will not require a change in the + file format. The arrays with such structures can still be saved and + restored, and the correct dtype may be restored by using the + ``loadedarray.view(correct_dtype)`` method. + +File extensions +--------------- + +We recommend using the ``.npy`` and ``.npz`` extensions for files saved +in this format. This is by no means a requirement; applications may wish +to use these file formats but use an extension specific to the +application. In the absence of an obvious alternative, however, +we suggest using ``.npy`` and ``.npz``. + +Version numbering +----------------- + +The version numbering of these formats is independent of NumPy version +numbering. If the format is upgraded, the code in `numpy.io` will still +be able to read and write Version 1.0 files. + +Format Version 1.0 +------------------ + +The first 6 bytes are a magic string: exactly ``\\x93NUMPY``. + +The next 1 byte is an unsigned byte: the major version number of the file +format, e.g. ``\\x01``. + +The next 1 byte is an unsigned byte: the minor version number of the file +format, e.g. ``\\x00``. Note: the version of the file format is not tied +to the version of the numpy package. + +The next 2 bytes form a little-endian unsigned short int: the length of +the header data HEADER_LEN. + +The next HEADER_LEN bytes form the header data describing the array's +format. It is an ASCII string which contains a Python literal expression +of a dictionary. It is terminated by a newline (``\\n``) and padded with +spaces (``\\x20``) to make the total of +``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible +by 64 for alignment purposes. + +The dictionary contains three keys: + + "descr" : dtype.descr + An object that can be passed as an argument to the `numpy.dtype` + constructor to create the array's dtype. + "fortran_order" : bool + Whether the array data is Fortran-contiguous or not. Since + Fortran-contiguous arrays are a common form of non-C-contiguity, + we allow them to be written directly to disk for efficiency. + "shape" : tuple of int + The shape of the array. + +For repeatability and readability, the dictionary keys are sorted in +alphabetic order. This is for convenience only. A writer SHOULD implement +this if possible. A reader MUST NOT depend on this. + +Following the header comes the array data. If the dtype contains Python +objects (i.e. ``dtype.hasobject is True``), then the data is a Python +pickle of the array. Otherwise the data is the contiguous (either C- +or Fortran-, depending on ``fortran_order``) bytes of the array. +Consumers can figure out the number of bytes by multiplying the number +of elements given by the shape (noting that ``shape=()`` means there is +1 element) by ``dtype.itemsize``. + +Format Version 2.0 +------------------ + +The version 1.0 format only allowed the array header to have a total size of +65535 bytes. This can be exceeded by structured arrays with a large number of +columns. The version 2.0 format extends the header size to 4 GiB. +`numpy.save` will automatically save in 2.0 format if the data requires it, +else it will always use the more compatible 1.0 format. + +The description of the fourth element of the header therefore has become: +"The next 4 bytes form a little-endian unsigned int: the length of the header +data HEADER_LEN." + +Format Version 3.0 +------------------ + +This version replaces the ASCII string (which in practice was latin1) with +a utf8-encoded string, so supports structured types with any unicode field +names. + +Notes +----- +The ``.npy`` format, including motivation for creating it and a comparison of +alternatives, is described in the +:doc:`"npy-format" NEP `, however details have +evolved with time and this document is more current. + +""" +import io +import os +import pickle +import warnings + +import numpy +from numpy._utils import set_module +from numpy.lib._utils_impl import drop_metadata + +__all__ = [] + +drop_metadata.__module__ = "numpy.lib.format" + +EXPECTED_KEYS = {'descr', 'fortran_order', 'shape'} +MAGIC_PREFIX = b'\x93NUMPY' +MAGIC_LEN = len(MAGIC_PREFIX) + 2 +ARRAY_ALIGN = 64 # plausible values are powers of 2 between 16 and 4096 +BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes +# allow growth within the address space of a 64 bit machine along one axis +GROWTH_AXIS_MAX_DIGITS = 21 # = len(str(8*2**64-1)) hypothetical int1 dtype + +# difference between version 1.0 and 2.0 is a 4 byte (I) header length +# instead of 2 bytes (H) allowing storage of large structured arrays +_header_size_info = { + (1, 0): (' 255: + raise ValueError("major version must be 0 <= major < 256") + if minor < 0 or minor > 255: + raise ValueError("minor version must be 0 <= minor < 256") + return MAGIC_PREFIX + bytes([major, minor]) + + +@set_module("numpy.lib.format") +def read_magic(fp): + """ Read the magic string to get the version of the file format. + + Parameters + ---------- + fp : filelike object + + Returns + ------- + major : int + minor : int + """ + magic_str = _read_bytes(fp, MAGIC_LEN, "magic string") + if magic_str[:-2] != MAGIC_PREFIX: + msg = "the magic string is not correct; expected %r, got %r" + raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2])) + major, minor = magic_str[-2:] + return major, minor + + +@set_module("numpy.lib.format") +def dtype_to_descr(dtype): + """ + Get a serializable descriptor from the dtype. + + The .descr attribute of a dtype object cannot be round-tripped through + the dtype() constructor. Simple types, like dtype('float32'), have + a descr which looks like a record array with one field with '' as + a name. The dtype() constructor interprets this as a request to give + a default name. Instead, we construct descriptor that can be passed to + dtype(). + + Parameters + ---------- + dtype : dtype + The dtype of the array that will be written to disk. + + Returns + ------- + descr : object + An object that can be passed to `numpy.dtype()` in order to + replicate the input dtype. + + """ + # NOTE: that drop_metadata may not return the right dtype e.g. for user + # dtypes. In that case our code below would fail the same, though. + new_dtype = drop_metadata(dtype) + if new_dtype is not dtype: + warnings.warn("metadata on a dtype is not saved to an npy/npz. " + "Use another format (such as pickle) to store it.", + UserWarning, stacklevel=2) + dtype = new_dtype + + if dtype.names is not None: + # This is a record array. The .descr is fine. XXX: parts of the + # record array with an empty name, like padding bytes, still get + # fiddled with. This needs to be fixed in the C implementation of + # dtype(). + return dtype.descr + elif not type(dtype)._legacy: + # this must be a user-defined dtype since numpy does not yet expose any + # non-legacy dtypes in the public API + # + # non-legacy dtypes don't yet have __array_interface__ + # support. Instead, as a hack, we use pickle to save the array, and lie + # that the dtype is object. When the array is loaded, the descriptor is + # unpickled with the array and the object dtype in the header is + # discarded. + # + # a future NEP should define a way to serialize user-defined + # descriptors and ideally work out the possible security implications + warnings.warn("Custom dtypes are saved as python objects using the " + "pickle protocol. Loading this file requires " + "allow_pickle=True to be set.", + UserWarning, stacklevel=2) + return "|O" + else: + return dtype.str + + +@set_module("numpy.lib.format") +def descr_to_dtype(descr): + """ + Returns a dtype based off the given description. + + This is essentially the reverse of `~lib.format.dtype_to_descr`. It will + remove the valueless padding fields created by, i.e. simple fields like + dtype('float32'), and then convert the description to its corresponding + dtype. + + Parameters + ---------- + descr : object + The object retrieved by dtype.descr. Can be passed to + `numpy.dtype` in order to replicate the input dtype. + + Returns + ------- + dtype : dtype + The dtype constructed by the description. + + """ + if isinstance(descr, str): + # No padding removal needed + return numpy.dtype(descr) + elif isinstance(descr, tuple): + # subtype, will always have a shape descr[1] + dt = descr_to_dtype(descr[0]) + return numpy.dtype((dt, descr[1])) + + titles = [] + names = [] + formats = [] + offsets = [] + offset = 0 + for field in descr: + if len(field) == 2: + name, descr_str = field + dt = descr_to_dtype(descr_str) + else: + name, descr_str, shape = field + dt = numpy.dtype((descr_to_dtype(descr_str), shape)) + + # Ignore padding bytes, which will be void bytes with '' as name + # Once support for blank names is removed, only "if name == ''" needed) + is_pad = (name == '' and dt.type is numpy.void and dt.names is None) + if not is_pad: + title, name = name if isinstance(name, tuple) else (None, name) + titles.append(title) + names.append(name) + formats.append(dt) + offsets.append(offset) + offset += dt.itemsize + + return numpy.dtype({'names': names, 'formats': formats, 'titles': titles, + 'offsets': offsets, 'itemsize': offset}) + + +@set_module("numpy.lib.format") +def header_data_from_array_1_0(array): + """ Get the dictionary of header metadata from a numpy.ndarray. + + Parameters + ---------- + array : numpy.ndarray + + Returns + ------- + d : dict + This has the appropriate entries for writing its string representation + to the header of the file. + """ + d = {'shape': array.shape} + if array.flags.c_contiguous: + d['fortran_order'] = False + elif array.flags.f_contiguous: + d['fortran_order'] = True + else: + # Totally non-contiguous data. We will have to make it C-contiguous + # before writing. Note that we need to test for C_CONTIGUOUS first + # because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS. + d['fortran_order'] = False + + d['descr'] = dtype_to_descr(array.dtype) + return d + + +def _wrap_header(header, version): + """ + Takes a stringified header, and attaches the prefix and padding to it + """ + import struct + assert version is not None + fmt, encoding = _header_size_info[version] + header = header.encode(encoding) + hlen = len(header) + 1 + padlen = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize(fmt) + hlen) % ARRAY_ALIGN) + try: + header_prefix = magic(*version) + struct.pack(fmt, hlen + padlen) + except struct.error: + msg = f"Header length {hlen} too big for version={version}" + raise ValueError(msg) from None + + # Pad the header with spaces and a final newline such that the magic + # string, the header-length short and the header are aligned on a + # ARRAY_ALIGN byte boundary. This supports memory mapping of dtypes + # aligned up to ARRAY_ALIGN on systems like Linux where mmap() + # offset must be page-aligned (i.e. the beginning of the file). + return header_prefix + header + b' ' * padlen + b'\n' + + +def _wrap_header_guess_version(header): + """ + Like `_wrap_header`, but chooses an appropriate version given the contents + """ + try: + return _wrap_header(header, (1, 0)) + except ValueError: + pass + + try: + ret = _wrap_header(header, (2, 0)) + except UnicodeEncodeError: + pass + else: + warnings.warn("Stored array in format 2.0. It can only be" + "read by NumPy >= 1.9", UserWarning, stacklevel=2) + return ret + + header = _wrap_header(header, (3, 0)) + warnings.warn("Stored array in format 3.0. It can only be " + "read by NumPy >= 1.17", UserWarning, stacklevel=2) + return header + + +def _write_array_header(fp, d, version=None): + """ Write the header for an array and returns the version used + + Parameters + ---------- + fp : filelike object + d : dict + This has the appropriate entries for writing its string representation + to the header of the file. + version : tuple or None + None means use oldest that works. Providing an explicit version will + raise a ValueError if the format does not allow saving this data. + Default: None + """ + header = ["{"] + for key, value in sorted(d.items()): + # Need to use repr here, since we eval these when reading + header.append(f"'{key}': {repr(value)}, ") + header.append("}") + header = "".join(header) + + # Add some spare space so that the array header can be modified in-place + # when changing the array size, e.g. when growing it by appending data at + # the end. + shape = d['shape'] + header += " " * ((GROWTH_AXIS_MAX_DIGITS - len(repr( + shape[-1 if d['fortran_order'] else 0] + ))) if len(shape) > 0 else 0) + + if version is None: + header = _wrap_header_guess_version(header) + else: + header = _wrap_header(header, version) + fp.write(header) + + +@set_module("numpy.lib.format") +def write_array_header_1_0(fp, d): + """ Write the header for an array using the 1.0 format. + + Parameters + ---------- + fp : filelike object + d : dict + This has the appropriate entries for writing its string + representation to the header of the file. + """ + _write_array_header(fp, d, (1, 0)) + + +@set_module("numpy.lib.format") +def write_array_header_2_0(fp, d): + """ Write the header for an array using the 2.0 format. + The 2.0 format allows storing very large structured arrays. + + Parameters + ---------- + fp : filelike object + d : dict + This has the appropriate entries for writing its string + representation to the header of the file. + """ + _write_array_header(fp, d, (2, 0)) + + +@set_module("numpy.lib.format") +def read_array_header_1_0(fp, max_header_size=_MAX_HEADER_SIZE): + """ + Read an array header from a filelike object using the 1.0 file format + version. + + This will leave the file object located just after the header. + + Parameters + ---------- + fp : filelike object + A file object or something with a `.read()` method like a file. + + Returns + ------- + shape : tuple of int + The shape of the array. + fortran_order : bool + The array data will be written out directly if it is either + C-contiguous or Fortran-contiguous. Otherwise, it will be made + contiguous before writing it out. + dtype : dtype + The dtype of the file's data. + max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. + See :py:func:`ast.literal_eval()` for details. + + Raises + ------ + ValueError + If the data is invalid. + + """ + return _read_array_header( + fp, version=(1, 0), max_header_size=max_header_size) + + +@set_module("numpy.lib.format") +def read_array_header_2_0(fp, max_header_size=_MAX_HEADER_SIZE): + """ + Read an array header from a filelike object using the 2.0 file format + version. + + This will leave the file object located just after the header. + + Parameters + ---------- + fp : filelike object + A file object or something with a `.read()` method like a file. + max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. + See :py:func:`ast.literal_eval()` for details. + + Returns + ------- + shape : tuple of int + The shape of the array. + fortran_order : bool + The array data will be written out directly if it is either + C-contiguous or Fortran-contiguous. Otherwise, it will be made + contiguous before writing it out. + dtype : dtype + The dtype of the file's data. + + Raises + ------ + ValueError + If the data is invalid. + + """ + return _read_array_header( + fp, version=(2, 0), max_header_size=max_header_size) + + +def _filter_header(s): + """Clean up 'L' in npz header ints. + + Cleans up the 'L' in strings representing integers. Needed to allow npz + headers produced in Python2 to be read in Python3. + + Parameters + ---------- + s : string + Npy file header. + + Returns + ------- + header : str + Cleaned up header. + + """ + import tokenize + from io import StringIO + + tokens = [] + last_token_was_number = False + for token in tokenize.generate_tokens(StringIO(s).readline): + token_type = token[0] + token_string = token[1] + if (last_token_was_number and + token_type == tokenize.NAME and + token_string == "L"): + continue + else: + tokens.append(token) + last_token_was_number = (token_type == tokenize.NUMBER) + return tokenize.untokenize(tokens) + + +def _read_array_header(fp, version, max_header_size=_MAX_HEADER_SIZE): + """ + see read_array_header_1_0 + """ + # Read an unsigned, little-endian short int which has the length of the + # header. + import ast + import struct + hinfo = _header_size_info.get(version) + if hinfo is None: + raise ValueError(f"Invalid version {version!r}") + hlength_type, encoding = hinfo + + hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length") + header_length = struct.unpack(hlength_type, hlength_str)[0] + header = _read_bytes(fp, header_length, "array header") + header = header.decode(encoding) + if len(header) > max_header_size: + raise ValueError( + f"Header info length ({len(header)}) is large and may not be safe " + "to load securely.\n" + "To allow loading, adjust `max_header_size` or fully trust " + "the `.npy` file using `allow_pickle=True`.\n" + "For safety against large resource use or crashes, sandboxing " + "may be necessary.") + + # The header is a pretty-printed string representation of a literal + # Python dictionary with trailing newlines padded to an ARRAY_ALIGN byte + # boundary. The keys are strings. + # "shape" : tuple of int + # "fortran_order" : bool + # "descr" : dtype.descr + # Versions (2, 0) and (1, 0) could have been created by a Python 2 + # implementation before header filtering was implemented. + # + # For performance reasons, we try without _filter_header first though + try: + d = ast.literal_eval(header) + except SyntaxError as e: + if version <= (2, 0): + header = _filter_header(header) + try: + d = ast.literal_eval(header) + except SyntaxError as e2: + msg = "Cannot parse header: {!r}" + raise ValueError(msg.format(header)) from e2 + else: + warnings.warn( + "Reading `.npy` or `.npz` file required additional " + "header parsing as it was created on Python 2. Save the " + "file again to speed up loading and avoid this warning.", + UserWarning, stacklevel=4) + else: + msg = "Cannot parse header: {!r}" + raise ValueError(msg.format(header)) from e + if not isinstance(d, dict): + msg = "Header is not a dictionary: {!r}" + raise ValueError(msg.format(d)) + + if EXPECTED_KEYS != d.keys(): + keys = sorted(d.keys()) + msg = "Header does not contain the correct keys: {!r}" + raise ValueError(msg.format(keys)) + + # Sanity-check the values. + if (not isinstance(d['shape'], tuple) or + not all(isinstance(x, int) for x in d['shape'])): + msg = "shape is not valid: {!r}" + raise ValueError(msg.format(d['shape'])) + if not isinstance(d['fortran_order'], bool): + msg = "fortran_order is not a valid bool: {!r}" + raise ValueError(msg.format(d['fortran_order'])) + try: + dtype = descr_to_dtype(d['descr']) + except TypeError as e: + msg = "descr is not a valid dtype descriptor: {!r}" + raise ValueError(msg.format(d['descr'])) from e + + return d['shape'], d['fortran_order'], dtype + + +@set_module("numpy.lib.format") +def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None): + """ + Write an array to an NPY file, including a header. + + If the array is neither C-contiguous nor Fortran-contiguous AND the + file_like object is not a real file object, this function will have to + copy data in memory. + + Parameters + ---------- + fp : file_like object + An open, writable file object, or similar object with a + ``.write()`` method. + array : ndarray + The array to write to disk. + version : (int, int) or None, optional + The version number of the format. None means use the oldest + supported version that is able to store the data. Default: None + allow_pickle : bool, optional + Whether to allow writing pickled data. Default: True + pickle_kwargs : dict, optional + Additional keyword arguments to pass to pickle.dump, excluding + 'protocol'. These are only useful when pickling objects in object + arrays to Python 2 compatible format. + + Raises + ------ + ValueError + If the array cannot be persisted. This includes the case of + allow_pickle=False and array being an object array. + Various other errors + If the array contains Python objects as part of its dtype, the + process of pickling them may raise various errors if the objects + are not picklable. + + """ + _check_version(version) + _write_array_header(fp, header_data_from_array_1_0(array), version) + + if array.itemsize == 0: + buffersize = 0 + else: + # Set buffer size to 16 MiB to hide the Python loop overhead. + buffersize = max(16 * 1024 ** 2 // array.itemsize, 1) + + dtype_class = type(array.dtype) + + if array.dtype.hasobject or not dtype_class._legacy: + # We contain Python objects so we cannot write out the data + # directly. Instead, we will pickle it out + if not allow_pickle: + if array.dtype.hasobject: + raise ValueError("Object arrays cannot be saved when " + "allow_pickle=False") + if not dtype_class._legacy: + raise ValueError("User-defined dtypes cannot be saved " + "when allow_pickle=False") + if pickle_kwargs is None: + pickle_kwargs = {} + pickle.dump(array, fp, protocol=4, **pickle_kwargs) + elif array.flags.f_contiguous and not array.flags.c_contiguous: + if isfileobj(fp): + array.T.tofile(fp) + else: + for chunk in numpy.nditer( + array, flags=['external_loop', 'buffered', 'zerosize_ok'], + buffersize=buffersize, order='F'): + fp.write(chunk.tobytes('C')) + elif isfileobj(fp): + array.tofile(fp) + else: + for chunk in numpy.nditer( + array, flags=['external_loop', 'buffered', 'zerosize_ok'], + buffersize=buffersize, order='C'): + fp.write(chunk.tobytes('C')) + + +@set_module("numpy.lib.format") +def read_array(fp, allow_pickle=False, pickle_kwargs=None, *, + max_header_size=_MAX_HEADER_SIZE): + """ + Read an array from an NPY file. + + Parameters + ---------- + fp : file_like object + If this is not a real file object, then this may take extra memory + and time. + allow_pickle : bool, optional + Whether to allow writing pickled data. Default: False + pickle_kwargs : dict + Additional keyword arguments to pass to pickle.load. These are only + useful when loading object arrays saved on Python 2. + max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. + See :py:func:`ast.literal_eval()` for details. + This option is ignored when `allow_pickle` is passed. In that case + the file is by definition trusted and the limit is unnecessary. + + Returns + ------- + array : ndarray + The array from the data on disk. + + Raises + ------ + ValueError + If the data is invalid, or allow_pickle=False and the file contains + an object array. + + """ + if allow_pickle: + # Effectively ignore max_header_size, since `allow_pickle` indicates + # that the input is fully trusted. + max_header_size = 2**64 + + version = read_magic(fp) + _check_version(version) + shape, fortran_order, dtype = _read_array_header( + fp, version, max_header_size=max_header_size) + if len(shape) == 0: + count = 1 + else: + count = numpy.multiply.reduce(shape, dtype=numpy.int64) + + # Now read the actual data. + if dtype.hasobject: + # The array contained Python objects. We need to unpickle the data. + if not allow_pickle: + raise ValueError("Object arrays cannot be loaded when " + "allow_pickle=False") + if pickle_kwargs is None: + pickle_kwargs = {} + try: + array = pickle.load(fp, **pickle_kwargs) + except UnicodeError as err: + # Friendlier error message + raise UnicodeError("Unpickling a python object failed: %r\n" + "You may need to pass the encoding= option " + "to numpy.load" % (err,)) from err + else: + if isfileobj(fp): + # We can use the fast fromfile() function. + array = numpy.fromfile(fp, dtype=dtype, count=count) + else: + # This is not a real file. We have to read it the + # memory-intensive way. + # crc32 module fails on reads greater than 2 ** 32 bytes, + # breaking large reads from gzip streams. Chunk reads to + # BUFFER_SIZE bytes to avoid issue and reduce memory overhead + # of the read. In non-chunked case count < max_read_count, so + # only one read is performed. + + # Use np.ndarray instead of np.empty since the latter does + # not correctly instantiate zero-width string dtypes; see + # https://github.com/numpy/numpy/pull/6430 + array = numpy.ndarray(count, dtype=dtype) + + if dtype.itemsize > 0: + # If dtype.itemsize == 0 then there's nothing more to read + max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize) + + for i in range(0, count, max_read_count): + read_count = min(max_read_count, count - i) + read_size = int(read_count * dtype.itemsize) + data = _read_bytes(fp, read_size, "array data") + array[i:i + read_count] = numpy.frombuffer(data, dtype=dtype, + count=read_count) + + if array.size != count: + raise ValueError( + "Failed to read all data for array. " + f"Expected {shape} = {count} elements, " + f"could only read {array.size} elements. " + "(file seems not fully written?)" + ) + + if fortran_order: + array = array.reshape(shape[::-1]) + array = array.transpose() + else: + array = array.reshape(shape) + + return array + + +@set_module("numpy.lib.format") +def open_memmap(filename, mode='r+', dtype=None, shape=None, + fortran_order=False, version=None, *, + max_header_size=_MAX_HEADER_SIZE): + """ + Open a .npy file as a memory-mapped array. + + This may be used to read an existing file or create a new one. + + Parameters + ---------- + filename : str or path-like + The name of the file on disk. This may *not* be a file-like + object. + mode : str, optional + The mode in which to open the file; the default is 'r+'. In + addition to the standard file modes, 'c' is also accepted to mean + "copy on write." See `memmap` for the available mode strings. + dtype : data-type, optional + The data type of the array if we are creating a new file in "write" + mode, if not, `dtype` is ignored. The default value is None, which + results in a data-type of `float64`. + shape : tuple of int + The shape of the array if we are creating a new file in "write" + mode, in which case this parameter is required. Otherwise, this + parameter is ignored and is thus optional. + fortran_order : bool, optional + Whether the array should be Fortran-contiguous (True) or + C-contiguous (False, the default) if we are creating a new file in + "write" mode. + version : tuple of int (major, minor) or None + If the mode is a "write" mode, then this is the version of the file + format used to create the file. None means use the oldest + supported version that is able to store the data. Default: None + max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. + See :py:func:`ast.literal_eval()` for details. + + Returns + ------- + marray : memmap + The memory-mapped array. + + Raises + ------ + ValueError + If the data or the mode is invalid. + OSError + If the file is not found or cannot be opened correctly. + + See Also + -------- + numpy.memmap + + """ + if isfileobj(filename): + raise ValueError("Filename must be a string or a path-like object." + " Memmap cannot use existing file handles.") + + if 'w' in mode: + # We are creating the file, not reading it. + # Check if we ought to create the file. + _check_version(version) + # Ensure that the given dtype is an authentic dtype object rather + # than just something that can be interpreted as a dtype object. + dtype = numpy.dtype(dtype) + if dtype.hasobject: + msg = "Array can't be memory-mapped: Python objects in dtype." + raise ValueError(msg) + d = { + "descr": dtype_to_descr(dtype), + "fortran_order": fortran_order, + "shape": shape, + } + # If we got here, then it should be safe to create the file. + with open(os.fspath(filename), mode + 'b') as fp: + _write_array_header(fp, d, version) + offset = fp.tell() + else: + # Read the header of the file first. + with open(os.fspath(filename), 'rb') as fp: + version = read_magic(fp) + _check_version(version) + + shape, fortran_order, dtype = _read_array_header( + fp, version, max_header_size=max_header_size) + if dtype.hasobject: + msg = "Array can't be memory-mapped: Python objects in dtype." + raise ValueError(msg) + offset = fp.tell() + + if fortran_order: + order = 'F' + else: + order = 'C' + + # We need to change a write-only mode to a read-write mode since we've + # already written data to the file. + if mode == 'w+': + mode = 'r+' + + marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order, + mode=mode, offset=offset) + + return marray + + +def _read_bytes(fp, size, error_template="ran out of data"): + """ + Read from file-like object until size bytes are read. + Raises ValueError if not EOF is encountered before size bytes are read. + Non-blocking objects only supported if they derive from io objects. + + Required as e.g. ZipExtFile in python 2.6 can return less data than + requested. + """ + data = b"" + while True: + # io files (default in python3) return None or raise on + # would-block, python2 file will truncate, probably nothing can be + # done about that. note that regular files can't be non-blocking + try: + r = fp.read(size - len(data)) + data += r + if len(r) == 0 or len(data) == size: + break + except BlockingIOError: + pass + if len(data) != size: + msg = "EOF: reading %s, expected %d bytes got %d" + raise ValueError(msg % (error_template, size, len(data))) + else: + return data + + +@set_module("numpy.lib.format") +def isfileobj(f): + if not isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter)): + return False + try: + # BufferedReader/Writer may raise OSError when + # fetching `fileno()` (e.g. when wrapping BytesIO). + f.fileno() + return True + except OSError: + return False diff --git a/local_packages/numpy/lib/_format_impl.pyi b/local_packages/numpy/lib/_format_impl.pyi new file mode 100644 index 0000000..b45df02 --- /dev/null +++ b/local_packages/numpy/lib/_format_impl.pyi @@ -0,0 +1,56 @@ +import os +from _typeshed import SupportsRead, SupportsWrite +from typing import Any, BinaryIO, Final, TypeAlias, TypeGuard + +import numpy as np +import numpy.typing as npt +from numpy.lib._utils_impl import drop_metadata as drop_metadata + +__all__: list[str] = [] + +_DTypeDescr: TypeAlias = list[tuple[str, str]] | list[tuple[str, str, tuple[int, ...]]] + +### + +EXPECTED_KEYS: Final[set[str]] = ... +MAGIC_PREFIX: Final = b"\x93NUMPY" +MAGIC_LEN: Final = 8 +ARRAY_ALIGN: Final = 64 +BUFFER_SIZE: Final = 262_144 # 1 << 18 +GROWTH_AXIS_MAX_DIGITS: Final = 21 +_MAX_HEADER_SIZE: Final = 10_000 + +def magic(major: int, minor: int) -> bytes: ... +def read_magic(fp: SupportsRead[bytes]) -> tuple[int, int]: ... +def dtype_to_descr(dtype: np.dtype) -> _DTypeDescr: ... +def descr_to_dtype(descr: _DTypeDescr) -> np.dtype: ... +def header_data_from_array_1_0(array: np.ndarray) -> dict[str, Any]: ... +def write_array_header_1_0(fp: SupportsWrite[bytes], d: dict[str, Any]) -> None: ... +def write_array_header_2_0(fp: SupportsWrite[bytes], d: dict[str, Any]) -> None: ... +def read_array_header_1_0(fp: SupportsRead[bytes], max_header_size: int = 10_000) -> tuple[tuple[int, ...], bool, np.dtype]: ... +def read_array_header_2_0(fp: SupportsRead[bytes], max_header_size: int = 10_000) -> tuple[tuple[int, ...], bool, np.dtype]: ... +def write_array( + fp: SupportsWrite[bytes], + array: np.ndarray, + version: tuple[int, int] | None = None, + allow_pickle: bool = True, + pickle_kwargs: dict[str, Any] | None = None, +) -> None: ... +def read_array( + fp: SupportsRead[bytes], + allow_pickle: bool = False, + pickle_kwargs: dict[str, Any] | None = None, + *, + max_header_size: int = 10_000, +) -> np.ndarray: ... +def open_memmap( + filename: str | os.PathLike[Any], + mode: str = "r+", + dtype: npt.DTypeLike | None = None, + shape: tuple[int, ...] | None = None, + fortran_order: bool = False, + version: tuple[int, int] | None = None, + *, + max_header_size: int = 10_000, +) -> np.memmap: ... +def isfileobj(f: object) -> TypeGuard[BinaryIO]: ... # don't use `typing.TypeIs` diff --git a/local_packages/numpy/lib/_function_base_impl.py b/local_packages/numpy/lib/_function_base_impl.py new file mode 100644 index 0000000..6d77cf3 --- /dev/null +++ b/local_packages/numpy/lib/_function_base_impl.py @@ -0,0 +1,5760 @@ +import builtins +import collections.abc +import functools +import re +import warnings + +import numpy as np +import numpy._core.numeric as _nx +from numpy._core import overrides, transpose +from numpy._core._multiarray_umath import _array_converter +from numpy._core.fromnumeric import any, mean, nonzero, partition, ravel, sum +from numpy._core.multiarray import ( + _monotonicity, + _place, + bincount, + interp as compiled_interp, + interp_complex as compiled_interp_complex, + normalize_axis_index, +) +from numpy._core.numeric import ( + absolute, + arange, + array, + asanyarray, + asarray, + concatenate, + dot, + empty, + integer, + intp, + isscalar, + ndarray, + ones, + take, + where, + zeros_like, +) +from numpy._core.numerictypes import typecodes +from numpy._core.umath import ( + add, + arctan2, + cos, + exp, + floor, + frompyfunc, + less_equal, + minimum, + mod, + not_equal, + pi, + sin, + sqrt, + subtract, +) +from numpy._utils import set_module + +# needed in this module for compatibility +from numpy.lib._histograms_impl import histogram, histogramdd # noqa: F401 +from numpy.lib._twodim_base_impl import diag + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = [ + 'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile', + 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'flip', + 'rot90', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average', + 'bincount', 'digitize', 'cov', 'corrcoef', + 'median', 'sinc', 'hamming', 'hanning', 'bartlett', + 'blackman', 'kaiser', 'trapezoid', 'i0', + 'meshgrid', 'delete', 'insert', 'append', 'interp', + 'quantile' + ] + +# _QuantileMethods is a dictionary listing all the supported methods to +# compute quantile/percentile. +# +# Below virtual_index refers to the index of the element where the percentile +# would be found in the sorted sample. +# When the sample contains exactly the percentile wanted, the virtual_index is +# an integer to the index of this element. +# When the percentile wanted is in between two elements, the virtual_index +# is made of a integer part (a.k.a 'i' or 'left') and a fractional part +# (a.k.a 'g' or 'gamma') +# +# Each method in _QuantileMethods has two properties +# get_virtual_index : Callable +# The function used to compute the virtual_index. +# fix_gamma : Callable +# A function used for discrete methods to force the index to a specific value. +_QuantileMethods = { + # --- HYNDMAN and FAN METHODS + # Discrete methods + 'inverted_cdf': { + 'get_virtual_index': lambda n, quantiles: _inverted_cdf(n, quantiles), # noqa: PLW0108 + 'fix_gamma': None, # should never be called + }, + 'averaged_inverted_cdf': { + 'get_virtual_index': lambda n, quantiles: (n * quantiles) - 1, + 'fix_gamma': lambda gamma, _: _get_gamma_mask( + shape=gamma.shape, + default_value=1., + conditioned_value=0.5, + where=gamma == 0), + }, + 'closest_observation': { + 'get_virtual_index': lambda n, quantiles: _closest_observation(n, quantiles), # noqa: PLW0108 + 'fix_gamma': None, # should never be called + }, + # Continuous methods + 'interpolated_inverted_cdf': { + 'get_virtual_index': lambda n, quantiles: + _compute_virtual_index(n, quantiles, 0, 1), + 'fix_gamma': lambda gamma, _: gamma, + }, + 'hazen': { + 'get_virtual_index': lambda n, quantiles: + _compute_virtual_index(n, quantiles, 0.5, 0.5), + 'fix_gamma': lambda gamma, _: gamma, + }, + 'weibull': { + 'get_virtual_index': lambda n, quantiles: + _compute_virtual_index(n, quantiles, 0, 0), + 'fix_gamma': lambda gamma, _: gamma, + }, + # Default method. + # To avoid some rounding issues, `(n-1) * quantiles` is preferred to + # `_compute_virtual_index(n, quantiles, 1, 1)`. + # They are mathematically equivalent. + 'linear': { + 'get_virtual_index': lambda n, quantiles: (n - 1) * quantiles, + 'fix_gamma': lambda gamma, _: gamma, + }, + 'median_unbiased': { + 'get_virtual_index': lambda n, quantiles: + _compute_virtual_index(n, quantiles, 1 / 3.0, 1 / 3.0), + 'fix_gamma': lambda gamma, _: gamma, + }, + 'normal_unbiased': { + 'get_virtual_index': lambda n, quantiles: + _compute_virtual_index(n, quantiles, 3 / 8.0, 3 / 8.0), + 'fix_gamma': lambda gamma, _: gamma, + }, + # --- OTHER METHODS + 'lower': { + 'get_virtual_index': lambda n, quantiles: np.floor( + (n - 1) * quantiles).astype(np.intp), + 'fix_gamma': None, # should never be called, index dtype is int + }, + 'higher': { + 'get_virtual_index': lambda n, quantiles: np.ceil( + (n - 1) * quantiles).astype(np.intp), + 'fix_gamma': None, # should never be called, index dtype is int + }, + 'midpoint': { + 'get_virtual_index': lambda n, quantiles: 0.5 * ( + np.floor((n - 1) * quantiles) + + np.ceil((n - 1) * quantiles)), + 'fix_gamma': lambda gamma, index: _get_gamma_mask( + shape=gamma.shape, + default_value=0.5, + conditioned_value=0., + where=index % 1 == 0), + }, + 'nearest': { + 'get_virtual_index': lambda n, quantiles: np.around( + (n - 1) * quantiles).astype(np.intp), + 'fix_gamma': None, + # should never be called, index dtype is int + }} + + +def _rot90_dispatcher(m, k=None, axes=None): + return (m,) + + +@array_function_dispatch(_rot90_dispatcher) +def rot90(m, k=1, axes=(0, 1)): + """ + Rotate an array by 90 degrees in the plane specified by axes. + + Rotation direction is from the first towards the second axis. + This means for a 2D array with the default `k` and `axes`, the + rotation will be counterclockwise. + + Parameters + ---------- + m : array_like + Array of two or more dimensions. + k : integer + Number of times the array is rotated by 90 degrees. + axes : (2,) array_like + The array is rotated in the plane defined by the axes. + Axes must be different. + + Returns + ------- + y : ndarray + A rotated view of `m`. + + See Also + -------- + flip : Reverse the order of elements in an array along the given axis. + fliplr : Flip an array horizontally. + flipud : Flip an array vertically. + + Notes + ----- + ``rot90(m, k=1, axes=(1,0))`` is the reverse of + ``rot90(m, k=1, axes=(0,1))`` + + ``rot90(m, k=1, axes=(1,0))`` is equivalent to + ``rot90(m, k=-1, axes=(0,1))`` + + Examples + -------- + >>> import numpy as np + >>> m = np.array([[1,2],[3,4]], int) + >>> m + array([[1, 2], + [3, 4]]) + >>> np.rot90(m) + array([[2, 4], + [1, 3]]) + >>> np.rot90(m, 2) + array([[4, 3], + [2, 1]]) + >>> m = np.arange(8).reshape((2,2,2)) + >>> np.rot90(m, 1, (1,2)) + array([[[1, 3], + [0, 2]], + [[5, 7], + [4, 6]]]) + + """ + axes = tuple(axes) + if len(axes) != 2: + raise ValueError("len(axes) must be 2.") + + m = asanyarray(m) + + if axes[0] == axes[1] or absolute(axes[0] - axes[1]) == m.ndim: + raise ValueError("Axes must be different.") + + if (axes[0] >= m.ndim or axes[0] < -m.ndim + or axes[1] >= m.ndim or axes[1] < -m.ndim): + raise ValueError(f"Axes={axes} out of range for array of ndim={m.ndim}.") + + k %= 4 + + if k == 0: + return m[:] + if k == 2: + return flip(flip(m, axes[0]), axes[1]) + + axes_list = arange(0, m.ndim) + (axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]], + axes_list[axes[0]]) + + if k == 1: + return transpose(flip(m, axes[1]), axes_list) + else: + # k == 3 + return flip(transpose(m, axes_list), axes[1]) + + +def _flip_dispatcher(m, axis=None): + return (m,) + + +@array_function_dispatch(_flip_dispatcher) +def flip(m, axis=None): + """ + Reverse the order of elements in an array along the given axis. + + The shape of the array is preserved, but the elements are reordered. + + Parameters + ---------- + m : array_like + Input array. + axis : None or int or tuple of ints, optional + Axis or axes along which to flip over. The default, + axis=None, will flip over all of the axes of the input array. + If axis is negative it counts from the last to the first axis. + + If axis is a tuple of ints, flipping is performed on all of the axes + specified in the tuple. + + Returns + ------- + out : array_like + A view of `m` with the entries of axis reversed. Since a view is + returned, this operation is done in constant time. + + See Also + -------- + flipud : Flip an array vertically (axis=0). + fliplr : Flip an array horizontally (axis=1). + + Notes + ----- + flip(m, 0) is equivalent to flipud(m). + + flip(m, 1) is equivalent to fliplr(m). + + flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n. + + flip(m) corresponds to ``m[::-1,::-1,...,::-1]`` with ``::-1`` at all + positions. + + flip(m, (0, 1)) corresponds to ``m[::-1,::-1,...]`` with ``::-1`` at + position 0 and position 1. + + Examples + -------- + >>> import numpy as np + >>> A = np.arange(8).reshape((2,2,2)) + >>> A + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> np.flip(A, 0) + array([[[4, 5], + [6, 7]], + [[0, 1], + [2, 3]]]) + >>> np.flip(A, 1) + array([[[2, 3], + [0, 1]], + [[6, 7], + [4, 5]]]) + >>> np.flip(A) + array([[[7, 6], + [5, 4]], + [[3, 2], + [1, 0]]]) + >>> np.flip(A, (0, 2)) + array([[[5, 4], + [7, 6]], + [[1, 0], + [3, 2]]]) + >>> rng = np.random.default_rng() + >>> A = rng.normal(size=(3,4,5)) + >>> np.all(np.flip(A,2) == A[:,:,::-1,...]) + True + """ + if not hasattr(m, 'ndim'): + m = asarray(m) + if axis is None: + indexer = (np.s_[::-1],) * m.ndim + else: + axis = _nx.normalize_axis_tuple(axis, m.ndim) + indexer = [np.s_[:]] * m.ndim + for ax in axis: + indexer[ax] = np.s_[::-1] + indexer = tuple(indexer) + return m[indexer] + + +@set_module('numpy') +def iterable(y): + """ + Check whether or not an object can be iterated over. + + Parameters + ---------- + y : object + Input object. + + Returns + ------- + b : bool + Return ``True`` if the object has an iterator method or is a + sequence and ``False`` otherwise. + + + Examples + -------- + >>> import numpy as np + >>> np.iterable([1, 2, 3]) + True + >>> np.iterable(2) + False + + Notes + ----- + In most cases, the results of ``np.iterable(obj)`` are consistent with + ``isinstance(obj, collections.abc.Iterable)``. One notable exception is + the treatment of 0-dimensional arrays:: + + >>> from collections.abc import Iterable + >>> a = np.array(1.0) # 0-dimensional numpy array + >>> isinstance(a, Iterable) + True + >>> np.iterable(a) + False + + """ + try: + iter(y) + except TypeError: + return False + return True + + +def _weights_are_valid(weights, a, axis): + """Validate weights array. + + We assume, weights is not None. + """ + wgt = np.asanyarray(weights) + + # Sanity checks + if a.shape != wgt.shape: + if axis is None: + raise TypeError( + "Axis must be specified when shapes of a and weights " + "differ.") + if wgt.shape != tuple(a.shape[ax] for ax in axis): + raise ValueError( + "Shape of weights must be consistent with " + "shape of a along specified axis.") + + # setup wgt to broadcast along axis + wgt = wgt.transpose(np.argsort(axis)) + wgt = wgt.reshape(tuple((s if ax in axis else 1) + for ax, s in enumerate(a.shape))) + return wgt + + +def _average_dispatcher(a, axis=None, weights=None, returned=None, *, + keepdims=None): + return (a, weights) + + +@array_function_dispatch(_average_dispatcher) +def average(a, axis=None, weights=None, returned=False, *, + keepdims=np._NoValue): + """ + Compute the weighted average along the specified axis. + + Parameters + ---------- + a : array_like + Array containing data to be averaged. If `a` is not an array, a + conversion is attempted. + axis : None or int or tuple of ints, optional + Axis or axes along which to average `a`. The default, + `axis=None`, will average over all of the elements of the input array. + If axis is negative it counts from the last to the first axis. + If axis is a tuple of ints, averaging is performed on all of the axes + specified in the tuple instead of a single axis or all the axes as + before. + weights : array_like, optional + An array of weights associated with the values in `a`. Each value in + `a` contributes to the average according to its associated weight. + The array of weights must be the same shape as `a` if no axis is + specified, otherwise the weights must have dimensions and shape + consistent with `a` along the specified axis. + If `weights=None`, then all data in `a` are assumed to have a + weight equal to one. + The calculation is:: + + avg = sum(a * weights) / sum(weights) + + where the sum is over all included elements. + The only constraint on the values of `weights` is that `sum(weights)` + must not be 0. + returned : bool, optional + Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`) + is returned, otherwise only the average is returned. + If `weights=None`, `sum_of_weights` is equivalent to the number of + elements over which the average is taken. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + *Note:* `keepdims` will not work with instances of `numpy.matrix` + or other classes whose methods do not support `keepdims`. + + .. versionadded:: 1.23.0 + + Returns + ------- + retval, [sum_of_weights] : array_type or double + Return the average along the specified axis. When `returned` is `True`, + return a tuple with the average as the first element and the sum + of the weights as the second element. `sum_of_weights` is of the + same type as `retval`. The result dtype follows a general pattern. + If `weights` is None, the result dtype will be that of `a` , or ``float64`` + if `a` is integral. Otherwise, if `weights` is not None and `a` is non- + integral, the result type will be the type of lowest precision capable of + representing values of both `a` and `weights`. If `a` happens to be + integral, the previous rules still applies but the result dtype will + at least be ``float64``. + + Raises + ------ + ZeroDivisionError + When all weights along axis are zero. See `numpy.ma.average` for a + version robust to this type of error. + TypeError + When `weights` does not have the same shape as `a`, and `axis=None`. + ValueError + When `weights` does not have dimensions and shape consistent with `a` + along specified `axis`. + + See Also + -------- + mean + + ma.average : average for masked arrays -- useful if your data contains + "missing" values + numpy.result_type : Returns the type that results from applying the + numpy type promotion rules to the arguments. + + Examples + -------- + >>> import numpy as np + >>> data = np.arange(1, 5) + >>> data + array([1, 2, 3, 4]) + >>> np.average(data) + 2.5 + >>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1)) + 4.0 + + >>> data = np.arange(6).reshape((3, 2)) + >>> data + array([[0, 1], + [2, 3], + [4, 5]]) + >>> np.average(data, axis=1, weights=[1./4, 3./4]) + array([0.75, 2.75, 4.75]) + >>> np.average(data, weights=[1./4, 3./4]) + Traceback (most recent call last): + ... + TypeError: Axis must be specified when shapes of a and weights differ. + + With ``keepdims=True``, the following result has shape (3, 1). + + >>> np.average(data, axis=1, keepdims=True) + array([[0.5], + [2.5], + [4.5]]) + + >>> data = np.arange(8).reshape((2, 2, 2)) + >>> data + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> np.average(data, axis=(0, 1), weights=[[1./4, 3./4], [1., 1./2]]) + array([3.4, 4.4]) + >>> np.average(data, axis=0, weights=[[1./4, 3./4], [1., 1./2]]) + Traceback (most recent call last): + ... + ValueError: Shape of weights must be consistent + with shape of a along specified axis. + """ + a = np.asanyarray(a) + + if axis is not None: + axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis") + + if keepdims is np._NoValue: + # Don't pass on the keepdims argument if one wasn't given. + keepdims_kw = {} + else: + keepdims_kw = {'keepdims': keepdims} + + if weights is None: + avg = a.mean(axis, **keepdims_kw) + avg_as_array = np.asanyarray(avg) + scl = avg_as_array.dtype.type(a.size / avg_as_array.size) + else: + wgt = _weights_are_valid(weights=weights, a=a, axis=axis) + + if issubclass(a.dtype.type, (np.integer, np.bool)): + result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8') + else: + result_dtype = np.result_type(a.dtype, wgt.dtype) + + scl = wgt.sum(axis=axis, dtype=result_dtype, **keepdims_kw) + if np.any(scl == 0.0): + raise ZeroDivisionError( + "Weights sum to zero, can't be normalized") + + avg = avg_as_array = np.multiply(a, wgt, + dtype=result_dtype).sum(axis, **keepdims_kw) / scl + + if returned: + if scl.shape != avg_as_array.shape: + scl = np.broadcast_to(scl, avg_as_array.shape, subok=True).copy() + return avg, scl + else: + return avg + + +@set_module('numpy') +def asarray_chkfinite(a, dtype=None, order=None): + """Convert the input to an array, checking for NaNs or Infs. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes lists, lists of tuples, tuples, tuples of tuples, tuples + of lists and ndarrays. Success requires no NaNs or Infs. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F', 'A', 'K'}, optional + The memory layout of the output. + 'C' gives a row-major layout (C-style), + 'F' gives a column-major layout (Fortran-style). + 'C' and 'F' will copy if needed to ensure the output format. + 'A' (any) is equivalent to 'F' if input a is non-contiguous or + Fortran-contiguous, otherwise, it is equivalent to 'C'. + Unlike 'C' or 'F', 'A' does not ensure that the result is contiguous. + 'K' (keep) preserves the input order for the output. + 'C' is the default. + + Returns + ------- + out : ndarray + Array interpretation of `a`. No copy is performed if the input + is already an ndarray. If `a` is a subclass of ndarray, a base + class ndarray is returned. + + Raises + ------ + ValueError + Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity). + + See Also + -------- + asarray : Create and array. + asanyarray : Similar function which passes through subclasses. + ascontiguousarray : Convert input to a contiguous array. + asfortranarray : Convert input to an ndarray with column-major + memory order. + fromiter : Create an array from an iterator. + fromfunction : Construct an array by executing a function on grid + positions. + + Examples + -------- + >>> import numpy as np + + Convert a list into an array. If all elements are finite, then + ``asarray_chkfinite`` is identical to ``asarray``. + + >>> a = [1, 2] + >>> np.asarray_chkfinite(a, dtype=float) + array([1., 2.]) + + Raises ValueError if array_like contains Nans or Infs. + + >>> a = [1, 2, np.inf] + >>> try: + ... np.asarray_chkfinite(a) + ... except ValueError: + ... print('ValueError') + ... + ValueError + + """ + a = asarray(a, dtype=dtype, order=order) + if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all(): + raise ValueError( + "array must not contain infs or NaNs") + return a + + +def _piecewise_dispatcher(x, condlist, funclist, *args, **kw): + yield x + # support the undocumented behavior of allowing scalars + if np.iterable(condlist): + yield from condlist + + +@array_function_dispatch(_piecewise_dispatcher) +def piecewise(x, condlist, funclist, *args, **kw): + """ + Evaluate a piecewise-defined function. + + Given a set of conditions and corresponding functions, evaluate each + function on the input data wherever its condition is true. + + Parameters + ---------- + x : ndarray or scalar + The input domain. + condlist : list of bool arrays or bool scalars + Each boolean array corresponds to a function in `funclist`. Wherever + `condlist[i]` is True, `funclist[i](x)` is used as the output value. + + Each boolean array in `condlist` selects a piece of `x`, + and should therefore be of the same shape as `x`. + + The length of `condlist` must correspond to that of `funclist`. + If one extra function is given, i.e. if + ``len(funclist) == len(condlist) + 1``, then that extra function + is the default value, used wherever all conditions are false. + funclist : list of callables, f(x,*args,**kw), or scalars + Each function is evaluated over `x` wherever its corresponding + condition is True. It should take a 1d array as input and give a 1d + array or a scalar value as output. If, instead of a callable, + a scalar is provided then a constant function (``lambda x: scalar``) is + assumed. + args : tuple, optional + Any further arguments given to `piecewise` are passed to the functions + upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then + each function is called as ``f(x, 1, 'a')``. + kw : dict, optional + Keyword arguments used in calling `piecewise` are passed to the + functions upon execution, i.e., if called + ``piecewise(..., ..., alpha=1)``, then each function is called as + ``f(x, alpha=1)``. + + Returns + ------- + out : ndarray + The output is the same shape and type as x and is found by + calling the functions in `funclist` on the appropriate portions of `x`, + as defined by the boolean arrays in `condlist`. Portions not covered + by any condition have a default value of 0. + + + See Also + -------- + choose, select, where + + Notes + ----- + This is similar to choose or select, except that functions are + evaluated on elements of `x` that satisfy the corresponding condition from + `condlist`. + + The result is:: + + |-- + |funclist[0](x[condlist[0]]) + out = |funclist[1](x[condlist[1]]) + |... + |funclist[n2](x[condlist[n2]]) + |-- + + Examples + -------- + >>> import numpy as np + + Define the signum function, which is -1 for ``x < 0`` and +1 for ``x >= 0``. + + >>> x = np.linspace(-2.5, 2.5, 6) + >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1]) + array([-1., -1., -1., 1., 1., 1.]) + + Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for + ``x >= 0``. + + >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x]) + array([2.5, 1.5, 0.5, 0.5, 1.5, 2.5]) + + Apply the same function to a scalar value. + + >>> y = -2 + >>> np.piecewise(y, [y < 0, y >= 0], [lambda x: -x, lambda x: x]) + array(2) + + """ + x = asanyarray(x) + n2 = len(funclist) + + # undocumented: single condition is promoted to a list of one condition + if isscalar(condlist) or ( + not isinstance(condlist[0], (list, ndarray)) and x.ndim != 0): + condlist = [condlist] + + condlist = asarray(condlist, dtype=bool) + n = len(condlist) + + if n == n2 - 1: # compute the "otherwise" condition. + condelse = ~np.any(condlist, axis=0, keepdims=True) + condlist = np.concatenate([condlist, condelse], axis=0) + n += 1 + elif n != n2: + raise ValueError( + f"with {n} condition(s), either {n} or {n + 1} functions are expected" + ) + + y = zeros_like(x) + for cond, func in zip(condlist, funclist): + if not isinstance(func, collections.abc.Callable): + y[cond] = func + else: + vals = x[cond] + if vals.size > 0: + y[cond] = func(vals, *args, **kw) + + return y + + +def _select_dispatcher(condlist, choicelist, default=None): + yield from condlist + yield from choicelist + + +@array_function_dispatch(_select_dispatcher) +def select(condlist, choicelist, default=0): + """ + Return an array drawn from elements in choicelist, depending on conditions. + + Parameters + ---------- + condlist : list of bool ndarrays + The list of conditions which determine from which array in `choicelist` + the output elements are taken. When multiple conditions are satisfied, + the first one encountered in `condlist` is used. + choicelist : list of ndarrays + The list of arrays from which the output elements are taken. It has + to be of the same length as `condlist`. + default : array_like, optional + The element inserted in `output` when all conditions evaluate to False. + + Returns + ------- + output : ndarray + The output at position m is the m-th element of the array in + `choicelist` where the m-th element of the corresponding array in + `condlist` is True. + + See Also + -------- + where : Return elements from one of two arrays depending on condition. + take, choose, compress, diag, diagonal + + Examples + -------- + >>> import numpy as np + + Beginning with an array of integers from 0 to 5 (inclusive), + elements less than ``3`` are negated, elements greater than ``3`` + are squared, and elements not meeting either of these conditions + (exactly ``3``) are replaced with a `default` value of ``42``. + + >>> x = np.arange(6) + >>> condlist = [x<3, x>3] + >>> choicelist = [-x, x**2] + >>> np.select(condlist, choicelist, 42) + array([ 0, -1, -2, 42, 16, 25]) + + When multiple conditions are satisfied, the first one encountered in + `condlist` is used. + + >>> condlist = [x<=4, x>3] + >>> choicelist = [x, x**2] + >>> np.select(condlist, choicelist, 55) + array([ 0, 1, 2, 3, 4, 25]) + + """ + # Check the size of condlist and choicelist are the same, or abort. + if len(condlist) != len(choicelist): + raise ValueError( + 'list of cases must be same length as list of conditions') + + # Now that the dtype is known, handle the deprecated select([], []) case + if len(condlist) == 0: + raise ValueError("select with an empty condition list is not possible") + + # TODO: This preserves the Python int, float, complex manually to get the + # right `result_type` with NEP 50. Most likely we will grow a better + # way to spell this (and this can be replaced). + choicelist = [ + choice if type(choice) in (int, float, complex) else np.asarray(choice) + for choice in choicelist] + choicelist.append(default if type(default) in (int, float, complex) + else np.asarray(default)) + + try: + dtype = np.result_type(*choicelist) + except TypeError as e: + msg = f'Choicelist and default value do not have a common dtype: {e}' + raise TypeError(msg) from None + + # Convert conditions to arrays and broadcast conditions and choices + # as the shape is needed for the result. Doing it separately optimizes + # for example when all choices are scalars. + condlist = np.broadcast_arrays(*condlist) + choicelist = np.broadcast_arrays(*choicelist) + + # If cond array is not an ndarray in boolean format or scalar bool, abort. + for i, cond in enumerate(condlist): + if cond.dtype.type is not np.bool: + raise TypeError( + f'invalid entry {i} in condlist: should be boolean ndarray') + + if choicelist[0].ndim == 0: + # This may be common, so avoid the call. + result_shape = condlist[0].shape + else: + result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape + + result = np.full(result_shape, choicelist[-1], dtype) + + # Use np.copyto to burn each choicelist array onto result, using the + # corresponding condlist as a boolean mask. This is done in reverse + # order since the first choice should take precedence. + choicelist = choicelist[-2::-1] + condlist = condlist[::-1] + for choice, cond in zip(choicelist, condlist): + np.copyto(result, choice, where=cond) + + return result + + +def _copy_dispatcher(a, order=None, subok=None): + return (a,) + + +@array_function_dispatch(_copy_dispatcher) +def copy(a, order='K', subok=False): + """ + Return an array copy of the given object. + + Parameters + ---------- + a : array_like + Input data. + order : {'C', 'F', 'A', 'K'}, optional + Controls the memory layout of the copy. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. (Note that this function and :meth:`ndarray.copy` are very + similar, but have different default values for their order= + arguments.) + subok : bool, optional + If True, then sub-classes will be passed-through, otherwise the + returned array will be forced to be a base-class array (defaults to False). + + Returns + ------- + arr : ndarray + Array interpretation of `a`. + + See Also + -------- + ndarray.copy : Preferred method for creating an array copy + + Notes + ----- + This is equivalent to: + + >>> np.array(a, copy=True) #doctest: +SKIP + + The copy made of the data is shallow, i.e., for arrays with object dtype, + the new array will point to the same objects. + See Examples from `ndarray.copy`. + + Examples + -------- + >>> import numpy as np + + Create an array x, with a reference y and a copy z: + + >>> x = np.array([1, 2, 3]) + >>> y = x + >>> z = np.copy(x) + + Note that, when we modify x, y changes, but not z: + + >>> x[0] = 10 + >>> x[0] == y[0] + True + >>> x[0] == z[0] + False + + Note that, np.copy clears previously set WRITEABLE=False flag. + + >>> a = np.array([1, 2, 3]) + >>> a.flags["WRITEABLE"] = False + >>> b = np.copy(a) + >>> b.flags["WRITEABLE"] + True + >>> b[0] = 3 + >>> b + array([3, 2, 3]) + """ + return array(a, order=order, subok=subok, copy=True) + +# Basic operations + + +def _gradient_dispatcher(f, *varargs, axis=None, edge_order=None): + yield f + yield from varargs + + +@array_function_dispatch(_gradient_dispatcher) +def gradient(f, *varargs, axis=None, edge_order=1): + """ + Return the gradient of an N-dimensional array. + + The gradient is computed using second order accurate central differences + in the interior points and either first or second order accurate one-sides + (forward or backwards) differences at the boundaries. + The returned gradient hence has the same shape as the input array. + + Parameters + ---------- + f : array_like + An N-dimensional array containing samples of a scalar function. + varargs : list of scalar or array, optional + Spacing between f values. Default unitary spacing for all dimensions. + Spacing can be specified using: + + 1. single scalar to specify a sample distance for all dimensions. + 2. N scalars to specify a constant sample distance for each dimension. + i.e. `dx`, `dy`, `dz`, ... + 3. N arrays to specify the coordinates of the values along each + dimension of F. The length of the array must match the size of + the corresponding dimension + 4. Any combination of N scalars/arrays with the meaning of 2. and 3. + + If `axis` is given, the number of varargs must equal the number of axes + specified in the axis parameter. + Default: 1. (see Examples below). + + edge_order : {1, 2}, optional + Gradient is calculated using N-th order accurate differences + at the boundaries. Default: 1. + axis : None or int or tuple of ints, optional + Gradient is calculated only along the given axis or axes + The default (axis = None) is to calculate the gradient for all the axes + of the input array. axis may be negative, in which case it counts from + the last to the first axis. + + Returns + ------- + gradient : ndarray or tuple of ndarray + A tuple of ndarrays (or a single ndarray if there is only one + dimension) corresponding to the derivatives of f with respect + to each dimension. Each derivative has the same shape as f. + + Examples + -------- + >>> import numpy as np + >>> f = np.array([1, 2, 4, 7, 11, 16]) + >>> np.gradient(f) + array([1. , 1.5, 2.5, 3.5, 4.5, 5. ]) + >>> np.gradient(f, 2) + array([0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ]) + + Spacing can be also specified with an array that represents the coordinates + of the values F along the dimensions. + For instance a uniform spacing: + + >>> x = np.arange(f.size) + >>> np.gradient(f, x) + array([1. , 1.5, 2.5, 3.5, 4.5, 5. ]) + + Or a non uniform one: + + >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.]) + >>> np.gradient(f, x) + array([1. , 3. , 3.5, 6.7, 6.9, 2.5]) + + For two dimensional arrays, the return will be two arrays ordered by + axis. In this example the first array stands for the gradient in + rows and the second one in columns direction: + + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]])) + (array([[ 2., 2., -1.], + [ 2., 2., -1.]]), + array([[1. , 2.5, 4. ], + [1. , 1. , 1. ]])) + + In this example the spacing is also specified: + uniform for axis=0 and non uniform for axis=1 + + >>> dx = 2. + >>> y = [1., 1.5, 3.5] + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]]), dx, y) + (array([[ 1. , 1. , -0.5], + [ 1. , 1. , -0.5]]), + array([[2. , 2. , 2. ], + [2. , 1.7, 0.5]])) + + It is possible to specify how boundaries are treated using `edge_order` + + >>> x = np.array([0, 1, 2, 3, 4]) + >>> f = x**2 + >>> np.gradient(f, edge_order=1) + array([1., 2., 4., 6., 7.]) + >>> np.gradient(f, edge_order=2) + array([0., 2., 4., 6., 8.]) + + The `axis` keyword can be used to specify a subset of axes of which the + gradient is calculated + + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]]), axis=0) + array([[ 2., 2., -1.], + [ 2., 2., -1.]]) + + The `varargs` argument defines the spacing between sample points in the + input array. It can take two forms: + + 1. An array, specifying coordinates, which may be unevenly spaced: + + >>> x = np.array([0., 2., 3., 6., 8.]) + >>> y = x ** 2 + >>> np.gradient(y, x, edge_order=2) + array([ 0., 4., 6., 12., 16.]) + + 2. A scalar, representing the fixed sample distance: + + >>> dx = 2 + >>> x = np.array([0., 2., 4., 6., 8.]) + >>> y = x ** 2 + >>> np.gradient(y, dx, edge_order=2) + array([ 0., 4., 8., 12., 16.]) + + It's possible to provide different data for spacing along each dimension. + The number of arguments must match the number of dimensions in the input + data. + + >>> dx = 2 + >>> dy = 3 + >>> x = np.arange(0, 6, dx) + >>> y = np.arange(0, 9, dy) + >>> xs, ys = np.meshgrid(x, y) + >>> zs = xs + 2 * ys + >>> np.gradient(zs, dy, dx) # Passing two scalars + (array([[2., 2., 2.], + [2., 2., 2.], + [2., 2., 2.]]), + array([[1., 1., 1.], + [1., 1., 1.], + [1., 1., 1.]])) + + Mixing scalars and arrays is also allowed: + + >>> np.gradient(zs, y, dx) # Passing one array and one scalar + (array([[2., 2., 2.], + [2., 2., 2.], + [2., 2., 2.]]), + array([[1., 1., 1.], + [1., 1., 1.], + [1., 1., 1.]])) + + Notes + ----- + Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continuous + derivatives) and let :math:`h_{*}` be a non-homogeneous stepsize, we + minimize the "consistency error" :math:`\\eta_{i}` between the true gradient + and its estimate from a linear combination of the neighboring grid-points: + + .. math:: + + \\eta_{i} = f_{i}^{\\left(1\\right)} - + \\left[ \\alpha f\\left(x_{i}\\right) + + \\beta f\\left(x_{i} + h_{d}\\right) + + \\gamma f\\left(x_{i}-h_{s}\\right) + \\right] + + By substituting :math:`f(x_{i} + h_{d})` and :math:`f(x_{i} - h_{s})` + with their Taylor series expansion, this translates into solving + the following the linear system: + + .. math:: + + \\left\\{ + \\begin{array}{r} + \\alpha+\\beta+\\gamma=0 \\\\ + \\beta h_{d}-\\gamma h_{s}=1 \\\\ + \\beta h_{d}^{2}+\\gamma h_{s}^{2}=0 + \\end{array} + \\right. + + The resulting approximation of :math:`f_{i}^{(1)}` is the following: + + .. math:: + + \\hat f_{i}^{(1)} = + \\frac{ + h_{s}^{2}f\\left(x_{i} + h_{d}\\right) + + \\left(h_{d}^{2} - h_{s}^{2}\\right)f\\left(x_{i}\\right) + - h_{d}^{2}f\\left(x_{i}-h_{s}\\right)} + { h_{s}h_{d}\\left(h_{d} + h_{s}\\right)} + + \\mathcal{O}\\left(\\frac{h_{d}h_{s}^{2} + + h_{s}h_{d}^{2}}{h_{d} + + h_{s}}\\right) + + It is worth noting that if :math:`h_{s}=h_{d}` + (i.e., data are evenly spaced) + we find the standard second order approximation: + + .. math:: + + \\hat f_{i}^{(1)}= + \\frac{f\\left(x_{i+1}\\right) - f\\left(x_{i-1}\\right)}{2h} + + \\mathcal{O}\\left(h^{2}\\right) + + With a similar procedure the forward/backward approximations used for + boundaries can be derived. + + References + ---------- + .. [1] Quarteroni A., Sacco R., Saleri F. (2007) Numerical Mathematics + (Texts in Applied Mathematics). New York: Springer. + .. [2] Durran D. R. (1999) Numerical Methods for Wave Equations + in Geophysical Fluid Dynamics. New York: Springer. + .. [3] Fornberg B. (1988) Generation of Finite Difference Formulas on + Arbitrarily Spaced Grids, + Mathematics of Computation 51, no. 184 : 699-706. + `PDF `_. + """ + f = np.asanyarray(f) + N = f.ndim # number of dimensions + + if axis is None: + axes = tuple(range(N)) + else: + axes = _nx.normalize_axis_tuple(axis, N) + + len_axes = len(axes) + n = len(varargs) + if n == 0: + # no spacing argument - use 1 in all axes + dx = [1.0] * len_axes + elif n == 1 and np.ndim(varargs[0]) == 0: + # single scalar for all axes + dx = varargs * len_axes + elif n == len_axes: + # scalar or 1d array for each axis + dx = list(varargs) + for i, distances in enumerate(dx): + distances = np.asanyarray(distances) + if distances.ndim == 0: + continue + elif distances.ndim != 1: + raise ValueError("distances must be either scalars or 1d") + if len(distances) != f.shape[axes[i]]: + raise ValueError("when 1d, distances must match " + "the length of the corresponding dimension") + if np.issubdtype(distances.dtype, np.integer): + # Convert numpy integer types to float64 to avoid modular + # arithmetic in np.diff(distances). + distances = distances.astype(np.float64) + diffx = np.diff(distances) + # if distances are constant reduce to the scalar case + # since it brings a consistent speedup + if (diffx == diffx[0]).all(): + diffx = diffx[0] + dx[i] = diffx + else: + raise TypeError("invalid number of arguments") + + if edge_order > 2: + raise ValueError("'edge_order' greater than 2 not supported") + + # use central differences on interior and one-sided differences on the + # endpoints. This preserves second order-accuracy over the full domain. + + outvals = [] + + # create slice objects --- initially all are [:, :, ..., :] + slice1 = [slice(None)] * N + slice2 = [slice(None)] * N + slice3 = [slice(None)] * N + slice4 = [slice(None)] * N + + otype = f.dtype + if otype.type is np.datetime64: + # the timedelta dtype with the same unit information + otype = np.dtype(otype.name.replace('datetime', 'timedelta')) + # view as timedelta to allow addition + f = f.view(otype) + elif otype.type is np.timedelta64: + pass + elif np.issubdtype(otype, np.inexact): + pass + else: + # All other types convert to floating point. + # First check if f is a numpy integer type; if so, convert f to float64 + # to avoid modular arithmetic when computing the changes in f. + if np.issubdtype(otype, np.integer): + f = f.astype(np.float64) + otype = np.float64 + + for axis, ax_dx in zip(axes, dx): + if f.shape[axis] < edge_order + 1: + raise ValueError( + "Shape of array too small to calculate a numerical gradient, " + "at least (edge_order + 1) elements are required.") + # result allocation + out = np.empty_like(f, dtype=otype) + + # spacing for the current axis + uniform_spacing = np.ndim(ax_dx) == 0 + + # Numerical differentiation: 2nd order interior + slice1[axis] = slice(1, -1) + slice2[axis] = slice(None, -2) + slice3[axis] = slice(1, -1) + slice4[axis] = slice(2, None) + + if uniform_spacing: + out[tuple(slice1)] = (f[tuple(slice4)] - f[tuple(slice2)]) / (2. * ax_dx) + else: + dx1 = ax_dx[0:-1] + dx2 = ax_dx[1:] + a = -(dx2) / (dx1 * (dx1 + dx2)) + b = (dx2 - dx1) / (dx1 * dx2) + c = dx1 / (dx2 * (dx1 + dx2)) + # fix the shape for broadcasting + shape = np.ones(N, dtype=int) + shape[axis] = -1 + + a = a.reshape(shape) + b = b.reshape(shape) + c = c.reshape(shape) + # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:] + out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] \ + + c * f[tuple(slice4)] + + # Numerical differentiation: 1st order edges + if edge_order == 1: + slice1[axis] = 0 + slice2[axis] = 1 + slice3[axis] = 0 + dx_0 = ax_dx if uniform_spacing else ax_dx[0] + # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0]) + out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_0 + + slice1[axis] = -1 + slice2[axis] = -1 + slice3[axis] = -2 + dx_n = ax_dx if uniform_spacing else ax_dx[-1] + # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2]) + out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_n + + # Numerical differentiation: 2nd order edges + else: + slice1[axis] = 0 + slice2[axis] = 0 + slice3[axis] = 1 + slice4[axis] = 2 + if uniform_spacing: + a = -1.5 / ax_dx + b = 2. / ax_dx + c = -0.5 / ax_dx + else: + dx1 = ax_dx[0] + dx2 = ax_dx[1] + a = -(2. * dx1 + dx2) / (dx1 * (dx1 + dx2)) + b = (dx1 + dx2) / (dx1 * dx2) + c = - dx1 / (dx2 * (dx1 + dx2)) + # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2] + out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] \ + + c * f[tuple(slice4)] + + slice1[axis] = -1 + slice2[axis] = -3 + slice3[axis] = -2 + slice4[axis] = -1 + if uniform_spacing: + a = 0.5 / ax_dx + b = -2. / ax_dx + c = 1.5 / ax_dx + else: + dx1 = ax_dx[-2] + dx2 = ax_dx[-1] + a = (dx2) / (dx1 * (dx1 + dx2)) + b = - (dx2 + dx1) / (dx1 * dx2) + c = (2. * dx2 + dx1) / (dx2 * (dx1 + dx2)) + # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1] + out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] \ + + c * f[tuple(slice4)] + + outvals.append(out) + + # reset the slice object in this dimension to ":" + slice1[axis] = slice(None) + slice2[axis] = slice(None) + slice3[axis] = slice(None) + slice4[axis] = slice(None) + + if len_axes == 1: + return outvals[0] + return tuple(outvals) + + +def _diff_dispatcher(a, n=None, axis=None, prepend=None, append=None): + return (a, prepend, append) + + +@array_function_dispatch(_diff_dispatcher) +def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue): + """ + Calculate the n-th discrete difference along the given axis. + + The first difference is given by ``out[i] = a[i+1] - a[i]`` along + the given axis, higher differences are calculated by using `diff` + recursively. + + Parameters + ---------- + a : array_like + Input array + n : int, optional + The number of times values are differenced. If zero, the input + is returned as-is. + axis : int, optional + The axis along which the difference is taken, default is the + last axis. + prepend, append : array_like, optional + Values to prepend or append to `a` along axis prior to + performing the difference. Scalar values are expanded to + arrays with length 1 in the direction of axis and the shape + of the input array in along all other axes. Otherwise the + dimension and shape must match `a` except along axis. + + Returns + ------- + diff : ndarray + The n-th differences. The shape of the output is the same as `a` + except along `axis` where the dimension is smaller by `n`. The + type of the output is the same as the type of the difference + between any two elements of `a`. This is the same as the type of + `a` in most cases. A notable exception is `datetime64`, which + results in a `timedelta64` output array. + + See Also + -------- + gradient, ediff1d, cumsum + + Notes + ----- + Type is preserved for boolean arrays, so the result will contain + `False` when consecutive elements are the same and `True` when they + differ. + + For unsigned integer arrays, the results will also be unsigned. This + should not be surprising, as the result is consistent with + calculating the difference directly: + + >>> u8_arr = np.array([1, 0], dtype=np.uint8) + >>> np.diff(u8_arr) + array([255], dtype=uint8) + >>> u8_arr[1,...] - u8_arr[0,...] + np.uint8(255) + + If this is not desirable, then the array should be cast to a larger + integer type first: + + >>> i16_arr = u8_arr.astype(np.int16) + >>> np.diff(i16_arr) + array([-1], dtype=int16) + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 4, 7, 0]) + >>> np.diff(x) + array([ 1, 2, 3, -7]) + >>> np.diff(x, n=2) + array([ 1, 1, -10]) + + >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) + >>> np.diff(x) + array([[2, 3, 4], + [5, 1, 2]]) + >>> np.diff(x, axis=0) + array([[-1, 2, 0, -2]]) + + >>> x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64) + >>> np.diff(x) + array([1, 1], dtype='timedelta64[D]') + + """ + if n == 0: + return a + if n < 0: + raise ValueError( + "order must be non-negative but got " + repr(n)) + + a = asanyarray(a) + nd = a.ndim + if nd == 0: + raise ValueError("diff requires input that is at least one dimensional") + axis = normalize_axis_index(axis, nd) + + combined = [] + if prepend is not np._NoValue: + prepend = np.asanyarray(prepend) + if prepend.ndim == 0: + shape = list(a.shape) + shape[axis] = 1 + prepend = np.broadcast_to(prepend, tuple(shape)) + combined.append(prepend) + + combined.append(a) + + if append is not np._NoValue: + append = np.asanyarray(append) + if append.ndim == 0: + shape = list(a.shape) + shape[axis] = 1 + append = np.broadcast_to(append, tuple(shape)) + combined.append(append) + + if len(combined) > 1: + a = np.concatenate(combined, axis) + + slice1 = [slice(None)] * nd + slice2 = [slice(None)] * nd + slice1[axis] = slice(1, None) + slice2[axis] = slice(None, -1) + slice1 = tuple(slice1) + slice2 = tuple(slice2) + + op = not_equal if a.dtype == np.bool else subtract + for _ in range(n): + a = op(a[slice1], a[slice2]) + + return a + + +def _interp_dispatcher(x, xp, fp, left=None, right=None, period=None): + return (x, xp, fp) + + +@array_function_dispatch(_interp_dispatcher) +def interp(x, xp, fp, left=None, right=None, period=None): + """ + One-dimensional linear interpolation for monotonically increasing sample points. + + Returns the one-dimensional piecewise linear interpolant to a function + with given discrete data points (`xp`, `fp`), evaluated at `x`. + + Parameters + ---------- + x : array_like + The x-coordinates at which to evaluate the interpolated values. + + xp : 1-D sequence of floats + The x-coordinates of the data points, must be increasing if argument + `period` is not specified. Otherwise, `xp` is internally sorted after + normalizing the periodic boundaries with ``xp = xp % period``. + + fp : 1-D sequence of float or complex + The y-coordinates of the data points, same length as `xp`. + + left : optional float or complex corresponding to fp + Value to return for `x < xp[0]`, default is `fp[0]`. + + right : optional float or complex corresponding to fp + Value to return for `x > xp[-1]`, default is `fp[-1]`. + + period : None or float, optional + A period for the x-coordinates. This parameter allows the proper + interpolation of angular x-coordinates. Parameters `left` and `right` + are ignored if `period` is specified. + + Returns + ------- + y : float or complex (corresponding to fp) or ndarray + The interpolated values, same shape as `x`. + + Raises + ------ + ValueError + If `xp` and `fp` have different length + If `xp` or `fp` are not 1-D sequences + If `period == 0` + + See Also + -------- + scipy.interpolate + + Warnings + -------- + The x-coordinate sequence is expected to be increasing, but this is not + explicitly enforced. However, if the sequence `xp` is non-increasing, + interpolation results are meaningless. + + Note that, since NaN is unsortable, `xp` also cannot contain NaNs. + + A simple check for `xp` being strictly increasing is:: + + np.all(np.diff(xp) > 0) + + Examples + -------- + >>> import numpy as np + >>> xp = [1, 2, 3] + >>> fp = [3, 2, 0] + >>> np.interp(2.5, xp, fp) + 1.0 + >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp) + array([3. , 3. , 2.5 , 0.56, 0. ]) + >>> UNDEF = -99.0 + >>> np.interp(3.14, xp, fp, right=UNDEF) + -99.0 + + Plot an interpolant to the sine function: + + >>> x = np.linspace(0, 2*np.pi, 10) + >>> y = np.sin(x) + >>> xvals = np.linspace(0, 2*np.pi, 50) + >>> yinterp = np.interp(xvals, x, y) + >>> import matplotlib.pyplot as plt + >>> plt.plot(x, y, 'o') + [] + >>> plt.plot(xvals, yinterp, '-x') + [] + >>> plt.show() + + Interpolation with periodic x-coordinates: + + >>> x = [-180, -170, -185, 185, -10, -5, 0, 365] + >>> xp = [190, -190, 350, -350] + >>> fp = [5, 10, 3, 4] + >>> np.interp(x, xp, fp, period=360) + array([7.5 , 5. , 8.75, 6.25, 3. , 3.25, 3.5 , 3.75]) + + Complex interpolation: + + >>> x = [1.5, 4.0] + >>> xp = [2,3,5] + >>> fp = [1.0j, 0, 2+3j] + >>> np.interp(x, xp, fp) + array([0.+1.j , 1.+1.5j]) + + """ + + fp = np.asarray(fp) + + if np.iscomplexobj(fp): + interp_func = compiled_interp_complex + input_dtype = np.complex128 + else: + interp_func = compiled_interp + input_dtype = np.float64 + + if period is not None: + if period == 0: + raise ValueError("period must be a non-zero value") + period = abs(period) + left = None + right = None + + x = np.asarray(x, dtype=np.float64) + xp = np.asarray(xp, dtype=np.float64) + fp = np.asarray(fp, dtype=input_dtype) + + if xp.ndim != 1 or fp.ndim != 1: + raise ValueError("Data points must be 1-D sequences") + if xp.shape[0] != fp.shape[0]: + raise ValueError("fp and xp are not of the same length") + # normalizing periodic boundaries + x = x % period + xp = xp % period + asort_xp = np.argsort(xp) + xp = xp[asort_xp] + fp = fp[asort_xp] + xp = np.concatenate((xp[-1:] - period, xp, xp[0:1] + period)) + fp = np.concatenate((fp[-1:], fp, fp[0:1])) + + return interp_func(x, xp, fp, left, right) + + +def _angle_dispatcher(z, deg=None): + return (z,) + + +@array_function_dispatch(_angle_dispatcher) +def angle(z, deg=False): + """ + Return the angle of the complex argument. + + Parameters + ---------- + z : array_like + A complex number or sequence of complex numbers. + deg : bool, optional + Return angle in degrees if True, radians if False (default). + + Returns + ------- + angle : ndarray or scalar + The counterclockwise angle from the positive real axis on the complex + plane in the range ``(-pi, pi]``, with dtype as numpy.float64. + + See Also + -------- + arctan2 + absolute + + Notes + ----- + This function passes the imaginary and real parts of the argument to + `arctan2` to compute the result; consequently, it follows the convention + of `arctan2` when the magnitude of the argument is zero. See example. + + Examples + -------- + >>> import numpy as np + >>> np.angle([1.0, 1.0j, 1+1j]) # in radians + array([ 0. , 1.57079633, 0.78539816]) # may vary + >>> np.angle(1+1j, deg=True) # in degrees + 45.0 + >>> np.angle([0., -0., complex(0., -0.), complex(-0., -0.)]) # convention + array([ 0. , 3.14159265, -0. , -3.14159265]) + + """ + z = asanyarray(z) + if issubclass(z.dtype.type, _nx.complexfloating): + zimag = z.imag + zreal = z.real + else: + zimag = 0 + zreal = z + + a = arctan2(zimag, zreal) + if deg: + a *= 180 / pi + return a + + +def _unwrap_dispatcher(p, discont=None, axis=None, *, period=None): + return (p,) + + +@array_function_dispatch(_unwrap_dispatcher) +def unwrap(p, discont=None, axis=-1, *, period=2 * pi): + r""" + Unwrap by taking the complement of large deltas with respect to the period. + + This unwraps a signal `p` by changing elements which have an absolute + difference from their predecessor of more than ``max(discont, period/2)`` + to their `period`-complementary values. + + For the default case where `period` is :math:`2\pi` and `discont` is + :math:`\pi`, this unwraps a radian phase `p` such that adjacent differences + are never greater than :math:`\pi` by adding :math:`2k\pi` for some + integer :math:`k`. + + Parameters + ---------- + p : array_like + Input array. + discont : float, optional + Maximum discontinuity between values, default is ``period/2``. + Values below ``period/2`` are treated as if they were ``period/2``. + To have an effect different from the default, `discont` should be + larger than ``period/2``. + axis : int, optional + Axis along which unwrap will operate, default is the last axis. + period : float, optional + Size of the range over which the input wraps. By default, it is + ``2 pi``. + + .. versionadded:: 1.21.0 + + Returns + ------- + out : ndarray + Output array. + + See Also + -------- + rad2deg, deg2rad + + Notes + ----- + If the discontinuity in `p` is smaller than ``period/2``, + but larger than `discont`, no unwrapping is done because taking + the complement would only make the discontinuity larger. + + Examples + -------- + >>> import numpy as np + + >>> phase = np.linspace(0, np.pi, num=5) + >>> phase[3:] += np.pi + >>> phase + array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) # may vary + >>> np.unwrap(phase) + array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ]) # may vary + >>> np.unwrap([0, 1, 2, -1, 0], period=4) + array([0, 1, 2, 3, 4]) + >>> np.unwrap([ 1, 2, 3, 4, 5, 6, 1, 2, 3], period=6) + array([1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.unwrap([2, 3, 4, 5, 2, 3, 4, 5], period=4) + array([2, 3, 4, 5, 6, 7, 8, 9]) + >>> phase_deg = np.mod(np.linspace(0 ,720, 19), 360) - 180 + >>> np.unwrap(phase_deg, period=360) + array([-180., -140., -100., -60., -20., 20., 60., 100., 140., + 180., 220., 260., 300., 340., 380., 420., 460., 500., + 540.]) + + This example plots the unwrapping of the wrapped input signal `w`. + First generate `w`, then apply `unwrap` to get `u`. + + >>> t = np.linspace(0, 25, 801) + >>> w = np.mod(1.5 * np.sin(1.1 * t + 0.26) * (1 - t / 6 + (t / 23) ** 3), 2.0) - 1 + >>> u = np.unwrap(w, period=2.0) + + Plot `w` and `u`. + + >>> import matplotlib.pyplot as plt + >>> plt.plot(t, w, label='w (a signal wrapped to [-1, 1])') + >>> plt.plot(t, u, linewidth=2.5, alpha=0.5, label='unwrap(w, period=2)') + >>> plt.xlabel('t') + >>> plt.grid(alpha=0.6) + >>> plt.legend(framealpha=1, shadow=True) + >>> plt.show() + """ + p = asarray(p) + nd = p.ndim + dd = diff(p, axis=axis) + if discont is None: + discont = period / 2 + slice1 = [slice(None, None)] * nd # full slices + slice1[axis] = slice(1, None) + slice1 = tuple(slice1) + dtype = np.result_type(dd, period) + if _nx.issubdtype(dtype, _nx.integer): + interval_high, rem = divmod(period, 2) + boundary_ambiguous = rem == 0 + else: + interval_high = period / 2 + boundary_ambiguous = True + interval_low = -interval_high + ddmod = mod(dd - interval_low, period) + interval_low + if boundary_ambiguous: + # for `mask = (abs(dd) == period/2)`, the above line made + # `ddmod[mask] == -period/2`. correct these such that + # `ddmod[mask] == sign(dd[mask])*period/2`. + _nx.copyto(ddmod, interval_high, + where=(ddmod == interval_low) & (dd > 0)) + ph_correct = ddmod - dd + _nx.copyto(ph_correct, 0, where=abs(dd) < discont) + up = array(p, copy=True, dtype=dtype) + up[slice1] = p[slice1] + ph_correct.cumsum(axis) + return up + + +def _sort_complex(a): + return (a,) + + +@array_function_dispatch(_sort_complex) +def sort_complex(a): + """ + Sort a complex array using the real part first, then the imaginary part. + + Parameters + ---------- + a : array_like + Input array + + Returns + ------- + out : complex ndarray + Always returns a sorted complex array. + + Examples + -------- + >>> import numpy as np + >>> np.sort_complex([5, 3, 6, 2, 1]) + array([1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) + + >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j]) + array([1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j]) + + """ + b = array(a, copy=True) + b.sort() + if not issubclass(b.dtype.type, _nx.complexfloating): + if b.dtype.char in 'bhBH': + return b.astype('F') + elif b.dtype.char == 'g': + return b.astype('G') + else: + return b.astype('D') + else: + return b + + +def _arg_trim_zeros(filt): + """Return indices of the first and last non-zero element. + + Parameters + ---------- + filt : array_like + Input array. + + Returns + ------- + start, stop : ndarray + Two arrays containing the indices of the first and last non-zero + element in each dimension. + + See also + -------- + trim_zeros + + Examples + -------- + >>> import numpy as np + >>> _arg_trim_zeros(np.array([0, 0, 1, 1, 0])) + (array([2]), array([3])) + """ + nonzero = ( + np.argwhere(filt) + if filt.dtype != np.object_ + # Historically, `trim_zeros` treats `None` in an object array + # as non-zero while argwhere doesn't, account for that + else np.argwhere(filt != 0) + ) + if nonzero.size == 0: + start = stop = np.array([], dtype=np.intp) + else: + start = nonzero.min(axis=0) + stop = nonzero.max(axis=0) + return start, stop + + +def _trim_zeros(filt, trim=None, axis=None): + return (filt,) + + +@array_function_dispatch(_trim_zeros) +def trim_zeros(filt, trim='fb', axis=None): + """Remove values along a dimension which are zero along all other. + + Parameters + ---------- + filt : array_like + Input array. + trim : {"fb", "f", "b"}, optional + A string with 'f' representing trim from front and 'b' to trim from + back. By default, zeros are trimmed on both sides. + Front and back refer to the edges of a dimension, with "front" referring + to the side with the lowest index 0, and "back" referring to the highest + index (or index -1). + axis : int or sequence, optional + If None, `filt` is cropped such that the smallest bounding box is + returned that still contains all values which are not zero. + If an axis is specified, `filt` will be sliced in that dimension only + on the sides specified by `trim`. The remaining area will be the + smallest that still contains all values wich are not zero. + + .. versionadded:: 2.2.0 + + Returns + ------- + trimmed : ndarray or sequence + The result of trimming the input. The number of dimensions and the + input data type are preserved. + + Notes + ----- + For all-zero arrays, the first axis is trimmed first. + + Examples + -------- + >>> import numpy as np + >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)) + >>> np.trim_zeros(a) + array([1, 2, 3, 0, 2, 1]) + + >>> np.trim_zeros(a, trim='b') + array([0, 0, 0, ..., 0, 2, 1]) + + Multiple dimensions are supported. + + >>> b = np.array([[0, 0, 2, 3, 0, 0], + ... [0, 1, 0, 3, 0, 0], + ... [0, 0, 0, 0, 0, 0]]) + >>> np.trim_zeros(b) + array([[0, 2, 3], + [1, 0, 3]]) + + >>> np.trim_zeros(b, axis=-1) + array([[0, 2, 3], + [1, 0, 3], + [0, 0, 0]]) + + The input data type is preserved, list/tuple in means list/tuple out. + + >>> np.trim_zeros([0, 1, 2, 0]) + [1, 2] + + """ + filt_ = np.asarray(filt) + + trim = trim.lower() + if trim not in {"fb", "bf", "f", "b"}: + raise ValueError(f"unexpected character(s) in `trim`: {trim!r}") + if axis is None: + axis_tuple = tuple(range(filt_.ndim)) + else: + axis_tuple = _nx.normalize_axis_tuple(axis, filt_.ndim, argname="axis") + + if not axis_tuple: + # No trimming requested -> return input unmodified. + return filt + + start, stop = _arg_trim_zeros(filt_) + stop += 1 # Adjust for slicing + + if start.size == 0: + # filt is all-zero -> assign same values to start and stop so that + # resulting slice will be empty + start = stop = np.zeros(filt_.ndim, dtype=np.intp) + else: + if 'f' not in trim: + start = (None,) * filt_.ndim + if 'b' not in trim: + stop = (None,) * filt_.ndim + + sl = tuple(slice(start[ax], stop[ax]) if ax in axis_tuple else slice(None) + for ax in range(filt_.ndim)) + if len(sl) == 1: + # filt is 1D -> avoid multi-dimensional slicing to preserve + # non-array input types + return filt[sl[0]] + return filt[sl] + + +def _extract_dispatcher(condition, arr): + return (condition, arr) + + +@array_function_dispatch(_extract_dispatcher) +def extract(condition, arr): + """ + Return the elements of an array that satisfy some condition. + + This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If + `condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``. + + Note that `place` does the exact opposite of `extract`. + + Parameters + ---------- + condition : array_like + An array whose nonzero or True entries indicate the elements of `arr` + to extract. + arr : array_like + Input array of the same size as `condition`. + + Returns + ------- + extract : ndarray + Rank 1 array of values from `arr` where `condition` is True. + + See Also + -------- + take, put, copyto, compress, place + + Examples + -------- + >>> import numpy as np + >>> arr = np.arange(12).reshape((3, 4)) + >>> arr + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> condition = np.mod(arr, 3)==0 + >>> condition + array([[ True, False, False, True], + [False, False, True, False], + [False, True, False, False]]) + >>> np.extract(condition, arr) + array([0, 3, 6, 9]) + + + If `condition` is boolean: + + >>> arr[condition] + array([0, 3, 6, 9]) + + """ + return _nx.take(ravel(arr), nonzero(ravel(condition))[0]) + + +def _place_dispatcher(arr, mask, vals): + return (arr, mask, vals) + + +@array_function_dispatch(_place_dispatcher) +def place(arr, mask, vals): + """ + Change elements of an array based on conditional and input values. + + Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that + `place` uses the first N elements of `vals`, where N is the number of + True values in `mask`, while `copyto` uses the elements where `mask` + is True. + + Note that `extract` does the exact opposite of `place`. + + Parameters + ---------- + arr : ndarray + Array to put data into. + mask : array_like + Boolean mask array. Must have the same size as `a`. + vals : 1-D sequence + Values to put into `a`. Only the first N elements are used, where + N is the number of True values in `mask`. If `vals` is smaller + than N, it will be repeated, and if elements of `a` are to be masked, + this sequence must be non-empty. + + See Also + -------- + copyto, put, take, extract + + Examples + -------- + >>> import numpy as np + >>> arr = np.arange(6).reshape(2, 3) + >>> np.place(arr, arr>2, [44, 55]) + >>> arr + array([[ 0, 1, 2], + [44, 55, 44]]) + + """ + return _place(arr, mask, vals) + + +# See https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html +_DIMENSION_NAME = r'\w+' +_CORE_DIMENSION_LIST = f'(?:{_DIMENSION_NAME}(?:,{_DIMENSION_NAME})*)?' +_ARGUMENT = fr'\({_CORE_DIMENSION_LIST}\)' +_ARGUMENT_LIST = f'{_ARGUMENT}(?:,{_ARGUMENT})*' +_SIGNATURE = f'^{_ARGUMENT_LIST}->{_ARGUMENT_LIST}$' + + +def _parse_gufunc_signature(signature): + """ + Parse string signatures for a generalized universal function. + + Arguments + --------- + signature : string + Generalized universal function signature, e.g., ``(m,n),(n,p)->(m,p)`` + for ``np.matmul``. + + Returns + ------- + Tuple of input and output core dimensions parsed from the signature, each + of the form List[Tuple[str, ...]]. + """ + signature = re.sub(r'\s+', '', signature) + + if not re.match(_SIGNATURE, signature): + raise ValueError( + f'not a valid gufunc signature: {signature}') + return tuple([tuple(re.findall(_DIMENSION_NAME, arg)) + for arg in re.findall(_ARGUMENT, arg_list)] + for arg_list in signature.split('->')) + + +def _update_dim_sizes(dim_sizes, arg, core_dims): + """ + Incrementally check and update core dimension sizes for a single argument. + + Arguments + --------- + dim_sizes : Dict[str, int] + Sizes of existing core dimensions. Will be updated in-place. + arg : ndarray + Argument to examine. + core_dims : Tuple[str, ...] + Core dimensions for this argument. + """ + if not core_dims: + return + + num_core_dims = len(core_dims) + if arg.ndim < num_core_dims: + raise ValueError( + '%d-dimensional argument does not have enough ' + 'dimensions for all core dimensions %r' + % (arg.ndim, core_dims)) + + core_shape = arg.shape[-num_core_dims:] + for dim, size in zip(core_dims, core_shape): + if dim in dim_sizes: + if size != dim_sizes[dim]: + raise ValueError( + 'inconsistent size for core dimension %r: %r vs %r' + % (dim, size, dim_sizes[dim])) + else: + dim_sizes[dim] = size + + +def _parse_input_dimensions(args, input_core_dims): + """ + Parse broadcast and core dimensions for vectorize with a signature. + + Arguments + --------- + args : Tuple[ndarray, ...] + Tuple of input arguments to examine. + input_core_dims : List[Tuple[str, ...]] + List of core dimensions corresponding to each input. + + Returns + ------- + broadcast_shape : Tuple[int, ...] + Common shape to broadcast all non-core dimensions to. + dim_sizes : Dict[str, int] + Common sizes for named core dimensions. + """ + broadcast_args = [] + dim_sizes = {} + for arg, core_dims in zip(args, input_core_dims): + _update_dim_sizes(dim_sizes, arg, core_dims) + ndim = arg.ndim - len(core_dims) + dummy_array = np.lib.stride_tricks.as_strided(0, arg.shape[:ndim]) + broadcast_args.append(dummy_array) + broadcast_shape = np.lib._stride_tricks_impl._broadcast_shape( + *broadcast_args + ) + return broadcast_shape, dim_sizes + + +def _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims): + """Helper for calculating broadcast shapes with core dimensions.""" + return [broadcast_shape + tuple(dim_sizes[dim] for dim in core_dims) + for core_dims in list_of_core_dims] + + +def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes, + results=None): + """Helper for creating output arrays in vectorize.""" + shapes = _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims) + if dtypes is None: + dtypes = [None] * len(shapes) + if results is None: + arrays = tuple(np.empty(shape=shape, dtype=dtype) + for shape, dtype in zip(shapes, dtypes)) + else: + arrays = tuple(np.empty_like(result, shape=shape, dtype=dtype) + for result, shape, dtype + in zip(results, shapes, dtypes)) + return arrays + + +def _get_vectorize_dtype(dtype): + if dtype.char in "SU": + return dtype.char + return dtype + + +@set_module('numpy') +class vectorize: + """ + vectorize(pyfunc=np._NoValue, otypes=None, doc=None, excluded=None, + cache=False, signature=None) + + Returns an object that acts like pyfunc, but takes arrays as input. + + Define a vectorized function which takes a nested sequence of objects or + numpy arrays as inputs and returns a single numpy array or a tuple of numpy + arrays. The vectorized function evaluates `pyfunc` over successive tuples + of the input arrays like the python map function, except it uses the + broadcasting rules of numpy. + + The data type of the output of `vectorized` is determined by calling + the function with the first element of the input. This can be avoided + by specifying the `otypes` argument. + + Parameters + ---------- + pyfunc : callable, optional + A python function or method. + Can be omitted to produce a decorator with keyword arguments. + otypes : str or list of dtypes, optional + The output data type. It must be specified as either a string of + typecode characters or a list of data type specifiers. There should + be one data type specifier for each output. + doc : str, optional + The docstring for the function. If None, the docstring will be the + ``pyfunc.__doc__``. + excluded : set, optional + Set of strings or integers representing the positional or keyword + arguments for which the function will not be vectorized. These will be + passed directly to `pyfunc` unmodified. + + cache : bool, optional + If neither `otypes` nor `signature` are provided, and `cache` is ``True``, then + cache the number of outputs. + + signature : string, optional + Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for + vectorized matrix-vector multiplication. If provided, ``pyfunc`` will + be called with (and expected to return) arrays with shapes given by the + size of corresponding core dimensions. By default, ``pyfunc`` is + assumed to take scalars as input and output. + + Returns + ------- + out : callable + A vectorized function if ``pyfunc`` was provided, + a decorator otherwise. + + See Also + -------- + frompyfunc : Takes an arbitrary Python function and returns a ufunc + + Notes + ----- + The `vectorize` function is provided primarily for convenience, not for + performance. The implementation is essentially a for loop. + + If neither `otypes` nor `signature` are specified, then a call to the function with + the first argument will be used to determine the number of outputs. The results of + this call will be cached if `cache` is `True` to prevent calling the function + twice. However, to implement the cache, the original function must be wrapped + which will slow down subsequent calls, so only do this if your function is + expensive. + + The new keyword argument interface and `excluded` argument support + further degrades performance. + + References + ---------- + .. [1] :doc:`/reference/c-api/generalized-ufuncs` + + Examples + -------- + >>> import numpy as np + >>> def myfunc(a, b): + ... "Return a-b if a>b, otherwise return a+b" + ... if a > b: + ... return a - b + ... else: + ... return a + b + + >>> vfunc = np.vectorize(myfunc) + >>> vfunc([1, 2, 3, 4], 2) + array([3, 4, 1, 2]) + + The docstring is taken from the input function to `vectorize` unless it + is specified: + + >>> vfunc.__doc__ + 'Return a-b if a>b, otherwise return a+b' + >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`') + >>> vfunc.__doc__ + 'Vectorized `myfunc`' + + The output type is determined by evaluating the first element of the input, + unless it is specified: + + >>> out = vfunc([1, 2, 3, 4], 2) + >>> type(out[0]) + + >>> vfunc = np.vectorize(myfunc, otypes=[float]) + >>> out = vfunc([1, 2, 3, 4], 2) + >>> type(out[0]) + + + The `excluded` argument can be used to prevent vectorizing over certain + arguments. This can be useful for array-like arguments of a fixed length + such as the coefficients for a polynomial as in `polyval`: + + >>> def mypolyval(p, x): + ... _p = list(p) + ... res = _p.pop(0) + ... while _p: + ... res = res*x + _p.pop(0) + ... return res + + Here, we exclude the zeroth argument from vectorization whether it is + passed by position or keyword. + + >>> vpolyval = np.vectorize(mypolyval, excluded={0, 'p'}) + >>> vpolyval([1, 2, 3], x=[0, 1]) + array([3, 6]) + >>> vpolyval(p=[1, 2, 3], x=[0, 1]) + array([3, 6]) + + The `signature` argument allows for vectorizing functions that act on + non-scalar arrays of fixed length. For example, you can use it for a + vectorized calculation of Pearson correlation coefficient and its p-value: + + >>> import scipy.stats + >>> pearsonr = np.vectorize(scipy.stats.pearsonr, + ... signature='(n),(n)->(),()') + >>> pearsonr([[0, 1, 2, 3]], [[1, 2, 3, 4], [4, 3, 2, 1]]) + (array([ 1., -1.]), array([ 0., 0.])) + + Or for a vectorized convolution: + + >>> convolve = np.vectorize(np.convolve, signature='(n),(m)->(k)') + >>> convolve(np.eye(4), [1, 2, 1]) + array([[1., 2., 1., 0., 0., 0.], + [0., 1., 2., 1., 0., 0.], + [0., 0., 1., 2., 1., 0.], + [0., 0., 0., 1., 2., 1.]]) + + Decorator syntax is supported. The decorator can be called as + a function to provide keyword arguments: + + >>> @np.vectorize + ... def identity(x): + ... return x + ... + >>> identity([0, 1, 2]) + array([0, 1, 2]) + >>> @np.vectorize(otypes=[float]) + ... def as_float(x): + ... return x + ... + >>> as_float([0, 1, 2]) + array([0., 1., 2.]) + """ + def __init__(self, pyfunc=np._NoValue, otypes=None, doc=None, + excluded=None, cache=False, signature=None): + + if (pyfunc != np._NoValue) and (not callable(pyfunc)): + # Splitting the error message to keep + # the length below 79 characters. + part1 = "When used as a decorator, " + part2 = "only accepts keyword arguments." + raise TypeError(part1 + part2) + + self.pyfunc = pyfunc + self.cache = cache + self.signature = signature + if pyfunc != np._NoValue and hasattr(pyfunc, '__name__'): + self.__name__ = pyfunc.__name__ + + self._ufunc = {} # Caching to improve default performance + self._doc = None + self.__doc__ = doc + if doc is None and hasattr(pyfunc, '__doc__'): + self.__doc__ = pyfunc.__doc__ + else: + self._doc = doc + + if isinstance(otypes, str): + for char in otypes: + if char not in typecodes['All']: + raise ValueError(f"Invalid otype specified: {char}") + elif iterable(otypes): + otypes = [_get_vectorize_dtype(_nx.dtype(x)) for x in otypes] + elif otypes is not None: + raise ValueError("Invalid otype specification") + self.otypes = otypes + + # Excluded variable support + if excluded is None: + excluded = set() + self.excluded = set(excluded) + + if signature is not None: + self._in_and_out_core_dims = _parse_gufunc_signature(signature) + else: + self._in_and_out_core_dims = None + + def _init_stage_2(self, pyfunc, *args, **kwargs): + self.__name__ = pyfunc.__name__ + self.pyfunc = pyfunc + if self._doc is None: + self.__doc__ = pyfunc.__doc__ + else: + self.__doc__ = self._doc + + def _call_as_normal(self, *args, **kwargs): + """ + Return arrays with the results of `pyfunc` broadcast (vectorized) over + `args` and `kwargs` not in `excluded`. + """ + excluded = self.excluded + if not kwargs and not excluded: + func = self.pyfunc + vargs = args + else: + # The wrapper accepts only positional arguments: we use `names` and + # `inds` to mutate `the_args` and `kwargs` to pass to the original + # function. + nargs = len(args) + + names = [_n for _n in kwargs if _n not in excluded] + inds = [_i for _i in range(nargs) if _i not in excluded] + the_args = list(args) + + def func(*vargs): + for _n, _i in enumerate(inds): + the_args[_i] = vargs[_n] + kwargs.update(zip(names, vargs[len(inds):])) + return self.pyfunc(*the_args, **kwargs) + + vargs = [args[_i] for _i in inds] + vargs.extend([kwargs[_n] for _n in names]) + + return self._vectorize_call(func=func, args=vargs) + + def __call__(self, *args, **kwargs): + if self.pyfunc is np._NoValue: + self._init_stage_2(*args, **kwargs) + return self + + return self._call_as_normal(*args, **kwargs) + + def _get_ufunc_and_otypes(self, func, args): + """Return (ufunc, otypes).""" + # frompyfunc will fail if args is empty + if not args: + raise ValueError('args can not be empty') + + if self.otypes is not None: + otypes = self.otypes + + # self._ufunc is a dictionary whose keys are the number of + # arguments (i.e. len(args)) and whose values are ufuncs created + # by frompyfunc. len(args) can be different for different calls if + # self.pyfunc has parameters with default values. We only use the + # cache when func is self.pyfunc, which occurs when the call uses + # only positional arguments and no arguments are excluded. + + nin = len(args) + nout = len(self.otypes) + if func is not self.pyfunc or nin not in self._ufunc: + ufunc = frompyfunc(func, nin, nout) + else: + ufunc = None # We'll get it from self._ufunc + if func is self.pyfunc: + ufunc = self._ufunc.setdefault(nin, ufunc) + else: + # Get number of outputs and output types by calling the function on + # the first entries of args. We also cache the result to prevent + # the subsequent call when the ufunc is evaluated. + # Assumes that ufunc first evaluates the 0th elements in the input + # arrays (the input values are not checked to ensure this) + args = [asarray(a) for a in args] + if builtins.any(arg.size == 0 for arg in args): + raise ValueError('cannot call `vectorize` on size 0 inputs ' + 'unless `otypes` is set') + + inputs = [arg.flat[0] for arg in args] + outputs = func(*inputs) + + # Performance note: profiling indicates that -- for simple + # functions at least -- this wrapping can almost double the + # execution time. + # Hence we make it optional. + if self.cache: + _cache = [outputs] + + def _func(*vargs): + if _cache: + return _cache.pop() + else: + return func(*vargs) + else: + _func = func + + if isinstance(outputs, tuple): + nout = len(outputs) + else: + nout = 1 + outputs = (outputs,) + + otypes = ''.join([asarray(outputs[_k]).dtype.char + for _k in range(nout)]) + + # Performance note: profiling indicates that creating the ufunc is + # not a significant cost compared with wrapping so it seems not + # worth trying to cache this. + ufunc = frompyfunc(_func, len(args), nout) + + return ufunc, otypes + + def _vectorize_call(self, func, args): + """Vectorized call to `func` over positional `args`.""" + if self.signature is not None: + res = self._vectorize_call_with_signature(func, args) + elif not args: + res = func() + else: + ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args) + # gh-29196: `dtype=object` should eventually be removed + args = [asanyarray(a, dtype=object) for a in args] + outputs = ufunc(*args, out=...) + + if ufunc.nout == 1: + res = asanyarray(outputs, dtype=otypes[0]) + else: + res = tuple(asanyarray(x, dtype=t) + for x, t in zip(outputs, otypes)) + return res + + def _vectorize_call_with_signature(self, func, args): + """Vectorized call over positional arguments with a signature.""" + input_core_dims, output_core_dims = self._in_and_out_core_dims + + if len(args) != len(input_core_dims): + raise TypeError('wrong number of positional arguments: ' + 'expected %r, got %r' + % (len(input_core_dims), len(args))) + args = tuple(asanyarray(arg) for arg in args) + + broadcast_shape, dim_sizes = _parse_input_dimensions( + args, input_core_dims) + input_shapes = _calculate_shapes(broadcast_shape, dim_sizes, + input_core_dims) + args = [np.broadcast_to(arg, shape, subok=True) + for arg, shape in zip(args, input_shapes)] + + outputs = None + otypes = self.otypes + nout = len(output_core_dims) + + for index in np.ndindex(*broadcast_shape): + results = func(*(arg[index] for arg in args)) + + n_results = len(results) if isinstance(results, tuple) else 1 + + if nout != n_results: + raise ValueError( + 'wrong number of outputs from pyfunc: expected %r, got %r' + % (nout, n_results)) + + if nout == 1: + results = (results,) + + if outputs is None: + for result, core_dims in zip(results, output_core_dims): + _update_dim_sizes(dim_sizes, result, core_dims) + + outputs = _create_arrays(broadcast_shape, dim_sizes, + output_core_dims, otypes, results) + + for output, result in zip(outputs, results): + output[index] = result + + if outputs is None: + # did not call the function even once + if otypes is None: + raise ValueError('cannot call `vectorize` on size 0 inputs ' + 'unless `otypes` is set') + if builtins.any(dim not in dim_sizes + for dims in output_core_dims + for dim in dims): + raise ValueError('cannot call `vectorize` with a signature ' + 'including new output dimensions on size 0 ' + 'inputs') + outputs = _create_arrays(broadcast_shape, dim_sizes, + output_core_dims, otypes) + + return outputs[0] if nout == 1 else outputs + + +def _cov_dispatcher(m, y=None, rowvar=None, bias=None, ddof=None, + fweights=None, aweights=None, *, dtype=None): + return (m, y, fweights, aweights) + + +@array_function_dispatch(_cov_dispatcher) +def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, + aweights=None, *, dtype=None): + """ + Estimate a covariance matrix, given data and weights. + + Covariance indicates the level to which two variables vary together. + If we examine N-dimensional samples, :math:`X = [x_1, x_2, ..., x_N]^T`, + then the covariance matrix element :math:`C_{ij}` is the covariance of + :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance + of :math:`x_i`. + + See the notes for an outline of the algorithm. + + Parameters + ---------- + m : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `m` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array_like, optional + An additional set of variables and observations. `y` has the same form + as that of `m`. + rowvar : bool, optional + If `rowvar` is True (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + bias : bool, optional + Default normalization (False) is by ``(N - 1)``, where ``N`` is the + number of observations given (unbiased estimate). If `bias` is True, + then normalization is by ``N``. These values can be overridden by using + the keyword ``ddof`` in numpy versions >= 1.5. + ddof : int, optional + If not ``None`` the default value implied by `bias` is overridden. + Note that ``ddof=1`` will return the unbiased estimate, even if both + `fweights` and `aweights` are specified, and ``ddof=0`` will return + the simple average. See the notes for the details. The default value + is ``None``. + fweights : array_like, int, optional + 1-D array of integer frequency weights; the number of times each + observation vector should be repeated. + aweights : array_like, optional + 1-D array of observation vector weights. These relative weights are + typically large for observations considered "important" and smaller for + observations considered less "important". If ``ddof=0`` the array of + weights can be used to assign probabilities to observation vectors. + dtype : data-type, optional + Data-type of the result. By default, the return data-type will have + at least `numpy.float64` precision. + + .. versionadded:: 1.20 + + Returns + ------- + out : ndarray + The covariance matrix of the variables. + + See Also + -------- + corrcoef : Normalized covariance matrix + + Notes + ----- + Assume that the observations are in the columns of the observation + array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The + steps to compute the weighted covariance are as follows:: + + >>> m = np.arange(10, dtype=np.float64) + >>> f = np.arange(10) * 2 + >>> a = np.arange(10) ** 2. + >>> ddof = 1 + >>> w = f * a + >>> v1 = np.sum(w) + >>> v2 = np.sum(w * a) + >>> m -= np.sum(m * w, axis=None, keepdims=True) / v1 + >>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2) + + Note that when ``a == 1``, the normalization factor + ``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)`` + as it should. + + Examples + -------- + >>> import numpy as np + + Consider two variables, :math:`x_0` and :math:`x_1`, which + correlate perfectly, but in opposite directions: + + >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T + >>> x + array([[0, 1, 2], + [2, 1, 0]]) + + Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance + matrix shows this clearly: + + >>> np.cov(x) + array([[ 1., -1.], + [-1., 1.]]) + + Note that element :math:`C_{0,1}`, which shows the correlation between + :math:`x_0` and :math:`x_1`, is negative. + + Further, note how `x` and `y` are combined: + + >>> x = [-2.1, -1, 4.3] + >>> y = [3, 1.1, 0.12] + >>> X = np.stack((x, y), axis=0) + >>> np.cov(X) + array([[11.71 , -4.286 ], # may vary + [-4.286 , 2.144133]]) + >>> np.cov(x, y) + array([[11.71 , -4.286 ], # may vary + [-4.286 , 2.144133]]) + >>> np.cov(x) + array(11.71) + + """ + # Check inputs + if ddof is not None and ddof != int(ddof): + raise ValueError( + "ddof must be integer") + + # Handles complex arrays too + m = np.asarray(m) + if m.ndim > 2: + raise ValueError("m has more than 2 dimensions") + + if y is not None: + y = np.asarray(y) + if y.ndim > 2: + raise ValueError("y has more than 2 dimensions") + + if dtype is None: + if y is None: + dtype = np.result_type(m, np.float64) + else: + dtype = np.result_type(m, y, np.float64) + + X = array(m, ndmin=2, dtype=dtype) + if not rowvar and m.ndim != 1: + X = X.T + if X.shape[0] == 0: + return np.array([]).reshape(0, 0) + if y is not None: + y = array(y, copy=None, ndmin=2, dtype=dtype) + if not rowvar and y.shape[0] != 1: + y = y.T + X = np.concatenate((X, y), axis=0) + + if ddof is None: + if bias == 0: + ddof = 1 + else: + ddof = 0 + + # Get the product of frequencies and weights + w = None + if fweights is not None: + fweights = np.asarray(fweights, dtype=float) + if not np.all(fweights == np.around(fweights)): + raise TypeError( + "fweights must be integer") + if fweights.ndim > 1: + raise RuntimeError( + "cannot handle multidimensional fweights") + if fweights.shape[0] != X.shape[1]: + raise RuntimeError( + "incompatible numbers of samples and fweights") + if any(fweights < 0): + raise ValueError( + "fweights cannot be negative") + w = fweights + if aweights is not None: + aweights = np.asarray(aweights, dtype=float) + if aweights.ndim > 1: + raise RuntimeError( + "cannot handle multidimensional aweights") + if aweights.shape[0] != X.shape[1]: + raise RuntimeError( + "incompatible numbers of samples and aweights") + if any(aweights < 0): + raise ValueError( + "aweights cannot be negative") + if w is None: + w = aweights + else: + w *= aweights + + avg, w_sum = average(X, axis=1, weights=w, returned=True) + w_sum = w_sum[0] + + # Determine the normalization + if w is None: + fact = X.shape[1] - ddof + elif ddof == 0: + fact = w_sum + elif aweights is None: + fact = w_sum - ddof + else: + fact = w_sum - ddof * sum(w * aweights) / w_sum + + if fact <= 0: + warnings.warn("Degrees of freedom <= 0 for slice", + RuntimeWarning, stacklevel=2) + fact = 0.0 + + X -= avg[:, None] + if w is None: + X_T = X.T + else: + X_T = (X * w).T + c = dot(X, X_T.conj()) + c *= np.true_divide(1, fact) + return c.squeeze() + + +def _corrcoef_dispatcher(x, y=None, rowvar=None, *, + dtype=None): + return (x, y) + + +@array_function_dispatch(_corrcoef_dispatcher) +def corrcoef(x, y=None, rowvar=True, *, + dtype=None): + """ + Return Pearson product-moment correlation coefficients. + + Please refer to the documentation for `cov` for more detail. The + relationship between the correlation coefficient matrix, `R`, and the + covariance matrix, `C`, is + + .. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} C_{jj} } } + + The values of `R` are between -1 and 1, inclusive. + + Parameters + ---------- + x : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `x` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array_like, optional + An additional set of variables and observations. `y` has the same + shape as `x`. + rowvar : bool, optional + If `rowvar` is True (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + + dtype : data-type, optional + Data-type of the result. By default, the return data-type will have + at least `numpy.float64` precision. + + .. versionadded:: 1.20 + + Returns + ------- + R : ndarray + The correlation coefficient matrix of the variables. + + See Also + -------- + cov : Covariance matrix + + Notes + ----- + Due to floating point rounding the resulting array may not be Hermitian, + the diagonal elements may not be 1, and the elements may not satisfy the + inequality abs(a) <= 1. The real and imaginary parts are clipped to the + interval [-1, 1] in an attempt to improve on that situation but is not + much help in the complex case. + + Examples + -------- + >>> import numpy as np + + In this example we generate two random arrays, ``xarr`` and ``yarr``, and + compute the row-wise and column-wise Pearson correlation coefficients, + ``R``. Since ``rowvar`` is true by default, we first find the row-wise + Pearson correlation coefficients between the variables of ``xarr``. + + >>> import numpy as np + >>> rng = np.random.default_rng(seed=42) + >>> xarr = rng.random((3, 3)) + >>> xarr + array([[0.77395605, 0.43887844, 0.85859792], + [0.69736803, 0.09417735, 0.97562235], + [0.7611397 , 0.78606431, 0.12811363]]) + >>> R1 = np.corrcoef(xarr) + >>> R1 + array([[ 1. , 0.99256089, -0.68080986], + [ 0.99256089, 1. , -0.76492172], + [-0.68080986, -0.76492172, 1. ]]) + + If we add another set of variables and observations ``yarr``, we can + compute the row-wise Pearson correlation coefficients between the + variables in ``xarr`` and ``yarr``. + + >>> yarr = rng.random((3, 3)) + >>> yarr + array([[0.45038594, 0.37079802, 0.92676499], + [0.64386512, 0.82276161, 0.4434142 ], + [0.22723872, 0.55458479, 0.06381726]]) + >>> R2 = np.corrcoef(xarr, yarr) + >>> R2 + array([[ 1. , 0.99256089, -0.68080986, 0.75008178, -0.934284 , + -0.99004057], + [ 0.99256089, 1. , -0.76492172, 0.82502011, -0.97074098, + -0.99981569], + [-0.68080986, -0.76492172, 1. , -0.99507202, 0.89721355, + 0.77714685], + [ 0.75008178, 0.82502011, -0.99507202, 1. , -0.93657855, + -0.83571711], + [-0.934284 , -0.97074098, 0.89721355, -0.93657855, 1. , + 0.97517215], + [-0.99004057, -0.99981569, 0.77714685, -0.83571711, 0.97517215, + 1. ]]) + + Finally if we use the option ``rowvar=False``, the columns are now + being treated as the variables and we will find the column-wise Pearson + correlation coefficients between variables in ``xarr`` and ``yarr``. + + >>> R3 = np.corrcoef(xarr, yarr, rowvar=False) + >>> R3 + array([[ 1. , 0.77598074, -0.47458546, -0.75078643, -0.9665554 , + 0.22423734], + [ 0.77598074, 1. , -0.92346708, -0.99923895, -0.58826587, + -0.44069024], + [-0.47458546, -0.92346708, 1. , 0.93773029, 0.23297648, + 0.75137473], + [-0.75078643, -0.99923895, 0.93773029, 1. , 0.55627469, + 0.47536961], + [-0.9665554 , -0.58826587, 0.23297648, 0.55627469, 1. , + -0.46666491], + [ 0.22423734, -0.44069024, 0.75137473, 0.47536961, -0.46666491, + 1. ]]) + + """ + c = cov(x, y, rowvar, dtype=dtype) + try: + d = diag(c) + except ValueError: + # scalar covariance + # nan if incorrect value (nan, inf, 0), 1 otherwise + return c / c + stddev = sqrt(d.real) + c /= stddev[:, None] + c /= stddev[None, :] + + # Clip real and imaginary parts to [-1, 1]. This does not guarantee + # abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without + # excessive work. + np.clip(c.real, -1, 1, out=c.real) + if np.iscomplexobj(c): + np.clip(c.imag, -1, 1, out=c.imag) + + return c + + +@set_module('numpy') +def blackman(M): + """ + Return the Blackman window. + + The Blackman window is a taper formed by using the first three + terms of a summation of cosines. It was designed to have close to the + minimal leakage possible. It is close to optimal, only slightly worse + than a Kaiser window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + + Returns + ------- + out : ndarray + The window, with the maximum value normalized to one (the value one + appears only if the number of samples is odd). + + See Also + -------- + bartlett, hamming, hanning, kaiser + + Notes + ----- + The Blackman window is defined as + + .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M) + + Most references to the Blackman window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. It is known as a + "near optimal" tapering function, almost as good (by some measures) + as the kaiser window. + + References + ---------- + Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, + Dover Publications, New York. + + Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. + Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> np.blackman(12) + array([-1.38777878e-17, 3.26064346e-02, 1.59903635e-01, # may vary + 4.14397981e-01, 7.36045180e-01, 9.67046769e-01, + 9.67046769e-01, 7.36045180e-01, 4.14397981e-01, + 1.59903635e-01, 3.26064346e-02, -1.38777878e-17]) + + Plot the window and the frequency response. + + .. plot:: + :include-source: + + import matplotlib.pyplot as plt + from numpy.fft import fft, fftshift + window = np.blackman(51) + plt.plot(window) + plt.title("Blackman window") + plt.ylabel("Amplitude") + plt.xlabel("Sample") + plt.show() # doctest: +SKIP + + plt.figure() + A = fft(window, 2048) / 25.5 + mag = np.abs(fftshift(A)) + freq = np.linspace(-0.5, 0.5, len(A)) + with np.errstate(divide='ignore', invalid='ignore'): + response = 20 * np.log10(mag) + response = np.clip(response, -100, 100) + plt.plot(freq, response) + plt.title("Frequency response of Blackman window") + plt.ylabel("Magnitude [dB]") + plt.xlabel("Normalized frequency [cycles per sample]") + plt.axis('tight') + plt.show() + + """ + # Ensures at least float64 via 0.0. M should be an integer, but conversion + # to double is safe for a range. + values = np.array([0.0, M]) + M = values[1] + + if M < 1: + return array([], dtype=values.dtype) + if M == 1: + return ones(1, dtype=values.dtype) + n = arange(1 - M, M, 2) + return 0.42 + 0.5 * cos(pi * n / (M - 1)) + 0.08 * cos(2.0 * pi * n / (M - 1)) + + +@set_module('numpy') +def bartlett(M): + """ + Return the Bartlett window. + + The Bartlett window is very similar to a triangular window, except + that the end points are at zero. It is often used in signal + processing for tapering a signal, without generating too much + ripple in the frequency domain. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + + Returns + ------- + out : array + The triangular window, with the maximum value normalized to one + (the value one appears only if the number of samples is odd), with + the first and last samples equal to zero. + + See Also + -------- + blackman, hamming, hanning, kaiser + + Notes + ----- + The Bartlett window is defined as + + .. math:: w(n) = \\frac{2}{M-1} \\left( + \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right| + \\right) + + Most references to the Bartlett window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. Note that convolution with this window produces linear + interpolation. It is also known as an apodization (which means "removing + the foot", i.e. smoothing discontinuities at the beginning and end of the + sampled signal) or tapering function. The Fourier transform of the + Bartlett window is the product of two sinc functions. Note the excellent + discussion in Kanasewich [2]_. + + References + ---------- + .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", + Biometrika 37, 1-16, 1950. + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", + The University of Alberta Press, 1975, pp. 109-110. + .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal + Processing", Prentice-Hall, 1999, pp. 468-471. + .. [4] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 429. + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> np.bartlett(12) + array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, # may vary + 0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636, + 0.18181818, 0. ]) + + Plot the window and its frequency response (requires SciPy and matplotlib). + + .. plot:: + :include-source: + + import matplotlib.pyplot as plt + from numpy.fft import fft, fftshift + window = np.bartlett(51) + plt.plot(window) + plt.title("Bartlett window") + plt.ylabel("Amplitude") + plt.xlabel("Sample") + plt.show() + plt.figure() + A = fft(window, 2048) / 25.5 + mag = np.abs(fftshift(A)) + freq = np.linspace(-0.5, 0.5, len(A)) + with np.errstate(divide='ignore', invalid='ignore'): + response = 20 * np.log10(mag) + response = np.clip(response, -100, 100) + plt.plot(freq, response) + plt.title("Frequency response of Bartlett window") + plt.ylabel("Magnitude [dB]") + plt.xlabel("Normalized frequency [cycles per sample]") + plt.axis('tight') + plt.show() + + """ + # Ensures at least float64 via 0.0. M should be an integer, but conversion + # to double is safe for a range. + values = np.array([0.0, M]) + M = values[1] + + if M < 1: + return array([], dtype=values.dtype) + if M == 1: + return ones(1, dtype=values.dtype) + n = arange(1 - M, M, 2) + return where(less_equal(n, 0), 1 + n / (M - 1), 1 - n / (M - 1)) + + +@set_module('numpy') +def hanning(M): + """ + Return the Hanning window. + + The Hanning window is a taper formed by using a weighted cosine. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + + Returns + ------- + out : ndarray, shape(M,) + The window, with the maximum value normalized to one (the value + one appears only if `M` is odd). + + See Also + -------- + bartlett, blackman, hamming, kaiser + + Notes + ----- + The Hanning window is defined as + + .. math:: w(n) = 0.5 - 0.5\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right) + \\qquad 0 \\leq n \\leq M-1 + + The Hanning was named for Julius von Hann, an Austrian meteorologist. + It is also known as the Cosine Bell. Some authors prefer that it be + called a Hann window, to help avoid confusion with the very similar + Hamming window. + + Most references to the Hanning window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + References + ---------- + .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power + spectra, Dover Publications, New York. + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", + The University of Alberta Press, 1975, pp. 106-108. + .. [3] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 425. + + Examples + -------- + >>> import numpy as np + >>> np.hanning(12) + array([0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037, + 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, + 0.07937323, 0. ]) + + Plot the window and its frequency response. + + .. plot:: + :include-source: + + import matplotlib.pyplot as plt + from numpy.fft import fft, fftshift + window = np.hanning(51) + plt.plot(window) + plt.title("Hann window") + plt.ylabel("Amplitude") + plt.xlabel("Sample") + plt.show() + + plt.figure() + A = fft(window, 2048) / 25.5 + mag = np.abs(fftshift(A)) + freq = np.linspace(-0.5, 0.5, len(A)) + with np.errstate(divide='ignore', invalid='ignore'): + response = 20 * np.log10(mag) + response = np.clip(response, -100, 100) + plt.plot(freq, response) + plt.title("Frequency response of the Hann window") + plt.ylabel("Magnitude [dB]") + plt.xlabel("Normalized frequency [cycles per sample]") + plt.axis('tight') + plt.show() + + """ + # Ensures at least float64 via 0.0. M should be an integer, but conversion + # to double is safe for a range. + values = np.array([0.0, M]) + M = values[1] + + if M < 1: + return array([], dtype=values.dtype) + if M == 1: + return ones(1, dtype=values.dtype) + n = arange(1 - M, M, 2) + return 0.5 + 0.5 * cos(pi * n / (M - 1)) + + +@set_module('numpy') +def hamming(M): + """ + Return the Hamming window. + + The Hamming window is a taper formed by using a weighted cosine. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + + Returns + ------- + out : ndarray + The window, with the maximum value normalized to one (the value + one appears only if the number of samples is odd). + + See Also + -------- + bartlett, blackman, hanning, kaiser + + Notes + ----- + The Hamming window is defined as + + .. math:: w(n) = 0.54 - 0.46\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right) + \\qquad 0 \\leq n \\leq M-1 + + The Hamming was named for R. W. Hamming, an associate of J. W. Tukey + and is described in Blackman and Tukey. It was recommended for + smoothing the truncated autocovariance function in the time domain. + Most references to the Hamming window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + References + ---------- + .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power + spectra, Dover Publications, New York. + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The + University of Alberta Press, 1975, pp. 109-110. + .. [3] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 425. + + Examples + -------- + >>> import numpy as np + >>> np.hamming(12) + array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, # may vary + 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909, + 0.15302337, 0.08 ]) + + Plot the window and the frequency response. + + .. plot:: + :include-source: + + import matplotlib.pyplot as plt + from numpy.fft import fft, fftshift + window = np.hamming(51) + plt.plot(window) + plt.title("Hamming window") + plt.ylabel("Amplitude") + plt.xlabel("Sample") + plt.show() + + plt.figure() + A = fft(window, 2048) / 25.5 + mag = np.abs(fftshift(A)) + freq = np.linspace(-0.5, 0.5, len(A)) + response = 20 * np.log10(mag) + response = np.clip(response, -100, 100) + plt.plot(freq, response) + plt.title("Frequency response of Hamming window") + plt.ylabel("Magnitude [dB]") + plt.xlabel("Normalized frequency [cycles per sample]") + plt.axis('tight') + plt.show() + + """ + # Ensures at least float64 via 0.0. M should be an integer, but conversion + # to double is safe for a range. + values = np.array([0.0, M]) + M = values[1] + + if M < 1: + return array([], dtype=values.dtype) + if M == 1: + return ones(1, dtype=values.dtype) + n = arange(1 - M, M, 2) + return 0.54 + 0.46 * cos(pi * n / (M - 1)) + + +## Code from cephes for i0 + +_i0A = [ + -4.41534164647933937950E-18, + 3.33079451882223809783E-17, + -2.43127984654795469359E-16, + 1.71539128555513303061E-15, + -1.16853328779934516808E-14, + 7.67618549860493561688E-14, + -4.85644678311192946090E-13, + 2.95505266312963983461E-12, + -1.72682629144155570723E-11, + 9.67580903537323691224E-11, + -5.18979560163526290666E-10, + 2.65982372468238665035E-9, + -1.30002500998624804212E-8, + 6.04699502254191894932E-8, + -2.67079385394061173391E-7, + 1.11738753912010371815E-6, + -4.41673835845875056359E-6, + 1.64484480707288970893E-5, + -5.75419501008210370398E-5, + 1.88502885095841655729E-4, + -5.76375574538582365885E-4, + 1.63947561694133579842E-3, + -4.32430999505057594430E-3, + 1.05464603945949983183E-2, + -2.37374148058994688156E-2, + 4.93052842396707084878E-2, + -9.49010970480476444210E-2, + 1.71620901522208775349E-1, + -3.04682672343198398683E-1, + 6.76795274409476084995E-1 + ] + +_i0B = [ + -7.23318048787475395456E-18, + -4.83050448594418207126E-18, + 4.46562142029675999901E-17, + 3.46122286769746109310E-17, + -2.82762398051658348494E-16, + -3.42548561967721913462E-16, + 1.77256013305652638360E-15, + 3.81168066935262242075E-15, + -9.55484669882830764870E-15, + -4.15056934728722208663E-14, + 1.54008621752140982691E-14, + 3.85277838274214270114E-13, + 7.18012445138366623367E-13, + -1.79417853150680611778E-12, + -1.32158118404477131188E-11, + -3.14991652796324136454E-11, + 1.18891471078464383424E-11, + 4.94060238822496958910E-10, + 3.39623202570838634515E-9, + 2.26666899049817806459E-8, + 2.04891858946906374183E-7, + 2.89137052083475648297E-6, + 6.88975834691682398426E-5, + 3.36911647825569408990E-3, + 8.04490411014108831608E-1 + ] + + +def _chbevl(x, vals): + b0 = vals[0] + b1 = 0.0 + + for i in range(1, len(vals)): + b2 = b1 + b1 = b0 + b0 = x * b1 - b2 + vals[i] + + return 0.5 * (b0 - b2) + + +def _i0_1(x): + return exp(x) * _chbevl(x / 2.0 - 2, _i0A) + + +def _i0_2(x): + return exp(x) * _chbevl(32.0 / x - 2.0, _i0B) / sqrt(x) + + +def _i0_dispatcher(x): + return (x,) + + +@array_function_dispatch(_i0_dispatcher) +def i0(x): + """ + Modified Bessel function of the first kind, order 0. + + Usually denoted :math:`I_0`. + + Parameters + ---------- + x : array_like of float + Argument of the Bessel function. + + Returns + ------- + out : ndarray, shape = x.shape, dtype = float + The modified Bessel function evaluated at each of the elements of `x`. + + See Also + -------- + scipy.special.i0, scipy.special.iv, scipy.special.ive + + Notes + ----- + The scipy implementation is recommended over this function: it is a + proper ufunc written in C, and more than an order of magnitude faster. + + We use the algorithm published by Clenshaw [1]_ and referenced by + Abramowitz and Stegun [2]_, for which the function domain is + partitioned into the two intervals [0,8] and (8,inf), and Chebyshev + polynomial expansions are employed in each interval. Relative error on + the domain [0,30] using IEEE arithmetic is documented [3]_ as having a + peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000). + + References + ---------- + .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in + *National Physical Laboratory Mathematical Tables*, vol. 5, London: + Her Majesty's Stationery Office, 1962. + .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical + Functions*, 10th printing, New York: Dover, 1964, pp. 379. + https://personal.math.ubc.ca/~cbm/aands/page_379.htm + .. [3] https://metacpan.org/pod/distribution/Math-Cephes/lib/Math/Cephes.pod#i0:-Modified-Bessel-function-of-order-zero + + Examples + -------- + >>> import numpy as np + >>> np.i0(0.) + array(1.0) + >>> np.i0([0, 1, 2, 3]) + array([1. , 1.26606588, 2.2795853 , 4.88079259]) + + """ + x = np.asanyarray(x) + if x.dtype.kind == 'c': + raise TypeError("i0 not supported for complex values") + if x.dtype.kind != 'f': + x = x.astype(float) + x = np.abs(x) + return piecewise(x, [x <= 8.0], [_i0_1, _i0_2]) + +## End of cephes code for i0 + + +@set_module('numpy') +def kaiser(M, beta): + """ + Return the Kaiser window. + + The Kaiser window is a taper formed by using a Bessel function. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + beta : float + Shape parameter for window. + + Returns + ------- + out : array + The window, with the maximum value normalized to one (the value + one appears only if the number of samples is odd). + + See Also + -------- + bartlett, blackman, hamming, hanning + + Notes + ----- + The Kaiser window is defined as + + .. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}} + \\right)/I_0(\\beta) + + with + + .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2}, + + where :math:`I_0` is the modified zeroth-order Bessel function. + + The Kaiser was named for Jim Kaiser, who discovered a simple + approximation to the DPSS window based on Bessel functions. The Kaiser + window is a very good approximation to the Digital Prolate Spheroidal + Sequence, or Slepian window, which is the transform which maximizes the + energy in the main lobe of the window relative to total energy. + + The Kaiser can approximate many other windows by varying the beta + parameter. + + ==== ======================= + beta Window shape + ==== ======================= + 0 Rectangular + 5 Similar to a Hamming + 6 Similar to a Hanning + 8.6 Similar to a Blackman + ==== ======================= + + A beta value of 14 is probably a good starting point. Note that as beta + gets large, the window narrows, and so the number of samples needs to be + large enough to sample the increasingly narrow spike, otherwise NaNs will + get returned. + + Most references to the Kaiser window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + References + ---------- + .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by + digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285. + John Wiley and Sons, New York, (1966). + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The + University of Alberta Press, 1975, pp. 177-178. + .. [3] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> np.kaiser(12, 14) + array([7.72686684e-06, 3.46009194e-03, 4.65200189e-02, # may vary + 2.29737120e-01, 5.99885316e-01, 9.45674898e-01, + 9.45674898e-01, 5.99885316e-01, 2.29737120e-01, + 4.65200189e-02, 3.46009194e-03, 7.72686684e-06]) + + + Plot the window and the frequency response. + + .. plot:: + :include-source: + + import matplotlib.pyplot as plt + from numpy.fft import fft, fftshift + window = np.kaiser(51, 14) + plt.plot(window) + plt.title("Kaiser window") + plt.ylabel("Amplitude") + plt.xlabel("Sample") + plt.show() + + plt.figure() + A = fft(window, 2048) / 25.5 + mag = np.abs(fftshift(A)) + freq = np.linspace(-0.5, 0.5, len(A)) + response = 20 * np.log10(mag) + response = np.clip(response, -100, 100) + plt.plot(freq, response) + plt.title("Frequency response of Kaiser window") + plt.ylabel("Magnitude [dB]") + plt.xlabel("Normalized frequency [cycles per sample]") + plt.axis('tight') + plt.show() + + """ + # Ensures at least float64 via 0.0. M should be an integer, but conversion + # to double is safe for a range. (Simplified result_type with 0.0 + # strongly typed. result-type is not/less order sensitive, but that mainly + # matters for integers anyway.) + values = np.array([0.0, M, beta]) + M = values[1] + beta = values[2] + + if M == 1: + return np.ones(1, dtype=values.dtype) + n = arange(0, M) + alpha = (M - 1) / 2.0 + return i0(beta * sqrt(1 - ((n - alpha) / alpha)**2.0)) / i0(beta) + + +def _sinc_dispatcher(x): + return (x,) + + +@array_function_dispatch(_sinc_dispatcher) +def sinc(x): + r""" + Return the normalized sinc function. + + The sinc function is equal to :math:`\sin(\pi x)/(\pi x)` for any argument + :math:`x\ne 0`. ``sinc(0)`` takes the limit value 1, making ``sinc`` not + only everywhere continuous but also infinitely differentiable. + + .. note:: + + Note the normalization factor of ``pi`` used in the definition. + This is the most commonly used definition in signal processing. + Use ``sinc(x / np.pi)`` to obtain the unnormalized sinc function + :math:`\sin(x)/x` that is more common in mathematics. + + Parameters + ---------- + x : ndarray + Array (possibly multi-dimensional) of values for which to calculate + ``sinc(x)``. + + Returns + ------- + out : ndarray + ``sinc(x)``, which has the same shape as the input. + + Notes + ----- + The name sinc is short for "sine cardinal" or "sinus cardinalis". + + The sinc function is used in various signal processing applications, + including in anti-aliasing, in the construction of a Lanczos resampling + filter, and in interpolation. + + For bandlimited interpolation of discrete-time signals, the ideal + interpolation kernel is proportional to the sinc function. + + References + ---------- + .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web + Resource. https://mathworld.wolfram.com/SincFunction.html + .. [2] Wikipedia, "Sinc function", + https://en.wikipedia.org/wiki/Sinc_function + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(-4, 4, 41) + >>> np.sinc(x) + array([-3.89804309e-17, -4.92362781e-02, -8.40918587e-02, # may vary + -8.90384387e-02, -5.84680802e-02, 3.89804309e-17, + 6.68206631e-02, 1.16434881e-01, 1.26137788e-01, + 8.50444803e-02, -3.89804309e-17, -1.03943254e-01, + -1.89206682e-01, -2.16236208e-01, -1.55914881e-01, + 3.89804309e-17, 2.33872321e-01, 5.04551152e-01, + 7.56826729e-01, 9.35489284e-01, 1.00000000e+00, + 9.35489284e-01, 7.56826729e-01, 5.04551152e-01, + 2.33872321e-01, 3.89804309e-17, -1.55914881e-01, + -2.16236208e-01, -1.89206682e-01, -1.03943254e-01, + -3.89804309e-17, 8.50444803e-02, 1.26137788e-01, + 1.16434881e-01, 6.68206631e-02, 3.89804309e-17, + -5.84680802e-02, -8.90384387e-02, -8.40918587e-02, + -4.92362781e-02, -3.89804309e-17]) + + >>> plt.plot(x, np.sinc(x)) + [] + >>> plt.title("Sinc Function") + Text(0.5, 1.0, 'Sinc Function') + >>> plt.ylabel("Amplitude") + Text(0, 0.5, 'Amplitude') + >>> plt.xlabel("X") + Text(0.5, 0, 'X') + >>> plt.show() + + """ + x = np.asanyarray(x) + x = pi * x + # Hope that 1e-20 is sufficient for objects... + eps = np.finfo(x.dtype).eps if x.dtype.kind == "f" else 1e-20 + y = where(x, x, eps) + return sin(y) / y + + +def _ureduce(a, func, keepdims=False, **kwargs): + """ + Internal Function. + Call `func` with `a` as first argument swapping the axes to use extended + axis on functions that don't support it natively. + + Returns result and a.shape with axis dims set to 1. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + func : callable + Reduction function capable of receiving a single axis argument. + It is called with `a` as first argument followed by `kwargs`. + kwargs : keyword arguments + additional keyword arguments to pass to `func`. + + Returns + ------- + result : tuple + Result of func(a, **kwargs) and a.shape with axis dims set to 1 + which can be used to reshape the result to the same shape a ufunc with + keepdims=True would produce. + + """ + a = np.asanyarray(a) + axis = kwargs.get('axis') + out = kwargs.get('out') + + if keepdims is np._NoValue: + keepdims = False + + nd = a.ndim + if axis is not None: + axis = _nx.normalize_axis_tuple(axis, nd) + + if keepdims and out is not None: + index_out = tuple( + 0 if i in axis else slice(None) for i in range(nd)) + kwargs['out'] = out[(Ellipsis, ) + index_out] + + if len(axis) == 1: + kwargs['axis'] = axis[0] + else: + keep = sorted(set(range(nd)) - set(axis)) + nkeep = len(keep) + + def reshape_arr(a): + # move axis that should not be reduced to front + a = np.moveaxis(a, keep, range(nkeep)) + # merge reduced axis + return a.reshape(a.shape[:nkeep] + (-1,)) + + a = reshape_arr(a) + + weights = kwargs.get("weights") + if weights is not None: + kwargs["weights"] = reshape_arr(weights) + + kwargs['axis'] = -1 + elif keepdims and out is not None: + index_out = (0, ) * nd + kwargs['out'] = out[(Ellipsis, ) + index_out] + + r = func(a, **kwargs) + + if out is not None: + return out + + if keepdims: + if axis is None: + index_r = (np.newaxis, ) * nd + else: + index_r = tuple( + np.newaxis if i in axis else slice(None) + for i in range(nd)) + r = r[(Ellipsis, ) + index_r] + + return r + + +def _median_dispatcher( + a, axis=None, out=None, overwrite_input=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_median_dispatcher) +def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): + """ + Compute the median along the specified axis. + + Returns the median of the array elements. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : {int, sequence of int, None}, optional + Axis or axes along which the medians are computed. The default, + axis=None, will compute the median along a flattened version of + the array. If a sequence of axes, the array is first flattened + along the given axes, then the median is computed along the + resulting flattened axis. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow use of memory of input array `a` for + calculations. The input array will be modified by the call to + `median`. This will save memory when you do not need to preserve + the contents of the input array. Treat the input as undefined, + but it will probably be fully or partially sorted. Default is + False. If `overwrite_input` is ``True`` and `a` is not already an + `ndarray`, an error will be raised. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `arr`. + + Returns + ------- + median : ndarray + A new array holding the result. If the input contains integers + or floats smaller than ``float64``, then the output data-type is + ``np.float64``. Otherwise, the data-type of the output is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + mean, percentile + + Notes + ----- + Given a vector ``V`` of length ``N``, the median of ``V`` is the + middle value of a sorted copy of ``V``, ``V_sorted`` - i + e., ``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the + two middle values of ``V_sorted`` when ``N`` is even. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[10, 7, 4], [3, 2, 1]]) + >>> a + array([[10, 7, 4], + [ 3, 2, 1]]) + >>> np.median(a) + np.float64(3.5) + >>> np.median(a, axis=0) + array([6.5, 4.5, 2.5]) + >>> np.median(a, axis=1) + array([7., 2.]) + >>> np.median(a, axis=(0, 1)) + np.float64(3.5) + >>> m = np.median(a, axis=0) + >>> out = np.zeros_like(m) + >>> np.median(a, axis=0, out=m) + array([6.5, 4.5, 2.5]) + >>> m + array([6.5, 4.5, 2.5]) + >>> b = a.copy() + >>> np.median(b, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a==b) + >>> b = a.copy() + >>> np.median(b, axis=None, overwrite_input=True) + np.float64(3.5) + >>> assert not np.all(a==b) + + """ + return _ureduce(a, func=_median, keepdims=keepdims, axis=axis, out=out, + overwrite_input=overwrite_input) + + +def _median(a, axis=None, out=None, overwrite_input=False): + # can't be reasonably be implemented in terms of percentile as we have to + # call mean to not break astropy + a = np.asanyarray(a) + + # Set the partition indexes + if axis is None: + sz = a.size + else: + sz = a.shape[axis] + if sz % 2 == 0: + szh = sz // 2 + kth = [szh - 1, szh] + else: + kth = [(sz - 1) // 2] + + # We have to check for NaNs (as of writing 'M' doesn't actually work). + supports_nans = np.issubdtype(a.dtype, np.inexact) or a.dtype.kind in 'Mm' + if supports_nans: + kth.append(-1) + + if overwrite_input: + if axis is None: + part = a.ravel() + part.partition(kth) + else: + a.partition(kth, axis=axis) + part = a + else: + part = partition(a, kth, axis=axis) + + if part.shape == (): + # make 0-D arrays work + return part.item() + if axis is None: + axis = 0 + + indexer = [slice(None)] * part.ndim + index = part.shape[axis] // 2 + if part.shape[axis] % 2 == 1: + # index with slice to allow mean (below) to work + indexer[axis] = slice(index, index + 1) + else: + indexer[axis] = slice(index - 1, index + 1) + indexer = tuple(indexer) + + # Use mean in both odd and even case to coerce data type, + # using out array if needed. + rout = mean(part[indexer], axis=axis, out=out) + if supports_nans and sz > 0: + # If nans are possible, warn and replace by nans like mean would. + rout = np.lib._utils_impl._median_nancheck(part, rout, axis) + + return rout + + +def _percentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, + method=None, keepdims=None, *, weights=None): + return (a, q, out, weights) + + +@array_function_dispatch(_percentile_dispatcher) +def percentile(a, + q, + axis=None, + out=None, + overwrite_input=False, + method="linear", + keepdims=False, + *, + weights=None): + """ + Compute the q-th percentile of the data along the specified axis. + + Returns the q-th percentile(s) of the array elements. + + Parameters + ---------- + a : array_like of real numbers + Input array or object that can be converted to an array. + q : array_like of float + Percentage or sequence of percentages for the percentiles to compute. + Values must be between 0 and 100 inclusive. + axis : {int, tuple of int, None}, optional + Axis or axes along which the percentiles are computed. The + default is to compute the percentile(s) along a flattened + version of the array. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow the input array `a` to be modified by intermediate + calculations, to save memory. In this case, the contents of the input + `a` after this function completes is undefined. + method : str, optional + This parameter specifies the method to use for estimating the + percentile. There are many different methods, some unique to NumPy. + See the notes for explanation. The options sorted by their R type + as summarized in the H&F paper [1]_ are: + + 1. 'inverted_cdf' + 2. 'averaged_inverted_cdf' + 3. 'closest_observation' + 4. 'interpolated_inverted_cdf' + 5. 'hazen' + 6. 'weibull' + 7. 'linear' (default) + 8. 'median_unbiased' + 9. 'normal_unbiased' + + The first three methods are discontinuous. NumPy further defines the + following discontinuous variations of the default 'linear' (7.) option: + + * 'lower' + * 'higher', + * 'midpoint' + * 'nearest' + + .. versionchanged:: 1.22.0 + This argument was previously called "interpolation" and only + offered the "linear" default and last four options. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original array `a`. + + weights : array_like, optional + An array of weights associated with the values in `a`. Each value in + `a` contributes to the percentile according to its associated weight. + The weights array can either be 1-D (in which case its length must be + the size of `a` along the given axis) or of the same shape as `a`. + If `weights=None`, then all data in `a` are assumed to have a + weight equal to one. + Only `method="inverted_cdf"` supports weights. + See the notes for more details. + + .. versionadded:: 2.0.0 + + Returns + ------- + percentile : scalar or ndarray + If `q` is a single percentile and `axis=None`, then the result + is a scalar. If multiple percentiles are given, first axis of + the result corresponds to the percentiles. The other axes are + the axes that remain after the reduction of `a`. If the input + contains integers or floats smaller than ``float64``, the output + data-type is ``float64``. Otherwise, the output data-type is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + mean + median : equivalent to ``percentile(..., 50)`` + nanpercentile + quantile : equivalent to percentile, except q in the range [0, 1]. + + Notes + ----- + The behavior of `numpy.percentile` with percentage `q` is + that of `numpy.quantile` with argument ``q/100``. + For more information, please see `numpy.quantile`. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[10, 7, 4], [3, 2, 1]]) + >>> a + array([[10, 7, 4], + [ 3, 2, 1]]) + >>> np.percentile(a, 50) + 3.5 + >>> np.percentile(a, 50, axis=0) + array([6.5, 4.5, 2.5]) + >>> np.percentile(a, 50, axis=1) + array([7., 2.]) + >>> np.percentile(a, 50, axis=1, keepdims=True) + array([[7.], + [2.]]) + + >>> m = np.percentile(a, 50, axis=0) + >>> out = np.zeros_like(m) + >>> np.percentile(a, 50, axis=0, out=out) + array([6.5, 4.5, 2.5]) + >>> m + array([6.5, 4.5, 2.5]) + + >>> b = a.copy() + >>> np.percentile(b, 50, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a == b) + + The different methods can be visualized graphically: + + .. plot:: + + import matplotlib.pyplot as plt + + a = np.arange(4) + p = np.linspace(0, 100, 6001) + ax = plt.gca() + lines = [ + ('linear', '-', 'C0'), + ('inverted_cdf', ':', 'C1'), + # Almost the same as `inverted_cdf`: + ('averaged_inverted_cdf', '-.', 'C1'), + ('closest_observation', ':', 'C2'), + ('interpolated_inverted_cdf', '--', 'C1'), + ('hazen', '--', 'C3'), + ('weibull', '-.', 'C4'), + ('median_unbiased', '--', 'C5'), + ('normal_unbiased', '-.', 'C6'), + ] + for method, style, color in lines: + ax.plot( + p, np.percentile(a, p, method=method), + label=method, linestyle=style, color=color) + ax.set( + title='Percentiles for different methods and data: ' + str(a), + xlabel='Percentile', + ylabel='Estimated percentile value', + yticks=a) + ax.legend(bbox_to_anchor=(1.03, 1)) + plt.tight_layout() + plt.show() + + References + ---------- + .. [1] R. J. Hyndman and Y. Fan, + "Sample quantiles in statistical packages," + The American Statistician, 50(4), pp. 361-365, 1996 + + """ + a = np.asanyarray(a) + if a.dtype.kind == "c": + raise TypeError("a must be an array of real numbers") + + weak_q = type(q) in (int, float) # use weak promotion for final result type + q = np.true_divide(q, 100, out=...) + if not _quantile_is_valid(q): + raise ValueError("Percentiles must be in the range [0, 100]") + + if weights is not None: + if method != "inverted_cdf": + msg = ("Only method 'inverted_cdf' supports weights. " + f"Got: {method}.") + raise ValueError(msg) + if axis is not None: + axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis") + weights = _weights_are_valid(weights=weights, a=a, axis=axis) + if np.any(weights < 0): + raise ValueError("Weights must be non-negative.") + + return _quantile_unchecked( + a, q, axis, out, overwrite_input, method, keepdims, weights, weak_q) + + +def _quantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, + method=None, keepdims=None, *, weights=None): + return (a, q, out, weights) + + +@array_function_dispatch(_quantile_dispatcher) +def quantile(a, + q, + axis=None, + out=None, + overwrite_input=False, + method="linear", + keepdims=False, + *, + weights=None): + """ + Compute the q-th quantile of the data along the specified axis. + + Parameters + ---------- + a : array_like of real numbers + Input array or object that can be converted to an array. + q : array_like of float + Probability or sequence of probabilities of the quantiles to compute. + Values must be between 0 and 1 inclusive. + axis : {int, tuple of int, None}, optional + Axis or axes along which the quantiles are computed. The default is + to compute the quantile(s) along a flattened version of the array. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape and buffer length as the expected output, but the + type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow the input array `a` to be modified by + intermediate calculations, to save memory. In this case, the + contents of the input `a` after this function completes is + undefined. + method : str, optional + This parameter specifies the method to use for estimating the + quantile. There are many different methods, some unique to NumPy. + The recommended options, numbered as they appear in [1]_, are: + + 1. 'inverted_cdf' + 2. 'averaged_inverted_cdf' + 3. 'closest_observation' + 4. 'interpolated_inverted_cdf' + 5. 'hazen' + 6. 'weibull' + 7. 'linear' (default) + 8. 'median_unbiased' + 9. 'normal_unbiased' + + The first three methods are discontinuous. For backward compatibility + with previous versions of NumPy, the following discontinuous variations + of the default 'linear' (7.) option are available: + + * 'lower' + * 'higher', + * 'midpoint' + * 'nearest' + + See Notes for details. + + .. versionchanged:: 1.22.0 + This argument was previously called "interpolation" and only + offered the "linear" default and last four options. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original array `a`. + + weights : array_like, optional + An array of weights associated with the values in `a`. Each value in + `a` contributes to the quantile according to its associated weight. + The weights array can either be 1-D (in which case its length must be + the size of `a` along the given axis) or of the same shape as `a`. + If `weights=None`, then all data in `a` are assumed to have a + weight equal to one. + Only `method="inverted_cdf"` supports weights. + See the notes for more details. + + .. versionadded:: 2.0.0 + + Returns + ------- + quantile : scalar or ndarray + If `q` is a single probability and `axis=None`, then the result + is a scalar. If multiple probability levels are given, first axis + of the result corresponds to the quantiles. The other axes are + the axes that remain after the reduction of `a`. If the input + contains integers or floats smaller than ``float64``, the output + data-type is ``float64``. Otherwise, the output data-type is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + mean + percentile : equivalent to quantile, but with q in the range [0, 100]. + median : equivalent to ``quantile(..., 0.5)`` + nanquantile + + Notes + ----- + Given a sample `a` from an underlying distribution, `quantile` provides a + nonparametric estimate of the inverse cumulative distribution function. + + By default, this is done by interpolating between adjacent elements in + ``y``, a sorted copy of `a`:: + + (1-g)*y[j] + g*y[j+1] + + where the index ``j`` and coefficient ``g`` are the integral and + fractional components of ``q * (n-1)``, and ``n`` is the number of + elements in the sample. + + This is a special case of Equation 1 of H&F [1]_. More generally, + + - ``j = (q*n + m - 1) // 1``, and + - ``g = (q*n + m - 1) % 1``, + + where ``m`` may be defined according to several different conventions. + The preferred convention may be selected using the ``method`` parameter: + + =============================== =============== =============== + ``method`` number in H&F ``m`` + =============================== =============== =============== + ``interpolated_inverted_cdf`` 4 ``0`` + ``hazen`` 5 ``1/2`` + ``weibull`` 6 ``q`` + ``linear`` (default) 7 ``1 - q`` + ``median_unbiased`` 8 ``q/3 + 1/3`` + ``normal_unbiased`` 9 ``q/4 + 3/8`` + =============================== =============== =============== + + Note that indices ``j`` and ``j + 1`` are clipped to the range ``0`` to + ``n - 1`` when the results of the formula would be outside the allowed + range of non-negative indices. The ``- 1`` in the formulas for ``j`` and + ``g`` accounts for Python's 0-based indexing. + + The table above includes only the estimators from H&F that are continuous + functions of probability `q` (estimators 4-9). NumPy also provides the + three discontinuous estimators from H&F (estimators 1-3), where ``j`` is + defined as above, ``m`` is defined as follows, and ``g`` is a function + of the real-valued ``index = q*n + m - 1`` and ``j``. + + 1. ``inverted_cdf``: ``m = 0`` and ``g = int(index - j > 0)`` + 2. ``averaged_inverted_cdf``: ``m = 0`` and + ``g = (1 + int(index - j > 0)) / 2`` + 3. ``closest_observation``: ``m = -1/2`` and + ``g = 1 - int((index == j) & (j%2 == 1))`` + + For backward compatibility with previous versions of NumPy, `quantile` + provides four additional discontinuous estimators. Like + ``method='linear'``, all have ``m = 1 - q`` so that ``j = q*(n-1) // 1``, + but ``g`` is defined as follows. + + - ``lower``: ``g = 0`` + - ``midpoint``: ``g = 0.5`` + - ``higher``: ``g = 1`` + - ``nearest``: ``g = (q*(n-1) % 1) > 0.5`` + + **Weighted quantiles:** + More formally, the quantile at probability level :math:`q` of a cumulative + distribution function :math:`F(y)=P(Y \\leq y)` with probability measure + :math:`P` is defined as any number :math:`x` that fulfills the + *coverage conditions* + + .. math:: P(Y < x) \\leq q \\quad\\text{and}\\quad P(Y \\leq x) \\geq q + + with random variable :math:`Y\\sim P`. + Sample quantiles, the result of `quantile`, provide nonparametric + estimation of the underlying population counterparts, represented by the + unknown :math:`F`, given a data vector `a` of length ``n``. + + Some of the estimators above arise when one considers :math:`F` as the + empirical distribution function of the data, i.e. + :math:`F(y) = \\frac{1}{n} \\sum_i 1_{a_i \\leq y}`. + Then, different methods correspond to different choices of :math:`x` that + fulfill the above coverage conditions. Methods that follow this approach + are ``inverted_cdf`` and ``averaged_inverted_cdf``. + + For weighted quantiles, the coverage conditions still hold. The + empirical cumulative distribution is simply replaced by its weighted + version, i.e. + :math:`P(Y \\leq t) = \\frac{1}{\\sum_i w_i} \\sum_i w_i 1_{x_i \\leq t}`. + Only ``method="inverted_cdf"`` supports weights. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[10, 7, 4], [3, 2, 1]]) + >>> a + array([[10, 7, 4], + [ 3, 2, 1]]) + >>> np.quantile(a, 0.5) + 3.5 + >>> np.quantile(a, 0.5, axis=0) + array([6.5, 4.5, 2.5]) + >>> np.quantile(a, 0.5, axis=1) + array([7., 2.]) + >>> np.quantile(a, 0.5, axis=1, keepdims=True) + array([[7.], + [2.]]) + >>> m = np.quantile(a, 0.5, axis=0) + >>> out = np.zeros_like(m) + >>> np.quantile(a, 0.5, axis=0, out=out) + array([6.5, 4.5, 2.5]) + >>> m + array([6.5, 4.5, 2.5]) + >>> b = a.copy() + >>> np.quantile(b, 0.5, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a == b) + + See also `numpy.percentile` for a visualization of most methods. + + References + ---------- + .. [1] R. J. Hyndman and Y. Fan, + "Sample quantiles in statistical packages," + The American Statistician, 50(4), pp. 361-365, 1996 + + """ + a = np.asanyarray(a) + if a.dtype.kind == "c": + raise TypeError("a must be an array of real numbers") + + weak_q = type(q) in (int, float) # use weak promotion for final result type + q = np.asanyarray(q) + + if not _quantile_is_valid(q): + raise ValueError("Quantiles must be in the range [0, 1]") + + if weights is not None: + if method != "inverted_cdf": + msg = ("Only method 'inverted_cdf' supports weights. " + f"Got: {method}.") + raise ValueError(msg) + if axis is not None: + axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis") + weights = _weights_are_valid(weights=weights, a=a, axis=axis) + if np.any(weights < 0): + raise ValueError("Weights must be non-negative.") + + return _quantile_unchecked( + a, q, axis, out, overwrite_input, method, keepdims, weights, weak_q) + + +def _quantile_unchecked(a, + q, + axis=None, + out=None, + overwrite_input=False, + method="linear", + keepdims=False, + weights=None, + weak_q=False): + """Assumes that q is in [0, 1], and is an ndarray""" + return _ureduce(a, + func=_quantile_ureduce_func, + q=q, + weights=weights, + keepdims=keepdims, + axis=axis, + out=out, + overwrite_input=overwrite_input, + method=method, + weak_q=weak_q) + + +def _quantile_is_valid(q): + # avoid expensive reductions, relevant for arrays with < O(1000) elements + if q.ndim == 1 and q.size < 10: + for i in range(q.size): + if not (0.0 <= q[i] <= 1.0): + return False + elif not (q.min() >= 0 and q.max() <= 1): + return False + return True + + +def _compute_virtual_index(n, quantiles, alpha: float, beta: float): + """ + Compute the floating point indexes of an array for the linear + interpolation of quantiles. + n : array_like + The sample sizes. + quantiles : array_like + The quantiles values. + alpha : float + A constant used to correct the index computed. + beta : float + A constant used to correct the index computed. + + alpha and beta values depend on the chosen method + (see quantile documentation) + + Reference: + Hyndman&Fan paper "Sample Quantiles in Statistical Packages", + DOI: 10.1080/00031305.1996.10473566 + """ + return n * quantiles + ( + alpha + quantiles * (1 - alpha - beta) + ) - 1 + + +def _get_gamma(virtual_indexes, previous_indexes, method): + """ + Compute gamma (a.k.a 'm' or 'weight') for the linear interpolation + of quantiles. + + virtual_indexes : array_like + The indexes where the percentile is supposed to be found in the sorted + sample. + previous_indexes : array_like + The floor values of virtual_indexes. + method : dict + The interpolation method chosen, which may have a specific rule + modifying gamma. + + gamma is usually the fractional part of virtual_indexes but can be modified + by the interpolation method. + """ + gamma = np.asanyarray(virtual_indexes - previous_indexes) + gamma = method["fix_gamma"](gamma, virtual_indexes) + # Ensure both that we have an array, and that we keep the dtype + # (which may have been matched to the input array). + return np.asanyarray(gamma, dtype=virtual_indexes.dtype) + + +def _lerp(a, b, t, out=None): + """ + Compute the linear interpolation weighted by gamma on each point of + two same shape array. + + a : array_like + Left bound. + b : array_like + Right bound. + t : array_like + The interpolation weight. + out : array_like + Output array. + """ + diff_b_a = b - a + lerp_interpolation = add(a, diff_b_a * t, out=... if out is None else out) + subtract(b, diff_b_a * (1 - t), out=lerp_interpolation, where=t >= 0.5, + casting='unsafe', dtype=type(lerp_interpolation.dtype)) + if lerp_interpolation.ndim == 0 and out is None: + lerp_interpolation = lerp_interpolation[()] # unpack 0d arrays + return lerp_interpolation + + +def _get_gamma_mask(shape, default_value, conditioned_value, where): + out = np.full(shape, default_value) + np.copyto(out, conditioned_value, where=where, casting="unsafe") + return out + + +def _discrete_interpolation_to_boundaries(index, gamma_condition_fun): + previous = np.floor(index) + next = previous + 1 + gamma = index - previous + res = _get_gamma_mask(shape=index.shape, + default_value=next, + conditioned_value=previous, + where=gamma_condition_fun(gamma, index) + ).astype(np.intp) + # Some methods can lead to out-of-bound integers, clip them: + res[res < 0] = 0 + return res + + +def _closest_observation(n, quantiles): + # "choose the nearest even order statistic at g=0" (H&F (1996) pp. 362). + # Order is 1-based so for zero-based indexing round to nearest odd index. + gamma_fun = lambda gamma, index: (gamma == 0) & (np.floor(index) % 2 == 1) + return _discrete_interpolation_to_boundaries((n * quantiles) - 1 - 0.5, + gamma_fun) + + +def _inverted_cdf(n, quantiles): + gamma_fun = lambda gamma, _: (gamma == 0) + return _discrete_interpolation_to_boundaries((n * quantiles) - 1, + gamma_fun) + + +def _quantile_ureduce_func( + a: np.ndarray, + q: np.ndarray, + weights: np.ndarray | None, + axis: int | None = None, + out: np.ndarray | None = None, + overwrite_input: bool = False, + method: str = "linear", + weak_q: bool = False, +) -> np.ndarray: + if q.ndim > 2: + # The code below works fine for nd, but it might not have useful + # semantics. For now, keep the supported dimensions the same as it was + # before. + raise ValueError("q must be a scalar or 1d") + if overwrite_input: + if axis is None: + axis = 0 + arr = a.ravel() + wgt = None if weights is None else weights.ravel() + else: + arr = a + wgt = weights + elif axis is None: + axis = 0 + arr = a.flatten() + wgt = None if weights is None else weights.flatten() + else: + arr = a.copy() + wgt = weights + result = _quantile(arr, + quantiles=q, + axis=axis, + method=method, + out=out, + weights=wgt, + weak_q=weak_q) + return result + + +def _get_indexes(arr, virtual_indexes, valid_values_count): + """ + Get the valid indexes of arr neighbouring virtual_indexes. + Note + This is a companion function to linear interpolation of + Quantiles + + Returns + ------- + (previous_indexes, next_indexes): Tuple + A Tuple of virtual_indexes neighbouring indexes + """ + previous_indexes = floor(virtual_indexes, out=...) + next_indexes = add(previous_indexes, 1, out=...) + indexes_above_bounds = virtual_indexes >= valid_values_count - 1 + # When indexes is above max index, take the max value of the array + if indexes_above_bounds.any(): + previous_indexes[indexes_above_bounds] = -1 + next_indexes[indexes_above_bounds] = -1 + # When indexes is below min index, take the min value of the array + indexes_below_bounds = virtual_indexes < 0 + if indexes_below_bounds.any(): + previous_indexes[indexes_below_bounds] = 0 + next_indexes[indexes_below_bounds] = 0 + if np.issubdtype(arr.dtype, np.inexact): + # After the sort, slices having NaNs will have for last element a NaN + virtual_indexes_nans = np.isnan(virtual_indexes) + if virtual_indexes_nans.any(): + previous_indexes[virtual_indexes_nans] = -1 + next_indexes[virtual_indexes_nans] = -1 + previous_indexes = previous_indexes.astype(np.intp) + next_indexes = next_indexes.astype(np.intp) + return previous_indexes, next_indexes + + +def _quantile( + arr: "np.typing.ArrayLike", + quantiles: np.ndarray, + axis: int = -1, + method: str = "linear", + out: np.ndarray | None = None, + weights: "np.typing.ArrayLike | None" = None, + weak_q: bool = False, +) -> np.ndarray: + """ + Private function that doesn't support extended axis or keepdims. + These methods are extended to this function using _ureduce + See nanpercentile for parameter usage + It computes the quantiles of the array for the given axis. + A linear interpolation is performed based on the `method`. + + By default, the method is "linear" where alpha == beta == 1 which + performs the 7th method of Hyndman&Fan. + With "median_unbiased" we get alpha == beta == 1/3 + thus the 8th method of Hyndman&Fan. + """ + # --- Setup + arr = np.asanyarray(arr) + values_count = arr.shape[axis] + # The dimensions of `q` are prepended to the output shape, so we need the + # axis being sampled from `arr` to be last. + if axis != 0: # But moveaxis is slow, so only call it if necessary. + arr = np.moveaxis(arr, axis, destination=0) + supports_nans = ( + np.issubdtype(arr.dtype, np.inexact) or arr.dtype.kind in 'Mm' + ) + + if weights is None: + # --- Computation of indexes + # Index where to find the value in the sorted array. + # Virtual because it is a floating point value, not a valid index. + # The nearest neighbours are used for interpolation + try: + method_props = _QuantileMethods[method] + except KeyError: + raise ValueError( + f"{method!r} is not a valid method. Use one of: " + f"{_QuantileMethods.keys()}") from None + virtual_indexes = method_props["get_virtual_index"](values_count, + quantiles) + virtual_indexes = np.asanyarray(virtual_indexes) + + if method_props["fix_gamma"] is None: + supports_integers = True + else: + int_virtual_indices = np.issubdtype(virtual_indexes.dtype, + np.integer) + supports_integers = method == 'linear' and int_virtual_indices + + if supports_integers: + # No interpolation needed, take the points along axis + if supports_nans: + # may contain nan, which would sort to the end + arr.partition( + concatenate((virtual_indexes.ravel(), [-1])), axis=0, + ) + slices_having_nans = np.isnan(arr[-1, ...]) + else: + # cannot contain nan + arr.partition(virtual_indexes.ravel(), axis=0) + slices_having_nans = np.array(False, dtype=bool) + result = take(arr, virtual_indexes, axis=0, out=out) + else: + previous_indexes, next_indexes = _get_indexes(arr, + virtual_indexes, + values_count) + # --- Sorting + arr.partition( + np.unique(np.concatenate(([0, -1], + previous_indexes.ravel(), + next_indexes.ravel(), + ))), + axis=0) + if supports_nans: + slices_having_nans = np.isnan(arr[-1, ...]) + else: + slices_having_nans = None + # --- Get values from indexes + previous = arr[previous_indexes] + next = arr[next_indexes] + # --- Linear interpolation + gamma = _get_gamma(virtual_indexes, previous_indexes, + method_props) + if weak_q: + gamma = float(gamma) + else: + result_shape = virtual_indexes.shape + (1,) * (arr.ndim - 1) + gamma = gamma.reshape(result_shape) + result = _lerp(previous, + next, + gamma, + out=out) + else: + # Weighted case + # This implements method="inverted_cdf", the only supported weighted + # method, which needs to sort anyway. + weights = np.asanyarray(weights) + if axis != 0: + weights = np.moveaxis(weights, axis, destination=0) + index_array = np.argsort(arr, axis=0) + + # arr = arr[index_array, ...] # but this adds trailing dimensions of + # 1. + arr = np.take_along_axis(arr, index_array, axis=0) + if weights.shape == arr.shape: + weights = np.take_along_axis(weights, index_array, axis=0) + else: + # weights is 1d + weights = weights.reshape(-1)[index_array, ...] + + if supports_nans: + # may contain nan, which would sort to the end + slices_having_nans = np.isnan(arr[-1, ...]) + else: + # cannot contain nan + slices_having_nans = np.array(False, dtype=bool) + + # We use the weights to calculate the empirical cumulative + # distribution function cdf + cdf = weights.cumsum(axis=0, dtype=np.float64) + cdf /= cdf[-1, ...] # normalization to 1 + if np.isnan(cdf[-1]).any(): + # Above calculations should normally warn for the zero/inf case. + raise ValueError("Weights included NaN, inf or were all zero.") + # Search index i such that + # sum(weights[j], j=0..i-1) < quantile <= sum(weights[j], j=0..i) + # is then equivalent to + # cdf[i-1] < quantile <= cdf[i] + # Unfortunately, searchsorted only accepts 1-d arrays as first + # argument, so we will need to iterate over dimensions. + + # Without the following cast, searchsorted can return surprising + # results, e.g. + # np.searchsorted(np.array([0.2, 0.4, 0.6, 0.8, 1.]), + # np.array(0.4, dtype=np.float32), side="left") + # returns 2 instead of 1 because 0.4 is not binary representable. + if quantiles.dtype.kind == "f": + cdf = cdf.astype(quantiles.dtype) + # Weights must be non-negative, so we might have zero weights at the + # beginning leading to some leading zeros in cdf. The call to + # np.searchsorted for quantiles=0 will then pick the first element, + # but should pick the first one larger than zero. We + # therefore simply set 0 values in cdf to -1. + if np.any(cdf[0, ...] == 0): + cdf[cdf == 0] = -1 + + def find_cdf_1d(arr, cdf): + indices = np.searchsorted(cdf, quantiles, side="left") + # We might have reached the maximum with i = len(arr), e.g. for + # quantiles = 1, and need to cut it to len(arr) - 1. + indices = minimum(indices, values_count - 1) + result = take(arr, indices, axis=0) + return result + + r_shape = arr.shape[1:] + if quantiles.ndim > 0: + r_shape = quantiles.shape + r_shape + if out is None: + result = np.empty_like(arr, shape=r_shape) + else: + if out.shape != r_shape: + msg = (f"Wrong shape of argument 'out', shape={r_shape} is " + f"required; got shape={out.shape}.") + raise ValueError(msg) + result = out + + # See apply_along_axis, which we do for axis=0. Note that Ni = (,) + # always, so we remove it here. + Nk = arr.shape[1:] + for kk in np.ndindex(Nk): + result[(...,) + kk] = find_cdf_1d( + arr[np.s_[:, ] + kk], cdf[np.s_[:, ] + kk] + ) + + # Make result the same as in unweighted inverted_cdf. + if result.shape == () and result.dtype == np.dtype("O"): + result = result.item() + + if np.any(slices_having_nans): + if result.ndim == 0 and out is None: + # can't write to a scalar, but indexing will be correct + result = arr[-1] + else: + np.copyto(result, arr[-1, ...], where=slices_having_nans) + return result + + +def _trapezoid_dispatcher(y, x=None, dx=None, axis=None): + return (y, x) + + +@array_function_dispatch(_trapezoid_dispatcher) +def trapezoid(y, x=None, dx=1.0, axis=-1): + r""" + Integrate along the given axis using the composite trapezoidal rule. + + If `x` is provided, the integration happens in sequence along its + elements - they are not sorted. + + Integrate `y` (`x`) along each 1d slice on the given axis, compute + :math:`\int y(x) dx`. + When `x` is specified, this integrates along the parametric curve, + computing :math:`\int_t y(t) dt = + \int_t y(t) \left.\frac{dx}{dt}\right|_{x=x(t)} dt`. + + .. versionadded:: 2.0.0 + + Parameters + ---------- + y : array_like + Input array to integrate. + x : array_like, optional + The sample points corresponding to the `y` values. If `x` is None, + the sample points are assumed to be evenly spaced `dx` apart. The + default is None. + dx : scalar, optional + The spacing between sample points when `x` is None. The default is 1. + axis : int, optional + The axis along which to integrate. + + Returns + ------- + trapezoid : float or ndarray + Definite integral of `y` = n-dimensional array as approximated along + a single axis by the trapezoidal rule. If `y` is a 1-dimensional array, + then the result is a float. If `n` is greater than 1, then the result + is an `n`-1 dimensional array. + + See Also + -------- + sum, cumsum + + Notes + ----- + Image [2]_ illustrates trapezoidal rule -- y-axis locations of points + will be taken from `y` array, by default x-axis distances between + points will be 1.0, alternatively they can be provided with `x` array + or with `dx` scalar. Return value will be equal to combined area under + the red lines. + + + References + ---------- + .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule + + .. [2] Illustration image: + https://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png + + Examples + -------- + >>> import numpy as np + + Use the trapezoidal rule on evenly spaced points: + + >>> np.trapezoid([1, 2, 3]) + 4.0 + + The spacing between sample points can be selected by either the + ``x`` or ``dx`` arguments: + + >>> np.trapezoid([1, 2, 3], x=[4, 6, 8]) + 8.0 + >>> np.trapezoid([1, 2, 3], dx=2) + 8.0 + + Using a decreasing ``x`` corresponds to integrating in reverse: + + >>> np.trapezoid([1, 2, 3], x=[8, 6, 4]) + -8.0 + + More generally ``x`` is used to integrate along a parametric curve. We can + estimate the integral :math:`\int_0^1 x^2 = 1/3` using: + + >>> x = np.linspace(0, 1, num=50) + >>> y = x**2 + >>> np.trapezoid(y, x) + 0.33340274885464394 + + Or estimate the area of a circle, noting we repeat the sample which closes + the curve: + + >>> theta = np.linspace(0, 2 * np.pi, num=1000, endpoint=True) + >>> np.trapezoid(np.cos(theta), x=np.sin(theta)) + 3.141571941375841 + + ``np.trapezoid`` can be applied along a specified axis to do multiple + computations in one call: + + >>> a = np.arange(6).reshape(2, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.trapezoid(a, axis=0) + array([1.5, 2.5, 3.5]) + >>> np.trapezoid(a, axis=1) + array([2., 8.]) + """ + + y = asanyarray(y) + if x is None: + d = dx + else: + x = asanyarray(x) + if x.ndim == 1: + d = diff(x) + # reshape to correct shape + shape = [1] * y.ndim + shape[axis] = d.shape[0] + d = d.reshape(shape) + else: + d = diff(x, axis=axis) + nd = y.ndim + slice1 = [slice(None)] * nd + slice2 = [slice(None)] * nd + slice1[axis] = slice(1, None) + slice2[axis] = slice(None, -1) + try: + ret = (d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0).sum(axis) + except ValueError: + # Operations didn't work, cast to ndarray + d = np.asarray(d) + y = np.asarray(y) + ret = add.reduce(d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0, axis) + return ret + + +def _meshgrid_dispatcher(*xi, copy=None, sparse=None, indexing=None): + return xi + + +# Based on scitools meshgrid +@array_function_dispatch(_meshgrid_dispatcher) +def meshgrid(*xi, copy=True, sparse=False, indexing='xy'): + """ + Return a tuple of coordinate matrices from coordinate vectors. + + Make N-D coordinate arrays for vectorized evaluations of + N-D scalar/vector fields over N-D grids, given + one-dimensional coordinate arrays x1, x2,..., xn. + + Parameters + ---------- + x1, x2,..., xn : array_like + 1-D arrays representing the coordinates of a grid. + indexing : {'xy', 'ij'}, optional + Cartesian ('xy', default) or matrix ('ij') indexing of output. + See Notes for more details. + sparse : bool, optional + If True the shape of the returned coordinate array for dimension *i* + is reduced from ``(N1, ..., Ni, ... Nn)`` to + ``(1, ..., 1, Ni, 1, ..., 1)``. These sparse coordinate grids are + intended to be used with :ref:`basics.broadcasting`. When all + coordinates are used in an expression, broadcasting still leads to a + fully-dimensonal result array. + + Default is False. + + copy : bool, optional + If False, a view into the original arrays are returned in order to + conserve memory. Default is True. Please note that + ``sparse=False, copy=False`` will likely return non-contiguous + arrays. Furthermore, more than one element of a broadcast array + may refer to a single memory location. If you need to write to the + arrays, make copies first. + + Returns + ------- + X1, X2,..., XN : tuple of ndarrays + For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``, + returns ``(N1, N2, N3,..., Nn)`` shaped arrays if indexing='ij' + or ``(N2, N1, N3,..., Nn)`` shaped arrays if indexing='xy' + with the elements of `xi` repeated to fill the matrix along + the first dimension for `x1`, the second for `x2` and so on. + + Notes + ----- + This function supports both indexing conventions through the indexing + keyword argument. Giving the string 'ij' returns a meshgrid with + matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing. + In the 2-D case with inputs of length M and N, the outputs are of shape + (N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case + with inputs of length M, N and P, outputs are of shape (N, M, P) for + 'xy' indexing and (M, N, P) for 'ij' indexing. The difference is + illustrated by the following code snippet:: + + xv, yv = np.meshgrid(x, y, indexing='ij') + for i in range(nx): + for j in range(ny): + # treat xv[i,j], yv[i,j] + + xv, yv = np.meshgrid(x, y, indexing='xy') + for i in range(nx): + for j in range(ny): + # treat xv[j,i], yv[j,i] + + In the 1-D and 0-D case, the indexing and sparse keywords have no effect. + + See Also + -------- + mgrid : Construct a multi-dimensional "meshgrid" using indexing notation. + ogrid : Construct an open multi-dimensional "meshgrid" using indexing + notation. + :ref:`how-to-index` + + Examples + -------- + >>> import numpy as np + >>> nx, ny = (3, 2) + >>> x = np.linspace(0, 1, nx) + >>> y = np.linspace(0, 1, ny) + >>> xv, yv = np.meshgrid(x, y) + >>> xv + array([[0. , 0.5, 1. ], + [0. , 0.5, 1. ]]) + >>> yv + array([[0., 0., 0.], + [1., 1., 1.]]) + + The result of `meshgrid` is a coordinate grid: + + >>> import matplotlib.pyplot as plt + >>> plt.plot(xv, yv, marker='o', color='k', linestyle='none') + >>> plt.show() + + You can create sparse output arrays to save memory and computation time. + + >>> xv, yv = np.meshgrid(x, y, sparse=True) + >>> xv + array([[0. , 0.5, 1. ]]) + >>> yv + array([[0.], + [1.]]) + + `meshgrid` is very useful to evaluate functions on a grid. If the + function depends on all coordinates, both dense and sparse outputs can be + used. + + >>> x = np.linspace(-5, 5, 101) + >>> y = np.linspace(-5, 5, 101) + >>> # full coordinate arrays + >>> xx, yy = np.meshgrid(x, y) + >>> zz = np.sqrt(xx**2 + yy**2) + >>> xx.shape, yy.shape, zz.shape + ((101, 101), (101, 101), (101, 101)) + >>> # sparse coordinate arrays + >>> xs, ys = np.meshgrid(x, y, sparse=True) + >>> zs = np.sqrt(xs**2 + ys**2) + >>> xs.shape, ys.shape, zs.shape + ((1, 101), (101, 1), (101, 101)) + >>> np.array_equal(zz, zs) + True + + >>> h = plt.contourf(x, y, zs) + >>> plt.axis('scaled') + >>> plt.colorbar() + >>> plt.show() + """ + ndim = len(xi) + + if indexing not in ['xy', 'ij']: + raise ValueError( + "Valid values for `indexing` are 'xy' and 'ij'.") + + s0 = (1,) * ndim + output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1:]) + for i, x in enumerate(xi)] + + if indexing == 'xy' and ndim > 1: + # switch first and second axis + output[0].shape = (1, -1) + s0[2:] + output[1].shape = (-1, 1) + s0[2:] + + if not sparse: + # Return the full N-D matrix (not only the 1-D vector) + output = np.broadcast_arrays(*output, subok=True) + + if copy: + output = tuple(x.copy() for x in output) + + return output + + +def _delete_dispatcher(arr, obj, axis=None): + return (arr, obj) + + +@array_function_dispatch(_delete_dispatcher) +def delete(arr, obj, axis=None): + """ + Return a new array with sub-arrays along an axis deleted. For a one + dimensional array, this returns those entries not returned by + `arr[obj]`. + + Parameters + ---------- + arr : array_like + Input array. + obj : slice, int, array-like of ints or bools + Indicate indices of sub-arrays to remove along the specified axis. + + .. versionchanged:: 1.19.0 + Boolean indices are now treated as a mask of elements to remove, + rather than being cast to the integers 0 and 1. + + axis : int, optional + The axis along which to delete the subarray defined by `obj`. + If `axis` is None, `obj` is applied to the flattened array. + + Returns + ------- + out : ndarray + A copy of `arr` with the elements specified by `obj` removed. Note + that `delete` does not occur in-place. If `axis` is None, `out` is + a flattened array. + + See Also + -------- + insert : Insert elements into an array. + append : Append elements at the end of an array. + + Notes + ----- + Often it is preferable to use a boolean mask. For example: + + >>> arr = np.arange(12) + 1 + >>> mask = np.ones(len(arr), dtype=bool) + >>> mask[[0,2,4]] = False + >>> result = arr[mask,...] + + Is equivalent to ``np.delete(arr, [0,2,4], axis=0)``, but allows further + use of `mask`. + + Examples + -------- + >>> import numpy as np + >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) + >>> arr + array([[ 1, 2, 3, 4], + [ 5, 6, 7, 8], + [ 9, 10, 11, 12]]) + >>> np.delete(arr, 1, 0) + array([[ 1, 2, 3, 4], + [ 9, 10, 11, 12]]) + + >>> np.delete(arr, np.s_[::2], 1) + array([[ 2, 4], + [ 6, 8], + [10, 12]]) + >>> np.delete(arr, [1,3,5], None) + array([ 1, 3, 5, 7, 8, 9, 10, 11, 12]) + + """ + conv = _array_converter(arr) + arr, = conv.as_arrays(subok=False) + + ndim = arr.ndim + arrorder = 'F' if arr.flags.fnc else 'C' + if axis is None: + if ndim != 1: + arr = arr.ravel() + # needed for np.matrix, which is still not 1d after being ravelled + ndim = arr.ndim + axis = ndim - 1 + else: + axis = normalize_axis_index(axis, ndim) + + slobj = [slice(None)] * ndim + N = arr.shape[axis] + newshape = list(arr.shape) + + if isinstance(obj, slice): + start, stop, step = obj.indices(N) + xr = range(start, stop, step) + numtodel = len(xr) + + if numtodel <= 0: + return conv.wrap(arr.copy(order=arrorder), to_scalar=False) + + # Invert if step is negative: + if step < 0: + step = -step + start = xr[-1] + stop = xr[0] + 1 + + newshape[axis] -= numtodel + new = empty(newshape, arr.dtype, arrorder) + # copy initial chunk + if start == 0: + pass + else: + slobj[axis] = slice(None, start) + new[tuple(slobj)] = arr[tuple(slobj)] + # copy end chunk + if stop == N: + pass + else: + slobj[axis] = slice(stop - numtodel, None) + slobj2 = [slice(None)] * ndim + slobj2[axis] = slice(stop, None) + new[tuple(slobj)] = arr[tuple(slobj2)] + # copy middle pieces + if step == 1: + pass + else: # use array indexing. + keep = ones(stop - start, dtype=bool) + keep[:stop - start:step] = False + slobj[axis] = slice(start, stop - numtodel) + slobj2 = [slice(None)] * ndim + slobj2[axis] = slice(start, stop) + arr = arr[tuple(slobj2)] + slobj2[axis] = keep + new[tuple(slobj)] = arr[tuple(slobj2)] + + return conv.wrap(new, to_scalar=False) + + if isinstance(obj, (int, integer)) and not isinstance(obj, bool): + single_value = True + else: + single_value = False + _obj = obj + obj = np.asarray(obj) + # `size == 0` to allow empty lists similar to indexing, but (as there) + # is really too generic: + if obj.size == 0 and not isinstance(_obj, np.ndarray): + obj = obj.astype(intp) + elif obj.size == 1 and obj.dtype.kind in "ui": + # For a size 1 integer array we can use the single-value path + # (most dtypes, except boolean, should just fail later). + obj = obj.item() + single_value = True + + if single_value: + # optimization for a single value + if (obj < -N or obj >= N): + raise IndexError( + f"index {obj} is out of bounds for axis {axis} with " + f"size {N}") + if (obj < 0): + obj += N + newshape[axis] -= 1 + new = empty(newshape, arr.dtype, arrorder) + slobj[axis] = slice(None, obj) + new[tuple(slobj)] = arr[tuple(slobj)] + slobj[axis] = slice(obj, None) + slobj2 = [slice(None)] * ndim + slobj2[axis] = slice(obj + 1, None) + new[tuple(slobj)] = arr[tuple(slobj2)] + else: + if obj.dtype == bool: + if obj.shape != (N,): + raise ValueError('boolean array argument obj to delete ' + 'must be one dimensional and match the axis ' + f'length of {N}') + + # optimization, the other branch is slower + keep = ~obj + else: + keep = ones(N, dtype=bool) + keep[obj,] = False + + slobj[axis] = keep + new = arr[tuple(slobj)] + + return conv.wrap(new, to_scalar=False) + + +def _insert_dispatcher(arr, obj, values, axis=None): + return (arr, obj, values) + + +@array_function_dispatch(_insert_dispatcher) +def insert(arr, obj, values, axis=None): + """ + Insert values along the given axis before the given indices. + + Parameters + ---------- + arr : array_like + Input array. + obj : slice, int, array-like of ints or bools + Object that defines the index or indices before which `values` is + inserted. + + .. versionchanged:: 2.1.2 + Boolean indices are now treated as a mask of elements to insert, + rather than being cast to the integers 0 and 1. + + Support for multiple insertions when `obj` is a single scalar or a + sequence with one element (similar to calling insert multiple + times). + values : array_like + Values to insert into `arr`. If the type of `values` is different + from that of `arr`, `values` is converted to the type of `arr`. + `values` should be shaped so that ``arr[...,obj,...] = values`` + is legal. + axis : int, optional + Axis along which to insert `values`. If `axis` is None then `arr` + is flattened first. + + Returns + ------- + out : ndarray + A copy of `arr` with `values` inserted. Note that `insert` + does not occur in-place: a new array is returned. If + `axis` is None, `out` is a flattened array. + + See Also + -------- + append : Append elements at the end of an array. + concatenate : Join a sequence of arrays along an existing axis. + delete : Delete elements from an array. + + Notes + ----- + Note that for higher dimensional inserts ``obj=0`` behaves very different + from ``obj=[0]`` just like ``arr[:,0,:] = values`` is different from + ``arr[:,[0],:] = values``. This is because of the difference between basic + and advanced :ref:`indexing `. + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(6).reshape(3, 2) + >>> a + array([[0, 1], + [2, 3], + [4, 5]]) + >>> np.insert(a, 1, 6) + array([0, 6, 1, 2, 3, 4, 5]) + >>> np.insert(a, 1, 6, axis=1) + array([[0, 6, 1], + [2, 6, 3], + [4, 6, 5]]) + + Difference between sequence and scalars, + showing how ``obj=[1]`` behaves different from ``obj=1``: + + >>> np.insert(a, [1], [[7],[8],[9]], axis=1) + array([[0, 7, 1], + [2, 8, 3], + [4, 9, 5]]) + >>> np.insert(a, 1, [[7],[8],[9]], axis=1) + array([[0, 7, 8, 9, 1], + [2, 7, 8, 9, 3], + [4, 7, 8, 9, 5]]) + >>> np.array_equal(np.insert(a, 1, [7, 8, 9], axis=1), + ... np.insert(a, [1], [[7],[8],[9]], axis=1)) + True + + >>> b = a.flatten() + >>> b + array([0, 1, 2, 3, 4, 5]) + >>> np.insert(b, [2, 2], [6, 7]) + array([0, 1, 6, 7, 2, 3, 4, 5]) + + >>> np.insert(b, slice(2, 4), [7, 8]) + array([0, 1, 7, 2, 8, 3, 4, 5]) + + >>> np.insert(b, [2, 2], [7.13, False]) # type casting + array([0, 1, 7, 0, 2, 3, 4, 5]) + + >>> x = np.arange(8).reshape(2, 4) + >>> idx = (1, 3) + >>> np.insert(x, idx, 999, axis=1) + array([[ 0, 999, 1, 2, 999, 3], + [ 4, 999, 5, 6, 999, 7]]) + + """ + conv = _array_converter(arr) + arr, = conv.as_arrays(subok=False) + + ndim = arr.ndim + arrorder = 'F' if arr.flags.fnc else 'C' + if axis is None: + if ndim != 1: + arr = arr.ravel() + # needed for np.matrix, which is still not 1d after being ravelled + ndim = arr.ndim + axis = ndim - 1 + else: + axis = normalize_axis_index(axis, ndim) + slobj = [slice(None)] * ndim + N = arr.shape[axis] + newshape = list(arr.shape) + + if isinstance(obj, slice): + # turn it into a range object + indices = arange(*obj.indices(N), dtype=intp) + else: + # need to copy obj, because indices will be changed in-place + indices = np.array(obj) + if indices.dtype == bool: + if obj.ndim != 1: + raise ValueError('boolean array argument obj to insert ' + 'must be one dimensional') + indices = np.flatnonzero(obj) + elif indices.ndim > 1: + raise ValueError( + "index array argument obj to insert must be one dimensional " + "or scalar") + if indices.size == 1: + index = indices.item() + if index < -N or index > N: + raise IndexError(f"index {obj} is out of bounds for axis {axis} " + f"with size {N}") + if (index < 0): + index += N + + # There are some object array corner cases here, but we cannot avoid + # that: + values = array(values, copy=None, ndmin=arr.ndim, dtype=arr.dtype) + if indices.ndim == 0: + # broadcasting is very different here, since a[:,0,:] = ... behaves + # very different from a[:,[0],:] = ...! This changes values so that + # it works likes the second case. (here a[:,0:1,:]) + values = np.moveaxis(values, 0, axis) + numnew = values.shape[axis] + newshape[axis] += numnew + new = empty(newshape, arr.dtype, arrorder) + slobj[axis] = slice(None, index) + new[tuple(slobj)] = arr[tuple(slobj)] + slobj[axis] = slice(index, index + numnew) + new[tuple(slobj)] = values + slobj[axis] = slice(index + numnew, None) + slobj2 = [slice(None)] * ndim + slobj2[axis] = slice(index, None) + new[tuple(slobj)] = arr[tuple(slobj2)] + + return conv.wrap(new, to_scalar=False) + + elif indices.size == 0 and not isinstance(obj, np.ndarray): + # Can safely cast the empty list to intp + indices = indices.astype(intp) + + indices[indices < 0] += N + + numnew = len(indices) + order = indices.argsort(kind='mergesort') # stable sort + indices[order] += np.arange(numnew) + + newshape[axis] += numnew + old_mask = ones(newshape[axis], dtype=bool) + old_mask[indices] = False + + new = empty(newshape, arr.dtype, arrorder) + slobj2 = [slice(None)] * ndim + slobj[axis] = indices + slobj2[axis] = old_mask + new[tuple(slobj)] = values + new[tuple(slobj2)] = arr + + return conv.wrap(new, to_scalar=False) + + +def _append_dispatcher(arr, values, axis=None): + return (arr, values) + + +@array_function_dispatch(_append_dispatcher) +def append(arr, values, axis=None): + """ + Append values to the end of an array. + + Parameters + ---------- + arr : array_like + Values are appended to a copy of this array. + values : array_like + These values are appended to a copy of `arr`. It must be of the + correct shape (the same shape as `arr`, excluding `axis`). If + `axis` is not specified, `values` can be any shape and will be + flattened before use. + axis : int, optional + The axis along which `values` are appended. If `axis` is not + given, both `arr` and `values` are flattened before use. + + Returns + ------- + append : ndarray + A copy of `arr` with `values` appended to `axis`. Note that + `append` does not occur in-place: a new array is allocated and + filled. If `axis` is None, `out` is a flattened array. + + See Also + -------- + insert : Insert elements into an array. + delete : Delete elements from an array. + + Examples + -------- + >>> import numpy as np + >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]]) + array([1, 2, 3, ..., 7, 8, 9]) + + When `axis` is specified, `values` must have the correct shape. + + >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0) + array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + + >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0) + Traceback (most recent call last): + ... + ValueError: all the input arrays must have same number of dimensions, but + the array at index 0 has 2 dimension(s) and the array at index 1 has 1 + dimension(s) + + >>> a = np.array([1, 2], dtype=int) + >>> c = np.append(a, []) + >>> c + array([1., 2.]) + >>> c.dtype + float64 + + Default dtype for empty ndarrays is `float64` thus making the output of dtype + `float64` when appended with dtype `int64` + + """ + arr = asanyarray(arr) + if axis is None: + if arr.ndim != 1: + arr = arr.ravel() + values = ravel(values) + axis = arr.ndim - 1 + return concatenate((arr, values), axis=axis) + + +def _digitize_dispatcher(x, bins, right=None): + return (x, bins) + + +@array_function_dispatch(_digitize_dispatcher) +def digitize(x, bins, right=False): + """ + Return the indices of the bins to which each value in input array belongs. + + ========= ============= ============================ + `right` order of bins returned index `i` satisfies + ========= ============= ============================ + ``False`` increasing ``bins[i-1] <= x < bins[i]`` + ``True`` increasing ``bins[i-1] < x <= bins[i]`` + ``False`` decreasing ``bins[i-1] > x >= bins[i]`` + ``True`` decreasing ``bins[i-1] >= x > bins[i]`` + ========= ============= ============================ + + If values in `x` are beyond the bounds of `bins`, 0 or ``len(bins)`` is + returned as appropriate. + + Parameters + ---------- + x : array_like + Input array to be binned. Prior to NumPy 1.10.0, this array had to + be 1-dimensional, but can now have any shape. + bins : array_like + Array of bins. It has to be 1-dimensional and monotonic. + right : bool, optional + Indicating whether the intervals include the right or the left bin + edge. Default behavior is (right==False) indicating that the interval + does not include the right edge. The left bin end is open in this + case, i.e., bins[i-1] <= x < bins[i] is the default behavior for + monotonically increasing bins. + + Returns + ------- + indices : ndarray of ints + Output array of indices, of same shape as `x`. + + Raises + ------ + ValueError + If `bins` is not monotonic. + TypeError + If the type of the input is complex. + + See Also + -------- + bincount, histogram, unique, searchsorted + + Notes + ----- + If values in `x` are such that they fall outside the bin range, + attempting to index `bins` with the indices that `digitize` returns + will result in an IndexError. + + .. versionadded:: 1.10.0 + + `numpy.digitize` is implemented in terms of `numpy.searchsorted`. + This means that a binary search is used to bin the values, which scales + much better for larger number of bins than the previous linear search. + It also removes the requirement for the input array to be 1-dimensional. + + For monotonically *increasing* `bins`, the following are equivalent:: + + np.digitize(x, bins, right=True) + np.searchsorted(bins, x, side='left') + + Note that as the order of the arguments are reversed, the side must be too. + The `searchsorted` call is marginally faster, as it does not do any + monotonicity checks. Perhaps more importantly, it supports all dtypes. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([0.2, 6.4, 3.0, 1.6]) + >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0]) + >>> inds = np.digitize(x, bins) + >>> inds + array([1, 4, 3, 2]) + >>> for n in range(x.size): + ... print(bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]]) + ... + 0.0 <= 0.2 < 1.0 + 4.0 <= 6.4 < 10.0 + 2.5 <= 3.0 < 4.0 + 1.0 <= 1.6 < 2.5 + + >>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.]) + >>> bins = np.array([0, 5, 10, 15, 20]) + >>> np.digitize(x,bins,right=True) + array([1, 2, 3, 4, 4]) + >>> np.digitize(x,bins,right=False) + array([1, 3, 3, 4, 5]) + """ + x = _nx.asarray(x) + bins = _nx.asarray(bins) + + # here for compatibility, searchsorted below is happy to take this + if np.issubdtype(x.dtype, _nx.complexfloating): + raise TypeError("x may not be complex") + + mono = _monotonicity(bins) + if mono == 0: + raise ValueError("bins must be monotonically increasing or decreasing") + + # this is backwards because the arguments below are swapped + side = 'left' if right else 'right' + if mono == -1: + # reverse the bins, and invert the results + return len(bins) - _nx.searchsorted(bins[::-1], x, side=side) + else: + return _nx.searchsorted(bins, x, side=side) diff --git a/local_packages/numpy/lib/_function_base_impl.pyi b/local_packages/numpy/lib/_function_base_impl.pyi new file mode 100644 index 0000000..8bcdf98 --- /dev/null +++ b/local_packages/numpy/lib/_function_base_impl.pyi @@ -0,0 +1,2324 @@ +from _typeshed import ConvertibleToInt, Incomplete +from collections.abc import Callable, Iterable, Sequence +from typing import ( + Any, + Concatenate, + Literal as L, + Never, + ParamSpec, + Protocol, + SupportsIndex, + SupportsInt, + TypeAlias, + overload, + type_check_only, +) +from typing_extensions import TypeIs, TypeVar + +import numpy as np +from numpy import _OrderKACF +from numpy._core.multiarray import bincount +from numpy._globals import _NoValueType +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _ArrayLikeNumber_co, + _ArrayLikeObject_co, + _ComplexLike_co, + _DTypeLike, + _FloatLike_co, + _NestedSequence as _SeqND, + _NumberLike_co, + _ScalarLike_co, + _ShapeLike, + _SupportsArray, +) + +__all__ = [ + "select", + "piecewise", + "trim_zeros", + "copy", + "iterable", + "percentile", + "diff", + "gradient", + "angle", + "unwrap", + "sort_complex", + "flip", + "rot90", + "extract", + "place", + "vectorize", + "asarray_chkfinite", + "average", + "bincount", + "digitize", + "cov", + "corrcoef", + "median", + "sinc", + "hamming", + "hanning", + "bartlett", + "blackman", + "kaiser", + "trapezoid", + "i0", + "meshgrid", + "delete", + "insert", + "append", + "interp", + "quantile", +] + +_T = TypeVar("_T") +_T_co = TypeVar("_T_co", covariant=True) +# The `{}ss` suffix refers to the PEP 695 (Python 3.12) `ParamSpec` syntax, `**P`. +_Tss = ParamSpec("_Tss") + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_ScalarT1 = TypeVar("_ScalarT1", bound=np.generic) +_ScalarT2 = TypeVar("_ScalarT2", bound=np.generic) +_FloatingT = TypeVar("_FloatingT", bound=np.floating) +_InexactT = TypeVar("_InexactT", bound=np.inexact) +_InexactTimeT = TypeVar("_InexactTimeT", bound=np.inexact | np.timedelta64) +_InexactDateTimeT = TypeVar("_InexactDateTimeT", bound=np.inexact | np.timedelta64 | np.datetime64) +_ScalarNumericT = TypeVar("_ScalarNumericT", bound=np.inexact | np.timedelta64 | np.object_) +_AnyDoubleT = TypeVar("_AnyDoubleT", bound=np.float64 | np.longdouble | np.complex128 | np.clongdouble) + +_ArrayT = TypeVar("_ArrayT", bound=np.ndarray) +_ArrayFloatingT = TypeVar("_ArrayFloatingT", bound=NDArray[np.floating]) +_ArrayFloatObjT = TypeVar("_ArrayFloatObjT", bound=NDArray[np.floating | np.object_]) +_ArrayComplexT = TypeVar("_ArrayComplexT", bound=NDArray[np.complexfloating]) +_ArrayInexactT = TypeVar("_ArrayInexactT", bound=NDArray[np.inexact]) +_ArrayNumericT = TypeVar("_ArrayNumericT", bound=NDArray[np.inexact | np.timedelta64 | np.object_]) + +_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_ScalarT]] | Sequence[_ScalarT] + +_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) + +_integer_co: TypeAlias = np.integer | np.bool +_float64_co: TypeAlias = np.float64 | _integer_co +_floating_co: TypeAlias = np.floating | _integer_co + +# non-trivial scalar-types that will become `complex128` in `sort_complex()`, +# i.e. all numeric scalar types except for `[u]int{8,16} | longdouble` +_SortsToComplex128: TypeAlias = ( + np.bool + | np.int32 + | np.uint32 + | np.int64 + | np.uint64 + | np.float16 + | np.float32 + | np.float64 + | np.timedelta64 + | np.object_ +) + +_Array: TypeAlias = np.ndarray[_ShapeT, np.dtype[_ScalarT]] +_Array0D: TypeAlias = np.ndarray[tuple[()], np.dtype[_ScalarT]] +_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] +_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] +_Array3D: TypeAlias = np.ndarray[tuple[int, int, int], np.dtype[_ScalarT]] +_ArrayMax2D: TypeAlias = np.ndarray[tuple[int] | tuple[int, int], np.dtype[_ScalarT]] +# workaround for mypy and pyright not following the typing spec for overloads +_ArrayNoD: TypeAlias = np.ndarray[tuple[Never, Never, Never, Never], np.dtype[_ScalarT]] + +_Seq1D: TypeAlias = Sequence[_T] +_Seq2D: TypeAlias = Sequence[Sequence[_T]] +_Seq3D: TypeAlias = Sequence[Sequence[Sequence[_T]]] +_ListSeqND: TypeAlias = list[_T] | _SeqND[list[_T]] + +_Tuple2: TypeAlias = tuple[_T, _T] +_Tuple3: TypeAlias = tuple[_T, _T, _T] +_Tuple4: TypeAlias = tuple[_T, _T, _T, _T] + +_Mesh1: TypeAlias = tuple[_Array1D[_ScalarT]] +_Mesh2: TypeAlias = tuple[_Array2D[_ScalarT], _Array2D[_ScalarT1]] +_Mesh3: TypeAlias = tuple[_Array3D[_ScalarT], _Array3D[_ScalarT1], _Array3D[_ScalarT2]] + +_IndexLike: TypeAlias = slice | _ArrayLikeInt_co + +_Indexing: TypeAlias = L["ij", "xy"] +_InterpolationMethod = L[ + "inverted_cdf", + "averaged_inverted_cdf", + "closest_observation", + "interpolated_inverted_cdf", + "hazen", + "weibull", + "linear", + "median_unbiased", + "normal_unbiased", + "lower", + "higher", + "midpoint", + "nearest", +] + +# The resulting value will be used as `y[cond] = func(vals, *args, **kw)`, so in can +# return any (usually 1d) array-like or scalar-like compatible with the input. +_PiecewiseFunction: TypeAlias = Callable[Concatenate[NDArray[_ScalarT], _Tss], ArrayLike] +_PiecewiseFunctions: TypeAlias = _SizedIterable[_PiecewiseFunction[_ScalarT, _Tss] | _ScalarLike_co] + +@type_check_only +class _TrimZerosSequence(Protocol[_T_co]): + def __len__(self, /) -> int: ... + @overload + def __getitem__(self, key: int, /) -> object: ... + @overload + def __getitem__(self, key: slice, /) -> _T_co: ... + +@type_check_only +class _SupportsRMulFloat(Protocol[_T_co]): + def __rmul__(self, other: float, /) -> _T_co: ... + +@type_check_only +class _SizedIterable(Protocol[_T_co]): + def __iter__(self) -> Iterable[_T_co]: ... + def __len__(self) -> int: ... + +### + +class vectorize: + __doc__: str | None + __module__: L["numpy"] = "numpy" + pyfunc: Callable[..., Incomplete] + cache: bool + signature: str | None + otypes: str | None + excluded: set[int | str] + + def __init__( + self, + /, + pyfunc: Callable[..., Incomplete] | _NoValueType = ..., # = _NoValue + otypes: str | Iterable[DTypeLike] | None = None, + doc: str | None = None, + excluded: Iterable[int | str] | None = None, + cache: bool = False, + signature: str | None = None, + ) -> None: ... + def __call__(self, /, *args: Incomplete, **kwargs: Incomplete) -> Incomplete: ... + +@overload +def rot90(m: _ArrayT, k: int = 1, axes: tuple[int, int] = (0, 1)) -> _ArrayT: ... +@overload +def rot90(m: _ArrayLike[_ScalarT], k: int = 1, axes: tuple[int, int] = (0, 1)) -> NDArray[_ScalarT]: ... +@overload +def rot90(m: ArrayLike, k: int = 1, axes: tuple[int, int] = (0, 1)) -> NDArray[Incomplete]: ... + +# NOTE: Technically `flip` also accept scalars, but that has no effect and complicates +# the overloads significantly, so we ignore that case here. +@overload +def flip(m: _ArrayT, axis: int | tuple[int, ...] | None = None) -> _ArrayT: ... +@overload +def flip(m: _ArrayLike[_ScalarT], axis: int | tuple[int, ...] | None = None) -> NDArray[_ScalarT]: ... +@overload +def flip(m: ArrayLike, axis: int | tuple[int, ...] | None = None) -> NDArray[Incomplete]: ... + +# +def iterable(y: object) -> TypeIs[Iterable[Any]]: ... + +# NOTE: This assumes that if `axis` is given the input is at least 2d, and will +# therefore always return an array. +# NOTE: This assumes that if `keepdims=True` the input is at least 1d, and will +# therefore always return an array. +@overload # inexact array, keepdims=True +def average( + a: _ArrayInexactT, + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[True], +) -> _ArrayInexactT: ... +@overload # inexact array, returned=True keepdims=True +def average( + a: _ArrayInexactT, + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[True], +) -> _Tuple2[_ArrayInexactT]: ... +@overload # inexact array-like, axis=None +def average( + a: _ArrayLike[_InexactT], + axis: None = None, + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> _InexactT: ... +@overload # inexact array-like, axis= +def average( + a: _ArrayLike[_InexactT], + axis: int | tuple[int, ...], + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> NDArray[_InexactT]: ... +@overload # inexact array-like, keepdims=True +def average( + a: _ArrayLike[_InexactT], + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[True], +) -> NDArray[_InexactT]: ... +@overload # inexact array-like, axis=None, returned=True +def average( + a: _ArrayLike[_InexactT], + axis: None = None, + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[_InexactT]: ... +@overload # inexact array-like, axis=, returned=True +def average( + a: _ArrayLike[_InexactT], + axis: int | tuple[int, ...], + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[NDArray[_InexactT]]: ... +@overload # inexact array-like, returned=True, keepdims=True +def average( + a: _ArrayLike[_InexactT], + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[True], +) -> _Tuple2[NDArray[_InexactT]]: ... +@overload # bool or integer array-like, axis=None +def average( + a: _SeqND[float] | _ArrayLikeInt_co, + axis: None = None, + weights: _ArrayLikeFloat_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> np.float64: ... +@overload # bool or integer array-like, axis= +def average( + a: _SeqND[float] | _ArrayLikeInt_co, + axis: int | tuple[int, ...], + weights: _ArrayLikeFloat_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> NDArray[np.float64]: ... +@overload # bool or integer array-like, keepdims=True +def average( + a: _SeqND[float] | _ArrayLikeInt_co, + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeFloat_co | None = None, + returned: L[False] = False, + *, + keepdims: L[True], +) -> NDArray[np.float64]: ... +@overload # bool or integer array-like, axis=None, returned=True +def average( + a: _SeqND[float] | _ArrayLikeInt_co, + axis: None = None, + weights: _ArrayLikeFloat_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[np.float64]: ... +@overload # bool or integer array-like, axis=, returned=True +def average( + a: _SeqND[float] | _ArrayLikeInt_co, + axis: int | tuple[int, ...], + weights: _ArrayLikeFloat_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[NDArray[np.float64]]: ... +@overload # bool or integer array-like, returned=True, keepdims=True +def average( + a: _SeqND[float] | _ArrayLikeInt_co, + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeFloat_co | None = None, + *, + returned: L[True], + keepdims: L[True], +) -> _Tuple2[NDArray[np.float64]]: ... +@overload # complex array-like, axis=None +def average( + a: _ListSeqND[complex], + axis: None = None, + weights: _ArrayLikeComplex_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> np.complex128: ... +@overload # complex array-like, axis= +def average( + a: _ListSeqND[complex], + axis: int | tuple[int, ...], + weights: _ArrayLikeComplex_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> NDArray[np.complex128]: ... +@overload # complex array-like, keepdims=True +def average( + a: _ListSeqND[complex], + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeComplex_co | None = None, + returned: L[False] = False, + *, + keepdims: L[True], +) -> NDArray[np.complex128]: ... +@overload # complex array-like, axis=None, returned=True +def average( + a: _ListSeqND[complex], + axis: None = None, + weights: _ArrayLikeComplex_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[np.complex128]: ... +@overload # complex array-like, axis=, returned=True +def average( + a: _ListSeqND[complex], + axis: int | tuple[int, ...], + weights: _ArrayLikeComplex_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[NDArray[np.complex128]]: ... +@overload # complex array-like, keepdims=True, returned=True +def average( + a: _ListSeqND[complex], + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeComplex_co | None = None, + *, + returned: L[True], + keepdims: L[True], +) -> _Tuple2[NDArray[np.complex128]]: ... +@overload # unknown, axis=None +def average( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: None = None, + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> Any: ... +@overload # unknown, axis= +def average( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: int | tuple[int, ...], + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> np.ndarray: ... +@overload # unknown, keepdims=True +def average( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeNumber_co | None = None, + returned: L[False] = False, + *, + keepdims: L[True], +) -> np.ndarray: ... +@overload # unknown, axis=None, returned=True +def average( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: None = None, + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[Any]: ... +@overload # unknown, axis=, returned=True +def average( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: int | tuple[int, ...], + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _Tuple2[np.ndarray]: ... +@overload # unknown, returned=True, keepdims=True +def average( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + axis: int | tuple[int, ...] | None = None, + weights: _ArrayLikeNumber_co | None = None, + *, + returned: L[True], + keepdims: L[True], +) -> _Tuple2[np.ndarray]: ... + +# +@overload +def asarray_chkfinite(a: _ArrayT, dtype: None = None, order: _OrderKACF = None) -> _ArrayT: ... +@overload +def asarray_chkfinite( + a: np.ndarray[_ShapeT], dtype: _DTypeLike[_ScalarT], order: _OrderKACF = None +) -> _Array[_ShapeT, _ScalarT]: ... +@overload +def asarray_chkfinite(a: _ArrayLike[_ScalarT], dtype: None = None, order: _OrderKACF = None) -> NDArray[_ScalarT]: ... +@overload +def asarray_chkfinite(a: object, dtype: _DTypeLike[_ScalarT], order: _OrderKACF = None) -> NDArray[_ScalarT]: ... +@overload +def asarray_chkfinite(a: object, dtype: DTypeLike | None = None, order: _OrderKACF = None) -> NDArray[Incomplete]: ... + +# NOTE: Contrary to the documentation, scalars are also accepted and treated as +# `[condlist]`. And even though the documentation says these should be boolean, in +# practice anything that `np.array(condlist, dtype=bool)` accepts will work, i.e. any +# array-like. +@overload +def piecewise( + x: _Array[_ShapeT, _ScalarT], + condlist: ArrayLike, + funclist: _PiecewiseFunctions[Any, _Tss], + *args: _Tss.args, + **kw: _Tss.kwargs, +) -> _Array[_ShapeT, _ScalarT]: ... +@overload +def piecewise( + x: _ArrayLike[_ScalarT], + condlist: ArrayLike, + funclist: _PiecewiseFunctions[Any, _Tss], + *args: _Tss.args, + **kw: _Tss.kwargs, +) -> NDArray[_ScalarT]: ... +@overload +def piecewise( + x: ArrayLike, + condlist: ArrayLike, + funclist: _PiecewiseFunctions[_ScalarT, _Tss], + *args: _Tss.args, + **kw: _Tss.kwargs, +) -> NDArray[_ScalarT]: ... + +# NOTE: condition is usually boolean, but anything with zero/non-zero semantics works +@overload +def extract(condition: ArrayLike, arr: _ArrayLike[_ScalarT]) -> _Array1D[_ScalarT]: ... +@overload +def extract(condition: ArrayLike, arr: _SeqND[bool]) -> _Array1D[np.bool]: ... +@overload +def extract(condition: ArrayLike, arr: _ListSeqND[int]) -> _Array1D[np.int_]: ... +@overload +def extract(condition: ArrayLike, arr: _ListSeqND[float]) -> _Array1D[np.float64]: ... +@overload +def extract(condition: ArrayLike, arr: _ListSeqND[complex]) -> _Array1D[np.complex128]: ... +@overload +def extract(condition: ArrayLike, arr: _SeqND[bytes]) -> _Array1D[np.bytes_]: ... +@overload +def extract(condition: ArrayLike, arr: _SeqND[str]) -> _Array1D[np.str_]: ... +@overload +def extract(condition: ArrayLike, arr: ArrayLike) -> _Array1D[Incomplete]: ... + +# NOTE: unlike `extract`, passing non-boolean conditions for `condlist` will raise an +# error at runtime +@overload +def select( + condlist: _SizedIterable[_ArrayLikeBool_co], + choicelist: Sequence[_ArrayT], + default: ArrayLike = 0, +) -> _ArrayT: ... +@overload +def select( + condlist: _SizedIterable[_ArrayLikeBool_co], + choicelist: Sequence[_ArrayLike[_ScalarT]] | NDArray[_ScalarT], + default: ArrayLike = 0, +) -> NDArray[_ScalarT]: ... +@overload +def select( + condlist: _SizedIterable[_ArrayLikeBool_co], + choicelist: Sequence[ArrayLike], + default: ArrayLike = 0, +) -> np.ndarray: ... + +# keep roughly in sync with `ma.core.copy` +@overload +def copy(a: _ArrayT, order: _OrderKACF, subok: L[True]) -> _ArrayT: ... +@overload +def copy(a: _ArrayT, order: _OrderKACF = "K", *, subok: L[True]) -> _ArrayT: ... +@overload +def copy(a: _ArrayLike[_ScalarT], order: _OrderKACF = "K", subok: L[False] = False) -> NDArray[_ScalarT]: ... +@overload +def copy(a: ArrayLike, order: _OrderKACF = "K", subok: L[False] = False) -> NDArray[Incomplete]: ... + +# +@overload # ?d, known inexact scalar-type +def gradient( + f: _ArrayNoD[_InexactTimeT], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, + # `| Any` instead of ` | tuple` is returned to avoid several mypy_primer errors +) -> _Array1D[_InexactTimeT] | Any: ... +@overload # 1d, known inexact scalar-type +def gradient( + f: _Array1D[_InexactTimeT], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Array1D[_InexactTimeT]: ... +@overload # 2d, known inexact scalar-type +def gradient( + f: _Array2D[_InexactTimeT], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh2[_InexactTimeT, _InexactTimeT]: ... +@overload # 3d, known inexact scalar-type +def gradient( + f: _Array3D[_InexactTimeT], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh3[_InexactTimeT, _InexactTimeT, _InexactTimeT]: ... +@overload # ?d, datetime64 scalar-type +def gradient( + f: _ArrayNoD[np.datetime64], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Array1D[np.timedelta64] | tuple[NDArray[np.timedelta64], ...]: ... +@overload # 1d, datetime64 scalar-type +def gradient( + f: _Array1D[np.datetime64], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Array1D[np.timedelta64]: ... +@overload # 2d, datetime64 scalar-type +def gradient( + f: _Array2D[np.datetime64], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh2[np.timedelta64, np.timedelta64]: ... +@overload # 3d, datetime64 scalar-type +def gradient( + f: _Array3D[np.datetime64], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh3[np.timedelta64, np.timedelta64, np.timedelta64]: ... +@overload # 1d float-like +def gradient( + f: _Seq1D[float], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Array1D[np.float64]: ... +@overload # 2d float-like +def gradient( + f: _Seq2D[float], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh2[np.float64, np.float64]: ... +@overload # 3d float-like +def gradient( + f: _Seq3D[float], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh3[np.float64, np.float64, np.float64]: ... +@overload # 1d complex-like (the `list` avoids overlap with the float-like overload) +def gradient( + f: list[complex], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Array1D[np.complex128]: ... +@overload # 2d float-like +def gradient( + f: _Seq1D[list[complex]], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh2[np.complex128, np.complex128]: ... +@overload # 3d float-like +def gradient( + f: _Seq2D[list[complex]], + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> _Mesh3[np.complex128, np.complex128, np.complex128]: ... +@overload # fallback +def gradient( + f: ArrayLike, + *varargs: _ArrayLikeNumber_co, + axis: _ShapeLike | None = None, + edge_order: L[1, 2] = 1, +) -> Incomplete: ... + +# +@overload # n == 0; return input unchanged +def diff( + a: _T, + n: L[0], + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., # = _NoValue + append: ArrayLike | _NoValueType = ..., # = _NoValue +) -> _T: ... +@overload # known array-type +def diff( + a: _ArrayNumericT, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _ArrayNumericT: ... +@overload # known shape, datetime64 +def diff( + a: _Array[_ShapeT, np.datetime64], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Array[_ShapeT, np.timedelta64]: ... +@overload # unknown shape, known scalar-type +def diff( + a: _ArrayLike[_ScalarNumericT], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> NDArray[_ScalarNumericT]: ... +@overload # unknown shape, datetime64 +def diff( + a: _ArrayLike[np.datetime64], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> NDArray[np.timedelta64]: ... +@overload # 1d int +def diff( + a: _Seq1D[int], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Array1D[np.int_]: ... +@overload # 2d int +def diff( + a: _Seq2D[int], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Array2D[np.int_]: ... +@overload # 1d float (the `list` avoids overlap with the `int` overloads) +def diff( + a: list[float], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Array1D[np.float64]: ... +@overload # 2d float +def diff( + a: _Seq1D[list[float]], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Array2D[np.float64]: ... +@overload # 1d complex (the `list` avoids overlap with the `int` overloads) +def diff( + a: list[complex], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Array1D[np.complex128]: ... +@overload # 2d complex +def diff( + a: _Seq1D[list[complex]], + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> _Array2D[np.complex128]: ... +@overload # unknown shape, unknown scalar-type +def diff( + a: ArrayLike, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., + append: ArrayLike | _NoValueType = ..., +) -> NDArray[Incomplete]: ... + +# +@overload # float scalar +def interp( + x: _FloatLike_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeFloat_co, + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, + period: _FloatLike_co | None = None, +) -> np.float64: ... +@overload # complex scalar +def interp( + x: _FloatLike_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLike1D[np.complexfloating] | list[complex], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> np.complex128: ... +@overload # float array +def interp( + x: _Array[_ShapeT, _floating_co], + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeFloat_co, + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, + period: _FloatLike_co | None = None, +) -> _Array[_ShapeT, np.float64]: ... +@overload # complex array +def interp( + x: _Array[_ShapeT, _floating_co], + xp: _ArrayLikeFloat_co, + fp: _ArrayLike1D[np.complexfloating] | list[complex], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> _Array[_ShapeT, np.complex128]: ... +@overload # float sequence +def interp( + x: _Seq1D[_FloatLike_co], + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeFloat_co, + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, + period: _FloatLike_co | None = None, +) -> _Array1D[np.float64]: ... +@overload # complex sequence +def interp( + x: _Seq1D[_FloatLike_co], + xp: _ArrayLikeFloat_co, + fp: _ArrayLike1D[np.complexfloating] | list[complex], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> _Array1D[np.complex128]: ... +@overload # float array-like +def interp( + x: _SeqND[_FloatLike_co], + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeFloat_co, + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[np.float64]: ... +@overload # complex array-like +def interp( + x: _SeqND[_FloatLike_co], + xp: _ArrayLikeFloat_co, + fp: _ArrayLike1D[np.complexfloating] | list[complex], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[np.complex128]: ... +@overload # float scalar/array-like +def interp( + x: _ArrayLikeFloat_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeFloat_co, + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[np.float64] | np.float64: ... +@overload # complex scalar/array-like +def interp( + x: _ArrayLikeFloat_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLike1D[np.complexfloating], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[np.complex128] | np.complex128: ... +@overload # float/complex scalar/array-like +def interp( + x: _ArrayLikeFloat_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeNumber_co, + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[np.complex128 | np.float64] | np.complex128 | np.float64: ... + +# +@overload # 0d T: floating -> 0d T +def angle(z: _FloatingT, deg: bool = False) -> _FloatingT: ... +@overload # 0d complex | float | ~integer -> 0d float64 +def angle(z: complex | _integer_co, deg: bool = False) -> np.float64: ... +@overload # 0d complex64 -> 0d float32 +def angle(z: np.complex64, deg: bool = False) -> np.float32: ... +@overload # 0d clongdouble -> 0d longdouble +def angle(z: np.clongdouble, deg: bool = False) -> np.longdouble: ... +@overload # T: nd floating -> T +def angle(z: _ArrayFloatingT, deg: bool = False) -> _ArrayFloatingT: ... +@overload # nd T: complex128 | ~integer -> nd float64 +def angle(z: _Array[_ShapeT, np.complex128 | _integer_co], deg: bool = False) -> _Array[_ShapeT, np.float64]: ... +@overload # nd T: complex64 -> nd float32 +def angle(z: _Array[_ShapeT, np.complex64], deg: bool = False) -> _Array[_ShapeT, np.float32]: ... +@overload # nd T: clongdouble -> nd longdouble +def angle(z: _Array[_ShapeT, np.clongdouble], deg: bool = False) -> _Array[_ShapeT, np.longdouble]: ... +@overload # 1d complex -> 1d float64 +def angle(z: _Seq1D[complex], deg: bool = False) -> _Array1D[np.float64]: ... +@overload # 2d complex -> 2d float64 +def angle(z: _Seq2D[complex], deg: bool = False) -> _Array2D[np.float64]: ... +@overload # 3d complex -> 3d float64 +def angle(z: _Seq3D[complex], deg: bool = False) -> _Array3D[np.float64]: ... +@overload # fallback +def angle(z: _ArrayLikeComplex_co, deg: bool = False) -> NDArray[np.floating] | Any: ... + +# +@overload # known array-type +def unwrap( + p: _ArrayFloatObjT, + discont: float | None = None, + axis: int = -1, + *, + period: float = ..., # = τ +) -> _ArrayFloatObjT: ... +@overload # known shape, float64 +def unwrap( + p: _Array[_ShapeT, _float64_co], + discont: float | None = None, + axis: int = -1, + *, + period: float = ..., # = τ +) -> _Array[_ShapeT, np.float64]: ... +@overload # 1d float64-like +def unwrap( + p: _Seq1D[float | _float64_co], + discont: float | None = None, + axis: int = -1, + *, + period: float = ..., # = τ +) -> _Array1D[np.float64]: ... +@overload # 2d float64-like +def unwrap( + p: _Seq2D[float | _float64_co], + discont: float | None = None, + axis: int = -1, + *, + period: float = ..., # = τ +) -> _Array2D[np.float64]: ... +@overload # 3d float64-like +def unwrap( + p: _Seq3D[float | _float64_co], + discont: float | None = None, + axis: int = -1, + *, + period: float = ..., # = τ +) -> _Array3D[np.float64]: ... +@overload # ?d, float64 +def unwrap( + p: _SeqND[float] | _ArrayLike[_float64_co], + discont: float | None = None, + axis: int = -1, + *, + period: float = ..., # = τ +) -> NDArray[np.float64]: ... +@overload # fallback +def unwrap( + p: _ArrayLikeFloat_co | _ArrayLikeObject_co, + discont: float | None = None, + axis: int = -1, + *, + period: float = ..., # = τ +) -> np.ndarray: ... + +# +@overload +def sort_complex(a: _ArrayComplexT) -> _ArrayComplexT: ... +@overload # complex64, shape known +def sort_complex(a: _Array[_ShapeT, np.int8 | np.uint8 | np.int16 | np.uint16]) -> _Array[_ShapeT, np.complex64]: ... +@overload # complex64, shape unknown +def sort_complex(a: _ArrayLike[np.int8 | np.uint8 | np.int16 | np.uint16]) -> NDArray[np.complex64]: ... +@overload # complex128, shape known +def sort_complex(a: _Array[_ShapeT, _SortsToComplex128]) -> _Array[_ShapeT, np.complex128]: ... +@overload # complex128, shape unknown +def sort_complex(a: _ArrayLike[_SortsToComplex128]) -> NDArray[np.complex128]: ... +@overload # clongdouble, shape known +def sort_complex(a: _Array[_ShapeT, np.longdouble]) -> _Array[_ShapeT, np.clongdouble]: ... +@overload # clongdouble, shape unknown +def sort_complex(a: _ArrayLike[np.longdouble]) -> NDArray[np.clongdouble]: ... + +# +def trim_zeros(filt: _TrimZerosSequence[_T], trim: L["f", "b", "fb", "bf"] = "fb", axis: _ShapeLike | None = None) -> _T: ... + +# NOTE: keep in sync with `corrcoef` +@overload # ?d, known inexact scalar-type >=64 precision, y=. +def cov( + m: _ArrayLike[_AnyDoubleT], + y: _ArrayLike[_AnyDoubleT], + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: None = None, +) -> _Array2D[_AnyDoubleT]: ... +@overload # ?d, known inexact scalar-type >=64 precision, y=None -> 0d or 2d +def cov( + m: _ArrayNoD[_AnyDoubleT], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[_AnyDoubleT] | None = None, +) -> NDArray[_AnyDoubleT]: ... +@overload # 1d, known inexact scalar-type >=64 precision, y=None +def cov( + m: _Array1D[_AnyDoubleT], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[_AnyDoubleT] | None = None, +) -> _Array0D[_AnyDoubleT]: ... +@overload # nd, known inexact scalar-type >=64 precision, y=None -> 0d or 2d +def cov( + m: _ArrayLike[_AnyDoubleT], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[_AnyDoubleT] | None = None, +) -> NDArray[_AnyDoubleT]: ... +@overload # nd, casts to float64, y= +def cov( + m: NDArray[np.float32 | np.float16 | _integer_co] | _Seq1D[float] | _Seq2D[float], + y: NDArray[np.float32 | np.float16 | _integer_co] | _Seq1D[float] | _Seq2D[float], + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> _Array2D[np.float64]: ... +@overload # ?d or 2d, casts to float64, y=None -> 0d or 2d +def cov( + m: _ArrayNoD[np.float32 | np.float16 | _integer_co] | _Seq2D[float], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> NDArray[np.float64]: ... +@overload # 1d, casts to float64, y=None +def cov( + m: _Array1D[np.float32 | np.float16 | _integer_co] | _Seq1D[float], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> _Array0D[np.float64]: ... +@overload # nd, casts to float64, y=None -> 0d or 2d +def cov( + m: _ArrayLike[np.float32 | np.float16 | _integer_co], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> NDArray[np.float64]: ... +@overload # 1d complex, y= (`list` avoids overlap with float overloads) +def cov( + m: list[complex] | _Seq1D[list[complex]], + y: list[complex] | _Seq1D[list[complex]], + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[np.complex128] | None = None, +) -> _Array2D[np.complex128]: ... +@overload # 1d complex, y=None +def cov( + m: list[complex], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[np.complex128] | None = None, +) -> _Array0D[np.complex128]: ... +@overload # 2d complex, y=None -> 0d or 2d +def cov( + m: _Seq1D[list[complex]], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[np.complex128] | None = None, +) -> NDArray[np.complex128]: ... +@overload # 1d complex-like, y=None, dtype= +def cov( + m: _Seq1D[_ComplexLike_co], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[_ScalarT], +) -> _Array0D[_ScalarT]: ... +@overload # nd complex-like, y=, dtype= +def cov( + m: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[_ScalarT], +) -> _Array2D[_ScalarT]: ... +@overload # nd complex-like, y=None, dtype= -> 0d or 2d +def cov( + m: _ArrayLikeComplex_co, + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: _DTypeLike[_ScalarT], +) -> NDArray[_ScalarT]: ... +@overload # nd complex-like, y=, dtype=? +def cov( + m: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: DTypeLike | None = None, +) -> _Array2D[Incomplete]: ... +@overload # 1d complex-like, y=None, dtype=? +def cov( + m: _Seq1D[_ComplexLike_co], + y: None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: DTypeLike | None = None, +) -> _Array0D[Incomplete]: ... +@overload # nd complex-like, dtype=? +def cov( + m: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co | None = None, + rowvar: bool = True, + bias: bool = False, + ddof: SupportsIndex | SupportsInt | None = None, + fweights: _ArrayLikeInt_co | None = None, + aweights: _ArrayLikeFloat_co | None = None, + *, + dtype: DTypeLike | None = None, +) -> NDArray[Incomplete]: ... + +# NOTE: If only `x` is given and the resulting array has shape (1,1), a bare scalar +# is returned instead of a 2D array. When y is given, a 2D array is always returned. +# This differs from `cov`, which returns 0-D arrays instead of scalars in such cases. +# NOTE: keep in sync with `cov` +@overload # ?d, known inexact scalar-type >=64 precision, y=. +def corrcoef( + x: _ArrayLike[_AnyDoubleT], + y: _ArrayLike[_AnyDoubleT], + rowvar: bool = True, + *, + dtype: _DTypeLike[_AnyDoubleT] | None = None, +) -> _Array2D[_AnyDoubleT]: ... +@overload # ?d, known inexact scalar-type >=64 precision, y=None +def corrcoef( + x: _ArrayNoD[_AnyDoubleT], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[_AnyDoubleT] | None = None, +) -> _Array2D[_AnyDoubleT] | _AnyDoubleT: ... +@overload # 1d, known inexact scalar-type >=64 precision, y=None +def corrcoef( + x: _Array1D[_AnyDoubleT], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[_AnyDoubleT] | None = None, +) -> _AnyDoubleT: ... +@overload # nd, known inexact scalar-type >=64 precision, y=None +def corrcoef( + x: _ArrayLike[_AnyDoubleT], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[_AnyDoubleT] | None = None, +) -> _Array2D[_AnyDoubleT] | _AnyDoubleT: ... +@overload # nd, casts to float64, y= +def corrcoef( + x: NDArray[np.float32 | np.float16 | _integer_co] | _Seq1D[float] | _Seq2D[float], + y: NDArray[np.float32 | np.float16 | _integer_co] | _Seq1D[float] | _Seq2D[float], + rowvar: bool = True, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> _Array2D[np.float64]: ... +@overload # ?d or 2d, casts to float64, y=None +def corrcoef( + x: _ArrayNoD[np.float32 | np.float16 | _integer_co] | _Seq2D[float], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> _Array2D[np.float64] | np.float64: ... +@overload # 1d, casts to float64, y=None +def corrcoef( + x: _Array1D[np.float32 | np.float16 | _integer_co] | _Seq1D[float], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> np.float64: ... +@overload # nd, casts to float64, y=None +def corrcoef( + x: _ArrayLike[np.float32 | np.float16 | _integer_co], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[np.float64] | None = None, +) -> _Array2D[np.float64] | np.float64: ... +@overload # 1d complex, y= (`list` avoids overlap with float overloads) +def corrcoef( + x: list[complex] | _Seq1D[list[complex]], + y: list[complex] | _Seq1D[list[complex]], + rowvar: bool = True, + *, + dtype: _DTypeLike[np.complex128] | None = None, +) -> _Array2D[np.complex128]: ... +@overload # 1d complex, y=None +def corrcoef( + x: list[complex], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[np.complex128] | None = None, +) -> np.complex128: ... +@overload # 2d complex, y=None +def corrcoef( + x: _Seq1D[list[complex]], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[np.complex128] | None = None, +) -> _Array2D[np.complex128] | np.complex128: ... +@overload # 1d complex-like, y=None, dtype= +def corrcoef( + x: _Seq1D[_ComplexLike_co], + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[_ScalarT], +) -> _ScalarT: ... +@overload # nd complex-like, y=, dtype= +def corrcoef( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + rowvar: bool = True, + *, + dtype: _DTypeLike[_ScalarT], +) -> _Array2D[_ScalarT]: ... +@overload # nd complex-like, y=None, dtype= +def corrcoef( + x: _ArrayLikeComplex_co, + y: None = None, + rowvar: bool = True, + *, + dtype: _DTypeLike[_ScalarT], +) -> _Array2D[_ScalarT] | _ScalarT: ... +@overload # nd complex-like, y=, dtype=? +def corrcoef( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + rowvar: bool = True, + *, + dtype: DTypeLike | None = None, +) -> _Array2D[Incomplete]: ... +@overload # 1d complex-like, y=None, dtype=? +def corrcoef( + x: _Seq1D[_ComplexLike_co], + y: None = None, + rowvar: bool = True, + *, + dtype: DTypeLike | None = None, +) -> Incomplete: ... +@overload # nd complex-like, dtype=? +def corrcoef( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co | None = None, + rowvar: bool = True, + *, + dtype: DTypeLike | None = None, +) -> _Array2D[Incomplete] | Incomplete: ... + +# note that floating `M` are accepted, but their fractional part is ignored +def blackman(M: _FloatLike_co) -> _Array1D[np.float64]: ... +def bartlett(M: _FloatLike_co) -> _Array1D[np.float64]: ... +def hanning(M: _FloatLike_co) -> _Array1D[np.float64]: ... +def hamming(M: _FloatLike_co) -> _Array1D[np.float64]: ... +def kaiser(M: _FloatLike_co, beta: _FloatLike_co) -> _Array1D[np.float64]: ... + +# +@overload +def i0(x: _Array[_ShapeT, np.floating | np.integer]) -> _Array[_ShapeT, np.float64]: ... +@overload +def i0(x: _FloatLike_co) -> _Array0D[np.float64]: ... +@overload +def i0(x: _Seq1D[_FloatLike_co]) -> _Array1D[np.float64]: ... +@overload +def i0(x: _Seq2D[_FloatLike_co]) -> _Array2D[np.float64]: ... +@overload +def i0(x: _Seq3D[_FloatLike_co]) -> _Array3D[np.float64]: ... +@overload +def i0(x: _ArrayLikeFloat_co) -> NDArray[np.float64]: ... + +# +@overload +def sinc(x: _InexactT) -> _InexactT: ... +@overload +def sinc(x: float | _float64_co) -> np.float64: ... +@overload +def sinc(x: complex) -> np.complex128 | Any: ... +@overload +def sinc(x: _ArrayInexactT) -> _ArrayInexactT: ... +@overload +def sinc(x: _Array[_ShapeT, _integer_co]) -> _Array[_ShapeT, np.float64]: ... +@overload +def sinc(x: _Seq1D[float]) -> _Array1D[np.float64]: ... +@overload +def sinc(x: _Seq2D[float]) -> _Array2D[np.float64]: ... +@overload +def sinc(x: _Seq3D[float]) -> _Array3D[np.float64]: ... +@overload +def sinc(x: _SeqND[float]) -> NDArray[np.float64]: ... +@overload +def sinc(x: list[complex]) -> _Array1D[np.complex128]: ... +@overload +def sinc(x: _Seq1D[list[complex]]) -> _Array2D[np.complex128]: ... +@overload +def sinc(x: _Seq2D[list[complex]]) -> _Array3D[np.complex128]: ... +@overload +def sinc(x: _ArrayLikeComplex_co) -> np.ndarray | Any: ... + +# NOTE: We assume that `axis` is only provided for >=1-D arrays because for <1-D arrays +# it has no effect, and would complicate the overloads significantly. +@overload # known scalar-type, keepdims=False (default) +def median( + a: _ArrayLike[_InexactTimeT], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, +) -> _InexactTimeT: ... +@overload # float array-like, keepdims=False (default) +def median( + a: _ArrayLikeInt_co | _SeqND[float] | float, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, +) -> np.float64: ... +@overload # complex array-like, keepdims=False (default) +def median( + a: _ListSeqND[complex], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, +) -> np.complex128: ... +@overload # complex scalar, keepdims=False (default) +def median( + a: complex, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: L[False] = False, +) -> np.complex128 | Any: ... +@overload # known array-type, keepdims=True +def median( + a: _ArrayNumericT, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + *, + keepdims: L[True], +) -> _ArrayNumericT: ... +@overload # known scalar-type, keepdims=True +def median( + a: _ArrayLike[_ScalarNumericT], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + *, + keepdims: L[True], +) -> NDArray[_ScalarNumericT]: ... +@overload # known scalar-type, axis= +def median( + a: _ArrayLike[_ScalarNumericT], + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, +) -> NDArray[_ScalarNumericT]: ... +@overload # float array-like, keepdims=True +def median( + a: _SeqND[float], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + *, + keepdims: L[True], +) -> NDArray[np.float64]: ... +@overload # float array-like, axis= +def median( + a: _SeqND[float], + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, +) -> NDArray[np.float64]: ... +@overload # complex array-like, keepdims=True +def median( + a: _ListSeqND[complex], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + *, + keepdims: L[True], +) -> NDArray[np.complex128]: ... +@overload # complex array-like, axis= +def median( + a: _ListSeqND[complex], + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, +) -> NDArray[np.complex128]: ... +@overload # out= (keyword) +def median( + a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + overwrite_input: bool = False, + keepdims: bool = False, +) -> _ArrayT: ... +@overload # out= (positional) +def median( + a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], + axis: _ShapeLike | None, + out: _ArrayT, + overwrite_input: bool = False, + keepdims: bool = False, +) -> _ArrayT: ... +@overload # fallback +def median( + a: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + keepdims: bool = False, +) -> Incomplete: ... + +# NOTE: keep in sync with `quantile` +@overload # inexact, scalar, axis=None +def percentile( + a: _ArrayLike[_InexactDateTimeT], + q: _FloatLike_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _InexactDateTimeT: ... +@overload # inexact, scalar, axis= +def percentile( + a: _ArrayLike[_InexactDateTimeT], + q: _FloatLike_co, + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[_InexactDateTimeT]: ... +@overload # inexact, scalar, keepdims=True +def percentile( + a: _ArrayLike[_InexactDateTimeT], + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[_InexactDateTimeT]: ... +@overload # inexact, array, axis=None +def percentile( + a: _ArrayLike[_InexactDateTimeT], + q: _Array[_ShapeT, _floating_co], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _Array[_ShapeT, _InexactDateTimeT]: ... +@overload # inexact, array-like +def percentile( + a: _ArrayLike[_InexactDateTimeT], + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[_InexactDateTimeT]: ... +@overload # float, scalar, axis=None +def percentile( + a: _SeqND[float] | _ArrayLikeInt_co, + q: _FloatLike_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> np.float64: ... +@overload # float, scalar, axis= +def percentile( + a: _SeqND[float] | _ArrayLikeInt_co, + q: _FloatLike_co, + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.float64]: ... +@overload # float, scalar, keepdims=True +def percentile( + a: _SeqND[float] | _ArrayLikeInt_co, + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.float64]: ... +@overload # float, array, axis=None +def percentile( + a: _SeqND[float] | _ArrayLikeInt_co, + q: _Array[_ShapeT, _floating_co], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _Array[_ShapeT, np.float64]: ... +@overload # float, array-like +def percentile( + a: _SeqND[float] | _ArrayLikeInt_co, + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.float64]: ... +@overload # complex, scalar, axis=None +def percentile( + a: _ListSeqND[complex], + q: _FloatLike_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> np.complex128: ... +@overload # complex, scalar, axis= +def percentile( + a: _ListSeqND[complex], + q: _FloatLike_co, + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.complex128]: ... +@overload # complex, scalar, keepdims=True +def percentile( + a: _ListSeqND[complex], + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.complex128]: ... +@overload # complex, array, axis=None +def percentile( + a: _ListSeqND[complex], + q: _Array[_ShapeT, _floating_co], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _Array[_ShapeT, np.complex128]: ... +@overload # complex, array-like +def percentile( + a: _ListSeqND[complex], + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.complex128]: ... +@overload # object_, scalar, axis=None +def percentile( + a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> Any: ... +@overload # object_, scalar, axis= +def percentile( + a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.object_]: ... +@overload # object_, scalar, keepdims=True +def percentile( + a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.object_]: ... +@overload # object_, array, axis=None +def percentile( + a: _ArrayLikeObject_co, + q: _Array[_ShapeT, _floating_co], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _Array[_ShapeT, np.object_]: ... +@overload # object_, array-like +def percentile( + a: _ArrayLikeObject_co, + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.object_]: ... +@overload # out= (keyword) +def percentile( + a: ArrayLike, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None, + out: _ArrayT, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _ArrayT: ... +@overload # out= (positional) +def percentile( + a: ArrayLike, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + weights: _ArrayLikeFloat_co | None = None, +) -> _ArrayT: ... +@overload # fallback +def percentile( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> Incomplete: ... + +# NOTE: keep in sync with `percentile` +@overload # inexact, scalar, axis=None +def quantile( + a: _ArrayLike[_InexactDateTimeT], + q: _FloatLike_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _InexactDateTimeT: ... +@overload # inexact, scalar, axis= +def quantile( + a: _ArrayLike[_InexactDateTimeT], + q: _FloatLike_co, + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[_InexactDateTimeT]: ... +@overload # inexact, scalar, keepdims=True +def quantile( + a: _ArrayLike[_InexactDateTimeT], + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[_InexactDateTimeT]: ... +@overload # inexact, array, axis=None +def quantile( + a: _ArrayLike[_InexactDateTimeT], + q: _Array[_ShapeT, _floating_co], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _Array[_ShapeT, _InexactDateTimeT]: ... +@overload # inexact, array-like +def quantile( + a: _ArrayLike[_InexactDateTimeT], + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[_InexactDateTimeT]: ... +@overload # float, scalar, axis=None +def quantile( + a: _SeqND[float] | _ArrayLikeInt_co, + q: _FloatLike_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> np.float64: ... +@overload # float, scalar, axis= +def quantile( + a: _SeqND[float] | _ArrayLikeInt_co, + q: _FloatLike_co, + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.float64]: ... +@overload # float, scalar, keepdims=True +def quantile( + a: _SeqND[float] | _ArrayLikeInt_co, + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.float64]: ... +@overload # float, array, axis=None +def quantile( + a: _SeqND[float] | _ArrayLikeInt_co, + q: _Array[_ShapeT, _floating_co], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _Array[_ShapeT, np.float64]: ... +@overload # float, array-like +def quantile( + a: _SeqND[float] | _ArrayLikeInt_co, + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.float64]: ... +@overload # complex, scalar, axis=None +def quantile( + a: _ListSeqND[complex], + q: _FloatLike_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> np.complex128: ... +@overload # complex, scalar, axis= +def quantile( + a: _ListSeqND[complex], + q: _FloatLike_co, + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.complex128]: ... +@overload # complex, scalar, keepdims=True +def quantile( + a: _ListSeqND[complex], + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.complex128]: ... +@overload # complex, array, axis=None +def quantile( + a: _ListSeqND[complex], + q: _Array[_ShapeT, _floating_co], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _Array[_ShapeT, np.complex128]: ... +@overload # complex, array-like +def quantile( + a: _ListSeqND[complex], + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.complex128]: ... +@overload # object_, scalar, axis=None +def quantile( + a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> Any: ... +@overload # object_, scalar, axis= +def quantile( + a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: _ShapeLike, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.object_]: ... +@overload # object_, scalar, keepdims=True +def quantile( + a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + *, + keepdims: L[True], + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.object_]: ... +@overload # object_, array, axis=None +def quantile( + a: _ArrayLikeObject_co, + q: _Array[_ShapeT, _floating_co], + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _Array[_ShapeT, np.object_]: ... +@overload # object_, array-like +def quantile( + a: _ArrayLikeObject_co, + q: NDArray[_floating_co] | _SeqND[_FloatLike_co], + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> NDArray[np.object_]: ... +@overload # out= (keyword) +def quantile( + a: ArrayLike, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None, + out: _ArrayT, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> _ArrayT: ... +@overload # out= (positional) +def quantile( + a: ArrayLike, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + weights: _ArrayLikeFloat_co | None = None, +) -> _ArrayT: ... +@overload # fallback +def quantile( + a: _ArrayLikeNumber_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _InterpolationMethod = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, +) -> Incomplete: ... + +# +@overload # ?d, known inexact/timedelta64 scalar-type +def trapezoid( + y: _ArrayNoD[_InexactTimeT], + x: _ArrayLike[_InexactTimeT] | _ArrayLikeFloat_co | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> NDArray[_InexactTimeT] | _InexactTimeT: ... +@overload # ?d, casts to float64 +def trapezoid( + y: _ArrayNoD[_integer_co], + x: _ArrayLikeFloat_co | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> NDArray[np.float64] | np.float64: ... +@overload # strict 1d, known inexact/timedelta64 scalar-type +def trapezoid( + y: _Array1D[_InexactTimeT], + x: _Array1D[_InexactTimeT] | _Seq1D[float] | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> _InexactTimeT: ... +@overload # strict 1d, casts to float64 +def trapezoid( + y: _Array1D[_float64_co] | _Seq1D[float], + x: _Array1D[_float64_co] | _Seq1D[float] | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> np.float64: ... +@overload # strict 1d, casts to complex128 (`list` prevents overlapping overloads) +def trapezoid( + y: list[complex], + x: _Seq1D[complex] | None = None, + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> np.complex128: ... +@overload # strict 1d, casts to complex128 +def trapezoid( + y: _Seq1D[complex], + x: list[complex], + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> np.complex128: ... +@overload # strict 2d, known inexact/timedelta64 scalar-type +def trapezoid( + y: _Array2D[_InexactTimeT], + x: _ArrayMax2D[_InexactTimeT] | _Seq2D[float] | _Seq1D[float] | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> _InexactTimeT: ... +@overload # strict 2d, casts to float64 +def trapezoid( + y: _Array2D[_float64_co] | _Seq2D[float], + x: _ArrayMax2D[_float64_co] | _Seq2D[float] | _Seq1D[float] | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> np.float64: ... +@overload # strict 2d, casts to complex128 (`list` prevents overlapping overloads) +def trapezoid( + y: _Seq1D[list[complex]], + x: _Seq2D[complex] | _Seq1D[complex] | None = None, + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> np.complex128: ... +@overload # strict 2d, casts to complex128 +def trapezoid( + y: _Seq2D[complex] | _Seq1D[complex], + x: _Seq1D[list[complex]], + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> np.complex128: ... +@overload +def trapezoid( + y: _ArrayLike[_InexactTimeT], + x: _ArrayLike[_InexactTimeT] | _ArrayLikeInt_co | None = None, + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> NDArray[_InexactTimeT] | _InexactTimeT: ... +@overload +def trapezoid( + y: _ArrayLike[_float64_co], + x: _ArrayLikeFloat_co | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> NDArray[np.float64] | np.float64: ... +@overload +def trapezoid( + y: _ArrayLike[np.complex128], + x: _ArrayLikeComplex_co | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> NDArray[np.complex128] | np.complex128: ... +@overload +def trapezoid( + y: _ArrayLikeComplex_co, + x: _ArrayLike[np.complex128], + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> NDArray[np.complex128] | np.complex128: ... +@overload +def trapezoid( + y: _ArrayLikeObject_co, + x: _ArrayLikeObject_co | _ArrayLikeFloat_co | None = None, + dx: float = 1.0, + axis: SupportsIndex = -1, +) -> NDArray[np.object_] | Any: ... +@overload +def trapezoid( + y: _Seq1D[_SupportsRMulFloat[_T]], + x: _Seq1D[_SupportsRMulFloat[_T] | _T] | None = None, + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> _T: ... +@overload +def trapezoid( + y: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_], + x: _ArrayLikeComplex_co | _ArrayLike[np.timedelta64 | np.object_] | None = None, + dx: complex = 1.0, + axis: SupportsIndex = -1, +) -> Incomplete: ... + +# +@overload # 0d +def meshgrid(*, copy: bool = True, sparse: bool = False, indexing: _Indexing = "xy") -> tuple[()]: ... +@overload # 1d, known scalar-type +def meshgrid( + x1: _ArrayLike[_ScalarT], + /, + *, + copy: bool = True, + sparse: bool = False, + indexing: _Indexing = "xy", +) -> _Mesh1[_ScalarT]: ... +@overload # 1d, unknown scalar-type +def meshgrid( + x1: ArrayLike, + /, + *, + copy: bool = True, + sparse: bool = False, + indexing: _Indexing = "xy", +) -> _Mesh1[Any]: ... +@overload # 2d, known scalar-types +def meshgrid( + x1: _ArrayLike[_ScalarT], + x2: _ArrayLike[_ScalarT1], + /, + *, + copy: bool = True, + sparse: bool = False, + indexing: _Indexing = "xy", +) -> _Mesh2[_ScalarT, _ScalarT1]: ... +@overload # 2d, known/unknown scalar-types +def meshgrid( + x1: _ArrayLike[_ScalarT], + x2: ArrayLike, + /, + *, + copy: bool = True, + sparse: bool = False, + indexing: _Indexing = "xy", +) -> _Mesh2[_ScalarT, Any]: ... +@overload # 2d, unknown/known scalar-types +def meshgrid( + x1: ArrayLike, + x2: _ArrayLike[_ScalarT], + /, + *, + copy: bool = True, + sparse: bool = False, + indexing: _Indexing = "xy", +) -> _Mesh2[Any, _ScalarT]: ... +@overload # 2d, unknown scalar-types +def meshgrid( + x1: ArrayLike, + x2: ArrayLike, + /, + *, + copy: bool = True, + sparse: bool = False, + indexing: _Indexing = "xy", +) -> _Mesh2[Any, Any]: ... +@overload # 3d, known scalar-types +def meshgrid( + x1: _ArrayLike[_ScalarT], + x2: _ArrayLike[_ScalarT1], + x3: _ArrayLike[_ScalarT2], + /, + *, + copy: bool = True, + sparse: bool = False, + indexing: _Indexing = "xy", +) -> _Mesh3[_ScalarT, _ScalarT1, _ScalarT2]: ... +@overload # 3d, unknown scalar-types +def meshgrid( + x1: ArrayLike, + x2: ArrayLike, + x3: ArrayLike, + /, + *, + copy: bool = True, + sparse: bool = False, + indexing: _Indexing = "xy", +) -> _Mesh3[Any, Any, Any]: ... +@overload # ?d, known scalar-types +def meshgrid( + *xi: _ArrayLike[_ScalarT], + copy: bool = True, + sparse: bool = False, + indexing: _Indexing = "xy", +) -> tuple[NDArray[_ScalarT], ...]: ... +@overload # ?d, unknown scalar-types +def meshgrid( + *xi: ArrayLike, + copy: bool = True, + sparse: bool = False, + indexing: _Indexing = "xy", +) -> tuple[NDArray[Any], ...]: ... + +# +def place(arr: np.ndarray, mask: ConvertibleToInt | Sequence[ConvertibleToInt], vals: ArrayLike) -> None: ... + +# keep in sync with `insert` +@overload # known scalar-type, axis=None (default) +def delete(arr: _ArrayLike[_ScalarT], obj: _IndexLike, axis: None = None) -> _Array1D[_ScalarT]: ... +@overload # known array-type, axis specified +def delete(arr: _ArrayT, obj: _IndexLike, axis: SupportsIndex) -> _ArrayT: ... +@overload # known scalar-type, axis specified +def delete(arr: _ArrayLike[_ScalarT], obj: _IndexLike, axis: SupportsIndex) -> NDArray[_ScalarT]: ... +@overload # known scalar-type, axis=None (default) +def delete(arr: ArrayLike, obj: _IndexLike, axis: None = None) -> _Array1D[Any]: ... +@overload # unknown scalar-type, axis specified +def delete(arr: ArrayLike, obj: _IndexLike, axis: SupportsIndex) -> NDArray[Any]: ... + +# keep in sync with `delete` +@overload # known scalar-type, axis=None (default) +def insert(arr: _ArrayLike[_ScalarT], obj: _IndexLike, values: ArrayLike, axis: None = None) -> _Array1D[_ScalarT]: ... +@overload # known array-type, axis specified +def insert(arr: _ArrayT, obj: _IndexLike, values: ArrayLike, axis: SupportsIndex) -> _ArrayT: ... +@overload # known scalar-type, axis specified +def insert(arr: _ArrayLike[_ScalarT], obj: _IndexLike, values: ArrayLike, axis: SupportsIndex) -> NDArray[_ScalarT]: ... +@overload # known scalar-type, axis=None (default) +def insert(arr: ArrayLike, obj: _IndexLike, values: ArrayLike, axis: None = None) -> _Array1D[Any]: ... +@overload # unknown scalar-type, axis specified +def insert(arr: ArrayLike, obj: _IndexLike, values: ArrayLike, axis: SupportsIndex) -> NDArray[Any]: ... + +# +@overload # known array type, axis specified +def append(arr: _ArrayT, values: _ArrayT, axis: SupportsIndex) -> _ArrayT: ... +@overload # 1d, known scalar type, axis specified +def append(arr: _Seq1D[_ScalarT], values: _Seq1D[_ScalarT], axis: SupportsIndex) -> _Array1D[_ScalarT]: ... +@overload # 2d, known scalar type, axis specified +def append(arr: _Seq2D[_ScalarT], values: _Seq2D[_ScalarT], axis: SupportsIndex) -> _Array2D[_ScalarT]: ... +@overload # 3d, known scalar type, axis specified +def append(arr: _Seq3D[_ScalarT], values: _Seq3D[_ScalarT], axis: SupportsIndex) -> _Array3D[_ScalarT]: ... +@overload # ?d, known scalar type, axis specified +def append(arr: _SeqND[_ScalarT], values: _SeqND[_ScalarT], axis: SupportsIndex) -> NDArray[_ScalarT]: ... +@overload # ?d, unknown scalar type, axis specified +def append(arr: np.ndarray | _SeqND[_ScalarLike_co], values: _SeqND[_ScalarLike_co], axis: SupportsIndex) -> np.ndarray: ... +@overload # known scalar type, axis=None +def append(arr: _ArrayLike[_ScalarT], values: _ArrayLike[_ScalarT], axis: None = None) -> _Array1D[_ScalarT]: ... +@overload # unknown scalar type, axis=None +def append(arr: ArrayLike, values: ArrayLike, axis: None = None) -> _Array1D[Any]: ... + +# +@overload +def digitize( + x: _Array[_ShapeT, np.floating | np.integer], bins: _ArrayLikeFloat_co, right: bool = False +) -> _Array[_ShapeT, np.int_]: ... +@overload +def digitize(x: _FloatLike_co, bins: _ArrayLikeFloat_co, right: bool = False) -> np.int_: ... +@overload +def digitize(x: _Seq1D[_FloatLike_co], bins: _ArrayLikeFloat_co, right: bool = False) -> _Array1D[np.int_]: ... +@overload +def digitize(x: _Seq2D[_FloatLike_co], bins: _ArrayLikeFloat_co, right: bool = False) -> _Array2D[np.int_]: ... +@overload +def digitize(x: _Seq3D[_FloatLike_co], bins: _ArrayLikeFloat_co, right: bool = False) -> _Array3D[np.int_]: ... +@overload +def digitize(x: _ArrayLikeFloat_co, bins: _ArrayLikeFloat_co, right: bool = False) -> NDArray[np.int_] | Any: ... diff --git a/local_packages/numpy/lib/_histograms_impl.py b/local_packages/numpy/lib/_histograms_impl.py new file mode 100644 index 0000000..b4aacd0 --- /dev/null +++ b/local_packages/numpy/lib/_histograms_impl.py @@ -0,0 +1,1085 @@ +""" +Histogram-related functions +""" +import contextlib +import functools +import operator +import warnings + +import numpy as np +from numpy._core import overrides + +__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges'] + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + +# range is a keyword argument to many functions, so save the builtin so they can +# use it. +_range = range + + +def _ptp(x): + """Peak-to-peak value of x. + + This implementation avoids the problem of signed integer arrays having a + peak-to-peak value that cannot be represented with the array's data type. + This function returns an unsigned value for signed integer arrays. + """ + return _unsigned_subtract(x.max(), x.min()) + + +def _hist_bin_sqrt(x, range): + """ + Square root histogram bin estimator. + + Bin width is inversely proportional to the data size. Used by many + programs for its simplicity. + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + return _ptp(x) / np.sqrt(x.size) + + +def _hist_bin_sturges(x, range): + """ + Sturges histogram bin estimator. + + A very simplistic estimator based on the assumption of normality of + the data. This estimator has poor performance for non-normal data, + which becomes especially obvious for large data sets. The estimate + depends only on size of the data. + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + return _ptp(x) / (np.log2(x.size) + 1.0) + + +def _hist_bin_rice(x, range): + """ + Rice histogram bin estimator. + + Another simple estimator with no normality assumption. It has better + performance for large data than Sturges, but tends to overestimate + the number of bins. The number of bins is proportional to the cube + root of data size (asymptotically optimal). The estimate depends + only on size of the data. + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + return _ptp(x) / (2.0 * x.size ** (1.0 / 3)) + + +def _hist_bin_scott(x, range): + """ + Scott histogram bin estimator. + + The binwidth is proportional to the standard deviation of the data + and inversely proportional to the cube root of data size + (asymptotically optimal). + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x) + + +def _hist_bin_stone(x, range): + """ + Histogram bin estimator based on minimizing the estimated integrated squared error (ISE). + + The number of bins is chosen by minimizing the estimated ISE against the unknown + true distribution. The ISE is estimated using cross-validation and can be regarded + as a generalization of Scott's rule. + https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule + + This paper by Stone appears to be the origination of this rule. + https://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + range : (float, float) + The lower and upper range of the bins. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ # noqa: E501 + + n = x.size + ptp_x = _ptp(x) + if n <= 1 or ptp_x == 0: + return 0 + + def jhat(nbins): + hh = ptp_x / nbins + p_k = np.histogram(x, bins=nbins, range=range)[0] / n + return (2 - (n + 1) * p_k.dot(p_k)) / hh + + nbins_upper_bound = max(100, int(np.sqrt(n))) + nbins = min(_range(1, nbins_upper_bound + 1), key=jhat) + if nbins == nbins_upper_bound: + warnings.warn("The number of bins estimated may be suboptimal.", + RuntimeWarning, stacklevel=3) + return ptp_x / nbins + + +def _hist_bin_doane(x, range): + """ + Doane's histogram bin estimator. + + Improved version of Sturges' formula which works better for + non-normal data. See + stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + if x.size > 2: + sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3))) + sigma = np.std(x) + if sigma > 0.0: + # These three operations add up to + # g1 = np.mean(((x - np.mean(x)) / sigma)**3) + # but use only one temp array instead of three + temp = x - np.mean(x) + np.true_divide(temp, sigma, temp) + np.power(temp, 3, temp) + g1 = np.mean(temp) + return _ptp(x) / (1.0 + np.log2(x.size) + + np.log2(1.0 + np.absolute(g1) / sg1)) + return 0.0 + + +def _hist_bin_fd(x, range): + """ + The Freedman-Diaconis histogram bin estimator. + + The Freedman-Diaconis rule uses interquartile range (IQR) to + estimate binwidth. It is considered a variation of the Scott rule + with more robustness as the IQR is less affected by outliers than + the standard deviation. However, the IQR depends on fewer points + than the standard deviation, so it is less accurate, especially for + long tailed distributions. + + If the IQR is 0, this function returns 0 for the bin width. + Binwidth is inversely proportional to the cube root of data size + (asymptotically optimal). + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + iqr = np.subtract(*np.percentile(x, [75, 25])) + return 2.0 * iqr * x.size ** (-1.0 / 3.0) + + +def _hist_bin_auto(x, range): + """ + Histogram bin estimator that uses the minimum width of a relaxed + Freedman-Diaconis and Sturges estimators if the FD bin width does + not result in a large number of bins. The relaxed Freedman-Diaconis estimator + limits the bin width to half the sqrt estimated to avoid small bins. + + The FD estimator is usually the most robust method, but its width + estimate tends to be too large for small `x` and bad for data with limited + variance. The Sturges estimator is quite good for small (<1000) datasets + and is the default in the R language. This method gives good off-the-shelf + behaviour. + + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + range : Tuple with range for the histogram + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + + See Also + -------- + _hist_bin_fd, _hist_bin_sturges + """ + fd_bw = _hist_bin_fd(x, range) + sturges_bw = _hist_bin_sturges(x, range) + sqrt_bw = _hist_bin_sqrt(x, range) + # heuristic to limit the maximal number of bins + fd_bw_corrected = max(fd_bw, sqrt_bw / 2) + return min(fd_bw_corrected, sturges_bw) + + +# Private dict initialized at module load time +_hist_bin_selectors = {'stone': _hist_bin_stone, + 'auto': _hist_bin_auto, + 'doane': _hist_bin_doane, + 'fd': _hist_bin_fd, + 'rice': _hist_bin_rice, + 'scott': _hist_bin_scott, + 'sqrt': _hist_bin_sqrt, + 'sturges': _hist_bin_sturges} + + +def _ravel_and_check_weights(a, weights): + """ Check a and weights have matching shapes, and ravel both """ + a = np.asarray(a) + + # Ensure that the array is a "subtractable" dtype + if a.dtype == np.bool: + msg = f"Converting input from {a.dtype} to {np.uint8} for compatibility." + warnings.warn(msg, RuntimeWarning, stacklevel=3) + a = a.astype(np.uint8) + + if weights is not None: + weights = np.asarray(weights) + if weights.shape != a.shape: + raise ValueError( + 'weights should have the same shape as a.') + weights = weights.ravel() + a = a.ravel() + return a, weights + + +def _get_outer_edges(a, range): + """ + Determine the outer bin edges to use, from either the data or the range + argument + """ + if range is not None: + first_edge, last_edge = range + if first_edge > last_edge: + raise ValueError( + 'max must be larger than min in range parameter.') + if not (np.isfinite(first_edge) and np.isfinite(last_edge)): + raise ValueError( + f"supplied range of [{first_edge}, {last_edge}] is not finite") + elif a.size == 0: + # handle empty arrays. Can't determine range, so use 0-1. + first_edge, last_edge = 0, 1 + else: + first_edge, last_edge = a.min(), a.max() + if not (np.isfinite(first_edge) and np.isfinite(last_edge)): + raise ValueError( + f"autodetected range of [{first_edge}, {last_edge}] is not finite") + + # expand empty range to avoid divide by zero + if first_edge == last_edge: + first_edge = first_edge - 0.5 + last_edge = last_edge + 0.5 + + return first_edge, last_edge + + +def _unsigned_subtract(a, b): + """ + Subtract two values where a >= b, and produce an unsigned result + + This is needed when finding the difference between the upper and lower + bound of an int16 histogram + """ + # coerce to a single type + signed_to_unsigned = { + np.byte: np.ubyte, + np.short: np.ushort, + np.intc: np.uintc, + np.int_: np.uint, + np.longlong: np.ulonglong + } + dt = np.result_type(a, b) + try: + unsigned_dt = signed_to_unsigned[dt.type] + except KeyError: + return np.subtract(a, b, dtype=dt) + else: + # we know the inputs are integers, and we are deliberately casting + # signed to unsigned. The input may be negative python integers so + # ensure we pass in arrays with the initial dtype (related to NEP 50). + return np.subtract(np.asarray(a, dtype=dt), np.asarray(b, dtype=dt), + casting='unsafe', dtype=unsigned_dt) + + +def _get_bin_edges(a, bins, range, weights): + """ + Computes the bins used internally by `histogram`. + + Parameters + ========== + a : ndarray + Ravelled data array + bins, range + Forwarded arguments from `histogram`. + weights : ndarray, optional + Ravelled weights array, or None + + Returns + ======= + bin_edges : ndarray + Array of bin edges + uniform_bins : (Number, Number, int): + The upper bound, lowerbound, and number of bins, used in the optimized + implementation of `histogram` that works on uniform bins. + """ + # parse the overloaded bins argument + n_equal_bins = None + bin_edges = None + + if isinstance(bins, str): + bin_name = bins + # if `bins` is a string for an automatic method, + # this will replace it with the number of bins calculated + if bin_name not in _hist_bin_selectors: + raise ValueError( + f"{bin_name!r} is not a valid estimator for `bins`") + if weights is not None: + raise TypeError("Automated estimation of the number of " + "bins is not supported for weighted data") + + first_edge, last_edge = _get_outer_edges(a, range) + + # truncate the range if needed + if range is not None: + keep = (a >= first_edge) + keep &= (a <= last_edge) + if not np.logical_and.reduce(keep): + a = a[keep] + + if a.size == 0: + n_equal_bins = 1 + else: + # Do not call selectors on empty arrays + width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge)) + if width: + if np.issubdtype(a.dtype, np.integer) and width < 1: + width = 1 + delta = _unsigned_subtract(last_edge, first_edge) + n_equal_bins = int(np.ceil(delta / width)) + else: + # Width can be zero for some estimators, e.g. FD when + # the IQR of the data is zero. + n_equal_bins = 1 + + elif np.ndim(bins) == 0: + try: + n_equal_bins = operator.index(bins) + except TypeError as e: + raise TypeError( + '`bins` must be an integer, a string, or an array') from e + if n_equal_bins < 1: + raise ValueError('`bins` must be positive, when an integer') + + first_edge, last_edge = _get_outer_edges(a, range) + + elif np.ndim(bins) == 1: + bin_edges = np.asarray(bins) + if np.any(bin_edges[:-1] > bin_edges[1:]): + raise ValueError( + '`bins` must increase monotonically, when an array') + + else: + raise ValueError('`bins` must be 1d, when an array') + + if n_equal_bins is not None: + # gh-10322 means that type resolution rules are dependent on array + # shapes. To avoid this causing problems, we pick a type now and stick + # with it throughout. + bin_type = np.result_type(first_edge, last_edge, a) + if np.issubdtype(bin_type, np.integer): + bin_type = np.result_type(bin_type, float) + + # bin edges must be computed + bin_edges = np.linspace( + first_edge, last_edge, n_equal_bins + 1, + endpoint=True, dtype=bin_type) + if np.any(bin_edges[:-1] >= bin_edges[1:]): + raise ValueError( + f'Too many bins for data range. Cannot create {n_equal_bins} ' + f'finite-sized bins.') + return bin_edges, (first_edge, last_edge, n_equal_bins) + else: + return bin_edges, None + + +def _search_sorted_inclusive(a, v): + """ + Like `searchsorted`, but where the last item in `v` is placed on the right. + + In the context of a histogram, this makes the last bin edge inclusive + """ + return np.concatenate(( + a.searchsorted(v[:-1], 'left'), + a.searchsorted(v[-1:], 'right') + )) + + +def _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None): + return (a, bins, weights) + + +@array_function_dispatch(_histogram_bin_edges_dispatcher) +def histogram_bin_edges(a, bins=10, range=None, weights=None): + r""" + Function to calculate only the edges of the bins used by the `histogram` + function. + + Parameters + ---------- + a : array_like + Input data. The histogram is computed over the flattened array. + bins : int or sequence of scalars or str, optional + If `bins` is an int, it defines the number of equal-width + bins in the given range (10, by default). If `bins` is a + sequence, it defines the bin edges, including the rightmost + edge, allowing for non-uniform bin widths. + + If `bins` is a string from the list below, `histogram_bin_edges` will + use the method chosen to calculate the optimal bin width and + consequently the number of bins (see the Notes section for more detail + on the estimators) from the data that falls within the requested range. + While the bin width will be optimal for the actual data + in the range, the number of bins will be computed to fill the + entire range, including the empty portions. For visualisation, + using the 'auto' option is suggested. Weighted data is not + supported for automated bin size selection. + + 'auto' + Minimum bin width between the 'sturges' and 'fd' estimators. + Provides good all-around performance. + + 'fd' (Freedman Diaconis Estimator) + Robust (resilient to outliers) estimator that takes into + account data variability and data size. + + 'doane' + An improved version of Sturges' estimator that works better + with non-normal datasets. + + 'scott' + Less robust estimator that takes into account data variability + and data size. + + 'stone' + Estimator based on leave-one-out cross-validation estimate of + the integrated squared error. Can be regarded as a generalization + of Scott's rule. + + 'rice' + Estimator does not take variability into account, only data + size. Commonly overestimates number of bins required. + + 'sturges' + R's default method, only accounts for data size. Only + optimal for gaussian data and underestimates number of bins + for large non-gaussian datasets. + + 'sqrt' + Square root (of data size) estimator, used by Excel and + other programs for its speed and simplicity. + + range : (float, float), optional + The lower and upper range of the bins. If not provided, range + is simply ``(a.min(), a.max())``. Values outside the range are + ignored. The first element of the range must be less than or + equal to the second. `range` affects the automatic bin + computation as well. While bin width is computed to be optimal + based on the actual data within `range`, the bin count will fill + the entire range including portions containing no data. + + weights : array_like, optional + An array of weights, of the same shape as `a`. Each value in + `a` only contributes its associated weight towards the bin count + (instead of 1). This is currently not used by any of the bin estimators, + but may be in the future. + + Returns + ------- + bin_edges : array of dtype float + The edges to pass into `histogram` + + See Also + -------- + histogram + + Notes + ----- + The methods to estimate the optimal number of bins are well founded + in literature, and are inspired by the choices R provides for + histogram visualisation. Note that having the number of bins + proportional to :math:`n^{1/3}` is asymptotically optimal, which is + why it appears in most estimators. These are simply plug-in methods + that give good starting points for number of bins. In the equations + below, :math:`h` is the binwidth and :math:`n_h` is the number of + bins. All estimators that compute bin counts are recast to bin width + using the `ptp` of the data. The final bin count is obtained from + ``np.round(np.ceil(range / h))``. The final bin width is often less + than what is returned by the estimators below. + + 'auto' (minimum bin width of the 'sturges' and 'fd' estimators) + A compromise to get a good value. For small datasets the Sturges + value will usually be chosen, while larger datasets will usually + default to FD. Avoids the overly conservative behaviour of FD + and Sturges for small and large datasets respectively. + Switchover point is usually :math:`a.size \approx 1000`. + + 'fd' (Freedman Diaconis Estimator) + .. math:: h = 2 \frac{IQR}{n^{1/3}} + + The binwidth is proportional to the interquartile range (IQR) + and inversely proportional to cube root of a.size. Can be too + conservative for small datasets, but is quite good for large + datasets. The IQR is very robust to outliers. + + 'scott' + .. math:: h = \sigma \sqrt[3]{\frac{24 \sqrt{\pi}}{n}} + + The binwidth is proportional to the standard deviation of the + data and inversely proportional to cube root of ``x.size``. Can + be too conservative for small datasets, but is quite good for + large datasets. The standard deviation is not very robust to + outliers. Values are very similar to the Freedman-Diaconis + estimator in the absence of outliers. + + 'rice' + .. math:: n_h = 2n^{1/3} + + The number of bins is only proportional to cube root of + ``a.size``. It tends to overestimate the number of bins and it + does not take into account data variability. + + 'sturges' + .. math:: n_h = \log _{2}(n) + 1 + + The number of bins is the base 2 log of ``a.size``. This + estimator assumes normality of data and is too conservative for + larger, non-normal datasets. This is the default method in R's + ``hist`` method. + + 'doane' + .. math:: n_h = 1 + \log_{2}(n) + + \log_{2}\left(1 + \frac{|g_1|}{\sigma_{g_1}}\right) + + g_1 = mean\left[\left(\frac{x - \mu}{\sigma}\right)^3\right] + + \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}} + + An improved version of Sturges' formula that produces better + estimates for non-normal datasets. This estimator attempts to + account for the skew of the data. + + 'sqrt' + .. math:: n_h = \sqrt n + + The simplest and fastest estimator. Only takes into account the + data size. + + Additionally, if the data is of integer dtype, then the binwidth will never + be less than 1. + + Examples + -------- + >>> import numpy as np + >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5]) + >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1)) + array([0. , 0.25, 0.5 , 0.75, 1. ]) + >>> np.histogram_bin_edges(arr, bins=2) + array([0. , 2.5, 5. ]) + + For consistency with histogram, an array of pre-computed bins is + passed through unmodified: + + >>> np.histogram_bin_edges(arr, [1, 2]) + array([1, 2]) + + This function allows one set of bins to be computed, and reused across + multiple histograms: + + >>> shared_bins = np.histogram_bin_edges(arr, bins='auto') + >>> shared_bins + array([0., 1., 2., 3., 4., 5.]) + + >>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1]) + >>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins) + >>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins) + + >>> hist_0; hist_1 + array([1, 1, 0, 1, 0]) + array([2, 0, 1, 1, 2]) + + Which gives more easily comparable results than using separate bins for + each histogram: + + >>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto') + >>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto') + >>> hist_0; hist_1 + array([1, 1, 1]) + array([2, 1, 1, 2]) + >>> bins_0; bins_1 + array([0., 1., 2., 3.]) + array([0. , 1.25, 2.5 , 3.75, 5. ]) + + """ + a, weights = _ravel_and_check_weights(a, weights) + bin_edges, _ = _get_bin_edges(a, bins, range, weights) + return bin_edges + + +def _histogram_dispatcher( + a, bins=None, range=None, density=None, weights=None): + return (a, bins, weights) + + +@array_function_dispatch(_histogram_dispatcher) +def histogram(a, bins=10, range=None, density=None, weights=None): + r""" + Compute the histogram of a dataset. + + Parameters + ---------- + a : array_like + Input data. The histogram is computed over the flattened array. + bins : int or sequence of scalars or str, optional + If `bins` is an int, it defines the number of equal-width + bins in the given range (10, by default). If `bins` is a + sequence, it defines a monotonically increasing array of bin edges, + including the rightmost edge, allowing for non-uniform bin widths. + + If `bins` is a string, it defines the method used to calculate the + optimal bin width, as defined by `histogram_bin_edges`. + + range : (float, float), optional + The lower and upper range of the bins. If not provided, range + is simply ``(a.min(), a.max())``. Values outside the range are + ignored. The first element of the range must be less than or + equal to the second. `range` affects the automatic bin + computation as well. While bin width is computed to be optimal + based on the actual data within `range`, the bin count will fill + the entire range including portions containing no data. + weights : array_like, optional + An array of weights, of the same shape as `a`. Each value in + `a` only contributes its associated weight towards the bin count + (instead of 1). If `density` is True, the weights are + normalized, so that the integral of the density over the range + remains 1. + Please note that the ``dtype`` of `weights` will also become the + ``dtype`` of the returned accumulator (`hist`), so it must be + large enough to hold accumulated values as well. + density : bool, optional + If ``False``, the result will contain the number of samples in + each bin. If ``True``, the result is the value of the + probability *density* function at the bin, normalized such that + the *integral* over the range is 1. Note that the sum of the + histogram values will not be equal to 1 unless bins of unity + width are chosen; it is not a probability *mass* function. + + Returns + ------- + hist : array + The values of the histogram. See `density` and `weights` for a + description of the possible semantics. If `weights` are given, + ``hist.dtype`` will be taken from `weights`. + bin_edges : array of dtype float + Return the bin edges ``(length(hist)+1)``. + + + See Also + -------- + histogramdd, bincount, searchsorted, digitize, histogram_bin_edges + + Notes + ----- + All but the last (righthand-most) bin is half-open. In other words, + if `bins` is:: + + [1, 2, 3, 4] + + then the first bin is ``[1, 2)`` (including 1, but excluding 2) and + the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which + *includes* 4. + + + Examples + -------- + >>> import numpy as np + >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3]) + (array([0, 2, 1]), array([0, 1, 2, 3])) + >>> np.histogram(np.arange(4), bins=np.arange(5), density=True) + (array([0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4])) + >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3]) + (array([1, 4, 1]), array([0, 1, 2, 3])) + + >>> a = np.arange(5) + >>> hist, bin_edges = np.histogram(a, density=True) + >>> hist + array([0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5]) + >>> hist.sum() + 2.4999999999999996 + >>> np.sum(hist * np.diff(bin_edges)) + 1.0 + + Automated Bin Selection Methods example, using 2 peak random data + with 2000 points. + + .. plot:: + :include-source: + + import matplotlib.pyplot as plt + import numpy as np + + rng = np.random.RandomState(10) # deterministic random data + a = np.hstack((rng.normal(size=1000), + rng.normal(loc=5, scale=2, size=1000))) + plt.hist(a, bins='auto') # arguments are passed to np.histogram + plt.title("Histogram with 'auto' bins") + plt.show() + + """ + a, weights = _ravel_and_check_weights(a, weights) + + bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights) + + # Histogram is an integer or a float array depending on the weights. + if weights is None: + ntype = np.dtype(np.intp) + else: + ntype = weights.dtype + + # We set a block size, as this allows us to iterate over chunks when + # computing histograms, to minimize memory usage. + BLOCK = 65536 + + # The fast path uses bincount, but that only works for certain types + # of weight + simple_weights = ( + weights is None or + np.can_cast(weights.dtype, np.double) or + np.can_cast(weights.dtype, complex) + ) + + if uniform_bins is not None and simple_weights: + # Fast algorithm for equal bins + # We now convert values of a to bin indices, under the assumption of + # equal bin widths (which is valid here). + first_edge, last_edge, n_equal_bins = uniform_bins + + # Initialize empty histogram + n = np.zeros(n_equal_bins, ntype) + + # Pre-compute histogram scaling factor + norm_numerator = n_equal_bins + norm_denom = _unsigned_subtract(last_edge, first_edge) + + # We iterate over blocks here for two reasons: the first is that for + # large arrays, it is actually faster (for example for a 10^8 array it + # is 2x as fast) and it results in a memory footprint 3x lower in the + # limit of large arrays. + for i in _range(0, len(a), BLOCK): + tmp_a = a[i:i + BLOCK] + if weights is None: + tmp_w = None + else: + tmp_w = weights[i:i + BLOCK] + + # Only include values in the right range + keep = (tmp_a >= first_edge) + keep &= (tmp_a <= last_edge) + if not np.logical_and.reduce(keep): + tmp_a = tmp_a[keep] + if tmp_w is not None: + tmp_w = tmp_w[keep] + + # This cast ensures no type promotions occur below, which gh-10322 + # make unpredictable. Getting it wrong leads to precision errors + # like gh-8123. + tmp_a = tmp_a.astype(bin_edges.dtype, copy=False) + + # Compute the bin indices, and for values that lie exactly on + # last_edge we need to subtract one + f_indices = ((_unsigned_subtract(tmp_a, first_edge) / norm_denom) + * norm_numerator) + indices = f_indices.astype(np.intp) + indices[indices == n_equal_bins] -= 1 + + # The index computation is not guaranteed to give exactly + # consistent results within ~1 ULP of the bin edges. + decrement = tmp_a < bin_edges[indices] + indices[decrement] -= 1 + # The last bin includes the right edge. The other bins do not. + increment = ((tmp_a >= bin_edges[indices + 1]) + & (indices != n_equal_bins - 1)) + indices[increment] += 1 + + # We now compute the histogram using bincount + if ntype.kind == 'c': + n.real += np.bincount(indices, weights=tmp_w.real, + minlength=n_equal_bins) + n.imag += np.bincount(indices, weights=tmp_w.imag, + minlength=n_equal_bins) + else: + n += np.bincount(indices, weights=tmp_w, + minlength=n_equal_bins).astype(ntype) + else: + # Compute via cumulative histogram + cum_n = np.zeros(bin_edges.shape, ntype) + if weights is None: + for i in _range(0, len(a), BLOCK): + sa = np.sort(a[i:i + BLOCK]) + cum_n += _search_sorted_inclusive(sa, bin_edges) + else: + zero = np.zeros(1, dtype=ntype) + for i in _range(0, len(a), BLOCK): + tmp_a = a[i:i + BLOCK] + tmp_w = weights[i:i + BLOCK] + sorting_index = np.argsort(tmp_a) + sa = tmp_a[sorting_index] + sw = tmp_w[sorting_index] + cw = np.concatenate((zero, sw.cumsum())) + bin_index = _search_sorted_inclusive(sa, bin_edges) + cum_n += cw[bin_index] + + n = np.diff(cum_n) + + if density: + db = np.array(np.diff(bin_edges), float) + return n / db / n.sum(), bin_edges + + return n, bin_edges + + +def _histogramdd_dispatcher(sample, bins=None, range=None, density=None, + weights=None): + if hasattr(sample, 'shape'): # same condition as used in histogramdd + yield sample + else: + yield from sample + with contextlib.suppress(TypeError): + yield from bins + yield weights + + +@array_function_dispatch(_histogramdd_dispatcher) +def histogramdd(sample, bins=10, range=None, density=None, weights=None): + """ + Compute the multidimensional histogram of some data. + + Parameters + ---------- + sample : (N, D) array, or (N, D) array_like + The data to be histogrammed. + + Note the unusual interpretation of sample when an array_like: + + * When an array, each row is a coordinate in a D-dimensional space - + such as ``histogramdd(np.array([p1, p2, p3]))``. + * When an array_like, each element is the list of values for single + coordinate - such as ``histogramdd((X, Y, Z))``. + + The first form should be preferred. + + bins : sequence or int, optional + The bin specification: + + * A sequence of arrays describing the monotonically increasing bin + edges along each dimension. + * The number of bins for each dimension (nx, ny, ... =bins) + * The number of bins for all dimensions (nx=ny=...=bins). + + range : sequence, optional + A sequence of length D, each an optional (lower, upper) tuple giving + the outer bin edges to be used if the edges are not given explicitly in + `bins`. + An entry of None in the sequence results in the minimum and maximum + values being used for the corresponding dimension. + The default, None, is equivalent to passing a tuple of D None values. + density : bool, optional + If False, the default, returns the number of samples in each bin. + If True, returns the probability *density* function at the bin, + ``bin_count / sample_count / bin_volume``. + weights : (N,) array_like, optional + An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`. + Weights are normalized to 1 if density is True. If density is False, + the values of the returned histogram are equal to the sum of the + weights belonging to the samples falling into each bin. + + Returns + ------- + H : ndarray + The multidimensional histogram of sample x. See density and weights + for the different possible semantics. + edges : tuple of ndarrays + A tuple of D arrays describing the bin edges for each dimension. + + See Also + -------- + histogram: 1-D histogram + histogram2d: 2-D histogram + + Examples + -------- + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> r = rng.normal(size=(100,3)) + >>> H, edges = np.histogramdd(r, bins = (5, 8, 4)) + >>> H.shape, edges[0].size, edges[1].size, edges[2].size + ((5, 8, 4), 6, 9, 5) + + """ + + try: + # Sample is an ND-array. + N, D = sample.shape + except (AttributeError, ValueError): + # Sample is a sequence of 1D arrays. + sample = np.atleast_2d(sample).T + N, D = sample.shape + + nbin = np.empty(D, np.intp) + edges = D * [None] + dedges = D * [None] + if weights is not None: + weights = np.asarray(weights) + + try: + M = len(bins) + if M != D: + raise ValueError( + 'The dimension of bins must be equal to the dimension of the ' + 'sample x.') + except TypeError: + # bins is an integer + bins = D * [bins] + + # normalize the range argument + if range is None: + range = (None,) * D + elif len(range) != D: + raise ValueError('range argument must have one entry per dimension') + + # Create edge arrays + for i in _range(D): + if np.ndim(bins[i]) == 0: + if bins[i] < 1: + raise ValueError( + f'`bins[{i}]` must be positive, when an integer') + smin, smax = _get_outer_edges(sample[:, i], range[i]) + try: + n = operator.index(bins[i]) + + except TypeError as e: + raise TypeError( + f"`bins[{i}]` must be an integer, when a scalar" + ) from e + + edges[i] = np.linspace(smin, smax, n + 1) + elif np.ndim(bins[i]) == 1: + edges[i] = np.asarray(bins[i]) + if np.any(edges[i][:-1] > edges[i][1:]): + raise ValueError( + f'`bins[{i}]` must be monotonically increasing, when an array') + else: + raise ValueError( + f'`bins[{i}]` must be a scalar or 1d array') + + nbin[i] = len(edges[i]) + 1 # includes an outlier on each end + dedges[i] = np.diff(edges[i]) + + # Compute the bin number each sample falls into. + Ncount = tuple( + # avoid np.digitize to work around gh-11022 + np.searchsorted(edges[i], sample[:, i], side='right') + for i in _range(D) + ) + + # Using digitize, values that fall on an edge are put in the right bin. + # For the rightmost bin, we want values equal to the right edge to be + # counted in the last bin, and not as an outlier. + for i in _range(D): + # Find which points are on the rightmost edge. + on_edge = (sample[:, i] == edges[i][-1]) + # Shift these points one bin to the left. + Ncount[i][on_edge] -= 1 + + # Compute the sample indices in the flattened histogram matrix. + # This raises an error if the array is too large. + xy = np.ravel_multi_index(Ncount, nbin) + + # Compute the number of repetitions in xy and assign it to the + # flattened histmat. + hist = np.bincount(xy, weights, minlength=nbin.prod()) + + # Shape into a proper matrix + hist = hist.reshape(nbin) + + # This preserves the (bad) behavior observed in gh-7845, for now. + hist = hist.astype(float, casting='safe') + + # Remove outliers (indices 0 and -1 for each dimension). + core = D * (slice(1, -1),) + hist = hist[core] + + if density: + # calculate the probability density function + s = hist.sum() + for i in _range(D): + shape = np.ones(D, int) + shape[i] = nbin[i] - 2 + hist = hist / dedges[i].reshape(shape) + hist /= s + + if (hist.shape != nbin - 2).any(): + raise RuntimeError( + "Internal Shape Error") + return hist, edges diff --git a/local_packages/numpy/lib/_histograms_impl.pyi b/local_packages/numpy/lib/_histograms_impl.pyi new file mode 100644 index 0000000..72a31dc --- /dev/null +++ b/local_packages/numpy/lib/_histograms_impl.pyi @@ -0,0 +1,40 @@ +from collections.abc import Sequence +from typing import Any, Literal as L, SupportsIndex, TypeAlias + +from numpy._typing import ArrayLike, NDArray + +__all__ = ["histogram", "histogramdd", "histogram_bin_edges"] + +_BinKind: TypeAlias = L[ + "stone", + "auto", + "doane", + "fd", + "rice", + "scott", + "sqrt", + "sturges", +] + +def histogram_bin_edges( + a: ArrayLike, + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: tuple[float, float] | None = None, + weights: ArrayLike | None = None, +) -> NDArray[Any]: ... + +def histogram( + a: ArrayLike, + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: tuple[float, float] | None = None, + density: bool | None = None, + weights: ArrayLike | None = None, +) -> tuple[NDArray[Any], NDArray[Any]]: ... + +def histogramdd( + sample: ArrayLike, + bins: SupportsIndex | ArrayLike = 10, + range: Sequence[tuple[float, float]] | None = None, + density: bool | None = None, + weights: ArrayLike | None = None, +) -> tuple[NDArray[Any], tuple[NDArray[Any], ...]]: ... diff --git a/local_packages/numpy/lib/_index_tricks_impl.py b/local_packages/numpy/lib/_index_tricks_impl.py new file mode 100644 index 0000000..40fef85 --- /dev/null +++ b/local_packages/numpy/lib/_index_tricks_impl.py @@ -0,0 +1,1048 @@ +import functools +import math +import sys +from itertools import product + +import numpy as np +import numpy._core.numeric as _nx +import numpy.matrixlib as matrixlib +from numpy._core import linspace, overrides +from numpy._core.multiarray import ravel_multi_index, unravel_index +from numpy._core.numeric import ScalarType, array +from numpy._core.numerictypes import issubdtype +from numpy._utils import set_module +from numpy.lib._function_base_impl import diff + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = [ + 'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_', + 's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal', + 'diag_indices', 'diag_indices_from' +] + + +def _ix__dispatcher(*args): + return args + + +@array_function_dispatch(_ix__dispatcher) +def ix_(*args): + """ + Construct an open mesh from multiple sequences. + + This function takes N 1-D sequences and returns N outputs with N + dimensions each, such that the shape is 1 in all but one dimension + and the dimension with the non-unit shape value cycles through all + N dimensions. + + Using `ix_` one can quickly construct index arrays that will index + the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array + ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``. + + Parameters + ---------- + args : 1-D sequences + Each sequence should be of integer or boolean type. + Boolean sequences will be interpreted as boolean masks for the + corresponding dimension (equivalent to passing in + ``np.nonzero(boolean_sequence)``). + + Returns + ------- + out : tuple of ndarrays + N arrays with N dimensions each, with N the number of input + sequences. Together these arrays form an open mesh. + + See Also + -------- + ogrid, mgrid, meshgrid + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(10).reshape(2, 5) + >>> a + array([[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]]) + >>> ixgrid = np.ix_([0, 1], [2, 4]) + >>> ixgrid + (array([[0], + [1]]), array([[2, 4]])) + >>> ixgrid[0].shape, ixgrid[1].shape + ((2, 1), (1, 2)) + >>> a[ixgrid] + array([[2, 4], + [7, 9]]) + + >>> ixgrid = np.ix_([True, True], [2, 4]) + >>> a[ixgrid] + array([[2, 4], + [7, 9]]) + >>> ixgrid = np.ix_([True, True], [False, False, True, False, True]) + >>> a[ixgrid] + array([[2, 4], + [7, 9]]) + + """ + out = [] + nd = len(args) + for k, new in enumerate(args): + if not isinstance(new, _nx.ndarray): + new = np.asarray(new) + if new.size == 0: + # Explicitly type empty arrays to avoid float default + new = new.astype(_nx.intp) + if new.ndim != 1: + raise ValueError("Cross index must be 1 dimensional") + if issubdtype(new.dtype, _nx.bool): + new, = new.nonzero() + new = new.reshape((1,) * k + (new.size,) + (1,) * (nd - k - 1)) + out.append(new) + return tuple(out) + + +class nd_grid: + """ + Construct a multi-dimensional "meshgrid". + + ``grid = nd_grid()`` creates an instance which will return a mesh-grid + when indexed. The dimension and number of the output arrays are equal + to the number of indexing dimensions. If the step length is not a + complex number, then the stop is not inclusive. + + However, if the step length is a **complex number** (e.g. 5j), then the + integer part of its magnitude is interpreted as specifying the + number of points to create between the start and stop values, where + the stop value **is inclusive**. + + If instantiated with an argument of ``sparse=True``, the mesh-grid is + open (or not fleshed out) so that only one-dimension of each returned + argument is greater than 1. + + Parameters + ---------- + sparse : bool, optional + Whether the grid is sparse or not. Default is False. + + Notes + ----- + Two instances of `nd_grid` are made available in the NumPy namespace, + `mgrid` and `ogrid`, approximately defined as:: + + mgrid = nd_grid(sparse=False) + ogrid = nd_grid(sparse=True) + + Users should use these pre-defined instances instead of using `nd_grid` + directly. + """ + __slots__ = ('sparse',) + + def __init__(self, sparse=False): + self.sparse = sparse + + def __getitem__(self, key): + try: + size = [] + # Mimic the behavior of `np.arange` and use a data type + # which is at least as large as `np.int_` + num_list = [0] + for k in range(len(key)): + step = key[k].step + start = key[k].start + stop = key[k].stop + if start is None: + start = 0 + if step is None: + step = 1 + if isinstance(step, (_nx.complexfloating, complex)): + step = abs(step) + size.append(int(step)) + else: + size.append( + math.ceil((stop - start) / step)) + num_list += [start, stop, step] + typ = _nx.result_type(*num_list) + if self.sparse: + nn = [_nx.arange(_x, dtype=_t) + for _x, _t in zip(size, (typ,) * len(size))] + else: + nn = _nx.indices(size, typ) + for k, kk in enumerate(key): + step = kk.step + start = kk.start + if start is None: + start = 0 + if step is None: + step = 1 + if isinstance(step, (_nx.complexfloating, complex)): + step = int(abs(step)) + if step != 1: + step = (kk.stop - start) / float(step - 1) + nn[k] = (nn[k] * step + start) + if self.sparse: + slobj = [_nx.newaxis] * len(size) + for k in range(len(size)): + slobj[k] = slice(None, None) + nn[k] = nn[k][tuple(slobj)] + slobj[k] = _nx.newaxis + return tuple(nn) # ogrid -> tuple of arrays + return nn # mgrid -> ndarray + except (IndexError, TypeError): + step = key.step + stop = key.stop + start = key.start + if start is None: + start = 0 + if isinstance(step, (_nx.complexfloating, complex)): + # Prevent the (potential) creation of integer arrays + step_float = abs(step) + step = length = int(step_float) + if step != 1: + step = (key.stop - start) / float(step - 1) + typ = _nx.result_type(start, stop, step_float) + return _nx.arange(0, length, 1, dtype=typ) * step + start + else: + return _nx.arange(start, stop, step) + + +class MGridClass(nd_grid): + """ + An instance which returns a dense multi-dimensional "meshgrid". + + An instance which returns a dense (or fleshed out) mesh-grid + when indexed, so that each returned argument has the same shape. + The dimensions and number of the output arrays are equal to the + number of indexing dimensions. If the step length is not a complex + number, then the stop is not inclusive. + + However, if the step length is a **complex number** (e.g. 5j), then + the integer part of its magnitude is interpreted as specifying the + number of points to create between the start and stop values, where + the stop value **is inclusive**. + + Returns + ------- + mesh-grid : ndarray + A single array, containing a set of `ndarray`\\ s all of the same + dimensions. stacked along the first axis. + + See Also + -------- + ogrid : like `mgrid` but returns open (not fleshed out) mesh grids + meshgrid: return coordinate matrices from coordinate vectors + r_ : array concatenator + :ref:`how-to-partition` + + Examples + -------- + >>> import numpy as np + >>> np.mgrid[0:5, 0:5] + array([[[0, 0, 0, 0, 0], + [1, 1, 1, 1, 1], + [2, 2, 2, 2, 2], + [3, 3, 3, 3, 3], + [4, 4, 4, 4, 4]], + [[0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4]]]) + >>> np.mgrid[-1:1:5j] + array([-1. , -0.5, 0. , 0.5, 1. ]) + + >>> np.mgrid[0:4].shape + (4,) + >>> np.mgrid[0:4, 0:5].shape + (2, 4, 5) + >>> np.mgrid[0:4, 0:5, 0:6].shape + (3, 4, 5, 6) + + """ + __slots__ = () + + def __init__(self): + super().__init__(sparse=False) + + +mgrid = MGridClass() + + +class OGridClass(nd_grid): + """ + An instance which returns an open multi-dimensional "meshgrid". + + An instance which returns an open (i.e. not fleshed out) mesh-grid + when indexed, so that only one dimension of each returned array is + greater than 1. The dimension and number of the output arrays are + equal to the number of indexing dimensions. If the step length is + not a complex number, then the stop is not inclusive. + + However, if the step length is a **complex number** (e.g. 5j), then + the integer part of its magnitude is interpreted as specifying the + number of points to create between the start and stop values, where + the stop value **is inclusive**. + + Returns + ------- + mesh-grid : ndarray or tuple of ndarrays + If the input is a single slice, returns an array. + If the input is multiple slices, returns a tuple of arrays, with + only one dimension not equal to 1. + + See Also + -------- + mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids + meshgrid: return coordinate matrices from coordinate vectors + r_ : array concatenator + :ref:`how-to-partition` + + Examples + -------- + >>> from numpy import ogrid + >>> ogrid[-1:1:5j] + array([-1. , -0.5, 0. , 0.5, 1. ]) + >>> ogrid[0:5, 0:5] + (array([[0], + [1], + [2], + [3], + [4]]), + array([[0, 1, 2, 3, 4]])) + + """ + __slots__ = () + + def __init__(self): + super().__init__(sparse=True) + + +ogrid = OGridClass() + + +class AxisConcatenator: + """ + Translates slice objects to concatenation along an axis. + + For detailed documentation on usage, see `r_`. + """ + __slots__ = ('axis', 'matrix', 'ndmin', 'trans1d') + + # allow ma.mr_ to override this + concatenate = staticmethod(_nx.concatenate) + makemat = staticmethod(matrixlib.matrix) + + def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1): + self.axis = axis + self.matrix = matrix + self.trans1d = trans1d + self.ndmin = ndmin + + def __getitem__(self, key): + # handle matrix builder syntax + if isinstance(key, str): + frame = sys._getframe().f_back + mymat = matrixlib.bmat(key, frame.f_globals, frame.f_locals) + return mymat + + if not isinstance(key, tuple): + key = (key,) + + # copy attributes, since they can be overridden in the first argument + trans1d = self.trans1d + ndmin = self.ndmin + matrix = self.matrix + axis = self.axis + + objs = [] + # dtypes or scalars for weak scalar handling in result_type + result_type_objs = [] + + for k, item in enumerate(key): + scalar = False + if isinstance(item, slice): + step = item.step + start = item.start + stop = item.stop + if start is None: + start = 0 + if step is None: + step = 1 + if isinstance(step, (_nx.complexfloating, complex)): + size = int(abs(step)) + newobj = linspace(start, stop, num=size) + else: + newobj = _nx.arange(start, stop, step) + if ndmin > 1: + newobj = array(newobj, copy=None, ndmin=ndmin) + if trans1d != -1: + newobj = newobj.swapaxes(-1, trans1d) + elif isinstance(item, str): + if k != 0: + raise ValueError("special directives must be the " + "first entry.") + if item in ('r', 'c'): + matrix = True + col = (item == 'c') + continue + if ',' in item: + vec = item.split(',') + try: + axis, ndmin = [int(x) for x in vec[:2]] + if len(vec) == 3: + trans1d = int(vec[2]) + continue + except Exception as e: + raise ValueError( + f"unknown special directive {item!r}" + ) from e + try: + axis = int(item) + continue + except (ValueError, TypeError) as e: + raise ValueError("unknown special directive") from e + elif type(item) in ScalarType: + scalar = True + newobj = item + else: + item_ndim = np.ndim(item) + newobj = array(item, copy=None, subok=True, ndmin=ndmin) + if trans1d != -1 and item_ndim < ndmin: + k2 = ndmin - item_ndim + k1 = trans1d + if k1 < 0: + k1 += k2 + 1 + defaxes = list(range(ndmin)) + axes = defaxes[:k1] + defaxes[k2:] + defaxes[k1:k2] + newobj = newobj.transpose(axes) + + objs.append(newobj) + if scalar: + result_type_objs.append(item) + else: + result_type_objs.append(newobj.dtype) + + # Ensure that scalars won't up-cast unless warranted, for 0, drops + # through to error in concatenate. + if len(result_type_objs) != 0: + final_dtype = _nx.result_type(*result_type_objs) + # concatenate could do cast, but that can be overridden: + objs = [array(obj, copy=None, subok=True, + ndmin=ndmin, dtype=final_dtype) for obj in objs] + + res = self.concatenate(tuple(objs), axis=axis) + + if matrix: + oldndim = res.ndim + res = self.makemat(res) + if oldndim == 1 and col: + res = res.T + return res + + def __len__(self): + return 0 + +# separate classes are used here instead of just making r_ = concatenator(0), +# etc. because otherwise we couldn't get the doc string to come out right +# in help(r_) + + +class RClass(AxisConcatenator): + """ + Translates slice objects to concatenation along the first axis. + + This is a simple way to build up arrays quickly. There are two use cases. + + 1. If the index expression contains comma separated arrays, then stack + them along their first axis. + 2. If the index expression contains slice notation or scalars then create + a 1-D array with a range indicated by the slice notation. + + If slice notation is used, the syntax ``start:stop:step`` is equivalent + to ``np.arange(start, stop, step)`` inside of the brackets. However, if + ``step`` is an imaginary number (i.e. 100j) then its integer portion is + interpreted as a number-of-points desired and the start and stop are + inclusive. In other words ``start:stop:stepj`` is interpreted as + ``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets. + After expansion of slice notation, all comma separated sequences are + concatenated together. + + Optional character strings placed as the first element of the index + expression can be used to change the output. The strings 'r' or 'c' result + in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row) + matrix is produced. If the result is 1-D and 'c' is specified, then + an N x 1 (column) matrix is produced. + If the result is 2-D then both provide the same matrix result. + + A string integer specifies which axis to stack multiple comma separated + arrays along. A string of two comma-separated integers allows indication + of the minimum number of dimensions to force each entry into as the + second integer (the axis to concatenate along is still the first integer). + + A string with three comma-separated integers allows specification of the + axis to concatenate along, the minimum number of dimensions to force the + entries to, and which axis should contain the start of the arrays which + are less than the specified number of dimensions. In other words the third + integer allows you to specify where the 1's should be placed in the shape + of the arrays that have their shapes upgraded. By default, they are placed + in the front of the shape tuple. The third argument allows you to specify + where the start of the array should be instead. Thus, a third argument of + '0' would place the 1's at the end of the array shape. Negative integers + specify where in the new shape tuple the last dimension of upgraded arrays + should be placed, so the default is '-1'. + + Parameters + ---------- + Not a function, so takes no parameters + + + Returns + ------- + A concatenated ndarray or matrix. + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + c_ : Translates slice objects to concatenation along the second axis. + + Examples + -------- + >>> import numpy as np + >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])] + array([1, 2, 3, ..., 4, 5, 6]) + >>> np.r_[-1:1:6j, [0]*3, 5, 6] + array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ]) + + String integers specify the axis to concatenate along or the minimum + number of dimensions to force entries into. + + >>> a = np.array([[0, 1, 2], [3, 4, 5]]) + >>> np.r_['-1', a, a] # concatenate along last axis + array([[0, 1, 2, 0, 1, 2], + [3, 4, 5, 3, 4, 5]]) + >>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2 + array([[1, 2, 3], + [4, 5, 6]]) + + >>> np.r_['0,2,0', [1,2,3], [4,5,6]] + array([[1], + [2], + [3], + [4], + [5], + [6]]) + >>> np.r_['1,2,0', [1,2,3], [4,5,6]] + array([[1, 4], + [2, 5], + [3, 6]]) + + Using 'r' or 'c' as a first string argument creates a matrix. + + >>> np.r_['r',[1,2,3], [4,5,6]] + matrix([[1, 2, 3, 4, 5, 6]]) + + """ + __slots__ = () + + def __init__(self): + AxisConcatenator.__init__(self, 0) + + +r_ = RClass() + + +class CClass(AxisConcatenator): + """ + Translates slice objects to concatenation along the second axis. + + This is short-hand for ``np.r_['-1,2,0', index expression]``, which is + useful because of its common occurrence. In particular, arrays will be + stacked along their last axis after being upgraded to at least 2-D with + 1's post-pended to the shape (column vectors made out of 1-D arrays). + + See Also + -------- + column_stack : Stack 1-D arrays as columns into a 2-D array. + r_ : For more detailed documentation. + + Examples + -------- + >>> import numpy as np + >>> np.c_[np.array([1,2,3]), np.array([4,5,6])] + array([[1, 4], + [2, 5], + [3, 6]]) + >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])] + array([[1, 2, 3, ..., 4, 5, 6]]) + + """ + __slots__ = () + + def __init__(self): + AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0) + + +c_ = CClass() + + +@set_module('numpy') +class ndenumerate: + """ + Multidimensional index iterator. + + Return an iterator yielding pairs of array coordinates and values. + + Parameters + ---------- + arr : ndarray + Input array. + + See Also + -------- + ndindex, flatiter + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4]]) + >>> for index, x in np.ndenumerate(a): + ... print(index, x) + (0, 0) 1 + (0, 1) 2 + (1, 0) 3 + (1, 1) 4 + + """ + + def __init__(self, arr): + self.iter = np.asarray(arr).flat + + def __next__(self): + """ + Standard iterator method, returns the index tuple and array value. + + Returns + ------- + coords : tuple of ints + The indices of the current iteration. + val : scalar + The array element of the current iteration. + + """ + return self.iter.coords, next(self.iter) + + def __iter__(self): + return self + + +@set_module('numpy') +class ndindex: + """ + An N-dimensional iterator object to index arrays. + + Given the shape of an array, an `ndindex` instance iterates over + the N-dimensional index of the array. At each iteration a tuple + of indices is returned, the last dimension is iterated over first. + + Parameters + ---------- + shape : ints, or a single tuple of ints + The size of each dimension of the array can be passed as + individual parameters or as the elements of a tuple. + + See Also + -------- + ndenumerate, flatiter + + Examples + -------- + >>> import numpy as np + + Dimensions as individual arguments + + >>> for index in np.ndindex(3, 2, 1): + ... print(index) + (0, 0, 0) + (0, 1, 0) + (1, 0, 0) + (1, 1, 0) + (2, 0, 0) + (2, 1, 0) + + Same dimensions - but in a tuple ``(3, 2, 1)`` + + >>> for index in np.ndindex((3, 2, 1)): + ... print(index) + (0, 0, 0) + (0, 1, 0) + (1, 0, 0) + (1, 1, 0) + (2, 0, 0) + (2, 1, 0) + + """ + + def __init__(self, *shape): + if len(shape) == 1 and isinstance(shape[0], tuple): + shape = shape[0] + if min(shape, default=0) < 0: + raise ValueError("negative dimensions are not allowed") + self._iter = product(*map(range, shape)) + + def __iter__(self): + return self + + def __next__(self): + """ + Standard iterator method, updates the index and returns the index + tuple. + + Returns + ------- + val : tuple of ints + Returns a tuple containing the indices of the current + iteration. + + """ + return next(self._iter) + + +# You can do all this with slice() plus a few special objects, +# but there's a lot to remember. This version is simpler because +# it uses the standard array indexing syntax. +# +# Written by Konrad Hinsen +# last revision: 1999-7-23 +# +# Cosmetic changes by T. Oliphant 2001 +# +# + +class IndexExpression: + """ + A nicer way to build up index tuples for arrays. + + .. note:: + Use one of the two predefined instances ``index_exp`` or `s_` + rather than directly using `IndexExpression`. + + For any index combination, including slicing and axis insertion, + ``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any + array `a`. However, ``np.index_exp[indices]`` can be used anywhere + in Python code and returns a tuple of slice objects that can be + used in the construction of complex index expressions. + + Parameters + ---------- + maketuple : bool + If True, always returns a tuple. + + See Also + -------- + s_ : Predefined instance without tuple conversion: + `s_ = IndexExpression(maketuple=False)`. + The ``index_exp`` is another predefined instance that + always returns a tuple: + `index_exp = IndexExpression(maketuple=True)`. + + Notes + ----- + You can do all this with :class:`slice` plus a few special objects, + but there's a lot to remember and this version is simpler because + it uses the standard array indexing syntax. + + Examples + -------- + >>> import numpy as np + >>> np.s_[2::2] + slice(2, None, 2) + >>> np.index_exp[2::2] + (slice(2, None, 2),) + + >>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]] + array([2, 4]) + + """ + __slots__ = ('maketuple',) + + def __init__(self, maketuple): + self.maketuple = maketuple + + def __getitem__(self, item): + if self.maketuple and not isinstance(item, tuple): + return (item,) + else: + return item + + +index_exp = IndexExpression(maketuple=True) +s_ = IndexExpression(maketuple=False) + +# End contribution from Konrad. + + +# The following functions complement those in twodim_base, but are +# applicable to N-dimensions. + + +def _fill_diagonal_dispatcher(a, val, wrap=None): + return (a,) + + +@array_function_dispatch(_fill_diagonal_dispatcher) +def fill_diagonal(a, val, wrap=False): + """Fill the main diagonal of the given array of any dimensionality. + + For an array `a` with ``a.ndim >= 2``, the diagonal is the list of + values ``a[i, ..., i]`` with indices ``i`` all identical. This function + modifies the input array in-place without returning a value. + + Parameters + ---------- + a : array, at least 2-D. + Array whose diagonal is to be filled in-place. + val : scalar or array_like + Value(s) to write on the diagonal. If `val` is scalar, the value is + written along the diagonal. If array-like, the flattened `val` is + written along the diagonal, repeating if necessary to fill all + diagonal entries. + + wrap : bool + For tall matrices in NumPy version up to 1.6.2, the + diagonal "wrapped" after N columns. You can have this behavior + with this option. This affects only tall matrices. + + See also + -------- + diag_indices, diag_indices_from + + Notes + ----- + This functionality can be obtained via `diag_indices`, but internally + this version uses a much faster implementation that never constructs the + indices and uses simple slicing. + + Examples + -------- + >>> import numpy as np + >>> a = np.zeros((3, 3), int) + >>> np.fill_diagonal(a, 5) + >>> a + array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5]]) + + The same function can operate on a 4-D array: + + >>> a = np.zeros((3, 3, 3, 3), int) + >>> np.fill_diagonal(a, 4) + + We only show a few blocks for clarity: + + >>> a[0, 0] + array([[4, 0, 0], + [0, 0, 0], + [0, 0, 0]]) + >>> a[1, 1] + array([[0, 0, 0], + [0, 4, 0], + [0, 0, 0]]) + >>> a[2, 2] + array([[0, 0, 0], + [0, 0, 0], + [0, 0, 4]]) + + The wrap option affects only tall matrices: + + >>> # tall matrices no wrap + >>> a = np.zeros((5, 3), int) + >>> np.fill_diagonal(a, 4) + >>> a + array([[4, 0, 0], + [0, 4, 0], + [0, 0, 4], + [0, 0, 0], + [0, 0, 0]]) + + >>> # tall matrices wrap + >>> a = np.zeros((5, 3), int) + >>> np.fill_diagonal(a, 4, wrap=True) + >>> a + array([[4, 0, 0], + [0, 4, 0], + [0, 0, 4], + [0, 0, 0], + [4, 0, 0]]) + + >>> # wide matrices + >>> a = np.zeros((3, 5), int) + >>> np.fill_diagonal(a, 4, wrap=True) + >>> a + array([[4, 0, 0, 0, 0], + [0, 4, 0, 0, 0], + [0, 0, 4, 0, 0]]) + + The anti-diagonal can be filled by reversing the order of elements + using either `numpy.flipud` or `numpy.fliplr`. + + >>> a = np.zeros((3, 3), int); + >>> np.fill_diagonal(np.fliplr(a), [1,2,3]) # Horizontal flip + >>> a + array([[0, 0, 1], + [0, 2, 0], + [3, 0, 0]]) + >>> np.fill_diagonal(np.flipud(a), [1,2,3]) # Vertical flip + >>> a + array([[0, 0, 3], + [0, 2, 0], + [1, 0, 0]]) + + Note that the order in which the diagonal is filled varies depending + on the flip function. + """ + if a.ndim < 2: + raise ValueError("array must be at least 2-d") + end = None + if a.ndim == 2: + # Explicit, fast formula for the common case. For 2-d arrays, we + # accept rectangular ones. + step = a.shape[1] + 1 + # This is needed to don't have tall matrix have the diagonal wrap. + if not wrap: + end = a.shape[1] * a.shape[1] + else: + # For more than d=2, the strided formula is only valid for arrays with + # all dimensions equal, so we check first. + if not np.all(diff(a.shape) == 0): + raise ValueError("All dimensions of input must be of equal length") + step = 1 + (np.cumprod(a.shape[:-1])).sum() + + # Write the value out into the diagonal. + a.flat[:end:step] = val + + +@set_module('numpy') +def diag_indices(n, ndim=2): + """ + Return the indices to access the main diagonal of an array. + + This returns a tuple of indices that can be used to access the main + diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape + (n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal, for + ``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]`` + for ``i = [0..n-1]``. + + Parameters + ---------- + n : int + The size, along each dimension, of the arrays for which the returned + indices can be used. + + ndim : int, optional + The number of dimensions. + + See Also + -------- + diag_indices_from + + Examples + -------- + >>> import numpy as np + + Create a set of indices to access the diagonal of a (4, 4) array: + + >>> di = np.diag_indices(4) + >>> di + (array([0, 1, 2, 3]), array([0, 1, 2, 3])) + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + >>> a[di] = 100 + >>> a + array([[100, 1, 2, 3], + [ 4, 100, 6, 7], + [ 8, 9, 100, 11], + [ 12, 13, 14, 100]]) + + Now, we create indices to manipulate a 3-D array: + + >>> d3 = np.diag_indices(2, 3) + >>> d3 + (array([0, 1]), array([0, 1]), array([0, 1])) + + And use it to set the diagonal of an array of zeros to 1: + + >>> a = np.zeros((2, 2, 2), dtype=int) + >>> a[d3] = 1 + >>> a + array([[[1, 0], + [0, 0]], + [[0, 0], + [0, 1]]]) + + """ + idx = np.arange(n) + return (idx,) * ndim + + +def _diag_indices_from(arr): + return (arr,) + + +@array_function_dispatch(_diag_indices_from) +def diag_indices_from(arr): + """ + Return the indices to access the main diagonal of an n-dimensional array. + + See `diag_indices` for full details. + + Parameters + ---------- + arr : array, at least 2-D + + See Also + -------- + diag_indices + + Examples + -------- + >>> import numpy as np + + Create a 4 by 4 array. + + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + + Get the indices of the diagonal elements. + + >>> di = np.diag_indices_from(a) + >>> di + (array([0, 1, 2, 3]), array([0, 1, 2, 3])) + + >>> a[di] + array([ 0, 5, 10, 15]) + + This is simply syntactic sugar for diag_indices. + + >>> np.diag_indices(a.shape[0]) + (array([0, 1, 2, 3]), array([0, 1, 2, 3])) + + """ + + if not arr.ndim >= 2: + raise ValueError("input array must be at least 2-d") + # For more than d=2, the strided formula is only valid for arrays with + # all dimensions equal, so we check first. + if not np.all(diff(arr.shape) == 0): + raise ValueError("All dimensions of input must be of equal length") + + return diag_indices(arr.shape[0], arr.ndim) diff --git a/local_packages/numpy/lib/_index_tricks_impl.pyi b/local_packages/numpy/lib/_index_tricks_impl.pyi new file mode 100644 index 0000000..ff316f5 --- /dev/null +++ b/local_packages/numpy/lib/_index_tricks_impl.pyi @@ -0,0 +1,267 @@ +from _typeshed import Incomplete, SupportsLenAndGetItem +from collections.abc import Sequence +from typing import ( + Any, + ClassVar, + Final, + Generic, + Literal as L, + Self, + SupportsIndex, + final, + overload, +) +from typing_extensions import TypeVar + +import numpy as np +from numpy import _CastingKind +from numpy._core.multiarray import ravel_multi_index, unravel_index +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _AnyShape, + _ArrayLike, + _DTypeLike, + _FiniteNestedSequence, + _HasDType, + _NestedSequence, + _SupportsArray, +) + +__all__ = [ # noqa: RUF022 + "ravel_multi_index", + "unravel_index", + "mgrid", + "ogrid", + "r_", + "c_", + "s_", + "index_exp", + "ix_", + "ndenumerate", + "ndindex", + "fill_diagonal", + "diag_indices", + "diag_indices_from", +] + +### + +_T = TypeVar("_T") +_TupleT = TypeVar("_TupleT", bound=tuple[Any, ...]) +_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, default=Any, covariant=True) +_BoolT_co = TypeVar("_BoolT_co", bound=bool, default=bool, covariant=True) + +_AxisT_co = TypeVar("_AxisT_co", bound=int, default=L[0], covariant=True) +_MatrixT_co = TypeVar("_MatrixT_co", bound=bool, default=L[False], covariant=True) +_NDMinT_co = TypeVar("_NDMinT_co", bound=int, default=L[1], covariant=True) +_Trans1DT_co = TypeVar("_Trans1DT_co", bound=int, default=L[-1], covariant=True) + +### + +class ndenumerate(Generic[_ScalarT_co]): + @overload + def __init__(self: ndenumerate[_ScalarT], arr: _FiniteNestedSequence[_SupportsArray[np.dtype[_ScalarT]]]) -> None: ... + @overload + def __init__(self: ndenumerate[np.str_], arr: str | _NestedSequence[str]) -> None: ... + @overload + def __init__(self: ndenumerate[np.bytes_], arr: bytes | _NestedSequence[bytes]) -> None: ... + @overload + def __init__(self: ndenumerate[np.bool], arr: bool | _NestedSequence[bool]) -> None: ... + @overload + def __init__(self: ndenumerate[np.intp], arr: int | _NestedSequence[int]) -> None: ... + @overload + def __init__(self: ndenumerate[np.float64], arr: float | _NestedSequence[float]) -> None: ... + @overload + def __init__(self: ndenumerate[np.complex128], arr: complex | _NestedSequence[complex]) -> None: ... + @overload + def __init__(self: ndenumerate[Incomplete], arr: object) -> None: ... + + # The first overload is a (semi-)workaround for a mypy bug (tested with v1.10 and v1.11) + @overload + def __next__( + self: ndenumerate[np.bool | np.number | np.flexible | np.datetime64 | np.timedelta64], + /, + ) -> tuple[_AnyShape, _ScalarT_co]: ... + @overload + def __next__(self: ndenumerate[np.object_], /) -> tuple[_AnyShape, Incomplete]: ... + @overload + def __next__(self, /) -> tuple[_AnyShape, _ScalarT_co]: ... + + # + def __iter__(self) -> Self: ... + +class ndindex: + @overload + def __init__(self, shape: tuple[SupportsIndex, ...], /) -> None: ... + @overload + def __init__(self, /, *shape: SupportsIndex) -> None: ... + + # + def __iter__(self) -> Self: ... + def __next__(self) -> _AnyShape: ... + +class nd_grid(Generic[_BoolT_co]): + __slots__ = ("sparse",) + + sparse: _BoolT_co + def __init__(self, sparse: _BoolT_co = ...) -> None: ... # stubdefaulter: ignore[missing-default] + @overload + def __getitem__(self: nd_grid[L[False]], key: slice | Sequence[slice]) -> NDArray[Incomplete]: ... + @overload + def __getitem__(self: nd_grid[L[True]], key: slice | Sequence[slice]) -> tuple[NDArray[Incomplete], ...]: ... + +@final +class MGridClass(nd_grid[L[False]]): + __slots__ = () + + def __init__(self) -> None: ... + +@final +class OGridClass(nd_grid[L[True]]): + __slots__ = () + + def __init__(self) -> None: ... + +class AxisConcatenator(Generic[_AxisT_co, _MatrixT_co, _NDMinT_co, _Trans1DT_co]): + __slots__ = "axis", "matrix", "ndmin", "trans1d" + + makemat: ClassVar[type[np.matrix[tuple[int, int], np.dtype]]] + + axis: _AxisT_co + matrix: _MatrixT_co + ndmin: _NDMinT_co + trans1d: _Trans1DT_co + + # NOTE: mypy does not understand that these default values are the same as the + # TypeVar defaults. Since the workaround would require us to write 16 overloads, + # we ignore the assignment type errors here. + def __init__( + self, + /, + axis: _AxisT_co = 0, # type: ignore[assignment] + matrix: _MatrixT_co = False, # type: ignore[assignment] + ndmin: _NDMinT_co = 1, # type: ignore[assignment] + trans1d: _Trans1DT_co = -1, # type: ignore[assignment] + ) -> None: ... + + # TODO(jorenham): annotate this + def __getitem__(self, key: Incomplete, /) -> Incomplete: ... + def __len__(self, /) -> L[0]: ... + + # Keep in sync with _core.multiarray.concatenate + @staticmethod + @overload + def concatenate( + arrays: _ArrayLike[_ScalarT], + /, + axis: SupportsIndex | None = 0, + out: None = None, + *, + dtype: None = None, + casting: _CastingKind | None = "same_kind", + ) -> NDArray[_ScalarT]: ... + @staticmethod + @overload + def concatenate( + arrays: SupportsLenAndGetItem[ArrayLike], + /, + axis: SupportsIndex | None = 0, + out: None = None, + *, + dtype: _DTypeLike[_ScalarT], + casting: _CastingKind | None = "same_kind", + ) -> NDArray[_ScalarT]: ... + @staticmethod + @overload + def concatenate( + arrays: SupportsLenAndGetItem[ArrayLike], + /, + axis: SupportsIndex | None = 0, + out: None = None, + *, + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", + ) -> NDArray[Incomplete]: ... + @staticmethod + @overload + def concatenate( + arrays: SupportsLenAndGetItem[ArrayLike], + /, + axis: SupportsIndex | None = 0, + *, + out: _ArrayT, + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", + ) -> _ArrayT: ... + @staticmethod + @overload + def concatenate( + arrays: SupportsLenAndGetItem[ArrayLike], + /, + axis: SupportsIndex | None, + out: _ArrayT, + *, + dtype: DTypeLike | None = None, + casting: _CastingKind | None = "same_kind", + ) -> _ArrayT: ... + +@final +class RClass(AxisConcatenator[L[0], L[False], L[1], L[-1]]): + __slots__ = () + + def __init__(self, /) -> None: ... + +@final +class CClass(AxisConcatenator[L[-1], L[False], L[2], L[0]]): + __slots__ = () + + def __init__(self, /) -> None: ... + +class IndexExpression(Generic[_BoolT_co]): + __slots__ = ("maketuple",) + + maketuple: _BoolT_co + def __init__(self, maketuple: _BoolT_co) -> None: ... + @overload + def __getitem__(self, item: _TupleT) -> _TupleT: ... + @overload + def __getitem__(self: IndexExpression[L[True]], item: _T) -> tuple[_T]: ... + @overload + def __getitem__(self: IndexExpression[L[False]], item: _T) -> _T: ... + +@overload +def ix_(*args: _FiniteNestedSequence[_HasDType[_DTypeT]]) -> tuple[np.ndarray[_AnyShape, _DTypeT], ...]: ... +@overload +def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[np.str_], ...]: ... +@overload +def ix_(*args: bytes | _NestedSequence[bytes]) -> tuple[NDArray[np.bytes_], ...]: ... +@overload +def ix_(*args: bool | _NestedSequence[bool]) -> tuple[NDArray[np.bool], ...]: ... +@overload +def ix_(*args: int | _NestedSequence[int]) -> tuple[NDArray[np.intp], ...]: ... +@overload +def ix_(*args: float | _NestedSequence[float]) -> tuple[NDArray[np.float64], ...]: ... +@overload +def ix_(*args: complex | _NestedSequence[complex]) -> tuple[NDArray[np.complex128], ...]: ... + +# +def fill_diagonal(a: NDArray[Any], val: object, wrap: bool = False) -> None: ... + +# +def diag_indices(n: int, ndim: int = 2) -> tuple[NDArray[np.intp], ...]: ... +def diag_indices_from(arr: ArrayLike) -> tuple[NDArray[np.intp], ...]: ... + +# +mgrid: Final[MGridClass] = ... +ogrid: Final[OGridClass] = ... + +r_: Final[RClass] = ... +c_: Final[CClass] = ... + +index_exp: Final[IndexExpression[L[True]]] = ... +s_: Final[IndexExpression[L[False]]] = ... diff --git a/local_packages/numpy/lib/_iotools.py b/local_packages/numpy/lib/_iotools.py new file mode 100644 index 0000000..3586b41 --- /dev/null +++ b/local_packages/numpy/lib/_iotools.py @@ -0,0 +1,900 @@ +"""A collection of functions designed to help I/O with ascii files. + +""" +__docformat__ = "restructuredtext en" + +import itertools + +import numpy as np +import numpy._core.numeric as nx +from numpy._utils import asbytes, asunicode + + +def _decode_line(line, encoding=None): + """Decode bytes from binary input streams. + + Defaults to decoding from 'latin1'. + + Parameters + ---------- + line : str or bytes + Line to be decoded. + encoding : str + Encoding used to decode `line`. + + Returns + ------- + decoded_line : str + + """ + if type(line) is bytes: + if encoding is None: + encoding = "latin1" + line = line.decode(encoding) + + return line + + +def _is_string_like(obj): + """ + Check whether obj behaves like a string. + """ + try: + obj + '' + except (TypeError, ValueError): + return False + return True + + +def _is_bytes_like(obj): + """ + Check whether obj behaves like a bytes object. + """ + try: + obj + b'' + except (TypeError, ValueError): + return False + return True + + +def has_nested_fields(ndtype): + """ + Returns whether one or several fields of a dtype are nested. + + Parameters + ---------- + ndtype : dtype + Data-type of a structured array. + + Raises + ------ + AttributeError + If `ndtype` does not have a `names` attribute. + + Examples + -------- + >>> import numpy as np + >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)]) + >>> np.lib._iotools.has_nested_fields(dt) + False + + """ + return any(ndtype[name].names is not None for name in ndtype.names or ()) + + +def flatten_dtype(ndtype, flatten_base=False): + """ + Unpack a structured data-type by collapsing nested fields and/or fields + with a shape. + + Note that the field names are lost. + + Parameters + ---------- + ndtype : dtype + The datatype to collapse + flatten_base : bool, optional + If True, transform a field with a shape into several fields. Default is + False. + + Examples + -------- + >>> import numpy as np + >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ... ('block', int, (2, 3))]) + >>> np.lib._iotools.flatten_dtype(dt) + [dtype('S4'), dtype('float64'), dtype('float64'), dtype('int64')] + >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True) + [dtype('S4'), + dtype('float64'), + dtype('float64'), + dtype('int64'), + dtype('int64'), + dtype('int64'), + dtype('int64'), + dtype('int64'), + dtype('int64')] + + """ + names = ndtype.names + if names is None: + if flatten_base: + return [ndtype.base] * int(np.prod(ndtype.shape)) + return [ndtype.base] + else: + types = [] + for field in names: + info = ndtype.fields[field] + flat_dt = flatten_dtype(info[0], flatten_base) + types.extend(flat_dt) + return types + + +class LineSplitter: + """ + Object to split a string at a given delimiter or at given places. + + Parameters + ---------- + delimiter : str, int, or sequence of ints, optional + If a string, character used to delimit consecutive fields. + If an integer or a sequence of integers, width(s) of each field. + comments : str, optional + Character used to mark the beginning of a comment. Default is '#'. + autostrip : bool, optional + Whether to strip each individual field. Default is True. + + """ + + def autostrip(self, method): + """ + Wrapper to strip each member of the output of `method`. + + Parameters + ---------- + method : function + Function that takes a single argument and returns a sequence of + strings. + + Returns + ------- + wrapped : function + The result of wrapping `method`. `wrapped` takes a single input + argument and returns a list of strings that are stripped of + white-space. + + """ + return lambda input: [_.strip() for _ in method(input)] + + def __init__(self, delimiter=None, comments='#', autostrip=True, + encoding=None): + delimiter = _decode_line(delimiter) + comments = _decode_line(comments) + + self.comments = comments + + # Delimiter is a character + if (delimiter is None) or isinstance(delimiter, str): + delimiter = delimiter or None + _handyman = self._delimited_splitter + # Delimiter is a list of field widths + elif hasattr(delimiter, '__iter__'): + _handyman = self._variablewidth_splitter + idx = np.cumsum([0] + list(delimiter)) + delimiter = [slice(i, j) for (i, j) in itertools.pairwise(idx)] + # Delimiter is a single integer + elif int(delimiter): + (_handyman, delimiter) = ( + self._fixedwidth_splitter, int(delimiter)) + else: + (_handyman, delimiter) = (self._delimited_splitter, None) + self.delimiter = delimiter + if autostrip: + self._handyman = self.autostrip(_handyman) + else: + self._handyman = _handyman + self.encoding = encoding + + def _delimited_splitter(self, line): + """Chop off comments, strip, and split at delimiter. """ + if self.comments is not None: + line = line.split(self.comments)[0] + line = line.strip(" \r\n") + if not line: + return [] + return line.split(self.delimiter) + + def _fixedwidth_splitter(self, line): + if self.comments is not None: + line = line.split(self.comments)[0] + line = line.strip("\r\n") + if not line: + return [] + fixed = self.delimiter + slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)] + return [line[s] for s in slices] + + def _variablewidth_splitter(self, line): + if self.comments is not None: + line = line.split(self.comments)[0] + if not line: + return [] + slices = self.delimiter + return [line[s] for s in slices] + + def __call__(self, line): + return self._handyman(_decode_line(line, self.encoding)) + + +class NameValidator: + """ + Object to validate a list of strings to use as field names. + + The strings are stripped of any non alphanumeric character, and spaces + are replaced by '_'. During instantiation, the user can define a list + of names to exclude, as well as a list of invalid characters. Names in + the exclusion list are appended a '_' character. + + Once an instance has been created, it can be called with a list of + names, and a list of valid names will be created. The `__call__` + method accepts an optional keyword "default" that sets the default name + in case of ambiguity. By default this is 'f', so that names will + default to `f0`, `f1`, etc. + + Parameters + ---------- + excludelist : sequence, optional + A list of names to exclude. This list is appended to the default + list ['return', 'file', 'print']. Excluded names are appended an + underscore: for example, `file` becomes `file_` if supplied. + deletechars : str, optional + A string combining invalid characters that must be deleted from the + names. + case_sensitive : {True, False, 'upper', 'lower'}, optional + * If True, field names are case-sensitive. + * If False or 'upper', field names are converted to upper case. + * If 'lower', field names are converted to lower case. + + The default value is True. + replace_space : '_', optional + Character(s) used in replacement of white spaces. + + Notes + ----- + Calling an instance of `NameValidator` is the same as calling its + method `validate`. + + Examples + -------- + >>> import numpy as np + >>> validator = np.lib._iotools.NameValidator() + >>> validator(['file', 'field2', 'with space', 'CaSe']) + ('file_', 'field2', 'with_space', 'CaSe') + + >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'], + ... deletechars='q', + ... case_sensitive=False) + >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe']) + ('EXCL', 'FIELD2', 'NO_Q', 'WITH_SPACE', 'CASE') + + """ + + defaultexcludelist = 'return', 'file', 'print' + defaultdeletechars = frozenset(r"""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""") + + def __init__(self, excludelist=None, deletechars=None, + case_sensitive=None, replace_space='_'): + # Process the exclusion list .. + if excludelist is None: + excludelist = [] + excludelist.extend(self.defaultexcludelist) + self.excludelist = excludelist + # Process the list of characters to delete + if deletechars is None: + delete = set(self.defaultdeletechars) + else: + delete = set(deletechars) + delete.add('"') + self.deletechars = delete + # Process the case option ..... + if (case_sensitive is None) or (case_sensitive is True): + self.case_converter = lambda x: x + elif (case_sensitive is False) or case_sensitive.startswith('u'): + self.case_converter = lambda x: x.upper() + elif case_sensitive.startswith('l'): + self.case_converter = lambda x: x.lower() + else: + msg = f'unrecognized case_sensitive value {case_sensitive}.' + raise ValueError(msg) + + self.replace_space = replace_space + + def validate(self, names, defaultfmt="f%i", nbfields=None): + """ + Validate a list of strings as field names for a structured array. + + Parameters + ---------- + names : sequence of str + Strings to be validated. + defaultfmt : str, optional + Default format string, used if validating a given string + reduces its length to zero. + nbfields : integer, optional + Final number of validated names, used to expand or shrink the + initial list of names. + + Returns + ------- + validatednames : list of str + The list of validated field names. + + Notes + ----- + A `NameValidator` instance can be called directly, which is the + same as calling `validate`. For examples, see `NameValidator`. + + """ + # Initial checks .............. + if (names is None): + if (nbfields is None): + return None + names = [] + if isinstance(names, str): + names = [names, ] + if nbfields is not None: + nbnames = len(names) + if (nbnames < nbfields): + names = list(names) + [''] * (nbfields - nbnames) + elif (nbnames > nbfields): + names = names[:nbfields] + # Set some shortcuts ........... + deletechars = self.deletechars + excludelist = self.excludelist + case_converter = self.case_converter + replace_space = self.replace_space + # Initializes some variables ... + validatednames = [] + seen = {} + nbempty = 0 + + for item in names: + item = case_converter(item).strip() + if replace_space: + item = item.replace(' ', replace_space) + item = ''.join([c for c in item if c not in deletechars]) + if item == '': + item = defaultfmt % nbempty + while item in names: + nbempty += 1 + item = defaultfmt % nbempty + nbempty += 1 + elif item in excludelist: + item += '_' + cnt = seen.get(item, 0) + if cnt > 0: + validatednames.append(item + '_%d' % cnt) + else: + validatednames.append(item) + seen[item] = cnt + 1 + return tuple(validatednames) + + def __call__(self, names, defaultfmt="f%i", nbfields=None): + return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields) + + +def str2bool(value): + """ + Tries to transform a string supposed to represent a boolean to a boolean. + + Parameters + ---------- + value : str + The string that is transformed to a boolean. + + Returns + ------- + boolval : bool + The boolean representation of `value`. + + Raises + ------ + ValueError + If the string is not 'True' or 'False' (case independent) + + Examples + -------- + >>> import numpy as np + >>> np.lib._iotools.str2bool('TRUE') + True + >>> np.lib._iotools.str2bool('false') + False + + """ + value = value.upper() + if value == 'TRUE': + return True + elif value == 'FALSE': + return False + else: + raise ValueError("Invalid boolean") + + +class ConverterError(Exception): + """ + Exception raised when an error occurs in a converter for string values. + + """ + pass + + +class ConverterLockError(ConverterError): + """ + Exception raised when an attempt is made to upgrade a locked converter. + + """ + pass + + +class ConversionWarning(UserWarning): + """ + Warning issued when a string converter has a problem. + + Notes + ----- + In `genfromtxt` a `ConversionWarning` is issued if raising exceptions + is explicitly suppressed with the "invalid_raise" keyword. + + """ + pass + + +class StringConverter: + """ + Factory class for function transforming a string into another object + (int, float). + + After initialization, an instance can be called to transform a string + into another object. If the string is recognized as representing a + missing value, a default value is returned. + + Attributes + ---------- + func : function + Function used for the conversion. + default : any + Default value to return when the input corresponds to a missing + value. + type : type + Type of the output. + _status : int + Integer representing the order of the conversion. + _mapper : sequence of tuples + Sequence of tuples (dtype, function, default value) to evaluate in + order. + _locked : bool + Holds `locked` parameter. + + Parameters + ---------- + dtype_or_func : {None, dtype, function}, optional + If a `dtype`, specifies the input data type, used to define a basic + function and a default value for missing data. For example, when + `dtype` is float, the `func` attribute is set to `float` and the + default value to `np.nan`. If a function, this function is used to + convert a string to another object. In this case, it is recommended + to give an associated default value as input. + default : any, optional + Value to return by default, that is, when the string to be + converted is flagged as missing. If not given, `StringConverter` + tries to supply a reasonable default value. + missing_values : {None, sequence of str}, optional + ``None`` or sequence of strings indicating a missing value. If ``None`` + then missing values are indicated by empty entries. The default is + ``None``. + locked : bool, optional + Whether the StringConverter should be locked to prevent automatic + upgrade or not. Default is False. + + """ + _mapper = [(nx.bool, str2bool, False), + (nx.int_, int, -1),] + + # On 32-bit systems, we need to make sure that we explicitly include + # nx.int64 since ns.int_ is nx.int32. + if nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize: + _mapper.append((nx.int64, int, -1)) + + _mapper.extend([(nx.float64, float, nx.nan), + (nx.complex128, complex, nx.nan + 0j), + (nx.longdouble, nx.longdouble, nx.nan), + # If a non-default dtype is passed, fall back to generic + # ones (should only be used for the converter) + (nx.integer, int, -1), + (nx.floating, float, nx.nan), + (nx.complexfloating, complex, nx.nan + 0j), + # Last, try with the string types (must be last, because + # `_mapper[-1]` is used as default in some cases) + (nx.str_, asunicode, '???'), + (nx.bytes_, asbytes, '???'), + ]) + + @classmethod + def _getdtype(cls, val): + """Returns the dtype of the input variable.""" + return np.array(val).dtype + + @classmethod + def _getsubdtype(cls, val): + """Returns the type of the dtype of the input variable.""" + return np.array(val).dtype.type + + @classmethod + def _dtypeortype(cls, dtype): + """Returns dtype for datetime64 and type of dtype otherwise.""" + + # This is a bit annoying. We want to return the "general" type in most + # cases (ie. "string" rather than "S10"), but we want to return the + # specific type for datetime64 (ie. "datetime64[us]" rather than + # "datetime64"). + if dtype.type == np.datetime64: + return dtype + return dtype.type + + @classmethod + def upgrade_mapper(cls, func, default=None): + """ + Upgrade the mapper of a StringConverter by adding a new function and + its corresponding default. + + The input function (or sequence of functions) and its associated + default value (if any) is inserted in penultimate position of the + mapper. The corresponding type is estimated from the dtype of the + default value. + + Parameters + ---------- + func : var + Function, or sequence of functions + + Examples + -------- + >>> import dateutil.parser + >>> import datetime + >>> dateparser = dateutil.parser.parse + >>> defaultdate = datetime.date(2000, 1, 1) + >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate) + """ + # Func is a single functions + if callable(func): + cls._mapper.insert(-1, (cls._getsubdtype(default), func, default)) + return + elif hasattr(func, '__iter__'): + if isinstance(func[0], (tuple, list)): + for _ in func: + cls._mapper.insert(-1, _) + return + if default is None: + default = [None] * len(func) + else: + default = list(default) + default.append([None] * (len(func) - len(default))) + for fct, dft in zip(func, default): + cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft)) + + @classmethod + def _find_map_entry(cls, dtype): + # if a converter for the specific dtype is available use that + for i, (deftype, func, default_def) in enumerate(cls._mapper): + if dtype.type == deftype: + return i, (deftype, func, default_def) + + # otherwise find an inexact match + for i, (deftype, func, default_def) in enumerate(cls._mapper): + if np.issubdtype(dtype.type, deftype): + return i, (deftype, func, default_def) + + raise LookupError + + def __init__(self, dtype_or_func=None, default=None, missing_values=None, + locked=False): + # Defines a lock for upgrade + self._locked = bool(locked) + # No input dtype: minimal initialization + if dtype_or_func is None: + self.func = str2bool + self._status = 0 + self.default = default or False + dtype = np.dtype('bool') + else: + # Is the input a np.dtype ? + try: + self.func = None + dtype = np.dtype(dtype_or_func) + except TypeError: + # dtype_or_func must be a function, then + if not callable(dtype_or_func): + errmsg = ("The input argument `dtype` is neither a" + " function nor a dtype (got '%s' instead)") + raise TypeError(errmsg % type(dtype_or_func)) + # Set the function + self.func = dtype_or_func + # If we don't have a default, try to guess it or set it to + # None + if default is None: + try: + default = self.func('0') + except ValueError: + default = None + dtype = self._getdtype(default) + + # find the best match in our mapper + try: + self._status, (_, func, default_def) = self._find_map_entry(dtype) + except LookupError: + # no match + self.default = default + _, func, _ = self._mapper[-1] + self._status = 0 + else: + # use the found default only if we did not already have one + if default is None: + self.default = default_def + else: + self.default = default + + # If the input was a dtype, set the function to the last we saw + if self.func is None: + self.func = func + + # If the status is 1 (int), change the function to + # something more robust. + if self.func == self._mapper[1][1]: + if issubclass(dtype.type, np.uint64): + self.func = np.uint64 + elif issubclass(dtype.type, np.int64): + self.func = np.int64 + else: + self.func = lambda x: int(float(x)) + # Store the list of strings corresponding to missing values. + if missing_values is None: + self.missing_values = {''} + else: + if isinstance(missing_values, str): + missing_values = missing_values.split(",") + self.missing_values = set(list(missing_values) + ['']) + + self._callingfunction = self._strict_call + self.type = self._dtypeortype(dtype) + self._checked = False + self._initial_default = default + + def _loose_call(self, value): + try: + return self.func(value) + except ValueError: + return self.default + + def _strict_call(self, value): + try: + + # We check if we can convert the value using the current function + new_value = self.func(value) + + # In addition to having to check whether func can convert the + # value, we also have to make sure that we don't get overflow + # errors for integers. + if self.func is int: + try: + np.array(value, dtype=self.type) + except OverflowError: + raise ValueError + + # We're still here so we can now return the new value + return new_value + + except ValueError: + if value.strip() in self.missing_values: + if not self._status: + self._checked = False + return self.default + raise ValueError(f"Cannot convert string '{value}'") + + def __call__(self, value): + return self._callingfunction(value) + + def _do_upgrade(self): + # Raise an exception if we locked the converter... + if self._locked: + errmsg = "Converter is locked and cannot be upgraded" + raise ConverterLockError(errmsg) + _statusmax = len(self._mapper) + # Complains if we try to upgrade by the maximum + _status = self._status + if _status == _statusmax: + errmsg = "Could not find a valid conversion function" + raise ConverterError(errmsg) + elif _status < _statusmax - 1: + _status += 1 + self.type, self.func, default = self._mapper[_status] + self._status = _status + if self._initial_default is not None: + self.default = self._initial_default + else: + self.default = default + + def upgrade(self, value): + """ + Find the best converter for a given string, and return the result. + + The supplied string `value` is converted by testing different + converters in order. First the `func` method of the + `StringConverter` instance is tried, if this fails other available + converters are tried. The order in which these other converters + are tried is determined by the `_status` attribute of the instance. + + Parameters + ---------- + value : str + The string to convert. + + Returns + ------- + out : any + The result of converting `value` with the appropriate converter. + + """ + self._checked = True + try: + return self._strict_call(value) + except ValueError: + self._do_upgrade() + return self.upgrade(value) + + def iterupgrade(self, value): + self._checked = True + if not hasattr(value, '__iter__'): + value = (value,) + _strict_call = self._strict_call + try: + for _m in value: + _strict_call(_m) + except ValueError: + self._do_upgrade() + self.iterupgrade(value) + + def update(self, func, default=None, testing_value=None, + missing_values='', locked=False): + """ + Set StringConverter attributes directly. + + Parameters + ---------- + func : function + Conversion function. + default : any, optional + Value to return by default, that is, when the string to be + converted is flagged as missing. If not given, + `StringConverter` tries to supply a reasonable default value. + testing_value : str, optional + A string representing a standard input value of the converter. + This string is used to help defining a reasonable default + value. + missing_values : {sequence of str, None}, optional + Sequence of strings indicating a missing value. If ``None``, then + the existing `missing_values` are cleared. The default is ``''``. + locked : bool, optional + Whether the StringConverter should be locked to prevent + automatic upgrade or not. Default is False. + + Notes + ----- + `update` takes the same parameters as the constructor of + `StringConverter`, except that `func` does not accept a `dtype` + whereas `dtype_or_func` in the constructor does. + + """ + self.func = func + self._locked = locked + + # Don't reset the default to None if we can avoid it + if default is not None: + self.default = default + self.type = self._dtypeortype(self._getdtype(default)) + else: + try: + tester = func(testing_value or '1') + except (TypeError, ValueError): + tester = None + self.type = self._dtypeortype(self._getdtype(tester)) + + # Add the missing values to the existing set or clear it. + if missing_values is None: + # Clear all missing values even though the ctor initializes it to + # set(['']) when the argument is None. + self.missing_values = set() + else: + if not np.iterable(missing_values): + missing_values = [missing_values] + if not all(isinstance(v, str) for v in missing_values): + raise TypeError("missing_values must be strings or unicode") + self.missing_values.update(missing_values) + + +def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs): + """ + Convenience function to create a `np.dtype` object. + + The function processes the input `dtype` and matches it with the given + names. + + Parameters + ---------- + ndtype : var + Definition of the dtype. Can be any string or dictionary recognized + by the `np.dtype` function, or a sequence of types. + names : str or sequence, optional + Sequence of strings to use as field names for a structured dtype. + For convenience, `names` can be a string of a comma-separated list + of names. + defaultfmt : str, optional + Format string used to define missing names, such as ``"f%i"`` + (default) or ``"fields_%02i"``. + validationargs : optional + A series of optional arguments used to initialize a + `NameValidator`. + + Examples + -------- + >>> import numpy as np + >>> np.lib._iotools.easy_dtype(float) + dtype('float64') + >>> np.lib._iotools.easy_dtype("i4, f8") + dtype([('f0', '>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i") + dtype([('field_000', '>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c") + dtype([('a', '>> np.lib._iotools.easy_dtype(float, names="a,b,c") + dtype([('a', ' None: ... + def __call__(self, /, line: str | bytes) -> list[str]: ... + def autostrip(self, /, method: Callable[[_T], Iterable[str]]) -> Callable[[_T], list[str]]: ... + +class NameValidator: + defaultexcludelist: ClassVar[Sequence[str]] = ... + defaultdeletechars: ClassVar[frozenset[str]] = ... + excludelist: list[str] + deletechars: set[str] + case_converter: Callable[[str], str] + replace_space: str + + def __init__( + self, + /, + excludelist: Iterable[str] | None = None, + deletechars: Iterable[str] | None = None, + case_sensitive: Literal["upper", "lower"] | bool | None = None, + replace_space: str = "_", + ) -> None: ... + def __call__(self, /, names: Iterable[str], defaultfmt: str = "f%i", nbfields: int | None = None) -> tuple[str, ...]: ... + def validate(self, /, names: Iterable[str], defaultfmt: str = "f%i", nbfields: int | None = None) -> tuple[str, ...]: ... + +class StringConverter: + func: Callable[[str], Any] | None + default: Any + missing_values: set[str] + type: np.dtype[np.datetime64] | np.generic + + def __init__( + self, + /, + dtype_or_func: npt.DTypeLike | None = None, + default: None = None, + missing_values: Iterable[str] | None = None, + locked: bool = False, + ) -> None: ... + def update( + self, + /, + func: Callable[[str], Any], + default: object | None = None, + testing_value: str | None = None, + missing_values: str = "", + locked: bool = False, + ) -> None: ... + # + def __call__(self, /, value: str) -> Any: ... + def upgrade(self, /, value: str) -> Any: ... + def iterupgrade(self, /, value: Iterable[str] | str) -> None: ... + + # + @classmethod + def upgrade_mapper(cls, func: Callable[[str], Any], default: object | None = None) -> None: ... + +def _decode_line(line: str | bytes, encoding: str | None = None) -> str: ... +def _is_string_like(obj: object) -> bool: ... +def _is_bytes_like(obj: object) -> bool: ... +def has_nested_fields(ndtype: np.dtype[np.void]) -> bool: ... +def flatten_dtype(ndtype: np.dtype[np.void], flatten_base: bool = False) -> type[np.dtype]: ... +@overload +def str2bool(value: Literal["false", "False", "FALSE"]) -> Literal[False]: ... +@overload +def str2bool(value: Literal["true", "True", "TRUE"]) -> Literal[True]: ... +def easy_dtype( + ndtype: str | Sequence[_DTypeLikeNested], + names: str | Sequence[str] | None = None, + defaultfmt: str = "f%i", + **validationargs: Unpack[_NameValidatorKwargs], +) -> np.dtype[np.void]: ... diff --git a/local_packages/numpy/lib/_nanfunctions_impl.py b/local_packages/numpy/lib/_nanfunctions_impl.py new file mode 100644 index 0000000..d5a01a3 --- /dev/null +++ b/local_packages/numpy/lib/_nanfunctions_impl.py @@ -0,0 +1,2006 @@ +""" +Functions that ignore NaN. + +Functions +--------- + +- `nanmin` -- minimum non-NaN value +- `nanmax` -- maximum non-NaN value +- `nanargmin` -- index of minimum non-NaN value +- `nanargmax` -- index of maximum non-NaN value +- `nansum` -- sum of non-NaN values +- `nanprod` -- product of non-NaN values +- `nancumsum` -- cumulative sum of non-NaN values +- `nancumprod` -- cumulative product of non-NaN values +- `nanmean` -- mean of non-NaN values +- `nanvar` -- variance of non-NaN values +- `nanstd` -- standard deviation of non-NaN values +- `nanmedian` -- median of non-NaN values +- `nanquantile` -- qth quantile of non-NaN values +- `nanpercentile` -- qth percentile of non-NaN values + +""" +import functools +import warnings + +import numpy as np +import numpy._core.numeric as _nx +from numpy._core import overrides +from numpy.lib import _function_base_impl as fnb +from numpy.lib._function_base_impl import _weights_are_valid + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = [ + 'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean', + 'nanmedian', 'nanpercentile', 'nanvar', 'nanstd', 'nanprod', + 'nancumsum', 'nancumprod', 'nanquantile' + ] + + +def _nan_mask(a, out=None): + """ + Parameters + ---------- + a : array-like + Input array with at least 1 dimension. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output and will prevent the allocation of a new array. + + Returns + ------- + y : bool ndarray or True + A bool array where ``np.nan`` positions are marked with ``False`` + and other positions are marked with ``True``. If the type of ``a`` + is such that it can't possibly contain ``np.nan``, returns ``True``. + """ + # we assume that a is an array for this private function + + if a.dtype.kind not in 'fc': + return True + + y = np.isnan(a, out=out) + y = np.invert(y, out=y) + return y + +def _replace_nan(a, val): + """ + If `a` is of inexact type, make a copy of `a`, replace NaNs with + the `val` value, and return the copy together with a boolean mask + marking the locations where NaNs were present. If `a` is not of + inexact type, do nothing and return `a` together with a mask of None. + + Note that scalars will end up as array scalars, which is important + for using the result as the value of the out argument in some + operations. + + Parameters + ---------- + a : array-like + Input array. + val : float + NaN values are set to val before doing the operation. + + Returns + ------- + y : ndarray + If `a` is of inexact type, return a copy of `a` with the NaNs + replaced by the fill value, otherwise return `a`. + mask: {bool, None} + If `a` is of inexact type, return a boolean mask marking locations of + NaNs, otherwise return None. + + """ + a = np.asanyarray(a) + + if a.dtype == np.object_: + # object arrays do not support `isnan` (gh-9009), so make a guess + mask = np.not_equal(a, a, dtype=bool) + elif issubclass(a.dtype.type, np.inexact): + mask = np.isnan(a) + else: + mask = None + + if mask is not None: + a = np.array(a, subok=True, copy=True) + np.copyto(a, val, where=mask) + + return a, mask + + +def _copyto(a, val, mask): + """ + Replace values in `a` with NaN where `mask` is True. This differs from + copyto in that it will deal with the case where `a` is a numpy scalar. + + Parameters + ---------- + a : ndarray or numpy scalar + Array or numpy scalar some of whose values are to be replaced + by val. + val : numpy scalar + Value used a replacement. + mask : ndarray, scalar + Boolean array. Where True the corresponding element of `a` is + replaced by `val`. Broadcasts. + + Returns + ------- + res : ndarray, scalar + Array with elements replaced or scalar `val`. + + """ + if isinstance(a, np.ndarray): + np.copyto(a, val, where=mask, casting='unsafe') + else: + a = a.dtype.type(val) + return a + + +def _remove_nan_1d(arr1d, second_arr1d=None, overwrite_input=False): + """ + Equivalent to arr1d[~arr1d.isnan()], but in a different order + + Presumably faster as it incurs fewer copies + + Parameters + ---------- + arr1d : ndarray + Array to remove nans from + second_arr1d : ndarray or None + A second array which will have the same positions removed as arr1d. + overwrite_input : bool + True if `arr1d` can be modified in place + + Returns + ------- + res : ndarray + Array with nan elements removed + second_res : ndarray or None + Second array with nan element positions of first array removed. + overwrite_input : bool + True if `res` can be modified in place, given the constraint on the + input + """ + if arr1d.dtype == object: + # object arrays do not support `isnan` (gh-9009), so make a guess + c = np.not_equal(arr1d, arr1d, dtype=bool) + else: + c = np.isnan(arr1d) + + s = np.nonzero(c)[0] + if s.size == arr1d.size: + warnings.warn("All-NaN slice encountered", RuntimeWarning, + stacklevel=6) + if second_arr1d is None: + return arr1d[:0], None, True + else: + return arr1d[:0], second_arr1d[:0], True + elif s.size == 0: + return arr1d, second_arr1d, overwrite_input + else: + if not overwrite_input: + arr1d = arr1d.copy() + # select non-nans at end of array + enonan = arr1d[-s.size:][~c[-s.size:]] + # fill nans in beginning of array with non-nans of end + arr1d[s[:enonan.size]] = enonan + + if second_arr1d is None: + return arr1d[:-s.size], None, True + else: + if not overwrite_input: + second_arr1d = second_arr1d.copy() + enonan = second_arr1d[-s.size:][~c[-s.size:]] + second_arr1d[s[:enonan.size]] = enonan + + return arr1d[:-s.size], second_arr1d[:-s.size], True + + +def _divide_by_count(a, b, out=None): + """ + Compute a/b ignoring invalid results. If `a` is an array the division + is done in place. If `a` is a scalar, then its type is preserved in the + output. If out is None, then a is used instead so that the division + is in place. Note that this is only called with `a` an inexact type. + + Parameters + ---------- + a : {ndarray, numpy scalar} + Numerator. Expected to be of inexact type but not checked. + b : {ndarray, numpy scalar} + Denominator. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output, but the type will be cast if necessary. + + Returns + ------- + ret : {ndarray, numpy scalar} + The return value is a/b. If `a` was an ndarray the division is done + in place. If `a` is a numpy scalar, the division preserves its type. + + """ + with np.errstate(invalid='ignore', divide='ignore'): + if isinstance(a, np.ndarray): + if out is None: + return np.divide(a, b, out=a, casting='unsafe') + else: + return np.divide(a, b, out=out, casting='unsafe') + elif out is None: + # Precaution against reduced object arrays + try: + return a.dtype.type(a / b) + except AttributeError: + return a / b + else: + # This is questionable, but currently a numpy scalar can + # be output to a zero dimensional array. + return np.divide(a, b, out=out, casting='unsafe') + + +def _nanmin_dispatcher(a, axis=None, out=None, keepdims=None, + initial=None, where=None): + return (a, out) + + +@array_function_dispatch(_nanmin_dispatcher) +def nanmin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, + where=np._NoValue): + """ + Return minimum of an array or minimum along an axis, ignoring any NaNs. + When all-NaN slices are encountered a ``RuntimeWarning`` is raised and + Nan is returned for that slice. + + Parameters + ---------- + a : array_like + Array containing numbers whose minimum is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the minimum is computed. The default is to compute + the minimum of the flattened array. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output, but the type will be cast if necessary. See + :ref:`ufuncs-output-type` for more details. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If the value is anything but the default, then + `keepdims` will be passed through to the `min` method + of sub-classes of `ndarray`. If the sub-classes methods + does not implement `keepdims` any exceptions will be raised. + initial : scalar, optional + The maximum value of an output element. Must be present to allow + computation on empty slice. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.22.0 + where : array_like of bool, optional + Elements to compare for the minimum. See `~numpy.ufunc.reduce` + for details. + + .. versionadded:: 1.22.0 + + Returns + ------- + nanmin : ndarray + An array with the same shape as `a`, with the specified axis + removed. If `a` is a 0-d array, or if axis is None, an ndarray + scalar is returned. The same dtype as `a` is returned. + + See Also + -------- + nanmax : + The maximum value of an array along a given axis, ignoring any NaNs. + amin : + The minimum value of an array along a given axis, propagating any NaNs. + fmin : + Element-wise minimum of two arrays, ignoring any NaNs. + minimum : + Element-wise minimum of two arrays, propagating any NaNs. + isnan : + Shows which elements are Not a Number (NaN). + isfinite: + Shows which elements are neither NaN nor infinity. + + amax, fmax, maximum + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). This means that Not a Number is not equivalent to infinity. + Positive infinity is treated as a very large number and negative + infinity is treated as a very small (i.e. negative) number. + + If the input has a integer type the function is equivalent to np.min. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nanmin(a) + 1.0 + >>> np.nanmin(a, axis=0) + array([1., 2.]) + >>> np.nanmin(a, axis=1) + array([1., 3.]) + + When positive infinity and negative infinity are present: + + >>> np.nanmin([1, 2, np.nan, np.inf]) + 1.0 + >>> np.nanmin([1, 2, np.nan, -np.inf]) + -inf + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if initial is not np._NoValue: + kwargs['initial'] = initial + if where is not np._NoValue: + kwargs['where'] = where + + if (type(a) is np.ndarray or type(a) is np.memmap) and a.dtype != np.object_: + # Fast, but not safe for subclasses of ndarray, or object arrays, + # which do not implement isnan (gh-9009), or fmin correctly (gh-8975) + res = np.fmin.reduce(a, axis=axis, out=out, **kwargs) + if np.isnan(res).any(): + warnings.warn("All-NaN slice encountered", RuntimeWarning, + stacklevel=2) + else: + # Slow, but safe for subclasses of ndarray + a, mask = _replace_nan(a, +np.inf) + res = np.amin(a, axis=axis, out=out, **kwargs) + if mask is None: + return res + + # Check for all-NaN axis + kwargs.pop("initial", None) + mask = np.all(mask, axis=axis, **kwargs) + if np.any(mask): + res = _copyto(res, np.nan, mask) + warnings.warn("All-NaN axis encountered", RuntimeWarning, + stacklevel=2) + return res + + +def _nanmax_dispatcher(a, axis=None, out=None, keepdims=None, + initial=None, where=None): + return (a, out) + + +@array_function_dispatch(_nanmax_dispatcher) +def nanmax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, + where=np._NoValue): + """ + Return the maximum of an array or maximum along an axis, ignoring any + NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is + raised and NaN is returned for that slice. + + Parameters + ---------- + a : array_like + Array containing numbers whose maximum is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the maximum is computed. The default is to compute + the maximum of the flattened array. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output, but the type will be cast if necessary. See + :ref:`ufuncs-output-type` for more details. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + If the value is anything but the default, then + `keepdims` will be passed through to the `max` method + of sub-classes of `ndarray`. If the sub-classes methods + does not implement `keepdims` any exceptions will be raised. + initial : scalar, optional + The minimum value of an output element. Must be present to allow + computation on empty slice. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.22.0 + where : array_like of bool, optional + Elements to compare for the maximum. See `~numpy.ufunc.reduce` + for details. + + .. versionadded:: 1.22.0 + + Returns + ------- + nanmax : ndarray + An array with the same shape as `a`, with the specified axis removed. + If `a` is a 0-d array, or if axis is None, an ndarray scalar is + returned. The same dtype as `a` is returned. + + See Also + -------- + nanmin : + The minimum value of an array along a given axis, ignoring any NaNs. + amax : + The maximum value of an array along a given axis, propagating any NaNs. + fmax : + Element-wise maximum of two arrays, ignoring any NaNs. + maximum : + Element-wise maximum of two arrays, propagating any NaNs. + isnan : + Shows which elements are Not a Number (NaN). + isfinite: + Shows which elements are neither NaN nor infinity. + + amin, fmin, minimum + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). This means that Not a Number is not equivalent to infinity. + Positive infinity is treated as a very large number and negative + infinity is treated as a very small (i.e. negative) number. + + If the input has a integer type the function is equivalent to np.max. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nanmax(a) + 3.0 + >>> np.nanmax(a, axis=0) + array([3., 2.]) + >>> np.nanmax(a, axis=1) + array([2., 3.]) + + When positive infinity and negative infinity are present: + + >>> np.nanmax([1, 2, np.nan, -np.inf]) + 2.0 + >>> np.nanmax([1, 2, np.nan, np.inf]) + inf + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if initial is not np._NoValue: + kwargs['initial'] = initial + if where is not np._NoValue: + kwargs['where'] = where + + if (type(a) is np.ndarray or type(a) is np.memmap) and a.dtype != np.object_: + # Fast, but not safe for subclasses of ndarray, or object arrays, + # which do not implement isnan (gh-9009), or fmax correctly (gh-8975) + res = np.fmax.reduce(a, axis=axis, out=out, **kwargs) + if np.isnan(res).any(): + warnings.warn("All-NaN slice encountered", RuntimeWarning, + stacklevel=2) + else: + # Slow, but safe for subclasses of ndarray + a, mask = _replace_nan(a, -np.inf) + res = np.amax(a, axis=axis, out=out, **kwargs) + if mask is None: + return res + + # Check for all-NaN axis + kwargs.pop("initial", None) + mask = np.all(mask, axis=axis, **kwargs) + if np.any(mask): + res = _copyto(res, np.nan, mask) + warnings.warn("All-NaN axis encountered", RuntimeWarning, + stacklevel=2) + return res + + +def _nanargmin_dispatcher(a, axis=None, out=None, *, keepdims=None): + return (a,) + + +@array_function_dispatch(_nanargmin_dispatcher) +def nanargmin(a, axis=None, out=None, *, keepdims=np._NoValue): + """ + Return the indices of the minimum values in the specified axis ignoring + NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the results + cannot be trusted if a slice contains only NaNs and Infs. + + Parameters + ---------- + a : array_like + Input data. + axis : int, optional + Axis along which to operate. By default flattened input is used. + out : array, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. + + .. versionadded:: 1.22.0 + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + .. versionadded:: 1.22.0 + + Returns + ------- + index_array : ndarray + An array of indices or a single index value. + + See Also + -------- + argmin, nanargmax + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[np.nan, 4], [2, 3]]) + >>> np.argmin(a) + 0 + >>> np.nanargmin(a) + 2 + >>> np.nanargmin(a, axis=0) + array([1, 1]) + >>> np.nanargmin(a, axis=1) + array([1, 0]) + + """ + a, mask = _replace_nan(a, np.inf) + if mask is not None and mask.size: + mask = np.all(mask, axis=axis) + if np.any(mask): + raise ValueError("All-NaN slice encountered") + res = np.argmin(a, axis=axis, out=out, keepdims=keepdims) + return res + + +def _nanargmax_dispatcher(a, axis=None, out=None, *, keepdims=None): + return (a,) + + +@array_function_dispatch(_nanargmax_dispatcher) +def nanargmax(a, axis=None, out=None, *, keepdims=np._NoValue): + """ + Return the indices of the maximum values in the specified axis ignoring + NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the + results cannot be trusted if a slice contains only NaNs and -Infs. + + + Parameters + ---------- + a : array_like + Input data. + axis : int, optional + Axis along which to operate. By default flattened input is used. + out : array, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. + + .. versionadded:: 1.22.0 + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + .. versionadded:: 1.22.0 + + Returns + ------- + index_array : ndarray + An array of indices or a single index value. + + See Also + -------- + argmax, nanargmin + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[np.nan, 4], [2, 3]]) + >>> np.argmax(a) + 0 + >>> np.nanargmax(a) + 1 + >>> np.nanargmax(a, axis=0) + array([1, 0]) + >>> np.nanargmax(a, axis=1) + array([1, 1]) + + """ + a, mask = _replace_nan(a, -np.inf) + if mask is not None and mask.size: + mask = np.all(mask, axis=axis) + if np.any(mask): + raise ValueError("All-NaN slice encountered") + res = np.argmax(a, axis=axis, out=out, keepdims=keepdims) + return res + + +def _nansum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, + initial=None, where=None): + return (a, out) + + +@array_function_dispatch(_nansum_dispatcher) +def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, + initial=np._NoValue, where=np._NoValue): + """ + Return the sum of array elements over a given axis treating Not a + Numbers (NaNs) as zero. + + In NumPy versions <= 1.9.0 Nan is returned for slices that are all-NaN or + empty. In later versions zero is returned. + + Parameters + ---------- + a : array_like + Array containing numbers whose sum is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the sum is computed. The default is to compute the + sum of the flattened array. + dtype : data-type, optional + The type of the returned array and of the accumulator in which the + elements are summed. By default, the dtype of `a` is used. An + exception is when `a` has an integer type with less precision than + the platform (u)intp. In that case, the default will be either + (u)int32 or (u)int64 depending on whether the platform is 32 or 64 + bits. For inexact inputs, dtype must be inexact. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``. If provided, it must have the same shape as the + expected output, but the type will be cast if necessary. See + :ref:`ufuncs-output-type` for more details. The casting of NaN to integer + can yield unexpected results. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If the value is anything but the default, then + `keepdims` will be passed through to the `mean` or `sum` methods + of sub-classes of `ndarray`. If the sub-classes methods + does not implement `keepdims` any exceptions will be raised. + initial : scalar, optional + Starting value for the sum. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.22.0 + where : array_like of bool, optional + Elements to include in the sum. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.22.0 + + Returns + ------- + nansum : ndarray. + A new array holding the result is returned unless `out` is + specified, in which it is returned. The result has the same + size as `a`, and the same shape as `a` if `axis` is not None + or `a` is a 1-d array. + + See Also + -------- + numpy.sum : Sum across array propagating NaNs. + isnan : Show which elements are NaN. + isfinite : Show which elements are not NaN or +/-inf. + + Notes + ----- + If both positive and negative infinity are present, the sum will be Not + A Number (NaN). + + Examples + -------- + >>> import numpy as np + >>> np.nansum(1) + 1 + >>> np.nansum([1]) + 1 + >>> np.nansum([1, np.nan]) + 1.0 + >>> a = np.array([[1, 1], [1, np.nan]]) + >>> np.nansum(a) + 3.0 + >>> np.nansum(a, axis=0) + array([2., 1.]) + >>> np.nansum([1, np.nan, np.inf]) + inf + >>> np.nansum([1, np.nan, -np.inf]) + -inf + >>> with np.errstate(invalid="ignore"): + ... np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present + np.float64(nan) + + """ + a, mask = _replace_nan(a, 0) + return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims, + initial=initial, where=where) + + +def _nanprod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, + initial=None, where=None): + return (a, out) + + +@array_function_dispatch(_nanprod_dispatcher) +def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, + initial=np._NoValue, where=np._NoValue): + """ + Return the product of array elements over a given axis treating Not a + Numbers (NaNs) as ones. + + One is returned for slices that are all-NaN or empty. + + Parameters + ---------- + a : array_like + Array containing numbers whose product is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the product is computed. The default is to compute + the product of the flattened array. + dtype : data-type, optional + The type of the returned array and of the accumulator in which the + elements are summed. By default, the dtype of `a` is used. An + exception is when `a` has an integer type with less precision than + the platform (u)intp. In that case, the default will be either + (u)int32 or (u)int64 depending on whether the platform is 32 or 64 + bits. For inexact inputs, dtype must be inexact. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``. If provided, it must have the same shape as the + expected output, but the type will be cast if necessary. See + :ref:`ufuncs-output-type` for more details. The casting of NaN to integer + can yield unexpected results. + keepdims : bool, optional + If True, the axes which are reduced are left in the result as + dimensions with size one. With this option, the result will + broadcast correctly against the original `arr`. + initial : scalar, optional + The starting value for this product. See `~numpy.ufunc.reduce` + for details. + + .. versionadded:: 1.22.0 + where : array_like of bool, optional + Elements to include in the product. See `~numpy.ufunc.reduce` + for details. + + .. versionadded:: 1.22.0 + + Returns + ------- + nanprod : ndarray + A new array holding the result is returned unless `out` is + specified, in which case it is returned. + + See Also + -------- + numpy.prod : Product across array propagating NaNs. + isnan : Show which elements are NaN. + + Examples + -------- + >>> import numpy as np + >>> np.nanprod(1) + 1 + >>> np.nanprod([1]) + 1 + >>> np.nanprod([1, np.nan]) + 1.0 + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nanprod(a) + 6.0 + >>> np.nanprod(a, axis=0) + array([3., 2.]) + + """ + a, mask = _replace_nan(a, 1) + return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims, + initial=initial, where=where) + + +def _nancumsum_dispatcher(a, axis=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_nancumsum_dispatcher) +def nancumsum(a, axis=None, dtype=None, out=None): + """ + Return the cumulative sum of array elements over a given axis treating Not a + Numbers (NaNs) as zero. The cumulative sum does not change when NaNs are + encountered and leading NaNs are replaced by zeros. + + Zeros are returned for slices that are all-NaN or empty. + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + Axis along which the cumulative sum is computed. The default + (None) is to compute the cumsum over the flattened array. + dtype : dtype, optional + Type of the returned array and of the accumulator in which the + elements are summed. If `dtype` is not specified, it defaults + to the dtype of `a`, unless `a` has an integer dtype with a + precision less than that of the default platform integer. In + that case, the default platform integer is used. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. See :ref:`ufuncs-output-type` for + more details. + + Returns + ------- + nancumsum : ndarray. + A new array holding the result is returned unless `out` is + specified, in which it is returned. The result has the same + size as `a`, and the same shape as `a` if `axis` is not None + or `a` is a 1-d array. + + See Also + -------- + numpy.cumsum : Cumulative sum across array propagating NaNs. + isnan : Show which elements are NaN. + + Examples + -------- + >>> import numpy as np + >>> np.nancumsum(1) + array([1]) + >>> np.nancumsum([1]) + array([1]) + >>> np.nancumsum([1, np.nan]) + array([1., 1.]) + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nancumsum(a) + array([1., 3., 6., 6.]) + >>> np.nancumsum(a, axis=0) + array([[1., 2.], + [4., 2.]]) + >>> np.nancumsum(a, axis=1) + array([[1., 3.], + [3., 3.]]) + + """ + a, mask = _replace_nan(a, 0) + return np.cumsum(a, axis=axis, dtype=dtype, out=out) + + +def _nancumprod_dispatcher(a, axis=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_nancumprod_dispatcher) +def nancumprod(a, axis=None, dtype=None, out=None): + """ + Return the cumulative product of array elements over a given axis treating Not a + Numbers (NaNs) as one. The cumulative product does not change when NaNs are + encountered and leading NaNs are replaced by ones. + + Ones are returned for slices that are all-NaN or empty. + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + Axis along which the cumulative product is computed. By default + the input is flattened. + dtype : dtype, optional + Type of the returned array, as well as of the accumulator in which + the elements are multiplied. If *dtype* is not specified, it + defaults to the dtype of `a`, unless `a` has an integer dtype with + a precision less than that of the default platform integer. In + that case, the default platform integer is used instead. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type of the resulting values will be cast if necessary. + + Returns + ------- + nancumprod : ndarray + A new array holding the result is returned unless `out` is + specified, in which case it is returned. + + See Also + -------- + numpy.cumprod : Cumulative product across array propagating NaNs. + isnan : Show which elements are NaN. + + Examples + -------- + >>> import numpy as np + >>> np.nancumprod(1) + array([1]) + >>> np.nancumprod([1]) + array([1]) + >>> np.nancumprod([1, np.nan]) + array([1., 1.]) + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nancumprod(a) + array([1., 2., 6., 6.]) + >>> np.nancumprod(a, axis=0) + array([[1., 2.], + [3., 2.]]) + >>> np.nancumprod(a, axis=1) + array([[1., 2.], + [3., 3.]]) + + """ + a, mask = _replace_nan(a, 1) + return np.cumprod(a, axis=axis, dtype=dtype, out=out) + + +def _nanmean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, + *, where=None): + return (a, out) + + +@array_function_dispatch(_nanmean_dispatcher) +def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, + *, where=np._NoValue): + """ + Compute the arithmetic mean along the specified axis, ignoring NaNs. + + Returns the average of the array elements. The average is taken over + the flattened array by default, otherwise over the specified axis. + `float64` intermediate and return values are used for integer inputs. + + For all-NaN slices, NaN is returned and a `RuntimeWarning` is raised. + + Parameters + ---------- + a : array_like + Array containing numbers whose mean is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the means are computed. The default is to compute + the mean of the flattened array. + dtype : data-type, optional + Type to use in computing the mean. For integer inputs, the default + is `float64`; for inexact inputs, it is the same as the input + dtype. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output, but the type will be cast if necessary. + See :ref:`ufuncs-output-type` for more details. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If the value is anything but the default, then + `keepdims` will be passed through to the `mean` or `sum` methods + of sub-classes of `ndarray`. If the sub-classes methods + does not implement `keepdims` any exceptions will be raised. + where : array_like of bool, optional + Elements to include in the mean. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.22.0 + + Returns + ------- + m : ndarray, see dtype parameter above + If `out=None`, returns a new array containing the mean values, + otherwise a reference to the output array is returned. Nan is + returned for slices that contain only NaNs. + + See Also + -------- + average : Weighted average + mean : Arithmetic mean taken while not ignoring NaNs + var, nanvar + + Notes + ----- + The arithmetic mean is the sum of the non-NaN elements along the axis + divided by the number of non-NaN elements. + + Note that for floating-point input, the mean is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for `float32`. Specifying a + higher-precision accumulator using the `dtype` keyword can alleviate + this issue. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, np.nan], [3, 4]]) + >>> np.nanmean(a) + 2.6666666666666665 + >>> np.nanmean(a, axis=0) + array([2., 4.]) + >>> np.nanmean(a, axis=1) + array([1., 3.5]) # may vary + + """ + arr, mask = _replace_nan(a, 0) + if mask is None: + return np.mean(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims, + where=where) + + if dtype is not None: + dtype = np.dtype(dtype) + if dtype is not None and not issubclass(dtype.type, np.inexact): + raise TypeError("If a is inexact, then dtype must be inexact") + if out is not None and not issubclass(out.dtype.type, np.inexact): + raise TypeError("If a is inexact, then out must be inexact") + + cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=keepdims, + where=where) + tot = np.sum(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims, + where=where) + avg = _divide_by_count(tot, cnt, out=out) + + isbad = (cnt == 0) + if isbad.any(): + warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2) + # NaN is the only possible bad value, so no further + # action is needed to handle bad results. + return avg + + +def _nanmedian1d(arr1d, overwrite_input=False): + """ + Private function for rank 1 arrays. Compute the median ignoring NaNs. + See nanmedian for parameter usage + """ + arr1d_parsed, _, overwrite_input = _remove_nan_1d( + arr1d, overwrite_input=overwrite_input, + ) + + if arr1d_parsed.size == 0: + # Ensure that a nan-esque scalar of the appropriate type (and unit) + # is returned for `timedelta64` and `complexfloating` + return arr1d[-1] + + return np.median(arr1d_parsed, overwrite_input=overwrite_input) + + +def _nanmedian(a, axis=None, out=None, overwrite_input=False): + """ + Private function that doesn't support extended axis or keepdims. + These methods are extended to this function using _ureduce + See nanmedian for parameter usage + + """ + if axis is None or a.ndim == 1: + part = a.ravel() + if out is None: + return _nanmedian1d(part, overwrite_input) + else: + out[...] = _nanmedian1d(part, overwrite_input) + return out + else: + # for small medians use sort + indexing which is still faster than + # apply_along_axis + # benchmarked with shuffled (50, 50, x) containing a few NaN + if a.shape[axis] < 600: + return _nanmedian_small(a, axis, out, overwrite_input) + result = np.apply_along_axis(_nanmedian1d, axis, a, overwrite_input) + if out is not None: + out[...] = result + return result + + +def _nanmedian_small(a, axis=None, out=None, overwrite_input=False): + """ + sort + indexing median, faster for small medians along multiple + dimensions due to the high overhead of apply_along_axis + + see nanmedian for parameter usage + """ + a = np.ma.masked_array(a, np.isnan(a)) + m = np.ma.median(a, axis=axis, overwrite_input=overwrite_input) + for i in range(np.count_nonzero(m.mask.ravel())): + warnings.warn("All-NaN slice encountered", RuntimeWarning, + stacklevel=5) + + fill_value = np.timedelta64("NaT") if m.dtype.kind == "m" else np.nan + if out is not None: + out[...] = m.filled(fill_value) + return out + return m.filled(fill_value) + + +def _nanmedian_dispatcher( + a, axis=None, out=None, overwrite_input=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_nanmedian_dispatcher) +def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValue): + """ + Compute the median along the specified axis, while ignoring NaNs. + + Returns the median of the array elements. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : {int, sequence of int, None}, optional + Axis or axes along which the medians are computed. The default + is to compute the median along a flattened version of the array. + A sequence of axes is supported since version 1.9.0. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow use of memory of input array `a` for + calculations. The input array will be modified by the call to + `median`. This will save memory when you do not need to preserve + the contents of the input array. Treat the input as undefined, + but it will probably be fully or partially sorted. Default is + False. If `overwrite_input` is ``True`` and `a` is not already an + `ndarray`, an error will be raised. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If this is anything but the default value it will be passed + through (in the special case of an empty array) to the + `mean` function of the underlying array. If the array is + a sub-class and `mean` does not have the kwarg `keepdims` this + will raise a RuntimeError. + + Returns + ------- + median : ndarray + A new array holding the result. If the input contains integers + or floats smaller than ``float64``, then the output data-type is + ``np.float64``. Otherwise, the data-type of the output is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + mean, median, percentile + + Notes + ----- + Given a vector ``V`` of length ``N``, the median of ``V`` is the + middle value of a sorted copy of ``V``, ``V_sorted`` - i.e., + ``V_sorted[(N-1)/2]``, when ``N`` is odd and the average of the two + middle values of ``V_sorted`` when ``N`` is even. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[10.0, 7, 4], [3, 2, 1]]) + >>> a[0, 1] = np.nan + >>> a + array([[10., nan, 4.], + [ 3., 2., 1.]]) + >>> np.median(a) + np.float64(nan) + >>> np.nanmedian(a) + 3.0 + >>> np.nanmedian(a, axis=0) + array([6.5, 2. , 2.5]) + >>> np.median(a, axis=1) + array([nan, 2.]) + >>> b = a.copy() + >>> np.nanmedian(b, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a==b) + >>> b = a.copy() + >>> np.nanmedian(b, axis=None, overwrite_input=True) + 3.0 + >>> assert not np.all(a==b) + + """ + a = np.asanyarray(a) + # apply_along_axis in _nanmedian doesn't handle empty arrays well, + # so deal them upfront + if a.size == 0: + return np.nanmean(a, axis, out=out, keepdims=keepdims) + + return fnb._ureduce(a, func=_nanmedian, keepdims=keepdims, + axis=axis, out=out, + overwrite_input=overwrite_input) + + +def _nanpercentile_dispatcher( + a, q, axis=None, out=None, overwrite_input=None, + method=None, keepdims=None, *, weights=None): + return (a, q, out, weights) + + +@array_function_dispatch(_nanpercentile_dispatcher) +def nanpercentile( + a, + q, + axis=None, + out=None, + overwrite_input=False, + method="linear", + keepdims=np._NoValue, + *, + weights=None, +): + """ + Compute the qth percentile of the data along the specified axis, + while ignoring nan values. + + Returns the qth percentile(s) of the array elements. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array, containing + nan values to be ignored. + q : array_like of float + Percentile or sequence of percentiles to compute, which must be + between 0 and 100 inclusive. + axis : {int, tuple of int, None}, optional + Axis or axes along which the percentiles are computed. The default + is to compute the percentile(s) along a flattened version of the + array. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape and buffer length as the expected output, but the + type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow the input array `a` to be modified by + intermediate calculations, to save memory. In this case, the + contents of the input `a` after this function completes is + undefined. + method : str, optional + This parameter specifies the method to use for estimating the + percentile. There are many different methods, some unique to NumPy. + See the notes for explanation. The options sorted by their R type + as summarized in the H&F paper [1]_ are: + + 1. 'inverted_cdf' + 2. 'averaged_inverted_cdf' + 3. 'closest_observation' + 4. 'interpolated_inverted_cdf' + 5. 'hazen' + 6. 'weibull' + 7. 'linear' (default) + 8. 'median_unbiased' + 9. 'normal_unbiased' + + The first three methods are discontinuous. NumPy further defines the + following discontinuous variations of the default 'linear' (7.) option: + + * 'lower' + * 'higher', + * 'midpoint' + * 'nearest' + + .. versionchanged:: 1.22.0 + This argument was previously called "interpolation" and only + offered the "linear" default and last four options. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original array `a`. + + If this is anything but the default value it will be passed + through (in the special case of an empty array) to the + `mean` function of the underlying array. If the array is + a sub-class and `mean` does not have the kwarg `keepdims` this + will raise a RuntimeError. + + weights : array_like, optional + An array of weights associated with the values in `a`. Each value in + `a` contributes to the percentile according to its associated weight. + The weights array can either be 1-D (in which case its length must be + the size of `a` along the given axis) or of the same shape as `a`. + If `weights=None`, then all data in `a` are assumed to have a + weight equal to one. + Only `method="inverted_cdf"` supports weights. + + .. versionadded:: 2.0.0 + + Returns + ------- + percentile : scalar or ndarray + If `q` is a single percentile and `axis=None`, then the result + is a scalar. If multiple percentiles are given, first axis of + the result corresponds to the percentiles. The other axes are + the axes that remain after the reduction of `a`. If the input + contains integers or floats smaller than ``float64``, the output + data-type is ``float64``. Otherwise, the output data-type is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + nanmean + nanmedian : equivalent to ``nanpercentile(..., 50)`` + percentile, median, mean + nanquantile : equivalent to nanpercentile, except q in range [0, 1]. + + Notes + ----- + The behavior of `numpy.nanpercentile` with percentage `q` is that of + `numpy.quantile` with argument ``q/100`` (ignoring nan values). + For more information, please see `numpy.quantile`. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) + >>> a[0][1] = np.nan + >>> a + array([[10., nan, 4.], + [ 3., 2., 1.]]) + >>> np.percentile(a, 50) + np.float64(nan) + >>> np.nanpercentile(a, 50) + 3.0 + >>> np.nanpercentile(a, 50, axis=0) + array([6.5, 2. , 2.5]) + >>> np.nanpercentile(a, 50, axis=1, keepdims=True) + array([[7.], + [2.]]) + >>> m = np.nanpercentile(a, 50, axis=0) + >>> out = np.zeros_like(m) + >>> np.nanpercentile(a, 50, axis=0, out=out) + array([6.5, 2. , 2.5]) + >>> m + array([6.5, 2. , 2.5]) + + >>> b = a.copy() + >>> np.nanpercentile(b, 50, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a==b) + + References + ---------- + .. [1] R. J. Hyndman and Y. Fan, + "Sample quantiles in statistical packages," + The American Statistician, 50(4), pp. 361-365, 1996 + + """ + a = np.asanyarray(a) + if a.dtype.kind == "c": + raise TypeError("a must be an array of real numbers") + + weak_q = type(q) in (int, float) # use weak promotion for final result type + q = np.true_divide(q, 100, out=...) + if not fnb._quantile_is_valid(q): + raise ValueError("Percentiles must be in the range [0, 100]") + + if weights is not None: + if method != "inverted_cdf": + msg = ("Only method 'inverted_cdf' supports weights. " + f"Got: {method}.") + raise ValueError(msg) + if axis is not None: + axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis") + weights = _weights_are_valid(weights=weights, a=a, axis=axis) + if np.any(weights < 0): + raise ValueError("Weights must be non-negative.") + + return _nanquantile_unchecked( + a, q, axis, out, overwrite_input, method, keepdims, weights, weak_q) + + +def _nanquantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, + method=None, keepdims=None, *, weights=None): + return (a, q, out, weights) + + +@array_function_dispatch(_nanquantile_dispatcher) +def nanquantile( + a, + q, + axis=None, + out=None, + overwrite_input=False, + method="linear", + keepdims=np._NoValue, + *, + weights=None, +): + """ + Compute the qth quantile of the data along the specified axis, + while ignoring nan values. + Returns the qth quantile(s) of the array elements. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array, containing + nan values to be ignored + q : array_like of float + Probability or sequence of probabilities for the quantiles to compute. + Values must be between 0 and 1 inclusive. + axis : {int, tuple of int, None}, optional + Axis or axes along which the quantiles are computed. The + default is to compute the quantile(s) along a flattened + version of the array. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow the input array `a` to be modified by intermediate + calculations, to save memory. In this case, the contents of the input + `a` after this function completes is undefined. + method : str, optional + This parameter specifies the method to use for estimating the + quantile. There are many different methods, some unique to NumPy. + See the notes for explanation. The options sorted by their R type + as summarized in the H&F paper [1]_ are: + + 1. 'inverted_cdf' + 2. 'averaged_inverted_cdf' + 3. 'closest_observation' + 4. 'interpolated_inverted_cdf' + 5. 'hazen' + 6. 'weibull' + 7. 'linear' (default) + 8. 'median_unbiased' + 9. 'normal_unbiased' + + The first three methods are discontinuous. NumPy further defines the + following discontinuous variations of the default 'linear' (7.) option: + + * 'lower' + * 'higher', + * 'midpoint' + * 'nearest' + + .. versionchanged:: 1.22.0 + This argument was previously called "interpolation" and only + offered the "linear" default and last four options. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original array `a`. + + If this is anything but the default value it will be passed + through (in the special case of an empty array) to the + `mean` function of the underlying array. If the array is + a sub-class and `mean` does not have the kwarg `keepdims` this + will raise a RuntimeError. + + weights : array_like, optional + An array of weights associated with the values in `a`. Each value in + `a` contributes to the quantile according to its associated weight. + The weights array can either be 1-D (in which case its length must be + the size of `a` along the given axis) or of the same shape as `a`. + If `weights=None`, then all data in `a` are assumed to have a + weight equal to one. + Only `method="inverted_cdf"` supports weights. + + .. versionadded:: 2.0.0 + + Returns + ------- + quantile : scalar or ndarray + If `q` is a single probability and `axis=None`, then the result + is a scalar. If multiple probability levels are given, first axis of + the result corresponds to the quantiles. The other axes are + the axes that remain after the reduction of `a`. If the input + contains integers or floats smaller than ``float64``, the output + data-type is ``float64``. Otherwise, the output data-type is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + quantile + nanmean, nanmedian + nanmedian : equivalent to ``nanquantile(..., 0.5)`` + nanpercentile : same as nanquantile, but with q in the range [0, 100]. + + Notes + ----- + The behavior of `numpy.nanquantile` is the same as that of + `numpy.quantile` (ignoring nan values). + For more information, please see `numpy.quantile`. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) + >>> a[0][1] = np.nan + >>> a + array([[10., nan, 4.], + [ 3., 2., 1.]]) + >>> np.quantile(a, 0.5) + np.float64(nan) + >>> np.nanquantile(a, 0.5) + 3.0 + >>> np.nanquantile(a, 0.5, axis=0) + array([6.5, 2. , 2.5]) + >>> np.nanquantile(a, 0.5, axis=1, keepdims=True) + array([[7.], + [2.]]) + >>> m = np.nanquantile(a, 0.5, axis=0) + >>> out = np.zeros_like(m) + >>> np.nanquantile(a, 0.5, axis=0, out=out) + array([6.5, 2. , 2.5]) + >>> m + array([6.5, 2. , 2.5]) + >>> b = a.copy() + >>> np.nanquantile(b, 0.5, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a==b) + + References + ---------- + .. [1] R. J. Hyndman and Y. Fan, + "Sample quantiles in statistical packages," + The American Statistician, 50(4), pp. 361-365, 1996 + + """ + a = np.asanyarray(a) + if a.dtype.kind == "c": + raise TypeError("a must be an array of real numbers") + + weak_q = type(q) in (int, float) # use weak promotion for final result type + q = np.asanyarray(q) + + if not fnb._quantile_is_valid(q): + raise ValueError("Quantiles must be in the range [0, 1]") + + if weights is not None: + if method != "inverted_cdf": + msg = ("Only method 'inverted_cdf' supports weights. " + f"Got: {method}.") + raise ValueError(msg) + if axis is not None: + axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis") + weights = _weights_are_valid(weights=weights, a=a, axis=axis) + if np.any(weights < 0): + raise ValueError("Weights must be non-negative.") + + return _nanquantile_unchecked( + a, q, axis, out, overwrite_input, method, keepdims, weights, weak_q) + + +def _nanquantile_unchecked( + a, + q, + axis=None, + out=None, + overwrite_input=False, + method="linear", + keepdims=np._NoValue, + weights=None, + weak_q=False, +): + """Assumes that q is in [0, 1], and is an ndarray""" + # apply_along_axis in _nanpercentile doesn't handle empty arrays well, + # so deal them upfront + if a.size == 0: + return np.nanmean(a, axis, out=out, keepdims=keepdims) + return fnb._ureduce(a, + func=_nanquantile_ureduce_func, + q=q, + weights=weights, + keepdims=keepdims, + axis=axis, + out=out, + overwrite_input=overwrite_input, + method=method, + weak_q=weak_q) + + +def _nanquantile_ureduce_func( + a: np.array, + q: np.array, + weights: np.array, + axis: int | None = None, + out=None, + overwrite_input: bool = False, + method="linear", + weak_q=False, +): + """ + Private function that doesn't support extended axis or keepdims. + These methods are extended to this function using _ureduce + See nanpercentile for parameter usage + """ + if axis is None or a.ndim == 1: + part = a.ravel() + wgt = None if weights is None else weights.ravel() + result = _nanquantile_1d(part, q, overwrite_input, method, + weights=wgt, weak_q=weak_q) + # Note that this code could try to fill in `out` right away + elif weights is None: + result = np.apply_along_axis(_nanquantile_1d, axis, a, q, + overwrite_input, method, weights, weak_q) + # apply_along_axis fills in collapsed axis with results. + # Move those axes to the beginning to match percentile's + # convention. + if q.ndim != 0: + from_ax = [axis + i for i in range(q.ndim)] + result = np.moveaxis(result, from_ax, list(range(q.ndim))) + else: + # We need to apply along axis over 2 arrays, a and weights. + # move operation axes to end for simplicity: + a = np.moveaxis(a, axis, -1) + if weights is not None: + weights = np.moveaxis(weights, axis, -1) + if out is not None: + result = out + else: + # weights are limited to `inverted_cdf` so the result dtype + # is known to be identical to that of `a` here: + result = np.empty_like(a, shape=q.shape + a.shape[:-1]) + + for ii in np.ndindex(a.shape[:-1]): + result[(...,) + ii] = _nanquantile_1d( + a[ii], q, weights=weights[ii], + overwrite_input=overwrite_input, method=method, + weak_q=weak_q, + ) + # This path dealt with `out` already... + return result + + if out is not None: + out[...] = result + return result + + +def _nanquantile_1d( + arr1d, q, overwrite_input=False, method="linear", weights=None, + weak_q=False, +): + """ + Private function for rank 1 arrays. Compute quantile ignoring NaNs. + See nanpercentile for parameter usage + """ + # TODO: What to do when arr1d = [1, np.nan] and weights = [0, 1]? + arr1d, weights, overwrite_input = _remove_nan_1d(arr1d, + second_arr1d=weights, overwrite_input=overwrite_input) + if arr1d.size == 0: + # convert to scalar + return np.full(q.shape, np.nan, dtype=arr1d.dtype)[()] + + return fnb._quantile_unchecked( + arr1d, + q, + overwrite_input=overwrite_input, + method=method, + weights=weights, + weak_q=weak_q, + ) + + +def _nanvar_dispatcher(a, axis=None, dtype=None, out=None, ddof=None, + keepdims=None, *, where=None, mean=None, + correction=None): + return (a, out) + + +@array_function_dispatch(_nanvar_dispatcher) +def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, + *, where=np._NoValue, mean=np._NoValue, correction=np._NoValue): + """ + Compute the variance along the specified axis, while ignoring NaNs. + + Returns the variance of the array elements, a measure of the spread of + a distribution. The variance is computed for the flattened array by + default, otherwise over the specified axis. + + For all-NaN slices or slices with zero degrees of freedom, NaN is + returned and a `RuntimeWarning` is raised. + + Parameters + ---------- + a : array_like + Array containing numbers whose variance is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the variance is computed. The default is to compute + the variance of the flattened array. + dtype : data-type, optional + Type to use in computing the variance. For arrays of integer type + the default is `float64`; for arrays of float types it is the same as + the array type. + out : ndarray, optional + Alternate output array in which to place the result. It must have + the same shape as the expected output, but the type is cast if + necessary. + ddof : {int, float}, optional + "Delta Degrees of Freedom": the divisor used in the calculation is + ``N - ddof``, where ``N`` represents the number of non-NaN + elements. By default `ddof` is zero. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + where : array_like of bool, optional + Elements to include in the variance. See `~numpy.ufunc.reduce` for + details. + + .. versionadded:: 1.22.0 + + mean : array_like, optional + Provide the mean to prevent its recalculation. The mean should have + a shape as if it was calculated with ``keepdims=True``. + The axis for the calculation of the mean should be the same as used in + the call to this var function. + + .. versionadded:: 2.0.0 + + correction : {int, float}, optional + Array API compatible name for the ``ddof`` parameter. Only one of them + can be provided at the same time. + + .. versionadded:: 2.0.0 + + Returns + ------- + variance : ndarray, see dtype parameter above + If `out` is None, return a new array containing the variance, + otherwise return a reference to the output array. If ddof is >= the + number of non-NaN elements in a slice or the slice contains only + NaNs, then the result for that slice is NaN. + + See Also + -------- + std : Standard deviation + mean : Average + var : Variance while not ignoring NaNs + nanstd, nanmean + :ref:`ufuncs-output-type` + + Notes + ----- + The variance is the average of the squared deviations from the mean, + i.e., ``var = mean(abs(x - x.mean())**2)``. + + The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``. + If, however, `ddof` is specified, the divisor ``N - ddof`` is used + instead. In standard statistical practice, ``ddof=1`` provides an + unbiased estimator of the variance of a hypothetical infinite + population. ``ddof=0`` provides a maximum likelihood estimate of the + variance for normally distributed variables. + + Note that for complex numbers, the absolute value is taken before + squaring, so that the result is always real and nonnegative. + + For floating-point input, the variance is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for `float32` (see example + below). Specifying a higher-accuracy accumulator using the ``dtype`` + keyword can alleviate this issue. + + For this function to work on sub-classes of ndarray, they must define + `sum` with the kwarg `keepdims` + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, np.nan], [3, 4]]) + >>> np.nanvar(a) + 1.5555555555555554 + >>> np.nanvar(a, axis=0) + array([1., 0.]) + >>> np.nanvar(a, axis=1) + array([0., 0.25]) # may vary + + """ + arr, mask = _replace_nan(a, 0) + if mask is None: + return np.var(arr, axis=axis, dtype=dtype, out=out, ddof=ddof, + keepdims=keepdims, where=where, mean=mean, + correction=correction) + + if dtype is not None: + dtype = np.dtype(dtype) + if dtype is not None and not issubclass(dtype.type, np.inexact): + raise TypeError("If a is inexact, then dtype must be inexact") + if out is not None and not issubclass(out.dtype.type, np.inexact): + raise TypeError("If a is inexact, then out must be inexact") + + if correction != np._NoValue: + if ddof != 0: + raise ValueError( + "ddof and correction can't be provided simultaneously." + ) + else: + ddof = correction + + # Compute mean + if type(arr) is np.matrix: + _keepdims = np._NoValue + else: + _keepdims = True + + cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=_keepdims, + where=where) + + if mean is not np._NoValue: + avg = mean + else: + # we need to special case matrix for reverse compatibility + # in order for this to work, these sums need to be called with + # keepdims=True, however matrix now raises an error in this case, but + # the reason that it drops the keepdims kwarg is to force keepdims=True + # so this used to work by serendipity. + avg = np.sum(arr, axis=axis, dtype=dtype, + keepdims=_keepdims, where=where) + avg = _divide_by_count(avg, cnt) + + # Compute squared deviation from mean. + np.subtract(arr, avg, out=arr, casting='unsafe', where=where) + arr = _copyto(arr, 0, mask) + if issubclass(arr.dtype.type, np.complexfloating): + sqr = np.multiply(arr, arr.conj(), out=arr, where=where).real + else: + sqr = np.multiply(arr, arr, out=arr, where=where) + + # Compute variance. + var = np.sum(sqr, axis=axis, dtype=dtype, out=out, keepdims=keepdims, + where=where) + + # Precaution against reduced object arrays + try: + var_ndim = var.ndim + except AttributeError: + var_ndim = np.ndim(var) + if var_ndim < cnt.ndim: + # Subclasses of ndarray may ignore keepdims, so check here. + cnt = cnt.squeeze(axis) + dof = cnt - ddof + var = _divide_by_count(var, dof) + + isbad = (dof <= 0) + if np.any(isbad): + warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning, + stacklevel=2) + # NaN, inf, or negative numbers are all possible bad + # values, so explicitly replace them with NaN. + var = _copyto(var, np.nan, isbad) + return var + + +def _nanstd_dispatcher(a, axis=None, dtype=None, out=None, ddof=None, + keepdims=None, *, where=None, mean=None, + correction=None): + return (a, out) + + +@array_function_dispatch(_nanstd_dispatcher) +def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, + *, where=np._NoValue, mean=np._NoValue, correction=np._NoValue): + """ + Compute the standard deviation along the specified axis, while + ignoring NaNs. + + Returns the standard deviation, a measure of the spread of a + distribution, of the non-NaN array elements. The standard deviation is + computed for the flattened array by default, otherwise over the + specified axis. + + For all-NaN slices or slices with zero degrees of freedom, NaN is + returned and a `RuntimeWarning` is raised. + + Parameters + ---------- + a : array_like + Calculate the standard deviation of the non-NaN values. + axis : {int, tuple of int, None}, optional + Axis or axes along which the standard deviation is computed. The default is + to compute the standard deviation of the flattened array. + dtype : dtype, optional + Type to use in computing the standard deviation. For arrays of + integer type the default is float64, for arrays of float types it + is the same as the array type. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type (of the + calculated values) will be cast if necessary. + ddof : {int, float}, optional + Means Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of non-NaN + elements. By default `ddof` is zero. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If this value is anything but the default it is passed through + as-is to the relevant functions of the sub-classes. If these + functions do not have a `keepdims` kwarg, a RuntimeError will + be raised. + where : array_like of bool, optional + Elements to include in the standard deviation. + See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.22.0 + + mean : array_like, optional + Provide the mean to prevent its recalculation. The mean should have + a shape as if it was calculated with ``keepdims=True``. + The axis for the calculation of the mean should be the same as used in + the call to this std function. + + .. versionadded:: 2.0.0 + + correction : {int, float}, optional + Array API compatible name for the ``ddof`` parameter. Only one of them + can be provided at the same time. + + .. versionadded:: 2.0.0 + + Returns + ------- + standard_deviation : ndarray, see dtype parameter above. + If `out` is None, return a new array containing the standard + deviation, otherwise return a reference to the output array. If + ddof is >= the number of non-NaN elements in a slice or the slice + contains only NaNs, then the result for that slice is NaN. + + See Also + -------- + var, mean, std + nanvar, nanmean + :ref:`ufuncs-output-type` + + Notes + ----- + The standard deviation is the square root of the average of the squared + deviations from the mean: ``std = sqrt(mean(abs(x - x.mean())**2))``. + + The average squared deviation is normally calculated as + ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is + specified, the divisor ``N - ddof`` is used instead. In standard + statistical practice, ``ddof=1`` provides an unbiased estimator of the + variance of the infinite population. ``ddof=0`` provides a maximum + likelihood estimate of the variance for normally distributed variables. + The standard deviation computed in this function is the square root of + the estimated variance, so even with ``ddof=1``, it will not be an + unbiased estimate of the standard deviation per se. + + Note that, for complex numbers, `std` takes the absolute value before + squaring, so that the result is always real and nonnegative. + + For floating-point input, the *std* is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for float32 (see example + below). Specifying a higher-accuracy accumulator using the `dtype` + keyword can alleviate this issue. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, np.nan], [3, 4]]) + >>> np.nanstd(a) + 1.247219128924647 + >>> np.nanstd(a, axis=0) + array([1., 0.]) + >>> np.nanstd(a, axis=1) + array([0., 0.5]) # may vary + + """ + var = nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof, + keepdims=keepdims, where=where, mean=mean, + correction=correction) + if isinstance(var, np.ndarray): + std = np.sqrt(var, out=var) + elif hasattr(var, 'dtype'): + std = var.dtype.type(np.sqrt(var)) + else: + std = np.sqrt(var) + return std diff --git a/local_packages/numpy/lib/_nanfunctions_impl.pyi b/local_packages/numpy/lib/_nanfunctions_impl.pyi new file mode 100644 index 0000000..fd5d277 --- /dev/null +++ b/local_packages/numpy/lib/_nanfunctions_impl.pyi @@ -0,0 +1,48 @@ +from numpy._core.fromnumeric import ( + amax, + amin, + argmax, + argmin, + cumprod, + cumsum, + mean, + prod, + std, + sum, + var, +) +from numpy.lib._function_base_impl import median, percentile, quantile + +__all__ = [ + "nansum", + "nanmax", + "nanmin", + "nanargmax", + "nanargmin", + "nanmean", + "nanmedian", + "nanpercentile", + "nanvar", + "nanstd", + "nanprod", + "nancumsum", + "nancumprod", + "nanquantile", +] + +# NOTE: In reality these functions are not aliases but distinct functions +# with identical signatures. +nanmin = amin +nanmax = amax +nanargmin = argmin +nanargmax = argmax +nansum = sum +nanprod = prod +nancumsum = cumsum +nancumprod = cumprod +nanmean = mean +nanvar = var +nanstd = std +nanmedian = median +nanpercentile = percentile +nanquantile = quantile diff --git a/local_packages/numpy/lib/_npyio_impl.py b/local_packages/numpy/lib/_npyio_impl.py new file mode 100644 index 0000000..72e746f --- /dev/null +++ b/local_packages/numpy/lib/_npyio_impl.py @@ -0,0 +1,2583 @@ +""" +IO related functions. +""" +import contextlib +import functools +import itertools +import operator +import os +import pickle +import re +import warnings +import weakref +from collections.abc import Mapping +from operator import itemgetter + +import numpy as np +from numpy._core import overrides +from numpy._core._multiarray_umath import _load_from_filelike +from numpy._core.multiarray import packbits, unpackbits +from numpy._core.overrides import finalize_array_function_like, set_module +from numpy._utils import asbytes, asunicode + +from . import format +from ._datasource import DataSource # noqa: F401 +from ._format_impl import _MAX_HEADER_SIZE +from ._iotools import ( + ConversionWarning, + ConverterError, + ConverterLockError, + LineSplitter, + NameValidator, + StringConverter, + _decode_line, + _is_string_like, + easy_dtype, + flatten_dtype, + has_nested_fields, +) + +__all__ = [ + 'savetxt', 'loadtxt', 'genfromtxt', 'load', 'save', 'savez', + 'savez_compressed', 'packbits', 'unpackbits', 'fromregex' + ] + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +class BagObj: + """ + BagObj(obj) + + Convert attribute look-ups to getitems on the object passed in. + + Parameters + ---------- + obj : class instance + Object on which attribute look-up is performed. + + Examples + -------- + >>> import numpy as np + >>> from numpy.lib._npyio_impl import BagObj as BO + >>> class BagDemo: + ... def __getitem__(self, key): # An instance of BagObj(BagDemo) + ... # will call this method when any + ... # attribute look-up is required + ... result = "Doesn't matter what you want, " + ... return result + "you're gonna get this" + ... + >>> demo_obj = BagDemo() + >>> bagobj = BO(demo_obj) + >>> bagobj.hello_there + "Doesn't matter what you want, you're gonna get this" + >>> bagobj.I_can_be_anything + "Doesn't matter what you want, you're gonna get this" + + """ + + def __init__(self, obj): + # Use weakref to make NpzFile objects collectable by refcount + self._obj = weakref.proxy(obj) + + def __getattribute__(self, key): + try: + return object.__getattribute__(self, '_obj')[key] + except KeyError: + raise AttributeError(key) from None + + def __dir__(self): + """ + Enables dir(bagobj) to list the files in an NpzFile. + + This also enables tab-completion in an interpreter or IPython. + """ + return list(object.__getattribute__(self, '_obj').keys()) + + +def zipfile_factory(file, *args, **kwargs): + """ + Create a ZipFile. + + Allows for Zip64, and the `file` argument can accept file, str, or + pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile + constructor. + """ + if not hasattr(file, 'read'): + file = os.fspath(file) + import zipfile + kwargs['allowZip64'] = True + return zipfile.ZipFile(file, *args, **kwargs) + + +@set_module('numpy.lib.npyio') +class NpzFile(Mapping): + """ + NpzFile(fid) + + A dictionary-like object with lazy-loading of files in the zipped + archive provided on construction. + + `NpzFile` is used to load files in the NumPy ``.npz`` data archive + format. It assumes that files in the archive have a ``.npy`` extension, + other files are ignored. + + The arrays and file strings are lazily loaded on either + getitem access using ``obj['key']`` or attribute lookup using + ``obj.f.key``. A list of all files (without ``.npy`` extensions) can + be obtained with ``obj.files`` and the ZipFile object itself using + ``obj.zip``. + + Attributes + ---------- + files : list of str + List of all files in the archive with a ``.npy`` extension. + zip : ZipFile instance + The ZipFile object initialized with the zipped archive. + f : BagObj instance + An object on which attribute can be performed as an alternative + to getitem access on the `NpzFile` instance itself. + allow_pickle : bool, optional + Allow loading pickled data. Default: False + pickle_kwargs : dict, optional + Additional keyword arguments to pass on to pickle.load. + These are only useful when loading object arrays saved on + Python 2. + max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. + See :py:func:`ast.literal_eval()` for details. + This option is ignored when `allow_pickle` is passed. In that case + the file is by definition trusted and the limit is unnecessary. + + Parameters + ---------- + fid : file, str, or pathlib.Path + The zipped archive to open. This is either a file-like object + or a string containing the path to the archive. + own_fid : bool, optional + Whether NpzFile should close the file handle. + Requires that `fid` is a file-like object. + + Examples + -------- + >>> import numpy as np + >>> from tempfile import TemporaryFile + >>> outfile = TemporaryFile() + >>> x = np.arange(10) + >>> y = np.sin(x) + >>> np.savez(outfile, x=x, y=y) + >>> _ = outfile.seek(0) + + >>> npz = np.load(outfile) + >>> isinstance(npz, np.lib.npyio.NpzFile) + True + >>> npz + NpzFile 'object' with keys: x, y + >>> sorted(npz.files) + ['x', 'y'] + >>> npz['x'] # getitem access + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> npz.f.x # attribute lookup + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + """ + # Make __exit__ safe if zipfile_factory raises an exception + zip = None + fid = None + _MAX_REPR_ARRAY_COUNT = 5 + + def __init__(self, fid, own_fid=False, allow_pickle=False, + pickle_kwargs=None, *, + max_header_size=_MAX_HEADER_SIZE): + # Import is postponed to here since zipfile depends on gzip, an + # optional component of the so-called standard library. + _zip = zipfile_factory(fid) + _files = _zip.namelist() + self.files = [name.removesuffix(".npy") for name in _files] + self._files = dict(zip(self.files, _files)) + self._files.update(zip(_files, _files)) + self.allow_pickle = allow_pickle + self.max_header_size = max_header_size + self.pickle_kwargs = pickle_kwargs + self.zip = _zip + self.f = BagObj(self) + if own_fid: + self.fid = fid + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + def close(self): + """ + Close the file. + + """ + if self.zip is not None: + self.zip.close() + self.zip = None + if self.fid is not None: + self.fid.close() + self.fid = None + self.f = None # break reference cycle + + def __del__(self): + self.close() + + # Implement the Mapping ABC + def __iter__(self): + return iter(self.files) + + def __len__(self): + return len(self.files) + + def __getitem__(self, key): + try: + key = self._files[key] + except KeyError: + raise KeyError(f"{key} is not a file in the archive") from None + else: + with self.zip.open(key) as bytes: + magic = bytes.read(len(format.MAGIC_PREFIX)) + bytes.seek(0) + if magic == format.MAGIC_PREFIX: + # FIXME: This seems like it will copy strings around + # more than is strictly necessary. The zipfile + # will read the string and then + # the format.read_array will copy the string + # to another place in memory. + # It would be better if the zipfile could read + # (or at least uncompress) the data + # directly into the array memory. + return format.read_array( + bytes, + allow_pickle=self.allow_pickle, + pickle_kwargs=self.pickle_kwargs, + max_header_size=self.max_header_size + ) + else: + return bytes.read() + + def __contains__(self, key): + return (key in self._files) + + def __repr__(self): + # Get filename or default to `object` + if isinstance(self.fid, str): + filename = self.fid + else: + filename = getattr(self.fid, "name", "object") + + # Get the name of arrays + array_names = ', '.join(self.files[:self._MAX_REPR_ARRAY_COUNT]) + if len(self.files) > self._MAX_REPR_ARRAY_COUNT: + array_names += "..." + return f"NpzFile {filename!r} with keys: {array_names}" + + # Work around problems with the docstrings in the Mapping methods + # They contain a `->`, which confuses the type annotation interpretations + # of sphinx-docs. See gh-25964 + + def get(self, key, default=None, /): + """ + D.get(k,[,d]) returns D[k] if k in D, else d. d defaults to None. + """ + return Mapping.get(self, key, default) + + def items(self): + """ + D.items() returns a set-like object providing a view on the items + """ + return Mapping.items(self) + + def keys(self): + """ + D.keys() returns a set-like object providing a view on the keys + """ + return Mapping.keys(self) + + def values(self): + """ + D.values() returns a set-like object providing a view on the values + """ + return Mapping.values(self) + + +@set_module('numpy') +def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, + encoding='ASCII', *, max_header_size=_MAX_HEADER_SIZE): + """ + Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files. + + .. warning:: Loading files that contain object arrays uses the ``pickle`` + module, which is not secure against erroneous or maliciously + constructed data. Consider passing ``allow_pickle=False`` to + load data that is known not to contain object arrays for the + safer handling of untrusted sources. + + Parameters + ---------- + file : file-like object, string, or pathlib.Path + The file to read. File-like objects must support the + ``seek()`` and ``read()`` methods and must always + be opened in binary mode. Pickled files require that the + file-like object support the ``readline()`` method as well. + mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional + If not None, then memory-map the file, using the given mode (see + `numpy.memmap` for a detailed description of the modes). A + memory-mapped array is kept on disk. However, it can be accessed + and sliced like any ndarray. Memory mapping is especially useful + for accessing small fragments of large files without reading the + entire file into memory. + allow_pickle : bool, optional + Allow loading pickled object arrays stored in npy files. Reasons for + disallowing pickles include security, as loading pickled data can + execute arbitrary code. If pickles are disallowed, loading object + arrays will fail. Default: False + fix_imports : bool, optional + Only useful when loading Python 2 generated pickled files, + which includes npy/npz files containing object arrays. If `fix_imports` + is True, pickle will try to map the old Python 2 names to the new names + used in Python 3. + encoding : str, optional + What encoding to use when reading Python 2 strings. Only useful when + loading Python 2 generated pickled files, which includes + npy/npz files containing object arrays. Values other than 'latin1', + 'ASCII', and 'bytes' are not allowed, as they can corrupt numerical + data. Default: 'ASCII' + max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. + See :py:func:`ast.literal_eval()` for details. + This option is ignored when `allow_pickle` is passed. In that case + the file is by definition trusted and the limit is unnecessary. + + Returns + ------- + result : array, tuple, dict, etc. + Data stored in the file. For ``.npz`` files, the returned instance + of NpzFile class must be closed to avoid leaking file descriptors. + + Raises + ------ + OSError + If the input file does not exist or cannot be read. + UnpicklingError + If ``allow_pickle=True``, but the file cannot be loaded as a pickle. + ValueError + The file contains an object array, but ``allow_pickle=False`` given. + EOFError + When calling ``np.load`` multiple times on the same file handle, + if all data has already been read + + See Also + -------- + save, savez, savez_compressed, loadtxt + memmap : Create a memory-map to an array stored in a file on disk. + lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file. + + Notes + ----- + - If the file contains pickle data, then whatever object is stored + in the pickle is returned. + - If the file is a ``.npy`` file, then a single array is returned. + - If the file is a ``.npz`` file, then a dictionary-like object is + returned, containing ``{filename: array}`` key-value pairs, one for + each file in the archive. + - If the file is a ``.npz`` file, the returned value supports the + context manager protocol in a similar fashion to the open function:: + + with load('foo.npz') as data: + a = data['a'] + + The underlying file descriptor is closed when exiting the 'with' + block. + + Examples + -------- + >>> import numpy as np + + Store data to disk, and load it again: + + >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]])) + >>> np.load('/tmp/123.npy') + array([[1, 2, 3], + [4, 5, 6]]) + + Store compressed data to disk, and load it again: + + >>> a=np.array([[1, 2, 3], [4, 5, 6]]) + >>> b=np.array([1, 2]) + >>> np.savez('/tmp/123.npz', a=a, b=b) + >>> data = np.load('/tmp/123.npz') + >>> data['a'] + array([[1, 2, 3], + [4, 5, 6]]) + >>> data['b'] + array([1, 2]) + >>> data.close() + + Mem-map the stored array, and then access the second row + directly from disk: + + >>> X = np.load('/tmp/123.npy', mmap_mode='r') + >>> X[1, :] + memmap([4, 5, 6]) + + """ + if encoding not in ('ASCII', 'latin1', 'bytes'): + # The 'encoding' value for pickle also affects what encoding + # the serialized binary data of NumPy arrays is loaded + # in. Pickle does not pass on the encoding information to + # NumPy. The unpickling code in numpy._core.multiarray is + # written to assume that unicode data appearing where binary + # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'. + # + # Other encoding values can corrupt binary data, and we + # purposefully disallow them. For the same reason, the errors= + # argument is not exposed, as values other than 'strict' + # result can similarly silently corrupt numerical data. + raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'") + + pickle_kwargs = {'encoding': encoding, 'fix_imports': fix_imports} + + with contextlib.ExitStack() as stack: + if hasattr(file, 'read'): + fid = file + own_fid = False + else: + fid = stack.enter_context(open(os.fspath(file), "rb")) + own_fid = True + + # Code to distinguish from NumPy binary files and pickles. + _ZIP_PREFIX = b'PK\x03\x04' + _ZIP_SUFFIX = b'PK\x05\x06' # empty zip files start with this + N = len(format.MAGIC_PREFIX) + magic = fid.read(N) + if not magic: + raise EOFError("No data left in file") + # If the file size is less than N, we need to make sure not + # to seek past the beginning of the file + fid.seek(-min(N, len(magic)), 1) # back-up + if magic.startswith((_ZIP_PREFIX, _ZIP_SUFFIX)): + # zip-file (assume .npz) + # Potentially transfer file ownership to NpzFile + stack.pop_all() + ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle, + pickle_kwargs=pickle_kwargs, + max_header_size=max_header_size) + return ret + elif magic == format.MAGIC_PREFIX: + # .npy file + if mmap_mode: + if allow_pickle: + max_header_size = 2**64 + return format.open_memmap(file, mode=mmap_mode, + max_header_size=max_header_size) + else: + return format.read_array(fid, allow_pickle=allow_pickle, + pickle_kwargs=pickle_kwargs, + max_header_size=max_header_size) + else: + # Try a pickle + if not allow_pickle: + raise ValueError( + "This file contains pickled (object) data. If you trust " + "the file you can load it unsafely using the " + "`allow_pickle=` keyword argument or `pickle.load()`.") + try: + return pickle.load(fid, **pickle_kwargs) + except Exception as e: + raise pickle.UnpicklingError( + f"Failed to interpret file {file!r} as a pickle") from e + + +def _save_dispatcher(file, arr, allow_pickle=None): + return (arr,) + + +@array_function_dispatch(_save_dispatcher) +def save(file, arr, allow_pickle=True): + """ + Save an array to a binary file in NumPy ``.npy`` format. + + Parameters + ---------- + file : file, str, or pathlib.Path + File or filename to which the data is saved. If file is a file-object, + then the filename is unchanged. If file is a string or Path, + a ``.npy`` extension will be appended to the filename if it does not + already have one. + arr : array_like + Array data to be saved. + allow_pickle : bool, optional + Allow saving object arrays using Python pickles. Reasons for + disallowing pickles include security (loading pickled data can execute + arbitrary code) and portability (pickled objects may not be loadable + on different Python installations, for example if the stored objects + require libraries that are not available, and not all pickled data is + compatible between different versions of Python). + Default: True + + See Also + -------- + savez : Save several arrays into a ``.npz`` archive + savetxt, load + + Notes + ----- + For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`. + + Any data saved to the file is appended to the end of the file. + + Examples + -------- + >>> import numpy as np + + >>> from tempfile import TemporaryFile + >>> outfile = TemporaryFile() + + >>> x = np.arange(10) + >>> np.save(outfile, x) + + >>> _ = outfile.seek(0) # Only needed to simulate closing & reopening file + >>> np.load(outfile) + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + + >>> with open('test.npy', 'wb') as f: + ... np.save(f, np.array([1, 2])) + ... np.save(f, np.array([1, 3])) + >>> with open('test.npy', 'rb') as f: + ... a = np.load(f) + ... b = np.load(f) + >>> print(a, b) + # [1 2] [1 3] + """ + if hasattr(file, 'write'): + file_ctx = contextlib.nullcontext(file) + else: + file = os.fspath(file) + if not file.endswith('.npy'): + file = file + '.npy' + file_ctx = open(file, "wb") + + with file_ctx as fid: + arr = np.asanyarray(arr) + format.write_array(fid, arr, allow_pickle=allow_pickle) + + +def _savez_dispatcher(file, *args, allow_pickle=True, **kwds): + yield from args + yield from kwds.values() + + +@array_function_dispatch(_savez_dispatcher) +def savez(file, *args, allow_pickle=True, **kwds): + """Save several arrays into a single file in uncompressed ``.npz`` format. + + Provide arrays as keyword arguments to store them under the + corresponding name in the output file: ``savez(fn, x=x, y=y)``. + + If arrays are specified as positional arguments, i.e., ``savez(fn, + x, y)``, their names will be `arr_0`, `arr_1`, etc. + + Parameters + ---------- + file : file, str, or pathlib.Path + Either the filename (string) or an open file (file-like object) + where the data will be saved. If file is a string or a Path, the + ``.npz`` extension will be appended to the filename if it is not + already there. + args : Arguments, optional + Arrays to save to the file. Please use keyword arguments (see + `kwds` below) to assign names to arrays. Arrays specified as + args will be named "arr_0", "arr_1", and so on. + allow_pickle : bool, optional + Allow saving object arrays using Python pickles. Reasons for + disallowing pickles include security (loading pickled data can execute + arbitrary code) and portability (pickled objects may not be loadable + on different Python installations, for example if the stored objects + require libraries that are not available, and not all pickled data is + compatible between different versions of Python). + Default: True + kwds : Keyword arguments, optional + Arrays to save to the file. Each array will be saved to the + output file with its corresponding keyword name. + + Returns + ------- + None + + See Also + -------- + save : Save a single array to a binary file in NumPy format. + savetxt : Save an array to a file as plain text. + savez_compressed : Save several arrays into a compressed ``.npz`` archive + + Notes + ----- + The ``.npz`` file format is a zipped archive of files named after the + variables they contain. The archive is not compressed and each file + in the archive contains one variable in ``.npy`` format. For a + description of the ``.npy`` format, see :py:mod:`numpy.lib.format`. + + When opening the saved ``.npz`` file with `load` a `~lib.npyio.NpzFile` + object is returned. This is a dictionary-like object which can be queried + for its list of arrays (with the ``.files`` attribute), and for the arrays + themselves. + + Keys passed in `kwds` are used as filenames inside the ZIP archive. + Therefore, keys should be valid filenames; e.g., avoid keys that begin with + ``/`` or contain ``.``. + + When naming variables with keyword arguments, it is not possible to name a + variable ``file``, as this would cause the ``file`` argument to be defined + twice in the call to ``savez``. + + Examples + -------- + >>> import numpy as np + >>> from tempfile import TemporaryFile + >>> outfile = TemporaryFile() + >>> x = np.arange(10) + >>> y = np.sin(x) + + Using `savez` with \\*args, the arrays are saved with default names. + + >>> np.savez(outfile, x, y) + >>> _ = outfile.seek(0) # Only needed to simulate closing & reopening file + >>> npzfile = np.load(outfile) + >>> npzfile.files + ['arr_0', 'arr_1'] + >>> npzfile['arr_0'] + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + Using `savez` with \\**kwds, the arrays are saved with the keyword names. + + >>> outfile = TemporaryFile() + >>> np.savez(outfile, x=x, y=y) + >>> _ = outfile.seek(0) + >>> npzfile = np.load(outfile) + >>> sorted(npzfile.files) + ['x', 'y'] + >>> npzfile['x'] + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + """ + _savez(file, args, kwds, False, allow_pickle=allow_pickle) + + +def _savez_compressed_dispatcher(file, *args, allow_pickle=True, **kwds): + yield from args + yield from kwds.values() + + +@array_function_dispatch(_savez_compressed_dispatcher) +def savez_compressed(file, *args, allow_pickle=True, **kwds): + """ + Save several arrays into a single file in compressed ``.npz`` format. + + Provide arrays as keyword arguments to store them under the + corresponding name in the output file: ``savez_compressed(fn, x=x, y=y)``. + + If arrays are specified as positional arguments, i.e., + ``savez_compressed(fn, x, y)``, their names will be `arr_0`, `arr_1`, etc. + + Parameters + ---------- + file : file, str, or pathlib.Path + Either the filename (string) or an open file (file-like object) + where the data will be saved. If file is a string or a Path, the + ``.npz`` extension will be appended to the filename if it is not + already there. + args : Arguments, optional + Arrays to save to the file. Please use keyword arguments (see + `kwds` below) to assign names to arrays. Arrays specified as + args will be named "arr_0", "arr_1", and so on. + allow_pickle : bool, optional + Allow saving object arrays using Python pickles. Reasons for + disallowing pickles include security (loading pickled data can execute + arbitrary code) and portability (pickled objects may not be loadable + on different Python installations, for example if the stored objects + require libraries that are not available, and not all pickled data is + compatible between different versions of Python). + Default: True + kwds : Keyword arguments, optional + Arrays to save to the file. Each array will be saved to the + output file with its corresponding keyword name. + + Returns + ------- + None + + See Also + -------- + numpy.save : Save a single array to a binary file in NumPy format. + numpy.savetxt : Save an array to a file as plain text. + numpy.savez : Save several arrays into an uncompressed ``.npz`` file format + numpy.load : Load the files created by savez_compressed. + + Notes + ----- + The ``.npz`` file format is a zipped archive of files named after the + variables they contain. The archive is compressed with + ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable + in ``.npy`` format. For a description of the ``.npy`` format, see + :py:mod:`numpy.lib.format`. + + + When opening the saved ``.npz`` file with `load` a `~lib.npyio.NpzFile` + object is returned. This is a dictionary-like object which can be queried + for its list of arrays (with the ``.files`` attribute), and for the arrays + themselves. + + Examples + -------- + >>> import numpy as np + >>> test_array = np.random.rand(3, 2) + >>> test_vector = np.random.rand(4) + >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector) + >>> loaded = np.load('/tmp/123.npz') + >>> print(np.array_equal(test_array, loaded['a'])) + True + >>> print(np.array_equal(test_vector, loaded['b'])) + True + + """ + _savez(file, args, kwds, True, allow_pickle=allow_pickle) + + +def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None): + # Import is postponed to here since zipfile depends on gzip, an optional + # component of the so-called standard library. + import zipfile + + if not hasattr(file, 'write'): + file = os.fspath(file) + if not file.endswith('.npz'): + file = file + '.npz' + + namedict = kwds + for i, val in enumerate(args): + key = 'arr_%d' % i + if key in namedict.keys(): + raise ValueError( + f"Cannot use un-named variables and keyword {key}") + namedict[key] = val + + if compress: + compression = zipfile.ZIP_DEFLATED + else: + compression = zipfile.ZIP_STORED + + zipf = zipfile_factory(file, mode="w", compression=compression) + try: + for key, val in namedict.items(): + fname = key + '.npy' + val = np.asanyarray(val) + # always force zip64, gh-10776 + with zipf.open(fname, 'w', force_zip64=True) as fid: + format.write_array(fid, val, + allow_pickle=allow_pickle, + pickle_kwargs=pickle_kwargs) + finally: + zipf.close() + + +def _ensure_ndmin_ndarray_check_param(ndmin): + """Just checks if the param ndmin is supported on + _ensure_ndmin_ndarray. It is intended to be used as + verification before running anything expensive. + e.g. loadtxt, genfromtxt + """ + # Check correctness of the values of `ndmin` + if ndmin not in [0, 1, 2]: + raise ValueError(f"Illegal value of ndmin keyword: {ndmin}") + +def _ensure_ndmin_ndarray(a, *, ndmin: int): + """This is a helper function of loadtxt and genfromtxt to ensure + proper minimum dimension as requested + + ndim : int. Supported values 1, 2, 3 + ^^ whenever this changes, keep in sync with + _ensure_ndmin_ndarray_check_param + """ + # Verify that the array has at least dimensions `ndmin`. + # Tweak the size and shape of the arrays - remove extraneous dimensions + if a.ndim > ndmin: + a = np.squeeze(a) + # and ensure we have the minimum number of dimensions asked for + # - has to be in this order for the odd case ndmin=1, a.squeeze().ndim=0 + if a.ndim < ndmin: + if ndmin == 1: + a = np.atleast_1d(a) + elif ndmin == 2: + a = np.atleast_2d(a).T + + return a + + +# amount of lines loadtxt reads in one chunk, can be overridden for testing +_loadtxt_chunksize = 50000 + + +def _check_nonneg_int(value, name="argument"): + try: + operator.index(value) + except TypeError: + raise TypeError(f"{name} must be an integer") from None + if value < 0: + raise ValueError(f"{name} must be nonnegative") + + +def _preprocess_comments(iterable, comments, encoding): + """ + Generator that consumes a line iterated iterable and strips out the + multiple (or multi-character) comments from lines. + This is a pre-processing step to achieve feature parity with loadtxt + (we assume that this feature is a nieche feature). + """ + for line in iterable: + if isinstance(line, bytes): + # Need to handle conversion here, or the splitting would fail + line = line.decode(encoding) + + for c in comments: + line = line.split(c, 1)[0] + + yield line + + +# The number of rows we read in one go if confronted with a parametric dtype +_loadtxt_chunksize = 50000 + + +def _read(fname, *, delimiter=',', comment='#', quote='"', + imaginary_unit='j', usecols=None, skiplines=0, + max_rows=None, converters=None, ndmin=None, unpack=False, + dtype=np.float64, encoding=None): + r""" + Read a NumPy array from a text file. + This is a helper function for loadtxt. + + Parameters + ---------- + fname : file, str, or pathlib.Path + The filename or the file to be read. + delimiter : str, optional + Field delimiter of the fields in line of the file. + Default is a comma, ','. If None any sequence of whitespace is + considered a delimiter. + comment : str or sequence of str or None, optional + Character that begins a comment. All text from the comment + character to the end of the line is ignored. + Multiple comments or multiple-character comment strings are supported, + but may be slower and `quote` must be empty if used. + Use None to disable all use of comments. + quote : str or None, optional + Character that is used to quote string fields. Default is '"' + (a double quote). Use None to disable quote support. + imaginary_unit : str, optional + Character that represent the imaginary unit `sqrt(-1)`. + Default is 'j'. + usecols : array_like, optional + A one-dimensional array of integer column numbers. These are the + columns from the file to be included in the array. If this value + is not given, all the columns are used. + skiplines : int, optional + Number of lines to skip before interpreting the data in the file. + max_rows : int, optional + Maximum number of rows of data to read. Default is to read the + entire file. + converters : dict or callable, optional + A function to parse all columns strings into the desired value, or + a dictionary mapping column number to a parser function. + E.g. if column 0 is a date string: ``converters = {0: datestr2num}``. + Converters can also be used to provide a default value for missing + data, e.g. ``converters = lambda s: float(s.strip() or 0)`` will + convert empty fields to 0. + Default: None + ndmin : int, optional + Minimum dimension of the array returned. + Allowed values are 0, 1 or 2. Default is 0. + unpack : bool, optional + If True, the returned array is transposed, so that arguments may be + unpacked using ``x, y, z = read(...)``. When used with a structured + data-type, arrays are returned for each field. Default is False. + dtype : numpy data type + A NumPy dtype instance, can be a structured dtype to map to the + columns of the file. + encoding : str, optional + Encoding used to decode the inputfile. The special value 'bytes' + (the default) enables backwards-compatible behavior for `converters`, + ensuring that inputs to the converter functions are encoded + bytes objects. The special value 'bytes' has no additional effect if + ``converters=None``. If encoding is ``'bytes'`` or ``None``, the + default system encoding is used. + + Returns + ------- + ndarray + NumPy array. + """ + # Handle special 'bytes' keyword for encoding + byte_converters = False + if encoding == 'bytes': + encoding = None + byte_converters = True + + if dtype is None: + raise TypeError("a dtype must be provided.") + dtype = np.dtype(dtype) + + read_dtype_via_object_chunks = None + if dtype.kind in 'SUM' and dtype in { + np.dtype("S0"), np.dtype("U0"), np.dtype("M8"), np.dtype("m8")}: + # This is a legacy "flexible" dtype. We do not truly support + # parametric dtypes currently (no dtype discovery step in the core), + # but have to support these for backward compatibility. + read_dtype_via_object_chunks = dtype + dtype = np.dtype(object) + + if usecols is not None: + # Allow usecols to be a single int or a sequence of ints, the C-code + # handles the rest + try: + usecols = list(usecols) + except TypeError: + usecols = [usecols] + + _ensure_ndmin_ndarray_check_param(ndmin) + + if comment is None: + comments = None + else: + # assume comments are a sequence of strings + if "" in comment: + raise ValueError( + "comments cannot be an empty string. Use comments=None to " + "disable comments." + ) + comments = tuple(comment) + comment = None + if len(comments) == 0: + comments = None # No comments at all + elif len(comments) == 1: + # If there is only one comment, and that comment has one character, + # the normal parsing can deal with it just fine. + if isinstance(comments[0], str) and len(comments[0]) == 1: + comment = comments[0] + comments = None + # Input validation if there are multiple comment characters + elif delimiter in comments: + raise TypeError( + f"Comment characters '{comments}' cannot include the " + f"delimiter '{delimiter}'" + ) + + # comment is now either a 1 or 0 character string or a tuple: + if comments is not None: + # Note: An earlier version support two character comments (and could + # have been extended to multiple characters, we assume this is + # rare enough to not optimize for. + if quote is not None: + raise ValueError( + "when multiple comments or a multi-character comment is " + "given, quotes are not supported. In this case quotechar " + "must be set to None.") + + if len(imaginary_unit) != 1: + raise ValueError('len(imaginary_unit) must be 1.') + + _check_nonneg_int(skiplines) + if max_rows is not None: + _check_nonneg_int(max_rows) + else: + # Passing -1 to the C code means "read the entire file". + max_rows = -1 + + fh_closing_ctx = contextlib.nullcontext() + filelike = False + try: + if isinstance(fname, os.PathLike): + fname = os.fspath(fname) + if isinstance(fname, str): + fh = np.lib._datasource.open(fname, 'rt', encoding=encoding) + if encoding is None: + encoding = getattr(fh, 'encoding', 'latin1') + + fh_closing_ctx = contextlib.closing(fh) + data = fh + filelike = True + else: + if encoding is None: + encoding = getattr(fname, 'encoding', 'latin1') + data = iter(fname) + except TypeError as e: + raise ValueError( + f"fname must be a string, filehandle, list of strings,\n" + f"or generator. Got {type(fname)} instead.") from e + + with fh_closing_ctx: + if comments is not None: + if filelike: + data = iter(data) + filelike = False + data = _preprocess_comments(data, comments, encoding) + + if read_dtype_via_object_chunks is None: + arr = _load_from_filelike( + data, delimiter=delimiter, comment=comment, quote=quote, + imaginary_unit=imaginary_unit, + usecols=usecols, skiplines=skiplines, max_rows=max_rows, + converters=converters, dtype=dtype, + encoding=encoding, filelike=filelike, + byte_converters=byte_converters) + + else: + # This branch reads the file into chunks of object arrays and then + # casts them to the desired actual dtype. This ensures correct + # string-length and datetime-unit discovery (like `arr.astype()`). + # Due to chunking, certain error reports are less clear, currently. + if filelike: + data = iter(data) # cannot chunk when reading from file + filelike = False + + c_byte_converters = False + if read_dtype_via_object_chunks == "S": + c_byte_converters = True # Use latin1 rather than ascii + + chunks = [] + while max_rows != 0: + if max_rows < 0: + chunk_size = _loadtxt_chunksize + else: + chunk_size = min(_loadtxt_chunksize, max_rows) + + next_arr = _load_from_filelike( + data, delimiter=delimiter, comment=comment, quote=quote, + imaginary_unit=imaginary_unit, + usecols=usecols, skiplines=skiplines, max_rows=chunk_size, + converters=converters, dtype=dtype, + encoding=encoding, filelike=filelike, + byte_converters=byte_converters, + c_byte_converters=c_byte_converters) + # Cast here already. We hope that this is better even for + # large files because the storage is more compact. It could + # be adapted (in principle the concatenate could cast). + chunks.append(next_arr.astype(read_dtype_via_object_chunks)) + + skiplines = 0 # Only have to skip for first chunk + if max_rows >= 0: + max_rows -= chunk_size + if len(next_arr) < chunk_size: + # There was less data than requested, so we are done. + break + + # Need at least one chunk, but if empty, the last one may have + # the wrong shape. + if len(chunks) > 1 and len(chunks[-1]) == 0: + del chunks[-1] + if len(chunks) == 1: + arr = chunks[0] + else: + arr = np.concatenate(chunks, axis=0) + + # NOTE: ndmin works as advertised for structured dtypes, but normally + # these would return a 1D result plus the structured dimension, + # so ndmin=2 adds a third dimension even when no squeezing occurs. + # A `squeeze=False` could be a better solution (pandas uses squeeze). + arr = _ensure_ndmin_ndarray(arr, ndmin=ndmin) + + if arr.shape: + if arr.shape[0] == 0: + warnings.warn( + f'loadtxt: input contained no data: "{fname}"', + category=UserWarning, + stacklevel=3 + ) + + if unpack: + # Unpack structured dtypes if requested: + dt = arr.dtype + if dt.names is not None: + # For structured arrays, return an array for each field. + return [arr[field] for field in dt.names] + else: + return arr.T + else: + return arr + + +@finalize_array_function_like +@set_module('numpy') +def loadtxt(fname, dtype=float, comments='#', delimiter=None, + converters=None, skiprows=0, usecols=None, unpack=False, + ndmin=0, encoding=None, max_rows=None, *, quotechar=None, + like=None): + r""" + Load data from a text file. + + Parameters + ---------- + fname : file, str, pathlib.Path, list of str, generator + File, filename, list, or generator to read. If the filename + extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note + that generators must return bytes or strings. The strings + in a list or produced by a generator are treated as lines. + dtype : data-type, optional + Data-type of the resulting array; default: float. If this is a + structured data-type, the resulting array will be 1-dimensional, and + each row will be interpreted as an element of the array. In this + case, the number of columns used must match the number of fields in + the data-type. + comments : str or sequence of str or None, optional + The characters or list of characters used to indicate the start of a + comment. None implies no comments. For backwards compatibility, byte + strings will be decoded as 'latin1'. The default is '#'. + delimiter : str, optional + The character used to separate the values. For backwards compatibility, + byte strings will be decoded as 'latin1'. The default is whitespace. + + .. versionchanged:: 1.23.0 + Only single character delimiters are supported. Newline characters + cannot be used as the delimiter. + + converters : dict or callable, optional + Converter functions to customize value parsing. If `converters` is + callable, the function is applied to all columns, else it must be a + dict that maps column number to a parser function. + See examples for further details. + Default: None. + + .. versionchanged:: 1.23.0 + The ability to pass a single callable to be applied to all columns + was added. + + skiprows : int, optional + Skip the first `skiprows` lines, including comments; default: 0. + usecols : int or sequence, optional + Which columns to read, with 0 being the first. For example, + ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns. + The default, None, results in all columns being read. + unpack : bool, optional + If True, the returned array is transposed, so that arguments may be + unpacked using ``x, y, z = loadtxt(...)``. When used with a + structured data-type, arrays are returned for each field. + Default is False. + ndmin : int, optional + The returned array will have at least `ndmin` dimensions. + Otherwise mono-dimensional axes will be squeezed. + Legal values: 0 (default), 1 or 2. + encoding : str, optional + Encoding used to decode the inputfile. Does not apply to input streams. + The special value 'bytes' enables backward compatibility workarounds + that ensures you receive byte arrays as results if possible and passes + 'latin1' encoded strings to converters. Override this value to receive + unicode arrays and pass strings as input to converters. If set to None + the system default is used. The default value is None. + + .. versionchanged:: 2.0 + Before NumPy 2, the default was ``'bytes'`` for Python 2 + compatibility. The default is now ``None``. + + max_rows : int, optional + Read `max_rows` rows of content after `skiprows` lines. The default is + to read all the rows. Note that empty rows containing no data such as + empty lines and comment lines are not counted towards `max_rows`, + while such lines are counted in `skiprows`. + + .. versionchanged:: 1.23.0 + Lines containing no data, including comment lines (e.g., lines + starting with '#' or as specified via `comments`) are not counted + towards `max_rows`. + quotechar : unicode character or None, optional + The character used to denote the start and end of a quoted item. + Occurrences of the delimiter or comment characters are ignored within + a quoted item. The default value is ``quotechar=None``, which means + quoting support is disabled. + + If two consecutive instances of `quotechar` are found within a quoted + field, the first is treated as an escape character. See examples. + + .. versionadded:: 1.23.0 + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Data read from the text file. + + See Also + -------- + load, fromstring, fromregex + genfromtxt : Load data with missing values handled as specified. + scipy.io.loadmat : reads MATLAB data files + + Notes + ----- + This function aims to be a fast reader for simply formatted files. The + `genfromtxt` function provides more sophisticated handling of, e.g., + lines with missing values. + + Each row in the input text file must have the same number of values to be + able to read all values. If all rows do not have same number of values, a + subset of up to n columns (where n is the least number of values present + in all rows) can be read by specifying the columns via `usecols`. + + The strings produced by the Python float.hex method can be used as + input for floats. + + Examples + -------- + >>> import numpy as np + >>> from io import StringIO # StringIO behaves like a file object + >>> c = StringIO("0 1\n2 3") + >>> np.loadtxt(c) + array([[0., 1.], + [2., 3.]]) + + >>> d = StringIO("M 21 72\nF 35 58") + >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'), + ... 'formats': ('S1', 'i4', 'f4')}) + array([(b'M', 21, 72.), (b'F', 35, 58.)], + dtype=[('gender', 'S1'), ('age', '>> c = StringIO("1,0,2\n3,0,4") + >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True) + >>> x + array([1., 3.]) + >>> y + array([2., 4.]) + + The `converters` argument is used to specify functions to preprocess the + text prior to parsing. `converters` can be a dictionary that maps + preprocessing functions to each column: + + >>> s = StringIO("1.618, 2.296\n3.141, 4.669\n") + >>> conv = { + ... 0: lambda x: np.floor(float(x)), # conversion fn for column 0 + ... 1: lambda x: np.ceil(float(x)), # conversion fn for column 1 + ... } + >>> np.loadtxt(s, delimiter=",", converters=conv) + array([[1., 3.], + [3., 5.]]) + + `converters` can be a callable instead of a dictionary, in which case it + is applied to all columns: + + >>> s = StringIO("0xDE 0xAD\n0xC0 0xDE") + >>> import functools + >>> conv = functools.partial(int, base=16) + >>> np.loadtxt(s, converters=conv) + array([[222., 173.], + [192., 222.]]) + + This example shows how `converters` can be used to convert a field + with a trailing minus sign into a negative number. + + >>> s = StringIO("10.01 31.25-\n19.22 64.31\n17.57- 63.94") + >>> def conv(fld): + ... return -float(fld[:-1]) if fld.endswith("-") else float(fld) + ... + >>> np.loadtxt(s, converters=conv) + array([[ 10.01, -31.25], + [ 19.22, 64.31], + [-17.57, 63.94]]) + + Using a callable as the converter can be particularly useful for handling + values with different formatting, e.g. floats with underscores: + + >>> s = StringIO("1 2.7 100_000") + >>> np.loadtxt(s, converters=float) + array([1.e+00, 2.7e+00, 1.e+05]) + + This idea can be extended to automatically handle values specified in + many different formats, such as hex values: + + >>> def conv(val): + ... try: + ... return float(val) + ... except ValueError: + ... return float.fromhex(val) + >>> s = StringIO("1, 2.5, 3_000, 0b4, 0x1.4000000000000p+2") + >>> np.loadtxt(s, delimiter=",", converters=conv) + array([1.0e+00, 2.5e+00, 3.0e+03, 1.8e+02, 5.0e+00]) + + Or a format where the ``-`` sign comes after the number: + + >>> s = StringIO("10.01 31.25-\n19.22 64.31\n17.57- 63.94") + >>> conv = lambda x: -float(x[:-1]) if x.endswith("-") else float(x) + >>> np.loadtxt(s, converters=conv) + array([[ 10.01, -31.25], + [ 19.22, 64.31], + [-17.57, 63.94]]) + + Support for quoted fields is enabled with the `quotechar` parameter. + Comment and delimiter characters are ignored when they appear within a + quoted item delineated by `quotechar`: + + >>> s = StringIO('"alpha, #42", 10.0\n"beta, #64", 2.0\n') + >>> dtype = np.dtype([("label", "U12"), ("value", float)]) + >>> np.loadtxt(s, dtype=dtype, delimiter=",", quotechar='"') + array([('alpha, #42', 10.), ('beta, #64', 2.)], + dtype=[('label', '>> s = StringIO('"alpha, #42" 10.0\n"beta, #64" 2.0\n') + >>> dtype = np.dtype([("label", "U12"), ("value", float)]) + >>> np.loadtxt(s, dtype=dtype, delimiter=None, quotechar='"') + array([('alpha, #42', 10.), ('beta, #64', 2.)], + dtype=[('label', '>> s = StringIO('"Hello, my name is ""Monty""!"') + >>> np.loadtxt(s, dtype="U", delimiter=",", quotechar='"') + array('Hello, my name is "Monty"!', dtype='>> d = StringIO("1 2\n2 4\n3 9 12\n4 16 20") + >>> np.loadtxt(d, usecols=(0, 1)) + array([[ 1., 2.], + [ 2., 4.], + [ 3., 9.], + [ 4., 16.]]) + + """ + + if like is not None: + return _loadtxt_with_like( + like, fname, dtype=dtype, comments=comments, delimiter=delimiter, + converters=converters, skiprows=skiprows, usecols=usecols, + unpack=unpack, ndmin=ndmin, encoding=encoding, + max_rows=max_rows + ) + + if isinstance(delimiter, bytes): + delimiter.decode("latin1") + + if dtype is None: + dtype = np.float64 + + comment = comments + # Control character type conversions for Py3 convenience + if comment is not None: + if isinstance(comment, (str, bytes)): + comment = [comment] + comment = [ + x.decode('latin1') if isinstance(x, bytes) else x for x in comment] + if isinstance(delimiter, bytes): + delimiter = delimiter.decode('latin1') + + arr = _read(fname, dtype=dtype, comment=comment, delimiter=delimiter, + converters=converters, skiplines=skiprows, usecols=usecols, + unpack=unpack, ndmin=ndmin, encoding=encoding, + max_rows=max_rows, quote=quotechar) + + return arr + + +_loadtxt_with_like = array_function_dispatch()(loadtxt) + + +def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None, + header=None, footer=None, comments=None, + encoding=None): + return (X,) + + +@array_function_dispatch(_savetxt_dispatcher) +def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='', + footer='', comments='# ', encoding=None): + """ + Save an array to a text file. + + Parameters + ---------- + fname : filename, file handle or pathlib.Path + If the filename ends in ``.gz``, the file is automatically saved in + compressed gzip format. `loadtxt` understands gzipped files + transparently. + X : 1D or 2D array_like + Data to be saved to a text file. + fmt : str or sequence of strs, optional + A single format (%10.5f), a sequence of formats, or a + multi-format string, e.g. 'Iteration %d -- %10.5f', in which + case `delimiter` is ignored. For complex `X`, the legal options + for `fmt` are: + + * a single specifier, ``fmt='%.4e'``, resulting in numbers formatted + like ``' (%s+%sj)' % (fmt, fmt)`` + * a full string specifying every real and imaginary part, e.g. + ``' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'`` for 3 columns + * a list of specifiers, one per column - in this case, the real + and imaginary part must have separate specifiers, + e.g. ``['%.3e + %.3ej', '(%.15e%+.15ej)']`` for 2 columns + delimiter : str, optional + String or character separating columns. + newline : str, optional + String or character separating lines. + header : str, optional + String that will be written at the beginning of the file. + footer : str, optional + String that will be written at the end of the file. + comments : str, optional + String that will be prepended to the ``header`` and ``footer`` strings, + to mark them as comments. Default: '# ', as expected by e.g. + ``numpy.loadtxt``. + encoding : {None, str}, optional + Encoding used to encode the outputfile. Does not apply to output + streams. If the encoding is something other than 'bytes' or 'latin1' + you will not be able to load the file in NumPy versions < 1.14. Default + is 'latin1'. + + See Also + -------- + save : Save an array to a binary file in NumPy ``.npy`` format + savez : Save several arrays into an uncompressed ``.npz`` archive + savez_compressed : Save several arrays into a compressed ``.npz`` archive + + Notes + ----- + Further explanation of the `fmt` parameter + (``%[flag]width[.precision]specifier``): + + flags: + ``-`` : left justify + + ``+`` : Forces to precede result with + or -. + + ``0`` : Left pad the number with zeros instead of space (see width). + + width: + Minimum number of characters to be printed. The value is not truncated + if it has more characters. + + precision: + - For integer specifiers (eg. ``d,i,o,x``), the minimum number of + digits. + - For ``e, E`` and ``f`` specifiers, the number of digits to print + after the decimal point. + - For ``g`` and ``G``, the maximum number of significant digits. + - For ``s``, the maximum number of characters. + + specifiers: + ``c`` : character + + ``d`` or ``i`` : signed decimal integer + + ``e`` or ``E`` : scientific notation with ``e`` or ``E``. + + ``f`` : decimal floating point + + ``g,G`` : use the shorter of ``e,E`` or ``f`` + + ``o`` : signed octal + + ``s`` : string of characters + + ``u`` : unsigned decimal integer + + ``x,X`` : unsigned hexadecimal integer + + This explanation of ``fmt`` is not complete, for an exhaustive + specification see [1]_. + + References + ---------- + .. [1] `Format Specification Mini-Language + `_, + Python Documentation. + + Examples + -------- + >>> import numpy as np + >>> x = y = z = np.arange(0.0,5.0,1.0) + >>> np.savetxt('test.out', x, delimiter=',') # X is an array + >>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays + >>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation + + """ + + class WriteWrap: + """Convert to bytes on bytestream inputs. + + """ + def __init__(self, fh, encoding): + self.fh = fh + self.encoding = encoding + self.do_write = self.first_write + + def close(self): + self.fh.close() + + def write(self, v): + self.do_write(v) + + def write_bytes(self, v): + if isinstance(v, bytes): + self.fh.write(v) + else: + self.fh.write(v.encode(self.encoding)) + + def write_normal(self, v): + self.fh.write(asunicode(v)) + + def first_write(self, v): + try: + self.write_normal(v) + self.write = self.write_normal + except TypeError: + # input is probably a bytestream + self.write_bytes(v) + self.write = self.write_bytes + + own_fh = False + if isinstance(fname, os.PathLike): + fname = os.fspath(fname) + if _is_string_like(fname): + # datasource doesn't support creating a new file ... + open(fname, 'wt').close() + fh = np.lib._datasource.open(fname, 'wt', encoding=encoding) + own_fh = True + elif hasattr(fname, 'write'): + # wrap to handle byte output streams + fh = WriteWrap(fname, encoding or 'latin1') + else: + raise ValueError('fname must be a string or file handle') + + try: + X = np.asarray(X) + + # Handle 1-dimensional arrays + if X.ndim == 0 or X.ndim > 2: + raise ValueError( + "Expected 1D or 2D array, got %dD array instead" % X.ndim) + elif X.ndim == 1: + # Common case -- 1d array of numbers + if X.dtype.names is None: + X = np.atleast_2d(X).T + ncol = 1 + + # Complex dtype -- each field indicates a separate column + else: + ncol = len(X.dtype.names) + else: + ncol = X.shape[1] + + iscomplex_X = np.iscomplexobj(X) + # `fmt` can be a string with multiple insertion points or a + # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '$10d') + if type(fmt) in (list, tuple): + if len(fmt) != ncol: + raise AttributeError(f'fmt has wrong shape. {str(fmt)}') + format = delimiter.join(fmt) + elif isinstance(fmt, str): + n_fmt_chars = fmt.count('%') + error = ValueError(f'fmt has wrong number of % formats: {fmt}') + if n_fmt_chars == 1: + if iscomplex_X: + fmt = [f' ({fmt}+{fmt}j)', ] * ncol + else: + fmt = [fmt, ] * ncol + format = delimiter.join(fmt) + elif iscomplex_X and n_fmt_chars != (2 * ncol): + raise error + elif ((not iscomplex_X) and n_fmt_chars != ncol): + raise error + else: + format = fmt + else: + raise ValueError(f'invalid fmt: {fmt!r}') + + if len(header) > 0: + header = header.replace('\n', '\n' + comments) + fh.write(comments + header + newline) + if iscomplex_X: + for row in X: + row2 = [] + for number in row: + row2.extend((number.real, number.imag)) + s = format % tuple(row2) + newline + fh.write(s.replace('+-', '-')) + else: + for row in X: + try: + v = format % tuple(row) + newline + except TypeError as e: + raise TypeError("Mismatch between array dtype ('%s') and " + "format specifier ('%s')" + % (str(X.dtype), format)) from e + fh.write(v) + + if len(footer) > 0: + footer = footer.replace('\n', '\n' + comments) + fh.write(comments + footer + newline) + finally: + if own_fh: + fh.close() + + +@set_module('numpy') +def fromregex(file, regexp, dtype, encoding=None): + r""" + Construct an array from a text file, using regular expression parsing. + + The returned array is always a structured array, and is constructed from + all matches of the regular expression in the file. Groups in the regular + expression are converted to fields of the structured array. + + Parameters + ---------- + file : file, str, or pathlib.Path + Filename or file object to read. + + .. versionchanged:: 1.22.0 + Now accepts `os.PathLike` implementations. + + regexp : str or regexp + Regular expression used to parse the file. + Groups in the regular expression correspond to fields in the dtype. + dtype : dtype or list of dtypes + Dtype for the structured array; must be a structured datatype. + encoding : str, optional + Encoding used to decode the inputfile. Does not apply to input streams. + + Returns + ------- + output : ndarray + The output array, containing the part of the content of `file` that + was matched by `regexp`. `output` is always a structured array. + + Raises + ------ + TypeError + When `dtype` is not a valid dtype for a structured array. + + See Also + -------- + fromstring, loadtxt + + Notes + ----- + Dtypes for structured arrays can be specified in several forms, but all + forms specify at least the data type and field name. For details see + `basics.rec`. + + Examples + -------- + >>> import numpy as np + >>> from io import StringIO + >>> text = StringIO("1312 foo\n1534 bar\n444 qux") + + >>> regexp = r"(\d+)\s+(...)" # match [digits, whitespace, anything] + >>> output = np.fromregex(text, regexp, + ... [('num', np.int64), ('key', 'S3')]) + >>> output + array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')], + dtype=[('num', '>> output['num'] + array([1312, 1534, 444]) + + """ + own_fh = False + if not hasattr(file, "read"): + file = os.fspath(file) + file = np.lib._datasource.open(file, 'rt', encoding=encoding) + own_fh = True + + try: + if not isinstance(dtype, np.dtype): + dtype = np.dtype(dtype) + if dtype.names is None: + raise TypeError('dtype must be a structured datatype.') + + content = file.read() + if isinstance(content, bytes) and isinstance(regexp, str): + regexp = asbytes(regexp) + + if not hasattr(regexp, 'match'): + regexp = re.compile(regexp) + seq = regexp.findall(content) + if seq and not isinstance(seq[0], tuple): + # Only one group is in the regexp. + # Create the new array as a single data-type and then + # re-interpret as a single-field structured array. + newdtype = np.dtype(dtype[dtype.names[0]]) + output = np.array(seq, dtype=newdtype) + output = output.view(dtype) + else: + output = np.array(seq, dtype=dtype) + + return output + finally: + if own_fh: + file.close() + + +#####-------------------------------------------------------------------------- +#---- --- ASCII functions --- +#####-------------------------------------------------------------------------- + + +@finalize_array_function_like +@set_module('numpy') +def genfromtxt(fname, dtype=float, comments='#', delimiter=None, + skip_header=0, skip_footer=0, converters=None, + missing_values=None, filling_values=None, usecols=None, + names=None, excludelist=None, + deletechars=''.join(sorted(NameValidator.defaultdeletechars)), # noqa: B008 + replace_space='_', autostrip=False, case_sensitive=True, + defaultfmt="f%i", unpack=None, usemask=False, loose=True, + invalid_raise=True, max_rows=None, encoding=None, + *, ndmin=0, like=None): + """ + Load data from a text file, with missing values handled as specified. + + Each line past the first `skip_header` lines is split at the `delimiter` + character, and characters following the `comments` character are discarded. + + Parameters + ---------- + fname : file, str, pathlib.Path, list of str, generator + File, filename, list, or generator to read. If the filename + extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note + that generators must return bytes or strings. The strings + in a list or produced by a generator are treated as lines. + dtype : dtype, optional + Data type of the resulting array. + If None, the dtypes will be determined by the contents of each + column, individually. + comments : str, optional + The character used to indicate the start of a comment. + All the characters occurring on a line after a comment are discarded. + delimiter : str, int, or sequence, optional + The string used to separate values. By default, any consecutive + whitespaces act as delimiter. An integer or sequence of integers + can also be provided as width(s) of each field. + skiprows : int, optional + `skiprows` was removed in numpy 1.10. Please use `skip_header` instead. + skip_header : int, optional + The number of lines to skip at the beginning of the file. + skip_footer : int, optional + The number of lines to skip at the end of the file. + converters : variable, optional + The set of functions that convert the data of a column to a value. + The converters can also be used to provide a default value + for missing data: ``converters = {3: lambda s: float(s or 0)}``. + missing : variable, optional + `missing` was removed in numpy 1.10. Please use `missing_values` + instead. + missing_values : variable, optional + The set of strings corresponding to missing data. + filling_values : variable, optional + The set of values to be used as default when the data are missing. + usecols : sequence, optional + Which columns to read, with 0 being the first. For example, + ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns. + names : {None, True, str, sequence}, optional + If `names` is True, the field names are read from the first line after + the first `skip_header` lines. This line can optionally be preceded + by a comment delimiter. Any content before the comment delimiter is + discarded. If `names` is a sequence or a single-string of + comma-separated names, the names will be used to define the field + names in a structured dtype. If `names` is None, the names of the + dtype fields will be used, if any. + excludelist : sequence, optional + A list of names to exclude. This list is appended to the default list + ['return','file','print']. Excluded names are appended with an + underscore: for example, `file` would become `file_`. + deletechars : str, optional + A string combining invalid characters that must be deleted from the + names. + defaultfmt : str, optional + A format used to define default field names, such as "f%i" or "f_%02i". + autostrip : bool, optional + Whether to automatically strip white spaces from the variables. + replace_space : char, optional + Character(s) used in replacement of white spaces in the variable + names. By default, use a '_'. + case_sensitive : {True, False, 'upper', 'lower'}, optional + If True, field names are case sensitive. + If False or 'upper', field names are converted to upper case. + If 'lower', field names are converted to lower case. + unpack : bool, optional + If True, the returned array is transposed, so that arguments may be + unpacked using ``x, y, z = genfromtxt(...)``. When used with a + structured data-type, arrays are returned for each field. + Default is False. + usemask : bool, optional + If True, return a masked array. + If False, return a regular array. + loose : bool, optional + If True, do not raise errors for invalid values. + invalid_raise : bool, optional + If True, an exception is raised if an inconsistency is detected in the + number of columns. + If False, a warning is emitted and the offending lines are skipped. + max_rows : int, optional + The maximum number of rows to read. Must not be used with skip_footer + at the same time. If given, the value must be at least 1. Default is + to read the entire file. + encoding : str, optional + Encoding used to decode the inputfile. Does not apply when `fname` + is a file object. The special value 'bytes' enables backward + compatibility workarounds that ensure that you receive byte arrays + when possible and passes latin1 encoded strings to converters. + Override this value to receive unicode arrays and pass strings + as input to converters. If set to None the system default is used. + The default value is 'bytes'. + + .. versionchanged:: 2.0 + Before NumPy 2, the default was ``'bytes'`` for Python 2 + compatibility. The default is now ``None``. + + ndmin : int, optional + Same parameter as `loadtxt` + + .. versionadded:: 1.23.0 + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Data read from the text file. If `usemask` is True, this is a + masked array. + + See Also + -------- + numpy.loadtxt : equivalent function when no data is missing. + + Notes + ----- + * When spaces are used as delimiters, or when no delimiter has been given + as input, there should not be any missing data between two fields. + * When variables are named (either by a flexible dtype or with a `names` + sequence), there must not be any header in the file (else a ValueError + exception is raised). + * Individual values are not stripped of spaces by default. + When using a custom converter, make sure the function does remove spaces. + * Custom converters may receive unexpected values due to dtype + discovery. + + References + ---------- + .. [1] NumPy User Guide, section `I/O with NumPy + `_. + + Examples + -------- + >>> from io import StringIO + >>> import numpy as np + + Comma delimited file with mixed dtype + + >>> s = StringIO("1,1.3,abcde") + >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'), + ... ('mystring','S5')], delimiter=",") + >>> data + array((1, 1.3, b'abcde'), + dtype=[('myint', '>> _ = s.seek(0) # needed for StringIO example only + >>> data = np.genfromtxt(s, dtype=None, + ... names = ['myint','myfloat','mystring'], delimiter=",") + >>> data + array((1, 1.3, 'abcde'), + dtype=[('myint', '>> _ = s.seek(0) + >>> data = np.genfromtxt(s, dtype="i8,f8,S5", + ... names=['myint','myfloat','mystring'], delimiter=",") + >>> data + array((1, 1.3, b'abcde'), + dtype=[('myint', '>> s = StringIO("11.3abcde") + >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'], + ... delimiter=[1,3,5]) + >>> data + array((1, 1.3, 'abcde'), + dtype=[('intvar', '>> f = StringIO(''' + ... text,# of chars + ... hello world,11 + ... numpy,5''') + >>> np.genfromtxt(f, dtype='S12,S12', delimiter=',') + array([(b'text', b''), (b'hello world', b'11'), (b'numpy', b'5')], + dtype=[('f0', 'S12'), ('f1', 'S12')]) + + """ + + if like is not None: + return _genfromtxt_with_like( + like, fname, dtype=dtype, comments=comments, delimiter=delimiter, + skip_header=skip_header, skip_footer=skip_footer, + converters=converters, missing_values=missing_values, + filling_values=filling_values, usecols=usecols, names=names, + excludelist=excludelist, deletechars=deletechars, + replace_space=replace_space, autostrip=autostrip, + case_sensitive=case_sensitive, defaultfmt=defaultfmt, + unpack=unpack, usemask=usemask, loose=loose, + invalid_raise=invalid_raise, max_rows=max_rows, encoding=encoding, + ndmin=ndmin, + ) + + _ensure_ndmin_ndarray_check_param(ndmin) + + if max_rows is not None: + if skip_footer: + raise ValueError( + "The keywords 'skip_footer' and 'max_rows' can not be " + "specified at the same time.") + if max_rows < 1: + raise ValueError("'max_rows' must be at least 1.") + + if usemask: + from numpy.ma import MaskedArray, make_mask_descr + # Check the input dictionary of converters + user_converters = converters or {} + if not isinstance(user_converters, dict): + raise TypeError( + "The input argument 'converter' should be a valid dictionary " + "(got '%s' instead)" % type(user_converters)) + + if encoding == 'bytes': + encoding = None + byte_converters = True + else: + byte_converters = False + + # Initialize the filehandle, the LineSplitter and the NameValidator + if isinstance(fname, os.PathLike): + fname = os.fspath(fname) + if isinstance(fname, str): + fid = np.lib._datasource.open(fname, 'rt', encoding=encoding) + fid_ctx = contextlib.closing(fid) + else: + fid = fname + fid_ctx = contextlib.nullcontext(fid) + try: + fhd = iter(fid) + except TypeError as e: + raise TypeError( + "fname must be a string, a filehandle, a sequence of strings,\n" + f"or an iterator of strings. Got {type(fname)} instead." + ) from e + with fid_ctx: + split_line = LineSplitter(delimiter=delimiter, comments=comments, + autostrip=autostrip, encoding=encoding) + validate_names = NameValidator(excludelist=excludelist, + deletechars=deletechars, + case_sensitive=case_sensitive, + replace_space=replace_space) + + # Skip the first `skip_header` rows + try: + for i in range(skip_header): + next(fhd) + + # Keep on until we find the first valid values + first_values = None + + while not first_values: + first_line = _decode_line(next(fhd), encoding) + if (names is True) and (comments is not None): + if comments in first_line: + first_line = ( + ''.join(first_line.split(comments)[1:])) + first_values = split_line(first_line) + except StopIteration: + # return an empty array if the datafile is empty + first_line = '' + first_values = [] + warnings.warn( + f'genfromtxt: Empty input file: "{fname}"', stacklevel=2 + ) + + # Should we take the first values as names ? + if names is True: + fval = first_values[0].strip() + if comments is not None: + if fval in comments: + del first_values[0] + + # Check the columns to use: make sure `usecols` is a list + if usecols is not None: + try: + usecols = [_.strip() for _ in usecols.split(",")] + except AttributeError: + try: + usecols = list(usecols) + except TypeError: + usecols = [usecols, ] + nbcols = len(usecols or first_values) + + # Check the names and overwrite the dtype.names if needed + if names is True: + names = validate_names([str(_.strip()) for _ in first_values]) + first_line = '' + elif _is_string_like(names): + names = validate_names([_.strip() for _ in names.split(',')]) + elif names: + names = validate_names(names) + # Get the dtype + if dtype is not None: + dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names, + excludelist=excludelist, + deletechars=deletechars, + case_sensitive=case_sensitive, + replace_space=replace_space) + # Make sure the names is a list (for 2.5) + if names is not None: + names = list(names) + + if usecols: + for (i, current) in enumerate(usecols): + # if usecols is a list of names, convert to a list of indices + if _is_string_like(current): + usecols[i] = names.index(current) + elif current < 0: + usecols[i] = current + len(first_values) + # If the dtype is not None, make sure we update it + if (dtype is not None) and (len(dtype) > nbcols): + descr = dtype.descr + dtype = np.dtype([descr[_] for _ in usecols]) + names = list(dtype.names) + # If `names` is not None, update the names + elif (names is not None) and (len(names) > nbcols): + names = [names[_] for _ in usecols] + elif (names is not None) and (dtype is not None): + names = list(dtype.names) + + # Process the missing values ............................... + # Rename missing_values for convenience + user_missing_values = missing_values or () + if isinstance(user_missing_values, bytes): + user_missing_values = user_missing_values.decode('latin1') + + # Define the list of missing_values (one column: one list) + missing_values = [[''] for _ in range(nbcols)] + + # We have a dictionary: process it field by field + if isinstance(user_missing_values, dict): + # Loop on the items + for (key, val) in user_missing_values.items(): + # Is the key a string ? + if _is_string_like(key): + try: + # Transform it into an integer + key = names.index(key) + except ValueError: + # We couldn't find it: the name must have been dropped + continue + # Redefine the key as needed if it's a column number + if usecols: + try: + key = usecols.index(key) + except ValueError: + pass + # Transform the value as a list of string + if isinstance(val, (list, tuple)): + val = [str(_) for _ in val] + else: + val = [str(val), ] + # Add the value(s) to the current list of missing + if key is None: + # None acts as default + for miss in missing_values: + miss.extend(val) + else: + missing_values[key].extend(val) + # We have a sequence : each item matches a column + elif isinstance(user_missing_values, (list, tuple)): + for (value, entry) in zip(user_missing_values, missing_values): + value = str(value) + if value not in entry: + entry.append(value) + # We have a string : apply it to all entries + elif isinstance(user_missing_values, str): + user_value = user_missing_values.split(",") + for entry in missing_values: + entry.extend(user_value) + # We have something else: apply it to all entries + else: + for entry in missing_values: + entry.extend([str(user_missing_values)]) + + # Process the filling_values ............................... + # Rename the input for convenience + user_filling_values = filling_values + if user_filling_values is None: + user_filling_values = [] + # Define the default + filling_values = [None] * nbcols + # We have a dictionary : update each entry individually + if isinstance(user_filling_values, dict): + for (key, val) in user_filling_values.items(): + if _is_string_like(key): + try: + # Transform it into an integer + key = names.index(key) + except ValueError: + # We couldn't find it: the name must have been dropped + continue + # Redefine the key if it's a column number + # and usecols is defined + if usecols: + try: + key = usecols.index(key) + except ValueError: + pass + # Add the value to the list + filling_values[key] = val + # We have a sequence : update on a one-to-one basis + elif isinstance(user_filling_values, (list, tuple)): + n = len(user_filling_values) + if (n <= nbcols): + filling_values[:n] = user_filling_values + else: + filling_values = user_filling_values[:nbcols] + # We have something else : use it for all entries + else: + filling_values = [user_filling_values] * nbcols + + # Initialize the converters ................................ + if dtype is None: + # Note: we can't use a [...]*nbcols, as we would have 3 times + # the same converter, instead of 3 different converters. + converters = [ + StringConverter(None, missing_values=miss, default=fill) + for (miss, fill) in zip(missing_values, filling_values) + ] + else: + dtype_flat = flatten_dtype(dtype, flatten_base=True) + # Initialize the converters + if len(dtype_flat) > 1: + # Flexible type : get a converter from each dtype + zipit = zip(dtype_flat, missing_values, filling_values) + converters = [StringConverter(dt, + locked=True, + missing_values=miss, + default=fill) + for (dt, miss, fill) in zipit] + else: + # Set to a default converter (but w/ different missing values) + zipit = zip(missing_values, filling_values) + converters = [StringConverter(dtype, + locked=True, + missing_values=miss, + default=fill) + for (miss, fill) in zipit] + # Update the converters to use the user-defined ones + uc_update = [] + for (j, conv) in user_converters.items(): + # If the converter is specified by column names, + # use the index instead + if _is_string_like(j): + try: + j = names.index(j) + i = j + except ValueError: + continue + elif usecols: + try: + i = usecols.index(j) + except ValueError: + # Unused converter specified + continue + else: + i = j + # Find the value to test - first_line is not filtered by usecols: + if len(first_line): + testing_value = first_values[j] + else: + testing_value = None + if conv is bytes: + user_conv = asbytes + elif byte_converters: + # Converters may use decode to workaround numpy's old + # behavior, so encode the string again before passing + # to the user converter. + def tobytes_first(x, conv): + if type(x) is bytes: + return conv(x) + return conv(x.encode("latin1")) + user_conv = functools.partial(tobytes_first, conv=conv) + else: + user_conv = conv + converters[i].update(user_conv, locked=True, + testing_value=testing_value, + default=filling_values[i], + missing_values=missing_values[i],) + uc_update.append((i, user_conv)) + # Make sure we have the corrected keys in user_converters... + user_converters.update(uc_update) + + # Fixme: possible error as following variable never used. + # miss_chars = [_.missing_values for _ in converters] + + # Initialize the output lists ... + # ... rows + rows = [] + append_to_rows = rows.append + # ... masks + if usemask: + masks = [] + append_to_masks = masks.append + # ... invalid + invalid = [] + append_to_invalid = invalid.append + + # Parse each line + for (i, line) in enumerate(itertools.chain([first_line, ], fhd)): + values = split_line(line) + nbvalues = len(values) + # Skip an empty line + if nbvalues == 0: + continue + if usecols: + # Select only the columns we need + try: + values = [values[_] for _ in usecols] + except IndexError: + append_to_invalid((i + skip_header + 1, nbvalues)) + continue + elif nbvalues != nbcols: + append_to_invalid((i + skip_header + 1, nbvalues)) + continue + # Store the values + append_to_rows(tuple(values)) + if usemask: + append_to_masks(tuple(v.strip() in m + for (v, m) in zip(values, + missing_values))) + if len(rows) == max_rows: + break + + # Upgrade the converters (if needed) + if dtype is None: + for (i, converter) in enumerate(converters): + current_column = [itemgetter(i)(_m) for _m in rows] + try: + converter.iterupgrade(current_column) + except ConverterLockError: + errmsg = f"Converter #{i} is locked and cannot be upgraded: " + current_column = map(itemgetter(i), rows) + for (j, value) in enumerate(current_column): + try: + converter.upgrade(value) + except (ConverterError, ValueError): + line_number = j + 1 + skip_header + errmsg += f"(occurred line #{line_number} for value '{value}')" + raise ConverterError(errmsg) + + # Check that we don't have invalid values + nbinvalid = len(invalid) + if nbinvalid > 0: + nbrows = len(rows) + nbinvalid - skip_footer + # Construct the error message + template = f" Line #%i (got %i columns instead of {nbcols})" + if skip_footer > 0: + nbinvalid_skipped = len([_ for _ in invalid + if _[0] > nbrows + skip_header]) + invalid = invalid[:nbinvalid - nbinvalid_skipped] + skip_footer -= nbinvalid_skipped +# +# nbrows -= skip_footer +# errmsg = [template % (i, nb) +# for (i, nb) in invalid if i < nbrows] +# else: + errmsg = [template % (i, nb) + for (i, nb) in invalid] + if len(errmsg): + errmsg.insert(0, "Some errors were detected !") + errmsg = "\n".join(errmsg) + # Raise an exception ? + if invalid_raise: + raise ValueError(errmsg) + # Issue a warning ? + else: + warnings.warn(errmsg, ConversionWarning, stacklevel=2) + + # Strip the last skip_footer data + if skip_footer > 0: + rows = rows[:-skip_footer] + if usemask: + masks = masks[:-skip_footer] + + # Convert each value according to the converter: + # We want to modify the list in place to avoid creating a new one... + if loose: + rows = list( + zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)] + for (i, conv) in enumerate(converters)])) + else: + rows = list( + zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)] + for (i, conv) in enumerate(converters)])) + + # Reset the dtype + data = rows + if dtype is None: + # Get the dtypes from the types of the converters + column_types = [conv.type for conv in converters] + # Find the columns with strings... + strcolidx = [i for (i, v) in enumerate(column_types) + if v == np.str_] + + if byte_converters and strcolidx: + # convert strings back to bytes for backward compatibility + warnings.warn( + "Reading unicode strings without specifying the encoding " + "argument is deprecated. Set the encoding, use None for the " + "system default.", + np.exceptions.VisibleDeprecationWarning, stacklevel=2) + + def encode_unicode_cols(row_tup): + row = list(row_tup) + for i in strcolidx: + row[i] = row[i].encode('latin1') + return tuple(row) + + try: + data = [encode_unicode_cols(r) for r in data] + except UnicodeEncodeError: + pass + else: + for i in strcolidx: + column_types[i] = np.bytes_ + + # Update string types to be the right length + sized_column_types = column_types.copy() + for i, col_type in enumerate(column_types): + if np.issubdtype(col_type, np.character): + n_chars = max(len(row[i]) for row in data) + sized_column_types[i] = (col_type, n_chars) + + if names is None: + # If the dtype is uniform (before sizing strings) + base = { + c_type + for c, c_type in zip(converters, column_types) + if c._checked} + if len(base) == 1: + uniform_type, = base + (ddtype, mdtype) = (uniform_type, bool) + else: + ddtype = [(defaultfmt % i, dt) + for (i, dt) in enumerate(sized_column_types)] + if usemask: + mdtype = [(defaultfmt % i, bool) + for (i, dt) in enumerate(sized_column_types)] + else: + ddtype = list(zip(names, sized_column_types)) + mdtype = list(zip(names, [bool] * len(sized_column_types))) + output = np.array(data, dtype=ddtype) + if usemask: + outputmask = np.array(masks, dtype=mdtype) + else: + # Overwrite the initial dtype names if needed + if names and dtype.names is not None: + dtype.names = names + # Case 1. We have a structured type + if len(dtype_flat) > 1: + # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])] + # First, create the array using a flattened dtype: + # [('a', int), ('b1', int), ('b2', float)] + # Then, view the array using the specified dtype. + if 'O' in (_.char for _ in dtype_flat): + if has_nested_fields(dtype): + raise NotImplementedError( + "Nested fields involving objects are not supported...") + else: + output = np.array(data, dtype=dtype) + else: + rows = np.array(data, dtype=[('', _) for _ in dtype_flat]) + output = rows.view(dtype) + # Now, process the rowmasks the same way + if usemask: + rowmasks = np.array( + masks, dtype=np.dtype([('', bool) for t in dtype_flat])) + # Construct the new dtype + mdtype = make_mask_descr(dtype) + outputmask = rowmasks.view(mdtype) + # Case #2. We have a basic dtype + else: + # We used some user-defined converters + if user_converters: + ishomogeneous = True + descr = [] + for i, ttype in enumerate([conv.type for conv in converters]): + # Keep the dtype of the current converter + if i in user_converters: + ishomogeneous &= (ttype == dtype.type) + if np.issubdtype(ttype, np.character): + ttype = (ttype, max(len(row[i]) for row in data)) + descr.append(('', ttype)) + else: + descr.append(('', dtype)) + # So we changed the dtype ? + if not ishomogeneous: + # We have more than one field + if len(descr) > 1: + dtype = np.dtype(descr) + # We have only one field: drop the name if not needed. + else: + dtype = np.dtype(ttype) + # + output = np.array(data, dtype) + if usemask: + if dtype.names is not None: + mdtype = [(_, bool) for _ in dtype.names] + else: + mdtype = bool + outputmask = np.array(masks, dtype=mdtype) + # Try to take care of the missing data we missed + names = output.dtype.names + if usemask and names: + for (name, conv) in zip(names, converters): + missing_values = [conv(_) for _ in conv.missing_values + if _ != ''] + for mval in missing_values: + outputmask[name] |= (output[name] == mval) + # Construct the final array + if usemask: + output = output.view(MaskedArray) + output._mask = outputmask + + output = _ensure_ndmin_ndarray(output, ndmin=ndmin) + + if unpack: + if names is None: + return output.T + elif len(names) == 1: + # squeeze single-name dtypes too + return output[names[0]] + else: + # For structured arrays with multiple fields, + # return an array for each field. + return [output[field] for field in names] + return output + + +_genfromtxt_with_like = array_function_dispatch()(genfromtxt) + + +def recfromtxt(fname, **kwargs): + """ + Load ASCII data from a file and return it in a record array. + + If ``usemask=False`` a standard `recarray` is returned, + if ``usemask=True`` a MaskedRecords array is returned. + + .. deprecated:: 2.0 + Use `numpy.genfromtxt` instead. + + Parameters + ---------- + fname, kwargs : For a description of input parameters, see `genfromtxt`. + + See Also + -------- + numpy.genfromtxt : generic function + + Notes + ----- + By default, `dtype` is None, which means that the data-type of the output + array will be determined from the data. + + """ + + # Deprecated in NumPy 2.0, 2023-07-11 + warnings.warn( + "`recfromtxt` is deprecated, " + "use `numpy.genfromtxt` instead." + "(deprecated in NumPy 2.0)", + DeprecationWarning, + stacklevel=2 + ) + + kwargs.setdefault("dtype", None) + usemask = kwargs.get('usemask', False) + output = genfromtxt(fname, **kwargs) + if usemask: + from numpy.ma.mrecords import MaskedRecords + output = output.view(MaskedRecords) + else: + output = output.view(np.recarray) + return output + + +def recfromcsv(fname, **kwargs): + """ + Load ASCII data stored in a comma-separated file. + + The returned array is a record array (if ``usemask=False``, see + `recarray`) or a masked record array (if ``usemask=True``, + see `ma.mrecords.MaskedRecords`). + + .. deprecated:: 2.0 + Use `numpy.genfromtxt` with comma as `delimiter` instead. + + Parameters + ---------- + fname, kwargs : For a description of input parameters, see `genfromtxt`. + + See Also + -------- + numpy.genfromtxt : generic function to load ASCII data. + + Notes + ----- + By default, `dtype` is None, which means that the data-type of the output + array will be determined from the data. + + """ + + # Deprecated in NumPy 2.0, 2023-07-11 + warnings.warn( + "`recfromcsv` is deprecated, " + "use `numpy.genfromtxt` with comma as `delimiter` instead. " + "(deprecated in NumPy 2.0)", + DeprecationWarning, + stacklevel=2 + ) + + # Set default kwargs for genfromtxt as relevant to csv import. + kwargs.setdefault("case_sensitive", "lower") + kwargs.setdefault("names", True) + kwargs.setdefault("delimiter", ",") + kwargs.setdefault("dtype", None) + output = genfromtxt(fname, **kwargs) + + usemask = kwargs.get("usemask", False) + if usemask: + from numpy.ma.mrecords import MaskedRecords + output = output.view(MaskedRecords) + else: + output = output.view(np.recarray) + return output diff --git a/local_packages/numpy/lib/_npyio_impl.pyi b/local_packages/numpy/lib/_npyio_impl.pyi new file mode 100644 index 0000000..349edd0 --- /dev/null +++ b/local_packages/numpy/lib/_npyio_impl.pyi @@ -0,0 +1,299 @@ +import types +import zipfile +from _typeshed import ( + StrOrBytesPath, + StrPath, + SupportsKeysAndGetItem, + SupportsRead, + SupportsWrite, +) +from collections.abc import Callable, Collection, Iterable, Iterator, Mapping, Sequence +from re import Pattern +from typing import ( + IO, + Any, + ClassVar, + Generic, + Literal as L, + Protocol, + Self, + TypeAlias, + overload, + type_check_only, +) +from typing_extensions import TypeVar, override + +import numpy as np +from numpy._core.multiarray import packbits, unpackbits +from numpy._typing import ArrayLike, DTypeLike, NDArray, _DTypeLike, _SupportsArrayFunc +from numpy.ma.mrecords import MaskedRecords + +from ._datasource import DataSource as DataSource + +__all__ = [ + "fromregex", + "genfromtxt", + "load", + "loadtxt", + "packbits", + "save", + "savetxt", + "savez", + "savez_compressed", + "unpackbits", +] + +_T = TypeVar("_T") +_T_co = TypeVar("_T_co", covariant=True) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, default=Any, covariant=True) + +_FName: TypeAlias = StrPath | Iterable[str] | Iterable[bytes] +_FNameRead: TypeAlias = StrPath | SupportsRead[str] | SupportsRead[bytes] +_FNameWriteBytes: TypeAlias = StrPath | SupportsWrite[bytes] +_FNameWrite: TypeAlias = _FNameWriteBytes | SupportsWrite[str] + +@type_check_only +class _SupportsReadSeek(SupportsRead[_T_co], Protocol[_T_co]): + def seek(self, offset: int, whence: int, /) -> object: ... + +class BagObj(Generic[_T_co]): + def __init__(self, /, obj: SupportsKeysAndGetItem[str, _T_co]) -> None: ... + def __getattribute__(self, key: str, /) -> _T_co: ... + def __dir__(self) -> list[str]: ... + +class NpzFile(Mapping[str, NDArray[_ScalarT_co]]): + _MAX_REPR_ARRAY_COUNT: ClassVar[int] = 5 + + zip: zipfile.ZipFile | None = None + fid: IO[str] | None = None + files: list[str] + allow_pickle: bool + pickle_kwargs: Mapping[str, Any] | None + f: BagObj[NpzFile[_ScalarT_co]] + + # + def __init__( + self, + /, + fid: IO[Any], + own_fid: bool = False, + allow_pickle: bool = False, + pickle_kwargs: Mapping[str, object] | None = None, + *, + max_header_size: int = 10_000, + ) -> None: ... + def __del__(self) -> None: ... + def __enter__(self) -> Self: ... + def __exit__(self, cls: type[BaseException] | None, e: BaseException | None, tb: types.TracebackType | None, /) -> None: ... + @override + def __len__(self) -> int: ... + @override + def __iter__(self) -> Iterator[str]: ... + @override + def __getitem__(self, key: str, /) -> NDArray[_ScalarT_co]: ... + + # + @override + @overload + def get(self, key: str, default: None = None, /) -> NDArray[_ScalarT_co] | None: ... + @overload + def get(self, key: str, default: NDArray[_ScalarT_co] | _T, /) -> NDArray[_ScalarT_co] | _T: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # + def close(self) -> None: ... + +# NOTE: Returns a `NpzFile` if file is a zip file; +# returns an `ndarray`/`memmap` otherwise +def load( + file: StrOrBytesPath | _SupportsReadSeek[bytes], + mmap_mode: L["r+", "r", "w+", "c"] | None = None, + allow_pickle: bool = False, + fix_imports: bool = True, + encoding: L["ASCII", "latin1", "bytes"] = "ASCII", + *, + max_header_size: int = 10_000, +) -> Any: ... + +def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool = True) -> None: ... +def savez(file: _FNameWriteBytes, *args: ArrayLike, allow_pickle: bool = True, **kwds: ArrayLike) -> None: ... +def savez_compressed(file: _FNameWriteBytes, *args: ArrayLike, allow_pickle: bool = True, **kwds: ArrayLike) -> None: ... + +# File-like objects only have to implement `__iter__` and, +# optionally, `encoding` +@overload +def loadtxt( + fname: _FName, + dtype: None = None, + comments: str | Sequence[str] | None = "#", + delimiter: str | None = None, + converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None, + skiprows: int = 0, + usecols: int | Sequence[int] | None = None, + unpack: bool = False, + ndmin: L[0, 1, 2] = 0, + encoding: str | None = None, + max_rows: int | None = None, + *, + quotechar: str | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[np.float64]: ... +@overload +def loadtxt( + fname: _FName, + dtype: _DTypeLike[_ScalarT], + comments: str | Sequence[str] | None = "#", + delimiter: str | None = None, + converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None, + skiprows: int = 0, + usecols: int | Sequence[int] | None = None, + unpack: bool = False, + ndmin: L[0, 1, 2] = 0, + encoding: str | None = None, + max_rows: int | None = None, + *, + quotechar: str | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def loadtxt( + fname: _FName, + dtype: DTypeLike | None, + comments: str | Sequence[str] | None = "#", + delimiter: str | None = None, + converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None, + skiprows: int = 0, + usecols: int | Sequence[int] | None = None, + unpack: bool = False, + ndmin: L[0, 1, 2] = 0, + encoding: str | None = None, + max_rows: int | None = None, + *, + quotechar: str | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[Any]: ... + +def savetxt( + fname: _FNameWrite, + X: ArrayLike, + fmt: str | Sequence[str] = "%.18e", + delimiter: str = " ", + newline: str = "\n", + header: str = "", + footer: str = "", + comments: str = "# ", + encoding: str | None = None, +) -> None: ... + +@overload +def fromregex( + file: _FNameRead, + regexp: str | bytes | Pattern[Any], + dtype: _DTypeLike[_ScalarT], + encoding: str | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def fromregex( + file: _FNameRead, + regexp: str | bytes | Pattern[Any], + dtype: DTypeLike | None, + encoding: str | None = None, +) -> NDArray[Any]: ... + +@overload +def genfromtxt( + fname: _FName, + dtype: None = None, + comments: str = "#", + delimiter: str | int | Iterable[int] | None = None, + skip_header: int = 0, + skip_footer: int = 0, + converters: Mapping[int | str, Callable[[str], Any]] | None = None, + missing_values: Any = None, + filling_values: Any = None, + usecols: Sequence[int] | None = None, + names: L[True] | str | Collection[str] | None = None, + excludelist: Sequence[str] | None = None, + deletechars: str = " !#$%&'()*+,-./:;<=>?@[\\]^{|}~", + replace_space: str = "_", + autostrip: bool = False, + case_sensitive: bool | L["upper", "lower"] = True, + defaultfmt: str = "f%i", + unpack: bool | None = None, + usemask: bool = False, + loose: bool = True, + invalid_raise: bool = True, + max_rows: int | None = None, + encoding: str | None = None, + *, + ndmin: L[0, 1, 2] = 0, + like: _SupportsArrayFunc | None = None, +) -> NDArray[Any]: ... +@overload +def genfromtxt( + fname: _FName, + dtype: _DTypeLike[_ScalarT], + comments: str = "#", + delimiter: str | int | Iterable[int] | None = None, + skip_header: int = 0, + skip_footer: int = 0, + converters: Mapping[int | str, Callable[[str], Any]] | None = None, + missing_values: Any = None, + filling_values: Any = None, + usecols: Sequence[int] | None = None, + names: L[True] | str | Collection[str] | None = None, + excludelist: Sequence[str] | None = None, + deletechars: str = " !#$%&'()*+,-./:;<=>?@[\\]^{|}~", + replace_space: str = "_", + autostrip: bool = False, + case_sensitive: bool | L["upper", "lower"] = True, + defaultfmt: str = "f%i", + unpack: bool | None = None, + usemask: bool = False, + loose: bool = True, + invalid_raise: bool = True, + max_rows: int | None = None, + encoding: str | None = None, + *, + ndmin: L[0, 1, 2] = 0, + like: _SupportsArrayFunc | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def genfromtxt( + fname: _FName, + dtype: DTypeLike | None, + comments: str = "#", + delimiter: str | int | Iterable[int] | None = None, + skip_header: int = 0, + skip_footer: int = 0, + converters: Mapping[int | str, Callable[[str], Any]] | None = None, + missing_values: Any = None, + filling_values: Any = None, + usecols: Sequence[int] | None = None, + names: L[True] | str | Collection[str] | None = None, + excludelist: Sequence[str] | None = None, + deletechars: str = " !#$%&'()*+,-./:;<=>?@[\\]^{|}~", + replace_space: str = "_", + autostrip: bool = False, + case_sensitive: bool | L["upper", "lower"] = True, + defaultfmt: str = "f%i", + unpack: bool | None = None, + usemask: bool = False, + loose: bool = True, + invalid_raise: bool = True, + max_rows: int | None = None, + encoding: str | None = None, + *, + ndmin: L[0, 1, 2] = 0, + like: _SupportsArrayFunc | None = None, +) -> NDArray[Any]: ... + +@overload +def recfromtxt(fname: _FName, *, usemask: L[False] = False, **kwargs: object) -> np.recarray[Any, np.dtype[np.record]]: ... +@overload +def recfromtxt(fname: _FName, *, usemask: L[True], **kwargs: object) -> MaskedRecords[Any, np.dtype[np.void]]: ... + +@overload +def recfromcsv(fname: _FName, *, usemask: L[False] = False, **kwargs: object) -> np.recarray[Any, np.dtype[np.record]]: ... +@overload +def recfromcsv(fname: _FName, *, usemask: L[True], **kwargs: object) -> MaskedRecords[Any, np.dtype[np.void]]: ... diff --git a/local_packages/numpy/lib/_polynomial_impl.py b/local_packages/numpy/lib/_polynomial_impl.py new file mode 100644 index 0000000..e9d2d5d --- /dev/null +++ b/local_packages/numpy/lib/_polynomial_impl.py @@ -0,0 +1,1465 @@ +""" +Functions to operate on polynomials. + +""" +__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd', + 'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d', + 'polyfit'] + +import functools +import re +import warnings + +import numpy._core.numeric as NX +from numpy._core import ( + abs, + array, + atleast_1d, + dot, + finfo, + hstack, + isscalar, + ones, + overrides, +) +from numpy._utils import set_module +from numpy.exceptions import RankWarning +from numpy.lib._function_base_impl import trim_zeros +from numpy.lib._twodim_base_impl import diag, vander +from numpy.lib._type_check_impl import imag, iscomplex, mintypecode, real +from numpy.linalg import eigvals, inv, lstsq + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +def _poly_dispatcher(seq_of_zeros): + return seq_of_zeros + + +@array_function_dispatch(_poly_dispatcher) +def poly(seq_of_zeros): + """ + Find the coefficients of a polynomial with the given sequence of roots. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Returns the coefficients of the polynomial whose leading coefficient + is one for the given sequence of zeros (multiple roots must be included + in the sequence as many times as their multiplicity; see Examples). + A square matrix (or array, which will be treated as a matrix) can also + be given, in which case the coefficients of the characteristic polynomial + of the matrix are returned. + + Parameters + ---------- + seq_of_zeros : array_like, shape (N,) or (N, N) + A sequence of polynomial roots, or a square array or matrix object. + + Returns + ------- + c : ndarray + 1D array of polynomial coefficients from highest to lowest degree: + + ``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]`` + where c[0] always equals 1. + + Raises + ------ + ValueError + If input is the wrong shape (the input must be a 1-D or square + 2-D array). + + See Also + -------- + polyval : Compute polynomial values. + roots : Return the roots of a polynomial. + polyfit : Least squares polynomial fit. + poly1d : A one-dimensional polynomial class. + + Notes + ----- + Specifying the roots of a polynomial still leaves one degree of + freedom, typically represented by an undetermined leading + coefficient. [1]_ In the case of this function, that coefficient - + the first one in the returned array - is always taken as one. (If + for some reason you have one other point, the only automatic way + presently to leverage that information is to use ``polyfit``.) + + The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n` + matrix **A** is given by + + :math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`, + + where **I** is the `n`-by-`n` identity matrix. [2]_ + + References + ---------- + .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry, + Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996. + + .. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition," + Academic Press, pg. 182, 1980. + + Examples + -------- + + Given a sequence of a polynomial's zeros: + + >>> import numpy as np + + >>> np.poly((0, 0, 0)) # Multiple root example + array([1., 0., 0., 0.]) + + The line above represents z**3 + 0*z**2 + 0*z + 0. + + >>> np.poly((-1./2, 0, 1./2)) + array([ 1. , 0. , -0.25, 0. ]) + + The line above represents z**3 - z/4 + + >>> np.poly((np.random.random(1)[0], 0, np.random.random(1)[0])) + array([ 1. , -0.77086955, 0.08618131, 0. ]) # random + + Given a square array object: + + >>> P = np.array([[0, 1./3], [-1./2, 0]]) + >>> np.poly(P) + array([1. , 0. , 0.16666667]) + + Note how in all cases the leading coefficient is always 1. + + """ + seq_of_zeros = atleast_1d(seq_of_zeros) + sh = seq_of_zeros.shape + + if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0: + seq_of_zeros = eigvals(seq_of_zeros) + elif len(sh) == 1: + dt = seq_of_zeros.dtype + # Let object arrays slip through, e.g. for arbitrary precision + if dt != object: + seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char)) + else: + raise ValueError("input must be 1d or non-empty square 2d array.") + + if len(seq_of_zeros) == 0: + return 1.0 + dt = seq_of_zeros.dtype + a = ones((1,), dtype=dt) + for zero in seq_of_zeros: + a = NX.convolve(a, array([1, -zero], dtype=dt), mode='full') + + if issubclass(a.dtype.type, NX.complexfloating): + # if complex roots are all complex conjugates, the roots are real. + roots = NX.asarray(seq_of_zeros, complex) + if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())): + a = a.real.copy() + + return a + + +def _roots_dispatcher(p): + return p + + +@array_function_dispatch(_roots_dispatcher) +def roots(p): + """ + Return the roots of a polynomial with coefficients given in p. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + The values in the rank-1 array `p` are coefficients of a polynomial. + If the length of `p` is n+1 then the polynomial is described by:: + + p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n] + + Parameters + ---------- + p : array_like + Rank-1 array of polynomial coefficients. + + Returns + ------- + out : ndarray + An array containing the roots of the polynomial. + + Raises + ------ + ValueError + When `p` cannot be converted to a rank-1 array. + + See also + -------- + poly : Find the coefficients of a polynomial with a given sequence + of roots. + polyval : Compute polynomial values. + polyfit : Least squares polynomial fit. + poly1d : A one-dimensional polynomial class. + + Notes + ----- + The algorithm relies on computing the eigenvalues of the + companion matrix [1]_. + + References + ---------- + .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK: + Cambridge University Press, 1999, pp. 146-7. + + Examples + -------- + >>> import numpy as np + >>> coeff = [3.2, 2, 1] + >>> np.roots(coeff) + array([-0.3125+0.46351241j, -0.3125-0.46351241j]) + + """ + # If input is scalar, this makes it an array + p = atleast_1d(p) + if p.ndim != 1: + raise ValueError("Input must be a rank-1 array.") + + # find non-zero array entries + non_zero = NX.nonzero(NX.ravel(p))[0] + + # Return an empty array if polynomial is all zeros + if len(non_zero) == 0: + return NX.array([]) + + # find the number of trailing zeros -- this is the number of roots at 0. + trailing_zeros = len(p) - non_zero[-1] - 1 + + # strip leading and trailing zeros + p = p[int(non_zero[0]):int(non_zero[-1]) + 1] + + # casting: if incoming array isn't floating point, make it floating point. + if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)): + p = p.astype(float) + + N = len(p) + if N > 1: + # build companion matrix and find its eigenvalues (the roots) + A = diag(NX.ones((N - 2,), p.dtype), -1) + A[0, :] = -p[1:] / p[0] + roots = eigvals(A) + else: + roots = NX.array([]) + + # tack any zeros onto the back of the array + roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype))) + return roots + + +def _polyint_dispatcher(p, m=None, k=None): + return (p,) + + +@array_function_dispatch(_polyint_dispatcher) +def polyint(p, m=1, k=None): + """ + Return an antiderivative (indefinite integral) of a polynomial. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + The returned order `m` antiderivative `P` of polynomial `p` satisfies + :math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1` + integration constants `k`. The constants determine the low-order + polynomial part + + .. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1} + + of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`. + + Parameters + ---------- + p : array_like or poly1d + Polynomial to integrate. + A sequence is interpreted as polynomial coefficients, see `poly1d`. + m : int, optional + Order of the antiderivative. (Default: 1) + k : list of `m` scalars or scalar, optional + Integration constants. They are given in the order of integration: + those corresponding to highest-order terms come first. + + If ``None`` (default), all constants are assumed to be zero. + If `m = 1`, a single scalar can be given instead of a list. + + See Also + -------- + polyder : derivative of a polynomial + poly1d.integ : equivalent method + + Examples + -------- + + The defining property of the antiderivative: + + >>> import numpy as np + + >>> p = np.poly1d([1,1,1]) + >>> P = np.polyint(p) + >>> P + poly1d([ 0.33333333, 0.5 , 1. , 0. ]) # may vary + >>> np.polyder(P) == p + True + + The integration constants default to zero, but can be specified: + + >>> P = np.polyint(p, 3) + >>> P(0) + 0.0 + >>> np.polyder(P)(0) + 0.0 + >>> np.polyder(P, 2)(0) + 0.0 + >>> P = np.polyint(p, 3, k=[6,5,3]) + >>> P + poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) # may vary + + Note that 3 = 6 / 2!, and that the constants are given in the order of + integrations. Constant of the highest-order polynomial term comes first: + + >>> np.polyder(P, 2)(0) + 6.0 + >>> np.polyder(P, 1)(0) + 5.0 + >>> P(0) + 3.0 + + """ + m = int(m) + if m < 0: + raise ValueError("Order of integral must be positive (see polyder)") + if k is None: + k = NX.zeros(m, float) + k = atleast_1d(k) + if len(k) == 1 and m > 1: + k = k[0] * NX.ones(m, float) + if len(k) < m: + raise ValueError( + "k must be a scalar or a rank-1 array of length 1 or >m.") + + truepoly = isinstance(p, poly1d) + p = NX.asarray(p) + if m == 0: + if truepoly: + return poly1d(p) + return p + else: + # Note: this must work also with object and integer arrays + y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]])) + val = polyint(y, m - 1, k=k[1:]) + if truepoly: + return poly1d(val) + return val + + +def _polyder_dispatcher(p, m=None): + return (p,) + + +@array_function_dispatch(_polyder_dispatcher) +def polyder(p, m=1): + """ + Return the derivative of the specified order of a polynomial. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Parameters + ---------- + p : poly1d or sequence + Polynomial to differentiate. + A sequence is interpreted as polynomial coefficients, see `poly1d`. + m : int, optional + Order of differentiation (default: 1) + + Returns + ------- + der : poly1d + A new polynomial representing the derivative. + + See Also + -------- + polyint : Anti-derivative of a polynomial. + poly1d : Class for one-dimensional polynomials. + + Examples + -------- + + The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is: + + >>> import numpy as np + + >>> p = np.poly1d([1,1,1,1]) + >>> p2 = np.polyder(p) + >>> p2 + poly1d([3, 2, 1]) + + which evaluates to: + + >>> p2(2.) + 17.0 + + We can verify this, approximating the derivative with + ``(f(x + h) - f(x))/h``: + + >>> (p(2. + 0.001) - p(2.)) / 0.001 + 17.007000999997857 + + The fourth-order derivative of a 3rd-order polynomial is zero: + + >>> np.polyder(p, 2) + poly1d([6, 2]) + >>> np.polyder(p, 3) + poly1d([6]) + >>> np.polyder(p, 4) + poly1d([0]) + + """ + m = int(m) + if m < 0: + raise ValueError("Order of derivative must be positive (see polyint)") + + truepoly = isinstance(p, poly1d) + p = NX.asarray(p) + n = len(p) - 1 + y = p[:-1] * NX.arange(n, 0, -1) + if m == 0: + val = p + else: + val = polyder(y, m - 1) + if truepoly: + val = poly1d(val) + return val + + +def _polyfit_dispatcher(x, y, deg, rcond=None, full=None, w=None, cov=None): + return (x, y, w) + + +@array_function_dispatch(_polyfit_dispatcher) +def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): + """ + Least squares polynomial fit. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Fit a polynomial ``p[0] * x**deg + ... + p[deg]`` of degree `deg` + to points `(x, y)`. Returns a vector of coefficients `p` that minimises + the squared error in the order `deg`, `deg-1`, ... `0`. + + The `Polynomial.fit ` class + method is recommended for new code as it is more stable numerically. See + the documentation of the method for more information. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int + Degree of the fitting polynomial + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (M,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + cov : bool or str, optional + If given and not `False`, return not just the estimate but also its + covariance matrix. By default, the covariance are scaled by + chi2/dof, where dof = M - (deg + 1), i.e., the weights are presumed + to be unreliable except in a relative sense and everything is scaled + such that the reduced chi2 is unity. This scaling is omitted if + ``cov='unscaled'``, as is relevant for the case that the weights are + w = 1/sigma, with sigma known to be a reliable estimate of the + uncertainty. + + Returns + ------- + p : ndarray, shape (deg + 1,) or (deg + 1, K) + Polynomial coefficients, highest power first. If `y` was 2-D, the + coefficients for `k`-th data set are in ``p[:,k]``. + + residuals, rank, singular_values, rcond + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the effective rank of the scaled Vandermonde + coefficient matrix + - singular_values -- singular values of the scaled Vandermonde + coefficient matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + V : ndarray, shape (deg + 1, deg + 1) or (deg + 1, deg + 1, K) + Present only if ``full == False`` and ``cov == True``. The covariance + matrix of the polynomial coefficient estimates. The diagonal of + this matrix are the variance estimates for each coefficient. If y + is a 2-D array, then the covariance matrix for the `k`-th data set + are in ``V[:,:,k]`` + + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if ``full == False``. + + The warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.exceptions.RankWarning) + + See Also + -------- + polyval : Compute polynomial values. + linalg.lstsq : Computes a least-squares fit. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution minimizes the squared error + + .. math:: + E = \\sum_{j=0}^k |p(x_j) - y_j|^2 + + in the equations:: + + x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0] + x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1] + ... + x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k] + + The coefficient matrix of the coefficients `p` is a Vandermonde matrix. + + `polyfit` issues a `~exceptions.RankWarning` when the least-squares fit is + badly conditioned. This implies that the best fit is not well-defined due + to numerical error. The results may be improved by lowering the polynomial + degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter + can also be set to a value smaller than its default, but the resulting + fit may be spurious: including contributions from the small singular + values can add numerical noise to the result. + + Note that fitting polynomial coefficients is inherently badly conditioned + when the degree of the polynomial is large or the interval of sample points + is badly centered. The quality of the fit should always be checked in these + cases. When polynomial fits are not satisfactory, splines may be a good + alternative. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + .. [2] Wikipedia, "Polynomial interpolation", + https://en.wikipedia.org/wiki/Polynomial_interpolation + + Examples + -------- + >>> import numpy as np + >>> import warnings + >>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0]) + >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0]) + >>> z = np.polyfit(x, y, 3) + >>> z + array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) # may vary + + It is convenient to use `poly1d` objects for dealing with polynomials: + + >>> p = np.poly1d(z) + >>> p(0.5) + 0.6143849206349179 # may vary + >>> p(3.5) + -0.34732142857143039 # may vary + >>> p(10) + 22.579365079365115 # may vary + + High-order polynomials may oscillate wildly: + + >>> with warnings.catch_warnings(): + ... warnings.simplefilter('ignore', np.exceptions.RankWarning) + ... p30 = np.poly1d(np.polyfit(x, y, 30)) + ... + >>> p30(4) + -0.80000000000000204 # may vary + >>> p30(5) + -0.99999999999999445 # may vary + >>> p30(4.5) + -0.10547061179440398 # may vary + + Illustration: + + >>> import matplotlib.pyplot as plt + >>> xp = np.linspace(-2, 6, 100) + >>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--') + >>> plt.ylim(-2,2) + (-2, 2) + >>> plt.show() + + """ + order = int(deg) + 1 + x = NX.asarray(x) + 0.0 + y = NX.asarray(y) + 0.0 + + # check arguments. + if deg < 0: + raise ValueError("expected deg >= 0") + if x.ndim != 1: + raise TypeError("expected 1D vector for x") + if x.size == 0: + raise TypeError("expected non-empty vector for x") + if y.ndim < 1 or y.ndim > 2: + raise TypeError("expected 1D or 2D array for y") + if x.shape[0] != y.shape[0]: + raise TypeError("expected x and y to have same length") + + # set rcond + if rcond is None: + rcond = len(x) * finfo(x.dtype).eps + + # set up least squares equation for powers of x + lhs = vander(x, order) + rhs = y + + # apply weighting + if w is not None: + w = NX.asarray(w) + 0.0 + if w.ndim != 1: + raise TypeError("expected a 1-d array for weights") + if w.shape[0] != y.shape[0]: + raise TypeError("expected w and y to have the same length") + lhs *= w[:, NX.newaxis] + if rhs.ndim == 2: + rhs *= w[:, NX.newaxis] + else: + rhs *= w + + # scale lhs to improve condition number and solve + scale = NX.sqrt((lhs * lhs).sum(axis=0)) + lhs /= scale + c, resids, rank, s = lstsq(lhs, rhs, rcond) + c = (c.T / scale).T # broadcast scale coefficients + + # warn on rank reduction, which indicates an ill conditioned matrix + if rank != order and not full: + msg = "Polyfit may be poorly conditioned" + warnings.warn(msg, RankWarning, stacklevel=2) + + if full: + return c, resids, rank, s, rcond + elif cov: + Vbase = inv(dot(lhs.T, lhs)) + Vbase /= NX.outer(scale, scale) + if cov == "unscaled": + fac = 1 + else: + if len(x) <= order: + raise ValueError("the number of data points must exceed order " + "to scale the covariance matrix") + # note, this used to be: fac = resids / (len(x) - order - 2.0) + # it was decided that the "- 2" (originally justified by "Bayesian + # uncertainty analysis") is not what the user expects + # (see gh-11196 and gh-11197) + fac = resids / (len(x) - order) + if y.ndim == 1: + return c, Vbase * fac + else: + return c, Vbase[:, :, NX.newaxis] * fac + else: + return c + + +def _polyval_dispatcher(p, x): + return (p, x) + + +@array_function_dispatch(_polyval_dispatcher) +def polyval(p, x): + """ + Evaluate a polynomial at specific values. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + If `p` is of length N, this function returns the value:: + + p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1] + + If `x` is a sequence, then ``p(x)`` is returned for each element of ``x``. + If `x` is another polynomial then the composite polynomial ``p(x(t))`` + is returned. + + Parameters + ---------- + p : array_like or poly1d object + 1D array of polynomial coefficients (including coefficients equal + to zero) from highest degree to the constant term, or an + instance of poly1d. + x : array_like or poly1d object + A number, an array of numbers, or an instance of poly1d, at + which to evaluate `p`. + + Returns + ------- + values : ndarray or poly1d + If `x` is a poly1d instance, the result is the composition of the two + polynomials, i.e., `x` is "substituted" in `p` and the simplified + result is returned. In addition, the type of `x` - array_like or + poly1d - governs the type of the output: `x` array_like => `values` + array_like, `x` a poly1d object => `values` is also. + + See Also + -------- + poly1d: A polynomial class. + + Notes + ----- + Horner's scheme [1]_ is used to evaluate the polynomial. Even so, + for polynomials of high degree the values may be inaccurate due to + rounding errors. Use carefully. + + If `x` is a subtype of `ndarray` the return value will be of the same type. + + References + ---------- + .. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng. + trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand + Reinhold Co., 1985, pg. 720. + + Examples + -------- + >>> import numpy as np + >>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1 + 76 + >>> np.polyval([3,0,1], np.poly1d(5)) + poly1d([76]) + >>> np.polyval(np.poly1d([3,0,1]), 5) + 76 + >>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5)) + poly1d([76]) + + """ + p = NX.asarray(p) + if isinstance(x, poly1d): + y = 0 + else: + x = NX.asanyarray(x) + y = NX.zeros_like(x) + for pv in p: + y = y * x + pv + return y + + +def _binary_op_dispatcher(a1, a2): + return (a1, a2) + + +@array_function_dispatch(_binary_op_dispatcher) +def polyadd(a1, a2): + """ + Find the sum of two polynomials. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Returns the polynomial resulting from the sum of two input polynomials. + Each input must be either a poly1d object or a 1D sequence of polynomial + coefficients, from highest to lowest degree. + + Parameters + ---------- + a1, a2 : array_like or poly1d object + Input polynomials. + + Returns + ------- + out : ndarray or poly1d object + The sum of the inputs. If either input is a poly1d object, then the + output is also a poly1d object. Otherwise, it is a 1D array of + polynomial coefficients from highest to lowest degree. + + See Also + -------- + poly1d : A one-dimensional polynomial class. + poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval + + Examples + -------- + >>> import numpy as np + >>> np.polyadd([1, 2], [9, 5, 4]) + array([9, 6, 6]) + + Using poly1d objects: + + >>> p1 = np.poly1d([1, 2]) + >>> p2 = np.poly1d([9, 5, 4]) + >>> print(p1) + 1 x + 2 + >>> print(p2) + 2 + 9 x + 5 x + 4 + >>> print(np.polyadd(p1, p2)) + 2 + 9 x + 6 x + 6 + + """ + truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) + a1 = atleast_1d(a1) + a2 = atleast_1d(a2) + diff = len(a2) - len(a1) + if diff == 0: + val = a1 + a2 + elif diff > 0: + zr = NX.zeros(diff, a1.dtype) + val = NX.concatenate((zr, a1)) + a2 + else: + zr = NX.zeros(abs(diff), a2.dtype) + val = a1 + NX.concatenate((zr, a2)) + if truepoly: + val = poly1d(val) + return val + + +@array_function_dispatch(_binary_op_dispatcher) +def polysub(a1, a2): + """ + Difference (subtraction) of two polynomials. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Given two polynomials `a1` and `a2`, returns ``a1 - a2``. + `a1` and `a2` can be either array_like sequences of the polynomials' + coefficients (including coefficients equal to zero), or `poly1d` objects. + + Parameters + ---------- + a1, a2 : array_like or poly1d + Minuend and subtrahend polynomials, respectively. + + Returns + ------- + out : ndarray or poly1d + Array or `poly1d` object of the difference polynomial's coefficients. + + See Also + -------- + polyval, polydiv, polymul, polyadd + + Examples + -------- + + .. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2) + + >>> import numpy as np + + >>> np.polysub([2, 10, -2], [3, 10, -4]) + array([-1, 0, 2]) + + """ + truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) + a1 = atleast_1d(a1) + a2 = atleast_1d(a2) + diff = len(a2) - len(a1) + if diff == 0: + val = a1 - a2 + elif diff > 0: + zr = NX.zeros(diff, a1.dtype) + val = NX.concatenate((zr, a1)) - a2 + else: + zr = NX.zeros(abs(diff), a2.dtype) + val = a1 - NX.concatenate((zr, a2)) + if truepoly: + val = poly1d(val) + return val + + +@array_function_dispatch(_binary_op_dispatcher) +def polymul(a1, a2): + """ + Find the product of two polynomials. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Finds the polynomial resulting from the multiplication of the two input + polynomials. Each input must be either a poly1d object or a 1D sequence + of polynomial coefficients, from highest to lowest degree. + + Parameters + ---------- + a1, a2 : array_like or poly1d object + Input polynomials. + + Returns + ------- + out : ndarray or poly1d object + The polynomial resulting from the multiplication of the inputs. If + either inputs is a poly1d object, then the output is also a poly1d + object. Otherwise, it is a 1D array of polynomial coefficients from + highest to lowest degree. + + See Also + -------- + poly1d : A one-dimensional polynomial class. + poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval + convolve : Array convolution. Same output as polymul, but has parameter + for overlap mode. + + Examples + -------- + >>> import numpy as np + >>> np.polymul([1, 2, 3], [9, 5, 1]) + array([ 9, 23, 38, 17, 3]) + + Using poly1d objects: + + >>> p1 = np.poly1d([1, 2, 3]) + >>> p2 = np.poly1d([9, 5, 1]) + >>> print(p1) + 2 + 1 x + 2 x + 3 + >>> print(p2) + 2 + 9 x + 5 x + 1 + >>> print(np.polymul(p1, p2)) + 4 3 2 + 9 x + 23 x + 38 x + 17 x + 3 + + """ + truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) + a1, a2 = poly1d(a1), poly1d(a2) + val = NX.convolve(a1, a2) + if truepoly: + val = poly1d(val) + return val + + +def _polydiv_dispatcher(u, v): + return (u, v) + + +@array_function_dispatch(_polydiv_dispatcher) +def polydiv(u, v): + """ + Returns the quotient and remainder of polynomial division. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + The input arrays are the coefficients (including any coefficients + equal to zero) of the "numerator" (dividend) and "denominator" + (divisor) polynomials, respectively. + + Parameters + ---------- + u : array_like or poly1d + Dividend polynomial's coefficients. + + v : array_like or poly1d + Divisor polynomial's coefficients. + + Returns + ------- + q : ndarray + Coefficients, including those equal to zero, of the quotient. + r : ndarray + Coefficients, including those equal to zero, of the remainder. + + See Also + -------- + poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub + polyval + + Notes + ----- + Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need + not equal `v.ndim`. In other words, all four possible combinations - + ``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``, + ``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work. + + Examples + -------- + + .. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25 + + >>> import numpy as np + + >>> x = np.array([3.0, 5.0, 2.0]) + >>> y = np.array([2.0, 1.0]) + >>> np.polydiv(x, y) + (array([1.5 , 1.75]), array([0.25])) + + """ + truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d)) + u = atleast_1d(u) + 0.0 + v = atleast_1d(v) + 0.0 + # w has the common type + w = u[0] + v[0] + m = len(u) - 1 + n = len(v) - 1 + scale = 1. / v[0] + q = NX.zeros((max(m - n + 1, 1),), w.dtype) + r = u.astype(w.dtype) + for k in range(m - n + 1): + d = scale * r[k] + q[k] = d + r[k:k + n + 1] -= d * v + while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1): + r = r[1:] + if truepoly: + return poly1d(q), poly1d(r) + return q, r + + +_poly_mat = re.compile(r"\*\*([0-9]*)") +def _raise_power(astr, wrap=70): + n = 0 + line1 = '' + line2 = '' + output = ' ' + while True: + mat = _poly_mat.search(astr, n) + if mat is None: + break + span = mat.span() + power = mat.groups()[0] + partstr = astr[n:span[0]] + n = span[1] + toadd2 = partstr + ' ' * (len(power) - 1) + toadd1 = ' ' * (len(partstr) - 1) + power + if ((len(line2) + len(toadd2) > wrap) or + (len(line1) + len(toadd1) > wrap)): + output += line1 + "\n" + line2 + "\n " + line1 = toadd1 + line2 = toadd2 + else: + line2 += partstr + ' ' * (len(power) - 1) + line1 += ' ' * (len(partstr) - 1) + power + output += line1 + "\n" + line2 + return output + astr[n:] + + +@set_module('numpy') +class poly1d: + """ + A one-dimensional polynomial class. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + A convenience class, used to encapsulate "natural" operations on + polynomials so that said operations may take on their customary + form in code (see Examples). + + Parameters + ---------- + c_or_r : array_like + The polynomial's coefficients, in decreasing powers, or if + the value of the second parameter is True, the polynomial's + roots (values where the polynomial evaluates to 0). For example, + ``poly1d([1, 2, 3])`` returns an object that represents + :math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns + one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`. + r : bool, optional + If True, `c_or_r` specifies the polynomial's roots; the default + is False. + variable : str, optional + Changes the variable used when printing `p` from `x` to `variable` + (see Examples). + + Examples + -------- + >>> import numpy as np + + Construct the polynomial :math:`x^2 + 2x + 3`: + + >>> import numpy as np + + >>> p = np.poly1d([1, 2, 3]) + >>> print(np.poly1d(p)) + 2 + 1 x + 2 x + 3 + + Evaluate the polynomial at :math:`x = 0.5`: + + >>> p(0.5) + 4.25 + + Find the roots: + + >>> p.r + array([-1.+1.41421356j, -1.-1.41421356j]) + >>> p(p.r) + array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) # may vary + + These numbers in the previous line represent (0, 0) to machine precision + + Show the coefficients: + + >>> p.c + array([1, 2, 3]) + + Display the order (the leading zero-coefficients are removed): + + >>> p.order + 2 + + Show the coefficient of the k-th power in the polynomial + (which is equivalent to ``p.c[-(i+1)]``): + + >>> p[1] + 2 + + Polynomials can be added, subtracted, multiplied, and divided + (returns quotient and remainder): + + >>> p * p + poly1d([ 1, 4, 10, 12, 9]) + + >>> (p**3 + 4) / p + (poly1d([ 1., 4., 10., 12., 9.]), poly1d([4.])) + + ``asarray(p)`` gives the coefficient array, so polynomials can be + used in all functions that accept arrays: + + >>> p**2 # square of polynomial + poly1d([ 1, 4, 10, 12, 9]) + + >>> np.square(p) # square of individual coefficients + array([1, 4, 9]) + + The variable used in the string representation of `p` can be modified, + using the `variable` parameter: + + >>> p = np.poly1d([1,2,3], variable='z') + >>> print(p) + 2 + 1 z + 2 z + 3 + + Construct a polynomial from its roots: + + >>> np.poly1d([1, 2], True) + poly1d([ 1., -3., 2.]) + + This is the same polynomial as obtained by: + + >>> np.poly1d([1, -1]) * np.poly1d([1, -2]) + poly1d([ 1, -3, 2]) + + """ + __hash__ = None + + @property + def coeffs(self): + """ The polynomial coefficients """ + return self._coeffs + + @coeffs.setter + def coeffs(self, value): + # allowing this makes p.coeffs *= 2 legal + if value is not self._coeffs: + raise AttributeError("Cannot set attribute") + + @property + def variable(self): + """ The name of the polynomial variable """ + return self._variable + + # calculated attributes + @property + def order(self): + """ The order or degree of the polynomial """ + return len(self._coeffs) - 1 + + @property + def roots(self): + """ The roots of the polynomial, where self(x) == 0 """ + return roots(self._coeffs) + + # our internal _coeffs property need to be backed by __dict__['coeffs'] for + # scipy to work correctly. + @property + def _coeffs(self): + return self.__dict__['coeffs'] + + @_coeffs.setter + def _coeffs(self, coeffs): + self.__dict__['coeffs'] = coeffs + + # alias attributes + r = roots + c = coef = coefficients = coeffs + o = order + + def __init__(self, c_or_r, r=False, variable=None): + if isinstance(c_or_r, poly1d): + self._variable = c_or_r._variable + self._coeffs = c_or_r._coeffs + + if set(c_or_r.__dict__) - set(self.__dict__): + msg = ("In the future extra properties will not be copied " + "across when constructing one poly1d from another") + warnings.warn(msg, FutureWarning, stacklevel=2) + self.__dict__.update(c_or_r.__dict__) + + if variable is not None: + self._variable = variable + return + if r: + c_or_r = poly(c_or_r) + c_or_r = atleast_1d(c_or_r) + if c_or_r.ndim > 1: + raise ValueError("Polynomial must be 1d only.") + c_or_r = trim_zeros(c_or_r, trim='f') + if len(c_or_r) == 0: + c_or_r = NX.array([0], dtype=c_or_r.dtype) + self._coeffs = c_or_r + if variable is None: + variable = 'x' + self._variable = variable + + def __array__(self, t=None, copy=None): + if t: + return NX.asarray(self.coeffs, t, copy=copy) + else: + return NX.asarray(self.coeffs, copy=copy) + + def __repr__(self): + vals = repr(self.coeffs) + vals = vals[6:-1] + return f"poly1d({vals})" + + def __len__(self): + return self.order + + def __str__(self): + thestr = "0" + var = self.variable + + # Remove leading zeros + coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)] + N = len(coeffs) - 1 + + def fmt_float(q): + s = f'{q:.4g}' + s = s.removesuffix('.0000') + return s + + for k, coeff in enumerate(coeffs): + if not iscomplex(coeff): + coefstr = fmt_float(real(coeff)) + elif real(coeff) == 0: + coefstr = f'{fmt_float(imag(coeff))}j' + else: + coefstr = f'({fmt_float(real(coeff))} + {fmt_float(imag(coeff))}j)' + + power = (N - k) + if power == 0: + if coefstr != '0': + newstr = f'{coefstr}' + elif k == 0: + newstr = '0' + else: + newstr = '' + elif power == 1: + if coefstr == '0': + newstr = '' + elif coefstr == 'b': + newstr = var + else: + newstr = f'{coefstr} {var}' + elif coefstr == '0': + newstr = '' + elif coefstr == 'b': + newstr = '%s**%d' % (var, power,) + else: + newstr = '%s %s**%d' % (coefstr, var, power) + + if k > 0: + if newstr != '': + if newstr.startswith('-'): + thestr = f"{thestr} - {newstr[1:]}" + else: + thestr = f"{thestr} + {newstr}" + else: + thestr = newstr + return _raise_power(thestr) + + def __call__(self, val): + return polyval(self.coeffs, val) + + def __neg__(self): + return poly1d(-self.coeffs) + + def __pos__(self): + return self + + def __mul__(self, other): + if isscalar(other): + return poly1d(self.coeffs * other) + else: + other = poly1d(other) + return poly1d(polymul(self.coeffs, other.coeffs)) + + def __rmul__(self, other): + if isscalar(other): + return poly1d(other * self.coeffs) + else: + other = poly1d(other) + return poly1d(polymul(self.coeffs, other.coeffs)) + + def __add__(self, other): + other = poly1d(other) + return poly1d(polyadd(self.coeffs, other.coeffs)) + + def __radd__(self, other): + other = poly1d(other) + return poly1d(polyadd(self.coeffs, other.coeffs)) + + def __pow__(self, val): + if not isscalar(val) or int(val) != val or val < 0: + raise ValueError("Power to non-negative integers only.") + res = [1] + for _ in range(val): + res = polymul(self.coeffs, res) + return poly1d(res) + + def __sub__(self, other): + other = poly1d(other) + return poly1d(polysub(self.coeffs, other.coeffs)) + + def __rsub__(self, other): + other = poly1d(other) + return poly1d(polysub(other.coeffs, self.coeffs)) + + def __truediv__(self, other): + if isscalar(other): + return poly1d(self.coeffs / other) + else: + other = poly1d(other) + return polydiv(self, other) + + def __rtruediv__(self, other): + if isscalar(other): + return poly1d(other / self.coeffs) + else: + other = poly1d(other) + return polydiv(other, self) + + def __eq__(self, other): + if not isinstance(other, poly1d): + return NotImplemented + if self.coeffs.shape != other.coeffs.shape: + return False + return (self.coeffs == other.coeffs).all() + + def __ne__(self, other): + if not isinstance(other, poly1d): + return NotImplemented + return not self.__eq__(other) + + def __getitem__(self, val): + ind = self.order - val + if val > self.order: + return self.coeffs.dtype.type(0) + if val < 0: + return self.coeffs.dtype.type(0) + return self.coeffs[ind] + + def __setitem__(self, key, val): + ind = self.order - key + if key < 0: + raise ValueError("Does not support negative powers.") + if key > self.order: + zr = NX.zeros(key - self.order, self.coeffs.dtype) + self._coeffs = NX.concatenate((zr, self.coeffs)) + ind = 0 + self._coeffs[ind] = val + + def __iter__(self): + return iter(self.coeffs) + + def integ(self, m=1, k=0): + """ + Return an antiderivative (indefinite integral) of this polynomial. + + Refer to `polyint` for full documentation. + + See Also + -------- + polyint : equivalent function + + """ + return poly1d(polyint(self.coeffs, m=m, k=k)) + + def deriv(self, m=1): + """ + Return a derivative of this polynomial. + + Refer to `polyder` for full documentation. + + See Also + -------- + polyder : equivalent function + + """ + return poly1d(polyder(self.coeffs, m=m)) + +# Stuff to do on module import + + +warnings.simplefilter('always', RankWarning) diff --git a/local_packages/numpy/lib/_polynomial_impl.pyi b/local_packages/numpy/lib/_polynomial_impl.pyi new file mode 100644 index 0000000..9c02a7f --- /dev/null +++ b/local_packages/numpy/lib/_polynomial_impl.pyi @@ -0,0 +1,338 @@ +from typing import ( + Any, + Literal as L, + NoReturn, + SupportsIndex, + SupportsInt, + TypeAlias, + TypeVar, + overload, +) + +import numpy as np +from numpy import ( + complex128, + complexfloating, + float64, + floating, + int32, + int64, + object_, + poly1d, + signedinteger, + unsignedinteger, +) +from numpy._typing import ( + ArrayLike, + NDArray, + _ArrayLikeBool_co, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _ArrayLikeObject_co, + _ArrayLikeUInt_co, +) + +_T = TypeVar("_T") + +_2Tup: TypeAlias = tuple[_T, _T] +_5Tup: TypeAlias = tuple[ + _T, + NDArray[float64], + NDArray[int32], + NDArray[float64], + NDArray[float64], +] + +__all__ = [ + "poly", + "roots", + "polyint", + "polyder", + "polyadd", + "polysub", + "polymul", + "polydiv", + "polyval", + "poly1d", + "polyfit", +] + +def poly(seq_of_zeros: ArrayLike) -> NDArray[floating]: ... + +# Returns either a float or complex array depending on the input values. +# See `np.linalg.eigvals`. +def roots(p: ArrayLike) -> NDArray[complexfloating] | NDArray[floating]: ... + +@overload +def polyint( + p: poly1d, + m: SupportsInt | SupportsIndex = 1, + k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = None, +) -> poly1d: ... +@overload +def polyint( + p: _ArrayLikeFloat_co, + m: SupportsInt | SupportsIndex = 1, + k: _ArrayLikeFloat_co | None = None, +) -> NDArray[floating]: ... +@overload +def polyint( + p: _ArrayLikeComplex_co, + m: SupportsInt | SupportsIndex = 1, + k: _ArrayLikeComplex_co | None = None, +) -> NDArray[complexfloating]: ... +@overload +def polyint( + p: _ArrayLikeObject_co, + m: SupportsInt | SupportsIndex = 1, + k: _ArrayLikeObject_co | None = None, +) -> NDArray[object_]: ... + +@overload +def polyder( + p: poly1d, + m: SupportsInt | SupportsIndex = 1, +) -> poly1d: ... +@overload +def polyder( + p: _ArrayLikeFloat_co, + m: SupportsInt | SupportsIndex = 1, +) -> NDArray[floating]: ... +@overload +def polyder( + p: _ArrayLikeComplex_co, + m: SupportsInt | SupportsIndex = 1, +) -> NDArray[complexfloating]: ... +@overload +def polyder( + p: _ArrayLikeObject_co, + m: SupportsInt | SupportsIndex = 1, +) -> NDArray[object_]: ... + +@overload +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + cov: L[False] = False, +) -> NDArray[float64]: ... +@overload +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + cov: L[False] = False, +) -> NDArray[complex128]: ... +@overload +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + *, + cov: L[True, "unscaled"], +) -> _2Tup[NDArray[float64]]: ... +@overload +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + *, + cov: L[True, "unscaled"], +) -> _2Tup[NDArray[complex128]]: ... +@overload +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None, + full: L[True], + w: _ArrayLikeFloat_co | None = None, + cov: bool | L["unscaled"] = False, +) -> _5Tup[NDArray[float64]]: ... +@overload +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None = None, + *, + full: L[True], + w: _ArrayLikeFloat_co | None = None, + cov: bool | L["unscaled"] = False, +) -> _5Tup[NDArray[float64]]: ... +@overload +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None, + full: L[True], + w: _ArrayLikeFloat_co | None = None, + cov: bool | L["unscaled"] = False, +) -> _5Tup[NDArray[complex128]]: ... +@overload +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None = None, + *, + full: L[True], + w: _ArrayLikeFloat_co | None = None, + cov: bool | L["unscaled"] = False, +) -> _5Tup[NDArray[complex128]]: ... + +@overload +def polyval( + p: _ArrayLikeBool_co, + x: _ArrayLikeBool_co, +) -> NDArray[int64]: ... +@overload +def polyval( + p: _ArrayLikeUInt_co, + x: _ArrayLikeUInt_co, +) -> NDArray[unsignedinteger]: ... +@overload +def polyval( + p: _ArrayLikeInt_co, + x: _ArrayLikeInt_co, +) -> NDArray[signedinteger]: ... +@overload +def polyval( + p: _ArrayLikeFloat_co, + x: _ArrayLikeFloat_co, +) -> NDArray[floating]: ... +@overload +def polyval( + p: _ArrayLikeComplex_co, + x: _ArrayLikeComplex_co, +) -> NDArray[complexfloating]: ... +@overload +def polyval( + p: _ArrayLikeObject_co, + x: _ArrayLikeObject_co, +) -> NDArray[object_]: ... + +@overload +def polyadd( + a1: poly1d, + a2: _ArrayLikeComplex_co | _ArrayLikeObject_co, +) -> poly1d: ... +@overload +def polyadd( + a1: _ArrayLikeComplex_co | _ArrayLikeObject_co, + a2: poly1d, +) -> poly1d: ... +@overload +def polyadd( + a1: _ArrayLikeBool_co, + a2: _ArrayLikeBool_co, +) -> NDArray[np.bool]: ... +@overload +def polyadd( + a1: _ArrayLikeUInt_co, + a2: _ArrayLikeUInt_co, +) -> NDArray[unsignedinteger]: ... +@overload +def polyadd( + a1: _ArrayLikeInt_co, + a2: _ArrayLikeInt_co, +) -> NDArray[signedinteger]: ... +@overload +def polyadd( + a1: _ArrayLikeFloat_co, + a2: _ArrayLikeFloat_co, +) -> NDArray[floating]: ... +@overload +def polyadd( + a1: _ArrayLikeComplex_co, + a2: _ArrayLikeComplex_co, +) -> NDArray[complexfloating]: ... +@overload +def polyadd( + a1: _ArrayLikeObject_co, + a2: _ArrayLikeObject_co, +) -> NDArray[object_]: ... + +@overload +def polysub( + a1: poly1d, + a2: _ArrayLikeComplex_co | _ArrayLikeObject_co, +) -> poly1d: ... +@overload +def polysub( + a1: _ArrayLikeComplex_co | _ArrayLikeObject_co, + a2: poly1d, +) -> poly1d: ... +@overload +def polysub( + a1: _ArrayLikeBool_co, + a2: _ArrayLikeBool_co, +) -> NoReturn: ... +@overload +def polysub( + a1: _ArrayLikeUInt_co, + a2: _ArrayLikeUInt_co, +) -> NDArray[unsignedinteger]: ... +@overload +def polysub( + a1: _ArrayLikeInt_co, + a2: _ArrayLikeInt_co, +) -> NDArray[signedinteger]: ... +@overload +def polysub( + a1: _ArrayLikeFloat_co, + a2: _ArrayLikeFloat_co, +) -> NDArray[floating]: ... +@overload +def polysub( + a1: _ArrayLikeComplex_co, + a2: _ArrayLikeComplex_co, +) -> NDArray[complexfloating]: ... +@overload +def polysub( + a1: _ArrayLikeObject_co, + a2: _ArrayLikeObject_co, +) -> NDArray[object_]: ... + +# NOTE: Not an alias, but they do have the same signature (that we can reuse) +polymul = polyadd + +@overload +def polydiv( + u: poly1d, + v: _ArrayLikeComplex_co | _ArrayLikeObject_co, +) -> _2Tup[poly1d]: ... +@overload +def polydiv( + u: _ArrayLikeComplex_co | _ArrayLikeObject_co, + v: poly1d, +) -> _2Tup[poly1d]: ... +@overload +def polydiv( + u: _ArrayLikeFloat_co, + v: _ArrayLikeFloat_co, +) -> _2Tup[NDArray[floating]]: ... +@overload +def polydiv( + u: _ArrayLikeComplex_co, + v: _ArrayLikeComplex_co, +) -> _2Tup[NDArray[complexfloating]]: ... +@overload +def polydiv( + u: _ArrayLikeObject_co, + v: _ArrayLikeObject_co, +) -> _2Tup[NDArray[Any]]: ... diff --git a/local_packages/numpy/lib/_scimath_impl.py b/local_packages/numpy/lib/_scimath_impl.py new file mode 100644 index 0000000..b33f42b --- /dev/null +++ b/local_packages/numpy/lib/_scimath_impl.py @@ -0,0 +1,642 @@ +""" +Wrapper functions to more user-friendly calling of certain math functions +whose output data-type is different than the input data-type in certain +domains of the input. + +For example, for functions like `log` with branch cuts, the versions in this +module provide the mathematically valid answers in the complex plane:: + + >>> import math + >>> np.emath.log(-math.exp(1)) == (1+1j*math.pi) + True + +Similarly, `sqrt`, other base logarithms, `power` and trig functions are +correctly handled. See their respective docstrings for specific examples. + +""" +import numpy._core.numeric as nx +import numpy._core.numerictypes as nt +from numpy._core.numeric import any, asarray +from numpy._core.overrides import array_function_dispatch, set_module +from numpy.lib._type_check_impl import isreal + +__all__ = [ + 'sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin', + 'arctanh' + ] + + +_ln2 = nx.log(2.0) + + +def _tocomplex(arr): + """Convert its input `arr` to a complex array. + + The input is returned as a complex array of the smallest type that will fit + the original data: types like single, byte, short, etc. become csingle, + while others become cdouble. + + A copy of the input is always made. + + Parameters + ---------- + arr : array + + Returns + ------- + array + An array with the same input data as the input but in complex form. + + Examples + -------- + >>> import numpy as np + + First, consider an input of type short: + + >>> a = np.array([1,2,3],np.short) + + >>> ac = np.lib.scimath._tocomplex(a); ac + array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + + >>> ac.dtype + dtype('complex64') + + If the input is of type double, the output is correspondingly of the + complex double type as well: + + >>> b = np.array([1,2,3],np.double) + + >>> bc = np.lib.scimath._tocomplex(b); bc + array([1.+0.j, 2.+0.j, 3.+0.j]) + + >>> bc.dtype + dtype('complex128') + + Note that even if the input was complex to begin with, a copy is still + made, since the astype() method always copies: + + >>> c = np.array([1,2,3],np.csingle) + + >>> cc = np.lib.scimath._tocomplex(c); cc + array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + + >>> c *= 2; c + array([2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64) + + >>> cc + array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + """ + if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte, + nt.ushort, nt.csingle)): + return arr.astype(nt.csingle) + else: + return arr.astype(nt.cdouble) + + +def _fix_real_lt_zero(x): + """Convert `x` to complex if it has real, negative components. + + Otherwise, output is just the array version of the input (via asarray). + + Parameters + ---------- + x : array_like + + Returns + ------- + array + + Examples + -------- + >>> import numpy as np + >>> np.lib.scimath._fix_real_lt_zero([1,2]) + array([1, 2]) + + >>> np.lib.scimath._fix_real_lt_zero([-1,2]) + array([-1.+0.j, 2.+0.j]) + + """ + x = asarray(x) + if any(isreal(x) & (x < 0)): + x = _tocomplex(x) + return x + + +def _fix_int_lt_zero(x): + """Convert `x` to double if it has real, negative components. + + Otherwise, output is just the array version of the input (via asarray). + + Parameters + ---------- + x : array_like + + Returns + ------- + array + + Examples + -------- + >>> import numpy as np + >>> np.lib.scimath._fix_int_lt_zero([1,2]) + array([1, 2]) + + >>> np.lib.scimath._fix_int_lt_zero([-1,2]) + array([-1., 2.]) + """ + x = asarray(x) + if any(isreal(x) & (x < 0)): + x = x * 1.0 + return x + + +def _fix_real_abs_gt_1(x): + """Convert `x` to complex if it has real components x_i with abs(x_i)>1. + + Otherwise, output is just the array version of the input (via asarray). + + Parameters + ---------- + x : array_like + + Returns + ------- + array + + Examples + -------- + >>> import numpy as np + >>> np.lib.scimath._fix_real_abs_gt_1([0,1]) + array([0, 1]) + + >>> np.lib.scimath._fix_real_abs_gt_1([0,2]) + array([0.+0.j, 2.+0.j]) + """ + x = asarray(x) + if any(isreal(x) & (abs(x) > 1)): + x = _tocomplex(x) + return x + + +def _unary_dispatcher(x): + return (x,) + + +@set_module('numpy.lib.scimath') +@array_function_dispatch(_unary_dispatcher) +def sqrt(x): + """ + Compute the square root of x. + + For negative input elements, a complex value is returned + (unlike `numpy.sqrt` which returns NaN). + + Parameters + ---------- + x : array_like + The input value(s). + + Returns + ------- + out : ndarray or scalar + The square root of `x`. If `x` was a scalar, so is `out`, + otherwise an array is returned. + + See Also + -------- + numpy.sqrt + + Examples + -------- + For real, non-negative inputs this works just like `numpy.sqrt`: + + >>> import numpy as np + + >>> np.emath.sqrt(1) + 1.0 + >>> np.emath.sqrt([1, 4]) + array([1., 2.]) + + But it automatically handles negative inputs: + + >>> np.emath.sqrt(-1) + 1j + >>> np.emath.sqrt([-1,4]) + array([0.+1.j, 2.+0.j]) + + Different results are expected because: + floating point 0.0 and -0.0 are distinct. + + For more control, explicitly use complex() as follows: + + >>> np.emath.sqrt(complex(-4.0, 0.0)) + 2j + >>> np.emath.sqrt(complex(-4.0, -0.0)) + -2j + """ + x = _fix_real_lt_zero(x) + return nx.sqrt(x) + + +@set_module('numpy.lib.scimath') +@array_function_dispatch(_unary_dispatcher) +def log(x): + """ + Compute the natural logarithm of `x`. + + Return the "principal value" (for a description of this, see `numpy.log`) + of :math:`log_e(x)`. For real `x > 0`, this is a real number (``log(0)`` + returns ``-inf`` and ``log(np.inf)`` returns ``inf``). Otherwise, the + complex principle value is returned. + + Parameters + ---------- + x : array_like + The value(s) whose log is (are) required. + + Returns + ------- + out : ndarray or scalar + The log of the `x` value(s). If `x` was a scalar, so is `out`, + otherwise an array is returned. + + See Also + -------- + numpy.log + + Notes + ----- + For a log() that returns ``NAN`` when real `x < 0`, use `numpy.log` + (note, however, that otherwise `numpy.log` and this `log` are identical, + i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, and, + notably, the complex principle value if ``x.imag != 0``). + + Examples + -------- + >>> import numpy as np + >>> np.emath.log(np.exp(1)) + 1.0 + + Negative arguments are handled "correctly" (recall that + ``exp(log(x)) == x`` does *not* hold for real ``x < 0``): + + >>> np.emath.log(-np.exp(1)) == (1 + np.pi * 1j) + True + + """ + x = _fix_real_lt_zero(x) + return nx.log(x) + + +@set_module('numpy.lib.scimath') +@array_function_dispatch(_unary_dispatcher) +def log10(x): + """ + Compute the logarithm base 10 of `x`. + + Return the "principal value" (for a description of this, see + `numpy.log10`) of :math:`log_{10}(x)`. For real `x > 0`, this + is a real number (``log10(0)`` returns ``-inf`` and ``log10(np.inf)`` + returns ``inf``). Otherwise, the complex principle value is returned. + + Parameters + ---------- + x : array_like or scalar + The value(s) whose log base 10 is (are) required. + + Returns + ------- + out : ndarray or scalar + The log base 10 of the `x` value(s). If `x` was a scalar, so is `out`, + otherwise an array object is returned. + + See Also + -------- + numpy.log10 + + Notes + ----- + For a log10() that returns ``NAN`` when real `x < 0`, use `numpy.log10` + (note, however, that otherwise `numpy.log10` and this `log10` are + identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, + and, notably, the complex principle value if ``x.imag != 0``). + + Examples + -------- + >>> import numpy as np + + (We set the printing precision so the example can be auto-tested) + + >>> np.set_printoptions(precision=4) + + >>> np.emath.log10(10**1) + 1.0 + + >>> np.emath.log10([-10**1, -10**2, 10**2]) + array([1.+1.3644j, 2.+1.3644j, 2.+0.j ]) + + """ + x = _fix_real_lt_zero(x) + return nx.log10(x) + + +def _logn_dispatcher(n, x): + return (n, x,) + + +@set_module('numpy.lib.scimath') +@array_function_dispatch(_logn_dispatcher) +def logn(n, x): + """ + Take log base n of x. + + If `x` contains negative inputs, the answer is computed and returned in the + complex domain. + + Parameters + ---------- + n : array_like + The integer base(s) in which the log is taken. + x : array_like + The value(s) whose log base `n` is (are) required. + + Returns + ------- + out : ndarray or scalar + The log base `n` of the `x` value(s). If `x` was a scalar, so is + `out`, otherwise an array is returned. + + Examples + -------- + >>> import numpy as np + >>> np.set_printoptions(precision=4) + + >>> np.emath.logn(2, [4, 8]) + array([2., 3.]) + >>> np.emath.logn(2, [-4, -8, 8]) + array([2.+4.5324j, 3.+4.5324j, 3.+0.j ]) + + """ + x = _fix_real_lt_zero(x) + n = _fix_real_lt_zero(n) + return nx.log(x) / nx.log(n) + + +@set_module('numpy.lib.scimath') +@array_function_dispatch(_unary_dispatcher) +def log2(x): + """ + Compute the logarithm base 2 of `x`. + + Return the "principal value" (for a description of this, see + `numpy.log2`) of :math:`log_2(x)`. For real `x > 0`, this is + a real number (``log2(0)`` returns ``-inf`` and ``log2(np.inf)`` returns + ``inf``). Otherwise, the complex principle value is returned. + + Parameters + ---------- + x : array_like + The value(s) whose log base 2 is (are) required. + + Returns + ------- + out : ndarray or scalar + The log base 2 of the `x` value(s). If `x` was a scalar, so is `out`, + otherwise an array is returned. + + See Also + -------- + numpy.log2 + + Notes + ----- + For a log2() that returns ``NAN`` when real `x < 0`, use `numpy.log2` + (note, however, that otherwise `numpy.log2` and this `log2` are + identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, + and, notably, the complex principle value if ``x.imag != 0``). + + Examples + -------- + + We set the printing precision so the example can be auto-tested: + + >>> np.set_printoptions(precision=4) + + >>> np.emath.log2(8) + 3.0 + >>> np.emath.log2([-4, -8, 8]) + array([2.+4.5324j, 3.+4.5324j, 3.+0.j ]) + + """ + x = _fix_real_lt_zero(x) + return nx.log2(x) + + +def _power_dispatcher(x, p): + return (x, p) + + +@set_module('numpy.lib.scimath') +@array_function_dispatch(_power_dispatcher) +def power(x, p): + """ + Return x to the power p, (x**p). + + If `x` contains negative values, the output is converted to the + complex domain. + + Parameters + ---------- + x : array_like + The input value(s). + p : array_like of ints + The power(s) to which `x` is raised. If `x` contains multiple values, + `p` has to either be a scalar, or contain the same number of values + as `x`. In the latter case, the result is + ``x[0]**p[0], x[1]**p[1], ...``. + + Returns + ------- + out : ndarray or scalar + The result of ``x**p``. If `x` and `p` are scalars, so is `out`, + otherwise an array is returned. + + See Also + -------- + numpy.power + + Examples + -------- + >>> import numpy as np + >>> np.set_printoptions(precision=4) + + >>> np.emath.power(2, 2) + 4 + + >>> np.emath.power([2, 4], 2) + array([ 4, 16]) + + >>> np.emath.power([2, 4], -2) + array([0.25 , 0.0625]) + + >>> np.emath.power([-2, 4], 2) + array([ 4.-0.j, 16.+0.j]) + + >>> np.emath.power([2, 4], [2, 4]) + array([ 4, 256]) + + """ + x = _fix_real_lt_zero(x) + p = _fix_int_lt_zero(p) + return nx.power(x, p) + + +@set_module('numpy.lib.scimath') +@array_function_dispatch(_unary_dispatcher) +def arccos(x): + """ + Compute the inverse cosine of x. + + Return the "principal value" (for a description of this, see + `numpy.arccos`) of the inverse cosine of `x`. For real `x` such that + `abs(x) <= 1`, this is a real number in the closed interval + :math:`[0, \\pi]`. Otherwise, the complex principle value is returned. + + Parameters + ---------- + x : array_like or scalar + The value(s) whose arccos is (are) required. + + Returns + ------- + out : ndarray or scalar + The inverse cosine(s) of the `x` value(s). If `x` was a scalar, so + is `out`, otherwise an array object is returned. + + See Also + -------- + numpy.arccos + + Notes + ----- + For an arccos() that returns ``NAN`` when real `x` is not in the + interval ``[-1,1]``, use `numpy.arccos`. + + Examples + -------- + >>> import numpy as np + >>> np.set_printoptions(precision=4) + + >>> np.emath.arccos(1) # a scalar is returned + 0.0 + + >>> np.emath.arccos([1,2]) + array([0.-0.j , 0.-1.317j]) + + """ + x = _fix_real_abs_gt_1(x) + return nx.arccos(x) + + +@set_module('numpy.lib.scimath') +@array_function_dispatch(_unary_dispatcher) +def arcsin(x): + """ + Compute the inverse sine of x. + + Return the "principal value" (for a description of this, see + `numpy.arcsin`) of the inverse sine of `x`. For real `x` such that + `abs(x) <= 1`, this is a real number in the closed interval + :math:`[-\\pi/2, \\pi/2]`. Otherwise, the complex principle value is + returned. + + Parameters + ---------- + x : array_like or scalar + The value(s) whose arcsin is (are) required. + + Returns + ------- + out : ndarray or scalar + The inverse sine(s) of the `x` value(s). If `x` was a scalar, so + is `out`, otherwise an array object is returned. + + See Also + -------- + numpy.arcsin + + Notes + ----- + For an arcsin() that returns ``NAN`` when real `x` is not in the + interval ``[-1,1]``, use `numpy.arcsin`. + + Examples + -------- + >>> import numpy as np + >>> np.set_printoptions(precision=4) + + >>> np.emath.arcsin(0) + 0.0 + + >>> np.emath.arcsin([0,1]) + array([0. , 1.5708]) + + """ + x = _fix_real_abs_gt_1(x) + return nx.arcsin(x) + + +@set_module('numpy.lib.scimath') +@array_function_dispatch(_unary_dispatcher) +def arctanh(x): + """ + Compute the inverse hyperbolic tangent of `x`. + + Return the "principal value" (for a description of this, see + `numpy.arctanh`) of ``arctanh(x)``. For real `x` such that + ``abs(x) < 1``, this is a real number. If `abs(x) > 1`, or if `x` is + complex, the result is complex. Finally, `x = 1` returns``inf`` and + ``x=-1`` returns ``-inf``. + + Parameters + ---------- + x : array_like + The value(s) whose arctanh is (are) required. + + Returns + ------- + out : ndarray or scalar + The inverse hyperbolic tangent(s) of the `x` value(s). If `x` was + a scalar so is `out`, otherwise an array is returned. + + + See Also + -------- + numpy.arctanh + + Notes + ----- + For an arctanh() that returns ``NAN`` when real `x` is not in the + interval ``(-1,1)``, use `numpy.arctanh` (this latter, however, does + return +/-inf for ``x = +/-1``). + + Examples + -------- + >>> import numpy as np + >>> np.set_printoptions(precision=4) + + >>> np.emath.arctanh(0.5) + 0.5493061443340549 + + >>> import warnings + >>> with warnings.catch_warnings(): + ... warnings.simplefilter('ignore', RuntimeWarning) + ... np.emath.arctanh(np.eye(2)) + array([[inf, 0.], + [ 0., inf]]) + >>> np.emath.arctanh([1j]) + array([0.+0.7854j]) + + """ + x = _fix_real_abs_gt_1(x) + return nx.arctanh(x) diff --git a/local_packages/numpy/lib/_scimath_impl.pyi b/local_packages/numpy/lib/_scimath_impl.pyi new file mode 100644 index 0000000..e6390c2 --- /dev/null +++ b/local_packages/numpy/lib/_scimath_impl.pyi @@ -0,0 +1,93 @@ +from typing import Any, overload + +from numpy import complexfloating +from numpy._typing import ( + NDArray, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ComplexLike_co, + _FloatLike_co, +) + +__all__ = ["sqrt", "log", "log2", "logn", "log10", "power", "arccos", "arcsin", "arctanh"] + +@overload +def sqrt(x: _FloatLike_co) -> Any: ... +@overload +def sqrt(x: _ComplexLike_co) -> complexfloating: ... +@overload +def sqrt(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def sqrt(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +@overload +def log(x: _FloatLike_co) -> Any: ... +@overload +def log(x: _ComplexLike_co) -> complexfloating: ... +@overload +def log(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def log(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +@overload +def log10(x: _FloatLike_co) -> Any: ... +@overload +def log10(x: _ComplexLike_co) -> complexfloating: ... +@overload +def log10(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def log10(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +@overload +def log2(x: _FloatLike_co) -> Any: ... +@overload +def log2(x: _ComplexLike_co) -> complexfloating: ... +@overload +def log2(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def log2(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +@overload +def logn(n: _FloatLike_co, x: _FloatLike_co) -> Any: ... +@overload +def logn(n: _ComplexLike_co, x: _ComplexLike_co) -> complexfloating: ... +@overload +def logn(n: _ArrayLikeFloat_co, x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def logn(n: _ArrayLikeComplex_co, x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +@overload +def power(x: _FloatLike_co, p: _FloatLike_co) -> Any: ... +@overload +def power(x: _ComplexLike_co, p: _ComplexLike_co) -> complexfloating: ... +@overload +def power(x: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def power(x: _ArrayLikeComplex_co, p: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +@overload +def arccos(x: _FloatLike_co) -> Any: ... +@overload +def arccos(x: _ComplexLike_co) -> complexfloating: ... +@overload +def arccos(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def arccos(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +@overload +def arcsin(x: _FloatLike_co) -> Any: ... +@overload +def arcsin(x: _ComplexLike_co) -> complexfloating: ... +@overload +def arcsin(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def arcsin(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +@overload +def arctanh(x: _FloatLike_co) -> Any: ... +@overload +def arctanh(x: _ComplexLike_co) -> complexfloating: ... +@overload +def arctanh(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def arctanh(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... diff --git a/local_packages/numpy/lib/_shape_base_impl.py b/local_packages/numpy/lib/_shape_base_impl.py new file mode 100644 index 0000000..c9e0fd3 --- /dev/null +++ b/local_packages/numpy/lib/_shape_base_impl.py @@ -0,0 +1,1289 @@ +import functools +import warnings + +import numpy as np +import numpy._core.numeric as _nx +from numpy._core import atleast_3d, overrides, vstack +from numpy._core._multiarray_umath import _array_converter +from numpy._core.fromnumeric import reshape, transpose +from numpy._core.multiarray import normalize_axis_index +from numpy._core.numeric import ( + array, + asanyarray, + asarray, + normalize_axis_tuple, + zeros, + zeros_like, +) +from numpy._core.overrides import set_module +from numpy._core.shape_base import _arrays_for_stack_dispatcher +from numpy.lib._index_tricks_impl import ndindex +from numpy.matrixlib.defmatrix import matrix # this raises all the right alarm bells + +__all__ = [ + 'column_stack', 'row_stack', 'dstack', 'array_split', 'split', + 'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims', + 'apply_along_axis', 'kron', 'tile', 'take_along_axis', + 'put_along_axis' + ] + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +def _make_along_axis_idx(arr_shape, indices, axis): + # compute dimensions to iterate over + if not _nx.issubdtype(indices.dtype, _nx.integer): + raise IndexError('`indices` must be an integer array') + if len(arr_shape) != indices.ndim: + raise ValueError( + "`indices` and `arr` must have the same number of dimensions") + shape_ones = (1,) * indices.ndim + dest_dims = list(range(axis)) + [None] + list(range(axis + 1, indices.ndim)) + + # build a fancy index, consisting of orthogonal aranges, with the + # requested index inserted at the right location + fancy_index = [] + for dim, n in zip(dest_dims, arr_shape): + if dim is None: + fancy_index.append(indices) + else: + ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim + 1:] + fancy_index.append(_nx.arange(n).reshape(ind_shape)) + + return tuple(fancy_index) + + +def _take_along_axis_dispatcher(arr, indices, axis=None): + return (arr, indices) + + +@array_function_dispatch(_take_along_axis_dispatcher) +def take_along_axis(arr, indices, axis=-1): + """ + Take values from the input array by matching 1d index and data slices. + + This iterates over matching 1d slices oriented along the specified axis in + the index and data arrays, and uses the former to look up values in the + latter. These slices can be different lengths. + + Functions returning an index along an axis, like `argsort` and + `argpartition`, produce suitable indices for this function. + + Parameters + ---------- + arr : ndarray (Ni..., M, Nk...) + Source array + indices : ndarray (Ni..., J, Nk...) + Indices to take along each 1d slice of ``arr``. This must match the + dimension of ``arr``, but dimensions Ni and Nj only need to broadcast + against ``arr``. + axis : int or None, optional + The axis to take 1d slices along. If axis is None, the input array is + treated as if it had first been flattened to 1d, for consistency with + `sort` and `argsort`. + + .. versionchanged:: 2.3 + The default value is now ``-1``. + + Returns + ------- + out: ndarray (Ni..., J, Nk...) + The indexed result. + + Notes + ----- + This is equivalent to (but faster than) the following use of `ndindex` and + `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices:: + + Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:] + J = indices.shape[axis] # Need not equal M + out = np.empty(Ni + (J,) + Nk) + + for ii in ndindex(Ni): + for kk in ndindex(Nk): + a_1d = a [ii + s_[:,] + kk] + indices_1d = indices[ii + s_[:,] + kk] + out_1d = out [ii + s_[:,] + kk] + for j in range(J): + out_1d[j] = a_1d[indices_1d[j]] + + Equivalently, eliminating the inner loop, the last two lines would be:: + + out_1d[:] = a_1d[indices_1d] + + See Also + -------- + take : Take along an axis, using the same indices for every 1d slice + put_along_axis : + Put values into the destination array by matching 1d index and data slices + + Examples + -------- + >>> import numpy as np + + For this sample array + + >>> a = np.array([[10, 30, 20], [60, 40, 50]]) + + We can sort either by using sort directly, or argsort and this function + + >>> np.sort(a, axis=1) + array([[10, 20, 30], + [40, 50, 60]]) + >>> ai = np.argsort(a, axis=1) + >>> ai + array([[0, 2, 1], + [1, 2, 0]]) + >>> np.take_along_axis(a, ai, axis=1) + array([[10, 20, 30], + [40, 50, 60]]) + + The same works for max and min, if you maintain the trivial dimension + with ``keepdims``: + + >>> np.max(a, axis=1, keepdims=True) + array([[30], + [60]]) + >>> ai = np.argmax(a, axis=1, keepdims=True) + >>> ai + array([[1], + [0]]) + >>> np.take_along_axis(a, ai, axis=1) + array([[30], + [60]]) + + If we want to get the max and min at the same time, we can stack the + indices first + + >>> ai_min = np.argmin(a, axis=1, keepdims=True) + >>> ai_max = np.argmax(a, axis=1, keepdims=True) + >>> ai = np.concatenate([ai_min, ai_max], axis=1) + >>> ai + array([[0, 1], + [1, 0]]) + >>> np.take_along_axis(a, ai, axis=1) + array([[10, 30], + [40, 60]]) + """ + # normalize inputs + if axis is None: + if indices.ndim != 1: + raise ValueError( + 'when axis=None, `indices` must have a single dimension.') + arr = np.array(arr.flat) + axis = 0 + else: + axis = normalize_axis_index(axis, arr.ndim) + + # use the fancy index + return arr[_make_along_axis_idx(arr.shape, indices, axis)] + + +def _put_along_axis_dispatcher(arr, indices, values, axis): + return (arr, indices, values) + + +@array_function_dispatch(_put_along_axis_dispatcher) +def put_along_axis(arr, indices, values, axis): + """ + Put values into the destination array by matching 1d index and data slices. + + This iterates over matching 1d slices oriented along the specified axis in + the index and data arrays, and uses the former to place values into the + latter. These slices can be different lengths. + + Functions returning an index along an axis, like `argsort` and + `argpartition`, produce suitable indices for this function. + + Parameters + ---------- + arr : ndarray (Ni..., M, Nk...) + Destination array. + indices : ndarray (Ni..., J, Nk...) + Indices to change along each 1d slice of `arr`. This must match the + dimension of arr, but dimensions in Ni and Nj may be 1 to broadcast + against `arr`. + values : array_like (Ni..., J, Nk...) + values to insert at those indices. Its shape and dimension are + broadcast to match that of `indices`. + axis : int + The axis to take 1d slices along. If axis is None, the destination + array is treated as if a flattened 1d view had been created of it. + + Notes + ----- + This is equivalent to (but faster than) the following use of `ndindex` and + `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices:: + + Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:] + J = indices.shape[axis] # Need not equal M + + for ii in ndindex(Ni): + for kk in ndindex(Nk): + a_1d = a [ii + s_[:,] + kk] + indices_1d = indices[ii + s_[:,] + kk] + values_1d = values [ii + s_[:,] + kk] + for j in range(J): + a_1d[indices_1d[j]] = values_1d[j] + + Equivalently, eliminating the inner loop, the last two lines would be:: + + a_1d[indices_1d] = values_1d + + See Also + -------- + take_along_axis : + Take values from the input array by matching 1d index and data slices + + Examples + -------- + >>> import numpy as np + + For this sample array + + >>> a = np.array([[10, 30, 20], [60, 40, 50]]) + + We can replace the maximum values with: + + >>> ai = np.argmax(a, axis=1, keepdims=True) + >>> ai + array([[1], + [0]]) + >>> np.put_along_axis(a, ai, 99, axis=1) + >>> a + array([[10, 99, 20], + [99, 40, 50]]) + + """ + # normalize inputs + if axis is None: + if indices.ndim != 1: + raise ValueError( + 'when axis=None, `indices` must have a single dimension.') + arr = np.array(arr.flat) + axis = 0 + else: + axis = normalize_axis_index(axis, arr.ndim) + + # use the fancy index + arr[_make_along_axis_idx(arr.shape, indices, axis)] = values + + +def _apply_along_axis_dispatcher(func1d, axis, arr, *args, **kwargs): + return (arr,) + + +@array_function_dispatch(_apply_along_axis_dispatcher) +def apply_along_axis(func1d, axis, arr, *args, **kwargs): + """ + Apply a function to 1-D slices along the given axis. + + Execute `func1d(a, *args, **kwargs)` where `func1d` operates on 1-D arrays + and `a` is a 1-D slice of `arr` along `axis`. + + This is equivalent to (but faster than) the following use of `ndindex` and + `s_`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of indices:: + + Ni, Nk = a.shape[:axis], a.shape[axis+1:] + for ii in ndindex(Ni): + for kk in ndindex(Nk): + f = func1d(arr[ii + s_[:,] + kk]) + Nj = f.shape + for jj in ndindex(Nj): + out[ii + jj + kk] = f[jj] + + Equivalently, eliminating the inner loop, this can be expressed as:: + + Ni, Nk = a.shape[:axis], a.shape[axis+1:] + for ii in ndindex(Ni): + for kk in ndindex(Nk): + out[ii + s_[...,] + kk] = func1d(arr[ii + s_[:,] + kk]) + + Parameters + ---------- + func1d : function (M,) -> (Nj...) + This function should accept 1-D arrays. It is applied to 1-D + slices of `arr` along the specified axis. + axis : integer + Axis along which `arr` is sliced. + arr : ndarray (Ni..., M, Nk...) + Input array. + args : any + Additional arguments to `func1d`. + kwargs : any + Additional named arguments to `func1d`. + + Returns + ------- + out : ndarray (Ni..., Nj..., Nk...) + The output array. The shape of `out` is identical to the shape of + `arr`, except along the `axis` dimension. This axis is removed, and + replaced with new dimensions equal to the shape of the return value + of `func1d`. So if `func1d` returns a scalar `out` will have one + fewer dimensions than `arr`. + + See Also + -------- + apply_over_axes : Apply a function repeatedly over multiple axes. + + Examples + -------- + >>> import numpy as np + >>> def my_func(a): + ... \"\"\"Average first and last element of a 1-D array\"\"\" + ... return (a[0] + a[-1]) * 0.5 + >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]]) + >>> np.apply_along_axis(my_func, 0, b) + array([4., 5., 6.]) + >>> np.apply_along_axis(my_func, 1, b) + array([2., 5., 8.]) + + For a function that returns a 1D array, the number of dimensions in + `outarr` is the same as `arr`. + + >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]]) + >>> np.apply_along_axis(sorted, 1, b) + array([[1, 7, 8], + [3, 4, 9], + [2, 5, 6]]) + + For a function that returns a higher dimensional array, those dimensions + are inserted in place of the `axis` dimension. + + >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]]) + >>> np.apply_along_axis(np.diag, -1, b) + array([[[1, 0, 0], + [0, 2, 0], + [0, 0, 3]], + [[4, 0, 0], + [0, 5, 0], + [0, 0, 6]], + [[7, 0, 0], + [0, 8, 0], + [0, 0, 9]]]) + """ + # handle negative axes + conv = _array_converter(arr) + arr = conv[0] + + nd = arr.ndim + axis = normalize_axis_index(axis, nd) + + # arr, with the iteration axis at the end + in_dims = list(range(nd)) + inarr_view = transpose(arr, in_dims[:axis] + in_dims[axis + 1:] + [axis]) + + # compute indices for the iteration axes, and append a trailing ellipsis to + # prevent 0d arrays decaying to scalars, which fixes gh-8642 + inds = ndindex(inarr_view.shape[:-1]) + inds = (ind + (Ellipsis,) for ind in inds) + + # invoke the function on the first item + try: + ind0 = next(inds) + except StopIteration: + raise ValueError( + 'Cannot apply_along_axis when any iteration dimensions are 0' + ) from None + res = asanyarray(func1d(inarr_view[ind0], *args, **kwargs)) + + # build a buffer for storing evaluations of func1d. + # remove the requested axis, and add the new ones on the end. + # laid out so that each write is contiguous. + # for a tuple index inds, buff[inds] = func1d(inarr_view[inds]) + if not isinstance(res, matrix): + buff = zeros_like(res, shape=inarr_view.shape[:-1] + res.shape) + else: + # Matrices are nasty with reshaping, so do not preserve them here. + buff = zeros(inarr_view.shape[:-1] + res.shape, dtype=res.dtype) + + # permutation of axes such that out = buff.transpose(buff_permute) + buff_dims = list(range(buff.ndim)) + buff_permute = ( + buff_dims[0 : axis] + + buff_dims[buff.ndim - res.ndim : buff.ndim] + + buff_dims[axis : buff.ndim - res.ndim] + ) + + # save the first result, then compute and save all remaining results + buff[ind0] = res + for ind in inds: + buff[ind] = asanyarray(func1d(inarr_view[ind], *args, **kwargs)) + + res = transpose(buff, buff_permute) + return conv.wrap(res) + + +def _apply_over_axes_dispatcher(func, a, axes): + return (a,) + + +@array_function_dispatch(_apply_over_axes_dispatcher) +def apply_over_axes(func, a, axes): + """ + Apply a function repeatedly over multiple axes. + + `func` is called as `res = func(a, axis)`, where `axis` is the first + element of `axes`. The result `res` of the function call must have + either the same dimensions as `a` or one less dimension. If `res` + has one less dimension than `a`, a dimension is inserted before + `axis`. The call to `func` is then repeated for each axis in `axes`, + with `res` as the first argument. + + Parameters + ---------- + func : function + This function must take two arguments, `func(a, axis)`. + a : array_like + Input array. + axes : array_like + Axes over which `func` is applied; the elements must be integers. + + Returns + ------- + apply_over_axis : ndarray + The output array. The number of dimensions is the same as `a`, + but the shape can be different. This depends on whether `func` + changes the shape of its output with respect to its input. + + See Also + -------- + apply_along_axis : + Apply a function to 1-D slices of an array along the given axis. + + Notes + ----- + This function is equivalent to tuple axis arguments to reorderable ufuncs + with keepdims=True. Tuple axis arguments to ufuncs have been available since + version 1.7.0. + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(24).reshape(2,3,4) + >>> a + array([[[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]], + [[12, 13, 14, 15], + [16, 17, 18, 19], + [20, 21, 22, 23]]]) + + Sum over axes 0 and 2. The result has same number of dimensions + as the original array: + + >>> np.apply_over_axes(np.sum, a, [0,2]) + array([[[ 60], + [ 92], + [124]]]) + + Tuple axis arguments to ufuncs are equivalent: + + >>> np.sum(a, axis=(0,2), keepdims=True) + array([[[ 60], + [ 92], + [124]]]) + + """ + val = asarray(a) + N = a.ndim + if array(axes).ndim == 0: + axes = (axes,) + for axis in axes: + if axis < 0: + axis = N + axis + args = (val, axis) + res = func(*args) + if res.ndim == val.ndim: + val = res + else: + res = expand_dims(res, axis) + if res.ndim == val.ndim: + val = res + else: + raise ValueError("function is not returning " + "an array of the correct shape") + return val + + +def _expand_dims_dispatcher(a, axis): + return (a,) + + +@array_function_dispatch(_expand_dims_dispatcher) +def expand_dims(a, axis): + """ + Expand the shape of an array. + + Insert a new axis that will appear at the `axis` position in the expanded + array shape. + + Parameters + ---------- + a : array_like + Input array. + axis : int or tuple of ints + Position in the expanded axes where the new axis (or axes) is placed. + + .. deprecated:: 1.13.0 + Passing an axis where ``axis > a.ndim`` will be treated as + ``axis == a.ndim``, and passing ``axis < -a.ndim - 1`` will + be treated as ``axis == 0``. This behavior is deprecated. + + Returns + ------- + result : ndarray + View of `a` with the number of dimensions increased. + + See Also + -------- + squeeze : The inverse operation, removing singleton dimensions + reshape : Insert, remove, and combine dimensions, and resize existing ones + atleast_1d, atleast_2d, atleast_3d + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2]) + >>> x.shape + (2,) + + The following is equivalent to ``x[np.newaxis, :]`` or ``x[np.newaxis]``: + + >>> y = np.expand_dims(x, axis=0) + >>> y + array([[1, 2]]) + >>> y.shape + (1, 2) + + The following is equivalent to ``x[:, np.newaxis]``: + + >>> y = np.expand_dims(x, axis=1) + >>> y + array([[1], + [2]]) + >>> y.shape + (2, 1) + + ``axis`` may also be a tuple: + + >>> y = np.expand_dims(x, axis=(0, 1)) + >>> y + array([[[1, 2]]]) + + >>> y = np.expand_dims(x, axis=(2, 0)) + >>> y + array([[[1], + [2]]]) + + Note that some examples may use ``None`` instead of ``np.newaxis``. These + are the same objects: + + >>> np.newaxis is None + True + + """ + if isinstance(a, matrix): + a = asarray(a) + else: + a = asanyarray(a) + + if not isinstance(axis, (tuple, list)): + axis = (axis,) + + out_ndim = len(axis) + a.ndim + axis = normalize_axis_tuple(axis, out_ndim) + + shape_it = iter(a.shape) + shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)] + + return a.reshape(shape) + + +# NOTE: Remove once deprecation period passes +@set_module("numpy") +def row_stack(tup, *, dtype=None, casting="same_kind"): + # Deprecated in NumPy 2.0, 2023-08-18 + warnings.warn( + "`row_stack` alias is deprecated. " + "Use `np.vstack` directly.", + DeprecationWarning, + stacklevel=2 + ) + return vstack(tup, dtype=dtype, casting=casting) + + +row_stack.__doc__ = vstack.__doc__ + + +def _column_stack_dispatcher(tup): + return _arrays_for_stack_dispatcher(tup) + + +@array_function_dispatch(_column_stack_dispatcher) +def column_stack(tup): + """ + Stack 1-D arrays as columns into a 2-D array. + + Take a sequence of 1-D arrays and stack them as columns + to make a single 2-D array. 2-D arrays are stacked as-is, + just like with `hstack`. 1-D arrays are turned into 2-D columns + first. + + Parameters + ---------- + tup : sequence of 1-D or 2-D arrays. + Arrays to stack. All of them must have the same first dimension. + + Returns + ------- + stacked : 2-D array + The array formed by stacking the given arrays. + + See Also + -------- + stack, hstack, vstack, concatenate + + Examples + -------- + >>> import numpy as np + >>> a = np.array((1,2,3)) + >>> b = np.array((4,5,6)) + >>> np.column_stack((a,b)) + array([[1, 4], + [2, 5], + [3, 6]]) + + """ + arrays = [] + for v in tup: + arr = asanyarray(v) + if arr.ndim < 2: + arr = array(arr, copy=None, subok=True, ndmin=2).T + arrays.append(arr) + return _nx.concatenate(arrays, 1) + + +def _dstack_dispatcher(tup): + return _arrays_for_stack_dispatcher(tup) + + +@array_function_dispatch(_dstack_dispatcher) +def dstack(tup): + """ + Stack arrays in sequence depth wise (along third axis). + + This is equivalent to concatenation along the third axis after 2-D arrays + of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape + `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by + `dsplit`. + + This function makes most sense for arrays with up to 3 dimensions. For + instance, for pixel-data with a height (first axis), width (second axis), + and r/g/b channels (third axis). The functions `concatenate`, `stack` and + `block` provide more general stacking and concatenation operations. + + Parameters + ---------- + tup : sequence of arrays + The arrays must have the same shape along all but the third axis. + 1-D or 2-D arrays must have the same shape. + + Returns + ------- + stacked : ndarray + The array formed by stacking the given arrays, will be at least 3-D. + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. + block : Assemble an nd-array from nested lists of blocks. + vstack : Stack arrays in sequence vertically (row wise). + hstack : Stack arrays in sequence horizontally (column wise). + column_stack : Stack 1-D arrays as columns into a 2-D array. + dsplit : Split array along third axis. + + Examples + -------- + >>> import numpy as np + >>> a = np.array((1,2,3)) + >>> b = np.array((4,5,6)) + >>> np.dstack((a,b)) + array([[[1, 4], + [2, 5], + [3, 6]]]) + + >>> a = np.array([[1],[2],[3]]) + >>> b = np.array([[4],[5],[6]]) + >>> np.dstack((a,b)) + array([[[1, 4]], + [[2, 5]], + [[3, 6]]]) + + """ + arrs = atleast_3d(*tup) + if not isinstance(arrs, tuple): + arrs = (arrs,) + return _nx.concatenate(arrs, 2) + + +def _array_split_dispatcher(ary, indices_or_sections, axis=None): + return (ary, indices_or_sections) + + +@array_function_dispatch(_array_split_dispatcher) +def array_split(ary, indices_or_sections, axis=0): + """ + Split an array into multiple sub-arrays. + + Please refer to the ``split`` documentation. The only difference + between these functions is that ``array_split`` allows + `indices_or_sections` to be an integer that does *not* equally + divide the axis. For an array of length l that should be split + into n sections, it returns l % n sub-arrays of size l//n + 1 + and the rest of size l//n. + + See Also + -------- + split : Split array into multiple sub-arrays of equal size. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(8.0) + >>> np.array_split(x, 3) + [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])] + + >>> x = np.arange(9) + >>> np.array_split(x, 4) + [array([0, 1, 2]), array([3, 4]), array([5, 6]), array([7, 8])] + + """ + try: + Ntotal = ary.shape[axis] + except AttributeError: + Ntotal = len(ary) + try: + # handle array case. + Nsections = len(indices_or_sections) + 1 + div_points = [0] + list(indices_or_sections) + [Ntotal] + except TypeError: + # indices_or_sections is a scalar, not an array. + Nsections = int(indices_or_sections) + if Nsections <= 0: + raise ValueError('number sections must be larger than 0.') from None + Neach_section, extras = divmod(Ntotal, Nsections) + section_sizes = ([0] + + extras * [Neach_section + 1] + + (Nsections - extras) * [Neach_section]) + div_points = _nx.array(section_sizes, dtype=_nx.intp).cumsum() + + sub_arys = [] + sary = _nx.swapaxes(ary, axis, 0) + for i in range(Nsections): + st = div_points[i] + end = div_points[i + 1] + sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0)) + + return sub_arys + + +def _split_dispatcher(ary, indices_or_sections, axis=None): + return (ary, indices_or_sections) + + +@array_function_dispatch(_split_dispatcher) +def split(ary, indices_or_sections, axis=0): + """ + Split an array into multiple sub-arrays as views into `ary`. + + Parameters + ---------- + ary : ndarray + Array to be divided into sub-arrays. + indices_or_sections : int or 1-D array + If `indices_or_sections` is an integer, N, the array will be divided + into N equal arrays along `axis`. If such a split is not possible, + an error is raised. + + If `indices_or_sections` is a 1-D array of sorted integers, the entries + indicate where along `axis` the array is split. For example, + ``[2, 3]`` would, for ``axis=0``, result in + + - ary[:2] + - ary[2:3] + - ary[3:] + + If an index exceeds the dimension of the array along `axis`, + an empty sub-array is returned correspondingly. + axis : int, optional + The axis along which to split, default is 0. + + Returns + ------- + sub-arrays : list of ndarrays + A list of sub-arrays as views into `ary`. + + Raises + ------ + ValueError + If `indices_or_sections` is given as an integer, but + a split does not result in equal division. + + See Also + -------- + array_split : Split an array into multiple sub-arrays of equal or + near-equal size. Does not raise an exception if + an equal division cannot be made. + hsplit : Split array into multiple sub-arrays horizontally (column-wise). + vsplit : Split array into multiple sub-arrays vertically (row wise). + dsplit : Split array into multiple sub-arrays along the 3rd axis (depth). + concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. + hstack : Stack arrays in sequence horizontally (column wise). + vstack : Stack arrays in sequence vertically (row wise). + dstack : Stack arrays in sequence depth wise (along third dimension). + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(9.0) + >>> np.split(x, 3) + [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])] + + >>> x = np.arange(8.0) + >>> np.split(x, [3, 5, 6, 10]) + [array([0., 1., 2.]), + array([3., 4.]), + array([5.]), + array([6., 7.]), + array([], dtype=float64)] + + """ + try: + len(indices_or_sections) + except TypeError: + sections = indices_or_sections + N = ary.shape[axis] + if N % sections: + raise ValueError( + 'array split does not result in an equal division') from None + return array_split(ary, indices_or_sections, axis) + + +def _hvdsplit_dispatcher(ary, indices_or_sections): + return (ary, indices_or_sections) + + +@array_function_dispatch(_hvdsplit_dispatcher) +def hsplit(ary, indices_or_sections): + """ + Split an array into multiple sub-arrays horizontally (column-wise). + + Please refer to the `split` documentation. `hsplit` is equivalent + to `split` with ``axis=1``, the array is always split along the second + axis except for 1-D arrays, where it is split at ``axis=0``. + + See Also + -------- + split : Split an array into multiple sub-arrays of equal size. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(16.0).reshape(4, 4) + >>> x + array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) + >>> np.hsplit(x, 2) + [array([[ 0., 1.], + [ 4., 5.], + [ 8., 9.], + [12., 13.]]), + array([[ 2., 3.], + [ 6., 7.], + [10., 11.], + [14., 15.]])] + >>> np.hsplit(x, np.array([3, 6])) + [array([[ 0., 1., 2.], + [ 4., 5., 6.], + [ 8., 9., 10.], + [12., 13., 14.]]), + array([[ 3.], + [ 7.], + [11.], + [15.]]), + array([], shape=(4, 0), dtype=float64)] + + With a higher dimensional array the split is still along the second axis. + + >>> x = np.arange(8.0).reshape(2, 2, 2) + >>> x + array([[[0., 1.], + [2., 3.]], + [[4., 5.], + [6., 7.]]]) + >>> np.hsplit(x, 2) + [array([[[0., 1.]], + [[4., 5.]]]), + array([[[2., 3.]], + [[6., 7.]]])] + + With a 1-D array, the split is along axis 0. + + >>> x = np.array([0, 1, 2, 3, 4, 5]) + >>> np.hsplit(x, 2) + [array([0, 1, 2]), array([3, 4, 5])] + + """ + if _nx.ndim(ary) == 0: + raise ValueError('hsplit only works on arrays of 1 or more dimensions') + if ary.ndim > 1: + return split(ary, indices_or_sections, 1) + else: + return split(ary, indices_or_sections, 0) + + +@array_function_dispatch(_hvdsplit_dispatcher) +def vsplit(ary, indices_or_sections): + """ + Split an array into multiple sub-arrays vertically (row-wise). + + Please refer to the ``split`` documentation. ``vsplit`` is equivalent + to ``split`` with `axis=0` (default), the array is always split along the + first axis regardless of the array dimension. + + See Also + -------- + split : Split an array into multiple sub-arrays of equal size. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(16.0).reshape(4, 4) + >>> x + array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) + >>> np.vsplit(x, 2) + [array([[0., 1., 2., 3.], + [4., 5., 6., 7.]]), + array([[ 8., 9., 10., 11.], + [12., 13., 14., 15.]])] + >>> np.vsplit(x, np.array([3, 6])) + [array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]]), + array([[12., 13., 14., 15.]]), + array([], shape=(0, 4), dtype=float64)] + + With a higher dimensional array the split is still along the first axis. + + >>> x = np.arange(8.0).reshape(2, 2, 2) + >>> x + array([[[0., 1.], + [2., 3.]], + [[4., 5.], + [6., 7.]]]) + >>> np.vsplit(x, 2) + [array([[[0., 1.], + [2., 3.]]]), + array([[[4., 5.], + [6., 7.]]])] + + """ + if _nx.ndim(ary) < 2: + raise ValueError('vsplit only works on arrays of 2 or more dimensions') + return split(ary, indices_or_sections, 0) + + +@array_function_dispatch(_hvdsplit_dispatcher) +def dsplit(ary, indices_or_sections): + """ + Split array into multiple sub-arrays along the 3rd axis (depth). + + Please refer to the `split` documentation. `dsplit` is equivalent + to `split` with ``axis=2``, the array is always split along the third + axis provided the array dimension is greater than or equal to 3. + + See Also + -------- + split : Split an array into multiple sub-arrays of equal size. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(16.0).reshape(2, 2, 4) + >>> x + array([[[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.]], + [[ 8., 9., 10., 11.], + [12., 13., 14., 15.]]]) + >>> np.dsplit(x, 2) + [array([[[ 0., 1.], + [ 4., 5.]], + [[ 8., 9.], + [12., 13.]]]), array([[[ 2., 3.], + [ 6., 7.]], + [[10., 11.], + [14., 15.]]])] + >>> np.dsplit(x, np.array([3, 6])) + [array([[[ 0., 1., 2.], + [ 4., 5., 6.]], + [[ 8., 9., 10.], + [12., 13., 14.]]]), + array([[[ 3.], + [ 7.]], + [[11.], + [15.]]]), + array([], shape=(2, 2, 0), dtype=float64)] + """ + if _nx.ndim(ary) < 3: + raise ValueError('dsplit only works on arrays of 3 or more dimensions') + return split(ary, indices_or_sections, 2) + + +def get_array_wrap(*args): + """Find the wrapper for the array with the highest priority. + + In case of ties, leftmost wins. If no wrapper is found, return None. + + .. deprecated:: 2.0 + """ + + # Deprecated in NumPy 2.0, 2023-07-11 + warnings.warn( + "`get_array_wrap` is deprecated. " + "(deprecated in NumPy 2.0)", + DeprecationWarning, + stacklevel=2 + ) + + wrappers = sorted((getattr(x, '__array_priority__', 0), -i, + x.__array_wrap__) for i, x in enumerate(args) + if hasattr(x, '__array_wrap__')) + if wrappers: + return wrappers[-1][-1] + return None + + +def _kron_dispatcher(a, b): + return (a, b) + + +@array_function_dispatch(_kron_dispatcher) +def kron(a, b): + """ + Kronecker product of two arrays. + + Computes the Kronecker product, a composite array made of blocks of the + second array scaled by the first. + + Parameters + ---------- + a, b : array_like + + Returns + ------- + out : ndarray + + See Also + -------- + outer : The outer product + + Notes + ----- + The function assumes that the number of dimensions of `a` and `b` + are the same, if necessary prepending the smallest with ones. + If ``a.shape = (r0,r1,...,rN)`` and ``b.shape = (s0,s1,...,sN)``, + the Kronecker product has shape ``(r0*s0, r1*s1, ..., rN*SN)``. + The elements are products of elements from `a` and `b`, organized + explicitly by:: + + kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN] + + where:: + + kt = it * st + jt, t = 0,...,N + + In the common 2-D case (N=1), the block structure can be visualized:: + + [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ], + [ ... ... ], + [ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]] + + + Examples + -------- + >>> import numpy as np + >>> np.kron([1,10,100], [5,6,7]) + array([ 5, 6, 7, ..., 500, 600, 700]) + >>> np.kron([5,6,7], [1,10,100]) + array([ 5, 50, 500, ..., 7, 70, 700]) + + >>> np.kron(np.eye(2), np.ones((2,2))) + array([[1., 1., 0., 0.], + [1., 1., 0., 0.], + [0., 0., 1., 1.], + [0., 0., 1., 1.]]) + + >>> a = np.arange(100).reshape((2,5,2,5)) + >>> b = np.arange(24).reshape((2,3,4)) + >>> c = np.kron(a,b) + >>> c.shape + (2, 10, 6, 20) + >>> I = (1,3,0,2) + >>> J = (0,2,1) + >>> J1 = (0,) + J # extend to ndim=4 + >>> S1 = (1,) + b.shape + >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1)) + >>> c[K] == a[I]*b[J] + True + + """ + # Working: + # 1. Equalise the shapes by prepending smaller array with 1s + # 2. Expand shapes of both the arrays by adding new axes at + # odd positions for 1st array and even positions for 2nd + # 3. Compute the product of the modified array + # 4. The inner most array elements now contain the rows of + # the Kronecker product + # 5. Reshape the result to kron's shape, which is same as + # product of shapes of the two arrays. + b = asanyarray(b) + a = array(a, copy=None, subok=True, ndmin=b.ndim) + is_any_mat = isinstance(a, matrix) or isinstance(b, matrix) + ndb, nda = b.ndim, a.ndim + nd = max(ndb, nda) + + if (nda == 0 or ndb == 0): + return _nx.multiply(a, b) + + as_ = a.shape + bs = b.shape + if not a.flags.contiguous: + a = reshape(a, as_) + if not b.flags.contiguous: + b = reshape(b, bs) + + # Equalise the shapes by prepending smaller one with 1s + as_ = (1,) * max(0, ndb - nda) + as_ + bs = (1,) * max(0, nda - ndb) + bs + + # Insert empty dimensions + a_arr = expand_dims(a, axis=tuple(range(ndb - nda))) + b_arr = expand_dims(b, axis=tuple(range(nda - ndb))) + + # Compute the product + a_arr = expand_dims(a_arr, axis=tuple(range(1, nd * 2, 2))) + b_arr = expand_dims(b_arr, axis=tuple(range(0, nd * 2, 2))) + # In case of `mat`, convert result to `array` + result = _nx.multiply(a_arr, b_arr, subok=(not is_any_mat)) + + # Reshape back + result = result.reshape(_nx.multiply(as_, bs)) + + return result if not is_any_mat else matrix(result, copy=False) + + +def _tile_dispatcher(A, reps): + return (A, reps) + + +@array_function_dispatch(_tile_dispatcher) +def tile(A, reps): + """ + Construct an array by repeating A the number of times given by reps. + + If `reps` has length ``d``, the result will have dimension of + ``max(d, A.ndim)``. + + If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new + axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication, + or shape (1, 1, 3) for 3-D replication. If this is not the desired + behavior, promote `A` to d-dimensions manually before calling this + function. + + If ``A.ndim > d``, `reps` is promoted to `A`.ndim by prepending 1's to it. + Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as + (1, 1, 2, 2). + + Note : Although tile may be used for broadcasting, it is strongly + recommended to use numpy's broadcasting operations and functions. + + Parameters + ---------- + A : array_like + The input array. + reps : array_like + The number of repetitions of `A` along each axis. + + Returns + ------- + c : ndarray + The tiled output array. + + See Also + -------- + repeat : Repeat elements of an array. + broadcast_to : Broadcast an array to a new shape + + Examples + -------- + >>> import numpy as np + >>> a = np.array([0, 1, 2]) + >>> np.tile(a, 2) + array([0, 1, 2, 0, 1, 2]) + >>> np.tile(a, (2, 2)) + array([[0, 1, 2, 0, 1, 2], + [0, 1, 2, 0, 1, 2]]) + >>> np.tile(a, (2, 1, 2)) + array([[[0, 1, 2, 0, 1, 2]], + [[0, 1, 2, 0, 1, 2]]]) + + >>> b = np.array([[1, 2], [3, 4]]) + >>> np.tile(b, 2) + array([[1, 2, 1, 2], + [3, 4, 3, 4]]) + >>> np.tile(b, (2, 1)) + array([[1, 2], + [3, 4], + [1, 2], + [3, 4]]) + + >>> c = np.array([1,2,3,4]) + >>> np.tile(c,(4,1)) + array([[1, 2, 3, 4], + [1, 2, 3, 4], + [1, 2, 3, 4], + [1, 2, 3, 4]]) + """ + try: + tup = tuple(reps) + except TypeError: + tup = (reps,) + d = len(tup) + if all(x == 1 for x in tup) and isinstance(A, _nx.ndarray): + # Fixes the problem that the function does not make a copy if A is a + # numpy array and the repetitions are 1 in all dimensions + return _nx.array(A, copy=True, subok=True, ndmin=d) + else: + # Note that no copy of zero-sized arrays is made. However since they + # have no data there is no risk of an inadvertent overwrite. + c = _nx.array(A, copy=None, subok=True, ndmin=d) + if (d < c.ndim): + tup = (1,) * (c.ndim - d) + tup + shape_out = tuple(s * t for s, t in zip(c.shape, tup)) + n = c.size + if n > 0: + for dim_in, nrep in zip(c.shape, tup): + if nrep != 1: + c = c.reshape(-1, n).repeat(nrep, 0) + n //= dim_in + return c.reshape(shape_out) diff --git a/local_packages/numpy/lib/_shape_base_impl.pyi b/local_packages/numpy/lib/_shape_base_impl.pyi new file mode 100644 index 0000000..352f57d --- /dev/null +++ b/local_packages/numpy/lib/_shape_base_impl.pyi @@ -0,0 +1,236 @@ +from collections.abc import Callable, Sequence +from typing import ( + Any, + Concatenate, + ParamSpec, + Protocol, + SupportsIndex, + TypeVar, + overload, + type_check_only, +) +from typing_extensions import deprecated + +import numpy as np +from numpy import ( + _CastingKind, + complexfloating, + floating, + generic, + integer, + object_, + signedinteger, + ufunc, + unsignedinteger, +) +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _ArrayLikeObject_co, + _ArrayLikeUInt_co, + _ShapeLike, +) + +__all__ = [ + "column_stack", + "row_stack", + "dstack", + "array_split", + "split", + "hsplit", + "vsplit", + "dsplit", + "apply_over_axes", + "expand_dims", + "apply_along_axis", + "kron", + "tile", + "take_along_axis", + "put_along_axis", +] + +_P = ParamSpec("_P") +_ScalarT = TypeVar("_ScalarT", bound=generic) + +# Signature of `__array_wrap__` +@type_check_only +class _ArrayWrap(Protocol): + def __call__( + self, + array: NDArray[Any], + context: tuple[ufunc, tuple[Any, ...], int] | None = ..., + return_scalar: bool = ..., + /, + ) -> Any: ... + +@type_check_only +class _SupportsArrayWrap(Protocol): + @property + def __array_wrap__(self) -> _ArrayWrap: ... + +### + +def take_along_axis( + arr: _ScalarT | NDArray[_ScalarT], + indices: NDArray[integer], + axis: int | None = -1, +) -> NDArray[_ScalarT]: ... + +def put_along_axis( + arr: NDArray[_ScalarT], + indices: NDArray[integer], + values: ArrayLike, + axis: int | None, +) -> None: ... + +@overload +def apply_along_axis( + func1d: Callable[Concatenate[NDArray[Any], _P], _ArrayLike[_ScalarT]], + axis: SupportsIndex, + arr: ArrayLike, + *args: _P.args, + **kwargs: _P.kwargs, +) -> NDArray[_ScalarT]: ... +@overload +def apply_along_axis( + func1d: Callable[Concatenate[NDArray[Any], _P], Any], + axis: SupportsIndex, + arr: ArrayLike, + *args: _P.args, + **kwargs: _P.kwargs, +) -> NDArray[Any]: ... + +def apply_over_axes( + func: Callable[[NDArray[Any], int], NDArray[_ScalarT]], + a: ArrayLike, + axes: int | Sequence[int], +) -> NDArray[_ScalarT]: ... + +@overload +def expand_dims( + a: _ArrayLike[_ScalarT], + axis: _ShapeLike, +) -> NDArray[_ScalarT]: ... +@overload +def expand_dims( + a: ArrayLike, + axis: _ShapeLike, +) -> NDArray[Any]: ... + +# Deprecated in NumPy 2.0, 2023-08-18 +@deprecated("`row_stack` alias is deprecated. Use `np.vstack` directly.") +def row_stack( + tup: Sequence[ArrayLike], + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind", +) -> NDArray[Any]: ... + +# keep in sync with `numpy.ma.extras.column_stack` +@overload +def column_stack(tup: Sequence[_ArrayLike[_ScalarT]]) -> NDArray[_ScalarT]: ... +@overload +def column_stack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ... + +# keep in sync with `numpy.ma.extras.dstack` +@overload +def dstack(tup: Sequence[_ArrayLike[_ScalarT]]) -> NDArray[_ScalarT]: ... +@overload +def dstack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ... + +@overload +def array_split( + ary: _ArrayLike[_ScalarT], + indices_or_sections: _ShapeLike, + axis: SupportsIndex = 0, +) -> list[NDArray[_ScalarT]]: ... +@overload +def array_split( + ary: ArrayLike, + indices_or_sections: _ShapeLike, + axis: SupportsIndex = 0, +) -> list[NDArray[Any]]: ... + +@overload +def split( + ary: _ArrayLike[_ScalarT], + indices_or_sections: _ShapeLike, + axis: SupportsIndex = 0, +) -> list[NDArray[_ScalarT]]: ... +@overload +def split( + ary: ArrayLike, + indices_or_sections: _ShapeLike, + axis: SupportsIndex = 0, +) -> list[NDArray[Any]]: ... + +# keep in sync with `numpy.ma.extras.hsplit` +@overload +def hsplit( + ary: _ArrayLike[_ScalarT], + indices_or_sections: _ShapeLike, +) -> list[NDArray[_ScalarT]]: ... +@overload +def hsplit( + ary: ArrayLike, + indices_or_sections: _ShapeLike, +) -> list[NDArray[Any]]: ... + +@overload +def vsplit( + ary: _ArrayLike[_ScalarT], + indices_or_sections: _ShapeLike, +) -> list[NDArray[_ScalarT]]: ... +@overload +def vsplit( + ary: ArrayLike, + indices_or_sections: _ShapeLike, +) -> list[NDArray[Any]]: ... + +@overload +def dsplit( + ary: _ArrayLike[_ScalarT], + indices_or_sections: _ShapeLike, +) -> list[NDArray[_ScalarT]]: ... +@overload +def dsplit( + ary: ArrayLike, + indices_or_sections: _ShapeLike, +) -> list[NDArray[Any]]: ... + +@overload +def get_array_wrap(*args: _SupportsArrayWrap) -> _ArrayWrap: ... +@overload +def get_array_wrap(*args: object) -> _ArrayWrap | None: ... + +@overload +def kron(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] +@overload +def kron(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co) -> NDArray[unsignedinteger]: ... # type: ignore[misc] +@overload +def kron(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> NDArray[signedinteger]: ... # type: ignore[misc] +@overload +def kron(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[floating]: ... # type: ignore[misc] +@overload +def kron(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... +@overload +def kron(a: _ArrayLikeObject_co, b: Any) -> NDArray[object_]: ... +@overload +def kron(a: Any, b: _ArrayLikeObject_co) -> NDArray[object_]: ... + +@overload +def tile( + A: _ArrayLike[_ScalarT], + reps: int | Sequence[int], +) -> NDArray[_ScalarT]: ... +@overload +def tile( + A: ArrayLike, + reps: int | Sequence[int], +) -> NDArray[Any]: ... diff --git a/local_packages/numpy/lib/_stride_tricks_impl.py b/local_packages/numpy/lib/_stride_tricks_impl.py new file mode 100644 index 0000000..98a79b3 --- /dev/null +++ b/local_packages/numpy/lib/_stride_tricks_impl.py @@ -0,0 +1,582 @@ +""" +Utilities that manipulate strides to achieve desirable effects. + +An explanation of strides can be found in the :ref:`arrays.ndarray`. + +""" +import numpy as np +from numpy._core.numeric import normalize_axis_tuple +from numpy._core.overrides import array_function_dispatch, set_module + +__all__ = ['broadcast_to', 'broadcast_arrays', 'broadcast_shapes'] + + +class DummyArray: + """Dummy object that just exists to hang __array_interface__ dictionaries + and possibly keep alive a reference to a base array. + """ + + def __init__(self, interface, base=None): + self.__array_interface__ = interface + self.base = base + + +def _maybe_view_as_subclass(original_array, new_array): + if type(original_array) is not type(new_array): + # if input was an ndarray subclass and subclasses were OK, + # then view the result as that subclass. + new_array = new_array.view(type=type(original_array)) + # Since we have done something akin to a view from original_array, we + # should let the subclass finalize (if it has it implemented, i.e., is + # not None). + if new_array.__array_finalize__: + new_array.__array_finalize__(original_array) + return new_array + + +@set_module("numpy.lib.stride_tricks") +def as_strided(x, shape=None, strides=None, subok=False, writeable=True): + """ + Create a view into the array with the given shape and strides. + + .. warning:: This function has to be used with extreme care, see notes. + + Parameters + ---------- + x : ndarray + Array to create a new. + shape : sequence of int, optional + The shape of the new array. Defaults to ``x.shape``. + strides : sequence of int, optional + The strides of the new array. Defaults to ``x.strides``. + subok : bool, optional + If True, subclasses are preserved. + writeable : bool, optional + If set to False, the returned array will always be readonly. + Otherwise it will be writable if the original array was. It + is advisable to set this to False if possible (see Notes). + + Returns + ------- + view : ndarray + + See also + -------- + broadcast_to : broadcast an array to a given shape. + reshape : reshape an array. + lib.stride_tricks.sliding_window_view : + userfriendly and safe function for a creation of sliding window views. + + Notes + ----- + ``as_strided`` creates a view into the array given the exact strides + and shape. This means it manipulates the internal data structure of + ndarray and, if done incorrectly, the array elements can point to + invalid memory and can corrupt results or crash your program. + It is advisable to always use the original ``x.strides`` when + calculating new strides to avoid reliance on a contiguous memory + layout. + + Furthermore, arrays created with this function often contain self + overlapping memory, so that two elements are identical. + Vectorized write operations on such arrays will typically be + unpredictable. They may even give different results for small, large, + or transposed arrays. + + Since writing to these arrays has to be tested and done with great + care, you may want to use ``writeable=False`` to avoid accidental write + operations. + + For these reasons it is advisable to avoid ``as_strided`` when + possible. + """ + # first convert input to array, possibly keeping subclass + x = np.array(x, copy=None, subok=subok) + interface = dict(x.__array_interface__) + if shape is not None: + interface['shape'] = tuple(shape) + if strides is not None: + interface['strides'] = tuple(strides) + + array = np.asarray(DummyArray(interface, base=x)) + # The route via `__interface__` does not preserve structured + # dtypes. Since dtype should remain unchanged, we set it explicitly. + array.dtype = x.dtype + + view = _maybe_view_as_subclass(x, array) + + if view.flags.writeable and not writeable: + view.flags.writeable = False + + return view + + +def _sliding_window_view_dispatcher(x, window_shape, axis=None, *, + subok=None, writeable=None): + return (x,) + + +@array_function_dispatch( + _sliding_window_view_dispatcher, module="numpy.lib.stride_tricks" +) +def sliding_window_view(x, window_shape, axis=None, *, + subok=False, writeable=False): + """ + Create a sliding window view into the array with the given window shape. + + Also known as rolling or moving window, the window slides across all + dimensions of the array and extracts subsets of the array at all window + positions. + + .. versionadded:: 1.20.0 + + Parameters + ---------- + x : array_like + Array to create the sliding window view from. + window_shape : int or tuple of int + Size of window over each axis that takes part in the sliding window. + If `axis` is not present, must have same length as the number of input + array dimensions. Single integers `i` are treated as if they were the + tuple `(i,)`. + axis : int or tuple of int, optional + Axis or axes along which the sliding window is applied. + By default, the sliding window is applied to all axes and + `window_shape[i]` will refer to axis `i` of `x`. + If `axis` is given as a `tuple of int`, `window_shape[i]` will refer to + the axis `axis[i]` of `x`. + Single integers `i` are treated as if they were the tuple `(i,)`. + subok : bool, optional + If True, sub-classes will be passed-through, otherwise the returned + array will be forced to be a base-class array (default). + writeable : bool, optional + When true, allow writing to the returned view. The default is false, + as this should be used with caution: the returned view contains the + same memory location multiple times, so writing to one location will + cause others to change. + + Returns + ------- + view : ndarray + Sliding window view of the array. The sliding window dimensions are + inserted at the end, and the original dimensions are trimmed as + required by the size of the sliding window. + That is, ``view.shape = x_shape_trimmed + window_shape``, where + ``x_shape_trimmed`` is ``x.shape`` with every entry reduced by one less + than the corresponding window size. + + See Also + -------- + lib.stride_tricks.as_strided: A lower-level and less safe routine for + creating arbitrary views from custom shape and strides. + broadcast_to: broadcast an array to a given shape. + + Notes + ----- + .. warning:: + + This function creates views with overlapping memory. When + ``writeable=True``, writing to the view will modify the original array + and may affect multiple view positions. See the examples below and + :doc:`this guide ` + about the difference between copies and views. + + For many applications using a sliding window view can be convenient, but + potentially very slow. Often specialized solutions exist, for example: + + - `scipy.signal.fftconvolve` + + - filtering functions in `scipy.ndimage` + + - moving window functions provided by + `bottleneck `_. + + As a rough estimate, a sliding window approach with an input size of `N` + and a window size of `W` will scale as `O(N*W)` where frequently a special + algorithm can achieve `O(N)`. That means that the sliding window variant + for a window size of 100 can be a 100 times slower than a more specialized + version. + + Nevertheless, for small window sizes, when no custom algorithm exists, or + as a prototyping and developing tool, this function can be a good solution. + + Examples + -------- + >>> import numpy as np + >>> from numpy.lib.stride_tricks import sliding_window_view + >>> x = np.arange(6) + >>> x.shape + (6,) + >>> v = sliding_window_view(x, 3) + >>> v.shape + (4, 3) + >>> v + array([[0, 1, 2], + [1, 2, 3], + [2, 3, 4], + [3, 4, 5]]) + + This also works in more dimensions, e.g. + + >>> i, j = np.ogrid[:3, :4] + >>> x = 10*i + j + >>> x.shape + (3, 4) + >>> x + array([[ 0, 1, 2, 3], + [10, 11, 12, 13], + [20, 21, 22, 23]]) + >>> shape = (2,2) + >>> v = sliding_window_view(x, shape) + >>> v.shape + (2, 3, 2, 2) + >>> v + array([[[[ 0, 1], + [10, 11]], + [[ 1, 2], + [11, 12]], + [[ 2, 3], + [12, 13]]], + [[[10, 11], + [20, 21]], + [[11, 12], + [21, 22]], + [[12, 13], + [22, 23]]]]) + + The axis can be specified explicitly: + + >>> v = sliding_window_view(x, 3, 0) + >>> v.shape + (1, 4, 3) + >>> v + array([[[ 0, 10, 20], + [ 1, 11, 21], + [ 2, 12, 22], + [ 3, 13, 23]]]) + + The same axis can be used several times. In that case, every use reduces + the corresponding original dimension: + + >>> v = sliding_window_view(x, (2, 3), (1, 1)) + >>> v.shape + (3, 1, 2, 3) + >>> v + array([[[[ 0, 1, 2], + [ 1, 2, 3]]], + [[[10, 11, 12], + [11, 12, 13]]], + [[[20, 21, 22], + [21, 22, 23]]]]) + + Combining with stepped slicing (`::step`), this can be used to take sliding + views which skip elements: + + >>> x = np.arange(7) + >>> sliding_window_view(x, 5)[:, ::2] + array([[0, 2, 4], + [1, 3, 5], + [2, 4, 6]]) + + or views which move by multiple elements + + >>> x = np.arange(7) + >>> sliding_window_view(x, 3)[::2, :] + array([[0, 1, 2], + [2, 3, 4], + [4, 5, 6]]) + + A common application of `sliding_window_view` is the calculation of running + statistics. The simplest example is the + `moving average `_: + + >>> x = np.arange(6) + >>> x.shape + (6,) + >>> v = sliding_window_view(x, 3) + >>> v.shape + (4, 3) + >>> v + array([[0, 1, 2], + [1, 2, 3], + [2, 3, 4], + [3, 4, 5]]) + >>> moving_average = v.mean(axis=-1) + >>> moving_average + array([1., 2., 3., 4.]) + + The two examples below demonstrate the effect of ``writeable=True``. + + Creating a view with the default ``writeable=False`` and then writing to + it raises an error. + + >>> v = sliding_window_view(x, 3) + >>> v[0,1] = 10 + Traceback (most recent call last): + ... + ValueError: assignment destination is read-only + + Creating a view with ``writeable=True`` and then writing to it changes + the original array and multiple view positions. + + >>> x = np.arange(6) # reset x for the second example + >>> v = sliding_window_view(x, 3, writeable=True) + >>> v[0,1] = 10 + >>> x + array([ 0, 10, 2, 3, 4, 5]) + >>> v + array([[ 0, 10, 2], + [10, 2, 3], + [ 2, 3, 4], + [ 3, 4, 5]]) + + Note that a sliding window approach is often **not** optimal (see Notes). + """ + window_shape = (tuple(window_shape) + if np.iterable(window_shape) + else (window_shape,)) + # first convert input to array, possibly keeping subclass + x = np.array(x, copy=None, subok=subok) + + window_shape_array = np.array(window_shape) + if np.any(window_shape_array < 0): + raise ValueError('`window_shape` cannot contain negative values') + + if axis is None: + axis = tuple(range(x.ndim)) + if len(window_shape) != len(axis): + raise ValueError(f'Since axis is `None`, must provide ' + f'window_shape for all dimensions of `x`; ' + f'got {len(window_shape)} window_shape elements ' + f'and `x.ndim` is {x.ndim}.') + else: + axis = normalize_axis_tuple(axis, x.ndim, allow_duplicate=True) + if len(window_shape) != len(axis): + raise ValueError(f'Must provide matching length window_shape and ' + f'axis; got {len(window_shape)} window_shape ' + f'elements and {len(axis)} axes elements.') + + out_strides = x.strides + tuple(x.strides[ax] for ax in axis) + + # note: same axis can be windowed repeatedly + x_shape_trimmed = list(x.shape) + for ax, dim in zip(axis, window_shape): + if x_shape_trimmed[ax] < dim: + raise ValueError( + 'window shape cannot be larger than input array shape') + x_shape_trimmed[ax] -= dim - 1 + out_shape = tuple(x_shape_trimmed) + window_shape + return as_strided(x, strides=out_strides, shape=out_shape, + subok=subok, writeable=writeable) + + +def _broadcast_to(array, shape, subok, readonly): + shape = tuple(shape) if np.iterable(shape) else (shape,) + array = np.array(array, copy=None, subok=subok) + if not shape and array.shape: + raise ValueError('cannot broadcast a non-scalar to a scalar array') + if any(size < 0 for size in shape): + raise ValueError('all elements of broadcast shape must be non-' + 'negative') + extras = [] + it = np.nditer( + (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'] + extras, + op_flags=['readonly'], itershape=shape, order='C') + with it: + # never really has writebackifcopy semantics + broadcast = it.itviews[0] + result = _maybe_view_as_subclass(array, broadcast) + # In a future version this will go away + if not readonly and array.flags._writeable_no_warn: + result.flags.writeable = True + result.flags._warn_on_write = True + return result + + +def _broadcast_to_dispatcher(array, shape, subok=None): + return (array,) + + +@array_function_dispatch(_broadcast_to_dispatcher, module='numpy') +def broadcast_to(array, shape, subok=False): + """Broadcast an array to a new shape. + + Parameters + ---------- + array : array_like + The array to broadcast. + shape : tuple or int + The shape of the desired array. A single integer ``i`` is interpreted + as ``(i,)``. + subok : bool, optional + If True, then sub-classes will be passed-through, otherwise + the returned array will be forced to be a base-class array (default). + + Returns + ------- + broadcast : array + A readonly view on the original array with the given shape. It is + typically not contiguous. Furthermore, more than one element of a + broadcasted array may refer to a single memory location. + + Raises + ------ + ValueError + If the array is not compatible with the new shape according to NumPy's + broadcasting rules. + + See Also + -------- + broadcast + broadcast_arrays + broadcast_shapes + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3]) + >>> np.broadcast_to(x, (3, 3)) + array([[1, 2, 3], + [1, 2, 3], + [1, 2, 3]]) + """ + return _broadcast_to(array, shape, subok=subok, readonly=True) + + +def _broadcast_shape(*args): + """Returns the shape of the arrays that would result from broadcasting the + supplied arrays against each other. + """ + # use the old-iterator because np.nditer does not handle size 0 arrays + # consistently + b = np.broadcast(*args[:64]) + # unfortunately, it cannot handle 64 or more arguments directly + for pos in range(64, len(args), 63): + # ironically, np.broadcast does not properly handle np.broadcast + # objects (it treats them as scalars) + # use broadcasting to avoid allocating the full array + b = broadcast_to(0, b.shape) + b = np.broadcast(b, *args[pos:(pos + 63)]) + return b.shape + + +_size0_dtype = np.dtype([]) + + +@set_module('numpy') +def broadcast_shapes(*args): + """ + Broadcast the input shapes into a single shape. + + :ref:`Learn more about broadcasting here `. + + .. versionadded:: 1.20.0 + + Parameters + ---------- + *args : tuples of ints, or ints + The shapes to be broadcast against each other. + + Returns + ------- + tuple + Broadcasted shape. + + Raises + ------ + ValueError + If the shapes are not compatible and cannot be broadcast according + to NumPy's broadcasting rules. + + See Also + -------- + broadcast + broadcast_arrays + broadcast_to + + Examples + -------- + >>> import numpy as np + >>> np.broadcast_shapes((1, 2), (3, 1), (3, 2)) + (3, 2) + + >>> np.broadcast_shapes((6, 7), (5, 6, 1), (7,), (5, 1, 7)) + (5, 6, 7) + """ + arrays = [np.empty(x, dtype=_size0_dtype) for x in args] + return _broadcast_shape(*arrays) + + +def _broadcast_arrays_dispatcher(*args, subok=None): + return args + + +@array_function_dispatch(_broadcast_arrays_dispatcher, module='numpy') +def broadcast_arrays(*args, subok=False): + """ + Broadcast any number of arrays against each other. + + Parameters + ---------- + *args : array_likes + The arrays to broadcast. + + subok : bool, optional + If True, then sub-classes will be passed-through, otherwise + the returned arrays will be forced to be a base-class array (default). + + Returns + ------- + broadcasted : tuple of arrays + These arrays are views on the original arrays. They are typically + not contiguous. Furthermore, more than one element of a + broadcasted array may refer to a single memory location. If you need + to write to the arrays, make copies first. While you can set the + ``writable`` flag True, writing to a single output value may end up + changing more than one location in the output array. + + .. deprecated:: 1.17 + The output is currently marked so that if written to, a deprecation + warning will be emitted. A future version will set the + ``writable`` flag False so writing to it will raise an error. + + See Also + -------- + broadcast + broadcast_to + broadcast_shapes + + Examples + -------- + >>> import numpy as np + >>> x = np.array([[1,2,3]]) + >>> y = np.array([[4],[5]]) + >>> np.broadcast_arrays(x, y) + (array([[1, 2, 3], + [1, 2, 3]]), + array([[4, 4, 4], + [5, 5, 5]])) + + Here is a useful idiom for getting contiguous copies instead of + non-contiguous views. + + >>> [np.array(a) for a in np.broadcast_arrays(x, y)] + [array([[1, 2, 3], + [1, 2, 3]]), + array([[4, 4, 4], + [5, 5, 5]])] + + """ + # nditer is not used here to avoid the limit of 64 arrays. + # Otherwise, something like the following one-liner would suffice: + # return np.nditer(args, flags=['multi_index', 'zerosize_ok'], + # order='C').itviews + + args = [np.array(_m, copy=None, subok=subok) for _m in args] + + shape = _broadcast_shape(*args) + + result = [array if array.shape == shape + else _broadcast_to(array, shape, subok=subok, readonly=False) + for array in args] + return tuple(result) diff --git a/local_packages/numpy/lib/_stride_tricks_impl.pyi b/local_packages/numpy/lib/_stride_tricks_impl.pyi new file mode 100644 index 0000000..008f2d5 --- /dev/null +++ b/local_packages/numpy/lib/_stride_tricks_impl.pyi @@ -0,0 +1,73 @@ +from collections.abc import Iterable +from typing import Any, SupportsIndex, TypeVar, overload + +from numpy import generic +from numpy._typing import ArrayLike, NDArray, _AnyShape, _ArrayLike, _ShapeLike + +__all__ = ["broadcast_to", "broadcast_arrays", "broadcast_shapes"] + +_ScalarT = TypeVar("_ScalarT", bound=generic) + +class DummyArray: + __array_interface__: dict[str, Any] + base: NDArray[Any] | None + def __init__( + self, + interface: dict[str, Any], + base: NDArray[Any] | None = None, + ) -> None: ... + +@overload +def as_strided( + x: _ArrayLike[_ScalarT], + shape: Iterable[int] | None = None, + strides: Iterable[int] | None = None, + subok: bool = False, + writeable: bool = True, +) -> NDArray[_ScalarT]: ... +@overload +def as_strided( + x: ArrayLike, + shape: Iterable[int] | None = None, + strides: Iterable[int] | None = None, + subok: bool = False, + writeable: bool = True, +) -> NDArray[Any]: ... + +@overload +def sliding_window_view( + x: _ArrayLike[_ScalarT], + window_shape: int | Iterable[int], + axis: SupportsIndex | None = None, + *, + subok: bool = False, + writeable: bool = False, +) -> NDArray[_ScalarT]: ... +@overload +def sliding_window_view( + x: ArrayLike, + window_shape: int | Iterable[int], + axis: SupportsIndex | None = None, + *, + subok: bool = False, + writeable: bool = False, +) -> NDArray[Any]: ... + +@overload +def broadcast_to( + array: _ArrayLike[_ScalarT], + shape: int | Iterable[int], + subok: bool = False, +) -> NDArray[_ScalarT]: ... +@overload +def broadcast_to( + array: ArrayLike, + shape: int | Iterable[int], + subok: bool = False, +) -> NDArray[Any]: ... + +def broadcast_shapes(*args: _ShapeLike) -> _AnyShape: ... +def broadcast_arrays(*args: ArrayLike, subok: bool = False) -> tuple[NDArray[Any], ...]: ... + +# used internally by `lib._function_base_impl._parse_input_dimensions` +def _broadcast_shape(*args: ArrayLike) -> _AnyShape: ... diff --git a/local_packages/numpy/lib/_twodim_base_impl.py b/local_packages/numpy/lib/_twodim_base_impl.py new file mode 100644 index 0000000..bad797a --- /dev/null +++ b/local_packages/numpy/lib/_twodim_base_impl.py @@ -0,0 +1,1201 @@ +""" Basic functions for manipulating 2d arrays + +""" +import functools +import operator + +from numpy._core import iinfo, overrides +from numpy._core._multiarray_umath import _array_converter +from numpy._core.numeric import ( + arange, + asanyarray, + asarray, + diagonal, + empty, + greater_equal, + indices, + int8, + int16, + int32, + int64, + intp, + multiply, + nonzero, + ones, + promote_types, + where, + zeros, +) +from numpy._core.overrides import finalize_array_function_like, set_module +from numpy.lib._stride_tricks_impl import broadcast_to + +__all__ = [ + 'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu', + 'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices', + 'tril_indices_from', 'triu_indices', 'triu_indices_from', ] + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +i1 = iinfo(int8) +i2 = iinfo(int16) +i4 = iinfo(int32) + + +def _min_int(low, high): + """ get small int that fits the range """ + if high <= i1.max and low >= i1.min: + return int8 + if high <= i2.max and low >= i2.min: + return int16 + if high <= i4.max and low >= i4.min: + return int32 + return int64 + + +def _flip_dispatcher(m): + return (m,) + + +@array_function_dispatch(_flip_dispatcher) +def fliplr(m): + """ + Reverse the order of elements along axis 1 (left/right). + + For a 2-D array, this flips the entries in each row in the left/right + direction. Columns are preserved, but appear in a different order than + before. + + Parameters + ---------- + m : array_like + Input array, must be at least 2-D. + + Returns + ------- + f : ndarray + A view of `m` with the columns reversed. Since a view + is returned, this operation is :math:`\\mathcal O(1)`. + + See Also + -------- + flipud : Flip array in the up/down direction. + flip : Flip array in one or more dimensions. + rot90 : Rotate array counterclockwise. + + Notes + ----- + Equivalent to ``m[:,::-1]`` or ``np.flip(m, axis=1)``. + Requires the array to be at least 2-D. + + Examples + -------- + >>> import numpy as np + >>> A = np.diag([1.,2.,3.]) + >>> A + array([[1., 0., 0.], + [0., 2., 0.], + [0., 0., 3.]]) + >>> np.fliplr(A) + array([[0., 0., 1.], + [0., 2., 0.], + [3., 0., 0.]]) + + >>> rng = np.random.default_rng() + >>> A = rng.normal(size=(2,3,5)) + >>> np.all(np.fliplr(A) == A[:,::-1,...]) + True + + """ + m = asanyarray(m) + if m.ndim < 2: + raise ValueError("Input must be >= 2-d.") + return m[:, ::-1] + + +@array_function_dispatch(_flip_dispatcher) +def flipud(m): + """ + Reverse the order of elements along axis 0 (up/down). + + For a 2-D array, this flips the entries in each column in the up/down + direction. Rows are preserved, but appear in a different order than before. + + Parameters + ---------- + m : array_like + Input array. + + Returns + ------- + out : array_like + A view of `m` with the rows reversed. Since a view is + returned, this operation is :math:`\\mathcal O(1)`. + + See Also + -------- + fliplr : Flip array in the left/right direction. + flip : Flip array in one or more dimensions. + rot90 : Rotate array counterclockwise. + + Notes + ----- + Equivalent to ``m[::-1, ...]`` or ``np.flip(m, axis=0)``. + Requires the array to be at least 1-D. + + Examples + -------- + >>> import numpy as np + >>> A = np.diag([1.0, 2, 3]) + >>> A + array([[1., 0., 0.], + [0., 2., 0.], + [0., 0., 3.]]) + >>> np.flipud(A) + array([[0., 0., 3.], + [0., 2., 0.], + [1., 0., 0.]]) + + >>> rng = np.random.default_rng() + >>> A = rng.normal(size=(2,3,5)) + >>> np.all(np.flipud(A) == A[::-1,...]) + True + + >>> np.flipud([1,2]) + array([2, 1]) + + """ + m = asanyarray(m) + if m.ndim < 1: + raise ValueError("Input must be >= 1-d.") + return m[::-1, ...] + + +@finalize_array_function_like +@set_module('numpy') +def eye(N, M=None, k=0, dtype=float, order='C', *, device=None, like=None): + """ + Return a 2-D array with ones on the diagonal and zeros elsewhere. + + Parameters + ---------- + N : int + Number of rows in the output. + M : int, optional + Number of columns in the output. If None, defaults to `N`. + k : int, optional + Index of the diagonal: 0 (the default) refers to the main diagonal, + a positive value refers to an upper diagonal, and a negative value + to a lower diagonal. + dtype : data-type, optional + Data-type of the returned array. + order : {'C', 'F'}, optional + Whether the output should be stored in row-major (C-style) or + column-major (Fortran-style) order in memory. + device : str, optional + The device on which to place the created array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + I : ndarray of shape (N,M) + An array where all elements are equal to zero, except for the `k`-th + diagonal, whose values are equal to one. + + See Also + -------- + identity : (almost) equivalent function + diag : diagonal 2-D array from a 1-D array specified by the user. + + Examples + -------- + >>> import numpy as np + >>> np.eye(2, dtype=int) + array([[1, 0], + [0, 1]]) + >>> np.eye(3, k=1) + array([[0., 1., 0.], + [0., 0., 1.], + [0., 0., 0.]]) + + """ + if like is not None: + return _eye_with_like( + like, N, M=M, k=k, dtype=dtype, order=order, device=device + ) + if M is None: + M = N + m = zeros((N, M), dtype=dtype, order=order, device=device) + if k >= M: + return m + # Ensure M and k are integers, so we don't get any surprise casting + # results in the expressions `M-k` and `M+1` used below. This avoids + # a problem with inputs with type (for example) np.uint64. + M = operator.index(M) + k = operator.index(k) + if k >= 0: + i = k + else: + i = (-k) * M + m[:M - k].flat[i::M + 1] = 1 + return m + + +_eye_with_like = array_function_dispatch()(eye) + + +def _diag_dispatcher(v, k=None): + return (v,) + + +@array_function_dispatch(_diag_dispatcher) +def diag(v, k=0): + """ + Extract a diagonal or construct a diagonal array. + + See the more detailed documentation for ``numpy.diagonal`` if you use this + function to extract a diagonal and wish to write to the resulting array; + whether it returns a copy or a view depends on what version of numpy you + are using. + + Parameters + ---------- + v : array_like + If `v` is a 2-D array, return a copy of its `k`-th diagonal. + If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th + diagonal. + k : int, optional + Diagonal in question. The default is 0. Use `k>0` for diagonals + above the main diagonal, and `k<0` for diagonals below the main + diagonal. + + Returns + ------- + out : ndarray + The extracted diagonal or constructed diagonal array. + + See Also + -------- + diagonal : Return specified diagonals. + diagflat : Create a 2-D array with the flattened input as a diagonal. + trace : Sum along diagonals. + triu : Upper triangle of an array. + tril : Lower triangle of an array. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(9).reshape((3,3)) + >>> x + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + + >>> np.diag(x) + array([0, 4, 8]) + >>> np.diag(x, k=1) + array([1, 5]) + >>> np.diag(x, k=-1) + array([3, 7]) + + >>> np.diag(np.diag(x)) + array([[0, 0, 0], + [0, 4, 0], + [0, 0, 8]]) + + """ + v = asanyarray(v) + s = v.shape + if len(s) == 1: + n = s[0] + abs(k) + res = zeros((n, n), v.dtype) + if k >= 0: + i = k + else: + i = (-k) * n + res[:n - k].flat[i::n + 1] = v + return res + elif len(s) == 2: + return diagonal(v, k) + else: + raise ValueError("Input must be 1- or 2-d.") + + +@array_function_dispatch(_diag_dispatcher) +def diagflat(v, k=0): + """ + Create a two-dimensional array with the flattened input as a diagonal. + + Parameters + ---------- + v : array_like + Input data, which is flattened and set as the `k`-th + diagonal of the output. + k : int, optional + Diagonal to set; 0, the default, corresponds to the "main" diagonal, + a positive (negative) `k` giving the number of the diagonal above + (below) the main. + + Returns + ------- + out : ndarray + The 2-D output array. + + See Also + -------- + diag : MATLAB work-alike for 1-D and 2-D arrays. + diagonal : Return specified diagonals. + trace : Sum along diagonals. + + Examples + -------- + >>> import numpy as np + >>> np.diagflat([[1,2], [3,4]]) + array([[1, 0, 0, 0], + [0, 2, 0, 0], + [0, 0, 3, 0], + [0, 0, 0, 4]]) + + >>> np.diagflat([1,2], 1) + array([[0, 1, 0], + [0, 0, 2], + [0, 0, 0]]) + + """ + conv = _array_converter(v) + v, = conv.as_arrays(subok=False) + v = v.ravel() + s = len(v) + n = s + abs(k) + res = zeros((n, n), v.dtype) + if (k >= 0): + i = arange(0, n - k, dtype=intp) + fi = i + k + i * n + else: + i = arange(0, n + k, dtype=intp) + fi = i + (i - k) * n + res.flat[fi] = v + + return conv.wrap(res) + + +@finalize_array_function_like +@set_module('numpy') +def tri(N, M=None, k=0, dtype=float, *, like=None): + """ + An array with ones at and below the given diagonal and zeros elsewhere. + + Parameters + ---------- + N : int + Number of rows in the array. + M : int, optional + Number of columns in the array. + By default, `M` is taken equal to `N`. + k : int, optional + The sub-diagonal at and below which the array is filled. + `k` = 0 is the main diagonal, while `k` < 0 is below it, + and `k` > 0 is above. The default is 0. + dtype : dtype, optional + Data type of the returned array. The default is float. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + tri : ndarray of shape (N, M) + Array with its lower triangle filled with ones and zero elsewhere; + in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise. + + Examples + -------- + >>> import numpy as np + >>> np.tri(3, 5, 2, dtype=int) + array([[1, 1, 1, 0, 0], + [1, 1, 1, 1, 0], + [1, 1, 1, 1, 1]]) + + >>> np.tri(3, 5, -1) + array([[0., 0., 0., 0., 0.], + [1., 0., 0., 0., 0.], + [1., 1., 0., 0., 0.]]) + + """ + if like is not None: + return _tri_with_like(like, N, M=M, k=k, dtype=dtype) + + if M is None: + M = N + + m = greater_equal.outer(arange(N, dtype=_min_int(0, N)), + arange(-k, M - k, dtype=_min_int(-k, M - k))) + + # Avoid making a copy if the requested type is already bool + m = m.astype(dtype, copy=False) + + return m + + +_tri_with_like = array_function_dispatch()(tri) + + +def _trilu_dispatcher(m, k=None): + return (m,) + + +@array_function_dispatch(_trilu_dispatcher) +def tril(m, k=0): + """ + Lower triangle of an array. + + Return a copy of an array with elements above the `k`-th diagonal zeroed. + For arrays with ``ndim`` exceeding 2, `tril` will apply to the final two + axes. + + Parameters + ---------- + m : array_like, shape (..., M, N) + Input array. + k : int, optional + Diagonal above which to zero elements. `k = 0` (the default) is the + main diagonal, `k < 0` is below it and `k > 0` is above. + + Returns + ------- + tril : ndarray, shape (..., M, N) + Lower triangle of `m`, of same shape and data-type as `m`. + + See Also + -------- + triu : same thing, only for the upper triangle + + Examples + -------- + >>> import numpy as np + >>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) + array([[ 0, 0, 0], + [ 4, 0, 0], + [ 7, 8, 0], + [10, 11, 12]]) + + >>> np.tril(np.arange(3*4*5).reshape(3, 4, 5)) + array([[[ 0, 0, 0, 0, 0], + [ 5, 6, 0, 0, 0], + [10, 11, 12, 0, 0], + [15, 16, 17, 18, 0]], + [[20, 0, 0, 0, 0], + [25, 26, 0, 0, 0], + [30, 31, 32, 0, 0], + [35, 36, 37, 38, 0]], + [[40, 0, 0, 0, 0], + [45, 46, 0, 0, 0], + [50, 51, 52, 0, 0], + [55, 56, 57, 58, 0]]]) + + """ + m = asanyarray(m) + mask = tri(*m.shape[-2:], k=k, dtype=bool) + + return where(mask, m, zeros(1, m.dtype)) + + +@array_function_dispatch(_trilu_dispatcher) +def triu(m, k=0): + """ + Upper triangle of an array. + + Return a copy of an array with the elements below the `k`-th diagonal + zeroed. For arrays with ``ndim`` exceeding 2, `triu` will apply to the + final two axes. + + Please refer to the documentation for `tril` for further details. + + See Also + -------- + tril : lower triangle of an array + + Examples + -------- + >>> import numpy as np + >>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) + array([[ 1, 2, 3], + [ 4, 5, 6], + [ 0, 8, 9], + [ 0, 0, 12]]) + + >>> np.triu(np.arange(3*4*5).reshape(3, 4, 5)) + array([[[ 0, 1, 2, 3, 4], + [ 0, 6, 7, 8, 9], + [ 0, 0, 12, 13, 14], + [ 0, 0, 0, 18, 19]], + [[20, 21, 22, 23, 24], + [ 0, 26, 27, 28, 29], + [ 0, 0, 32, 33, 34], + [ 0, 0, 0, 38, 39]], + [[40, 41, 42, 43, 44], + [ 0, 46, 47, 48, 49], + [ 0, 0, 52, 53, 54], + [ 0, 0, 0, 58, 59]]]) + + """ + m = asanyarray(m) + mask = tri(*m.shape[-2:], k=k - 1, dtype=bool) + + return where(mask, zeros(1, m.dtype), m) + + +def _vander_dispatcher(x, N=None, increasing=None): + return (x,) + + +# Originally borrowed from John Hunter and matplotlib +@array_function_dispatch(_vander_dispatcher) +def vander(x, N=None, increasing=False): + """ + Generate a Vandermonde matrix. + + The columns of the output matrix are powers of the input vector. The + order of the powers is determined by the `increasing` boolean argument. + Specifically, when `increasing` is False, the `i`-th output column is + the input vector raised element-wise to the power of ``N - i - 1``. Such + a matrix with a geometric progression in each row is named for Alexandre- + Theophile Vandermonde. + + Parameters + ---------- + x : array_like + 1-D input array. + N : int, optional + Number of columns in the output. If `N` is not specified, a square + array is returned (``N = len(x)``). + increasing : bool, optional + Order of the powers of the columns. If True, the powers increase + from left to right, if False (the default) they are reversed. + + Returns + ------- + out : ndarray + Vandermonde matrix. If `increasing` is False, the first column is + ``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is + True, the columns are ``x^0, x^1, ..., x^(N-1)``. + + See Also + -------- + polynomial.polynomial.polyvander + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3, 5]) + >>> N = 3 + >>> np.vander(x, N) + array([[ 1, 1, 1], + [ 4, 2, 1], + [ 9, 3, 1], + [25, 5, 1]]) + + >>> np.column_stack([x**(N-1-i) for i in range(N)]) + array([[ 1, 1, 1], + [ 4, 2, 1], + [ 9, 3, 1], + [25, 5, 1]]) + + >>> x = np.array([1, 2, 3, 5]) + >>> np.vander(x) + array([[ 1, 1, 1, 1], + [ 8, 4, 2, 1], + [ 27, 9, 3, 1], + [125, 25, 5, 1]]) + >>> np.vander(x, increasing=True) + array([[ 1, 1, 1, 1], + [ 1, 2, 4, 8], + [ 1, 3, 9, 27], + [ 1, 5, 25, 125]]) + + The determinant of a square Vandermonde matrix is the product + of the differences between the values of the input vector: + + >>> np.linalg.det(np.vander(x)) + 48.000000000000043 # may vary + >>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1) + 48 + + """ + x = asarray(x) + if x.ndim != 1: + raise ValueError("x must be a one-dimensional array or sequence.") + if N is None: + N = len(x) + + v = empty((len(x), N), dtype=promote_types(x.dtype, int)) + tmp = v[:, ::-1] if not increasing else v + + if N > 0: + tmp[:, 0] = 1 + if N > 1: + tmp[:, 1:] = x[:, None] + multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1) + + return v + + +def _histogram2d_dispatcher(x, y, bins=None, range=None, density=None, + weights=None): + yield x + yield y + + # This terrible logic is adapted from the checks in histogram2d + try: + N = len(bins) + except TypeError: + N = 1 + if N == 2: + yield from bins # bins=[x, y] + else: + yield bins + + yield weights + + +@array_function_dispatch(_histogram2d_dispatcher) +def histogram2d(x, y, bins=10, range=None, density=None, weights=None): + """ + Compute the bi-dimensional histogram of two data samples. + + Parameters + ---------- + x : array_like, shape (N,) + An array containing the x coordinates of the points to be + histogrammed. + y : array_like, shape (N,) + An array containing the y coordinates of the points to be + histogrammed. + bins : int or array_like or [int, int] or [array, array], optional + The bin specification: + + * If int, the number of bins for the two dimensions (nx=ny=bins). + * If array_like, the bin edges for the two dimensions + (x_edges=y_edges=bins). + * If [int, int], the number of bins in each dimension + (nx, ny = bins). + * If [array, array], the bin edges in each dimension + (x_edges, y_edges = bins). + * A combination [int, array] or [array, int], where int + is the number of bins and array is the bin edges. + + range : array_like, shape(2,2), optional + The leftmost and rightmost edges of the bins along each dimension + (if not specified explicitly in the `bins` parameters): + ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range + will be considered outliers and not tallied in the histogram. + density : bool, optional + If False, the default, returns the number of samples in each bin. + If True, returns the probability *density* function at the bin, + ``bin_count / sample_count / bin_area``. + weights : array_like, shape(N,), optional + An array of values ``w_i`` weighing each sample ``(x_i, y_i)``. + Weights are normalized to 1 if `density` is True. If `density` is + False, the values of the returned histogram are equal to the sum of + the weights belonging to the samples falling into each bin. + + Returns + ------- + H : ndarray, shape(nx, ny) + The bi-dimensional histogram of samples `x` and `y`. Values in `x` + are histogrammed along the first dimension and values in `y` are + histogrammed along the second dimension. + xedges : ndarray, shape(nx+1,) + The bin edges along the first dimension. + yedges : ndarray, shape(ny+1,) + The bin edges along the second dimension. + + See Also + -------- + histogram : 1D histogram + histogramdd : Multidimensional histogram + + Notes + ----- + When `density` is True, then the returned histogram is the sample + density, defined such that the sum over bins of the product + ``bin_value * bin_area`` is 1. + + Please note that the histogram does not follow the Cartesian convention + where `x` values are on the abscissa and `y` values on the ordinate + axis. Rather, `x` is histogrammed along the first dimension of the + array (vertical), and `y` along the second dimension of the array + (horizontal). This ensures compatibility with `histogramdd`. + + Examples + -------- + >>> import numpy as np + >>> from matplotlib.image import NonUniformImage + >>> import matplotlib.pyplot as plt + + Construct a 2-D histogram with variable bin width. First define the bin + edges: + + >>> xedges = [0, 1, 3, 5] + >>> yedges = [0, 2, 3, 4, 6] + + Next we create a histogram H with random bin content: + + >>> x = np.random.normal(2, 1, 100) + >>> y = np.random.normal(1, 1, 100) + >>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges)) + >>> # Histogram does not follow Cartesian convention (see Notes), + >>> # therefore transpose H for visualization purposes. + >>> H = H.T + + :func:`imshow ` can only display square bins: + + >>> fig = plt.figure(figsize=(7, 3)) + >>> ax = fig.add_subplot(131, title='imshow: square bins') + >>> plt.imshow(H, interpolation='nearest', origin='lower', + ... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]]) + + + :func:`pcolormesh ` can display actual edges: + + >>> ax = fig.add_subplot(132, title='pcolormesh: actual edges', + ... aspect='equal') + >>> X, Y = np.meshgrid(xedges, yedges) + >>> ax.pcolormesh(X, Y, H) + + + :class:`NonUniformImage ` can be used to + display actual bin edges with interpolation: + + >>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated', + ... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]]) + >>> im = NonUniformImage(ax, interpolation='bilinear') + >>> xcenters = (xedges[:-1] + xedges[1:]) / 2 + >>> ycenters = (yedges[:-1] + yedges[1:]) / 2 + >>> im.set_data(xcenters, ycenters, H) + >>> ax.add_image(im) + >>> plt.show() + + It is also possible to construct a 2-D histogram without specifying bin + edges: + + >>> # Generate non-symmetric test data + >>> n = 10000 + >>> x = np.linspace(1, 100, n) + >>> y = 2*np.log(x) + np.random.rand(n) - 0.5 + >>> # Compute 2d histogram. Note the order of x/y and xedges/yedges + >>> H, yedges, xedges = np.histogram2d(y, x, bins=20) + + Now we can plot the histogram using + :func:`pcolormesh `, and a + :func:`hexbin ` for comparison. + + >>> # Plot histogram using pcolormesh + >>> fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True) + >>> ax1.pcolormesh(xedges, yedges, H, cmap='rainbow') + >>> ax1.plot(x, 2*np.log(x), 'k-') + >>> ax1.set_xlim(x.min(), x.max()) + >>> ax1.set_ylim(y.min(), y.max()) + >>> ax1.set_xlabel('x') + >>> ax1.set_ylabel('y') + >>> ax1.set_title('histogram2d') + >>> ax1.grid() + + >>> # Create hexbin plot for comparison + >>> ax2.hexbin(x, y, gridsize=20, cmap='rainbow') + >>> ax2.plot(x, 2*np.log(x), 'k-') + >>> ax2.set_title('hexbin') + >>> ax2.set_xlim(x.min(), x.max()) + >>> ax2.set_xlabel('x') + >>> ax2.grid() + + >>> plt.show() + """ + from numpy import histogramdd + + if len(x) != len(y): + raise ValueError('x and y must have the same length.') + + try: + N = len(bins) + except TypeError: + N = 1 + + if N not in {1, 2}: + xedges = yedges = asarray(bins) + bins = [xedges, yedges] + hist, edges = histogramdd([x, y], bins, range, density, weights) + return hist, edges[0], edges[1] + + +@set_module('numpy') +def mask_indices(n, mask_func, k=0): + """ + Return the indices to access (n, n) arrays, given a masking function. + + Assume `mask_func` is a function that, for a square array a of size + ``(n, n)`` with a possible offset argument `k`, when called as + ``mask_func(a, k)`` returns a new array with zeros in certain locations + (functions like `triu` or `tril` do precisely this). Then this function + returns the indices where the non-zero values would be located. + + Parameters + ---------- + n : int + The returned indices will be valid to access arrays of shape (n, n). + mask_func : callable + A function whose call signature is similar to that of `triu`, `tril`. + That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`. + `k` is an optional argument to the function. + k : scalar + An optional argument which is passed through to `mask_func`. Functions + like `triu`, `tril` take a second argument that is interpreted as an + offset. + + Returns + ------- + indices : tuple of arrays. + The `n` arrays of indices corresponding to the locations where + ``mask_func(np.ones((n, n)), k)`` is True. + + See Also + -------- + triu, tril, triu_indices, tril_indices + + Examples + -------- + >>> import numpy as np + + These are the indices that would allow you to access the upper triangular + part of any 3x3 array: + + >>> iu = np.mask_indices(3, np.triu) + + For example, if `a` is a 3x3 array: + + >>> a = np.arange(9).reshape(3, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> a[iu] + array([0, 1, 2, 4, 5, 8]) + + An offset can be passed also to the masking function. This gets us the + indices starting on the first diagonal right of the main one: + + >>> iu1 = np.mask_indices(3, np.triu, 1) + + with which we now extract only three elements: + + >>> a[iu1] + array([1, 2, 5]) + + """ + m = ones((n, n), int) + a = mask_func(m, k) + return nonzero(a != 0) + + +@set_module('numpy') +def tril_indices(n, k=0, m=None): + """ + Return the indices for the lower-triangle of an (n, m) array. + + Parameters + ---------- + n : int + The row dimension of the arrays for which the returned + indices will be valid. + k : int, optional + Diagonal offset (see `tril` for details). + m : int, optional + The column dimension of the arrays for which the returned + arrays will be valid. + By default `m` is taken equal to `n`. + + + Returns + ------- + inds : tuple of arrays + The row and column indices, respectively. The row indices are sorted + in non-decreasing order, and the corresponding column indices are + strictly increasing for each row. + + See also + -------- + triu_indices : similar function, for upper-triangular. + mask_indices : generic function accepting an arbitrary mask function. + tril, triu + + Examples + -------- + >>> import numpy as np + + Compute two different sets of indices to access 4x4 arrays, one for the + lower triangular part starting at the main diagonal, and one starting two + diagonals further right: + + >>> il1 = np.tril_indices(4) + >>> il1 + (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3])) + + Note that row indices (first array) are non-decreasing, and the corresponding + column indices (second array) are strictly increasing for each row. + Here is how they can be used with a sample array: + + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + + Both for indexing: + + >>> a[il1] + array([ 0, 4, 5, ..., 13, 14, 15]) + + And for assigning values: + + >>> a[il1] = -1 + >>> a + array([[-1, 1, 2, 3], + [-1, -1, 6, 7], + [-1, -1, -1, 11], + [-1, -1, -1, -1]]) + + These cover almost the whole array (two diagonals right of the main one): + + >>> il2 = np.tril_indices(4, 2) + >>> a[il2] = -10 + >>> a + array([[-10, -10, -10, 3], + [-10, -10, -10, -10], + [-10, -10, -10, -10], + [-10, -10, -10, -10]]) + + """ + tri_ = tri(n, m, k=k, dtype=bool) + + return tuple(broadcast_to(inds, tri_.shape)[tri_] + for inds in indices(tri_.shape, sparse=True)) + + +def _trilu_indices_form_dispatcher(arr, k=None): + return (arr,) + + +@array_function_dispatch(_trilu_indices_form_dispatcher) +def tril_indices_from(arr, k=0): + """ + Return the indices for the lower-triangle of arr. + + See `tril_indices` for full details. + + Parameters + ---------- + arr : array_like + The indices will be valid for square arrays whose dimensions are + the same as arr. + k : int, optional + Diagonal offset (see `tril` for details). + + Examples + -------- + >>> import numpy as np + + Create a 4 by 4 array + + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + + Pass the array to get the indices of the lower triangular elements. + + >>> trili = np.tril_indices_from(a) + >>> trili + (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3])) + + >>> a[trili] + array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15]) + + This is syntactic sugar for tril_indices(). + + >>> np.tril_indices(a.shape[0]) + (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3])) + + Use the `k` parameter to return the indices for the lower triangular array + up to the k-th diagonal. + + >>> trili1 = np.tril_indices_from(a, k=1) + >>> a[trili1] + array([ 0, 1, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15]) + + See Also + -------- + tril_indices, tril, triu_indices_from + """ + if arr.ndim != 2: + raise ValueError("input array must be 2-d") + return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1]) + + +@set_module('numpy') +def triu_indices(n, k=0, m=None): + """ + Return the indices for the upper-triangle of an (n, m) array. + + Parameters + ---------- + n : int + The size of the arrays for which the returned indices will + be valid. + k : int, optional + Diagonal offset (see `triu` for details). + m : int, optional + The column dimension of the arrays for which the returned + arrays will be valid. + By default `m` is taken equal to `n`. + + + Returns + ------- + inds : tuple, shape(2) of ndarrays, shape(`n`) + The row and column indices, respectively. The row indices are sorted + in non-decreasing order, and the corresponding column indices are + strictly increasing for each row. + + See also + -------- + tril_indices : similar function, for lower-triangular. + mask_indices : generic function accepting an arbitrary mask function. + triu, tril + + Examples + -------- + >>> import numpy as np + + Compute two different sets of indices to access 4x4 arrays, one for the + upper triangular part starting at the main diagonal, and one starting two + diagonals further right: + + >>> iu1 = np.triu_indices(4) + >>> iu1 + (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3])) + + Note that row indices (first array) are non-decreasing, and the corresponding + column indices (second array) are strictly increasing for each row. + + Here is how they can be used with a sample array: + + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + + Both for indexing: + + >>> a[iu1] + array([ 0, 1, 2, ..., 10, 11, 15]) + + And for assigning values: + + >>> a[iu1] = -1 + >>> a + array([[-1, -1, -1, -1], + [ 4, -1, -1, -1], + [ 8, 9, -1, -1], + [12, 13, 14, -1]]) + + These cover only a small part of the whole array (two diagonals right + of the main one): + + >>> iu2 = np.triu_indices(4, 2) + >>> a[iu2] = -10 + >>> a + array([[ -1, -1, -10, -10], + [ 4, -1, -1, -10], + [ 8, 9, -1, -1], + [ 12, 13, 14, -1]]) + + """ + tri_ = ~tri(n, m, k=k - 1, dtype=bool) + + return tuple(broadcast_to(inds, tri_.shape)[tri_] + for inds in indices(tri_.shape, sparse=True)) + + +@array_function_dispatch(_trilu_indices_form_dispatcher) +def triu_indices_from(arr, k=0): + """ + Return the indices for the upper-triangle of arr. + + See `triu_indices` for full details. + + Parameters + ---------- + arr : ndarray, shape(N, N) + The indices will be valid for square arrays. + k : int, optional + Diagonal offset (see `triu` for details). + + Returns + ------- + triu_indices_from : tuple, shape(2) of ndarray, shape(N) + Indices for the upper-triangle of `arr`. + + Examples + -------- + >>> import numpy as np + + Create a 4 by 4 array + + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + + Pass the array to get the indices of the upper triangular elements. + + >>> triui = np.triu_indices_from(a) + >>> triui + (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3])) + + >>> a[triui] + array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15]) + + This is syntactic sugar for triu_indices(). + + >>> np.triu_indices(a.shape[0]) + (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3])) + + Use the `k` parameter to return the indices for the upper triangular array + from the k-th diagonal. + + >>> triuim1 = np.triu_indices_from(a, k=1) + >>> a[triuim1] + array([ 1, 2, 3, 6, 7, 11]) + + + See Also + -------- + triu_indices, triu, tril_indices_from + """ + if arr.ndim != 2: + raise ValueError("input array must be 2-d") + return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1]) diff --git a/local_packages/numpy/lib/_twodim_base_impl.pyi b/local_packages/numpy/lib/_twodim_base_impl.pyi new file mode 100644 index 0000000..864b70e --- /dev/null +++ b/local_packages/numpy/lib/_twodim_base_impl.pyi @@ -0,0 +1,408 @@ +from _typeshed import Incomplete +from collections.abc import Callable, Sequence +from typing import ( + Any, + Literal as L, + Never, + Protocol, + TypeAlias, + TypeVar, + overload, + type_check_only, +) + +import numpy as np +from numpy import _OrderCF +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _ArrayLike, + _DTypeLike, + _NumberLike_co, + _ScalarLike_co, + _SupportsArray, + _SupportsArrayFunc, +) + +__all__ = [ + "diag", + "diagflat", + "eye", + "fliplr", + "flipud", + "tri", + "triu", + "tril", + "vander", + "histogram2d", + "mask_indices", + "tril_indices", + "tril_indices_from", + "triu_indices", + "triu_indices_from", +] + +### + +_T = TypeVar("_T") +_ArrayT = TypeVar("_ArrayT", bound=np.ndarray) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_ComplexT = TypeVar("_ComplexT", bound=np.complexfloating) +_InexactT = TypeVar("_InexactT", bound=np.inexact) +_NumberT = TypeVar("_NumberT", bound=np.number) +_NumberObjectT = TypeVar("_NumberObjectT", bound=np.number | np.object_) +_NumberCoT = TypeVar("_NumberCoT", bound=_Number_co) + +_Int_co: TypeAlias = np.integer | np.bool +_Float_co: TypeAlias = np.floating | _Int_co +_Number_co: TypeAlias = np.number | np.bool + +_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] +_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] +# Workaround for mypy's and pyright's lack of compliance with the typing spec for +# overloads for gradual types. This works because only `Any` and `Never` are assignable +# to `Never`. +_ArrayNoD: TypeAlias = np.ndarray[tuple[Never] | tuple[Never, Never], np.dtype[_ScalarT]] + +_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_ScalarT]] | Sequence[_ScalarT] +_ArrayLike1DInt_co: TypeAlias = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co] +_ArrayLike1DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co] +_ArrayLike2DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[_ArrayLike1DFloat_co] +_ArrayLike1DNumber_co: TypeAlias = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co] + +# The returned arrays dtype must be compatible with `np.equal` +_MaskFunc: TypeAlias = Callable[[NDArray[np.int_], _T], NDArray[_Number_co | np.timedelta64 | np.datetime64 | np.object_]] + +_Indices2D: TypeAlias = tuple[_Array1D[np.intp], _Array1D[np.intp]] +_Histogram2D: TypeAlias = tuple[_Array2D[np.float64], _Array1D[_ScalarT], _Array1D[_ScalarT]] + +@type_check_only +class _HasShapeAndNDim(Protocol): + @property # TODO: require 2d shape once shape-typing has matured + def shape(self) -> tuple[int, ...]: ... + @property + def ndim(self) -> int: ... + +### + +# keep in sync with `flipud` +@overload +def fliplr(m: _ArrayT) -> _ArrayT: ... +@overload +def fliplr(m: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... +@overload +def fliplr(m: ArrayLike) -> NDArray[Any]: ... + +# keep in sync with `fliplr` +@overload +def flipud(m: _ArrayT) -> _ArrayT: ... +@overload +def flipud(m: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... +@overload +def flipud(m: ArrayLike) -> NDArray[Any]: ... + +# +@overload +def eye( + N: int, + M: int | None = None, + k: int = 0, + dtype: None = ..., # = float # stubdefaulter: ignore[missing-default] + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array2D[np.float64]: ... +@overload +def eye( + N: int, + M: int | None, + k: int, + dtype: _DTypeLike[_ScalarT], + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array2D[_ScalarT]: ... +@overload +def eye( + N: int, + M: int | None = None, + k: int = 0, + *, + dtype: _DTypeLike[_ScalarT], + order: _OrderCF = "C", + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array2D[_ScalarT]: ... +@overload +def eye( + N: int, + M: int | None = None, + k: int = 0, + dtype: DTypeLike | None = ..., # = float + order: _OrderCF = "C", + *, + device: L["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, +) -> _Array2D[Incomplete]: ... + +# +@overload +def diag(v: _ArrayNoD[_ScalarT] | Sequence[Sequence[_ScalarT]], k: int = 0) -> NDArray[_ScalarT]: ... +@overload +def diag(v: _Array2D[_ScalarT] | Sequence[Sequence[_ScalarT]], k: int = 0) -> _Array1D[_ScalarT]: ... +@overload +def diag(v: _Array1D[_ScalarT] | Sequence[_ScalarT], k: int = 0) -> _Array2D[_ScalarT]: ... +@overload +def diag(v: Sequence[Sequence[_ScalarLike_co]], k: int = 0) -> _Array1D[Incomplete]: ... +@overload +def diag(v: Sequence[_ScalarLike_co], k: int = 0) -> _Array2D[Incomplete]: ... +@overload +def diag(v: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... +@overload +def diag(v: ArrayLike, k: int = 0) -> NDArray[Incomplete]: ... + +# keep in sync with `numpy.ma.extras.diagflat` +@overload +def diagflat(v: _ArrayLike[_ScalarT], k: int = 0) -> _Array2D[_ScalarT]: ... +@overload +def diagflat(v: ArrayLike, k: int = 0) -> _Array2D[Incomplete]: ... + +# +@overload +def tri( + N: int, + M: int | None = None, + k: int = 0, + dtype: None = ..., # = float # stubdefaulter: ignore[missing-default] + *, + like: _SupportsArrayFunc | None = None +) -> _Array2D[np.float64]: ... +@overload +def tri( + N: int, + M: int | None, + k: int, + dtype: _DTypeLike[_ScalarT], + *, + like: _SupportsArrayFunc | None = None +) -> _Array2D[_ScalarT]: ... +@overload +def tri( + N: int, + M: int | None = None, + k: int = 0, + *, + dtype: _DTypeLike[_ScalarT], + like: _SupportsArrayFunc | None = None +) -> _Array2D[_ScalarT]: ... +@overload +def tri( + N: int, + M: int | None = None, + k: int = 0, + dtype: DTypeLike | None = ..., # = float + *, + like: _SupportsArrayFunc | None = None +) -> _Array2D[Any]: ... + +# keep in sync with `triu` +@overload +def tril(m: _ArrayT, k: int = 0) -> _ArrayT: ... +@overload +def tril(m: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... +@overload +def tril(m: ArrayLike, k: int = 0) -> NDArray[Any]: ... + +# keep in sync with `tril` +@overload +def triu(m: _ArrayT, k: int = 0) -> _ArrayT: ... +@overload +def triu(m: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... +@overload +def triu(m: ArrayLike, k: int = 0) -> NDArray[Any]: ... + +# we use `list` (invariant) instead of `Sequence` (covariant) to avoid overlap +@overload +def vander(x: _ArrayLike1D[_NumberObjectT], N: int | None = None, increasing: bool = False) -> _Array2D[_NumberObjectT]: ... +@overload +def vander(x: _ArrayLike1D[np.bool] | list[int], N: int | None = None, increasing: bool = False) -> _Array2D[np.int_]: ... +@overload +def vander(x: list[float], N: int | None = None, increasing: bool = False) -> _Array2D[np.float64]: ... +@overload +def vander(x: list[complex], N: int | None = None, increasing: bool = False) -> _Array2D[np.complex128]: ... +@overload # fallback +def vander(x: Sequence[_NumberLike_co], N: int | None = None, increasing: bool = False) -> _Array2D[Any]: ... + +# +@overload +def histogram2d( + x: _ArrayLike1D[_ComplexT], + y: _ArrayLike1D[_ComplexT | _Float_co], + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[_ComplexT]: ... +@overload +def histogram2d( + x: _ArrayLike1D[_ComplexT | _Float_co], + y: _ArrayLike1D[_ComplexT], + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[_ComplexT]: ... +@overload +def histogram2d( + x: _ArrayLike1D[_InexactT], + y: _ArrayLike1D[_InexactT | _Int_co], + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[_InexactT]: ... +@overload +def histogram2d( + x: _ArrayLike1D[_InexactT | _Int_co], + y: _ArrayLike1D[_InexactT], + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[_InexactT]: ... +@overload +def histogram2d( + x: _ArrayLike1DInt_co | Sequence[float], + y: _ArrayLike1DInt_co | Sequence[float], + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[np.float64]: ... +@overload +def histogram2d( + x: Sequence[complex], + y: Sequence[complex], + bins: int | Sequence[int] = 10, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[np.complex128 | Any]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: _ArrayLike1D[_NumberCoT] | Sequence[_ArrayLike1D[_NumberCoT]], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[_NumberCoT]: ... +@overload +def histogram2d( + x: _ArrayLike1D[_InexactT], + y: _ArrayLike1D[_InexactT], + bins: Sequence[_ArrayLike1D[_NumberCoT] | int], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[_InexactT | _NumberCoT]: ... +@overload +def histogram2d( + x: _ArrayLike1D[_InexactT], + y: _ArrayLike1D[_InexactT], + bins: Sequence[_ArrayLike1DNumber_co | int], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[_InexactT | Any]: ... +@overload +def histogram2d( + x: _ArrayLike1DInt_co | Sequence[float], + y: _ArrayLike1DInt_co | Sequence[float], + bins: Sequence[_ArrayLike1D[_NumberCoT] | int], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[np.float64 | _NumberCoT]: ... +@overload +def histogram2d( + x: _ArrayLike1DInt_co | Sequence[float], + y: _ArrayLike1DInt_co | Sequence[float], + bins: Sequence[_ArrayLike1DNumber_co | int], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[np.float64 | Any]: ... +@overload +def histogram2d( + x: Sequence[complex], + y: Sequence[complex], + bins: Sequence[_ArrayLike1D[_NumberCoT] | int], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[np.complex128 | _NumberCoT]: ... +@overload +def histogram2d( + x: Sequence[complex], + y: Sequence[complex], + bins: Sequence[_ArrayLike1DNumber_co | int], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[np.complex128 | Any]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[int]], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[np.int_]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[float]], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[np.float64 | Any]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[complex]], + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[np.complex128 | Any]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[_ArrayLike1DNumber_co | int] | int, + range: _ArrayLike2DFloat_co | None = None, + density: bool | None = None, + weights: _ArrayLike1DFloat_co | None = None, +) -> _Histogram2D[Any]: ... + +# NOTE: we're assuming/demanding here the `mask_func` returns +# an ndarray of shape `(n, n)`; otherwise there is the possibility +# of the output tuple having more or less than 2 elements +@overload +def mask_indices(n: int, mask_func: _MaskFunc[int], k: int = 0) -> _Indices2D: ... +@overload +def mask_indices(n: int, mask_func: _MaskFunc[_T], k: _T) -> _Indices2D: ... + +# +def tril_indices(n: int, k: int = 0, m: int | None = None) -> _Indices2D: ... +def triu_indices(n: int, k: int = 0, m: int | None = None) -> _Indices2D: ... + +# these will accept anything with `shape: tuple[int, int]` and `ndim: int` attributes +def tril_indices_from(arr: _HasShapeAndNDim, k: int = 0) -> _Indices2D: ... +def triu_indices_from(arr: _HasShapeAndNDim, k: int = 0) -> _Indices2D: ... diff --git a/local_packages/numpy/lib/_type_check_impl.py b/local_packages/numpy/lib/_type_check_impl.py new file mode 100644 index 0000000..584088c --- /dev/null +++ b/local_packages/numpy/lib/_type_check_impl.py @@ -0,0 +1,710 @@ +"""Automatically adapted for numpy Sep 19, 2005 by convertcode.py + +""" +import functools + +__all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex', + 'isreal', 'nan_to_num', 'real', 'real_if_close', + 'typename', 'mintypecode', + 'common_type'] + +import numpy._core.numeric as _nx +from numpy._core import getlimits, overrides +from numpy._core.numeric import asanyarray, asarray, isnan, zeros +from numpy._utils import set_module + +from ._ufunclike_impl import isneginf, isposinf + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +_typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?' + + +@set_module('numpy') +def mintypecode(typechars, typeset='GDFgdf', default='d'): + """ + Return the character for the minimum-size type to which given types can + be safely cast. + + The returned type character must represent the smallest size dtype such + that an array of the returned type can handle the data from an array of + all types in `typechars` (or if `typechars` is an array, then its + dtype.char). + + Parameters + ---------- + typechars : list of str or array_like + If a list of strings, each string should represent a dtype. + If array_like, the character representation of the array dtype is used. + typeset : str or list of str, optional + The set of characters that the returned character is chosen from. + The default set is 'GDFgdf'. + default : str, optional + The default character, this is returned if none of the characters in + `typechars` matches a character in `typeset`. + + Returns + ------- + typechar : str + The character representing the minimum-size type that was found. + + See Also + -------- + dtype + + Examples + -------- + >>> import numpy as np + >>> np.mintypecode(['d', 'f', 'S']) + 'd' + >>> x = np.array([1.1, 2-3.j]) + >>> np.mintypecode(x) + 'D' + + >>> np.mintypecode('abceh', default='G') + 'G' + + """ + typecodes = ((isinstance(t, str) and t) or asarray(t).dtype.char + for t in typechars) + intersection = {t for t in typecodes if t in typeset} + if not intersection: + return default + if 'F' in intersection and 'd' in intersection: + return 'D' + return min(intersection, key=_typecodes_by_elsize.index) + + +def _real_dispatcher(val): + return (val,) + + +@array_function_dispatch(_real_dispatcher) +def real(val): + """ + Return the real part of the complex argument. + + Parameters + ---------- + val : array_like + Input array. + + Returns + ------- + out : ndarray or scalar + The real component of the complex argument. If `val` is real, the type + of `val` is used for the output. If `val` has complex elements, the + returned type is float. + + See Also + -------- + real_if_close, imag, angle + + Examples + -------- + >>> import numpy as np + >>> a = np.array([1+2j, 3+4j, 5+6j]) + >>> a.real + array([1., 3., 5.]) + >>> a.real = 9 + >>> a + array([9.+2.j, 9.+4.j, 9.+6.j]) + >>> a.real = np.array([9, 8, 7]) + >>> a + array([9.+2.j, 8.+4.j, 7.+6.j]) + >>> np.real(1 + 1j) + 1.0 + + """ + try: + return val.real + except AttributeError: + return asanyarray(val).real + + +def _imag_dispatcher(val): + return (val,) + + +@array_function_dispatch(_imag_dispatcher) +def imag(val): + """ + Return the imaginary part of the complex argument. + + Parameters + ---------- + val : array_like + Input array. + + Returns + ------- + out : ndarray or scalar + The imaginary component of the complex argument. If `val` is real, + the type of `val` is used for the output. If `val` has complex + elements, the returned type is float. + + See Also + -------- + real, angle, real_if_close + + Examples + -------- + >>> import numpy as np + >>> a = np.array([1+2j, 3+4j, 5+6j]) + >>> a.imag + array([2., 4., 6.]) + >>> a.imag = np.array([8, 10, 12]) + >>> a + array([1. +8.j, 3.+10.j, 5.+12.j]) + >>> np.imag(1 + 1j) + 1.0 + + """ + try: + return val.imag + except AttributeError: + return asanyarray(val).imag + + +def _is_type_dispatcher(x): + return (x,) + + +@array_function_dispatch(_is_type_dispatcher) +def iscomplex(x): + """ + Returns a bool array, where True if input element is complex. + + What is tested is whether the input has a non-zero imaginary part, not if + the input type is complex. + + Parameters + ---------- + x : array_like + Input array. + + Returns + ------- + out : ndarray of bools + Output array. + + See Also + -------- + isreal + iscomplexobj : Return True if x is a complex type or an array of complex + numbers. + + Examples + -------- + >>> import numpy as np + >>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j]) + array([ True, False, False, False, False, True]) + + """ + ax = asanyarray(x) + if issubclass(ax.dtype.type, _nx.complexfloating): + return ax.imag != 0 + res = zeros(ax.shape, bool) + return res[()] # convert to scalar if needed + + +@array_function_dispatch(_is_type_dispatcher) +def isreal(x): + """ + Returns a bool array, where True if input element is real. + + If element has complex type with zero imaginary part, the return value + for that element is True. + + Parameters + ---------- + x : array_like + Input array. + + Returns + ------- + out : ndarray, bool + Boolean array of same shape as `x`. + + Notes + ----- + `isreal` may behave unexpectedly for string or object arrays (see examples) + + See Also + -------- + iscomplex + isrealobj : Return True if x is not a complex type. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=complex) + >>> np.isreal(a) + array([False, True, True, True, True, False]) + + The function does not work on string arrays. + + >>> a = np.array([2j, "a"], dtype="U") + >>> np.isreal(a) # Warns about non-elementwise comparison + False + + Returns True for all elements in input array of ``dtype=object`` even if + any of the elements is complex. + + >>> a = np.array([1, "2", 3+4j], dtype=object) + >>> np.isreal(a) + array([ True, True, True]) + + isreal should not be used with object arrays + + >>> a = np.array([1+2j, 2+1j], dtype=object) + >>> np.isreal(a) + array([ True, True]) + + """ + return imag(x) == 0 + + +@array_function_dispatch(_is_type_dispatcher) +def iscomplexobj(x): + """ + Check for a complex type or an array of complex numbers. + + The type of the input is checked, not the value. Even if the input + has an imaginary part equal to zero, `iscomplexobj` evaluates to True. + + Parameters + ---------- + x : any + The input can be of any type and shape. + + Returns + ------- + iscomplexobj : bool + The return value, True if `x` is of a complex type or has at least + one complex element. + + See Also + -------- + isrealobj, iscomplex + + Examples + -------- + >>> import numpy as np + >>> np.iscomplexobj(1) + False + >>> np.iscomplexobj(1+0j) + True + >>> np.iscomplexobj([3, 1+0j, True]) + True + + """ + try: + dtype = x.dtype + type_ = dtype.type + except AttributeError: + type_ = asarray(x).dtype.type + return issubclass(type_, _nx.complexfloating) + + +@array_function_dispatch(_is_type_dispatcher) +def isrealobj(x): + """ + Return True if x is a not complex type or an array of complex numbers. + + The type of the input is checked, not the value. So even if the input + has an imaginary part equal to zero, `isrealobj` evaluates to False + if the data type is complex. + + Parameters + ---------- + x : any + The input can be of any type and shape. + + Returns + ------- + y : bool + The return value, False if `x` is of a complex type. + + See Also + -------- + iscomplexobj, isreal + + Notes + ----- + The function is only meant for arrays with numerical values but it + accepts all other objects. Since it assumes array input, the return + value of other objects may be True. + + >>> np.isrealobj('A string') + True + >>> np.isrealobj(False) + True + >>> np.isrealobj(None) + True + + Examples + -------- + >>> import numpy as np + >>> np.isrealobj(1) + True + >>> np.isrealobj(1+0j) + False + >>> np.isrealobj([3, 1+0j, True]) + False + + """ + return not iscomplexobj(x) + +#----------------------------------------------------------------------------- + +def _getmaxmin(t): + from numpy._core import getlimits + f = getlimits.finfo(t) + return f.max, f.min + + +def _nan_to_num_dispatcher(x, copy=None, nan=None, posinf=None, neginf=None): + return (x,) + + +@array_function_dispatch(_nan_to_num_dispatcher) +def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): + """ + Replace NaN with zero and infinity with large finite numbers (default + behaviour) or with the numbers defined by the user using the `nan`, + `posinf` and/or `neginf` keywords. + + If `x` is inexact, NaN is replaced by zero or by the user defined value in + `nan` keyword, infinity is replaced by the largest finite floating point + values representable by ``x.dtype`` or by the user defined value in + `posinf` keyword and -infinity is replaced by the most negative finite + floating point values representable by ``x.dtype`` or by the user defined + value in `neginf` keyword. + + For complex dtypes, the above is applied to each of the real and + imaginary components of `x` separately. + + If `x` is not inexact, then no replacements are made. + + Parameters + ---------- + x : scalar or array_like + Input data. + copy : bool, optional + Whether to create a copy of `x` (True) or to replace values + in-place (False). The in-place operation only occurs if + casting to an array does not require a copy. + Default is True. + nan : int, float, or bool or array_like of int, float, or bool, optional + Values to be used to fill NaN values. If no values are passed + then NaN values will be replaced with 0.0. + posinf : int, float, or bool or array_like of int, float, or bool, optional + Values to be used to fill positive infinity values. If no values are + passed then positive infinity values will be replaced with a very + large number. + neginf : int, float, or bool or array_like of int, float, or bool, optional + Values to be used to fill negative infinity values. If no values are + passed then negative infinity values will be replaced with a very + small (or negative) number. + + Returns + ------- + out : ndarray + `x`, with the non-finite values replaced. If `copy` is False, this may + be `x` itself. + + See Also + -------- + isinf : Shows which elements are positive or negative infinity. + isneginf : Shows which elements are negative infinity. + isposinf : Shows which elements are positive infinity. + isnan : Shows which elements are Not a Number (NaN). + isfinite : Shows which elements are finite (not NaN, not infinity) + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). This means that Not a Number is not equivalent to infinity. + + Examples + -------- + >>> import numpy as np + >>> np.nan_to_num(np.inf) + 1.7976931348623157e+308 + >>> np.nan_to_num(-np.inf) + -1.7976931348623157e+308 + >>> np.nan_to_num(np.nan) + 0.0 + >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128]) + >>> np.nan_to_num(x) + array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary + -1.28000000e+002, 1.28000000e+002]) + >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333) + array([ 3.3333333e+07, 3.3333333e+07, -9.9990000e+03, + -1.2800000e+02, 1.2800000e+02]) + >>> nan = np.array([11, 12, -9999, 13, 14]) + >>> posinf = np.array([33333333, 11, 12, 13, 14]) + >>> neginf = np.array([11, 33333333, 12, 13, 14]) + >>> np.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf) + array([ 3.3333333e+07, 3.3333333e+07, -9.9990000e+03, -1.2800000e+02, + 1.2800000e+02]) + >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)]) + array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary + -1.28000000e+002, 1.28000000e+002]) + >>> np.nan_to_num(y) + array([ 1.79769313e+308 +0.00000000e+000j, # may vary + 0.00000000e+000 +0.00000000e+000j, + 0.00000000e+000 +1.79769313e+308j]) + >>> np.nan_to_num(y, nan=111111, posinf=222222) + array([222222.+111111.j, 111111. +0.j, 111111.+222222.j]) + >>> nan = np.array([11, 12, 13]) + >>> posinf = np.array([21, 22, 23]) + >>> neginf = np.array([31, 32, 33]) + >>> np.nan_to_num(y, nan=nan, posinf=posinf, neginf=neginf) + array([21.+11.j, 12. +0.j, 13.+23.j]) + """ + x = _nx.array(x, subok=True, copy=copy) + xtype = x.dtype.type + + isscalar = (x.ndim == 0) + + if not issubclass(xtype, _nx.inexact): + return x[()] if isscalar else x + + iscomplex = issubclass(xtype, _nx.complexfloating) + + dest = (x.real, x.imag) if iscomplex else (x,) + maxf, minf = _getmaxmin(x.real.dtype) + if posinf is not None: + maxf = posinf + if neginf is not None: + minf = neginf + for d in dest: + idx_nan = isnan(d) + idx_posinf = isposinf(d) + idx_neginf = isneginf(d) + _nx.copyto(d, nan, where=idx_nan) + _nx.copyto(d, maxf, where=idx_posinf) + _nx.copyto(d, minf, where=idx_neginf) + return x[()] if isscalar else x + +#----------------------------------------------------------------------------- + +def _real_if_close_dispatcher(a, tol=None): + return (a,) + + +@array_function_dispatch(_real_if_close_dispatcher) +def real_if_close(a, tol=100): + """ + If input is complex with all imaginary parts close to zero, return + real parts. + + "Close to zero" is defined as `tol` * (machine epsilon of the type for + `a`). + + Parameters + ---------- + a : array_like + Input array. + tol : float + Tolerance in machine epsilons for the complex part of the elements + in the array. If the tolerance is <=1, then the absolute tolerance + is used. + + Returns + ------- + out : ndarray + If `a` is real, the type of `a` is used for the output. If `a` + has complex elements, the returned type is float. + + See Also + -------- + real, imag, angle + + Notes + ----- + Machine epsilon varies from machine to machine and between data types + but Python floats on most platforms have a machine epsilon equal to + 2.2204460492503131e-16. You can use 'np.finfo(float).eps' to print + out the machine epsilon for floats. + + Examples + -------- + >>> import numpy as np + >>> np.finfo(float).eps + 2.2204460492503131e-16 # may vary + + >>> np.real_if_close([2.1 + 4e-14j, 5.2 + 3e-15j], tol=1000) + array([2.1, 5.2]) + >>> np.real_if_close([2.1 + 4e-13j, 5.2 + 3e-15j], tol=1000) + array([2.1+4.e-13j, 5.2 + 3e-15j]) + + """ + a = asanyarray(a) + type_ = a.dtype.type + if not issubclass(type_, _nx.complexfloating): + return a + if tol > 1: + f = getlimits.finfo(type_) + tol = f.eps * tol + if _nx.all(_nx.absolute(a.imag) < tol): + a = a.real + return a + + +#----------------------------------------------------------------------------- + +_namefromtype = {'S1': 'character', + '?': 'bool', + 'b': 'signed char', + 'B': 'unsigned char', + 'h': 'short', + 'H': 'unsigned short', + 'i': 'integer', + 'I': 'unsigned integer', + 'l': 'long integer', + 'L': 'unsigned long integer', + 'q': 'long long integer', + 'Q': 'unsigned long long integer', + 'f': 'single precision', + 'd': 'double precision', + 'g': 'long precision', + 'F': 'complex single precision', + 'D': 'complex double precision', + 'G': 'complex long double precision', + 'S': 'string', + 'U': 'unicode', + 'V': 'void', + 'O': 'object' + } + +@set_module('numpy') +def typename(char): + """ + Return a description for the given data type code. + + Parameters + ---------- + char : str + Data type code. + + Returns + ------- + out : str + Description of the input data type code. + + See Also + -------- + dtype + + Examples + -------- + >>> import numpy as np + >>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q', + ... 'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q'] + >>> for typechar in typechars: + ... print(typechar, ' : ', np.typename(typechar)) + ... + S1 : character + ? : bool + B : unsigned char + D : complex double precision + G : complex long double precision + F : complex single precision + I : unsigned integer + H : unsigned short + L : unsigned long integer + O : object + Q : unsigned long long integer + S : string + U : unicode + V : void + b : signed char + d : double precision + g : long precision + f : single precision + i : integer + h : short + l : long integer + q : long long integer + + """ + return _namefromtype[char] + +#----------------------------------------------------------------------------- + + +#determine the "minimum common type" for a group of arrays. +array_type = [[_nx.float16, _nx.float32, _nx.float64, _nx.longdouble], + [None, _nx.complex64, _nx.complex128, _nx.clongdouble]] +array_precision = {_nx.float16: 0, + _nx.float32: 1, + _nx.float64: 2, + _nx.longdouble: 3, + _nx.complex64: 1, + _nx.complex128: 2, + _nx.clongdouble: 3} + + +def _common_type_dispatcher(*arrays): + return arrays + + +@array_function_dispatch(_common_type_dispatcher) +def common_type(*arrays): + """ + Return a scalar type which is common to the input arrays. + + The return type will always be an inexact (i.e. floating point) scalar + type, even if all the arrays are integer arrays. If one of the inputs is + an integer array, the minimum precision type that is returned is a + 64-bit floating point dtype. + + All input arrays except int64 and uint64 can be safely cast to the + returned dtype without loss of information. + + Parameters + ---------- + array1, array2, ... : ndarrays + Input arrays. + + Returns + ------- + out : data type code + Data type code. + + See Also + -------- + dtype, mintypecode + + Examples + -------- + >>> np.common_type(np.arange(2, dtype=np.float32)) + + >>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2)) + + >>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0])) + + + """ + is_complex = False + precision = 0 + for a in arrays: + t = a.dtype.type + if iscomplexobj(a): + is_complex = True + if issubclass(t, _nx.integer): + p = 2 # array_precision[_nx.double] + else: + p = array_precision.get(t) + if p is None: + raise TypeError("can't get common type for non-numeric array") + precision = max(precision, p) + if is_complex: + return array_type[1][precision] + else: + return array_type[0][precision] diff --git a/local_packages/numpy/lib/_type_check_impl.pyi b/local_packages/numpy/lib/_type_check_impl.pyi new file mode 100644 index 0000000..8b665cd --- /dev/null +++ b/local_packages/numpy/lib/_type_check_impl.pyi @@ -0,0 +1,348 @@ +from _typeshed import Incomplete +from collections.abc import Container, Iterable +from typing import Any, Literal as L, Protocol, TypeAlias, overload, type_check_only +from typing_extensions import TypeVar + +import numpy as np +from numpy._typing import ( + ArrayLike, + NDArray, + _16Bit, + _32Bit, + _64Bit, + _ArrayLike, + _NestedSequence, + _ScalarLike_co, + _SupportsArray, +) + +__all__ = [ + "common_type", + "imag", + "iscomplex", + "iscomplexobj", + "isreal", + "isrealobj", + "mintypecode", + "nan_to_num", + "real", + "real_if_close", + "typename", +] + +_T = TypeVar("_T") +_T_co = TypeVar("_T_co", covariant=True) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) +_RealT = TypeVar("_RealT", bound=np.floating | np.integer | np.bool) + +_FloatMax32: TypeAlias = np.float32 | np.float16 +_ComplexMax128: TypeAlias = np.complex128 | np.complex64 +_RealMax64: TypeAlias = np.float64 | np.float32 | np.float16 | np.integer +_Real: TypeAlias = np.floating | np.integer +_InexactMax32: TypeAlias = np.inexact[_32Bit] | np.float16 +_NumberMax64: TypeAlias = np.number[_64Bit] | np.number[_32Bit] | np.number[_16Bit] | np.integer + +@type_check_only +class _HasReal(Protocol[_T_co]): + @property + def real(self, /) -> _T_co: ... + +@type_check_only +class _HasImag(Protocol[_T_co]): + @property + def imag(self, /) -> _T_co: ... + +@type_check_only +class _HasDType(Protocol[_ScalarT_co]): + @property + def dtype(self, /) -> np.dtype[_ScalarT_co]: ... + +### + +def mintypecode(typechars: Iterable[str | ArrayLike], typeset: str | Container[str] = "GDFgdf", default: str = "d") -> str: ... + +# +@overload +def real(val: _HasReal[_T]) -> _T: ... # type: ignore[overload-overlap] +@overload +def real(val: _ArrayLike[_RealT]) -> NDArray[_RealT]: ... +@overload +def real(val: ArrayLike) -> NDArray[Any]: ... + +# +@overload +def imag(val: _HasImag[_T]) -> _T: ... # type: ignore[overload-overlap] +@overload +def imag(val: _ArrayLike[_RealT]) -> NDArray[_RealT]: ... +@overload +def imag(val: ArrayLike) -> NDArray[Any]: ... + +# +@overload +def iscomplex(x: _ScalarLike_co) -> np.bool: ... +@overload +def iscomplex(x: NDArray[Any] | _NestedSequence[ArrayLike]) -> NDArray[np.bool]: ... +@overload +def iscomplex(x: ArrayLike) -> np.bool | NDArray[np.bool]: ... + +# +@overload +def isreal(x: _ScalarLike_co) -> np.bool: ... +@overload +def isreal(x: NDArray[Any] | _NestedSequence[ArrayLike]) -> NDArray[np.bool]: ... +@overload +def isreal(x: ArrayLike) -> np.bool | NDArray[np.bool]: ... + +# +def iscomplexobj(x: _HasDType[Any] | ArrayLike) -> bool: ... +def isrealobj(x: _HasDType[Any] | ArrayLike) -> bool: ... + +# +@overload +def nan_to_num( + x: _ScalarT, + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> _ScalarT: ... +@overload +def nan_to_num( + x: NDArray[_ScalarT] | _NestedSequence[_ArrayLike[_ScalarT]], + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def nan_to_num( + x: _SupportsArray[np.dtype[_ScalarT]], + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> _ScalarT | NDArray[_ScalarT]: ... +@overload +def nan_to_num( + x: _NestedSequence[ArrayLike], + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> NDArray[Incomplete]: ... +@overload +def nan_to_num( + x: ArrayLike, + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> Incomplete: ... + +# NOTE: The [overload-overlap] mypy error is a false positive +@overload +def real_if_close(a: _ArrayLike[np.complex64], tol: float = 100) -> NDArray[np.float32 | np.complex64]: ... # type: ignore[overload-overlap] +@overload +def real_if_close(a: _ArrayLike[np.complex128], tol: float = 100) -> NDArray[np.float64 | np.complex128]: ... +@overload +def real_if_close(a: _ArrayLike[np.clongdouble], tol: float = 100) -> NDArray[np.longdouble | np.clongdouble]: ... +@overload +def real_if_close(a: _ArrayLike[_RealT], tol: float = 100) -> NDArray[_RealT]: ... +@overload +def real_if_close(a: ArrayLike, tol: float = 100) -> NDArray[Any]: ... + +# +@overload +def typename(char: L["S1"]) -> L["character"]: ... +@overload +def typename(char: L["?"]) -> L["bool"]: ... +@overload +def typename(char: L["b"]) -> L["signed char"]: ... +@overload +def typename(char: L["B"]) -> L["unsigned char"]: ... +@overload +def typename(char: L["h"]) -> L["short"]: ... +@overload +def typename(char: L["H"]) -> L["unsigned short"]: ... +@overload +def typename(char: L["i"]) -> L["integer"]: ... +@overload +def typename(char: L["I"]) -> L["unsigned integer"]: ... +@overload +def typename(char: L["l"]) -> L["long integer"]: ... +@overload +def typename(char: L["L"]) -> L["unsigned long integer"]: ... +@overload +def typename(char: L["q"]) -> L["long long integer"]: ... +@overload +def typename(char: L["Q"]) -> L["unsigned long long integer"]: ... +@overload +def typename(char: L["f"]) -> L["single precision"]: ... +@overload +def typename(char: L["d"]) -> L["double precision"]: ... +@overload +def typename(char: L["g"]) -> L["long precision"]: ... +@overload +def typename(char: L["F"]) -> L["complex single precision"]: ... +@overload +def typename(char: L["D"]) -> L["complex double precision"]: ... +@overload +def typename(char: L["G"]) -> L["complex long double precision"]: ... +@overload +def typename(char: L["S"]) -> L["string"]: ... +@overload +def typename(char: L["U"]) -> L["unicode"]: ... +@overload +def typename(char: L["V"]) -> L["void"]: ... +@overload +def typename(char: L["O"]) -> L["object"]: ... + +# NOTE: The [overload-overlap] mypy errors are false positives +@overload +def common_type() -> type[np.float16]: ... +@overload +def common_type(a0: _HasDType[np.float16], /, *ai: _HasDType[np.float16]) -> type[np.float16]: ... # type: ignore[overload-overlap] +@overload +def common_type(a0: _HasDType[np.float32], /, *ai: _HasDType[_FloatMax32]) -> type[np.float32]: ... # type: ignore[overload-overlap] +@overload +def common_type( # type: ignore[overload-overlap] + a0: _HasDType[np.float64 | np.integer], + /, + *ai: _HasDType[_RealMax64], +) -> type[np.float64]: ... +@overload +def common_type( # type: ignore[overload-overlap] + a0: _HasDType[np.longdouble], + /, + *ai: _HasDType[_Real], +) -> type[np.longdouble]: ... +@overload +def common_type( # type: ignore[overload-overlap] + a0: _HasDType[np.complex64], + /, + *ai: _HasDType[_InexactMax32], +) -> type[np.complex64]: ... +@overload +def common_type( # type: ignore[overload-overlap] + a0: _HasDType[np.complex128], + /, + *ai: _HasDType[_NumberMax64], +) -> type[np.complex128]: ... +@overload +def common_type( # type: ignore[overload-overlap] + a0: _HasDType[np.clongdouble], + /, + *ai: _HasDType[np.number], +) -> type[np.clongdouble]: ... +@overload +def common_type( # type: ignore[overload-overlap] + a0: _HasDType[_FloatMax32], + array1: _HasDType[np.float32], + /, + *ai: _HasDType[_FloatMax32], +) -> type[np.float32]: ... +@overload +def common_type( + a0: _HasDType[_RealMax64], + array1: _HasDType[np.float64 | np.integer], + /, + *ai: _HasDType[_RealMax64], +) -> type[np.float64]: ... +@overload +def common_type( + a0: _HasDType[_Real], + array1: _HasDType[np.longdouble], + /, + *ai: _HasDType[_Real], +) -> type[np.longdouble]: ... +@overload +def common_type( # type: ignore[overload-overlap] + a0: _HasDType[_InexactMax32], + array1: _HasDType[np.complex64], + /, + *ai: _HasDType[_InexactMax32], +) -> type[np.complex64]: ... +@overload +def common_type( + a0: _HasDType[np.float64], + array1: _HasDType[_ComplexMax128], + /, + *ai: _HasDType[_NumberMax64], +) -> type[np.complex128]: ... +@overload +def common_type( + a0: _HasDType[_ComplexMax128], + array1: _HasDType[np.float64], + /, + *ai: _HasDType[_NumberMax64], +) -> type[np.complex128]: ... +@overload +def common_type( + a0: _HasDType[_NumberMax64], + array1: _HasDType[np.complex128], + /, + *ai: _HasDType[_NumberMax64], +) -> type[np.complex128]: ... +@overload +def common_type( + a0: _HasDType[_ComplexMax128], + array1: _HasDType[np.complex128 | np.integer], + /, + *ai: _HasDType[_NumberMax64], +) -> type[np.complex128]: ... +@overload +def common_type( + a0: _HasDType[np.complex128 | np.integer], + array1: _HasDType[_ComplexMax128], + /, + *ai: _HasDType[_NumberMax64], +) -> type[np.complex128]: ... +@overload +def common_type( + a0: _HasDType[_Real], + /, + *ai: _HasDType[_Real], +) -> type[np.floating]: ... +@overload +def common_type( + a0: _HasDType[np.number], + array1: _HasDType[np.clongdouble], + /, + *ai: _HasDType[np.number], +) -> type[np.clongdouble]: ... +@overload +def common_type( + a0: _HasDType[np.longdouble], + array1: _HasDType[np.complexfloating], + /, + *ai: _HasDType[np.number], +) -> type[np.clongdouble]: ... +@overload +def common_type( + a0: _HasDType[np.complexfloating], + array1: _HasDType[np.longdouble], + /, + *ai: _HasDType[np.number], +) -> type[np.clongdouble]: ... +@overload +def common_type( + a0: _HasDType[np.complexfloating], + array1: _HasDType[np.number], + /, + *ai: _HasDType[np.number], +) -> type[np.complexfloating]: ... +@overload +def common_type( + a0: _HasDType[np.number], + array1: _HasDType[np.complexfloating], + /, + *ai: _HasDType[np.number], +) -> type[np.complexfloating]: ... +@overload +def common_type( + a0: _HasDType[np.number], + array1: _HasDType[np.number], + /, + *ai: _HasDType[np.number], +) -> type[Any]: ... diff --git a/local_packages/numpy/lib/_ufunclike_impl.py b/local_packages/numpy/lib/_ufunclike_impl.py new file mode 100644 index 0000000..5698406 --- /dev/null +++ b/local_packages/numpy/lib/_ufunclike_impl.py @@ -0,0 +1,199 @@ +""" +Module of functions that are like ufuncs in acting on arrays and optionally +storing results in an output array. + +""" +__all__ = ['fix', 'isneginf', 'isposinf'] + +import numpy._core.numeric as nx +from numpy._core.overrides import array_function_dispatch + + +def _dispatcher(x, out=None): + return (x, out) + + +@array_function_dispatch(_dispatcher, verify=False, module='numpy') +def fix(x, out=None): + """ + Round to nearest integer towards zero. + + Round an array of floats element-wise to nearest integer towards zero. + The rounded values have the same data-type as the input. + + Parameters + ---------- + x : array_like + An array to be rounded + out : ndarray, optional + A location into which the result is stored. If provided, it must have + a shape that the input broadcasts to. If not provided or None, a + freshly-allocated array is returned. + + Returns + ------- + out : ndarray of floats + An array with the same dimensions and data-type as the input. + If second argument is not supplied then a new array is returned + with the rounded values. + + If a second argument is supplied the result is stored there. + The return value ``out`` is then a reference to that array. + + See Also + -------- + rint, trunc, floor, ceil + around : Round to given number of decimals + + Examples + -------- + >>> import numpy as np + >>> np.fix(3.14) + 3.0 + >>> np.fix(3) + 3 + >>> np.fix([2.1, 2.9, -2.1, -2.9]) + array([ 2., 2., -2., -2.]) + + """ + return nx.trunc(x, out=out) + + +@array_function_dispatch(_dispatcher, verify=False, module='numpy') +def isposinf(x, out=None): + """ + Test element-wise for positive infinity, return result as bool array. + + Parameters + ---------- + x : array_like + The input array. + out : array_like, optional + A location into which the result is stored. If provided, it must have a + shape that the input broadcasts to. If not provided or None, a + freshly-allocated boolean array is returned. + + Returns + ------- + out : ndarray + A boolean array with the same dimensions as the input. + If second argument is not supplied then a boolean array is returned + with values True where the corresponding element of the input is + positive infinity and values False where the element of the input is + not positive infinity. + + If a second argument is supplied the result is stored there. If the + type of that array is a numeric type the result is represented as zeros + and ones, if the type is boolean then as False and True. + The return value `out` is then a reference to that array. + + See Also + -------- + isinf, isneginf, isfinite, isnan + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). + + Errors result if the second argument is also supplied when x is a scalar + input, if first and second arguments have different shapes, or if the + first argument has complex values + + Examples + -------- + >>> import numpy as np + >>> np.isposinf(np.inf) + True + >>> np.isposinf(-np.inf) + False + >>> np.isposinf([-np.inf, 0., np.inf]) + array([False, False, True]) + + >>> x = np.array([-np.inf, 0., np.inf]) + >>> y = np.array([2, 2, 2]) + >>> np.isposinf(x, y) + array([0, 0, 1]) + >>> y + array([0, 0, 1]) + + """ + is_inf = nx.isinf(x) + try: + signbit = ~nx.signbit(x) + except TypeError as e: + dtype = nx.asanyarray(x).dtype + raise TypeError(f'This operation is not supported for {dtype} values ' + 'because it would be ambiguous.') from e + else: + return nx.logical_and(is_inf, signbit, out) + + +@array_function_dispatch(_dispatcher, verify=False, module='numpy') +def isneginf(x, out=None): + """ + Test element-wise for negative infinity, return result as bool array. + + Parameters + ---------- + x : array_like + The input array. + out : array_like, optional + A location into which the result is stored. If provided, it must have a + shape that the input broadcasts to. If not provided or None, a + freshly-allocated boolean array is returned. + + Returns + ------- + out : ndarray + A boolean array with the same dimensions as the input. + If second argument is not supplied then a numpy boolean array is + returned with values True where the corresponding element of the + input is negative infinity and values False where the element of + the input is not negative infinity. + + If a second argument is supplied the result is stored there. If the + type of that array is a numeric type the result is represented as + zeros and ones, if the type is boolean then as False and True. The + return value `out` is then a reference to that array. + + See Also + -------- + isinf, isposinf, isnan, isfinite + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). + + Errors result if the second argument is also supplied when x is a scalar + input, if first and second arguments have different shapes, or if the + first argument has complex values. + + Examples + -------- + >>> import numpy as np + >>> np.isneginf(-np.inf) + True + >>> np.isneginf(np.inf) + False + >>> np.isneginf([-np.inf, 0., np.inf]) + array([ True, False, False]) + + >>> x = np.array([-np.inf, 0., np.inf]) + >>> y = np.array([2, 2, 2]) + >>> np.isneginf(x, y) + array([1, 0, 0]) + >>> y + array([1, 0, 0]) + + """ + is_inf = nx.isinf(x) + try: + signbit = nx.signbit(x) + except TypeError as e: + dtype = nx.asanyarray(x).dtype + raise TypeError(f'This operation is not supported for {dtype} values ' + 'because it would be ambiguous.') from e + else: + return nx.logical_and(is_inf, signbit, out) diff --git a/local_packages/numpy/lib/_ufunclike_impl.pyi b/local_packages/numpy/lib/_ufunclike_impl.pyi new file mode 100644 index 0000000..0f00767 --- /dev/null +++ b/local_packages/numpy/lib/_ufunclike_impl.pyi @@ -0,0 +1,60 @@ +from typing import Any, TypeVar, overload +from typing_extensions import deprecated + +import numpy as np +from numpy import floating, object_ +from numpy._typing import ( + NDArray, + _ArrayLikeFloat_co, + _ArrayLikeObject_co, + _FloatLike_co, +) + +__all__ = ["fix", "isneginf", "isposinf"] + +_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) + +@overload +@deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning) +def fix(x: _FloatLike_co, out: None = None) -> floating: ... +@overload +@deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning) +def fix(x: _ArrayLikeFloat_co, out: None = None) -> NDArray[floating]: ... +@overload +@deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning) +def fix(x: _ArrayLikeObject_co, out: None = None) -> NDArray[object_]: ... +@overload +@deprecated("np.fix will be deprecated in NumPy 2.5 in favor of np.trunc", category=PendingDeprecationWarning) +def fix(x: _ArrayLikeFloat_co | _ArrayLikeObject_co, out: _ArrayT) -> _ArrayT: ... + +@overload +def isposinf( # type: ignore[misc] + x: _FloatLike_co, + out: None = None, +) -> np.bool: ... +@overload +def isposinf( + x: _ArrayLikeFloat_co, + out: None = None, +) -> NDArray[np.bool]: ... +@overload +def isposinf( + x: _ArrayLikeFloat_co, + out: _ArrayT, +) -> _ArrayT: ... + +@overload +def isneginf( # type: ignore[misc] + x: _FloatLike_co, + out: None = None, +) -> np.bool: ... +@overload +def isneginf( + x: _ArrayLikeFloat_co, + out: None = None, +) -> NDArray[np.bool]: ... +@overload +def isneginf( + x: _ArrayLikeFloat_co, + out: _ArrayT, +) -> _ArrayT: ... diff --git a/local_packages/numpy/lib/_user_array_impl.py b/local_packages/numpy/lib/_user_array_impl.py new file mode 100644 index 0000000..2465f5f --- /dev/null +++ b/local_packages/numpy/lib/_user_array_impl.py @@ -0,0 +1,310 @@ +""" +Container class for backward compatibility with NumArray. + +The user_array.container class exists for backward compatibility with NumArray +and is not meant to be used in new code. If you need to create an array +container class, we recommend either creating a class that wraps an ndarray +or subclasses ndarray. + +""" +from numpy._core import ( + absolute, + add, + arange, + array, + asarray, + bitwise_and, + bitwise_or, + bitwise_xor, + divide, + equal, + greater, + greater_equal, + invert, + left_shift, + less, + less_equal, + multiply, + not_equal, + power, + remainder, + reshape, + right_shift, + shape, + sin, + sqrt, + subtract, + transpose, +) +from numpy._core.overrides import set_module + + +@set_module("numpy.lib.user_array") +class container: + """ + container(data, dtype=None, copy=True) + + Standard container-class for easy multiple-inheritance. + + Methods + ------- + copy + byteswap + astype + + """ + def __init_subclass__(cls) -> None: + # Deprecated in NumPy 2.4, 2025-11-24 + import warnings + + warnings.warn( + "The numpy.lib.user_array.container class is deprecated and will be " + "removed in a future version.", + DeprecationWarning, + stacklevel=2, + ) + + def __init__(self, data, dtype=None, copy=True): + self.array = array(data, dtype, copy=copy) + + def __repr__(self): + if self.ndim > 0: + return self.__class__.__name__ + repr(self.array)[len("array"):] + else: + return self.__class__.__name__ + "(" + repr(self.array) + ")" + + def __array__(self, t=None): + if t: + return self.array.astype(t) + return self.array + + # Array as sequence + def __len__(self): + return len(self.array) + + def __getitem__(self, index): + return self._rc(self.array[index]) + + def __setitem__(self, index, value): + self.array[index] = asarray(value, self.dtype) + + def __abs__(self): + return self._rc(absolute(self.array)) + + def __neg__(self): + return self._rc(-self.array) + + def __add__(self, other): + return self._rc(self.array + asarray(other)) + + __radd__ = __add__ + + def __iadd__(self, other): + add(self.array, other, self.array) + return self + + def __sub__(self, other): + return self._rc(self.array - asarray(other)) + + def __rsub__(self, other): + return self._rc(asarray(other) - self.array) + + def __isub__(self, other): + subtract(self.array, other, self.array) + return self + + def __mul__(self, other): + return self._rc(multiply(self.array, asarray(other))) + + __rmul__ = __mul__ + + def __imul__(self, other): + multiply(self.array, other, self.array) + return self + + def __mod__(self, other): + return self._rc(remainder(self.array, other)) + + def __rmod__(self, other): + return self._rc(remainder(other, self.array)) + + def __imod__(self, other): + remainder(self.array, other, self.array) + return self + + def __divmod__(self, other): + return (self._rc(divide(self.array, other)), + self._rc(remainder(self.array, other))) + + def __rdivmod__(self, other): + return (self._rc(divide(other, self.array)), + self._rc(remainder(other, self.array))) + + def __pow__(self, other): + return self._rc(power(self.array, asarray(other))) + + def __rpow__(self, other): + return self._rc(power(asarray(other), self.array)) + + def __ipow__(self, other): + power(self.array, other, self.array) + return self + + def __lshift__(self, other): + return self._rc(left_shift(self.array, other)) + + def __rshift__(self, other): + return self._rc(right_shift(self.array, other)) + + def __rlshift__(self, other): + return self._rc(left_shift(other, self.array)) + + def __rrshift__(self, other): + return self._rc(right_shift(other, self.array)) + + def __ilshift__(self, other): + left_shift(self.array, other, self.array) + return self + + def __irshift__(self, other): + right_shift(self.array, other, self.array) + return self + + def __and__(self, other): + return self._rc(bitwise_and(self.array, other)) + + def __rand__(self, other): + return self._rc(bitwise_and(other, self.array)) + + def __iand__(self, other): + bitwise_and(self.array, other, self.array) + return self + + def __xor__(self, other): + return self._rc(bitwise_xor(self.array, other)) + + def __rxor__(self, other): + return self._rc(bitwise_xor(other, self.array)) + + def __ixor__(self, other): + bitwise_xor(self.array, other, self.array) + return self + + def __or__(self, other): + return self._rc(bitwise_or(self.array, other)) + + def __ror__(self, other): + return self._rc(bitwise_or(other, self.array)) + + def __ior__(self, other): + bitwise_or(self.array, other, self.array) + return self + + def __pos__(self): + return self._rc(self.array) + + def __invert__(self): + return self._rc(invert(self.array)) + + def _scalarfunc(self, func): + if self.ndim == 0: + return func(self[0]) + else: + raise TypeError( + "only rank-0 arrays can be converted to Python scalars.") + + def __complex__(self): + return self._scalarfunc(complex) + + def __float__(self): + return self._scalarfunc(float) + + def __int__(self): + return self._scalarfunc(int) + + def __hex__(self): + return self._scalarfunc(hex) + + def __oct__(self): + return self._scalarfunc(oct) + + def __lt__(self, other): + return self._rc(less(self.array, other)) + + def __le__(self, other): + return self._rc(less_equal(self.array, other)) + + def __eq__(self, other): + return self._rc(equal(self.array, other)) + + def __ne__(self, other): + return self._rc(not_equal(self.array, other)) + + def __gt__(self, other): + return self._rc(greater(self.array, other)) + + def __ge__(self, other): + return self._rc(greater_equal(self.array, other)) + + def copy(self): + "" + return self._rc(self.array.copy()) + + def tobytes(self): + "" + return self.array.tobytes() + + def byteswap(self): + "" + return self._rc(self.array.byteswap()) + + def astype(self, typecode): + "" + return self._rc(self.array.astype(typecode)) + + def _rc(self, a): + if len(shape(a)) == 0: + return a + else: + return self.__class__(a) + + def __array_wrap__(self, *args): + return self.__class__(args[0]) + + def __setattr__(self, attr, value): + if attr == 'array': + object.__setattr__(self, attr, value) + return + try: + self.array.__setattr__(attr, value) + except AttributeError: + object.__setattr__(self, attr, value) + + # Only called after other approaches fail. + def __getattr__(self, attr): + if (attr == 'array'): + return object.__getattribute__(self, attr) + return self.array.__getattribute__(attr) + + +############################################################# +# Test of class container +############################################################# +if __name__ == '__main__': + temp = reshape(arange(10000), (100, 100)) + + ua = container(temp) + # new object created begin test + print(dir(ua)) + print(shape(ua), ua.shape) # I have changed Numeric.py + + ua_small = ua[:3, :5] + print(ua_small) + # this did not change ua[0,0], which is not normal behavior + ua_small[0, 0] = 10 + print(ua_small[0, 0], ua[0, 0]) + print(sin(ua_small) / 3. * 6. + sqrt(ua_small ** 2)) + print(less(ua_small, 103), type(less(ua_small, 103))) + print(type(ua_small * reshape(arange(15), shape(ua_small)))) + print(reshape(ua_small, (5, 3))) + print(transpose(ua_small)) diff --git a/local_packages/numpy/lib/_user_array_impl.pyi b/local_packages/numpy/lib/_user_array_impl.pyi new file mode 100644 index 0000000..7f364b4 --- /dev/null +++ b/local_packages/numpy/lib/_user_array_impl.pyi @@ -0,0 +1,226 @@ +from _typeshed import Incomplete +from types import EllipsisType +from typing import Any, Generic, Self, SupportsIndex, TypeAlias, overload +from typing_extensions import TypeVar, deprecated, override + +import numpy as np +import numpy.typing as npt +from numpy._typing import ( + _AnyShape, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeInt_co, + _DTypeLike, +) + +### + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) +_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], default=_AnyShape, covariant=True) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) + +_BoolArrayT = TypeVar("_BoolArrayT", bound=container[Any, np.dtype[np.bool]]) +_IntegralArrayT = TypeVar("_IntegralArrayT", bound=container[Any, np.dtype[np.bool | np.integer | np.object_]]) +_RealContainerT = TypeVar( + "_RealContainerT", + bound=container[Any, np.dtype[np.bool | np.integer | np.floating | np.timedelta64 | np.object_]], +) +_NumericContainerT = TypeVar("_NumericContainerT", bound=container[Any, np.dtype[np.number | np.timedelta64 | np.object_]]) + +_ArrayInt_co: TypeAlias = npt.NDArray[np.integer | np.bool] + +_ToIndexSlice: TypeAlias = slice | EllipsisType | _ArrayInt_co | None +_ToIndexSlices: TypeAlias = _ToIndexSlice | tuple[_ToIndexSlice, ...] +_ToIndex: TypeAlias = SupportsIndex | _ToIndexSlice +_ToIndices: TypeAlias = _ToIndex | tuple[_ToIndex, ...] + +### +# pyright: reportDeprecated = false + +@deprecated("The numpy.lib.user_array.container class is deprecated and will be removed in a future version.") +class container(Generic[_ShapeT_co, _DTypeT_co]): + array: np.ndarray[_ShapeT_co, _DTypeT_co] + + @overload + def __init__( + self, + /, + data: container[_ShapeT_co, _DTypeT_co] | np.ndarray[_ShapeT_co, _DTypeT_co], + dtype: None = None, + copy: bool = True, + ) -> None: ... + @overload + def __init__( + self: container[Any, np.dtype[_ScalarT]], + /, + data: _ArrayLike[_ScalarT], + dtype: None = None, + copy: bool = True, + ) -> None: ... + @overload + def __init__( + self: container[Any, np.dtype[_ScalarT]], + /, + data: npt.ArrayLike, + dtype: _DTypeLike[_ScalarT], + copy: bool = True, + ) -> None: ... + @overload + def __init__(self, /, data: npt.ArrayLike, dtype: npt.DTypeLike | None = None, copy: bool = True) -> None: ... + + # + def __complex__(self, /) -> complex: ... + def __float__(self, /) -> float: ... + def __int__(self, /) -> int: ... + def __hex__(self, /) -> str: ... + def __oct__(self, /) -> str: ... + + # + @override + def __eq__(self, other: object, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + @override + def __ne__(self, other: object, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + + # + def __lt__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... + def __le__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... + def __gt__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... + def __ge__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... + + # + def __len__(self, /) -> int: ... + + # keep in sync with np.ndarray + @overload + def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> container[_ShapeT_co, _DTypeT_co]: ... + @overload + def __getitem__(self, key: _ToIndexSlices, /) -> container[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self, key: _ToIndices, /) -> Any: ... + @overload + def __getitem__(self: container[Any, np.dtype[np.void]], key: list[str], /) -> container[_ShapeT_co, np.dtype[np.void]]: ... + @overload + def __getitem__(self: container[Any, np.dtype[np.void]], key: str, /) -> container[_ShapeT_co, np.dtype]: ... + + # keep in sync with np.ndarray + @overload + def __setitem__(self, index: _ToIndices, value: object, /) -> None: ... + @overload + def __setitem__(self: container[Any, np.dtype[np.void]], key: str | list[str], value: object, /) -> None: ... + + # keep in sync with np.ndarray + @overload + def __abs__(self: container[_ShapeT, np.dtype[np.complex64]], /) -> container[_ShapeT, np.dtype[np.float32]]: ... # type: ignore[overload-overlap] + @overload + def __abs__(self: container[_ShapeT, np.dtype[np.complex128]], /) -> container[_ShapeT, np.dtype[np.float64]]: ... + @overload + def __abs__(self: container[_ShapeT, np.dtype[np.complex192]], /) -> container[_ShapeT, np.dtype[np.float96]]: ... + @overload + def __abs__(self: container[_ShapeT, np.dtype[np.complex256]], /) -> container[_ShapeT, np.dtype[np.float128]]: ... + @overload + def __abs__(self: _RealContainerT, /) -> _RealContainerT: ... + + # + def __neg__(self: _NumericContainerT, /) -> _NumericContainerT: ... # noqa: PYI019 + def __pos__(self: _NumericContainerT, /) -> _NumericContainerT: ... # noqa: PYI019 + def __invert__(self: _IntegralArrayT, /) -> _IntegralArrayT: ... # noqa: PYI019 + + # TODO(jorenham): complete these binary ops + + # + def __add__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __radd__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __iadd__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __sub__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rsub__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __isub__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __mul__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rmul__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __imul__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __mod__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rmod__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __imod__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __divmod__(self, other: npt.ArrayLike, /) -> tuple[Incomplete, Incomplete]: ... + def __rdivmod__(self, other: npt.ArrayLike, /) -> tuple[Incomplete, Incomplete]: ... + + # + def __pow__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rpow__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __ipow__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __lshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ... + def __rlshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ... + def __ilshift__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + def __rshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ... + def __rrshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ... + def __irshift__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + @overload + def __and__( + self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, / + ) -> container[_AnyShape, np.dtype[np.bool]]: ... + @overload + def __and__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ... + __rand__ = __and__ + @overload + def __iand__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __iand__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + @overload + def __xor__( + self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, / + ) -> container[_AnyShape, np.dtype[np.bool]]: ... + @overload + def __xor__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ... + __rxor__ = __xor__ + @overload + def __ixor__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __ixor__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + @overload + def __or__( + self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, / + ) -> container[_AnyShape, np.dtype[np.bool]]: ... + @overload + def __or__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ... + __ror__ = __or__ + @overload + def __ior__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __ior__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + @overload + def __array__(self, /, t: None = None) -> np.ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __array__(self, /, t: _DTypeT) -> np.ndarray[_ShapeT_co, _DTypeT]: ... + + # + @overload + def __array_wrap__(self, arg0: npt.ArrayLike, /) -> container[_ShapeT_co, _DTypeT_co]: ... + @overload + def __array_wrap__(self, a: np.ndarray[_ShapeT, _DTypeT], c: Any = ..., s: Any = ..., /) -> container[_ShapeT, _DTypeT]: ... + + # + def copy(self, /) -> Self: ... + def tobytes(self, /) -> bytes: ... + def byteswap(self, /) -> Self: ... + def astype(self, /, typecode: _DTypeLike[_ScalarT]) -> container[_ShapeT_co, np.dtype[_ScalarT]]: ... diff --git a/local_packages/numpy/lib/_utils_impl.py b/local_packages/numpy/lib/_utils_impl.py new file mode 100644 index 0000000..164aa4e --- /dev/null +++ b/local_packages/numpy/lib/_utils_impl.py @@ -0,0 +1,784 @@ +import functools +import os +import platform +import sys +import textwrap +import types +import warnings + +import numpy as np +from numpy._core import ndarray +from numpy._utils import set_module + +__all__ = [ + 'get_include', 'info', 'show_runtime' +] + + +@set_module('numpy') +def show_runtime(): + """ + Print information about various resources in the system + including available intrinsic support and BLAS/LAPACK library + in use + + .. versionadded:: 1.24.0 + + See Also + -------- + show_config : Show libraries in the system on which NumPy was built. + + Notes + ----- + 1. Information is derived with the help of `threadpoolctl `_ + library if available. + 2. SIMD related information is derived from ``__cpu_features__``, + ``__cpu_baseline__`` and ``__cpu_dispatch__`` + + """ + from pprint import pprint + + from numpy._core._multiarray_umath import ( + __cpu_baseline__, + __cpu_dispatch__, + __cpu_features__, + ) + config_found = [{ + "numpy_version": np.__version__, + "python": sys.version, + "uname": platform.uname(), + }] + features_found, features_not_found = [], [] + for feature in __cpu_dispatch__: + if __cpu_features__[feature]: + features_found.append(feature) + else: + features_not_found.append(feature) + config_found.append({ + "simd_extensions": { + "baseline": __cpu_baseline__, + "found": features_found, + "not_found": features_not_found + } + }) + config_found.append({ + "ignore_floating_point_errors_in_matmul": + not np._core._multiarray_umath._blas_supports_fpe(None), + }) + + try: + from threadpoolctl import threadpool_info + config_found.extend(threadpool_info()) + except ImportError: + print("WARNING: `threadpoolctl` not found in system!" + " Install it by `pip install threadpoolctl`." + " Once installed, try `np.show_runtime` again" + " for more detailed build information") + pprint(config_found) + + +@set_module('numpy') +def get_include(): + """ + Return the directory that contains the NumPy \\*.h header files. + + Extension modules that need to compile against NumPy may need to use this + function to locate the appropriate include directory. + + Notes + ----- + When using ``setuptools``, for example in ``setup.py``:: + + import numpy as np + ... + Extension('extension_name', ... + include_dirs=[np.get_include()]) + ... + + Note that a CLI tool ``numpy-config`` was introduced in NumPy 2.0, using + that is likely preferred for build systems other than ``setuptools``:: + + $ numpy-config --cflags + -I/path/to/site-packages/numpy/_core/include + + # Or rely on pkg-config: + $ export PKG_CONFIG_PATH=$(numpy-config --pkgconfigdir) + $ pkg-config --cflags + -I/path/to/site-packages/numpy/_core/include + + Examples + -------- + >>> np.get_include() + '.../site-packages/numpy/core/include' # may vary + + """ + import numpy + if numpy.show_config is None: + # running from numpy source directory + d = os.path.join(os.path.dirname(numpy.__file__), '_core', 'include') + else: + # using installed numpy core headers + import numpy._core as _core + d = os.path.join(os.path.dirname(_core.__file__), 'include') + return d + + +class _Deprecate: + """ + Decorator class to deprecate old functions. + + Refer to `deprecate` for details. + + See Also + -------- + deprecate + + """ + + def __init__(self, old_name=None, new_name=None, message=None): + self.old_name = old_name + self.new_name = new_name + self.message = message + + def __call__(self, func, *args, **kwargs): + """ + Decorator call. Refer to ``decorate``. + + """ + old_name = self.old_name + new_name = self.new_name + message = self.message + + if old_name is None: + old_name = func.__name__ + if new_name is None: + depdoc = f"`{old_name}` is deprecated!" + else: + depdoc = f"`{old_name}` is deprecated, use `{new_name}` instead!" + + if message is not None: + depdoc += "\n" + message + + @functools.wraps(func) + def newfunc(*args, **kwds): + warnings.warn(depdoc, DeprecationWarning, stacklevel=2) + return func(*args, **kwds) + + newfunc.__name__ = old_name + doc = func.__doc__ + if doc is None: + doc = depdoc + else: + lines = doc.expandtabs().split('\n') + indent = _get_indent(lines[1:]) + if lines[0].lstrip(): + # Indent the original first line to let inspect.cleandoc() + # dedent the docstring despite the deprecation notice. + doc = indent * ' ' + doc + else: + # Remove the same leading blank lines as cleandoc() would. + skip = len(lines[0]) + 1 + for line in lines[1:]: + if len(line) > indent: + break + skip += len(line) + 1 + doc = doc[skip:] + depdoc = textwrap.indent(depdoc, ' ' * indent) + doc = f'{depdoc}\n\n{doc}' + newfunc.__doc__ = doc + + return newfunc + + +def _get_indent(lines): + """ + Determines the leading whitespace that could be removed from all the lines. + """ + indent = sys.maxsize + for line in lines: + content = len(line.lstrip()) + if content: + indent = min(indent, len(line) - content) + if indent == sys.maxsize: + indent = 0 + return indent + + +def deprecate(*args, **kwargs): + """ + Issues a DeprecationWarning, adds warning to `old_name`'s + docstring, rebinds ``old_name.__name__`` and returns the new + function object. + + This function may also be used as a decorator. + + .. deprecated:: 2.0 + Use `~warnings.warn` with :exc:`DeprecationWarning` instead. + + Parameters + ---------- + func : function + The function to be deprecated. + old_name : str, optional + The name of the function to be deprecated. Default is None, in + which case the name of `func` is used. + new_name : str, optional + The new name for the function. Default is None, in which case the + deprecation message is that `old_name` is deprecated. If given, the + deprecation message is that `old_name` is deprecated and `new_name` + should be used instead. + message : str, optional + Additional explanation of the deprecation. Displayed in the + docstring after the warning. + + Returns + ------- + old_func : function + The deprecated function. + + Examples + -------- + Note that ``olduint`` returns a value after printing Deprecation + Warning: + + >>> olduint = np.lib.utils.deprecate(np.uint) + DeprecationWarning: `uint64` is deprecated! # may vary + >>> olduint(6) + 6 + + """ + # Deprecate may be run as a function or as a decorator + # If run as a function, we initialise the decorator class + # and execute its __call__ method. + + # Deprecated in NumPy 2.0, 2023-07-11 + warnings.warn( + "`deprecate` is deprecated, " + "use `warn` with `DeprecationWarning` instead. " + "(deprecated in NumPy 2.0)", + DeprecationWarning, + stacklevel=2 + ) + + if args: + fn = args[0] + args = args[1:] + + return _Deprecate(*args, **kwargs)(fn) + else: + return _Deprecate(*args, **kwargs) + + +def deprecate_with_doc(msg): + """ + Deprecates a function and includes the deprecation in its docstring. + + .. deprecated:: 2.0 + Use `~warnings.warn` with :exc:`DeprecationWarning` instead. + + This function is used as a decorator. It returns an object that can be + used to issue a DeprecationWarning, by passing the to-be decorated + function as argument, this adds warning to the to-be decorated function's + docstring and returns the new function object. + + See Also + -------- + deprecate : Decorate a function such that it issues a + :exc:`DeprecationWarning` + + Parameters + ---------- + msg : str + Additional explanation of the deprecation. Displayed in the + docstring after the warning. + + Returns + ------- + obj : object + + """ + + # Deprecated in NumPy 2.0, 2023-07-11 + warnings.warn( + "`deprecate` is deprecated, " + "use `warn` with `DeprecationWarning` instead. " + "(deprecated in NumPy 2.0)", + DeprecationWarning, + stacklevel=2 + ) + + return _Deprecate(message=msg) + + +#----------------------------------------------------------------------------- + + +# NOTE: pydoc defines a help function which works similarly to this +# except it uses a pager to take over the screen. + +# combine name and arguments and split to multiple lines of width +# characters. End lines on a comma and begin argument list indented with +# the rest of the arguments. +def _split_line(name, arguments, width): + firstwidth = len(name) + k = firstwidth + newstr = name + sepstr = ", " + arglist = arguments.split(sepstr) + for argument in arglist: + if k == firstwidth: + addstr = "" + else: + addstr = sepstr + k = k + len(argument) + len(addstr) + if k > width: + k = firstwidth + 1 + len(argument) + newstr = newstr + ",\n" + " " * (firstwidth + 2) + argument + else: + newstr = newstr + addstr + argument + return newstr + + +_namedict = None +_dictlist = None + +# Traverse all module directories underneath globals +# to see if something is defined +def _makenamedict(module='numpy'): + module = __import__(module, globals(), locals(), []) + thedict = {module.__name__: module.__dict__} + dictlist = [module.__name__] + totraverse = [module.__dict__] + while True: + if len(totraverse) == 0: + break + thisdict = totraverse.pop(0) + for x in thisdict.keys(): + if isinstance(thisdict[x], types.ModuleType): + modname = thisdict[x].__name__ + if modname not in dictlist: + moddict = thisdict[x].__dict__ + dictlist.append(modname) + totraverse.append(moddict) + thedict[modname] = moddict + return thedict, dictlist + + +def _info(obj, output=None): + """Provide information about ndarray obj. + + Parameters + ---------- + obj : ndarray + Must be ndarray, not checked. + output + Where printed output goes. + + Notes + ----- + Copied over from the numarray module prior to its removal. + Adapted somewhat as only numpy is an option now. + + Called by info. + + """ + extra = "" + tic = "" + bp = lambda x: x + cls = getattr(obj, '__class__', type(obj)) + nm = getattr(cls, '__name__', cls) + strides = obj.strides + endian = obj.dtype.byteorder + + if output is None: + output = sys.stdout + + print("class: ", nm, file=output) + print("shape: ", obj.shape, file=output) + print("strides: ", strides, file=output) + print("itemsize: ", obj.itemsize, file=output) + print("aligned: ", bp(obj.flags.aligned), file=output) + print("contiguous: ", bp(obj.flags.contiguous), file=output) + print("fortran: ", obj.flags.fortran, file=output) + print( + f"data pointer: {hex(obj.ctypes._as_parameter_.value)}{extra}", + file=output + ) + print("byteorder: ", end=' ', file=output) + if endian in ['|', '=']: + print(f"{tic}{sys.byteorder}{tic}", file=output) + byteswap = False + elif endian == '>': + print(f"{tic}big{tic}", file=output) + byteswap = sys.byteorder != "big" + else: + print(f"{tic}little{tic}", file=output) + byteswap = sys.byteorder != "little" + print("byteswap: ", bp(byteswap), file=output) + print(f"type: {obj.dtype}", file=output) + + +@set_module('numpy') +def info(object=None, maxwidth=76, output=None, toplevel='numpy'): + """ + Get help information for an array, function, class, or module. + + Parameters + ---------- + object : object or str, optional + Input object or name to get information about. If `object` is + an `ndarray` instance, information about the array is printed. + If `object` is a numpy object, its docstring is given. If it is + a string, available modules are searched for matching objects. + If None, information about `info` itself is returned. + maxwidth : int, optional + Printing width. + output : file like object, optional + File like object that the output is written to, default is + ``None``, in which case ``sys.stdout`` will be used. + The object has to be opened in 'w' or 'a' mode. + toplevel : str, optional + Start search at this level. + + Notes + ----- + When used interactively with an object, ``np.info(obj)`` is equivalent + to ``help(obj)`` on the Python prompt or ``obj?`` on the IPython + prompt. + + Examples + -------- + >>> np.info(np.polyval) # doctest: +SKIP + polyval(p, x) + Evaluate the polynomial p at x. + ... + + When using a string for `object` it is possible to get multiple results. + + >>> np.info('fft') # doctest: +SKIP + *** Found in numpy *** + Core FFT routines + ... + *** Found in numpy.fft *** + fft(a, n=None, axis=-1) + ... + *** Repeat reference found in numpy.fft.fftpack *** + *** Total of 3 references found. *** + + When the argument is an array, information about the array is printed. + + >>> a = np.array([[1 + 2j, 3, -4], [-5j, 6, 0]], dtype=np.complex64) + >>> np.info(a) + class: ndarray + shape: (2, 3) + strides: (24, 8) + itemsize: 8 + aligned: True + contiguous: True + fortran: False + data pointer: 0x562b6e0d2860 # may vary + byteorder: little + byteswap: False + type: complex64 + + """ + global _namedict, _dictlist + # Local import to speed up numpy's import time. + import inspect + import pydoc + + if (hasattr(object, '_ppimport_importer') or + hasattr(object, '_ppimport_module')): + object = object._ppimport_module + elif hasattr(object, '_ppimport_attr'): + object = object._ppimport_attr + + if output is None: + output = sys.stdout + + if object is None: + info(info) + elif isinstance(object, ndarray): + _info(object, output=output) + elif isinstance(object, str): + if _namedict is None: + _namedict, _dictlist = _makenamedict(toplevel) + numfound = 0 + objlist = [] + for namestr in _dictlist: + try: + obj = _namedict[namestr][object] + if id(obj) in objlist: + print(f"\n *** Repeat reference found in {namestr} *** ", + file=output + ) + else: + objlist.append(id(obj)) + print(f" *** Found in {namestr} ***", file=output) + info(obj) + print("-" * maxwidth, file=output) + numfound += 1 + except KeyError: + pass + if numfound == 0: + print(f"Help for {object} not found.", file=output) + else: + print("\n " + "*** Total of %d references found. ***" % numfound, + file=output + ) + + elif inspect.isfunction(object) or inspect.ismethod(object): + name = object.__name__ + try: + arguments = str(inspect.signature(object)) + except Exception: + arguments = "()" + + if len(name + arguments) > maxwidth: + argstr = _split_line(name, arguments, maxwidth) + else: + argstr = name + arguments + + print(" " + argstr + "\n", file=output) + print(inspect.getdoc(object), file=output) + + elif inspect.isclass(object): + name = object.__name__ + try: + arguments = str(inspect.signature(object)) + except Exception: + arguments = "()" + + if len(name + arguments) > maxwidth: + argstr = _split_line(name, arguments, maxwidth) + else: + argstr = name + arguments + + print(" " + argstr + "\n", file=output) + doc1 = inspect.getdoc(object) + if doc1 is None: + if hasattr(object, '__init__'): + print(inspect.getdoc(object.__init__), file=output) + else: + print(inspect.getdoc(object), file=output) + + methods = pydoc.allmethods(object) + + public_methods = [meth for meth in methods if meth[0] != '_'] + if public_methods: + print("\n\nMethods:\n", file=output) + for meth in public_methods: + thisobj = getattr(object, meth, None) + if thisobj is not None: + methstr, other = pydoc.splitdoc( + inspect.getdoc(thisobj) or "None" + ) + print(f" {meth} -- {methstr}", file=output) + + elif hasattr(object, '__doc__'): + print(inspect.getdoc(object), file=output) + + +def safe_eval(source): + """ + Protected string evaluation. + + .. deprecated:: 2.0 + Use `ast.literal_eval` instead. + + Evaluate a string containing a Python literal expression without + allowing the execution of arbitrary non-literal code. + + .. warning:: + + This function is identical to :py:meth:`ast.literal_eval` and + has the same security implications. It may not always be safe + to evaluate large input strings. + + Parameters + ---------- + source : str + The string to evaluate. + + Returns + ------- + obj : object + The result of evaluating `source`. + + Raises + ------ + SyntaxError + If the code has invalid Python syntax, or if it contains + non-literal code. + + Examples + -------- + >>> np.safe_eval('1') + 1 + >>> np.safe_eval('[1, 2, 3]') + [1, 2, 3] + >>> np.safe_eval('{"foo": ("bar", 10.0)}') + {'foo': ('bar', 10.0)} + + >>> np.safe_eval('import os') + Traceback (most recent call last): + ... + SyntaxError: invalid syntax + + >>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()') + Traceback (most recent call last): + ... + ValueError: malformed node or string: <_ast.Call object at 0x...> + + """ + + # Deprecated in NumPy 2.0, 2023-07-11 + warnings.warn( + "`safe_eval` is deprecated. Use `ast.literal_eval` instead. " + "Be aware of security implications, such as memory exhaustion " + "based attacks (deprecated in NumPy 2.0)", + DeprecationWarning, + stacklevel=2 + ) + + # Local import to speed up numpy's import time. + import ast + return ast.literal_eval(source) + + +def _median_nancheck(data, result, axis): + """ + Utility function to check median result from data for NaN values at the end + and return NaN in that case. Input result can also be a MaskedArray. + + Parameters + ---------- + data : array + Sorted input data to median function + result : Array or MaskedArray + Result of median function. + axis : int + Axis along which the median was computed. + + Returns + ------- + result : scalar or ndarray + Median or NaN in axes which contained NaN in the input. If the input + was an array, NaN will be inserted in-place. If a scalar, either the + input itself or a scalar NaN. + """ + if data.size == 0: + return result + potential_nans = data.take(-1, axis=axis) + n = np.isnan(potential_nans) + # masked NaN values are ok, although for masked the copyto may fail for + # unmasked ones (this was always broken) when the result is a scalar. + if np.ma.isMaskedArray(n): + n = n.filled(False) + + if not n.any(): + return result + + # Without given output, it is possible that the current result is a + # numpy scalar, which is not writeable. If so, just return nan. + if isinstance(result, np.generic): + return potential_nans + + # Otherwise copy NaNs (if there are any) + np.copyto(result, potential_nans, where=n) + return result + +def _opt_info(): + """ + Returns a string containing the CPU features supported + by the current build. + + The format of the string can be explained as follows: + - Dispatched features supported by the running machine end with `*`. + - Dispatched features not supported by the running machine + end with `?`. + - Remaining features represent the baseline. + + Returns: + str: A formatted string indicating the supported CPU features. + """ + from numpy._core._multiarray_umath import ( + __cpu_baseline__, + __cpu_dispatch__, + __cpu_features__, + ) + + if len(__cpu_baseline__) == 0 and len(__cpu_dispatch__) == 0: + return '' + + enabled_features = ' '.join(__cpu_baseline__) + for feature in __cpu_dispatch__: + if __cpu_features__[feature]: + enabled_features += f" {feature}*" + else: + enabled_features += f" {feature}?" + + return enabled_features + +def drop_metadata(dtype, /): + """ + Returns the dtype unchanged if it contained no metadata or a copy of the + dtype if it (or any of its structure dtypes) contained metadata. + + This utility is used by `np.save` and `np.savez` to drop metadata before + saving. + + .. note:: + + Due to its limitation this function may move to a more appropriate + home or change in the future and is considered semi-public API only. + + .. warning:: + + This function does not preserve more strange things like record dtypes + and user dtypes may simply return the wrong thing. If you need to be + sure about the latter, check the result with: + ``np.can_cast(new_dtype, dtype, casting="no")``. + + """ + if dtype.fields is not None: + found_metadata = dtype.metadata is not None + + names = [] + formats = [] + offsets = [] + titles = [] + for name, field in dtype.fields.items(): + field_dt = drop_metadata(field[0]) + if field_dt is not field[0]: + found_metadata = True + + names.append(name) + formats.append(field_dt) + offsets.append(field[1]) + titles.append(None if len(field) < 3 else field[2]) + + if not found_metadata: + return dtype + + structure = { + 'names': names, 'formats': formats, 'offsets': offsets, 'titles': titles, + 'itemsize': dtype.itemsize} + + # NOTE: Could pass (dtype.type, structure) to preserve record dtypes... + return np.dtype(structure, align=dtype.isalignedstruct) + elif dtype.subdtype is not None: + # subarray dtype + subdtype, shape = dtype.subdtype + new_subdtype = drop_metadata(subdtype) + if dtype.metadata is None and new_subdtype is subdtype: + return dtype + + return np.dtype((new_subdtype, shape)) + else: + # Normal unstructured dtype + if dtype.metadata is None: + return dtype + # Note that `dt.str` doesn't round-trip e.g. for user-dtypes. + return np.dtype(dtype.str) diff --git a/local_packages/numpy/lib/_utils_impl.pyi b/local_packages/numpy/lib/_utils_impl.pyi new file mode 100644 index 0000000..e73ba65 --- /dev/null +++ b/local_packages/numpy/lib/_utils_impl.pyi @@ -0,0 +1,22 @@ +from _typeshed import SupportsWrite +from typing import LiteralString +from typing_extensions import TypeVar + +import numpy as np + +__all__ = ["get_include", "info", "show_runtime"] + +_ScalarOrArrayT = TypeVar("_ScalarOrArrayT", bound=np.generic | np.ndarray) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype) + +### + +def get_include() -> LiteralString: ... +def show_runtime() -> None: ... +def info( + object: object = None, maxwidth: int = 76, output: SupportsWrite[str] | None = None, toplevel: str = "numpy" +) -> None: ... +def drop_metadata(dtype: _DTypeT, /) -> _DTypeT: ... + +# used internally by `lib._function_base_impl._median` +def _median_nancheck(data: np.ndarray, result: _ScalarOrArrayT, axis: int) -> _ScalarOrArrayT: ... diff --git a/local_packages/numpy/lib/_version.py b/local_packages/numpy/lib/_version.py new file mode 100644 index 0000000..d70a610 --- /dev/null +++ b/local_packages/numpy/lib/_version.py @@ -0,0 +1,153 @@ +"""Utility to compare (NumPy) version strings. + +The NumpyVersion class allows properly comparing numpy version strings. +The LooseVersion and StrictVersion classes that distutils provides don't +work; they don't recognize anything like alpha/beta/rc/dev versions. + +""" +import re + +__all__ = ['NumpyVersion'] + + +class NumpyVersion: + """Parse and compare numpy version strings. + + NumPy has the following versioning scheme (numbers given are examples; they + can be > 9 in principle): + + - Released version: '1.8.0', '1.8.1', etc. + - Alpha: '1.8.0a1', '1.8.0a2', etc. + - Beta: '1.8.0b1', '1.8.0b2', etc. + - Release candidates: '1.8.0rc1', '1.8.0rc2', etc. + - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended) + - Development versions after a1: '1.8.0a1.dev-f1234afa', + '1.8.0b2.dev-f1234afa', '1.8.1rc1.dev-f1234afa', etc. + - Development versions (no git hash available): '1.8.0.dev-Unknown' + + Comparing needs to be done against a valid version string or other + `NumpyVersion` instance. Note that all development versions of the same + (pre-)release compare equal. + + Parameters + ---------- + vstring : str + NumPy version string (``np.__version__``). + + Examples + -------- + >>> from numpy.lib import NumpyVersion + >>> if NumpyVersion(np.__version__) < '1.7.0': + ... print('skip') + >>> # skip + + >>> NumpyVersion('1.7') # raises ValueError, add ".0" + Traceback (most recent call last): + ... + ValueError: Not a valid numpy version string + + """ + + __module__ = "numpy.lib" + + def __init__(self, vstring): + self.vstring = vstring + ver_main = re.match(r'\d+\.\d+\.\d+', vstring) + if not ver_main: + raise ValueError("Not a valid numpy version string") + + self.version = ver_main.group() + self.major, self.minor, self.bugfix = [int(x) for x in + self.version.split('.')] + if len(vstring) == ver_main.end(): + self.pre_release = 'final' + else: + alpha = re.match(r'a\d', vstring[ver_main.end():]) + beta = re.match(r'b\d', vstring[ver_main.end():]) + rc = re.match(r'rc\d', vstring[ver_main.end():]) + pre_rel = [m for m in [alpha, beta, rc] if m is not None] + if pre_rel: + self.pre_release = pre_rel[0].group() + else: + self.pre_release = '' + + self.is_devversion = bool(re.search(r'.dev', vstring)) + + def _compare_version(self, other): + """Compare major.minor.bugfix""" + if self.major == other.major: + if self.minor == other.minor: + if self.bugfix == other.bugfix: + vercmp = 0 + elif self.bugfix > other.bugfix: + vercmp = 1 + else: + vercmp = -1 + elif self.minor > other.minor: + vercmp = 1 + else: + vercmp = -1 + elif self.major > other.major: + vercmp = 1 + else: + vercmp = -1 + + return vercmp + + def _compare_pre_release(self, other): + """Compare alpha/beta/rc/final.""" + if self.pre_release == other.pre_release: + vercmp = 0 + elif self.pre_release == 'final': + vercmp = 1 + elif other.pre_release == 'final': + vercmp = -1 + elif self.pre_release > other.pre_release: + vercmp = 1 + else: + vercmp = -1 + + return vercmp + + def _compare(self, other): + if not isinstance(other, (str, NumpyVersion)): + raise ValueError("Invalid object to compare with NumpyVersion.") + + if isinstance(other, str): + other = NumpyVersion(other) + + vercmp = self._compare_version(other) + if vercmp == 0: + # Same x.y.z version, check for alpha/beta/rc + vercmp = self._compare_pre_release(other) + if vercmp == 0: + # Same version and same pre-release, check if dev version + if self.is_devversion is other.is_devversion: + vercmp = 0 + elif self.is_devversion: + vercmp = -1 + else: + vercmp = 1 + + return vercmp + + def __lt__(self, other): + return self._compare(other) < 0 + + def __le__(self, other): + return self._compare(other) <= 0 + + def __eq__(self, other): + return self._compare(other) == 0 + + def __ne__(self, other): + return self._compare(other) != 0 + + def __gt__(self, other): + return self._compare(other) > 0 + + def __ge__(self, other): + return self._compare(other) >= 0 + + def __repr__(self): + return f"NumpyVersion({self.vstring})" diff --git a/local_packages/numpy/lib/_version.pyi b/local_packages/numpy/lib/_version.pyi new file mode 100644 index 0000000..c53ef79 --- /dev/null +++ b/local_packages/numpy/lib/_version.pyi @@ -0,0 +1,17 @@ +__all__ = ["NumpyVersion"] + +class NumpyVersion: + vstring: str + version: str + major: int + minor: int + bugfix: int + pre_release: str + is_devversion: bool + def __init__(self, vstring: str) -> None: ... + def __lt__(self, other: str | NumpyVersion) -> bool: ... + def __le__(self, other: str | NumpyVersion) -> bool: ... + def __eq__(self, other: str | NumpyVersion) -> bool: ... # type: ignore[override] + def __ne__(self, other: str | NumpyVersion) -> bool: ... # type: ignore[override] + def __gt__(self, other: str | NumpyVersion) -> bool: ... + def __ge__(self, other: str | NumpyVersion) -> bool: ... diff --git a/local_packages/numpy/lib/array_utils.py b/local_packages/numpy/lib/array_utils.py new file mode 100644 index 0000000..c267eb0 --- /dev/null +++ b/local_packages/numpy/lib/array_utils.py @@ -0,0 +1,7 @@ +from ._array_utils_impl import ( # noqa: F401 + __all__, + __doc__, + byte_bounds, + normalize_axis_index, + normalize_axis_tuple, +) diff --git a/local_packages/numpy/lib/array_utils.pyi b/local_packages/numpy/lib/array_utils.pyi new file mode 100644 index 0000000..4b9ebe3 --- /dev/null +++ b/local_packages/numpy/lib/array_utils.pyi @@ -0,0 +1,6 @@ +from ._array_utils_impl import ( + __all__ as __all__, + byte_bounds as byte_bounds, + normalize_axis_index as normalize_axis_index, + normalize_axis_tuple as normalize_axis_tuple, +) diff --git a/local_packages/numpy/lib/format.py b/local_packages/numpy/lib/format.py new file mode 100644 index 0000000..8e0c799 --- /dev/null +++ b/local_packages/numpy/lib/format.py @@ -0,0 +1,24 @@ +from ._format_impl import ( # noqa: F401 + ARRAY_ALIGN, + BUFFER_SIZE, + EXPECTED_KEYS, + GROWTH_AXIS_MAX_DIGITS, + MAGIC_LEN, + MAGIC_PREFIX, + __all__, + __doc__, + descr_to_dtype, + drop_metadata, + dtype_to_descr, + header_data_from_array_1_0, + isfileobj, + magic, + open_memmap, + read_array, + read_array_header_1_0, + read_array_header_2_0, + read_magic, + write_array, + write_array_header_1_0, + write_array_header_2_0, +) diff --git a/local_packages/numpy/lib/format.pyi b/local_packages/numpy/lib/format.pyi new file mode 100644 index 0000000..c29e18f --- /dev/null +++ b/local_packages/numpy/lib/format.pyi @@ -0,0 +1,24 @@ +from ._format_impl import ( + ARRAY_ALIGN as ARRAY_ALIGN, + BUFFER_SIZE as BUFFER_SIZE, + EXPECTED_KEYS as EXPECTED_KEYS, + GROWTH_AXIS_MAX_DIGITS as GROWTH_AXIS_MAX_DIGITS, + MAGIC_LEN as MAGIC_LEN, + MAGIC_PREFIX as MAGIC_PREFIX, + __all__ as __all__, + __doc__ as __doc__, + descr_to_dtype as descr_to_dtype, + drop_metadata as drop_metadata, + dtype_to_descr as dtype_to_descr, + header_data_from_array_1_0 as header_data_from_array_1_0, + isfileobj as isfileobj, + magic as magic, + open_memmap as open_memmap, + read_array as read_array, + read_array_header_1_0 as read_array_header_1_0, + read_array_header_2_0 as read_array_header_2_0, + read_magic as read_magic, + write_array as write_array, + write_array_header_1_0 as write_array_header_1_0, + write_array_header_2_0 as write_array_header_2_0, +) diff --git a/local_packages/numpy/lib/introspect.py b/local_packages/numpy/lib/introspect.py new file mode 100644 index 0000000..816c79a --- /dev/null +++ b/local_packages/numpy/lib/introspect.py @@ -0,0 +1,94 @@ +""" +Introspection helper functions. +""" + +__all__ = ['opt_func_info'] + + +def opt_func_info(func_name=None, signature=None): + """ + Returns a dictionary containing the currently supported CPU dispatched + features for all optimized functions. + + Parameters + ---------- + func_name : str (optional) + Regular expression to filter by function name. + + signature : str (optional) + Regular expression to filter by data type. + + Returns + ------- + dict + A dictionary where keys are optimized function names and values are + nested dictionaries indicating supported targets based on data types. + + Examples + -------- + Retrieve dispatch information for functions named 'add' or 'sub' and + data types 'float64' or 'float32': + + >>> import numpy as np + >>> dict = np.lib.introspect.opt_func_info( + ... func_name="add|abs", signature="float64|complex64" + ... ) + >>> import json + >>> print(json.dumps(dict, indent=2)) # may vary (architecture) + { + "absolute": { + "dd": { + "current": "SSE41", + "available": "SSE41 baseline(SSE SSE2 SSE3)" + }, + "Ff": { + "current": "FMA3__AVX2", + "available": "AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)" + }, + "Dd": { + "current": "FMA3__AVX2", + "available": "AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)" + } + }, + "add": { + "ddd": { + "current": "FMA3__AVX2", + "available": "FMA3__AVX2 baseline(SSE SSE2 SSE3)" + }, + "FFF": { + "current": "FMA3__AVX2", + "available": "FMA3__AVX2 baseline(SSE SSE2 SSE3)" + } + } + } + + """ + import re + + from numpy._core._multiarray_umath import __cpu_targets_info__ as targets, dtype + + if func_name is not None: + func_pattern = re.compile(func_name) + matching_funcs = { + k: v for k, v in targets.items() + if func_pattern.search(k) + } + else: + matching_funcs = targets + + if signature is not None: + sig_pattern = re.compile(signature) + matching_sigs = {} + for k, v in matching_funcs.items(): + matching_chars = {} + for chars, targets in v.items(): + if any( + sig_pattern.search(c) or sig_pattern.search(dtype(c).name) + for c in chars + ): + matching_chars[chars] = targets # noqa: PERF403 + if matching_chars: + matching_sigs[k] = matching_chars + else: + matching_sigs = matching_funcs + return matching_sigs diff --git a/local_packages/numpy/lib/introspect.pyi b/local_packages/numpy/lib/introspect.pyi new file mode 100644 index 0000000..7929981 --- /dev/null +++ b/local_packages/numpy/lib/introspect.pyi @@ -0,0 +1,3 @@ +__all__ = ["opt_func_info"] + +def opt_func_info(func_name: str | None = None, signature: str | None = None) -> dict[str, dict[str, dict[str, str]]]: ... diff --git a/local_packages/numpy/lib/mixins.py b/local_packages/numpy/lib/mixins.py new file mode 100644 index 0000000..cd02bf7 --- /dev/null +++ b/local_packages/numpy/lib/mixins.py @@ -0,0 +1,180 @@ +""" +Mixin classes for custom array types that don't inherit from ndarray. +""" +from numpy._core import umath as um + +__all__ = ['NDArrayOperatorsMixin'] + + +def _disables_array_ufunc(obj): + """True when __array_ufunc__ is set to None.""" + try: + return obj.__array_ufunc__ is None + except AttributeError: + return False + + +def _binary_method(ufunc, name): + """Implement a forward binary method with a ufunc, e.g., __add__.""" + def func(self, other): + if _disables_array_ufunc(other): + return NotImplemented + return ufunc(self, other) + func.__name__ = f'__{name}__' + return func + + +def _reflected_binary_method(ufunc, name): + """Implement a reflected binary method with a ufunc, e.g., __radd__.""" + def func(self, other): + if _disables_array_ufunc(other): + return NotImplemented + return ufunc(other, self) + func.__name__ = f'__r{name}__' + return func + + +def _inplace_binary_method(ufunc, name): + """Implement an in-place binary method with a ufunc, e.g., __iadd__.""" + def func(self, other): + return ufunc(self, other, out=(self,)) + func.__name__ = f'__i{name}__' + return func + + +def _numeric_methods(ufunc, name): + """Implement forward, reflected and inplace binary methods with a ufunc.""" + return (_binary_method(ufunc, name), + _reflected_binary_method(ufunc, name), + _inplace_binary_method(ufunc, name)) + + +def _unary_method(ufunc, name): + """Implement a unary special method with a ufunc.""" + def func(self): + return ufunc(self) + func.__name__ = f'__{name}__' + return func + + +class NDArrayOperatorsMixin: + """Mixin defining all operator special methods using __array_ufunc__. + + This class implements the special methods for almost all of Python's + builtin operators defined in the `operator` module, including comparisons + (``==``, ``>``, etc.) and arithmetic (``+``, ``*``, ``-``, etc.), by + deferring to the ``__array_ufunc__`` method, which subclasses must + implement. + + It is useful for writing classes that do not inherit from `numpy.ndarray`, + but that should support arithmetic and numpy universal functions like + arrays as described in :external+neps:doc:`nep-0013-ufunc-overrides`. + + As a trivial example, consider this implementation of an ``ArrayLike`` + class that simply wraps a NumPy array and ensures that the result of any + arithmetic operation is also an ``ArrayLike`` object: + + >>> import numbers + >>> class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin): + ... def __init__(self, value): + ... self.value = np.asarray(value) + ... + ... # One might also consider adding the built-in list type to this + ... # list, to support operations like np.add(array_like, list) + ... _HANDLED_TYPES = (np.ndarray, numbers.Number) + ... + ... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + ... out = kwargs.get('out', ()) + ... for x in inputs + out: + ... # Only support operations with instances of + ... # _HANDLED_TYPES. Use ArrayLike instead of type(self) + ... # for isinstance to allow subclasses that don't + ... # override __array_ufunc__ to handle ArrayLike objects. + ... if not isinstance( + ... x, self._HANDLED_TYPES + (ArrayLike,) + ... ): + ... return NotImplemented + ... + ... # Defer to the implementation of the ufunc + ... # on unwrapped values. + ... inputs = tuple(x.value if isinstance(x, ArrayLike) else x + ... for x in inputs) + ... if out: + ... kwargs['out'] = tuple( + ... x.value if isinstance(x, ArrayLike) else x + ... for x in out) + ... result = getattr(ufunc, method)(*inputs, **kwargs) + ... + ... if type(result) is tuple: + ... # multiple return values + ... return tuple(type(self)(x) for x in result) + ... elif method == 'at': + ... # no return value + ... return None + ... else: + ... # one return value + ... return type(self)(result) + ... + ... def __repr__(self): + ... return '%s(%r)' % (type(self).__name__, self.value) + + In interactions between ``ArrayLike`` objects and numbers or numpy arrays, + the result is always another ``ArrayLike``: + + >>> x = ArrayLike([1, 2, 3]) + >>> x - 1 + ArrayLike(array([0, 1, 2])) + >>> 1 - x + ArrayLike(array([ 0, -1, -2])) + >>> np.arange(3) - x + ArrayLike(array([-1, -1, -1])) + >>> x - np.arange(3) + ArrayLike(array([1, 1, 1])) + + Note that unlike ``numpy.ndarray``, ``ArrayLike`` does not allow operations + with arbitrary, unrecognized types. This ensures that interactions with + ArrayLike preserve a well-defined casting hierarchy. + + """ + + __slots__ = () + # Like np.ndarray, this mixin class implements "Option 1" from the ufunc + # overrides NEP. + + # comparisons don't have reflected and in-place versions + __lt__ = _binary_method(um.less, 'lt') + __le__ = _binary_method(um.less_equal, 'le') + __eq__ = _binary_method(um.equal, 'eq') + __ne__ = _binary_method(um.not_equal, 'ne') + __gt__ = _binary_method(um.greater, 'gt') + __ge__ = _binary_method(um.greater_equal, 'ge') + + # numeric methods + __add__, __radd__, __iadd__ = _numeric_methods(um.add, 'add') + __sub__, __rsub__, __isub__ = _numeric_methods(um.subtract, 'sub') + __mul__, __rmul__, __imul__ = _numeric_methods(um.multiply, 'mul') + __matmul__, __rmatmul__, __imatmul__ = _numeric_methods( + um.matmul, 'matmul') + __truediv__, __rtruediv__, __itruediv__ = _numeric_methods( + um.true_divide, 'truediv') + __floordiv__, __rfloordiv__, __ifloordiv__ = _numeric_methods( + um.floor_divide, 'floordiv') + __mod__, __rmod__, __imod__ = _numeric_methods(um.remainder, 'mod') + __divmod__ = _binary_method(um.divmod, 'divmod') + __rdivmod__ = _reflected_binary_method(um.divmod, 'divmod') + # __idivmod__ does not exist + # TODO: handle the optional third argument for __pow__? + __pow__, __rpow__, __ipow__ = _numeric_methods(um.power, 'pow') + __lshift__, __rlshift__, __ilshift__ = _numeric_methods( + um.left_shift, 'lshift') + __rshift__, __rrshift__, __irshift__ = _numeric_methods( + um.right_shift, 'rshift') + __and__, __rand__, __iand__ = _numeric_methods(um.bitwise_and, 'and') + __xor__, __rxor__, __ixor__ = _numeric_methods(um.bitwise_xor, 'xor') + __or__, __ror__, __ior__ = _numeric_methods(um.bitwise_or, 'or') + + # unary methods + __neg__ = _unary_method(um.negative, 'neg') + __pos__ = _unary_method(um.positive, 'pos') + __abs__ = _unary_method(um.absolute, 'abs') + __invert__ = _unary_method(um.invert, 'invert') diff --git a/local_packages/numpy/lib/mixins.pyi b/local_packages/numpy/lib/mixins.pyi new file mode 100644 index 0000000..e508a5c --- /dev/null +++ b/local_packages/numpy/lib/mixins.pyi @@ -0,0 +1,78 @@ +from abc import ABC, abstractmethod +from typing import Any, Literal as L, type_check_only + +from numpy import ufunc + +__all__ = ["NDArrayOperatorsMixin"] + +# NOTE: `NDArrayOperatorsMixin` is not formally an abstract baseclass, +# even though it's reliant on subclasses implementing `__array_ufunc__` + +# NOTE: The accepted input- and output-types of the various dunders are +# completely dependent on how `__array_ufunc__` is implemented. +# As such, only little type safety can be provided here. + +class NDArrayOperatorsMixin(ABC): + __slots__ = () + + @type_check_only + @abstractmethod + def __array_ufunc__( + self, + ufunc: ufunc, + method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "at"], + /, + *inputs: Any, + **kwargs: Any, + ) -> Any: ... + def __lt__(self, other: Any) -> Any: ... + def __le__(self, other: Any) -> Any: ... + def __eq__(self, other: Any) -> Any: ... + def __ne__(self, other: Any) -> Any: ... + def __gt__(self, other: Any) -> Any: ... + def __ge__(self, other: Any) -> Any: ... + def __add__(self, other: Any) -> Any: ... + def __radd__(self, other: Any) -> Any: ... + def __iadd__(self, other: Any) -> Any: ... + def __sub__(self, other: Any) -> Any: ... + def __rsub__(self, other: Any) -> Any: ... + def __isub__(self, other: Any) -> Any: ... + def __mul__(self, other: Any) -> Any: ... + def __rmul__(self, other: Any) -> Any: ... + def __imul__(self, other: Any) -> Any: ... + def __matmul__(self, other: Any) -> Any: ... + def __rmatmul__(self, other: Any) -> Any: ... + def __imatmul__(self, other: Any) -> Any: ... + def __truediv__(self, other: Any) -> Any: ... + def __rtruediv__(self, other: Any) -> Any: ... + def __itruediv__(self, other: Any) -> Any: ... + def __floordiv__(self, other: Any) -> Any: ... + def __rfloordiv__(self, other: Any) -> Any: ... + def __ifloordiv__(self, other: Any) -> Any: ... + def __mod__(self, other: Any) -> Any: ... + def __rmod__(self, other: Any) -> Any: ... + def __imod__(self, other: Any) -> Any: ... + def __divmod__(self, other: Any) -> Any: ... + def __rdivmod__(self, other: Any) -> Any: ... + def __pow__(self, other: Any) -> Any: ... + def __rpow__(self, other: Any) -> Any: ... + def __ipow__(self, other: Any) -> Any: ... + def __lshift__(self, other: Any) -> Any: ... + def __rlshift__(self, other: Any) -> Any: ... + def __ilshift__(self, other: Any) -> Any: ... + def __rshift__(self, other: Any) -> Any: ... + def __rrshift__(self, other: Any) -> Any: ... + def __irshift__(self, other: Any) -> Any: ... + def __and__(self, other: Any) -> Any: ... + def __rand__(self, other: Any) -> Any: ... + def __iand__(self, other: Any) -> Any: ... + def __xor__(self, other: Any) -> Any: ... + def __rxor__(self, other: Any) -> Any: ... + def __ixor__(self, other: Any) -> Any: ... + def __or__(self, other: Any) -> Any: ... + def __ror__(self, other: Any) -> Any: ... + def __ior__(self, other: Any) -> Any: ... + def __neg__(self) -> Any: ... + def __pos__(self) -> Any: ... + def __abs__(self) -> Any: ... + def __invert__(self) -> Any: ... diff --git a/local_packages/numpy/lib/npyio.py b/local_packages/numpy/lib/npyio.py new file mode 100644 index 0000000..84d8079 --- /dev/null +++ b/local_packages/numpy/lib/npyio.py @@ -0,0 +1 @@ +from ._npyio_impl import DataSource, NpzFile, __doc__ # noqa: F401 diff --git a/local_packages/numpy/lib/npyio.pyi b/local_packages/numpy/lib/npyio.pyi new file mode 100644 index 0000000..fd3ae8f --- /dev/null +++ b/local_packages/numpy/lib/npyio.pyi @@ -0,0 +1,5 @@ +from numpy.lib._npyio_impl import ( + DataSource as DataSource, + NpzFile as NpzFile, + __doc__ as __doc__, +) diff --git a/local_packages/numpy/lib/recfunctions.py b/local_packages/numpy/lib/recfunctions.py new file mode 100644 index 0000000..c8a6dd8 --- /dev/null +++ b/local_packages/numpy/lib/recfunctions.py @@ -0,0 +1,1681 @@ +""" +Collection of utilities to manipulate structured arrays. + +Most of these functions were initially implemented by John Hunter for +matplotlib. They have been rewritten and extended for convenience. + +""" +import itertools + +import numpy as np +import numpy.ma as ma +import numpy.ma.mrecords as mrec +from numpy._core.overrides import array_function_dispatch +from numpy.lib._iotools import _is_string_like + +__all__ = [ + 'append_fields', 'apply_along_fields', 'assign_fields_by_name', + 'drop_fields', 'find_duplicates', 'flatten_descr', + 'get_fieldstructure', 'get_names', 'get_names_flat', + 'join_by', 'merge_arrays', 'rec_append_fields', + 'rec_drop_fields', 'rec_join', 'recursive_fill_fields', + 'rename_fields', 'repack_fields', 'require_fields', + 'stack_arrays', 'structured_to_unstructured', 'unstructured_to_structured', + ] + + +def _recursive_fill_fields_dispatcher(input, output): + return (input, output) + + +@array_function_dispatch(_recursive_fill_fields_dispatcher) +def recursive_fill_fields(input, output): + """ + Fills fields from output with fields from input, + with support for nested structures. + + Parameters + ---------- + input : ndarray + Input array. + output : ndarray + Output array. + + Notes + ----- + * `output` should be at least the same size as `input` + + Examples + -------- + >>> import numpy as np + >>> from numpy.lib import recfunctions as rfn + >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)]) + >>> b = np.zeros((3,), dtype=a.dtype) + >>> rfn.recursive_fill_fields(a, b) + array([(1, 10.), (2, 20.), (0, 0.)], dtype=[('A', '>> import numpy as np + >>> dt = np.dtype([(('a', 'A'), np.int64), ('b', np.double, 3)]) + >>> dt.descr + [(('a', 'A'), '>> _get_fieldspec(dt) + [(('a', 'A'), dtype('int64')), ('b', dtype(('>> import numpy as np + >>> from numpy.lib import recfunctions as rfn + >>> rfn.get_names(np.empty((1,), dtype=[('A', int)]).dtype) + ('A',) + >>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]).dtype) + ('A', 'B') + >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) + >>> rfn.get_names(adtype) + ('a', ('b', ('ba', 'bb'))) + """ + listnames = [] + names = adtype.names + for name in names: + current = adtype[name] + if current.names is not None: + listnames.append((name, tuple(get_names(current)))) + else: + listnames.append(name) + return tuple(listnames) + + +def get_names_flat(adtype): + """ + Returns the field names of the input datatype as a tuple. Input datatype + must have fields otherwise error is raised. + Nested structure are flattened beforehand. + + Parameters + ---------- + adtype : dtype + Input datatype + + Examples + -------- + >>> import numpy as np + >>> from numpy.lib import recfunctions as rfn + >>> rfn.get_names_flat(np.empty((1,), dtype=[('A', int)]).dtype) is None + False + >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', str)]).dtype) + ('A', 'B') + >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) + >>> rfn.get_names_flat(adtype) + ('a', 'b', 'ba', 'bb') + """ + listnames = [] + names = adtype.names + for name in names: + listnames.append(name) + current = adtype[name] + if current.names is not None: + listnames.extend(get_names_flat(current)) + return tuple(listnames) + + +def flatten_descr(ndtype): + """ + Flatten a structured data-type description. + + Examples + -------- + >>> import numpy as np + >>> from numpy.lib import recfunctions as rfn + >>> ndtype = np.dtype([('a', '>> rfn.flatten_descr(ndtype) + (('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32'))) + + """ + names = ndtype.names + if names is None: + return (('', ndtype),) + else: + descr = [] + for field in names: + (typ, _) = ndtype.fields[field] + if typ.names is not None: + descr.extend(flatten_descr(typ)) + else: + descr.append((field, typ)) + return tuple(descr) + + +def _zip_dtype(seqarrays, flatten=False): + newdtype = [] + if flatten: + for a in seqarrays: + newdtype.extend(flatten_descr(a.dtype)) + else: + for a in seqarrays: + current = a.dtype + if current.names is not None and len(current.names) == 1: + # special case - dtypes of 1 field are flattened + newdtype.extend(_get_fieldspec(current)) + else: + newdtype.append(('', current)) + return np.dtype(newdtype) + + +def _zip_descr(seqarrays, flatten=False): + """ + Combine the dtype description of a series of arrays. + + Parameters + ---------- + seqarrays : sequence of arrays + Sequence of arrays + flatten : {boolean}, optional + Whether to collapse nested descriptions. + """ + return _zip_dtype(seqarrays, flatten=flatten).descr + + +def get_fieldstructure(adtype, lastname=None, parents=None,): + """ + Returns a dictionary with fields indexing lists of their parent fields. + + This function is used to simplify access to fields nested in other fields. + + Parameters + ---------- + adtype : np.dtype + Input datatype + lastname : optional + Last processed field name (used internally during recursion). + parents : dictionary + Dictionary of parent fields (used internally during recursion). + + Examples + -------- + >>> import numpy as np + >>> from numpy.lib import recfunctions as rfn + >>> ndtype = np.dtype([('A', int), + ... ('B', [('BA', int), + ... ('BB', [('BBA', int), ('BBB', int)])])]) + >>> rfn.get_fieldstructure(ndtype) + ... # XXX: possible regression, order of BBA and BBB is swapped + {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']} + + """ + if parents is None: + parents = {} + names = adtype.names + for name in names: + current = adtype[name] + if current.names is not None: + if lastname: + parents[name] = [lastname, ] + else: + parents[name] = [] + parents.update(get_fieldstructure(current, name, parents)) + else: + lastparent = list(parents.get(lastname, []) or []) + if lastparent: + lastparent.append(lastname) + elif lastname: + lastparent = [lastname, ] + parents[name] = lastparent or [] + return parents + + +def _izip_fields_flat(iterable): + """ + Returns an iterator of concatenated fields from a sequence of arrays, + collapsing any nested structure. + + """ + for element in iterable: + if isinstance(element, np.void): + yield from _izip_fields_flat(tuple(element)) + else: + yield element + + +def _izip_fields(iterable): + """ + Returns an iterator of concatenated fields from a sequence of arrays. + + """ + for element in iterable: + if (hasattr(element, '__iter__') and + not isinstance(element, str)): + yield from _izip_fields(element) + elif isinstance(element, np.void) and len(tuple(element)) == 1: + # this statement is the same from the previous expression + yield from _izip_fields(element) + else: + yield element + + +def _izip_records(seqarrays, fill_value=None, flatten=True): + """ + Returns an iterator of concatenated items from a sequence of arrays. + + Parameters + ---------- + seqarrays : sequence of arrays + Sequence of arrays. + fill_value : {None, integer} + Value used to pad shorter iterables. + flatten : {True, False}, + Whether to + """ + + # Should we flatten the items, or just use a nested approach + if flatten: + zipfunc = _izip_fields_flat + else: + zipfunc = _izip_fields + + for tup in itertools.zip_longest(*seqarrays, fillvalue=fill_value): + yield tuple(zipfunc(tup)) + + +def _fix_output(output, usemask=True, asrecarray=False): + """ + Private function: return a recarray, a ndarray, a MaskedArray + or a MaskedRecords depending on the input parameters + """ + if not isinstance(output, ma.MaskedArray): + usemask = False + if usemask: + if asrecarray: + output = output.view(mrec.MaskedRecords) + else: + output = ma.filled(output) + if asrecarray: + output = output.view(np.recarray) + return output + + +def _fix_defaults(output, defaults=None): + """ + Update the fill_value and masked data of `output` + from the default given in a dictionary defaults. + """ + names = output.dtype.names + (data, mask, fill_value) = (output.data, output.mask, output.fill_value) + for (k, v) in (defaults or {}).items(): + if k in names: + fill_value[k] = v + data[k][mask[k]] = v + return output + + +def _merge_arrays_dispatcher(seqarrays, fill_value=None, flatten=None, + usemask=None, asrecarray=None): + return seqarrays + + +@array_function_dispatch(_merge_arrays_dispatcher) +def merge_arrays(seqarrays, fill_value=-1, flatten=False, + usemask=False, asrecarray=False): + """ + Merge arrays field by field. + + Parameters + ---------- + seqarrays : sequence of ndarrays + Sequence of arrays + fill_value : {float}, optional + Filling value used to pad missing data on the shorter arrays. + flatten : {False, True}, optional + Whether to collapse nested fields. + usemask : {False, True}, optional + Whether to return a masked array or not. + asrecarray : {False, True}, optional + Whether to return a recarray (MaskedRecords) or not. + + Examples + -------- + >>> import numpy as np + >>> from numpy.lib import recfunctions as rfn + >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.]))) + array([( 1, 10.), ( 2, 20.), (-1, 30.)], + dtype=[('f0', '>> rfn.merge_arrays((np.array([1, 2], dtype=np.int64), + ... np.array([10., 20., 30.])), usemask=False) + array([(1, 10.0), (2, 20.0), (-1, 30.0)], + dtype=[('f0', '>> rfn.merge_arrays((np.array([1, 2]).view([('a', np.int64)]), + ... np.array([10., 20., 30.])), + ... usemask=False, asrecarray=True) + rec.array([( 1, 10.), ( 2, 20.), (-1, 30.)], + dtype=[('a', '>> import numpy as np + >>> from numpy.lib import recfunctions as rfn + >>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + ... dtype=[('a', np.int64), ('b', [('ba', np.double), ('bb', np.int64)])]) + >>> rfn.drop_fields(a, 'a') + array([((2., 3),), ((5., 6),)], + dtype=[('b', [('ba', '>> rfn.drop_fields(a, 'ba') + array([(1, (3,)), (4, (6,))], dtype=[('a', '>> rfn.drop_fields(a, ['ba', 'bb']) + array([(1,), (4,)], dtype=[('a', '>> import numpy as np + >>> from numpy.lib import recfunctions as rfn + >>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], + ... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])]) + >>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'}) + array([(1, (2., [ 3., 30.])), (4, (5., [ 6., 60.]))], + dtype=[('A', ' 1: + data = merge_arrays(data, flatten=True, usemask=usemask, + fill_value=fill_value) + else: + data = data.pop() + # + output = ma.masked_all( + max(len(base), len(data)), + dtype=_get_fieldspec(base.dtype) + _get_fieldspec(data.dtype)) + output = recursive_fill_fields(base, output) + output = recursive_fill_fields(data, output) + # + return _fix_output(output, usemask=usemask, asrecarray=asrecarray) + + +def _rec_append_fields_dispatcher(base, names, data, dtypes=None): + yield base + yield from data + + +@array_function_dispatch(_rec_append_fields_dispatcher) +def rec_append_fields(base, names, data, dtypes=None): + """ + Add new fields to an existing array. + + The names of the fields are given with the `names` arguments, + the corresponding values with the `data` arguments. + If a single field is appended, `names`, `data` and `dtypes` do not have + to be lists but just values. + + Parameters + ---------- + base : array + Input array to extend. + names : string, sequence + String or sequence of strings corresponding to the names + of the new fields. + data : array or sequence of arrays + Array or sequence of arrays storing the fields to add to the base. + dtypes : sequence of datatypes, optional + Datatype or sequence of datatypes. + If None, the datatypes are estimated from the `data`. + + See Also + -------- + append_fields + + Returns + ------- + appended_array : np.recarray + """ + return append_fields(base, names, data=data, dtypes=dtypes, + asrecarray=True, usemask=False) + + +def _repack_fields_dispatcher(a, align=None, recurse=None): + return (a,) + + +@array_function_dispatch(_repack_fields_dispatcher) +def repack_fields(a, align=False, recurse=False): + """ + Re-pack the fields of a structured array or dtype in memory. + + The memory layout of structured datatypes allows fields at arbitrary + byte offsets. This means the fields can be separated by padding bytes, + their offsets can be non-monotonically increasing, and they can overlap. + + This method removes any overlaps and reorders the fields in memory so they + have increasing byte offsets, and adds or removes padding bytes depending + on the `align` option, which behaves like the `align` option to + `numpy.dtype`. + + If `align=False`, this method produces a "packed" memory layout in which + each field starts at the byte the previous field ended, and any padding + bytes are removed. + + If `align=True`, this methods produces an "aligned" memory layout in which + each field's offset is a multiple of its alignment, and the total itemsize + is a multiple of the largest alignment, by adding padding bytes as needed. + + Parameters + ---------- + a : ndarray or dtype + array or dtype for which to repack the fields. + align : boolean + If true, use an "aligned" memory layout, otherwise use a "packed" layout. + recurse : boolean + If True, also repack nested structures. + + Returns + ------- + repacked : ndarray or dtype + Copy of `a` with fields repacked, or `a` itself if no repacking was + needed. + + Examples + -------- + >>> import numpy as np + + >>> from numpy.lib import recfunctions as rfn + >>> def print_offsets(d): + ... print("offsets:", [d.fields[name][1] for name in d.names]) + ... print("itemsize:", d.itemsize) + ... + >>> dt = np.dtype('u1, >> dt + dtype({'names': ['f0', 'f1', 'f2'], 'formats': ['u1', '>> print_offsets(dt) + offsets: [0, 8, 16] + itemsize: 24 + >>> packed_dt = rfn.repack_fields(dt) + >>> packed_dt + dtype([('f0', 'u1'), ('f1', '>> print_offsets(packed_dt) + offsets: [0, 1, 9] + itemsize: 17 + + """ + if not isinstance(a, np.dtype): + dt = repack_fields(a.dtype, align=align, recurse=recurse) + return a.astype(dt, copy=False) + + if a.names is None: + return a + + fieldinfo = [] + for name in a.names: + tup = a.fields[name] + if recurse: + fmt = repack_fields(tup[0], align=align, recurse=True) + else: + fmt = tup[0] + + if len(tup) == 3: + name = (tup[2], name) + + fieldinfo.append((name, fmt)) + + dt = np.dtype(fieldinfo, align=align) + return np.dtype((a.type, dt)) + +def _get_fields_and_offsets(dt, offset=0): + """ + Returns a flat list of (dtype, count, offset) tuples of all the + scalar fields in the dtype "dt", including nested fields, in left + to right order. + """ + + # counts up elements in subarrays, including nested subarrays, and returns + # base dtype and count + def count_elem(dt): + count = 1 + while dt.shape != (): + for size in dt.shape: + count *= size + dt = dt.base + return dt, count + + fields = [] + for name in dt.names: + field = dt.fields[name] + f_dt, f_offset = field[0], field[1] + f_dt, n = count_elem(f_dt) + + if f_dt.names is None: + fields.append((np.dtype((f_dt, (n,))), n, f_offset + offset)) + else: + subfields = _get_fields_and_offsets(f_dt, f_offset + offset) + size = f_dt.itemsize + + for i in range(n): + if i == 0: + # optimization: avoid list comprehension if no subarray + fields.extend(subfields) + else: + fields.extend([(d, c, o + i * size) for d, c, o in subfields]) + return fields + +def _common_stride(offsets, counts, itemsize): + """ + Returns the stride between the fields, or None if the stride is not + constant. The values in "counts" designate the lengths of + subarrays. Subarrays are treated as many contiguous fields, with + always positive stride. + """ + if len(offsets) <= 1: + return itemsize + + negative = offsets[1] < offsets[0] # negative stride + if negative: + # reverse, so offsets will be ascending + it = zip(reversed(offsets), reversed(counts)) + else: + it = zip(offsets, counts) + + prev_offset = None + stride = None + for offset, count in it: + if count != 1: # subarray: always c-contiguous + if negative: + return None # subarrays can never have a negative stride + if stride is None: + stride = itemsize + if stride != itemsize: + return None + end_offset = offset + (count - 1) * itemsize + else: + end_offset = offset + + if prev_offset is not None: + new_stride = offset - prev_offset + if stride is None: + stride = new_stride + if stride != new_stride: + return None + + prev_offset = end_offset + + if negative: + return -stride + return stride + + +def _structured_to_unstructured_dispatcher(arr, dtype=None, copy=None, + casting=None): + return (arr,) + +@array_function_dispatch(_structured_to_unstructured_dispatcher) +def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'): + """ + Converts an n-D structured array into an (n+1)-D unstructured array. + + The new array will have a new last dimension equal in size to the + number of field-elements of the input array. If not supplied, the output + datatype is determined from the numpy type promotion rules applied to all + the field datatypes. + + Nested fields, as well as each element of any subarray fields, all count + as a single field-elements. + + Parameters + ---------- + arr : ndarray + Structured array or dtype to convert. Cannot contain object datatype. + dtype : dtype, optional + The dtype of the output unstructured array. + copy : bool, optional + If true, always return a copy. If false, a view is returned if + possible, such as when the `dtype` and strides of the fields are + suitable and the array subtype is one of `numpy.ndarray`, + `numpy.recarray` or `numpy.memmap`. + + .. versionchanged:: 1.25.0 + A view can now be returned if the fields are separated by a + uniform stride. + + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + See casting argument of `numpy.ndarray.astype`. Controls what kind of + data casting may occur. + + Returns + ------- + unstructured : ndarray + Unstructured array with one more dimension. + + Examples + -------- + >>> import numpy as np + + >>> from numpy.lib import recfunctions as rfn + >>> a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) + >>> a + array([(0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.]), + (0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.])], + dtype=[('a', '>> rfn.structured_to_unstructured(a) + array([[0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0.]]) + + >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], + ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) + >>> np.mean(rfn.structured_to_unstructured(b[['x', 'z']]), axis=-1) + array([ 3. , 5.5, 9. , 11. ]) + + """ # noqa: E501 + if arr.dtype.names is None: + raise ValueError('arr must be a structured array') + + fields = _get_fields_and_offsets(arr.dtype) + n_fields = len(fields) + if n_fields == 0 and dtype is None: + raise ValueError("arr has no fields. Unable to guess dtype") + elif n_fields == 0: + # too many bugs elsewhere for this to work now + raise NotImplementedError("arr with no fields is not supported") + + dts, counts, offsets = zip(*fields) + names = [f'f{n}' for n in range(n_fields)] + + if dtype is None: + out_dtype = np.result_type(*[dt.base for dt in dts]) + else: + out_dtype = np.dtype(dtype) + + # Use a series of views and casts to convert to an unstructured array: + + # first view using flattened fields (doesn't work for object arrays) + # Note: dts may include a shape for subarrays + flattened_fields = np.dtype({'names': names, + 'formats': dts, + 'offsets': offsets, + 'itemsize': arr.dtype.itemsize}) + arr = arr.view(flattened_fields) + + # we only allow a few types to be unstructured by manipulating the + # strides, because we know it won't work with, for example, np.matrix nor + # np.ma.MaskedArray. + can_view = type(arr) in (np.ndarray, np.recarray, np.memmap) + if (not copy) and can_view and all(dt.base == out_dtype for dt in dts): + # all elements have the right dtype already; if they have a common + # stride, we can just return a view + common_stride = _common_stride(offsets, counts, out_dtype.itemsize) + if common_stride is not None: + wrap = arr.__array_wrap__ + + new_shape = arr.shape + (sum(counts), out_dtype.itemsize) + new_strides = arr.strides + (abs(common_stride), 1) + + arr = arr[..., np.newaxis].view(np.uint8) # view as bytes + arr = arr[..., min(offsets):] # remove the leading unused data + arr = np.lib.stride_tricks.as_strided(arr, + new_shape, + new_strides, + subok=True) + + # cast and drop the last dimension again + arr = arr.view(out_dtype)[..., 0] + + if common_stride < 0: + arr = arr[..., ::-1] # reverse, if the stride was negative + if type(arr) is not type(wrap.__self__): + # Some types (e.g. recarray) turn into an ndarray along the + # way, so we have to wrap it again in order to match the + # behavior with copy=True. + arr = wrap(arr) + return arr + + # next cast to a packed format with all fields converted to new dtype + packed_fields = np.dtype({'names': names, + 'formats': [(out_dtype, dt.shape) for dt in dts]}) + arr = arr.astype(packed_fields, copy=copy, casting=casting) + + # finally is it safe to view the packed fields as the unstructured type + return arr.view((out_dtype, (sum(counts),))) + + +def _unstructured_to_structured_dispatcher(arr, dtype=None, names=None, + align=None, copy=None, casting=None): + return (arr,) + +@array_function_dispatch(_unstructured_to_structured_dispatcher) +def unstructured_to_structured(arr, dtype=None, names=None, align=False, + copy=False, casting='unsafe'): + """ + Converts an n-D unstructured array into an (n-1)-D structured array. + + The last dimension of the input array is converted into a structure, with + number of field-elements equal to the size of the last dimension of the + input array. By default all output fields have the input array's dtype, but + an output structured dtype with an equal number of fields-elements can be + supplied instead. + + Nested fields, as well as each element of any subarray fields, all count + towards the number of field-elements. + + Parameters + ---------- + arr : ndarray + Unstructured array or dtype to convert. + dtype : dtype, optional + The structured dtype of the output array + names : list of strings, optional + If dtype is not supplied, this specifies the field names for the output + dtype, in order. The field dtypes will be the same as the input array. + align : boolean, optional + Whether to create an aligned memory layout. + copy : bool, optional + See copy argument to `numpy.ndarray.astype`. If true, always return a + copy. If false, and `dtype` requirements are satisfied, a view is + returned. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + See casting argument of `numpy.ndarray.astype`. Controls what kind of + data casting may occur. + + Returns + ------- + structured : ndarray + Structured array with fewer dimensions. + + Examples + -------- + >>> import numpy as np + + >>> from numpy.lib import recfunctions as rfn + >>> dt = np.dtype([('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) + >>> a = np.arange(20).reshape((4,5)) + >>> a + array([[ 0, 1, 2, 3, 4], + [ 5, 6, 7, 8, 9], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]]) + >>> rfn.unstructured_to_structured(a, dt) + array([( 0, ( 1., 2), [ 3., 4.]), ( 5, ( 6., 7), [ 8., 9.]), + (10, (11., 12), [13., 14.]), (15, (16., 17), [18., 19.])], + dtype=[('a', '>> import numpy as np + + >>> from numpy.lib import recfunctions as rfn + >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], + ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) + >>> rfn.apply_along_fields(np.mean, b) + array([ 2.66666667, 5.33333333, 8.66666667, 11. ]) + >>> rfn.apply_along_fields(np.mean, b[['x', 'z']]) + array([ 3. , 5.5, 9. , 11. ]) + + """ + if arr.dtype.names is None: + raise ValueError('arr must be a structured array') + + uarr = structured_to_unstructured(arr) + return func(uarr, axis=-1) + # works and avoids axis requirement, but very, very slow: + #return np.apply_along_axis(func, -1, uarr) + +def _assign_fields_by_name_dispatcher(dst, src, zero_unassigned=None): + return dst, src + +@array_function_dispatch(_assign_fields_by_name_dispatcher) +def assign_fields_by_name(dst, src, zero_unassigned=True): + """ + Assigns values from one structured array to another by field name. + + Normally in numpy >= 1.14, assignment of one structured array to another + copies fields "by position", meaning that the first field from the src is + copied to the first field of the dst, and so on, regardless of field name. + + This function instead copies "by field name", such that fields in the dst + are assigned from the identically named field in the src. This applies + recursively for nested structures. This is how structure assignment worked + in numpy >= 1.6 to <= 1.13. + + Parameters + ---------- + dst : ndarray + src : ndarray + The source and destination arrays during assignment. + zero_unassigned : bool, optional + If True, fields in the dst for which there was no matching + field in the src are filled with the value 0 (zero). This + was the behavior of numpy <= 1.13. If False, those fields + are not modified. + """ + + if dst.dtype.names is None: + dst[...] = src + return + + for name in dst.dtype.names: + if name not in src.dtype.names: + if zero_unassigned: + dst[name] = 0 + else: + assign_fields_by_name(dst[name], src[name], + zero_unassigned) + +def _require_fields_dispatcher(array, required_dtype): + return (array,) + +@array_function_dispatch(_require_fields_dispatcher) +def require_fields(array, required_dtype): + """ + Casts a structured array to a new dtype using assignment by field-name. + + This function assigns from the old to the new array by name, so the + value of a field in the output array is the value of the field with the + same name in the source array. This has the effect of creating a new + ndarray containing only the fields "required" by the required_dtype. + + If a field name in the required_dtype does not exist in the + input array, that field is created and set to 0 in the output array. + + Parameters + ---------- + a : ndarray + array to cast + required_dtype : dtype + datatype for output array + + Returns + ------- + out : ndarray + array with the new dtype, with field values copied from the fields in + the input array with the same name + + Examples + -------- + >>> import numpy as np + + >>> from numpy.lib import recfunctions as rfn + >>> a = np.ones(4, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')]) + >>> rfn.require_fields(a, [('b', 'f4'), ('c', 'u1')]) + array([(1., 1), (1., 1), (1., 1), (1., 1)], + dtype=[('b', '>> rfn.require_fields(a, [('b', 'f4'), ('newf', 'u1')]) + array([(1., 0), (1., 0), (1., 0), (1., 0)], + dtype=[('b', '>> import numpy as np + >>> from numpy.lib import recfunctions as rfn + >>> x = np.array([1, 2,]) + >>> rfn.stack_arrays(x) is x + True + >>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)]) + >>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + ... dtype=[('A', '|S3'), ('B', np.double), ('C', np.double)]) + >>> test = rfn.stack_arrays((z,zz)) + >>> test + masked_array(data=[(b'A', 1.0, --), (b'B', 2.0, --), (b'a', 10.0, 100.0), + (b'b', 20.0, 200.0), (b'c', 30.0, 300.0)], + mask=[(False, False, True), (False, False, True), + (False, False, False), (False, False, False), + (False, False, False)], + fill_value=(b'N/A', 1e+20, 1e+20), + dtype=[('A', 'S3'), ('B', ' '{fdtype}'") + # Only one field: use concatenate + if len(newdescr) == 1: + output = ma.concatenate(seqarrays) + else: + # + output = ma.masked_all((np.sum(nrecords),), newdescr) + offset = np.cumsum(np.r_[0, nrecords]) + seen = [] + for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]): + names = a.dtype.names + if names is None: + output[f'f{len(seen)}'][i:j] = a + else: + for name in n: + output[name][i:j] = a[name] + if name not in seen: + seen.append(name) + # + return _fix_output(_fix_defaults(output, defaults), + usemask=usemask, asrecarray=asrecarray) + + +def _find_duplicates_dispatcher( + a, key=None, ignoremask=None, return_index=None): + return (a,) + + +@array_function_dispatch(_find_duplicates_dispatcher) +def find_duplicates(a, key=None, ignoremask=True, return_index=False): + """ + Find the duplicates in a structured array along a given key + + Parameters + ---------- + a : array-like + Input array + key : {string, None}, optional + Name of the fields along which to check the duplicates. + If None, the search is performed by records + ignoremask : {True, False}, optional + Whether masked data should be discarded or considered as duplicates. + return_index : {False, True}, optional + Whether to return the indices of the duplicated values. + + Examples + -------- + >>> import numpy as np + >>> from numpy.lib import recfunctions as rfn + >>> ndtype = [('a', int)] + >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3], + ... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) + >>> rfn.find_duplicates(a, ignoremask=True, return_index=True) + (masked_array(data=[(1,), (1,), (2,), (2,)], + mask=[(False,), (False,), (False,), (False,)], + fill_value=(999999,), + dtype=[('a', '= nb1)] - nb1 + (r1cmn, r2cmn) = (len(idx_1), len(idx_2)) + if jointype == 'inner': + (r1spc, r2spc) = (0, 0) + elif jointype == 'outer': + idx_out = idx_sort[~flag_in] + idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)])) + idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1)) + (r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn) + elif jointype == 'leftouter': + idx_out = idx_sort[~flag_in] + idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)])) + (r1spc, r2spc) = (len(idx_1) - r1cmn, 0) + # Select the entries from each input + (s1, s2) = (r1[idx_1], r2[idx_2]) + # + # Build the new description of the output array ....... + # Start with the key fields + ndtype = _get_fieldspec(r1k.dtype) + + # Add the fields from r1 + for fname, fdtype in _get_fieldspec(r1.dtype): + if fname not in key: + ndtype.append((fname, fdtype)) + + # Add the fields from r2 + for fname, fdtype in _get_fieldspec(r2.dtype): + # Have we seen the current name already ? + # we need to rebuild this list every time + names = [name for name, dtype in ndtype] + try: + nameidx = names.index(fname) + except ValueError: + #... we haven't: just add the description to the current list + ndtype.append((fname, fdtype)) + else: + # collision + _, cdtype = ndtype[nameidx] + if fname in key: + # The current field is part of the key: take the largest dtype + ndtype[nameidx] = (fname, max(fdtype, cdtype)) + else: + # The current field is not part of the key: add the suffixes, + # and place the new field adjacent to the old one + ndtype[nameidx:nameidx + 1] = [ + (fname + r1postfix, cdtype), + (fname + r2postfix, fdtype) + ] + # Rebuild a dtype from the new fields + ndtype = np.dtype(ndtype) + # Find the largest nb of common fields : + # r1cmn and r2cmn should be equal, but... + cmn = max(r1cmn, r2cmn) + # Construct an empty array + output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype) + names = output.dtype.names + for f in r1names: + selected = s1[f] + if f not in names or (f in r2names and not r2postfix and f not in key): + f += r1postfix + current = output[f] + current[:r1cmn] = selected[:r1cmn] + if jointype in ('outer', 'leftouter'): + current[cmn:cmn + r1spc] = selected[r1cmn:] + for f in r2names: + selected = s2[f] + if f not in names or (f in r1names and not r1postfix and f not in key): + f += r2postfix + current = output[f] + current[:r2cmn] = selected[:r2cmn] + if (jointype == 'outer') and r2spc: + current[-r2spc:] = selected[r2cmn:] + # Sort and finalize the output + output.sort(order=key) + kwargs = {'usemask': usemask, 'asrecarray': asrecarray} + return _fix_output(_fix_defaults(output, defaults), **kwargs) + + +def _rec_join_dispatcher( + key, r1, r2, jointype=None, r1postfix=None, r2postfix=None, + defaults=None): + return (r1, r2) + + +@array_function_dispatch(_rec_join_dispatcher) +def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', + defaults=None): + """ + Join arrays `r1` and `r2` on keys. + Alternative to join_by, that always returns a np.recarray. + + See Also + -------- + join_by : equivalent function + """ + kwargs = {'jointype': jointype, 'r1postfix': r1postfix, 'r2postfix': r2postfix, + 'defaults': defaults, 'usemask': False, 'asrecarray': True} + return join_by(key, r1, r2, **kwargs) + + +del array_function_dispatch diff --git a/local_packages/numpy/lib/recfunctions.pyi b/local_packages/numpy/lib/recfunctions.pyi new file mode 100644 index 0000000..33713cf --- /dev/null +++ b/local_packages/numpy/lib/recfunctions.pyi @@ -0,0 +1,444 @@ +from _typeshed import Incomplete +from collections.abc import Callable, Iterable, Mapping, Sequence +from typing import Any, Literal, TypeAlias, overload +from typing_extensions import TypeVar + +import numpy as np +import numpy.typing as npt +from numpy._typing import _AnyShape, _DTypeLike, _DTypeLikeVoid +from numpy.ma.mrecords import MaskedRecords + +__all__ = [ + "append_fields", + "apply_along_fields", + "assign_fields_by_name", + "drop_fields", + "find_duplicates", + "flatten_descr", + "get_fieldstructure", + "get_names", + "get_names_flat", + "join_by", + "merge_arrays", + "rec_append_fields", + "rec_drop_fields", + "rec_join", + "recursive_fill_fields", + "rename_fields", + "repack_fields", + "require_fields", + "stack_arrays", + "structured_to_unstructured", + "unstructured_to_structured", +] + +_T = TypeVar("_T") +_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype) +_ArrayT = TypeVar("_ArrayT", bound=npt.NDArray[Any]) +_VoidArrayT = TypeVar("_VoidArrayT", bound=npt.NDArray[np.void]) +_NonVoidDTypeT = TypeVar("_NonVoidDTypeT", bound=_NonVoidDType) + +_OneOrMany: TypeAlias = _T | Iterable[_T] +_BuiltinSequence: TypeAlias = tuple[_T, ...] | list[_T] + +_NestedNames: TypeAlias = tuple[str | _NestedNames, ...] +_NonVoid: TypeAlias = np.bool | np.number | np.character | np.datetime64 | np.timedelta64 | np.object_ +_NonVoidDType: TypeAlias = np.dtype[_NonVoid] | np.dtypes.StringDType + +_JoinType: TypeAlias = Literal["inner", "outer", "leftouter"] + +### + +def recursive_fill_fields(input: npt.NDArray[np.void], output: _VoidArrayT) -> _VoidArrayT: ... + +# +def get_names(adtype: np.dtype[np.void]) -> _NestedNames: ... +def get_names_flat(adtype: np.dtype[np.void]) -> tuple[str, ...]: ... + +# +@overload +def flatten_descr(ndtype: _NonVoidDTypeT) -> tuple[tuple[Literal[""], _NonVoidDTypeT]]: ... +@overload +def flatten_descr(ndtype: np.dtype[np.void]) -> tuple[tuple[str, np.dtype]]: ... + +# +def get_fieldstructure( + adtype: np.dtype[np.void], + lastname: str | None = None, + parents: dict[str, list[str]] | None = None, +) -> dict[str, list[str]]: ... + +# +@overload +def merge_arrays( + seqarrays: Sequence[np.ndarray[_ShapeT, np.dtype]] | np.ndarray[_ShapeT, np.dtype], + fill_value: float = -1, + flatten: bool = False, + usemask: bool = False, + asrecarray: bool = False, +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def merge_arrays( + seqarrays: Sequence[npt.ArrayLike] | np.void, + fill_value: float = -1, + flatten: bool = False, + usemask: bool = False, + asrecarray: bool = False, +) -> np.recarray[_AnyShape, np.dtype[np.void]]: ... + +# +@overload +def drop_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + drop_names: str | Iterable[str], + usemask: bool = True, + asrecarray: Literal[False] = False, +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def drop_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + drop_names: str | Iterable[str], + usemask: bool, + asrecarray: Literal[True], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def drop_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + drop_names: str | Iterable[str], + usemask: bool = True, + *, + asrecarray: Literal[True], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... + +# +@overload +def rename_fields( + base: MaskedRecords[_ShapeT, np.dtype[np.void]], + namemapper: Mapping[str, str], +) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... +@overload +def rename_fields( + base: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], + namemapper: Mapping[str, str], +) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +@overload +def rename_fields( + base: np.recarray[_ShapeT, np.dtype[np.void]], + namemapper: Mapping[str, str], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def rename_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + namemapper: Mapping[str, str], +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... + +# +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None, + fill_value: int, + usemask: Literal[False], + asrecarray: Literal[False] = False, +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None = None, + fill_value: int = -1, + *, + usemask: Literal[False], + asrecarray: Literal[False] = False, +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None, + fill_value: int, + usemask: Literal[False], + asrecarray: Literal[True], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None = None, + fill_value: int = -1, + *, + usemask: Literal[False], + asrecarray: Literal[True], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None = None, + fill_value: int = -1, + usemask: Literal[True] = True, + asrecarray: Literal[False] = False, +) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None, + fill_value: int, + usemask: Literal[True], + asrecarray: Literal[True], +) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None = None, + fill_value: int = -1, + usemask: Literal[True] = True, + *, + asrecarray: Literal[True], +) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... + +# +def rec_drop_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + drop_names: str | Iterable[str], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... + +# +def rec_append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None = None, +) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... + +# TODO(jorenham): Stop passing `void` directly once structured dtypes are implemented, +# e.g. using a `TypeVar` with constraints. +# https://github.com/numpy/numtype/issues/92 +@overload +def repack_fields(a: _DTypeT, align: bool = False, recurse: bool = False) -> _DTypeT: ... +@overload +def repack_fields(a: _ScalarT, align: bool = False, recurse: bool = False) -> _ScalarT: ... +@overload +def repack_fields(a: _ArrayT, align: bool = False, recurse: bool = False) -> _ArrayT: ... + +# TODO(jorenham): Attempt shape-typing (return type has ndim == arr.ndim + 1) +@overload +def structured_to_unstructured( + arr: npt.NDArray[np.void], + dtype: _DTypeLike[_ScalarT], + copy: bool = False, + casting: np._CastingKind = "unsafe", +) -> npt.NDArray[_ScalarT]: ... +@overload +def structured_to_unstructured( + arr: npt.NDArray[np.void], + dtype: npt.DTypeLike | None = None, + copy: bool = False, + casting: np._CastingKind = "unsafe", +) -> npt.NDArray[Any]: ... + +# +@overload +def unstructured_to_structured( + arr: npt.NDArray[Any], + dtype: npt.DTypeLike, + names: None = None, + align: bool = False, + copy: bool = False, + casting: str = "unsafe", +) -> npt.NDArray[np.void]: ... +@overload +def unstructured_to_structured( + arr: npt.NDArray[Any], + dtype: None, + names: _OneOrMany[str], + align: bool = False, + copy: bool = False, + casting: str = "unsafe", +) -> npt.NDArray[np.void]: ... +@overload +def unstructured_to_structured( + arr: npt.NDArray[Any], + dtype: None = None, + *, + names: _OneOrMany[str], + align: bool = False, + copy: bool = False, + casting: str = "unsafe", +) -> npt.NDArray[np.void]: ... + +# +def apply_along_fields( + func: Callable[[np.ndarray[_ShapeT, Any]], npt.NDArray[Any]], + arr: np.ndarray[_ShapeT, np.dtype[np.void]], +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... + +# +def assign_fields_by_name(dst: npt.NDArray[np.void], src: npt.NDArray[np.void], zero_unassigned: bool = True) -> None: ... + +# +def require_fields( + array: np.ndarray[_ShapeT, np.dtype[np.void]], + required_dtype: _DTypeLikeVoid, +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... + +# TODO(jorenham): Attempt shape-typing +@overload +def stack_arrays( + arrays: _ArrayT, + defaults: Mapping[str, object] | None = None, + usemask: bool = True, + asrecarray: bool = False, + autoconvert: bool = False, +) -> _ArrayT: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None, + usemask: Literal[False], + asrecarray: Literal[False] = False, + autoconvert: bool = False, +) -> npt.NDArray[np.void]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None = None, + *, + usemask: Literal[False], + asrecarray: Literal[False] = False, + autoconvert: bool = False, +) -> npt.NDArray[np.void]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None = None, + *, + usemask: Literal[False], + asrecarray: Literal[True], + autoconvert: bool = False, +) -> np.recarray[_AnyShape, np.dtype[np.void]]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None = None, + usemask: Literal[True] = True, + asrecarray: Literal[False] = False, + autoconvert: bool = False, +) -> np.ma.MaskedArray[_AnyShape, np.dtype[np.void]]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None, + usemask: Literal[True], + asrecarray: Literal[True], + autoconvert: bool = False, +) -> MaskedRecords[_AnyShape, np.dtype[np.void]]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None = None, + usemask: Literal[True] = True, + *, + asrecarray: Literal[True], + autoconvert: bool = False, +) -> MaskedRecords[_AnyShape, np.dtype[np.void]]: ... + +# +@overload +def find_duplicates( + a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], + key: str | None = None, + ignoremask: bool = True, + return_index: Literal[False] = False, +) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +@overload +def find_duplicates( + a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], + key: str | None, + ignoremask: bool, + return_index: Literal[True], +) -> tuple[np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], np.ndarray[_ShapeT, np.dtype[np.int_]]]: ... +@overload +def find_duplicates( + a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], + key: str | None = None, + ignoremask: bool = True, + *, + return_index: Literal[True], +) -> tuple[np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], np.ndarray[_ShapeT, np.dtype[np.int_]]]: ... + +# +@overload +def join_by( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, + *, + usemask: Literal[False], + asrecarray: Literal[False] = False, +) -> np.ndarray[tuple[int], np.dtype[np.void]]: ... +@overload +def join_by( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, + *, + usemask: Literal[False], + asrecarray: Literal[True], +) -> np.recarray[tuple[int], np.dtype[np.void]]: ... +@overload +def join_by( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, + usemask: Literal[True] = True, + asrecarray: Literal[False] = False, +) -> np.ma.MaskedArray[tuple[int], np.dtype[np.void]]: ... +@overload +def join_by( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, + usemask: Literal[True] = True, + *, + asrecarray: Literal[True], +) -> MaskedRecords[tuple[int], np.dtype[np.void]]: ... + +# +def rec_join( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, +) -> np.recarray[tuple[int], np.dtype[np.void]]: ... diff --git a/local_packages/numpy/lib/scimath.py b/local_packages/numpy/lib/scimath.py new file mode 100644 index 0000000..fb6824d --- /dev/null +++ b/local_packages/numpy/lib/scimath.py @@ -0,0 +1,13 @@ +from ._scimath_impl import ( # noqa: F401 + __all__, + __doc__, + arccos, + arcsin, + arctanh, + log, + log2, + log10, + logn, + power, + sqrt, +) diff --git a/local_packages/numpy/lib/scimath.pyi b/local_packages/numpy/lib/scimath.pyi new file mode 100644 index 0000000..ef2772a --- /dev/null +++ b/local_packages/numpy/lib/scimath.pyi @@ -0,0 +1,12 @@ +from ._scimath_impl import ( + __all__ as __all__, + arccos as arccos, + arcsin as arcsin, + arctanh as arctanh, + log as log, + log2 as log2, + log10 as log10, + logn as logn, + power as power, + sqrt as sqrt, +) diff --git a/local_packages/numpy/lib/stride_tricks.py b/local_packages/numpy/lib/stride_tricks.py new file mode 100644 index 0000000..721a548 --- /dev/null +++ b/local_packages/numpy/lib/stride_tricks.py @@ -0,0 +1 @@ +from ._stride_tricks_impl import __doc__, as_strided, sliding_window_view # noqa: F401 diff --git a/local_packages/numpy/lib/stride_tricks.pyi b/local_packages/numpy/lib/stride_tricks.pyi new file mode 100644 index 0000000..eb46f28 --- /dev/null +++ b/local_packages/numpy/lib/stride_tricks.pyi @@ -0,0 +1,4 @@ +from numpy.lib._stride_tricks_impl import ( + as_strided as as_strided, + sliding_window_view as sliding_window_view, +) diff --git a/local_packages/numpy/lib/tests/__init__.py b/local_packages/numpy/lib/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/local_packages/numpy/lib/tests/data/py2-objarr.npz b/local_packages/numpy/lib/tests/data/py2-objarr.npz new file mode 100644 index 0000000..68a3b53 Binary files /dev/null and b/local_packages/numpy/lib/tests/data/py2-objarr.npz differ diff --git a/local_packages/numpy/lib/tests/data/py3-objarr.npz b/local_packages/numpy/lib/tests/data/py3-objarr.npz new file mode 100644 index 0000000..fd7d9d3 Binary files /dev/null and b/local_packages/numpy/lib/tests/data/py3-objarr.npz differ diff --git a/local_packages/numpy/lib/tests/test__datasource.py b/local_packages/numpy/lib/tests/test__datasource.py new file mode 100644 index 0000000..2dd1941 --- /dev/null +++ b/local_packages/numpy/lib/tests/test__datasource.py @@ -0,0 +1,328 @@ +import os +import urllib.request as urllib_request +from shutil import rmtree +from tempfile import NamedTemporaryFile, mkdtemp, mkstemp +from urllib.error import URLError +from urllib.parse import urlparse + +import pytest + +import numpy.lib._datasource as datasource +from numpy.testing import assert_, assert_equal, assert_raises + + +def urlopen_stub(url, data=None): + '''Stub to replace urlopen for testing.''' + if url == valid_httpurl(): + tmpfile = NamedTemporaryFile(prefix='urltmp_') + return tmpfile + else: + raise URLError('Name or service not known') + + +# setup and teardown +old_urlopen = None + + +def setup_module(): + global old_urlopen + + old_urlopen = urllib_request.urlopen + urllib_request.urlopen = urlopen_stub + + +def teardown_module(): + urllib_request.urlopen = old_urlopen + + +# A valid website for more robust testing +http_path = 'http://www.google.com/' +http_file = 'index.html' + +http_fakepath = 'http://fake.abc.web/site/' +http_fakefile = 'fake.txt' + +malicious_files = ['/etc/shadow', '../../shadow', + '..\\system.dat', 'c:\\windows\\system.dat'] + +magic_line = b'three is the magic number' + + +# Utility functions used by many tests +def valid_textfile(filedir): + # Generate and return a valid temporary file. + fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir, text=True) + os.close(fd) + return path + + +def invalid_textfile(filedir): + # Generate and return an invalid filename. + fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir) + os.close(fd) + os.remove(path) + return path + + +def valid_httpurl(): + return http_path + http_file + + +def invalid_httpurl(): + return http_fakepath + http_fakefile + + +def valid_baseurl(): + return http_path + + +def invalid_baseurl(): + return http_fakepath + + +def valid_httpfile(): + return http_file + + +def invalid_httpfile(): + return http_fakefile + + +class TestDataSourceOpen: + def test_ValidHTTP(self, tmp_path): + ds = datasource.DataSource(tmp_path) + fh = ds.open(valid_httpurl()) + assert_(fh) + fh.close() + + def test_InvalidHTTP(self, tmp_path): + ds = datasource.DataSource(tmp_path) + url = invalid_httpurl() + assert_raises(OSError, ds.open, url) + try: + ds.open(url) + except OSError as e: + # Regression test for bug fixed in r4342. + assert_(e.errno is None) + + def test_InvalidHTTPCacheURLError(self, tmp_path): + ds = datasource.DataSource(tmp_path) + assert_raises(URLError, ds._cache, invalid_httpurl()) + + def test_ValidFile(self, tmp_path): + ds = datasource.DataSource(tmp_path) + local_file = valid_textfile(tmp_path) + fh = ds.open(local_file) + assert_(fh) + fh.close() + + def test_InvalidFile(self, tmp_path): + ds = datasource.DataSource(tmp_path) + invalid_file = invalid_textfile(tmp_path) + assert_raises(OSError, ds.open, invalid_file) + + def test_ValidGzipFile(self, tmp_path): + try: + import gzip + except ImportError: + # We don't have the gzip capabilities to test. + pytest.skip() + # Test datasource's internal file_opener for Gzip files. + ds = datasource.DataSource(tmp_path) + filepath = os.path.join(tmp_path, 'foobar.txt.gz') + fp = gzip.open(filepath, 'w') + fp.write(magic_line) + fp.close() + fp = ds.open(filepath) + result = fp.readline() + fp.close() + assert_equal(magic_line, result) + + def test_ValidBz2File(self, tmp_path): + try: + import bz2 + except ImportError: + # We don't have the bz2 capabilities to test. + pytest.skip() + # Test datasource's internal file_opener for BZip2 files. + ds = datasource.DataSource(tmp_path) + filepath = os.path.join(tmp_path, 'foobar.txt.bz2') + fp = bz2.BZ2File(filepath, 'w') + fp.write(magic_line) + fp.close() + fp = ds.open(filepath) + result = fp.readline() + fp.close() + assert_equal(magic_line, result) + + +class TestDataSourceExists: + def test_ValidHTTP(self, tmp_path): + ds = datasource.DataSource(tmp_path) + assert_(ds.exists(valid_httpurl())) + + def test_InvalidHTTP(self, tmp_path): + ds = datasource.DataSource(tmp_path) + assert_equal(ds.exists(invalid_httpurl()), False) + + def test_ValidFile(self, tmp_path): + # Test valid file in destpath + ds = datasource.DataSource(tmp_path) + tmpfile = valid_textfile(tmp_path) + assert_(ds.exists(tmpfile)) + # Test valid local file not in destpath + localdir = mkdtemp() + tmpfile = valid_textfile(localdir) + assert_(ds.exists(tmpfile)) + rmtree(localdir) + + def test_InvalidFile(self, tmp_path): + ds = datasource.DataSource(tmp_path) + tmpfile = invalid_textfile(tmp_path) + assert_equal(ds.exists(tmpfile), False) + + +class TestDataSourceAbspath: + def test_ValidHTTP(self, tmp_path): + ds = datasource.DataSource(tmp_path) + _, netloc, upath, _, _, _ = urlparse(valid_httpurl()) + local_path = os.path.join(tmp_path, netloc, + upath.strip(os.sep).strip('/')) + assert_equal(local_path, ds.abspath(valid_httpurl())) + + def test_ValidFile(self, tmp_path): + ds = datasource.DataSource(tmp_path) + tmpfile = valid_textfile(tmp_path) + tmpfilename = os.path.split(tmpfile)[-1] + # Test with filename only + assert_equal(tmpfile, ds.abspath(tmpfilename)) + # Test filename with complete path + assert_equal(tmpfile, ds.abspath(tmpfile)) + + def test_InvalidHTTP(self, tmp_path): + ds = datasource.DataSource(tmp_path) + _, netloc, upath, _, _, _ = urlparse(invalid_httpurl()) + invalidhttp = os.path.join(tmp_path, netloc, + upath.strip(os.sep).strip('/')) + assert_(invalidhttp != ds.abspath(valid_httpurl())) + + def test_InvalidFile(self, tmp_path): + ds = datasource.DataSource(tmp_path) + invalidfile = valid_textfile(tmp_path) + tmpfile = valid_textfile(tmp_path) + tmpfilename = os.path.split(tmpfile)[-1] + # Test with filename only + assert_(invalidfile != ds.abspath(tmpfilename)) + # Test filename with complete path + assert_(invalidfile != ds.abspath(tmpfile)) + + def test_sandboxing(self, tmp_path): + ds = datasource.DataSource(tmp_path) + tmpfile = valid_textfile(tmp_path) + tmpfilename = os.path.split(tmpfile)[-1] + + path = lambda x: os.path.abspath(ds.abspath(x)) + + assert_(path(valid_httpurl()).startswith(str(tmp_path))) + assert_(path(invalid_httpurl()).startswith(str(tmp_path))) + assert_(path(tmpfile).startswith(str(tmp_path))) + assert_(path(tmpfilename).startswith(str(tmp_path))) + for fn in malicious_files: + assert_(path(http_path + fn).startswith(str(tmp_path))) + assert_(path(fn).startswith(str(tmp_path))) + + def test_windows_os_sep(self, tmp_path): + orig_os_sep = os.sep + try: + os.sep = '\\' + self.test_ValidHTTP(tmp_path) + self.test_ValidFile(tmp_path) + self.test_InvalidHTTP(tmp_path) + self.test_InvalidFile(tmp_path) + self.test_sandboxing(tmp_path) + finally: + os.sep = orig_os_sep + + +class TestRepositoryAbspath: + def test_ValidHTTP(self, tmp_path): + repos = datasource.Repository(valid_baseurl(), tmp_path) + _, netloc, upath, _, _, _ = urlparse(valid_httpurl()) + local_path = os.path.join(repos._destpath, netloc, + upath.strip(os.sep).strip('/')) + filepath = repos.abspath(valid_httpfile()) + assert_equal(local_path, filepath) + + def test_sandboxing(self, tmp_path): + repos = datasource.Repository(valid_baseurl(), tmp_path) + path = lambda x: os.path.abspath(repos.abspath(x)) + assert_(path(valid_httpfile()).startswith(str(tmp_path))) + for fn in malicious_files: + assert_(path(http_path + fn).startswith(str(tmp_path))) + assert_(path(fn).startswith(str(tmp_path))) + + def test_windows_os_sep(self, tmp_path): + orig_os_sep = os.sep + try: + os.sep = '\\' + self.test_ValidHTTP(tmp_path) + self.test_sandboxing(tmp_path) + finally: + os.sep = orig_os_sep + + +class TestRepositoryExists: + def test_ValidFile(self, tmp_path): + # Create local temp file + repos = datasource.Repository(valid_baseurl(), tmp_path) + tmpfile = valid_textfile(tmp_path) + assert_(repos.exists(tmpfile)) + + def test_InvalidFile(self, tmp_path): + repos = datasource.Repository(valid_baseurl(), tmp_path) + tmpfile = invalid_textfile(tmp_path) + assert_equal(repos.exists(tmpfile), False) + + def test_RemoveHTTPFile(self, tmp_path): + repos = datasource.Repository(valid_baseurl(), tmp_path) + assert_(repos.exists(valid_httpurl())) + + def test_CachedHTTPFile(self, tmp_path): + localfile = valid_httpurl() + # Create a locally cached temp file with an URL based + # directory structure. This is similar to what Repository.open + # would do. + repos = datasource.Repository(valid_baseurl(), tmp_path) + _, netloc, _, _, _, _ = urlparse(localfile) + local_path = os.path.join(repos._destpath, netloc) + os.mkdir(local_path, 0o0700) + tmpfile = valid_textfile(local_path) + assert_(repos.exists(tmpfile)) + + +class TestOpenFunc: + def test_DataSourceOpen(self, tmp_path): + local_file = valid_textfile(tmp_path) + # Test case where destpath is passed in + fp = datasource.open(local_file, destpath=tmp_path) + assert_(fp) + fp.close() + # Test case where default destpath is used + fp = datasource.open(local_file) + assert_(fp) + fp.close() + +def test_del_attr_handling(): + # DataSource __del__ can be called + # even if __init__ fails when the + # Exception object is caught by the + # caller as happens in refguide_check + # is_deprecated() function + + ds = datasource.DataSource() + # simulate failed __init__ by removing key attribute + # produced within __init__ and expected by __del__ + del ds._istmpdest + # should not raise an AttributeError if __del__ + # gracefully handles failed __init__: + ds.__del__() diff --git a/local_packages/numpy/lib/tests/test__iotools.py b/local_packages/numpy/lib/tests/test__iotools.py new file mode 100644 index 0000000..2555c4b --- /dev/null +++ b/local_packages/numpy/lib/tests/test__iotools.py @@ -0,0 +1,358 @@ +import time +from datetime import date + +import pytest + +import numpy as np +from numpy.lib._iotools import ( + LineSplitter, + NameValidator, + StringConverter, + easy_dtype, + flatten_dtype, + has_nested_fields, +) +from numpy.testing import assert_, assert_allclose, assert_equal, assert_raises + + +class TestLineSplitter: + "Tests the LineSplitter class." + + def test_no_delimiter(self): + "Test LineSplitter w/o delimiter" + strg = " 1 2 3 4 5 # test" + test = LineSplitter()(strg) + assert_equal(test, ['1', '2', '3', '4', '5']) + test = LineSplitter('')(strg) + assert_equal(test, ['1', '2', '3', '4', '5']) + + def test_space_delimiter(self): + "Test space delimiter" + strg = " 1 2 3 4 5 # test" + test = LineSplitter(' ')(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5']) + test = LineSplitter(' ')(strg) + assert_equal(test, ['1 2 3 4', '5']) + + def test_tab_delimiter(self): + "Test tab delimiter" + strg = " 1\t 2\t 3\t 4\t 5 6" + test = LineSplitter('\t')(strg) + assert_equal(test, ['1', '2', '3', '4', '5 6']) + strg = " 1 2\t 3 4\t 5 6" + test = LineSplitter('\t')(strg) + assert_equal(test, ['1 2', '3 4', '5 6']) + + def test_other_delimiter(self): + "Test LineSplitter on delimiter" + strg = "1,2,3,4,,5" + test = LineSplitter(',')(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5']) + # + strg = " 1,2,3,4,,5 # test" + test = LineSplitter(',')(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5']) + + # gh-11028 bytes comment/delimiters should get encoded + strg = b" 1,2,3,4,,5 % test" + test = LineSplitter(delimiter=b',', comments=b'%')(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5']) + + def test_constant_fixed_width(self): + "Test LineSplitter w/ fixed-width fields" + strg = " 1 2 3 4 5 # test" + test = LineSplitter(3)(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5', '']) + # + strg = " 1 3 4 5 6# test" + test = LineSplitter(20)(strg) + assert_equal(test, ['1 3 4 5 6']) + # + strg = " 1 3 4 5 6# test" + test = LineSplitter(30)(strg) + assert_equal(test, ['1 3 4 5 6']) + + def test_variable_fixed_width(self): + strg = " 1 3 4 5 6# test" + test = LineSplitter((3, 6, 6, 3))(strg) + assert_equal(test, ['1', '3', '4 5', '6']) + # + strg = " 1 3 4 5 6# test" + test = LineSplitter((6, 6, 9))(strg) + assert_equal(test, ['1', '3 4', '5 6']) + +# ----------------------------------------------------------------------------- + + +class TestNameValidator: + + def test_case_sensitivity(self): + "Test case sensitivity" + names = ['A', 'a', 'b', 'c'] + test = NameValidator().validate(names) + assert_equal(test, ['A', 'a', 'b', 'c']) + test = NameValidator(case_sensitive=False).validate(names) + assert_equal(test, ['A', 'A_1', 'B', 'C']) + test = NameValidator(case_sensitive='upper').validate(names) + assert_equal(test, ['A', 'A_1', 'B', 'C']) + test = NameValidator(case_sensitive='lower').validate(names) + assert_equal(test, ['a', 'a_1', 'b', 'c']) + + # check exceptions + assert_raises(ValueError, NameValidator, case_sensitive='foobar') + + def test_excludelist(self): + "Test excludelist" + names = ['dates', 'data', 'Other Data', 'mask'] + validator = NameValidator(excludelist=['dates', 'data', 'mask']) + test = validator.validate(names) + assert_equal(test, ['dates_', 'data_', 'Other_Data', 'mask_']) + + def test_missing_names(self): + "Test validate missing names" + namelist = ('a', 'b', 'c') + validator = NameValidator() + assert_equal(validator(namelist), ['a', 'b', 'c']) + namelist = ('', 'b', 'c') + assert_equal(validator(namelist), ['f0', 'b', 'c']) + namelist = ('a', 'b', '') + assert_equal(validator(namelist), ['a', 'b', 'f0']) + namelist = ('', 'f0', '') + assert_equal(validator(namelist), ['f1', 'f0', 'f2']) + + def test_validate_nb_names(self): + "Test validate nb names" + namelist = ('a', 'b', 'c') + validator = NameValidator() + assert_equal(validator(namelist, nbfields=1), ('a',)) + assert_equal(validator(namelist, nbfields=5, defaultfmt="g%i"), + ['a', 'b', 'c', 'g0', 'g1']) + + def test_validate_wo_names(self): + "Test validate no names" + namelist = None + validator = NameValidator() + assert_(validator(namelist) is None) + assert_equal(validator(namelist, nbfields=3), ['f0', 'f1', 'f2']) + +# ----------------------------------------------------------------------------- + + +def _bytes_to_date(s): + return date(*time.strptime(s, "%Y-%m-%d")[:3]) + + +class TestStringConverter: + "Test StringConverter" + + def test_creation(self): + "Test creation of a StringConverter" + converter = StringConverter(int, -99999) + assert_equal(converter._status, 1) + assert_equal(converter.default, -99999) + + def test_upgrade(self): + "Tests the upgrade method." + + converter = StringConverter() + assert_equal(converter._status, 0) + + # test int + assert_equal(converter.upgrade('0'), 0) + assert_equal(converter._status, 1) + + # On systems where long defaults to 32-bit, the statuses will be + # offset by one, so we check for this here. + import numpy._core.numeric as nx + status_offset = int(nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize) + + # test int > 2**32 + assert_equal(converter.upgrade('17179869184'), 17179869184) + assert_equal(converter._status, 1 + status_offset) + + # test float + assert_allclose(converter.upgrade('0.'), 0.0) + assert_equal(converter._status, 2 + status_offset) + + # test complex + assert_equal(converter.upgrade('0j'), complex('0j')) + assert_equal(converter._status, 3 + status_offset) + + # test str + # note that the longdouble type has been skipped, so the + # _status increases by 2. Everything should succeed with + # unicode conversion (8). + for s in ['a', b'a']: + res = converter.upgrade(s) + assert_(type(res) is str) + assert_equal(res, 'a') + assert_equal(converter._status, 8 + status_offset) + + def test_missing(self): + "Tests the use of missing values." + converter = StringConverter(missing_values=('missing', + 'missed')) + converter.upgrade('0') + assert_equal(converter('0'), 0) + assert_equal(converter(''), converter.default) + assert_equal(converter('missing'), converter.default) + assert_equal(converter('missed'), converter.default) + try: + converter('miss') + except ValueError: + pass + + @pytest.mark.thread_unsafe(reason="monkeypatches StringConverter") + def test_upgrademapper(self): + "Tests updatemapper" + dateparser = _bytes_to_date + _original_mapper = StringConverter._mapper[:] + try: + StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1)) + convert = StringConverter(dateparser, date(2000, 1, 1)) + test = convert('2001-01-01') + assert_equal(test, date(2001, 1, 1)) + test = convert('2009-01-01') + assert_equal(test, date(2009, 1, 1)) + test = convert('') + assert_equal(test, date(2000, 1, 1)) + finally: + StringConverter._mapper = _original_mapper + + def test_string_to_object(self): + "Make sure that string-to-object functions are properly recognized" + old_mapper = StringConverter._mapper[:] # copy of list + conv = StringConverter(_bytes_to_date) + assert_equal(conv._mapper, old_mapper) + assert_(hasattr(conv, 'default')) + + def test_keep_default(self): + "Make sure we don't lose an explicit default" + converter = StringConverter(None, missing_values='', + default=-999) + converter.upgrade('3.14159265') + assert_equal(converter.default, -999) + assert_equal(converter.type, np.dtype(float)) + # + converter = StringConverter( + None, missing_values='', default=0) + converter.upgrade('3.14159265') + assert_equal(converter.default, 0) + assert_equal(converter.type, np.dtype(float)) + + def test_keep_default_zero(self): + "Check that we don't lose a default of 0" + converter = StringConverter(int, default=0, + missing_values="N/A") + assert_equal(converter.default, 0) + + def test_keep_missing_values(self): + "Check that we're not losing missing values" + converter = StringConverter(int, default=0, + missing_values="N/A") + assert_equal( + converter.missing_values, {'', 'N/A'}) + + def test_int64_dtype(self): + "Check that int64 integer types can be specified" + converter = StringConverter(np.int64, default=0) + val = "-9223372036854775807" + assert_(converter(val) == -9223372036854775807) + val = "9223372036854775807" + assert_(converter(val) == 9223372036854775807) + + def test_uint64_dtype(self): + "Check that uint64 integer types can be specified" + converter = StringConverter(np.uint64, default=0) + val = "9223372043271415339" + assert_(converter(val) == 9223372043271415339) + + +class TestMiscFunctions: + + def test_has_nested_dtype(self): + "Test has_nested_dtype" + ndtype = np.dtype(float) + assert_equal(has_nested_fields(ndtype), False) + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + assert_equal(has_nested_fields(ndtype), False) + ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])]) + assert_equal(has_nested_fields(ndtype), True) + + def test_easy_dtype(self): + "Test ndtype on dtypes" + # Simple case + ndtype = float + assert_equal(easy_dtype(ndtype), np.dtype(float)) + # As string w/o names + ndtype = "i4, f8" + assert_equal(easy_dtype(ndtype), + np.dtype([('f0', "i4"), ('f1', "f8")])) + # As string w/o names but different default format + assert_equal(easy_dtype(ndtype, defaultfmt="field_%03i"), + np.dtype([('field_000', "i4"), ('field_001', "f8")])) + # As string w/ names + ndtype = "i4, f8" + assert_equal(easy_dtype(ndtype, names="a, b"), + np.dtype([('a', "i4"), ('b', "f8")])) + # As string w/ names (too many) + ndtype = "i4, f8" + assert_equal(easy_dtype(ndtype, names="a, b, c"), + np.dtype([('a', "i4"), ('b', "f8")])) + # As string w/ names (not enough) + ndtype = "i4, f8" + assert_equal(easy_dtype(ndtype, names=", b"), + np.dtype([('f0', "i4"), ('b', "f8")])) + # ... (with different default format) + assert_equal(easy_dtype(ndtype, names="a", defaultfmt="f%02i"), + np.dtype([('a', "i4"), ('f00', "f8")])) + # As list of tuples w/o names + ndtype = [('A', int), ('B', float)] + assert_equal(easy_dtype(ndtype), np.dtype([('A', int), ('B', float)])) + # As list of tuples w/ names + assert_equal(easy_dtype(ndtype, names="a,b"), + np.dtype([('a', int), ('b', float)])) + # As list of tuples w/ not enough names + assert_equal(easy_dtype(ndtype, names="a"), + np.dtype([('a', int), ('f0', float)])) + # As list of tuples w/ too many names + assert_equal(easy_dtype(ndtype, names="a,b,c"), + np.dtype([('a', int), ('b', float)])) + # As list of types w/o names + ndtype = (int, float, float) + assert_equal(easy_dtype(ndtype), + np.dtype([('f0', int), ('f1', float), ('f2', float)])) + # As list of types w names + ndtype = (int, float, float) + assert_equal(easy_dtype(ndtype, names="a, b, c"), + np.dtype([('a', int), ('b', float), ('c', float)])) + # As simple dtype w/ names + ndtype = np.dtype(float) + assert_equal(easy_dtype(ndtype, names="a, b, c"), + np.dtype([(_, float) for _ in ('a', 'b', 'c')])) + # As simple dtype w/o names (but multiple fields) + ndtype = np.dtype(float) + assert_equal( + easy_dtype(ndtype, names=['', '', ''], defaultfmt="f%02i"), + np.dtype([(_, float) for _ in ('f00', 'f01', 'f02')])) + + def test_flatten_dtype(self): + "Testing flatten_dtype" + # Standard dtype + dt = np.dtype([("a", "f8"), ("b", "f8")]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [float, float]) + # Recursive dtype + dt = np.dtype([("a", [("aa", '|S1'), ("ab", '|S2')]), ("b", int)]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [np.dtype('|S1'), np.dtype('|S2'), int]) + # dtype with shaped fields + dt = np.dtype([("a", (float, 2)), ("b", (int, 3))]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [float, int]) + dt_flat = flatten_dtype(dt, True) + assert_equal(dt_flat, [float] * 2 + [int] * 3) + # dtype w/ titles + dt = np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [float, float]) diff --git a/local_packages/numpy/lib/tests/test__version.py b/local_packages/numpy/lib/tests/test__version.py new file mode 100644 index 0000000..6e6a34a --- /dev/null +++ b/local_packages/numpy/lib/tests/test__version.py @@ -0,0 +1,64 @@ +"""Tests for the NumpyVersion class. + +""" +from numpy.lib import NumpyVersion +from numpy.testing import assert_, assert_raises + + +def test_main_versions(): + assert_(NumpyVersion('1.8.0') == '1.8.0') + for ver in ['1.9.0', '2.0.0', '1.8.1', '10.0.1']: + assert_(NumpyVersion('1.8.0') < ver) + + for ver in ['1.7.0', '1.7.1', '0.9.9']: + assert_(NumpyVersion('1.8.0') > ver) + + +def test_version_1_point_10(): + # regression test for gh-2998. + assert_(NumpyVersion('1.9.0') < '1.10.0') + assert_(NumpyVersion('1.11.0') < '1.11.1') + assert_(NumpyVersion('1.11.0') == '1.11.0') + assert_(NumpyVersion('1.99.11') < '1.99.12') + + +def test_alpha_beta_rc(): + assert_(NumpyVersion('1.8.0rc1') == '1.8.0rc1') + for ver in ['1.8.0', '1.8.0rc2']: + assert_(NumpyVersion('1.8.0rc1') < ver) + + for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']: + assert_(NumpyVersion('1.8.0rc1') > ver) + + assert_(NumpyVersion('1.8.0b1') > '1.8.0a2') + + +def test_dev_version(): + assert_(NumpyVersion('1.9.0.dev-Unknown') < '1.9.0') + for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev-ffffffff']: + assert_(NumpyVersion('1.9.0.dev-f16acvda') < ver) + + assert_(NumpyVersion('1.9.0.dev-f16acvda') == '1.9.0.dev-11111111') + + +def test_dev_a_b_rc_mixed(): + assert_(NumpyVersion('1.9.0a2.dev-f16acvda') == '1.9.0a2.dev-11111111') + assert_(NumpyVersion('1.9.0a2.dev-6acvda54') < '1.9.0a2') + + +def test_dev0_version(): + assert_(NumpyVersion('1.9.0.dev0+Unknown') < '1.9.0') + for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']: + assert_(NumpyVersion('1.9.0.dev0+f16acvda') < ver) + + assert_(NumpyVersion('1.9.0.dev0+f16acvda') == '1.9.0.dev0+11111111') + + +def test_dev0_a_b_rc_mixed(): + assert_(NumpyVersion('1.9.0a2.dev0+f16acvda') == '1.9.0a2.dev0+11111111') + assert_(NumpyVersion('1.9.0a2.dev0+6acvda54') < '1.9.0a2') + + +def test_raises(): + for ver in ['1.9', '1,9.0', '1.7.x']: + assert_raises(ValueError, NumpyVersion, ver) diff --git a/local_packages/numpy/lib/tests/test_array_utils.py b/local_packages/numpy/lib/tests/test_array_utils.py new file mode 100644 index 0000000..55b9d28 --- /dev/null +++ b/local_packages/numpy/lib/tests/test_array_utils.py @@ -0,0 +1,32 @@ +import numpy as np +from numpy.lib import array_utils +from numpy.testing import assert_equal + + +class TestByteBounds: + def test_byte_bounds(self): + # pointer difference matches size * itemsize + # due to contiguity + a = np.arange(12).reshape(3, 4) + low, high = array_utils.byte_bounds(a) + assert_equal(high - low, a.size * a.itemsize) + + def test_unusual_order_positive_stride(self): + a = np.arange(12).reshape(3, 4) + b = a.T + low, high = array_utils.byte_bounds(b) + assert_equal(high - low, b.size * b.itemsize) + + def test_unusual_order_negative_stride(self): + a = np.arange(12).reshape(3, 4) + b = a.T[::-1] + low, high = array_utils.byte_bounds(b) + assert_equal(high - low, b.size * b.itemsize) + + def test_strided(self): + a = np.arange(12) + b = a[::2] + low, high = array_utils.byte_bounds(b) + # the largest pointer address is lost (even numbers only in the + # stride), and compensate addresses for striding by 2 + assert_equal(high - low, b.size * 2 * b.itemsize - b.itemsize) diff --git a/local_packages/numpy/lib/tests/test_arraypad.py b/local_packages/numpy/lib/tests/test_arraypad.py new file mode 100644 index 0000000..14383e7 --- /dev/null +++ b/local_packages/numpy/lib/tests/test_arraypad.py @@ -0,0 +1,1427 @@ +"""Tests for the array padding functions. + +""" +import pytest + +import numpy as np +from numpy.lib._arraypad_impl import _as_pairs +from numpy.testing import assert_allclose, assert_array_equal, assert_equal + +_numeric_dtypes = ( + np._core.sctypes["uint"] + + np._core.sctypes["int"] + + np._core.sctypes["float"] + + np._core.sctypes["complex"] +) +_all_modes = { + 'constant': {'constant_values': 0}, + 'edge': {}, + 'linear_ramp': {'end_values': 0}, + 'maximum': {'stat_length': None}, + 'mean': {'stat_length': None}, + 'median': {'stat_length': None}, + 'minimum': {'stat_length': None}, + 'reflect': {'reflect_type': 'even'}, + 'symmetric': {'reflect_type': 'even'}, + 'wrap': {}, + 'empty': {} +} + + +class TestAsPairs: + def test_single_value(self): + """Test casting for a single value.""" + expected = np.array([[3, 3]] * 10) + for x in (3, [3], [[3]]): + result = _as_pairs(x, 10) + assert_equal(result, expected) + # Test with dtype=object + obj = object() + assert_equal( + _as_pairs(obj, 10), + np.array([[obj, obj]] * 10) + ) + + def test_two_values(self): + """Test proper casting for two different values.""" + # Broadcasting in the first dimension with numbers + expected = np.array([[3, 4]] * 10) + for x in ([3, 4], [[3, 4]]): + result = _as_pairs(x, 10) + assert_equal(result, expected) + # and with dtype=object + obj = object() + assert_equal( + _as_pairs(["a", obj], 10), + np.array([["a", obj]] * 10) + ) + + # Broadcasting in the second / last dimension with numbers + assert_equal( + _as_pairs([[3], [4]], 2), + np.array([[3, 3], [4, 4]]) + ) + # and with dtype=object + assert_equal( + _as_pairs([["a"], [obj]], 2), + np.array([["a", "a"], [obj, obj]]) + ) + + def test_with_none(self): + expected = ((None, None), (None, None), (None, None)) + assert_equal( + _as_pairs(None, 3, as_index=False), + expected + ) + assert_equal( + _as_pairs(None, 3, as_index=True), + expected + ) + + def test_pass_through(self): + """Test if `x` already matching desired output are passed through.""" + expected = np.arange(12).reshape((6, 2)) + assert_equal( + _as_pairs(expected, 6), + expected + ) + + def test_as_index(self): + """Test results if `as_index=True`.""" + assert_equal( + _as_pairs([2.6, 3.3], 10, as_index=True), + np.array([[3, 3]] * 10, dtype=np.intp) + ) + assert_equal( + _as_pairs([2.6, 4.49], 10, as_index=True), + np.array([[3, 4]] * 10, dtype=np.intp) + ) + for x in (-3, [-3], [[-3]], [-3, 4], [3, -4], [[-3, 4]], [[4, -3]], + [[1, 2]] * 9 + [[1, -2]]): + with pytest.raises(ValueError, match="negative values"): + _as_pairs(x, 10, as_index=True) + + def test_exceptions(self): + """Ensure faulty usage is discovered.""" + with pytest.raises(ValueError, match="more dimensions than allowed"): + _as_pairs([[[3]]], 10) + with pytest.raises(ValueError, match="could not be broadcast"): + _as_pairs([[1, 2], [3, 4]], 3) + with pytest.raises(ValueError, match="could not be broadcast"): + _as_pairs(np.ones((2, 3)), 3) + + +class TestConditionalShortcuts: + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_zero_padding_shortcuts(self, mode): + test = np.arange(120).reshape(4, 5, 6) + pad_amt = [(0, 0) for _ in test.shape] + assert_array_equal(test, np.pad(test, pad_amt, mode=mode)) + + @pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',]) + def test_shallow_statistic_range(self, mode): + test = np.arange(120).reshape(4, 5, 6) + pad_amt = [(1, 1) for _ in test.shape] + assert_array_equal(np.pad(test, pad_amt, mode='edge'), + np.pad(test, pad_amt, mode=mode, stat_length=1)) + + @pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',]) + def test_clip_statistic_range(self, mode): + test = np.arange(30).reshape(5, 6) + pad_amt = [(3, 3) for _ in test.shape] + assert_array_equal(np.pad(test, pad_amt, mode=mode), + np.pad(test, pad_amt, mode=mode, stat_length=30)) + + +class TestStatistic: + def test_check_mean_stat_length(self): + a = np.arange(100).astype('f') + a = np.pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), )) + b = np.array( + [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, + 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, + 0.5, 0.5, 0.5, 0.5, 0.5, + + 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., + + 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., + 98., 98., 98., 98., 98., 98., 98., 98., 98., 98. + ]) + assert_array_equal(a, b) + + def test_check_maximum_1(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'maximum') + b = np.array( + [99, 99, 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, 99, 99] + ) + assert_array_equal(a, b) + + def test_check_maximum_2(self): + a = np.arange(100) + 1 + a = np.pad(a, (25, 20), 'maximum') + b = np.array( + [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, + + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100] + ) + assert_array_equal(a, b) + + def test_check_maximum_stat_length(self): + a = np.arange(100) + 1 + a = np.pad(a, (25, 20), 'maximum', stat_length=10) + b = np.array( + [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, + + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100] + ) + assert_array_equal(a, b) + + def test_check_minimum_1(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'minimum') + b = np.array( + [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ) + assert_array_equal(a, b) + + def test_check_minimum_2(self): + a = np.arange(100) + 2 + a = np.pad(a, (25, 20), 'minimum') + b = np.array( + [ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, + + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, + + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2] + ) + assert_array_equal(a, b) + + def test_check_minimum_stat_length(self): + a = np.arange(100) + 1 + a = np.pad(a, (25, 20), 'minimum', stat_length=10) + b = np.array( + [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, + + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + + 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, + 91, 91, 91, 91, 91, 91, 91, 91, 91, 91] + ) + assert_array_equal(a, b) + + def test_check_median(self): + a = np.arange(100).astype('f') + a = np.pad(a, (25, 20), 'median') + b = np.array( + [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, + + 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., + + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5] + ) + assert_array_equal(a, b) + + def test_check_median_01(self): + a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]]) + a = np.pad(a, 1, 'median') + b = np.array( + [[4, 4, 5, 4, 4], + + [3, 3, 1, 4, 3], + [5, 4, 5, 9, 5], + [8, 9, 8, 2, 8], + + [4, 4, 5, 4, 4]] + ) + assert_array_equal(a, b) + + def test_check_median_02(self): + a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]]) + a = np.pad(a.T, 1, 'median').T + b = np.array( + [[5, 4, 5, 4, 5], + + [3, 3, 1, 4, 3], + [5, 4, 5, 9, 5], + [8, 9, 8, 2, 8], + + [5, 4, 5, 4, 5]] + ) + assert_array_equal(a, b) + + def test_check_median_stat_length(self): + a = np.arange(100).astype('f') + a[1] = 2. + a[97] = 96. + a = np.pad(a, (25, 20), 'median', stat_length=(3, 5)) + b = np.array( + [ 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., + 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., + 2., 2., 2., 2., 2., + + 0., 2., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 96., 98., 99., + + 96., 96., 96., 96., 96., 96., 96., 96., 96., 96., + 96., 96., 96., 96., 96., 96., 96., 96., 96., 96.] + ) + assert_array_equal(a, b) + + def test_check_mean_shape_one(self): + a = [[4, 5, 6]] + a = np.pad(a, (5, 7), 'mean', stat_length=2) + b = np.array( + [[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6]] + ) + assert_array_equal(a, b) + + def test_check_mean_2(self): + a = np.arange(100).astype('f') + a = np.pad(a, (25, 20), 'mean') + b = np.array( + [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, + + 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., + + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5] + ) + assert_array_equal(a, b) + + @pytest.mark.parametrize("mode", [ + "mean", + "median", + "minimum", + "maximum" + ]) + def test_same_prepend_append(self, mode): + """ Test that appended and prepended values are equal """ + # This test is constructed to trigger floating point rounding errors in + # a way that caused gh-11216 for mode=='mean' + a = np.array([-1, 2, -1]) + np.array([0, 1e-12, 0], dtype=np.float64) + a = np.pad(a, (1, 1), mode) + assert_equal(a[0], a[-1]) + + @pytest.mark.parametrize("mode", ["mean", "median", "minimum", "maximum"]) + @pytest.mark.parametrize( + "stat_length", [-2, (-2,), (3, -1), ((5, 2), (-2, 3)), ((-4,), (2,))] + ) + def test_check_negative_stat_length(self, mode, stat_length): + arr = np.arange(30).reshape((6, 5)) + match = "index can't contain negative values" + with pytest.raises(ValueError, match=match): + np.pad(arr, 2, mode, stat_length=stat_length) + + def test_simple_stat_length(self): + a = np.arange(30) + a = np.reshape(a, (6, 5)) + a = np.pad(a, ((2, 3), (3, 2)), mode='mean', stat_length=(3,)) + b = np.array( + [[6, 6, 6, 5, 6, 7, 8, 9, 8, 8], + [6, 6, 6, 5, 6, 7, 8, 9, 8, 8], + + [1, 1, 1, 0, 1, 2, 3, 4, 3, 3], + [6, 6, 6, 5, 6, 7, 8, 9, 8, 8], + [11, 11, 11, 10, 11, 12, 13, 14, 13, 13], + [16, 16, 16, 15, 16, 17, 18, 19, 18, 18], + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], + [26, 26, 26, 25, 26, 27, 28, 29, 28, 28], + + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23]] + ) + assert_array_equal(a, b) + + @pytest.mark.filterwarnings("ignore:Mean of empty slice:RuntimeWarning") + @pytest.mark.filterwarnings( + "ignore:invalid value encountered in( scalar)? divide:RuntimeWarning" + ) + @pytest.mark.parametrize("mode", ["mean", "median"]) + def test_zero_stat_length_valid(self, mode): + arr = np.pad([1., 2.], (1, 2), mode, stat_length=0) + expected = np.array([np.nan, 1., 2., np.nan, np.nan]) + assert_equal(arr, expected) + + @pytest.mark.parametrize("mode", ["minimum", "maximum"]) + def test_zero_stat_length_invalid(self, mode): + match = "stat_length of 0 yields no value for padding" + with pytest.raises(ValueError, match=match): + np.pad([1., 2.], 0, mode, stat_length=0) + with pytest.raises(ValueError, match=match): + np.pad([1., 2.], 0, mode, stat_length=(1, 0)) + with pytest.raises(ValueError, match=match): + np.pad([1., 2.], 1, mode, stat_length=0) + with pytest.raises(ValueError, match=match): + np.pad([1., 2.], 1, mode, stat_length=(1, 0)) + + +class TestConstant: + def test_check_constant(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'constant', constant_values=(10, 20)) + b = np.array( + [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20] + ) + assert_array_equal(a, b) + + def test_check_constant_zeros(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'constant') + b = np.array( + [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ) + assert_array_equal(a, b) + + def test_check_constant_float(self): + # If input array is int, but constant_values are float, the dtype of + # the array to be padded is kept + arr = np.arange(30).reshape(5, 6) + test = np.pad(arr, (1, 2), mode='constant', + constant_values=1.1) + expected = np.array( + [[1, 1, 1, 1, 1, 1, 1, 1, 1], + + [1, 0, 1, 2, 3, 4, 5, 1, 1], + [1, 6, 7, 8, 9, 10, 11, 1, 1], + [1, 12, 13, 14, 15, 16, 17, 1, 1], + [1, 18, 19, 20, 21, 22, 23, 1, 1], + [1, 24, 25, 26, 27, 28, 29, 1, 1], + + [1, 1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1, 1]] + ) + assert_allclose(test, expected) + + def test_check_constant_float2(self): + # If input array is float, and constant_values are float, the dtype of + # the array to be padded is kept - here retaining the float constants + arr = np.arange(30).reshape(5, 6) + arr_float = arr.astype(np.float64) + test = np.pad(arr_float, ((1, 2), (1, 2)), mode='constant', + constant_values=1.1) + expected = np.array( + [[1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1], + + [1.1, 0. , 1. , 2. , 3. , 4. , 5. , 1.1, 1.1], # noqa: E203 + [1.1, 6. , 7. , 8. , 9. , 10. , 11. , 1.1, 1.1], # noqa: E203 + [1.1, 12. , 13. , 14. , 15. , 16. , 17. , 1.1, 1.1], # noqa: E203 + [1.1, 18. , 19. , 20. , 21. , 22. , 23. , 1.1, 1.1], # noqa: E203 + [1.1, 24. , 25. , 26. , 27. , 28. , 29. , 1.1, 1.1], # noqa: E203 + + [1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1], + [1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1]] + ) + assert_allclose(test, expected) + + def test_check_constant_float3(self): + a = np.arange(100, dtype=float) + a = np.pad(a, (25, 20), 'constant', constant_values=(-1.1, -1.2)) + b = np.array( + [-1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, + -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, + -1.1, -1.1, -1.1, -1.1, -1.1, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, + -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2] + ) + assert_allclose(a, b) + + def test_check_constant_odd_pad_amount(self): + arr = np.arange(30).reshape(5, 6) + test = np.pad(arr, ((1,), (2,)), mode='constant', + constant_values=3) + expected = np.array( + [[3, 3, 3, 3, 3, 3, 3, 3, 3, 3], + + [3, 3, 0, 1, 2, 3, 4, 5, 3, 3], + [3, 3, 6, 7, 8, 9, 10, 11, 3, 3], + [3, 3, 12, 13, 14, 15, 16, 17, 3, 3], + [3, 3, 18, 19, 20, 21, 22, 23, 3, 3], + [3, 3, 24, 25, 26, 27, 28, 29, 3, 3], + + [3, 3, 3, 3, 3, 3, 3, 3, 3, 3]] + ) + assert_allclose(test, expected) + + def test_check_constant_pad_2d(self): + arr = np.arange(4).reshape(2, 2) + test = np.pad(arr, ((1, 2), (1, 3)), mode='constant', + constant_values=((1, 2), (3, 4))) + expected = np.array( + [[3, 1, 1, 4, 4, 4], + [3, 0, 1, 4, 4, 4], + [3, 2, 3, 4, 4, 4], + [3, 2, 2, 4, 4, 4], + [3, 2, 2, 4, 4, 4]] + ) + assert_allclose(test, expected) + + def test_check_large_integers(self): + uint64_max = 2 ** 64 - 1 + arr = np.full(5, uint64_max, dtype=np.uint64) + test = np.pad(arr, 1, mode="constant", constant_values=arr.min()) + expected = np.full(7, uint64_max, dtype=np.uint64) + assert_array_equal(test, expected) + + int64_max = 2 ** 63 - 1 + arr = np.full(5, int64_max, dtype=np.int64) + test = np.pad(arr, 1, mode="constant", constant_values=arr.min()) + expected = np.full(7, int64_max, dtype=np.int64) + assert_array_equal(test, expected) + + def test_check_object_array(self): + arr = np.empty(1, dtype=object) + obj_a = object() + arr[0] = obj_a + obj_b = object() + obj_c = object() + arr = np.pad(arr, pad_width=1, mode='constant', + constant_values=(obj_b, obj_c)) + + expected = np.empty((3,), dtype=object) + expected[0] = obj_b + expected[1] = obj_a + expected[2] = obj_c + + assert_array_equal(arr, expected) + + def test_pad_empty_dimension(self): + arr = np.zeros((3, 0, 2)) + result = np.pad(arr, [(0,), (2,), (1,)], mode="constant") + assert result.shape == (3, 4, 4) + + +class TestLinearRamp: + def test_check_simple(self): + a = np.arange(100).astype('f') + a = np.pad(a, (25, 20), 'linear_ramp', end_values=(4, 5)) + b = np.array( + [4.00, 3.84, 3.68, 3.52, 3.36, 3.20, 3.04, 2.88, 2.72, 2.56, + 2.40, 2.24, 2.08, 1.92, 1.76, 1.60, 1.44, 1.28, 1.12, 0.96, + 0.80, 0.64, 0.48, 0.32, 0.16, + + 0.00, 1.00, 2.00, 3.00, 4.00, 5.00, 6.00, 7.00, 8.00, 9.00, + 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, + 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, + 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, + 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0, + 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, + 60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0, + 70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0, + 80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0, + 90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0, + + 94.3, 89.6, 84.9, 80.2, 75.5, 70.8, 66.1, 61.4, 56.7, 52.0, + 47.3, 42.6, 37.9, 33.2, 28.5, 23.8, 19.1, 14.4, 9.7, 5.] + ) + assert_allclose(a, b, rtol=1e-5, atol=1e-5) + + def test_check_2d(self): + arr = np.arange(20).reshape(4, 5).astype(np.float64) + test = np.pad(arr, (2, 2), mode='linear_ramp', end_values=(0, 0)) + expected = np.array( + [[0., 0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0.5, 1., 1.5, 2., 1., 0.], + [0., 0., 0., 1., 2., 3., 4., 2., 0.], + [0., 2.5, 5., 6., 7., 8., 9., 4.5, 0.], + [0., 5., 10., 11., 12., 13., 14., 7., 0.], + [0., 7.5, 15., 16., 17., 18., 19., 9.5, 0.], + [0., 3.75, 7.5, 8., 8.5, 9., 9.5, 4.75, 0.], + [0., 0., 0., 0., 0., 0., 0., 0., 0.]]) + assert_allclose(test, expected) + + @pytest.mark.xfail(exceptions=(AssertionError,)) + def test_object_array(self): + from fractions import Fraction + arr = np.array([Fraction(1, 2), Fraction(-1, 2)]) + actual = np.pad(arr, (2, 3), mode='linear_ramp', end_values=0) + + # deliberately chosen to have a non-power-of-2 denominator such that + # rounding to floats causes a failure. + expected = np.array([ + Fraction( 0, 12), + Fraction( 3, 12), + Fraction( 6, 12), + Fraction(-6, 12), + Fraction(-4, 12), + Fraction(-2, 12), + Fraction(-0, 12), + ]) + assert_equal(actual, expected) + + def test_end_values(self): + """Ensure that end values are exact.""" + a = np.pad(np.ones(10).reshape(2, 5), (223, 123), mode="linear_ramp") + assert_equal(a[:, 0], 0.) + assert_equal(a[:, -1], 0.) + assert_equal(a[0, :], 0.) + assert_equal(a[-1, :], 0.) + + @pytest.mark.parametrize("dtype", _numeric_dtypes) + def test_negative_difference(self, dtype): + """ + Check correct behavior of unsigned dtypes if there is a negative + difference between the edge to pad and `end_values`. Check both cases + to be independent of implementation. Test behavior for all other dtypes + in case dtype casting interferes with complex dtypes. See gh-14191. + """ + x = np.array([3], dtype=dtype) + result = np.pad(x, 3, mode="linear_ramp", end_values=0) + expected = np.array([0, 1, 2, 3, 2, 1, 0], dtype=dtype) + assert_equal(result, expected) + + x = np.array([0], dtype=dtype) + result = np.pad(x, 3, mode="linear_ramp", end_values=3) + expected = np.array([3, 2, 1, 0, 1, 2, 3], dtype=dtype) + assert_equal(result, expected) + + +class TestReflect: + def test_check_simple(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'reflect') + b = np.array( + [25, 24, 23, 22, 21, 20, 19, 18, 17, 16, + 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, + 5, 4, 3, 2, 1, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 98, 97, 96, 95, 94, 93, 92, 91, 90, 89, + 88, 87, 86, 85, 84, 83, 82, 81, 80, 79] + ) + assert_array_equal(a, b) + + def test_check_odd_method(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'reflect', reflect_type='odd') + b = np.array( + [-25, -24, -23, -22, -21, -20, -19, -18, -17, -16, + -15, -14, -13, -12, -11, -10, -9, -8, -7, -6, + -5, -4, -3, -2, -1, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, + 110, 111, 112, 113, 114, 115, 116, 117, 118, 119] + ) + assert_array_equal(a, b) + + def test_check_large_pad(self): + a = [[4, 5, 6], [6, 7, 8]] + a = np.pad(a, (5, 7), 'reflect') + b = np.array( + [[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]] + ) + assert_array_equal(a, b) + + def test_check_shape(self): + a = [[4, 5, 6]] + a = np.pad(a, (5, 7), 'reflect') + b = np.array( + [[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]] + ) + assert_array_equal(a, b) + + def test_check_01(self): + a = np.pad([1, 2, 3], 2, 'reflect') + b = np.array([3, 2, 1, 2, 3, 2, 1]) + assert_array_equal(a, b) + + def test_check_02(self): + a = np.pad([1, 2, 3], 3, 'reflect') + b = np.array([2, 3, 2, 1, 2, 3, 2, 1, 2]) + assert_array_equal(a, b) + + def test_check_03(self): + a = np.pad([1, 2, 3], 4, 'reflect') + b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3]) + assert_array_equal(a, b) + + def test_check_04(self): + a = np.pad([1, 2, 3], [1, 10], 'reflect') + b = np.array([2, 1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3, 2, 1]) + assert_array_equal(a, b) + + def test_check_05(self): + a = np.pad([1, 2, 3, 4], [45, 10], 'reflect') + b = np.array( + [4, 3, 2, 1, 2, 3, 4, 3, 2, 1, + 2, 3, 4, 3, 2, 1, 2, 3, 4, 3, + 2, 1, 2, 3, 4, 3, 2, 1, 2, 3, + 4, 3, 2, 1, 2, 3, 4, 3, 2, 1, + 2, 3, 4, 3, 2, 1, 2, 3, 4, 3, + 2, 1, 2, 3, 4, 3, 2, 1, 2]) + assert_array_equal(a, b) + + def test_check_06(self): + a = np.pad([1, 2, 3, 4], [15, 2], 'symmetric') + b = np.array( + [2, 3, 4, 4, 3, 2, 1, 1, 2, 3, + 4, 4, 3, 2, 1, 1, 2, 3, 4, 4, + 3] + ) + assert_array_equal(a, b) + + def test_check_07(self): + a = np.pad([1, 2, 3, 4, 5, 6], [45, 3], 'symmetric') + b = np.array( + [4, 5, 6, 6, 5, 4, 3, 2, 1, 1, + 2, 3, 4, 5, 6, 6, 5, 4, 3, 2, + 1, 1, 2, 3, 4, 5, 6, 6, 5, 4, + 3, 2, 1, 1, 2, 3, 4, 5, 6, 6, + 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, + 6, 6, 5, 4]) + assert_array_equal(a, b) + + +class TestEmptyArray: + """Check how padding behaves on arrays with an empty dimension.""" + + @pytest.mark.parametrize( + # Keep parametrization ordered, otherwise pytest-xdist might believe + # that different tests were collected during parallelization + "mode", sorted(_all_modes.keys() - {"constant", "empty"}) + ) + def test_pad_empty_dimension(self, mode): + match = ("can't extend empty axis 0 using modes other than 'constant' " + "or 'empty'") + with pytest.raises(ValueError, match=match): + np.pad([], 4, mode=mode) + with pytest.raises(ValueError, match=match): + np.pad(np.ndarray(0), 4, mode=mode) + with pytest.raises(ValueError, match=match): + np.pad(np.zeros((0, 3)), ((1,), (0,)), mode=mode) + + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_pad_non_empty_dimension(self, mode): + result = np.pad(np.ones((2, 0, 2)), ((3,), (0,), (1,)), mode=mode) + assert result.shape == (8, 0, 4) + + +class TestSymmetric: + def test_check_simple(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'symmetric') + b = np.array( + [24, 23, 22, 21, 20, 19, 18, 17, 16, 15, + 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, + 4, 3, 2, 1, 0, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 99, 98, 97, 96, 95, 94, 93, 92, 91, 90, + 89, 88, 87, 86, 85, 84, 83, 82, 81, 80] + ) + assert_array_equal(a, b) + + def test_check_odd_method(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'symmetric', reflect_type='odd') + b = np.array( + [-24, -23, -22, -21, -20, -19, -18, -17, -16, -15, + -14, -13, -12, -11, -10, -9, -8, -7, -6, -5, + -4, -3, -2, -1, 0, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, + 109, 110, 111, 112, 113, 114, 115, 116, 117, 118] + ) + assert_array_equal(a, b) + + def test_check_large_pad(self): + a = [[4, 5, 6], [6, 7, 8]] + a = np.pad(a, (5, 7), 'symmetric') + b = np.array( + [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]] + ) + + assert_array_equal(a, b) + + def test_check_large_pad_odd(self): + a = [[4, 5, 6], [6, 7, 8]] + a = np.pad(a, (5, 7), 'symmetric', reflect_type='odd') + b = np.array( + [[-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6], + [-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6], + [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8], + [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8], + [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10], + + [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10], + [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12], + + [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12], + [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14], + [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14], + [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16], + [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16], + [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18], + [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18]] + ) + assert_array_equal(a, b) + + def test_check_shape(self): + a = [[4, 5, 6]] + a = np.pad(a, (5, 7), 'symmetric') + b = np.array( + [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]] + ) + assert_array_equal(a, b) + + def test_check_01(self): + a = np.pad([1, 2, 3], 2, 'symmetric') + b = np.array([2, 1, 1, 2, 3, 3, 2]) + assert_array_equal(a, b) + + def test_check_02(self): + a = np.pad([1, 2, 3], 3, 'symmetric') + b = np.array([3, 2, 1, 1, 2, 3, 3, 2, 1]) + assert_array_equal(a, b) + + def test_check_03(self): + a = np.pad([1, 2, 3], 6, 'symmetric') + b = np.array([1, 2, 3, 3, 2, 1, 1, 2, 3, 3, 2, 1, 1, 2, 3]) + assert_array_equal(a, b) + + +class TestWrap: + def test_check_simple(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'wrap') + b = np.array( + [75, 76, 77, 78, 79, 80, 81, 82, 83, 84, + 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, + 95, 96, 97, 98, 99, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] + ) + assert_array_equal(a, b) + + def test_check_large_pad(self): + a = np.arange(12) + a = np.reshape(a, (3, 4)) + a = np.pad(a, (10, 12), 'wrap') + b = np.array( + [[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11]] + ) + assert_array_equal(a, b) + + def test_check_01(self): + a = np.pad([1, 2, 3], 3, 'wrap') + b = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3]) + assert_array_equal(a, b) + + def test_check_02(self): + a = np.pad([1, 2, 3], 4, 'wrap') + b = np.array([3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1]) + assert_array_equal(a, b) + + def test_pad_with_zero(self): + a = np.ones((3, 5)) + b = np.pad(a, (0, 5), mode="wrap") + assert_array_equal(a, b[:-5, :-5]) + + def test_repeated_wrapping(self): + """ + Check wrapping on each side individually if the wrapped area is longer + than the original array. + """ + a = np.arange(5) + b = np.pad(a, (12, 0), mode="wrap") + assert_array_equal(np.r_[a, a, a, a][3:], b) + + a = np.arange(5) + b = np.pad(a, (0, 12), mode="wrap") + assert_array_equal(np.r_[a, a, a, a][:-3], b) + + def test_repeated_wrapping_multiple_origin(self): + """ + Assert that 'wrap' pads only with multiples of the original area if + the pad width is larger than the original array. + """ + a = np.arange(4).reshape(2, 2) + a = np.pad(a, [(1, 3), (3, 1)], mode='wrap') + b = np.array( + [[3, 2, 3, 2, 3, 2], + [1, 0, 1, 0, 1, 0], + [3, 2, 3, 2, 3, 2], + [1, 0, 1, 0, 1, 0], + [3, 2, 3, 2, 3, 2], + [1, 0, 1, 0, 1, 0]] + ) + assert_array_equal(a, b) + + +class TestEdge: + def test_check_simple(self): + a = np.arange(12) + a = np.reshape(a, (4, 3)) + a = np.pad(a, ((2, 3), (3, 2)), 'edge') + b = np.array( + [[0, 0, 0, 0, 1, 2, 2, 2], + [0, 0, 0, 0, 1, 2, 2, 2], + + [0, 0, 0, 0, 1, 2, 2, 2], + [3, 3, 3, 3, 4, 5, 5, 5], + [6, 6, 6, 6, 7, 8, 8, 8], + [9, 9, 9, 9, 10, 11, 11, 11], + + [9, 9, 9, 9, 10, 11, 11, 11], + [9, 9, 9, 9, 10, 11, 11, 11], + [9, 9, 9, 9, 10, 11, 11, 11]] + ) + assert_array_equal(a, b) + + def test_check_width_shape_1_2(self): + # Check a pad_width of the form ((1, 2),). + # Regression test for issue gh-7808. + a = np.array([1, 2, 3]) + padded = np.pad(a, ((1, 2),), 'edge') + expected = np.array([1, 1, 2, 3, 3, 3]) + assert_array_equal(padded, expected) + + a = np.array([[1, 2, 3], [4, 5, 6]]) + padded = np.pad(a, ((1, 2),), 'edge') + expected = np.pad(a, ((1, 2), (1, 2)), 'edge') + assert_array_equal(padded, expected) + + a = np.arange(24).reshape(2, 3, 4) + padded = np.pad(a, ((1, 2),), 'edge') + expected = np.pad(a, ((1, 2), (1, 2), (1, 2)), 'edge') + assert_array_equal(padded, expected) + + +class TestEmpty: + def test_simple(self): + arr = np.arange(24).reshape(4, 6) + result = np.pad(arr, [(2, 3), (3, 1)], mode="empty") + assert result.shape == (9, 10) + assert_equal(arr, result[2:-3, 3:-1]) + + def test_pad_empty_dimension(self): + arr = np.zeros((3, 0, 2)) + result = np.pad(arr, [(0,), (2,), (1,)], mode="empty") + assert result.shape == (3, 4, 4) + + +def test_legacy_vector_functionality(): + def _padwithtens(vector, pad_width, iaxis, kwargs): + vector[:pad_width[0]] = 10 + vector[-pad_width[1]:] = 10 + + a = np.arange(6).reshape(2, 3) + a = np.pad(a, 2, _padwithtens) + b = np.array( + [[10, 10, 10, 10, 10, 10, 10], + [10, 10, 10, 10, 10, 10, 10], + + [10, 10, 0, 1, 2, 10, 10], + [10, 10, 3, 4, 5, 10, 10], + + [10, 10, 10, 10, 10, 10, 10], + [10, 10, 10, 10, 10, 10, 10]] + ) + assert_array_equal(a, b) + + +def test_unicode_mode(): + a = np.pad([1], 2, mode='constant') + b = np.array([0, 0, 1, 0, 0]) + assert_array_equal(a, b) + + +@pytest.mark.parametrize("mode", ["edge", "symmetric", "reflect", "wrap"]) +def test_object_input(mode): + # Regression test for issue gh-11395. + a = np.full((4, 3), fill_value=None) + pad_amt = ((2, 3), (3, 2)) + b = np.full((9, 8), fill_value=None) + assert_array_equal(np.pad(a, pad_amt, mode=mode), b) + + +class TestPadWidth: + @pytest.mark.parametrize("pad_width", [ + (4, 5, 6, 7), + ((1,), (2,), (3,)), + ((1, 2), (3, 4), (5, 6)), + ((3, 4, 5), (0, 1, 2)), + ]) + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_misshaped_pad_width(self, pad_width, mode): + arr = np.arange(30).reshape((6, 5)) + match = "operands could not be broadcast together" + with pytest.raises(ValueError, match=match): + np.pad(arr, pad_width, mode) + + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_misshaped_pad_width_2(self, mode): + arr = np.arange(30).reshape((6, 5)) + match = ("input operand has more dimensions than allowed by the axis " + "remapping") + with pytest.raises(ValueError, match=match): + np.pad(arr, (((3,), (4,), (5,)), ((0,), (1,), (2,))), mode) + + @pytest.mark.parametrize( + "pad_width", [-2, (-2,), (3, -1), ((5, 2), (-2, 3)), ((-4,), (2,))]) + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_negative_pad_width(self, pad_width, mode): + arr = np.arange(30).reshape((6, 5)) + match = "index can't contain negative values" + with pytest.raises(ValueError, match=match): + np.pad(arr, pad_width, mode) + + @pytest.mark.parametrize("pad_width, dtype", [ + ("3", None), + ("word", None), + (None, None), + (object(), None), + (3.4, None), + (((2, 3, 4), (3, 2)), object), + (complex(1, -1), None), + (((-2.1, 3), (3, 2)), None), + ]) + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_bad_type(self, pad_width, dtype, mode): + arr = np.arange(30).reshape((6, 5)) + match = "`pad_width` must be of integral type." + if dtype is not None: + # avoid DeprecationWarning when not specifying dtype + with pytest.raises(TypeError, match=match): + np.pad(arr, np.array(pad_width, dtype=dtype), mode) + else: + with pytest.raises(TypeError, match=match): + np.pad(arr, pad_width, mode) + with pytest.raises(TypeError, match=match): + np.pad(arr, np.array(pad_width), mode) + + def test_pad_width_as_ndarray(self): + a = np.arange(12) + a = np.reshape(a, (4, 3)) + a = np.pad(a, np.array(((2, 3), (3, 2))), 'edge') + b = np.array( + [[0, 0, 0, 0, 1, 2, 2, 2], + [0, 0, 0, 0, 1, 2, 2, 2], + + [0, 0, 0, 0, 1, 2, 2, 2], + [3, 3, 3, 3, 4, 5, 5, 5], + [6, 6, 6, 6, 7, 8, 8, 8], + [9, 9, 9, 9, 10, 11, 11, 11], + + [9, 9, 9, 9, 10, 11, 11, 11], + [9, 9, 9, 9, 10, 11, 11, 11], + [9, 9, 9, 9, 10, 11, 11, 11]] + ) + assert_array_equal(a, b) + + @pytest.mark.parametrize("pad_width", [0, (0, 0), ((0, 0), (0, 0))]) + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_zero_pad_width(self, pad_width, mode): + arr = np.arange(30).reshape(6, 5) + assert_array_equal(arr, np.pad(arr, pad_width, mode=mode)) + + +@pytest.mark.parametrize("mode", _all_modes.keys()) +def test_kwargs(mode): + """Test behavior of pad's kwargs for the given mode.""" + allowed = _all_modes[mode] + not_allowed = {} + for kwargs in _all_modes.values(): + if kwargs != allowed: + not_allowed.update(kwargs) + # Test if allowed keyword arguments pass + np.pad([1, 2, 3], 1, mode, **allowed) + # Test if prohibited keyword arguments of other modes raise an error + for key, value in not_allowed.items(): + match = f"unsupported keyword arguments for mode '{mode}'" + with pytest.raises(ValueError, match=match): + np.pad([1, 2, 3], 1, mode, **{key: value}) + + +def test_constant_zero_default(): + arr = np.array([1, 1]) + assert_array_equal(np.pad(arr, 2), [0, 0, 1, 1, 0, 0]) + + +@pytest.mark.parametrize("mode", [1, "const", object(), None, True, False]) +def test_unsupported_mode(mode): + match = f"mode '{mode}' is not supported" + with pytest.raises(ValueError, match=match): + np.pad([1, 2, 3], 4, mode=mode) + + +@pytest.mark.parametrize("mode", _all_modes.keys()) +def test_non_contiguous_array(mode): + arr = np.arange(24).reshape(4, 6)[::2, ::2] + result = np.pad(arr, (2, 3), mode) + assert result.shape == (7, 8) + assert_equal(result[2:-3, 2:-3], arr) + + +@pytest.mark.parametrize("mode", _all_modes.keys()) +def test_memory_layout_persistence(mode): + """Test if C and F order is preserved for all pad modes.""" + x = np.ones((5, 10), order='C') + assert np.pad(x, 5, mode).flags["C_CONTIGUOUS"] + x = np.ones((5, 10), order='F') + assert np.pad(x, 5, mode).flags["F_CONTIGUOUS"] + + +@pytest.mark.parametrize("dtype", _numeric_dtypes) +@pytest.mark.parametrize("mode", _all_modes.keys()) +def test_dtype_persistence(dtype, mode): + arr = np.zeros((3, 2, 1), dtype=dtype) + result = np.pad(arr, 1, mode=mode) + assert result.dtype == dtype + + +@pytest.mark.parametrize("input_shape, pad_width, expected_shape", [ + ((3, 4, 5), {-2: (1, 3)}, (3, 4 + 1 + 3, 5)), + ((3, 4, 5), {0: (5, 2)}, (3 + 5 + 2, 4, 5)), + ((3, 4, 5), {0: (5, 2), -1: (3, 4)}, (3 + 5 + 2, 4, 5 + 3 + 4)), + ((3, 4, 5), {1: 5}, (3, 4 + 2 * 5, 5)), +]) +def test_pad_dict_pad_width(input_shape, pad_width, expected_shape): + a = np.zeros(input_shape) + result = np.pad(a, pad_width) + assert result.shape == expected_shape diff --git a/local_packages/numpy/lib/tests/test_arraysetops.py b/local_packages/numpy/lib/tests/test_arraysetops.py new file mode 100644 index 0000000..4e8d503 --- /dev/null +++ b/local_packages/numpy/lib/tests/test_arraysetops.py @@ -0,0 +1,1302 @@ +"""Test functions for 1D array set operations. + +""" +import pytest + +import numpy as np +from numpy import ediff1d, intersect1d, isin, setdiff1d, setxor1d, union1d, unique +from numpy.dtypes import StringDType +from numpy.exceptions import AxisError +from numpy.testing import ( + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) + + +class TestSetOps: + + def test_intersect1d(self): + # unique inputs + a = np.array([5, 7, 1, 2]) + b = np.array([2, 4, 3, 1, 5]) + + ec = np.array([1, 2, 5]) + c = intersect1d(a, b, assume_unique=True) + assert_array_equal(c, ec) + + # non-unique inputs + a = np.array([5, 5, 7, 1, 2]) + b = np.array([2, 1, 4, 3, 3, 1, 5]) + + ed = np.array([1, 2, 5]) + c = intersect1d(a, b) + assert_array_equal(c, ed) + assert_array_equal([], intersect1d([], [])) + + def test_intersect1d_array_like(self): + # See gh-11772 + class Test: + def __array__(self, dtype=None, copy=None): + return np.arange(3) + + a = Test() + res = intersect1d(a, a) + assert_array_equal(res, a) + res = intersect1d([1, 2, 3], [1, 2, 3]) + assert_array_equal(res, [1, 2, 3]) + + def test_intersect1d_indices(self): + # unique inputs + a = np.array([1, 2, 3, 4]) + b = np.array([2, 1, 4, 6]) + c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True) + ee = np.array([1, 2, 4]) + assert_array_equal(c, ee) + assert_array_equal(a[i1], ee) + assert_array_equal(b[i2], ee) + + # non-unique inputs + a = np.array([1, 2, 2, 3, 4, 3, 2]) + b = np.array([1, 8, 4, 2, 2, 3, 2, 3]) + c, i1, i2 = intersect1d(a, b, return_indices=True) + ef = np.array([1, 2, 3, 4]) + assert_array_equal(c, ef) + assert_array_equal(a[i1], ef) + assert_array_equal(b[i2], ef) + + # non1d, unique inputs + a = np.array([[2, 4, 5, 6], [7, 8, 1, 15]]) + b = np.array([[3, 2, 7, 6], [10, 12, 8, 9]]) + c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True) + ui1 = np.unravel_index(i1, a.shape) + ui2 = np.unravel_index(i2, b.shape) + ea = np.array([2, 6, 7, 8]) + assert_array_equal(ea, a[ui1]) + assert_array_equal(ea, b[ui2]) + + # non1d, not assumed to be uniqueinputs + a = np.array([[2, 4, 5, 6, 6], [4, 7, 8, 7, 2]]) + b = np.array([[3, 2, 7, 7], [10, 12, 8, 7]]) + c, i1, i2 = intersect1d(a, b, return_indices=True) + ui1 = np.unravel_index(i1, a.shape) + ui2 = np.unravel_index(i2, b.shape) + ea = np.array([2, 7, 8]) + assert_array_equal(ea, a[ui1]) + assert_array_equal(ea, b[ui2]) + + def test_setxor1d(self): + a = np.array([5, 7, 1, 2]) + b = np.array([2, 4, 3, 1, 5]) + + ec = np.array([3, 4, 7]) + c = setxor1d(a, b) + assert_array_equal(c, ec) + + a = np.array([1, 2, 3]) + b = np.array([6, 5, 4]) + + ec = np.array([1, 2, 3, 4, 5, 6]) + c = setxor1d(a, b) + assert_array_equal(c, ec) + + a = np.array([1, 8, 2, 3]) + b = np.array([6, 5, 4, 8]) + + ec = np.array([1, 2, 3, 4, 5, 6]) + c = setxor1d(a, b) + assert_array_equal(c, ec) + + assert_array_equal([], setxor1d([], [])) + + def test_setxor1d_unique(self): + a = np.array([1, 8, 2, 3]) + b = np.array([6, 5, 4, 8]) + + ec = np.array([1, 2, 3, 4, 5, 6]) + c = setxor1d(a, b, assume_unique=True) + assert_array_equal(c, ec) + + a = np.array([[1], [8], [2], [3]]) + b = np.array([[6, 5], [4, 8]]) + + ec = np.array([1, 2, 3, 4, 5, 6]) + c = setxor1d(a, b, assume_unique=True) + assert_array_equal(c, ec) + + def test_ediff1d(self): + zero_elem = np.array([]) + one_elem = np.array([1]) + two_elem = np.array([1, 2]) + + assert_array_equal([], ediff1d(zero_elem)) + assert_array_equal([0], ediff1d(zero_elem, to_begin=0)) + assert_array_equal([0], ediff1d(zero_elem, to_end=0)) + assert_array_equal([-1, 0], ediff1d(zero_elem, to_begin=-1, to_end=0)) + assert_array_equal([], ediff1d(one_elem)) + assert_array_equal([1], ediff1d(two_elem)) + assert_array_equal([7, 1, 9], ediff1d(two_elem, to_begin=7, to_end=9)) + assert_array_equal([5, 6, 1, 7, 8], + ediff1d(two_elem, to_begin=[5, 6], to_end=[7, 8])) + assert_array_equal([1, 9], ediff1d(two_elem, to_end=9)) + assert_array_equal([1, 7, 8], ediff1d(two_elem, to_end=[7, 8])) + assert_array_equal([7, 1], ediff1d(two_elem, to_begin=7)) + assert_array_equal([5, 6, 1], ediff1d(two_elem, to_begin=[5, 6])) + + @pytest.mark.parametrize("ary, prepend, append, expected", [ + # should fail because trying to cast + # np.nan standard floating point value + # into an integer array: + (np.array([1, 2, 3], dtype=np.int64), + None, + np.nan, + 'to_end'), + # should fail because attempting + # to downcast to int type: + (np.array([1, 2, 3], dtype=np.int64), + np.array([5, 7, 2], dtype=np.float32), + None, + 'to_begin'), + # should fail because attempting to cast + # two special floating point values + # to integers (on both sides of ary), + # `to_begin` is in the error message as the impl checks this first: + (np.array([1., 3., 9.], dtype=np.int8), + np.nan, + np.nan, + 'to_begin'), + ]) + def test_ediff1d_forbidden_type_casts(self, ary, prepend, append, expected): + # verify resolution of gh-11490 + + # specifically, raise an appropriate + # Exception when attempting to append or + # prepend with an incompatible type + msg = f'dtype of `{expected}` must be compatible' + with assert_raises_regex(TypeError, msg): + ediff1d(ary=ary, + to_end=append, + to_begin=prepend) + + @pytest.mark.parametrize( + "ary,prepend,append,expected", + [ + (np.array([1, 2, 3], dtype=np.int16), + 2**16, # will be cast to int16 under same kind rule. + 2**16 + 4, + np.array([0, 1, 1, 4], dtype=np.int16)), + (np.array([1, 2, 3], dtype=np.float32), + np.array([5], dtype=np.float64), + None, + np.array([5, 1, 1], dtype=np.float32)), + (np.array([1, 2, 3], dtype=np.int32), + 0, + 0, + np.array([0, 1, 1, 0], dtype=np.int32)), + (np.array([1, 2, 3], dtype=np.int64), + 3, + -9, + np.array([3, 1, 1, -9], dtype=np.int64)), + ] + ) + def test_ediff1d_scalar_handling(self, + ary, + prepend, + append, + expected): + # maintain backwards-compatibility + # of scalar prepend / append behavior + # in ediff1d following fix for gh-11490 + actual = np.ediff1d(ary=ary, + to_end=append, + to_begin=prepend) + assert_equal(actual, expected) + assert actual.dtype == expected.dtype + + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_isin(self, kind): + def _isin_slow(a, b): + b = np.asarray(b).flatten().tolist() + return a in b + isin_slow = np.vectorize(_isin_slow, otypes=[bool], excluded={1}) + + def assert_isin_equal(a, b): + x = isin(a, b, kind=kind) + y = isin_slow(a, b) + assert_array_equal(x, y) + + # multidimensional arrays in both arguments + a = np.arange(24).reshape([2, 3, 4]) + b = np.array([[10, 20, 30], [0, 1, 3], [11, 22, 33]]) + assert_isin_equal(a, b) + + # array-likes as both arguments + c = [(9, 8), (7, 6)] + d = (9, 7) + assert_isin_equal(c, d) + + # zero-d array: + f = np.array(3) + assert_isin_equal(f, b) + assert_isin_equal(a, f) + assert_isin_equal(f, f) + + # scalar: + assert_isin_equal(5, b) + assert_isin_equal(a, 6) + assert_isin_equal(5, 6) + + # empty array-like: + if kind != "table": + # An empty list will become float64, + # which is invalid for kind="table" + x = [] + assert_isin_equal(x, b) + assert_isin_equal(a, x) + assert_isin_equal(x, x) + + # empty array with various types: + for dtype in [bool, np.int64, np.float64]: + if kind == "table" and dtype == np.float64: + continue + + if dtype in {np.int64, np.float64}: + ar = np.array([10, 20, 30], dtype=dtype) + elif dtype in {bool}: + ar = np.array([True, False, False]) + + empty_array = np.array([], dtype=dtype) + + assert_isin_equal(empty_array, ar) + assert_isin_equal(ar, empty_array) + assert_isin_equal(empty_array, empty_array) + + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_isin_additional(self, kind): + # we use two different sizes for the b array here to test the + # two different paths in isin(). + for mult in (1, 10): + # One check without np.array to make sure lists are handled correct + a = [5, 7, 1, 2] + b = [2, 4, 3, 1, 5] * mult + ec = np.array([True, False, True, True]) + c = isin(a, b, assume_unique=True, kind=kind) + assert_array_equal(c, ec) + + a[0] = 8 + ec = np.array([False, False, True, True]) + c = isin(a, b, assume_unique=True, kind=kind) + assert_array_equal(c, ec) + + a[0], a[3] = 4, 8 + ec = np.array([True, False, True, False]) + c = isin(a, b, assume_unique=True, kind=kind) + assert_array_equal(c, ec) + + a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5]) + b = [2, 3, 4] * mult + ec = [False, True, False, True, True, True, True, True, True, + False, True, False, False, False] + c = isin(a, b, kind=kind) + assert_array_equal(c, ec) + + b = b + [5, 5, 4] * mult + ec = [True, True, True, True, True, True, True, True, True, True, + True, False, True, True] + c = isin(a, b, kind=kind) + assert_array_equal(c, ec) + + a = np.array([5, 7, 1, 2]) + b = np.array([2, 4, 3, 1, 5] * mult) + ec = np.array([True, False, True, True]) + c = isin(a, b, kind=kind) + assert_array_equal(c, ec) + + a = np.array([5, 7, 1, 1, 2]) + b = np.array([2, 4, 3, 3, 1, 5] * mult) + ec = np.array([True, False, True, True, True]) + c = isin(a, b, kind=kind) + assert_array_equal(c, ec) + + a = np.array([5, 5]) + b = np.array([2, 2] * mult) + ec = np.array([False, False]) + c = isin(a, b, kind=kind) + assert_array_equal(c, ec) + + a = np.array([5]) + b = np.array([2]) + ec = np.array([False]) + c = isin(a, b, kind=kind) + assert_array_equal(c, ec) + + if kind in {None, "sort"}: + assert_array_equal(isin([], [], kind=kind), []) + + def test_isin_char_array(self): + a = np.array(['a', 'b', 'c', 'd', 'e', 'c', 'e', 'b']) + b = np.array(['a', 'c']) + + ec = np.array([True, False, True, False, False, True, False, False]) + c = isin(a, b) + + assert_array_equal(c, ec) + + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_isin_invert(self, kind): + "Test isin's invert parameter" + # We use two different sizes for the b array here to test the + # two different paths in isin(). + for mult in (1, 10): + a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5]) + b = [2, 3, 4] * mult + assert_array_equal(np.invert(isin(a, b, kind=kind)), + isin(a, b, invert=True, kind=kind)) + + # float: + if kind in {None, "sort"}: + for mult in (1, 10): + a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5], + dtype=np.float32) + b = [2, 3, 4] * mult + b = np.array(b, dtype=np.float32) + assert_array_equal(np.invert(isin(a, b, kind=kind)), + isin(a, b, invert=True, kind=kind)) + + def test_isin_hit_alternate_algorithm(self): + """Hit the standard isin code with integers""" + # Need extreme range to hit standard code + # This hits it without the use of kind='table' + a = np.array([5, 4, 5, 3, 4, 4, 1e9], dtype=np.int64) + b = np.array([2, 3, 4, 1e9], dtype=np.int64) + expected = np.array([0, 1, 0, 1, 1, 1, 1], dtype=bool) + assert_array_equal(expected, isin(a, b)) + assert_array_equal(np.invert(expected), isin(a, b, invert=True)) + + a = np.array([5, 7, 1, 2], dtype=np.int64) + b = np.array([2, 4, 3, 1, 5, 1e9], dtype=np.int64) + ec = np.array([True, False, True, True]) + c = isin(a, b, assume_unique=True) + assert_array_equal(c, ec) + + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_isin_boolean(self, kind): + """Test that isin works for boolean input""" + a = np.array([True, False]) + b = np.array([False, False, False]) + expected = np.array([False, True]) + assert_array_equal(expected, + isin(a, b, kind=kind)) + assert_array_equal(np.invert(expected), + isin(a, b, invert=True, kind=kind)) + + @pytest.mark.parametrize("kind", [None, "sort"]) + def test_isin_timedelta(self, kind): + """Test that isin works for timedelta input""" + rstate = np.random.RandomState(0) + a = rstate.randint(0, 100, size=10) + b = rstate.randint(0, 100, size=10) + truth = isin(a, b) + a_timedelta = a.astype("timedelta64[s]") + b_timedelta = b.astype("timedelta64[s]") + assert_array_equal(truth, isin(a_timedelta, b_timedelta, kind=kind)) + + def test_isin_table_timedelta_fails(self): + a = np.array([0, 1, 2], dtype="timedelta64[s]") + b = a + # Make sure it raises a value error: + with pytest.raises(ValueError): + isin(a, b, kind="table") + + @pytest.mark.parametrize( + "dtype1,dtype2", + [ + (np.int8, np.int16), + (np.int16, np.int8), + (np.uint8, np.uint16), + (np.uint16, np.uint8), + (np.uint8, np.int16), + (np.int16, np.uint8), + (np.uint64, np.int64), + ] + ) + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_isin_mixed_dtype(self, dtype1, dtype2, kind): + """Test that isin works as expected for mixed dtype input.""" + is_dtype2_signed = np.issubdtype(dtype2, np.signedinteger) + ar1 = np.array([0, 0, 1, 1], dtype=dtype1) + + if is_dtype2_signed: + ar2 = np.array([-128, 0, 127], dtype=dtype2) + else: + ar2 = np.array([127, 0, 255], dtype=dtype2) + + expected = np.array([True, True, False, False]) + + expect_failure = kind == "table" and ( + dtype1 == np.int16 and dtype2 == np.int8) + + if expect_failure: + with pytest.raises(RuntimeError, match="exceed the maximum"): + isin(ar1, ar2, kind=kind) + else: + assert_array_equal(isin(ar1, ar2, kind=kind), expected) + + @pytest.mark.parametrize("data", [ + np.array([2**63, 2**63 + 1], dtype=np.uint64), + np.array([-2**62, -2**62 - 1], dtype=np.int64), + ]) + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_isin_mixed_huge_vals(self, kind, data): + """Test values outside intp range (negative ones if 32bit system)""" + query = data[1] + res = np.isin(data, query, kind=kind) + assert_array_equal(res, [False, True]) + # Also check that nothing weird happens for values can't possibly + # in range. + data = data.astype(np.int32) # clearly different values + res = np.isin(data, query, kind=kind) + assert_array_equal(res, [False, False]) + + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_isin_mixed_boolean(self, kind): + """Test that isin works as expected for bool/int input.""" + for dtype in np.typecodes["AllInteger"]: + a = np.array([True, False, False], dtype=bool) + b = np.array([0, 0, 0, 0], dtype=dtype) + expected = np.array([False, True, True], dtype=bool) + assert_array_equal(isin(a, b, kind=kind), expected) + + a, b = b, a + expected = np.array([True, True, True, True], dtype=bool) + assert_array_equal(isin(a, b, kind=kind), expected) + + def test_isin_first_array_is_object(self): + ar1 = [None] + ar2 = np.array([1] * 10) + expected = np.array([False]) + result = np.isin(ar1, ar2) + assert_array_equal(result, expected) + + def test_isin_second_array_is_object(self): + ar1 = 1 + ar2 = np.array([None] * 10) + expected = np.array([False]) + result = np.isin(ar1, ar2) + assert_array_equal(result, expected) + + def test_isin_both_arrays_are_object(self): + ar1 = [None] + ar2 = np.array([None] * 10) + expected = np.array([True]) + result = np.isin(ar1, ar2) + assert_array_equal(result, expected) + + def test_isin_both_arrays_have_structured_dtype(self): + # Test arrays of a structured data type containing an integer field + # and a field of dtype `object` allowing for arbitrary Python objects + dt = np.dtype([('field1', int), ('field2', object)]) + ar1 = np.array([(1, None)], dtype=dt) + ar2 = np.array([(1, None)] * 10, dtype=dt) + expected = np.array([True]) + result = np.isin(ar1, ar2) + assert_array_equal(result, expected) + + def test_isin_with_arrays_containing_tuples(self): + ar1 = np.array([(1,), 2], dtype=object) + ar2 = np.array([(1,), 2], dtype=object) + expected = np.array([True, True]) + result = np.isin(ar1, ar2) + assert_array_equal(result, expected) + result = np.isin(ar1, ar2, invert=True) + assert_array_equal(result, np.invert(expected)) + + # An integer is added at the end of the array to make sure + # that the array builder will create the array with tuples + # and after it's created the integer is removed. + # There's a bug in the array constructor that doesn't handle + # tuples properly and adding the integer fixes that. + ar1 = np.array([(1,), (2, 1), 1], dtype=object) + ar1 = ar1[:-1] + ar2 = np.array([(1,), (2, 1), 1], dtype=object) + ar2 = ar2[:-1] + expected = np.array([True, True]) + result = np.isin(ar1, ar2) + assert_array_equal(result, expected) + result = np.isin(ar1, ar2, invert=True) + assert_array_equal(result, np.invert(expected)) + + ar1 = np.array([(1,), (2, 3), 1], dtype=object) + ar1 = ar1[:-1] + ar2 = np.array([(1,), 2], dtype=object) + expected = np.array([True, False]) + result = np.isin(ar1, ar2) + assert_array_equal(result, expected) + result = np.isin(ar1, ar2, invert=True) + assert_array_equal(result, np.invert(expected)) + + def test_isin_errors(self): + """Test that isin raises expected errors.""" + + # Error 1: `kind` is not one of 'sort' 'table' or None. + ar1 = np.array([1, 2, 3, 4, 5]) + ar2 = np.array([2, 4, 6, 8, 10]) + assert_raises(ValueError, isin, ar1, ar2, kind='quicksort') + + # Error 2: `kind="table"` does not work for non-integral arrays. + obj_ar1 = np.array([1, 'a', 3, 'b', 5], dtype=object) + obj_ar2 = np.array([1, 'a', 3, 'b', 5], dtype=object) + assert_raises(ValueError, isin, obj_ar1, obj_ar2, kind='table') + + for dtype in [np.int32, np.int64]: + ar1 = np.array([-1, 2, 3, 4, 5], dtype=dtype) + # The range of this array will overflow: + overflow_ar2 = np.array([-1, np.iinfo(dtype).max], dtype=dtype) + + # Error 3: `kind="table"` will trigger a runtime error + # if there is an integer overflow expected when computing the + # range of ar2 + assert_raises( + RuntimeError, + isin, ar1, overflow_ar2, kind='table' + ) + + # Non-error: `kind=None` will *not* trigger a runtime error + # if there is an integer overflow, it will switch to + # the `sort` algorithm. + result = np.isin(ar1, overflow_ar2, kind=None) + assert_array_equal(result, [True] + [False] * 4) + result = np.isin(ar1, overflow_ar2, kind='sort') + assert_array_equal(result, [True] + [False] * 4) + + def test_union1d(self): + a = np.array([5, 4, 7, 1, 2]) + b = np.array([2, 4, 3, 3, 2, 1, 5]) + + ec = np.array([1, 2, 3, 4, 5, 7]) + c = union1d(a, b) + assert_array_equal(c, ec) + + # Tests gh-10340, arguments to union1d should be + # flattened if they are not already 1D + x = np.array([[0, 1, 2], [3, 4, 5]]) + y = np.array([0, 1, 2, 3, 4]) + ez = np.array([0, 1, 2, 3, 4, 5]) + z = union1d(x, y) + assert_array_equal(z, ez) + + assert_array_equal([], union1d([], [])) + + def test_setdiff1d(self): + a = np.array([6, 5, 4, 7, 1, 2, 7, 4]) + b = np.array([2, 4, 3, 3, 2, 1, 5]) + + ec = np.array([6, 7]) + c = setdiff1d(a, b) + assert_array_equal(c, ec) + + a = np.arange(21) + b = np.arange(19) + ec = np.array([19, 20]) + c = setdiff1d(a, b) + assert_array_equal(c, ec) + + assert_array_equal([], setdiff1d([], [])) + a = np.array((), np.uint32) + assert_equal(setdiff1d(a, []).dtype, np.uint32) + + def test_setdiff1d_unique(self): + a = np.array([3, 2, 1]) + b = np.array([7, 5, 2]) + expected = np.array([3, 1]) + actual = setdiff1d(a, b, assume_unique=True) + assert_equal(actual, expected) + + def test_setdiff1d_char_array(self): + a = np.array(['a', 'b', 'c']) + b = np.array(['a', 'b', 's']) + assert_array_equal(setdiff1d(a, b), np.array(['c'])) + + def test_manyways(self): + a = np.array([5, 7, 1, 2, 8]) + b = np.array([9, 8, 2, 4, 3, 1, 5]) + + c1 = setxor1d(a, b) + aux1 = intersect1d(a, b) + aux2 = union1d(a, b) + c2 = setdiff1d(aux2, aux1) + assert_array_equal(c1, c2) + + +class TestUnique: + + def check_all(self, a, b, i1, i2, c, dt): + base_msg = 'check {0} failed for type {1}' + + msg = base_msg.format('values', dt) + v = unique(a) + assert_array_equal(v, b, msg) + assert type(v) == type(b) + + msg = base_msg.format('return_index', dt) + v, j = unique(a, True, False, False) + assert_array_equal(v, b, msg) + assert_array_equal(j, i1, msg) + assert type(v) == type(b) + + msg = base_msg.format('return_inverse', dt) + v, j = unique(a, False, True, False) + assert_array_equal(v, b, msg) + assert_array_equal(j, i2, msg) + assert type(v) == type(b) + + msg = base_msg.format('return_counts', dt) + v, j = unique(a, False, False, True) + assert_array_equal(v, b, msg) + assert_array_equal(j, c, msg) + assert type(v) == type(b) + + msg = base_msg.format('return_index and return_inverse', dt) + v, j1, j2 = unique(a, True, True, False) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i1, msg) + assert_array_equal(j2, i2, msg) + assert type(v) == type(b) + + msg = base_msg.format('return_index and return_counts', dt) + v, j1, j2 = unique(a, True, False, True) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i1, msg) + assert_array_equal(j2, c, msg) + assert type(v) == type(b) + + msg = base_msg.format('return_inverse and return_counts', dt) + v, j1, j2 = unique(a, False, True, True) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i2, msg) + assert_array_equal(j2, c, msg) + assert type(v) == type(b) + + msg = base_msg.format(('return_index, return_inverse ' + 'and return_counts'), dt) + v, j1, j2, j3 = unique(a, True, True, True) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i1, msg) + assert_array_equal(j2, i2, msg) + assert_array_equal(j3, c, msg) + assert type(v) == type(b) + + def get_types(self): + types = [] + types.extend(np.typecodes['AllInteger']) + types.extend(np.typecodes['AllFloat']) + types.append('datetime64[D]') + types.append('timedelta64[D]') + return types + + def test_unique_1d(self): + + a = [5, 7, 1, 2, 1, 5, 7] * 10 + b = [1, 2, 5, 7] + i1 = [2, 3, 0, 1] + i2 = [2, 3, 0, 1, 0, 2, 3] * 10 + c = np.multiply([2, 1, 2, 2], 10) + + # test for numeric arrays + types = self.get_types() + for dt in types: + aa = np.array(a, dt) + bb = np.array(b, dt) + self.check_all(aa, bb, i1, i2, c, dt) + + # test for object arrays + dt = 'O' + aa = np.empty(len(a), dt) + aa[:] = a + bb = np.empty(len(b), dt) + bb[:] = b + self.check_all(aa, bb, i1, i2, c, dt) + + # test for structured arrays + dt = [('', 'i'), ('', 'i')] + aa = np.array(list(zip(a, a)), dt) + bb = np.array(list(zip(b, b)), dt) + self.check_all(aa, bb, i1, i2, c, dt) + + # test for ticket #2799 + aa = [1. + 0.j, 1 - 1.j, 1] + assert_array_equal( + np.sort(np.unique(aa)), + [1. - 1.j, 1.], + ) + + # test for ticket #4785 + a = [(1, 2), (1, 2), (2, 3)] + unq = [1, 2, 3] + inv = [[0, 1], [0, 1], [1, 2]] + a1 = unique(a) + assert_array_equal(a1, unq) + a2, a2_inv = unique(a, return_inverse=True) + assert_array_equal(a2, unq) + assert_array_equal(a2_inv, inv) + + # test for chararrays with return_inverse (gh-5099) + a = np.char.chararray(5) + a[...] = '' + a2, a2_inv = np.unique(a, return_inverse=True) + assert_array_equal(a2_inv, np.zeros(5)) + + # test for ticket #9137 + a = [] + a1_idx = np.unique(a, return_index=True)[1] + a2_inv = np.unique(a, return_inverse=True)[1] + a3_idx, a3_inv = np.unique(a, return_index=True, + return_inverse=True)[1:] + assert_equal(a1_idx.dtype, np.intp) + assert_equal(a2_inv.dtype, np.intp) + assert_equal(a3_idx.dtype, np.intp) + assert_equal(a3_inv.dtype, np.intp) + + # test for ticket 2111 - float + a = [2.0, np.nan, 1.0, np.nan] + ua = [1.0, 2.0, np.nan] + ua_idx = [2, 0, 1] + ua_inv = [1, 2, 0, 2] + ua_cnt = [1, 1, 2] + # order of unique values is not guaranteed + assert_equal(np.sort(np.unique(a)), np.sort(ua)) + assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) + + # test for ticket 2111 - complex + a = [2.0 - 1j, np.nan, 1.0 + 1j, complex(0.0, np.nan), complex(1.0, np.nan)] + ua = [1.0 + 1j, 2.0 - 1j, complex(0.0, np.nan)] + ua_idx = [2, 0, 3] + ua_inv = [1, 2, 0, 2, 2] + ua_cnt = [1, 1, 3] + # order of unique values is not guaranteed + assert_equal(np.sort(np.unique(a)), np.sort(ua)) + assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) + + # test for ticket 2111 - datetime64 + nat = np.datetime64('nat') + a = [np.datetime64('2020-12-26'), nat, np.datetime64('2020-12-24'), nat] + ua = [np.datetime64('2020-12-24'), np.datetime64('2020-12-26'), nat] + ua_idx = [2, 0, 1] + ua_inv = [1, 2, 0, 2] + ua_cnt = [1, 1, 2] + assert_equal(np.unique(a), ua) + assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) + + # test for ticket 2111 - timedelta + nat = np.timedelta64('nat') + a = [np.timedelta64(1, 'D'), nat, np.timedelta64(1, 'h'), nat] + ua = [np.timedelta64(1, 'h'), np.timedelta64(1, 'D'), nat] + ua_idx = [2, 0, 1] + ua_inv = [1, 2, 0, 2] + ua_cnt = [1, 1, 2] + assert_equal(np.unique(a), ua) + assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) + + # test for gh-19300 + all_nans = [np.nan] * 4 + ua = [np.nan] + ua_idx = [0] + ua_inv = [0, 0, 0, 0] + ua_cnt = [4] + assert_equal(np.unique(all_nans), ua) + assert_equal(np.unique(all_nans, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(all_nans, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(all_nans, return_counts=True), (ua, ua_cnt)) + + def test_unique_zero_sized(self): + # test for zero-sized arrays + types = self.get_types() + types.extend('SU') + for dt in types: + a = np.array([], dt) + b = np.array([], dt) + i1 = np.array([], np.int64) + i2 = np.array([], np.int64) + c = np.array([], np.int64) + self.check_all(a, b, i1, i2, c, dt) + + def test_unique_subclass(self): + class Subclass(np.ndarray): + pass + + i1 = [2, 3, 0, 1] + i2 = [2, 3, 0, 1, 0, 2, 3] * 10 + c = np.multiply([2, 1, 2, 2], 10) + + # test for numeric arrays + types = self.get_types() + for dt in types: + a = np.array([5, 7, 1, 2, 1, 5, 7] * 10, dtype=dt) + b = np.array([1, 2, 5, 7], dtype=dt) + aa = Subclass(a.shape, dtype=dt, buffer=a) + bb = Subclass(b.shape, dtype=dt, buffer=b) + self.check_all(aa, bb, i1, i2, c, dt) + + def test_unique_byte_string_hash_based(self): + # test for byte string arrays + arr = ['apple', 'banana', 'apple', 'cherry', 'date', 'banana', 'fig', 'grape'] + unq_sorted = ['apple', 'banana', 'cherry', 'date', 'fig', 'grape'] + + a1 = unique(arr, sorted=False) + # the result varies depending on the impl of std::unordered_set, + # so we check them by sorting + assert_array_equal(sorted(a1.tolist()), unq_sorted) + + def test_unique_unicode_string_hash_based(self): + # test for unicode string arrays + arr = [ + 'café', 'cafe', 'café', 'naïve', 'naive', + 'résumé', 'naïve', 'resume', 'résumé', + ] + unq_sorted = ['cafe', 'café', 'naive', 'naïve', 'resume', 'résumé'] + + a1 = unique(arr, sorted=False) + # the result varies depending on the impl of std::unordered_set, + # so we check them by sorting + assert_array_equal(sorted(a1.tolist()), unq_sorted) + + def test_unique_vstring_hash_based_equal_nan(self): + # test for unicode and nullable string arrays (equal_nan=True) + a = np.array([ + # short strings + 'straße', + None, + 'strasse', + 'straße', + None, + 'niño', + 'nino', + 'élève', + 'eleve', + 'niño', + 'élève', + # medium strings + 'b' * 20, + 'ß' * 30, + None, + 'é' * 30, + 'e' * 20, + 'ß' * 30, + 'n' * 30, + 'ñ' * 20, + None, + 'e' * 20, + 'ñ' * 20, + # long strings + 'b' * 300, + 'ß' * 400, + None, + 'é' * 400, + 'e' * 300, + 'ß' * 400, + 'n' * 400, + 'ñ' * 300, + None, + 'e' * 300, + 'ñ' * 300, + ], + dtype=StringDType(na_object=None) + ) + unq_sorted_wo_none = [ + 'b' * 20, + 'b' * 300, + 'e' * 20, + 'e' * 300, + 'eleve', + 'nino', + 'niño', + 'n' * 30, + 'n' * 400, + 'strasse', + 'straße', + 'ß' * 30, + 'ß' * 400, + 'élève', + 'é' * 30, + 'é' * 400, + 'ñ' * 20, + 'ñ' * 300, + ] + + a1 = unique(a, sorted=False, equal_nan=True) + # the result varies depending on the impl of std::unordered_set, + # so we check them by sorting + + # a1 should have exactly one None + count_none = sum(x is None for x in a1) + assert_equal(count_none, 1) + + a1_wo_none = sorted(x for x in a1 if x is not None) + assert_array_equal(a1_wo_none, unq_sorted_wo_none) + + def test_unique_vstring_hash_based_not_equal_nan(self): + # test for unicode and nullable string arrays (equal_nan=False) + a = np.array([ + # short strings + 'straße', + None, + 'strasse', + 'straße', + None, + 'niño', + 'nino', + 'élève', + 'eleve', + 'niño', + 'élève', + # medium strings + 'b' * 20, + 'ß' * 30, + None, + 'é' * 30, + 'e' * 20, + 'ß' * 30, + 'n' * 30, + 'ñ' * 20, + None, + 'e' * 20, + 'ñ' * 20, + # long strings + 'b' * 300, + 'ß' * 400, + None, + 'é' * 400, + 'e' * 300, + 'ß' * 400, + 'n' * 400, + 'ñ' * 300, + None, + 'e' * 300, + 'ñ' * 300, + ], + dtype=StringDType(na_object=None) + ) + unq_sorted_wo_none = [ + 'b' * 20, + 'b' * 300, + 'e' * 20, + 'e' * 300, + 'eleve', + 'nino', + 'niño', + 'n' * 30, + 'n' * 400, + 'strasse', + 'straße', + 'ß' * 30, + 'ß' * 400, + 'élève', + 'é' * 30, + 'é' * 400, + 'ñ' * 20, + 'ñ' * 300, + ] + + a1 = unique(a, sorted=False, equal_nan=False) + # the result varies depending on the impl of std::unordered_set, + # so we check them by sorting + + # a1 should have exactly one None + count_none = sum(x is None for x in a1) + assert_equal(count_none, 6) + + a1_wo_none = sorted(x for x in a1 if x is not None) + assert_array_equal(a1_wo_none, unq_sorted_wo_none) + + def test_unique_vstring_errors(self): + a = np.array( + [ + 'apple', 'banana', 'apple', None, 'cherry', + 'date', 'banana', 'fig', None, 'grape', + ] * 2, + dtype=StringDType(na_object=None) + ) + assert_raises(ValueError, unique, a, equal_nan=False) + + @pytest.mark.parametrize("arg", ["return_index", "return_inverse", "return_counts"]) + def test_unsupported_hash_based(self, arg): + """These currently never use the hash-based solution. However, + it seems easier to just allow it. + + When the hash-based solution is added, this test should fail and be + replaced with something more comprehensive. + """ + a = np.array([1, 5, 2, 3, 4, 8, 199, 1, 3, 5]) + + res_not_sorted = np.unique([1, 1], sorted=False, **{arg: True}) + res_sorted = np.unique([1, 1], sorted=True, **{arg: True}) + # The following should fail without first sorting `res_not_sorted`. + for arr, expected in zip(res_not_sorted, res_sorted): + assert_array_equal(arr, expected) + + def test_unique_axis_errors(self): + assert_raises(TypeError, self._run_axis_tests, object) + assert_raises(TypeError, self._run_axis_tests, + [('a', int), ('b', object)]) + + assert_raises(AxisError, unique, np.arange(10), axis=2) + assert_raises(AxisError, unique, np.arange(10), axis=-2) + + def test_unique_axis_list(self): + msg = "Unique failed on list of lists" + inp = [[0, 1, 0], [0, 1, 0]] + inp_arr = np.asarray(inp) + assert_array_equal(unique(inp, axis=0), unique(inp_arr, axis=0), msg) + assert_array_equal(unique(inp, axis=1), unique(inp_arr, axis=1), msg) + + def test_unique_axis(self): + types = [] + types.extend(np.typecodes['AllInteger']) + types.extend(np.typecodes['AllFloat']) + types.append('datetime64[D]') + types.append('timedelta64[D]') + types.append([('a', int), ('b', int)]) + types.append([('a', int), ('b', float)]) + + for dtype in types: + self._run_axis_tests(dtype) + + msg = 'Non-bitwise-equal booleans test failed' + data = np.arange(10, dtype=np.uint8).reshape(-1, 2).view(bool) + result = np.array([[False, True], [True, True]], dtype=bool) + assert_array_equal(unique(data, axis=0), result, msg) + + msg = 'Negative zero equality test failed' + data = np.array([[-0.0, 0.0], [0.0, -0.0], [-0.0, 0.0], [0.0, -0.0]]) + result = np.array([[-0.0, 0.0]]) + assert_array_equal(unique(data, axis=0), result, msg) + + @pytest.mark.parametrize("axis", [0, -1]) + def test_unique_1d_with_axis(self, axis): + x = np.array([4, 3, 2, 3, 2, 1, 2, 2]) + uniq = unique(x, axis=axis) + assert_array_equal(uniq, [1, 2, 3, 4]) + + @pytest.mark.parametrize("axis", [None, 0, -1]) + def test_unique_inverse_with_axis(self, axis): + x = np.array([[4, 4, 3], [2, 2, 1], [2, 2, 1], [4, 4, 3]]) + uniq, inv = unique(x, return_inverse=True, axis=axis) + assert_equal(inv.ndim, x.ndim if axis is None else 1) + assert_array_equal(x, np.take(uniq, inv, axis=axis)) + + def test_unique_axis_zeros(self): + # issue 15559 + single_zero = np.empty(shape=(2, 0), dtype=np.int8) + uniq, idx, inv, cnt = unique(single_zero, axis=0, return_index=True, + return_inverse=True, return_counts=True) + + # there's 1 element of shape (0,) along axis 0 + assert_equal(uniq.dtype, single_zero.dtype) + assert_array_equal(uniq, np.empty(shape=(1, 0))) + assert_array_equal(idx, np.array([0])) + assert_array_equal(inv, np.array([0, 0])) + assert_array_equal(cnt, np.array([2])) + + # there's 0 elements of shape (2,) along axis 1 + uniq, idx, inv, cnt = unique(single_zero, axis=1, return_index=True, + return_inverse=True, return_counts=True) + + assert_equal(uniq.dtype, single_zero.dtype) + assert_array_equal(uniq, np.empty(shape=(2, 0))) + assert_array_equal(idx, np.array([])) + assert_array_equal(inv, np.array([])) + assert_array_equal(cnt, np.array([])) + + # test a "complicated" shape + shape = (0, 2, 0, 3, 0, 4, 0) + multiple_zeros = np.empty(shape=shape) + for axis in range(len(shape)): + expected_shape = list(shape) + if shape[axis] == 0: + expected_shape[axis] = 0 + else: + expected_shape[axis] = 1 + + assert_array_equal(unique(multiple_zeros, axis=axis), + np.empty(shape=expected_shape)) + + def test_unique_masked(self): + # issue 8664 + x = np.array([64, 0, 1, 2, 3, 63, 63, 0, 0, 0, 1, 2, 0, 63, 0], + dtype='uint8') + y = np.ma.masked_equal(x, 0) + + v = np.unique(y) + v2, i, c = np.unique(y, return_index=True, return_counts=True) + + msg = 'Unique returned different results when asked for index' + assert_array_equal(v.data, v2.data, msg) + assert_array_equal(v.mask, v2.mask, msg) + + def test_unique_sort_order_with_axis(self): + # These tests fail if sorting along axis is done by treating subarrays + # as unsigned byte strings. See gh-10495. + fmt = "sort order incorrect for integer type '%s'" + for dt in 'bhilq': + a = np.array([[-1], [0]], dt) + b = np.unique(a, axis=0) + assert_array_equal(a, b, fmt % dt) + + def _run_axis_tests(self, dtype): + data = np.array([[0, 1, 0, 0], + [1, 0, 0, 0], + [0, 1, 0, 0], + [1, 0, 0, 0]]).astype(dtype) + + msg = 'Unique with 1d array and axis=0 failed' + result = np.array([0, 1]) + assert_array_equal(unique(data), result.astype(dtype), msg) + + msg = 'Unique with 2d array and axis=0 failed' + result = np.array([[0, 1, 0, 0], [1, 0, 0, 0]]) + assert_array_equal(unique(data, axis=0), result.astype(dtype), msg) + + msg = 'Unique with 2d array and axis=1 failed' + result = np.array([[0, 0, 1], [0, 1, 0], [0, 0, 1], [0, 1, 0]]) + assert_array_equal(unique(data, axis=1), result.astype(dtype), msg) + + msg = 'Unique with 3d array and axis=2 failed' + data3d = np.array([[[1, 1], + [1, 0]], + [[0, 1], + [0, 0]]]).astype(dtype) + result = np.take(data3d, [1, 0], axis=2) + assert_array_equal(unique(data3d, axis=2), result, msg) + + uniq, idx, inv, cnt = unique(data, axis=0, return_index=True, + return_inverse=True, return_counts=True) + msg = "Unique's return_index=True failed with axis=0" + assert_array_equal(data[idx], uniq, msg) + msg = "Unique's return_inverse=True failed with axis=0" + assert_array_equal(np.take(uniq, inv, axis=0), data) + msg = "Unique's return_counts=True failed with axis=0" + assert_array_equal(cnt, np.array([2, 2]), msg) + + uniq, idx, inv, cnt = unique(data, axis=1, return_index=True, + return_inverse=True, return_counts=True) + msg = "Unique's return_index=True failed with axis=1" + assert_array_equal(data[:, idx], uniq) + msg = "Unique's return_inverse=True failed with axis=1" + assert_array_equal(np.take(uniq, inv, axis=1), data) + msg = "Unique's return_counts=True failed with axis=1" + assert_array_equal(cnt, np.array([2, 1, 1]), msg) + + def test_unique_nanequals(self): + # issue 20326 + a = np.array([1, 1, np.nan, np.nan, np.nan]) + unq = np.unique(a) + not_unq = np.unique(a, equal_nan=False) + assert_array_equal(unq, np.array([1, np.nan])) + assert_array_equal(not_unq, np.array([1, np.nan, np.nan, np.nan])) + + def test_unique_array_api_functions(self): + arr = np.array( + [ + np.nan, 1.0, 0.0, 4.0, -np.nan, + -0.0, 1.0, 3.0, 4.0, np.nan, + 5.0, -0.0, 1.0, -np.nan, 0.0, + ], + ) + + for res_unique_array_api, res_unique in [ + ( + np.unique_values(arr), + np.unique(arr, equal_nan=False) + ), + ( + np.unique_counts(arr), + np.unique(arr, return_counts=True, equal_nan=False) + ), + ( + np.unique_inverse(arr), + np.unique(arr, return_inverse=True, equal_nan=False) + ), + ( + np.unique_all(arr), + np.unique( + arr, + return_index=True, + return_inverse=True, + return_counts=True, + equal_nan=False + ) + ) + ]: + assert len(res_unique_array_api) == len(res_unique) + if not isinstance(res_unique_array_api, tuple): + res_unique_array_api = (res_unique_array_api,) + if not isinstance(res_unique, tuple): + res_unique = (res_unique,) + + for actual, expected in zip(res_unique_array_api, res_unique): + # Order of output is not guaranteed + assert_equal(np.sort(actual), np.sort(expected)) + + def test_unique_inverse_shape(self): + # Regression test for https://github.com/numpy/numpy/issues/25552 + arr = np.array([[1, 2, 3], [2, 3, 1]]) + expected_values, expected_inverse = np.unique(arr, return_inverse=True) + expected_inverse = expected_inverse.reshape(arr.shape) + for func in np.unique_inverse, np.unique_all: + result = func(arr) + assert_array_equal(expected_values, result.values) + assert_array_equal(expected_inverse, result.inverse_indices) + assert_array_equal(arr, result.values[result.inverse_indices]) + + @pytest.mark.parametrize( + 'data', + [[[1, 1, 1], + [1, 1, 1]], + [1, 3, 2], + 1], + ) + @pytest.mark.parametrize('transpose', [False, True]) + @pytest.mark.parametrize('dtype', [np.int32, np.float64]) + def test_unique_with_matrix(self, data, transpose, dtype): + mat = np.matrix(data).astype(dtype) + if transpose: + mat = mat.T + u = np.unique(mat) + expected = np.unique(np.asarray(mat)) + assert_array_equal(u, expected, strict=True) + + def test_unique_axis0_equal_nan_on_1d_array(self): + # Test Issue #29336 + arr1d = np.array([np.nan, 0, 0, np.nan]) + expected = np.array([0., np.nan]) + result = np.unique(arr1d, axis=0, equal_nan=True) + assert_array_equal(result, expected) + + def test_unique_axis_minus1_eq_on_1d_array(self): + arr1d = np.array([np.nan, 0, 0, np.nan]) + expected = np.array([0., np.nan]) + result = np.unique(arr1d, axis=-1, equal_nan=True) + assert_array_equal(result, expected) + + def test_unique_axis_float_raises_typeerror(self): + arr1d = np.array([np.nan, 0, 0, np.nan]) + with pytest.raises(TypeError, match="integer argument expected"): + np.unique(arr1d, axis=0.0, equal_nan=False) + + @pytest.mark.parametrize('dt', [np.dtype('F'), np.dtype('D')]) + @pytest.mark.parametrize('values', [[complex(0.0, -1), complex(-0.0, -1), 0], + [-200, complex(-200, -0.0), -1], + [-25, 3, -5j, complex(-25, -0.0), 3j]]) + def test_unique_complex_signed_zeros(self, dt, values): + z = np.array(values, dtype=dt) + u = np.unique(z) + assert len(u) == len(values) - 1 diff --git a/local_packages/numpy/lib/tests/test_arrayterator.py b/local_packages/numpy/lib/tests/test_arrayterator.py new file mode 100644 index 0000000..42a85e5 --- /dev/null +++ b/local_packages/numpy/lib/tests/test_arrayterator.py @@ -0,0 +1,45 @@ +from functools import reduce +from operator import mul + +import numpy as np +from numpy.lib import Arrayterator +from numpy.random import randint +from numpy.testing import assert_ + + +def test(): + np.random.seed(np.arange(10)) + + # Create a random array + ndims = randint(5) + 1 + shape = tuple(randint(10) + 1 for dim in range(ndims)) + els = reduce(mul, shape) + a = np.arange(els).reshape(shape) + + buf_size = randint(2 * els) + b = Arrayterator(a, buf_size) + + # Check that each block has at most ``buf_size`` elements + for block in b: + assert_(len(block.flat) <= (buf_size or els)) + + # Check that all elements are iterated correctly + assert_(list(b.flat) == list(a.flat)) + + # Slice arrayterator + start = [randint(dim) for dim in shape] + stop = [randint(dim) + 1 for dim in shape] + step = [randint(dim) + 1 for dim in shape] + slice_ = tuple(slice(*t) for t in zip(start, stop, step)) + c = b[slice_] + d = a[slice_] + + # Check that each block has at most ``buf_size`` elements + for block in c: + assert_(len(block.flat) <= (buf_size or els)) + + # Check that the arrayterator is sliced correctly + assert_(np.all(c.__array__() == d)) + + # Check that all elements are iterated correctly + assert_(list(c.flat) == list(d.flat)) diff --git a/local_packages/numpy/lib/tests/test_format.py b/local_packages/numpy/lib/tests/test_format.py new file mode 100644 index 0000000..c70e3d5 --- /dev/null +++ b/local_packages/numpy/lib/tests/test_format.py @@ -0,0 +1,1054 @@ +# doctest +r''' Test the .npy file format. + +Set up: + + >>> import sys + >>> from io import BytesIO + >>> from numpy.lib import format + >>> + >>> scalars = [ + ... np.uint8, + ... np.int8, + ... np.uint16, + ... np.int16, + ... np.uint32, + ... np.int32, + ... np.uint64, + ... np.int64, + ... np.float32, + ... np.float64, + ... np.complex64, + ... np.complex128, + ... object, + ... ] + >>> + >>> basic_arrays = [] + >>> + >>> for scalar in scalars: + ... for endian in '<>': + ... dtype = np.dtype(scalar).newbyteorder(endian) + ... basic = np.arange(15).astype(dtype) + ... basic_arrays.extend([ + ... np.array([], dtype=dtype), + ... np.array(10, dtype=dtype), + ... basic, + ... basic.reshape((3,5)), + ... basic.reshape((3,5)).T, + ... basic.reshape((3,5))[::-1,::2], + ... ]) + ... + >>> + >>> Pdescr = [ + ... ('x', 'i4', (2,)), + ... ('y', 'f8', (2, 2)), + ... ('z', 'u1')] + >>> + >>> + >>> PbufferT = [ + ... ([3,2], [[6.,4.],[6.,4.]], 8), + ... ([4,3], [[7.,5.],[7.,5.]], 9), + ... ] + >>> + >>> + >>> Ndescr = [ + ... ('x', 'i4', (2,)), + ... ('Info', [ + ... ('value', 'c16'), + ... ('y2', 'f8'), + ... ('Info2', [ + ... ('name', 'S2'), + ... ('value', 'c16', (2,)), + ... ('y3', 'f8', (2,)), + ... ('z3', 'u4', (2,))]), + ... ('name', 'S2'), + ... ('z2', 'b1')]), + ... ('color', 'S2'), + ... ('info', [ + ... ('Name', 'U8'), + ... ('Value', 'c16')]), + ... ('y', 'f8', (2, 2)), + ... ('z', 'u1')] + >>> + >>> + >>> NbufferT = [ + ... ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', ('NN', 6j), [[6.,4.],[6.,4.]], 8), + ... ([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9), + ... ] + >>> + >>> + >>> record_arrays = [ + ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')), + ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')), + ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')), + ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')), + ... ] + +Test the magic string writing. + + >>> format.magic(1, 0) + '\x93NUMPY\x01\x00' + >>> format.magic(0, 0) + '\x93NUMPY\x00\x00' + >>> format.magic(255, 255) + '\x93NUMPY\xff\xff' + >>> format.magic(2, 5) + '\x93NUMPY\x02\x05' + +Test the magic string reading. + + >>> format.read_magic(BytesIO(format.magic(1, 0))) + (1, 0) + >>> format.read_magic(BytesIO(format.magic(0, 0))) + (0, 0) + >>> format.read_magic(BytesIO(format.magic(255, 255))) + (255, 255) + >>> format.read_magic(BytesIO(format.magic(2, 5))) + (2, 5) + +Test the header writing. + + >>> for arr in basic_arrays + record_arrays: + ... f = BytesIO() + ... format.write_array_header_1_0(f, arr) # XXX: arr is not a dict, items gets called on it + ... print(repr(f.getvalue())) + ... + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'u2', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>u2', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'i2', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>i2', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'u4', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>u4', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'i4', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>i4', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'u8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>u8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'i8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>i8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'f4', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>f4', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'f8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>f8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'c8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>c8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'c16', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>c16', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n" + "v\x00{'descr': [('x', 'i4', (2,)), ('y', '>f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" + "\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" +''' +import os +import sys +import warnings +from io import BytesIO + +import pytest + +import numpy as np +from numpy.lib import format +from numpy.testing import ( + IS_64BIT, + IS_PYPY, + IS_WASM, + assert_, + assert_array_equal, + assert_raises, + assert_raises_regex, +) +from numpy.testing._private.utils import requires_memory + +# Generate some basic arrays to test with. +scalars = [ + np.uint8, + np.int8, + np.uint16, + np.int16, + np.uint32, + np.int32, + np.uint64, + np.int64, + np.float32, + np.float64, + np.complex64, + np.complex128, + object, +] +basic_arrays = [] +for scalar in scalars: + for endian in '<>': + dtype = np.dtype(scalar).newbyteorder(endian) + basic = np.arange(1500).astype(dtype) + basic_arrays.extend([ + # Empty + np.array([], dtype=dtype), + # Rank-0 + np.array(10, dtype=dtype), + # 1-D + basic, + # 2-D C-contiguous + basic.reshape((30, 50)), + # 2-D F-contiguous + basic.reshape((30, 50)).T, + # 2-D non-contiguous + basic.reshape((30, 50))[::-1, ::2], + ]) + +# More complicated record arrays. +# This is the structure of the table used for plain objects: +# +# +-+-+-+ +# |x|y|z| +# +-+-+-+ + +# Structure of a plain array description: +Pdescr = [ + ('x', 'i4', (2,)), + ('y', 'f8', (2, 2)), + ('z', 'u1')] + +# A plain list of tuples with values for testing: +PbufferT = [ + # x y z + ([3, 2], [[6., 4.], [6., 4.]], 8), + ([4, 3], [[7., 5.], [7., 5.]], 9), + ] + + +# This is the structure of the table used for nested objects (DON'T PANIC!): +# +# +-+---------------------------------+-----+----------+-+-+ +# |x|Info |color|info |y|z| +# | +-----+--+----------------+----+--+ +----+-----+ | | +# | |value|y2|Info2 |name|z2| |Name|Value| | | +# | | | +----+-----+--+--+ | | | | | | | +# | | | |name|value|y3|z3| | | | | | | | +# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+ +# + +# The corresponding nested array description: +Ndescr = [ + ('x', 'i4', (2,)), + ('Info', [ + ('value', 'c16'), + ('y2', 'f8'), + ('Info2', [ + ('name', 'S2'), + ('value', 'c16', (2,)), + ('y3', 'f8', (2,)), + ('z3', 'u4', (2,))]), + ('name', 'S2'), + ('z2', 'b1')]), + ('color', 'S2'), + ('info', [ + ('Name', 'U8'), + ('Value', 'c16')]), + ('y', 'f8', (2, 2)), + ('z', 'u1')] + +NbufferT = [ + ([3, 2], (6j, 6., ('nn', [6j, 4j], [6., 4.], [1, 2]), 'NN', True), + 'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8), + ([4, 3], (7j, 7., ('oo', [7j, 5j], [7., 5.], [2, 1]), 'OO', False), + 'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9), + ] + +record_arrays = [ + np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')), + np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')), + np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')), + np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')), + np.zeros(1, dtype=[('c', ('= (3, 12), reason="see gh-23988") +@pytest.mark.xfail(IS_WASM, reason="Emscripten NODEFS has a buggy dup") +def test_python2_python3_interoperability(): + fname = 'win64python2.npy' + path = os.path.join(os.path.dirname(__file__), 'data', fname) + with pytest.warns(UserWarning, match="Reading.*this warning\\."): + data = np.load(path) + assert_array_equal(data, np.ones(2)) + + +@pytest.mark.filterwarnings( + "ignore:.*align should be passed:numpy.exceptions.VisibleDeprecationWarning") +def test_pickle_python2_python3(): + # Test that loading object arrays saved on Python 2 works both on + # Python 2 and Python 3 and vice versa + data_dir = os.path.join(os.path.dirname(__file__), 'data') + + expected = np.array([None, range, '\u512a\u826f', + b'\xe4\xb8\x8d\xe8\x89\xaf'], + dtype=object) + + for fname in ['py2-np0-objarr.npy', 'py2-objarr.npy', 'py2-objarr.npz', + 'py3-objarr.npy', 'py3-objarr.npz']: + path = os.path.join(data_dir, fname) + + for encoding in ['bytes', 'latin1']: + data_f = np.load(path, allow_pickle=True, encoding=encoding) + if fname.endswith('.npz'): + data = data_f['x'] + data_f.close() + else: + data = data_f + + if encoding == 'latin1' and fname.startswith('py2'): + assert_(isinstance(data[3], str)) + assert_array_equal(data[:-1], expected[:-1]) + # mojibake occurs + assert_array_equal(data[-1].encode(encoding), expected[-1]) + else: + assert_(isinstance(data[3], bytes)) + assert_array_equal(data, expected) + + if fname.startswith('py2'): + if fname.endswith('.npz'): + data = np.load(path, allow_pickle=True) + assert_raises(UnicodeError, data.__getitem__, 'x') + data.close() + data = np.load(path, allow_pickle=True, fix_imports=False, + encoding='latin1') + assert_raises(ImportError, data.__getitem__, 'x') + data.close() + else: + assert_raises(UnicodeError, np.load, path, + allow_pickle=True) + assert_raises(ImportError, np.load, path, + allow_pickle=True, fix_imports=False, + encoding='latin1') + + +def test_pickle_disallow(tmpdir): + data_dir = os.path.join(os.path.dirname(__file__), 'data') + + path = os.path.join(data_dir, 'py2-objarr.npy') + assert_raises(ValueError, np.load, path, + allow_pickle=False, encoding='latin1') + + path = os.path.join(data_dir, 'py2-objarr.npz') + with np.load(path, allow_pickle=False, encoding='latin1') as f: + assert_raises(ValueError, f.__getitem__, 'x') + + path = os.path.join(tmpdir, 'pickle-disabled.npy') + assert_raises(ValueError, np.save, path, np.array([None], dtype=object), + allow_pickle=False) + +@pytest.mark.parametrize('dt', [ + # Not testing a subarray only dtype, because it cannot be attached to an array + # (and would fail the test as of writing this.) + np.dtype([('a', np.int8), + ('b', np.int16), + ('c', np.int32), + ], align=True), + np.dtype([('x', np.dtype(({'names': ['a', 'b'], + 'formats': ['i1', 'i1'], + 'offsets': [0, 4], + 'itemsize': 8, + }, + (3,))), + (4,), + )]), + np.dtype([('x', + (' 1, a) + assert_array_equal(b, [3, 2, 2, 3, 3]) + + def test_place(self): + # Make sure that non-np.ndarray objects + # raise an error instead of doing nothing + assert_raises(TypeError, place, [1, 2, 3], [True, False], [0, 1]) + + a = np.array([1, 4, 3, 2, 5, 8, 7]) + place(a, [0, 1, 0, 1, 0, 1, 0], [2, 4, 6]) + assert_array_equal(a, [1, 2, 3, 4, 5, 6, 7]) + + place(a, np.zeros(7), []) + assert_array_equal(a, np.arange(1, 8)) + + place(a, [1, 0, 1, 0, 1, 0, 1], [8, 9]) + assert_array_equal(a, [8, 2, 9, 4, 8, 6, 9]) + assert_raises_regex(ValueError, "Cannot insert from an empty array", + lambda: place(a, [0, 0, 0, 0, 0, 1, 0], [])) + + # See Issue #6974 + a = np.array(['12', '34']) + place(a, [0, 1], '9') + assert_array_equal(a, ['12', '9']) + + def test_both(self): + a = rand(10) + mask = a > 0.5 + ac = a.copy() + c = extract(mask, a) + place(a, mask, 0) + place(a, mask, c) + assert_array_equal(a, ac) + + +# _foo1 and _foo2 are used in some tests in TestVectorize. + +def _foo1(x, y=1.0): + return y * math.floor(x) + + +def _foo2(x, y=1.0, z=0.0): + return y * math.floor(x) + z + + +class TestVectorize: + + def test_simple(self): + def addsubtract(a, b): + if a > b: + return a - b + else: + return a + b + + f = vectorize(addsubtract) + r = f([0, 3, 6, 9], [1, 3, 5, 7]) + assert_array_equal(r, [1, 6, 1, 2]) + + def test_scalar(self): + def addsubtract(a, b): + if a > b: + return a - b + else: + return a + b + + f = vectorize(addsubtract) + r = f([0, 3, 6, 9], 5) + assert_array_equal(r, [5, 8, 1, 4]) + + def test_large(self): + x = np.linspace(-3, 2, 10000) + f = vectorize(lambda x: x) + y = f(x) + assert_array_equal(y, x) + + def test_ufunc(self): + f = vectorize(math.cos) + args = np.array([0, 0.5 * np.pi, np.pi, 1.5 * np.pi, 2 * np.pi]) + r1 = f(args) + r2 = np.cos(args) + assert_array_almost_equal(r1, r2) + + def test_keywords(self): + + def foo(a, b=1): + return a + b + + f = vectorize(foo) + args = np.array([1, 2, 3]) + r1 = f(args) + r2 = np.array([2, 3, 4]) + assert_array_equal(r1, r2) + r1 = f(args, 2) + r2 = np.array([3, 4, 5]) + assert_array_equal(r1, r2) + + def test_keywords_with_otypes_order1(self): + # gh-1620: The second call of f would crash with + # `ValueError: invalid number of arguments`. + f = vectorize(_foo1, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. + r1 = f(np.arange(3.0), 1.0) + r2 = f(np.arange(3.0)) + assert_array_equal(r1, r2) + + def test_keywords_with_otypes_order2(self): + # gh-1620: The second call of f would crash with + # `ValueError: non-broadcastable output operand with shape () + # doesn't match the broadcast shape (3,)`. + f = vectorize(_foo1, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. + r1 = f(np.arange(3.0)) + r2 = f(np.arange(3.0), 1.0) + assert_array_equal(r1, r2) + + def test_keywords_with_otypes_order3(self): + # gh-1620: The third call of f would crash with + # `ValueError: invalid number of arguments`. + f = vectorize(_foo1, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. + r1 = f(np.arange(3.0)) + r2 = f(np.arange(3.0), y=1.0) + r3 = f(np.arange(3.0)) + assert_array_equal(r1, r2) + assert_array_equal(r1, r3) + + def test_keywords_with_otypes_several_kwd_args1(self): + # gh-1620 Make sure different uses of keyword arguments + # don't break the vectorized function. + f = vectorize(_foo2, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. + r1 = f(10.4, z=100) + r2 = f(10.4, y=-1) + r3 = f(10.4) + assert_equal(r1, _foo2(10.4, z=100)) + assert_equal(r2, _foo2(10.4, y=-1)) + assert_equal(r3, _foo2(10.4)) + + def test_keywords_with_otypes_several_kwd_args2(self): + # gh-1620 Make sure different uses of keyword arguments + # don't break the vectorized function. + f = vectorize(_foo2, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. + r1 = f(z=100, x=10.4, y=-1) + r2 = f(1, 2, 3) + assert_equal(r1, _foo2(z=100, x=10.4, y=-1)) + assert_equal(r2, _foo2(1, 2, 3)) + + def test_keywords_no_func_code(self): + # This needs to test a function that has keywords but + # no func_code attribute, since otherwise vectorize will + # inspect the func_code. + import random + try: + vectorize(random.randrange) # Should succeed + except Exception: + raise AssertionError + + def test_keywords2_ticket_2100(self): + # Test kwarg support: enhancement ticket 2100 + + def foo(a, b=1): + return a + b + + f = vectorize(foo) + args = np.array([1, 2, 3]) + r1 = f(a=args) + r2 = np.array([2, 3, 4]) + assert_array_equal(r1, r2) + r1 = f(b=1, a=args) + assert_array_equal(r1, r2) + r1 = f(args, b=2) + r2 = np.array([3, 4, 5]) + assert_array_equal(r1, r2) + + def test_keywords3_ticket_2100(self): + # Test excluded with mixed positional and kwargs: ticket 2100 + def mypolyval(x, p): + _p = list(p) + res = _p.pop(0) + while _p: + res = res * x + _p.pop(0) + return res + + vpolyval = np.vectorize(mypolyval, excluded=['p', 1]) + ans = [3, 6] + assert_array_equal(ans, vpolyval(x=[0, 1], p=[1, 2, 3])) + assert_array_equal(ans, vpolyval([0, 1], p=[1, 2, 3])) + assert_array_equal(ans, vpolyval([0, 1], [1, 2, 3])) + + def test_keywords4_ticket_2100(self): + # Test vectorizing function with no positional args. + @vectorize + def f(**kw): + res = 1.0 + for _k in kw: + res *= kw[_k] + return res + + assert_array_equal(f(a=[1, 2], b=[3, 4]), [3, 8]) + + def test_keywords5_ticket_2100(self): + # Test vectorizing function with no kwargs args. + @vectorize + def f(*v): + return np.prod(v) + + assert_array_equal(f([1, 2], [3, 4]), [3, 8]) + + def test_coverage1_ticket_2100(self): + def foo(): + return 1 + + f = vectorize(foo) + assert_array_equal(f(), 1) + + def test_assigning_docstring(self): + def foo(x): + """Original documentation""" + return x + + f = vectorize(foo) + assert_equal(f.__doc__, foo.__doc__) + + doc = "Provided documentation" + f = vectorize(foo, doc=doc) + assert_equal(f.__doc__, doc) + + def test_UnboundMethod_ticket_1156(self): + # Regression test for issue 1156 + class Foo: + b = 2 + + def bar(self, a): + return a ** self.b + + assert_array_equal(vectorize(Foo().bar)(np.arange(9)), + np.arange(9) ** 2) + assert_array_equal(vectorize(Foo.bar)(Foo(), np.arange(9)), + np.arange(9) ** 2) + + def test_execution_order_ticket_1487(self): + # Regression test for dependence on execution order: issue 1487 + f1 = vectorize(lambda x: x) + res1a = f1(np.arange(3)) + res1b = f1(np.arange(0.1, 3)) + f2 = vectorize(lambda x: x) + res2b = f2(np.arange(0.1, 3)) + res2a = f2(np.arange(3)) + assert_equal(res1a, res2a) + assert_equal(res1b, res2b) + + def test_string_ticket_1892(self): + # Test vectorization over strings: issue 1892. + f = np.vectorize(lambda x: x) + s = '0123456789' * 10 + assert_equal(s, f(s)) + + def test_dtype_promotion_gh_29189(self): + # dtype should not be silently promoted (int32 -> int64) + dtypes = [np.int16, np.int32, np.int64, np.float16, np.float32, np.float64] + + for dtype in dtypes: + x = np.asarray([1, 2, 3], dtype=dtype) + y = np.vectorize(lambda x: x + x)(x) + assert x.dtype == y.dtype + + def test_cache(self): + # Ensure that vectorized func called exactly once per argument. + _calls = [0] + + @vectorize + def f(x): + _calls[0] += 1 + return x ** 2 + + f.cache = True + x = np.arange(5) + assert_array_equal(f(x), x * x) + assert_equal(_calls[0], len(x)) + + def test_otypes(self): + f = np.vectorize(lambda x: x) + f.otypes = 'i' + x = np.arange(5) + assert_array_equal(f(x), x) + + def test_otypes_object_28624(self): + # with object otype, the vectorized function should return y + # wrapped into an object array + y = np.arange(3) + f = vectorize(lambda x: y, otypes=[object]) + + assert f(None).item() is y + assert f([None]).item() is y + + y = [1, 2, 3] + f = vectorize(lambda x: y, otypes=[object]) + + assert f(None).item() is y + assert f([None]).item() is y + + def test_parse_gufunc_signature(self): + assert_equal(nfb._parse_gufunc_signature('(x)->()'), ([('x',)], [()])) + assert_equal(nfb._parse_gufunc_signature('(x,y)->()'), + ([('x', 'y')], [()])) + assert_equal(nfb._parse_gufunc_signature('(x),(y)->()'), + ([('x',), ('y',)], [()])) + assert_equal(nfb._parse_gufunc_signature('(x)->(y)'), + ([('x',)], [('y',)])) + assert_equal(nfb._parse_gufunc_signature('(x)->(y),()'), + ([('x',)], [('y',), ()])) + assert_equal(nfb._parse_gufunc_signature('(),(a,b,c),(d)->(d,e)'), + ([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')])) + + # Tests to check if whitespaces are ignored + assert_equal(nfb._parse_gufunc_signature('(x )->()'), ([('x',)], [()])) + assert_equal(nfb._parse_gufunc_signature('( x , y )->( )'), + ([('x', 'y')], [()])) + assert_equal(nfb._parse_gufunc_signature('(x),( y) ->()'), + ([('x',), ('y',)], [()])) + assert_equal(nfb._parse_gufunc_signature('( x)-> (y ) '), + ([('x',)], [('y',)])) + assert_equal(nfb._parse_gufunc_signature(' (x)->( y),( )'), + ([('x',)], [('y',), ()])) + assert_equal(nfb._parse_gufunc_signature( + '( ), ( a, b,c ) ,( d) -> (d , e)'), + ([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')])) + + with assert_raises(ValueError): + nfb._parse_gufunc_signature('(x)(y)->()') + with assert_raises(ValueError): + nfb._parse_gufunc_signature('(x),(y)->') + with assert_raises(ValueError): + nfb._parse_gufunc_signature('((x))->(x)') + + def test_signature_simple(self): + def addsubtract(a, b): + if a > b: + return a - b + else: + return a + b + + f = vectorize(addsubtract, signature='(),()->()') + r = f([0, 3, 6, 9], [1, 3, 5, 7]) + assert_array_equal(r, [1, 6, 1, 2]) + + def test_signature_mean_last(self): + def mean(a): + return a.mean() + + f = vectorize(mean, signature='(n)->()') + r = f([[1, 3], [2, 4]]) + assert_array_equal(r, [2, 3]) + + def test_signature_center(self): + def center(a): + return a - a.mean() + + f = vectorize(center, signature='(n)->(n)') + r = f([[1, 3], [2, 4]]) + assert_array_equal(r, [[-1, 1], [-1, 1]]) + + def test_signature_two_outputs(self): + f = vectorize(lambda x: (x, x), signature='()->(),()') + r = f([1, 2, 3]) + assert_(isinstance(r, tuple) and len(r) == 2) + assert_array_equal(r[0], [1, 2, 3]) + assert_array_equal(r[1], [1, 2, 3]) + + def test_signature_outer(self): + f = vectorize(np.outer, signature='(a),(b)->(a,b)') + r = f([1, 2], [1, 2, 3]) + assert_array_equal(r, [[1, 2, 3], [2, 4, 6]]) + + r = f([[[1, 2]]], [1, 2, 3]) + assert_array_equal(r, [[[[1, 2, 3], [2, 4, 6]]]]) + + r = f([[1, 0], [2, 0]], [1, 2, 3]) + assert_array_equal(r, [[[1, 2, 3], [0, 0, 0]], + [[2, 4, 6], [0, 0, 0]]]) + + r = f([1, 2], [[1, 2, 3], [0, 0, 0]]) + assert_array_equal(r, [[[1, 2, 3], [2, 4, 6]], + [[0, 0, 0], [0, 0, 0]]]) + + def test_signature_computed_size(self): + f = vectorize(lambda x: x[:-1], signature='(n)->(m)') + r = f([1, 2, 3]) + assert_array_equal(r, [1, 2]) + + r = f([[1, 2, 3], [2, 3, 4]]) + assert_array_equal(r, [[1, 2], [2, 3]]) + + def test_signature_excluded(self): + + def foo(a, b=1): + return a + b + + f = vectorize(foo, signature='()->()', excluded={'b'}) + assert_array_equal(f([1, 2, 3]), [2, 3, 4]) + assert_array_equal(f([1, 2, 3], b=0), [1, 2, 3]) + + def test_signature_otypes(self): + f = vectorize(lambda x: x, signature='(n)->(n)', otypes=['float64']) + r = f([1, 2, 3]) + assert_equal(r.dtype, np.dtype('float64')) + assert_array_equal(r, [1, 2, 3]) + + def test_signature_invalid_inputs(self): + f = vectorize(operator.add, signature='(n),(n)->(n)') + with assert_raises_regex(TypeError, 'wrong number of positional'): + f([1, 2]) + with assert_raises_regex( + ValueError, 'does not have enough dimensions'): + f(1, 2) + with assert_raises_regex( + ValueError, 'inconsistent size for core dimension'): + f([1, 2], [1, 2, 3]) + + f = vectorize(operator.add, signature='()->()') + with assert_raises_regex(TypeError, 'wrong number of positional'): + f(1, 2) + + def test_signature_invalid_outputs(self): + + f = vectorize(lambda x: x[:-1], signature='(n)->(n)') + with assert_raises_regex( + ValueError, 'inconsistent size for core dimension'): + f([1, 2, 3]) + + f = vectorize(lambda x: x, signature='()->(),()') + with assert_raises_regex(ValueError, 'wrong number of outputs'): + f(1) + + f = vectorize(lambda x: (x, x), signature='()->()') + with assert_raises_regex(ValueError, 'wrong number of outputs'): + f([1, 2]) + + def test_size_zero_output(self): + # see issue 5868 + f = np.vectorize(lambda x: x) + x = np.zeros([0, 5], dtype=int) + with assert_raises_regex(ValueError, 'otypes'): + f(x) + + f.otypes = 'i' + assert_array_equal(f(x), x) + + f = np.vectorize(lambda x: x, signature='()->()') + with assert_raises_regex(ValueError, 'otypes'): + f(x) + + f = np.vectorize(lambda x: x, signature='()->()', otypes='i') + assert_array_equal(f(x), x) + + f = np.vectorize(lambda x: x, signature='(n)->(n)', otypes='i') + assert_array_equal(f(x), x) + + f = np.vectorize(lambda x: x, signature='(n)->(n)') + assert_array_equal(f(x.T), x.T) + + f = np.vectorize(lambda x: [x], signature='()->(n)', otypes='i') + with assert_raises_regex(ValueError, 'new output dimensions'): + f(x) + + def test_subclasses(self): + class subclass(np.ndarray): + pass + + m = np.array([[1., 0., 0.], + [0., 0., 1.], + [0., 1., 0.]]).view(subclass) + v = np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]).view(subclass) + # generalized (gufunc) + matvec = np.vectorize(np.matmul, signature='(m,m),(m)->(m)') + r = matvec(m, v) + assert_equal(type(r), subclass) + assert_equal(r, [[1., 3., 2.], [4., 6., 5.], [7., 9., 8.]]) + + # element-wise (ufunc) + mult = np.vectorize(lambda x, y: x * y) + r = mult(m, v) + assert_equal(type(r), subclass) + assert_equal(r, m * v) + + def test_name(self): + # gh-23021 + @np.vectorize + def f2(a, b): + return a + b + + assert f2.__name__ == 'f2' + + def test_decorator(self): + @vectorize + def addsubtract(a, b): + if a > b: + return a - b + else: + return a + b + + r = addsubtract([0, 3, 6, 9], [1, 3, 5, 7]) + assert_array_equal(r, [1, 6, 1, 2]) + + def test_docstring(self): + @vectorize + def f(x): + """Docstring""" + return x + + if sys.flags.optimize < 2: + assert f.__doc__ == "Docstring" + + def test_partial(self): + def foo(x, y): + return x + y + + bar = partial(foo, 3) + vbar = np.vectorize(bar) + assert vbar(1) == 4 + + def test_signature_otypes_decorator(self): + @vectorize(signature='(n)->(n)', otypes=['float64']) + def f(x): + return x + + r = f([1, 2, 3]) + assert_equal(r.dtype, np.dtype('float64')) + assert_array_equal(r, [1, 2, 3]) + assert f.__name__ == 'f' + + def test_bad_input(self): + with assert_raises(TypeError): + A = np.vectorize(pyfunc=3) + + def test_no_keywords(self): + with assert_raises(TypeError): + @np.vectorize("string") + def foo(): + return "bar" + + def test_positional_regression_9477(self): + # This supplies the first keyword argument as a positional, + # to ensure that they are still properly forwarded after the + # enhancement for #9477 + f = vectorize((lambda x: x), ['float64']) + r = f([2]) + assert_equal(r.dtype, np.dtype('float64')) + + def test_datetime_conversion(self): + otype = "datetime64[ns]" + arr = np.array(['2024-01-01', '2024-01-02', '2024-01-03'], + dtype='datetime64[ns]') + assert_array_equal(np.vectorize(lambda x: x, signature="(i)->(j)", + otypes=[otype])(arr), arr) + + +class TestLeaks: + class A: + iters = 20 + + def bound(self, *args): + return 0 + + @staticmethod + def unbound(*args): + return 0 + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + @pytest.mark.skipif(NOGIL_BUILD, + reason=("Functions are immortalized if a thread is " + "launched, making this test flaky")) + @pytest.mark.parametrize('name, incr', [ + ('bound', A.iters), + ('unbound', 0), + ]) + @pytest.mark.thread_unsafe( + reason="test result depends on the reference count of a global object" + ) + def test_frompyfunc_leaks(self, name, incr): + # exposed in gh-11867 as np.vectorized, but the problem stems from + # frompyfunc. + # class.attribute = np.frompyfunc() creates a + # reference cycle if is a bound class method. + # It requires a gc collection cycle to break the cycle. + import gc + A_func = getattr(self.A, name) + gc.disable() + try: + refcount = sys.getrefcount(A_func) + for i in range(self.A.iters): + a = self.A() + a.f = np.frompyfunc(getattr(a, name), 1, 1) + out = a.f(np.arange(10)) + a = None + # A.func is part of a reference cycle if incr is non-zero + assert_equal(sys.getrefcount(A_func), refcount + incr) + for i in range(5): + gc.collect() + assert_equal(sys.getrefcount(A_func), refcount) + finally: + gc.enable() + + +class TestDigitize: + + def test_forward(self): + x = np.arange(-6, 5) + bins = np.arange(-5, 5) + assert_array_equal(digitize(x, bins), np.arange(11)) + + def test_reverse(self): + x = np.arange(5, -6, -1) + bins = np.arange(5, -5, -1) + assert_array_equal(digitize(x, bins), np.arange(11)) + + def test_random(self): + x = rand(10) + bin = np.linspace(x.min(), x.max(), 10) + assert_(np.all(digitize(x, bin) != 0)) + + def test_right_basic(self): + x = [1, 5, 4, 10, 8, 11, 0] + bins = [1, 5, 10] + default_answer = [1, 2, 1, 3, 2, 3, 0] + assert_array_equal(digitize(x, bins), default_answer) + right_answer = [0, 1, 1, 2, 2, 3, 0] + assert_array_equal(digitize(x, bins, True), right_answer) + + def test_right_open(self): + x = np.arange(-6, 5) + bins = np.arange(-6, 4) + assert_array_equal(digitize(x, bins, True), np.arange(11)) + + def test_right_open_reverse(self): + x = np.arange(5, -6, -1) + bins = np.arange(4, -6, -1) + assert_array_equal(digitize(x, bins, True), np.arange(11)) + + def test_right_open_random(self): + x = rand(10) + bins = np.linspace(x.min(), x.max(), 10) + assert_(np.all(digitize(x, bins, True) != 10)) + + def test_monotonic(self): + x = [-1, 0, 1, 2] + bins = [0, 0, 1] + assert_array_equal(digitize(x, bins, False), [0, 2, 3, 3]) + assert_array_equal(digitize(x, bins, True), [0, 0, 2, 3]) + bins = [1, 1, 0] + assert_array_equal(digitize(x, bins, False), [3, 2, 0, 0]) + assert_array_equal(digitize(x, bins, True), [3, 3, 2, 0]) + bins = [1, 1, 1, 1] + assert_array_equal(digitize(x, bins, False), [0, 0, 4, 4]) + assert_array_equal(digitize(x, bins, True), [0, 0, 0, 4]) + bins = [0, 0, 1, 0] + assert_raises(ValueError, digitize, x, bins) + bins = [1, 1, 0, 1] + assert_raises(ValueError, digitize, x, bins) + + def test_casting_error(self): + x = [1, 2, 3 + 1.j] + bins = [1, 2, 3] + assert_raises(TypeError, digitize, x, bins) + x, bins = bins, x + assert_raises(TypeError, digitize, x, bins) + + def test_return_type(self): + # Functions returning indices should always return base ndarrays + class A(np.ndarray): + pass + a = np.arange(5).view(A) + b = np.arange(1, 3).view(A) + assert_(not isinstance(digitize(b, a, False), A)) + assert_(not isinstance(digitize(b, a, True), A)) + + def test_large_integers_increasing(self): + # gh-11022 + x = 2**54 # loses precision in a float + assert_equal(np.digitize(x, [x - 1, x + 1]), 1) + + @pytest.mark.xfail( + reason="gh-11022: np._core.multiarray._monoticity loses precision") + def test_large_integers_decreasing(self): + # gh-11022 + x = 2**54 # loses precision in a float + assert_equal(np.digitize(x, [x + 1, x - 1]), 1) + + +class TestUnwrap: + + def test_simple(self): + # check that unwrap removes jumps greater that 2*pi + assert_array_equal(unwrap([1, 1 + 2 * np.pi]), [1, 1]) + # check that unwrap maintains continuity + assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi)) + + def test_period(self): + # check that unwrap removes jumps greater that 255 + assert_array_equal(unwrap([1, 1 + 256], period=255), [1, 2]) + # check that unwrap maintains continuity + assert_(np.all(diff(unwrap(rand(10) * 1000, period=255)) < 255)) + # check simple case + simple_seq = np.array([0, 75, 150, 225, 300]) + wrap_seq = np.mod(simple_seq, 255) + assert_array_equal(unwrap(wrap_seq, period=255), simple_seq) + # check custom discont value + uneven_seq = np.array([0, 75, 150, 225, 300, 430]) + wrap_uneven = np.mod(uneven_seq, 250) + no_discont = unwrap(wrap_uneven, period=250) + assert_array_equal(no_discont, [0, 75, 150, 225, 300, 180]) + sm_discont = unwrap(wrap_uneven, period=250, discont=140) + assert_array_equal(sm_discont, [0, 75, 150, 225, 300, 430]) + assert sm_discont.dtype == wrap_uneven.dtype + + +@pytest.mark.parametrize( + "dtype", "O" + np.typecodes["AllInteger"] + np.typecodes["Float"] +) +@pytest.mark.parametrize("M", [0, 1, 10]) +class TestFilterwindows: + + def test_hanning(self, dtype: str, M: int) -> None: + scalar = np.array(M, dtype=dtype)[()] + + w = hanning(scalar) + if dtype == "O": + ref_dtype = np.float64 + else: + ref_dtype = np.result_type(scalar.dtype, np.float64) + assert w.dtype == ref_dtype + + # check symmetry + assert_equal(w, flipud(w)) + + # check known value + if scalar < 1: + assert_array_equal(w, np.array([])) + elif scalar == 1: + assert_array_equal(w, np.ones(1)) + else: + assert_almost_equal(np.sum(w, axis=0), 4.500, 4) + + def test_hamming(self, dtype: str, M: int) -> None: + scalar = np.array(M, dtype=dtype)[()] + + w = hamming(scalar) + if dtype == "O": + ref_dtype = np.float64 + else: + ref_dtype = np.result_type(scalar.dtype, np.float64) + assert w.dtype == ref_dtype + + # check symmetry + assert_equal(w, flipud(w)) + + # check known value + if scalar < 1: + assert_array_equal(w, np.array([])) + elif scalar == 1: + assert_array_equal(w, np.ones(1)) + else: + assert_almost_equal(np.sum(w, axis=0), 4.9400, 4) + + def test_bartlett(self, dtype: str, M: int) -> None: + scalar = np.array(M, dtype=dtype)[()] + + w = bartlett(scalar) + if dtype == "O": + ref_dtype = np.float64 + else: + ref_dtype = np.result_type(scalar.dtype, np.float64) + assert w.dtype == ref_dtype + + # check symmetry + assert_equal(w, flipud(w)) + + # check known value + if scalar < 1: + assert_array_equal(w, np.array([])) + elif scalar == 1: + assert_array_equal(w, np.ones(1)) + else: + assert_almost_equal(np.sum(w, axis=0), 4.4444, 4) + + def test_blackman(self, dtype: str, M: int) -> None: + scalar = np.array(M, dtype=dtype)[()] + + w = blackman(scalar) + if dtype == "O": + ref_dtype = np.float64 + else: + ref_dtype = np.result_type(scalar.dtype, np.float64) + assert w.dtype == ref_dtype + + # check symmetry + assert_equal(w, flipud(w)) + + # check known value + if scalar < 1: + assert_array_equal(w, np.array([])) + elif scalar == 1: + assert_array_equal(w, np.ones(1)) + else: + assert_almost_equal(np.sum(w, axis=0), 3.7800, 4) + + def test_kaiser(self, dtype: str, M: int) -> None: + scalar = np.array(M, dtype=dtype)[()] + + w = kaiser(scalar, 0) + if dtype == "O": + ref_dtype = np.float64 + else: + ref_dtype = np.result_type(scalar.dtype, np.float64) + assert w.dtype == ref_dtype + + # check symmetry + assert_equal(w, flipud(w)) + + # check known value + if scalar < 1: + assert_array_equal(w, np.array([])) + elif scalar == 1: + assert_array_equal(w, np.ones(1)) + else: + assert_almost_equal(np.sum(w, axis=0), 10, 15) + + +class TestTrapezoid: + + def test_simple(self): + x = np.arange(-10, 10, .1) + r = trapezoid(np.exp(-.5 * x ** 2) / np.sqrt(2 * np.pi), dx=0.1) + # check integral of normal equals 1 + assert_almost_equal(r, 1, 7) + + def test_ndim(self): + x = np.linspace(0, 1, 3) + y = np.linspace(0, 2, 8) + z = np.linspace(0, 3, 13) + + wx = np.ones_like(x) * (x[1] - x[0]) + wx[0] /= 2 + wx[-1] /= 2 + wy = np.ones_like(y) * (y[1] - y[0]) + wy[0] /= 2 + wy[-1] /= 2 + wz = np.ones_like(z) * (z[1] - z[0]) + wz[0] /= 2 + wz[-1] /= 2 + + q = x[:, None, None] + y[None, :, None] + z[None, None, :] + + qx = (q * wx[:, None, None]).sum(axis=0) + qy = (q * wy[None, :, None]).sum(axis=1) + qz = (q * wz[None, None, :]).sum(axis=2) + + # n-d `x` + r = trapezoid(q, x=x[:, None, None], axis=0) + assert_almost_equal(r, qx) + r = trapezoid(q, x=y[None, :, None], axis=1) + assert_almost_equal(r, qy) + r = trapezoid(q, x=z[None, None, :], axis=2) + assert_almost_equal(r, qz) + + # 1-d `x` + r = trapezoid(q, x=x, axis=0) + assert_almost_equal(r, qx) + r = trapezoid(q, x=y, axis=1) + assert_almost_equal(r, qy) + r = trapezoid(q, x=z, axis=2) + assert_almost_equal(r, qz) + + def test_masked(self): + # Testing that masked arrays behave as if the function is 0 where + # masked + x = np.arange(5) + y = x * x + mask = x == 2 + ym = np.ma.array(y, mask=mask) + r = 13.0 # sum(0.5 * (0 + 1) * 1.0 + 0.5 * (9 + 16)) + assert_almost_equal(trapezoid(ym, x), r) + + xm = np.ma.array(x, mask=mask) + assert_almost_equal(trapezoid(ym, xm), r) + + xm = np.ma.array(x, mask=mask) + assert_almost_equal(trapezoid(y, xm), r) + + +class TestSinc: + + def test_simple(self): + assert_(sinc(0) == 1) + w = sinc(np.linspace(-1, 1, 100)) + # check symmetry + assert_array_almost_equal(w, flipud(w), 7) + + def test_array_like(self): + x = [0, 0.5] + y1 = sinc(np.array(x)) + y2 = sinc(list(x)) + y3 = sinc(tuple(x)) + assert_array_equal(y1, y2) + assert_array_equal(y1, y3) + + def test_bool_dtype(self): + x = (np.arange(4, dtype=np.uint8) % 2 == 1) + actual = sinc(x) + expected = sinc(x.astype(np.float64)) + assert_allclose(actual, expected) + assert actual.dtype == np.float64 + + @pytest.mark.parametrize('dtype', [np.uint8, np.int16, np.uint64]) + def test_int_dtypes(self, dtype): + x = np.arange(4, dtype=dtype) + actual = sinc(x) + expected = sinc(x.astype(np.float64)) + assert_allclose(actual, expected) + assert actual.dtype == np.float64 + + @pytest.mark.parametrize( + 'dtype', + [np.float16, np.float32, np.longdouble, np.complex64, np.complex128] + ) + def test_float_dtypes(self, dtype): + x = np.arange(4, dtype=dtype) + assert sinc(x).dtype == x.dtype + + def test_float16_underflow(self): + x = np.float16(0) + # before gh-27784, fill value for 0 in input would underflow float16, + # resulting in nan + assert_array_equal(sinc(x), np.asarray(1.0)) + + +class TestUnique: + + def test_simple(self): + x = np.array([4, 3, 2, 1, 1, 2, 3, 4, 0]) + assert_(np.all(unique(x) == [0, 1, 2, 3, 4])) + assert_(unique(np.array([1, 1, 1, 1, 1])) == np.array([1])) + x = ['widget', 'ham', 'foo', 'bar', 'foo', 'ham'] + assert_(np.all(unique(x) == ['bar', 'foo', 'ham', 'widget'])) + x = np.array([5 + 6j, 1 + 1j, 1 + 10j, 10, 5 + 6j]) + assert_(np.all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10])) + + +class TestCheckFinite: + + def test_simple(self): + a = [1, 2, 3] + b = [1, 2, np.inf] + c = [1, 2, np.nan] + np.asarray_chkfinite(a) + assert_raises(ValueError, np.asarray_chkfinite, b) + assert_raises(ValueError, np.asarray_chkfinite, c) + + def test_dtype_order(self): + # Regression test for missing dtype and order arguments + a = [1, 2, 3] + a = np.asarray_chkfinite(a, order='F', dtype=np.float64) + assert_(a.dtype == np.float64) + + +class TestCorrCoef: + A = np.array( + [[0.15391142, 0.18045767, 0.14197213], + [0.70461506, 0.96474128, 0.27906989], + [0.9297531, 0.32296769, 0.19267156]]) + B = np.array( + [[0.10377691, 0.5417086, 0.49807457], + [0.82872117, 0.77801674, 0.39226705], + [0.9314666, 0.66800209, 0.03538394]]) + res1 = np.array( + [[1., 0.9379533, -0.04931983], + [0.9379533, 1., 0.30007991], + [-0.04931983, 0.30007991, 1.]]) + res2 = np.array( + [[1., 0.9379533, -0.04931983, 0.30151751, 0.66318558, 0.51532523], + [0.9379533, 1., 0.30007991, -0.04781421, 0.88157256, 0.78052386], + [-0.04931983, 0.30007991, 1., -0.96717111, 0.71483595, 0.83053601], + [0.30151751, -0.04781421, -0.96717111, 1., -0.51366032, -0.66173113], + [0.66318558, 0.88157256, 0.71483595, -0.51366032, 1., 0.98317823], + [0.51532523, 0.78052386, 0.83053601, -0.66173113, 0.98317823, 1.]]) + + def test_non_array(self): + assert_almost_equal(np.corrcoef([0, 1, 0], [1, 0, 1]), + [[1., -1.], [-1., 1.]]) + + def test_simple(self): + tgt1 = corrcoef(self.A) + assert_almost_equal(tgt1, self.res1) + assert_(np.all(np.abs(tgt1) <= 1.0)) + + tgt2 = corrcoef(self.A, self.B) + assert_almost_equal(tgt2, self.res2) + assert_(np.all(np.abs(tgt2) <= 1.0)) + + def test_complex(self): + x = np.array([[1, 2, 3], [1j, 2j, 3j]]) + res = corrcoef(x) + tgt = np.array([[1., -1.j], [1.j, 1.]]) + assert_allclose(res, tgt) + assert_(np.all(np.abs(res) <= 1.0)) + + def test_xy(self): + x = np.array([[1, 2, 3]]) + y = np.array([[1j, 2j, 3j]]) + assert_allclose(np.corrcoef(x, y), np.array([[1., -1.j], [1.j, 1.]])) + + def test_empty(self): + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + assert_array_equal(corrcoef(np.array([])), np.nan) + assert_array_equal(corrcoef(np.array([]).reshape(0, 2)), + np.array([]).reshape(0, 0)) + assert_array_equal(corrcoef(np.array([]).reshape(2, 0)), + np.array([[np.nan, np.nan], [np.nan, np.nan]])) + + def test_extreme(self): + x = [[1e-100, 1e100], [1e100, 1e-100]] + with np.errstate(all='raise'): + c = corrcoef(x) + assert_array_almost_equal(c, np.array([[1., -1.], [-1., 1.]])) + assert_(np.all(np.abs(c) <= 1.0)) + + @pytest.mark.parametrize("test_type", np_floats) + def test_corrcoef_dtype(self, test_type): + cast_A = self.A.astype(test_type) + res = corrcoef(cast_A, dtype=test_type) + assert test_type == res.dtype + + +class TestCov: + x1 = np.array([[0, 2], [1, 1], [2, 0]]).T + res1 = np.array([[1., -1.], [-1., 1.]]) + x2 = np.array([0.0, 1.0, 2.0], ndmin=2) + frequencies = np.array([1, 4, 1]) + x2_repeats = np.array([[0.0], [1.0], [1.0], [1.0], [1.0], [2.0]]).T + res2 = np.array([[0.4, -0.4], [-0.4, 0.4]]) + unit_frequencies = np.ones(3, dtype=np.int_) + weights = np.array([1.0, 4.0, 1.0]) + res3 = np.array([[2. / 3., -2. / 3.], [-2. / 3., 2. / 3.]]) + unit_weights = np.ones(3) + x3 = np.array([0.3942, 0.5969, 0.7730, 0.9918, 0.7964]) + + def test_basic(self): + assert_allclose(cov(self.x1), self.res1) + + def test_complex(self): + x = np.array([[1, 2, 3], [1j, 2j, 3j]]) + res = np.array([[1., -1.j], [1.j, 1.]]) + assert_allclose(cov(x), res) + assert_allclose(cov(x, aweights=np.ones(3)), res) + + def test_xy(self): + x = np.array([[1, 2, 3]]) + y = np.array([[1j, 2j, 3j]]) + assert_allclose(cov(x, y), np.array([[1., -1.j], [1.j, 1.]])) + + def test_empty(self): + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + assert_array_equal(cov(np.array([])), np.nan) + assert_array_equal(cov(np.array([]).reshape(0, 2)), + np.array([]).reshape(0, 0)) + assert_array_equal(cov(np.array([]).reshape(2, 0)), + np.array([[np.nan, np.nan], [np.nan, np.nan]])) + + def test_wrong_ddof(self): + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + assert_array_equal(cov(self.x1, ddof=5), + np.array([[np.inf, -np.inf], + [-np.inf, np.inf]])) + + def test_1D_rowvar(self): + assert_allclose(cov(self.x3), cov(self.x3, rowvar=False)) + y = np.array([0.0780, 0.3107, 0.2111, 0.0334, 0.8501]) + assert_allclose(cov(self.x3, y), cov(self.x3, y, rowvar=False)) + + def test_1D_variance(self): + assert_allclose(cov(self.x3, ddof=1), np.var(self.x3, ddof=1)) + + def test_fweights(self): + assert_allclose(cov(self.x2, fweights=self.frequencies), + cov(self.x2_repeats)) + assert_allclose(cov(self.x1, fweights=self.frequencies), + self.res2) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies), + self.res1) + nonint = self.frequencies + 0.5 + assert_raises(TypeError, cov, self.x1, fweights=nonint) + f = np.ones((2, 3), dtype=np.int_) + assert_raises(RuntimeError, cov, self.x1, fweights=f) + f = np.ones(2, dtype=np.int_) + assert_raises(RuntimeError, cov, self.x1, fweights=f) + f = -1 * np.ones(3, dtype=np.int_) + assert_raises(ValueError, cov, self.x1, fweights=f) + + def test_aweights(self): + assert_allclose(cov(self.x1, aweights=self.weights), self.res3) + assert_allclose(cov(self.x1, aweights=3.0 * self.weights), + cov(self.x1, aweights=self.weights)) + assert_allclose(cov(self.x1, aweights=self.unit_weights), self.res1) + w = np.ones((2, 3)) + assert_raises(RuntimeError, cov, self.x1, aweights=w) + w = np.ones(2) + assert_raises(RuntimeError, cov, self.x1, aweights=w) + w = -1.0 * np.ones(3) + assert_raises(ValueError, cov, self.x1, aweights=w) + + def test_unit_fweights_and_aweights(self): + assert_allclose(cov(self.x2, fweights=self.frequencies, + aweights=self.unit_weights), + cov(self.x2_repeats)) + assert_allclose(cov(self.x1, fweights=self.frequencies, + aweights=self.unit_weights), + self.res2) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies, + aweights=self.unit_weights), + self.res1) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies, + aweights=self.weights), + self.res3) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies, + aweights=3.0 * self.weights), + cov(self.x1, aweights=self.weights)) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies, + aweights=self.unit_weights), + self.res1) + + @pytest.mark.parametrize("test_type", np_floats) + def test_cov_dtype(self, test_type): + cast_x1 = self.x1.astype(test_type) + res = cov(cast_x1, dtype=test_type) + assert test_type == res.dtype + + def test_gh_27658(self): + x = np.ones((3, 1)) + expected = np.cov(x, ddof=0, rowvar=True) + actual = np.cov(x.T, ddof=0, rowvar=False) + assert_allclose(actual, expected, strict=True) + + +class Test_I0: + + def test_simple(self): + assert_almost_equal( + i0(0.5), + np.array(1.0634833707413234)) + + # need at least one test above 8, as the implementation is piecewise + A = np.array([0.49842636, 0.6969809, 0.22011976, 0.0155549, 10.0]) + expected = np.array([1.06307822, 1.12518299, 1.01214991, + 1.00006049, 2815.71662847]) + assert_almost_equal(i0(A), expected) + assert_almost_equal(i0(-A), expected) + + B = np.array([[0.827002, 0.99959078], + [0.89694769, 0.39298162], + [0.37954418, 0.05206293], + [0.36465447, 0.72446427], + [0.48164949, 0.50324519]]) + assert_almost_equal( + i0(B), + np.array([[1.17843223, 1.26583466], + [1.21147086, 1.03898290], + [1.03633899, 1.00067775], + [1.03352052, 1.13557954], + [1.05884290, 1.06432317]])) + # Regression test for gh-11205 + i0_0 = np.i0([0.]) + assert_equal(i0_0.shape, (1,)) + assert_array_equal(np.i0([0.]), np.array([1.])) + + def test_non_array(self): + a = np.arange(4) + + class array_like: + __array_interface__ = a.__array_interface__ + + def __array_wrap__(self, arr, context, return_scalar): + return self + + # E.g. pandas series survive ufunc calls through array-wrap: + assert isinstance(np.abs(array_like()), array_like) + exp = np.i0(a) + res = np.i0(array_like()) + + assert_array_equal(exp, res) + + def test_complex(self): + a = np.array([0, 1 + 2j]) + with pytest.raises(TypeError, match="i0 not supported for complex values"): + res = i0(a) + + +class TestKaiser: + + def test_simple(self): + assert_(np.isfinite(kaiser(1, 1.0))) + assert_almost_equal(kaiser(0, 1.0), + np.array([])) + assert_almost_equal(kaiser(2, 1.0), + np.array([0.78984831, 0.78984831])) + assert_almost_equal(kaiser(5, 1.0), + np.array([0.78984831, 0.94503323, 1., + 0.94503323, 0.78984831])) + assert_almost_equal(kaiser(5, 1.56789), + np.array([0.58285404, 0.88409679, 1., + 0.88409679, 0.58285404])) + + def test_int_beta(self): + kaiser(3, 4) + + +class TestMeshgrid: + + def test_simple(self): + [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7]) + assert_array_equal(X, np.array([[1, 2, 3], + [1, 2, 3], + [1, 2, 3], + [1, 2, 3]])) + assert_array_equal(Y, np.array([[4, 4, 4], + [5, 5, 5], + [6, 6, 6], + [7, 7, 7]])) + + def test_single_input(self): + [X] = meshgrid([1, 2, 3, 4]) + assert_array_equal(X, np.array([1, 2, 3, 4])) + + def test_no_input(self): + args = [] + assert_array_equal([], meshgrid(*args)) + assert_array_equal([], meshgrid(*args, copy=False)) + + def test_indexing(self): + x = [1, 2, 3] + y = [4, 5, 6, 7] + [X, Y] = meshgrid(x, y, indexing='ij') + assert_array_equal(X, np.array([[1, 1, 1, 1], + [2, 2, 2, 2], + [3, 3, 3, 3]])) + assert_array_equal(Y, np.array([[4, 5, 6, 7], + [4, 5, 6, 7], + [4, 5, 6, 7]])) + + # Test expected shapes: + z = [8, 9] + assert_(meshgrid(x, y)[0].shape == (4, 3)) + assert_(meshgrid(x, y, indexing='ij')[0].shape == (3, 4)) + assert_(meshgrid(x, y, z)[0].shape == (4, 3, 2)) + assert_(meshgrid(x, y, z, indexing='ij')[0].shape == (3, 4, 2)) + + assert_raises(ValueError, meshgrid, x, y, indexing='notvalid') + + def test_sparse(self): + [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7], sparse=True) + assert_array_equal(X, np.array([[1, 2, 3]])) + assert_array_equal(Y, np.array([[4], [5], [6], [7]])) + + def test_invalid_arguments(self): + # Test that meshgrid complains about invalid arguments + # Regression test for issue #4755: + # https://github.com/numpy/numpy/issues/4755 + assert_raises(TypeError, meshgrid, + [1, 2, 3], [4, 5, 6, 7], indices='ij') + + def test_return_type(self): + # Test for appropriate dtype in returned arrays. + # Regression test for issue #5297 + # https://github.com/numpy/numpy/issues/5297 + x = np.arange(0, 10, dtype=np.float32) + y = np.arange(10, 20, dtype=np.float64) + + X, Y = np.meshgrid(x, y) + + assert_(X.dtype == x.dtype) + assert_(Y.dtype == y.dtype) + + # copy + X, Y = np.meshgrid(x, y, copy=True) + + assert_(X.dtype == x.dtype) + assert_(Y.dtype == y.dtype) + + # sparse + X, Y = np.meshgrid(x, y, sparse=True) + + assert_(X.dtype == x.dtype) + assert_(Y.dtype == y.dtype) + + def test_writeback(self): + # Issue 8561 + X = np.array([1.1, 2.2]) + Y = np.array([3.3, 4.4]) + x, y = np.meshgrid(X, Y, sparse=False, copy=True) + + x[0, :] = 0 + assert_equal(x[0, :], 0) + assert_equal(x[1, :], X) + + def test_nd_shape(self): + a, b, c, d, e = np.meshgrid(*([0] * i for i in range(1, 6))) + expected_shape = (2, 1, 3, 4, 5) + assert_equal(a.shape, expected_shape) + assert_equal(b.shape, expected_shape) + assert_equal(c.shape, expected_shape) + assert_equal(d.shape, expected_shape) + assert_equal(e.shape, expected_shape) + + def test_nd_values(self): + a, b, c = np.meshgrid([0], [1, 2], [3, 4, 5]) + assert_equal(a, [[[0, 0, 0]], [[0, 0, 0]]]) + assert_equal(b, [[[1, 1, 1]], [[2, 2, 2]]]) + assert_equal(c, [[[3, 4, 5]], [[3, 4, 5]]]) + + def test_nd_indexing(self): + a, b, c = np.meshgrid([0], [1, 2], [3, 4, 5], indexing='ij') + assert_equal(a, [[[0, 0, 0], [0, 0, 0]]]) + assert_equal(b, [[[1, 1, 1], [2, 2, 2]]]) + assert_equal(c, [[[3, 4, 5], [3, 4, 5]]]) + + +class TestPiecewise: + + def test_simple(self): + # Condition is single bool list + x = piecewise([0, 0], [True, False], [1]) + assert_array_equal(x, [1, 0]) + + # List of conditions: single bool list + x = piecewise([0, 0], [[True, False]], [1]) + assert_array_equal(x, [1, 0]) + + # Conditions is single bool array + x = piecewise([0, 0], np.array([True, False]), [1]) + assert_array_equal(x, [1, 0]) + + # Condition is single int array + x = piecewise([0, 0], np.array([1, 0]), [1]) + assert_array_equal(x, [1, 0]) + + # List of conditions: int array + x = piecewise([0, 0], [np.array([1, 0])], [1]) + assert_array_equal(x, [1, 0]) + + x = piecewise([0, 0], [[False, True]], [lambda x:-1]) + assert_array_equal(x, [0, -1]) + + assert_raises_regex(ValueError, '1 or 2 functions are expected', + piecewise, [0, 0], [[False, True]], []) + assert_raises_regex(ValueError, '1 or 2 functions are expected', + piecewise, [0, 0], [[False, True]], [1, 2, 3]) + + def test_two_conditions(self): + x = piecewise([1, 2], [[True, False], [False, True]], [3, 4]) + assert_array_equal(x, [3, 4]) + + def test_scalar_domains_three_conditions(self): + x = piecewise(3, [True, False, False], [4, 2, 0]) + assert_equal(x, 4) + + def test_default(self): + # No value specified for x[1], should be 0 + x = piecewise([1, 2], [True, False], [2]) + assert_array_equal(x, [2, 0]) + + # Should set x[1] to 3 + x = piecewise([1, 2], [True, False], [2, 3]) + assert_array_equal(x, [2, 3]) + + def test_0d(self): + x = np.array(3) + y = piecewise(x, x > 3, [4, 0]) + assert_(y.ndim == 0) + assert_(y == 0) + + x = 5 + y = piecewise(x, [True, False], [1, 0]) + assert_(y.ndim == 0) + assert_(y == 1) + + # With 3 ranges (It was failing, before) + y = piecewise(x, [False, False, True], [1, 2, 3]) + assert_array_equal(y, 3) + + def test_0d_comparison(self): + x = 3 + y = piecewise(x, [x <= 3, x > 3], [4, 0]) # Should succeed. + assert_equal(y, 4) + + # With 3 ranges (It was failing, before) + x = 4 + y = piecewise(x, [x <= 3, (x > 3) * (x <= 5), x > 5], [1, 2, 3]) + assert_array_equal(y, 2) + + assert_raises_regex(ValueError, '2 or 3 functions are expected', + piecewise, x, [x <= 3, x > 3], [1]) + assert_raises_regex(ValueError, '2 or 3 functions are expected', + piecewise, x, [x <= 3, x > 3], [1, 1, 1, 1]) + + def test_0d_0d_condition(self): + x = np.array(3) + c = np.array(x > 3) + y = piecewise(x, [c], [1, 2]) + assert_equal(y, 2) + + def test_multidimensional_extrafunc(self): + x = np.array([[-2.5, -1.5, -0.5], + [0.5, 1.5, 2.5]]) + y = piecewise(x, [x < 0, x >= 2], [-1, 1, 3]) + assert_array_equal(y, np.array([[-1., -1., -1.], + [3., 3., 1.]])) + + def test_subclasses(self): + class subclass(np.ndarray): + pass + x = np.arange(5.).view(subclass) + r = piecewise(x, [x < 2., x >= 4], [-1., 1., 0.]) + assert_equal(type(r), subclass) + assert_equal(r, [-1., -1., 0., 0., 1.]) + + +class TestBincount: + + def test_simple(self): + y = np.bincount(np.arange(4)) + assert_array_equal(y, np.ones(4)) + + def test_simple2(self): + y = np.bincount(np.array([1, 5, 2, 4, 1])) + assert_array_equal(y, np.array([0, 2, 1, 0, 1, 1])) + + def test_simple_weight(self): + x = np.arange(4) + w = np.array([0.2, 0.3, 0.5, 0.1]) + y = np.bincount(x, w) + assert_array_equal(y, w) + + def test_simple_weight2(self): + x = np.array([1, 2, 4, 5, 2]) + w = np.array([0.2, 0.3, 0.5, 0.1, 0.2]) + y = np.bincount(x, w) + assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1])) + + def test_with_minlength(self): + x = np.array([0, 1, 0, 1, 1]) + y = np.bincount(x, minlength=3) + assert_array_equal(y, np.array([2, 3, 0])) + x = [] + y = np.bincount(x, minlength=0) + assert_array_equal(y, np.array([])) + + def test_with_minlength_smaller_than_maxvalue(self): + x = np.array([0, 1, 1, 2, 2, 3, 3]) + y = np.bincount(x, minlength=2) + assert_array_equal(y, np.array([1, 2, 2, 2])) + y = np.bincount(x, minlength=0) + assert_array_equal(y, np.array([1, 2, 2, 2])) + + def test_with_minlength_and_weights(self): + x = np.array([1, 2, 4, 5, 2]) + w = np.array([0.2, 0.3, 0.5, 0.1, 0.2]) + y = np.bincount(x, w, 8) + assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1, 0, 0])) + + def test_empty(self): + x = np.array([], dtype=int) + y = np.bincount(x) + assert_array_equal(x, y) + + def test_empty_with_minlength(self): + x = np.array([], dtype=int) + y = np.bincount(x, minlength=5) + assert_array_equal(y, np.zeros(5, dtype=int)) + + @pytest.mark.parametrize('minlength', [0, 3]) + def test_empty_list(self, minlength): + assert_array_equal(np.bincount([], minlength=minlength), + np.zeros(minlength, dtype=int)) + + def test_with_incorrect_minlength(self): + x = np.array([], dtype=int) + assert_raises_regex(TypeError, + "'str' object cannot be interpreted", + lambda: np.bincount(x, minlength="foobar")) + assert_raises_regex(ValueError, + "must not be negative", + lambda: np.bincount(x, minlength=-1)) + + x = np.arange(5) + assert_raises_regex(TypeError, + "'str' object cannot be interpreted", + lambda: np.bincount(x, minlength="foobar")) + assert_raises_regex(ValueError, + "must not be negative", + lambda: np.bincount(x, minlength=-1)) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_dtype_reference_leaks(self): + # gh-6805 + intp_refcount = sys.getrefcount(np.dtype(np.intp)) + double_refcount = sys.getrefcount(np.dtype(np.double)) + + for j in range(10): + np.bincount([1, 2, 3]) + assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount) + assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount) + + for j in range(10): + np.bincount([1, 2, 3], [4, 5, 6]) + assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount) + assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount) + + @pytest.mark.parametrize("vals", [[[2, 2]], 2]) + def test_error_not_1d(self, vals): + # Test that values has to be 1-D (both as array and nested list) + vals_arr = np.asarray(vals) + with assert_raises(ValueError): + np.bincount(vals_arr) + with assert_raises(ValueError): + np.bincount(vals) + + @pytest.mark.parametrize("dt", np.typecodes["AllInteger"]) + def test_gh_28354(self, dt): + a = np.array([0, 1, 1, 3, 2, 1, 7], dtype=dt) + actual = np.bincount(a) + expected = [1, 3, 1, 1, 0, 0, 0, 1] + assert_array_equal(actual, expected) + + def test_contiguous_handling(self): + # check for absence of hard crash + np.bincount(np.arange(10000)[::2]) + + def test_gh_28354_array_like(self): + class A: + def __array__(self): + return np.array([0, 1, 1, 3, 2, 1, 7], dtype=np.uint64) + + a = A() + actual = np.bincount(a) + expected = [1, 3, 1, 1, 0, 0, 0, 1] + assert_array_equal(actual, expected) + + +class TestInterp: + + def test_exceptions(self): + assert_raises(ValueError, interp, 0, [], []) + assert_raises(ValueError, interp, 0, [0], [1, 2]) + assert_raises(ValueError, interp, 0, [0, 1], [1, 2], period=0) + assert_raises(ValueError, interp, 0, [], [], period=360) + assert_raises(ValueError, interp, 0, [0], [1, 2], period=360) + + def test_basic(self): + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + x0 = np.linspace(0, 1, 50) + assert_almost_equal(np.interp(x0, x, y), x0) + + def test_right_left_behavior(self): + # Needs range of sizes to test different code paths. + # size ==1 is special cased, 1 < size < 5 is linear search, and + # size >= 5 goes through local search and possibly binary search. + for size in range(1, 10): + xp = np.arange(size, dtype=np.double) + yp = np.ones(size, dtype=np.double) + incpts = np.array([-1, 0, size - 1, size], dtype=np.double) + decpts = incpts[::-1] + + incres = interp(incpts, xp, yp) + decres = interp(decpts, xp, yp) + inctgt = np.array([1, 1, 1, 1], dtype=float) + dectgt = inctgt[::-1] + assert_equal(incres, inctgt) + assert_equal(decres, dectgt) + + incres = interp(incpts, xp, yp, left=0) + decres = interp(decpts, xp, yp, left=0) + inctgt = np.array([0, 1, 1, 1], dtype=float) + dectgt = inctgt[::-1] + assert_equal(incres, inctgt) + assert_equal(decres, dectgt) + + incres = interp(incpts, xp, yp, right=2) + decres = interp(decpts, xp, yp, right=2) + inctgt = np.array([1, 1, 1, 2], dtype=float) + dectgt = inctgt[::-1] + assert_equal(incres, inctgt) + assert_equal(decres, dectgt) + + incres = interp(incpts, xp, yp, left=0, right=2) + decres = interp(decpts, xp, yp, left=0, right=2) + inctgt = np.array([0, 1, 1, 2], dtype=float) + dectgt = inctgt[::-1] + assert_equal(incres, inctgt) + assert_equal(decres, dectgt) + + def test_scalar_interpolation_point(self): + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + x0 = 0 + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = .3 + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = np.float32(.3) + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = np.float64(.3) + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = np.nan + assert_almost_equal(np.interp(x0, x, y), x0) + + def test_non_finite_behavior_exact_x(self): + x = [1, 2, 2.5, 3, 4] + xp = [1, 2, 3, 4] + fp = [1, 2, np.inf, 4] + assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.inf, np.inf, 4]) + fp = [1, 2, np.nan, 4] + assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.nan, np.nan, 4]) + + @pytest.fixture(params=[ + np.float64, + lambda x: _make_complex(x, 0), + lambda x: _make_complex(0, x), + lambda x: _make_complex(x, np.multiply(x, -2)) + ], ids=[ + 'real', + 'complex-real', + 'complex-imag', + 'complex-both' + ]) + def sc(self, request): + """ scale function used by the below tests """ + return request.param + + def test_non_finite_any_nan(self, sc): + """ test that nans are propagated """ + assert_equal(np.interp(0.5, [np.nan, 1], sc([ 0, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, np.nan], sc([ 0, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([np.nan, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([ 0, np.nan])), sc(np.nan)) + + def test_non_finite_inf(self, sc): + """ Test that interp between opposite infs gives nan """ + inf = np.inf + nan = np.nan + assert_equal(np.interp(0.5, [-inf, +inf], sc([ 0, 10])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([-inf, +inf])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([+inf, -inf])), sc(nan)) + + # unless the y values are equal + assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 10, 10])), sc(10)) + + def test_non_finite_half_inf_xf(self, sc): + """ Test that interp where both axes have a bound at inf gives nan """ + inf = np.inf + nan = np.nan + assert_equal(np.interp(0.5, [-inf, 1], sc([-inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, 1], sc([+inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, 1], sc([ 0, -inf])), sc(nan)) + assert_equal(np.interp(0.5, [-inf, 1], sc([ 0, +inf])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([-inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([+inf, 10])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([ 0, -inf])), sc(nan)) + assert_equal(np.interp(0.5, [ 0, +inf], sc([ 0, +inf])), sc(nan)) + + def test_non_finite_half_inf_x(self, sc): + """ Test interp where the x axis has a bound at inf """ + assert_equal(np.interp(0.5, [-np.inf, -np.inf], sc([0, 10])), sc(10)) + assert_equal(np.interp(0.5, [-np.inf, 1 ], sc([0, 10])), sc(10)) # noqa: E202 + assert_equal(np.interp(0.5, [ 0, +np.inf], sc([0, 10])), sc(0)) + assert_equal(np.interp(0.5, [+np.inf, +np.inf], sc([0, 10])), sc(0)) + + def test_non_finite_half_inf_f(self, sc): + """ Test interp where the f axis has a bound at inf """ + assert_equal(np.interp(0.5, [0, 1], sc([ 0, -np.inf])), sc(-np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([ 0, +np.inf])), sc(+np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([-np.inf, 10])), sc(-np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([+np.inf, 10])), sc(+np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([-np.inf, -np.inf])), sc(-np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([+np.inf, +np.inf])), sc(+np.inf)) + + def test_complex_interp(self): + # test complex interpolation + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + (1 + np.linspace(0, 1, 5)) * 1.0j + x0 = 0.3 + y0 = x0 + (1 + x0) * 1.0j + assert_almost_equal(np.interp(x0, x, y), y0) + # test complex left and right + x0 = -1 + left = 2 + 3.0j + assert_almost_equal(np.interp(x0, x, y, left=left), left) + x0 = 2.0 + right = 2 + 3.0j + assert_almost_equal(np.interp(x0, x, y, right=right), right) + # test complex non finite + x = [1, 2, 2.5, 3, 4] + xp = [1, 2, 3, 4] + fp = [1, 2 + 1j, np.inf, 4] + y = [1, 2 + 1j, np.inf + 0.5j, np.inf, 4] + assert_almost_equal(np.interp(x, xp, fp), y) + # test complex periodic + x = [-180, -170, -185, 185, -10, -5, 0, 365] + xp = [190, -190, 350, -350] + fp = [5 + 1.0j, 10 + 2j, 3 + 3j, 4 + 4j] + y = [7.5 + 1.5j, 5. + 1.0j, 8.75 + 1.75j, 6.25 + 1.25j, 3. + 3j, 3.25 + 3.25j, + 3.5 + 3.5j, 3.75 + 3.75j] + assert_almost_equal(np.interp(x, xp, fp, period=360), y) + + def test_zero_dimensional_interpolation_point(self): + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + x0 = np.array(.3) + assert_almost_equal(np.interp(x0, x, y), x0) + + xp = np.array([0, 2, 4]) + fp = np.array([1, -1, 1]) + + actual = np.interp(np.array(1), xp, fp) + assert_equal(actual, 0) + assert_(isinstance(actual, np.float64)) + + actual = np.interp(np.array(4.5), xp, fp, period=4) + assert_equal(actual, 0.5) + assert_(isinstance(actual, np.float64)) + + def test_if_len_x_is_small(self): + xp = np.arange(0, 10, 0.0001) + fp = np.sin(xp) + assert_almost_equal(np.interp(np.pi, xp, fp), 0.0) + + def test_period(self): + x = [-180, -170, -185, 185, -10, -5, 0, 365] + xp = [190, -190, 350, -350] + fp = [5, 10, 3, 4] + y = [7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75] + assert_almost_equal(np.interp(x, xp, fp, period=360), y) + x = np.array(x, order='F').reshape(2, -1) + y = np.array(y, order='C').reshape(2, -1) + assert_almost_equal(np.interp(x, xp, fp, period=360), y) + + +quantile_methods = [ + 'inverted_cdf', 'averaged_inverted_cdf', 'closest_observation', + 'interpolated_inverted_cdf', 'hazen', 'weibull', 'linear', + 'median_unbiased', 'normal_unbiased', 'nearest', 'lower', 'higher', + 'midpoint'] + +# Note: Technically, averaged_inverted_cdf and midpoint are not interpolated. +# but NumPy doesn't currently make a difference (at least w.r.t. to promotion). +interpolating_quantile_methods = [ + 'averaged_inverted_cdf', 'interpolated_inverted_cdf', 'hazen', 'weibull', + 'linear', 'median_unbiased', 'normal_unbiased', 'midpoint'] + +methods_supporting_weights = ["inverted_cdf"] + + +class TestPercentile: + + def test_basic(self): + x = np.arange(8) * 0.5 + assert_equal(np.percentile(x, 0), 0.) + assert_equal(np.percentile(x, 100), 3.5) + assert_equal(np.percentile(x, 50), 1.75) + x[1] = np.nan + assert_equal(np.percentile(x, 0), np.nan) + assert_equal(np.percentile(x, 0, method='nearest'), np.nan) + assert_equal(np.percentile(x, 0, method='inverted_cdf'), np.nan) + assert_equal( + np.percentile(x, 0, method='inverted_cdf', + weights=np.ones_like(x)), + np.nan, + ) + + def test_fraction(self): + x = [Fraction(i, 2) for i in range(8)] + + p = np.percentile(x, Fraction(0)) + assert_equal(p, Fraction(0)) + assert_equal(type(p), Fraction) + + p = np.percentile(x, Fraction(100)) + assert_equal(p, Fraction(7, 2)) + assert_equal(type(p), Fraction) + + p = np.percentile(x, Fraction(50)) + assert_equal(p, Fraction(7, 4)) + assert_equal(type(p), Fraction) + + p = np.percentile(x, [Fraction(50)]) + assert_equal(p, np.array([Fraction(7, 4)])) + assert_equal(type(p), np.ndarray) + + def test_api(self): + d = np.ones(5) + np.percentile(d, 5, None, None, False) + np.percentile(d, 5, None, None, False, 'linear') + o = np.ones((1,)) + np.percentile(d, 5, None, o, False, 'linear') + + def test_complex(self): + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='G') + assert_raises(TypeError, np.percentile, arr_c, 0.5) + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='D') + assert_raises(TypeError, np.percentile, arr_c, 0.5) + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='F') + assert_raises(TypeError, np.percentile, arr_c, 0.5) + + def test_2D(self): + x = np.array([[1, 1, 1], + [1, 1, 1], + [4, 4, 3], + [1, 1, 1], + [1, 1, 1]]) + assert_array_equal(np.percentile(x, 50, axis=0), [1, 1, 1]) + + @pytest.mark.parametrize("dtype", np.typecodes["Float"]) + def test_linear_nan_1D(self, dtype): + # METHOD 1 of H&F + arr = np.asarray([15.0, np.nan, 35.0, 40.0, 50.0], dtype=dtype) + res = np.percentile( + arr, + 40.0, + method="linear") + np.testing.assert_equal(res, np.nan) + np.testing.assert_equal(res.dtype, arr.dtype) + + H_F_TYPE_CODES = [(int_type, np.float64) + for int_type in np.typecodes["AllInteger"] + ] + [(np.float16, np.float16), + (np.float32, np.float32), + (np.float64, np.float64), + (np.longdouble, np.longdouble), + (np.dtype("O"), np.float64)] + + @pytest.mark.parametrize(["function", "quantile"], + [(np.quantile, 0.4), + (np.percentile, 40.0)]) + @pytest.mark.parametrize(["input_dtype", "expected_dtype"], H_F_TYPE_CODES) + @pytest.mark.parametrize(["method", "weighted", "expected"], + [("inverted_cdf", False, 20), + ("inverted_cdf", True, 20), + ("averaged_inverted_cdf", False, 27.5), + ("closest_observation", False, 20), + ("interpolated_inverted_cdf", False, 20), + ("hazen", False, 27.5), + ("weibull", False, 26), + ("linear", False, 29), + ("median_unbiased", False, 27), + ("normal_unbiased", False, 27.125), + ]) + def test_linear_interpolation(self, + function, + quantile, + method, + weighted, + expected, + input_dtype, + expected_dtype): + expected_dtype = np.dtype(expected_dtype) + + arr = np.asarray([15.0, 20.0, 35.0, 40.0, 50.0], dtype=input_dtype) + weights = np.ones_like(arr) if weighted else None + if input_dtype is np.longdouble: + if function is np.quantile: + # 0.4 is not exactly representable and it matters + # for "averaged_inverted_cdf", so we need to cheat. + quantile = input_dtype("0.4") + # We want to use nulp, but that does not work for longdouble + test_function = np.testing.assert_almost_equal + else: + test_function = np.testing.assert_array_almost_equal_nulp + + actual = function(arr, quantile, method=method, weights=weights) + + test_function(actual, expected_dtype.type(expected)) + + if method in ["inverted_cdf", "closest_observation"]: + if input_dtype == "O": + np.testing.assert_equal(np.asarray(actual).dtype, np.float64) + else: + np.testing.assert_equal(np.asarray(actual).dtype, + np.dtype(input_dtype)) + else: + np.testing.assert_equal(np.asarray(actual).dtype, + np.dtype(expected_dtype)) + + TYPE_CODES = np.typecodes["AllInteger"] + np.typecodes["Float"] + "O" + + @pytest.mark.parametrize("dtype", TYPE_CODES) + def test_lower_higher(self, dtype): + assert_equal(np.percentile(np.arange(10, dtype=dtype), 50, + method='lower'), 4) + assert_equal(np.percentile(np.arange(10, dtype=dtype), 50, + method='higher'), 5) + + @pytest.mark.parametrize("dtype", TYPE_CODES) + def test_midpoint(self, dtype): + assert_equal(np.percentile(np.arange(10, dtype=dtype), 51, + method='midpoint'), 4.5) + assert_equal(np.percentile(np.arange(9, dtype=dtype) + 1, 50, + method='midpoint'), 5) + assert_equal(np.percentile(np.arange(11, dtype=dtype), 51, + method='midpoint'), 5.5) + assert_equal(np.percentile(np.arange(11, dtype=dtype), 50, + method='midpoint'), 5) + + @pytest.mark.parametrize("dtype", TYPE_CODES) + def test_nearest(self, dtype): + assert_equal(np.percentile(np.arange(10, dtype=dtype), 51, + method='nearest'), 5) + assert_equal(np.percentile(np.arange(10, dtype=dtype), 49, + method='nearest'), 4) + + def test_linear_interpolation_extrapolation(self): + arr = np.random.rand(5) + + actual = np.percentile(arr, 100) + np.testing.assert_equal(actual, arr.max()) + + actual = np.percentile(arr, 0) + np.testing.assert_equal(actual, arr.min()) + + def test_sequence(self): + x = np.arange(8) * 0.5 + assert_equal(np.percentile(x, [0, 100, 50]), [0, 3.5, 1.75]) + + def test_axis(self): + x = np.arange(12).reshape(3, 4) + + assert_equal(np.percentile(x, (25, 50, 100)), [2.75, 5.5, 11.0]) + + r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]] + assert_equal(np.percentile(x, (25, 50, 100), axis=0), r0) + + r1 = [[0.75, 1.5, 3], [4.75, 5.5, 7], [8.75, 9.5, 11]] + assert_equal(np.percentile(x, (25, 50, 100), axis=1), np.array(r1).T) + + # ensure qth axis is always first as with np.array(old_percentile(..)) + x = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) + assert_equal(np.percentile(x, (25, 50)).shape, (2,)) + assert_equal(np.percentile(x, (25, 50, 75)).shape, (3,)) + assert_equal(np.percentile(x, (25, 50), axis=0).shape, (2, 4, 5, 6)) + assert_equal(np.percentile(x, (25, 50), axis=1).shape, (2, 3, 5, 6)) + assert_equal(np.percentile(x, (25, 50), axis=2).shape, (2, 3, 4, 6)) + assert_equal(np.percentile(x, (25, 50), axis=3).shape, (2, 3, 4, 5)) + assert_equal( + np.percentile(x, (25, 50, 75), axis=1).shape, (3, 3, 5, 6)) + assert_equal(np.percentile(x, (25, 50), + method="higher").shape, (2,)) + assert_equal(np.percentile(x, (25, 50, 75), + method="higher").shape, (3,)) + assert_equal(np.percentile(x, (25, 50), axis=0, + method="higher").shape, (2, 4, 5, 6)) + assert_equal(np.percentile(x, (25, 50), axis=1, + method="higher").shape, (2, 3, 5, 6)) + assert_equal(np.percentile(x, (25, 50), axis=2, + method="higher").shape, (2, 3, 4, 6)) + assert_equal(np.percentile(x, (25, 50), axis=3, + method="higher").shape, (2, 3, 4, 5)) + assert_equal(np.percentile(x, (25, 50, 75), axis=1, + method="higher").shape, (3, 3, 5, 6)) + + def test_scalar_q(self): + # test for no empty dimensions for compatibility with old percentile + x = np.arange(12).reshape(3, 4) + assert_equal(np.percentile(x, 50), 5.5) + assert_(np.isscalar(np.percentile(x, 50))) + r0 = np.array([4., 5., 6., 7.]) + assert_equal(np.percentile(x, 50, axis=0), r0) + assert_equal(np.percentile(x, 50, axis=0).shape, r0.shape) + r1 = np.array([1.5, 5.5, 9.5]) + assert_almost_equal(np.percentile(x, 50, axis=1), r1) + assert_equal(np.percentile(x, 50, axis=1).shape, r1.shape) + + out = np.empty(1) + assert_equal(np.percentile(x, 50, out=out), 5.5) + assert_equal(out, 5.5) + out = np.empty(4) + assert_equal(np.percentile(x, 50, axis=0, out=out), r0) + assert_equal(out, r0) + out = np.empty(3) + assert_equal(np.percentile(x, 50, axis=1, out=out), r1) + assert_equal(out, r1) + + # test for no empty dimensions for compatibility with old percentile + x = np.arange(12).reshape(3, 4) + assert_equal(np.percentile(x, 50, method='lower'), 5.) + assert_(np.isscalar(np.percentile(x, 50))) + r0 = np.array([4., 5., 6., 7.]) + c0 = np.percentile(x, 50, method='lower', axis=0) + assert_equal(c0, r0) + assert_equal(c0.shape, r0.shape) + r1 = np.array([1., 5., 9.]) + c1 = np.percentile(x, 50, method='lower', axis=1) + assert_almost_equal(c1, r1) + assert_equal(c1.shape, r1.shape) + + out = np.empty((), dtype=x.dtype) + c = np.percentile(x, 50, method='lower', out=out) + assert_equal(c, 5) + assert_equal(out, 5) + out = np.empty(4, dtype=x.dtype) + c = np.percentile(x, 50, method='lower', axis=0, out=out) + assert_equal(c, r0) + assert_equal(out, r0) + out = np.empty(3, dtype=x.dtype) + c = np.percentile(x, 50, method='lower', axis=1, out=out) + assert_equal(c, r1) + assert_equal(out, r1) + + def test_exception(self): + assert_raises(ValueError, np.percentile, [1, 2], 56, + method='foobar') + assert_raises(ValueError, np.percentile, [1], 101) + assert_raises(ValueError, np.percentile, [1], -1) + assert_raises(ValueError, np.percentile, [1], list(range(50)) + [101]) + assert_raises(ValueError, np.percentile, [1], list(range(50)) + [-0.1]) + + def test_percentile_list(self): + assert_equal(np.percentile([1, 2, 3], 0), 1) + + @pytest.mark.parametrize( + "percentile, with_weights", + [ + (np.percentile, False), + (partial(np.percentile, method="inverted_cdf"), True), + ] + ) + def test_percentile_out(self, percentile, with_weights): + out_dtype = int if with_weights else float + x = np.array([1, 2, 3]) + y = np.zeros((3,), dtype=out_dtype) + p = (1, 2, 3) + weights = np.ones_like(x) if with_weights else None + r = percentile(x, p, out=y, weights=weights) + assert r is y + assert_equal(percentile(x, p, weights=weights), y) + + x = np.array([[1, 2, 3], + [4, 5, 6]]) + y = np.zeros((3, 3), dtype=out_dtype) + weights = np.ones_like(x) if with_weights else None + r = percentile(x, p, axis=0, out=y, weights=weights) + assert r is y + assert_equal(percentile(x, p, weights=weights, axis=0), y) + + y = np.zeros((3, 2), dtype=out_dtype) + percentile(x, p, axis=1, out=y, weights=weights) + assert_equal(percentile(x, p, weights=weights, axis=1), y) + + x = np.arange(12).reshape(3, 4) + # q.dim > 1, float + if with_weights: + r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) + else: + r0 = np.array([[2., 3., 4., 5.], [4., 5., 6., 7.]]) + out = np.empty((2, 4), dtype=out_dtype) + weights = np.ones_like(x) if with_weights else None + assert_equal( + percentile(x, (25, 50), axis=0, out=out, weights=weights), r0 + ) + assert_equal(out, r0) + r1 = np.array([[0.75, 4.75, 8.75], [1.5, 5.5, 9.5]]) + out = np.empty((2, 3)) + assert_equal(np.percentile(x, (25, 50), axis=1, out=out), r1) + assert_equal(out, r1) + + # q.dim > 1, int + r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) + out = np.empty((2, 4), dtype=x.dtype) + c = np.percentile(x, (25, 50), method='lower', axis=0, out=out) + assert_equal(c, r0) + assert_equal(out, r0) + r1 = np.array([[0, 4, 8], [1, 5, 9]]) + out = np.empty((2, 3), dtype=x.dtype) + c = np.percentile(x, (25, 50), method='lower', axis=1, out=out) + assert_equal(c, r1) + assert_equal(out, r1) + + def test_percentile_empty_dim(self): + # empty dims are preserved + d = np.arange(11 * 2).reshape(11, 1, 2, 1) + assert_array_equal(np.percentile(d, 50, axis=0).shape, (1, 2, 1)) + assert_array_equal(np.percentile(d, 50, axis=1).shape, (11, 2, 1)) + assert_array_equal(np.percentile(d, 50, axis=2).shape, (11, 1, 1)) + assert_array_equal(np.percentile(d, 50, axis=3).shape, (11, 1, 2)) + assert_array_equal(np.percentile(d, 50, axis=-1).shape, (11, 1, 2)) + assert_array_equal(np.percentile(d, 50, axis=-2).shape, (11, 1, 1)) + assert_array_equal(np.percentile(d, 50, axis=-3).shape, (11, 2, 1)) + assert_array_equal(np.percentile(d, 50, axis=-4).shape, (1, 2, 1)) + + assert_array_equal(np.percentile(d, 50, axis=2, + method='midpoint').shape, + (11, 1, 1)) + assert_array_equal(np.percentile(d, 50, axis=-2, + method='midpoint').shape, + (11, 1, 1)) + + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=0)).shape, + (2, 1, 2, 1)) + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=1)).shape, + (2, 11, 2, 1)) + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=2)).shape, + (2, 11, 1, 1)) + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=3)).shape, + (2, 11, 1, 2)) + + def test_percentile_no_overwrite(self): + a = np.array([2, 3, 4, 1]) + np.percentile(a, [50], overwrite_input=False) + assert_equal(a, np.array([2, 3, 4, 1])) + + a = np.array([2, 3, 4, 1]) + np.percentile(a, [50]) + assert_equal(a, np.array([2, 3, 4, 1])) + + def test_no_p_overwrite(self): + p = np.linspace(0., 100., num=5) + np.percentile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, np.linspace(0., 100., num=5)) + p = np.linspace(0., 100., num=5).tolist() + np.percentile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, np.linspace(0., 100., num=5).tolist()) + + def test_percentile_overwrite(self): + a = np.array([2, 3, 4, 1]) + b = np.percentile(a, [50], overwrite_input=True) + assert_equal(b, np.array([2.5])) + + b = np.percentile([2, 3, 4, 1], [50], overwrite_input=True) + assert_equal(b, np.array([2.5])) + + def test_extended_axis(self): + o = np.random.normal(size=(71, 23)) + x = np.dstack([o] * 10) + assert_equal(np.percentile(x, 30, axis=(0, 1)), np.percentile(o, 30)) + x = np.moveaxis(x, -1, 0) + assert_equal(np.percentile(x, 30, axis=(-2, -1)), np.percentile(o, 30)) + x = x.swapaxes(0, 1).copy() + assert_equal(np.percentile(x, 30, axis=(0, -1)), np.percentile(o, 30)) + x = x.swapaxes(0, 1).copy() + + assert_equal(np.percentile(x, [25, 60], axis=(0, 1, 2)), + np.percentile(x, [25, 60], axis=None)) + assert_equal(np.percentile(x, [25, 60], axis=(0,)), + np.percentile(x, [25, 60], axis=0)) + + d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11)) + np.random.shuffle(d.ravel()) + assert_equal(np.percentile(d, 25, axis=(0, 1, 2))[0], + np.percentile(d[:, :, :, 0].flatten(), 25)) + assert_equal(np.percentile(d, [10, 90], axis=(0, 1, 3))[:, 1], + np.percentile(d[:, :, 1, :].flatten(), [10, 90])) + assert_equal(np.percentile(d, 25, axis=(3, 1, -4))[2], + np.percentile(d[:, :, 2, :].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(3, 1, 2))[2], + np.percentile(d[2, :, :, :].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(3, 2))[2, 1], + np.percentile(d[2, 1, :, :].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(1, -2))[2, 1], + np.percentile(d[2, :, :, 1].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(1, 3))[2, 2], + np.percentile(d[2, :, 2, :].flatten(), 25)) + + def test_extended_axis_invalid(self): + d = np.ones((3, 5, 7, 11)) + assert_raises(AxisError, np.percentile, d, axis=-5, q=25) + assert_raises(AxisError, np.percentile, d, axis=(0, -5), q=25) + assert_raises(AxisError, np.percentile, d, axis=4, q=25) + assert_raises(AxisError, np.percentile, d, axis=(0, 4), q=25) + # each of these refers to the same axis twice + assert_raises(ValueError, np.percentile, d, axis=(1, 1), q=25) + assert_raises(ValueError, np.percentile, d, axis=(-1, -1), q=25) + assert_raises(ValueError, np.percentile, d, axis=(3, -1), q=25) + + def test_keepdims(self): + d = np.ones((3, 5, 7, 11)) + assert_equal(np.percentile(d, 7, axis=None, keepdims=True).shape, + (1, 1, 1, 1)) + assert_equal(np.percentile(d, 7, axis=(0, 1), keepdims=True).shape, + (1, 1, 7, 11)) + assert_equal(np.percentile(d, 7, axis=(0, 3), keepdims=True).shape, + (1, 5, 7, 1)) + assert_equal(np.percentile(d, 7, axis=(1,), keepdims=True).shape, + (3, 1, 7, 11)) + assert_equal(np.percentile(d, 7, (0, 1, 2, 3), keepdims=True).shape, + (1, 1, 1, 1)) + assert_equal(np.percentile(d, 7, axis=(0, 1, 3), keepdims=True).shape, + (1, 1, 7, 1)) + + assert_equal(np.percentile(d, [1, 7], axis=(0, 1, 3), + keepdims=True).shape, (2, 1, 1, 7, 1)) + assert_equal(np.percentile(d, [1, 7], axis=(0, 3), + keepdims=True).shape, (2, 1, 5, 7, 1)) + + @pytest.mark.parametrize('q', [7, [1, 7]]) + @pytest.mark.parametrize( + argnames='axis', + argvalues=[ + None, + 1, + (1,), + (0, 1), + (-3, -1), + ] + ) + def test_keepdims_out(self, q, axis): + d = np.ones((3, 5, 7, 11)) + if axis is None: + shape_out = (1,) * d.ndim + else: + axis_norm = normalize_axis_tuple(axis, d.ndim) + shape_out = tuple( + 1 if i in axis_norm else d.shape[i] for i in range(d.ndim)) + shape_out = np.shape(q) + shape_out + + out = np.empty(shape_out) + result = np.percentile(d, q, axis=axis, keepdims=True, out=out) + assert result is out + assert_equal(result.shape, shape_out) + + def test_out(self): + o = np.zeros((4,)) + d = np.ones((3, 4)) + assert_equal(np.percentile(d, 0, 0, out=o), o) + assert_equal(np.percentile(d, 0, 0, method='nearest', out=o), o) + o = np.zeros((3,)) + assert_equal(np.percentile(d, 1, 1, out=o), o) + assert_equal(np.percentile(d, 1, 1, method='nearest', out=o), o) + + o = np.zeros(()) + assert_equal(np.percentile(d, 2, out=o), o) + assert_equal(np.percentile(d, 2, method='nearest', out=o), o) + + @pytest.mark.parametrize("method, weighted", [ + ("linear", False), + ("nearest", False), + ("inverted_cdf", False), + ("inverted_cdf", True), + ]) + def test_out_nan(self, method, weighted): + if weighted: + kwargs = {"weights": np.ones((3, 4)), "method": method} + else: + kwargs = {"method": method} + with warnings.catch_warnings(record=True): + warnings.filterwarnings('always', '', RuntimeWarning) + o = np.zeros((4,)) + d = np.ones((3, 4)) + d[2, 1] = np.nan + assert_equal(np.percentile(d, 0, 0, out=o, **kwargs), o) + + o = np.zeros((3,)) + assert_equal(np.percentile(d, 1, 1, out=o, **kwargs), o) + + o = np.zeros(()) + assert_equal(np.percentile(d, 1, out=o, **kwargs), o) + + def test_nan_behavior(self): + a = np.arange(24, dtype=float) + a[2] = np.nan + assert_equal(np.percentile(a, 0.3), np.nan) + assert_equal(np.percentile(a, 0.3, axis=0), np.nan) + assert_equal(np.percentile(a, [0.3, 0.6], axis=0), + np.array([np.nan] * 2)) + + a = np.arange(24, dtype=float).reshape(2, 3, 4) + a[1, 2, 3] = np.nan + a[1, 1, 2] = np.nan + + # no axis + assert_equal(np.percentile(a, 0.3), np.nan) + assert_equal(np.percentile(a, 0.3).ndim, 0) + + # axis0 zerod + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 0) + b[2, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.percentile(a, 0.3, 0), b) + + # axis0 not zerod + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), + [0.3, 0.6], 0) + b[:, 2, 3] = np.nan + b[:, 1, 2] = np.nan + assert_equal(np.percentile(a, [0.3, 0.6], 0), b) + + # axis1 zerod + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 1) + b[1, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.percentile(a, 0.3, 1), b) + # axis1 not zerod + b = np.percentile( + np.arange(24, dtype=float).reshape(2, 3, 4), [0.3, 0.6], 1) + b[:, 1, 3] = np.nan + b[:, 1, 2] = np.nan + assert_equal(np.percentile(a, [0.3, 0.6], 1), b) + + # axis02 zerod + b = np.percentile( + np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, (0, 2)) + b[1] = np.nan + b[2] = np.nan + assert_equal(np.percentile(a, 0.3, (0, 2)), b) + # axis02 not zerod + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), + [0.3, 0.6], (0, 2)) + b[:, 1] = np.nan + b[:, 2] = np.nan + assert_equal(np.percentile(a, [0.3, 0.6], (0, 2)), b) + # axis02 not zerod with method='nearest' + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), + [0.3, 0.6], (0, 2), method='nearest') + b[:, 1] = np.nan + b[:, 2] = np.nan + assert_equal(np.percentile( + a, [0.3, 0.6], (0, 2), method='nearest'), b) + + def test_nan_q(self): + # GH18830 + with pytest.raises(ValueError, match="Percentiles must be in"): + np.percentile([1, 2, 3, 4.0], np.nan) + with pytest.raises(ValueError, match="Percentiles must be in"): + np.percentile([1, 2, 3, 4.0], [np.nan]) + q = np.linspace(1.0, 99.0, 16) + q[0] = np.nan + with pytest.raises(ValueError, match="Percentiles must be in"): + np.percentile([1, 2, 3, 4.0], q) + + @pytest.mark.parametrize("dtype", ["m8[D]", "M8[s]"]) + @pytest.mark.parametrize("pos", [0, 23, 10]) + def test_nat_basic(self, dtype, pos): + # TODO: Note that times have dubious rounding as of fixing NaTs! + # NaT and NaN should behave the same, do basic tests for NaT: + a = np.arange(0, 24, dtype=dtype) + a[pos] = "NaT" + res = np.percentile(a, 30) + assert res.dtype == dtype + assert np.isnat(res) + res = np.percentile(a, [30, 60]) + assert res.dtype == dtype + assert np.isnat(res).all() + + a = np.arange(0, 24 * 3, dtype=dtype).reshape(-1, 3) + a[pos, 1] = "NaT" + res = np.percentile(a, 30, axis=0) + assert_array_equal(np.isnat(res), [False, True, False]) + + @pytest.mark.parametrize("qtype", [np.float16, np.float32]) + @pytest.mark.parametrize("method", quantile_methods) + def test_percentile_gh_29003(self, qtype, method): + # test that with float16 or float32 input we do not get overflow + zero = qtype(0) + one = qtype(1) + a = np.zeros(65521, qtype) + a[:20_000] = one + z = np.percentile(a, 50, method=method) + assert z == zero + assert z.dtype == a.dtype + z = np.percentile(a, 99, method=method) + assert z == one + assert z.dtype == a.dtype + + def test_percentile_gh_29003_Fraction(self): + zero = Fraction(0) + one = Fraction(1) + a = np.array([zero] * 65521) + a[:20_000] = one + z = np.percentile(a, 50) + assert z == zero + z = np.percentile(a, Fraction(50)) + assert z == zero + assert np.array(z).dtype == a.dtype + + z = np.percentile(a, 99) + assert z == one + # test that with only Fraction input the return type is a Fraction + z = np.percentile(a, Fraction(99)) + assert z == one + assert np.array(z).dtype == a.dtype + + @pytest.mark.parametrize("method", interpolating_quantile_methods) + @pytest.mark.parametrize("q", [50, 10.0]) + def test_q_weak_promotion(self, method, q): + a = np.array([1, 2, 3, 4, 5], dtype=np.float32) + value = np.percentile(a, q, method=method) + assert value.dtype == np.float32 + + @pytest.mark.parametrize("method", interpolating_quantile_methods) + def test_q_strong_promotion(self, method): + # For interpolating methods, the dtype should be float64, for + # discrete ones the original int8. (technically, mid-point has no + # reason to take into account `q`, but does so anyway.) + a = np.array([1, 2, 3, 4, 5], dtype=np.float32) + value = np.percentile(a, np.float64(50), method=method) + assert value.dtype == np.float64 + # Check that we don't do accidental promotion either: + value = np.percentile(a, np.float32(50), method=method) + assert value.dtype == np.float32 + + +class TestQuantile: + # most of this is already tested by TestPercentile + + def V(self, x, y, alpha): + # Identification function used in several tests. + return (x >= y) - alpha + + def test_max_ulp(self): + x = [0.0, 0.2, 0.4] + a = np.quantile(x, 0.45) + # The default linear method would result in 0 + 0.2 * (0.45/2) = 0.18. + # 0.18 is not exactly representable and the formula leads to a 1 ULP + # different result. Ensure it is this exact within 1 ULP, see gh-20331. + np.testing.assert_array_max_ulp(a, 0.18, maxulp=1) + + def test_basic(self): + x = np.arange(8) * 0.5 + assert_equal(np.quantile(x, 0), 0.) + assert_equal(np.quantile(x, 1), 3.5) + assert_equal(np.quantile(x, 0.5), 1.75) + + def test_correct_quantile_value(self): + a = np.array([True]) + tf_quant = np.quantile(True, False) + assert_equal(tf_quant, a[0]) + assert_equal(type(tf_quant), a.dtype) + a = np.array([False, True, True]) + quant_res = np.quantile(a, a) + assert_array_equal(quant_res, a) + assert_equal(quant_res.dtype, a.dtype) + + def test_fraction(self): + # fractional input, integral quantile + x = [Fraction(i, 2) for i in range(8)] + q = np.quantile(x, 0) + assert_equal(q, 0) + assert_equal(type(q), Fraction) + + q = np.quantile(x, 1) + assert_equal(q, Fraction(7, 2)) + assert_equal(type(q), Fraction) + + q = np.quantile(x, .5) + assert_equal(q, 1.75) + assert isinstance(q, float) + + q = np.quantile(x, Fraction(1, 2)) + assert_equal(q, Fraction(7, 4)) + assert_equal(type(q), Fraction) + + q = np.quantile(x, [Fraction(1, 2)]) + assert_equal(q, np.array([Fraction(7, 4)])) + assert_equal(type(q), np.ndarray) + + q = np.quantile(x, [[Fraction(1, 2)]]) + assert_equal(q, np.array([[Fraction(7, 4)]])) + assert_equal(type(q), np.ndarray) + + # repeat with integral input but fractional quantile + x = np.arange(8) + assert_equal(np.quantile(x, Fraction(1, 2)), Fraction(7, 2)) + + def test_complex(self): + # gh-22652 + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='G') + assert_raises(TypeError, np.quantile, arr_c, 0.5) + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='D') + assert_raises(TypeError, np.quantile, arr_c, 0.5) + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='F') + assert_raises(TypeError, np.quantile, arr_c, 0.5) + + def test_no_p_overwrite(self): + # this is worth retesting, because quantile does not make a copy + p0 = np.array([0, 0.75, 0.25, 0.5, 1.0]) + p = p0.copy() + np.quantile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, p0) + + p0 = p0.tolist() + p = p.tolist() + np.quantile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, p0) + + @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) + def test_quantile_preserve_int_type(self, dtype): + res = np.quantile(np.array([1, 2], dtype=dtype), [0.5], + method="nearest") + assert res.dtype == dtype + + @pytest.mark.parametrize("method", quantile_methods) + def test_q_zero_one(self, method): + # gh-24710 + arr = [10, 11, 12] + quantile = np.quantile(arr, q=[0, 1], method=method) + assert_equal(quantile, np.array([10, 12])) + + @pytest.mark.parametrize("method", quantile_methods) + def test_quantile_monotonic(self, method): + # GH 14685 + # test that the return value of quantile is monotonic if p0 is ordered + # Also tests that the boundary values are not mishandled. + p0 = np.linspace(0, 1, 101) + quantile = np.quantile(np.array([0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 1, 1, 9, 9, 9, + 8, 8, 7]) * 0.1, p0, method=method) + assert_equal(np.sort(quantile), quantile) + + # Also test one where the number of data points is clearly divisible: + quantile = np.quantile([0., 1., 2., 3.], p0, method=method) + assert_equal(np.sort(quantile), quantile) + + @hypothesis.given( + arr=arrays(dtype=np.float64, + shape=st.integers(min_value=3, max_value=1000), + elements=st.floats(allow_infinity=False, allow_nan=False, + min_value=-1e300, max_value=1e300))) + def test_quantile_monotonic_hypo(self, arr): + p0 = np.arange(0, 1, 0.01) + quantile = np.quantile(arr, p0) + assert_equal(np.sort(quantile), quantile) + + def test_quantile_scalar_nan(self): + a = np.array([[10., 7., 4.], [3., 2., 1.]]) + a[0][1] = np.nan + actual = np.quantile(a, 0.5) + assert np.isscalar(actual) + assert_equal(np.quantile(a, 0.5), np.nan) + + @pytest.mark.parametrize("weights", [False, True]) + @pytest.mark.parametrize("method", quantile_methods) + @pytest.mark.parametrize("alpha", [0.2, 0.5, 0.9]) + def test_quantile_identification_equation(self, weights, method, alpha): + # Test that the identification equation holds for the empirical + # CDF: + # E[V(x, Y)] = 0 <=> x is quantile + # with Y the random variable for which we have observed values and + # V(x, y) the canonical identification function for the quantile (at + # level alpha), see + # https://doi.org/10.48550/arXiv.0912.0902 + if weights and method not in methods_supporting_weights: + pytest.skip("Weights not supported by method.") + rng = np.random.default_rng(4321) + # We choose n and alpha such that we cover 3 cases: + # - n * alpha is an integer + # - n * alpha is a float that gets rounded down + # - n * alpha is a float that gest rounded up + n = 102 # n * alpha = 20.4, 51. , 91.8 + y = rng.random(n) + w = rng.integers(low=0, high=10, size=n) if weights else None + x = np.quantile(y, alpha, method=method, weights=w) + + if method in ("higher",): + # These methods do not fulfill the identification equation. + assert np.abs(np.mean(self.V(x, y, alpha))) > 0.1 / n + elif int(n * alpha) == n * alpha and not weights: + # We can expect exact results, up to machine precision. + assert_allclose( + np.average(self.V(x, y, alpha), weights=w), 0, atol=1e-14, + ) + else: + # V = (x >= y) - alpha cannot sum to zero exactly but within + # "sample precision". + assert_allclose(np.average(self.V(x, y, alpha), weights=w), 0, + atol=1 / n / np.amin([alpha, 1 - alpha])) + + @pytest.mark.parametrize("weights", [False, True]) + @pytest.mark.parametrize("method", quantile_methods) + @pytest.mark.parametrize("alpha", [0.2, 0.5, 0.9]) + def test_quantile_add_and_multiply_constant(self, weights, method, alpha): + # Test that + # 1. quantile(c + x) = c + quantile(x) + # 2. quantile(c * x) = c * quantile(x) + # 3. quantile(-x) = -quantile(x, 1 - alpha) + # On empirical quantiles, this equation does not hold exactly. + # Koenker (2005) "Quantile Regression" Chapter 2.2.3 calls these + # properties equivariance. + if weights and method not in methods_supporting_weights: + pytest.skip("Weights not supported by method.") + rng = np.random.default_rng(4321) + # We choose n and alpha such that we have cases for + # - n * alpha is an integer + # - n * alpha is a float that gets rounded down + # - n * alpha is a float that gest rounded up + n = 102 # n * alpha = 20.4, 51. , 91.8 + y = rng.random(n) + w = rng.integers(low=0, high=10, size=n) if weights else None + q = np.quantile(y, alpha, method=method, weights=w) + c = 13.5 + + # 1 + assert_allclose(np.quantile(c + y, alpha, method=method, weights=w), + c + q) + # 2 + assert_allclose(np.quantile(c * y, alpha, method=method, weights=w), + c * q) + # 3 + if weights: + # From here on, we would need more methods to support weights. + return + q = -np.quantile(-y, 1 - alpha, method=method) + if method == "inverted_cdf": + if ( + n * alpha == int(n * alpha) + or np.round(n * alpha) == int(n * alpha) + 1 + ): + assert_allclose(q, np.quantile(y, alpha, method="higher")) + else: + assert_allclose(q, np.quantile(y, alpha, method="lower")) + elif method == "closest_observation": + if n * alpha == int(n * alpha): + assert_allclose(q, np.quantile(y, alpha, method="higher")) + elif np.round(n * alpha) == int(n * alpha) + 1: + assert_allclose( + q, np.quantile(y, alpha + 1 / n, method="higher")) + else: + assert_allclose(q, np.quantile(y, alpha, method="lower")) + elif method == "interpolated_inverted_cdf": + assert_allclose(q, np.quantile(y, alpha + 1 / n, method=method)) + elif method == "nearest": + if n * alpha == int(n * alpha): + assert_allclose(q, np.quantile(y, alpha + 1 / n, method=method)) + else: + assert_allclose(q, np.quantile(y, alpha, method=method)) + elif method == "lower": + assert_allclose(q, np.quantile(y, alpha, method="higher")) + elif method == "higher": + assert_allclose(q, np.quantile(y, alpha, method="lower")) + else: + # "averaged_inverted_cdf", "hazen", "weibull", "linear", + # "median_unbiased", "normal_unbiased", "midpoint" + assert_allclose(q, np.quantile(y, alpha, method=method)) + + @pytest.mark.parametrize("method", methods_supporting_weights) + @pytest.mark.parametrize("alpha", [0.2, 0.5, 0.9]) + def test_quantile_constant_weights(self, method, alpha): + rng = np.random.default_rng(4321) + # We choose n and alpha such that we have cases for + # - n * alpha is an integer + # - n * alpha is a float that gets rounded down + # - n * alpha is a float that gest rounded up + n = 102 # n * alpha = 20.4, 51. , 91.8 + y = rng.random(n) + q = np.quantile(y, alpha, method=method) + + w = np.ones_like(y) + qw = np.quantile(y, alpha, method=method, weights=w) + assert_allclose(qw, q) + + w = 8.125 * np.ones_like(y) + qw = np.quantile(y, alpha, method=method, weights=w) + assert_allclose(qw, q) + + @pytest.mark.parametrize("method", methods_supporting_weights) + @pytest.mark.parametrize("alpha", [0, 0.2, 0.5, 0.9, 1]) + def test_quantile_with_integer_weights(self, method, alpha): + # Integer weights can be interpreted as repeated observations. + rng = np.random.default_rng(4321) + # We choose n and alpha such that we have cases for + # - n * alpha is an integer + # - n * alpha is a float that gets rounded down + # - n * alpha is a float that gest rounded up + n = 102 # n * alpha = 20.4, 51. , 91.8 + y = rng.random(n) + w = rng.integers(low=0, high=10, size=n, dtype=np.int32) + + qw = np.quantile(y, alpha, method=method, weights=w) + q = np.quantile(np.repeat(y, w), alpha, method=method) + assert_allclose(qw, q) + + @pytest.mark.parametrize("method", methods_supporting_weights) + def test_quantile_with_weights_and_axis(self, method): + rng = np.random.default_rng(4321) + + # 1d weight and single alpha + y = rng.random((2, 10, 3)) + w = np.abs(rng.random(10)) + alpha = 0.5 + q = np.quantile(y, alpha, weights=w, method=method, axis=1) + q_res = np.zeros(shape=(2, 3)) + for i in range(2): + for j in range(3): + q_res[i, j] = np.quantile( + y[i, :, j], alpha, method=method, weights=w + ) + assert_allclose(q, q_res) + + # 1d weight and 1d alpha + alpha = [0, 0.2, 0.4, 0.6, 0.8, 1] # shape (6,) + q = np.quantile(y, alpha, weights=w, method=method, axis=1) + q_res = np.zeros(shape=(6, 2, 3)) + for i in range(2): + for j in range(3): + q_res[:, i, j] = np.quantile( + y[i, :, j], alpha, method=method, weights=w + ) + assert_allclose(q, q_res) + + # 1d weight and 2d alpha + alpha = [[0, 0.2], [0.4, 0.6], [0.8, 1]] # shape (3, 2) + q = np.quantile(y, alpha, weights=w, method=method, axis=1) + q_res = q_res.reshape((3, 2, 2, 3)) + assert_allclose(q, q_res) + + # shape of weights equals shape of y + w = np.abs(rng.random((2, 10, 3))) + alpha = 0.5 + q = np.quantile(y, alpha, weights=w, method=method, axis=1) + q_res = np.zeros(shape=(2, 3)) + for i in range(2): + for j in range(3): + q_res[i, j] = np.quantile( + y[i, :, j], alpha, method=method, weights=w[i, :, j] + ) + assert_allclose(q, q_res) + + # axis is a tuple of all axes + q = np.quantile(y, alpha, weights=w, method=method, axis=(0, 1, 2)) + q_res = np.quantile(y, alpha, weights=w, method=method, axis=None) + assert_allclose(q, q_res) + + q = np.quantile(y, alpha, weights=w, method=method, axis=(1, 2)) + q_res = np.zeros(shape=(2,)) + for i in range(2): + q_res[i] = np.quantile(y[i], alpha, weights=w[i], method=method) + assert_allclose(q, q_res) + + @pytest.mark.parametrize("method", methods_supporting_weights) + def test_quantile_weights_min_max(self, method): + # Test weighted quantile at 0 and 1 with leading and trailing zero + # weights. + w = [0, 0, 1, 2, 3, 0] + y = np.arange(6) + y_min = np.quantile(y, 0, weights=w, method="inverted_cdf") + y_max = np.quantile(y, 1, weights=w, method="inverted_cdf") + assert y_min == y[2] # == 2 + assert y_max == y[4] # == 4 + + def test_quantile_weights_raises_negative_weights(self): + y = [1, 2] + w = [-0.5, 1] + with pytest.raises(ValueError, match="Weights must be non-negative"): + np.quantile(y, 0.5, weights=w, method="inverted_cdf") + + @pytest.mark.parametrize( + "method", + sorted(set(quantile_methods) - set(methods_supporting_weights)), + ) + def test_quantile_weights_raises_unsupported_methods(self, method): + y = [1, 2] + w = [0.5, 1] + msg = "Only method 'inverted_cdf' supports weights" + with pytest.raises(ValueError, match=msg): + np.quantile(y, 0.5, weights=w, method=method) + + def test_weibull_fraction(self): + arr = [Fraction(0, 1), Fraction(1, 10)] + quantile = np.quantile(arr, [0, ], method='weibull') + assert_equal(quantile, np.array(Fraction(0, 1))) + quantile = np.quantile(arr, [Fraction(1, 2)], method='weibull') + assert_equal(quantile, np.array(Fraction(1, 20))) + + def test_closest_observation(self): + # Round ties to nearest even order statistic (see #26656) + m = 'closest_observation' + q = 0.5 + arr = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + assert_equal(2, np.quantile(arr[0:3], q, method=m)) + assert_equal(2, np.quantile(arr[0:4], q, method=m)) + assert_equal(2, np.quantile(arr[0:5], q, method=m)) + assert_equal(3, np.quantile(arr[0:6], q, method=m)) + assert_equal(4, np.quantile(arr[0:7], q, method=m)) + assert_equal(4, np.quantile(arr[0:8], q, method=m)) + assert_equal(4, np.quantile(arr[0:9], q, method=m)) + assert_equal(5, np.quantile(arr, q, method=m)) + + @pytest.mark.parametrize("weights", + [[1, np.inf, 1, 1], [1, np.inf, 1, np.inf], [0, 0, 0, 0], + [np.finfo("float64").max] * 4]) + @pytest.mark.parametrize("dty", ["f8", "O"]) + def test_inf_zeroes_err(self, weights, dty): + m = "inverted_cdf" + q = 0.5 + arr = np.array([[1, 2, 3, 4]] * 2) + # Make one entry have bad weights and another good ones. + wgts = np.array([weights, [0.5] * 4], dtype=dty) + with pytest.raises(ValueError, + match=r"Weights included NaN, inf or were all zero"): + # We (currently) don't bother to check ahead so 0/0 or + # overflow to `inf` while summing weights, or `inf / inf` + # will all warn before the error is raised. + with np.errstate(all="ignore"): + a = np.quantile(arr, q, weights=wgts, method=m, axis=1) + + @pytest.mark.parametrize("weights", + [[1, np.nan, 1, 1], [1, np.nan, np.nan, 1]]) + @pytest.mark.parametrize(["err", "dty"], + [(ValueError, "f8"), ((RuntimeWarning, ValueError), "O")]) + def test_nan_err(self, err, dty, weights): + m = "inverted_cdf" + q = 0.5 + arr = np.array([[1, 2, 3, 4]] * 2) + # Make one entry have bad weights and another good ones. + wgts = np.array([weights, [0.5] * 4], dtype=dty) + with pytest.raises(err): + a = np.quantile(arr, q, weights=wgts, method=m) + + def test_quantile_gh_29003_Fraction(self): + r = np.quantile([1, 2], q=Fraction(1)) + assert r == Fraction(2) + assert isinstance(r, Fraction) + + r = np.quantile([1, 2], q=Fraction(.5)) + assert r == Fraction(3, 2) + assert isinstance(r, Fraction) + + def test_float16_gh_29003(self): + a = np.arange(50_001, dtype=np.float16) + q = .999 + value = np.quantile(a, q) + assert value == q * 50_000 + assert value.dtype == np.float16 + + @pytest.mark.parametrize("method", interpolating_quantile_methods) + @pytest.mark.parametrize("q", [0.5, 1]) + def test_q_weak_promotion(self, method, q): + a = np.array([1, 2, 3, 4, 5], dtype=np.float32) + value = np.quantile(a, q, method=method) + assert value.dtype == np.float32 + + @pytest.mark.parametrize("method", interpolating_quantile_methods) + def test_q_strong_promotion(self, method): + # For interpolating methods, the dtype should be float64, for + # discrete ones the original int8. (technically, mid-point has no + # reason to take into account `q`, but does so anyway.) + a = np.array([1, 2, 3, 4, 5], dtype=np.float32) + value = np.quantile(a, np.float64(0.5), method=method) + assert value.dtype == np.float64 + # Check that we don't do accidental promotion either: + value = np.quantile(a, np.float32(0.5), method=method) + assert value.dtype == np.float32 + + +class TestLerp: + @hypothesis.given(t0=st.floats(allow_nan=False, allow_infinity=False, + min_value=0, max_value=1), + t1=st.floats(allow_nan=False, allow_infinity=False, + min_value=0, max_value=1), + a=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300), + b=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300)) + def test_linear_interpolation_formula_monotonic(self, t0, t1, a, b): + l0 = nfb._lerp(a, b, t0) + l1 = nfb._lerp(a, b, t1) + if t0 == t1 or a == b: + assert l0 == l1 # uninteresting + elif (t0 < t1) == (a < b): + assert l0 <= l1 + else: + assert l0 >= l1 + + @hypothesis.given(t=st.floats(allow_nan=False, allow_infinity=False, + min_value=0, max_value=1), + a=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300), + b=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300)) + def test_linear_interpolation_formula_bounded(self, t, a, b): + if a <= b: + assert a <= nfb._lerp(a, b, t) <= b + else: + assert b <= nfb._lerp(a, b, t) <= a + + @hypothesis.given(t=st.floats(allow_nan=False, allow_infinity=False, + min_value=0, max_value=1), + a=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300), + b=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300)) + def test_linear_interpolation_formula_symmetric(self, t, a, b): + # double subtraction is needed to remove the extra precision of t < 0.5 + left = nfb._lerp(a, b, 1 - (1 - t)) + right = nfb._lerp(b, a, 1 - t) + assert_allclose(left, right) + + def test_linear_interpolation_formula_0d_inputs(self): + a = np.array(2) + b = np.array(5) + t = np.array(0.2) + assert nfb._lerp(a, b, t) == 2.6 + + +class TestMedian: + + def test_basic(self): + a0 = np.array(1) + a1 = np.arange(2) + a2 = np.arange(6).reshape(2, 3) + assert_equal(np.median(a0), 1) + assert_allclose(np.median(a1), 0.5) + assert_allclose(np.median(a2), 2.5) + assert_allclose(np.median(a2, axis=0), [1.5, 2.5, 3.5]) + assert_equal(np.median(a2, axis=1), [1, 4]) + assert_allclose(np.median(a2, axis=None), 2.5) + + a = np.array([0.0444502, 0.0463301, 0.141249, 0.0606775]) + assert_almost_equal((a[1] + a[3]) / 2., np.median(a)) + a = np.array([0.0463301, 0.0444502, 0.141249]) + assert_equal(a[0], np.median(a)) + a = np.array([0.0444502, 0.141249, 0.0463301]) + assert_equal(a[-1], np.median(a)) + # check array scalar result + assert_equal(np.median(a).ndim, 0) + a[1] = np.nan + assert_equal(np.median(a).ndim, 0) + + def test_axis_keyword(self): + a3 = np.array([[2, 3], + [0, 1], + [6, 7], + [4, 5]]) + for a in [a3, np.random.randint(0, 100, size=(2, 3, 4))]: + orig = a.copy() + np.median(a, axis=None) + for ax in range(a.ndim): + np.median(a, axis=ax) + assert_array_equal(a, orig) + + assert_allclose(np.median(a3, axis=0), [3, 4]) + assert_allclose(np.median(a3.T, axis=1), [3, 4]) + assert_allclose(np.median(a3), 3.5) + assert_allclose(np.median(a3, axis=None), 3.5) + assert_allclose(np.median(a3.T), 3.5) + + def test_overwrite_keyword(self): + a3 = np.array([[2, 3], + [0, 1], + [6, 7], + [4, 5]]) + a0 = np.array(1) + a1 = np.arange(2) + a2 = np.arange(6).reshape(2, 3) + assert_allclose(np.median(a0.copy(), overwrite_input=True), 1) + assert_allclose(np.median(a1.copy(), overwrite_input=True), 0.5) + assert_allclose(np.median(a2.copy(), overwrite_input=True), 2.5) + assert_allclose( + np.median(a2.copy(), overwrite_input=True, axis=0), [1.5, 2.5, 3.5]) + assert_allclose( + np.median(a2.copy(), overwrite_input=True, axis=1), [1, 4]) + assert_allclose( + np.median(a2.copy(), overwrite_input=True, axis=None), 2.5) + assert_allclose( + np.median(a3.copy(), overwrite_input=True, axis=0), [3, 4]) + assert_allclose( + np.median(a3.T.copy(), overwrite_input=True, axis=1), [3, 4]) + + a4 = np.arange(3 * 4 * 5, dtype=np.float32).reshape((3, 4, 5)) + np.random.shuffle(a4.ravel()) + assert_allclose(np.median(a4, axis=None), + np.median(a4.copy(), axis=None, overwrite_input=True)) + assert_allclose(np.median(a4, axis=0), + np.median(a4.copy(), axis=0, overwrite_input=True)) + assert_allclose(np.median(a4, axis=1), + np.median(a4.copy(), axis=1, overwrite_input=True)) + assert_allclose(np.median(a4, axis=2), + np.median(a4.copy(), axis=2, overwrite_input=True)) + + def test_array_like(self): + x = [1, 2, 3] + assert_almost_equal(np.median(x), 2) + x2 = [x] + assert_almost_equal(np.median(x2), 2) + assert_allclose(np.median(x2, axis=0), x) + + def test_subclass(self): + # gh-3846 + class MySubClass(np.ndarray): + + def __new__(cls, input_array, info=None): + obj = np.asarray(input_array).view(cls) + obj.info = info + return obj + + def mean(self, axis=None, dtype=None, out=None): + return -7 + + a = MySubClass([1, 2, 3]) + assert_equal(np.median(a), -7) + + @pytest.mark.parametrize('arr', + ([1., 2., 3.], [1., np.nan, 3.], np.nan, 0.)) + def test_subclass2(self, arr): + """Check that we return subclasses, even if a NaN scalar.""" + class MySubclass(np.ndarray): + pass + + m = np.median(np.array(arr).view(MySubclass)) + assert isinstance(m, MySubclass) + + def test_out(self): + o = np.zeros((4,)) + d = np.ones((3, 4)) + assert_equal(np.median(d, 0, out=o), o) + o = np.zeros((3,)) + assert_equal(np.median(d, 1, out=o), o) + o = np.zeros(()) + assert_equal(np.median(d, out=o), o) + + def test_out_nan(self): + with warnings.catch_warnings(record=True): + warnings.filterwarnings('always', '', RuntimeWarning) + o = np.zeros((4,)) + d = np.ones((3, 4)) + d[2, 1] = np.nan + assert_equal(np.median(d, 0, out=o), o) + o = np.zeros((3,)) + assert_equal(np.median(d, 1, out=o), o) + o = np.zeros(()) + assert_equal(np.median(d, out=o), o) + + def test_nan_behavior(self): + a = np.arange(24, dtype=float) + a[2] = np.nan + assert_equal(np.median(a), np.nan) + assert_equal(np.median(a, axis=0), np.nan) + + a = np.arange(24, dtype=float).reshape(2, 3, 4) + a[1, 2, 3] = np.nan + a[1, 1, 2] = np.nan + + # no axis + assert_equal(np.median(a), np.nan) + assert_equal(np.median(a).ndim, 0) + + # axis0 + b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 0) + b[2, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.median(a, 0), b) + + # axis1 + b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 1) + b[1, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.median(a, 1), b) + + # axis02 + b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), (0, 2)) + b[1] = np.nan + b[2] = np.nan + assert_equal(np.median(a, (0, 2)), b) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work correctly") + def test_empty(self): + # mean(empty array) emits two warnings: empty slice and divide by 0 + a = np.array([], dtype=float) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.median(a), np.nan) + assert_(w[0].category is RuntimeWarning) + assert_equal(len(w), 2) + + # multiple dimensions + a = np.array([], dtype=float, ndmin=3) + # no axis + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.median(a), np.nan) + assert_(w[0].category is RuntimeWarning) + + # axis 0 and 1 + b = np.array([], dtype=float, ndmin=2) + assert_equal(np.median(a, axis=0), b) + assert_equal(np.median(a, axis=1), b) + + # axis 2 + b = np.array(np.nan, dtype=float, ndmin=2) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.median(a, axis=2), b) + assert_(w[0].category is RuntimeWarning) + + def test_object(self): + o = np.arange(7.) + assert_(type(np.median(o.astype(object))), float) + o[2] = np.nan + assert_(type(np.median(o.astype(object))), float) + + def test_extended_axis(self): + o = np.random.normal(size=(71, 23)) + x = np.dstack([o] * 10) + assert_equal(np.median(x, axis=(0, 1)), np.median(o)) + x = np.moveaxis(x, -1, 0) + assert_equal(np.median(x, axis=(-2, -1)), np.median(o)) + x = x.swapaxes(0, 1).copy() + assert_equal(np.median(x, axis=(0, -1)), np.median(o)) + + assert_equal(np.median(x, axis=(0, 1, 2)), np.median(x, axis=None)) + assert_equal(np.median(x, axis=(0, )), np.median(x, axis=0)) + assert_equal(np.median(x, axis=(-1, )), np.median(x, axis=-1)) + + d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11)) + np.random.shuffle(d.ravel()) + assert_equal(np.median(d, axis=(0, 1, 2))[0], + np.median(d[:, :, :, 0].flatten())) + assert_equal(np.median(d, axis=(0, 1, 3))[1], + np.median(d[:, :, 1, :].flatten())) + assert_equal(np.median(d, axis=(3, 1, -4))[2], + np.median(d[:, :, 2, :].flatten())) + assert_equal(np.median(d, axis=(3, 1, 2))[2], + np.median(d[2, :, :, :].flatten())) + assert_equal(np.median(d, axis=(3, 2))[2, 1], + np.median(d[2, 1, :, :].flatten())) + assert_equal(np.median(d, axis=(1, -2))[2, 1], + np.median(d[2, :, :, 1].flatten())) + assert_equal(np.median(d, axis=(1, 3))[2, 2], + np.median(d[2, :, 2, :].flatten())) + + def test_extended_axis_invalid(self): + d = np.ones((3, 5, 7, 11)) + assert_raises(AxisError, np.median, d, axis=-5) + assert_raises(AxisError, np.median, d, axis=(0, -5)) + assert_raises(AxisError, np.median, d, axis=4) + assert_raises(AxisError, np.median, d, axis=(0, 4)) + assert_raises(ValueError, np.median, d, axis=(1, 1)) + + def test_keepdims(self): + d = np.ones((3, 5, 7, 11)) + assert_equal(np.median(d, axis=None, keepdims=True).shape, + (1, 1, 1, 1)) + assert_equal(np.median(d, axis=(0, 1), keepdims=True).shape, + (1, 1, 7, 11)) + assert_equal(np.median(d, axis=(0, 3), keepdims=True).shape, + (1, 5, 7, 1)) + assert_equal(np.median(d, axis=(1,), keepdims=True).shape, + (3, 1, 7, 11)) + assert_equal(np.median(d, axis=(0, 1, 2, 3), keepdims=True).shape, + (1, 1, 1, 1)) + assert_equal(np.median(d, axis=(0, 1, 3), keepdims=True).shape, + (1, 1, 7, 1)) + + @pytest.mark.parametrize( + argnames='axis', + argvalues=[ + None, + 1, + (1, ), + (0, 1), + (-3, -1), + ] + ) + def test_keepdims_out(self, axis): + d = np.ones((3, 5, 7, 11)) + if axis is None: + shape_out = (1,) * d.ndim + else: + axis_norm = normalize_axis_tuple(axis, d.ndim) + shape_out = tuple( + 1 if i in axis_norm else d.shape[i] for i in range(d.ndim)) + out = np.empty(shape_out) + result = np.median(d, axis=axis, keepdims=True, out=out) + assert result is out + assert_equal(result.shape, shape_out) + + @pytest.mark.parametrize("dtype", ["m8[s]"]) + @pytest.mark.parametrize("pos", [0, 23, 10]) + def test_nat_behavior(self, dtype, pos): + # TODO: Median does not support Datetime, due to `mean`. + # NaT and NaN should behave the same, do basic tests for NaT. + a = np.arange(0, 24, dtype=dtype) + a[pos] = "NaT" + res = np.median(a) + assert res.dtype == dtype + assert np.isnat(res) + res = np.percentile(a, [30, 60]) + assert res.dtype == dtype + assert np.isnat(res).all() + + a = np.arange(0, 24 * 3, dtype=dtype).reshape(-1, 3) + a[pos, 1] = "NaT" + res = np.median(a, axis=0) + assert_array_equal(np.isnat(res), [False, True, False]) + + +class TestSortComplex: + + @pytest.mark.parametrize("type_in, type_out", [ + ('l', 'D'), + ('h', 'F'), + ('H', 'F'), + ('b', 'F'), + ('B', 'F'), + ('g', 'G'), + ]) + def test_sort_real(self, type_in, type_out): + # sort_complex() type casting for real input types + a = np.array([5, 3, 6, 2, 1], dtype=type_in) + actual = np.sort_complex(a) + expected = np.sort(a).astype(type_out) + assert_equal(actual, expected) + assert_equal(actual.dtype, expected.dtype) + + def test_sort_complex(self): + # sort_complex() handling of complex input + a = np.array([2 + 3j, 1 - 2j, 1 - 3j, 2 + 1j], dtype='D') + expected = np.array([1 - 3j, 1 - 2j, 2 + 1j, 2 + 3j], dtype='D') + actual = np.sort_complex(a) + assert_equal(actual, expected) + assert_equal(actual.dtype, expected.dtype) diff --git a/local_packages/numpy/lib/tests/test_histograms.py b/local_packages/numpy/lib/tests/test_histograms.py new file mode 100644 index 0000000..cae11cf --- /dev/null +++ b/local_packages/numpy/lib/tests/test_histograms.py @@ -0,0 +1,855 @@ +import warnings + +import pytest + +import numpy as np +from numpy import histogram, histogram_bin_edges, histogramdd +from numpy.testing import ( + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_array_max_ulp, + assert_equal, + assert_raises, + assert_raises_regex, +) + + +class TestHistogram: + + def setup_method(self): + pass + + def teardown_method(self): + pass + + def test_simple(self): + n = 100 + v = np.random.rand(n) + (a, b) = histogram(v) + # check if the sum of the bins equals the number of samples + assert_equal(np.sum(a, axis=0), n) + # check that the bin counts are evenly spaced when the data is from + # a linear function + (a, b) = histogram(np.linspace(0, 10, 100)) + assert_array_equal(a, 10) + + def test_one_bin(self): + # Ticket 632 + hist, edges = histogram([1, 2, 3, 4], [1, 2]) + assert_array_equal(hist, [2, ]) + assert_array_equal(edges, [1, 2]) + assert_raises(ValueError, histogram, [1, 2], bins=0) + h, e = histogram([1, 2], bins=1) + assert_equal(h, np.array([2])) + assert_allclose(e, np.array([1., 2.])) + + def test_density(self): + # Check that the integral of the density equals 1. + n = 100 + v = np.random.rand(n) + a, b = histogram(v, density=True) + area = np.sum(a * np.diff(b)) + assert_almost_equal(area, 1) + + # Check with non-constant bin widths + v = np.arange(10) + bins = [0, 1, 3, 6, 10] + a, b = histogram(v, bins, density=True) + assert_array_equal(a, .1) + assert_equal(np.sum(a * np.diff(b)), 1) + + # Test that passing False works too + a, b = histogram(v, bins, density=False) + assert_array_equal(a, [1, 2, 3, 4]) + + # Variable bin widths are especially useful to deal with + # infinities. + v = np.arange(10) + bins = [0, 1, 3, 6, np.inf] + a, b = histogram(v, bins, density=True) + assert_array_equal(a, [.1, .1, .1, 0.]) + + # Taken from a bug report from N. Becker on the numpy-discussion + # mailing list Aug. 6, 2010. + counts, dmy = np.histogram( + [1, 2, 3, 4], [0.5, 1.5, np.inf], density=True) + assert_equal(counts, [.25, 0]) + + def test_outliers(self): + # Check that outliers are not tallied + a = np.arange(10) + .5 + + # Lower outliers + h, b = histogram(a, range=[0, 9]) + assert_equal(h.sum(), 9) + + # Upper outliers + h, b = histogram(a, range=[1, 10]) + assert_equal(h.sum(), 9) + + # Normalization + h, b = histogram(a, range=[1, 9], density=True) + assert_almost_equal((h * np.diff(b)).sum(), 1, decimal=15) + + # Weights + w = np.arange(10) + .5 + h, b = histogram(a, range=[1, 9], weights=w, density=True) + assert_equal((h * np.diff(b)).sum(), 1) + + h, b = histogram(a, bins=8, range=[1, 9], weights=w) + assert_equal(h, w[1:-1]) + + def test_arr_weights_mismatch(self): + a = np.arange(10) + .5 + w = np.arange(11) + .5 + with assert_raises_regex(ValueError, "same shape as"): + h, b = histogram(a, range=[1, 9], weights=w, density=True) + + def test_type(self): + # Check the type of the returned histogram + a = np.arange(10) + .5 + h, b = histogram(a) + assert_(np.issubdtype(h.dtype, np.integer)) + + h, b = histogram(a, density=True) + assert_(np.issubdtype(h.dtype, np.floating)) + + h, b = histogram(a, weights=np.ones(10, int)) + assert_(np.issubdtype(h.dtype, np.integer)) + + h, b = histogram(a, weights=np.ones(10, float)) + assert_(np.issubdtype(h.dtype, np.floating)) + + def test_f32_rounding(self): + # gh-4799, check that the rounding of the edges works with float32 + x = np.array([276.318359, -69.593948, 21.329449], dtype=np.float32) + y = np.array([5005.689453, 4481.327637, 6010.369629], dtype=np.float32) + counts_hist, xedges, yedges = np.histogram2d(x, y, bins=100) + assert_equal(counts_hist.sum(), 3.) + + def test_bool_conversion(self): + # gh-12107 + # Reference integer histogram + a = np.array([1, 1, 0], dtype=np.uint8) + int_hist, int_edges = np.histogram(a) + + # Should raise a warning on booleans + # Ensure that the histograms are equivalent, need to suppress + # the warnings to get the actual outputs + with pytest.warns(RuntimeWarning, match='Converting input from .*'): + hist, edges = np.histogram([True, True, False]) + # A warning should be issued + assert_array_equal(hist, int_hist) + assert_array_equal(edges, int_edges) + + def test_weights(self): + v = np.random.rand(100) + w = np.ones(100) * 5 + a, b = histogram(v) + na, nb = histogram(v, density=True) + wa, wb = histogram(v, weights=w) + nwa, nwb = histogram(v, weights=w, density=True) + assert_array_almost_equal(a * 5, wa) + assert_array_almost_equal(na, nwa) + + # Check weights are properly applied. + v = np.linspace(0, 10, 10) + w = np.concatenate((np.zeros(5), np.ones(5))) + wa, wb = histogram(v, bins=np.arange(11), weights=w) + assert_array_almost_equal(wa, w) + + # Check with integer weights + wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1]) + assert_array_equal(wa, [4, 5, 0, 1]) + wa, wb = histogram( + [1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], density=True) + assert_array_almost_equal(wa, np.array([4, 5, 0, 1]) / 10. / 3. * 4) + + # Check weights with non-uniform bin widths + a, b = histogram( + np.arange(9), [0, 1, 3, 6, 10], + weights=[2, 1, 1, 1, 1, 1, 1, 1, 1], density=True) + assert_almost_equal(a, [.2, .1, .1, .075]) + + def test_exotic_weights(self): + + # Test the use of weights that are not integer or floats, but e.g. + # complex numbers or object types. + + # Complex weights + values = np.array([1.3, 2.5, 2.3]) + weights = np.array([1, -1, 2]) + 1j * np.array([2, 1, 2]) + + # Check with custom bins + wa, wb = histogram(values, bins=[0, 2, 3], weights=weights) + assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3])) + + # Check with even bins + wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights) + assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3])) + + # Decimal weights + from decimal import Decimal + values = np.array([1.3, 2.5, 2.3]) + weights = np.array([Decimal(1), Decimal(2), Decimal(3)]) + + # Check with custom bins + wa, wb = histogram(values, bins=[0, 2, 3], weights=weights) + assert_array_almost_equal(wa, [Decimal(1), Decimal(5)]) + + # Check with even bins + wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights) + assert_array_almost_equal(wa, [Decimal(1), Decimal(5)]) + + def test_no_side_effects(self): + # This is a regression test that ensures that values passed to + # ``histogram`` are unchanged. + values = np.array([1.3, 2.5, 2.3]) + np.histogram(values, range=[-10, 10], bins=100) + assert_array_almost_equal(values, [1.3, 2.5, 2.3]) + + def test_empty(self): + a, b = histogram([], bins=([0, 1])) + assert_array_equal(a, np.array([0])) + assert_array_equal(b, np.array([0, 1])) + + def test_error_binnum_type(self): + # Tests if right Error is raised if bins argument is float + vals = np.linspace(0.0, 1.0, num=100) + histogram(vals, 5) + assert_raises(TypeError, histogram, vals, 2.4) + + def test_finite_range(self): + # Normal ranges should be fine + vals = np.linspace(0.0, 1.0, num=100) + histogram(vals, range=[0.25, 0.75]) + assert_raises(ValueError, histogram, vals, range=[np.nan, 0.75]) + assert_raises(ValueError, histogram, vals, range=[0.25, np.inf]) + + def test_invalid_range(self): + # start of range must be < end of range + vals = np.linspace(0.0, 1.0, num=100) + with assert_raises_regex(ValueError, "max must be larger than"): + np.histogram(vals, range=[0.1, 0.01]) + + def test_bin_edge_cases(self): + # Ensure that floating-point computations correctly place edge cases. + arr = np.array([337, 404, 739, 806, 1007, 1811, 2012]) + hist, edges = np.histogram(arr, bins=8296, range=(2, 2280)) + mask = hist > 0 + left_edges = edges[:-1][mask] + right_edges = edges[1:][mask] + for x, left, right in zip(arr, left_edges, right_edges): + assert_(x >= left) + assert_(x < right) + + def test_last_bin_inclusive_range(self): + arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.]) + hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5)) + assert_equal(hist[-1], 1) + + def test_bin_array_dims(self): + # gracefully handle bins object > 1 dimension + vals = np.linspace(0.0, 1.0, num=100) + bins = np.array([[0, 0.5], [0.6, 1.0]]) + with assert_raises_regex(ValueError, "must be 1d"): + np.histogram(vals, bins=bins) + + def test_unsigned_monotonicity_check(self): + # Ensures ValueError is raised if bins not increasing monotonically + # when bins contain unsigned values (see #9222) + arr = np.array([2]) + bins = np.array([1, 3, 1], dtype='uint64') + with assert_raises(ValueError): + hist, edges = np.histogram(arr, bins=bins) + + def test_object_array_of_0d(self): + # gh-7864 + assert_raises(ValueError, + histogram, [np.array(0.4) for i in range(10)] + [-np.inf]) + assert_raises(ValueError, + histogram, [np.array(0.4) for i in range(10)] + [np.inf]) + + # these should not crash + np.histogram([np.array(0.5) for i in range(10)] + [.500000000000002]) + np.histogram([np.array(0.5) for i in range(10)] + [.5]) + + def test_some_nan_values(self): + # gh-7503 + one_nan = np.array([0, 1, np.nan]) + all_nan = np.array([np.nan, np.nan]) + + # the internal comparisons with NaN give warnings + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) + # can't infer range with nan + assert_raises(ValueError, histogram, one_nan, bins='auto') + assert_raises(ValueError, histogram, all_nan, bins='auto') + + # explicit range solves the problem + h, b = histogram(one_nan, bins='auto', range=(0, 1)) + assert_equal(h.sum(), 2) # nan is not counted + h, b = histogram(all_nan, bins='auto', range=(0, 1)) + assert_equal(h.sum(), 0) # nan is not counted + + # as does an explicit set of bins + h, b = histogram(one_nan, bins=[0, 1]) + assert_equal(h.sum(), 2) # nan is not counted + h, b = histogram(all_nan, bins=[0, 1]) + assert_equal(h.sum(), 0) # nan is not counted + + def test_datetime(self): + begin = np.datetime64('2000-01-01', 'D') + offsets = np.array([0, 0, 1, 1, 2, 3, 5, 10, 20]) + bins = np.array([0, 2, 7, 20]) + dates = begin + offsets + date_bins = begin + bins + + td = np.dtype('timedelta64[D]') + + # Results should be the same for integer offsets or datetime values. + # For now, only explicit bins are supported, since linspace does not + # work on datetimes or timedeltas + d_count, d_edge = histogram(dates, bins=date_bins) + t_count, t_edge = histogram(offsets.astype(td), bins=bins.astype(td)) + i_count, i_edge = histogram(offsets, bins=bins) + + assert_equal(d_count, i_count) + assert_equal(t_count, i_count) + + assert_equal((d_edge - begin).astype(int), i_edge) + assert_equal(t_edge.astype(int), i_edge) + + assert_equal(d_edge.dtype, dates.dtype) + assert_equal(t_edge.dtype, td) + + def do_signed_overflow_bounds(self, dtype): + exponent = 8 * np.dtype(dtype).itemsize - 1 + arr = np.array([-2**exponent + 4, 2**exponent - 4], dtype=dtype) + hist, e = histogram(arr, bins=2) + assert_equal(e, [-2**exponent + 4, 0, 2**exponent - 4]) + assert_equal(hist, [1, 1]) + + def test_signed_overflow_bounds(self): + self.do_signed_overflow_bounds(np.byte) + self.do_signed_overflow_bounds(np.short) + self.do_signed_overflow_bounds(np.intc) + self.do_signed_overflow_bounds(np.int_) + self.do_signed_overflow_bounds(np.longlong) + + def do_precision_lower_bound(self, float_small, float_large): + eps = np.finfo(float_large).eps + + arr = np.array([1.0], float_small) + range = np.array([1.0 + eps, 2.0], float_large) + + # test is looking for behavior when the bounds change between dtypes + if range.astype(float_small)[0] != 1: + return + + # previously crashed + count, x_loc = np.histogram(arr, bins=1, range=range) + assert_equal(count, [0]) + assert_equal(x_loc.dtype, float_large) + + def do_precision_upper_bound(self, float_small, float_large): + eps = np.finfo(float_large).eps + + arr = np.array([1.0], float_small) + range = np.array([0.0, 1.0 - eps], float_large) + + # test is looking for behavior when the bounds change between dtypes + if range.astype(float_small)[-1] != 1: + return + + # previously crashed + count, x_loc = np.histogram(arr, bins=1, range=range) + assert_equal(count, [0]) + + assert_equal(x_loc.dtype, float_large) + + def do_precision(self, float_small, float_large): + self.do_precision_lower_bound(float_small, float_large) + self.do_precision_upper_bound(float_small, float_large) + + def test_precision(self): + # not looping results in a useful stack trace upon failure + self.do_precision(np.half, np.single) + self.do_precision(np.half, np.double) + self.do_precision(np.half, np.longdouble) + self.do_precision(np.single, np.double) + self.do_precision(np.single, np.longdouble) + self.do_precision(np.double, np.longdouble) + + def test_histogram_bin_edges(self): + hist, e = histogram([1, 2, 3, 4], [1, 2]) + edges = histogram_bin_edges([1, 2, 3, 4], [1, 2]) + assert_array_equal(edges, e) + + arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.]) + hist, e = histogram(arr, bins=30, range=(-0.5, 5)) + edges = histogram_bin_edges(arr, bins=30, range=(-0.5, 5)) + assert_array_equal(edges, e) + + hist, e = histogram(arr, bins='auto', range=(0, 1)) + edges = histogram_bin_edges(arr, bins='auto', range=(0, 1)) + assert_array_equal(edges, e) + + def test_small_value_range(self): + arr = np.array([1, 1 + 2e-16] * 10) + with pytest.raises(ValueError, match="Too many bins for data range"): + histogram(arr, bins=10) + + # @requires_memory(free_bytes=1e10) + # @pytest.mark.slow + @pytest.mark.skip(reason="Bad memory reports lead to OOM in ci testing") + def test_big_arrays(self): + sample = np.zeros([100000000, 3]) + xbins = 400 + ybins = 400 + zbins = np.arange(16000) + hist = np.histogramdd(sample=sample, bins=(xbins, ybins, zbins)) + assert_equal(type(hist), type((1, 2))) + + def test_gh_23110(self): + hist, e = np.histogram(np.array([-0.9e-308], dtype='>f8'), + bins=2, + range=(-1e-308, -2e-313)) + expected_hist = np.array([1, 0]) + assert_array_equal(hist, expected_hist) + + def test_gh_28400(self): + e = 1 + 1e-12 + Z = [0, 1, 1, 1, 1, 1, e, e, e, e, e, e, 2] + counts, edges = np.histogram(Z, bins="auto") + assert len(counts) < 10 + assert edges[0] == Z[0] + assert edges[-1] == Z[-1] + +class TestHistogramOptimBinNums: + """ + Provide test coverage when using provided estimators for optimal number of + bins + """ + + def test_empty(self): + estimator_list = ['fd', 'scott', 'rice', 'sturges', + 'doane', 'sqrt', 'auto', 'stone'] + # check it can deal with empty data + for estimator in estimator_list: + a, b = histogram([], bins=estimator) + assert_array_equal(a, np.array([0])) + assert_array_equal(b, np.array([0, 1])) + + def test_simple(self): + """ + Straightforward testing with a mixture of linspace data (for + consistency). All test values have been precomputed and the values + shouldn't change + """ + # Some basic sanity checking, with some fixed data. + # Checking for the correct number of bins + basic_test = {50: {'fd': 4, 'scott': 4, 'rice': 8, 'sturges': 7, + 'doane': 8, 'sqrt': 8, 'auto': 7, 'stone': 2}, + 500: {'fd': 8, 'scott': 8, 'rice': 16, 'sturges': 10, + 'doane': 12, 'sqrt': 23, 'auto': 10, 'stone': 9}, + 5000: {'fd': 17, 'scott': 17, 'rice': 35, 'sturges': 14, + 'doane': 17, 'sqrt': 71, 'auto': 17, 'stone': 20}} + + for testlen, expectedResults in basic_test.items(): + # Create some sort of non uniform data to test with + # (2 peak uniform mixture) + x1 = np.linspace(-10, -1, testlen // 5 * 2) + x2 = np.linspace(1, 10, testlen // 5 * 3) + x = np.concatenate((x1, x2)) + for estimator, numbins in expectedResults.items(): + a, b = np.histogram(x, estimator) + assert_equal(len(a), numbins, err_msg=f"For the {estimator} estimator " + f"with datasize of {testlen}") + + def test_small(self): + """ + Smaller datasets have the potential to cause issues with the data + adaptive methods, especially the FD method. All bin numbers have been + precalculated. + """ + small_dat = {1: {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1, + 'doane': 1, 'sqrt': 1, 'stone': 1}, + 2: {'fd': 2, 'scott': 1, 'rice': 3, 'sturges': 2, + 'doane': 1, 'sqrt': 2, 'stone': 1}, + 3: {'fd': 2, 'scott': 2, 'rice': 3, 'sturges': 3, + 'doane': 3, 'sqrt': 2, 'stone': 1}} + + for testlen, expectedResults in small_dat.items(): + testdat = np.arange(testlen).astype(float) + for estimator, expbins in expectedResults.items(): + a, b = np.histogram(testdat, estimator) + assert_equal(len(a), expbins, err_msg=f"For the {estimator} estimator " + f"with datasize of {testlen}") + + def test_incorrect_methods(self): + """ + Check a Value Error is thrown when an unknown string is passed in + """ + check_list = ['mad', 'freeman', 'histograms', 'IQR'] + for estimator in check_list: + assert_raises(ValueError, histogram, [1, 2, 3], estimator) + + def test_novariance(self): + """ + Check that methods handle no variance in data + Primarily for Scott and FD as the SD and IQR are both 0 in this case + """ + novar_dataset = np.ones(100) + novar_resultdict = {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1, + 'doane': 1, 'sqrt': 1, 'auto': 1, 'stone': 1} + + for estimator, numbins in novar_resultdict.items(): + a, b = np.histogram(novar_dataset, estimator) + assert_equal(len(a), numbins, + err_msg=f"{estimator} estimator, No Variance test") + + def test_limited_variance(self): + """ + Check when IQR is 0, but variance exists, we return a reasonable value. + """ + lim_var_data = np.ones(1000) + lim_var_data[:3] = 0 + lim_var_data[-4:] = 100 + + edges_auto = histogram_bin_edges(lim_var_data, 'auto') + assert_equal(edges_auto[0], 0) + assert_equal(edges_auto[-1], 100.) + assert len(edges_auto) < 100 + + edges_fd = histogram_bin_edges(lim_var_data, 'fd') + assert_equal(edges_fd, np.array([0, 100])) + + edges_sturges = histogram_bin_edges(lim_var_data, 'sturges') + assert_equal(edges_sturges, np.linspace(0, 100, 12)) + + def test_outlier(self): + """ + Check the FD, Scott and Doane with outliers. + + The FD estimates a smaller binwidth since it's less affected by + outliers. Since the range is so (artificially) large, this means more + bins, most of which will be empty, but the data of interest usually is + unaffected. The Scott estimator is more affected and returns fewer bins, + despite most of the variance being in one area of the data. The Doane + estimator lies somewhere between the other two. + """ + xcenter = np.linspace(-10, 10, 50) + outlier_dataset = np.hstack((np.linspace(-110, -100, 5), xcenter)) + + outlier_resultdict = {'fd': 21, 'scott': 5, 'doane': 11, 'stone': 6} + + for estimator, numbins in outlier_resultdict.items(): + a, b = np.histogram(outlier_dataset, estimator) + assert_equal(len(a), numbins) + + def test_scott_vs_stone(self): + # Verify that Scott's rule and Stone's rule converges for normally + # distributed data + + def nbins_ratio(seed, size): + rng = np.random.RandomState(seed) + x = rng.normal(loc=0, scale=2, size=size) + a, b = len(np.histogram(x, 'stone')[0]), len(np.histogram(x, 'scott')[0]) + return a / (a + b) + + geom_space = np.geomspace(start=10, stop=100, num=4).round().astype(int) + ll = [[nbins_ratio(seed, size) for size in geom_space] for seed in range(10)] + + # the average difference between the two methods decreases as the dataset + # size increases. + avg = abs(np.mean(ll, axis=0) - 0.5) + assert_almost_equal(avg, [0.15, 0.09, 0.08, 0.03], decimal=2) + + def test_simple_range(self): + """ + Straightforward testing with a mixture of linspace data (for + consistency). Adding in a 3rd mixture that will then be + completely ignored. All test values have been precomputed and + the shouldn't change. + """ + # some basic sanity checking, with some fixed data. + # Checking for the correct number of bins + basic_test = { + 50: {'fd': 8, 'scott': 8, 'rice': 15, + 'sturges': 14, 'auto': 14, 'stone': 8}, + 500: {'fd': 15, 'scott': 16, 'rice': 32, + 'sturges': 20, 'auto': 20, 'stone': 80}, + 5000: {'fd': 33, 'scott': 33, 'rice': 69, + 'sturges': 27, 'auto': 33, 'stone': 80} + } + + for testlen, expectedResults in basic_test.items(): + # create some sort of non uniform data to test with + # (3 peak uniform mixture) + x1 = np.linspace(-10, -1, testlen // 5 * 2) + x2 = np.linspace(1, 10, testlen // 5 * 3) + x3 = np.linspace(-100, -50, testlen) + x = np.hstack((x1, x2, x3)) + for estimator, numbins in expectedResults.items(): + a, b = np.histogram(x, estimator, range=(-20, 20)) + msg = f"For the {estimator} estimator" + msg += f" with datasize of {testlen}" + assert_equal(len(a), numbins, err_msg=msg) + + @pytest.mark.parametrize("bins", ['auto', 'fd', 'doane', 'scott', + 'stone', 'rice', 'sturges']) + def test_signed_integer_data(self, bins): + # Regression test for gh-14379. + a = np.array([-2, 0, 127], dtype=np.int8) + hist, edges = np.histogram(a, bins=bins) + hist32, edges32 = np.histogram(a.astype(np.int32), bins=bins) + assert_array_equal(hist, hist32) + assert_array_equal(edges, edges32) + + @pytest.mark.parametrize("bins", ['auto', 'fd', 'doane', 'scott', + 'stone', 'rice', 'sturges']) + def test_integer(self, bins): + """ + Test that bin width for integer data is at least 1. + """ + with warnings.catch_warnings(): + if bins == 'stone': + warnings.simplefilter('ignore', RuntimeWarning) + assert_equal( + np.histogram_bin_edges(np.tile(np.arange(9), 1000), bins), + np.arange(9)) + + def test_integer_non_auto(self): + """ + Test that the bin-width>=1 requirement *only* applies to auto binning. + """ + assert_equal( + np.histogram_bin_edges(np.tile(np.arange(9), 1000), 16), + np.arange(17) / 2) + assert_equal( + np.histogram_bin_edges(np.tile(np.arange(9), 1000), [.1, .2]), + [.1, .2]) + + def test_simple_weighted(self): + """ + Check that weighted data raises a TypeError + """ + estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto'] + for estimator in estimator_list: + assert_raises(TypeError, histogram, [1, 2, 3], + estimator, weights=[1, 2, 3]) + + +class TestHistogramdd: + + def test_simple(self): + x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5], + [.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]]) + H, edges = histogramdd(x, (2, 3, 3), + range=[[-1, 1], [0, 3], [0, 3]]) + answer = np.array([[[0, 1, 0], [0, 0, 1], [1, 0, 0]], + [[0, 1, 0], [0, 0, 1], [0, 0, 1]]]) + assert_array_equal(H, answer) + + # Check normalization + ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]] + H, edges = histogramdd(x, bins=ed, density=True) + assert_(np.all(H == answer / 12.)) + + # Check that H has the correct shape. + H, edges = histogramdd(x, (2, 3, 4), + range=[[-1, 1], [0, 3], [0, 4]], + density=True) + answer = np.array([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]], + [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0]]]) + assert_array_almost_equal(H, answer / 6., 4) + # Check that a sequence of arrays is accepted and H has the correct + # shape. + z = [np.squeeze(y) for y in np.split(x, 3, axis=1)] + H, edges = histogramdd( + z, bins=(4, 3, 2), range=[[-2, 2], [0, 3], [0, 2]]) + answer = np.array([[[0, 0], [0, 0], [0, 0]], + [[0, 1], [0, 0], [1, 0]], + [[0, 1], [0, 0], [0, 0]], + [[0, 0], [0, 0], [0, 0]]]) + assert_array_equal(H, answer) + + Z = np.zeros((5, 5, 5)) + Z[list(range(5)), list(range(5)), list(range(5))] = 1. + H, edges = histogramdd([np.arange(5), np.arange(5), np.arange(5)], 5) + assert_array_equal(H, Z) + + def test_shape_3d(self): + # All possible permutations for bins of different lengths in 3D. + bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4), + (4, 5, 6)) + r = np.random.rand(10, 3) + for b in bins: + H, edges = histogramdd(r, b) + assert_(H.shape == b) + + def test_shape_4d(self): + # All possible permutations for bins of different lengths in 4D. + bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4), + (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6), + (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7), + (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5), + (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5), + (5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4)) + + r = np.random.rand(10, 4) + for b in bins: + H, edges = histogramdd(r, b) + assert_(H.shape == b) + + def test_weights(self): + v = np.random.rand(100, 2) + hist, edges = histogramdd(v) + n_hist, edges = histogramdd(v, density=True) + w_hist, edges = histogramdd(v, weights=np.ones(100)) + assert_array_equal(w_hist, hist) + w_hist, edges = histogramdd(v, weights=np.ones(100) * 2, density=True) + assert_array_equal(w_hist, n_hist) + w_hist, edges = histogramdd(v, weights=np.ones(100, int) * 2) + assert_array_equal(w_hist, 2 * hist) + + def test_identical_samples(self): + x = np.zeros((10, 2), int) + hist, edges = histogramdd(x, bins=2) + assert_array_equal(edges[0], np.array([-0.5, 0., 0.5])) + + def test_empty(self): + a, b = histogramdd([[], []], bins=([0, 1], [0, 1])) + assert_array_max_ulp(a, np.array([[0.]])) + a, b = np.histogramdd([[], [], []], bins=2) + assert_array_max_ulp(a, np.zeros((2, 2, 2))) + + def test_bins_errors(self): + # There are two ways to specify bins. Check for the right errors + # when mixing those. + x = np.arange(8).reshape(2, 4) + assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5]) + assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1]) + assert_raises( + ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]]) + assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]])) + + def test_inf_edges(self): + # Test using +/-inf bin edges works. See #1788. + with np.errstate(invalid='ignore'): + x = np.arange(6).reshape(3, 2) + expected = np.array([[1, 0], [0, 1], [0, 1]]) + h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]]) + assert_allclose(h, expected) + h, e = np.histogramdd(x, bins=[3, np.array([-1, 2, np.inf])]) + assert_allclose(h, expected) + h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]]) + assert_allclose(h, expected) + + def test_rightmost_binedge(self): + # Test event very close to rightmost binedge. See Github issue #4266 + x = [0.9999999995] + bins = [[0., 0.5, 1.0]] + hist, _ = histogramdd(x, bins=bins) + assert_(hist[0] == 0.0) + assert_(hist[1] == 1.) + x = [1.0] + bins = [[0., 0.5, 1.0]] + hist, _ = histogramdd(x, bins=bins) + assert_(hist[0] == 0.0) + assert_(hist[1] == 1.) + x = [1.0000000001] + bins = [[0., 0.5, 1.0]] + hist, _ = histogramdd(x, bins=bins) + assert_(hist[0] == 0.0) + assert_(hist[1] == 0.0) + x = [1.0001] + bins = [[0., 0.5, 1.0]] + hist, _ = histogramdd(x, bins=bins) + assert_(hist[0] == 0.0) + assert_(hist[1] == 0.0) + + def test_finite_range(self): + vals = np.random.random((100, 3)) + histogramdd(vals, range=[[0.0, 1.0], [0.25, 0.75], [0.25, 0.5]]) + assert_raises(ValueError, histogramdd, vals, + range=[[0.0, 1.0], [0.25, 0.75], [0.25, np.inf]]) + assert_raises(ValueError, histogramdd, vals, + range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]]) + + def test_equal_edges(self): + """ Test that adjacent entries in an edge array can be equal """ + x = np.array([0, 1, 2]) + y = np.array([0, 1, 2]) + x_edges = np.array([0, 2, 2]) + y_edges = 1 + hist, edges = histogramdd((x, y), bins=(x_edges, y_edges)) + + hist_expected = np.array([ + [2.], + [1.], # x == 2 falls in the final bin + ]) + assert_equal(hist, hist_expected) + + def test_edge_dtype(self): + """ Test that if an edge array is input, its type is preserved """ + x = np.array([0, 10, 20]) + y = x / 10 + x_edges = np.array([0, 5, 15, 20]) + y_edges = x_edges / 10 + hist, edges = histogramdd((x, y), bins=(x_edges, y_edges)) + + assert_equal(edges[0].dtype, x_edges.dtype) + assert_equal(edges[1].dtype, y_edges.dtype) + + def test_large_integers(self): + big = 2**60 # Too large to represent with a full precision float + + x = np.array([0], np.int64) + x_edges = np.array([-1, +1], np.int64) + y = big + x + y_edges = big + x_edges + + hist, edges = histogramdd((x, y), bins=(x_edges, y_edges)) + + assert_equal(hist[0, 0], 1) + + def test_density_non_uniform_2d(self): + # Defines the following grid: + # + # 0 2 8 + # 0+-+-----+ + # + | + + # + | + + # 6+-+-----+ + # 8+-+-----+ + x_edges = np.array([0, 2, 8]) + y_edges = np.array([0, 6, 8]) + relative_areas = np.array([ + [3, 9], + [1, 3]]) + + # ensure the number of points in each region is proportional to its area + x = np.array([1] + [1] * 3 + [7] * 3 + [7] * 9) + y = np.array([7] + [1] * 3 + [7] * 3 + [1] * 9) + + # sanity check that the above worked as intended + hist, edges = histogramdd((y, x), bins=(y_edges, x_edges)) + assert_equal(hist, relative_areas) + + # resulting histogram should be uniform, since counts and areas are proportional + hist, edges = histogramdd((y, x), bins=(y_edges, x_edges), density=True) + assert_equal(hist, 1 / (8 * 8)) + + def test_density_non_uniform_1d(self): + # compare to histogram to show the results are the same + v = np.arange(10) + bins = np.array([0, 1, 3, 6, 10]) + hist, edges = histogram(v, bins, density=True) + hist_dd, edges_dd = histogramdd((v,), (bins,), density=True) + assert_equal(hist, hist_dd) + assert_equal(edges, edges_dd[0]) diff --git a/local_packages/numpy/lib/tests/test_index_tricks.py b/local_packages/numpy/lib/tests/test_index_tricks.py new file mode 100644 index 0000000..81e47ec --- /dev/null +++ b/local_packages/numpy/lib/tests/test_index_tricks.py @@ -0,0 +1,693 @@ +import pytest + +import numpy as np +from numpy.lib._index_tricks_impl import ( + c_, + diag_indices, + diag_indices_from, + fill_diagonal, + index_exp, + ix_, + mgrid, + ndenumerate, + ndindex, + ogrid, + r_, + s_, +) +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) + + +class TestRavelUnravelIndex: + def test_basic(self): + assert_equal(np.unravel_index(2, (2, 2)), (1, 0)) + + # test that new shape argument works properly + assert_equal(np.unravel_index(indices=2, + shape=(2, 2)), + (1, 0)) + + # test that an invalid second keyword argument + # is properly handled, including the old name `dims`. + with assert_raises(TypeError): + np.unravel_index(indices=2, hape=(2, 2)) + + with assert_raises(TypeError): + np.unravel_index(2, hape=(2, 2)) + + with assert_raises(TypeError): + np.unravel_index(254, ims=(17, 94)) + + with assert_raises(TypeError): + np.unravel_index(254, dims=(17, 94)) + + assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2) + assert_equal(np.unravel_index(254, (17, 94)), (2, 66)) + assert_equal(np.ravel_multi_index((2, 66), (17, 94)), 254) + assert_raises(ValueError, np.unravel_index, -1, (2, 2)) + assert_raises(TypeError, np.unravel_index, 0.5, (2, 2)) + assert_raises(ValueError, np.unravel_index, 4, (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (-3, 1), (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (2, 1), (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (0, -3), (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (0, 2), (2, 2)) + assert_raises(TypeError, np.ravel_multi_index, (0.1, 0.), (2, 2)) + + assert_equal(np.unravel_index((2 * 3 + 1) * 6 + 4, (4, 3, 6)), [2, 1, 4]) + assert_equal( + np.ravel_multi_index([2, 1, 4], (4, 3, 6)), (2 * 3 + 1) * 6 + 4) + + arr = np.array([[3, 6, 6], [4, 5, 1]]) + assert_equal(np.ravel_multi_index(arr, (7, 6)), [22, 41, 37]) + assert_equal( + np.ravel_multi_index(arr, (7, 6), order='F'), [31, 41, 13]) + assert_equal( + np.ravel_multi_index(arr, (4, 6), mode='clip'), [22, 23, 19]) + assert_equal(np.ravel_multi_index(arr, (4, 4), mode=('clip', 'wrap')), + [12, 13, 13]) + assert_equal(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)), 1621) + + assert_equal(np.unravel_index(np.array([22, 41, 37]), (7, 6)), + [[3, 6, 6], [4, 5, 1]]) + assert_equal( + np.unravel_index(np.array([31, 41, 13]), (7, 6), order='F'), + [[3, 6, 6], [4, 5, 1]]) + assert_equal(np.unravel_index(1621, (6, 7, 8, 9)), [3, 1, 4, 1]) + + def test_empty_indices(self): + msg1 = 'indices must be integral: the provided empty sequence was' + msg2 = 'only int indices permitted' + assert_raises_regex(TypeError, msg1, np.unravel_index, [], (10, 3, 5)) + assert_raises_regex(TypeError, msg1, np.unravel_index, (), (10, 3, 5)) + assert_raises_regex(TypeError, msg2, np.unravel_index, np.array([]), + (10, 3, 5)) + assert_equal(np.unravel_index(np.array([], dtype=int), (10, 3, 5)), + [[], [], []]) + assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], []), + (10, 3)) + assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], ['abc']), + (10, 3)) + assert_raises_regex(TypeError, msg2, np.ravel_multi_index, + (np.array([]), np.array([])), (5, 3)) + assert_equal(np.ravel_multi_index( + (np.array([], dtype=int), np.array([], dtype=int)), (5, 3)), []) + assert_equal(np.ravel_multi_index(np.array([[], []], dtype=int), + (5, 3)), []) + + def test_big_indices(self): + # ravel_multi_index for big indices (issue #7546) + if np.intp == np.int64: + arr = ([1, 29], [3, 5], [3, 117], [19, 2], + [2379, 1284], [2, 2], [0, 1]) + assert_equal( + np.ravel_multi_index(arr, (41, 7, 120, 36, 2706, 8, 6)), + [5627771580, 117259570957]) + + # test unravel_index for big indices (issue #9538) + assert_raises(ValueError, np.unravel_index, 1, (2**32 - 1, 2**31 + 1)) + + # test overflow checking for too big array (issue #7546) + dummy_arr = ([0], [0]) + half_max = np.iinfo(np.intp).max // 2 + assert_equal( + np.ravel_multi_index(dummy_arr, (half_max, 2)), [0]) + assert_raises(ValueError, + np.ravel_multi_index, dummy_arr, (half_max + 1, 2)) + assert_equal( + np.ravel_multi_index(dummy_arr, (half_max, 2), order='F'), [0]) + assert_raises(ValueError, + np.ravel_multi_index, dummy_arr, (half_max + 1, 2), order='F') + + def test_dtypes(self): + # Test with different data types + for dtype in [np.int16, np.uint16, np.int32, + np.uint32, np.int64, np.uint64]: + coords = np.array( + [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], dtype=dtype) + shape = (5, 8) + uncoords = 8 * coords[0] + coords[1] + assert_equal(np.ravel_multi_index(coords, shape), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape)) + uncoords = coords[0] + 5 * coords[1] + assert_equal( + np.ravel_multi_index(coords, shape, order='F'), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) + + coords = np.array( + [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]], + dtype=dtype) + shape = (5, 8, 10) + uncoords = 10 * (8 * coords[0] + coords[1]) + coords[2] + assert_equal(np.ravel_multi_index(coords, shape), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape)) + uncoords = coords[0] + 5 * (coords[1] + 8 * coords[2]) + assert_equal( + np.ravel_multi_index(coords, shape, order='F'), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) + + def test_clipmodes(self): + # Test clipmodes + assert_equal( + np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), mode='wrap'), + np.ravel_multi_index([1, 1, 6, 2], (4, 3, 7, 12))) + assert_equal(np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), + mode=( + 'wrap', 'raise', 'clip', 'raise')), + np.ravel_multi_index([1, 1, 0, 2], (4, 3, 7, 12))) + assert_raises( + ValueError, np.ravel_multi_index, [5, 1, -1, 2], (4, 3, 7, 12)) + + def test_writeability(self): + # gh-7269 + x, y = np.unravel_index([1, 2, 3], (4, 5)) + assert_(x.flags.writeable) + assert_(y.flags.writeable) + + def test_0d(self): + # gh-580 + x = np.unravel_index(0, ()) + assert_equal(x, ()) + + assert_raises_regex(ValueError, "0d array", np.unravel_index, [0], ()) + assert_raises_regex( + ValueError, "out of bounds", np.unravel_index, [1], ()) + + @pytest.mark.parametrize("mode", ["clip", "wrap", "raise"]) + def test_empty_array_ravel(self, mode): + res = np.ravel_multi_index( + np.zeros((3, 0), dtype=np.intp), (2, 1, 0), mode=mode) + assert res.shape == (0,) + + with assert_raises(ValueError): + np.ravel_multi_index( + np.zeros((3, 1), dtype=np.intp), (2, 1, 0), mode=mode) + + def test_empty_array_unravel(self): + res = np.unravel_index(np.zeros(0, dtype=np.intp), (2, 1, 0)) + # res is a tuple of three empty arrays + assert len(res) == 3 + assert all(a.shape == (0,) for a in res) + + with assert_raises(ValueError): + np.unravel_index([1], (2, 1, 0)) + + def test_regression_size_1_index(self): + # actually tests the nditer size one index tracking + # regression test for gh-29690 + np.unravel_index(np.array([[1, 0, 1, 0]], dtype=np.uint32), (4,)) + +class TestGrid: + def test_basic(self): + a = mgrid[-1:1:10j] + b = mgrid[-1:1:0.1] + assert_(a.shape == (10,)) + assert_(b.shape == (20,)) + assert_(a[0] == -1) + assert_almost_equal(a[-1], 1) + assert_(b[0] == -1) + assert_almost_equal(b[1] - b[0], 0.1, 11) + assert_almost_equal(b[-1], b[0] + 19 * 0.1, 11) + assert_almost_equal(a[1] - a[0], 2.0 / 9.0, 11) + + def test_linspace_equivalence(self): + y, st = np.linspace(2, 10, retstep=True) + assert_almost_equal(st, 8 / 49.0) + assert_array_almost_equal(y, mgrid[2:10:50j], 13) + + def test_nd(self): + c = mgrid[-1:1:10j, -2:2:10j] + d = mgrid[-1:1:0.1, -2:2:0.2] + assert_(c.shape == (2, 10, 10)) + assert_(d.shape == (2, 20, 20)) + assert_array_equal(c[0][0, :], -np.ones(10, 'd')) + assert_array_equal(c[1][:, 0], -2 * np.ones(10, 'd')) + assert_array_almost_equal(c[0][-1, :], np.ones(10, 'd'), 11) + assert_array_almost_equal(c[1][:, -1], 2 * np.ones(10, 'd'), 11) + assert_array_almost_equal(d[0, 1, :] - d[0, 0, :], + 0.1 * np.ones(20, 'd'), 11) + assert_array_almost_equal(d[1, :, 1] - d[1, :, 0], + 0.2 * np.ones(20, 'd'), 11) + + def test_sparse(self): + grid_full = mgrid[-1:1:10j, -2:2:10j] + grid_sparse = ogrid[-1:1:10j, -2:2:10j] + + # sparse grids can be made dense by broadcasting + grid_broadcast = np.broadcast_arrays(*grid_sparse) + for f, b in zip(grid_full, grid_broadcast): + assert_equal(f, b) + + @pytest.mark.parametrize("start, stop, step, expected", [ + (None, 10, 10j, (200, 10)), + (-10, 20, None, (1800, 30)), + ]) + def test_mgrid_size_none_handling(self, start, stop, step, expected): + # regression test None value handling for + # start and step values used by mgrid; + # internally, this aims to cover previously + # unexplored code paths in nd_grid() + grid = mgrid[start:stop:step, start:stop:step] + # need a smaller grid to explore one of the + # untested code paths + grid_small = mgrid[start:stop:step] + assert_equal(grid.size, expected[0]) + assert_equal(grid_small.size, expected[1]) + + def test_accepts_npfloating(self): + # regression test for #16466 + grid64 = mgrid[0.1:0.33:0.1, ] + grid32 = mgrid[np.float32(0.1):np.float32(0.33):np.float32(0.1), ] + assert_array_almost_equal(grid64, grid32) + # At some point this was float64, but NEP 50 changed it: + assert grid32.dtype == np.float32 + assert grid64.dtype == np.float64 + + # different code path for single slice + grid64 = mgrid[0.1:0.33:0.1] + grid32 = mgrid[np.float32(0.1):np.float32(0.33):np.float32(0.1)] + assert_(grid32.dtype == np.float64) + assert_array_almost_equal(grid64, grid32) + + def test_accepts_longdouble(self): + # regression tests for #16945 + grid64 = mgrid[0.1:0.33:0.1, ] + grid128 = mgrid[ + np.longdouble(0.1):np.longdouble(0.33):np.longdouble(0.1), + ] + assert_(grid128.dtype == np.longdouble) + assert_array_almost_equal(grid64, grid128) + + grid128c_a = mgrid[0:np.longdouble(1):3.4j] + grid128c_b = mgrid[0:np.longdouble(1):3.4j, ] + assert_(grid128c_a.dtype == grid128c_b.dtype == np.longdouble) + assert_array_equal(grid128c_a, grid128c_b[0]) + + # different code path for single slice + grid64 = mgrid[0.1:0.33:0.1] + grid128 = mgrid[ + np.longdouble(0.1):np.longdouble(0.33):np.longdouble(0.1) + ] + assert_(grid128.dtype == np.longdouble) + assert_array_almost_equal(grid64, grid128) + + def test_accepts_npcomplexfloating(self): + # Related to #16466 + assert_array_almost_equal( + mgrid[0.1:0.3:3j, ], mgrid[0.1:0.3:np.complex64(3j), ] + ) + + # different code path for single slice + assert_array_almost_equal( + mgrid[0.1:0.3:3j], mgrid[0.1:0.3:np.complex64(3j)] + ) + + # Related to #16945 + grid64_a = mgrid[0.1:0.3:3.3j] + grid64_b = mgrid[0.1:0.3:3.3j, ][0] + assert_(grid64_a.dtype == grid64_b.dtype == np.float64) + assert_array_equal(grid64_a, grid64_b) + + grid128_a = mgrid[0.1:0.3:np.clongdouble(3.3j)] + grid128_b = mgrid[0.1:0.3:np.clongdouble(3.3j), ][0] + assert_(grid128_a.dtype == grid128_b.dtype == np.longdouble) + assert_array_equal(grid64_a, grid64_b) + + +class TestConcatenator: + def test_1d(self): + assert_array_equal(r_[1, 2, 3, 4, 5, 6], np.array([1, 2, 3, 4, 5, 6])) + b = np.ones(5) + c = r_[b, 0, 0, b] + assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1]) + + def test_mixed_type(self): + g = r_[10.1, 1:10] + assert_(g.dtype == 'f8') + + def test_more_mixed_type(self): + g = r_[-10.1, np.array([1]), np.array([2, 3, 4]), 10.0] + assert_(g.dtype == 'f8') + + def test_complex_step(self): + # Regression test for #12262 + g = r_[0:36:100j] + assert_(g.shape == (100,)) + + # Related to #16466 + g = r_[0:36:np.complex64(100j)] + assert_(g.shape == (100,)) + + def test_2d(self): + b = np.random.rand(5, 5) + c = np.random.rand(5, 5) + d = r_['1', b, c] # append columns + assert_(d.shape == (5, 10)) + assert_array_equal(d[:, :5], b) + assert_array_equal(d[:, 5:], c) + d = r_[b, c] + assert_(d.shape == (10, 5)) + assert_array_equal(d[:5, :], b) + assert_array_equal(d[5:, :], c) + + def test_0d(self): + assert_equal(r_[0, np.array(1), 2], [0, 1, 2]) + assert_equal(r_[[0, 1, 2], np.array(3)], [0, 1, 2, 3]) + assert_equal(r_[np.array(0), [1, 2, 3]], [0, 1, 2, 3]) + + +class TestNdenumerate: + def test_basic(self): + a = np.array([[1, 2], [3, 4]]) + assert_equal(list(ndenumerate(a)), + [((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)]) + + +class TestIndexExpression: + def test_regression_1(self): + # ticket #1196 + a = np.arange(2) + assert_equal(a[:-1], a[s_[:-1]]) + assert_equal(a[:-1], a[index_exp[:-1]]) + + def test_simple_1(self): + a = np.random.rand(4, 5, 6) + + assert_equal(a[:, :3, [1, 2]], a[index_exp[:, :3, [1, 2]]]) + assert_equal(a[:, :3, [1, 2]], a[s_[:, :3, [1, 2]]]) + + +class TestIx_: + def test_regression_1(self): + # Test empty untyped inputs create outputs of indexing type, gh-5804 + a, = np.ix_(range(0)) + assert_equal(a.dtype, np.intp) + + a, = np.ix_([]) + assert_equal(a.dtype, np.intp) + + # but if the type is specified, don't change it + a, = np.ix_(np.array([], dtype=np.float32)) + assert_equal(a.dtype, np.float32) + + def test_shape_and_dtype(self): + sizes = (4, 5, 3, 2) + # Test both lists and arrays + for func in (range, np.arange): + arrays = np.ix_(*[func(sz) for sz in sizes]) + for k, (a, sz) in enumerate(zip(arrays, sizes)): + assert_equal(a.shape[k], sz) + assert_(all(sh == 1 for j, sh in enumerate(a.shape) if j != k)) + assert_(np.issubdtype(a.dtype, np.integer)) + + def test_bool(self): + bool_a = [True, False, True, True] + int_a, = np.nonzero(bool_a) + assert_equal(np.ix_(bool_a)[0], int_a) + + def test_1d_only(self): + idx2d = [[1, 2, 3], [4, 5, 6]] + assert_raises(ValueError, np.ix_, idx2d) + + def test_repeated_input(self): + length_of_vector = 5 + x = np.arange(length_of_vector) + out = ix_(x, x) + assert_equal(out[0].shape, (length_of_vector, 1)) + assert_equal(out[1].shape, (1, length_of_vector)) + # check that input shape is not modified + assert_equal(x.shape, (length_of_vector,)) + + +def test_c_(): + a = c_[np.array([[1, 2, 3]]), 0, 0, np.array([[4, 5, 6]])] + assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]]) + + +class TestFillDiagonal: + def test_basic(self): + a = np.zeros((3, 3), int) + fill_diagonal(a, 5) + assert_array_equal( + a, np.array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5]]) + ) + + def test_tall_matrix(self): + a = np.zeros((10, 3), int) + fill_diagonal(a, 5) + assert_array_equal( + a, np.array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0]]) + ) + + def test_tall_matrix_wrap(self): + a = np.zeros((10, 3), int) + fill_diagonal(a, 5, True) + assert_array_equal( + a, np.array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5], + [0, 0, 0], + [5, 0, 0], + [0, 5, 0], + [0, 0, 5], + [0, 0, 0], + [5, 0, 0], + [0, 5, 0]]) + ) + + def test_wide_matrix(self): + a = np.zeros((3, 10), int) + fill_diagonal(a, 5) + assert_array_equal( + a, np.array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 5, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 5, 0, 0, 0, 0, 0, 0, 0]]) + ) + + def test_operate_4d_array(self): + a = np.zeros((3, 3, 3, 3), int) + fill_diagonal(a, 4) + i = np.array([0, 1, 2]) + assert_equal(np.where(a != 0), (i, i, i, i)) + + def test_low_dim_handling(self): + # raise error with low dimensionality + a = np.zeros(3, int) + with assert_raises_regex(ValueError, "at least 2-d"): + fill_diagonal(a, 5) + + def test_hetero_shape_handling(self): + # raise error with high dimensionality and + # shape mismatch + a = np.zeros((3, 3, 7, 3), int) + with assert_raises_regex(ValueError, "equal length"): + fill_diagonal(a, 2) + + +def test_diag_indices(): + di = diag_indices(4) + a = np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16]]) + a[di] = 100 + assert_array_equal( + a, np.array([[100, 2, 3, 4], + [5, 100, 7, 8], + [9, 10, 100, 12], + [13, 14, 15, 100]]) + ) + + # Now, we create indices to manipulate a 3-d array: + d3 = diag_indices(2, 3) + + # And use it to set the diagonal of a zeros array to 1: + a = np.zeros((2, 2, 2), int) + a[d3] = 1 + assert_array_equal( + a, np.array([[[1, 0], + [0, 0]], + [[0, 0], + [0, 1]]]) + ) + + +class TestDiagIndicesFrom: + + def test_diag_indices_from(self): + x = np.random.random((4, 4)) + r, c = diag_indices_from(x) + assert_array_equal(r, np.arange(4)) + assert_array_equal(c, np.arange(4)) + + def test_error_small_input(self): + x = np.ones(7) + with assert_raises_regex(ValueError, "at least 2-d"): + diag_indices_from(x) + + def test_error_shape_mismatch(self): + x = np.zeros((3, 3, 2, 3), int) + with assert_raises_regex(ValueError, "equal length"): + diag_indices_from(x) + + +def test_ndindex(): + x = list(ndindex(1, 2, 3)) + expected = [ix for ix, e in ndenumerate(np.zeros((1, 2, 3)))] + assert_array_equal(x, expected) + + x = list(ndindex((1, 2, 3))) + assert_array_equal(x, expected) + + # Test use of scalars and tuples + x = list(ndindex((3,))) + assert_array_equal(x, list(ndindex(3))) + + # Make sure size argument is optional + x = list(ndindex()) + assert_equal(x, [()]) + + x = list(ndindex(())) + assert_equal(x, [()]) + + # Make sure 0-sized ndindex works correctly + x = list(ndindex(*[0])) + assert_equal(x, []) + + +def test_ndindex_zero_dimensions_explicit(): + """Test ndindex produces empty iterators for explicit + zero-length dimensions.""" + assert list(np.ndindex(0, 3)) == [] + assert list(np.ndindex(3, 0, 2)) == [] + assert list(np.ndindex(0)) == [] + + +@pytest.mark.parametrize("bad_shape", [2.5, "2", [2, 3], (2.0, 3)]) +def test_ndindex_non_integer_dimensions(bad_shape): + """Test that non-integer dimensions raise TypeError.""" + with pytest.raises(TypeError): + # Passing invalid_shape_arg directly to ndindex. It will try to use it + # as a dimension and should trigger a TypeError. + list(np.ndindex(bad_shape)) + + +def test_ndindex_stop_iteration_behavior(): + """Test that StopIteration is raised properly after exhaustion.""" + it = np.ndindex(2, 2) + # Exhaust the iterator + list(it) + # Should raise StopIteration on subsequent calls + with pytest.raises(StopIteration): + next(it) + + +def test_ndindex_iterator_independence(): + """Test that each ndindex instance creates independent iterators.""" + shape = (2, 3) + iter1 = np.ndindex(*shape) + iter2 = np.ndindex(*shape) + + next(iter1) + next(iter1) + + assert_equal(next(iter2), (0, 0)) + assert_equal(next(iter1), (0, 2)) + + +def test_ndindex_tuple_vs_args_consistency(): + """Test that ndindex(shape) and ndindex(*shape) produce same results.""" + # Single dimension + assert_equal(list(np.ndindex(5)), list(np.ndindex((5,)))) + + # Multiple dimensions + assert_equal(list(np.ndindex(2, 3)), list(np.ndindex((2, 3)))) + + # Complex shape + shape = (2, 1, 4) + assert_equal(list(np.ndindex(*shape)), list(np.ndindex(shape))) + + +def test_ndindex_against_ndenumerate_compatibility(): + """Test ndindex produces same indices as ndenumerate.""" + for shape in [(1, 2, 3), (3,), (2, 2), ()]: + ndindex_result = list(np.ndindex(shape)) + ndenumerate_indices = [ix for ix, _ in np.ndenumerate(np.zeros(shape))] + assert_array_equal(ndindex_result, ndenumerate_indices) + + +def test_ndindex_multidimensional_correctness(): + """Test ndindex produces correct indices for multidimensional arrays.""" + shape = (2, 1, 3) + result = list(np.ndindex(*shape)) + expected = [ + (0, 0, 0), + (0, 0, 1), + (0, 0, 2), + (1, 0, 0), + (1, 0, 1), + (1, 0, 2), + ] + assert_equal(result, expected) + + +def test_ndindex_large_dimensions_behavior(): + """Test ndindex behaves correctly when initialized with large dimensions.""" + large_shape = (1000, 1000) + iter_obj = np.ndindex(*large_shape) + first_element = next(iter_obj) + assert_equal(first_element, (0, 0)) + + +def test_ndindex_empty_iterator_behavior(): + """Test detailed behavior of empty iterators.""" + empty_iter = np.ndindex(0, 5) + assert_equal(list(empty_iter), []) + + empty_iter2 = np.ndindex(3, 0, 2) + with pytest.raises(StopIteration): + next(empty_iter2) + + +@pytest.mark.parametrize( + "negative_shape_arg", + [ + (-1,), # Single negative dimension + (2, -3, 4), # Negative dimension in the middle + (5, 0, -2), # Mix of valid (0) and invalid (negative) dimensions + ], +) +def test_ndindex_negative_dimensions(negative_shape_arg): + """Test that negative dimensions raise ValueError.""" + with pytest.raises(ValueError): + ndindex(negative_shape_arg) + + +def test_ndindex_empty_shape(): + import numpy as np + # ndindex() and ndindex(()) should return a single empty tuple + assert list(np.ndindex()) == [()] + assert list(np.ndindex(())) == [()] + +def test_ndindex_negative_dim_raises(): + # ndindex(-1) should raise a ValueError + with pytest.raises(ValueError): + list(np.ndindex(-1)) diff --git a/local_packages/numpy/lib/tests/test_io.py b/local_packages/numpy/lib/tests/test_io.py new file mode 100644 index 0000000..5ba634b --- /dev/null +++ b/local_packages/numpy/lib/tests/test_io.py @@ -0,0 +1,2857 @@ +import gc +import gzip +import locale +import os +import re +import sys +import threading +import time +import warnings +import zipfile +from ctypes import c_bool +from datetime import datetime +from io import BytesIO, StringIO +from multiprocessing import Value, get_context +from pathlib import Path +from tempfile import NamedTemporaryFile + +import pytest + +import numpy as np +import numpy.ma as ma +from numpy._utils import asbytes +from numpy.exceptions import VisibleDeprecationWarning +from numpy.lib import _npyio_impl +from numpy.lib._iotools import ConversionWarning, ConverterError +from numpy.lib._npyio_impl import recfromcsv, recfromtxt +from numpy.ma.testutils import assert_equal +from numpy.testing import ( + HAS_REFCOUNT, + IS_PYPY, + IS_WASM, + assert_, + assert_allclose, + assert_array_equal, + assert_no_gc_cycles, + assert_no_warnings, + assert_raises, + assert_raises_regex, + break_cycles, + tempdir, + temppath, +) +from numpy.testing._private.utils import requires_memory + + +class TextIO(BytesIO): + """Helper IO class. + + Writes encode strings to bytes if needed, reads return bytes. + This makes it easier to emulate files opened in binary mode + without needing to explicitly convert strings to bytes in + setting up the test data. + + """ + def __init__(self, s=""): + BytesIO.__init__(self, asbytes(s)) + + def write(self, s): + BytesIO.write(self, asbytes(s)) + + def writelines(self, lines): + BytesIO.writelines(self, [asbytes(s) for s in lines]) + + +IS_64BIT = sys.maxsize > 2**32 +try: + import bz2 + HAS_BZ2 = True +except ImportError: + HAS_BZ2 = False +try: + import lzma + HAS_LZMA = True +except ImportError: + HAS_LZMA = False + + +def strptime(s, fmt=None): + """ + This function is available in the datetime module only from Python >= + 2.5. + + """ + if isinstance(s, bytes): + s = s.decode("latin1") + return datetime(*time.strptime(s, fmt)[:3]) + + +class RoundtripTest: + def roundtrip(self, save_func, *args, **kwargs): + """ + save_func : callable + Function used to save arrays to file. + file_on_disk : bool + If true, store the file on disk, instead of in a + string buffer. + save_kwds : dict + Parameters passed to `save_func`. + load_kwds : dict + Parameters passed to `numpy.load`. + args : tuple of arrays + Arrays stored to file. + + """ + save_kwds = kwargs.get('save_kwds', {}) + load_kwds = kwargs.get('load_kwds', {"allow_pickle": True}) + file_on_disk = kwargs.get('file_on_disk', False) + + if file_on_disk: + target_file = NamedTemporaryFile(delete=False) + load_file = target_file.name + else: + target_file = BytesIO() + load_file = target_file + + try: + arr = args + + save_func(target_file, *arr, **save_kwds) + target_file.flush() + target_file.seek(0) + + if sys.platform == 'win32' and not isinstance(target_file, BytesIO): + target_file.close() + + arr_reloaded = np.load(load_file, **load_kwds) + + finally: + if not isinstance(target_file, BytesIO): + target_file.close() + # holds an open file descriptor so it can't be deleted on win + if 'arr_reloaded' in locals(): + if not isinstance(arr_reloaded, np.lib.npyio.NpzFile): + os.remove(target_file.name) + + return arr, arr_reloaded + + def check_roundtrips(self, a): + self.roundtrip(a) + self.roundtrip(a, file_on_disk=True) + self.roundtrip(np.asfortranarray(a)) + self.roundtrip(np.asfortranarray(a), file_on_disk=True) + if a.shape[0] > 1: + # neither C nor Fortran contiguous for 2D arrays or more + self.roundtrip(np.asfortranarray(a)[1:]) + self.roundtrip(np.asfortranarray(a)[1:], file_on_disk=True) + + def test_array(self): + a = np.array([], float) + self.check_roundtrips(a) + + a = np.array([[1, 2], [3, 4]], float) + self.check_roundtrips(a) + + a = np.array([[1, 2], [3, 4]], int) + self.check_roundtrips(a) + + a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle) + self.check_roundtrips(a) + + a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble) + self.check_roundtrips(a) + + def test_array_object(self): + a = np.array([], object) + self.check_roundtrips(a) + + a = np.array([[1, 2], [3, 4]], object) + self.check_roundtrips(a) + + def test_1D(self): + a = np.array([1, 2, 3, 4], int) + self.roundtrip(a) + + @pytest.mark.skipif(sys.platform == 'win32', reason="Fails on Win32") + def test_mmap(self): + a = np.array([[1, 2.5], [4, 7.3]]) + self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'}) + + a = np.asfortranarray([[1, 2.5], [4, 7.3]]) + self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'}) + + def test_record(self): + a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + self.check_roundtrips(a) + + @pytest.mark.slow + def test_format_2_0(self): + dt = [(("%d" % i) * 100, float) for i in range(500)] + a = np.ones(1000, dtype=dt) + with warnings.catch_warnings(record=True): + warnings.filterwarnings('always', '', UserWarning) + self.check_roundtrips(a) + + +class TestSaveLoad(RoundtripTest): + def roundtrip(self, *args, **kwargs): + arr, arr_reloaded = RoundtripTest.roundtrip(self, np.save, *args, **kwargs) + assert_equal(arr[0], arr_reloaded) + assert_equal(arr[0].dtype, arr_reloaded.dtype) + assert_equal(arr[0].flags.fnc, arr_reloaded.flags.fnc) + + +class TestSavezLoad(RoundtripTest): + def roundtrip(self, *args, **kwargs): + arr, arr_reloaded = RoundtripTest.roundtrip(self, np.savez, *args, **kwargs) + try: + for n, a in enumerate(arr): + reloaded = arr_reloaded['arr_%d' % n] + assert_equal(a, reloaded) + assert_equal(a.dtype, reloaded.dtype) + assert_equal(a.flags.fnc, reloaded.flags.fnc) + finally: + # delete tempfile, must be done here on windows + if arr_reloaded.fid: + arr_reloaded.fid.close() + os.remove(arr_reloaded.fid.name) + + def test_load_non_npy(self): + """Test loading non-.npy files and name mapping in .npz.""" + with temppath(prefix="numpy_test_npz_load_non_npy_", suffix=".npz") as tmp: + with zipfile.ZipFile(tmp, "w") as npz: + with npz.open("test1.npy", "w") as out_file: + np.save(out_file, np.arange(10)) + with npz.open("test2", "w") as out_file: + np.save(out_file, np.arange(10)) + with npz.open("metadata", "w") as out_file: + out_file.write(b"Name: Test") + with np.load(tmp) as npz: + assert len(npz["test1"]) == 10 + assert len(npz["test1.npy"]) == 10 + assert len(npz["test2"]) == 10 + assert npz["metadata"] == b"Name: Test" + + @pytest.mark.skipif(IS_PYPY, reason="Hangs on PyPy") + @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") + @pytest.mark.slow + @pytest.mark.thread_unsafe(reason="crashes with low memory") + def test_big_arrays(self): + L = (1 << 31) + 100000 + a = np.empty(L, dtype=np.uint8) + with temppath(prefix="numpy_test_big_arrays_", suffix=".npz") as tmp: + np.savez(tmp, a=a) + del a + npfile = np.load(tmp) + a = npfile['a'] # Should succeed + npfile.close() + + def test_multiple_arrays(self): + a = np.array([[1, 2], [3, 4]], float) + b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) + self.roundtrip(a, b) + + def test_named_arrays(self): + a = np.array([[1, 2], [3, 4]], float) + b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) + c = BytesIO() + np.savez(c, file_a=a, file_b=b) + c.seek(0) + l = np.load(c) + assert_equal(a, l['file_a']) + assert_equal(b, l['file_b']) + + def test_tuple_getitem_raises(self): + # gh-23748 + a = np.array([1, 2, 3]) + f = BytesIO() + np.savez(f, a=a) + f.seek(0) + l = np.load(f) + with pytest.raises(KeyError, match="(1, 2)"): + l[1, 2] + + def test_BagObj(self): + a = np.array([[1, 2], [3, 4]], float) + b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) + c = BytesIO() + np.savez(c, file_a=a, file_b=b) + c.seek(0) + l = np.load(c) + assert_equal(sorted(dir(l.f)), ['file_a', 'file_b']) + assert_equal(a, l.f.file_a) + assert_equal(b, l.f.file_b) + + @pytest.mark.skipif(IS_WASM, reason="Cannot start thread") + def test_savez_filename_clashes(self): + # Test that issue #852 is fixed + # and savez functions in multithreaded environment + + def writer(error_list): + with temppath(suffix='.npz') as tmp: + arr = np.random.randn(500, 500) + try: + np.savez(tmp, arr=arr) + except OSError as err: + error_list.append(err) + + errors = [] + threads = [threading.Thread(target=writer, args=(errors,)) + for j in range(3)] + for t in threads: + t.start() + for t in threads: + t.join() + + if errors: + raise AssertionError(errors) + + def test_not_closing_opened_fid(self): + # Test that issue #2178 is fixed: + # verify could seek on 'loaded' file + with temppath(suffix='.npz') as tmp: + with open(tmp, 'wb') as fp: + np.savez(fp, data='LOVELY LOAD') + with open(tmp, 'rb', 10000) as fp: + fp.seek(0) + assert_(not fp.closed) + np.load(fp)['data'] + # fp must not get closed by .load + assert_(not fp.closed) + fp.seek(0) + assert_(not fp.closed) + + @pytest.mark.slow_pypy + def test_closing_fid(self): + # Test that issue #1517 (too many opened files) remains closed + # It might be a "weak" test since failed to get triggered on + # e.g. Debian sid of 2012 Jul 05 but was reported to + # trigger the failure on Ubuntu 10.04: + # http://projects.scipy.org/numpy/ticket/1517#comment:2 + with temppath(suffix='.npz') as tmp: + np.savez(tmp, data='LOVELY LOAD') + # We need to check if the garbage collector can properly close + # numpy npz file returned by np.load when their reference count + # goes to zero. Python running in debug mode raises a + # ResourceWarning when file closing is left to the garbage + # collector, so we catch the warnings. + with warnings.catch_warnings(): + # TODO: specify exact message + warnings.simplefilter('ignore', ResourceWarning) + for i in range(1, 1025): + try: + np.load(tmp)["data"] + except Exception as e: + msg = f"Failed to load data from a file: {e}" + raise AssertionError(msg) + finally: + if IS_PYPY: + gc.collect() + + def test_closing_zipfile_after_load(self): + # Check that zipfile owns file and can close it. This needs to + # pass a file name to load for the test. On windows failure will + # cause a second error will be raised when the attempt to remove + # the open file is made. + prefix = 'numpy_test_closing_zipfile_after_load_' + with temppath(suffix='.npz', prefix=prefix) as tmp: + np.savez(tmp, lab='place holder') + data = np.load(tmp) + fp = data.zip.fp + data.close() + assert_(fp.closed) + + @pytest.mark.parametrize("count, expected_repr", [ + (1, "NpzFile {fname!r} with keys: arr_0"), + (5, "NpzFile {fname!r} with keys: arr_0, arr_1, arr_2, arr_3, arr_4"), + # _MAX_REPR_ARRAY_COUNT is 5, so files with more than 5 keys are + # expected to end in '...' + (6, "NpzFile {fname!r} with keys: arr_0, arr_1, arr_2, arr_3, arr_4..."), + ]) + def test_repr_lists_keys(self, count, expected_repr): + a = np.array([[1, 2], [3, 4]], float) + with temppath(suffix='.npz') as tmp: + np.savez(tmp, *[a] * count) + l = np.load(tmp) + assert repr(l) == expected_repr.format(fname=tmp) + l.close() + + +class TestSaveTxt: + def test_array(self): + a = np.array([[1, 2], [3, 4]], float) + fmt = "%.18e" + c = BytesIO() + np.savetxt(c, a, fmt=fmt) + c.seek(0) + assert_equal(c.readlines(), + [asbytes((fmt + ' ' + fmt + '\n') % (1, 2)), + asbytes((fmt + ' ' + fmt + '\n') % (3, 4))]) + + a = np.array([[1, 2], [3, 4]], int) + c = BytesIO() + np.savetxt(c, a, fmt='%d') + c.seek(0) + assert_equal(c.readlines(), [b'1 2\n', b'3 4\n']) + + def test_1D(self): + a = np.array([1, 2, 3, 4], int) + c = BytesIO() + np.savetxt(c, a, fmt='%d') + c.seek(0) + lines = c.readlines() + assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n']) + + def test_0D_3D(self): + c = BytesIO() + assert_raises(ValueError, np.savetxt, c, np.array(1)) + assert_raises(ValueError, np.savetxt, c, np.array([[[1], [2]]])) + + def test_structured(self): + a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + c = BytesIO() + np.savetxt(c, a, fmt='%d') + c.seek(0) + assert_equal(c.readlines(), [b'1 2\n', b'3 4\n']) + + def test_structured_padded(self): + # gh-13297 + a = np.array([(1, 2, 3), (4, 5, 6)], dtype=[ + ('foo', 'i4'), ('bar', 'i4'), ('baz', 'i4') + ]) + c = BytesIO() + np.savetxt(c, a[['foo', 'baz']], fmt='%d') + c.seek(0) + assert_equal(c.readlines(), [b'1 3\n', b'4 6\n']) + + def test_multifield_view(self): + a = np.ones(1, dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'f4')]) + v = a[['x', 'z']] + with temppath(suffix='.npy') as path: + path = Path(path) + np.save(path, v) + data = np.load(path) + assert_array_equal(data, v) + + def test_delimiter(self): + a = np.array([[1., 2.], [3., 4.]]) + c = BytesIO() + np.savetxt(c, a, delimiter=',', fmt='%d') + c.seek(0) + assert_equal(c.readlines(), [b'1,2\n', b'3,4\n']) + + def test_format(self): + a = np.array([(1, 2), (3, 4)]) + c = BytesIO() + # Sequence of formats + np.savetxt(c, a, fmt=['%02d', '%3.1f']) + c.seek(0) + assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n']) + + # A single multiformat string + c = BytesIO() + np.savetxt(c, a, fmt='%02d : %3.1f') + c.seek(0) + lines = c.readlines() + assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n']) + + # Specify delimiter, should be overridden + c = BytesIO() + np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',') + c.seek(0) + lines = c.readlines() + assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n']) + + # Bad fmt, should raise a ValueError + c = BytesIO() + assert_raises(ValueError, np.savetxt, c, a, fmt=99) + + def test_header_footer(self): + # Test the functionality of the header and footer keyword argument. + + c = BytesIO() + a = np.array([(1, 2), (3, 4)], dtype=int) + test_header_footer = 'Test header / footer' + # Test the header keyword argument + np.savetxt(c, a, fmt='%1d', header=test_header_footer) + c.seek(0) + assert_equal(c.read(), + asbytes('# ' + test_header_footer + '\n1 2\n3 4\n')) + # Test the footer keyword argument + c = BytesIO() + np.savetxt(c, a, fmt='%1d', footer=test_header_footer) + c.seek(0) + assert_equal(c.read(), + asbytes('1 2\n3 4\n# ' + test_header_footer + '\n')) + # Test the commentstr keyword argument used on the header + c = BytesIO() + commentstr = '% ' + np.savetxt(c, a, fmt='%1d', + header=test_header_footer, comments=commentstr) + c.seek(0) + assert_equal(c.read(), + asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n')) + # Test the commentstr keyword argument used on the footer + c = BytesIO() + commentstr = '% ' + np.savetxt(c, a, fmt='%1d', + footer=test_header_footer, comments=commentstr) + c.seek(0) + assert_equal(c.read(), + asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n')) + + @pytest.mark.parametrize("filename_type", [Path, str]) + def test_file_roundtrip(self, filename_type): + with temppath() as name: + a = np.array([(1, 2), (3, 4)]) + np.savetxt(filename_type(name), a) + b = np.loadtxt(filename_type(name)) + assert_array_equal(a, b) + + def test_complex_arrays(self): + ncols = 2 + nrows = 2 + a = np.zeros((ncols, nrows), dtype=np.complex128) + re = np.pi + im = np.e + a[:] = re + 1.0j * im + + # One format only + c = BytesIO() + np.savetxt(c, a, fmt=' %+.3e') + c.seek(0) + lines = c.readlines() + assert_equal( + lines, + [b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n', + b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n']) + + # One format for each real and imaginary part + c = BytesIO() + np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols) + c.seek(0) + lines = c.readlines() + assert_equal( + lines, + [b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n', + b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n']) + + # One format for each complex number + c = BytesIO() + np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols) + c.seek(0) + lines = c.readlines() + assert_equal( + lines, + [b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n', + b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n']) + + def test_complex_negative_exponent(self): + # Previous to 1.15, some formats generated x+-yj, gh 7895 + ncols = 2 + nrows = 2 + a = np.zeros((ncols, nrows), dtype=np.complex128) + re = np.pi + im = np.e + a[:] = re - 1.0j * im + c = BytesIO() + np.savetxt(c, a, fmt='%.3e') + c.seek(0) + lines = c.readlines() + assert_equal( + lines, + [b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n', + b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n']) + + def test_custom_writer(self): + + class CustomWriter(list): + def write(self, text): + self.extend(text.split(b'\n')) + + w = CustomWriter() + a = np.array([(1, 2), (3, 4)]) + np.savetxt(w, a) + b = np.loadtxt(w) + assert_array_equal(a, b) + + def test_unicode(self): + utf8 = b'\xcf\x96'.decode('UTF-8') + a = np.array([utf8], dtype=np.str_) + with tempdir() as tmpdir: + # set encoding as on windows it may not be unicode even on py3 + np.savetxt(os.path.join(tmpdir, 'test.csv'), a, fmt=['%s'], + encoding='UTF-8') + + def test_unicode_roundtrip(self): + utf8 = b'\xcf\x96'.decode('UTF-8') + a = np.array([utf8], dtype=np.str_) + # our gz wrapper support encoding + suffixes = ['', '.gz'] + if HAS_BZ2: + suffixes.append('.bz2') + if HAS_LZMA: + suffixes.extend(['.xz', '.lzma']) + with tempdir() as tmpdir: + for suffix in suffixes: + np.savetxt(os.path.join(tmpdir, 'test.csv' + suffix), a, + fmt=['%s'], encoding='UTF-16-LE') + b = np.loadtxt(os.path.join(tmpdir, 'test.csv' + suffix), + encoding='UTF-16-LE', dtype=np.str_) + assert_array_equal(a, b) + + def test_unicode_bytestream(self): + utf8 = b'\xcf\x96'.decode('UTF-8') + a = np.array([utf8], dtype=np.str_) + s = BytesIO() + np.savetxt(s, a, fmt=['%s'], encoding='UTF-8') + s.seek(0) + assert_equal(s.read().decode('UTF-8'), utf8 + '\n') + + def test_unicode_stringstream(self): + utf8 = b'\xcf\x96'.decode('UTF-8') + a = np.array([utf8], dtype=np.str_) + s = StringIO() + np.savetxt(s, a, fmt=['%s'], encoding='UTF-8') + s.seek(0) + assert_equal(s.read(), utf8 + '\n') + + @pytest.mark.parametrize("iotype", [StringIO, BytesIO]) + def test_unicode_and_bytes_fmt(self, iotype): + # string type of fmt should not matter, see also gh-4053 + a = np.array([1.]) + s = iotype() + np.savetxt(s, a, fmt="%f") + s.seek(0) + if iotype is StringIO: + assert_equal(s.read(), "%f\n" % 1.) + else: + assert_equal(s.read(), b"%f\n" % 1.) + + @pytest.mark.skipif(sys.platform == 'win32', reason="files>4GB may not work") + @pytest.mark.slow + @requires_memory(free_bytes=7e9) + @pytest.mark.thread_unsafe(reason="crashes with low memory") + def test_large_zip(self): + def check_large_zip(memoryerror_raised): + memoryerror_raised.value = False + try: + # The test takes at least 6GB of memory, writes a file larger + # than 4GB. This tests the ``allowZip64`` kwarg to ``zipfile`` + test_data = np.asarray([np.random.rand( + np.random.randint(50, 100), 4) + for i in range(800000)], dtype=object) + with tempdir() as tmpdir: + np.savez(os.path.join(tmpdir, 'test.npz'), + test_data=test_data) + except MemoryError: + memoryerror_raised.value = True + raise + # run in a subprocess to ensure memory is released on PyPy, see gh-15775 + # Use an object in shared memory to re-raise the MemoryError exception + # in our process if needed, see gh-16889 + memoryerror_raised = Value(c_bool) + + # Since Python 3.8, the default start method for multiprocessing has + # been changed from 'fork' to 'spawn' on macOS, causing inconsistency + # on memory sharing model, leading to failed test for check_large_zip + ctx = get_context('fork') + p = ctx.Process(target=check_large_zip, args=(memoryerror_raised,)) + p.start() + p.join() + if memoryerror_raised.value: + raise MemoryError("Child process raised a MemoryError exception") + # -9 indicates a SIGKILL, probably an OOM. + if p.exitcode == -9: + msg = "subprocess got a SIGKILL, apparently free memory was not sufficient" + pytest.xfail(msg) + assert p.exitcode == 0 + +class LoadTxtBase: + def check_compressed(self, fopen, suffixes): + # Test that we can load data from a compressed file + wanted = np.arange(6).reshape((2, 3)) + linesep = ('\n', '\r\n', '\r') + for sep in linesep: + data = '0 1 2' + sep + '3 4 5' + for suffix in suffixes: + with temppath(suffix=suffix) as name: + with fopen(name, mode='wt', encoding='UTF-32-LE') as f: + f.write(data) + res = self.loadfunc(name, encoding='UTF-32-LE') + assert_array_equal(res, wanted) + with fopen(name, "rt", encoding='UTF-32-LE') as f: + res = self.loadfunc(f) + assert_array_equal(res, wanted) + + def test_compressed_gzip(self): + self.check_compressed(gzip.open, ('.gz',)) + + @pytest.mark.skipif(not HAS_BZ2, reason="Needs bz2") + def test_compressed_bz2(self): + self.check_compressed(bz2.open, ('.bz2',)) + + @pytest.mark.skipif(not HAS_LZMA, reason="Needs lzma") + def test_compressed_lzma(self): + self.check_compressed(lzma.open, ('.xz', '.lzma')) + + def test_encoding(self): + with temppath() as path: + with open(path, "wb") as f: + f.write('0.\n1.\n2.'.encode("UTF-16")) + x = self.loadfunc(path, encoding="UTF-16") + assert_array_equal(x, [0., 1., 2.]) + + def test_stringload(self): + # umlaute + nonascii = b'\xc3\xb6\xc3\xbc\xc3\xb6'.decode("UTF-8") + with temppath() as path: + with open(path, "wb") as f: + f.write(nonascii.encode("UTF-16")) + x = self.loadfunc(path, encoding="UTF-16", dtype=np.str_) + assert_array_equal(x, nonascii) + + def test_binary_decode(self): + utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04' + v = self.loadfunc(BytesIO(utf16), dtype=np.str_, encoding='UTF-16') + assert_array_equal(v, np.array(utf16.decode('UTF-16').split())) + + def test_converters_decode(self): + # test converters that decode strings + c = TextIO() + c.write(b'\xcf\x96') + c.seek(0) + x = self.loadfunc(c, dtype=np.str_, encoding="bytes", + converters={0: lambda x: x.decode('UTF-8')}) + a = np.array([b'\xcf\x96'.decode('UTF-8')]) + assert_array_equal(x, a) + + def test_converters_nodecode(self): + # test native string converters enabled by setting an encoding + utf8 = b'\xcf\x96'.decode('UTF-8') + with temppath() as path: + with open(path, 'wt', encoding='UTF-8') as f: + f.write(utf8) + x = self.loadfunc(path, dtype=np.str_, + converters={0: lambda x: x + 't'}, + encoding='UTF-8') + a = np.array([utf8 + 't']) + assert_array_equal(x, a) + + +class TestLoadTxt(LoadTxtBase): + loadfunc = staticmethod(np.loadtxt) + + def setup_method(self): + # lower chunksize for testing + self.orig_chunk = _npyio_impl._loadtxt_chunksize + _npyio_impl._loadtxt_chunksize = 1 + + def teardown_method(self): + _npyio_impl._loadtxt_chunksize = self.orig_chunk + + def test_record(self): + c = TextIO() + c.write('1 2\n3 4') + c.seek(0) + x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)]) + a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + assert_array_equal(x, a) + + d = TextIO() + d.write('M 64 75.0\nF 25 60.0') + d.seek(0) + mydescriptor = {'names': ('gender', 'age', 'weight'), + 'formats': ('S1', 'i4', 'f4')} + b = np.array([('M', 64.0, 75.0), + ('F', 25.0, 60.0)], dtype=mydescriptor) + y = np.loadtxt(d, dtype=mydescriptor) + assert_array_equal(y, b) + + def test_array(self): + c = TextIO() + c.write('1 2\n3 4') + + c.seek(0) + x = np.loadtxt(c, dtype=int) + a = np.array([[1, 2], [3, 4]], int) + assert_array_equal(x, a) + + c.seek(0) + x = np.loadtxt(c, dtype=float) + a = np.array([[1, 2], [3, 4]], float) + assert_array_equal(x, a) + + def test_1D(self): + c = TextIO() + c.write('1\n2\n3\n4\n') + c.seek(0) + x = np.loadtxt(c, dtype=int) + a = np.array([1, 2, 3, 4], int) + assert_array_equal(x, a) + + c = TextIO() + c.write('1,2,3,4\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',') + a = np.array([1, 2, 3, 4], int) + assert_array_equal(x, a) + + def test_missing(self): + c = TextIO() + c.write('1,2,3,,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}) + a = np.array([1, 2, 3, -999, 5], int) + assert_array_equal(x, a) + + def test_converters_with_usecols(self): + c = TextIO() + c.write('1,2,3,,5\n6,7,8,9,10\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}, + usecols=(1, 3,)) + a = np.array([[2, -999], [7, 9]], int) + assert_array_equal(x, a) + + def test_comments_unicode(self): + c = TextIO() + c.write('# comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + comments='#') + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + def test_comments_byte(self): + c = TextIO() + c.write('# comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + comments=b'#') + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + def test_comments_multiple(self): + c = TextIO() + c.write('# comment\n1,2,3\n@ comment2\n4,5,6 // comment3') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + comments=['#', '@', '//']) + a = np.array([[1, 2, 3], [4, 5, 6]], int) + assert_array_equal(x, a) + + @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") + def test_comments_multi_chars(self): + c = TextIO() + c.write('/* comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + comments='/*') + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + # Check that '/*' is not transformed to ['/', '*'] + c = TextIO() + c.write('*/ comment\n1,2,3,5\n') + c.seek(0) + assert_raises(ValueError, np.loadtxt, c, dtype=int, delimiter=',', + comments='/*') + + def test_skiprows(self): + c = TextIO() + c.write('comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + skiprows=1) + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + c = TextIO() + c.write('# comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + skiprows=1) + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + def test_usecols(self): + a = np.array([[1, 2], [3, 4]], float) + c = BytesIO() + np.savetxt(c, a) + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=(1,)) + assert_array_equal(x, a[:, 1]) + + a = np.array([[1, 2, 3], [3, 4, 5]], float) + c = BytesIO() + np.savetxt(c, a) + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=(1, 2)) + assert_array_equal(x, a[:, 1:]) + + # Testing with arrays instead of tuples. + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2])) + assert_array_equal(x, a[:, 1:]) + + # Testing with an integer instead of a sequence + for int_type in [int, np.int8, np.int16, + np.int32, np.int64, np.uint8, np.uint16, + np.uint32, np.uint64]: + to_read = int_type(1) + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=to_read) + assert_array_equal(x, a[:, 1]) + + # Testing with some crazy custom integer type + class CrazyInt: + def __index__(self): + return 1 + + crazy_int = CrazyInt() + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=crazy_int) + assert_array_equal(x, a[:, 1]) + + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=(crazy_int,)) + assert_array_equal(x, a[:, 1]) + + # Checking with dtypes defined converters. + data = '''JOE 70.1 25.3 + BOB 60.5 27.9 + ''' + c = TextIO(data) + names = ['stid', 'temp'] + dtypes = ['S4', 'f8'] + arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes))) + assert_equal(arr['stid'], [b"JOE", b"BOB"]) + assert_equal(arr['temp'], [25.3, 27.9]) + + # Testing non-ints in usecols + c.seek(0) + bogus_idx = 1.5 + assert_raises_regex( + TypeError, + f'^usecols must be.*{type(bogus_idx).__name__}', + np.loadtxt, c, usecols=bogus_idx + ) + + assert_raises_regex( + TypeError, + f'^usecols must be.*{type(bogus_idx).__name__}', + np.loadtxt, c, usecols=[0, bogus_idx, 0] + ) + + def test_bad_usecols(self): + with pytest.raises(OverflowError): + np.loadtxt(["1\n"], usecols=[2**64], delimiter=",") + with pytest.raises((ValueError, OverflowError)): + # Overflow error on 32bit platforms + np.loadtxt(["1\n"], usecols=[2**62], delimiter=",") + with pytest.raises(TypeError, + match="If a structured dtype .*. But 1 usecols were given and " + "the number of fields is 3."): + np.loadtxt(["1,1\n"], dtype="i,2i", usecols=[0], delimiter=",") + + def test_fancy_dtype(self): + c = TextIO() + c.write('1,2,3.0\n4,5,6.0\n') + c.seek(0) + dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + x = np.loadtxt(c, dtype=dt, delimiter=',') + a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt) + assert_array_equal(x, a) + + def test_shaped_dtype(self): + c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6") + dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ('block', int, (2, 3))]) + x = np.loadtxt(c, dtype=dt) + a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])], + dtype=dt) + assert_array_equal(x, a) + + def test_3d_shaped_dtype(self): + c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12") + dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ('block', int, (2, 2, 3))]) + x = np.loadtxt(c, dtype=dt) + a = np.array([('aaaa', 1.0, 8.0, + [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])], + dtype=dt) + assert_array_equal(x, a) + + def test_str_dtype(self): + # see gh-8033 + c = ["str1", "str2"] + + for dt in (str, np.bytes_): + a = np.array(["str1", "str2"], dtype=dt) + x = np.loadtxt(c, dtype=dt) + assert_array_equal(x, a) + + def test_empty_file(self): + with pytest.warns(UserWarning, match="input contained no data"): + c = TextIO() + x = np.loadtxt(c) + assert_equal(x.shape, (0,)) + x = np.loadtxt(c, dtype=np.int64) + assert_equal(x.shape, (0,)) + assert_(x.dtype == np.int64) + + def test_unused_converter(self): + c = TextIO() + c.writelines(['1 21\n', '3 42\n']) + c.seek(0) + data = np.loadtxt(c, usecols=(1,), + converters={0: lambda s: int(s, 16)}) + assert_array_equal(data, [21, 42]) + + c.seek(0) + data = np.loadtxt(c, usecols=(1,), + converters={1: lambda s: int(s, 16)}) + assert_array_equal(data, [33, 66]) + + def test_dtype_with_object(self): + # Test using an explicit dtype with an object + data = """ 1; 2001-01-01 + 2; 2002-01-31 """ + ndtype = [('idx', int), ('code', object)] + func = lambda s: strptime(s.strip(), "%Y-%m-%d") + converters = {1: func} + test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype, + converters=converters) + control = np.array( + [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))], + dtype=ndtype) + assert_equal(test, control) + + def test_uint64_type(self): + tgt = (9223372043271415339, 9223372043271415853) + c = TextIO() + c.write("%s %s" % tgt) + c.seek(0) + res = np.loadtxt(c, dtype=np.uint64) + assert_equal(res, tgt) + + def test_int64_type(self): + tgt = (-9223372036854775807, 9223372036854775807) + c = TextIO() + c.write("%s %s" % tgt) + c.seek(0) + res = np.loadtxt(c, dtype=np.int64) + assert_equal(res, tgt) + + def test_from_float_hex(self): + # IEEE doubles and floats only, otherwise the float32 + # conversion may fail. + tgt = np.logspace(-10, 10, 5).astype(np.float32) + tgt = np.hstack((tgt, -tgt)).astype(float) + inp = '\n'.join(map(float.hex, tgt)) + c = TextIO() + c.write(inp) + for dt in [float, np.float32]: + c.seek(0) + res = np.loadtxt( + c, dtype=dt, converters=float.fromhex, encoding="latin1") + assert_equal(res, tgt, err_msg=f"{dt}") + + @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") + def test_default_float_converter_no_default_hex_conversion(self): + """ + Ensure that fromhex is only used for values with the correct prefix and + is not called by default. Regression test related to gh-19598. + """ + c = TextIO("a b c") + with pytest.raises(ValueError, + match=".*convert string 'a' to float64 at row 0, column 1"): + np.loadtxt(c) + + @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") + def test_default_float_converter_exception(self): + """ + Ensure that the exception message raised during failed floating point + conversion is correct. Regression test related to gh-19598. + """ + c = TextIO("qrs tuv") # Invalid values for default float converter + with pytest.raises(ValueError, + match="could not convert string 'qrs' to float64"): + np.loadtxt(c) + + def test_from_complex(self): + tgt = (complex(1, 1), complex(1, -1)) + c = TextIO() + c.write("%s %s" % tgt) + c.seek(0) + res = np.loadtxt(c, dtype=complex) + assert_equal(res, tgt) + + def test_complex_misformatted(self): + # test for backward compatibility + # some complex formats used to generate x+-yj + a = np.zeros((2, 2), dtype=np.complex128) + re = np.pi + im = np.e + a[:] = re - 1.0j * im + c = BytesIO() + np.savetxt(c, a, fmt='%.16e') + c.seek(0) + txt = c.read() + c.seek(0) + # misformat the sign on the imaginary part, gh 7895 + txt_bad = txt.replace(b'e+00-', b'e00+-') + assert_(txt_bad != txt) + c.write(txt_bad) + c.seek(0) + res = np.loadtxt(c, dtype=complex) + assert_equal(res, a) + + def test_universal_newline(self): + with temppath() as name: + with open(name, 'w') as f: + f.write('1 21\r3 42\r') + data = np.loadtxt(name) + assert_array_equal(data, [[1, 21], [3, 42]]) + + def test_empty_field_after_tab(self): + c = TextIO() + c.write('1 \t2 \t3\tstart \n4\t5\t6\t \n7\t8\t9.5\t') + c.seek(0) + dt = {'names': ('x', 'y', 'z', 'comment'), + 'formats': (' num rows + c = TextIO() + c.write('comment\n1,2,3,5\n4,5,7,8\n2,1,4,5') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + skiprows=1, max_rows=6) + a = np.array([[1, 2, 3, 5], [4, 5, 7, 8], [2, 1, 4, 5]], int) + assert_array_equal(x, a) + + @pytest.mark.parametrize(["skip", "data"], [ + (1, ["ignored\n", "1,2\n", "\n", "3,4\n"]), + # "Bad" lines that do not end in newlines: + (1, ["ignored", "1,2", "", "3,4"]), + (1, lambda: StringIO("ignored\n1,2\n\n3,4")), + # Same as above, but do not skip any lines: + (0, ["-1,0\n", "1,2\n", "\n", "3,4\n"]), + (0, ["-1,0", "1,2", "", "3,4"]), + (0, lambda: StringIO("-1,0\n1,2\n\n3,4"))]) + def test_max_rows_empty_lines(self, skip, data): + # gh-26718 re-instantiate StringIO objects each time + if callable(data): + data = data() + + with pytest.warns(UserWarning, + match=f"Input line 3.*max_rows={3 - skip}"): + res = np.loadtxt(data, dtype=int, skiprows=skip, delimiter=",", + max_rows=3 - skip) + assert_array_equal(res, [[-1, 0], [1, 2], [3, 4]][skip:]) + + if isinstance(data, StringIO): + data.seek(0) + + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + with pytest.raises(UserWarning): + np.loadtxt(data, dtype=int, skiprows=skip, delimiter=",", + max_rows=3 - skip) + +class Testfromregex: + def test_record(self): + c = TextIO() + c.write('1.312 foo\n1.534 bar\n4.444 qux') + c.seek(0) + + dt = [('num', np.float64), ('val', 'S3')] + x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt) + a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')], + dtype=dt) + assert_array_equal(x, a) + + def test_record_2(self): + c = TextIO() + c.write('1312 foo\n1534 bar\n4444 qux') + c.seek(0) + + dt = [('num', np.int32), ('val', 'S3')] + x = np.fromregex(c, r"(\d+)\s+(...)", dt) + a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')], + dtype=dt) + assert_array_equal(x, a) + + def test_record_3(self): + c = TextIO() + c.write('1312 foo\n1534 bar\n4444 qux') + c.seek(0) + + dt = [('num', np.float64)] + x = np.fromregex(c, r"(\d+)\s+...", dt) + a = np.array([(1312,), (1534,), (4444,)], dtype=dt) + assert_array_equal(x, a) + + @pytest.mark.parametrize("path_type", [str, Path]) + def test_record_unicode(self, path_type): + utf8 = b'\xcf\x96' + with temppath() as str_path: + path = path_type(str_path) + with open(path, 'wb') as f: + f.write(b'1.312 foo' + utf8 + b' \n1.534 bar\n4.444 qux') + + dt = [('num', np.float64), ('val', 'U4')] + x = np.fromregex(path, r"(?u)([0-9.]+)\s+(\w+)", dt, encoding='UTF-8') + a = np.array([(1.312, 'foo' + utf8.decode('UTF-8')), (1.534, 'bar'), + (4.444, 'qux')], dtype=dt) + assert_array_equal(x, a) + + regexp = re.compile(r"([0-9.]+)\s+(\w+)", re.UNICODE) + x = np.fromregex(path, regexp, dt, encoding='UTF-8') + assert_array_equal(x, a) + + def test_compiled_bytes(self): + regexp = re.compile(br'(\d)') + c = BytesIO(b'123') + dt = [('num', np.float64)] + a = np.array([1, 2, 3], dtype=dt) + x = np.fromregex(c, regexp, dt) + assert_array_equal(x, a) + + def test_bad_dtype_not_structured(self): + regexp = re.compile(br'(\d)') + c = BytesIO(b'123') + with pytest.raises(TypeError, match='structured datatype'): + np.fromregex(c, regexp, dtype=np.float64) + + +#####-------------------------------------------------------------------------- + + +class TestFromTxt(LoadTxtBase): + loadfunc = staticmethod(np.genfromtxt) + + def test_record(self): + # Test w/ explicit dtype + data = TextIO('1 2\n3 4') + test = np.genfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)]) + control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + assert_equal(test, control) + # + data = TextIO('M 64.0 75.0\nF 25.0 60.0') + descriptor = {'names': ('gender', 'age', 'weight'), + 'formats': ('S1', 'i4', 'f4')} + control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)], + dtype=descriptor) + test = np.genfromtxt(data, dtype=descriptor) + assert_equal(test, control) + + def test_array(self): + # Test outputting a standard ndarray + data = TextIO('1 2\n3 4') + control = np.array([[1, 2], [3, 4]], dtype=int) + test = np.genfromtxt(data, dtype=int) + assert_array_equal(test, control) + # + data.seek(0) + control = np.array([[1, 2], [3, 4]], dtype=float) + test = np.loadtxt(data, dtype=float) + assert_array_equal(test, control) + + def test_1D(self): + # Test squeezing to 1D + control = np.array([1, 2, 3, 4], int) + # + data = TextIO('1\n2\n3\n4\n') + test = np.genfromtxt(data, dtype=int) + assert_array_equal(test, control) + # + data = TextIO('1,2,3,4\n') + test = np.genfromtxt(data, dtype=int, delimiter=',') + assert_array_equal(test, control) + + def test_comments(self): + # Test the stripping of comments + control = np.array([1, 2, 3, 5], int) + # Comment on its own line + data = TextIO('# comment\n1,2,3,5\n') + test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#') + assert_equal(test, control) + # Comment at the end of a line + data = TextIO('1,2,3,5# comment\n') + test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#') + assert_equal(test, control) + + def test_skiprows(self): + # Test row skipping + control = np.array([1, 2, 3, 5], int) + kwargs = {"dtype": int, "delimiter": ','} + # + data = TextIO('comment\n1,2,3,5\n') + test = np.genfromtxt(data, skip_header=1, **kwargs) + assert_equal(test, control) + # + data = TextIO('# comment\n1,2,3,5\n') + test = np.loadtxt(data, skiprows=1, **kwargs) + assert_equal(test, control) + + def test_skip_footer(self): + data = [f"# {i}" for i in range(1, 6)] + data.append("A, B, C") + data.extend([f"{i},{i:3.1f},{i:03d}" for i in range(51)]) + data[-1] = "99,99" + kwargs = {"delimiter": ",", "names": True, "skip_header": 5, "skip_footer": 10} + test = np.genfromtxt(TextIO("\n".join(data)), **kwargs) + ctrl = np.array([(f"{i:f}", f"{i:f}", f"{i:f}") for i in range(41)], + dtype=[(_, float) for _ in "ABC"]) + assert_equal(test, ctrl) + + def test_skip_footer_with_invalid(self): + with warnings.catch_warnings(): + warnings.simplefilter('ignore', ConversionWarning) + basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n' + # Footer too small to get rid of all invalid values + assert_raises(ValueError, np.genfromtxt, + TextIO(basestr), skip_footer=1) + # except ValueError: + # pass + a = np.genfromtxt( + TextIO(basestr), skip_footer=1, invalid_raise=False) + assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])) + # + a = np.genfromtxt(TextIO(basestr), skip_footer=3) + assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])) + # + basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 7\n' + a = np.genfromtxt( + TextIO(basestr), skip_footer=1, invalid_raise=False) + assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]])) + a = np.genfromtxt( + TextIO(basestr), skip_footer=3, invalid_raise=False) + assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]])) + + def test_header(self): + # Test retrieving a header + data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(data, dtype=None, names=True, + encoding='bytes') + assert_(w[0].category is VisibleDeprecationWarning) + control = {'gender': np.array([b'M', b'F']), + 'age': np.array([64.0, 25.0]), + 'weight': np.array([75.0, 60.0])} + assert_equal(test['gender'], control['gender']) + assert_equal(test['age'], control['age']) + assert_equal(test['weight'], control['weight']) + + def test_auto_dtype(self): + # Test the automatic definition of the output dtype + data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(data, dtype=None, encoding='bytes') + assert_(w[0].category is VisibleDeprecationWarning) + control = [np.array([b'A', b'BCD']), + np.array([64, 25]), + np.array([75.0, 60.0]), + np.array([3 + 4j, 5 + 6j]), + np.array([True, False]), ] + assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4']) + for (i, ctrl) in enumerate(control): + assert_equal(test[f'f{i}'], ctrl) + + def test_auto_dtype_uniform(self): + # Tests whether the output dtype can be uniformized + data = TextIO('1 2 3 4\n5 6 7 8\n') + test = np.genfromtxt(data, dtype=None) + control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) + assert_equal(test, control) + + def test_fancy_dtype(self): + # Check that a nested dtype isn't MIA + data = TextIO('1,2,3.0\n4,5,6.0\n') + fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + test = np.genfromtxt(data, dtype=fancydtype, delimiter=',') + control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype) + assert_equal(test, control) + + def test_names_overwrite(self): + # Test overwriting the names of the dtype + descriptor = {'names': ('g', 'a', 'w'), + 'formats': ('S1', 'i4', 'f4')} + data = TextIO(b'M 64.0 75.0\nF 25.0 60.0') + names = ('gender', 'age', 'weight') + test = np.genfromtxt(data, dtype=descriptor, names=names) + descriptor['names'] = names + control = np.array([('M', 64.0, 75.0), + ('F', 25.0, 60.0)], dtype=descriptor) + assert_equal(test, control) + + def test_bad_fname(self): + with pytest.raises(TypeError, match='fname must be a string,'): + np.genfromtxt(123) + + def test_commented_header(self): + # Check that names can be retrieved even if the line is commented out. + data = TextIO(""" +#gender age weight +M 21 72.100000 +F 35 58.330000 +M 33 21.99 + """) + # The # is part of the first name and should be deleted automatically. + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(data, names=True, dtype=None, + encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)], + dtype=[('gender', '|S1'), ('age', int), ('weight', float)]) + assert_equal(test, ctrl) + # Ditto, but we should get rid of the first element + data = TextIO(b""" +# gender age weight +M 21 72.100000 +F 35 58.330000 +M 33 21.99 + """) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(data, names=True, dtype=None, + encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + assert_equal(test, ctrl) + + def test_names_and_comments_none(self): + # Tests case when names is true but comments is None (gh-10780) + data = TextIO('col1 col2\n 1 2\n 3 4') + test = np.genfromtxt(data, dtype=(int, int), comments=None, names=True) + control = np.array([(1, 2), (3, 4)], dtype=[('col1', int), ('col2', int)]) + assert_equal(test, control) + + def test_file_is_closed_on_error(self): + # gh-13200 + with tempdir() as tmpdir: + fpath = os.path.join(tmpdir, "test.csv") + with open(fpath, "wb") as f: + f.write('\N{GREEK PI SYMBOL}'.encode()) + + # ResourceWarnings are emitted from a destructor, so won't be + # detected by regular propagation to errors. + with assert_no_warnings(): + with pytest.raises(UnicodeDecodeError): + np.genfromtxt(fpath, encoding="ascii") + + def test_autonames_and_usecols(self): + # Tests names and usecols + data = TextIO('A B C D\n aaaa 121 45 9.1') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(data, usecols=('A', 'C', 'D'), + names=True, dtype=None, encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + control = np.array(('aaaa', 45, 9.1), + dtype=[('A', '|S4'), ('C', int), ('D', float)]) + assert_equal(test, control) + + def test_converters_with_usecols(self): + # Test the combination user-defined converters and usecol + data = TextIO('1,2,3,,5\n6,7,8,9,10\n') + test = np.genfromtxt(data, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}, + usecols=(1, 3,)) + control = np.array([[2, -999], [7, 9]], int) + assert_equal(test, control) + + def test_converters_with_usecols_and_names(self): + # Tests names and usecols + data = TextIO('A B C D\n aaaa 121 45 9.1') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(data, usecols=('A', 'C', 'D'), names=True, + dtype=None, encoding="bytes", + converters={'C': lambda s: 2 * int(s)}) + assert_(w[0].category is VisibleDeprecationWarning) + control = np.array(('aaaa', 90, 9.1), + dtype=[('A', '|S4'), ('C', int), ('D', float)]) + assert_equal(test, control) + + def test_converters_cornercases(self): + # Test the conversion to datetime. + converter = { + 'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')} + data = TextIO('2009-02-03 12:00:00Z, 72214.0') + test = np.genfromtxt(data, delimiter=',', dtype=None, + names=['date', 'stid'], converters=converter) + control = np.array((datetime(2009, 2, 3), 72214.), + dtype=[('date', np.object_), ('stid', float)]) + assert_equal(test, control) + + def test_converters_cornercases2(self): + # Test the conversion to datetime64. + converter = { + 'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))} + data = TextIO('2009-02-03 12:00:00Z, 72214.0') + test = np.genfromtxt(data, delimiter=',', dtype=None, + names=['date', 'stid'], converters=converter) + control = np.array((datetime(2009, 2, 3), 72214.), + dtype=[('date', 'datetime64[us]'), ('stid', float)]) + assert_equal(test, control) + + def test_unused_converter(self): + # Test whether unused converters are forgotten + data = TextIO("1 21\n 3 42\n") + test = np.genfromtxt(data, usecols=(1,), + converters={0: lambda s: int(s, 16)}) + assert_equal(test, [21, 42]) + # + data.seek(0) + test = np.genfromtxt(data, usecols=(1,), + converters={1: lambda s: int(s, 16)}) + assert_equal(test, [33, 66]) + + def test_invalid_converter(self): + strip_rand = lambda x: float((b'r' in x.lower() and x.split()[-1]) or + ((b'r' not in x.lower() and x.strip()) or 0.0)) + strip_per = lambda x: float((b'%' in x.lower() and x.split()[0]) or + ((b'%' not in x.lower() and x.strip()) or 0.0)) + s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n" + "L24U05,12/5/2003, 2 %,1,300, 150.5\r\n" + "D02N03,10/10/2004,R 1,,7,145.55") + kwargs = { + "converters": {2: strip_per, 3: strip_rand}, "delimiter": ",", + "dtype": None, "encoding": "bytes"} + assert_raises(ConverterError, np.genfromtxt, s, **kwargs) + + def test_tricky_converter_bug1666(self): + # Test some corner cases + s = TextIO('q1,2\nq3,4') + cnv = lambda s: float(s[1:]) + test = np.genfromtxt(s, delimiter=',', converters={0: cnv}) + control = np.array([[1., 2.], [3., 4.]]) + assert_equal(test, control) + + def test_dtype_with_converters(self): + dstr = "2009; 23; 46" + test = np.genfromtxt(TextIO(dstr,), + delimiter=";", dtype=float, converters={0: bytes}) + control = np.array([('2009', 23., 46)], + dtype=[('f0', '|S4'), ('f1', float), ('f2', float)]) + assert_equal(test, control) + test = np.genfromtxt(TextIO(dstr,), + delimiter=";", dtype=float, converters={0: float}) + control = np.array([2009., 23., 46],) + assert_equal(test, control) + + @pytest.mark.filterwarnings("ignore:.*recfromcsv.*:DeprecationWarning") + def test_dtype_with_converters_and_usecols(self): + dstr = "1,5,-1,1:1\n2,8,-1,1:n\n3,3,-2,m:n\n" + dmap = {'1:1': 0, '1:n': 1, 'm:1': 2, 'm:n': 3} + dtyp = [('e1', 'i4'), ('e2', 'i4'), ('e3', 'i2'), ('n', 'i1')] + conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]} + test = recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', + names=None, converters=conv, encoding="bytes") + control = np.rec.array([(1, 5, -1, 0), (2, 8, -1, 1), (3, 3, -2, 3)], + dtype=dtyp) + assert_equal(test, control) + dtyp = [('e1', 'i4'), ('e2', 'i4'), ('n', 'i1')] + test = recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', + usecols=(0, 1, 3), names=None, converters=conv, + encoding="bytes") + control = np.rec.array([(1, 5, 0), (2, 8, 1), (3, 3, 3)], dtype=dtyp) + assert_equal(test, control) + + def test_dtype_with_object(self): + # Test using an explicit dtype with an object + data = """ 1; 2001-01-01 + 2; 2002-01-31 """ + ndtype = [('idx', int), ('code', object)] + func = lambda s: strptime(s.strip(), "%Y-%m-%d") + converters = {1: func} + test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype, + converters=converters) + control = np.array( + [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))], + dtype=ndtype) + assert_equal(test, control) + + ndtype = [('nest', [('idx', int), ('code', object)])] + with assert_raises_regex(NotImplementedError, + 'Nested fields.* not supported.*'): + test = np.genfromtxt(TextIO(data), delimiter=";", + dtype=ndtype, converters=converters) + + # nested but empty fields also aren't supported + ndtype = [('idx', int), ('code', object), ('nest', [])] + with assert_raises_regex(NotImplementedError, + 'Nested fields.* not supported.*'): + test = np.genfromtxt(TextIO(data), delimiter=";", + dtype=ndtype, converters=converters) + + def test_dtype_with_object_no_converter(self): + # Object without a converter uses bytes: + parsed = np.genfromtxt(TextIO("1"), dtype=object) + assert parsed[()] == b"1" + parsed = np.genfromtxt(TextIO("string"), dtype=object) + assert parsed[()] == b"string" + + def test_userconverters_with_explicit_dtype(self): + # Test user_converters w/ explicit (standard) dtype + data = TextIO('skip,skip,2001-01-01,1.0,skip') + test = np.genfromtxt(data, delimiter=",", names=None, dtype=float, + usecols=(2, 3), converters={2: bytes}) + control = np.array([('2001-01-01', 1.)], + dtype=[('', '|S10'), ('', float)]) + assert_equal(test, control) + + def test_utf8_userconverters_with_explicit_dtype(self): + utf8 = b'\xcf\x96' + with temppath() as path: + with open(path, 'wb') as f: + f.write(b'skip,skip,2001-01-01' + utf8 + b',1.0,skip') + test = np.genfromtxt(path, delimiter=",", names=None, dtype=float, + usecols=(2, 3), converters={2: str}, + encoding='UTF-8') + control = np.array([('2001-01-01' + utf8.decode('UTF-8'), 1.)], + dtype=[('', '|U11'), ('', float)]) + assert_equal(test, control) + + def test_spacedelimiter(self): + # Test space delimiter + data = TextIO("1 2 3 4 5\n6 7 8 9 10") + test = np.genfromtxt(data) + control = np.array([[1., 2., 3., 4., 5.], + [6., 7., 8., 9., 10.]]) + assert_equal(test, control) + + def test_integer_delimiter(self): + # Test using an integer for delimiter + data = " 1 2 3\n 4 5 67\n890123 4" + test = np.genfromtxt(TextIO(data), delimiter=3) + control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]]) + assert_equal(test, control) + + def test_missing(self): + data = TextIO('1,2,3,,5\n') + test = np.genfromtxt(data, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}) + control = np.array([1, 2, 3, -999, 5], int) + assert_equal(test, control) + + def test_missing_with_tabs(self): + # Test w/ a delimiter tab + txt = "1\t2\t3\n\t2\t\n1\t\t3" + test = np.genfromtxt(TextIO(txt), delimiter="\t", + usemask=True,) + ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],) + ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool) + assert_equal(test.data, ctrl_d) + assert_equal(test.mask, ctrl_m) + + def test_usecols(self): + # Test the selection of columns + # Select 1 column + control = np.array([[1, 2], [3, 4]], float) + data = TextIO() + np.savetxt(data, control) + data.seek(0) + test = np.genfromtxt(data, dtype=float, usecols=(1,)) + assert_equal(test, control[:, 1]) + # + control = np.array([[1, 2, 3], [3, 4, 5]], float) + data = TextIO() + np.savetxt(data, control) + data.seek(0) + test = np.genfromtxt(data, dtype=float, usecols=(1, 2)) + assert_equal(test, control[:, 1:]) + # Testing with arrays instead of tuples. + data.seek(0) + test = np.genfromtxt(data, dtype=float, usecols=np.array([1, 2])) + assert_equal(test, control[:, 1:]) + + def test_usecols_as_css(self): + # Test giving usecols with a comma-separated string + data = "1 2 3\n4 5 6" + test = np.genfromtxt(TextIO(data), + names="a, b, c", usecols="a, c") + ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"]) + assert_equal(test, ctrl) + + def test_usecols_with_structured_dtype(self): + # Test usecols with an explicit structured dtype + data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9") + names = ['stid', 'temp'] + dtypes = ['S4', 'f8'] + test = np.genfromtxt( + data, usecols=(0, 2), dtype=list(zip(names, dtypes))) + assert_equal(test['stid'], [b"JOE", b"BOB"]) + assert_equal(test['temp'], [25.3, 27.9]) + + def test_usecols_with_integer(self): + # Test usecols with an integer + test = np.genfromtxt(TextIO(b"1 2 3\n4 5 6"), usecols=0) + assert_equal(test, np.array([1., 4.])) + + def test_usecols_with_named_columns(self): + # Test usecols with named columns + ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)]) + data = "1 2 3\n4 5 6" + kwargs = {"names": "a, b, c"} + test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs) + assert_equal(test, ctrl) + test = np.genfromtxt(TextIO(data), + usecols=('a', 'c'), **kwargs) + assert_equal(test, ctrl) + + def test_empty_file(self): + # Test that an empty file raises the proper warning. + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', message="genfromtxt: Empty input file:") + data = TextIO() + test = np.genfromtxt(data) + assert_equal(test, np.array([])) + + # when skip_header > 0 + test = np.genfromtxt(data, skip_header=1) + assert_equal(test, np.array([])) + + def test_fancy_dtype_alt(self): + # Check that a nested dtype isn't MIA + data = TextIO('1,2,3.0\n4,5,6.0\n') + fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + test = np.genfromtxt(data, dtype=fancydtype, delimiter=',', usemask=True) + control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype) + assert_equal(test, control) + + def test_shaped_dtype(self): + c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6") + dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ('block', int, (2, 3))]) + x = np.genfromtxt(c, dtype=dt) + a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])], + dtype=dt) + assert_array_equal(x, a) + + def test_withmissing(self): + data = TextIO('A,B\n0,1\n2,N/A') + kwargs = {"delimiter": ",", "missing_values": "N/A", "names": True} + test = np.genfromtxt(data, dtype=None, usemask=True, **kwargs) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', int), ('B', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + # + data.seek(0) + test = np.genfromtxt(data, usemask=True, **kwargs) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', float), ('B', float)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_user_missing_values(self): + data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j" + basekwargs = {"dtype": None, "delimiter": ",", "names": True} + mdtype = [('A', int), ('B', float), ('C', complex)] + # + test = np.genfromtxt(TextIO(data), missing_values="N/A", + **basekwargs) + control = ma.array([(0, 0.0, 0j), (1, -999, 1j), + (-9, 2.2, -999j), (3, -99, 3j)], + mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)], + dtype=mdtype) + assert_equal(test, control) + # + basekwargs['dtype'] = mdtype + test = np.genfromtxt(TextIO(data), + missing_values={0: -9, 1: -99, 2: -999j}, + usemask=True, **basekwargs) + control = ma.array([(0, 0.0, 0j), (1, -999, 1j), + (-9, 2.2, -999j), (3, -99, 3j)], + mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)], + dtype=mdtype) + assert_equal(test, control) + # + test = np.genfromtxt(TextIO(data), + missing_values={0: -9, 'B': -99, 'C': -999j}, + usemask=True, + **basekwargs) + control = ma.array([(0, 0.0, 0j), (1, -999, 1j), + (-9, 2.2, -999j), (3, -99, 3j)], + mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)], + dtype=mdtype) + assert_equal(test, control) + + def test_user_filling_values(self): + # Test with missing and filling values + ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)]) + data = "N/A, 2, 3\n4, ,???" + kwargs = {"delimiter": ",", + "dtype": int, + "names": "a,b,c", + "missing_values": {0: "N/A", 'b': " ", 2: "???"}, + "filling_values": {0: 0, 'b': 0, 2: -999}} + test = np.genfromtxt(TextIO(data), **kwargs) + ctrl = np.array([(0, 2, 3), (4, 0, -999)], + dtype=[(_, int) for _ in "abc"]) + assert_equal(test, ctrl) + # + test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs) + ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"]) + assert_equal(test, ctrl) + + data2 = "1,2,*,4\n5,*,7,8\n" + test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int, + missing_values="*", filling_values=0) + ctrl = np.array([[1, 2, 0, 4], [5, 0, 7, 8]]) + assert_equal(test, ctrl) + test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int, + missing_values="*", filling_values=-1) + ctrl = np.array([[1, 2, -1, 4], [5, -1, 7, 8]]) + assert_equal(test, ctrl) + + def test_withmissing_float(self): + data = TextIO('A,B\n0,1.5\n2,-999.00') + test = np.genfromtxt(data, dtype=None, delimiter=',', + missing_values='-999.0', names=True, usemask=True) + control = ma.array([(0, 1.5), (2, -1.)], + mask=[(False, False), (False, True)], + dtype=[('A', int), ('B', float)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_with_masked_column_uniform(self): + # Test masked column + data = TextIO('1 2 3\n4 5 6\n') + test = np.genfromtxt(data, dtype=None, + missing_values='2,5', usemask=True) + control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]]) + assert_equal(test, control) + + def test_with_masked_column_various(self): + # Test masked column + data = TextIO('True 2 3\nFalse 5 6\n') + test = np.genfromtxt(data, dtype=None, + missing_values='2,5', usemask=True) + control = ma.array([(1, 2, 3), (0, 5, 6)], + mask=[(0, 1, 0), (0, 1, 0)], + dtype=[('f0', bool), ('f1', bool), ('f2', int)]) + assert_equal(test, control) + + def test_invalid_raise(self): + # Test invalid raise + data = ["1, 1, 1, 1, 1"] * 50 + for i in range(5): + data[10 * i] = "2, 2, 2, 2 2" + data.insert(0, "a, b, c, d, e") + mdata = TextIO("\n".join(data)) + + kwargs = {"delimiter": ",", "dtype": None, "names": True} + + def f(): + return np.genfromtxt(mdata, invalid_raise=False, **kwargs) + mtest = pytest.warns(ConversionWarning, f) + assert_equal(len(mtest), 45) + assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde'])) + # + mdata.seek(0) + assert_raises(ValueError, np.genfromtxt, mdata, + delimiter=",", names=True) + + def test_invalid_raise_with_usecols(self): + # Test invalid_raise with usecols + data = ["1, 1, 1, 1, 1"] * 50 + for i in range(5): + data[10 * i] = "2, 2, 2, 2 2" + data.insert(0, "a, b, c, d, e") + mdata = TextIO("\n".join(data)) + + kwargs = {"delimiter": ",", "dtype": None, "names": True, + "invalid_raise": False} + + def f(): + return np.genfromtxt(mdata, usecols=(0, 4), **kwargs) + mtest = pytest.warns(ConversionWarning, f) + assert_equal(len(mtest), 45) + assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae'])) + # + mdata.seek(0) + mtest = np.genfromtxt(mdata, usecols=(0, 1), **kwargs) + assert_equal(len(mtest), 50) + control = np.ones(50, dtype=[(_, int) for _ in 'ab']) + control[[10 * _ for _ in range(5)]] = (2, 2) + assert_equal(mtest, control) + + def test_inconsistent_dtype(self): + # Test inconsistent dtype + data = ["1, 1, 1, 1, -1.1"] * 50 + mdata = TextIO("\n".join(data)) + + converters = {4: lambda x: f"({x.decode()})"} + kwargs = {"delimiter": ",", "converters": converters, + "dtype": [(_, int) for _ in 'abcde'], "encoding": "bytes"} + assert_raises(ValueError, np.genfromtxt, mdata, **kwargs) + + def test_default_field_format(self): + # Test default format + data = "0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), + delimiter=",", dtype=None, defaultfmt="f%02i") + ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)], + dtype=[("f00", int), ("f01", int), ("f02", float)]) + assert_equal(mtest, ctrl) + + def test_single_dtype_wo_names(self): + # Test single dtype w/o names + data = "0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), + delimiter=",", dtype=float, defaultfmt="f%02i") + ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float) + assert_equal(mtest, ctrl) + + def test_single_dtype_w_explicit_names(self): + # Test single dtype w explicit names + data = "0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), + delimiter=",", dtype=float, names="a, b, c") + ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)], + dtype=[(_, float) for _ in "abc"]) + assert_equal(mtest, ctrl) + + def test_single_dtype_w_implicit_names(self): + # Test single dtype w implicit names + data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), + delimiter=",", dtype=float, names=True) + ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)], + dtype=[(_, float) for _ in "abc"]) + assert_equal(mtest, ctrl) + + def test_easy_structured_dtype(self): + # Test easy structured dtype + data = "0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), delimiter=",", + dtype=(int, float, float), defaultfmt="f_%02i") + ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)], + dtype=[("f_00", int), ("f_01", float), ("f_02", float)]) + assert_equal(mtest, ctrl) + + def test_autostrip(self): + # Test autostrip + data = "01/01/2003 , 1.3, abcde" + kwargs = {"delimiter": ",", "dtype": None, "encoding": "bytes"} + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + mtest = np.genfromtxt(TextIO(data), **kwargs) + assert_(w[0].category is VisibleDeprecationWarning) + ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')], + dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')]) + assert_equal(mtest, ctrl) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + mtest = np.genfromtxt(TextIO(data), autostrip=True, **kwargs) + assert_(w[0].category is VisibleDeprecationWarning) + ctrl = np.array([('01/01/2003', 1.3, 'abcde')], + dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')]) + assert_equal(mtest, ctrl) + + def test_replace_space(self): + # Test the 'replace_space' option + txt = "A.A, B (B), C:C\n1, 2, 3.14" + # Test default: replace ' ' by '_' and delete non-alphanum chars + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=None) + ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)] + ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no replace, no delete + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=None, + replace_space='', deletechars='') + ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)] + ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no delete (spaces are replaced by _) + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=None, + deletechars='') + ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)] + ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) + assert_equal(test, ctrl) + + def test_replace_space_known_dtype(self): + # Test the 'replace_space' (and related) options when dtype != None + txt = "A.A, B (B), C:C\n1, 2, 3" + # Test default: replace ' ' by '_' and delete non-alphanum chars + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=int) + ctrl_dtype = [("AA", int), ("B_B", int), ("CC", int)] + ctrl = np.array((1, 2, 3), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no replace, no delete + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=int, + replace_space='', deletechars='') + ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", int)] + ctrl = np.array((1, 2, 3), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no delete (spaces are replaced by _) + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=int, + deletechars='') + ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", int)] + ctrl = np.array((1, 2, 3), dtype=ctrl_dtype) + assert_equal(test, ctrl) + + def test_incomplete_names(self): + # Test w/ incomplete names + data = "A,,C\n0,1,2\n3,4,5" + kwargs = {"delimiter": ",", "names": True} + # w/ dtype=None + ctrl = np.array([(0, 1, 2), (3, 4, 5)], + dtype=[(_, int) for _ in ('A', 'f0', 'C')]) + test = np.genfromtxt(TextIO(data), dtype=None, **kwargs) + assert_equal(test, ctrl) + # w/ default dtype + ctrl = np.array([(0, 1, 2), (3, 4, 5)], + dtype=[(_, float) for _ in ('A', 'f0', 'C')]) + test = np.genfromtxt(TextIO(data), **kwargs) + + def test_names_auto_completion(self): + # Make sure that names are properly completed + data = "1 2 3\n 4 5 6" + test = np.genfromtxt(TextIO(data), + dtype=(int, float, int), names="a") + ctrl = np.array([(1, 2, 3), (4, 5, 6)], + dtype=[('a', int), ('f0', float), ('f1', int)]) + assert_equal(test, ctrl) + + def test_names_with_usecols_bug1636(self): + # Make sure we pick up the right names w/ usecols + data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4" + ctrl_names = ("A", "C", "E") + test = np.genfromtxt(TextIO(data), + dtype=(int, int, int), delimiter=",", + usecols=(0, 2, 4), names=True) + assert_equal(test.dtype.names, ctrl_names) + # + test = np.genfromtxt(TextIO(data), + dtype=(int, int, int), delimiter=",", + usecols=("A", "C", "E"), names=True) + assert_equal(test.dtype.names, ctrl_names) + # + test = np.genfromtxt(TextIO(data), + dtype=int, delimiter=",", + usecols=("A", "C", "E"), names=True) + assert_equal(test.dtype.names, ctrl_names) + + def test_fixed_width_names(self): + # Test fix-width w/ names + data = " A B C\n 0 1 2.3\n 45 67 9." + kwargs = {"delimiter": (5, 5, 4), "names": True, "dtype": None} + ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)], + dtype=[('A', int), ('B', int), ('C', float)]) + test = np.genfromtxt(TextIO(data), **kwargs) + assert_equal(test, ctrl) + # + kwargs = {"delimiter": 5, "names": True, "dtype": None} + ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)], + dtype=[('A', int), ('B', int), ('C', float)]) + test = np.genfromtxt(TextIO(data), **kwargs) + assert_equal(test, ctrl) + + def test_filling_values(self): + # Test missing values + data = b"1, 2, 3\n1, , 5\n0, 6, \n" + kwargs = {"delimiter": ",", "dtype": None, "filling_values": -999} + ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int) + test = np.genfromtxt(TextIO(data), **kwargs) + assert_equal(test, ctrl) + + def test_comments_is_none(self): + # Github issue 329 (None was previously being converted to 'None'). + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"), + dtype=None, comments=None, delimiter=',', + encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + assert_equal(test[1], b'testNonetherestofthedata') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"), + dtype=None, comments=None, delimiter=',', + encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + assert_equal(test[1], b' testNonetherestofthedata') + + def test_latin1(self): + latin1 = b'\xf6\xfc\xf6' + norm = b"norm1,norm2,norm3\n" + enc = b"test1,testNonethe" + latin1 + b",test3\n" + s = norm + enc + norm + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(TextIO(s), + dtype=None, comments=None, delimiter=',', + encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + assert_equal(test[1, 0], b"test1") + assert_equal(test[1, 1], b"testNonethe" + latin1) + assert_equal(test[1, 2], b"test3") + test = np.genfromtxt(TextIO(s), + dtype=None, comments=None, delimiter=',', + encoding='latin1') + assert_equal(test[1, 0], "test1") + assert_equal(test[1, 1], "testNonethe" + latin1.decode('latin1')) + assert_equal(test[1, 2], "test3") + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(TextIO(b"0,testNonethe" + latin1), + dtype=None, comments=None, delimiter=',', + encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + assert_equal(test['f0'], 0) + assert_equal(test['f1'], b"testNonethe" + latin1) + + def test_binary_decode_autodtype(self): + utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04' + v = self.loadfunc(BytesIO(utf16), dtype=None, encoding='UTF-16') + assert_array_equal(v, np.array(utf16.decode('UTF-16').split())) + + def test_utf8_byte_encoding(self): + utf8 = b"\xcf\x96" + norm = b"norm1,norm2,norm3\n" + enc = b"test1,testNonethe" + utf8 + b",test3\n" + s = norm + enc + norm + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(TextIO(s), + dtype=None, comments=None, delimiter=',', + encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + ctl = np.array([ + [b'norm1', b'norm2', b'norm3'], + [b'test1', b'testNonethe' + utf8, b'test3'], + [b'norm1', b'norm2', b'norm3']]) + assert_array_equal(test, ctl) + + def test_utf8_file(self): + utf8 = b"\xcf\x96" + with temppath() as path: + with open(path, "wb") as f: + f.write((b"test1,testNonethe" + utf8 + b",test3\n") * 2) + test = np.genfromtxt(path, dtype=None, comments=None, + delimiter=',', encoding="UTF-8") + ctl = np.array([ + ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"], + ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"]], + dtype=np.str_) + assert_array_equal(test, ctl) + + # test a mixed dtype + with open(path, "wb") as f: + f.write(b"0,testNonethe" + utf8) + test = np.genfromtxt(path, dtype=None, comments=None, + delimiter=',', encoding="UTF-8") + assert_equal(test['f0'], 0) + assert_equal(test['f1'], "testNonethe" + utf8.decode("UTF-8")) + + def test_utf8_file_nodtype_unicode(self): + # bytes encoding with non-latin1 -> unicode upcast + utf8 = '\u03d6' + latin1 = '\xf6\xfc\xf6' + + # skip test if cannot encode utf8 test string with preferred + # encoding. The preferred encoding is assumed to be the default + # encoding of open. Will need to change this for PyTest, maybe + # using pytest.mark.xfail(raises=***). + try: + encoding = locale.getpreferredencoding() + utf8.encode(encoding) + except (UnicodeError, ImportError): + pytest.skip('Skipping test_utf8_file_nodtype_unicode, ' + 'unable to encode utf8 in preferred encoding') + + with temppath() as path: + with open(path, "wt") as f: + f.write("norm1,norm2,norm3\n") + f.write("norm1," + latin1 + ",norm3\n") + f.write("test1,testNonethe" + utf8 + ",test3\n") + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', + VisibleDeprecationWarning) + test = np.genfromtxt(path, dtype=None, comments=None, + delimiter=',', encoding="bytes") + # Check for warning when encoding not specified. + assert_(w[0].category is VisibleDeprecationWarning) + ctl = np.array([ + ["norm1", "norm2", "norm3"], + ["norm1", latin1, "norm3"], + ["test1", "testNonethe" + utf8, "test3"]], + dtype=np.str_) + assert_array_equal(test, ctl) + + @pytest.mark.filterwarnings("ignore:.*recfromtxt.*:DeprecationWarning") + def test_recfromtxt(self): + # + data = TextIO('A,B\n0,1\n2,3') + kwargs = {"delimiter": ",", "missing_values": "N/A", "names": True} + test = recfromtxt(data, **kwargs) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + # + data = TextIO('A,B\n0,1\n2,N/A') + test = recfromtxt(data, dtype=None, usemask=True, **kwargs) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', int), ('B', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(test.A, [0, 2]) + + @pytest.mark.filterwarnings("ignore:.*recfromcsv.*:DeprecationWarning") + def test_recfromcsv(self): + # + data = TextIO('A,B\n0,1\n2,3') + kwargs = {"missing_values": "N/A", "names": True, "case_sensitive": True, + "encoding": "bytes"} + test = recfromcsv(data, dtype=None, **kwargs) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + # + data = TextIO('A,B\n0,1\n2,N/A') + test = recfromcsv(data, dtype=None, usemask=True, **kwargs) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', int), ('B', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(test.A, [0, 2]) + # + data = TextIO('A,B\n0,1\n2,3') + test = recfromcsv(data, missing_values='N/A',) + control = np.array([(0, 1), (2, 3)], + dtype=[('a', int), ('b', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + # + data = TextIO('A,B\n0,1\n2,3') + dtype = [('a', int), ('b', float)] + test = recfromcsv(data, missing_values='N/A', dtype=dtype) + control = np.array([(0, 1), (2, 3)], + dtype=dtype) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + + # gh-10394 + data = TextIO('color\n"red"\n"blue"') + test = recfromcsv(data, converters={0: lambda x: x.strip('\"')}) + control = np.array([('red',), ('blue',)], dtype=[('color', (str, 4))]) + assert_equal(test.dtype, control.dtype) + assert_equal(test, control) + + def test_max_rows(self): + # Test the `max_rows` keyword argument. + data = '1 2\n3 4\n5 6\n7 8\n9 10\n' + txt = TextIO(data) + a1 = np.genfromtxt(txt, max_rows=3) + a2 = np.genfromtxt(txt) + assert_equal(a1, [[1, 2], [3, 4], [5, 6]]) + assert_equal(a2, [[7, 8], [9, 10]]) + + # max_rows must be at least 1. + assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=0) + + # An input with several invalid rows. + data = '1 1\n2 2\n0 \n3 3\n4 4\n5 \n6 \n7 \n' + + test = np.genfromtxt(TextIO(data), max_rows=2) + control = np.array([[1., 1.], [2., 2.]]) + assert_equal(test, control) + + # Test keywords conflict + assert_raises(ValueError, np.genfromtxt, TextIO(data), skip_footer=1, + max_rows=4) + + # Test with invalid value + assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=4) + + # Test with invalid not raise + with warnings.catch_warnings(): + warnings.simplefilter('ignore', ConversionWarning) + + test = np.genfromtxt(TextIO(data), max_rows=4, invalid_raise=False) + control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]) + assert_equal(test, control) + + test = np.genfromtxt(TextIO(data), max_rows=5, invalid_raise=False) + control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]) + assert_equal(test, control) + + # Structured array with field names. + data = 'a b\n#c d\n1 1\n2 2\n#0 \n3 3\n4 4\n5 5\n' + + # Test with header, names and comments + txt = TextIO(data) + test = np.genfromtxt(txt, skip_header=1, max_rows=3, names=True) + control = np.array([(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)], + dtype=[('c', ' should convert to float + # 2**34 = 17179869184 => should convert to int64 + # 2**10 = 1024 => should convert to int (int32 on 32-bit systems, + # int64 on 64-bit systems) + + data = TextIO('73786976294838206464 17179869184 1024') + + test = np.genfromtxt(data, dtype=None) + + assert_equal(test.dtype.names, ['f0', 'f1', 'f2']) + + assert_(test.dtype['f0'] == float) + assert_(test.dtype['f1'] == np.int64) + assert_(test.dtype['f2'] == np.int_) + + assert_allclose(test['f0'], 73786976294838206464.) + assert_equal(test['f1'], 17179869184) + assert_equal(test['f2'], 1024) + + def test_unpack_float_data(self): + txt = TextIO("1,2,3\n4,5,6\n7,8,9\n0.0,1.0,2.0") + a, b, c = np.loadtxt(txt, delimiter=",", unpack=True) + assert_array_equal(a, np.array([1.0, 4.0, 7.0, 0.0])) + assert_array_equal(b, np.array([2.0, 5.0, 8.0, 1.0])) + assert_array_equal(c, np.array([3.0, 6.0, 9.0, 2.0])) + + def test_unpack_structured(self): + # Regression test for gh-4341 + # Unpacking should work on structured arrays + txt = TextIO("M 21 72\nF 35 58") + dt = {'names': ('a', 'b', 'c'), 'formats': ('S1', 'i4', 'f4')} + a, b, c = np.genfromtxt(txt, dtype=dt, unpack=True) + assert_equal(a.dtype, np.dtype('S1')) + assert_equal(b.dtype, np.dtype('i4')) + assert_equal(c.dtype, np.dtype('f4')) + assert_array_equal(a, np.array([b'M', b'F'])) + assert_array_equal(b, np.array([21, 35])) + assert_array_equal(c, np.array([72., 58.])) + + def test_unpack_auto_dtype(self): + # Regression test for gh-4341 + # Unpacking should work when dtype=None + txt = TextIO("M 21 72.\nF 35 58.") + expected = (np.array(["M", "F"]), np.array([21, 35]), np.array([72., 58.])) + test = np.genfromtxt(txt, dtype=None, unpack=True, encoding="utf-8") + for arr, result in zip(expected, test): + assert_array_equal(arr, result) + assert_equal(arr.dtype, result.dtype) + + def test_unpack_single_name(self): + # Regression test for gh-4341 + # Unpacking should work when structured dtype has only one field + txt = TextIO("21\n35") + dt = {'names': ('a',), 'formats': ('i4',)} + expected = np.array([21, 35], dtype=np.int32) + test = np.genfromtxt(txt, dtype=dt, unpack=True) + assert_array_equal(expected, test) + assert_equal(expected.dtype, test.dtype) + + def test_squeeze_scalar(self): + # Regression test for gh-4341 + # Unpacking a scalar should give zero-dim output, + # even if dtype is structured + txt = TextIO("1") + dt = {'names': ('a',), 'formats': ('i4',)} + expected = np.array((1,), dtype=np.int32) + test = np.genfromtxt(txt, dtype=dt, unpack=True) + assert_array_equal(expected, test) + assert_equal((), test.shape) + assert_equal(expected.dtype, test.dtype) + + @pytest.mark.parametrize("ndim", [0, 1, 2]) + def test_ndmin_keyword(self, ndim: int): + # let's have the same behaviour of ndmin as loadtxt + # as they should be the same for non-missing values + txt = "42" + + a = np.loadtxt(StringIO(txt), ndmin=ndim) + b = np.genfromtxt(StringIO(txt), ndmin=ndim) + + assert_array_equal(a, b) + + +class TestPathUsage: + # Test that pathlib.Path can be used + def test_loadtxt(self): + with temppath(suffix='.txt') as path: + path = Path(path) + a = np.array([[1.1, 2], [3, 4]]) + np.savetxt(path, a) + x = np.loadtxt(path) + assert_array_equal(x, a) + + def test_save_load(self): + # Test that pathlib.Path instances can be used with save. + with temppath(suffix='.npy') as path: + path = Path(path) + a = np.array([[1, 2], [3, 4]], int) + np.save(path, a) + data = np.load(path) + assert_array_equal(data, a) + + def test_save_load_memmap(self): + # Test that pathlib.Path instances can be loaded mem-mapped. + with temppath(suffix='.npy') as path: + path = Path(path) + a = np.array([[1, 2], [3, 4]], int) + np.save(path, a) + data = np.load(path, mmap_mode='r') + assert_array_equal(data, a) + # close the mem-mapped file + del data + if IS_PYPY: + break_cycles() + break_cycles() + + @pytest.mark.xfail(IS_WASM, reason="memmap doesn't work correctly") + @pytest.mark.parametrize("filename_type", [Path, str]) + def test_save_load_memmap_readwrite(self, filename_type): + with temppath(suffix='.npy') as path: + path = filename_type(path) + a = np.array([[1, 2], [3, 4]], int) + np.save(path, a) + b = np.load(path, mmap_mode='r+') + a[0][0] = 5 + b[0][0] = 5 + del b # closes the file + if IS_PYPY: + break_cycles() + break_cycles() + data = np.load(path) + assert_array_equal(data, a) + + @pytest.mark.parametrize("filename_type", [Path, str]) + def test_savez_load(self, filename_type): + with temppath(suffix='.npz') as path: + path = filename_type(path) + np.savez(path, lab='place holder') + with np.load(path) as data: + assert_array_equal(data['lab'], 'place holder') + + @pytest.mark.parametrize("filename_type", [Path, str]) + def test_savez_compressed_load(self, filename_type): + with temppath(suffix='.npz') as path: + path = filename_type(path) + np.savez_compressed(path, lab='place holder') + data = np.load(path) + assert_array_equal(data['lab'], 'place holder') + data.close() + + @pytest.mark.parametrize("filename_type", [Path, str]) + def test_genfromtxt(self, filename_type): + with temppath(suffix='.txt') as path: + path = filename_type(path) + a = np.array([(1, 2), (3, 4)]) + np.savetxt(path, a) + data = np.genfromtxt(path) + assert_array_equal(a, data) + + @pytest.mark.parametrize("filename_type", [Path, str]) + @pytest.mark.filterwarnings("ignore:.*recfromtxt.*:DeprecationWarning") + def test_recfromtxt(self, filename_type): + with temppath(suffix='.txt') as path: + path = filename_type(path) + with open(path, 'w') as f: + f.write('A,B\n0,1\n2,3') + + kwargs = {"delimiter": ",", "missing_values": "N/A", "names": True} + test = recfromtxt(path, **kwargs) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + + @pytest.mark.parametrize("filename_type", [Path, str]) + @pytest.mark.filterwarnings("ignore:.*recfromcsv.*:DeprecationWarning") + def test_recfromcsv(self, filename_type): + with temppath(suffix='.txt') as path: + path = filename_type(path) + with open(path, 'w') as f: + f.write('A,B\n0,1\n2,3') + + kwargs = { + "missing_values": "N/A", "names": True, "case_sensitive": True + } + test = recfromcsv(path, dtype=None, **kwargs) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + + +def test_gzip_load(): + a = np.random.random((5, 5)) + + s = BytesIO() + f = gzip.GzipFile(fileobj=s, mode="w") + + np.save(f, a) + f.close() + s.seek(0) + + f = gzip.GzipFile(fileobj=s, mode="r") + assert_array_equal(np.load(f), a) + + +# These next two classes encode the minimal API needed to save()/load() arrays. +# The `test_ducktyping` ensures they work correctly +class JustWriter: + def __init__(self, base): + self.base = base + + def write(self, s): + return self.base.write(s) + + def flush(self): + return self.base.flush() + +class JustReader: + def __init__(self, base): + self.base = base + + def read(self, n): + return self.base.read(n) + + def seek(self, off, whence=0): + return self.base.seek(off, whence) + + +def test_ducktyping(): + a = np.random.random((5, 5)) + + s = BytesIO() + f = JustWriter(s) + + np.save(f, a) + f.flush() + s.seek(0) + + f = JustReader(s) + assert_array_equal(np.load(f), a) + + +def test_gzip_loadtxt(): + # Thanks to another windows brokenness, we can't use + # NamedTemporaryFile: a file created from this function cannot be + # reopened by another open call. So we first put the gzipped string + # of the test reference array, write it to a securely opened file, + # which is then read from by the loadtxt function + s = BytesIO() + g = gzip.GzipFile(fileobj=s, mode='w') + g.write(b'1 2 3\n') + g.close() + + s.seek(0) + with temppath(suffix='.gz') as name: + with open(name, 'wb') as f: + f.write(s.read()) + res = np.loadtxt(name) + s.close() + + assert_array_equal(res, [1, 2, 3]) + + +def test_gzip_loadtxt_from_string(): + s = BytesIO() + f = gzip.GzipFile(fileobj=s, mode="w") + f.write(b'1 2 3\n') + f.close() + s.seek(0) + + f = gzip.GzipFile(fileobj=s, mode="r") + assert_array_equal(np.loadtxt(f), [1, 2, 3]) + + +def test_npzfile_dict(): + s = BytesIO() + x = np.zeros((3, 3)) + y = np.zeros((3, 3)) + + np.savez(s, x=x, y=y) + s.seek(0) + + z = np.load(s) + + assert_('x' in z) + assert_('y' in z) + assert_('x' in z.keys()) + assert_('y' in z.keys()) + + for f, a in z.items(): + assert_(f in ['x', 'y']) + assert_equal(a.shape, (3, 3)) + + for a in z.values(): + assert_equal(a.shape, (3, 3)) + + assert_(len(z.items()) == 2) + + for f in z: + assert_(f in ['x', 'y']) + + assert_('x' in z.keys()) + assert (z.get('x') == z['x']).all() + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +@pytest.mark.thread_unsafe(reason="garbage collector is global state") +def test_load_refcount(): + # Check that objects returned by np.load are directly freed based on + # their refcount, rather than needing the gc to collect them. + + f = BytesIO() + np.savez(f, [1, 2, 3]) + f.seek(0) + + with assert_no_gc_cycles(): + np.load(f) + + f.seek(0) + dt = [("a", 'u1', 2), ("b", 'u1', 2)] + with assert_no_gc_cycles(): + x = np.loadtxt(TextIO("0 1 2 3"), dtype=dt) + assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt)) + + +def test_load_multiple_arrays_until_eof(): + f = BytesIO() + np.save(f, 1) + np.save(f, 2) + f.seek(0) + out1 = np.load(f) + assert out1 == 1 + out2 = np.load(f) + assert out2 == 2 + with pytest.raises(EOFError): + np.load(f) + + +def test_savez_nopickle(): + obj_array = np.array([1, 'hello'], dtype=object) + with temppath(suffix='.npz') as tmp: + np.savez(tmp, obj_array) + + with temppath(suffix='.npz') as tmp: + with pytest.raises(ValueError, match="Object arrays cannot be saved when.*"): + np.savez(tmp, obj_array, allow_pickle=False) + + with temppath(suffix='.npz') as tmp: + np.savez_compressed(tmp, obj_array) + + with temppath(suffix='.npz') as tmp: + with pytest.raises(ValueError, match="Object arrays cannot be saved when.*"): + np.savez_compressed(tmp, obj_array, allow_pickle=False) diff --git a/local_packages/numpy/lib/tests/test_loadtxt.py b/local_packages/numpy/lib/tests/test_loadtxt.py new file mode 100644 index 0000000..7a4ed17 --- /dev/null +++ b/local_packages/numpy/lib/tests/test_loadtxt.py @@ -0,0 +1,1099 @@ +""" +Tests specific to `np.loadtxt` added during the move of loadtxt to be backed +by C code. +These tests complement those found in `test_io.py`. +""" + +import os +import sys +from io import StringIO +from tempfile import NamedTemporaryFile, mkstemp + +import pytest + +import numpy as np +from numpy.ma.testutils import assert_equal +from numpy.testing import HAS_REFCOUNT, IS_PYPY, assert_array_equal + + +def test_scientific_notation(): + """Test that both 'e' and 'E' are parsed correctly.""" + data = StringIO( + + "1.0e-1,2.0E1,3.0\n" + "4.0e-2,5.0E-1,6.0\n" + "7.0e-3,8.0E1,9.0\n" + "0.0e-4,1.0E-1,2.0" + + ) + expected = np.array( + [[0.1, 20., 3.0], [0.04, 0.5, 6], [0.007, 80., 9], [0, 0.1, 2]] + ) + assert_array_equal(np.loadtxt(data, delimiter=","), expected) + + +@pytest.mark.parametrize("comment", ["..", "//", "@-", "this is a comment:"]) +def test_comment_multiple_chars(comment): + content = "# IGNORE\n1.5, 2.5# ABC\n3.0,4.0# XXX\n5.5,6.0\n" + txt = StringIO(content.replace("#", comment)) + a = np.loadtxt(txt, delimiter=",", comments=comment) + assert_equal(a, [[1.5, 2.5], [3.0, 4.0], [5.5, 6.0]]) + + +def mixed_types_structured(): + """ + Function providing heterogeneous input data with a structured dtype, along + with the associated structured array. + """ + data = StringIO( + + "1000;2.4;alpha;-34\n" + "2000;3.1;beta;29\n" + "3500;9.9;gamma;120\n" + "4090;8.1;delta;0\n" + "5001;4.4;epsilon;-99\n" + "6543;7.8;omega;-1\n" + + ) + dtype = np.dtype( + [('f0', np.uint16), ('f1', np.float64), ('f2', 'S7'), ('f3', np.int8)] + ) + expected = np.array( + [ + (1000, 2.4, "alpha", -34), + (2000, 3.1, "beta", 29), + (3500, 9.9, "gamma", 120), + (4090, 8.1, "delta", 0), + (5001, 4.4, "epsilon", -99), + (6543, 7.8, "omega", -1) + ], + dtype=dtype + ) + return data, dtype, expected + + +@pytest.mark.parametrize('skiprows', [0, 1, 2, 3]) +def test_structured_dtype_and_skiprows_no_empty_lines(skiprows): + data, dtype, expected = mixed_types_structured() + a = np.loadtxt(data, dtype=dtype, delimiter=";", skiprows=skiprows) + assert_array_equal(a, expected[skiprows:]) + + +def test_unpack_structured(): + data, dtype, expected = mixed_types_structured() + + a, b, c, d = np.loadtxt(data, dtype=dtype, delimiter=";", unpack=True) + assert_array_equal(a, expected["f0"]) + assert_array_equal(b, expected["f1"]) + assert_array_equal(c, expected["f2"]) + assert_array_equal(d, expected["f3"]) + + +def test_structured_dtype_with_shape(): + dtype = np.dtype([("a", "u1", 2), ("b", "u1", 2)]) + data = StringIO("0,1,2,3\n6,7,8,9\n") + expected = np.array([((0, 1), (2, 3)), ((6, 7), (8, 9))], dtype=dtype) + assert_array_equal(np.loadtxt(data, delimiter=",", dtype=dtype), expected) + + +def test_structured_dtype_with_multi_shape(): + dtype = np.dtype([("a", "u1", (2, 2))]) + data = StringIO("0 1 2 3\n") + expected = np.array([(((0, 1), (2, 3)),)], dtype=dtype) + assert_array_equal(np.loadtxt(data, dtype=dtype), expected) + + +def test_nested_structured_subarray(): + # Test from gh-16678 + point = np.dtype([('x', float), ('y', float)]) + dt = np.dtype([('code', int), ('points', point, (2,))]) + data = StringIO("100,1,2,3,4\n200,5,6,7,8\n") + expected = np.array( + [ + (100, [(1., 2.), (3., 4.)]), + (200, [(5., 6.), (7., 8.)]), + ], + dtype=dt + ) + assert_array_equal(np.loadtxt(data, dtype=dt, delimiter=","), expected) + + +def test_structured_dtype_offsets(): + # An aligned structured dtype will have additional padding + dt = np.dtype("i1, i4, i1, i4, i1, i4", align=True) + data = StringIO("1,2,3,4,5,6\n7,8,9,10,11,12\n") + expected = np.array([(1, 2, 3, 4, 5, 6), (7, 8, 9, 10, 11, 12)], dtype=dt) + assert_array_equal(np.loadtxt(data, delimiter=",", dtype=dt), expected) + + +@pytest.mark.parametrize("param", ("skiprows", "max_rows")) +def test_exception_negative_row_limits(param): + """skiprows and max_rows should raise for negative parameters.""" + with pytest.raises(ValueError, match="argument must be nonnegative"): + np.loadtxt("foo.bar", **{param: -3}) + + +@pytest.mark.parametrize("param", ("skiprows", "max_rows")) +def test_exception_noninteger_row_limits(param): + with pytest.raises(TypeError, match="argument must be an integer"): + np.loadtxt("foo.bar", **{param: 1.0}) + + +@pytest.mark.parametrize( + "data, shape", + [ + ("1 2 3 4 5\n", (1, 5)), # Single row + ("1\n2\n3\n4\n5\n", (5, 1)), # Single column + ] +) +def test_ndmin_single_row_or_col(data, shape): + arr = np.array([1, 2, 3, 4, 5]) + arr2d = arr.reshape(shape) + + assert_array_equal(np.loadtxt(StringIO(data), dtype=int), arr) + assert_array_equal(np.loadtxt(StringIO(data), dtype=int, ndmin=0), arr) + assert_array_equal(np.loadtxt(StringIO(data), dtype=int, ndmin=1), arr) + assert_array_equal(np.loadtxt(StringIO(data), dtype=int, ndmin=2), arr2d) + + +@pytest.mark.parametrize("badval", [-1, 3, None, "plate of shrimp"]) +def test_bad_ndmin(badval): + with pytest.raises(ValueError, match="Illegal value of ndmin keyword"): + np.loadtxt("foo.bar", ndmin=badval) + + +@pytest.mark.parametrize( + "ws", + ( + " ", # space + "\t", # tab + "\u2003", # em + "\u00A0", # non-break + "\u3000", # ideographic space + ) +) +def test_blank_lines_spaces_delimit(ws): + txt = StringIO( + f"1 2{ws}30\n\n{ws}\n" + f"4 5 60{ws}\n {ws} \n" + f"7 8 {ws} 90\n # comment\n" + f"3 2 1" + ) + # NOTE: It is unclear that the ` # comment` should succeed. Except + # for delimiter=None, which should use any whitespace (and maybe + # should just be implemented closer to Python + expected = np.array([[1, 2, 30], [4, 5, 60], [7, 8, 90], [3, 2, 1]]) + assert_equal( + np.loadtxt(txt, dtype=int, delimiter=None, comments="#"), expected + ) + + +def test_blank_lines_normal_delimiter(): + txt = StringIO('1,2,30\n\n4,5,60\n\n7,8,90\n# comment\n3,2,1') + expected = np.array([[1, 2, 30], [4, 5, 60], [7, 8, 90], [3, 2, 1]]) + assert_equal( + np.loadtxt(txt, dtype=int, delimiter=',', comments="#"), expected + ) + + +@pytest.mark.parametrize("dtype", (float, object)) +def test_maxrows_no_blank_lines(dtype): + txt = StringIO("1.5,2.5\n3.0,4.0\n5.5,6.0") + res = np.loadtxt(txt, dtype=dtype, delimiter=",", max_rows=2) + assert_equal(res.dtype, dtype) + assert_equal(res, np.array([["1.5", "2.5"], ["3.0", "4.0"]], dtype=dtype)) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", (np.dtype("f8"), np.dtype("i2"))) +def test_exception_message_bad_values(dtype): + txt = StringIO("1,2\n3,XXX\n5,6") + msg = f"could not convert string 'XXX' to {dtype} at row 1, column 2" + with pytest.raises(ValueError, match=msg): + np.loadtxt(txt, dtype=dtype, delimiter=",") + + +def test_converters_negative_indices(): + txt = StringIO('1.5,2.5\n3.0,XXX\n5.5,6.0') + conv = {-1: lambda s: np.nan if s == 'XXX' else float(s)} + expected = np.array([[1.5, 2.5], [3.0, np.nan], [5.5, 6.0]]) + res = np.loadtxt(txt, dtype=np.float64, delimiter=",", converters=conv) + assert_equal(res, expected) + + +def test_converters_negative_indices_with_usecols(): + txt = StringIO('1.5,2.5,3.5\n3.0,4.0,XXX\n5.5,6.0,7.5\n') + conv = {-1: lambda s: np.nan if s == 'XXX' else float(s)} + expected = np.array([[1.5, 3.5], [3.0, np.nan], [5.5, 7.5]]) + res = np.loadtxt( + txt, + dtype=np.float64, + delimiter=",", + converters=conv, + usecols=[0, -1], + ) + assert_equal(res, expected) + + # Second test with variable number of rows: + res = np.loadtxt(StringIO('''0,1,2\n0,1,2,3,4'''), delimiter=",", + usecols=[0, -1], converters={-1: (lambda x: -1)}) + assert_array_equal(res, [[0, -1], [0, -1]]) + + +def test_ragged_error(): + rows = ["1,2,3", "1,2,3", "4,3,2,1"] + with pytest.raises(ValueError, + match="the number of columns changed from 3 to 4 at row 3"): + np.loadtxt(rows, delimiter=",") + + +def test_ragged_usecols(): + # usecols, and negative ones, work even with varying number of columns. + txt = StringIO("0,0,XXX\n0,XXX,0,XXX\n0,XXX,XXX,0,XXX\n") + expected = np.array([[0, 0], [0, 0], [0, 0]]) + res = np.loadtxt(txt, dtype=float, delimiter=",", usecols=[0, -2]) + assert_equal(res, expected) + + txt = StringIO("0,0,XXX\n0\n0,XXX,XXX,0,XXX\n") + with pytest.raises(ValueError, + match="invalid column index -2 at row 2 with 1 columns"): + # There is no -2 column in the second row: + np.loadtxt(txt, dtype=float, delimiter=",", usecols=[0, -2]) + + +def test_empty_usecols(): + txt = StringIO("0,0,XXX\n0,XXX,0,XXX\n0,XXX,XXX,0,XXX\n") + res = np.loadtxt(txt, dtype=np.dtype([]), delimiter=",", usecols=[]) + assert res.shape == (3,) + assert res.dtype == np.dtype([]) + + +@pytest.mark.parametrize("c1", ["a", "の", "🫕"]) +@pytest.mark.parametrize("c2", ["a", "の", "🫕"]) +def test_large_unicode_characters(c1, c2): + # c1 and c2 span ascii, 16bit and 32bit range. + txt = StringIO(f"a,{c1},c,1.0\ne,{c2},2.0,g") + res = np.loadtxt(txt, dtype=np.dtype('U12'), delimiter=",") + expected = np.array( + [f"a,{c1},c,1.0".split(","), f"e,{c2},2.0,g".split(",")], + dtype=np.dtype('U12') + ) + assert_equal(res, expected) + + +def test_unicode_with_converter(): + txt = StringIO("cat,dog\nαβγ,δεζ\nabc,def\n") + conv = {0: lambda s: s.upper()} + res = np.loadtxt( + txt, + dtype=np.dtype("U12"), + converters=conv, + delimiter=",", + encoding=None + ) + expected = np.array([['CAT', 'dog'], ['ΑΒΓ', 'δεζ'], ['ABC', 'def']]) + assert_equal(res, expected) + + +def test_converter_with_structured_dtype(): + txt = StringIO('1.5,2.5,Abc\n3.0,4.0,dEf\n5.5,6.0,ghI\n') + dt = np.dtype([('m', np.int32), ('r', np.float32), ('code', 'U8')]) + conv = {0: lambda s: int(10 * float(s)), -1: lambda s: s.upper()} + res = np.loadtxt(txt, dtype=dt, delimiter=",", converters=conv) + expected = np.array( + [(15, 2.5, 'ABC'), (30, 4.0, 'DEF'), (55, 6.0, 'GHI')], dtype=dt + ) + assert_equal(res, expected) + + +def test_converter_with_unicode_dtype(): + """ + With the 'bytes' encoding, tokens are encoded prior to being + passed to the converter. This means that the output of the converter may + be bytes instead of unicode as expected by `read_rows`. + + This test checks that outputs from the above scenario are properly decoded + prior to parsing by `read_rows`. + """ + txt = StringIO('abc,def\nrst,xyz') + conv = bytes.upper + res = np.loadtxt( + txt, dtype=np.dtype("U3"), converters=conv, delimiter=",", + encoding="bytes") + expected = np.array([['ABC', 'DEF'], ['RST', 'XYZ']]) + assert_equal(res, expected) + + +def test_read_huge_row(): + row = "1.5, 2.5," * 50000 + row = row[:-1] + "\n" + txt = StringIO(row * 2) + res = np.loadtxt(txt, delimiter=",", dtype=float) + assert_equal(res, np.tile([1.5, 2.5], (2, 50000))) + + +@pytest.mark.parametrize("dtype", "edfgFDG") +def test_huge_float(dtype): + # Covers a non-optimized path that is rarely taken: + field = "0" * 1000 + ".123456789" + dtype = np.dtype(dtype) + value = np.loadtxt([field], dtype=dtype)[()] + assert value == dtype.type("0.123456789") + + +@pytest.mark.parametrize( + ("given_dtype", "expected_dtype"), + [ + ("S", np.dtype("S5")), + ("U", np.dtype("U5")), + ], +) +def test_string_no_length_given(given_dtype, expected_dtype): + """ + The given dtype is just 'S' or 'U' with no length. In these cases, the + length of the resulting dtype is determined by the longest string found + in the file. + """ + txt = StringIO("AAA,5-1\nBBBBB,0-3\nC,4-9\n") + res = np.loadtxt(txt, dtype=given_dtype, delimiter=",") + expected = np.array( + [['AAA', '5-1'], ['BBBBB', '0-3'], ['C', '4-9']], dtype=expected_dtype + ) + assert_equal(res, expected) + assert_equal(res.dtype, expected_dtype) + + +def test_float_conversion(): + """ + Some tests that the conversion to float64 works as accurately as the + Python built-in `float` function. In a naive version of the float parser, + these strings resulted in values that were off by an ULP or two. + """ + strings = [ + '0.9999999999999999', + '9876543210.123456', + '5.43215432154321e+300', + '0.901', + '0.333', + ] + txt = StringIO('\n'.join(strings)) + res = np.loadtxt(txt) + expected = np.array([float(s) for s in strings]) + assert_equal(res, expected) + + +def test_bool(): + # Simple test for bool via integer + txt = StringIO("1, 0\n10, -1") + res = np.loadtxt(txt, dtype=bool, delimiter=",") + assert res.dtype == bool + assert_array_equal(res, [[True, False], [True, True]]) + # Make sure we use only 1 and 0 on the byte level: + assert_array_equal(res.view(np.uint8), [[1, 0], [1, 1]]) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) +@pytest.mark.filterwarnings("error:.*integer via a float.*:DeprecationWarning") +def test_integer_signs(dtype): + dtype = np.dtype(dtype) + assert np.loadtxt(["+2"], dtype=dtype) == 2 + if dtype.kind == "u": + with pytest.raises(ValueError): + np.loadtxt(["-1\n"], dtype=dtype) + else: + assert np.loadtxt(["-2\n"], dtype=dtype) == -2 + + for sign in ["++", "+-", "--", "-+"]: + with pytest.raises(ValueError): + np.loadtxt([f"{sign}2\n"], dtype=dtype) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) +@pytest.mark.filterwarnings("error:.*integer via a float.*:DeprecationWarning") +def test_implicit_cast_float_to_int_fails(dtype): + txt = StringIO("1.0, 2.1, 3.7\n4, 5, 6") + with pytest.raises(ValueError): + np.loadtxt(txt, dtype=dtype, delimiter=",") + +@pytest.mark.parametrize("dtype", (np.complex64, np.complex128)) +@pytest.mark.parametrize("with_parens", (False, True)) +def test_complex_parsing(dtype, with_parens): + s = "(1.0-2.5j),3.75,(7+-5.0j)\n(4),(-19e2j),(0)" + if not with_parens: + s = s.replace("(", "").replace(")", "") + + res = np.loadtxt(StringIO(s), dtype=dtype, delimiter=",") + expected = np.array( + [[1.0 - 2.5j, 3.75, 7 - 5j], [4.0, -1900j, 0]], dtype=dtype + ) + assert_equal(res, expected) + + +def test_read_from_generator(): + def gen(): + for i in range(4): + yield f"{i},{2 * i},{i**2}" + + res = np.loadtxt(gen(), dtype=int, delimiter=",") + expected = np.array([[0, 0, 0], [1, 2, 1], [2, 4, 4], [3, 6, 9]]) + assert_equal(res, expected) + + +def test_read_from_generator_multitype(): + def gen(): + for i in range(3): + yield f"{i} {i / 4}" + + res = np.loadtxt(gen(), dtype="i, d", delimiter=" ") + expected = np.array([(0, 0.0), (1, 0.25), (2, 0.5)], dtype="i, d") + assert_equal(res, expected) + + +def test_read_from_bad_generator(): + def gen(): + yield from ["1,2", b"3, 5", 12738] + + with pytest.raises( + TypeError, match=r"non-string returned while reading data"): + np.loadtxt(gen(), dtype="i, i", delimiter=",") + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_object_cleanup_on_read_error(): + sentinel = object() + already_read = 0 + + def conv(x): + nonlocal already_read + if already_read > 4999: + raise ValueError("failed half-way through!") + already_read += 1 + return sentinel + + txt = StringIO("x\n" * 10000) + + with pytest.raises(ValueError, match="at row 5000, column 1"): + np.loadtxt(txt, dtype=object, converters={0: conv}) + + assert sys.getrefcount(sentinel) == 2 + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +def test_character_not_bytes_compatible(): + """Test exception when a character cannot be encoded as 'S'.""" + data = StringIO("–") # == \u2013 + with pytest.raises(ValueError): + np.loadtxt(data, dtype="S5") + + +@pytest.mark.parametrize("conv", (0, [float], "")) +def test_invalid_converter(conv): + msg = ( + "converters must be a dictionary mapping columns to converter " + "functions or a single callable." + ) + with pytest.raises(TypeError, match=msg): + np.loadtxt(StringIO("1 2\n3 4"), converters=conv) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +def test_converters_dict_raises_non_integer_key(): + with pytest.raises(TypeError, match="keys of the converters dict"): + np.loadtxt(StringIO("1 2\n3 4"), converters={"a": int}) + with pytest.raises(TypeError, match="keys of the converters dict"): + np.loadtxt(StringIO("1 2\n3 4"), converters={"a": int}, usecols=0) + + +@pytest.mark.parametrize("bad_col_ind", (3, -3)) +def test_converters_dict_raises_non_col_key(bad_col_ind): + data = StringIO("1 2\n3 4") + with pytest.raises(ValueError, match="converter specified for column"): + np.loadtxt(data, converters={bad_col_ind: int}) + + +def test_converters_dict_raises_val_not_callable(): + with pytest.raises(TypeError, + match="values of the converters dictionary must be callable"): + np.loadtxt(StringIO("1 2\n3 4"), converters={0: 1}) + + +@pytest.mark.parametrize("q", ('"', "'", "`")) +def test_quoted_field(q): + txt = StringIO( + f"{q}alpha, x{q}, 2.5\n{q}beta, y{q}, 4.5\n{q}gamma, z{q}, 5.0\n" + ) + dtype = np.dtype([('f0', 'U8'), ('f1', np.float64)]) + expected = np.array( + [("alpha, x", 2.5), ("beta, y", 4.5), ("gamma, z", 5.0)], dtype=dtype + ) + + res = np.loadtxt(txt, dtype=dtype, delimiter=",", quotechar=q) + assert_array_equal(res, expected) + + +@pytest.mark.parametrize("q", ('"', "'", "`")) +def test_quoted_field_with_whitepace_delimiter(q): + txt = StringIO( + f"{q}alpha, x{q} 2.5\n{q}beta, y{q} 4.5\n{q}gamma, z{q} 5.0\n" + ) + dtype = np.dtype([('f0', 'U8'), ('f1', np.float64)]) + expected = np.array( + [("alpha, x", 2.5), ("beta, y", 4.5), ("gamma, z", 5.0)], dtype=dtype + ) + + res = np.loadtxt(txt, dtype=dtype, delimiter=None, quotechar=q) + assert_array_equal(res, expected) + + +def test_quote_support_default(): + """Support for quoted fields is disabled by default.""" + txt = StringIO('"lat,long", 45, 30\n') + dtype = np.dtype([('f0', 'U24'), ('f1', np.float64), ('f2', np.float64)]) + + with pytest.raises(ValueError, + match="the dtype passed requires 3 columns but 4 were"): + np.loadtxt(txt, dtype=dtype, delimiter=",") + + # Enable quoting support with non-None value for quotechar param + txt.seek(0) + expected = np.array([("lat,long", 45., 30.)], dtype=dtype) + + res = np.loadtxt(txt, dtype=dtype, delimiter=",", quotechar='"') + assert_array_equal(res, expected) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +def test_quotechar_multichar_error(): + txt = StringIO("1,2\n3,4") + msg = r".*must be a single unicode character or None" + with pytest.raises(TypeError, match=msg): + np.loadtxt(txt, delimiter=",", quotechar="''") + + +def test_comment_multichar_error_with_quote(): + txt = StringIO("1,2\n3,4") + msg = ( + "when multiple comments or a multi-character comment is given, " + "quotes are not supported." + ) + with pytest.raises(ValueError, match=msg): + np.loadtxt(txt, delimiter=",", comments="123", quotechar='"') + with pytest.raises(ValueError, match=msg): + np.loadtxt(txt, delimiter=",", comments=["#", "%"], quotechar='"') + + # A single character string in a tuple is unpacked though: + res = np.loadtxt(txt, delimiter=",", comments=("#",), quotechar="'") + assert_equal(res, [[1, 2], [3, 4]]) + + +def test_structured_dtype_with_quotes(): + data = StringIO( + + "1000;2.4;'alpha';-34\n" + "2000;3.1;'beta';29\n" + "3500;9.9;'gamma';120\n" + "4090;8.1;'delta';0\n" + "5001;4.4;'epsilon';-99\n" + "6543;7.8;'omega';-1\n" + + ) + dtype = np.dtype( + [('f0', np.uint16), ('f1', np.float64), ('f2', 'S7'), ('f3', np.int8)] + ) + expected = np.array( + [ + (1000, 2.4, "alpha", -34), + (2000, 3.1, "beta", 29), + (3500, 9.9, "gamma", 120), + (4090, 8.1, "delta", 0), + (5001, 4.4, "epsilon", -99), + (6543, 7.8, "omega", -1) + ], + dtype=dtype + ) + res = np.loadtxt(data, dtype=dtype, delimiter=";", quotechar="'") + assert_array_equal(res, expected) + + +def test_quoted_field_is_not_empty(): + txt = StringIO('1\n\n"4"\n""') + expected = np.array(["1", "4", ""], dtype="U1") + res = np.loadtxt(txt, delimiter=",", dtype="U1", quotechar='"') + assert_equal(res, expected) + +def test_quoted_field_is_not_empty_nonstrict(): + # Same as test_quoted_field_is_not_empty but check that we are not strict + # about missing closing quote (this is the `csv.reader` default also) + txt = StringIO('1\n\n"4"\n"') + expected = np.array(["1", "4", ""], dtype="U1") + res = np.loadtxt(txt, delimiter=",", dtype="U1", quotechar='"') + assert_equal(res, expected) + +def test_consecutive_quotechar_escaped(): + txt = StringIO('"Hello, my name is ""Monty""!"') + expected = np.array('Hello, my name is "Monty"!', dtype="U40") + res = np.loadtxt(txt, dtype="U40", delimiter=",", quotechar='"') + assert_equal(res, expected) + + +@pytest.mark.parametrize("data", ("", "\n\n\n", "# 1 2 3\n# 4 5 6\n")) +@pytest.mark.parametrize("ndmin", (0, 1, 2)) +@pytest.mark.parametrize("usecols", [None, (1, 2, 3)]) +def test_warn_on_no_data(data, ndmin, usecols): + """Check that a UserWarning is emitted when no data is read from input.""" + if usecols is not None: + expected_shape = (0, 3) + elif ndmin == 2: + expected_shape = (0, 1) # guess a single column?! + else: + expected_shape = (0,) + + txt = StringIO(data) + with pytest.warns(UserWarning, match="input contained no data"): + res = np.loadtxt(txt, ndmin=ndmin, usecols=usecols) + assert res.shape == expected_shape + + with NamedTemporaryFile(mode="w") as fh: + fh.write(data) + fh.seek(0) + with pytest.warns(UserWarning, match="input contained no data"): + res = np.loadtxt(txt, ndmin=ndmin, usecols=usecols) + assert res.shape == expected_shape + +@pytest.mark.parametrize("skiprows", (2, 3)) +def test_warn_on_skipped_data(skiprows): + data = "1 2 3\n4 5 6" + txt = StringIO(data) + with pytest.warns(UserWarning, match="input contained no data"): + np.loadtxt(txt, skiprows=skiprows) + + +@pytest.mark.parametrize(["dtype", "value"], [ + ("i2", 0x0001), ("u2", 0x0001), + ("i4", 0x00010203), ("u4", 0x00010203), + ("i8", 0x0001020304050607), ("u8", 0x0001020304050607), + # The following values are constructed to lead to unique bytes: + ("float16", 3.07e-05), + ("float32", 9.2557e-41), ("complex64", 9.2557e-41 + 2.8622554e-29j), + ("float64", -1.758571353180402e-24), + # Here and below, the repr side-steps a small loss of precision in + # complex `str` in PyPy (which is probably fine, as repr works): + ("complex128", repr(5.406409232372729e-29 - 1.758571353180402e-24j)), + # Use integer values that fit into double. Everything else leads to + # problems due to longdoubles going via double and decimal strings + # causing rounding errors. + ("longdouble", 0x01020304050607), + ("clongdouble", repr(0x01020304050607 + (0x00121314151617 * 1j))), + ("U2", "\U00010203\U000a0b0c")]) +@pytest.mark.parametrize("swap", [True, False]) +def test_byteswapping_and_unaligned(dtype, value, swap): + # Try to create "interesting" values within the valid unicode range: + dtype = np.dtype(dtype) + data = [f"x,{value}\n"] # repr as PyPy `str` truncates some + if swap: + dtype = dtype.newbyteorder() + full_dt = np.dtype([("a", "S1"), ("b", dtype)], align=False) + # The above ensures that the interesting "b" field is unaligned: + assert full_dt.fields["b"][1] == 1 + res = np.loadtxt(data, dtype=full_dt, delimiter=",", + max_rows=1) # max-rows prevents over-allocation + assert res["b"] == dtype.type(value) + + +@pytest.mark.parametrize("dtype", + np.typecodes["AllInteger"] + "efdFD" + "?") +def test_unicode_whitespace_stripping(dtype): + # Test that all numeric types (and bool) strip whitespace correctly + # \u202F is a narrow no-break space, `\n` is just a whitespace if quoted. + # Currently, skip float128 as it did not always support this and has no + # "custom" parsing: + txt = StringIO(' 3 ,"\u202F2\n"') + res = np.loadtxt(txt, dtype=dtype, delimiter=",", quotechar='"') + assert_array_equal(res, np.array([3, 2]).astype(dtype)) + + +@pytest.mark.parametrize("dtype", "FD") +def test_unicode_whitespace_stripping_complex(dtype): + # Complex has a few extra cases since it has two components and + # parentheses + line = " 1 , 2+3j , ( 4+5j ), ( 6+-7j ) , 8j , ( 9j ) \n" + data = [line, line.replace(" ", "\u202F")] + res = np.loadtxt(data, dtype=dtype, delimiter=',') + assert_array_equal(res, np.array([[1, 2 + 3j, 4 + 5j, 6 - 7j, 8j, 9j]] * 2)) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", "FD") +@pytest.mark.parametrize("field", + ["1 +2j", "1+ 2j", "1+2 j", "1+-+3", "(1j", "(1", "(1+2j", "1+2j)"]) +def test_bad_complex(dtype, field): + with pytest.raises(ValueError): + np.loadtxt([field + "\n"], dtype=dtype, delimiter=",") + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", + np.typecodes["AllInteger"] + "efgdFDG" + "?") +def test_nul_character_error(dtype): + # Test that a \0 character is correctly recognized as an error even if + # what comes before is valid (not everything gets parsed internally). + if dtype.lower() == "g": + pytest.xfail("longdouble/clongdouble assignment may misbehave.") + with pytest.raises(ValueError): + np.loadtxt(["1\000"], dtype=dtype, delimiter=",", quotechar='"') + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", + np.typecodes["AllInteger"] + "efgdFDG" + "?") +def test_no_thousands_support(dtype): + # Mainly to document behaviour, Python supports thousands like 1_1. + # (e and G may end up using different conversion and support it, this is + # a bug but happens...) + if dtype == "e": + pytest.skip("half assignment currently uses Python float converter") + if dtype in "eG": + pytest.xfail("clongdouble assignment is buggy (uses `complex`?).") + + assert int("1_1") == float("1_1") == complex("1_1") == 11 + with pytest.raises(ValueError): + np.loadtxt(["1_1\n"], dtype=dtype) + + +@pytest.mark.parametrize("data", [ + ["1,2\n", "2\n,3\n"], + ["1,2\n", "2\r,3\n"]]) +def test_bad_newline_in_iterator(data): + # In NumPy <=1.22 this was accepted, because newlines were completely + # ignored when the input was an iterable. This could be changed, but right + # now, we raise an error. + msg = "Found an unquoted embedded newline within a single line" + with pytest.raises(ValueError, match=msg): + np.loadtxt(data, delimiter=",") + + +@pytest.mark.parametrize("data", [ + ["1,2\n", "2,3\r\n"], # a universal newline + ["1,2\n", "'2\n',3\n"], # a quoted newline + ["1,2\n", "'2\r',3\n"], + ["1,2\n", "'2\r\n',3\n"], +]) +def test_good_newline_in_iterator(data): + # The quoted newlines will be untransformed here, but are just whitespace. + res = np.loadtxt(data, delimiter=",", quotechar="'") + assert_array_equal(res, [[1., 2.], [2., 3.]]) + + +@pytest.mark.parametrize("newline", ["\n", "\r", "\r\n"]) +def test_universal_newlines_quoted(newline): + # Check that universal newline support within the tokenizer is not applied + # to quoted fields. (note that lines must end in newline or quoted + # fields will not include a newline at all) + data = ['1,"2\n"\n', '3,"4\n', '1"\n'] + data = [row.replace("\n", newline) for row in data] + res = np.loadtxt(data, dtype=object, delimiter=",", quotechar='"') + assert_array_equal(res, [['1', f'2{newline}'], ['3', f'4{newline}1']]) + + +def test_null_character(): + # Basic tests to check that the NUL character is not special: + res = np.loadtxt(["1\0002\0003\n", "4\0005\0006"], delimiter="\000") + assert_array_equal(res, [[1, 2, 3], [4, 5, 6]]) + + # Also not as part of a field (avoid unicode/arrays as unicode strips \0) + res = np.loadtxt(["1\000,2\000,3\n", "4\000,5\000,6"], + delimiter=",", dtype=object) + assert res.tolist() == [["1\000", "2\000", "3"], ["4\000", "5\000", "6"]] + + +def test_iterator_fails_getting_next_line(): + class BadSequence: + def __len__(self): + return 100 + + def __getitem__(self, item): + if item == 50: + raise RuntimeError("Bad things happened!") + return f"{item}, {item + 1}" + + with pytest.raises(RuntimeError, match="Bad things happened!"): + np.loadtxt(BadSequence(), dtype=int, delimiter=",") + + +class TestCReaderUnitTests: + # These are internal tests for path that should not be possible to hit + # unless things go very very wrong somewhere. + def test_not_an_filelike(self): + with pytest.raises(AttributeError, match=".*read"): + np._core._multiarray_umath._load_from_filelike( + object(), dtype=np.dtype("i"), filelike=True) + + def test_filelike_read_fails(self): + # Can only be reached if loadtxt opens the file, so it is hard to do + # via the public interface (although maybe not impossible considering + # the current "DataClass" backing). + class BadFileLike: + counter = 0 + + def read(self, size): + self.counter += 1 + if self.counter > 20: + raise RuntimeError("Bad bad bad!") + return "1,2,3\n" + + with pytest.raises(RuntimeError, match="Bad bad bad!"): + np._core._multiarray_umath._load_from_filelike( + BadFileLike(), dtype=np.dtype("i"), filelike=True) + + def test_filelike_bad_read(self): + # Can only be reached if loadtxt opens the file, so it is hard to do + # via the public interface (although maybe not impossible considering + # the current "DataClass" backing). + + class BadFileLike: + counter = 0 + + def read(self, size): + return 1234 # not a string! + + with pytest.raises(TypeError, + match="non-string returned while reading data"): + np._core._multiarray_umath._load_from_filelike( + BadFileLike(), dtype=np.dtype("i"), filelike=True) + + def test_not_an_iter(self): + with pytest.raises(TypeError, + match="error reading from object, expected an iterable"): + np._core._multiarray_umath._load_from_filelike( + object(), dtype=np.dtype("i"), filelike=False) + + def test_bad_type(self): + with pytest.raises(TypeError, match="internal error: dtype must"): + np._core._multiarray_umath._load_from_filelike( + object(), dtype="i", filelike=False) + + def test_bad_encoding(self): + with pytest.raises(TypeError, match="encoding must be a unicode"): + np._core._multiarray_umath._load_from_filelike( + object(), dtype=np.dtype("i"), filelike=False, encoding=123) + + @pytest.mark.parametrize("newline", ["\r", "\n", "\r\n"]) + def test_manual_universal_newlines(self, newline): + # This is currently not available to users, because we should always + # open files with universal newlines enabled `newlines=None`. + # (And reading from an iterator uses slightly different code paths.) + # We have no real support for `newline="\r"` or `newline="\n" as the + # user cannot specify those options. + data = StringIO('0\n1\n"2\n"\n3\n4 #\n'.replace("\n", newline), + newline="") + + res = np._core._multiarray_umath._load_from_filelike( + data, dtype=np.dtype("U10"), filelike=True, + quote='"', comment="#", skiplines=1) + assert_array_equal(res[:, 0], ["1", f"2{newline}", "3", "4 "]) + + +def test_delimiter_comment_collision_raises(): + with pytest.raises(TypeError, match=".*control characters.*incompatible"): + np.loadtxt(StringIO("1, 2, 3"), delimiter=",", comments=",") + + +def test_delimiter_quotechar_collision_raises(): + with pytest.raises(TypeError, match=".*control characters.*incompatible"): + np.loadtxt(StringIO("1, 2, 3"), delimiter=",", quotechar=",") + + +def test_comment_quotechar_collision_raises(): + with pytest.raises(TypeError, match=".*control characters.*incompatible"): + np.loadtxt(StringIO("1 2 3"), comments="#", quotechar="#") + + +def test_delimiter_and_multiple_comments_collision_raises(): + with pytest.raises( + TypeError, match="Comment characters.*cannot include the delimiter" + ): + np.loadtxt(StringIO("1, 2, 3"), delimiter=",", comments=["#", ","]) + + +@pytest.mark.parametrize( + "ws", + ( + " ", # space + "\t", # tab + "\u2003", # em + "\u00A0", # non-break + "\u3000", # ideographic space + ) +) +def test_collision_with_default_delimiter_raises(ws): + with pytest.raises(TypeError, match=".*control characters.*incompatible"): + np.loadtxt(StringIO(f"1{ws}2{ws}3\n4{ws}5{ws}6\n"), comments=ws) + with pytest.raises(TypeError, match=".*control characters.*incompatible"): + np.loadtxt(StringIO(f"1{ws}2{ws}3\n4{ws}5{ws}6\n"), quotechar=ws) + + +@pytest.mark.parametrize("nl", ("\n", "\r")) +def test_control_character_newline_raises(nl): + txt = StringIO(f"1{nl}2{nl}3{nl}{nl}4{nl}5{nl}6{nl}{nl}") + msg = "control character.*cannot be a newline" + with pytest.raises(TypeError, match=msg): + np.loadtxt(txt, delimiter=nl) + with pytest.raises(TypeError, match=msg): + np.loadtxt(txt, comments=nl) + with pytest.raises(TypeError, match=msg): + np.loadtxt(txt, quotechar=nl) + + +@pytest.mark.parametrize( + ("generic_data", "long_datum", "unitless_dtype", "expected_dtype"), + [ + ("2012-03", "2013-01-15", "M8", "M8[D]"), # Datetimes + ("spam-a-lot", "tis_but_a_scratch", "U", "U17"), # str + ], +) +@pytest.mark.parametrize("nrows", (10, 50000, 60000)) # lt, eq, gt chunksize +def test_parametric_unit_discovery( + generic_data, long_datum, unitless_dtype, expected_dtype, nrows +): + """Check that the correct unit (e.g. month, day, second) is discovered from + the data when a user specifies a unitless datetime.""" + # Unit should be "D" (days) due to last entry + data = [generic_data] * nrows + [long_datum] + expected = np.array(data, dtype=expected_dtype) + assert len(data) == nrows + 1 + assert len(data) == len(expected) + + # file-like path + txt = StringIO("\n".join(data)) + a = np.loadtxt(txt, dtype=unitless_dtype) + assert len(a) == len(expected) + assert a.dtype == expected.dtype + assert_equal(a, expected) + + # file-obj path + fd, fname = mkstemp() + os.close(fd) + with open(fname, "w") as fh: + fh.write("\n".join(data) + "\n") + # loading the full file... + a = np.loadtxt(fname, dtype=unitless_dtype) + assert len(a) == len(expected) + assert a.dtype == expected.dtype + assert_equal(a, expected) + # loading half of the file... + a = np.loadtxt(fname, dtype=unitless_dtype, max_rows=int(nrows / 2)) + os.remove(fname) + assert len(a) == int(nrows / 2) + assert_equal(a, expected[:int(nrows / 2)]) + + +def test_str_dtype_unit_discovery_with_converter(): + data = ["spam-a-lot"] * 60000 + ["XXXtis_but_a_scratch"] + expected = np.array( + ["spam-a-lot"] * 60000 + ["tis_but_a_scratch"], dtype="U17" + ) + conv = lambda s: s.removeprefix("XXX") + + # file-like path + txt = StringIO("\n".join(data)) + a = np.loadtxt(txt, dtype="U", converters=conv) + assert a.dtype == expected.dtype + assert_equal(a, expected) + + # file-obj path + fd, fname = mkstemp() + os.close(fd) + with open(fname, "w") as fh: + fh.write("\n".join(data)) + a = np.loadtxt(fname, dtype="U", converters=conv) + os.remove(fname) + assert a.dtype == expected.dtype + assert_equal(a, expected) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +def test_control_character_empty(): + with pytest.raises(TypeError, match="Text reading control character must"): + np.loadtxt(StringIO("1 2 3"), delimiter="") + with pytest.raises(TypeError, match="Text reading control character must"): + np.loadtxt(StringIO("1 2 3"), quotechar="") + with pytest.raises(ValueError, match="comments cannot be an empty string"): + np.loadtxt(StringIO("1 2 3"), comments="") + with pytest.raises(ValueError, match="comments cannot be an empty string"): + np.loadtxt(StringIO("1 2 3"), comments=["#", ""]) + + +def test_control_characters_as_bytes(): + """Byte control characters (comments, delimiter) are supported.""" + a = np.loadtxt(StringIO("#header\n1,2,3"), comments=b"#", delimiter=b",") + assert_equal(a, [1, 2, 3]) + + +@pytest.mark.filterwarnings('ignore::UserWarning') +def test_field_growing_cases(): + # Test empty field appending/growing (each field still takes 1 character) + # to see if the final field appending does not create issues. + res = np.loadtxt([""], delimiter=",", dtype=bytes) + assert len(res) == 0 + + for i in range(1, 1024): + res = np.loadtxt(["," * i], delimiter=",", dtype=bytes, max_rows=10) + assert len(res) == i + 1 + +@pytest.mark.parametrize("nmax", (10000, 50000, 55000, 60000)) +def test_maxrows_exceeding_chunksize(nmax): + # tries to read all of the file, + # or less, equal, greater than _loadtxt_chunksize + file_length = 60000 + + # file-like path + data = ["a 0.5 1"] * file_length + txt = StringIO("\n".join(data)) + res = np.loadtxt(txt, dtype=str, delimiter=" ", max_rows=nmax) + assert len(res) == nmax + + # file-obj path + fd, fname = mkstemp() + os.close(fd) + with open(fname, "w") as fh: + fh.write("\n".join(data)) + res = np.loadtxt(fname, dtype=str, delimiter=" ", max_rows=nmax) + os.remove(fname) + assert len(res) == nmax + +@pytest.mark.parametrize("nskip", (0, 10000, 12345, 50000, 67891, 100000)) +def test_skiprow_exceeding_maxrows_exceeding_chunksize(tmpdir, nskip): + # tries to read a file in chunks by skipping a variable amount of lines, + # less, equal, greater than max_rows + file_length = 110000 + data = "\n".join(f"{i} a 0.5 1" for i in range(1, file_length + 1)) + expected_length = min(60000, file_length - nskip) + expected = np.arange(nskip + 1, nskip + 1 + expected_length).astype(str) + + # file-like path + txt = StringIO(data) + res = np.loadtxt(txt, dtype='str', delimiter=" ", skiprows=nskip, max_rows=60000) + assert len(res) == expected_length + # are the right lines read in res? + assert_array_equal(expected, res[:, 0]) + + # file-obj path + tmp_file = tmpdir / "test_data.txt" + tmp_file.write(data) + fname = str(tmp_file) + res = np.loadtxt(fname, dtype='str', delimiter=" ", skiprows=nskip, max_rows=60000) + assert len(res) == expected_length + # are the right lines read in res? + assert_array_equal(expected, res[:, 0]) diff --git a/local_packages/numpy/lib/tests/test_mixins.py b/local_packages/numpy/lib/tests/test_mixins.py new file mode 100644 index 0000000..f0aec15 --- /dev/null +++ b/local_packages/numpy/lib/tests/test_mixins.py @@ -0,0 +1,215 @@ +import numbers +import operator + +import numpy as np +from numpy.testing import assert_, assert_equal, assert_raises + +# NOTE: This class should be kept as an exact copy of the example from the +# docstring for NDArrayOperatorsMixin. + +class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin): + def __init__(self, value): + self.value = np.asarray(value) + + # One might also consider adding the built-in list type to this + # list, to support operations like np.add(array_like, list) + _HANDLED_TYPES = (np.ndarray, numbers.Number) + + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + out = kwargs.get('out', ()) + for x in inputs + out: + # Only support operations with instances of _HANDLED_TYPES. + # Use ArrayLike instead of type(self) for isinstance to + # allow subclasses that don't override __array_ufunc__ to + # handle ArrayLike objects. + if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)): + return NotImplemented + + # Defer to the implementation of the ufunc on unwrapped values. + inputs = tuple(x.value if isinstance(x, ArrayLike) else x + for x in inputs) + if out: + kwargs['out'] = tuple( + x.value if isinstance(x, ArrayLike) else x + for x in out) + result = getattr(ufunc, method)(*inputs, **kwargs) + + if type(result) is tuple: + # multiple return values + return tuple(type(self)(x) for x in result) + elif method == 'at': + # no return value + return None + else: + # one return value + return type(self)(result) + + def __repr__(self): + return f'{type(self).__name__}({self.value!r})' + + +def wrap_array_like(result): + if type(result) is tuple: + return tuple(ArrayLike(r) for r in result) + else: + return ArrayLike(result) + + +def _assert_equal_type_and_value(result, expected, err_msg=None): + assert_equal(type(result), type(expected), err_msg=err_msg) + if isinstance(result, tuple): + assert_equal(len(result), len(expected), err_msg=err_msg) + for result_item, expected_item in zip(result, expected): + _assert_equal_type_and_value(result_item, expected_item, err_msg) + else: + assert_equal(result.value, expected.value, err_msg=err_msg) + assert_equal(getattr(result.value, 'dtype', None), + getattr(expected.value, 'dtype', None), err_msg=err_msg) + + +_ALL_BINARY_OPERATORS = [ + operator.lt, + operator.le, + operator.eq, + operator.ne, + operator.gt, + operator.ge, + operator.add, + operator.sub, + operator.mul, + operator.truediv, + operator.floordiv, + operator.mod, + divmod, + pow, + operator.lshift, + operator.rshift, + operator.and_, + operator.xor, + operator.or_, +] + + +class TestNDArrayOperatorsMixin: + + def test_array_like_add(self): + + def check(result): + _assert_equal_type_and_value(result, ArrayLike(0)) + + check(ArrayLike(0) + 0) + check(0 + ArrayLike(0)) + + check(ArrayLike(0) + np.array(0)) + check(np.array(0) + ArrayLike(0)) + + check(ArrayLike(np.array(0)) + 0) + check(0 + ArrayLike(np.array(0))) + + check(ArrayLike(np.array(0)) + np.array(0)) + check(np.array(0) + ArrayLike(np.array(0))) + + def test_inplace(self): + array_like = ArrayLike(np.array([0])) + array_like += 1 + _assert_equal_type_and_value(array_like, ArrayLike(np.array([1]))) + + array = np.array([0]) + array += ArrayLike(1) + _assert_equal_type_and_value(array, ArrayLike(np.array([1]))) + + def test_opt_out(self): + + class OptOut: + """Object that opts out of __array_ufunc__.""" + __array_ufunc__ = None + + def __add__(self, other): + return self + + def __radd__(self, other): + return self + + array_like = ArrayLike(1) + opt_out = OptOut() + + # supported operations + assert_(array_like + opt_out is opt_out) + assert_(opt_out + array_like is opt_out) + + # not supported + with assert_raises(TypeError): + # don't use the Python default, array_like = array_like + opt_out + array_like += opt_out + with assert_raises(TypeError): + array_like - opt_out + with assert_raises(TypeError): + opt_out - array_like + + def test_subclass(self): + + class SubArrayLike(ArrayLike): + """Should take precedence over ArrayLike.""" + + x = ArrayLike(0) + y = SubArrayLike(1) + _assert_equal_type_and_value(x + y, y) + _assert_equal_type_and_value(y + x, y) + + def test_object(self): + x = ArrayLike(0) + obj = object() + with assert_raises(TypeError): + x + obj + with assert_raises(TypeError): + obj + x + with assert_raises(TypeError): + x += obj + + def test_unary_methods(self): + array = np.array([-1, 0, 1, 2]) + array_like = ArrayLike(array) + for op in [operator.neg, + operator.pos, + abs, + operator.invert]: + _assert_equal_type_and_value(op(array_like), ArrayLike(op(array))) + + def test_forward_binary_methods(self): + array = np.array([-1, 0, 1, 2]) + array_like = ArrayLike(array) + for op in _ALL_BINARY_OPERATORS: + expected = wrap_array_like(op(array, 1)) + actual = op(array_like, 1) + err_msg = f'failed for operator {op}' + _assert_equal_type_and_value(expected, actual, err_msg=err_msg) + + def test_reflected_binary_methods(self): + for op in _ALL_BINARY_OPERATORS: + expected = wrap_array_like(op(2, 1)) + actual = op(2, ArrayLike(1)) + err_msg = f'failed for operator {op}' + _assert_equal_type_and_value(expected, actual, err_msg=err_msg) + + def test_matmul(self): + array = np.array([1, 2], dtype=np.float64) + array_like = ArrayLike(array) + expected = ArrayLike(np.float64(5)) + _assert_equal_type_and_value(expected, np.matmul(array_like, array)) + _assert_equal_type_and_value( + expected, operator.matmul(array_like, array)) + _assert_equal_type_and_value( + expected, operator.matmul(array, array_like)) + + def test_ufunc_at(self): + array = ArrayLike(np.array([1, 2, 3, 4])) + assert_(np.negative.at(array, np.array([0, 1])) is None) + _assert_equal_type_and_value(array, ArrayLike([-1, -2, 3, 4])) + + def test_ufunc_two_outputs(self): + mantissa, exponent = np.frexp(2 ** -3) + expected = (ArrayLike(mantissa), ArrayLike(exponent)) + _assert_equal_type_and_value( + np.frexp(ArrayLike(2 ** -3)), expected) + _assert_equal_type_and_value( + np.frexp(ArrayLike(np.array(2 ** -3))), expected) diff --git a/local_packages/numpy/lib/tests/test_nanfunctions.py b/local_packages/numpy/lib/tests/test_nanfunctions.py new file mode 100644 index 0000000..6ef86bf --- /dev/null +++ b/local_packages/numpy/lib/tests/test_nanfunctions.py @@ -0,0 +1,1438 @@ +import inspect +import warnings +from functools import partial + +import pytest + +import numpy as np +from numpy._core.numeric import normalize_axis_tuple +from numpy.exceptions import AxisError, ComplexWarning +from numpy.lib._nanfunctions_impl import _nan_mask, _replace_nan +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) + +# Test data +_ndat = np.array([[0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170], + [0.5351, -0.9403, np.nan, 0.2100, 0.4759, 0.2833], + [np.nan, np.nan, np.nan, 0.1042, np.nan, -0.5954], + [0.1610, np.nan, np.nan, 0.1859, 0.3146, np.nan]]) + + +# Rows of _ndat with nans removed +_rdat = [np.array([0.6244, 0.2692, 0.0116, 0.1170]), + np.array([0.5351, -0.9403, 0.2100, 0.4759, 0.2833]), + np.array([0.1042, -0.5954]), + np.array([0.1610, 0.1859, 0.3146])] + +# Rows of _ndat with nans converted to ones +_ndat_ones = np.array([[0.6244, 1.0, 0.2692, 0.0116, 1.0, 0.1170], + [0.5351, -0.9403, 1.0, 0.2100, 0.4759, 0.2833], + [1.0, 1.0, 1.0, 0.1042, 1.0, -0.5954], + [0.1610, 1.0, 1.0, 0.1859, 0.3146, 1.0]]) + +# Rows of _ndat with nans converted to zeros +_ndat_zeros = np.array([[0.6244, 0.0, 0.2692, 0.0116, 0.0, 0.1170], + [0.5351, -0.9403, 0.0, 0.2100, 0.4759, 0.2833], + [0.0, 0.0, 0.0, 0.1042, 0.0, -0.5954], + [0.1610, 0.0, 0.0, 0.1859, 0.3146, 0.0]]) + + +class TestSignatureMatch: + NANFUNCS = { + np.nanmin: np.amin, + np.nanmax: np.amax, + np.nanargmin: np.argmin, + np.nanargmax: np.argmax, + np.nansum: np.sum, + np.nanprod: np.prod, + np.nancumsum: np.cumsum, + np.nancumprod: np.cumprod, + np.nanmean: np.mean, + np.nanmedian: np.median, + np.nanpercentile: np.percentile, + np.nanquantile: np.quantile, + np.nanvar: np.var, + np.nanstd: np.std, + } + IDS = [k.__name__ for k in NANFUNCS] + + @staticmethod + def get_signature(func, default="..."): + """Construct a signature and replace all default parameter-values.""" + prm_list = [] + signature = inspect.signature(func) + for prm in signature.parameters.values(): + if prm.default is inspect.Parameter.empty: + prm_list.append(prm) + else: + prm_list.append(prm.replace(default=default)) + return inspect.Signature(prm_list) + + @pytest.mark.parametrize("nan_func,func", NANFUNCS.items(), ids=IDS) + def test_signature_match(self, nan_func, func): + # Ignore the default parameter-values as they can sometimes differ + # between the two functions (*e.g.* one has `False` while the other + # has `np._NoValue`) + signature = self.get_signature(func) + nan_signature = self.get_signature(nan_func) + np.testing.assert_equal(signature, nan_signature) + + def test_exhaustiveness(self): + """Validate that all nan functions are actually tested.""" + np.testing.assert_equal( + set(self.IDS), set(np.lib._nanfunctions_impl.__all__) + ) + + +class TestNanFunctions_MinMax: + + nanfuncs = [np.nanmin, np.nanmax] + stdfuncs = [np.min, np.max] + + def test_mutation(self): + # Check that passed array is not modified. + ndat = _ndat.copy() + for f in self.nanfuncs: + f(ndat) + assert_equal(ndat, _ndat) + + def test_keepdims(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for axis in [None, 0, 1]: + tgt = rf(mat, axis=axis, keepdims=True) + res = nf(mat, axis=axis, keepdims=True) + assert_(res.ndim == tgt.ndim) + + def test_out(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + resout = np.zeros(3) + tgt = rf(mat, axis=1) + res = nf(mat, axis=1, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + def test_dtype_from_input(self): + codes = 'efdgFDG' + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for c in codes: + mat = np.eye(3, dtype=c) + tgt = rf(mat, axis=1).dtype.type + res = nf(mat, axis=1).dtype.type + assert_(res is tgt) + # scalar case + tgt = rf(mat, axis=None).dtype.type + res = nf(mat, axis=None).dtype.type + assert_(res is tgt) + + def test_result_values(self): + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + tgt = [rf(d) for d in _rdat] + res = nf(_ndat, axis=1) + assert_almost_equal(res, tgt) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip("`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + match = "All-NaN slice encountered" + for func in self.nanfuncs: + with pytest.warns(RuntimeWarning, match=match): + out = func(array, axis=axis) + assert np.isnan(out).all() + assert out.dtype == array.dtype + + def test_masked(self): + mat = np.ma.fix_invalid(_ndat) + msk = mat._mask.copy() + for f in [np.nanmin]: + res = f(mat, axis=1) + tgt = f(_ndat, axis=1) + assert_equal(res, tgt) + assert_equal(mat._mask, msk) + assert_(not np.isinf(mat).any()) + + def test_scalar(self): + for f in self.nanfuncs: + assert_(f(0.) == 0.) + + def test_subclass(self): + class MyNDArray(np.ndarray): + pass + + # Check that it works and that type and + # shape are preserved + mine = np.eye(3).view(MyNDArray) + for f in self.nanfuncs: + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine) + assert_(res.shape == ()) + + # check that rows of nan are dealt with for subclasses (#4628) + mine[1] = np.nan + for f in self.nanfuncs: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) + assert_(not np.any(np.isnan(res))) + assert_(len(w) == 0) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(np.isnan(res[1]) and not np.isnan(res[0]) + and not np.isnan(res[2])) + assert_(len(w) == 1, 'no warning raised') + assert_(issubclass(w[0].category, RuntimeWarning)) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mine) + assert_(res.shape == ()) + assert_(res != np.nan) + assert_(len(w) == 0) + + def test_object_array(self): + arr = np.array([[1.0, 2.0], [np.nan, 4.0], [np.nan, np.nan]], dtype=object) + assert_equal(np.nanmin(arr), 1.0) + assert_equal(np.nanmin(arr, axis=0), [1.0, 2.0]) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + # assert_equal does not work on object arrays of nan + assert_equal(list(np.nanmin(arr, axis=1)), [1.0, 4.0, np.nan]) + assert_(len(w) == 1, 'no warning raised') + assert_(issubclass(w[0].category, RuntimeWarning)) + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_initial(self, dtype): + class MyNDArray(np.ndarray): + pass + + ar = np.arange(9).astype(dtype) + ar[:5] = np.nan + + for f in self.nanfuncs: + initial = 100 if f is np.nanmax else 0 + + ret1 = f(ar, initial=initial) + assert ret1.dtype == dtype + assert ret1 == initial + + ret2 = f(ar.view(MyNDArray), initial=initial) + assert ret2.dtype == dtype + assert ret2 == initial + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_where(self, dtype): + class MyNDArray(np.ndarray): + pass + + ar = np.arange(9).reshape(3, 3).astype(dtype) + ar[0, :] = np.nan + where = np.ones_like(ar, dtype=np.bool) + where[:, 0] = False + + for f in self.nanfuncs: + reference = 4 if f is np.nanmin else 8 + + ret1 = f(ar, where=where, initial=5) + assert ret1.dtype == dtype + assert ret1 == reference + + ret2 = f(ar.view(MyNDArray), where=where, initial=5) + assert ret2.dtype == dtype + assert ret2 == reference + + +class TestNanFunctions_ArgminArgmax: + + nanfuncs = [np.nanargmin, np.nanargmax] + + def test_mutation(self): + # Check that passed array is not modified. + ndat = _ndat.copy() + for f in self.nanfuncs: + f(ndat) + assert_equal(ndat, _ndat) + + def test_result_values(self): + for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]): + for row in _ndat: + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "invalid value encountered in", RuntimeWarning) + ind = f(row) + val = row[ind] + # comparing with NaN is tricky as the result + # is always false except for NaN != NaN + assert_(not np.isnan(val)) + assert_(not fcmp(val, row).any()) + assert_(not np.equal(val, row[:ind]).any()) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip("`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + for func in self.nanfuncs: + with pytest.raises(ValueError, match="All-NaN slice encountered"): + func(array, axis=axis) + + def test_empty(self): + mat = np.zeros((0, 3)) + for f in self.nanfuncs: + for axis in [0, None]: + assert_raises_regex( + ValueError, + "attempt to get argm.. of an empty sequence", + f, mat, axis=axis) + for axis in [1]: + res = f(mat, axis=axis) + assert_equal(res, np.zeros(0)) + + def test_scalar(self): + for f in self.nanfuncs: + assert_(f(0.) == 0.) + + def test_subclass(self): + class MyNDArray(np.ndarray): + pass + + # Check that it works and that type and + # shape are preserved + mine = np.eye(3).view(MyNDArray) + for f in self.nanfuncs: + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine) + assert_(res.shape == ()) + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_keepdims(self, dtype): + ar = np.arange(9).astype(dtype) + ar[:5] = np.nan + + for f in self.nanfuncs: + reference = 5 if f is np.nanargmin else 8 + ret = f(ar, keepdims=True) + assert ret.ndim == ar.ndim + assert ret == reference + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_out(self, dtype): + ar = np.arange(9).astype(dtype) + ar[:5] = np.nan + + for f in self.nanfuncs: + out = np.zeros((), dtype=np.intp) + reference = 5 if f is np.nanargmin else 8 + ret = f(ar, out=out) + assert ret is out + assert ret == reference + + +_TEST_ARRAYS = { + "0d": np.array(5), + "1d": np.array([127, 39, 93, 87, 46]) +} +for _v in _TEST_ARRAYS.values(): + _v.setflags(write=False) + + +@pytest.mark.parametrize( + "dtype", + np.typecodes["AllInteger"] + np.typecodes["AllFloat"] + "O", +) +@pytest.mark.parametrize("mat", _TEST_ARRAYS.values(), ids=_TEST_ARRAYS.keys()) +class TestNanFunctions_NumberTypes: + nanfuncs = { + np.nanmin: np.min, + np.nanmax: np.max, + np.nanargmin: np.argmin, + np.nanargmax: np.argmax, + np.nansum: np.sum, + np.nanprod: np.prod, + np.nancumsum: np.cumsum, + np.nancumprod: np.cumprod, + np.nanmean: np.mean, + np.nanmedian: np.median, + np.nanvar: np.var, + np.nanstd: np.std, + } + nanfunc_ids = [i.__name__ for i in nanfuncs] + + @pytest.mark.parametrize("nanfunc,func", nanfuncs.items(), ids=nanfunc_ids) + @np.errstate(over="ignore") + def test_nanfunc(self, mat, dtype, nanfunc, func): + mat = mat.astype(dtype) + tgt = func(mat) + out = nanfunc(mat) + + assert_almost_equal(out, tgt) + if dtype == "O": + assert type(out) is type(tgt) + else: + assert out.dtype == tgt.dtype + + @pytest.mark.parametrize( + "nanfunc,func", + [(np.nanquantile, np.quantile), (np.nanpercentile, np.percentile)], + ids=["nanquantile", "nanpercentile"], + ) + def test_nanfunc_q(self, mat, dtype, nanfunc, func): + mat = mat.astype(dtype) + if mat.dtype.kind == "c": + assert_raises(TypeError, func, mat, q=1) + assert_raises(TypeError, nanfunc, mat, q=1) + + else: + tgt = func(mat, q=1) + out = nanfunc(mat, q=1) + + assert_almost_equal(out, tgt) + + if dtype == "O": + assert type(out) is type(tgt) + else: + assert out.dtype == tgt.dtype + + @pytest.mark.parametrize( + "nanfunc,func", + [(np.nanvar, np.var), (np.nanstd, np.std)], + ids=["nanvar", "nanstd"], + ) + def test_nanfunc_ddof(self, mat, dtype, nanfunc, func): + mat = mat.astype(dtype) + tgt = func(mat, ddof=0.5) + out = nanfunc(mat, ddof=0.5) + + assert_almost_equal(out, tgt) + if dtype == "O": + assert type(out) is type(tgt) + else: + assert out.dtype == tgt.dtype + + @pytest.mark.parametrize( + "nanfunc", [np.nanvar, np.nanstd] + ) + def test_nanfunc_correction(self, mat, dtype, nanfunc): + mat = mat.astype(dtype) + assert_almost_equal( + nanfunc(mat, correction=0.5), nanfunc(mat, ddof=0.5) + ) + + err_msg = "ddof and correction can't be provided simultaneously." + with assert_raises_regex(ValueError, err_msg): + nanfunc(mat, ddof=0.5, correction=0.5) + + with assert_raises_regex(ValueError, err_msg): + nanfunc(mat, ddof=1, correction=0) + + +class SharedNanFunctionsTestsMixin: + def test_mutation(self): + # Check that passed array is not modified. + ndat = _ndat.copy() + for f in self.nanfuncs: + f(ndat) + assert_equal(ndat, _ndat) + + def test_keepdims(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for axis in [None, 0, 1]: + tgt = rf(mat, axis=axis, keepdims=True) + res = nf(mat, axis=axis, keepdims=True) + assert_(res.ndim == tgt.ndim) + + def test_out(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + resout = np.zeros(3) + tgt = rf(mat, axis=1) + res = nf(mat, axis=1, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + def test_dtype_from_dtype(self): + mat = np.eye(3) + codes = 'efdgFDG' + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for c in codes: + with warnings.catch_warnings(): + if nf in {np.nanstd, np.nanvar} and c in 'FDG': + # Giving the warning is a small bug, see gh-8000 + warnings.simplefilter('ignore', ComplexWarning) + tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type + res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type + assert_(res is tgt) + # scalar case + tgt = rf(mat, dtype=np.dtype(c), axis=None).dtype.type + res = nf(mat, dtype=np.dtype(c), axis=None).dtype.type + assert_(res is tgt) + + def test_dtype_from_char(self): + mat = np.eye(3) + codes = 'efdgFDG' + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for c in codes: + with warnings.catch_warnings(): + if nf in {np.nanstd, np.nanvar} and c in 'FDG': + # Giving the warning is a small bug, see gh-8000 + warnings.simplefilter('ignore', ComplexWarning) + tgt = rf(mat, dtype=c, axis=1).dtype.type + res = nf(mat, dtype=c, axis=1).dtype.type + assert_(res is tgt) + # scalar case + tgt = rf(mat, dtype=c, axis=None).dtype.type + res = nf(mat, dtype=c, axis=None).dtype.type + assert_(res is tgt) + + def test_dtype_from_input(self): + codes = 'efdgFDG' + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for c in codes: + mat = np.eye(3, dtype=c) + tgt = rf(mat, axis=1).dtype.type + res = nf(mat, axis=1).dtype.type + assert_(res is tgt, f"res {res}, tgt {tgt}") + # scalar case + tgt = rf(mat, axis=None).dtype.type + res = nf(mat, axis=None).dtype.type + assert_(res is tgt) + + def test_result_values(self): + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + tgt = [rf(d) for d in _rdat] + res = nf(_ndat, axis=1) + assert_almost_equal(res, tgt) + + def test_scalar(self): + for f in self.nanfuncs: + assert_(f(0.) == 0.) + + def test_subclass(self): + class MyNDArray(np.ndarray): + pass + + # Check that it works and that type and + # shape are preserved + array = np.eye(3) + mine = array.view(MyNDArray) + for f in self.nanfuncs: + expected_shape = f(array, axis=0).shape + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == expected_shape) + expected_shape = f(array, axis=1).shape + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == expected_shape) + expected_shape = f(array).shape + res = f(mine) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == expected_shape) + + +class TestNanFunctions_SumProd(SharedNanFunctionsTestsMixin): + + nanfuncs = [np.nansum, np.nanprod] + stdfuncs = [np.sum, np.prod] + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip("`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + for func, identity in zip(self.nanfuncs, [0, 1]): + out = func(array, axis=axis) + assert np.all(out == identity) + assert out.dtype == array.dtype + + def test_empty(self): + for f, tgt_value in zip([np.nansum, np.nanprod], [0, 1]): + mat = np.zeros((0, 3)) + tgt = [tgt_value] * 3 + res = f(mat, axis=0) + assert_equal(res, tgt) + tgt = [] + res = f(mat, axis=1) + assert_equal(res, tgt) + tgt = tgt_value + res = f(mat, axis=None) + assert_equal(res, tgt) + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_initial(self, dtype): + ar = np.arange(9).astype(dtype) + ar[:5] = np.nan + + for f in self.nanfuncs: + reference = 28 if f is np.nansum else 3360 + ret = f(ar, initial=2) + assert ret.dtype == dtype + assert ret == reference + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_where(self, dtype): + ar = np.arange(9).reshape(3, 3).astype(dtype) + ar[0, :] = np.nan + where = np.ones_like(ar, dtype=np.bool) + where[:, 0] = False + + for f in self.nanfuncs: + reference = 26 if f is np.nansum else 2240 + ret = f(ar, where=where, initial=2) + assert ret.dtype == dtype + assert ret == reference + + +class TestNanFunctions_CumSumProd(SharedNanFunctionsTestsMixin): + + nanfuncs = [np.nancumsum, np.nancumprod] + stdfuncs = [np.cumsum, np.cumprod] + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan) + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip("`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + for func, identity in zip(self.nanfuncs, [0, 1]): + out = func(array) + assert np.all(out == identity) + assert out.dtype == array.dtype + + def test_empty(self): + for f, tgt_value in zip(self.nanfuncs, [0, 1]): + mat = np.zeros((0, 3)) + tgt = tgt_value * np.ones((0, 3)) + res = f(mat, axis=0) + assert_equal(res, tgt) + tgt = mat + res = f(mat, axis=1) + assert_equal(res, tgt) + tgt = np.zeros(0) + res = f(mat, axis=None) + assert_equal(res, tgt) + + def test_keepdims(self): + for f, g in zip(self.nanfuncs, self.stdfuncs): + mat = np.eye(3) + for axis in [None, 0, 1]: + tgt = f(mat, axis=axis, out=None) + res = g(mat, axis=axis, out=None) + assert_(res.ndim == tgt.ndim) + + for f in self.nanfuncs: + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + rs = np.random.RandomState(0) + d[rs.rand(*d.shape) < 0.5] = np.nan + res = f(d, axis=None) + assert_equal(res.shape, (1155,)) + for axis in np.arange(4): + res = f(d, axis=axis) + assert_equal(res.shape, (3, 5, 7, 11)) + + def test_result_values(self): + for axis in (-2, -1, 0, 1, None): + tgt = np.cumprod(_ndat_ones, axis=axis) + res = np.nancumprod(_ndat, axis=axis) + assert_almost_equal(res, tgt) + tgt = np.cumsum(_ndat_zeros, axis=axis) + res = np.nancumsum(_ndat, axis=axis) + assert_almost_equal(res, tgt) + + def test_out(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + resout = np.eye(3) + for axis in (-2, -1, 0, 1): + tgt = rf(mat, axis=axis) + res = nf(mat, axis=axis, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + +class TestNanFunctions_MeanVarStd(SharedNanFunctionsTestsMixin): + + nanfuncs = [np.nanmean, np.nanvar, np.nanstd] + stdfuncs = [np.mean, np.var, np.std] + + def test_dtype_error(self): + for f in self.nanfuncs: + for dtype in [np.bool, np.int_, np.object_]: + assert_raises(TypeError, f, _ndat, axis=1, dtype=dtype) + + def test_out_dtype_error(self): + for f in self.nanfuncs: + for dtype in [np.bool, np.int_, np.object_]: + out = np.empty(_ndat.shape[0], dtype=dtype) + assert_raises(TypeError, f, _ndat, axis=1, out=out) + + def test_ddof(self): + nanfuncs = [np.nanvar, np.nanstd] + stdfuncs = [np.var, np.std] + for nf, rf in zip(nanfuncs, stdfuncs): + for ddof in [0, 1]: + tgt = [rf(d, ddof=ddof) for d in _rdat] + res = nf(_ndat, axis=1, ddof=ddof) + assert_almost_equal(res, tgt) + + def test_ddof_too_big(self): + nanfuncs = [np.nanvar, np.nanstd] + stdfuncs = [np.var, np.std] + dsize = [len(d) for d in _rdat] + for nf, rf in zip(nanfuncs, stdfuncs): + for ddof in range(5): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + warnings.simplefilter('ignore', ComplexWarning) + tgt = [ddof >= d for d in dsize] + res = nf(_ndat, axis=1, ddof=ddof) + assert_equal(np.isnan(res), tgt) + if any(tgt): + assert_(len(w) == 1) + else: + assert_(len(w) == 0) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip("`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + match = "(Degrees of freedom <= 0 for slice.)|(Mean of empty slice)" + for func in self.nanfuncs: + with pytest.warns(RuntimeWarning, match=match): + out = func(array, axis=axis) + assert np.isnan(out).all() + + # `nanvar` and `nanstd` convert complex inputs to their + # corresponding floating dtype + if func is np.nanmean: + assert out.dtype == array.dtype + else: + assert out.dtype == np.abs(array).dtype + + def test_empty(self): + mat = np.zeros((0, 3)) + for f in self.nanfuncs: + for axis in [0, None]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(f(mat, axis=axis)).all()) + assert_(len(w) == 1) + assert_(issubclass(w[0].category, RuntimeWarning)) + for axis in [1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_equal(f(mat, axis=axis), np.zeros([])) + assert_(len(w) == 0) + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_where(self, dtype): + ar = np.arange(9).reshape(3, 3).astype(dtype) + ar[0, :] = np.nan + where = np.ones_like(ar, dtype=np.bool) + where[:, 0] = False + + for f, f_std in zip(self.nanfuncs, self.stdfuncs): + reference = f_std(ar[where][2:]) + dtype_reference = dtype if f is np.nanmean else ar.real.dtype + + ret = f(ar, where=where) + assert ret.dtype == dtype_reference + np.testing.assert_allclose(ret, reference) + + def test_nanstd_with_mean_keyword(self): + # Setting the seed to make the test reproducible + rng = np.random.RandomState(1234) + A = rng.randn(10, 20, 5) + 0.5 + A[:, 5, :] = np.nan + + mean_out = np.zeros((10, 1, 5)) + std_out = np.zeros((10, 1, 5)) + + mean = np.nanmean(A, + out=mean_out, + axis=1, + keepdims=True) + + # The returned object should be the object specified during calling + assert mean_out is mean + + std = np.nanstd(A, + out=std_out, + axis=1, + keepdims=True, + mean=mean) + + # The returned object should be the object specified during calling + assert std_out is std + + # Shape of returned mean and std should be same + assert std.shape == mean.shape + assert std.shape == (10, 1, 5) + + # Output should be the same as from the individual algorithms + std_old = np.nanstd(A, axis=1, keepdims=True) + + assert std_old.shape == mean.shape + assert_almost_equal(std, std_old) + + +_TIME_UNITS = ( + "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as" +) + +# All `inexact` + `timdelta64` type codes +_TYPE_CODES = list(np.typecodes["AllFloat"]) +_TYPE_CODES += [f"m8[{unit}]" for unit in _TIME_UNITS] + + +class TestNanFunctions_Median: + + def test_mutation(self): + # Check that passed array is not modified. + ndat = _ndat.copy() + np.nanmedian(ndat) + assert_equal(ndat, _ndat) + + def test_keepdims(self): + mat = np.eye(3) + for axis in [None, 0, 1]: + tgt = np.median(mat, axis=axis, out=None, overwrite_input=False) + res = np.nanmedian(mat, axis=axis, out=None, overwrite_input=False) + assert_(res.ndim == tgt.ndim) + + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + w = np.random.random((4, 200)) * np.array(d.shape)[:, None] + w = w.astype(np.intp) + d[tuple(w)] = np.nan + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) + res = np.nanmedian(d, axis=None, keepdims=True) + assert_equal(res.shape, (1, 1, 1, 1)) + res = np.nanmedian(d, axis=(0, 1), keepdims=True) + assert_equal(res.shape, (1, 1, 7, 11)) + res = np.nanmedian(d, axis=(0, 3), keepdims=True) + assert_equal(res.shape, (1, 5, 7, 1)) + res = np.nanmedian(d, axis=(1,), keepdims=True) + assert_equal(res.shape, (3, 1, 7, 11)) + res = np.nanmedian(d, axis=(0, 1, 2, 3), keepdims=True) + assert_equal(res.shape, (1, 1, 1, 1)) + res = np.nanmedian(d, axis=(0, 1, 3), keepdims=True) + assert_equal(res.shape, (1, 1, 7, 1)) + + @pytest.mark.parametrize( + argnames='axis', + argvalues=[ + None, + 1, + (1, ), + (0, 1), + (-3, -1), + ] + ) + @pytest.mark.filterwarnings("ignore:All-NaN slice:RuntimeWarning") + def test_keepdims_out(self, axis): + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + w = np.random.random((4, 200)) * np.array(d.shape)[:, None] + w = w.astype(np.intp) + d[tuple(w)] = np.nan + if axis is None: + shape_out = (1,) * d.ndim + else: + axis_norm = normalize_axis_tuple(axis, d.ndim) + shape_out = tuple( + 1 if i in axis_norm else d.shape[i] for i in range(d.ndim)) + out = np.empty(shape_out) + result = np.nanmedian(d, axis=axis, keepdims=True, out=out) + assert result is out + assert_equal(result.shape, shape_out) + + def test_out(self): + mat = np.random.rand(3, 3) + nan_mat = np.insert(mat, [0, 2], np.nan, axis=1) + resout = np.zeros(3) + tgt = np.median(mat, axis=1) + res = np.nanmedian(nan_mat, axis=1, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + # 0-d output: + resout = np.zeros(()) + tgt = np.median(mat, axis=None) + res = np.nanmedian(nan_mat, axis=None, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + res = np.nanmedian(nan_mat, axis=(0, 1), out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + def test_small_large(self): + # test the small and large code paths, current cutoff 400 elements + for s in [5, 20, 51, 200, 1000]: + d = np.random.randn(4, s) + # Randomly set some elements to NaN: + w = np.random.randint(0, d.size, size=d.size // 5) + d.ravel()[w] = np.nan + d[:, 0] = 1. # ensure at least one good value + # use normal median without nans to compare + tgt = [] + for x in d: + nonan = np.compress(~np.isnan(x), x) + tgt.append(np.median(nonan, overwrite_input=True)) + + assert_array_equal(np.nanmedian(d, axis=-1), tgt) + + def test_result_values(self): + tgt = [np.median(d) for d in _rdat] + res = np.nanmedian(_ndat, axis=1) + assert_almost_equal(res, tgt) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", _TYPE_CODES) + def test_allnans(self, dtype, axis): + mat = np.full((3, 3), np.nan).astype(dtype) + with pytest.warns(RuntimeWarning) as r: + output = np.nanmedian(mat, axis=axis) + assert output.dtype == mat.dtype + assert np.isnan(output).all() + + if axis is None: + assert_(len(r) == 1) + else: + assert_(len(r) == 3) + + # Check scalar + scalar = np.array(np.nan).astype(dtype)[()] + output_scalar = np.nanmedian(scalar) + assert output_scalar.dtype == scalar.dtype + assert np.isnan(output_scalar) + + if axis is None: + assert_(len(r) == 2) + else: + assert_(len(r) == 4) + + def test_empty(self): + mat = np.zeros((0, 3)) + for axis in [0, None]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(np.nanmedian(mat, axis=axis)).all()) + assert_(len(w) == 1) + assert_(issubclass(w[0].category, RuntimeWarning)) + for axis in [1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_equal(np.nanmedian(mat, axis=axis), np.zeros([])) + assert_(len(w) == 0) + + def test_scalar(self): + assert_(np.nanmedian(0.) == 0.) + + def test_extended_axis_invalid(self): + d = np.ones((3, 5, 7, 11)) + assert_raises(AxisError, np.nanmedian, d, axis=-5) + assert_raises(AxisError, np.nanmedian, d, axis=(0, -5)) + assert_raises(AxisError, np.nanmedian, d, axis=4) + assert_raises(AxisError, np.nanmedian, d, axis=(0, 4)) + assert_raises(ValueError, np.nanmedian, d, axis=(1, 1)) + + def test_float_special(self): + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) + for inf in [np.inf, -np.inf]: + a = np.array([[inf, np.nan], [np.nan, np.nan]]) + assert_equal(np.nanmedian(a, axis=0), [inf, np.nan]) + assert_equal(np.nanmedian(a, axis=1), [inf, np.nan]) + assert_equal(np.nanmedian(a), inf) + + # minimum fill value check + a = np.array([[np.nan, np.nan, inf], + [np.nan, np.nan, inf]]) + assert_equal(np.nanmedian(a), inf) + assert_equal(np.nanmedian(a, axis=0), [np.nan, np.nan, inf]) + assert_equal(np.nanmedian(a, axis=1), inf) + + # no mask path + a = np.array([[inf, inf], [inf, inf]]) + assert_equal(np.nanmedian(a, axis=1), inf) + + a = np.array([[inf, 7, -inf, -9], + [-10, np.nan, np.nan, 5], + [4, np.nan, np.nan, inf]], + dtype=np.float32) + if inf > 0: + assert_equal(np.nanmedian(a, axis=0), [4., 7., -inf, 5.]) + assert_equal(np.nanmedian(a), 4.5) + else: + assert_equal(np.nanmedian(a, axis=0), [-10., 7., -inf, -9.]) + assert_equal(np.nanmedian(a), -2.5) + assert_equal(np.nanmedian(a, axis=-1), [-1., -2.5, inf]) + + for i in range(10): + for j in range(1, 10): + a = np.array([([np.nan] * i) + ([inf] * j)] * 2) + assert_equal(np.nanmedian(a), inf) + assert_equal(np.nanmedian(a, axis=1), inf) + assert_equal(np.nanmedian(a, axis=0), + ([np.nan] * i) + [inf] * j) + + a = np.array([([np.nan] * i) + ([-inf] * j)] * 2) + assert_equal(np.nanmedian(a), -inf) + assert_equal(np.nanmedian(a, axis=1), -inf) + assert_equal(np.nanmedian(a, axis=0), + ([np.nan] * i) + [-inf] * j) + + +class TestNanFunctions_Percentile: + + def test_mutation(self): + # Check that passed array is not modified. + ndat = _ndat.copy() + np.nanpercentile(ndat, 30) + assert_equal(ndat, _ndat) + + def test_keepdims(self): + mat = np.eye(3) + for axis in [None, 0, 1]: + tgt = np.percentile(mat, 70, axis=axis, out=None, + overwrite_input=False) + res = np.nanpercentile(mat, 70, axis=axis, out=None, + overwrite_input=False) + assert_(res.ndim == tgt.ndim) + + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + w = np.random.random((4, 200)) * np.array(d.shape)[:, None] + w = w.astype(np.intp) + d[tuple(w)] = np.nan + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) + res = np.nanpercentile(d, 90, axis=None, keepdims=True) + assert_equal(res.shape, (1, 1, 1, 1)) + res = np.nanpercentile(d, 90, axis=(0, 1), keepdims=True) + assert_equal(res.shape, (1, 1, 7, 11)) + res = np.nanpercentile(d, 90, axis=(0, 3), keepdims=True) + assert_equal(res.shape, (1, 5, 7, 1)) + res = np.nanpercentile(d, 90, axis=(1,), keepdims=True) + assert_equal(res.shape, (3, 1, 7, 11)) + res = np.nanpercentile(d, 90, axis=(0, 1, 2, 3), keepdims=True) + assert_equal(res.shape, (1, 1, 1, 1)) + res = np.nanpercentile(d, 90, axis=(0, 1, 3), keepdims=True) + assert_equal(res.shape, (1, 1, 7, 1)) + + @pytest.mark.parametrize('q', [7, [1, 7]]) + @pytest.mark.parametrize( + argnames='axis', + argvalues=[ + None, + 1, + (1,), + (0, 1), + (-3, -1), + ] + ) + @pytest.mark.filterwarnings("ignore:All-NaN slice:RuntimeWarning") + def test_keepdims_out(self, q, axis): + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + w = np.random.random((4, 200)) * np.array(d.shape)[:, None] + w = w.astype(np.intp) + d[tuple(w)] = np.nan + if axis is None: + shape_out = (1,) * d.ndim + else: + axis_norm = normalize_axis_tuple(axis, d.ndim) + shape_out = tuple( + 1 if i in axis_norm else d.shape[i] for i in range(d.ndim)) + shape_out = np.shape(q) + shape_out + + out = np.empty(shape_out) + result = np.nanpercentile(d, q, axis=axis, keepdims=True, out=out) + assert result is out + assert_equal(result.shape, shape_out) + + @pytest.mark.parametrize("weighted", [False, True]) + def test_out(self, weighted): + mat = np.random.rand(3, 3) + nan_mat = np.insert(mat, [0, 2], np.nan, axis=1) + resout = np.zeros(3) + if weighted: + w_args = {"weights": np.ones_like(mat), "method": "inverted_cdf"} + nan_w_args = { + "weights": np.ones_like(nan_mat), "method": "inverted_cdf" + } + else: + w_args = {} + nan_w_args = {} + tgt = np.percentile(mat, 42, axis=1, **w_args) + res = np.nanpercentile(nan_mat, 42, axis=1, out=resout, **nan_w_args) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + # 0-d output: + resout = np.zeros(()) + tgt = np.percentile(mat, 42, axis=None, **w_args) + res = np.nanpercentile( + nan_mat, 42, axis=None, out=resout, **nan_w_args + ) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + res = np.nanpercentile( + nan_mat, 42, axis=(0, 1), out=resout, **nan_w_args + ) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + def test_complex(self): + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='G') + assert_raises(TypeError, np.nanpercentile, arr_c, 0.5) + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='D') + assert_raises(TypeError, np.nanpercentile, arr_c, 0.5) + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='F') + assert_raises(TypeError, np.nanpercentile, arr_c, 0.5) + + @pytest.mark.parametrize("weighted", [False, True]) + @pytest.mark.parametrize("use_out", [False, True]) + def test_result_values(self, weighted, use_out): + if weighted: + percentile = partial(np.percentile, method="inverted_cdf") + nanpercentile = partial(np.nanpercentile, method="inverted_cdf") + + def gen_weights(d): + return np.ones_like(d) + + else: + percentile = np.percentile + nanpercentile = np.nanpercentile + + def gen_weights(d): + return None + + tgt = [percentile(d, 28, weights=gen_weights(d)) for d in _rdat] + out = np.empty_like(tgt) if use_out else None + res = nanpercentile(_ndat, 28, axis=1, + weights=gen_weights(_ndat), out=out) + assert_almost_equal(res, tgt) + # Transpose the array to fit the output convention of numpy.percentile + tgt = np.transpose([percentile(d, (28, 98), weights=gen_weights(d)) + for d in _rdat]) + out = np.empty_like(tgt) if use_out else None + res = nanpercentile(_ndat, (28, 98), axis=1, + weights=gen_weights(_ndat), out=out) + assert_almost_equal(res, tgt) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["Float"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip("`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + with pytest.warns(RuntimeWarning, match="All-NaN slice encountered"): + out = np.nanpercentile(array, 60, axis=axis) + assert np.isnan(out).all() + assert out.dtype == array.dtype + + def test_empty(self): + mat = np.zeros((0, 3)) + for axis in [0, None]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(np.nanpercentile(mat, 40, axis=axis)).all()) + assert_(len(w) == 1) + assert_(issubclass(w[0].category, RuntimeWarning)) + for axis in [1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_equal(np.nanpercentile(mat, 40, axis=axis), np.zeros([])) + assert_(len(w) == 0) + + def test_scalar(self): + assert_equal(np.nanpercentile(0., 100), 0.) + a = np.arange(6) + r = np.nanpercentile(a, 50, axis=0) + assert_equal(r, 2.5) + assert_(np.isscalar(r)) + + def test_extended_axis_invalid(self): + d = np.ones((3, 5, 7, 11)) + assert_raises(AxisError, np.nanpercentile, d, q=5, axis=-5) + assert_raises(AxisError, np.nanpercentile, d, q=5, axis=(0, -5)) + assert_raises(AxisError, np.nanpercentile, d, q=5, axis=4) + assert_raises(AxisError, np.nanpercentile, d, q=5, axis=(0, 4)) + assert_raises(ValueError, np.nanpercentile, d, q=5, axis=(1, 1)) + + def test_multiple_percentiles(self): + perc = [50, 100] + mat = np.ones((4, 3)) + nan_mat = np.nan * mat + # For checking consistency in higher dimensional case + large_mat = np.ones((3, 4, 5)) + large_mat[:, 0:2:4, :] = 0 + large_mat[:, :, 3:] *= 2 + for axis in [None, 0, 1]: + for keepdim in [False, True]: + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "All-NaN slice encountered", RuntimeWarning) + val = np.percentile(mat, perc, axis=axis, keepdims=keepdim) + nan_val = np.nanpercentile(nan_mat, perc, axis=axis, + keepdims=keepdim) + assert_equal(nan_val.shape, val.shape) + + val = np.percentile(large_mat, perc, axis=axis, + keepdims=keepdim) + nan_val = np.nanpercentile(large_mat, perc, axis=axis, + keepdims=keepdim) + assert_equal(nan_val, val) + + megamat = np.ones((3, 4, 5, 6)) + assert_equal( + np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6) + ) + + @pytest.mark.parametrize("nan_weight", [0, 1, 2, 3, 1e200]) + def test_nan_value_with_weight(self, nan_weight): + x = [1, np.nan, 2, 3] + result = np.float64(2.0) + q_unweighted = np.nanpercentile(x, 50, method="inverted_cdf") + assert_equal(q_unweighted, result) + + # The weight value at the nan position should not matter. + w = [1.0, nan_weight, 1.0, 1.0] + q_weighted = np.nanpercentile(x, 50, weights=w, method="inverted_cdf") + assert_equal(q_weighted, result) + + @pytest.mark.parametrize("axis", [0, 1, 2]) + def test_nan_value_with_weight_ndim(self, axis): + # Create a multi-dimensional array to test + np.random.seed(1) + x_no_nan = np.random.random(size=(100, 99, 2)) + # Set some places to NaN (not particularly smart) so there is always + # some non-Nan. + x = x_no_nan.copy() + x[np.arange(99), np.arange(99), 0] = np.nan + + p = np.array([[20., 50., 30], [70, 33, 80]]) + + # We just use ones as weights, but replace it with 0 or 1e200 at the + # NaN positions below. + weights = np.ones_like(x) + + # For comparison use weighted normal percentile with nan weights at + # 0 (and no NaNs); not sure this is strictly identical but should be + # sufficiently so (if a percentile lies exactly on a 0 value). + weights[np.isnan(x)] = 0 + p_expected = np.percentile( + x_no_nan, p, axis=axis, weights=weights, method="inverted_cdf") + + p_unweighted = np.nanpercentile( + x, p, axis=axis, method="inverted_cdf") + # The normal and unweighted versions should be identical: + assert_equal(p_unweighted, p_expected) + + weights[np.isnan(x)] = 1e200 # huge value, shouldn't matter + p_weighted = np.nanpercentile( + x, p, axis=axis, weights=weights, method="inverted_cdf") + assert_equal(p_weighted, p_expected) + # Also check with out passed: + out = np.empty_like(p_weighted) + res = np.nanpercentile( + x, p, axis=axis, weights=weights, out=out, method="inverted_cdf") + + assert res is out + assert_equal(out, p_expected) + + +class TestNanFunctions_Quantile: + # most of this is already tested by TestPercentile + + @pytest.mark.parametrize("weighted", [False, True]) + def test_regression(self, weighted): + ar = np.arange(24).reshape(2, 3, 4).astype(float) + ar[0][1] = np.nan + if weighted: + w_args = {"weights": np.ones_like(ar), "method": "inverted_cdf"} + else: + w_args = {} + + assert_equal(np.nanquantile(ar, q=0.5, **w_args), + np.nanpercentile(ar, q=50, **w_args)) + assert_equal(np.nanquantile(ar, q=0.5, axis=0, **w_args), + np.nanpercentile(ar, q=50, axis=0, **w_args)) + assert_equal(np.nanquantile(ar, q=0.5, axis=1, **w_args), + np.nanpercentile(ar, q=50, axis=1, **w_args)) + assert_equal(np.nanquantile(ar, q=[0.5], axis=1, **w_args), + np.nanpercentile(ar, q=[50], axis=1, **w_args)) + assert_equal(np.nanquantile(ar, q=[0.25, 0.5, 0.75], axis=1, **w_args), + np.nanpercentile(ar, q=[25, 50, 75], axis=1, **w_args)) + + def test_basic(self): + x = np.arange(8) * 0.5 + assert_equal(np.nanquantile(x, 0), 0.) + assert_equal(np.nanquantile(x, 1), 3.5) + assert_equal(np.nanquantile(x, 0.5), 1.75) + + def test_complex(self): + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='G') + assert_raises(TypeError, np.nanquantile, arr_c, 0.5) + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='D') + assert_raises(TypeError, np.nanquantile, arr_c, 0.5) + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='F') + assert_raises(TypeError, np.nanquantile, arr_c, 0.5) + + def test_no_p_overwrite(self): + # this is worth retesting, because quantile does not make a copy + p0 = np.array([0, 0.75, 0.25, 0.5, 1.0]) + p = p0.copy() + np.nanquantile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, p0) + + p0 = p0.tolist() + p = p.tolist() + np.nanquantile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, p0) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["Float"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip("`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + with pytest.warns(RuntimeWarning, match="All-NaN slice encountered"): + out = np.nanquantile(array, 1, axis=axis) + assert np.isnan(out).all() + assert out.dtype == array.dtype + +@pytest.mark.parametrize("arr, expected", [ + # array of floats with some nans + (np.array([np.nan, 5.0, np.nan, np.inf]), + np.array([False, True, False, True])), + # int64 array that can't possibly have nans + (np.array([1, 5, 7, 9], dtype=np.int64), + True), + # bool array that can't possibly have nans + (np.array([False, True, False, True]), + True), + # 2-D complex array with nans + (np.array([[np.nan, 5.0], + [np.nan, np.inf]], dtype=np.complex64), + np.array([[False, True], + [False, True]])), + ]) +def test__nan_mask(arr, expected): + for out in [None, np.empty(arr.shape, dtype=np.bool)]: + actual = _nan_mask(arr, out=out) + assert_equal(actual, expected) + # the above won't distinguish between True proper + # and an array of True values; we want True proper + # for types that can't possibly contain NaN + if type(expected) is not np.ndarray: + assert actual is True + + +def test__replace_nan(): + """ Test that _replace_nan returns the original array if there are no + NaNs, not a copy. + """ + for dtype in [np.bool, np.int32, np.int64]: + arr = np.array([0, 1], dtype=dtype) + result, mask = _replace_nan(arr, 0) + assert mask is None + # do not make a copy if there are no nans + assert result is arr + + for dtype in [np.float32, np.float64]: + arr = np.array([0, 1], dtype=dtype) + result, mask = _replace_nan(arr, 2) + assert (mask == False).all() + # mask is not None, so we make a copy + assert result is not arr + assert_equal(result, arr) + + arr_nan = np.array([0, 1, np.nan], dtype=dtype) + result_nan, mask_nan = _replace_nan(arr_nan, 2) + assert_equal(mask_nan, np.array([False, False, True])) + assert result_nan is not arr_nan + assert_equal(result_nan, np.array([0, 1, 2])) + assert np.isnan(arr_nan[-1]) + + +@pytest.mark.thread_unsafe(reason="memmap is thread-unsafe (gh-29126)") +def test_memmap_takes_fast_route(tmpdir): + # We want memory mapped arrays to take the fast route through nanmax, + # which avoids creating a mask by using fmax.reduce (see gh-28721). So we + # check that on bad input, the error is from fmax (rather than maximum). + a = np.arange(10., dtype=float) + with open(tmpdir.join("data.bin"), "w+b") as fh: + fh.write(a.tobytes()) + mm = np.memmap(fh, dtype=a.dtype, shape=a.shape) + with pytest.raises(ValueError, match="reduction operation fmax"): + np.nanmax(mm, out=np.zeros(2)) + # For completeness, same for nanmin. + with pytest.raises(ValueError, match="reduction operation fmin"): + np.nanmin(mm, out=np.zeros(2)) diff --git a/local_packages/numpy/lib/tests/test_packbits.py b/local_packages/numpy/lib/tests/test_packbits.py new file mode 100644 index 0000000..0b0e9d1 --- /dev/null +++ b/local_packages/numpy/lib/tests/test_packbits.py @@ -0,0 +1,376 @@ +from itertools import chain + +import pytest + +import numpy as np +from numpy.testing import assert_array_equal, assert_equal, assert_raises + + +def test_packbits(): + # Copied from the docstring. + a = [[[1, 0, 1], [0, 1, 0]], + [[1, 1, 0], [0, 0, 1]]] + for dt in '?bBhHiIlLqQ': + arr = np.array(a, dtype=dt) + b = np.packbits(arr, axis=-1) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, np.array([[[160], [64]], [[192], [32]]])) + + assert_raises(TypeError, np.packbits, np.array(a, dtype=float)) + + +def test_packbits_empty(): + shapes = [ + (0,), (10, 20, 0), (10, 0, 20), (0, 10, 20), (20, 0, 0), (0, 20, 0), + (0, 0, 20), (0, 0, 0), + ] + for dt in '?bBhHiIlLqQ': + for shape in shapes: + a = np.empty(shape, dtype=dt) + b = np.packbits(a) + assert_equal(b.dtype, np.uint8) + assert_equal(b.shape, (0,)) + + +def test_packbits_empty_with_axis(): + # Original shapes and lists of packed shapes for different axes. + shapes = [ + ((0,), [(0,)]), + ((10, 20, 0), [(2, 20, 0), (10, 3, 0), (10, 20, 0)]), + ((10, 0, 20), [(2, 0, 20), (10, 0, 20), (10, 0, 3)]), + ((0, 10, 20), [(0, 10, 20), (0, 2, 20), (0, 10, 3)]), + ((20, 0, 0), [(3, 0, 0), (20, 0, 0), (20, 0, 0)]), + ((0, 20, 0), [(0, 20, 0), (0, 3, 0), (0, 20, 0)]), + ((0, 0, 20), [(0, 0, 20), (0, 0, 20), (0, 0, 3)]), + ((0, 0, 0), [(0, 0, 0), (0, 0, 0), (0, 0, 0)]), + ] + for dt in '?bBhHiIlLqQ': + for in_shape, out_shapes in shapes: + for ax, out_shape in enumerate(out_shapes): + a = np.empty(in_shape, dtype=dt) + b = np.packbits(a, axis=ax) + assert_equal(b.dtype, np.uint8) + assert_equal(b.shape, out_shape) + +@pytest.mark.parametrize('bitorder', ('little', 'big')) +def test_packbits_large(bitorder): + # test data large enough for 16 byte vectorization + a = np.array([1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, + 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, + 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, + 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, + 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, + 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, + 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, + 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, + 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, + 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, + 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, + 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, + 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, + 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, + 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0]) + a = a.repeat(3) + for dtype in '?bBhHiIlLqQ': + arr = np.array(a, dtype=dtype) + b = np.packbits(arr, axis=None, bitorder=bitorder) + assert_equal(b.dtype, np.uint8) + r = [252, 127, 192, 3, 254, 7, 252, 0, 7, 31, 240, 0, 28, 1, 255, 252, + 113, 248, 3, 255, 192, 28, 15, 192, 28, 126, 0, 224, 127, 255, + 227, 142, 7, 31, 142, 63, 28, 126, 56, 227, 240, 0, 227, 128, 63, + 224, 14, 56, 252, 112, 56, 255, 241, 248, 3, 240, 56, 224, 112, + 63, 255, 255, 199, 224, 14, 0, 31, 143, 192, 3, 255, 199, 0, 1, + 255, 224, 1, 255, 252, 126, 63, 0, 1, 192, 252, 14, 63, 0, 15, + 199, 252, 113, 255, 3, 128, 56, 252, 14, 7, 0, 113, 255, 255, 142, 56, 227, + 129, 248, 227, 129, 199, 31, 128] + if bitorder == 'big': + assert_array_equal(b, r) + # equal for size being multiple of 8 + assert_array_equal(np.unpackbits(b, bitorder=bitorder)[:-4], a) + + # check last byte of different remainders (16 byte vectorization) + b = [np.packbits(arr[:-i], axis=None)[-1] for i in range(1, 16)] + assert_array_equal(b, [128, 128, 128, 31, 30, 28, 24, 16, 0, 0, 0, 199, + 198, 196, 192]) + + arr = arr.reshape(36, 25) + b = np.packbits(arr, axis=0) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, [[190, 186, 178, 178, 150, 215, 87, 83, 83, 195, + 199, 206, 204, 204, 140, 140, 136, 136, 8, 40, 105, + 107, 75, 74, 88], + [72, 216, 248, 241, 227, 195, 202, 90, 90, 83, + 83, 119, 127, 109, 73, 64, 208, 244, 189, 45, + 41, 104, 122, 90, 18], + [113, 120, 248, 216, 152, 24, 60, 52, 182, 150, + 150, 150, 146, 210, 210, 246, 255, 255, 223, + 151, 21, 17, 17, 131, 163], + [214, 210, 210, 64, 68, 5, 5, 1, 72, 88, 92, + 92, 78, 110, 39, 181, 149, 220, 222, 218, 218, + 202, 234, 170, 168], + [0, 128, 128, 192, 80, 112, 48, 160, 160, 224, + 240, 208, 144, 128, 160, 224, 240, 208, 144, + 144, 176, 240, 224, 192, 128]]) + + b = np.packbits(arr, axis=1) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, [[252, 127, 192, 0], + [ 7, 252, 15, 128], + [240, 0, 28, 0], + [255, 128, 0, 128], + [192, 31, 255, 128], + [142, 63, 0, 0], + [255, 240, 7, 0], + [ 7, 224, 14, 0], + [126, 0, 224, 0], + [255, 255, 199, 0], + [ 56, 28, 126, 0], + [113, 248, 227, 128], + [227, 142, 63, 0], + [ 0, 28, 112, 0], + [ 15, 248, 3, 128], + [ 28, 126, 56, 0], + [ 56, 255, 241, 128], + [240, 7, 224, 0], + [227, 129, 192, 128], + [255, 255, 254, 0], + [126, 0, 224, 0], + [ 3, 241, 248, 0], + [ 0, 255, 241, 128], + [128, 0, 255, 128], + [224, 1, 255, 128], + [248, 252, 126, 0], + [ 0, 7, 3, 128], + [224, 113, 248, 0], + [ 0, 252, 127, 128], + [142, 63, 224, 0], + [224, 14, 63, 0], + [ 7, 3, 128, 0], + [113, 255, 255, 128], + [ 28, 113, 199, 0], + [ 7, 227, 142, 0], + [ 14, 56, 252, 0]]) + + arr = arr.T.copy() + b = np.packbits(arr, axis=0) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, [[252, 7, 240, 255, 192, 142, 255, 7, 126, 255, + 56, 113, 227, 0, 15, 28, 56, 240, 227, 255, + 126, 3, 0, 128, 224, 248, 0, 224, 0, 142, 224, + 7, 113, 28, 7, 14], + [127, 252, 0, 128, 31, 63, 240, 224, 0, 255, + 28, 248, 142, 28, 248, 126, 255, 7, 129, 255, + 0, 241, 255, 0, 1, 252, 7, 113, 252, 63, 14, + 3, 255, 113, 227, 56], + [192, 15, 28, 0, 255, 0, 7, 14, 224, 199, 126, + 227, 63, 112, 3, 56, 241, 224, 192, 254, 224, + 248, 241, 255, 255, 126, 3, 248, 127, 224, 63, + 128, 255, 199, 142, 252], + [0, 128, 0, 128, 128, 0, 0, 0, 0, 0, 0, 128, 0, + 0, 128, 0, 128, 0, 128, 0, 0, 0, 128, 128, + 128, 0, 128, 0, 128, 0, 0, 0, 128, 0, 0, 0]]) + + b = np.packbits(arr, axis=1) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, [[190, 72, 113, 214, 0], + [186, 216, 120, 210, 128], + [178, 248, 248, 210, 128], + [178, 241, 216, 64, 192], + [150, 227, 152, 68, 80], + [215, 195, 24, 5, 112], + [ 87, 202, 60, 5, 48], + [ 83, 90, 52, 1, 160], + [ 83, 90, 182, 72, 160], + [195, 83, 150, 88, 224], + [199, 83, 150, 92, 240], + [206, 119, 150, 92, 208], + [204, 127, 146, 78, 144], + [204, 109, 210, 110, 128], + [140, 73, 210, 39, 160], + [140, 64, 246, 181, 224], + [136, 208, 255, 149, 240], + [136, 244, 255, 220, 208], + [ 8, 189, 223, 222, 144], + [ 40, 45, 151, 218, 144], + [105, 41, 21, 218, 176], + [107, 104, 17, 202, 240], + [ 75, 122, 17, 234, 224], + [ 74, 90, 131, 170, 192], + [ 88, 18, 163, 168, 128]]) + + # result is the same if input is multiplied with a nonzero value + for dtype in 'bBhHiIlLqQ': + arr = np.array(a, dtype=dtype) + rnd = np.random.randint(low=np.iinfo(dtype).min, + high=np.iinfo(dtype).max, size=arr.size, + dtype=dtype) + rnd[rnd == 0] = 1 + arr *= rnd.astype(dtype) + b = np.packbits(arr, axis=-1) + assert_array_equal(np.unpackbits(b)[:-4], a) + + assert_raises(TypeError, np.packbits, np.array(a, dtype=float)) + + +def test_packbits_very_large(): + # test some with a larger arrays gh-8637 + # code is covered earlier but larger array makes crash on bug more likely + for s in range(950, 1050): + for dt in '?bBhHiIlLqQ': + x = np.ones((200, s), dtype=bool) + np.packbits(x, axis=1) + + +def test_unpackbits(): + # Copied from the docstring. + a = np.array([[2], [7], [23]], dtype=np.uint8) + b = np.unpackbits(a, axis=1) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, np.array([[0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 0, 1, 1, 1]])) + +def test_pack_unpack_order(): + a = np.array([[2], [7], [23]], dtype=np.uint8) + b = np.unpackbits(a, axis=1) + assert_equal(b.dtype, np.uint8) + b_little = np.unpackbits(a, axis=1, bitorder='little') + b_big = np.unpackbits(a, axis=1, bitorder='big') + assert_array_equal(b, b_big) + assert_array_equal(a, np.packbits(b_little, axis=1, bitorder='little')) + assert_array_equal(b[:, ::-1], b_little) + assert_array_equal(a, np.packbits(b_big, axis=1, bitorder='big')) + assert_raises(ValueError, np.unpackbits, a, bitorder='r') + assert_raises(TypeError, np.unpackbits, a, bitorder=10) + + +def test_unpackbits_empty(): + a = np.empty((0,), dtype=np.uint8) + b = np.unpackbits(a) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, np.empty((0,))) + + +def test_unpackbits_empty_with_axis(): + # Lists of packed shapes for different axes and unpacked shapes. + shapes = [ + ([(0,)], (0,)), + ([(2, 24, 0), (16, 3, 0), (16, 24, 0)], (16, 24, 0)), + ([(2, 0, 24), (16, 0, 24), (16, 0, 3)], (16, 0, 24)), + ([(0, 16, 24), (0, 2, 24), (0, 16, 3)], (0, 16, 24)), + ([(3, 0, 0), (24, 0, 0), (24, 0, 0)], (24, 0, 0)), + ([(0, 24, 0), (0, 3, 0), (0, 24, 0)], (0, 24, 0)), + ([(0, 0, 24), (0, 0, 24), (0, 0, 3)], (0, 0, 24)), + ([(0, 0, 0), (0, 0, 0), (0, 0, 0)], (0, 0, 0)), + ] + for in_shapes, out_shape in shapes: + for ax, in_shape in enumerate(in_shapes): + a = np.empty(in_shape, dtype=np.uint8) + b = np.unpackbits(a, axis=ax) + assert_equal(b.dtype, np.uint8) + assert_equal(b.shape, out_shape) + + +def test_unpackbits_large(): + # test all possible numbers via comparison to already tested packbits + d = np.arange(277, dtype=np.uint8) + assert_array_equal(np.packbits(np.unpackbits(d)), d) + assert_array_equal(np.packbits(np.unpackbits(d[::2])), d[::2]) + d = np.tile(d, (3, 1)) + assert_array_equal(np.packbits(np.unpackbits(d, axis=1), axis=1), d) + d = d.T.copy() + assert_array_equal(np.packbits(np.unpackbits(d, axis=0), axis=0), d) + + +class TestCount: + x = np.array([ + [1, 0, 1, 0, 0, 1, 0], + [0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 1], + [1, 1, 0, 0, 0, 1, 1], + [1, 0, 1, 0, 1, 0, 1], + [0, 0, 1, 1, 1, 0, 0], + [0, 1, 0, 1, 0, 1, 0], + ], dtype=np.uint8) + padded1 = np.zeros(57, dtype=np.uint8) + padded1[:49] = x.ravel() + padded1b = np.zeros(57, dtype=np.uint8) + padded1b[:49] = x[::-1].copy().ravel() + padded2 = np.zeros((9, 9), dtype=np.uint8) + padded2[:7, :7] = x + + @pytest.mark.parametrize('bitorder', ('little', 'big')) + @pytest.mark.parametrize('count', chain(range(58), range(-1, -57, -1))) + def test_roundtrip(self, bitorder, count): + if count < 0: + # one extra zero of padding + cutoff = count - 1 + else: + cutoff = count + # test complete invertibility of packbits and unpackbits with count + packed = np.packbits(self.x, bitorder=bitorder) + unpacked = np.unpackbits(packed, count=count, bitorder=bitorder) + assert_equal(unpacked.dtype, np.uint8) + assert_array_equal(unpacked, self.padded1[:cutoff]) + + @pytest.mark.parametrize('kwargs', [ + {}, {'count': None}, + ]) + def test_count(self, kwargs): + packed = np.packbits(self.x) + unpacked = np.unpackbits(packed, **kwargs) + assert_equal(unpacked.dtype, np.uint8) + assert_array_equal(unpacked, self.padded1[:-1]) + + @pytest.mark.parametrize('bitorder', ('little', 'big')) + # delta==-1 when count<0 because one extra zero of padding + @pytest.mark.parametrize('count', chain(range(8), range(-1, -9, -1))) + def test_roundtrip_axis(self, bitorder, count): + if count < 0: + # one extra zero of padding + cutoff = count - 1 + else: + cutoff = count + packed0 = np.packbits(self.x, axis=0, bitorder=bitorder) + unpacked0 = np.unpackbits(packed0, axis=0, count=count, + bitorder=bitorder) + assert_equal(unpacked0.dtype, np.uint8) + assert_array_equal(unpacked0, self.padded2[:cutoff, :self.x.shape[1]]) + + packed1 = np.packbits(self.x, axis=1, bitorder=bitorder) + unpacked1 = np.unpackbits(packed1, axis=1, count=count, + bitorder=bitorder) + assert_equal(unpacked1.dtype, np.uint8) + assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :cutoff]) + + @pytest.mark.parametrize('kwargs', [ + {}, {'count': None}, + {'bitorder': 'little'}, + {'bitorder': 'little', 'count': None}, + {'bitorder': 'big'}, + {'bitorder': 'big', 'count': None}, + ]) + def test_axis_count(self, kwargs): + packed0 = np.packbits(self.x, axis=0) + unpacked0 = np.unpackbits(packed0, axis=0, **kwargs) + assert_equal(unpacked0.dtype, np.uint8) + if kwargs.get('bitorder', 'big') == 'big': + assert_array_equal(unpacked0, self.padded2[:-1, :self.x.shape[1]]) + else: + assert_array_equal(unpacked0[::-1, :], self.padded2[:-1, :self.x.shape[1]]) + + packed1 = np.packbits(self.x, axis=1) + unpacked1 = np.unpackbits(packed1, axis=1, **kwargs) + assert_equal(unpacked1.dtype, np.uint8) + if kwargs.get('bitorder', 'big') == 'big': + assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :-1]) + else: + assert_array_equal(unpacked1[:, ::-1], self.padded2[:self.x.shape[0], :-1]) + + def test_bad_count(self): + packed0 = np.packbits(self.x, axis=0) + assert_raises(ValueError, np.unpackbits, packed0, axis=0, count=-9) + packed1 = np.packbits(self.x, axis=1) + assert_raises(ValueError, np.unpackbits, packed1, axis=1, count=-9) + packed = np.packbits(self.x) + assert_raises(ValueError, np.unpackbits, packed, count=-57) diff --git a/local_packages/numpy/lib/tests/test_polynomial.py b/local_packages/numpy/lib/tests/test_polynomial.py new file mode 100644 index 0000000..32547f8 --- /dev/null +++ b/local_packages/numpy/lib/tests/test_polynomial.py @@ -0,0 +1,325 @@ +import pytest + +import numpy as np +import numpy.polynomial.polynomial as poly +from numpy.testing import ( + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, +) + +# `poly1d` has some support for `np.bool` and `np.timedelta64`, +# but it is limited and they are therefore excluded here +TYPE_CODES = np.typecodes["AllInteger"] + np.typecodes["AllFloat"] + "O" + + +class TestPolynomial: + def test_poly1d_str_and_repr(self): + p = np.poly1d([1., 2, 3]) + assert_equal(repr(p), 'poly1d([1., 2., 3.])') + assert_equal(str(p), + ' 2\n' + '1 x + 2 x + 3') + + q = np.poly1d([3., 2, 1]) + assert_equal(repr(q), 'poly1d([3., 2., 1.])') + assert_equal(str(q), + ' 2\n' + '3 x + 2 x + 1') + + r = np.poly1d([1.89999 + 2j, -3j, -5.12345678, 2 + 1j]) + assert_equal(str(r), + ' 3 2\n' + '(1.9 + 2j) x - 3j x - 5.123 x + (2 + 1j)') + + assert_equal(str(np.poly1d([-3, -2, -1])), + ' 2\n' + '-3 x - 2 x - 1') + + def test_poly1d_resolution(self): + p = np.poly1d([1., 2, 3]) + q = np.poly1d([3., 2, 1]) + assert_equal(p(0), 3.0) + assert_equal(p(5), 38.0) + assert_equal(q(0), 1.0) + assert_equal(q(5), 86.0) + + def test_poly1d_math(self): + # here we use some simple coeffs to make calculations easier + p = np.poly1d([1., 2, 4]) + q = np.poly1d([4., 2, 1]) + assert_equal(p / q, (np.poly1d([0.25]), np.poly1d([1.5, 3.75]))) + assert_equal(p.integ(), np.poly1d([1 / 3, 1., 4., 0.])) + assert_equal(p.integ(1), np.poly1d([1 / 3, 1., 4., 0.])) + + p = np.poly1d([1., 2, 3]) + q = np.poly1d([3., 2, 1]) + assert_equal(p * q, np.poly1d([3., 8., 14., 8., 3.])) + assert_equal(p + q, np.poly1d([4., 4., 4.])) + assert_equal(p - q, np.poly1d([-2., 0., 2.])) + assert_equal(p ** 4, np.poly1d([1., 8., 36., 104., 214., + 312., 324., 216., 81.])) + assert_equal(p(q), np.poly1d([9., 12., 16., 8., 6.])) + assert_equal(q(p), np.poly1d([3., 12., 32., 40., 34.])) + assert_equal(p.deriv(), np.poly1d([2., 2.])) + assert_equal(p.deriv(2), np.poly1d([2.])) + assert_equal(np.polydiv(np.poly1d([1, 0, -1]), np.poly1d([1, 1])), + (np.poly1d([1., -1.]), np.poly1d([0.]))) + + @pytest.mark.parametrize("type_code", TYPE_CODES) + def test_poly1d_misc(self, type_code: str) -> None: + dtype = np.dtype(type_code) + ar = np.array([1, 2, 3], dtype=dtype) + p = np.poly1d(ar) + + # `__eq__` + assert_equal(np.asarray(p), ar) + assert_equal(np.asarray(p).dtype, dtype) + assert_equal(len(p), 2) + + # `__getitem__` + comparison_dct = {-1: 0, 0: 3, 1: 2, 2: 1, 3: 0} + for index, ref in comparison_dct.items(): + scalar = p[index] + assert_equal(scalar, ref) + if dtype == np.object_: + assert isinstance(scalar, int) + else: + assert_equal(scalar.dtype, dtype) + + def test_poly1d_variable_arg(self): + q = np.poly1d([1., 2, 3], variable='y') + assert_equal(str(q), + ' 2\n' + '1 y + 2 y + 3') + q = np.poly1d([1., 2, 3], variable='lambda') + assert_equal(str(q), + ' 2\n' + '1 lambda + 2 lambda + 3') + + def test_poly(self): + assert_array_almost_equal(np.poly([3, -np.sqrt(2), np.sqrt(2)]), + [1, -3, -2, 6]) + + # From matlab docs + A = [[1, 2, 3], [4, 5, 6], [7, 8, 0]] + assert_array_almost_equal(np.poly(A), [1, -6, -72, -27]) + + # Should produce real output for perfect conjugates + assert_(np.isrealobj(np.poly([+1.082j, +2.613j, -2.613j, -1.082j]))) + assert_(np.isrealobj(np.poly([0 + 1j, -0 + -1j, 1 + 2j, + 1 - 2j, 1. + 3.5j, 1 - 3.5j]))) + assert_(np.isrealobj(np.poly([1j, -1j, 1 + 2j, 1 - 2j, 1 + 3j, 1 - 3.j]))) + assert_(np.isrealobj(np.poly([1j, -1j, 1 + 2j, 1 - 2j]))) + assert_(np.isrealobj(np.poly([1j, -1j, 2j, -2j]))) + assert_(np.isrealobj(np.poly([1j, -1j]))) + assert_(np.isrealobj(np.poly([1, -1]))) + + assert_(np.iscomplexobj(np.poly([1j, -1.0000001j]))) + + np.random.seed(42) + a = np.random.randn(100) + 1j * np.random.randn(100) + assert_(np.isrealobj(np.poly(np.concatenate((a, np.conjugate(a)))))) + + def test_roots(self): + assert_array_equal(np.roots([1, 0, 0]), [0, 0]) + + # Testing for larger root values + for i in np.logspace(10, 25, num=1000, base=10): + tgt = np.array([-1, 1, i]) + res = np.sort(np.roots(poly.polyfromroots(tgt)[::-1])) + # Adapting the expected precision according to the root value, + # to take into account numerical calculation error + assert_almost_equal(res, tgt, 14 - int(np.log10(i))) + + for i in np.logspace(10, 25, num=1000, base=10): + tgt = np.array([-1, 1.01, i]) + res = np.sort(np.roots(poly.polyfromroots(tgt)[::-1])) + # Adapting the expected precision according to the root value, + # to take into account numerical calculation error + assert_almost_equal(res, tgt, 14 - int(np.log10(i))) + + def test_str_leading_zeros(self): + p = np.poly1d([4, 3, 2, 1]) + p[3] = 0 + assert_equal(str(p), + " 2\n" + "3 x + 2 x + 1") + + p = np.poly1d([1, 2]) + p[0] = 0 + p[1] = 0 + assert_equal(str(p), " \n0") + + def test_polyfit(self): + c = np.array([3., 2., 1.]) + x = np.linspace(0, 2, 7) + y = np.polyval(c, x) + err = [1, -1, 1, -1, 1, -1, 1] + weights = np.arange(8, 1, -1)**2 / 7.0 + + # Check exception when too few points for variance estimate. Note that + # the estimate requires the number of data points to exceed + # degree + 1 + assert_raises(ValueError, np.polyfit, + [1], [1], deg=0, cov=True) + + # check 1D case + m, cov = np.polyfit(x, y + err, 2, cov=True) + est = [3.8571, 0.2857, 1.619] + assert_almost_equal(est, m, decimal=4) + val0 = [[ 1.4694, -2.9388, 0.8163], + [-2.9388, 6.3673, -2.1224], + [ 0.8163, -2.1224, 1.161 ]] # noqa: E202 + assert_almost_equal(val0, cov, decimal=4) + + m2, cov2 = np.polyfit(x, y + err, 2, w=weights, cov=True) + assert_almost_equal([4.8927, -1.0177, 1.7768], m2, decimal=4) + val = [[ 4.3964, -5.0052, 0.4878], + [-5.0052, 6.8067, -0.9089], + [ 0.4878, -0.9089, 0.3337]] + assert_almost_equal(val, cov2, decimal=4) + + m3, cov3 = np.polyfit(x, y + err, 2, w=weights, cov="unscaled") + assert_almost_equal([4.8927, -1.0177, 1.7768], m3, decimal=4) + val = [[ 0.1473, -0.1677, 0.0163], + [-0.1677, 0.228 , -0.0304], # noqa: E203 + [ 0.0163, -0.0304, 0.0112]] + assert_almost_equal(val, cov3, decimal=4) + + # check 2D (n,1) case + y = y[:, np.newaxis] + c = c[:, np.newaxis] + assert_almost_equal(c, np.polyfit(x, y, 2)) + # check 2D (n,2) case + yy = np.concatenate((y, y), axis=1) + cc = np.concatenate((c, c), axis=1) + assert_almost_equal(cc, np.polyfit(x, yy, 2)) + + m, cov = np.polyfit(x, yy + np.array(err)[:, np.newaxis], 2, cov=True) + assert_almost_equal(est, m[:, 0], decimal=4) + assert_almost_equal(est, m[:, 1], decimal=4) + assert_almost_equal(val0, cov[:, :, 0], decimal=4) + assert_almost_equal(val0, cov[:, :, 1], decimal=4) + + # check order 1 (deg=0) case, were the analytic results are simple + np.random.seed(123) + y = np.random.normal(size=(4, 10000)) + mean, cov = np.polyfit(np.zeros(y.shape[0]), y, deg=0, cov=True) + # Should get sigma_mean = sigma/sqrt(N) = 1./sqrt(4) = 0.5. + assert_allclose(mean.std(), 0.5, atol=0.01) + assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01) + # Without scaling, since reduced chi2 is 1, the result should be the same. + mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=np.ones(y.shape[0]), + deg=0, cov="unscaled") + assert_allclose(mean.std(), 0.5, atol=0.01) + assert_almost_equal(np.sqrt(cov.mean()), 0.5) + # If we estimate our errors wrong, no change with scaling: + w = np.full(y.shape[0], 1. / 0.5) + mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov=True) + assert_allclose(mean.std(), 0.5, atol=0.01) + assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01) + # But if we do not scale, our estimate for the error in the mean will + # differ. + mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov="unscaled") + assert_allclose(mean.std(), 0.5, atol=0.01) + assert_almost_equal(np.sqrt(cov.mean()), 0.25) + + def test_objects(self): + from decimal import Decimal + p = np.poly1d([Decimal('4.0'), Decimal('3.0'), Decimal('2.0')]) + p2 = p * Decimal('1.333333333333333') + assert_(p2[1] == Decimal("3.9999999999999990")) + p2 = p.deriv() + assert_(p2[1] == Decimal('8.0')) + p2 = p.integ() + assert_(p2[3] == Decimal("1.333333333333333333333333333")) + assert_(p2[2] == Decimal('1.5')) + assert_(np.issubdtype(p2.coeffs.dtype, np.object_)) + p = np.poly([Decimal(1), Decimal(2)]) + assert_equal(np.poly([Decimal(1), Decimal(2)]), + [1, Decimal(-3), Decimal(2)]) + + def test_complex(self): + p = np.poly1d([3j, 2j, 1j]) + p2 = p.integ() + assert_((p2.coeffs == [1j, 1j, 1j, 0]).all()) + p2 = p.deriv() + assert_((p2.coeffs == [6j, 2j]).all()) + + def test_integ_coeffs(self): + p = np.poly1d([3, 2, 1]) + p2 = p.integ(3, k=[9, 7, 6]) + expected = [1 / 4 / 5, 1 / 3 / 4, 1 / 2 / 3, 9 / 1 / 2, 7, 6] + assert_((p2.coeffs == expected).all()) + + def test_zero_dims(self): + try: + np.poly(np.zeros((0, 0))) + except ValueError: + pass + + def test_poly_int_overflow(self): + """ + Regression test for gh-5096. + """ + v = np.arange(1, 21) + assert_almost_equal(np.poly(v), np.poly(np.diag(v))) + + def test_zero_poly_dtype(self): + """ + Regression test for gh-16354. + """ + z = np.array([0, 0, 0]) + p = np.poly1d(z.astype(np.int64)) + assert_equal(p.coeffs.dtype, np.int64) + + p = np.poly1d(z.astype(np.float32)) + assert_equal(p.coeffs.dtype, np.float32) + + p = np.poly1d(z.astype(np.complex64)) + assert_equal(p.coeffs.dtype, np.complex64) + + def test_poly_eq(self): + p = np.poly1d([1, 2, 3]) + p2 = np.poly1d([1, 2, 4]) + assert_equal(p == None, False) # noqa: E711 + assert_equal(p != None, True) # noqa: E711 + assert_equal(p == p, True) + assert_equal(p == p2, False) + assert_equal(p != p2, True) + + def test_polydiv(self): + b = np.poly1d([2, 6, 6, 1]) + a = np.poly1d([-1j, (1 + 2j), -(2 + 1j), 1]) + q, r = np.polydiv(b, a) + assert_equal(q.coeffs.dtype, np.complex128) + assert_equal(r.coeffs.dtype, np.complex128) + assert_equal(q * a + r, b) + + c = [1, 2, 3] + d = np.poly1d([1, 2, 3]) + s, t = np.polydiv(c, d) + assert isinstance(s, np.poly1d) + assert isinstance(t, np.poly1d) + u, v = np.polydiv(d, c) + assert isinstance(u, np.poly1d) + assert isinstance(v, np.poly1d) + + def test_poly_coeffs_mutable(self): + """ Coefficients should be modifiable """ + p = np.poly1d([1, 2, 3]) + + p.coeffs += 1 + assert_equal(p.coeffs, [2, 3, 4]) + + p.coeffs[2] += 10 + assert_equal(p.coeffs, [2, 3, 14]) + + # this never used to be allowed - let's not add features to deprecated + # APIs + assert_raises(AttributeError, setattr, p, 'coeffs', np.array(1)) diff --git a/local_packages/numpy/lib/tests/test_recfunctions.py b/local_packages/numpy/lib/tests/test_recfunctions.py new file mode 100644 index 0000000..b9cc266 --- /dev/null +++ b/local_packages/numpy/lib/tests/test_recfunctions.py @@ -0,0 +1,1042 @@ +import pytest + +import numpy as np +import numpy.ma as ma +from numpy.lib.recfunctions import ( + append_fields, + apply_along_fields, + assign_fields_by_name, + drop_fields, + find_duplicates, + get_fieldstructure, + join_by, + merge_arrays, + recursive_fill_fields, + rename_fields, + repack_fields, + require_fields, + stack_arrays, + structured_to_unstructured, + unstructured_to_structured, +) +from numpy.ma.mrecords import MaskedRecords +from numpy.ma.testutils import assert_equal +from numpy.testing import assert_, assert_raises + +get_fieldspec = np.lib.recfunctions._get_fieldspec +get_names = np.lib.recfunctions.get_names +get_names_flat = np.lib.recfunctions.get_names_flat +zip_descr = np.lib.recfunctions._zip_descr +zip_dtype = np.lib.recfunctions._zip_dtype + + +class TestRecFunctions: + # Misc tests + def test_zip_descr(self): + # Test zip_descr + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array([('A', 1.), ('B', 2.)], + dtype=[('A', '|S3'), ('B', float)]) + w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + + # Std array + test = zip_descr((x, x), flatten=True) + assert_equal(test, + np.dtype([('', int), ('', int)])) + test = zip_descr((x, x), flatten=False) + assert_equal(test, + np.dtype([('', int), ('', int)])) + + # Std & flexible-dtype + test = zip_descr((x, z), flatten=True) + assert_equal(test, + np.dtype([('', int), ('A', '|S3'), ('B', float)])) + test = zip_descr((x, z), flatten=False) + assert_equal(test, + np.dtype([('', int), + ('', [('A', '|S3'), ('B', float)])])) + + # Standard & nested dtype + test = zip_descr((x, w), flatten=True) + assert_equal(test, + np.dtype([('', int), + ('a', int), + ('ba', float), ('bb', int)])) + test = zip_descr((x, w), flatten=False) + assert_equal(test, + np.dtype([('', int), + ('', [('a', int), + ('b', [('ba', float), ('bb', int)])])])) + + def test_drop_fields(self): + # Test drop_fields + a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + + # A basic field + test = drop_fields(a, 'a') + control = np.array([((2, 3.0),), ((5, 6.0),)], + dtype=[('b', [('ba', float), ('bb', int)])]) + assert_equal(test, control) + + # Another basic field (but nesting two fields) + test = drop_fields(a, 'b') + control = np.array([(1,), (4,)], dtype=[('a', int)]) + assert_equal(test, control) + + # A nested sub-field + test = drop_fields(a, ['ba', ]) + control = np.array([(1, (3.0,)), (4, (6.0,))], + dtype=[('a', int), ('b', [('bb', int)])]) + assert_equal(test, control) + + # All the nested sub-field from a field: zap that field + test = drop_fields(a, ['ba', 'bb']) + control = np.array([(1,), (4,)], dtype=[('a', int)]) + assert_equal(test, control) + + # dropping all fields results in an array with no fields + test = drop_fields(a, ['a', 'b']) + control = np.array([(), ()], dtype=[]) + assert_equal(test, control) + + def test_rename_fields(self): + # Test rename fields + a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], + dtype=[('a', int), + ('b', [('ba', float), ('bb', (float, 2))])]) + test = rename_fields(a, {'a': 'A', 'bb': 'BB'}) + newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])] + control = a.view(newdtype) + assert_equal(test.dtype, newdtype) + assert_equal(test, control) + + def test_get_names(self): + # Test get_names + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + test = get_names(ndtype) + assert_equal(test, ('A', 'B')) + + ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])]) + test = get_names(ndtype) + assert_equal(test, ('a', ('b', ('ba', 'bb')))) + + ndtype = np.dtype([('a', int), ('b', [])]) + test = get_names(ndtype) + assert_equal(test, ('a', ('b', ()))) + + ndtype = np.dtype([]) + test = get_names(ndtype) + assert_equal(test, ()) + + def test_get_names_flat(self): + # Test get_names_flat + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + test = get_names_flat(ndtype) + assert_equal(test, ('A', 'B')) + + ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])]) + test = get_names_flat(ndtype) + assert_equal(test, ('a', 'b', 'ba', 'bb')) + + ndtype = np.dtype([('a', int), ('b', [])]) + test = get_names_flat(ndtype) + assert_equal(test, ('a', 'b')) + + ndtype = np.dtype([]) + test = get_names_flat(ndtype) + assert_equal(test, ()) + + def test_get_fieldstructure(self): + # Test get_fieldstructure + + # No nested fields + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + test = get_fieldstructure(ndtype) + assert_equal(test, {'A': [], 'B': []}) + + # One 1-nested field + ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])]) + test = get_fieldstructure(ndtype) + assert_equal(test, {'A': [], 'B': [], 'BA': ['B', ], 'BB': ['B']}) + + # One 2-nested fields + ndtype = np.dtype([('A', int), + ('B', [('BA', int), + ('BB', [('BBA', int), ('BBB', int)])])]) + test = get_fieldstructure(ndtype) + control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], + 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']} + assert_equal(test, control) + + # 0 fields + ndtype = np.dtype([]) + test = get_fieldstructure(ndtype) + assert_equal(test, {}) + + def test_find_duplicates(self): + # Test find_duplicates + a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')), + (1, (1., 'B')), (2, (2., 'B')), (2, (2., 'C'))], + mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)), + (0, (0, 0)), (1, (0, 0)), (0, (1, 0))], + dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])]) + + test = find_duplicates(a, ignoremask=False, return_index=True) + control = [0, 2] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='A', return_index=True) + control = [0, 1, 2, 3, 5] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='B', return_index=True) + control = [0, 1, 2, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='BA', return_index=True) + control = [0, 1, 2, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='BB', return_index=True) + control = [0, 1, 2, 3, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + def test_find_duplicates_ignoremask(self): + # Test the ignoremask option of find_duplicates + ndtype = [('a', int)] + a = ma.array([1, 1, 1, 2, 2, 3, 3], + mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) + test = find_duplicates(a, ignoremask=True, return_index=True) + control = [0, 1, 3, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, ignoremask=False, return_index=True) + control = [0, 1, 2, 3, 4, 6] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + def test_repack_fields(self): + dt = np.dtype('u1,f4,i8', align=True) + a = np.zeros(2, dtype=dt) + + assert_equal(repack_fields(dt), np.dtype('u1,f4,i8')) + assert_equal(repack_fields(a).itemsize, 13) + assert_equal(repack_fields(repack_fields(dt), align=True), dt) + + # make sure type is preserved + dt = np.dtype((np.record, dt)) + assert_(repack_fields(dt).type is np.record) + + @pytest.mark.thread_unsafe(reason="memmap is thread-unsafe (gh-29126)") + def test_structured_to_unstructured(self, tmp_path): + a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) + out = structured_to_unstructured(a) + assert_equal(out, np.zeros((4, 5), dtype='f8')) + + b = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)], + dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) + out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1) + assert_equal(out, np.array([3., 5.5, 9., 11.])) + out = np.mean(structured_to_unstructured(b[['x']]), axis=-1) + assert_equal(out, np.array([1., 4. , 7., 10.])) # noqa: E203 + + c = np.arange(20).reshape((4, 5)) + out = unstructured_to_structured(c, a.dtype) + want = np.array([( 0, ( 1., 2), [ 3., 4.]), + ( 5, ( 6., 7), [ 8., 9.]), + (10, (11., 12), [13., 14.]), + (15, (16., 17), [18., 19.])], + dtype=[('a', 'i4'), + ('b', [('f0', 'f4'), ('f1', 'u2')]), + ('c', 'f4', (2,))]) + assert_equal(out, want) + + d = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)], + dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) + assert_equal(apply_along_fields(np.mean, d), + np.array([ 8.0 / 3, 16.0 / 3, 26.0 / 3, 11.])) + assert_equal(apply_along_fields(np.mean, d[['x', 'z']]), + np.array([ 3., 5.5, 9., 11.])) + + # check that for uniform field dtypes we get a view, not a copy: + d = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)], + dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')]) + dd = structured_to_unstructured(d) + ddd = unstructured_to_structured(dd, d.dtype) + assert_(np.shares_memory(dd, d)) + assert_(np.shares_memory(ddd, d)) + + # check that reversing the order of attributes works + dd_attrib_rev = structured_to_unstructured(d[['z', 'x']]) + assert_equal(dd_attrib_rev, [[5, 1], [7, 4], [11, 7], [12, 10]]) + assert_(np.shares_memory(dd_attrib_rev, d)) + + # including uniform fields with subarrays unpacked + d = np.array([(1, [2, 3], [[ 4, 5], [ 6, 7]]), + (8, [9, 10], [[11, 12], [13, 14]])], + dtype=[('x0', 'i4'), ('x1', ('i4', 2)), + ('x2', ('i4', (2, 2)))]) + dd = structured_to_unstructured(d) + ddd = unstructured_to_structured(dd, d.dtype) + assert_(np.shares_memory(dd, d)) + assert_(np.shares_memory(ddd, d)) + + # check that reversing with sub-arrays works as expected + d_rev = d[::-1] + dd_rev = structured_to_unstructured(d_rev) + assert_equal(dd_rev, [[8, 9, 10, 11, 12, 13, 14], + [1, 2, 3, 4, 5, 6, 7]]) + + # check that sub-arrays keep the order of their values + d_attrib_rev = d[['x2', 'x1', 'x0']] + dd_attrib_rev = structured_to_unstructured(d_attrib_rev) + assert_equal(dd_attrib_rev, [[4, 5, 6, 7, 2, 3, 1], + [11, 12, 13, 14, 9, 10, 8]]) + + # with ignored field at the end + d = np.array([(1, [2, 3], [[4, 5], [6, 7]], 32), + (8, [9, 10], [[11, 12], [13, 14]], 64)], + dtype=[('x0', 'i4'), ('x1', ('i4', 2)), + ('x2', ('i4', (2, 2))), ('ignored', 'u1')]) + dd = structured_to_unstructured(d[['x0', 'x1', 'x2']]) + assert_(np.shares_memory(dd, d)) + assert_equal(dd, [[1, 2, 3, 4, 5, 6, 7], + [8, 9, 10, 11, 12, 13, 14]]) + + # test that nested fields with identical names don't break anything + point = np.dtype([('x', int), ('y', int)]) + triangle = np.dtype([('a', point), ('b', point), ('c', point)]) + arr = np.zeros(10, triangle) + res = structured_to_unstructured(arr, dtype=int) + assert_equal(res, np.zeros((10, 6), dtype=int)) + + # test nested combinations of subarrays and structured arrays, gh-13333 + def subarray(dt, shape): + return np.dtype((dt, shape)) + + def structured(*dts): + return np.dtype([(f'x{i}', dt) for i, dt in enumerate(dts)]) + + def inspect(dt, dtype=None): + arr = np.zeros((), dt) + ret = structured_to_unstructured(arr, dtype=dtype) + backarr = unstructured_to_structured(ret, dt) + return ret.shape, ret.dtype, backarr.dtype + + dt = structured(subarray(structured(np.int32, np.int32), 3)) + assert_equal(inspect(dt), ((6,), np.int32, dt)) + + dt = structured(subarray(subarray(np.int32, 2), 2)) + assert_equal(inspect(dt), ((4,), np.int32, dt)) + + dt = structured(np.int32) + assert_equal(inspect(dt), ((1,), np.int32, dt)) + + dt = structured(np.int32, subarray(subarray(np.int32, 2), 2)) + assert_equal(inspect(dt), ((5,), np.int32, dt)) + + dt = structured() + assert_raises(ValueError, structured_to_unstructured, np.zeros(3, dt)) + + # these currently don't work, but we may make it work in the future + assert_raises(NotImplementedError, structured_to_unstructured, + np.zeros(3, dt), dtype=np.int32) + assert_raises(NotImplementedError, unstructured_to_structured, + np.zeros((3, 0), dtype=np.int32)) + + # test supported ndarray subclasses + d_plain = np.array([(1, 2), (3, 4)], dtype=[('a', 'i4'), ('b', 'i4')]) + dd_expected = structured_to_unstructured(d_plain, copy=True) + + # recarray + d = d_plain.view(np.recarray) + + dd = structured_to_unstructured(d, copy=False) + ddd = structured_to_unstructured(d, copy=True) + assert_(np.shares_memory(d, dd)) + assert_(type(dd) is np.recarray) + assert_(type(ddd) is np.recarray) + assert_equal(dd, dd_expected) + assert_equal(ddd, dd_expected) + + # memmap + d = np.memmap(tmp_path / 'memmap', + mode='w+', + dtype=d_plain.dtype, + shape=d_plain.shape) + d[:] = d_plain + dd = structured_to_unstructured(d, copy=False) + ddd = structured_to_unstructured(d, copy=True) + assert_(np.shares_memory(d, dd)) + assert_(type(dd) is np.memmap) + assert_(type(ddd) is np.memmap) + assert_equal(dd, dd_expected) + assert_equal(ddd, dd_expected) + + def test_unstructured_to_structured(self): + # test if dtype is the args of np.dtype + a = np.zeros((20, 2)) + test_dtype_args = [('x', float), ('y', float)] + test_dtype = np.dtype(test_dtype_args) + field1 = unstructured_to_structured(a, dtype=test_dtype_args) # now + field2 = unstructured_to_structured(a, dtype=test_dtype) # before + assert_equal(field1, field2) + + def test_field_assignment_by_name(self): + a = np.ones(2, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')]) + newdt = [('b', 'f4'), ('c', 'u1')] + + assert_equal(require_fields(a, newdt), np.ones(2, newdt)) + + b = np.array([(1, 2), (3, 4)], dtype=newdt) + assign_fields_by_name(a, b, zero_unassigned=False) + assert_equal(a, np.array([(1, 1, 2), (1, 3, 4)], dtype=a.dtype)) + assign_fields_by_name(a, b) + assert_equal(a, np.array([(0, 1, 2), (0, 3, 4)], dtype=a.dtype)) + + # test nested fields + a = np.ones(2, dtype=[('a', [('b', 'f8'), ('c', 'u1')])]) + newdt = [('a', [('c', 'u1')])] + assert_equal(require_fields(a, newdt), np.ones(2, newdt)) + b = np.array([((2,),), ((3,),)], dtype=newdt) + assign_fields_by_name(a, b, zero_unassigned=False) + assert_equal(a, np.array([((1, 2),), ((1, 3),)], dtype=a.dtype)) + assign_fields_by_name(a, b) + assert_equal(a, np.array([((0, 2),), ((0, 3),)], dtype=a.dtype)) + + # test unstructured code path for 0d arrays + a, b = np.array(3), np.array(0) + assign_fields_by_name(b, a) + assert_equal(b[()], 3) + + +class TestRecursiveFillFields: + # Test recursive_fill_fields. + def test_simple_flexible(self): + # Test recursive_fill_fields on flexible-array + a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)]) + b = np.zeros((3,), dtype=a.dtype) + test = recursive_fill_fields(a, b) + control = np.array([(1, 10.), (2, 20.), (0, 0.)], + dtype=[('A', int), ('B', float)]) + assert_equal(test, control) + + def test_masked_flexible(self): + # Test recursive_fill_fields on masked flexible-array + a = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)], + dtype=[('A', int), ('B', float)]) + b = ma.zeros((3,), dtype=a.dtype) + test = recursive_fill_fields(a, b) + control = ma.array([(1, 10.), (2, 20.), (0, 0.)], + mask=[(0, 1), (1, 0), (0, 0)], + dtype=[('A', int), ('B', float)]) + assert_equal(test, control) + + +class TestMergeArrays: + # Test merge_arrays + + def _create_arrays(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array( + [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) + w = np.array( + [(1, (2, 3.0, ())), (4, (5, 6.0, ()))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int), ('bc', [])])]) + return w, x, y, z + + def test_solo(self): + # Test merge_arrays on a single array. + _, x, _, z = self._create_arrays() + + test = merge_arrays(x) + control = np.array([(1,), (2,)], dtype=[('f0', int)]) + assert_equal(test, control) + test = merge_arrays((x,)) + assert_equal(test, control) + + test = merge_arrays(z, flatten=False) + assert_equal(test, z) + test = merge_arrays(z, flatten=True) + assert_equal(test, z) + + def test_solo_w_flatten(self): + # Test merge_arrays on a single array w & w/o flattening + w = self._create_arrays()[0] + test = merge_arrays(w, flatten=False) + assert_equal(test, w) + + test = merge_arrays(w, flatten=True) + control = np.array([(1, 2, 3.0), (4, 5, 6.0)], + dtype=[('a', int), ('ba', float), ('bb', int)]) + assert_equal(test, control) + + def test_standard(self): + # Test standard & standard + # Test merge arrays + _, x, y, _ = self._create_arrays() + test = merge_arrays((x, y), usemask=False) + control = np.array([(1, 10), (2, 20), (-1, 30)], + dtype=[('f0', int), ('f1', int)]) + assert_equal(test, control) + + test = merge_arrays((x, y), usemask=True) + control = ma.array([(1, 10), (2, 20), (-1, 30)], + mask=[(0, 0), (0, 0), (1, 0)], + dtype=[('f0', int), ('f1', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_flatten(self): + # Test standard & flexible + _, x, _, z = self._create_arrays() + test = merge_arrays((x, z), flatten=True) + control = np.array([(1, 'A', 1.), (2, 'B', 2.)], + dtype=[('f0', int), ('A', '|S3'), ('B', float)]) + assert_equal(test, control) + + test = merge_arrays((x, z), flatten=False) + control = np.array([(1, ('A', 1.)), (2, ('B', 2.))], + dtype=[('f0', int), + ('f1', [('A', '|S3'), ('B', float)])]) + assert_equal(test, control) + + def test_flatten_wflexible(self): + # Test flatten standard & nested + w, x, _, _ = self._create_arrays() + test = merge_arrays((x, w), flatten=True) + control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)], + dtype=[('f0', int), + ('a', int), ('ba', float), ('bb', int)]) + assert_equal(test, control) + + test = merge_arrays((x, w), flatten=False) + f1_descr = [('a', int), ('b', [('ba', float), ('bb', int), ('bc', [])])] + controldtype = [('f0', int), ('f1', f1_descr)] + control = np.array([(1., (1, (2, 3.0, ()))), (2, (4, (5, 6.0, ())))], + dtype=controldtype) + assert_equal(test, control) + + def test_wmasked_arrays(self): + # Test merge_arrays masked arrays + x = self._create_arrays()[1] + mx = ma.array([1, 2, 3], mask=[1, 0, 0]) + test = merge_arrays((x, mx), usemask=True) + control = ma.array([(1, 1), (2, 2), (-1, 3)], + mask=[(0, 1), (0, 0), (1, 0)], + dtype=[('f0', int), ('f1', int)]) + assert_equal(test, control) + test = merge_arrays((x, mx), usemask=True, asrecarray=True) + assert_equal(test, control) + assert_(isinstance(test, MaskedRecords)) + + def test_w_singlefield(self): + # Test single field + test = merge_arrays((np.array([1, 2]).view([('a', int)]), + np.array([10., 20., 30.])),) + control = ma.array([(1, 10.), (2, 20.), (-1, 30.)], + mask=[(0, 0), (0, 0), (1, 0)], + dtype=[('a', int), ('f1', float)]) + assert_equal(test, control) + + def test_w_shorter_flex(self): + # Test merge_arrays w/ a shorter flexndarray. + z = self._create_arrays()[-1] + + # Fixme, this test looks incomplete and broken + #test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) + #control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)], + # dtype=[('A', '|S3'), ('B', float), ('C', int)]) + #assert_equal(test, control) + + merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) + np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)], + dtype=[('A', '|S3'), ('B', float), ('C', int)]) + + def test_singlerecord(self): + _, x, y, z = self._create_arrays() + test = merge_arrays((x[0], y[0], z[0]), usemask=False) + control = np.array([(1, 10, ('A', 1))], + dtype=[('f0', int), + ('f1', int), + ('f2', [('A', '|S3'), ('B', float)])]) + assert_equal(test, control) + + +class TestAppendFields: + # Test append_fields + + def _create_arrays(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array( + [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) + w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + return w, x, y, z + + def test_append_single(self): + # Test simple case + x = self._create_arrays()[1] + test = append_fields(x, 'A', data=[10, 20, 30]) + control = ma.array([(1, 10), (2, 20), (-1, 30)], + mask=[(0, 0), (0, 0), (1, 0)], + dtype=[('f0', int), ('A', int)],) + assert_equal(test, control) + + def test_append_double(self): + # Test simple case + x = self._create_arrays()[1] + test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]]) + control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)], + mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)], + dtype=[('f0', int), ('A', int), ('B', int)],) + assert_equal(test, control) + + def test_append_on_flex(self): + # Test append_fields on flexible type arrays + z = self._create_arrays()[-1] + test = append_fields(z, 'C', data=[10, 20, 30]) + control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)], + mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)], + dtype=[('A', '|S3'), ('B', float), ('C', int)],) + assert_equal(test, control) + + def test_append_on_nested(self): + # Test append_fields on nested fields + w = self._create_arrays()[0] + test = append_fields(w, 'C', data=[10, 20, 30]) + control = ma.array([(1, (2, 3.0), 10), + (4, (5, 6.0), 20), + (-1, (-1, -1.), 30)], + mask=[( + 0, (0, 0), 0), (0, (0, 0), 0), (1, (1, 1), 0)], + dtype=[('a', int), + ('b', [('ba', float), ('bb', int)]), + ('C', int)],) + assert_equal(test, control) + + +class TestStackArrays: + # Test stack_arrays + def _create_arrays(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array( + [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) + w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + return w, x, y, z + + def test_solo(self): + # Test stack_arrays on single arrays + x = self._create_arrays()[1] + test = stack_arrays((x,)) + assert_equal(test, x) + assert_(test is x) + + test = stack_arrays(x) + assert_equal(test, x) + assert_(test is x) + + def test_unnamed_fields(self): + # Tests combinations of arrays w/o named fields + _, x, y, _ = self._create_arrays() + + test = stack_arrays((x, x), usemask=False) + control = np.array([1, 2, 1, 2]) + assert_equal(test, control) + + test = stack_arrays((x, y), usemask=False) + control = np.array([1, 2, 10, 20, 30]) + assert_equal(test, control) + + test = stack_arrays((y, x), usemask=False) + control = np.array([10, 20, 30, 1, 2]) + assert_equal(test, control) + + def test_unnamed_and_named_fields(self): + # Test combination of arrays w/ & w/o named fields + _, x, _, z = self._create_arrays() + + test = stack_arrays((x, z)) + control = ma.array([(1, -1, -1), (2, -1, -1), + (-1, 'A', 1), (-1, 'B', 2)], + mask=[(0, 1, 1), (0, 1, 1), + (1, 0, 0), (1, 0, 0)], + dtype=[('f0', int), ('A', '|S3'), ('B', float)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + test = stack_arrays((z, x)) + control = ma.array([('A', 1, -1), ('B', 2, -1), + (-1, -1, 1), (-1, -1, 2), ], + mask=[(0, 0, 1), (0, 0, 1), + (1, 1, 0), (1, 1, 0)], + dtype=[('A', '|S3'), ('B', float), ('f2', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + test = stack_arrays((z, z, x)) + control = ma.array([('A', 1, -1), ('B', 2, -1), + ('A', 1, -1), ('B', 2, -1), + (-1, -1, 1), (-1, -1, 2), ], + mask=[(0, 0, 1), (0, 0, 1), + (0, 0, 1), (0, 0, 1), + (1, 1, 0), (1, 1, 0)], + dtype=[('A', '|S3'), ('B', float), ('f2', int)]) + assert_equal(test, control) + + def test_matching_named_fields(self): + # Test combination of arrays w/ matching field names + _, x, _, z = self._create_arrays() + zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)]) + test = stack_arrays((z, zz)) + control = ma.array([('A', 1, -1), ('B', 2, -1), + ( + 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)], + mask=[(0, 0, 1), (0, 0, 1), + (0, 0, 0), (0, 0, 0), (0, 0, 0)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + test = stack_arrays((z, zz, x)) + ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)] + control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1), + ('a', 10., 100., -1), ('b', 20., 200., -1), + ('c', 30., 300., -1), + (-1, -1, -1, 1), (-1, -1, -1, 2)], + dtype=ndtype, + mask=[(0, 0, 1, 1), (0, 0, 1, 1), + (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), + (1, 1, 1, 0), (1, 1, 1, 0)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_defaults(self): + # Test defaults: no exception raised if keys of defaults are not fields. + z = self._create_arrays()[-1] + zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)]) + defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.} + test = stack_arrays((z, zz), defaults=defaults) + control = ma.array([('A', 1, -9999.), ('B', 2, -9999.), + ( + 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)], + mask=[(0, 0, 1), (0, 0, 1), + (0, 0, 0), (0, 0, 0), (0, 0, 0)]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + def test_autoconversion(self): + # Tests autoconversion + adtype = [('A', int), ('B', bool), ('C', float)] + a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) + bdtype = [('A', int), ('B', float), ('C', float)] + b = ma.array([(4, 5, 6)], dtype=bdtype) + control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], + dtype=bdtype) + test = stack_arrays((a, b), autoconvert=True) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + with assert_raises(TypeError): + stack_arrays((a, b), autoconvert=False) + + def test_checktitles(self): + # Test using titles in the field names + adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] + a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) + bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] + b = ma.array([(4, 5, 6)], dtype=bdtype) + test = stack_arrays((a, b)) + control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], + dtype=bdtype) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_subdtype(self): + z = np.array([ + ('A', 1), ('B', 2) + ], dtype=[('A', '|S3'), ('B', float, (1,))]) + zz = np.array([ + ('a', [10.], 100.), ('b', [20.], 200.), ('c', [30.], 300.) + ], dtype=[('A', '|S3'), ('B', float, (1,)), ('C', float)]) + + res = stack_arrays((z, zz)) + expected = ma.array( + data=[ + (b'A', [1.0], 0), + (b'B', [2.0], 0), + (b'a', [10.0], 100.0), + (b'b', [20.0], 200.0), + (b'c', [30.0], 300.0)], + mask=[ + (False, [False], True), + (False, [False], True), + (False, [False], False), + (False, [False], False), + (False, [False], False) + ], + dtype=zz.dtype + ) + assert_equal(res.dtype, expected.dtype) + assert_equal(res, expected) + assert_equal(res.mask, expected.mask) + + +class TestJoinBy: + def _create_arrays(self): + a = np.array(list(zip(np.arange(10), np.arange(50, 60), + np.arange(100, 110))), + dtype=[('a', int), ('b', int), ('c', int)]) + b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75), + np.arange(100, 110))), + dtype=[('a', int), ('b', int), ('d', int)]) + return a, b + + def test_inner_join(self): + # Basic test of join_by + a, b = self._create_arrays() + test = join_by('a', a, b, jointype='inner') + control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101), + (7, 57, 67, 107, 102), (8, 58, 68, 108, 103), + (9, 59, 69, 109, 104)], + dtype=[('a', int), ('b1', int), ('b2', int), + ('c', int), ('d', int)]) + assert_equal(test, control) + + def test_join(self): + a, b = self._create_arrays() + # Fixme, this test is broken + #test = join_by(('a', 'b'), a, b) + #control = np.array([(5, 55, 105, 100), (6, 56, 106, 101), + # (7, 57, 107, 102), (8, 58, 108, 103), + # (9, 59, 109, 104)], + # dtype=[('a', int), ('b', int), + # ('c', int), ('d', int)]) + #assert_equal(test, control) + join_by(('a', 'b'), a, b) + np.array([(5, 55, 105, 100), (6, 56, 106, 101), + (7, 57, 107, 102), (8, 58, 108, 103), + (9, 59, 109, 104)], + dtype=[('a', int), ('b', int), + ('c', int), ('d', int)]) + + def test_join_subdtype(self): + # tests the bug in https://stackoverflow.com/q/44769632/102441 + foo = np.array([(1,)], + dtype=[('key', int)]) + bar = np.array([(1, np.array([1, 2, 3]))], + dtype=[('key', int), ('value', 'uint16', 3)]) + res = join_by('key', foo, bar) + assert_equal(res, bar.view(ma.MaskedArray)) + + def test_outer_join(self): + a, b = self._create_arrays() + test = join_by(('a', 'b'), a, b, 'outer') + control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), + (2, 52, 102, -1), (3, 53, 103, -1), + (4, 54, 104, -1), (5, 55, 105, -1), + (5, 65, -1, 100), (6, 56, 106, -1), + (6, 66, -1, 101), (7, 57, 107, -1), + (7, 67, -1, 102), (8, 58, 108, -1), + (8, 68, -1, 103), (9, 59, 109, -1), + (9, 69, -1, 104), (10, 70, -1, 105), + (11, 71, -1, 106), (12, 72, -1, 107), + (13, 73, -1, 108), (14, 74, -1, 109)], + mask=[(0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 1, 0), + (0, 0, 1, 0), (0, 0, 1, 0), + (0, 0, 1, 0), (0, 0, 1, 0)], + dtype=[('a', int), ('b', int), + ('c', int), ('d', int)]) + assert_equal(test, control) + + def test_leftouter_join(self): + a, b = self._create_arrays() + test = join_by(('a', 'b'), a, b, 'leftouter') + control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), + (2, 52, 102, -1), (3, 53, 103, -1), + (4, 54, 104, -1), (5, 55, 105, -1), + (6, 56, 106, -1), (7, 57, 107, -1), + (8, 58, 108, -1), (9, 59, 109, -1)], + mask=[(0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1)], + dtype=[('a', int), ('b', int), ('c', int), ('d', int)]) + assert_equal(test, control) + + def test_different_field_order(self): + # gh-8940 + a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')]) + b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')]) + # this should not give a FutureWarning: + j = join_by(['c', 'b'], a, b, jointype='inner', usemask=False) + assert_equal(j.dtype.names, ['b', 'c', 'a1', 'a2']) + + def test_duplicate_keys(self): + a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')]) + b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')]) + assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b) + + def test_same_name_different_dtypes_key(self): + a_dtype = np.dtype([('key', 'S5'), ('value', ' 2**32 + + +def _add_keepdims(func): + """ hack in keepdims behavior into a function taking an axis """ + @functools.wraps(func) + def wrapped(a, axis, **kwargs): + res = func(a, axis=axis, **kwargs) + if axis is None: + axis = 0 # res is now a scalar, so we can insert this anywhere + return np.expand_dims(res, axis=axis) + return wrapped + + +class TestTakeAlongAxis: + def test_argequivalent(self): + """ Test it translates from arg to """ + from numpy.random import rand + a = rand(3, 4, 5) + + funcs = [ + (np.sort, np.argsort, {}), + (_add_keepdims(np.min), _add_keepdims(np.argmin), {}), + (_add_keepdims(np.max), _add_keepdims(np.argmax), {}), + #(np.partition, np.argpartition, dict(kth=2)), + ] + + for func, argfunc, kwargs in funcs: + for axis in list(range(a.ndim)) + [None]: + a_func = func(a, axis=axis, **kwargs) + ai_func = argfunc(a, axis=axis, **kwargs) + assert_equal(a_func, take_along_axis(a, ai_func, axis=axis)) + + def test_invalid(self): + """ Test it errors when indices has too few dimensions """ + a = np.ones((10, 10)) + ai = np.ones((10, 2), dtype=np.intp) + + # sanity check + take_along_axis(a, ai, axis=1) + + # not enough indices + assert_raises(ValueError, take_along_axis, a, np.array(1), axis=1) + # bool arrays not allowed + assert_raises(IndexError, take_along_axis, a, ai.astype(bool), axis=1) + # float arrays not allowed + assert_raises(IndexError, take_along_axis, a, ai.astype(float), axis=1) + # invalid axis + assert_raises(AxisError, take_along_axis, a, ai, axis=10) + # invalid indices + assert_raises(ValueError, take_along_axis, a, ai, axis=None) + + def test_empty(self): + """ Test everything is ok with empty results, even with inserted dims """ + a = np.ones((3, 4, 5)) + ai = np.ones((3, 0, 5), dtype=np.intp) + + actual = take_along_axis(a, ai, axis=1) + assert_equal(actual.shape, ai.shape) + + def test_broadcast(self): + """ Test that non-indexing dimensions are broadcast in both directions """ + a = np.ones((3, 4, 1)) + ai = np.ones((1, 2, 5), dtype=np.intp) + actual = take_along_axis(a, ai, axis=1) + assert_equal(actual.shape, (3, 2, 5)) + + +class TestPutAlongAxis: + def test_replace_max(self): + a_base = np.array([[10, 30, 20], [60, 40, 50]]) + + for axis in list(range(a_base.ndim)) + [None]: + # we mutate this in the loop + a = a_base.copy() + + # replace the max with a small value + i_max = _add_keepdims(np.argmax)(a, axis=axis) + put_along_axis(a, i_max, -99, axis=axis) + + # find the new minimum, which should max + i_min = _add_keepdims(np.argmin)(a, axis=axis) + + assert_equal(i_min, i_max) + + def test_broadcast(self): + """ Test that non-indexing dimensions are broadcast in both directions """ + a = np.ones((3, 4, 1)) + ai = np.arange(10, dtype=np.intp).reshape((1, 2, 5)) % 4 + put_along_axis(a, ai, 20, axis=1) + assert_equal(take_along_axis(a, ai, axis=1), 20) + + def test_invalid(self): + """ Test invalid inputs """ + a_base = np.array([[10, 30, 20], [60, 40, 50]]) + indices = np.array([[0], [1]]) + values = np.array([[2], [1]]) + + # sanity check + a = a_base.copy() + put_along_axis(a, indices, values, axis=0) + assert np.all(a == [[2, 2, 2], [1, 1, 1]]) + + # invalid indices + a = a_base.copy() + with assert_raises(ValueError) as exc: + put_along_axis(a, indices, values, axis=None) + assert "single dimension" in str(exc.exception) + + +class TestApplyAlongAxis: + def test_simple(self): + a = np.ones((20, 10), 'd') + assert_array_equal( + apply_along_axis(len, 0, a), len(a) * np.ones(a.shape[1])) + + def test_simple101(self): + a = np.ones((10, 101), 'd') + assert_array_equal( + apply_along_axis(len, 0, a), len(a) * np.ones(a.shape[1])) + + def test_3d(self): + a = np.arange(27).reshape((3, 3, 3)) + assert_array_equal(apply_along_axis(np.sum, 0, a), + [[27, 30, 33], [36, 39, 42], [45, 48, 51]]) + + def test_preserve_subclass(self): + def double(row): + return row * 2 + + class MyNDArray(np.ndarray): + pass + + m = np.array([[0, 1], [2, 3]]).view(MyNDArray) + expected = np.array([[0, 2], [4, 6]]).view(MyNDArray) + + result = apply_along_axis(double, 0, m) + assert_(isinstance(result, MyNDArray)) + assert_array_equal(result, expected) + + result = apply_along_axis(double, 1, m) + assert_(isinstance(result, MyNDArray)) + assert_array_equal(result, expected) + + def test_subclass(self): + class MinimalSubclass(np.ndarray): + data = 1 + + def minimal_function(array): + return array.data + + a = np.zeros((6, 3)).view(MinimalSubclass) + + assert_array_equal( + apply_along_axis(minimal_function, 0, a), np.array([1, 1, 1]) + ) + + def test_scalar_array(self, cls=np.ndarray): + a = np.ones((6, 3)).view(cls) + res = apply_along_axis(np.sum, 0, a) + assert_(isinstance(res, cls)) + assert_array_equal(res, np.array([6, 6, 6]).view(cls)) + + def test_0d_array(self, cls=np.ndarray): + def sum_to_0d(x): + """ Sum x, returning a 0d array of the same class """ + assert_equal(x.ndim, 1) + return np.squeeze(np.sum(x, keepdims=True)) + a = np.ones((6, 3)).view(cls) + res = apply_along_axis(sum_to_0d, 0, a) + assert_(isinstance(res, cls)) + assert_array_equal(res, np.array([6, 6, 6]).view(cls)) + + res = apply_along_axis(sum_to_0d, 1, a) + assert_(isinstance(res, cls)) + assert_array_equal(res, np.array([3, 3, 3, 3, 3, 3]).view(cls)) + + def test_axis_insertion(self, cls=np.ndarray): + def f1to2(x): + """produces an asymmetric non-square matrix from x""" + assert_equal(x.ndim, 1) + return (x[::-1] * x[1:, None]).view(cls) + + a2d = np.arange(6 * 3).reshape((6, 3)) + + # 2d insertion along first axis + actual = apply_along_axis(f1to2, 0, a2d) + expected = np.stack([ + f1to2(a2d[:, i]) for i in range(a2d.shape[1]) + ], axis=-1).view(cls) + assert_equal(type(actual), type(expected)) + assert_equal(actual, expected) + + # 2d insertion along last axis + actual = apply_along_axis(f1to2, 1, a2d) + expected = np.stack([ + f1to2(a2d[i, :]) for i in range(a2d.shape[0]) + ], axis=0).view(cls) + assert_equal(type(actual), type(expected)) + assert_equal(actual, expected) + + # 3d insertion along middle axis + a3d = np.arange(6 * 5 * 3).reshape((6, 5, 3)) + + actual = apply_along_axis(f1to2, 1, a3d) + expected = np.stack([ + np.stack([ + f1to2(a3d[i, :, j]) for i in range(a3d.shape[0]) + ], axis=0) + for j in range(a3d.shape[2]) + ], axis=-1).view(cls) + assert_equal(type(actual), type(expected)) + assert_equal(actual, expected) + + def test_subclass_preservation(self): + class MinimalSubclass(np.ndarray): + pass + self.test_scalar_array(MinimalSubclass) + self.test_0d_array(MinimalSubclass) + self.test_axis_insertion(MinimalSubclass) + + def test_axis_insertion_ma(self): + def f1to2(x): + """produces an asymmetric non-square matrix from x""" + assert_equal(x.ndim, 1) + res = x[::-1] * x[1:, None] + return np.ma.masked_where(res % 5 == 0, res) + a = np.arange(6 * 3).reshape((6, 3)) + res = apply_along_axis(f1to2, 0, a) + assert_(isinstance(res, np.ma.masked_array)) + assert_equal(res.ndim, 3) + assert_array_equal(res[:, :, 0].mask, f1to2(a[:, 0]).mask) + assert_array_equal(res[:, :, 1].mask, f1to2(a[:, 1]).mask) + assert_array_equal(res[:, :, 2].mask, f1to2(a[:, 2]).mask) + + def test_tuple_func1d(self): + def sample_1d(x): + return x[1], x[0] + res = np.apply_along_axis(sample_1d, 1, np.array([[1, 2], [3, 4]])) + assert_array_equal(res, np.array([[2, 1], [4, 3]])) + + def test_empty(self): + # can't apply_along_axis when there's no chance to call the function + def never_call(x): + assert_(False) # should never be reached + + a = np.empty((0, 0)) + assert_raises(ValueError, np.apply_along_axis, never_call, 0, a) + assert_raises(ValueError, np.apply_along_axis, never_call, 1, a) + + # but it's sometimes ok with some non-zero dimensions + def empty_to_1(x): + assert_(len(x) == 0) + return 1 + + a = np.empty((10, 0)) + actual = np.apply_along_axis(empty_to_1, 1, a) + assert_equal(actual, np.ones(10)) + assert_raises(ValueError, np.apply_along_axis, empty_to_1, 0, a) + + def test_with_iterable_object(self): + # from issue 5248 + d = np.array([ + [{1, 11}, {2, 22}, {3, 33}], + [{4, 44}, {5, 55}, {6, 66}] + ]) + actual = np.apply_along_axis(lambda a: set.union(*a), 0, d) + expected = np.array([{1, 11, 4, 44}, {2, 22, 5, 55}, {3, 33, 6, 66}]) + + assert_equal(actual, expected) + + # issue 8642 - assert_equal doesn't detect this! + for i in np.ndindex(actual.shape): + assert_equal(type(actual[i]), type(expected[i])) + + +class TestApplyOverAxes: + def test_simple(self): + a = np.arange(24).reshape(2, 3, 4) + aoa_a = apply_over_axes(np.sum, a, [0, 2]) + assert_array_equal(aoa_a, np.array([[[60], [92], [124]]])) + + +class TestExpandDims: + def test_functionality(self): + s = (2, 3, 4, 5) + a = np.empty(s) + for axis in range(-5, 4): + b = expand_dims(a, axis) + assert_(b.shape[axis] == 1) + assert_(np.squeeze(b).shape == s) + + def test_axis_tuple(self): + a = np.empty((3, 3, 3)) + assert np.expand_dims(a, axis=(0, 1, 2)).shape == (1, 1, 1, 3, 3, 3) + assert np.expand_dims(a, axis=(0, -1, -2)).shape == (1, 3, 3, 3, 1, 1) + assert np.expand_dims(a, axis=(0, 3, 5)).shape == (1, 3, 3, 1, 3, 1) + assert np.expand_dims(a, axis=(0, -3, -5)).shape == (1, 1, 3, 1, 3, 3) + + def test_axis_out_of_range(self): + s = (2, 3, 4, 5) + a = np.empty(s) + assert_raises(AxisError, expand_dims, a, -6) + assert_raises(AxisError, expand_dims, a, 5) + + a = np.empty((3, 3, 3)) + assert_raises(AxisError, expand_dims, a, (0, -6)) + assert_raises(AxisError, expand_dims, a, (0, 5)) + + def test_repeated_axis(self): + a = np.empty((3, 3, 3)) + assert_raises(ValueError, expand_dims, a, axis=(1, 1)) + + def test_subclasses(self): + a = np.arange(10).reshape((2, 5)) + a = np.ma.array(a, mask=a % 3 == 0) + + expanded = np.expand_dims(a, axis=1) + assert_(isinstance(expanded, np.ma.MaskedArray)) + assert_equal(expanded.shape, (2, 1, 5)) + assert_equal(expanded.mask.shape, (2, 1, 5)) + + +class TestArraySplit: + def test_integer_0_split(self): + a = np.arange(10) + assert_raises(ValueError, array_split, a, 0) + + def test_integer_split(self): + a = np.arange(10) + res = array_split(a, 1) + desired = [np.arange(10)] + compare_results(res, desired) + + res = array_split(a, 2) + desired = [np.arange(5), np.arange(5, 10)] + compare_results(res, desired) + + res = array_split(a, 3) + desired = [np.arange(4), np.arange(4, 7), np.arange(7, 10)] + compare_results(res, desired) + + res = array_split(a, 4) + desired = [np.arange(3), np.arange(3, 6), np.arange(6, 8), + np.arange(8, 10)] + compare_results(res, desired) + + res = array_split(a, 5) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), + np.arange(6, 8), np.arange(8, 10)] + compare_results(res, desired) + + res = array_split(a, 6) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), + np.arange(6, 8), np.arange(8, 9), np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 7) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), + np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), + np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 8) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 5), + np.arange(5, 6), np.arange(6, 7), np.arange(7, 8), + np.arange(8, 9), np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 9) + desired = [np.arange(2), np.arange(2, 3), np.arange(3, 4), + np.arange(4, 5), np.arange(5, 6), np.arange(6, 7), + np.arange(7, 8), np.arange(8, 9), np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 10) + desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3), + np.arange(3, 4), np.arange(4, 5), np.arange(5, 6), + np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), + np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 11) + desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3), + np.arange(3, 4), np.arange(4, 5), np.arange(5, 6), + np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), + np.arange(9, 10), np.array([])] + compare_results(res, desired) + + def test_integer_split_2D_rows(self): + a = np.array([np.arange(10), np.arange(10)]) + res = array_split(a, 3, axis=0) + tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]), + np.zeros((0, 10))] + compare_results(res, tgt) + assert_(a.dtype.type is res[-1].dtype.type) + + # Same thing for manual splits: + res = array_split(a, [0, 1], axis=0) + tgt = [np.zeros((0, 10)), np.array([np.arange(10)]), + np.array([np.arange(10)])] + compare_results(res, tgt) + assert_(a.dtype.type is res[-1].dtype.type) + + def test_integer_split_2D_cols(self): + a = np.array([np.arange(10), np.arange(10)]) + res = array_split(a, 3, axis=-1) + desired = [np.array([np.arange(4), np.arange(4)]), + np.array([np.arange(4, 7), np.arange(4, 7)]), + np.array([np.arange(7, 10), np.arange(7, 10)])] + compare_results(res, desired) + + def test_integer_split_2D_default(self): + """ This will fail if we change default axis + """ + a = np.array([np.arange(10), np.arange(10)]) + res = array_split(a, 3) + tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]), + np.zeros((0, 10))] + compare_results(res, tgt) + assert_(a.dtype.type is res[-1].dtype.type) + # perhaps should check higher dimensions + + @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") + def test_integer_split_2D_rows_greater_max_int32(self): + a = np.broadcast_to([0], (1 << 32, 2)) + res = array_split(a, 4) + chunk = np.broadcast_to([0], (1 << 30, 2)) + tgt = [chunk] * 4 + for i in range(len(tgt)): + assert_equal(res[i].shape, tgt[i].shape) + + def test_index_split_simple(self): + a = np.arange(10) + indices = [1, 5, 7] + res = array_split(a, indices, axis=-1) + desired = [np.arange(0, 1), np.arange(1, 5), np.arange(5, 7), + np.arange(7, 10)] + compare_results(res, desired) + + def test_index_split_low_bound(self): + a = np.arange(10) + indices = [0, 5, 7] + res = array_split(a, indices, axis=-1) + desired = [np.array([]), np.arange(0, 5), np.arange(5, 7), + np.arange(7, 10)] + compare_results(res, desired) + + def test_index_split_high_bound(self): + a = np.arange(10) + indices = [0, 5, 7, 10, 12] + res = array_split(a, indices, axis=-1) + desired = [np.array([]), np.arange(0, 5), np.arange(5, 7), + np.arange(7, 10), np.array([]), np.array([])] + compare_results(res, desired) + + +class TestSplit: + # The split function is essentially the same as array_split, + # except that it test if splitting will result in an + # equal split. Only test for this case. + + def test_equal_split(self): + a = np.arange(10) + res = split(a, 2) + desired = [np.arange(5), np.arange(5, 10)] + compare_results(res, desired) + + def test_unequal_split(self): + a = np.arange(10) + assert_raises(ValueError, split, a, 3) + + +class TestColumnStack: + def test_non_iterable(self): + assert_raises(TypeError, column_stack, 1) + + def test_1D_arrays(self): + # example from docstring + a = np.array((1, 2, 3)) + b = np.array((2, 3, 4)) + expected = np.array([[1, 2], + [2, 3], + [3, 4]]) + actual = np.column_stack((a, b)) + assert_equal(actual, expected) + + def test_2D_arrays(self): + # same as hstack 2D docstring example + a = np.array([[1], [2], [3]]) + b = np.array([[2], [3], [4]]) + expected = np.array([[1, 2], + [2, 3], + [3, 4]]) + actual = np.column_stack((a, b)) + assert_equal(actual, expected) + + def test_generator(self): + with pytest.raises(TypeError, match="arrays to stack must be"): + column_stack(np.arange(3) for _ in range(2)) + + +class TestDstack: + def test_non_iterable(self): + assert_raises(TypeError, dstack, 1) + + def test_0D_array(self): + a = np.array(1) + b = np.array(2) + res = dstack([a, b]) + desired = np.array([[[1, 2]]]) + assert_array_equal(res, desired) + + def test_1D_array(self): + a = np.array([1]) + b = np.array([2]) + res = dstack([a, b]) + desired = np.array([[[1, 2]]]) + assert_array_equal(res, desired) + + def test_2D_array(self): + a = np.array([[1], [2]]) + b = np.array([[1], [2]]) + res = dstack([a, b]) + desired = np.array([[[1, 1]], [[2, 2, ]]]) + assert_array_equal(res, desired) + + def test_2D_array2(self): + a = np.array([1, 2]) + b = np.array([1, 2]) + res = dstack([a, b]) + desired = np.array([[[1, 1], [2, 2]]]) + assert_array_equal(res, desired) + + def test_generator(self): + with pytest.raises(TypeError, match="arrays to stack must be"): + dstack(np.arange(3) for _ in range(2)) + + +# array_split has more comprehensive test of splitting. +# only do simple test on hsplit, vsplit, and dsplit +class TestHsplit: + """Only testing for integer splits. + + """ + def test_non_iterable(self): + assert_raises(ValueError, hsplit, 1, 1) + + def test_0D_array(self): + a = np.array(1) + try: + hsplit(a, 2) + assert_(0) + except ValueError: + pass + + def test_1D_array(self): + a = np.array([1, 2, 3, 4]) + res = hsplit(a, 2) + desired = [np.array([1, 2]), np.array([3, 4])] + compare_results(res, desired) + + def test_2D_array(self): + a = np.array([[1, 2, 3, 4], + [1, 2, 3, 4]]) + res = hsplit(a, 2) + desired = [np.array([[1, 2], [1, 2]]), np.array([[3, 4], [3, 4]])] + compare_results(res, desired) + + +class TestVsplit: + """Only testing for integer splits. + + """ + def test_non_iterable(self): + assert_raises(ValueError, vsplit, 1, 1) + + def test_0D_array(self): + a = np.array(1) + assert_raises(ValueError, vsplit, a, 2) + + def test_1D_array(self): + a = np.array([1, 2, 3, 4]) + try: + vsplit(a, 2) + assert_(0) + except ValueError: + pass + + def test_2D_array(self): + a = np.array([[1, 2, 3, 4], + [1, 2, 3, 4]]) + res = vsplit(a, 2) + desired = [np.array([[1, 2, 3, 4]]), np.array([[1, 2, 3, 4]])] + compare_results(res, desired) + + +class TestDsplit: + # Only testing for integer splits. + def test_non_iterable(self): + assert_raises(ValueError, dsplit, 1, 1) + + def test_0D_array(self): + a = np.array(1) + assert_raises(ValueError, dsplit, a, 2) + + def test_1D_array(self): + a = np.array([1, 2, 3, 4]) + assert_raises(ValueError, dsplit, a, 2) + + def test_2D_array(self): + a = np.array([[1, 2, 3, 4], + [1, 2, 3, 4]]) + try: + dsplit(a, 2) + assert_(0) + except ValueError: + pass + + def test_3D_array(self): + a = np.array([[[1, 2, 3, 4], + [1, 2, 3, 4]], + [[1, 2, 3, 4], + [1, 2, 3, 4]]]) + res = dsplit(a, 2) + desired = [np.array([[[1, 2], [1, 2]], [[1, 2], [1, 2]]]), + np.array([[[3, 4], [3, 4]], [[3, 4], [3, 4]]])] + compare_results(res, desired) + + +class TestSqueeze: + def test_basic(self): + from numpy.random import rand + + a = rand(20, 10, 10, 1, 1) + b = rand(20, 1, 10, 1, 20) + c = rand(1, 1, 20, 10) + assert_array_equal(np.squeeze(a), np.reshape(a, (20, 10, 10))) + assert_array_equal(np.squeeze(b), np.reshape(b, (20, 10, 20))) + assert_array_equal(np.squeeze(c), np.reshape(c, (20, 10))) + + # Squeezing to 0-dim should still give an ndarray + a = [[[1.5]]] + res = np.squeeze(a) + assert_equal(res, 1.5) + assert_equal(res.ndim, 0) + assert_equal(type(res), np.ndarray) + + +class TestKron: + def test_basic(self): + # Using 0-dimensional ndarray + a = np.array(1) + b = np.array([[1, 2], [3, 4]]) + k = np.array([[1, 2], [3, 4]]) + assert_array_equal(np.kron(a, b), k) + a = np.array([[1, 2], [3, 4]]) + b = np.array(1) + assert_array_equal(np.kron(a, b), k) + + # Using 1-dimensional ndarray + a = np.array([3]) + b = np.array([[1, 2], [3, 4]]) + k = np.array([[3, 6], [9, 12]]) + assert_array_equal(np.kron(a, b), k) + a = np.array([[1, 2], [3, 4]]) + b = np.array([3]) + assert_array_equal(np.kron(a, b), k) + + # Using 3-dimensional ndarray + a = np.array([[[1]], [[2]]]) + b = np.array([[1, 2], [3, 4]]) + k = np.array([[[1, 2], [3, 4]], [[2, 4], [6, 8]]]) + assert_array_equal(np.kron(a, b), k) + a = np.array([[1, 2], [3, 4]]) + b = np.array([[[1]], [[2]]]) + k = np.array([[[1, 2], [3, 4]], [[2, 4], [6, 8]]]) + assert_array_equal(np.kron(a, b), k) + + def test_return_type(self): + class myarray(np.ndarray): + __array_priority__ = 1.0 + + a = np.ones([2, 2]) + ma = myarray(a.shape, a.dtype, a.data) + assert_equal(type(kron(a, a)), np.ndarray) + assert_equal(type(kron(ma, ma)), myarray) + assert_equal(type(kron(a, ma)), myarray) + assert_equal(type(kron(ma, a)), myarray) + + @pytest.mark.parametrize( + "array_class", [np.asarray, np.asmatrix] + ) + def test_kron_smoke(self, array_class): + a = array_class(np.ones([3, 3])) + b = array_class(np.ones([3, 3])) + k = array_class(np.ones([9, 9])) + + assert_array_equal(np.kron(a, b), k) + + def test_kron_ma(self): + x = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]]) + k = np.ma.array(np.diag([1, 4, 4, 16]), + mask=~np.array(np.identity(4), dtype=bool)) + + assert_array_equal(k, np.kron(x, x)) + + @pytest.mark.parametrize( + "shape_a,shape_b", [ + ((1, 1), (1, 1)), + ((1, 2, 3), (4, 5, 6)), + ((2, 2), (2, 2, 2)), + ((1, 0), (1, 1)), + ((2, 0, 2), (2, 2)), + ((2, 0, 0, 2), (2, 0, 2)), + ]) + def test_kron_shape(self, shape_a, shape_b): + a = np.ones(shape_a) + b = np.ones(shape_b) + normalised_shape_a = (1,) * max(0, len(shape_b) - len(shape_a)) + shape_a + normalised_shape_b = (1,) * max(0, len(shape_a) - len(shape_b)) + shape_b + expected_shape = np.multiply(normalised_shape_a, normalised_shape_b) + + k = np.kron(a, b) + assert np.array_equal( + k.shape, expected_shape), "Unexpected shape from kron" + + +class TestTile: + def test_basic(self): + a = np.array([0, 1, 2]) + b = [[1, 2], [3, 4]] + assert_equal(tile(a, 2), [0, 1, 2, 0, 1, 2]) + assert_equal(tile(a, (2, 2)), [[0, 1, 2, 0, 1, 2], [0, 1, 2, 0, 1, 2]]) + assert_equal(tile(a, (1, 2)), [[0, 1, 2, 0, 1, 2]]) + assert_equal(tile(b, 2), [[1, 2, 1, 2], [3, 4, 3, 4]]) + assert_equal(tile(b, (2, 1)), [[1, 2], [3, 4], [1, 2], [3, 4]]) + assert_equal(tile(b, (2, 2)), [[1, 2, 1, 2], [3, 4, 3, 4], + [1, 2, 1, 2], [3, 4, 3, 4]]) + + def test_tile_one_repetition_on_array_gh4679(self): + a = np.arange(5) + b = tile(a, 1) + b += 2 + assert_equal(a, np.arange(5)) + + def test_empty(self): + a = np.array([[[]]]) + b = np.array([[], []]) + c = tile(b, 2).shape + d = tile(a, (3, 2, 5)).shape + assert_equal(c, (2, 0)) + assert_equal(d, (3, 2, 0)) + + def test_kroncompare(self): + from numpy.random import randint + + reps = [(2,), (1, 2), (2, 1), (2, 2), (2, 3, 2), (3, 2)] + shape = [(3,), (2, 3), (3, 4, 3), (3, 2, 3), (4, 3, 2, 4), (2, 2)] + for s in shape: + b = randint(0, 10, size=s) + for r in reps: + a = np.ones(r, b.dtype) + large = tile(b, r) + klarge = kron(a, b) + assert_equal(large, klarge) + + +class TestMayShareMemory: + def test_basic(self): + d = np.ones((50, 60)) + d2 = np.ones((30, 60, 6)) + assert_(np.may_share_memory(d, d)) + assert_(np.may_share_memory(d, d[::-1])) + assert_(np.may_share_memory(d, d[::2])) + assert_(np.may_share_memory(d, d[1:, ::-1])) + + assert_(not np.may_share_memory(d[::-1], d2)) + assert_(not np.may_share_memory(d[::2], d2)) + assert_(not np.may_share_memory(d[1:, ::-1], d2)) + assert_(np.may_share_memory(d2[1:, ::-1], d2)) + + +# Utility +def compare_results(res, desired): + """Compare lists of arrays.""" + for x, y in zip(res, desired, strict=False): + assert_array_equal(x, y) diff --git a/local_packages/numpy/lib/tests/test_stride_tricks.py b/local_packages/numpy/lib/tests/test_stride_tricks.py new file mode 100644 index 0000000..fb654b4 --- /dev/null +++ b/local_packages/numpy/lib/tests/test_stride_tricks.py @@ -0,0 +1,655 @@ +import pytest + +import numpy as np +from numpy._core._rational_tests import rational +from numpy.lib._stride_tricks_impl import ( + _broadcast_shape, + as_strided, + broadcast_arrays, + broadcast_shapes, + broadcast_to, + sliding_window_view, +) +from numpy.testing import ( + assert_, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) + + +def assert_shapes_correct(input_shapes, expected_shape): + # Broadcast a list of arrays with the given input shapes and check the + # common output shape. + + inarrays = [np.zeros(s) for s in input_shapes] + outarrays = broadcast_arrays(*inarrays) + outshapes = [a.shape for a in outarrays] + expected = [expected_shape] * len(inarrays) + assert_equal(outshapes, expected) + + +def assert_incompatible_shapes_raise(input_shapes): + # Broadcast a list of arrays with the given (incompatible) input shapes + # and check that they raise a ValueError. + + inarrays = [np.zeros(s) for s in input_shapes] + assert_raises(ValueError, broadcast_arrays, *inarrays) + + +def assert_same_as_ufunc(shape0, shape1, transposed=False, flipped=False): + # Broadcast two shapes against each other and check that the data layout + # is the same as if a ufunc did the broadcasting. + + x0 = np.zeros(shape0, dtype=int) + # Note that multiply.reduce's identity element is 1.0, so when shape1==(), + # this gives the desired n==1. + n = int(np.multiply.reduce(shape1)) + x1 = np.arange(n).reshape(shape1) + if transposed: + x0 = x0.T + x1 = x1.T + if flipped: + x0 = x0[::-1] + x1 = x1[::-1] + # Use the add ufunc to do the broadcasting. Since we're adding 0s to x1, the + # result should be exactly the same as the broadcasted view of x1. + y = x0 + x1 + b0, b1 = broadcast_arrays(x0, x1) + assert_array_equal(y, b1) + + +def test_same(): + x = np.arange(10) + y = np.arange(10) + bx, by = broadcast_arrays(x, y) + assert_array_equal(x, bx) + assert_array_equal(y, by) + +def test_broadcast_kwargs(): + # ensure that a TypeError is appropriately raised when + # np.broadcast_arrays() is called with any keyword + # argument other than 'subok' + x = np.arange(10) + y = np.arange(10) + + with assert_raises_regex(TypeError, 'got an unexpected keyword'): + broadcast_arrays(x, y, dtype='float64') + + +def test_one_off(): + x = np.array([[1, 2, 3]]) + y = np.array([[1], [2], [3]]) + bx, by = broadcast_arrays(x, y) + bx0 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]) + by0 = bx0.T + assert_array_equal(bx0, bx) + assert_array_equal(by0, by) + + +def test_same_input_shapes(): + # Check that the final shape is just the input shape. + + data = [ + (), + (1,), + (3,), + (0, 1), + (0, 3), + (1, 0), + (3, 0), + (1, 3), + (3, 1), + (3, 3), + ] + for shape in data: + input_shapes = [shape] + # Single input. + assert_shapes_correct(input_shapes, shape) + # Double input. + input_shapes2 = [shape, shape] + assert_shapes_correct(input_shapes2, shape) + # Triple input. + input_shapes3 = [shape, shape, shape] + assert_shapes_correct(input_shapes3, shape) + + +def test_two_compatible_by_ones_input_shapes(): + # Check that two different input shapes of the same length, but some have + # ones, broadcast to the correct shape. + + data = [ + [[(1,), (3,)], (3,)], + [[(1, 3), (3, 3)], (3, 3)], + [[(3, 1), (3, 3)], (3, 3)], + [[(1, 3), (3, 1)], (3, 3)], + [[(1, 1), (3, 3)], (3, 3)], + [[(1, 1), (1, 3)], (1, 3)], + [[(1, 1), (3, 1)], (3, 1)], + [[(1, 0), (0, 0)], (0, 0)], + [[(0, 1), (0, 0)], (0, 0)], + [[(1, 0), (0, 1)], (0, 0)], + [[(1, 1), (0, 0)], (0, 0)], + [[(1, 1), (1, 0)], (1, 0)], + [[(1, 1), (0, 1)], (0, 1)], + ] + for input_shapes, expected_shape in data: + assert_shapes_correct(input_shapes, expected_shape) + # Reverse the input shapes since broadcasting should be symmetric. + assert_shapes_correct(input_shapes[::-1], expected_shape) + + +def test_two_compatible_by_prepending_ones_input_shapes(): + # Check that two different input shapes (of different lengths) broadcast + # to the correct shape. + + data = [ + [[(), (3,)], (3,)], + [[(3,), (3, 3)], (3, 3)], + [[(3,), (3, 1)], (3, 3)], + [[(1,), (3, 3)], (3, 3)], + [[(), (3, 3)], (3, 3)], + [[(1, 1), (3,)], (1, 3)], + [[(1,), (3, 1)], (3, 1)], + [[(1,), (1, 3)], (1, 3)], + [[(), (1, 3)], (1, 3)], + [[(), (3, 1)], (3, 1)], + [[(), (0,)], (0,)], + [[(0,), (0, 0)], (0, 0)], + [[(0,), (0, 1)], (0, 0)], + [[(1,), (0, 0)], (0, 0)], + [[(), (0, 0)], (0, 0)], + [[(1, 1), (0,)], (1, 0)], + [[(1,), (0, 1)], (0, 1)], + [[(1,), (1, 0)], (1, 0)], + [[(), (1, 0)], (1, 0)], + [[(), (0, 1)], (0, 1)], + ] + for input_shapes, expected_shape in data: + assert_shapes_correct(input_shapes, expected_shape) + # Reverse the input shapes since broadcasting should be symmetric. + assert_shapes_correct(input_shapes[::-1], expected_shape) + + +def test_incompatible_shapes_raise_valueerror(): + # Check that a ValueError is raised for incompatible shapes. + + data = [ + [(3,), (4,)], + [(2, 3), (2,)], + [(3,), (3,), (4,)], + [(1, 3, 4), (2, 3, 3)], + ] + for input_shapes in data: + assert_incompatible_shapes_raise(input_shapes) + # Reverse the input shapes since broadcasting should be symmetric. + assert_incompatible_shapes_raise(input_shapes[::-1]) + + +def test_same_as_ufunc(): + # Check that the data layout is the same as if a ufunc did the operation. + + data = [ + [[(1,), (3,)], (3,)], + [[(1, 3), (3, 3)], (3, 3)], + [[(3, 1), (3, 3)], (3, 3)], + [[(1, 3), (3, 1)], (3, 3)], + [[(1, 1), (3, 3)], (3, 3)], + [[(1, 1), (1, 3)], (1, 3)], + [[(1, 1), (3, 1)], (3, 1)], + [[(1, 0), (0, 0)], (0, 0)], + [[(0, 1), (0, 0)], (0, 0)], + [[(1, 0), (0, 1)], (0, 0)], + [[(1, 1), (0, 0)], (0, 0)], + [[(1, 1), (1, 0)], (1, 0)], + [[(1, 1), (0, 1)], (0, 1)], + [[(), (3,)], (3,)], + [[(3,), (3, 3)], (3, 3)], + [[(3,), (3, 1)], (3, 3)], + [[(1,), (3, 3)], (3, 3)], + [[(), (3, 3)], (3, 3)], + [[(1, 1), (3,)], (1, 3)], + [[(1,), (3, 1)], (3, 1)], + [[(1,), (1, 3)], (1, 3)], + [[(), (1, 3)], (1, 3)], + [[(), (3, 1)], (3, 1)], + [[(), (0,)], (0,)], + [[(0,), (0, 0)], (0, 0)], + [[(0,), (0, 1)], (0, 0)], + [[(1,), (0, 0)], (0, 0)], + [[(), (0, 0)], (0, 0)], + [[(1, 1), (0,)], (1, 0)], + [[(1,), (0, 1)], (0, 1)], + [[(1,), (1, 0)], (1, 0)], + [[(), (1, 0)], (1, 0)], + [[(), (0, 1)], (0, 1)], + ] + for input_shapes, expected_shape in data: + assert_same_as_ufunc(input_shapes[0], input_shapes[1], + f"Shapes: {input_shapes[0]} {input_shapes[1]}") + # Reverse the input shapes since broadcasting should be symmetric. + assert_same_as_ufunc(input_shapes[1], input_shapes[0]) + # Try them transposed, too. + assert_same_as_ufunc(input_shapes[0], input_shapes[1], True) + # ... and flipped for non-rank-0 inputs in order to test negative + # strides. + if () not in input_shapes: + assert_same_as_ufunc(input_shapes[0], input_shapes[1], False, True) + assert_same_as_ufunc(input_shapes[0], input_shapes[1], True, True) + + +def test_broadcast_to_succeeds(): + data = [ + [np.array(0), (0,), np.array(0)], + [np.array(0), (1,), np.zeros(1)], + [np.array(0), (3,), np.zeros(3)], + [np.ones(1), (1,), np.ones(1)], + [np.ones(1), (2,), np.ones(2)], + [np.ones(1), (1, 2, 3), np.ones((1, 2, 3))], + [np.arange(3), (3,), np.arange(3)], + [np.arange(3), (1, 3), np.arange(3).reshape(1, -1)], + [np.arange(3), (2, 3), np.array([[0, 1, 2], [0, 1, 2]])], + # test if shape is not a tuple + [np.ones(0), 0, np.ones(0)], + [np.ones(1), 1, np.ones(1)], + [np.ones(1), 2, np.ones(2)], + # these cases with size 0 are strange, but they reproduce the behavior + # of broadcasting with ufuncs (see test_same_as_ufunc above) + [np.ones(1), (0,), np.ones(0)], + [np.ones((1, 2)), (0, 2), np.ones((0, 2))], + [np.ones((2, 1)), (2, 0), np.ones((2, 0))], + ] + for input_array, shape, expected in data: + actual = broadcast_to(input_array, shape) + assert_array_equal(expected, actual) + + +def test_broadcast_to_raises(): + data = [ + [(0,), ()], + [(1,), ()], + [(3,), ()], + [(3,), (1,)], + [(3,), (2,)], + [(3,), (4,)], + [(1, 2), (2, 1)], + [(1, 1), (1,)], + [(1,), -1], + [(1,), (-1,)], + [(1, 2), (-1, 2)], + ] + for orig_shape, target_shape in data: + arr = np.zeros(orig_shape) + assert_raises(ValueError, lambda: broadcast_to(arr, target_shape)) + + +def test_broadcast_shape(): + # tests internal _broadcast_shape + # _broadcast_shape is already exercised indirectly by broadcast_arrays + # _broadcast_shape is also exercised by the public broadcast_shapes function + assert_equal(_broadcast_shape(), ()) + assert_equal(_broadcast_shape([1, 2]), (2,)) + assert_equal(_broadcast_shape(np.ones((1, 1))), (1, 1)) + assert_equal(_broadcast_shape(np.ones((1, 1)), np.ones((3, 4))), (3, 4)) + assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 32)), (1, 2)) + assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 100)), (1, 2)) + + # regression tests for gh-5862 + assert_equal(_broadcast_shape(*([np.ones(2)] * 32 + [1])), (2,)) + bad_args = [np.ones(2)] * 32 + [np.ones(3)] * 32 + assert_raises(ValueError, lambda: _broadcast_shape(*bad_args)) + + +def test_broadcast_shapes_succeeds(): + # tests public broadcast_shapes + data = [ + [[], ()], + [[()], ()], + [[(7,)], (7,)], + [[(1, 2), (2,)], (1, 2)], + [[(1, 1)], (1, 1)], + [[(1, 1), (3, 4)], (3, 4)], + [[(6, 7), (5, 6, 1), (7,), (5, 1, 7)], (5, 6, 7)], + [[(5, 6, 1)], (5, 6, 1)], + [[(1, 3), (3, 1)], (3, 3)], + [[(1, 0), (0, 0)], (0, 0)], + [[(0, 1), (0, 0)], (0, 0)], + [[(1, 0), (0, 1)], (0, 0)], + [[(1, 1), (0, 0)], (0, 0)], + [[(1, 1), (1, 0)], (1, 0)], + [[(1, 1), (0, 1)], (0, 1)], + [[(), (0,)], (0,)], + [[(0,), (0, 0)], (0, 0)], + [[(0,), (0, 1)], (0, 0)], + [[(1,), (0, 0)], (0, 0)], + [[(), (0, 0)], (0, 0)], + [[(1, 1), (0,)], (1, 0)], + [[(1,), (0, 1)], (0, 1)], + [[(1,), (1, 0)], (1, 0)], + [[(), (1, 0)], (1, 0)], + [[(), (0, 1)], (0, 1)], + [[(1,), (3,)], (3,)], + [[2, (3, 2)], (3, 2)], + ] + for input_shapes, target_shape in data: + assert_equal(broadcast_shapes(*input_shapes), target_shape) + + assert_equal(broadcast_shapes(*([(1, 2)] * 32)), (1, 2)) + assert_equal(broadcast_shapes(*([(1, 2)] * 100)), (1, 2)) + + # regression tests for gh-5862 + assert_equal(broadcast_shapes(*([(2,)] * 32)), (2,)) + + +def test_broadcast_shapes_raises(): + # tests public broadcast_shapes + data = [ + [(3,), (4,)], + [(2, 3), (2,)], + [(3,), (3,), (4,)], + [(1, 3, 4), (2, 3, 3)], + [(1, 2), (3, 1), (3, 2), (10, 5)], + [2, (2, 3)], + ] + for input_shapes in data: + assert_raises(ValueError, lambda: broadcast_shapes(*input_shapes)) + + bad_args = [(2,)] * 32 + [(3,)] * 32 + assert_raises(ValueError, lambda: broadcast_shapes(*bad_args)) + + +def test_as_strided(): + a = np.array([None]) + a_view = as_strided(a) + expected = np.array([None]) + assert_array_equal(a_view, np.array([None])) + + a = np.array([1, 2, 3, 4]) + a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,)) + expected = np.array([1, 3]) + assert_array_equal(a_view, expected) + + a = np.array([1, 2, 3, 4]) + a_view = as_strided(a, shape=(3, 4), strides=(0, 1 * a.itemsize)) + expected = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) + assert_array_equal(a_view, expected) + + # Regression test for gh-5081 + dt = np.dtype([('num', 'i4'), ('obj', 'O')]) + a = np.empty((4,), dtype=dt) + a['num'] = np.arange(1, 5) + a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) + expected_num = [[1, 2, 3, 4]] * 3 + expected_obj = [[None] * 4] * 3 + assert_equal(a_view.dtype, dt) + assert_array_equal(expected_num, a_view['num']) + assert_array_equal(expected_obj, a_view['obj']) + + # Make sure that void types without fields are kept unchanged + a = np.empty((4,), dtype='V4') + a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) + assert_equal(a.dtype, a_view.dtype) + + # Make sure that the only type that could fail is properly handled + dt = np.dtype({'names': [''], 'formats': ['V4']}) + a = np.empty((4,), dtype=dt) + a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) + assert_equal(a.dtype, a_view.dtype) + + # Custom dtypes should not be lost (gh-9161) + r = [rational(i) for i in range(4)] + a = np.array(r, dtype=rational) + a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) + assert_equal(a.dtype, a_view.dtype) + assert_array_equal([r] * 3, a_view) + + +class TestSlidingWindowView: + def test_1d(self): + arr = np.arange(5) + arr_view = sliding_window_view(arr, 2) + expected = np.array([[0, 1], + [1, 2], + [2, 3], + [3, 4]]) + assert_array_equal(arr_view, expected) + + def test_2d(self): + i, j = np.ogrid[:3, :4] + arr = 10 * i + j + shape = (2, 2) + arr_view = sliding_window_view(arr, shape) + expected = np.array([[[[0, 1], [10, 11]], + [[1, 2], [11, 12]], + [[2, 3], [12, 13]]], + [[[10, 11], [20, 21]], + [[11, 12], [21, 22]], + [[12, 13], [22, 23]]]]) + assert_array_equal(arr_view, expected) + + def test_2d_with_axis(self): + i, j = np.ogrid[:3, :4] + arr = 10 * i + j + arr_view = sliding_window_view(arr, 3, 0) + expected = np.array([[[0, 10, 20], + [1, 11, 21], + [2, 12, 22], + [3, 13, 23]]]) + assert_array_equal(arr_view, expected) + + def test_2d_repeated_axis(self): + i, j = np.ogrid[:3, :4] + arr = 10 * i + j + arr_view = sliding_window_view(arr, (2, 3), (1, 1)) + expected = np.array([[[[0, 1, 2], + [1, 2, 3]]], + [[[10, 11, 12], + [11, 12, 13]]], + [[[20, 21, 22], + [21, 22, 23]]]]) + assert_array_equal(arr_view, expected) + + def test_2d_without_axis(self): + i, j = np.ogrid[:4, :4] + arr = 10 * i + j + shape = (2, 3) + arr_view = sliding_window_view(arr, shape) + expected = np.array([[[[0, 1, 2], [10, 11, 12]], + [[1, 2, 3], [11, 12, 13]]], + [[[10, 11, 12], [20, 21, 22]], + [[11, 12, 13], [21, 22, 23]]], + [[[20, 21, 22], [30, 31, 32]], + [[21, 22, 23], [31, 32, 33]]]]) + assert_array_equal(arr_view, expected) + + def test_errors(self): + i, j = np.ogrid[:4, :4] + arr = 10 * i + j + with pytest.raises(ValueError, match='cannot contain negative values'): + sliding_window_view(arr, (-1, 3)) + with pytest.raises( + ValueError, + match='must provide window_shape for all dimensions of `x`'): + sliding_window_view(arr, (1,)) + with pytest.raises( + ValueError, + match='Must provide matching length window_shape and axis'): + sliding_window_view(arr, (1, 3, 4), axis=(0, 1)) + with pytest.raises( + ValueError, + match='window shape cannot be larger than input array'): + sliding_window_view(arr, (5, 5)) + + def test_writeable(self): + arr = np.arange(5) + view = sliding_window_view(arr, 2, writeable=False) + assert_(not view.flags.writeable) + with pytest.raises( + ValueError, + match='assignment destination is read-only'): + view[0, 0] = 3 + view = sliding_window_view(arr, 2, writeable=True) + assert_(view.flags.writeable) + view[0, 1] = 3 + assert_array_equal(arr, np.array([0, 3, 2, 3, 4])) + + def test_subok(self): + class MyArray(np.ndarray): + pass + + arr = np.arange(5).view(MyArray) + assert_(not isinstance(sliding_window_view(arr, 2, + subok=False), + MyArray)) + assert_(isinstance(sliding_window_view(arr, 2, subok=True), MyArray)) + # Default behavior + assert_(not isinstance(sliding_window_view(arr, 2), MyArray)) + + +def as_strided_writeable(): + arr = np.ones(10) + view = as_strided(arr, writeable=False) + assert_(not view.flags.writeable) + + # Check that writeable also is fine: + view = as_strided(arr, writeable=True) + assert_(view.flags.writeable) + view[...] = 3 + assert_array_equal(arr, np.full_like(arr, 3)) + + # Test that things do not break down for readonly: + arr.flags.writeable = False + view = as_strided(arr, writeable=False) + view = as_strided(arr, writeable=True) + assert_(not view.flags.writeable) + + +class VerySimpleSubClass(np.ndarray): + def __new__(cls, *args, **kwargs): + return np.array(*args, subok=True, **kwargs).view(cls) + + +class SimpleSubClass(VerySimpleSubClass): + def __new__(cls, *args, **kwargs): + self = np.array(*args, subok=True, **kwargs).view(cls) + self.info = 'simple' + return self + + def __array_finalize__(self, obj): + self.info = getattr(obj, 'info', '') + ' finalized' + + +def test_subclasses(): + # test that subclass is preserved only if subok=True + a = VerySimpleSubClass([1, 2, 3, 4]) + assert_(type(a) is VerySimpleSubClass) + a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,)) + assert_(type(a_view) is np.ndarray) + a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True) + assert_(type(a_view) is VerySimpleSubClass) + # test that if a subclass has __array_finalize__, it is used + a = SimpleSubClass([1, 2, 3, 4]) + a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True) + assert_(type(a_view) is SimpleSubClass) + assert_(a_view.info == 'simple finalized') + + # similar tests for broadcast_arrays + b = np.arange(len(a)).reshape(-1, 1) + a_view, b_view = broadcast_arrays(a, b) + assert_(type(a_view) is np.ndarray) + assert_(type(b_view) is np.ndarray) + assert_(a_view.shape == b_view.shape) + a_view, b_view = broadcast_arrays(a, b, subok=True) + assert_(type(a_view) is SimpleSubClass) + assert_(a_view.info == 'simple finalized') + assert_(type(b_view) is np.ndarray) + assert_(a_view.shape == b_view.shape) + + # and for broadcast_to + shape = (2, 4) + a_view = broadcast_to(a, shape) + assert_(type(a_view) is np.ndarray) + assert_(a_view.shape == shape) + a_view = broadcast_to(a, shape, subok=True) + assert_(type(a_view) is SimpleSubClass) + assert_(a_view.info == 'simple finalized') + assert_(a_view.shape == shape) + + +def test_writeable(): + # broadcast_to should return a readonly array + original = np.array([1, 2, 3]) + result = broadcast_to(original, (2, 3)) + assert_equal(result.flags.writeable, False) + assert_raises(ValueError, result.__setitem__, slice(None), 0) + + # but the result of broadcast_arrays needs to be writeable, to + # preserve backwards compatibility + test_cases = [((False,), broadcast_arrays(original,)), + ((True, False), broadcast_arrays(0, original))] + for is_broadcast, results in test_cases: + for array_is_broadcast, result in zip(is_broadcast, results): + # This will change to False in a future version + if array_is_broadcast: + with pytest.warns(FutureWarning): + assert_equal(result.flags.writeable, True) + with pytest.warns(DeprecationWarning): + result[:] = 0 + # Warning not emitted, writing to the array resets it + assert_equal(result.flags.writeable, True) + else: + # No warning: + assert_equal(result.flags.writeable, True) + + for results in [broadcast_arrays(original), + broadcast_arrays(0, original)]: + for result in results: + # resets the warn_on_write DeprecationWarning + result.flags.writeable = True + # check: no warning emitted + assert_equal(result.flags.writeable, True) + result[:] = 0 + + # keep readonly input readonly + original.flags.writeable = False + _, result = broadcast_arrays(0, original) + assert_equal(result.flags.writeable, False) + + # regression test for GH6491 + shape = (2,) + strides = [0] + tricky_array = as_strided(np.array(0), shape, strides) + other = np.zeros((1,)) + first, second = broadcast_arrays(tricky_array, other) + assert_(first.shape == second.shape) + + +def test_writeable_memoryview(): + # The result of broadcast_arrays exports as a non-writeable memoryview + # because otherwise there is no good way to opt in to the new behaviour + # (i.e. you would need to set writeable to False explicitly). + # See gh-13929. + original = np.array([1, 2, 3]) + + test_cases = [((False, ), broadcast_arrays(original,)), + ((True, False), broadcast_arrays(0, original))] + for is_broadcast, results in test_cases: + for array_is_broadcast, result in zip(is_broadcast, results): + # This will change to False in a future version + if array_is_broadcast: + # memoryview(result, writable=True) will give warning but cannot + # be tested using the python API. + assert memoryview(result).readonly + else: + assert not memoryview(result).readonly + + +def test_reference_types(): + input_array = np.array('a', dtype=object) + expected = np.array(['a'] * 3, dtype=object) + actual = broadcast_to(input_array, (3,)) + assert_array_equal(expected, actual) + + actual, _ = broadcast_arrays(input_array, np.ones(3)) + assert_array_equal(expected, actual) diff --git a/local_packages/numpy/lib/tests/test_twodim_base.py b/local_packages/numpy/lib/tests/test_twodim_base.py new file mode 100644 index 0000000..eb6aa69 --- /dev/null +++ b/local_packages/numpy/lib/tests/test_twodim_base.py @@ -0,0 +1,559 @@ +"""Test functions for matrix module + +""" +import pytest + +import numpy as np +from numpy import ( + add, + arange, + array, + diag, + eye, + fliplr, + flipud, + histogram2d, + mask_indices, + ones, + tri, + tril_indices, + tril_indices_from, + triu_indices, + triu_indices_from, + vander, + zeros, +) +from numpy.testing import ( + assert_, + assert_array_almost_equal, + assert_array_equal, + assert_array_max_ulp, + assert_equal, + assert_raises, +) + + +def get_mat(n): + data = arange(n) + data = add.outer(data, data) + return data + + +class TestEye: + def test_basic(self): + assert_equal(eye(4), + array([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1]])) + + assert_equal(eye(4, dtype='f'), + array([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1]], 'f')) + + assert_equal(eye(3) == 1, + eye(3, dtype=bool)) + + def test_uint64(self): + # Regression test for gh-9982 + assert_equal(eye(np.uint64(2), dtype=int), array([[1, 0], [0, 1]])) + assert_equal(eye(np.uint64(2), M=np.uint64(4), k=np.uint64(1)), + array([[0, 1, 0, 0], [0, 0, 1, 0]])) + + def test_diag(self): + assert_equal(eye(4, k=1), + array([[0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 0]])) + + assert_equal(eye(4, k=-1), + array([[0, 0, 0, 0], + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0]])) + + def test_2d(self): + assert_equal(eye(4, 3), + array([[1, 0, 0], + [0, 1, 0], + [0, 0, 1], + [0, 0, 0]])) + + assert_equal(eye(3, 4), + array([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0]])) + + def test_diag2d(self): + assert_equal(eye(3, 4, k=2), + array([[0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 0]])) + + assert_equal(eye(4, 3, k=-2), + array([[0, 0, 0], + [0, 0, 0], + [1, 0, 0], + [0, 1, 0]])) + + def test_eye_bounds(self): + assert_equal(eye(2, 2, 1), [[0, 1], [0, 0]]) + assert_equal(eye(2, 2, -1), [[0, 0], [1, 0]]) + assert_equal(eye(2, 2, 2), [[0, 0], [0, 0]]) + assert_equal(eye(2, 2, -2), [[0, 0], [0, 0]]) + assert_equal(eye(3, 2, 2), [[0, 0], [0, 0], [0, 0]]) + assert_equal(eye(3, 2, 1), [[0, 1], [0, 0], [0, 0]]) + assert_equal(eye(3, 2, -1), [[0, 0], [1, 0], [0, 1]]) + assert_equal(eye(3, 2, -2), [[0, 0], [0, 0], [1, 0]]) + assert_equal(eye(3, 2, -3), [[0, 0], [0, 0], [0, 0]]) + + def test_strings(self): + assert_equal(eye(2, 2, dtype='S3'), + [[b'1', b''], [b'', b'1']]) + + def test_bool(self): + assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]]) + + def test_order(self): + mat_c = eye(4, 3, k=-1) + mat_f = eye(4, 3, k=-1, order='F') + assert_equal(mat_c, mat_f) + assert mat_c.flags.c_contiguous + assert not mat_c.flags.f_contiguous + assert not mat_f.flags.c_contiguous + assert mat_f.flags.f_contiguous + + +class TestDiag: + def test_vector(self): + vals = (100 * arange(5)).astype('l') + b = zeros((5, 5)) + for k in range(5): + b[k, k] = vals[k] + assert_equal(diag(vals), b) + b = zeros((7, 7)) + c = b.copy() + for k in range(5): + b[k, k + 2] = vals[k] + c[k + 2, k] = vals[k] + assert_equal(diag(vals, k=2), b) + assert_equal(diag(vals, k=-2), c) + + def test_matrix(self, vals=None): + if vals is None: + vals = (100 * get_mat(5) + 1).astype('l') + b = zeros((5,)) + for k in range(5): + b[k] = vals[k, k] + assert_equal(diag(vals), b) + b = b * 0 + for k in range(3): + b[k] = vals[k, k + 2] + assert_equal(diag(vals, 2), b[:3]) + for k in range(3): + b[k] = vals[k + 2, k] + assert_equal(diag(vals, -2), b[:3]) + + def test_fortran_order(self): + vals = array((100 * get_mat(5) + 1), order='F', dtype='l') + self.test_matrix(vals) + + def test_diag_bounds(self): + A = [[1, 2], [3, 4], [5, 6]] + assert_equal(diag(A, k=2), []) + assert_equal(diag(A, k=1), [2]) + assert_equal(diag(A, k=0), [1, 4]) + assert_equal(diag(A, k=-1), [3, 6]) + assert_equal(diag(A, k=-2), [5]) + assert_equal(diag(A, k=-3), []) + + def test_failure(self): + assert_raises(ValueError, diag, [[[1]]]) + + +class TestFliplr: + def test_basic(self): + assert_raises(ValueError, fliplr, ones(4)) + a = get_mat(4) + b = a[:, ::-1] + assert_equal(fliplr(a), b) + a = [[0, 1, 2], + [3, 4, 5]] + b = [[2, 1, 0], + [5, 4, 3]] + assert_equal(fliplr(a), b) + + +class TestFlipud: + def test_basic(self): + a = get_mat(4) + b = a[::-1, :] + assert_equal(flipud(a), b) + a = [[0, 1, 2], + [3, 4, 5]] + b = [[3, 4, 5], + [0, 1, 2]] + assert_equal(flipud(a), b) + + +class TestHistogram2d: + def test_simple(self): + x = array( + [0.41702200, 0.72032449, 1.1437481e-4, 0.302332573, 0.146755891]) + y = array( + [0.09233859, 0.18626021, 0.34556073, 0.39676747, 0.53881673]) + xedges = np.linspace(0, 1, 10) + yedges = np.linspace(0, 1, 10) + H = histogram2d(x, y, (xedges, yedges))[0] + answer = array( + [[0, 0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 0, 1, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]]) + assert_array_equal(H.T, answer) + H = histogram2d(x, y, xedges)[0] + assert_array_equal(H.T, answer) + H, xedges, yedges = histogram2d(list(range(10)), list(range(10))) + assert_array_equal(H, eye(10, 10)) + assert_array_equal(xedges, np.linspace(0, 9, 11)) + assert_array_equal(yedges, np.linspace(0, 9, 11)) + + def test_asym(self): + x = array([1, 1, 2, 3, 4, 4, 4, 5]) + y = array([1, 3, 2, 0, 1, 2, 3, 4]) + H, xed, yed = histogram2d( + x, y, (6, 5), range=[[0, 6], [0, 5]], density=True) + answer = array( + [[0., 0, 0, 0, 0], + [0, 1, 0, 1, 0], + [0, 0, 1, 0, 0], + [1, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 1]]) + assert_array_almost_equal(H, answer / 8., 3) + assert_array_equal(xed, np.linspace(0, 6, 7)) + assert_array_equal(yed, np.linspace(0, 5, 6)) + + def test_density(self): + x = array([1, 2, 3, 1, 2, 3, 1, 2, 3]) + y = array([1, 1, 1, 2, 2, 2, 3, 3, 3]) + H, xed, yed = histogram2d( + x, y, [[1, 2, 3, 5], [1, 2, 3, 5]], density=True) + answer = array([[1, 1, .5], + [1, 1, .5], + [.5, .5, .25]]) / 9. + assert_array_almost_equal(H, answer, 3) + + def test_all_outliers(self): + r = np.random.rand(100) + 1. + 1e6 # histogramdd rounds by decimal=6 + H, xed, yed = histogram2d(r, r, (4, 5), range=([0, 1], [0, 1])) + assert_array_equal(H, 0) + + def test_empty(self): + a, edge1, edge2 = histogram2d([], [], bins=([0, 1], [0, 1])) + assert_array_max_ulp(a, array([[0.]])) + + a, edge1, edge2 = histogram2d([], [], bins=4) + assert_array_max_ulp(a, np.zeros((4, 4))) + + def test_binparameter_combination(self): + x = array( + [0, 0.09207008, 0.64575234, 0.12875982, 0.47390599, + 0.59944483, 1]) + y = array( + [0, 0.14344267, 0.48988575, 0.30558665, 0.44700682, + 0.15886423, 1]) + edges = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1) + H, xe, ye = histogram2d(x, y, (edges, 4)) + answer = array( + [[2., 0., 0., 0.], + [0., 1., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 0.], + [0., 1., 0., 0.], + [1., 0., 0., 0.], + [0., 1., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 1.]]) + assert_array_equal(H, answer) + assert_array_equal(ye, array([0., 0.25, 0.5, 0.75, 1])) + H, xe, ye = histogram2d(x, y, (4, edges)) + answer = array( + [[1., 1., 0., 1., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 1., 0., 0., 0., 0., 0.], + [0., 1., 0., 0., 1., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]]) + assert_array_equal(H, answer) + assert_array_equal(xe, array([0., 0.25, 0.5, 0.75, 1])) + + def test_dispatch(self): + class ShouldDispatch: + def __array_function__(self, function, types, args, kwargs): + return types, args, kwargs + + xy = [1, 2] + s_d = ShouldDispatch() + r = histogram2d(s_d, xy) + # Cannot use assert_equal since that dispatches... + assert_(r == ((ShouldDispatch,), (s_d, xy), {})) + r = histogram2d(xy, s_d) + assert_(r == ((ShouldDispatch,), (xy, s_d), {})) + r = histogram2d(xy, xy, bins=s_d) + assert_(r, ((ShouldDispatch,), (xy, xy), {'bins': s_d})) + r = histogram2d(xy, xy, bins=[s_d, 5]) + assert_(r, ((ShouldDispatch,), (xy, xy), {'bins': [s_d, 5]})) + assert_raises(Exception, histogram2d, xy, xy, bins=[s_d]) + r = histogram2d(xy, xy, weights=s_d) + assert_(r, ((ShouldDispatch,), (xy, xy), {'weights': s_d})) + + @pytest.mark.parametrize(("x_len", "y_len"), [(10, 11), (20, 19)]) + def test_bad_length(self, x_len, y_len): + x, y = np.ones(x_len), np.ones(y_len) + with pytest.raises(ValueError, + match='x and y must have the same length.'): + histogram2d(x, y) + + +class TestTri: + def test_dtype(self): + out = array([[1, 0, 0], + [1, 1, 0], + [1, 1, 1]]) + assert_array_equal(tri(3), out) + assert_array_equal(tri(3, dtype=bool), out.astype(bool)) + + +def test_tril_triu_ndim2(): + for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']: + a = np.ones((2, 2), dtype=dtype) + b = np.tril(a) + c = np.triu(a) + assert_array_equal(b, [[1, 0], [1, 1]]) + assert_array_equal(c, b.T) + # should return the same dtype as the original array + assert_equal(b.dtype, a.dtype) + assert_equal(c.dtype, a.dtype) + + +def test_tril_triu_ndim3(): + for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']: + a = np.array([ + [[1, 1], [1, 1]], + [[1, 1], [1, 0]], + [[1, 1], [0, 0]], + ], dtype=dtype) + a_tril_desired = np.array([ + [[1, 0], [1, 1]], + [[1, 0], [1, 0]], + [[1, 0], [0, 0]], + ], dtype=dtype) + a_triu_desired = np.array([ + [[1, 1], [0, 1]], + [[1, 1], [0, 0]], + [[1, 1], [0, 0]], + ], dtype=dtype) + a_triu_observed = np.triu(a) + a_tril_observed = np.tril(a) + assert_array_equal(a_triu_observed, a_triu_desired) + assert_array_equal(a_tril_observed, a_tril_desired) + assert_equal(a_triu_observed.dtype, a.dtype) + assert_equal(a_tril_observed.dtype, a.dtype) + + +def test_tril_triu_with_inf(): + # Issue 4859 + arr = np.array([[1, 1, np.inf], + [1, 1, 1], + [np.inf, 1, 1]]) + out_tril = np.array([[1, 0, 0], + [1, 1, 0], + [np.inf, 1, 1]]) + out_triu = out_tril.T + assert_array_equal(np.triu(arr), out_triu) + assert_array_equal(np.tril(arr), out_tril) + + +def test_tril_triu_dtype(): + # Issue 4916 + # tril and triu should return the same dtype as input + for c in np.typecodes['All']: + if c == 'V': + continue + arr = np.zeros((3, 3), dtype=c) + assert_equal(np.triu(arr).dtype, arr.dtype) + assert_equal(np.tril(arr).dtype, arr.dtype) + + # check special cases + arr = np.array([['2001-01-01T12:00', '2002-02-03T13:56'], + ['2004-01-01T12:00', '2003-01-03T13:45']], + dtype='datetime64') + assert_equal(np.triu(arr).dtype, arr.dtype) + assert_equal(np.tril(arr).dtype, arr.dtype) + + arr = np.zeros((3, 3), dtype='f4,f4') + assert_equal(np.triu(arr).dtype, arr.dtype) + assert_equal(np.tril(arr).dtype, arr.dtype) + + +def test_mask_indices(): + # simple test without offset + iu = mask_indices(3, np.triu) + a = np.arange(9).reshape(3, 3) + assert_array_equal(a[iu], array([0, 1, 2, 4, 5, 8])) + # Now with an offset + iu1 = mask_indices(3, np.triu, 1) + assert_array_equal(a[iu1], array([1, 2, 5])) + + +def test_tril_indices(): + # indices without and with offset + il1 = tril_indices(4) + il2 = tril_indices(4, k=2) + il3 = tril_indices(4, m=5) + il4 = tril_indices(4, k=2, m=5) + + a = np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16]]) + b = np.arange(1, 21).reshape(4, 5) + + # indexing: + assert_array_equal(a[il1], + array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16])) + assert_array_equal(b[il3], + array([1, 6, 7, 11, 12, 13, 16, 17, 18, 19])) + + # And for assigning values: + a[il1] = -1 + assert_array_equal(a, + array([[-1, 2, 3, 4], + [-1, -1, 7, 8], + [-1, -1, -1, 12], + [-1, -1, -1, -1]])) + b[il3] = -1 + assert_array_equal(b, + array([[-1, 2, 3, 4, 5], + [-1, -1, 8, 9, 10], + [-1, -1, -1, 14, 15], + [-1, -1, -1, -1, 20]])) + # These cover almost the whole array (two diagonals right of the main one): + a[il2] = -10 + assert_array_equal(a, + array([[-10, -10, -10, 4], + [-10, -10, -10, -10], + [-10, -10, -10, -10], + [-10, -10, -10, -10]])) + b[il4] = -10 + assert_array_equal(b, + array([[-10, -10, -10, 4, 5], + [-10, -10, -10, -10, 10], + [-10, -10, -10, -10, -10], + [-10, -10, -10, -10, -10]])) + + +class TestTriuIndices: + def test_triu_indices(self): + iu1 = triu_indices(4) + iu2 = triu_indices(4, k=2) + iu3 = triu_indices(4, m=5) + iu4 = triu_indices(4, k=2, m=5) + + a = np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16]]) + b = np.arange(1, 21).reshape(4, 5) + + # Both for indexing: + assert_array_equal(a[iu1], + array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16])) + assert_array_equal(b[iu3], + array([1, 2, 3, 4, 5, 7, 8, 9, + 10, 13, 14, 15, 19, 20])) + + # And for assigning values: + a[iu1] = -1 + assert_array_equal(a, + array([[-1, -1, -1, -1], + [5, -1, -1, -1], + [9, 10, -1, -1], + [13, 14, 15, -1]])) + b[iu3] = -1 + assert_array_equal(b, + array([[-1, -1, -1, -1, -1], + [6, -1, -1, -1, -1], + [11, 12, -1, -1, -1], + [16, 17, 18, -1, -1]])) + + # These cover almost the whole array (two diagonals right of the + # main one): + a[iu2] = -10 + assert_array_equal(a, + array([[-1, -1, -10, -10], + [5, -1, -1, -10], + [9, 10, -1, -1], + [13, 14, 15, -1]])) + b[iu4] = -10 + assert_array_equal(b, + array([[-1, -1, -10, -10, -10], + [6, -1, -1, -10, -10], + [11, 12, -1, -1, -10], + [16, 17, 18, -1, -1]])) + + +class TestTrilIndicesFrom: + def test_exceptions(self): + assert_raises(ValueError, tril_indices_from, np.ones((2,))) + assert_raises(ValueError, tril_indices_from, np.ones((2, 2, 2))) + # assert_raises(ValueError, tril_indices_from, np.ones((2, 3))) + + +class TestTriuIndicesFrom: + def test_exceptions(self): + assert_raises(ValueError, triu_indices_from, np.ones((2,))) + assert_raises(ValueError, triu_indices_from, np.ones((2, 2, 2))) + # assert_raises(ValueError, triu_indices_from, np.ones((2, 3))) + + +class TestVander: + def test_basic(self): + c = np.array([0, 1, -2, 3]) + v = vander(c) + powers = np.array([[0, 0, 0, 0, 1], + [1, 1, 1, 1, 1], + [16, -8, 4, -2, 1], + [81, 27, 9, 3, 1]]) + # Check default value of N: + assert_array_equal(v, powers[:, 1:]) + # Check a range of N values, including 0 and 5 (greater than default) + m = powers.shape[1] + for n in range(6): + v = vander(c, N=n) + assert_array_equal(v, powers[:, m - n:m]) + + def test_dtypes(self): + c = array([11, -12, 13], dtype=np.int8) + v = vander(c) + expected = np.array([[121, 11, 1], + [144, -12, 1], + [169, 13, 1]]) + assert_array_equal(v, expected) + + c = array([1.0 + 1j, 1.0 - 1j]) + v = vander(c, N=3) + expected = np.array([[2j, 1 + 1j, 1], + [-2j, 1 - 1j, 1]]) + # The data is floating point, but the values are small integers, + # so assert_array_equal *should* be safe here (rather than, say, + # assert_array_almost_equal). + assert_array_equal(v, expected) diff --git a/local_packages/numpy/lib/tests/test_type_check.py b/local_packages/numpy/lib/tests/test_type_check.py new file mode 100644 index 0000000..447c2c3 --- /dev/null +++ b/local_packages/numpy/lib/tests/test_type_check.py @@ -0,0 +1,473 @@ +import numpy as np +from numpy import ( + common_type, + iscomplex, + iscomplexobj, + isneginf, + isposinf, + isreal, + isrealobj, + mintypecode, + nan_to_num, + real_if_close, +) +from numpy.testing import assert_, assert_array_equal, assert_equal + + +def assert_all(x): + assert_(np.all(x), x) + + +class TestCommonType: + def test_basic(self): + ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32) + af16 = np.array([[1, 2], [3, 4]], dtype=np.float16) + af32 = np.array([[1, 2], [3, 4]], dtype=np.float32) + af64 = np.array([[1, 2], [3, 4]], dtype=np.float64) + acs = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.complex64) + acd = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.complex128) + assert_(common_type(ai32) == np.float64) + assert_(common_type(af16) == np.float16) + assert_(common_type(af32) == np.float32) + assert_(common_type(af64) == np.float64) + assert_(common_type(acs) == np.complex64) + assert_(common_type(acd) == np.complex128) + + +class TestMintypecode: + + def test_default_1(self): + for itype in '1bcsuwil': + assert_equal(mintypecode(itype), 'd') + assert_equal(mintypecode('f'), 'f') + assert_equal(mintypecode('d'), 'd') + assert_equal(mintypecode('F'), 'F') + assert_equal(mintypecode('D'), 'D') + + def test_default_2(self): + for itype in '1bcsuwil': + assert_equal(mintypecode(itype + 'f'), 'f') + assert_equal(mintypecode(itype + 'd'), 'd') + assert_equal(mintypecode(itype + 'F'), 'F') + assert_equal(mintypecode(itype + 'D'), 'D') + assert_equal(mintypecode('ff'), 'f') + assert_equal(mintypecode('fd'), 'd') + assert_equal(mintypecode('fF'), 'F') + assert_equal(mintypecode('fD'), 'D') + assert_equal(mintypecode('df'), 'd') + assert_equal(mintypecode('dd'), 'd') + #assert_equal(mintypecode('dF',savespace=1),'F') + assert_equal(mintypecode('dF'), 'D') + assert_equal(mintypecode('dD'), 'D') + assert_equal(mintypecode('Ff'), 'F') + #assert_equal(mintypecode('Fd',savespace=1),'F') + assert_equal(mintypecode('Fd'), 'D') + assert_equal(mintypecode('FF'), 'F') + assert_equal(mintypecode('FD'), 'D') + assert_equal(mintypecode('Df'), 'D') + assert_equal(mintypecode('Dd'), 'D') + assert_equal(mintypecode('DF'), 'D') + assert_equal(mintypecode('DD'), 'D') + + def test_default_3(self): + assert_equal(mintypecode('fdF'), 'D') + #assert_equal(mintypecode('fdF',savespace=1),'F') + assert_equal(mintypecode('fdD'), 'D') + assert_equal(mintypecode('fFD'), 'D') + assert_equal(mintypecode('dFD'), 'D') + + assert_equal(mintypecode('ifd'), 'd') + assert_equal(mintypecode('ifF'), 'F') + assert_equal(mintypecode('ifD'), 'D') + assert_equal(mintypecode('idF'), 'D') + #assert_equal(mintypecode('idF',savespace=1),'F') + assert_equal(mintypecode('idD'), 'D') + + +class TestIsscalar: + + def test_basic(self): + assert_(np.isscalar(3)) + assert_(not np.isscalar([3])) + assert_(not np.isscalar((3,))) + assert_(np.isscalar(3j)) + assert_(np.isscalar(4.0)) + + +class TestReal: + + def test_real(self): + y = np.random.rand(10,) + assert_array_equal(y, np.real(y)) + + y = np.array(1) + out = np.real(y) + assert_array_equal(y, out) + assert_(isinstance(out, np.ndarray)) + + y = 1 + out = np.real(y) + assert_equal(y, out) + assert_(not isinstance(out, np.ndarray)) + + def test_cmplx(self): + y = np.random.rand(10,) + 1j * np.random.rand(10,) + assert_array_equal(y.real, np.real(y)) + + y = np.array(1 + 1j) + out = np.real(y) + assert_array_equal(y.real, out) + assert_(isinstance(out, np.ndarray)) + + y = 1 + 1j + out = np.real(y) + assert_equal(1.0, out) + assert_(not isinstance(out, np.ndarray)) + + +class TestImag: + + def test_real(self): + y = np.random.rand(10,) + assert_array_equal(0, np.imag(y)) + + y = np.array(1) + out = np.imag(y) + assert_array_equal(0, out) + assert_(isinstance(out, np.ndarray)) + + y = 1 + out = np.imag(y) + assert_equal(0, out) + assert_(not isinstance(out, np.ndarray)) + + def test_cmplx(self): + y = np.random.rand(10,) + 1j * np.random.rand(10,) + assert_array_equal(y.imag, np.imag(y)) + + y = np.array(1 + 1j) + out = np.imag(y) + assert_array_equal(y.imag, out) + assert_(isinstance(out, np.ndarray)) + + y = 1 + 1j + out = np.imag(y) + assert_equal(1.0, out) + assert_(not isinstance(out, np.ndarray)) + + +class TestIscomplex: + + def test_fail(self): + z = np.array([-1, 0, 1]) + res = iscomplex(z) + assert_(not np.any(res, axis=0)) + + def test_pass(self): + z = np.array([-1j, 1, 0]) + res = iscomplex(z) + assert_array_equal(res, [1, 0, 0]) + + +class TestIsreal: + + def test_pass(self): + z = np.array([-1, 0, 1j]) + res = isreal(z) + assert_array_equal(res, [1, 1, 0]) + + def test_fail(self): + z = np.array([-1j, 1, 0]) + res = isreal(z) + assert_array_equal(res, [0, 1, 1]) + + +class TestIscomplexobj: + + def test_basic(self): + z = np.array([-1, 0, 1]) + assert_(not iscomplexobj(z)) + z = np.array([-1j, 0, -1]) + assert_(iscomplexobj(z)) + + def test_scalar(self): + assert_(not iscomplexobj(1.0)) + assert_(iscomplexobj(1 + 0j)) + + def test_list(self): + assert_(iscomplexobj([3, 1 + 0j, True])) + assert_(not iscomplexobj([3, 1, True])) + + def test_duck(self): + class DummyComplexArray: + @property + def dtype(self): + return np.dtype(complex) + dummy = DummyComplexArray() + assert_(iscomplexobj(dummy)) + + def test_pandas_duck(self): + # This tests a custom np.dtype duck-typed class, such as used by pandas + # (pandas.core.dtypes) + class PdComplex(np.complex128): + pass + + class PdDtype: + name = 'category' + names = None + type = PdComplex + kind = 'c' + str = ' 1e10) and assert_all(np.isfinite(vals[2])) + assert_equal(type(vals), np.ndarray) + + # perform the same tests but with nan, posinf and neginf keywords + with np.errstate(divide='ignore', invalid='ignore'): + vals = nan_to_num(np.array((-1., 0, 1)) / 0., + nan=10, posinf=20, neginf=30) + assert_equal(vals, [30, 10, 20]) + assert_all(np.isfinite(vals[[0, 2]])) + assert_equal(type(vals), np.ndarray) + + # perform the same test but in-place + with np.errstate(divide='ignore', invalid='ignore'): + vals = np.array((-1., 0, 1)) / 0. + result = nan_to_num(vals, copy=False) + + assert_(result is vals) + assert_all(vals[0] < -1e10) and assert_all(np.isfinite(vals[0])) + assert_(vals[1] == 0) + assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2])) + assert_equal(type(vals), np.ndarray) + + # perform the same test but in-place + with np.errstate(divide='ignore', invalid='ignore'): + vals = np.array((-1., 0, 1)) / 0. + result = nan_to_num(vals, copy=False, nan=10, posinf=20, neginf=30) + + assert_(result is vals) + assert_equal(vals, [30, 10, 20]) + assert_all(np.isfinite(vals[[0, 2]])) + assert_equal(type(vals), np.ndarray) + + def test_array(self): + vals = nan_to_num([1]) + assert_array_equal(vals, np.array([1], int)) + assert_equal(type(vals), np.ndarray) + vals = nan_to_num([1], nan=10, posinf=20, neginf=30) + assert_array_equal(vals, np.array([1], int)) + assert_equal(type(vals), np.ndarray) + + def test_integer(self): + vals = nan_to_num(1) + assert_all(vals == 1) + assert_equal(type(vals), np.int_) + vals = nan_to_num(1, nan=10, posinf=20, neginf=30) + assert_all(vals == 1) + assert_equal(type(vals), np.int_) + + def test_float(self): + vals = nan_to_num(1.0) + assert_all(vals == 1.0) + assert_equal(type(vals), np.float64) + vals = nan_to_num(1.1, nan=10, posinf=20, neginf=30) + assert_all(vals == 1.1) + assert_equal(type(vals), np.float64) + + def test_complex_good(self): + vals = nan_to_num(1 + 1j) + assert_all(vals == 1 + 1j) + assert_equal(type(vals), np.complex128) + vals = nan_to_num(1 + 1j, nan=10, posinf=20, neginf=30) + assert_all(vals == 1 + 1j) + assert_equal(type(vals), np.complex128) + + def test_complex_bad(self): + with np.errstate(divide='ignore', invalid='ignore'): + v = 1 + 1j + v += np.array(0 + 1.j) / 0. + vals = nan_to_num(v) + # !! This is actually (unexpectedly) zero + assert_all(np.isfinite(vals)) + assert_equal(type(vals), np.complex128) + + def test_complex_bad2(self): + with np.errstate(divide='ignore', invalid='ignore'): + v = 1 + 1j + v += np.array(-1 + 1.j) / 0. + vals = nan_to_num(v) + assert_all(np.isfinite(vals)) + assert_equal(type(vals), np.complex128) + # Fixme + #assert_all(vals.imag > 1e10) and assert_all(np.isfinite(vals)) + # !! This is actually (unexpectedly) positive + # !! inf. Comment out for now, and see if it + # !! changes + #assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals)) + + def test_do_not_rewrite_previous_keyword(self): + # This is done to test that when, for instance, nan=np.inf then these + # values are not rewritten by posinf keyword to the posinf value. + with np.errstate(divide='ignore', invalid='ignore'): + vals = nan_to_num(np.array((-1., 0, 1)) / 0., nan=np.inf, posinf=999) + assert_all(np.isfinite(vals[[0, 2]])) + assert_all(vals[0] < -1e10) + assert_equal(vals[[1, 2]], [np.inf, 999]) + assert_equal(type(vals), np.ndarray) + + +class TestRealIfClose: + + def test_basic(self): + a = np.random.rand(10) + b = real_if_close(a + 1e-15j) + assert_all(isrealobj(b)) + assert_array_equal(a, b) + b = real_if_close(a + 1e-7j) + assert_all(iscomplexobj(b)) + b = real_if_close(a + 1e-7j, tol=1e-6) + assert_all(isrealobj(b)) diff --git a/local_packages/numpy/lib/tests/test_ufunclike.py b/local_packages/numpy/lib/tests/test_ufunclike.py new file mode 100644 index 0000000..b4257eb --- /dev/null +++ b/local_packages/numpy/lib/tests/test_ufunclike.py @@ -0,0 +1,97 @@ +import numpy as np +from numpy import fix, isneginf, isposinf +from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises + + +class TestUfunclike: + + def test_isposinf(self): + a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0]) + out = np.zeros(a.shape, bool) + tgt = np.array([True, False, False, False, False, False]) + + res = isposinf(a) + assert_equal(res, tgt) + res = isposinf(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + + a = a.astype(np.complex128) + with assert_raises(TypeError): + isposinf(a) + + def test_isneginf(self): + a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0]) + out = np.zeros(a.shape, bool) + tgt = np.array([False, True, False, False, False, False]) + + res = isneginf(a) + assert_equal(res, tgt) + res = isneginf(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + + a = a.astype(np.complex128) + with assert_raises(TypeError): + isneginf(a) + + def test_fix(self): + a = np.array([[1.0, 1.1, 1.5, 1.8], [-1.0, -1.1, -1.5, -1.8]]) + out = np.zeros(a.shape, float) + tgt = np.array([[1., 1., 1., 1.], [-1., -1., -1., -1.]]) + + res = fix(a) + assert_equal(res, tgt) + res = fix(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + assert_equal(fix(3.14), 3) + + def test_fix_with_subclass(self): + class MyArray(np.ndarray): + def __new__(cls, data, metadata=None): + res = np.array(data, copy=True).view(cls) + res.metadata = metadata + return res + + def __array_wrap__(self, obj, context=None, return_scalar=False): + if not isinstance(obj, MyArray): + obj = obj.view(MyArray) + if obj.metadata is None: + obj.metadata = self.metadata + return obj + + def __array_finalize__(self, obj): + self.metadata = getattr(obj, 'metadata', None) + return self + + a = np.array([1.1, -1.1]) + m = MyArray(a, metadata='foo') + f = fix(m) + assert_array_equal(f, np.array([1, -1])) + assert_(isinstance(f, MyArray)) + assert_equal(f.metadata, 'foo') + + # check 0d arrays don't decay to scalars + m0d = m[0, ...] + m0d.metadata = 'bar' + f0d = fix(m0d) + assert_(isinstance(f0d, MyArray)) + assert_equal(f0d.metadata, 'bar') + + def test_scalar(self): + x = np.inf + actual = np.isposinf(x) + expected = np.True_ + assert_equal(actual, expected) + assert_equal(type(actual), type(expected)) + + x = -3.4 + actual = np.fix(x) + expected = np.float64(-3.0) + assert_equal(actual, expected) + assert_equal(type(actual), type(expected)) + + out = np.array(0.0) + actual = np.fix(x, out=out) + assert_(actual is out) diff --git a/local_packages/numpy/lib/tests/test_utils.py b/local_packages/numpy/lib/tests/test_utils.py new file mode 100644 index 0000000..0106ee0 --- /dev/null +++ b/local_packages/numpy/lib/tests/test_utils.py @@ -0,0 +1,80 @@ +from io import StringIO + +import pytest + +import numpy as np +import numpy.lib._utils_impl as _utils_impl +from numpy.testing import assert_raises_regex + + +def test_assert_raises_regex_context_manager(): + with assert_raises_regex(ValueError, 'no deprecation warning'): + raise ValueError('no deprecation warning') + + +def test_info_method_heading(): + # info(class) should only print "Methods:" heading if methods exist + + class NoPublicMethods: + pass + + class WithPublicMethods: + def first_method(): + pass + + def _has_method_heading(cls): + out = StringIO() + np.info(cls, output=out) + return 'Methods:' in out.getvalue() + + assert _has_method_heading(WithPublicMethods) + assert not _has_method_heading(NoPublicMethods) + + +def test_drop_metadata(): + def _compare_dtypes(dt1, dt2): + return np.can_cast(dt1, dt2, casting='no') + + # structured dtype + dt = np.dtype([('l1', [('l2', np.dtype('S8', metadata={'msg': 'toto'}))])], + metadata={'msg': 'titi'}) + dt_m = _utils_impl.drop_metadata(dt) + assert _compare_dtypes(dt, dt_m) is True + assert dt_m.metadata is None + assert dt_m['l1'].metadata is None + assert dt_m['l1']['l2'].metadata is None + + # alignment + dt = np.dtype([('x', '>> from numpy import linalg as LA + >>> LA.inv(np.zeros((2,2))) + Traceback (most recent call last): + File "", line 1, in + File "...linalg.py", line 350, + in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype))) + File "...linalg.py", line 249, + in solve + raise LinAlgError('Singular matrix') + numpy.linalg.LinAlgError: Singular matrix + + """ + + +def _raise_linalgerror_singular(err, flag): + raise LinAlgError("Singular matrix") + +def _raise_linalgerror_nonposdef(err, flag): + raise LinAlgError("Matrix is not positive definite") + +def _raise_linalgerror_eigenvalues_nonconvergence(err, flag): + raise LinAlgError("Eigenvalues did not converge") + +def _raise_linalgerror_svd_nonconvergence(err, flag): + raise LinAlgError("SVD did not converge") + +def _raise_linalgerror_lstsq(err, flag): + raise LinAlgError("SVD did not converge in Linear Least Squares") + +def _raise_linalgerror_qr(err, flag): + raise LinAlgError("Incorrect argument found while performing " + "QR factorization") + + +def _makearray(a): + new = asarray(a) + wrap = getattr(a, "__array_wrap__", new.__array_wrap__) + return new, wrap + +def isComplexType(t): + return issubclass(t, complexfloating) + + +_real_types_map = {single: single, + double: double, + csingle: single, + cdouble: double} + +_complex_types_map = {single: csingle, + double: cdouble, + csingle: csingle, + cdouble: cdouble} + +def _realType(t, default=double): + return _real_types_map.get(t, default) + +def _complexType(t, default=cdouble): + return _complex_types_map.get(t, default) + +def _commonType(*arrays): + # in lite version, use higher precision (always double or cdouble) + result_type = single + is_complex = False + for a in arrays: + type_ = a.dtype.type + if issubclass(type_, inexact): + if isComplexType(type_): + is_complex = True + rt = _realType(type_, default=None) + if rt is double: + result_type = double + elif rt is None: + # unsupported inexact scalar + raise TypeError(f"array type {a.dtype.name} is unsupported in linalg") + else: + result_type = double + if is_complex: + result_type = _complex_types_map[result_type] + return cdouble, result_type + else: + return double, result_type + + +def _to_native_byte_order(*arrays): + ret = [] + for arr in arrays: + if arr.dtype.byteorder not in ('=', '|'): + ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('='))) + else: + ret.append(arr) + if len(ret) == 1: + return ret[0] + else: + return ret + + +def _assert_2d(*arrays): + for a in arrays: + if a.ndim != 2: + raise LinAlgError('%d-dimensional array given. Array must be ' + 'two-dimensional' % a.ndim) + +def _assert_stacked_2d(*arrays): + for a in arrays: + if a.ndim < 2: + raise LinAlgError('%d-dimensional array given. Array must be ' + 'at least two-dimensional' % a.ndim) + +def _assert_stacked_square(*arrays): + for a in arrays: + try: + m, n = a.shape[-2:] + except ValueError: + raise LinAlgError('%d-dimensional array given. Array must be ' + 'at least two-dimensional' % a.ndim) + if m != n: + raise LinAlgError('Last 2 dimensions of the array must be square') + +def _assert_finite(*arrays): + for a in arrays: + if not isfinite(a).all(): + raise LinAlgError("Array must not contain infs or NaNs") + +def _is_empty_2d(arr): + # check size first for efficiency + return arr.size == 0 and prod(arr.shape[-2:]) == 0 + + +def transpose(a): + """ + Transpose each matrix in a stack of matrices. + + Unlike np.transpose, this only swaps the last two axes, rather than all of + them + + Parameters + ---------- + a : (...,M,N) array_like + + Returns + ------- + aT : (...,N,M) ndarray + """ + return swapaxes(a, -1, -2) + +# Linear equations + +def _tensorsolve_dispatcher(a, b, axes=None): + return (a, b) + + +@array_function_dispatch(_tensorsolve_dispatcher) +def tensorsolve(a, b, axes=None): + """ + Solve the tensor equation ``a x = b`` for x. + + It is assumed that all indices of `x` are summed over in the product, + together with the rightmost indices of `a`, as is done in, for example, + ``tensordot(a, x, axes=x.ndim)``. + + Parameters + ---------- + a : array_like + Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals + the shape of that sub-tensor of `a` consisting of the appropriate + number of its rightmost indices, and must be such that + ``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be + 'square'). + b : array_like + Right-hand tensor, which can be of any shape. + axes : tuple of ints, optional + Axes in `a` to reorder to the right, before inversion. + If None (default), no reordering is done. + + Returns + ------- + x : ndarray, shape Q + + Raises + ------ + LinAlgError + If `a` is singular or not 'square' (in the above sense). + + See Also + -------- + numpy.tensordot, tensorinv, numpy.einsum + + Examples + -------- + >>> import numpy as np + >>> a = np.eye(2*3*4).reshape((2*3, 4, 2, 3, 4)) + >>> rng = np.random.default_rng() + >>> b = rng.normal(size=(2*3, 4)) + >>> x = np.linalg.tensorsolve(a, b) + >>> x.shape + (2, 3, 4) + >>> np.allclose(np.tensordot(a, x, axes=3), b) + True + + """ + a, wrap = _makearray(a) + b = asarray(b) + an = a.ndim + + if axes is not None: + allaxes = list(range(an)) + for k in axes: + allaxes.remove(k) + allaxes.insert(an, k) + a = a.transpose(allaxes) + + oldshape = a.shape[-(an - b.ndim):] + prod = 1 + for k in oldshape: + prod *= k + + if a.size != prod ** 2: + raise LinAlgError( + "Input arrays must satisfy the requirement \ + prod(a.shape[b.ndim:]) == prod(a.shape[:b.ndim])" + ) + + a = a.reshape(prod, prod) + b = b.ravel() + res = wrap(solve(a, b)) + res.shape = oldshape + return res + + +def _solve_dispatcher(a, b): + return (a, b) + + +@array_function_dispatch(_solve_dispatcher) +def solve(a, b): + """ + Solve a linear matrix equation, or system of linear scalar equations. + + Computes the "exact" solution, `x`, of the well-determined, i.e., full + rank, linear matrix equation `ax = b`. + + Parameters + ---------- + a : (..., M, M) array_like + Coefficient matrix. + b : {(M,), (..., M, K)}, array_like + Ordinate or "dependent variable" values. + + Returns + ------- + x : {(..., M,), (..., M, K)} ndarray + Solution to the system a x = b. Returned shape is (..., M) if b is + shape (M,) and (..., M, K) if b is (..., M, K), where the "..." part is + broadcasted between a and b. + + Raises + ------ + LinAlgError + If `a` is singular or not square. + + See Also + -------- + scipy.linalg.solve : Similar function in SciPy. + + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The solutions are computed using LAPACK routine ``_gesv``. + + `a` must be square and of full-rank, i.e., all rows (or, equivalently, + columns) must be linearly independent; if either is not true, use + `lstsq` for the least-squares best "solution" of the + system/equation. + + .. versionchanged:: 2.0 + + The b array is only treated as a shape (M,) column vector if it is + exactly 1-dimensional. In all other instances it is treated as a stack + of (M, K) matrices. Previously b would be treated as a stack of (M,) + vectors if b.ndim was equal to a.ndim - 1. + + References + ---------- + .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, + FL, Academic Press, Inc., 1980, pg. 22. + + Examples + -------- + Solve the system of equations: + ``x0 + 2 * x1 = 1`` and + ``3 * x0 + 5 * x1 = 2``: + + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 5]]) + >>> b = np.array([1, 2]) + >>> x = np.linalg.solve(a, b) + >>> x + array([-1., 1.]) + + Check that the solution is correct: + + >>> np.allclose(np.dot(a, x), b) + True + + """ + a, _ = _makearray(a) + _assert_stacked_square(a) + b, wrap = _makearray(b) + t, result_t = _commonType(a, b) + + # We use the b = (..., M,) logic, only if the number of extra dimensions + # match exactly + if b.ndim == 1: + gufunc = _umath_linalg.solve1 + else: + gufunc = _umath_linalg.solve + + signature = 'DD->D' if isComplexType(t) else 'dd->d' + with errstate(call=_raise_linalgerror_singular, invalid='call', + over='ignore', divide='ignore', under='ignore'): + r = gufunc(a, b, signature=signature) + + return wrap(r.astype(result_t, copy=False)) + + +def _tensorinv_dispatcher(a, ind=None): + return (a,) + + +@array_function_dispatch(_tensorinv_dispatcher) +def tensorinv(a, ind=2): + """ + Compute the 'inverse' of an N-dimensional array. + + The result is an inverse for `a` relative to the tensordot operation + ``tensordot(a, b, ind)``, i. e., up to floating-point accuracy, + ``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the + tensordot operation. + + Parameters + ---------- + a : array_like + Tensor to 'invert'. Its shape must be 'square', i. e., + ``prod(a.shape[:ind]) == prod(a.shape[ind:])``. + ind : int, optional + Number of first indices that are involved in the inverse sum. + Must be a positive integer, default is 2. + + Returns + ------- + b : ndarray + `a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``. + + Raises + ------ + LinAlgError + If `a` is singular or not 'square' (in the above sense). + + See Also + -------- + numpy.tensordot, tensorsolve + + Examples + -------- + >>> import numpy as np + >>> a = np.eye(4*6).reshape((4, 6, 8, 3)) + >>> ainv = np.linalg.tensorinv(a, ind=2) + >>> ainv.shape + (8, 3, 4, 6) + >>> rng = np.random.default_rng() + >>> b = rng.normal(size=(4, 6)) + >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b)) + True + + >>> a = np.eye(4*6).reshape((24, 8, 3)) + >>> ainv = np.linalg.tensorinv(a, ind=1) + >>> ainv.shape + (8, 3, 24) + >>> rng = np.random.default_rng() + >>> b = rng.normal(size=24) + >>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) + True + + """ + a = asarray(a) + oldshape = a.shape + prod = 1 + if ind > 0: + invshape = oldshape[ind:] + oldshape[:ind] + for k in oldshape[ind:]: + prod *= k + else: + raise ValueError("Invalid ind argument.") + a = a.reshape(prod, -1) + ia = inv(a) + return ia.reshape(*invshape) + + +# Matrix inversion + +def _unary_dispatcher(a): + return (a,) + + +@array_function_dispatch(_unary_dispatcher) +def inv(a): + """ + Compute the inverse of a matrix. + + Given a square matrix `a`, return the matrix `ainv` satisfying + ``a @ ainv = ainv @ a = eye(a.shape[0])``. + + Parameters + ---------- + a : (..., M, M) array_like + Matrix to be inverted. + + Returns + ------- + ainv : (..., M, M) ndarray or matrix + Inverse of the matrix `a`. + + Raises + ------ + LinAlgError + If `a` is not square or inversion fails. + + See Also + -------- + scipy.linalg.inv : Similar function in SciPy. + numpy.linalg.cond : Compute the condition number of a matrix. + numpy.linalg.svd : Compute the singular value decomposition of a matrix. + + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + If `a` is detected to be singular, a `LinAlgError` is raised. If `a` is + ill-conditioned, a `LinAlgError` may or may not be raised, and results may + be inaccurate due to floating-point errors. + + References + ---------- + .. [1] Wikipedia, "Condition number", + https://en.wikipedia.org/wiki/Condition_number + + Examples + -------- + >>> import numpy as np + >>> from numpy.linalg import inv + >>> a = np.array([[1., 2.], [3., 4.]]) + >>> ainv = inv(a) + >>> np.allclose(a @ ainv, np.eye(2)) + True + >>> np.allclose(ainv @ a, np.eye(2)) + True + + If a is a matrix object, then the return value is a matrix as well: + + >>> ainv = inv(np.matrix(a)) + >>> ainv + matrix([[-2. , 1. ], + [ 1.5, -0.5]]) + + Inverses of several matrices can be computed at once: + + >>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]]) + >>> inv(a) + array([[[-2. , 1. ], + [ 1.5 , -0.5 ]], + [[-1.25, 0.75], + [ 0.75, -0.25]]]) + + If a matrix is close to singular, the computed inverse may not satisfy + ``a @ ainv = ainv @ a = eye(a.shape[0])`` even if a `LinAlgError` + is not raised: + + >>> a = np.array([[2,4,6],[2,0,2],[6,8,14]]) + >>> inv(a) # No errors raised + array([[-1.12589991e+15, -5.62949953e+14, 5.62949953e+14], + [-1.12589991e+15, -5.62949953e+14, 5.62949953e+14], + [ 1.12589991e+15, 5.62949953e+14, -5.62949953e+14]]) + >>> a @ inv(a) + array([[ 0. , -0.5 , 0. ], # may vary + [-0.5 , 0.625, 0.25 ], + [ 0. , 0. , 1. ]]) + + To detect ill-conditioned matrices, you can use `numpy.linalg.cond` to + compute its *condition number* [1]_. The larger the condition number, the + more ill-conditioned the matrix is. As a rule of thumb, if the condition + number ``cond(a) = 10**k``, then you may lose up to ``k`` digits of + accuracy on top of what would be lost to the numerical method due to loss + of precision from arithmetic methods. + + >>> from numpy.linalg import cond + >>> cond(a) + np.float64(8.659885634118668e+17) # may vary + + It is also possible to detect ill-conditioning by inspecting the matrix's + singular values directly. The ratio between the largest and the smallest + singular value is the condition number: + + >>> from numpy.linalg import svd + >>> sigma = svd(a, compute_uv=False) # Do not compute singular vectors + >>> sigma.max()/sigma.min() + 8.659885634118668e+17 # may vary + + """ + a, wrap = _makearray(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + + signature = 'D->D' if isComplexType(t) else 'd->d' + with errstate(call=_raise_linalgerror_singular, invalid='call', + over='ignore', divide='ignore', under='ignore'): + ainv = _umath_linalg.inv(a, signature=signature) + return wrap(ainv.astype(result_t, copy=False)) + + +def _matrix_power_dispatcher(a, n): + return (a,) + + +@array_function_dispatch(_matrix_power_dispatcher) +def matrix_power(a, n): + """ + Raise a square matrix to the (integer) power `n`. + + For positive integers `n`, the power is computed by repeated matrix + squarings and matrix multiplications. If ``n == 0``, the identity matrix + of the same shape as M is returned. If ``n < 0``, the inverse + is computed and then raised to the ``abs(n)``. + + .. note:: Stacks of object matrices are not currently supported. + + Parameters + ---------- + a : (..., M, M) array_like + Matrix to be "powered". + n : int + The exponent can be any integer or long integer, positive, + negative, or zero. + + Returns + ------- + a**n : (..., M, M) ndarray or matrix object + The return value is the same shape and type as `M`; + if the exponent is positive or zero then the type of the + elements is the same as those of `M`. If the exponent is + negative the elements are floating-point. + + Raises + ------ + LinAlgError + For matrices that are not square or that (for negative powers) cannot + be inverted numerically. + + Examples + -------- + >>> import numpy as np + >>> from numpy.linalg import matrix_power + >>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit + >>> matrix_power(i, 3) # should = -i + array([[ 0, -1], + [ 1, 0]]) + >>> matrix_power(i, 0) + array([[1, 0], + [0, 1]]) + >>> matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements + array([[ 0., 1.], + [-1., 0.]]) + + Somewhat more sophisticated example + + >>> q = np.zeros((4, 4)) + >>> q[0:2, 0:2] = -i + >>> q[2:4, 2:4] = i + >>> q # one of the three quaternion units not equal to 1 + array([[ 0., -1., 0., 0.], + [ 1., 0., 0., 0.], + [ 0., 0., 0., 1.], + [ 0., 0., -1., 0.]]) + >>> matrix_power(q, 2) # = -np.eye(4) + array([[-1., 0., 0., 0.], + [ 0., -1., 0., 0.], + [ 0., 0., -1., 0.], + [ 0., 0., 0., -1.]]) + + """ + a = asanyarray(a) + _assert_stacked_square(a) + + try: + n = operator.index(n) + except TypeError as e: + raise TypeError("exponent must be an integer") from e + + # Fall back on dot for object arrays. Object arrays are not supported by + # the current implementation of matmul using einsum + if a.dtype != object: + fmatmul = matmul + elif a.ndim == 2: + fmatmul = dot + else: + raise NotImplementedError( + "matrix_power not supported for stacks of object arrays") + + if n == 0: + a = empty_like(a) + a[...] = eye(a.shape[-2], dtype=a.dtype) + return a + + elif n < 0: + a = inv(a) + n = abs(n) + + # short-cuts. + if n == 1: + return a + + elif n == 2: + return fmatmul(a, a) + + elif n == 3: + return fmatmul(fmatmul(a, a), a) + + # Use binary decomposition to reduce the number of matrix multiplications. + # Here, we iterate over the bits of n, from LSB to MSB, raise `a` to + # increasing powers of 2, and multiply into the result as needed. + z = result = None + while n > 0: + z = a if z is None else fmatmul(z, z) + n, bit = divmod(n, 2) + if bit: + result = z if result is None else fmatmul(result, z) + + return result + + +# Cholesky decomposition + +def _cholesky_dispatcher(a, /, *, upper=None): + return (a,) + + +@array_function_dispatch(_cholesky_dispatcher) +def cholesky(a, /, *, upper=False): + """ + Cholesky decomposition. + + Return the lower or upper Cholesky decomposition, ``L * L.H`` or + ``U.H * U``, of the square matrix ``a``, where ``L`` is lower-triangular, + ``U`` is upper-triangular, and ``.H`` is the conjugate transpose operator + (which is the ordinary transpose if ``a`` is real-valued). ``a`` must be + Hermitian (symmetric if real-valued) and positive-definite. No checking is + performed to verify whether ``a`` is Hermitian or not. In addition, only + the lower or upper-triangular and diagonal elements of ``a`` are used. + Only ``L`` or ``U`` is actually returned. + + Parameters + ---------- + a : (..., M, M) array_like + Hermitian (symmetric if all elements are real), positive-definite + input matrix. + upper : bool + If ``True``, the result must be the upper-triangular Cholesky factor. + If ``False``, the result must be the lower-triangular Cholesky factor. + Default: ``False``. + + Returns + ------- + L : (..., M, M) array_like + Lower or upper-triangular Cholesky factor of `a`. Returns a matrix + object if `a` is a matrix object. + + Raises + ------ + LinAlgError + If the decomposition fails, for example, if `a` is not + positive-definite. + + See Also + -------- + scipy.linalg.cholesky : Similar function in SciPy. + scipy.linalg.cholesky_banded : Cholesky decompose a banded Hermitian + positive-definite matrix. + scipy.linalg.cho_factor : Cholesky decomposition of a matrix, to use in + `scipy.linalg.cho_solve`. + + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The Cholesky decomposition is often used as a fast way of solving + + .. math:: A \\mathbf{x} = \\mathbf{b} + + (when `A` is both Hermitian/symmetric and positive-definite). + + First, we solve for :math:`\\mathbf{y}` in + + .. math:: L \\mathbf{y} = \\mathbf{b}, + + and then for :math:`\\mathbf{x}` in + + .. math:: L^{H} \\mathbf{x} = \\mathbf{y}. + + Examples + -------- + >>> import numpy as np + >>> A = np.array([[1,-2j],[2j,5]]) + >>> A + array([[ 1.+0.j, -0.-2.j], + [ 0.+2.j, 5.+0.j]]) + >>> L = np.linalg.cholesky(A) + >>> L + array([[1.+0.j, 0.+0.j], + [0.+2.j, 1.+0.j]]) + >>> np.dot(L, L.T.conj()) # verify that L * L.H = A + array([[1.+0.j, 0.-2.j], + [0.+2.j, 5.+0.j]]) + >>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like? + >>> np.linalg.cholesky(A) # an ndarray object is returned + array([[1.+0.j, 0.+0.j], + [0.+2.j, 1.+0.j]]) + >>> # But a matrix object is returned if A is a matrix object + >>> np.linalg.cholesky(np.matrix(A)) + matrix([[ 1.+0.j, 0.+0.j], + [ 0.+2.j, 1.+0.j]]) + >>> # The upper-triangular Cholesky factor can also be obtained. + >>> np.linalg.cholesky(A, upper=True) + array([[1.-0.j, 0.-2.j], + [0.-0.j, 1.-0.j]]) + + """ + gufunc = _umath_linalg.cholesky_up if upper else _umath_linalg.cholesky_lo + a, wrap = _makearray(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + signature = 'D->D' if isComplexType(t) else 'd->d' + with errstate(call=_raise_linalgerror_nonposdef, invalid='call', + over='ignore', divide='ignore', under='ignore'): + r = gufunc(a, signature=signature) + return wrap(r.astype(result_t, copy=False)) + + +# outer product + + +def _outer_dispatcher(x1, x2): + return (x1, x2) + + +@array_function_dispatch(_outer_dispatcher) +def outer(x1, x2, /): + """ + Compute the outer product of two vectors. + + This function is Array API compatible. Compared to ``np.outer`` + it accepts 1-dimensional inputs only. + + Parameters + ---------- + x1 : (M,) array_like + One-dimensional input array of size ``N``. + Must have a numeric data type. + x2 : (N,) array_like + One-dimensional input array of size ``M``. + Must have a numeric data type. + + Returns + ------- + out : (M, N) ndarray + ``out[i, j] = a[i] * b[j]`` + + See also + -------- + outer + + Examples + -------- + Make a (*very* coarse) grid for computing a Mandelbrot set: + + >>> rl = np.linalg.outer(np.ones((5,)), np.linspace(-2, 2, 5)) + >>> rl + array([[-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.]]) + >>> im = np.linalg.outer(1j*np.linspace(2, -2, 5), np.ones((5,))) + >>> im + array([[0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j], + [0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j], + [0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], + [0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j], + [0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]]) + >>> grid = rl + im + >>> grid + array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j], + [-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j], + [-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j], + [-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j], + [-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]]) + + An example using a "vector" of letters: + + >>> x = np.array(['a', 'b', 'c'], dtype=object) + >>> np.linalg.outer(x, [1, 2, 3]) + array([['a', 'aa', 'aaa'], + ['b', 'bb', 'bbb'], + ['c', 'cc', 'ccc']], dtype=object) + + """ + x1 = asanyarray(x1) + x2 = asanyarray(x2) + if x1.ndim != 1 or x2.ndim != 1: + raise ValueError( + "Input arrays must be one-dimensional, but they are " + f"{x1.ndim=} and {x2.ndim=}." + ) + return _core_outer(x1, x2, out=None) + + +# QR decomposition + + +def _qr_dispatcher(a, mode=None): + return (a,) + + +@array_function_dispatch(_qr_dispatcher) +def qr(a, mode='reduced'): + """ + Compute the qr factorization of a matrix. + + Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is + upper-triangular. + + Parameters + ---------- + a : array_like, shape (..., M, N) + An array-like object with the dimensionality of at least 2. + mode : {'reduced', 'complete', 'r', 'raw'}, optional, default: 'reduced' + If K = min(M, N), then + + * 'reduced' : returns Q, R with dimensions (..., M, K), (..., K, N) + * 'complete' : returns Q, R with dimensions (..., M, M), (..., M, N) + * 'r' : returns R only with dimensions (..., K, N) + * 'raw' : returns h, tau with dimensions (..., N, M), (..., K,) + + The options 'reduced', 'complete, and 'raw' are new in numpy 1.8, + see the notes for more information. The default is 'reduced', and to + maintain backward compatibility with earlier versions of numpy both + it and the old default 'full' can be omitted. Note that array h + returned in 'raw' mode is transposed for calling Fortran. The + 'economic' mode is deprecated. The modes 'full' and 'economic' may + be passed using only the first letter for backwards compatibility, + but all others must be spelled out. See the Notes for more + explanation. + + + Returns + ------- + Q : ndarray of float or complex, optional + A matrix with orthonormal columns. When mode = 'complete' the + result is an orthogonal/unitary matrix depending on whether or not + a is real/complex. The determinant may be either +/- 1 in that + case. In case the number of dimensions in the input array is + greater than 2 then a stack of the matrices with above properties + is returned. + R : ndarray of float or complex, optional + The upper-triangular matrix or a stack of upper-triangular + matrices if the number of dimensions in the input array is greater + than 2. + (h, tau) : ndarrays of np.double or np.cdouble, optional + The array h contains the Householder reflectors that generate q + along with r. The tau array contains scaling factors for the + reflectors. In the deprecated 'economic' mode only h is returned. + + Raises + ------ + LinAlgError + If factoring fails. + + See Also + -------- + scipy.linalg.qr : Similar function in SciPy. + scipy.linalg.rq : Compute RQ decomposition of a matrix. + + Notes + ----- + When mode is 'reduced' or 'complete', the result will be a namedtuple with + the attributes ``Q`` and ``R``. + + This is an interface to the LAPACK routines ``dgeqrf``, ``zgeqrf``, + ``dorgqr``, and ``zungqr``. + + For more information on the qr factorization, see for example: + https://en.wikipedia.org/wiki/QR_factorization + + Subclasses of `ndarray` are preserved except for the 'raw' mode. So if + `a` is of type `matrix`, all the return values will be matrices too. + + New 'reduced', 'complete', and 'raw' options for mode were added in + NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In + addition the options 'full' and 'economic' were deprecated. Because + 'full' was the previous default and 'reduced' is the new default, + backward compatibility can be maintained by letting `mode` default. + The 'raw' option was added so that LAPACK routines that can multiply + arrays by q using the Householder reflectors can be used. Note that in + this case the returned arrays are of type np.double or np.cdouble and + the h array is transposed to be FORTRAN compatible. No routines using + the 'raw' return are currently exposed by numpy, but some are available + in lapack_lite and just await the necessary work. + + Examples + -------- + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> a = rng.normal(size=(9, 6)) + >>> Q, R = np.linalg.qr(a) + >>> np.allclose(a, np.dot(Q, R)) # a does equal QR + True + >>> R2 = np.linalg.qr(a, mode='r') + >>> np.allclose(R, R2) # mode='r' returns the same R as mode='full' + True + >>> a = np.random.normal(size=(3, 2, 2)) # Stack of 2 x 2 matrices as input + >>> Q, R = np.linalg.qr(a) + >>> Q.shape + (3, 2, 2) + >>> R.shape + (3, 2, 2) + >>> np.allclose(a, np.matmul(Q, R)) + True + + Example illustrating a common use of `qr`: solving of least squares + problems + + What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for + the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points + and you'll see that it should be y0 = 0, m = 1.) The answer is provided + by solving the over-determined matrix equation ``Ax = b``, where:: + + A = array([[0, 1], [1, 1], [1, 1], [2, 1]]) + x = array([[y0], [m]]) + b = array([[1], [0], [2], [1]]) + + If A = QR such that Q is orthonormal (which is always possible via + Gram-Schmidt), then ``x = inv(R) * (Q.T) * b``. (In numpy practice, + however, we simply use `lstsq`.) + + >>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]]) + >>> A + array([[0, 1], + [1, 1], + [1, 1], + [2, 1]]) + >>> b = np.array([1, 2, 2, 3]) + >>> Q, R = np.linalg.qr(A) + >>> p = np.dot(Q.T, b) + >>> np.dot(np.linalg.inv(R), p) + array([ 1., 1.]) + + """ + if mode not in ('reduced', 'complete', 'r', 'raw'): + if mode in ('f', 'full'): + # 2013-04-01, 1.8 + msg = ( + "The 'full' option is deprecated in favor of 'reduced'.\n" + "For backward compatibility let mode default." + ) + warnings.warn(msg, DeprecationWarning, stacklevel=2) + mode = 'reduced' + elif mode in ('e', 'economic'): + # 2013-04-01, 1.8 + msg = "The 'economic' option is deprecated." + warnings.warn(msg, DeprecationWarning, stacklevel=2) + mode = 'economic' + else: + raise ValueError(f"Unrecognized mode '{mode}'") + + a, wrap = _makearray(a) + _assert_stacked_2d(a) + m, n = a.shape[-2:] + t, result_t = _commonType(a) + a = a.astype(t, copy=True) + a = _to_native_byte_order(a) + mn = min(m, n) + + signature = 'D->D' if isComplexType(t) else 'd->d' + with errstate(call=_raise_linalgerror_qr, invalid='call', + over='ignore', divide='ignore', under='ignore'): + tau = _umath_linalg.qr_r_raw(a, signature=signature) + + # handle modes that don't return q + if mode == 'r': + r = triu(a[..., :mn, :]) + r = r.astype(result_t, copy=False) + return wrap(r) + + if mode == 'raw': + q = transpose(a) + q = q.astype(result_t, copy=False) + tau = tau.astype(result_t, copy=False) + return wrap(q), tau + + if mode == 'economic': + a = a.astype(result_t, copy=False) + return wrap(a) + + # mc is the number of columns in the resulting q + # matrix. If the mode is complete then it is + # same as number of rows, and if the mode is reduced, + # then it is the minimum of number of rows and columns. + if mode == 'complete' and m > n: + mc = m + gufunc = _umath_linalg.qr_complete + else: + mc = mn + gufunc = _umath_linalg.qr_reduced + + signature = 'DD->D' if isComplexType(t) else 'dd->d' + with errstate(call=_raise_linalgerror_qr, invalid='call', + over='ignore', divide='ignore', under='ignore'): + q = gufunc(a, tau, signature=signature) + r = triu(a[..., :mc, :]) + + q = q.astype(result_t, copy=False) + r = r.astype(result_t, copy=False) + + return QRResult(wrap(q), wrap(r)) + +# Eigenvalues + + +@array_function_dispatch(_unary_dispatcher) +def eigvals(a): + """ + Compute the eigenvalues of a general matrix. + + Main difference between `eigvals` and `eig`: the eigenvectors aren't + returned. + + Parameters + ---------- + a : (..., M, M) array_like + A complex- or real-valued matrix whose eigenvalues will be computed. + + Returns + ------- + w : (..., M,) ndarray + The eigenvalues, each repeated according to its multiplicity. + They are not necessarily ordered, nor are they necessarily + real for real matrices. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eig : eigenvalues and right eigenvectors of general arrays + eigvalsh : eigenvalues of real symmetric or complex Hermitian + (conjugate symmetric) arrays. + eigh : eigenvalues and eigenvectors of real symmetric or complex + Hermitian (conjugate symmetric) arrays. + scipy.linalg.eigvals : Similar function in SciPy. + + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + This is implemented using the ``_geev`` LAPACK routines which compute + the eigenvalues and eigenvectors of general square arrays. + + Examples + -------- + Illustration, using the fact that the eigenvalues of a diagonal matrix + are its diagonal elements, that multiplying a matrix on the left + by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose + of `Q`), preserves the eigenvalues of the "middle" matrix. In other words, + if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as + ``A``: + + >>> import numpy as np + >>> from numpy import linalg as LA + >>> x = np.random.random() + >>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]]) + >>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :]) + (1.0, 1.0, 0.0) + + Now multiply a diagonal matrix by ``Q`` on one side and + by ``Q.T`` on the other: + + >>> D = np.diag((-1,1)) + >>> LA.eigvals(D) + array([-1., 1.]) + >>> A = np.dot(Q, D) + >>> A = np.dot(A, Q.T) + >>> LA.eigvals(A) + array([ 1., -1.]) # random + + """ + a, wrap = _makearray(a) + _assert_stacked_square(a) + _assert_finite(a) + t, result_t = _commonType(a) + + signature = 'D->D' if isComplexType(t) else 'd->D' + with errstate(call=_raise_linalgerror_eigenvalues_nonconvergence, + invalid='call', over='ignore', divide='ignore', + under='ignore'): + w = _umath_linalg.eigvals(a, signature=signature) + + if not isComplexType(t): + if all(w.imag == 0): + w = w.real + result_t = _realType(result_t) + else: + result_t = _complexType(result_t) + + return w.astype(result_t, copy=False) + + +def _eigvalsh_dispatcher(a, UPLO=None): + return (a,) + + +@array_function_dispatch(_eigvalsh_dispatcher) +def eigvalsh(a, UPLO='L'): + """ + Compute the eigenvalues of a complex Hermitian or real symmetric matrix. + + Main difference from eigh: the eigenvectors are not computed. + + Parameters + ---------- + a : (..., M, M) array_like + A complex- or real-valued matrix whose eigenvalues are to be + computed. + UPLO : {'L', 'U'}, optional + Specifies whether the calculation is done with the lower triangular + part of `a` ('L', default) or the upper triangular part ('U'). + Irrespective of this value only the real parts of the diagonal will + be considered in the computation to preserve the notion of a Hermitian + matrix. It therefore follows that the imaginary part of the diagonal + will always be treated as zero. + + Returns + ------- + w : (..., M,) ndarray + The eigenvalues in ascending order, each repeated according to + its multiplicity. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eigh : eigenvalues and eigenvectors of real symmetric or complex Hermitian + (conjugate symmetric) arrays. + eigvals : eigenvalues of general real or complex arrays. + eig : eigenvalues and right eigenvectors of general real or complex + arrays. + scipy.linalg.eigvalsh : Similar function in SciPy. + + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The eigenvalues are computed using LAPACK routines ``_syevd``, ``_heevd``. + + Examples + -------- + >>> import numpy as np + >>> from numpy import linalg as LA + >>> a = np.array([[1, -2j], [2j, 5]]) + >>> LA.eigvalsh(a) + array([ 0.17157288, 5.82842712]) # may vary + + >>> # demonstrate the treatment of the imaginary part of the diagonal + >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]]) + >>> a + array([[5.+2.j, 9.-2.j], + [0.+2.j, 2.-1.j]]) + >>> # with UPLO='L' this is numerically equivalent to using LA.eigvals() + >>> # with: + >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]]) + >>> b + array([[5.+0.j, 0.-2.j], + [0.+2.j, 2.+0.j]]) + >>> wa = LA.eigvalsh(a) + >>> wb = LA.eigvals(b) + >>> wa + array([1., 6.]) + >>> wb + array([6.+0.j, 1.+0.j]) + + """ + UPLO = UPLO.upper() + if UPLO not in ('L', 'U'): + raise ValueError("UPLO argument must be 'L' or 'U'") + + if UPLO == 'L': + gufunc = _umath_linalg.eigvalsh_lo + else: + gufunc = _umath_linalg.eigvalsh_up + + a, wrap = _makearray(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + signature = 'D->d' if isComplexType(t) else 'd->d' + with errstate(call=_raise_linalgerror_eigenvalues_nonconvergence, + invalid='call', over='ignore', divide='ignore', + under='ignore'): + w = gufunc(a, signature=signature) + return w.astype(_realType(result_t), copy=False) + + +# Eigenvectors + + +@array_function_dispatch(_unary_dispatcher) +def eig(a): + """ + Compute the eigenvalues and right eigenvectors of a square array. + + Parameters + ---------- + a : (..., M, M) array + Matrices for which the eigenvalues and right eigenvectors will + be computed + + Returns + ------- + A namedtuple with the following attributes: + + eigenvalues : (..., M) array + The eigenvalues, each repeated according to its multiplicity. + The eigenvalues are not necessarily ordered. The resulting + array will be of complex type, unless the imaginary part is + zero in which case it will be cast to a real type. When `a` + is real the resulting eigenvalues will be real (0 imaginary + part) or occur in conjugate pairs + + eigenvectors : (..., M, M) array + The normalized (unit "length") eigenvectors, such that the + column ``eigenvectors[:,i]`` is the eigenvector corresponding to the + eigenvalue ``eigenvalues[i]``. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eigvals : eigenvalues of a non-symmetric array. + eigh : eigenvalues and eigenvectors of a real symmetric or complex + Hermitian (conjugate symmetric) array. + eigvalsh : eigenvalues of a real symmetric or complex Hermitian + (conjugate symmetric) array. + scipy.linalg.eig : Similar function in SciPy that also solves the + generalized eigenvalue problem. + scipy.linalg.schur : Best choice for unitary and other non-Hermitian + normal matrices. + + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + This is implemented using the ``_geev`` LAPACK routines which compute + the eigenvalues and eigenvectors of general square arrays. + + The number `w` is an eigenvalue of `a` if there exists a vector `v` such + that ``a @ v = w * v``. Thus, the arrays `a`, `eigenvalues`, and + `eigenvectors` satisfy the equations ``a @ eigenvectors[:,i] = + eigenvalues[i] * eigenvectors[:,i]`` for :math:`i \\in \\{0,...,M-1\\}`. + + The array `eigenvectors` may not be of maximum rank, that is, some of the + columns may be linearly dependent, although round-off error may obscure + that fact. If the eigenvalues are all different, then theoretically the + eigenvectors are linearly independent and `a` can be diagonalized by a + similarity transformation using `eigenvectors`, i.e, ``inv(eigenvectors) @ + a @ eigenvectors`` is diagonal. + + For non-Hermitian normal matrices the SciPy function `scipy.linalg.schur` + is preferred because the matrix `eigenvectors` is guaranteed to be + unitary, which is not the case when using `eig`. The Schur factorization + produces an upper triangular matrix rather than a diagonal matrix, but for + normal matrices only the diagonal of the upper triangular matrix is + needed, the rest is roundoff error. + + Finally, it is emphasized that `eigenvectors` consists of the *right* (as + in right-hand side) eigenvectors of `a`. A vector `y` satisfying ``y.T @ a + = z * y.T`` for some number `z` is called a *left* eigenvector of `a`, + and, in general, the left and right eigenvectors of a matrix are not + necessarily the (perhaps conjugate) transposes of each other. + + References + ---------- + G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL, + Academic Press, Inc., 1980, Various pp. + + Examples + -------- + >>> import numpy as np + >>> from numpy import linalg as LA + + (Almost) trivial example with real eigenvalues and eigenvectors. + + >>> eigenvalues, eigenvectors = LA.eig(np.diag((1, 2, 3))) + >>> eigenvalues + array([1., 2., 3.]) + >>> eigenvectors + array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) + + Real matrix possessing complex eigenvalues and eigenvectors; + note that the eigenvalues are complex conjugates of each other. + + >>> eigenvalues, eigenvectors = LA.eig(np.array([[1, -1], [1, 1]])) + >>> eigenvalues + array([1.+1.j, 1.-1.j]) + >>> eigenvectors + array([[0.70710678+0.j , 0.70710678-0.j ], + [0. -0.70710678j, 0. +0.70710678j]]) + + Complex-valued matrix with real eigenvalues (but complex-valued + eigenvectors); note that ``a.conj().T == a``, i.e., `a` is Hermitian. + + >>> a = np.array([[1, 1j], [-1j, 1]]) + >>> eigenvalues, eigenvectors = LA.eig(a) + >>> eigenvalues + array([2.+0.j, 0.+0.j]) + >>> eigenvectors + array([[ 0. +0.70710678j, 0.70710678+0.j ], # may vary + [ 0.70710678+0.j , -0. +0.70710678j]]) + + Be careful about round-off error! + + >>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]]) + >>> # Theor. eigenvalues are 1 +/- 1e-9 + >>> eigenvalues, eigenvectors = LA.eig(a) + >>> eigenvalues + array([1., 1.]) + >>> eigenvectors + array([[1., 0.], + [0., 1.]]) + + """ + a, wrap = _makearray(a) + _assert_stacked_square(a) + _assert_finite(a) + t, result_t = _commonType(a) + + signature = 'D->DD' if isComplexType(t) else 'd->DD' + with errstate(call=_raise_linalgerror_eigenvalues_nonconvergence, + invalid='call', over='ignore', divide='ignore', + under='ignore'): + w, vt = _umath_linalg.eig(a, signature=signature) + + if not isComplexType(t) and all(w.imag == 0.0): + w = w.real + vt = vt.real + result_t = _realType(result_t) + else: + result_t = _complexType(result_t) + + vt = vt.astype(result_t, copy=False) + return EigResult(w.astype(result_t, copy=False), wrap(vt)) + + +@array_function_dispatch(_eigvalsh_dispatcher) +def eigh(a, UPLO='L'): + """ + Return the eigenvalues and eigenvectors of a complex Hermitian + (conjugate symmetric) or a real symmetric matrix. + + Returns two objects, a 1-D array containing the eigenvalues of `a`, and + a 2-D square array or matrix (depending on the input type) of the + corresponding eigenvectors (in columns). + + Parameters + ---------- + a : (..., M, M) array + Hermitian or real symmetric matrices whose eigenvalues and + eigenvectors are to be computed. + UPLO : {'L', 'U'}, optional + Specifies whether the calculation is done with the lower triangular + part of `a` ('L', default) or the upper triangular part ('U'). + Irrespective of this value only the real parts of the diagonal will + be considered in the computation to preserve the notion of a Hermitian + matrix. It therefore follows that the imaginary part of the diagonal + will always be treated as zero. + + Returns + ------- + A namedtuple with the following attributes: + + eigenvalues : (..., M) ndarray + The eigenvalues in ascending order, each repeated according to + its multiplicity. + eigenvectors : {(..., M, M) ndarray, (..., M, M) matrix} + The column ``eigenvectors[:, i]`` is the normalized eigenvector + corresponding to the eigenvalue ``eigenvalues[i]``. Will return a + matrix object if `a` is a matrix object. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eigvalsh : eigenvalues of real symmetric or complex Hermitian + (conjugate symmetric) arrays. + eig : eigenvalues and right eigenvectors for non-symmetric arrays. + eigvals : eigenvalues of non-symmetric arrays. + scipy.linalg.eigh : Similar function in SciPy (but also solves the + generalized eigenvalue problem). + + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The eigenvalues/eigenvectors are computed using LAPACK routines ``_syevd``, + ``_heevd``. + + The eigenvalues of real symmetric or complex Hermitian matrices are always + real. [1]_ The array `eigenvalues` of (column) eigenvectors is unitary and + `a`, `eigenvalues`, and `eigenvectors` satisfy the equations ``dot(a, + eigenvectors[:, i]) = eigenvalues[i] * eigenvectors[:, i]``. + + References + ---------- + .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, + FL, Academic Press, Inc., 1980, pg. 222. + + Examples + -------- + >>> import numpy as np + >>> from numpy import linalg as LA + >>> a = np.array([[1, -2j], [2j, 5]]) + >>> a + array([[ 1.+0.j, -0.-2.j], + [ 0.+2.j, 5.+0.j]]) + >>> eigenvalues, eigenvectors = LA.eigh(a) + >>> eigenvalues + array([0.17157288, 5.82842712]) + >>> eigenvectors + array([[-0.92387953+0.j , -0.38268343+0.j ], # may vary + [ 0. +0.38268343j, 0. -0.92387953j]]) + + >>> (np.dot(a, eigenvectors[:, 0]) - + ... eigenvalues[0] * eigenvectors[:, 0]) # verify 1st eigenval/vec pair + array([5.55111512e-17+0.0000000e+00j, 0.00000000e+00+1.2490009e-16j]) + >>> (np.dot(a, eigenvectors[:, 1]) - + ... eigenvalues[1] * eigenvectors[:, 1]) # verify 2nd eigenval/vec pair + array([0.+0.j, 0.+0.j]) + + >>> A = np.matrix(a) # what happens if input is a matrix object + >>> A + matrix([[ 1.+0.j, -0.-2.j], + [ 0.+2.j, 5.+0.j]]) + >>> eigenvalues, eigenvectors = LA.eigh(A) + >>> eigenvalues + array([0.17157288, 5.82842712]) + >>> eigenvectors + matrix([[-0.92387953+0.j , -0.38268343+0.j ], # may vary + [ 0. +0.38268343j, 0. -0.92387953j]]) + + >>> # demonstrate the treatment of the imaginary part of the diagonal + >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]]) + >>> a + array([[5.+2.j, 9.-2.j], + [0.+2.j, 2.-1.j]]) + >>> # with UPLO='L' this is numerically equivalent to using LA.eig() with: + >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]]) + >>> b + array([[5.+0.j, 0.-2.j], + [0.+2.j, 2.+0.j]]) + >>> wa, va = LA.eigh(a) + >>> wb, vb = LA.eig(b) + >>> wa + array([1., 6.]) + >>> wb + array([6.+0.j, 1.+0.j]) + >>> va + array([[-0.4472136 +0.j , -0.89442719+0.j ], # may vary + [ 0. +0.89442719j, 0. -0.4472136j ]]) + >>> vb + array([[ 0.89442719+0.j , -0. +0.4472136j], + [-0. +0.4472136j, 0.89442719+0.j ]]) + + """ + UPLO = UPLO.upper() + if UPLO not in ('L', 'U'): + raise ValueError("UPLO argument must be 'L' or 'U'") + + a, wrap = _makearray(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + + if UPLO == 'L': + gufunc = _umath_linalg.eigh_lo + else: + gufunc = _umath_linalg.eigh_up + + signature = 'D->dD' if isComplexType(t) else 'd->dd' + with errstate(call=_raise_linalgerror_eigenvalues_nonconvergence, + invalid='call', over='ignore', divide='ignore', + under='ignore'): + w, vt = gufunc(a, signature=signature) + w = w.astype(_realType(result_t), copy=False) + vt = vt.astype(result_t, copy=False) + return EighResult(w, wrap(vt)) + + +# Singular value decomposition + +def _svd_dispatcher(a, full_matrices=None, compute_uv=None, hermitian=None): + return (a,) + + +@array_function_dispatch(_svd_dispatcher) +def svd(a, full_matrices=True, compute_uv=True, hermitian=False): + """ + Singular Value Decomposition. + + When `a` is a 2D array, and ``full_matrices=False``, then it is + factorized as ``u @ np.diag(s) @ vh = (u * s) @ vh``, where + `u` and the Hermitian transpose of `vh` are 2D arrays with + orthonormal columns and `s` is a 1D array of `a`'s singular + values. When `a` is higher-dimensional, SVD is applied in + stacked mode as explained below. + + Parameters + ---------- + a : (..., M, N) array_like + A real or complex array with ``a.ndim >= 2``. + full_matrices : bool, optional + If True (default), `u` and `vh` have the shapes ``(..., M, M)`` and + ``(..., N, N)``, respectively. Otherwise, the shapes are + ``(..., M, K)`` and ``(..., K, N)``, respectively, where + ``K = min(M, N)``. + compute_uv : bool, optional + Whether or not to compute `u` and `vh` in addition to `s`. True + by default. + hermitian : bool, optional + If True, `a` is assumed to be Hermitian (symmetric if real-valued), + enabling a more efficient method for finding singular values. + Defaults to False. + + Returns + ------- + U : { (..., M, M), (..., M, K) } array + Unitary array(s). The first ``a.ndim - 2`` dimensions have the same + size as those of the input `a`. The size of the last two dimensions + depends on the value of `full_matrices`. Only returned when + `compute_uv` is True. + S : (..., K) array + Vector(s) with the singular values, within each vector sorted in + descending order. The first ``a.ndim - 2`` dimensions have the same + size as those of the input `a`. + Vh : { (..., N, N), (..., K, N) } array + Unitary array(s). The first ``a.ndim - 2`` dimensions have the same + size as those of the input `a`. The size of the last two dimensions + depends on the value of `full_matrices`. Only returned when + `compute_uv` is True. + + Raises + ------ + LinAlgError + If SVD computation does not converge. + + See Also + -------- + scipy.linalg.svd : Similar function in SciPy. + scipy.linalg.svdvals : Compute singular values of a matrix. + + Notes + ----- + When `compute_uv` is True, the result is a namedtuple with the following + attribute names: `U`, `S`, and `Vh`. + + The decomposition is performed using LAPACK routine ``_gesdd``. + + SVD is usually described for the factorization of a 2D matrix :math:`A`. + The higher-dimensional case will be discussed below. In the 2D case, SVD is + written as :math:`A = U S V^H`, where :math:`A = a`, :math:`U= u`, + :math:`S= \\mathtt{np.diag}(s)` and :math:`V^H = vh`. The 1D array `s` + contains the singular values of `a` and `u` and `vh` are unitary. The rows + of `vh` are the eigenvectors of :math:`A^H A` and the columns of `u` are + the eigenvectors of :math:`A A^H`. In both cases the corresponding + (possibly non-zero) eigenvalues are given by ``s**2``. + + If `a` has more than two dimensions, then broadcasting rules apply, as + explained in :ref:`routines.linalg-broadcasting`. This means that SVD is + working in "stacked" mode: it iterates over all indices of the first + ``a.ndim - 2`` dimensions and for each combination SVD is applied to the + last two indices. The matrix `a` can be reconstructed from the + decomposition with either ``(u * s[..., None, :]) @ vh`` or + ``u @ (s[..., None] * vh)``. (The ``@`` operator can be replaced by the + function ``np.matmul`` for python versions below 3.5.) + + If `a` is a ``matrix`` object (as opposed to an ``ndarray``), then so are + all the return values. + + Examples + -------- + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> a = rng.normal(size=(9, 6)) + 1j*rng.normal(size=(9, 6)) + >>> b = rng.normal(size=(2, 7, 8, 3)) + 1j*rng.normal(size=(2, 7, 8, 3)) + + + Reconstruction based on full SVD, 2D case: + + >>> U, S, Vh = np.linalg.svd(a, full_matrices=True) + >>> U.shape, S.shape, Vh.shape + ((9, 9), (6,), (6, 6)) + >>> np.allclose(a, np.dot(U[:, :6] * S, Vh)) + True + >>> smat = np.zeros((9, 6), dtype=complex) + >>> smat[:6, :6] = np.diag(S) + >>> np.allclose(a, np.dot(U, np.dot(smat, Vh))) + True + + Reconstruction based on reduced SVD, 2D case: + + >>> U, S, Vh = np.linalg.svd(a, full_matrices=False) + >>> U.shape, S.shape, Vh.shape + ((9, 6), (6,), (6, 6)) + >>> np.allclose(a, np.dot(U * S, Vh)) + True + >>> smat = np.diag(S) + >>> np.allclose(a, np.dot(U, np.dot(smat, Vh))) + True + + Reconstruction based on full SVD, 4D case: + + >>> U, S, Vh = np.linalg.svd(b, full_matrices=True) + >>> U.shape, S.shape, Vh.shape + ((2, 7, 8, 8), (2, 7, 3), (2, 7, 3, 3)) + >>> np.allclose(b, np.matmul(U[..., :3] * S[..., None, :], Vh)) + True + >>> np.allclose(b, np.matmul(U[..., :3], S[..., None] * Vh)) + True + + Reconstruction based on reduced SVD, 4D case: + + >>> U, S, Vh = np.linalg.svd(b, full_matrices=False) + >>> U.shape, S.shape, Vh.shape + ((2, 7, 8, 3), (2, 7, 3), (2, 7, 3, 3)) + >>> np.allclose(b, np.matmul(U * S[..., None, :], Vh)) + True + >>> np.allclose(b, np.matmul(U, S[..., None] * Vh)) + True + + """ + import numpy as np + a, wrap = _makearray(a) + + if hermitian: + # note: lapack svd returns eigenvalues with s ** 2 sorted descending, + # but eig returns s sorted ascending, so we re-order the eigenvalues + # and related arrays to have the correct order + if compute_uv: + s, u = eigh(a) + sgn = sign(s) + s = abs(s) + sidx = argsort(s)[..., ::-1] + sgn = np.take_along_axis(sgn, sidx, axis=-1) + s = np.take_along_axis(s, sidx, axis=-1) + u = np.take_along_axis(u, sidx[..., None, :], axis=-1) + # singular values are unsigned, move the sign into v + vt = transpose(u * sgn[..., None, :]).conjugate() + return SVDResult(wrap(u), s, wrap(vt)) + else: + s = eigvalsh(a) + s = abs(s) + return sort(s)[..., ::-1] + + _assert_stacked_2d(a) + t, result_t = _commonType(a) + + m, n = a.shape[-2:] + if compute_uv: + if full_matrices: + gufunc = _umath_linalg.svd_f + else: + gufunc = _umath_linalg.svd_s + + signature = 'D->DdD' if isComplexType(t) else 'd->ddd' + with errstate(call=_raise_linalgerror_svd_nonconvergence, + invalid='call', over='ignore', divide='ignore', + under='ignore'): + u, s, vh = gufunc(a, signature=signature) + u = u.astype(result_t, copy=False) + s = s.astype(_realType(result_t), copy=False) + vh = vh.astype(result_t, copy=False) + return SVDResult(wrap(u), s, wrap(vh)) + else: + signature = 'D->d' if isComplexType(t) else 'd->d' + with errstate(call=_raise_linalgerror_svd_nonconvergence, + invalid='call', over='ignore', divide='ignore', + under='ignore'): + s = _umath_linalg.svd(a, signature=signature) + s = s.astype(_realType(result_t), copy=False) + return s + + +def _svdvals_dispatcher(x): + return (x,) + + +@array_function_dispatch(_svdvals_dispatcher) +def svdvals(x, /): + """ + Returns the singular values of a matrix (or a stack of matrices) ``x``. + When x is a stack of matrices, the function will compute the singular + values for each matrix in the stack. + + This function is Array API compatible. + + Calling ``np.svdvals(x)`` to get singular values is the same as + ``np.svd(x, compute_uv=False, hermitian=False)``. + + Parameters + ---------- + x : (..., M, N) array_like + Input array having shape (..., M, N) and whose last two + dimensions form matrices on which to perform singular value + decomposition. Should have a floating-point data type. + + Returns + ------- + out : ndarray + An array with shape (..., K) that contains the vector(s) + of singular values of length K, where K = min(M, N). + + See Also + -------- + scipy.linalg.svdvals : Compute singular values of a matrix. + + Examples + -------- + + >>> np.linalg.svdvals([[1, 2, 3, 4, 5], + ... [1, 4, 9, 16, 25], + ... [1, 8, 27, 64, 125]]) + array([146.68862757, 5.57510612, 0.60393245]) + + Determine the rank of a matrix using singular values: + + >>> s = np.linalg.svdvals([[1, 2, 3], + ... [2, 4, 6], + ... [-1, 1, -1]]); s + array([8.38434191e+00, 1.64402274e+00, 2.31534378e-16]) + >>> np.count_nonzero(s > 1e-10) # Matrix of rank 2 + 2 + + """ + return svd(x, compute_uv=False, hermitian=False) + + +def _cond_dispatcher(x, p=None): + return (x,) + + +@array_function_dispatch(_cond_dispatcher) +def cond(x, p=None): + """ + Compute the condition number of a matrix. + + This function is capable of returning the condition number using + one of seven different norms, depending on the value of `p` (see + Parameters below). + + Parameters + ---------- + x : (..., M, N) array_like + The matrix whose condition number is sought. + p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional + Order of the norm used in the condition number computation: + + ===== ============================ + p norm for matrices + ===== ============================ + None 2-norm, computed directly using the ``SVD`` + 'fro' Frobenius norm + inf max(sum(abs(x), axis=1)) + -inf min(sum(abs(x), axis=1)) + 1 max(sum(abs(x), axis=0)) + -1 min(sum(abs(x), axis=0)) + 2 2-norm (largest sing. value) + -2 smallest singular value + ===== ============================ + + inf means the `numpy.inf` object, and the Frobenius norm is + the root-of-sum-of-squares norm. + + Returns + ------- + c : {float, inf} + The condition number of the matrix. May be infinite. + + See Also + -------- + numpy.linalg.norm + + Notes + ----- + The condition number of `x` is defined as the norm of `x` times the + norm of the inverse of `x` [1]_; the norm can be the usual L2-norm + (root-of-sum-of-squares) or one of a number of other matrix norms. + + References + ---------- + .. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL, + Academic Press, Inc., 1980, pg. 285. + + Examples + -------- + >>> import numpy as np + >>> from numpy import linalg as LA + >>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]]) + >>> a + array([[ 1, 0, -1], + [ 0, 1, 0], + [ 1, 0, 1]]) + >>> LA.cond(a) + 1.4142135623730951 + >>> LA.cond(a, 'fro') + 3.1622776601683795 + >>> LA.cond(a, np.inf) + 2.0 + >>> LA.cond(a, -np.inf) + 1.0 + >>> LA.cond(a, 1) + 2.0 + >>> LA.cond(a, -1) + 1.0 + >>> LA.cond(a, 2) + 1.4142135623730951 + >>> LA.cond(a, -2) + 0.70710678118654746 # may vary + >>> (min(LA.svd(a, compute_uv=False)) * + ... min(LA.svd(LA.inv(a), compute_uv=False))) + 0.70710678118654746 # may vary + + """ + x = asarray(x) # in case we have a matrix + if _is_empty_2d(x): + raise LinAlgError("cond is not defined on empty arrays") + if p is None or p in {2, -2}: + s = svd(x, compute_uv=False) + with errstate(all='ignore'): + if p == -2: + r = s[..., -1] / s[..., 0] + else: + r = s[..., 0] / s[..., -1] + else: + # Call inv(x) ignoring errors. The result array will + # contain nans in the entries where inversion failed. + _assert_stacked_square(x) + t, result_t = _commonType(x) + result_t = _realType(result_t) # condition number is always real + signature = 'D->D' if isComplexType(t) else 'd->d' + with errstate(all='ignore'): + invx = _umath_linalg.inv(x, signature=signature) + r = norm(x, p, axis=(-2, -1)) * norm(invx, p, axis=(-2, -1)) + r = r.astype(result_t, copy=False) + + # Convert nans to infs unless the original array had nan entries + nan_mask = isnan(r) + if nan_mask.any(): + nan_mask &= ~isnan(x).any(axis=(-2, -1)) + if r.ndim > 0: + r[nan_mask] = inf + elif nan_mask: + # Convention is to return scalars instead of 0d arrays. + r = r.dtype.type(inf) + + return r + + +def _matrix_rank_dispatcher(A, tol=None, hermitian=None, *, rtol=None): + return (A,) + + +@array_function_dispatch(_matrix_rank_dispatcher) +def matrix_rank(A, tol=None, hermitian=False, *, rtol=None): + """ + Return matrix rank of array using SVD method + + Rank of the array is the number of singular values of the array that are + greater than `tol`. + + Parameters + ---------- + A : {(M,), (..., M, N)} array_like + Input vector or stack of matrices. + tol : (...) array_like, float, optional + Threshold below which SVD values are considered zero. If `tol` is + None, and ``S`` is an array with singular values for `M`, and + ``eps`` is the epsilon value for datatype of ``S``, then `tol` is + set to ``S.max() * max(M, N) * eps``. + hermitian : bool, optional + If True, `A` is assumed to be Hermitian (symmetric if real-valued), + enabling a more efficient method for finding singular values. + Defaults to False. + rtol : (...) array_like, float, optional + Parameter for the relative tolerance component. Only ``tol`` or + ``rtol`` can be set at a time. Defaults to ``max(M, N) * eps``. + + .. versionadded:: 2.0.0 + + Returns + ------- + rank : (...) array_like + Rank of A. + + Notes + ----- + The default threshold to detect rank deficiency is a test on the magnitude + of the singular values of `A`. By default, we identify singular values + less than ``S.max() * max(M, N) * eps`` as indicating rank deficiency + (with the symbols defined above). This is the algorithm MATLAB uses [1]_. + It also appears in *Numerical recipes* in the discussion of SVD solutions + for linear least squares [2]_. + + This default threshold is designed to detect rank deficiency accounting + for the numerical errors of the SVD computation. Imagine that there + is a column in `A` that is an exact (in floating point) linear combination + of other columns in `A`. Computing the SVD on `A` will not produce + a singular value exactly equal to 0 in general: any difference of + the smallest SVD value from 0 will be caused by numerical imprecision + in the calculation of the SVD. Our threshold for small SVD values takes + this numerical imprecision into account, and the default threshold will + detect such numerical rank deficiency. The threshold may declare a matrix + `A` rank deficient even if the linear combination of some columns of `A` + is not exactly equal to another column of `A` but only numerically very + close to another column of `A`. + + We chose our default threshold because it is in wide use. Other thresholds + are possible. For example, elsewhere in the 2007 edition of *Numerical + recipes* there is an alternative threshold of ``S.max() * + np.finfo(A.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe + this threshold as being based on "expected roundoff error" (p 71). + + The thresholds above deal with floating point roundoff error in the + calculation of the SVD. However, you may have more information about + the sources of error in `A` that would make you consider other tolerance + values to detect *effective* rank deficiency. The most useful measure + of the tolerance depends on the operations you intend to use on your + matrix. For example, if your data come from uncertain measurements with + uncertainties greater than floating point epsilon, choosing a tolerance + near that uncertainty may be preferable. The tolerance may be absolute + if the uncertainties are absolute rather than relative. + + References + ---------- + .. [1] MATLAB reference documentation, "Rank" + https://www.mathworks.com/help/techdoc/ref/rank.html + .. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery, + "Numerical Recipes (3rd edition)", Cambridge University Press, 2007, + page 795. + + Examples + -------- + >>> import numpy as np + >>> from numpy.linalg import matrix_rank + >>> matrix_rank(np.eye(4)) # Full rank matrix + 4 + >>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix + >>> matrix_rank(I) + 3 + >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0 + 1 + >>> matrix_rank(np.zeros((4,))) + 0 + """ + if rtol is not None and tol is not None: + raise ValueError("`tol` and `rtol` can't be both set.") + + A = asarray(A) + if A.ndim < 2: + return int(not all(A == 0)) + S = svd(A, compute_uv=False, hermitian=hermitian) + + if tol is None: + if rtol is None: + rtol = max(A.shape[-2:]) * finfo(S.dtype).eps + else: + rtol = asarray(rtol)[..., newaxis] + tol = S.max(axis=-1, keepdims=True) * rtol + else: + tol = asarray(tol)[..., newaxis] + + return count_nonzero(S > tol, axis=-1) + + +# Generalized inverse + +def _pinv_dispatcher(a, rcond=None, hermitian=None, *, rtol=None): + return (a,) + + +@array_function_dispatch(_pinv_dispatcher) +def pinv(a, rcond=None, hermitian=False, *, rtol=_NoValue): + """ + Compute the (Moore-Penrose) pseudo-inverse of a matrix. + + Calculate the generalized inverse of a matrix using its + singular-value decomposition (SVD) and including all + *large* singular values. + + Parameters + ---------- + a : (..., M, N) array_like + Matrix or stack of matrices to be pseudo-inverted. + rcond : (...) array_like of float, optional + Cutoff for small singular values. + Singular values less than or equal to + ``rcond * largest_singular_value`` are set to zero. + Broadcasts against the stack of matrices. Default: ``1e-15``. + hermitian : bool, optional + If True, `a` is assumed to be Hermitian (symmetric if real-valued), + enabling a more efficient method for finding singular values. + Defaults to False. + rtol : (...) array_like of float, optional + Same as `rcond`, but it's an Array API compatible parameter name. + Only `rcond` or `rtol` can be set at a time. If none of them are + provided then NumPy's ``1e-15`` default is used. If ``rtol=None`` + is passed then the API standard default is used. + + .. versionadded:: 2.0.0 + + Returns + ------- + B : (..., N, M) ndarray + The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so + is `B`. + + Raises + ------ + LinAlgError + If the SVD computation does not converge. + + See Also + -------- + scipy.linalg.pinv : Similar function in SciPy. + scipy.linalg.pinvh : Compute the (Moore-Penrose) pseudo-inverse of a + Hermitian matrix. + + Notes + ----- + The pseudo-inverse of a matrix A, denoted :math:`A^+`, is + defined as: "the matrix that 'solves' [the least-squares problem] + :math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then + :math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`. + + It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular + value decomposition of A, then + :math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are + orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting + of A's so-called singular values, (followed, typically, by + zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix + consisting of the reciprocals of A's singular values + (again, followed by zeros). [1]_ + + References + ---------- + .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, + FL, Academic Press, Inc., 1980, pp. 139-142. + + Examples + -------- + The following example checks that ``a * a+ * a == a`` and + ``a+ * a * a+ == a+``: + + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> a = rng.normal(size=(9, 6)) + >>> B = np.linalg.pinv(a) + >>> np.allclose(a, np.dot(a, np.dot(B, a))) + True + >>> np.allclose(B, np.dot(B, np.dot(a, B))) + True + + """ + a, wrap = _makearray(a) + if rcond is None: + if rtol is _NoValue: + rcond = 1e-15 + elif rtol is None: + rcond = max(a.shape[-2:]) * finfo(a.dtype).eps + else: + rcond = rtol + elif rtol is not _NoValue: + raise ValueError("`rtol` and `rcond` can't be both set.") + else: + # NOTE: Deprecate `rcond` in a few versions. + pass + + rcond = asarray(rcond) + if _is_empty_2d(a): + m, n = a.shape[-2:] + res = empty(a.shape[:-2] + (n, m), dtype=a.dtype) + return wrap(res) + a = a.conjugate() + u, s, vt = svd(a, full_matrices=False, hermitian=hermitian) + + # discard small singular values + cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True) + large = s > cutoff + s = divide(1, s, where=large, out=s) + s[~large] = 0 + + res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u))) + return wrap(res) + + +# Determinant + + +@array_function_dispatch(_unary_dispatcher) +def slogdet(a): + """ + Compute the sign and (natural) logarithm of the determinant of an array. + + If an array has a very small or very large determinant, then a call to + `det` may overflow or underflow. This routine is more robust against such + issues, because it computes the logarithm of the determinant rather than + the determinant itself. + + Parameters + ---------- + a : (..., M, M) array_like + Input array, has to be a square 2-D array. + + Returns + ------- + A namedtuple with the following attributes: + + sign : (...) array_like + A number representing the sign of the determinant. For a real matrix, + this is 1, 0, or -1. For a complex matrix, this is a complex number + with absolute value 1 (i.e., it is on the unit circle), or else 0. + logabsdet : (...) array_like + The natural log of the absolute value of the determinant. + + If the determinant is zero, then `sign` will be 0 and `logabsdet` + will be -inf. In all cases, the determinant is equal to + ``sign * np.exp(logabsdet)``. + + See Also + -------- + det + + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The determinant is computed via LU factorization using the LAPACK + routine ``z/dgetrf``. + + Examples + -------- + The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``: + + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4]]) + >>> (sign, logabsdet) = np.linalg.slogdet(a) + >>> (sign, logabsdet) + (-1, 0.69314718055994529) # may vary + >>> sign * np.exp(logabsdet) + -2.0 + + Computing log-determinants for a stack of matrices: + + >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ]) + >>> a.shape + (3, 2, 2) + >>> sign, logabsdet = np.linalg.slogdet(a) + >>> (sign, logabsdet) + (array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154])) + >>> sign * np.exp(logabsdet) + array([-2., -3., -8.]) + + This routine succeeds where ordinary `det` does not: + + >>> np.linalg.det(np.eye(500) * 0.1) + 0.0 + >>> np.linalg.slogdet(np.eye(500) * 0.1) + (1, -1151.2925464970228) + + """ + a = asarray(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + real_t = _realType(result_t) + signature = 'D->Dd' if isComplexType(t) else 'd->dd' + sign, logdet = _umath_linalg.slogdet(a, signature=signature) + sign = sign.astype(result_t, copy=False) + logdet = logdet.astype(real_t, copy=False) + return SlogdetResult(sign, logdet) + + +@array_function_dispatch(_unary_dispatcher) +def det(a): + """ + Compute the determinant of an array. + + Parameters + ---------- + a : (..., M, M) array_like + Input array to compute determinants for. + + Returns + ------- + det : (...) array_like + Determinant of `a`. + + See Also + -------- + slogdet : Another way to represent the determinant, more suitable + for large matrices where underflow/overflow may occur. + scipy.linalg.det : Similar function in SciPy. + + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The determinant is computed via LU factorization using the LAPACK + routine ``z/dgetrf``. + + Examples + -------- + The determinant of a 2-D array [[a, b], [c, d]] is ad - bc: + + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4]]) + >>> np.linalg.det(a) + -2.0 # may vary + + Computing determinants for a stack of matrices: + + >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ]) + >>> a.shape + (3, 2, 2) + >>> np.linalg.det(a) + array([-2., -3., -8.]) + + """ + a = asarray(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + signature = 'D->D' if isComplexType(t) else 'd->d' + r = _umath_linalg.det(a, signature=signature) + r = r.astype(result_t, copy=False) + return r + + +# Linear Least Squares + +def _lstsq_dispatcher(a, b, rcond=None): + return (a, b) + + +@array_function_dispatch(_lstsq_dispatcher) +def lstsq(a, b, rcond=None): + r""" + Return the least-squares solution to a linear matrix equation. + + Computes the vector `x` that approximately solves the equation + ``a @ x = b``. The equation may be under-, well-, or over-determined + (i.e., the number of linearly independent rows of `a` can be less than, + equal to, or greater than its number of linearly independent columns). + If `a` is square and of full rank, then `x` (but for round-off error) + is the "exact" solution of the equation. Else, `x` minimizes the + Euclidean 2-norm :math:`||b - ax||`. If there are multiple minimizing + solutions, the one with the smallest 2-norm :math:`||x||` is returned. + + Parameters + ---------- + a : (M, N) array_like + "Coefficient" matrix. + b : {(M,), (M, K)} array_like + Ordinate or "dependent variable" values. If `b` is two-dimensional, + the least-squares solution is calculated for each of the `K` columns + of `b`. + rcond : float, optional + Cut-off ratio for small singular values of `a`. + For the purposes of rank determination, singular values are treated + as zero if they are smaller than `rcond` times the largest singular + value of `a`. + The default uses the machine precision times ``max(M, N)``. Passing + ``-1`` will use machine precision. + + .. versionchanged:: 2.0 + Previously, the default was ``-1``, but a warning was given that + this would change. + + Returns + ------- + x : {(N,), (N, K)} ndarray + Least-squares solution. If `b` is two-dimensional, + the solutions are in the `K` columns of `x`. + residuals : {(1,), (K,), (0,)} ndarray + Sums of squared residuals: Squared Euclidean 2-norm for each column in + ``b - a @ x``. + If the rank of `a` is < N or M <= N, this is an empty array. + If `b` is 1-dimensional, this is a (1,) shape array. + Otherwise the shape is (K,). + rank : int + Rank of matrix `a`. + s : (min(M, N),) ndarray + Singular values of `a`. + + Raises + ------ + LinAlgError + If computation does not converge. + + See Also + -------- + scipy.linalg.lstsq : Similar function in SciPy. + + Notes + ----- + If `b` is a matrix, then all array results are returned as matrices. + + Examples + -------- + Fit a line, ``y = mx + c``, through some noisy data-points: + + >>> import numpy as np + >>> x = np.array([0, 1, 2, 3]) + >>> y = np.array([-1, 0.2, 0.9, 2.1]) + + By examining the coefficients, we see that the line should have a + gradient of roughly 1 and cut the y-axis at, more or less, -1. + + We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]`` + and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`: + + >>> A = np.vstack([x, np.ones(len(x))]).T + >>> A + array([[ 0., 1.], + [ 1., 1.], + [ 2., 1.], + [ 3., 1.]]) + + >>> m, c = np.linalg.lstsq(A, y)[0] + >>> m, c + (1.0 -0.95) # may vary + + Plot the data along with the fitted line: + + >>> import matplotlib.pyplot as plt + >>> _ = plt.plot(x, y, 'o', label='Original data', markersize=10) + >>> _ = plt.plot(x, m*x + c, 'r', label='Fitted line') + >>> _ = plt.legend() + >>> plt.show() + + """ + a, _ = _makearray(a) + b, wrap = _makearray(b) + is_1d = b.ndim == 1 + if is_1d: + b = b[:, newaxis] + _assert_2d(a, b) + m, n = a.shape[-2:] + m2, n_rhs = b.shape[-2:] + if m != m2: + raise LinAlgError('Incompatible dimensions') + + t, result_t = _commonType(a, b) + result_real_t = _realType(result_t) + + if rcond is None: + rcond = finfo(t).eps * max(n, m) + + signature = 'DDd->Ddid' if isComplexType(t) else 'ddd->ddid' + if n_rhs == 0: + # lapack can't handle n_rhs = 0 - so allocate + # the array one larger in that axis + b = zeros(b.shape[:-2] + (m, n_rhs + 1), dtype=b.dtype) + + with errstate(call=_raise_linalgerror_lstsq, invalid='call', + over='ignore', divide='ignore', under='ignore'): + x, resids, rank, s = _umath_linalg.lstsq(a, b, rcond, + signature=signature) + if m == 0: + x[...] = 0 + if n_rhs == 0: + # remove the item we added + x = x[..., :n_rhs] + resids = resids[..., :n_rhs] + + # remove the axis we added + if is_1d: + x = x.squeeze(axis=-1) + # we probably should squeeze resids too, but we can't + # without breaking compatibility. + + # as documented + if rank != n or m <= n: + resids = array([], result_real_t) + + # coerce output arrays + s = s.astype(result_real_t, copy=False) + resids = resids.astype(result_real_t, copy=False) + # Copying lets the memory in r_parts be freed + x = x.astype(result_t, copy=True) + return wrap(x), wrap(resids), rank, s + + +def _multi_svd_norm(x, row_axis, col_axis, op, initial=None): + """Compute a function of the singular values of the 2-D matrices in `x`. + + This is a private utility function used by `numpy.linalg.norm()`. + + Parameters + ---------- + x : ndarray + row_axis, col_axis : int + The axes of `x` that hold the 2-D matrices. + op : callable + This should be either numpy.amin or `numpy.amax` or `numpy.sum`. + + Returns + ------- + result : float or ndarray + If `x` is 2-D, the return values is a float. + Otherwise, it is an array with ``x.ndim - 2`` dimensions. + The return values are either the minimum or maximum or sum of the + singular values of the matrices, depending on whether `op` + is `numpy.amin` or `numpy.amax` or `numpy.sum`. + + """ + y = moveaxis(x, (row_axis, col_axis), (-2, -1)) + result = op(svd(y, compute_uv=False), axis=-1, initial=initial) + return result + + +def _norm_dispatcher(x, ord=None, axis=None, keepdims=None): + return (x,) + + +@array_function_dispatch(_norm_dispatcher) +def norm(x, ord=None, axis=None, keepdims=False): + """ + Matrix or vector norm. + + This function is able to return one of eight different matrix norms, + or one of an infinite number of vector norms (described below), depending + on the value of the ``ord`` parameter. + + Parameters + ---------- + x : array_like + Input array. If `axis` is None, `x` must be 1-D or 2-D, unless `ord` + is None. If both `axis` and `ord` are None, the 2-norm of + ``x.ravel`` will be returned. + ord : {int, float, inf, -inf, 'fro', 'nuc'}, optional + Order of the norm (see table under ``Notes`` for what values are + supported for matrices and vectors respectively). inf means numpy's + `inf` object. The default is None. + axis : {None, int, 2-tuple of ints}, optional. + If `axis` is an integer, it specifies the axis of `x` along which to + compute the vector norms. If `axis` is a 2-tuple, it specifies the + axes that hold 2-D matrices, and the matrix norms of these matrices + are computed. If `axis` is None then either a vector norm (when `x` + is 1-D) or a matrix norm (when `x` is 2-D) is returned. The default + is None. + + keepdims : bool, optional + If this is set to True, the axes which are normed over are left in the + result as dimensions with size one. With this option the result will + broadcast correctly against the original `x`. + + Returns + ------- + n : float or ndarray + Norm of the matrix or vector(s). + + See Also + -------- + scipy.linalg.norm : Similar function in SciPy. + + Notes + ----- + For values of ``ord < 1``, the result is, strictly speaking, not a + mathematical 'norm', but it may still be useful for various numerical + purposes. + + The following norms can be calculated: + + ===== ============================ ========================== + ord norm for matrices norm for vectors + ===== ============================ ========================== + None Frobenius norm 2-norm + 'fro' Frobenius norm -- + 'nuc' nuclear norm -- + inf max(sum(abs(x), axis=1)) max(abs(x)) + -inf min(sum(abs(x), axis=1)) min(abs(x)) + 0 -- sum(x != 0) + 1 max(sum(abs(x), axis=0)) as below + -1 min(sum(abs(x), axis=0)) as below + 2 2-norm (largest sing. value) as below + -2 smallest singular value as below + other -- sum(abs(x)**ord)**(1./ord) + ===== ============================ ========================== + + The Frobenius norm is given by [1]_: + + :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}` + + The nuclear norm is the sum of the singular values. + + Both the Frobenius and nuclear norm orders are only defined for + matrices and raise a ValueError when ``x.ndim != 2``. + + References + ---------- + .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, + Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15 + + Examples + -------- + + >>> import numpy as np + >>> from numpy import linalg as LA + >>> a = np.arange(9) - 4 + >>> a + array([-4, -3, -2, ..., 2, 3, 4]) + >>> b = a.reshape((3, 3)) + >>> b + array([[-4, -3, -2], + [-1, 0, 1], + [ 2, 3, 4]]) + + >>> LA.norm(a) + 7.745966692414834 + >>> LA.norm(b) + 7.745966692414834 + >>> LA.norm(b, 'fro') + 7.745966692414834 + >>> LA.norm(a, np.inf) + 4.0 + >>> LA.norm(b, np.inf) + 9.0 + >>> LA.norm(a, -np.inf) + 0.0 + >>> LA.norm(b, -np.inf) + 2.0 + + >>> LA.norm(a, 1) + 20.0 + >>> LA.norm(b, 1) + 7.0 + >>> LA.norm(a, -1) + -4.6566128774142013e-010 + >>> LA.norm(b, -1) + 6.0 + >>> LA.norm(a, 2) + 7.745966692414834 + >>> LA.norm(b, 2) + 7.3484692283495345 + + >>> LA.norm(a, -2) + 0.0 + >>> LA.norm(b, -2) + 1.8570331885190563e-016 # may vary + >>> LA.norm(a, 3) + 5.8480354764257312 # may vary + >>> LA.norm(a, -3) + 0.0 + + Using the `axis` argument to compute vector norms: + + >>> c = np.array([[ 1, 2, 3], + ... [-1, 1, 4]]) + >>> LA.norm(c, axis=0) + array([ 1.41421356, 2.23606798, 5. ]) + >>> LA.norm(c, axis=1) + array([ 3.74165739, 4.24264069]) + >>> LA.norm(c, ord=1, axis=1) + array([ 6., 6.]) + + Using the `axis` argument to compute matrix norms: + + >>> m = np.arange(8).reshape(2,2,2) + >>> LA.norm(m, axis=(1,2)) + array([ 3.74165739, 11.22497216]) + >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :]) + (3.7416573867739413, 11.224972160321824) + + """ + x = asarray(x) + + if not issubclass(x.dtype.type, (inexact, object_)): + x = x.astype(float) + + # Immediately handle some default, simple, fast, and common cases. + if axis is None: + ndim = x.ndim + if ( + (ord is None) or + (ord in ('f', 'fro') and ndim == 2) or + (ord == 2 and ndim == 1) + ): + x = x.ravel(order='K') + if isComplexType(x.dtype.type): + x_real = x.real + x_imag = x.imag + sqnorm = x_real.dot(x_real) + x_imag.dot(x_imag) + else: + sqnorm = x.dot(x) + ret = sqrt(sqnorm) + if keepdims: + ret = ret.reshape(ndim * [1]) + return ret + + # Normalize the `axis` argument to a tuple. + nd = x.ndim + if axis is None: + axis = tuple(range(nd)) + elif not isinstance(axis, tuple): + try: + axis = int(axis) + except Exception as e: + raise TypeError( + "'axis' must be None, an integer or a tuple of integers" + ) from e + axis = (axis,) + + if len(axis) == 1: + if ord == inf: + return abs(x).max(axis=axis, keepdims=keepdims, initial=0) + elif ord == -inf: + return abs(x).min(axis=axis, keepdims=keepdims) + elif ord == 0: + # Zero norm + return ( + (x != 0) + .astype(x.real.dtype) + .sum(axis=axis, keepdims=keepdims) + ) + elif ord == 1: + # special case for speedup + return add.reduce(abs(x), axis=axis, keepdims=keepdims) + elif ord is None or ord == 2: + # special case for speedup + s = (x.conj() * x).real + return sqrt(add.reduce(s, axis=axis, keepdims=keepdims)) + # None of the str-type keywords for ord ('fro', 'nuc') + # are valid for vectors + elif isinstance(ord, str): + raise ValueError(f"Invalid norm order '{ord}' for vectors") + else: + absx = abs(x) + absx **= ord + ret = add.reduce(absx, axis=axis, keepdims=keepdims) + ret **= reciprocal(ord, dtype=ret.dtype) + return ret + elif len(axis) == 2: + row_axis, col_axis = axis + row_axis = normalize_axis_index(row_axis, nd) + col_axis = normalize_axis_index(col_axis, nd) + if row_axis == col_axis: + raise ValueError('Duplicate axes given.') + if ord == 2: + ret = _multi_svd_norm(x, row_axis, col_axis, amax, 0) + elif ord == -2: + ret = _multi_svd_norm(x, row_axis, col_axis, amin) + elif ord == 1: + if col_axis > row_axis: + col_axis -= 1 + ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis, initial=0) + elif ord == inf: + if row_axis > col_axis: + row_axis -= 1 + ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis, initial=0) + elif ord == -1: + if col_axis > row_axis: + col_axis -= 1 + ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis) + elif ord == -inf: + if row_axis > col_axis: + row_axis -= 1 + ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis) + elif ord in [None, 'fro', 'f']: + ret = sqrt(add.reduce((x.conj() * x).real, axis=axis)) + elif ord == 'nuc': + ret = _multi_svd_norm(x, row_axis, col_axis, sum, 0) + else: + raise ValueError("Invalid norm order for matrices.") + if keepdims: + ret_shape = list(x.shape) + ret_shape[axis[0]] = 1 + ret_shape[axis[1]] = 1 + ret = ret.reshape(ret_shape) + return ret + else: + raise ValueError("Improper number of dimensions to norm.") + + +# multi_dot + +def _multidot_dispatcher(arrays, *, out=None): + yield from arrays + yield out + + +@array_function_dispatch(_multidot_dispatcher) +def multi_dot(arrays, *, out=None): + """ + Compute the dot product of two or more arrays in a single function call, + while automatically selecting the fastest evaluation order. + + `multi_dot` chains `numpy.dot` and uses optimal parenthesization + of the matrices [1]_ [2]_. Depending on the shapes of the matrices, + this can speed up the multiplication a lot. + + If the first argument is 1-D it is treated as a row vector. + If the last argument is 1-D it is treated as a column vector. + The other arguments must be 2-D. + + Think of `multi_dot` as:: + + def multi_dot(arrays): return functools.reduce(np.dot, arrays) + + + Parameters + ---------- + arrays : sequence of array_like + If the first argument is 1-D it is treated as row vector. + If the last argument is 1-D it is treated as column vector. + The other arguments must be 2-D. + out : ndarray, optional + Output argument. This must have the exact kind that would be returned + if it was not used. In particular, it must have the right type, must be + C-contiguous, and its dtype must be the dtype that would be returned + for `dot(a, b)`. This is a performance feature. Therefore, if these + conditions are not met, an exception is raised, instead of attempting + to be flexible. + + Returns + ------- + output : ndarray + Returns the dot product of the supplied arrays. + + See Also + -------- + numpy.dot : dot multiplication with two arguments. + + References + ---------- + + .. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378 + .. [2] https://en.wikipedia.org/wiki/Matrix_chain_multiplication + + Examples + -------- + `multi_dot` allows you to write:: + + >>> import numpy as np + >>> from numpy.linalg import multi_dot + >>> # Prepare some data + >>> A = np.random.random((10000, 100)) + >>> B = np.random.random((100, 1000)) + >>> C = np.random.random((1000, 5)) + >>> D = np.random.random((5, 333)) + >>> # the actual dot multiplication + >>> _ = multi_dot([A, B, C, D]) + + instead of:: + + >>> _ = np.dot(np.dot(np.dot(A, B), C), D) + >>> # or + >>> _ = A.dot(B).dot(C).dot(D) + + Notes + ----- + The cost for a matrix multiplication can be calculated with the + following function:: + + def cost(A, B): + return A.shape[0] * A.shape[1] * B.shape[1] + + Assume we have three matrices + :math:`A_{10 \\times 100}, B_{100 \\times 5}, C_{5 \\times 50}`. + + The costs for the two different parenthesizations are as follows:: + + cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500 + cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000 + + """ + n = len(arrays) + # optimization only makes sense for len(arrays) > 2 + if n < 2: + raise ValueError("Expecting at least two arrays.") + elif n == 2: + return dot(arrays[0], arrays[1], out=out) + + arrays = [asanyarray(a) for a in arrays] + + # save original ndim to reshape the result array into the proper form later + ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim + # Explicitly convert vectors to 2D arrays to keep the logic of the internal + # _multi_dot_* functions as simple as possible. + if arrays[0].ndim == 1: + arrays[0] = atleast_2d(arrays[0]) + if arrays[-1].ndim == 1: + arrays[-1] = atleast_2d(arrays[-1]).T + _assert_2d(*arrays) + + # _multi_dot_three is much faster than _multi_dot_matrix_chain_order + if n == 3: + result = _multi_dot_three(arrays[0], arrays[1], arrays[2], out=out) + else: + order = _multi_dot_matrix_chain_order(arrays) + result = _multi_dot(arrays, order, 0, n - 1, out=out) + + # return proper shape + if ndim_first == 1 and ndim_last == 1: + return result[0, 0] # scalar + elif ndim_first == 1 or ndim_last == 1: + return result.ravel() # 1-D + else: + return result + + +def _multi_dot_three(A, B, C, out=None): + """ + Find the best order for three arrays and do the multiplication. + + For three arguments `_multi_dot_three` is approximately 15 times faster + than `_multi_dot_matrix_chain_order` + + """ + a0, a1b0 = A.shape + b1c0, c1 = C.shape + # cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1 + cost1 = a0 * b1c0 * (a1b0 + c1) + # cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1 + cost2 = a1b0 * c1 * (a0 + b1c0) + + if cost1 < cost2: + return dot(dot(A, B), C, out=out) + else: + return dot(A, dot(B, C), out=out) + + +def _multi_dot_matrix_chain_order(arrays, return_costs=False): + """ + Return a np.array that encodes the optimal order of multiplications. + + The optimal order array is then used by `_multi_dot()` to do the + multiplication. + + Also return the cost matrix if `return_costs` is `True` + + The implementation CLOSELY follows Cormen, "Introduction to Algorithms", + Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices. + + cost[i, j] = min([ + cost[prefix] + cost[suffix] + cost_mult(prefix, suffix) + for k in range(i, j)]) + + """ + n = len(arrays) + # p stores the dimensions of the matrices + # Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50] + p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]] + # m is a matrix of costs of the subproblems + # m[i,j]: min number of scalar multiplications needed to compute A_{i..j} + m = zeros((n, n), dtype=double) + # s is the actual ordering + # s[i, j] is the value of k at which we split the product A_i..A_j + s = empty((n, n), dtype=intp) + + for l in range(1, n): + for i in range(n - l): + j = i + l + m[i, j] = inf + for k in range(i, j): + q = m[i, k] + m[k + 1, j] + p[i] * p[k + 1] * p[j + 1] + if q < m[i, j]: + m[i, j] = q + s[i, j] = k # Note that Cormen uses 1-based index + + return (s, m) if return_costs else s + + +def _multi_dot(arrays, order, i, j, out=None): + """Actually do the multiplication with the given order.""" + if i == j: + # the initial call with non-None out should never get here + assert out is None + + return arrays[i] + else: + return dot(_multi_dot(arrays, order, i, order[i, j]), + _multi_dot(arrays, order, order[i, j] + 1, j), + out=out) + + +# diagonal + +def _diagonal_dispatcher(x, /, *, offset=None): + return (x,) + + +@array_function_dispatch(_diagonal_dispatcher) +def diagonal(x, /, *, offset=0): + """ + Returns specified diagonals of a matrix (or a stack of matrices) ``x``. + + This function is Array API compatible, contrary to + :py:func:`numpy.diagonal`, the matrix is assumed + to be defined by the last two dimensions. + + Parameters + ---------- + x : (...,M,N) array_like + Input array having shape (..., M, N) and whose innermost two + dimensions form MxN matrices. + offset : int, optional + Offset specifying the off-diagonal relative to the main diagonal, + where:: + + * offset = 0: the main diagonal. + * offset > 0: off-diagonal above the main diagonal. + * offset < 0: off-diagonal below the main diagonal. + + Returns + ------- + out : (...,min(N,M)) ndarray + An array containing the diagonals and whose shape is determined by + removing the last two dimensions and appending a dimension equal to + the size of the resulting diagonals. The returned array must have + the same data type as ``x``. + + See Also + -------- + numpy.diagonal + + Examples + -------- + >>> a = np.arange(4).reshape(2, 2); a + array([[0, 1], + [2, 3]]) + >>> np.linalg.diagonal(a) + array([0, 3]) + + A 3-D example: + + >>> a = np.arange(8).reshape(2, 2, 2); a + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> np.linalg.diagonal(a) + array([[0, 3], + [4, 7]]) + + Diagonals adjacent to the main diagonal can be obtained by using the + `offset` argument: + + >>> a = np.arange(9).reshape(3, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> np.linalg.diagonal(a, offset=1) # First superdiagonal + array([1, 5]) + >>> np.linalg.diagonal(a, offset=2) # Second superdiagonal + array([2]) + >>> np.linalg.diagonal(a, offset=-1) # First subdiagonal + array([3, 7]) + >>> np.linalg.diagonal(a, offset=-2) # Second subdiagonal + array([6]) + + The anti-diagonal can be obtained by reversing the order of elements + using either `numpy.flipud` or `numpy.fliplr`. + + >>> a = np.arange(9).reshape(3, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> np.linalg.diagonal(np.fliplr(a)) # Horizontal flip + array([2, 4, 6]) + >>> np.linalg.diagonal(np.flipud(a)) # Vertical flip + array([6, 4, 2]) + + Note that the order in which the diagonal is retrieved varies depending + on the flip function. + + """ + return _core_diagonal(x, offset, axis1=-2, axis2=-1) + + +# trace + +def _trace_dispatcher(x, /, *, offset=None, dtype=None): + return (x,) + + +@array_function_dispatch(_trace_dispatcher) +def trace(x, /, *, offset=0, dtype=None): + """ + Returns the sum along the specified diagonals of a matrix + (or a stack of matrices) ``x``. + + This function is Array API compatible, contrary to + :py:func:`numpy.trace`. + + Parameters + ---------- + x : (...,M,N) array_like + Input array having shape (..., M, N) and whose innermost two + dimensions form MxN matrices. + offset : int, optional + Offset specifying the off-diagonal relative to the main diagonal, + where:: + + * offset = 0: the main diagonal. + * offset > 0: off-diagonal above the main diagonal. + * offset < 0: off-diagonal below the main diagonal. + + dtype : dtype, optional + Data type of the returned array. + + Returns + ------- + out : ndarray + An array containing the traces and whose shape is determined by + removing the last two dimensions and storing the traces in the last + array dimension. For example, if x has rank k and shape: + (I, J, K, ..., L, M, N), then an output array has rank k-2 and shape: + (I, J, K, ..., L) where:: + + out[i, j, k, ..., l] = trace(a[i, j, k, ..., l, :, :]) + + The returned array must have a data type as described by the dtype + parameter above. + + See Also + -------- + numpy.trace + + Examples + -------- + >>> np.linalg.trace(np.eye(3)) + 3.0 + >>> a = np.arange(8).reshape((2, 2, 2)) + >>> np.linalg.trace(a) + array([3, 11]) + + Trace is computed with the last two axes as the 2-d sub-arrays. + This behavior differs from :py:func:`numpy.trace` which uses the first two + axes by default. + + >>> a = np.arange(24).reshape((3, 2, 2, 2)) + >>> np.linalg.trace(a).shape + (3, 2) + + Traces adjacent to the main diagonal can be obtained by using the + `offset` argument: + + >>> a = np.arange(9).reshape((3, 3)); a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> np.linalg.trace(a, offset=1) # First superdiagonal + 6 + >>> np.linalg.trace(a, offset=2) # Second superdiagonal + 2 + >>> np.linalg.trace(a, offset=-1) # First subdiagonal + 10 + >>> np.linalg.trace(a, offset=-2) # Second subdiagonal + 6 + + """ + return _core_trace(x, offset, axis1=-2, axis2=-1, dtype=dtype) + + +# cross + +def _cross_dispatcher(x1, x2, /, *, axis=None): + return (x1, x2,) + + +@array_function_dispatch(_cross_dispatcher) +def cross(x1, x2, /, *, axis=-1): + """ + Returns the cross product of 3-element vectors. + + If ``x1`` and/or ``x2`` are multi-dimensional arrays, then + the cross-product of each pair of corresponding 3-element vectors + is independently computed. + + This function is Array API compatible, contrary to + :func:`numpy.cross`. + + Parameters + ---------- + x1 : array_like + The first input array. + x2 : array_like + The second input array. Must be compatible with ``x1`` for all + non-compute axes. The size of the axis over which to compute + the cross-product must be the same size as the respective axis + in ``x1``. + axis : int, optional + The axis (dimension) of ``x1`` and ``x2`` containing the vectors for + which to compute the cross-product. Default: ``-1``. + + Returns + ------- + out : ndarray + An array containing the cross products. + + See Also + -------- + numpy.cross + + Examples + -------- + Vector cross-product. + + >>> x = np.array([1, 2, 3]) + >>> y = np.array([4, 5, 6]) + >>> np.linalg.cross(x, y) + array([-3, 6, -3]) + + Multiple vector cross-products. Note that the direction of the cross + product vector is defined by the *right-hand rule*. + + >>> x = np.array([[1,2,3], [4,5,6]]) + >>> y = np.array([[4,5,6], [1,2,3]]) + >>> np.linalg.cross(x, y) + array([[-3, 6, -3], + [ 3, -6, 3]]) + + >>> x = np.array([[1, 2], [3, 4], [5, 6]]) + >>> y = np.array([[4, 5], [6, 1], [2, 3]]) + >>> np.linalg.cross(x, y, axis=0) + array([[-24, 6], + [ 18, 24], + [-6, -18]]) + + """ + x1 = asanyarray(x1) + x2 = asanyarray(x2) + + if x1.shape[axis] != 3 or x2.shape[axis] != 3: + raise ValueError( + "Both input arrays must be (arrays of) 3-dimensional vectors, " + f"but they are {x1.shape[axis]} and {x2.shape[axis]} " + "dimensional instead." + ) + + return _core_cross(x1, x2, axis=axis) + + +# matmul + +def _matmul_dispatcher(x1, x2, /): + return (x1, x2) + + +@array_function_dispatch(_matmul_dispatcher) +def matmul(x1, x2, /): + """ + Computes the matrix product. + + This function is Array API compatible, contrary to + :func:`numpy.matmul`. + + Parameters + ---------- + x1 : array_like + The first input array. + x2 : array_like + The second input array. + + Returns + ------- + out : ndarray + The matrix product of the inputs. + This is a scalar only when both ``x1``, ``x2`` are 1-d vectors. + + Raises + ------ + ValueError + If the last dimension of ``x1`` is not the same size as + the second-to-last dimension of ``x2``. + + If a scalar value is passed in. + + See Also + -------- + numpy.matmul + + Examples + -------- + For 2-D arrays it is the matrix product: + + >>> a = np.array([[1, 0], + ... [0, 1]]) + >>> b = np.array([[4, 1], + ... [2, 2]]) + >>> np.linalg.matmul(a, b) + array([[4, 1], + [2, 2]]) + + For 2-D mixed with 1-D, the result is the usual. + + >>> a = np.array([[1, 0], + ... [0, 1]]) + >>> b = np.array([1, 2]) + >>> np.linalg.matmul(a, b) + array([1, 2]) + >>> np.linalg.matmul(b, a) + array([1, 2]) + + + Broadcasting is conventional for stacks of arrays + + >>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4)) + >>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2)) + >>> np.linalg.matmul(a,b).shape + (2, 2, 2) + >>> np.linalg.matmul(a, b)[0, 1, 1] + 98 + >>> sum(a[0, 1, :] * b[0 , :, 1]) + 98 + + Vector, vector returns the scalar inner product, but neither argument + is complex-conjugated: + + >>> np.linalg.matmul([2j, 3j], [2j, 3j]) + (-13+0j) + + Scalar multiplication raises an error. + + >>> np.linalg.matmul([1,2], 3) + Traceback (most recent call last): + ... + ValueError: matmul: Input operand 1 does not have enough dimensions ... + + """ + return _core_matmul(x1, x2) + + +# tensordot + +def _tensordot_dispatcher(x1, x2, /, *, axes=None): + return (x1, x2) + + +@array_function_dispatch(_tensordot_dispatcher) +def tensordot(x1, x2, /, *, axes=2): + return _core_tensordot(x1, x2, axes=axes) + + +tensordot.__doc__ = _core_tensordot.__doc__ + + +# matrix_transpose + +def _matrix_transpose_dispatcher(x): + return (x,) + +@array_function_dispatch(_matrix_transpose_dispatcher) +def matrix_transpose(x, /): + return _core_matrix_transpose(x) + + +matrix_transpose.__doc__ = f"""{_core_matrix_transpose.__doc__} + + Notes + ----- + This function is an alias of `numpy.matrix_transpose`. +""" + + +# matrix_norm + +def _matrix_norm_dispatcher(x, /, *, keepdims=None, ord=None): + return (x,) + +@array_function_dispatch(_matrix_norm_dispatcher) +def matrix_norm(x, /, *, keepdims=False, ord="fro"): + """ + Computes the matrix norm of a matrix (or a stack of matrices) ``x``. + + This function is Array API compatible. + + Parameters + ---------- + x : array_like + Input array having shape (..., M, N) and whose two innermost + dimensions form ``MxN`` matrices. + keepdims : bool, optional + If this is set to True, the axes which are normed over are left in + the result as dimensions with size one. Default: False. + ord : {1, -1, 2, -2, inf, -inf, 'fro', 'nuc'}, optional + The order of the norm. For details see the table under ``Notes`` + in `numpy.linalg.norm`. + + See Also + -------- + numpy.linalg.norm : Generic norm function + + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.arange(9) - 4 + >>> a + array([-4, -3, -2, ..., 2, 3, 4]) + >>> b = a.reshape((3, 3)) + >>> b + array([[-4, -3, -2], + [-1, 0, 1], + [ 2, 3, 4]]) + + >>> LA.matrix_norm(b) + 7.745966692414834 + >>> LA.matrix_norm(b, ord='fro') + 7.745966692414834 + >>> LA.matrix_norm(b, ord=np.inf) + 9.0 + >>> LA.matrix_norm(b, ord=-np.inf) + 2.0 + + >>> LA.matrix_norm(b, ord=1) + 7.0 + >>> LA.matrix_norm(b, ord=-1) + 6.0 + >>> LA.matrix_norm(b, ord=2) + 7.3484692283495345 + >>> LA.matrix_norm(b, ord=-2) + 1.8570331885190563e-016 # may vary + + """ + x = asanyarray(x) + return norm(x, axis=(-2, -1), keepdims=keepdims, ord=ord) + + +# vector_norm + +def _vector_norm_dispatcher(x, /, *, axis=None, keepdims=None, ord=None): + return (x,) + +@array_function_dispatch(_vector_norm_dispatcher) +def vector_norm(x, /, *, axis=None, keepdims=False, ord=2): + """ + Computes the vector norm of a vector (or batch of vectors) ``x``. + + This function is Array API compatible. + + Parameters + ---------- + x : array_like + Input array. + axis : {None, int, 2-tuple of ints}, optional + If an integer, ``axis`` specifies the axis (dimension) along which + to compute vector norms. If an n-tuple, ``axis`` specifies the axes + (dimensions) along which to compute batched vector norms. If ``None``, + the vector norm must be computed over all array values (i.e., + equivalent to computing the vector norm of a flattened array). + Default: ``None``. + keepdims : bool, optional + If this is set to True, the axes which are normed over are left in + the result as dimensions with size one. Default: False. + ord : {int, float, inf, -inf}, optional + The order of the norm. For details see the table under ``Notes`` + in `numpy.linalg.norm`. + + See Also + -------- + numpy.linalg.norm : Generic norm function + + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.arange(9) + 1 + >>> a + array([1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> b = a.reshape((3, 3)) + >>> b + array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + + >>> LA.vector_norm(b) + 16.881943016134134 + >>> LA.vector_norm(b, ord=np.inf) + 9.0 + >>> LA.vector_norm(b, ord=-np.inf) + 1.0 + + >>> LA.vector_norm(b, ord=0) + 9.0 + >>> LA.vector_norm(b, ord=1) + 45.0 + >>> LA.vector_norm(b, ord=-1) + 0.3534857623790153 + >>> LA.vector_norm(b, ord=2) + 16.881943016134134 + >>> LA.vector_norm(b, ord=-2) + 0.8058837395885292 + + """ + x = asanyarray(x) + shape = list(x.shape) + if axis is None: + # Note: np.linalg.norm() doesn't handle 0-D arrays + x = x.ravel() + _axis = 0 + elif isinstance(axis, tuple): + # Note: The axis argument supports any number of axes, whereas + # np.linalg.norm() only supports a single axis for vector norm. + normalized_axis = normalize_axis_tuple(axis, x.ndim) + rest = tuple(i for i in range(x.ndim) if i not in normalized_axis) + newshape = axis + rest + x = _core_transpose(x, newshape).reshape( + ( + prod([x.shape[i] for i in axis], dtype=int), + *[x.shape[i] for i in rest] + ) + ) + _axis = 0 + else: + _axis = axis + + res = norm(x, axis=_axis, ord=ord) + + if keepdims: + # We can't reuse np.linalg.norm(keepdims) because of the reshape hacks + # above to avoid matrix norm logic. + _axis = normalize_axis_tuple( + range(len(shape)) if axis is None else axis, len(shape) + ) + for i in _axis: + shape[i] = 1 + res = res.reshape(tuple(shape)) + + return res + + +# vecdot + +def _vecdot_dispatcher(x1, x2, /, *, axis=None): + return (x1, x2) + +@array_function_dispatch(_vecdot_dispatcher) +def vecdot(x1, x2, /, *, axis=-1): + """ + Computes the vector dot product. + + This function is restricted to arguments compatible with the Array API, + contrary to :func:`numpy.vecdot`. + + Let :math:`\\mathbf{a}` be a vector in ``x1`` and :math:`\\mathbf{b}` be + a corresponding vector in ``x2``. The dot product is defined as: + + .. math:: + \\mathbf{a} \\cdot \\mathbf{b} = \\sum_{i=0}^{n-1} \\overline{a_i}b_i + + over the dimension specified by ``axis`` and where :math:`\\overline{a_i}` + denotes the complex conjugate if :math:`a_i` is complex and the identity + otherwise. + + Parameters + ---------- + x1 : array_like + First input array. + x2 : array_like + Second input array. + axis : int, optional + Axis over which to compute the dot product. Default: ``-1``. + + Returns + ------- + output : ndarray + The vector dot product of the input. + + See Also + -------- + numpy.vecdot + + Examples + -------- + Get the projected size along a given normal for an array of vectors. + + >>> v = np.array([[0., 5., 0.], [0., 0., 10.], [0., 6., 8.]]) + >>> n = np.array([0., 0.6, 0.8]) + >>> np.linalg.vecdot(v, n) + array([ 3., 8., 10.]) + + """ + return _core_vecdot(x1, x2, axis=axis) diff --git a/local_packages/numpy/linalg/_linalg.pyi b/local_packages/numpy/linalg/_linalg.pyi new file mode 100644 index 0000000..60320b0 --- /dev/null +++ b/local_packages/numpy/linalg/_linalg.pyi @@ -0,0 +1,548 @@ +from collections.abc import Iterable +from typing import ( + Any, + Literal as L, + NamedTuple, + Never, + SupportsIndex, + SupportsInt, + TypeAlias, + TypeVar, + overload, +) + +import numpy as np +from numpy import ( + complex128, + complexfloating, + float64, + floating, + int32, + object_, + signedinteger, + timedelta64, + unsignedinteger, + vecdot, +) +from numpy._core.fromnumeric import matrix_transpose +from numpy._globals import _NoValueType +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _ArrayLikeNumber_co, + _ArrayLikeObject_co, + _ArrayLikeTD64_co, + _ArrayLikeUInt_co, + _NestedSequence, + _ShapeLike, +) +from numpy.linalg import LinAlgError + +__all__ = [ + "matrix_power", + "solve", + "tensorsolve", + "tensorinv", + "inv", + "cholesky", + "eigvals", + "eigvalsh", + "pinv", + "slogdet", + "det", + "svd", + "svdvals", + "eig", + "eigh", + "lstsq", + "norm", + "qr", + "cond", + "matrix_rank", + "LinAlgError", + "multi_dot", + "trace", + "diagonal", + "cross", + "outer", + "tensordot", + "matmul", + "matrix_transpose", + "matrix_norm", + "vector_norm", + "vecdot", +] + +_NumberT = TypeVar("_NumberT", bound=np.number) +_NumericScalarT = TypeVar("_NumericScalarT", bound=np.number | np.timedelta64 | np.object_) + +_ModeKind: TypeAlias = L["reduced", "complete", "r", "raw"] + +### + +fortran_int = np.intc + +class EigResult(NamedTuple): + eigenvalues: NDArray[Any] + eigenvectors: NDArray[Any] + +class EighResult(NamedTuple): + eigenvalues: NDArray[Any] + eigenvectors: NDArray[Any] + +class QRResult(NamedTuple): + Q: NDArray[Any] + R: NDArray[Any] + +class SlogdetResult(NamedTuple): + # TODO: `sign` and `logabsdet` are scalars for input 2D arrays and + # a `(x.ndim - 2)`` dimensionl arrays otherwise + sign: Any + logabsdet: Any + +class SVDResult(NamedTuple): + U: NDArray[Any] + S: NDArray[Any] + Vh: NDArray[Any] + +@overload +def tensorsolve( + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, + axes: Iterable[int] | None = None, +) -> NDArray[float64]: ... +@overload +def tensorsolve( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + axes: Iterable[int] | None = None, +) -> NDArray[floating]: ... +@overload +def tensorsolve( + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, + axes: Iterable[int] | None = None, +) -> NDArray[complexfloating]: ... + +@overload +def solve( + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, +) -> NDArray[float64]: ... +@overload +def solve( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, +) -> NDArray[floating]: ... +@overload +def solve( + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, +) -> NDArray[complexfloating]: ... + +@overload +def tensorinv( + a: _ArrayLikeInt_co, + ind: int = 2, +) -> NDArray[float64]: ... +@overload +def tensorinv( + a: _ArrayLikeFloat_co, + ind: int = 2, +) -> NDArray[floating]: ... +@overload +def tensorinv( + a: _ArrayLikeComplex_co, + ind: int = 2, +) -> NDArray[complexfloating]: ... + +@overload +def inv(a: _ArrayLikeInt_co) -> NDArray[float64]: ... +@overload +def inv(a: _ArrayLikeFloat_co) -> NDArray[floating]: ... +@overload +def inv(a: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +# TODO: The supported input and output dtypes are dependent on the value of `n`. +# For example: `n < 0` always casts integer types to float64 +def matrix_power( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + n: SupportsIndex, +) -> NDArray[Any]: ... + +@overload +def cholesky(a: _ArrayLikeInt_co, /, *, upper: bool = False) -> NDArray[float64]: ... +@overload +def cholesky(a: _ArrayLikeFloat_co, /, *, upper: bool = False) -> NDArray[floating]: ... +@overload +def cholesky(a: _ArrayLikeComplex_co, /, *, upper: bool = False) -> NDArray[complexfloating]: ... + +@overload +def outer(x1: _ArrayLike[Never], x2: _ArrayLike[Never], /) -> NDArray[Any]: ... +@overload +def outer(x1: _ArrayLikeBool_co, x2: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... +@overload +def outer(x1: _ArrayLike[_NumberT], x2: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... +@overload +def outer(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... +@overload +def outer(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... +@overload +def outer(x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... +@overload +def outer(x1: _ArrayLikeComplex_co, x2: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... +@overload +def outer(x1: _ArrayLikeTD64_co, x2: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... +@overload +def outer(x1: _ArrayLikeObject_co, x2: _ArrayLikeObject_co, /) -> NDArray[object_]: ... +@overload +def outer( + x1: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + x2: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + /, +) -> NDArray[Any]: ... + +@overload +def qr(a: _ArrayLikeInt_co, mode: _ModeKind = "reduced") -> QRResult: ... +@overload +def qr(a: _ArrayLikeFloat_co, mode: _ModeKind = "reduced") -> QRResult: ... +@overload +def qr(a: _ArrayLikeComplex_co, mode: _ModeKind = "reduced") -> QRResult: ... + +@overload +def eigvals(a: _ArrayLikeInt_co) -> NDArray[float64] | NDArray[complex128]: ... +@overload +def eigvals(a: _ArrayLikeFloat_co) -> NDArray[floating] | NDArray[complexfloating]: ... +@overload +def eigvals(a: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +@overload +def eigvalsh(a: _ArrayLikeInt_co, UPLO: L["L", "U", "l", "u"] = "L") -> NDArray[float64]: ... +@overload +def eigvalsh(a: _ArrayLikeComplex_co, UPLO: L["L", "U", "l", "u"] = "L") -> NDArray[floating]: ... + +@overload +def eig(a: _ArrayLikeInt_co) -> EigResult: ... +@overload +def eig(a: _ArrayLikeFloat_co) -> EigResult: ... +@overload +def eig(a: _ArrayLikeComplex_co) -> EigResult: ... + +@overload +def eigh( + a: _ArrayLikeInt_co, + UPLO: L["L", "U", "l", "u"] = "L", +) -> EighResult: ... +@overload +def eigh( + a: _ArrayLikeFloat_co, + UPLO: L["L", "U", "l", "u"] = "L", +) -> EighResult: ... +@overload +def eigh( + a: _ArrayLikeComplex_co, + UPLO: L["L", "U", "l", "u"] = "L", +) -> EighResult: ... + +@overload +def svd( + a: _ArrayLikeInt_co, + full_matrices: bool = True, + compute_uv: L[True] = True, + hermitian: bool = False, +) -> SVDResult: ... +@overload +def svd( + a: _ArrayLikeFloat_co, + full_matrices: bool = True, + compute_uv: L[True] = True, + hermitian: bool = False, +) -> SVDResult: ... +@overload +def svd( + a: _ArrayLikeComplex_co, + full_matrices: bool = True, + compute_uv: L[True] = True, + hermitian: bool = False, +) -> SVDResult: ... +@overload +def svd( + a: _ArrayLikeInt_co, + full_matrices: bool = True, + *, + compute_uv: L[False], + hermitian: bool = False, +) -> NDArray[float64]: ... +@overload +def svd( + a: _ArrayLikeInt_co, + full_matrices: bool, + compute_uv: L[False], + hermitian: bool = False, +) -> NDArray[float64]: ... +@overload +def svd( + a: _ArrayLikeComplex_co, + full_matrices: bool = True, + *, + compute_uv: L[False], + hermitian: bool = False, +) -> NDArray[floating]: ... +@overload +def svd( + a: _ArrayLikeComplex_co, + full_matrices: bool, + compute_uv: L[False], + hermitian: bool = False, +) -> NDArray[floating]: ... + +# the ignored `overload-overlap` mypy error below is a false-positive +@overload +def svdvals( # type: ignore[overload-overlap] + x: _ArrayLike[np.float64 | np.complex128 | np.integer | np.bool] | _NestedSequence[complex], / +) -> NDArray[np.float64]: ... +@overload +def svdvals(x: _ArrayLike[np.float32 | np.complex64], /) -> NDArray[np.float32]: ... +@overload +def svdvals(x: _ArrayLikeNumber_co, /) -> NDArray[floating]: ... + +# TODO: Returns a scalar for 2D arrays and +# a `(x.ndim - 2)`` dimensionl array otherwise +def cond(x: _ArrayLikeComplex_co, p: float | L["fro", "nuc"] | None = None) -> Any: ... + +# TODO: Returns `int` for <2D arrays and `intp` otherwise +def matrix_rank( + A: _ArrayLikeComplex_co, + tol: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | None = None, +) -> Any: ... + +@overload +def pinv( + a: _ArrayLikeInt_co, + rcond: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | _NoValueType = ..., +) -> NDArray[float64]: ... +@overload +def pinv( + a: _ArrayLikeFloat_co, + rcond: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | _NoValueType = ..., +) -> NDArray[floating]: ... +@overload +def pinv( + a: _ArrayLikeComplex_co, + rcond: _ArrayLikeFloat_co | None = None, + hermitian: bool = False, + *, + rtol: _ArrayLikeFloat_co | _NoValueType = ..., +) -> NDArray[complexfloating]: ... + +# TODO: Returns a 2-tuple of scalars for 2D arrays and +# a 2-tuple of `(a.ndim - 2)`` dimensionl arrays otherwise +def slogdet(a: _ArrayLikeComplex_co) -> SlogdetResult: ... + +# TODO: Returns a 2-tuple of scalars for 2D arrays and +# a 2-tuple of `(a.ndim - 2)`` dimensionl arrays otherwise +def det(a: _ArrayLikeComplex_co) -> Any: ... + +@overload +def lstsq(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, rcond: float | None = None) -> tuple[ + NDArray[float64], + NDArray[float64], + int32, + NDArray[float64], +]: ... +@overload +def lstsq(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: float | None = None) -> tuple[ + NDArray[floating], + NDArray[floating], + int32, + NDArray[floating], +]: ... +@overload +def lstsq(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: float | None = None) -> tuple[ + NDArray[complexfloating], + NDArray[floating], + int32, + NDArray[floating], +]: ... + +@overload +def norm( + x: ArrayLike, + ord: float | L["fro", "nuc"] | None = None, + axis: None = None, + keepdims: L[False] = False, +) -> floating: ... +@overload +def norm( + x: ArrayLike, + ord: float | L["fro", "nuc"] | None, + axis: SupportsInt | SupportsIndex | tuple[int, ...] | None, + keepdims: bool = False, +) -> Any: ... +@overload +def norm( + x: ArrayLike, + ord: float | L["fro", "nuc"] | None = None, + *, + axis: SupportsInt | SupportsIndex | tuple[int, ...] | None, + keepdims: bool = False, +) -> Any: ... + +@overload +def matrix_norm( + x: ArrayLike, + /, + *, + ord: float | L["fro", "nuc"] | None = "fro", + keepdims: L[False] = False, +) -> floating: ... +@overload +def matrix_norm( + x: ArrayLike, + /, + *, + ord: float | L["fro", "nuc"] | None = "fro", + keepdims: bool = False, +) -> Any: ... + +@overload +def vector_norm( + x: ArrayLike, + /, + *, + axis: None = None, + ord: float | None = 2, + keepdims: L[False] = False, +) -> floating: ... +@overload +def vector_norm( + x: ArrayLike, + /, + *, + axis: SupportsInt | SupportsIndex | tuple[int, ...], + ord: float | None = 2, + keepdims: bool = False, +) -> Any: ... + +# keep in sync with numpy._core.numeric.tensordot (ignoring `/, *`) +@overload +def tensordot( + a: _ArrayLike[_NumericScalarT], + b: _ArrayLike[_NumericScalarT], + /, + *, + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, +) -> NDArray[_NumericScalarT]: ... +@overload +def tensordot( + a: _ArrayLikeBool_co, + b: _ArrayLikeBool_co, + /, + *, + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, +) -> NDArray[np.bool_]: ... +@overload +def tensordot( + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, + /, + *, + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, +) -> NDArray[np.int_ | Any]: ... +@overload +def tensordot( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + /, + *, + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, +) -> NDArray[np.float64 | Any]: ... +@overload +def tensordot( + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, + /, + *, + axes: int | tuple[_ShapeLike, _ShapeLike] = 2, +) -> NDArray[np.complex128 | Any]: ... + +# TODO: Returns a scalar or array +def multi_dot( + arrays: Iterable[_ArrayLikeComplex_co | _ArrayLikeObject_co | _ArrayLikeTD64_co], + *, + out: NDArray[Any] | None = None, +) -> Any: ... + +def diagonal( + x: ArrayLike, # >= 2D array + /, + *, + offset: SupportsIndex = 0, +) -> NDArray[Any]: ... + +def trace( + x: ArrayLike, # >= 2D array + /, + *, + offset: SupportsIndex = 0, + dtype: DTypeLike | None = None, +) -> Any: ... + +@overload +def cross( + x1: _ArrayLikeUInt_co, + x2: _ArrayLikeUInt_co, + /, + *, + axis: int = -1, +) -> NDArray[unsignedinteger]: ... +@overload +def cross( + x1: _ArrayLikeInt_co, + x2: _ArrayLikeInt_co, + /, + *, + axis: int = -1, +) -> NDArray[signedinteger]: ... +@overload +def cross( + x1: _ArrayLikeFloat_co, + x2: _ArrayLikeFloat_co, + /, + *, + axis: int = -1, +) -> NDArray[floating]: ... +@overload +def cross( + x1: _ArrayLikeComplex_co, + x2: _ArrayLikeComplex_co, + /, + *, + axis: int = -1, +) -> NDArray[complexfloating]: ... + +@overload +def matmul(x1: _ArrayLike[_NumberT], x2: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... +@overload +def matmul(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... +@overload +def matmul(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... +@overload +def matmul(x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... +@overload +def matmul(x1: _ArrayLikeComplex_co, x2: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... diff --git a/local_packages/numpy/linalg/_umath_linalg.cp312-win_amd64.lib b/local_packages/numpy/linalg/_umath_linalg.cp312-win_amd64.lib new file mode 100644 index 0000000..1d6350d Binary files /dev/null and b/local_packages/numpy/linalg/_umath_linalg.cp312-win_amd64.lib differ diff --git a/local_packages/numpy/linalg/_umath_linalg.cp312-win_amd64.pyd b/local_packages/numpy/linalg/_umath_linalg.cp312-win_amd64.pyd new file mode 100644 index 0000000..650fb41 Binary files /dev/null and b/local_packages/numpy/linalg/_umath_linalg.cp312-win_amd64.pyd differ diff --git a/local_packages/numpy/linalg/_umath_linalg.pyi b/local_packages/numpy/linalg/_umath_linalg.pyi new file mode 100644 index 0000000..f90706a --- /dev/null +++ b/local_packages/numpy/linalg/_umath_linalg.pyi @@ -0,0 +1,60 @@ +from typing import Final, Literal as L + +import numpy as np +from numpy._typing._ufunc import _GUFunc_Nin2_Nout1 + +__version__: Final[str] = ... +_ilp64: Final[bool] = ... + +### +# 1 -> 1 + +# (m,m) -> () +det: Final[np.ufunc] = ... +# (m,m) -> (m) +cholesky_lo: Final[np.ufunc] = ... +cholesky_up: Final[np.ufunc] = ... +eigvals: Final[np.ufunc] = ... +eigvalsh_lo: Final[np.ufunc] = ... +eigvalsh_up: Final[np.ufunc] = ... +# (m,m) -> (m,m) +inv: Final[np.ufunc] = ... +# (m,n) -> (p) +qr_r_raw: Final[np.ufunc] = ... +svd: Final[np.ufunc] = ... + +### +# 1 -> 2 + +# (m,m) -> (), () +slogdet: Final[np.ufunc] = ... +# (m,m) -> (m), (m,m) +eig: Final[np.ufunc] = ... +eigh_lo: Final[np.ufunc] = ... +eigh_up: Final[np.ufunc] = ... + +### +# 2 -> 1 + +# (m,n), (n) -> (m,m) +qr_complete: Final[_GUFunc_Nin2_Nout1[L["qr_complete"], L[2], None, L["(m,n),(n)->(m,m)"]]] = ... +# (m,n), (k) -> (m,k) +qr_reduced: Final[_GUFunc_Nin2_Nout1[L["qr_reduced"], L[2], None, L["(m,n),(k)->(m,k)"]]] = ... +# (m,m), (m,n) -> (m,n) +solve: Final[_GUFunc_Nin2_Nout1[L["solve"], L[4], None, L["(m,m),(m,n)->(m,n)"]]] = ... +# (m,m), (m) -> (m) +solve1: Final[_GUFunc_Nin2_Nout1[L["solve1"], L[4], None, L["(m,m),(m)->(m)"]]] = ... + +### +# 1 -> 3 + +# (m,n) -> (m,m), (p), (n,n) +svd_f: Final[np.ufunc] = ... +# (m,n) -> (m,p), (p), (p,n) +svd_s: Final[np.ufunc] = ... + +### +# 3 -> 4 + +# (m,n), (m,k), () -> (n,k), (k), (), (p) +lstsq: Final[np.ufunc] = ... diff --git a/local_packages/numpy/linalg/lapack_lite.cp312-win_amd64.lib b/local_packages/numpy/linalg/lapack_lite.cp312-win_amd64.lib new file mode 100644 index 0000000..36364a4 Binary files /dev/null and b/local_packages/numpy/linalg/lapack_lite.cp312-win_amd64.lib differ diff --git a/local_packages/numpy/linalg/lapack_lite.cp312-win_amd64.pyd b/local_packages/numpy/linalg/lapack_lite.cp312-win_amd64.pyd new file mode 100644 index 0000000..7110cf3 Binary files /dev/null and b/local_packages/numpy/linalg/lapack_lite.cp312-win_amd64.pyd differ diff --git a/local_packages/numpy/linalg/lapack_lite.pyi b/local_packages/numpy/linalg/lapack_lite.pyi new file mode 100644 index 0000000..3ec3919 --- /dev/null +++ b/local_packages/numpy/linalg/lapack_lite.pyi @@ -0,0 +1,143 @@ +from typing import Final, TypedDict, type_check_only + +import numpy as np +from numpy._typing import NDArray + +from ._linalg import fortran_int + +### + +@type_check_only +class _GELSD(TypedDict): + m: int + n: int + nrhs: int + lda: int + ldb: int + rank: int + lwork: int + info: int + +@type_check_only +class _DGELSD(_GELSD): + dgelsd_: int + rcond: float + +@type_check_only +class _ZGELSD(_GELSD): + zgelsd_: int + +@type_check_only +class _GEQRF(TypedDict): + m: int + n: int + lda: int + lwork: int + info: int + +@type_check_only +class _DGEQRF(_GEQRF): + dgeqrf_: int + +@type_check_only +class _ZGEQRF(_GEQRF): + zgeqrf_: int + +@type_check_only +class _DORGQR(TypedDict): + dorgqr_: int + info: int + +@type_check_only +class _ZUNGQR(TypedDict): + zungqr_: int + info: int + +### + +_ilp64: Final[bool] = ... + +class LapackError(Exception): ... + +def dgelsd( + m: int, + n: int, + nrhs: int, + a: NDArray[np.float64], + lda: int, + b: NDArray[np.float64], + ldb: int, + s: NDArray[np.float64], + rcond: float, + rank: int, + work: NDArray[np.float64], + lwork: int, + iwork: NDArray[fortran_int], + info: int, +) -> _DGELSD: ... +def zgelsd( + m: int, + n: int, + nrhs: int, + a: NDArray[np.complex128], + lda: int, + b: NDArray[np.complex128], + ldb: int, + s: NDArray[np.float64], + rcond: float, + rank: int, + work: NDArray[np.complex128], + lwork: int, + rwork: NDArray[np.float64], + iwork: NDArray[fortran_int], + info: int, +) -> _ZGELSD: ... + +# +def dgeqrf( + m: int, + n: int, + a: NDArray[np.float64], # in/out, shape: (lda, n) + lda: int, + tau: NDArray[np.float64], # out, shape: (min(m, n),) + work: NDArray[np.float64], # out, shape: (max(1, lwork),) + lwork: int, + info: int, # out +) -> _DGEQRF: ... +def zgeqrf( + m: int, + n: int, + a: NDArray[np.complex128], # in/out, shape: (lda, n) + lda: int, + tau: NDArray[np.complex128], # out, shape: (min(m, n),) + work: NDArray[np.complex128], # out, shape: (max(1, lwork),) + lwork: int, + info: int, # out +) -> _ZGEQRF: ... + +# +def dorgqr( + m: int, # >=0 + n: int, # m >= n >= 0 + k: int, # n >= k >= 0 + a: NDArray[np.float64], # in/out, shape: (lda, n) + lda: int, # >= max(1, m) + tau: NDArray[np.float64], # in, shape: (k,) + work: NDArray[np.float64], # out, shape: (max(1, lwork),) + lwork: int, + info: int, # out +) -> _DORGQR: ... +def zungqr( + m: int, + n: int, + k: int, + a: NDArray[np.complex128], + lda: int, + tau: NDArray[np.complex128], + work: NDArray[np.complex128], + lwork: int, + info: int, +) -> _ZUNGQR: ... + +# +def xerbla(srname: object, info: int) -> None: ... diff --git a/local_packages/numpy/linalg/tests/__init__.py b/local_packages/numpy/linalg/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/local_packages/numpy/linalg/tests/test_deprecations.py b/local_packages/numpy/linalg/tests/test_deprecations.py new file mode 100644 index 0000000..7fb5008 --- /dev/null +++ b/local_packages/numpy/linalg/tests/test_deprecations.py @@ -0,0 +1,21 @@ +"""Test deprecation and future warnings. + +""" +import pytest + +import numpy as np + + +def test_qr_mode_full_future_warning(): + """Check mode='full' FutureWarning. + + In numpy 1.8 the mode options 'full' and 'economic' in linalg.qr were + deprecated. The release date will probably be sometime in the summer + of 2013. + + """ + a = np.eye(2) + pytest.warns(DeprecationWarning, np.linalg.qr, a, mode='full') + pytest.warns(DeprecationWarning, np.linalg.qr, a, mode='f') + pytest.warns(DeprecationWarning, np.linalg.qr, a, mode='economic') + pytest.warns(DeprecationWarning, np.linalg.qr, a, mode='e') diff --git a/local_packages/numpy/linalg/tests/test_linalg.py b/local_packages/numpy/linalg/tests/test_linalg.py new file mode 100644 index 0000000..b374402 --- /dev/null +++ b/local_packages/numpy/linalg/tests/test_linalg.py @@ -0,0 +1,2442 @@ +""" Test functions for linalg module + +""" +import itertools +import os +import subprocess +import sys +import textwrap +import threading +import traceback +import warnings + +import pytest + +import numpy as np +from numpy import ( + array, + asarray, + atleast_2d, + cdouble, + csingle, + dot, + double, + identity, + inf, + linalg, + matmul, + multiply, + single, +) +from numpy._core import swapaxes +from numpy.exceptions import AxisError +from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm +from numpy.linalg._linalg import _multi_dot_matrix_chain_order +from numpy.testing import ( + HAS_LAPACK64, + IS_WASM, + NOGIL_BUILD, + assert_, + assert_allclose, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) + +try: + import numpy.linalg.lapack_lite +except ImportError: + # May be broken when numpy was built without BLAS/LAPACK present + # If so, ensure we don't break the whole test suite - the `lapack_lite` + # submodule should be removed, it's only used in two tests in this file. + pass + + +def consistent_subclass(out, in_): + # For ndarray subclass input, our output should have the same subclass + # (non-ndarray input gets converted to ndarray). + return type(out) is (type(in_) if isinstance(in_, np.ndarray) + else np.ndarray) + + +old_assert_almost_equal = assert_almost_equal + + +def assert_almost_equal(a, b, single_decimal=6, double_decimal=12, **kw): + if asarray(a).dtype.type in (single, csingle): + decimal = single_decimal + else: + decimal = double_decimal + old_assert_almost_equal(a, b, decimal=decimal, **kw) + + +def get_real_dtype(dtype): + return {single: single, double: double, + csingle: single, cdouble: double}[dtype] + + +def get_complex_dtype(dtype): + return {single: csingle, double: cdouble, + csingle: csingle, cdouble: cdouble}[dtype] + + +def get_rtol(dtype): + # Choose a safe rtol + if dtype in (single, csingle): + return 1e-5 + else: + return 1e-11 + + +# used to categorize tests +all_tags = { + 'square', 'nonsquare', 'hermitian', # mutually exclusive + 'generalized', 'size-0', 'strided' # optional additions +} + + +class LinalgCase: + def __init__(self, name, a, b, tags=set()): + """ + A bundle of arguments to be passed to a test case, with an identifying + name, the operands a and b, and a set of tags to filter the tests + """ + assert_(isinstance(name, str)) + self.name = name + self.a = a + self.b = b + self.tags = frozenset(tags) # prevent shared tags + + def check(self, do): + """ + Run the function `do` on this test case, expanding arguments + """ + do(self.a, self.b, tags=self.tags) + + def __repr__(self): + return f'' + + +def apply_tag(tag, cases): + """ + Add the given tag (a string) to each of the cases (a list of LinalgCase + objects) + """ + assert tag in all_tags, "Invalid tag" + for case in cases: + case.tags = case.tags | {tag} + return cases + + +# +# Base test cases +# + +np.random.seed(1234) + +CASES = [] + +# square test cases +CASES += apply_tag('square', [ + LinalgCase("single", + array([[1., 2.], [3., 4.]], dtype=single), + array([2., 1.], dtype=single)), + LinalgCase("double", + array([[1., 2.], [3., 4.]], dtype=double), + array([2., 1.], dtype=double)), + LinalgCase("double_2", + array([[1., 2.], [3., 4.]], dtype=double), + array([[2., 1., 4.], [3., 4., 6.]], dtype=double)), + LinalgCase("csingle", + array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=csingle), + array([2. + 1j, 1. + 2j], dtype=csingle)), + LinalgCase("cdouble", + array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble), + array([2. + 1j, 1. + 2j], dtype=cdouble)), + LinalgCase("cdouble_2", + array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble), + array([[2. + 1j, 1. + 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], dtype=cdouble)), + LinalgCase("0x0", + np.empty((0, 0), dtype=double), + np.empty((0,), dtype=double), + tags={'size-0'}), + LinalgCase("8x8", + np.random.rand(8, 8), + np.random.rand(8)), + LinalgCase("1x1", + np.random.rand(1, 1), + np.random.rand(1)), + LinalgCase("nonarray", + [[1, 2], [3, 4]], + [2, 1]), +]) + +# non-square test-cases +CASES += apply_tag('nonsquare', [ + LinalgCase("single_nsq_1", + array([[1., 2., 3.], [3., 4., 6.]], dtype=single), + array([2., 1.], dtype=single)), + LinalgCase("single_nsq_2", + array([[1., 2.], [3., 4.], [5., 6.]], dtype=single), + array([2., 1., 3.], dtype=single)), + LinalgCase("double_nsq_1", + array([[1., 2., 3.], [3., 4., 6.]], dtype=double), + array([2., 1.], dtype=double)), + LinalgCase("double_nsq_2", + array([[1., 2.], [3., 4.], [5., 6.]], dtype=double), + array([2., 1., 3.], dtype=double)), + LinalgCase("csingle_nsq_1", + array( + [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=csingle), + array([2. + 1j, 1. + 2j], dtype=csingle)), + LinalgCase("csingle_nsq_2", + array( + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=csingle), + array([2. + 1j, 1. + 2j, 3. - 3j], dtype=csingle)), + LinalgCase("cdouble_nsq_1", + array( + [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble), + array([2. + 1j, 1. + 2j], dtype=cdouble)), + LinalgCase("cdouble_nsq_2", + array( + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble), + array([2. + 1j, 1. + 2j, 3. - 3j], dtype=cdouble)), + LinalgCase("cdouble_nsq_1_2", + array( + [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble), + array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)), + LinalgCase("cdouble_nsq_2_2", + array( + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble), + array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)), + LinalgCase("8x11", + np.random.rand(8, 11), + np.random.rand(8)), + LinalgCase("1x5", + np.random.rand(1, 5), + np.random.rand(1)), + LinalgCase("5x1", + np.random.rand(5, 1), + np.random.rand(5)), + LinalgCase("0x4", + np.random.rand(0, 4), + np.random.rand(0), + tags={'size-0'}), + LinalgCase("4x0", + np.random.rand(4, 0), + np.random.rand(4), + tags={'size-0'}), +]) + +# hermitian test-cases +CASES += apply_tag('hermitian', [ + LinalgCase("hsingle", + array([[1., 2.], [2., 1.]], dtype=single), + None), + LinalgCase("hdouble", + array([[1., 2.], [2., 1.]], dtype=double), + None), + LinalgCase("hcsingle", + array([[1., 2 + 3j], [2 - 3j, 1]], dtype=csingle), + None), + LinalgCase("hcdouble", + array([[1., 2 + 3j], [2 - 3j, 1]], dtype=cdouble), + None), + LinalgCase("hempty", + np.empty((0, 0), dtype=double), + None, + tags={'size-0'}), + LinalgCase("hnonarray", + [[1, 2], [2, 1]], + None), + LinalgCase("matrix_b_only", + array([[1., 2.], [2., 1.]]), + None), + LinalgCase("hmatrix_1x1", + np.random.rand(1, 1), + None), +]) + + +# +# Gufunc test cases +# +def _make_generalized_cases(): + new_cases = [] + + for case in CASES: + if not isinstance(case.a, np.ndarray): + continue + + a = np.array([case.a, 2 * case.a, 3 * case.a]) + if case.b is None: + b = None + elif case.b.ndim == 1: + b = case.b + else: + b = np.array([case.b, 7 * case.b, 6 * case.b]) + new_case = LinalgCase(case.name + "_tile3", a, b, + tags=case.tags | {'generalized'}) + new_cases.append(new_case) + + a = np.array([case.a] * 2 * 3).reshape((3, 2) + case.a.shape) + if case.b is None: + b = None + elif case.b.ndim == 1: + b = np.array([case.b] * 2 * 3 * a.shape[-1])\ + .reshape((3, 2) + case.a.shape[-2:]) + else: + b = np.array([case.b] * 2 * 3).reshape((3, 2) + case.b.shape) + new_case = LinalgCase(case.name + "_tile213", a, b, + tags=case.tags | {'generalized'}) + new_cases.append(new_case) + + return new_cases + + +CASES += _make_generalized_cases() + + +# +# Generate stride combination variations of the above +# +def _stride_comb_iter(x): + """ + Generate cartesian product of strides for all axes + """ + + if not isinstance(x, np.ndarray): + yield x, "nop" + return + + stride_set = [(1,)] * x.ndim + stride_set[-1] = (1, 3, -4) + if x.ndim > 1: + stride_set[-2] = (1, 3, -4) + if x.ndim > 2: + stride_set[-3] = (1, -4) + + for repeats in itertools.product(*tuple(stride_set)): + new_shape = [abs(a * b) for a, b in zip(x.shape, repeats)] + slices = tuple(slice(None, None, repeat) for repeat in repeats) + + # new array with different strides, but same data + xi = np.empty(new_shape, dtype=x.dtype) + xi.view(np.uint32).fill(0xdeadbeef) + xi = xi[slices] + xi[...] = x + xi = xi.view(x.__class__) + assert_(np.all(xi == x)) + yield xi, "stride_" + "_".join(["%+d" % j for j in repeats]) + + # generate also zero strides if possible + if x.ndim >= 1 and x.shape[-1] == 1: + s = list(x.strides) + s[-1] = 0 + xi = np.lib.stride_tricks.as_strided(x, strides=s) + yield xi, "stride_xxx_0" + if x.ndim >= 2 and x.shape[-2] == 1: + s = list(x.strides) + s[-2] = 0 + xi = np.lib.stride_tricks.as_strided(x, strides=s) + yield xi, "stride_xxx_0_x" + if x.ndim >= 2 and x.shape[:-2] == (1, 1): + s = list(x.strides) + s[-1] = 0 + s[-2] = 0 + xi = np.lib.stride_tricks.as_strided(x, strides=s) + yield xi, "stride_xxx_0_0" + + +def _make_strided_cases(): + new_cases = [] + for case in CASES: + for a, a_label in _stride_comb_iter(case.a): + for b, b_label in _stride_comb_iter(case.b): + new_case = LinalgCase(case.name + "_" + a_label + "_" + b_label, a, b, + tags=case.tags | {'strided'}) + new_cases.append(new_case) + return new_cases + + +CASES += _make_strided_cases() + + +# +# Test different routines against the above cases +# +class LinalgTestCase: + TEST_CASES = CASES + + def check_cases(self, require=set(), exclude=set()): + """ + Run func on each of the cases with all of the tags in require, and none + of the tags in exclude + """ + for case in self.TEST_CASES: + # filter by require and exclude + if case.tags & require != require: + continue + if case.tags & exclude: + continue + + try: + case.check(self.do) + except Exception as e: + msg = f'In test case: {case!r}\n\n' + msg += traceback.format_exc() + raise AssertionError(msg) from e + + +class LinalgSquareTestCase(LinalgTestCase): + + def test_sq_cases(self): + self.check_cases(require={'square'}, + exclude={'generalized', 'size-0'}) + + def test_empty_sq_cases(self): + self.check_cases(require={'square', 'size-0'}, + exclude={'generalized'}) + + +class LinalgNonsquareTestCase(LinalgTestCase): + + def test_nonsq_cases(self): + self.check_cases(require={'nonsquare'}, + exclude={'generalized', 'size-0'}) + + def test_empty_nonsq_cases(self): + self.check_cases(require={'nonsquare', 'size-0'}, + exclude={'generalized'}) + + +class HermitianTestCase(LinalgTestCase): + + def test_herm_cases(self): + self.check_cases(require={'hermitian'}, + exclude={'generalized', 'size-0'}) + + def test_empty_herm_cases(self): + self.check_cases(require={'hermitian', 'size-0'}, + exclude={'generalized'}) + + +class LinalgGeneralizedSquareTestCase(LinalgTestCase): + + @pytest.mark.slow + def test_generalized_sq_cases(self): + self.check_cases(require={'generalized', 'square'}, + exclude={'size-0'}) + + @pytest.mark.slow + def test_generalized_empty_sq_cases(self): + self.check_cases(require={'generalized', 'square', 'size-0'}) + + +class LinalgGeneralizedNonsquareTestCase(LinalgTestCase): + + @pytest.mark.slow + def test_generalized_nonsq_cases(self): + self.check_cases(require={'generalized', 'nonsquare'}, + exclude={'size-0'}) + + @pytest.mark.slow + def test_generalized_empty_nonsq_cases(self): + self.check_cases(require={'generalized', 'nonsquare', 'size-0'}) + + +class HermitianGeneralizedTestCase(LinalgTestCase): + + @pytest.mark.slow + def test_generalized_herm_cases(self): + self.check_cases(require={'generalized', 'hermitian'}, + exclude={'size-0'}) + + @pytest.mark.slow + def test_generalized_empty_herm_cases(self): + self.check_cases(require={'generalized', 'hermitian', 'size-0'}, + exclude={'none'}) + + +def identity_like_generalized(a): + a = asarray(a) + if a.ndim >= 3: + r = np.empty(a.shape, dtype=a.dtype) + r[...] = identity(a.shape[-2]) + return r + else: + return identity(a.shape[0]) + + +class SolveCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + # kept apart from TestSolve for use for testing with matrices. + def do(self, a, b, tags): + x = linalg.solve(a, b) + if np.array(b).ndim == 1: + # When a is (..., M, M) and b is (M,), it is the same as when b is + # (M, 1), except the result has shape (..., M) + adotx = matmul(a, x[..., None])[..., 0] + assert_almost_equal(np.broadcast_to(b, adotx.shape), adotx) + else: + adotx = matmul(a, x) + assert_almost_equal(b, adotx) + assert_(consistent_subclass(x, b)) + + +class TestSolve(SolveCases): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(linalg.solve(x, x).dtype, dtype) + + def test_1_d(self): + class ArraySubclass(np.ndarray): + pass + a = np.arange(8).reshape(2, 2, 2) + b = np.arange(2).view(ArraySubclass) + result = linalg.solve(a, b) + assert result.shape == (2, 2) + + # If b is anything other than 1-D it should be treated as a stack of + # matrices + b = np.arange(4).reshape(2, 2).view(ArraySubclass) + result = linalg.solve(a, b) + assert result.shape == (2, 2, 2) + + b = np.arange(2).reshape(1, 2).view(ArraySubclass) + assert_raises(ValueError, linalg.solve, a, b) + + def test_0_size(self): + class ArraySubclass(np.ndarray): + pass + # Test system of 0x0 matrices + a = np.arange(8).reshape(2, 2, 2) + b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass) + + expected = linalg.solve(a, b)[:, 0:0, :] + result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, :]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + # Test errors for non-square and only b's dimension being 0 + assert_raises(linalg.LinAlgError, linalg.solve, a[:, 0:0, 0:1], b) + assert_raises(ValueError, linalg.solve, a, b[:, 0:0, :]) + + # Test broadcasting error + b = np.arange(6).reshape(1, 3, 2) # broadcasting error + assert_raises(ValueError, linalg.solve, a, b) + assert_raises(ValueError, linalg.solve, a[0:0], b[0:0]) + + # Test zero "single equations" with 0x0 matrices. + b = np.arange(2).view(ArraySubclass) + expected = linalg.solve(a, b)[:, 0:0] + result = linalg.solve(a[:, 0:0, 0:0], b[0:0]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + b = np.arange(3).reshape(1, 3) + assert_raises(ValueError, linalg.solve, a, b) + assert_raises(ValueError, linalg.solve, a[0:0], b[0:0]) + assert_raises(ValueError, linalg.solve, a[:, 0:0, 0:0], b) + + def test_0_size_k(self): + # test zero multiple equation (K=0) case. + class ArraySubclass(np.ndarray): + pass + a = np.arange(4).reshape(1, 2, 2) + b = np.arange(6).reshape(3, 2, 1).view(ArraySubclass) + + expected = linalg.solve(a, b)[:, :, 0:0] + result = linalg.solve(a, b[:, :, 0:0]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + # test both zero. + expected = linalg.solve(a, b)[:, 0:0, 0:0] + result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, 0:0]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + +class InvCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + a_inv = linalg.inv(a) + assert_almost_equal(matmul(a, a_inv), + identity_like_generalized(a)) + assert_(consistent_subclass(a_inv, a)) + + +class TestInv(InvCases): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(linalg.inv(x).dtype, dtype) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res = linalg.inv(a) + assert_(res.dtype.type is np.float64) + assert_equal(a.shape, res.shape) + assert_(isinstance(res, ArraySubclass)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res = linalg.inv(a) + assert_(res.dtype.type is np.complex64) + assert_equal(a.shape, res.shape) + assert_(isinstance(res, ArraySubclass)) + + +class EigvalsCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + ev = linalg.eigvals(a) + evalues, evectors = linalg.eig(a) + assert_almost_equal(ev, evalues) + + +class TestEigvals(EigvalsCases): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(linalg.eigvals(x).dtype, dtype) + x = np.array([[1, 0.5], [-1, 1]], dtype=dtype) + assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype)) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res = linalg.eigvals(a) + assert_(res.dtype.type is np.float64) + assert_equal((0, 1), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(res, np.ndarray)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res = linalg.eigvals(a) + assert_(res.dtype.type is np.complex64) + assert_equal((0,), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(res, np.ndarray)) + + +class EigCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + res = linalg.eig(a) + eigenvalues, eigenvectors = res.eigenvalues, res.eigenvectors + assert_allclose(matmul(a, eigenvectors), + np.asarray(eigenvectors) * np.asarray(eigenvalues)[..., None, :], + rtol=get_rtol(eigenvalues.dtype)) + assert_(consistent_subclass(eigenvectors, a)) + + +class TestEig(EigCases): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + w, v = np.linalg.eig(x) + assert_equal(w.dtype, dtype) + assert_equal(v.dtype, dtype) + + x = np.array([[1, 0.5], [-1, 1]], dtype=dtype) + w, v = np.linalg.eig(x) + assert_equal(w.dtype, get_complex_dtype(dtype)) + assert_equal(v.dtype, get_complex_dtype(dtype)) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res, res_v = linalg.eig(a) + assert_(res_v.dtype.type is np.float64) + assert_(res.dtype.type is np.float64) + assert_equal(a.shape, res_v.shape) + assert_equal((0, 1), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(a, np.ndarray)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res, res_v = linalg.eig(a) + assert_(res_v.dtype.type is np.complex64) + assert_(res.dtype.type is np.complex64) + assert_equal(a.shape, res_v.shape) + assert_equal((0,), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(a, np.ndarray)) + + +class SVDBaseTests: + hermitian = False + + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + res = linalg.svd(x) + U, S, Vh = res.U, res.S, res.Vh + assert_equal(U.dtype, dtype) + assert_equal(S.dtype, get_real_dtype(dtype)) + assert_equal(Vh.dtype, dtype) + s = linalg.svd(x, compute_uv=False, hermitian=self.hermitian) + assert_equal(s.dtype, get_real_dtype(dtype)) + + +class SVDCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + u, s, vt = linalg.svd(a, False) + assert_allclose(a, matmul(np.asarray(u) * np.asarray(s)[..., None, :], + np.asarray(vt)), + rtol=get_rtol(u.dtype)) + assert_(consistent_subclass(u, a)) + assert_(consistent_subclass(vt, a)) + + +class TestSVD(SVDCases, SVDBaseTests): + def test_empty_identity(self): + """ Empty input should put an identity matrix in u or vh """ + x = np.empty((4, 0)) + u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian) + assert_equal(u.shape, (4, 4)) + assert_equal(vh.shape, (0, 0)) + assert_equal(u, np.eye(4)) + + x = np.empty((0, 4)) + u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian) + assert_equal(u.shape, (0, 0)) + assert_equal(vh.shape, (4, 4)) + assert_equal(vh, np.eye(4)) + + def test_svdvals(self): + x = np.array([[1, 0.5], [0.5, 1]]) + s_from_svd = linalg.svd(x, compute_uv=False, hermitian=self.hermitian) + s_from_svdvals = linalg.svdvals(x) + assert_almost_equal(s_from_svd, s_from_svdvals) + + +class SVDHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase): + + def do(self, a, b, tags): + u, s, vt = linalg.svd(a, False, hermitian=True) + assert_allclose(a, matmul(np.asarray(u) * np.asarray(s)[..., None, :], + np.asarray(vt)), + rtol=get_rtol(u.dtype)) + + def hermitian(mat): + axes = list(range(mat.ndim)) + axes[-1], axes[-2] = axes[-2], axes[-1] + return np.conj(np.transpose(mat, axes=axes)) + + assert_almost_equal(np.matmul(u, hermitian(u)), np.broadcast_to(np.eye(u.shape[-1]), u.shape)) + assert_almost_equal(np.matmul(vt, hermitian(vt)), np.broadcast_to(np.eye(vt.shape[-1]), vt.shape)) + assert_equal(np.sort(s)[..., ::-1], s) + assert_(consistent_subclass(u, a)) + assert_(consistent_subclass(vt, a)) + + +class TestSVDHermitian(SVDHermitianCases, SVDBaseTests): + hermitian = True + + +class CondCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + # cond(x, p) for p in (None, 2, -2) + + def do(self, a, b, tags): + c = asarray(a) # a might be a matrix + if 'size-0' in tags: + assert_raises(LinAlgError, linalg.cond, c) + return + + # +-2 norms + s = linalg.svd(c, compute_uv=False) + assert_almost_equal( + linalg.cond(a), s[..., 0] / s[..., -1], + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, 2), s[..., 0] / s[..., -1], + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, -2), s[..., -1] / s[..., 0], + single_decimal=5, double_decimal=11) + + # Other norms + cinv = np.linalg.inv(c) + assert_almost_equal( + linalg.cond(a, 1), + abs(c).sum(-2).max(-1) * abs(cinv).sum(-2).max(-1), + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, -1), + abs(c).sum(-2).min(-1) * abs(cinv).sum(-2).min(-1), + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, np.inf), + abs(c).sum(-1).max(-1) * abs(cinv).sum(-1).max(-1), + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, -np.inf), + abs(c).sum(-1).min(-1) * abs(cinv).sum(-1).min(-1), + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, 'fro'), + np.sqrt((abs(c)**2).sum(-1).sum(-1) + * (abs(cinv)**2).sum(-1).sum(-1)), + single_decimal=5, double_decimal=11) + + +class TestCond(CondCases): + @pytest.mark.parametrize('is_complex', [False, True]) + def test_basic_nonsvd(self, is_complex): + # Smoketest the non-svd norms + A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]]) + if is_complex: + # Since A is linearly scaled, the condition number should not change + A = A * (1 + 1j) + assert_almost_equal(linalg.cond(A, inf), 4) + assert_almost_equal(linalg.cond(A, -inf), 2 / 3) + assert_almost_equal(linalg.cond(A, 1), 4) + assert_almost_equal(linalg.cond(A, -1), 0.5) + assert_almost_equal(linalg.cond(A, 'fro'), np.sqrt(265 / 12)) + + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + @pytest.mark.parametrize('norm_ord', [1, -1, 2, -2, 'fro', np.inf, -np.inf]) + def test_cond_dtypes(self, dtype, norm_ord): + # Check that the condition number is computed in the same dtype + # as the input matrix + A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]], dtype=dtype) + out_type = get_real_dtype(dtype) + assert_equal(linalg.cond(A, p=norm_ord).dtype, out_type) + + def test_singular(self): + # Singular matrices have infinite condition number for + # positive norms, and negative norms shouldn't raise + # exceptions + As = [np.zeros((2, 2)), np.ones((2, 2))] + p_pos = [None, 1, 2, 'fro'] + p_neg = [-1, -2] + for A, p in itertools.product(As, p_pos): + # Inversion may not hit exact infinity, so just check the + # number is large + assert_(linalg.cond(A, p) > 1e15) + for A, p in itertools.product(As, p_neg): + linalg.cond(A, p) + + @pytest.mark.xfail(True, run=False, + reason="Platform/LAPACK-dependent failure, " + "see gh-18914") + def test_nan(self): + # nans should be passed through, not converted to infs + ps = [None, 1, -1, 2, -2, 'fro'] + p_pos = [None, 1, 2, 'fro'] + + A = np.ones((2, 2)) + A[0, 1] = np.nan + for p in ps: + c = linalg.cond(A, p) + assert_(isinstance(c, np.float64)) + assert_(np.isnan(c)) + + A = np.ones((3, 2, 2)) + A[1, 0, 1] = np.nan + for p in ps: + c = linalg.cond(A, p) + assert_(np.isnan(c[1])) + if p in p_pos: + assert_(c[0] > 1e15) + assert_(c[2] > 1e15) + else: + assert_(not np.isnan(c[0])) + assert_(not np.isnan(c[2])) + + def test_stacked_singular(self): + # Check behavior when only some of the stacked matrices are + # singular + np.random.seed(1234) + A = np.random.rand(2, 2, 2, 2) + A[0, 0] = 0 + A[1, 1] = 0 + + for p in (None, 1, 2, 'fro', -1, -2): + c = linalg.cond(A, p) + assert_equal(c[0, 0], np.inf) + assert_equal(c[1, 1], np.inf) + assert_(np.isfinite(c[0, 1])) + assert_(np.isfinite(c[1, 0])) + + +class PinvCases(LinalgSquareTestCase, + LinalgNonsquareTestCase, + LinalgGeneralizedSquareTestCase, + LinalgGeneralizedNonsquareTestCase): + + def do(self, a, b, tags): + a_ginv = linalg.pinv(a) + # `a @ a_ginv == I` does not hold if a is singular + dot = matmul + assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11) + assert_(consistent_subclass(a_ginv, a)) + + +class TestPinv(PinvCases): + pass + + +class PinvHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase): + + def do(self, a, b, tags): + a_ginv = linalg.pinv(a, hermitian=True) + # `a @ a_ginv == I` does not hold if a is singular + dot = matmul + assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11) + assert_(consistent_subclass(a_ginv, a)) + + +class TestPinvHermitian(PinvHermitianCases): + pass + + +def test_pinv_rtol_arg(): + a = np.array([[1, 2, 3], [4, 1, 1], [2, 3, 1]]) + + assert_almost_equal( + np.linalg.pinv(a, rcond=0.5), + np.linalg.pinv(a, rtol=0.5), + ) + + with pytest.raises( + ValueError, match=r"`rtol` and `rcond` can't be both set." + ): + np.linalg.pinv(a, rcond=0.5, rtol=0.5) + + +class DetCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + d = linalg.det(a) + res = linalg.slogdet(a) + s, ld = res.sign, res.logabsdet + if asarray(a).dtype.type in (single, double): + ad = asarray(a).astype(double) + else: + ad = asarray(a).astype(cdouble) + ev = linalg.eigvals(ad) + assert_almost_equal(d, multiply.reduce(ev, axis=-1)) + assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1)) + + s = np.atleast_1d(s) + ld = np.atleast_1d(ld) + m = (s != 0) + assert_almost_equal(np.abs(s[m]), 1) + assert_equal(ld[~m], -inf) + + +class TestDet(DetCases): + def test_zero(self): + assert_equal(linalg.det([[0.0]]), 0.0) + assert_equal(type(linalg.det([[0.0]])), double) + assert_equal(linalg.det([[0.0j]]), 0.0) + assert_equal(type(linalg.det([[0.0j]])), cdouble) + + assert_equal(linalg.slogdet([[0.0]]), (0.0, -inf)) + assert_equal(type(linalg.slogdet([[0.0]])[0]), double) + assert_equal(type(linalg.slogdet([[0.0]])[1]), double) + assert_equal(linalg.slogdet([[0.0j]]), (0.0j, -inf)) + assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble) + assert_equal(type(linalg.slogdet([[0.0j]])[1]), double) + + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(np.linalg.det(x).dtype, dtype) + ph, s = np.linalg.slogdet(x) + assert_equal(s.dtype, get_real_dtype(dtype)) + assert_equal(ph.dtype, dtype) + + def test_0_size(self): + a = np.zeros((0, 0), dtype=np.complex64) + res = linalg.det(a) + assert_equal(res, 1.) + assert_(res.dtype.type is np.complex64) + res = linalg.slogdet(a) + assert_equal(res, (1, 0)) + assert_(res[0].dtype.type is np.complex64) + assert_(res[1].dtype.type is np.float32) + + a = np.zeros((0, 0), dtype=np.float64) + res = linalg.det(a) + assert_equal(res, 1.) + assert_(res.dtype.type is np.float64) + res = linalg.slogdet(a) + assert_equal(res, (1, 0)) + assert_(res[0].dtype.type is np.float64) + assert_(res[1].dtype.type is np.float64) + + +class LstsqCases(LinalgSquareTestCase, LinalgNonsquareTestCase): + + def do(self, a, b, tags): + arr = np.asarray(a) + m, n = arr.shape + u, s, vt = linalg.svd(a, False) + x, residuals, rank, sv = linalg.lstsq(a, b, rcond=-1) + if m == 0: + assert_((x == 0).all()) + if m <= n: + assert_almost_equal(b, dot(a, x)) + assert_equal(rank, m) + else: + assert_equal(rank, n) + assert_almost_equal(sv, sv.__array_wrap__(s)) + if rank == n and m > n: + expect_resids = ( + np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0) + expect_resids = np.asarray(expect_resids) + if np.asarray(b).ndim == 1: + expect_resids.shape = (1,) + assert_equal(residuals.shape, expect_resids.shape) + else: + expect_resids = np.array([]).view(type(x)) + assert_almost_equal(residuals, expect_resids) + assert_(np.issubdtype(residuals.dtype, np.floating)) + assert_(consistent_subclass(x, b)) + assert_(consistent_subclass(residuals, b)) + + +class TestLstsq(LstsqCases): + def test_rcond(self): + a = np.array([[0., 1., 0., 1., 2., 0.], + [0., 2., 0., 0., 1., 0.], + [1., 0., 1., 0., 0., 4.], + [0., 0., 0., 2., 3., 0.]]).T + + b = np.array([1, 0, 0, 0, 0, 0]) + + x, residuals, rank, s = linalg.lstsq(a, b, rcond=-1) + assert_(rank == 4) + x, residuals, rank, s = linalg.lstsq(a, b) + assert_(rank == 3) + x, residuals, rank, s = linalg.lstsq(a, b, rcond=None) + assert_(rank == 3) + + @pytest.mark.parametrize(["m", "n", "n_rhs"], [ + (4, 2, 2), + (0, 4, 1), + (0, 4, 2), + (4, 0, 1), + (4, 0, 2), + (4, 2, 0), + (0, 0, 0) + ]) + def test_empty_a_b(self, m, n, n_rhs): + a = np.arange(m * n).reshape(m, n) + b = np.ones((m, n_rhs)) + x, residuals, rank, s = linalg.lstsq(a, b, rcond=None) + if m == 0: + assert_((x == 0).all()) + assert_equal(x.shape, (n, n_rhs)) + assert_equal(residuals.shape, ((n_rhs,) if m > n else (0,))) + if m > n and n_rhs > 0: + # residuals are exactly the squared norms of b's columns + r = b - np.dot(a, x) + assert_almost_equal(residuals, (r * r).sum(axis=-2)) + assert_equal(rank, min(m, n)) + assert_equal(s.shape, (min(m, n),)) + + def test_incompatible_dims(self): + # use modified version of docstring example + x = np.array([0, 1, 2, 3]) + y = np.array([-1, 0.2, 0.9, 2.1, 3.3]) + A = np.vstack([x, np.ones(len(x))]).T + with assert_raises_regex(LinAlgError, "Incompatible dimensions"): + linalg.lstsq(A, y, rcond=None) + + +@pytest.mark.parametrize('dt', [np.dtype(c) for c in '?bBhHiIqQefdgFDGO']) +class TestMatrixPower: + + rshft_0 = np.eye(4) + rshft_1 = rshft_0[[3, 0, 1, 2]] + rshft_2 = rshft_0[[2, 3, 0, 1]] + rshft_3 = rshft_0[[1, 2, 3, 0]] + rshft_all = [rshft_0, rshft_1, rshft_2, rshft_3] + noninv = array([[1, 0], [0, 0]]) + stacked = np.block([[[rshft_0]]] * 2) + # FIXME the 'e' dtype might work in future + dtnoinv = [object, np.dtype('e'), np.dtype('g'), np.dtype('G')] + + def test_large_power(self, dt): + rshft = self.rshft_1.astype(dt) + assert_equal( + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 0), self.rshft_0) + assert_equal( + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 1), self.rshft_1) + assert_equal( + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 2), self.rshft_2) + assert_equal( + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 3), self.rshft_3) + + def test_power_is_zero(self, dt): + def tz(M): + mz = matrix_power(M, 0) + assert_equal(mz, identity_like_generalized(M)) + assert_equal(mz.dtype, M.dtype) + + for mat in self.rshft_all: + tz(mat.astype(dt)) + if dt != object: + tz(self.stacked.astype(dt)) + + def test_power_is_one(self, dt): + def tz(mat): + mz = matrix_power(mat, 1) + assert_equal(mz, mat) + assert_equal(mz.dtype, mat.dtype) + + for mat in self.rshft_all: + tz(mat.astype(dt)) + if dt != object: + tz(self.stacked.astype(dt)) + + def test_power_is_two(self, dt): + def tz(mat): + mz = matrix_power(mat, 2) + mmul = matmul if mat.dtype != object else dot + assert_equal(mz, mmul(mat, mat)) + assert_equal(mz.dtype, mat.dtype) + + for mat in self.rshft_all: + tz(mat.astype(dt)) + if dt != object: + tz(self.stacked.astype(dt)) + + def test_power_is_minus_one(self, dt): + def tz(mat): + invmat = matrix_power(mat, -1) + mmul = matmul if mat.dtype != object else dot + assert_almost_equal( + mmul(invmat, mat), identity_like_generalized(mat)) + + for mat in self.rshft_all: + if dt not in self.dtnoinv: + tz(mat.astype(dt)) + + def test_exceptions_bad_power(self, dt): + mat = self.rshft_0.astype(dt) + assert_raises(TypeError, matrix_power, mat, 1.5) + assert_raises(TypeError, matrix_power, mat, [1]) + + def test_exceptions_non_square(self, dt): + assert_raises(LinAlgError, matrix_power, np.array([1], dt), 1) + assert_raises(LinAlgError, matrix_power, np.array([[1], [2]], dt), 1) + assert_raises(LinAlgError, matrix_power, np.ones((4, 3, 2), dt), 1) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_exceptions_not_invertible(self, dt): + if dt in self.dtnoinv: + return + mat = self.noninv.astype(dt) + assert_raises(LinAlgError, matrix_power, mat, -1) + + +class TestEigvalshCases(HermitianTestCase, HermitianGeneralizedTestCase): + + def do(self, a, b, tags): + # note that eigenvalue arrays returned by eig must be sorted since + # their order isn't guaranteed. + ev = linalg.eigvalsh(a, 'L') + evalues, evectors = linalg.eig(a) + evalues.sort(axis=-1) + assert_allclose(ev, evalues, rtol=get_rtol(ev.dtype)) + + ev2 = linalg.eigvalsh(a, 'U') + assert_allclose(ev2, evalues, rtol=get_rtol(ev.dtype)) + + +class TestEigvalsh: + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + w = np.linalg.eigvalsh(x) + assert_equal(w.dtype, get_real_dtype(dtype)) + + def test_invalid(self): + x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32) + assert_raises(ValueError, np.linalg.eigvalsh, x, UPLO="lrong") + assert_raises(ValueError, np.linalg.eigvalsh, x, "lower") + assert_raises(ValueError, np.linalg.eigvalsh, x, "upper") + + def test_UPLO(self): + Klo = np.array([[0, 0], [1, 0]], dtype=np.double) + Kup = np.array([[0, 1], [0, 0]], dtype=np.double) + tgt = np.array([-1, 1], dtype=np.double) + rtol = get_rtol(np.double) + + # Check default is 'L' + w = np.linalg.eigvalsh(Klo) + assert_allclose(w, tgt, rtol=rtol) + # Check 'L' + w = np.linalg.eigvalsh(Klo, UPLO='L') + assert_allclose(w, tgt, rtol=rtol) + # Check 'l' + w = np.linalg.eigvalsh(Klo, UPLO='l') + assert_allclose(w, tgt, rtol=rtol) + # Check 'U' + w = np.linalg.eigvalsh(Kup, UPLO='U') + assert_allclose(w, tgt, rtol=rtol) + # Check 'u' + w = np.linalg.eigvalsh(Kup, UPLO='u') + assert_allclose(w, tgt, rtol=rtol) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res = linalg.eigvalsh(a) + assert_(res.dtype.type is np.float64) + assert_equal((0, 1), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(res, np.ndarray)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res = linalg.eigvalsh(a) + assert_(res.dtype.type is np.float32) + assert_equal((0,), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(res, np.ndarray)) + + +class TestEighCases(HermitianTestCase, HermitianGeneralizedTestCase): + + def do(self, a, b, tags): + # note that eigenvalue arrays returned by eig must be sorted since + # their order isn't guaranteed. + res = linalg.eigh(a) + ev, evc = res.eigenvalues, res.eigenvectors + evalues, evectors = linalg.eig(a) + evalues.sort(axis=-1) + assert_almost_equal(ev, evalues) + + assert_allclose(matmul(a, evc), + np.asarray(ev)[..., None, :] * np.asarray(evc), + rtol=get_rtol(ev.dtype)) + + ev2, evc2 = linalg.eigh(a, 'U') + assert_almost_equal(ev2, evalues) + + assert_allclose(matmul(a, evc2), + np.asarray(ev2)[..., None, :] * np.asarray(evc2), + rtol=get_rtol(ev.dtype), err_msg=repr(a)) + + +class TestEigh: + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + w, v = np.linalg.eigh(x) + assert_equal(w.dtype, get_real_dtype(dtype)) + assert_equal(v.dtype, dtype) + + def test_invalid(self): + x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32) + assert_raises(ValueError, np.linalg.eigh, x, UPLO="lrong") + assert_raises(ValueError, np.linalg.eigh, x, "lower") + assert_raises(ValueError, np.linalg.eigh, x, "upper") + + def test_UPLO(self): + Klo = np.array([[0, 0], [1, 0]], dtype=np.double) + Kup = np.array([[0, 1], [0, 0]], dtype=np.double) + tgt = np.array([-1, 1], dtype=np.double) + rtol = get_rtol(np.double) + + # Check default is 'L' + w, v = np.linalg.eigh(Klo) + assert_allclose(w, tgt, rtol=rtol) + # Check 'L' + w, v = np.linalg.eigh(Klo, UPLO='L') + assert_allclose(w, tgt, rtol=rtol) + # Check 'l' + w, v = np.linalg.eigh(Klo, UPLO='l') + assert_allclose(w, tgt, rtol=rtol) + # Check 'U' + w, v = np.linalg.eigh(Kup, UPLO='U') + assert_allclose(w, tgt, rtol=rtol) + # Check 'u' + w, v = np.linalg.eigh(Kup, UPLO='u') + assert_allclose(w, tgt, rtol=rtol) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res, res_v = linalg.eigh(a) + assert_(res_v.dtype.type is np.float64) + assert_(res.dtype.type is np.float64) + assert_equal(a.shape, res_v.shape) + assert_equal((0, 1), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(a, np.ndarray)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res, res_v = linalg.eigh(a) + assert_(res_v.dtype.type is np.complex64) + assert_(res.dtype.type is np.float32) + assert_equal(a.shape, res_v.shape) + assert_equal((0,), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(a, np.ndarray)) + + +class _TestNormBase: + dt = None + dec = None + + @staticmethod + def check_dtype(x, res): + if issubclass(x.dtype.type, np.inexact): + assert_equal(res.dtype, x.real.dtype) + else: + # For integer input, don't have to test float precision of output. + assert_(issubclass(res.dtype.type, np.floating)) + + +class _TestNormGeneral(_TestNormBase): + + def test_empty(self): + assert_equal(norm([]), 0.0) + assert_equal(norm(array([], dtype=self.dt)), 0.0) + assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0) + + def test_vector_return_type(self): + a = np.array([1, 0, 1]) + + exact_types = np.typecodes['AllInteger'] + inexact_types = np.typecodes['AllFloat'] + + all_types = exact_types + inexact_types + + for each_type in all_types: + at = a.astype(each_type) + + an = norm(at, -np.inf) + self.check_dtype(at, an) + assert_almost_equal(an, 0.0) + + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "divide by zero encountered", RuntimeWarning) + an = norm(at, -1) + self.check_dtype(at, an) + assert_almost_equal(an, 0.0) + + an = norm(at, 0) + self.check_dtype(at, an) + assert_almost_equal(an, 2) + + an = norm(at, 1) + self.check_dtype(at, an) + assert_almost_equal(an, 2.0) + + an = norm(at, 2) + self.check_dtype(at, an) + assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0 / 2.0)) + + an = norm(at, 4) + self.check_dtype(at, an) + assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0 / 4.0)) + + an = norm(at, np.inf) + self.check_dtype(at, an) + assert_almost_equal(an, 1.0) + + def test_vector(self): + a = [1, 2, 3, 4] + b = [-1, -2, -3, -4] + c = [-1, 2, -3, 4] + + def _test(v): + np.testing.assert_almost_equal(norm(v), 30 ** 0.5, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, inf), 4.0, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, -inf), 1.0, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, 1), 10.0, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, -1), 12.0 / 25, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, 2), 30 ** 0.5, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, -2), ((205. / 144) ** -0.5), + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, 0), 4, + decimal=self.dec) + + for v in (a, b, c,): + _test(v) + + for v in (array(a, dtype=self.dt), array(b, dtype=self.dt), + array(c, dtype=self.dt)): + _test(v) + + def test_axis(self): + # Vector norms. + # Compare the use of `axis` with computing the norm of each row + # or column separately. + A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt) + for order in [None, -1, 0, 1, 2, 3, np.inf, -np.inf]: + expected0 = [norm(A[:, k], ord=order) for k in range(A.shape[1])] + assert_almost_equal(norm(A, ord=order, axis=0), expected0) + expected1 = [norm(A[k, :], ord=order) for k in range(A.shape[0])] + assert_almost_equal(norm(A, ord=order, axis=1), expected1) + + # Matrix norms. + B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) + nd = B.ndim + for order in [None, -2, 2, -1, 1, np.inf, -np.inf, 'fro']: + for axis in itertools.combinations(range(-nd, nd), 2): + row_axis, col_axis = axis + if row_axis < 0: + row_axis += nd + if col_axis < 0: + col_axis += nd + if row_axis == col_axis: + assert_raises(ValueError, norm, B, ord=order, axis=axis) + else: + n = norm(B, ord=order, axis=axis) + + # The logic using k_index only works for nd = 3. + # This has to be changed if nd is increased. + k_index = nd - (row_axis + col_axis) + if row_axis < col_axis: + expected = [norm(B[:].take(k, axis=k_index), ord=order) + for k in range(B.shape[k_index])] + else: + expected = [norm(B[:].take(k, axis=k_index).T, ord=order) + for k in range(B.shape[k_index])] + assert_almost_equal(n, expected) + + def test_keepdims(self): + A = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) + + allclose_err = 'order {0}, axis = {1}' + shape_err = 'Shape mismatch found {0}, expected {1}, order={2}, axis={3}' + + # check the order=None, axis=None case + expected = norm(A, ord=None, axis=None) + found = norm(A, ord=None, axis=None, keepdims=True) + assert_allclose(np.squeeze(found), expected, + err_msg=allclose_err.format(None, None)) + expected_shape = (1, 1, 1) + assert_(found.shape == expected_shape, + shape_err.format(found.shape, expected_shape, None, None)) + + # Vector norms. + for order in [None, -1, 0, 1, 2, 3, np.inf, -np.inf]: + for k in range(A.ndim): + expected = norm(A, ord=order, axis=k) + found = norm(A, ord=order, axis=k, keepdims=True) + assert_allclose(np.squeeze(found), expected, + err_msg=allclose_err.format(order, k)) + expected_shape = list(A.shape) + expected_shape[k] = 1 + expected_shape = tuple(expected_shape) + assert_(found.shape == expected_shape, + shape_err.format(found.shape, expected_shape, order, k)) + + # Matrix norms. + for order in [None, -2, 2, -1, 1, np.inf, -np.inf, 'fro', 'nuc']: + for k in itertools.permutations(range(A.ndim), 2): + expected = norm(A, ord=order, axis=k) + found = norm(A, ord=order, axis=k, keepdims=True) + assert_allclose(np.squeeze(found), expected, + err_msg=allclose_err.format(order, k)) + expected_shape = list(A.shape) + expected_shape[k[0]] = 1 + expected_shape[k[1]] = 1 + expected_shape = tuple(expected_shape) + assert_(found.shape == expected_shape, + shape_err.format(found.shape, expected_shape, order, k)) + + +class _TestNorm2D(_TestNormBase): + # Define the part for 2d arrays separately, so we can subclass this + # and run the tests using np.matrix in matrixlib.tests.test_matrix_linalg. + array = np.array + + def test_matrix_empty(self): + assert_equal(norm(self.array([[]], dtype=self.dt)), 0.0) + + def test_matrix_return_type(self): + a = self.array([[1, 0, 1], [0, 1, 1]]) + + exact_types = np.typecodes['AllInteger'] + + # float32, complex64, float64, complex128 types are the only types + # allowed by `linalg`, which performs the matrix operations used + # within `norm`. + inexact_types = 'fdFD' + + all_types = exact_types + inexact_types + + for each_type in all_types: + at = a.astype(each_type) + + an = norm(at, -np.inf) + self.check_dtype(at, an) + assert_almost_equal(an, 2.0) + + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', "divide by zero encountered", RuntimeWarning) + an = norm(at, -1) + self.check_dtype(at, an) + assert_almost_equal(an, 1.0) + + an = norm(at, 1) + self.check_dtype(at, an) + assert_almost_equal(an, 2.0) + + an = norm(at, 2) + self.check_dtype(at, an) + assert_almost_equal(an, 3.0**(1.0 / 2.0)) + + an = norm(at, -2) + self.check_dtype(at, an) + assert_almost_equal(an, 1.0) + + an = norm(at, np.inf) + self.check_dtype(at, an) + assert_almost_equal(an, 2.0) + + an = norm(at, 'fro') + self.check_dtype(at, an) + assert_almost_equal(an, 2.0) + + an = norm(at, 'nuc') + self.check_dtype(at, an) + # Lower bar needed to support low precision floats. + # They end up being off by 1 in the 7th place. + np.testing.assert_almost_equal(an, 2.7320508075688772, decimal=6) + + def test_matrix_2x2(self): + A = self.array([[1, 3], [5, 7]], dtype=self.dt) + assert_almost_equal(norm(A), 84 ** 0.5) + assert_almost_equal(norm(A, 'fro'), 84 ** 0.5) + assert_almost_equal(norm(A, 'nuc'), 10.0) + assert_almost_equal(norm(A, inf), 12.0) + assert_almost_equal(norm(A, -inf), 4.0) + assert_almost_equal(norm(A, 1), 10.0) + assert_almost_equal(norm(A, -1), 6.0) + assert_almost_equal(norm(A, 2), 9.1231056256176615) + assert_almost_equal(norm(A, -2), 0.87689437438234041) + + assert_raises(ValueError, norm, A, 'nofro') + assert_raises(ValueError, norm, A, -3) + assert_raises(ValueError, norm, A, 0) + + def test_matrix_3x3(self): + # This test has been added because the 2x2 example + # happened to have equal nuclear norm and induced 1-norm. + # The 1/10 scaling factor accommodates the absolute tolerance + # used in assert_almost_equal. + A = (1 / 10) * \ + self.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt) + assert_almost_equal(norm(A), (1 / 10) * 89 ** 0.5) + assert_almost_equal(norm(A, 'fro'), (1 / 10) * 89 ** 0.5) + assert_almost_equal(norm(A, 'nuc'), 1.3366836911774836) + assert_almost_equal(norm(A, inf), 1.1) + assert_almost_equal(norm(A, -inf), 0.6) + assert_almost_equal(norm(A, 1), 1.0) + assert_almost_equal(norm(A, -1), 0.4) + assert_almost_equal(norm(A, 2), 0.88722940323461277) + assert_almost_equal(norm(A, -2), 0.19456584790481812) + + def test_bad_args(self): + # Check that bad arguments raise the appropriate exceptions. + + A = self.array([[1, 2, 3], [4, 5, 6]], dtype=self.dt) + B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) + + # Using `axis=` or passing in a 1-D array implies vector + # norms are being computed, so also using `ord='fro'` + # or `ord='nuc'` or any other string raises a ValueError. + assert_raises(ValueError, norm, A, 'fro', 0) + assert_raises(ValueError, norm, A, 'nuc', 0) + assert_raises(ValueError, norm, [3, 4], 'fro', None) + assert_raises(ValueError, norm, [3, 4], 'nuc', None) + assert_raises(ValueError, norm, [3, 4], 'test', None) + + # Similarly, norm should raise an exception when ord is any finite + # number other than 1, 2, -1 or -2 when computing matrix norms. + for order in [0, 3]: + assert_raises(ValueError, norm, A, order, None) + assert_raises(ValueError, norm, A, order, (0, 1)) + assert_raises(ValueError, norm, B, order, (1, 2)) + + # Invalid axis + assert_raises(AxisError, norm, B, None, 3) + assert_raises(AxisError, norm, B, None, (2, 3)) + assert_raises(ValueError, norm, B, None, (0, 1, 2)) + + +class _TestNorm(_TestNorm2D, _TestNormGeneral): + pass + + +class TestNorm_NonSystematic: + + def test_longdouble_norm(self): + # Non-regression test: p-norm of longdouble would previously raise + # UnboundLocalError. + x = np.arange(10, dtype=np.longdouble) + old_assert_almost_equal(norm(x, ord=3), 12.65, decimal=2) + + def test_intmin(self): + # Non-regression test: p-norm of signed integer would previously do + # float cast and abs in the wrong order. + x = np.array([-2 ** 31], dtype=np.int32) + old_assert_almost_equal(norm(x, ord=3), 2 ** 31, decimal=5) + + def test_complex_high_ord(self): + # gh-4156 + d = np.empty((2,), dtype=np.clongdouble) + d[0] = 6 + 7j + d[1] = -6 + 7j + res = 11.615898132184 + old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=10) + d = d.astype(np.complex128) + old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=9) + d = d.astype(np.complex64) + old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=5) + + +# Separate definitions so we can use them for matrix tests. +class _TestNormDoubleBase(_TestNormBase): + dt = np.double + dec = 12 + + +class _TestNormSingleBase(_TestNormBase): + dt = np.float32 + dec = 6 + + +class _TestNormInt64Base(_TestNormBase): + dt = np.int64 + dec = 12 + + +class TestNormDouble(_TestNorm, _TestNormDoubleBase): + pass + + +class TestNormSingle(_TestNorm, _TestNormSingleBase): + pass + + +class TestNormInt64(_TestNorm, _TestNormInt64Base): + pass + + +class TestMatrixRank: + + def test_matrix_rank(self): + # Full rank matrix + assert_equal(4, matrix_rank(np.eye(4))) + # rank deficient matrix + I = np.eye(4) + I[-1, -1] = 0. + assert_equal(matrix_rank(I), 3) + # All zeros - zero rank + assert_equal(matrix_rank(np.zeros((4, 4))), 0) + # 1 dimension - rank 1 unless all 0 + assert_equal(matrix_rank([1, 0, 0, 0]), 1) + assert_equal(matrix_rank(np.zeros((4,))), 0) + # accepts array-like + assert_equal(matrix_rank([1]), 1) + # greater than 2 dimensions treated as stacked matrices + ms = np.array([I, np.eye(4), np.zeros((4, 4))]) + assert_equal(matrix_rank(ms), np.array([3, 4, 0])) + # works on scalar + assert_equal(matrix_rank(1), 1) + + with assert_raises_regex( + ValueError, "`tol` and `rtol` can\'t be both set." + ): + matrix_rank(I, tol=0.01, rtol=0.01) + + def test_symmetric_rank(self): + assert_equal(4, matrix_rank(np.eye(4), hermitian=True)) + assert_equal(1, matrix_rank(np.ones((4, 4)), hermitian=True)) + assert_equal(0, matrix_rank(np.zeros((4, 4)), hermitian=True)) + # rank deficient matrix + I = np.eye(4) + I[-1, -1] = 0. + assert_equal(3, matrix_rank(I, hermitian=True)) + # manually supplied tolerance + I[-1, -1] = 1e-8 + assert_equal(4, matrix_rank(I, hermitian=True, tol=0.99e-8)) + assert_equal(3, matrix_rank(I, hermitian=True, tol=1.01e-8)) + + +def test_reduced_rank(): + # Test matrices with reduced rank + rng = np.random.RandomState(20120714) + for i in range(100): + # Make a rank deficient matrix + X = rng.normal(size=(40, 10)) + X[:, 0] = X[:, 1] + X[:, 2] + # Assert that matrix_rank detected deficiency + assert_equal(matrix_rank(X), 9) + X[:, 3] = X[:, 4] + X[:, 5] + assert_equal(matrix_rank(X), 8) + + +class TestQR: + # Define the array class here, so run this on matrices elsewhere. + array = np.array + + def check_qr(self, a): + # This test expects the argument `a` to be an ndarray or + # a subclass of an ndarray of inexact type. + a_type = type(a) + a_dtype = a.dtype + m, n = a.shape + k = min(m, n) + + # mode == 'complete' + res = linalg.qr(a, mode='complete') + Q, R = res.Q, res.R + assert_(Q.dtype == a_dtype) + assert_(R.dtype == a_dtype) + assert_(isinstance(Q, a_type)) + assert_(isinstance(R, a_type)) + assert_(Q.shape == (m, m)) + assert_(R.shape == (m, n)) + assert_almost_equal(dot(Q, R), a) + assert_almost_equal(dot(Q.T.conj(), Q), np.eye(m)) + assert_almost_equal(np.triu(R), R) + + # mode == 'reduced' + q1, r1 = linalg.qr(a, mode='reduced') + assert_(q1.dtype == a_dtype) + assert_(r1.dtype == a_dtype) + assert_(isinstance(q1, a_type)) + assert_(isinstance(r1, a_type)) + assert_(q1.shape == (m, k)) + assert_(r1.shape == (k, n)) + assert_almost_equal(dot(q1, r1), a) + assert_almost_equal(dot(q1.T.conj(), q1), np.eye(k)) + assert_almost_equal(np.triu(r1), r1) + + # mode == 'r' + r2 = linalg.qr(a, mode='r') + assert_(r2.dtype == a_dtype) + assert_(isinstance(r2, a_type)) + assert_almost_equal(r2, r1) + + @pytest.mark.parametrize(["m", "n"], [ + (3, 0), + (0, 3), + (0, 0) + ]) + def test_qr_empty(self, m, n): + k = min(m, n) + a = np.empty((m, n)) + + self.check_qr(a) + + h, tau = np.linalg.qr(a, mode='raw') + assert_equal(h.dtype, np.double) + assert_equal(tau.dtype, np.double) + assert_equal(h.shape, (n, m)) + assert_equal(tau.shape, (k,)) + + def test_mode_raw(self): + # The factorization is not unique and varies between libraries, + # so it is not possible to check against known values. Functional + # testing is a possibility, but awaits the exposure of more + # of the functions in lapack_lite. Consequently, this test is + # very limited in scope. Note that the results are in FORTRAN + # order, hence the h arrays are transposed. + a = self.array([[1, 2], [3, 4], [5, 6]], dtype=np.double) + + # Test double + h, tau = linalg.qr(a, mode='raw') + assert_(h.dtype == np.double) + assert_(tau.dtype == np.double) + assert_(h.shape == (2, 3)) + assert_(tau.shape == (2,)) + + h, tau = linalg.qr(a.T, mode='raw') + assert_(h.dtype == np.double) + assert_(tau.dtype == np.double) + assert_(h.shape == (3, 2)) + assert_(tau.shape == (2,)) + + def test_mode_all_but_economic(self): + a = self.array([[1, 2], [3, 4]]) + b = self.array([[1, 2], [3, 4], [5, 6]]) + for dt in "fd": + m1 = a.astype(dt) + m2 = b.astype(dt) + self.check_qr(m1) + self.check_qr(m2) + self.check_qr(m2.T) + + for dt in "fd": + m1 = 1 + 1j * a.astype(dt) + m2 = 1 + 1j * b.astype(dt) + self.check_qr(m1) + self.check_qr(m2) + self.check_qr(m2.T) + + def check_qr_stacked(self, a): + # This test expects the argument `a` to be an ndarray or + # a subclass of an ndarray of inexact type. + a_type = type(a) + a_dtype = a.dtype + m, n = a.shape[-2:] + k = min(m, n) + + # mode == 'complete' + q, r = linalg.qr(a, mode='complete') + assert_(q.dtype == a_dtype) + assert_(r.dtype == a_dtype) + assert_(isinstance(q, a_type)) + assert_(isinstance(r, a_type)) + assert_(q.shape[-2:] == (m, m)) + assert_(r.shape[-2:] == (m, n)) + assert_almost_equal(matmul(q, r), a) + I_mat = np.identity(q.shape[-1]) + stack_I_mat = np.broadcast_to(I_mat, + q.shape[:-2] + (q.shape[-1],) * 2) + assert_almost_equal(matmul(swapaxes(q, -1, -2).conj(), q), stack_I_mat) + assert_almost_equal(np.triu(r[..., :, :]), r) + + # mode == 'reduced' + q1, r1 = linalg.qr(a, mode='reduced') + assert_(q1.dtype == a_dtype) + assert_(r1.dtype == a_dtype) + assert_(isinstance(q1, a_type)) + assert_(isinstance(r1, a_type)) + assert_(q1.shape[-2:] == (m, k)) + assert_(r1.shape[-2:] == (k, n)) + assert_almost_equal(matmul(q1, r1), a) + I_mat = np.identity(q1.shape[-1]) + stack_I_mat = np.broadcast_to(I_mat, + q1.shape[:-2] + (q1.shape[-1],) * 2) + assert_almost_equal(matmul(swapaxes(q1, -1, -2).conj(), q1), + stack_I_mat) + assert_almost_equal(np.triu(r1[..., :, :]), r1) + + # mode == 'r' + r2 = linalg.qr(a, mode='r') + assert_(r2.dtype == a_dtype) + assert_(isinstance(r2, a_type)) + assert_almost_equal(r2, r1) + + @pytest.mark.parametrize("size", [ + (3, 4), (4, 3), (4, 4), + (3, 0), (0, 3)]) + @pytest.mark.parametrize("outer_size", [ + (2, 2), (2,), (2, 3, 4)]) + @pytest.mark.parametrize("dt", [ + np.single, np.double, + np.csingle, np.cdouble]) + def test_stacked_inputs(self, outer_size, size, dt): + + rng = np.random.default_rng(123) + A = rng.normal(size=outer_size + size).astype(dt) + B = rng.normal(size=outer_size + size).astype(dt) + self.check_qr_stacked(A) + self.check_qr_stacked(A + 1.j * B) + + +class TestCholesky: + + @pytest.mark.parametrize( + 'shape', [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)] + ) + @pytest.mark.parametrize( + 'dtype', (np.float32, np.float64, np.complex64, np.complex128) + ) + @pytest.mark.parametrize( + 'upper', [False, True]) + def test_basic_property(self, shape, dtype, upper): + np.random.seed(1) + a = np.random.randn(*shape) + if np.issubdtype(dtype, np.complexfloating): + a = a + 1j * np.random.randn(*shape) + + t = list(range(len(shape))) + t[-2:] = -1, -2 + + a = np.matmul(a.transpose(t).conj(), a) + a = np.asarray(a, dtype=dtype) + + c = np.linalg.cholesky(a, upper=upper) + + # Check A = L L^H or A = U^H U + if upper: + b = np.matmul(c.transpose(t).conj(), c) + else: + b = np.matmul(c, c.transpose(t).conj()) + + atol = 500 * a.shape[0] * np.finfo(dtype).eps + assert_allclose(b, a, atol=atol, err_msg=f'{shape} {dtype}\n{a}\n{c}') + + # Check diag(L or U) is real and positive + d = np.diagonal(c, axis1=-2, axis2=-1) + assert_(np.all(np.isreal(d))) + assert_(np.all(d >= 0)) + + def test_0_size(self): + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res = linalg.cholesky(a) + assert_equal(a.shape, res.shape) + assert_(res.dtype.type is np.float64) + # for documentation purpose: + assert_(isinstance(res, np.ndarray)) + + a = np.zeros((1, 0, 0), dtype=np.complex64).view(ArraySubclass) + res = linalg.cholesky(a) + assert_equal(a.shape, res.shape) + assert_(res.dtype.type is np.complex64) + assert_(isinstance(res, np.ndarray)) + + def test_upper_lower_arg(self): + # Explicit test of upper argument that also checks the default. + a = np.array([[1 + 0j, 0 - 2j], [0 + 2j, 5 + 0j]]) + + assert_equal(linalg.cholesky(a), linalg.cholesky(a, upper=False)) + + assert_equal( + linalg.cholesky(a, upper=True), + linalg.cholesky(a).T.conj() + ) + + +class TestOuter: + arr1 = np.arange(3) + arr2 = np.arange(3) + expected = np.array( + [[0, 0, 0], + [0, 1, 2], + [0, 2, 4]] + ) + + assert_array_equal(np.linalg.outer(arr1, arr2), expected) + + with assert_raises_regex( + ValueError, "Input arrays must be one-dimensional" + ): + np.linalg.outer(arr1[:, np.newaxis], arr2) + + +def test_byteorder_check(): + # Byte order check should pass for native order + if sys.byteorder == 'little': + native = '<' + else: + native = '>' + + for dtt in (np.float32, np.float64): + arr = np.eye(4, dtype=dtt) + n_arr = arr.view(arr.dtype.newbyteorder(native)) + sw_arr = arr.view(arr.dtype.newbyteorder("S")).byteswap() + assert_equal(arr.dtype.byteorder, '=') + for routine in (linalg.inv, linalg.det, linalg.pinv): + # Normal call + res = routine(arr) + # Native but not '=' + assert_array_equal(res, routine(n_arr)) + # Swapped + assert_array_equal(res, routine(sw_arr)) + + +@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") +def test_generalized_raise_multiloop(): + # It should raise an error even if the error doesn't occur in the + # last iteration of the ufunc inner loop + + invertible = np.array([[1, 2], [3, 4]]) + non_invertible = np.array([[1, 1], [1, 1]]) + + x = np.zeros([4, 4, 2, 2])[1::2] + x[...] = invertible + x[0, 0] = non_invertible + + assert_raises(np.linalg.LinAlgError, np.linalg.inv, x) + + +@pytest.mark.skipif( + threading.active_count() > 1, + reason="skipping test that uses fork because there are multiple threads") +@pytest.mark.skipif( + NOGIL_BUILD, + reason="Cannot safely use fork in tests on the free-threaded build") +def test_xerbla_override(): + # Check that our xerbla has been successfully linked in. If it is not, + # the default xerbla routine is called, which prints a message to stdout + # and may, or may not, abort the process depending on the LAPACK package. + + XERBLA_OK = 255 + + try: + pid = os.fork() + except (OSError, AttributeError): + # fork failed, or not running on POSIX + pytest.skip("Not POSIX or fork failed.") + + if pid == 0: + # child; close i/o file handles + os.close(1) + os.close(0) + # Avoid producing core files. + import resource + resource.setrlimit(resource.RLIMIT_CORE, (0, 0)) + # These calls may abort. + try: + np.linalg.lapack_lite.xerbla() + except ValueError: + pass + except Exception: + os._exit(os.EX_CONFIG) + + try: + a = np.array([[1.]]) + np.linalg.lapack_lite.dorgqr( + 1, 1, 1, a, + 0, # <- invalid value + a, a, 0, 0) + except ValueError as e: + if "DORGQR parameter number 5" in str(e): + # success, reuse error code to mark success as + # FORTRAN STOP returns as success. + os._exit(XERBLA_OK) + + # Did not abort, but our xerbla was not linked in. + os._exit(os.EX_CONFIG) + else: + # parent + pid, status = os.wait() + if os.WEXITSTATUS(status) != XERBLA_OK: + pytest.skip('Numpy xerbla not linked in.') + + +@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") +@pytest.mark.slow +def test_sdot_bug_8577(): + # Regression test that loading certain other libraries does not + # result to wrong results in float32 linear algebra. + # + # There's a bug gh-8577 on OSX that can trigger this, and perhaps + # there are also other situations in which it occurs. + # + # Do the check in a separate process. + + bad_libs = ['PyQt5.QtWidgets', 'IPython'] + + template = textwrap.dedent(""" + import sys + {before} + try: + import {bad_lib} + except ImportError: + sys.exit(0) + {after} + x = np.ones(2, dtype=np.float32) + sys.exit(0 if np.allclose(x.dot(x), 2.0) else 1) + """) + + for bad_lib in bad_libs: + code = template.format(before="import numpy as np", after="", + bad_lib=bad_lib) + subprocess.check_call([sys.executable, "-c", code]) + + # Swapped import order + code = template.format(after="import numpy as np", before="", + bad_lib=bad_lib) + subprocess.check_call([sys.executable, "-c", code]) + + +class TestMultiDot: + + def test_basic_function_with_three_arguments(self): + # multi_dot with three arguments uses a fast hand coded algorithm to + # determine the optimal order. Therefore test it separately. + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + + assert_almost_equal(multi_dot([A, B, C]), A.dot(B).dot(C)) + assert_almost_equal(multi_dot([A, B, C]), np.dot(A, np.dot(B, C))) + + def test_basic_function_with_two_arguments(self): + # separate code path with two arguments + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + + assert_almost_equal(multi_dot([A, B]), A.dot(B)) + assert_almost_equal(multi_dot([A, B]), np.dot(A, B)) + + def test_basic_function_with_dynamic_programming_optimization(self): + # multi_dot with four or more arguments uses the dynamic programming + # optimization and therefore deserve a separate + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + D = np.random.random((2, 1)) + assert_almost_equal(multi_dot([A, B, C, D]), A.dot(B).dot(C).dot(D)) + + def test_vector_as_first_argument(self): + # The first argument can be 1-D + A1d = np.random.random(2) # 1-D + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + D = np.random.random((2, 2)) + + # the result should be 1-D + assert_equal(multi_dot([A1d, B, C, D]).shape, (2,)) + + def test_vector_as_last_argument(self): + # The last argument can be 1-D + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + D1d = np.random.random(2) # 1-D + + # the result should be 1-D + assert_equal(multi_dot([A, B, C, D1d]).shape, (6,)) + + def test_vector_as_first_and_last_argument(self): + # The first and last arguments can be 1-D + A1d = np.random.random(2) # 1-D + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + D1d = np.random.random(2) # 1-D + + # the result should be a scalar + assert_equal(multi_dot([A1d, B, C, D1d]).shape, ()) + + def test_three_arguments_and_out(self): + # multi_dot with three arguments uses a fast hand coded algorithm to + # determine the optimal order. Therefore test it separately. + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + + out = np.zeros((6, 2)) + ret = multi_dot([A, B, C], out=out) + assert out is ret + assert_almost_equal(out, A.dot(B).dot(C)) + assert_almost_equal(out, np.dot(A, np.dot(B, C))) + + def test_two_arguments_and_out(self): + # separate code path with two arguments + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + out = np.zeros((6, 6)) + ret = multi_dot([A, B], out=out) + assert out is ret + assert_almost_equal(out, A.dot(B)) + assert_almost_equal(out, np.dot(A, B)) + + def test_dynamic_programming_optimization_and_out(self): + # multi_dot with four or more arguments uses the dynamic programming + # optimization and therefore deserve a separate test + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + D = np.random.random((2, 1)) + out = np.zeros((6, 1)) + ret = multi_dot([A, B, C, D], out=out) + assert out is ret + assert_almost_equal(out, A.dot(B).dot(C).dot(D)) + + def test_dynamic_programming_logic(self): + # Test for the dynamic programming part + # This test is directly taken from Cormen page 376. + arrays = [np.random.random((30, 35)), + np.random.random((35, 15)), + np.random.random((15, 5)), + np.random.random((5, 10)), + np.random.random((10, 20)), + np.random.random((20, 25))] + m_expected = np.array([[0., 15750., 7875., 9375., 11875., 15125.], + [0., 0., 2625., 4375., 7125., 10500.], + [0., 0., 0., 750., 2500., 5375.], + [0., 0., 0., 0., 1000., 3500.], + [0., 0., 0., 0., 0., 5000.], + [0., 0., 0., 0., 0., 0.]]) + s_expected = np.array([[0, 1, 1, 3, 3, 3], + [0, 0, 2, 3, 3, 3], + [0, 0, 0, 3, 3, 3], + [0, 0, 0, 0, 4, 5], + [0, 0, 0, 0, 0, 5], + [0, 0, 0, 0, 0, 0]], dtype=int) + s_expected -= 1 # Cormen uses 1-based index, python does not. + + s, m = _multi_dot_matrix_chain_order(arrays, return_costs=True) + + # Only the upper triangular part (without the diagonal) is interesting. + assert_almost_equal(np.triu(s[:-1, 1:]), + np.triu(s_expected[:-1, 1:])) + assert_almost_equal(np.triu(m), np.triu(m_expected)) + + def test_too_few_input_arrays(self): + assert_raises(ValueError, multi_dot, []) + assert_raises(ValueError, multi_dot, [np.random.random((3, 3))]) + + +class TestTensorinv: + + @pytest.mark.parametrize("arr, ind", [ + (np.ones((4, 6, 8, 2)), 2), + (np.ones((3, 3, 2)), 1), + ]) + def test_non_square_handling(self, arr, ind): + with assert_raises(LinAlgError): + linalg.tensorinv(arr, ind=ind) + + @pytest.mark.parametrize("shape, ind", [ + # examples from docstring + ((4, 6, 8, 3), 2), + ((24, 8, 3), 1), + ]) + def test_tensorinv_shape(self, shape, ind): + a = np.eye(24).reshape(shape) + ainv = linalg.tensorinv(a=a, ind=ind) + expected = a.shape[ind:] + a.shape[:ind] + actual = ainv.shape + assert_equal(actual, expected) + + @pytest.mark.parametrize("ind", [ + 0, -2, + ]) + def test_tensorinv_ind_limit(self, ind): + a = np.eye(24).reshape((4, 6, 8, 3)) + with assert_raises(ValueError): + linalg.tensorinv(a=a, ind=ind) + + def test_tensorinv_result(self): + # mimic a docstring example + a = np.eye(24).reshape((24, 8, 3)) + ainv = linalg.tensorinv(a, ind=1) + b = np.ones(24) + assert_allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) + + +class TestTensorsolve: + + @pytest.mark.parametrize("a, axes", [ + (np.ones((4, 6, 8, 2)), None), + (np.ones((3, 3, 2)), (0, 2)), + ]) + def test_non_square_handling(self, a, axes): + with assert_raises(LinAlgError): + b = np.ones(a.shape[:2]) + linalg.tensorsolve(a, b, axes=axes) + + @pytest.mark.parametrize("shape", + [(2, 3, 6), (3, 4, 4, 3), (0, 3, 3, 0)], + ) + def test_tensorsolve_result(self, shape): + a = np.random.randn(*shape) + b = np.ones(a.shape[:2]) + x = np.linalg.tensorsolve(a, b) + assert_allclose(np.tensordot(a, x, axes=len(x.shape)), b) + + +def test_unsupported_commontype(): + # linalg gracefully handles unsupported type + arr = np.array([[1, -2], [2, 5]], dtype='float16') + with assert_raises_regex(TypeError, "unsupported in linalg"): + linalg.cholesky(arr) + + +#@pytest.mark.slow +#@pytest.mark.xfail(not HAS_LAPACK64, run=False, +# reason="Numpy not compiled with 64-bit BLAS/LAPACK") +#@requires_memory(free_bytes=16e9) +@pytest.mark.skip(reason="Bad memory reports lead to OOM in ci testing") +def test_blas64_dot(): + n = 2**32 + a = np.zeros([1, n], dtype=np.float32) + b = np.ones([1, 1], dtype=np.float32) + a[0, -1] = 1 + c = np.dot(b, a) + assert_equal(c[0, -1], 1) + + +@pytest.mark.xfail(not HAS_LAPACK64, + reason="Numpy not compiled with 64-bit BLAS/LAPACK") +def test_blas64_geqrf_lwork_smoketest(): + # Smoke test LAPACK geqrf lwork call with 64-bit integers + dtype = np.float64 + lapack_routine = np.linalg.lapack_lite.dgeqrf + + m = 2**32 + 1 + n = 2**32 + 1 + lda = m + + # Dummy arrays, not referenced by the lapack routine, so don't + # need to be of the right size + a = np.zeros([1, 1], dtype=dtype) + work = np.zeros([1], dtype=dtype) + tau = np.zeros([1], dtype=dtype) + + # Size query + results = lapack_routine(m, n, a, lda, tau, work, -1, 0) + assert_equal(results['info'], 0) + assert_equal(results['m'], m) + assert_equal(results['n'], m) + + # Should result to an integer of a reasonable size + lwork = int(work.item()) + assert_(2**32 < lwork < 2**42) + + +def test_diagonal(): + # Here we only test if selected axes are compatible + # with Array API (last two). Core implementation + # of `diagonal` is tested in `test_multiarray.py`. + x = np.arange(60).reshape((3, 4, 5)) + actual = np.linalg.diagonal(x) + expected = np.array( + [ + [0, 6, 12, 18], + [20, 26, 32, 38], + [40, 46, 52, 58], + ] + ) + assert_equal(actual, expected) + + +def test_trace(): + # Here we only test if selected axes are compatible + # with Array API (last two). Core implementation + # of `trace` is tested in `test_multiarray.py`. + x = np.arange(60).reshape((3, 4, 5)) + actual = np.linalg.trace(x) + expected = np.array([36, 116, 196]) + + assert_equal(actual, expected) + + +def test_cross(): + x = np.arange(9).reshape((3, 3)) + actual = np.linalg.cross(x, x + 1) + expected = np.array([ + [-1, 2, -1], + [-1, 2, -1], + [-1, 2, -1], + ]) + + assert_equal(actual, expected) + + # We test that lists are converted to arrays. + u = [1, 2, 3] + v = [4, 5, 6] + actual = np.linalg.cross(u, v) + expected = array([-3, 6, -3]) + + assert_equal(actual, expected) + + with assert_raises_regex( + ValueError, + r"input arrays must be \(arrays of\) 3-dimensional vectors" + ): + x_2dim = x[:, 1:] + np.linalg.cross(x_2dim, x_2dim) + + +def test_tensordot(): + # np.linalg.tensordot is just an alias for np.tensordot + x = np.arange(6).reshape((2, 3)) + + assert np.linalg.tensordot(x, x) == 55 + assert np.linalg.tensordot(x, x, axes=[(0, 1), (0, 1)]) == 55 + + +def test_matmul(): + # np.linalg.matmul and np.matmul only differs in the number + # of arguments in the signature + x = np.arange(6).reshape((2, 3)) + actual = np.linalg.matmul(x, x.T) + expected = np.array([[5, 14], [14, 50]]) + + assert_equal(actual, expected) + + +def test_matrix_transpose(): + x = np.arange(6).reshape((2, 3)) + actual = np.linalg.matrix_transpose(x) + expected = x.T + + assert_equal(actual, expected) + + with assert_raises_regex( + ValueError, "array must be at least 2-dimensional" + ): + np.linalg.matrix_transpose(x[:, 0]) + + +def test_matrix_norm(): + x = np.arange(9).reshape((3, 3)) + actual = np.linalg.matrix_norm(x) + + assert_almost_equal(actual, np.float64(14.2828), double_decimal=3) + + actual = np.linalg.matrix_norm(x, keepdims=True) + + assert_almost_equal(actual, np.array([[14.2828]]), double_decimal=3) + + +def test_matrix_norm_empty(): + for shape in [(0, 2), (2, 0), (0, 0)]: + for dtype in [np.float64, np.float32, np.int32]: + x = np.zeros(shape, dtype) + assert_equal(np.linalg.matrix_norm(x, ord="fro"), 0) + assert_equal(np.linalg.matrix_norm(x, ord="nuc"), 0) + assert_equal(np.linalg.matrix_norm(x, ord=1), 0) + assert_equal(np.linalg.matrix_norm(x, ord=2), 0) + assert_equal(np.linalg.matrix_norm(x, ord=np.inf), 0) + +def test_vector_norm(): + x = np.arange(9).reshape((3, 3)) + actual = np.linalg.vector_norm(x) + + assert_almost_equal(actual, np.float64(14.2828), double_decimal=3) + + actual = np.linalg.vector_norm(x, axis=0) + + assert_almost_equal( + actual, np.array([6.7082, 8.124, 9.6436]), double_decimal=3 + ) + + actual = np.linalg.vector_norm(x, keepdims=True) + expected = np.full((1, 1), 14.2828, dtype='float64') + assert_equal(actual.shape, expected.shape) + assert_almost_equal(actual, expected, double_decimal=3) + + +def test_vector_norm_empty(): + for dtype in [np.float64, np.float32, np.int32]: + x = np.zeros(0, dtype) + assert_equal(np.linalg.vector_norm(x, ord=1), 0) + assert_equal(np.linalg.vector_norm(x, ord=2), 0) + assert_equal(np.linalg.vector_norm(x, ord=np.inf), 0) diff --git a/local_packages/numpy/linalg/tests/test_regression.py b/local_packages/numpy/linalg/tests/test_regression.py new file mode 100644 index 0000000..12d10bc --- /dev/null +++ b/local_packages/numpy/linalg/tests/test_regression.py @@ -0,0 +1,190 @@ +""" Test functions for linalg module +""" + +import pytest + +import numpy as np +from numpy import arange, array, dot, float64, linalg, transpose +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_array_less, + assert_equal, + assert_raises, +) + + +class TestRegression: + + def test_eig_build(self): + # Ticket #652 + rva = array([1.03221168e+02 + 0.j, + -1.91843603e+01 + 0.j, + -6.04004526e-01 + 15.84422474j, + -6.04004526e-01 - 15.84422474j, + -1.13692929e+01 + 0.j, + -6.57612485e-01 + 10.41755503j, + -6.57612485e-01 - 10.41755503j, + 1.82126812e+01 + 0.j, + 1.06011014e+01 + 0.j, + 7.80732773e+00 + 0.j, + -7.65390898e-01 + 0.j, + 1.51971555e-15 + 0.j, + -1.51308713e-15 + 0.j]) + a = arange(13 * 13, dtype=float64) + a = a.reshape((13, 13)) + a = a % 17 + va, ve = linalg.eig(a) + va.sort() + rva.sort() + assert_array_almost_equal(va, rva) + + def test_eigh_build(self): + # Ticket 662. + rvals = [68.60568999, 89.57756725, 106.67185574] + + cov = array([[77.70273908, 3.51489954, 15.64602427], + [ 3.51489954, 88.97013878, -1.07431931], + [15.64602427, -1.07431931, 98.18223512]]) + + vals, vecs = linalg.eigh(cov) + assert_array_almost_equal(vals, rvals) + + def test_svd_build(self): + # Ticket 627. + a = array([[0., 1.], [1., 1.], [2., 1.], [3., 1.]]) + m, n = a.shape + u, s, vh = linalg.svd(a) + + b = dot(transpose(u[:, n:]), a) + + assert_array_almost_equal(b, np.zeros((2, 2))) + + def test_norm_vector_badarg(self): + # Regression for #786: Frobenius norm for vectors raises + # ValueError. + assert_raises(ValueError, linalg.norm, array([1., 2., 3.]), 'fro') + + def test_lapack_endian(self): + # For bug #1482 + a = array([[ 5.7998084, -2.1825367], + [-2.1825367, 9.85910595]], dtype='>f8') + b = array(a, dtype=' 0.5) + assert_equal(c, 1) + assert_equal(np.linalg.matrix_rank(a), 1) + assert_array_less(1, np.linalg.norm(a, ord=2)) + + w_svdvals = linalg.svdvals(a) + assert_array_almost_equal(w, w_svdvals) + + def test_norm_object_array(self): + # gh-7575 + testvector = np.array([np.array([0, 1]), 0, 0], dtype=object) + + norm = linalg.norm(testvector) + assert_array_equal(norm, [0, 1]) + assert_(norm.dtype == np.dtype('float64')) + + norm = linalg.norm(testvector, ord=1) + assert_array_equal(norm, [0, 1]) + assert_(norm.dtype != np.dtype('float64')) + + norm = linalg.norm(testvector, ord=2) + assert_array_equal(norm, [0, 1]) + assert_(norm.dtype == np.dtype('float64')) + + assert_raises(ValueError, linalg.norm, testvector, ord='fro') + assert_raises(ValueError, linalg.norm, testvector, ord='nuc') + assert_raises(ValueError, linalg.norm, testvector, ord=np.inf) + assert_raises(ValueError, linalg.norm, testvector, ord=-np.inf) + assert_raises(ValueError, linalg.norm, testvector, ord=0) + assert_raises(ValueError, linalg.norm, testvector, ord=-1) + assert_raises(ValueError, linalg.norm, testvector, ord=-2) + + testmatrix = np.array([[np.array([0, 1]), 0, 0], + [0, 0, 0]], dtype=object) + + norm = linalg.norm(testmatrix) + assert_array_equal(norm, [0, 1]) + assert_(norm.dtype == np.dtype('float64')) + + norm = linalg.norm(testmatrix, ord='fro') + assert_array_equal(norm, [0, 1]) + assert_(norm.dtype == np.dtype('float64')) + + assert_raises(TypeError, linalg.norm, testmatrix, ord='nuc') + assert_raises(ValueError, linalg.norm, testmatrix, ord=np.inf) + assert_raises(ValueError, linalg.norm, testmatrix, ord=-np.inf) + assert_raises(ValueError, linalg.norm, testmatrix, ord=0) + assert_raises(ValueError, linalg.norm, testmatrix, ord=1) + assert_raises(ValueError, linalg.norm, testmatrix, ord=-1) + assert_raises(TypeError, linalg.norm, testmatrix, ord=2) + assert_raises(TypeError, linalg.norm, testmatrix, ord=-2) + assert_raises(ValueError, linalg.norm, testmatrix, ord=3) + + def test_lstsq_complex_larger_rhs(self): + # gh-9891 + size = 20 + n_rhs = 70 + G = np.random.randn(size, size) + 1j * np.random.randn(size, size) + u = np.random.randn(size, n_rhs) + 1j * np.random.randn(size, n_rhs) + b = G.dot(u) + # This should work without segmentation fault. + u_lstsq, res, rank, sv = linalg.lstsq(G, b, rcond=None) + # check results just in case + assert_array_almost_equal(u_lstsq, u) + + @pytest.mark.parametrize("upper", [True, False]) + def test_cholesky_empty_array(self, upper): + # gh-25840 - upper=True hung before. + res = np.linalg.cholesky(np.zeros((0, 0)), upper=upper) + assert res.size == 0 + + @pytest.mark.parametrize("rtol", [0.0, [0.0] * 4, np.zeros((4,))]) + def test_matrix_rank_rtol_argument(self, rtol): + # gh-25877 + x = np.zeros((4, 3, 2)) + res = np.linalg.matrix_rank(x, rtol=rtol) + assert res.shape == (4,) + + @pytest.mark.thread_unsafe(reason="test is already testing threads with openblas") + def test_openblas_threading(self): + # gh-27036 + # Test whether matrix multiplication involving a large matrix always + # gives the same (correct) answer + x = np.arange(500000, dtype=np.float64) + src = np.vstack((x, -10 * x)).T + matrix = np.array([[0, 1], [1, 0]]) + expected = np.vstack((-10 * x, x)).T # src @ matrix + for i in range(200): + result = src @ matrix + mismatches = (~np.isclose(result, expected)).sum() + if mismatches != 0: + assert False, ("unexpected result from matmul, " + "probably due to OpenBLAS threading issues") + + def test_norm_linux_arm(self): + # gh-30816 + a = np.arange(20000) / 50000 + b = a + 1j * np.roll(np.flip(a), 12345) + norm = np.linalg.norm(b) + assert_almost_equal(norm, 46.18628948075393) diff --git a/local_packages/numpy/ma/API_CHANGES.txt b/local_packages/numpy/ma/API_CHANGES.txt new file mode 100644 index 0000000..a3d792a --- /dev/null +++ b/local_packages/numpy/ma/API_CHANGES.txt @@ -0,0 +1,135 @@ +.. -*- rest -*- + +================================================== +API changes in the new masked array implementation +================================================== + +Masked arrays are subclasses of ndarray +--------------------------------------- + +Contrary to the original implementation, masked arrays are now regular +ndarrays:: + + >>> x = masked_array([1,2,3],mask=[0,0,1]) + >>> print isinstance(x, numpy.ndarray) + True + + +``_data`` returns a view of the masked array +-------------------------------------------- + +Masked arrays are composed of a ``_data`` part and a ``_mask``. Accessing the +``_data`` part will return a regular ndarray or any of its subclass, depending +on the initial data:: + + >>> x = masked_array(numpy.matrix([[1,2],[3,4]]),mask=[[0,0],[0,1]]) + >>> print x._data + [[1 2] + [3 4]] + >>> print type(x._data) + + + +In practice, ``_data`` is implemented as a property, not as an attribute. +Therefore, you cannot access it directly, and some simple tests such as the +following one will fail:: + + >>>x._data is x._data + False + + +``filled(x)`` can return a subclass of ndarray +---------------------------------------------- +The function ``filled(a)`` returns an array of the same type as ``a._data``:: + + >>> x = masked_array(numpy.matrix([[1,2],[3,4]]),mask=[[0,0],[0,1]]) + >>> y = filled(x) + >>> print type(y) + + >>> print y + matrix([[ 1, 2], + [ 3, 999999]]) + + +``put``, ``putmask`` behave like their ndarray counterparts +----------------------------------------------------------- + +Previously, ``putmask`` was used like this:: + + mask = [False,True,True] + x = array([1,4,7],mask=mask) + putmask(x,mask,[3]) + +which translated to:: + + x[~mask] = [3] + +(Note that a ``True``-value in a mask suppresses a value.) + +In other words, the mask had the same length as ``x``, whereas +``values`` had ``sum(~mask)`` elements. + +Now, the behaviour is similar to that of ``ndarray.putmask``, where +the mask and the values are both the same length as ``x``, i.e. + +:: + + putmask(x,mask,[3,0,0]) + + +``fill_value`` is a property +---------------------------- + +``fill_value`` is no longer a method, but a property:: + + >>> print x.fill_value + 999999 + +``cumsum`` and ``cumprod`` ignore missing values +------------------------------------------------ + +Missing values are assumed to be the identity element, i.e. 0 for +``cumsum`` and 1 for ``cumprod``:: + + >>> x = N.ma.array([1,2,3,4],mask=[False,True,False,False]) + >>> print x + [1 -- 3 4] + >>> print x.cumsum() + [1 -- 4 8] + >> print x.cumprod() + [1 -- 3 12] + +``bool(x)`` raises a ValueError +------------------------------- + +Masked arrays now behave like regular ``ndarrays``, in that they cannot be +converted to booleans: + +:: + + >>> x = N.ma.array([1,2,3]) + >>> bool(x) + Traceback (most recent call last): + File "", line 1, in + ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all() + + +================================== +New features (non exhaustive list) +================================== + +``mr_`` +------- + +``mr_`` mimics the behavior of ``r_`` for masked arrays:: + + >>> np.ma.mr_[3,4,5] + masked_array(data = [3 4 5], + mask = False, + fill_value=999999) + + +``anom`` +-------- + +The ``anom`` method returns the deviations from the average (anomalies). diff --git a/local_packages/numpy/ma/LICENSE b/local_packages/numpy/ma/LICENSE new file mode 100644 index 0000000..b41aae0 --- /dev/null +++ b/local_packages/numpy/ma/LICENSE @@ -0,0 +1,24 @@ +* Copyright (c) 2006, University of Georgia and Pierre G.F. Gerard-Marchant +* All rights reserved. +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above copyright +* notice, this list of conditions and the following disclaimer in the +* documentation and/or other materials provided with the distribution. +* * Neither the name of the University of Georgia nor the +* names of its contributors may be used to endorse or promote products +* derived from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY +* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/local_packages/numpy/ma/README.rst b/local_packages/numpy/ma/README.rst new file mode 100644 index 0000000..cd10103 --- /dev/null +++ b/local_packages/numpy/ma/README.rst @@ -0,0 +1,236 @@ +================================== +A guide to masked arrays in NumPy +================================== + +.. Contents:: + +See http://www.scipy.org/scipy/numpy/wiki/MaskedArray (dead link) +for updates of this document. + + +History +------- + +As a regular user of MaskedArray, I (Pierre G.F. Gerard-Marchant) became +increasingly frustrated with the subclassing of masked arrays (even if +I can only blame my inexperience). I needed to develop a class of arrays +that could store some additional information along with numerical values, +while keeping the possibility for missing data (picture storing a series +of dates along with measurements, what would later become the `TimeSeries +Scikit `__ +(dead link). + +I started to implement such a class, but then quickly realized that +any additional information disappeared when processing these subarrays +(for example, adding a constant value to a subarray would erase its +dates). I ended up writing the equivalent of *numpy.core.ma* for my +particular class, ufuncs included. Everything went fine until I needed to +subclass my new class, when more problems showed up: some attributes of +the new subclass were lost during processing. I identified the culprit as +MaskedArray, which returns masked ndarrays when I expected masked +arrays of my class. I was preparing myself to rewrite *numpy.core.ma* +when I forced myself to learn how to subclass ndarrays. As I became more +familiar with the *__new__* and *__array_finalize__* methods, +I started to wonder why masked arrays were objects, and not ndarrays, +and whether it wouldn't be more convenient for subclassing if they did +behave like regular ndarrays. + +The new *maskedarray* is what I eventually come up with. The +main differences with the initial *numpy.core.ma* package are +that MaskedArray is now a subclass of *ndarray* and that the +*_data* section can now be any subclass of *ndarray*. Apart from a +couple of issues listed below, the behavior of the new MaskedArray +class reproduces the old one. Initially the *maskedarray* +implementation was marginally slower than *numpy.ma* in some areas, +but work is underway to speed it up; the expectation is that it can be +made substantially faster than the present *numpy.ma*. + + +Note that if the subclass has some special methods and +attributes, they are not propagated to the masked version: +this would require a modification of the *__getattribute__* +method (first trying *ndarray.__getattribute__*, then trying +*self._data.__getattribute__* if an exception is raised in the first +place), which really slows things down. + +Main differences +---------------- + + * The *_data* part of the masked array can be any subclass of ndarray (but not recarray, cf below). + * *fill_value* is now a property, not a function. + * in the majority of cases, the mask is forced to *nomask* when no value is actually masked. A notable exception is when a masked array (with no masked values) has just been unpickled. + * I got rid of the *share_mask* flag, I never understood its purpose. + * *put*, *putmask* and *take* now mimic the ndarray methods, to avoid unpleasant surprises. Moreover, *put* and *putmask* both update the mask when needed. * if *a* is a masked array, *bool(a)* raises a *ValueError*, as it does with ndarrays. + * in the same way, the comparison of two masked arrays is a masked array, not a boolean + * *filled(a)* returns an array of the same subclass as *a._data*, and no test is performed on whether it is contiguous or not. + * the mask is always printed, even if it's *nomask*, which makes things easy (for me at least) to remember that a masked array is used. + * *cumsum* works as if the *_data* array was filled with 0. The mask is preserved, but not updated. + * *cumprod* works as if the *_data* array was filled with 1. The mask is preserved, but not updated. + +New features +------------ + +This list is non-exhaustive... + + * the *mr_* function mimics *r_* for masked arrays. + * the *anom* method returns the anomalies (deviations from the average) + +Using the new package with numpy.core.ma +---------------------------------------- + +I tried to make sure that the new package can understand old masked +arrays. Unfortunately, there's no upward compatibility. + +For example: + +>>> import numpy.core.ma as old_ma +>>> import maskedarray as new_ma +>>> x = old_ma.array([1,2,3,4,5], mask=[0,0,1,0,0]) +>>> x +array(data = + [ 1 2 999999 4 5], + mask = + [False False True False False], + fill_value=999999) +>>> y = new_ma.array([1,2,3,4,5], mask=[0,0,1,0,0]) +>>> y +array(data = [1 2 -- 4 5], + mask = [False False True False False], + fill_value=999999) +>>> x==y +array(data = + [True True True True True], + mask = + [False False True False False], + fill_value=?) +>>> old_ma.getmask(x) == new_ma.getmask(x) +array([True, True, True, True, True]) +>>> old_ma.getmask(y) == new_ma.getmask(y) +array([True, True, False, True, True]) +>>> old_ma.getmask(y) +False + + +Using maskedarray with matplotlib +--------------------------------- + +Starting with matplotlib 0.91.2, the masked array importing will work with +the maskedarray branch) as well as with earlier versions. + +By default matplotlib still uses numpy.ma, but there is an rcParams setting +that you can use to select maskedarray instead. In the matplotlibrc file +you will find:: + + #maskedarray : False # True to use external maskedarray module + # instead of numpy.ma; this is a temporary # + setting for testing maskedarray. + + +Uncomment and set to True to select maskedarray everywhere. +Alternatively, you can test a script with maskedarray by using a +command-line option, e.g.:: + + python simple_plot.py --maskedarray + + +Masked records +-------------- + +Like *numpy.ma.core*, the *ndarray*-based implementation +of MaskedArray is limited when working with records: you can +mask any record of the array, but not a field in a record. If you +need this feature, you may want to give the *mrecords* package +a try (available in the *maskedarray* directory in the scipy +sandbox). This module defines a new class, *MaskedRecord*. An +instance of this class accepts a *recarray* as data, and uses two +masks: the *fieldmask* has as many entries as records in the array, +each entry with the same fields as a record, but of boolean types: +they indicate whether the field is masked or not; a record entry +is flagged as masked in the *mask* array if all the fields are +masked. A few examples in the file should give you an idea of what +can be done. Note that *mrecords* is still experimental... + +Optimizing maskedarray +---------------------- + +Should masked arrays be filled before processing or not? +-------------------------------------------------------- + +In the current implementation, most operations on masked arrays involve +the following steps: + + * the input arrays are filled + * the operation is performed on the filled arrays + * the mask is set for the results, from the combination of the input masks and the mask corresponding to the domain of the operation. + +For example, consider the division of two masked arrays:: + + import numpy + import maskedarray as ma + x = ma.array([1,2,3,4],mask=[1,0,0,0], dtype=numpy.float64) + y = ma.array([-1,0,1,2], mask=[0,0,0,1], dtype=numpy.float64) + +The division of x by y is then computed as:: + + d1 = x.filled(0) # d1 = array([0., 2., 3., 4.]) + d2 = y.filled(1) # array([-1., 0., 1., 1.]) + m = ma.mask_or(ma.getmask(x), ma.getmask(y)) # m = + array([True,False,False,True]) + dm = ma.divide.domain(d1,d2) # array([False, True, False, False]) + result = (d1/d2).view(MaskedArray) # masked_array([-0. inf, 3., 4.]) + result._mask = logical_or(m, dm) + +Note that a division by zero takes place. To avoid it, we can consider +to fill the input arrays, taking the domain mask into account, so that:: + + d1 = x._data.copy() # d1 = array([1., 2., 3., 4.]) + d2 = y._data.copy() # array([-1., 0., 1., 2.]) + dm = ma.divide.domain(d1,d2) # array([False, True, False, False]) + numpy.putmask(d2, dm, 1) # d2 = array([-1., 1., 1., 2.]) + m = ma.mask_or(ma.getmask(x), ma.getmask(y)) # m = + array([True,False,False,True]) + result = (d1/d2).view(MaskedArray) # masked_array([-1. 0., 3., 2.]) + result._mask = logical_or(m, dm) + +Note that the *.copy()* is required to avoid updating the inputs with +*putmask*. The *.filled()* method also involves a *.copy()*. + +A third possibility consists in avoid filling the arrays:: + + d1 = x._data # d1 = array([1., 2., 3., 4.]) + d2 = y._data # array([-1., 0., 1., 2.]) + dm = ma.divide.domain(d1,d2) # array([False, True, False, False]) + m = ma.mask_or(ma.getmask(x), ma.getmask(y)) # m = + array([True,False,False,True]) + result = (d1/d2).view(MaskedArray) # masked_array([-1. inf, 3., 2.]) + result._mask = logical_or(m, dm) + +Note that here again the division by zero takes place. + +A quick benchmark gives the following results: + + * *numpy.ma.divide* : 2.69 ms per loop + * classical division : 2.21 ms per loop + * division w/ prefilling : 2.34 ms per loop + * division w/o filling : 1.55 ms per loop + +So, is it worth filling the arrays beforehand ? Yes, if we are interested +in avoiding floating-point exceptions that may fill the result with infs +and nans. No, if we are only interested into speed... + + +Thanks +------ + +I'd like to thank Paul Dubois, Travis Oliphant and Sasha for the +original masked array package: without you, I would never have started +that (it might be argued that I shouldn't have anyway, but that's +another story...). I also wish to extend these thanks to Reggie Dugard +and Eric Firing for their suggestions and numerous improvements. + + +Revision notes +-------------- + + * 08/25/2007 : Creation of this page + * 01/23/2007 : The package has been moved to the SciPy sandbox, and is regularly updated: please check out your SVN version! diff --git a/local_packages/numpy/ma/__init__.py b/local_packages/numpy/ma/__init__.py new file mode 100644 index 0000000..e2a742e --- /dev/null +++ b/local_packages/numpy/ma/__init__.py @@ -0,0 +1,53 @@ +""" +============= +Masked Arrays +============= + +Arrays sometimes contain invalid or missing data. When doing operations +on such arrays, we wish to suppress invalid values, which is the purpose masked +arrays fulfill (an example of typical use is given below). + +For example, examine the following array: + +>>> x = np.array([2, 1, 3, np.nan, 5, 2, 3, np.nan]) + +When we try to calculate the mean of the data, the result is undetermined: + +>>> np.mean(x) +nan + +The mean is calculated using roughly ``np.sum(x)/len(x)``, but since +any number added to ``NaN`` [1]_ produces ``NaN``, this doesn't work. Enter +masked arrays: + +>>> m = np.ma.masked_array(x, np.isnan(x)) +>>> m +masked_array(data=[2.0, 1.0, 3.0, --, 5.0, 2.0, 3.0, --], + mask=[False, False, False, True, False, False, False, True], + fill_value=1e+20) + +Here, we construct a masked array that suppress all ``NaN`` values. We +may now proceed to calculate the mean of the other values: + +>>> np.mean(m) +2.6666666666666665 + +.. [1] Not-a-Number, a floating point value that is the result of an + invalid operation. + +.. moduleauthor:: Pierre Gerard-Marchant +.. moduleauthor:: Jarrod Millman + +""" +from . import core, extras +from .core import * +from .extras import * + +__all__ = ['core', 'extras'] +__all__ += core.__all__ +__all__ += extras.__all__ + +from numpy._pytesttester import PytestTester + +test = PytestTester(__name__) +del PytestTester diff --git a/local_packages/numpy/ma/__init__.pyi b/local_packages/numpy/ma/__init__.pyi new file mode 100644 index 0000000..176e929 --- /dev/null +++ b/local_packages/numpy/ma/__init__.pyi @@ -0,0 +1,458 @@ +from . import core, extras +from .core import ( + MAError, + MaskedArray, + MaskError, + MaskType, + abs, + absolute, + add, + all, + allclose, + allequal, + alltrue, + amax, + amin, + angle, + anom, + anomalies, + any, + append, + arange, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + argmax, + argmin, + argsort, + around, + array, + asanyarray, + asarray, + bitwise_and, + bitwise_or, + bitwise_xor, + bool_, + ceil, + choose, + clip, + common_fill_value, + compress, + compressed, + concatenate, + conjugate, + convolve, + copy, + correlate, + cos, + cosh, + count, + cumprod, + cumsum, + default_fill_value, + diag, + diagonal, + diff, + divide, + empty, + empty_like, + equal, + exp, + expand_dims, + fabs, + filled, + fix_invalid, + flatten_mask, + flatten_structured_array, + floor, + floor_divide, + fmod, + frombuffer, + fromflex, + fromfunction, + getdata, + getmask, + getmaskarray, + greater, + greater_equal, + harden_mask, + hypot, + identity, + ids, + indices, + inner, + innerproduct, + is_mask, + is_masked, + isarray, + isMA, + isMaskedArray, + left_shift, + less, + less_equal, + log, + log2, + log10, + logical_and, + logical_not, + logical_or, + logical_xor, + make_mask, + make_mask_descr, + make_mask_none, + mask_or, + masked, + masked_array, + masked_equal, + masked_greater, + masked_greater_equal, + masked_inside, + masked_invalid, + masked_less, + masked_less_equal, + masked_not_equal, + masked_object, + masked_outside, + masked_print_option, + masked_singleton, + masked_values, + masked_where, + max, + maximum, + maximum_fill_value, + mean, + min, + minimum, + minimum_fill_value, + mod, + multiply, + mvoid, + ndim, + negative, + nomask, + nonzero, + not_equal, + ones, + ones_like, + outer, + outerproduct, + power, + prod, + product, + ptp, + put, + putmask, + ravel, + remainder, + repeat, + reshape, + resize, + right_shift, + round, + round_, + set_fill_value, + shape, + sin, + sinh, + size, + soften_mask, + sometrue, + sort, + sqrt, + squeeze, + std, + subtract, + sum, + swapaxes, + take, + tan, + tanh, + trace, + transpose, + true_divide, + var, + where, + zeros, + zeros_like, +) +from .extras import ( + apply_along_axis, + apply_over_axes, + atleast_1d, + atleast_2d, + atleast_3d, + average, + clump_masked, + clump_unmasked, + column_stack, + compress_cols, + compress_nd, + compress_rowcols, + compress_rows, + corrcoef, + count_masked, + cov, + diagflat, + dot, + dstack, + ediff1d, + flatnotmasked_contiguous, + flatnotmasked_edges, + hsplit, + hstack, + in1d, + intersect1d, + isin, + mask_cols, + mask_rowcols, + mask_rows, + masked_all, + masked_all_like, + median, + mr_, + ndenumerate, + notmasked_contiguous, + notmasked_edges, + polyfit, + row_stack, + setdiff1d, + setxor1d, + stack, + union1d, + unique, + vander, + vstack, +) + +__all__ = [ + "core", + "extras", + "MAError", + "MaskError", + "MaskType", + "MaskedArray", + "abs", + "absolute", + "add", + "all", + "allclose", + "allequal", + "alltrue", + "amax", + "amin", + "angle", + "anom", + "anomalies", + "any", + "append", + "arange", + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctan2", + "arctanh", + "argmax", + "argmin", + "argsort", + "around", + "array", + "asanyarray", + "asarray", + "bitwise_and", + "bitwise_or", + "bitwise_xor", + "bool_", + "ceil", + "choose", + "clip", + "common_fill_value", + "compress", + "compressed", + "concatenate", + "conjugate", + "convolve", + "copy", + "correlate", + "cos", + "cosh", + "count", + "cumprod", + "cumsum", + "default_fill_value", + "diag", + "diagonal", + "diff", + "divide", + "empty", + "empty_like", + "equal", + "exp", + "expand_dims", + "fabs", + "filled", + "fix_invalid", + "flatten_mask", + "flatten_structured_array", + "floor", + "floor_divide", + "fmod", + "frombuffer", + "fromflex", + "fromfunction", + "getdata", + "getmask", + "getmaskarray", + "greater", + "greater_equal", + "harden_mask", + "hypot", + "identity", + "ids", + "indices", + "inner", + "innerproduct", + "isMA", + "isMaskedArray", + "is_mask", + "is_masked", + "isarray", + "left_shift", + "less", + "less_equal", + "log", + "log10", + "log2", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "make_mask", + "make_mask_descr", + "make_mask_none", + "mask_or", + "masked", + "masked_array", + "masked_equal", + "masked_greater", + "masked_greater_equal", + "masked_inside", + "masked_invalid", + "masked_less", + "masked_less_equal", + "masked_not_equal", + "masked_object", + "masked_outside", + "masked_print_option", + "masked_singleton", + "masked_values", + "masked_where", + "max", + "maximum", + "maximum_fill_value", + "mean", + "min", + "minimum", + "minimum_fill_value", + "mod", + "multiply", + "mvoid", + "ndim", + "negative", + "nomask", + "nonzero", + "not_equal", + "ones", + "ones_like", + "outer", + "outerproduct", + "power", + "prod", + "product", + "ptp", + "put", + "putmask", + "ravel", + "remainder", + "repeat", + "reshape", + "resize", + "right_shift", + "round", + "round_", + "set_fill_value", + "shape", + "sin", + "sinh", + "size", + "soften_mask", + "sometrue", + "sort", + "sqrt", + "squeeze", + "std", + "subtract", + "sum", + "swapaxes", + "take", + "tan", + "tanh", + "trace", + "transpose", + "true_divide", + "var", + "where", + "zeros", + "zeros_like", + "apply_along_axis", + "apply_over_axes", + "atleast_1d", + "atleast_2d", + "atleast_3d", + "average", + "clump_masked", + "clump_unmasked", + "column_stack", + "compress_cols", + "compress_nd", + "compress_rowcols", + "compress_rows", + "count_masked", + "corrcoef", + "cov", + "diagflat", + "dot", + "dstack", + "ediff1d", + "flatnotmasked_contiguous", + "flatnotmasked_edges", + "hsplit", + "hstack", + "isin", + "in1d", + "intersect1d", + "mask_cols", + "mask_rowcols", + "mask_rows", + "masked_all", + "masked_all_like", + "median", + "mr_", + "ndenumerate", + "notmasked_contiguous", + "notmasked_edges", + "polyfit", + "row_stack", + "setdiff1d", + "setxor1d", + "stack", + "unique", + "union1d", + "vander", + "vstack", +] diff --git a/local_packages/numpy/ma/core.py b/local_packages/numpy/ma/core.py new file mode 100644 index 0000000..3b8c52e --- /dev/null +++ b/local_packages/numpy/ma/core.py @@ -0,0 +1,8929 @@ +""" +numpy.ma : a package to handle missing or invalid values. + +This package was initially written for numarray by Paul F. Dubois +at Lawrence Livermore National Laboratory. +In 2006, the package was completely rewritten by Pierre Gerard-Marchant +(University of Georgia) to make the MaskedArray class a subclass of ndarray, +and to improve support of structured arrays. + + +Copyright 1999, 2000, 2001 Regents of the University of California. +Released for unlimited redistribution. + +* Adapted for numpy_core 2005 by Travis Oliphant and (mainly) Paul Dubois. +* Subclassing of the base `ndarray` 2006 by Pierre Gerard-Marchant + (pgmdevlist_AT_gmail_DOT_com) +* Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com) + +.. moduleauthor:: Pierre Gerard-Marchant + +""" +import builtins +import functools +import inspect +import operator +import re +import textwrap +import warnings + +import numpy as np +import numpy._core.numerictypes as ntypes +import numpy._core.umath as umath +from numpy import ( + _NoValue, + amax, + amin, + angle, + array as narray, # noqa: F401 + bool_, + expand_dims, + finfo, # noqa: F401 + iinfo, # noqa: F401 + iscomplexobj, + ndarray, +) +from numpy._core import multiarray as mu +from numpy._core.numeric import normalize_axis_tuple +from numpy._utils import set_module + +__all__ = [ + 'MAError', 'MaskError', 'MaskType', 'MaskedArray', 'abs', 'absolute', + 'add', 'all', 'allclose', 'allequal', 'alltrue', 'amax', 'amin', + 'angle', 'anom', 'anomalies', 'any', 'append', 'arange', 'arccos', + 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', + 'argmax', 'argmin', 'argsort', 'around', 'array', 'asanyarray', + 'asarray', 'bitwise_and', 'bitwise_or', 'bitwise_xor', 'bool_', 'ceil', + 'choose', 'clip', 'common_fill_value', 'compress', 'compressed', + 'concatenate', 'conjugate', 'convolve', 'copy', 'correlate', 'cos', 'cosh', + 'count', 'cumprod', 'cumsum', 'default_fill_value', 'diag', 'diagonal', + 'diff', 'divide', 'empty', 'empty_like', 'equal', 'exp', + 'expand_dims', 'fabs', 'filled', 'fix_invalid', 'flatten_mask', + 'flatten_structured_array', 'floor', 'floor_divide', 'fmod', + 'frombuffer', 'fromflex', 'fromfunction', 'getdata', 'getmask', + 'getmaskarray', 'greater', 'greater_equal', 'harden_mask', 'hypot', + 'identity', 'ids', 'indices', 'inner', 'innerproduct', 'isMA', + 'isMaskedArray', 'is_mask', 'is_masked', 'isarray', 'left_shift', + 'less', 'less_equal', 'log', 'log10', 'log2', + 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'make_mask', + 'make_mask_descr', 'make_mask_none', 'mask_or', 'masked', + 'masked_array', 'masked_equal', 'masked_greater', + 'masked_greater_equal', 'masked_inside', 'masked_invalid', + 'masked_less', 'masked_less_equal', 'masked_not_equal', + 'masked_object', 'masked_outside', 'masked_print_option', + 'masked_singleton', 'masked_values', 'masked_where', 'max', 'maximum', + 'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value', + 'mod', 'multiply', 'mvoid', 'ndim', 'negative', 'nomask', 'nonzero', + 'not_equal', 'ones', 'ones_like', 'outer', 'outerproduct', 'power', 'prod', + 'product', 'ptp', 'put', 'putmask', 'ravel', 'remainder', + 'repeat', 'reshape', 'resize', 'right_shift', 'round', 'round_', + 'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'soften_mask', + 'sometrue', 'sort', 'sqrt', 'squeeze', 'std', 'subtract', 'sum', + 'swapaxes', 'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide', + 'var', 'where', 'zeros', 'zeros_like', + ] + +MaskType = np.bool +nomask = MaskType(0) + +class MaskedArrayFutureWarning(FutureWarning): + pass + +def _deprecate_argsort_axis(arr): + """ + Adjust the axis passed to argsort, warning if necessary + + Parameters + ---------- + arr + The array which argsort was called on + + np.ma.argsort has a long-term bug where the default of the axis argument + is wrong (gh-8701), which now must be kept for backwards compatibility. + Thankfully, this only makes a difference when arrays are 2- or more- + dimensional, so we only need a warning then. + """ + if arr.ndim <= 1: + # no warning needed - but switch to -1 anyway, to avoid surprising + # subclasses, which are more likely to implement scalar axes. + return -1 + else: + # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default + warnings.warn( + "In the future the default for argsort will be axis=-1, not the " + "current None, to match its documentation and np.argsort. " + "Explicitly pass -1 or None to silence this warning.", + MaskedArrayFutureWarning, stacklevel=3) + return None + + +def doc_note(initialdoc, note): + """ + Adds a Notes section to an existing docstring. + + """ + if initialdoc is None: + return + if note is None: + return initialdoc + + notesplit = re.split(r'\n\s*?Notes\n\s*?-----', inspect.cleandoc(initialdoc)) + notedoc = f"\n\nNotes\n-----\n{inspect.cleandoc(note)}\n" + + return ''.join(notesplit[:1] + [notedoc] + notesplit[1:]) + + +############################################################################### +# Exceptions # +############################################################################### + + +class MAError(Exception): + """ + Class for masked array related errors. + + """ + pass + + +class MaskError(MAError): + """ + Class for mask related errors. + + """ + pass + + +############################################################################### +# Filling options # +############################################################################### + + +# b: boolean - c: complex - f: floats - i: integer - O: object - S: string +default_filler = {'b': True, + 'c': 1.e20 + 0.0j, + 'f': 1.e20, + 'i': 999999, + 'O': '?', + 'S': b'N/A', + 'u': 999999, + 'V': b'???', + 'U': 'N/A', + 'T': 'N/A' + } + +# Add datetime64 and timedelta64 types +for v in ["Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", + "fs", "as"]: + default_filler["M8[" + v + "]"] = np.datetime64("NaT", v) + default_filler["m8[" + v + "]"] = np.timedelta64("NaT", v) + +float_types_list = [np.half, np.single, np.double, np.longdouble, + np.csingle, np.cdouble, np.clongdouble] + +_minvals: dict[type, int] = {} +_maxvals: dict[type, int] = {} + +for sctype in ntypes.sctypeDict.values(): + scalar_dtype = np.dtype(sctype) + + if scalar_dtype.kind in "Mm": + info = np.iinfo(np.int64) + min_val, max_val = info.min + 1, info.max + elif np.issubdtype(scalar_dtype, np.integer): + info = np.iinfo(sctype) + min_val, max_val = info.min, info.max + elif np.issubdtype(scalar_dtype, np.floating): + info = np.finfo(sctype) + min_val, max_val = info.min, info.max + elif scalar_dtype.kind == "b": + min_val, max_val = 0, 1 + else: + min_val, max_val = None, None + + _minvals[sctype] = min_val + _maxvals[sctype] = max_val + +max_filler = _minvals +max_filler.update([(k, -np.inf) for k in float_types_list[:4]]) +max_filler.update([(k, complex(-np.inf, -np.inf)) for k in float_types_list[-3:]]) + +min_filler = _maxvals +min_filler.update([(k, +np.inf) for k in float_types_list[:4]]) +min_filler.update([(k, complex(+np.inf, +np.inf)) for k in float_types_list[-3:]]) + +del float_types_list + +def _recursive_fill_value(dtype, f): + """ + Recursively produce a fill value for `dtype`, calling f on scalar dtypes + """ + if dtype.names is not None: + # We wrap into `array` here, which ensures we use NumPy cast rules + # for integer casts, this allows the use of 99999 as a fill value + # for int8. + # TODO: This is probably a mess, but should best preserve behavior? + vals = tuple( + np.array(_recursive_fill_value(dtype[name], f)) + for name in dtype.names) + return np.array(vals, dtype=dtype)[()] # decay to void scalar from 0d + elif dtype.subdtype: + subtype, shape = dtype.subdtype + subval = _recursive_fill_value(subtype, f) + return np.full(shape, subval) + else: + return f(dtype) + + +def _get_dtype_of(obj): + """ Convert the argument for *_fill_value into a dtype """ + if isinstance(obj, np.dtype): + return obj + elif hasattr(obj, 'dtype'): + return obj.dtype + else: + return np.asanyarray(obj).dtype + + +def default_fill_value(obj): + """ + Return the default fill value for the argument object. + + The default filling value depends on the datatype of the input + array or the type of the input scalar: + + =========== ======== + datatype default + =========== ======== + bool True + int 999999 + float 1.e20 + complex 1.e20+0j + object '?' + string 'N/A' + StringDType 'N/A' + =========== ======== + + For structured types, a structured scalar is returned, with each field the + default fill value for its type. + + For subarray types, the fill value is an array of the same size containing + the default scalar fill value. + + Parameters + ---------- + obj : ndarray, dtype or scalar + The array data-type or scalar for which the default fill value + is returned. + + Returns + ------- + fill_value : scalar + The default fill value. + + Examples + -------- + >>> import numpy as np + >>> np.ma.default_fill_value(1) + 999999 + >>> np.ma.default_fill_value(np.array([1.1, 2., np.pi])) + 1e+20 + >>> np.ma.default_fill_value(np.dtype(complex)) + (1e+20+0j) + + """ + def _scalar_fill_value(dtype): + if dtype.kind in 'Mm': + return default_filler.get(dtype.str[1:], '?') + else: + return default_filler.get(dtype.kind, '?') + + dtype = _get_dtype_of(obj) + return _recursive_fill_value(dtype, _scalar_fill_value) + + +def _extremum_fill_value(obj, extremum, extremum_name): + + def _scalar_fill_value(dtype): + try: + return extremum[dtype.type] + except KeyError as e: + raise TypeError( + f"Unsuitable type {dtype} for calculating {extremum_name}." + ) from None + + dtype = _get_dtype_of(obj) + return _recursive_fill_value(dtype, _scalar_fill_value) + + +def minimum_fill_value(obj): + """ + Return the maximum value that can be represented by the dtype of an object. + + This function is useful for calculating a fill value suitable for + taking the minimum of an array with a given dtype. + + Parameters + ---------- + obj : ndarray, dtype or scalar + An object that can be queried for it's numeric type. + + Returns + ------- + val : scalar + The maximum representable value. + + Raises + ------ + TypeError + If `obj` isn't a suitable numeric type. + + See Also + -------- + maximum_fill_value : The inverse function. + set_fill_value : Set the filling value of a masked array. + MaskedArray.fill_value : Return current fill value. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.int8() + >>> ma.minimum_fill_value(a) + 127 + >>> a = np.int32() + >>> ma.minimum_fill_value(a) + 2147483647 + + An array of numeric data can also be passed. + + >>> a = np.array([1, 2, 3], dtype=np.int8) + >>> ma.minimum_fill_value(a) + 127 + >>> a = np.array([1, 2, 3], dtype=np.float32) + >>> ma.minimum_fill_value(a) + inf + + """ + return _extremum_fill_value(obj, min_filler, "minimum") + + +def maximum_fill_value(obj): + """ + Return the minimum value that can be represented by the dtype of an object. + + This function is useful for calculating a fill value suitable for + taking the maximum of an array with a given dtype. + + Parameters + ---------- + obj : ndarray, dtype or scalar + An object that can be queried for it's numeric type. + + Returns + ------- + val : scalar + The minimum representable value. + + Raises + ------ + TypeError + If `obj` isn't a suitable numeric type. + + See Also + -------- + minimum_fill_value : The inverse function. + set_fill_value : Set the filling value of a masked array. + MaskedArray.fill_value : Return current fill value. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.int8() + >>> ma.maximum_fill_value(a) + -128 + >>> a = np.int32() + >>> ma.maximum_fill_value(a) + -2147483648 + + An array of numeric data can also be passed. + + >>> a = np.array([1, 2, 3], dtype=np.int8) + >>> ma.maximum_fill_value(a) + -128 + >>> a = np.array([1, 2, 3], dtype=np.float32) + >>> ma.maximum_fill_value(a) + -inf + + """ + return _extremum_fill_value(obj, max_filler, "maximum") + + +def _recursive_set_fill_value(fillvalue, dt): + """ + Create a fill value for a structured dtype. + + Parameters + ---------- + fillvalue : scalar or array_like + Scalar or array representing the fill value. If it is of shorter + length than the number of fields in dt, it will be resized. + dt : dtype + The structured dtype for which to create the fill value. + + Returns + ------- + val : tuple + A tuple of values corresponding to the structured fill value. + + """ + fillvalue = np.resize(fillvalue, len(dt.names)) + output_value = [] + for (fval, name) in zip(fillvalue, dt.names): + cdtype = dt[name] + if cdtype.subdtype: + cdtype = cdtype.subdtype[0] + + if cdtype.names is not None: + output_value.append(tuple(_recursive_set_fill_value(fval, cdtype))) + else: + output_value.append(np.array(fval, dtype=cdtype).item()) + return tuple(output_value) + + +def _check_fill_value(fill_value, ndtype): + """ + Private function validating the given `fill_value` for the given dtype. + + If fill_value is None, it is set to the default corresponding to the dtype. + + If fill_value is not None, its value is forced to the given dtype. + + The result is always a 0d array. + + """ + ndtype = np.dtype(ndtype) + if fill_value is None: + fill_value = default_fill_value(ndtype) + # TODO: It seems better to always store a valid fill_value, the oddity + # about is that `_fill_value = None` would behave even more + # different then. + # (e.g. this allows arr_uint8.astype(int64) to have the default + # fill value again...) + # The one thing that changed in 2.0/2.1 around cast safety is that the + # default `int(99...)` is not a same-kind cast anymore, so if we + # have a uint, use the default uint. + if ndtype.kind == "u": + fill_value = np.uint(fill_value) + elif ndtype.names is not None: + if isinstance(fill_value, (ndarray, np.void)): + try: + fill_value = np.asarray(fill_value, dtype=ndtype) + except ValueError as e: + err_msg = "Unable to transform %s to dtype %s" + raise ValueError(err_msg % (fill_value, ndtype)) from e + else: + fill_value = np.asarray(fill_value, dtype=object) + fill_value = np.array(_recursive_set_fill_value(fill_value, ndtype), + dtype=ndtype) + elif isinstance(fill_value, str) and (ndtype.char not in 'OSTVU'): + # Note this check doesn't work if fill_value is not a scalar + err_msg = "Cannot set fill value of string with array of dtype %s" + raise TypeError(err_msg % ndtype) + else: + # In case we want to convert 1e20 to int. + # Also in case of converting string arrays. + try: + fill_value = np.asarray(fill_value, dtype=ndtype) + except (OverflowError, ValueError) as e: + # Raise TypeError instead of OverflowError or ValueError. + # OverflowError is seldom used, and the real problem here is + # that the passed fill_value is not compatible with the ndtype. + err_msg = "Cannot convert fill_value %s to dtype %s" + raise TypeError(err_msg % (fill_value, ndtype)) from e + return np.array(fill_value) + + +def set_fill_value(a, fill_value): + """ + Set the filling value of a, if a is a masked array. + + This function changes the fill value of the masked array `a` in place. + If `a` is not a masked array, the function returns silently, without + doing anything. + + Parameters + ---------- + a : array_like + Input array. + fill_value : dtype + Filling value. A consistency test is performed to make sure + the value is compatible with the dtype of `a`. + + Returns + ------- + None + Nothing returned by this function. + + See Also + -------- + maximum_fill_value : Return the default fill value for a dtype. + MaskedArray.fill_value : Return current fill value. + MaskedArray.set_fill_value : Equivalent method. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.arange(5) + >>> a + array([0, 1, 2, 3, 4]) + >>> a = ma.masked_where(a < 3, a) + >>> a + masked_array(data=[--, --, --, 3, 4], + mask=[ True, True, True, False, False], + fill_value=999999) + >>> ma.set_fill_value(a, -999) + >>> a + masked_array(data=[--, --, --, 3, 4], + mask=[ True, True, True, False, False], + fill_value=-999) + + Nothing happens if `a` is not a masked array. + + >>> a = list(range(5)) + >>> a + [0, 1, 2, 3, 4] + >>> ma.set_fill_value(a, 100) + >>> a + [0, 1, 2, 3, 4] + >>> a = np.arange(5) + >>> a + array([0, 1, 2, 3, 4]) + >>> ma.set_fill_value(a, 100) + >>> a + array([0, 1, 2, 3, 4]) + + """ + if isinstance(a, MaskedArray): + a.set_fill_value(fill_value) + + +def get_fill_value(a): + """ + Return the filling value of a, if any. Otherwise, returns the + default filling value for that type. + + """ + if isinstance(a, MaskedArray): + result = a.fill_value + else: + result = default_fill_value(a) + return result + + +def common_fill_value(a, b): + """ + Return the common filling value of two masked arrays, if any. + + If ``a.fill_value == b.fill_value``, return the fill value, + otherwise return None. + + Parameters + ---------- + a, b : MaskedArray + The masked arrays for which to compare fill values. + + Returns + ------- + fill_value : scalar or None + The common fill value, or None. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([0, 1.], fill_value=3) + >>> y = np.ma.array([0, 1.], fill_value=3) + >>> np.ma.common_fill_value(x, y) + 3.0 + + """ + t1 = get_fill_value(a) + t2 = get_fill_value(b) + if t1 == t2: + return t1 + return None + + +def filled(a, fill_value=None): + """ + Return input as an `~numpy.ndarray`, with masked values replaced by + `fill_value`. + + If `a` is not a `MaskedArray`, `a` itself is returned. + If `a` is a `MaskedArray` with no masked values, then ``a.data`` is + returned. + If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to + ``a.fill_value``. + + Parameters + ---------- + a : MaskedArray or array_like + An input object. + fill_value : array_like, optional. + Can be scalar or non-scalar. If non-scalar, the + resulting filled array should be broadcastable + over input array. Default is None. + + Returns + ------- + a : ndarray + The filled array. + + See Also + -------- + compressed + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], + ... [1, 0, 0], + ... [0, 0, 0]]) + >>> x.filled() + array([[999999, 1, 2], + [999999, 4, 5], + [ 6, 7, 8]]) + >>> x.filled(fill_value=333) + array([[333, 1, 2], + [333, 4, 5], + [ 6, 7, 8]]) + >>> x.filled(fill_value=np.arange(3)) + array([[0, 1, 2], + [0, 4, 5], + [6, 7, 8]]) + + """ + if hasattr(a, 'filled'): + return a.filled(fill_value) + + elif isinstance(a, ndarray): + # Should we check for contiguity ? and a.flags['CONTIGUOUS']: + return a + elif isinstance(a, dict): + return np.array(a, 'O') + else: + return np.array(a) + + +def get_masked_subclass(*arrays): + """ + Return the youngest subclass of MaskedArray from a list of (masked) arrays. + + In case of siblings, the first listed takes over. + + """ + if len(arrays) == 1: + arr = arrays[0] + if isinstance(arr, MaskedArray): + rcls = type(arr) + else: + rcls = MaskedArray + else: + arrcls = [type(a) for a in arrays] + rcls = arrcls[0] + if not issubclass(rcls, MaskedArray): + rcls = MaskedArray + for cls in arrcls[1:]: + if issubclass(cls, rcls): + rcls = cls + # Don't return MaskedConstant as result: revert to MaskedArray + if rcls.__name__ == 'MaskedConstant': + return MaskedArray + return rcls + + +def getdata(a, subok=True): + """ + Return the data of a masked array as an ndarray. + + Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``, + else return `a` as a ndarray or subclass (depending on `subok`) if not. + + Parameters + ---------- + a : array_like + Input ``MaskedArray``, alternatively a ndarray or a subclass thereof. + subok : bool + Whether to force the output to be a `pure` ndarray (False) or to + return a subclass of ndarray if appropriate (True, default). + + See Also + -------- + getmask : Return the mask of a masked array, or nomask. + getmaskarray : Return the mask of a masked array, or full array of False. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = ma.masked_equal([[1,2],[3,4]], 2) + >>> a + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=2) + >>> ma.getdata(a) + array([[1, 2], + [3, 4]]) + + Equivalently use the ``MaskedArray`` `data` attribute. + + >>> a.data + array([[1, 2], + [3, 4]]) + + """ + try: + data = a._data + except AttributeError: + data = np.array(a, copy=None, subok=subok) + if not subok: + return data.view(ndarray) + return data + + +get_data = getdata + + +def fix_invalid(a, mask=nomask, copy=True, fill_value=None): + """ + Return input with invalid data masked and replaced by a fill value. + + Invalid data means values of `nan`, `inf`, etc. + + Parameters + ---------- + a : array_like + Input array, a (subclass of) ndarray. + mask : sequence, optional + Mask. Must be convertible to an array of booleans with the same + shape as `data`. True indicates a masked (i.e. invalid) data. + copy : bool, optional + Whether to use a copy of `a` (True) or to fix `a` in place (False). + Default is True. + fill_value : scalar, optional + Value used for fixing invalid data. Default is None, in which case + the ``a.fill_value`` is used. + + Returns + ------- + b : MaskedArray + The input array with invalid entries fixed. + + Notes + ----- + A copy is performed by default. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3) + >>> x + masked_array(data=[--, -1.0, nan, inf], + mask=[ True, False, False, False], + fill_value=1e+20) + >>> np.ma.fix_invalid(x) + masked_array(data=[--, -1.0, --, --], + mask=[ True, False, True, True], + fill_value=1e+20) + + >>> fixed = np.ma.fix_invalid(x) + >>> fixed.data + array([ 1.e+00, -1.e+00, 1.e+20, 1.e+20]) + >>> x.data + array([ 1., -1., nan, inf]) + + """ + a = masked_array(a, copy=copy, mask=mask, subok=True) + invalid = np.logical_not(np.isfinite(a._data)) + if not invalid.any(): + return a + a._mask |= invalid + if fill_value is None: + fill_value = a.fill_value + a._data[invalid] = fill_value + return a + +def is_string_or_list_of_strings(val): + return (isinstance(val, str) or + (isinstance(val, list) and val and + builtins.all(isinstance(s, str) for s in val))) + +############################################################################### +# Ufuncs # +############################################################################### + + +ufunc_domain = {} +ufunc_fills = {} + + +class _DomainCheckInterval: + """ + Define a valid interval, so that : + + ``domain_check_interval(a,b)(x) == True`` where + ``x < a`` or ``x > b``. + + """ + + def __init__(self, a, b): + "domain_check_interval(a,b)(x) = true where x < a or y > b" + if a > b: + (a, b) = (b, a) + self.a = a + self.b = b + + def __call__(self, x): + "Execute the call behavior." + # nans at masked positions cause RuntimeWarnings, even though + # they are masked. To avoid this we suppress warnings. + with np.errstate(invalid='ignore'): + return umath.logical_or(umath.greater(x, self.b), + umath.less(x, self.a)) + + +class _DomainTan: + """ + Define a valid interval for the `tan` function, so that: + + ``domain_tan(eps) = True`` where ``abs(cos(x)) < eps`` + + """ + + def __init__(self, eps): + "domain_tan(eps) = true where abs(cos(x)) < eps)" + self.eps = eps + + def __call__(self, x): + "Executes the call behavior." + with np.errstate(invalid='ignore'): + return umath.less(umath.absolute(umath.cos(x)), self.eps) + + +class _DomainSafeDivide: + """ + Define a domain for safe division. + + """ + + def __init__(self, tolerance=None): + self.tolerance = tolerance + + def __call__(self, a, b): + # Delay the selection of the tolerance to here in order to reduce numpy + # import times. The calculation of these parameters is a substantial + # component of numpy's import time. + if self.tolerance is None: + self.tolerance = np.finfo(float).tiny + # don't call ma ufuncs from __array_wrap__ which would fail for scalars + a, b = np.asarray(a), np.asarray(b) + with np.errstate(all='ignore'): + return umath.absolute(a) * self.tolerance >= umath.absolute(b) + + +class _DomainGreater: + """ + DomainGreater(v)(x) is True where x <= v. + + """ + + def __init__(self, critical_value): + "DomainGreater(v)(x) = true where x <= v" + self.critical_value = critical_value + + def __call__(self, x): + "Executes the call behavior." + with np.errstate(invalid='ignore'): + return umath.less_equal(x, self.critical_value) + + +class _DomainGreaterEqual: + """ + DomainGreaterEqual(v)(x) is True where x < v. + + """ + + def __init__(self, critical_value): + "DomainGreaterEqual(v)(x) = true where x < v" + self.critical_value = critical_value + + def __call__(self, x): + "Executes the call behavior." + with np.errstate(invalid='ignore'): + return umath.less(x, self.critical_value) + + +class _MaskedUFunc: + def __init__(self, ufunc): + self.f = ufunc + self.__doc__ = ufunc.__doc__ + self.__name__ = ufunc.__name__ + self.__qualname__ = ufunc.__qualname__ + + def __str__(self): + return f"Masked version of {self.f}" + + +class _MaskedUnaryOperation(_MaskedUFunc): + """ + Defines masked version of unary operations, where invalid values are + pre-masked. + + Parameters + ---------- + mufunc : callable + The function for which to define a masked version. Made available + as ``_MaskedUnaryOperation.f``. + fill : scalar, optional + Filling value, default is 0. + domain : class instance + Domain for the function. Should be one of the ``_Domain*`` + classes. Default is None. + + """ + + def __init__(self, mufunc, fill=0, domain=None): + super().__init__(mufunc) + self.fill = fill + self.domain = domain + ufunc_domain[mufunc] = domain + ufunc_fills[mufunc] = fill + + def __call__(self, a, *args, **kwargs): + """ + Execute the call behavior. + + """ + d = getdata(a) + # Deal with domain + if self.domain is not None: + # Case 1.1. : Domained function + # nans at masked positions cause RuntimeWarnings, even though + # they are masked. To avoid this we suppress warnings. + with np.errstate(divide='ignore', invalid='ignore'): + result = self.f(d, *args, **kwargs) + # Make a mask + m = ~umath.isfinite(result) + m |= self.domain(d) + m |= getmask(a) + else: + # Case 1.2. : Function without a domain + # Get the result and the mask + with np.errstate(divide='ignore', invalid='ignore'): + result = self.f(d, *args, **kwargs) + m = getmask(a) + + if not result.ndim: + # Case 2.1. : The result is scalarscalar + if m: + return masked + return result + + if m is not nomask: + # Case 2.2. The result is an array + # We need to fill the invalid data back w/ the input Now, + # that's plain silly: in C, we would just skip the element and + # keep the original, but we do have to do it that way in Python + + # In case result has a lower dtype than the inputs (as in + # equal) + try: + np.copyto(result, d, where=m) + except TypeError: + pass + # Transform to + masked_result = result.view(get_masked_subclass(a)) + masked_result._mask = m + masked_result._update_from(a) + return masked_result + + +class _MaskedBinaryOperation(_MaskedUFunc): + """ + Define masked version of binary operations, where invalid + values are pre-masked. + + Parameters + ---------- + mbfunc : function + The function for which to define a masked version. Made available + as ``_MaskedBinaryOperation.f``. + domain : class instance + Default domain for the function. Should be one of the ``_Domain*`` + classes. Default is None. + fillx : scalar, optional + Filling value for the first argument, default is 0. + filly : scalar, optional + Filling value for the second argument, default is 0. + + """ + + def __init__(self, mbfunc, fillx=0, filly=0): + """ + abfunc(fillx, filly) must be defined. + + abfunc(x, filly) = x for all x to enable reduce. + + """ + super().__init__(mbfunc) + self.fillx = fillx + self.filly = filly + ufunc_domain[mbfunc] = None + ufunc_fills[mbfunc] = (fillx, filly) + + def __call__(self, a, b, *args, **kwargs): + """ + Execute the call behavior. + + """ + # Get the data, as ndarray + (da, db) = (getdata(a), getdata(b)) + # Get the result + with np.errstate(): + np.seterr(divide='ignore', invalid='ignore') + result = self.f(da, db, *args, **kwargs) + # Get the mask for the result + (ma, mb) = (getmask(a), getmask(b)) + if ma is nomask: + if mb is nomask: + m = nomask + else: + m = umath.logical_or(getmaskarray(a), mb) + elif mb is nomask: + m = umath.logical_or(ma, getmaskarray(b)) + else: + m = umath.logical_or(ma, mb) + + # Case 1. : scalar + if not result.ndim: + if m: + return masked + return result + + # Case 2. : array + # Revert result to da where masked + if m is not nomask and m.any(): + # any errors, just abort; impossible to guarantee masked values + try: + np.copyto(result, da, casting='unsafe', where=m) + except Exception: + pass + + # Transforms to a (subclass of) MaskedArray + masked_result = result.view(get_masked_subclass(a, b)) + masked_result._mask = m + if isinstance(a, MaskedArray): + masked_result._update_from(a) + elif isinstance(b, MaskedArray): + masked_result._update_from(b) + return masked_result + + def reduce(self, target, axis=0, dtype=None): + """ + Reduce `target` along the given `axis`. + + """ + tclass = get_masked_subclass(target) + m = getmask(target) + t = filled(target, self.filly) + if t.shape == (): + t = t.reshape(1) + if m is not nomask: + m = make_mask(m, copy=True) + m.shape = (1,) + + if m is nomask: + tr = self.f.reduce(t, axis) + mr = nomask + else: + tr = self.f.reduce(t, axis, dtype=dtype) + mr = umath.logical_and.reduce(m, axis) + + if not tr.shape: + if mr: + return masked + else: + return tr + masked_tr = tr.view(tclass) + masked_tr._mask = mr + return masked_tr + + def outer(self, a, b): + """ + Return the function applied to the outer product of a and b. + + """ + (da, db) = (getdata(a), getdata(b)) + d = self.f.outer(da, db) + ma = getmask(a) + mb = getmask(b) + if ma is nomask and mb is nomask: + m = nomask + else: + ma = getmaskarray(a) + mb = getmaskarray(b) + m = umath.logical_or.outer(ma, mb) + if (not m.ndim) and m: + return masked + if m is not nomask: + np.copyto(d, da, where=m) + if not d.shape: + return d + masked_d = d.view(get_masked_subclass(a, b)) + masked_d._mask = m + return masked_d + + def accumulate(self, target, axis=0): + """Accumulate `target` along `axis` after filling with y fill + value. + + """ + tclass = get_masked_subclass(target) + t = filled(target, self.filly) + result = self.f.accumulate(t, axis) + masked_result = result.view(tclass) + return masked_result + + +class _DomainedBinaryOperation(_MaskedUFunc): + """ + Define binary operations that have a domain, like divide. + + They have no reduce, outer or accumulate. + + Parameters + ---------- + mbfunc : function + The function for which to define a masked version. Made available + as ``_DomainedBinaryOperation.f``. + domain : class instance + Default domain for the function. Should be one of the ``_Domain*`` + classes. + fillx : scalar, optional + Filling value for the first argument, default is 0. + filly : scalar, optional + Filling value for the second argument, default is 0. + + """ + + def __init__(self, dbfunc, domain, fillx=0, filly=0): + """abfunc(fillx, filly) must be defined. + abfunc(x, filly) = x for all x to enable reduce. + """ + super().__init__(dbfunc) + self.domain = domain + self.fillx = fillx + self.filly = filly + ufunc_domain[dbfunc] = domain + ufunc_fills[dbfunc] = (fillx, filly) + + def __call__(self, a, b, *args, **kwargs): + "Execute the call behavior." + # Get the data + (da, db) = (getdata(a), getdata(b)) + # Get the result + with np.errstate(divide='ignore', invalid='ignore'): + result = self.f(da, db, *args, **kwargs) + # Get the mask as a combination of the source masks and invalid + m = ~umath.isfinite(result) + m |= getmask(a) + m |= getmask(b) + # Apply the domain + domain = ufunc_domain.get(self.f, None) + if domain is not None: + m |= domain(da, db) + # Take care of the scalar case first + if not m.ndim: + if m: + return masked + else: + return result + # When the mask is True, put back da if possible + # any errors, just abort; impossible to guarantee masked values + try: + np.copyto(result, 0, casting='unsafe', where=m) + # avoid using "*" since this may be overlaid + masked_da = umath.multiply(m, da) + # only add back if it can be cast safely + if np.can_cast(masked_da.dtype, result.dtype, casting='safe'): + result += masked_da + except Exception: + pass + + # Transforms to a (subclass of) MaskedArray + masked_result = result.view(get_masked_subclass(a, b)) + masked_result._mask = m + if isinstance(a, MaskedArray): + masked_result._update_from(a) + elif isinstance(b, MaskedArray): + masked_result._update_from(b) + return masked_result + + +# Unary ufuncs +exp = _MaskedUnaryOperation(umath.exp) +conjugate = _MaskedUnaryOperation(umath.conjugate) +sin = _MaskedUnaryOperation(umath.sin) +cos = _MaskedUnaryOperation(umath.cos) +arctan = _MaskedUnaryOperation(umath.arctan) +arcsinh = _MaskedUnaryOperation(umath.arcsinh) +sinh = _MaskedUnaryOperation(umath.sinh) +cosh = _MaskedUnaryOperation(umath.cosh) +tanh = _MaskedUnaryOperation(umath.tanh) +abs = absolute = _MaskedUnaryOperation(umath.absolute) +angle = _MaskedUnaryOperation(angle) +fabs = _MaskedUnaryOperation(umath.fabs) +negative = _MaskedUnaryOperation(umath.negative) +floor = _MaskedUnaryOperation(umath.floor) +ceil = _MaskedUnaryOperation(umath.ceil) +around = _MaskedUnaryOperation(np.around) +logical_not = _MaskedUnaryOperation(umath.logical_not) + +# Domained unary ufuncs +sqrt = _MaskedUnaryOperation(umath.sqrt, 0.0, + _DomainGreaterEqual(0.0)) +log = _MaskedUnaryOperation(umath.log, 1.0, + _DomainGreater(0.0)) +log2 = _MaskedUnaryOperation(umath.log2, 1.0, + _DomainGreater(0.0)) +log10 = _MaskedUnaryOperation(umath.log10, 1.0, + _DomainGreater(0.0)) +tan = _MaskedUnaryOperation(umath.tan, 0.0, + _DomainTan(1e-35)) +arcsin = _MaskedUnaryOperation(umath.arcsin, 0.0, + _DomainCheckInterval(-1.0, 1.0)) +arccos = _MaskedUnaryOperation(umath.arccos, 0.0, + _DomainCheckInterval(-1.0, 1.0)) +arccosh = _MaskedUnaryOperation(umath.arccosh, 1.0, + _DomainGreaterEqual(1.0)) +arctanh = _MaskedUnaryOperation(umath.arctanh, 0.0, + _DomainCheckInterval(-1.0 + 1e-15, 1.0 - 1e-15)) + +# Binary ufuncs +add = _MaskedBinaryOperation(umath.add) +subtract = _MaskedBinaryOperation(umath.subtract) +multiply = _MaskedBinaryOperation(umath.multiply, 1, 1) +arctan2 = _MaskedBinaryOperation(umath.arctan2, 0.0, 1.0) +equal = _MaskedBinaryOperation(umath.equal) +equal.reduce = None +not_equal = _MaskedBinaryOperation(umath.not_equal) +not_equal.reduce = None +less_equal = _MaskedBinaryOperation(umath.less_equal) +less_equal.reduce = None +greater_equal = _MaskedBinaryOperation(umath.greater_equal) +greater_equal.reduce = None +less = _MaskedBinaryOperation(umath.less) +less.reduce = None +greater = _MaskedBinaryOperation(umath.greater) +greater.reduce = None +logical_and = _MaskedBinaryOperation(umath.logical_and) +alltrue = _MaskedBinaryOperation(umath.logical_and, 1, 1).reduce +logical_or = _MaskedBinaryOperation(umath.logical_or) +sometrue = logical_or.reduce +logical_xor = _MaskedBinaryOperation(umath.logical_xor) +bitwise_and = _MaskedBinaryOperation(umath.bitwise_and) +bitwise_or = _MaskedBinaryOperation(umath.bitwise_or) +bitwise_xor = _MaskedBinaryOperation(umath.bitwise_xor) +hypot = _MaskedBinaryOperation(umath.hypot) + +# Domained binary ufuncs +divide = _DomainedBinaryOperation(umath.divide, _DomainSafeDivide(), 0, 1) +true_divide = divide # Just an alias for divide. +floor_divide = _DomainedBinaryOperation(umath.floor_divide, + _DomainSafeDivide(), 0, 1) +remainder = _DomainedBinaryOperation(umath.remainder, + _DomainSafeDivide(), 0, 1) +fmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1) +mod = remainder + +############################################################################### +# Mask creation functions # +############################################################################### + + +def _replace_dtype_fields_recursive(dtype, primitive_dtype): + "Private function allowing recursion in _replace_dtype_fields." + _recurse = _replace_dtype_fields_recursive + + # Do we have some name fields ? + if dtype.names is not None: + descr = [] + for name in dtype.names: + field = dtype.fields[name] + if len(field) == 3: + # Prepend the title to the name + name = (field[-1], name) + descr.append((name, _recurse(field[0], primitive_dtype))) + new_dtype = np.dtype(descr) + + # Is this some kind of composite a la (float,2) + elif dtype.subdtype: + descr = list(dtype.subdtype) + descr[0] = _recurse(dtype.subdtype[0], primitive_dtype) + new_dtype = np.dtype(tuple(descr)) + + # this is a primitive type, so do a direct replacement + else: + new_dtype = primitive_dtype + + # preserve identity of dtypes + if new_dtype == dtype: + new_dtype = dtype + + return new_dtype + + +def _replace_dtype_fields(dtype, primitive_dtype): + """ + Construct a dtype description list from a given dtype. + + Returns a new dtype object, with all fields and subtypes in the given type + recursively replaced with `primitive_dtype`. + + Arguments are coerced to dtypes first. + """ + dtype = np.dtype(dtype) + primitive_dtype = np.dtype(primitive_dtype) + return _replace_dtype_fields_recursive(dtype, primitive_dtype) + + +def make_mask_descr(ndtype): + """ + Construct a dtype description list from a given dtype. + + Returns a new dtype object, with the type of all fields in `ndtype` to a + boolean type. Field names are not altered. + + Parameters + ---------- + ndtype : dtype + The dtype to convert. + + Returns + ------- + result : dtype + A dtype that looks like `ndtype`, the type of all fields is boolean. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> dtype = np.dtype({'names':['foo', 'bar'], + ... 'formats':[np.float32, np.int64]}) + >>> dtype + dtype([('foo', '>> ma.make_mask_descr(dtype) + dtype([('foo', '|b1'), ('bar', '|b1')]) + >>> ma.make_mask_descr(np.float32) + dtype('bool') + + """ + return _replace_dtype_fields(ndtype, MaskType) + + +def getmask(a): + """ + Return the mask of a masked array, or nomask. + + Return the mask of `a` as an ndarray if `a` is a `MaskedArray` and the + mask is not `nomask`, else return `nomask`. To guarantee a full array + of booleans of the same shape as a, use `getmaskarray`. + + Parameters + ---------- + a : array_like + Input `MaskedArray` for which the mask is required. + + See Also + -------- + getdata : Return the data of a masked array as an ndarray. + getmaskarray : Return the mask of a masked array, or full array of False. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = ma.masked_equal([[1,2],[3,4]], 2) + >>> a + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=2) + >>> ma.getmask(a) + array([[False, True], + [False, False]]) + + Equivalently use the `MaskedArray` `mask` attribute. + + >>> a.mask + array([[False, True], + [False, False]]) + + Result when mask == `nomask` + + >>> b = ma.masked_array([[1,2],[3,4]]) + >>> b + masked_array( + data=[[1, 2], + [3, 4]], + mask=False, + fill_value=999999) + >>> ma.nomask + False + >>> ma.getmask(b) == ma.nomask + True + >>> b.mask == ma.nomask + True + + """ + return getattr(a, '_mask', nomask) + + +get_mask = getmask + + +def getmaskarray(arr): + """ + Return the mask of a masked array, or full boolean array of False. + + Return the mask of `arr` as an ndarray if `arr` is a `MaskedArray` and + the mask is not `nomask`, else return a full boolean array of False of + the same shape as `arr`. + + Parameters + ---------- + arr : array_like + Input `MaskedArray` for which the mask is required. + + See Also + -------- + getmask : Return the mask of a masked array, or nomask. + getdata : Return the data of a masked array as an ndarray. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = ma.masked_equal([[1,2],[3,4]], 2) + >>> a + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=2) + >>> ma.getmaskarray(a) + array([[False, True], + [False, False]]) + + Result when mask == ``nomask`` + + >>> b = ma.masked_array([[1,2],[3,4]]) + >>> b + masked_array( + data=[[1, 2], + [3, 4]], + mask=False, + fill_value=999999) + >>> ma.getmaskarray(b) + array([[False, False], + [False, False]]) + + """ + mask = getmask(arr) + if mask is nomask: + mask = make_mask_none(np.shape(arr), getattr(arr, 'dtype', None)) + return mask + + +def is_mask(m): + """ + Return True if m is a valid, standard mask. + + This function does not check the contents of the input, only that the + type is MaskType. In particular, this function returns False if the + mask has a flexible dtype. + + Parameters + ---------- + m : array_like + Array to test. + + Returns + ------- + result : bool + True if `m.dtype.type` is MaskType, False otherwise. + + See Also + -------- + ma.isMaskedArray : Test whether input is an instance of MaskedArray. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> m = ma.masked_equal([0, 1, 0, 2, 3], 0) + >>> m + masked_array(data=[--, 1, --, 2, 3], + mask=[ True, False, True, False, False], + fill_value=0) + >>> ma.is_mask(m) + False + >>> ma.is_mask(m.mask) + True + + Input must be an ndarray (or have similar attributes) + for it to be considered a valid mask. + + >>> m = [False, True, False] + >>> ma.is_mask(m) + False + >>> m = np.array([False, True, False]) + >>> m + array([False, True, False]) + >>> ma.is_mask(m) + True + + Arrays with complex dtypes don't return True. + + >>> dtype = np.dtype({'names':['monty', 'pithon'], + ... 'formats':[bool, bool]}) + >>> dtype + dtype([('monty', '|b1'), ('pithon', '|b1')]) + >>> m = np.array([(True, False), (False, True), (True, False)], + ... dtype=dtype) + >>> m + array([( True, False), (False, True), ( True, False)], + dtype=[('monty', '?'), ('pithon', '?')]) + >>> ma.is_mask(m) + False + + """ + try: + return m.dtype.type is MaskType + except AttributeError: + return False + + +def _shrink_mask(m): + """ + Shrink a mask to nomask if possible + """ + if m.dtype.names is None and not m.any(): + return nomask + else: + return m + + +def make_mask(m, copy=False, shrink=True, dtype=MaskType): + """ + Create a boolean mask from an array. + + Return `m` as a boolean mask, creating a copy if necessary or requested. + The function can accept any sequence that is convertible to integers, + or ``nomask``. Does not require that contents must be 0s and 1s, values + of 0 are interpreted as False, everything else as True. + + Parameters + ---------- + m : array_like + Potential mask. + copy : bool, optional + Whether to return a copy of `m` (True) or `m` itself (False). + shrink : bool, optional + Whether to shrink `m` to ``nomask`` if all its values are False. + dtype : dtype, optional + Data-type of the output mask. By default, the output mask has a + dtype of MaskType (bool). If the dtype is flexible, each field has + a boolean dtype. This is ignored when `m` is ``nomask``, in which + case ``nomask`` is always returned. + + Returns + ------- + result : ndarray + A boolean mask derived from `m`. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> m = [True, False, True, True] + >>> ma.make_mask(m) + array([ True, False, True, True]) + >>> m = [1, 0, 1, 1] + >>> ma.make_mask(m) + array([ True, False, True, True]) + >>> m = [1, 0, 2, -3] + >>> ma.make_mask(m) + array([ True, False, True, True]) + + Effect of the `shrink` parameter. + + >>> m = np.zeros(4) + >>> m + array([0., 0., 0., 0.]) + >>> ma.make_mask(m) + False + >>> ma.make_mask(m, shrink=False) + array([False, False, False, False]) + + Using a flexible `dtype`. + + >>> m = [1, 0, 1, 1] + >>> n = [0, 1, 0, 0] + >>> arr = [] + >>> for man, mouse in zip(m, n): + ... arr.append((man, mouse)) + >>> arr + [(1, 0), (0, 1), (1, 0), (1, 0)] + >>> dtype = np.dtype({'names':['man', 'mouse'], + ... 'formats':[np.int64, np.int64]}) + >>> arr = np.array(arr, dtype=dtype) + >>> arr + array([(1, 0), (0, 1), (1, 0), (1, 0)], + dtype=[('man', '>> ma.make_mask(arr, dtype=dtype) + array([(True, False), (False, True), (True, False), (True, False)], + dtype=[('man', '|b1'), ('mouse', '|b1')]) + + """ + if m is nomask: + return nomask + + # Make sure the input dtype is valid. + dtype = make_mask_descr(dtype) + + # legacy boolean special case: "existence of fields implies true" + if isinstance(m, ndarray) and m.dtype.fields and dtype == np.bool: + return np.ones(m.shape, dtype=dtype) + + # Fill the mask in case there are missing data; turn it into an ndarray. + copy = None if not copy else True + result = np.array(filled(m, True), copy=copy, dtype=dtype, subok=True) + # Bas les masques ! + if shrink: + result = _shrink_mask(result) + return result + + +def make_mask_none(newshape, dtype=None): + """ + Return a boolean mask of the given shape, filled with False. + + This function returns a boolean ndarray with all entries False, that can + be used in common mask manipulations. If a complex dtype is specified, the + type of each field is converted to a boolean type. + + Parameters + ---------- + newshape : tuple + A tuple indicating the shape of the mask. + dtype : {None, dtype}, optional + If None, use a MaskType instance. Otherwise, use a new datatype with + the same fields as `dtype`, converted to boolean types. + + Returns + ------- + result : ndarray + An ndarray of appropriate shape and dtype, filled with False. + + See Also + -------- + make_mask : Create a boolean mask from an array. + make_mask_descr : Construct a dtype description list from a given dtype. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> ma.make_mask_none((3,)) + array([False, False, False]) + + Defining a more complex dtype. + + >>> dtype = np.dtype({'names':['foo', 'bar'], + ... 'formats':[np.float32, np.int64]}) + >>> dtype + dtype([('foo', '>> ma.make_mask_none((3,), dtype=dtype) + array([(False, False), (False, False), (False, False)], + dtype=[('foo', '|b1'), ('bar', '|b1')]) + + """ + if dtype is None: + result = np.zeros(newshape, dtype=MaskType) + else: + result = np.zeros(newshape, dtype=make_mask_descr(dtype)) + return result + + +def _recursive_mask_or(m1, m2, newmask): + names = m1.dtype.names + for name in names: + current1 = m1[name] + if current1.dtype.names is not None: + _recursive_mask_or(current1, m2[name], newmask[name]) + else: + umath.logical_or(current1, m2[name], newmask[name]) + + +def mask_or(m1, m2, copy=False, shrink=True): + """ + Combine two masks with the ``logical_or`` operator. + + The result may be a view on `m1` or `m2` if the other is `nomask` + (i.e. False). + + Parameters + ---------- + m1, m2 : array_like + Input masks. + copy : bool, optional + If copy is False and one of the inputs is `nomask`, return a view + of the other input mask. Defaults to False. + shrink : bool, optional + Whether to shrink the output to `nomask` if all its values are + False. Defaults to True. + + Returns + ------- + mask : output mask + The result masks values that are masked in either `m1` or `m2`. + + Raises + ------ + ValueError + If `m1` and `m2` have different flexible dtypes. + + Examples + -------- + >>> import numpy as np + >>> m1 = np.ma.make_mask([0, 1, 1, 0]) + >>> m2 = np.ma.make_mask([1, 0, 0, 0]) + >>> np.ma.mask_or(m1, m2) + array([ True, True, True, False]) + + """ + + if (m1 is nomask) or (m1 is False): + dtype = getattr(m2, 'dtype', MaskType) + return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype) + if (m2 is nomask) or (m2 is False): + dtype = getattr(m1, 'dtype', MaskType) + return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype) + if m1 is m2 and is_mask(m1): + return _shrink_mask(m1) if shrink else m1 + (dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None)) + if dtype1 != dtype2: + raise ValueError(f"Incompatible dtypes '{dtype1}'<>'{dtype2}'") + if dtype1.names is not None: + # Allocate an output mask array with the properly broadcast shape. + newmask = np.empty(np.broadcast(m1, m2).shape, dtype1) + _recursive_mask_or(m1, m2, newmask) + return newmask + return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink) + + +def flatten_mask(mask): + """ + Returns a completely flattened version of the mask, where nested fields + are collapsed. + + Parameters + ---------- + mask : array_like + Input array, which will be interpreted as booleans. + + Returns + ------- + flattened_mask : ndarray of bools + The flattened input. + + Examples + -------- + >>> import numpy as np + >>> mask = np.array([0, 0, 1]) + >>> np.ma.flatten_mask(mask) + array([False, False, True]) + + >>> mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) + >>> np.ma.flatten_mask(mask) + array([False, False, False, True]) + + >>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] + >>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype) + >>> np.ma.flatten_mask(mask) + array([False, False, False, False, False, True]) + + """ + + def _flatmask(mask): + "Flatten the mask and returns a (maybe nested) sequence of booleans." + mnames = mask.dtype.names + if mnames is not None: + return [flatten_mask(mask[name]) for name in mnames] + else: + return mask + + def _flatsequence(sequence): + "Generates a flattened version of the sequence." + try: + for element in sequence: + if hasattr(element, '__iter__'): + yield from _flatsequence(element) + else: + yield element + except TypeError: + yield sequence + + mask = np.asarray(mask) + flattened = _flatsequence(_flatmask(mask)) + return np.array(list(flattened), dtype=bool) + + +def _check_mask_axis(mask, axis, keepdims=np._NoValue): + "Check whether there are masked values along the given axis" + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + if mask is not nomask: + return mask.all(axis=axis, **kwargs) + return nomask + + +############################################################################### +# Masking functions # +############################################################################### + +def masked_where(condition, a, copy=True): + """ + Mask an array where a condition is met. + + Return `a` as an array masked where `condition` is True. + Any masked values of `a` or `condition` are also masked in the output. + + Parameters + ---------- + condition : array_like + Masking condition. When `condition` tests floating point values for + equality, consider using ``masked_values`` instead. + a : array_like + Array to mask. + copy : bool + If True (default) make a copy of `a` in the result. If False modify + `a` in place and return a view. + + Returns + ------- + result : MaskedArray + The result of masking `a` where `condition` is True. + + See Also + -------- + masked_values : Mask using floating point equality. + masked_equal : Mask where equal to a given value. + masked_not_equal : Mask where *not* equal to a given value. + masked_less_equal : Mask where less than or equal to a given value. + masked_greater_equal : Mask where greater than or equal to a given value. + masked_less : Mask where less than a given value. + masked_greater : Mask where greater than a given value. + masked_inside : Mask inside a given interval. + masked_outside : Mask outside a given interval. + masked_invalid : Mask invalid values (NaNs or infs). + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_where(a <= 2, a) + masked_array(data=[--, --, --, 3], + mask=[ True, True, True, False], + fill_value=999999) + + Mask array `b` conditional on `a`. + + >>> b = ['a', 'b', 'c', 'd'] + >>> ma.masked_where(a == 2, b) + masked_array(data=['a', 'b', --, 'd'], + mask=[False, False, True, False], + fill_value='N/A', + dtype='>> c = ma.masked_where(a <= 2, a) + >>> c + masked_array(data=[--, --, --, 3], + mask=[ True, True, True, False], + fill_value=999999) + >>> c[0] = 99 + >>> c + masked_array(data=[99, --, --, 3], + mask=[False, True, True, False], + fill_value=999999) + >>> a + array([0, 1, 2, 3]) + >>> c = ma.masked_where(a <= 2, a, copy=False) + >>> c[0] = 99 + >>> c + masked_array(data=[99, --, --, 3], + mask=[False, True, True, False], + fill_value=999999) + >>> a + array([99, 1, 2, 3]) + + When `condition` or `a` contain masked values. + + >>> a = np.arange(4) + >>> a = ma.masked_where(a == 2, a) + >>> a + masked_array(data=[0, 1, --, 3], + mask=[False, False, True, False], + fill_value=999999) + >>> b = np.arange(4) + >>> b = ma.masked_where(b == 0, b) + >>> b + masked_array(data=[--, 1, 2, 3], + mask=[ True, False, False, False], + fill_value=999999) + >>> ma.masked_where(a == 3, b) + masked_array(data=[--, 1, --, --], + mask=[ True, False, True, True], + fill_value=999999) + + """ + # Make sure that condition is a valid standard-type mask. + cond = make_mask(condition, shrink=False) + a = np.array(a, copy=copy, subok=True) + + (cshape, ashape) = (cond.shape, a.shape) + if cshape and cshape != ashape: + raise IndexError("Inconsistent shape between the condition and the input" + " (got %s and %s)" % (cshape, ashape)) + if hasattr(a, '_mask'): + cond = mask_or(cond, a._mask) + cls = type(a) + else: + cls = MaskedArray + result = a.view(cls) + # Assign to *.mask so that structured masks are handled correctly. + result.mask = _shrink_mask(cond) + # There is no view of a boolean so when 'a' is a MaskedArray with nomask + # the update to the result's mask has no effect. + if not copy and hasattr(a, '_mask') and getmask(a) is nomask: + a._mask = result._mask.view() + return result + + +def masked_greater(x, value, copy=True): + """ + Mask an array where greater than a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x > value). + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_greater(a, 2) + masked_array(data=[0, 1, 2, --], + mask=[False, False, False, True], + fill_value=999999) + + """ + return masked_where(greater(x, value), x, copy=copy) + + +def masked_greater_equal(x, value, copy=True): + """ + Mask an array where greater than or equal to a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x >= value). + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_greater_equal(a, 2) + masked_array(data=[0, 1, --, --], + mask=[False, False, True, True], + fill_value=999999) + + """ + return masked_where(greater_equal(x, value), x, copy=copy) + + +def masked_less(x, value, copy=True): + """ + Mask an array where less than a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x < value). + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_less(a, 2) + masked_array(data=[--, --, 2, 3], + mask=[ True, True, False, False], + fill_value=999999) + + """ + return masked_where(less(x, value), x, copy=copy) + + +def masked_less_equal(x, value, copy=True): + """ + Mask an array where less than or equal to a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x <= value). + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_less_equal(a, 2) + masked_array(data=[--, --, --, 3], + mask=[ True, True, True, False], + fill_value=999999) + + """ + return masked_where(less_equal(x, value), x, copy=copy) + + +def masked_not_equal(x, value, copy=True): + """ + Mask an array where *not* equal to a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x != value). + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_not_equal(a, 2) + masked_array(data=[--, --, 2, --], + mask=[ True, True, False, True], + fill_value=999999) + + """ + return masked_where(not_equal(x, value), x, copy=copy) + + +def masked_equal(x, value, copy=True): + """ + Mask an array where equal to a given value. + + Return a MaskedArray, masked where the data in array `x` are + equal to `value`. The fill_value of the returned MaskedArray + is set to `value`. + + For floating point arrays, consider using ``masked_values(x, value)``. + + See Also + -------- + masked_where : Mask where a condition is met. + masked_values : Mask using floating point equality. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_equal(a, 2) + masked_array(data=[0, 1, --, 3], + mask=[False, False, True, False], + fill_value=2) + + """ + output = masked_where(equal(x, value), x, copy=copy) + output.fill_value = value + return output + + +def masked_inside(x, v1, v2, copy=True): + """ + Mask an array inside a given interval. + + Shortcut to ``masked_where``, where `condition` is True for `x` inside + the interval [v1,v2] (v1 <= x <= v2). The boundaries `v1` and `v2` + can be given in either order. + + See Also + -------- + masked_where : Mask where a condition is met. + + Notes + ----- + The array `x` is prefilled with its filling value. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] + >>> ma.masked_inside(x, -0.3, 0.3) + masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1], + mask=[False, False, True, True, False, False], + fill_value=1e+20) + + The order of `v1` and `v2` doesn't matter. + + >>> ma.masked_inside(x, 0.3, -0.3) + masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1], + mask=[False, False, True, True, False, False], + fill_value=1e+20) + + """ + if v2 < v1: + (v1, v2) = (v2, v1) + xf = filled(x) + condition = (xf >= v1) & (xf <= v2) + return masked_where(condition, x, copy=copy) + + +def masked_outside(x, v1, v2, copy=True): + """ + Mask an array outside a given interval. + + Shortcut to ``masked_where``, where `condition` is True for `x` outside + the interval [v1,v2] (x < v1)|(x > v2). + The boundaries `v1` and `v2` can be given in either order. + + See Also + -------- + masked_where : Mask where a condition is met. + + Notes + ----- + The array `x` is prefilled with its filling value. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] + >>> ma.masked_outside(x, -0.3, 0.3) + masked_array(data=[--, --, 0.01, 0.2, --, --], + mask=[ True, True, False, False, True, True], + fill_value=1e+20) + + The order of `v1` and `v2` doesn't matter. + + >>> ma.masked_outside(x, 0.3, -0.3) + masked_array(data=[--, --, 0.01, 0.2, --, --], + mask=[ True, True, False, False, True, True], + fill_value=1e+20) + + """ + if v2 < v1: + (v1, v2) = (v2, v1) + xf = filled(x) + condition = (xf < v1) | (xf > v2) + return masked_where(condition, x, copy=copy) + + +def masked_object(x, value, copy=True, shrink=True): + """ + Mask the array `x` where the data are exactly equal to value. + + This function is similar to `masked_values`, but only suitable + for object arrays: for floating point, use `masked_values` instead. + + Parameters + ---------- + x : array_like + Array to mask + value : object + Comparison value + copy : {True, False}, optional + Whether to return a copy of `x`. + shrink : {True, False}, optional + Whether to collapse a mask full of False to nomask + + Returns + ------- + result : MaskedArray + The result of masking `x` where equal to `value`. + + See Also + -------- + masked_where : Mask where a condition is met. + masked_equal : Mask where equal to a given value (integers). + masked_values : Mask using floating point equality. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> food = np.array(['green_eggs', 'ham'], dtype=object) + >>> # don't eat spoiled food + >>> eat = ma.masked_object(food, 'green_eggs') + >>> eat + masked_array(data=[--, 'ham'], + mask=[ True, False], + fill_value='green_eggs', + dtype=object) + >>> # plain ol` ham is boring + >>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object) + >>> eat = ma.masked_object(fresh_food, 'green_eggs') + >>> eat + masked_array(data=['cheese', 'ham', 'pineapple'], + mask=False, + fill_value='green_eggs', + dtype=object) + + Note that `mask` is set to ``nomask`` if possible. + + >>> eat + masked_array(data=['cheese', 'ham', 'pineapple'], + mask=False, + fill_value='green_eggs', + dtype=object) + + """ + if isMaskedArray(x): + condition = umath.equal(x._data, value) + mask = x._mask + else: + condition = umath.equal(np.asarray(x), value) + mask = nomask + mask = mask_or(mask, make_mask(condition, shrink=shrink)) + return masked_array(x, mask=mask, copy=copy, fill_value=value) + + +def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True): + """ + Mask using floating point equality. + + Return a MaskedArray, masked where the data in array `x` are approximately + equal to `value`, determined using `isclose`. The default tolerances for + `masked_values` are the same as those for `isclose`. + + For integer types, exact equality is used, in the same way as + `masked_equal`. + + The fill_value is set to `value` and the mask is set to ``nomask`` if + possible. + + Parameters + ---------- + x : array_like + Array to mask. + value : float + Masking value. + rtol, atol : float, optional + Tolerance parameters passed on to `isclose` + copy : bool, optional + Whether to return a copy of `x`. + shrink : bool, optional + Whether to collapse a mask full of False to ``nomask``. + + Returns + ------- + result : MaskedArray + The result of masking `x` where approximately equal to `value`. + + See Also + -------- + masked_where : Mask where a condition is met. + masked_equal : Mask where equal to a given value (integers). + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = np.array([1, 1.1, 2, 1.1, 3]) + >>> ma.masked_values(x, 1.1) + masked_array(data=[1.0, --, 2.0, --, 3.0], + mask=[False, True, False, True, False], + fill_value=1.1) + + Note that `mask` is set to ``nomask`` if possible. + + >>> ma.masked_values(x, 2.1) + masked_array(data=[1. , 1.1, 2. , 1.1, 3. ], + mask=False, + fill_value=2.1) + + Unlike `masked_equal`, `masked_values` can perform approximate equalities. + + >>> ma.masked_values(x, 2.1, atol=1e-1) + masked_array(data=[1.0, 1.1, --, 1.1, 3.0], + mask=[False, False, True, False, False], + fill_value=2.1) + + """ + xnew = filled(x, value) + if np.issubdtype(xnew.dtype, np.floating): + mask = np.isclose(xnew, value, atol=atol, rtol=rtol) + else: + mask = umath.equal(xnew, value) + ret = masked_array(xnew, mask=mask, copy=copy, fill_value=value) + if shrink: + ret.shrink_mask() + return ret + + +def masked_invalid(a, copy=True): + """ + Mask an array where invalid values occur (NaNs or infs). + + This function is a shortcut to ``masked_where``, with + `condition` = ~(np.isfinite(a)). Any pre-existing mask is conserved. + Only applies to arrays with a dtype where NaNs or infs make sense + (i.e. floating point types), but accepts any array_like object. + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.arange(5, dtype=float) + >>> a[2] = np.nan + >>> a[3] = np.inf + >>> a + array([ 0., 1., nan, inf, 4.]) + >>> ma.masked_invalid(a) + masked_array(data=[0.0, 1.0, --, --, 4.0], + mask=[False, False, True, True, False], + fill_value=1e+20) + + """ + a = np.array(a, copy=None, subok=True) + res = masked_where(~(np.isfinite(a)), a, copy=copy) + # masked_invalid previously never returned nomask as a mask and doing so + # threw off matplotlib (gh-22842). So use shrink=False: + if res._mask is nomask: + res._mask = make_mask_none(res.shape, res.dtype) + return res + +############################################################################### +# Printing options # +############################################################################### + + +class _MaskedPrintOption: + """ + Handle the string used to represent missing data in a masked array. + + """ + + def __init__(self, display): + """ + Create the masked_print_option object. + + """ + self._display = display + self._enabled = True + + def display(self): + """ + Display the string to print for masked values. + + """ + return self._display + + def set_display(self, s): + """ + Set the string to print for masked values. + + """ + self._display = s + + def enabled(self): + """ + Is the use of the display value enabled? + + """ + return self._enabled + + def enable(self, shrink=1): + """ + Set the enabling shrink to `shrink`. + + """ + self._enabled = shrink + + def __str__(self): + return str(self._display) + + __repr__ = __str__ + + +# if you single index into a masked location you get this object. +masked_print_option = _MaskedPrintOption('--') + + +def _recursive_printoption(result, mask, printopt): + """ + Puts printoptions in result where mask is True. + + Private function allowing for recursion + + """ + names = result.dtype.names + if names is not None: + for name in names: + curdata = result[name] + curmask = mask[name] + _recursive_printoption(curdata, curmask, printopt) + else: + np.copyto(result, printopt, where=mask) + + +# For better or worse, these end in a newline +_legacy_print_templates = { + 'long_std': textwrap.dedent("""\ + masked_%(name)s(data = + %(data)s, + %(nlen)s mask = + %(mask)s, + %(nlen)s fill_value = %(fill)s) + """), + 'long_flx': textwrap.dedent("""\ + masked_%(name)s(data = + %(data)s, + %(nlen)s mask = + %(mask)s, + %(nlen)s fill_value = %(fill)s, + %(nlen)s dtype = %(dtype)s) + """), + 'short_std': textwrap.dedent("""\ + masked_%(name)s(data = %(data)s, + %(nlen)s mask = %(mask)s, + %(nlen)s fill_value = %(fill)s) + """), + 'short_flx': textwrap.dedent("""\ + masked_%(name)s(data = %(data)s, + %(nlen)s mask = %(mask)s, + %(nlen)s fill_value = %(fill)s, + %(nlen)s dtype = %(dtype)s) + """) +} + +############################################################################### +# MaskedArray class # +############################################################################### + + +def _recursive_filled(a, mask, fill_value): + """ + Recursively fill `a` with `fill_value`. + + """ + names = a.dtype.names + for name in names: + current = a[name] + if current.dtype.names is not None: + _recursive_filled(current, mask[name], fill_value[name]) + else: + np.copyto(current, fill_value[name], where=mask[name]) + + +def flatten_structured_array(a): + """ + Flatten a structured array. + + The data type of the output is chosen such that it can represent all of the + (nested) fields. + + Parameters + ---------- + a : structured array + + Returns + ------- + output : masked array or ndarray + A flattened masked array if the input is a masked array, otherwise a + standard ndarray. + + Examples + -------- + >>> import numpy as np + >>> ndtype = [('a', int), ('b', float)] + >>> a = np.array([(1, 1), (2, 2)], dtype=ndtype) + >>> np.ma.flatten_structured_array(a) + array([[1., 1.], + [2., 2.]]) + + """ + + def flatten_sequence(iterable): + """ + Flattens a compound of nested iterables. + + """ + for elm in iter(iterable): + if hasattr(elm, "__iter__") and not isinstance(elm, (str, bytes)): + yield from flatten_sequence(elm) + else: + yield elm + + a = np.asanyarray(a) + inishape = a.shape + a = a.ravel() + if isinstance(a, MaskedArray): + out = np.array([tuple(flatten_sequence(d.item())) for d in a._data]) + out = out.view(MaskedArray) + out._mask = np.array([tuple(flatten_sequence(d.item())) + for d in getmaskarray(a)]) + else: + out = np.array([tuple(flatten_sequence(d.item())) for d in a]) + if len(inishape) > 1: + newshape = list(out.shape) + newshape[0] = inishape + out.shape = tuple(flatten_sequence(newshape)) + return out + + +def _arraymethod(funcname, onmask=True): + """ + Return a class method wrapper around a basic array method. + + Creates a class method which returns a masked array, where the new + ``_data`` array is the output of the corresponding basic method called + on the original ``_data``. + + If `onmask` is True, the new mask is the output of the method called + on the initial mask. Otherwise, the new mask is just a reference + to the initial mask. + + Parameters + ---------- + funcname : str + Name of the function to apply on data. + onmask : bool + Whether the mask must be processed also (True) or left + alone (False). Default is True. Make available as `_onmask` + attribute. + + Returns + ------- + method : instancemethod + Class method wrapper of the specified basic array method. + + """ + def wrapped_method(self, *args, **params): + result = getattr(self._data, funcname)(*args, **params) + result = result.view(type(self)) + result._update_from(self) + mask = self._mask + if not onmask: + result.__setmask__(mask) + elif mask is not nomask: + # __setmask__ makes a copy, which we don't want + result._mask = getattr(mask, funcname)(*args, **params) + return result + methdoc = getattr(ndarray, funcname, None) or getattr(np, funcname, None) + if methdoc is not None: + wrapped_method.__doc__ = methdoc.__doc__ + wrapped_method.__name__ = funcname + return wrapped_method + + +class MaskedIterator: + """ + Flat iterator object to iterate over masked arrays. + + A `MaskedIterator` iterator is returned by ``x.flat`` for any masked array + `x`. It allows iterating over the array as if it were a 1-D array, + either in a for-loop or by calling its `next` method. + + Iteration is done in C-contiguous style, with the last index varying the + fastest. The iterator can also be indexed using basic slicing or + advanced indexing. + + See Also + -------- + MaskedArray.flat : Return a flat iterator over an array. + MaskedArray.flatten : Returns a flattened copy of an array. + + Notes + ----- + `MaskedIterator` is not exported by the `ma` module. Instead of + instantiating a `MaskedIterator` directly, use `MaskedArray.flat`. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array(arange(6).reshape(2, 3)) + >>> fl = x.flat + >>> type(fl) + + >>> for item in fl: + ... print(item) + ... + 0 + 1 + 2 + 3 + 4 + 5 + + Extracting more than a single element b indexing the `MaskedIterator` + returns a masked array: + + >>> fl[2:4] + masked_array(data = [2 3], + mask = False, + fill_value = 999999) + + """ + + def __init__(self, ma): + self.ma = ma + self.dataiter = ma._data.flat + + if ma._mask is nomask: + self.maskiter = None + else: + self.maskiter = ma._mask.flat + + def __iter__(self): + return self + + def __getitem__(self, indx): + result = self.dataiter.__getitem__(indx).view(type(self.ma)) + if self.maskiter is not None: + _mask = self.maskiter.__getitem__(indx) + if isinstance(_mask, ndarray): + # set shape to match that of data; this is needed for matrices + _mask.shape = result.shape + result._mask = _mask + elif isinstance(_mask, np.void): + return mvoid(result, mask=_mask, hardmask=self.ma._hardmask) + elif _mask: # Just a scalar, masked + return masked + return result + + # This won't work if ravel makes a copy + def __setitem__(self, index, value): + self.dataiter[index] = getdata(value) + if self.maskiter is not None: + self.maskiter[index] = getmaskarray(value) + + def __next__(self): + """ + Return the next value, or raise StopIteration. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([3, 2], mask=[0, 1]) + >>> fl = x.flat + >>> next(fl) + 3 + >>> next(fl) + masked + >>> next(fl) + Traceback (most recent call last): + ... + StopIteration + + """ + d = next(self.dataiter) + if self.maskiter is not None: + m = next(self.maskiter) + if isinstance(m, np.void): + return mvoid(d, mask=m, hardmask=self.ma._hardmask) + elif m: # Just a scalar, masked + return masked + return d + + +@set_module("numpy.ma") +class MaskedArray(ndarray): + """ + An array class with possibly masked values. + + Masked values of True exclude the corresponding element from any + computation. + + Construction:: + + x = MaskedArray(data, mask=nomask, dtype=None, copy=False, subok=True, + ndmin=0, fill_value=None, keep_mask=True, hard_mask=None, + shrink=True, order=None) + + Parameters + ---------- + data : array_like + Input data. + mask : sequence, optional + Mask. Must be convertible to an array of booleans with the same + shape as `data`. True indicates a masked (i.e. invalid) data. + dtype : dtype, optional + Data type of the output. + If `dtype` is None, the type of the data argument (``data.dtype``) + is used. If `dtype` is not None and different from ``data.dtype``, + a copy is performed. + copy : bool, optional + Whether to copy the input data (True), or to use a reference instead. + Default is False. + subok : bool, optional + Whether to return a subclass of `MaskedArray` if possible (True) or a + plain `MaskedArray`. Default is True. + ndmin : int, optional + Minimum number of dimensions. Default is 0. + fill_value : scalar, optional + Value used to fill in the masked values when necessary. + If None, a default based on the data-type is used. + keep_mask : bool, optional + Whether to combine `mask` with the mask of the input data, if any + (True), or to use only `mask` for the output (False). Default is True. + hard_mask : bool, optional + Whether to use a hard mask or not. With a hard mask, masked values + cannot be unmasked. Default is False. + shrink : bool, optional + Whether to force compression of an empty mask. Default is True. + order : {'C', 'F', 'A'}, optional + Specify the order of the array. If order is 'C', then the array + will be in C-contiguous order (last-index varies the fastest). + If order is 'F', then the returned array will be in + Fortran-contiguous order (first-index varies the fastest). + If order is 'A' (default), then the returned array may be + in any order (either C-, Fortran-contiguous, or even discontiguous), + unless a copy is required, in which case it will be C-contiguous. + + Examples + -------- + >>> import numpy as np + + The ``mask`` can be initialized with an array of boolean values + with the same shape as ``data``. + + >>> data = np.arange(6).reshape((2, 3)) + >>> np.ma.MaskedArray(data, mask=[[False, True, False], + ... [False, False, True]]) + masked_array( + data=[[0, --, 2], + [3, 4, --]], + mask=[[False, True, False], + [False, False, True]], + fill_value=999999) + + Alternatively, the ``mask`` can be initialized to homogeneous boolean + array with the same shape as ``data`` by passing in a scalar + boolean value: + + >>> np.ma.MaskedArray(data, mask=False) + masked_array( + data=[[0, 1, 2], + [3, 4, 5]], + mask=[[False, False, False], + [False, False, False]], + fill_value=999999) + + >>> np.ma.MaskedArray(data, mask=True) + masked_array( + data=[[--, --, --], + [--, --, --]], + mask=[[ True, True, True], + [ True, True, True]], + fill_value=999999, + dtype=int64) + + .. note:: + The recommended practice for initializing ``mask`` with a scalar + boolean value is to use ``True``/``False`` rather than + ``np.True_``/``np.False_``. The reason is :attr:`nomask` + is represented internally as ``np.False_``. + + >>> np.False_ is np.ma.nomask + True + + """ + + __array_priority__ = 15 + _defaultmask = nomask + _defaulthardmask = False + _baseclass = ndarray + + # Maximum number of elements per axis used when printing an array. The + # 1d case is handled separately because we need more values in this case. + _print_width = 100 + _print_width_1d = 1500 + + def __new__(cls, data=None, mask=nomask, dtype=None, copy=False, + subok=True, ndmin=0, fill_value=None, keep_mask=True, + hard_mask=None, shrink=True, order=None): + """ + Create a new masked array from scratch. + + Notes + ----- + A masked array can also be created by taking a .view(MaskedArray). + + """ + # Process data. + copy = None if not copy else True + _data = np.array(data, dtype=dtype, copy=copy, + order=order, subok=True, ndmin=ndmin) + _baseclass = getattr(data, '_baseclass', type(_data)) + # Check that we're not erasing the mask. + if isinstance(data, MaskedArray) and (data.shape != _data.shape): + copy = True + + # Here, we copy the _view_, so that we can attach new properties to it + # we must never do .view(MaskedConstant), as that would create a new + # instance of np.ma.masked, which make identity comparison fail + if isinstance(data, cls) and subok and not isinstance(data, MaskedConstant): + _data = ndarray.view(_data, type(data)) + else: + _data = ndarray.view(_data, cls) + + # Handle the case where data is not a subclass of ndarray, but + # still has the _mask attribute like MaskedArrays + if hasattr(data, '_mask') and not isinstance(data, ndarray): + _data._mask = data._mask + # FIXME: should we set `_data._sharedmask = True`? + # Process mask. + # Type of the mask + mdtype = make_mask_descr(_data.dtype) + if mask is nomask: + # Case 1. : no mask in input. + # Erase the current mask ? + if not keep_mask: + # With a reduced version + if shrink: + _data._mask = nomask + # With full version + else: + _data._mask = np.zeros(_data.shape, dtype=mdtype) + # Check whether we missed something + elif isinstance(data, (tuple, list)): + try: + # If data is a sequence of masked array + mask = np.array( + [getmaskarray(np.asanyarray(m, dtype=_data.dtype)) + for m in data], dtype=mdtype) + except (ValueError, TypeError): + # If data is nested + mask = nomask + # Force shrinking of the mask if needed (and possible) + if (mdtype == MaskType) and mask.any(): + _data._mask = mask + _data._sharedmask = False + else: + _data._sharedmask = not copy + if copy: + _data._mask = _data._mask.copy() + # Reset the shape of the original mask + if getmask(data) is not nomask: + # gh-21022 encounters an issue here + # because data._mask.shape is not writeable, but + # the op was also pointless in that case, because + # the shapes were the same, so we can at least + # avoid that path + if data._mask.shape != data.shape: + data._mask.shape = data.shape + else: + # Case 2. : With a mask in input. + # If mask is boolean, create an array of True or False + + # if users pass `mask=None` be forgiving here and cast it False + # for speed; although the default is `mask=nomask` and can differ. + if mask is None: + mask = False + + if mask is True and mdtype == MaskType: + mask = np.ones(_data.shape, dtype=mdtype) + elif mask is False and mdtype == MaskType: + mask = np.zeros(_data.shape, dtype=mdtype) + else: + # Read the mask with the current mdtype + try: + mask = np.array(mask, copy=copy, dtype=mdtype) + # Or assume it's a sequence of bool/int + except TypeError: + mask = np.array([tuple([m] * len(mdtype)) for m in mask], + dtype=mdtype) + # Make sure the mask and the data have the same shape + if mask.shape != _data.shape: + (nd, nm) = (_data.size, mask.size) + if nm == 1: + mask = np.resize(mask, _data.shape) + elif nm == nd: + mask = np.reshape(mask, _data.shape) + else: + msg = (f"Mask and data not compatible:" + f" data size is {nd}, mask size is {nm}.") + raise MaskError(msg) + copy = True + # Set the mask to the new value + if _data._mask is nomask: + _data._mask = mask + _data._sharedmask = not copy + elif not keep_mask: + _data._mask = mask + _data._sharedmask = not copy + else: + if _data.dtype.names is not None: + def _recursive_or(a, b): + "do a|=b on each field of a, recursively" + for name in a.dtype.names: + (af, bf) = (a[name], b[name]) + if af.dtype.names is not None: + _recursive_or(af, bf) + else: + af |= bf + + _recursive_or(_data._mask, mask) + else: + _data._mask = np.logical_or(mask, _data._mask) + _data._sharedmask = False + + # Update fill_value. + if fill_value is None: + fill_value = getattr(data, '_fill_value', None) + # But don't run the check unless we have something to check. + if fill_value is not None: + _data._fill_value = _check_fill_value(fill_value, _data.dtype) + # Process extra options .. + if hard_mask is None: + _data._hardmask = getattr(data, '_hardmask', False) + else: + _data._hardmask = hard_mask + _data._baseclass = _baseclass + return _data + + def _update_from(self, obj): + """ + Copies some attributes of obj to self. + + """ + if isinstance(obj, ndarray): + _baseclass = type(obj) + else: + _baseclass = ndarray + # We need to copy the _basedict to avoid backward propagation + _optinfo = {} + _optinfo.update(getattr(obj, '_optinfo', {})) + _optinfo.update(getattr(obj, '_basedict', {})) + if not isinstance(obj, MaskedArray): + _optinfo.update(getattr(obj, '__dict__', {})) + _dict = {'_fill_value': getattr(obj, '_fill_value', None), + '_hardmask': getattr(obj, '_hardmask', False), + '_sharedmask': getattr(obj, '_sharedmask', False), + '_isfield': getattr(obj, '_isfield', False), + '_baseclass': getattr(obj, '_baseclass', _baseclass), + '_optinfo': _optinfo, + '_basedict': _optinfo} + self.__dict__.update(_dict) + self.__dict__.update(_optinfo) + + def __array_finalize__(self, obj): + """ + Finalizes the masked array. + + """ + # Get main attributes. + self._update_from(obj) + + # We have to decide how to initialize self.mask, based on + # obj.mask. This is very difficult. There might be some + # correspondence between the elements in the array we are being + # created from (= obj) and us. Or there might not. This method can + # be called in all kinds of places for all kinds of reasons -- could + # be empty_like, could be slicing, could be a ufunc, could be a view. + # The numpy subclassing interface simply doesn't give us any way + # to know, which means that at best this method will be based on + # guesswork and heuristics. To make things worse, there isn't even any + # clear consensus about what the desired behavior is. For instance, + # most users think that np.empty_like(marr) -- which goes via this + # method -- should return a masked array with an empty mask (see + # gh-3404 and linked discussions), but others disagree, and they have + # existing code which depends on empty_like returning an array that + # matches the input mask. + # + # Historically our algorithm was: if the template object mask had the + # same *number of elements* as us, then we used *it's mask object + # itself* as our mask, so that writes to us would also write to the + # original array. This is horribly broken in multiple ways. + # + # Now what we do instead is, if the template object mask has the same + # number of elements as us, and we do not have the same base pointer + # as the template object (b/c views like arr[...] should keep the same + # mask), then we make a copy of the template object mask and use + # that. This is also horribly broken but somewhat less so. Maybe. + if isinstance(obj, ndarray): + # XX: This looks like a bug -- shouldn't it check self.dtype + # instead? + if obj.dtype.names is not None: + _mask = getmaskarray(obj) + else: + _mask = getmask(obj) + + # If self and obj point to exactly the same data, then probably + # self is a simple view of obj (e.g., self = obj[...]), so they + # should share the same mask. (This isn't 100% reliable, e.g. self + # could be the first row of obj, or have strange strides, but as a + # heuristic it's not bad.) In all other cases, we make a copy of + # the mask, so that future modifications to 'self' do not end up + # side-effecting 'obj' as well. + if (_mask is not nomask and obj.__array_interface__["data"][0] + != self.__array_interface__["data"][0]): + # We should make a copy. But we could get here via astype, + # in which case the mask might need a new dtype as well + # (e.g., changing to or from a structured dtype), and the + # order could have changed. So, change the mask type if + # needed and use astype instead of copy. + if self.dtype == obj.dtype: + _mask_dtype = _mask.dtype + else: + _mask_dtype = make_mask_descr(self.dtype) + + if self.flags.c_contiguous: + order = "C" + elif self.flags.f_contiguous: + order = "F" + else: + order = "K" + + _mask = _mask.astype(_mask_dtype, order) + else: + # Take a view so shape changes, etc., do not propagate back. + _mask = _mask.view() + else: + _mask = nomask + + self._mask = _mask + # Finalize the mask + if self._mask is not nomask: + try: + self._mask.shape = self.shape + except ValueError: + self._mask = nomask + except (TypeError, AttributeError): + # When _mask.shape is not writable (because it's a void) + pass + + # Finalize the fill_value + if self._fill_value is not None: + self._fill_value = _check_fill_value(self._fill_value, self.dtype) + elif self.dtype.names is not None: + # Finalize the default fill_value for structured arrays + self._fill_value = _check_fill_value(None, self.dtype) + + def __array_wrap__(self, obj, context=None, return_scalar=False): + """ + Special hook for ufuncs. + + Wraps the numpy array and sets the mask according to context. + + """ + if obj is self: # for in-place operations + result = obj + else: + result = obj.view(type(self)) + result._update_from(self) + + if context is not None: + result._mask = result._mask.copy() + func, args, out_i = context + # args sometimes contains outputs (gh-10459), which we don't want + input_args = args[:func.nin] + m = functools.reduce(mask_or, [getmaskarray(arg) for arg in input_args]) + # Get the domain mask + domain = ufunc_domain.get(func) + if domain is not None: + # Take the domain, and make sure it's a ndarray + with np.errstate(divide='ignore', invalid='ignore'): + # The result may be masked for two (unary) domains. + # That can't really be right as some domains drop + # the mask and some don't behaving differently here. + d = domain(*input_args).astype(bool, copy=False) + d = filled(d, True) + + if d.any(): + # Fill the result where the domain is wrong + try: + # Binary domain: take the last value + fill_value = ufunc_fills[func][-1] + except TypeError: + # Unary domain: just use this one + fill_value = ufunc_fills[func] + except KeyError: + # Domain not recognized, use fill_value instead + fill_value = self.fill_value + + np.copyto(result, fill_value, where=d) + + # Update the mask + if m is nomask: + m = d + else: + # Don't modify inplace, we risk back-propagation + m = (m | d) + + # Make sure the mask has the proper size + if result is not self and result.shape == () and m: + return masked + else: + result._mask = m + result._sharedmask = False + + return result + + def view(self, dtype=None, type=None, fill_value=None): + """ + Return a view of the MaskedArray data. + + Parameters + ---------- + dtype : data-type or ndarray sub-class, optional + Data-type descriptor of the returned view, e.g., float32 or int16. + The default, None, results in the view having the same data-type + as `a`. As with ``ndarray.view``, dtype can also be specified as + an ndarray sub-class, which then specifies the type of the + returned object (this is equivalent to setting the ``type`` + parameter). + type : Python type, optional + Type of the returned view, either ndarray or a subclass. The + default None results in type preservation. + fill_value : scalar, optional + The value to use for invalid entries (None by default). + If None, then this argument is inferred from the passed `dtype`, or + in its absence the original array, as discussed in the notes below. + + See Also + -------- + numpy.ndarray.view : Equivalent method on ndarray object. + + Notes + ----- + + ``a.view()`` is used two different ways: + + ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view + of the array's memory with a different data-type. This can cause a + reinterpretation of the bytes of memory. + + ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just + returns an instance of `ndarray_subclass` that looks at the same array + (same shape, dtype, etc.) This does not cause a reinterpretation of the + memory. + + If `fill_value` is not specified, but `dtype` is specified (and is not + an ndarray sub-class), the `fill_value` of the MaskedArray will be + reset. If neither `fill_value` nor `dtype` are specified (or if + `dtype` is an ndarray sub-class), then the fill value is preserved. + Finally, if `fill_value` is specified, but `dtype` is not, the fill + value is set to the specified value. + + For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of + bytes per entry than the previous dtype (for example, converting a + regular array to a structured array), then the behavior of the view + cannot be predicted just from the superficial appearance of ``a`` (shown + by ``print(a)``). It also depends on exactly how ``a`` is stored in + memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus + defined as a slice or transpose, etc., the view may give different + results. + """ + + if dtype is None: + if type is None: + output = ndarray.view(self) + else: + output = ndarray.view(self, type) + elif type is None: + try: + if issubclass(dtype, ndarray): + output = ndarray.view(self, dtype) + dtype = None + else: + output = ndarray.view(self, dtype) + except TypeError: + output = ndarray.view(self, dtype) + else: + output = ndarray.view(self, dtype, type) + + # also make the mask be a view (so attr changes to the view's + # mask do no affect original object's mask) + # (especially important to avoid affecting np.masked singleton) + if getmask(output) is not nomask: + output._mask = output._mask.view() + + # Make sure to reset the _fill_value if needed + if getattr(output, '_fill_value', None) is not None: + if fill_value is None: + if dtype is None: + pass # leave _fill_value as is + else: + output._fill_value = None + else: + output.fill_value = fill_value + return output + + def __getitem__(self, indx): + """ + x.__getitem__(y) <==> x[y] + + Return the item described by i, as a masked array. + + """ + # We could directly use ndarray.__getitem__ on self. + # But then we would have to modify __array_finalize__ to prevent the + # mask of being reshaped if it hasn't been set up properly yet + # So it's easier to stick to the current version + dout = self.data[indx] + _mask = self._mask + + def _is_scalar(m): + return not isinstance(m, np.ndarray) + + def _scalar_heuristic(arr, elem): + """ + Return whether `elem` is a scalar result of indexing `arr`, or None + if undecidable without promoting nomask to a full mask + """ + # obviously a scalar + if not isinstance(elem, np.ndarray): + return True + + # object array scalar indexing can return anything + elif arr.dtype.type is np.object_: + if arr.dtype is not elem.dtype: + # elem is an array, but dtypes do not match, so must be + # an element + return True + + # well-behaved subclass that only returns 0d arrays when + # expected - this is not a scalar + elif type(arr).__getitem__ == ndarray.__getitem__: + return False + + return None + + if _mask is not nomask: + # _mask cannot be a subclass, so it tells us whether we should + # expect a scalar. It also cannot be of dtype object. + mout = _mask[indx] + scalar_expected = _is_scalar(mout) + + else: + # attempt to apply the heuristic to avoid constructing a full mask + mout = nomask + scalar_expected = _scalar_heuristic(self.data, dout) + if scalar_expected is None: + # heuristics have failed + # construct a full array, so we can be certain. This is costly. + # we could also fall back on ndarray.__getitem__(self.data, indx) + scalar_expected = _is_scalar(getmaskarray(self)[indx]) + + # Did we extract a single item? + if scalar_expected: + # A record + if isinstance(dout, np.void): + # We should always re-cast to mvoid, otherwise users can + # change masks on rows that already have masked values, but not + # on rows that have no masked values, which is inconsistent. + return mvoid(dout, mask=mout, hardmask=self._hardmask) + + # special case introduced in gh-5962 + elif (self.dtype.type is np.object_ and + isinstance(dout, np.ndarray) and + dout is not masked): + # If masked, turn into a MaskedArray, with everything masked. + if mout: + return MaskedArray(dout, mask=True) + else: + return dout + + # Just a scalar + elif mout: + return masked + else: + return dout + else: + # Force dout to MA + dout = dout.view(type(self)) + # Inherit attributes from self + dout._update_from(self) + # Check the fill_value + if is_string_or_list_of_strings(indx): + if self._fill_value is not None: + dout._fill_value = self._fill_value[indx] + + # Something like gh-15895 has happened if this check fails. + # _fill_value should always be an ndarray. + if not isinstance(dout._fill_value, np.ndarray): + raise RuntimeError('Internal NumPy error.') + # If we're indexing a multidimensional field in a + # structured array (such as dtype("(2,)i2,(2,)i1")), + # dimensionality goes up (M[field].ndim == M.ndim + + # M.dtype[field].ndim). That's fine for + # M[field] but problematic for M[field].fill_value + # which should have shape () to avoid breaking several + # methods. There is no great way out, so set to + # first element. See issue #6723. + if dout._fill_value.ndim > 0: + if not (dout._fill_value == + dout._fill_value.flat[0]).all(): + warnings.warn( + "Upon accessing multidimensional field " + f"{indx!s}, need to keep dimensionality " + "of fill_value at 0. Discarding " + "heterogeneous fill_value and setting " + f"all to {dout._fill_value[0]!s}.", + stacklevel=2) + # Need to use `.flat[0:1].squeeze(...)` instead of just + # `.flat[0]` to ensure the result is a 0d array and not + # a scalar. + dout._fill_value = dout._fill_value.flat[0:1].squeeze(axis=0) + dout._isfield = True + # Update the mask if needed + if mout is not nomask: + # set shape to match that of data; this is needed for matrices + dout._mask = reshape(mout, dout.shape) + dout._sharedmask = True + # Note: Don't try to check for m.any(), that'll take too long + return dout + + # setitem may put NaNs into integer arrays or occasionally overflow a + # float. But this may happen in masked values, so avoid otherwise + # correct warnings (as is typical also in masked calculations). + @np.errstate(over='ignore', invalid='ignore') + def __setitem__(self, indx, value): + """ + x.__setitem__(i, y) <==> x[i]=y + + Set item described by index. If value is masked, masks those + locations. + + """ + if self is masked: + raise MaskError('Cannot alter the masked element.') + _data = self._data + _mask = self._mask + if isinstance(indx, str): + _data[indx] = value + if _mask is nomask: + self._mask = _mask = make_mask_none(self.shape, self.dtype) + _mask[indx] = getmask(value) + return + + _dtype = _data.dtype + + if value is masked: + # The mask wasn't set: create a full version. + if _mask is nomask: + _mask = self._mask = make_mask_none(self.shape, _dtype) + # Now, set the mask to its value. + if _dtype.names is not None: + _mask[indx] = tuple([True] * len(_dtype.names)) + else: + _mask[indx] = True + return + + # Get the _data part of the new value + dval = getattr(value, '_data', value) + # Get the _mask part of the new value + mval = getmask(value) + if _dtype.names is not None and mval is nomask: + mval = tuple([False] * len(_dtype.names)) + if _mask is nomask: + # Set the data, then the mask + _data[indx] = dval + if mval is not nomask: + _mask = self._mask = make_mask_none(self.shape, _dtype) + _mask[indx] = mval + elif not self._hardmask: + # Set the data, then the mask + if (isinstance(indx, masked_array) and + not isinstance(value, masked_array)): + _data[indx.data] = dval + else: + _data[indx] = dval + _mask[indx] = mval + elif hasattr(indx, 'dtype') and (indx.dtype == MaskType): + indx = indx * umath.logical_not(_mask) + _data[indx] = dval + else: + if _dtype.names is not None: + err_msg = "Flexible 'hard' masks are not yet supported." + raise NotImplementedError(err_msg) + mindx = mask_or(_mask[indx], mval, copy=True) + dindx = self._data[indx] + if dindx.size > 1: + np.copyto(dindx, dval, where=~mindx) + elif mindx is nomask: + dindx = dval + _data[indx] = dindx + _mask[indx] = mindx + return + + # Define so that we can overwrite the setter. + @property + def dtype(self): + return super().dtype + + @dtype.setter + def dtype(self, dtype): + super(MaskedArray, type(self)).dtype.__set__(self, dtype) + if self._mask is not nomask: + self._mask = self._mask.view(make_mask_descr(dtype), ndarray) + # Try to reset the shape of the mask (if we don't have a void). + # This raises a ValueError if the dtype change won't work. + try: + self._mask.shape = self.shape + except (AttributeError, TypeError): + pass + + @property + def shape(self): + return super().shape + + @shape.setter + def shape(self, shape): + super(MaskedArray, type(self)).shape.__set__(self, shape) + # Cannot use self._mask, since it may not (yet) exist when a + # masked matrix sets the shape. + if getmask(self) is not nomask: + self._mask.shape = self.shape + + def __setmask__(self, mask, copy=False): + """ + Set the mask. + + """ + idtype = self.dtype + current_mask = self._mask + if mask is masked: + mask = True + + if current_mask is nomask: + # Make sure the mask is set + # Just don't do anything if there's nothing to do. + if mask is nomask: + return + current_mask = self._mask = make_mask_none(self.shape, idtype) + + if idtype.names is None: + # No named fields. + # Hardmask: don't unmask the data + if self._hardmask: + current_mask |= mask + # Softmask: set everything to False + # If it's obviously a compatible scalar, use a quick update + # method. + elif isinstance(mask, (int, float, np.bool, np.number)): + current_mask[...] = mask + # Otherwise fall back to the slower, general purpose way. + else: + current_mask.flat = mask + else: + # Named fields w/ + mdtype = current_mask.dtype + mask = np.asarray(mask) + # Mask is a singleton + if not mask.ndim: + # It's a boolean : make a record + if mask.dtype.kind == 'b': + mask = np.array(tuple([mask.item()] * len(mdtype)), + dtype=mdtype) + # It's a record: make sure the dtype is correct + else: + mask = mask.astype(mdtype) + # Mask is a sequence + else: + # Make sure the new mask is a ndarray with the proper dtype + try: + copy = None if not copy else True + mask = np.array(mask, copy=copy, dtype=mdtype) + # Or assume it's a sequence of bool/int + except TypeError: + mask = np.array([tuple([m] * len(mdtype)) for m in mask], + dtype=mdtype) + # Hardmask: don't unmask the data + if self._hardmask: + for n in idtype.names: + current_mask[n] |= mask[n] + # Softmask: set everything to False + # If it's obviously a compatible scalar, use a quick update + # method. + elif isinstance(mask, (int, float, np.bool, np.number)): + current_mask[...] = mask + # Otherwise fall back to the slower, general purpose way. + else: + current_mask.flat = mask + # Reshape if needed + if current_mask.shape: + current_mask.shape = self.shape + return + + _set_mask = __setmask__ + + @property + def mask(self): + """ Current mask. """ + + # We could try to force a reshape, but that wouldn't work in some + # cases. + # Return a view so that the dtype and shape cannot be changed in place + # This still preserves nomask by identity + return self._mask.view() + + @mask.setter + def mask(self, value): + self.__setmask__(value) + + @property + def recordmask(self): + """ + Get or set the mask of the array if it has no named fields. For + structured arrays, returns a ndarray of booleans where entries are + ``True`` if **all** the fields are masked, ``False`` otherwise: + + >>> x = np.ma.array([(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], + ... mask=[(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)], + ... dtype=[('a', int), ('b', int)]) + >>> x.recordmask + array([False, False, True, False, False]) + """ + + _mask = self._mask.view(ndarray) + if _mask.dtype.names is None: + return _mask + return np.all(flatten_structured_array(_mask), axis=-1) + + @recordmask.setter + def recordmask(self, mask): + raise NotImplementedError("Coming soon: setting the mask per records!") + + def harden_mask(self): + """ + Force the mask to hard, preventing unmasking by assignment. + + Whether the mask of a masked array is hard or soft is determined by + its `~ma.MaskedArray.hardmask` property. `harden_mask` sets + `~ma.MaskedArray.hardmask` to ``True`` (and returns the modified + self). + + See Also + -------- + ma.MaskedArray.hardmask + ma.MaskedArray.soften_mask + + """ + self._hardmask = True + return self + + def soften_mask(self): + """ + Force the mask to soft (default), allowing unmasking by assignment. + + Whether the mask of a masked array is hard or soft is determined by + its `~ma.MaskedArray.hardmask` property. `soften_mask` sets + `~ma.MaskedArray.hardmask` to ``False`` (and returns the modified + self). + + See Also + -------- + ma.MaskedArray.hardmask + ma.MaskedArray.harden_mask + + """ + self._hardmask = False + return self + + @property + def hardmask(self): + """ + Specifies whether values can be unmasked through assignments. + + By default, assigning definite values to masked array entries will + unmask them. When `hardmask` is ``True``, the mask will not change + through assignments. + + See Also + -------- + ma.MaskedArray.harden_mask + ma.MaskedArray.soften_mask + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(10) + >>> m = np.ma.masked_array(x, x>5) + >>> assert not m.hardmask + + Since `m` has a soft mask, assigning an element value unmasks that + element: + + >>> m[8] = 42 + >>> m + masked_array(data=[0, 1, 2, 3, 4, 5, --, --, 42, --], + mask=[False, False, False, False, False, False, + True, True, False, True], + fill_value=999999) + + After hardening, the mask is not affected by assignments: + + >>> hardened = np.ma.harden_mask(m) + >>> assert m.hardmask and hardened is m + >>> m[:] = 23 + >>> m + masked_array(data=[23, 23, 23, 23, 23, 23, --, --, 23, --], + mask=[False, False, False, False, False, False, + True, True, False, True], + fill_value=999999) + + """ + return self._hardmask + + def unshare_mask(self): + """ + Copy the mask and set the `sharedmask` flag to ``False``. + + Whether the mask is shared between masked arrays can be seen from + the `sharedmask` property. `unshare_mask` ensures the mask is not + shared. A copy of the mask is only made if it was shared. + + See Also + -------- + sharedmask + + """ + if self._sharedmask: + self._mask = self._mask.copy() + self._sharedmask = False + return self + + @property + def sharedmask(self): + """ Share status of the mask (read-only). """ + return self._sharedmask + + def shrink_mask(self): + """ + Reduce a mask to nomask when possible. + + Parameters + ---------- + None + + Returns + ------- + result : MaskedArray + A :class:`~ma.MaskedArray` object. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4) + >>> x.mask + array([[False, False], + [False, False]]) + >>> x.shrink_mask() + masked_array( + data=[[1, 2], + [3, 4]], + mask=False, + fill_value=999999) + >>> x.mask + False + + """ + self._mask = _shrink_mask(self._mask) + return self + + @property + def baseclass(self): + """ Class of the underlying data (read-only). """ + return self._baseclass + + def _get_data(self): + """ + Returns the underlying data, as a view of the masked array. + + If the underlying data is a subclass of :class:`numpy.ndarray`, it is + returned as such. + + >>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) + >>> x.data + matrix([[1, 2], + [3, 4]]) + + The type of the data can be accessed through the :attr:`baseclass` + attribute. + """ + return ndarray.view(self, self._baseclass) + + _data = property(fget=_get_data) + data = property(fget=_get_data) + + @property + def flat(self): + """ Return a flat iterator, or set a flattened version of self to value. """ + return MaskedIterator(self) + + @flat.setter + def flat(self, value): + y = self.ravel() + y[:] = value + + @property + def fill_value(self): + """ + The filling value of the masked array is a scalar. When setting, None + will set to a default based on the data type. + + Examples + -------- + >>> import numpy as np + >>> for dt in [np.int32, np.int64, np.float64, np.complex128]: + ... np.ma.array([0, 1], dtype=dt).get_fill_value() + ... + np.int64(999999) + np.int64(999999) + np.float64(1e+20) + np.complex128(1e+20+0j) + + >>> x = np.ma.array([0, 1.], fill_value=-np.inf) + >>> x.fill_value + np.float64(-inf) + >>> x.fill_value = np.pi + >>> x.fill_value + np.float64(3.1415926535897931) + + Reset to default: + + >>> x.fill_value = None + >>> x.fill_value + np.float64(1e+20) + + """ + if self._fill_value is None: + self._fill_value = _check_fill_value(None, self.dtype) + + # Temporary workaround to account for the fact that str and bytes + # scalars cannot be indexed with (), whereas all other numpy + # scalars can. See issues #7259 and #7267. + # The if-block can be removed after #7267 has been fixed. + if isinstance(self._fill_value, ndarray): + return self._fill_value[()] + return self._fill_value + + @fill_value.setter + def fill_value(self, value=None): + target = _check_fill_value(value, self.dtype) + if not target.ndim == 0: + # 2019-11-12, 1.18.0 + warnings.warn( + "Non-scalar arrays for the fill value are deprecated. Use " + "arrays with scalar values instead. The filled function " + "still supports any array as `fill_value`.", + DeprecationWarning, stacklevel=2) + + _fill_value = self._fill_value + if _fill_value is None: + # Create the attribute if it was undefined + self._fill_value = target + else: + # Don't overwrite the attribute, just fill it (for propagation) + _fill_value[()] = target + + # kept for compatibility + get_fill_value = fill_value.fget + set_fill_value = fill_value.fset + + def filled(self, fill_value=None): + """ + Return a copy of self, with masked values filled with a given value. + **However**, if there are no masked values to fill, self will be + returned instead as an ndarray. + + Parameters + ---------- + fill_value : array_like, optional + The value to use for invalid entries. Can be scalar or non-scalar. + If non-scalar, the resulting ndarray must be broadcastable over + input array. Default is None, in which case, the `fill_value` + attribute of the array is used instead. + + Returns + ------- + filled_array : ndarray + A copy of ``self`` with invalid entries replaced by *fill_value* + (be it the function argument or the attribute of ``self``), or + ``self`` itself as an ndarray if there are no invalid entries to + be replaced. + + Notes + ----- + The result is **not** a MaskedArray! + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999) + >>> x.filled() + array([ 1, 2, -999, 4, -999]) + >>> x.filled(fill_value=1000) + array([ 1, 2, 1000, 4, 1000]) + >>> type(x.filled()) + + + Subclassing is preserved. This means that if, e.g., the data part of + the masked array is a recarray, `filled` returns a recarray: + + >>> x = np.array([(-1, 2), (-3, 4)], dtype='i8,i8').view(np.recarray) + >>> m = np.ma.array(x, mask=[(True, False), (False, True)]) + >>> m.filled() + rec.array([(999999, 2), ( -3, 999999)], + dtype=[('f0', '>> import numpy as np + >>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3) + >>> x.compressed() + array([0, 1]) + >>> type(x.compressed()) + + + N-D arrays are compressed to 1-D. + + >>> arr = [[1, 2], [3, 4]] + >>> mask = [[1, 0], [0, 1]] + >>> x = np.ma.array(arr, mask=mask) + >>> x.compressed() + array([2, 3]) + + """ + data = ndarray.ravel(self._data) + if self._mask is not nomask: + data = data.compress(np.logical_not(ndarray.ravel(self._mask))) + return data + + def compress(self, condition, axis=None, out=None): + """ + Return `a` where condition is ``True``. + + If condition is a `~ma.MaskedArray`, missing values are considered + as ``False``. + + Parameters + ---------- + condition : var + Boolean 1-d array selecting which entries to return. If len(condition) + is less than the size of a along the axis, then output is truncated + to length of condition array. + axis : {None, int}, optional + Axis along which the operation must be performed. + out : {None, ndarray}, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type will be cast if + necessary. + + Returns + ------- + result : MaskedArray + A :class:`~ma.MaskedArray` object. + + Notes + ----- + Please note the difference with :meth:`compressed` ! + The output of :meth:`compress` has a mask, the output of + :meth:`compressed` does not. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) + >>> x + masked_array( + data=[[1, --, 3], + [--, 5, --], + [7, --, 9]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> x.compress([1, 0, 1]) + masked_array(data=[1, 3], + mask=[False, False], + fill_value=999999) + + >>> x.compress([1, 0, 1], axis=1) + masked_array( + data=[[1, 3], + [--, --], + [7, 9]], + mask=[[False, False], + [ True, True], + [False, False]], + fill_value=999999) + + """ + # Get the basic components + (_data, _mask) = (self._data, self._mask) + + # Force the condition to a regular ndarray and forget the missing + # values. + condition = np.asarray(condition) + + _new = _data.compress(condition, axis=axis, out=out).view(type(self)) + _new._update_from(self) + if _mask is not nomask: + _new._mask = _mask.compress(condition, axis=axis) + return _new + + def _insert_masked_print(self): + """ + Replace masked values with masked_print_option, casting all innermost + dtypes to object. + """ + if masked_print_option.enabled(): + mask = self._mask + if mask is nomask: + res = self._data + else: + # convert to object array to make filled work + data = self._data + # For big arrays, to avoid a costly conversion to the + # object dtype, extract the corners before the conversion. + print_width = (self._print_width if self.ndim > 1 + else self._print_width_1d) + for axis in range(self.ndim): + if data.shape[axis] > print_width: + ind = print_width // 2 + arr = np.split(data, (ind, -ind), axis=axis) + data = np.concatenate((arr[0], arr[2]), axis=axis) + arr = np.split(mask, (ind, -ind), axis=axis) + mask = np.concatenate((arr[0], arr[2]), axis=axis) + + rdtype = _replace_dtype_fields(self.dtype, "O") + res = data.astype(rdtype) + _recursive_printoption(res, mask, masked_print_option) + else: + res = self.filled(self.fill_value) + return res + + def __str__(self): + return str(self._insert_masked_print()) + + def __repr__(self): + """ + Literal string representation. + + """ + if self._baseclass is np.ndarray: + name = 'array' + else: + name = self._baseclass.__name__ + + # 2016-11-19: Demoted to legacy format + if np._core.arrayprint._get_legacy_print_mode() <= 113: + is_long = self.ndim > 1 + parameters = { + 'name': name, + 'nlen': " " * len(name), + 'data': str(self), + 'mask': str(self._mask), + 'fill': str(self.fill_value), + 'dtype': str(self.dtype) + } + is_structured = bool(self.dtype.names) + key = '{}_{}'.format( + 'long' if is_long else 'short', + 'flx' if is_structured else 'std' + ) + return _legacy_print_templates[key] % parameters + + prefix = f"masked_{name}(" + + dtype_needed = ( + not np._core.arrayprint.dtype_is_implied(self.dtype) or + np.all(self.mask) or + self.size == 0 + ) + + # determine which keyword args need to be shown + keys = ['data', 'mask', 'fill_value'] + if dtype_needed: + keys.append('dtype') + + # array has only one row (non-column) + is_one_row = builtins.all(dim == 1 for dim in self.shape[:-1]) + + # choose what to indent each keyword with + min_indent = 2 + if is_one_row: + # first key on the same line as the type, remaining keys + # aligned by equals + indents = {} + indents[keys[0]] = prefix + for k in keys[1:]: + n = builtins.max(min_indent, len(prefix + keys[0]) - len(k)) + indents[k] = ' ' * n + prefix = '' # absorbed into the first indent + else: + # each key on its own line, indented by two spaces + indents = dict.fromkeys(keys, ' ' * min_indent) + prefix = prefix + '\n' # first key on the next line + + # format the field values + reprs = {} + reprs['data'] = np.array2string( + self._insert_masked_print(), + separator=", ", + prefix=indents['data'] + 'data=', + suffix=',') + reprs['mask'] = np.array2string( + self._mask, + separator=", ", + prefix=indents['mask'] + 'mask=', + suffix=',') + + if self._fill_value is None: + self.fill_value # initialize fill_value # noqa: B018 + + if (self._fill_value.dtype.kind in ("S", "U") + and self.dtype.kind == self._fill_value.dtype.kind): + # Allow strings: "N/A" has length 3 so would mismatch. + fill_repr = repr(self.fill_value.item()) + elif self._fill_value.dtype == self.dtype and not self.dtype == object: + # Guess that it is OK to use the string as item repr. To really + # fix this, it needs new logic (shared with structured scalars) + fill_repr = str(self.fill_value) + else: + fill_repr = repr(self.fill_value) + + reprs['fill_value'] = fill_repr + if dtype_needed: + reprs['dtype'] = np._core.arrayprint.dtype_short_repr(self.dtype) + + # join keys with values and indentations + result = ',\n'.join( + f'{indents[k]}{k}={reprs[k]}' + for k in keys + ) + return prefix + result + ')' + + def _delegate_binop(self, other): + # This emulates the logic in + # private/binop_override.h:forward_binop_should_defer + if isinstance(other, type(self)): + return False + array_ufunc = getattr(other, "__array_ufunc__", False) + if array_ufunc is False: + other_priority = getattr(other, "__array_priority__", -1000000) + return self.__array_priority__ < other_priority + else: + # If array_ufunc is not None, it will be called inside the ufunc; + # None explicitly tells us to not call the ufunc, i.e., defer. + return array_ufunc is None + + def _comparison(self, other, compare): + """Compare self with other using operator.eq or operator.ne. + + When either of the elements is masked, the result is masked as well, + but the underlying boolean data are still set, with self and other + considered equal if both are masked, and unequal otherwise. + + For structured arrays, all fields are combined, with masked values + ignored. The result is masked if all fields were masked, with self + and other considered equal only if both were fully masked. + """ + omask = getmask(other) + smask = self.mask + mask = mask_or(smask, omask, copy=True) + + odata = getdata(other) + if mask.dtype.names is not None: + # only == and != are reasonably defined for structured dtypes, + # so give up early for all other comparisons: + if compare not in (operator.eq, operator.ne): + return NotImplemented + # For possibly masked structured arrays we need to be careful, + # since the standard structured array comparison will use all + # fields, masked or not. To avoid masked fields influencing the + # outcome, we set all masked fields in self to other, so they'll + # count as equal. To prepare, we ensure we have the right shape. + broadcast_shape = np.broadcast(self, odata).shape + sbroadcast = np.broadcast_to(self, broadcast_shape, subok=True) + sbroadcast._mask = mask + sdata = sbroadcast.filled(odata) + # Now take care of the mask; the merged mask should have an item + # masked if all fields were masked (in one and/or other). + mask = (mask == np.ones((), mask.dtype)) + # Ensure we can compare masks below if other was not masked. + if omask is np.False_: + omask = np.zeros((), smask.dtype) + + else: + # For regular arrays, just use the data as they come. + sdata = self.data + + check = compare(sdata, odata) + + if isinstance(check, (np.bool, bool)): + return masked if mask else check + + if mask is not nomask: + if compare in (operator.eq, operator.ne): + # Adjust elements that were masked, which should be treated + # as equal if masked in both, unequal if masked in one. + # Note that this works automatically for structured arrays too. + # Ignore this for operations other than `==` and `!=` + check = np.where(mask, compare(smask, omask), check) + + if mask.shape != check.shape: + # Guarantee consistency of the shape, making a copy since the + # the mask may need to get written to later. + mask = np.broadcast_to(mask, check.shape).copy() + + check = check.view(type(self)) + check._update_from(self) + check._mask = mask + + # Cast fill value to np.bool if needed. If it cannot be cast, the + # default boolean fill value is used. + if check._fill_value is not None: + try: + fill = _check_fill_value(check._fill_value, np.bool) + except (TypeError, ValueError): + fill = _check_fill_value(None, np.bool) + check._fill_value = fill + + return check + + def __eq__(self, other): + """Check whether other equals self elementwise. + + When either of the elements is masked, the result is masked as well, + but the underlying boolean data are still set, with self and other + considered equal if both are masked, and unequal otherwise. + + For structured arrays, all fields are combined, with masked values + ignored. The result is masked if all fields were masked, with self + and other considered equal only if both were fully masked. + """ + return self._comparison(other, operator.eq) + + def __ne__(self, other): + """Check whether other does not equal self elementwise. + + When either of the elements is masked, the result is masked as well, + but the underlying boolean data are still set, with self and other + considered equal if both are masked, and unequal otherwise. + + For structured arrays, all fields are combined, with masked values + ignored. The result is masked if all fields were masked, with self + and other considered equal only if both were fully masked. + """ + return self._comparison(other, operator.ne) + + # All other comparisons: + def __le__(self, other): + return self._comparison(other, operator.le) + + def __lt__(self, other): + return self._comparison(other, operator.lt) + + def __ge__(self, other): + return self._comparison(other, operator.ge) + + def __gt__(self, other): + return self._comparison(other, operator.gt) + + def __add__(self, other): + """ + Add self to other, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return add(self, other) + + def __radd__(self, other): + """ + Add other to self, and return a new masked array. + + """ + # In analogy with __rsub__ and __rdiv__, use original order: + # we get here from `other + self`. + return add(other, self) + + def __sub__(self, other): + """ + Subtract other from self, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return subtract(self, other) + + def __rsub__(self, other): + """ + Subtract self from other, and return a new masked array. + + """ + return subtract(other, self) + + def __mul__(self, other): + "Multiply self by other, and return a new masked array." + if self._delegate_binop(other): + return NotImplemented + return multiply(self, other) + + def __rmul__(self, other): + """ + Multiply other by self, and return a new masked array. + + """ + # In analogy with __rsub__ and __rdiv__, use original order: + # we get here from `other * self`. + return multiply(other, self) + + def __truediv__(self, other): + """ + Divide other into self, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return true_divide(self, other) + + def __rtruediv__(self, other): + """ + Divide self into other, and return a new masked array. + + """ + return true_divide(other, self) + + def __floordiv__(self, other): + """ + Divide other into self, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return floor_divide(self, other) + + def __rfloordiv__(self, other): + """ + Divide self into other, and return a new masked array. + + """ + return floor_divide(other, self) + + def __pow__(self, other): + """ + Raise self to the power other, masking the potential NaNs/Infs + + """ + if self._delegate_binop(other): + return NotImplemented + return power(self, other) + + def __rpow__(self, other): + """ + Raise other to the power self, masking the potential NaNs/Infs + + """ + return power(other, self) + + def __iadd__(self, other): + """ + Add other to self in-place. + + """ + m = getmask(other) + if self._mask is nomask: + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m + elif m is not nomask: + self._mask += m + other_data = getdata(other) + other_data = np.where(self._mask, other_data.dtype.type(0), other_data) + self._data.__iadd__(other_data) + return self + + def __isub__(self, other): + """ + Subtract other from self in-place. + + """ + m = getmask(other) + if self._mask is nomask: + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m + elif m is not nomask: + self._mask += m + other_data = getdata(other) + other_data = np.where(self._mask, other_data.dtype.type(0), other_data) + self._data.__isub__(other_data) + return self + + def __imul__(self, other): + """ + Multiply self by other in-place. + + """ + m = getmask(other) + if self._mask is nomask: + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m + elif m is not nomask: + self._mask += m + other_data = getdata(other) + other_data = np.where(self._mask, other_data.dtype.type(1), other_data) + self._data.__imul__(other_data) + return self + + def __ifloordiv__(self, other): + """ + Floor divide self by other in-place. + + """ + other_data = getdata(other) + dom_mask = _DomainSafeDivide().__call__(self._data, other_data) + other_mask = getmask(other) + new_mask = mask_or(other_mask, dom_mask) + # The following 3 lines control the domain filling + if dom_mask.any(): + (_, fval) = ufunc_fills[np.floor_divide] + other_data = np.where( + dom_mask, other_data.dtype.type(fval), other_data) + self._mask |= new_mask + other_data = np.where(self._mask, other_data.dtype.type(1), other_data) + self._data.__ifloordiv__(other_data) + return self + + def __itruediv__(self, other): + """ + True divide self by other in-place. + + """ + other_data = getdata(other) + dom_mask = _DomainSafeDivide().__call__(self._data, other_data) + other_mask = getmask(other) + new_mask = mask_or(other_mask, dom_mask) + # The following 3 lines control the domain filling + if dom_mask.any(): + (_, fval) = ufunc_fills[np.true_divide] + other_data = np.where( + dom_mask, other_data.dtype.type(fval), other_data) + self._mask |= new_mask + other_data = np.where(self._mask, other_data.dtype.type(1), other_data) + self._data.__itruediv__(other_data) + return self + + def __ipow__(self, other): + """ + Raise self to the power other, in place. + + """ + other_data = getdata(other) + other_data = np.where(self._mask, other_data.dtype.type(1), other_data) + other_mask = getmask(other) + with np.errstate(divide='ignore', invalid='ignore'): + self._data.__ipow__(other_data) + invalid = np.logical_not(np.isfinite(self._data)) + if invalid.any(): + if self._mask is not nomask: + self._mask |= invalid + else: + self._mask = invalid + np.copyto(self._data, self.fill_value, where=invalid) + new_mask = mask_or(other_mask, invalid) + self._mask = mask_or(self._mask, new_mask) + return self + + def __float__(self): + """ + Convert to float. + + """ + if self.size > 1: + raise TypeError("Only length-1 arrays can be converted " + "to Python scalars") + elif self._mask: + warnings.warn("Warning: converting a masked element to nan.", stacklevel=2) + return np.nan + return float(self.item()) + + def __int__(self): + """ + Convert to int. + + """ + if self.size > 1: + raise TypeError("Only length-1 arrays can be converted " + "to Python scalars") + elif self._mask: + raise MaskError('Cannot convert masked element to a Python int.') + return int(self.item()) + + @property + def imag(self): + """ + The imaginary part of the masked array. + + This property is a view on the imaginary part of this `MaskedArray`. + + See Also + -------- + real + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) + >>> x.imag + masked_array(data=[1.0, --, 1.6], + mask=[False, True, False], + fill_value=1e+20) + + """ + result = self._data.imag.view(type(self)) + result.__setmask__(self._mask) + return result + + # kept for compatibility + get_imag = imag.fget + + @property + def real(self): + """ + The real part of the masked array. + + This property is a view on the real part of this `MaskedArray`. + + See Also + -------- + imag + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) + >>> x.real + masked_array(data=[1.0, --, 3.45], + mask=[False, True, False], + fill_value=1e+20) + + """ + result = self._data.real.view(type(self)) + result.__setmask__(self._mask) + return result + + # kept for compatibility + get_real = real.fget + + def count(self, axis=None, keepdims=np._NoValue): + """ + Count the non-masked elements of the array along the given axis. + + Parameters + ---------- + axis : None or int or tuple of ints, optional + Axis or axes along which the count is performed. + The default, None, performs the count over all + the dimensions of the input array. `axis` may be negative, in + which case it counts from the last to the first axis. + If this is a tuple of ints, the count is performed on multiple + axes, instead of a single axis or all the axes as before. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + Returns + ------- + result : ndarray or scalar + An array with the same shape as the input array, with the specified + axis removed. If the array is a 0-d array, or if `axis` is None, a + scalar is returned. + + See Also + -------- + ma.count_masked : Count masked elements in array or along a given axis. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.arange(6).reshape((2, 3)) + >>> a[1, :] = ma.masked + >>> a + masked_array( + data=[[0, 1, 2], + [--, --, --]], + mask=[[False, False, False], + [ True, True, True]], + fill_value=999999) + >>> a.count() + 3 + + When the `axis` keyword is specified an array of appropriate size is + returned. + + >>> a.count(axis=0) + array([1, 1, 1]) + >>> a.count(axis=1) + array([3, 0]) + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + m = self._mask + # special case for matrices (we assume no other subclasses modify + # their dimensions) + if isinstance(self.data, np.matrix): + if m is nomask: + m = np.zeros(self.shape, dtype=np.bool) + m = m.view(type(self.data)) + + if m is nomask: + # compare to _count_reduce_items in _methods.py + + if self.shape == (): + if axis not in (None, 0): + raise np.exceptions.AxisError(axis=axis, ndim=self.ndim) + return 1 + elif axis is None: + if kwargs.get('keepdims'): + return np.array(self.size, dtype=np.intp, ndmin=self.ndim) + return self.size + + axes = normalize_axis_tuple(axis, self.ndim) + items = 1 + for ax in axes: + items *= self.shape[ax] + + if kwargs.get('keepdims'): + out_dims = list(self.shape) + for a in axes: + out_dims[a] = 1 + else: + out_dims = [d for n, d in enumerate(self.shape) + if n not in axes] + # make sure to return a 0-d array if axis is supplied + return np.full(out_dims, items, dtype=np.intp) + + # take care of the masked singleton + if self is masked: + return 0 + + return (~m).sum(axis=axis, dtype=np.intp, **kwargs) + + def ravel(self, order='C'): + """ + Returns a 1D version of self, as a view. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + The elements of `a` are read using this index order. 'C' means to + index the elements in C-like order, with the last axis index + changing fastest, back to the first axis index changing slowest. + 'F' means to index the elements in Fortran-like index order, with + the first index changing fastest, and the last index changing + slowest. Note that the 'C' and 'F' options take no account of the + memory layout of the underlying array, and only refer to the order + of axis indexing. 'A' means to read the elements in Fortran-like + index order if `m` is Fortran *contiguous* in memory, C-like order + otherwise. 'K' means to read the elements in the order they occur + in memory, except for reversing the data when strides are negative. + By default, 'C' index order is used. + (Masked arrays currently use 'A' on the data when 'K' is passed.) + + Returns + ------- + MaskedArray + Output view is of shape ``(self.size,)`` (or + ``(np.ma.product(self.shape),)``). + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) + >>> x + masked_array( + data=[[1, --, 3], + [--, 5, --], + [7, --, 9]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> x.ravel() + masked_array(data=[1, --, 3, --, 5, --, 7, --, 9], + mask=[False, True, False, True, False, True, False, True, + False], + fill_value=999999) + + """ + # The order of _data and _mask could be different (it shouldn't be + # normally). Passing order `K` or `A` would be incorrect. + # So we ignore the mask memory order. + # TODO: We don't actually support K, so use A instead. We could + # try to guess this correct by sorting strides or deprecate. + if order in "kKaA": + order = "F" if self._data.flags.fnc else "C" + r = ndarray.ravel(self._data, order=order).view(type(self)) + r._update_from(self) + if self._mask is not nomask: + r._mask = ndarray.ravel(self._mask, order=order).reshape(r.shape) + else: + r._mask = nomask + return r + + def reshape(self, *s, **kwargs): + """ + Give a new shape to the array without changing its data. + + Returns a masked array containing the same data, but with a new shape. + The result is a view on the original array; if this is not possible, a + ValueError is raised. + + Parameters + ---------- + shape : int or tuple of ints + The new shape should be compatible with the original shape. If an + integer is supplied, then the result will be a 1-D array of that + length. + order : {'C', 'F'}, optional + Determines whether the array data should be viewed as in C + (row-major) or FORTRAN (column-major) order. + + Returns + ------- + reshaped_array : array + A new view on the array. + + See Also + -------- + reshape : Equivalent function in the masked array module. + numpy.ndarray.reshape : Equivalent method on ndarray object. + numpy.reshape : Equivalent function in the NumPy module. + + Notes + ----- + The reshaping operation cannot guarantee that a copy will not be made, + to modify the shape in place, use ``a.shape = s`` + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1]) + >>> x + masked_array( + data=[[--, 2], + [3, --]], + mask=[[ True, False], + [False, True]], + fill_value=999999) + >>> x = x.reshape((4,1)) + >>> x + masked_array( + data=[[--], + [2], + [3], + [--]], + mask=[[ True], + [False], + [False], + [ True]], + fill_value=999999) + + """ + result = self._data.reshape(*s, **kwargs).view(type(self)) + result._update_from(self) + mask = self._mask + if mask is not nomask: + result._mask = mask.reshape(*s, **kwargs) + return result + + def resize(self, newshape, refcheck=True, order=False): + """ + .. warning:: + + This method does nothing, except raise a ValueError exception. A + masked array does not own its data and therefore cannot safely be + resized in place. Use the `numpy.ma.resize` function instead. + + This method is difficult to implement safely and may be deprecated in + future releases of NumPy. + + """ + # Note : the 'order' keyword looks broken, let's just drop it + errmsg = "A masked array does not own its data "\ + "and therefore cannot be resized.\n" \ + "Use the numpy.ma.resize function instead." + raise ValueError(errmsg) + + def put(self, indices, values, mode='raise'): + """ + Set storage-indexed locations to corresponding values. + + Sets self._data.flat[n] = values[n] for each n in indices. + If `values` is shorter than `indices` then it will repeat. + If `values` has some masked values, the initial mask is updated + in consequence, else the corresponding values are unmasked. + + Parameters + ---------- + indices : 1-D array_like + Target indices, interpreted as integers. + values : array_like + Values to place in self._data copy at target indices. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. + 'raise' : raise an error. + 'wrap' : wrap around. + 'clip' : clip to the range. + + Notes + ----- + `values` can be a scalar or length 1 array. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) + >>> x + masked_array( + data=[[1, --, 3], + [--, 5, --], + [7, --, 9]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> x.put([0,4,8],[10,20,30]) + >>> x + masked_array( + data=[[10, --, 3], + [--, 20, --], + [7, --, 30]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + + >>> x.put(4,999) + >>> x + masked_array( + data=[[10, --, 3], + [--, 999, --], + [7, --, 30]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + + """ + # Hard mask: Get rid of the values/indices that fall on masked data + if self._hardmask and self._mask is not nomask: + mask = self._mask[indices] + indices = narray(indices, copy=None) + values = narray(values, copy=None, subok=True) + values.resize(indices.shape) + indices = indices[~mask] + values = values[~mask] + + self._data.put(indices, values, mode=mode) + + # short circuit if neither self nor values are masked + if self._mask is nomask and getmask(values) is nomask: + return + + m = getmaskarray(self) + + if getmask(values) is nomask: + m.put(indices, False, mode=mode) + else: + m.put(indices, values._mask, mode=mode) + m = make_mask(m, copy=False, shrink=True) + self._mask = m + return + + def ids(self): + """ + Return the addresses of the data and mask areas. + + Parameters + ---------- + None + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1]) + >>> x.ids() + (166670640, 166659832) # may vary + + If the array has no mask, the address of `nomask` is returned. This address + is typically not close to the data in memory: + + >>> x = np.ma.array([1, 2, 3]) + >>> x.ids() + (166691080, 3083169284) # may vary + + """ + if self._mask is nomask: + return (self.ctypes.data, id(nomask)) + return (self.ctypes.data, self._mask.ctypes.data) + + def iscontiguous(self): + """ + Return a boolean indicating whether the data is contiguous. + + Parameters + ---------- + None + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([1, 2, 3]) + >>> x.iscontiguous() + True + + `iscontiguous` returns one of the flags of the masked array: + + >>> x.flags + C_CONTIGUOUS : True + F_CONTIGUOUS : True + OWNDATA : False + WRITEABLE : True + ALIGNED : True + WRITEBACKIFCOPY : False + + """ + return self.flags['CONTIGUOUS'] + + def all(self, axis=None, out=None, keepdims=np._NoValue): + """ + Returns True if all elements evaluate to True. + + The output array is masked where all the values along the given axis + are masked: if the output would have been a scalar and that all the + values are masked, then the output is `masked`. + + Refer to `numpy.all` for full documentation. + + See Also + -------- + numpy.ndarray.all : corresponding function for ndarrays + numpy.all : equivalent function + + Examples + -------- + >>> import numpy as np + >>> np.ma.array([1,2,3]).all() + True + >>> a = np.ma.array([1,2,3], mask=True) + >>> (a.all() is np.ma.masked) + True + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + mask = _check_mask_axis(self._mask, axis, **kwargs) + if out is None: + d = self.filled(True).all(axis=axis, **kwargs).view(type(self)) + if d.ndim: + d.__setmask__(mask) + elif mask: + return masked + return d + self.filled(True).all(axis=axis, out=out, **kwargs) + if isinstance(out, MaskedArray): + if out.ndim or mask: + out.__setmask__(mask) + return out + + def any(self, axis=None, out=None, keepdims=np._NoValue): + """ + Returns True if any of the elements of `a` evaluate to True. + + Masked values are considered as False during computation. + + Refer to `numpy.any` for full documentation. + + See Also + -------- + numpy.ndarray.any : corresponding function for ndarrays + numpy.any : equivalent function + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + mask = _check_mask_axis(self._mask, axis, **kwargs) + if out is None: + d = self.filled(False).any(axis=axis, **kwargs).view(type(self)) + if d.ndim: + d.__setmask__(mask) + elif mask: + d = masked + return d + self.filled(False).any(axis=axis, out=out, **kwargs) + if isinstance(out, MaskedArray): + if out.ndim or mask: + out.__setmask__(mask) + return out + + def nonzero(self): + """ + Return the indices of unmasked elements that are not zero. + + Returns a tuple of arrays, one for each dimension, containing the + indices of the non-zero elements in that dimension. The corresponding + non-zero values can be obtained with:: + + a[a.nonzero()] + + To group the indices by element, rather than dimension, use + instead:: + + np.transpose(a.nonzero()) + + The result of this is always a 2d array, with a row for each non-zero + element. + + Parameters + ---------- + None + + Returns + ------- + tuple_of_arrays : tuple + Indices of elements that are non-zero. + + See Also + -------- + numpy.nonzero : + Function operating on ndarrays. + flatnonzero : + Return indices that are non-zero in the flattened version of the input + array. + numpy.ndarray.nonzero : + Equivalent ndarray method. + count_nonzero : + Counts the number of non-zero elements in the input array. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = ma.array(np.eye(3)) + >>> x + masked_array( + data=[[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]], + mask=False, + fill_value=1e+20) + >>> x.nonzero() + (array([0, 1, 2]), array([0, 1, 2])) + + Masked elements are ignored. + + >>> x[1, 1] = ma.masked + >>> x + masked_array( + data=[[1.0, 0.0, 0.0], + [0.0, --, 0.0], + [0.0, 0.0, 1.0]], + mask=[[False, False, False], + [False, True, False], + [False, False, False]], + fill_value=1e+20) + >>> x.nonzero() + (array([0, 2]), array([0, 2])) + + Indices can also be grouped by element. + + >>> np.transpose(x.nonzero()) + array([[0, 0], + [2, 2]]) + + A common use for ``nonzero`` is to find the indices of an array, where + a condition is True. Given an array `a`, the condition `a` > 3 is a + boolean array and since False is interpreted as 0, ma.nonzero(a > 3) + yields the indices of the `a` where the condition is true. + + >>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]]) + >>> a > 3 + masked_array( + data=[[False, False, False], + [ True, True, True], + [ True, True, True]], + mask=False, + fill_value=True) + >>> ma.nonzero(a > 3) + (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) + + The ``nonzero`` method of the condition array can also be called. + + >>> (a > 3).nonzero() + (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) + + """ + return np.asarray(self.filled(0)).nonzero() + + def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): + """ + (this docstring should be overwritten) + """ + # !!!: implement out + test! + m = self._mask + if m is nomask: + result = super().trace(offset=offset, axis1=axis1, axis2=axis2, + out=out) + return result.astype(dtype) + else: + D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2) + return D.astype(dtype).filled(0).sum(axis=-1, out=out) + trace.__doc__ = ndarray.trace.__doc__ + + def dot(self, b, out=None, strict=False): + """ + a.dot(b, out=None) + + Masked dot product of two arrays. Note that `out` and `strict` are + located in different positions than in `ma.dot`. In order to + maintain compatibility with the functional version, it is + recommended that the optional arguments be treated as keyword only. + At some point that may be mandatory. + + Parameters + ---------- + b : masked_array_like + Inputs array. + out : masked_array, optional + Output argument. This must have the exact kind that would be + returned if it was not used. In particular, it must have the + right type, must be C-contiguous, and its dtype must be the + dtype that would be returned for `ma.dot(a,b)`. This is a + performance feature. Therefore, if these conditions are not + met, an exception is raised, instead of attempting to be + flexible. + strict : bool, optional + Whether masked data are propagated (True) or set to 0 (False) + for the computation. Default is False. Propagating the mask + means that if a masked value appears in a row or column, the + whole row or column is considered masked. + + See Also + -------- + numpy.ma.dot : equivalent function + + """ + return dot(self, b, out=out, strict=strict) + + def sum(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): + """ + Return the sum of the array elements over the given axis. + + Masked elements are set to 0 internally. + + Refer to `numpy.sum` for full documentation. + + See Also + -------- + numpy.ndarray.sum : corresponding function for ndarrays + numpy.sum : equivalent function + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) + >>> x + masked_array( + data=[[1, --, 3], + [--, 5, --], + [7, --, 9]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> x.sum() + 25 + >>> x.sum(axis=1) + masked_array(data=[4, 5, 16], + mask=[False, False, False], + fill_value=999999) + >>> x.sum(axis=0) + masked_array(data=[8, 5, 12], + mask=[False, False, False], + fill_value=999999) + >>> print(type(x.sum(axis=0, dtype=np.int64)[0])) + + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + _mask = self._mask + newmask = _check_mask_axis(_mask, axis, **kwargs) + # No explicit output + if out is None: + result = self.filled(0).sum(axis, dtype=dtype, **kwargs) + rndim = getattr(result, 'ndim', 0) + if rndim: + result = result.view(type(self)) + result.__setmask__(newmask) + elif newmask: + result = masked + return result + # Explicit output + result = self.filled(0).sum(axis, dtype=dtype, out=out, **kwargs) + if isinstance(out, MaskedArray): + outmask = getmask(out) + if outmask is nomask: + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = newmask + return out + + def cumsum(self, axis=None, dtype=None, out=None): + """ + Return the cumulative sum of the array elements over the given axis. + + Masked values are set to 0 internally during the computation. + However, their position is saved, and the result will be masked at + the same locations. + + Refer to `numpy.cumsum` for full documentation. + + Notes + ----- + The mask is lost if `out` is not a valid :class:`ma.MaskedArray` ! + + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + See Also + -------- + numpy.ndarray.cumsum : corresponding function for ndarrays + numpy.cumsum : equivalent function + + Examples + -------- + >>> import numpy as np + >>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0]) + >>> marr.cumsum() + masked_array(data=[0, 1, 3, --, --, --, 9, 16, 24, 33], + mask=[False, False, False, True, True, True, False, False, + False, False], + fill_value=999999) + + """ + result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out) + if out is not None: + if isinstance(out, MaskedArray): + out.__setmask__(self.mask) + return out + result = result.view(type(self)) + result.__setmask__(self._mask) + return result + + def prod(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): + """ + Return the product of the array elements over the given axis. + + Masked elements are set to 1 internally for computation. + + Refer to `numpy.prod` for full documentation. + + Notes + ----- + Arithmetic is modular when using integer types, and no error is raised + on overflow. + + See Also + -------- + numpy.ndarray.prod : corresponding function for ndarrays + numpy.prod : equivalent function + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + _mask = self._mask + newmask = _check_mask_axis(_mask, axis, **kwargs) + # No explicit output + if out is None: + result = self.filled(1).prod(axis, dtype=dtype, **kwargs) + rndim = getattr(result, 'ndim', 0) + if rndim: + result = result.view(type(self)) + result.__setmask__(newmask) + elif newmask: + result = masked + return result + # Explicit output + result = self.filled(1).prod(axis, dtype=dtype, out=out, **kwargs) + if isinstance(out, MaskedArray): + outmask = getmask(out) + if outmask is nomask: + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = newmask + return out + product = prod + + def cumprod(self, axis=None, dtype=None, out=None): + """ + Return the cumulative product of the array elements over the given axis. + + Masked values are set to 1 internally during the computation. + However, their position is saved, and the result will be masked at + the same locations. + + Refer to `numpy.cumprod` for full documentation. + + Notes + ----- + The mask is lost if `out` is not a valid MaskedArray ! + + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + See Also + -------- + numpy.ndarray.cumprod : corresponding function for ndarrays + numpy.cumprod : equivalent function + """ + result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out) + if out is not None: + if isinstance(out, MaskedArray): + out.__setmask__(self._mask) + return out + result = result.view(type(self)) + result.__setmask__(self._mask) + return result + + def mean(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): + """ + Returns the average of the array elements along given axis. + + Masked entries are ignored, and result elements which are not + finite will be masked. + + Refer to `numpy.mean` for full documentation. + + See Also + -------- + numpy.ndarray.mean : corresponding function for ndarrays + numpy.mean : Equivalent function + numpy.ma.average : Weighted average. + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array([1,2,3], mask=[False, False, True]) + >>> a + masked_array(data=[1, 2, --], + mask=[False, False, True], + fill_value=999999) + >>> a.mean() + 1.5 + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + if self._mask is nomask: + result = super().mean(axis=axis, dtype=dtype, **kwargs)[()] + else: + is_float16_result = False + if dtype is None: + if issubclass(self.dtype.type, (ntypes.integer, ntypes.bool)): + dtype = mu.dtype('f8') + elif issubclass(self.dtype.type, ntypes.float16): + dtype = mu.dtype('f4') + is_float16_result = True + dsum = self.sum(axis=axis, dtype=dtype, **kwargs) + cnt = self.count(axis=axis, **kwargs) + if cnt.shape == () and (cnt == 0): + result = masked + elif is_float16_result: + result = self.dtype.type(dsum * 1. / cnt) + else: + result = dsum * 1. / cnt + if out is not None: + out.flat = result + if isinstance(out, MaskedArray): + outmask = getmask(out) + if outmask is nomask: + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = getmask(result) + return out + return result + + def anom(self, axis=None, dtype=None): + """ + Compute the anomalies (deviations from the arithmetic mean) + along the given axis. + + Returns an array of anomalies, with the same shape as the input and + where the arithmetic mean is computed along the given axis. + + Parameters + ---------- + axis : int, optional + Axis over which the anomalies are taken. + The default is to use the mean of the flattened array as reference. + dtype : dtype, optional + Type to use in computing the variance. For arrays of integer type + the default is float32; for arrays of float types it is the same as + the array type. + + See Also + -------- + mean : Compute the mean of the array. + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array([1,2,3]) + >>> a.anom() + masked_array(data=[-1., 0., 1.], + mask=False, + fill_value=1e+20) + + """ + m = self.mean(axis, dtype) + if not axis: + return self - m + else: + return self - expand_dims(m, axis) + + def var(self, axis=None, dtype=None, out=None, ddof=0, + keepdims=np._NoValue, mean=np._NoValue): + """ + Returns the variance of the array elements along given axis. + + Masked entries are ignored, and result elements which are not + finite will be masked. + + Refer to `numpy.var` for full documentation. + + See Also + -------- + numpy.ndarray.var : corresponding function for ndarrays + numpy.var : Equivalent function + """ + kwargs = {} + + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + + # Easy case: nomask, business as usual + if self._mask is nomask: + + if mean is not np._NoValue: + kwargs['mean'] = mean + + ret = super().var(axis=axis, dtype=dtype, out=out, ddof=ddof, + **kwargs)[()] + if out is not None: + if isinstance(out, MaskedArray): + out.__setmask__(nomask) + return out + return ret + + # Some data are masked, yay! + cnt = self.count(axis=axis, **kwargs) - ddof + + if mean is not np._NoValue: + danom = self - mean + else: + danom = self - self.mean(axis, dtype, keepdims=True) + + if iscomplexobj(self): + danom = umath.absolute(danom) ** 2 + else: + danom *= danom + dvar = divide(danom.sum(axis, **kwargs), cnt).view(type(self)) + # Apply the mask if it's not a scalar + if dvar.ndim: + dvar._mask = mask_or(self._mask.all(axis, **kwargs), (cnt <= 0)) + dvar._update_from(self) + elif getmask(dvar): + # Make sure that masked is returned when the scalar is masked. + dvar = masked + if out is not None: + if isinstance(out, MaskedArray): + out.flat = 0 + out.__setmask__(True) + elif out.dtype.kind in 'biu': + errmsg = "Masked data information would be lost in one or "\ + "more location." + raise MaskError(errmsg) + else: + out.flat = np.nan + return out + # In case with have an explicit output + if out is not None: + # Set the data + out.flat = dvar + # Set the mask if needed + if isinstance(out, MaskedArray): + out.__setmask__(dvar.mask) + return out + return dvar + var.__doc__ = np.var.__doc__ + + def std(self, axis=None, dtype=None, out=None, ddof=0, + keepdims=np._NoValue, mean=np._NoValue): + """ + Returns the standard deviation of the array elements along given axis. + + Masked entries are ignored. + + Refer to `numpy.std` for full documentation. + + See Also + -------- + numpy.ndarray.std : corresponding function for ndarrays + numpy.std : Equivalent function + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + dvar = self.var(axis, dtype, out, ddof, **kwargs) + if dvar is not masked: + if out is not None: + np.power(out, 0.5, out=out, casting='unsafe') + return out + dvar = sqrt(dvar) + return dvar + + def round(self, decimals=0, out=None): + """ + Return each element rounded to the given number of decimals. + + Refer to `numpy.around` for full documentation. + + See Also + -------- + numpy.ndarray.round : corresponding function for ndarrays + numpy.around : equivalent function + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = ma.array([1.35, 2.5, 1.5, 1.75, 2.25, 2.75], + ... mask=[0, 0, 0, 1, 0, 0]) + >>> ma.round(x) + masked_array(data=[1.0, 2.0, 2.0, --, 2.0, 3.0], + mask=[False, False, False, True, False, False], + fill_value=1e+20) + + """ + result = self._data.round(decimals=decimals, out=out).view(type(self)) + if result.ndim > 0: + result._mask = self._mask + result._update_from(self) + elif self._mask: + # Return masked when the scalar is masked + result = masked + # No explicit output: we're done + if out is None: + return result + if isinstance(out, MaskedArray): + out.__setmask__(self._mask) + return out + + def argsort(self, axis=np._NoValue, kind=None, order=None, endwith=True, + fill_value=None, *, stable=False): + """ + Return an ndarray of indices that sort the array along the + specified axis. Masked values are filled beforehand to + `fill_value`. + + Parameters + ---------- + axis : int, optional + Axis along which to sort. If None, the default, the flattened array + is used. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional + The sorting algorithm used. + order : str or list of str, optional + When `a` is an array with fields defined, this argument specifies + which fields to compare first, second, etc. Not all fields need be + specified. + endwith : {True, False}, optional + Whether missing values (if any) should be treated as the largest values + (True) or the smallest values (False) + When the array contains unmasked values at the same extremes of the + datatype, the ordering of these values and the masked values is + undefined. + fill_value : scalar or None, optional + Value used internally for the masked values. + If ``fill_value`` is not None, it supersedes ``endwith``. + stable : bool, optional + Only for compatibility with ``np.argsort``. Ignored. + + Returns + ------- + index_array : ndarray, int + Array of indices that sort `a` along the specified axis. + In other words, ``a[index_array]`` yields a sorted `a`. + + See Also + -------- + ma.MaskedArray.sort : Describes sorting algorithms used. + lexsort : Indirect stable sort with multiple keys. + numpy.ndarray.sort : Inplace sort. + + Notes + ----- + See `sort` for notes on the different sorting algorithms. + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array([3,2,1], mask=[False, False, True]) + >>> a + masked_array(data=[3, 2, --], + mask=[False, False, True], + fill_value=999999) + >>> a.argsort() + array([1, 0, 2]) + + """ + if stable: + raise ValueError( + "`stable` parameter is not supported for masked arrays." + ) + + # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default + if axis is np._NoValue: + axis = _deprecate_argsort_axis(self) + + if fill_value is None: + if endwith: + # nan > inf + if np.issubdtype(self.dtype, np.floating): + fill_value = np.nan + else: + fill_value = minimum_fill_value(self) + else: + fill_value = maximum_fill_value(self) + + filled = self.filled(fill_value) + return filled.argsort(axis=axis, kind=kind, order=order) + + def argmin(self, axis=None, fill_value=None, out=None, *, + keepdims=np._NoValue): + """ + Return array of indices to the minimum values along the given axis. + + Parameters + ---------- + axis : {None, integer} + If None, the index is into the flattened array, otherwise along + the specified axis + fill_value : scalar or None, optional + Value used to fill in the masked values. If None, the output of + minimum_fill_value(self._data) is used instead. + out : {None, array}, optional + Array into which the result can be placed. Its type is preserved + and it must be of the right shape to hold the output. + + Returns + ------- + ndarray or scalar + If multi-dimension input, returns a new ndarray of indices to the + minimum values along the given axis. Otherwise, returns a scalar + of index to the minimum values along the given axis. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array(np.arange(4), mask=[1,1,0,0]) + >>> x.shape = (2,2) + >>> x + masked_array( + data=[[--, --], + [2, 3]], + mask=[[ True, True], + [False, False]], + fill_value=999999) + >>> x.argmin(axis=0, fill_value=-1) + array([0, 0]) + >>> x.argmin(axis=0, fill_value=9) + array([1, 1]) + + """ + if fill_value is None: + fill_value = minimum_fill_value(self) + d = self.filled(fill_value).view(ndarray) + keepdims = False if keepdims is np._NoValue else bool(keepdims) + return d.argmin(axis, out=out, keepdims=keepdims) + + def argmax(self, axis=None, fill_value=None, out=None, *, + keepdims=np._NoValue): + """ + Returns array of indices of the maximum values along the given axis. + Masked values are treated as if they had the value fill_value. + + Parameters + ---------- + axis : {None, integer} + If None, the index is into the flattened array, otherwise along + the specified axis + fill_value : scalar or None, optional + Value used to fill in the masked values. If None, the output of + maximum_fill_value(self._data) is used instead. + out : {None, array}, optional + Array into which the result can be placed. Its type is preserved + and it must be of the right shape to hold the output. + + Returns + ------- + index_array : {integer_array} + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(6).reshape(2,3) + >>> a.argmax() + 5 + >>> a.argmax(0) + array([1, 1, 1]) + >>> a.argmax(1) + array([2, 2]) + + """ + if fill_value is None: + fill_value = maximum_fill_value(self._data) + d = self.filled(fill_value).view(ndarray) + keepdims = False if keepdims is np._NoValue else bool(keepdims) + return d.argmax(axis, out=out, keepdims=keepdims) + + def sort(self, axis=-1, kind=None, order=None, endwith=True, + fill_value=None, *, stable=False): + """ + Sort the array, in-place + + Parameters + ---------- + a : array_like + Array to be sorted. + axis : int, optional + Axis along which to sort. If None, the array is flattened before + sorting. The default is -1, which sorts along the last axis. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional + The sorting algorithm used. + order : list, optional + When `a` is a structured array, this argument specifies which fields + to compare first, second, and so on. This list does not need to + include all of the fields. + endwith : {True, False}, optional + Whether missing values (if any) should be treated as the largest values + (True) or the smallest values (False) + When the array contains unmasked values sorting at the same extremes of the + datatype, the ordering of these values and the masked values is + undefined. + fill_value : scalar or None, optional + Value used internally for the masked values. + If ``fill_value`` is not None, it supersedes ``endwith``. + stable : bool, optional + Only for compatibility with ``np.sort``. Ignored. + + See Also + -------- + numpy.ndarray.sort : Method to sort an array in-place. + argsort : Indirect sort. + lexsort : Indirect stable sort on multiple keys. + searchsorted : Find elements in a sorted array. + + Notes + ----- + See ``sort`` for notes on the different sorting algorithms. + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) + >>> # Default + >>> a.sort() + >>> a + masked_array(data=[1, 3, 5, --, --], + mask=[False, False, False, True, True], + fill_value=999999) + + >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) + >>> # Put missing values in the front + >>> a.sort(endwith=False) + >>> a + masked_array(data=[--, --, 1, 3, 5], + mask=[ True, True, False, False, False], + fill_value=999999) + + >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) + >>> # fill_value takes over endwith + >>> a.sort(endwith=False, fill_value=3) + >>> a + masked_array(data=[1, --, --, 3, 5], + mask=[False, True, True, False, False], + fill_value=999999) + + """ + if stable: + raise ValueError( + "`stable` parameter is not supported for masked arrays." + ) + + if self._mask is nomask: + ndarray.sort(self, axis=axis, kind=kind, order=order) + return + + if self is masked: + return + + sidx = self.argsort(axis=axis, kind=kind, order=order, + fill_value=fill_value, endwith=endwith) + + self[...] = np.take_along_axis(self, sidx, axis=axis) + + def min(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + """ + Return the minimum along a given axis. + + Parameters + ---------- + axis : None or int or tuple of ints, optional + Axis along which to operate. By default, ``axis`` is None and the + flattened input is used. + If this is a tuple of ints, the minimum is selected over multiple + axes, instead of a single axis or all the axes as before. + out : array_like, optional + Alternative output array in which to place the result. Must be of + the same shape and buffer length as the expected output. + fill_value : scalar or None, optional + Value used to fill in the masked values. + If None, use the output of `minimum_fill_value`. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + Returns + ------- + amin : array_like + New array holding the result. + If ``out`` was specified, ``out`` is returned. + + See Also + -------- + ma.minimum_fill_value + Returns the minimum filling value for a given datatype. + + Examples + -------- + >>> import numpy.ma as ma + >>> x = [[1., -2., 3.], [0.2, -0.7, 0.1]] + >>> mask = [[1, 1, 0], [0, 0, 1]] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array( + data=[[--, --, 3.0], + [0.2, -0.7, --]], + mask=[[ True, True, False], + [False, False, True]], + fill_value=1e+20) + >>> ma.min(masked_x) + -0.7 + >>> ma.min(masked_x, axis=-1) + masked_array(data=[3.0, -0.7], + mask=[False, False], + fill_value=1e+20) + >>> ma.min(masked_x, axis=0, keepdims=True) + masked_array(data=[[0.2, -0.7, 3.0]], + mask=[[False, False, False]], + fill_value=1e+20) + >>> mask = [[1, 1, 1,], [1, 1, 1]] + >>> masked_x = ma.masked_array(x, mask) + >>> ma.min(masked_x, axis=0) + masked_array(data=[--, --, --], + mask=[ True, True, True], + fill_value=1e+20, + dtype=float64) + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + _mask = self._mask + newmask = _check_mask_axis(_mask, axis, **kwargs) + if fill_value is None: + fill_value = minimum_fill_value(self) + # No explicit output + if out is None: + result = self.filled(fill_value).min( + axis=axis, out=out, **kwargs).view(type(self)) + if result.ndim: + # Set the mask + result.__setmask__(newmask) + # Get rid of Infs + if newmask.ndim: + np.copyto(result, result.fill_value, where=newmask) + elif newmask: + result = masked + return result + # Explicit output + self.filled(fill_value).min(axis=axis, out=out, **kwargs) + if isinstance(out, MaskedArray): + outmask = getmask(out) + if outmask is nomask: + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = newmask + else: + if out.dtype.kind in 'biu': + errmsg = "Masked data information would be lost in one or more"\ + " location." + raise MaskError(errmsg) + np.copyto(out, np.nan, where=newmask) + return out + + def max(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + """ + Return the maximum along a given axis. + + Parameters + ---------- + axis : None or int or tuple of ints, optional + Axis along which to operate. By default, ``axis`` is None and the + flattened input is used. + If this is a tuple of ints, the maximum is selected over multiple + axes, instead of a single axis or all the axes as before. + out : array_like, optional + Alternative output array in which to place the result. Must + be of the same shape and buffer length as the expected output. + fill_value : scalar or None, optional + Value used to fill in the masked values. + If None, use the output of maximum_fill_value(). + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + Returns + ------- + amax : array_like + New array holding the result. + If ``out`` was specified, ``out`` is returned. + + See Also + -------- + ma.maximum_fill_value + Returns the maximum filling value for a given datatype. + + Examples + -------- + >>> import numpy.ma as ma + >>> x = [[-1., 2.5], [4., -2.], [3., 0.]] + >>> mask = [[0, 0], [1, 0], [1, 0]] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array( + data=[[-1.0, 2.5], + [--, -2.0], + [--, 0.0]], + mask=[[False, False], + [ True, False], + [ True, False]], + fill_value=1e+20) + >>> ma.max(masked_x) + 2.5 + >>> ma.max(masked_x, axis=0) + masked_array(data=[-1.0, 2.5], + mask=[False, False], + fill_value=1e+20) + >>> ma.max(masked_x, axis=1, keepdims=True) + masked_array( + data=[[2.5], + [-2.0], + [0.0]], + mask=[[False], + [False], + [False]], + fill_value=1e+20) + >>> mask = [[1, 1], [1, 1], [1, 1]] + >>> masked_x = ma.masked_array(x, mask) + >>> ma.max(masked_x, axis=1) + masked_array(data=[--, --, --], + mask=[ True, True, True], + fill_value=1e+20, + dtype=float64) + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + _mask = self._mask + newmask = _check_mask_axis(_mask, axis, **kwargs) + if fill_value is None: + fill_value = maximum_fill_value(self) + # No explicit output + if out is None: + result = self.filled(fill_value).max( + axis=axis, out=out, **kwargs).view(type(self)) + if result.ndim: + # Set the mask + result.__setmask__(newmask) + # Get rid of Infs + if newmask.ndim: + np.copyto(result, result.fill_value, where=newmask) + elif newmask: + result = masked + return result + # Explicit output + self.filled(fill_value).max(axis=axis, out=out, **kwargs) + if isinstance(out, MaskedArray): + outmask = getmask(out) + if outmask is nomask: + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = newmask + else: + + if out.dtype.kind in 'biu': + errmsg = "Masked data information would be lost in one or more"\ + " location." + raise MaskError(errmsg) + np.copyto(out, np.nan, where=newmask) + return out + + def ptp(self, axis=None, out=None, fill_value=None, keepdims=False): + """ + Return (maximum - minimum) along the given dimension + (i.e. peak-to-peak value). + + .. warning:: + `ptp` preserves the data type of the array. This means the + return value for an input of signed integers with n bits + (e.g. `np.int8`, `np.int16`, etc) is also a signed integer + with n bits. In that case, peak-to-peak values greater than + ``2**(n-1)-1`` will be returned as negative values. An example + with a work-around is shown below. + + Parameters + ---------- + axis : {None, int}, optional + Axis along which to find the peaks. If None (default) the + flattened array is used. + out : {None, array_like}, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. + fill_value : scalar or None, optional + Value used to fill in the masked values. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + Returns + ------- + ptp : ndarray. + A new array holding the result, unless ``out`` was + specified, in which case a reference to ``out`` is returned. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.MaskedArray([[4, 9, 2, 10], + ... [6, 9, 7, 12]]) + + >>> x.ptp(axis=1) + masked_array(data=[8, 6], + mask=False, + fill_value=999999) + + >>> x.ptp(axis=0) + masked_array(data=[2, 0, 5, 2], + mask=False, + fill_value=999999) + + >>> x.ptp() + 10 + + This example shows that a negative value can be returned when + the input is an array of signed integers. + + >>> y = np.ma.MaskedArray([[1, 127], + ... [0, 127], + ... [-1, 127], + ... [-2, 127]], dtype=np.int8) + >>> y.ptp(axis=1) + masked_array(data=[ 126, 127, -128, -127], + mask=False, + fill_value=np.int64(999999), + dtype=int8) + + A work-around is to use the `view()` method to view the result as + unsigned integers with the same bit width: + + >>> y.ptp(axis=1).view(np.uint8) + masked_array(data=[126, 127, 128, 129], + mask=False, + fill_value=np.uint64(999999), + dtype=uint8) + """ + if out is None: + result = self.max(axis=axis, fill_value=fill_value, + keepdims=keepdims) + result -= self.min(axis=axis, fill_value=fill_value, + keepdims=keepdims) + return result + out.flat = self.max(axis=axis, out=out, fill_value=fill_value, + keepdims=keepdims) + min_value = self.min(axis=axis, fill_value=fill_value, + keepdims=keepdims) + np.subtract(out, min_value, out=out, casting='unsafe') + return out + + def partition(self, *args, **kwargs): + warnings.warn("Warning: 'partition' will ignore the 'mask' " + f"of the {self.__class__.__name__}.", + stacklevel=2) + return super().partition(*args, **kwargs) + + def argpartition(self, *args, **kwargs): + warnings.warn("Warning: 'argpartition' will ignore the 'mask' " + f"of the {self.__class__.__name__}.", + stacklevel=2) + return super().argpartition(*args, **kwargs) + + def take(self, indices, axis=None, out=None, mode='raise'): + """ + Take elements from a masked array along an axis. + + This function does the same thing as "fancy" indexing (indexing arrays + using arrays) for masked arrays. It can be easier to use if you need + elements along a given axis. + + Parameters + ---------- + a : masked_array + The source masked array. + indices : array_like + The indices of the values to extract. Also allow scalars for indices. + axis : int, optional + The axis over which to select values. By default, the flattened + input array is used. + out : MaskedArray, optional + If provided, the result will be placed in this array. It should + be of the appropriate shape and dtype. Note that `out` is always + buffered if `mode='raise'`; use other modes for better performance. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. + + * 'raise' -- raise an error (default) + * 'wrap' -- wrap around + * 'clip' -- clip to the range + + 'clip' mode means that all indices that are too large are replaced + by the index that addresses the last element along that axis. Note + that this disables indexing with negative numbers. + + Returns + ------- + out : MaskedArray + The returned array has the same type as `a`. + + See Also + -------- + numpy.take : Equivalent function for ndarrays. + compress : Take elements using a boolean mask. + take_along_axis : Take elements by matching the array and the index arrays. + + Notes + ----- + This function behaves similarly to `numpy.take`, but it handles masked + values. The mask is retained in the output array, and masked values + in the input array remain masked in the output. + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array([4, 3, 5, 7, 6, 8], mask=[0, 0, 1, 0, 1, 0]) + >>> indices = [0, 1, 4] + >>> np.ma.take(a, indices) + masked_array(data=[4, 3, --], + mask=[False, False, True], + fill_value=999999) + + When `indices` is not one-dimensional, the output also has these dimensions: + + >>> np.ma.take(a, [[0, 1], [2, 3]]) + masked_array(data=[[4, 3], + [--, 7]], + mask=[[False, False], + [ True, False]], + fill_value=999999) + """ + (_data, _mask) = (self._data, self._mask) + cls = type(self) + # Make sure the indices are not masked + maskindices = getmask(indices) + if maskindices is not nomask: + indices = indices.filled(0) + # Get the data, promoting scalars to 0d arrays with [...] so that + # .view works correctly + if out is None: + out = _data.take(indices, axis=axis, mode=mode)[...].view(cls) + else: + np.take(_data, indices, axis=axis, mode=mode, out=out) + # Get the mask + if isinstance(out, MaskedArray): + if _mask is nomask: + outmask = maskindices + else: + outmask = _mask.take(indices, axis=axis, mode=mode) + outmask |= maskindices + out.__setmask__(outmask) + # demote 0d arrays back to scalars, for consistency with ndarray.take + return out[()] + + # Array methods + copy = _arraymethod('copy') + diagonal = _arraymethod('diagonal') + flatten = _arraymethod('flatten') + repeat = _arraymethod('repeat') + squeeze = _arraymethod('squeeze') + swapaxes = _arraymethod('swapaxes') + T = property(fget=lambda self: self.transpose()) + transpose = _arraymethod('transpose') + + @property + def mT(self): + """ + Return the matrix-transpose of the masked array. + + The matrix transpose is the transpose of the last two dimensions, even + if the array is of higher dimension. + + .. versionadded:: 2.0 + + Returns + ------- + result: MaskedArray + The masked array with the last two dimensions transposed + + Raises + ------ + ValueError + If the array is of dimension less than 2. + + See Also + -------- + ndarray.mT: + Equivalent method for arrays + """ + + if self.ndim < 2: + raise ValueError("matrix transpose with ndim < 2 is undefined") + + if self._mask is nomask: + return masked_array(data=self._data.mT) + else: + return masked_array(data=self.data.mT, mask=self.mask.mT) + + def tolist(self, fill_value=None): + """ + Return the data portion of the masked array as a hierarchical Python list. + + Data items are converted to the nearest compatible Python type. + Masked values are converted to `fill_value`. If `fill_value` is None, + the corresponding entries in the output list will be ``None``. + + Parameters + ---------- + fill_value : scalar, optional + The value to use for invalid entries. Default is None. + + Returns + ------- + result : list + The Python list representation of the masked array. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4) + >>> x.tolist() + [[1, None, 3], [None, 5, None], [7, None, 9]] + >>> x.tolist(-999) + [[1, -999, 3], [-999, 5, -999], [7, -999, 9]] + + """ + _mask = self._mask + # No mask ? Just return .data.tolist ? + if _mask is nomask: + return self._data.tolist() + # Explicit fill_value: fill the array and get the list + if fill_value is not None: + return self.filled(fill_value).tolist() + # Structured array. + names = self.dtype.names + if names: + result = self._data.astype([(_, object) for _ in names]) + for n in names: + result[n][_mask[n]] = None + return result.tolist() + # Standard arrays. + if _mask is nomask: + return [None] + # Set temps to save time when dealing w/ marrays. + inishape = self.shape + result = np.array(self._data.ravel(), dtype=object) + result[_mask.ravel()] = None + result.shape = inishape + return result.tolist() + + def tobytes(self, fill_value=None, order='C'): + """ + Return the array data as a string containing the raw bytes in the array. + + The array is filled with a fill value before the string conversion. + + Parameters + ---------- + fill_value : scalar, optional + Value used to fill in the masked values. Default is None, in which + case `MaskedArray.fill_value` is used. + order : {'C','F','A'}, optional + Order of the data item in the copy. Default is 'C'. + + - 'C' -- C order (row major). + - 'F' -- Fortran order (column major). + - 'A' -- Any, current order of array. + - None -- Same as 'A'. + + See Also + -------- + numpy.ndarray.tobytes + tolist, tofile + + Notes + ----- + As for `ndarray.tobytes`, information about the shape, dtype, etc., + but also about `fill_value`, will be lost. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) + >>> x.tobytes() + b'\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00' + + """ + return self.filled(fill_value).tobytes(order=order) + + def tofile(self, fid, sep="", format="%s"): + """ + Save a masked array to a file in binary format. + + .. warning:: + This function is not implemented yet. + + Raises + ------ + NotImplementedError + When `tofile` is called. + + """ + raise NotImplementedError("MaskedArray.tofile() not implemented yet.") + + def toflex(self): + """ + Transforms a masked array into a flexible-type array. + + The flexible type array that is returned will have two fields: + + * the ``_data`` field stores the ``_data`` part of the array. + * the ``_mask`` field stores the ``_mask`` part of the array. + + Parameters + ---------- + None + + Returns + ------- + record : ndarray + A new flexible-type `ndarray` with two fields: the first element + containing a value, the second element containing the corresponding + mask boolean. The returned record shape matches self.shape. + + Notes + ----- + A side-effect of transforming a masked array into a flexible `ndarray` is + that meta information (``fill_value``, ...) will be lost. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) + >>> x + masked_array( + data=[[1, --, 3], + [--, 5, --], + [7, --, 9]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> x.toflex() + array([[(1, False), (2, True), (3, False)], + [(4, True), (5, False), (6, True)], + [(7, False), (8, True), (9, False)]], + dtype=[('_data', 'i2", (2,))]) + # x = A[0]; y = x["A"]; then y.mask["A"].size==2 + # and we can not say masked/unmasked. + # The result is no longer mvoid! + # See also issue #6724. + return masked_array( + data=self._data[indx], mask=m[indx], + fill_value=self._fill_value[indx], + hard_mask=self._hardmask) + if m is not nomask and m[indx]: + return masked + return self._data[indx] + + def __setitem__(self, indx, value): + self._data[indx] = value + if self._hardmask: + self._mask[indx] |= getattr(value, "_mask", False) + else: + self._mask[indx] = getattr(value, "_mask", False) + + def __str__(self): + m = self._mask + if m is nomask: + return str(self._data) + + rdtype = _replace_dtype_fields(self._data.dtype, "O") + data_arr = super()._data + res = data_arr.astype(rdtype) + _recursive_printoption(res, self._mask, masked_print_option) + return str(res) + + __repr__ = __str__ + + def __iter__(self): + "Defines an iterator for mvoid" + (_data, _mask) = (self._data, self._mask) + if _mask is nomask: + yield from _data + else: + for (d, m) in zip(_data, _mask): + if m: + yield masked + else: + yield d + + def __len__(self): + return self._data.__len__() + + def filled(self, fill_value=None): + """ + Return a copy with masked fields filled with a given value. + + Parameters + ---------- + fill_value : array_like, optional + The value to use for invalid entries. Can be scalar or + non-scalar. If latter is the case, the filled array should + be broadcastable over input array. Default is None, in + which case the `fill_value` attribute is used instead. + + Returns + ------- + filled_void + A `np.void` object + + See Also + -------- + MaskedArray.filled + + """ + return asarray(self).filled(fill_value)[()] + + def tolist(self): + """ + Transforms the mvoid object into a tuple. + + Masked fields are replaced by None. + + Returns + ------- + returned_tuple + Tuple of fields + """ + _mask = self._mask + if _mask is nomask: + return self._data.tolist() + result = [] + for (d, m) in zip(self._data, self._mask): + if m: + result.append(None) + else: + # .item() makes sure we return a standard Python object + result.append(d.item()) + return tuple(result) + + +############################################################################## +# Shortcuts # +############################################################################## + + +def isMaskedArray(x): + """ + Test whether input is an instance of MaskedArray. + + This function returns True if `x` is an instance of MaskedArray + and returns False otherwise. Any object is accepted as input. + + Parameters + ---------- + x : object + Object to test. + + Returns + ------- + result : bool + True if `x` is a MaskedArray. + + See Also + -------- + isMA : Alias to isMaskedArray. + isarray : Alias to isMaskedArray. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.eye(3, 3) + >>> a + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + >>> m = ma.masked_values(a, 0) + >>> m + masked_array( + data=[[1.0, --, --], + [--, 1.0, --], + [--, --, 1.0]], + mask=[[False, True, True], + [ True, False, True], + [ True, True, False]], + fill_value=0.0) + >>> ma.isMaskedArray(a) + False + >>> ma.isMaskedArray(m) + True + >>> ma.isMaskedArray([0, 1, 2]) + False + + """ + return isinstance(x, MaskedArray) + + +isarray = isMaskedArray +isMA = isMaskedArray # backward compatibility + + +class MaskedConstant(MaskedArray): + # the lone np.ma.masked instance + __singleton = None + + @classmethod + def __has_singleton(cls): + # second case ensures `cls.__singleton` is not just a view on the + # superclass singleton + return cls.__singleton is not None and type(cls.__singleton) is cls + + def __new__(cls): + if not cls.__has_singleton(): + # We define the masked singleton as a float for higher precedence. + # Note that it can be tricky sometimes w/ type comparison + data = np.array(0.) + mask = np.array(True) + + # prevent any modifications + data.flags.writeable = False + mask.flags.writeable = False + + # don't fall back on MaskedArray.__new__(MaskedConstant), since + # that might confuse it - this way, the construction is entirely + # within our control + cls.__singleton = MaskedArray(data, mask=mask).view(cls) + + return cls.__singleton + + def __array_finalize__(self, obj): + if not self.__has_singleton(): + # this handles the `.view` in __new__, which we want to copy across + # properties normally + return super().__array_finalize__(obj) + elif self is self.__singleton: + # not clear how this can happen, play it safe + pass + else: + # everywhere else, we want to downcast to MaskedArray, to prevent a + # duplicate maskedconstant. + self.__class__ = MaskedArray + MaskedArray.__array_finalize__(self, obj) + + def __array_wrap__(self, obj, context=None, return_scalar=False): + return self.view(MaskedArray).__array_wrap__(obj, context) + + def __str__(self): + return str(masked_print_option._display) + + def __repr__(self): + if self is MaskedConstant.__singleton: + return 'masked' + else: + # it's a subclass, or something is wrong, make it obvious + return object.__repr__(self) + + def __format__(self, format_spec): + # Replace ndarray.__format__ with the default, which supports no + # format characters. + # Supporting format characters is unwise here, because we do not know + # what type the user was expecting - better to not guess. + try: + return object.__format__(self, format_spec) + except TypeError: + # 2020-03-23, NumPy 1.19.0 + warnings.warn( + "Format strings passed to MaskedConstant are ignored," + " but in future may error or produce different behavior", + FutureWarning, stacklevel=2 + ) + return object.__format__(self, "") + + def __reduce__(self): + """Override of MaskedArray's __reduce__. + """ + return (self.__class__, ()) + + # inplace operations have no effect. We have to override them to avoid + # trying to modify the readonly data and mask arrays + def __iop__(self, other): + return self + __iadd__ = \ + __isub__ = \ + __imul__ = \ + __ifloordiv__ = \ + __itruediv__ = \ + __ipow__ = \ + __iop__ + del __iop__ # don't leave this around + + def copy(self, *args, **kwargs): + """ Copy is a no-op on the maskedconstant, as it is a scalar """ + # maskedconstant is a scalar, so copy doesn't need to copy. There's + # precedent for this with `np.bool` scalars. + return self + + def __copy__(self): + return self + + def __deepcopy__(self, memo): + return self + + def __setattr__(self, attr, value): + if not self.__has_singleton(): + # allow the singleton to be initialized + return super().__setattr__(attr, value) + elif self is self.__singleton: + raise AttributeError( + f"attributes of {self!r} are not writeable") + else: + # duplicate instance - we can end up here from __array_finalize__, + # where we set the __class__ attribute + return super().__setattr__(attr, value) + + +masked = masked_singleton = MaskedConstant() +masked_array = MaskedArray + + +def array(data, dtype=None, copy=False, order=None, + mask=nomask, fill_value=None, keep_mask=True, + hard_mask=False, shrink=True, subok=True, ndmin=0): + """ + Shortcut to MaskedArray. + + The options are in a different order for convenience and backwards + compatibility. + + """ + return MaskedArray(data, mask=mask, dtype=dtype, copy=copy, + subok=subok, keep_mask=keep_mask, + hard_mask=hard_mask, fill_value=fill_value, + ndmin=ndmin, shrink=shrink, order=order) + + +array.__doc__ = masked_array.__doc__ + + +def is_masked(x): + """ + Determine whether input has masked values. + + Accepts any object as input, but always returns False unless the + input is a MaskedArray containing masked values. + + Parameters + ---------- + x : array_like + Array to check for masked values. + + Returns + ------- + result : bool + True if `x` is a MaskedArray with masked values, False otherwise. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = ma.masked_equal([0, 1, 0, 2, 3], 0) + >>> x + masked_array(data=[--, 1, --, 2, 3], + mask=[ True, False, True, False, False], + fill_value=0) + >>> ma.is_masked(x) + True + >>> x = ma.masked_equal([0, 1, 0, 2, 3], 42) + >>> x + masked_array(data=[0, 1, 0, 2, 3], + mask=False, + fill_value=42) + >>> ma.is_masked(x) + False + + Always returns False if `x` isn't a MaskedArray. + + >>> x = [False, True, False] + >>> ma.is_masked(x) + False + >>> x = 'a string' + >>> ma.is_masked(x) + False + + """ + m = getmask(x) + if m is nomask: + return False + elif m.any(): + return True + return False + + +############################################################################## +# Extrema functions # +############################################################################## + + +class _extrema_operation(_MaskedUFunc): + """ + Generic class for maximum/minimum functions. + + .. note:: + This is the base class for `_maximum_operation` and + `_minimum_operation`. + + """ + def __init__(self, ufunc, compare, fill_value): + super().__init__(ufunc) + self.compare = compare + self.fill_value_func = fill_value + + def __call__(self, a, b): + "Executes the call behavior." + + return where(self.compare(a, b), a, b) + + def reduce(self, target, axis=np._NoValue): + "Reduce target along the given axis." + target = narray(target, copy=None, subok=True) + m = getmask(target) + + if axis is np._NoValue and target.ndim > 1: + name = self.__name__ + # 2017-05-06, Numpy 1.13.0: warn on axis default + warnings.warn( + f"In the future the default for ma.{name}.reduce will be axis=0, " + f"not the current None, to match np.{name}.reduce. " + "Explicitly pass 0 or None to silence this warning.", + MaskedArrayFutureWarning, stacklevel=2) + axis = None + + if axis is not np._NoValue: + kwargs = {'axis': axis} + else: + kwargs = {} + + if m is nomask: + t = self.f.reduce(target, **kwargs) + else: + target = target.filled( + self.fill_value_func(target)).view(type(target)) + t = self.f.reduce(target, **kwargs) + m = umath.logical_and.reduce(m, **kwargs) + if hasattr(t, '_mask'): + t._mask = m + elif m: + t = masked + return t + + def outer(self, a, b): + "Return the function applied to the outer product of a and b." + ma = getmask(a) + mb = getmask(b) + if ma is nomask and mb is nomask: + m = nomask + else: + ma = getmaskarray(a) + mb = getmaskarray(b) + m = logical_or.outer(ma, mb) + result = self.f.outer(filled(a), filled(b)) + if not isinstance(result, MaskedArray): + result = result.view(MaskedArray) + result._mask = m + return result + +def min(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + try: + return obj.min(axis=axis, fill_value=fill_value, out=out, **kwargs) + except (AttributeError, TypeError): + # If obj doesn't have a min method, or if the method doesn't accept a + # fill_value argument + return asanyarray(obj).min(axis=axis, fill_value=fill_value, + out=out, **kwargs) + + +min.__doc__ = MaskedArray.min.__doc__ + +def max(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + try: + return obj.max(axis=axis, fill_value=fill_value, out=out, **kwargs) + except (AttributeError, TypeError): + # If obj doesn't have a max method, or if the method doesn't accept a + # fill_value argument + return asanyarray(obj).max(axis=axis, fill_value=fill_value, + out=out, **kwargs) + + +max.__doc__ = MaskedArray.max.__doc__ + + +def ptp(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + try: + return obj.ptp(axis, out=out, fill_value=fill_value, **kwargs) + except (AttributeError, TypeError): + # If obj doesn't have a ptp method or if the method doesn't accept + # a fill_value argument + return asanyarray(obj).ptp(axis=axis, fill_value=fill_value, + out=out, **kwargs) + + +ptp.__doc__ = MaskedArray.ptp.__doc__ + + +############################################################################## +# Definition of functions from the corresponding methods # +############################################################################## + + +def _frommethod(methodname: str, reversed: bool = False): + """ + Define functions from existing MaskedArray methods. + + Parameters + ---------- + methodname : str + Name of the method to transform. + reversed : bool, optional + Whether to reverse the first two arguments of the method. Default is False. + """ + method = getattr(MaskedArray, methodname) + assert callable(method) + + signature = inspect.signature(method) + params = list(signature.parameters.values()) + params[0] = params[0].replace(name="a") # rename 'self' to 'a' + + if reversed: + assert len(params) >= 2 + params[0], params[1] = params[1], params[0] + + def wrapper(a, b, *args, **params): + return getattr(asanyarray(b), methodname)(a, *args, **params) + + else: + def wrapper(a, *args, **params): + return getattr(asanyarray(a), methodname)(*args, **params) + + wrapper.__signature__ = signature.replace(parameters=params) + wrapper.__name__ = wrapper.__qualname__ = methodname + + # __doc__ is None when using `python -OO ...` + if method.__doc__ is not None: + str_signature = f"{methodname}{signature}" + # TODO: For methods with a docstring "Parameters" section, that do not already + # mention `a` (see e.g. `MaskedArray.var.__doc__`), it should be inserted there. + wrapper.__doc__ = f" {str_signature}\n{method.__doc__}" + + return wrapper + + +all = _frommethod('all') +anomalies = anom = _frommethod('anom') +any = _frommethod('any') +argmax = _frommethod('argmax') +argmin = _frommethod('argmin') +compress = _frommethod('compress', reversed=True) +count = _frommethod('count') +cumprod = _frommethod('cumprod') +cumsum = _frommethod('cumsum') +copy = _frommethod('copy') +diagonal = _frommethod('diagonal') +harden_mask = _frommethod('harden_mask') +ids = _frommethod('ids') +maximum = _extrema_operation(umath.maximum, greater, maximum_fill_value) +mean = _frommethod('mean') +minimum = _extrema_operation(umath.minimum, less, minimum_fill_value) +nonzero = _frommethod('nonzero') +prod = _frommethod('prod') +product = _frommethod('product') +ravel = _frommethod('ravel') +repeat = _frommethod('repeat') +shrink_mask = _frommethod('shrink_mask') +soften_mask = _frommethod('soften_mask') +std = _frommethod('std') +sum = _frommethod('sum') +swapaxes = _frommethod('swapaxes') +#take = _frommethod('take') +trace = _frommethod('trace') +var = _frommethod('var') + + +def take(a, indices, axis=None, out=None, mode='raise'): + """ + + """ + a = masked_array(a) + return a.take(indices, axis=axis, out=out, mode=mode) + + +def power(a, b, third=None): + """ + Returns element-wise base array raised to power from second array. + + This is the masked array version of `numpy.power`. For details see + `numpy.power`. + + See Also + -------- + numpy.power + + Notes + ----- + The *out* argument to `numpy.power` is not supported, `third` has to be + None. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = [11.2, -3.973, 0.801, -1.41] + >>> mask = [0, 0, 0, 1] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array(data=[11.2, -3.973, 0.801, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.power(masked_x, 2) + masked_array(data=[125.43999999999998, 15.784728999999999, + 0.6416010000000001, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> y = [-0.5, 2, 0, 17] + >>> masked_y = ma.masked_array(y, mask) + >>> masked_y + masked_array(data=[-0.5, 2.0, 0.0, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.power(masked_x, masked_y) + masked_array(data=[0.2988071523335984, 15.784728999999999, 1.0, --], + mask=[False, False, False, True], + fill_value=1e+20) + + """ + if third is not None: + raise MaskError("3-argument power not supported.") + # Get the masks + ma = getmask(a) + mb = getmask(b) + m = mask_or(ma, mb) + # Get the rawdata + fa = getdata(a) + fb = getdata(b) + # Get the type of the result (so that we preserve subclasses) + if isinstance(a, MaskedArray): + basetype = type(a) + else: + basetype = MaskedArray + # Get the result and view it as a (subclass of) MaskedArray + with np.errstate(divide='ignore', invalid='ignore'): + result = np.where(m, fa, umath.power(fa, fb)).view(basetype) + result._update_from(a) + # Find where we're in trouble w/ NaNs and Infs + invalid = np.logical_not(np.isfinite(result.view(ndarray))) + # Add the initial mask + if m is not nomask: + if not result.ndim: + return masked + result._mask = np.logical_or(m, invalid) + # Fix the invalid parts + if invalid.any(): + if not result.ndim: + return masked + elif result._mask is nomask: + result._mask = invalid + result._data[invalid] = result.fill_value + return result + + +def argsort(a, axis=np._NoValue, kind=None, order=None, endwith=True, + fill_value=None, *, stable=None): + "Function version of the eponymous method." + a = np.asanyarray(a) + + # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default + if axis is np._NoValue: + axis = _deprecate_argsort_axis(a) + + if isinstance(a, MaskedArray): + return a.argsort(axis=axis, kind=kind, order=order, endwith=endwith, + fill_value=fill_value, stable=None) + else: + return a.argsort(axis=axis, kind=kind, order=order, stable=None) + + +argsort.__doc__ = MaskedArray.argsort.__doc__ + +def sort(a, axis=-1, kind=None, order=None, endwith=True, fill_value=None, *, + stable=None): + """ + Return a sorted copy of the masked array. + + Equivalent to creating a copy of the array + and applying the MaskedArray ``sort()`` method. + + Refer to ``MaskedArray.sort`` for the full documentation + + See Also + -------- + MaskedArray.sort : equivalent method + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = [11.2, -3.973, 0.801, -1.41] + >>> mask = [0, 0, 0, 1] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array(data=[11.2, -3.973, 0.801, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.sort(masked_x) + masked_array(data=[-3.973, 0.801, 11.2, --], + mask=[False, False, False, True], + fill_value=1e+20) + """ + a = np.array(a, copy=True, subok=True) + if axis is None: + a = a.flatten() + axis = 0 + + if isinstance(a, MaskedArray): + a.sort(axis=axis, kind=kind, order=order, endwith=endwith, + fill_value=fill_value, stable=stable) + else: + a.sort(axis=axis, kind=kind, order=order, stable=stable) + return a + + +def compressed(x): + """ + Return all the non-masked data as a 1-D array. + + This function is equivalent to calling the "compressed" method of a + `ma.MaskedArray`, see `ma.MaskedArray.compressed` for details. + + See Also + -------- + ma.MaskedArray.compressed : Equivalent method. + + Examples + -------- + >>> import numpy as np + + Create an array with negative values masked: + + >>> import numpy as np + >>> x = np.array([[1, -1, 0], [2, -1, 3], [7, 4, -1]]) + >>> masked_x = np.ma.masked_array(x, mask=x < 0) + >>> masked_x + masked_array( + data=[[1, --, 0], + [2, --, 3], + [7, 4, --]], + mask=[[False, True, False], + [False, True, False], + [False, False, True]], + fill_value=999999) + + Compress the masked array into a 1-D array of non-masked values: + + >>> np.ma.compressed(masked_x) + array([1, 0, 2, 3, 7, 4]) + + """ + return asanyarray(x).compressed() + + +def concatenate(arrays, axis=0): + """ + Concatenate a sequence of arrays along the given axis. + + Parameters + ---------- + arrays : sequence of array_like + The arrays must have the same shape, except in the dimension + corresponding to `axis` (the first, by default). + axis : int, optional + The axis along which the arrays will be joined. Default is 0. + + Returns + ------- + result : MaskedArray + The concatenated array with any masked entries preserved. + + See Also + -------- + numpy.concatenate : Equivalent function in the top-level NumPy module. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = ma.arange(3) + >>> a[1] = ma.masked + >>> b = ma.arange(2, 5) + >>> a + masked_array(data=[0, --, 2], + mask=[False, True, False], + fill_value=999999) + >>> b + masked_array(data=[2, 3, 4], + mask=False, + fill_value=999999) + >>> ma.concatenate([a, b]) + masked_array(data=[0, --, 2, 2, 3, 4], + mask=[False, True, False, False, False, False], + fill_value=999999) + + """ + d = np.concatenate([getdata(a) for a in arrays], axis) + rcls = get_masked_subclass(*arrays) + data = d.view(rcls) + # Check whether one of the arrays has a non-empty mask. + for x in arrays: + if getmask(x) is not nomask: + break + else: + return data + # OK, so we have to concatenate the masks + dm = np.concatenate([getmaskarray(a) for a in arrays], axis) + dm = dm.reshape(d.shape) + + # If we decide to keep a '_shrinkmask' option, we want to check that + # all of them are True, and then check for dm.any() + data._mask = _shrink_mask(dm) + return data + + +def diag(v, k=0): + """ + Extract a diagonal or construct a diagonal array. + + This function is the equivalent of `numpy.diag` that takes masked + values into account, see `numpy.diag` for details. + + See Also + -------- + numpy.diag : Equivalent function for ndarrays. + + Examples + -------- + >>> import numpy as np + + Create an array with negative values masked: + + >>> import numpy as np + >>> x = np.array([[11.2, -3.973, 18], [0.801, -1.41, 12], [7, 33, -12]]) + >>> masked_x = np.ma.masked_array(x, mask=x < 0) + >>> masked_x + masked_array( + data=[[11.2, --, 18.0], + [0.801, --, 12.0], + [7.0, 33.0, --]], + mask=[[False, True, False], + [False, True, False], + [False, False, True]], + fill_value=1e+20) + + Isolate the main diagonal from the masked array: + + >>> np.ma.diag(masked_x) + masked_array(data=[11.2, --, --], + mask=[False, True, True], + fill_value=1e+20) + + Isolate the first diagonal below the main diagonal: + + >>> np.ma.diag(masked_x, -1) + masked_array(data=[0.801, 33.0], + mask=[False, False], + fill_value=1e+20) + + """ + output = np.diag(v, k).view(MaskedArray) + if getmask(v) is not nomask: + output._mask = np.diag(v._mask, k) + return output + + +def left_shift(a, n): + """ + Shift the bits of an integer to the left. + + This is the masked array version of `numpy.left_shift`, for details + see that function. + + See Also + -------- + numpy.left_shift + + Examples + -------- + Shift with a masked array: + + >>> arr = np.ma.array([10, 20, 30], mask=[False, True, False]) + >>> np.ma.left_shift(arr, 1) + masked_array(data=[20, --, 60], + mask=[False, True, False], + fill_value=999999) + + Large shift: + + >>> np.ma.left_shift(10, 10) + masked_array(data=10240, + mask=False, + fill_value=999999) + + Shift with a scalar and an array: + + >>> scalar = 10 + >>> arr = np.ma.array([1, 2, 3], mask=[False, True, False]) + >>> np.ma.left_shift(scalar, arr) + masked_array(data=[20, --, 80], + mask=[False, True, False], + fill_value=999999) + + + """ + m = getmask(a) + if m is nomask: + d = umath.left_shift(filled(a), n) + return masked_array(d) + else: + d = umath.left_shift(filled(a, 0), n) + return masked_array(d, mask=m) + + +def right_shift(a, n): + """ + Shift the bits of an integer to the right. + + This is the masked array version of `numpy.right_shift`, for details + see that function. + + See Also + -------- + numpy.right_shift + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = [11, 3, 8, 1] + >>> mask = [0, 0, 0, 1] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array(data=[11, 3, 8, --], + mask=[False, False, False, True], + fill_value=999999) + >>> ma.right_shift(masked_x,1) + masked_array(data=[5, 1, 4, --], + mask=[False, False, False, True], + fill_value=999999) + + """ + m = getmask(a) + if m is nomask: + d = umath.right_shift(filled(a), n) + return masked_array(d) + else: + d = umath.right_shift(filled(a, 0), n) + return masked_array(d, mask=m) + + +def put(a, indices, values, mode='raise'): + """ + Set storage-indexed locations to corresponding values. + + This function is equivalent to `MaskedArray.put`, see that method + for details. + + See Also + -------- + MaskedArray.put + + Examples + -------- + Putting values in a masked array: + + >>> a = np.ma.array([1, 2, 3, 4], mask=[False, True, False, False]) + >>> np.ma.put(a, [1, 3], [10, 30]) + >>> a + masked_array(data=[ 1, 10, 3, 30], + mask=False, + fill_value=999999) + + Using put with a 2D array: + + >>> b = np.ma.array([[1, 2], [3, 4]], mask=[[False, True], [False, False]]) + >>> np.ma.put(b, [[0, 1], [1, 0]], [[10, 20], [30, 40]]) + >>> b + masked_array( + data=[[40, 30], + [ 3, 4]], + mask=False, + fill_value=999999) + + """ + # We can't use 'frommethod', the order of arguments is different + try: + return a.put(indices, values, mode=mode) + except AttributeError: + return np.asarray(a).put(indices, values, mode=mode) + + +def putmask(a, mask, values): # , mode='raise'): + """ + Changes elements of an array based on conditional and input values. + + This is the masked array version of `numpy.putmask`, for details see + `numpy.putmask`. + + See Also + -------- + numpy.putmask + + Notes + ----- + Using a masked array as `values` will **not** transform a `ndarray` into + a `MaskedArray`. + + Examples + -------- + >>> import numpy as np + >>> arr = [[1, 2], [3, 4]] + >>> mask = [[1, 0], [0, 0]] + >>> x = np.ma.array(arr, mask=mask) + >>> np.ma.putmask(x, x < 4, 10*x) + >>> x + masked_array( + data=[[--, 20], + [30, 4]], + mask=[[ True, False], + [False, False]], + fill_value=999999) + >>> x.data + array([[10, 20], + [30, 4]]) + + """ + # We can't use 'frommethod', the order of arguments is different + if not isinstance(a, MaskedArray): + a = a.view(MaskedArray) + (valdata, valmask) = (getdata(values), getmask(values)) + if getmask(a) is nomask: + if valmask is not nomask: + a._sharedmask = True + a._mask = make_mask_none(a.shape, a.dtype) + np.copyto(a._mask, valmask, where=mask) + elif a._hardmask: + if valmask is not nomask: + m = a._mask.copy() + np.copyto(m, valmask, where=mask) + a.mask |= m + else: + if valmask is nomask: + valmask = getmaskarray(values) + np.copyto(a._mask, valmask, where=mask) + np.copyto(a._data, valdata, where=mask) + + +def transpose(a, axes=None): + """ + Permute the dimensions of an array. + + This function is exactly equivalent to `numpy.transpose`. + + See Also + -------- + numpy.transpose : Equivalent function in top-level NumPy module. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = ma.arange(4).reshape((2,2)) + >>> x[1, 1] = ma.masked + >>> x + masked_array( + data=[[0, 1], + [2, --]], + mask=[[False, False], + [False, True]], + fill_value=999999) + + >>> ma.transpose(x) + masked_array( + data=[[0, 2], + [1, --]], + mask=[[False, False], + [False, True]], + fill_value=999999) + """ + # We can't use 'frommethod', as 'transpose' doesn't take keywords + try: + return a.transpose(axes) + except AttributeError: + return np.asarray(a).transpose(axes).view(MaskedArray) + + +def reshape(a, new_shape, order='C'): + """ + Returns an array containing the same data with a new shape. + + Refer to `MaskedArray.reshape` for full documentation. + + See Also + -------- + MaskedArray.reshape : equivalent function + + Examples + -------- + Reshaping a 1-D array: + + >>> a = np.ma.array([1, 2, 3, 4]) + >>> np.ma.reshape(a, (2, 2)) + masked_array( + data=[[1, 2], + [3, 4]], + mask=False, + fill_value=999999) + + Reshaping a 2-D array: + + >>> b = np.ma.array([[1, 2], [3, 4]]) + >>> np.ma.reshape(b, (1, 4)) + masked_array(data=[[1, 2, 3, 4]], + mask=False, + fill_value=999999) + + Reshaping a 1-D array with a mask: + + >>> c = np.ma.array([1, 2, 3, 4], mask=[False, True, False, False]) + >>> np.ma.reshape(c, (2, 2)) + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=999999) + + """ + # We can't use 'frommethod', it whine about some parameters. Dmmit. + try: + return a.reshape(new_shape, order=order) + except AttributeError: + _tmp = np.asarray(a).reshape(new_shape, order=order) + return _tmp.view(MaskedArray) + + +def resize(x, new_shape): + """ + Return a new masked array with the specified size and shape. + + This is the masked equivalent of the `numpy.resize` function. The new + array is filled with repeated copies of `x` (in the order that the + data are stored in memory). If `x` is masked, the new array will be + masked, and the new mask will be a repetition of the old one. + + See Also + -------- + numpy.resize : Equivalent function in the top level NumPy module. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = ma.array([[1, 2] ,[3, 4]]) + >>> a[0, 1] = ma.masked + >>> a + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=999999) + >>> np.resize(a, (3, 3)) + masked_array( + data=[[1, 2, 3], + [4, 1, 2], + [3, 4, 1]], + mask=False, + fill_value=999999) + >>> ma.resize(a, (3, 3)) + masked_array( + data=[[1, --, 3], + [4, 1, --], + [3, 4, 1]], + mask=[[False, True, False], + [False, False, True], + [False, False, False]], + fill_value=999999) + + A MaskedArray is always returned, regardless of the input type. + + >>> a = np.array([[1, 2] ,[3, 4]]) + >>> ma.resize(a, (3, 3)) + masked_array( + data=[[1, 2, 3], + [4, 1, 2], + [3, 4, 1]], + mask=False, + fill_value=999999) + + """ + # We can't use _frommethods here, as N.resize is notoriously whiny. + m = getmask(x) + if m is not nomask: + m = np.resize(m, new_shape) + result = np.resize(x, new_shape).view(get_masked_subclass(x)) + if result.ndim: + result._mask = m + return result + + +def ndim(obj): + """ + maskedarray version of the numpy function. + + """ + return np.ndim(getdata(obj)) + + +ndim.__doc__ = np.ndim.__doc__ + + +def shape(obj): + "maskedarray version of the numpy function." + return np.shape(getdata(obj)) + + +shape.__doc__ = np.shape.__doc__ + + +def size(obj, axis=None): + "maskedarray version of the numpy function." + return np.size(getdata(obj), axis) + + +size.__doc__ = np.size.__doc__ + + +def diff(a, /, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue): + """ + Calculate the n-th discrete difference along the given axis. + The first difference is given by ``out[i] = a[i+1] - a[i]`` along + the given axis, higher differences are calculated by using `diff` + recursively. + Preserves the input mask. + + Parameters + ---------- + a : array_like + Input array + n : int, optional + The number of times values are differenced. If zero, the input + is returned as-is. + axis : int, optional + The axis along which the difference is taken, default is the + last axis. + prepend, append : array_like, optional + Values to prepend or append to `a` along axis prior to + performing the difference. Scalar values are expanded to + arrays with length 1 in the direction of axis and the shape + of the input array in along all other axes. Otherwise the + dimension and shape must match `a` except along axis. + + Returns + ------- + diff : MaskedArray + The n-th differences. The shape of the output is the same as `a` + except along `axis` where the dimension is smaller by `n`. The + type of the output is the same as the type of the difference + between any two elements of `a`. This is the same as the type of + `a` in most cases. A notable exception is `datetime64`, which + results in a `timedelta64` output array. + + See Also + -------- + numpy.diff : Equivalent function in the top-level NumPy module. + + Notes + ----- + Type is preserved for boolean arrays, so the result will contain + `False` when consecutive elements are the same and `True` when they + differ. + + For unsigned integer arrays, the results will also be unsigned. This + should not be surprising, as the result is consistent with + calculating the difference directly: + + >>> u8_arr = np.array([1, 0], dtype=np.uint8) + >>> np.ma.diff(u8_arr) + masked_array(data=[255], + mask=False, + fill_value=np.uint64(999999), + dtype=uint8) + >>> u8_arr[1,...] - u8_arr[0,...] + np.uint8(255) + + If this is not desirable, then the array should be cast to a larger + integer type first: + + >>> i16_arr = u8_arr.astype(np.int16) + >>> np.ma.diff(i16_arr) + masked_array(data=[-1], + mask=False, + fill_value=np.int64(999999), + dtype=int16) + + Examples + -------- + >>> import numpy as np + >>> a = np.array([1, 2, 3, 4, 7, 0, 2, 3]) + >>> x = np.ma.masked_where(a < 2, a) + >>> np.ma.diff(x) + masked_array(data=[--, 1, 1, 3, --, --, 1], + mask=[ True, False, False, False, True, True, False], + fill_value=999999) + + >>> np.ma.diff(x, n=2) + masked_array(data=[--, 0, 2, --, --, --], + mask=[ True, False, False, True, True, True], + fill_value=999999) + + >>> a = np.array([[1, 3, 1, 5, 10], [0, 1, 5, 6, 8]]) + >>> x = np.ma.masked_equal(a, value=1) + >>> np.ma.diff(x) + masked_array( + data=[[--, --, --, 5], + [--, --, 1, 2]], + mask=[[ True, True, True, False], + [ True, True, False, False]], + fill_value=1) + + >>> np.ma.diff(x, axis=0) + masked_array(data=[[--, --, --, 1, -2]], + mask=[[ True, True, True, False, False]], + fill_value=1) + + """ + if n == 0: + return a + if n < 0: + raise ValueError("order must be non-negative but got " + repr(n)) + + a = np.ma.asanyarray(a) + if a.ndim == 0: + raise ValueError( + "diff requires input that is at least one dimensional" + ) + + combined = [] + if prepend is not np._NoValue: + prepend = np.ma.asanyarray(prepend) + if prepend.ndim == 0: + shape = list(a.shape) + shape[axis] = 1 + prepend = np.broadcast_to(prepend, tuple(shape)) + combined.append(prepend) + + combined.append(a) + + if append is not np._NoValue: + append = np.ma.asanyarray(append) + if append.ndim == 0: + shape = list(a.shape) + shape[axis] = 1 + append = np.broadcast_to(append, tuple(shape)) + combined.append(append) + + if len(combined) > 1: + a = np.ma.concatenate(combined, axis) + + # GH 22465 np.diff without prepend/append preserves the mask + return np.diff(a, n, axis) + + +############################################################################## +# Extra functions # +############################################################################## + + +def where(condition, x=_NoValue, y=_NoValue): + """ + Return a masked array with elements from `x` or `y`, depending on condition. + + .. note:: + When only `condition` is provided, this function is identical to + `nonzero`. The rest of this documentation covers only the case where + all three arguments are provided. + + Parameters + ---------- + condition : array_like, bool + Where True, yield `x`, otherwise yield `y`. + x, y : array_like, optional + Values from which to choose. `x`, `y` and `condition` need to be + broadcastable to some shape. + + Returns + ------- + out : MaskedArray + An masked array with `masked` elements where the condition is masked, + elements from `x` where `condition` is True, and elements from `y` + elsewhere. + + See Also + -------- + numpy.where : Equivalent function in the top-level NumPy module. + nonzero : The function that is called when x and y are omitted + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0], + ... [1, 0, 1], + ... [0, 1, 0]]) + >>> x + masked_array( + data=[[0.0, --, 2.0], + [--, 4.0, --], + [6.0, --, 8.0]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=1e+20) + >>> np.ma.where(x > 5, x, -3.1416) + masked_array( + data=[[-3.1416, --, -3.1416], + [--, -3.1416, --], + [6.0, --, 8.0]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=1e+20) + + """ + + # handle the single-argument case + missing = (x is _NoValue, y is _NoValue).count(True) + if missing == 1: + raise ValueError("Must provide both 'x' and 'y' or neither.") + if missing == 2: + return nonzero(condition) + + # we only care if the condition is true - false or masked pick y + cf = filled(condition, False) + xd = getdata(x) + yd = getdata(y) + + # we need the full arrays here for correct final dimensions + cm = getmaskarray(condition) + xm = getmaskarray(x) + ym = getmaskarray(y) + + # deal with the fact that masked.dtype == float64, but we don't actually + # want to treat it as that. + if x is masked and y is not masked: + xd = np.zeros((), dtype=yd.dtype) + xm = np.ones((), dtype=ym.dtype) + elif y is masked and x is not masked: + yd = np.zeros((), dtype=xd.dtype) + ym = np.ones((), dtype=xm.dtype) + + data = np.where(cf, xd, yd) + mask = np.where(cf, xm, ym) + mask = np.where(cm, np.ones((), dtype=mask.dtype), mask) + + # collapse the mask, for backwards compatibility + mask = _shrink_mask(mask) + + return masked_array(data, mask=mask) + + +def choose(indices, choices, out=None, mode='raise'): + """ + Use an index array to construct a new array from a list of choices. + + Given an array of integers and a list of n choice arrays, this method + will create a new array that merges each of the choice arrays. Where a + value in `index` is i, the new array will have the value that choices[i] + contains in the same place. + + Parameters + ---------- + indices : ndarray of ints + This array must contain integers in ``[0, n-1]``, where n is the + number of choices. + choices : sequence of arrays + Choice arrays. The index array and all of the choices should be + broadcastable to the same shape. + out : array, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and `dtype`. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. + + * 'raise' : raise an error + * 'wrap' : wrap around + * 'clip' : clip to the range + + Returns + ------- + merged_array : array + + See Also + -------- + choose : equivalent function + + Examples + -------- + >>> import numpy as np + >>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]]) + >>> a = np.array([2, 1, 0]) + >>> np.ma.choose(a, choice) + masked_array(data=[3, 2, 1], + mask=False, + fill_value=999999) + + """ + def fmask(x): + "Returns the filled array, or True if masked." + if x is masked: + return True + return filled(x) + + def nmask(x): + "Returns the mask, True if ``masked``, False if ``nomask``." + if x is masked: + return True + return getmask(x) + # Get the indices. + c = filled(indices, 0) + # Get the masks. + masks = [nmask(x) for x in choices] + data = [fmask(x) for x in choices] + # Construct the mask + outputmask = np.choose(c, masks, mode=mode) + outputmask = make_mask(mask_or(outputmask, getmask(indices)), + copy=False, shrink=True) + # Get the choices. + d = np.choose(c, data, mode=mode, out=out).view(MaskedArray) + if out is not None: + if isinstance(out, MaskedArray): + out.__setmask__(outputmask) + return out + d.__setmask__(outputmask) + return d + + +def round_(a, decimals=0, out=None): + """ + Return a copy of a, rounded to 'decimals' places. + + When 'decimals' is negative, it specifies the number of positions + to the left of the decimal point. The real and imaginary parts of + complex numbers are rounded separately. Nothing is done if the + array is not of float type and 'decimals' is greater than or equal + to 0. + + Parameters + ---------- + decimals : int + Number of decimals to round to. May be negative. + out : array_like + Existing array to use for output. + If not given, returns a default copy of a. + + Notes + ----- + If out is given and does not have a mask attribute, the mask of a + is lost! + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = [11.2, -3.973, 0.801, -1.41] + >>> mask = [0, 0, 0, 1] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array(data=[11.2, -3.973, 0.801, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.round_(masked_x) + masked_array(data=[11.0, -4.0, 1.0, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.round(masked_x, decimals=1) + masked_array(data=[11.2, -4.0, 0.8, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.round_(masked_x, decimals=-1) + masked_array(data=[10.0, -0.0, 0.0, --], + mask=[False, False, False, True], + fill_value=1e+20) + """ + if out is None: + return np.round(a, decimals, out) + else: + np.round(getdata(a), decimals, out) + if hasattr(out, '_mask'): + out._mask = getmask(a) + return out + + +round = round_ + + +def _mask_propagate(a, axis): + """ + Mask whole 1-d vectors of an array that contain masked values. + """ + a = array(a, subok=False) + m = getmask(a) + if m is nomask or not m.any() or axis is None: + return a + a._mask = a._mask.copy() + axes = normalize_axis_tuple(axis, a.ndim) + for ax in axes: + a._mask |= m.any(axis=ax, keepdims=True) + return a + + +# Include masked dot here to avoid import problems in getting it from +# extras.py. Note that it is not included in __all__, but rather exported +# from extras in order to avoid backward compatibility problems. +def dot(a, b, strict=False, out=None): + """ + Return the dot product of two arrays. + + This function is the equivalent of `numpy.dot` that takes masked values + into account. Note that `strict` and `out` are in different position + than in the method version. In order to maintain compatibility with the + corresponding method, it is recommended that the optional arguments be + treated as keyword only. At some point that may be mandatory. + + Parameters + ---------- + a, b : masked_array_like + Inputs arrays. + strict : bool, optional + Whether masked data are propagated (True) or set to 0 (False) for + the computation. Default is False. Propagating the mask means that + if a masked value appears in a row or column, the whole row or + column is considered masked. + out : masked_array, optional + Output argument. This must have the exact kind that would be returned + if it was not used. In particular, it must have the right type, must be + C-contiguous, and its dtype must be the dtype that would be returned + for `dot(a,b)`. This is a performance feature. Therefore, if these + conditions are not met, an exception is raised, instead of attempting + to be flexible. + + See Also + -------- + numpy.dot : Equivalent function for ndarrays. + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]]) + >>> b = np.ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]]) + >>> np.ma.dot(a, b) + masked_array( + data=[[21, 26], + [45, 64]], + mask=[[False, False], + [False, False]], + fill_value=999999) + >>> np.ma.dot(a, b, strict=True) + masked_array( + data=[[--, --], + [--, 64]], + mask=[[ True, True], + [ True, False]], + fill_value=999999) + + """ + if strict is True: + if np.ndim(a) == 0 or np.ndim(b) == 0: + pass + elif b.ndim == 1: + a = _mask_propagate(a, a.ndim - 1) + b = _mask_propagate(b, b.ndim - 1) + else: + a = _mask_propagate(a, a.ndim - 1) + b = _mask_propagate(b, b.ndim - 2) + am = ~getmaskarray(a) + bm = ~getmaskarray(b) + + if out is None: + d = np.dot(filled(a, 0), filled(b, 0)) + m = ~np.dot(am, bm) + if np.ndim(d) == 0: + d = np.asarray(d) + r = d.view(get_masked_subclass(a, b)) + r.__setmask__(m) + return r + else: + d = np.dot(filled(a, 0), filled(b, 0), out._data) + if out.mask.shape != d.shape: + out._mask = np.empty(d.shape, MaskType) + np.dot(am, bm, out._mask) + np.logical_not(out._mask, out._mask) + return out + + +def inner(a, b): + """ + Returns the inner product of a and b for arrays of floating point types. + + Like the generic NumPy equivalent the product sum is over the last dimension + of a and b. The first argument is not conjugated. + + """ + fa = filled(a, 0) + fb = filled(b, 0) + if fa.ndim == 0: + fa.shape = (1,) + if fb.ndim == 0: + fb.shape = (1,) + return np.inner(fa, fb).view(MaskedArray) + + +inner.__doc__ = doc_note(np.inner.__doc__, + "Masked values are replaced by 0.") +innerproduct = inner + + +def outer(a, b): + "maskedarray version of the numpy function." + fa = filled(a, 0).ravel() + fb = filled(b, 0).ravel() + d = np.outer(fa, fb) + ma = getmask(a) + mb = getmask(b) + if ma is nomask and mb is nomask: + return masked_array(d) + ma = getmaskarray(a) + mb = getmaskarray(b) + m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=False) + return masked_array(d, mask=m) + + +outer.__doc__ = doc_note(np.outer.__doc__, + "Masked values are replaced by 0.") +outerproduct = outer + + +def _convolve_or_correlate(f, a, v, mode, propagate_mask): + """ + Helper function for ma.correlate and ma.convolve + """ + if propagate_mask: + # results which are contributed to by either item in any pair being invalid + mask = ( + f(getmaskarray(a), np.ones(np.shape(v), dtype=bool), mode=mode) + | f(np.ones(np.shape(a), dtype=bool), getmaskarray(v), mode=mode) + ) + data = f(getdata(a), getdata(v), mode=mode) + else: + # results which are not contributed to by any pair of valid elements + mask = ~f(~getmaskarray(a), ~getmaskarray(v), mode=mode) + data = f(filled(a, 0), filled(v, 0), mode=mode) + + return masked_array(data, mask=mask) + + +def correlate(a, v, mode='valid', propagate_mask=True): + """ + Cross-correlation of two 1-dimensional sequences. + + Parameters + ---------- + a, v : array_like + Input sequences. + mode : {'valid', 'same', 'full'}, optional + Refer to the `np.convolve` docstring. Note that the default + is 'valid', unlike `convolve`, which uses 'full'. + propagate_mask : bool + If True, then a result element is masked if any masked element contributes + towards it. If False, then a result element is only masked if no non-masked + element contribute towards it + + Returns + ------- + out : MaskedArray + Discrete cross-correlation of `a` and `v`. + + See Also + -------- + numpy.correlate : Equivalent function in the top-level NumPy module. + + Examples + -------- + Basic correlation: + + >>> a = np.ma.array([1, 2, 3]) + >>> v = np.ma.array([0, 1, 0]) + >>> np.ma.correlate(a, v, mode='valid') + masked_array(data=[2], + mask=[False], + fill_value=999999) + + Correlation with masked elements: + + >>> a = np.ma.array([1, 2, 3], mask=[False, True, False]) + >>> v = np.ma.array([0, 1, 0]) + >>> np.ma.correlate(a, v, mode='valid', propagate_mask=True) + masked_array(data=[--], + mask=[ True], + fill_value=999999, + dtype=int64) + + Correlation with different modes and mixed array types: + + >>> a = np.ma.array([1, 2, 3]) + >>> v = np.ma.array([0, 1, 0]) + >>> np.ma.correlate(a, v, mode='full') + masked_array(data=[0, 1, 2, 3, 0], + mask=[False, False, False, False, False], + fill_value=999999) + + """ + return _convolve_or_correlate(np.correlate, a, v, mode, propagate_mask) + + +def convolve(a, v, mode='full', propagate_mask=True): + """ + Returns the discrete, linear convolution of two one-dimensional sequences. + + Parameters + ---------- + a, v : array_like + Input sequences. + mode : {'valid', 'same', 'full'}, optional + Refer to the `np.convolve` docstring. + propagate_mask : bool + If True, then if any masked element is included in the sum for a result + element, then the result is masked. + If False, then the result element is only masked if no non-masked cells + contribute towards it + + Returns + ------- + out : MaskedArray + Discrete, linear convolution of `a` and `v`. + + See Also + -------- + numpy.convolve : Equivalent function in the top-level NumPy module. + """ + return _convolve_or_correlate(np.convolve, a, v, mode, propagate_mask) + + +def allequal(a, b, fill_value=True): + """ + Return True if all entries of a and b are equal, using + fill_value as a truth value where either or both are masked. + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + fill_value : bool, optional + Whether masked values in a or b are considered equal (True) or not + (False). + + Returns + ------- + y : bool + Returns True if the two arrays are equal within the given + tolerance, False otherwise. If either array contains NaN, + then False is returned. + + See Also + -------- + all, any + numpy.ma.allclose + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) + >>> a + masked_array(data=[10000000000.0, 1e-07, --], + mask=[False, False, True], + fill_value=1e+20) + + >>> b = np.array([1e10, 1e-7, -42.0]) + >>> b + array([ 1.00000000e+10, 1.00000000e-07, -4.20000000e+01]) + >>> np.ma.allequal(a, b, fill_value=False) + False + >>> np.ma.allequal(a, b) + True + + """ + m = mask_or(getmask(a), getmask(b)) + if m is nomask: + x = getdata(a) + y = getdata(b) + d = umath.equal(x, y) + return d.all() + elif fill_value: + x = getdata(a) + y = getdata(b) + d = umath.equal(x, y) + dm = array(d, mask=m, copy=False) + return dm.filled(True).all(None) + else: + return False + + +def allclose(a, b, masked_equal=True, rtol=1e-5, atol=1e-8): + """ + Returns True if two arrays are element-wise equal within a tolerance. + + This function is equivalent to `allclose` except that masked values + are treated as equal (default) or unequal, depending on the `masked_equal` + argument. + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + masked_equal : bool, optional + Whether masked values in `a` and `b` are considered equal (True) or not + (False). They are considered equal by default. + rtol : float, optional + Relative tolerance. The relative difference is equal to ``rtol * b``. + Default is 1e-5. + atol : float, optional + Absolute tolerance. The absolute difference is equal to `atol`. + Default is 1e-8. + + Returns + ------- + y : bool + Returns True if the two arrays are equal within the given + tolerance, False otherwise. If either array contains NaN, then + False is returned. + + See Also + -------- + all, any + numpy.allclose : the non-masked `allclose`. + + Notes + ----- + If the following equation is element-wise True, then `allclose` returns + True:: + + absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) + + Return True if all elements of `a` and `b` are equal subject to + given tolerances. + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) + >>> a + masked_array(data=[10000000000.0, 1e-07, --], + mask=[False, False, True], + fill_value=1e+20) + >>> b = np.ma.array([1e10, 1e-8, -42.0], mask=[0, 0, 1]) + >>> np.ma.allclose(a, b) + False + + >>> a = np.ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1]) + >>> b = np.ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1]) + >>> np.ma.allclose(a, b) + True + >>> np.ma.allclose(a, b, masked_equal=False) + False + + Masked values are not compared directly. + + >>> a = np.ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1]) + >>> b = np.ma.array([1.00001e10, 1e-9, 42.0], mask=[0, 0, 1]) + >>> np.ma.allclose(a, b) + True + >>> np.ma.allclose(a, b, masked_equal=False) + False + + """ + x = masked_array(a, copy=False) + y = masked_array(b, copy=False) + + # make sure y is an inexact type to avoid abs(MIN_INT); will cause + # casting of x later. + # NOTE: We explicitly allow timedelta, which used to work. This could + # possibly be deprecated. See also gh-18286. + # timedelta works if `atol` is an integer or also a timedelta. + # Although, the default tolerances are unlikely to be useful + if y.dtype.kind != "m": + dtype = np.result_type(y, 1.) + if y.dtype != dtype: + y = masked_array(y, dtype=dtype, copy=False) + + m = mask_or(getmask(x), getmask(y)) + xinf = np.isinf(masked_array(x, copy=False, mask=m)).filled(False) + # If we have some infs, they should fall at the same place. + if not np.all(xinf == filled(np.isinf(y), False)): + return False + # No infs at all + if not np.any(xinf): + d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)), + masked_equal) + return np.all(d) + + if not np.all(filled(x[xinf] == y[xinf], masked_equal)): + return False + x = x[~xinf] + y = y[~xinf] + + d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)), + masked_equal) + + return np.all(d) + + +def asarray(a, dtype=None, order=None): + """ + Convert the input to a masked array of the given data-type. + + No copy is performed if the input is already an `ndarray`. If `a` is + a subclass of `MaskedArray`, a base class `MaskedArray` is returned. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to a masked array. This + includes lists, lists of tuples, tuples, tuples of tuples, tuples + of lists, ndarrays and masked arrays. + dtype : dtype, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F'}, optional + Whether to use row-major ('C') or column-major ('FORTRAN') memory + representation. Default is 'C'. + + Returns + ------- + out : MaskedArray + Masked array interpretation of `a`. + + See Also + -------- + asanyarray : Similar to `asarray`, but conserves subclasses. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(10.).reshape(2, 5) + >>> x + array([[0., 1., 2., 3., 4.], + [5., 6., 7., 8., 9.]]) + >>> np.ma.asarray(x) + masked_array( + data=[[0., 1., 2., 3., 4.], + [5., 6., 7., 8., 9.]], + mask=False, + fill_value=1e+20) + >>> type(np.ma.asarray(x)) + + + """ + order = order or 'C' + return masked_array(a, dtype=dtype, copy=False, keep_mask=True, + subok=False, order=order) + + +def asanyarray(a, dtype=None, order=None): + """ + Convert the input to a masked array, conserving subclasses. + + If `a` is a subclass of `MaskedArray`, its class is conserved. + No copy is performed if the input is already an `ndarray`. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. + dtype : dtype, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F', 'A', 'K'}, optional + Memory layout. 'A' and 'K' depend on the order of input array ``a``. + 'C' row-major (C-style), + 'F' column-major (Fortran-style) memory representation. + 'A' (any) means 'F' if ``a`` is Fortran contiguous, 'C' otherwise + 'K' (keep) preserve input order + Defaults to 'K'. + + Returns + ------- + out : MaskedArray + MaskedArray interpretation of `a`. + + See Also + -------- + asarray : Similar to `asanyarray`, but does not conserve subclass. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(10.).reshape(2, 5) + >>> x + array([[0., 1., 2., 3., 4.], + [5., 6., 7., 8., 9.]]) + >>> np.ma.asanyarray(x) + masked_array( + data=[[0., 1., 2., 3., 4.], + [5., 6., 7., 8., 9.]], + mask=False, + fill_value=1e+20) + >>> type(np.ma.asanyarray(x)) + + + """ + # workaround for #8666, to preserve identity. Ideally the bottom line + # would handle this for us. + if ( + isinstance(a, MaskedArray) + and (dtype is None or dtype == a.dtype) + and ( + order in {None, 'A', 'K'} + or order == 'C' and a.flags.carray + or order == 'F' and a.flags.f_contiguous + ) + ): + return a + return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True, + order=order) + + +############################################################################## +# Pickling # +############################################################################## + + +def fromfile(file, dtype=float, count=-1, sep=''): + raise NotImplementedError( + "fromfile() not yet implemented for a MaskedArray.") + + +def fromflex(fxarray): + """ + Build a masked array from a suitable flexible-type array. + + The input array has to have a data-type with ``_data`` and ``_mask`` + fields. This type of array is output by `MaskedArray.toflex`. + + Parameters + ---------- + fxarray : ndarray + The structured input array, containing ``_data`` and ``_mask`` + fields. If present, other fields are discarded. + + Returns + ------- + result : MaskedArray + The constructed masked array. + + See Also + -------- + MaskedArray.toflex : Build a flexible-type array from a masked array. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[0] + [1, 0] * 4) + >>> rec = x.toflex() + >>> rec + array([[(0, False), (1, True), (2, False)], + [(3, True), (4, False), (5, True)], + [(6, False), (7, True), (8, False)]], + dtype=[('_data', '>> x2 = np.ma.fromflex(rec) + >>> x2 + masked_array( + data=[[0, --, 2], + [--, 4, --], + [6, --, 8]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + + Extra fields can be present in the structured array but are discarded: + + >>> dt = [('_data', '>> rec2 = np.zeros((2, 2), dtype=dt) + >>> rec2 + array([[(0, False, 0.), (0, False, 0.)], + [(0, False, 0.), (0, False, 0.)]], + dtype=[('_data', '>> y = np.ma.fromflex(rec2) + >>> y + masked_array( + data=[[0, 0], + [0, 0]], + mask=[[False, False], + [False, False]], + fill_value=np.int64(999999), + dtype=int32) + + """ + return masked_array(fxarray['_data'], mask=fxarray['_mask']) + + +def _convert2ma(funcname: str, np_ret: str, np_ma_ret: str, + params: dict[str, str] | None = None): + """Convert function from numpy to numpy.ma.""" + func = getattr(np, funcname) + params = params or {} + + @functools.wraps(func, assigned=set(functools.WRAPPER_ASSIGNMENTS) - {"__module__"}) + def wrapper(*args, **kwargs): + common_params = kwargs.keys() & params.keys() + extras = params | {p: kwargs.pop(p) for p in common_params} + + result = func.__call__(*args, **kwargs).view(MaskedArray) + + if "fill_value" in common_params: + result.fill_value = extras["fill_value"] + if "hardmask" in common_params: + result._hardmask = bool(extras["hardmask"]) + + return result + + # workaround for a doctest bug in Python 3.11 that incorrectly assumes `__code__` + # exists on wrapped functions + del wrapper.__wrapped__ + + # `arange`, `empty`, `empty_like`, `frombuffer`, and `zeros` have no signature + try: + signature = inspect.signature(func) + except ValueError: + signature = inspect.Signature([ + inspect.Parameter('args', inspect.Parameter.VAR_POSITIONAL), + inspect.Parameter('kwargs', inspect.Parameter.VAR_KEYWORD), + ]) + + if params: + sig_params = list(signature.parameters.values()) + + # pop `**kwargs` if present + sig_kwargs = None + if sig_params[-1].kind is inspect.Parameter.VAR_KEYWORD: + sig_kwargs = sig_params.pop() + + # add new keyword-only parameters + for param_name, default in params.items(): + new_param = inspect.Parameter( + param_name, + inspect.Parameter.KEYWORD_ONLY, + default=default, + ) + sig_params.append(new_param) + + # re-append `**kwargs` if it was present + if sig_kwargs: + sig_params.append(sig_kwargs) + + signature = signature.replace(parameters=sig_params) + + wrapper.__signature__ = signature + + # __doc__ is None when using `python -OO ...` + if func.__doc__ is not None: + assert np_ret in func.__doc__, ( + f"Failed to replace `{np_ret}` with `{np_ma_ret}`. " + f"The documentation string for return type, {np_ret}, is not " + f"found in the docstring for `np.{func.__name__}`. " + f"Fix the docstring for `np.{func.__name__}` or " + "update the expected string for return type." + ) + wrapper.__doc__ = inspect.cleandoc(func.__doc__).replace(np_ret, np_ma_ret) + + return wrapper + + +arange = _convert2ma( + 'arange', + params={'fill_value': None, 'hardmask': False}, + np_ret='arange : ndarray', + np_ma_ret='arange : MaskedArray', +) +clip = _convert2ma( + 'clip', + params={'fill_value': None, 'hardmask': False}, + np_ret='clipped_array : ndarray', + np_ma_ret='clipped_array : MaskedArray', +) +empty = _convert2ma( + 'empty', + params={'fill_value': None, 'hardmask': False}, + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) +empty_like = _convert2ma( + 'empty_like', + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) +frombuffer = _convert2ma( + 'frombuffer', + np_ret='out : ndarray', + np_ma_ret='out: MaskedArray', +) +fromfunction = _convert2ma( + 'fromfunction', + np_ret='fromfunction : any', + np_ma_ret='fromfunction: MaskedArray', +) +identity = _convert2ma( + 'identity', + params={'fill_value': None, 'hardmask': False}, + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) +indices = _convert2ma( + 'indices', + params={'fill_value': None, 'hardmask': False}, + np_ret='grid : one ndarray or tuple of ndarrays', + np_ma_ret='grid : one MaskedArray or tuple of MaskedArrays', +) +ones = _convert2ma( + 'ones', + params={'fill_value': None, 'hardmask': False}, + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) +ones_like = _convert2ma( + 'ones_like', + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) +squeeze = _convert2ma( + 'squeeze', + params={'fill_value': None, 'hardmask': False}, + np_ret='squeezed : ndarray', + np_ma_ret='squeezed : MaskedArray', +) +zeros = _convert2ma( + 'zeros', + params={'fill_value': None, 'hardmask': False}, + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) +zeros_like = _convert2ma( + 'zeros_like', + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) + + +def append(a, b, axis=None): + """Append values to the end of an array. + + Parameters + ---------- + a : array_like + Values are appended to a copy of this array. + b : array_like + These values are appended to a copy of `a`. It must be of the + correct shape (the same shape as `a`, excluding `axis`). If `axis` + is not specified, `b` can be any shape and will be flattened + before use. + axis : int, optional + The axis along which `v` are appended. If `axis` is not given, + both `a` and `b` are flattened before use. + + Returns + ------- + append : MaskedArray + A copy of `a` with `b` appended to `axis`. Note that `append` + does not occur in-place: a new array is allocated and filled. If + `axis` is None, the result is a flattened array. + + See Also + -------- + numpy.append : Equivalent function in the top-level NumPy module. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = ma.masked_values([1, 2, 3], 2) + >>> b = ma.masked_values([[4, 5, 6], [7, 8, 9]], 7) + >>> ma.append(a, b) + masked_array(data=[1, --, 3, 4, 5, 6, --, 8, 9], + mask=[False, True, False, False, False, False, True, False, + False], + fill_value=999999) + """ + return concatenate([a, b], axis) diff --git a/local_packages/numpy/ma/core.pyi b/local_packages/numpy/ma/core.pyi new file mode 100644 index 0000000..d4a1f32 --- /dev/null +++ b/local_packages/numpy/ma/core.pyi @@ -0,0 +1,3733 @@ +# pyright: reportIncompatibleMethodOverride=false + +import datetime as dt +import types +from _typeshed import Incomplete +from collections.abc import Callable, Sequence +from typing import ( + Any, + Concatenate, + Final, + Generic, + Literal, + Never, + NoReturn, + Self, + SupportsComplex, + SupportsFloat, + SupportsIndex, + SupportsInt, + TypeAlias, + Unpack, + final, + overload, +) +from typing_extensions import Buffer, ParamSpec, TypeIs, TypeVar, override + +import numpy as np +from numpy import ( + _AnyShapeT, + _HasDType, + _HasDTypeWithRealAndImag, + _ModeKind, + _OrderACF, + _OrderCF, + _OrderKACF, + _PartitionKind, + _SortKind, + _ToIndices, + amax, + amin, + bool_, + bytes_, + character, + complex128, + complexfloating, + datetime64, + dtype, + dtypes, + expand_dims, + flexible, + float16, + float32, + float64, + floating, + generic, + inexact, + int8, + int64, + int_, + integer, + intp, + ndarray, + number, + object_, + signedinteger, + str_, + timedelta64, + ufunc, + unsignedinteger, + void, +) +from numpy._core.fromnumeric import _UFuncKwargs # type-check only +from numpy._globals import _NoValueType +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _32Bit, + _64Bit, + _AnyShape, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeBytes_co, + _ArrayLikeComplex128_co, + _ArrayLikeComplex_co, + _ArrayLikeDT64_co, + _ArrayLikeFloat64_co, + _ArrayLikeFloat_co, + _ArrayLikeInt, + _ArrayLikeInt_co, + _ArrayLikeNumber_co, + _ArrayLikeObject_co, + _ArrayLikeStr_co, + _ArrayLikeString_co, + _ArrayLikeTD64_co, + _ArrayLikeUInt_co, + _CharLike_co, + _DT64Codes, + _DTypeLike, + _DTypeLikeBool, + _DTypeLikeVoid, + _FloatLike_co, + _IntLike_co, + _NestedSequence, + _ScalarLike_co, + _Shape, + _ShapeLike, + _SupportsArrayFunc, + _SupportsDType, + _TD64Like_co, +) +from numpy._typing._dtype_like import _VoidDTypeLike + +__all__ = [ + "MAError", + "MaskError", + "MaskType", + "MaskedArray", + "abs", + "absolute", + "add", + "all", + "allclose", + "allequal", + "alltrue", + "amax", + "amin", + "angle", + "anom", + "anomalies", + "any", + "append", + "arange", + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctan2", + "arctanh", + "argmax", + "argmin", + "argsort", + "around", + "array", + "asanyarray", + "asarray", + "bitwise_and", + "bitwise_or", + "bitwise_xor", + "bool_", + "ceil", + "choose", + "clip", + "common_fill_value", + "compress", + "compressed", + "concatenate", + "conjugate", + "convolve", + "copy", + "correlate", + "cos", + "cosh", + "count", + "cumprod", + "cumsum", + "default_fill_value", + "diag", + "diagonal", + "diff", + "divide", + "empty", + "empty_like", + "equal", + "exp", + "expand_dims", + "fabs", + "filled", + "fix_invalid", + "flatten_mask", + "flatten_structured_array", + "floor", + "floor_divide", + "fmod", + "frombuffer", + "fromflex", + "fromfunction", + "getdata", + "getmask", + "getmaskarray", + "greater", + "greater_equal", + "harden_mask", + "hypot", + "identity", + "ids", + "indices", + "inner", + "innerproduct", + "isMA", + "isMaskedArray", + "is_mask", + "is_masked", + "isarray", + "left_shift", + "less", + "less_equal", + "log", + "log2", + "log10", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "make_mask", + "make_mask_descr", + "make_mask_none", + "mask_or", + "masked", + "masked_array", + "masked_equal", + "masked_greater", + "masked_greater_equal", + "masked_inside", + "masked_invalid", + "masked_less", + "masked_less_equal", + "masked_not_equal", + "masked_object", + "masked_outside", + "masked_print_option", + "masked_singleton", + "masked_values", + "masked_where", + "max", + "maximum", + "maximum_fill_value", + "mean", + "min", + "minimum", + "minimum_fill_value", + "mod", + "multiply", + "mvoid", + "ndim", + "negative", + "nomask", + "nonzero", + "not_equal", + "ones", + "ones_like", + "outer", + "outerproduct", + "power", + "prod", + "product", + "ptp", + "put", + "putmask", + "ravel", + "remainder", + "repeat", + "reshape", + "resize", + "right_shift", + "round", + "round_", + "set_fill_value", + "shape", + "sin", + "sinh", + "size", + "soften_mask", + "sometrue", + "sort", + "sqrt", + "squeeze", + "std", + "subtract", + "sum", + "swapaxes", + "take", + "tan", + "tanh", + "trace", + "transpose", + "true_divide", + "var", + "where", + "zeros", + "zeros_like", +] + +_ShapeT = TypeVar("_ShapeT", bound=_Shape) +_ShapeOrAnyT = TypeVar("_ShapeOrAnyT", bound=_Shape, default=_AnyShape) +_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) +_DTypeT = TypeVar("_DTypeT", bound=dtype) +_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) +_ArrayT = TypeVar("_ArrayT", bound=ndarray[Any, Any]) +_MArrayT = TypeVar("_MArrayT", bound=MaskedArray[Any, Any]) +_ScalarT = TypeVar("_ScalarT", bound=generic) +_ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) +_NumberT = TypeVar("_NumberT", bound=number) +_RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) +_ArangeScalarT = TypeVar("_ArangeScalarT", bound=_ArangeScalar) +_UFuncT_co = TypeVar( + "_UFuncT_co", + # the `| Callable` simplifies self-binding to the ufunc's callable signature + bound=np.ufunc | Callable[..., object], + default=np.ufunc, + covariant=True, +) +_Pss = ParamSpec("_Pss") +_T = TypeVar("_T") + +_Ignored: TypeAlias = object + +# A subset of `MaskedArray` that can be parametrized w.r.t. `np.generic` +_MaskedArray: TypeAlias = MaskedArray[_AnyShape, dtype[_ScalarT]] +_Masked1D: TypeAlias = MaskedArray[tuple[int], dtype[_ScalarT]] + +_MaskedArrayUInt_co: TypeAlias = _MaskedArray[unsignedinteger | np.bool] +_MaskedArrayInt_co: TypeAlias = _MaskedArray[integer | np.bool] +_MaskedArrayFloat64_co: TypeAlias = _MaskedArray[floating[_64Bit] | float32 | float16 | integer | np.bool] +_MaskedArrayFloat_co: TypeAlias = _MaskedArray[floating | integer | np.bool] +_MaskedArrayComplex128_co: TypeAlias = _MaskedArray[number[_64Bit] | number[_32Bit] | float16 | integer | np.bool] +_MaskedArrayComplex_co: TypeAlias = _MaskedArray[inexact | integer | np.bool] +_MaskedArrayNumber_co: TypeAlias = _MaskedArray[number | np.bool] +_MaskedArrayTD64_co: TypeAlias = _MaskedArray[timedelta64 | integer | np.bool] + +_ArrayInt_co: TypeAlias = NDArray[integer | bool_] +_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] + +_ConvertibleToInt: TypeAlias = SupportsInt | SupportsIndex | _CharLike_co +_ConvertibleToFloat: TypeAlias = SupportsFloat | SupportsIndex | _CharLike_co +_ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co +_ConvertibleToTD64: TypeAlias = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None +_ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None +_ArangeScalar: TypeAlias = floating | integer | datetime64 | timedelta64 + +_NoMaskType: TypeAlias = np.bool_[Literal[False]] # type of `np.False_` +_MaskArray: TypeAlias = np.ndarray[_ShapeOrAnyT, np.dtype[np.bool_]] + +_FillValue: TypeAlias = complex | None # int | float | complex | None +_FillValueCallable: TypeAlias = Callable[[np.dtype | ArrayLike], _FillValue] +_DomainCallable: TypeAlias = Callable[..., NDArray[np.bool_]] + +### + +MaskType = np.bool_ + +nomask: Final[_NoMaskType] = ... + +class MaskedArrayFutureWarning(FutureWarning): ... +class MAError(Exception): ... +class MaskError(MAError): ... + +# not generic at runtime +class _MaskedUFunc(Generic[_UFuncT_co]): + f: _UFuncT_co # readonly + def __init__(self, /, ufunc: _UFuncT_co) -> None: ... + +# not generic at runtime +class _MaskedUnaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): + fill: Final[_FillValue] + domain: Final[_DomainCallable | None] + + def __init__(self, /, mufunc: _UFuncT_co, fill: _FillValue = 0, domain: _DomainCallable | None = None) -> None: ... + + # NOTE: This might not work with overloaded callable signatures might not work on + # pyright, which is a long-standing issue, and is unique to pyright: + # https://github.com/microsoft/pyright/issues/9663 + # https://github.com/microsoft/pyright/issues/10849 + # https://github.com/microsoft/pyright/issues/10899 + # https://github.com/microsoft/pyright/issues/11049 + def __call__( + self: _MaskedUnaryOperation[Callable[Concatenate[Any, _Pss], _T]], + /, + a: ArrayLike, + *args: _Pss.args, + **kwargs: _Pss.kwargs, + ) -> _T: ... + +# not generic at runtime +class _MaskedBinaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): + fillx: Final[_FillValue] + filly: Final[_FillValue] + + def __init__(self, /, mbfunc: _UFuncT_co, fillx: _FillValue = 0, filly: _FillValue = 0) -> None: ... + + # NOTE: See the comment in `_MaskedUnaryOperation.__call__` + def __call__( + self: _MaskedBinaryOperation[Callable[Concatenate[Any, Any, _Pss], _T]], + /, + a: ArrayLike, + b: ArrayLike, + *args: _Pss.args, + **kwargs: _Pss.kwargs, + ) -> _T: ... + + # NOTE: We cannot meaningfully annotate the return (d)types of these methods until + # the signatures of the corresponding `numpy.ufunc` methods are specified. + def reduce(self, /, target: ArrayLike, axis: SupportsIndex = 0, dtype: DTypeLike | None = None) -> Incomplete: ... + def outer(self, /, a: ArrayLike, b: ArrayLike) -> _MaskedArray[Incomplete]: ... + def accumulate(self, /, target: ArrayLike, axis: SupportsIndex = 0) -> _MaskedArray[Incomplete]: ... + +# not generic at runtime +class _DomainedBinaryOperation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): + domain: Final[_DomainCallable] + fillx: Final[_FillValue] + filly: Final[_FillValue] + + def __init__( + self, + /, + dbfunc: _UFuncT_co, + domain: _DomainCallable, + fillx: _FillValue = 0, + filly: _FillValue = 0, + ) -> None: ... + + # NOTE: See the comment in `_MaskedUnaryOperation.__call__` + def __call__( + self: _DomainedBinaryOperation[Callable[Concatenate[Any, Any, _Pss], _T]], + /, + a: ArrayLike, + b: ArrayLike, + *args: _Pss.args, + **kwargs: _Pss.kwargs, + ) -> _T: ... + +# not generic at runtime +class _extrema_operation(_MaskedUFunc[_UFuncT_co], Generic[_UFuncT_co]): + compare: Final[_MaskedBinaryOperation] + fill_value_func: Final[_FillValueCallable] + + def __init__( + self, + /, + ufunc: _UFuncT_co, + compare: _MaskedBinaryOperation, + fill_value: _FillValueCallable, + ) -> None: ... + + # NOTE: This class is only used internally for `maximum` and `minimum`, so we are + # able to annotate the `__call__` method specifically for those two functions. + @overload + def __call__(self, /, a: _ArrayLike[_ScalarT], b: _ArrayLike[_ScalarT]) -> _MaskedArray[_ScalarT]: ... + @overload + def __call__(self, /, a: ArrayLike, b: ArrayLike) -> _MaskedArray[Incomplete]: ... + + # NOTE: We cannot meaningfully annotate the return (d)types of these methods until + # the signatures of the corresponding `numpy.ufunc` methods are specified. + def reduce(self, /, target: ArrayLike, axis: SupportsIndex | _NoValueType = ...) -> Incomplete: ... + def outer(self, /, a: ArrayLike, b: ArrayLike) -> _MaskedArray[Incomplete]: ... + +@final +class _MaskedPrintOption: + _display: str + _enabled: bool | Literal[0, 1] + def __init__(self, /, display: str) -> None: ... + def display(self, /) -> str: ... + def set_display(self, /, s: str) -> None: ... + def enabled(self, /) -> bool: ... + def enable(self, /, shrink: bool | Literal[0, 1] = 1) -> None: ... + +masked_print_option: Final[_MaskedPrintOption] = ... + +exp: _MaskedUnaryOperation = ... +conjugate: _MaskedUnaryOperation = ... +sin: _MaskedUnaryOperation = ... +cos: _MaskedUnaryOperation = ... +arctan: _MaskedUnaryOperation = ... +arcsinh: _MaskedUnaryOperation = ... +sinh: _MaskedUnaryOperation = ... +cosh: _MaskedUnaryOperation = ... +tanh: _MaskedUnaryOperation = ... +abs: _MaskedUnaryOperation = ... +absolute: _MaskedUnaryOperation = ... +angle: _MaskedUnaryOperation = ... +fabs: _MaskedUnaryOperation = ... +negative: _MaskedUnaryOperation = ... +floor: _MaskedUnaryOperation = ... +ceil: _MaskedUnaryOperation = ... +around: _MaskedUnaryOperation = ... +logical_not: _MaskedUnaryOperation = ... +sqrt: _MaskedUnaryOperation = ... +log: _MaskedUnaryOperation = ... +log2: _MaskedUnaryOperation = ... +log10: _MaskedUnaryOperation = ... +tan: _MaskedUnaryOperation = ... +arcsin: _MaskedUnaryOperation = ... +arccos: _MaskedUnaryOperation = ... +arccosh: _MaskedUnaryOperation = ... +arctanh: _MaskedUnaryOperation = ... + +add: _MaskedBinaryOperation = ... +subtract: _MaskedBinaryOperation = ... +multiply: _MaskedBinaryOperation = ... +arctan2: _MaskedBinaryOperation = ... +equal: _MaskedBinaryOperation = ... +not_equal: _MaskedBinaryOperation = ... +less_equal: _MaskedBinaryOperation = ... +greater_equal: _MaskedBinaryOperation = ... +less: _MaskedBinaryOperation = ... +greater: _MaskedBinaryOperation = ... +logical_and: _MaskedBinaryOperation = ... +def alltrue(target: ArrayLike, axis: SupportsIndex | None = 0, dtype: _DTypeLikeBool | None = None) -> Incomplete: ... +logical_or: _MaskedBinaryOperation = ... +def sometrue(target: ArrayLike, axis: SupportsIndex | None = 0, dtype: _DTypeLikeBool | None = None) -> Incomplete: ... +logical_xor: _MaskedBinaryOperation = ... +bitwise_and: _MaskedBinaryOperation = ... +bitwise_or: _MaskedBinaryOperation = ... +bitwise_xor: _MaskedBinaryOperation = ... +hypot: _MaskedBinaryOperation = ... + +divide: _DomainedBinaryOperation = ... +true_divide: _DomainedBinaryOperation = ... +floor_divide: _DomainedBinaryOperation = ... +remainder: _DomainedBinaryOperation = ... +fmod: _DomainedBinaryOperation = ... +mod: _DomainedBinaryOperation = ... + +# `obj` can be anything (even `object()`), and is too "flexible", so we can't +# meaningfully annotate it, or its return type. +def default_fill_value(obj: object) -> Any: ... +def minimum_fill_value(obj: object) -> Any: ... +def maximum_fill_value(obj: object) -> Any: ... + +# +@overload # returns `a.fill_value` if `a` is a `MaskedArray` +def get_fill_value(a: _MaskedArray[_ScalarT]) -> _ScalarT: ... +@overload # otherwise returns `default_fill_value(a)` +def get_fill_value(a: object) -> Any: ... + +# this is a noop if `a` isn't a `MaskedArray`, so we only accept `MaskedArray` input +def set_fill_value(a: MaskedArray, fill_value: _ScalarLike_co) -> None: ... + +# the return type depends on the *values* of `a` and `b` (which cannot be known +# statically), which is why we need to return an awkward `_ | None` +@overload +def common_fill_value(a: _MaskedArray[_ScalarT], b: MaskedArray) -> _ScalarT | None: ... +@overload +def common_fill_value(a: object, b: object) -> Any: ... + +# keep in sync with `fix_invalid`, but return `ndarray` instead of `MaskedArray` +@overload +def filled(a: ndarray[_ShapeT, _DTypeT], fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT, _DTypeT]: ... +@overload +def filled(a: _ArrayLike[_ScalarT], fill_value: _ScalarLike_co | None = None) -> NDArray[_ScalarT]: ... +@overload +def filled(a: ArrayLike, fill_value: _ScalarLike_co | None = None) -> NDArray[Incomplete]: ... + +# keep in sync with `filled`, but return `MaskedArray` instead of `ndarray` +@overload +def fix_invalid( + a: np.ndarray[_ShapeT, _DTypeT], + mask: _ArrayLikeBool_co = nomask, + copy: bool = True, + fill_value: _ScalarLike_co | None = None, +) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload +def fix_invalid( + a: _ArrayLike[_ScalarT], + mask: _ArrayLikeBool_co = nomask, + copy: bool = True, + fill_value: _ScalarLike_co | None = None, +) -> _MaskedArray[_ScalarT]: ... +@overload +def fix_invalid( + a: ArrayLike, + mask: _ArrayLikeBool_co = nomask, + copy: bool = True, + fill_value: _ScalarLike_co | None = None, +) -> _MaskedArray[Incomplete]: ... + +# +def get_masked_subclass(*arrays: object) -> type[MaskedArray]: ... + +# +@overload +def getdata(a: np.ndarray[_ShapeT, _DTypeT], subok: bool = True) -> np.ndarray[_ShapeT, _DTypeT]: ... +@overload +def getdata(a: _ArrayLike[_ScalarT], subok: bool = True) -> NDArray[_ScalarT]: ... +@overload +def getdata(a: ArrayLike, subok: bool = True) -> NDArray[Incomplete]: ... + +get_data = getdata + +# +@overload +def getmask(a: _ScalarLike_co) -> _NoMaskType: ... +@overload +def getmask(a: MaskedArray[_ShapeT, Any]) -> _MaskArray[_ShapeT] | _NoMaskType: ... +@overload +def getmask(a: ArrayLike) -> _MaskArray | _NoMaskType: ... + +get_mask = getmask + +# like `getmask`, but instead of `nomask` returns `make_mask_none(arr, arr.dtype?)` +@overload +def getmaskarray(arr: _ScalarLike_co) -> _MaskArray[tuple[()]]: ... +@overload +def getmaskarray(arr: np.ndarray[_ShapeT, Any]) -> _MaskArray[_ShapeT]: ... + +# It's sufficient for `m` to have dtype with type: `type[np.bool_]`, +# which isn't necessarily a ndarray. Please open an issue if this causes issues. +def is_mask(m: object) -> TypeIs[NDArray[bool_]]: ... + +# +@overload +def make_mask_descr(ndtype: _VoidDTypeLike) -> np.dtype[np.void]: ... +@overload +def make_mask_descr(ndtype: _DTypeLike[np.generic] | str | type) -> np.dtype[np.bool_]: ... + +# +@overload # m is nomask +def make_mask( + m: _NoMaskType, + copy: bool = False, + shrink: bool = True, + dtype: _DTypeLikeBool = ..., +) -> _NoMaskType: ... +@overload # m: ndarray, shrink=True (default), dtype: bool-like (default) +def make_mask( + m: np.ndarray[_ShapeT], + copy: bool = False, + shrink: Literal[True] = True, + dtype: _DTypeLikeBool = ..., +) -> _MaskArray[_ShapeT] | _NoMaskType: ... +@overload # m: ndarray, shrink=False (kwarg), dtype: bool-like (default) +def make_mask( + m: np.ndarray[_ShapeT], + copy: bool = False, + *, + shrink: Literal[False], + dtype: _DTypeLikeBool = ..., +) -> _MaskArray[_ShapeT]: ... +@overload # m: ndarray, dtype: void-like +def make_mask( + m: np.ndarray[_ShapeT], + copy: bool = False, + shrink: bool = True, + *, + dtype: _DTypeLikeVoid, +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +@overload # m: array-like, shrink=True (default), dtype: bool-like (default) +def make_mask( + m: ArrayLike, + copy: bool = False, + shrink: Literal[True] = True, + dtype: _DTypeLikeBool = ..., +) -> _MaskArray | _NoMaskType: ... +@overload # m: array-like, shrink=False (kwarg), dtype: bool-like (default) +def make_mask( + m: ArrayLike, + copy: bool = False, + *, + shrink: Literal[False], + dtype: _DTypeLikeBool = ..., +) -> _MaskArray: ... +@overload # m: array-like, dtype: void-like +def make_mask( + m: ArrayLike, + copy: bool = False, + shrink: bool = True, + *, + dtype: _DTypeLikeVoid, +) -> NDArray[np.void]: ... +@overload # fallback +def make_mask( + m: ArrayLike, + copy: bool = False, + shrink: bool = True, + *, + dtype: DTypeLike = ..., +) -> NDArray[Incomplete] | _NoMaskType: ... + +# +@overload # known shape, dtype: unstructured (default) +def make_mask_none(newshape: _ShapeT, dtype: np.dtype | type | str | None = None) -> _MaskArray[_ShapeT]: ... +@overload # known shape, dtype: structured +def make_mask_none(newshape: _ShapeT, dtype: _VoidDTypeLike) -> np.ndarray[_ShapeT, dtype[np.void]]: ... +@overload # unknown shape, dtype: unstructured (default) +def make_mask_none(newshape: _ShapeLike, dtype: np.dtype | type | str | None = None) -> _MaskArray: ... +@overload # unknown shape, dtype: structured +def make_mask_none(newshape: _ShapeLike, dtype: _VoidDTypeLike) -> NDArray[np.void]: ... + +# +@overload # nomask, scalar-like, shrink=True (default) +def mask_or( + m1: _NoMaskType | Literal[False], + m2: _ScalarLike_co, + copy: bool = False, + shrink: Literal[True] = True, +) -> _NoMaskType: ... +@overload # nomask, scalar-like, shrink=False (kwarg) +def mask_or( + m1: _NoMaskType | Literal[False], + m2: _ScalarLike_co, + copy: bool = False, + *, + shrink: Literal[False], +) -> _MaskArray[tuple[()]]: ... +@overload # scalar-like, nomask, shrink=True (default) +def mask_or( + m1: _ScalarLike_co, + m2: _NoMaskType | Literal[False], + copy: bool = False, + shrink: Literal[True] = True, +) -> _NoMaskType: ... +@overload # scalar-like, nomask, shrink=False (kwarg) +def mask_or( + m1: _ScalarLike_co, + m2: _NoMaskType | Literal[False], + copy: bool = False, + *, + shrink: Literal[False], +) -> _MaskArray[tuple[()]]: ... +@overload # ndarray, ndarray | nomask, shrink=True (default) +def mask_or( + m1: np.ndarray[_ShapeT, np.dtype[_ScalarT]], + m2: np.ndarray[_ShapeT, np.dtype[_ScalarT]] | _NoMaskType | Literal[False], + copy: bool = False, + shrink: Literal[True] = True, +) -> _MaskArray[_ShapeT] | _NoMaskType: ... +@overload # ndarray, ndarray | nomask, shrink=False (kwarg) +def mask_or( + m1: np.ndarray[_ShapeT, np.dtype[_ScalarT]], + m2: np.ndarray[_ShapeT, np.dtype[_ScalarT]] | _NoMaskType | Literal[False], + copy: bool = False, + *, + shrink: Literal[False], +) -> _MaskArray[_ShapeT]: ... +@overload # ndarray | nomask, ndarray, shrink=True (default) +def mask_or( + m1: np.ndarray[_ShapeT, np.dtype[_ScalarT]] | _NoMaskType | Literal[False], + m2: np.ndarray[_ShapeT, np.dtype[_ScalarT]], + copy: bool = False, + shrink: Literal[True] = True, +) -> _MaskArray[_ShapeT] | _NoMaskType: ... +@overload # ndarray | nomask, ndarray, shrink=False (kwarg) +def mask_or( + m1: np.ndarray[_ShapeT, np.dtype[_ScalarT]] | _NoMaskType | Literal[False], + m2: np.ndarray[_ShapeT, np.dtype[_ScalarT]], + copy: bool = False, + *, + shrink: Literal[False], +) -> _MaskArray[_ShapeT]: ... + +# +@overload +def flatten_mask(mask: np.ndarray[_ShapeT]) -> _MaskArray[_ShapeT]: ... +@overload +def flatten_mask(mask: ArrayLike) -> _MaskArray: ... + +# NOTE: we currently don't know the field types of `void` dtypes, so it's not possible +# to know the output dtype of the returned array. +@overload +def flatten_structured_array(a: MaskedArray[_ShapeT, np.dtype[np.void]]) -> MaskedArray[_ShapeT]: ... +@overload +def flatten_structured_array(a: np.ndarray[_ShapeT, np.dtype[np.void]]) -> np.ndarray[_ShapeT]: ... +@overload # for some reason this accepts unstructured array-likes, hence this fallback overload +def flatten_structured_array(a: ArrayLike) -> np.ndarray: ... + +# keep in sync with other the `masked_*` functions +@overload # known array with known shape and dtype +def masked_invalid(a: ndarray[_ShapeT, _DTypeT], copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload # array-like of known scalar-type +def masked_invalid(a: _ArrayLike[_ScalarT], copy: bool = True) -> _MaskedArray[_ScalarT]: ... +@overload # unknown array-like +def masked_invalid(a: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... + +# keep in sync with other the `masked_*` functions +@overload # array-like of known scalar-type +def masked_where( + condition: _ArrayLikeBool_co, a: ndarray[_ShapeT, _DTypeT], copy: bool = True +) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload # array-like of known scalar-type +def masked_where(condition: _ArrayLikeBool_co, a: _ArrayLike[_ScalarT], copy: bool = True) -> _MaskedArray[_ScalarT]: ... +@overload # unknown array-like +def masked_where(condition: _ArrayLikeBool_co, a: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... + +# keep in sync with other the `masked_*` functions +@overload # known array with known shape and dtype +def masked_greater(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload # array-like of known scalar-type +def masked_greater(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +@overload # unknown array-like +def masked_greater(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... + +# keep in sync with other the `masked_*` functions +@overload # known array with known shape and dtype +def masked_greater_equal(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload # array-like of known scalar-type +def masked_greater_equal(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +@overload # unknown array-like +def masked_greater_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... + +# keep in sync with other the `masked_*` functions +@overload # known array with known shape and dtype +def masked_less(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload # array-like of known scalar-type +def masked_less(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +@overload # unknown array-like +def masked_less(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... + +# keep in sync with other the `masked_*` functions +@overload # known array with known shape and dtype +def masked_less_equal(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload # array-like of known scalar-type +def masked_less_equal(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +@overload # unknown array-like +def masked_less_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... + +# keep in sync with other the `masked_*` functions +@overload # known array with known shape and dtype +def masked_not_equal(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload # array-like of known scalar-type +def masked_not_equal(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +@overload # unknown array-like +def masked_not_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... + +# keep in sync with other the `masked_*` functions +@overload # known array with known shape and dtype +def masked_equal(x: ndarray[_ShapeT, _DTypeT], value: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload # array-like of known scalar-type +def masked_equal(x: _ArrayLike[_ScalarT], value: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +@overload # unknown array-like +def masked_equal(x: ArrayLike, value: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... + +# keep in sync with other the `masked_*` functions +@overload # known array with known shape and dtype +def masked_inside(x: ndarray[_ShapeT, _DTypeT], v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload # array-like of known scalar-type +def masked_inside(x: _ArrayLike[_ScalarT], v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +@overload # unknown array-like +def masked_inside(x: ArrayLike, v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... + +# keep in sync with other the `masked_*` functions +@overload # known array with known shape and dtype +def masked_outside(x: ndarray[_ShapeT, _DTypeT], v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload # array-like of known scalar-type +def masked_outside(x: _ArrayLike[_ScalarT], v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[_ScalarT]: ... +@overload # unknown array-like +def masked_outside(x: ArrayLike, v1: ArrayLike, v2: ArrayLike, copy: bool = True) -> _MaskedArray[Incomplete]: ... + +# only intended for object arrays, so we assume that's how it's always used in practice +@overload +def masked_object( + x: np.ndarray[_ShapeT, np.dtype[np.object_]], + value: object, + copy: bool = True, + shrink: bool = True, +) -> MaskedArray[_ShapeT, np.dtype[np.object_]]: ... +@overload +def masked_object( + x: _ArrayLikeObject_co, + value: object, + copy: bool = True, + shrink: bool = True, +) -> _MaskedArray[np.object_]: ... + +# keep roughly in sync with `filled` +@overload +def masked_values( + x: np.ndarray[_ShapeT, _DTypeT], + value: _ScalarLike_co, + rtol: float = 1e-5, + atol: float = 1e-8, + copy: bool = True, + shrink: bool = True +) -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload +def masked_values( + x: _ArrayLike[_ScalarT], + value: _ScalarLike_co, + rtol: float = 1e-5, + atol: float = 1e-8, + copy: bool = True, + shrink: bool = True +) -> _MaskedArray[_ScalarT]: ... +@overload +def masked_values( + x: ArrayLike, + value: _ScalarLike_co, + rtol: float = 1e-5, + atol: float = 1e-8, + copy: bool = True, + shrink: bool = True +) -> _MaskedArray[Incomplete]: ... + +# TODO: Support non-boolean mask dtypes, such as `np.void`. This will require adding an +# additional generic type parameter to (at least) `MaskedArray` and `MaskedIterator` to +# hold the dtype of the mask. + +class MaskedIterator(Generic[_ShapeT_co, _DTypeT_co]): + ma: MaskedArray[_ShapeT_co, _DTypeT_co] # readonly + dataiter: np.flatiter[ndarray[_ShapeT_co, _DTypeT_co]] # readonly + maskiter: Final[np.flatiter[NDArray[np.bool]]] + + def __init__(self, ma: MaskedArray[_ShapeT_co, _DTypeT_co]) -> None: ... + def __iter__(self) -> Self: ... + + # Similar to `MaskedArray.__getitem__` but without the `void` case. + @overload + def __getitem__(self, indx: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self, indx: SupportsIndex | tuple[SupportsIndex, ...], /) -> Incomplete: ... + @overload + def __getitem__(self, indx: _ToIndices, /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + + # Similar to `ndarray.__setitem__` but without the `void` case. + @overload # flexible | object_ | bool + def __setitem__( + self: MaskedIterator[Any, dtype[flexible | object_ | np.bool] | dtypes.StringDType], + index: _ToIndices, + value: object, + /, + ) -> None: ... + @overload # integer + def __setitem__( + self: MaskedIterator[Any, dtype[integer]], + index: _ToIndices, + value: _ConvertibleToInt | _NestedSequence[_ConvertibleToInt] | _ArrayLikeInt_co, + /, + ) -> None: ... + @overload # floating + def __setitem__( + self: MaskedIterator[Any, dtype[floating]], + index: _ToIndices, + value: _ConvertibleToFloat | _NestedSequence[_ConvertibleToFloat | None] | _ArrayLikeFloat_co | None, + /, + ) -> None: ... + @overload # complexfloating + def __setitem__( + self: MaskedIterator[Any, dtype[complexfloating]], + index: _ToIndices, + value: _ConvertibleToComplex | _NestedSequence[_ConvertibleToComplex | None] | _ArrayLikeNumber_co | None, + /, + ) -> None: ... + @overload # timedelta64 + def __setitem__( + self: MaskedIterator[Any, dtype[timedelta64]], + index: _ToIndices, + value: _ConvertibleToTD64 | _NestedSequence[_ConvertibleToTD64], + /, + ) -> None: ... + @overload # datetime64 + def __setitem__( + self: MaskedIterator[Any, dtype[datetime64]], + index: _ToIndices, + value: _ConvertibleToDT64 | _NestedSequence[_ConvertibleToDT64], + /, + ) -> None: ... + @overload # catch-all + def __setitem__(self, index: _ToIndices, value: ArrayLike, /) -> None: ... + + # TODO: Returns `mvoid[(), _DTypeT_co]` for masks with `np.void` dtype. + def __next__(self: MaskedIterator[Any, np.dtype[_ScalarT]]) -> _ScalarT: ... + +class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): + __array_priority__: Final[Literal[15]] = 15 + + @overload + def __new__( + cls, + data: _ArrayLike[_ScalarT], + mask: _ArrayLikeBool_co = nomask, + dtype: None = None, + copy: bool = False, + subok: bool = True, + ndmin: int = 0, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool | None = None, + shrink: bool = True, + order: _OrderKACF | None = None, + ) -> _MaskedArray[_ScalarT]: ... + @overload + def __new__( + cls, + data: object, + mask: _ArrayLikeBool_co, + dtype: _DTypeLike[_ScalarT], + copy: bool = False, + subok: bool = True, + ndmin: int = 0, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool | None = None, + shrink: bool = True, + order: _OrderKACF | None = None, + ) -> _MaskedArray[_ScalarT]: ... + @overload + def __new__( + cls, + data: object, + mask: _ArrayLikeBool_co = nomask, + *, + dtype: _DTypeLike[_ScalarT], + copy: bool = False, + subok: bool = True, + ndmin: int = 0, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool | None = None, + shrink: bool = True, + order: _OrderKACF | None = None, + ) -> _MaskedArray[_ScalarT]: ... + @overload + def __new__( + cls, + data: object = None, + mask: _ArrayLikeBool_co = nomask, + dtype: DTypeLike | None = None, + copy: bool = False, + subok: bool = True, + ndmin: int = 0, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool | None = None, + shrink: bool = True, + order: _OrderKACF | None = None, + ) -> _MaskedArray[Any]: ... + + def __array_wrap__( + self, + obj: ndarray[_ShapeT, _DTypeT], + context: tuple[ufunc, tuple[Any, ...], int] | None = None, + return_scalar: bool = False, + ) -> MaskedArray[_ShapeT, _DTypeT]: ... + + @overload # type: ignore[override] # () + def view(self, /, dtype: None = None, type: None = None, fill_value: _ScalarLike_co | None = None) -> Self: ... + @overload # (dtype: DTypeT) + def view( + self, + /, + dtype: _DTypeT | _HasDType[_DTypeT], + type: None = None, + fill_value: _ScalarLike_co | None = None + ) -> MaskedArray[_ShapeT_co, _DTypeT]: ... + @overload # (dtype: dtype[ScalarT]) + def view( + self, + /, + dtype: _DTypeLike[_ScalarT], + type: None = None, + fill_value: _ScalarLike_co | None = None + ) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... + @overload # ([dtype: _, ]*, type: ArrayT) + def view( + self, + /, + dtype: DTypeLike | None = None, + *, + type: type[_ArrayT], + fill_value: _ScalarLike_co | None = None + ) -> _ArrayT: ... + @overload # (dtype: _, type: ArrayT) + def view(self, /, dtype: DTypeLike | None, type: type[_ArrayT], fill_value: _ScalarLike_co | None = None) -> _ArrayT: ... + @overload # (dtype: ArrayT, /) + def view(self, /, dtype: type[_ArrayT], type: None = None, fill_value: _ScalarLike_co | None = None) -> _ArrayT: ... + @overload # (dtype: ?) + def view( + self, + /, + # `_VoidDTypeLike | str | None` is like `DTypeLike` but without `_DTypeLike[Any]` to avoid + # overlaps with previous overloads. + dtype: _VoidDTypeLike | str | None, + type: None = None, + fill_value: _ScalarLike_co | None = None + ) -> MaskedArray[_ShapeT_co, dtype]: ... + + # Keep in sync with `ndarray.__getitem__` + @overload + def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self, key: SupportsIndex | tuple[SupportsIndex, ...], /) -> Any: ... + @overload + def __getitem__(self, key: _ToIndices, /) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self: _MaskedArray[void], indx: str, /) -> MaskedArray[_ShapeT_co, dtype]: ... + @overload + def __getitem__(self: _MaskedArray[void], indx: list[str], /) -> MaskedArray[_ShapeT_co, dtype[void]]: ... + + @property + def shape(self) -> _ShapeT_co: ... + @shape.setter # type: ignore[override] + def shape(self: MaskedArray[_ShapeT, Any], shape: _ShapeT, /) -> None: ... + + def __setmask__(self, mask: _ArrayLikeBool_co, copy: bool = False) -> None: ... + @property + def mask(self) -> np.ndarray[_ShapeT_co, dtype[MaskType]] | MaskType: ... + @mask.setter + def mask(self, value: _ArrayLikeBool_co, /) -> None: ... + @property + def recordmask(self) -> np.ndarray[_ShapeT_co, dtype[MaskType]] | MaskType: ... + @recordmask.setter + def recordmask(self, mask: Never, /) -> NoReturn: ... + def harden_mask(self) -> Self: ... + def soften_mask(self) -> Self: ... + @property + def hardmask(self) -> bool: ... + def unshare_mask(self) -> Self: ... + @property + def sharedmask(self) -> bool: ... + def shrink_mask(self) -> Self: ... + + @property + def baseclass(self) -> type[ndarray]: ... + + @property + def _data(self) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @property + def data(self) -> ndarray[_ShapeT_co, _DTypeT_co]: ... # type: ignore[override] + + @property # type: ignore[override] + def flat(self) -> MaskedIterator[_ShapeT_co, _DTypeT_co]: ... + @flat.setter + def flat(self, value: ArrayLike, /) -> None: ... + + @property + def fill_value(self: _MaskedArray[_ScalarT]) -> _ScalarT: ... + @fill_value.setter + def fill_value(self, value: _ScalarLike_co | None = None, /) -> None: ... + + def get_fill_value(self: _MaskedArray[_ScalarT]) -> _ScalarT: ... + def set_fill_value(self, /, value: _ScalarLike_co | None = None) -> None: ... + + def filled(self, /, fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def compressed(self) -> ndarray[tuple[int], _DTypeT_co]: ... + + # keep roughly in sync with `ma.core.compress`, but swap the first two arguments + @overload # type: ignore[override] + def compress( + self, + condition: _ArrayLikeBool_co, + axis: _ShapeLike | None, + out: _ArrayT + ) -> _ArrayT: ... + @overload + def compress( + self, + condition: _ArrayLikeBool_co, + axis: _ShapeLike | None = None, + *, + out: _ArrayT + ) -> _ArrayT: ... + @overload + def compress( + self, + condition: _ArrayLikeBool_co, + axis: None = None, + out: None = None + ) -> MaskedArray[tuple[int], _DTypeT_co]: ... + @overload + def compress( + self, + condition: _ArrayLikeBool_co, + axis: _ShapeLike | None = None, + out: None = None + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + + # TODO: How to deal with the non-commutative nature of `==` and `!=`? + # xref numpy/numpy#17368 + def __eq__(self, other: Incomplete, /) -> Incomplete: ... + def __ne__(self, other: Incomplete, /) -> Incomplete: ... + + def __ge__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] + def __gt__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] + def __le__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] + def __lt__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] + + # Keep in sync with `ndarray.__add__` + @overload # type: ignore[override] + def __add__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __add__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __add__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __add__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __add__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __add__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __add__(self: _MaskedArrayTD64_co, other: _ArrayLikeDT64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __add__(self: _MaskedArray[datetime64], other: _ArrayLikeTD64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __add__(self: _MaskedArray[bytes_], other: _ArrayLikeBytes_co, /) -> _MaskedArray[bytes_]: ... + @overload + def __add__(self: _MaskedArray[str_], other: _ArrayLikeStr_co, /) -> _MaskedArray[str_]: ... + @overload + def __add__( + self: MaskedArray[Any, dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> MaskedArray[_AnyShape, dtypes.StringDType]: ... + @overload + def __add__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __add__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__radd__` + @overload # type: ignore[override] # signature equivalent to __add__ + def __radd__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __radd__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __radd__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __radd__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __radd__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __radd__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __radd__(self: _MaskedArrayTD64_co, other: _ArrayLikeDT64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __radd__(self: _MaskedArray[datetime64], other: _ArrayLikeTD64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __radd__(self: _MaskedArray[bytes_], other: _ArrayLikeBytes_co, /) -> _MaskedArray[bytes_]: ... + @overload + def __radd__(self: _MaskedArray[str_], other: _ArrayLikeStr_co, /) -> _MaskedArray[str_]: ... + @overload + def __radd__( + self: MaskedArray[Any, dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> MaskedArray[_AnyShape, dtypes.StringDType]: ... + @overload + def __radd__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __radd__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__sub__` + @overload # type: ignore[override] + def __sub__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __sub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __sub__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __sub__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __sub__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __sub__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __sub__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __sub__(self: _MaskedArray[datetime64], other: _ArrayLikeTD64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __sub__(self: _MaskedArray[datetime64], other: _ArrayLikeDT64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __sub__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __sub__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__rsub__` + @overload # type: ignore[override] + def __rsub__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rsub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __rsub__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rsub__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rsub__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __rsub__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __rsub__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __rsub__(self: _MaskedArrayTD64_co, other: _ArrayLikeDT64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __rsub__(self: _MaskedArray[datetime64], other: _ArrayLikeDT64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __rsub__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rsub__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__mul__` + @overload # type: ignore[override] + def __mul__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __mul__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __mul__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __mul__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __mul__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __mul__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __mul__(self: _MaskedArray[timedelta64], other: _ArrayLikeFloat_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __mul__(self: _MaskedArrayFloat_co, other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... + @overload + def __mul__( + self: MaskedArray[Any, dtype[character] | dtypes.StringDType], + other: _ArrayLikeInt, + /, + ) -> MaskedArray[tuple[Any, ...], _DTypeT_co]: ... + @overload + def __mul__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __mul__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__rmul__` + @overload # type: ignore[override] # signature equivalent to __mul__ + def __rmul__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rmul__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rmul__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rmul__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __rmul__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __rmul__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __rmul__(self: _MaskedArray[timedelta64], other: _ArrayLikeFloat_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __rmul__(self: _MaskedArrayFloat_co, other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... + @overload + def __rmul__( + self: MaskedArray[Any, dtype[character] | dtypes.StringDType], + other: _ArrayLikeInt, + /, + ) -> MaskedArray[tuple[Any, ...], _DTypeT_co]: ... + @overload + def __rmul__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rmul__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__truediv__` + @overload # type: ignore[override] + def __truediv__(self: _MaskedArrayInt_co | _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __truediv__(self: _MaskedArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __truediv__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __truediv__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __truediv__(self: _MaskedArray[floating], other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __truediv__(self: _MaskedArrayFloat_co, other: _ArrayLike[floating], /) -> _MaskedArray[floating]: ... + @overload + def __truediv__(self: _MaskedArray[complexfloating], other: _ArrayLikeNumber_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __truediv__(self: _MaskedArrayNumber_co, other: _ArrayLike[complexfloating], /) -> _MaskedArray[complexfloating]: ... + @overload + def __truediv__(self: _MaskedArray[inexact], other: _ArrayLikeNumber_co, /) -> _MaskedArray[inexact]: ... + @overload + def __truediv__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __truediv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[float64]: ... + @overload + def __truediv__(self: _MaskedArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __truediv__(self: _MaskedArray[timedelta64], other: _ArrayLikeFloat_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __truediv__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __truediv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__rtruediv__` + @overload # type: ignore[override] + def __rtruediv__(self: _MaskedArrayInt_co | _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rtruediv__(self: _MaskedArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rtruediv__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __rtruediv__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __rtruediv__(self: _MaskedArray[floating], other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __rtruediv__(self: _MaskedArrayFloat_co, other: _ArrayLike[floating], /) -> _MaskedArray[floating]: ... + @overload + def __rtruediv__(self: _MaskedArray[complexfloating], other: _ArrayLikeNumber_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __rtruediv__(self: _MaskedArrayNumber_co, other: _ArrayLike[complexfloating], /) -> _MaskedArray[complexfloating]: ... + @overload + def __rtruediv__(self: _MaskedArray[inexact], other: _ArrayLikeNumber_co, /) -> _MaskedArray[inexact]: ... + @overload + def __rtruediv__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __rtruediv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[float64]: ... + @overload + def __rtruediv__(self: _MaskedArray[integer | floating], other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... + @overload + def __rtruediv__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rtruediv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__floordiv__` + @overload # type: ignore[override] + def __floordiv__(self: _MaskedArray[_RealNumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_RealNumberT]]: ... + @overload + def __floordiv__(self: _MaskedArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _MaskedArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __floordiv__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __floordiv__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... + @overload + def __floordiv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[int64]: ... + @overload + def __floordiv__(self: _MaskedArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __floordiv__(self: _MaskedArray[timedelta64], other: _ArrayLikeFloat_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __floordiv__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __floordiv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__rfloordiv__` + @overload # type: ignore[override] + def __rfloordiv__(self: _MaskedArray[_RealNumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_RealNumberT]]: ... + @overload + def __rfloordiv__(self: _MaskedArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> _MaskedArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rfloordiv__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rfloordiv__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _MaskedArray[timedelta64], other: _ArrayLike[timedelta64], /) -> _MaskedArray[int64]: ... + @overload + def __rfloordiv__(self: _MaskedArray[floating | integer], other: _ArrayLike[timedelta64], /) -> _MaskedArray[timedelta64]: ... + @overload + def __rfloordiv__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rfloordiv__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__pow__` (minus the `mod` parameter) + @overload # type: ignore[override] + def __pow__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __pow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __pow__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __pow__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __pow__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __pow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __pow__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __pow__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __pow__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__rpow__` (minus the `mod` parameter) + @overload # type: ignore[override] + def __rpow__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rpow__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rpow__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rpow__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __rpow__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __rpow__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... + @overload + def __rpow__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... + @overload + def __rpow__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rpow__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # + @property # type: ignore[misc] + def imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... # type: ignore[override] + def get_imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... + + # + @property # type: ignore[misc] + def real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... # type: ignore[override] + def get_real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... + + # keep in sync with `np.ma.count` + @overload + def count(self, axis: None = None, keepdims: Literal[False] | _NoValueType = ...) -> int: ... + @overload + def count(self, axis: _ShapeLike, keepdims: bool | _NoValueType = ...) -> NDArray[int_]: ... + @overload + def count(self, axis: _ShapeLike | None = None, *, keepdims: Literal[True]) -> NDArray[int_]: ... + @overload + def count(self, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... + + # Keep in sync with `ndarray.reshape` + # NOTE: reshape also accepts negative integers, so we can't use integer literals + @overload # (None) + def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: bool | None = None) -> Self: ... + @overload # (empty_sequence) + def reshape( # type: ignore[overload-overlap] # mypy false positive + self, + shape: Sequence[Never], + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[()], _DTypeT_co]: ... + @overload # (() | (int) | (int, int) | ....) # up to 8-d + def reshape( + self, + shape: _AnyShapeT, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[_AnyShapeT, _DTypeT_co]: ... + @overload # (index) + def reshape( + self, + size1: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[int], _DTypeT_co]: ... + @overload # (index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[int, int], _DTypeT_co]: ... + @overload # (index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[int, int, int], _DTypeT_co]: ... + @overload # (index, index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + size4: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[tuple[int, int, int, int], _DTypeT_co]: ... + @overload # (int, *(index, ...)) + def reshape( + self, + size0: SupportsIndex, + /, + *shape: SupportsIndex, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + @overload # (sequence[index]) + def reshape( + self, + shape: Sequence[SupportsIndex], + /, + *, + order: _OrderACF = "C", + copy: bool | None = None, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + + def resize(self, newshape: Never, refcheck: bool = True, order: bool = False) -> NoReturn: ... # type: ignore[override] + def put(self, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... + def ids(self) -> tuple[int, int]: ... + def iscontiguous(self) -> bool: ... + + # Keep in sync with `ma.core.all` + @overload # type: ignore[override] + def all( + self, + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + ) -> bool_: ... + @overload + def all( + self, + axis: _ShapeLike | None = None, + out: None = None, + *, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... + @overload + def all( + self, + axis: _ShapeLike | None, + out: None, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... + @overload + def all( + self, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> bool_ | _MaskedArray[bool_]: ... + @overload + def all( + self, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def all( + self, + axis: _ShapeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + # Keep in sync with `ma.core.any` + @overload # type: ignore[override] + def any( + self, + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + ) -> bool_: ... + @overload + def any( + self, + axis: _ShapeLike | None = None, + out: None = None, + *, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... + @overload + def any( + self, + axis: _ShapeLike | None, + out: None, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... + @overload + def any( + self, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> bool_ | _MaskedArray[bool_]: ... + @overload + def any( + self, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def any( + self, + axis: _ShapeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + # Keep in sync with `ndarray.trace` and `ma.core.trace` + @overload + def trace( + self, # >= 2D MaskedArray + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + out: None = None, + ) -> Any: ... + @overload + def trace( + self, # >= 2D MaskedArray + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ) -> _ArrayT: ... + @overload + def trace( + self, # >= 2D MaskedArray + offset: SupportsIndex, + axis1: SupportsIndex, + axis2: SupportsIndex, + dtype: DTypeLike | None, + out: _ArrayT, + ) -> _ArrayT: ... + + # This differs from `ndarray.dot`, in that 1D dot 1D returns a 0D array. + @overload + def dot(self, b: ArrayLike, out: None = None, strict: bool = False) -> _MaskedArray[Any]: ... + @overload + def dot(self, b: ArrayLike, out: _ArrayT, strict: bool = False) -> _ArrayT: ... + + # Keep in sync with `ma.core.sum` + @overload # type: ignore[override] + def sum( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... + @overload + def sum( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def sum( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + # Keep in sync with `ndarray.cumsum` and `ma.core.cumsum` + @overload # out: None (default) + def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> _MaskedArray[Any]: ... + @overload # out: ndarray + def cumsum(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + + # Keep in sync with `ma.core.prod` + @overload # type: ignore[override] + def prod( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... + @overload + def prod( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def prod( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + product = prod + + # Keep in sync with `ndarray.cumprod` and `ma.core.cumprod` + @overload # out: None (default) + def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> _MaskedArray[Any]: ... + @overload # out: ndarray + def cumprod(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + + # Keep in sync with `ma.core.mean` + @overload # type: ignore[override] + def mean( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... + @overload + def mean( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def mean( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + # keep roughly in sync with `ma.core.anom` + @overload + def anom(self, axis: SupportsIndex | None = None, dtype: None = None) -> Self: ... + @overload + def anom(self, axis: SupportsIndex | None = None, *, dtype: DTypeLike) -> MaskedArray[_ShapeT_co, dtype]: ... + @overload + def anom(self, axis: SupportsIndex | None, dtype: DTypeLike) -> MaskedArray[_ShapeT_co, dtype]: ... + + # keep in sync with `std` and `ma.core.var` + @overload # type: ignore[override] + def var( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> Any: ... + @overload + def var( + self, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def var( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> _ArrayT: ... + + # keep in sync with `var` and `ma.core.std` + @overload # type: ignore[override] + def std( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> Any: ... + @overload + def std( + self, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def std( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., + ) -> _ArrayT: ... + + # Keep in sync with `ndarray.round` + @overload # out=None (default) + def round(self, /, decimals: SupportsIndex = 0, out: None = None) -> Self: ... + @overload # out=ndarray + def round(self, /, decimals: SupportsIndex, out: _ArrayT) -> _ArrayT: ... + @overload + def round(self, /, decimals: SupportsIndex = 0, *, out: _ArrayT) -> _ArrayT: ... + + def argsort( # type: ignore[override] + self, + axis: SupportsIndex | _NoValueType = ..., + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: bool = False, + ) -> _MaskedArray[intp]: ... + + # Keep in-sync with np.ma.argmin + @overload # type: ignore[override] + def argmin( + self, + axis: None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., + ) -> intp: ... + @overload + def argmin( + self, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... + @overload + def argmin( + self, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def argmin( + self, + axis: SupportsIndex | None, + fill_value: _ScalarLike_co | None, + out: _ArrayT, + *, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + # Keep in-sync with np.ma.argmax + @overload # type: ignore[override] + def argmax( + self, + axis: None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., + ) -> intp: ... + @overload + def argmax( + self, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... + @overload + def argmax( + self, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def argmax( + self, + axis: SupportsIndex | None, + fill_value: _ScalarLike_co | None, + out: _ArrayT, + *, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + # + def sort( # type: ignore[override] + self, + axis: SupportsIndex = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: Literal[False] | None = False, + ) -> None: ... + + # + @overload # type: ignore[override] + def min( + self: _MaskedArray[_ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] | _NoValueType = ..., + ) -> _ScalarT: ... + @overload + def min( + self, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ... + ) -> Any: ... + @overload + def min( + self, + axis: _ShapeLike | None, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def min( + self, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + # + @overload # type: ignore[override] + def max( + self: _MaskedArray[_ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] | _NoValueType = ..., + ) -> _ScalarT: ... + @overload + def max( + self, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ... + ) -> Any: ... + @overload + def max( + self, + axis: _ShapeLike | None, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def max( + self, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + # + @overload + def ptp( + self: _MaskedArray[_ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] = False, + ) -> _ScalarT: ... + @overload + def ptp( + self, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool = False, + ) -> Any: ... + @overload + def ptp( + self, + axis: _ShapeLike | None, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool = False, + ) -> _ArrayT: ... + @overload + def ptp( + self, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool = False, + ) -> _ArrayT: ... + + # + @overload + def partition( + self, + /, + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: None = None + ) -> None: ... + @overload + def partition( + self: _MaskedArray[np.void], + /, + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, + ) -> None: ... + + # + @overload + def argpartition( + self, + /, + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: None = None, + ) -> _MaskedArray[intp]: ... + @overload + def argpartition( + self: _MaskedArray[np.void], + /, + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, + ) -> _MaskedArray[intp]: ... + + # Keep in-sync with np.ma.take + @overload # type: ignore[override] + def take( # type: ignore[overload-overlap] + self: _MaskedArray[_ScalarT], + indices: _IntLike_co, + axis: None = None, + out: None = None, + mode: _ModeKind = "raise" + ) -> _ScalarT: ... + @overload + def take( + self: _MaskedArray[_ScalarT], + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", + ) -> _MaskedArray[_ScalarT]: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None, + out: _ArrayT, + mode: _ModeKind = "raise", + ) -> _ArrayT: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + *, + out: _ArrayT, + mode: _ModeKind = "raise", + ) -> _ArrayT: ... + + # keep in sync with `ndarray.diagonal` + @override + def diagonal( + self, + /, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + + # keep in sync with `ndarray.repeat` + @override + @overload + def repeat( + self, + /, + repeats: _ArrayLikeInt_co, + axis: None = None, + ) -> MaskedArray[tuple[int], _DTypeT_co]: ... + @overload + def repeat( + self, + /, + repeats: _ArrayLikeInt_co, + axis: SupportsIndex, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + + # keep in sync with `ndarray.flatten` and `ndarray.ravel` + @override + def flatten(self, /, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... + @override + def ravel(self, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... + + # keep in sync with `ndarray.squeeze` + @override + def squeeze( + self, + /, + axis: SupportsIndex | tuple[SupportsIndex, ...] | None = None, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + + # + def toflex(self) -> MaskedArray[_ShapeT_co, np.dtype[np.void]]: ... + def torecords(self) -> MaskedArray[_ShapeT_co, np.dtype[np.void]]: ... + + # + @override + def tobytes(self, /, fill_value: Incomplete | None = None, order: _OrderKACF = "C") -> bytes: ... # type: ignore[override] + + # keep in sync with `ndarray.tolist` + @override + @overload + def tolist(self: MaskedArray[tuple[Never], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None) -> Any: ... + @overload + def tolist(self: MaskedArray[tuple[()], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None) -> _T: ... + @overload + def tolist(self: MaskedArray[tuple[int], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None) -> list[_T]: ... + @overload + def tolist( + self: MaskedArray[tuple[int, int], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None + ) -> list[list[_T]]: ... + @overload + def tolist( + self: MaskedArray[tuple[int, int, int], dtype[generic[_T]]], /, fill_value: _ScalarLike_co | None = None + ) -> list[list[list[_T]]]: ... + @overload + def tolist(self, /, fill_value: _ScalarLike_co | None = None) -> Any: ... + + # NOTE: will raise `NotImplementedError` + @override + def tofile(self, /, fid: Never, sep: str = "", format: str = "%s") -> NoReturn: ... # type: ignore[override] + + # + @override + def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Self: ... + + # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` + @property + def dtype(self) -> _DTypeT_co: ... + @dtype.setter + def dtype(self: MaskedArray[_AnyShape, _DTypeT], dtype: _DTypeT, /) -> None: ... + +class mvoid(MaskedArray[_ShapeT_co, _DTypeT_co]): + def __new__( + self, # pyright: ignore[reportSelfClsParameterName] + data, + mask=..., + dtype=..., + fill_value=..., + hardmask=..., + copy=..., + subok=..., + ): ... + def __getitem__(self, indx): ... + def __setitem__(self, indx, value): ... + def __iter__(self): ... + def __len__(self): ... + def filled(self, fill_value=None): ... + def tolist(self): ... # type: ignore[override] + +def isMaskedArray(x: object) -> TypeIs[MaskedArray]: ... +def isarray(x: object) -> TypeIs[MaskedArray]: ... # alias to isMaskedArray +def isMA(x: object) -> TypeIs[MaskedArray]: ... # alias to isMaskedArray + +# 0D float64 array +class MaskedConstant(MaskedArray[tuple[()], dtype[float64]]): + def __new__(cls) -> Self: ... + + # these overrides are no-ops + @override + def __iadd__(self, other: _Ignored, /) -> Self: ... # type: ignore[override] + @override + def __isub__(self, other: _Ignored, /) -> Self: ... # type: ignore[override] + @override + def __imul__(self, other: _Ignored, /) -> Self: ... # type: ignore[override] + @override + def __ifloordiv__(self, other: _Ignored, /) -> Self: ... + @override + def __itruediv__(self, other: _Ignored, /) -> Self: ... # type: ignore[override] + @override + def __ipow__(self, other: _Ignored, /) -> Self: ... # type: ignore[override] + @override + def __deepcopy__(self, /, memo: _Ignored) -> Self: ... # type: ignore[override] + @override + def copy(self, /, *args: _Ignored, **kwargs: _Ignored) -> Self: ... + +masked: Final[MaskedConstant] = ... +masked_singleton: Final[MaskedConstant] = ... + +masked_array: TypeAlias = MaskedArray + +# keep in sync with `MaskedArray.__new__` +@overload +def array( + data: _ArrayLike[_ScalarT], + dtype: None = None, + copy: bool = False, + order: _OrderKACF | None = None, + mask: _ArrayLikeBool_co = nomask, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool = False, + shrink: bool = True, + subok: bool = True, + ndmin: int = 0, +) -> _MaskedArray[_ScalarT]: ... +@overload +def array( + data: object, + dtype: _DTypeLike[_ScalarT], + copy: bool = False, + order: _OrderKACF | None = None, + mask: _ArrayLikeBool_co = nomask, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool = False, + shrink: bool = True, + subok: bool = True, + ndmin: int = 0, +) -> _MaskedArray[_ScalarT]: ... +@overload +def array( + data: object, + dtype: DTypeLike | None = None, + copy: bool = False, + order: _OrderKACF | None = None, + mask: _ArrayLikeBool_co = nomask, + fill_value: _ScalarLike_co | None = None, + keep_mask: bool = True, + hard_mask: bool = False, + shrink: bool = True, + subok: bool = True, + ndmin: int = 0, +) -> _MaskedArray[_ScalarT]: ... + +# keep in sync with `array` +@overload +def asarray(a: _ArrayLike[_ScalarT], dtype: None = None, order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... +@overload +def asarray(a: object, dtype: _DTypeLike[_ScalarT], order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... +@overload +def asarray(a: object, dtype: DTypeLike | None = None, order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... + +# keep in sync with `asarray` (but note the additional first overload) +@overload +def asanyarray(a: _MArrayT, dtype: None = None, order: _OrderKACF | None = None) -> _MArrayT: ... +@overload +def asanyarray(a: _ArrayLike[_ScalarT], dtype: None = None, order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... +@overload +def asanyarray(a: object, dtype: _DTypeLike[_ScalarT], order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... +@overload +def asanyarray(a: object, dtype: DTypeLike | None = None, order: _OrderKACF | None = None) -> _MaskedArray[_ScalarT]: ... + +# +def is_masked(x: object) -> bool: ... + +@overload +def min( + obj: _ArrayLike[_ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def min( + obj: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ... +) -> Any: ... +@overload +def min( + obj: ArrayLike, + axis: _ShapeLike | None, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def min( + obj: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +@overload +def max( + obj: _ArrayLike[_ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def max( + obj: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ... +) -> Any: ... +@overload +def max( + obj: ArrayLike, + axis: _ShapeLike | None, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def max( + obj: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +@overload +def ptp( + obj: _ArrayLike[_ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def ptp( + obj: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ... +) -> Any: ... +@overload +def ptp( + obj: ArrayLike, + axis: _ShapeLike | None, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def ptp( + obj: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +# we cannot meaningfully annotate `frommethod` further, because the callable signature +# of the return type fully depends on the *value* of `methodname` and `reversed` in +# a way that cannot be expressed in the Python type system. +def _frommethod(methodname: str, reversed: bool = False) -> types.FunctionType: ... + +# NOTE: The following `*_mask` functions will accept any array-like input runtime, but +# since their use-cases are specific to masks, they only accept `MaskedArray` inputs. + +# keep in sync with `MaskedArray.harden_mask` +def harden_mask(a: _MArrayT) -> _MArrayT: ... +# keep in sync with `MaskedArray.soften_mask` +def soften_mask(a: _MArrayT) -> _MArrayT: ... +# keep in sync with `MaskedArray.shrink_mask` +def shrink_mask(a: _MArrayT) -> _MArrayT: ... + +# keep in sync with `MaskedArray.ids` +def ids(a: ArrayLike) -> tuple[int, int]: ... + +# keep in sync with `ndarray.nonzero` +def nonzero(a: ArrayLike) -> tuple[ndarray[tuple[int], np.dtype[intp]], ...]: ... + +# keep first overload in sync with `MaskedArray.ravel` +@overload +def ravel(a: np.ndarray[Any, _DTypeT], order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT]: ... +@overload +def ravel(a: _ArrayLike[_ScalarT], order: _OrderKACF = "C") -> MaskedArray[tuple[int], np.dtype[_ScalarT]]: ... +@overload +def ravel(a: ArrayLike, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... + +# keep roughly in sync with `lib._function_base_impl.copy` +@overload +def copy(a: _MArrayT, order: _OrderKACF = "C") -> _MArrayT: ... +@overload +def copy(a: np.ndarray[_ShapeT, _DTypeT], order: _OrderKACF = "C") -> MaskedArray[_ShapeT, _DTypeT]: ... +@overload +def copy(a: _ArrayLike[_ScalarT], order: _OrderKACF = "C") -> _MaskedArray[_ScalarT]: ... +@overload +def copy(a: ArrayLike, order: _OrderKACF = "C") -> _MaskedArray[Incomplete]: ... + +# keep in sync with `_core.fromnumeric.diagonal` +@overload +def diagonal( + a: _ArrayLike[_ScalarT], + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, +) -> NDArray[_ScalarT]: ... +@overload +def diagonal( + a: ArrayLike, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, +) -> NDArray[Incomplete]: ... + +# keep in sync with `_core.fromnumeric.repeat` +@overload +def repeat(a: _ArrayLike[_ScalarT], repeats: _ArrayLikeInt_co, axis: None = None) -> MaskedArray[tuple[int], dtype[_ScalarT]]: ... +@overload +def repeat(a: _ArrayLike[_ScalarT], repeats: _ArrayLikeInt_co, axis: SupportsIndex) -> _MaskedArray[_ScalarT]: ... +@overload +def repeat(a: ArrayLike, repeats: _ArrayLikeInt_co, axis: None = None) -> MaskedArray[tuple[int], dtype[Incomplete]]: ... +@overload +def repeat(a: ArrayLike, repeats: _ArrayLikeInt_co, axis: SupportsIndex) -> _MaskedArray[Incomplete]: ... + +# keep in sync with `_core.fromnumeric.swapaxes` +@overload +def swapaxes(a: _MArrayT, axis1: SupportsIndex, axis2: SupportsIndex) -> _MArrayT: ... +@overload +def swapaxes(a: _ArrayLike[_ScalarT], axis1: SupportsIndex, axis2: SupportsIndex) -> _MaskedArray[_ScalarT]: ... +@overload +def swapaxes(a: ArrayLike, axis1: SupportsIndex, axis2: SupportsIndex) -> _MaskedArray[Incomplete]: ... + +# NOTE: The `MaskedArray.anom` definition is specific to `MaskedArray`, so we need +# additional overloads to cover the array-like input here. +@overload # a: MaskedArray, dtype=None +def anom(a: _MArrayT, axis: SupportsIndex | None = None, dtype: None = None) -> _MArrayT: ... +@overload # a: array-like, dtype=None +def anom(a: _ArrayLike[_ScalarT], axis: SupportsIndex | None = None, dtype: None = None) -> _MaskedArray[_ScalarT]: ... +@overload # a: unknown array-like, dtype: dtype-like (positional) +def anom(a: ArrayLike, axis: SupportsIndex | None, dtype: _DTypeLike[_ScalarT]) -> _MaskedArray[_ScalarT]: ... +@overload # a: unknown array-like, dtype: dtype-like (keyword) +def anom(a: ArrayLike, axis: SupportsIndex | None = None, *, dtype: _DTypeLike[_ScalarT]) -> _MaskedArray[_ScalarT]: ... +@overload # a: unknown array-like, dtype: unknown dtype-like (positional) +def anom(a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike) -> _MaskedArray[Incomplete]: ... +@overload # a: unknown array-like, dtype: unknown dtype-like (keyword) +def anom(a: ArrayLike, axis: SupportsIndex | None = None, *, dtype: DTypeLike) -> _MaskedArray[Incomplete]: ... + +anomalies = anom + +# Keep in sync with `any` and `MaskedArray.all` +@overload +def all(a: ArrayLike, axis: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ...) -> np.bool: ... +@overload +def all(a: ArrayLike, axis: _ShapeLike | None, out: None, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... +@overload +def all(a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, *, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... +@overload +def all( + a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, keepdims: bool | _NoValueType = ... +) -> np.bool | _MaskedArray[np.bool]: ... +@overload +def all(a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT, keepdims: bool | _NoValueType = ...) -> _ArrayT: ... +@overload +def all(a: ArrayLike, axis: _ShapeLike | None = None, *, out: _ArrayT, keepdims: bool | _NoValueType = ...) -> _ArrayT: ... + +# Keep in sync with `all` and `MaskedArray.any` +@overload +def any(a: ArrayLike, axis: None = None, out: None = None, keepdims: Literal[False] | _NoValueType = ...) -> np.bool: ... +@overload +def any(a: ArrayLike, axis: _ShapeLike | None, out: None, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... +@overload +def any(a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, *, keepdims: Literal[True]) -> _MaskedArray[np.bool]: ... +@overload +def any( + a: ArrayLike, axis: _ShapeLike | None = None, out: None = None, keepdims: bool | _NoValueType = ... +) -> np.bool | _MaskedArray[np.bool]: ... +@overload +def any(a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT, keepdims: bool | _NoValueType = ...) -> _ArrayT: ... +@overload +def any(a: ArrayLike, axis: _ShapeLike | None = None, *, out: _ArrayT, keepdims: bool | _NoValueType = ...) -> _ArrayT: ... + +# NOTE: The `MaskedArray.compress` definition uses its `DTypeT_co` type parameter, +# which wouldn't work here for array-like inputs, so we need additional overloads. +@overload +def compress( + condition: _ArrayLikeBool_co, a: _ArrayLike[_ScalarT], axis: None = None, out: None = None +) -> MaskedArray[tuple[int], np.dtype[_ScalarT]]: ... +@overload +def compress( + condition: _ArrayLikeBool_co, a: _ArrayLike[_ScalarT], axis: _ShapeLike | None = None, out: None = None +) -> MaskedArray[_AnyShape, np.dtype[_ScalarT]]: ... +@overload +def compress(condition: _ArrayLikeBool_co, a: ArrayLike, axis: None = None, out: None = None) -> MaskedArray[tuple[int]]: ... +@overload +def compress( + condition: _ArrayLikeBool_co, a: ArrayLike, axis: _ShapeLike | None = None, out: None = None +) -> _MaskedArray[Incomplete]: ... +@overload +def compress(condition: _ArrayLikeBool_co, a: ArrayLike, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... +@overload +def compress(condition: _ArrayLikeBool_co, a: ArrayLike, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + +# Keep in sync with `cumprod` and `MaskedArray.cumsum` +@overload # out: None (default) +def cumsum( + a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None +) -> _MaskedArray[Incomplete]: ... +@overload # out: ndarray (positional) +def cumsum(a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... +@overload # out: ndarray (kwarg) +def cumsum(a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + +# Keep in sync with `cumsum` and `MaskedArray.cumsum` +@overload # out: None (default) +def cumprod( + a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None +) -> _MaskedArray[Incomplete]: ... +@overload # out: ndarray (positional) +def cumprod(a: ArrayLike, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... +@overload # out: ndarray (kwarg) +def cumprod(a: ArrayLike, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + +# Keep in sync with `sum`, `prod`, `product`, and `MaskedArray.mean` +@overload +def mean( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., +) -> Incomplete: ... +@overload +def mean( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def mean( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +# Keep in sync with `mean`, `prod`, `product`, and `MaskedArray.sum` +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., +) -> Incomplete: ... +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +# Keep in sync with `product` and `MaskedArray.prod` +@overload +def prod( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., +) -> Incomplete: ... +@overload +def prod( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def prod( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +# Keep in sync with `prod` and `MaskedArray.prod` +@overload +def product( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., +) -> Incomplete: ... +@overload +def product( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def product( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +# Keep in sync with `MaskedArray.trace` and `_core.fromnumeric.trace` +@overload +def trace( + a: ArrayLike, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + out: None = None, +) -> Incomplete: ... +@overload +def trace( + a: ArrayLike, + offset: SupportsIndex, + axis1: SupportsIndex, + axis2: SupportsIndex, + dtype: DTypeLike | None, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def trace( + a: ArrayLike, + offset: SupportsIndex = 0, + axis1: SupportsIndex = 0, + axis2: SupportsIndex = 1, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, +) -> _ArrayT: ... + +# keep in sync with `std` and `MaskedArray.var` +@overload +def std( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> Incomplete: ... +@overload +def std( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def std( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> _ArrayT: ... + +# keep in sync with `std` and `MaskedArray.var` +@overload +def var( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> Incomplete: ... +@overload +def var( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def var( + a: ArrayLike, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ddof: float = 0, + keepdims: bool | _NoValueType = ..., + mean: _ArrayLikeNumber_co | _NoValueType = ..., +) -> _ArrayT: ... + +# (a, b) +minimum: _extrema_operation = ... +maximum: _extrema_operation = ... + +# NOTE: this is a `_frommethod` instance at runtime +@overload +def count(a: ArrayLike, axis: None = None, keepdims: Literal[False] | _NoValueType = ...) -> int: ... +@overload +def count(a: ArrayLike, axis: _ShapeLike, keepdims: bool | _NoValueType = ...) -> NDArray[int_]: ... +@overload +def count(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: Literal[True]) -> NDArray[int_]: ... +@overload +def count(a: ArrayLike, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... + +# NOTE: this is a `_frommethod` instance at runtime +@overload +def argmin( + a: ArrayLike, + axis: None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., +) -> intp: ... +@overload +def argmin( + a: ArrayLike, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: bool | _NoValueType = ..., +) -> Any: ... +@overload +def argmin( + a: ArrayLike, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def argmin( + a: ArrayLike, + axis: SupportsIndex | None, + fill_value: _ScalarLike_co | None, + out: _ArrayT, + *, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +# keep in sync with `argmin` +@overload +def argmax( + a: ArrayLike, + axis: None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., +) -> intp: ... +@overload +def argmax( + a: ArrayLike, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: bool | _NoValueType = ..., +) -> Any: ... +@overload +def argmax( + a: ArrayLike, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def argmax( + a: ArrayLike, + axis: SupportsIndex | None, + fill_value: _ScalarLike_co | None, + out: _ArrayT, + *, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +@overload +def take( + a: _ArrayLike[_ScalarT], + indices: _IntLike_co, + axis: None = None, + out: None = None, + mode: _ModeKind = "raise" +) -> _ScalarT: ... +@overload +def take( + a: _ArrayLike[_ScalarT], + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", +) -> _MaskedArray[_ScalarT]: ... +@overload +def take( + a: ArrayLike, + indices: _IntLike_co, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", +) -> Any: ... +@overload +def take( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = "raise", +) -> _MaskedArray[Any]: ... +@overload +def take( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None, + out: _ArrayT, + mode: _ModeKind = "raise", +) -> _ArrayT: ... +@overload +def take( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + *, + out: _ArrayT, + mode: _ModeKind = "raise", +) -> _ArrayT: ... + +def power(a, b, third=None): ... +def argsort(a, axis=..., kind=None, order=None, endwith=True, fill_value=None, *, stable=None): ... +@overload +def sort( + a: _ArrayT, + axis: SupportsIndex = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: Literal[False] | None = None, +) -> _ArrayT: ... +@overload +def sort( + a: ArrayLike, + axis: SupportsIndex = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: Literal[False] | None = None, +) -> NDArray[Any]: ... +@overload +def compressed(x: _ArrayLike[_ScalarT_co]) -> _Array1D[_ScalarT_co]: ... +@overload +def compressed(x: ArrayLike) -> _Array1D[Any]: ... +def concatenate(arrays, axis=0): ... +def diag(v, k=0): ... +def left_shift(a, n): ... +def right_shift(a, n): ... +def put(a: NDArray[Any], indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... +def putmask(a: NDArray[Any], mask: _ArrayLikeBool_co, values: ArrayLike) -> None: ... +def transpose(a, axes=None): ... +def reshape(a, new_shape, order="C"): ... +def resize(x, new_shape): ... +def ndim(obj: ArrayLike) -> int: ... +def shape(obj): ... +def size(obj: ArrayLike, axis: SupportsIndex | None = None) -> int: ... +def diff(a, /, n=1, axis=-1, prepend=..., append=...): ... +def where(condition, x=..., y=...): ... +def choose(indices, choices, out=None, mode="raise"): ... +def round_(a, decimals=0, out=None): ... +round = round_ + +def inner(a, b): ... +innerproduct = inner + +def outer(a, b): ... +outerproduct = outer + +def correlate(a, v, mode="valid", propagate_mask=True): ... +def convolve(a, v, mode="full", propagate_mask=True): ... + +def allequal(a: ArrayLike, b: ArrayLike, fill_value: bool = True) -> bool: ... + +def allclose(a: ArrayLike, b: ArrayLike, masked_equal: bool = True, rtol: float = 1e-5, atol: float = 1e-8) -> bool: ... + +def fromflex(fxarray): ... + +def append(a, b, axis=None): ... +def dot(a, b, strict=False, out=None): ... + +# internal wrapper functions for the functions below +def _convert2ma( + funcname: str, + np_ret: str, + np_ma_ret: str, + params: dict[str, Any] | None = None, +) -> Callable[..., Any]: ... + +# keep in sync with `_core.multiarray.arange` +@overload # dtype= +def arange( + start_or_stop: _ArangeScalar | float, + /, + stop: _ArangeScalar | float | None = None, + step: _ArangeScalar | float | None = 1, + *, + dtype: _DTypeLike[_ArangeScalarT], + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[_ArangeScalarT]: ... +@overload # (int-like, int-like?, int-like?) +def arange( + start_or_stop: _IntLike_co, + /, + stop: _IntLike_co | None = None, + step: _IntLike_co | None = 1, + *, + dtype: type[int] | _DTypeLike[np.int_] | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.int_]: ... +@overload # (float, float-like?, float-like?) +def arange( + start_or_stop: float | floating, + /, + stop: _FloatLike_co | None = None, + step: _FloatLike_co | None = 1, + *, + dtype: type[float] | _DTypeLike[np.float64] | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.float64 | Any]: ... +@overload # (float-like, float, float-like?) +def arange( + start_or_stop: _FloatLike_co, + /, + stop: float | floating, + step: _FloatLike_co | None = 1, + *, + dtype: type[float] | _DTypeLike[np.float64] | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.float64 | Any]: ... +@overload # (timedelta, timedelta-like?, timedelta-like?) +def arange( + start_or_stop: np.timedelta64, + /, + stop: _TD64Like_co | None = None, + step: _TD64Like_co | None = 1, + *, + dtype: _DTypeLike[np.timedelta64] | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.timedelta64[Incomplete]]: ... +@overload # (timedelta-like, timedelta, timedelta-like?) +def arange( + start_or_stop: _TD64Like_co, + /, + stop: np.timedelta64, + step: _TD64Like_co | None = 1, + *, + dtype: _DTypeLike[np.timedelta64] | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.timedelta64[Incomplete]]: ... +@overload # (datetime, datetime, timedelta-like) (requires both start and stop) +def arange( + start_or_stop: np.datetime64, + /, + stop: np.datetime64, + step: _TD64Like_co | None = 1, + *, + dtype: _DTypeLike[np.datetime64] | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.datetime64[Incomplete]]: ... +@overload # (str, str, timedelta-like, dtype=dt64-like) (requires both start and stop) +def arange( + start_or_stop: str, + /, + stop: str, + step: _TD64Like_co | None = 1, + *, + dtype: _DTypeLike[np.datetime64] | _DT64Codes, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[np.datetime64[Incomplete]]: ... +@overload # dtype= +def arange( + start_or_stop: _ArangeScalar | float | str, + /, + stop: _ArangeScalar | float | str | None = None, + step: _ArangeScalar | float | None = 1, + *, + dtype: DTypeLike | None = None, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _Masked1D[Incomplete]: ... + +# based on `_core.fromnumeric.clip` +@overload +def clip( + a: _ScalarT, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> _ScalarT: ... +@overload +def clip( + a: NDArray[_ScalarT], + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> _MaskedArray[_ScalarT]: ... +@overload +def clip( + a: ArrayLike, + a_min: ArrayLike | None, + a_max: ArrayLike | None, + out: _MArrayT, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> _MArrayT: ... +@overload +def clip( + a: ArrayLike, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + *, + out: _MArrayT, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> _MArrayT: ... +@overload +def clip( + a: ArrayLike, + a_min: ArrayLike | _NoValueType | None = ..., + a_max: ArrayLike | _NoValueType | None = ..., + out: None = None, + *, + min: ArrayLike | _NoValueType | None = ..., + max: ArrayLike | _NoValueType | None = ..., + fill_value: _FillValue | None = None, + hardmask: bool = False, + dtype: DTypeLike | None = None, + **kwargs: Unpack[_UFuncKwargs], +) -> Incomplete: ... + +# keep in sync with `_core.multiarray.ones` +@overload +def empty( + shape: SupportsIndex, + dtype: None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int], np.dtype[np.float64]]: ... +@overload +def empty( + shape: SupportsIndex, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int], _DTypeT]: ... +@overload +def empty( + shape: SupportsIndex, + dtype: type[_ScalarT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int], np.dtype[_ScalarT]]: ... +@overload +def empty( + shape: SupportsIndex, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int]]: ... +@overload # known shape +def empty( + shape: _AnyShapeT, + dtype: None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[_AnyShapeT, np.dtype[np.float64]]: ... +@overload +def empty( + shape: _AnyShapeT, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[_AnyShapeT, _DTypeT]: ... +@overload +def empty( + shape: _AnyShapeT, + dtype: type[_ScalarT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[_AnyShapeT, np.dtype[_ScalarT]]: ... +@overload +def empty( + shape: _AnyShapeT, + dtype: DTypeLike | None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[_AnyShapeT]: ... +@overload # unknown shape +def empty( + shape: _ShapeLike, + dtype: None = None, + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[np.float64]: ... +@overload +def empty( + shape: _ShapeLike, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[_AnyShape, _DTypeT]: ... +@overload +def empty( + shape: _ShapeLike, + dtype: type[_ScalarT], + order: _OrderCF = "C", + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[_ScalarT]: ... +@overload +def empty( + shape: _ShapeLike, + dtype: DTypeLike | None = None, + *, + device: Literal["cpu"] | None = None, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray: ... + +# keep in sync with `_core.multiarray.empty_like` +@overload +def empty_like( + a: _MArrayT, + /, + dtype: None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, + *, + device: Literal["cpu"] | None = None, +) -> _MArrayT: ... +@overload +def empty_like( + a: _ArrayLike[_ScalarT], + /, + dtype: None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, + *, + device: Literal["cpu"] | None = None, +) -> _MaskedArray[_ScalarT]: ... +@overload +def empty_like( + a: Incomplete, + /, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, + *, + device: Literal["cpu"] | None = None, +) -> _MaskedArray[_ScalarT]: ... +@overload +def empty_like( + a: Incomplete, + /, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: bool = True, + shape: _ShapeLike | None = None, + *, + device: Literal["cpu"] | None = None, +) -> _MaskedArray[Incomplete]: ... + +# This is a bit of a hack to avoid having to duplicate all those `empty` overloads for +# `ones` and `zeros`, that relies on the fact that empty/zeros/ones have identical +# type signatures, but may cause some type-checkers to report incorrect names in case +# of user errors. Mypy and Pyright seem to handle this just fine. +ones = empty +ones_like = empty_like +zeros = empty +zeros_like = empty_like + +# keep in sync with `_core.multiarray.frombuffer` +@overload +def frombuffer( + buffer: Buffer, + *, + count: SupportsIndex = -1, + offset: SupportsIndex = 0, + like: _SupportsArrayFunc | None = None, +) -> _MaskedArray[np.float64]: ... +@overload +def frombuffer( + buffer: Buffer, + dtype: _DTypeLike[_ScalarT], + count: SupportsIndex = -1, + offset: SupportsIndex = 0, + *, + like: _SupportsArrayFunc | None = None, +) -> _MaskedArray[_ScalarT]: ... +@overload +def frombuffer( + buffer: Buffer, + dtype: DTypeLike | None = float, + count: SupportsIndex = -1, + offset: SupportsIndex = 0, + *, + like: _SupportsArrayFunc | None = None, +) -> _MaskedArray[Incomplete]: ... + +# keep roughly in sync with `_core.numeric.fromfunction` +def fromfunction( + function: Callable[..., np.ndarray[_ShapeT, _DTypeT]], + shape: Sequence[int], + *, + dtype: DTypeLike | None = float, + like: _SupportsArrayFunc | None = None, + **kwargs: object, +) -> MaskedArray[_ShapeT, _DTypeT]: ... + +# keep roughly in sync with `_core.numeric.identity` +@overload +def identity( + n: int, + dtype: None = None, + *, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int, int], np.dtype[np.float64]]: ... +@overload +def identity( + n: int, + dtype: _DTypeLike[_ScalarT], + *, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int, int], np.dtype[_ScalarT]]: ... +@overload +def identity( + n: int, + dtype: DTypeLike | None = None, + *, + like: _SupportsArrayFunc | None = None, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> MaskedArray[tuple[int, int], np.dtype[Incomplete]]: ... + +# keep roughly in sync with `_core.numeric.indices` +@overload +def indices( + dimensions: Sequence[int], + dtype: type[int] = int, + sparse: Literal[False] = False, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[np.intp]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: type[int], + sparse: Literal[True], + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[np.intp], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: type[int] = int, + *, + sparse: Literal[True], + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[np.intp], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: _DTypeLike[_ScalarT], + sparse: Literal[False] = False, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[_ScalarT]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: _DTypeLike[_ScalarT], + sparse: Literal[True], + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[_ScalarT], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike | None = int, + sparse: Literal[False] = False, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[Incomplete]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike | None, + sparse: Literal[True], + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[Incomplete], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike | None = int, + *, + sparse: Literal[True], + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> tuple[_MaskedArray[Incomplete], ...]: ... + +# keep roughly in sync with `_core.fromnumeric.squeeze` +@overload +def squeeze( + a: _ArrayLike[_ScalarT], + axis: _ShapeLike | None = None, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[_ScalarT]: ... +@overload +def squeeze( + a: ArrayLike, + axis: _ShapeLike | None = None, + *, + fill_value: _FillValue | None = None, + hardmask: bool = False, +) -> _MaskedArray[Incomplete]: ... diff --git a/local_packages/numpy/ma/extras.py b/local_packages/numpy/ma/extras.py new file mode 100644 index 0000000..7387d4f --- /dev/null +++ b/local_packages/numpy/ma/extras.py @@ -0,0 +1,2266 @@ +""" +Masked arrays add-ons. + +A collection of utilities for `numpy.ma`. + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu + +""" +__all__ = [ + 'apply_along_axis', 'apply_over_axes', 'atleast_1d', 'atleast_2d', + 'atleast_3d', 'average', 'clump_masked', 'clump_unmasked', 'column_stack', + 'compress_cols', 'compress_nd', 'compress_rowcols', 'compress_rows', + 'count_masked', 'corrcoef', 'cov', 'diagflat', 'dot', 'dstack', 'ediff1d', + 'flatnotmasked_contiguous', 'flatnotmasked_edges', 'hsplit', 'hstack', + 'isin', 'in1d', 'intersect1d', 'mask_cols', 'mask_rowcols', 'mask_rows', + 'masked_all', 'masked_all_like', 'median', 'mr_', 'ndenumerate', + 'notmasked_contiguous', 'notmasked_edges', 'polyfit', 'row_stack', + 'setdiff1d', 'setxor1d', 'stack', 'unique', 'union1d', 'vander', 'vstack', + ] + +import functools +import itertools +import warnings + +import numpy as np +from numpy import array as nxarray, ndarray +from numpy.lib._function_base_impl import _ureduce +from numpy.lib._index_tricks_impl import AxisConcatenator +from numpy.lib.array_utils import normalize_axis_index, normalize_axis_tuple + +from . import core as ma +from .core import ( # noqa: F401 + MAError, + MaskedArray, + add, + array, + asarray, + concatenate, + count, + dot, + filled, + get_masked_subclass, + getdata, + getmask, + getmaskarray, + make_mask_descr, + mask_or, + masked, + masked_array, + nomask, + ones, + sort, + zeros, +) + + +def issequence(seq): + """ + Is seq a sequence (ndarray, list or tuple)? + + """ + return isinstance(seq, (ndarray, tuple, list)) + + +def count_masked(arr, axis=None): + """ + Count the number of masked elements along the given axis. + + Parameters + ---------- + arr : array_like + An array with (possibly) masked elements. + axis : int, optional + Axis along which to count. If None (default), a flattened + version of the array is used. + + Returns + ------- + count : int, ndarray + The total number of masked elements (axis=None) or the number + of masked elements along each slice of the given axis. + + See Also + -------- + MaskedArray.count : Count non-masked elements. + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(9).reshape((3,3)) + >>> a = np.ma.array(a) + >>> a[1, 0] = np.ma.masked + >>> a[1, 2] = np.ma.masked + >>> a[2, 1] = np.ma.masked + >>> a + masked_array( + data=[[0, 1, 2], + [--, 4, --], + [6, --, 8]], + mask=[[False, False, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> np.ma.count_masked(a) + 3 + + When the `axis` keyword is used an array is returned. + + >>> np.ma.count_masked(a, axis=0) + array([1, 1, 1]) + >>> np.ma.count_masked(a, axis=1) + array([0, 2, 1]) + + """ + m = getmaskarray(arr) + return m.sum(axis) + + +def masked_all(shape, dtype=float): + """ + Empty masked array with all elements masked. + + Return an empty masked array of the given shape and dtype, where all the + data are masked. + + Parameters + ---------- + shape : int or tuple of ints + Shape of the required MaskedArray, e.g., ``(2, 3)`` or ``2``. + dtype : dtype, optional + Data type of the output. + + Returns + ------- + a : MaskedArray + A masked array with all data masked. + + See Also + -------- + masked_all_like : Empty masked array modelled on an existing array. + + Notes + ----- + Unlike other masked array creation functions (e.g. `numpy.ma.zeros`, + `numpy.ma.ones`, `numpy.ma.full`), `masked_all` does not initialize the + values of the array, and may therefore be marginally faster. However, + the values stored in the newly allocated array are arbitrary. For + reproducible behavior, be sure to set each element of the array before + reading. + + Examples + -------- + >>> import numpy as np + >>> np.ma.masked_all((3, 3)) + masked_array( + data=[[--, --, --], + [--, --, --], + [--, --, --]], + mask=[[ True, True, True], + [ True, True, True], + [ True, True, True]], + fill_value=1e+20, + dtype=float64) + + The `dtype` parameter defines the underlying data type. + + >>> a = np.ma.masked_all((3, 3)) + >>> a.dtype + dtype('float64') + >>> a = np.ma.masked_all((3, 3), dtype=np.int32) + >>> a.dtype + dtype('int32') + + """ + a = masked_array(np.empty(shape, dtype), + mask=np.ones(shape, make_mask_descr(dtype))) + return a + + +def masked_all_like(arr): + """ + Empty masked array with the properties of an existing array. + + Return an empty masked array of the same shape and dtype as + the array `arr`, where all the data are masked. + + Parameters + ---------- + arr : ndarray + An array describing the shape and dtype of the required MaskedArray. + + Returns + ------- + a : MaskedArray + A masked array with all data masked. + + Raises + ------ + AttributeError + If `arr` doesn't have a shape attribute (i.e. not an ndarray) + + See Also + -------- + masked_all : Empty masked array with all elements masked. + + Notes + ----- + Unlike other masked array creation functions (e.g. `numpy.ma.zeros_like`, + `numpy.ma.ones_like`, `numpy.ma.full_like`), `masked_all_like` does not + initialize the values of the array, and may therefore be marginally + faster. However, the values stored in the newly allocated array are + arbitrary. For reproducible behavior, be sure to set each element of the + array before reading. + + Examples + -------- + >>> import numpy as np + >>> arr = np.zeros((2, 3), dtype=np.float32) + >>> arr + array([[0., 0., 0.], + [0., 0., 0.]], dtype=float32) + >>> np.ma.masked_all_like(arr) + masked_array( + data=[[--, --, --], + [--, --, --]], + mask=[[ True, True, True], + [ True, True, True]], + fill_value=np.float64(1e+20), + dtype=float32) + + The dtype of the masked array matches the dtype of `arr`. + + >>> arr.dtype + dtype('float32') + >>> np.ma.masked_all_like(arr).dtype + dtype('float32') + + """ + a = np.empty_like(arr).view(MaskedArray) + a._mask = np.ones(a.shape, dtype=make_mask_descr(a.dtype)) + return a + + +#####-------------------------------------------------------------------------- +#---- --- Standard functions --- +#####-------------------------------------------------------------------------- + +def _fromnxfunction_function(_fromnxfunction): + """ + Decorator to wrap a "_fromnxfunction" function, wrapping a numpy function as a + masked array function, with proper docstring and name. + + Parameters + ---------- + _fromnxfunction : ({params}) -> ndarray, {params}) -> masked_array + Wrapper function that calls the wrapped numpy function + + Returns + ------- + decorator : (f: ({params}) -> ndarray) -> ({params}) -> masked_array + Function that accepts a numpy function and returns a masked array function + + """ + def decorator(npfunc, /): + def wrapper(*args, **kwargs): + return _fromnxfunction(npfunc, *args, **kwargs) + + functools.update_wrapper(wrapper, npfunc, assigned=("__name__", "__qualname__")) + wrapper.__doc__ = ma.doc_note( + npfunc.__doc__, + "The function is applied to both the ``_data`` and the ``_mask``, if any.", + ) + return wrapper + + return decorator + + +@_fromnxfunction_function +def _fromnxfunction_single(npfunc, a, /, *args, **kwargs): + """ + Wraps a NumPy function that can be called with a single array argument followed by + auxiliary args that are passed verbatim for both the data and mask calls. + """ + return masked_array( + data=npfunc(np.asarray(a), *args, **kwargs), + mask=npfunc(getmaskarray(a), *args, **kwargs), + ) + + +@_fromnxfunction_function +def _fromnxfunction_seq(npfunc, arys, /, *args, **kwargs): + """ + Wraps a NumPy function that can be called with a single sequence of arrays followed + by auxiliary args that are passed verbatim for both the data and mask calls. + """ + return masked_array( + data=npfunc(tuple(np.asarray(a) for a in arys), *args, **kwargs), + mask=npfunc(tuple(getmaskarray(a) for a in arys), *args, **kwargs), + ) + +@_fromnxfunction_function +def _fromnxfunction_allargs(npfunc, /, *arys, **kwargs): + """ + Wraps a NumPy function that can be called with multiple array arguments. + All args are converted to arrays even if they are not so already. + This makes it possible to process scalars as 1-D arrays. + Only keyword arguments are passed through verbatim for the data and mask calls. + Arrays arguments are processed independently and the results are returned in a list. + If only one arg is present, the return value is just the processed array instead of + a list. + """ + out = tuple( + masked_array( + data=npfunc(np.asarray(a), **kwargs), + mask=npfunc(getmaskarray(a), **kwargs), + ) + for a in arys + ) + return out[0] if len(out) == 1 else out + + +atleast_1d = _fromnxfunction_allargs(np.atleast_1d) +atleast_2d = _fromnxfunction_allargs(np.atleast_2d) +atleast_3d = _fromnxfunction_allargs(np.atleast_3d) + +vstack = row_stack = _fromnxfunction_seq(np.vstack) +hstack = _fromnxfunction_seq(np.hstack) +column_stack = _fromnxfunction_seq(np.column_stack) +dstack = _fromnxfunction_seq(np.dstack) +stack = _fromnxfunction_seq(np.stack) + +hsplit = _fromnxfunction_single(np.hsplit) +diagflat = _fromnxfunction_single(np.diagflat) + + +#####-------------------------------------------------------------------------- +#---- +#####-------------------------------------------------------------------------- +def flatten_inplace(seq): + """Flatten a sequence in place.""" + k = 0 + while (k != len(seq)): + while hasattr(seq[k], '__iter__'): + seq[k:(k + 1)] = seq[k] + k += 1 + return seq + + +def apply_along_axis(func1d, axis, arr, *args, **kwargs): + """ + (This docstring should be overwritten) + """ + arr = array(arr, copy=False, subok=True) + nd = arr.ndim + axis = normalize_axis_index(axis, nd) + ind = [0] * (nd - 1) + i = np.zeros(nd, 'O') + indlist = list(range(nd)) + indlist.remove(axis) + i[axis] = slice(None, None) + outshape = np.asarray(arr.shape).take(indlist) + i.put(indlist, ind) + res = func1d(arr[tuple(i.tolist())], *args, **kwargs) + # if res is a number, then we have a smaller output array + asscalar = np.isscalar(res) + if not asscalar: + try: + len(res) + except TypeError: + asscalar = True + # Note: we shouldn't set the dtype of the output from the first result + # so we force the type to object, and build a list of dtypes. We'll + # just take the largest, to avoid some downcasting + dtypes = [] + if asscalar: + dtypes.append(np.asarray(res).dtype) + outarr = zeros(outshape, object) + outarr[tuple(ind)] = res + Ntot = np.prod(outshape) + k = 1 + while k < Ntot: + # increment the index + ind[-1] += 1 + n = -1 + while (ind[n] >= outshape[n]) and (n > (1 - nd)): + ind[n - 1] += 1 + ind[n] = 0 + n -= 1 + i.put(indlist, ind) + res = func1d(arr[tuple(i.tolist())], *args, **kwargs) + outarr[tuple(ind)] = res + dtypes.append(asarray(res).dtype) + k += 1 + else: + res = array(res, copy=False, subok=True) + j = i.copy() + j[axis] = ([slice(None, None)] * res.ndim) + j.put(indlist, ind) + Ntot = np.prod(outshape) + holdshape = outshape + outshape = list(arr.shape) + outshape[axis] = res.shape + dtypes.append(asarray(res).dtype) + outshape = flatten_inplace(outshape) + outarr = zeros(outshape, object) + outarr[tuple(flatten_inplace(j.tolist()))] = res + k = 1 + while k < Ntot: + # increment the index + ind[-1] += 1 + n = -1 + while (ind[n] >= holdshape[n]) and (n > (1 - nd)): + ind[n - 1] += 1 + ind[n] = 0 + n -= 1 + i.put(indlist, ind) + j.put(indlist, ind) + res = func1d(arr[tuple(i.tolist())], *args, **kwargs) + outarr[tuple(flatten_inplace(j.tolist()))] = res + dtypes.append(asarray(res).dtype) + k += 1 + max_dtypes = np.dtype(np.asarray(dtypes).max()) + if not hasattr(arr, '_mask'): + result = np.asarray(outarr, dtype=max_dtypes) + else: + result = asarray(outarr, dtype=max_dtypes) + result.fill_value = ma.default_fill_value(result) + return result + + +apply_along_axis.__doc__ = np.apply_along_axis.__doc__ + + +def apply_over_axes(func, a, axes): + """ + (This docstring will be overwritten) + """ + val = asarray(a) + N = a.ndim + if array(axes).ndim == 0: + axes = (axes,) + for axis in axes: + if axis < 0: + axis = N + axis + args = (val, axis) + res = func(*args) + if res.ndim == val.ndim: + val = res + else: + res = ma.expand_dims(res, axis) + if res.ndim == val.ndim: + val = res + else: + raise ValueError("function is not returning " + "an array of the correct shape") + return val + + +if apply_over_axes.__doc__ is not None: + apply_over_axes.__doc__ = np.apply_over_axes.__doc__[ + :np.apply_over_axes.__doc__.find('Notes')].rstrip() + \ + """ + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.arange(24).reshape(2,3,4) + >>> a[:,0,1] = np.ma.masked + >>> a[:,1,:] = np.ma.masked + >>> a + masked_array( + data=[[[0, --, 2, 3], + [--, --, --, --], + [8, 9, 10, 11]], + [[12, --, 14, 15], + [--, --, --, --], + [20, 21, 22, 23]]], + mask=[[[False, True, False, False], + [ True, True, True, True], + [False, False, False, False]], + [[False, True, False, False], + [ True, True, True, True], + [False, False, False, False]]], + fill_value=999999) + >>> np.ma.apply_over_axes(np.ma.sum, a, [0,2]) + masked_array( + data=[[[46], + [--], + [124]]], + mask=[[[False], + [ True], + [False]]], + fill_value=999999) + + Tuple axis arguments to ufuncs are equivalent: + + >>> np.ma.sum(a, axis=(0,2)).reshape((1,-1,1)) + masked_array( + data=[[[46], + [--], + [124]]], + mask=[[[False], + [ True], + [False]]], + fill_value=999999) + """ + + +def average(a, axis=None, weights=None, returned=False, *, + keepdims=np._NoValue): + """ + Return the weighted average of array over the given axis. + + Parameters + ---------- + a : array_like + Data to be averaged. + Masked entries are not taken into account in the computation. + axis : None or int or tuple of ints, optional + Axis or axes along which to average `a`. The default, + `axis=None`, will average over all of the elements of the input array. + If axis is a tuple of ints, averaging is performed on all of the axes + specified in the tuple instead of a single axis or all the axes as + before. + weights : array_like, optional + An array of weights associated with the values in `a`. Each value in + `a` contributes to the average according to its associated weight. + The array of weights must be the same shape as `a` if no axis is + specified, otherwise the weights must have dimensions and shape + consistent with `a` along the specified axis. + If `weights=None`, then all data in `a` are assumed to have a + weight equal to one. + The calculation is:: + + avg = sum(a * weights) / sum(weights) + + where the sum is over all included elements. + The only constraint on the values of `weights` is that `sum(weights)` + must not be 0. + returned : bool, optional + Flag indicating whether a tuple ``(result, sum of weights)`` + should be returned as output (True), or just the result (False). + Default is False. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + *Note:* `keepdims` will not work with instances of `numpy.matrix` + or other classes whose methods do not support `keepdims`. + + .. versionadded:: 1.23.0 + + Returns + ------- + average, [sum_of_weights] : (tuple of) scalar or MaskedArray + The average along the specified axis. When returned is `True`, + return a tuple with the average as the first element and the sum + of the weights as the second element. The return type is `np.float64` + if `a` is of integer type and floats smaller than `float64`, or the + input data-type, otherwise. If returned, `sum_of_weights` is always + `float64`. + + Raises + ------ + ZeroDivisionError + When all weights along axis are zero. See `numpy.ma.average` for a + version robust to this type of error. + TypeError + When `weights` does not have the same shape as `a`, and `axis=None`. + ValueError + When `weights` does not have dimensions and shape consistent with `a` + along specified `axis`. + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array([1., 2., 3., 4.], mask=[False, False, True, True]) + >>> np.ma.average(a, weights=[3, 1, 0, 0]) + 1.25 + + >>> x = np.ma.arange(6.).reshape(3, 2) + >>> x + masked_array( + data=[[0., 1.], + [2., 3.], + [4., 5.]], + mask=False, + fill_value=1e+20) + >>> data = np.arange(8).reshape((2, 2, 2)) + >>> data + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> np.ma.average(data, axis=(0, 1), weights=[[1./4, 3./4], [1., 1./2]]) + masked_array(data=[3.4, 4.4], + mask=[False, False], + fill_value=1e+20) + >>> np.ma.average(data, axis=0, weights=[[1./4, 3./4], [1., 1./2]]) + Traceback (most recent call last): + ... + ValueError: Shape of weights must be consistent + with shape of a along specified axis. + + >>> avg, sumweights = np.ma.average(x, axis=0, weights=[1, 2, 3], + ... returned=True) + >>> avg + masked_array(data=[2.6666666666666665, 3.6666666666666665], + mask=[False, False], + fill_value=1e+20) + + With ``keepdims=True``, the following result has shape (3, 1). + + >>> np.ma.average(x, axis=1, keepdims=True) + masked_array( + data=[[0.5], + [2.5], + [4.5]], + mask=False, + fill_value=1e+20) + """ + a = asarray(a) + m = getmask(a) + + if axis is not None: + axis = normalize_axis_tuple(axis, a.ndim, argname="axis") + + if keepdims is np._NoValue: + # Don't pass on the keepdims argument if one wasn't given. + keepdims_kw = {} + else: + keepdims_kw = {'keepdims': keepdims} + + if weights is None: + avg = a.mean(axis, **keepdims_kw) + scl = avg.dtype.type(a.count(axis)) + else: + wgt = asarray(weights) + + if issubclass(a.dtype.type, (np.integer, np.bool)): + result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8') + else: + result_dtype = np.result_type(a.dtype, wgt.dtype) + + # Sanity checks + if a.shape != wgt.shape: + if axis is None: + raise TypeError( + "Axis must be specified when shapes of a and weights " + "differ.") + if wgt.shape != tuple(a.shape[ax] for ax in axis): + raise ValueError( + "Shape of weights must be consistent with " + "shape of a along specified axis.") + + # setup wgt to broadcast along axis + wgt = wgt.transpose(np.argsort(axis)) + wgt = wgt.reshape(tuple((s if ax in axis else 1) + for ax, s in enumerate(a.shape))) + + if m is not nomask: + wgt = wgt * (~a.mask) + wgt.mask |= a.mask + + scl = wgt.sum(axis=axis, dtype=result_dtype, **keepdims_kw) + avg = np.multiply(a, wgt, + dtype=result_dtype).sum(axis, **keepdims_kw) / scl + + if returned: + if scl.shape != avg.shape: + scl = np.broadcast_to(scl, avg.shape).copy() + return avg, scl + else: + return avg + + +def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): + """ + Compute the median along the specified axis. + + Returns the median of the array elements. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : int, optional + Axis along which the medians are computed. The default (None) is + to compute the median along a flattened version of the array. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. + overwrite_input : bool, optional + If True, then allow use of memory of input array (a) for + calculations. The input array will be modified by the call to + median. This will save memory when you do not need to preserve + the contents of the input array. Treat the input as undefined, + but it will probably be fully or partially sorted. Default is + False. Note that, if `overwrite_input` is True, and the input + is not already an `ndarray`, an error will be raised. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + Returns + ------- + median : ndarray + A new array holding the result is returned unless out is + specified, in which case a reference to out is returned. + Return data-type is `float64` for integers and floats smaller than + `float64`, or the input data-type, otherwise. + + See Also + -------- + mean + + Notes + ----- + Given a vector ``V`` with ``N`` non masked values, the median of ``V`` + is the middle value of a sorted copy of ``V`` (``Vs``) - i.e. + ``Vs[(N-1)/2]``, when ``N`` is odd, or ``{Vs[N/2 - 1] + Vs[N/2]}/2`` + when ``N`` is even. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array(np.arange(8), mask=[0]*4 + [1]*4) + >>> np.ma.median(x) + 1.5 + + >>> x = np.ma.array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4) + >>> np.ma.median(x) + 2.5 + >>> np.ma.median(x, axis=-1, overwrite_input=True) + masked_array(data=[2.0, 5.0], + mask=[False, False], + fill_value=1e+20) + + """ + if not hasattr(a, 'mask'): + m = np.median(getdata(a, subok=True), axis=axis, + out=out, overwrite_input=overwrite_input, + keepdims=keepdims) + if isinstance(m, np.ndarray) and 1 <= m.ndim: + return masked_array(m, copy=False) + else: + return m + + return _ureduce(a, func=_median, keepdims=keepdims, axis=axis, out=out, + overwrite_input=overwrite_input) + + +def _median(a, axis=None, out=None, overwrite_input=False): + # when an unmasked NaN is present return it, so we need to sort the NaN + # values behind the mask + if np.issubdtype(a.dtype, np.inexact): + fill_value = np.inf + else: + fill_value = None + if overwrite_input: + if axis is None: + asorted = a.ravel() + asorted.sort(fill_value=fill_value) + else: + a.sort(axis=axis, fill_value=fill_value) + asorted = a + else: + asorted = sort(a, axis=axis, fill_value=fill_value) + + if axis is None: + axis = 0 + else: + axis = normalize_axis_index(axis, asorted.ndim) + + if asorted.shape[axis] == 0: + # for empty axis integer indices fail so use slicing to get same result + # as median (which is mean of empty slice = nan) + indexer = [slice(None)] * asorted.ndim + indexer[axis] = slice(0, 0) + indexer = tuple(indexer) + return np.ma.mean(asorted[indexer], axis=axis, out=out) + + if asorted.ndim == 1: + idx, odd = divmod(count(asorted), 2) + mid = asorted[idx + odd - 1:idx + 1] + if np.issubdtype(asorted.dtype, np.inexact) and asorted.size > 0: + # avoid inf / x = masked + s = mid.sum(out=out) + if not odd: + s = np.true_divide(s, 2., casting='safe', out=out) + s = np.lib._utils_impl._median_nancheck(asorted, s, axis) + else: + s = mid.mean(out=out) + + # if result is masked either the input contained enough + # minimum_fill_value so that it would be the median or all values + # masked + if np.ma.is_masked(s) and not np.all(asorted.mask): + return np.ma.minimum_fill_value(asorted) + return s + + counts = count(asorted, axis=axis, keepdims=True) + h = counts // 2 + + # duplicate high if odd number of elements so mean does nothing + odd = counts % 2 == 1 + l = np.where(odd, h, h - 1) + + lh = np.concatenate([l, h], axis=axis) + + # get low and high median + low_high = np.take_along_axis(asorted, lh, axis=axis) + + def replace_masked(s): + # Replace masked entries with minimum_full_value unless it all values + # are masked. This is required as the sort order of values equal or + # larger than the fill value is undefined and a valid value placed + # elsewhere, e.g. [4, --, inf]. + if np.ma.is_masked(s): + rep = (~np.all(asorted.mask, axis=axis, keepdims=True)) & s.mask + s.data[rep] = np.ma.minimum_fill_value(asorted) + s.mask[rep] = False + + replace_masked(low_high) + + if np.issubdtype(asorted.dtype, np.inexact): + # avoid inf / x = masked + s = np.ma.sum(low_high, axis=axis, out=out) + np.true_divide(s.data, 2., casting='unsafe', out=s.data) + + s = np.lib._utils_impl._median_nancheck(asorted, s, axis) + else: + s = np.ma.mean(low_high, axis=axis, out=out) + + return s + + +def compress_nd(x, axis=None): + """Suppress slices from multiple dimensions which contain masked values. + + Parameters + ---------- + x : array_like, MaskedArray + The array to operate on. If not a MaskedArray instance (or if no array + elements are masked), `x` is interpreted as a MaskedArray with `mask` + set to `nomask`. + axis : tuple of ints or int, optional + Which dimensions to suppress slices from can be configured with this + parameter. + - If axis is a tuple of ints, those are the axes to suppress slices from. + - If axis is an int, then that is the only axis to suppress slices from. + - If axis is None, all axis are selected. + + Returns + ------- + compress_array : ndarray + The compressed array. + + Examples + -------- + >>> import numpy as np + >>> arr = [[1, 2], [3, 4]] + >>> mask = [[0, 1], [0, 0]] + >>> x = np.ma.array(arr, mask=mask) + >>> np.ma.compress_nd(x, axis=0) + array([[3, 4]]) + >>> np.ma.compress_nd(x, axis=1) + array([[1], + [3]]) + >>> np.ma.compress_nd(x) + array([[3]]) + + """ + x = asarray(x) + m = getmask(x) + # Set axis to tuple of ints + if axis is None: + axis = tuple(range(x.ndim)) + else: + axis = normalize_axis_tuple(axis, x.ndim) + + # Nothing is masked: return x + if m is nomask or not m.any(): + return x._data + # All is masked: return empty + if m.all(): + return nxarray([]) + # Filter elements through boolean indexing + data = x._data + for ax in axis: + axes = tuple(list(range(ax)) + list(range(ax + 1, x.ndim))) + data = data[(slice(None),) * ax + (~m.any(axis=axes),)] + return data + + +def compress_rowcols(x, axis=None): + """ + Suppress the rows and/or columns of a 2-D array that contain + masked values. + + The suppression behavior is selected with the `axis` parameter. + + - If axis is None, both rows and columns are suppressed. + - If axis is 0, only rows are suppressed. + - If axis is 1 or -1, only columns are suppressed. + + Parameters + ---------- + x : array_like, MaskedArray + The array to operate on. If not a MaskedArray instance (or if no array + elements are masked), `x` is interpreted as a MaskedArray with + `mask` set to `nomask`. Must be a 2D array. + axis : int, optional + Axis along which to perform the operation. Default is None. + + Returns + ------- + compressed_array : ndarray + The compressed array. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], + ... [1, 0, 0], + ... [0, 0, 0]]) + >>> x + masked_array( + data=[[--, 1, 2], + [--, 4, 5], + [6, 7, 8]], + mask=[[ True, False, False], + [ True, False, False], + [False, False, False]], + fill_value=999999) + + >>> np.ma.compress_rowcols(x) + array([[7, 8]]) + >>> np.ma.compress_rowcols(x, 0) + array([[6, 7, 8]]) + >>> np.ma.compress_rowcols(x, 1) + array([[1, 2], + [4, 5], + [7, 8]]) + + """ + if asarray(x).ndim != 2: + raise NotImplementedError("compress_rowcols works for 2D arrays only.") + return compress_nd(x, axis=axis) + + +def compress_rows(a): + """ + Suppress whole rows of a 2-D array that contain masked values. + + This is equivalent to ``np.ma.compress_rowcols(a, 0)``, see + `compress_rowcols` for details. + + Parameters + ---------- + x : array_like, MaskedArray + The array to operate on. If not a MaskedArray instance (or if no array + elements are masked), `x` is interpreted as a MaskedArray with + `mask` set to `nomask`. Must be a 2D array. + + Returns + ------- + compressed_array : ndarray + The compressed array. + + See Also + -------- + compress_rowcols + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], + ... [1, 0, 0], + ... [0, 0, 0]]) + >>> np.ma.compress_rows(a) + array([[6, 7, 8]]) + + """ + a = asarray(a) + if a.ndim != 2: + raise NotImplementedError("compress_rows works for 2D arrays only.") + return compress_rowcols(a, 0) + + +def compress_cols(a): + """ + Suppress whole columns of a 2-D array that contain masked values. + + This is equivalent to ``np.ma.compress_rowcols(a, 1)``, see + `compress_rowcols` for details. + + Parameters + ---------- + x : array_like, MaskedArray + The array to operate on. If not a MaskedArray instance (or if no array + elements are masked), `x` is interpreted as a MaskedArray with + `mask` set to `nomask`. Must be a 2D array. + + Returns + ------- + compressed_array : ndarray + The compressed array. + + See Also + -------- + compress_rowcols + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], + ... [1, 0, 0], + ... [0, 0, 0]]) + >>> np.ma.compress_cols(a) + array([[1, 2], + [4, 5], + [7, 8]]) + + """ + a = asarray(a) + if a.ndim != 2: + raise NotImplementedError("compress_cols works for 2D arrays only.") + return compress_rowcols(a, 1) + + +def mask_rowcols(a, axis=None): + """ + Mask rows and/or columns of a 2D array that contain masked values. + + Mask whole rows and/or columns of a 2D array that contain + masked values. The masking behavior is selected using the + `axis` parameter. + + - If `axis` is None, rows *and* columns are masked. + - If `axis` is 0, only rows are masked. + - If `axis` is 1 or -1, only columns are masked. + + Parameters + ---------- + a : array_like, MaskedArray + The array to mask. If not a MaskedArray instance (or if no array + elements are masked), the result is a MaskedArray with `mask` set + to `nomask` (False). Must be a 2D array. + axis : int, optional + Axis along which to perform the operation. If None, applies to a + flattened version of the array. + + Returns + ------- + a : MaskedArray + A modified version of the input array, masked depending on the value + of the `axis` parameter. + + Raises + ------ + NotImplementedError + If input array `a` is not 2D. + + See Also + -------- + mask_rows : Mask rows of a 2D array that contain masked values. + mask_cols : Mask cols of a 2D array that contain masked values. + masked_where : Mask where a condition is met. + + Notes + ----- + The input array's mask is modified by this function. + + Examples + -------- + >>> import numpy as np + >>> a = np.zeros((3, 3), dtype=int) + >>> a[1, 1] = 1 + >>> a + array([[0, 0, 0], + [0, 1, 0], + [0, 0, 0]]) + >>> a = np.ma.masked_equal(a, 1) + >>> a + masked_array( + data=[[0, 0, 0], + [0, --, 0], + [0, 0, 0]], + mask=[[False, False, False], + [False, True, False], + [False, False, False]], + fill_value=1) + >>> np.ma.mask_rowcols(a) + masked_array( + data=[[0, --, 0], + [--, --, --], + [0, --, 0]], + mask=[[False, True, False], + [ True, True, True], + [False, True, False]], + fill_value=1) + + """ + a = array(a, subok=False) + if a.ndim != 2: + raise NotImplementedError("mask_rowcols works for 2D arrays only.") + m = getmask(a) + # Nothing is masked: return a + if m is nomask or not m.any(): + return a + maskedval = m.nonzero() + a._mask = a._mask.copy() + if not axis: + a[np.unique(maskedval[0])] = masked + if axis in [None, 1, -1]: + a[:, np.unique(maskedval[1])] = masked + return a + + +def mask_rows(a, axis=np._NoValue): + """ + Mask rows of a 2D array that contain masked values. + + This function is a shortcut to ``mask_rowcols`` with `axis` equal to 0. + + See Also + -------- + mask_rowcols : Mask rows and/or columns of a 2D array. + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy as np + >>> a = np.zeros((3, 3), dtype=int) + >>> a[1, 1] = 1 + >>> a + array([[0, 0, 0], + [0, 1, 0], + [0, 0, 0]]) + >>> a = np.ma.masked_equal(a, 1) + >>> a + masked_array( + data=[[0, 0, 0], + [0, --, 0], + [0, 0, 0]], + mask=[[False, False, False], + [False, True, False], + [False, False, False]], + fill_value=1) + + >>> np.ma.mask_rows(a) + masked_array( + data=[[0, 0, 0], + [--, --, --], + [0, 0, 0]], + mask=[[False, False, False], + [ True, True, True], + [False, False, False]], + fill_value=1) + + """ + if axis is not np._NoValue: + # remove the axis argument when this deprecation expires + # NumPy 1.18.0, 2019-11-28 + warnings.warn( + "The axis argument has always been ignored, in future passing it " + "will raise TypeError", DeprecationWarning, stacklevel=2) + return mask_rowcols(a, 0) + + +def mask_cols(a, axis=np._NoValue): + """ + Mask columns of a 2D array that contain masked values. + + This function is a shortcut to ``mask_rowcols`` with `axis` equal to 1. + + See Also + -------- + mask_rowcols : Mask rows and/or columns of a 2D array. + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy as np + >>> a = np.zeros((3, 3), dtype=int) + >>> a[1, 1] = 1 + >>> a + array([[0, 0, 0], + [0, 1, 0], + [0, 0, 0]]) + >>> a = np.ma.masked_equal(a, 1) + >>> a + masked_array( + data=[[0, 0, 0], + [0, --, 0], + [0, 0, 0]], + mask=[[False, False, False], + [False, True, False], + [False, False, False]], + fill_value=1) + >>> np.ma.mask_cols(a) + masked_array( + data=[[0, --, 0], + [0, --, 0], + [0, --, 0]], + mask=[[False, True, False], + [False, True, False], + [False, True, False]], + fill_value=1) + + """ + if axis is not np._NoValue: + # remove the axis argument when this deprecation expires + # NumPy 1.18.0, 2019-11-28 + warnings.warn( + "The axis argument has always been ignored, in future passing it " + "will raise TypeError", DeprecationWarning, stacklevel=2) + return mask_rowcols(a, 1) + + +#####-------------------------------------------------------------------------- +#---- --- arraysetops --- +#####-------------------------------------------------------------------------- + +def ediff1d(arr, to_end=None, to_begin=None): + """ + Compute the differences between consecutive elements of an array. + + This function is the equivalent of `numpy.ediff1d` that takes masked + values into account, see `numpy.ediff1d` for details. + + See Also + -------- + numpy.ediff1d : Equivalent function for ndarrays. + + Examples + -------- + >>> import numpy as np + >>> arr = np.ma.array([1, 2, 4, 7, 0]) + >>> np.ma.ediff1d(arr) + masked_array(data=[ 1, 2, 3, -7], + mask=False, + fill_value=999999) + + """ + arr = ma.asanyarray(arr).flat + ed = arr[1:] - arr[:-1] + arrays = [ed] + # + if to_begin is not None: + arrays.insert(0, to_begin) + if to_end is not None: + arrays.append(to_end) + # + if len(arrays) != 1: + # We'll save ourselves a copy of a potentially large array in the common + # case where neither to_begin or to_end was given. + ed = hstack(arrays) + # + return ed + + +def unique(ar1, return_index=False, return_inverse=False): + """ + Finds the unique elements of an array. + + Masked values are considered the same element (masked). The output array + is always a masked array. See `numpy.unique` for more details. + + See Also + -------- + numpy.unique : Equivalent function for ndarrays. + + Examples + -------- + >>> import numpy as np + >>> a = [1, 2, 1000, 2, 3] + >>> mask = [0, 0, 1, 0, 0] + >>> masked_a = np.ma.masked_array(a, mask) + >>> masked_a + masked_array(data=[1, 2, --, 2, 3], + mask=[False, False, True, False, False], + fill_value=999999) + >>> np.ma.unique(masked_a) + masked_array(data=[1, 2, 3, --], + mask=[False, False, False, True], + fill_value=999999) + >>> np.ma.unique(masked_a, return_index=True) + (masked_array(data=[1, 2, 3, --], + mask=[False, False, False, True], + fill_value=999999), array([0, 1, 4, 2])) + >>> np.ma.unique(masked_a, return_inverse=True) + (masked_array(data=[1, 2, 3, --], + mask=[False, False, False, True], + fill_value=999999), array([0, 1, 3, 1, 2])) + >>> np.ma.unique(masked_a, return_index=True, return_inverse=True) + (masked_array(data=[1, 2, 3, --], + mask=[False, False, False, True], + fill_value=999999), array([0, 1, 4, 2]), array([0, 1, 3, 1, 2])) + """ + output = np.unique(ar1, + return_index=return_index, + return_inverse=return_inverse) + if isinstance(output, tuple): + output = list(output) + output[0] = output[0].view(MaskedArray) + output = tuple(output) + else: + output = output.view(MaskedArray) + return output + + +def intersect1d(ar1, ar2, assume_unique=False): + """ + Returns the unique elements common to both arrays. + + Masked values are considered equal one to the other. + The output is always a masked array. + + See `numpy.intersect1d` for more details. + + See Also + -------- + numpy.intersect1d : Equivalent function for ndarrays. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([1, 3, 3, 3], mask=[0, 0, 0, 1]) + >>> y = np.ma.array([3, 1, 1, 1], mask=[0, 0, 0, 1]) + >>> np.ma.intersect1d(x, y) + masked_array(data=[1, 3, --], + mask=[False, False, True], + fill_value=999999) + + """ + if assume_unique: + aux = ma.concatenate((ar1, ar2)) + else: + # Might be faster than unique( intersect1d( ar1, ar2 ) )? + aux = ma.concatenate((unique(ar1), unique(ar2))) + aux.sort() + return aux[:-1][aux[1:] == aux[:-1]] + + +def setxor1d(ar1, ar2, assume_unique=False): + """ + Set exclusive-or of 1-D arrays with unique elements. + + The output is always a masked array. See `numpy.setxor1d` for more details. + + See Also + -------- + numpy.setxor1d : Equivalent function for ndarrays. + + Examples + -------- + >>> import numpy as np + >>> ar1 = np.ma.array([1, 2, 3, 2, 4]) + >>> ar2 = np.ma.array([2, 3, 5, 7, 5]) + >>> np.ma.setxor1d(ar1, ar2) + masked_array(data=[1, 4, 5, 7], + mask=False, + fill_value=999999) + + """ + if not assume_unique: + ar1 = unique(ar1) + ar2 = unique(ar2) + + aux = ma.concatenate((ar1, ar2), axis=None) + if aux.size == 0: + return aux + aux.sort() + auxf = aux.filled() +# flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0 + flag = ma.concatenate(([True], (auxf[1:] != auxf[:-1]), [True])) +# flag2 = ediff1d( flag ) == 0 + flag2 = (flag[1:] == flag[:-1]) + return aux[flag2] + + +def in1d(ar1, ar2, assume_unique=False, invert=False): + """ + Test whether each element of an array is also present in a second + array. + + The output is always a masked array. + + We recommend using :func:`isin` instead of `in1d` for new code. + + See Also + -------- + isin : Version of this function that preserves the shape of ar1. + + Examples + -------- + >>> import numpy as np + >>> ar1 = np.ma.array([0, 1, 2, 5, 0]) + >>> ar2 = [0, 2] + >>> np.ma.in1d(ar1, ar2) + masked_array(data=[ True, False, True, False, True], + mask=False, + fill_value=True) + + """ + if not assume_unique: + ar1, rev_idx = unique(ar1, return_inverse=True) + ar2 = unique(ar2) + + ar = ma.concatenate((ar1, ar2)) + # We need this to be a stable sort, so always use 'mergesort' + # here. The values from the first array should always come before + # the values from the second array. + order = ar.argsort(kind='mergesort') + sar = ar[order] + if invert: + bool_ar = (sar[1:] != sar[:-1]) + else: + bool_ar = (sar[1:] == sar[:-1]) + flag = ma.concatenate((bool_ar, [invert])) + indx = order.argsort(kind='mergesort')[:len(ar1)] + + if assume_unique: + return flag[indx] + else: + return flag[indx][rev_idx] + + +def isin(element, test_elements, assume_unique=False, invert=False): + """ + Calculates `element in test_elements`, broadcasting over + `element` only. + + The output is always a masked array of the same shape as `element`. + See `numpy.isin` for more details. + + See Also + -------- + in1d : Flattened version of this function. + numpy.isin : Equivalent function for ndarrays. + + Examples + -------- + >>> import numpy as np + >>> element = np.ma.array([1, 2, 3, 4, 5, 6]) + >>> test_elements = [0, 2] + >>> np.ma.isin(element, test_elements) + masked_array(data=[False, True, False, False, False, False], + mask=False, + fill_value=True) + + """ + element = ma.asarray(element) + return in1d(element, test_elements, assume_unique=assume_unique, + invert=invert).reshape(element.shape) + + +def union1d(ar1, ar2): + """ + Union of two arrays. + + The output is always a masked array. See `numpy.union1d` for more details. + + See Also + -------- + numpy.union1d : Equivalent function for ndarrays. + + Examples + -------- + >>> import numpy as np + >>> ar1 = np.ma.array([1, 2, 3, 4]) + >>> ar2 = np.ma.array([3, 4, 5, 6]) + >>> np.ma.union1d(ar1, ar2) + masked_array(data=[1, 2, 3, 4, 5, 6], + mask=False, + fill_value=999999) + + """ + return unique(ma.concatenate((ar1, ar2), axis=None)) + + +def setdiff1d(ar1, ar2, assume_unique=False): + """ + Set difference of 1D arrays with unique elements. + + The output is always a masked array. See `numpy.setdiff1d` for more + details. + + See Also + -------- + numpy.setdiff1d : Equivalent function for ndarrays. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1]) + >>> np.ma.setdiff1d(x, [1, 2]) + masked_array(data=[3, --], + mask=[False, True], + fill_value=999999) + + """ + if assume_unique: + ar1 = ma.asarray(ar1).ravel() + else: + ar1 = unique(ar1) + ar2 = unique(ar2) + return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)] + + +############################################################################### +# Covariance # +############################################################################### + + +def _covhelper(x, y=None, rowvar=True, allow_masked=True): + """ + Private function for the computation of covariance and correlation + coefficients. + + """ + x = ma.array(x, ndmin=2, copy=True, dtype=float) + xmask = ma.getmaskarray(x) + # Quick exit if we can't process masked data + if not allow_masked and xmask.any(): + raise ValueError("Cannot process masked data.") + # + if x.shape[0] == 1: + rowvar = True + # Make sure that rowvar is either 0 or 1 + rowvar = int(bool(rowvar)) + axis = 1 - rowvar + if rowvar: + tup = (slice(None), None) + else: + tup = (None, slice(None)) + # + if y is None: + # Check if we can guarantee that the integers in the (N - ddof) + # normalisation can be accurately represented with single-precision + # before computing the dot product. + if x.shape[0] > 2 ** 24 or x.shape[1] > 2 ** 24: + xnm_dtype = np.float64 + else: + xnm_dtype = np.float32 + xnotmask = np.logical_not(xmask).astype(xnm_dtype) + else: + y = array(y, copy=False, ndmin=2, dtype=float) + ymask = ma.getmaskarray(y) + if not allow_masked and ymask.any(): + raise ValueError("Cannot process masked data.") + if xmask.any() or ymask.any(): + if y.shape == x.shape: + # Define some common mask + common_mask = np.logical_or(xmask, ymask) + if common_mask is not nomask: + xmask = x._mask = y._mask = ymask = common_mask + x._sharedmask = False + y._sharedmask = False + x = ma.concatenate((x, y), axis) + # Check if we can guarantee that the integers in the (N - ddof) + # normalisation can be accurately represented with single-precision + # before computing the dot product. + if x.shape[0] > 2 ** 24 or x.shape[1] > 2 ** 24: + xnm_dtype = np.float64 + else: + xnm_dtype = np.float32 + xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype( + xnm_dtype + ) + x -= x.mean(axis=rowvar)[tup] + return (x, xnotmask, rowvar) + + +def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): + """ + Estimate the covariance matrix. + + Except for the handling of missing data this function does the same as + `numpy.cov`. For more details and examples, see `numpy.cov`. + + By default, masked values are recognized as such. If `x` and `y` have the + same shape, a common mask is allocated: if ``x[i,j]`` is masked, then + ``y[i,j]`` will also be masked. + Setting `allow_masked` to False will raise an exception if values are + missing in either of the input arrays. + + Parameters + ---------- + x : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `x` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array_like, optional + An additional set of variables and observations. `y` has the same + shape as `x`. + rowvar : bool, optional + If `rowvar` is True (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + bias : bool, optional + Default normalization (False) is by ``(N-1)``, where ``N`` is the + number of observations given (unbiased estimate). If `bias` is True, + then normalization is by ``N``. This keyword can be overridden by + the keyword ``ddof`` in numpy versions >= 1.5. + allow_masked : bool, optional + If True, masked values are propagated pair-wise: if a value is masked + in `x`, the corresponding value is masked in `y`. + If False, raises a `ValueError` exception when some values are missing. + ddof : {None, int}, optional + If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is + the number of observations; this overrides the value implied by + ``bias``. The default value is ``None``. + + Raises + ------ + ValueError + Raised if some values are missing and `allow_masked` is False. + + See Also + -------- + numpy.cov + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([[0, 1], [1, 1]], mask=[0, 1, 0, 1]) + >>> y = np.ma.array([[1, 0], [0, 1]], mask=[0, 0, 1, 1]) + >>> np.ma.cov(x, y) + masked_array( + data=[[--, --, --, --], + [--, --, --, --], + [--, --, --, --], + [--, --, --, --]], + mask=[[ True, True, True, True], + [ True, True, True, True], + [ True, True, True, True], + [ True, True, True, True]], + fill_value=1e+20, + dtype=float64) + + """ + # Check inputs + if ddof is not None and ddof != int(ddof): + raise ValueError("ddof must be an integer") + # Set up ddof + if ddof is None: + if bias: + ddof = 0 + else: + ddof = 1 + + (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) + if not rowvar: + fact = np.dot(xnotmask.T, xnotmask) - ddof + mask = np.less_equal(fact, 0, dtype=bool) + with np.errstate(divide="ignore", invalid="ignore"): + data = np.dot(filled(x.T, 0), filled(x.conj(), 0)) / fact + result = ma.array(data, mask=mask).squeeze() + else: + fact = np.dot(xnotmask, xnotmask.T) - ddof + mask = np.less_equal(fact, 0, dtype=bool) + with np.errstate(divide="ignore", invalid="ignore"): + data = np.dot(filled(x, 0), filled(x.T.conj(), 0)) / fact + result = ma.array(data, mask=mask).squeeze() + return result + + +def corrcoef(x, y=None, rowvar=True, allow_masked=True, + ): + """ + Return Pearson product-moment correlation coefficients. + + Except for the handling of missing data this function does the same as + `numpy.corrcoef`. For more details and examples, see `numpy.corrcoef`. + + Parameters + ---------- + x : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `x` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array_like, optional + An additional set of variables and observations. `y` has the same + shape as `x`. + rowvar : bool, optional + If `rowvar` is True (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + allow_masked : bool, optional + If True, masked values are propagated pair-wise: if a value is masked + in `x`, the corresponding value is masked in `y`. + If False, raises an exception. Because `bias` is deprecated, this + argument needs to be treated as keyword only to avoid a warning. + + See Also + -------- + numpy.corrcoef : Equivalent function in top-level NumPy module. + cov : Estimate the covariance matrix. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([[0, 1], [1, 1]], mask=[0, 1, 0, 1]) + >>> np.ma.corrcoef(x) + masked_array( + data=[[--, --], + [--, --]], + mask=[[ True, True], + [ True, True]], + fill_value=1e+20, + dtype=float64) + + """ + # Estimate the covariance matrix. + corr = cov(x, y, rowvar, allow_masked=allow_masked) + # The non-masked version returns a masked value for a scalar. + try: + std = ma.sqrt(ma.diagonal(corr)) + except ValueError: + return ma.MaskedConstant() + corr /= ma.multiply.outer(std, std) + return corr + +#####-------------------------------------------------------------------------- +#---- --- Concatenation helpers --- +#####-------------------------------------------------------------------------- + +class MAxisConcatenator(AxisConcatenator): + """ + Translate slice objects to concatenation along an axis. + + For documentation on usage, see `mr_class`. + + See Also + -------- + mr_class + + """ + __slots__ = () + + concatenate = staticmethod(concatenate) + + @classmethod + def makemat(cls, arr): + # There used to be a view as np.matrix here, but we may eventually + # deprecate that class. In preparation, we use the unmasked version + # to construct the matrix (with copy=False for backwards compatibility + # with the .view) + data = super().makemat(arr.data, copy=False) + return array(data, mask=arr.mask) + + def __getitem__(self, key): + # matrix builder syntax, like 'a, b; c, d' + if isinstance(key, str): + raise MAError("Unavailable for masked array.") + + return super().__getitem__(key) + + +class mr_class(MAxisConcatenator): + """ + Translate slice objects to concatenation along the first axis. + + This is the masked array version of `r_`. + + See Also + -------- + r_ + + Examples + -------- + >>> import numpy as np + >>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])] + masked_array(data=[1, 2, 3, ..., 4, 5, 6], + mask=False, + fill_value=999999) + + """ + __slots__ = () + + def __init__(self): + MAxisConcatenator.__init__(self, 0) + + +mr_ = mr_class() + + +#####-------------------------------------------------------------------------- +#---- Find unmasked data --- +#####-------------------------------------------------------------------------- + +def ndenumerate(a, compressed=True): + """ + Multidimensional index iterator. + + Return an iterator yielding pairs of array coordinates and values, + skipping elements that are masked. With `compressed=False`, + `ma.masked` is yielded as the value of masked elements. This + behavior differs from that of `numpy.ndenumerate`, which yields the + value of the underlying data array. + + Notes + ----- + .. versionadded:: 1.23.0 + + Parameters + ---------- + a : array_like + An array with (possibly) masked elements. + compressed : bool, optional + If True (default), masked elements are skipped. + + See Also + -------- + numpy.ndenumerate : Equivalent function ignoring any mask. + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.arange(9).reshape((3, 3)) + >>> a[1, 0] = np.ma.masked + >>> a[1, 2] = np.ma.masked + >>> a[2, 1] = np.ma.masked + >>> a + masked_array( + data=[[0, 1, 2], + [--, 4, --], + [6, --, 8]], + mask=[[False, False, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> for index, x in np.ma.ndenumerate(a): + ... print(index, x) + (0, 0) 0 + (0, 1) 1 + (0, 2) 2 + (1, 1) 4 + (2, 0) 6 + (2, 2) 8 + + >>> for index, x in np.ma.ndenumerate(a, compressed=False): + ... print(index, x) + (0, 0) 0 + (0, 1) 1 + (0, 2) 2 + (1, 0) -- + (1, 1) 4 + (1, 2) -- + (2, 0) 6 + (2, 1) -- + (2, 2) 8 + """ + for it, mask in zip(np.ndenumerate(a), getmaskarray(a).flat): + if not mask: + yield it + elif not compressed: + yield it[0], masked + + +def flatnotmasked_edges(a): + """ + Find the indices of the first and last unmasked values. + + Expects a 1-D `MaskedArray`, returns None if all values are masked. + + Parameters + ---------- + a : array_like + Input 1-D `MaskedArray` + + Returns + ------- + edges : ndarray or None + The indices of first and last non-masked value in the array. + Returns None if all values are masked. + + See Also + -------- + flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges + clump_masked, clump_unmasked + + Notes + ----- + Only accepts 1-D arrays. + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.arange(10) + >>> np.ma.flatnotmasked_edges(a) + array([0, 9]) + + >>> mask = (a < 3) | (a > 8) | (a == 5) + >>> a[mask] = np.ma.masked + >>> np.array(a[~a.mask]) + array([3, 4, 6, 7, 8]) + + >>> np.ma.flatnotmasked_edges(a) + array([3, 8]) + + >>> a[:] = np.ma.masked + >>> print(np.ma.flatnotmasked_edges(a)) + None + + """ + m = getmask(a) + if m is nomask or not np.any(m): + return np.array([0, a.size - 1]) + unmasked = np.flatnonzero(~m) + if len(unmasked) > 0: + return unmasked[[0, -1]] + else: + return None + + +def notmasked_edges(a, axis=None): + """ + Find the indices of the first and last unmasked values along an axis. + + If all values are masked, return None. Otherwise, return a list + of two tuples, corresponding to the indices of the first and last + unmasked values respectively. + + Parameters + ---------- + a : array_like + The input array. + axis : int, optional + Axis along which to perform the operation. + If None (default), applies to a flattened version of the array. + + Returns + ------- + edges : ndarray or list + An array of start and end indexes if there are any masked data in + the array. If there are no masked data in the array, `edges` is a + list of the first and last index. + + See Also + -------- + flatnotmasked_contiguous, flatnotmasked_edges, notmasked_contiguous + clump_masked, clump_unmasked + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(9).reshape((3, 3)) + >>> m = np.zeros_like(a) + >>> m[1:, 1:] = 1 + + >>> am = np.ma.array(a, mask=m) + >>> np.array(am[~am.mask]) + array([0, 1, 2, 3, 6]) + + >>> np.ma.notmasked_edges(am) + array([0, 6]) + + """ + a = asarray(a) + if axis is None or a.ndim == 1: + return flatnotmasked_edges(a) + m = getmaskarray(a) + idx = array(np.indices(a.shape), mask=np.asarray([m] * a.ndim)) + return [tuple(idx[i].min(axis).compressed() for i in range(a.ndim)), + tuple(idx[i].max(axis).compressed() for i in range(a.ndim)), ] + + +def flatnotmasked_contiguous(a): + """ + Find contiguous unmasked data in a masked array. + + Parameters + ---------- + a : array_like + The input array. + + Returns + ------- + slice_list : list + A sorted sequence of `slice` objects (start index, end index). + + See Also + -------- + flatnotmasked_edges, notmasked_contiguous, notmasked_edges + clump_masked, clump_unmasked + + Notes + ----- + Only accepts 2-D arrays at most. + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.arange(10) + >>> np.ma.flatnotmasked_contiguous(a) + [slice(0, 10, None)] + + >>> mask = (a < 3) | (a > 8) | (a == 5) + >>> a[mask] = np.ma.masked + >>> np.array(a[~a.mask]) + array([3, 4, 6, 7, 8]) + + >>> np.ma.flatnotmasked_contiguous(a) + [slice(3, 5, None), slice(6, 9, None)] + >>> a[:] = np.ma.masked + >>> np.ma.flatnotmasked_contiguous(a) + [] + + """ + m = getmask(a) + if m is nomask: + return [slice(0, a.size)] + i = 0 + result = [] + for (k, g) in itertools.groupby(m.ravel()): + n = len(list(g)) + if not k: + result.append(slice(i, i + n)) + i += n + return result + + +def notmasked_contiguous(a, axis=None): + """ + Find contiguous unmasked data in a masked array along the given axis. + + Parameters + ---------- + a : array_like + The input array. + axis : int, optional + Axis along which to perform the operation. + If None (default), applies to a flattened version of the array, and this + is the same as `flatnotmasked_contiguous`. + + Returns + ------- + endpoints : list + A list of slices (start and end indexes) of unmasked indexes + in the array. + + If the input is 2d and axis is specified, the result is a list of lists. + + See Also + -------- + flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges + clump_masked, clump_unmasked + + Notes + ----- + Only accepts 2-D arrays at most. + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(12).reshape((3, 4)) + >>> mask = np.zeros_like(a) + >>> mask[1:, :-1] = 1; mask[0, 1] = 1; mask[-1, 0] = 0 + >>> ma = np.ma.array(a, mask=mask) + >>> ma + masked_array( + data=[[0, --, 2, 3], + [--, --, --, 7], + [8, --, --, 11]], + mask=[[False, True, False, False], + [ True, True, True, False], + [False, True, True, False]], + fill_value=999999) + >>> np.array(ma[~ma.mask]) + array([ 0, 2, 3, 7, 8, 11]) + + >>> np.ma.notmasked_contiguous(ma) + [slice(0, 1, None), slice(2, 4, None), slice(7, 9, None), slice(11, 12, None)] + + >>> np.ma.notmasked_contiguous(ma, axis=0) + [[slice(0, 1, None), slice(2, 3, None)], [], [slice(0, 1, None)], [slice(0, 3, None)]] + + >>> np.ma.notmasked_contiguous(ma, axis=1) + [[slice(0, 1, None), slice(2, 4, None)], [slice(3, 4, None)], [slice(0, 1, None), slice(3, 4, None)]] + + """ # noqa: E501 + a = asarray(a) + nd = a.ndim + if nd > 2: + raise NotImplementedError("Currently limited to at most 2D array.") + if axis is None or nd == 1: + return flatnotmasked_contiguous(a) + # + result = [] + # + other = (axis + 1) % 2 + idx = [0, 0] + idx[axis] = slice(None, None) + # + for i in range(a.shape[other]): + idx[other] = i + result.append(flatnotmasked_contiguous(a[tuple(idx)])) + return result + + +def _ezclump(mask): + """ + Finds the clumps (groups of data with the same values) for a 1D bool array. + + Returns a series of slices. + """ + if mask.ndim > 1: + mask = mask.ravel() + idx = (mask[1:] ^ mask[:-1]).nonzero() + idx = idx[0] + 1 + + if mask[0]: + if len(idx) == 0: + return [slice(0, mask.size)] + + r = [slice(0, idx[0])] + r.extend((slice(left, right) + for left, right in zip(idx[1:-1:2], idx[2::2]))) + else: + if len(idx) == 0: + return [] + + r = [slice(left, right) for left, right in zip(idx[:-1:2], idx[1::2])] + + if mask[-1]: + r.append(slice(idx[-1], mask.size)) + return r + + +def clump_unmasked(a): + """ + Return list of slices corresponding to the unmasked clumps of a 1-D array. + (A "clump" is defined as a contiguous region of the array). + + Parameters + ---------- + a : ndarray + A one-dimensional masked array. + + Returns + ------- + slices : list of slice + The list of slices, one for each continuous region of unmasked + elements in `a`. + + See Also + -------- + flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges + notmasked_contiguous, clump_masked + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.masked_array(np.arange(10)) + >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked + >>> np.ma.clump_unmasked(a) + [slice(3, 6, None), slice(7, 8, None)] + + """ + mask = getattr(a, '_mask', nomask) + if mask is nomask: + return [slice(0, a.size)] + return _ezclump(~mask) + + +def clump_masked(a): + """ + Returns a list of slices corresponding to the masked clumps of a 1-D array. + (A "clump" is defined as a contiguous region of the array). + + Parameters + ---------- + a : ndarray + A one-dimensional masked array. + + Returns + ------- + slices : list of slice + The list of slices, one for each continuous region of masked elements + in `a`. + + See Also + -------- + flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges + notmasked_contiguous, clump_unmasked + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.masked_array(np.arange(10)) + >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked + >>> np.ma.clump_masked(a) + [slice(0, 3, None), slice(6, 7, None), slice(8, 10, None)] + + """ + mask = ma.getmask(a) + if mask is nomask: + return [] + return _ezclump(mask) + + +############################################################################### +# Polynomial fit # +############################################################################### + + +def vander(x, n=None): + """ + Masked values in the input array result in rows of zeros. + + """ + _vander = np.vander(x, n) + m = getmask(x) + if m is not nomask: + _vander[m] = 0 + return _vander + + +vander.__doc__ = ma.doc_note(np.vander.__doc__, vander.__doc__) + + +def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): + """ + Any masked values in x is propagated in y, and vice-versa. + + """ + x = asarray(x) + y = asarray(y) + + m = getmask(x) + if y.ndim == 1: + m = mask_or(m, getmask(y)) + elif y.ndim == 2: + my = getmask(mask_rows(y)) + if my is not nomask: + m = mask_or(m, my[:, 0]) + else: + raise TypeError("Expected a 1D or 2D array for y!") + + if w is not None: + w = asarray(w) + if w.ndim != 1: + raise TypeError("expected a 1-d array for weights") + if w.shape[0] != y.shape[0]: + raise TypeError("expected w and y to have the same length") + m = mask_or(m, getmask(w)) + + if m is not nomask: + not_m = ~m + if w is not None: + w = w[not_m] + return np.polyfit(x[not_m], y[not_m], deg, rcond, full, w, cov) + else: + return np.polyfit(x, y, deg, rcond, full, w, cov) + + +polyfit.__doc__ = ma.doc_note(np.polyfit.__doc__, polyfit.__doc__) diff --git a/local_packages/numpy/ma/extras.pyi b/local_packages/numpy/ma/extras.pyi new file mode 100644 index 0000000..70881bd --- /dev/null +++ b/local_packages/numpy/ma/extras.pyi @@ -0,0 +1,297 @@ +from _typeshed import Incomplete +from collections.abc import Sequence +from typing import SupportsIndex, TypeAlias, TypeVar, overload + +import numpy as np +from numpy import _CastingKind +from numpy._typing import ( + ArrayLike, + DTypeLike, + _AnyShape, + _ArrayLike, + _DTypeLike, + _ShapeLike, +) +from numpy.lib._function_base_impl import average +from numpy.lib._index_tricks_impl import AxisConcatenator + +from .core import MaskedArray, dot + +__all__ = [ + "apply_along_axis", + "apply_over_axes", + "atleast_1d", + "atleast_2d", + "atleast_3d", + "average", + "clump_masked", + "clump_unmasked", + "column_stack", + "compress_cols", + "compress_nd", + "compress_rowcols", + "compress_rows", + "corrcoef", + "count_masked", + "cov", + "diagflat", + "dot", + "dstack", + "ediff1d", + "flatnotmasked_contiguous", + "flatnotmasked_edges", + "hsplit", + "hstack", + "in1d", + "intersect1d", + "isin", + "mask_cols", + "mask_rowcols", + "mask_rows", + "masked_all", + "masked_all_like", + "median", + "mr_", + "ndenumerate", + "notmasked_contiguous", + "notmasked_edges", + "polyfit", + "row_stack", + "setdiff1d", + "setxor1d", + "stack", + "union1d", + "unique", + "vander", + "vstack", +] + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_ScalarT1 = TypeVar("_ScalarT1", bound=np.generic) +_ScalarT2 = TypeVar("_ScalarT2", bound=np.generic) +_MArrayT = TypeVar("_MArrayT", bound=MaskedArray) + +_MArray: TypeAlias = MaskedArray[_AnyShape, np.dtype[_ScalarT]] + +### + +# keep in sync with `numpy._core.shape_base.atleast_1d` +@overload +def atleast_1d(a0: _ArrayLike[_ScalarT], /) -> _MArray[_ScalarT]: ... +@overload +def atleast_1d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[_MArray[_ScalarT1], _MArray[_ScalarT2]]: ... +@overload +def atleast_1d( + a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT] +) -> tuple[_MArray[_ScalarT], ...]: ... +@overload +def atleast_1d(a0: ArrayLike, /) -> _MArray[Incomplete]: ... +@overload +def atleast_1d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[_MArray[Incomplete], _MArray[Incomplete]]: ... +@overload +def atleast_1d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[_MArray[Incomplete], ...]: ... + +# keep in sync with `numpy._core.shape_base.atleast_2d` +@overload +def atleast_2d(a0: _ArrayLike[_ScalarT], /) -> _MArray[_ScalarT]: ... +@overload +def atleast_2d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[_MArray[_ScalarT1], _MArray[_ScalarT2]]: ... +@overload +def atleast_2d( + a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT] +) -> tuple[_MArray[_ScalarT], ...]: ... +@overload +def atleast_2d(a0: ArrayLike, /) -> _MArray[Incomplete]: ... +@overload +def atleast_2d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[_MArray[Incomplete], _MArray[Incomplete]]: ... +@overload +def atleast_2d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[_MArray[Incomplete], ...]: ... + +# keep in sync with `numpy._core.shape_base.atleast_2d` +@overload +def atleast_3d(a0: _ArrayLike[_ScalarT], /) -> _MArray[_ScalarT]: ... +@overload +def atleast_3d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[_MArray[_ScalarT1], _MArray[_ScalarT2]]: ... +@overload +def atleast_3d( + a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT] +) -> tuple[_MArray[_ScalarT], ...]: ... +@overload +def atleast_3d(a0: ArrayLike, /) -> _MArray[Incomplete]: ... +@overload +def atleast_3d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[_MArray[Incomplete], _MArray[Incomplete]]: ... +@overload +def atleast_3d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[_MArray[Incomplete], ...]: ... + +# keep in sync with `numpy._core.shape_base.vstack` +@overload +def vstack( + tup: Sequence[_ArrayLike[_ScalarT]], + *, + dtype: None = None, + casting: _CastingKind = "same_kind" +) -> _MArray[_ScalarT]: ... +@overload +def vstack( + tup: Sequence[ArrayLike], + *, + dtype: _DTypeLike[_ScalarT], + casting: _CastingKind = "same_kind" +) -> _MArray[_ScalarT]: ... +@overload +def vstack( + tup: Sequence[ArrayLike], + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind" +) -> _MArray[Incomplete]: ... + +row_stack = vstack + +# keep in sync with `numpy._core.shape_base.hstack` +@overload +def hstack( + tup: Sequence[_ArrayLike[_ScalarT]], + *, + dtype: None = None, + casting: _CastingKind = "same_kind" +) -> _MArray[_ScalarT]: ... +@overload +def hstack( + tup: Sequence[ArrayLike], + *, + dtype: _DTypeLike[_ScalarT], + casting: _CastingKind = "same_kind" +) -> _MArray[_ScalarT]: ... +@overload +def hstack( + tup: Sequence[ArrayLike], + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind" +) -> _MArray[Incomplete]: ... + +# keep in sync with `numpy._core.shape_base_impl.column_stack` +@overload +def column_stack(tup: Sequence[_ArrayLike[_ScalarT]]) -> _MArray[_ScalarT]: ... +@overload +def column_stack(tup: Sequence[ArrayLike]) -> _MArray[Incomplete]: ... + +# keep in sync with `numpy._core.shape_base_impl.dstack` +@overload +def dstack(tup: Sequence[_ArrayLike[_ScalarT]]) -> _MArray[_ScalarT]: ... +@overload +def dstack(tup: Sequence[ArrayLike]) -> _MArray[Incomplete]: ... + +# keep in sync with `numpy._core.shape_base.stack` +@overload +def stack( + arrays: Sequence[_ArrayLike[_ScalarT]], + axis: SupportsIndex = 0, + out: None = None, + *, + dtype: None = None, + casting: _CastingKind = "same_kind" +) -> _MArray[_ScalarT]: ... +@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex = 0, + out: None = None, + *, + dtype: _DTypeLike[_ScalarT], + casting: _CastingKind = "same_kind" +) -> _MArray[_ScalarT]: ... +@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex = 0, + out: None = None, + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind" +) -> _MArray[Incomplete]: ... +@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex, + out: _MArrayT, + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind", +) -> _MArrayT: ... +@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex = 0, + *, + out: _MArrayT, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind", +) -> _MArrayT: ... + +# keep in sync with `numpy._core.shape_base_impl.hsplit` +@overload +def hsplit(ary: _ArrayLike[_ScalarT], indices_or_sections: _ShapeLike) -> list[_MArray[_ScalarT]]: ... +@overload +def hsplit(ary: ArrayLike, indices_or_sections: _ShapeLike) -> list[_MArray[Incomplete]]: ... + +# keep in sync with `numpy._core.twodim_base_impl.hsplit` +@overload +def diagflat(v: _ArrayLike[_ScalarT], k: int = 0) -> _MArray[_ScalarT]: ... +@overload +def diagflat(v: ArrayLike, k: int = 0) -> _MArray[Incomplete]: ... + +# TODO: everything below + +def count_masked(arr, axis=None): ... +def masked_all(shape, dtype=float): ... # noqa: PYI014 +def masked_all_like(arr): ... + +def apply_along_axis(func1d, axis, arr, *args, **kwargs): ... +def apply_over_axes(func, a, axes): ... +def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): ... +def compress_nd(x, axis=None): ... +def compress_rowcols(x, axis=None): ... +def compress_rows(a): ... +def compress_cols(a): ... +def mask_rows(a, axis=...): ... +def mask_cols(a, axis=...): ... +def ediff1d(arr, to_end=None, to_begin=None): ... +def unique(ar1, return_index=False, return_inverse=False): ... +def intersect1d(ar1, ar2, assume_unique=False): ... +def setxor1d(ar1, ar2, assume_unique=False): ... +def in1d(ar1, ar2, assume_unique=False, invert=False): ... +def isin(element, test_elements, assume_unique=False, invert=False): ... +def union1d(ar1, ar2): ... +def setdiff1d(ar1, ar2, assume_unique=False): ... +def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): ... +def corrcoef(x, y=None, rowvar=True, allow_masked=True): ... + +class MAxisConcatenator(AxisConcatenator): + __slots__ = () + + @staticmethod + def concatenate(arrays: Incomplete, axis: int = 0) -> Incomplete: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + @classmethod + def makemat(cls, arr: Incomplete) -> Incomplete: ... # type: ignore[override] # pyright: ignore[reportIncompatibleVariableOverride] + +class mr_class(MAxisConcatenator): + __slots__ = () + + def __init__(self) -> None: ... + +mr_: mr_class + +def ndenumerate(a, compressed=True): ... +def flatnotmasked_edges(a): ... +def notmasked_edges(a, axis=None): ... +def flatnotmasked_contiguous(a): ... +def notmasked_contiguous(a, axis=None): ... +def clump_unmasked(a): ... +def clump_masked(a): ... +def vander(x, n=None): ... +def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): ... + +# +def mask_rowcols(a: Incomplete, axis: Incomplete | None = None) -> MaskedArray[Incomplete, np.dtype[Incomplete]]: ... diff --git a/local_packages/numpy/ma/mrecords.py b/local_packages/numpy/ma/mrecords.py new file mode 100644 index 0000000..bb4a270 --- /dev/null +++ b/local_packages/numpy/ma/mrecords.py @@ -0,0 +1,762 @@ +""":mod:`numpy.ma..mrecords` + +Defines the equivalent of :class:`numpy.recarrays` for masked arrays, +where fields can be accessed as attributes. +Note that :class:`numpy.ma.MaskedArray` already supports structured datatypes +and the masking of individual fields. + +.. moduleauthor:: Pierre Gerard-Marchant + +""" +# We should make sure that no field is called '_mask','mask','_fieldmask', +# or whatever restricted keywords. An idea would be to no bother in the +# first place, and then rename the invalid fields with a trailing +# underscore. Maybe we could just overload the parser function ? + +import warnings + +import numpy as np +import numpy.ma as ma + +_byteorderconv = np._core.records._byteorderconv + + +_check_fill_value = ma.core._check_fill_value + + +__all__ = [ + 'MaskedRecords', 'mrecarray', 'fromarrays', 'fromrecords', + 'fromtextfile', 'addfield', +] + +reserved_fields = ['_data', '_mask', '_fieldmask', 'dtype'] + + +def _checknames(descr, names=None): + """ + Checks that field names ``descr`` are not reserved keywords. + + If this is the case, a default 'f%i' is substituted. If the argument + `names` is not None, updates the field names to valid names. + + """ + ndescr = len(descr) + default_names = [f'f{i}' for i in range(ndescr)] + if names is None: + new_names = default_names + else: + if isinstance(names, (tuple, list)): + new_names = names + elif isinstance(names, str): + new_names = names.split(',') + else: + raise NameError(f'illegal input names {names!r}') + nnames = len(new_names) + if nnames < ndescr: + new_names += default_names[nnames:] + ndescr = [] + for (n, d, t) in zip(new_names, default_names, descr.descr): + if n in reserved_fields: + if t[0] in reserved_fields: + ndescr.append((d, t[1])) + else: + ndescr.append(t) + else: + ndescr.append((n, t[1])) + return np.dtype(ndescr) + + +def _get_fieldmask(self): + mdescr = [(n, '|b1') for n in self.dtype.names] + fdmask = np.empty(self.shape, dtype=mdescr) + fdmask.flat = tuple([False] * len(mdescr)) + return fdmask + + +class MaskedRecords(ma.MaskedArray): + """ + + Attributes + ---------- + _data : recarray + Underlying data, as a record array. + _mask : boolean array + Mask of the records. A record is masked when all its fields are + masked. + _fieldmask : boolean recarray + Record array of booleans, setting the mask of each individual field + of each record. + _fill_value : record + Filling values for each field. + + """ + + def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None, + formats=None, names=None, titles=None, + byteorder=None, aligned=False, + mask=ma.nomask, hard_mask=False, fill_value=None, keep_mask=True, + copy=False, + **options): + + self = np.recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset, + strides=strides, formats=formats, names=names, + titles=titles, byteorder=byteorder, + aligned=aligned,) + + mdtype = ma.make_mask_descr(self.dtype) + if mask is ma.nomask or not np.size(mask): + if not keep_mask: + self._mask = tuple([False] * len(mdtype)) + else: + mask = np.array(mask, copy=copy) + if mask.shape != self.shape: + (nd, nm) = (self.size, mask.size) + if nm == 1: + mask = np.resize(mask, self.shape) + elif nm == nd: + mask = np.reshape(mask, self.shape) + else: + msg = (f"Mask and data not compatible: data size is {nd}," + " mask size is {nm}.") + raise ma.MAError(msg) + if not keep_mask: + self.__setmask__(mask) + self._sharedmask = True + else: + if mask.dtype == mdtype: + _mask = mask + else: + _mask = np.array([tuple([m] * len(mdtype)) for m in mask], + dtype=mdtype) + self._mask = _mask + return self + + def __array_finalize__(self, obj): + # Make sure we have a _fieldmask by default + _mask = getattr(obj, '_mask', None) + if _mask is None: + objmask = getattr(obj, '_mask', ma.nomask) + _dtype = np.ndarray.__getattribute__(self, 'dtype') + if objmask is ma.nomask: + _mask = ma.make_mask_none(self.shape, dtype=_dtype) + else: + mdescr = ma.make_mask_descr(_dtype) + _mask = np.array([tuple([m] * len(mdescr)) for m in objmask], + dtype=mdescr).view(np.recarray) + # Update some of the attributes + _dict = self.__dict__ + _dict.update(_mask=_mask) + self._update_from(obj) + if _dict['_baseclass'] == np.ndarray: + _dict['_baseclass'] = np.recarray + + @property + def _data(self): + """ + Returns the data as a recarray. + + """ + return np.ndarray.view(self, np.recarray) + + @property + def _fieldmask(self): + """ + Alias to mask. + + """ + return self._mask + + def __len__(self): + """ + Returns the length + + """ + # We have more than one record + if self.ndim: + return len(self._data) + # We have only one record: return the nb of fields + return len(self.dtype) + + def __getattribute__(self, attr): + try: + return object.__getattribute__(self, attr) + except AttributeError: + # attr must be a fieldname + pass + fielddict = np.ndarray.__getattribute__(self, 'dtype').fields + try: + res = fielddict[attr][:2] + except (TypeError, KeyError) as e: + raise AttributeError( + f'record array has no attribute {attr}') from e + # So far, so good + _localdict = np.ndarray.__getattribute__(self, '__dict__') + _data = np.ndarray.view(self, _localdict['_baseclass']) + obj = _data.getfield(*res) + if obj.dtype.names is not None: + raise NotImplementedError("MaskedRecords is currently limited to" + "simple records.") + # Get some special attributes + # Reset the object's mask + hasmasked = False + _mask = _localdict.get('_mask', None) + if _mask is not None: + try: + _mask = _mask[attr] + except IndexError: + # Couldn't find a mask: use the default (nomask) + pass + tp_len = len(_mask.dtype) + hasmasked = _mask.view((bool, ((tp_len,) if tp_len else ()))).any() + if (obj.shape or hasmasked): + obj = obj.view(ma.MaskedArray) + obj._baseclass = np.ndarray + obj._isfield = True + obj._mask = _mask + # Reset the field values + _fill_value = _localdict.get('_fill_value', None) + if _fill_value is not None: + try: + obj._fill_value = _fill_value[attr] + except ValueError: + obj._fill_value = None + else: + obj = obj.item() + return obj + + def __setattr__(self, attr, val): + """ + Sets the attribute attr to the value val. + + """ + # Should we call __setmask__ first ? + if attr in ['mask', 'fieldmask']: + self.__setmask__(val) + return + # Create a shortcut (so that we don't have to call getattr all the time) + _localdict = object.__getattribute__(self, '__dict__') + # Check whether we're creating a new field + newattr = attr not in _localdict + try: + # Is attr a generic attribute ? + ret = object.__setattr__(self, attr, val) + except Exception: + # Not a generic attribute: exit if it's not a valid field + fielddict = np.ndarray.__getattribute__(self, 'dtype').fields or {} + optinfo = np.ndarray.__getattribute__(self, '_optinfo') or {} + if not (attr in fielddict or attr in optinfo): + raise + else: + # Get the list of names + fielddict = np.ndarray.__getattribute__(self, 'dtype').fields or {} + # Check the attribute + if attr not in fielddict: + return ret + if newattr: + # We just added this one or this setattr worked on an + # internal attribute. + try: + object.__delattr__(self, attr) + except Exception: + return ret + # Let's try to set the field + try: + res = fielddict[attr][:2] + except (TypeError, KeyError) as e: + raise AttributeError( + f'record array has no attribute {attr}') from e + + if val is ma.masked: + _fill_value = _localdict['_fill_value'] + if _fill_value is not None: + dval = _localdict['_fill_value'][attr] + else: + dval = val + mval = True + else: + dval = ma.filled(val) + mval = ma.getmaskarray(val) + obj = np.ndarray.__getattribute__(self, '_data').setfield(dval, *res) + _localdict['_mask'].__setitem__(attr, mval) + return obj + + def __getitem__(self, indx): + """ + Returns all the fields sharing the same fieldname base. + + The fieldname base is either `_data` or `_mask`. + + """ + _localdict = self.__dict__ + _mask = np.ndarray.__getattribute__(self, '_mask') + _data = np.ndarray.view(self, _localdict['_baseclass']) + # We want a field + if isinstance(indx, str): + # Make sure _sharedmask is True to propagate back to _fieldmask + # Don't use _set_mask, there are some copies being made that + # break propagation Don't force the mask to nomask, that wreaks + # easy masking + obj = _data[indx].view(ma.MaskedArray) + obj._mask = _mask[indx] + obj._sharedmask = True + fval = _localdict['_fill_value'] + if fval is not None: + obj._fill_value = fval[indx] + # Force to masked if the mask is True + if not obj.ndim and obj._mask: + return ma.masked + return obj + # We want some elements. + # First, the data. + obj = np.asarray(_data[indx]).view(mrecarray) + obj._mask = np.asarray(_mask[indx]).view(np.recarray) + return obj + + def __setitem__(self, indx, value): + """ + Sets the given record to value. + + """ + ma.MaskedArray.__setitem__(self, indx, value) + if isinstance(indx, str): + self._mask[indx] = ma.getmaskarray(value) + + def __str__(self): + """ + Calculates the string representation. + + """ + if self.size > 1: + mstr = [f"({','.join([str(i) for i in s])})" + for s in zip(*[getattr(self, f) for f in self.dtype.names])] + return f"[{', '.join(mstr)}]" + else: + mstr = [f"{','.join([str(i) for i in s])}" + for s in zip([getattr(self, f) for f in self.dtype.names])] + return f"({', '.join(mstr)})" + + def __repr__(self): + """ + Calculates the repr representation. + + """ + _names = self.dtype.names + fmt = f"%{max(len(n) for n in _names) + 4}s : %s" + reprstr = [fmt % (f, getattr(self, f)) for f in self.dtype.names] + reprstr.insert(0, 'masked_records(') + reprstr.extend([fmt % (' fill_value', self.fill_value), + ' )']) + return str("\n".join(reprstr)) + + def view(self, dtype=None, type=None): + """ + Returns a view of the mrecarray. + + """ + # OK, basic copy-paste from MaskedArray.view. + if dtype is None: + if type is None: + output = np.ndarray.view(self) + else: + output = np.ndarray.view(self, type) + # Here again. + elif type is None: + try: + if issubclass(dtype, np.ndarray): + output = np.ndarray.view(self, dtype) + else: + output = np.ndarray.view(self, dtype) + # OK, there's the change + except TypeError: + dtype = np.dtype(dtype) + # we need to revert to MaskedArray, but keeping the possibility + # of subclasses (eg, TimeSeriesRecords), so we'll force a type + # set to the first parent + if dtype.fields is None: + basetype = self.__class__.__bases__[0] + output = self.__array__().view(dtype, basetype) + output._update_from(self) + else: + output = np.ndarray.view(self, dtype) + output._fill_value = None + else: + output = np.ndarray.view(self, dtype, type) + # Update the mask, just like in MaskedArray.view + if (getattr(output, '_mask', ma.nomask) is not ma.nomask): + mdtype = ma.make_mask_descr(output.dtype) + output._mask = self._mask.view(mdtype, np.ndarray) + output._mask.shape = output.shape + return output + + def harden_mask(self): + """ + Forces the mask to hard. + + """ + self._hardmask = True + + def soften_mask(self): + """ + Forces the mask to soft + + """ + self._hardmask = False + + def copy(self): + """ + Returns a copy of the masked record. + + """ + copied = self._data.copy().view(type(self)) + copied._mask = self._mask.copy() + return copied + + def tolist(self, fill_value=None): + """ + Return the data portion of the array as a list. + + Data items are converted to the nearest compatible Python type. + Masked values are converted to fill_value. If fill_value is None, + the corresponding entries in the output list will be ``None``. + + """ + if fill_value is not None: + return self.filled(fill_value).tolist() + result = np.array(self.filled().tolist(), dtype=object) + mask = np.array(self._mask.tolist()) + result[mask] = None + return result.tolist() + + def __getstate__(self): + """Return the internal state of the masked array. + + This is for pickling. + + """ + state = (1, + self.shape, + self.dtype, + self.flags.fnc, + self._data.tobytes(), + self._mask.tobytes(), + self._fill_value, + ) + return state + + def __setstate__(self, state): + """ + Restore the internal state of the masked array. + + This is for pickling. ``state`` is typically the output of the + ``__getstate__`` output, and is a 5-tuple: + + - class name + - a tuple giving the shape of the data + - a typecode for the data + - a binary string for the data + - a binary string for the mask. + + """ + (ver, shp, typ, isf, raw, msk, flv) = state + np.ndarray.__setstate__(self, (shp, typ, isf, raw)) + mdtype = np.dtype([(k, np.bool) for (k, _) in self.dtype.descr]) + self.__dict__['_mask'].__setstate__((shp, mdtype, isf, msk)) + self.fill_value = flv + + def __reduce__(self): + """ + Return a 3-tuple for pickling a MaskedArray. + + """ + return (_mrreconstruct, + (self.__class__, self._baseclass, (0,), 'b',), + self.__getstate__()) + + +def _mrreconstruct(subtype, baseclass, baseshape, basetype,): + """ + Build a new MaskedArray from the information stored in a pickle. + + """ + _data = np.ndarray.__new__(baseclass, baseshape, basetype).view(subtype) + _mask = np.ndarray.__new__(np.ndarray, baseshape, 'b1') + return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,) + + +mrecarray = MaskedRecords + + +############################################################################### +# Constructors # +############################################################################### + + +def fromarrays(arraylist, dtype=None, shape=None, formats=None, + names=None, titles=None, aligned=False, byteorder=None, + fill_value=None): + """ + Creates a mrecarray from a (flat) list of masked arrays. + + Parameters + ---------- + arraylist : sequence + A list of (masked) arrays. Each element of the sequence is first converted + to a masked array if needed. If a 2D array is passed as argument, it is + processed line by line + dtype : {None, dtype}, optional + Data type descriptor. + shape : {None, integer}, optional + Number of records. If None, shape is defined from the shape of the + first array in the list. + formats : {None, sequence}, optional + Sequence of formats for each individual field. If None, the formats will + be autodetected by inspecting the fields and selecting the highest dtype + possible. + names : {None, sequence}, optional + Sequence of the names of each field. + fill_value : {None, sequence}, optional + Sequence of data to be used as filling values. + + Notes + ----- + Lists of tuples should be preferred over lists of lists for faster processing. + + """ + datalist = [ma.getdata(x) for x in arraylist] + masklist = [np.atleast_1d(ma.getmaskarray(x)) for x in arraylist] + _array = np.rec.fromarrays(datalist, + dtype=dtype, shape=shape, formats=formats, + names=names, titles=titles, aligned=aligned, + byteorder=byteorder).view(mrecarray) + _array._mask.flat = list(zip(*masklist)) + if fill_value is not None: + _array.fill_value = fill_value + return _array + + +def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None, + titles=None, aligned=False, byteorder=None, + fill_value=None, mask=ma.nomask): + """ + Creates a MaskedRecords from a list of records. + + Parameters + ---------- + reclist : sequence + A list of records. Each element of the sequence is first converted + to a masked array if needed. If a 2D array is passed as argument, it is + processed line by line + dtype : {None, dtype}, optional + Data type descriptor. + shape : {None,int}, optional + Number of records. If None, ``shape`` is defined from the shape of the + first array in the list. + formats : {None, sequence}, optional + Sequence of formats for each individual field. If None, the formats will + be autodetected by inspecting the fields and selecting the highest dtype + possible. + names : {None, sequence}, optional + Sequence of the names of each field. + fill_value : {None, sequence}, optional + Sequence of data to be used as filling values. + mask : {nomask, sequence}, optional. + External mask to apply on the data. + + Notes + ----- + Lists of tuples should be preferred over lists of lists for faster processing. + + """ + # Grab the initial _fieldmask, if needed: + _mask = getattr(reclist, '_mask', None) + # Get the list of records. + if isinstance(reclist, np.ndarray): + # Make sure we don't have some hidden mask + if isinstance(reclist, ma.MaskedArray): + reclist = reclist.filled().view(np.ndarray) + # Grab the initial dtype, just in case + if dtype is None: + dtype = reclist.dtype + reclist = reclist.tolist() + mrec = np.rec.fromrecords(reclist, dtype=dtype, shape=shape, formats=formats, + names=names, titles=titles, + aligned=aligned, byteorder=byteorder).view(mrecarray) + # Set the fill_value if needed + if fill_value is not None: + mrec.fill_value = fill_value + # Now, let's deal w/ the mask + if mask is not ma.nomask: + mask = np.asarray(mask) + maskrecordlength = len(mask.dtype) + if maskrecordlength: + mrec._mask.flat = mask + elif mask.ndim == 2: + mrec._mask.flat = [tuple(m) for m in mask] + else: + mrec.__setmask__(mask) + if _mask is not None: + mrec._mask[:] = _mask + return mrec + + +def _guessvartypes(arr): + """ + Tries to guess the dtypes of the str_ ndarray `arr`. + + Guesses by testing element-wise conversion. Returns a list of dtypes. + The array is first converted to ndarray. If the array is 2D, the test + is performed on the first line. An exception is raised if the file is + 3D or more. + + """ + vartypes = [] + arr = np.asarray(arr) + if arr.ndim == 2: + arr = arr[0] + elif arr.ndim > 2: + raise ValueError("The array should be 2D at most!") + # Start the conversion loop. + for f in arr: + try: + int(f) + except (ValueError, TypeError): + try: + float(f) + except (ValueError, TypeError): + try: + complex(f) + except (ValueError, TypeError): + vartypes.append(arr.dtype) + else: + vartypes.append(np.dtype(complex)) + else: + vartypes.append(np.dtype(float)) + else: + vartypes.append(np.dtype(int)) + return vartypes + + +def openfile(fname): + """ + Opens the file handle of file `fname`. + + """ + # A file handle + if hasattr(fname, 'readline'): + return fname + # Try to open the file and guess its type + try: + f = open(fname) + except FileNotFoundError as e: + raise FileNotFoundError(f"No such file: '{fname}'") from e + if f.readline()[:2] != "\\x": + f.seek(0, 0) + return f + f.close() + raise NotImplementedError("Wow, binary file") + + +def fromtextfile(fname, delimiter=None, commentchar='#', missingchar='', + varnames=None, vartypes=None): + """ + Creates a mrecarray from data stored in the file `filename`. + + Parameters + ---------- + fname : {file name/handle} + Handle of an opened file. + delimiter : {None, string}, optional + Alphanumeric character used to separate columns in the file. + If None, any (group of) white spacestring(s) will be used. + commentchar : {'#', string}, optional + Alphanumeric character used to mark the start of a comment. + missingchar : {'', string}, optional + String indicating missing data, and used to create the masks. + varnames : {None, sequence}, optional + Sequence of the variable names. If None, a list will be created from + the first non empty line of the file. + vartypes : {None, sequence}, optional + Sequence of the variables dtypes. If None, it will be estimated from + the first non-commented line. + + + Ultra simple: the varnames are in the header, one line""" + + # Try to open the file. + ftext = openfile(fname) + + # Get the first non-empty line as the varnames + while True: + line = ftext.readline() + firstline = line[:line.find(commentchar)].strip() + _varnames = firstline.split(delimiter) + if len(_varnames) > 1: + break + if varnames is None: + varnames = _varnames + + # Get the data. + _variables = ma.masked_array([line.strip().split(delimiter) for line in ftext + if line[0] != commentchar and len(line) > 1]) + (_, nfields) = _variables.shape + ftext.close() + + # Try to guess the dtype. + if vartypes is None: + vartypes = _guessvartypes(_variables[0]) + else: + vartypes = [np.dtype(v) for v in vartypes] + if len(vartypes) != nfields: + msg = f"Attempting to {len(vartypes)} dtypes for {nfields} fields!" + msg += " Reverting to default." + warnings.warn(msg, stacklevel=2) + vartypes = _guessvartypes(_variables[0]) + + # Construct the descriptor. + mdescr = list(zip(varnames, vartypes)) + mfillv = [ma.default_fill_value(f) for f in vartypes] + + # Get the data and the mask. + # We just need a list of masked_arrays. It's easier to create it like that: + _mask = (_variables.T == missingchar) + _datalist = [ma.masked_array(a, mask=m, dtype=t, fill_value=f) + for (a, m, t, f) in zip(_variables.T, _mask, vartypes, mfillv)] + + return fromarrays(_datalist, dtype=mdescr) + + +def addfield(mrecord, newfield, newfieldname=None): + """Adds a new field to the masked record array + + Uses `newfield` as data and `newfieldname` as name. If `newfieldname` + is None, the new field name is set to 'fi', where `i` is the number of + existing fields. + + """ + _data = mrecord._data + _mask = mrecord._mask + if newfieldname is None or newfieldname in reserved_fields: + newfieldname = f'f{len(_data.dtype)}' + newfield = ma.array(newfield) + # Get the new data. + # Create a new empty recarray + newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)]) + newdata = np.recarray(_data.shape, newdtype) + # Add the existing field + [newdata.setfield(_data.getfield(*f), *f) + for f in _data.dtype.fields.values()] + # Add the new field + newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname]) + newdata = newdata.view(MaskedRecords) + # Get the new mask + # Create a new empty recarray + newmdtype = np.dtype([(n, np.bool) for n in newdtype.names]) + newmask = np.recarray(_data.shape, newmdtype) + # Add the old masks + [newmask.setfield(_mask.getfield(*f), *f) + for f in _mask.dtype.fields.values()] + # Add the mask of the new field + newmask.setfield(ma.getmaskarray(newfield), + *newmask.dtype.fields[newfieldname]) + newdata._mask = newmask + return newdata diff --git a/local_packages/numpy/ma/mrecords.pyi b/local_packages/numpy/ma/mrecords.pyi new file mode 100644 index 0000000..737a34e --- /dev/null +++ b/local_packages/numpy/ma/mrecords.pyi @@ -0,0 +1,96 @@ +from typing import Any, Generic +from typing_extensions import TypeVar + +import numpy as np +from numpy._typing import _AnyShape + +from .core import MaskedArray + +__all__ = [ + "MaskedRecords", + "mrecarray", + "fromarrays", + "fromrecords", + "fromtextfile", + "addfield", +] + +_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], default=_AnyShape, covariant=True) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) + +class MaskedRecords(MaskedArray[_ShapeT_co, _DTypeT_co], Generic[_ShapeT_co, _DTypeT_co]): + def __new__( + cls, + shape, + dtype=..., + buf=..., + offset=..., + strides=..., + formats=..., + names=..., + titles=..., + byteorder=..., + aligned=..., + mask=..., + hard_mask=..., + fill_value=..., + keep_mask=..., + copy=..., + **options, + ): ... + _mask: Any + _fill_value: Any + @property + def _data(self): ... + @property + def _fieldmask(self): ... + def __array_finalize__(self, obj): ... + def __len__(self): ... + def __getattribute__(self, attr): ... + def __setattr__(self, attr, val): ... + def __getitem__(self, indx): ... + def __setitem__(self, indx, value): ... + def view(self, dtype=None, type=None): ... + def harden_mask(self): ... + def soften_mask(self): ... + def copy(self): ... + def tolist(self, fill_value=None): ... + def __reduce__(self): ... + +mrecarray = MaskedRecords + +def fromarrays( + arraylist, + dtype=None, + shape=None, + formats=None, + names=None, + titles=None, + aligned=False, + byteorder=None, + fill_value=None, +): ... + +def fromrecords( + reclist, + dtype=None, + shape=None, + formats=None, + names=None, + titles=None, + aligned=False, + byteorder=None, + fill_value=None, + mask=..., +): ... + +def fromtextfile( + fname, + delimiter=None, + commentchar="#", + missingchar="", + varnames=None, + vartypes=None, +): ... + +def addfield(mrecord, newfield, newfieldname=None): ... diff --git a/local_packages/numpy/ma/tests/__init__.py b/local_packages/numpy/ma/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/local_packages/numpy/ma/tests/test_arrayobject.py b/local_packages/numpy/ma/tests/test_arrayobject.py new file mode 100644 index 0000000..2000cea --- /dev/null +++ b/local_packages/numpy/ma/tests/test_arrayobject.py @@ -0,0 +1,40 @@ +import pytest + +import numpy as np +from numpy.ma import masked_array +from numpy.testing import assert_array_equal + + +def test_matrix_transpose_raises_error_for_1d(): + msg = "matrix transpose with ndim < 2 is undefined" + ma_arr = masked_array(data=[1, 2, 3, 4, 5, 6], + mask=[1, 0, 1, 1, 1, 0]) + with pytest.raises(ValueError, match=msg): + ma_arr.mT + + +def test_matrix_transpose_equals_transpose_2d(): + ma_arr = masked_array(data=[[1, 2, 3], [4, 5, 6]], + mask=[[1, 0, 1], [1, 1, 0]]) + assert_array_equal(ma_arr.T, ma_arr.mT) + + +ARRAY_SHAPES_TO_TEST = ( + (5, 2), + (5, 2, 3), + (5, 2, 3, 4), +) + + +@pytest.mark.parametrize("shape", ARRAY_SHAPES_TO_TEST) +def test_matrix_transpose_equals_swapaxes(shape): + num_of_axes = len(shape) + vec = np.arange(shape[-1]) + arr = np.broadcast_to(vec, shape) + + rng = np.random.default_rng(42) + mask = rng.choice([0, 1], size=shape) + ma_arr = masked_array(data=arr, mask=mask) + + tgt = np.swapaxes(arr, num_of_axes - 2, num_of_axes - 1) + assert_array_equal(tgt, ma_arr.mT) diff --git a/local_packages/numpy/ma/tests/test_core.py b/local_packages/numpy/ma/tests/test_core.py new file mode 100644 index 0000000..ff87e59 --- /dev/null +++ b/local_packages/numpy/ma/tests/test_core.py @@ -0,0 +1,6015 @@ +"""Tests suite for MaskedArray & subclassing. + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu +""" +__author__ = "Pierre GF Gerard-Marchant" + +import copy +import inspect +import itertools +import operator +import pickle +import sys +import textwrap +import warnings +from functools import reduce + +import pytest + +import numpy as np +import numpy._core.fromnumeric as fromnumeric +import numpy._core.umath as umath +import numpy.ma.core +from numpy import ndarray +from numpy._utils import asbytes +from numpy.exceptions import AxisError +from numpy.ma.core import ( + MAError, + MaskedArray, + MaskError, + MaskType, + abs, + absolute, + add, + all, + allclose, + allequal, + alltrue, + angle, + anom, + arange, + arccos, + arccosh, + arcsin, + arctan, + arctan2, + argsort, + array, + asarray, + choose, + concatenate, + conjugate, + cos, + cosh, + count, + default_fill_value, + diag, + divide, + empty, + empty_like, + equal, + exp, + filled, + fix_invalid, + flatten_mask, + flatten_structured_array, + fromflex, + getmask, + getmaskarray, + greater, + greater_equal, + identity, + inner, + isMaskedArray, + less, + less_equal, + log, + log10, + make_mask, + make_mask_descr, + mask_or, + masked, + masked_array, + masked_equal, + masked_greater, + masked_greater_equal, + masked_inside, + masked_less, + masked_less_equal, + masked_not_equal, + masked_outside, + masked_print_option, + masked_values, + masked_where, + max, + maximum, + maximum_fill_value, + min, + minimum, + minimum_fill_value, + mod, + multiply, + mvoid, + nomask, + not_equal, + ones, + ones_like, + outer, + power, + product, + put, + putmask, + ravel, + repeat, + reshape, + resize, + shape, + sin, + sinh, + sometrue, + sort, + sqrt, + subtract, + sum, + take, + tan, + tanh, + transpose, + where, + zeros, + zeros_like, +) +from numpy.ma.testutils import ( + assert_, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_equal_records, + assert_mask_equal, + assert_not_equal, + fail_if_equal, +) +from numpy.testing import IS_WASM, assert_raises, temppath +from numpy.testing._private.utils import requires_memory + +pi = np.pi + + +# For parametrized numeric testing +num_dts = [np.dtype(dt_) for dt_ in '?bhilqBHILQefdgFD'] +num_ids = [dt_.char for dt_ in num_dts] + +WARNING_MESSAGE = ("setting an item on a masked array which has a shared " + "mask will not copy") +WARNING_MARK_SPEC = f"ignore:.*{WARNING_MESSAGE}:numpy.ma.core.MaskedArrayFutureWarning" +class TestMaskedArray: + # Base test class for MaskedArrays. + + # message for warning filters + def _create_data(self): + # Base data definition. + x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + a10 = 10. + m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + z = np.array([-.5, 0., .5, .8]) + zm = masked_array(z, mask=[0, 1, 0, 0]) + xf = np.where(m1, 1e+20, x) + xm.set_fill_value(1e+20) + return x, y, a10, m1, m2, xm, ym, z, zm, xf + + def test_basicattributes(self): + # Tests some basic array attributes. + a = array([1, 3, 2]) + b = array([1, 3, 2], mask=[1, 0, 1]) + assert_equal(a.ndim, 1) + assert_equal(b.ndim, 1) + assert_equal(a.size, 3) + assert_equal(b.size, 3) + assert_equal(a.shape, (3,)) + assert_equal(b.shape, (3,)) + + def test_basic0d(self): + # Checks masking a scalar + x = masked_array(0) + assert_equal(str(x), '0') + x = masked_array(0, mask=True) + assert_equal(str(x), str(masked_print_option)) + x = masked_array(0, mask=False) + assert_equal(str(x), '0') + x = array(0, mask=1) + assert_(x.filled().dtype is x._data.dtype) + + def test_basic1d(self): + # Test of basic array creation and properties in 1 dimension. + x, _, _, m1, _, xm, ym, z, zm, xf = self._create_data() + assert_(not isMaskedArray(x)) + assert_(isMaskedArray(xm)) + assert_((xm - ym).filled(0).any()) + fail_if_equal(xm.mask.astype(int), ym.mask.astype(int)) + s = x.shape + assert_equal(np.shape(xm), s) + assert_equal(xm.shape, s) + assert_equal(xm.dtype, x.dtype) + assert_equal(zm.dtype, z.dtype) + assert_equal(xm.size, reduce(lambda x, y: x * y, s)) + assert_equal(count(xm), len(m1) - reduce(lambda x, y: x + y, m1)) + assert_array_equal(xm, xf) + assert_array_equal(filled(xm, 1.e20), xf) + assert_array_equal(x, xm) + + def test_basic2d(self): + # Test of basic array creation and properties in 2 dimensions. + x, y, _, m1, _, xm, ym, _, _, xf = self._create_data() + for s in [(4, 3), (6, 2)]: + x = x.reshape(s) + y = y.reshape(s) + xm = xm.reshape(s) + ym = ym.reshape(s) + xf = xf.reshape(s) + + assert_(not isMaskedArray(x)) + assert_(isMaskedArray(xm)) + assert_equal(shape(xm), s) + assert_equal(xm.shape, s) + assert_equal(xm.size, reduce(lambda x, y: x * y, s)) + assert_equal(count(xm), len(m1) - reduce(lambda x, y: x + y, m1)) + assert_equal(xm, xf) + assert_equal(filled(xm, 1.e20), xf) + assert_equal(x, xm) + + def test_concatenate_basic(self): + # Tests concatenations. + x, y, _, _, _, xm, ym, _, _, _ = self._create_data() + # basic concatenation + assert_equal(np.concatenate((x, y)), concatenate((xm, ym))) + assert_equal(np.concatenate((x, y)), concatenate((x, y))) + assert_equal(np.concatenate((x, y)), concatenate((xm, y))) + assert_equal(np.concatenate((x, y, x)), concatenate((x, ym, x))) + + def test_concatenate_alongaxis(self): + # Tests concatenations. + x, y, _, m1, m2, xm, ym, z, _, xf = self._create_data() + # Concatenation along an axis + s = (3, 4) + x = x.reshape(s) + y = y.reshape(s) + xm = xm.reshape(s) + ym = ym.reshape(s) + xf = xf.reshape(s) + + assert_equal(xm.mask, np.reshape(m1, s)) + assert_equal(ym.mask, np.reshape(m2, s)) + xmym = concatenate((xm, ym), 1) + assert_equal(np.concatenate((x, y), 1), xmym) + assert_equal(np.concatenate((xm.mask, ym.mask), 1), xmym._mask) + + x = zeros(2) + y = array(ones(2), mask=[False, True]) + z = concatenate((x, y)) + assert_array_equal(z, [0, 0, 1, 1]) + assert_array_equal(z.mask, [False, False, False, True]) + z = concatenate((y, x)) + assert_array_equal(z, [1, 1, 0, 0]) + assert_array_equal(z.mask, [False, True, False, False]) + + def test_concatenate_flexible(self): + # Tests the concatenation on flexible arrays. + data = masked_array(list(zip(np.random.rand(10), + np.arange(10))), + dtype=[('a', float), ('b', int)]) + + test = concatenate([data[:5], data[5:]]) + assert_equal_records(test, data) + + def test_creation_ndmin(self): + # Check the use of ndmin + x = array([1, 2, 3], mask=[1, 0, 0], ndmin=2) + assert_equal(x.shape, (1, 3)) + assert_equal(x._data, [[1, 2, 3]]) + assert_equal(x._mask, [[1, 0, 0]]) + + def test_creation_ndmin_from_maskedarray(self): + # Make sure we're not losing the original mask w/ ndmin + x = array([1, 2, 3]) + x[-1] = masked + xx = array(x, ndmin=2, dtype=float) + assert_equal(x.shape, x._mask.shape) + assert_equal(xx.shape, xx._mask.shape) + + def test_creation_maskcreation(self): + # Tests how masks are initialized at the creation of Maskedarrays. + data = arange(24, dtype=float) + data[[3, 6, 15]] = masked + dma_1 = MaskedArray(data) + assert_equal(dma_1.mask, data.mask) + dma_2 = MaskedArray(dma_1) + assert_equal(dma_2.mask, dma_1.mask) + dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6) + fail_if_equal(dma_3.mask, dma_1.mask) + + x = array([1, 2, 3], mask=True) + assert_equal(x._mask, [True, True, True]) + x = array([1, 2, 3], mask=False) + assert_equal(x._mask, [False, False, False]) + y = array([1, 2, 3], mask=x._mask, copy=False) + assert_(np.may_share_memory(x.mask, y.mask)) + y = array([1, 2, 3], mask=x._mask, copy=True) + assert_(not np.may_share_memory(x.mask, y.mask)) + x = array([1, 2, 3], mask=None) + assert_equal(x._mask, [False, False, False]) + + def test_masked_singleton_array_creation_warns(self): + # The first works, but should not (ideally), there may be no way + # to solve this, however, as long as `np.ma.masked` is an ndarray. + np.array(np.ma.masked) + with pytest.warns(UserWarning): + # Tries to create a float array, using `float(np.ma.masked)`. + # We may want to define this is invalid behaviour in the future! + # (requiring np.ma.masked to be a known NumPy scalar probably + # with a DType.) + np.array([3., np.ma.masked]) + + def test_creation_with_list_of_maskedarrays(self): + # Tests creating a masked array from a list of masked arrays. + x = array(np.arange(5), mask=[1, 0, 0, 0, 0]) + data = array((x, x[::-1])) + assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]]) + assert_equal(data._mask, [[1, 0, 0, 0, 0], [0, 0, 0, 0, 1]]) + + x.mask = nomask + data = array((x, x[::-1])) + assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]]) + assert_(data.mask is nomask) + + def test_creation_with_list_of_maskedarrays_no_bool_cast(self): + # Tests the regression in gh-18551 + masked_str = np.ma.masked_array(['a', 'b'], mask=[True, False]) + normal_int = np.arange(2) + res = np.ma.asarray([masked_str, normal_int], dtype="U21") + assert_array_equal(res.mask, [[True, False], [False, False]]) + + # The above only failed due a long chain of oddity, try also with + # an object array that cannot be converted to bool always: + class NotBool: + def __bool__(self): + raise ValueError("not a bool!") + masked_obj = np.ma.masked_array([NotBool(), 'b'], mask=[True, False]) + # Check that the NotBool actually fails like we would expect: + with pytest.raises(ValueError, match="not a bool!"): + np.asarray([masked_obj], dtype=bool) + + res = np.ma.asarray([masked_obj, normal_int]) + assert_array_equal(res.mask, [[True, False], [False, False]]) + + def test_creation_from_ndarray_with_padding(self): + x = np.array([('A', 0)], dtype={'names': ['f0', 'f1'], + 'formats': ['S4', 'i8'], + 'offsets': [0, 8]}) + array(x) # used to fail due to 'V' padding field in x.dtype.descr + + def test_unknown_keyword_parameter(self): + with pytest.raises(TypeError, match="unexpected keyword argument"): + MaskedArray([1, 2, 3], maks=[0, 1, 0]) # `mask` is misspelled. + + def test_asarray(self): + xm = self._create_data()[5] + xm.fill_value = -9999 + xm._hardmask = True + xmm = asarray(xm) + assert_equal(xmm._data, xm._data) + assert_equal(xmm._mask, xm._mask) + assert_equal(xmm.fill_value, xm.fill_value) + assert_equal(xmm._hardmask, xm._hardmask) + + def test_asarray_default_order(self): + # See Issue #6646 + m = np.eye(3).T + assert_(not m.flags.c_contiguous) + + new_m = asarray(m) + assert_(new_m.flags.c_contiguous) + + def test_asarray_enforce_order(self): + # See Issue #6646 + m = np.eye(3).T + assert_(not m.flags.c_contiguous) + + new_m = asarray(m, order='C') + assert_(new_m.flags.c_contiguous) + + def test_fix_invalid(self): + # Checks fix_invalid. + with np.errstate(invalid='ignore'): + data = masked_array([np.nan, 0., 1.], mask=[0, 0, 1]) + data_fixed = fix_invalid(data) + assert_equal(data_fixed._data, [data.fill_value, 0., 1.]) + assert_equal(data_fixed._mask, [1., 0., 1.]) + + def test_maskedelement(self): + # Test of masked element + x = arange(6) + x[1] = masked + assert_(str(masked) == '--') + assert_(x[1] is masked) + assert_equal(filled(x[1], 0), 0) + + def test_set_element_as_object(self): + # Tests setting elements with object + a = empty(1, dtype=object) + x = (1, 2, 3, 4, 5) + a[0] = x + assert_equal(a[0], x) + assert_(a[0] is x) + + import datetime + dt = datetime.datetime.now() + a[0] = dt + assert_(a[0] is dt) + + def test_indexing(self): + # Tests conversions and indexing + x1 = np.array([1, 2, 4, 3]) + x2 = array(x1, mask=[1, 0, 0, 0]) + x3 = array(x1, mask=[0, 1, 0, 1]) + x4 = array(x1) + # test conversion to strings + str(x2) # raises? + repr(x2) # raises? + assert_equal(np.sort(x1), sort(x2, endwith=False)) + # tests of indexing + assert_(type(x2[1]) is type(x1[1])) + assert_(x1[1] == x2[1]) + assert_(x2[0] is masked) + assert_equal(x1[2], x2[2]) + assert_equal(x1[2:5], x2[2:5]) + assert_equal(x1[:], x2[:]) + assert_equal(x1[1:], x3[1:]) + x1[2] = 9 + x2[2] = 9 + assert_equal(x1, x2) + x1[1:3] = 99 + x2[1:3] = 99 + assert_equal(x1, x2) + x2[1] = masked + assert_equal(x1, x2) + x2[1:3] = masked + assert_equal(x1, x2) + x2[:] = x1 + x2[1] = masked + assert_(allequal(getmask(x2), array([0, 1, 0, 0]))) + x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + assert_(allequal(getmask(x3), array([0, 1, 1, 0]))) + x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + assert_(allequal(getmask(x4), array([0, 1, 1, 0]))) + assert_(allequal(x4, array([1, 2, 3, 4]))) + x1 = np.arange(5) * 1.0 + x2 = masked_values(x1, 3.0) + assert_equal(x1, x2) + assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask)) + assert_equal(3.0, x2.fill_value) + x1 = array([1, 'hello', 2, 3], object) + x2 = np.array([1, 'hello', 2, 3], object) + s1 = x1[1] + s2 = x2[1] + assert_equal(type(s2), str) + assert_equal(type(s1), str) + assert_equal(s1, s2) + assert_(x1[1:1].shape == (0,)) + + def test_setitem_no_warning(self): + # Setitem shouldn't warn, because the assignment might be masked + # and warning for a masked assignment is weird (see gh-23000) + # (When the value is masked, otherwise a warning would be acceptable + # but is not given currently.) + x = np.ma.arange(60).reshape((6, 10)) + index = (slice(1, 5, 2), [7, 5]) + value = np.ma.masked_all((2, 2)) + value._data[...] = np.inf # not a valid integer... + x[index] = value + # The masked scalar is special cased, but test anyway (it's NaN): + x[...] = np.ma.masked + # Finally, a large value that cannot be cast to the float32 `x` + x = np.ma.arange(3., dtype=np.float32) + value = np.ma.array([2e234, 1, 1], mask=[True, False, False]) + x[...] = value + x[[0, 1, 2]] = value + + @pytest.mark.filterwarnings(WARNING_MARK_SPEC) + def test_copy(self): + # Tests of some subtle points of copying and sizing. + n = [0, 0, 1, 0, 0] + m = make_mask(n) + m2 = make_mask(m) + assert_(m is m2) + m3 = make_mask(m, copy=True) + assert_(m is not m3) + + x1 = np.arange(5) + y1 = array(x1, mask=m) + assert_equal(y1._data.__array_interface__, x1.__array_interface__) + assert_(allequal(x1, y1.data)) + assert_equal(y1._mask.__array_interface__, m.__array_interface__) + + y1a = array(y1) + # Default for masked array is not to copy; see gh-10318. + assert_(y1a._data.__array_interface__ == + y1._data.__array_interface__) + assert_(y1a._mask.__array_interface__ == + y1._mask.__array_interface__) + + y2 = array(x1, mask=m3) + assert_(y2._data.__array_interface__ == x1.__array_interface__) + assert_(y2._mask.__array_interface__ == m3.__array_interface__) + assert_(y2[2] is masked) + y2[2] = 9 + assert_(y2[2] is not masked) + assert_(y2._mask.__array_interface__ == m3.__array_interface__) + assert_(allequal(y2.mask, 0)) + + y2a = array(x1, mask=m, copy=1) + assert_(y2a._data.__array_interface__ != x1.__array_interface__) + #assert_( y2a._mask is not m) + assert_(y2a._mask.__array_interface__ != m.__array_interface__) + assert_(y2a[2] is masked) + y2a[2] = 9 + assert_(y2a[2] is not masked) + #assert_( y2a._mask is not m) + assert_(y2a._mask.__array_interface__ != m.__array_interface__) + assert_(allequal(y2a.mask, 0)) + + y3 = array(x1 * 1.0, mask=m) + assert_(filled(y3).dtype is (x1 * 1.0).dtype) + + x4 = arange(4) + x4[2] = masked + y4 = resize(x4, (8,)) + assert_equal(concatenate([x4, x4]), y4) + assert_equal(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]) + y5 = repeat(x4, (2, 2, 2, 2), axis=0) + assert_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3]) + y6 = repeat(x4, 2, axis=0) + assert_equal(y5, y6) + y7 = x4.repeat((2, 2, 2, 2), axis=0) + assert_equal(y5, y7) + y8 = x4.repeat(2, 0) + assert_equal(y5, y8) + + y9 = x4.copy() + assert_equal(y9._data, x4._data) + assert_equal(y9._mask, x4._mask) + + x = masked_array([1, 2, 3], mask=[0, 1, 0]) + # Copy is False by default + y = masked_array(x) + assert_equal(y._data.ctypes.data, x._data.ctypes.data) + assert_equal(y._mask.ctypes.data, x._mask.ctypes.data) + y = masked_array(x, copy=True) + assert_not_equal(y._data.ctypes.data, x._data.ctypes.data) + assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data) + + def test_copy_0d(self): + # gh-9430 + x = np.ma.array(43, mask=True) + xc = x.copy() + assert_equal(xc.mask, True) + + def test_copy_on_python_builtins(self): + # Tests copy works on python builtins (issue#8019) + assert_(isMaskedArray(np.ma.copy([1, 2, 3]))) + assert_(isMaskedArray(np.ma.copy((1, 2, 3)))) + + def test_copy_immutable(self): + # Tests that the copy method is immutable, GitHub issue #5247 + a = np.ma.array([1, 2, 3]) + b = np.ma.array([4, 5, 6]) + a_copy_method = a.copy + b.copy + assert_equal(a_copy_method(), [1, 2, 3]) + + def test_deepcopy(self): + from copy import deepcopy + a = array([0, 1, 2], mask=[False, True, False]) + copied = deepcopy(a) + assert_equal(copied.mask, a.mask) + assert_not_equal(id(a._mask), id(copied._mask)) + + copied[1] = 1 + assert_equal(copied.mask, [0, 0, 0]) + assert_equal(a.mask, [0, 1, 0]) + + copied = deepcopy(a) + assert_equal(copied.mask, a.mask) + copied.mask[1] = False + assert_equal(copied.mask, [0, 0, 0]) + assert_equal(a.mask, [0, 1, 0]) + + def test_format(self): + a = array([0, 1, 2], mask=[False, True, False]) + assert_equal(format(a), "[0 -- 2]") + assert_equal(format(masked), "--") + assert_equal(format(masked, ""), "--") + + # Postponed from PR #15410, perhaps address in the future. + # assert_equal(format(masked, " >5"), " --") + # assert_equal(format(masked, " <5"), "-- ") + + # Expect a FutureWarning for using format_spec with MaskedElement + with pytest.warns(FutureWarning): + with_format_string = format(masked, " >5") + assert_equal(with_format_string, "--") + + def test_str_repr(self): + a = array([0, 1, 2], mask=[False, True, False]) + assert_equal(str(a), '[0 -- 2]') + assert_equal( + repr(a), + textwrap.dedent('''\ + masked_array(data=[0, --, 2], + mask=[False, True, False], + fill_value=999999)''') + ) + + # arrays with a continuation + a = np.ma.arange(2000) + a[1:50] = np.ma.masked + assert_equal( + repr(a), + textwrap.dedent('''\ + masked_array(data=[0, --, --, ..., 1997, 1998, 1999], + mask=[False, True, True, ..., False, False, False], + fill_value=999999)''') + ) + + # line-wrapped 1d arrays are correctly aligned + a = np.ma.arange(20) + assert_equal( + repr(a), + textwrap.dedent('''\ + masked_array(data=[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19], + mask=False, + fill_value=999999)''') + ) + + # 2d arrays cause wrapping + a = array([[1, 2, 3], [4, 5, 6]], dtype=np.int8) + a[1, 1] = np.ma.masked + assert_equal( + repr(a), + textwrap.dedent(f'''\ + masked_array( + data=[[1, 2, 3], + [4, --, 6]], + mask=[[False, False, False], + [False, True, False]], + fill_value={np.array(999999)[()]!r}, + dtype=int8)''') + ) + + # but not it they're a row vector + assert_equal( + repr(a[:1]), + textwrap.dedent(f'''\ + masked_array(data=[[1, 2, 3]], + mask=[[False, False, False]], + fill_value={np.array(999999)[()]!r}, + dtype=int8)''') + ) + + # dtype=int is implied, so not shown + assert_equal( + repr(a.astype(int)), + textwrap.dedent('''\ + masked_array( + data=[[1, 2, 3], + [4, --, 6]], + mask=[[False, False, False], + [False, True, False]], + fill_value=999999)''') + ) + + def test_str_repr_legacy(self): + oldopts = np.get_printoptions() + np.set_printoptions(legacy='1.13') + try: + a = array([0, 1, 2], mask=[False, True, False]) + assert_equal(str(a), '[0 -- 2]') + assert_equal(repr(a), 'masked_array(data = [0 -- 2],\n' + ' mask = [False True False],\n' + ' fill_value = 999999)\n') + + a = np.ma.arange(2000) + a[1:50] = np.ma.masked + assert_equal( + repr(a), + 'masked_array(data = [0 -- -- ..., 1997 1998 1999],\n' + ' mask = [False True True ..., False False False],\n' + ' fill_value = 999999)\n' + ) + finally: + np.set_printoptions(**oldopts) + + def test_0d_unicode(self): + u = 'caf\xe9' + utype = type(u) + + arr_nomask = np.ma.array(u) + arr_masked = np.ma.array(u, mask=True) + + assert_equal(utype(arr_nomask), u) + assert_equal(utype(arr_masked), '--') + + def test_pickling(self): + # Tests pickling + for dtype in (int, float, str, object): + a = arange(10).astype(dtype) + a.fill_value = 999 + + masks = ([0, 0, 0, 1, 0, 1, 0, 1, 0, 1], # partially masked + True, # Fully masked + False) # Fully unmasked + + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + for mask in masks: + a.mask = mask + a_pickled = pickle.loads(pickle.dumps(a, protocol=proto)) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled._data, a._data) + if dtype in (object, int): + assert_equal(a_pickled.fill_value, 999) + else: + assert_equal(a_pickled.fill_value, dtype(999)) + assert_array_equal(a_pickled.mask, mask) + + def test_pickling_subbaseclass(self): + # Test pickling w/ a subclass of ndarray + x = np.array([(1.0, 2), (3.0, 4)], + dtype=[('x', float), ('y', int)]).view(np.recarray) + a = masked_array(x, mask=[(True, False), (False, True)]) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + a_pickled = pickle.loads(pickle.dumps(a, protocol=proto)) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled, a) + assert_(isinstance(a_pickled._data, np.recarray)) + + def test_pickling_maskedconstant(self): + # Test pickling MaskedConstant + mc = np.ma.masked + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + mc_pickled = pickle.loads(pickle.dumps(mc, protocol=proto)) + assert_equal(mc_pickled._baseclass, mc._baseclass) + assert_equal(mc_pickled._mask, mc._mask) + assert_equal(mc_pickled._data, mc._data) + + def test_pickling_wstructured(self): + # Tests pickling w/ structured array + a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)], + dtype=[('a', int), ('b', float)]) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + a_pickled = pickle.loads(pickle.dumps(a, protocol=proto)) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled, a) + + def test_pickling_keepalignment(self): + # Tests pickling w/ F_CONTIGUOUS arrays + a = arange(10).reshape( (-1, 2)) + b = a.T + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + test = pickle.loads(pickle.dumps(b, protocol=proto)) + assert_equal(test, b) + + def test_single_element_subscript(self): + # Tests single element subscripts of Maskedarrays. + a = array([1, 3, 2]) + b = array([1, 3, 2], mask=[1, 0, 1]) + assert_equal(a[0].shape, ()) + assert_equal(b[0].shape, ()) + assert_equal(b[1].shape, ()) + + def test_topython(self): + # Tests some communication issues with Python. + assert_equal(1, int(array(1))) + assert_equal(1.0, float(array(1))) + assert_equal(1, int(array([[[1]]]))) + assert_equal(1.0, float(array([[1]]))) + assert_raises(TypeError, float, array([1, 1])) + + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', 'Warning: converting a masked element', UserWarning) + assert_(np.isnan(float(array([1], mask=[1])))) + + a = array([1, 2, 3], mask=[1, 0, 0]) + assert_raises(TypeError, lambda: float(a)) + assert_equal(float(a[-1]), 3.) + assert_(np.isnan(float(a[0]))) + assert_raises(TypeError, int, a) + assert_equal(int(a[-1]), 3) + assert_raises(MAError, lambda: int(a[0])) + + def test_oddfeatures_1(self): + # Test of other odd features + x = arange(20) + x = x.reshape(4, 5) + x.flat[5] = 12 + assert_(x[1, 0] == 12) + z = x + 10j * x + assert_equal(z.real, x) + assert_equal(z.imag, 10 * x) + assert_equal((z * conjugate(z)).real, 101 * x * x) + z.imag[...] = 0.0 + + x = arange(10) + x[3] = masked + assert_(str(x[3]) == str(masked)) + c = x >= 8 + assert_(count(where(c, masked, masked)) == 0) + assert_(shape(where(c, masked, masked)) == c.shape) + + z = masked_where(c, x) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is not masked) + assert_(z[7] is not masked) + assert_(z[8] is masked) + assert_(z[9] is masked) + assert_equal(x, z) + + def test_oddfeatures_2(self): + # Tests some more features. + x = array([1., 2., 3., 4., 5.]) + c = array([1, 1, 1, 0, 0]) + x[2] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + c[0] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + + def test_oddfeatures_3(self): + msg = "setting an item on a masked array which has a shared mask will not copy" + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', msg, numpy.ma.core.MaskedArrayFutureWarning) + # Tests some generic features + atest = array([10], mask=True) + btest = array([20]) + idx = atest.mask + atest[idx] = btest[idx] + assert_equal(atest, [20]) + + def test_filled_with_object_dtype(self): + a = np.ma.masked_all(1, dtype='O') + assert_equal(a.filled('x')[0], 'x') + + def test_filled_with_flexible_dtype(self): + # Test filled w/ flexible dtype + flexi = array([(1, 1, 1)], + dtype=[('i', int), ('s', '|S8'), ('f', float)]) + flexi[0] = masked + assert_equal(flexi.filled(), + np.array([(default_fill_value(0), + default_fill_value('0'), + default_fill_value(0.),)], dtype=flexi.dtype)) + flexi[0] = masked + assert_equal(flexi.filled(1), + np.array([(1, '1', 1.)], dtype=flexi.dtype)) + + def test_filled_with_mvoid(self): + # Test filled w/ mvoid + ndtype = [('a', int), ('b', float)] + a = mvoid((1, 2.), mask=[(0, 1)], dtype=ndtype) + # Filled using default + test = a.filled() + assert_equal(tuple(test), (1, default_fill_value(1.))) + # Explicit fill_value + test = a.filled((-1, -1)) + assert_equal(tuple(test), (1, -1)) + # Using predefined filling values + a.fill_value = (-999, -999) + assert_equal(tuple(a.filled()), (1, -999)) + + def test_filled_with_nested_dtype(self): + # Test filled w/ nested dtype + ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] + a = array([(1, (1, 1)), (2, (2, 2))], + mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype) + test = a.filled(0) + control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype) + assert_equal(test, control) + + test = a['B'].filled(0) + control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype) + assert_equal(test, control) + + # test if mask gets set correctly (see #6760) + Z = numpy.ma.zeros(2, numpy.dtype([("A", "(2,2)i1,(2,2)i1", (2, 2))])) + assert_equal(Z.data.dtype, numpy.dtype([('A', [('f0', 'i1', (2, 2)), + ('f1', 'i1', (2, 2))], (2, 2))])) + assert_equal(Z.mask.dtype, numpy.dtype([('A', [('f0', '?', (2, 2)), + ('f1', '?', (2, 2))], (2, 2))])) + + def test_filled_with_f_order(self): + # Test filled w/ F-contiguous array + a = array(np.array([(0, 1, 2), (4, 5, 6)], order='F'), + mask=np.array([(0, 0, 1), (1, 0, 0)], order='F'), + order='F') # this is currently ignored + assert_(a.flags['F_CONTIGUOUS']) + assert_(a.filled(0).flags['F_CONTIGUOUS']) + + def test_optinfo_propagation(self): + # Checks that _optinfo dictionary isn't back-propagated + x = array([1, 2, 3, ], dtype=float) + x._optinfo['info'] = '???' + y = x.copy() + assert_equal(y._optinfo['info'], '???') + y._optinfo['info'] = '!!!' + assert_equal(x._optinfo['info'], '???') + + def test_optinfo_forward_propagation(self): + a = array([1, 2, 2, 4]) + a._optinfo["key"] = "value" + assert_equal(a._optinfo["key"], (a == 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a != 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a > 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a >= 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a <= 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a + 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a - 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a * 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a / 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], a[:2]._optinfo["key"]) + assert_equal(a._optinfo["key"], a[[0, 0, 2]]._optinfo["key"]) + assert_equal(a._optinfo["key"], np.exp(a)._optinfo["key"]) + assert_equal(a._optinfo["key"], np.abs(a)._optinfo["key"]) + assert_equal(a._optinfo["key"], array(a, copy=True)._optinfo["key"]) + assert_equal(a._optinfo["key"], np.zeros_like(a)._optinfo["key"]) + + def test_fancy_printoptions(self): + # Test printing a masked array w/ fancy dtype. + fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + test = array([(1, (2, 3.0)), (4, (5, 6.0))], + mask=[(1, (0, 1)), (0, (1, 0))], + dtype=fancydtype) + control = "[(--, (2, --)) (4, (--, 6.0))]" + assert_equal(str(test), control) + + # Test 0-d array with multi-dimensional dtype + t_2d0 = masked_array(data=(0, [[0.0, 0.0, 0.0], + [0.0, 0.0, 0.0]], + 0.0), + mask=(False, [[True, False, True], + [False, False, True]], + False), + dtype="int, (2,3)float, float") + control = "(0, [[--, 0.0, --], [0.0, 0.0, --]], 0.0)" + assert_equal(str(t_2d0), control) + + def test_flatten_structured_array(self): + # Test flatten_structured_array on arrays + # On ndarray + ndtype = [('a', int), ('b', float)] + a = np.array([(1, 1), (2, 2)], dtype=ndtype) + test = flatten_structured_array(a) + control = np.array([[1., 1.], [2., 2.]], dtype=float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + # On masked_array + a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) + test = flatten_structured_array(a) + control = array([[1., 1.], [2., 2.]], + mask=[[0, 1], [1, 0]], dtype=float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + assert_equal(test.mask, control.mask) + # On masked array with nested structure + ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])] + a = array([(1, (1, 1.1)), (2, (2, 2.2))], + mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype) + test = flatten_structured_array(a) + control = array([[1., 1., 1.1], [2., 2., 2.2]], + mask=[[0, 1, 0], [1, 0, 1]], dtype=float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + assert_equal(test.mask, control.mask) + # Keeping the initial shape + ndtype = [('a', int), ('b', float)] + a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype) + test = flatten_structured_array(a) + control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + # for strings + ndtype = [('a', 'U5'), ('b', [('c', 'U5')])] + arr = np.array([('NumPy', ('array',)), ('array', ('numpy',))], dtype=ndtype) + test = flatten_structured_array(arr) + control = np.array([['NumPy', 'array'], ['array', 'numpy']], dtype='U5') + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + + def test_void0d(self): + # Test creating a mvoid object + ndtype = [('a', int), ('b', int)] + a = np.array([(1, 2,)], dtype=ndtype)[0] + f = mvoid(a) + assert_(isinstance(f, mvoid)) + + a = masked_array([(1, 2)], mask=[(1, 0)], dtype=ndtype)[0] + assert_(isinstance(a, mvoid)) + + a = masked_array([(1, 2), (1, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) + f = mvoid(a._data[0], a._mask[0]) + assert_(isinstance(f, mvoid)) + + def test_mvoid_getitem(self): + # Test mvoid.__getitem__ + ndtype = [('a', int), ('b', int)] + a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)], + dtype=ndtype) + # w/o mask + f = a[0] + assert_(isinstance(f, mvoid)) + assert_equal((f[0], f['a']), (1, 1)) + assert_equal(f['b'], 2) + # w/ mask + f = a[1] + assert_(isinstance(f, mvoid)) + assert_(f[0] is masked) + assert_(f['a'] is masked) + assert_equal(f[1], 4) + + # exotic dtype + A = masked_array(data=[([0, 1],)], + mask=[([True, False],)], + dtype=[("A", ">i2", (2,))]) + assert_equal(A[0]["A"], A["A"][0]) + assert_equal(A[0]["A"], masked_array(data=[0, 1], + mask=[True, False], dtype=">i2")) + + def test_mvoid_iter(self): + # Test iteration on __getitem__ + ndtype = [('a', int), ('b', int)] + a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)], + dtype=ndtype) + # w/o mask + assert_equal(list(a[0]), [1, 2]) + # w/ mask + assert_equal(list(a[1]), [masked, 4]) + + @pytest.mark.thread_unsafe(reason="masked_print_option.set_display global state") + def test_mvoid_print(self): + # Test printing a mvoid + mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)]) + assert_equal(str(mx[0]), "(1, 1)") + mx['b'][0] = masked + ini_display = masked_print_option._display + masked_print_option.set_display("-X-") + try: + assert_equal(str(mx[0]), "(1, -X-)") + assert_equal(repr(mx[0]), "(1, -X-)") + finally: + masked_print_option.set_display(ini_display) + + # also check if there are object datatypes (see gh-7493) + mx = array([(1,), (2,)], dtype=[('a', 'O')]) + assert_equal(str(mx[0]), "(1,)") + + @pytest.mark.thread_unsafe(reason="masked_print_option global state") + def test_mvoid_multidim_print(self): + + # regression test for gh-6019 + t_ma = masked_array(data=[([1, 2, 3],)], + mask=[([False, True, False],)], + fill_value=([999999, 999999, 999999],), + dtype=[('a', ' 1: + assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1)) + assert_equal(np.add.reduce(x, 1), add.reduce(x, 1)) + assert_equal(np.sum(x, 1), sum(x, 1)) + assert_equal(np.prod(x, 1), product(x, 1)) + + def test_binops_d2D(self): + # Test binary operations on 2D data + a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]]) + b = array([[2., 3.], [4., 5.], [6., 7.]]) + + test = a * b + control = array([[2., 3.], [2., 2.], [3., 3.]], + mask=[[0, 0], [1, 1], [1, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + test = b * a + control = array([[2., 3.], [4., 5.], [6., 7.]], + mask=[[0, 0], [1, 1], [1, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + a = array([[1.], [2.], [3.]]) + b = array([[2., 3.], [4., 5.], [6., 7.]], + mask=[[0, 0], [0, 0], [0, 1]]) + test = a * b + control = array([[2, 3], [8, 10], [18, 3]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + test = b * a + control = array([[2, 3], [8, 10], [18, 7]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + def test_domained_binops_d2D(self): + # Test domained binary operations on 2D data + a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]]) + b = array([[2., 3.], [4., 5.], [6., 7.]]) + + test = a / b + control = array([[1. / 2., 1. / 3.], [2., 2.], [3., 3.]], + mask=[[0, 0], [1, 1], [1, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + test = b / a + control = array([[2. / 1., 3. / 1.], [4., 5.], [6., 7.]], + mask=[[0, 0], [1, 1], [1, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + a = array([[1.], [2.], [3.]]) + b = array([[2., 3.], [4., 5.], [6., 7.]], + mask=[[0, 0], [0, 0], [0, 1]]) + test = a / b + control = array([[1. / 2, 1. / 3], [2. / 4, 2. / 5], [3. / 6, 3]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + test = b / a + control = array([[2 / 1., 3 / 1.], [4 / 2., 5 / 2.], [6 / 3., 7]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + def test_noshrinking(self): + # Check that we don't shrink a mask when not wanted + # Binary operations + a = masked_array([1., 2., 3.], mask=[False, False, False], + shrink=False) + b = a + 1 + assert_equal(b.mask, [0, 0, 0]) + # In place binary operation + a += 1 + assert_equal(a.mask, [0, 0, 0]) + # Domained binary operation + b = a / 1. + assert_equal(b.mask, [0, 0, 0]) + # In place binary operation + a /= 1. + assert_equal(a.mask, [0, 0, 0]) + + def test_ufunc_nomask(self): + # check the case ufuncs should set the mask to false + m = np.ma.array([1]) + # check we don't get array([False], dtype=bool) + assert_equal(np.true_divide(m, 5).mask.shape, ()) + + def test_noshink_on_creation(self): + # Check that the mask is not shrunk on array creation when not wanted + a = np.ma.masked_values([1., 2.5, 3.1], 1.5, shrink=False) + assert_equal(a.mask, [0, 0, 0]) + + def test_mod(self): + # Tests mod + x, y, _, _, _, xm, ym, _, _, _ = self._create_data() + assert_equal(mod(x, y), mod(xm, ym)) + test = mod(ym, xm) + assert_equal(test, np.mod(ym, xm)) + assert_equal(test.mask, mask_or(xm.mask, ym.mask)) + test = mod(xm, ym) + assert_equal(test, np.mod(xm, ym)) + assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0))) + + def test_TakeTransposeInnerOuter(self): + # Test of take, transpose, inner, outer products + x = arange(24) + y = np.arange(24) + x[5:6] = masked + x = x.reshape(2, 3, 4) + y = y.reshape(2, 3, 4) + assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1))) + assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1)) + assert_equal(np.inner(filled(x, 0), filled(y, 0)), + inner(x, y)) + assert_equal(np.outer(filled(x, 0), filled(y, 0)), + outer(x, y)) + y = array(['abc', 1, 'def', 2, 3], object) + y[2] = masked + t = take(y, [0, 3, 4]) + assert_(t[0] == 'abc') + assert_(t[1] == 2) + assert_(t[2] == 3) + + def test_imag_real(self): + # Check complex + xx = array([1 + 10j, 20 + 2j], mask=[1, 0]) + assert_equal(xx.imag, [10, 2]) + assert_equal(xx.imag.filled(), [1e+20, 2]) + assert_equal(xx.imag.dtype, xx._data.imag.dtype) + assert_equal(xx.real, [1, 20]) + assert_equal(xx.real.filled(), [1e+20, 20]) + assert_equal(xx.real.dtype, xx._data.real.dtype) + + def test_methods_with_output(self): + xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4) + xm[:, 0] = xm[0] = xm[-1, -1] = masked + + funclist = ('sum', 'prod', 'var', 'std', 'max', 'min', 'ptp', 'mean',) + + for funcname in funclist: + npfunc = getattr(np, funcname) + xmmeth = getattr(xm, funcname) + # A ndarray as explicit input + output = np.empty(4, dtype=float) + output.fill(-9999) + result = npfunc(xm, axis=0, out=output) + # ... the result should be the given output + assert_(result is output) + assert_equal(result, xmmeth(axis=0, out=output)) + + output = empty(4, dtype=int) + result = xmmeth(axis=0, out=output) + assert_(result is output) + assert_(output[0] is masked) + + def test_eq_on_structured(self): + # Test the equality of structured arrays + ndtype = [('A', int), ('B', int)] + a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype) + + test = (a == a) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + test = (a == a[0]) + assert_equal(test.data, [True, False]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) + test = (a == b) + assert_equal(test.data, [False, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (a[0] == b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) + test = (a == b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + # complicated dtype, 2-dimensional array. + ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] + a = array([[(1, (1, 1)), (2, (2, 2))], + [(3, (3, 3)), (4, (4, 4))]], + mask=[[(0, (1, 0)), (0, (0, 1))], + [(1, (0, 0)), (1, (1, 1))]], dtype=ndtype) + test = (a[0, 0] == a) + assert_equal(test.data, [[True, False], [False, False]]) + assert_equal(test.mask, [[False, False], [False, True]]) + assert_(test.fill_value == True) + + def test_ne_on_structured(self): + # Test the equality of structured arrays + ndtype = [('A', int), ('B', int)] + a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype) + + test = (a != a) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + test = (a != a[0]) + assert_equal(test.data, [False, True]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) + test = (a != b) + assert_equal(test.data, [True, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (a[0] != b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) + test = (a != b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + # complicated dtype, 2-dimensional array. + ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] + a = array([[(1, (1, 1)), (2, (2, 2))], + [(3, (3, 3)), (4, (4, 4))]], + mask=[[(0, (1, 0)), (0, (0, 1))], + [(1, (0, 0)), (1, (1, 1))]], dtype=ndtype) + test = (a[0, 0] != a) + assert_equal(test.data, [[False, True], [True, True]]) + assert_equal(test.mask, [[False, False], [False, True]]) + assert_(test.fill_value == True) + + def test_eq_ne_structured_with_non_masked(self): + a = array([(1, 1), (2, 2), (3, 4)], + mask=[(0, 1), (0, 0), (1, 1)], dtype='i4,i4') + eq = a == a.data + ne = a.data != a + # Test the obvious. + assert_(np.all(eq)) + assert_(not np.any(ne)) + # Expect the mask set only for items with all fields masked. + expected_mask = a.mask == np.ones((), a.mask.dtype) + assert_array_equal(eq.mask, expected_mask) + assert_array_equal(ne.mask, expected_mask) + # The masked element will indicated not equal, because the + # masks did not match. + assert_equal(eq.data, [True, True, False]) + assert_array_equal(eq.data, ~ne.data) + + def test_eq_ne_structured_extra(self): + # ensure simple examples are symmetric and make sense. + # from https://github.com/numpy/numpy/pull/8590#discussion_r101126465 + dt = np.dtype('i4,i4') + for m1 in (mvoid((1, 2), mask=(0, 0), dtype=dt), + mvoid((1, 2), mask=(0, 1), dtype=dt), + mvoid((1, 2), mask=(1, 0), dtype=dt), + mvoid((1, 2), mask=(1, 1), dtype=dt)): + ma1 = m1.view(MaskedArray) + r1 = ma1.view('2i4') + for m2 in (np.array((1, 1), dtype=dt), + mvoid((1, 1), dtype=dt), + mvoid((1, 0), mask=(0, 1), dtype=dt), + mvoid((3, 2), mask=(0, 1), dtype=dt)): + ma2 = m2.view(MaskedArray) + r2 = ma2.view('2i4') + eq_expected = (r1 == r2).all() + assert_equal(m1 == m2, eq_expected) + assert_equal(m2 == m1, eq_expected) + assert_equal(ma1 == m2, eq_expected) + assert_equal(m1 == ma2, eq_expected) + assert_equal(ma1 == ma2, eq_expected) + # Also check it is the same if we do it element by element. + el_by_el = [m1[name] == m2[name] for name in dt.names] + assert_equal(array(el_by_el, dtype=bool).all(), eq_expected) + ne_expected = (r1 != r2).any() + assert_equal(m1 != m2, ne_expected) + assert_equal(m2 != m1, ne_expected) + assert_equal(ma1 != m2, ne_expected) + assert_equal(m1 != ma2, ne_expected) + assert_equal(ma1 != ma2, ne_expected) + el_by_el = [m1[name] != m2[name] for name in dt.names] + assert_equal(array(el_by_el, dtype=bool).any(), ne_expected) + + @pytest.mark.parametrize('dt', ['S', 'U', 'T']) + @pytest.mark.parametrize('fill', [None, 'A']) + def test_eq_for_strings(self, dt, fill): + # Test the equality of structured arrays + a = array(['a', 'b'], dtype=dt, mask=[0, 1], fill_value=fill) + + test = (a == a) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + test = (a == a[0]) + assert_equal(test.data, [True, False]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + b = array(['a', 'b'], dtype=dt, mask=[1, 0], fill_value=fill) + test = (a == b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, True]) + assert_(test.fill_value == True) + + test = (a[0] == b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (b == a[0]) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + @pytest.mark.parametrize('dt', ['S', 'U', 'T']) + @pytest.mark.parametrize('fill', [None, 'A']) + def test_ne_for_strings(self, dt, fill): + # Test the equality of structured arrays + a = array(['a', 'b'], dtype=dt, mask=[0, 1], fill_value=fill) + + test = (a != a) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + test = (a != a[0]) + assert_equal(test.data, [False, True]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + b = array(['a', 'b'], dtype=dt, mask=[1, 0], fill_value=fill) + test = (a != b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, True]) + assert_(test.fill_value == True) + + test = (a[0] != b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (b != a[0]) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + @pytest.mark.parametrize('dt1', num_dts, ids=num_ids) + @pytest.mark.parametrize('dt2', num_dts, ids=num_ids) + @pytest.mark.parametrize('fill', [None, 1]) + def test_eq_for_numeric(self, dt1, dt2, fill): + # Test the equality of structured arrays + a = array([0, 1], dtype=dt1, mask=[0, 1], fill_value=fill) + + test = (a == a) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + test = (a == a[0]) + assert_equal(test.data, [True, False]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + b = array([0, 1], dtype=dt2, mask=[1, 0], fill_value=fill) + test = (a == b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, True]) + assert_(test.fill_value == True) + + test = (a[0] == b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (b == a[0]) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + @pytest.mark.parametrize("op", [operator.eq, operator.lt]) + def test_eq_broadcast_with_unmasked(self, op): + a = array([0, 1], mask=[0, 1]) + b = np.arange(10).reshape(5, 2) + result = op(a, b) + assert_(result.mask.shape == b.shape) + assert_equal(result.mask, np.zeros(b.shape, bool) | a.mask) + + @pytest.mark.parametrize("op", [operator.eq, operator.gt]) + def test_comp_no_mask_not_broadcast(self, op): + # Regression test for failing doctest in MaskedArray.nonzero + # after gh-24556. + a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + result = op(a, 3) + assert_(not result.mask.shape) + assert_(result.mask is nomask) + + @pytest.mark.parametrize('dt1', num_dts, ids=num_ids) + @pytest.mark.parametrize('dt2', num_dts, ids=num_ids) + @pytest.mark.parametrize('fill', [None, 1]) + def test_ne_for_numeric(self, dt1, dt2, fill): + # Test the equality of structured arrays + a = array([0, 1], dtype=dt1, mask=[0, 1], fill_value=fill) + + test = (a != a) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + test = (a != a[0]) + assert_equal(test.data, [False, True]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + b = array([0, 1], dtype=dt2, mask=[1, 0], fill_value=fill) + test = (a != b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, True]) + assert_(test.fill_value == True) + + test = (a[0] != b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (b != a[0]) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + @pytest.mark.parametrize('dt1', num_dts, ids=num_ids) + @pytest.mark.parametrize('dt2', num_dts, ids=num_ids) + @pytest.mark.parametrize('fill', [None, 1]) + @pytest.mark.parametrize('op', + [operator.le, operator.lt, operator.ge, operator.gt]) + def test_comparisons_for_numeric(self, op, dt1, dt2, fill): + # Test the equality of structured arrays + a = array([0, 1], dtype=dt1, mask=[0, 1], fill_value=fill) + + test = op(a, a) + assert_equal(test.data, op(a._data, a._data)) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + test = op(a, a[0]) + assert_equal(test.data, op(a._data, a._data[0])) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + b = array([0, 1], dtype=dt2, mask=[1, 0], fill_value=fill) + test = op(a, b) + assert_equal(test.data, op(a._data, b._data)) + assert_equal(test.mask, [True, True]) + assert_(test.fill_value == True) + + test = op(a[0], b) + assert_equal(test.data, op(a._data[0], b._data)) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = op(b, a[0]) + assert_equal(test.data, op(b._data, a._data[0])) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + @pytest.mark.parametrize('dt', ['S', 'U', 'T']) + @pytest.mark.parametrize('op', + [operator.le, operator.lt, operator.ge, operator.gt]) + @pytest.mark.parametrize('fill', [None, "N/A"]) + def test_comparisons_strings(self, dt, op, fill): + # See gh-21770, mask propagation is broken for strings (and some other + # cases) so we explicitly test strings here. + # In principle only == and != may need special handling... + ma1 = masked_array(["a", "b", "cde"], mask=[0, 1, 0], fill_value=fill, dtype=dt) + ma2 = masked_array(["cde", "b", "a"], mask=[0, 1, 0], fill_value=fill, dtype=dt) + assert_equal(op(ma1, ma2)._data, op(ma1._data, ma2._data)) + + if isinstance(fill, str): + fill = np.array(fill, dtype=dt) + + ma1 = masked_array(["a", "b", "cde"], mask=[0, 1, 0], fill_value=fill, dtype=dt) + ma2 = masked_array(["cde", "b", "a"], mask=[0, 1, 0], fill_value=fill, dtype=dt) + assert_equal(op(ma1, ma2)._data, op(ma1._data, ma2._data)) + + @pytest.mark.filterwarnings("ignore:.*Comparison to `None`.*:FutureWarning") + def test_eq_with_None(self): + # Really, comparisons with None should not be done, but check them + # anyway. Note that pep8 will flag these tests. + # Deprecation is in place for arrays, and when it happens this + # test will fail (and have to be changed accordingly). + + # With partial mask + a = array([None, 1], mask=[0, 1]) + assert_equal(a == None, array([True, False], mask=[0, 1])) # noqa: E711 + assert_equal(a.data == None, [True, False]) # noqa: E711 + assert_equal(a != None, array([False, True], mask=[0, 1])) # noqa: E711 + # With nomask + a = array([None, 1], mask=False) + assert_equal(a == None, [True, False]) # noqa: E711 + assert_equal(a != None, [False, True]) # noqa: E711 + # With complete mask + a = array([None, 2], mask=True) + assert_equal(a == None, array([False, True], mask=True)) # noqa: E711 + assert_equal(a != None, array([True, False], mask=True)) # noqa: E711 + # Fully masked, even comparison to None should return "masked" + a = masked + assert_equal(a == None, masked) # noqa: E711 + + def test_eq_with_scalar(self): + a = array(1) + assert_equal(a == 1, True) + assert_equal(a == 0, False) + assert_equal(a != 1, False) + assert_equal(a != 0, True) + b = array(1, mask=True) + assert_equal(b == 0, masked) + assert_equal(b == 1, masked) + assert_equal(b != 0, masked) + assert_equal(b != 1, masked) + + def test_eq_different_dimensions(self): + m1 = array([1, 1], mask=[0, 1]) + # test comparison with both masked and regular arrays. + for m2 in (array([[0, 1], [1, 2]]), + np.array([[0, 1], [1, 2]])): + test = (m1 == m2) + assert_equal(test.data, [[False, False], + [True, False]]) + assert_equal(test.mask, [[False, True], + [False, True]]) + + def test_numpyarithmetic(self): + # Check that the mask is not back-propagated when using numpy functions + a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1]) + control = masked_array([np.nan, np.nan, 0, np.log(2), -1], + mask=[1, 1, 0, 0, 1]) + + test = log(a) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(a.mask, [0, 0, 0, 0, 1]) + + test = np.log(a) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(a.mask, [0, 0, 0, 0, 1]) + + +class TestMaskedArrayAttributes: + + def test_keepmask(self): + # Tests the keep mask flag + x = masked_array([1, 2, 3], mask=[1, 0, 0]) + mx = masked_array(x) + assert_equal(mx.mask, x.mask) + mx = masked_array(x, mask=[0, 1, 0], keep_mask=False) + assert_equal(mx.mask, [0, 1, 0]) + mx = masked_array(x, mask=[0, 1, 0], keep_mask=True) + assert_equal(mx.mask, [1, 1, 0]) + # We default to true + mx = masked_array(x, mask=[0, 1, 0]) + assert_equal(mx.mask, [1, 1, 0]) + + def test_hardmask(self): + # Test hard_mask + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + xh = array(d, mask=m, hard_mask=True) + # We need to copy, to avoid updating d in xh ! + xs = array(d, mask=m, hard_mask=False, copy=True) + xh[[1, 4]] = [10, 40] + xs[[1, 4]] = [10, 40] + assert_equal(xh._data, [0, 10, 2, 3, 4]) + assert_equal(xs._data, [0, 10, 2, 3, 40]) + assert_equal(xs.mask, [0, 0, 0, 1, 0]) + assert_(xh._hardmask) + assert_(not xs._hardmask) + xh[1:4] = [10, 20, 30] + xs[1:4] = [10, 20, 30] + assert_equal(xh._data, [0, 10, 20, 3, 4]) + assert_equal(xs._data, [0, 10, 20, 30, 40]) + assert_equal(xs.mask, nomask) + xh[0] = masked + xs[0] = masked + assert_equal(xh.mask, [1, 0, 0, 1, 1]) + assert_equal(xs.mask, [1, 0, 0, 0, 0]) + xh[:] = 1 + xs[:] = 1 + assert_equal(xh._data, [0, 1, 1, 3, 4]) + assert_equal(xs._data, [1, 1, 1, 1, 1]) + assert_equal(xh.mask, [1, 0, 0, 1, 1]) + assert_equal(xs.mask, nomask) + # Switch to soft mask + xh.soften_mask() + xh[:] = arange(5) + assert_equal(xh._data, [0, 1, 2, 3, 4]) + assert_equal(xh.mask, nomask) + # Switch back to hard mask + xh.harden_mask() + xh[xh < 3] = masked + assert_equal(xh._data, [0, 1, 2, 3, 4]) + assert_equal(xh._mask, [1, 1, 1, 0, 0]) + xh[filled(xh > 1, False)] = 5 + assert_equal(xh._data, [0, 1, 2, 5, 5]) + assert_equal(xh._mask, [1, 1, 1, 0, 0]) + + xh = array([[1, 2], [3, 4]], mask=[[1, 0], [0, 0]], hard_mask=True) + xh[0] = 0 + assert_equal(xh._data, [[1, 0], [3, 4]]) + assert_equal(xh._mask, [[1, 0], [0, 0]]) + xh[-1, -1] = 5 + assert_equal(xh._data, [[1, 0], [3, 5]]) + assert_equal(xh._mask, [[1, 0], [0, 0]]) + xh[filled(xh < 5, False)] = 2 + assert_equal(xh._data, [[1, 2], [2, 5]]) + assert_equal(xh._mask, [[1, 0], [0, 0]]) + + def test_hardmask_again(self): + # Another test of hardmask + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + xh = array(d, mask=m, hard_mask=True) + xh[4:5] = 999 + xh[0:1] = 999 + assert_equal(xh._data, [999, 1, 2, 3, 4]) + + def test_hardmask_oncemore_yay(self): + # OK, yet another test of hardmask + # Make sure that harden_mask/soften_mask//unshare_mask returns self + a = array([1, 2, 3], mask=[1, 0, 0]) + b = a.harden_mask() + assert_equal(a, b) + b[0] = 0 + assert_equal(a, b) + assert_equal(b, array([1, 2, 3], mask=[1, 0, 0])) + a = b.soften_mask() + a[0] = 0 + assert_equal(a, b) + assert_equal(b, array([0, 2, 3], mask=[0, 0, 0])) + + def test_smallmask(self): + # Checks the behaviour of _smallmask + a = arange(10) + a[1] = masked + a[1] = 1 + assert_equal(a._mask, nomask) + a = arange(10) + a._smallmask = False + a[1] = masked + a[1] = 1 + assert_equal(a._mask, zeros(10)) + + def test_shrink_mask(self): + # Tests .shrink_mask() + a = array([1, 2, 3], mask=[0, 0, 0]) + b = a.shrink_mask() + assert_equal(a, b) + assert_equal(a.mask, nomask) + + # Mask cannot be shrunk on structured types, so is a no-op + a = np.ma.array([(1, 2.0)], [('a', int), ('b', float)]) + b = a.copy() + a.shrink_mask() + assert_equal(a.mask, b.mask) + + def test_flat(self): + # Test that flat can return all types of items [#4585, #4615] + # test 2-D record array + # ... on structured array w/ masked records + x = array([[(1, 1.1, 'one'), (2, 2.2, 'two'), (3, 3.3, 'thr')], + [(4, 4.4, 'fou'), (5, 5.5, 'fiv'), (6, 6.6, 'six')]], + dtype=[('a', int), ('b', float), ('c', '|S8')]) + x['a'][0, 1] = masked + x['b'][1, 0] = masked + x['c'][0, 2] = masked + x[-1, -1] = masked + xflat = x.flat + assert_equal(xflat[0], x[0, 0]) + assert_equal(xflat[1], x[0, 1]) + assert_equal(xflat[2], x[0, 2]) + assert_equal(xflat[:3], x[0]) + assert_equal(xflat[3], x[1, 0]) + assert_equal(xflat[4], x[1, 1]) + assert_equal(xflat[5], x[1, 2]) + assert_equal(xflat[3:], x[1]) + assert_equal(xflat[-1], x[-1, -1]) + i = 0 + j = 0 + for xf in xflat: + assert_equal(xf, x[j, i]) + i += 1 + if i >= x.shape[-1]: + i = 0 + j += 1 + + def test_assign_dtype(self): + # check that the mask's dtype is updated when dtype is changed + a = np.zeros(4, dtype='f4,i4') + + m = np.ma.array(a) + m.dtype = np.dtype('f4') + repr(m) # raises? + assert_equal(m.dtype, np.dtype('f4')) + + # check that dtype changes that change shape of mask too much + # are not allowed + def assign(): + m = np.ma.array(a) + m.dtype = np.dtype('f8') + assert_raises(ValueError, assign) + + b = a.view(dtype='f4', type=np.ma.MaskedArray) # raises? + assert_equal(b.dtype, np.dtype('f4')) + + # check that nomask is preserved + a = np.zeros(4, dtype='f4') + m = np.ma.array(a) + m.dtype = np.dtype('f4,i4') + assert_equal(m.dtype, np.dtype('f4,i4')) + assert_equal(m._mask, np.ma.nomask) + + +class TestFillingValues: + + def test_check_on_scalar(self): + # Test _check_fill_value set to valid and invalid values + _check_fill_value = np.ma.core._check_fill_value + + fval = _check_fill_value(0, int) + assert_equal(fval, 0) + fval = _check_fill_value(None, int) + assert_equal(fval, default_fill_value(0)) + + fval = _check_fill_value(0, "|S3") + assert_equal(fval, b"0") + fval = _check_fill_value(None, "|S3") + assert_equal(fval, default_fill_value(b"camelot!")) + assert_raises(TypeError, _check_fill_value, 1e+20, int) + assert_raises(TypeError, _check_fill_value, 'stuff', int) + + def test_check_on_fields(self): + # Tests _check_fill_value with records + _check_fill_value = np.ma.core._check_fill_value + ndtype = [('a', int), ('b', float), ('c', "|S3")] + # A check on a list should return a single record + fval = _check_fill_value([-999, -12345678.9, "???"], ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + # A check on None should output the defaults + fval = _check_fill_value(None, ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [default_fill_value(0), + default_fill_value(0.), + asbytes(default_fill_value("0"))]) + #.....Using a structured type as fill_value should work + fill_val = np.array((-999, -12345678.9, "???"), dtype=ndtype) + fval = _check_fill_value(fill_val, ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + + #.....Using a flexible type w/ a different type shouldn't matter + # BEHAVIOR in 1.5 and earlier, and 1.13 and later: match structured + # types by position + fill_val = np.array((-999, -12345678.9, "???"), + dtype=[("A", int), ("B", float), ("C", "|S3")]) + fval = _check_fill_value(fill_val, ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + + #.....Using an object-array shouldn't matter either + fill_val = np.ndarray(shape=(1,), dtype=object) + fill_val[0] = (-999, -12345678.9, b"???") + fval = _check_fill_value(fill_val, object) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + # NOTE: This test was never run properly as "fill_value" rather than + # "fill_val" was assigned. Written properly, it fails. + #fill_val = np.array((-999, -12345678.9, "???")) + #fval = _check_fill_value(fill_val, ndtype) + #assert_(isinstance(fval, ndarray)) + #assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + #.....One-field-only flexible type should work as well + ndtype = [("a", int)] + fval = _check_fill_value(-999999999, ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), (-999999999,)) + + def test_fillvalue_conversion(self): + # Tests the behavior of fill_value during conversion + # We had a tailored comment to make sure special attributes are + # properly dealt with + a = array([b'3', b'4', b'5']) + a._optinfo.update({'comment': "updated!"}) + + b = array(a, dtype=int) + assert_equal(b._data, [3, 4, 5]) + assert_equal(b.fill_value, default_fill_value(0)) + + b = array(a, dtype=float) + assert_equal(b._data, [3, 4, 5]) + assert_equal(b.fill_value, default_fill_value(0.)) + + b = a.astype(int) + assert_equal(b._data, [3, 4, 5]) + assert_equal(b.fill_value, default_fill_value(0)) + assert_equal(b._optinfo['comment'], "updated!") + + b = a.astype([('a', '|S3')]) + assert_equal(b['a']._data, a._data) + assert_equal(b['a'].fill_value, a.fill_value) + + def test_default_fill_value(self): + # check all calling conventions + f1 = default_fill_value(1.) + f2 = default_fill_value(np.array(1.)) + f3 = default_fill_value(np.array(1.).dtype) + assert_equal(f1, f2) + assert_equal(f1, f3) + + def test_default_fill_value_structured(self): + fields = array([(1, 1, 1)], + dtype=[('i', int), ('s', '|S8'), ('f', float)]) + + f1 = default_fill_value(fields) + f2 = default_fill_value(fields.dtype) + expected = np.array((default_fill_value(0), + default_fill_value('0'), + default_fill_value(0.)), dtype=fields.dtype) + assert_equal(f1, expected) + assert_equal(f2, expected) + + def test_default_fill_value_void(self): + dt = np.dtype([('v', 'V7')]) + f = default_fill_value(dt) + assert_equal(f['v'], np.array(default_fill_value(dt['v']), dt['v'])) + + def test_fillvalue(self): + # Yet more fun with the fill_value + data = masked_array([1, 2, 3], fill_value=-999) + series = data[[0, 2, 1]] + assert_equal(series._fill_value, data._fill_value) + + mtype = [('f', float), ('s', '|S3')] + x = array([(1, 'a'), (2, 'b'), (pi, 'pi')], dtype=mtype) + x.fill_value = 999 + assert_equal(x.fill_value.item(), [999., b'999']) + assert_equal(x['f'].fill_value, 999) + assert_equal(x['s'].fill_value, b'999') + + x.fill_value = (9, '???') + assert_equal(x.fill_value.item(), (9, b'???')) + assert_equal(x['f'].fill_value, 9) + assert_equal(x['s'].fill_value, b'???') + + x = array([1, 2, 3.1]) + x.fill_value = 999 + assert_equal(np.asarray(x.fill_value).dtype, float) + assert_equal(x.fill_value, 999.) + assert_equal(x._fill_value, np.array(999.)) + + @pytest.mark.filterwarnings("ignore:.*Numpy has detected.*:FutureWarning") + def test_subarray_fillvalue(self): + # gh-10483 test multi-field index fill value + fields = array([(1, 1, 1)], + dtype=[('i', int), ('s', '|S8'), ('f', float)]) + subfields = fields[['i', 'f']] + assert_equal(tuple(subfields.fill_value), (999999, 1.e+20)) + # test comparison does not raise: + subfields[1:] == subfields[:-1] + + def test_fillvalue_exotic_dtype(self): + # Tests yet more exotic flexible dtypes + _check_fill_value = np.ma.core._check_fill_value + ndtype = [('i', int), ('s', '|S8'), ('f', float)] + control = np.array((default_fill_value(0), + default_fill_value('0'), + default_fill_value(0.),), + dtype=ndtype) + assert_equal(_check_fill_value(None, ndtype), control) + # The shape shouldn't matter + ndtype = [('f0', float, (2, 2))] + control = np.array((default_fill_value(0.),), + dtype=[('f0', float)]).astype(ndtype) + assert_equal(_check_fill_value(None, ndtype), control) + control = np.array((0,), dtype=[('f0', float)]).astype(ndtype) + assert_equal(_check_fill_value(0, ndtype), control) + + ndtype = np.dtype("int, (2,3)float, float") + control = np.array((default_fill_value(0), + default_fill_value(0.), + default_fill_value(0.),), + dtype="int, float, float").astype(ndtype) + test = _check_fill_value(None, ndtype) + assert_equal(test, control) + control = np.array((0, 0, 0), dtype="int, float, float").astype(ndtype) + assert_equal(_check_fill_value(0, ndtype), control) + # but when indexing, fill value should become scalar not tuple + # See issue #6723 + M = masked_array(control) + assert_equal(M["f1"].fill_value.ndim, 0) + + def test_fillvalue_datetime_timedelta(self): + # Test default fillvalue for datetime64 and timedelta64 types. + # See issue #4476, this would return '?' which would cause errors + # elsewhere + + for timecode in ("as", "fs", "ps", "ns", "us", "ms", "s", "m", + "h", "D", "W", "M", "Y"): + control = numpy.datetime64("NaT", timecode) + test = default_fill_value(numpy.dtype(" 0 + + # test different unary domains + sqrt(m) + log(m) + tan(m) + arcsin(m) + arccos(m) + arccosh(m) + + # test binary domains + divide(m, 2) + + # also check that allclose uses ma ufuncs, to avoid warning + allclose(m, 0.5) + + def test_masked_array_underflow(self): + x = np.arange(0, 3, 0.1) + X = np.ma.array(x) + with np.errstate(under="raise"): + X2 = X / 2.0 + np.testing.assert_array_equal(X2, x / 2) + + +class TestMaskedArrayInPlaceArithmetic: + # Test MaskedArray Arithmetic + def _create_intdata(self): + x = arange(10) + y = arange(10) + xm = arange(10) + xm[2] = masked + return x, y, xm + + def _create_floatdata(self): + x, y, xm = self._create_intdata() + return x.astype(float), y.astype(float), xm.astype(float) + + def _create_otherdata(self): + o = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + othertypes = [np.dtype(_).type for _ in o] + x, y, xm = self._create_intdata() + uint8data = ( + x.astype(np.uint8), + y.astype(np.uint8), + xm.astype(np.uint8) + ) + return othertypes, uint8data + + def test_inplace_addition_scalar(self): + # Test of inplace additions + x, y, xm = self._create_intdata() + xm[2] = masked + x += 1 + assert_equal(x, y + 1) + xm += 1 + assert_equal(xm, y + 1) + + x, _, xm = self._create_floatdata() + id1 = x.data.ctypes.data + x += 1. + assert_(id1 == x.data.ctypes.data) + assert_equal(x, y + 1.) + + def test_inplace_addition_array(self): + # Test of inplace additions + x, y, xm = self._create_intdata() + m = xm.mask + a = arange(10, dtype=np.int16) + a[-1] = masked + x += a + xm += a + assert_equal(x, y + a) + assert_equal(xm, y + a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_subtraction_scalar(self): + # Test of inplace subtractions + x, y, xm = self._create_intdata() + x -= 1 + assert_equal(x, y - 1) + xm -= 1 + assert_equal(xm, y - 1) + + def test_inplace_subtraction_array(self): + # Test of inplace subtractions + x, y, xm = self._create_floatdata() + m = xm.mask + a = arange(10, dtype=float) + a[-1] = masked + x -= a + xm -= a + assert_equal(x, y - a) + assert_equal(xm, y - a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_multiplication_scalar(self): + # Test of inplace multiplication + x, y, xm = self._create_floatdata() + x *= 2.0 + assert_equal(x, y * 2) + xm *= 2.0 + assert_equal(xm, y * 2) + + def test_inplace_multiplication_array(self): + # Test of inplace multiplication + x, y, xm = self._create_floatdata() + m = xm.mask + a = arange(10, dtype=float) + a[-1] = masked + x *= a + xm *= a + assert_equal(x, y * a) + assert_equal(xm, y * a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_division_scalar_int(self): + # Test of inplace division + x, y, xm = self._create_intdata() + x = arange(10) * 2 + xm = arange(10) * 2 + xm[2] = masked + x //= 2 + assert_equal(x, y) + xm //= 2 + assert_equal(xm, y) + + def test_inplace_division_scalar_float(self): + # Test of inplace division + x, y, xm = self._create_floatdata() + x /= 2.0 + assert_equal(x, y / 2.0) + xm /= arange(10) + assert_equal(xm, ones((10,))) + + def test_inplace_division_array_float(self): + # Test of inplace division + x, y, xm = self._create_floatdata() + m = xm.mask + a = arange(10, dtype=float) + a[-1] = masked + x /= a + xm /= a + assert_equal(x, y / a) + assert_equal(xm, y / a) + assert_equal(xm.mask, mask_or(mask_or(m, a.mask), (a == 0))) + + def test_inplace_division_misc(self): + + x = [1., 1., 1., -2., pi / 2., 4., 5., -10., 10., 1., 2., 3.] + y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.] + m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + + z = xm / ym + assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]) + assert_equal(z._data, + [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.]) + + xm = xm.copy() + xm /= ym + assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]) + assert_equal(z._data, + [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.]) + + def test_datafriendly_add(self): + # Test keeping data w/ (inplace) addition + x = array([1, 2, 3], mask=[0, 0, 1]) + # Test add w/ scalar + xx = x + 1 + assert_equal(xx.data, [2, 3, 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test iadd w/ scalar + x += 1 + assert_equal(x.data, [2, 3, 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test add w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x + array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(xx.data, [1, 4, 3]) + assert_equal(xx.mask, [1, 0, 1]) + # Test iadd w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + x += array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(x.data, [1, 4, 3]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_sub(self): + # Test keeping data w/ (inplace) subtraction + # Test sub w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x - 1 + assert_equal(xx.data, [0, 1, 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test isub w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + x -= 1 + assert_equal(x.data, [0, 1, 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test sub w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x - array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(xx.data, [1, 0, 3]) + assert_equal(xx.mask, [1, 0, 1]) + # Test isub w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + x -= array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(x.data, [1, 0, 3]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_mul(self): + # Test keeping data w/ (inplace) multiplication + # Test mul w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x * 2 + assert_equal(xx.data, [2, 4, 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test imul w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + x *= 2 + assert_equal(x.data, [2, 4, 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test mul w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x * array([10, 20, 30], mask=[1, 0, 0]) + assert_equal(xx.data, [1, 40, 3]) + assert_equal(xx.mask, [1, 0, 1]) + # Test imul w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + x *= array([10, 20, 30], mask=[1, 0, 0]) + assert_equal(x.data, [1, 40, 3]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_div(self): + # Test keeping data w/ (inplace) division + # Test div on scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x / 2. + assert_equal(xx.data, [1 / 2., 2 / 2., 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test idiv on scalar + x = array([1., 2., 3.], mask=[0, 0, 1]) + x /= 2. + assert_equal(x.data, [1 / 2., 2 / 2., 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test div on array + x = array([1., 2., 3.], mask=[0, 0, 1]) + xx = x / array([10., 20., 30.], mask=[1, 0, 0]) + assert_equal(xx.data, [1., 2. / 20., 3.]) + assert_equal(xx.mask, [1, 0, 1]) + # Test idiv on array + x = array([1., 2., 3.], mask=[0, 0, 1]) + x /= array([10., 20., 30.], mask=[1, 0, 0]) + assert_equal(x.data, [1., 2 / 20., 3.]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_pow(self): + # Test keeping data w/ (inplace) power + # Test pow on scalar + x = array([1., 2., 3.], mask=[0, 0, 1]) + xx = x ** 2.5 + assert_equal(xx.data, [1., 2. ** 2.5, 3.]) + assert_equal(xx.mask, [0, 0, 1]) + # Test ipow on scalar + x **= 2.5 + assert_equal(x.data, [1., 2. ** 2.5, 3]) + assert_equal(x.mask, [0, 0, 1]) + + def test_datafriendly_add_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a += b + assert_equal(a, [[2, 2], [4, 4]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a += b + assert_equal(a, [[2, 2], [4, 4]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + + def test_datafriendly_sub_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a -= b + assert_equal(a, [[0, 0], [2, 2]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a -= b + assert_equal(a, [[0, 0], [2, 2]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + + def test_datafriendly_mul_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a *= b + assert_equal(a, [[1, 1], [3, 3]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a *= b + assert_equal(a, [[1, 1], [3, 3]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + + def test_inplace_addition_scalar_type(self): + # Test of inplace additions + othertypes, uint8data = self._create_otherdata() + for t in othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + x, y, xm = (_.astype(t) for _ in uint8data) + xm[2] = masked + x += t(1) + assert_equal(x, y + t(1)) + xm += t(1) + assert_equal(xm, y + t(1)) + + def test_inplace_addition_array_type(self): + # Test of inplace additions + othertypes, uint8data = self._create_otherdata() + for t in othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + x, y, xm = (_.astype(t) for _ in uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + x += a + xm += a + assert_equal(x, y + a) + assert_equal(xm, y + a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_subtraction_scalar_type(self): + # Test of inplace subtractions + othertypes, uint8data = self._create_otherdata() + for t in othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + x, y, xm = (_.astype(t) for _ in uint8data) + x -= t(1) + assert_equal(x, y - t(1)) + xm -= t(1) + assert_equal(xm, y - t(1)) + + def test_inplace_subtraction_array_type(self): + # Test of inplace subtractions + othertypes, uint8data = self._create_otherdata() + for t in othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + x, y, xm = (_.astype(t) for _ in uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + x -= a + xm -= a + assert_equal(x, y - a) + assert_equal(xm, y - a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_multiplication_scalar_type(self): + # Test of inplace multiplication + othertypes, uint8data = self._create_otherdata() + for t in othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + x, y, xm = (_.astype(t) for _ in uint8data) + x *= t(2) + assert_equal(x, y * t(2)) + xm *= t(2) + assert_equal(xm, y * t(2)) + + def test_inplace_multiplication_array_type(self): + # Test of inplace multiplication + othertypes, uint8data = self._create_otherdata() + for t in othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + x, y, xm = (_.astype(t) for _ in uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + x *= a + xm *= a + assert_equal(x, y * a) + assert_equal(xm, y * a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_floor_division_scalar_type(self): + # Test of inplace division + # Check for TypeError in case of unsupported types + othertypes, uint8data = self._create_otherdata() + unsupported = {np.dtype(t).type for t in np.typecodes["Complex"]} + for t in othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + x, y, xm = (_.astype(t) for _ in uint8data) + x = arange(10, dtype=t) * t(2) + xm = arange(10, dtype=t) * t(2) + xm[2] = masked + try: + x //= t(2) + xm //= t(2) + assert_equal(x, y) + assert_equal(xm, y) + except TypeError: + msg = f"Supported type {t} throwing TypeError" + assert t in unsupported, msg + + def test_inplace_floor_division_array_type(self): + # Test of inplace division + # Check for TypeError in case of unsupported types + othertypes, uint8data = self._create_otherdata() + unsupported = {np.dtype(t).type for t in np.typecodes["Complex"]} + for t in othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + x, y, xm = (_.astype(t) for _ in uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + try: + x //= a + xm //= a + assert_equal(x, y // a) + assert_equal(xm, y // a) + assert_equal( + xm.mask, + mask_or(mask_or(m, a.mask), (a == t(0))) + ) + except TypeError: + msg = f"Supported type {t} throwing TypeError" + assert t in unsupported, msg + + def test_inplace_division_scalar_type(self): + # Test of inplace division + othertypes, uint8data = self._create_otherdata() + with warnings.catch_warnings(): + warnings.simplefilter('error', DeprecationWarning) + for t in othertypes: + x, y, xm = (_.astype(t) for _ in uint8data) + x = arange(10, dtype=t) * t(2) + xm = arange(10, dtype=t) * t(2) + xm[2] = masked + nwarns = 0 + + # May get a DeprecationWarning or a TypeError. + # + # This is a consequence of the fact that this is true divide + # and will require casting to float for calculation and + # casting back to the original type. This will only be raised + # with integers. Whether it is an error or warning is only + # dependent on how stringent the casting rules are. + # + # Will handle the same way. + try: + x /= t(2) + assert_equal(x, y) + except (DeprecationWarning, TypeError): + nwarns += 1 + try: + xm /= t(2) + assert_equal(xm, y) + except (DeprecationWarning, TypeError): + nwarns += 1 + + if issubclass(t, np.integer): + assert_equal(nwarns, 2, f'Failed on type={t}.') + else: + assert_equal(nwarns, 0, f'Failed on type={t}.') + + def test_inplace_division_array_type(self): + # Test of inplace division + othertypes, uint8data = self._create_otherdata() + with warnings.catch_warnings(): + warnings.simplefilter('error', DeprecationWarning) + for t in othertypes: + x, y, xm = (_.astype(t) for _ in uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + nwarns = 0 + + # May get a DeprecationWarning or a TypeError. + # + # This is a consequence of the fact that this is true divide + # and will require casting to float for calculation and + # casting back to the original type. This will only be raised + # with integers. Whether it is an error or warning is only + # dependent on how stringent the casting rules are. + # + # Will handle the same way. + try: + x /= a + assert_equal(x, y / a) + except (DeprecationWarning, TypeError): + nwarns += 1 + try: + xm /= a + assert_equal(xm, y / a) + assert_equal( + xm.mask, + mask_or(mask_or(m, a.mask), (a == t(0))) + ) + except (DeprecationWarning, TypeError): + nwarns += 1 + + if issubclass(t, np.integer): + assert_equal(nwarns, 2, f'Failed on type={t}.') + else: + assert_equal(nwarns, 0, f'Failed on type={t}.') + + def test_inplace_pow_type(self): + # Test keeping data w/ (inplace) power + othertypes = self._create_otherdata()[0] + for t in othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + # Test pow on scalar + x = array([1, 2, 3], mask=[0, 0, 1], dtype=t) + xx = x ** t(2) + xx_r = array([1, 2 ** 2, 3], mask=[0, 0, 1], dtype=t) + assert_equal(xx.data, xx_r.data) + assert_equal(xx.mask, xx_r.mask) + # Test ipow on scalar + x **= t(2) + assert_equal(x.data, xx_r.data) + assert_equal(x.mask, xx_r.mask) + + +class TestMaskedArrayMethods: + # Test class for miscellaneous MaskedArrays methods. + def _create_data(self): + # Base data definition. + x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + m2 = np.array([1, 1, 0, 1, 0, 0, + 1, 1, 1, 1, 0, 1, + 0, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 1, 0, + 0, 0, 1, 0, 1, 1]) + m2x = array(data=x, mask=m2) + m2X = array(data=X, mask=m2.reshape(X.shape)) + m2XX = array(data=XX, mask=m2.reshape(XX.shape)) + return x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX + + def test_generic_methods(self): + # Tests some MaskedArray methods. + a = array([1, 3, 2]) + assert_equal(a.any(), a._data.any()) + assert_equal(a.all(), a._data.all()) + assert_equal(a.argmax(), a._data.argmax()) + assert_equal(a.argmin(), a._data.argmin()) + assert_equal(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4)) + assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1])) + assert_equal(a.conj(), a._data.conj()) + assert_equal(a.conjugate(), a._data.conjugate()) + + m = array([[1, 2], [3, 4]]) + assert_equal(m.diagonal(), m._data.diagonal()) + assert_equal(a.sum(), a._data.sum()) + assert_equal(a.take([1, 2]), a._data.take([1, 2])) + assert_equal(m.transpose(), m._data.transpose()) + + def test_allclose(self): + # Tests allclose on arrays + a = np.random.rand(10) + b = a + np.random.rand(10) * 1e-8 + assert_(allclose(a, b)) + # Test allclose w/ infs + a[0] = np.inf + assert_(not allclose(a, b)) + b[0] = np.inf + assert_(allclose(a, b)) + # Test allclose w/ masked + a = masked_array(a) + a[-1] = masked + assert_(allclose(a, b, masked_equal=True)) + assert_(not allclose(a, b, masked_equal=False)) + # Test comparison w/ scalar + a *= 1e-8 + a[0] = 0 + assert_(allclose(a, 0, masked_equal=True)) + + # Test that the function works for MIN_INT integer typed arrays + a = masked_array([np.iinfo(np.int_).min], dtype=np.int_) + assert_(allclose(a, a)) + + def test_allclose_timedelta(self): + # Allclose currently works for timedelta64 as long as `atol` is + # an integer or also a timedelta64 + a = np.array([[1, 2, 3, 4]], dtype="m8[ns]") + assert allclose(a, a, atol=0) + assert allclose(a, a, atol=np.timedelta64(1, "ns")) + + def test_allany(self): + # Checks the any/all methods/functions. + x = np.array([[0.13, 0.26, 0.90], + [0.28, 0.33, 0.63], + [0.31, 0.87, 0.70]]) + m = np.array([[True, False, False], + [False, False, False], + [True, True, False]], dtype=np.bool) + mx = masked_array(x, mask=m) + mxbig = (mx > 0.5) + mxsmall = (mx < 0.5) + + assert_(not mxbig.all()) + assert_(mxbig.any()) + assert_equal(mxbig.all(0), [False, False, True]) + assert_equal(mxbig.all(1), [False, False, True]) + assert_equal(mxbig.any(0), [False, False, True]) + assert_equal(mxbig.any(1), [True, True, True]) + + assert_(not mxsmall.all()) + assert_(mxsmall.any()) + assert_equal(mxsmall.all(0), [True, True, False]) + assert_equal(mxsmall.all(1), [False, False, False]) + assert_equal(mxsmall.any(0), [True, True, False]) + assert_equal(mxsmall.any(1), [True, True, False]) + + def test_allany_oddities(self): + # Some fun with all and any + store = empty((), dtype=bool) + full = array([1, 2, 3], mask=True) + + assert_(full.all() is masked) + full.all(out=store) + assert_(store) + assert_(store._mask, True) + assert_(store is not masked) + + store = empty((), dtype=bool) + assert_(full.any() is masked) + full.any(out=store) + assert_(not store) + assert_(store._mask, True) + assert_(store is not masked) + + def test_argmax_argmin(self): + # Tests argmin & argmax on MaskedArrays. + _, _, _, _, mx, mX, _, m2x, m2X, _ = self._create_data() + + assert_equal(mx.argmin(), 35) + assert_equal(mX.argmin(), 35) + assert_equal(m2x.argmin(), 4) + assert_equal(m2X.argmin(), 4) + assert_equal(mx.argmax(), 28) + assert_equal(mX.argmax(), 28) + assert_equal(m2x.argmax(), 31) + assert_equal(m2X.argmax(), 31) + + assert_equal(mX.argmin(0), [2, 2, 2, 5, 0, 5]) + assert_equal(m2X.argmin(0), [2, 2, 4, 5, 0, 4]) + assert_equal(mX.argmax(0), [0, 5, 0, 5, 4, 0]) + assert_equal(m2X.argmax(0), [5, 5, 0, 5, 1, 0]) + + assert_equal(mX.argmin(1), [4, 1, 0, 0, 5, 5, ]) + assert_equal(m2X.argmin(1), [4, 4, 0, 0, 5, 3]) + assert_equal(mX.argmax(1), [2, 4, 1, 1, 4, 1]) + assert_equal(m2X.argmax(1), [2, 4, 1, 1, 1, 1]) + + def test_clip(self): + # Tests clip on MaskedArrays. + x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0]) + mx = array(x, mask=m) + clipped = mx.clip(2, 8) + assert_equal(clipped.mask, mx.mask) + assert_equal(clipped._data, x.clip(2, 8)) + assert_equal(clipped._data, mx._data.clip(2, 8)) + + def test_clip_out(self): + # gh-14140 + a = np.arange(10) + m = np.ma.MaskedArray(a, mask=[0, 1] * 5) + m.clip(0, 5, out=m) + assert_equal(m.mask, [0, 1] * 5) + + def test_compress(self): + # test compress + a = masked_array([1., 2., 3., 4., 5.], fill_value=9999) + condition = (a > 1.5) & (a < 3.5) + assert_equal(a.compress(condition), [2., 3.]) + + a[[2, 3]] = masked + b = a.compress(condition) + assert_equal(b._data, [2., 3.]) + assert_equal(b._mask, [0, 1]) + assert_equal(b.fill_value, 9999) + assert_equal(b, a[condition]) + + condition = (a < 4.) + b = a.compress(condition) + assert_equal(b._data, [1., 2., 3.]) + assert_equal(b._mask, [0, 0, 1]) + assert_equal(b.fill_value, 9999) + assert_equal(b, a[condition]) + + a = masked_array([[10, 20, 30], [40, 50, 60]], + mask=[[0, 0, 1], [1, 0, 0]]) + b = a.compress(a.ravel() >= 22) + assert_equal(b._data, [30, 40, 50, 60]) + assert_equal(b._mask, [1, 1, 0, 0]) + + x = np.array([3, 1, 2]) + b = a.compress(x >= 2, axis=1) + assert_equal(b._data, [[10, 30], [40, 60]]) + assert_equal(b._mask, [[0, 1], [1, 0]]) + + def test_compressed(self): + # Tests compressed + a = array([1, 2, 3, 4], mask=[0, 0, 0, 0]) + b = a.compressed() + assert_equal(b, a) + a[0] = masked + b = a.compressed() + assert_equal(b, [2, 3, 4]) + + def test_empty(self): + # Tests empty/like + datatype = [('a', int), ('b', float), ('c', '|S8')] + a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')], + dtype=datatype) + assert_equal(len(a.fill_value.item()), len(datatype)) + + b = empty_like(a) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + b = empty(len(a), dtype=datatype) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + # check empty_like mask handling + a = masked_array([1, 2, 3], mask=[False, True, False]) + b = empty_like(a) + assert_(not np.may_share_memory(a.mask, b.mask)) + b = a.view(masked_array) + assert_(np.may_share_memory(a.mask, b.mask)) + + def test_zeros(self): + # Tests zeros/like + datatype = [('a', int), ('b', float), ('c', '|S8')] + a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')], + dtype=datatype) + assert_equal(len(a.fill_value.item()), len(datatype)) + + b = zeros(len(a), dtype=datatype) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + b = zeros_like(a) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + # check zeros_like mask handling + a = masked_array([1, 2, 3], mask=[False, True, False]) + b = zeros_like(a) + assert_(not np.may_share_memory(a.mask, b.mask)) + b = a.view() + assert_(np.may_share_memory(a.mask, b.mask)) + + def test_ones(self): + # Tests ones/like + datatype = [('a', int), ('b', float), ('c', '|S8')] + a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')], + dtype=datatype) + assert_equal(len(a.fill_value.item()), len(datatype)) + + b = ones(len(a), dtype=datatype) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + b = ones_like(a) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + # check ones_like mask handling + a = masked_array([1, 2, 3], mask=[False, True, False]) + b = ones_like(a) + assert_(not np.may_share_memory(a.mask, b.mask)) + b = a.view() + assert_(np.may_share_memory(a.mask, b.mask)) + + @pytest.mark.filterwarnings(WARNING_MARK_SPEC) + def test_put(self): + # Tests put. + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + x = array(d, mask=m) + assert_(x[3] is masked) + assert_(x[4] is masked) + x[[1, 4]] = [10, 40] + assert_(x[3] is masked) + assert_(x[4] is not masked) + assert_equal(x, [0, 10, 2, -1, 40]) + + x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2) + i = [0, 2, 4, 6] + x.put(i, [6, 4, 2, 0]) + assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ])) + assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]) + x.put(i, masked_array([0, 2, 4, 6], [1, 0, 1, 0])) + assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ]) + assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0]) + + x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2) + put(x, i, [6, 4, 2, 0]) + assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ])) + assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]) + put(x, i, masked_array([0, 2, 4, 6], [1, 0, 1, 0])) + assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ]) + assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0]) + + def test_put_nomask(self): + # GitHub issue 6425 + x = zeros(10) + z = array([3., -1.], mask=[False, True]) + + x.put([1, 2], z) + assert_(x[0] is not masked) + assert_equal(x[0], 0) + assert_(x[1] is not masked) + assert_equal(x[1], 3) + assert_(x[2] is masked) + assert_(x[3] is not masked) + assert_equal(x[3], 0) + + def test_put_hardmask(self): + # Tests put on hardmask + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + xh = array(d + 1, mask=m, hard_mask=True, copy=True) + xh.put([4, 2, 0, 1, 3], [1, 2, 3, 4, 5]) + assert_equal(xh._data, [3, 4, 2, 4, 5]) + + def test_putmask(self): + x = arange(6) + 1 + mx = array(x, mask=[0, 0, 0, 1, 1, 1]) + mask = [0, 0, 1, 0, 0, 1] + # w/o mask, w/o masked values + xx = x.copy() + putmask(xx, mask, 99) + assert_equal(xx, [1, 2, 99, 4, 5, 99]) + # w/ mask, w/o masked values + mxx = mx.copy() + putmask(mxx, mask, 99) + assert_equal(mxx._data, [1, 2, 99, 4, 5, 99]) + assert_equal(mxx._mask, [0, 0, 0, 1, 1, 0]) + # w/o mask, w/ masked values + values = array([10, 20, 30, 40, 50, 60], mask=[1, 1, 1, 0, 0, 0]) + xx = x.copy() + putmask(xx, mask, values) + assert_equal(xx._data, [1, 2, 30, 4, 5, 60]) + assert_equal(xx._mask, [0, 0, 1, 0, 0, 0]) + # w/ mask, w/ masked values + mxx = mx.copy() + putmask(mxx, mask, values) + assert_equal(mxx._data, [1, 2, 30, 4, 5, 60]) + assert_equal(mxx._mask, [0, 0, 1, 1, 1, 0]) + # w/ mask, w/ masked values + hardmask + mxx = mx.copy() + mxx.harden_mask() + putmask(mxx, mask, values) + assert_equal(mxx, [1, 2, 30, 4, 5, 60]) + + def test_ravel(self): + # Tests ravel + a = array([[1, 2, 3, 4, 5]], mask=[[0, 1, 0, 0, 0]]) + aravel = a.ravel() + assert_equal(aravel._mask.shape, aravel.shape) + a = array([0, 0], mask=[1, 1]) + aravel = a.ravel() + assert_equal(aravel._mask.shape, a.shape) + # Checks that small_mask is preserved + a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False) + assert_equal(a.ravel()._mask, [0, 0, 0, 0]) + # Test that the fill_value is preserved + a.fill_value = -99 + a.shape = (2, 2) + ar = a.ravel() + assert_equal(ar._mask, [0, 0, 0, 0]) + assert_equal(ar._data, [1, 2, 3, 4]) + assert_equal(ar.fill_value, -99) + # Test index ordering + assert_equal(a.ravel(order='C'), [1, 2, 3, 4]) + assert_equal(a.ravel(order='F'), [1, 3, 2, 4]) + + @pytest.mark.parametrize("order", "AKCF") + @pytest.mark.parametrize("data_order", "CF") + def test_ravel_order(self, order, data_order): + # Ravelling must ravel mask and data in the same order always to avoid + # misaligning the two in the ravel result. + arr = np.ones((5, 10), order=data_order) + arr[0, :] = 0 + mask = np.ones((10, 5), dtype=bool, order=data_order).T + mask[0, :] = False + x = array(arr, mask=mask) + assert x._data.flags.fnc != x._mask.flags.fnc + assert (x.filled(0) == 0).all() + raveled = x.ravel(order) + assert (raveled.filled(0) == 0).all() + + # NOTE: Can be wrong if arr order is neither C nor F and `order="K"` + assert_array_equal(arr.ravel(order), x.ravel(order)._data) + + def test_reshape(self): + # Tests reshape + x = arange(4) + x[0] = masked + y = x.reshape(2, 2) + assert_equal(y.shape, (2, 2,)) + assert_equal(y._mask.shape, (2, 2,)) + assert_equal(x.shape, (4,)) + assert_equal(x._mask.shape, (4,)) + + def test_sort(self): + # Test sort + x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8) + + sortedx = sort(x) + assert_equal(sortedx._data, [1, 2, 3, 4]) + assert_equal(sortedx._mask, [0, 0, 0, 1]) + + sortedx = sort(x, endwith=False) + assert_equal(sortedx._data, [4, 1, 2, 3]) + assert_equal(sortedx._mask, [1, 0, 0, 0]) + + x.sort() + assert_equal(x._data, [1, 2, 3, 4]) + assert_equal(x._mask, [0, 0, 0, 1]) + + x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8) + x.sort(endwith=False) + assert_equal(x._data, [4, 1, 2, 3]) + assert_equal(x._mask, [1, 0, 0, 0]) + + x = [1, 4, 2, 3] + sortedx = sort(x) + assert_(not isinstance(sorted, MaskedArray)) + + x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8) + sortedx = sort(x, endwith=False) + assert_equal(sortedx._data, [-2, -1, 0, 1, 2]) + x = array([0, 1, -1, -2, 2], mask=[0, 1, 0, 0, 1], dtype=np.int8) + sortedx = sort(x, endwith=False) + assert_equal(sortedx._data, [1, 2, -2, -1, 0]) + assert_equal(sortedx._mask, [1, 1, 0, 0, 0]) + + x = array([0, -1], dtype=np.int8) + sortedx = sort(x, kind="stable") + assert_equal(sortedx, array([-1, 0], dtype=np.int8)) + + def test_stable_sort(self): + x = array([1, 2, 3, 1, 2, 3], dtype=np.uint8) + expected = array([0, 3, 1, 4, 2, 5]) + computed = argsort(x, kind='stable') + assert_equal(computed, expected) + + def test_argsort_matches_sort(self): + x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8) + + for kwargs in [{}, + {"endwith": True}, + {"endwith": False}, + {"fill_value": 2}, + {"fill_value": 2, "endwith": True}, + {"fill_value": 2, "endwith": False}]: + sortedx = sort(x, **kwargs) + argsortedx = x[argsort(x, **kwargs)] + assert_equal(sortedx._data, argsortedx._data) + assert_equal(sortedx._mask, argsortedx._mask) + + def test_sort_2d(self): + # Check sort of 2D array. + # 2D array w/o mask + a = masked_array([[8, 4, 1], [2, 0, 9]]) + a.sort(0) + assert_equal(a, [[2, 0, 1], [8, 4, 9]]) + a = masked_array([[8, 4, 1], [2, 0, 9]]) + a.sort(1) + assert_equal(a, [[1, 4, 8], [0, 2, 9]]) + # 2D array w/mask + a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]]) + a.sort(0) + assert_equal(a, [[2, 0, 1], [8, 4, 9]]) + assert_equal(a._mask, [[0, 0, 0], [1, 0, 1]]) + a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]]) + a.sort(1) + assert_equal(a, [[1, 4, 8], [0, 2, 9]]) + assert_equal(a._mask, [[0, 0, 1], [0, 0, 1]]) + # 3D + a = masked_array([[[7, 8, 9], [4, 5, 6], [1, 2, 3]], + [[1, 2, 3], [7, 8, 9], [4, 5, 6]], + [[7, 8, 9], [1, 2, 3], [4, 5, 6]], + [[4, 5, 6], [1, 2, 3], [7, 8, 9]]]) + a[a % 4 == 0] = masked + am = a.copy() + an = a.filled(99) + am.sort(0) + an.sort(0) + assert_equal(am, an) + am = a.copy() + an = a.filled(99) + am.sort(1) + an.sort(1) + assert_equal(am, an) + am = a.copy() + an = a.filled(99) + am.sort(2) + an.sort(2) + assert_equal(am, an) + + def test_sort_flexible(self): + # Test sort on structured dtype. + a = array( + data=[(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)], + mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)], + dtype=[('A', int), ('B', int)]) + mask_last = array( + data=[(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)], + mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)], + dtype=[('A', int), ('B', int)]) + mask_first = array( + data=[(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3)], + mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0)], + dtype=[('A', int), ('B', int)]) + + test = sort(a) + assert_equal(test, mask_last) + assert_equal(test.mask, mask_last.mask) + + test = sort(a, endwith=False) + assert_equal(test, mask_first) + assert_equal(test.mask, mask_first.mask) + + # Test sort on dtype with subarray (gh-8069) + # Just check that the sort does not error, structured array subarrays + # are treated as byte strings and that leads to differing behavior + # depending on endianness and `endwith`. + dt = np.dtype([('v', int, 2)]) + a = a.view(dt) + test = sort(a) + test = sort(a, endwith=False) + + def test_argsort(self): + # Test argsort + a = array([1, 5, 2, 4, 3], mask=[1, 0, 0, 1, 0]) + assert_equal(np.argsort(a), argsort(a)) + + def test_squeeze(self): + # Check squeeze + data = masked_array([[1, 2, 3]]) + assert_equal(data.squeeze(), [1, 2, 3]) + data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]]) + assert_equal(data.squeeze(), [1, 2, 3]) + assert_equal(data.squeeze()._mask, [1, 1, 1]) + + # normal ndarrays return a view + arr = np.array([[1]]) + arr_sq = arr.squeeze() + assert_equal(arr_sq, 1) + arr_sq[...] = 2 + assert_equal(arr[0, 0], 2) + + # so maskedarrays should too + m_arr = masked_array([[1]], mask=True) + m_arr_sq = m_arr.squeeze() + assert_(m_arr_sq is not np.ma.masked) + assert_equal(m_arr_sq.mask, True) + m_arr_sq[...] = 2 + assert_equal(m_arr[0, 0], 2) + + def test_swapaxes(self): + # Tests swapaxes on MaskedArrays. + x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mX = array(x, mask=m).reshape(6, 6) + mXX = mX.reshape(3, 2, 2, 3) + + mXswapped = mX.swapaxes(0, 1) + assert_equal(mXswapped[-1], mX[:, -1]) + + mXXswapped = mXX.swapaxes(0, 2) + assert_equal(mXXswapped.shape, (2, 2, 3, 3)) + + def test_take(self): + # Tests take + x = masked_array([10, 20, 30, 40], [0, 1, 0, 1]) + assert_equal(x.take([0, 0, 3]), masked_array([10, 10, 40], [0, 0, 1])) + assert_equal(x.take([0, 0, 3]), x[[0, 0, 3]]) + assert_equal(x.take([[0, 1], [0, 1]]), + masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]])) + + # assert_equal crashes when passed np.ma.mask + assert_(x[1] is np.ma.masked) + assert_(x.take(1) is np.ma.masked) + + x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0, ]]) + assert_equal(x.take([0, 2], axis=1), + array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]])) + assert_equal(take(x, [0, 2], axis=1), + array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]])) + + def test_take_masked_indices(self): + # Test take w/ masked indices + a = np.array((40, 18, 37, 9, 22)) + indices = np.arange(3)[None, :] + np.arange(5)[:, None] + mindices = array(indices, mask=(indices >= len(a))) + # No mask + test = take(a, mindices, mode='clip') + ctrl = array([[40, 18, 37], + [18, 37, 9], + [37, 9, 22], + [9, 22, 22], + [22, 22, 22]]) + assert_equal(test, ctrl) + # Masked indices + test = take(a, mindices) + ctrl = array([[40, 18, 37], + [18, 37, 9], + [37, 9, 22], + [9, 22, 40], + [22, 40, 40]]) + ctrl[3, 2] = ctrl[4, 1] = ctrl[4, 2] = masked + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + # Masked input + masked indices + a = array((40, 18, 37, 9, 22), mask=(0, 1, 0, 0, 0)) + test = take(a, mindices) + ctrl[0, 1] = ctrl[1, 0] = masked + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + + def test_tolist(self): + # Tests to list + # ... on 1D + x = array(np.arange(12)) + x[[1, -2]] = masked + xlist = x.tolist() + assert_(xlist[1] is None) + assert_(xlist[-2] is None) + # ... on 2D + x.shape = (3, 4) + xlist = x.tolist() + ctrl = [[0, None, 2, 3], [4, 5, 6, 7], [8, 9, None, 11]] + assert_equal(xlist[0], [0, None, 2, 3]) + assert_equal(xlist[1], [4, 5, 6, 7]) + assert_equal(xlist[2], [8, 9, None, 11]) + assert_equal(xlist, ctrl) + # ... on structured array w/ masked records + x = array(list(zip([1, 2, 3], + [1.1, 2.2, 3.3], + ['one', 'two', 'thr'])), + dtype=[('a', int), ('b', float), ('c', '|S8')]) + x[-1] = masked + assert_equal(x.tolist(), + [(1, 1.1, b'one'), + (2, 2.2, b'two'), + (None, None, None)]) + # ... on structured array w/ masked fields + a = array([(1, 2,), (3, 4)], mask=[(0, 1), (0, 0)], + dtype=[('a', int), ('b', int)]) + test = a.tolist() + assert_equal(test, [[1, None], [3, 4]]) + # ... on mvoid + a = a[0] + test = a.tolist() + assert_equal(test, [1, None]) + + def test_tolist_specialcase(self): + # Test mvoid.tolist: make sure we return a standard Python object + a = array([(0, 1), (2, 3)], dtype=[('a', int), ('b', int)]) + # w/o mask: each entry is a np.void whose elements are standard Python + for entry in a: + for item in entry.tolist(): + assert_(not isinstance(item, np.generic)) + # w/ mask: each entry is a ma.void whose elements should be + # standard Python + a.mask[0] = (0, 1) + for entry in a: + for item in entry.tolist(): + assert_(not isinstance(item, np.generic)) + + def test_toflex(self): + # Test the conversion to records + data = arange(10) + record = data.toflex() + assert_equal(record['_data'], data._data) + assert_equal(record['_mask'], data._mask) + + data[[0, 1, 2, -1]] = masked + record = data.toflex() + assert_equal(record['_data'], data._data) + assert_equal(record['_mask'], data._mask) + + ndtype = [('i', int), ('s', '|S3'), ('f', float)] + data = array(list(zip(np.arange(10), + 'ABCDEFGHIJKLM', + np.random.rand(10))), + dtype=ndtype) + data[[0, 1, 2, -1]] = masked + record = data.toflex() + assert_equal(record['_data'], data._data) + assert_equal(record['_mask'], data._mask) + + ndtype = np.dtype("int, (2,3)float, float") + data = array(list(zip(np.arange(10), + np.random.rand(10), + np.random.rand(10))), + dtype=ndtype) + data[[0, 1, 2, -1]] = masked + record = data.toflex() + assert_equal_records(record['_data'], data._data) + assert_equal_records(record['_mask'], data._mask) + + def test_fromflex(self): + # Test the reconstruction of a masked_array from a record + a = array([1, 2, 3]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.mask, a.mask) + + a = array([1, 2, 3], mask=[0, 0, 1]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.mask, a.mask) + + a = array([(1, 1.), (2, 2.), (3, 3.)], mask=[(1, 0), (0, 0), (0, 1)], + dtype=[('A', int), ('B', float)]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.data, a.data) + + def test_arraymethod(self): + # Test a _arraymethod w/ n argument + marray = masked_array([[1, 2, 3, 4, 5]], mask=[0, 0, 1, 0, 0]) + control = masked_array([[1], [2], [3], [4], [5]], + mask=[0, 0, 1, 0, 0]) + assert_equal(marray.T, control) + assert_equal(marray.transpose(), control) + + assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0)) + + def test_arraymethod_0d(self): + # gh-9430 + x = np.ma.array(42, mask=True) + assert_equal(x.T.mask, x.mask) + assert_equal(x.T.data, x.data) + + def test_transpose_view(self): + x = np.ma.array([[1, 2, 3], [4, 5, 6]]) + x[0, 1] = np.ma.masked + xt = x.T + + xt[1, 0] = 10 + xt[0, 1] = np.ma.masked + + assert_equal(x.data, xt.T.data) + assert_equal(x.mask, xt.T.mask) + + def test_diagonal_view(self): + x = np.ma.zeros((3, 3)) + x[0, 0] = 10 + x[1, 1] = np.ma.masked + x[2, 2] = 20 + xd = x.diagonal() + x[1, 1] = 15 + assert_equal(xd.mask, x.diagonal().mask) + assert_equal(xd.data, x.diagonal().data) + + +class TestMaskedArrayMathMethods: + def _create_data(self): + # Base data definition. + x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + m2 = np.array([1, 1, 0, 1, 0, 0, + 1, 1, 1, 1, 0, 1, + 0, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 1, 0, + 0, 0, 1, 0, 1, 1]) + m2x = array(data=x, mask=m2) + m2X = array(data=X, mask=m2.reshape(X.shape)) + m2XX = array(data=XX, mask=m2.reshape(XX.shape)) + return x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX + + def test_cumsumprod(self): + # Tests cumsum & cumprod on MaskedArrays. + mX = self._create_data()[5] + mXcp = mX.cumsum(0) + assert_equal(mXcp._data, mX.filled(0).cumsum(0)) + mXcp = mX.cumsum(1) + assert_equal(mXcp._data, mX.filled(0).cumsum(1)) + + mXcp = mX.cumprod(0) + assert_equal(mXcp._data, mX.filled(1).cumprod(0)) + mXcp = mX.cumprod(1) + assert_equal(mXcp._data, mX.filled(1).cumprod(1)) + + def test_cumsumprod_with_output(self): + # Tests cumsum/cumprod w/ output + xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4) + xm[:, 0] = xm[0] = xm[-1, -1] = masked + + for funcname in ('cumsum', 'cumprod'): + npfunc = getattr(np, funcname) + xmmeth = getattr(xm, funcname) + + # A ndarray as explicit input + output = np.empty((3, 4), dtype=float) + output.fill(-9999) + result = npfunc(xm, axis=0, out=output) + # ... the result should be the given output + assert_(result is output) + assert_equal(result, xmmeth(axis=0, out=output)) + + output = empty((3, 4), dtype=int) + result = xmmeth(axis=0, out=output) + assert_(result is output) + + def test_ptp(self): + # Tests ptp on MaskedArrays. + _, X, _, m, mx, mX, _, _, _, _ = self._create_data() + (n, m) = X.shape + assert_equal(mx.ptp(), np.ptp(mx.compressed())) + rows = np.zeros(n, float) + cols = np.zeros(m, float) + for k in range(m): + cols[k] = np.ptp(mX[:, k].compressed()) + for k in range(n): + rows[k] = np.ptp(mX[k].compressed()) + assert_equal(mX.ptp(0), cols) + assert_equal(mX.ptp(1), rows) + + def test_add_object(self): + x = masked_array(['a', 'b'], mask=[1, 0], dtype=object) + y = x + 'x' + assert_equal(y[1], 'bx') + assert_(y.mask[0]) + + def test_sum_object(self): + # Test sum on object dtype + a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object) + assert_equal(a.sum(), 5) + a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object) + assert_equal(a.sum(axis=0), [5, 7, 9]) + + def test_prod_object(self): + # Test prod on object dtype + a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object) + assert_equal(a.prod(), 2 * 3) + a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object) + assert_equal(a.prod(axis=0), [4, 10, 18]) + + def test_meananom_object(self): + # Test mean/anom on object dtype + a = masked_array([1, 2, 3], dtype=object) + assert_equal(a.mean(), 2) + assert_equal(a.anom(), [-1, 0, 1]) + + def test_anom_shape(self): + a = masked_array([1, 2, 3]) + assert_equal(a.anom().shape, a.shape) + a.mask = True + assert_equal(a.anom().shape, a.shape) + assert_(np.ma.is_masked(a.anom())) + + def test_anom(self): + a = masked_array(np.arange(1, 7).reshape(2, 3)) + assert_almost_equal(a.anom(), + [[-2.5, -1.5, -0.5], [0.5, 1.5, 2.5]]) + assert_almost_equal(a.anom(axis=0), + [[-1.5, -1.5, -1.5], [1.5, 1.5, 1.5]]) + assert_almost_equal(a.anom(axis=1), + [[-1., 0., 1.], [-1., 0., 1.]]) + a.mask = [[0, 0, 1], [0, 1, 0]] + mval = -99 + assert_almost_equal(a.anom().filled(mval), + [[-2.25, -1.25, mval], [0.75, mval, 2.75]]) + assert_almost_equal(a.anom(axis=0).filled(mval), + [[-1.5, 0.0, mval], [1.5, mval, 0.0]]) + assert_almost_equal(a.anom(axis=1).filled(mval), + [[-0.5, 0.5, mval], [-1.0, mval, 1.0]]) + + def test_trace(self): + # Tests trace on MaskedArrays. + _, X, _, _, _, mX, _, _, _, _ = self._create_data() + mXdiag = mX.diagonal() + assert_equal(mX.trace(), mX.diagonal().compressed().sum()) + assert_almost_equal(mX.trace(), + X.trace() - sum(mXdiag.mask * X.diagonal(), + axis=0)) + assert_equal(np.trace(mX), mX.trace()) + + # gh-5560 + arr = np.arange(2 * 4 * 4).reshape(2, 4, 4) + m_arr = np.ma.masked_array(arr, False) + assert_equal(arr.trace(axis1=1, axis2=2), m_arr.trace(axis1=1, axis2=2)) + + def test_dot(self): + # Tests dot on MaskedArrays. + _, _, _, _, mx, mX, mXX, _, _, _ = self._create_data() + fx = mx.filled(0) + r = mx.dot(mx) + assert_almost_equal(r.filled(0), fx.dot(fx)) + assert_(r.mask is nomask) + + fX = mX.filled(0) + r = mX.dot(mX) + assert_almost_equal(r.filled(0), fX.dot(fX)) + assert_(r.mask[1, 3]) + r1 = empty_like(r) + mX.dot(mX, out=r1) + assert_almost_equal(r, r1) + + mYY = mXX.swapaxes(-1, -2) + fXX, fYY = mXX.filled(0), mYY.filled(0) + r = mXX.dot(mYY) + assert_almost_equal(r.filled(0), fXX.dot(fYY)) + r1 = empty_like(r) + mXX.dot(mYY, out=r1) + assert_almost_equal(r, r1) + + def test_dot_shape_mismatch(self): + # regression test + x = masked_array([[1, 2], [3, 4]], mask=[[0, 1], [0, 0]]) + y = masked_array([[1, 2], [3, 4]], mask=[[0, 1], [0, 0]]) + z = masked_array([[0, 1], [3, 3]]) + x.dot(y, out=z) + assert_almost_equal(z.filled(0), [[1, 0], [15, 16]]) + assert_almost_equal(z.mask, [[0, 1], [0, 0]]) + + def test_varmean_nomask(self): + # gh-5769 + foo = array([1, 2, 3, 4], dtype='f8') + bar = array([1, 2, 3, 4], dtype='f8') + assert_equal(type(foo.mean()), np.float64) + assert_equal(type(foo.var()), np.float64) + assert (foo.mean() == bar.mean()) is np.bool(True) + + # check array type is preserved and out works + foo = array(np.arange(16).reshape((4, 4)), dtype='f8') + bar = empty(4, dtype='f4') + assert_equal(type(foo.mean(axis=1)), MaskedArray) + assert_equal(type(foo.var(axis=1)), MaskedArray) + assert_(foo.mean(axis=1, out=bar) is bar) + assert_(foo.var(axis=1, out=bar) is bar) + + def test_varstd(self): + # Tests var & std on MaskedArrays. + _, X, XX, _, _, mX, mXX, _, _, _ = self._create_data() + assert_almost_equal(mX.var(axis=None), mX.compressed().var()) + assert_almost_equal(mX.std(axis=None), mX.compressed().std()) + assert_almost_equal(mX.std(axis=None, ddof=1), + mX.compressed().std(ddof=1)) + assert_almost_equal(mX.var(axis=None, ddof=1), + mX.compressed().var(ddof=1)) + assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape) + assert_equal(mX.var().shape, X.var().shape) + (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) + assert_almost_equal(mX.var(axis=None, ddof=2), + mX.compressed().var(ddof=2)) + assert_almost_equal(mX.std(axis=None, ddof=2), + mX.compressed().std(ddof=2)) + for k in range(6): + assert_almost_equal(mXvar1[k], mX[k].compressed().var()) + assert_almost_equal(mXvar0[k], mX[:, k].compressed().var()) + assert_almost_equal(np.sqrt(mXvar0[k]), + mX[:, k].compressed().std()) + + @pytest.mark.filterwarnings(WARNING_MARK_SPEC) + def test_varstd_specialcases(self): + # Test a special case for var + nout = np.array(-1, dtype=float) + mout = array(-1, dtype=float) + + x = array(arange(10), mask=True) + for methodname in ('var', 'std'): + method = getattr(x, methodname) + assert_(method() is masked) + assert_(method(0) is masked) + assert_(method(-1) is masked) + # Using a masked array as explicit output + method(out=mout) + assert_(mout is not masked) + assert_equal(mout.mask, True) + # Using a ndarray as explicit output + method(out=nout) + assert_(np.isnan(nout)) + + x = array(arange(10), mask=True) + x[-1] = 9 + for methodname in ('var', 'std'): + method = getattr(x, methodname) + assert_(method(ddof=1) is masked) + assert_(method(0, ddof=1) is masked) + assert_(method(-1, ddof=1) is masked) + # Using a masked array as explicit output + method(out=mout, ddof=1) + assert_(mout is not masked) + assert_equal(mout.mask, True) + # Using a ndarray as explicit output + method(out=nout, ddof=1) + assert_(np.isnan(nout)) + + def test_varstd_ddof(self): + a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 0, 1], [0, 0, 1]]) + test = a.std(axis=0, ddof=0) + assert_equal(test.filled(0), [0, 0, 0]) + assert_equal(test.mask, [0, 0, 1]) + test = a.std(axis=0, ddof=1) + assert_equal(test.filled(0), [0, 0, 0]) + assert_equal(test.mask, [0, 0, 1]) + test = a.std(axis=0, ddof=2) + assert_equal(test.filled(0), [0, 0, 0]) + assert_equal(test.mask, [1, 1, 1]) + + def test_diag(self): + # Test diag + x = arange(9).reshape((3, 3)) + x[1, 1] = masked + out = np.diag(x) + assert_equal(out, [0, 4, 8]) + out = diag(x) + assert_equal(out, [0, 4, 8]) + assert_equal(out.mask, [0, 1, 0]) + out = diag(out) + control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]], + mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(out, control) + + def test_axis_methods_nomask(self): + # Test the combination nomask & methods w/ axis + a = array([[1, 2, 3], [4, 5, 6]]) + + assert_equal(a.sum(0), [5, 7, 9]) + assert_equal(a.sum(-1), [6, 15]) + assert_equal(a.sum(1), [6, 15]) + + assert_equal(a.prod(0), [4, 10, 18]) + assert_equal(a.prod(-1), [6, 120]) + assert_equal(a.prod(1), [6, 120]) + + assert_equal(a.min(0), [1, 2, 3]) + assert_equal(a.min(-1), [1, 4]) + assert_equal(a.min(1), [1, 4]) + + assert_equal(a.max(0), [4, 5, 6]) + assert_equal(a.max(-1), [3, 6]) + assert_equal(a.max(1), [3, 6]) + + @pytest.mark.thread_unsafe(reason="crashes with low memory") + @requires_memory(free_bytes=2 * 10000 * 1000 * 2) + def test_mean_overflow(self): + # Test overflow in masked arrays + # gh-20272 + a = masked_array(np.full((10000, 10000), 65535, dtype=np.uint16), + mask=np.zeros((10000, 10000))) + assert_equal(a.mean(), 65535.0) + + def test_diff_with_prepend(self): + # GH 22465 + x = np.array([1, 2, 2, 3, 4, 2, 1, 1]) + + a = np.ma.masked_equal(x[3:], value=2) + a_prep = np.ma.masked_equal(x[:3], value=2) + diff1 = np.ma.diff(a, prepend=a_prep, axis=0) + + b = np.ma.masked_equal(x, value=2) + diff2 = np.ma.diff(b, axis=0) + + assert_(np.ma.allequal(diff1, diff2)) + + def test_diff_with_append(self): + # GH 22465 + x = np.array([1, 2, 2, 3, 4, 2, 1, 1]) + + a = np.ma.masked_equal(x[:3], value=2) + a_app = np.ma.masked_equal(x[3:], value=2) + diff1 = np.ma.diff(a, append=a_app, axis=0) + + b = np.ma.masked_equal(x, value=2) + diff2 = np.ma.diff(b, axis=0) + + assert_(np.ma.allequal(diff1, diff2)) + + def test_diff_with_dim_0(self): + with pytest.raises( + ValueError, + match="diff requires input that is at least one dimensional" + ): + np.ma.diff(np.array(1)) + + def test_diff_with_n_0(self): + a = np.ma.masked_equal([1, 2, 2, 3, 4, 2, 1, 1], value=2) + diff = np.ma.diff(a, n=0, axis=0) + + assert_(np.ma.allequal(a, diff)) + + +class TestMaskedArrayMathMethodsComplex: + # Test class for miscellaneous MaskedArrays methods. + def _create_data(self): + # Base data definition. + x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479j, + 7.189j, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993j]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + m2 = np.array([1, 1, 0, 1, 0, 0, + 1, 1, 1, 1, 0, 1, + 0, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 1, 0, + 0, 0, 1, 0, 1, 1]) + m2x = array(data=x, mask=m2) + m2X = array(data=X, mask=m2.reshape(X.shape)) + m2XX = array(data=XX, mask=m2.reshape(XX.shape)) + return x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX + + def test_varstd(self): + # Tests var & std on MaskedArrays. + _, X, XX, _, _, mX, mXX, _, _, _ = self._create_data() + assert_almost_equal(mX.var(axis=None), mX.compressed().var()) + assert_almost_equal(mX.std(axis=None), mX.compressed().std()) + assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape) + assert_equal(mX.var().shape, X.var().shape) + (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) + assert_almost_equal(mX.var(axis=None, ddof=2), + mX.compressed().var(ddof=2)) + assert_almost_equal(mX.std(axis=None, ddof=2), + mX.compressed().std(ddof=2)) + for k in range(6): + assert_almost_equal(mXvar1[k], mX[k].compressed().var()) + assert_almost_equal(mXvar0[k], mX[:, k].compressed().var()) + assert_almost_equal(np.sqrt(mXvar0[k]), + mX[:, k].compressed().std()) + + +class TestMaskedArrayFunctions: + # Test class for miscellaneous functions. + def test_masked_where_bool(self): + x = [1, 2] + y = masked_where(False, x) + assert_equal(y, [1, 2]) + assert_equal(y[1], 2) + + def test_masked_equal_wlist(self): + x = [1, 2, 3] + mx = masked_equal(x, 3) + assert_equal(mx, x) + assert_equal(mx._mask, [0, 0, 1]) + mx = masked_not_equal(x, 3) + assert_equal(mx, x) + assert_equal(mx._mask, [1, 1, 0]) + + def test_masked_equal_fill_value(self): + x = [1, 2, 3] + mx = masked_equal(x, 3) + assert_equal(mx._mask, [0, 0, 1]) + assert_equal(mx.fill_value, 3) + + def test_masked_where_condition(self): + # Tests masking functions. + x = array([1., 2., 3., 4., 5.]) + x[2] = masked + assert_equal(masked_where(greater(x, 2), x), masked_greater(x, 2)) + assert_equal(masked_where(greater_equal(x, 2), x), + masked_greater_equal(x, 2)) + assert_equal(masked_where(less(x, 2), x), masked_less(x, 2)) + assert_equal(masked_where(less_equal(x, 2), x), + masked_less_equal(x, 2)) + assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)) + assert_equal(masked_where(equal(x, 2), x), masked_equal(x, 2)) + assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)) + assert_equal(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]), + [99, 99, 3, 4, 5]) + + def test_masked_where_oddities(self): + # Tests some generic features. + atest = ones((10, 10, 10), dtype=float) + btest = zeros(atest.shape, MaskType) + ctest = masked_where(btest, atest) + assert_equal(atest, ctest) + + def test_masked_where_shape_constraint(self): + a = arange(10) + with assert_raises(IndexError): + masked_equal(1, a) + test = masked_equal(a, 1) + assert_equal(test.mask, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0]) + + def test_masked_where_structured(self): + # test that masked_where on a structured array sets a structured + # mask (see issue #2972) + a = np.zeros(10, dtype=[("A", " 6, x) + + def test_masked_otherfunctions(self): + assert_equal(masked_inside(list(range(5)), 1, 3), + [0, 199, 199, 199, 4]) + assert_equal(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199]) + assert_equal(masked_inside(array(list(range(5)), + mask=[1, 0, 0, 0, 0]), 1, 3).mask, + [1, 1, 1, 1, 0]) + assert_equal(masked_outside(array(list(range(5)), + mask=[0, 1, 0, 0, 0]), 1, 3).mask, + [1, 1, 0, 0, 1]) + assert_equal(masked_equal(array(list(range(5)), + mask=[1, 0, 0, 0, 0]), 2).mask, + [1, 0, 1, 0, 0]) + assert_equal(masked_not_equal(array([2, 2, 1, 2, 1], + mask=[1, 0, 0, 0, 0]), 2).mask, + [1, 0, 1, 0, 1]) + + def test_round(self): + a = array([1.23456, 2.34567, 3.45678, 4.56789, 5.67890], + mask=[0, 1, 0, 0, 0]) + assert_equal(a.round(), [1., 2., 3., 5., 6.]) + assert_equal(a.round(1), [1.2, 2.3, 3.5, 4.6, 5.7]) + assert_equal(a.round(3), [1.235, 2.346, 3.457, 4.568, 5.679]) + b = empty_like(a) + a.round(out=b) + assert_equal(b, [1., 2., 3., 5., 6.]) + + x = array([1., 2., 3., 4., 5.]) + c = array([1, 1, 1, 0, 0]) + x[2] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + c[0] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + + def test_round_with_output(self): + # Testing round with an explicit output + + xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4) + xm[:, 0] = xm[0] = xm[-1, -1] = masked + + # A ndarray as explicit input + output = np.empty((3, 4), dtype=float) + output.fill(-9999) + result = np.round(xm, decimals=2, out=output) + # ... the result should be the given output + assert_(result is output) + assert_equal(result, xm.round(decimals=2, out=output)) + + output = empty((3, 4), dtype=float) + result = xm.round(decimals=2, out=output) + assert_(result is output) + + def test_round_with_scalar(self): + # Testing round with scalar/zero dimension input + # GH issue 2244 + a = array(1.1, mask=[False]) + assert_equal(a.round(), 1) + + a = array(1.1, mask=[True]) + assert_(a.round() is masked) + + a = array(1.1, mask=[False]) + output = np.empty(1, dtype=float) + output.fill(-9999) + a.round(out=output) + assert_equal(output, 1) + + a = array(1.1, mask=[False]) + output = array(-9999., mask=[True]) + a.round(out=output) + assert_equal(output[()], 1) + + a = array(1.1, mask=[True]) + output = array(-9999., mask=[False]) + a.round(out=output) + assert_(output[()] is masked) + + def test_identity(self): + a = identity(5) + assert_(isinstance(a, MaskedArray)) + assert_equal(a, np.identity(5)) + + def test_power(self): + x = -1.1 + assert_almost_equal(power(x, 2.), 1.21) + assert_(power(x, masked) is masked) + x = array([-1.1, -1.1, 1.1, 1.1, 0.]) + b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1]) + y = power(x, b) + assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.]) + assert_equal(y._mask, [1, 0, 0, 0, 1]) + b.mask = nomask + y = power(x, b) + assert_equal(y._mask, [1, 0, 0, 0, 1]) + z = x ** b + assert_equal(z._mask, y._mask) + assert_almost_equal(z, y) + assert_almost_equal(z._data, y._data) + x **= b + assert_equal(x._mask, y._mask) + assert_almost_equal(x, y) + assert_almost_equal(x._data, y._data) + + def test_power_with_broadcasting(self): + # Test power w/ broadcasting + a2 = np.array([[1., 2., 3.], [4., 5., 6.]]) + a2m = array(a2, mask=[[1, 0, 0], [0, 0, 1]]) + b1 = np.array([2, 4, 3]) + b2 = np.array([b1, b1]) + b2m = array(b2, mask=[[0, 1, 0], [0, 1, 0]]) + + ctrl = array([[1 ** 2, 2 ** 4, 3 ** 3], [4 ** 2, 5 ** 4, 6 ** 3]], + mask=[[1, 1, 0], [0, 1, 1]]) + # No broadcasting, base & exp w/ mask + test = a2m ** b2m + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + # No broadcasting, base w/ mask, exp w/o mask + test = a2m ** b2 + assert_equal(test, ctrl) + assert_equal(test.mask, a2m.mask) + # No broadcasting, base w/o mask, exp w/ mask + test = a2 ** b2m + assert_equal(test, ctrl) + assert_equal(test.mask, b2m.mask) + + ctrl = array([[2 ** 2, 4 ** 4, 3 ** 3], [2 ** 2, 4 ** 4, 3 ** 3]], + mask=[[0, 1, 0], [0, 1, 0]]) + test = b1 ** b2m + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + test = b2m ** b1 + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_where(self): + # Test the where function + x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + xm.set_fill_value(1e+20) + + d = where(xm > 2, xm, -9) + assert_equal(d, [-9., -9., -9., -9., -9., 4., + -9., -9., 10., -9., -9., 3.]) + assert_equal(d._mask, xm._mask) + d = where(xm > 2, -9, ym) + assert_equal(d, [5., 0., 3., 2., -1., -9., + -9., -10., -9., 1., 0., -9.]) + assert_equal(d._mask, [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0]) + d = where(xm > 2, xm, masked) + assert_equal(d, [-9., -9., -9., -9., -9., 4., + -9., -9., 10., -9., -9., 3.]) + tmp = xm._mask.copy() + tmp[(xm <= 2).filled(True)] = True + assert_equal(d._mask, tmp) + + with np.errstate(invalid="warn"): + # The fill value is 1e20, it cannot be converted to `int`: + with pytest.warns(RuntimeWarning, match="invalid value"): + ixm = xm.astype(int) + d = where(ixm > 2, ixm, masked) + assert_equal(d, [-9, -9, -9, -9, -9, 4, -9, -9, 10, -9, -9, 3]) + assert_equal(d.dtype, ixm.dtype) + + def test_where_object(self): + a = np.array(None) + b = masked_array(None) + r = b.copy() + assert_equal(np.ma.where(True, a, a), r) + assert_equal(np.ma.where(True, b, b), r) + + def test_where_with_masked_choice(self): + x = arange(10) + x[3] = masked + c = x >= 8 + # Set False to masked + z = where(c, x, masked) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is masked) + assert_(z[7] is masked) + assert_(z[8] is not masked) + assert_(z[9] is not masked) + assert_equal(x, z) + # Set True to masked + z = where(c, masked, x) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is not masked) + assert_(z[7] is not masked) + assert_(z[8] is masked) + assert_(z[9] is masked) + + def test_where_with_masked_condition(self): + x = array([1., 2., 3., 4., 5.]) + c = array([1, 1, 1, 0, 0]) + x[2] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + c[0] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + + x = arange(1, 6) + x[-1] = masked + y = arange(1, 6) * 10 + y[2] = masked + c = array([1, 1, 1, 0, 0], mask=[1, 0, 0, 0, 0]) + cm = c.filled(1) + z = where(c, x, y) + zm = where(cm, x, y) + assert_equal(z, zm) + assert_(getmask(zm) is nomask) + assert_equal(zm, [1, 2, 3, 40, 50]) + z = where(c, masked, 1) + assert_equal(z, [99, 99, 99, 1, 1]) + z = where(c, 1, masked) + assert_equal(z, [99, 1, 1, 99, 99]) + + def test_where_type(self): + # Test the type conservation with where + x = np.arange(4, dtype=np.int32) + y = np.arange(4, dtype=np.float32) * 2.2 + test = where(x > 1.5, y, x).dtype + control = np.result_type(np.int32, np.float32) + assert_equal(test, control) + + def test_where_broadcast(self): + # Issue 8599 + x = np.arange(9).reshape(3, 3) + y = np.zeros(3) + core = np.where([1, 0, 1], x, y) + ma = where([1, 0, 1], x, y) + + assert_equal(core, ma) + assert_equal(core.dtype, ma.dtype) + + def test_where_structured(self): + # Issue 8600 + dt = np.dtype([('a', int), ('b', int)]) + x = np.array([(1, 2), (3, 4), (5, 6)], dtype=dt) + y = np.array((10, 20), dtype=dt) + core = np.where([0, 1, 1], x, y) + ma = np.where([0, 1, 1], x, y) + + assert_equal(core, ma) + assert_equal(core.dtype, ma.dtype) + + def test_where_structured_masked(self): + dt = np.dtype([('a', int), ('b', int)]) + x = np.array([(1, 2), (3, 4), (5, 6)], dtype=dt) + + ma = where([0, 1, 1], x, masked) + expected = masked_where([1, 0, 0], x) + + assert_equal(ma.dtype, expected.dtype) + assert_equal(ma, expected) + assert_equal(ma.mask, expected.mask) + + def test_masked_invalid_error(self): + a = np.arange(5, dtype=object) + a[3] = np.inf + a[2] = np.nan + with pytest.raises(TypeError, + match="not supported for the input types"): + np.ma.masked_invalid(a) + + def test_masked_invalid_pandas(self): + # getdata() used to be bad for pandas series due to its _data + # attribute. This test is a regression test mainly and may be + # removed if getdata() is adjusted. + class Series: + _data = "nonsense" + + def __array__(self, dtype=None, copy=None): + return np.array([5, np.nan, np.inf]) + + arr = np.ma.masked_invalid(Series()) + assert_array_equal(arr._data, np.array(Series())) + assert_array_equal(arr._mask, [False, True, True]) + + @pytest.mark.parametrize("copy", [True, False]) + def test_masked_invalid_full_mask(self, copy): + # Matplotlib relied on masked_invalid always returning a full mask + # (Also astropy projects, but were ok with it gh-22720 and gh-22842) + a = np.ma.array([1, 2, 3, 4]) + assert a._mask is nomask + res = np.ma.masked_invalid(a, copy=copy) + assert res.mask is not nomask + # mask of a should not be mutated + assert a.mask is nomask + assert np.may_share_memory(a._data, res._data) != copy + + def test_choose(self): + # Test choose + choices = [[0, 1, 2, 3], [10, 11, 12, 13], + [20, 21, 22, 23], [30, 31, 32, 33]] + chosen = choose([2, 3, 1, 0], choices) + assert_equal(chosen, array([20, 31, 12, 3])) + chosen = choose([2, 4, 1, 0], choices, mode='clip') + assert_equal(chosen, array([20, 31, 12, 3])) + chosen = choose([2, 4, 1, 0], choices, mode='wrap') + assert_equal(chosen, array([20, 1, 12, 3])) + # Check with some masked indices + indices_ = array([2, 4, 1, 0], mask=[1, 0, 0, 1]) + chosen = choose(indices_, choices, mode='wrap') + assert_equal(chosen, array([99, 1, 12, 99])) + assert_equal(chosen.mask, [1, 0, 0, 1]) + # Check with some masked choices + choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1], + [1, 0, 0, 0], [0, 0, 0, 0]]) + indices_ = [2, 3, 1, 0] + chosen = choose(indices_, choices, mode='wrap') + assert_equal(chosen, array([20, 31, 12, 3])) + assert_equal(chosen.mask, [1, 0, 0, 1]) + + def test_choose_with_out(self): + # Test choose with an explicit out keyword + choices = [[0, 1, 2, 3], [10, 11, 12, 13], + [20, 21, 22, 23], [30, 31, 32, 33]] + store = empty(4, dtype=int) + chosen = choose([2, 3, 1, 0], choices, out=store) + assert_equal(store, array([20, 31, 12, 3])) + assert_(store is chosen) + # Check with some masked indices + out + store = empty(4, dtype=int) + indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1]) + chosen = choose(indices_, choices, mode='wrap', out=store) + assert_equal(store, array([99, 31, 12, 99])) + assert_equal(store.mask, [1, 0, 0, 1]) + # Check with some masked choices + out ina ndarray ! + choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1], + [1, 0, 0, 0], [0, 0, 0, 0]]) + indices_ = [2, 3, 1, 0] + store = empty(4, dtype=int).view(ndarray) + chosen = choose(indices_, choices, mode='wrap', out=store) + assert_equal(store, array([999999, 31, 12, 999999])) + + def test_reshape(self): + a = arange(10) + a[0] = masked + # Try the default + b = a.reshape((5, 2)) + assert_equal(b.shape, (5, 2)) + assert_(b.flags['C']) + # Try w/ arguments as list instead of tuple + b = a.reshape(5, 2) + assert_equal(b.shape, (5, 2)) + assert_(b.flags['C']) + # Try w/ order + b = a.reshape((5, 2), order='F') + assert_equal(b.shape, (5, 2)) + assert_(b.flags['F']) + # Try w/ order + b = a.reshape(5, 2, order='F') + assert_equal(b.shape, (5, 2)) + assert_(b.flags['F']) + + c = np.reshape(a, (2, 5)) + assert_(isinstance(c, MaskedArray)) + assert_equal(c.shape, (2, 5)) + assert_(c[0, 0] is masked) + assert_(c.flags['C']) + + def test_make_mask_descr(self): + # Flexible + ntype = [('a', float), ('b', float)] + test = make_mask_descr(ntype) + assert_equal(test, [('a', bool), ('b', bool)]) + assert_(test is make_mask_descr(test)) + + # Standard w/ shape + ntype = (float, 2) + test = make_mask_descr(ntype) + assert_equal(test, (bool, 2)) + assert_(test is make_mask_descr(test)) + + # Standard standard + ntype = float + test = make_mask_descr(ntype) + assert_equal(test, np.dtype(bool)) + assert_(test is make_mask_descr(test)) + + # Nested + ntype = [('a', float), ('b', [('ba', float), ('bb', float)])] + test = make_mask_descr(ntype) + control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])]) + assert_equal(test, control) + assert_(test is make_mask_descr(test)) + + # Named+ shape + ntype = [('a', (float, 2))] + test = make_mask_descr(ntype) + assert_equal(test, np.dtype([('a', (bool, 2))])) + assert_(test is make_mask_descr(test)) + + # 2 names + ntype = [(('A', 'a'), float)] + test = make_mask_descr(ntype) + assert_equal(test, np.dtype([(('A', 'a'), bool)])) + assert_(test is make_mask_descr(test)) + + # nested boolean types should preserve identity + base_type = np.dtype([('a', int, 3)]) + base_mtype = make_mask_descr(base_type) + sub_type = np.dtype([('a', int), ('b', base_mtype)]) + test = make_mask_descr(sub_type) + assert_equal(test, np.dtype([('a', bool), ('b', [('a', bool, 3)])])) + assert_(test.fields['b'][0] is base_mtype) + + def test_make_mask(self): + # Test make_mask + # w/ a list as an input + mask = [0, 1] + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [0, 1]) + # w/ a ndarray as an input + mask = np.array([0, 1], dtype=bool) + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [0, 1]) + # w/ a flexible-type ndarray as an input - use default + mdtype = [('a', bool), ('b', bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [1, 1]) + # w/ a flexible-type ndarray as an input - use input dtype + mdtype = [('a', bool), ('b', bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask, dtype=mask.dtype) + assert_equal(test.dtype, mdtype) + assert_equal(test, mask) + # w/ a flexible-type ndarray as an input - use input dtype + mdtype = [('a', float), ('b', float)] + bdtype = [('a', bool), ('b', bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask, dtype=mask.dtype) + assert_equal(test.dtype, bdtype) + assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype)) + # Ensure this also works for void + mask = np.array((False, True), dtype='?,?')[()] + assert_(isinstance(mask, np.void)) + test = make_mask(mask, dtype=mask.dtype) + assert_equal(test, mask) + assert_(test is not mask) + mask = np.array((0, 1), dtype='i4,i4')[()] + test2 = make_mask(mask, dtype=mask.dtype) + assert_equal(test2, test) + # test that nomask is returned when m is nomask. + bools = [True, False] + dtypes = [MaskType, float] + msgformat = 'copy=%s, shrink=%s, dtype=%s' + for cpy, shr, dt in itertools.product(bools, bools, dtypes): + res = make_mask(nomask, copy=cpy, shrink=shr, dtype=dt) + assert_(res is nomask, msgformat % (cpy, shr, dt)) + + def test_mask_or(self): + # Initialize + mtype = [('a', bool), ('b', bool)] + mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype) + # Test using nomask as input + test = mask_or(mask, nomask) + assert_equal(test, mask) + test = mask_or(nomask, mask) + assert_equal(test, mask) + # Using False as input + test = mask_or(mask, False) + assert_equal(test, mask) + # Using another array w / the same dtype + other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype) + test = mask_or(mask, other) + control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype) + assert_equal(test, control) + # Using another array w / a different dtype + othertype = [('A', bool), ('B', bool)] + other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype) + try: + test = mask_or(mask, other) + except ValueError: + pass + # Using nested arrays + dtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] + amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype) + bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype) + cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype) + assert_equal(mask_or(amask, bmask), cntrl) + + a = np.array([False, False]) + assert mask_or(a, a) is nomask # gh-27360 + + def test_allequal(self): + x = array([1, 2, 3], mask=[0, 0, 0]) + y = array([1, 2, 3], mask=[1, 0, 0]) + z = array([[1, 2, 3], [4, 5, 6]], mask=[[0, 0, 0], [1, 1, 1]]) + + assert allequal(x, y) + assert not allequal(x, y, fill_value=False) + assert allequal(x, z) + + # test allequal for the same input, with mask=nomask, this test is for + # the scenario raised in https://github.com/numpy/numpy/issues/27201 + assert allequal(x, x) + assert allequal(x, x, fill_value=False) + + assert allequal(y, y) + assert not allequal(y, y, fill_value=False) + + def test_flatten_mask(self): + # Tests flatten mask + # Standard dtype + mask = np.array([0, 0, 1], dtype=bool) + assert_equal(flatten_mask(mask), mask) + # Flexible dtype + mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) + test = flatten_mask(mask) + control = np.array([0, 0, 0, 1], dtype=bool) + assert_equal(test, control) + + mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] + data = [(0, (0, 0)), (0, (0, 1))] + mask = np.array(data, dtype=mdtype) + test = flatten_mask(mask) + control = np.array([0, 0, 0, 0, 0, 1], dtype=bool) + assert_equal(test, control) + + def test_on_ndarray(self): + # Test functions on ndarrays + a = np.array([1, 2, 3, 4]) + m = array(a, mask=False) + test = anom(a) + assert_equal(test, m.anom()) + test = reshape(a, (2, 2)) + assert_equal(test, m.reshape(2, 2)) + + def test_compress(self): + # Test compress function on ndarray and masked array + # Address Github #2495. + arr = np.arange(8) + arr.shape = 4, 2 + cond = np.array([True, False, True, True]) + control = arr[[0, 2, 3]] + test = np.ma.compress(cond, arr, axis=0) + assert_equal(test, control) + marr = np.ma.array(arr) + test = np.ma.compress(cond, marr, axis=0) + assert_equal(test, control) + + def test_compressed(self): + # Test ma.compressed function. + # Address gh-4026 + a = np.ma.array([1, 2]) + test = np.ma.compressed(a) + assert_(type(test) is np.ndarray) + + # Test case when input data is ndarray subclass + class A(np.ndarray): + pass + + a = np.ma.array(A(shape=0)) + test = np.ma.compressed(a) + assert_(type(test) is A) + + # Test that compress flattens + test = np.ma.compressed([[1], [2]]) + assert_equal(test.ndim, 1) + test = np.ma.compressed([[[[[1]]]]]) + assert_equal(test.ndim, 1) + + # Test case when input is MaskedArray subclass + class M(MaskedArray): + pass + + test = np.ma.compressed(M([[[]], [[]]])) + assert_equal(test.ndim, 1) + + # with .compressed() overridden + class M(MaskedArray): + def compressed(self): + return 42 + + test = np.ma.compressed(M([[[]], [[]]])) + assert_equal(test, 42) + + def test_convolve(self): + a = masked_equal(np.arange(5), 2) + b = np.array([1, 1]) + + result = masked_equal([0, 1, -1, -1, 7, 4], -1) + test = np.ma.convolve(a, b, mode='full') + assert_equal(test, result) + + test = np.ma.convolve(a, b, mode='same') + assert_equal(test, result[:-1]) + + test = np.ma.convolve(a, b, mode='valid') + assert_equal(test, result[1:-1]) + + result = masked_equal([0, 1, 1, 3, 7, 4], -1) + test = np.ma.convolve(a, b, mode='full', propagate_mask=False) + assert_equal(test, result) + + test = np.ma.convolve(a, b, mode='same', propagate_mask=False) + assert_equal(test, result[:-1]) + + test = np.ma.convolve(a, b, mode='valid', propagate_mask=False) + assert_equal(test, result[1:-1]) + + test = np.ma.convolve([1, 1], [1, 1, 1]) + assert_equal(test, masked_equal([1, 2, 2, 1], -1)) + + a = [1, 1] + b = masked_equal([1, -1, -1, 1], -1) + test = np.ma.convolve(a, b, propagate_mask=False) + assert_equal(test, masked_equal([1, 1, -1, 1, 1], -1)) + test = np.ma.convolve(a, b, propagate_mask=True) + assert_equal(test, masked_equal([-1, -1, -1, -1, -1], -1)) + + +class TestMaskedFields: + def _create_data(self): + ilist = [1, 2, 3, 4, 5] + flist = [1.1, 2.2, 3.3, 4.4, 5.5] + slist = ['one', 'two', 'three', 'four', 'five'] + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mdtype = [('a', bool), ('b', bool), ('c', bool)] + mask = [0, 1, 0, 0, 1] + base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype) + return {"base": base, "mask": mask, "ddtype": ddtype, "mdtype": mdtype} + + def test_set_records_masks(self): + data = self._create_data() + base = data['base'] + mdtype = data['mdtype'] + # Set w/ nomask or masked + base.mask = nomask + assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) + base.mask = masked + assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype)) + # Set w/ simple boolean + base.mask = False + assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) + base.mask = True + assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype)) + # Set w/ list + base.mask = [0, 0, 0, 1, 1] + assert_equal_records(base._mask, + np.array([(x, x, x) for x in [0, 0, 0, 1, 1]], + dtype=mdtype)) + + def test_set_record_element(self): + # Check setting an element of a record) + base = self._create_data()['base'] + (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) + base[0] = (pi, pi, 'pi') + + assert_equal(base_a.dtype, int) + assert_equal(base_a._data, [3, 2, 3, 4, 5]) + + assert_equal(base_b.dtype, float) + assert_equal(base_b._data, [pi, 2.2, 3.3, 4.4, 5.5]) + + assert_equal(base_c.dtype, '|S8') + assert_equal(base_c._data, + [b'pi', b'two', b'three', b'four', b'five']) + + def test_set_record_slice(self): + base = self._create_data()['base'] + (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) + base[:3] = (pi, pi, 'pi') + + assert_equal(base_a.dtype, int) + assert_equal(base_a._data, [3, 3, 3, 4, 5]) + + assert_equal(base_b.dtype, float) + assert_equal(base_b._data, [pi, pi, pi, 4.4, 5.5]) + + assert_equal(base_c.dtype, '|S8') + assert_equal(base_c._data, + [b'pi', b'pi', b'pi', b'four', b'five']) + + def test_mask_element(self): + "Check record access" + base = self._create_data()['base'] + base[0] = masked + + for n in ('a', 'b', 'c'): + assert_equal(base[n].mask, [1, 1, 0, 0, 1]) + assert_equal(base[n]._data, base._data[n]) + + def test_getmaskarray(self): + # Test getmaskarray on flexible dtype + ndtype = [('a', int), ('b', float)] + test = empty(3, dtype=ndtype) + assert_equal(getmaskarray(test), + np.array([(0, 0), (0, 0), (0, 0)], + dtype=[('a', '|b1'), ('b', '|b1')])) + test[:] = masked + assert_equal(getmaskarray(test), + np.array([(1, 1), (1, 1), (1, 1)], + dtype=[('a', '|b1'), ('b', '|b1')])) + + def test_view(self): + # Test view w/ flexible dtype + iterator = list(zip(np.arange(10), np.random.rand(10))) + data = np.array(iterator) + a = array(iterator, dtype=[('a', float), ('b', float)]) + a.mask[0] = (1, 0) + controlmask = np.array([1] + 19 * [0], dtype=bool) + # Transform globally to simple dtype + test = a.view(float) + assert_equal(test, data.ravel()) + assert_equal(test.mask, controlmask) + # Transform globally to dty + test = a.view((float, 2)) + assert_equal(test, data) + assert_equal(test.mask, controlmask.reshape(-1, 2)) + + def test_getitem(self): + ndtype = [('a', float), ('b', float)] + a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype) + a.mask = np.array(list(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1], + [1, 0, 0, 0, 0, 0, 0, 0, 1, 0])), + dtype=[('a', bool), ('b', bool)]) + + def _test_index(i): + assert_equal(type(a[i]), mvoid) + assert_equal_records(a[i]._data, a._data[i]) + assert_equal_records(a[i]._mask, a._mask[i]) + + assert_equal(type(a[i, ...]), MaskedArray) + assert_equal_records(a[i, ...]._data, a._data[i, ...]) + assert_equal_records(a[i, ...]._mask, a._mask[i, ...]) + + _test_index(1) # No mask + _test_index(0) # One element masked + _test_index(-2) # All element masked + + def test_setitem(self): + # Issue 4866: check that one can set individual items in [record][col] + # and [col][record] order + ndtype = np.dtype([('a', float), ('b', int)]) + ma = np.ma.MaskedArray([(1.0, 1), (2.0, 2)], dtype=ndtype) + ma['a'][1] = 3.0 + assert_equal(ma['a'], np.array([1.0, 3.0])) + ma[1]['a'] = 4.0 + assert_equal(ma['a'], np.array([1.0, 4.0])) + # Issue 2403 + mdtype = np.dtype([('a', bool), ('b', bool)]) + # soft mask + control = np.array([(False, True), (True, True)], dtype=mdtype) + a = np.ma.masked_all((2,), dtype=ndtype) + a['a'][0] = 2 + assert_equal(a.mask, control) + a = np.ma.masked_all((2,), dtype=ndtype) + a[0]['a'] = 2 + assert_equal(a.mask, control) + # hard mask + control = np.array([(True, True), (True, True)], dtype=mdtype) + a = np.ma.masked_all((2,), dtype=ndtype) + a.harden_mask() + a['a'][0] = 2 + assert_equal(a.mask, control) + a = np.ma.masked_all((2,), dtype=ndtype) + a.harden_mask() + a[0]['a'] = 2 + assert_equal(a.mask, control) + + def test_setitem_scalar(self): + # 8510 + mask_0d = np.ma.masked_array(1, mask=True) + arr = np.ma.arange(3) + arr[0] = mask_0d + assert_array_equal(arr.mask, [True, False, False]) + + def test_element_len(self): + data = self._create_data() + # check that len() works for mvoid (Github issue #576) + for rec in data['base']: + assert_equal(len(rec), len(data['ddtype'])) + + +class TestMaskedObjectArray: + + def test_getitem(self): + arr = np.ma.array([None, None]) + for dt in [float, object]: + a0 = np.eye(2).astype(dt) + a1 = np.eye(3).astype(dt) + arr[0] = a0 + arr[1] = a1 + + assert_(arr[0] is a0) + assert_(arr[1] is a1) + assert_(isinstance(arr[0, ...], MaskedArray)) + assert_(isinstance(arr[1, ...], MaskedArray)) + assert_(arr[0, ...][()] is a0) + assert_(arr[1, ...][()] is a1) + + arr[0] = np.ma.masked + + assert_(arr[1] is a1) + assert_(isinstance(arr[0, ...], MaskedArray)) + assert_(isinstance(arr[1, ...], MaskedArray)) + assert_equal(arr[0, ...].mask, True) + assert_(arr[1, ...][()] is a1) + + # gh-5962 - object arrays of arrays do something special + assert_equal(arr[0].data, a0) + assert_equal(arr[0].mask, True) + assert_equal(arr[0, ...][()].data, a0) + assert_equal(arr[0, ...][()].mask, True) + + def test_nested_ma(self): + + arr = np.ma.array([None, None]) + # set the first object to be an unmasked masked constant. A little fiddly + arr[0, ...] = np.array([np.ma.masked], object)[0, ...] + + # check the above line did what we were aiming for + assert_(arr.data[0] is np.ma.masked) + + # test that getitem returned the value by identity + assert_(arr[0] is np.ma.masked) + + # now mask the masked value! + arr[0] = np.ma.masked + assert_(arr[0] is np.ma.masked) + + +class TestMaskedView: + def _create_data(self): + iterator = list(zip(np.arange(10), np.random.rand(10))) + data = np.array(iterator) + a = array(iterator, dtype=[('a', float), ('b', float)]) + a.mask[0] = (1, 0) + controlmask = np.array([1] + 19 * [0], dtype=bool) + return data, a, controlmask + + def test_view_to_nothing(self): + a = self._create_data()[1] + test = a.view() + assert_(isinstance(test, MaskedArray)) + assert_equal(test._data, a._data) + assert_equal(test._mask, a._mask) + + def test_view_to_type(self): + data, a, _ = self._create_data() + test = a.view(np.ndarray) + assert_(not isinstance(test, MaskedArray)) + assert_equal(test, a._data) + assert_equal_records(test, data.view(a.dtype).squeeze()) + + def test_view_to_simple_dtype(self): + data, a, controlmask = self._create_data() + # View globally + test = a.view(float) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, data.ravel()) + assert_equal(test.mask, controlmask) + + def test_view_to_flexible_dtype(self): + a = self._create_data()[1] + + test = a.view([('A', float), ('B', float)]) + assert_equal(test.mask.dtype.names, ('A', 'B')) + assert_equal(test['A'], a['a']) + assert_equal(test['B'], a['b']) + + test = a[0].view([('A', float), ('B', float)]) + assert_(isinstance(test, MaskedArray)) + assert_equal(test.mask.dtype.names, ('A', 'B')) + assert_equal(test['A'], a['a'][0]) + assert_equal(test['B'], a['b'][0]) + + test = a[-1].view([('A', float), ('B', float)]) + assert_(isinstance(test, MaskedArray)) + assert_equal(test.dtype.names, ('A', 'B')) + assert_equal(test['A'], a['a'][-1]) + assert_equal(test['B'], a['b'][-1]) + + def test_view_to_subdtype(self): + data, a, controlmask = self._create_data() + # View globally + test = a.view((float, 2)) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, data) + assert_equal(test.mask, controlmask.reshape(-1, 2)) + # View on 1 masked element + test = a[0].view((float, 2)) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, data[0]) + assert_equal(test.mask, (1, 0)) + # View on 1 unmasked element + test = a[-1].view((float, 2)) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, data[-1]) + + def test_view_to_dtype_and_type(self): + data, a, _ = self._create_data() + + test = a.view((float, 2), np.recarray) + assert_equal(test, data) + assert_(isinstance(test, np.recarray)) + assert_(not isinstance(test, MaskedArray)) + + +class TestOptionalArgs: + def test_ndarrayfuncs(self): + # test axis arg behaves the same as ndarray (including multiple axes) + + d = np.arange(24.0).reshape((2, 3, 4)) + m = np.zeros(24, dtype=bool).reshape((2, 3, 4)) + # mask out last element of last dimension + m[:, :, -1] = True + a = np.ma.array(d, mask=m) + + def testaxis(f, a, d): + numpy_f = numpy.__getattribute__(f) + ma_f = np.ma.__getattribute__(f) + + # test axis arg + assert_equal(ma_f(a, axis=1)[..., :-1], numpy_f(d[..., :-1], axis=1)) + assert_equal(ma_f(a, axis=(0, 1))[..., :-1], + numpy_f(d[..., :-1], axis=(0, 1))) + + def testkeepdims(f, a, d): + numpy_f = numpy.__getattribute__(f) + ma_f = np.ma.__getattribute__(f) + + # test keepdims arg + assert_equal(ma_f(a, keepdims=True).shape, + numpy_f(d, keepdims=True).shape) + assert_equal(ma_f(a, keepdims=False).shape, + numpy_f(d, keepdims=False).shape) + + # test both at once + assert_equal(ma_f(a, axis=1, keepdims=True)[..., :-1], + numpy_f(d[..., :-1], axis=1, keepdims=True)) + assert_equal(ma_f(a, axis=(0, 1), keepdims=True)[..., :-1], + numpy_f(d[..., :-1], axis=(0, 1), keepdims=True)) + + for f in ['sum', 'prod', 'mean', 'var', 'std']: + testaxis(f, a, d) + testkeepdims(f, a, d) + + for f in ['min', 'max']: + testaxis(f, a, d) + + d = (np.arange(24).reshape((2, 3, 4)) % 2 == 0) + a = np.ma.array(d, mask=m) + for f in ['all', 'any']: + testaxis(f, a, d) + testkeepdims(f, a, d) + + def test_count(self): + # test np.ma.count specially + + d = np.arange(24.0).reshape((2, 3, 4)) + m = np.zeros(24, dtype=bool).reshape((2, 3, 4)) + m[:, 0, :] = True + a = np.ma.array(d, mask=m) + + assert_equal(count(a), 16) + assert_equal(count(a, axis=1), 2 * ones((2, 4))) + assert_equal(count(a, axis=(0, 1)), 4 * ones((4,))) + assert_equal(count(a, keepdims=True), 16 * ones((1, 1, 1))) + assert_equal(count(a, axis=1, keepdims=True), 2 * ones((2, 1, 4))) + assert_equal(count(a, axis=(0, 1), keepdims=True), 4 * ones((1, 1, 4))) + assert_equal(count(a, axis=-2), 2 * ones((2, 4))) + assert_raises(ValueError, count, a, axis=(1, 1)) + assert_raises(AxisError, count, a, axis=3) + + # check the 'nomask' path + a = np.ma.array(d, mask=nomask) + + assert_equal(count(a), 24) + assert_equal(count(a, axis=1), 3 * ones((2, 4))) + assert_equal(count(a, axis=(0, 1)), 6 * ones((4,))) + assert_equal(count(a, keepdims=True), 24 * ones((1, 1, 1))) + assert_equal(np.ndim(count(a, keepdims=True)), 3) + assert_equal(count(a, axis=1, keepdims=True), 3 * ones((2, 1, 4))) + assert_equal(count(a, axis=(0, 1), keepdims=True), 6 * ones((1, 1, 4))) + assert_equal(count(a, axis=-2), 3 * ones((2, 4))) + assert_raises(ValueError, count, a, axis=(1, 1)) + assert_raises(AxisError, count, a, axis=3) + + # check the 'masked' singleton + assert_equal(count(np.ma.masked), 0) + + # check 0-d arrays do not allow axis > 0 + assert_raises(AxisError, count, np.ma.array(1), axis=1) + + +class TestMaskedConstant: + def _do_add_test(self, add): + # sanity check + assert_(add(np.ma.masked, 1) is np.ma.masked) + + # now try with a vector + vector = np.array([1, 2, 3]) + result = add(np.ma.masked, vector) + + # lots of things could go wrong here + assert_(result is not np.ma.masked) + assert_(not isinstance(result, np.ma.core.MaskedConstant)) + assert_equal(result.shape, vector.shape) + assert_equal(np.ma.getmask(result), np.ones(vector.shape, dtype=bool)) + + def test_ufunc(self): + self._do_add_test(np.add) + + def test_operator(self): + self._do_add_test(lambda a, b: a + b) + + def test_ctor(self): + m = np.ma.array(np.ma.masked) + + # most importantly, we do not want to create a new MaskedConstant + # instance + assert_(not isinstance(m, np.ma.core.MaskedConstant)) + assert_(m is not np.ma.masked) + + def test_repr(self): + # copies should not exist, but if they do, it should be obvious that + # something is wrong + assert_equal(repr(np.ma.masked), 'masked') + + # create a new instance in a weird way + masked2 = np.ma.MaskedArray.__new__(np.ma.core.MaskedConstant) + assert_not_equal(repr(masked2), 'masked') + + def test_pickle(self): + from io import BytesIO + + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + with BytesIO() as f: + pickle.dump(np.ma.masked, f, protocol=proto) + f.seek(0) + res = pickle.load(f) + assert_(res is np.ma.masked) + + def test_copy(self): + # gh-9328 + # copy is a no-op, like it is with np.True_ + assert_equal( + np.ma.masked.copy() is np.ma.masked, + np.True_.copy() is np.True_) + + def test__copy(self): + import copy + assert_( + copy.copy(np.ma.masked) is np.ma.masked) + + def test_deepcopy(self): + import copy + assert_( + copy.deepcopy(np.ma.masked) is np.ma.masked) + + def test_immutable(self): + orig = np.ma.masked + assert_raises(np.ma.core.MaskError, operator.setitem, orig, (), 1) + assert_raises(ValueError, operator.setitem, orig.data, (), 1) + assert_raises(ValueError, operator.setitem, orig.mask, (), False) + + view = np.ma.masked.view(np.ma.MaskedArray) + assert_raises(ValueError, operator.setitem, view, (), 1) + assert_raises(ValueError, operator.setitem, view.data, (), 1) + assert_raises(ValueError, operator.setitem, view.mask, (), False) + + def test_coercion_int(self): + a_i = np.zeros((), int) + assert_raises(MaskError, operator.setitem, a_i, (), np.ma.masked) + assert_raises(MaskError, int, np.ma.masked) + + def test_coercion_float(self): + a_f = np.zeros((), float) + pytest.warns(UserWarning, operator.setitem, a_f, (), np.ma.masked) + assert_(np.isnan(a_f[()])) + + @pytest.mark.xfail(reason="See gh-9750") + def test_coercion_unicode(self): + a_u = np.zeros((), 'U10') + a_u[()] = np.ma.masked + assert_equal(a_u[()], '--') + + @pytest.mark.xfail(reason="See gh-9750") + def test_coercion_bytes(self): + a_b = np.zeros((), 'S10') + a_b[()] = np.ma.masked + assert_equal(a_b[()], b'--') + + def test_subclass(self): + # https://github.com/astropy/astropy/issues/6645 + class Sub(type(np.ma.masked)): + pass + + a = Sub() + assert_(a is Sub()) + assert_(a is not np.ma.masked) + assert_not_equal(repr(a), 'masked') + + def test_attributes_readonly(self): + assert_raises(AttributeError, setattr, np.ma.masked, 'shape', (1,)) + assert_raises(AttributeError, setattr, np.ma.masked, 'dtype', np.int64) + + +class TestMaskedWhereAliases: + + # TODO: Test masked_object, masked_equal, ... + + def test_masked_values(self): + res = masked_values(np.array([-32768.0]), np.int16(-32768)) + assert_equal(res.mask, [True]) + + res = masked_values(np.inf, np.inf) + assert_equal(res.mask, True) + + res = np.ma.masked_values(np.inf, -np.inf) + assert_equal(res.mask, False) + + res = np.ma.masked_values([1, 2, 3, 4], 5, shrink=True) + assert_(res.mask is np.ma.nomask) + + res = np.ma.masked_values([1, 2, 3, 4], 5, shrink=False) + assert_equal(res.mask, [False] * 4) + + +def test_masked_array(): + a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0]) + assert_equal(np.argwhere(a), [[1], [3]]) + + +def test_masked_array_no_copy(): + # check nomask array is updated in place + a = np.ma.array([1, 2, 3, 4]) + _ = np.ma.masked_where(a == 3, a, copy=False) + assert_array_equal(a.mask, [False, False, True, False]) + # check masked array is updated in place + a = np.ma.array([1, 2, 3, 4], mask=[1, 0, 0, 0]) + _ = np.ma.masked_where(a == 3, a, copy=False) + assert_array_equal(a.mask, [True, False, True, False]) + # check masked array with masked_invalid is updated in place + a = np.ma.array([np.inf, 1, 2, 3, 4]) + _ = np.ma.masked_invalid(a, copy=False) + assert_array_equal(a.mask, [True, False, False, False, False]) + + +def test_append_masked_array(): + a = np.ma.masked_equal([1, 2, 3], value=2) + b = np.ma.masked_equal([4, 3, 2], value=2) + + result = np.ma.append(a, b) + expected_data = [1, 2, 3, 4, 3, 2] + expected_mask = [False, True, False, False, False, True] + assert_array_equal(result.data, expected_data) + assert_array_equal(result.mask, expected_mask) + + a = np.ma.masked_all((2, 2)) + b = np.ma.ones((3, 1)) + + result = np.ma.append(a, b) + expected_data = [1] * 3 + expected_mask = [True] * 4 + [False] * 3 + assert_array_equal(result.data[-3], expected_data) + assert_array_equal(result.mask, expected_mask) + + result = np.ma.append(a, b, axis=None) + assert_array_equal(result.data[-3], expected_data) + assert_array_equal(result.mask, expected_mask) + + +def test_append_masked_array_along_axis(): + a = np.ma.masked_equal([1, 2, 3], value=2) + b = np.ma.masked_values([[4, 5, 6], [7, 8, 9]], 7) + + # When `axis` is specified, `values` must have the correct shape. + assert_raises(ValueError, np.ma.append, a, b, axis=0) + + result = np.ma.append(a[np.newaxis, :], b, axis=0) + expected = np.ma.arange(1, 10) + expected[[1, 6]] = np.ma.masked + expected = expected.reshape((3, 3)) + assert_array_equal(result.data, expected.data) + assert_array_equal(result.mask, expected.mask) + + +def test_default_fill_value_complex(): + # regression test for Python 3, where 'unicode' was not defined + assert_(default_fill_value(1 + 1j) == 1.e20 + 0.0j) + + +def test_string_dtype_fill_value_on_construction(): + # Regression test for gh-29421: allow string fill_value on StringDType masked arrays + dt = np.dtypes.StringDType() + data = np.array(["A", "test", "variable", ""], dtype=dt) + mask = [True, False, True, True] + # Prior to the fix, this would TypeError; now it should succeed + arr = np.ma.MaskedArray(data, mask=mask, fill_value="FILL", dtype=dt) + assert isinstance(arr.fill_value, str) + assert arr.fill_value == "FILL" + filled = arr.filled() + # Masked positions should be replaced by 'FILL' + assert filled.tolist() == ["FILL", "test", "FILL", "FILL"] + + +def test_string_dtype_default_fill_value(): + # Regression test for gh-29421: default fill_value for StringDType is 'N/A' + dt = np.dtypes.StringDType() + data = np.array(['x', 'y', 'z'], dtype=dt) + # no fill_value passed → uses default_fill_value internally + arr = np.ma.MaskedArray(data, mask=[True, False, True], dtype=dt) + # ensure it’s stored as a Python str and equals the expected default + assert isinstance(arr.fill_value, str) + assert arr.fill_value == 'N/A' + # masked slots should be replaced by that default + assert arr.filled().tolist() == ['N/A', 'y', 'N/A'] + + +def test_string_dtype_fill_value_persists_through_slice(): + # Regression test for gh-29421: .fill_value survives slicing/viewing + dt = np.dtypes.StringDType() + arr = np.ma.MaskedArray( + ['a', 'b', 'c'], + mask=[True, False, True], + dtype=dt + ) + arr.fill_value = 'Z' + # slice triggers __array_finalize__ + sub = arr[1:] + # the slice should carry the same fill_value and behavior + assert isinstance(sub.fill_value, str) + assert sub.fill_value == 'Z' + assert sub.filled().tolist() == ['b', 'Z'] + + +def test_setting_fill_value_attribute(): + # Regression test for gh-29421: setting .fill_value post-construction works too + dt = np.dtypes.StringDType() + arr = np.ma.MaskedArray( + ["x", "longstring", "mid"], mask=[False, True, False], dtype=dt + ) + # Setting the attribute should not raise + arr.fill_value = "Z" + assert arr.fill_value == "Z" + # And filled() should use the new fill_value + assert arr.filled()[0] == "x" + assert arr.filled()[1] == "Z" + assert arr.filled()[2] == "mid" + + +def test_ufunc_with_output(): + # check that giving an output argument always returns that output. + # Regression test for gh-8416. + x = array([1., 2., 3.], mask=[0, 0, 1]) + y = np.add(x, 1., out=x) + assert_(y is x) + + +def test_ufunc_with_out_varied(): + """ Test that masked arrays are immune to gh-10459 """ + # the mask of the output should not affect the result, however it is passed + a = array([ 1, 2, 3], mask=[1, 0, 0]) + b = array([10, 20, 30], mask=[1, 0, 0]) + out = array([ 0, 0, 0], mask=[0, 0, 1]) + expected = array([11, 22, 33], mask=[1, 0, 0]) + + out_pos = out.copy() + res_pos = np.add(a, b, out_pos) + + out_kw = out.copy() + res_kw = np.add(a, b, out=out_kw) + + out_tup = out.copy() + res_tup = np.add(a, b, out=(out_tup,)) + + assert_equal(res_kw.mask, expected.mask) + assert_equal(res_kw.data, expected.data) + assert_equal(res_tup.mask, expected.mask) + assert_equal(res_tup.data, expected.data) + assert_equal(res_pos.mask, expected.mask) + assert_equal(res_pos.data, expected.data) + + +def test_astype_mask_ordering(): + descr = np.dtype([('v', int, 3), ('x', [('y', float)])]) + x = array([ + [([1, 2, 3], (1.0,)), ([1, 2, 3], (2.0,))], + [([1, 2, 3], (3.0,)), ([1, 2, 3], (4.0,))]], dtype=descr) + x[0]['v'][0] = np.ma.masked + + x_a = x.astype(descr) + assert x_a.dtype.names == np.dtype(descr).names + assert x_a.mask.dtype.names == np.dtype(descr).names + assert_equal(x, x_a) + + assert_(x is x.astype(x.dtype, copy=False)) + assert_equal(type(x.astype(x.dtype, subok=False)), np.ndarray) + + x_f = x.astype(x.dtype, order='F') + assert_(x_f.flags.f_contiguous) + assert_(x_f.mask.flags.f_contiguous) + + # Also test the same indirectly, via np.array + x_a2 = np.array(x, dtype=descr, subok=True) + assert x_a2.dtype.names == np.dtype(descr).names + assert x_a2.mask.dtype.names == np.dtype(descr).names + assert_equal(x, x_a2) + + assert_(x is np.array(x, dtype=descr, copy=None, subok=True)) + + x_f2 = np.array(x, dtype=x.dtype, order='F', subok=True) + assert_(x_f2.flags.f_contiguous) + assert_(x_f2.mask.flags.f_contiguous) + + +@pytest.mark.parametrize('dt1', num_dts, ids=num_ids) +@pytest.mark.parametrize('dt2', num_dts, ids=num_ids) +@pytest.mark.filterwarnings('ignore::numpy.exceptions.ComplexWarning') +def test_astype_basic(dt1, dt2): + # See gh-12070 + src = np.ma.array(ones(3, dt1), fill_value=1) + dst = src.astype(dt2) + + assert_(src.fill_value == 1) + assert_(src.dtype == dt1) + assert_(src.fill_value.dtype == dt1) + + assert_(dst.fill_value == 1) + assert_(dst.dtype == dt2) + assert_(dst.fill_value.dtype == dt2) + + assert_equal(src, dst) + + +def test_fieldless_void(): + dt = np.dtype([]) # a void dtype with no fields + x = np.empty(4, dt) + + # these arrays contain no values, so there's little to test - but this + # shouldn't crash + mx = np.ma.array(x) + assert_equal(mx.dtype, x.dtype) + assert_equal(mx.shape, x.shape) + + mx = np.ma.array(x, mask=x) + assert_equal(mx.dtype, x.dtype) + assert_equal(mx.shape, x.shape) + + +def test_mask_shape_assignment_does_not_break_masked(): + a = np.ma.masked + b = np.ma.array(1, mask=a.mask) + b.shape = (1,) + assert_equal(a.mask.shape, ()) + + +@pytest.mark.skipif(sys.flags.optimize > 1, + reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1") # noqa: E501 +def test_doc_note(): + def method(self): + """This docstring + + Has multiple lines + + And notes + + Notes + ----- + original note + """ + pass + + expected_doc = """This docstring + +Has multiple lines + +And notes + +Notes +----- +note + +original note""" + + assert_equal(np.ma.core.doc_note(method.__doc__, "note"), expected_doc) + + +def test_gh_22556(): + source = np.ma.array([0, [0, 1, 2]], dtype=object) + deepcopy = copy.deepcopy(source) + deepcopy[1].append('this should not appear in source') + assert len(source[1]) == 3 + + +def test_gh_21022(): + # testing for absence of reported error + source = np.ma.masked_array(data=[-1, -1], mask=True, dtype=np.float64) + axis = np.array(0) + result = np.prod(source, axis=axis, keepdims=False) + result = np.ma.masked_array(result, + mask=np.ones(result.shape, dtype=np.bool)) + array = np.ma.masked_array(data=-1, mask=True, dtype=np.float64) + copy.deepcopy(array) + copy.deepcopy(result) + + +def test_deepcopy_2d_obj(): + source = np.ma.array([[0, "dog"], + [1, 1], + [[1, 2], "cat"]], + mask=[[0, 1], + [0, 0], + [0, 0]], + dtype=object) + deepcopy = copy.deepcopy(source) + deepcopy[2, 0].extend(['this should not appear in source', 3]) + assert len(source[2, 0]) == 2 + assert len(deepcopy[2, 0]) == 4 + assert_equal(deepcopy._mask, source._mask) + deepcopy._mask[0, 0] = 1 + assert source._mask[0, 0] == 0 + + +def test_deepcopy_0d_obj(): + source = np.ma.array(0, mask=[0], dtype=object) + deepcopy = copy.deepcopy(source) + deepcopy[...] = 17 + assert_equal(source, 0) + assert_equal(deepcopy, 17) + + +def test_uint_fill_value_and_filled(): + # See also gh-27269 + a = np.ma.MaskedArray([1, 1], [True, False], dtype="uint16") + # the fill value should likely not be 99999, but for now guarantee it: + assert a.fill_value == 999999 + # However, it's type is uint: + assert a.fill_value.dtype.kind == "u" + # And this ensures things like filled work: + np.testing.assert_array_equal( + a.filled(), np.array([999999, 1]).astype("uint16"), strict=True) + + +@pytest.mark.parametrize( + ('fn', 'signature'), + [ + (np.ma.nonzero, "(a)"), + (np.ma.anomalies, "(a, axis=None, dtype=None)"), + (np.ma.cumsum, "(a, axis=None, dtype=None, out=None)"), + (np.ma.compress, "(condition, a, axis=None, out=None)"), + ] +) +def test_frommethod_signature(fn, signature): + assert str(inspect.signature(fn)) == signature + + +@pytest.mark.parametrize( + ('fn', 'signature'), + [ + ( + np.ma.empty, + ( + "(shape, dtype=None, order='C', *, device=None, like=None, " + "fill_value=None, hardmask=False)" + ), + ), + ( + np.ma.empty_like, + ( + "(prototype, /, dtype=None, order='K', subok=True, shape=None, *, " + "device=None)" + ), + ), + (np.ma.squeeze, "(a, axis=None, *, fill_value=None, hardmask=False)"), + ( + np.ma.identity, + "(n, dtype=None, *, like=None, fill_value=None, hardmask=False)", + ), + ] +) +def test_convert2ma_signature(fn, signature): + assert str(inspect.signature(fn)) == signature + assert fn.__module__ == 'numpy.ma.core' diff --git a/local_packages/numpy/ma/tests/test_deprecations.py b/local_packages/numpy/ma/tests/test_deprecations.py new file mode 100644 index 0000000..07120b1 --- /dev/null +++ b/local_packages/numpy/ma/tests/test_deprecations.py @@ -0,0 +1,65 @@ +"""Test deprecation and future warnings. + +""" +import pytest + +import numpy as np +from numpy.ma.core import MaskedArrayFutureWarning +from numpy.ma.testutils import assert_equal + + +class TestArgsort: + """ gh-8701 """ + def _test_base(self, argsort, cls): + arr_0d = np.array(1).view(cls) + argsort(arr_0d) + + arr_1d = np.array([1, 2, 3]).view(cls) + argsort(arr_1d) + + # argsort has a bad default for >1d arrays + arr_2d = np.array([[1, 2], [3, 4]]).view(cls) + result = pytest.warns( + np.ma.core.MaskedArrayFutureWarning, argsort, arr_2d) + assert_equal(result, argsort(arr_2d, axis=None)) + + # should be no warnings for explicitly specifying it + argsort(arr_2d, axis=None) + argsort(arr_2d, axis=-1) + + def test_function_ndarray(self): + return self._test_base(np.ma.argsort, np.ndarray) + + def test_function_maskedarray(self): + return self._test_base(np.ma.argsort, np.ma.MaskedArray) + + def test_method(self): + return self._test_base(np.ma.MaskedArray.argsort, np.ma.MaskedArray) + + +class TestMinimumMaximum: + + def test_axis_default(self): + # NumPy 1.13, 2017-05-06 + + data1d = np.ma.arange(6) + data2d = data1d.reshape(2, 3) + + ma_min = np.ma.minimum.reduce + ma_max = np.ma.maximum.reduce + + # check that the default axis is still None, but warns on 2d arrays + result = pytest.warns(MaskedArrayFutureWarning, ma_max, data2d) + assert_equal(result, ma_max(data2d, axis=None)) + + result = pytest.warns(MaskedArrayFutureWarning, ma_min, data2d) + assert_equal(result, ma_min(data2d, axis=None)) + + # no warnings on 1d, as both new and old defaults are equivalent + result = ma_min(data1d) + assert_equal(result, ma_min(data1d, axis=None)) + assert_equal(result, ma_min(data1d, axis=0)) + + result = ma_max(data1d) + assert_equal(result, ma_max(data1d, axis=None)) + assert_equal(result, ma_max(data1d, axis=0)) diff --git a/local_packages/numpy/ma/tests/test_extras.py b/local_packages/numpy/ma/tests/test_extras.py new file mode 100644 index 0000000..1993ffe --- /dev/null +++ b/local_packages/numpy/ma/tests/test_extras.py @@ -0,0 +1,1945 @@ +"""Tests suite for MaskedArray. +Adapted from the original test_ma by Pierre Gerard-Marchant + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu + +""" +import inspect +import itertools + +import pytest + +import numpy as np +from numpy._core.numeric import normalize_axis_tuple +from numpy.ma.core import ( + MaskedArray, + arange, + array, + count, + getmaskarray, + masked, + masked_array, + nomask, + ones, + shape, + zeros, +) +from numpy.ma.extras import ( + _covhelper, + apply_along_axis, + apply_over_axes, + atleast_1d, + atleast_2d, + atleast_3d, + average, + clump_masked, + clump_unmasked, + compress_nd, + compress_rowcols, + corrcoef, + cov, + diagflat, + dot, + ediff1d, + flatnotmasked_contiguous, + in1d, + intersect1d, + isin, + mask_rowcols, + masked_all, + masked_all_like, + median, + mr_, + ndenumerate, + notmasked_contiguous, + notmasked_edges, + polyfit, + setdiff1d, + setxor1d, + stack, + union1d, + unique, + vstack, +) +from numpy.ma.testutils import ( + assert_, + assert_almost_equal, + assert_array_equal, + assert_equal, +) + + +class TestGeneric: + # + def test_masked_all(self): + # Tests masked_all + # Standard dtype + test = masked_all((2,), dtype=float) + control = array([1, 1], mask=[1, 1], dtype=float) + assert_equal(test, control) + # Flexible dtype + dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']}) + test = masked_all((2,), dtype=dt) + control = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt) + assert_equal(test, control) + test = masked_all((2, 2), dtype=dt) + control = array([[(0, 0), (0, 0)], [(0, 0), (0, 0)]], + mask=[[(1, 1), (1, 1)], [(1, 1), (1, 1)]], + dtype=dt) + assert_equal(test, control) + # Nested dtype + dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])]) + test = masked_all((2,), dtype=dt) + control = array([(1, (1, 1)), (1, (1, 1))], + mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) + assert_equal(test, control) + test = masked_all((2,), dtype=dt) + control = array([(1, (1, 1)), (1, (1, 1))], + mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) + assert_equal(test, control) + test = masked_all((1, 1), dtype=dt) + control = array([[(1, (1, 1))]], mask=[[(1, (1, 1))]], dtype=dt) + assert_equal(test, control) + + def test_masked_all_with_object_nested(self): + # Test masked_all works with nested array with dtype of an 'object' + # refers to issue #15895 + my_dtype = np.dtype([('b', ([('c', object)], (1,)))]) + masked_arr = np.ma.masked_all((1,), my_dtype) + + assert_equal(type(masked_arr['b']), np.ma.core.MaskedArray) + assert_equal(type(masked_arr['b']['c']), np.ma.core.MaskedArray) + assert_equal(len(masked_arr['b']['c']), 1) + assert_equal(masked_arr['b']['c'].shape, (1, 1)) + assert_equal(masked_arr['b']['c']._fill_value.shape, ()) + + def test_masked_all_with_object(self): + # same as above except that the array is not nested + my_dtype = np.dtype([('b', (object, (1,)))]) + masked_arr = np.ma.masked_all((1,), my_dtype) + + assert_equal(type(masked_arr['b']), np.ma.core.MaskedArray) + assert_equal(len(masked_arr['b']), 1) + assert_equal(masked_arr['b'].shape, (1, 1)) + assert_equal(masked_arr['b']._fill_value.shape, ()) + + def test_masked_all_like(self): + # Tests masked_all + # Standard dtype + base = array([1, 2], dtype=float) + test = masked_all_like(base) + control = array([1, 1], mask=[1, 1], dtype=float) + assert_equal(test, control) + # Flexible dtype + dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']}) + base = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt) + test = masked_all_like(base) + control = array([(10, 10), (10, 10)], mask=[(1, 1), (1, 1)], dtype=dt) + assert_equal(test, control) + # Nested dtype + dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])]) + control = array([(1, (1, 1)), (1, (1, 1))], + mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) + test = masked_all_like(control) + assert_equal(test, control) + + def check_clump(self, f): + for i in range(1, 7): + for j in range(2**i): + k = np.arange(i, dtype=int) + ja = np.full(i, j, dtype=int) + a = masked_array(2**k) + a.mask = (ja & (2**k)) != 0 + s = 0 + for sl in f(a): + s += a.data[sl].sum() + if f == clump_unmasked: + assert_equal(a.compressed().sum(), s) + else: + a.mask = ~a.mask + assert_equal(a.compressed().sum(), s) + + def test_clump_masked(self): + # Test clump_masked + a = masked_array(np.arange(10)) + a[[0, 1, 2, 6, 8, 9]] = masked + # + test = clump_masked(a) + control = [slice(0, 3), slice(6, 7), slice(8, 10)] + assert_equal(test, control) + + self.check_clump(clump_masked) + + def test_clump_unmasked(self): + # Test clump_unmasked + a = masked_array(np.arange(10)) + a[[0, 1, 2, 6, 8, 9]] = masked + test = clump_unmasked(a) + control = [slice(3, 6), slice(7, 8), ] + assert_equal(test, control) + + self.check_clump(clump_unmasked) + + def test_flatnotmasked_contiguous(self): + # Test flatnotmasked_contiguous + a = arange(10) + # No mask + test = flatnotmasked_contiguous(a) + assert_equal(test, [slice(0, a.size)]) + # mask of all false + a.mask = np.zeros(10, dtype=bool) + assert_equal(test, [slice(0, a.size)]) + # Some mask + a[(a < 3) | (a > 8) | (a == 5)] = masked + test = flatnotmasked_contiguous(a) + assert_equal(test, [slice(3, 5), slice(6, 9)]) + # + a[:] = masked + test = flatnotmasked_contiguous(a) + assert_equal(test, []) + + +class TestAverage: + # Several tests of average. Why so many ? Good point... + def test_testAverage1(self): + # Test of average. + ott = array([0., 1., 2., 3.], mask=[True, False, False, False]) + assert_equal(2.0, average(ott, axis=0)) + assert_equal(2.0, average(ott, weights=[1., 1., 2., 1.])) + result, wts = average(ott, weights=[1., 1., 2., 1.], returned=True) + assert_equal(2.0, result) + assert_(wts == 4.0) + ott[:] = masked + assert_equal(average(ott, axis=0).mask, [True]) + ott = array([0., 1., 2., 3.], mask=[True, False, False, False]) + ott = ott.reshape(2, 2) + ott[:, 1] = masked + assert_equal(average(ott, axis=0), [2.0, 0.0]) + assert_equal(average(ott, axis=1).mask[0], [True]) + assert_equal([2., 0.], average(ott, axis=0)) + result, wts = average(ott, axis=0, returned=True) + assert_equal(wts, [1., 0.]) + + def test_testAverage2(self): + # More tests of average. + w1 = [0, 1, 1, 1, 1, 0] + w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] + x = arange(6, dtype=np.float64) + assert_equal(average(x, axis=0), 2.5) + assert_equal(average(x, axis=0, weights=w1), 2.5) + y = array([arange(6, dtype=np.float64), 2.0 * arange(6)]) + assert_equal(average(y, None), np.add.reduce(np.arange(6)) * 3. / 12.) + assert_equal(average(y, axis=0), np.arange(6) * 3. / 2.) + assert_equal(average(y, axis=1), + [average(x, axis=0), average(x, axis=0) * 2.0]) + assert_equal(average(y, None, weights=w2), 20. / 6.) + assert_equal(average(y, axis=0, weights=w2), + [0., 1., 2., 3., 4., 10.]) + assert_equal(average(y, axis=1), + [average(x, axis=0), average(x, axis=0) * 2.0]) + m1 = zeros(6) + m2 = [0, 0, 1, 1, 0, 0] + m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] + m4 = ones(6) + m5 = [0, 1, 1, 1, 1, 1] + assert_equal(average(masked_array(x, m1), axis=0), 2.5) + assert_equal(average(masked_array(x, m2), axis=0), 2.5) + assert_equal(average(masked_array(x, m4), axis=0).mask, [True]) + assert_equal(average(masked_array(x, m5), axis=0), 0.0) + assert_equal(count(average(masked_array(x, m4), axis=0)), 0) + z = masked_array(y, m3) + assert_equal(average(z, None), 20. / 6.) + assert_equal(average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5]) + assert_equal(average(z, axis=1), [2.5, 5.0]) + assert_equal(average(z, axis=0, weights=w2), + [0., 1., 99., 99., 4.0, 10.0]) + + def test_testAverage3(self): + # Yet more tests of average! + a = arange(6) + b = arange(6) * 3 + r1, w1 = average([[a, b], [b, a]], axis=1, returned=True) + assert_equal(shape(r1), shape(w1)) + assert_equal(r1.shape, w1.shape) + r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=True) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), returned=True) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=True) + assert_equal(shape(w2), shape(r2)) + a2d = array([[1, 2], [0, 4]], float) + a2dm = masked_array(a2d, [[False, False], [True, False]]) + a2da = average(a2d, axis=0) + assert_equal(a2da, [0.5, 3.0]) + a2dma = average(a2dm, axis=0) + assert_equal(a2dma, [1.0, 3.0]) + a2dma = average(a2dm, axis=None) + assert_equal(a2dma, 7. / 3.) + a2dma = average(a2dm, axis=1) + assert_equal(a2dma, [1.5, 4.0]) + + def test_testAverage4(self): + # Test that `keepdims` works with average + x = np.array([2, 3, 4]).reshape(3, 1) + b = np.ma.array(x, mask=[[False], [False], [True]]) + w = np.array([4, 5, 6]).reshape(3, 1) + actual = average(b, weights=w, axis=1, keepdims=True) + desired = masked_array([[2.], [3.], [4.]], [[False], [False], [True]]) + assert_equal(actual, desired) + + def test_weight_and_input_dims_different(self): + # this test mirrors a test for np.average() + # in lib/test/test_function_base.py + y = np.arange(12).reshape(2, 2, 3) + w = np.array([0., 0., 1., .5, .5, 0., 0., .5, .5, 1., 0., 0.])\ + .reshape(2, 2, 3) + + m = np.full((2, 2, 3), False) + yma = np.ma.array(y, mask=m) + subw0 = w[:, :, 0] + + actual = average(yma, axis=(0, 1), weights=subw0) + desired = masked_array([7., 8., 9.], mask=[False, False, False]) + assert_almost_equal(actual, desired) + + m = np.full((2, 2, 3), False) + m[:, :, 0] = True + m[0, 0, 1] = True + yma = np.ma.array(y, mask=m) + actual = average(yma, axis=(0, 1), weights=subw0) + desired = masked_array( + [np.nan, 8., 9.], + mask=[True, False, False]) + assert_almost_equal(actual, desired) + + m = np.full((2, 2, 3), False) + yma = np.ma.array(y, mask=m) + + subw1 = w[1, :, :] + actual = average(yma, axis=(1, 2), weights=subw1) + desired = masked_array([2.25, 8.25], mask=[False, False]) + assert_almost_equal(actual, desired) + + # here the weights have the wrong shape for the specified axes + with pytest.raises( + ValueError, + match="Shape of weights must be consistent with " + "shape of a along specified axis"): + average(yma, axis=(0, 1, 2), weights=subw0) + + with pytest.raises( + ValueError, + match="Shape of weights must be consistent with " + "shape of a along specified axis"): + average(yma, axis=(0, 1), weights=subw1) + + # swapping the axes should be same as transposing weights + actual = average(yma, axis=(1, 0), weights=subw0) + desired = average(yma, axis=(0, 1), weights=subw0.T) + assert_almost_equal(actual, desired) + + def test_onintegers_with_mask(self): + # Test average on integers with mask + a = average(array([1, 2])) + assert_equal(a, 1.5) + a = average(array([1, 2, 3, 4], mask=[False, False, True, True])) + assert_equal(a, 1.5) + + def test_complex(self): + # Test with complex data. + # (Regression test for https://github.com/numpy/numpy/issues/2684) + mask = np.array([[0, 0, 0, 1, 0], + [0, 1, 0, 0, 0]], dtype=bool) + a = masked_array([[0, 1 + 2j, 3 + 4j, 5 + 6j, 7 + 8j], + [9j, 0 + 1j, 2 + 3j, 4 + 5j, 7 + 7j]], + mask=mask) + + av = average(a) + expected = np.average(a.compressed()) + assert_almost_equal(av.real, expected.real) + assert_almost_equal(av.imag, expected.imag) + + av0 = average(a, axis=0) + expected0 = average(a.real, axis=0) + average(a.imag, axis=0) * 1j + assert_almost_equal(av0.real, expected0.real) + assert_almost_equal(av0.imag, expected0.imag) + + av1 = average(a, axis=1) + expected1 = average(a.real, axis=1) + average(a.imag, axis=1) * 1j + assert_almost_equal(av1.real, expected1.real) + assert_almost_equal(av1.imag, expected1.imag) + + # Test with the 'weights' argument. + wts = np.array([[0.5, 1.0, 2.0, 1.0, 0.5], + [1.0, 1.0, 1.0, 1.0, 1.0]]) + wav = average(a, weights=wts) + expected = np.average(a.compressed(), weights=wts[~mask]) + assert_almost_equal(wav.real, expected.real) + assert_almost_equal(wav.imag, expected.imag) + + wav0 = average(a, weights=wts, axis=0) + expected0 = (average(a.real, weights=wts, axis=0) + + average(a.imag, weights=wts, axis=0) * 1j) + assert_almost_equal(wav0.real, expected0.real) + assert_almost_equal(wav0.imag, expected0.imag) + + wav1 = average(a, weights=wts, axis=1) + expected1 = (average(a.real, weights=wts, axis=1) + + average(a.imag, weights=wts, axis=1) * 1j) + assert_almost_equal(wav1.real, expected1.real) + assert_almost_equal(wav1.imag, expected1.imag) + + @pytest.mark.parametrize( + 'x, axis, expected_avg, weights, expected_wavg, expected_wsum', + [([1, 2, 3], None, [2.0], [3, 4, 1], [1.75], [8.0]), + ([[1, 2, 5], [1, 6, 11]], 0, [[1.0, 4.0, 8.0]], + [1, 3], [[1.0, 5.0, 9.5]], [[4, 4, 4]])], + ) + def test_basic_keepdims(self, x, axis, expected_avg, + weights, expected_wavg, expected_wsum): + avg = np.ma.average(x, axis=axis, keepdims=True) + assert avg.shape == np.shape(expected_avg) + assert_array_equal(avg, expected_avg) + + wavg = np.ma.average(x, axis=axis, weights=weights, keepdims=True) + assert wavg.shape == np.shape(expected_wavg) + assert_array_equal(wavg, expected_wavg) + + wavg, wsum = np.ma.average(x, axis=axis, weights=weights, + returned=True, keepdims=True) + assert wavg.shape == np.shape(expected_wavg) + assert_array_equal(wavg, expected_wavg) + assert wsum.shape == np.shape(expected_wsum) + assert_array_equal(wsum, expected_wsum) + + def test_masked_weights(self): + # Test with masked weights. + # (Regression test for https://github.com/numpy/numpy/issues/10438) + a = np.ma.array(np.arange(9).reshape(3, 3), + mask=[[1, 0, 0], [1, 0, 0], [0, 0, 0]]) + weights_unmasked = masked_array([5, 28, 31], mask=False) + weights_masked = masked_array([5, 28, 31], mask=[1, 0, 0]) + + avg_unmasked = average(a, axis=0, + weights=weights_unmasked, returned=False) + expected_unmasked = np.array([6.0, 5.21875, 6.21875]) + assert_almost_equal(avg_unmasked, expected_unmasked) + + avg_masked = average(a, axis=0, weights=weights_masked, returned=False) + expected_masked = np.array([6.0, 5.576271186440678, 6.576271186440678]) + assert_almost_equal(avg_masked, expected_masked) + + # weights should be masked if needed + # depending on the array mask. This is to avoid summing + # masked nan or other values that are not cancelled by a zero + a = np.ma.array([1.0, 2.0, 3.0, 4.0], + mask=[False, False, True, True]) + avg_unmasked = average(a, weights=[1, 1, 1, np.nan]) + + assert_almost_equal(avg_unmasked, 1.5) + + a = np.ma.array([ + [1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [9.0, 1.0, 2.0, 3.0], + ], mask=[ + [False, True, True, False], + [True, False, True, True], + [True, False, True, False], + ]) + + avg_masked = np.ma.average(a, weights=[1, np.nan, 1], axis=0) + avg_expected = np.ma.array([1.0, np.nan, np.nan, 3.5], + mask=[False, True, True, False]) + + assert_almost_equal(avg_masked, avg_expected) + assert_equal(avg_masked.mask, avg_expected.mask) + + +class TestConcatenator: + # Tests for mr_, the equivalent of r_ for masked arrays. + + def test_1d(self): + # Tests mr_ on 1D arrays. + assert_array_equal(mr_[1, 2, 3, 4, 5, 6], array([1, 2, 3, 4, 5, 6])) + b = ones(5) + m = [1, 0, 0, 0, 0] + d = masked_array(b, mask=m) + c = mr_[d, 0, 0, d] + assert_(isinstance(c, MaskedArray)) + assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1]) + assert_array_equal(c.mask, mr_[m, 0, 0, m]) + + def test_2d(self): + # Tests mr_ on 2D arrays. + a_1 = np.random.rand(5, 5) + a_2 = np.random.rand(5, 5) + m_1 = np.round(np.random.rand(5, 5), 0) + m_2 = np.round(np.random.rand(5, 5), 0) + b_1 = masked_array(a_1, mask=m_1) + b_2 = masked_array(a_2, mask=m_2) + # append columns + d = mr_['1', b_1, b_2] + assert_(d.shape == (5, 10)) + assert_array_equal(d[:, :5], b_1) + assert_array_equal(d[:, 5:], b_2) + assert_array_equal(d.mask, np.r_['1', m_1, m_2]) + d = mr_[b_1, b_2] + assert_(d.shape == (10, 5)) + assert_array_equal(d[:5, :], b_1) + assert_array_equal(d[5:, :], b_2) + assert_array_equal(d.mask, np.r_[m_1, m_2]) + + def test_masked_constant(self): + actual = mr_[np.ma.masked, 1] + assert_equal(actual.mask, [True, False]) + assert_equal(actual.data[1], 1) + + actual = mr_[[1, 2], np.ma.masked] + assert_equal(actual.mask, [False, False, True]) + assert_equal(actual.data[:2], [1, 2]) + + +class TestNotMasked: + # Tests notmasked_edges and notmasked_contiguous. + + def test_edges(self): + # Tests unmasked_edges + data = masked_array(np.arange(25).reshape(5, 5), + mask=[[0, 0, 1, 0, 0], + [0, 0, 0, 1, 1], + [1, 1, 0, 0, 0], + [0, 0, 0, 0, 0], + [1, 1, 1, 0, 0]],) + test = notmasked_edges(data, None) + assert_equal(test, [0, 24]) + test = notmasked_edges(data, 0) + assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)]) + assert_equal(test[1], [(3, 3, 3, 4, 4), (0, 1, 2, 3, 4)]) + test = notmasked_edges(data, 1) + assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 2, 0, 3)]) + assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 2, 4, 4, 4)]) + # + test = notmasked_edges(data.data, None) + assert_equal(test, [0, 24]) + test = notmasked_edges(data.data, 0) + assert_equal(test[0], [(0, 0, 0, 0, 0), (0, 1, 2, 3, 4)]) + assert_equal(test[1], [(4, 4, 4, 4, 4), (0, 1, 2, 3, 4)]) + test = notmasked_edges(data.data, -1) + assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 0, 0, 0)]) + assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 4, 4, 4, 4)]) + # + data[-2] = masked + test = notmasked_edges(data, 0) + assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)]) + assert_equal(test[1], [(1, 1, 2, 4, 4), (0, 1, 2, 3, 4)]) + test = notmasked_edges(data, -1) + assert_equal(test[0], [(0, 1, 2, 4), (0, 0, 2, 3)]) + assert_equal(test[1], [(0, 1, 2, 4), (4, 2, 4, 4)]) + + def test_contiguous(self): + # Tests notmasked_contiguous + a = masked_array(np.arange(24).reshape(3, 8), + mask=[[0, 0, 0, 0, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 1, 0]]) + tmp = notmasked_contiguous(a, None) + assert_equal(tmp, [ + slice(0, 4, None), + slice(16, 22, None), + slice(23, 24, None) + ]) + + tmp = notmasked_contiguous(a, 0) + assert_equal(tmp, [ + [slice(0, 1, None), slice(2, 3, None)], + [slice(0, 1, None), slice(2, 3, None)], + [slice(0, 1, None), slice(2, 3, None)], + [slice(0, 1, None), slice(2, 3, None)], + [slice(2, 3, None)], + [slice(2, 3, None)], + [], + [slice(2, 3, None)] + ]) + # + tmp = notmasked_contiguous(a, 1) + assert_equal(tmp, [ + [slice(0, 4, None)], + [], + [slice(0, 6, None), slice(7, 8, None)] + ]) + + +class TestCompressFunctions: + + def test_compress_nd(self): + # Tests compress_nd + x = np.array(list(range(3 * 4 * 5))).reshape(3, 4, 5) + m = np.zeros((3, 4, 5)).astype(bool) + m[1, 1, 1] = True + x = array(x, mask=m) + + # axis=None + a = compress_nd(x) + assert_equal(a, [[[ 0, 2, 3, 4], + [10, 12, 13, 14], + [15, 17, 18, 19]], + [[40, 42, 43, 44], + [50, 52, 53, 54], + [55, 57, 58, 59]]]) + + # axis=0 + a = compress_nd(x, 0) + assert_equal(a, [[[ 0, 1, 2, 3, 4], + [ 5, 6, 7, 8, 9], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]], + [[40, 41, 42, 43, 44], + [45, 46, 47, 48, 49], + [50, 51, 52, 53, 54], + [55, 56, 57, 58, 59]]]) + + # axis=1 + a = compress_nd(x, 1) + assert_equal(a, [[[ 0, 1, 2, 3, 4], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]], + [[20, 21, 22, 23, 24], + [30, 31, 32, 33, 34], + [35, 36, 37, 38, 39]], + [[40, 41, 42, 43, 44], + [50, 51, 52, 53, 54], + [55, 56, 57, 58, 59]]]) + + a2 = compress_nd(x, (1,)) + a3 = compress_nd(x, -2) + a4 = compress_nd(x, (-2,)) + assert_equal(a, a2) + assert_equal(a, a3) + assert_equal(a, a4) + + # axis=2 + a = compress_nd(x, 2) + assert_equal(a, [[[ 0, 2, 3, 4], + [ 5, 7, 8, 9], + [10, 12, 13, 14], + [15, 17, 18, 19]], + [[20, 22, 23, 24], + [25, 27, 28, 29], + [30, 32, 33, 34], + [35, 37, 38, 39]], + [[40, 42, 43, 44], + [45, 47, 48, 49], + [50, 52, 53, 54], + [55, 57, 58, 59]]]) + + a2 = compress_nd(x, (2,)) + a3 = compress_nd(x, -1) + a4 = compress_nd(x, (-1,)) + assert_equal(a, a2) + assert_equal(a, a3) + assert_equal(a, a4) + + # axis=(0, 1) + a = compress_nd(x, (0, 1)) + assert_equal(a, [[[ 0, 1, 2, 3, 4], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]], + [[40, 41, 42, 43, 44], + [50, 51, 52, 53, 54], + [55, 56, 57, 58, 59]]]) + a2 = compress_nd(x, (0, -2)) + assert_equal(a, a2) + + # axis=(1, 2) + a = compress_nd(x, (1, 2)) + assert_equal(a, [[[ 0, 2, 3, 4], + [10, 12, 13, 14], + [15, 17, 18, 19]], + [[20, 22, 23, 24], + [30, 32, 33, 34], + [35, 37, 38, 39]], + [[40, 42, 43, 44], + [50, 52, 53, 54], + [55, 57, 58, 59]]]) + + a2 = compress_nd(x, (-2, 2)) + a3 = compress_nd(x, (1, -1)) + a4 = compress_nd(x, (-2, -1)) + assert_equal(a, a2) + assert_equal(a, a3) + assert_equal(a, a4) + + # axis=(0, 2) + a = compress_nd(x, (0, 2)) + assert_equal(a, [[[ 0, 2, 3, 4], + [ 5, 7, 8, 9], + [10, 12, 13, 14], + [15, 17, 18, 19]], + [[40, 42, 43, 44], + [45, 47, 48, 49], + [50, 52, 53, 54], + [55, 57, 58, 59]]]) + + a2 = compress_nd(x, (0, -1)) + assert_equal(a, a2) + + def test_compress_rowcols(self): + # Tests compress_rowcols + x = array(np.arange(9).reshape(3, 3), + mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + assert_equal(compress_rowcols(x), [[4, 5], [7, 8]]) + assert_equal(compress_rowcols(x, 0), [[3, 4, 5], [6, 7, 8]]) + assert_equal(compress_rowcols(x, 1), [[1, 2], [4, 5], [7, 8]]) + x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(compress_rowcols(x), [[0, 2], [6, 8]]) + assert_equal(compress_rowcols(x, 0), [[0, 1, 2], [6, 7, 8]]) + assert_equal(compress_rowcols(x, 1), [[0, 2], [3, 5], [6, 8]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(compress_rowcols(x), [[8]]) + assert_equal(compress_rowcols(x, 0), [[6, 7, 8]]) + assert_equal(compress_rowcols(x, 1,), [[2], [5], [8]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + assert_equal(compress_rowcols(x).size, 0) + assert_equal(compress_rowcols(x, 0).size, 0) + assert_equal(compress_rowcols(x, 1).size, 0) + + def test_mask_rowcols(self): + # Tests mask_rowcols. + x = array(np.arange(9).reshape(3, 3), + mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x).mask, + [[1, 1, 1], [1, 0, 0], [1, 0, 0]]) + assert_equal(mask_rowcols(x, 0).mask, + [[1, 1, 1], [0, 0, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x, 1).mask, + [[1, 0, 0], [1, 0, 0], [1, 0, 0]]) + x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x).mask, + [[0, 1, 0], [1, 1, 1], [0, 1, 0]]) + assert_equal(mask_rowcols(x, 0).mask, + [[0, 0, 0], [1, 1, 1], [0, 0, 0]]) + assert_equal(mask_rowcols(x, 1).mask, + [[0, 1, 0], [0, 1, 0], [0, 1, 0]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x).mask, + [[1, 1, 1], [1, 1, 1], [1, 1, 0]]) + assert_equal(mask_rowcols(x, 0).mask, + [[1, 1, 1], [1, 1, 1], [0, 0, 0]]) + assert_equal(mask_rowcols(x, 1,).mask, + [[1, 1, 0], [1, 1, 0], [1, 1, 0]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + assert_(mask_rowcols(x).all() is masked) + assert_(mask_rowcols(x, 0).all() is masked) + assert_(mask_rowcols(x, 1).all() is masked) + assert_(mask_rowcols(x).mask.all()) + assert_(mask_rowcols(x, 0).mask.all()) + assert_(mask_rowcols(x, 1).mask.all()) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize(["func", "rowcols_axis"], + [(np.ma.mask_rows, 0), (np.ma.mask_cols, 1)]) + def test_mask_row_cols_axis_deprecation(self, axis, func, rowcols_axis): + # Test deprecation of the axis argument to `mask_rows` and `mask_cols` + x = array(np.arange(9).reshape(3, 3), + mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + + with pytest.warns(DeprecationWarning): + res = func(x, axis=axis) + assert_equal(res, mask_rowcols(x, rowcols_axis)) + + def test_dot(self): + # Tests dot product + n = np.arange(1, 7) + # + m = [1, 0, 0, 0, 0, 0] + a = masked_array(n, mask=m).reshape(2, 3) + b = masked_array(n, mask=m).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[1, 1], [1, 0]]) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[1, 1, 1], [1, 0, 0], [1, 0, 0]]) + c = dot(a, b, strict=False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + m = [0, 0, 0, 0, 0, 1] + a = masked_array(n, mask=m).reshape(2, 3) + b = masked_array(n, mask=m).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[0, 1], [1, 1]]) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [1, 1, 1]]) + c = dot(a, b, strict=False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + assert_equal(c, dot(a, b)) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + m = [0, 0, 0, 0, 0, 0] + a = masked_array(n, mask=m).reshape(2, 3) + b = masked_array(n, mask=m).reshape(3, 2) + c = dot(a, b) + assert_equal(c.mask, nomask) + c = dot(b, a) + assert_equal(c.mask, nomask) + # + a = masked_array(n, mask=[1, 0, 0, 0, 0, 0]).reshape(2, 3) + b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[1, 1], [0, 0]]) + c = dot(a, b, strict=False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[1, 0, 0], [1, 0, 0], [1, 0, 0]]) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3) + b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[0, 0], [1, 1]]) + c = dot(a, b) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [0, 0, 1]]) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3) + b = masked_array(n, mask=[0, 0, 1, 0, 0, 0]).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[1, 0], [1, 1]]) + c = dot(a, b, strict=False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[0, 0, 1], [1, 1, 1], [0, 0, 1]]) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + a = masked_array(np.arange(8).reshape(2, 2, 2), + mask=[[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + b = masked_array(np.arange(8).reshape(2, 2, 2), + mask=[[[0, 0], [0, 0]], [[0, 0], [0, 1]]]) + c = dot(a, b, strict=True) + assert_equal(c.mask, + [[[[1, 1], [1, 1]], [[0, 0], [0, 1]]], + [[[0, 0], [0, 1]], [[0, 0], [0, 1]]]]) + c = dot(a, b, strict=False) + assert_equal(c.mask, + [[[[0, 0], [0, 1]], [[0, 0], [0, 0]]], + [[[0, 0], [0, 0]], [[0, 0], [0, 0]]]]) + c = dot(b, a, strict=True) + assert_equal(c.mask, + [[[[1, 0], [0, 0]], [[1, 0], [0, 0]]], + [[[1, 0], [0, 0]], [[1, 1], [1, 1]]]]) + c = dot(b, a, strict=False) + assert_equal(c.mask, + [[[[0, 0], [0, 0]], [[0, 0], [0, 0]]], + [[[0, 0], [0, 0]], [[1, 0], [0, 0]]]]) + # + a = masked_array(np.arange(8).reshape(2, 2, 2), + mask=[[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + b = 5. + c = dot(a, b, strict=True) + assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + c = dot(a, b, strict=False) + assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + c = dot(b, a, strict=False) + assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + # + a = masked_array(np.arange(8).reshape(2, 2, 2), + mask=[[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + b = masked_array(np.arange(2), mask=[0, 1]) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[1, 1], [1, 1]]) + c = dot(a, b, strict=False) + assert_equal(c.mask, [[1, 0], [0, 0]]) + + def test_dot_returns_maskedarray(self): + # See gh-6611 + a = np.eye(3) + b = array(a) + assert_(type(dot(a, a)) is MaskedArray) + assert_(type(dot(a, b)) is MaskedArray) + assert_(type(dot(b, a)) is MaskedArray) + assert_(type(dot(b, b)) is MaskedArray) + + def test_dot_out(self): + a = array(np.eye(3)) + out = array(np.zeros((3, 3))) + res = dot(a, a, out=out) + assert_(res is out) + assert_equal(a, res) + + +class TestApplyAlongAxis: + # Tests 2D functions + def test_3d(self): + a = arange(12.).reshape(2, 2, 3) + + def myfunc(b): + return b[1] + + xa = apply_along_axis(myfunc, 2, a) + assert_equal(xa, [[1, 4], [7, 10]]) + + # Tests kwargs functions + def test_3d_kwargs(self): + a = arange(12).reshape(2, 2, 3) + + def myfunc(b, offset=0): + return b[1 + offset] + + xa = apply_along_axis(myfunc, 2, a, offset=1) + assert_equal(xa, [[2, 5], [8, 11]]) + + +class TestApplyOverAxes: + # Tests apply_over_axes + def test_basic(self): + a = arange(24).reshape(2, 3, 4) + test = apply_over_axes(np.sum, a, [0, 2]) + ctrl = np.array([[[60], [92], [124]]]) + assert_equal(test, ctrl) + a[(a % 2).astype(bool)] = masked + test = apply_over_axes(np.sum, a, [0, 2]) + ctrl = np.array([[[28], [44], [60]]]) + assert_equal(test, ctrl) + + +class TestMedian: + def test_pytype(self): + r = np.ma.median([[np.inf, np.inf], [np.inf, np.inf]], axis=-1) + assert_equal(r, np.inf) + + def test_inf(self): + # test that even which computes handles inf / x = masked + r = np.ma.median(np.ma.masked_array([[np.inf, np.inf], + [np.inf, np.inf]]), axis=-1) + assert_equal(r, np.inf) + r = np.ma.median(np.ma.masked_array([[np.inf, np.inf], + [np.inf, np.inf]]), axis=None) + assert_equal(r, np.inf) + # all masked + r = np.ma.median(np.ma.masked_array([[np.inf, np.inf], + [np.inf, np.inf]], mask=True), + axis=-1) + assert_equal(r.mask, True) + r = np.ma.median(np.ma.masked_array([[np.inf, np.inf], + [np.inf, np.inf]], mask=True), + axis=None) + assert_equal(r.mask, True) + + def test_non_masked(self): + x = np.arange(9) + assert_equal(np.ma.median(x), 4.) + assert_(type(np.ma.median(x)) is not MaskedArray) + x = range(8) + assert_equal(np.ma.median(x), 3.5) + assert_(type(np.ma.median(x)) is not MaskedArray) + x = 5 + assert_equal(np.ma.median(x), 5.) + assert_(type(np.ma.median(x)) is not MaskedArray) + # integer + x = np.arange(9 * 8).reshape(9, 8) + assert_equal(np.ma.median(x, axis=0), np.median(x, axis=0)) + assert_equal(np.ma.median(x, axis=1), np.median(x, axis=1)) + assert_(np.ma.median(x, axis=1) is not MaskedArray) + # float + x = np.arange(9 * 8.).reshape(9, 8) + assert_equal(np.ma.median(x, axis=0), np.median(x, axis=0)) + assert_equal(np.ma.median(x, axis=1), np.median(x, axis=1)) + assert_(np.ma.median(x, axis=1) is not MaskedArray) + + def test_docstring_examples(self): + "test the examples given in the docstring of ma.median" + x = array(np.arange(8), mask=[0] * 4 + [1] * 4) + assert_equal(np.ma.median(x), 1.5) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + x = array(np.arange(10).reshape(2, 5), mask=[0] * 6 + [1] * 4) + assert_equal(np.ma.median(x), 2.5) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + ma_x = np.ma.median(x, axis=-1, overwrite_input=True) + assert_equal(ma_x, [2., 5.]) + assert_equal(ma_x.shape, (2,), "shape mismatch") + assert_(type(ma_x) is MaskedArray) + + def test_axis_argument_errors(self): + msg = "mask = %s, ndim = %s, axis = %s, overwrite_input = %s" + for ndmin in range(5): + for mask in [False, True]: + x = array(1, ndmin=ndmin, mask=mask) + + # Valid axis values should not raise exception + args = itertools.product(range(-ndmin, ndmin), [False, True]) + for axis, over in args: + try: + np.ma.median(x, axis=axis, overwrite_input=over) + except Exception: + raise AssertionError(msg % (mask, ndmin, axis, over)) + + # Invalid axis values should raise exception + args = itertools.product([-(ndmin + 1), ndmin], [False, True]) + for axis, over in args: + try: + np.ma.median(x, axis=axis, overwrite_input=over) + except np.exceptions.AxisError: + pass + else: + raise AssertionError(msg % (mask, ndmin, axis, over)) + + def test_masked_0d(self): + # Check values + x = array(1, mask=False) + assert_equal(np.ma.median(x), 1) + x = array(1, mask=True) + assert_equal(np.ma.median(x), np.ma.masked) + + def test_masked_1d(self): + x = array(np.arange(5), mask=True) + assert_equal(np.ma.median(x), np.ma.masked) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is np.ma.core.MaskedConstant) + x = array(np.arange(5), mask=False) + assert_equal(np.ma.median(x), 2.) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + x = array(np.arange(5), mask=[0, 1, 0, 0, 0]) + assert_equal(np.ma.median(x), 2.5) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + x = array(np.arange(5), mask=[0, 1, 1, 1, 1]) + assert_equal(np.ma.median(x), 0.) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + # integer + x = array(np.arange(5), mask=[0, 1, 1, 0, 0]) + assert_equal(np.ma.median(x), 3.) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + # float + x = array(np.arange(5.), mask=[0, 1, 1, 0, 0]) + assert_equal(np.ma.median(x), 3.) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + # integer + x = array(np.arange(6), mask=[0, 1, 1, 1, 1, 0]) + assert_equal(np.ma.median(x), 2.5) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + # float + x = array(np.arange(6.), mask=[0, 1, 1, 1, 1, 0]) + assert_equal(np.ma.median(x), 2.5) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + + def test_1d_shape_consistency(self): + assert_equal(np.ma.median(array([1, 2, 3], mask=[0, 0, 0])).shape, + np.ma.median(array([1, 2, 3], mask=[0, 1, 0])).shape) + + def test_2d(self): + # Tests median w/ 2D + (n, p) = (101, 30) + x = masked_array(np.linspace(-1., 1., n),) + x[:10] = x[-10:] = masked + z = masked_array(np.empty((n, p), dtype=float)) + z[:, 0] = x[:] + idx = np.arange(len(x)) + for i in range(1, p): + np.random.shuffle(idx) + z[:, i] = x[idx] + assert_equal(median(z[:, 0]), 0) + assert_equal(median(z), 0) + assert_equal(median(z, axis=0), np.zeros(p)) + assert_equal(median(z.T, axis=1), np.zeros(p)) + + def test_2d_waxis(self): + # Tests median w/ 2D arrays and different axis. + x = masked_array(np.arange(30).reshape(10, 3)) + x[:3] = x[-3:] = masked + assert_equal(median(x), 14.5) + assert_(type(np.ma.median(x)) is not MaskedArray) + assert_equal(median(x, axis=0), [13.5, 14.5, 15.5]) + assert_(type(np.ma.median(x, axis=0)) is MaskedArray) + assert_equal(median(x, axis=1), [0, 0, 0, 10, 13, 16, 19, 0, 0, 0]) + assert_(type(np.ma.median(x, axis=1)) is MaskedArray) + assert_equal(median(x, axis=1).mask, [1, 1, 1, 0, 0, 0, 0, 1, 1, 1]) + + def test_3d(self): + # Tests median w/ 3D + x = np.ma.arange(24).reshape(3, 4, 2) + x[x % 3 == 0] = masked + assert_equal(median(x, 0), [[12, 9], [6, 15], [12, 9], [18, 15]]) + x = x.reshape((4, 3, 2)) + assert_equal(median(x, 0), [[99, 10], [11, 99], [13, 14]]) + x = np.ma.arange(24).reshape(4, 3, 2) + x[x % 5 == 0] = masked + assert_equal(median(x, 0), [[12, 10], [8, 9], [16, 17]]) + + def test_neg_axis(self): + x = masked_array(np.arange(30).reshape(10, 3)) + x[:3] = x[-3:] = masked + assert_equal(median(x, axis=-1), median(x, axis=1)) + + def test_out_1d(self): + # integer float even odd + for v in (30, 30., 31, 31.): + x = masked_array(np.arange(v)) + x[:3] = x[-3:] = masked + out = masked_array(np.ones(())) + r = median(x, out=out) + if v == 30: + assert_equal(out, 14.5) + else: + assert_equal(out, 15.) + assert_(r is out) + assert_(type(r) is MaskedArray) + + def test_out(self): + # integer float even odd + for v in (40, 40., 30, 30.): + x = masked_array(np.arange(v).reshape(10, -1)) + x[:3] = x[-3:] = masked + out = masked_array(np.ones(10)) + r = median(x, axis=1, out=out) + if v == 30: + e = masked_array([0.] * 3 + [10, 13, 16, 19] + [0.] * 3, + mask=[True] * 3 + [False] * 4 + [True] * 3) + else: + e = masked_array([0.] * 3 + [13.5, 17.5, 21.5, 25.5] + [0.] * 3, + mask=[True] * 3 + [False] * 4 + [True] * 3) + assert_equal(r, e) + assert_(r is out) + assert_(type(r) is MaskedArray) + + @pytest.mark.parametrize( + argnames='axis', + argvalues=[ + None, + 1, + (1, ), + (0, 1), + (-3, -1), + ] + ) + def test_keepdims_out(self, axis): + mask = np.zeros((3, 5, 7, 11), dtype=bool) + # Randomly set some elements to True: + w = np.random.random((4, 200)) * np.array(mask.shape)[:, None] + w = w.astype(np.intp) + mask[tuple(w)] = np.nan + d = masked_array(np.ones(mask.shape), mask=mask) + if axis is None: + shape_out = (1,) * d.ndim + else: + axis_norm = normalize_axis_tuple(axis, d.ndim) + shape_out = tuple( + 1 if i in axis_norm else d.shape[i] for i in range(d.ndim)) + out = masked_array(np.empty(shape_out)) + result = median(d, axis=axis, keepdims=True, out=out) + assert result is out + assert_equal(result.shape, shape_out) + + def test_single_non_masked_value_on_axis(self): + data = [[1., 0.], + [0., 3.], + [0., 0.]] + masked_arr = np.ma.masked_equal(data, 0) + expected = [1., 3.] + assert_array_equal(np.ma.median(masked_arr, axis=0), + expected) + + def test_nan(self): + for mask in (False, np.zeros(6, dtype=bool)): + dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]]) + dm.mask = mask + + # scalar result + r = np.ma.median(dm, axis=None) + assert_(np.isscalar(r)) + assert_array_equal(r, np.nan) + r = np.ma.median(dm.ravel(), axis=0) + assert_(np.isscalar(r)) + assert_array_equal(r, np.nan) + + r = np.ma.median(dm, axis=0) + assert_equal(type(r), MaskedArray) + assert_array_equal(r, [1, np.nan, 3]) + r = np.ma.median(dm, axis=1) + assert_equal(type(r), MaskedArray) + assert_array_equal(r, [np.nan, 2]) + r = np.ma.median(dm, axis=-1) + assert_equal(type(r), MaskedArray) + assert_array_equal(r, [np.nan, 2]) + + dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]]) + dm[:, 2] = np.ma.masked + assert_array_equal(np.ma.median(dm, axis=None), np.nan) + assert_array_equal(np.ma.median(dm, axis=0), [1, np.nan, 3]) + assert_array_equal(np.ma.median(dm, axis=1), [np.nan, 1.5]) + + def test_out_nan(self): + o = np.ma.masked_array(np.zeros((4,))) + d = np.ma.masked_array(np.ones((3, 4))) + d[2, 1] = np.nan + d[2, 2] = np.ma.masked + assert_equal(np.ma.median(d, 0, out=o), o) + o = np.ma.masked_array(np.zeros((3,))) + assert_equal(np.ma.median(d, 1, out=o), o) + o = np.ma.masked_array(np.zeros(())) + assert_equal(np.ma.median(d, out=o), o) + + def test_nan_behavior(self): + a = np.ma.masked_array(np.arange(24, dtype=float)) + a[::3] = np.ma.masked + a[2] = np.nan + assert_array_equal(np.ma.median(a), np.nan) + assert_array_equal(np.ma.median(a, axis=0), np.nan) + + a = np.ma.masked_array(np.arange(24, dtype=float).reshape(2, 3, 4)) + a.mask = np.arange(a.size) % 2 == 1 + aorig = a.copy() + a[1, 2, 3] = np.nan + a[1, 1, 2] = np.nan + + # no axis + assert_array_equal(np.ma.median(a), np.nan) + assert_(np.isscalar(np.ma.median(a))) + + # axis0 + b = np.ma.median(aorig, axis=0) + b[2, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.ma.median(a, 0), b) + + # axis1 + b = np.ma.median(aorig, axis=1) + b[1, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.ma.median(a, 1), b) + + # axis02 + b = np.ma.median(aorig, axis=(0, 2)) + b[1] = np.nan + b[2] = np.nan + assert_equal(np.ma.median(a, (0, 2)), b) + + def test_ambigous_fill(self): + # 255 is max value, used as filler for sort + a = np.array([[3, 3, 255], [3, 3, 255]], dtype=np.uint8) + a = np.ma.masked_array(a, mask=a == 3) + assert_array_equal(np.ma.median(a, axis=1), 255) + assert_array_equal(np.ma.median(a, axis=1).mask, False) + assert_array_equal(np.ma.median(a, axis=0), a[0]) + assert_array_equal(np.ma.median(a), 255) + + def test_special(self): + for inf in [np.inf, -np.inf]: + a = np.array([[inf, np.nan], [np.nan, np.nan]]) + a = np.ma.masked_array(a, mask=np.isnan(a)) + assert_equal(np.ma.median(a, axis=0), [inf, np.nan]) + assert_equal(np.ma.median(a, axis=1), [inf, np.nan]) + assert_equal(np.ma.median(a), inf) + + a = np.array([[np.nan, np.nan, inf], [np.nan, np.nan, inf]]) + a = np.ma.masked_array(a, mask=np.isnan(a)) + assert_array_equal(np.ma.median(a, axis=1), inf) + assert_array_equal(np.ma.median(a, axis=1).mask, False) + assert_array_equal(np.ma.median(a, axis=0), a[0]) + assert_array_equal(np.ma.median(a), inf) + + # no mask + a = np.array([[inf, inf], [inf, inf]]) + assert_equal(np.ma.median(a), inf) + assert_equal(np.ma.median(a, axis=0), inf) + assert_equal(np.ma.median(a, axis=1), inf) + + a = np.array([[inf, 7, -inf, -9], + [-10, np.nan, np.nan, 5], + [4, np.nan, np.nan, inf]], + dtype=np.float32) + a = np.ma.masked_array(a, mask=np.isnan(a)) + if inf > 0: + assert_equal(np.ma.median(a, axis=0), [4., 7., -inf, 5.]) + assert_equal(np.ma.median(a), 4.5) + else: + assert_equal(np.ma.median(a, axis=0), [-10., 7., -inf, -9.]) + assert_equal(np.ma.median(a), -2.5) + assert_equal(np.ma.median(a, axis=1), [-1., -2.5, inf]) + + for i in range(10): + for j in range(1, 10): + a = np.array([([np.nan] * i) + ([inf] * j)] * 2) + a = np.ma.masked_array(a, mask=np.isnan(a)) + assert_equal(np.ma.median(a), inf) + assert_equal(np.ma.median(a, axis=1), inf) + assert_equal(np.ma.median(a, axis=0), + ([np.nan] * i) + [inf] * j) + + def test_empty(self): + # empty arrays + a = np.ma.masked_array(np.array([], dtype=float)) + with pytest.warns(RuntimeWarning): + assert_array_equal(np.ma.median(a), np.nan) + + # multiple dimensions + a = np.ma.masked_array(np.array([], dtype=float, ndmin=3)) + # no axis + with pytest.warns(RuntimeWarning): + assert_array_equal(np.ma.median(a), np.nan) + + # axis 0 and 1 + b = np.ma.masked_array(np.array([], dtype=float, ndmin=2)) + assert_equal(np.ma.median(a, axis=0), b) + assert_equal(np.ma.median(a, axis=1), b) + + # axis 2 + b = np.ma.masked_array(np.array(np.nan, dtype=float, ndmin=2)) + with pytest.warns(RuntimeWarning): + assert_equal(np.ma.median(a, axis=2), b) + + def test_object(self): + o = np.ma.masked_array(np.arange(7.)) + assert_(type(np.ma.median(o.astype(object))), float) + o[2] = np.nan + assert_(type(np.ma.median(o.astype(object))), float) + + +class TestCov: + + def _create_data(self): + return array(np.random.rand(12)) + + def test_covhelper(self): + x = self._create_data() + # Test not mask output type is a float. + assert_(_covhelper(x, rowvar=True)[1].dtype, np.float32) + assert_(_covhelper(x, y=x, rowvar=False)[1].dtype, np.float32) + # Test not mask output is equal after casting to float. + mask = x > 0.5 + assert_array_equal( + _covhelper( + np.ma.masked_array(x, mask), rowvar=True + )[1].astype(bool), + ~mask.reshape(1, -1), + ) + assert_array_equal( + _covhelper( + np.ma.masked_array(x, mask), y=x, rowvar=False + )[1].astype(bool), + np.vstack((~mask, ~mask)), + ) + + def test_1d_without_missing(self): + # Test cov on 1D variable w/o missing values + x = self._create_data() + assert_almost_equal(np.cov(x), cov(x)) + assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) + assert_almost_equal(np.cov(x, rowvar=False, bias=True), + cov(x, rowvar=False, bias=True)) + + def test_2d_without_missing(self): + # Test cov on 1 2D variable w/o missing values + x = self._create_data().reshape(3, 4) + assert_almost_equal(np.cov(x), cov(x)) + assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) + assert_almost_equal(np.cov(x, rowvar=False, bias=True), + cov(x, rowvar=False, bias=True)) + + def test_1d_with_missing(self): + # Test cov 1 1D variable w/missing values + x = self._create_data() + x[-1] = masked + x -= x.mean() + nx = x.compressed() + assert_almost_equal(np.cov(nx), cov(x)) + assert_almost_equal(np.cov(nx, rowvar=False), cov(x, rowvar=False)) + assert_almost_equal(np.cov(nx, rowvar=False, bias=True), + cov(x, rowvar=False, bias=True)) + # + try: + cov(x, allow_masked=False) + except ValueError: + pass + # + # 2 1D variables w/ missing values + nx = x[1:-1] + assert_almost_equal(np.cov(nx, nx[::-1]), cov(x, x[::-1])) + assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False), + cov(x, x[::-1], rowvar=False)) + assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False, bias=True), + cov(x, x[::-1], rowvar=False, bias=True)) + + def test_2d_with_missing(self): + # Test cov on 2D variable w/ missing value + x = self._create_data() + x[-1] = masked + x = x.reshape(3, 4) + valid = np.logical_not(getmaskarray(x)).astype(int) + frac = np.dot(valid, valid.T) + xf = (x - x.mean(1)[:, None]).filled(0) + assert_almost_equal(cov(x), + np.cov(xf) * (x.shape[1] - 1) / (frac - 1.)) + assert_almost_equal(cov(x, bias=True), + np.cov(xf, bias=True) * x.shape[1] / frac) + frac = np.dot(valid.T, valid) + xf = (x - x.mean(0)).filled(0) + assert_almost_equal(cov(x, rowvar=False), + (np.cov(xf, rowvar=False) * + (x.shape[0] - 1) / (frac - 1.))) + assert_almost_equal(cov(x, rowvar=False, bias=True), + (np.cov(xf, rowvar=False, bias=True) * + x.shape[0] / frac)) + + +class TestCorrcoef: + + def _create_data(self): + data = array(np.random.rand(12)) + data2 = array(np.random.rand(12)) + return data, data2 + + def test_1d_without_missing(self): + # Test cov on 1D variable w/o missing values + x = self._create_data()[0] + assert_almost_equal(np.corrcoef(x), corrcoef(x)) + assert_almost_equal(np.corrcoef(x, rowvar=False), + corrcoef(x, rowvar=False)) + + def test_2d_without_missing(self): + # Test corrcoef on 1 2D variable w/o missing values + x = self._create_data()[0].reshape(3, 4) + assert_almost_equal(np.corrcoef(x), corrcoef(x)) + assert_almost_equal(np.corrcoef(x, rowvar=False), + corrcoef(x, rowvar=False)) + + def test_1d_with_missing(self): + # Test corrcoef 1 1D variable w/missing values + x = self._create_data()[0] + x[-1] = masked + x -= x.mean() + nx = x.compressed() + assert_almost_equal(np.corrcoef(nx, rowvar=False), + corrcoef(x, rowvar=False)) + try: + corrcoef(x, allow_masked=False) + except ValueError: + pass + # 2 1D variables w/ missing values + nx = x[1:-1] + assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1])) + assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False), + corrcoef(x, x[::-1], rowvar=False)) + + def test_2d_with_missing(self): + # Test corrcoef on 2D variable w/ missing value + x = self._create_data()[0] + x[-1] = masked + x = x.reshape(3, 4) + + test = corrcoef(x) + control = np.corrcoef(x) + assert_almost_equal(test[:-1, :-1], control[:-1, :-1]) + + +class TestPolynomial: + + def test_polyfit(self): + # Tests polyfit + # On ndarrays + x = np.random.rand(10) + y = np.random.rand(20).reshape(-1, 2) + assert_almost_equal(polyfit(x, y, 3), np.polyfit(x, y, 3)) + # ON 1D maskedarrays + x = x.view(MaskedArray) + x[0] = masked + y = y.view(MaskedArray) + y[0, 0] = y[-1, -1] = masked + # + (C, R, K, S, D) = polyfit(x, y[:, 0], 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:], y[1:, 0].compressed(), 3, + full=True) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): + assert_almost_equal(a, a_) + # + (C, R, K, S, D) = polyfit(x, y[:, -1], 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, -1], 3, full=True) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): + assert_almost_equal(a, a_) + # + (C, R, K, S, D) = polyfit(x, y, 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, :], 3, full=True) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): + assert_almost_equal(a, a_) + # + w = np.random.rand(10) + 1 + wo = w.copy() + xs = x[1:-1] + ys = y[1:-1] + ws = w[1:-1] + (C, R, K, S, D) = polyfit(x, y, 3, full=True, w=w) + (c, r, k, s, d) = np.polyfit(xs, ys, 3, full=True, w=ws) + assert_equal(w, wo) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): + assert_almost_equal(a, a_) + + def test_polyfit_with_masked_NaNs(self): + x = np.random.rand(10) + y = np.random.rand(20).reshape(-1, 2) + + x[0] = np.nan + y[-1, -1] = np.nan + x = x.view(MaskedArray) + y = y.view(MaskedArray) + x[0] = masked + y[-1, -1] = masked + + (C, R, K, S, D) = polyfit(x, y, 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, :], 3, full=True) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): + assert_almost_equal(a, a_) + + +class TestArraySetOps: + + def test_unique_onlist(self): + # Test unique on list + data = [1, 1, 1, 2, 2, 3] + test = unique(data, return_index=True, return_inverse=True) + assert_(isinstance(test[0], MaskedArray)) + assert_equal(test[0], masked_array([1, 2, 3], mask=[0, 0, 0])) + assert_equal(test[1], [0, 3, 5]) + assert_equal(test[2], [0, 0, 0, 1, 1, 2]) + + def test_unique_onmaskedarray(self): + # Test unique on masked data w/use_mask=True + data = masked_array([1, 1, 1, 2, 2, 3], mask=[0, 0, 1, 0, 1, 0]) + test = unique(data, return_index=True, return_inverse=True) + assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1])) + assert_equal(test[1], [0, 3, 5, 2]) + assert_equal(test[2], [0, 0, 3, 1, 3, 2]) + # + data.fill_value = 3 + data = masked_array(data=[1, 1, 1, 2, 2, 3], + mask=[0, 0, 1, 0, 1, 0], fill_value=3) + test = unique(data, return_index=True, return_inverse=True) + assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1])) + assert_equal(test[1], [0, 3, 5, 2]) + assert_equal(test[2], [0, 0, 3, 1, 3, 2]) + + def test_unique_allmasked(self): + # Test all masked + data = masked_array([1, 1, 1], mask=True) + test = unique(data, return_index=True, return_inverse=True) + assert_equal(test[0], masked_array([1, ], mask=[True])) + assert_equal(test[1], [0]) + assert_equal(test[2], [0, 0, 0]) + # + # Test masked + data = masked + test = unique(data, return_index=True, return_inverse=True) + assert_equal(test[0], masked_array(masked)) + assert_equal(test[1], [0]) + assert_equal(test[2], [0]) + + def test_ediff1d(self): + # Tests mediff1d + x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) + control = array([1, 1, 1, 4], mask=[1, 0, 0, 1]) + test = ediff1d(x) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + + def test_ediff1d_tobegin(self): + # Test ediff1d w/ to_begin + x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) + test = ediff1d(x, to_begin=masked) + control = array([0, 1, 1, 1, 4], mask=[1, 1, 0, 0, 1]) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_begin=[1, 2, 3]) + control = array([1, 2, 3, 1, 1, 1, 4], mask=[0, 0, 0, 1, 0, 0, 1]) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + + def test_ediff1d_toend(self): + # Test ediff1d w/ to_end + x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) + test = ediff1d(x, to_end=masked) + control = array([1, 1, 1, 4, 0], mask=[1, 0, 0, 1, 1]) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_end=[1, 2, 3]) + control = array([1, 1, 1, 4, 1, 2, 3], mask=[1, 0, 0, 1, 0, 0, 0]) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + + def test_ediff1d_tobegin_toend(self): + # Test ediff1d w/ to_begin and to_end + x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) + test = ediff1d(x, to_end=masked, to_begin=masked) + control = array([0, 1, 1, 1, 4, 0], mask=[1, 1, 0, 0, 1, 1]) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_end=[1, 2, 3], to_begin=masked) + control = array([0, 1, 1, 1, 4, 1, 2, 3], + mask=[1, 1, 0, 0, 1, 0, 0, 0]) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + + def test_ediff1d_ndarray(self): + # Test ediff1d w/ a ndarray + x = np.arange(5) + test = ediff1d(x) + control = array([1, 1, 1, 1], mask=[0, 0, 0, 0]) + assert_equal(test, control) + assert_(isinstance(test, MaskedArray)) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_end=masked, to_begin=masked) + control = array([0, 1, 1, 1, 1, 0], mask=[1, 0, 0, 0, 0, 1]) + assert_(isinstance(test, MaskedArray)) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + + def test_intersect1d(self): + # Test intersect1d + x = array([1, 3, 3, 3], mask=[0, 0, 0, 1]) + y = array([3, 1, 1, 1], mask=[0, 0, 0, 1]) + test = intersect1d(x, y) + control = array([1, 3, -1], mask=[0, 0, 1]) + assert_equal(test, control) + + def test_setxor1d(self): + # Test setxor1d + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + test = setxor1d(a, b) + assert_equal(test, array([3, 4, 7])) + # + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = [1, 2, 3, 4, 5] + test = setxor1d(a, b) + assert_equal(test, array([3, 4, 7, -1], mask=[0, 0, 0, 1])) + # + a = array([1, 2, 3]) + b = array([6, 5, 4]) + test = setxor1d(a, b) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + # + a = array([1, 8, 2, 3], mask=[0, 1, 0, 0]) + b = array([6, 5, 4, 8], mask=[0, 0, 0, 1]) + test = setxor1d(a, b) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + # + assert_array_equal([], setxor1d([], [])) + + def test_setxor1d_unique(self): + # Test setxor1d with assume_unique=True + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = [1, 2, 3, 4, 5] + test = setxor1d(a, b, assume_unique=True) + assert_equal(test, array([3, 4, 7, -1], mask=[0, 0, 0, 1])) + # + a = array([1, 8, 2, 3], mask=[0, 1, 0, 0]) + b = array([6, 5, 4, 8], mask=[0, 0, 0, 1]) + test = setxor1d(a, b, assume_unique=True) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + # + a = array([[1], [8], [2], [3]]) + b = array([[6, 5], [4, 8]]) + test = setxor1d(a, b, assume_unique=True) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + + def test_isin(self): + # the tests for in1d cover most of isin's behavior + # if in1d is removed, would need to change those tests to test + # isin instead. + a = np.arange(24).reshape([2, 3, 4]) + mask = np.zeros([2, 3, 4]) + mask[1, 2, 0] = 1 + a = array(a, mask=mask) + b = array(data=[0, 10, 20, 30, 1, 3, 11, 22, 33], + mask=[0, 1, 0, 1, 0, 1, 0, 1, 0]) + ec = zeros((2, 3, 4), dtype=bool) + ec[0, 0, 0] = True + ec[0, 0, 1] = True + ec[0, 2, 3] = True + c = isin(a, b) + assert_(isinstance(c, MaskedArray)) + assert_array_equal(c, ec) + # compare results of np.isin to ma.isin + d = np.isin(a, b[~b.mask]) & ~a.mask + assert_array_equal(c, d) + + def test_in1d(self): + # Test in1d + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + test = in1d(a, b) + assert_equal(test, [True, True, True, False, True]) + # + a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 5, -1], mask=[0, 0, 1]) + test = in1d(a, b) + assert_equal(test, [True, True, False, True, True]) + # + assert_array_equal([], in1d([], [])) + + def test_in1d_invert(self): + # Test in1d's invert parameter + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True)) + + a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 5, -1], mask=[0, 0, 1]) + assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True)) + + assert_array_equal([], in1d([], [], invert=True)) + + def test_union1d(self): + # Test union1d + a = array([1, 2, 5, 7, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + test = union1d(a, b) + control = array([1, 2, 3, 4, 5, 7, -1], mask=[0, 0, 0, 0, 0, 0, 1]) + assert_equal(test, control) + + # Tests gh-10340, arguments to union1d should be + # flattened if they are not already 1D + x = array([[0, 1, 2], [3, 4, 5]], mask=[[0, 0, 0], [0, 0, 1]]) + y = array([0, 1, 2, 3, 4], mask=[0, 0, 0, 0, 1]) + ez = array([0, 1, 2, 3, 4, 5], mask=[0, 0, 0, 0, 0, 1]) + z = union1d(x, y) + assert_equal(z, ez) + # + assert_array_equal([], union1d([], [])) + + def test_setdiff1d(self): + # Test setdiff1d + a = array([6, 5, 4, 7, 7, 1, 2, 1], mask=[0, 0, 0, 0, 0, 0, 0, 1]) + b = array([2, 4, 3, 3, 2, 1, 5]) + test = setdiff1d(a, b) + assert_equal(test, array([6, 7, -1], mask=[0, 0, 1])) + # + a = arange(10) + b = arange(8) + assert_equal(setdiff1d(a, b), array([8, 9])) + a = array([], np.uint32, mask=[]) + assert_equal(setdiff1d(a, []).dtype, np.uint32) + + def test_setdiff1d_char_array(self): + # Test setdiff1d_charray + a = np.array(['a', 'b', 'c']) + b = np.array(['a', 'b', 's']) + assert_array_equal(setdiff1d(a, b), np.array(['c'])) + + +class TestShapeBase: + + def test_atleast_2d(self): + # Test atleast_2d + a = masked_array([0, 1, 2], mask=[0, 1, 0]) + b = atleast_2d(a) + assert_equal(b.shape, (1, 3)) + assert_equal(b.mask.shape, b.data.shape) + assert_equal(a.shape, (3,)) + assert_equal(a.mask.shape, a.data.shape) + assert_equal(b.mask.shape, b.data.shape) + + def test_shape_scalar(self): + # the atleast and diagflat function should work with scalars + # GitHub issue #3367 + # Additionally, the atleast functions should accept multiple scalars + # correctly + b = atleast_1d(1.0) + assert_equal(b.shape, (1,)) + assert_equal(b.mask.shape, b.shape) + assert_equal(b.data.shape, b.shape) + + b = atleast_1d(1.0, 2.0) + for a in b: + assert_equal(a.shape, (1,)) + assert_equal(a.mask.shape, a.shape) + assert_equal(a.data.shape, a.shape) + + b = atleast_2d(1.0) + assert_equal(b.shape, (1, 1)) + assert_equal(b.mask.shape, b.shape) + assert_equal(b.data.shape, b.shape) + + b = atleast_2d(1.0, 2.0) + for a in b: + assert_equal(a.shape, (1, 1)) + assert_equal(a.mask.shape, a.shape) + assert_equal(a.data.shape, a.shape) + + b = atleast_3d(1.0) + assert_equal(b.shape, (1, 1, 1)) + assert_equal(b.mask.shape, b.shape) + assert_equal(b.data.shape, b.shape) + + b = atleast_3d(1.0, 2.0) + for a in b: + assert_equal(a.shape, (1, 1, 1)) + assert_equal(a.mask.shape, a.shape) + assert_equal(a.data.shape, a.shape) + + b = diagflat(1.0) + assert_equal(b.shape, (1, 1)) + assert_equal(b.mask.shape, b.data.shape) + + @pytest.mark.parametrize("fn", [atleast_1d, vstack, diagflat]) + def test_inspect_signature(self, fn): + name = fn.__name__ + assert getattr(np.ma, name) is fn + + assert fn.__module__ == "numpy.ma.extras" + + wrapped = getattr(np, fn.__name__) + sig_wrapped = inspect.signature(wrapped) + sig = inspect.signature(fn) + assert sig == sig_wrapped + + +class TestNDEnumerate: + + def test_ndenumerate_nomasked(self): + ordinary = np.arange(6.).reshape((1, 3, 2)) + empty_mask = np.zeros_like(ordinary, dtype=bool) + with_mask = masked_array(ordinary, mask=empty_mask) + assert_equal(list(np.ndenumerate(ordinary)), + list(ndenumerate(ordinary))) + assert_equal(list(ndenumerate(ordinary)), + list(ndenumerate(with_mask))) + assert_equal(list(ndenumerate(with_mask)), + list(ndenumerate(with_mask, compressed=False))) + + def test_ndenumerate_allmasked(self): + a = masked_all(()) + b = masked_all((100,)) + c = masked_all((2, 3, 4)) + assert_equal(list(ndenumerate(a)), []) + assert_equal(list(ndenumerate(b)), []) + assert_equal(list(ndenumerate(b, compressed=False)), + list(zip(np.ndindex((100,)), 100 * [masked]))) + assert_equal(list(ndenumerate(c)), []) + assert_equal(list(ndenumerate(c, compressed=False)), + list(zip(np.ndindex((2, 3, 4)), 2 * 3 * 4 * [masked]))) + + def test_ndenumerate_mixedmasked(self): + a = masked_array(np.arange(12).reshape((3, 4)), + mask=[[1, 1, 1, 1], + [1, 1, 0, 1], + [0, 0, 0, 0]]) + items = [((1, 2), 6), + ((2, 0), 8), ((2, 1), 9), ((2, 2), 10), ((2, 3), 11)] + assert_equal(list(ndenumerate(a)), items) + assert_equal(len(list(ndenumerate(a, compressed=False))), a.size) + for coordinate, value in ndenumerate(a, compressed=False): + assert_equal(a[coordinate], value) + + +class TestStack: + + def test_stack_1d(self): + a = masked_array([0, 1, 2], mask=[0, 1, 0]) + b = masked_array([9, 8, 7], mask=[1, 0, 0]) + + c = stack([a, b], axis=0) + assert_equal(c.shape, (2, 3)) + assert_array_equal(a.mask, c[0].mask) + assert_array_equal(b.mask, c[1].mask) + + d = vstack([a, b]) + assert_array_equal(c.data, d.data) + assert_array_equal(c.mask, d.mask) + + c = stack([a, b], axis=1) + assert_equal(c.shape, (3, 2)) + assert_array_equal(a.mask, c[:, 0].mask) + assert_array_equal(b.mask, c[:, 1].mask) + + def test_stack_masks(self): + a = masked_array([0, 1, 2], mask=True) + b = masked_array([9, 8, 7], mask=False) + + c = stack([a, b], axis=0) + assert_equal(c.shape, (2, 3)) + assert_array_equal(a.mask, c[0].mask) + assert_array_equal(b.mask, c[1].mask) + + d = vstack([a, b]) + assert_array_equal(c.data, d.data) + assert_array_equal(c.mask, d.mask) + + c = stack([a, b], axis=1) + assert_equal(c.shape, (3, 2)) + assert_array_equal(a.mask, c[:, 0].mask) + assert_array_equal(b.mask, c[:, 1].mask) + + def test_stack_nd(self): + # 2D + shp = (3, 2) + d1 = np.random.randint(0, 10, shp) + d2 = np.random.randint(0, 10, shp) + m1 = np.random.randint(0, 2, shp).astype(bool) + m2 = np.random.randint(0, 2, shp).astype(bool) + a1 = masked_array(d1, mask=m1) + a2 = masked_array(d2, mask=m2) + + c = stack([a1, a2], axis=0) + c_shp = (2,) + shp + assert_equal(c.shape, c_shp) + assert_array_equal(a1.mask, c[0].mask) + assert_array_equal(a2.mask, c[1].mask) + + c = stack([a1, a2], axis=-1) + c_shp = shp + (2,) + assert_equal(c.shape, c_shp) + assert_array_equal(a1.mask, c[..., 0].mask) + assert_array_equal(a2.mask, c[..., 1].mask) + + # 4D + shp = (3, 2, 4, 5,) + d1 = np.random.randint(0, 10, shp) + d2 = np.random.randint(0, 10, shp) + m1 = np.random.randint(0, 2, shp).astype(bool) + m2 = np.random.randint(0, 2, shp).astype(bool) + a1 = masked_array(d1, mask=m1) + a2 = masked_array(d2, mask=m2) + + c = stack([a1, a2], axis=0) + c_shp = (2,) + shp + assert_equal(c.shape, c_shp) + assert_array_equal(a1.mask, c[0].mask) + assert_array_equal(a2.mask, c[1].mask) + + c = stack([a1, a2], axis=-1) + c_shp = shp + (2,) + assert_equal(c.shape, c_shp) + assert_array_equal(a1.mask, c[..., 0].mask) + assert_array_equal(a2.mask, c[..., 1].mask) diff --git a/local_packages/numpy/ma/tests/test_mrecords.py b/local_packages/numpy/ma/tests/test_mrecords.py new file mode 100644 index 0000000..b4070df --- /dev/null +++ b/local_packages/numpy/ma/tests/test_mrecords.py @@ -0,0 +1,495 @@ +"""Tests suite for mrecords. + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu + +""" +import pickle + +import numpy as np +import numpy.ma as ma +from numpy._core.records import ( + fromarrays as recfromarrays, + fromrecords as recfromrecords, + recarray, +) +from numpy.ma import masked, nomask +from numpy.ma.mrecords import ( + MaskedRecords, + addfield, + fromarrays, + fromrecords, + fromtextfile, + mrecarray, +) +from numpy.ma.testutils import assert_, assert_equal, assert_equal_records +from numpy.testing import temppath + + +class TestMRecords: + + ilist = [1, 2, 3, 4, 5] + flist = [1.1, 2.2, 3.3, 4.4, 5.5] + slist = [b'one', b'two', b'three', b'four', b'five'] + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mask = [0, 1, 0, 0, 1] + base = ma.array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype) + + def test_byview(self): + # Test creation by view + base = self.base + mbase = base.view(mrecarray) + assert_equal(mbase.recordmask, base.recordmask) + assert_equal_records(mbase._mask, base._mask) + assert_(isinstance(mbase._data, recarray)) + assert_equal_records(mbase._data, base._data.view(recarray)) + for field in ('a', 'b', 'c'): + assert_equal(base[field], mbase[field]) + assert_equal_records(mbase.view(mrecarray), mbase) + + def test_get(self): + # Tests fields retrieval + base = self.base.copy() + mbase = base.view(mrecarray) + # As fields.......... + for field in ('a', 'b', 'c'): + assert_equal(getattr(mbase, field), mbase[field]) + assert_equal(base[field], mbase[field]) + # as elements ....... + mbase_first = mbase[0] + assert_(isinstance(mbase_first, mrecarray)) + assert_equal(mbase_first.dtype, mbase.dtype) + assert_equal(mbase_first.tolist(), (1, 1.1, b'one')) + # Used to be mask, now it's recordmask + assert_equal(mbase_first.recordmask, nomask) + assert_equal(mbase_first._mask.item(), (False, False, False)) + assert_equal(mbase_first['a'], mbase['a'][0]) + mbase_last = mbase[-1] + assert_(isinstance(mbase_last, mrecarray)) + assert_equal(mbase_last.dtype, mbase.dtype) + assert_equal(mbase_last.tolist(), (None, None, None)) + # Used to be mask, now it's recordmask + assert_equal(mbase_last.recordmask, True) + assert_equal(mbase_last._mask.item(), (True, True, True)) + assert_equal(mbase_last['a'], mbase['a'][-1]) + assert_(mbase_last['a'] is masked) + # as slice .......... + mbase_sl = mbase[:2] + assert_(isinstance(mbase_sl, mrecarray)) + assert_equal(mbase_sl.dtype, mbase.dtype) + # Used to be mask, now it's recordmask + assert_equal(mbase_sl.recordmask, [0, 1]) + assert_equal_records(mbase_sl.mask, + np.array([(False, False, False), + (True, True, True)], + dtype=mbase._mask.dtype)) + assert_equal_records(mbase_sl, base[:2].view(mrecarray)) + for field in ('a', 'b', 'c'): + assert_equal(getattr(mbase_sl, field), base[:2][field]) + + def test_set_fields(self): + # Tests setting fields. + base = self.base.copy() + mbase = base.view(mrecarray) + mbase = mbase.copy() + mbase.fill_value = (999999, 1e20, 'N/A') + # Change the data, the mask should be conserved + mbase.a._data[:] = 5 + assert_equal(mbase['a']._data, [5, 5, 5, 5, 5]) + assert_equal(mbase['a']._mask, [0, 1, 0, 0, 1]) + # Change the elements, and the mask will follow + mbase.a = 1 + assert_equal(mbase['a']._data, [1] * 5) + assert_equal(ma.getmaskarray(mbase['a']), [0] * 5) + # Use to be _mask, now it's recordmask + assert_equal(mbase.recordmask, [False] * 5) + assert_equal(mbase._mask.tolist(), + np.array([(0, 0, 0), + (0, 1, 1), + (0, 0, 0), + (0, 0, 0), + (0, 1, 1)], + dtype=bool)) + # Set a field to mask ........................ + mbase.c = masked + # Use to be mask, and now it's still mask ! + assert_equal(mbase.c.mask, [1] * 5) + assert_equal(mbase.c.recordmask, [1] * 5) + assert_equal(ma.getmaskarray(mbase['c']), [1] * 5) + assert_equal(ma.getdata(mbase['c']), [b'N/A'] * 5) + assert_equal(mbase._mask.tolist(), + np.array([(0, 0, 1), + (0, 1, 1), + (0, 0, 1), + (0, 0, 1), + (0, 1, 1)], + dtype=bool)) + # Set fields by slices ....................... + mbase = base.view(mrecarray).copy() + mbase.a[3:] = 5 + assert_equal(mbase.a, [1, 2, 3, 5, 5]) + assert_equal(mbase.a._mask, [0, 1, 0, 0, 0]) + mbase.b[3:] = masked + assert_equal(mbase.b, base['b']) + assert_equal(mbase.b._mask, [0, 1, 0, 1, 1]) + # Set fields globally.......................... + ndtype = [('alpha', '|S1'), ('num', int)] + data = ma.array([('a', 1), ('b', 2), ('c', 3)], dtype=ndtype) + rdata = data.view(MaskedRecords) + val = ma.array([10, 20, 30], mask=[1, 0, 0]) + + rdata['num'] = val + assert_equal(rdata.num, val) + assert_equal(rdata.num.mask, [1, 0, 0]) + + def test_set_fields_mask(self): + # Tests setting the mask of a field. + base = self.base.copy() + # This one has already a mask.... + mbase = base.view(mrecarray) + mbase['a'][-2] = masked + assert_equal(mbase.a, [1, 2, 3, 4, 5]) + assert_equal(mbase.a._mask, [0, 1, 0, 1, 1]) + # This one has not yet + mbase = fromarrays([np.arange(5), np.random.rand(5)], + dtype=[('a', int), ('b', float)]) + mbase['a'][-2] = masked + assert_equal(mbase.a, [0, 1, 2, 3, 4]) + assert_equal(mbase.a._mask, [0, 0, 0, 1, 0]) + + def test_set_mask(self): + base = self.base.copy() + mbase = base.view(mrecarray) + # Set the mask to True ....................... + mbase.mask = masked + assert_equal(ma.getmaskarray(mbase['b']), [1] * 5) + assert_equal(mbase['a']._mask, mbase['b']._mask) + assert_equal(mbase['a']._mask, mbase['c']._mask) + assert_equal(mbase._mask.tolist(), + np.array([(1, 1, 1)] * 5, dtype=bool)) + # Delete the mask ............................ + mbase.mask = nomask + assert_equal(ma.getmaskarray(mbase['c']), [0] * 5) + assert_equal(mbase._mask.tolist(), + np.array([(0, 0, 0)] * 5, dtype=bool)) + + def test_set_mask_fromarray(self): + base = self.base.copy() + mbase = base.view(mrecarray) + # Sets the mask w/ an array + mbase.mask = [1, 0, 0, 0, 1] + assert_equal(mbase.a.mask, [1, 0, 0, 0, 1]) + assert_equal(mbase.b.mask, [1, 0, 0, 0, 1]) + assert_equal(mbase.c.mask, [1, 0, 0, 0, 1]) + # Yay, once more ! + mbase.mask = [0, 0, 0, 0, 1] + assert_equal(mbase.a.mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.b.mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.c.mask, [0, 0, 0, 0, 1]) + + def test_set_mask_fromfields(self): + mbase = self.base.copy().view(mrecarray) + + nmask = np.array( + [(0, 1, 0), (0, 1, 0), (1, 0, 1), (1, 0, 1), (0, 0, 0)], + dtype=[('a', bool), ('b', bool), ('c', bool)]) + mbase.mask = nmask + assert_equal(mbase.a.mask, [0, 0, 1, 1, 0]) + assert_equal(mbase.b.mask, [1, 1, 0, 0, 0]) + assert_equal(mbase.c.mask, [0, 0, 1, 1, 0]) + # Reinitialize and redo + mbase.mask = False + mbase.fieldmask = nmask + assert_equal(mbase.a.mask, [0, 0, 1, 1, 0]) + assert_equal(mbase.b.mask, [1, 1, 0, 0, 0]) + assert_equal(mbase.c.mask, [0, 0, 1, 1, 0]) + + def test_set_elements(self): + base = self.base.copy() + # Set an element to mask ..................... + mbase = base.view(mrecarray).copy() + mbase[-2] = masked + assert_equal( + mbase._mask.tolist(), + np.array([(0, 0, 0), (1, 1, 1), (0, 0, 0), (1, 1, 1), (1, 1, 1)], + dtype=bool)) + # Used to be mask, now it's recordmask! + assert_equal(mbase.recordmask, [0, 1, 0, 1, 1]) + # Set slices ................................. + mbase = base.view(mrecarray).copy() + mbase[:2] = (5, 5, 5) + assert_equal(mbase.a._data, [5, 5, 3, 4, 5]) + assert_equal(mbase.a._mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.b._data, [5., 5., 3.3, 4.4, 5.5]) + assert_equal(mbase.b._mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.c._data, + [b'5', b'5', b'three', b'four', b'five']) + assert_equal(mbase.b._mask, [0, 0, 0, 0, 1]) + + mbase = base.view(mrecarray).copy() + mbase[:2] = masked + assert_equal(mbase.a._data, [1, 2, 3, 4, 5]) + assert_equal(mbase.a._mask, [1, 1, 0, 0, 1]) + assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 4.4, 5.5]) + assert_equal(mbase.b._mask, [1, 1, 0, 0, 1]) + assert_equal(mbase.c._data, + [b'one', b'two', b'three', b'four', b'five']) + assert_equal(mbase.b._mask, [1, 1, 0, 0, 1]) + + def test_setslices_hardmask(self): + # Tests setting slices w/ hardmask. + base = self.base.copy() + mbase = base.view(mrecarray) + mbase.harden_mask() + try: + mbase[-2:] = (5, 5, 5) + assert_equal(mbase.a._data, [1, 2, 3, 5, 5]) + assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 5, 5.5]) + assert_equal(mbase.c._data, + [b'one', b'two', b'three', b'5', b'five']) + assert_equal(mbase.a._mask, [0, 1, 0, 0, 1]) + assert_equal(mbase.b._mask, mbase.a._mask) + assert_equal(mbase.b._mask, mbase.c._mask) + except NotImplementedError: + # OK, not implemented yet... + pass + except AssertionError: + raise + else: + raise Exception("Flexible hard masks should be supported !") + # Not using a tuple should crash + try: + mbase[-2:] = 3 + except (NotImplementedError, TypeError): + pass + else: + raise TypeError("Should have expected a readable buffer object!") + + def test_hardmask(self): + # Test hardmask + base = self.base.copy() + mbase = base.view(mrecarray) + mbase.harden_mask() + assert_(mbase._hardmask) + mbase.mask = nomask + assert_equal_records(mbase._mask, base._mask) + mbase.soften_mask() + assert_(not mbase._hardmask) + mbase.mask = nomask + # So, the mask of a field is no longer set to nomask... + assert_equal_records(mbase._mask, + ma.make_mask_none(base.shape, base.dtype)) + assert_(ma.make_mask(mbase['b']._mask) is nomask) + assert_equal(mbase['a']._mask, mbase['b']._mask) + + def test_pickling(self): + # Test pickling + base = self.base.copy() + mrec = base.view(mrecarray) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + _ = pickle.dumps(mrec, protocol=proto) + mrec_ = pickle.loads(_) + assert_equal(mrec_.dtype, mrec.dtype) + assert_equal_records(mrec_._data, mrec._data) + assert_equal(mrec_._mask, mrec._mask) + assert_equal_records(mrec_._mask, mrec._mask) + + def test_filled(self): + # Test filling the array + _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int) + _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float) + _c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8') + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mrec = fromarrays([_a, _b, _c], dtype=ddtype, + fill_value=(99999, 99999., 'N/A')) + mrecfilled = mrec.filled() + assert_equal(mrecfilled['a'], np.array((1, 2, 99999), dtype=int)) + assert_equal(mrecfilled['b'], np.array((1.1, 2.2, 99999.), + dtype=float)) + assert_equal(mrecfilled['c'], np.array(('one', 'two', 'N/A'), + dtype='|S8')) + + def test_tolist(self): + # Test tolist. + _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int) + _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float) + _c = ma.array(['one', 'two', 'three'], mask=[1, 0, 0], dtype='|S8') + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mrec = fromarrays([_a, _b, _c], dtype=ddtype, + fill_value=(99999, 99999., 'N/A')) + + assert_equal(mrec.tolist(), + [(1, 1.1, None), (2, 2.2, b'two'), + (None, None, b'three')]) + + def test_withnames(self): + # Test the creation w/ format and names + x = mrecarray(1, formats=float, names='base') + x[0]['base'] = 10 + assert_equal(x['base'][0], 10) + + def test_exotic_formats(self): + # Test that 'exotic' formats are processed properly + easy = mrecarray(1, dtype=[('i', int), ('s', '|S8'), ('f', float)]) + easy[0] = masked + assert_equal(easy.filled(1).item(), (1, b'1', 1.)) + + solo = mrecarray(1, dtype=[('f0', ' 1: + assert_(eq(np.concatenate((x, y), 1), + concatenate((xm, ym), 1))) + assert_(eq(np.add.reduce(x, 1), add.reduce(x, 1))) + assert_(eq(np.sum(x, 1), sum(x, 1))) + assert_(eq(np.prod(x, 1), product(x, 1))) + + def test_testCI(self): + # Test of conversions and indexing + x1 = np.array([1, 2, 4, 3]) + x2 = array(x1, mask=[1, 0, 0, 0]) + x3 = array(x1, mask=[0, 1, 0, 1]) + x4 = array(x1) + # test conversion to strings + str(x2) # raises? + repr(x2) # raises? + assert_(eq(np.sort(x1), sort(x2, fill_value=0))) + # tests of indexing + assert_(type(x2[1]) is type(x1[1])) + assert_(x1[1] == x2[1]) + assert_(x2[0] is masked) + assert_(eq(x1[2], x2[2])) + assert_(eq(x1[2:5], x2[2:5])) + assert_(eq(x1[:], x2[:])) + assert_(eq(x1[1:], x3[1:])) + x1[2] = 9 + x2[2] = 9 + assert_(eq(x1, x2)) + x1[1:3] = 99 + x2[1:3] = 99 + assert_(eq(x1, x2)) + x2[1] = masked + assert_(eq(x1, x2)) + x2[1:3] = masked + assert_(eq(x1, x2)) + x2[:] = x1 + x2[1] = masked + assert_(allequal(getmask(x2), array([0, 1, 0, 0]))) + x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + assert_(allequal(getmask(x3), array([0, 1, 1, 0]))) + x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + assert_(allequal(getmask(x4), array([0, 1, 1, 0]))) + assert_(allequal(x4, array([1, 2, 3, 4]))) + x1 = np.arange(5) * 1.0 + x2 = masked_values(x1, 3.0) + assert_(eq(x1, x2)) + assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask)) + assert_(eq(3.0, x2.fill_value)) + x1 = array([1, 'hello', 2, 3], object) + x2 = np.array([1, 'hello', 2, 3], object) + s1 = x1[1] + s2 = x2[1] + assert_equal(type(s2), str) + assert_equal(type(s1), str) + assert_equal(s1, s2) + assert_(x1[1:1].shape == (0,)) + + def test_testCopySize(self): + # Tests of some subtle points of copying and sizing. + n = [0, 0, 1, 0, 0] + m = make_mask(n) + m2 = make_mask(m) + assert_(m is m2) + m3 = make_mask(m, copy=True) + assert_(m is not m3) + + x1 = np.arange(5) + y1 = array(x1, mask=m) + assert_(y1._data is not x1) + assert_(allequal(x1, y1._data)) + assert_(y1._mask is m) + + y1a = array(y1, copy=0) + # For copy=False, one might expect that the array would just + # passed on, i.e., that it would be "is" instead of "==". + # See gh-4043 for discussion. + assert_(y1a._mask.__array_interface__ == + y1._mask.__array_interface__) + + y2 = array(x1, mask=m3, copy=0) + assert_(y2._mask is m3) + assert_(y2[2] is masked) + y2[2] = 9 + assert_(y2[2] is not masked) + assert_(y2._mask is m3) + assert_(allequal(y2.mask, 0)) + + y2a = array(x1, mask=m, copy=1) + assert_(y2a._mask is not m) + assert_(y2a[2] is masked) + y2a[2] = 9 + assert_(y2a[2] is not masked) + assert_(y2a._mask is not m) + assert_(allequal(y2a.mask, 0)) + + y3 = array(x1 * 1.0, mask=m) + assert_(filled(y3).dtype is (x1 * 1.0).dtype) + + x4 = arange(4) + x4[2] = masked + y4 = resize(x4, (8,)) + assert_(eq(concatenate([x4, x4]), y4)) + assert_(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])) + y5 = repeat(x4, (2, 2, 2, 2), axis=0) + assert_(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3])) + y6 = repeat(x4, 2, axis=0) + assert_(eq(y5, y6)) + + def test_testPut(self): + # Test of put + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + m2 = m.copy() + x = array(d, mask=m) + assert_(x[3] is masked) + assert_(x[4] is masked) + x[[1, 4]] = [10, 40] + assert_(x._mask is m) + assert_(x[3] is masked) + assert_(x[4] is not masked) + assert_(eq(x, [0, 10, 2, -1, 40])) + + x = array(d, mask=m2, copy=True) + x.put([0, 1, 2], [-1, 100, 200]) + assert_(x._mask is not m2) + assert_(x[3] is masked) + assert_(x[4] is masked) + assert_(eq(x, [-1, 100, 200, 0, 0])) + + def test_testPut2(self): + # Test of put + d = arange(5) + x = array(d, mask=[0, 0, 0, 0, 0]) + z = array([10, 40], mask=[1, 0]) + assert_(x[2] is not masked) + assert_(x[3] is not masked) + x[2:4] = z + assert_(x[2] is masked) + assert_(x[3] is not masked) + assert_(eq(x, [0, 1, 10, 40, 4])) + + d = arange(5) + x = array(d, mask=[0, 0, 0, 0, 0]) + y = x[2:4] + z = array([10, 40], mask=[1, 0]) + assert_(x[2] is not masked) + assert_(x[3] is not masked) + y[:] = z + assert_(y[0] is masked) + assert_(y[1] is not masked) + assert_(eq(y, [10, 40])) + assert_(x[2] is masked) + assert_(x[3] is not masked) + assert_(eq(x, [0, 1, 10, 40, 4])) + + def test_testMaPut(self): + _, _, _, _, _, _, ym, _, zm, _, _ = self._create_data() + m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1] + i = np.nonzero(m)[0] + put(ym, i, zm) + assert_(all(take(ym, i, axis=0) == zm)) + + def test_testOddFeatures(self): + # Test of other odd features + x = arange(20) + x = x.reshape(4, 5) + x.flat[5] = 12 + assert_(x[1, 0] == 12) + z = x + 10j * x + assert_(eq(z.real, x)) + assert_(eq(z.imag, 10 * x)) + assert_(eq((z * conjugate(z)).real, 101 * x * x)) + z.imag[...] = 0.0 + + x = arange(10) + x[3] = masked + assert_(str(x[3]) == str(masked)) + c = x >= 8 + assert_(count(where(c, masked, masked)) == 0) + assert_(shape(where(c, masked, masked)) == c.shape) + z = where(c, x, masked) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is masked) + assert_(z[7] is masked) + assert_(z[8] is not masked) + assert_(z[9] is not masked) + assert_(eq(x, z)) + z = where(c, masked, x) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is not masked) + assert_(z[7] is not masked) + assert_(z[8] is masked) + assert_(z[9] is masked) + z = masked_where(c, x) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is not masked) + assert_(z[7] is not masked) + assert_(z[8] is masked) + assert_(z[9] is masked) + assert_(eq(x, z)) + x = array([1., 2., 3., 4., 5.]) + c = array([1, 1, 1, 0, 0]) + x[2] = masked + z = where(c, x, -x) + assert_(eq(z, [1., 2., 0., -4., -5])) + c[0] = masked + z = where(c, x, -x) + assert_(eq(z, [1., 2., 0., -4., -5])) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2))) + assert_(eq(masked_where(greater_equal(x, 2), x), + masked_greater_equal(x, 2))) + assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2))) + assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2))) + assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))) + assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2))) + assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))) + assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4])) + assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199])) + assert_(eq(masked_inside(array(list(range(5)), + mask=[1, 0, 0, 0, 0]), 1, 3).mask, + [1, 1, 1, 1, 0])) + assert_(eq(masked_outside(array(list(range(5)), + mask=[0, 1, 0, 0, 0]), 1, 3).mask, + [1, 1, 0, 0, 1])) + assert_(eq(masked_equal(array(list(range(5)), + mask=[1, 0, 0, 0, 0]), 2).mask, + [1, 0, 1, 0, 0])) + assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1], + mask=[1, 0, 0, 0, 0]), 2).mask, + [1, 0, 1, 0, 1])) + assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]), + [99, 99, 3, 4, 5])) + atest = ones((10, 10, 10), dtype=np.float32) + btest = zeros(atest.shape, MaskType) + ctest = masked_where(btest, atest) + assert_(eq(atest, ctest)) + z = choose(c, (-x, x)) + assert_(eq(z, [1., 2., 0., -4., -5])) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + x = arange(6) + x[5] = masked + y = arange(6) * 10 + y[2] = masked + c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0]) + cm = c.filled(1) + z = where(c, x, y) + zm = where(cm, x, y) + assert_(eq(z, zm)) + assert_(getmask(zm) is nomask) + assert_(eq(zm, [0, 1, 2, 30, 40, 50])) + z = where(c, masked, 1) + assert_(eq(z, [99, 99, 99, 1, 1, 1])) + z = where(c, 1, masked) + assert_(eq(z, [99, 1, 1, 99, 99, 99])) + + def test_testMinMax2(self): + # Test of minimum, maximum. + assert_(eq(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3])) + assert_(eq(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9])) + x = arange(5) + y = arange(5) - 2 + x[3] = masked + y[0] = masked + assert_(eq(minimum(x, y), where(less(x, y), x, y))) + assert_(eq(maximum(x, y), where(greater(x, y), x, y))) + assert_(minimum.reduce(x) == 0) + assert_(maximum.reduce(x) == 4) + + def test_testTakeTransposeInnerOuter(self): + # Test of take, transpose, inner, outer products + x = arange(24) + y = np.arange(24) + x[5:6] = masked + x = x.reshape(2, 3, 4) + y = y.reshape(2, 3, 4) + assert_(eq(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))) + assert_(eq(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))) + assert_(eq(np.inner(filled(x, 0), filled(y, 0)), + inner(x, y))) + assert_(eq(np.outer(filled(x, 0), filled(y, 0)), + outer(x, y))) + y = array(['abc', 1, 'def', 2, 3], object) + y[2] = masked + t = take(y, [0, 3, 4]) + assert_(t[0] == 'abc') + assert_(t[1] == 2) + assert_(t[2] == 3) + + def test_testInplace(self): + # Test of inplace operations and rich comparisons + y = arange(10) + + x = arange(10) + xm = arange(10) + xm[2] = masked + x += 1 + assert_(eq(x, y + 1)) + xm += 1 + assert_(eq(x, y + 1)) + + x = arange(10) + xm = arange(10) + xm[2] = masked + x -= 1 + assert_(eq(x, y - 1)) + xm -= 1 + assert_(eq(xm, y - 1)) + + x = arange(10) * 1.0 + xm = arange(10) * 1.0 + xm[2] = masked + x *= 2.0 + assert_(eq(x, y * 2)) + xm *= 2.0 + assert_(eq(xm, y * 2)) + + x = arange(10) * 2 + xm = arange(10) + xm[2] = masked + x //= 2 + assert_(eq(x, y)) + xm //= 2 + assert_(eq(x, y)) + + x = arange(10) * 1.0 + xm = arange(10) * 1.0 + xm[2] = masked + x /= 2.0 + assert_(eq(x, y / 2.0)) + xm /= arange(10) + assert_(eq(xm, ones((10,)))) + + x = arange(10).astype(np.float32) + xm = arange(10) + xm[2] = masked + x += 1. + assert_(eq(x, y + 1.)) + + def test_testPickle(self): + # Test of pickling + x = arange(12) + x[4:10:2] = masked + x = x.reshape(4, 3) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + s = pickle.dumps(x, protocol=proto) + y = pickle.loads(s) + assert_(eq(x, y)) + + def test_testMasked(self): + # Test of masked element + xx = arange(6) + xx[1] = masked + assert_(str(masked) == '--') + assert_(xx[1] is masked) + assert_equal(filled(xx[1], 0), 0) + + def test_testAverage1(self): + # Test of average. + ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) + assert_(eq(2.0, average(ott, axis=0))) + assert_(eq(2.0, average(ott, weights=[1., 1., 2., 1.]))) + result, wts = average(ott, weights=[1., 1., 2., 1.], returned=True) + assert_(eq(2.0, result)) + assert_(wts == 4.0) + ott[:] = masked + assert_(average(ott, axis=0) is masked) + ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) + ott = ott.reshape(2, 2) + ott[:, 1] = masked + assert_(eq(average(ott, axis=0), [2.0, 0.0])) + assert_(average(ott, axis=1)[0] is masked) + assert_(eq([2., 0.], average(ott, axis=0))) + result, wts = average(ott, axis=0, returned=True) + assert_(eq(wts, [1., 0.])) + + def test_testAverage2(self): + # More tests of average. + w1 = [0, 1, 1, 1, 1, 0] + w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] + x = arange(6) + assert_(allclose(average(x, axis=0), 2.5)) + assert_(allclose(average(x, axis=0, weights=w1), 2.5)) + y = array([arange(6), 2.0 * arange(6)]) + assert_(allclose(average(y, None), + np.add.reduce(np.arange(6)) * 3. / 12.)) + assert_(allclose(average(y, axis=0), np.arange(6) * 3. / 2.)) + assert_(allclose(average(y, axis=1), + [average(x, axis=0), average(x, axis=0) * 2.0])) + assert_(allclose(average(y, None, weights=w2), 20. / 6.)) + assert_(allclose(average(y, axis=0, weights=w2), + [0., 1., 2., 3., 4., 10.])) + assert_(allclose(average(y, axis=1), + [average(x, axis=0), average(x, axis=0) * 2.0])) + m1 = zeros(6) + m2 = [0, 0, 1, 1, 0, 0] + m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] + m4 = ones(6) + m5 = [0, 1, 1, 1, 1, 1] + assert_(allclose(average(masked_array(x, m1), axis=0), 2.5)) + assert_(allclose(average(masked_array(x, m2), axis=0), 2.5)) + assert_(average(masked_array(x, m4), axis=0) is masked) + assert_equal(average(masked_array(x, m5), axis=0), 0.0) + assert_equal(count(average(masked_array(x, m4), axis=0)), 0) + z = masked_array(y, m3) + assert_(allclose(average(z, None), 20. / 6.)) + assert_(allclose(average(z, axis=0), + [0., 1., 99., 99., 4.0, 7.5])) + assert_(allclose(average(z, axis=1), [2.5, 5.0])) + assert_(allclose(average(z, axis=0, weights=w2), + [0., 1., 99., 99., 4.0, 10.0])) + + a = arange(6) + b = arange(6) * 3 + r1, w1 = average([[a, b], [b, a]], axis=1, returned=True) + assert_equal(shape(r1), shape(w1)) + assert_equal(r1.shape, w1.shape) + r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=True) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), returned=True) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=True) + assert_(shape(w2) == shape(r2)) + a2d = array([[1, 2], [0, 4]], float) + a2dm = masked_array(a2d, [[0, 0], [1, 0]]) + a2da = average(a2d, axis=0) + assert_(eq(a2da, [0.5, 3.0])) + a2dma = average(a2dm, axis=0) + assert_(eq(a2dma, [1.0, 3.0])) + a2dma = average(a2dm, axis=None) + assert_(eq(a2dma, 7. / 3.)) + a2dma = average(a2dm, axis=1) + assert_(eq(a2dma, [1.5, 4.0])) + + def test_testToPython(self): + assert_equal(1, int(array(1))) + assert_equal(1.0, float(array(1))) + assert_equal(1, int(array([[[1]]]))) + assert_equal(1.0, float(array([[1]]))) + assert_raises(TypeError, float, array([1, 1])) + assert_raises(ValueError, bool, array([0, 1])) + assert_raises(ValueError, bool, array([0, 0], mask=[0, 1])) + + def test_testScalarArithmetic(self): + xm = array(0, mask=1) + # TODO FIXME: Find out what the following raises a warning in r8247 + with np.errstate(divide='ignore'): + assert_((1 / array(0)).mask) + assert_((1 + xm).mask) + assert_((-xm).mask) + assert_((-xm).mask) + assert_(maximum(xm, xm).mask) + assert_(minimum(xm, xm).mask) + assert_(xm.filled().dtype is xm._data.dtype) + x = array(0, mask=0) + assert_(x.filled() == x._data) + assert_equal(str(xm), str(masked_print_option)) + + def test_testArrayMethods(self): + a = array([1, 3, 2]) + assert_(eq(a.any(), a._data.any())) + assert_(eq(a.all(), a._data.all())) + assert_(eq(a.argmax(), a._data.argmax())) + assert_(eq(a.argmin(), a._data.argmin())) + assert_(eq(a.choose(0, 1, 2, 3, 4), + a._data.choose(0, 1, 2, 3, 4))) + assert_(eq(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))) + assert_(eq(a.conj(), a._data.conj())) + assert_(eq(a.conjugate(), a._data.conjugate())) + m = array([[1, 2], [3, 4]]) + assert_(eq(m.diagonal(), m._data.diagonal())) + assert_(eq(a.sum(), a._data.sum())) + assert_(eq(a.take([1, 2]), a._data.take([1, 2]))) + assert_(eq(m.transpose(), m._data.transpose())) + + def test_testArrayAttributes(self): + a = array([1, 3, 2]) + assert_equal(a.ndim, 1) + + def test_testAPI(self): + assert_(not [m for m in dir(np.ndarray) + if m not in dir(MaskedArray) and + not m.startswith('_')]) + + def test_testSingleElementSubscript(self): + a = array([1, 3, 2]) + b = array([1, 3, 2], mask=[1, 0, 1]) + assert_equal(a[0].shape, ()) + assert_equal(b[0].shape, ()) + assert_equal(b[1].shape, ()) + + def test_assignment_by_condition(self): + # Test for gh-18951 + a = array([1, 2, 3, 4], mask=[1, 0, 1, 0]) + c = a >= 3 + a[c] = 5 + assert_(a[2] is masked) + + def test_assignment_by_condition_2(self): + # gh-19721 + a = masked_array([0, 1], mask=[False, False]) + b = masked_array([0, 1], mask=[True, True]) + mask = a < 1 + b[mask] = a[mask] + expected_mask = [False, True] + assert_equal(b.mask, expected_mask) + + +class TestUfuncs: + + def _create_data(self): + return (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6), + array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),) + + def test_testUfuncRegression(self): + f_invalid_ignore = [ + 'sqrt', 'arctanh', 'arcsin', 'arccos', + 'arccosh', 'arctanh', 'log', 'log10', 'divide', + 'true_divide', 'floor_divide', 'remainder', 'fmod'] + for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate', + 'sin', 'cos', 'tan', + 'arcsin', 'arccos', 'arctan', + 'sinh', 'cosh', 'tanh', + 'arcsinh', + 'arccosh', + 'arctanh', + 'absolute', 'fabs', 'negative', + 'floor', 'ceil', + 'logical_not', + 'add', 'subtract', 'multiply', + 'divide', 'true_divide', 'floor_divide', + 'remainder', 'fmod', 'hypot', 'arctan2', + 'equal', 'not_equal', 'less_equal', 'greater_equal', + 'less', 'greater', + 'logical_and', 'logical_or', 'logical_xor']: + try: + uf = getattr(umath, f) + except AttributeError: + uf = getattr(fromnumeric, f) + mf = getattr(np.ma, f) + args = self._create_data()[:uf.nin] + with np.errstate(): + if f in f_invalid_ignore: + np.seterr(invalid='ignore') + if f in ['arctanh', 'log', 'log10']: + np.seterr(divide='ignore') + ur = uf(*args) + mr = mf(*args) + assert_(eq(ur.filled(0), mr.filled(0), f)) + assert_(eqmask(ur.mask, mr.mask)) + + def test_reduce(self): + a = self._create_data()[0] + assert_(not alltrue(a, axis=0)) + assert_(sometrue(a, axis=0)) + assert_equal(sum(a[:3], axis=0), 0) + assert_equal(product(a, axis=0), 0) + + def test_minmax(self): + a = arange(1, 13).reshape(3, 4) + amask = masked_where(a < 5, a) + assert_equal(amask.max(), a.max()) + assert_equal(amask.min(), 5) + assert_((amask.max(0) == a.max(0)).all()) + assert_((amask.min(0) == [5, 6, 7, 8]).all()) + assert_(amask.max(1)[0].mask) + assert_(amask.min(1)[0].mask) + + def test_nonzero(self): + for t in "?bhilqpBHILQPfdgFDGO": + x = array([1, 0, 2, 0], mask=[0, 0, 1, 1]) + assert_(eq(nonzero(x), [0])) + + +class TestArrayMethods: + + def _create_data(self): + x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + return x, X, XX, m, mx, mX, mXX + + def test_trace(self): + _, X, _, _, _, mX, _ = self._create_data() + mXdiag = mX.diagonal() + assert_equal(mX.trace(), mX.diagonal().compressed().sum()) + assert_(eq(mX.trace(), + X.trace() - sum(mXdiag.mask * X.diagonal(), + axis=0))) + + def test_clip(self): + x, _, _, _, mx, _, _ = self._create_data() + clipped = mx.clip(2, 8) + assert_(eq(clipped.mask, mx.mask)) + assert_(eq(clipped._data, x.clip(2, 8))) + assert_(eq(clipped._data, mx._data.clip(2, 8))) + + def test_ptp(self): + _, X, _, m, mx, mX, _ = self._create_data() + n, m = X.shape + # print(type(mx), mx.compressed()) + # raise Exception() + assert_equal(mx.ptp(), np.ptp(mx.compressed())) + rows = np.zeros(n, np.float64) + cols = np.zeros(m, np.float64) + for k in range(m): + cols[k] = np.ptp(mX[:, k].compressed()) + for k in range(n): + rows[k] = np.ptp(mX[k].compressed()) + assert_(eq(mX.ptp(0), cols)) + assert_(eq(mX.ptp(1), rows)) + + def test_swapaxes(self): + _, _, _, _, _, mX, mXX = self._create_data() + mXswapped = mX.swapaxes(0, 1) + assert_(eq(mXswapped[-1], mX[:, -1])) + mXXswapped = mXX.swapaxes(0, 2) + assert_equal(mXXswapped.shape, (2, 2, 3, 3)) + + def test_cumprod(self): + mX = self._create_data()[5] + mXcp = mX.cumprod(0) + assert_(eq(mXcp._data, mX.filled(1).cumprod(0))) + mXcp = mX.cumprod(1) + assert_(eq(mXcp._data, mX.filled(1).cumprod(1))) + + def test_cumsum(self): + mX = self._create_data()[5] + mXcp = mX.cumsum(0) + assert_(eq(mXcp._data, mX.filled(0).cumsum(0))) + mXcp = mX.cumsum(1) + assert_(eq(mXcp._data, mX.filled(0).cumsum(1))) + + def test_varstd(self): + _, X, XX, _, _, mX, mXX = self._create_data() + assert_(eq(mX.var(axis=None), mX.compressed().var())) + assert_(eq(mX.std(axis=None), mX.compressed().std())) + assert_(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape)) + assert_(eq(mX.var().shape, X.var().shape)) + (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) + for k in range(6): + assert_(eq(mXvar1[k], mX[k].compressed().var())) + assert_(eq(mXvar0[k], mX[:, k].compressed().var())) + assert_(eq(np.sqrt(mXvar0[k]), + mX[:, k].compressed().std())) + + +def eqmask(m1, m2): + if m1 is nomask: + return m2 is nomask + if m2 is nomask: + return m1 is nomask + return (m1 == m2).all() diff --git a/local_packages/numpy/ma/tests/test_regression.py b/local_packages/numpy/ma/tests/test_regression.py new file mode 100644 index 0000000..4e40a3f --- /dev/null +++ b/local_packages/numpy/ma/tests/test_regression.py @@ -0,0 +1,83 @@ +import numpy as np +from numpy.testing import assert_, assert_array_equal + + +class TestRegression: + def test_masked_array_create(self): + # Ticket #17 + x = np.ma.masked_array([0, 1, 2, 3, 0, 4, 5, 6], + mask=[0, 0, 0, 1, 1, 1, 0, 0]) + assert_array_equal(np.ma.nonzero(x), [[1, 2, 6, 7]]) + + def test_masked_array(self): + # Ticket #61 + np.ma.array(1, mask=[1]) + + def test_mem_masked_where(self): + # Ticket #62 + from numpy.ma import MaskType, masked_where + a = np.zeros((1, 1)) + b = np.zeros(a.shape, MaskType) + c = masked_where(b, a) + a - c + + def test_masked_array_multiply(self): + # Ticket #254 + a = np.ma.zeros((4, 1)) + a[2, 0] = np.ma.masked + b = np.zeros((4, 2)) + a * b + b * a + + def test_masked_array_repeat(self): + # Ticket #271 + np.ma.array([1], mask=False).repeat(10) + + def test_masked_array_repr_unicode(self): + # Ticket #1256 + repr(np.ma.array("Unicode")) + + def test_atleast_2d(self): + # Ticket #1559 + a = np.ma.masked_array([0.0, 1.2, 3.5], mask=[False, True, False]) + b = np.atleast_2d(a) + assert_(a.mask.ndim == 1) + assert_(b.mask.ndim == 2) + + def test_set_fill_value_unicode_py3(self): + # Ticket #2733 + a = np.ma.masked_array(['a', 'b', 'c'], mask=[1, 0, 0]) + a.fill_value = 'X' + assert_(a.fill_value == 'X') + + def test_var_sets_maskedarray_scalar(self): + # Issue gh-2757 + a = np.ma.array(np.arange(5), mask=True) + mout = np.ma.array(-1, dtype=float) + a.var(out=mout) + assert_(mout._data == 0) + + def test_mask_not_backmangled(self): + # See gh-10314. Test case taken from gh-3140. + a = np.ma.MaskedArray([1., 2.], mask=[False, False]) + assert_(a.mask.shape == (2,)) + b = np.tile(a, (2, 1)) + # Check that the above no longer changes a.shape to (1, 2) + assert_(a.mask.shape == (2,)) + assert_(b.shape == (2, 2)) + assert_(b.mask.shape == (2, 2)) + + def test_empty_list_on_structured(self): + # See gh-12464. Indexing with empty list should give empty result. + ma = np.ma.MaskedArray([(1, 1.), (2, 2.), (3, 3.)], dtype='i4,f4') + assert_array_equal(ma[[]], ma[:0]) + + def test_masked_array_tobytes_fortran(self): + ma = np.ma.arange(4).reshape((2, 2)) + assert_array_equal(ma.tobytes(order='F'), ma.T.tobytes()) + + def test_structured_array(self): + # see gh-22041 + np.ma.array((1, (b"", b"")), + dtype=[("x", np.int_), + ("y", [("i", np.void), ("j", np.void)])]) diff --git a/local_packages/numpy/ma/tests/test_subclassing.py b/local_packages/numpy/ma/tests/test_subclassing.py new file mode 100644 index 0000000..22bece9 --- /dev/null +++ b/local_packages/numpy/ma/tests/test_subclassing.py @@ -0,0 +1,469 @@ +"""Tests suite for MaskedArray & subclassing. + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu + +""" +import numpy as np +from numpy.lib.mixins import NDArrayOperatorsMixin +from numpy.ma.core import ( + MaskedArray, + add, + arange, + array, + asanyarray, + asarray, + divide, + hypot, + log, + masked, + masked_array, + nomask, +) +from numpy.ma.testutils import assert_equal +from numpy.testing import assert_, assert_raises + +# from numpy.ma.core import ( + +def assert_startswith(a, b): + # produces a better error message than assert_(a.startswith(b)) + assert_equal(a[:len(b)], b) + +class SubArray(np.ndarray): + # Defines a generic np.ndarray subclass, that stores some metadata + # in the dictionary `info`. + def __new__(cls, arr, info={}): + x = np.asanyarray(arr).view(cls) + x.info = info.copy() + return x + + def __array_finalize__(self, obj): + super().__array_finalize__(obj) + self.info = getattr(obj, 'info', {}).copy() + + def __add__(self, other): + result = super().__add__(other) + result.info['added'] = result.info.get('added', 0) + 1 + return result + + def __iadd__(self, other): + result = super().__iadd__(other) + result.info['iadded'] = result.info.get('iadded', 0) + 1 + return result + + +subarray = SubArray + + +class SubMaskedArray(MaskedArray): + """Pure subclass of MaskedArray, keeping some info on subclass.""" + def __new__(cls, info=None, **kwargs): + obj = super().__new__(cls, **kwargs) + obj._optinfo['info'] = info + return obj + + +class MSubArray(SubArray, MaskedArray): + + def __new__(cls, data, info={}, mask=nomask): + subarr = SubArray(data, info) + _data = MaskedArray.__new__(cls, data=subarr, mask=mask) + _data.info = subarr.info + return _data + + @property + def _series(self): + _view = self.view(MaskedArray) + _view._sharedmask = False + return _view + + +msubarray = MSubArray + + +# Also a subclass that overrides __str__, __repr__ and __setitem__, disallowing +# setting to non-class values (and thus np.ma.core.masked_print_option) +# and overrides __array_wrap__, updating the info dict, to check that this +# doesn't get destroyed by MaskedArray._update_from. But this one also needs +# its own iterator... +class CSAIterator: + """ + Flat iterator object that uses its own setter/getter + (works around ndarray.flat not propagating subclass setters/getters + see https://github.com/numpy/numpy/issues/4564) + roughly following MaskedIterator + """ + def __init__(self, a): + self._original = a + self._dataiter = a.view(np.ndarray).flat + + def __iter__(self): + return self + + def __getitem__(self, indx): + out = self._dataiter.__getitem__(indx) + if not isinstance(out, np.ndarray): + out = out.__array__() + out = out.view(type(self._original)) + return out + + def __setitem__(self, index, value): + self._dataiter[index] = self._original._validate_input(value) + + def __next__(self): + return next(self._dataiter).__array__().view(type(self._original)) + + +class ComplicatedSubArray(SubArray): + + def __str__(self): + return f'myprefix {self.view(SubArray)} mypostfix' + + def __repr__(self): + # Return a repr that does not start with 'name(' + return f'<{self.__class__.__name__} {self}>' + + def _validate_input(self, value): + if not isinstance(value, ComplicatedSubArray): + raise ValueError("Can only set to MySubArray values") + return value + + def __setitem__(self, item, value): + # validation ensures direct assignment with ndarray or + # masked_print_option will fail + super().__setitem__(item, self._validate_input(value)) + + def __getitem__(self, item): + # ensure getter returns our own class also for scalars + value = super().__getitem__(item) + if not isinstance(value, np.ndarray): # scalar + value = value.__array__().view(ComplicatedSubArray) + return value + + @property + def flat(self): + return CSAIterator(self) + + @flat.setter + def flat(self, value): + y = self.ravel() + y[:] = value + + def __array_wrap__(self, obj, context=None, return_scalar=False): + obj = super().__array_wrap__(obj, context, return_scalar) + if context is not None and context[0] is np.multiply: + obj.info['multiplied'] = obj.info.get('multiplied', 0) + 1 + + return obj + + +class WrappedArray(NDArrayOperatorsMixin): + """ + Wrapping a MaskedArray rather than subclassing to test that + ufunc deferrals are commutative. + See: https://github.com/numpy/numpy/issues/15200) + """ + __slots__ = ('_array', 'attrs') + __array_priority__ = 20 + + def __init__(self, array, **attrs): + self._array = array + self.attrs = attrs + + def __repr__(self): + return f"{self.__class__.__name__}(\n{self._array}\n{self.attrs}\n)" + + def __array__(self, dtype=None, copy=None): + return np.asarray(self._array) + + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + if method == '__call__': + inputs = [arg._array if isinstance(arg, self.__class__) else arg + for arg in inputs] + return self.__class__(ufunc(*inputs, **kwargs), **self.attrs) + else: + return NotImplemented + + +class TestSubclassing: + # Test suite for masked subclasses of ndarray. + + def _create_data(self): + x = np.arange(5, dtype='float') + mx = msubarray(x, mask=[0, 1, 0, 0, 0]) + return x, mx + + def test_data_subclassing(self): + # Tests whether the subclass is kept. + x = np.arange(5) + m = [0, 0, 1, 0, 0] + xsub = SubArray(x) + xmsub = masked_array(xsub, mask=m) + assert_(isinstance(xmsub, MaskedArray)) + assert_equal(xmsub._data, xsub) + assert_(isinstance(xmsub._data, SubArray)) + + def test_maskedarray_subclassing(self): + # Tests subclassing MaskedArray + mx = self._create_data()[1] + assert_(isinstance(mx._data, subarray)) + + def test_masked_unary_operations(self): + # Tests masked_unary_operation + x, mx = self._create_data() + with np.errstate(divide='ignore'): + assert_(isinstance(log(mx), msubarray)) + assert_equal(log(x), np.log(x)) + + def test_masked_binary_operations(self): + # Tests masked_binary_operation + x, mx = self._create_data() + # Result should be a msubarray + assert_(isinstance(add(mx, mx), msubarray)) + assert_(isinstance(add(mx, x), msubarray)) + # Result should work + assert_equal(add(mx, x), mx + x) + assert_(isinstance(add(mx, mx)._data, subarray)) + assert_(isinstance(add.outer(mx, mx), msubarray)) + assert_(isinstance(hypot(mx, mx), msubarray)) + assert_(isinstance(hypot(mx, x), msubarray)) + + def test_masked_binary_operations2(self): + # Tests domained_masked_binary_operation + x, mx = self._create_data() + xmx = masked_array(mx.data.__array__(), mask=mx.mask) + assert_(isinstance(divide(mx, mx), msubarray)) + assert_(isinstance(divide(mx, x), msubarray)) + assert_equal(divide(mx, mx), divide(xmx, xmx)) + + def test_attributepropagation(self): + x = array(arange(5), mask=[0] + [1] * 4) + my = masked_array(subarray(x)) + ym = msubarray(x) + # + z = (my + 1) + assert_(isinstance(z, MaskedArray)) + assert_(not isinstance(z, MSubArray)) + assert_(isinstance(z._data, SubArray)) + assert_equal(z._data.info, {}) + # + z = (ym + 1) + assert_(isinstance(z, MaskedArray)) + assert_(isinstance(z, MSubArray)) + assert_(isinstance(z._data, SubArray)) + assert_(z._data.info['added'] > 0) + # Test that inplace methods from data get used (gh-4617) + ym += 1 + assert_(isinstance(ym, MaskedArray)) + assert_(isinstance(ym, MSubArray)) + assert_(isinstance(ym._data, SubArray)) + assert_(ym._data.info['iadded'] > 0) + # + ym._set_mask([1, 0, 0, 0, 1]) + assert_equal(ym._mask, [1, 0, 0, 0, 1]) + ym._series._set_mask([0, 0, 0, 0, 1]) + assert_equal(ym._mask, [0, 0, 0, 0, 1]) + # + xsub = subarray(x, info={'name': 'x'}) + mxsub = masked_array(xsub) + assert_(hasattr(mxsub, 'info')) + assert_equal(mxsub.info, xsub.info) + + def test_subclasspreservation(self): + # Checks that masked_array(...,subok=True) preserves the class. + x = np.arange(5) + m = [0, 0, 1, 0, 0] + xinfo = list(zip(x, m)) + xsub = MSubArray(x, mask=m, info={'xsub': xinfo}) + # + mxsub = masked_array(xsub, subok=False) + assert_(not isinstance(mxsub, MSubArray)) + assert_(isinstance(mxsub, MaskedArray)) + assert_equal(mxsub._mask, m) + # + mxsub = asarray(xsub) + assert_(not isinstance(mxsub, MSubArray)) + assert_(isinstance(mxsub, MaskedArray)) + assert_equal(mxsub._mask, m) + # + mxsub = masked_array(xsub, subok=True) + assert_(isinstance(mxsub, MSubArray)) + assert_equal(mxsub.info, xsub.info) + assert_equal(mxsub._mask, xsub._mask) + # + mxsub = asanyarray(xsub) + assert_(isinstance(mxsub, MSubArray)) + assert_equal(mxsub.info, xsub.info) + assert_equal(mxsub._mask, m) + + def test_subclass_items(self): + """test that getter and setter go via baseclass""" + x = np.arange(5) + xcsub = ComplicatedSubArray(x) + mxcsub = masked_array(xcsub, mask=[True, False, True, False, False]) + # getter should return a ComplicatedSubArray, even for single item + # first check we wrote ComplicatedSubArray correctly + assert_(isinstance(xcsub[1], ComplicatedSubArray)) + assert_(isinstance(xcsub[1, ...], ComplicatedSubArray)) + assert_(isinstance(xcsub[1:4], ComplicatedSubArray)) + + # now that it propagates inside the MaskedArray + assert_(isinstance(mxcsub[1], ComplicatedSubArray)) + assert_(isinstance(mxcsub[1, ...].data, ComplicatedSubArray)) + assert_(mxcsub[0] is masked) + assert_(isinstance(mxcsub[0, ...].data, ComplicatedSubArray)) + assert_(isinstance(mxcsub[1:4].data, ComplicatedSubArray)) + + # also for flattened version (which goes via MaskedIterator) + assert_(isinstance(mxcsub.flat[1].data, ComplicatedSubArray)) + assert_(mxcsub.flat[0] is masked) + assert_(isinstance(mxcsub.flat[1:4].base, ComplicatedSubArray)) + + # setter should only work with ComplicatedSubArray input + # first check we wrote ComplicatedSubArray correctly + assert_raises(ValueError, xcsub.__setitem__, 1, x[4]) + # now that it propagates inside the MaskedArray + assert_raises(ValueError, mxcsub.__setitem__, 1, x[4]) + assert_raises(ValueError, mxcsub.__setitem__, slice(1, 4), x[1:4]) + mxcsub[1] = xcsub[4] + mxcsub[1:4] = xcsub[1:4] + # also for flattened version (which goes via MaskedIterator) + assert_raises(ValueError, mxcsub.flat.__setitem__, 1, x[4]) + assert_raises(ValueError, mxcsub.flat.__setitem__, slice(1, 4), x[1:4]) + mxcsub.flat[1] = xcsub[4] + mxcsub.flat[1:4] = xcsub[1:4] + + def test_subclass_nomask_items(self): + x = np.arange(5) + xcsub = ComplicatedSubArray(x) + mxcsub_nomask = masked_array(xcsub) + + assert_(isinstance(mxcsub_nomask[1, ...].data, ComplicatedSubArray)) + assert_(isinstance(mxcsub_nomask[0, ...].data, ComplicatedSubArray)) + + assert_(isinstance(mxcsub_nomask[1], ComplicatedSubArray)) + assert_(isinstance(mxcsub_nomask[0], ComplicatedSubArray)) + + def test_subclass_repr(self): + """test that repr uses the name of the subclass + and 'array' for np.ndarray""" + x = np.arange(5) + mx = masked_array(x, mask=[True, False, True, False, False]) + assert_startswith(repr(mx), 'masked_array') + xsub = SubArray(x) + mxsub = masked_array(xsub, mask=[True, False, True, False, False]) + assert_startswith(repr(mxsub), + f'masked_{SubArray.__name__}(data=[--, 1, --, 3, 4]') + + def test_subclass_str(self): + """test str with subclass that has overridden str, setitem""" + # first without override + x = np.arange(5) + xsub = SubArray(x) + mxsub = masked_array(xsub, mask=[True, False, True, False, False]) + assert_equal(str(mxsub), '[-- 1 -- 3 4]') + + xcsub = ComplicatedSubArray(x) + assert_raises(ValueError, xcsub.__setitem__, 0, + np.ma.core.masked_print_option) + mxcsub = masked_array(xcsub, mask=[True, False, True, False, False]) + assert_equal(str(mxcsub), 'myprefix [-- 1 -- 3 4] mypostfix') + + def test_pure_subclass_info_preservation(self): + # Test that ufuncs and methods conserve extra information consistently; + # see gh-7122. + arr1 = SubMaskedArray('test', data=[1, 2, 3, 4, 5, 6]) + arr2 = SubMaskedArray(data=[0, 1, 2, 3, 4, 5]) + diff1 = np.subtract(arr1, arr2) + assert_('info' in diff1._optinfo) + assert_(diff1._optinfo['info'] == 'test') + diff2 = arr1 - arr2 + assert_('info' in diff2._optinfo) + assert_(diff2._optinfo['info'] == 'test') + + +class ArrayNoInheritance: + """Quantity-like class that does not inherit from ndarray""" + def __init__(self, data, units): + self.magnitude = data + self.units = units + + def __getattr__(self, attr): + return getattr(self.magnitude, attr) + + +def test_array_no_inheritance(): + data_masked = np.ma.array([1, 2, 3], mask=[True, False, True]) + data_masked_units = ArrayNoInheritance(data_masked, 'meters') + + # Get the masked representation of the Quantity-like class + new_array = np.ma.array(data_masked_units) + assert_equal(data_masked.data, new_array.data) + assert_equal(data_masked.mask, new_array.mask) + # Test sharing the mask + data_masked.mask = [True, False, False] + assert_equal(data_masked.mask, new_array.mask) + assert_(new_array.sharedmask) + + # Get the masked representation of the Quantity-like class + new_array = np.ma.array(data_masked_units, copy=True) + assert_equal(data_masked.data, new_array.data) + assert_equal(data_masked.mask, new_array.mask) + # Test that the mask is not shared when copy=True + data_masked.mask = [True, False, True] + assert_equal([True, False, False], new_array.mask) + assert_(not new_array.sharedmask) + + # Get the masked representation of the Quantity-like class + new_array = np.ma.array(data_masked_units, keep_mask=False) + assert_equal(data_masked.data, new_array.data) + # The change did not affect the original mask + assert_equal(data_masked.mask, [True, False, True]) + # Test that the mask is False and not shared when keep_mask=False + assert_(not new_array.mask) + assert_(not new_array.sharedmask) + + +class TestClassWrapping: + # Test suite for classes that wrap MaskedArrays + + def _create_data(self): + m = np.ma.masked_array([1, 3, 5], mask=[False, True, False]) + wm = WrappedArray(m) + return m, wm + + def test_masked_unary_operations(self): + # Tests masked_unary_operation + wm = self._create_data()[1] + with np.errstate(divide='ignore'): + assert_(isinstance(np.log(wm), WrappedArray)) + + def test_masked_binary_operations(self): + # Tests masked_binary_operation + m, wm = self._create_data() + # Result should be a WrappedArray + assert_(isinstance(np.add(wm, wm), WrappedArray)) + assert_(isinstance(np.add(m, wm), WrappedArray)) + assert_(isinstance(np.add(wm, m), WrappedArray)) + # add and '+' should call the same ufunc + assert_equal(np.add(m, wm), m + wm) + assert_(isinstance(np.hypot(m, wm), WrappedArray)) + assert_(isinstance(np.hypot(wm, m), WrappedArray)) + # Test domained binary operations + assert_(isinstance(np.divide(wm, m), WrappedArray)) + assert_(isinstance(np.divide(m, wm), WrappedArray)) + assert_equal(np.divide(wm, m) * m, np.divide(m, m) * wm) + # Test broadcasting + m2 = np.stack([m, m]) + assert_(isinstance(np.divide(wm, m2), WrappedArray)) + assert_(isinstance(np.divide(m2, wm), WrappedArray)) + assert_equal(np.divide(m2, wm), np.divide(wm, m2)) + + def test_mixins_have_slots(self): + mixin = NDArrayOperatorsMixin() + # Should raise an error + assert_raises(AttributeError, mixin.__setattr__, "not_a_real_attr", 1) + + m = np.ma.masked_array([1, 3, 5], mask=[False, True, False]) + wm = WrappedArray(m) + assert_raises(AttributeError, wm.__setattr__, "not_an_attr", 2) diff --git a/local_packages/numpy/ma/testutils.py b/local_packages/numpy/ma/testutils.py new file mode 100644 index 0000000..0df3b17 --- /dev/null +++ b/local_packages/numpy/ma/testutils.py @@ -0,0 +1,294 @@ +"""Miscellaneous functions for testing masked arrays and subclasses + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu + +""" +import operator + +import numpy as np +import numpy._core.umath as umath +import numpy.testing +from numpy import ndarray +from numpy.testing import ( # noqa: F401 + assert_, + assert_allclose, + assert_array_almost_equal_nulp, + assert_raises, + build_err_msg, +) + +from .core import filled, getmask, mask_or, masked, masked_array, nomask + +__all__masked = [ + 'almost', 'approx', 'assert_almost_equal', 'assert_array_almost_equal', + 'assert_array_approx_equal', 'assert_array_compare', + 'assert_array_equal', 'assert_array_less', 'assert_close', + 'assert_equal', 'assert_equal_records', 'assert_mask_equal', + 'assert_not_equal', 'fail_if_array_equal', + ] + +# Include some normal test functions to avoid breaking other projects who +# have mistakenly included them from this file. SciPy is one. That is +# unfortunate, as some of these functions are not intended to work with +# masked arrays. But there was no way to tell before. +from unittest import TestCase # noqa: F401 + +__some__from_testing = [ + 'TestCase', 'assert_', 'assert_allclose', 'assert_array_almost_equal_nulp', + 'assert_raises' + ] + +__all__ = __all__masked + __some__from_testing # noqa: PLE0605 + + +def approx(a, b, fill_value=True, rtol=1e-5, atol=1e-8): + """ + Returns true if all components of a and b are equal to given tolerances. + + If fill_value is True, masked values considered equal. Otherwise, + masked values are considered unequal. The relative error rtol should + be positive and << 1.0 The absolute error atol comes into play for + those elements of b that are very small or zero; it says how small a + must be also. + + """ + m = mask_or(getmask(a), getmask(b)) + d1 = filled(a) + d2 = filled(b) + if d1.dtype.char == "O" or d2.dtype.char == "O": + return np.equal(d1, d2).ravel() + x = filled( + masked_array(d1, copy=False, mask=m), fill_value + ).astype(np.float64) + y = filled(masked_array(d2, copy=False, mask=m), 1).astype(np.float64) + d = np.less_equal(umath.absolute(x - y), atol + rtol * umath.absolute(y)) + return d.ravel() + + +def almost(a, b, decimal=6, fill_value=True): + """ + Returns True if a and b are equal up to decimal places. + + If fill_value is True, masked values considered equal. Otherwise, + masked values are considered unequal. + + """ + m = mask_or(getmask(a), getmask(b)) + d1 = filled(a) + d2 = filled(b) + if d1.dtype.char == "O" or d2.dtype.char == "O": + return np.equal(d1, d2).ravel() + x = filled( + masked_array(d1, copy=False, mask=m), fill_value + ).astype(np.float64) + y = filled(masked_array(d2, copy=False, mask=m), 1).astype(np.float64) + d = np.around(np.abs(x - y), decimal) <= 10.0 ** (-decimal) + return d.ravel() + + +def _assert_equal_on_sequences(actual, desired, err_msg=''): + """ + Asserts the equality of two non-array sequences. + + """ + assert_equal(len(actual), len(desired), err_msg) + for k in range(len(desired)): + assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}') + + +def assert_equal_records(a, b): + """ + Asserts that two records are equal. + + Pretty crude for now. + + """ + assert_equal(a.dtype, b.dtype) + for f in a.dtype.names: + (af, bf) = (operator.getitem(a, f), operator.getitem(b, f)) + if not (af is masked) and not (bf is masked): + assert_equal(operator.getitem(a, f), operator.getitem(b, f)) + + +def assert_equal(actual, desired, err_msg=''): + """ + Asserts that two items are equal. + + """ + # Case #1: dictionary ..... + if isinstance(desired, dict): + if not isinstance(actual, dict): + raise AssertionError(repr(type(actual))) + assert_equal(len(actual), len(desired), err_msg) + for k in desired: + if k not in actual: + raise AssertionError(f"{k} not in {actual}") + assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}') + return + # Case #2: lists ..... + if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): + return _assert_equal_on_sequences(actual, desired, err_msg='') + if not (isinstance(actual, ndarray) or isinstance(desired, ndarray)): + msg = build_err_msg([actual, desired], err_msg,) + if not desired == actual: + raise AssertionError(msg) + return + # Case #4. arrays or equivalent + if ((actual is masked) and not (desired is masked)) or \ + ((desired is masked) and not (actual is masked)): + msg = build_err_msg([actual, desired], + err_msg, header='', names=('x', 'y')) + raise ValueError(msg) + actual = np.asanyarray(actual) + desired = np.asanyarray(desired) + (actual_dtype, desired_dtype) = (actual.dtype, desired.dtype) + if actual_dtype.char == "S" and desired_dtype.char == "S": + return _assert_equal_on_sequences(actual.tolist(), + desired.tolist(), + err_msg='') + return assert_array_equal(actual, desired, err_msg) + + +def fail_if_equal(actual, desired, err_msg='',): + """ + Raises an assertion error if two items are equal. + + """ + if isinstance(desired, dict): + if not isinstance(actual, dict): + raise AssertionError(repr(type(actual))) + fail_if_equal(len(actual), len(desired), err_msg) + for k in desired: + if k not in actual: + raise AssertionError(repr(k)) + fail_if_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}') + return + if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): + fail_if_equal(len(actual), len(desired), err_msg) + for k in range(len(desired)): + fail_if_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}') + return + if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray): + return fail_if_array_equal(actual, desired, err_msg) + msg = build_err_msg([actual, desired], err_msg) + if not desired != actual: + raise AssertionError(msg) + + +assert_not_equal = fail_if_equal + + +def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True): + """ + Asserts that two items are almost equal. + + The test is equivalent to abs(desired-actual) < 0.5 * 10**(-decimal). + + """ + if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray): + return assert_array_almost_equal(actual, desired, decimal=decimal, + err_msg=err_msg, verbose=verbose) + msg = build_err_msg([actual, desired], + err_msg=err_msg, verbose=verbose) + if not round(abs(desired - actual), decimal) == 0: + raise AssertionError(msg) + + +assert_close = assert_almost_equal + + +def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='', + fill_value=True): + """ + Asserts that comparison between two masked arrays is satisfied. + + The comparison is elementwise. + + """ + # Allocate a common mask and refill + m = mask_or(getmask(x), getmask(y)) + x = masked_array(x, copy=False, mask=m, keep_mask=False, subok=False) + y = masked_array(y, copy=False, mask=m, keep_mask=False, subok=False) + if ((x is masked) and not (y is masked)) or \ + ((y is masked) and not (x is masked)): + msg = build_err_msg([x, y], err_msg=err_msg, verbose=verbose, + header=header, names=('x', 'y')) + raise ValueError(msg) + # OK, now run the basic tests on filled versions + return np.testing.assert_array_compare(comparison, + x.filled(fill_value), + y.filled(fill_value), + err_msg=err_msg, + verbose=verbose, header=header) + + +def assert_array_equal(x, y, err_msg='', verbose=True): + """ + Checks the elementwise equality of two masked arrays. + + """ + assert_array_compare(operator.__eq__, x, y, + err_msg=err_msg, verbose=verbose, + header='Arrays are not equal') + + +def fail_if_array_equal(x, y, err_msg='', verbose=True): + """ + Raises an assertion error if two masked arrays are not equal elementwise. + + """ + def compare(x, y): + return (not np.all(approx(x, y))) + assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, + header='Arrays are not equal') + + +def assert_array_approx_equal(x, y, decimal=6, err_msg='', verbose=True): + """ + Checks the equality of two masked arrays, up to given number odecimals. + + The equality is checked elementwise. + + """ + def compare(x, y): + "Returns the result of the loose comparison between x and y)." + return approx(x, y, rtol=10. ** -decimal) + assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, + header='Arrays are not almost equal') + + +def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True): + """ + Checks the equality of two masked arrays, up to given number odecimals. + + The equality is checked elementwise. + + """ + def compare(x, y): + "Returns the result of the loose comparison between x and y)." + return almost(x, y, decimal) + assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, + header='Arrays are not almost equal') + + +def assert_array_less(x, y, err_msg='', verbose=True): + """ + Checks that x is smaller than y elementwise. + + """ + assert_array_compare(operator.__lt__, x, y, + err_msg=err_msg, verbose=verbose, + header='Arrays are not less-ordered') + + +def assert_mask_equal(m1, m2, err_msg=''): + """ + Asserts the equality of two masks. + + """ + if m1 is nomask: + assert_(m2 is nomask) + if m2 is nomask: + assert_(m1 is nomask) + assert_array_equal(m1, m2, err_msg=err_msg) diff --git a/local_packages/numpy/ma/testutils.pyi b/local_packages/numpy/ma/testutils.pyi new file mode 100644 index 0000000..92b843b --- /dev/null +++ b/local_packages/numpy/ma/testutils.pyi @@ -0,0 +1,69 @@ +import numpy as np +from numpy._typing import NDArray +from numpy.testing import ( + TestCase, + assert_, + assert_allclose, + assert_array_almost_equal_nulp, + assert_raises, +) +from numpy.testing._private.utils import _ComparisonFunc + +__all__ = [ + "TestCase", + "almost", + "approx", + "assert_", + "assert_allclose", + "assert_almost_equal", + "assert_array_almost_equal", + "assert_array_almost_equal_nulp", + "assert_array_approx_equal", + "assert_array_compare", + "assert_array_equal", + "assert_array_less", + "assert_close", + "assert_equal", + "assert_equal_records", + "assert_mask_equal", + "assert_not_equal", + "assert_raises", + "fail_if_array_equal", +] + +def approx( + a: object, b: object, fill_value: bool = True, rtol: float = 1e-5, atol: float = 1e-8 +) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ... +def almost(a: object, b: object, decimal: int = 6, fill_value: bool = True) -> np.ndarray[tuple[int], np.dtype[np.bool]]: ... + +# +def assert_equal_records(a: NDArray[np.void], b: NDArray[np.void]) -> None: ... +def assert_equal(actual: object, desired: object, err_msg: str = "") -> None: ... +def fail_if_equal(actual: object, desired: object, err_msg: str = "") -> None: ... +def assert_almost_equal( + actual: object, desired: object, decimal: int = 7, err_msg: str = "", verbose: bool = True +) -> None: ... + +# +def assert_array_compare( + comparison: _ComparisonFunc, + x: object, + y: object, + err_msg: str = "", + verbose: bool = True, + header: str = "", + fill_value: bool = True, +) -> None: ... +def assert_array_equal(x: object, y: object, err_msg: str = "", verbose: bool = True) -> None: ... +def fail_if_array_equal(x: object, y: object, err_msg: str = "", verbose: bool = True) -> None: ... +def assert_array_approx_equal( + x: object, y: object, decimal: int = 6, err_msg: str = "", verbose: bool = True +) -> None: ... +def assert_array_almost_equal( + x: object, y: object, decimal: int = 6, err_msg: str = "", verbose: bool = True +) -> None: ... +def assert_array_less(x: object, y: object, err_msg: str = "", verbose: bool = True) -> None: ... +def assert_mask_equal(m1: object, m2: object, err_msg: str = "") -> None: ... + +assert_not_equal = fail_if_equal +assert_close = assert_almost_equal diff --git a/local_packages/numpy/matlib.py b/local_packages/numpy/matlib.py new file mode 100644 index 0000000..f27d503 --- /dev/null +++ b/local_packages/numpy/matlib.py @@ -0,0 +1,380 @@ +import warnings + +# 2018-05-29, PendingDeprecationWarning added to matrix.__new__ +# 2020-01-23, numpy 1.19.0 PendingDeprecatonWarning +warnings.warn("Importing from numpy.matlib is deprecated since 1.19.0. " + "The matrix subclass is not the recommended way to represent " + "matrices or deal with linear algebra (see " + "https://docs.scipy.org/doc/numpy/user/numpy-for-matlab-users.html). " + "Please adjust your code to use regular ndarray. ", + PendingDeprecationWarning, stacklevel=2) + +import numpy as np + +# Matlib.py contains all functions in the numpy namespace with a few +# replacements. See doc/source/reference/routines.matlib.rst for details. +# Need * as we're copying the numpy namespace. +from numpy import * # noqa: F403 +from numpy.matrixlib.defmatrix import asmatrix, matrix + +__version__ = np.__version__ + +__all__ = ['rand', 'randn', 'repmat'] +__all__ += np.__all__ + +def empty(shape, dtype=None, order='C'): + """Return a new matrix of given shape and type, without initializing entries. + + Parameters + ---------- + shape : int or tuple of int + Shape of the empty matrix. + dtype : data-type, optional + Desired output data-type. + order : {'C', 'F'}, optional + Whether to store multi-dimensional data in row-major + (C-style) or column-major (Fortran-style) order in + memory. + + See Also + -------- + numpy.empty : Equivalent array function. + matlib.zeros : Return a matrix of zeros. + matlib.ones : Return a matrix of ones. + + Notes + ----- + Unlike other matrix creation functions (e.g. `matlib.zeros`, + `matlib.ones`), `matlib.empty` does not initialize the values of the + matrix, and may therefore be marginally faster. However, the values + stored in the newly allocated matrix are arbitrary. For reproducible + behavior, be sure to set each element of the matrix before reading. + + Examples + -------- + >>> import numpy.matlib + >>> np.matlib.empty((2, 2)) # filled with random data + matrix([[ 6.76425276e-320, 9.79033856e-307], # random + [ 7.39337286e-309, 3.22135945e-309]]) + >>> np.matlib.empty((2, 2), dtype=int) + matrix([[ 6600475, 0], # random + [ 6586976, 22740995]]) + + """ + return ndarray.__new__(matrix, shape, dtype, order=order) + +def ones(shape, dtype=None, order='C'): + """ + Matrix of ones. + + Return a matrix of given shape and type, filled with ones. + + Parameters + ---------- + shape : {sequence of ints, int} + Shape of the matrix + dtype : data-type, optional + The desired data-type for the matrix, default is np.float64. + order : {'C', 'F'}, optional + Whether to store matrix in C- or Fortran-contiguous order, + default is 'C'. + + Returns + ------- + out : matrix + Matrix of ones of given shape, dtype, and order. + + See Also + -------- + ones : Array of ones. + matlib.zeros : Zero matrix. + + Notes + ----- + If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``, + `out` becomes a single row matrix of shape ``(1,N)``. + + Examples + -------- + >>> np.matlib.ones((2,3)) + matrix([[1., 1., 1.], + [1., 1., 1.]]) + + >>> np.matlib.ones(2) + matrix([[1., 1.]]) + + """ + a = ndarray.__new__(matrix, shape, dtype, order=order) + a.fill(1) + return a + +def zeros(shape, dtype=None, order='C'): + """ + Return a matrix of given shape and type, filled with zeros. + + Parameters + ---------- + shape : int or sequence of ints + Shape of the matrix + dtype : data-type, optional + The desired data-type for the matrix, default is float. + order : {'C', 'F'}, optional + Whether to store the result in C- or Fortran-contiguous order, + default is 'C'. + + Returns + ------- + out : matrix + Zero matrix of given shape, dtype, and order. + + See Also + -------- + numpy.zeros : Equivalent array function. + matlib.ones : Return a matrix of ones. + + Notes + ----- + If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``, + `out` becomes a single row matrix of shape ``(1,N)``. + + Examples + -------- + >>> import numpy.matlib + >>> np.matlib.zeros((2, 3)) + matrix([[0., 0., 0.], + [0., 0., 0.]]) + + >>> np.matlib.zeros(2) + matrix([[0., 0.]]) + + """ + a = ndarray.__new__(matrix, shape, dtype, order=order) + a.fill(0) + return a + +def identity(n, dtype=None): + """ + Returns the square identity matrix of given size. + + Parameters + ---------- + n : int + Size of the returned identity matrix. + dtype : data-type, optional + Data-type of the output. Defaults to ``float``. + + Returns + ------- + out : matrix + `n` x `n` matrix with its main diagonal set to one, + and all other elements zero. + + See Also + -------- + numpy.identity : Equivalent array function. + matlib.eye : More general matrix identity function. + + Examples + -------- + >>> import numpy.matlib + >>> np.matlib.identity(3, dtype=int) + matrix([[1, 0, 0], + [0, 1, 0], + [0, 0, 1]]) + + """ + a = array([1] + n * [0], dtype=dtype) + b = empty((n, n), dtype=dtype) + b.flat = a + return b + +def eye(n, M=None, k=0, dtype=float, order='C'): + """ + Return a matrix with ones on the diagonal and zeros elsewhere. + + Parameters + ---------- + n : int + Number of rows in the output. + M : int, optional + Number of columns in the output, defaults to `n`. + k : int, optional + Index of the diagonal: 0 refers to the main diagonal, + a positive value refers to an upper diagonal, + and a negative value to a lower diagonal. + dtype : dtype, optional + Data-type of the returned matrix. + order : {'C', 'F'}, optional + Whether the output should be stored in row-major (C-style) or + column-major (Fortran-style) order in memory. + + Returns + ------- + I : matrix + A `n` x `M` matrix where all elements are equal to zero, + except for the `k`-th diagonal, whose values are equal to one. + + See Also + -------- + numpy.eye : Equivalent array function. + identity : Square identity matrix. + + Examples + -------- + >>> import numpy.matlib + >>> np.matlib.eye(3, k=1, dtype=float) + matrix([[0., 1., 0.], + [0., 0., 1.], + [0., 0., 0.]]) + + """ + return asmatrix(np.eye(n, M=M, k=k, dtype=dtype, order=order)) + +def rand(*args): + """ + Return a matrix of random values with given shape. + + Create a matrix of the given shape and propagate it with + random samples from a uniform distribution over ``[0, 1)``. + + Parameters + ---------- + \\*args : Arguments + Shape of the output. + If given as N integers, each integer specifies the size of one + dimension. + If given as a tuple, this tuple gives the complete shape. + + Returns + ------- + out : ndarray + The matrix of random values with shape given by `\\*args`. + + See Also + -------- + randn, numpy.random.RandomState.rand + + Examples + -------- + >>> np.random.seed(123) + >>> import numpy.matlib + >>> np.matlib.rand(2, 3) + matrix([[0.69646919, 0.28613933, 0.22685145], + [0.55131477, 0.71946897, 0.42310646]]) + >>> np.matlib.rand((2, 3)) + matrix([[0.9807642 , 0.68482974, 0.4809319 ], + [0.39211752, 0.34317802, 0.72904971]]) + + If the first argument is a tuple, other arguments are ignored: + + >>> np.matlib.rand((2, 3), 4) + matrix([[0.43857224, 0.0596779 , 0.39804426], + [0.73799541, 0.18249173, 0.17545176]]) + + """ + if isinstance(args[0], tuple): + args = args[0] + return asmatrix(np.random.rand(*args)) + +def randn(*args): + """ + Return a random matrix with data from the "standard normal" distribution. + + `randn` generates a matrix filled with random floats sampled from a + univariate "normal" (Gaussian) distribution of mean 0 and variance 1. + + Parameters + ---------- + \\*args : Arguments + Shape of the output. + If given as N integers, each integer specifies the size of one + dimension. If given as a tuple, this tuple gives the complete shape. + + Returns + ------- + Z : matrix of floats + A matrix of floating-point samples drawn from the standard normal + distribution. + + See Also + -------- + rand, numpy.random.RandomState.randn + + Notes + ----- + For random samples from the normal distribution with mean ``mu`` and + standard deviation ``sigma``, use:: + + sigma * np.matlib.randn(...) + mu + + Examples + -------- + >>> np.random.seed(123) + >>> import numpy.matlib + >>> np.matlib.randn(1) + matrix([[-1.0856306]]) + >>> np.matlib.randn(1, 2, 3) + matrix([[ 0.99734545, 0.2829785 , -1.50629471], + [-0.57860025, 1.65143654, -2.42667924]]) + + Two-by-four matrix of samples from the normal distribution with + mean 3 and standard deviation 2.5: + + >>> 2.5 * np.matlib.randn((2, 4)) + 3 + matrix([[1.92771843, 6.16484065, 0.83314899, 1.30278462], + [2.76322758, 6.72847407, 1.40274501, 1.8900451 ]]) + + """ + if isinstance(args[0], tuple): + args = args[0] + return asmatrix(np.random.randn(*args)) + +def repmat(a, m, n): + """ + Repeat a 0-D to 2-D array or matrix MxN times. + + Parameters + ---------- + a : array_like + The array or matrix to be repeated. + m, n : int + The number of times `a` is repeated along the first and second axes. + + Returns + ------- + out : ndarray + The result of repeating `a`. + + Examples + -------- + >>> import numpy.matlib + >>> a0 = np.array(1) + >>> np.matlib.repmat(a0, 2, 3) + array([[1, 1, 1], + [1, 1, 1]]) + + >>> a1 = np.arange(4) + >>> np.matlib.repmat(a1, 2, 2) + array([[0, 1, 2, 3, 0, 1, 2, 3], + [0, 1, 2, 3, 0, 1, 2, 3]]) + + >>> a2 = np.asmatrix(np.arange(6).reshape(2, 3)) + >>> np.matlib.repmat(a2, 2, 3) + matrix([[0, 1, 2, 0, 1, 2, 0, 1, 2], + [3, 4, 5, 3, 4, 5, 3, 4, 5], + [0, 1, 2, 0, 1, 2, 0, 1, 2], + [3, 4, 5, 3, 4, 5, 3, 4, 5]]) + + """ + a = asanyarray(a) + ndim = a.ndim + if ndim == 0: + origrows, origcols = (1, 1) + elif ndim == 1: + origrows, origcols = (1, a.shape[0]) + else: + origrows, origcols = a.shape + rows = origrows * m + cols = origcols * n + c = a.reshape(1, a.size).repeat(m, 0).reshape(rows, origcols).repeat(n, 0) + return c.reshape(rows, cols) diff --git a/local_packages/numpy/matlib.pyi b/local_packages/numpy/matlib.pyi new file mode 100644 index 0000000..f73e941 --- /dev/null +++ b/local_packages/numpy/matlib.pyi @@ -0,0 +1,582 @@ +from typing import Any, Literal, TypeAlias, TypeVar, overload + +import numpy as np +import numpy.typing as npt +from numpy import ( # noqa: F401 + False_, + ScalarType, + True_, + __array_namespace_info__, + __version__, + abs, + absolute, + acos, + acosh, + add, + all, + allclose, + amax, + amin, + angle, + any, + append, + apply_along_axis, + apply_over_axes, + arange, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + argmax, + argmin, + argpartition, + argsort, + argwhere, + around, + array, + array2string, + array_equal, + array_equiv, + array_repr, + array_split, + array_str, + asanyarray, + asarray, + asarray_chkfinite, + ascontiguousarray, + asfortranarray, + asin, + asinh, + asmatrix, + astype, + atan, + atan2, + atanh, + atleast_1d, + atleast_2d, + atleast_3d, + average, + bartlett, + base_repr, + binary_repr, + bincount, + bitwise_and, + bitwise_count, + bitwise_invert, + bitwise_left_shift, + bitwise_not, + bitwise_or, + bitwise_right_shift, + bitwise_xor, + blackman, + block, + bmat, + bool, + bool_, + broadcast, + broadcast_arrays, + broadcast_shapes, + broadcast_to, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + c_, + can_cast, + cbrt, + cdouble, + ceil, + char, + character, + choose, + clip, + clongdouble, + column_stack, + common_type, + complex64, + complex128, + complex192, + complex256, + complexfloating, + compress, + concat, + concatenate, + conj, + conjugate, + convolve, + copy, + copysign, + copyto, + core, + corrcoef, + correlate, + cos, + cosh, + count_nonzero, + cov, + cross, + csingle, + ctypeslib, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + datetime64, + datetime_as_string, + datetime_data, + deg2rad, + degrees, + delete, + diag, + diag_indices, + diag_indices_from, + diagflat, + diagonal, + diff, + digitize, + divide, + divmod, + dot, + double, + dsplit, + dstack, + dtype, + dtypes, + e, + ediff1d, + einsum, + einsum_path, + emath, + empty_like, + equal, + errstate, + euler_gamma, + exceptions, + exp, + exp2, + expand_dims, + expm1, + extract, + f2py, + fabs, + fft, + fill_diagonal, + finfo, + fix, + flatiter, + flatnonzero, + flexible, + flip, + fliplr, + flipud, + float16, + float32, + float64, + float96, + float128, + float_power, + floating, + floor, + floor_divide, + fmax, + fmin, + fmod, + format_float_positional, + format_float_scientific, + frexp, + from_dlpack, + frombuffer, + fromfile, + fromfunction, + fromiter, + frompyfunc, + fromregex, + fromstring, + full, + full_like, + gcd, + generic, + genfromtxt, + geomspace, + get_include, + get_printoptions, + getbufsize, + geterr, + geterrcall, + gradient, + greater, + greater_equal, + half, + hamming, + hanning, + heaviside, + histogram, + histogram2d, + histogram_bin_edges, + histogramdd, + hsplit, + hstack, + hypot, + i0, + iinfo, + imag, + index_exp, + indices, + inexact, + inf, + info, + inner, + insert, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + interp, + intersect1d, + intp, + invert, + is_busday, + isclose, + iscomplex, + iscomplexobj, + isdtype, + isfinite, + isfortran, + isin, + isinf, + isnan, + isnat, + isneginf, + isposinf, + isreal, + isrealobj, + isscalar, + issubdtype, + iterable, + ix_, + kaiser, + kron, + lcm, + ldexp, + left_shift, + less, + less_equal, + lexsort, + lib, + linalg, + linspace, + little_endian, + load, + loadtxt, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + logspace, + long, + longdouble, + longlong, + ma, + mask_indices, + matmul, + matrix, + matrix_transpose, + matvec, + max, + maximum, + may_share_memory, + mean, + median, + memmap, + meshgrid, + mgrid, + min, + min_scalar_type, + minimum, + mintypecode, + mod, + modf, + moveaxis, + multiply, + nan, + nan_to_num, + nanargmax, + nanargmin, + nancumprod, + nancumsum, + nanmax, + nanmean, + nanmedian, + nanmin, + nanpercentile, + nanprod, + nanquantile, + nanstd, + nansum, + nanvar, + ndarray, + ndenumerate, + ndim, + ndindex, + nditer, + negative, + nested_iters, + newaxis, + nextafter, + nonzero, + not_equal, + number, + object_, + ogrid, + ones_like, + outer, + packbits, + pad, + partition, + percentile, + permute_dims, + pi, + piecewise, + place, + poly, + poly1d, + polyadd, + polyder, + polydiv, + polyfit, + polyint, + polymul, + polynomial, + polysub, + polyval, + positive, + pow, + power, + printoptions, + prod, + promote_types, + ptp, + put, + put_along_axis, + putmask, + quantile, + r_, + rad2deg, + radians, + random, + ravel, + ravel_multi_index, + real, + real_if_close, + rec, + recarray, + reciprocal, + record, + remainder, + repeat, + require, + reshape, + resize, + result_type, + right_shift, + rint, + roll, + rollaxis, + roots, + rot90, + round, + row_stack, + s_, + save, + savetxt, + savez, + savez_compressed, + sctypeDict, + searchsorted, + select, + set_printoptions, + setbufsize, + setdiff1d, + seterr, + seterrcall, + setxor1d, + shape, + shares_memory, + short, + show_config, + show_runtime, + sign, + signbit, + signedinteger, + sin, + sinc, + single, + sinh, + size, + sort, + sort_complex, + spacing, + split, + sqrt, + square, + squeeze, + stack, + std, + str_, + strings, + subtract, + sum, + swapaxes, + take, + take_along_axis, + tan, + tanh, + tensordot, + test, + testing, + tile, + timedelta64, + trace, + transpose, + trapezoid, + tri, + tril, + tril_indices, + tril_indices_from, + trim_zeros, + triu, + triu_indices, + triu_indices_from, + true_divide, + trunc, + typecodes, + typename, + typing, + ubyte, + ufunc, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + union1d, + unique, + unique_all, + unique_counts, + unique_inverse, + unique_values, + unpackbits, + unravel_index, + unsignedinteger, + unstack, + unwrap, + ushort, + vander, + var, + vdot, + vecdot, + vecmat, + vectorize, + void, + vsplit, + vstack, + where, + zeros_like, +) +from numpy._typing import _ArrayLike, _DTypeLike + +__all__ = ["rand", "randn", "repmat"] +__all__ += np.__all__ + +### + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_Matrix: TypeAlias = np.matrix[tuple[int, int], np.dtype[_ScalarT]] +_Order: TypeAlias = Literal["C", "F"] + +### + +# +@overload +def empty(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... +@overload +def empty(shape: int | tuple[int, int], dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... +@overload +def empty(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def ones(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... +@overload +def ones(shape: int | tuple[int, int], dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... +@overload +def ones(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def zeros(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... +@overload +def zeros(shape: int | tuple[int, int], dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... +@overload +def zeros(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def identity(n: int, dtype: None = None) -> _Matrix[np.float64]: ... +@overload +def identity(n: int, dtype: _DTypeLike[_ScalarT]) -> _Matrix[_ScalarT]: ... +@overload +def identity(n: int, dtype: npt.DTypeLike | None = None) -> _Matrix[Any]: ... + +# +@overload +def eye( + n: int, + M: int | None = None, + k: int = 0, + dtype: type[np.float64] | None = ..., + order: _Order = "C", +) -> _Matrix[np.float64]: ... +@overload +def eye(n: int, M: int | None, k: int, dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... +@overload +def eye(n: int, M: int | None = None, k: int = 0, *, dtype: _DTypeLike[_ScalarT], order: _Order = "C") -> _Matrix[_ScalarT]: ... +@overload +def eye(n: int, M: int | None = None, k: int = 0, dtype: npt.DTypeLike | None = ..., order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def rand(arg: int | tuple[()] | tuple[int] | tuple[int, int], /) -> _Matrix[np.float64]: ... +@overload +def rand(arg: int, /, *args: int) -> _Matrix[np.float64]: ... + +# +@overload +def randn(arg: int | tuple[()] | tuple[int] | tuple[int, int], /) -> _Matrix[np.float64]: ... +@overload +def randn(arg: int, /, *args: int) -> _Matrix[np.float64]: ... + +# +@overload +def repmat(a: _Matrix[_ScalarT], m: int, n: int) -> _Matrix[_ScalarT]: ... +@overload +def repmat(a: _ArrayLike[_ScalarT], m: int, n: int) -> npt.NDArray[_ScalarT]: ... +@overload +def repmat(a: npt.ArrayLike, m: int, n: int) -> npt.NDArray[Any]: ... diff --git a/local_packages/numpy/matrixlib/__init__.py b/local_packages/numpy/matrixlib/__init__.py new file mode 100644 index 0000000..1ff5cb5 --- /dev/null +++ b/local_packages/numpy/matrixlib/__init__.py @@ -0,0 +1,12 @@ +"""Sub-package containing the matrix class and related functions. + +""" +from . import defmatrix +from .defmatrix import * + +__all__ = defmatrix.__all__ + +from numpy._pytesttester import PytestTester + +test = PytestTester(__name__) +del PytestTester diff --git a/local_packages/numpy/matrixlib/__init__.pyi b/local_packages/numpy/matrixlib/__init__.pyi new file mode 100644 index 0000000..ad4091d --- /dev/null +++ b/local_packages/numpy/matrixlib/__init__.pyi @@ -0,0 +1,3 @@ +from .defmatrix import asmatrix, bmat, matrix + +__all__ = ["matrix", "bmat", "asmatrix"] diff --git a/local_packages/numpy/matrixlib/defmatrix.py b/local_packages/numpy/matrixlib/defmatrix.py new file mode 100644 index 0000000..39b9a93 --- /dev/null +++ b/local_packages/numpy/matrixlib/defmatrix.py @@ -0,0 +1,1119 @@ +__all__ = ['matrix', 'bmat', 'asmatrix'] + +import ast +import sys +import warnings + +import numpy._core.numeric as N +from numpy._core.numeric import concatenate, isscalar +from numpy._utils import set_module + +# While not in __all__, matrix_power used to be defined here, so we import +# it for backward compatibility. +from numpy.linalg import matrix_power + + +def _convert_from_string(data): + for char in '[]': + data = data.replace(char, '') + + rows = data.split(';') + newdata = [] + for count, row in enumerate(rows): + trow = row.split(',') + newrow = [] + for col in trow: + temp = col.split() + newrow.extend(map(ast.literal_eval, temp)) + if count == 0: + Ncols = len(newrow) + elif len(newrow) != Ncols: + raise ValueError("Rows not the same size.") + newdata.append(newrow) + return newdata + + +@set_module('numpy') +def asmatrix(data, dtype=None): + """ + Interpret the input as a matrix. + + Unlike `matrix`, `asmatrix` does not make a copy if the input is already + a matrix or an ndarray. Equivalent to ``matrix(data, copy=False)``. + + Parameters + ---------- + data : array_like + Input data. + dtype : data-type + Data-type of the output matrix. + + Returns + ------- + mat : matrix + `data` interpreted as a matrix. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([[1, 2], [3, 4]]) + + >>> m = np.asmatrix(x) + + >>> x[0,0] = 5 + + >>> m + matrix([[5, 2], + [3, 4]]) + + """ + return matrix(data, dtype=dtype, copy=False) + + +@set_module('numpy') +class matrix(N.ndarray): + """ + matrix(data, dtype=None, copy=True) + + Returns a matrix from an array-like object, or from a string of data. + + A matrix is a specialized 2-D array that retains its 2-D nature + through operations. It has certain special operators, such as ``*`` + (matrix multiplication) and ``**`` (matrix power). + + .. note:: It is no longer recommended to use this class, even for linear + algebra. Instead use regular arrays. The class may be removed + in the future. + + Parameters + ---------- + data : array_like or string + If `data` is a string, it is interpreted as a matrix with commas + or spaces separating columns, and semicolons separating rows. + dtype : data-type + Data-type of the output matrix. + copy : bool + If `data` is already an `ndarray`, then this flag determines + whether the data is copied (the default), or whether a view is + constructed. + + See Also + -------- + array + + Examples + -------- + >>> import numpy as np + >>> a = np.matrix('1 2; 3 4') + >>> a + matrix([[1, 2], + [3, 4]]) + + >>> np.matrix([[1, 2], [3, 4]]) + matrix([[1, 2], + [3, 4]]) + + """ + __array_priority__ = 10.0 + + def __new__(subtype, data, dtype=None, copy=True): + warnings.warn('the matrix subclass is not the recommended way to ' + 'represent matrices or deal with linear algebra (see ' + 'https://docs.scipy.org/doc/numpy/user/' + 'numpy-for-matlab-users.html). ' + 'Please adjust your code to use regular ndarray.', + PendingDeprecationWarning, stacklevel=2) + if isinstance(data, matrix): + dtype2 = data.dtype + if (dtype is None): + dtype = dtype2 + if (dtype2 == dtype) and (not copy): + return data + return data.astype(dtype) + + if isinstance(data, N.ndarray): + if dtype is None: + intype = data.dtype + else: + intype = N.dtype(dtype) + new = data.view(subtype) + if intype != data.dtype: + return new.astype(intype) + if copy: + return new.copy() + else: + return new + + if isinstance(data, str): + data = _convert_from_string(data) + + # now convert data to an array + copy = None if not copy else True + arr = N.array(data, dtype=dtype, copy=copy) + ndim = arr.ndim + shape = arr.shape + if (ndim > 2): + raise ValueError("matrix must be 2-dimensional") + elif ndim == 0: + shape = (1, 1) + elif ndim == 1: + shape = (1, shape[0]) + + order = 'C' + if (ndim == 2) and arr.flags.fortran: + order = 'F' + + if not (order or arr.flags.contiguous): + arr = arr.copy() + + ret = N.ndarray.__new__(subtype, shape, arr.dtype, + buffer=arr, + order=order) + return ret + + def __array_finalize__(self, obj): + self._getitem = False + if (isinstance(obj, matrix) and obj._getitem): + return + ndim = self.ndim + if (ndim == 2): + return + if (ndim > 2): + newshape = tuple(x for x in self.shape if x > 1) + ndim = len(newshape) + if ndim == 2: + self.shape = newshape + return + elif (ndim > 2): + raise ValueError("shape too large to be a matrix.") + else: + newshape = self.shape + if ndim == 0: + self.shape = (1, 1) + elif ndim == 1: + self.shape = (1, newshape[0]) + return + + def __getitem__(self, index): + self._getitem = True + + try: + out = N.ndarray.__getitem__(self, index) + finally: + self._getitem = False + + if not isinstance(out, N.ndarray): + return out + + if out.ndim == 0: + return out[()] + if out.ndim == 1: + sh = out.shape[0] + # Determine when we should have a column array + try: + n = len(index) + except Exception: + n = 0 + if n > 1 and isscalar(index[1]): + out.shape = (sh, 1) + else: + out.shape = (1, sh) + return out + + def __mul__(self, other): + if isinstance(other, (N.ndarray, list, tuple)): + # This promotes 1-D vectors to row vectors + return N.dot(self, asmatrix(other)) + if isscalar(other) or not hasattr(other, '__rmul__'): + return N.dot(self, other) + return NotImplemented + + def __rmul__(self, other): + return N.dot(other, self) + + def __imul__(self, other): + self[:] = self * other + return self + + def __pow__(self, other): + return matrix_power(self, other) + + def __ipow__(self, other): + self[:] = self ** other + return self + + def __rpow__(self, other): + return NotImplemented + + def _align(self, axis): + """A convenience function for operations that need to preserve axis + orientation. + """ + if axis is None: + return self[0, 0] + elif axis == 0: + return self + elif axis == 1: + return self.transpose() + else: + raise ValueError("unsupported axis") + + def _collapse(self, axis): + """A convenience function for operations that want to collapse + to a scalar like _align, but are using keepdims=True + """ + if axis is None: + return self[0, 0] + else: + return self + + # Necessary because base-class tolist expects dimension + # reduction by x[0] + def tolist(self): + """ + Return the matrix as a (possibly nested) list. + + See `ndarray.tolist` for full documentation. + + See Also + -------- + ndarray.tolist + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.tolist() + [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]] + + """ + return self.__array__().tolist() + + # To preserve orientation of result... + def sum(self, axis=None, dtype=None, out=None): + """ + Returns the sum of the matrix elements, along the given axis. + + Refer to `numpy.sum` for full documentation. + + See Also + -------- + numpy.sum + + Notes + ----- + This is the same as `ndarray.sum`, except that where an `ndarray` would + be returned, a `matrix` object is returned instead. + + Examples + -------- + >>> x = np.matrix([[1, 2], [4, 3]]) + >>> x.sum() + 10 + >>> x.sum(axis=1) + matrix([[3], + [7]]) + >>> x.sum(axis=1, dtype='float') + matrix([[3.], + [7.]]) + >>> out = np.zeros((2, 1), dtype='float') + >>> x.sum(axis=1, dtype='float', out=np.asmatrix(out)) + matrix([[3.], + [7.]]) + + """ + return N.ndarray.sum(self, axis, dtype, out, keepdims=True)._collapse(axis) + + # To update docstring from array to matrix... + def squeeze(self, axis=None): + """ + Return a possibly reshaped matrix. + + Refer to `numpy.squeeze` for more documentation. + + Parameters + ---------- + axis : None or int or tuple of ints, optional + Selects a subset of the axes of length one in the shape. + If an axis is selected with shape entry greater than one, + an error is raised. + + Returns + ------- + squeezed : matrix + The matrix, but as a (1, N) matrix if it had shape (N, 1). + + See Also + -------- + numpy.squeeze : related function + + Notes + ----- + If `m` has a single column then that column is returned + as the single row of a matrix. Otherwise `m` is returned. + The returned matrix is always either `m` itself or a view into `m`. + Supplying an axis keyword argument will not affect the returned matrix + but it may cause an error to be raised. + + Examples + -------- + >>> c = np.matrix([[1], [2]]) + >>> c + matrix([[1], + [2]]) + >>> c.squeeze() + matrix([[1, 2]]) + >>> r = c.T + >>> r + matrix([[1, 2]]) + >>> r.squeeze() + matrix([[1, 2]]) + >>> m = np.matrix([[1, 2], [3, 4]]) + >>> m.squeeze() + matrix([[1, 2], + [3, 4]]) + + """ + return N.ndarray.squeeze(self, axis=axis) + + # To update docstring from array to matrix... + def flatten(self, order='C'): + """ + Return a flattened copy of the matrix. + + All `N` elements of the matrix are placed into a single row. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + 'C' means to flatten in row-major (C-style) order. 'F' means to + flatten in column-major (Fortran-style) order. 'A' means to + flatten in column-major order if `m` is Fortran *contiguous* in + memory, row-major order otherwise. 'K' means to flatten `m` in + the order the elements occur in memory. The default is 'C'. + + Returns + ------- + y : matrix + A copy of the matrix, flattened to a `(1, N)` matrix where `N` + is the number of elements in the original matrix. + + See Also + -------- + ravel : Return a flattened array. + flat : A 1-D flat iterator over the matrix. + + Examples + -------- + >>> m = np.matrix([[1,2], [3,4]]) + >>> m.flatten() + matrix([[1, 2, 3, 4]]) + >>> m.flatten('F') + matrix([[1, 3, 2, 4]]) + + """ + return N.ndarray.flatten(self, order=order) + + def mean(self, axis=None, dtype=None, out=None): + """ + Returns the average of the matrix elements along the given axis. + + Refer to `numpy.mean` for full documentation. + + See Also + -------- + numpy.mean + + Notes + ----- + Same as `ndarray.mean` except that, where that returns an `ndarray`, + this returns a `matrix` object. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3, 4))) + >>> x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.mean() + 5.5 + >>> x.mean(0) + matrix([[4., 5., 6., 7.]]) + >>> x.mean(1) + matrix([[ 1.5], + [ 5.5], + [ 9.5]]) + + """ + return N.ndarray.mean(self, axis, dtype, out, keepdims=True)._collapse(axis) + + def std(self, axis=None, dtype=None, out=None, ddof=0): + """ + Return the standard deviation of the array elements along the given axis. + + Refer to `numpy.std` for full documentation. + + See Also + -------- + numpy.std + + Notes + ----- + This is the same as `ndarray.std`, except that where an `ndarray` would + be returned, a `matrix` object is returned instead. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3, 4))) + >>> x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.std() + 3.4520525295346629 # may vary + >>> x.std(0) + matrix([[ 3.26598632, 3.26598632, 3.26598632, 3.26598632]]) # may vary + >>> x.std(1) + matrix([[ 1.11803399], + [ 1.11803399], + [ 1.11803399]]) + + """ + return N.ndarray.std(self, axis, dtype, out, ddof, + keepdims=True)._collapse(axis) + + def var(self, axis=None, dtype=None, out=None, ddof=0): + """ + Returns the variance of the matrix elements, along the given axis. + + Refer to `numpy.var` for full documentation. + + See Also + -------- + numpy.var + + Notes + ----- + This is the same as `ndarray.var`, except that where an `ndarray` would + be returned, a `matrix` object is returned instead. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3, 4))) + >>> x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.var() + 11.916666666666666 + >>> x.var(0) + matrix([[ 10.66666667, 10.66666667, 10.66666667, 10.66666667]]) # may vary + >>> x.var(1) + matrix([[1.25], + [1.25], + [1.25]]) + + """ + return N.ndarray.var(self, axis, dtype, out, ddof, + keepdims=True)._collapse(axis) + + def prod(self, axis=None, dtype=None, out=None): + """ + Return the product of the array elements over the given axis. + + Refer to `prod` for full documentation. + + See Also + -------- + prod, ndarray.prod + + Notes + ----- + Same as `ndarray.prod`, except, where that returns an `ndarray`, this + returns a `matrix` object instead. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.prod() + 0 + >>> x.prod(0) + matrix([[ 0, 45, 120, 231]]) + >>> x.prod(1) + matrix([[ 0], + [ 840], + [7920]]) + + """ + return N.ndarray.prod(self, axis, dtype, out, keepdims=True)._collapse(axis) + + def any(self, axis=None, out=None): + """ + Test whether any array element along a given axis evaluates to True. + + Refer to `numpy.any` for full documentation. + + Parameters + ---------- + axis : int, optional + Axis along which logical OR is performed + out : ndarray, optional + Output to existing array instead of creating new one, must have + same shape as expected output + + Returns + ------- + any : bool, ndarray + Returns a single bool if `axis` is ``None``; otherwise, + returns `ndarray` + + """ + return N.ndarray.any(self, axis, out, keepdims=True)._collapse(axis) + + def all(self, axis=None, out=None): + """ + Test whether all matrix elements along a given axis evaluate to True. + + Parameters + ---------- + See `numpy.all` for complete descriptions + + See Also + -------- + numpy.all + + Notes + ----- + This is the same as `ndarray.all`, but it returns a `matrix` object. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> y = x[0]; y + matrix([[0, 1, 2, 3]]) + >>> (x == y) + matrix([[ True, True, True, True], + [False, False, False, False], + [False, False, False, False]]) + >>> (x == y).all() + False + >>> (x == y).all(0) + matrix([[False, False, False, False]]) + >>> (x == y).all(1) + matrix([[ True], + [False], + [False]]) + + """ + return N.ndarray.all(self, axis, out, keepdims=True)._collapse(axis) + + def max(self, axis=None, out=None): + """ + Return the maximum value along an axis. + + Parameters + ---------- + See `amax` for complete descriptions + + See Also + -------- + amax, ndarray.max + + Notes + ----- + This is the same as `ndarray.max`, but returns a `matrix` object + where `ndarray.max` would return an ndarray. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.max() + 11 + >>> x.max(0) + matrix([[ 8, 9, 10, 11]]) + >>> x.max(1) + matrix([[ 3], + [ 7], + [11]]) + + """ + return N.ndarray.max(self, axis, out, keepdims=True)._collapse(axis) + + def argmax(self, axis=None, out=None): + """ + Indexes of the maximum values along an axis. + + Return the indexes of the first occurrences of the maximum values + along the specified axis. If axis is None, the index is for the + flattened matrix. + + Parameters + ---------- + See `numpy.argmax` for complete descriptions + + See Also + -------- + numpy.argmax + + Notes + ----- + This is the same as `ndarray.argmax`, but returns a `matrix` object + where `ndarray.argmax` would return an `ndarray`. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.argmax() + 11 + >>> x.argmax(0) + matrix([[2, 2, 2, 2]]) + >>> x.argmax(1) + matrix([[3], + [3], + [3]]) + + """ + return N.ndarray.argmax(self, axis, out)._align(axis) + + def min(self, axis=None, out=None): + """ + Return the minimum value along an axis. + + Parameters + ---------- + See `amin` for complete descriptions. + + See Also + -------- + amin, ndarray.min + + Notes + ----- + This is the same as `ndarray.min`, but returns a `matrix` object + where `ndarray.min` would return an ndarray. + + Examples + -------- + >>> x = -np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, -1, -2, -3], + [ -4, -5, -6, -7], + [ -8, -9, -10, -11]]) + >>> x.min() + -11 + >>> x.min(0) + matrix([[ -8, -9, -10, -11]]) + >>> x.min(1) + matrix([[ -3], + [ -7], + [-11]]) + + """ + return N.ndarray.min(self, axis, out, keepdims=True)._collapse(axis) + + def argmin(self, axis=None, out=None): + """ + Indexes of the minimum values along an axis. + + Return the indexes of the first occurrences of the minimum values + along the specified axis. If axis is None, the index is for the + flattened matrix. + + Parameters + ---------- + See `numpy.argmin` for complete descriptions. + + See Also + -------- + numpy.argmin + + Notes + ----- + This is the same as `ndarray.argmin`, but returns a `matrix` object + where `ndarray.argmin` would return an `ndarray`. + + Examples + -------- + >>> x = -np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, -1, -2, -3], + [ -4, -5, -6, -7], + [ -8, -9, -10, -11]]) + >>> x.argmin() + 11 + >>> x.argmin(0) + matrix([[2, 2, 2, 2]]) + >>> x.argmin(1) + matrix([[3], + [3], + [3]]) + + """ + return N.ndarray.argmin(self, axis, out)._align(axis) + + def ptp(self, axis=None, out=None): + """ + Peak-to-peak (maximum - minimum) value along the given axis. + + Refer to `numpy.ptp` for full documentation. + + See Also + -------- + numpy.ptp + + Notes + ----- + Same as `ndarray.ptp`, except, where that would return an `ndarray` object, + this returns a `matrix` object. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.ptp() + 11 + >>> x.ptp(0) + matrix([[8, 8, 8, 8]]) + >>> x.ptp(1) + matrix([[3], + [3], + [3]]) + + """ + return N.ptp(self, axis, out)._align(axis) + + @property + def I(self): # noqa: E743 + """ + Returns the (multiplicative) inverse of invertible `self`. + + Parameters + ---------- + None + + Returns + ------- + ret : matrix object + If `self` is non-singular, `ret` is such that ``ret * self`` == + ``self * ret`` == ``np.matrix(np.eye(self[0,:].size))`` all return + ``True``. + + Raises + ------ + numpy.linalg.LinAlgError: Singular matrix + If `self` is singular. + + See Also + -------- + linalg.inv + + Examples + -------- + >>> m = np.matrix('[1, 2; 3, 4]'); m + matrix([[1, 2], + [3, 4]]) + >>> m.getI() + matrix([[-2. , 1. ], + [ 1.5, -0.5]]) + >>> m.getI() * m + matrix([[ 1., 0.], # may vary + [ 0., 1.]]) + + """ + M, N = self.shape + if M == N: + from numpy.linalg import inv as func + else: + from numpy.linalg import pinv as func + return asmatrix(func(self)) + + @property + def A(self): + """ + Return `self` as an `ndarray` object. + + Equivalent to ``np.asarray(self)``. + + Parameters + ---------- + None + + Returns + ------- + ret : ndarray + `self` as an `ndarray` + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.getA() + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + + """ + return self.__array__() + + @property + def A1(self): + """ + Return `self` as a flattened `ndarray`. + + Equivalent to ``np.asarray(x).ravel()`` + + Parameters + ---------- + None + + Returns + ------- + ret : ndarray + `self`, 1-D, as an `ndarray` + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.getA1() + array([ 0, 1, 2, ..., 9, 10, 11]) + + + """ + return self.__array__().ravel() + + def ravel(self, order='C'): + """ + Return a flattened matrix. + + Refer to `numpy.ravel` for more documentation. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + The elements of `m` are read using this index order. 'C' means to + index the elements in C-like order, with the last axis index + changing fastest, back to the first axis index changing slowest. + 'F' means to index the elements in Fortran-like index order, with + the first index changing fastest, and the last index changing + slowest. Note that the 'C' and 'F' options take no account of the + memory layout of the underlying array, and only refer to the order + of axis indexing. 'A' means to read the elements in Fortran-like + index order if `m` is Fortran *contiguous* in memory, C-like order + otherwise. 'K' means to read the elements in the order they occur + in memory, except for reversing the data when strides are negative. + By default, 'C' index order is used. + + Returns + ------- + ret : matrix + Return the matrix flattened to shape `(1, N)` where `N` + is the number of elements in the original matrix. + A copy is made only if necessary. + + See Also + -------- + matrix.flatten : returns a similar output matrix but always a copy + matrix.flat : a flat iterator on the array. + numpy.ravel : related function which returns an ndarray + + """ + return N.ndarray.ravel(self, order=order) + + @property + def T(self): + """ + Returns the transpose of the matrix. + + Does *not* conjugate! For the complex conjugate transpose, use ``.H``. + + Parameters + ---------- + None + + Returns + ------- + ret : matrix object + The (non-conjugated) transpose of the matrix. + + See Also + -------- + transpose, getH + + Examples + -------- + >>> m = np.matrix('[1, 2; 3, 4]') + >>> m + matrix([[1, 2], + [3, 4]]) + >>> m.getT() + matrix([[1, 3], + [2, 4]]) + + """ + return self.transpose() + + @property + def H(self): + """ + Returns the (complex) conjugate transpose of `self`. + + Equivalent to ``np.transpose(self)`` if `self` is real-valued. + + Parameters + ---------- + None + + Returns + ------- + ret : matrix object + complex conjugate transpose of `self` + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))) + >>> z = x - 1j*x; z + matrix([[ 0. +0.j, 1. -1.j, 2. -2.j, 3. -3.j], + [ 4. -4.j, 5. -5.j, 6. -6.j, 7. -7.j], + [ 8. -8.j, 9. -9.j, 10.-10.j, 11.-11.j]]) + >>> z.getH() + matrix([[ 0. -0.j, 4. +4.j, 8. +8.j], + [ 1. +1.j, 5. +5.j, 9. +9.j], + [ 2. +2.j, 6. +6.j, 10.+10.j], + [ 3. +3.j, 7. +7.j, 11.+11.j]]) + + """ + if issubclass(self.dtype.type, N.complexfloating): + return self.transpose().conjugate() + else: + return self.transpose() + + # kept for compatibility + getT = T.fget + getA = A.fget + getA1 = A1.fget + getH = H.fget + getI = I.fget + +def _from_string(str, gdict, ldict): + rows = str.split(';') + rowtup = [] + for row in rows: + trow = row.split(',') + newrow = [] + for x in trow: + newrow.extend(x.split()) + trow = newrow + coltup = [] + for col in trow: + col = col.strip() + try: + thismat = ldict[col] + except KeyError: + try: + thismat = gdict[col] + except KeyError as e: + raise NameError(f"name {col!r} is not defined") from None + + coltup.append(thismat) + rowtup.append(concatenate(coltup, axis=-1)) + return concatenate(rowtup, axis=0) + + +@set_module('numpy') +def bmat(obj, ldict=None, gdict=None): + """ + Build a matrix object from a string, nested sequence, or array. + + Parameters + ---------- + obj : str or array_like + Input data. If a string, variables in the current scope may be + referenced by name. + ldict : dict, optional + A dictionary that replaces local operands in current frame. + Ignored if `obj` is not a string or `gdict` is None. + gdict : dict, optional + A dictionary that replaces global operands in current frame. + Ignored if `obj` is not a string. + + Returns + ------- + out : matrix + Returns a matrix object, which is a specialized 2-D array. + + See Also + -------- + block : + A generalization of this function for N-d arrays, that returns normal + ndarrays. + + Examples + -------- + >>> import numpy as np + >>> A = np.asmatrix('1 1; 1 1') + >>> B = np.asmatrix('2 2; 2 2') + >>> C = np.asmatrix('3 4; 5 6') + >>> D = np.asmatrix('7 8; 9 0') + + All the following expressions construct the same block matrix: + + >>> np.bmat([[A, B], [C, D]]) + matrix([[1, 1, 2, 2], + [1, 1, 2, 2], + [3, 4, 7, 8], + [5, 6, 9, 0]]) + >>> np.bmat(np.r_[np.c_[A, B], np.c_[C, D]]) + matrix([[1, 1, 2, 2], + [1, 1, 2, 2], + [3, 4, 7, 8], + [5, 6, 9, 0]]) + >>> np.bmat('A,B; C,D') + matrix([[1, 1, 2, 2], + [1, 1, 2, 2], + [3, 4, 7, 8], + [5, 6, 9, 0]]) + + """ + if isinstance(obj, str): + if gdict is None: + # get previous frame + frame = sys._getframe().f_back + glob_dict = frame.f_globals + loc_dict = frame.f_locals + else: + glob_dict = gdict + loc_dict = ldict + + return matrix(_from_string(obj, glob_dict, loc_dict)) + + if isinstance(obj, (tuple, list)): + # [[A,B],[C,D]] + arr_rows = [] + for row in obj: + if isinstance(row, N.ndarray): # not 2-d + return matrix(concatenate(obj, axis=-1)) + else: + arr_rows.append(concatenate(row, axis=-1)) + return matrix(concatenate(arr_rows, axis=0)) + if isinstance(obj, N.ndarray): + return matrix(obj) diff --git a/local_packages/numpy/matrixlib/defmatrix.pyi b/local_packages/numpy/matrixlib/defmatrix.pyi new file mode 100644 index 0000000..40c747d --- /dev/null +++ b/local_packages/numpy/matrixlib/defmatrix.pyi @@ -0,0 +1,218 @@ +from _typeshed import Incomplete +from collections.abc import Mapping, Sequence +from types import EllipsisType +from typing import Any, ClassVar, Literal as L, Self, SupportsIndex, TypeAlias, overload +from typing_extensions import TypeVar + +import numpy as np +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _AnyShape, + _ArrayLikeInt_co, + _NestedSequence, + _ShapeLike, +) + +__all__ = ["asmatrix", "bmat", "matrix"] + +_T = TypeVar("_T") +_ArrayT = TypeVar("_ArrayT", bound=np.ndarray) +_BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[np.integer | np.bool]) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_ShapeT_co = TypeVar("_ShapeT_co", bound=_2D, default=_2D, covariant=True) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) + +_2D: TypeAlias = tuple[int, int] +_Matrix: TypeAlias = matrix[_2D, np.dtype[_ScalarT]] +_ToIndex1: TypeAlias = slice | EllipsisType | NDArray[np.integer | np.bool] | _NestedSequence[int] | None +_ToIndex2: TypeAlias = tuple[_ToIndex1, _ToIndex1 | SupportsIndex] | tuple[_ToIndex1 | SupportsIndex, _ToIndex1] + +class matrix(np.ndarray[_ShapeT_co, _DTypeT_co]): + __array_priority__: ClassVar[float] = 10.0 # pyright: ignore[reportIncompatibleMethodOverride] + + def __new__( + subtype, # pyright: ignore[reportSelfClsParameterName] + data: ArrayLike, + dtype: DTypeLike | None = None, + copy: bool = True, + ) -> _Matrix[Incomplete]: ... + + # + @overload # type: ignore[override] + def __getitem__( + self, key: SupportsIndex | _ArrayLikeInt_co | tuple[SupportsIndex | _ArrayLikeInt_co, ...], / + ) -> Incomplete: ... + @overload + def __getitem__(self, key: _ToIndex1 | _ToIndex2, /) -> matrix[_2D, _DTypeT_co]: ... + @overload + def __getitem__(self: _Matrix[np.void], key: str, /) -> _Matrix[Incomplete]: ... + @overload + def __getitem__(self: _Matrix[np.void], key: list[str], /) -> matrix[_2D, _DTypeT_co]: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # + def __mul__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __rmul__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + + # + def __pow__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __rpow__(self, other: ArrayLike, /) -> _Matrix[Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `prod` and `mean` + @overload # type: ignore[override] + def sum(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ... + @overload + def sum(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> _Matrix[Incomplete]: ... + @overload + def sum(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def sum(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `sum` and `mean` + @overload # type: ignore[override] + def prod(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ... + @overload + def prod(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> _Matrix[Incomplete]: ... + @overload + def prod(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def prod(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `sum` and `prod` + @overload # type: ignore[override] + def mean(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ... + @overload + def mean(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> _Matrix[Incomplete]: ... + @overload + def mean(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def mean(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `var` + @overload # type: ignore[override] + def std(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> Incomplete: ... + @overload + def std(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> _Matrix[Incomplete]: ... + @overload + def std(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0) -> _ArrayT: ... + @overload + def std( # pyright: ignore[reportIncompatibleMethodOverride] + self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0 + ) -> _ArrayT: ... + + # keep in sync with `std` + @overload # type: ignore[override] + def var(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> Incomplete: ... + @overload + def var(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> _Matrix[Incomplete]: ... + @overload + def var(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0) -> _ArrayT: ... + @overload + def var( # pyright: ignore[reportIncompatibleMethodOverride] + self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0 + ) -> _ArrayT: ... + + # keep in sync with `all` + @overload # type: ignore[override] + def any(self, axis: None = None, out: None = None) -> np.bool: ... + @overload + def any(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.bool]: ... + @overload + def any(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def any(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `any` + @overload # type: ignore[override] + def all(self, axis: None = None, out: None = None) -> np.bool: ... + @overload + def all(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.bool]: ... + @overload + def all(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def all(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `min` and `ptp` + @overload # type: ignore[override] + def max(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... + @overload + def max(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... + @overload + def max(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def max(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `max` and `ptp` + @overload # type: ignore[override] + def min(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... + @overload + def min(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... + @overload + def min(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def min(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `max` and `min` + @overload + def ptp(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... + @overload + def ptp(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... + @overload + def ptp(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def ptp(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `argmin` + @overload # type: ignore[override] + def argmax(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> np.intp: ... + @overload + def argmax(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.intp]: ... + @overload + def argmax(self, axis: _ShapeLike | None, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... + @overload + def argmax(self, axis: _ShapeLike | None = None, *, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `argmax` + @overload # type: ignore[override] + def argmin(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> np.intp: ... + @overload + def argmin(self, axis: _ShapeLike, out: None = None) -> _Matrix[np.intp]: ... + @overload + def argmin(self, axis: _ShapeLike | None, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... + @overload + def argmin(self, axis: _ShapeLike | None = None, *, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # the second overload handles the (rare) case that the matrix is not 2-d + @overload + def tolist(self: _Matrix[np.generic[_T]]) -> list[list[_T]]: ... # pyright: ignore[reportIncompatibleMethodOverride] + @overload + def tolist(self) -> Incomplete: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # these three methods will at least return a `2-d` array of shape (1, n) + def squeeze(self, /, axis: _ShapeLike | None = None) -> matrix[_2D, _DTypeT_co]: ... + def ravel(self, /, order: L["K", "A", "C", "F"] | None = "C") -> matrix[_2D, _DTypeT_co]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def flatten(self, /, order: L["K", "A", "C", "F"] | None = "C") -> matrix[_2D, _DTypeT_co]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + + # matrix.T is inherited from _ScalarOrArrayCommon + def getT(self) -> Self: ... + @property + def I(self) -> _Matrix[Incomplete]: ... # noqa: E743 + def getI(self) -> _Matrix[Incomplete]: ... + @property + def A(self) -> np.ndarray[_2D, _DTypeT_co]: ... + def getA(self) -> np.ndarray[_2D, _DTypeT_co]: ... + @property + def A1(self) -> np.ndarray[_AnyShape, _DTypeT_co]: ... + def getA1(self) -> np.ndarray[_AnyShape, _DTypeT_co]: ... + @property + def H(self) -> matrix[_2D, _DTypeT_co]: ... + def getH(self) -> matrix[_2D, _DTypeT_co]: ... + +def bmat( + obj: str | Sequence[ArrayLike] | NDArray[Any], + ldict: Mapping[str, Any] | None = None, + gdict: Mapping[str, Any] | None = None, +) -> _Matrix[Incomplete]: ... + +def asmatrix(data: ArrayLike, dtype: DTypeLike | None = None) -> _Matrix[Incomplete]: ... diff --git a/local_packages/numpy/matrixlib/tests/__init__.py b/local_packages/numpy/matrixlib/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/local_packages/numpy/matrixlib/tests/test_defmatrix.py b/local_packages/numpy/matrixlib/tests/test_defmatrix.py new file mode 100644 index 0000000..a0e868f --- /dev/null +++ b/local_packages/numpy/matrixlib/tests/test_defmatrix.py @@ -0,0 +1,455 @@ +import collections.abc + +import numpy as np +from numpy import asmatrix, bmat, matrix +from numpy.linalg import matrix_power +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, +) + + +class TestCtor: + def test_basic(self): + A = np.array([[1, 2], [3, 4]]) + mA = matrix(A) + assert_(np.all(mA.A == A)) + + B = bmat("A,A;A,A") + C = bmat([[A, A], [A, A]]) + D = np.array([[1, 2, 1, 2], + [3, 4, 3, 4], + [1, 2, 1, 2], + [3, 4, 3, 4]]) + assert_(np.all(B.A == D)) + assert_(np.all(C.A == D)) + + E = np.array([[5, 6], [7, 8]]) + AEresult = matrix([[1, 2, 5, 6], [3, 4, 7, 8]]) + assert_(np.all(bmat([A, E]) == AEresult)) + + vec = np.arange(5) + mvec = matrix(vec) + assert_(mvec.shape == (1, 5)) + + def test_exceptions(self): + # Check for ValueError when called with invalid string data. + assert_raises(ValueError, matrix, "invalid") + + def test_bmat_nondefault_str(self): + A = np.array([[1, 2], [3, 4]]) + B = np.array([[5, 6], [7, 8]]) + Aresult = np.array([[1, 2, 1, 2], + [3, 4, 3, 4], + [1, 2, 1, 2], + [3, 4, 3, 4]]) + mixresult = np.array([[1, 2, 5, 6], + [3, 4, 7, 8], + [5, 6, 1, 2], + [7, 8, 3, 4]]) + assert_(np.all(bmat("A,A;A,A") == Aresult)) + assert_(np.all(bmat("A,A;A,A", ldict={'A': B}) == Aresult)) + assert_raises(TypeError, bmat, "A,A;A,A", gdict={'A': B}) + assert_( + np.all(bmat("A,A;A,A", ldict={'A': A}, gdict={'A': B}) == Aresult)) + b2 = bmat("A,B;C,D", ldict={'A': A, 'B': B}, gdict={'C': B, 'D': A}) + assert_(np.all(b2 == mixresult)) + + +class TestProperties: + def test_sum(self): + """Test whether matrix.sum(axis=1) preserves orientation. + Fails in NumPy <= 0.9.6.2127. + """ + M = matrix([[1, 2, 0, 0], + [3, 4, 0, 0], + [1, 2, 1, 2], + [3, 4, 3, 4]]) + sum0 = matrix([8, 12, 4, 6]) + sum1 = matrix([3, 7, 6, 14]).T + sumall = 30 + assert_array_equal(sum0, M.sum(axis=0)) + assert_array_equal(sum1, M.sum(axis=1)) + assert_equal(sumall, M.sum()) + + assert_array_equal(sum0, np.sum(M, axis=0)) + assert_array_equal(sum1, np.sum(M, axis=1)) + assert_equal(sumall, np.sum(M)) + + def test_prod(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.prod(), 720) + assert_equal(x.prod(0), matrix([[4, 10, 18]])) + assert_equal(x.prod(1), matrix([[6], [120]])) + + assert_equal(np.prod(x), 720) + assert_equal(np.prod(x, axis=0), matrix([[4, 10, 18]])) + assert_equal(np.prod(x, axis=1), matrix([[6], [120]])) + + y = matrix([0, 1, 3]) + assert_(y.prod() == 0) + + def test_max(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.max(), 6) + assert_equal(x.max(0), matrix([[4, 5, 6]])) + assert_equal(x.max(1), matrix([[3], [6]])) + + assert_equal(np.max(x), 6) + assert_equal(np.max(x, axis=0), matrix([[4, 5, 6]])) + assert_equal(np.max(x, axis=1), matrix([[3], [6]])) + + def test_min(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.min(), 1) + assert_equal(x.min(0), matrix([[1, 2, 3]])) + assert_equal(x.min(1), matrix([[1], [4]])) + + assert_equal(np.min(x), 1) + assert_equal(np.min(x, axis=0), matrix([[1, 2, 3]])) + assert_equal(np.min(x, axis=1), matrix([[1], [4]])) + + def test_ptp(self): + x = np.arange(4).reshape((2, 2)) + mx = x.view(np.matrix) + assert_(mx.ptp() == 3) + assert_(np.all(mx.ptp(0) == np.array([2, 2]))) + assert_(np.all(mx.ptp(1) == np.array([1, 1]))) + + def test_var(self): + x = np.arange(9).reshape((3, 3)) + mx = x.view(np.matrix) + assert_equal(x.var(ddof=0), mx.var(ddof=0)) + assert_equal(x.var(ddof=1), mx.var(ddof=1)) + + def test_basic(self): + import numpy.linalg as linalg + + A = np.array([[1., 2.], + [3., 4.]]) + mA = matrix(A) + assert_(np.allclose(linalg.inv(A), mA.I)) + assert_(np.all(np.array(np.transpose(A) == mA.T))) + assert_(np.all(np.array(np.transpose(A) == mA.H))) + assert_(np.all(A == mA.A)) + + B = A + 2j * A + mB = matrix(B) + assert_(np.allclose(linalg.inv(B), mB.I)) + assert_(np.all(np.array(np.transpose(B) == mB.T))) + assert_(np.all(np.array(np.transpose(B).conj() == mB.H))) + + def test_pinv(self): + x = matrix(np.arange(6).reshape(2, 3)) + xpinv = matrix([[-0.77777778, 0.27777778], + [-0.11111111, 0.11111111], + [ 0.55555556, -0.05555556]]) + assert_almost_equal(x.I, xpinv) + + def test_comparisons(self): + A = np.arange(100).reshape(10, 10) + mA = matrix(A) + mB = matrix(A) + 0.1 + assert_(np.all(mB == A + 0.1)) + assert_(np.all(mB == matrix(A + 0.1))) + assert_(not np.any(mB == matrix(A - 0.1))) + assert_(np.all(mA < mB)) + assert_(np.all(mA <= mB)) + assert_(np.all(mA <= mA)) + assert_(not np.any(mA < mA)) + + assert_(not np.any(mB < mA)) + assert_(np.all(mB >= mA)) + assert_(np.all(mB >= mB)) + assert_(not np.any(mB > mB)) + + assert_(np.all(mA == mA)) + assert_(not np.any(mA == mB)) + assert_(np.all(mB != mA)) + + assert_(not np.all(abs(mA) > 0)) + assert_(np.all(abs(mB > 0))) + + def test_asmatrix(self): + A = np.arange(100).reshape(10, 10) + mA = asmatrix(A) + A[0, 0] = -10 + assert_(A[0, 0] == mA[0, 0]) + + def test_noaxis(self): + A = matrix([[1, 0], [0, 1]]) + assert_(A.sum() == matrix(2)) + assert_(A.mean() == matrix(0.5)) + + def test_repr(self): + A = matrix([[1, 0], [0, 1]]) + assert_(repr(A) == "matrix([[1, 0],\n [0, 1]])") + + def test_make_bool_matrix_from_str(self): + A = matrix('True; True; False') + B = matrix([[True], [True], [False]]) + assert_array_equal(A, B) + +class TestCasting: + def test_basic(self): + A = np.arange(100).reshape(10, 10) + mA = matrix(A) + + mB = mA.copy() + O = np.ones((10, 10), np.float64) * 0.1 + mB = mB + O + assert_(mB.dtype.type == np.float64) + assert_(np.all(mA != mB)) + assert_(np.all(mB == mA + 0.1)) + + mC = mA.copy() + O = np.ones((10, 10), np.complex128) + mC = mC * O + assert_(mC.dtype.type == np.complex128) + assert_(np.all(mA != mB)) + + +class TestAlgebra: + def test_basic(self): + import numpy.linalg as linalg + + A = np.array([[1., 2.], [3., 4.]]) + mA = matrix(A) + + B = np.identity(2) + for i in range(6): + assert_(np.allclose((mA ** i).A, B)) + B = np.dot(B, A) + + Ainv = linalg.inv(A) + B = np.identity(2) + for i in range(6): + assert_(np.allclose((mA ** -i).A, B)) + B = np.dot(B, Ainv) + + assert_(np.allclose((mA * mA).A, np.dot(A, A))) + assert_(np.allclose((mA + mA).A, (A + A))) + assert_(np.allclose((3 * mA).A, (3 * A))) + + mA2 = matrix(A) + mA2 *= 3 + assert_(np.allclose(mA2.A, 3 * A)) + + def test_pow(self): + """Test raising a matrix to an integer power works as expected.""" + m = matrix("1. 2.; 3. 4.") + m2 = m.copy() + m2 **= 2 + mi = m.copy() + mi **= -1 + m4 = m2.copy() + m4 **= 2 + assert_array_almost_equal(m2, m**2) + assert_array_almost_equal(m4, np.dot(m2, m2)) + assert_array_almost_equal(np.dot(mi, m), np.eye(2)) + + def test_scalar_type_pow(self): + m = matrix([[1, 2], [3, 4]]) + for scalar_t in [np.int8, np.uint8]: + two = scalar_t(2) + assert_array_almost_equal(m ** 2, m ** two) + + def test_notimplemented(self): + '''Check that 'not implemented' operations produce a failure.''' + A = matrix([[1., 2.], + [3., 4.]]) + + # __rpow__ + with assert_raises(TypeError): + 1.0**A + + # __mul__ with something not a list, ndarray, tuple, or scalar + with assert_raises(TypeError): + A * object() + + +class TestMatrixReturn: + def test_instance_methods(self): + a = matrix([1.0], dtype='f8') + methodargs = { + 'astype': ('intc',), + 'clip': (0.0, 1.0), + 'compress': ([1],), + 'repeat': (1,), + 'reshape': (1,), + 'swapaxes': (0, 0), + 'dot': np.array([1.0]), + } + excluded_methods = [ + 'argmin', 'choose', 'dump', 'dumps', 'fill', 'getfield', + 'getA', 'getA1', 'item', 'nonzero', 'put', 'putmask', 'resize', + 'searchsorted', 'setflags', 'setfield', 'sort', + 'partition', 'argpartition', 'to_device', + 'take', 'tofile', 'tolist', 'tobytes', 'all', 'any', + 'sum', 'argmax', 'argmin', 'min', 'max', 'mean', 'var', 'ptp', + 'prod', 'std', 'ctypes', 'bitwise_count', + ] + for attrib in dir(a): + if attrib.startswith('_') or attrib in excluded_methods: + continue + f = getattr(a, attrib) + if isinstance(f, collections.abc.Callable): + # reset contents of a + a.astype('f8') + a.fill(1.0) + args = methodargs.get(attrib, ()) + b = f(*args) + assert_(type(b) is matrix, f"{attrib}") + assert_(type(a.real) is matrix) + assert_(type(a.imag) is matrix) + c, d = matrix([0.0]).nonzero() + assert_(type(c) is np.ndarray) + assert_(type(d) is np.ndarray) + + +class TestIndexing: + def test_basic(self): + x = asmatrix(np.zeros((3, 2), float)) + y = np.zeros((3, 1), float) + y[:, 0] = [0.8, 0.2, 0.3] + x[:, 1] = y > 0.5 + assert_equal(x, [[0, 1], [0, 0], [0, 0]]) + + +class TestNewScalarIndexing: + a = matrix([[1, 2], [3, 4]]) + + def test_dimesions(self): + a = self.a + x = a[0] + assert_equal(x.ndim, 2) + + def test_array_from_matrix_list(self): + a = self.a + x = np.array([a, a]) + assert_equal(x.shape, [2, 2, 2]) + + def test_array_to_list(self): + a = self.a + assert_equal(a.tolist(), [[1, 2], [3, 4]]) + + def test_fancy_indexing(self): + a = self.a + x = a[1, [0, 1, 0]] + assert_(isinstance(x, matrix)) + assert_equal(x, matrix([[3, 4, 3]])) + x = a[[1, 0]] + assert_(isinstance(x, matrix)) + assert_equal(x, matrix([[3, 4], [1, 2]])) + x = a[[[1], [0]], [[1, 0], [0, 1]]] + assert_(isinstance(x, matrix)) + assert_equal(x, matrix([[4, 3], [1, 2]])) + + def test_matrix_element(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x[0][0], matrix([[1, 2, 3]])) + assert_equal(x[0][0].shape, (1, 3)) + assert_equal(x[0].shape, (1, 3)) + assert_equal(x[:, 0].shape, (2, 1)) + + x = matrix(0) + assert_equal(x[0, 0], 0) + assert_equal(x[0], 0) + assert_equal(x[:, 0].shape, x.shape) + + def test_scalar_indexing(self): + x = asmatrix(np.zeros((3, 2), float)) + assert_equal(x[0, 0], x[0][0]) + + def test_row_column_indexing(self): + x = asmatrix(np.eye(2)) + assert_array_equal(x[0, :], [[1, 0]]) + assert_array_equal(x[1, :], [[0, 1]]) + assert_array_equal(x[:, 0], [[1], [0]]) + assert_array_equal(x[:, 1], [[0], [1]]) + + def test_boolean_indexing(self): + A = np.arange(6) + A.shape = (3, 2) + x = asmatrix(A) + assert_array_equal(x[:, np.array([True, False])], x[:, 0]) + assert_array_equal(x[np.array([True, False, False]), :], x[0, :]) + + def test_list_indexing(self): + A = np.arange(6) + A.shape = (3, 2) + x = asmatrix(A) + assert_array_equal(x[:, [1, 0]], x[:, ::-1]) + assert_array_equal(x[[2, 1, 0], :], x[::-1, :]) + + +class TestPower: + def test_returntype(self): + a = np.array([[0, 1], [0, 0]]) + assert_(type(matrix_power(a, 2)) is np.ndarray) + a = asmatrix(a) + assert_(type(matrix_power(a, 2)) is matrix) + + def test_list(self): + assert_array_equal(matrix_power([[0, 1], [0, 0]], 2), [[0, 0], [0, 0]]) + + +class TestShape: + + a = np.array([[1], [2]]) + m = matrix([[1], [2]]) + + def test_shape(self): + assert_equal(self.a.shape, (2, 1)) + assert_equal(self.m.shape, (2, 1)) + + def test_numpy_ravel(self): + assert_equal(np.ravel(self.a).shape, (2,)) + assert_equal(np.ravel(self.m).shape, (2,)) + + def test_member_ravel(self): + assert_equal(self.a.ravel().shape, (2,)) + assert_equal(self.m.ravel().shape, (1, 2)) + + def test_member_flatten(self): + assert_equal(self.a.flatten().shape, (2,)) + assert_equal(self.m.flatten().shape, (1, 2)) + + def test_numpy_ravel_order(self): + x = np.array([[1, 2, 3], [4, 5, 6]]) + assert_equal(np.ravel(x), [1, 2, 3, 4, 5, 6]) + assert_equal(np.ravel(x, order='F'), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T, order='A'), [1, 2, 3, 4, 5, 6]) + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(np.ravel(x), [1, 2, 3, 4, 5, 6]) + assert_equal(np.ravel(x, order='F'), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T, order='A'), [1, 2, 3, 4, 5, 6]) + + def test_matrix_ravel_order(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.ravel(), [[1, 2, 3, 4, 5, 6]]) + assert_equal(x.ravel(order='F'), [[1, 4, 2, 5, 3, 6]]) + assert_equal(x.T.ravel(), [[1, 4, 2, 5, 3, 6]]) + assert_equal(x.T.ravel(order='A'), [[1, 2, 3, 4, 5, 6]]) + + def test_array_memory_sharing(self): + assert_(np.may_share_memory(self.a, self.a.ravel())) + assert_(not np.may_share_memory(self.a, self.a.flatten())) + + def test_matrix_memory_sharing(self): + assert_(np.may_share_memory(self.m, self.m.ravel())) + assert_(not np.may_share_memory(self.m, self.m.flatten())) + + def test_expand_dims_matrix(self): + # matrices are always 2d - so expand_dims only makes sense when the + # type is changed away from matrix. + a = np.arange(10).reshape((2, 5)).view(np.matrix) + expanded = np.expand_dims(a, axis=1) + assert_equal(expanded.ndim, 3) + assert_(not isinstance(expanded, np.matrix)) diff --git a/local_packages/numpy/matrixlib/tests/test_interaction.py b/local_packages/numpy/matrixlib/tests/test_interaction.py new file mode 100644 index 0000000..87d133a --- /dev/null +++ b/local_packages/numpy/matrixlib/tests/test_interaction.py @@ -0,0 +1,360 @@ +"""Tests of interaction of matrix with other parts of numpy. + +Note that tests with MaskedArray and linalg are done in separate files. +""" +import textwrap +import warnings + +import pytest + +import numpy as np +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) + + +def test_fancy_indexing(): + # The matrix class messes with the shape. While this is always + # weird (getitem is not used, it does not have setitem nor knows + # about fancy indexing), this tests gh-3110 + # 2018-04-29: moved here from core.tests.test_index. + m = np.matrix([[1, 2], [3, 4]]) + + assert_(isinstance(m[[0, 1, 0], :], np.matrix)) + + # gh-3110. Note the transpose currently because matrices do *not* + # support dimension fixing for fancy indexing correctly. + x = np.asmatrix(np.arange(50).reshape(5, 10)) + assert_equal(x[:2, np.array(-1)], x[:2, -1].T) + + +def test_polynomial_mapdomain(): + # test that polynomial preserved matrix subtype. + # 2018-04-29: moved here from polynomial.tests.polyutils. + dom1 = [0, 4] + dom2 = [1, 3] + x = np.matrix([dom1, dom1]) + res = np.polynomial.polyutils.mapdomain(x, dom1, dom2) + assert_(isinstance(res, np.matrix)) + + +def test_sort_matrix_none(): + # 2018-04-29: moved here from core.tests.test_multiarray + a = np.matrix([[2, 1, 0]]) + actual = np.sort(a, axis=None) + expected = np.matrix([[0, 1, 2]]) + assert_equal(actual, expected) + assert_(type(expected) is np.matrix) + + +def test_partition_matrix_none(): + # gh-4301 + # 2018-04-29: moved here from core.tests.test_multiarray + a = np.matrix([[2, 1, 0]]) + actual = np.partition(a, 1, axis=None) + expected = np.matrix([[0, 1, 2]]) + assert_equal(actual, expected) + assert_(type(expected) is np.matrix) + + +def test_dot_scalar_and_matrix_of_objects(): + # Ticket #2469 + # 2018-04-29: moved here from core.tests.test_multiarray + arr = np.matrix([1, 2], dtype=object) + desired = np.matrix([[3, 6]], dtype=object) + assert_equal(np.dot(arr, 3), desired) + assert_equal(np.dot(3, arr), desired) + + +def test_inner_scalar_and_matrix(): + # 2018-04-29: moved here from core.tests.test_multiarray + for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': + sca = np.array(3, dtype=dt)[()] + arr = np.matrix([[1, 2], [3, 4]], dtype=dt) + desired = np.matrix([[3, 6], [9, 12]], dtype=dt) + assert_equal(np.inner(arr, sca), desired) + assert_equal(np.inner(sca, arr), desired) + + +def test_inner_scalar_and_matrix_of_objects(): + # Ticket #4482 + # 2018-04-29: moved here from core.tests.test_multiarray + arr = np.matrix([1, 2], dtype=object) + desired = np.matrix([[3, 6]], dtype=object) + assert_equal(np.inner(arr, 3), desired) + assert_equal(np.inner(3, arr), desired) + + +def test_iter_allocate_output_subtype(): + # Make sure that the subtype with priority wins + # 2018-04-29: moved here from core.tests.test_nditer, given the + # matrix specific shape test. + + # matrix vs ndarray + a = np.matrix([[1, 2], [3, 4]]) + b = np.arange(4).reshape(2, 2).T + i = np.nditer([a, b, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) + assert_(type(i.operands[2]) is np.matrix) + assert_(type(i.operands[2]) is not np.ndarray) + assert_equal(i.operands[2].shape, (2, 2)) + + # matrix always wants things to be 2D + b = np.arange(4).reshape(1, 2, 2) + assert_raises(RuntimeError, np.nditer, [a, b, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) + # but if subtypes are disabled, the result can still work + i = np.nditer([a, b, None], [], + [['readonly'], ['readonly'], + ['writeonly', 'allocate', 'no_subtype']]) + assert_(type(i.operands[2]) is np.ndarray) + assert_(type(i.operands[2]) is not np.matrix) + assert_equal(i.operands[2].shape, (1, 2, 2)) + + +def like_function(): + # 2018-04-29: moved here from core.tests.test_numeric + a = np.matrix([[1, 2], [3, 4]]) + for like_function in np.zeros_like, np.ones_like, np.empty_like: + b = like_function(a) + assert_(type(b) is np.matrix) + + c = like_function(a, subok=False) + assert_(type(c) is not np.matrix) + + +def test_array_astype(): + # 2018-04-29: copied here from core.tests.test_api + # subok=True passes through a matrix + a = np.matrix([[0, 1, 2], [3, 4, 5]], dtype='f4') + b = a.astype('f4', subok=True, copy=False) + assert_(a is b) + + # subok=True is default, and creates a subtype on a cast + b = a.astype('i4', copy=False) + assert_equal(a, b) + assert_equal(type(b), np.matrix) + + # subok=False never returns a matrix + b = a.astype('f4', subok=False, copy=False) + assert_equal(a, b) + assert_(not (a is b)) + assert_(type(b) is not np.matrix) + + +def test_stack(): + # 2018-04-29: copied here from core.tests.test_shape_base + # check np.matrix cannot be stacked + m = np.matrix([[1, 2], [3, 4]]) + assert_raises_regex(ValueError, 'shape too large to be a matrix', + np.stack, [m, m]) + + +def test_object_scalar_multiply(): + # Tickets #2469 and #4482 + # 2018-04-29: moved here from core.tests.test_ufunc + arr = np.matrix([1, 2], dtype=object) + desired = np.matrix([[3, 6]], dtype=object) + assert_equal(np.multiply(arr, 3), desired) + assert_equal(np.multiply(3, arr), desired) + + +def test_nanfunctions_matrices(): + # Check that it works and that type and + # shape are preserved + # 2018-04-29: moved here from core.tests.test_nanfunctions + mat = np.matrix(np.eye(3)) + for f in [np.nanmin, np.nanmax]: + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (1, 3)) + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 1)) + res = f(mat) + assert_(np.isscalar(res)) + # check that rows of nan are dealt with for subclasses (#4628) + mat[1] = np.nan + for f in [np.nanmin, np.nanmax]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(not np.any(np.isnan(res))) + assert_(len(w) == 0) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0]) + and not np.isnan(res[2, 0])) + assert_(len(w) == 1, 'no warning raised') + assert_(issubclass(w[0].category, RuntimeWarning)) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mat) + assert_(np.isscalar(res)) + assert_(res != np.nan) + assert_(len(w) == 0) + + +def test_nanfunctions_matrices_general(): + # Check that it works and that type and + # shape are preserved + # 2018-04-29: moved here from core.tests.test_nanfunctions + mat = np.matrix(np.eye(3)) + for f in (np.nanargmin, np.nanargmax, np.nansum, np.nanprod, + np.nanmean, np.nanvar, np.nanstd): + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (1, 3)) + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 1)) + res = f(mat) + assert_(np.isscalar(res)) + + for f in np.nancumsum, np.nancumprod: + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 3)) + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 3)) + res = f(mat) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (1, 3 * 3)) + + +def test_average_matrix(): + # 2018-04-29: moved here from core.tests.test_function_base. + y = np.matrix(np.random.rand(5, 5)) + assert_array_equal(y.mean(0), np.average(y, 0)) + + a = np.matrix([[1, 2], [3, 4]]) + w = np.matrix([[1, 2], [3, 4]]) + + r = np.average(a, axis=0, weights=w) + assert_equal(type(r), np.matrix) + assert_equal(r, [[2.5, 10.0 / 3]]) + + +def test_dot_matrix(): + # Test to make sure matrices give the same answer as ndarrays + # 2018-04-29: moved here from core.tests.test_function_base. + x = np.linspace(0, 5) + y = np.linspace(-5, 0) + mx = np.matrix(x) + my = np.matrix(y) + r = np.dot(x, y) + mr = np.dot(mx, my.T) + assert_almost_equal(mr, r) + + +def test_ediff1d_matrix(): + # 2018-04-29: moved here from core.tests.test_arraysetops. + assert isinstance(np.ediff1d(np.matrix(1)), np.matrix) + assert isinstance(np.ediff1d(np.matrix(1), to_begin=1), np.matrix) + + +def test_apply_along_axis_matrix(): + # this test is particularly malicious because matrix + # refuses to become 1d + # 2018-04-29: moved here from core.tests.test_shape_base. + def double(row): + return row * 2 + + m = np.matrix([[0, 1], [2, 3]]) + expected = np.matrix([[0, 2], [4, 6]]) + + result = np.apply_along_axis(double, 0, m) + assert_(isinstance(result, np.matrix)) + assert_array_equal(result, expected) + + result = np.apply_along_axis(double, 1, m) + assert_(isinstance(result, np.matrix)) + assert_array_equal(result, expected) + + +def test_kron_matrix(): + # 2018-04-29: moved here from core.tests.test_shape_base. + a = np.ones([2, 2]) + m = np.asmatrix(a) + assert_equal(type(np.kron(a, a)), np.ndarray) + assert_equal(type(np.kron(m, m)), np.matrix) + assert_equal(type(np.kron(a, m)), np.matrix) + assert_equal(type(np.kron(m, a)), np.matrix) + + +class TestConcatenatorMatrix: + # 2018-04-29: moved here from core.tests.test_index_tricks. + def test_matrix(self): + a = [1, 2] + b = [3, 4] + + ab_r = np.r_['r', a, b] + ab_c = np.r_['c', a, b] + + assert_equal(type(ab_r), np.matrix) + assert_equal(type(ab_c), np.matrix) + + assert_equal(np.array(ab_r), [[1, 2, 3, 4]]) + assert_equal(np.array(ab_c), [[1], [2], [3], [4]]) + + assert_raises(ValueError, lambda: np.r_['rc', a, b]) + + def test_matrix_scalar(self): + r = np.r_['r', [1, 2], 3] + assert_equal(type(r), np.matrix) + assert_equal(np.array(r), [[1, 2, 3]]) + + def test_matrix_builder(self): + a = np.array([1]) + b = np.array([2]) + c = np.array([3]) + d = np.array([4]) + actual = np.r_['a, b; c, d'] + expected = np.bmat([[a, b], [c, d]]) + + assert_equal(actual, expected) + assert_equal(type(actual), type(expected)) + + +def test_array_equal_error_message_matrix(): + # 2018-04-29: moved here from testing.tests.test_utils. + with pytest.raises(AssertionError) as exc_info: + assert_equal(np.array([1, 2]), np.matrix([1, 2])) + msg = str(exc_info.value) + msg_reference = textwrap.dedent("""\ + + Arrays are not equal + + (shapes (2,), (1, 2) mismatch) + ACTUAL: array([1, 2]) + DESIRED: matrix([[1, 2]])""") + assert_equal(msg, msg_reference) + + +def test_array_almost_equal_matrix(): + # Matrix slicing keeps things 2-D, while array does not necessarily. + # See gh-8452. + # 2018-04-29: moved here from testing.tests.test_utils. + m1 = np.matrix([[1., 2.]]) + m2 = np.matrix([[1., np.nan]]) + m3 = np.matrix([[1., -np.inf]]) + m4 = np.matrix([[np.nan, np.inf]]) + m5 = np.matrix([[1., 2.], [np.nan, np.inf]]) + for assert_func in assert_array_almost_equal, assert_almost_equal: + for m in m1, m2, m3, m4, m5: + assert_func(m, m) + a = np.array(m) + assert_func(a, m) + assert_func(m, a) diff --git a/local_packages/numpy/matrixlib/tests/test_masked_matrix.py b/local_packages/numpy/matrixlib/tests/test_masked_matrix.py new file mode 100644 index 0000000..ee3dc96 --- /dev/null +++ b/local_packages/numpy/matrixlib/tests/test_masked_matrix.py @@ -0,0 +1,240 @@ +import pickle + +import numpy as np +from numpy.ma.core import ( + MaskedArray, + MaskType, + add, + allequal, + divide, + getmask, + hypot, + log, + masked, + masked_array, + masked_values, + nomask, +) +from numpy.ma.extras import mr_ +from numpy.ma.testutils import assert_, assert_array_equal, assert_equal, assert_raises + + +class MMatrix(MaskedArray, np.matrix,): + + def __new__(cls, data, mask=nomask): + mat = np.matrix(data) + _data = MaskedArray.__new__(cls, data=mat, mask=mask) + return _data + + def __array_finalize__(self, obj): + np.matrix.__array_finalize__(self, obj) + MaskedArray.__array_finalize__(self, obj) + + @property + def _series(self): + _view = self.view(MaskedArray) + _view._sharedmask = False + return _view + + +class TestMaskedMatrix: + def test_matrix_indexing(self): + # Tests conversions and indexing + x1 = np.matrix([[1, 2, 3], [4, 3, 2]]) + x2 = masked_array(x1, mask=[[1, 0, 0], [0, 1, 0]]) + x3 = masked_array(x1, mask=[[0, 1, 0], [1, 0, 0]]) + x4 = masked_array(x1) + # test conversion to strings + str(x2) # raises? + repr(x2) # raises? + # tests of indexing + assert_(type(x2[1, 0]) is type(x1[1, 0])) + assert_(x1[1, 0] == x2[1, 0]) + assert_(x2[1, 1] is masked) + assert_equal(x1[0, 2], x2[0, 2]) + assert_equal(x1[0, 1:], x2[0, 1:]) + assert_equal(x1[:, 2], x2[:, 2]) + assert_equal(x1[:], x2[:]) + assert_equal(x1[1:], x3[1:]) + x1[0, 2] = 9 + x2[0, 2] = 9 + assert_equal(x1, x2) + x1[0, 1:] = 99 + x2[0, 1:] = 99 + assert_equal(x1, x2) + x2[0, 1] = masked + assert_equal(x1, x2) + x2[0, 1:] = masked + assert_equal(x1, x2) + x2[0, :] = x1[0, :] + x2[0, 1] = masked + assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]]))) + x3[1, :] = masked_array([1, 2, 3], [1, 1, 0]) + assert_(allequal(getmask(x3)[1], masked_array([1, 1, 0]))) + assert_(allequal(getmask(x3[1]), masked_array([1, 1, 0]))) + x4[1, :] = masked_array([1, 2, 3], [1, 1, 0]) + assert_(allequal(getmask(x4[1]), masked_array([1, 1, 0]))) + assert_(allequal(x4[1], masked_array([1, 2, 3]))) + x1 = np.matrix(np.arange(5) * 1.0) + x2 = masked_values(x1, 3.0) + assert_equal(x1, x2) + assert_(allequal(masked_array([0, 0, 0, 1, 0], dtype=MaskType), + x2.mask)) + assert_equal(3.0, x2.fill_value) + + def test_pickling_subbaseclass(self): + # Test pickling w/ a subclass of ndarray + a = masked_array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + a_pickled = pickle.loads(pickle.dumps(a, protocol=proto)) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled, a) + assert_(isinstance(a_pickled._data, np.matrix)) + + def test_count_mean_with_matrix(self): + m = masked_array(np.matrix([[1, 2], [3, 4]]), mask=np.zeros((2, 2))) + + assert_equal(m.count(axis=0).shape, (1, 2)) + assert_equal(m.count(axis=1).shape, (2, 1)) + + # Make sure broadcasting inside mean and var work + assert_equal(m.mean(axis=0), [[2., 3.]]) + assert_equal(m.mean(axis=1), [[1.5], [3.5]]) + + def test_flat(self): + # Test that flat can return items even for matrices [#4585, #4615] + # test simple access + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + assert_equal(test.flat[1], 2) + assert_equal(test.flat[2], masked) + assert_(np.all(test.flat[0:2] == test[0, 0:2])) + # Test flat on masked_matrices + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + test.flat = masked_array([3, 2, 1], mask=[1, 0, 0]) + control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0]) + assert_equal(test, control) + # Test setting + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + testflat = test.flat + testflat[:] = testflat[np.array([2, 1, 0])] + assert_equal(test, control) + testflat[0] = 9 + # test that matrices keep the correct shape (#4615) + a = masked_array(np.matrix(np.eye(2)), mask=0) + b = a.flat + b01 = b[:2] + assert_equal(b01.data, np.array([[1., 0.]])) + assert_equal(b01.mask, np.array([[False, False]])) + + def test_allany_onmatrices(self): + x = np.array([[0.13, 0.26, 0.90], + [0.28, 0.33, 0.63], + [0.31, 0.87, 0.70]]) + X = np.matrix(x) + m = np.array([[True, False, False], + [False, False, False], + [True, True, False]], dtype=np.bool) + mX = masked_array(X, mask=m) + mXbig = (mX > 0.5) + mXsmall = (mX < 0.5) + + assert_(not mXbig.all()) + assert_(mXbig.any()) + assert_equal(mXbig.all(0), np.matrix([False, False, True])) + assert_equal(mXbig.all(1), np.matrix([False, False, True]).T) + assert_equal(mXbig.any(0), np.matrix([False, False, True])) + assert_equal(mXbig.any(1), np.matrix([True, True, True]).T) + + assert_(not mXsmall.all()) + assert_(mXsmall.any()) + assert_equal(mXsmall.all(0), np.matrix([True, True, False])) + assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T) + assert_equal(mXsmall.any(0), np.matrix([True, True, False])) + assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T) + + def test_compressed(self): + a = masked_array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0]) + b = a.compressed() + assert_equal(b, a) + assert_(isinstance(b, np.matrix)) + a[0, 0] = masked + b = a.compressed() + assert_equal(b, [[2, 3, 4]]) + + def test_ravel(self): + a = masked_array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]]) + aravel = a.ravel() + assert_equal(aravel.shape, (1, 5)) + assert_equal(aravel._mask.shape, a.shape) + + def test_view(self): + # Test view w/ flexible dtype + iterator = list(zip(np.arange(10), np.random.rand(10))) + data = np.array(iterator) + a = masked_array(iterator, dtype=[('a', float), ('b', float)]) + a.mask[0] = (1, 0) + test = a.view((float, 2), np.matrix) + assert_equal(test, data) + assert_(isinstance(test, np.matrix)) + assert_(not isinstance(test, MaskedArray)) + + +class TestSubclassing: + # Test suite for masked subclasses of ndarray. + + def _create_data(self): + x = np.arange(5, dtype='float') + mx = MMatrix(x, mask=[0, 1, 0, 0, 0]) + return x, mx + + def test_maskedarray_subclassing(self): + # Tests subclassing MaskedArray + mx = self._create_data()[1] + assert_(isinstance(mx._data, np.matrix)) + + def test_masked_unary_operations(self): + # Tests masked_unary_operation + x, mx = self._create_data() + with np.errstate(divide='ignore'): + assert_(isinstance(log(mx), MMatrix)) + assert_equal(log(x), np.log(x)) + + def test_masked_binary_operations(self): + # Tests masked_binary_operation + x, mx = self._create_data() + # Result should be a MMatrix + assert_(isinstance(add(mx, mx), MMatrix)) + assert_(isinstance(add(mx, x), MMatrix)) + # Result should work + assert_equal(add(mx, x), mx + x) + assert_(isinstance(add(mx, mx)._data, np.matrix)) + with assert_raises(TypeError): + add.outer(mx, mx) + assert_(isinstance(hypot(mx, mx), MMatrix)) + assert_(isinstance(hypot(mx, x), MMatrix)) + + def test_masked_binary_operations2(self): + # Tests domained_masked_binary_operation + x, mx = self._create_data() + xmx = masked_array(mx.data.__array__(), mask=mx.mask) + assert_(isinstance(divide(mx, mx), MMatrix)) + assert_(isinstance(divide(mx, x), MMatrix)) + assert_equal(divide(mx, mx), divide(xmx, xmx)) + +class TestConcatenator: + # Tests for mr_, the equivalent of r_ for masked arrays. + + def test_matrix_builder(self): + assert_raises(np.ma.MAError, lambda: mr_['1, 2; 3, 4']) + + def test_matrix(self): + # Test consistency with unmasked version. If we ever deprecate + # matrix, this test should either still pass, or both actual and + # expected should fail to be build. + actual = mr_['r', 1, 2, 3] + expected = np.ma.array(np.r_['r', 1, 2, 3]) + assert_array_equal(actual, expected) + + # outer type is masked array, inner type is matrix + assert_equal(type(actual), type(expected)) + assert_equal(type(actual.data), type(expected.data)) diff --git a/local_packages/numpy/matrixlib/tests/test_matrix_linalg.py b/local_packages/numpy/matrixlib/tests/test_matrix_linalg.py new file mode 100644 index 0000000..99acf32 --- /dev/null +++ b/local_packages/numpy/matrixlib/tests/test_matrix_linalg.py @@ -0,0 +1,110 @@ +""" Test functions for linalg module using the matrix class.""" +import pytest + +import numpy as np +from numpy.linalg.tests.test_linalg import ( + CondCases, + DetCases, + EigCases, + EigvalsCases, + InvCases, + LinalgCase, + LinalgTestCase, + LstsqCases, + PinvCases, + SolveCases, + SVDCases, + TestQR as _TestQR, + _TestNorm2D, + _TestNormDoubleBase, + _TestNormInt64Base, + _TestNormSingleBase, + apply_tag, +) + +CASES = [] + +# square test cases +CASES += apply_tag('square', [ + LinalgCase("0x0_matrix", + np.empty((0, 0), dtype=np.double).view(np.matrix), + np.empty((0, 1), dtype=np.double).view(np.matrix), + tags={'size-0'}), + LinalgCase("matrix_b_only", + np.array([[1., 2.], [3., 4.]]), + np.matrix([2., 1.]).T), + LinalgCase("matrix_a_and_b", + np.matrix([[1., 2.], [3., 4.]]), + np.matrix([2., 1.]).T), +]) + +# hermitian test-cases +CASES += apply_tag('hermitian', [ + LinalgCase("hmatrix_a_and_b", + np.matrix([[1., 2.], [2., 1.]]), + None), +]) +# No need to make generalized or strided cases for matrices. + + +class MatrixTestCase(LinalgTestCase): + TEST_CASES = CASES + + +class TestSolveMatrix(SolveCases, MatrixTestCase): + pass + + +class TestInvMatrix(InvCases, MatrixTestCase): + pass + + +class TestEigvalsMatrix(EigvalsCases, MatrixTestCase): + pass + + +class TestEigMatrix(EigCases, MatrixTestCase): + pass + + +class TestSVDMatrix(SVDCases, MatrixTestCase): + pass + + +class TestCondMatrix(CondCases, MatrixTestCase): + pass + + +class TestPinvMatrix(PinvCases, MatrixTestCase): + pass + + +class TestDetMatrix(DetCases, MatrixTestCase): + pass + + +@pytest.mark.thread_unsafe( + reason="residuals not calculated properly for square tests (gh-29851)" +) +class TestLstsqMatrix(LstsqCases, MatrixTestCase): + pass + + +class _TestNorm2DMatrix(_TestNorm2D): + array = np.matrix + + +class TestNormDoubleMatrix(_TestNorm2DMatrix, _TestNormDoubleBase): + pass + + +class TestNormSingleMatrix(_TestNorm2DMatrix, _TestNormSingleBase): + pass + + +class TestNormInt64Matrix(_TestNorm2DMatrix, _TestNormInt64Base): + pass + + +class TestQRMatrix(_TestQR): + array = np.matrix diff --git a/local_packages/numpy/matrixlib/tests/test_multiarray.py b/local_packages/numpy/matrixlib/tests/test_multiarray.py new file mode 100644 index 0000000..2d9d1f8 --- /dev/null +++ b/local_packages/numpy/matrixlib/tests/test_multiarray.py @@ -0,0 +1,17 @@ +import numpy as np +from numpy.testing import assert_, assert_array_equal, assert_equal + + +class TestView: + def test_type(self): + x = np.array([1, 2, 3]) + assert_(isinstance(x.view(np.matrix), np.matrix)) + + def test_keywords(self): + x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)]) + # We must be specific about the endianness here: + y = x.view(dtype='>> from numpy.polynomial import Chebyshev + >>> xdata = [1, 2, 3, 4] + >>> ydata = [1, 4, 9, 16] + >>> c = Chebyshev.fit(xdata, ydata, deg=1) + +is preferred over the `chebyshev.chebfit` function from the +``np.polynomial.chebyshev`` module:: + + >>> from numpy.polynomial.chebyshev import chebfit + >>> c = chebfit(xdata, ydata, deg=1) + +See :doc:`routines.polynomials.classes` for more details. + +Convenience Classes +=================== + +The following lists the various constants and methods common to all of +the classes representing the various kinds of polynomials. In the following, +the term ``Poly`` represents any one of the convenience classes (e.g. +`~polynomial.Polynomial`, `~chebyshev.Chebyshev`, `~hermite.Hermite`, etc.) +while the lowercase ``p`` represents an **instance** of a polynomial class. + +Constants +--------- + +- ``Poly.domain`` -- Default domain +- ``Poly.window`` -- Default window +- ``Poly.basis_name`` -- String used to represent the basis +- ``Poly.maxpower`` -- Maximum value ``n`` such that ``p**n`` is allowed + +Creation +-------- + +Methods for creating polynomial instances. + +- ``Poly.basis(degree)`` -- Basis polynomial of given degree +- ``Poly.identity()`` -- ``p`` where ``p(x) = x`` for all ``x`` +- ``Poly.fit(x, y, deg)`` -- ``p`` of degree ``deg`` with coefficients + determined by the least-squares fit to the data ``x``, ``y`` +- ``Poly.fromroots(roots)`` -- ``p`` with specified roots +- ``p.copy()`` -- Create a copy of ``p`` + +Conversion +---------- + +Methods for converting a polynomial instance of one kind to another. + +- ``p.cast(Poly)`` -- Convert ``p`` to instance of kind ``Poly`` +- ``p.convert(Poly)`` -- Convert ``p`` to instance of kind ``Poly`` or map + between ``domain`` and ``window`` + +Calculus +-------- +- ``p.deriv()`` -- Take the derivative of ``p`` +- ``p.integ()`` -- Integrate ``p`` + +Validation +---------- +- ``Poly.has_samecoef(p1, p2)`` -- Check if coefficients match +- ``Poly.has_samedomain(p1, p2)`` -- Check if domains match +- ``Poly.has_sametype(p1, p2)`` -- Check if types match +- ``Poly.has_samewindow(p1, p2)`` -- Check if windows match + +Misc +---- +- ``p.linspace()`` -- Return ``x, p(x)`` at equally-spaced points in ``domain`` +- ``p.mapparms()`` -- Return the parameters for the linear mapping between + ``domain`` and ``window``. +- ``p.roots()`` -- Return the roots of ``p``. +- ``p.trim()`` -- Remove trailing coefficients. +- ``p.cutdeg(degree)`` -- Truncate ``p`` to given degree +- ``p.truncate(size)`` -- Truncate ``p`` to given size + +""" +from .chebyshev import Chebyshev +from .hermite import Hermite +from .hermite_e import HermiteE +from .laguerre import Laguerre +from .legendre import Legendre +from .polynomial import Polynomial + +__all__ = [ # noqa: F822 + "set_default_printstyle", + "polynomial", "Polynomial", + "chebyshev", "Chebyshev", + "legendre", "Legendre", + "hermite", "Hermite", + "hermite_e", "HermiteE", + "laguerre", "Laguerre", +] + + +def set_default_printstyle(style): + """ + Set the default format for the string representation of polynomials. + + Values for ``style`` must be valid inputs to ``__format__``, i.e. 'ascii' + or 'unicode'. + + Parameters + ---------- + style : str + Format string for default printing style. Must be either 'ascii' or + 'unicode'. + + Notes + ----- + The default format depends on the platform: 'unicode' is used on + Unix-based systems and 'ascii' on Windows. This determination is based on + default font support for the unicode superscript and subscript ranges. + + Examples + -------- + >>> p = np.polynomial.Polynomial([1, 2, 3]) + >>> c = np.polynomial.Chebyshev([1, 2, 3]) + >>> np.polynomial.set_default_printstyle('unicode') + >>> print(p) + 1.0 + 2.0·x + 3.0·x² + >>> print(c) + 1.0 + 2.0·T₁(x) + 3.0·T₂(x) + >>> np.polynomial.set_default_printstyle('ascii') + >>> print(p) + 1.0 + 2.0 x + 3.0 x**2 + >>> print(c) + 1.0 + 2.0 T_1(x) + 3.0 T_2(x) + >>> # Formatting supersedes all class/package-level defaults + >>> print(f"{p:unicode}") + 1.0 + 2.0·x + 3.0·x² + """ + if style not in ('unicode', 'ascii'): + raise ValueError( + f"Unsupported format string '{style}'. Valid options are 'ascii' " + f"and 'unicode'" + ) + _use_unicode = True + if style == 'ascii': + _use_unicode = False + from ._polybase import ABCPolyBase + ABCPolyBase._use_unicode = _use_unicode + + +from numpy._pytesttester import PytestTester + +test = PytestTester(__name__) +del PytestTester diff --git a/local_packages/numpy/polynomial/__init__.pyi b/local_packages/numpy/polynomial/__init__.pyi new file mode 100644 index 0000000..ad005a2 --- /dev/null +++ b/local_packages/numpy/polynomial/__init__.pyi @@ -0,0 +1,31 @@ +from typing import Final, Literal + +from . import chebyshev, hermite, hermite_e, laguerre, legendre, polynomial +from .chebyshev import Chebyshev +from .hermite import Hermite +from .hermite_e import HermiteE +from .laguerre import Laguerre +from .legendre import Legendre +from .polynomial import Polynomial + +__all__ = [ + "set_default_printstyle", + "polynomial", + "Polynomial", + "chebyshev", + "Chebyshev", + "legendre", + "Legendre", + "hermite", + "Hermite", + "hermite_e", + "HermiteE", + "laguerre", + "Laguerre", +] + +def set_default_printstyle(style: Literal["ascii", "unicode"]) -> None: ... + +from numpy._pytesttester import PytestTester as _PytestTester + +test: Final[_PytestTester] diff --git a/local_packages/numpy/polynomial/_polybase.py b/local_packages/numpy/polynomial/_polybase.py new file mode 100644 index 0000000..f893433 --- /dev/null +++ b/local_packages/numpy/polynomial/_polybase.py @@ -0,0 +1,1191 @@ +""" +Abstract base class for the various polynomial Classes. + +The ABCPolyBase class provides the methods needed to implement the common API +for the various polynomial classes. It operates as a mixin, but uses the +abc module from the stdlib, hence it is only available for Python >= 2.6. + +""" +import abc +import numbers +import os +from collections.abc import Callable + +import numpy as np + +from . import polyutils as pu + +__all__ = ['ABCPolyBase'] + +class ABCPolyBase(abc.ABC): + """An abstract base class for immutable series classes. + + ABCPolyBase provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' along with the + methods listed below. + + Parameters + ---------- + coef : array_like + Series coefficients in order of increasing degree, i.e., + ``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``, where + ``P_i`` is the basis polynomials of degree ``i``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is the derived class domain. + window : (2,) array_like, optional + Window, see domain for its use. The default value is the + derived class window. + symbol : str, optional + Symbol used to represent the independent variable in string + representations of the polynomial expression, e.g. for printing. + The symbol must be a valid Python identifier. Default value is 'x'. + + .. versionadded:: 1.24 + + Attributes + ---------- + coef : (N,) ndarray + Series coefficients in order of increasing degree. + domain : (2,) ndarray + Domain that is mapped to window. + window : (2,) ndarray + Window that domain is mapped to. + symbol : str + Symbol representing the independent variable. + + Class Attributes + ---------------- + maxpower : int + Maximum power allowed, i.e., the largest number ``n`` such that + ``p(x)**n`` is allowed. This is to limit runaway polynomial size. + domain : (2,) ndarray + Default domain of the class. + window : (2,) ndarray + Default window of the class. + + """ + + # Not hashable + __hash__ = None + + # Opt out of numpy ufuncs and Python ops with ndarray subclasses. + __array_ufunc__ = None + + # Limit runaway size. T_n^m has degree n*m + maxpower = 100 + + # Unicode character mappings for improved __str__ + _superscript_mapping = str.maketrans({ + "0": "⁰", + "1": "¹", + "2": "²", + "3": "³", + "4": "⁴", + "5": "⁵", + "6": "⁶", + "7": "⁷", + "8": "⁸", + "9": "⁹" + }) + _subscript_mapping = str.maketrans({ + "0": "₀", + "1": "₁", + "2": "₂", + "3": "₃", + "4": "₄", + "5": "₅", + "6": "₆", + "7": "₇", + "8": "₈", + "9": "₉" + }) + # Some fonts don't support full unicode character ranges necessary for + # the full set of superscripts and subscripts, including common/default + # fonts in Windows shells/terminals. Therefore, default to ascii-only + # printing on windows. + _use_unicode = not os.name == 'nt' + + @property + def symbol(self): + return self._symbol + + @property + @abc.abstractmethod + def domain(self): + pass + + @property + @abc.abstractmethod + def window(self): + pass + + @property + @abc.abstractmethod + def basis_name(self): + pass + + @staticmethod + @abc.abstractmethod + def _add(c1, c2): + pass + + @staticmethod + @abc.abstractmethod + def _sub(c1, c2): + pass + + @staticmethod + @abc.abstractmethod + def _mul(c1, c2): + pass + + @staticmethod + @abc.abstractmethod + def _div(c1, c2): + pass + + @staticmethod + @abc.abstractmethod + def _pow(c, pow, maxpower=None): + pass + + @staticmethod + @abc.abstractmethod + def _val(x, c): + pass + + @staticmethod + @abc.abstractmethod + def _int(c, m, k, lbnd, scl): + pass + + @staticmethod + @abc.abstractmethod + def _der(c, m, scl): + pass + + @staticmethod + @abc.abstractmethod + def _fit(x, y, deg, rcond, full): + pass + + @staticmethod + @abc.abstractmethod + def _line(off, scl): + pass + + @staticmethod + @abc.abstractmethod + def _roots(c): + pass + + @staticmethod + @abc.abstractmethod + def _fromroots(r): + pass + + def has_samecoef(self, other): + """Check if coefficients match. + + Parameters + ---------- + other : class instance + The other class must have the ``coef`` attribute. + + Returns + ------- + bool : boolean + True if the coefficients are the same, False otherwise. + + """ + return ( + len(self.coef) == len(other.coef) + and np.all(self.coef == other.coef) + ) + + def has_samedomain(self, other): + """Check if domains match. + + Parameters + ---------- + other : class instance + The other class must have the ``domain`` attribute. + + Returns + ------- + bool : boolean + True if the domains are the same, False otherwise. + + """ + return np.all(self.domain == other.domain) + + def has_samewindow(self, other): + """Check if windows match. + + Parameters + ---------- + other : class instance + The other class must have the ``window`` attribute. + + Returns + ------- + bool : boolean + True if the windows are the same, False otherwise. + + """ + return np.all(self.window == other.window) + + def has_sametype(self, other): + """Check if types match. + + Parameters + ---------- + other : object + Class instance. + + Returns + ------- + bool : boolean + True if other is same class as self + + """ + return isinstance(other, self.__class__) + + def _get_coefficients(self, other): + """Interpret other as polynomial coefficients. + + The `other` argument is checked to see if it is of the same + class as self with identical domain and window. If so, + return its coefficients, otherwise return `other`. + + Parameters + ---------- + other : anything + Object to be checked. + + Returns + ------- + coef + The coefficients of`other` if it is a compatible instance, + of ABCPolyBase, otherwise `other`. + + Raises + ------ + TypeError + When `other` is an incompatible instance of ABCPolyBase. + + """ + if isinstance(other, ABCPolyBase): + if not isinstance(other, self.__class__): + raise TypeError("Polynomial types differ") + elif not np.all(self.domain == other.domain): + raise TypeError("Domains differ") + elif not np.all(self.window == other.window): + raise TypeError("Windows differ") + elif self.symbol != other.symbol: + raise ValueError("Polynomial symbols differ") + return other.coef + return other + + def __init__(self, coef, domain=None, window=None, symbol='x'): + [coef] = pu.as_series([coef], trim=False) + self.coef = coef + + if domain is not None: + [domain] = pu.as_series([domain], trim=False) + if len(domain) != 2: + raise ValueError("Domain has wrong number of elements.") + self.domain = domain + + if window is not None: + [window] = pu.as_series([window], trim=False) + if len(window) != 2: + raise ValueError("Window has wrong number of elements.") + self.window = window + + # Validation for symbol + try: + if not symbol.isidentifier(): + raise ValueError( + "Symbol string must be a valid Python identifier" + ) + # If a user passes in something other than a string, the above + # results in an AttributeError. Catch this and raise a more + # informative exception + except AttributeError: + raise TypeError("Symbol must be a non-empty string") + + self._symbol = symbol + + def __repr__(self): + coef = repr(self.coef)[6:-1] + domain = repr(self.domain)[6:-1] + window = repr(self.window)[6:-1] + name = self.__class__.__name__ + return (f"{name}({coef}, domain={domain}, window={window}, " + f"symbol='{self.symbol}')") + + def __format__(self, fmt_str): + if fmt_str == '': + return self.__str__() + if fmt_str not in ('ascii', 'unicode'): + raise ValueError( + f"Unsupported format string '{fmt_str}' passed to " + f"{self.__class__}.__format__. Valid options are " + f"'ascii' and 'unicode'" + ) + if fmt_str == 'ascii': + return self._generate_string(self._str_term_ascii) + return self._generate_string(self._str_term_unicode) + + def __str__(self): + if self._use_unicode: + return self._generate_string(self._str_term_unicode) + return self._generate_string(self._str_term_ascii) + + def _generate_string(self, term_method): + """ + Generate the full string representation of the polynomial, using + ``term_method`` to generate each polynomial term. + """ + # Get configuration for line breaks + linewidth = np.get_printoptions().get('linewidth', 75) + if linewidth < 1: + linewidth = 1 + out = pu.format_float(self.coef[0]) + + off, scale = self.mapparms() + + scaled_symbol, needs_parens = self._format_term(pu.format_float, + off, scale) + if needs_parens: + scaled_symbol = '(' + scaled_symbol + ')' + + for i, coef in enumerate(self.coef[1:]): + out += " " + power = str(i + 1) + # Polynomial coefficient + # The coefficient array can be an object array with elements that + # will raise a TypeError with >= 0 (e.g. strings or Python + # complex). In this case, represent the coefficient as-is. + try: + if coef >= 0: + next_term = "+ " + pu.format_float(coef, parens=True) + else: + next_term = "- " + pu.format_float(-coef, parens=True) + except TypeError: + next_term = f"+ {coef}" + # Polynomial term + next_term += term_method(power, scaled_symbol) + # Length of the current line with next term added + line_len = len(out.split('\n')[-1]) + len(next_term) + # If not the last term in the polynomial, it will be two + # characters longer due to the +/- with the next term + if i < len(self.coef[1:]) - 1: + line_len += 2 + # Handle linebreaking + if line_len >= linewidth: + next_term = next_term.replace(" ", "\n", 1) + out += next_term + return out + + @classmethod + def _str_term_unicode(cls, i, arg_str): + """ + String representation of single polynomial term using unicode + characters for superscripts and subscripts. + """ + if cls.basis_name is None: + raise NotImplementedError( + "Subclasses must define either a basis_name, or override " + "_str_term_unicode(cls, i, arg_str)" + ) + return (f"·{cls.basis_name}{i.translate(cls._subscript_mapping)}" + f"({arg_str})") + + @classmethod + def _str_term_ascii(cls, i, arg_str): + """ + String representation of a single polynomial term using ** and _ to + represent superscripts and subscripts, respectively. + """ + if cls.basis_name is None: + raise NotImplementedError( + "Subclasses must define either a basis_name, or override " + "_str_term_ascii(cls, i, arg_str)" + ) + return f" {cls.basis_name}_{i}({arg_str})" + + @classmethod + def _repr_latex_term(cls, i, arg_str, needs_parens): + if cls.basis_name is None: + raise NotImplementedError( + "Subclasses must define either a basis name, or override " + "_repr_latex_term(i, arg_str, needs_parens)") + # since we always add parens, we don't care if the expression needs them + return f"{{{cls.basis_name}}}_{{{i}}}({arg_str})" + + @staticmethod + def _repr_latex_scalar(x, parens=False): + # TODO: we're stuck with disabling math formatting until we handle + # exponents in this function + return fr'\text{{{pu.format_float(x, parens=parens)}}}' + + def _format_term(self, scalar_format: Callable, off: float, scale: float): + """ Format a single term in the expansion """ + if off == 0 and scale == 1: + term = self.symbol + needs_parens = False + elif scale == 1: + term = f"{scalar_format(off)} + {self.symbol}" + needs_parens = True + elif off == 0: + term = f"{scalar_format(scale)}{self.symbol}" + needs_parens = True + else: + term = ( + f"{scalar_format(off)} + " + f"{scalar_format(scale)}{self.symbol}" + ) + needs_parens = True + return term, needs_parens + + def _repr_latex_(self): + # get the scaled argument string to the basis functions + off, scale = self.mapparms() + term, needs_parens = self._format_term(self._repr_latex_scalar, + off, scale) + + mute = r"\color{{LightGray}}{{{}}}".format + + parts = [] + for i, c in enumerate(self.coef): + # prevent duplication of + and - signs + if i == 0: + coef_str = f"{self._repr_latex_scalar(c)}" + elif not isinstance(c, numbers.Real): + coef_str = f" + ({self._repr_latex_scalar(c)})" + elif c >= 0: + coef_str = f" + {self._repr_latex_scalar(c, parens=True)}" + else: + coef_str = f" - {self._repr_latex_scalar(-c, parens=True)}" + + # produce the string for the term + term_str = self._repr_latex_term(i, term, needs_parens) + if term_str == '1': + part = coef_str + else: + part = rf"{coef_str}\,{term_str}" + + if c == 0: + part = mute(part) + + parts.append(part) + + if parts: + body = ''.join(parts) + else: + # in case somehow there are no coefficients at all + body = '0' + + return rf"${self.symbol} \mapsto {body}$" + + # Pickle and copy + + def __getstate__(self): + ret = self.__dict__.copy() + ret['coef'] = self.coef.copy() + ret['domain'] = self.domain.copy() + ret['window'] = self.window.copy() + ret['symbol'] = self.symbol + return ret + + def __setstate__(self, dict): + self.__dict__ = dict + + # Call + + def __call__(self, arg): + arg = pu.mapdomain(arg, self.domain, self.window) + return self._val(arg, self.coef) + + def __iter__(self): + return iter(self.coef) + + def __len__(self): + return len(self.coef) + + # Numeric properties. + + def __neg__(self): + return self.__class__( + -self.coef, self.domain, self.window, self.symbol + ) + + def __pos__(self): + return self + + def __add__(self, other): + othercoef = self._get_coefficients(other) + try: + coef = self._add(self.coef, othercoef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __sub__(self, other): + othercoef = self._get_coefficients(other) + try: + coef = self._sub(self.coef, othercoef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __mul__(self, other): + othercoef = self._get_coefficients(other) + try: + coef = self._mul(self.coef, othercoef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __truediv__(self, other): + # there is no true divide if the rhs is not a Number, although it + # could return the first n elements of an infinite series. + # It is hard to see where n would come from, though. + if not isinstance(other, numbers.Number) or isinstance(other, bool): + raise TypeError( + f"unsupported types for true division: " + f"'{type(self)}', '{type(other)}'" + ) + return self.__floordiv__(other) + + def __floordiv__(self, other): + res = self.__divmod__(other) + if res is NotImplemented: + return res + return res[0] + + def __mod__(self, other): + res = self.__divmod__(other) + if res is NotImplemented: + return res + return res[1] + + def __divmod__(self, other): + othercoef = self._get_coefficients(other) + try: + quo, rem = self._div(self.coef, othercoef) + except ZeroDivisionError: + raise + except Exception: + return NotImplemented + quo = self.__class__(quo, self.domain, self.window, self.symbol) + rem = self.__class__(rem, self.domain, self.window, self.symbol) + return quo, rem + + def __pow__(self, other): + coef = self._pow(self.coef, other, maxpower=self.maxpower) + res = self.__class__(coef, self.domain, self.window, self.symbol) + return res + + def __radd__(self, other): + try: + coef = self._add(other, self.coef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __rsub__(self, other): + try: + coef = self._sub(other, self.coef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __rmul__(self, other): + try: + coef = self._mul(other, self.coef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __rtruediv__(self, other): + # An instance of ABCPolyBase is not considered a + # Number. + return NotImplemented + + def __rfloordiv__(self, other): + res = self.__rdivmod__(other) + if res is NotImplemented: + return res + return res[0] + + def __rmod__(self, other): + res = self.__rdivmod__(other) + if res is NotImplemented: + return res + return res[1] + + def __rdivmod__(self, other): + try: + quo, rem = self._div(other, self.coef) + except ZeroDivisionError: + raise + except Exception: + return NotImplemented + quo = self.__class__(quo, self.domain, self.window, self.symbol) + rem = self.__class__(rem, self.domain, self.window, self.symbol) + return quo, rem + + def __eq__(self, other): + res = (isinstance(other, self.__class__) and + np.all(self.domain == other.domain) and + np.all(self.window == other.window) and + (self.coef.shape == other.coef.shape) and + np.all(self.coef == other.coef) and + (self.symbol == other.symbol)) + return res + + def __ne__(self, other): + return not self.__eq__(other) + + # + # Extra methods. + # + + def copy(self): + """Return a copy. + + Returns + ------- + new_series : series + Copy of self. + + """ + return self.__class__(self.coef, self.domain, self.window, self.symbol) + + def degree(self): + """The degree of the series. + + Returns + ------- + degree : int + Degree of the series, one less than the number of coefficients. + + Examples + -------- + + Create a polynomial object for ``1 + 7*x + 4*x**2``: + + >>> np.polynomial.set_default_printstyle("unicode") + >>> poly = np.polynomial.Polynomial([1, 7, 4]) + >>> print(poly) + 1.0 + 7.0·x + 4.0·x² + >>> poly.degree() + 2 + + Note that this method does not check for non-zero coefficients. + You must trim the polynomial to remove any trailing zeroes: + + >>> poly = np.polynomial.Polynomial([1, 7, 0]) + >>> print(poly) + 1.0 + 7.0·x + 0.0·x² + >>> poly.degree() + 2 + >>> poly.trim().degree() + 1 + + """ + return len(self) - 1 + + def cutdeg(self, deg): + """Truncate series to the given degree. + + Reduce the degree of the series to `deg` by discarding the + high order terms. If `deg` is greater than the current degree a + copy of the current series is returned. This can be useful in least + squares where the coefficients of the high degree terms may be very + small. + + Parameters + ---------- + deg : non-negative int + The series is reduced to degree `deg` by discarding the high + order terms. The value of `deg` must be a non-negative integer. + + Returns + ------- + new_series : series + New instance of series with reduced degree. + + """ + return self.truncate(deg + 1) + + def trim(self, tol=0): + """Remove trailing coefficients + + Remove trailing coefficients until a coefficient is reached whose + absolute value greater than `tol` or the beginning of the series is + reached. If all the coefficients would be removed the series is set + to ``[0]``. A new series instance is returned with the new + coefficients. The current instance remains unchanged. + + Parameters + ---------- + tol : non-negative number. + All trailing coefficients less than `tol` will be removed. + + Returns + ------- + new_series : series + New instance of series with trimmed coefficients. + + """ + coef = pu.trimcoef(self.coef, tol) + return self.__class__(coef, self.domain, self.window, self.symbol) + + def truncate(self, size): + """Truncate series to length `size`. + + Reduce the series to length `size` by discarding the high + degree terms. The value of `size` must be a positive integer. This + can be useful in least squares where the coefficients of the + high degree terms may be very small. + + Parameters + ---------- + size : positive int + The series is reduced to length `size` by discarding the high + degree terms. The value of `size` must be a positive integer. + + Returns + ------- + new_series : series + New instance of series with truncated coefficients. + + """ + isize = int(size) + if isize != size or isize < 1: + raise ValueError("size must be a positive integer") + if isize >= len(self.coef): + coef = self.coef + else: + coef = self.coef[:isize] + return self.__class__(coef, self.domain, self.window, self.symbol) + + def convert(self, domain=None, kind=None, window=None): + """Convert series to a different kind and/or domain and/or window. + + Parameters + ---------- + domain : array_like, optional + The domain of the converted series. If the value is None, + the default domain of `kind` is used. + kind : class, optional + The polynomial series type class to which the current instance + should be converted. If kind is None, then the class of the + current instance is used. + window : array_like, optional + The window of the converted series. If the value is None, + the default window of `kind` is used. + + Returns + ------- + new_series : series + The returned class can be of different type than the current + instance and/or have a different domain and/or different + window. + + Notes + ----- + Conversion between domains and class types can result in + numerically ill defined series. + + """ + if kind is None: + kind = self.__class__ + if domain is None: + domain = kind.domain + if window is None: + window = kind.window + return self(kind.identity(domain, window=window, symbol=self.symbol)) + + def mapparms(self): + """Return the mapping parameters. + + The returned values define a linear map ``off + scl*x`` that is + applied to the input arguments before the series is evaluated. The + map depends on the ``domain`` and ``window``; if the current + ``domain`` is equal to the ``window`` the resulting map is the + identity. If the coefficients of the series instance are to be + used by themselves outside this class, then the linear function + must be substituted for the ``x`` in the standard representation of + the base polynomials. + + Returns + ------- + off, scl : float or complex + The mapping function is defined by ``off + scl*x``. + + Notes + ----- + If the current domain is the interval ``[l1, r1]`` and the window + is ``[l2, r2]``, then the linear mapping function ``L`` is + defined by the equations:: + + L(l1) = l2 + L(r1) = r2 + + """ + return pu.mapparms(self.domain, self.window) + + def integ(self, m=1, k=[], lbnd=None): + """Integrate. + + Return a series instance that is the definite integral of the + current series. + + Parameters + ---------- + m : non-negative int + The number of integrations to perform. + k : array_like + Integration constants. The first constant is applied to the + first integration, the second to the second, and so on. The + list of values must less than or equal to `m` in length and any + missing values are set to zero. + lbnd : Scalar + The lower bound of the definite integral. + + Returns + ------- + new_series : series + A new series representing the integral. The domain is the same + as the domain of the integrated series. + + """ + off, scl = self.mapparms() + if lbnd is None: + lbnd = 0 + else: + lbnd = off + scl * lbnd + coef = self._int(self.coef, m, k, lbnd, 1. / scl) + return self.__class__(coef, self.domain, self.window, self.symbol) + + def deriv(self, m=1): + """Differentiate. + + Return a series instance of that is the derivative of the current + series. + + Parameters + ---------- + m : non-negative int + Find the derivative of order `m`. + + Returns + ------- + new_series : series + A new series representing the derivative. The domain is the same + as the domain of the differentiated series. + + """ + off, scl = self.mapparms() + coef = self._der(self.coef, m, scl) + return self.__class__(coef, self.domain, self.window, self.symbol) + + def roots(self): + """Return the roots of the series polynomial. + + Compute the roots for the series. Note that the accuracy of the + roots decreases the further outside the `domain` they lie. + + Returns + ------- + roots : ndarray + Array containing the roots of the series. + + """ + roots = self._roots(self.coef) + return pu.mapdomain(roots, self.window, self.domain) + + def linspace(self, n=100, domain=None): + """Return x, y values at equally spaced points in domain. + + Returns the x, y values at `n` linearly spaced points across the + domain. Here y is the value of the polynomial at the points x. By + default the domain is the same as that of the series instance. + This method is intended mostly as a plotting aid. + + Parameters + ---------- + n : int, optional + Number of point pairs to return. The default value is 100. + domain : {None, array_like}, optional + If not None, the specified domain is used instead of that of + the calling instance. It should be of the form ``[beg,end]``. + The default is None which case the class domain is used. + + Returns + ------- + x, y : ndarray + x is equal to linspace(self.domain[0], self.domain[1], n) and + y is the series evaluated at element of x. + + """ + if domain is None: + domain = self.domain + x = np.linspace(domain[0], domain[1], n) + y = self(x) + return x, y + + @classmethod + def fit(cls, x, y, deg, domain=None, rcond=None, full=False, w=None, + window=None, symbol='x'): + """Least squares fit to data. + + Return a series instance that is the least squares fit to the data + `y` sampled at `x`. The domain of the returned instance can be + specified and this will often result in a superior fit with less + chance of ill conditioning. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) + y-coordinates of the M sample points ``(x[i], y[i])``. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + domain : {None, [beg, end], []}, optional + Domain to use for the returned series. If ``None``, + then a minimal domain that covers the points `x` is chosen. If + ``[]`` the class domain is used. The default value was the + class domain in NumPy 1.4 and ``None`` in later versions. + The ``[]`` option was added in numpy 1.5.0. + rcond : float, optional + Relative condition number of the fit. Singular values smaller + than this relative to the largest singular value will be + ignored. The default value is ``len(x)*eps``, where eps is the + relative precision of the float type, about 2e-16 in most + cases. + full : bool, optional + Switch determining nature of return value. When it is False + (the default) just the coefficients are returned, when True + diagnostic information from the singular value decomposition is + also returned. + w : array_like, shape (M,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have + the same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + window : {[beg, end]}, optional + Window to use for the returned series. The default + value is the default class domain + symbol : str, optional + Symbol representing the independent variable. Default is 'x'. + + Returns + ------- + new_series : series + A series that represents the least squares fit to the data and + has the domain and window specified in the call. If the + coefficients for the unscaled and unshifted basis polynomials are + of interest, do ``new_series.convert().coef``. + + [resid, rank, sv, rcond] : list + These values are only returned if ``full == True`` + + - resid -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - sv -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `linalg.lstsq`. + + """ + if domain is None: + domain = pu.getdomain(x) + if domain[0] == domain[1]: + domain[0] -= 1 + domain[1] += 1 + elif isinstance(domain, list) and len(domain) == 0: + domain = cls.domain + + if window is None: + window = cls.window + + xnew = pu.mapdomain(x, domain, window) + res = cls._fit(xnew, y, deg, w=w, rcond=rcond, full=full) + if full: + [coef, status] = res + return ( + cls(coef, domain=domain, window=window, symbol=symbol), status + ) + else: + coef = res + return cls(coef, domain=domain, window=window, symbol=symbol) + + @classmethod + def fromroots(cls, roots, domain=[], window=None, symbol='x'): + """Return series instance that has the specified roots. + + Returns a series representing the product + ``(x - r[0])*(x - r[1])*...*(x - r[n-1])``, where ``r`` is a + list of roots. + + Parameters + ---------- + roots : array_like + List of roots. + domain : {[], None, array_like}, optional + Domain for the resulting series. If None the domain is the + interval from the smallest root to the largest. If [] the + domain is the class domain. The default is []. + window : {None, array_like}, optional + Window for the returned series. If None the class window is + used. The default is None. + symbol : str, optional + Symbol representing the independent variable. Default is 'x'. + + Returns + ------- + new_series : series + Series with the specified roots. + + """ + [roots] = pu.as_series([roots], trim=False) + if domain is None: + domain = pu.getdomain(roots) + elif isinstance(domain, list) and len(domain) == 0: + domain = cls.domain + + if window is None: + window = cls.window + + deg = len(roots) + off, scl = pu.mapparms(domain, window) + rnew = off + scl * roots + coef = cls._fromroots(rnew) / scl**deg + return cls(coef, domain=domain, window=window, symbol=symbol) + + @classmethod + def identity(cls, domain=None, window=None, symbol='x'): + """Identity function. + + If ``p`` is the returned series, then ``p(x) == x`` for all + values of x. + + Parameters + ---------- + domain : {None, array_like}, optional + If given, the array must be of the form ``[beg, end]``, where + ``beg`` and ``end`` are the endpoints of the domain. If None is + given then the class domain is used. The default is None. + window : {None, array_like}, optional + If given, the resulting array must be if the form + ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of + the window. If None is given then the class window is used. The + default is None. + symbol : str, optional + Symbol representing the independent variable. Default is 'x'. + + Returns + ------- + new_series : series + Series of representing the identity. + + """ + if domain is None: + domain = cls.domain + if window is None: + window = cls.window + off, scl = pu.mapparms(window, domain) + coef = cls._line(off, scl) + return cls(coef, domain, window, symbol) + + @classmethod + def basis(cls, deg, domain=None, window=None, symbol='x'): + """Series basis polynomial of degree `deg`. + + Returns the series representing the basis polynomial of degree `deg`. + + Parameters + ---------- + deg : int + Degree of the basis polynomial for the series. Must be >= 0. + domain : {None, array_like}, optional + If given, the array must be of the form ``[beg, end]``, where + ``beg`` and ``end`` are the endpoints of the domain. If None is + given then the class domain is used. The default is None. + window : {None, array_like}, optional + If given, the resulting array must be if the form + ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of + the window. If None is given then the class window is used. The + default is None. + symbol : str, optional + Symbol representing the independent variable. Default is 'x'. + + Returns + ------- + new_series : series + A series with the coefficient of the `deg` term set to one and + all others zero. + + """ + if domain is None: + domain = cls.domain + if window is None: + window = cls.window + ideg = int(deg) + + if ideg != deg or ideg < 0: + raise ValueError("deg must be non-negative integer") + return cls([0] * ideg + [1], domain, window, symbol) + + @classmethod + def cast(cls, series, domain=None, window=None): + """Convert series to series of this class. + + The `series` is expected to be an instance of some polynomial + series of one of the types supported by by the numpy.polynomial + module, but could be some other class that supports the convert + method. + + Parameters + ---------- + series : series + The series instance to be converted. + domain : {None, array_like}, optional + If given, the array must be of the form ``[beg, end]``, where + ``beg`` and ``end`` are the endpoints of the domain. If None is + given then the class domain is used. The default is None. + window : {None, array_like}, optional + If given, the resulting array must be if the form + ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of + the window. If None is given then the class window is used. The + default is None. + + Returns + ------- + new_series : series + A series of the same kind as the calling class and equal to + `series` when evaluated. + + See Also + -------- + convert : similar instance method + + """ + if domain is None: + domain = cls.domain + if window is None: + window = cls.window + return series.convert(domain, cls, window) diff --git a/local_packages/numpy/polynomial/_polybase.pyi b/local_packages/numpy/polynomial/_polybase.pyi new file mode 100644 index 0000000..b16b06c --- /dev/null +++ b/local_packages/numpy/polynomial/_polybase.pyi @@ -0,0 +1,262 @@ +import abc +import decimal +from collections.abc import Iterator, Sequence +from typing import ( + Any, + ClassVar, + Generic, + Literal, + Self, + SupportsIndex, + TypeAlias, + overload, +) +from typing_extensions import TypeIs, TypeVar + +import numpy as np +import numpy.typing as npt +from numpy._typing import ( + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _FloatLike_co, + _NumberLike_co, +) + +from ._polytypes import ( + _AnyInt, + _Array2, + _ArrayLikeCoef_co, + _ArrayLikeCoefObject_co, + _CoefLike_co, + _CoefSeries, + _Series, + _SeriesLikeCoef_co, + _SeriesLikeInt_co, + _Tuple2, +) + +__all__ = ["ABCPolyBase"] + +_NameT_co = TypeVar("_NameT_co", bound=str | None, default=str | None, covariant=True) +_PolyT = TypeVar("_PolyT", bound=ABCPolyBase) +_AnyOther: TypeAlias = ABCPolyBase | _CoefLike_co | _SeriesLikeCoef_co + +class ABCPolyBase(Generic[_NameT_co], abc.ABC): + __hash__: ClassVar[None] = None # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] + __array_ufunc__: ClassVar[None] = None + maxpower: ClassVar[Literal[100]] = 100 + + _superscript_mapping: ClassVar[dict[int, str]] = ... + _subscript_mapping: ClassVar[dict[int, str]] = ... + _use_unicode: ClassVar[bool] = ... + + _symbol: str + @property + def symbol(self, /) -> str: ... + @property + @abc.abstractmethod + def domain(self) -> _Array2[np.float64 | Any]: ... + @property + @abc.abstractmethod + def window(self) -> _Array2[np.float64 | Any]: ... + @property + @abc.abstractmethod + def basis_name(self) -> _NameT_co: ... + + coef: _CoefSeries + + def __init__( + self, + /, + coef: _SeriesLikeCoef_co, + domain: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", + ) -> None: ... + + # + @overload + def __call__(self, /, arg: _PolyT) -> _PolyT: ... + @overload + def __call__(self, /, arg: _FloatLike_co | decimal.Decimal) -> np.float64 | Any: ... + @overload + def __call__(self, /, arg: _NumberLike_co) -> np.complex128 | Any: ... + @overload + def __call__(self, /, arg: _ArrayLikeFloat_co) -> npt.NDArray[np.float64 | Any]: ... + @overload + def __call__(self, /, arg: _ArrayLikeComplex_co) -> npt.NDArray[np.complex128 | Any]: ... + @overload + def __call__(self, /, arg: _ArrayLikeCoefObject_co) -> npt.NDArray[np.object_]: ... + + # unary ops + def __neg__(self, /) -> Self: ... + def __pos__(self, /) -> Self: ... + + # binary ops + def __add__(self, x: _AnyOther, /) -> Self: ... + def __sub__(self, x: _AnyOther, /) -> Self: ... + def __mul__(self, x: _AnyOther, /) -> Self: ... + def __pow__(self, x: _AnyOther, /) -> Self: ... + def __truediv__(self, x: _AnyOther, /) -> Self: ... + def __floordiv__(self, x: _AnyOther, /) -> Self: ... + def __mod__(self, x: _AnyOther, /) -> Self: ... + def __divmod__(self, x: _AnyOther, /) -> _Tuple2[Self]: ... + + # reflected binary ops + def __radd__(self, x: _AnyOther, /) -> Self: ... + def __rsub__(self, x: _AnyOther, /) -> Self: ... + def __rmul__(self, x: _AnyOther, /) -> Self: ... + def __rtruediv__(self, x: _AnyOther, /) -> Self: ... + def __rfloordiv__(self, x: _AnyOther, /) -> Self: ... + def __rmod__(self, x: _AnyOther, /) -> Self: ... + def __rdivmod__(self, x: _AnyOther, /) -> _Tuple2[Self]: ... + + # iterable and sized + def __len__(self, /) -> int: ... + def __iter__(self, /) -> Iterator[np.float64 | Any]: ... + + # pickling + def __getstate__(self, /) -> dict[str, Any]: ... + def __setstate__(self, dict: dict[str, Any], /) -> None: ... + + # + def has_samecoef(self, /, other: ABCPolyBase) -> bool: ... + def has_samedomain(self, /, other: ABCPolyBase) -> bool: ... + def has_samewindow(self, /, other: ABCPolyBase) -> bool: ... + def has_sametype(self, /, other: object) -> TypeIs[Self]: ... + + # + def copy(self, /) -> Self: ... + def degree(self, /) -> int: ... + def cutdeg(self, /, deg: int) -> Self: ... + def trim(self, /, tol: _FloatLike_co = 0) -> Self: ... + def truncate(self, /, size: _AnyInt) -> Self: ... + + # + @overload + def convert( + self, + /, + domain: _SeriesLikeCoef_co | None, + kind: type[_PolyT], + window: _SeriesLikeCoef_co | None = None, + ) -> _PolyT: ... + @overload + def convert( + self, + /, + domain: _SeriesLikeCoef_co | None = None, + *, + kind: type[_PolyT], + window: _SeriesLikeCoef_co | None = None, + ) -> _PolyT: ... + @overload + def convert( + self, + /, + domain: _SeriesLikeCoef_co | None = None, + kind: None = None, + window: _SeriesLikeCoef_co | None = None, + ) -> Self: ... + + # + def mapparms(self, /) -> _Tuple2[Any]: ... + def integ( + self, + /, + m: SupportsIndex = 1, + k: _CoefLike_co | _SeriesLikeCoef_co = [], + lbnd: _CoefLike_co | None = None, + ) -> Self: ... + def deriv(self, /, m: SupportsIndex = 1) -> Self: ... + def roots(self, /) -> _CoefSeries: ... + def linspace( + self, + /, + n: SupportsIndex = 100, + domain: _SeriesLikeCoef_co | None = None, + ) -> _Tuple2[_Series[np.float64 | np.complex128]]: ... + + # + @overload + @classmethod + def fit( + cls, + x: _SeriesLikeCoef_co, + y: _SeriesLikeCoef_co, + deg: int | _SeriesLikeInt_co, + domain: _SeriesLikeCoef_co | None = None, + rcond: _FloatLike_co | None = None, + full: Literal[False] = False, + w: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", + ) -> Self: ... + @overload + @classmethod + def fit( + cls, + x: _SeriesLikeCoef_co, + y: _SeriesLikeCoef_co, + deg: int | _SeriesLikeInt_co, + domain: _SeriesLikeCoef_co | None = None, + rcond: _FloatLike_co | None = None, + *, + full: Literal[True], + w: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", + ) -> tuple[Self, Sequence[np.inexact | np.int32]]: ... + @overload + @classmethod + def fit( + cls, + x: _SeriesLikeCoef_co, + y: _SeriesLikeCoef_co, + deg: int | _SeriesLikeInt_co, + domain: _SeriesLikeCoef_co | None, + rcond: _FloatLike_co, + full: Literal[True], + /, + w: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", + ) -> tuple[Self, Sequence[np.inexact | np.int32]]: ... + + # + @classmethod + def fromroots( + cls, + roots: _ArrayLikeCoef_co, + domain: _SeriesLikeCoef_co | None = [], + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", + ) -> Self: ... + @classmethod + def identity( + cls, + domain: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", + ) -> Self: ... + @classmethod + def basis( + cls, + deg: _AnyInt, + domain: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + symbol: str = "x", + ) -> Self: ... + @classmethod + def cast( + cls, + series: ABCPolyBase, + domain: _SeriesLikeCoef_co | None = None, + window: _SeriesLikeCoef_co | None = None, + ) -> Self: ... + @classmethod + def _str_term_unicode(cls, /, i: str, arg_str: str) -> str: ... + @classmethod + def _str_term_ascii(cls, /, i: str, arg_str: str) -> str: ... + @classmethod + def _repr_latex_term(cls, /, i: str, arg_str: str, needs_parens: bool) -> str: ... diff --git a/local_packages/numpy/polynomial/_polytypes.pyi b/local_packages/numpy/polynomial/_polytypes.pyi new file mode 100644 index 0000000..b5a603b --- /dev/null +++ b/local_packages/numpy/polynomial/_polytypes.pyi @@ -0,0 +1,501 @@ +# ruff: noqa: PYI046 + +from collections.abc import Sequence +from typing import ( + Any, + Literal, + NoReturn, + Protocol, + Self, + SupportsIndex, + SupportsInt, + TypeAlias, + TypeVar, + overload, + type_check_only, +) + +import numpy as np +import numpy.typing as npt +from numpy._typing import ( + _ArrayLikeComplex_co, + # array-likes + _ArrayLikeFloat_co, + _ArrayLikeNumber_co, + _ComplexLike_co, + _FloatLike_co, + # scalar-likes + _IntLike_co, + _NestedSequence, + _NumberLike_co, + _SupportsArray, +) + +_T = TypeVar("_T") +_T_contra = TypeVar("_T_contra", contravariant=True) +_ScalarT = TypeVar("_ScalarT", bound=np.number | np.bool | np.object_) + +# compatible with e.g. int, float, complex, Decimal, Fraction, and ABCPolyBase +@type_check_only +class _SupportsCoefOps(Protocol[_T_contra]): + def __eq__(self, x: object, /) -> bool: ... + def __ne__(self, x: object, /) -> bool: ... + def __neg__(self, /) -> Self: ... + def __pos__(self, /) -> Self: ... + def __add__(self, x: _T_contra, /) -> Self: ... + def __sub__(self, x: _T_contra, /) -> Self: ... + def __mul__(self, x: _T_contra, /) -> Self: ... + def __pow__(self, x: _T_contra, /) -> Self | float: ... + def __radd__(self, x: _T_contra, /) -> Self: ... + def __rsub__(self, x: _T_contra, /) -> Self: ... + def __rmul__(self, x: _T_contra, /) -> Self: ... + +_Series: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] + +_FloatSeries: TypeAlias = _Series[np.floating] +_ComplexSeries: TypeAlias = _Series[np.complexfloating] +_ObjectSeries: TypeAlias = _Series[np.object_] +_CoefSeries: TypeAlias = _Series[np.inexact | np.object_] + +_FloatArray: TypeAlias = npt.NDArray[np.floating] +_ComplexArray: TypeAlias = npt.NDArray[np.complexfloating] +_ObjectArray: TypeAlias = npt.NDArray[np.object_] +_CoefArray: TypeAlias = npt.NDArray[np.inexact | np.object_] + +_Tuple2: TypeAlias = tuple[_T, _T] +_Array1: TypeAlias = np.ndarray[tuple[Literal[1]], np.dtype[_ScalarT]] +_Array2: TypeAlias = np.ndarray[tuple[Literal[2]], np.dtype[_ScalarT]] + +_AnyInt: TypeAlias = SupportsInt | SupportsIndex + +_CoefObjectLike_co: TypeAlias = np.object_ | _SupportsCoefOps[Any] +_CoefLike_co: TypeAlias = _NumberLike_co | _CoefObjectLike_co + +# The term "series" is used here to refer to 1-d arrays of numeric scalars. +_SeriesLikeBool_co: TypeAlias = _SupportsArray[np.dtype[np.bool]] | Sequence[bool | np.bool] +_SeriesLikeInt_co: TypeAlias = _SupportsArray[np.dtype[np.integer | np.bool]] | Sequence[_IntLike_co] +_SeriesLikeFloat_co: TypeAlias = _SupportsArray[np.dtype[np.floating | np.integer | np.bool]] | Sequence[_FloatLike_co] +_SeriesLikeComplex_co: TypeAlias = _SupportsArray[np.dtype[np.number | np.bool]] | Sequence[_ComplexLike_co] +_SeriesLikeObject_co: TypeAlias = _SupportsArray[np.dtype[np.object_]] | Sequence[_CoefObjectLike_co] +_SeriesLikeCoef_co: TypeAlias = _SupportsArray[np.dtype[np.number | np.bool | np.object_]] | Sequence[_CoefLike_co] + +_ArrayLikeCoefObject_co: TypeAlias = _CoefObjectLike_co | _SeriesLikeObject_co | _NestedSequence[_SeriesLikeObject_co] +_ArrayLikeCoef_co: TypeAlias = npt.NDArray[np.number | np.bool | np.object_] | _ArrayLikeNumber_co | _ArrayLikeCoefObject_co + +_Line: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] + +@type_check_only +class _FuncLine(Protocol): + @overload + def __call__(self, /, off: _ScalarT, scl: _ScalarT) -> _Line[_ScalarT]: ... + @overload + def __call__(self, /, off: int, scl: int) -> _Line[np.int_]: ... + @overload + def __call__(self, /, off: float, scl: float) -> _Line[np.float64]: ... + @overload + def __call__(self, /, off: complex, scl: complex) -> _Line[np.complex128]: ... + @overload + def __call__(self, /, off: _SupportsCoefOps[Any], scl: _SupportsCoefOps[Any]) -> _Line[np.object_]: ... + +@type_check_only +class _FuncFromRoots(Protocol): + @overload + def __call__(self, /, roots: _SeriesLikeFloat_co) -> _FloatSeries: ... + @overload + def __call__(self, /, roots: _SeriesLikeComplex_co) -> _ComplexSeries: ... + @overload + def __call__(self, /, roots: _SeriesLikeCoef_co) -> _ObjectSeries: ... + +@type_check_only +class _FuncBinOp(Protocol): + @overload + def __call__(self, /, c1: _SeriesLikeBool_co, c2: _SeriesLikeBool_co) -> NoReturn: ... + @overload + def __call__(self, /, c1: _SeriesLikeFloat_co, c2: _SeriesLikeFloat_co) -> _FloatSeries: ... + @overload + def __call__(self, /, c1: _SeriesLikeComplex_co, c2: _SeriesLikeComplex_co) -> _ComplexSeries: ... + @overload + def __call__(self, /, c1: _SeriesLikeCoef_co, c2: _SeriesLikeCoef_co) -> _ObjectSeries: ... + +@type_check_only +class _FuncUnOp(Protocol): + @overload + def __call__(self, /, c: _SeriesLikeFloat_co) -> _FloatSeries: ... + @overload + def __call__(self, /, c: _SeriesLikeComplex_co) -> _ComplexSeries: ... + @overload + def __call__(self, /, c: _SeriesLikeCoef_co) -> _ObjectSeries: ... + +@type_check_only +class _FuncPoly2Ortho(Protocol): + @overload + def __call__(self, /, pol: _SeriesLikeFloat_co) -> _FloatSeries: ... + @overload + def __call__(self, /, pol: _SeriesLikeComplex_co) -> _ComplexSeries: ... + @overload + def __call__(self, /, pol: _SeriesLikeCoef_co) -> _ObjectSeries: ... + +@type_check_only +class _FuncPow(Protocol): + @overload + def __call__(self, /, c: _SeriesLikeFloat_co, pow: _IntLike_co, maxpower: _IntLike_co | None = ...) -> _FloatSeries: ... + @overload + def __call__(self, /, c: _SeriesLikeComplex_co, pow: _IntLike_co, maxpower: _IntLike_co | None = ...) -> _ComplexSeries: ... + @overload + def __call__(self, /, c: _SeriesLikeCoef_co, pow: _IntLike_co, maxpower: _IntLike_co | None = ...) -> _ObjectSeries: ... + +@type_check_only +class _FuncDer(Protocol): + @overload + def __call__( + self, + /, + c: _ArrayLikeFloat_co, + m: SupportsIndex = 1, + scl: _FloatLike_co = 1, + axis: SupportsIndex = 0, + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + c: _ArrayLikeComplex_co, + m: SupportsIndex = 1, + scl: _ComplexLike_co = 1, + axis: SupportsIndex = 0, + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + c: _ArrayLikeCoef_co, + m: SupportsIndex = 1, + scl: _CoefLike_co = 1, + axis: SupportsIndex = 0, + ) -> _ObjectArray: ... + +@type_check_only +class _FuncInteg(Protocol): + @overload + def __call__( + self, + /, + c: _ArrayLikeFloat_co, + m: SupportsIndex = 1, + k: _FloatLike_co | _SeriesLikeFloat_co = [], + lbnd: _FloatLike_co = 0, + scl: _FloatLike_co = 1, + axis: SupportsIndex = 0, + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + c: _ArrayLikeComplex_co, + m: SupportsIndex = 1, + k: _ComplexLike_co | _SeriesLikeComplex_co = [], + lbnd: _ComplexLike_co = 0, + scl: _ComplexLike_co = 1, + axis: SupportsIndex = 0, + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + c: _ArrayLikeCoef_co, + m: SupportsIndex = 1, + k: _CoefLike_co | _SeriesLikeCoef_co = [], + lbnd: _CoefLike_co = 0, + scl: _CoefLike_co = 1, + axis: SupportsIndex = 0, + ) -> _ObjectArray: ... + +@type_check_only +class _FuncVal(Protocol): + @overload + def __call__(self, /, x: _FloatLike_co, c: _SeriesLikeFloat_co, tensor: bool = True) -> np.floating: ... + @overload + def __call__(self, /, x: _NumberLike_co, c: _SeriesLikeComplex_co, tensor: bool = True) -> np.complexfloating: ... + @overload + def __call__(self, /, x: _ArrayLikeFloat_co, c: _ArrayLikeFloat_co, tensor: bool = True) -> _FloatArray: ... + @overload + def __call__(self, /, x: _ArrayLikeComplex_co, c: _ArrayLikeComplex_co, tensor: bool = True) -> _ComplexArray: ... + @overload + def __call__(self, /, x: _ArrayLikeCoef_co, c: _ArrayLikeCoef_co, tensor: bool = True) -> _ObjectArray: ... + @overload + def __call__(self, /, x: _CoefLike_co, c: _SeriesLikeObject_co, tensor: bool = True) -> _SupportsCoefOps[Any]: ... + +@type_check_only +class _FuncVal2D(Protocol): + @overload + def __call__(self, /, x: _FloatLike_co, y: _FloatLike_co, c: _SeriesLikeFloat_co) -> np.floating: ... + @overload + def __call__(self, /, x: _NumberLike_co, y: _NumberLike_co, c: _SeriesLikeComplex_co) -> np.complexfloating: ... + @overload + def __call__(self, /, x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, c: _ArrayLikeFloat_co) -> _FloatArray: ... + @overload + def __call__(self, /, x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, c: _ArrayLikeComplex_co) -> _ComplexArray: ... + @overload + def __call__(self, /, x: _ArrayLikeCoef_co, y: _ArrayLikeCoef_co, c: _ArrayLikeCoef_co) -> _ObjectArray: ... + @overload + def __call__(self, /, x: _CoefLike_co, y: _CoefLike_co, c: _SeriesLikeCoef_co) -> _SupportsCoefOps[Any]: ... + +@type_check_only +class _FuncVal3D(Protocol): + @overload + def __call__( + self, + /, + x: _FloatLike_co, + y: _FloatLike_co, + z: _FloatLike_co, + c: _SeriesLikeFloat_co, + ) -> np.floating: ... + @overload + def __call__( + self, + /, + x: _NumberLike_co, + y: _NumberLike_co, + z: _NumberLike_co, + c: _SeriesLikeComplex_co, + ) -> np.complexfloating: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + z: _ArrayLikeFloat_co, + c: _ArrayLikeFloat_co, + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + z: _ArrayLikeComplex_co, + c: _ArrayLikeComplex_co, + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeCoef_co, + y: _ArrayLikeCoef_co, + z: _ArrayLikeCoef_co, + c: _ArrayLikeCoef_co, + ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + x: _CoefLike_co, + y: _CoefLike_co, + z: _CoefLike_co, + c: _SeriesLikeCoef_co, + ) -> _SupportsCoefOps[Any]: ... + +@type_check_only +class _FuncVander(Protocol): + @overload + def __call__(self, /, x: _ArrayLikeFloat_co, deg: SupportsIndex) -> _FloatArray: ... + @overload + def __call__(self, /, x: _ArrayLikeComplex_co, deg: SupportsIndex) -> _ComplexArray: ... + @overload + def __call__(self, /, x: _ArrayLikeCoef_co, deg: SupportsIndex) -> _ObjectArray: ... + @overload + def __call__(self, /, x: npt.ArrayLike, deg: SupportsIndex) -> _CoefArray: ... + +_AnyDegrees: TypeAlias = Sequence[SupportsIndex] + +@type_check_only +class _FuncVander2D(Protocol): + @overload + def __call__(self, /, x: _ArrayLikeFloat_co, y: _ArrayLikeFloat_co, deg: _AnyDegrees) -> _FloatArray: ... + @overload + def __call__(self, /, x: _ArrayLikeComplex_co, y: _ArrayLikeComplex_co, deg: _AnyDegrees) -> _ComplexArray: ... + @overload + def __call__(self, /, x: _ArrayLikeCoef_co, y: _ArrayLikeCoef_co, deg: _AnyDegrees) -> _ObjectArray: ... + @overload + def __call__(self, /, x: npt.ArrayLike, y: npt.ArrayLike, deg: _AnyDegrees) -> _CoefArray: ... + +@type_check_only +class _FuncVander3D(Protocol): + @overload + def __call__( + self, + /, + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + z: _ArrayLikeFloat_co, + deg: _AnyDegrees, + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + z: _ArrayLikeComplex_co, + deg: _AnyDegrees, + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeCoef_co, + y: _ArrayLikeCoef_co, + z: _ArrayLikeCoef_co, + deg: _AnyDegrees, + ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + x: npt.ArrayLike, + y: npt.ArrayLike, + z: npt.ArrayLike, + deg: _AnyDegrees, + ) -> _CoefArray: ... + +_FullFitResult: TypeAlias = Sequence[np.inexact | np.int32] + +@type_check_only +class _FuncFit(Protocol): + @overload + def __call__( + self, + /, + x: _SeriesLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None = ..., + full: Literal[False] = False, + w: _SeriesLikeFloat_co | None = ..., + ) -> _FloatArray: ... + @overload + def __call__( + self, + x: _SeriesLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None, + full: Literal[True], + /, + w: _SeriesLikeFloat_co | None = ..., + ) -> tuple[_FloatArray, _FullFitResult]: ... + @overload + def __call__( + self, + /, + x: _SeriesLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None = ..., + *, + full: Literal[True], + w: _SeriesLikeFloat_co | None = ..., + ) -> tuple[_FloatArray, _FullFitResult]: ... + @overload + def __call__( + self, + /, + x: _SeriesLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None = ..., + full: Literal[False] = False, + w: _SeriesLikeFloat_co | None = ..., + ) -> _ComplexArray: ... + @overload + def __call__( + self, + x: _SeriesLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None, + full: Literal[True], + /, + w: _SeriesLikeFloat_co | None = ..., + ) -> tuple[_ComplexArray, _FullFitResult]: ... + @overload + def __call__( + self, + /, + x: _SeriesLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None = ..., + *, + full: Literal[True], + w: _SeriesLikeFloat_co | None = ..., + ) -> tuple[_ComplexArray, _FullFitResult]: ... + @overload + def __call__( + self, + /, + x: _SeriesLikeComplex_co, + y: _ArrayLikeCoef_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None = ..., + full: Literal[False] = False, + w: _SeriesLikeFloat_co | None = ..., + ) -> _ObjectArray: ... + @overload + def __call__( + self, + x: _SeriesLikeComplex_co, + y: _ArrayLikeCoef_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None, + full: Literal[True], + /, + w: _SeriesLikeFloat_co | None = ..., + ) -> tuple[_ObjectArray, _FullFitResult]: ... + @overload + def __call__( + self, + /, + x: _SeriesLikeComplex_co, + y: _ArrayLikeCoef_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None = ..., + *, + full: Literal[True], + w: _SeriesLikeFloat_co | None = ..., + ) -> tuple[_ObjectArray, _FullFitResult]: ... + +@type_check_only +class _FuncRoots(Protocol): + @overload + def __call__(self, /, c: _SeriesLikeFloat_co) -> _Series[np.float64]: ... + @overload + def __call__(self, /, c: _SeriesLikeComplex_co) -> _Series[np.complex128]: ... + @overload + def __call__(self, /, c: _SeriesLikeCoef_co) -> _ObjectSeries: ... + +_Companion: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] + +@type_check_only +class _FuncCompanion(Protocol): + @overload + def __call__(self, /, c: _SeriesLikeFloat_co) -> _Companion[np.float64]: ... + @overload + def __call__(self, /, c: _SeriesLikeComplex_co) -> _Companion[np.complex128]: ... + @overload + def __call__(self, /, c: _SeriesLikeCoef_co) -> _Companion[np.object_]: ... + +@type_check_only +class _FuncGauss(Protocol): + def __call__(self, /, deg: SupportsIndex) -> _Tuple2[_Series[np.float64]]: ... + +@type_check_only +class _FuncWeight(Protocol): + @overload + def __call__(self, /, x: _ArrayLikeFloat_co) -> npt.NDArray[np.float64]: ... + @overload + def __call__(self, /, x: _ArrayLikeComplex_co) -> npt.NDArray[np.complex128]: ... + @overload + def __call__(self, /, x: _ArrayLikeCoef_co) -> _ObjectArray: ... diff --git a/local_packages/numpy/polynomial/chebyshev.py b/local_packages/numpy/polynomial/chebyshev.py new file mode 100644 index 0000000..55b48b9 --- /dev/null +++ b/local_packages/numpy/polynomial/chebyshev.py @@ -0,0 +1,2001 @@ +""" +==================================================== +Chebyshev Series (:mod:`numpy.polynomial.chebyshev`) +==================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Chebyshev series, including a `Chebyshev` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- + +.. autosummary:: + :toctree: generated/ + + Chebyshev + + +Constants +--------- + +.. autosummary:: + :toctree: generated/ + + chebdomain + chebzero + chebone + chebx + +Arithmetic +---------- + +.. autosummary:: + :toctree: generated/ + + chebadd + chebsub + chebmulx + chebmul + chebdiv + chebpow + chebval + chebval2d + chebval3d + chebgrid2d + chebgrid3d + +Calculus +-------- + +.. autosummary:: + :toctree: generated/ + + chebder + chebint + +Misc Functions +-------------- + +.. autosummary:: + :toctree: generated/ + + chebfromroots + chebroots + chebvander + chebvander2d + chebvander3d + chebgauss + chebweight + chebcompanion + chebfit + chebpts1 + chebpts2 + chebtrim + chebline + cheb2poly + poly2cheb + chebinterpolate + +See also +-------- +`numpy.polynomial` + +Notes +----- +The implementations of multiplication, division, integration, and +differentiation use the algebraic identities [1]_: + +.. math:: + T_n(x) = \\frac{z^n + z^{-n}}{2} \\\\ + z\\frac{dx}{dz} = \\frac{z - z^{-1}}{2}. + +where + +.. math:: x = \\frac{z + z^{-1}}{2}. + +These identities allow a Chebyshev series to be expressed as a finite, +symmetric Laurent series. In this module, this sort of Laurent series +is referred to as a "z-series." + +References +---------- +.. [1] A. T. Benjamin, et al., "Combinatorial Trigonometry with Chebyshev + Polynomials," *Journal of Statistical Planning and Inference 14*, 2008 + (https://web.archive.org/web/20080221202153/https://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4) + +""" # noqa: E501 +import numpy as np + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline', 'chebadd', + 'chebsub', 'chebmulx', 'chebmul', 'chebdiv', 'chebpow', 'chebval', + 'chebder', 'chebint', 'cheb2poly', 'poly2cheb', 'chebfromroots', + 'chebvander', 'chebfit', 'chebtrim', 'chebroots', 'chebpts1', + 'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', 'chebgrid2d', + 'chebgrid3d', 'chebvander2d', 'chebvander3d', 'chebcompanion', + 'chebgauss', 'chebweight', 'chebinterpolate'] + +chebtrim = pu.trimcoef + +# +# A collection of functions for manipulating z-series. These are private +# functions and do minimal error checking. +# + +def _cseries_to_zseries(c): + """Convert Chebyshev series to z-series. + + Convert a Chebyshev series to the equivalent z-series. The result is + never an empty array. The dtype of the return is the same as that of + the input. No checks are run on the arguments as this routine is for + internal use. + + Parameters + ---------- + c : 1-D ndarray + Chebyshev coefficients, ordered from low to high + + Returns + ------- + zs : 1-D ndarray + Odd length symmetric z-series, ordered from low to high. + + """ + n = c.size + zs = np.zeros(2 * n - 1, dtype=c.dtype) + zs[n - 1:] = c / 2 + return zs + zs[::-1] + + +def _zseries_to_cseries(zs): + """Convert z-series to a Chebyshev series. + + Convert a z series to the equivalent Chebyshev series. The result is + never an empty array. The dtype of the return is the same as that of + the input. No checks are run on the arguments as this routine is for + internal use. + + Parameters + ---------- + zs : 1-D ndarray + Odd length symmetric z-series, ordered from low to high. + + Returns + ------- + c : 1-D ndarray + Chebyshev coefficients, ordered from low to high. + + """ + n = (zs.size + 1) // 2 + c = zs[n - 1:].copy() + c[1:n] *= 2 + return c + + +def _zseries_mul(z1, z2): + """Multiply two z-series. + + Multiply two z-series to produce a z-series. + + Parameters + ---------- + z1, z2 : 1-D ndarray + The arrays must be 1-D but this is not checked. + + Returns + ------- + product : 1-D ndarray + The product z-series. + + Notes + ----- + This is simply convolution. If symmetric/anti-symmetric z-series are + denoted by S/A then the following rules apply: + + S*S, A*A -> S + S*A, A*S -> A + + """ + return np.convolve(z1, z2) + + +def _zseries_div(z1, z2): + """Divide the first z-series by the second. + + Divide `z1` by `z2` and return the quotient and remainder as z-series. + Warning: this implementation only applies when both z1 and z2 have the + same symmetry, which is sufficient for present purposes. + + Parameters + ---------- + z1, z2 : 1-D ndarray + The arrays must be 1-D and have the same symmetry, but this is not + checked. + + Returns + ------- + + (quotient, remainder) : 1-D ndarrays + Quotient and remainder as z-series. + + Notes + ----- + This is not the same as polynomial division on account of the desired form + of the remainder. If symmetric/anti-symmetric z-series are denoted by S/A + then the following rules apply: + + S/S -> S,S + A/A -> S,A + + The restriction to types of the same symmetry could be fixed but seems like + unneeded generality. There is no natural form for the remainder in the case + where there is no symmetry. + + """ + z1 = z1.copy() + z2 = z2.copy() + lc1 = len(z1) + lc2 = len(z2) + if lc2 == 1: + z1 /= z2 + return z1, z1[:1] * 0 + elif lc1 < lc2: + return z1[:1] * 0, z1 + else: + dlen = lc1 - lc2 + scl = z2[0] + z2 /= scl + quo = np.empty(dlen + 1, dtype=z1.dtype) + i = 0 + j = dlen + while i < j: + r = z1[i] + quo[i] = z1[i] + quo[dlen - i] = r + tmp = r * z2 + z1[i:i + lc2] -= tmp + z1[j:j + lc2] -= tmp + i += 1 + j -= 1 + r = z1[i] + quo[i] = r + tmp = r * z2 + z1[i:i + lc2] -= tmp + quo /= scl + rem = z1[i + 1:i - 1 + lc2].copy() + return quo, rem + + +def _zseries_der(zs): + """Differentiate a z-series. + + The derivative is with respect to x, not z. This is achieved using the + chain rule and the value of dx/dz given in the module notes. + + Parameters + ---------- + zs : z-series + The z-series to differentiate. + + Returns + ------- + derivative : z-series + The derivative + + Notes + ----- + The zseries for x (ns) has been multiplied by two in order to avoid + using floats that are incompatible with Decimal and likely other + specialized scalar types. This scaling has been compensated by + multiplying the value of zs by two also so that the two cancels in the + division. + + """ + n = len(zs) // 2 + ns = np.array([-1, 0, 1], dtype=zs.dtype) + zs *= np.arange(-n, n + 1) * 2 + d, r = _zseries_div(zs, ns) + return d + + +def _zseries_int(zs): + """Integrate a z-series. + + The integral is with respect to x, not z. This is achieved by a change + of variable using dx/dz given in the module notes. + + Parameters + ---------- + zs : z-series + The z-series to integrate + + Returns + ------- + integral : z-series + The indefinite integral + + Notes + ----- + The zseries for x (ns) has been multiplied by two in order to avoid + using floats that are incompatible with Decimal and likely other + specialized scalar types. This scaling has been compensated by + dividing the resulting zs by two. + + """ + n = 1 + len(zs) // 2 + ns = np.array([-1, 0, 1], dtype=zs.dtype) + zs = _zseries_mul(zs, ns) + div = np.arange(-n, n + 1) * 2 + zs[:n] /= div[:n] + zs[n + 1:] /= div[n + 1:] + zs[n] = 0 + return zs + +# +# Chebyshev series functions +# + + +def poly2cheb(pol): + """ + Convert a polynomial to a Chebyshev series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Chebyshev series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Chebyshev + series. + + See Also + -------- + cheb2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy import polynomial as P + >>> p = P.Polynomial(range(4)) + >>> p + Polynomial([0., 1., 2., 3.], domain=[-1., 1.], window=[-1., 1.], symbol='x') + >>> c = p.convert(kind=P.Chebyshev) + >>> c + Chebyshev([1. , 3.25, 1. , 0.75], domain=[-1., 1.], window=[-1., ... + >>> P.chebyshev.poly2cheb(range(4)) + array([1. , 3.25, 1. , 0.75]) + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = chebadd(chebmulx(res), pol[i]) + return res + + +def cheb2poly(c): + """ + Convert a Chebyshev series to a polynomial. + + Convert an array representing the coefficients of a Chebyshev series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Chebyshev series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2cheb + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy import polynomial as P + >>> c = P.Chebyshev(range(4)) + >>> c + Chebyshev([0., 1., 2., 3.], domain=[-1., 1.], window=[-1., 1.], symbol='x') + >>> p = c.convert(kind=P.Polynomial) + >>> p + Polynomial([-2., -8., 4., 12.], domain=[-1., 1.], window=[-1., 1.], ... + >>> P.chebyshev.cheb2poly(range(4)) + array([-2., -8., 4., 12.]) + + """ + from .polynomial import polyadd, polymulx, polysub + + [c] = pu.as_series([c]) + n = len(c) + if n < 3: + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], c1) + c1 = polyadd(tmp, polymulx(c1) * 2) + return polyadd(c0, polymulx(c1)) + + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Chebyshev default domain. +chebdomain = np.array([-1., 1.]) + +# Chebyshev coefficients representing zero. +chebzero = np.array([0]) + +# Chebyshev coefficients representing one. +chebone = np.array([1]) + +# Chebyshev coefficients representing the identity x. +chebx = np.array([0, 1]) + + +def chebline(off, scl): + """ + Chebyshev series whose graph is a straight line. + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Chebyshev series for + ``off + scl*x``. + + See Also + -------- + numpy.polynomial.polynomial.polyline + numpy.polynomial.legendre.legline + numpy.polynomial.laguerre.lagline + numpy.polynomial.hermite.hermline + numpy.polynomial.hermite_e.hermeline + + Examples + -------- + >>> import numpy.polynomial.chebyshev as C + >>> C.chebline(3,2) + array([3, 2]) + >>> C.chebval(-3, C.chebline(3,2)) # should be -3 + -3.0 + + """ + if scl != 0: + return np.array([off, scl]) + else: + return np.array([off]) + + +def chebfromroots(roots): + """ + Generate a Chebyshev series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in Chebyshev form, where the :math:`r_n` are the roots specified in + `roots`. If a zero has multiplicity n, then it must appear in `roots` + n times. For instance, if 2 is a root of multiplicity three and 3 is a + root of multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. + The roots can appear in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in Chebyshev form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients. If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + numpy.polynomial.polynomial.polyfromroots + numpy.polynomial.legendre.legfromroots + numpy.polynomial.laguerre.lagfromroots + numpy.polynomial.hermite.hermfromroots + numpy.polynomial.hermite_e.hermefromroots + + Examples + -------- + >>> import numpy.polynomial.chebyshev as C + >>> C.chebfromroots((-1,0,1)) # x^3 - x relative to the standard basis + array([ 0. , -0.25, 0. , 0.25]) + >>> j = complex(0,1) + >>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis + array([1.5+0.j, 0. +0.j, 0.5+0.j]) + + """ + return pu._fromroots(chebline, chebmul, roots) + + +def chebadd(c1, c2): + """ + Add one Chebyshev series to another. + + Returns the sum of two Chebyshev series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Chebyshev series of their sum. + + See Also + -------- + chebsub, chebmulx, chebmul, chebdiv, chebpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Chebyshev series + is a Chebyshev series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> C.chebadd(c1,c2) + array([4., 4., 4.]) + + """ + return pu._add(c1, c2) + + +def chebsub(c1, c2): + """ + Subtract one Chebyshev series from another. + + Returns the difference of two Chebyshev series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Chebyshev series coefficients representing their difference. + + See Also + -------- + chebadd, chebmulx, chebmul, chebdiv, chebpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Chebyshev + series is a Chebyshev series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> C.chebsub(c1,c2) + array([-2., 0., 2.]) + >>> C.chebsub(c2,c1) # -C.chebsub(c1,c2) + array([ 2., 0., -2.]) + + """ + return pu._sub(c1, c2) + + +def chebmulx(c): + """Multiply a Chebyshev series by x. + + Multiply the polynomial `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + chebadd, chebsub, chebmul, chebdiv, chebpow + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> C.chebmulx([1,2,3]) + array([1. , 2.5, 1. , 1.5]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0] * 0 + prd[1] = c[0] + if len(c) > 1: + tmp = c[1:] / 2 + prd[2:] = tmp + prd[0:-2] += tmp + return prd + + +def chebmul(c1, c2): + """ + Multiply one Chebyshev series by another. + + Returns the product of two Chebyshev series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Chebyshev series coefficients representing their product. + + See Also + -------- + chebadd, chebsub, chebmulx, chebdiv, chebpow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Chebyshev polynomial basis set. Thus, to express + the product as a C-series, it is typically necessary to "reproject" + the product onto said basis set, which typically produces + "unintuitive live" (but correct) results; see Examples section below. + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> C.chebmul(c1,c2) # multiplication requires "reprojection" + array([ 6.5, 12. , 12. , 4. , 1.5]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + z1 = _cseries_to_zseries(c1) + z2 = _cseries_to_zseries(c2) + prd = _zseries_mul(z1, z2) + ret = _zseries_to_cseries(prd) + return pu.trimseq(ret) + + +def chebdiv(c1, c2): + """ + Divide one Chebyshev series by another. + + Returns the quotient-with-remainder of two Chebyshev series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``T_0 + 2*T_1 + 3*T_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + [quo, rem] : ndarrays + Of Chebyshev series coefficients representing the quotient and + remainder. + + See Also + -------- + chebadd, chebsub, chebmulx, chebmul, chebpow + + Notes + ----- + In general, the (polynomial) division of one C-series by another + results in quotient and remainder terms that are not in the Chebyshev + polynomial basis set. Thus, to express these results as C-series, it + is typically necessary to "reproject" the results onto said basis + set, which typically produces "unintuitive" (but correct) results; + see Examples section below. + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> C.chebdiv(c1,c2) # quotient "intuitive," remainder not + (array([3.]), array([-8., -4.])) + >>> c2 = (0,1,2,3) + >>> C.chebdiv(c2,c1) # neither "intuitive" + (array([0., 2.]), array([-2., -4.])) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if c2[-1] == 0: + raise ZeroDivisionError # FIXME: add message with details to exception + + # note: this is more efficient than `pu._div(chebmul, c1, c2)` + lc1 = len(c1) + lc2 = len(c2) + if lc1 < lc2: + return c1[:1] * 0, c1 + elif lc2 == 1: + return c1 / c2[-1], c1[:1] * 0 + else: + z1 = _cseries_to_zseries(c1) + z2 = _cseries_to_zseries(c2) + quo, rem = _zseries_div(z1, z2) + quo = pu.trimseq(_zseries_to_cseries(quo)) + rem = pu.trimseq(_zseries_to_cseries(rem)) + return quo, rem + + +def chebpow(c, pow, maxpower=16): + """Raise a Chebyshev series to a power. + + Returns the Chebyshev series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``T_0 + 2*T_1 + 3*T_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Chebyshev series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Chebyshev series of power. + + See Also + -------- + chebadd, chebsub, chebmulx, chebmul, chebdiv + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> C.chebpow([1, 2, 3, 4], 2) + array([15.5, 22. , 16. , ..., 12.5, 12. , 8. ]) + + """ + # note: this is more efficient than `pu._pow(chebmul, c1, c2)`, as it + # avoids converting between z and c series repeatedly + + # c is a trimmed copy + [c] = pu.as_series([c]) + power = int(pow) + if power != pow or power < 0: + raise ValueError("Power must be a non-negative integer.") + elif maxpower is not None and power > maxpower: + raise ValueError("Power is too large") + elif power == 0: + return np.array([1], dtype=c.dtype) + elif power == 1: + return c + else: + # This can be made more efficient by using powers of two + # in the usual way. + zs = _cseries_to_zseries(c) + prd = zs + for i in range(2, power + 1): + prd = np.convolve(prd, zs) + return _zseries_to_cseries(prd) + + +def chebder(c, m=1, scl=1, axis=0): + """ + Differentiate a Chebyshev series. + + Returns the Chebyshev series coefficients `c` differentiated `m` times + along `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*T_0 + 2*T_1 + 3*T_2`` + while [[1,2],[1,2]] represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + + 2*T_0(x)*T_1(y) + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Chebyshev series coefficients. If c is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + Returns + ------- + der : ndarray + Chebyshev series of the derivative. + + See Also + -------- + chebint + + Notes + ----- + In general, the result of differentiating a C-series needs to be + "reprojected" onto the C-series basis set. Thus, typically, the + result of this function is "unintuitive," albeit correct; see Examples + section below. + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c = (1,2,3,4) + >>> C.chebder(c) + array([14., 12., 24.]) + >>> C.chebder(c,3) + array([96.]) + >>> C.chebder(c,scl=-1) + array([-14., -12., -24.]) + >>> C.chebder(c,2,-1) + array([12., 96.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt = pu._as_int(m, "the order of derivation") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1] * 0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 2, -1): + der[j - 1] = (2 * j) * c[j] + c[j - 2] += (j * c[j]) / (j - 2) + if n > 1: + der[1] = 4 * c[2] + der[0] = c[1] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Chebyshev series. + + Returns the Chebyshev series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``T_0 + 2*T_1 + 3*T_2`` while [[1,2],[1,2]] + represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + 2*T_0(x)*T_1(y) + + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Chebyshev series coefficients. If c is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at zero + is the first value in the list, the value of the second integral + at zero is the second value, etc. If ``k == []`` (the default), + all constants are set to zero. If ``m == 1``, a single scalar can + be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + Returns + ------- + S : ndarray + C-series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 1``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + chebder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a`- perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c = (1,2,3) + >>> C.chebint(c) + array([ 0.5, -0.5, 0.5, 0.5]) + >>> C.chebint(c,3) + array([ 0.03125 , -0.1875 , 0.04166667, -0.05208333, 0.01041667, # may vary + 0.00625 ]) + >>> C.chebint(c, k=3) + array([ 3.5, -0.5, 0.5, 0.5]) + >>> C.chebint(c,lbnd=-2) + array([ 8.5, -0.5, 0.5, 0.5]) + >>> C.chebint(c,scl=-2) + array([-1., 1., -1., -1.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt = pu._as_int(m, "the order of integration") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0] * (cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0] * 0 + tmp[1] = c[0] + if n > 1: + tmp[2] = c[1] / 4 + for j in range(2, n): + tmp[j + 1] = c[j] / (2 * (j + 1)) + tmp[j - 1] -= c[j] / (2 * (j - 1)) + tmp[0] += k[i] - chebval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def chebval(x, c, tensor=True): + """ + Evaluate a Chebyshev series at points x. + + If `c` is of length `n + 1`, this function returns the value: + + .. math:: p(x) = c_0 * T_0(x) + c_1 * T_1(x) + ... + c_n * T_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then ``p(x)`` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + chebval2d, chebgrid2d, chebval3d, chebgrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,) * x.ndim) + + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + x2 = 2 * x + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + c0 = c[-i] - c1 + c1 = tmp + c1 * x2 + return c0 + c1 * x + + +def chebval2d(x, y, c): + """ + Evaluate a 2-D Chebyshev series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * T_i(x) * T_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points ``(x, y)``, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than 2 the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional Chebyshev series at points formed + from pairs of corresponding values from `x` and `y`. + + See Also + -------- + chebval, chebgrid2d, chebval3d, chebgrid3d + """ + return pu._valnd(chebval, c, x, y) + + +def chebgrid2d(x, y, c): + """ + Evaluate a 2-D Chebyshev series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * T_i(a) * T_j(b), + + where the points `(a, b)` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape + y.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j is contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional Chebyshev series at points in the + Cartesian product of `x` and `y`. + + See Also + -------- + chebval, chebval2d, chebval3d, chebgrid3d + """ + return pu._gridnd(chebval, c, x, y) + + +def chebval3d(x, y, z, c): + """ + Evaluate a 3-D Chebyshev series at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * T_i(x) * T_j(y) * T_k(z) + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or a lists, otherwise they are treated as a scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + ``(x, y, z)``, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + chebval, chebval2d, chebgrid2d, chebgrid3d + """ + return pu._valnd(chebval, c, x, y, z) + + +def chebgrid3d(x, y, z, c): + """ + Evaluate a 3-D Chebyshev series on the Cartesian product of x, y, and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * T_i(a) * T_j(b) * T_k(c) + + where the points ``(a, b, c)`` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. + + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or a lists, otherwise they are treated as a scalars. In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. + + If `c` has fewer than three dimensions, ones are implicitly appended to + its shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape + y.shape + z.shape. + + Parameters + ---------- + x, y, z : array_like, compatible objects + The three dimensional series is evaluated at the points in the + Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a + list or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and, if it isn't an ndarray, it is treated as a + scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + chebval, chebval2d, chebgrid2d, chebval3d + """ + return pu._gridnd(chebval, c, x, y, z) + + +def chebvander(x, deg): + """Pseudo-Vandermonde matrix of given degree. + + Returns the pseudo-Vandermonde matrix of degree `deg` and sample points + `x`. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., i] = T_i(x), + + where ``0 <= i <= deg``. The leading indices of `V` index the elements of + `x` and the last index is the degree of the Chebyshev polynomial. + + If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is the + matrix ``V = chebvander(x, n)``, then ``np.dot(V, c)`` and + ``chebval(x, c)`` are the same up to roundoff. This equivalence is + useful both for least squares fitting and for the evaluation of a large + number of Chebyshev series of the same degree and sample points. + + Parameters + ---------- + x : array_like + Array of points. The dtype is converted to float64 or complex128 + depending on whether any of the elements are complex. If `x` is + scalar it is converted to a 1-D array. + deg : int + Degree of the resulting matrix. + + Returns + ------- + vander : ndarray + The pseudo Vandermonde matrix. The shape of the returned matrix is + ``x.shape + (deg + 1,)``, where The last index is the degree of the + corresponding Chebyshev polynomial. The dtype will be the same as + the converted `x`. + + """ + ideg = pu._as_int(deg, "deg") + if ideg < 0: + raise ValueError("deg must be non-negative") + + x = np.array(x, copy=None, ndmin=1) + 0.0 + dims = (ideg + 1,) + x.shape + dtyp = x.dtype + v = np.empty(dims, dtype=dtyp) + # Use forward recursion to generate the entries. + v[0] = x * 0 + 1 + if ideg > 0: + x2 = 2 * x + v[1] = x + for i in range(2, ideg + 1): + v[i] = v[i - 1] * x2 - v[i - 2] + return np.moveaxis(v, 0, -1) + + +def chebvander2d(x, y, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y)``. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (deg[1] + 1)*i + j] = T_i(x) * T_j(y), + + where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading indices of + `V` index the points ``(x, y)`` and the last index encodes the degrees of + the Chebyshev polynomials. + + If ``V = chebvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` + correspond to the elements of a 2-D coefficient array `c` of shape + (xdeg + 1, ydeg + 1) in the order + + .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + + and ``np.dot(V, c.flat)`` and ``chebval2d(x, y, c)`` will be the same + up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 2-D Chebyshev + series of the same degrees and sample points. + + Parameters + ---------- + x, y : array_like + Arrays of point coordinates, all of the same shape. The dtypes + will be converted to either float64 or complex128 depending on + whether any of the elements are complex. Scalars are converted to + 1-D arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg]. + + Returns + ------- + vander2d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same + as the converted `x` and `y`. + + See Also + -------- + chebvander, chebvander3d, chebval2d, chebval3d + """ + return pu._vander_nd_flat((chebvander, chebvander), (x, y), deg) + + +def chebvander3d(x, y, z, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y, z)``. If `l`, `m`, `n` are the given degrees in `x`, `y`, `z`, + then The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = T_i(x)*T_j(y)*T_k(z), + + where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= j <= n``. The leading + indices of `V` index the points ``(x, y, z)`` and the last index encodes + the degrees of the Chebyshev polynomials. + + If ``V = chebvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns + of `V` correspond to the elements of a 3-D coefficient array `c` of + shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order + + .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... + + and ``np.dot(V, c.flat)`` and ``chebval3d(x, y, z, c)`` will be the + same up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 3-D Chebyshev + series of the same degrees and sample points. + + Parameters + ---------- + x, y, z : array_like + Arrays of point coordinates, all of the same shape. The dtypes will + be converted to either float64 or complex128 depending on whether + any of the elements are complex. Scalars are converted to 1-D + arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg, z_deg]. + + Returns + ------- + vander3d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will + be the same as the converted `x`, `y`, and `z`. + + See Also + -------- + chebvander, chebvander3d, chebval2d, chebval3d + """ + return pu._vander_nd_flat((chebvander, chebvander, chebvander), (x, y, z), deg) + + +def chebfit(x, y, deg, rcond=None, full=False, w=None): + """ + Least squares fit of Chebyshev series to data. + + Return the coefficients of a Chebyshev series of degree `deg` that is the + least squares fit to the data values `y` given at points `x`. If `y` is + 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple + fits are done, one for each column of `y`, and the resulting + coefficients are stored in the corresponding columns of a 2-D return. + The fitted polynomial(s) are in the form + + .. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x), + + where `n` is `deg`. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer, + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is ``len(x)*eps``, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Chebyshev coefficients ordered from low to high. If `y` was 2-D, + the coefficients for the data in column k of `y` are in column + `k`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if ``full == False``. The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.exceptions.RankWarning) + + See Also + -------- + numpy.polynomial.polynomial.polyfit + numpy.polynomial.legendre.legfit + numpy.polynomial.laguerre.lagfit + numpy.polynomial.hermite.hermfit + numpy.polynomial.hermite_e.hermefit + chebval : Evaluates a Chebyshev series. + chebvander : Vandermonde matrix of Chebyshev series. + chebweight : Chebyshev weight function. + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the Chebyshev series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where :math:`w_j` are the weights. This problem is solved by setting up + as the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `~exceptions.RankWarning` will be issued. This means that + the coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using Chebyshev series are usually better conditioned than fits + using power series, but much can depend on the distribution of the + sample points and the smoothness of the data. If the quality of the fit + is inadequate splines may be a good alternative. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + + """ + return pu._fit(chebvander, x, y, deg, rcond, full, w) + + +def chebcompanion(c): + """Return the scaled companion matrix of c. + + The basis polynomials are scaled so that the companion matrix is + symmetric when `c` is a Chebyshev basis polynomial. This provides + better eigenvalue estimates than the unscaled case and for basis + polynomials the eigenvalues are guaranteed to be real if + `numpy.linalg.eigvalsh` is used to obtain them. + + Parameters + ---------- + c : array_like + 1-D array of Chebyshev series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Scaled companion matrix of dimensions (deg, deg). + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-c[0] / c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + scl = np.array([1.] + [np.sqrt(.5)] * (n - 1)) + top = mat.reshape(-1)[1::n + 1] + bot = mat.reshape(-1)[n::n + 1] + top[0] = np.sqrt(.5) + top[1:] = 1 / 2 + bot[...] = top + mat[:, -1] -= (c[:-1] / c[-1]) * (scl / scl[-1]) * .5 + return mat + + +def chebroots(c): + """ + Compute the roots of a Chebyshev series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * T_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the series. If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + numpy.polynomial.polynomial.polyroots + numpy.polynomial.legendre.legroots + numpy.polynomial.laguerre.lagroots + numpy.polynomial.hermite.hermroots + numpy.polynomial.hermite_e.hermeroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix, Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the series for such + values. Roots with multiplicity greater than 1 will also show larger + errors as the value of the series near such points is relatively + insensitive to errors in the roots. Isolated roots near the origin can + be improved by a few iterations of Newton's method. + + The Chebyshev series basis polynomials aren't powers of `x` so the + results of this function may seem unintuitive. + + Examples + -------- + >>> import numpy.polynomial.chebyshev as cheb + >>> cheb.chebroots((-1, 1,-1, 1)) # T3 - T2 + T1 - T0 has real roots + array([ -5.00000000e-01, 2.60860684e-17, 1.00000000e+00]) # may vary + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-c[0] / c[1]]) + + # rotated companion matrix reduces error + m = chebcompanion(c)[::-1, ::-1] + r = np.linalg.eigvals(m) + r.sort() + return r + + +def chebinterpolate(func, deg, args=()): + """Interpolate a function at the Chebyshev points of the first kind. + + Returns the Chebyshev series that interpolates `func` at the Chebyshev + points of the first kind in the interval [-1, 1]. The interpolating + series tends to a minmax approximation to `func` with increasing `deg` + if the function is continuous in the interval. + + Parameters + ---------- + func : function + The function to be approximated. It must be a function of a single + variable of the form ``f(x, a, b, c...)``, where ``a, b, c...`` are + extra arguments passed in the `args` parameter. + deg : int + Degree of the interpolating polynomial + args : tuple, optional + Extra arguments to be used in the function call. Default is no extra + arguments. + + Returns + ------- + coef : ndarray, shape (deg + 1,) + Chebyshev coefficients of the interpolating series ordered from low to + high. + + Examples + -------- + >>> import numpy.polynomial.chebyshev as C + >>> C.chebinterpolate(lambda x: np.tanh(x) + 0.5, 8) + array([ 5.00000000e-01, 8.11675684e-01, -9.86864911e-17, + -5.42457905e-02, -2.71387850e-16, 4.51658839e-03, + 2.46716228e-17, -3.79694221e-04, -3.26899002e-16]) + + Notes + ----- + The Chebyshev polynomials used in the interpolation are orthogonal when + sampled at the Chebyshev points of the first kind. If it is desired to + constrain some of the coefficients they can simply be set to the desired + value after the interpolation, no new interpolation or fit is needed. This + is especially useful if it is known apriori that some of coefficients are + zero. For instance, if the function is even then the coefficients of the + terms of odd degree in the result can be set to zero. + + """ + deg = np.asarray(deg) + + # check arguments. + if deg.ndim > 0 or deg.dtype.kind not in 'iu' or deg.size == 0: + raise TypeError("deg must be an int") + if deg < 0: + raise ValueError("expected deg >= 0") + + order = deg + 1 + xcheb = chebpts1(order) + yfunc = func(xcheb, *args) + m = chebvander(xcheb, deg) + c = np.dot(m.T, yfunc) + c[0] /= order + c[1:] /= 0.5 * order + + return c + + +def chebgauss(deg): + """ + Gauss-Chebyshev quadrature. + + Computes the sample points and weights for Gauss-Chebyshev quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with + the weight function :math:`f(x) = 1/\\sqrt{1 - x^2}`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + The results have only been tested up to degree 100, higher degrees may + be problematic. For Gauss-Chebyshev there are closed form solutions for + the sample points and weights. If n = `deg`, then + + .. math:: x_i = \\cos(\\pi (2 i - 1) / (2 n)) + + .. math:: w_i = \\pi / n + + """ + ideg = pu._as_int(deg, "deg") + if ideg <= 0: + raise ValueError("deg must be a positive integer") + + x = np.cos(np.pi * np.arange(1, 2 * ideg, 2) / (2.0 * ideg)) + w = np.ones(ideg) * (np.pi / ideg) + + return x, w + + +def chebweight(x): + """ + The weight function of the Chebyshev polynomials. + + The weight function is :math:`1/\\sqrt{1 - x^2}` and the interval of + integration is :math:`[-1, 1]`. The Chebyshev polynomials are + orthogonal, but not normalized, with respect to this weight function. + + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. + + Returns + ------- + w : ndarray + The weight function at `x`. + """ + w = 1. / (np.sqrt(1. + x) * np.sqrt(1. - x)) + return w + + +def chebpts1(npts): + """ + Chebyshev points of the first kind. + + The Chebyshev points of the first kind are the points ``cos(x)``, + where ``x = [pi*(k + .5)/npts for k in range(npts)]``. + + Parameters + ---------- + npts : int + Number of sample points desired. + + Returns + ------- + pts : ndarray + The Chebyshev points of the first kind. + + See Also + -------- + chebpts2 + """ + _npts = int(npts) + if _npts != npts: + raise ValueError("npts must be integer") + if _npts < 1: + raise ValueError("npts must be >= 1") + + x = 0.5 * np.pi / _npts * np.arange(-_npts + 1, _npts + 1, 2) + return np.sin(x) + + +def chebpts2(npts): + """ + Chebyshev points of the second kind. + + The Chebyshev points of the second kind are the points ``cos(x)``, + where ``x = [pi*k/(npts - 1) for k in range(npts)]`` sorted in ascending + order. + + Parameters + ---------- + npts : int + Number of sample points desired. + + Returns + ------- + pts : ndarray + The Chebyshev points of the second kind. + """ + _npts = int(npts) + if _npts != npts: + raise ValueError("npts must be integer") + if _npts < 2: + raise ValueError("npts must be >= 2") + + x = np.linspace(-np.pi, 0, _npts) + return np.cos(x) + + +# +# Chebyshev series class +# + +class Chebyshev(ABCPolyBase): + """A Chebyshev series class. + + The Chebyshev class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed below. + + Parameters + ---------- + coef : array_like + Chebyshev coefficients in order of increasing degree, i.e., + ``(1, 2, 3)`` gives ``1*T_0(x) + 2*T_1(x) + 3*T_2(x)``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [-1., 1.]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1., 1.]. + symbol : str, optional + Symbol used to represent the independent variable in string + representations of the polynomial expression, e.g. for printing. + The symbol must be a valid Python identifier. Default value is 'x'. + + .. versionadded:: 1.24 + + """ + # Virtual Functions + _add = staticmethod(chebadd) + _sub = staticmethod(chebsub) + _mul = staticmethod(chebmul) + _div = staticmethod(chebdiv) + _pow = staticmethod(chebpow) + _val = staticmethod(chebval) + _int = staticmethod(chebint) + _der = staticmethod(chebder) + _fit = staticmethod(chebfit) + _line = staticmethod(chebline) + _roots = staticmethod(chebroots) + _fromroots = staticmethod(chebfromroots) + + @classmethod + def interpolate(cls, func, deg, domain=None, args=()): + """Interpolate a function at the Chebyshev points of the first kind. + + Returns the series that interpolates `func` at the Chebyshev points of + the first kind scaled and shifted to the `domain`. The resulting series + tends to a minmax approximation of `func` when the function is + continuous in the domain. + + Parameters + ---------- + func : function + The function to be interpolated. It must be a function of a single + variable of the form ``f(x, a, b, c...)``, where ``a, b, c...`` are + extra arguments passed in the `args` parameter. + deg : int + Degree of the interpolating polynomial. + domain : {None, [beg, end]}, optional + Domain over which `func` is interpolated. The default is None, in + which case the domain is [-1, 1]. + args : tuple, optional + Extra arguments to be used in the function call. Default is no + extra arguments. + + Returns + ------- + polynomial : Chebyshev instance + Interpolating Chebyshev instance. + + Notes + ----- + See `numpy.polynomial.chebinterpolate` for more details. + + """ + if domain is None: + domain = cls.domain + xfunc = lambda x: func(pu.mapdomain(x, cls.window, domain), *args) + coef = chebinterpolate(xfunc, deg) + return cls(coef, domain=domain) + + # Virtual properties + domain = np.array(chebdomain) + window = np.array(chebdomain) + basis_name = 'T' diff --git a/local_packages/numpy/polynomial/chebyshev.pyi b/local_packages/numpy/polynomial/chebyshev.pyi new file mode 100644 index 0000000..1cfb278 --- /dev/null +++ b/local_packages/numpy/polynomial/chebyshev.pyi @@ -0,0 +1,180 @@ +from _typeshed import ConvertibleToInt +from collections.abc import Callable, Iterable +from typing import ( + Any, + ClassVar, + Concatenate, + Final, + Literal as L, + Self, + TypeVar, + overload, +) + +import numpy as np +import numpy.typing as npt +from numpy._typing import _IntLike_co + +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _CoefSeries, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncGauss, + _FuncInteg, + _FuncLine, + _FuncPoly2Ortho, + _FuncPow, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncVander, + _FuncVander2D, + _FuncVander3D, + _FuncWeight, + _Series, + _SeriesLikeCoef_co, +) +from .polyutils import trimcoef as chebtrim + +__all__ = [ + "chebzero", + "chebone", + "chebx", + "chebdomain", + "chebline", + "chebadd", + "chebsub", + "chebmulx", + "chebmul", + "chebdiv", + "chebpow", + "chebval", + "chebder", + "chebint", + "cheb2poly", + "poly2cheb", + "chebfromroots", + "chebvander", + "chebfit", + "chebtrim", + "chebroots", + "chebpts1", + "chebpts2", + "Chebyshev", + "chebval2d", + "chebval3d", + "chebgrid2d", + "chebgrid3d", + "chebvander2d", + "chebvander3d", + "chebcompanion", + "chebgauss", + "chebweight", + "chebinterpolate", +] + +_NumberOrObjectT = TypeVar("_NumberOrObjectT", bound=np.number | np.object_) +_CoefScalarT = TypeVar("_CoefScalarT", bound=np.number | np.bool | np.object_) + +def _cseries_to_zseries(c: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... +def _zseries_to_cseries(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... +def _zseries_mul(z1: npt.NDArray[_NumberOrObjectT], z2: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... +def _zseries_div(z1: npt.NDArray[_NumberOrObjectT], z2: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... +def _zseries_der(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... +def _zseries_int(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... + +poly2cheb: Final[_FuncPoly2Ortho] = ... +cheb2poly: Final[_FuncUnOp] = ... + +chebdomain: Final[_Array2[np.float64]] = ... +chebzero: Final[_Array1[np.int_]] = ... +chebone: Final[_Array1[np.int_]] = ... +chebx: Final[_Array2[np.int_]] = ... + +chebline: Final[_FuncLine] = ... +chebfromroots: Final[_FuncFromRoots] = ... +chebadd: Final[_FuncBinOp] = ... +chebsub: Final[_FuncBinOp] = ... +chebmulx: Final[_FuncUnOp] = ... +chebmul: Final[_FuncBinOp] = ... +chebdiv: Final[_FuncBinOp] = ... +chebpow: Final[_FuncPow] = ... +chebder: Final[_FuncDer] = ... +chebint: Final[_FuncInteg] = ... +chebval: Final[_FuncVal] = ... +chebval2d: Final[_FuncVal2D] = ... +chebval3d: Final[_FuncVal3D] = ... +chebgrid2d: Final[_FuncVal2D] = ... +chebgrid3d: Final[_FuncVal3D] = ... +chebvander: Final[_FuncVander] = ... +chebvander2d: Final[_FuncVander2D] = ... +chebvander3d: Final[_FuncVander3D] = ... +chebfit: Final[_FuncFit] = ... +chebcompanion: Final[_FuncCompanion] = ... +chebroots: Final[_FuncRoots] = ... +chebgauss: Final[_FuncGauss] = ... +chebweight: Final[_FuncWeight] = ... +def chebpts1(npts: ConvertibleToInt) -> np.ndarray[tuple[int], np.dtype[np.float64]]: ... +def chebpts2(npts: ConvertibleToInt) -> np.ndarray[tuple[int], np.dtype[np.float64]]: ... + +# keep in sync with `Chebyshev.interpolate` (minus `domain` parameter) +@overload +def chebinterpolate( + func: np.ufunc, + deg: _IntLike_co, + args: tuple[()] = (), +) -> npt.NDArray[np.float64 | np.complex128 | np.object_]: ... +@overload +def chebinterpolate( + func: Callable[[npt.NDArray[np.float64]], _CoefScalarT], + deg: _IntLike_co, + args: tuple[()] = (), +) -> npt.NDArray[_CoefScalarT]: ... +@overload +def chebinterpolate( + func: Callable[Concatenate[npt.NDArray[np.float64], ...], _CoefScalarT], + deg: _IntLike_co, + args: Iterable[Any], +) -> npt.NDArray[_CoefScalarT]: ... + +class Chebyshev(ABCPolyBase[L["T"]]): + basis_name: ClassVar[L["T"]] = "T" # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + + @overload + @classmethod + def interpolate( + cls, + func: Callable[[npt.NDArray[np.float64]], _CoefSeries], + deg: _IntLike_co, + domain: _SeriesLikeCoef_co | None = None, + args: tuple[()] = (), + ) -> Self: ... + @overload + @classmethod + def interpolate( + cls, + func: Callable[Concatenate[npt.NDArray[np.float64], ...], _CoefSeries], + deg: _IntLike_co, + domain: _SeriesLikeCoef_co | None = None, + *, + args: Iterable[Any], + ) -> Self: ... + @overload + @classmethod + def interpolate( + cls, + func: Callable[Concatenate[npt.NDArray[np.float64], ...], _CoefSeries], + deg: _IntLike_co, + domain: _SeriesLikeCoef_co | None, + args: Iterable[Any], + ) -> Self: ... diff --git a/local_packages/numpy/polynomial/hermite.py b/local_packages/numpy/polynomial/hermite.py new file mode 100644 index 0000000..c6007d1 --- /dev/null +++ b/local_packages/numpy/polynomial/hermite.py @@ -0,0 +1,1738 @@ +""" +============================================================== +Hermite Series, "Physicists" (:mod:`numpy.polynomial.hermite`) +============================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Hermite series, including a `Hermite` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + Hermite + +Constants +--------- +.. autosummary:: + :toctree: generated/ + + hermdomain + hermzero + hermone + hermx + +Arithmetic +---------- +.. autosummary:: + :toctree: generated/ + + hermadd + hermsub + hermmulx + hermmul + hermdiv + hermpow + hermval + hermval2d + hermval3d + hermgrid2d + hermgrid3d + +Calculus +-------- +.. autosummary:: + :toctree: generated/ + + hermder + hermint + +Misc Functions +-------------- +.. autosummary:: + :toctree: generated/ + + hermfromroots + hermroots + hermvander + hermvander2d + hermvander3d + hermgauss + hermweight + hermcompanion + hermfit + hermtrim + hermline + herm2poly + poly2herm + +See also +-------- +`numpy.polynomial` + +""" +import numpy as np + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'hermzero', 'hermone', 'hermx', 'hermdomain', 'hermline', 'hermadd', + 'hermsub', 'hermmulx', 'hermmul', 'hermdiv', 'hermpow', 'hermval', + 'hermder', 'hermint', 'herm2poly', 'poly2herm', 'hermfromroots', + 'hermvander', 'hermfit', 'hermtrim', 'hermroots', 'Hermite', + 'hermval2d', 'hermval3d', 'hermgrid2d', 'hermgrid3d', 'hermvander2d', + 'hermvander3d', 'hermcompanion', 'hermgauss', 'hermweight'] + +hermtrim = pu.trimcoef + + +def poly2herm(pol): + """ + poly2herm(pol) + + Convert a polynomial to a Hermite series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Hermite series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Hermite + series. + + See Also + -------- + herm2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.hermite import poly2herm + >>> poly2herm(np.arange(4)) + array([1. , 2.75 , 0.5 , 0.375]) + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = hermadd(hermmulx(res), pol[i]) + return res + + +def herm2poly(c): + """ + Convert a Hermite series to a polynomial. + + Convert an array representing the coefficients of a Hermite series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Hermite series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2herm + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.hermite import herm2poly + >>> herm2poly([ 1. , 2.75 , 0.5 , 0.375]) + array([0., 1., 2., 3.]) + + """ + from .polynomial import polyadd, polymulx, polysub + + [c] = pu.as_series([c]) + n = len(c) + if n == 1: + return c + if n == 2: + c[1] *= 2 + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], c1 * (2 * (i - 1))) + c1 = polyadd(tmp, polymulx(c1) * 2) + return polyadd(c0, polymulx(c1) * 2) + + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Hermite +hermdomain = np.array([-1., 1.]) + +# Hermite coefficients representing zero. +hermzero = np.array([0]) + +# Hermite coefficients representing one. +hermone = np.array([1]) + +# Hermite coefficients representing the identity x. +hermx = np.array([0, 1 / 2]) + + +def hermline(off, scl): + """ + Hermite series whose graph is a straight line. + + + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Hermite series for + ``off + scl*x``. + + See Also + -------- + numpy.polynomial.polynomial.polyline + numpy.polynomial.chebyshev.chebline + numpy.polynomial.legendre.legline + numpy.polynomial.laguerre.lagline + numpy.polynomial.hermite_e.hermeline + + Examples + -------- + >>> from numpy.polynomial.hermite import hermline, hermval + >>> hermval(0,hermline(3, 2)) + 3.0 + >>> hermval(1,hermline(3, 2)) + 5.0 + + """ + if scl != 0: + return np.array([off, scl / 2]) + else: + return np.array([off]) + + +def hermfromroots(roots): + """ + Generate a Hermite series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in Hermite form, where the :math:`r_n` are the roots specified in `roots`. + If a zero has multiplicity n, then it must appear in `roots` n times. + For instance, if 2 is a root of multiplicity three and 3 is a root of + multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The + roots can appear in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in Hermite form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients. If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + numpy.polynomial.polynomial.polyfromroots + numpy.polynomial.legendre.legfromroots + numpy.polynomial.laguerre.lagfromroots + numpy.polynomial.chebyshev.chebfromroots + numpy.polynomial.hermite_e.hermefromroots + + Examples + -------- + >>> from numpy.polynomial.hermite import hermfromroots, hermval + >>> coef = hermfromroots((-1, 0, 1)) + >>> hermval((-1, 0, 1), coef) + array([0., 0., 0.]) + >>> coef = hermfromroots((-1j, 1j)) + >>> hermval((-1j, 1j), coef) + array([0.+0.j, 0.+0.j]) + + """ + return pu._fromroots(hermline, hermmul, roots) + + +def hermadd(c1, c2): + """ + Add one Hermite series to another. + + Returns the sum of two Hermite series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Hermite series of their sum. + + See Also + -------- + hermsub, hermmulx, hermmul, hermdiv, hermpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Hermite series + is a Hermite series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.hermite import hermadd + >>> hermadd([1, 2, 3], [1, 2, 3, 4]) + array([2., 4., 6., 4.]) + + """ + return pu._add(c1, c2) + + +def hermsub(c1, c2): + """ + Subtract one Hermite series from another. + + Returns the difference of two Hermite series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Hermite series coefficients representing their difference. + + See Also + -------- + hermadd, hermmulx, hermmul, hermdiv, hermpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Hermite + series is a Hermite series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.hermite import hermsub + >>> hermsub([1, 2, 3, 4], [1, 2, 3]) + array([0., 0., 0., 4.]) + + """ + return pu._sub(c1, c2) + + +def hermmulx(c): + """Multiply a Hermite series by x. + + Multiply the Hermite series `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + hermadd, hermsub, hermmul, hermdiv, hermpow + + Notes + ----- + The multiplication uses the recursion relationship for Hermite + polynomials in the form + + .. math:: + + xP_i(x) = (P_{i + 1}(x)/2 + i*P_{i - 1}(x)) + + Examples + -------- + >>> from numpy.polynomial.hermite import hermmulx + >>> hermmulx([1, 2, 3]) + array([2. , 6.5, 1. , 1.5]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0] * 0 + prd[1] = c[0] / 2 + for i in range(1, len(c)): + prd[i + 1] = c[i] / 2 + prd[i - 1] += c[i] * i + return prd + + +def hermmul(c1, c2): + """ + Multiply one Hermite series by another. + + Returns the product of two Hermite series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Hermite series coefficients representing their product. + + See Also + -------- + hermadd, hermsub, hermmulx, hermdiv, hermpow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Hermite polynomial basis set. Thus, to express + the product as a Hermite series, it is necessary to "reproject" the + product onto said basis set, which may produce "unintuitive" (but + correct) results; see Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermmul + >>> hermmul([1, 2, 3], [0, 1, 2]) + array([52., 29., 52., 7., 6.]) + + """ + # s1, s2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + + if len(c1) > len(c2): + c = c2 + xs = c1 + else: + c = c1 + xs = c2 + + if len(c) == 1: + c0 = c[0] * xs + c1 = 0 + elif len(c) == 2: + c0 = c[0] * xs + c1 = c[1] * xs + else: + nd = len(c) + c0 = c[-2] * xs + c1 = c[-1] * xs + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = hermsub(c[-i] * xs, c1 * (2 * (nd - 1))) + c1 = hermadd(tmp, hermmulx(c1) * 2) + return hermadd(c0, hermmulx(c1) * 2) + + +def hermdiv(c1, c2): + """ + Divide one Hermite series by another. + + Returns the quotient-with-remainder of two Hermite series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + [quo, rem] : ndarrays + Of Hermite series coefficients representing the quotient and + remainder. + + See Also + -------- + hermadd, hermsub, hermmulx, hermmul, hermpow + + Notes + ----- + In general, the (polynomial) division of one Hermite series by another + results in quotient and remainder terms that are not in the Hermite + polynomial basis set. Thus, to express these results as a Hermite + series, it is necessary to "reproject" the results onto the Hermite + basis set, which may produce "unintuitive" (but correct) results; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermdiv + >>> hermdiv([ 52., 29., 52., 7., 6.], [0, 1, 2]) + (array([1., 2., 3.]), array([0.])) + >>> hermdiv([ 54., 31., 52., 7., 6.], [0, 1, 2]) + (array([1., 2., 3.]), array([2., 2.])) + >>> hermdiv([ 53., 30., 52., 7., 6.], [0, 1, 2]) + (array([1., 2., 3.]), array([1., 1.])) + + """ + return pu._div(hermmul, c1, c2) + + +def hermpow(c, pow, maxpower=16): + """Raise a Hermite series to a power. + + Returns the Hermite series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Hermite series of power. + + See Also + -------- + hermadd, hermsub, hermmulx, hermmul, hermdiv + + Examples + -------- + >>> from numpy.polynomial.hermite import hermpow + >>> hermpow([1, 2, 3], 2) + array([81., 52., 82., 12., 9.]) + + """ + return pu._pow(hermmul, c, pow, maxpower) + + +def hermder(c, m=1, scl=1, axis=0): + """ + Differentiate a Hermite series. + + Returns the Hermite series coefficients `c` differentiated `m` times + along `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*H_0 + 2*H_1 + 3*H_2`` + while [[1,2],[1,2]] represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + + 2*H_0(x)*H_1(y) + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite series coefficients. If `c` is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + Returns + ------- + der : ndarray + Hermite series of the derivative. + + See Also + -------- + hermint + + Notes + ----- + In general, the result of differentiating a Hermite series does not + resemble the same operation on a power series. Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermder + >>> hermder([ 1. , 0.5, 0.5, 0.5]) + array([1., 2., 3.]) + >>> hermder([-0.5, 1./2., 1./8., 1./12., 1./16.], m=2) + array([1., 2., 3.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt = pu._as_int(m, "the order of derivation") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1] * 0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 0, -1): + der[j - 1] = (2 * j) * c[j] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Hermite series. + + Returns the Hermite series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]] + represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) + + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite series coefficients. If c is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + Returns + ------- + S : ndarray + Hermite series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + hermder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermint + >>> hermint([1,2,3]) # integrate once, value 0 at 0. + array([1. , 0.5, 0.5, 0.5]) + >>> hermint([1,2,3], m=2) # integrate twice, value & deriv 0 at 0 + array([-0.5 , 0.5 , 0.125 , 0.08333333, 0.0625 ]) # may vary + >>> hermint([1,2,3], k=1) # integrate once, value 1 at 0. + array([2. , 0.5, 0.5, 0.5]) + >>> hermint([1,2,3], lbnd=-1) # integrate once, value 0 at -1 + array([-2. , 0.5, 0.5, 0.5]) + >>> hermint([1,2,3], m=2, k=[1,2], lbnd=-1) + array([ 1.66666667, -0.5 , 0.125 , 0.08333333, 0.0625 ]) # may vary + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt = pu._as_int(m, "the order of integration") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0] * (cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0] * 0 + tmp[1] = c[0] / 2 + for j in range(1, n): + tmp[j + 1] = c[j] / (2 * (j + 1)) + tmp[0] += k[i] - hermval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def hermval(x, c, tensor=True): + """ + Evaluate a Hermite series at points x. + + If `c` is of length ``n + 1``, this function returns the value: + + .. math:: p(x) = c_0 * H_0(x) + c_1 * H_1(x) + ... + c_n * H_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then ``p(x)`` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + hermval2d, hermgrid2d, hermval3d, hermgrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermval + >>> coef = [1,2,3] + >>> hermval(1, coef) + 11.0 + >>> hermval([[1,2],[3,4]], coef) + array([[ 11., 51.], + [115., 203.]]) + + """ + c = np.array(c, ndmin=1, copy=None) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,) * x.ndim) + + x2 = x * 2 + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + nd = len(c) + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = c[-i] - c1 * (2 * (nd - 1)) + c1 = tmp + c1 * x2 + return c0 + c1 * x2 + + +def hermval2d(x, y, c): + """ + Evaluate a 2-D Hermite series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * H_i(x) * H_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points ``(x, y)``, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points formed with + pairs of corresponding values from `x` and `y`. + + See Also + -------- + hermval, hermgrid2d, hermval3d, hermgrid3d + + Examples + -------- + >>> from numpy.polynomial.hermite import hermval2d + >>> x = [1, 2] + >>> y = [4, 5] + >>> c = [[1, 2, 3], [4, 5, 6]] + >>> hermval2d(x, y, c) + array([1035., 2883.]) + + """ + return pu._valnd(hermval, c, x, y) + + +def hermgrid2d(x, y, c): + """ + Evaluate a 2-D Hermite series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b) + + where the points ``(a, b)`` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + hermval, hermval2d, hermval3d, hermgrid3d + + Examples + -------- + >>> from numpy.polynomial.hermite import hermgrid2d + >>> x = [1, 2, 3] + >>> y = [4, 5] + >>> c = [[1, 2, 3], [4, 5, 6]] + >>> hermgrid2d(x, y, c) + array([[1035., 1599.], + [1867., 2883.], + [2699., 4167.]]) + + """ + return pu._gridnd(hermval, c, x, y) + + +def hermval3d(x, y, z, c): + """ + Evaluate a 3-D Hermite series at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * H_i(x) * H_j(y) * H_k(z) + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or a lists, otherwise they are treated as a scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + ``(x, y, z)``, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + hermval, hermval2d, hermgrid2d, hermgrid3d + + Examples + -------- + >>> from numpy.polynomial.hermite import hermval3d + >>> x = [1, 2] + >>> y = [4, 5] + >>> z = [6, 7] + >>> c = [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]] + >>> hermval3d(x, y, z, c) + array([ 40077., 120131.]) + + """ + return pu._valnd(hermval, c, x, y, z) + + +def hermgrid3d(x, y, z, c): + """ + Evaluate a 3-D Hermite series on the Cartesian product of x, y, and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * H_i(a) * H_j(b) * H_k(c) + + where the points ``(a, b, c)`` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. + + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or a lists, otherwise they are treated as a scalars. In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. + + If `c` has fewer than three dimensions, ones are implicitly appended to + its shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape + y.shape + z.shape. + + Parameters + ---------- + x, y, z : array_like, compatible objects + The three dimensional series is evaluated at the points in the + Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a + list or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and, if it isn't an ndarray, it is treated as a + scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + hermval, hermval2d, hermgrid2d, hermval3d + + Examples + -------- + >>> from numpy.polynomial.hermite import hermgrid3d + >>> x = [1, 2] + >>> y = [4, 5] + >>> z = [6, 7] + >>> c = [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]] + >>> hermgrid3d(x, y, z, c) + array([[[ 40077., 54117.], + [ 49293., 66561.]], + [[ 72375., 97719.], + [ 88975., 120131.]]]) + + """ + return pu._gridnd(hermval, c, x, y, z) + + +def hermvander(x, deg): + """Pseudo-Vandermonde matrix of given degree. + + Returns the pseudo-Vandermonde matrix of degree `deg` and sample points + `x`. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., i] = H_i(x), + + where ``0 <= i <= deg``. The leading indices of `V` index the elements of + `x` and the last index is the degree of the Hermite polynomial. + + If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is the + array ``V = hermvander(x, n)``, then ``np.dot(V, c)`` and + ``hermval(x, c)`` are the same up to roundoff. This equivalence is + useful both for least squares fitting and for the evaluation of a large + number of Hermite series of the same degree and sample points. + + Parameters + ---------- + x : array_like + Array of points. The dtype is converted to float64 or complex128 + depending on whether any of the elements are complex. If `x` is + scalar it is converted to a 1-D array. + deg : int + Degree of the resulting matrix. + + Returns + ------- + vander : ndarray + The pseudo-Vandermonde matrix. The shape of the returned matrix is + ``x.shape + (deg + 1,)``, where The last index is the degree of the + corresponding Hermite polynomial. The dtype will be the same as + the converted `x`. + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial.hermite import hermvander + >>> x = np.array([-1, 0, 1]) + >>> hermvander(x, 3) + array([[ 1., -2., 2., 4.], + [ 1., 0., -2., -0.], + [ 1., 2., 2., -4.]]) + + """ + ideg = pu._as_int(deg, "deg") + if ideg < 0: + raise ValueError("deg must be non-negative") + + x = np.array(x, copy=None, ndmin=1) + 0.0 + dims = (ideg + 1,) + x.shape + dtyp = x.dtype + v = np.empty(dims, dtype=dtyp) + v[0] = x * 0 + 1 + if ideg > 0: + x2 = x * 2 + v[1] = x2 + for i in range(2, ideg + 1): + v[i] = (v[i - 1] * x2 - v[i - 2] * (2 * (i - 1))) + return np.moveaxis(v, 0, -1) + + +def hermvander2d(x, y, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y)``. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (deg[1] + 1)*i + j] = H_i(x) * H_j(y), + + where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading indices of + `V` index the points ``(x, y)`` and the last index encodes the degrees of + the Hermite polynomials. + + If ``V = hermvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` + correspond to the elements of a 2-D coefficient array `c` of shape + (xdeg + 1, ydeg + 1) in the order + + .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + + and ``np.dot(V, c.flat)`` and ``hermval2d(x, y, c)`` will be the same + up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 2-D Hermite + series of the same degrees and sample points. + + Parameters + ---------- + x, y : array_like + Arrays of point coordinates, all of the same shape. The dtypes + will be converted to either float64 or complex128 depending on + whether any of the elements are complex. Scalars are converted to 1-D + arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg]. + + Returns + ------- + vander2d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same + as the converted `x` and `y`. + + See Also + -------- + hermvander, hermvander3d, hermval2d, hermval3d + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial.hermite import hermvander2d + >>> x = np.array([-1, 0, 1]) + >>> y = np.array([-1, 0, 1]) + >>> hermvander2d(x, y, [2, 2]) + array([[ 1., -2., 2., -2., 4., -4., 2., -4., 4.], + [ 1., 0., -2., 0., 0., -0., -2., -0., 4.], + [ 1., 2., 2., 2., 4., 4., 2., 4., 4.]]) + + """ + return pu._vander_nd_flat((hermvander, hermvander), (x, y), deg) + + +def hermvander3d(x, y, z, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y, z)``. If `l`, `m`, `n` are the given degrees in `x`, `y`, `z`, + then The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = H_i(x)*H_j(y)*H_k(z), + + where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= j <= n``. The leading + indices of `V` index the points ``(x, y, z)`` and the last index encodes + the degrees of the Hermite polynomials. + + If ``V = hermvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns + of `V` correspond to the elements of a 3-D coefficient array `c` of + shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order + + .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... + + and ``np.dot(V, c.flat)`` and ``hermval3d(x, y, z, c)`` will be the + same up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 3-D Hermite + series of the same degrees and sample points. + + Parameters + ---------- + x, y, z : array_like + Arrays of point coordinates, all of the same shape. The dtypes will + be converted to either float64 or complex128 depending on whether + any of the elements are complex. Scalars are converted to 1-D + arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg, z_deg]. + + Returns + ------- + vander3d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will + be the same as the converted `x`, `y`, and `z`. + + See Also + -------- + hermvander, hermvander3d, hermval2d, hermval3d + + Examples + -------- + >>> from numpy.polynomial.hermite import hermvander3d + >>> x = np.array([-1, 0, 1]) + >>> y = np.array([-1, 0, 1]) + >>> z = np.array([-1, 0, 1]) + >>> hermvander3d(x, y, z, [0, 1, 2]) + array([[ 1., -2., 2., -2., 4., -4.], + [ 1., 0., -2., 0., 0., -0.], + [ 1., 2., 2., 2., 4., 4.]]) + + """ + return pu._vander_nd_flat((hermvander, hermvander, hermvander), (x, y, z), deg) + + +def hermfit(x, y, deg, rcond=None, full=False, w=None): + """ + Least squares fit of Hermite series to data. + + Return the coefficients of a Hermite series of degree `deg` that is the + least squares fit to the data values `y` given at points `x`. If `y` is + 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple + fits are done, one for each column of `y`, and the resulting + coefficients are stored in the corresponding columns of a 2-D return. + The fitted polynomial(s) are in the form + + .. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x), + + where `n` is `deg`. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Hermite coefficients ordered from low to high. If `y` was 2-D, + the coefficients for the data in column k of `y` are in column + `k`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if ``full == False``. The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.exceptions.RankWarning) + + See Also + -------- + numpy.polynomial.chebyshev.chebfit + numpy.polynomial.legendre.legfit + numpy.polynomial.laguerre.lagfit + numpy.polynomial.polynomial.polyfit + numpy.polynomial.hermite_e.hermefit + hermval : Evaluates a Hermite series. + hermvander : Vandermonde matrix of Hermite series. + hermweight : Hermite weight function + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the Hermite series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where the :math:`w_j` are the weights. This problem is solved by + setting up the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `~exceptions.RankWarning` will be issued. This means that + the coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using Hermite series are probably most useful when the data can be + approximated by ``sqrt(w(x)) * p(x)``, where ``w(x)`` is the Hermite + weight. In that case the weight ``sqrt(w(x[i]))`` should be used + together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is + available as `hermweight`. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial.hermite import hermfit, hermval + >>> x = np.linspace(-10, 10) + >>> rng = np.random.default_rng() + >>> err = rng.normal(scale=1./10, size=len(x)) + >>> y = hermval(x, [1, 2, 3]) + err + >>> hermfit(x, y, 2) + array([1.02294967, 2.00016403, 2.99994614]) # may vary + + """ + return pu._fit(hermvander, x, y, deg, rcond, full, w) + + +def hermcompanion(c): + """Return the scaled companion matrix of c. + + The basis polynomials are scaled so that the companion matrix is + symmetric when `c` is a Hermite basis polynomial. This provides + better eigenvalue estimates than the unscaled case and for basis + polynomials the eigenvalues are guaranteed to be real if + `numpy.linalg.eigvalsh` is used to obtain them. + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Scaled companion matrix of dimensions (deg, deg). + + Examples + -------- + >>> from numpy.polynomial.hermite import hermcompanion + >>> hermcompanion([1, 0, 1]) + array([[0. , 0.35355339], + [0.70710678, 0. ]]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-.5 * c[0] / c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + scl = np.hstack((1., 1. / np.sqrt(2. * np.arange(n - 1, 0, -1)))) + scl = np.multiply.accumulate(scl)[::-1] + top = mat.reshape(-1)[1::n + 1] + bot = mat.reshape(-1)[n::n + 1] + top[...] = np.sqrt(.5 * np.arange(1, n)) + bot[...] = top + mat[:, -1] -= scl * c[:-1] / (2.0 * c[-1]) + return mat + + +def hermroots(c): + """ + Compute the roots of a Hermite series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * H_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the series. If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + numpy.polynomial.polynomial.polyroots + numpy.polynomial.legendre.legroots + numpy.polynomial.laguerre.lagroots + numpy.polynomial.chebyshev.chebroots + numpy.polynomial.hermite_e.hermeroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix, Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the series for such + values. Roots with multiplicity greater than 1 will also show larger + errors as the value of the series near such points is relatively + insensitive to errors in the roots. Isolated roots near the origin can + be improved by a few iterations of Newton's method. + + The Hermite series basis polynomials aren't powers of `x` so the + results of this function may seem unintuitive. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermroots, hermfromroots + >>> coef = hermfromroots([-1, 0, 1]) + >>> coef + array([0. , 0.25 , 0. , 0.125]) + >>> hermroots(coef) + array([-1.00000000e+00, -1.38777878e-17, 1.00000000e+00]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) <= 1: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-.5 * c[0] / c[1]]) + + # rotated companion matrix reduces error + m = hermcompanion(c)[::-1, ::-1] + r = np.linalg.eigvals(m) + r.sort() + return r + + +def _normed_hermite_n(x, n): + """ + Evaluate a normalized Hermite polynomial. + + Compute the value of the normalized Hermite polynomial of degree ``n`` + at the points ``x``. + + + Parameters + ---------- + x : ndarray of double. + Points at which to evaluate the function + n : int + Degree of the normalized Hermite function to be evaluated. + + Returns + ------- + values : ndarray + The shape of the return value is described above. + + Notes + ----- + This function is needed for finding the Gauss points and integration + weights for high degrees. The values of the standard Hermite functions + overflow when n >= 207. + + """ + if n == 0: + return np.full(x.shape, 1 / np.sqrt(np.sqrt(np.pi))) + + c0 = 0. + c1 = 1. / np.sqrt(np.sqrt(np.pi)) + nd = float(n) + for i in range(n - 1): + tmp = c0 + c0 = -c1 * np.sqrt((nd - 1.) / nd) + c1 = tmp + c1 * x * np.sqrt(2. / nd) + nd = nd - 1.0 + return c0 + c1 * x * np.sqrt(2) + + +def hermgauss(deg): + """ + Gauss-Hermite quadrature. + + Computes the sample points and weights for Gauss-Hermite quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]` + with the weight function :math:`f(x) = \\exp(-x^2)`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + The results have only been tested up to degree 100, higher degrees may + be problematic. The weights are determined by using the fact that + + .. math:: w_k = c / (H'_n(x_k) * H_{n-1}(x_k)) + + where :math:`c` is a constant independent of :math:`k` and :math:`x_k` + is the k'th root of :math:`H_n`, and then scaling the results to get + the right value when integrating 1. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermgauss + >>> hermgauss(2) + (array([-0.70710678, 0.70710678]), array([0.88622693, 0.88622693])) + + """ + ideg = pu._as_int(deg, "deg") + if ideg <= 0: + raise ValueError("deg must be a positive integer") + + # first approximation of roots. We use the fact that the companion + # matrix is symmetric in this case in order to obtain better zeros. + c = np.array([0] * deg + [1], dtype=np.float64) + m = hermcompanion(c) + x = np.linalg.eigvalsh(m) + + # improve roots by one application of Newton + dy = _normed_hermite_n(x, ideg) + df = _normed_hermite_n(x, ideg - 1) * np.sqrt(2 * ideg) + x -= dy / df + + # compute the weights. We scale the factor to avoid possible numerical + # overflow. + fm = _normed_hermite_n(x, ideg - 1) + fm /= np.abs(fm).max() + w = 1 / (fm * fm) + + # for Hermite we can also symmetrize + w = (w + w[::-1]) / 2 + x = (x - x[::-1]) / 2 + + # scale w to get the right value + w *= np.sqrt(np.pi) / w.sum() + + return x, w + + +def hermweight(x): + """ + Weight function of the Hermite polynomials. + + The weight function is :math:`\\exp(-x^2)` and the interval of + integration is :math:`[-\\inf, \\inf]`. the Hermite polynomials are + orthogonal, but not normalized, with respect to this weight function. + + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. + + Returns + ------- + w : ndarray + The weight function at `x`. + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial.hermite import hermweight + >>> x = np.arange(-2, 2) + >>> hermweight(x) + array([0.01831564, 0.36787944, 1. , 0.36787944]) + + """ + w = np.exp(-x**2) + return w + + +# +# Hermite series class +# + +class Hermite(ABCPolyBase): + """A Hermite series class. + + The Hermite class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed below. + + Parameters + ---------- + coef : array_like + Hermite coefficients in order of increasing degree, i.e, + ``(1, 2, 3)`` gives ``1*H_0(x) + 2*H_1(x) + 3*H_2(x)``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [-1., 1.]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1., 1.]. + symbol : str, optional + Symbol used to represent the independent variable in string + representations of the polynomial expression, e.g. for printing. + The symbol must be a valid Python identifier. Default value is 'x'. + + .. versionadded:: 1.24 + + """ + # Virtual Functions + _add = staticmethod(hermadd) + _sub = staticmethod(hermsub) + _mul = staticmethod(hermmul) + _div = staticmethod(hermdiv) + _pow = staticmethod(hermpow) + _val = staticmethod(hermval) + _int = staticmethod(hermint) + _der = staticmethod(hermder) + _fit = staticmethod(hermfit) + _line = staticmethod(hermline) + _roots = staticmethod(hermroots) + _fromroots = staticmethod(hermfromroots) + + # Virtual properties + domain = np.array(hermdomain) + window = np.array(hermdomain) + basis_name = 'H' diff --git a/local_packages/numpy/polynomial/hermite.pyi b/local_packages/numpy/polynomial/hermite.pyi new file mode 100644 index 0000000..04b8238 --- /dev/null +++ b/local_packages/numpy/polynomial/hermite.pyi @@ -0,0 +1,106 @@ +from typing import Any, ClassVar, Final, Literal as L, TypeVar + +import numpy as np +from numpy._typing import _Shape + +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncGauss, + _FuncInteg, + _FuncLine, + _FuncPoly2Ortho, + _FuncPow, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncVander, + _FuncVander2D, + _FuncVander3D, + _FuncWeight, +) +from .polyutils import trimcoef as hermtrim + +__all__ = [ + "hermzero", + "hermone", + "hermx", + "hermdomain", + "hermline", + "hermadd", + "hermsub", + "hermmulx", + "hermmul", + "hermdiv", + "hermpow", + "hermval", + "hermder", + "hermint", + "herm2poly", + "poly2herm", + "hermfromroots", + "hermvander", + "hermfit", + "hermtrim", + "hermroots", + "Hermite", + "hermval2d", + "hermval3d", + "hermgrid2d", + "hermgrid3d", + "hermvander2d", + "hermvander3d", + "hermcompanion", + "hermgauss", + "hermweight", +] + +_ShapeT = TypeVar("_ShapeT", bound=_Shape) + +poly2herm: Final[_FuncPoly2Ortho] = ... +herm2poly: Final[_FuncUnOp] = ... + +hermdomain: Final[_Array2[np.float64]] = ... +hermzero: Final[_Array1[np.int_]] = ... +hermone: Final[_Array1[np.int_]] = ... +hermx: Final[_Array2[np.int_]] = ... + +hermline: Final[_FuncLine] = ... +hermfromroots: Final[_FuncFromRoots] = ... +hermadd: Final[_FuncBinOp] = ... +hermsub: Final[_FuncBinOp] = ... +hermmulx: Final[_FuncUnOp] = ... +hermmul: Final[_FuncBinOp] = ... +hermdiv: Final[_FuncBinOp] = ... +hermpow: Final[_FuncPow] = ... +hermder: Final[_FuncDer] = ... +hermint: Final[_FuncInteg] = ... +hermval: Final[_FuncVal] = ... +hermval2d: Final[_FuncVal2D] = ... +hermval3d: Final[_FuncVal3D] = ... +hermgrid2d: Final[_FuncVal2D] = ... +hermgrid3d: Final[_FuncVal3D] = ... +hermvander: Final[_FuncVander] = ... +hermvander2d: Final[_FuncVander2D] = ... +hermvander3d: Final[_FuncVander3D] = ... +hermfit: Final[_FuncFit] = ... +hermcompanion: Final[_FuncCompanion] = ... +hermroots: Final[_FuncRoots] = ... + +def _normed_hermite_n(x: np.ndarray[_ShapeT, np.dtype[np.float64]], n: int) -> np.ndarray[_ShapeT, np.dtype[np.float64]]: ... + +hermgauss: Final[_FuncGauss] = ... +hermweight: Final[_FuncWeight] = ... + +class Hermite(ABCPolyBase[L["H"]]): + basis_name: ClassVar[L["H"]] = "H" # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/local_packages/numpy/polynomial/hermite_e.py b/local_packages/numpy/polynomial/hermite_e.py new file mode 100644 index 0000000..f5d82aa --- /dev/null +++ b/local_packages/numpy/polynomial/hermite_e.py @@ -0,0 +1,1640 @@ +""" +=================================================================== +HermiteE Series, "Probabilists" (:mod:`numpy.polynomial.hermite_e`) +=================================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Hermite_e series, including a `HermiteE` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + HermiteE + +Constants +--------- +.. autosummary:: + :toctree: generated/ + + hermedomain + hermezero + hermeone + hermex + +Arithmetic +---------- +.. autosummary:: + :toctree: generated/ + + hermeadd + hermesub + hermemulx + hermemul + hermediv + hermepow + hermeval + hermeval2d + hermeval3d + hermegrid2d + hermegrid3d + +Calculus +-------- +.. autosummary:: + :toctree: generated/ + + hermeder + hermeint + +Misc Functions +-------------- +.. autosummary:: + :toctree: generated/ + + hermefromroots + hermeroots + hermevander + hermevander2d + hermevander3d + hermegauss + hermeweight + hermecompanion + hermefit + hermetrim + hermeline + herme2poly + poly2herme + +See also +-------- +`numpy.polynomial` + +""" +import numpy as np + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'hermezero', 'hermeone', 'hermex', 'hermedomain', 'hermeline', + 'hermeadd', 'hermesub', 'hermemulx', 'hermemul', 'hermediv', + 'hermepow', 'hermeval', 'hermeder', 'hermeint', 'herme2poly', + 'poly2herme', 'hermefromroots', 'hermevander', 'hermefit', 'hermetrim', + 'hermeroots', 'HermiteE', 'hermeval2d', 'hermeval3d', 'hermegrid2d', + 'hermegrid3d', 'hermevander2d', 'hermevander3d', 'hermecompanion', + 'hermegauss', 'hermeweight'] + +hermetrim = pu.trimcoef + + +def poly2herme(pol): + """ + poly2herme(pol) + + Convert a polynomial to a Hermite series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Hermite series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Hermite + series. + + See Also + -------- + herme2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial.hermite_e import poly2herme + >>> poly2herme(np.arange(4)) + array([ 2., 10., 2., 3.]) + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = hermeadd(hermemulx(res), pol[i]) + return res + + +def herme2poly(c): + """ + Convert a Hermite series to a polynomial. + + Convert an array representing the coefficients of a Hermite series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Hermite series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2herme + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import herme2poly + >>> herme2poly([ 2., 10., 2., 3.]) + array([0., 1., 2., 3.]) + + """ + from .polynomial import polyadd, polymulx, polysub + + [c] = pu.as_series([c]) + n = len(c) + if n == 1: + return c + if n == 2: + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], c1 * (i - 1)) + c1 = polyadd(tmp, polymulx(c1)) + return polyadd(c0, polymulx(c1)) + + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Hermite +hermedomain = np.array([-1., 1.]) + +# Hermite coefficients representing zero. +hermezero = np.array([0]) + +# Hermite coefficients representing one. +hermeone = np.array([1]) + +# Hermite coefficients representing the identity x. +hermex = np.array([0, 1]) + + +def hermeline(off, scl): + """ + Hermite series whose graph is a straight line. + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Hermite series for + ``off + scl*x``. + + See Also + -------- + numpy.polynomial.polynomial.polyline + numpy.polynomial.chebyshev.chebline + numpy.polynomial.legendre.legline + numpy.polynomial.laguerre.lagline + numpy.polynomial.hermite.hermline + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeline + >>> from numpy.polynomial.hermite_e import hermeline, hermeval + >>> hermeval(0,hermeline(3, 2)) + 3.0 + >>> hermeval(1,hermeline(3, 2)) + 5.0 + + """ + if scl != 0: + return np.array([off, scl]) + else: + return np.array([off]) + + +def hermefromroots(roots): + """ + Generate a HermiteE series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in HermiteE form, where the :math:`r_n` are the roots specified in `roots`. + If a zero has multiplicity n, then it must appear in `roots` n times. + For instance, if 2 is a root of multiplicity three and 3 is a root of + multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The + roots can appear in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in HermiteE form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients. If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + numpy.polynomial.polynomial.polyfromroots + numpy.polynomial.legendre.legfromroots + numpy.polynomial.laguerre.lagfromroots + numpy.polynomial.hermite.hermfromroots + numpy.polynomial.chebyshev.chebfromroots + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermefromroots, hermeval + >>> coef = hermefromroots((-1, 0, 1)) + >>> hermeval((-1, 0, 1), coef) + array([0., 0., 0.]) + >>> coef = hermefromroots((-1j, 1j)) + >>> hermeval((-1j, 1j), coef) + array([0.+0.j, 0.+0.j]) + + """ + return pu._fromroots(hermeline, hermemul, roots) + + +def hermeadd(c1, c2): + """ + Add one Hermite series to another. + + Returns the sum of two Hermite series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Hermite series of their sum. + + See Also + -------- + hermesub, hermemulx, hermemul, hermediv, hermepow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Hermite series + is a Hermite series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeadd + >>> hermeadd([1, 2, 3], [1, 2, 3, 4]) + array([2., 4., 6., 4.]) + + """ + return pu._add(c1, c2) + + +def hermesub(c1, c2): + """ + Subtract one Hermite series from another. + + Returns the difference of two Hermite series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Hermite series coefficients representing their difference. + + See Also + -------- + hermeadd, hermemulx, hermemul, hermediv, hermepow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Hermite + series is a Hermite series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermesub + >>> hermesub([1, 2, 3, 4], [1, 2, 3]) + array([0., 0., 0., 4.]) + + """ + return pu._sub(c1, c2) + + +def hermemulx(c): + """Multiply a Hermite series by x. + + Multiply the Hermite series `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + hermeadd, hermesub, hermemul, hermediv, hermepow + + Notes + ----- + The multiplication uses the recursion relationship for Hermite + polynomials in the form + + .. math:: + + xP_i(x) = (P_{i + 1}(x) + iP_{i - 1}(x))) + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermemulx + >>> hermemulx([1, 2, 3]) + array([2., 7., 2., 3.]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0] * 0 + prd[1] = c[0] + for i in range(1, len(c)): + prd[i + 1] = c[i] + prd[i - 1] += c[i] * i + return prd + + +def hermemul(c1, c2): + """ + Multiply one Hermite series by another. + + Returns the product of two Hermite series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Hermite series coefficients representing their product. + + See Also + -------- + hermeadd, hermesub, hermemulx, hermediv, hermepow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Hermite polynomial basis set. Thus, to express + the product as a Hermite series, it is necessary to "reproject" the + product onto said basis set, which may produce "unintuitive" (but + correct) results; see Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermemul + >>> hermemul([1, 2, 3], [0, 1, 2]) + array([14., 15., 28., 7., 6.]) + + """ + # s1, s2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + + if len(c1) > len(c2): + c = c2 + xs = c1 + else: + c = c1 + xs = c2 + + if len(c) == 1: + c0 = c[0] * xs + c1 = 0 + elif len(c) == 2: + c0 = c[0] * xs + c1 = c[1] * xs + else: + nd = len(c) + c0 = c[-2] * xs + c1 = c[-1] * xs + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = hermesub(c[-i] * xs, c1 * (nd - 1)) + c1 = hermeadd(tmp, hermemulx(c1)) + return hermeadd(c0, hermemulx(c1)) + + +def hermediv(c1, c2): + """ + Divide one Hermite series by another. + + Returns the quotient-with-remainder of two Hermite series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + [quo, rem] : ndarrays + Of Hermite series coefficients representing the quotient and + remainder. + + See Also + -------- + hermeadd, hermesub, hermemulx, hermemul, hermepow + + Notes + ----- + In general, the (polynomial) division of one Hermite series by another + results in quotient and remainder terms that are not in the Hermite + polynomial basis set. Thus, to express these results as a Hermite + series, it is necessary to "reproject" the results onto the Hermite + basis set, which may produce "unintuitive" (but correct) results; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermediv + >>> hermediv([ 14., 15., 28., 7., 6.], [0, 1, 2]) + (array([1., 2., 3.]), array([0.])) + >>> hermediv([ 15., 17., 28., 7., 6.], [0, 1, 2]) + (array([1., 2., 3.]), array([1., 2.])) + + """ + return pu._div(hermemul, c1, c2) + + +def hermepow(c, pow, maxpower=16): + """Raise a Hermite series to a power. + + Returns the Hermite series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Hermite series of power. + + See Also + -------- + hermeadd, hermesub, hermemulx, hermemul, hermediv + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermepow + >>> hermepow([1, 2, 3], 2) + array([23., 28., 46., 12., 9.]) + + """ + return pu._pow(hermemul, c, pow, maxpower) + + +def hermeder(c, m=1, scl=1, axis=0): + """ + Differentiate a Hermite_e series. + + Returns the series coefficients `c` differentiated `m` times along + `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*He_0 + 2*He_1 + 3*He_2`` + while [[1,2],[1,2]] represents ``1*He_0(x)*He_0(y) + 1*He_1(x)*He_0(y) + + 2*He_0(x)*He_1(y) + 2*He_1(x)*He_1(y)`` if axis=0 is ``x`` and axis=1 + is ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite_e series coefficients. If `c` is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + Returns + ------- + der : ndarray + Hermite series of the derivative. + + See Also + -------- + hermeint + + Notes + ----- + In general, the result of differentiating a Hermite series does not + resemble the same operation on a power series. Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeder + >>> hermeder([ 1., 1., 1., 1.]) + array([1., 2., 3.]) + >>> hermeder([-0.25, 1., 1./2., 1./3., 1./4 ], m=2) + array([1., 2., 3.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt = pu._as_int(m, "the order of derivation") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + return c[:1] * 0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 0, -1): + der[j - 1] = j * c[j] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Hermite_e series. + + Returns the Hermite_e series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]] + represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) + + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite_e series coefficients. If c is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + Returns + ------- + S : ndarray + Hermite_e series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + hermeder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeint + >>> hermeint([1, 2, 3]) # integrate once, value 0 at 0. + array([1., 1., 1., 1.]) + >>> hermeint([1, 2, 3], m=2) # integrate twice, value & deriv 0 at 0 + array([-0.25 , 1. , 0.5 , 0.33333333, 0.25 ]) # may vary + >>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0. + array([2., 1., 1., 1.]) + >>> hermeint([1, 2, 3], lbnd=-1) # integrate once, value 0 at -1 + array([-1., 1., 1., 1.]) + >>> hermeint([1, 2, 3], m=2, k=[1, 2], lbnd=-1) + array([ 1.83333333, 0. , 0.5 , 0.33333333, 0.25 ]) # may vary + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt = pu._as_int(m, "the order of integration") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0] * (cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0] * 0 + tmp[1] = c[0] + for j in range(1, n): + tmp[j + 1] = c[j] / (j + 1) + tmp[0] += k[i] - hermeval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def hermeval(x, c, tensor=True): + """ + Evaluate a HermiteE series at points x. + + If `c` is of length ``n + 1``, this function returns the value: + + .. math:: p(x) = c_0 * He_0(x) + c_1 * He_1(x) + ... + c_n * He_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then ``p(x)`` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + hermeval2d, hermegrid2d, hermeval3d, hermegrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeval + >>> coef = [1,2,3] + >>> hermeval(1, coef) + 3.0 + >>> hermeval([[1,2],[3,4]], coef) + array([[ 3., 14.], + [31., 54.]]) + + """ + c = np.array(c, ndmin=1, copy=None) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,) * x.ndim) + + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + nd = len(c) + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = c[-i] - c1 * (nd - 1) + c1 = tmp + c1 * x + return c0 + c1 * x + + +def hermeval2d(x, y, c): + """ + Evaluate a 2-D HermiteE series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * He_i(x) * He_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points ``(x, y)``, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points formed with + pairs of corresponding values from `x` and `y`. + + See Also + -------- + hermeval, hermegrid2d, hermeval3d, hermegrid3d + """ + return pu._valnd(hermeval, c, x, y) + + +def hermegrid2d(x, y, c): + """ + Evaluate a 2-D HermiteE series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b) + + where the points ``(a, b)`` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + hermeval, hermeval2d, hermeval3d, hermegrid3d + """ + return pu._gridnd(hermeval, c, x, y) + + +def hermeval3d(x, y, z, c): + """ + Evaluate a 3-D Hermite_e series at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * He_i(x) * He_j(y) * He_k(z) + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or a lists, otherwise they are treated as a scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + hermeval, hermeval2d, hermegrid2d, hermegrid3d + """ + return pu._valnd(hermeval, c, x, y, z) + + +def hermegrid3d(x, y, z, c): + """ + Evaluate a 3-D HermiteE series on the Cartesian product of x, y, and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * He_i(a) * He_j(b) * He_k(c) + + where the points ``(a, b, c)`` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. + + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or a lists, otherwise they are treated as a scalars. In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. + + If `c` has fewer than three dimensions, ones are implicitly appended to + its shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape + y.shape + z.shape. + + Parameters + ---------- + x, y, z : array_like, compatible objects + The three dimensional series is evaluated at the points in the + Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a + list or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and, if it isn't an ndarray, it is treated as a + scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + hermeval, hermeval2d, hermegrid2d, hermeval3d + """ + return pu._gridnd(hermeval, c, x, y, z) + + +def hermevander(x, deg): + """Pseudo-Vandermonde matrix of given degree. + + Returns the pseudo-Vandermonde matrix of degree `deg` and sample points + `x`. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., i] = He_i(x), + + where ``0 <= i <= deg``. The leading indices of `V` index the elements of + `x` and the last index is the degree of the HermiteE polynomial. + + If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is the + array ``V = hermevander(x, n)``, then ``np.dot(V, c)`` and + ``hermeval(x, c)`` are the same up to roundoff. This equivalence is + useful both for least squares fitting and for the evaluation of a large + number of HermiteE series of the same degree and sample points. + + Parameters + ---------- + x : array_like + Array of points. The dtype is converted to float64 or complex128 + depending on whether any of the elements are complex. If `x` is + scalar it is converted to a 1-D array. + deg : int + Degree of the resulting matrix. + + Returns + ------- + vander : ndarray + The pseudo-Vandermonde matrix. The shape of the returned matrix is + ``x.shape + (deg + 1,)``, where The last index is the degree of the + corresponding HermiteE polynomial. The dtype will be the same as + the converted `x`. + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial.hermite_e import hermevander + >>> x = np.array([-1, 0, 1]) + >>> hermevander(x, 3) + array([[ 1., -1., 0., 2.], + [ 1., 0., -1., -0.], + [ 1., 1., 0., -2.]]) + + """ + ideg = pu._as_int(deg, "deg") + if ideg < 0: + raise ValueError("deg must be non-negative") + + x = np.array(x, copy=None, ndmin=1) + 0.0 + dims = (ideg + 1,) + x.shape + dtyp = x.dtype + v = np.empty(dims, dtype=dtyp) + v[0] = x * 0 + 1 + if ideg > 0: + v[1] = x + for i in range(2, ideg + 1): + v[i] = (v[i - 1] * x - v[i - 2] * (i - 1)) + return np.moveaxis(v, 0, -1) + + +def hermevander2d(x, y, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y)``. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (deg[1] + 1)*i + j] = He_i(x) * He_j(y), + + where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading indices of + `V` index the points ``(x, y)`` and the last index encodes the degrees of + the HermiteE polynomials. + + If ``V = hermevander2d(x, y, [xdeg, ydeg])``, then the columns of `V` + correspond to the elements of a 2-D coefficient array `c` of shape + (xdeg + 1, ydeg + 1) in the order + + .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + + and ``np.dot(V, c.flat)`` and ``hermeval2d(x, y, c)`` will be the same + up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 2-D HermiteE + series of the same degrees and sample points. + + Parameters + ---------- + x, y : array_like + Arrays of point coordinates, all of the same shape. The dtypes + will be converted to either float64 or complex128 depending on + whether any of the elements are complex. Scalars are converted to + 1-D arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg]. + + Returns + ------- + vander2d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same + as the converted `x` and `y`. + + See Also + -------- + hermevander, hermevander3d, hermeval2d, hermeval3d + """ + return pu._vander_nd_flat((hermevander, hermevander), (x, y), deg) + + +def hermevander3d(x, y, z, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y, z)``. If `l`, `m`, `n` are the given degrees in `x`, `y`, `z`, + then Hehe pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = He_i(x)*He_j(y)*He_k(z), + + where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= j <= n``. The leading + indices of `V` index the points ``(x, y, z)`` and the last index encodes + the degrees of the HermiteE polynomials. + + If ``V = hermevander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns + of `V` correspond to the elements of a 3-D coefficient array `c` of + shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order + + .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... + + and ``np.dot(V, c.flat)`` and ``hermeval3d(x, y, z, c)`` will be the + same up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 3-D HermiteE + series of the same degrees and sample points. + + Parameters + ---------- + x, y, z : array_like + Arrays of point coordinates, all of the same shape. The dtypes will + be converted to either float64 or complex128 depending on whether + any of the elements are complex. Scalars are converted to 1-D + arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg, z_deg]. + + Returns + ------- + vander3d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will + be the same as the converted `x`, `y`, and `z`. + + See Also + -------- + hermevander, hermevander3d, hermeval2d, hermeval3d + """ + return pu._vander_nd_flat((hermevander, hermevander, hermevander), (x, y, z), deg) + + +def hermefit(x, y, deg, rcond=None, full=False, w=None): + """ + Least squares fit of Hermite series to data. + + Return the coefficients of a HermiteE series of degree `deg` that is + the least squares fit to the data values `y` given at points `x`. If + `y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D + multiple fits are done, one for each column of `y`, and the resulting + coefficients are stored in the corresponding columns of a 2-D return. + The fitted polynomial(s) are in the form + + .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x), + + where `n` is `deg`. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Hermite coefficients ordered from low to high. If `y` was 2-D, + the coefficients for the data in column k of `y` are in column + `k`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if ``full = False``. The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.exceptions.RankWarning) + + See Also + -------- + numpy.polynomial.chebyshev.chebfit + numpy.polynomial.legendre.legfit + numpy.polynomial.polynomial.polyfit + numpy.polynomial.hermite.hermfit + numpy.polynomial.laguerre.lagfit + hermeval : Evaluates a Hermite series. + hermevander : pseudo Vandermonde matrix of Hermite series. + hermeweight : HermiteE weight function. + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the HermiteE series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where the :math:`w_j` are the weights. This problem is solved by + setting up the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the pseudo Vandermonde matrix of `x`, the elements of `c` + are the coefficients to be solved for, and the elements of `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `~exceptions.RankWarning` will be issued. This means that + the coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using HermiteE series are probably most useful when the data can + be approximated by ``sqrt(w(x)) * p(x)``, where ``w(x)`` is the HermiteE + weight. In that case the weight ``sqrt(w(x[i]))`` should be used + together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is + available as `hermeweight`. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial.hermite_e import hermefit, hermeval + >>> x = np.linspace(-10, 10) + >>> rng = np.random.default_rng() + >>> err = rng.normal(scale=1./10, size=len(x)) + >>> y = hermeval(x, [1, 2, 3]) + err + >>> hermefit(x, y, 2) + array([1.02284196, 2.00032805, 2.99978457]) # may vary + + """ + return pu._fit(hermevander, x, y, deg, rcond, full, w) + + +def hermecompanion(c): + """ + Return the scaled companion matrix of c. + + The basis polynomials are scaled so that the companion matrix is + symmetric when `c` is a HermiteE basis polynomial. This provides + better eigenvalue estimates than the unscaled case and for basis + polynomials the eigenvalues are guaranteed to be real if + `numpy.linalg.eigvalsh` is used to obtain them. + + Parameters + ---------- + c : array_like + 1-D array of HermiteE series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Scaled companion matrix of dimensions (deg, deg). + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-c[0] / c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + scl = np.hstack((1., 1. / np.sqrt(np.arange(n - 1, 0, -1)))) + scl = np.multiply.accumulate(scl)[::-1] + top = mat.reshape(-1)[1::n + 1] + bot = mat.reshape(-1)[n::n + 1] + top[...] = np.sqrt(np.arange(1, n)) + bot[...] = top + mat[:, -1] -= scl * c[:-1] / c[-1] + return mat + + +def hermeroots(c): + """ + Compute the roots of a HermiteE series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * He_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the series. If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + numpy.polynomial.polynomial.polyroots + numpy.polynomial.legendre.legroots + numpy.polynomial.laguerre.lagroots + numpy.polynomial.hermite.hermroots + numpy.polynomial.chebyshev.chebroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix, Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the series for such + values. Roots with multiplicity greater than 1 will also show larger + errors as the value of the series near such points is relatively + insensitive to errors in the roots. Isolated roots near the origin can + be improved by a few iterations of Newton's method. + + The HermiteE series basis polynomials aren't powers of `x` so the + results of this function may seem unintuitive. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeroots, hermefromroots + >>> coef = hermefromroots([-1, 0, 1]) + >>> coef + array([0., 2., 0., 1.]) + >>> hermeroots(coef) + array([-1., 0., 1.]) # may vary + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) <= 1: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-c[0] / c[1]]) + + # rotated companion matrix reduces error + m = hermecompanion(c)[::-1, ::-1] + r = np.linalg.eigvals(m) + r.sort() + return r + + +def _normed_hermite_e_n(x, n): + """ + Evaluate a normalized HermiteE polynomial. + + Compute the value of the normalized HermiteE polynomial of degree ``n`` + at the points ``x``. + + + Parameters + ---------- + x : ndarray of double. + Points at which to evaluate the function + n : int + Degree of the normalized HermiteE function to be evaluated. + + Returns + ------- + values : ndarray + The shape of the return value is described above. + + Notes + ----- + This function is needed for finding the Gauss points and integration + weights for high degrees. The values of the standard HermiteE functions + overflow when n >= 207. + + """ + if n == 0: + return np.full(x.shape, 1 / np.sqrt(np.sqrt(2 * np.pi))) + + c0 = 0. + c1 = 1. / np.sqrt(np.sqrt(2 * np.pi)) + nd = float(n) + for i in range(n - 1): + tmp = c0 + c0 = -c1 * np.sqrt((nd - 1.) / nd) + c1 = tmp + c1 * x * np.sqrt(1. / nd) + nd = nd - 1.0 + return c0 + c1 * x + + +def hermegauss(deg): + """ + Gauss-HermiteE quadrature. + + Computes the sample points and weights for Gauss-HermiteE quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]` + with the weight function :math:`f(x) = \\exp(-x^2/2)`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + The results have only been tested up to degree 100, higher degrees may + be problematic. The weights are determined by using the fact that + + .. math:: w_k = c / (He'_n(x_k) * He_{n-1}(x_k)) + + where :math:`c` is a constant independent of :math:`k` and :math:`x_k` + is the k'th root of :math:`He_n`, and then scaling the results to get + the right value when integrating 1. + + """ + ideg = pu._as_int(deg, "deg") + if ideg <= 0: + raise ValueError("deg must be a positive integer") + + # first approximation of roots. We use the fact that the companion + # matrix is symmetric in this case in order to obtain better zeros. + c = np.array([0] * deg + [1]) + m = hermecompanion(c) + x = np.linalg.eigvalsh(m) + + # improve roots by one application of Newton + dy = _normed_hermite_e_n(x, ideg) + df = _normed_hermite_e_n(x, ideg - 1) * np.sqrt(ideg) + x -= dy / df + + # compute the weights. We scale the factor to avoid possible numerical + # overflow. + fm = _normed_hermite_e_n(x, ideg - 1) + fm /= np.abs(fm).max() + w = 1 / (fm * fm) + + # for Hermite_e we can also symmetrize + w = (w + w[::-1]) / 2 + x = (x - x[::-1]) / 2 + + # scale w to get the right value + w *= np.sqrt(2 * np.pi) / w.sum() + + return x, w + + +def hermeweight(x): + """Weight function of the Hermite_e polynomials. + + The weight function is :math:`\\exp(-x^2/2)` and the interval of + integration is :math:`[-\\inf, \\inf]`. the HermiteE polynomials are + orthogonal, but not normalized, with respect to this weight function. + + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. + + Returns + ------- + w : ndarray + The weight function at `x`. + """ + w = np.exp(-.5 * x**2) + return w + + +# +# HermiteE series class +# + +class HermiteE(ABCPolyBase): + """A HermiteE series class. + + The HermiteE class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed below. + + Parameters + ---------- + coef : array_like + HermiteE coefficients in order of increasing degree, i.e, + ``(1, 2, 3)`` gives ``1*He_0(x) + 2*He_1(X) + 3*He_2(x)``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [-1., 1.]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1., 1.]. + symbol : str, optional + Symbol used to represent the independent variable in string + representations of the polynomial expression, e.g. for printing. + The symbol must be a valid Python identifier. Default value is 'x'. + + .. versionadded:: 1.24 + + """ + # Virtual Functions + _add = staticmethod(hermeadd) + _sub = staticmethod(hermesub) + _mul = staticmethod(hermemul) + _div = staticmethod(hermediv) + _pow = staticmethod(hermepow) + _val = staticmethod(hermeval) + _int = staticmethod(hermeint) + _der = staticmethod(hermeder) + _fit = staticmethod(hermefit) + _line = staticmethod(hermeline) + _roots = staticmethod(hermeroots) + _fromroots = staticmethod(hermefromroots) + + # Virtual properties + domain = np.array(hermedomain) + window = np.array(hermedomain) + basis_name = 'He' diff --git a/local_packages/numpy/polynomial/hermite_e.pyi b/local_packages/numpy/polynomial/hermite_e.pyi new file mode 100644 index 0000000..b996de5 --- /dev/null +++ b/local_packages/numpy/polynomial/hermite_e.pyi @@ -0,0 +1,106 @@ +from typing import Any, ClassVar, Final, Literal as L, TypeVar + +import numpy as np +from numpy._typing import _Shape + +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncGauss, + _FuncInteg, + _FuncLine, + _FuncPoly2Ortho, + _FuncPow, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncVander, + _FuncVander2D, + _FuncVander3D, + _FuncWeight, +) +from .polyutils import trimcoef as hermetrim + +__all__ = [ + "hermezero", + "hermeone", + "hermex", + "hermedomain", + "hermeline", + "hermeadd", + "hermesub", + "hermemulx", + "hermemul", + "hermediv", + "hermepow", + "hermeval", + "hermeder", + "hermeint", + "herme2poly", + "poly2herme", + "hermefromroots", + "hermevander", + "hermefit", + "hermetrim", + "hermeroots", + "HermiteE", + "hermeval2d", + "hermeval3d", + "hermegrid2d", + "hermegrid3d", + "hermevander2d", + "hermevander3d", + "hermecompanion", + "hermegauss", + "hermeweight", +] + +_ShapeT = TypeVar("_ShapeT", bound=_Shape) + +poly2herme: Final[_FuncPoly2Ortho] = ... +herme2poly: Final[_FuncUnOp] = ... + +hermedomain: Final[_Array2[np.float64]] = ... +hermezero: Final[_Array1[np.int_]] = ... +hermeone: Final[_Array1[np.int_]] = ... +hermex: Final[_Array2[np.int_]] = ... + +hermeline: Final[_FuncLine] = ... +hermefromroots: Final[_FuncFromRoots] = ... +hermeadd: Final[_FuncBinOp] = ... +hermesub: Final[_FuncBinOp] = ... +hermemulx: Final[_FuncUnOp] = ... +hermemul: Final[_FuncBinOp] = ... +hermediv: Final[_FuncBinOp] = ... +hermepow: Final[_FuncPow] = ... +hermeder: Final[_FuncDer] = ... +hermeint: Final[_FuncInteg] = ... +hermeval: Final[_FuncVal] = ... +hermeval2d: Final[_FuncVal2D] = ... +hermeval3d: Final[_FuncVal3D] = ... +hermegrid2d: Final[_FuncVal2D] = ... +hermegrid3d: Final[_FuncVal3D] = ... +hermevander: Final[_FuncVander] = ... +hermevander2d: Final[_FuncVander2D] = ... +hermevander3d: Final[_FuncVander3D] = ... +hermefit: Final[_FuncFit] = ... +hermecompanion: Final[_FuncCompanion] = ... +hermeroots: Final[_FuncRoots] = ... + +def _normed_hermite_e_n(x: np.ndarray[_ShapeT, np.dtype[np.float64]], n: int) -> np.ndarray[_ShapeT, np.dtype[np.float64]]: ... + +hermegauss: Final[_FuncGauss] = ... +hermeweight: Final[_FuncWeight] = ... + +class HermiteE(ABCPolyBase[L["He"]]): + basis_name: ClassVar[L["He"]] = "He" # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/local_packages/numpy/polynomial/laguerre.py b/local_packages/numpy/polynomial/laguerre.py new file mode 100644 index 0000000..b1d87bf --- /dev/null +++ b/local_packages/numpy/polynomial/laguerre.py @@ -0,0 +1,1673 @@ +""" +================================================== +Laguerre Series (:mod:`numpy.polynomial.laguerre`) +================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Laguerre series, including a `Laguerre` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + Laguerre + +Constants +--------- +.. autosummary:: + :toctree: generated/ + + lagdomain + lagzero + lagone + lagx + +Arithmetic +---------- +.. autosummary:: + :toctree: generated/ + + lagadd + lagsub + lagmulx + lagmul + lagdiv + lagpow + lagval + lagval2d + lagval3d + laggrid2d + laggrid3d + +Calculus +-------- +.. autosummary:: + :toctree: generated/ + + lagder + lagint + +Misc Functions +-------------- +.. autosummary:: + :toctree: generated/ + + lagfromroots + lagroots + lagvander + lagvander2d + lagvander3d + laggauss + lagweight + lagcompanion + lagfit + lagtrim + lagline + lag2poly + poly2lag + +See also +-------- +`numpy.polynomial` + +""" +import numpy as np + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'lagzero', 'lagone', 'lagx', 'lagdomain', 'lagline', 'lagadd', + 'lagsub', 'lagmulx', 'lagmul', 'lagdiv', 'lagpow', 'lagval', 'lagder', + 'lagint', 'lag2poly', 'poly2lag', 'lagfromroots', 'lagvander', + 'lagfit', 'lagtrim', 'lagroots', 'Laguerre', 'lagval2d', 'lagval3d', + 'laggrid2d', 'laggrid3d', 'lagvander2d', 'lagvander3d', 'lagcompanion', + 'laggauss', 'lagweight'] + +lagtrim = pu.trimcoef + + +def poly2lag(pol): + """ + poly2lag(pol) + + Convert a polynomial to a Laguerre series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Laguerre series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Laguerre + series. + + See Also + -------- + lag2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial.laguerre import poly2lag + >>> poly2lag(np.arange(4)) + array([ 23., -63., 58., -18.]) + + """ + [pol] = pu.as_series([pol]) + res = 0 + for p in pol[::-1]: + res = lagadd(lagmulx(res), p) + return res + + +def lag2poly(c): + """ + Convert a Laguerre series to a polynomial. + + Convert an array representing the coefficients of a Laguerre series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Laguerre series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2lag + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lag2poly + >>> lag2poly([ 23., -63., 58., -18.]) + array([0., 1., 2., 3.]) + + """ + from .polynomial import polyadd, polymulx, polysub + + [c] = pu.as_series([c]) + n = len(c) + if n == 1: + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], (c1 * (i - 1)) / i) + c1 = polyadd(tmp, polysub((2 * i - 1) * c1, polymulx(c1)) / i) + return polyadd(c0, polysub(c1, polymulx(c1))) + + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Laguerre +lagdomain = np.array([0., 1.]) + +# Laguerre coefficients representing zero. +lagzero = np.array([0]) + +# Laguerre coefficients representing one. +lagone = np.array([1]) + +# Laguerre coefficients representing the identity x. +lagx = np.array([1, -1]) + + +def lagline(off, scl): + """ + Laguerre series whose graph is a straight line. + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Laguerre series for + ``off + scl*x``. + + See Also + -------- + numpy.polynomial.polynomial.polyline + numpy.polynomial.chebyshev.chebline + numpy.polynomial.legendre.legline + numpy.polynomial.hermite.hermline + numpy.polynomial.hermite_e.hermeline + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagline, lagval + >>> lagval(0,lagline(3, 2)) + 3.0 + >>> lagval(1,lagline(3, 2)) + 5.0 + + """ + if scl != 0: + return np.array([off + scl, -scl]) + else: + return np.array([off]) + + +def lagfromroots(roots): + """ + Generate a Laguerre series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in Laguerre form, where the :math:`r_n` are the roots specified in `roots`. + If a zero has multiplicity n, then it must appear in `roots` n times. + For instance, if 2 is a root of multiplicity three and 3 is a root of + multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The + roots can appear in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in Laguerre form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients. If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + numpy.polynomial.polynomial.polyfromroots + numpy.polynomial.legendre.legfromroots + numpy.polynomial.chebyshev.chebfromroots + numpy.polynomial.hermite.hermfromroots + numpy.polynomial.hermite_e.hermefromroots + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagfromroots, lagval + >>> coef = lagfromroots((-1, 0, 1)) + >>> lagval((-1, 0, 1), coef) + array([0., 0., 0.]) + >>> coef = lagfromroots((-1j, 1j)) + >>> lagval((-1j, 1j), coef) + array([0.+0.j, 0.+0.j]) + + """ + return pu._fromroots(lagline, lagmul, roots) + + +def lagadd(c1, c2): + """ + Add one Laguerre series to another. + + Returns the sum of two Laguerre series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Laguerre series of their sum. + + See Also + -------- + lagsub, lagmulx, lagmul, lagdiv, lagpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Laguerre series + is a Laguerre series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagadd + >>> lagadd([1, 2, 3], [1, 2, 3, 4]) + array([2., 4., 6., 4.]) + + """ + return pu._add(c1, c2) + + +def lagsub(c1, c2): + """ + Subtract one Laguerre series from another. + + Returns the difference of two Laguerre series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Laguerre series coefficients representing their difference. + + See Also + -------- + lagadd, lagmulx, lagmul, lagdiv, lagpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Laguerre + series is a Laguerre series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagsub + >>> lagsub([1, 2, 3, 4], [1, 2, 3]) + array([0., 0., 0., 4.]) + + """ + return pu._sub(c1, c2) + + +def lagmulx(c): + """Multiply a Laguerre series by x. + + Multiply the Laguerre series `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + lagadd, lagsub, lagmul, lagdiv, lagpow + + Notes + ----- + The multiplication uses the recursion relationship for Laguerre + polynomials in the form + + .. math:: + + xP_i(x) = (-(i + 1)*P_{i + 1}(x) + (2i + 1)P_{i}(x) - iP_{i - 1}(x)) + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagmulx + >>> lagmulx([1, 2, 3]) + array([-1., -1., 11., -9.]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0] + prd[1] = -c[0] + for i in range(1, len(c)): + prd[i + 1] = -c[i] * (i + 1) + prd[i] += c[i] * (2 * i + 1) + prd[i - 1] -= c[i] * i + return prd + + +def lagmul(c1, c2): + """ + Multiply one Laguerre series by another. + + Returns the product of two Laguerre series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Laguerre series coefficients representing their product. + + See Also + -------- + lagadd, lagsub, lagmulx, lagdiv, lagpow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Laguerre polynomial basis set. Thus, to express + the product as a Laguerre series, it is necessary to "reproject" the + product onto said basis set, which may produce "unintuitive" (but + correct) results; see Examples section below. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagmul + >>> lagmul([1, 2, 3], [0, 1, 2]) + array([ 8., -13., 38., -51., 36.]) + + """ + # s1, s2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + + if len(c1) > len(c2): + c = c2 + xs = c1 + else: + c = c1 + xs = c2 + + if len(c) == 1: + c0 = c[0] * xs + c1 = 0 + elif len(c) == 2: + c0 = c[0] * xs + c1 = c[1] * xs + else: + nd = len(c) + c0 = c[-2] * xs + c1 = c[-1] * xs + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = lagsub(c[-i] * xs, (c1 * (nd - 1)) / nd) + c1 = lagadd(tmp, lagsub((2 * nd - 1) * c1, lagmulx(c1)) / nd) + return lagadd(c0, lagsub(c1, lagmulx(c1))) + + +def lagdiv(c1, c2): + """ + Divide one Laguerre series by another. + + Returns the quotient-with-remainder of two Laguerre series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + [quo, rem] : ndarrays + Of Laguerre series coefficients representing the quotient and + remainder. + + See Also + -------- + lagadd, lagsub, lagmulx, lagmul, lagpow + + Notes + ----- + In general, the (polynomial) division of one Laguerre series by another + results in quotient and remainder terms that are not in the Laguerre + polynomial basis set. Thus, to express these results as a Laguerre + series, it is necessary to "reproject" the results onto the Laguerre + basis set, which may produce "unintuitive" (but correct) results; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagdiv + >>> lagdiv([ 8., -13., 38., -51., 36.], [0, 1, 2]) + (array([1., 2., 3.]), array([0.])) + >>> lagdiv([ 9., -12., 38., -51., 36.], [0, 1, 2]) + (array([1., 2., 3.]), array([1., 1.])) + + """ + return pu._div(lagmul, c1, c2) + + +def lagpow(c, pow, maxpower=16): + """Raise a Laguerre series to a power. + + Returns the Laguerre series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Laguerre series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Laguerre series of power. + + See Also + -------- + lagadd, lagsub, lagmulx, lagmul, lagdiv + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagpow + >>> lagpow([1, 2, 3], 2) + array([ 14., -16., 56., -72., 54.]) + + """ + return pu._pow(lagmul, c, pow, maxpower) + + +def lagder(c, m=1, scl=1, axis=0): + """ + Differentiate a Laguerre series. + + Returns the Laguerre series coefficients `c` differentiated `m` times + along `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2`` + while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + + 2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Laguerre series coefficients. If `c` is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + Returns + ------- + der : ndarray + Laguerre series of the derivative. + + See Also + -------- + lagint + + Notes + ----- + In general, the result of differentiating a Laguerre series does not + resemble the same operation on a power series. Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagder + >>> lagder([ 1., 1., 1., -3.]) + array([1., 2., 3.]) + >>> lagder([ 1., 0., 0., -4., 3.], m=2) + array([1., 2., 3.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + + cnt = pu._as_int(m, "the order of derivation") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1] * 0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 1, -1): + der[j - 1] = -c[j] + c[j - 1] += c[j] + der[0] = -c[1] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Laguerre series. + + Returns the Laguerre series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]] + represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) + + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + + Parameters + ---------- + c : array_like + Array of Laguerre series coefficients. If `c` is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + Returns + ------- + S : ndarray + Laguerre series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + lagder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagint + >>> lagint([1,2,3]) + array([ 1., 1., 1., -3.]) + >>> lagint([1,2,3], m=2) + array([ 1., 0., 0., -4., 3.]) + >>> lagint([1,2,3], k=1) + array([ 2., 1., 1., -3.]) + >>> lagint([1,2,3], lbnd=-1) + array([11.5, 1. , 1. , -3. ]) + >>> lagint([1,2], m=2, k=[1,2], lbnd=-1) + array([ 11.16666667, -5. , -3. , 2. ]) # may vary + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt = pu._as_int(m, "the order of integration") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0] * (cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0] + tmp[1] = -c[0] + for j in range(1, n): + tmp[j] += c[j] + tmp[j + 1] = -c[j] + tmp[0] += k[i] - lagval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def lagval(x, c, tensor=True): + """ + Evaluate a Laguerre series at points x. + + If `c` is of length ``n + 1``, this function returns the value: + + .. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then ``p(x)`` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + lagval2d, laggrid2d, lagval3d, laggrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagval + >>> coef = [1, 2, 3] + >>> lagval(1, coef) + -0.5 + >>> lagval([[1, 2],[3, 4]], coef) + array([[-0.5, -4. ], + [-4.5, -2. ]]) + + """ + c = np.array(c, ndmin=1, copy=None) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,) * x.ndim) + + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + nd = len(c) + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = c[-i] - (c1 * (nd - 1)) / nd + c1 = tmp + (c1 * ((2 * nd - 1) - x)) / nd + return c0 + c1 * (1 - x) + + +def lagval2d(x, y, c): + """ + Evaluate a 2-D Laguerre series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points ``(x, y)``, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points formed with + pairs of corresponding values from `x` and `y`. + + See Also + -------- + lagval, laggrid2d, lagval3d, laggrid3d + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagval2d + >>> c = [[1, 2],[3, 4]] + >>> lagval2d(1, 1, c) + 1.0 + """ + return pu._valnd(lagval, c, x, y) + + +def laggrid2d(x, y, c): + """ + Evaluate a 2-D Laguerre series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b) + + where the points ``(a, b)`` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape + y.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j is contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional Chebyshev series at points in the + Cartesian product of `x` and `y`. + + See Also + -------- + lagval, lagval2d, lagval3d, laggrid3d + + Examples + -------- + >>> from numpy.polynomial.laguerre import laggrid2d + >>> c = [[1, 2], [3, 4]] + >>> laggrid2d([0, 1], [0, 1], c) + array([[10., 4.], + [ 3., 1.]]) + + """ + return pu._gridnd(lagval, c, x, y) + + +def lagval3d(x, y, z, c): + """ + Evaluate a 3-D Laguerre series at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z) + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or a lists, otherwise they are treated as a scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + ``(x, y, z)``, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + lagval, lagval2d, laggrid2d, laggrid3d + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagval3d + >>> c = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] + >>> lagval3d(1, 1, 2, c) + -1.0 + + """ + return pu._valnd(lagval, c, x, y, z) + + +def laggrid3d(x, y, z, c): + """ + Evaluate a 3-D Laguerre series on the Cartesian product of x, y, and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c) + + where the points ``(a, b, c)`` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. + + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or a lists, otherwise they are treated as a scalars. In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. + + If `c` has fewer than three dimensions, ones are implicitly appended to + its shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape + y.shape + z.shape. + + Parameters + ---------- + x, y, z : array_like, compatible objects + The three dimensional series is evaluated at the points in the + Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a + list or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and, if it isn't an ndarray, it is treated as a + scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + lagval, lagval2d, laggrid2d, lagval3d + + Examples + -------- + >>> from numpy.polynomial.laguerre import laggrid3d + >>> c = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] + >>> laggrid3d([0, 1], [0, 1], [2, 4], c) + array([[[ -4., -44.], + [ -2., -18.]], + [[ -2., -14.], + [ -1., -5.]]]) + + """ + return pu._gridnd(lagval, c, x, y, z) + + +def lagvander(x, deg): + """Pseudo-Vandermonde matrix of given degree. + + Returns the pseudo-Vandermonde matrix of degree `deg` and sample points + `x`. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., i] = L_i(x) + + where ``0 <= i <= deg``. The leading indices of `V` index the elements of + `x` and the last index is the degree of the Laguerre polynomial. + + If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is the + array ``V = lagvander(x, n)``, then ``np.dot(V, c)`` and + ``lagval(x, c)`` are the same up to roundoff. This equivalence is + useful both for least squares fitting and for the evaluation of a large + number of Laguerre series of the same degree and sample points. + + Parameters + ---------- + x : array_like + Array of points. The dtype is converted to float64 or complex128 + depending on whether any of the elements are complex. If `x` is + scalar it is converted to a 1-D array. + deg : int + Degree of the resulting matrix. + + Returns + ------- + vander : ndarray + The pseudo-Vandermonde matrix. The shape of the returned matrix is + ``x.shape + (deg + 1,)``, where The last index is the degree of the + corresponding Laguerre polynomial. The dtype will be the same as + the converted `x`. + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial.laguerre import lagvander + >>> x = np.array([0, 1, 2]) + >>> lagvander(x, 3) + array([[ 1. , 1. , 1. , 1. ], + [ 1. , 0. , -0.5 , -0.66666667], + [ 1. , -1. , -1. , -0.33333333]]) + + """ + ideg = pu._as_int(deg, "deg") + if ideg < 0: + raise ValueError("deg must be non-negative") + + x = np.array(x, copy=None, ndmin=1) + 0.0 + dims = (ideg + 1,) + x.shape + dtyp = x.dtype + v = np.empty(dims, dtype=dtyp) + v[0] = x * 0 + 1 + if ideg > 0: + v[1] = 1 - x + for i in range(2, ideg + 1): + v[i] = (v[i - 1] * (2 * i - 1 - x) - v[i - 2] * (i - 1)) / i + return np.moveaxis(v, 0, -1) + + +def lagvander2d(x, y, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y)``. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y), + + where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading indices of + `V` index the points ``(x, y)`` and the last index encodes the degrees of + the Laguerre polynomials. + + If ``V = lagvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` + correspond to the elements of a 2-D coefficient array `c` of shape + (xdeg + 1, ydeg + 1) in the order + + .. math:: c_{00}, c_{01}, c_{02}, ... , c_{10}, c_{11}, c_{12}, ... + + and ``np.dot(V, c.flat)`` and ``lagval2d(x, y, c)`` will be the same + up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 2-D Laguerre + series of the same degrees and sample points. + + Parameters + ---------- + x, y : array_like + Arrays of point coordinates, all of the same shape. The dtypes + will be converted to either float64 or complex128 depending on + whether any of the elements are complex. Scalars are converted to + 1-D arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg]. + + Returns + ------- + vander2d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same + as the converted `x` and `y`. + + See Also + -------- + lagvander, lagvander3d, lagval2d, lagval3d + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial.laguerre import lagvander2d + >>> x = np.array([0]) + >>> y = np.array([2]) + >>> lagvander2d(x, y, [2, 1]) + array([[ 1., -1., 1., -1., 1., -1.]]) + + """ + return pu._vander_nd_flat((lagvander, lagvander), (x, y), deg) + + +def lagvander3d(x, y, z, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y, z)``. If `l`, `m`, `n` are the given degrees in `x`, `y`, `z`, + then The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z), + + where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= j <= n``. The leading + indices of `V` index the points ``(x, y, z)`` and the last index encodes + the degrees of the Laguerre polynomials. + + If ``V = lagvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns + of `V` correspond to the elements of a 3-D coefficient array `c` of + shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order + + .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... + + and ``np.dot(V, c.flat)`` and ``lagval3d(x, y, z, c)`` will be the + same up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 3-D Laguerre + series of the same degrees and sample points. + + Parameters + ---------- + x, y, z : array_like + Arrays of point coordinates, all of the same shape. The dtypes will + be converted to either float64 or complex128 depending on whether + any of the elements are complex. Scalars are converted to 1-D + arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg, z_deg]. + + Returns + ------- + vander3d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will + be the same as the converted `x`, `y`, and `z`. + + See Also + -------- + lagvander, lagvander3d, lagval2d, lagval3d + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial.laguerre import lagvander3d + >>> x = np.array([0]) + >>> y = np.array([2]) + >>> z = np.array([0]) + >>> lagvander3d(x, y, z, [2, 1, 3]) + array([[ 1., 1., 1., 1., -1., -1., -1., -1., 1., 1., 1., 1., -1., + -1., -1., -1., 1., 1., 1., 1., -1., -1., -1., -1.]]) + + """ + return pu._vander_nd_flat((lagvander, lagvander, lagvander), (x, y, z), deg) + + +def lagfit(x, y, deg, rcond=None, full=False, w=None): + """ + Least squares fit of Laguerre series to data. + + Return the coefficients of a Laguerre series of degree `deg` that is the + least squares fit to the data values `y` given at points `x`. If `y` is + 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple + fits are done, one for each column of `y`, and the resulting + coefficients are stored in the corresponding columns of a 2-D return. + The fitted polynomial(s) are in the form + + .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x), + + where ``n`` is `deg`. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Laguerre coefficients ordered from low to high. If `y` was 2-D, + the coefficients for the data in column *k* of `y` are in column + *k*. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if ``full == False``. The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.exceptions.RankWarning) + + See Also + -------- + numpy.polynomial.polynomial.polyfit + numpy.polynomial.legendre.legfit + numpy.polynomial.chebyshev.chebfit + numpy.polynomial.hermite.hermfit + numpy.polynomial.hermite_e.hermefit + lagval : Evaluates a Laguerre series. + lagvander : pseudo Vandermonde matrix of Laguerre series. + lagweight : Laguerre weight function. + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the Laguerre series ``p`` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where the :math:`w_j` are the weights. This problem is solved by + setting up as the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where ``V`` is the weighted pseudo Vandermonde matrix of `x`, ``c`` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of ``V``. + + If some of the singular values of `V` are so small that they are + neglected, then a `~exceptions.RankWarning` will be issued. This means that + the coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using Laguerre series are probably most useful when the data can + be approximated by ``sqrt(w(x)) * p(x)``, where ``w(x)`` is the Laguerre + weight. In that case the weight ``sqrt(w(x[i]))`` should be used + together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is + available as `lagweight`. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial.laguerre import lagfit, lagval + >>> x = np.linspace(0, 10) + >>> rng = np.random.default_rng() + >>> err = rng.normal(scale=1./10, size=len(x)) + >>> y = lagval(x, [1, 2, 3]) + err + >>> lagfit(x, y, 2) + array([1.00578369, 1.99417356, 2.99827656]) # may vary + + """ + return pu._fit(lagvander, x, y, deg, rcond, full, w) + + +def lagcompanion(c): + """ + Return the companion matrix of c. + + The usual companion matrix of the Laguerre polynomials is already + symmetric when `c` is a basis Laguerre polynomial, so no scaling is + applied. + + Parameters + ---------- + c : array_like + 1-D array of Laguerre series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Companion matrix of dimensions (deg, deg). + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagcompanion + >>> lagcompanion([1, 2, 3]) + array([[ 1. , -0.33333333], + [-1. , 4.33333333]]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[1 + c[0] / c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + top = mat.reshape(-1)[1::n + 1] + mid = mat.reshape(-1)[0::n + 1] + bot = mat.reshape(-1)[n::n + 1] + top[...] = -np.arange(1, n) + mid[...] = 2. * np.arange(n) + 1. + bot[...] = top + mat[:, -1] += (c[:-1] / c[-1]) * n + return mat + + +def lagroots(c): + """ + Compute the roots of a Laguerre series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * L_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the series. If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + numpy.polynomial.polynomial.polyroots + numpy.polynomial.legendre.legroots + numpy.polynomial.chebyshev.chebroots + numpy.polynomial.hermite.hermroots + numpy.polynomial.hermite_e.hermeroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix, Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the series for such + values. Roots with multiplicity greater than 1 will also show larger + errors as the value of the series near such points is relatively + insensitive to errors in the roots. Isolated roots near the origin can + be improved by a few iterations of Newton's method. + + The Laguerre series basis polynomials aren't powers of `x` so the + results of this function may seem unintuitive. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagroots, lagfromroots + >>> coef = lagfromroots([0, 1, 2]) + >>> coef + array([ 2., -8., 12., -6.]) + >>> lagroots(coef) + array([-4.4408921e-16, 1.0000000e+00, 2.0000000e+00]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) <= 1: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([1 + c[0] / c[1]]) + + # rotated companion matrix reduces error + m = lagcompanion(c)[::-1, ::-1] + r = np.linalg.eigvals(m) + r.sort() + return r + + +def laggauss(deg): + """ + Gauss-Laguerre quadrature. + + Computes the sample points and weights for Gauss-Laguerre quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[0, \\inf]` + with the weight function :math:`f(x) = \\exp(-x)`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + The results have only been tested up to degree 100 higher degrees may + be problematic. The weights are determined by using the fact that + + .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k)) + + where :math:`c` is a constant independent of :math:`k` and :math:`x_k` + is the k'th root of :math:`L_n`, and then scaling the results to get + the right value when integrating 1. + + Examples + -------- + >>> from numpy.polynomial.laguerre import laggauss + >>> laggauss(2) + (array([0.58578644, 3.41421356]), array([0.85355339, 0.14644661])) + + """ + ideg = pu._as_int(deg, "deg") + if ideg <= 0: + raise ValueError("deg must be a positive integer") + + # first approximation of roots. We use the fact that the companion + # matrix is symmetric in this case in order to obtain better zeros. + c = np.array([0] * deg + [1]) + m = lagcompanion(c) + x = np.linalg.eigvalsh(m) + + # improve roots by one application of Newton + dy = lagval(x, c) + df = lagval(x, lagder(c)) + x -= dy / df + + # compute the weights. We scale the factor to avoid possible numerical + # overflow. + fm = lagval(x, c[1:]) + fm /= np.abs(fm).max() + df /= np.abs(df).max() + w = 1 / (fm * df) + + # scale w to get the right value, 1 in this case + w /= w.sum() + + return x, w + + +def lagweight(x): + """Weight function of the Laguerre polynomials. + + The weight function is :math:`exp(-x)` and the interval of integration + is :math:`[0, \\inf]`. The Laguerre polynomials are orthogonal, but not + normalized, with respect to this weight function. + + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. + + Returns + ------- + w : ndarray + The weight function at `x`. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagweight + >>> x = np.array([0, 1, 2]) + >>> lagweight(x) + array([1. , 0.36787944, 0.13533528]) + + """ + w = np.exp(-x) + return w + +# +# Laguerre series class +# + +class Laguerre(ABCPolyBase): + """A Laguerre series class. + + The Laguerre class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed below. + + Parameters + ---------- + coef : array_like + Laguerre coefficients in order of increasing degree, i.e, + ``(1, 2, 3)`` gives ``1*L_0(x) + 2*L_1(X) + 3*L_2(x)``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [0., 1.]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [0., 1.]. + symbol : str, optional + Symbol used to represent the independent variable in string + representations of the polynomial expression, e.g. for printing. + The symbol must be a valid Python identifier. Default value is 'x'. + + .. versionadded:: 1.24 + + """ + # Virtual Functions + _add = staticmethod(lagadd) + _sub = staticmethod(lagsub) + _mul = staticmethod(lagmul) + _div = staticmethod(lagdiv) + _pow = staticmethod(lagpow) + _val = staticmethod(lagval) + _int = staticmethod(lagint) + _der = staticmethod(lagder) + _fit = staticmethod(lagfit) + _line = staticmethod(lagline) + _roots = staticmethod(lagroots) + _fromroots = staticmethod(lagfromroots) + + # Virtual properties + domain = np.array(lagdomain) + window = np.array(lagdomain) + basis_name = 'L' diff --git a/local_packages/numpy/polynomial/laguerre.pyi b/local_packages/numpy/polynomial/laguerre.pyi new file mode 100644 index 0000000..8b70b89 --- /dev/null +++ b/local_packages/numpy/polynomial/laguerre.pyi @@ -0,0 +1,100 @@ +from typing import Any, ClassVar, Final, Literal as L + +import numpy as np + +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncGauss, + _FuncInteg, + _FuncLine, + _FuncPoly2Ortho, + _FuncPow, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncVander, + _FuncVander2D, + _FuncVander3D, + _FuncWeight, +) +from .polyutils import trimcoef as lagtrim + +__all__ = [ + "lagzero", + "lagone", + "lagx", + "lagdomain", + "lagline", + "lagadd", + "lagsub", + "lagmulx", + "lagmul", + "lagdiv", + "lagpow", + "lagval", + "lagder", + "lagint", + "lag2poly", + "poly2lag", + "lagfromroots", + "lagvander", + "lagfit", + "lagtrim", + "lagroots", + "Laguerre", + "lagval2d", + "lagval3d", + "laggrid2d", + "laggrid3d", + "lagvander2d", + "lagvander3d", + "lagcompanion", + "laggauss", + "lagweight", +] + +poly2lag: Final[_FuncPoly2Ortho] = ... +lag2poly: Final[_FuncUnOp] = ... + +lagdomain: Final[_Array2[np.float64]] = ... +lagzero: Final[_Array1[np.int_]] = ... +lagone: Final[_Array1[np.int_]] = ... +lagx: Final[_Array2[np.int_]] = ... + +lagline: Final[_FuncLine] = ... +lagfromroots: Final[_FuncFromRoots] = ... +lagadd: Final[_FuncBinOp] = ... +lagsub: Final[_FuncBinOp] = ... +lagmulx: Final[_FuncUnOp] = ... +lagmul: Final[_FuncBinOp] = ... +lagdiv: Final[_FuncBinOp] = ... +lagpow: Final[_FuncPow] = ... +lagder: Final[_FuncDer] = ... +lagint: Final[_FuncInteg] = ... +lagval: Final[_FuncVal] = ... +lagval2d: Final[_FuncVal2D] = ... +lagval3d: Final[_FuncVal3D] = ... +laggrid2d: Final[_FuncVal2D] = ... +laggrid3d: Final[_FuncVal3D] = ... +lagvander: Final[_FuncVander] = ... +lagvander2d: Final[_FuncVander2D] = ... +lagvander3d: Final[_FuncVander3D] = ... +lagfit: Final[_FuncFit] = ... +lagcompanion: Final[_FuncCompanion] = ... +lagroots: Final[_FuncRoots] = ... +laggauss: Final[_FuncGauss] = ... +lagweight: Final[_FuncWeight] = ... + +class Laguerre(ABCPolyBase[L["L"]]): + basis_name: ClassVar[L["L"]] = "L" # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/local_packages/numpy/polynomial/legendre.py b/local_packages/numpy/polynomial/legendre.py new file mode 100644 index 0000000..237e340 --- /dev/null +++ b/local_packages/numpy/polynomial/legendre.py @@ -0,0 +1,1603 @@ +""" +================================================== +Legendre Series (:mod:`numpy.polynomial.legendre`) +================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Legendre series, including a `Legendre` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + Legendre + +Constants +--------- + +.. autosummary:: + :toctree: generated/ + + legdomain + legzero + legone + legx + +Arithmetic +---------- + +.. autosummary:: + :toctree: generated/ + + legadd + legsub + legmulx + legmul + legdiv + legpow + legval + legval2d + legval3d + leggrid2d + leggrid3d + +Calculus +-------- + +.. autosummary:: + :toctree: generated/ + + legder + legint + +Misc Functions +-------------- + +.. autosummary:: + :toctree: generated/ + + legfromroots + legroots + legvander + legvander2d + legvander3d + leggauss + legweight + legcompanion + legfit + legtrim + legline + leg2poly + poly2leg + +See also +-------- +numpy.polynomial + +""" +import numpy as np + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'legzero', 'legone', 'legx', 'legdomain', 'legline', 'legadd', + 'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', 'legval', 'legder', + 'legint', 'leg2poly', 'poly2leg', 'legfromroots', 'legvander', + 'legfit', 'legtrim', 'legroots', 'Legendre', 'legval2d', 'legval3d', + 'leggrid2d', 'leggrid3d', 'legvander2d', 'legvander3d', 'legcompanion', + 'leggauss', 'legweight'] + +legtrim = pu.trimcoef + + +def poly2leg(pol): + """ + Convert a polynomial to a Legendre series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Legendre series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Legendre + series. + + See Also + -------- + leg2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> import numpy as np + >>> from numpy import polynomial as P + >>> p = P.Polynomial(np.arange(4)) + >>> p + Polynomial([0., 1., 2., 3.], domain=[-1., 1.], window=[-1., 1.], ... + >>> c = P.Legendre(P.legendre.poly2leg(p.coef)) + >>> c + Legendre([ 1. , 3.25, 1. , 0.75], domain=[-1, 1], window=[-1, 1]) # may vary + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = legadd(legmulx(res), pol[i]) + return res + + +def leg2poly(c): + """ + Convert a Legendre series to a polynomial. + + Convert an array representing the coefficients of a Legendre series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Legendre series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2leg + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy import polynomial as P + >>> c = P.Legendre(range(4)) + >>> c + Legendre([0., 1., 2., 3.], domain=[-1., 1.], window=[-1., 1.], symbol='x') + >>> p = c.convert(kind=P.Polynomial) + >>> p + Polynomial([-1. , -3.5, 3. , 7.5], domain=[-1., 1.], window=[-1., ... + >>> P.legendre.leg2poly(range(4)) + array([-1. , -3.5, 3. , 7.5]) + + + """ + from .polynomial import polyadd, polymulx, polysub + + [c] = pu.as_series([c]) + n = len(c) + if n < 3: + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], (c1 * (i - 1)) / i) + c1 = polyadd(tmp, (polymulx(c1) * (2 * i - 1)) / i) + return polyadd(c0, polymulx(c1)) + + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Legendre +legdomain = np.array([-1., 1.]) + +# Legendre coefficients representing zero. +legzero = np.array([0]) + +# Legendre coefficients representing one. +legone = np.array([1]) + +# Legendre coefficients representing the identity x. +legx = np.array([0, 1]) + + +def legline(off, scl): + """ + Legendre series whose graph is a straight line. + + + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Legendre series for + ``off + scl*x``. + + See Also + -------- + numpy.polynomial.polynomial.polyline + numpy.polynomial.chebyshev.chebline + numpy.polynomial.laguerre.lagline + numpy.polynomial.hermite.hermline + numpy.polynomial.hermite_e.hermeline + + Examples + -------- + >>> import numpy.polynomial.legendre as L + >>> L.legline(3,2) + array([3, 2]) + >>> L.legval(-3, L.legline(3,2)) # should be -3 + -3.0 + + """ + if scl != 0: + return np.array([off, scl]) + else: + return np.array([off]) + + +def legfromroots(roots): + """ + Generate a Legendre series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in Legendre form, where the :math:`r_n` are the roots specified in `roots`. + If a zero has multiplicity n, then it must appear in `roots` n times. + For instance, if 2 is a root of multiplicity three and 3 is a root of + multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The + roots can appear in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in Legendre form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients. If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + numpy.polynomial.polynomial.polyfromroots + numpy.polynomial.chebyshev.chebfromroots + numpy.polynomial.laguerre.lagfromroots + numpy.polynomial.hermite.hermfromroots + numpy.polynomial.hermite_e.hermefromroots + + Examples + -------- + >>> import numpy.polynomial.legendre as L + >>> L.legfromroots((-1,0,1)) # x^3 - x relative to the standard basis + array([ 0. , -0.4, 0. , 0.4]) + >>> j = complex(0,1) + >>> L.legfromroots((-j,j)) # x^2 + 1 relative to the standard basis + array([ 1.33333333+0.j, 0.00000000+0.j, 0.66666667+0.j]) # may vary + + """ + return pu._fromroots(legline, legmul, roots) + + +def legadd(c1, c2): + """ + Add one Legendre series to another. + + Returns the sum of two Legendre series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Legendre series of their sum. + + See Also + -------- + legsub, legmulx, legmul, legdiv, legpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Legendre series + is a Legendre series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> L.legadd(c1,c2) + array([4., 4., 4.]) + + """ + return pu._add(c1, c2) + + +def legsub(c1, c2): + """ + Subtract one Legendre series from another. + + Returns the difference of two Legendre series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Legendre series coefficients representing their difference. + + See Also + -------- + legadd, legmulx, legmul, legdiv, legpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Legendre + series is a Legendre series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> L.legsub(c1,c2) + array([-2., 0., 2.]) + >>> L.legsub(c2,c1) # -C.legsub(c1,c2) + array([ 2., 0., -2.]) + + """ + return pu._sub(c1, c2) + + +def legmulx(c): + """Multiply a Legendre series by x. + + Multiply the Legendre series `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + legadd, legsub, legmul, legdiv, legpow + + Notes + ----- + The multiplication uses the recursion relationship for Legendre + polynomials in the form + + .. math:: + + xP_i(x) = ((i + 1)*P_{i + 1}(x) + i*P_{i - 1}(x))/(2i + 1) + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> L.legmulx([1,2,3]) + array([ 0.66666667, 2.2, 1.33333333, 1.8]) # may vary + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0] * 0 + prd[1] = c[0] + for i in range(1, len(c)): + j = i + 1 + k = i - 1 + s = i + j + prd[j] = (c[i] * j) / s + prd[k] += (c[i] * i) / s + return prd + + +def legmul(c1, c2): + """ + Multiply one Legendre series by another. + + Returns the product of two Legendre series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Legendre series coefficients representing their product. + + See Also + -------- + legadd, legsub, legmulx, legdiv, legpow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Legendre polynomial basis set. Thus, to express + the product as a Legendre series, it is necessary to "reproject" the + product onto said basis set, which may produce "unintuitive" (but + correct) results; see Examples section below. + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2) + >>> L.legmul(c1,c2) # multiplication requires "reprojection" + array([ 4.33333333, 10.4 , 11.66666667, 3.6 ]) # may vary + + """ + # s1, s2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + + if len(c1) > len(c2): + c = c2 + xs = c1 + else: + c = c1 + xs = c2 + + if len(c) == 1: + c0 = c[0] * xs + c1 = 0 + elif len(c) == 2: + c0 = c[0] * xs + c1 = c[1] * xs + else: + nd = len(c) + c0 = c[-2] * xs + c1 = c[-1] * xs + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = legsub(c[-i] * xs, (c1 * (nd - 1)) / nd) + c1 = legadd(tmp, (legmulx(c1) * (2 * nd - 1)) / nd) + return legadd(c0, legmulx(c1)) + + +def legdiv(c1, c2): + """ + Divide one Legendre series by another. + + Returns the quotient-with-remainder of two Legendre series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + quo, rem : ndarrays + Of Legendre series coefficients representing the quotient and + remainder. + + See Also + -------- + legadd, legsub, legmulx, legmul, legpow + + Notes + ----- + In general, the (polynomial) division of one Legendre series by another + results in quotient and remainder terms that are not in the Legendre + polynomial basis set. Thus, to express these results as a Legendre + series, it is necessary to "reproject" the results onto the Legendre + basis set, which may produce "unintuitive" (but correct) results; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> L.legdiv(c1,c2) # quotient "intuitive," remainder not + (array([3.]), array([-8., -4.])) + >>> c2 = (0,1,2,3) + >>> L.legdiv(c2,c1) # neither "intuitive" + (array([-0.07407407, 1.66666667]), array([-1.03703704, -2.51851852])) # may vary + + """ + return pu._div(legmul, c1, c2) + + +def legpow(c, pow, maxpower=16): + """Raise a Legendre series to a power. + + Returns the Legendre series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Legendre series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Legendre series of power. + + See Also + -------- + legadd, legsub, legmulx, legmul, legdiv + + """ + return pu._pow(legmul, c, pow, maxpower) + + +def legder(c, m=1, scl=1, axis=0): + """ + Differentiate a Legendre series. + + Returns the Legendre series coefficients `c` differentiated `m` times + along `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2`` + while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + + 2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Legendre series coefficients. If c is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + Returns + ------- + der : ndarray + Legendre series of the derivative. + + See Also + -------- + legint + + Notes + ----- + In general, the result of differentiating a Legendre series does not + resemble the same operation on a power series. Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c = (1,2,3,4) + >>> L.legder(c) + array([ 6., 9., 20.]) + >>> L.legder(c, 3) + array([60.]) + >>> L.legder(c, scl=-1) + array([ -6., -9., -20.]) + >>> L.legder(c, 2,-1) + array([ 9., 60.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt = pu._as_int(m, "the order of derivation") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1] * 0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 2, -1): + der[j - 1] = (2 * j - 1) * c[j] + c[j - 2] += c[j] + if n > 1: + der[1] = 3 * c[2] + der[0] = c[1] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Legendre series. + + Returns the Legendre series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]] + represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) + + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Legendre series coefficients. If c is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + Returns + ------- + S : ndarray + Legendre series coefficient array of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + legder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c = (1,2,3) + >>> L.legint(c) + array([ 0.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary + >>> L.legint(c, 3) + array([ 1.66666667e-02, -1.78571429e-02, 4.76190476e-02, # may vary + -1.73472348e-18, 1.90476190e-02, 9.52380952e-03]) + >>> L.legint(c, k=3) + array([ 3.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary + >>> L.legint(c, lbnd=-2) + array([ 7.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary + >>> L.legint(c, scl=2) + array([ 0.66666667, 0.8 , 1.33333333, 1.2 ]) # may vary + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt = pu._as_int(m, "the order of integration") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0] * (cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0] * 0 + tmp[1] = c[0] + if n > 1: + tmp[2] = c[1] / 3 + for j in range(2, n): + t = c[j] / (2 * j + 1) + tmp[j + 1] = t + tmp[j - 1] -= t + tmp[0] += k[i] - legval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def legval(x, c, tensor=True): + """ + Evaluate a Legendre series at points x. + + If `c` is of length ``n + 1``, this function returns the value: + + .. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then ``p(x)`` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + legval2d, leggrid2d, legval3d, leggrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + """ + c = np.array(c, ndmin=1, copy=None) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,) * x.ndim) + + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + nd = len(c) + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = c[-i] - c1 * ((nd - 1) / nd) + c1 = tmp + c1 * x * ((2 * nd - 1) / nd) + return c0 + c1 * x + + +def legval2d(x, y, c): + """ + Evaluate a 2-D Legendre series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points ``(x, y)``, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional Legendre series at points formed + from pairs of corresponding values from `x` and `y`. + + See Also + -------- + legval, leggrid2d, legval3d, leggrid3d + """ + return pu._valnd(legval, c, x, y) + + +def leggrid2d(x, y, c): + """ + Evaluate a 2-D Legendre series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b) + + where the points ``(a, b)`` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape + y.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j is contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional Chebyshev series at points in the + Cartesian product of `x` and `y`. + + See Also + -------- + legval, legval2d, legval3d, leggrid3d + """ + return pu._gridnd(legval, c, x, y) + + +def legval3d(x, y, z, c): + """ + Evaluate a 3-D Legendre series at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z) + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or a lists, otherwise they are treated as a scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + ``(x, y, z)``, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + legval, legval2d, leggrid2d, leggrid3d + """ + return pu._valnd(legval, c, x, y, z) + + +def leggrid3d(x, y, z, c): + """ + Evaluate a 3-D Legendre series on the Cartesian product of x, y, and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c) + + where the points ``(a, b, c)`` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. + + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or a lists, otherwise they are treated as a scalars. In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. + + If `c` has fewer than three dimensions, ones are implicitly appended to + its shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape + y.shape + z.shape. + + Parameters + ---------- + x, y, z : array_like, compatible objects + The three dimensional series is evaluated at the points in the + Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a + list or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and, if it isn't an ndarray, it is treated as a + scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + legval, legval2d, leggrid2d, legval3d + """ + return pu._gridnd(legval, c, x, y, z) + + +def legvander(x, deg): + """Pseudo-Vandermonde matrix of given degree. + + Returns the pseudo-Vandermonde matrix of degree `deg` and sample points + `x`. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., i] = L_i(x) + + where ``0 <= i <= deg``. The leading indices of `V` index the elements of + `x` and the last index is the degree of the Legendre polynomial. + + If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is the + array ``V = legvander(x, n)``, then ``np.dot(V, c)`` and + ``legval(x, c)`` are the same up to roundoff. This equivalence is + useful both for least squares fitting and for the evaluation of a large + number of Legendre series of the same degree and sample points. + + Parameters + ---------- + x : array_like + Array of points. The dtype is converted to float64 or complex128 + depending on whether any of the elements are complex. If `x` is + scalar it is converted to a 1-D array. + deg : int + Degree of the resulting matrix. + + Returns + ------- + vander : ndarray + The pseudo-Vandermonde matrix. The shape of the returned matrix is + ``x.shape + (deg + 1,)``, where The last index is the degree of the + corresponding Legendre polynomial. The dtype will be the same as + the converted `x`. + + """ + ideg = pu._as_int(deg, "deg") + if ideg < 0: + raise ValueError("deg must be non-negative") + + x = np.array(x, copy=None, ndmin=1) + 0.0 + dims = (ideg + 1,) + x.shape + dtyp = x.dtype + v = np.empty(dims, dtype=dtyp) + # Use forward recursion to generate the entries. This is not as accurate + # as reverse recursion in this application but it is more efficient. + v[0] = x * 0 + 1 + if ideg > 0: + v[1] = x + for i in range(2, ideg + 1): + v[i] = (v[i - 1] * x * (2 * i - 1) - v[i - 2] * (i - 1)) / i + return np.moveaxis(v, 0, -1) + + +def legvander2d(x, y, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y)``. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y), + + where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading indices of + `V` index the points ``(x, y)`` and the last index encodes the degrees of + the Legendre polynomials. + + If ``V = legvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` + correspond to the elements of a 2-D coefficient array `c` of shape + (xdeg + 1, ydeg + 1) in the order + + .. math:: c_{00}, c_{01}, c_{02}, ... , c_{10}, c_{11}, c_{12}, ... + + and ``np.dot(V, c.flat)`` and ``legval2d(x, y, c)`` will be the same + up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 2-D Legendre + series of the same degrees and sample points. + + Parameters + ---------- + x, y : array_like + Arrays of point coordinates, all of the same shape. The dtypes + will be converted to either float64 or complex128 depending on + whether any of the elements are complex. Scalars are converted to + 1-D arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg]. + + Returns + ------- + vander2d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same + as the converted `x` and `y`. + + See Also + -------- + legvander, legvander3d, legval2d, legval3d + """ + return pu._vander_nd_flat((legvander, legvander), (x, y), deg) + + +def legvander3d(x, y, z, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y, z)``. If `l`, `m`, `n` are the given degrees in `x`, `y`, `z`, + then The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z), + + where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= j <= n``. The leading + indices of `V` index the points ``(x, y, z)`` and the last index encodes + the degrees of the Legendre polynomials. + + If ``V = legvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns + of `V` correspond to the elements of a 3-D coefficient array `c` of + shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order + + .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... + + and ``np.dot(V, c.flat)`` and ``legval3d(x, y, z, c)`` will be the + same up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 3-D Legendre + series of the same degrees and sample points. + + Parameters + ---------- + x, y, z : array_like + Arrays of point coordinates, all of the same shape. The dtypes will + be converted to either float64 or complex128 depending on whether + any of the elements are complex. Scalars are converted to 1-D + arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg, z_deg]. + + Returns + ------- + vander3d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will + be the same as the converted `x`, `y`, and `z`. + + See Also + -------- + legvander, legvander3d, legval2d, legval3d + """ + return pu._vander_nd_flat((legvander, legvander, legvander), (x, y, z), deg) + + +def legfit(x, y, deg, rcond=None, full=False, w=None): + """ + Least squares fit of Legendre series to data. + + Return the coefficients of a Legendre series of degree `deg` that is the + least squares fit to the data values `y` given at points `x`. If `y` is + 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple + fits are done, one for each column of `y`, and the resulting + coefficients are stored in the corresponding columns of a 2-D return. + The fitted polynomial(s) are in the form + + .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x), + + where `n` is `deg`. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Legendre coefficients ordered from low to high. If `y` was + 2-D, the coefficients for the data in column k of `y` are in + column `k`. If `deg` is specified as a list, coefficients for + terms not included in the fit are set equal to zero in the + returned `coef`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if ``full == False``. The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.exceptions.RankWarning) + + See Also + -------- + numpy.polynomial.polynomial.polyfit + numpy.polynomial.chebyshev.chebfit + numpy.polynomial.laguerre.lagfit + numpy.polynomial.hermite.hermfit + numpy.polynomial.hermite_e.hermefit + legval : Evaluates a Legendre series. + legvander : Vandermonde matrix of Legendre series. + legweight : Legendre weight function (= 1). + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the Legendre series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where :math:`w_j` are the weights. This problem is solved by setting up + as the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `~exceptions.RankWarning` will be issued. This means that + the coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using Legendre series are usually better conditioned than fits + using power series, but much can depend on the distribution of the + sample points and the smoothness of the data. If the quality of the fit + is inadequate splines may be a good alternative. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + + """ + return pu._fit(legvander, x, y, deg, rcond, full, w) + + +def legcompanion(c): + """Return the scaled companion matrix of c. + + The basis polynomials are scaled so that the companion matrix is + symmetric when `c` is an Legendre basis polynomial. This provides + better eigenvalue estimates than the unscaled case and for basis + polynomials the eigenvalues are guaranteed to be real if + `numpy.linalg.eigvalsh` is used to obtain them. + + Parameters + ---------- + c : array_like + 1-D array of Legendre series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Scaled companion matrix of dimensions (deg, deg). + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-c[0] / c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + scl = 1. / np.sqrt(2 * np.arange(n) + 1) + top = mat.reshape(-1)[1::n + 1] + bot = mat.reshape(-1)[n::n + 1] + top[...] = np.arange(1, n) * scl[:n - 1] * scl[1:n] + bot[...] = top + mat[:, -1] -= (c[:-1] / c[-1]) * (scl / scl[-1]) * (n / (2 * n - 1)) + return mat + + +def legroots(c): + """ + Compute the roots of a Legendre series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * L_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the series. If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + numpy.polynomial.polynomial.polyroots + numpy.polynomial.chebyshev.chebroots + numpy.polynomial.laguerre.lagroots + numpy.polynomial.hermite.hermroots + numpy.polynomial.hermite_e.hermeroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix, Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the series for such values. + Roots with multiplicity greater than 1 will also show larger errors as + the value of the series near such points is relatively insensitive to + errors in the roots. Isolated roots near the origin can be improved by + a few iterations of Newton's method. + + The Legendre series basis polynomials aren't powers of ``x`` so the + results of this function may seem unintuitive. + + Examples + -------- + >>> import numpy.polynomial.legendre as leg + >>> leg.legroots((1, 2, 3, 4)) # 4L_3 + 3L_2 + 2L_1 + 1L_0, all real roots + array([-0.85099543, -0.11407192, 0.51506735]) # may vary + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-c[0] / c[1]]) + + # rotated companion matrix reduces error + m = legcompanion(c)[::-1, ::-1] + r = np.linalg.eigvals(m) + r.sort() + return r + + +def leggauss(deg): + """ + Gauss-Legendre quadrature. + + Computes the sample points and weights for Gauss-Legendre quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with + the weight function :math:`f(x) = 1`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + The results have only been tested up to degree 100, higher degrees may + be problematic. The weights are determined by using the fact that + + .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k)) + + where :math:`c` is a constant independent of :math:`k` and :math:`x_k` + is the k'th root of :math:`L_n`, and then scaling the results to get + the right value when integrating 1. + + """ + ideg = pu._as_int(deg, "deg") + if ideg <= 0: + raise ValueError("deg must be a positive integer") + + # first approximation of roots. We use the fact that the companion + # matrix is symmetric in this case in order to obtain better zeros. + c = np.array([0] * deg + [1]) + m = legcompanion(c) + x = np.linalg.eigvalsh(m) + + # improve roots by one application of Newton + dy = legval(x, c) + df = legval(x, legder(c)) + x -= dy / df + + # compute the weights. We scale the factor to avoid possible numerical + # overflow. + fm = legval(x, c[1:]) + fm /= np.abs(fm).max() + df /= np.abs(df).max() + w = 1 / (fm * df) + + # for Legendre we can also symmetrize + w = (w + w[::-1]) / 2 + x = (x - x[::-1]) / 2 + + # scale w to get the right value + w *= 2. / w.sum() + + return x, w + + +def legweight(x): + """ + Weight function of the Legendre polynomials. + + The weight function is :math:`1` and the interval of integration is + :math:`[-1, 1]`. The Legendre polynomials are orthogonal, but not + normalized, with respect to this weight function. + + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. + + Returns + ------- + w : ndarray + The weight function at `x`. + """ + w = x * 0.0 + 1.0 + return w + +# +# Legendre series class +# + +class Legendre(ABCPolyBase): + """A Legendre series class. + + The Legendre class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed below. + + Parameters + ---------- + coef : array_like + Legendre coefficients in order of increasing degree, i.e., + ``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [-1., 1.]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1., 1.]. + symbol : str, optional + Symbol used to represent the independent variable in string + representations of the polynomial expression, e.g. for printing. + The symbol must be a valid Python identifier. Default value is 'x'. + + .. versionadded:: 1.24 + + """ + # Virtual Functions + _add = staticmethod(legadd) + _sub = staticmethod(legsub) + _mul = staticmethod(legmul) + _div = staticmethod(legdiv) + _pow = staticmethod(legpow) + _val = staticmethod(legval) + _int = staticmethod(legint) + _der = staticmethod(legder) + _fit = staticmethod(legfit) + _line = staticmethod(legline) + _roots = staticmethod(legroots) + _fromroots = staticmethod(legfromroots) + + # Virtual properties + domain = np.array(legdomain) + window = np.array(legdomain) + basis_name = 'P' diff --git a/local_packages/numpy/polynomial/legendre.pyi b/local_packages/numpy/polynomial/legendre.pyi new file mode 100644 index 0000000..53f8f7c --- /dev/null +++ b/local_packages/numpy/polynomial/legendre.pyi @@ -0,0 +1,100 @@ +from typing import Any, ClassVar, Final, Literal as L + +import numpy as np + +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncGauss, + _FuncInteg, + _FuncLine, + _FuncPoly2Ortho, + _FuncPow, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncVander, + _FuncVander2D, + _FuncVander3D, + _FuncWeight, +) +from .polyutils import trimcoef as legtrim + +__all__ = [ + "legzero", + "legone", + "legx", + "legdomain", + "legline", + "legadd", + "legsub", + "legmulx", + "legmul", + "legdiv", + "legpow", + "legval", + "legder", + "legint", + "leg2poly", + "poly2leg", + "legfromroots", + "legvander", + "legfit", + "legtrim", + "legroots", + "Legendre", + "legval2d", + "legval3d", + "leggrid2d", + "leggrid3d", + "legvander2d", + "legvander3d", + "legcompanion", + "leggauss", + "legweight", +] + +poly2leg: Final[_FuncPoly2Ortho] = ... +leg2poly: Final[_FuncUnOp] = ... + +legdomain: Final[_Array2[np.float64]] = ... +legzero: Final[_Array1[np.int_]] = ... +legone: Final[_Array1[np.int_]] = ... +legx: Final[_Array2[np.int_]] = ... + +legline: Final[_FuncLine] = ... +legfromroots: Final[_FuncFromRoots] = ... +legadd: Final[_FuncBinOp] = ... +legsub: Final[_FuncBinOp] = ... +legmulx: Final[_FuncUnOp] = ... +legmul: Final[_FuncBinOp] = ... +legdiv: Final[_FuncBinOp] = ... +legpow: Final[_FuncPow] = ... +legder: Final[_FuncDer] = ... +legint: Final[_FuncInteg] = ... +legval: Final[_FuncVal] = ... +legval2d: Final[_FuncVal2D] = ... +legval3d: Final[_FuncVal3D] = ... +leggrid2d: Final[_FuncVal2D] = ... +leggrid3d: Final[_FuncVal3D] = ... +legvander: Final[_FuncVander] = ... +legvander2d: Final[_FuncVander2D] = ... +legvander3d: Final[_FuncVander3D] = ... +legfit: Final[_FuncFit] = ... +legcompanion: Final[_FuncCompanion] = ... +legroots: Final[_FuncRoots] = ... +leggauss: Final[_FuncGauss] = ... +legweight: Final[_FuncWeight] = ... + +class Legendre(ABCPolyBase[L["P"]]): + basis_name: ClassVar[L["P"]] = "P" # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/local_packages/numpy/polynomial/polynomial.py b/local_packages/numpy/polynomial/polynomial.py new file mode 100644 index 0000000..e3823c8 --- /dev/null +++ b/local_packages/numpy/polynomial/polynomial.py @@ -0,0 +1,1625 @@ +""" +================================================= +Power Series (:mod:`numpy.polynomial.polynomial`) +================================================= + +This module provides a number of objects (mostly functions) useful for +dealing with polynomials, including a `Polynomial` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with polynomial objects is in +the docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + Polynomial + +Constants +--------- +.. autosummary:: + :toctree: generated/ + + polydomain + polyzero + polyone + polyx + +Arithmetic +---------- +.. autosummary:: + :toctree: generated/ + + polyadd + polysub + polymulx + polymul + polydiv + polypow + polyval + polyval2d + polyval3d + polygrid2d + polygrid3d + +Calculus +-------- +.. autosummary:: + :toctree: generated/ + + polyder + polyint + +Misc Functions +-------------- +.. autosummary:: + :toctree: generated/ + + polyfromroots + polyroots + polyvalfromroots + polyvander + polyvander2d + polyvander3d + polycompanion + polyfit + polytrim + polyline + +See Also +-------- +`numpy.polynomial` + +""" +__all__ = [ + 'polyzero', 'polyone', 'polyx', 'polydomain', 'polyline', 'polyadd', + 'polysub', 'polymulx', 'polymul', 'polydiv', 'polypow', 'polyval', + 'polyvalfromroots', 'polyder', 'polyint', 'polyfromroots', 'polyvander', + 'polyfit', 'polytrim', 'polyroots', 'Polynomial', 'polyval2d', 'polyval3d', + 'polygrid2d', 'polygrid3d', 'polyvander2d', 'polyvander3d', + 'polycompanion'] + +import numpy as np +from numpy._core.overrides import array_function_dispatch as _array_function_dispatch + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +polytrim = pu.trimcoef + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Polynomial default domain. +polydomain = np.array([-1., 1.]) + +# Polynomial coefficients representing zero. +polyzero = np.array([0]) + +# Polynomial coefficients representing one. +polyone = np.array([1]) + +# Polynomial coefficients representing the identity x. +polyx = np.array([0, 1]) + +# +# Polynomial series functions +# + + +def polyline(off, scl): + """ + Returns an array representing a linear polynomial. + + Parameters + ---------- + off, scl : scalars + The "y-intercept" and "slope" of the line, respectively. + + Returns + ------- + y : ndarray + This module's representation of the linear polynomial ``off + + scl*x``. + + See Also + -------- + numpy.polynomial.chebyshev.chebline + numpy.polynomial.legendre.legline + numpy.polynomial.laguerre.lagline + numpy.polynomial.hermite.hermline + numpy.polynomial.hermite_e.hermeline + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> P.polyline(1, -1) + array([ 1, -1]) + >>> P.polyval(1, P.polyline(1, -1)) # should be 0 + 0.0 + + """ + if scl != 0: + return np.array([off, scl]) + else: + return np.array([off]) + + +def polyfromroots(roots): + """ + Generate a monic polynomial with given roots. + + Return the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + where the :math:`r_n` are the roots specified in `roots`. If a zero has + multiplicity n, then it must appear in `roots` n times. For instance, + if 2 is a root of multiplicity three and 3 is a root of multiplicity 2, + then `roots` looks something like [2, 2, 2, 3, 3]. The roots can appear + in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * x + ... + x^n + + The coefficient of the last term is 1 for monic polynomials in this + form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of the polynomial's coefficients If all the roots are + real, then `out` is also real, otherwise it is complex. (see + Examples below). + + See Also + -------- + numpy.polynomial.chebyshev.chebfromroots + numpy.polynomial.legendre.legfromroots + numpy.polynomial.laguerre.lagfromroots + numpy.polynomial.hermite.hermfromroots + numpy.polynomial.hermite_e.hermefromroots + + Notes + ----- + The coefficients are determined by multiplying together linear factors + of the form ``(x - r_i)``, i.e. + + .. math:: p(x) = (x - r_0) (x - r_1) ... (x - r_n) + + where ``n == len(roots) - 1``; note that this implies that ``1`` is always + returned for :math:`a_n`. + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> P.polyfromroots((-1,0,1)) # x(x - 1)(x + 1) = x^3 - x + array([ 0., -1., 0., 1.]) + >>> j = complex(0,1) + >>> P.polyfromroots((-j,j)) # complex returned, though values are real + array([1.+0.j, 0.+0.j, 1.+0.j]) + + """ + return pu._fromroots(polyline, polymul, roots) + + +def polyadd(c1, c2): + """ + Add one polynomial to another. + + Returns the sum of two polynomials `c1` + `c2`. The arguments are + sequences of coefficients from lowest order term to highest, i.e., + [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of polynomial coefficients ordered from low to high. + + Returns + ------- + out : ndarray + The coefficient array representing their sum. + + See Also + -------- + polysub, polymulx, polymul, polydiv, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1, 2, 3) + >>> c2 = (3, 2, 1) + >>> sum = P.polyadd(c1,c2); sum + array([4., 4., 4.]) + >>> P.polyval(2, sum) # 4 + 4(2) + 4(2**2) + 28.0 + + """ + return pu._add(c1, c2) + + +def polysub(c1, c2): + """ + Subtract one polynomial from another. + + Returns the difference of two polynomials `c1` - `c2`. The arguments + are sequences of coefficients from lowest order term to highest, i.e., + [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of polynomial coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of coefficients representing their difference. + + See Also + -------- + polyadd, polymulx, polymul, polydiv, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1, 2, 3) + >>> c2 = (3, 2, 1) + >>> P.polysub(c1,c2) + array([-2., 0., 2.]) + >>> P.polysub(c2, c1) # -P.polysub(c1,c2) + array([ 2., 0., -2.]) + + """ + return pu._sub(c1, c2) + + +def polymulx(c): + """Multiply a polynomial by x. + + Multiply the polynomial `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of polynomial coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + polyadd, polysub, polymul, polydiv, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = (1, 2, 3) + >>> P.polymulx(c) + array([0., 1., 2., 3.]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0] * 0 + prd[1:] = c + return prd + + +def polymul(c1, c2): + """ + Multiply one polynomial by another. + + Returns the product of two polynomials `c1` * `c2`. The arguments are + sequences of coefficients, from lowest order term to highest, e.g., + [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2.`` + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of coefficients representing a polynomial, relative to the + "standard" basis, and ordered from lowest order term to highest. + + Returns + ------- + out : ndarray + Of the coefficients of their product. + + See Also + -------- + polyadd, polysub, polymulx, polydiv, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1, 2, 3) + >>> c2 = (3, 2, 1) + >>> P.polymul(c1, c2) + array([ 3., 8., 14., 8., 3.]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + ret = np.convolve(c1, c2) + return pu.trimseq(ret) + + +def polydiv(c1, c2): + """ + Divide one polynomial by another. + + Returns the quotient-with-remainder of two polynomials `c1` / `c2`. + The arguments are sequences of coefficients, from lowest order term + to highest, e.g., [1,2,3] represents ``1 + 2*x + 3*x**2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of polynomial coefficients ordered from low to high. + + Returns + ------- + [quo, rem] : ndarrays + Of coefficient series representing the quotient and remainder. + + See Also + -------- + polyadd, polysub, polymulx, polymul, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1, 2, 3) + >>> c2 = (3, 2, 1) + >>> P.polydiv(c1, c2) + (array([3.]), array([-8., -4.])) + >>> P.polydiv(c2, c1) + (array([ 0.33333333]), array([ 2.66666667, 1.33333333])) # may vary + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if c2[-1] == 0: + raise ZeroDivisionError # FIXME: add message with details to exception + + # note: this is more efficient than `pu._div(polymul, c1, c2)` + lc1 = len(c1) + lc2 = len(c2) + if lc1 < lc2: + return c1[:1] * 0, c1 + elif lc2 == 1: + return c1 / c2[-1], c1[:1] * 0 + else: + dlen = lc1 - lc2 + scl = c2[-1] + c2 = c2[:-1] / scl + i = dlen + j = lc1 - 1 + while i >= 0: + c1[i:j] -= c2 * c1[j] + i -= 1 + j -= 1 + return c1[j + 1:] / scl, pu.trimseq(c1[:j + 1]) + + +def polypow(c, pow, maxpower=None): + """Raise a polynomial to a power. + + Returns the polynomial `c` raised to the power `pow`. The argument + `c` is a sequence of coefficients ordered from low to high. i.e., + [1,2,3] is the series ``1 + 2*x + 3*x**2.`` + + Parameters + ---------- + c : array_like + 1-D array of array of series coefficients ordered from low to + high degree. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Power series of power. + + See Also + -------- + polyadd, polysub, polymulx, polymul, polydiv + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> P.polypow([1, 2, 3], 2) + array([ 1., 4., 10., 12., 9.]) + + """ + # note: this is more efficient than `pu._pow(polymul, c1, c2)`, as it + # avoids calling `as_series` repeatedly + return pu._pow(np.convolve, c, pow, maxpower) + + +def polyder(c, m=1, scl=1, axis=0): + """ + Differentiate a polynomial. + + Returns the polynomial coefficients `c` differentiated `m` times along + `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The + argument `c` is an array of coefficients from low to high degree along + each axis, e.g., [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2`` + while [[1,2],[1,2]] represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is + ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of polynomial coefficients. If c is multidimensional the + different axis correspond to different variables with the degree + in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change + of variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + Returns + ------- + der : ndarray + Polynomial coefficients of the derivative. + + See Also + -------- + polyint + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = (1, 2, 3, 4) + >>> P.polyder(c) # (d/dx)(c) + array([ 2., 6., 12.]) + >>> P.polyder(c, 3) # (d**3/dx**3)(c) + array([24.]) + >>> P.polyder(c, scl=-1) # (d/d(-x))(c) + array([ -2., -6., -12.]) + >>> P.polyder(c, 2, -1) # (d**2/d(-x)**2)(c) + array([ 6., 24.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + # astype fails with NA + c = c + 0.0 + cdt = c.dtype + cnt = pu._as_int(m, "the order of derivation") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1] * 0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=cdt) + for j in range(n, 0, -1): + der[j - 1] = j * c[j] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a polynomial. + + Returns the polynomial coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients, from low to high degree along each axis, e.g., [1,2,3] + represents the polynomial ``1 + 2*x + 3*x**2`` while [[1,2],[1,2]] + represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + 1-D array of polynomial coefficients, ordered from low to high. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at zero + is the first value in the list, the value of the second integral + at zero is the second value, etc. If ``k == []`` (the default), + all constants are set to zero. If ``m == 1``, a single scalar can + be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + Returns + ------- + S : ndarray + Coefficient array of the integral. + + Raises + ------ + ValueError + If ``m < 1``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + polyder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. Why + is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = (1, 2, 3) + >>> P.polyint(c) # should return array([0, 1, 1, 1]) + array([0., 1., 1., 1.]) + >>> P.polyint(c, 3) # should return array([0, 0, 0, 1/6, 1/12, 1/20]) + array([ 0. , 0. , 0. , 0.16666667, 0.08333333, # may vary + 0.05 ]) + >>> P.polyint(c, k=3) # should return array([3, 1, 1, 1]) + array([3., 1., 1., 1.]) + >>> P.polyint(c,lbnd=-2) # should return array([6, 1, 1, 1]) + array([6., 1., 1., 1.]) + >>> P.polyint(c,scl=-2) # should return array([0, -2, -2, -2]) + array([ 0., -2., -2., -2.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + # astype doesn't preserve mask attribute. + c = c + 0.0 + cdt = c.dtype + if not np.iterable(k): + k = [k] + cnt = pu._as_int(m, "the order of integration") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + k = list(k) + [0] * (cnt - len(k)) + c = np.moveaxis(c, iaxis, 0) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=cdt) + tmp[0] = c[0] * 0 + tmp[1] = c[0] + for j in range(1, n): + tmp[j + 1] = c[j] / (j + 1) + tmp[0] += k[i] - polyval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def polyval(x, c, tensor=True): + """ + Evaluate a polynomial at points x. + + If `c` is of length ``n + 1``, this function returns the value + + .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then ``p(x)`` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + Returns + ------- + values : ndarray, compatible object + The shape of the returned array is described above. + + See Also + -------- + polyval2d, polygrid2d, polyval3d, polygrid3d + + Notes + ----- + The evaluation uses Horner's method. + + When using coefficients from polynomials created with ``Polynomial.fit()``, + use ``p(x)`` or ``polyval(x, p.convert().coef)`` to handle domain/window + scaling correctly, not ``polyval(x, p.coef)``. + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial.polynomial import polyval + >>> polyval(1, [1,2,3]) + 6.0 + >>> a = np.arange(4).reshape(2,2) + >>> a + array([[0, 1], + [2, 3]]) + >>> polyval(a, [1, 2, 3]) + array([[ 1., 6.], + [17., 34.]]) + >>> coef = np.arange(4).reshape(2, 2) # multidimensional coefficients + >>> coef + array([[0, 1], + [2, 3]]) + >>> polyval([1, 2], coef, tensor=True) + array([[2., 4.], + [4., 7.]]) + >>> polyval([1, 2], coef, tensor=False) + array([2., 7.]) + + """ + c = np.array(c, ndmin=1, copy=None) + if c.dtype.char in '?bBhHiIlLqQpP': + # astype fails with NA + c = c + 0.0 + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,) * x.ndim) + + c0 = c[-1] + x * 0 + for i in range(2, len(c) + 1): + c0 = c[-i] + c0 * x + return c0 + + +def polyvalfromroots(x, r, tensor=True): + """ + Evaluate a polynomial specified by its roots at points x. + + If `r` is of length ``N``, this function returns the value + + .. math:: p(x) = \\prod_{n=1}^{N} (x - r_n) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `r`. + + If `r` is a 1-D array, then ``p(x)`` will have the same shape as `x`. If `r` + is multidimensional, then the shape of the result depends on the value of + `tensor`. If `tensor` is ``True`` the shape will be r.shape[1:] + x.shape; + that is, each polynomial is evaluated at every value of `x`. If `tensor` is + ``False``, the shape will be r.shape[1:]; that is, each polynomial is + evaluated only for the corresponding broadcast value of `x`. Note that + scalars have shape (,). + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `r`. + r : array_like + Array of roots. If `r` is multidimensional the first index is the + root index, while the remaining indices enumerate multiple + polynomials. For instance, in the two dimensional case the roots + of each polynomial may be thought of as stored in the columns of `r`. + tensor : boolean, optional + If True, the shape of the roots array is extended with ones on the + right, one for each dimension of `x`. Scalars have dimension 0 for this + action. The result is that every column of coefficients in `r` is + evaluated for every element of `x`. If False, `x` is broadcast over the + columns of `r` for the evaluation. This keyword is useful when `r` is + multidimensional. The default value is True. + + Returns + ------- + values : ndarray, compatible object + The shape of the returned array is described above. + + See Also + -------- + polyroots, polyfromroots, polyval + + Examples + -------- + >>> from numpy.polynomial.polynomial import polyvalfromroots + >>> polyvalfromroots(1, [1, 2, 3]) + 0.0 + >>> a = np.arange(4).reshape(2, 2) + >>> a + array([[0, 1], + [2, 3]]) + >>> polyvalfromroots(a, [-1, 0, 1]) + array([[-0., 0.], + [ 6., 24.]]) + >>> r = np.arange(-2, 2).reshape(2,2) # multidimensional coefficients + >>> r # each column of r defines one polynomial + array([[-2, -1], + [ 0, 1]]) + >>> b = [-2, 1] + >>> polyvalfromroots(b, r, tensor=True) + array([[-0., 3.], + [ 3., 0.]]) + >>> polyvalfromroots(b, r, tensor=False) + array([-0., 0.]) + + """ + r = np.array(r, ndmin=1, copy=None) + if r.dtype.char in '?bBhHiIlLqQpP': + r = r.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray): + if tensor: + r = r.reshape(r.shape + (1,) * x.ndim) + elif x.ndim >= r.ndim: + raise ValueError("x.ndim must be < r.ndim when tensor == False") + return np.prod(x - r, axis=0) + +def _polyval2d_dispatcher(x, y, c): + return (x, y, c) + +def _polygrid2d_dispatcher(x, y, c): + return (x, y, c) + +@_array_function_dispatch(_polyval2d_dispatcher) +def polyval2d(x, y, c): + """ + Evaluate a 2-D polynomial at points (x, y). + + This function returns the value + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * x^i * y^j + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points ``(x, y)``, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points formed with + pairs of corresponding values from `x` and `y`. + + See Also + -------- + polyval, polygrid2d, polyval3d, polygrid3d + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = ((1, 2, 3), (4, 5, 6)) + >>> P.polyval2d(1, 1, c) + 21.0 + + """ + return pu._valnd(polyval, c, x, y) + +@_array_function_dispatch(_polygrid2d_dispatcher) +def polygrid2d(x, y, c): + """ + Evaluate a 2-D polynomial on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * a^i * b^j + + where the points ``(a, b)`` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape + y.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + polyval, polyval2d, polyval3d, polygrid3d + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = ((1, 2, 3), (4, 5, 6)) + >>> P.polygrid2d([0, 1], [0, 1], c) + array([[ 1., 6.], + [ 5., 21.]]) + + """ + return pu._gridnd(polyval, c, x, y) + + +def polyval3d(x, y, z, c): + """ + Evaluate a 3-D polynomial at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * x^i * y^j * z^k + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or a lists, otherwise they are treated as a scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + ``(x, y, z)``, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + polyval, polyval2d, polygrid2d, polygrid3d + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = ((1, 2, 3), (4, 5, 6), (7, 8, 9)) + >>> P.polyval3d(1, 1, 1, c) + 45.0 + + """ + return pu._valnd(polyval, c, x, y, z) + + +def polygrid3d(x, y, z, c): + """ + Evaluate a 3-D polynomial on the Cartesian product of x, y and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * a^i * b^j * c^k + + where the points ``(a, b, c)`` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. + + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or a lists, otherwise they are treated as a scalars. In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. + + If `c` has fewer than three dimensions, ones are implicitly appended to + its shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape + y.shape + z.shape. + + Parameters + ---------- + x, y, z : array_like, compatible objects + The three dimensional series is evaluated at the points in the + Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a + list or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and, if it isn't an ndarray, it is treated as a + scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + polyval, polyval2d, polygrid2d, polyval3d + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = ((1, 2, 3), (4, 5, 6), (7, 8, 9)) + >>> P.polygrid3d([0, 1], [0, 1], [0, 1], c) + array([[ 1., 13.], + [ 6., 51.]]) + + """ + return pu._gridnd(polyval, c, x, y, z) + + +def polyvander(x, deg): + """Vandermonde matrix of given degree. + + Returns the Vandermonde matrix of degree `deg` and sample points + `x`. The Vandermonde matrix is defined by + + .. math:: V[..., i] = x^i, + + where ``0 <= i <= deg``. The leading indices of `V` index the elements of + `x` and the last index is the power of `x`. + + If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is the + matrix ``V = polyvander(x, n)``, then ``np.dot(V, c)`` and + ``polyval(x, c)`` are the same up to roundoff. This equivalence is + useful both for least squares fitting and for the evaluation of a large + number of polynomials of the same degree and sample points. + + Parameters + ---------- + x : array_like + Array of points. The dtype is converted to float64 or complex128 + depending on whether any of the elements are complex. If `x` is + scalar it is converted to a 1-D array. + deg : int + Degree of the resulting matrix. + + Returns + ------- + vander : ndarray. + The Vandermonde matrix. The shape of the returned matrix is + ``x.shape + (deg + 1,)``, where the last index is the power of `x`. + The dtype will be the same as the converted `x`. + + See Also + -------- + polyvander2d, polyvander3d + + Examples + -------- + The Vandermonde matrix of degree ``deg = 5`` and sample points + ``x = [-1, 2, 3]`` contains the element-wise powers of `x` + from 0 to 5 as its columns. + + >>> from numpy.polynomial import polynomial as P + >>> x, deg = [-1, 2, 3], 5 + >>> P.polyvander(x=x, deg=deg) + array([[ 1., -1., 1., -1., 1., -1.], + [ 1., 2., 4., 8., 16., 32.], + [ 1., 3., 9., 27., 81., 243.]]) + + """ + ideg = pu._as_int(deg, "deg") + if ideg < 0: + raise ValueError("deg must be non-negative") + + x = np.array(x, copy=None, ndmin=1) + 0.0 + dims = (ideg + 1,) + x.shape + dtyp = x.dtype + v = np.empty(dims, dtype=dtyp) + v[0] = x * 0 + 1 + if ideg > 0: + v[1] = x + for i in range(2, ideg + 1): + v[i] = v[i - 1] * x + return np.moveaxis(v, 0, -1) + + +def polyvander2d(x, y, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y)``. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (deg[1] + 1)*i + j] = x^i * y^j, + + where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading indices of + `V` index the points ``(x, y)`` and the last index encodes the powers of + `x` and `y`. + + If ``V = polyvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` + correspond to the elements of a 2-D coefficient array `c` of shape + (xdeg + 1, ydeg + 1) in the order + + .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + + and ``np.dot(V, c.flat)`` and ``polyval2d(x, y, c)`` will be the same + up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 2-D polynomials + of the same degrees and sample points. + + Parameters + ---------- + x, y : array_like + Arrays of point coordinates, all of the same shape. The dtypes + will be converted to either float64 or complex128 depending on + whether any of the elements are complex. Scalars are converted to + 1-D arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg]. + + Returns + ------- + vander2d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg([1]+1)`. The dtype will be the same + as the converted `x` and `y`. + + See Also + -------- + polyvander, polyvander3d, polyval2d, polyval3d + + Examples + -------- + >>> import numpy as np + + The 2-D pseudo-Vandermonde matrix of degree ``[1, 2]`` and sample + points ``x = [-1, 2]`` and ``y = [1, 3]`` is as follows: + + >>> from numpy.polynomial import polynomial as P + >>> x = np.array([-1, 2]) + >>> y = np.array([1, 3]) + >>> m, n = 1, 2 + >>> deg = np.array([m, n]) + >>> V = P.polyvander2d(x=x, y=y, deg=deg) + >>> V + array([[ 1., 1., 1., -1., -1., -1.], + [ 1., 3., 9., 2., 6., 18.]]) + + We can verify the columns for any ``0 <= i <= m`` and ``0 <= j <= n``: + + >>> i, j = 0, 1 + >>> V[:, (deg[1]+1)*i + j] == x**i * y**j + array([ True, True]) + + The (1D) Vandermonde matrix of sample points ``x`` and degree ``m`` is a + special case of the (2D) pseudo-Vandermonde matrix with ``y`` points all + zero and degree ``[m, 0]``. + + >>> P.polyvander2d(x=x, y=0*x, deg=(m, 0)) == P.polyvander(x=x, deg=m) + array([[ True, True], + [ True, True]]) + + """ + return pu._vander_nd_flat((polyvander, polyvander), (x, y), deg) + + +def polyvander3d(x, y, z, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y, z)``. If `l`, `m`, `n` are the given degrees in `x`, `y`, `z`, + then The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = x^i * y^j * z^k, + + where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= j <= n``. The leading + indices of `V` index the points ``(x, y, z)`` and the last index encodes + the powers of `x`, `y`, and `z`. + + If ``V = polyvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns + of `V` correspond to the elements of a 3-D coefficient array `c` of + shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order + + .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... + + and ``np.dot(V, c.flat)`` and ``polyval3d(x, y, z, c)`` will be the + same up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 3-D polynomials + of the same degrees and sample points. + + Parameters + ---------- + x, y, z : array_like + Arrays of point coordinates, all of the same shape. The dtypes will + be converted to either float64 or complex128 depending on whether + any of the elements are complex. Scalars are converted to 1-D + arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg, z_deg]. + + Returns + ------- + vander3d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg([1]+1)*(deg[2]+1)`. The dtype will + be the same as the converted `x`, `y`, and `z`. + + See Also + -------- + polyvander, polyvander3d, polyval2d, polyval3d + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial import polynomial as P + >>> x = np.asarray([-1, 2, 1]) + >>> y = np.asarray([1, -2, -3]) + >>> z = np.asarray([2, 2, 5]) + >>> l, m, n = [2, 2, 1] + >>> deg = [l, m, n] + >>> V = P.polyvander3d(x=x, y=y, z=z, deg=deg) + >>> V + array([[ 1., 2., 1., 2., 1., 2., -1., -2., -1., + -2., -1., -2., 1., 2., 1., 2., 1., 2.], + [ 1., 2., -2., -4., 4., 8., 2., 4., -4., + -8., 8., 16., 4., 8., -8., -16., 16., 32.], + [ 1., 5., -3., -15., 9., 45., 1., 5., -3., + -15., 9., 45., 1., 5., -3., -15., 9., 45.]]) + + We can verify the columns for any ``0 <= i <= l``, ``0 <= j <= m``, + and ``0 <= k <= n`` + + >>> i, j, k = 2, 1, 0 + >>> V[:, (m+1)*(n+1)*i + (n+1)*j + k] == x**i * y**j * z**k + array([ True, True, True]) + + """ + return pu._vander_nd_flat((polyvander, polyvander, polyvander), (x, y, z), deg) + + +def polyfit(x, y, deg, rcond=None, full=False, w=None): + """ + Least-squares fit of a polynomial to data. + + Return the coefficients of a polynomial of degree `deg` that is the + least squares fit to the data values `y` given at points `x`. If `y` is + 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple + fits are done, one for each column of `y`, and the resulting + coefficients are stored in the corresponding columns of a 2-D return. + The fitted polynomial(s) are in the form + + .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n, + + where `n` is `deg`. + + Parameters + ---------- + x : array_like, shape (`M`,) + x-coordinates of the `M` sample (data) points ``(x[i], y[i])``. + y : array_like, shape (`M`,) or (`M`, `K`) + y-coordinates of the sample points. Several sets of sample points + sharing the same x-coordinates can be (independently) fit with one + call to `polyfit` by passing in for `y` a 2-D array that contains + one data set per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller + than `rcond`, relative to the largest singular value, will be + ignored. The default value is ``len(x)*eps``, where `eps` is the + relative precision of the platform's float type, about 2e-16 in + most cases. + full : bool, optional + Switch determining the nature of the return value. When ``False`` + (the default) just the coefficients are returned; when ``True``, + diagnostic information from the singular value decomposition (used + to solve the fit's matrix equation) is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + + Returns + ------- + coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`) + Polynomial coefficients ordered from low to high. If `y` was 2-D, + the coefficients in column `k` of `coef` represent the polynomial + fit to the data in `y`'s `k`-th column. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Raises + ------ + RankWarning + Raised if the matrix in the least-squares fit is rank deficient. + The warning is only raised if ``full == False``. The warnings can + be turned off by: + + >>> import warnings + >>> warnings.simplefilter('ignore', np.exceptions.RankWarning) + + See Also + -------- + numpy.polynomial.chebyshev.chebfit + numpy.polynomial.legendre.legfit + numpy.polynomial.laguerre.lagfit + numpy.polynomial.hermite.hermfit + numpy.polynomial.hermite_e.hermefit + polyval : Evaluates a polynomial. + polyvander : Vandermonde matrix for powers. + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the polynomial `p` that minimizes + the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where the :math:`w_j` are the weights. This problem is solved by + setting up the (typically) over-determined matrix equation: + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected (and `full` == ``False``), a `~exceptions.RankWarning` will be + raised. This means that the coefficient values may be poorly determined. + Fitting to a lower order polynomial will usually get rid of the warning + (but may not be what you want, of course; if you have independent + reason(s) for choosing the degree which isn't working, you may have to: + a) reconsider those reasons, and/or b) reconsider the quality of your + data). The `rcond` parameter can also be set to a value smaller than + its default, but the resulting fit may be spurious and have large + contributions from roundoff error. + + Polynomial fits using double precision tend to "fail" at about + (polynomial) degree 20. Fits using Chebyshev or Legendre series are + generally better conditioned, but much can still depend on the + distribution of the sample points and the smoothness of the data. If + the quality of the fit is inadequate, splines may be a good + alternative. + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial import polynomial as P + >>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1] + >>> rng = np.random.default_rng() + >>> err = rng.normal(size=len(x)) + >>> y = x**3 - x + err # x^3 - x + Gaussian noise + >>> c, stats = P.polyfit(x,y,3,full=True) + >>> c # c[0], c[1] approx. -1, c[2] should be approx. 0, c[3] approx. 1 + array([ 0.23111996, -1.02785049, -0.2241444 , 1.08405657]) # may vary + >>> stats # note the large SSR, explaining the rather poor results + [array([48.312088]), # may vary + 4, + array([1.38446749, 1.32119158, 0.50443316, 0.28853036]), + 1.1324274851176597e-14] + + Same thing without the added noise + + >>> y = x**3 - x + >>> c, stats = P.polyfit(x,y,3,full=True) + >>> c # c[0], c[1] ~= -1, c[2] should be "very close to 0", c[3] ~= 1 + array([-6.73496154e-17, -1.00000000e+00, 0.00000000e+00, 1.00000000e+00]) + >>> stats # note the minuscule SSR + [array([8.79579319e-31]), + np.int32(4), + array([1.38446749, 1.32119158, 0.50443316, 0.28853036]), + 1.1324274851176597e-14] + + """ + return pu._fit(polyvander, x, y, deg, rcond, full, w) + + +def polycompanion(c): + """ + Return the companion matrix of c. + + The companion matrix for power series cannot be made symmetric by + scaling the basis, so this function differs from those for the + orthogonal polynomials. + + Parameters + ---------- + c : array_like + 1-D array of polynomial coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Companion matrix of dimensions (deg, deg). + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = (1, 2, 3) + >>> P.polycompanion(c) + array([[ 0. , -0.33333333], + [ 1. , -0.66666667]]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-c[0] / c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + bot = mat.reshape(-1)[n::n + 1] + bot[...] = 1 + mat[:, -1] -= c[:-1] / c[-1] + return mat + + +def polyroots(c): + """ + Compute the roots of a polynomial. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * x^i. + + Parameters + ---------- + c : 1-D array_like + 1-D array of polynomial coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the polynomial. If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + numpy.polynomial.chebyshev.chebroots + numpy.polynomial.legendre.legroots + numpy.polynomial.laguerre.lagroots + numpy.polynomial.hermite.hermroots + numpy.polynomial.hermite_e.hermeroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix, Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the power series for such + values. Roots with multiplicity greater than 1 will also show larger + errors as the value of the series near such points is relatively + insensitive to errors in the roots. Isolated roots near the origin can + be improved by a few iterations of Newton's method. + + Examples + -------- + >>> import numpy.polynomial.polynomial as poly + >>> poly.polyroots(poly.polyfromroots((-1,0,1))) + array([-1., 0., 1.]) + >>> poly.polyroots(poly.polyfromroots((-1,0,1))).dtype + dtype('float64') + >>> j = complex(0,1) + >>> poly.polyroots(poly.polyfromroots((-j,0,j))) + array([ 0.00000000e+00+0.j, 0.00000000e+00+1.j, 2.77555756e-17-1.j]) # may vary + + """ # noqa: E501 + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-c[0] / c[1]]) + + m = polycompanion(c) + r = np.linalg.eigvals(m) + r.sort() + return r + + +# +# polynomial class +# + +class Polynomial(ABCPolyBase): + """A power series class. + + The Polynomial class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed below. + + Parameters + ---------- + coef : array_like + Polynomial coefficients in order of increasing degree, i.e., + ``(1, 2, 3)`` give ``1 + 2*x + 3*x**2``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [-1., 1.]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1., 1.]. + symbol : str, optional + Symbol used to represent the independent variable in string + representations of the polynomial expression, e.g. for printing. + The symbol must be a valid Python identifier. Default value is 'x'. + + .. versionadded:: 1.24 + + """ + # Virtual Functions + _add = staticmethod(polyadd) + _sub = staticmethod(polysub) + _mul = staticmethod(polymul) + _div = staticmethod(polydiv) + _pow = staticmethod(polypow) + _val = staticmethod(polyval) + _int = staticmethod(polyint) + _der = staticmethod(polyder) + _fit = staticmethod(polyfit) + _line = staticmethod(polyline) + _roots = staticmethod(polyroots) + _fromroots = staticmethod(polyfromroots) + + # Virtual properties + domain = np.array(polydomain) + window = np.array(polydomain) + basis_name = None + + @classmethod + def _str_term_unicode(cls, i, arg_str): + if i == '1': + return f"·{arg_str}" + else: + return f"·{arg_str}{i.translate(cls._superscript_mapping)}" + + @staticmethod + def _str_term_ascii(i, arg_str): + if i == '1': + return f" {arg_str}" + else: + return f" {arg_str}**{i}" + + @staticmethod + def _repr_latex_term(i, arg_str, needs_parens): + if needs_parens: + arg_str = rf"\left({arg_str}\right)" + if i == 0: + return '1' + elif i == 1: + return arg_str + else: + return f"{arg_str}^{{{i}}}" diff --git a/local_packages/numpy/polynomial/polynomial.pyi b/local_packages/numpy/polynomial/polynomial.pyi new file mode 100644 index 0000000..86f2884 --- /dev/null +++ b/local_packages/numpy/polynomial/polynomial.pyi @@ -0,0 +1,109 @@ +from typing import Any, ClassVar, Final, overload + +import numpy as np +import numpy.typing as npt +from numpy._typing import ( + _ArrayLikeFloat_co, + _ArrayLikeNumber_co, + _FloatLike_co, + _NumberLike_co, +) + +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _ArrayLikeCoef_co, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncInteg, + _FuncLine, + _FuncPow, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncVander, + _FuncVander2D, + _FuncVander3D, +) +from .polyutils import trimcoef as polytrim + +__all__ = [ + "polyzero", + "polyone", + "polyx", + "polydomain", + "polyline", + "polyadd", + "polysub", + "polymulx", + "polymul", + "polydiv", + "polypow", + "polyval", + "polyvalfromroots", + "polyder", + "polyint", + "polyfromroots", + "polyvander", + "polyfit", + "polytrim", + "polyroots", + "Polynomial", + "polyval2d", + "polyval3d", + "polygrid2d", + "polygrid3d", + "polyvander2d", + "polyvander3d", + "polycompanion", +] + +polydomain: Final[_Array2[np.float64]] = ... +polyzero: Final[_Array1[np.int_]] = ... +polyone: Final[_Array1[np.int_]] = ... +polyx: Final[_Array2[np.int_]] = ... + +polyline: Final[_FuncLine] = ... +polyfromroots: Final[_FuncFromRoots] = ... +polyadd: Final[_FuncBinOp] = ... +polysub: Final[_FuncBinOp] = ... +polymulx: Final[_FuncUnOp] = ... +polymul: Final[_FuncBinOp] = ... +polydiv: Final[_FuncBinOp] = ... +polypow: Final[_FuncPow] = ... +polyder: Final[_FuncDer] = ... +polyint: Final[_FuncInteg] = ... +polyval: Final[_FuncVal] = ... +polyval2d: Final[_FuncVal2D] = ... +polyval3d: Final[_FuncVal3D] = ... + +@overload +def polyvalfromroots(x: _FloatLike_co, r: _FloatLike_co, tensor: bool = True) -> np.float64 | Any: ... +@overload +def polyvalfromroots(x: _NumberLike_co, r: _NumberLike_co, tensor: bool = True) -> np.complex128 | Any: ... +@overload +def polyvalfromroots(x: _ArrayLikeFloat_co, r: _ArrayLikeFloat_co, tensor: bool = True) -> npt.NDArray[np.float64 | Any]: ... +@overload +def polyvalfromroots(x: _ArrayLikeNumber_co, r: _ArrayLikeNumber_co, tensor: bool = True) -> npt.NDArray[np.complex128 | Any]: ... +@overload +def polyvalfromroots(x: _ArrayLikeCoef_co, r: _ArrayLikeCoef_co, tensor: bool = True) -> npt.NDArray[np.object_ | Any]: ... + +polygrid2d: Final[_FuncVal2D] = ... +polygrid3d: Final[_FuncVal3D] = ... +polyvander: Final[_FuncVander] = ... +polyvander2d: Final[_FuncVander2D] = ... +polyvander3d: Final[_FuncVander3D] = ... +polyfit: Final[_FuncFit] = ... +polycompanion: Final[_FuncCompanion] = ... +polyroots: Final[_FuncRoots] = ... + +class Polynomial(ABCPolyBase[None]): + basis_name: ClassVar[None] = None # pyright: ignore[reportIncompatibleMethodOverride] + domain: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] + window: _Array2[np.float64 | Any] = ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/local_packages/numpy/polynomial/polyutils.py b/local_packages/numpy/polynomial/polyutils.py new file mode 100644 index 0000000..5e0e1af --- /dev/null +++ b/local_packages/numpy/polynomial/polyutils.py @@ -0,0 +1,759 @@ +""" +Utility classes and functions for the polynomial modules. + +This module provides: error and warning objects; a polynomial base class; +and some routines used in both the `polynomial` and `chebyshev` modules. + +Functions +--------- + +.. autosummary:: + :toctree: generated/ + + as_series convert list of array_likes into 1-D arrays of common type. + trimseq remove trailing zeros. + trimcoef remove small trailing coefficients. + getdomain return the domain appropriate for a given set of abscissae. + mapdomain maps points between domains. + mapparms parameters of the linear map between domains. + +""" +import functools +import operator +import warnings + +import numpy as np + +__all__ = [ + 'as_series', 'trimseq', 'trimcoef', 'getdomain', 'mapdomain', 'mapparms', + 'format_float'] + +# +# Helper functions to convert inputs to 1-D arrays +# +def trimseq(seq): + """Remove small Poly series coefficients. + + Parameters + ---------- + seq : sequence + Sequence of Poly series coefficients. + + Returns + ------- + series : sequence + Subsequence with trailing zeros removed. If the resulting sequence + would be empty, return the first element. The returned sequence may + or may not be a view. + + Notes + ----- + Do not lose the type info if the sequence contains unknown objects. + + """ + if len(seq) == 0 or seq[-1] != 0: + return seq + else: + for i in range(len(seq) - 1, -1, -1): + if seq[i] != 0: + break + return seq[:i + 1] + + +def as_series(alist, trim=True): + """ + Return argument as a list of 1-d arrays. + + The returned list contains array(s) of dtype double, complex double, or + object. A 1-d argument of shape ``(N,)`` is parsed into ``N`` arrays of + size one; a 2-d argument of shape ``(M,N)`` is parsed into ``M`` arrays + of size ``N`` (i.e., is "parsed by row"); and a higher dimensional array + raises a Value Error if it is not first reshaped into either a 1-d or 2-d + array. + + Parameters + ---------- + alist : array_like + A 1- or 2-d array_like + trim : boolean, optional + When True, trailing zeros are removed from the inputs. + When False, the inputs are passed through intact. + + Returns + ------- + [a1, a2,...] : list of 1-D arrays + A copy of the input data as a list of 1-d arrays. + + Raises + ------ + ValueError + Raised when `as_series` cannot convert its input to 1-d arrays, or at + least one of the resulting arrays is empty. + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial import polyutils as pu + >>> a = np.arange(4) + >>> pu.as_series(a) + [array([0.]), array([1.]), array([2.]), array([3.])] + >>> b = np.arange(6).reshape((2,3)) + >>> pu.as_series(b) + [array([0., 1., 2.]), array([3., 4., 5.])] + + >>> pu.as_series((1, np.arange(3), np.arange(2, dtype=np.float16))) + [array([1.]), array([0., 1., 2.]), array([0., 1.])] + + >>> pu.as_series([2, [1.1, 0.]]) + [array([2.]), array([1.1])] + + >>> pu.as_series([2, [1.1, 0.]], trim=False) + [array([2.]), array([1.1, 0. ])] + + """ + arrays = [np.array(a, ndmin=1, copy=None) for a in alist] + for a in arrays: + if a.size == 0: + raise ValueError("Coefficient array is empty") + if a.ndim != 1: + raise ValueError("Coefficient array is not 1-d") + if trim: + arrays = [trimseq(a) for a in arrays] + + try: + dtype = np.common_type(*arrays) + except Exception as e: + object_dtype = np.dtypes.ObjectDType() + has_one_object_type = False + ret = [] + for a in arrays: + if a.dtype != object_dtype: + tmp = np.empty(len(a), dtype=object_dtype) + tmp[:] = a[:] + ret.append(tmp) + else: + has_one_object_type = True + ret.append(a.copy()) + if not has_one_object_type: + raise ValueError("Coefficient arrays have no common type") from e + else: + ret = [np.array(a, copy=True, dtype=dtype) for a in arrays] + return ret + + +def trimcoef(c, tol=0): + """ + Remove "small" "trailing" coefficients from a polynomial. + + "Small" means "small in absolute value" and is controlled by the + parameter `tol`; "trailing" means highest order coefficient(s), e.g., in + ``[0, 1, 1, 0, 0]`` (which represents ``0 + x + x**2 + 0*x**3 + 0*x**4``) + both the 3-rd and 4-th order coefficients would be "trimmed." + + Parameters + ---------- + c : array_like + 1-d array of coefficients, ordered from lowest order to highest. + tol : number, optional + Trailing (i.e., highest order) elements with absolute value less + than or equal to `tol` (default value is zero) are removed. + + Returns + ------- + trimmed : ndarray + 1-d array with trailing zeros removed. If the resulting series + would be empty, a series containing a single zero is returned. + + Raises + ------ + ValueError + If `tol` < 0 + + Examples + -------- + >>> from numpy.polynomial import polyutils as pu + >>> pu.trimcoef((0,0,3,0,5,0,0)) + array([0., 0., 3., 0., 5.]) + >>> pu.trimcoef((0,0,1e-3,0,1e-5,0,0),1e-3) # item == tol is trimmed + array([0.]) + >>> i = complex(0,1) # works for complex + >>> pu.trimcoef((3e-4,1e-3*(1-i),5e-4,2e-5*(1+i)), 1e-3) + array([0.0003+0.j , 0.001 -0.001j]) + + """ + if tol < 0: + raise ValueError("tol must be non-negative") + + [c] = as_series([c]) + [ind] = np.nonzero(np.abs(c) > tol) + if len(ind) == 0: + return c[:1] * 0 + else: + return c[:ind[-1] + 1].copy() + +def getdomain(x): + """ + Return a domain suitable for given abscissae. + + Find a domain suitable for a polynomial or Chebyshev series + defined at the values supplied. + + Parameters + ---------- + x : array_like + 1-d array of abscissae whose domain will be determined. + + Returns + ------- + domain : ndarray + 1-d array containing two values. If the inputs are complex, then + the two returned points are the lower left and upper right corners + of the smallest rectangle (aligned with the axes) in the complex + plane containing the points `x`. If the inputs are real, then the + two points are the ends of the smallest interval containing the + points `x`. + + See Also + -------- + mapparms, mapdomain + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial import polyutils as pu + >>> points = np.arange(4)**2 - 5; points + array([-5, -4, -1, 4]) + >>> pu.getdomain(points) + array([-5., 4.]) + >>> c = np.exp(complex(0,1)*np.pi*np.arange(12)/6) # unit circle + >>> pu.getdomain(c) + array([-1.-1.j, 1.+1.j]) + + """ + [x] = as_series([x], trim=False) + if x.dtype.char in np.typecodes['Complex']: + rmin, rmax = x.real.min(), x.real.max() + imin, imax = x.imag.min(), x.imag.max() + return np.array((complex(rmin, imin), complex(rmax, imax))) + else: + return np.array((x.min(), x.max())) + +def mapparms(old, new): + """ + Linear map parameters between domains. + + Return the parameters of the linear map ``offset + scale*x`` that maps + `old` to `new` such that ``old[i] -> new[i]``, ``i = 0, 1``. + + Parameters + ---------- + old, new : array_like + Domains. Each domain must (successfully) convert to a 1-d array + containing precisely two values. + + Returns + ------- + offset, scale : scalars + The map ``L(x) = offset + scale*x`` maps the first domain to the + second. + + See Also + -------- + getdomain, mapdomain + + Notes + ----- + Also works for complex numbers, and thus can be used to calculate the + parameters required to map any line in the complex plane to any other + line therein. + + Examples + -------- + >>> from numpy.polynomial import polyutils as pu + >>> pu.mapparms((-1,1),(-1,1)) + (0.0, 1.0) + >>> pu.mapparms((1,-1),(-1,1)) + (-0.0, -1.0) + >>> i = complex(0,1) + >>> pu.mapparms((-i,-1),(1,i)) + ((1+1j), (1-0j)) + + """ + oldlen = old[1] - old[0] + newlen = new[1] - new[0] + off = (old[1] * new[0] - old[0] * new[1]) / oldlen + scl = newlen / oldlen + return off, scl + +def mapdomain(x, old, new): + """ + Apply linear map to input points. + + The linear map ``offset + scale*x`` that maps the domain `old` to + the domain `new` is applied to the points `x`. + + Parameters + ---------- + x : array_like + Points to be mapped. If `x` is a subtype of ndarray the subtype + will be preserved. + old, new : array_like + The two domains that determine the map. Each must (successfully) + convert to 1-d arrays containing precisely two values. + + Returns + ------- + x_out : ndarray + Array of points of the same shape as `x`, after application of the + linear map between the two domains. + + See Also + -------- + getdomain, mapparms + + Notes + ----- + Effectively, this implements: + + .. math:: + x\\_out = new[0] + m(x - old[0]) + + where + + .. math:: + m = \\frac{new[1]-new[0]}{old[1]-old[0]} + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial import polyutils as pu + >>> old_domain = (-1,1) + >>> new_domain = (0,2*np.pi) + >>> x = np.linspace(-1,1,6); x + array([-1. , -0.6, -0.2, 0.2, 0.6, 1. ]) + >>> x_out = pu.mapdomain(x, old_domain, new_domain); x_out + array([ 0. , 1.25663706, 2.51327412, 3.76991118, 5.02654825, # may vary + 6.28318531]) + >>> x - pu.mapdomain(x_out, new_domain, old_domain) + array([0., 0., 0., 0., 0., 0.]) + + Also works for complex numbers (and thus can be used to map any line in + the complex plane to any other line therein). + + >>> i = complex(0,1) + >>> old = (-1 - i, 1 + i) + >>> new = (-1 + i, 1 - i) + >>> z = np.linspace(old[0], old[1], 6); z + array([-1. -1.j , -0.6-0.6j, -0.2-0.2j, 0.2+0.2j, 0.6+0.6j, 1. +1.j ]) + >>> new_z = pu.mapdomain(z, old, new); new_z + array([-1.0+1.j , -0.6+0.6j, -0.2+0.2j, 0.2-0.2j, 0.6-0.6j, 1.0-1.j ]) # may vary + + """ + if type(x) not in (int, float, complex) and not isinstance(x, np.generic): + x = np.asanyarray(x) + off, scl = mapparms(old, new) + return off + scl * x + + +def _nth_slice(i, ndim): + sl = [np.newaxis] * ndim + sl[i] = slice(None) + return tuple(sl) + + +def _vander_nd(vander_fs, points, degrees): + r""" + A generalization of the Vandermonde matrix for N dimensions + + The result is built by combining the results of 1d Vandermonde matrices, + + .. math:: + W[i_0, \ldots, i_M, j_0, \ldots, j_N] = \prod_{k=0}^N{V_k(x_k)[i_0, \ldots, i_M, j_k]} + + where + + .. math:: + N &= \texttt{len(points)} = \texttt{len(degrees)} = \texttt{len(vander\_fs)} \\ + M &= \texttt{points[k].ndim} \\ + V_k &= \texttt{vander\_fs[k]} \\ + x_k &= \texttt{points[k]} \\ + 0 \le j_k &\le \texttt{degrees[k]} + + Expanding the one-dimensional :math:`V_k` functions gives: + + .. math:: + W[i_0, \ldots, i_M, j_0, \ldots, j_N] = \prod_{k=0}^N{B_{k, j_k}(x_k[i_0, \ldots, i_M])} + + where :math:`B_{k,m}` is the m'th basis of the polynomial construction used along + dimension :math:`k`. For a regular polynomial, :math:`B_{k, m}(x) = P_m(x) = x^m`. + + Parameters + ---------- + vander_fs : Sequence[function(array_like, int) -> ndarray] + The 1d vander function to use for each axis, such as ``polyvander`` + points : Sequence[array_like] + Arrays of point coordinates, all of the same shape. The dtypes + will be converted to either float64 or complex128 depending on + whether any of the elements are complex. Scalars are converted to + 1-D arrays. + This must be the same length as `vander_fs`. + degrees : Sequence[int] + The maximum degree (inclusive) to use for each axis. + This must be the same length as `vander_fs`. + + Returns + ------- + vander_nd : ndarray + An array of shape ``points[0].shape + tuple(d + 1 for d in degrees)``. + """ # noqa: E501 + n_dims = len(vander_fs) + if n_dims != len(points): + raise ValueError( + f"Expected {n_dims} dimensions of sample points, got {len(points)}") + if n_dims != len(degrees): + raise ValueError( + f"Expected {n_dims} dimensions of degrees, got {len(degrees)}") + if n_dims == 0: + raise ValueError("Unable to guess a dtype or shape when no points are given") + + # convert to the same shape and type + points = tuple(np.asarray(tuple(points)) + 0.0) + + # produce the vandermonde matrix for each dimension, placing the last + # axis of each in an independent trailing axis of the output + vander_arrays = ( + vander_fs[i](points[i], degrees[i])[(...,) + _nth_slice(i, n_dims)] + for i in range(n_dims) + ) + + # we checked this wasn't empty already, so no `initial` needed + return functools.reduce(operator.mul, vander_arrays) + + +def _vander_nd_flat(vander_fs, points, degrees): + """ + Like `_vander_nd`, but flattens the last ``len(degrees)`` axes into a single axis + + Used to implement the public ``vanderd`` functions. + """ + v = _vander_nd(vander_fs, points, degrees) + return v.reshape(v.shape[:-len(degrees)] + (-1,)) + + +def _fromroots(line_f, mul_f, roots): + """ + Helper function used to implement the ``fromroots`` functions. + + Parameters + ---------- + line_f : function(float, float) -> ndarray + The ``line`` function, such as ``polyline`` + mul_f : function(array_like, array_like) -> ndarray + The ``mul`` function, such as ``polymul`` + roots + See the ``fromroots`` functions for more detail + """ + if len(roots) == 0: + return np.ones(1) + else: + [roots] = as_series([roots], trim=False) + roots.sort() + p = [line_f(-r, 1) for r in roots] + n = len(p) + while n > 1: + m, r = divmod(n, 2) + tmp = [mul_f(p[i], p[i + m]) for i in range(m)] + if r: + tmp[0] = mul_f(tmp[0], p[-1]) + p = tmp + n = m + return p[0] + + +def _valnd(val_f, c, *args): + """ + Helper function used to implement the ``vald`` functions. + + Parameters + ---------- + val_f : function(array_like, array_like, tensor: bool) -> array_like + The ``val`` function, such as ``polyval`` + c, args + See the ``vald`` functions for more detail + """ + args = [np.asanyarray(a) for a in args] + shape0 = args[0].shape + if not all(a.shape == shape0 for a in args[1:]): + if len(args) == 3: + raise ValueError('x, y, z are incompatible') + elif len(args) == 2: + raise ValueError('x, y are incompatible') + else: + raise ValueError('ordinates are incompatible') + it = iter(args) + x0 = next(it) + + # use tensor on only the first + c = val_f(x0, c) + for xi in it: + c = val_f(xi, c, tensor=False) + return c + + +def _gridnd(val_f, c, *args): + """ + Helper function used to implement the ``gridd`` functions. + + Parameters + ---------- + val_f : function(array_like, array_like, tensor: bool) -> array_like + The ``val`` function, such as ``polyval`` + c, args + See the ``gridd`` functions for more detail + """ + for xi in args: + c = val_f(xi, c) + return c + + +def _div(mul_f, c1, c2): + """ + Helper function used to implement the ``div`` functions. + + Implementation uses repeated subtraction of c2 multiplied by the nth basis. + For some polynomial types, a more efficient approach may be possible. + + Parameters + ---------- + mul_f : function(array_like, array_like) -> array_like + The ``mul`` function, such as ``polymul`` + c1, c2 + See the ``div`` functions for more detail + """ + # c1, c2 are trimmed copies + [c1, c2] = as_series([c1, c2]) + if c2[-1] == 0: + raise ZeroDivisionError # FIXME: add message with details to exception + + lc1 = len(c1) + lc2 = len(c2) + if lc1 < lc2: + return c1[:1] * 0, c1 + elif lc2 == 1: + return c1 / c2[-1], c1[:1] * 0 + else: + quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) + rem = c1 + for i in range(lc1 - lc2, - 1, -1): + p = mul_f([0] * i + [1], c2) + q = rem[-1] / p[-1] + rem = rem[:-1] - q * p[:-1] + quo[i] = q + return quo, trimseq(rem) + + +def _add(c1, c2): + """ Helper function used to implement the ``add`` functions. """ + # c1, c2 are trimmed copies + [c1, c2] = as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] += c2 + ret = c1 + else: + c2[:c1.size] += c1 + ret = c2 + return trimseq(ret) + + +def _sub(c1, c2): + """ Helper function used to implement the ``sub`` functions. """ + # c1, c2 are trimmed copies + [c1, c2] = as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] -= c2 + ret = c1 + else: + c2 = -c2 + c2[:c1.size] += c1 + ret = c2 + return trimseq(ret) + + +def _fit(vander_f, x, y, deg, rcond=None, full=False, w=None): + """ + Helper function used to implement the ``fit`` functions. + + Parameters + ---------- + vander_f : function(array_like, int) -> ndarray + The 1d vander function, such as ``polyvander`` + c1, c2 + See the ``fit`` functions for more detail + """ + x = np.asarray(x) + 0.0 + y = np.asarray(y) + 0.0 + deg = np.asarray(deg) + + # check arguments. + if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0: + raise TypeError("deg must be an int or non-empty 1-D array of int") + if deg.min() < 0: + raise ValueError("expected deg >= 0") + if x.ndim != 1: + raise TypeError("expected 1D vector for x") + if x.size == 0: + raise TypeError("expected non-empty vector for x") + if y.ndim < 1 or y.ndim > 2: + raise TypeError("expected 1D or 2D array for y") + if len(x) != len(y): + raise TypeError("expected x and y to have same length") + + if deg.ndim == 0: + lmax = deg + order = lmax + 1 + van = vander_f(x, lmax) + else: + deg = np.sort(deg) + lmax = deg[-1] + order = len(deg) + van = vander_f(x, lmax)[:, deg] + + # set up the least squares matrices in transposed form + lhs = van.T + rhs = y.T + if w is not None: + w = np.asarray(w) + 0.0 + if w.ndim != 1: + raise TypeError("expected 1D vector for w") + if len(x) != len(w): + raise TypeError("expected x and w to have same length") + # apply weights. Don't use inplace operations as they + # can cause problems with NA. + lhs = lhs * w + rhs = rhs * w + + # set rcond + if rcond is None: + rcond = len(x) * np.finfo(x.dtype).eps + + # Determine the norms of the design matrix columns. + if issubclass(lhs.dtype.type, np.complexfloating): + scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) + else: + scl = np.sqrt(np.square(lhs).sum(1)) + scl[scl == 0] = 1 + + # Solve the least squares problem. + c, resids, rank, s = np.linalg.lstsq(lhs.T / scl, rhs.T, rcond) + c = (c.T / scl).T + + # Expand c to include non-fitted coefficients which are set to zero + if deg.ndim > 0: + if c.ndim == 2: + cc = np.zeros((lmax + 1, c.shape[1]), dtype=c.dtype) + else: + cc = np.zeros(lmax + 1, dtype=c.dtype) + cc[deg] = c + c = cc + + # warn on rank reduction + if rank != order and not full: + msg = "The fit may be poorly conditioned" + warnings.warn(msg, np.exceptions.RankWarning, stacklevel=2) + + if full: + return c, [resids, rank, s, rcond] + else: + return c + + +def _pow(mul_f, c, pow, maxpower): + """ + Helper function used to implement the ``pow`` functions. + + Parameters + ---------- + mul_f : function(array_like, array_like) -> ndarray + The ``mul`` function, such as ``polymul`` + c : array_like + 1-D array of array of series coefficients + pow, maxpower + See the ``pow`` functions for more detail + """ + # c is a trimmed copy + [c] = as_series([c]) + power = int(pow) + if power != pow or power < 0: + raise ValueError("Power must be a non-negative integer.") + elif maxpower is not None and power > maxpower: + raise ValueError("Power is too large") + elif power == 0: + return np.array([1], dtype=c.dtype) + elif power == 1: + return c + else: + # This can be made more efficient by using powers of two + # in the usual way. + prd = c + for i in range(2, power + 1): + prd = mul_f(prd, c) + return prd + + +def _as_int(x, desc): + """ + Like `operator.index`, but emits a custom exception when passed an + incorrect type + + Parameters + ---------- + x : int-like + Value to interpret as an integer + desc : str + description to include in any error message + + Raises + ------ + TypeError : if x is a float or non-numeric + """ + try: + return operator.index(x) + except TypeError as e: + raise TypeError(f"{desc} must be an integer, received {x}") from e + + +def format_float(x, parens=False): + from numpy._core.multiarray import dragon4_positional, dragon4_scientific + + if not np.issubdtype(type(x), np.floating): + return str(x) + + opts = np.get_printoptions() + + if np.isnan(x): + return opts['nanstr'] + elif np.isinf(x): + return opts['infstr'] + + exp_format = False + if x != 0: + a = np.abs(x) + if a >= 1.e8 or a < 10**min(0, -(opts['precision'] - 1) // 2): + exp_format = True + + trim, unique = '0', True + if opts['floatmode'] == 'fixed': + trim, unique = 'k', False + + if exp_format: + s = dragon4_scientific(x, precision=opts['precision'], + unique=unique, trim=trim, + sign=opts['sign'] == '+') + if parens: + s = '(' + s + ')' + else: + s = dragon4_positional(x, precision=opts['precision'], + fractional=True, + unique=unique, trim=trim, + sign=opts['sign'] == '+') + return s diff --git a/local_packages/numpy/polynomial/polyutils.pyi b/local_packages/numpy/polynomial/polyutils.pyi new file mode 100644 index 0000000..79ca317 --- /dev/null +++ b/local_packages/numpy/polynomial/polyutils.pyi @@ -0,0 +1,307 @@ +from collections.abc import Callable, Iterable, Sequence +from typing import ( + Final, + Literal, + Protocol, + SupportsIndex, + TypeAlias, + TypeVar, + overload, + type_check_only, +) + +import numpy as np +import numpy.typing as npt +from numpy._typing import ( + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeObject_co, + _FloatLike_co, + _NumberLike_co, +) + +from ._polytypes import ( + _AnyInt, + _Array2, + _ArrayLikeCoef_co, + _CoefArray, + _CoefLike_co, + _CoefSeries, + _ComplexArray, + _ComplexSeries, + _FloatArray, + _FloatSeries, + _FuncBinOp, + _ObjectArray, + _ObjectSeries, + _SeriesLikeCoef_co, + _SeriesLikeComplex_co, + _SeriesLikeFloat_co, + _SeriesLikeInt_co, + _SeriesLikeObject_co, + _Tuple2, +) + +__all__ = ["as_series", "format_float", "getdomain", "mapdomain", "mapparms", "trimcoef", "trimseq"] + +_T = TypeVar("_T") +_SeqT = TypeVar("_SeqT", bound=_CoefArray | Sequence[_CoefLike_co]) + +_AnyLineF: TypeAlias = Callable[[float, float], _CoefArray] +_AnyMulF: TypeAlias = Callable[[np.ndarray | list[int], np.ndarray], _CoefArray] +_AnyVanderF: TypeAlias = Callable[[np.ndarray, int], _CoefArray] + +@type_check_only +class _ValFunc(Protocol[_T]): + def __call__(self, x: np.ndarray, c: _T, /, *, tensor: bool = True) -> _T: ... + +### + +@overload +def as_series(alist: npt.NDArray[np.integer] | _FloatArray, trim: bool = True) -> list[_FloatSeries]: ... +@overload +def as_series(alist: _ComplexArray, trim: bool = True) -> list[_ComplexSeries]: ... +@overload +def as_series(alist: _ObjectArray, trim: bool = True) -> list[_ObjectSeries]: ... +@overload +def as_series(alist: Iterable[_FloatArray | npt.NDArray[np.integer]], trim: bool = True) -> list[_FloatSeries]: ... +@overload +def as_series(alist: Iterable[_ComplexArray], trim: bool = True) -> list[_ComplexSeries]: ... +@overload +def as_series(alist: Iterable[_ObjectArray], trim: bool = True) -> list[_ObjectSeries]: ... +@overload +def as_series(alist: Iterable[_SeriesLikeFloat_co | float], trim: bool = True) -> list[_FloatSeries]: ... +@overload +def as_series(alist: Iterable[_SeriesLikeComplex_co | complex], trim: bool = True) -> list[_ComplexSeries]: ... +@overload +def as_series(alist: Iterable[_SeriesLikeCoef_co | object], trim: bool = True) -> list[_ObjectSeries]: ... + +# +def trimseq(seq: _SeqT) -> _SeqT: ... + +# +@overload +def trimcoef(c: npt.NDArray[np.integer] | _FloatArray, tol: _FloatLike_co = 0) -> _FloatSeries: ... +@overload +def trimcoef(c: _ComplexArray, tol: _FloatLike_co = 0) -> _ComplexSeries: ... +@overload +def trimcoef(c: _ObjectArray, tol: _FloatLike_co = 0) -> _ObjectSeries: ... +@overload +def trimcoef(c: _SeriesLikeFloat_co | float, tol: _FloatLike_co = 0) -> _FloatSeries: ... +@overload +def trimcoef(c: _SeriesLikeComplex_co | complex, tol: _FloatLike_co = 0) -> _ComplexSeries: ... +@overload +def trimcoef(c: _SeriesLikeCoef_co | object, tol: _FloatLike_co = 0) -> _ObjectSeries: ... + +# +@overload +def getdomain(x: _FloatArray | npt.NDArray[np.integer]) -> _Array2[np.float64]: ... +@overload +def getdomain(x: _ComplexArray) -> _Array2[np.complex128]: ... +@overload +def getdomain(x: _ObjectArray) -> _Array2[np.object_]: ... +@overload +def getdomain(x: _SeriesLikeFloat_co | float) -> _Array2[np.float64]: ... +@overload +def getdomain(x: _SeriesLikeComplex_co | complex) -> _Array2[np.complex128]: ... +@overload +def getdomain(x: _SeriesLikeCoef_co | object) -> _Array2[np.object_]: ... + +# +@overload +def mapparms(old: npt.NDArray[np.floating | np.integer], new: npt.NDArray[np.floating | np.integer]) -> _Tuple2[np.floating]: ... +@overload +def mapparms(old: npt.NDArray[np.number], new: npt.NDArray[np.number]) -> _Tuple2[np.complexfloating]: ... +@overload +def mapparms(old: npt.NDArray[np.object_ | np.number], new: npt.NDArray[np.object_ | np.number]) -> _Tuple2[object]: ... +@overload +def mapparms(old: Sequence[float], new: Sequence[float]) -> _Tuple2[float]: ... +@overload +def mapparms(old: Sequence[complex], new: Sequence[complex]) -> _Tuple2[complex]: ... +@overload +def mapparms(old: _SeriesLikeFloat_co, new: _SeriesLikeFloat_co) -> _Tuple2[np.floating]: ... +@overload +def mapparms(old: _SeriesLikeComplex_co, new: _SeriesLikeComplex_co) -> _Tuple2[np.complexfloating]: ... +@overload +def mapparms(old: _SeriesLikeCoef_co, new: _SeriesLikeCoef_co) -> _Tuple2[object]: ... + +# +@overload +def mapdomain(x: _FloatLike_co, old: _SeriesLikeFloat_co, new: _SeriesLikeFloat_co) -> np.floating: ... +@overload +def mapdomain(x: _NumberLike_co, old: _SeriesLikeComplex_co, new: _SeriesLikeComplex_co) -> np.complexfloating: ... +@overload +def mapdomain( + x: npt.NDArray[np.floating | np.integer], + old: npt.NDArray[np.floating | np.integer], + new: npt.NDArray[np.floating | np.integer], +) -> _FloatSeries: ... +@overload +def mapdomain(x: npt.NDArray[np.number], old: npt.NDArray[np.number], new: npt.NDArray[np.number]) -> _ComplexSeries: ... +@overload +def mapdomain( + x: npt.NDArray[np.object_ | np.number], + old: npt.NDArray[np.object_ | np.number], + new: npt.NDArray[np.object_ | np.number], +) -> _ObjectSeries: ... +@overload +def mapdomain(x: _SeriesLikeFloat_co, old: _SeriesLikeFloat_co, new: _SeriesLikeFloat_co) -> _FloatSeries: ... +@overload +def mapdomain(x: _SeriesLikeComplex_co, old: _SeriesLikeComplex_co, new: _SeriesLikeComplex_co) -> _ComplexSeries: ... +@overload +def mapdomain(x: _SeriesLikeCoef_co, old: _SeriesLikeCoef_co, new: _SeriesLikeCoef_co) -> _ObjectSeries: ... +@overload +def mapdomain(x: _CoefLike_co, old: _SeriesLikeCoef_co, new: _SeriesLikeCoef_co) -> object: ... + +# +def _nth_slice(i: SupportsIndex, ndim: SupportsIndex) -> tuple[slice | None, ...]: ... + +# keep in sync with `vander_nd_flat` +@overload +def _vander_nd( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeFloat_co], + degrees: Sequence[SupportsIndex], +) -> _FloatArray: ... +@overload +def _vander_nd( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeComplex_co], + degrees: Sequence[SupportsIndex], +) -> _ComplexArray: ... +@overload +def _vander_nd( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeObject_co | _ArrayLikeComplex_co], + degrees: Sequence[SupportsIndex], +) -> _ObjectArray: ... +@overload +def _vander_nd( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[npt.ArrayLike], + degrees: Sequence[SupportsIndex], +) -> _CoefArray: ... + +# keep in sync with `vander_nd` +@overload +def _vander_nd_flat( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeFloat_co], + degrees: Sequence[SupportsIndex], +) -> _FloatArray: ... +@overload +def _vander_nd_flat( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeComplex_co], + degrees: Sequence[SupportsIndex], +) -> _ComplexArray: ... +@overload +def _vander_nd_flat( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[_ArrayLikeObject_co | _ArrayLikeComplex_co], + degrees: Sequence[SupportsIndex], +) -> _ObjectArray: ... +@overload +def _vander_nd_flat( + vander_fs: Sequence[_AnyVanderF], + points: Sequence[npt.ArrayLike], + degrees: Sequence[SupportsIndex], +) -> _CoefArray: ... + +# keep in sync with `._polytypes._FuncFromRoots` +@overload +def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeFloat_co) -> _FloatSeries: ... +@overload +def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeComplex_co) -> _ComplexSeries: ... +@overload +def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeObject_co) -> _ObjectSeries: ... +@overload +def _fromroots(line_f: _AnyLineF, mul_f: _AnyMulF, roots: _SeriesLikeCoef_co) -> _CoefSeries: ... + +# keep in sync with `_gridnd` +def _valnd(val_f: _ValFunc[_T], c: _T, *args: npt.ArrayLike) -> _T: ... + +# keep in sync with `_valnd` +def _gridnd(val_f: _ValFunc[_T], c: _T, *args: npt.ArrayLike) -> _T: ... + +# keep in sync with `_polytypes._FuncBinOp` +@overload +def _div(mul_f: _AnyMulF, c1: _SeriesLikeFloat_co, c2: _SeriesLikeFloat_co) -> _Tuple2[_FloatSeries]: ... +@overload +def _div(mul_f: _AnyMulF, c1: _SeriesLikeComplex_co, c2: _SeriesLikeComplex_co) -> _Tuple2[_ComplexSeries]: ... +@overload +def _div(mul_f: _AnyMulF, c1: _SeriesLikeObject_co, c2: _SeriesLikeObject_co) -> _Tuple2[_ObjectSeries]: ... +@overload +def _div(mul_f: _AnyMulF, c1: _SeriesLikeCoef_co, c2: _SeriesLikeCoef_co) -> _Tuple2[_CoefSeries]: ... + +_add: Final[_FuncBinOp] = ... +_sub: Final[_FuncBinOp] = ... + +# keep in sync with `_polytypes._FuncPow` +@overload +def _pow(mul_f: _AnyMulF, c: _SeriesLikeFloat_co, pow: _AnyInt, maxpower: _AnyInt | None) -> _FloatSeries: ... +@overload +def _pow(mul_f: _AnyMulF, c: _SeriesLikeComplex_co, pow: _AnyInt, maxpower: _AnyInt | None) -> _ComplexSeries: ... +@overload +def _pow(mul_f: _AnyMulF, c: _SeriesLikeObject_co, pow: _AnyInt, maxpower: _AnyInt | None) -> _ObjectSeries: ... +@overload +def _pow(mul_f: _AnyMulF, c: _SeriesLikeCoef_co, pow: _AnyInt, maxpower: _AnyInt | None) -> _CoefSeries: ... + +# keep in sync with `_polytypes._FuncFit` +@overload +def _fit( + vander_f: _AnyVanderF, + x: _SeriesLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: _SeriesLikeInt_co, + rcond: _FloatLike_co | None = None, + full: Literal[False] = False, + w: _SeriesLikeFloat_co | None = None, +) -> _FloatArray: ... +@overload +def _fit( + vander_f: _AnyVanderF, + x: _SeriesLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: _SeriesLikeInt_co, + rcond: _FloatLike_co | None = None, + full: Literal[False] = False, + w: _SeriesLikeComplex_co | None = None, +) -> _ComplexArray: ... +@overload +def _fit( + vander_f: _AnyVanderF, + x: _SeriesLikeCoef_co, + y: _ArrayLikeCoef_co, + deg: _SeriesLikeInt_co, + rcond: _FloatLike_co | None = None, + full: Literal[False] = False, + w: _SeriesLikeCoef_co | None = None, +) -> _CoefArray: ... +@overload +def _fit( + vander_f: _AnyVanderF, + x: _SeriesLikeCoef_co, + y: _SeriesLikeCoef_co, + deg: _SeriesLikeInt_co, + rcond: _FloatLike_co | None, + full: Literal[True], + w: _SeriesLikeCoef_co | None = None, +) -> tuple[_CoefSeries, Sequence[np.inexact | np.int32]]: ... +@overload +def _fit( + vander_f: _AnyVanderF, + x: _SeriesLikeCoef_co, + y: _SeriesLikeCoef_co, + deg: _SeriesLikeInt_co, + rcond: _FloatLike_co | None = None, + *, + full: Literal[True], + w: _SeriesLikeCoef_co | None = None, +) -> tuple[_CoefSeries, Sequence[np.inexact | np.int32]]: ... + +# +def _as_int(x: SupportsIndex, desc: str) -> int: ... + +# +def format_float(x: _FloatLike_co, parens: bool = False) -> str: ... diff --git a/local_packages/numpy/polynomial/tests/__init__.py b/local_packages/numpy/polynomial/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/local_packages/numpy/polynomial/tests/test_chebyshev.py b/local_packages/numpy/polynomial/tests/test_chebyshev.py new file mode 100644 index 0000000..14777ac --- /dev/null +++ b/local_packages/numpy/polynomial/tests/test_chebyshev.py @@ -0,0 +1,618 @@ +"""Tests for chebyshev module. + +""" +from functools import reduce + +import numpy as np +import numpy.polynomial.chebyshev as cheb +from numpy.polynomial.polynomial import polyval +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises + + +def trim(x): + return cheb.chebtrim(x, tol=1e-6) + + +T0 = [1] +T1 = [0, 1] +T2 = [-1, 0, 2] +T3 = [0, -3, 0, 4] +T4 = [1, 0, -8, 0, 8] +T5 = [0, 5, 0, -20, 0, 16] +T6 = [-1, 0, 18, 0, -48, 0, 32] +T7 = [0, -7, 0, 56, 0, -112, 0, 64] +T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128] +T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256] + +Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9] + + +class TestPrivate: + + def test__cseries_to_zseries(self): + for i in range(5): + inp = np.array([2] + [1] * i, np.double) + tgt = np.array([.5] * i + [2] + [.5] * i, np.double) + res = cheb._cseries_to_zseries(inp) + assert_equal(res, tgt) + + def test__zseries_to_cseries(self): + for i in range(5): + inp = np.array([.5] * i + [2] + [.5] * i, np.double) + tgt = np.array([2] + [1] * i, np.double) + res = cheb._zseries_to_cseries(inp) + assert_equal(res, tgt) + + +class TestConstants: + + def test_chebdomain(self): + assert_equal(cheb.chebdomain, [-1, 1]) + + def test_chebzero(self): + assert_equal(cheb.chebzero, [0]) + + def test_chebone(self): + assert_equal(cheb.chebone, [1]) + + def test_chebx(self): + assert_equal(cheb.chebx, [0, 1]) + + +class TestArithmetic: + + def test_chebadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = cheb.chebadd([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_chebsub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = cheb.chebsub([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_chebmulx(self): + assert_equal(cheb.chebmulx([0]), [0]) + assert_equal(cheb.chebmulx([1]), [0, 1]) + for i in range(1, 5): + ser = [0] * i + [1] + tgt = [0] * (i - 1) + [.5, 0, .5] + assert_equal(cheb.chebmulx(ser), tgt) + + def test_chebmul(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(i + j + 1) + tgt[i + j] += .5 + tgt[abs(i - j)] += .5 + res = cheb.chebmul([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_chebdiv(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0] * i + [1] + cj = [0] * j + [1] + tgt = cheb.chebadd(ci, cj) + quo, rem = cheb.chebdiv(tgt, ci) + res = cheb.chebadd(cheb.chebmul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_chebpow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(cheb.chebmul, [c] * j, np.array([1])) + res = cheb.chebpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([2.5, 2., 1.5]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_chebval(self): + # check empty input + assert_equal(cheb.chebval([], [1]).size, 0) + + # check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Tlist] + for i in range(10): + msg = f"At i={i}" + tgt = y[i] + res = cheb.chebval(x, [0] * i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + # check that shape is preserved + for i in range(3): + dims = [2] * i + x = np.zeros(dims) + assert_equal(cheb.chebval(x, [1]).shape, dims) + assert_equal(cheb.chebval(x, [1, 0]).shape, dims) + assert_equal(cheb.chebval(x, [1, 0, 0]).shape, dims) + + def test_chebval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises(ValueError, cheb.chebval2d, x1, x2[:2], self.c2d) + + # test values + tgt = y1 * y2 + res = cheb.chebval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = cheb.chebval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_chebval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises(ValueError, cheb.chebval3d, x1, x2, x3[:2], self.c3d) + + # test values + tgt = y1 * y2 * y3 + res = cheb.chebval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = cheb.chebval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_chebgrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j->ij', y1, y2) + res = cheb.chebgrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = cheb.chebgrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3) * 2) + + def test_chebgrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = cheb.chebgrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = cheb.chebgrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3) * 3) + + +class TestIntegral: + + def test_chebint(self): + # check exceptions + assert_raises(TypeError, cheb.chebint, [0], .5) + assert_raises(ValueError, cheb.chebint, [0], -1) + assert_raises(ValueError, cheb.chebint, [0], 1, [0, 0]) + assert_raises(ValueError, cheb.chebint, [0], lbnd=[0]) + assert_raises(ValueError, cheb.chebint, [0], scl=[0]) + assert_raises(TypeError, cheb.chebint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0] * (i - 2) + [1] + res = cheb.chebint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [1 / scl] + chebpol = cheb.poly2cheb(pol) + chebint = cheb.chebint(chebpol, m=1, k=[i]) + res = cheb.cheb2poly(chebint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + chebpol = cheb.poly2cheb(pol) + chebint = cheb.chebint(chebpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(cheb.chebval(-1, chebint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [2 / scl] + chebpol = cheb.poly2cheb(pol) + chebint = cheb.chebint(chebpol, m=1, k=[i], scl=2) + res = cheb.cheb2poly(chebint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1) + res = cheb.chebint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1, k=[k]) + res = cheb.chebint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1, k=[k], lbnd=-1) + res = cheb.chebint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1, k=[k], scl=2) + res = cheb.chebint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([cheb.chebint(c) for c in c2d.T]).T + res = cheb.chebint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([cheb.chebint(c) for c in c2d]) + res = cheb.chebint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([cheb.chebint(c, k=3) for c in c2d]) + res = cheb.chebint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_chebder(self): + # check exceptions + assert_raises(TypeError, cheb.chebder, [0], .5) + assert_raises(ValueError, cheb.chebder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0] * i + [1] + res = cheb.chebder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = cheb.chebder(cheb.chebint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = cheb.chebder(cheb.chebint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([cheb.chebder(c) for c in c2d.T]).T + res = cheb.chebder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([cheb.chebder(c) for c in c2d]) + res = cheb.chebder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + + def test_chebvander(self): + # check for 1d x + x = np.arange(3) + v = cheb.chebvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], cheb.chebval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = cheb.chebvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], cheb.chebval(x, coef)) + + def test_chebvander2d(self): + # also tests chebval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = cheb.chebvander2d(x1, x2, [1, 2]) + tgt = cheb.chebval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = cheb.chebvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_chebvander3d(self): + # also tests chebval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = cheb.chebvander3d(x1, x2, x3, [1, 2, 3]) + tgt = cheb.chebval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = cheb.chebvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class TestFitting: + + def test_chebfit(self): + def f(x): + return x * (x - 1) * (x - 2) + + def f2(x): + return x**4 + x**2 + 1 + + # Test exceptions + assert_raises(ValueError, cheb.chebfit, [1], [1], -1) + assert_raises(TypeError, cheb.chebfit, [[1]], [1], 0) + assert_raises(TypeError, cheb.chebfit, [], [1], 0) + assert_raises(TypeError, cheb.chebfit, [1], [[[1]]], 0) + assert_raises(TypeError, cheb.chebfit, [1, 2], [1], 0) + assert_raises(TypeError, cheb.chebfit, [1], [1, 2], 0) + assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, cheb.chebfit, [1], [1], [-1,]) + assert_raises(ValueError, cheb.chebfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, cheb.chebfit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = cheb.chebfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(cheb.chebval(x, coef3), y) + coef3 = cheb.chebfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(cheb.chebval(x, coef3), y) + # + coef4 = cheb.chebfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(cheb.chebval(x, coef4), y) + coef4 = cheb.chebfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(cheb.chebval(x, coef4), y) + # check things still work if deg is not in strict increasing + coef4 = cheb.chebfit(x, y, [2, 3, 4, 1, 0]) + assert_equal(len(coef4), 5) + assert_almost_equal(cheb.chebval(x, coef4), y) + # + coef2d = cheb.chebfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = cheb.chebfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = cheb.chebfit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = cheb.chebfit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. + x = [1, 1j, -1, -1j] + assert_almost_equal(cheb.chebfit(x, x, 1), [0, 1]) + assert_almost_equal(cheb.chebfit(x, x, [0, 1]), [0, 1]) + # test fitting only even polynomials + x = np.linspace(-1, 1) + y = f2(x) + coef1 = cheb.chebfit(x, y, 4) + assert_almost_equal(cheb.chebval(x, coef1), y) + coef2 = cheb.chebfit(x, y, [0, 2, 4]) + assert_almost_equal(cheb.chebval(x, coef2), y) + assert_almost_equal(coef1, coef2) + + +class TestInterpolate: + + def f(self, x): + return x * (x - 1) * (x - 2) + + def test_raises(self): + assert_raises(ValueError, cheb.chebinterpolate, self.f, -1) + assert_raises(TypeError, cheb.chebinterpolate, self.f, 10.) + + def test_dimensions(self): + for deg in range(1, 5): + assert_(cheb.chebinterpolate(self.f, deg).shape == (deg + 1,)) + + def test_approximation(self): + + def powx(x, p): + return x**p + + x = np.linspace(-1, 1, 10) + for deg in range(10): + for p in range(deg + 1): + c = cheb.chebinterpolate(powx, deg, (p,)) + assert_almost_equal(cheb.chebval(x, c), powx(x, p), decimal=12) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, cheb.chebcompanion, []) + assert_raises(ValueError, cheb.chebcompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0] * i + [1] + assert_(cheb.chebcompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(cheb.chebcompanion([1, 2])[0, 0] == -.5) + + +class TestGauss: + + def test_100(self): + x, w = cheb.chebgauss(100) + + # test orthogonality. Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing. + v = cheb.chebvander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1 / np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = np.pi + assert_almost_equal(w.sum(), tgt) + + +class TestMisc: + + def test_chebfromroots(self): + res = cheb.chebfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2]) + tgt = [0] * i + [1] + res = cheb.chebfromroots(roots) * 2**(i - 1) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebroots(self): + assert_almost_equal(cheb.chebroots([1]), []) + assert_almost_equal(cheb.chebroots([1, 2]), [-.5]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = cheb.chebroots(cheb.chebfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebtrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, cheb.chebtrim, coef, -1) + + # Test results + assert_equal(cheb.chebtrim(coef), coef[:-1]) + assert_equal(cheb.chebtrim(coef, 1), coef[:-3]) + assert_equal(cheb.chebtrim(coef, 2), [0]) + + def test_chebline(self): + assert_equal(cheb.chebline(3, 4), [3, 4]) + + def test_cheb2poly(self): + for i in range(10): + assert_almost_equal(cheb.cheb2poly([0] * i + [1]), Tlist[i]) + + def test_poly2cheb(self): + for i in range(10): + assert_almost_equal(cheb.poly2cheb(Tlist[i]), [0] * i + [1]) + + def test_weight(self): + x = np.linspace(-1, 1, 11)[1:-1] + tgt = 1. / (np.sqrt(1 + x) * np.sqrt(1 - x)) + res = cheb.chebweight(x) + assert_almost_equal(res, tgt) + + def test_chebpts1(self): + # test exceptions + assert_raises(ValueError, cheb.chebpts1, 1.5) + assert_raises(ValueError, cheb.chebpts1, 0) + + # test points + tgt = [0] + assert_almost_equal(cheb.chebpts1(1), tgt) + tgt = [-0.70710678118654746, 0.70710678118654746] + assert_almost_equal(cheb.chebpts1(2), tgt) + tgt = [-0.86602540378443871, 0, 0.86602540378443871] + assert_almost_equal(cheb.chebpts1(3), tgt) + tgt = [-0.9238795325, -0.3826834323, 0.3826834323, 0.9238795325] + assert_almost_equal(cheb.chebpts1(4), tgt) + + def test_chebpts2(self): + # test exceptions + assert_raises(ValueError, cheb.chebpts2, 1.5) + assert_raises(ValueError, cheb.chebpts2, 1) + + # test points + tgt = [-1, 1] + assert_almost_equal(cheb.chebpts2(2), tgt) + tgt = [-1, 0, 1] + assert_almost_equal(cheb.chebpts2(3), tgt) + tgt = [-1, -0.5, .5, 1] + assert_almost_equal(cheb.chebpts2(4), tgt) + tgt = [-1.0, -0.707106781187, 0, 0.707106781187, 1.0] + assert_almost_equal(cheb.chebpts2(5), tgt) diff --git a/local_packages/numpy/polynomial/tests/test_classes.py b/local_packages/numpy/polynomial/tests/test_classes.py new file mode 100644 index 0000000..156dccf --- /dev/null +++ b/local_packages/numpy/polynomial/tests/test_classes.py @@ -0,0 +1,613 @@ +"""Test inter-conversion of different polynomial classes. + +This tests the convert and cast methods of all the polynomial classes. + +""" +import operator as op +from numbers import Number + +import pytest + +import numpy as np +from numpy.exceptions import RankWarning +from numpy.polynomial import ( + Chebyshev, + Hermite, + HermiteE, + Laguerre, + Legendre, + Polynomial, +) +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises + +# +# fixtures +# + +classes = ( + Polynomial, Legendre, Chebyshev, Laguerre, + Hermite, HermiteE + ) +classids = tuple(cls.__name__ for cls in classes) + +@pytest.fixture(params=classes, ids=classids) +def Poly(request): + return request.param + + +# +# helper functions +# +random = np.random.random + + +def assert_poly_almost_equal(p1, p2, msg=""): + try: + assert_(np.all(p1.domain == p2.domain)) + assert_(np.all(p1.window == p2.window)) + assert_almost_equal(p1.coef, p2.coef) + except AssertionError: + msg = f"Result: {p1}\nTarget: {p2}" + raise AssertionError(msg) + + +# +# Test conversion methods that depend on combinations of two classes. +# + +Poly1 = Poly +Poly2 = Poly + + +def test_conversion(Poly1, Poly2): + x = np.linspace(0, 1, 10) + coef = random((3,)) + + d1 = Poly1.domain + random((2,)) * .25 + w1 = Poly1.window + random((2,)) * .25 + p1 = Poly1(coef, domain=d1, window=w1) + + d2 = Poly2.domain + random((2,)) * .25 + w2 = Poly2.window + random((2,)) * .25 + p2 = p1.convert(kind=Poly2, domain=d2, window=w2) + + assert_almost_equal(p2.domain, d2) + assert_almost_equal(p2.window, w2) + assert_almost_equal(p2(x), p1(x)) + + +def test_cast(Poly1, Poly2): + x = np.linspace(0, 1, 10) + coef = random((3,)) + + d1 = Poly1.domain + random((2,)) * .25 + w1 = Poly1.window + random((2,)) * .25 + p1 = Poly1(coef, domain=d1, window=w1) + + d2 = Poly2.domain + random((2,)) * .25 + w2 = Poly2.window + random((2,)) * .25 + p2 = Poly2.cast(p1, domain=d2, window=w2) + + assert_almost_equal(p2.domain, d2) + assert_almost_equal(p2.window, w2) + assert_almost_equal(p2(x), p1(x)) + + +# +# test methods that depend on one class +# + + +def test_identity(Poly): + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 + x = np.linspace(d[0], d[1], 11) + p = Poly.identity(domain=d, window=w) + assert_equal(p.domain, d) + assert_equal(p.window, w) + assert_almost_equal(p(x), x) + + +def test_basis(Poly): + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 + p = Poly.basis(5, domain=d, window=w) + assert_equal(p.domain, d) + assert_equal(p.window, w) + assert_equal(p.coef, [0] * 5 + [1]) + + +def test_fromroots(Poly): + # check that requested roots are zeros of a polynomial + # of correct degree, domain, and window. + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 + r = random((5,)) + p1 = Poly.fromroots(r, domain=d, window=w) + assert_equal(p1.degree(), len(r)) + assert_equal(p1.domain, d) + assert_equal(p1.window, w) + assert_almost_equal(p1(r), 0) + + # check that polynomial is monic + pdom = Polynomial.domain + pwin = Polynomial.window + p2 = Polynomial.cast(p1, domain=pdom, window=pwin) + assert_almost_equal(p2.coef[-1], 1) + + +def test_bad_conditioned_fit(Poly): + + x = [0., 0., 1.] + y = [1., 2., 3.] + + # check RankWarning is raised + with pytest.warns(RankWarning) as record: + Poly.fit(x, y, 2) + assert record[0].message.args[0] == "The fit may be poorly conditioned" + + +def test_fit(Poly): + + def f(x): + return x * (x - 1) * (x - 2) + x = np.linspace(0, 3) + y = f(x) + + # check default value of domain and window + p = Poly.fit(x, y, 3) + assert_almost_equal(p.domain, [0, 3]) + assert_almost_equal(p(x), y) + assert_equal(p.degree(), 3) + + # check with given domains and window + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 + p = Poly.fit(x, y, 3, domain=d, window=w) + assert_almost_equal(p(x), y) + assert_almost_equal(p.domain, d) + assert_almost_equal(p.window, w) + p = Poly.fit(x, y, [0, 1, 2, 3], domain=d, window=w) + assert_almost_equal(p(x), y) + assert_almost_equal(p.domain, d) + assert_almost_equal(p.window, w) + + # check with class domain default + p = Poly.fit(x, y, 3, []) + assert_equal(p.domain, Poly.domain) + assert_equal(p.window, Poly.window) + p = Poly.fit(x, y, [0, 1, 2, 3], []) + assert_equal(p.domain, Poly.domain) + assert_equal(p.window, Poly.window) + + # check that fit accepts weights. + w = np.zeros_like(x) + z = y + random(y.shape) * .25 + w[::2] = 1 + p1 = Poly.fit(x[::2], z[::2], 3) + p2 = Poly.fit(x, z, 3, w=w) + p3 = Poly.fit(x, z, [0, 1, 2, 3], w=w) + assert_almost_equal(p1(x), p2(x)) + assert_almost_equal(p2(x), p3(x)) + + +def test_equal(Poly): + p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3]) + p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3]) + p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3]) + p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2]) + assert_(p1 == p1) + assert_(not p1 == p2) + assert_(not p1 == p3) + assert_(not p1 == p4) + + +def test_not_equal(Poly): + p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3]) + p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3]) + p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3]) + p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2]) + assert_(not p1 != p1) + assert_(p1 != p2) + assert_(p1 != p3) + assert_(p1 != p4) + + +def test_add(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = p1 + p2 + assert_poly_almost_equal(p2 + p1, p3) + assert_poly_almost_equal(p1 + c2, p3) + assert_poly_almost_equal(c2 + p1, p3) + assert_poly_almost_equal(p1 + tuple(c2), p3) + assert_poly_almost_equal(tuple(c2) + p1, p3) + assert_poly_almost_equal(p1 + np.array(c2), p3) + assert_poly_almost_equal(np.array(c2) + p1, p3) + assert_raises(TypeError, op.add, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.add, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.add, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.add, p1, Polynomial([0])) + + +def test_sub(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = p1 - p2 + assert_poly_almost_equal(p2 - p1, -p3) + assert_poly_almost_equal(p1 - c2, p3) + assert_poly_almost_equal(c2 - p1, -p3) + assert_poly_almost_equal(p1 - tuple(c2), p3) + assert_poly_almost_equal(tuple(c2) - p1, -p3) + assert_poly_almost_equal(p1 - np.array(c2), p3) + assert_poly_almost_equal(np.array(c2) - p1, -p3) + assert_raises(TypeError, op.sub, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.sub, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.sub, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.sub, p1, Polynomial([0])) + + +def test_mul(Poly): + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = p1 * p2 + assert_poly_almost_equal(p2 * p1, p3) + assert_poly_almost_equal(p1 * c2, p3) + assert_poly_almost_equal(c2 * p1, p3) + assert_poly_almost_equal(p1 * tuple(c2), p3) + assert_poly_almost_equal(tuple(c2) * p1, p3) + assert_poly_almost_equal(p1 * np.array(c2), p3) + assert_poly_almost_equal(np.array(c2) * p1, p3) + assert_poly_almost_equal(p1 * 2, p1 * Poly([2])) + assert_poly_almost_equal(2 * p1, p1 * Poly([2])) + assert_raises(TypeError, op.mul, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.mul, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.mul, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.mul, p1, Polynomial([0])) + + +def test_floordiv(Poly): + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + c3 = list(random((2,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = Poly(c3) + p4 = p1 * p2 + p3 + c4 = list(p4.coef) + assert_poly_almost_equal(p4 // p2, p1) + assert_poly_almost_equal(p4 // c2, p1) + assert_poly_almost_equal(c4 // p2, p1) + assert_poly_almost_equal(p4 // tuple(c2), p1) + assert_poly_almost_equal(tuple(c4) // p2, p1) + assert_poly_almost_equal(p4 // np.array(c2), p1) + assert_poly_almost_equal(np.array(c4) // p2, p1) + assert_poly_almost_equal(2 // p2, Poly([0])) + assert_poly_almost_equal(p2 // 2, 0.5 * p2) + assert_raises( + TypeError, op.floordiv, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises( + TypeError, op.floordiv, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.floordiv, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.floordiv, p1, Polynomial([0])) + + +def test_truediv(Poly): + # true division is valid only if the denominator is a Number and + # not a python bool. + p1 = Poly([1, 2, 3]) + p2 = p1 * 5 + + for stype in np.ScalarType: + if not issubclass(stype, Number) or issubclass(stype, bool): + continue + s = stype(5) + assert_poly_almost_equal(op.truediv(p2, s), p1) + assert_raises(TypeError, op.truediv, s, p2) + for stype in (int, float): + s = stype(5) + assert_poly_almost_equal(op.truediv(p2, s), p1) + assert_raises(TypeError, op.truediv, s, p2) + for stype in [complex]: + s = stype(5, 0) + assert_poly_almost_equal(op.truediv(p2, s), p1) + assert_raises(TypeError, op.truediv, s, p2) + for s in [(), [], {}, False, np.array([1])]: + assert_raises(TypeError, op.truediv, p2, s) + assert_raises(TypeError, op.truediv, s, p2) + for ptype in classes: + assert_raises(TypeError, op.truediv, p2, ptype(1)) + + +def test_mod(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + c3 = list(random((2,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = Poly(c3) + p4 = p1 * p2 + p3 + c4 = list(p4.coef) + assert_poly_almost_equal(p4 % p2, p3) + assert_poly_almost_equal(p4 % c2, p3) + assert_poly_almost_equal(c4 % p2, p3) + assert_poly_almost_equal(p4 % tuple(c2), p3) + assert_poly_almost_equal(tuple(c4) % p2, p3) + assert_poly_almost_equal(p4 % np.array(c2), p3) + assert_poly_almost_equal(np.array(c4) % p2, p3) + assert_poly_almost_equal(2 % p2, Poly([2])) + assert_poly_almost_equal(p2 % 2, Poly([0])) + assert_raises(TypeError, op.mod, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.mod, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.mod, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.mod, p1, Polynomial([0])) + + +def test_divmod(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + c3 = list(random((2,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = Poly(c3) + p4 = p1 * p2 + p3 + c4 = list(p4.coef) + quo, rem = divmod(p4, p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p4, c2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(c4, p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p4, tuple(c2)) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(tuple(c4), p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p4, np.array(c2)) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(np.array(c4), p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p2, 2) + assert_poly_almost_equal(quo, 0.5 * p2) + assert_poly_almost_equal(rem, Poly([0])) + quo, rem = divmod(2, p2) + assert_poly_almost_equal(quo, Poly([0])) + assert_poly_almost_equal(rem, Poly([2])) + assert_raises(TypeError, divmod, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, divmod, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, divmod, p1, Chebyshev([0])) + else: + assert_raises(TypeError, divmod, p1, Polynomial([0])) + + +def test_roots(Poly): + d = Poly.domain * 1.25 + .25 + w = Poly.window + tgt = np.linspace(d[0], d[1], 5) + res = np.sort(Poly.fromroots(tgt, domain=d, window=w).roots()) + assert_almost_equal(res, tgt) + # default domain and window + res = np.sort(Poly.fromroots(tgt).roots()) + assert_almost_equal(res, tgt) + + +def test_degree(Poly): + p = Poly.basis(5) + assert_equal(p.degree(), 5) + + +def test_copy(Poly): + p1 = Poly.basis(5) + p2 = p1.copy() + assert_(p1 == p2) + assert_(p1 is not p2) + assert_(p1.coef is not p2.coef) + assert_(p1.domain is not p2.domain) + assert_(p1.window is not p2.window) + + +def test_integ(Poly): + P = Polynomial + # Check defaults + p0 = Poly.cast(P([1 * 2, 2 * 3, 3 * 4])) + p1 = P.cast(p0.integ()) + p2 = P.cast(p0.integ(2)) + assert_poly_almost_equal(p1, P([0, 2, 3, 4])) + assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1])) + # Check with k + p0 = Poly.cast(P([1 * 2, 2 * 3, 3 * 4])) + p1 = P.cast(p0.integ(k=1)) + p2 = P.cast(p0.integ(2, k=[1, 1])) + assert_poly_almost_equal(p1, P([1, 2, 3, 4])) + assert_poly_almost_equal(p2, P([1, 1, 1, 1, 1])) + # Check with lbnd + p0 = Poly.cast(P([1 * 2, 2 * 3, 3 * 4])) + p1 = P.cast(p0.integ(lbnd=1)) + p2 = P.cast(p0.integ(2, lbnd=1)) + assert_poly_almost_equal(p1, P([-9, 2, 3, 4])) + assert_poly_almost_equal(p2, P([6, -9, 1, 1, 1])) + # Check scaling + d = 2 * Poly.domain + p0 = Poly.cast(P([1 * 2, 2 * 3, 3 * 4]), domain=d) + p1 = P.cast(p0.integ()) + p2 = P.cast(p0.integ(2)) + assert_poly_almost_equal(p1, P([0, 2, 3, 4])) + assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1])) + + +def test_deriv(Poly): + # Check that the derivative is the inverse of integration. It is + # assumes that the integration has been checked elsewhere. + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 + p1 = Poly([1, 2, 3], domain=d, window=w) + p2 = p1.integ(2, k=[1, 2]) + p3 = p1.integ(1, k=[1]) + assert_almost_equal(p2.deriv(1).coef, p3.coef) + assert_almost_equal(p2.deriv(2).coef, p1.coef) + # default domain and window + p1 = Poly([1, 2, 3]) + p2 = p1.integ(2, k=[1, 2]) + p3 = p1.integ(1, k=[1]) + assert_almost_equal(p2.deriv(1).coef, p3.coef) + assert_almost_equal(p2.deriv(2).coef, p1.coef) + + +def test_linspace(Poly): + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 + p = Poly([1, 2, 3], domain=d, window=w) + # check default domain + xtgt = np.linspace(d[0], d[1], 20) + ytgt = p(xtgt) + xres, yres = p.linspace(20) + assert_almost_equal(xres, xtgt) + assert_almost_equal(yres, ytgt) + # check specified domain + xtgt = np.linspace(0, 2, 20) + ytgt = p(xtgt) + xres, yres = p.linspace(20, domain=[0, 2]) + assert_almost_equal(xres, xtgt) + assert_almost_equal(yres, ytgt) + + +def test_pow(Poly): + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 + tgt = Poly([1], domain=d, window=w) + tst = Poly([1, 2, 3], domain=d, window=w) + for i in range(5): + assert_poly_almost_equal(tst**i, tgt) + tgt = tgt * tst + # default domain and window + tgt = Poly([1]) + tst = Poly([1, 2, 3]) + for i in range(5): + assert_poly_almost_equal(tst**i, tgt) + tgt = tgt * tst + # check error for invalid powers + assert_raises(ValueError, op.pow, tgt, 1.5) + assert_raises(ValueError, op.pow, tgt, -1) + + +def test_call(Poly): + P = Polynomial + d = Poly.domain + x = np.linspace(d[0], d[1], 11) + + # Check defaults + p = Poly.cast(P([1, 2, 3])) + tgt = 1 + x * (2 + 3 * x) + res = p(x) + assert_almost_equal(res, tgt) + + +def test_call_with_list(Poly): + p = Poly([1, 2, 3]) + x = [-1, 0, 2] + res = p(x) + assert_equal(res, p(np.array(x))) + + +def test_cutdeg(Poly): + p = Poly([1, 2, 3]) + assert_raises(ValueError, p.cutdeg, .5) + assert_raises(ValueError, p.cutdeg, -1) + assert_equal(len(p.cutdeg(3)), 3) + assert_equal(len(p.cutdeg(2)), 3) + assert_equal(len(p.cutdeg(1)), 2) + assert_equal(len(p.cutdeg(0)), 1) + + +def test_truncate(Poly): + p = Poly([1, 2, 3]) + assert_raises(ValueError, p.truncate, .5) + assert_raises(ValueError, p.truncate, 0) + assert_equal(len(p.truncate(4)), 3) + assert_equal(len(p.truncate(3)), 3) + assert_equal(len(p.truncate(2)), 2) + assert_equal(len(p.truncate(1)), 1) + + +def test_trim(Poly): + c = [1, 1e-6, 1e-12, 0] + p = Poly(c) + assert_equal(p.trim().coef, c[:3]) + assert_equal(p.trim(1e-10).coef, c[:2]) + assert_equal(p.trim(1e-5).coef, c[:1]) + + +def test_mapparms(Poly): + # check with defaults. Should be identity. + d = Poly.domain + w = Poly.window + p = Poly([1], domain=d, window=w) + assert_almost_equal([0, 1], p.mapparms()) + # + w = 2 * d + 1 + p = Poly([1], domain=d, window=w) + assert_almost_equal([1, 2], p.mapparms()) + + +def test_ufunc_override(Poly): + p = Poly([1, 2, 3]) + x = np.ones(3) + assert_raises(TypeError, np.add, p, x) + assert_raises(TypeError, np.add, x, p) + + +# +# Test class method that only exists for some classes +# + + +class TestInterpolate: + + def f(self, x): + return x * (x - 1) * (x - 2) + + def test_raises(self): + assert_raises(ValueError, Chebyshev.interpolate, self.f, -1) + assert_raises(TypeError, Chebyshev.interpolate, self.f, 10.) + + def test_dimensions(self): + for deg in range(1, 5): + assert_(Chebyshev.interpolate(self.f, deg).degree() == deg) + + def test_approximation(self): + + def powx(x, p): + return x**p + + x = np.linspace(0, 2, 10) + for deg in range(10): + for t in range(deg + 1): + p = Chebyshev.interpolate(powx, deg, domain=[0, 2], args=(t,)) + assert_almost_equal(p(x), powx(x, t), decimal=11) diff --git a/local_packages/numpy/polynomial/tests/test_hermite.py b/local_packages/numpy/polynomial/tests/test_hermite.py new file mode 100644 index 0000000..a289ba0 --- /dev/null +++ b/local_packages/numpy/polynomial/tests/test_hermite.py @@ -0,0 +1,553 @@ +"""Tests for hermite module. + +""" +from functools import reduce + +import numpy as np +import numpy.polynomial.hermite as herm +from numpy.polynomial.polynomial import polyval +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises + +H0 = np.array([1]) +H1 = np.array([0, 2]) +H2 = np.array([-2, 0, 4]) +H3 = np.array([0, -12, 0, 8]) +H4 = np.array([12, 0, -48, 0, 16]) +H5 = np.array([0, 120, 0, -160, 0, 32]) +H6 = np.array([-120, 0, 720, 0, -480, 0, 64]) +H7 = np.array([0, -1680, 0, 3360, 0, -1344, 0, 128]) +H8 = np.array([1680, 0, -13440, 0, 13440, 0, -3584, 0, 256]) +H9 = np.array([0, 30240, 0, -80640, 0, 48384, 0, -9216, 0, 512]) + +Hlist = [H0, H1, H2, H3, H4, H5, H6, H7, H8, H9] + + +def trim(x): + return herm.hermtrim(x, tol=1e-6) + + +class TestConstants: + + def test_hermdomain(self): + assert_equal(herm.hermdomain, [-1, 1]) + + def test_hermzero(self): + assert_equal(herm.hermzero, [0]) + + def test_hermone(self): + assert_equal(herm.hermone, [1]) + + def test_hermx(self): + assert_equal(herm.hermx, [0, .5]) + + +class TestArithmetic: + x = np.linspace(-3, 3, 100) + + def test_hermadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = herm.hermadd([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermsub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = herm.hermsub([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermmulx(self): + assert_equal(herm.hermmulx([0]), [0]) + assert_equal(herm.hermmulx([1]), [0, .5]) + for i in range(1, 5): + ser = [0] * i + [1] + tgt = [0] * (i - 1) + [i, 0, .5] + assert_equal(herm.hermmulx(ser), tgt) + + def test_hermmul(self): + # check values of result + for i in range(5): + pol1 = [0] * i + [1] + val1 = herm.hermval(self.x, pol1) + for j in range(5): + msg = f"At i={i}, j={j}" + pol2 = [0] * j + [1] + val2 = herm.hermval(self.x, pol2) + pol3 = herm.hermmul(pol1, pol2) + val3 = herm.hermval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1 * val2, err_msg=msg) + + def test_hermdiv(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0] * i + [1] + cj = [0] * j + [1] + tgt = herm.hermadd(ci, cj) + quo, rem = herm.hermdiv(tgt, ci) + res = herm.hermadd(herm.hermmul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermpow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(herm.hermmul, [c] * j, np.array([1])) + res = herm.hermpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([2.5, 1., .75]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_hermval(self): + # check empty input + assert_equal(herm.hermval([], [1]).size, 0) + + # check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Hlist] + for i in range(10): + msg = f"At i={i}" + tgt = y[i] + res = herm.hermval(x, [0] * i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + # check that shape is preserved + for i in range(3): + dims = [2] * i + x = np.zeros(dims) + assert_equal(herm.hermval(x, [1]).shape, dims) + assert_equal(herm.hermval(x, [1, 0]).shape, dims) + assert_equal(herm.hermval(x, [1, 0, 0]).shape, dims) + + def test_hermval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises(ValueError, herm.hermval2d, x1, x2[:2], self.c2d) + + # test values + tgt = y1 * y2 + res = herm.hermval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = herm.hermval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_hermval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises(ValueError, herm.hermval3d, x1, x2, x3[:2], self.c3d) + + # test values + tgt = y1 * y2 * y3 + res = herm.hermval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = herm.hermval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_hermgrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j->ij', y1, y2) + res = herm.hermgrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = herm.hermgrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3) * 2) + + def test_hermgrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = herm.hermgrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = herm.hermgrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3) * 3) + + +class TestIntegral: + + def test_hermint(self): + # check exceptions + assert_raises(TypeError, herm.hermint, [0], .5) + assert_raises(ValueError, herm.hermint, [0], -1) + assert_raises(ValueError, herm.hermint, [0], 1, [0, 0]) + assert_raises(ValueError, herm.hermint, [0], lbnd=[0]) + assert_raises(ValueError, herm.hermint, [0], scl=[0]) + assert_raises(TypeError, herm.hermint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0] * (i - 2) + [1] + res = herm.hermint([0], m=i, k=k) + assert_almost_equal(res, [0, .5]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [1 / scl] + hermpol = herm.poly2herm(pol) + hermint = herm.hermint(hermpol, m=1, k=[i]) + res = herm.herm2poly(hermint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + hermpol = herm.poly2herm(pol) + hermint = herm.hermint(hermpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(herm.hermval(-1, hermint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [2 / scl] + hermpol = herm.poly2herm(pol) + hermint = herm.hermint(hermpol, m=1, k=[i], scl=2) + res = herm.herm2poly(hermint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1) + res = herm.hermint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1, k=[k]) + res = herm.hermint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1, k=[k], lbnd=-1) + res = herm.hermint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1, k=[k], scl=2) + res = herm.hermint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herm.hermint(c) for c in c2d.T]).T + res = herm.hermint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herm.hermint(c) for c in c2d]) + res = herm.hermint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herm.hermint(c, k=3) for c in c2d]) + res = herm.hermint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_hermder(self): + # check exceptions + assert_raises(TypeError, herm.hermder, [0], .5) + assert_raises(ValueError, herm.hermder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0] * i + [1] + res = herm.hermder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = herm.hermder(herm.hermint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = herm.hermder(herm.hermint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herm.hermder(c) for c in c2d.T]).T + res = herm.hermder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herm.hermder(c) for c in c2d]) + res = herm.hermder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + + def test_hermvander(self): + # check for 1d x + x = np.arange(3) + v = herm.hermvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], herm.hermval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = herm.hermvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], herm.hermval(x, coef)) + + def test_hermvander2d(self): + # also tests hermval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = herm.hermvander2d(x1, x2, [1, 2]) + tgt = herm.hermval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = herm.hermvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_hermvander3d(self): + # also tests hermval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = herm.hermvander3d(x1, x2, x3, [1, 2, 3]) + tgt = herm.hermval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = herm.hermvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class TestFitting: + + def test_hermfit(self): + def f(x): + return x * (x - 1) * (x - 2) + + def f2(x): + return x**4 + x**2 + 1 + + # Test exceptions + assert_raises(ValueError, herm.hermfit, [1], [1], -1) + assert_raises(TypeError, herm.hermfit, [[1]], [1], 0) + assert_raises(TypeError, herm.hermfit, [], [1], 0) + assert_raises(TypeError, herm.hermfit, [1], [[[1]]], 0) + assert_raises(TypeError, herm.hermfit, [1, 2], [1], 0) + assert_raises(TypeError, herm.hermfit, [1], [1, 2], 0) + assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, herm.hermfit, [1], [1], [-1,]) + assert_raises(ValueError, herm.hermfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, herm.hermfit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = herm.hermfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(herm.hermval(x, coef3), y) + coef3 = herm.hermfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(herm.hermval(x, coef3), y) + # + coef4 = herm.hermfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(herm.hermval(x, coef4), y) + coef4 = herm.hermfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(herm.hermval(x, coef4), y) + # check things still work if deg is not in strict increasing + coef4 = herm.hermfit(x, y, [2, 3, 4, 1, 0]) + assert_equal(len(coef4), 5) + assert_almost_equal(herm.hermval(x, coef4), y) + # + coef2d = herm.hermfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = herm.hermfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = herm.hermfit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = herm.hermfit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. + x = [1, 1j, -1, -1j] + assert_almost_equal(herm.hermfit(x, x, 1), [0, .5]) + assert_almost_equal(herm.hermfit(x, x, [0, 1]), [0, .5]) + # test fitting only even Legendre polynomials + x = np.linspace(-1, 1) + y = f2(x) + coef1 = herm.hermfit(x, y, 4) + assert_almost_equal(herm.hermval(x, coef1), y) + coef2 = herm.hermfit(x, y, [0, 2, 4]) + assert_almost_equal(herm.hermval(x, coef2), y) + assert_almost_equal(coef1, coef2) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, herm.hermcompanion, []) + assert_raises(ValueError, herm.hermcompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0] * i + [1] + assert_(herm.hermcompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(herm.hermcompanion([1, 2])[0, 0] == -.25) + + +class TestGauss: + + def test_100(self): + x, w = herm.hermgauss(100) + + # test orthogonality. Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing. + v = herm.hermvander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1 / np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = np.sqrt(np.pi) + assert_almost_equal(w.sum(), tgt) + + +class TestMisc: + + def test_hermfromroots(self): + res = herm.hermfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2]) + pol = herm.hermfromroots(roots) + res = herm.hermval(roots, pol) + tgt = 0 + assert_(len(pol) == i + 1) + assert_almost_equal(herm.herm2poly(pol)[-1], 1) + assert_almost_equal(res, tgt) + + def test_hermroots(self): + assert_almost_equal(herm.hermroots([1]), []) + assert_almost_equal(herm.hermroots([1, 1]), [-.5]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = herm.hermroots(herm.hermfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermtrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, herm.hermtrim, coef, -1) + + # Test results + assert_equal(herm.hermtrim(coef), coef[:-1]) + assert_equal(herm.hermtrim(coef, 1), coef[:-3]) + assert_equal(herm.hermtrim(coef, 2), [0]) + + def test_hermline(self): + assert_equal(herm.hermline(3, 4), [3, 2]) + + def test_herm2poly(self): + for i in range(10): + assert_almost_equal(herm.herm2poly([0] * i + [1]), Hlist[i]) + + def test_poly2herm(self): + for i in range(10): + assert_almost_equal(herm.poly2herm(Hlist[i]), [0] * i + [1]) + + def test_weight(self): + x = np.linspace(-5, 5, 11) + tgt = np.exp(-x**2) + res = herm.hermweight(x) + assert_almost_equal(res, tgt) diff --git a/local_packages/numpy/polynomial/tests/test_hermite_e.py b/local_packages/numpy/polynomial/tests/test_hermite_e.py new file mode 100644 index 0000000..233dfb2 --- /dev/null +++ b/local_packages/numpy/polynomial/tests/test_hermite_e.py @@ -0,0 +1,554 @@ +"""Tests for hermite_e module. + +""" +from functools import reduce + +import numpy as np +import numpy.polynomial.hermite_e as herme +from numpy.polynomial.polynomial import polyval +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises + +He0 = np.array([1]) +He1 = np.array([0, 1]) +He2 = np.array([-1, 0, 1]) +He3 = np.array([0, -3, 0, 1]) +He4 = np.array([3, 0, -6, 0, 1]) +He5 = np.array([0, 15, 0, -10, 0, 1]) +He6 = np.array([-15, 0, 45, 0, -15, 0, 1]) +He7 = np.array([0, -105, 0, 105, 0, -21, 0, 1]) +He8 = np.array([105, 0, -420, 0, 210, 0, -28, 0, 1]) +He9 = np.array([0, 945, 0, -1260, 0, 378, 0, -36, 0, 1]) + +Helist = [He0, He1, He2, He3, He4, He5, He6, He7, He8, He9] + + +def trim(x): + return herme.hermetrim(x, tol=1e-6) + + +class TestConstants: + + def test_hermedomain(self): + assert_equal(herme.hermedomain, [-1, 1]) + + def test_hermezero(self): + assert_equal(herme.hermezero, [0]) + + def test_hermeone(self): + assert_equal(herme.hermeone, [1]) + + def test_hermex(self): + assert_equal(herme.hermex, [0, 1]) + + +class TestArithmetic: + x = np.linspace(-3, 3, 100) + + def test_hermeadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = herme.hermeadd([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermesub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = herme.hermesub([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermemulx(self): + assert_equal(herme.hermemulx([0]), [0]) + assert_equal(herme.hermemulx([1]), [0, 1]) + for i in range(1, 5): + ser = [0] * i + [1] + tgt = [0] * (i - 1) + [i, 0, 1] + assert_equal(herme.hermemulx(ser), tgt) + + def test_hermemul(self): + # check values of result + for i in range(5): + pol1 = [0] * i + [1] + val1 = herme.hermeval(self.x, pol1) + for j in range(5): + msg = f"At i={i}, j={j}" + pol2 = [0] * j + [1] + val2 = herme.hermeval(self.x, pol2) + pol3 = herme.hermemul(pol1, pol2) + val3 = herme.hermeval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1 * val2, err_msg=msg) + + def test_hermediv(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0] * i + [1] + cj = [0] * j + [1] + tgt = herme.hermeadd(ci, cj) + quo, rem = herme.hermediv(tgt, ci) + res = herme.hermeadd(herme.hermemul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermepow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(herme.hermemul, [c] * j, np.array([1])) + res = herme.hermepow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([4., 2., 3.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_hermeval(self): + # check empty input + assert_equal(herme.hermeval([], [1]).size, 0) + + # check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Helist] + for i in range(10): + msg = f"At i={i}" + tgt = y[i] + res = herme.hermeval(x, [0] * i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + # check that shape is preserved + for i in range(3): + dims = [2] * i + x = np.zeros(dims) + assert_equal(herme.hermeval(x, [1]).shape, dims) + assert_equal(herme.hermeval(x, [1, 0]).shape, dims) + assert_equal(herme.hermeval(x, [1, 0, 0]).shape, dims) + + def test_hermeval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises(ValueError, herme.hermeval2d, x1, x2[:2], self.c2d) + + # test values + tgt = y1 * y2 + res = herme.hermeval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = herme.hermeval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_hermeval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises(ValueError, herme.hermeval3d, x1, x2, x3[:2], self.c3d) + + # test values + tgt = y1 * y2 * y3 + res = herme.hermeval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = herme.hermeval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_hermegrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j->ij', y1, y2) + res = herme.hermegrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = herme.hermegrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3) * 2) + + def test_hermegrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = herme.hermegrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = herme.hermegrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3) * 3) + + +class TestIntegral: + + def test_hermeint(self): + # check exceptions + assert_raises(TypeError, herme.hermeint, [0], .5) + assert_raises(ValueError, herme.hermeint, [0], -1) + assert_raises(ValueError, herme.hermeint, [0], 1, [0, 0]) + assert_raises(ValueError, herme.hermeint, [0], lbnd=[0]) + assert_raises(ValueError, herme.hermeint, [0], scl=[0]) + assert_raises(TypeError, herme.hermeint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0] * (i - 2) + [1] + res = herme.hermeint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [1 / scl] + hermepol = herme.poly2herme(pol) + hermeint = herme.hermeint(hermepol, m=1, k=[i]) + res = herme.herme2poly(hermeint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + hermepol = herme.poly2herme(pol) + hermeint = herme.hermeint(hermepol, m=1, k=[i], lbnd=-1) + assert_almost_equal(herme.hermeval(-1, hermeint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [2 / scl] + hermepol = herme.poly2herme(pol) + hermeint = herme.hermeint(hermepol, m=1, k=[i], scl=2) + res = herme.herme2poly(hermeint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1) + res = herme.hermeint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1, k=[k]) + res = herme.hermeint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1, k=[k], lbnd=-1) + res = herme.hermeint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1, k=[k], scl=2) + res = herme.hermeint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermeint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herme.hermeint(c) for c in c2d.T]).T + res = herme.hermeint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herme.hermeint(c) for c in c2d]) + res = herme.hermeint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herme.hermeint(c, k=3) for c in c2d]) + res = herme.hermeint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_hermeder(self): + # check exceptions + assert_raises(TypeError, herme.hermeder, [0], .5) + assert_raises(ValueError, herme.hermeder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0] * i + [1] + res = herme.hermeder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = herme.hermeder(herme.hermeint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = herme.hermeder( + herme.hermeint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermeder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herme.hermeder(c) for c in c2d.T]).T + res = herme.hermeder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herme.hermeder(c) for c in c2d]) + res = herme.hermeder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + + def test_hermevander(self): + # check for 1d x + x = np.arange(3) + v = herme.hermevander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], herme.hermeval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = herme.hermevander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], herme.hermeval(x, coef)) + + def test_hermevander2d(self): + # also tests hermeval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = herme.hermevander2d(x1, x2, [1, 2]) + tgt = herme.hermeval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = herme.hermevander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_hermevander3d(self): + # also tests hermeval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = herme.hermevander3d(x1, x2, x3, [1, 2, 3]) + tgt = herme.hermeval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = herme.hermevander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class TestFitting: + + def test_hermefit(self): + def f(x): + return x * (x - 1) * (x - 2) + + def f2(x): + return x**4 + x**2 + 1 + + # Test exceptions + assert_raises(ValueError, herme.hermefit, [1], [1], -1) + assert_raises(TypeError, herme.hermefit, [[1]], [1], 0) + assert_raises(TypeError, herme.hermefit, [], [1], 0) + assert_raises(TypeError, herme.hermefit, [1], [[[1]]], 0) + assert_raises(TypeError, herme.hermefit, [1, 2], [1], 0) + assert_raises(TypeError, herme.hermefit, [1], [1, 2], 0) + assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, herme.hermefit, [1], [1], [-1,]) + assert_raises(ValueError, herme.hermefit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, herme.hermefit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = herme.hermefit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(herme.hermeval(x, coef3), y) + coef3 = herme.hermefit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(herme.hermeval(x, coef3), y) + # + coef4 = herme.hermefit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(herme.hermeval(x, coef4), y) + coef4 = herme.hermefit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(herme.hermeval(x, coef4), y) + # check things still work if deg is not in strict increasing + coef4 = herme.hermefit(x, y, [2, 3, 4, 1, 0]) + assert_equal(len(coef4), 5) + assert_almost_equal(herme.hermeval(x, coef4), y) + # + coef2d = herme.hermefit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = herme.hermefit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = herme.hermefit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = herme.hermefit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. + x = [1, 1j, -1, -1j] + assert_almost_equal(herme.hermefit(x, x, 1), [0, 1]) + assert_almost_equal(herme.hermefit(x, x, [0, 1]), [0, 1]) + # test fitting only even Legendre polynomials + x = np.linspace(-1, 1) + y = f2(x) + coef1 = herme.hermefit(x, y, 4) + assert_almost_equal(herme.hermeval(x, coef1), y) + coef2 = herme.hermefit(x, y, [0, 2, 4]) + assert_almost_equal(herme.hermeval(x, coef2), y) + assert_almost_equal(coef1, coef2) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, herme.hermecompanion, []) + assert_raises(ValueError, herme.hermecompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0] * i + [1] + assert_(herme.hermecompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(herme.hermecompanion([1, 2])[0, 0] == -.5) + + +class TestGauss: + + def test_100(self): + x, w = herme.hermegauss(100) + + # test orthogonality. Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing. + v = herme.hermevander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1 / np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = np.sqrt(2 * np.pi) + assert_almost_equal(w.sum(), tgt) + + +class TestMisc: + + def test_hermefromroots(self): + res = herme.hermefromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2]) + pol = herme.hermefromroots(roots) + res = herme.hermeval(roots, pol) + tgt = 0 + assert_(len(pol) == i + 1) + assert_almost_equal(herme.herme2poly(pol)[-1], 1) + assert_almost_equal(res, tgt) + + def test_hermeroots(self): + assert_almost_equal(herme.hermeroots([1]), []) + assert_almost_equal(herme.hermeroots([1, 1]), [-1]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = herme.hermeroots(herme.hermefromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermetrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, herme.hermetrim, coef, -1) + + # Test results + assert_equal(herme.hermetrim(coef), coef[:-1]) + assert_equal(herme.hermetrim(coef, 1), coef[:-3]) + assert_equal(herme.hermetrim(coef, 2), [0]) + + def test_hermeline(self): + assert_equal(herme.hermeline(3, 4), [3, 4]) + + def test_herme2poly(self): + for i in range(10): + assert_almost_equal(herme.herme2poly([0] * i + [1]), Helist[i]) + + def test_poly2herme(self): + for i in range(10): + assert_almost_equal(herme.poly2herme(Helist[i]), [0] * i + [1]) + + def test_weight(self): + x = np.linspace(-5, 5, 11) + tgt = np.exp(-.5 * x**2) + res = herme.hermeweight(x) + assert_almost_equal(res, tgt) diff --git a/local_packages/numpy/polynomial/tests/test_laguerre.py b/local_packages/numpy/polynomial/tests/test_laguerre.py new file mode 100644 index 0000000..884f15a --- /dev/null +++ b/local_packages/numpy/polynomial/tests/test_laguerre.py @@ -0,0 +1,535 @@ +"""Tests for laguerre module. + +""" +from functools import reduce + +import numpy as np +import numpy.polynomial.laguerre as lag +from numpy.polynomial.polynomial import polyval +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises + +L0 = np.array([1]) / 1 +L1 = np.array([1, -1]) / 1 +L2 = np.array([2, -4, 1]) / 2 +L3 = np.array([6, -18, 9, -1]) / 6 +L4 = np.array([24, -96, 72, -16, 1]) / 24 +L5 = np.array([120, -600, 600, -200, 25, -1]) / 120 +L6 = np.array([720, -4320, 5400, -2400, 450, -36, 1]) / 720 + +Llist = [L0, L1, L2, L3, L4, L5, L6] + + +def trim(x): + return lag.lagtrim(x, tol=1e-6) + + +class TestConstants: + + def test_lagdomain(self): + assert_equal(lag.lagdomain, [0, 1]) + + def test_lagzero(self): + assert_equal(lag.lagzero, [0]) + + def test_lagone(self): + assert_equal(lag.lagone, [1]) + + def test_lagx(self): + assert_equal(lag.lagx, [1, -1]) + + +class TestArithmetic: + x = np.linspace(-3, 3, 100) + + def test_lagadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = lag.lagadd([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_lagsub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = lag.lagsub([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_lagmulx(self): + assert_equal(lag.lagmulx([0]), [0]) + assert_equal(lag.lagmulx([1]), [1, -1]) + for i in range(1, 5): + ser = [0] * i + [1] + tgt = [0] * (i - 1) + [-i, 2 * i + 1, -(i + 1)] + assert_almost_equal(lag.lagmulx(ser), tgt) + + def test_lagmul(self): + # check values of result + for i in range(5): + pol1 = [0] * i + [1] + val1 = lag.lagval(self.x, pol1) + for j in range(5): + msg = f"At i={i}, j={j}" + pol2 = [0] * j + [1] + val2 = lag.lagval(self.x, pol2) + pol3 = lag.lagmul(pol1, pol2) + val3 = lag.lagval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1 * val2, err_msg=msg) + + def test_lagdiv(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0] * i + [1] + cj = [0] * j + [1] + tgt = lag.lagadd(ci, cj) + quo, rem = lag.lagdiv(tgt, ci) + res = lag.lagadd(lag.lagmul(quo, ci), rem) + assert_almost_equal(trim(res), trim(tgt), err_msg=msg) + + def test_lagpow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(lag.lagmul, [c] * j, np.array([1])) + res = lag.lagpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([9., -14., 6.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_lagval(self): + # check empty input + assert_equal(lag.lagval([], [1]).size, 0) + + # check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Llist] + for i in range(7): + msg = f"At i={i}" + tgt = y[i] + res = lag.lagval(x, [0] * i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + # check that shape is preserved + for i in range(3): + dims = [2] * i + x = np.zeros(dims) + assert_equal(lag.lagval(x, [1]).shape, dims) + assert_equal(lag.lagval(x, [1, 0]).shape, dims) + assert_equal(lag.lagval(x, [1, 0, 0]).shape, dims) + + def test_lagval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises(ValueError, lag.lagval2d, x1, x2[:2], self.c2d) + + # test values + tgt = y1 * y2 + res = lag.lagval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = lag.lagval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_lagval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises(ValueError, lag.lagval3d, x1, x2, x3[:2], self.c3d) + + # test values + tgt = y1 * y2 * y3 + res = lag.lagval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = lag.lagval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_laggrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j->ij', y1, y2) + res = lag.laggrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = lag.laggrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3) * 2) + + def test_laggrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = lag.laggrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = lag.laggrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3) * 3) + + +class TestIntegral: + + def test_lagint(self): + # check exceptions + assert_raises(TypeError, lag.lagint, [0], .5) + assert_raises(ValueError, lag.lagint, [0], -1) + assert_raises(ValueError, lag.lagint, [0], 1, [0, 0]) + assert_raises(ValueError, lag.lagint, [0], lbnd=[0]) + assert_raises(ValueError, lag.lagint, [0], scl=[0]) + assert_raises(TypeError, lag.lagint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0] * (i - 2) + [1] + res = lag.lagint([0], m=i, k=k) + assert_almost_equal(res, [1, -1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [1 / scl] + lagpol = lag.poly2lag(pol) + lagint = lag.lagint(lagpol, m=1, k=[i]) + res = lag.lag2poly(lagint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + lagpol = lag.poly2lag(pol) + lagint = lag.lagint(lagpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(lag.lagval(-1, lagint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [2 / scl] + lagpol = lag.poly2lag(pol) + lagint = lag.lagint(lagpol, m=1, k=[i], scl=2) + res = lag.lag2poly(lagint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1) + res = lag.lagint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1, k=[k]) + res = lag.lagint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1, k=[k], lbnd=-1) + res = lag.lagint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1, k=[k], scl=2) + res = lag.lagint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_lagint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([lag.lagint(c) for c in c2d.T]).T + res = lag.lagint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([lag.lagint(c) for c in c2d]) + res = lag.lagint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([lag.lagint(c, k=3) for c in c2d]) + res = lag.lagint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_lagder(self): + # check exceptions + assert_raises(TypeError, lag.lagder, [0], .5) + assert_raises(ValueError, lag.lagder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0] * i + [1] + res = lag.lagder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = lag.lagder(lag.lagint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = lag.lagder(lag.lagint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_lagder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([lag.lagder(c) for c in c2d.T]).T + res = lag.lagder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([lag.lagder(c) for c in c2d]) + res = lag.lagder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + + def test_lagvander(self): + # check for 1d x + x = np.arange(3) + v = lag.lagvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], lag.lagval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = lag.lagvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], lag.lagval(x, coef)) + + def test_lagvander2d(self): + # also tests lagval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = lag.lagvander2d(x1, x2, [1, 2]) + tgt = lag.lagval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = lag.lagvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_lagvander3d(self): + # also tests lagval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = lag.lagvander3d(x1, x2, x3, [1, 2, 3]) + tgt = lag.lagval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = lag.lagvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class TestFitting: + + def test_lagfit(self): + def f(x): + return x * (x - 1) * (x - 2) + + # Test exceptions + assert_raises(ValueError, lag.lagfit, [1], [1], -1) + assert_raises(TypeError, lag.lagfit, [[1]], [1], 0) + assert_raises(TypeError, lag.lagfit, [], [1], 0) + assert_raises(TypeError, lag.lagfit, [1], [[[1]]], 0) + assert_raises(TypeError, lag.lagfit, [1, 2], [1], 0) + assert_raises(TypeError, lag.lagfit, [1], [1, 2], 0) + assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, lag.lagfit, [1], [1], [-1,]) + assert_raises(ValueError, lag.lagfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, lag.lagfit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = lag.lagfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(lag.lagval(x, coef3), y) + coef3 = lag.lagfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(lag.lagval(x, coef3), y) + # + coef4 = lag.lagfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(lag.lagval(x, coef4), y) + coef4 = lag.lagfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(lag.lagval(x, coef4), y) + # + coef2d = lag.lagfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = lag.lagfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = lag.lagfit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = lag.lagfit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. + x = [1, 1j, -1, -1j] + assert_almost_equal(lag.lagfit(x, x, 1), [1, -1]) + assert_almost_equal(lag.lagfit(x, x, [0, 1]), [1, -1]) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, lag.lagcompanion, []) + assert_raises(ValueError, lag.lagcompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0] * i + [1] + assert_(lag.lagcompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(lag.lagcompanion([1, 2])[0, 0] == 1.5) + + +class TestGauss: + + def test_100(self): + x, w = lag.laggauss(100) + + # test orthogonality. Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing. + v = lag.lagvander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1 / np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = 1.0 + assert_almost_equal(w.sum(), tgt) + + +class TestMisc: + + def test_lagfromroots(self): + res = lag.lagfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2]) + pol = lag.lagfromroots(roots) + res = lag.lagval(roots, pol) + tgt = 0 + assert_(len(pol) == i + 1) + assert_almost_equal(lag.lag2poly(pol)[-1], 1) + assert_almost_equal(res, tgt) + + def test_lagroots(self): + assert_almost_equal(lag.lagroots([1]), []) + assert_almost_equal(lag.lagroots([0, 1]), [1]) + for i in range(2, 5): + tgt = np.linspace(0, 3, i) + res = lag.lagroots(lag.lagfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_lagtrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, lag.lagtrim, coef, -1) + + # Test results + assert_equal(lag.lagtrim(coef), coef[:-1]) + assert_equal(lag.lagtrim(coef, 1), coef[:-3]) + assert_equal(lag.lagtrim(coef, 2), [0]) + + def test_lagline(self): + assert_equal(lag.lagline(3, 4), [7, -4]) + + def test_lag2poly(self): + for i in range(7): + assert_almost_equal(lag.lag2poly([0] * i + [1]), Llist[i]) + + def test_poly2lag(self): + for i in range(7): + assert_almost_equal(lag.poly2lag(Llist[i]), [0] * i + [1]) + + def test_weight(self): + x = np.linspace(0, 10, 11) + tgt = np.exp(-x) + res = lag.lagweight(x) + assert_almost_equal(res, tgt) diff --git a/local_packages/numpy/polynomial/tests/test_legendre.py b/local_packages/numpy/polynomial/tests/test_legendre.py new file mode 100644 index 0000000..6c87f44 --- /dev/null +++ b/local_packages/numpy/polynomial/tests/test_legendre.py @@ -0,0 +1,566 @@ +"""Tests for legendre module. + +""" +from functools import reduce + +import numpy as np +import numpy.polynomial.legendre as leg +from numpy.polynomial.polynomial import polyval +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises + +L0 = np.array([1]) +L1 = np.array([0, 1]) +L2 = np.array([-1, 0, 3]) / 2 +L3 = np.array([0, -3, 0, 5]) / 2 +L4 = np.array([3, 0, -30, 0, 35]) / 8 +L5 = np.array([0, 15, 0, -70, 0, 63]) / 8 +L6 = np.array([-5, 0, 105, 0, -315, 0, 231]) / 16 +L7 = np.array([0, -35, 0, 315, 0, -693, 0, 429]) / 16 +L8 = np.array([35, 0, -1260, 0, 6930, 0, -12012, 0, 6435]) / 128 +L9 = np.array([0, 315, 0, -4620, 0, 18018, 0, -25740, 0, 12155]) / 128 + +Llist = [L0, L1, L2, L3, L4, L5, L6, L7, L8, L9] + + +def trim(x): + return leg.legtrim(x, tol=1e-6) + + +class TestConstants: + + def test_legdomain(self): + assert_equal(leg.legdomain, [-1, 1]) + + def test_legzero(self): + assert_equal(leg.legzero, [0]) + + def test_legone(self): + assert_equal(leg.legone, [1]) + + def test_legx(self): + assert_equal(leg.legx, [0, 1]) + + +class TestArithmetic: + x = np.linspace(-1, 1, 100) + + def test_legadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = leg.legadd([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_legsub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = leg.legsub([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_legmulx(self): + assert_equal(leg.legmulx([0]), [0]) + assert_equal(leg.legmulx([1]), [0, 1]) + for i in range(1, 5): + tmp = 2 * i + 1 + ser = [0] * i + [1] + tgt = [0] * (i - 1) + [i / tmp, 0, (i + 1) / tmp] + assert_equal(leg.legmulx(ser), tgt) + + def test_legmul(self): + # check values of result + for i in range(5): + pol1 = [0] * i + [1] + val1 = leg.legval(self.x, pol1) + for j in range(5): + msg = f"At i={i}, j={j}" + pol2 = [0] * j + [1] + val2 = leg.legval(self.x, pol2) + pol3 = leg.legmul(pol1, pol2) + val3 = leg.legval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1 * val2, err_msg=msg) + + def test_legdiv(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0] * i + [1] + cj = [0] * j + [1] + tgt = leg.legadd(ci, cj) + quo, rem = leg.legdiv(tgt, ci) + res = leg.legadd(leg.legmul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_legpow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(leg.legmul, [c] * j, np.array([1])) + res = leg.legpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([2., 2., 2.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_legval(self): + # check empty input + assert_equal(leg.legval([], [1]).size, 0) + + # check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Llist] + for i in range(10): + msg = f"At i={i}" + tgt = y[i] + res = leg.legval(x, [0] * i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + # check that shape is preserved + for i in range(3): + dims = [2] * i + x = np.zeros(dims) + assert_equal(leg.legval(x, [1]).shape, dims) + assert_equal(leg.legval(x, [1, 0]).shape, dims) + assert_equal(leg.legval(x, [1, 0, 0]).shape, dims) + + def test_legval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises(ValueError, leg.legval2d, x1, x2[:2], self.c2d) + + # test values + tgt = y1 * y2 + res = leg.legval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = leg.legval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_legval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises(ValueError, leg.legval3d, x1, x2, x3[:2], self.c3d) + + # test values + tgt = y1 * y2 * y3 + res = leg.legval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = leg.legval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_leggrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j->ij', y1, y2) + res = leg.leggrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = leg.leggrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3) * 2) + + def test_leggrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = leg.leggrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = leg.leggrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3) * 3) + + +class TestIntegral: + + def test_legint(self): + # check exceptions + assert_raises(TypeError, leg.legint, [0], .5) + assert_raises(ValueError, leg.legint, [0], -1) + assert_raises(ValueError, leg.legint, [0], 1, [0, 0]) + assert_raises(ValueError, leg.legint, [0], lbnd=[0]) + assert_raises(ValueError, leg.legint, [0], scl=[0]) + assert_raises(TypeError, leg.legint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0] * (i - 2) + [1] + res = leg.legint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [1 / scl] + legpol = leg.poly2leg(pol) + legint = leg.legint(legpol, m=1, k=[i]) + res = leg.leg2poly(legint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + legpol = leg.poly2leg(pol) + legint = leg.legint(legpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(leg.legval(-1, legint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [2 / scl] + legpol = leg.poly2leg(pol) + legint = leg.legint(legpol, m=1, k=[i], scl=2) + res = leg.leg2poly(legint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1) + res = leg.legint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1, k=[k]) + res = leg.legint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1, k=[k], lbnd=-1) + res = leg.legint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1, k=[k], scl=2) + res = leg.legint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_legint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([leg.legint(c) for c in c2d.T]).T + res = leg.legint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([leg.legint(c) for c in c2d]) + res = leg.legint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([leg.legint(c, k=3) for c in c2d]) + res = leg.legint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + def test_legint_zerointord(self): + assert_equal(leg.legint((1, 2, 3), 0), (1, 2, 3)) + + +class TestDerivative: + + def test_legder(self): + # check exceptions + assert_raises(TypeError, leg.legder, [0], .5) + assert_raises(ValueError, leg.legder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0] * i + [1] + res = leg.legder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = leg.legder(leg.legint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = leg.legder(leg.legint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_legder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([leg.legder(c) for c in c2d.T]).T + res = leg.legder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([leg.legder(c) for c in c2d]) + res = leg.legder(c2d, axis=1) + assert_almost_equal(res, tgt) + + def test_legder_orderhigherthancoeff(self): + c = (1, 2, 3, 4) + assert_equal(leg.legder(c, 4), [0]) + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + + def test_legvander(self): + # check for 1d x + x = np.arange(3) + v = leg.legvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], leg.legval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = leg.legvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], leg.legval(x, coef)) + + def test_legvander2d(self): + # also tests polyval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = leg.legvander2d(x1, x2, [1, 2]) + tgt = leg.legval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = leg.legvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_legvander3d(self): + # also tests polyval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = leg.legvander3d(x1, x2, x3, [1, 2, 3]) + tgt = leg.legval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = leg.legvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + def test_legvander_negdeg(self): + assert_raises(ValueError, leg.legvander, (1, 2, 3), -1) + + +class TestFitting: + + def test_legfit(self): + def f(x): + return x * (x - 1) * (x - 2) + + def f2(x): + return x**4 + x**2 + 1 + + # Test exceptions + assert_raises(ValueError, leg.legfit, [1], [1], -1) + assert_raises(TypeError, leg.legfit, [[1]], [1], 0) + assert_raises(TypeError, leg.legfit, [], [1], 0) + assert_raises(TypeError, leg.legfit, [1], [[[1]]], 0) + assert_raises(TypeError, leg.legfit, [1, 2], [1], 0) + assert_raises(TypeError, leg.legfit, [1], [1, 2], 0) + assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, leg.legfit, [1], [1], [-1,]) + assert_raises(ValueError, leg.legfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, leg.legfit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = leg.legfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(leg.legval(x, coef3), y) + coef3 = leg.legfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(leg.legval(x, coef3), y) + # + coef4 = leg.legfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(leg.legval(x, coef4), y) + coef4 = leg.legfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(leg.legval(x, coef4), y) + # check things still work if deg is not in strict increasing + coef4 = leg.legfit(x, y, [2, 3, 4, 1, 0]) + assert_equal(len(coef4), 5) + assert_almost_equal(leg.legval(x, coef4), y) + # + coef2d = leg.legfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = leg.legfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = leg.legfit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = leg.legfit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = leg.legfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = leg.legfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. + x = [1, 1j, -1, -1j] + assert_almost_equal(leg.legfit(x, x, 1), [0, 1]) + assert_almost_equal(leg.legfit(x, x, [0, 1]), [0, 1]) + # test fitting only even Legendre polynomials + x = np.linspace(-1, 1) + y = f2(x) + coef1 = leg.legfit(x, y, 4) + assert_almost_equal(leg.legval(x, coef1), y) + coef2 = leg.legfit(x, y, [0, 2, 4]) + assert_almost_equal(leg.legval(x, coef2), y) + assert_almost_equal(coef1, coef2) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, leg.legcompanion, []) + assert_raises(ValueError, leg.legcompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0] * i + [1] + assert_(leg.legcompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(leg.legcompanion([1, 2])[0, 0] == -.5) + + +class TestGauss: + + def test_100(self): + x, w = leg.leggauss(100) + + # test orthogonality. Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing. + v = leg.legvander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1 / np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = 2.0 + assert_almost_equal(w.sum(), tgt) + + +class TestMisc: + + def test_legfromroots(self): + res = leg.legfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2]) + pol = leg.legfromroots(roots) + res = leg.legval(roots, pol) + tgt = 0 + assert_(len(pol) == i + 1) + assert_almost_equal(leg.leg2poly(pol)[-1], 1) + assert_almost_equal(res, tgt) + + def test_legroots(self): + assert_almost_equal(leg.legroots([1]), []) + assert_almost_equal(leg.legroots([1, 2]), [-.5]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = leg.legroots(leg.legfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_legtrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, leg.legtrim, coef, -1) + + # Test results + assert_equal(leg.legtrim(coef), coef[:-1]) + assert_equal(leg.legtrim(coef, 1), coef[:-3]) + assert_equal(leg.legtrim(coef, 2), [0]) + + def test_legline(self): + assert_equal(leg.legline(3, 4), [3, 4]) + + def test_legline_zeroscl(self): + assert_equal(leg.legline(3, 0), [3]) + + def test_leg2poly(self): + for i in range(10): + assert_almost_equal(leg.leg2poly([0] * i + [1]), Llist[i]) + + def test_poly2leg(self): + for i in range(10): + assert_almost_equal(leg.poly2leg(Llist[i]), [0] * i + [1]) + + def test_weight(self): + x = np.linspace(-1, 1, 11) + tgt = 1. + res = leg.legweight(x) + assert_almost_equal(res, tgt) diff --git a/local_packages/numpy/polynomial/tests/test_polynomial.py b/local_packages/numpy/polynomial/tests/test_polynomial.py new file mode 100644 index 0000000..4c924a7 --- /dev/null +++ b/local_packages/numpy/polynomial/tests/test_polynomial.py @@ -0,0 +1,691 @@ +"""Tests for polynomial module. + +""" +import pickle +from copy import deepcopy +from fractions import Fraction +from functools import reduce + +import pytest + +import numpy as np +import numpy.polynomial.polynomial as poly +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) + + +def trim(x): + return poly.polytrim(x, tol=1e-6) + + +T0 = [1] +T1 = [0, 1] +T2 = [-1, 0, 2] +T3 = [0, -3, 0, 4] +T4 = [1, 0, -8, 0, 8] +T5 = [0, 5, 0, -20, 0, 16] +T6 = [-1, 0, 18, 0, -48, 0, 32] +T7 = [0, -7, 0, 56, 0, -112, 0, 64] +T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128] +T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256] + +Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9] + + +class TestConstants: + + def test_polydomain(self): + assert_equal(poly.polydomain, [-1, 1]) + + def test_polyzero(self): + assert_equal(poly.polyzero, [0]) + + def test_polyone(self): + assert_equal(poly.polyone, [1]) + + def test_polyx(self): + assert_equal(poly.polyx, [0, 1]) + + def test_copy(self): + x = poly.Polynomial([1, 2, 3]) + y = deepcopy(x) + assert_equal(x, y) + + def test_pickle(self): + x = poly.Polynomial([1, 2, 3]) + y = pickle.loads(pickle.dumps(x)) + assert_equal(x, y) + +class TestArithmetic: + + def test_polyadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = poly.polyadd([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_polysub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = poly.polysub([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_polymulx(self): + assert_equal(poly.polymulx([0]), [0]) + assert_equal(poly.polymulx([1]), [0, 1]) + for i in range(1, 5): + ser = [0] * i + [1] + tgt = [0] * (i + 1) + [1] + assert_equal(poly.polymulx(ser), tgt) + + def test_polymul(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(i + j + 1) + tgt[i + j] += 1 + res = poly.polymul([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_polydiv(self): + # check zero division + assert_raises(ZeroDivisionError, poly.polydiv, [1], [0]) + + # check scalar division + quo, rem = poly.polydiv([2], [2]) + assert_equal((quo, rem), (1, 0)) + quo, rem = poly.polydiv([2, 2], [2]) + assert_equal((quo, rem), ((1, 1), 0)) + + # check rest. + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0] * i + [1, 2] + cj = [0] * j + [1, 2] + tgt = poly.polyadd(ci, cj) + quo, rem = poly.polydiv(tgt, ci) + res = poly.polyadd(poly.polymul(quo, ci), rem) + assert_equal(res, tgt, err_msg=msg) + + def test_polypow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(poly.polymul, [c] * j, np.array([1])) + res = poly.polypow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + +class TestFraction: + + def test_Fraction(self): + # assert we can use Polynomials with coefficients of object dtype + f = Fraction(2, 3) + one = Fraction(1, 1) + zero = Fraction(0, 1) + p = poly.Polynomial([f, f], domain=[zero, one], window=[zero, one]) + + x = 2 * p + p ** 2 + assert_equal(x.coef, np.array([Fraction(16, 9), Fraction(20, 9), + Fraction(4, 9)], dtype=object)) + assert_equal(p.domain, [zero, one]) + assert_equal(p.coef.dtype, np.dtypes.ObjectDType()) + assert_(isinstance(p(f), Fraction)) + assert_equal(p(f), Fraction(10, 9)) + p_deriv = poly.Polynomial([Fraction(2, 3)], domain=[zero, one], + window=[zero, one]) + assert_equal(p.deriv(), p_deriv) + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([1., 2., 3.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + y = poly.polyval(x, [1., 2., 3.]) + + def test_polyval(self): + # check empty input + assert_equal(poly.polyval([], [1]).size, 0) + + # check normal input) + x = np.linspace(-1, 1) + y = [x**i for i in range(5)] + for i in range(5): + tgt = y[i] + res = poly.polyval(x, [0] * i + [1]) + assert_almost_equal(res, tgt) + tgt = x * (x**2 - 1) + res = poly.polyval(x, [0, -1, 0, 1]) + assert_almost_equal(res, tgt) + + # check that shape is preserved + for i in range(3): + dims = [2] * i + x = np.zeros(dims) + assert_equal(poly.polyval(x, [1]).shape, dims) + assert_equal(poly.polyval(x, [1, 0]).shape, dims) + assert_equal(poly.polyval(x, [1, 0, 0]).shape, dims) + + # check masked arrays are processed correctly + mask = [False, True, False] + mx = np.ma.array([1, 2, 3], mask=mask) + res = np.polyval([7, 5, 3], mx) + assert_array_equal(res.mask, mask) + + # check subtypes of ndarray are preserved + class C(np.ndarray): + pass + + cx = np.array([1, 2, 3]).view(C) + assert_equal(type(np.polyval([2, 3, 4], cx)), C) + + def test_polyvalfromroots(self): + # check exception for broadcasting x values over root array with + # too few dimensions + assert_raises(ValueError, poly.polyvalfromroots, + [1], [1], tensor=False) + + # check empty input + assert_equal(poly.polyvalfromroots([], [1]).size, 0) + assert_(poly.polyvalfromroots([], [1]).shape == (0,)) + + # check empty input + multidimensional roots + assert_equal(poly.polyvalfromroots([], [[1] * 5]).size, 0) + assert_(poly.polyvalfromroots([], [[1] * 5]).shape == (5, 0)) + + # check scalar input + assert_equal(poly.polyvalfromroots(1, 1), 0) + assert_(poly.polyvalfromroots(1, np.ones((3, 3))).shape == (3,)) + + # check normal input) + x = np.linspace(-1, 1) + y = [x**i for i in range(5)] + for i in range(1, 5): + tgt = y[i] + res = poly.polyvalfromroots(x, [0] * i) + assert_almost_equal(res, tgt) + tgt = x * (x - 1) * (x + 1) + res = poly.polyvalfromroots(x, [-1, 0, 1]) + assert_almost_equal(res, tgt) + + # check that shape is preserved + for i in range(3): + dims = [2] * i + x = np.zeros(dims) + assert_equal(poly.polyvalfromroots(x, [1]).shape, dims) + assert_equal(poly.polyvalfromroots(x, [1, 0]).shape, dims) + assert_equal(poly.polyvalfromroots(x, [1, 0, 0]).shape, dims) + + # check compatibility with factorization + ptest = [15, 2, -16, -2, 1] + r = poly.polyroots(ptest) + x = np.linspace(-1, 1) + assert_almost_equal(poly.polyval(x, ptest), + poly.polyvalfromroots(x, r)) + + # check multidimensional arrays of roots and values + # check tensor=False + rshape = (3, 5) + x = np.arange(-3, 2) + r = np.random.randint(-5, 5, size=rshape) + res = poly.polyvalfromroots(x, r, tensor=False) + tgt = np.empty(r.shape[1:]) + for ii in range(tgt.size): + tgt[ii] = poly.polyvalfromroots(x[ii], r[:, ii]) + assert_equal(res, tgt) + + # check tensor=True + x = np.vstack([x, 2 * x]) + res = poly.polyvalfromroots(x, r, tensor=True) + tgt = np.empty(r.shape[1:] + x.shape) + for ii in range(r.shape[1]): + for jj in range(x.shape[0]): + tgt[ii, jj, :] = poly.polyvalfromroots(x[jj], r[:, ii]) + assert_equal(res, tgt) + + def test_polyval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises_regex(ValueError, 'incompatible', + poly.polyval2d, x1, x2[:2], self.c2d) + + # test values + tgt = y1 * y2 + res = poly.polyval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = poly.polyval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_polyval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises_regex(ValueError, 'incompatible', + poly.polyval3d, x1, x2, x3[:2], self.c3d) + + # test values + tgt = y1 * y2 * y3 + res = poly.polyval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = poly.polyval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_polygrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j->ij', y1, y2) + res = poly.polygrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = poly.polygrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3) * 2) + + def test_polygrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = poly.polygrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = poly.polygrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3) * 3) + + +class TestIntegral: + + def test_polyint(self): + # check exceptions + assert_raises(TypeError, poly.polyint, [0], .5) + assert_raises(ValueError, poly.polyint, [0], -1) + assert_raises(ValueError, poly.polyint, [0], 1, [0, 0]) + assert_raises(ValueError, poly.polyint, [0], lbnd=[0]) + assert_raises(ValueError, poly.polyint, [0], scl=[0]) + assert_raises(TypeError, poly.polyint, [0], axis=.5) + assert_raises(TypeError, poly.polyint, [1, 1], 1.) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0] * (i - 2) + [1] + res = poly.polyint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [1 / scl] + res = poly.polyint(pol, m=1, k=[i]) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + res = poly.polyint(pol, m=1, k=[i], lbnd=-1) + assert_almost_equal(poly.polyval(-1, res), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [2 / scl] + res = poly.polyint(pol, m=1, k=[i], scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1) + res = poly.polyint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1, k=[k]) + res = poly.polyint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1, k=[k], lbnd=-1) + res = poly.polyint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1, k=[k], scl=2) + res = poly.polyint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_polyint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([poly.polyint(c) for c in c2d.T]).T + res = poly.polyint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([poly.polyint(c) for c in c2d]) + res = poly.polyint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([poly.polyint(c, k=3) for c in c2d]) + res = poly.polyint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_polyder(self): + # check exceptions + assert_raises(TypeError, poly.polyder, [0], .5) + assert_raises(ValueError, poly.polyder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0] * i + [1] + res = poly.polyder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = poly.polyder(poly.polyint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = poly.polyder(poly.polyint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_polyder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([poly.polyder(c) for c in c2d.T]).T + res = poly.polyder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([poly.polyder(c) for c in c2d]) + res = poly.polyder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + + def test_polyvander(self): + # check for 1d x + x = np.arange(3) + v = poly.polyvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], poly.polyval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = poly.polyvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], poly.polyval(x, coef)) + + def test_polyvander2d(self): + # also tests polyval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = poly.polyvander2d(x1, x2, [1, 2]) + tgt = poly.polyval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = poly.polyvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_polyvander3d(self): + # also tests polyval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = poly.polyvander3d(x1, x2, x3, [1, 2, 3]) + tgt = poly.polyval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = poly.polyvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + def test_polyvandernegdeg(self): + x = np.arange(3) + assert_raises(ValueError, poly.polyvander, x, -1) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, poly.polycompanion, []) + assert_raises(ValueError, poly.polycompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0] * i + [1] + assert_(poly.polycompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(poly.polycompanion([1, 2])[0, 0] == -.5) + + +class TestMisc: + + def test_polyfromroots(self): + res = poly.polyfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2]) + tgt = Tlist[i] + res = poly.polyfromroots(roots) * 2**(i - 1) + assert_almost_equal(trim(res), trim(tgt)) + + def test_polyroots(self): + assert_almost_equal(poly.polyroots([1]), []) + assert_almost_equal(poly.polyroots([1, 2]), [-.5]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = poly.polyroots(poly.polyfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + # Testing for larger root values + for i in np.logspace(10, 25, num=1000, base=10): + tgt = np.array([-1, 1, i]) + res = poly.polyroots(poly.polyfromroots(tgt)) + # Adapting the expected precision according to the root value, + # to take into account numerical calculation error. + assert_almost_equal(res, tgt, 15 - int(np.log10(i))) + for i in np.logspace(10, 25, num=1000, base=10): + tgt = np.array([-1, 1.01, i]) + res = poly.polyroots(poly.polyfromroots(tgt)) + # Adapting the expected precision according to the root value, + # to take into account numerical calculation error. + assert_almost_equal(res, tgt, 14 - int(np.log10(i))) + + def test_polyfit(self): + def f(x): + return x * (x - 1) * (x - 2) + + def f2(x): + return x**4 + x**2 + 1 + + # Test exceptions + assert_raises(ValueError, poly.polyfit, [1], [1], -1) + assert_raises(TypeError, poly.polyfit, [[1]], [1], 0) + assert_raises(TypeError, poly.polyfit, [], [1], 0) + assert_raises(TypeError, poly.polyfit, [1], [[[1]]], 0) + assert_raises(TypeError, poly.polyfit, [1, 2], [1], 0) + assert_raises(TypeError, poly.polyfit, [1], [1, 2], 0) + assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, poly.polyfit, [1], [1], [-1,]) + assert_raises(ValueError, poly.polyfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, poly.polyfit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = poly.polyfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(poly.polyval(x, coef3), y) + coef3 = poly.polyfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(poly.polyval(x, coef3), y) + # + coef4 = poly.polyfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(poly.polyval(x, coef4), y) + coef4 = poly.polyfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(poly.polyval(x, coef4), y) + # + coef2d = poly.polyfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = poly.polyfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + yw[0::2] = 0 + wcoef3 = poly.polyfit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = poly.polyfit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. + x = [1, 1j, -1, -1j] + assert_almost_equal(poly.polyfit(x, x, 1), [0, 1]) + assert_almost_equal(poly.polyfit(x, x, [0, 1]), [0, 1]) + # test fitting only even Polyendre polynomials + x = np.linspace(-1, 1) + y = f2(x) + coef1 = poly.polyfit(x, y, 4) + assert_almost_equal(poly.polyval(x, coef1), y) + coef2 = poly.polyfit(x, y, [0, 2, 4]) + assert_almost_equal(poly.polyval(x, coef2), y) + assert_almost_equal(coef1, coef2) + + def test_polytrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, poly.polytrim, coef, -1) + + # Test results + assert_equal(poly.polytrim(coef), coef[:-1]) + assert_equal(poly.polytrim(coef, 1), coef[:-3]) + assert_equal(poly.polytrim(coef, 2), [0]) + + def test_polyline(self): + assert_equal(poly.polyline(3, 4), [3, 4]) + + def test_polyline_zero(self): + assert_equal(poly.polyline(3, 0), [3]) + + def test_fit_degenerate_domain(self): + p = poly.Polynomial.fit([1], [2], deg=0) + assert_equal(p.coef, [2.]) + p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=0) + assert_almost_equal(p.coef, [2.05]) + with pytest.warns(np.exceptions.RankWarning): + p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=1) + + def test_result_type(self): + w = np.array([-1, 1], dtype=np.float32) + p = np.polynomial.Polynomial(w, domain=w, window=w) + v = p(2) + assert_equal(v.dtype, np.float32) + + arr = np.polydiv(1, np.float32(1)) + assert_equal(arr[0].dtype, np.float64) + +class ArrayFunctionInterceptor: + def __init__(self): + self.called = False + + def __array_function__(self, func, types, args, kwargs): + self.called = True + return "intercepted" + +def test_polyval2d_array_function_hook(): + x = ArrayFunctionInterceptor() + y = ArrayFunctionInterceptor() + c = ArrayFunctionInterceptor() + result = np.polynomial.polynomial.polyval2d(x, y, c) + assert result == "intercepted" + +def test_polygrid2d_array_function_hook(): + x = ArrayFunctionInterceptor() + y = ArrayFunctionInterceptor() + c = ArrayFunctionInterceptor() + result = np.polynomial.polynomial.polygrid2d(x, y, c) + assert result == "intercepted" diff --git a/local_packages/numpy/polynomial/tests/test_polyutils.py b/local_packages/numpy/polynomial/tests/test_polyutils.py new file mode 100644 index 0000000..a6f5e39 --- /dev/null +++ b/local_packages/numpy/polynomial/tests/test_polyutils.py @@ -0,0 +1,123 @@ +"""Tests for polyutils module. + +""" +import numpy as np +import numpy.polynomial.polyutils as pu +from numpy.testing import assert_, assert_almost_equal, assert_equal, assert_raises + + +class TestMisc: + + def test_trimseq(self): + tgt = [1] + for num_trailing_zeros in range(5): + res = pu.trimseq([1] + [0] * num_trailing_zeros) + assert_equal(res, tgt) + + def test_trimseq_empty_input(self): + for empty_seq in [[], np.array([], dtype=np.int32)]: + assert_equal(pu.trimseq(empty_seq), empty_seq) + + def test_as_series(self): + # check exceptions + assert_raises(ValueError, pu.as_series, [[]]) + assert_raises(ValueError, pu.as_series, [[[1, 2]]]) + assert_raises(ValueError, pu.as_series, [[1], ['a']]) + # check common types + types = ['i', 'd', 'O'] + for i in range(len(types)): + for j in range(i): + ci = np.ones(1, types[i]) + cj = np.ones(1, types[j]) + [resi, resj] = pu.as_series([ci, cj]) + assert_(resi.dtype.char == resj.dtype.char) + assert_(resj.dtype.char == types[i]) + + def test_trimcoef(self): + coef = [2, -1, 1, 0] + # Test exceptions + assert_raises(ValueError, pu.trimcoef, coef, -1) + # Test results + assert_equal(pu.trimcoef(coef), coef[:-1]) + assert_equal(pu.trimcoef(coef, 1), coef[:-3]) + assert_equal(pu.trimcoef(coef, 2), [0]) + + def test_vander_nd_exception(self): + # n_dims != len(points) + assert_raises(ValueError, pu._vander_nd, (), (1, 2, 3), [90]) + # n_dims != len(degrees) + assert_raises(ValueError, pu._vander_nd, (), (), [90.65]) + # n_dims == 0 + assert_raises(ValueError, pu._vander_nd, (), (), []) + + def test_div_zerodiv(self): + # c2[-1] == 0 + assert_raises(ZeroDivisionError, pu._div, pu._div, (1, 2, 3), [0]) + + def test_pow_too_large(self): + # power > maxpower + assert_raises(ValueError, pu._pow, (), [1, 2, 3], 5, 4) + +class TestDomain: + + def test_getdomain(self): + # test for real values + x = [1, 10, 3, -1] + tgt = [-1, 10] + res = pu.getdomain(x) + assert_almost_equal(res, tgt) + + # test for complex values + x = [1 + 1j, 1 - 1j, 0, 2] + tgt = [-1j, 2 + 1j] + res = pu.getdomain(x) + assert_almost_equal(res, tgt) + + def test_mapdomain(self): + # test for real values + dom1 = [0, 4] + dom2 = [1, 3] + tgt = dom2 + res = pu.mapdomain(dom1, dom1, dom2) + assert_almost_equal(res, tgt) + + # test for complex values + dom1 = [0 - 1j, 2 + 1j] + dom2 = [-2, 2] + tgt = dom2 + x = dom1 + res = pu.mapdomain(x, dom1, dom2) + assert_almost_equal(res, tgt) + + # test for multidimensional arrays + dom1 = [0, 4] + dom2 = [1, 3] + tgt = np.array([dom2, dom2]) + x = np.array([dom1, dom1]) + res = pu.mapdomain(x, dom1, dom2) + assert_almost_equal(res, tgt) + + # test that subtypes are preserved. + class MyNDArray(np.ndarray): + pass + + dom1 = [0, 4] + dom2 = [1, 3] + x = np.array([dom1, dom1]).view(MyNDArray) + res = pu.mapdomain(x, dom1, dom2) + assert_(isinstance(res, MyNDArray)) + + def test_mapparms(self): + # test for real values + dom1 = [0, 4] + dom2 = [1, 3] + tgt = [1, .5] + res = pu. mapparms(dom1, dom2) + assert_almost_equal(res, tgt) + + # test for complex values + dom1 = [0 - 1j, 2 + 1j] + dom2 = [-2, 2] + tgt = [-1 + 1j, 1 - 1j] + res = pu.mapparms(dom1, dom2) + assert_almost_equal(res, tgt) diff --git a/local_packages/numpy/polynomial/tests/test_printing.py b/local_packages/numpy/polynomial/tests/test_printing.py new file mode 100644 index 0000000..f7d0131 --- /dev/null +++ b/local_packages/numpy/polynomial/tests/test_printing.py @@ -0,0 +1,557 @@ +from decimal import Decimal + +# For testing polynomial printing with object arrays +from fractions import Fraction +from math import inf, nan + +import pytest + +import numpy.polynomial as poly +from numpy._core import arange, array, printoptions +from numpy.testing import assert_, assert_equal + + +class TestStrUnicodeSuperSubscripts: + + @pytest.fixture(scope='class', autouse=True) + def use_unicode(self): + poly.set_default_printstyle('unicode') + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·x + 3.0·x²"), + ([-1, 0, 3, -1], "-1.0 + 0.0·x + 3.0·x² - 1.0·x³"), + (arange(12), ("0.0 + 1.0·x + 2.0·x² + 3.0·x³ + 4.0·x⁴ + 5.0·x⁵ + " + "6.0·x⁶ + 7.0·x⁷ +\n8.0·x⁸ + 9.0·x⁹ + 10.0·x¹⁰ + " + "11.0·x¹¹")), + )) + def test_polynomial_str(self, inp, tgt): + p = poly.Polynomial(inp) + res = str(p) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·T₁(x) + 3.0·T₂(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0·T₁(x) + 3.0·T₂(x) - 1.0·T₃(x)"), + (arange(12), ("0.0 + 1.0·T₁(x) + 2.0·T₂(x) + 3.0·T₃(x) + 4.0·T₄(x) + " + "5.0·T₅(x) +\n6.0·T₆(x) + 7.0·T₇(x) + 8.0·T₈(x) + " + "9.0·T₉(x) + 10.0·T₁₀(x) + 11.0·T₁₁(x)")), + )) + def test_chebyshev_str(self, inp, tgt): + res = str(poly.Chebyshev(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·P₁(x) + 3.0·P₂(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0·P₁(x) + 3.0·P₂(x) - 1.0·P₃(x)"), + (arange(12), ("0.0 + 1.0·P₁(x) + 2.0·P₂(x) + 3.0·P₃(x) + 4.0·P₄(x) + " + "5.0·P₅(x) +\n6.0·P₆(x) + 7.0·P₇(x) + 8.0·P₈(x) + " + "9.0·P₉(x) + 10.0·P₁₀(x) + 11.0·P₁₁(x)")), + )) + def test_legendre_str(self, inp, tgt): + res = str(poly.Legendre(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·H₁(x) + 3.0·H₂(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0·H₁(x) + 3.0·H₂(x) - 1.0·H₃(x)"), + (arange(12), ("0.0 + 1.0·H₁(x) + 2.0·H₂(x) + 3.0·H₃(x) + 4.0·H₄(x) + " + "5.0·H₅(x) +\n6.0·H₆(x) + 7.0·H₇(x) + 8.0·H₈(x) + " + "9.0·H₉(x) + 10.0·H₁₀(x) + 11.0·H₁₁(x)")), + )) + def test_hermite_str(self, inp, tgt): + res = str(poly.Hermite(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·He₁(x) + 3.0·He₂(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0·He₁(x) + 3.0·He₂(x) - 1.0·He₃(x)"), + (arange(12), ("0.0 + 1.0·He₁(x) + 2.0·He₂(x) + 3.0·He₃(x) + " + "4.0·He₄(x) + 5.0·He₅(x) +\n6.0·He₆(x) + 7.0·He₇(x) + " + "8.0·He₈(x) + 9.0·He₉(x) + 10.0·He₁₀(x) +\n" + "11.0·He₁₁(x)")), + )) + def test_hermiteE_str(self, inp, tgt): + res = str(poly.HermiteE(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·L₁(x) + 3.0·L₂(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0·L₁(x) + 3.0·L₂(x) - 1.0·L₃(x)"), + (arange(12), ("0.0 + 1.0·L₁(x) + 2.0·L₂(x) + 3.0·L₃(x) + 4.0·L₄(x) + " + "5.0·L₅(x) +\n6.0·L₆(x) + 7.0·L₇(x) + 8.0·L₈(x) + " + "9.0·L₉(x) + 10.0·L₁₀(x) + 11.0·L₁₁(x)")), + )) + def test_laguerre_str(self, inp, tgt): + res = str(poly.Laguerre(inp)) + assert_equal(res, tgt) + + def test_polynomial_str_domains(self): + res = str(poly.Polynomial([0, 1])) + tgt = '0.0 + 1.0·x' + assert_equal(res, tgt) + + res = str(poly.Polynomial([0, 1], domain=[1, 2])) + tgt = '0.0 + 1.0·(-3.0 + 2.0x)' + assert_equal(res, tgt) + +class TestStrAscii: + + @pytest.fixture(scope='class', autouse=True) + def use_ascii(self): + poly.set_default_printstyle('ascii') + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 x + 3.0 x**2"), + ([-1, 0, 3, -1], "-1.0 + 0.0 x + 3.0 x**2 - 1.0 x**3"), + (arange(12), ("0.0 + 1.0 x + 2.0 x**2 + 3.0 x**3 + 4.0 x**4 + " + "5.0 x**5 + 6.0 x**6 +\n7.0 x**7 + 8.0 x**8 + " + "9.0 x**9 + 10.0 x**10 + 11.0 x**11")), + )) + def test_polynomial_str(self, inp, tgt): + res = str(poly.Polynomial(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 T_1(x) + 3.0 T_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 T_1(x) + 3.0 T_2(x) - 1.0 T_3(x)"), + (arange(12), ("0.0 + 1.0 T_1(x) + 2.0 T_2(x) + 3.0 T_3(x) + " + "4.0 T_4(x) + 5.0 T_5(x) +\n6.0 T_6(x) + 7.0 T_7(x) + " + "8.0 T_8(x) + 9.0 T_9(x) + 10.0 T_10(x) +\n" + "11.0 T_11(x)")), + )) + def test_chebyshev_str(self, inp, tgt): + res = str(poly.Chebyshev(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 P_1(x) + 3.0 P_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 P_1(x) + 3.0 P_2(x) - 1.0 P_3(x)"), + (arange(12), ("0.0 + 1.0 P_1(x) + 2.0 P_2(x) + 3.0 P_3(x) + " + "4.0 P_4(x) + 5.0 P_5(x) +\n6.0 P_6(x) + 7.0 P_7(x) + " + "8.0 P_8(x) + 9.0 P_9(x) + 10.0 P_10(x) +\n" + "11.0 P_11(x)")), + )) + def test_legendre_str(self, inp, tgt): + res = str(poly.Legendre(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 H_1(x) + 3.0 H_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 H_1(x) + 3.0 H_2(x) - 1.0 H_3(x)"), + (arange(12), ("0.0 + 1.0 H_1(x) + 2.0 H_2(x) + 3.0 H_3(x) + " + "4.0 H_4(x) + 5.0 H_5(x) +\n6.0 H_6(x) + 7.0 H_7(x) + " + "8.0 H_8(x) + 9.0 H_9(x) + 10.0 H_10(x) +\n" + "11.0 H_11(x)")), + )) + def test_hermite_str(self, inp, tgt): + res = str(poly.Hermite(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 He_1(x) + 3.0 He_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 He_1(x) + 3.0 He_2(x) - 1.0 He_3(x)"), + (arange(12), ("0.0 + 1.0 He_1(x) + 2.0 He_2(x) + 3.0 He_3(x) + " + "4.0 He_4(x) +\n5.0 He_5(x) + 6.0 He_6(x) + " + "7.0 He_7(x) + 8.0 He_8(x) + 9.0 He_9(x) +\n" + "10.0 He_10(x) + 11.0 He_11(x)")), + )) + def test_hermiteE_str(self, inp, tgt): + res = str(poly.HermiteE(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 L_1(x) + 3.0 L_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 L_1(x) + 3.0 L_2(x) - 1.0 L_3(x)"), + (arange(12), ("0.0 + 1.0 L_1(x) + 2.0 L_2(x) + 3.0 L_3(x) + " + "4.0 L_4(x) + 5.0 L_5(x) +\n6.0 L_6(x) + 7.0 L_7(x) + " + "8.0 L_8(x) + 9.0 L_9(x) + 10.0 L_10(x) +\n" + "11.0 L_11(x)")), + )) + def test_laguerre_str(self, inp, tgt): + res = str(poly.Laguerre(inp)) + assert_equal(res, tgt) + + def test_polynomial_str_domains(self): + res = str(poly.Polynomial([0, 1])) + tgt = '0.0 + 1.0 x' + assert_equal(res, tgt) + + res = str(poly.Polynomial([0, 1], domain=[1, 2])) + tgt = '0.0 + 1.0 (-3.0 + 2.0x)' + assert_equal(res, tgt) + +class TestLinebreaking: + + @pytest.fixture(scope='class', autouse=True) + def use_ascii(self): + poly.set_default_printstyle('ascii') + + def test_single_line_one_less(self): + # With 'ascii' style, len(str(p)) is default linewidth - 1 (i.e. 74) + p = poly.Polynomial([12345678, 12345678, 12345678, 12345678, 123]) + assert_equal(len(str(p)), 74) + assert_equal(str(p), ( + '12345678.0 + 12345678.0 x + 12345678.0 x**2 + ' + '12345678.0 x**3 + 123.0 x**4' + )) + + def test_num_chars_is_linewidth(self): + # len(str(p)) == default linewidth == 75 + p = poly.Polynomial([12345678, 12345678, 12345678, 12345678, 1234]) + assert_equal(len(str(p)), 75) + assert_equal(str(p), ( + '12345678.0 + 12345678.0 x + 12345678.0 x**2 + ' + '12345678.0 x**3 +\n1234.0 x**4' + )) + + def test_first_linebreak_multiline_one_less_than_linewidth(self): + # Multiline str where len(first_line) + len(next_term) == lw - 1 == 74 + p = poly.Polynomial( + [12345678, 12345678, 12345678, 12345678, 1, 12345678] + ) + assert_equal(len(str(p).split('\n')[0]), 74) + assert_equal(str(p), ( + '12345678.0 + 12345678.0 x + 12345678.0 x**2 + ' + '12345678.0 x**3 + 1.0 x**4 +\n12345678.0 x**5' + )) + + def test_first_linebreak_multiline_on_linewidth(self): + # First line is one character longer than previous test + p = poly.Polynomial( + [12345678, 12345678, 12345678, 12345678.12, 1, 12345678] + ) + assert_equal(str(p), ( + '12345678.0 + 12345678.0 x + 12345678.0 x**2 + ' + '12345678.12 x**3 +\n1.0 x**4 + 12345678.0 x**5' + )) + + @pytest.mark.parametrize(('lw', 'tgt'), ( + (75, ('0.0 + 10.0 x + 200.0 x**2 + 3000.0 x**3 + 40000.0 x**4 + ' + '500000.0 x**5 +\n600000.0 x**6 + 70000.0 x**7 + 8000.0 x**8 + ' + '900.0 x**9')), + (45, ('0.0 + 10.0 x + 200.0 x**2 + 3000.0 x**3 +\n40000.0 x**4 + ' + '500000.0 x**5 +\n600000.0 x**6 + 70000.0 x**7 + 8000.0 x**8 +\n' + '900.0 x**9')), + (132, ('0.0 + 10.0 x + 200.0 x**2 + 3000.0 x**3 + 40000.0 x**4 + ' + '500000.0 x**5 + 600000.0 x**6 + 70000.0 x**7 + 8000.0 x**8 + ' + '900.0 x**9')), + )) + def test_linewidth_printoption(self, lw, tgt): + p = poly.Polynomial( + [0, 10, 200, 3000, 40000, 500000, 600000, 70000, 8000, 900] + ) + with printoptions(linewidth=lw): + assert_equal(str(p), tgt) + for line in str(p).split('\n'): + assert_(len(line) < lw) + + +@pytest.mark.thread_unsafe(reason="set_default_printstyle() is global state") +def test_set_default_printoptions(): + p = poly.Polynomial([1, 2, 3]) + c = poly.Chebyshev([1, 2, 3]) + poly.set_default_printstyle('ascii') + assert_equal(str(p), "1.0 + 2.0 x + 3.0 x**2") + assert_equal(str(c), "1.0 + 2.0 T_1(x) + 3.0 T_2(x)") + poly.set_default_printstyle('unicode') + assert_equal(str(p), "1.0 + 2.0·x + 3.0·x²") + assert_equal(str(c), "1.0 + 2.0·T₁(x) + 3.0·T₂(x)") + with pytest.raises(ValueError): + poly.set_default_printstyle('invalid_input') + + +@pytest.mark.thread_unsafe(reason="set_default_printstyle() is global state") +def test_complex_coefficients(): + """Test both numpy and built-in complex.""" + coefs = [0 + 1j, 1 + 1j, -2 + 2j, 3 + 0j] + # numpy complex + p1 = poly.Polynomial(coefs) + # Python complex + p2 = poly.Polynomial(array(coefs, dtype=object)) + poly.set_default_printstyle('unicode') + assert_equal(str(p1), "1j + (1+1j)·x - (2-2j)·x² + (3+0j)·x³") + assert_equal(str(p2), "1j + (1+1j)·x + (-2+2j)·x² + (3+0j)·x³") + poly.set_default_printstyle('ascii') + assert_equal(str(p1), "1j + (1+1j) x - (2-2j) x**2 + (3+0j) x**3") + assert_equal(str(p2), "1j + (1+1j) x + (-2+2j) x**2 + (3+0j) x**3") + + +@pytest.mark.parametrize(('coefs', 'tgt'), ( + (array([Fraction(1, 2), Fraction(3, 4)], dtype=object), ( + "1/2 + 3/4·x" + )), + (array([1, 2, Fraction(5, 7)], dtype=object), ( + "1 + 2·x + 5/7·x²" + )), + (array([Decimal('1.00'), Decimal('2.2'), 3], dtype=object), ( + "1.00 + 2.2·x + 3·x²" + )), +)) +def test_numeric_object_coefficients(coefs, tgt): + p = poly.Polynomial(coefs) + poly.set_default_printstyle('unicode') + assert_equal(str(p), tgt) + + +@pytest.mark.parametrize(('coefs', 'tgt'), ( + (array([1, 2, 'f'], dtype=object), '1 + 2·x + f·x²'), + (array([1, 2, [3, 4]], dtype=object), '1 + 2·x + [3, 4]·x²'), +)) +def test_nonnumeric_object_coefficients(coefs, tgt): + """ + Test coef fallback for object arrays of non-numeric coefficients. + """ + p = poly.Polynomial(coefs) + poly.set_default_printstyle('unicode') + assert_equal(str(p), tgt) + + +class TestFormat: + def test_format_unicode(self): + poly.set_default_printstyle('ascii') + p = poly.Polynomial([1, 2, 0, -1]) + assert_equal(format(p, 'unicode'), "1.0 + 2.0·x + 0.0·x² - 1.0·x³") + + def test_format_ascii(self): + poly.set_default_printstyle('unicode') + p = poly.Polynomial([1, 2, 0, -1]) + assert_equal( + format(p, 'ascii'), "1.0 + 2.0 x + 0.0 x**2 - 1.0 x**3" + ) + + def test_empty_formatstr(self): + poly.set_default_printstyle('ascii') + p = poly.Polynomial([1, 2, 3]) + assert_equal(format(p), "1.0 + 2.0 x + 3.0 x**2") + assert_equal(f"{p}", "1.0 + 2.0 x + 3.0 x**2") + + def test_bad_formatstr(self): + p = poly.Polynomial([1, 2, 0, -1]) + with pytest.raises(ValueError): + format(p, '.2f') + + +@pytest.mark.parametrize(('poly', 'tgt'), ( + (poly.Polynomial, '1.0 + 2.0·z + 3.0·z²'), + (poly.Chebyshev, '1.0 + 2.0·T₁(z) + 3.0·T₂(z)'), + (poly.Hermite, '1.0 + 2.0·H₁(z) + 3.0·H₂(z)'), + (poly.HermiteE, '1.0 + 2.0·He₁(z) + 3.0·He₂(z)'), + (poly.Laguerre, '1.0 + 2.0·L₁(z) + 3.0·L₂(z)'), + (poly.Legendre, '1.0 + 2.0·P₁(z) + 3.0·P₂(z)'), +)) +def test_symbol(poly, tgt): + p = poly([1, 2, 3], symbol='z') + assert_equal(f"{p:unicode}", tgt) + + +class TestRepr: + def test_polynomial_repr(self): + res = repr(poly.Polynomial([0, 1])) + tgt = ( + "Polynomial([0., 1.], domain=[-1., 1.], window=[-1., 1.], " + "symbol='x')" + ) + assert_equal(res, tgt) + + def test_chebyshev_repr(self): + res = repr(poly.Chebyshev([0, 1])) + tgt = ( + "Chebyshev([0., 1.], domain=[-1., 1.], window=[-1., 1.], " + "symbol='x')" + ) + assert_equal(res, tgt) + + def test_legendre_repr(self): + res = repr(poly.Legendre([0, 1])) + tgt = ( + "Legendre([0., 1.], domain=[-1., 1.], window=[-1., 1.], " + "symbol='x')" + ) + assert_equal(res, tgt) + + def test_hermite_repr(self): + res = repr(poly.Hermite([0, 1])) + tgt = ( + "Hermite([0., 1.], domain=[-1., 1.], window=[-1., 1.], " + "symbol='x')" + ) + assert_equal(res, tgt) + + def test_hermiteE_repr(self): + res = repr(poly.HermiteE([0, 1])) + tgt = ( + "HermiteE([0., 1.], domain=[-1., 1.], window=[-1., 1.], " + "symbol='x')" + ) + assert_equal(res, tgt) + + def test_laguerre_repr(self): + res = repr(poly.Laguerre([0, 1])) + tgt = ( + "Laguerre([0., 1.], domain=[0., 1.], window=[0., 1.], " + "symbol='x')" + ) + assert_equal(res, tgt) + + +class TestLatexRepr: + """Test the latex repr used by Jupyter""" + + @staticmethod + def as_latex(obj): + # right now we ignore the formatting of scalars in our tests, since + # it makes them too verbose. Ideally, the formatting of scalars will + # be fixed such that tests below continue to pass + obj._repr_latex_scalar = lambda x, parens=False: str(x) + try: + return obj._repr_latex_() + finally: + del obj._repr_latex_scalar + + def test_simple_polynomial(self): + # default input + p = poly.Polynomial([1, 2, 3]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,x + 3.0\,x^{2}$') + + # translated input + p = poly.Polynomial([1, 2, 3], domain=[-2, 0]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,\left(1.0 + x\right) + 3.0\,\left(1.0 + x\right)^{2}$') # noqa: E501 + + # scaled input + p = poly.Polynomial([1, 2, 3], domain=[-0.5, 0.5]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,\left(2.0x\right) + 3.0\,\left(2.0x\right)^{2}$') + + # affine input + p = poly.Polynomial([1, 2, 3], domain=[-1, 0]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,\left(1.0 + 2.0x\right) + 3.0\,\left(1.0 + 2.0x\right)^{2}$') # noqa: E501 + + def test_basis_func(self): + p = poly.Chebyshev([1, 2, 3]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0\,{T}_{0}(x) + 2.0\,{T}_{1}(x) + 3.0\,{T}_{2}(x)$') + # affine input - check no surplus parens are added + p = poly.Chebyshev([1, 2, 3], domain=[-1, 0]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0\,{T}_{0}(1.0 + 2.0x) + 2.0\,{T}_{1}(1.0 + 2.0x) + 3.0\,{T}_{2}(1.0 + 2.0x)$') # noqa: E501 + + def test_multichar_basis_func(self): + p = poly.HermiteE([1, 2, 3]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0\,{He}_{0}(x) + 2.0\,{He}_{1}(x) + 3.0\,{He}_{2}(x)$') + + def test_symbol_basic(self): + # default input + p = poly.Polynomial([1, 2, 3], symbol='z') + assert_equal(self.as_latex(p), + r'$z \mapsto 1.0 + 2.0\,z + 3.0\,z^{2}$') + + # translated input + p = poly.Polynomial([1, 2, 3], domain=[-2, 0], symbol='z') + assert_equal( + self.as_latex(p), + ( + r'$z \mapsto 1.0 + 2.0\,\left(1.0 + z\right) + 3.0\,' + r'\left(1.0 + z\right)^{2}$' + ), + ) + + # scaled input + p = poly.Polynomial([1, 2, 3], domain=[-0.5, 0.5], symbol='z') + assert_equal( + self.as_latex(p), + ( + r'$z \mapsto 1.0 + 2.0\,\left(2.0z\right) + 3.0\,' + r'\left(2.0z\right)^{2}$' + ), + ) + + # affine input + p = poly.Polynomial([1, 2, 3], domain=[-1, 0], symbol='z') + assert_equal( + self.as_latex(p), + ( + r'$z \mapsto 1.0 + 2.0\,\left(1.0 + 2.0z\right) + 3.0\,' + r'\left(1.0 + 2.0z\right)^{2}$' + ), + ) + + def test_numeric_object_coefficients(self): + coefs = array([Fraction(1, 2), Fraction(1)]) + p = poly.Polynomial(coefs) + assert_equal(self.as_latex(p), '$x \\mapsto 1/2 + 1\\,x$') + + +SWITCH_TO_EXP = ( + '1.0 + (1.0e-01) x + (1.0e-02) x**2', + '1.2 + (1.2e-01) x + (1.2e-02) x**2', + '1.23 + 0.12 x + (1.23e-02) x**2 + (1.23e-03) x**3', + '1.235 + 0.123 x + (1.235e-02) x**2 + (1.235e-03) x**3', + '1.2346 + 0.1235 x + 0.0123 x**2 + (1.2346e-03) x**3 + (1.2346e-04) x**4', + '1.23457 + 0.12346 x + 0.01235 x**2 + (1.23457e-03) x**3 + ' + '(1.23457e-04) x**4', + '1.234568 + 0.123457 x + 0.012346 x**2 + 0.001235 x**3 + ' + '(1.234568e-04) x**4 + (1.234568e-05) x**5', + '1.2345679 + 0.1234568 x + 0.0123457 x**2 + 0.0012346 x**3 + ' + '(1.2345679e-04) x**4 + (1.2345679e-05) x**5') + +class TestPrintOptions: + """ + Test the output is properly configured via printoptions. + The exponential notation is enabled automatically when the values + are too small or too large. + """ + + @pytest.fixture(scope='class', autouse=True) + def use_ascii(self): + poly.set_default_printstyle('ascii') + + def test_str(self): + p = poly.Polynomial([1 / 2, 1 / 7, 1 / 7 * 10**8, 1 / 7 * 10**9]) + assert_equal(str(p), '0.5 + 0.14285714 x + 14285714.28571429 x**2 ' + '+ (1.42857143e+08) x**3') + + with printoptions(precision=3): + assert_equal(str(p), '0.5 + 0.143 x + 14285714.286 x**2 ' + '+ (1.429e+08) x**3') + + def test_latex(self): + p = poly.Polynomial([1 / 2, 1 / 7, 1 / 7 * 10**8, 1 / 7 * 10**9]) + assert_equal(p._repr_latex_(), + r'$x \mapsto \text{0.5} + \text{0.14285714}\,x + ' + r'\text{14285714.28571429}\,x^{2} + ' + r'\text{(1.42857143e+08)}\,x^{3}$') + + with printoptions(precision=3): + assert_equal(p._repr_latex_(), + r'$x \mapsto \text{0.5} + \text{0.143}\,x + ' + r'\text{14285714.286}\,x^{2} + \text{(1.429e+08)}\,x^{3}$') + + def test_fixed(self): + p = poly.Polynomial([1 / 2]) + assert_equal(str(p), '0.5') + + with printoptions(floatmode='fixed'): + assert_equal(str(p), '0.50000000') + + with printoptions(floatmode='fixed', precision=4): + assert_equal(str(p), '0.5000') + + def test_switch_to_exp(self): + for i, s in enumerate(SWITCH_TO_EXP): + with printoptions(precision=i): + p = poly.Polynomial([1.23456789 * 10**-i + for i in range(i // 2 + 3)]) + assert str(p).replace('\n', ' ') == s + + def test_non_finite(self): + p = poly.Polynomial([nan, inf]) + assert str(p) == 'nan + inf x' + assert p._repr_latex_() == r'$x \mapsto \text{nan} + \text{inf}\,x$' # noqa: RUF027 + with printoptions(nanstr='NAN', infstr='INF'): + assert str(p) == 'NAN + INF x' + assert p._repr_latex_() == \ + r'$x \mapsto \text{NAN} + \text{INF}\,x$' diff --git a/local_packages/numpy/polynomial/tests/test_symbol.py b/local_packages/numpy/polynomial/tests/test_symbol.py new file mode 100644 index 0000000..3de9e38 --- /dev/null +++ b/local_packages/numpy/polynomial/tests/test_symbol.py @@ -0,0 +1,217 @@ +""" +Tests related to the ``symbol`` attribute of the ABCPolyBase class. +""" + +import pytest + +import numpy.polynomial as poly +from numpy._core import array +from numpy.testing import assert_, assert_equal, assert_raises + + +class TestInit: + """ + Test polynomial creation with symbol kwarg. + """ + c = [1, 2, 3] + + def test_default_symbol(self): + p = poly.Polynomial(self.c) + assert_equal(p.symbol, 'x') + + @pytest.mark.parametrize(('bad_input', 'exception'), ( + ('', ValueError), + ('3', ValueError), + (None, TypeError), + (1, TypeError), + )) + def test_symbol_bad_input(self, bad_input, exception): + with pytest.raises(exception): + p = poly.Polynomial(self.c, symbol=bad_input) + + @pytest.mark.parametrize('symbol', ( + 'x', + 'x_1', + 'A', + 'xyz', + 'β', + )) + def test_valid_symbols(self, symbol): + """ + Values for symbol that should pass input validation. + """ + p = poly.Polynomial(self.c, symbol=symbol) + assert_equal(p.symbol, symbol) + + def test_property(self): + """ + 'symbol' attribute is read only. + """ + p = poly.Polynomial(self.c, symbol='x') + with pytest.raises(AttributeError): + p.symbol = 'z' + + def test_change_symbol(self): + p = poly.Polynomial(self.c, symbol='y') + # Create new polynomial from p with different symbol + pt = poly.Polynomial(p.coef, symbol='t') + assert_equal(pt.symbol, 't') + + +class TestUnaryOperators: + p = poly.Polynomial([1, 2, 3], symbol='z') + + def test_neg(self): + n = -self.p + assert_equal(n.symbol, 'z') + + def test_scalarmul(self): + out = self.p * 10 + assert_equal(out.symbol, 'z') + + def test_rscalarmul(self): + out = 10 * self.p + assert_equal(out.symbol, 'z') + + def test_pow(self): + out = self.p ** 3 + assert_equal(out.symbol, 'z') + + +@pytest.mark.parametrize( + 'rhs', + ( + poly.Polynomial([4, 5, 6], symbol='z'), + array([4, 5, 6]), + ), +) +class TestBinaryOperatorsSameSymbol: + """ + Ensure symbol is preserved for numeric operations on polynomials with + the same symbol + """ + p = poly.Polynomial([1, 2, 3], symbol='z') + + def test_add(self, rhs): + out = self.p + rhs + assert_equal(out.symbol, 'z') + + def test_sub(self, rhs): + out = self.p - rhs + assert_equal(out.symbol, 'z') + + def test_polymul(self, rhs): + out = self.p * rhs + assert_equal(out.symbol, 'z') + + def test_divmod(self, rhs): + for out in divmod(self.p, rhs): + assert_equal(out.symbol, 'z') + + def test_radd(self, rhs): + out = rhs + self.p + assert_equal(out.symbol, 'z') + + def test_rsub(self, rhs): + out = rhs - self.p + assert_equal(out.symbol, 'z') + + def test_rmul(self, rhs): + out = rhs * self.p + assert_equal(out.symbol, 'z') + + def test_rdivmod(self, rhs): + for out in divmod(rhs, self.p): + assert_equal(out.symbol, 'z') + + +class TestBinaryOperatorsDifferentSymbol: + p = poly.Polynomial([1, 2, 3], symbol='x') + other = poly.Polynomial([4, 5, 6], symbol='y') + ops = (p.__add__, p.__sub__, p.__mul__, p.__floordiv__, p.__mod__) + + @pytest.mark.parametrize('f', ops) + def test_binops_fails(self, f): + assert_raises(ValueError, f, self.other) + + +class TestEquality: + p = poly.Polynomial([1, 2, 3], symbol='x') + + def test_eq(self): + other = poly.Polynomial([1, 2, 3], symbol='x') + assert_(self.p == other) + + def test_neq(self): + other = poly.Polynomial([1, 2, 3], symbol='y') + assert_(not self.p == other) + + +class TestExtraMethods: + """ + Test other methods for manipulating/creating polynomial objects. + """ + p = poly.Polynomial([1, 2, 3, 0], symbol='z') + + def test_copy(self): + other = self.p.copy() + assert_equal(other.symbol, 'z') + + def test_trim(self): + other = self.p.trim() + assert_equal(other.symbol, 'z') + + def test_truncate(self): + other = self.p.truncate(2) + assert_equal(other.symbol, 'z') + + @pytest.mark.parametrize('kwarg', ( + {'domain': [-10, 10]}, + {'window': [-10, 10]}, + {'kind': poly.Chebyshev}, + )) + def test_convert(self, kwarg): + other = self.p.convert(**kwarg) + assert_equal(other.symbol, 'z') + + def test_integ(self): + other = self.p.integ() + assert_equal(other.symbol, 'z') + + def test_deriv(self): + other = self.p.deriv() + assert_equal(other.symbol, 'z') + + +def test_composition(): + p = poly.Polynomial([3, 2, 1], symbol="t") + q = poly.Polynomial([5, 1, 0, -1], symbol="λ_1") + r = p(q) + assert r.symbol == "λ_1" + + +# +# Class methods that result in new polynomial class instances +# + + +def test_fit(): + x, y = (range(10),) * 2 + p = poly.Polynomial.fit(x, y, deg=1, symbol='z') + assert_equal(p.symbol, 'z') + + +def test_froomroots(): + roots = [-2, 2] + p = poly.Polynomial.fromroots(roots, symbol='z') + assert_equal(p.symbol, 'z') + + +def test_identity(): + p = poly.Polynomial.identity(domain=[-1, 1], window=[5, 20], symbol='z') + assert_equal(p.symbol, 'z') + + +def test_basis(): + p = poly.Polynomial.basis(3, symbol='z') + assert_equal(p.symbol, 'z') diff --git a/local_packages/numpy/py.typed b/local_packages/numpy/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/local_packages/numpy/random/LICENSE.md b/local_packages/numpy/random/LICENSE.md new file mode 100644 index 0000000..a6cf1b1 --- /dev/null +++ b/local_packages/numpy/random/LICENSE.md @@ -0,0 +1,71 @@ +**This software is dual-licensed under the The University of Illinois/NCSA +Open Source License (NCSA) and The 3-Clause BSD License** + +# NCSA Open Source License +**Copyright (c) 2019 Kevin Sheppard. All rights reserved.** + +Developed by: Kevin Sheppard (, +) +[http://www.kevinsheppard.com](http://www.kevinsheppard.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal with +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimers. + +Redistributions in binary form must reproduce the above copyright notice, this +list of conditions and the following disclaimers in the documentation and/or +other materials provided with the distribution. + +Neither the names of Kevin Sheppard, nor the names of any contributors may be +used to endorse or promote products derived from this Software without specific +prior written permission. + +**THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH +THE SOFTWARE.** + + +# 3-Clause BSD License +**Copyright (c) 2019 Kevin Sheppard. All rights reserved.** + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +**THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE.** + +# Components + +Many parts of this module have been derived from original sources, +often the algorithm's designer. Component licenses are located with +the component code. diff --git a/local_packages/numpy/random/__init__.pxd b/local_packages/numpy/random/__init__.pxd new file mode 100644 index 0000000..1f90572 --- /dev/null +++ b/local_packages/numpy/random/__init__.pxd @@ -0,0 +1,14 @@ +cimport numpy as np +from libc.stdint cimport uint32_t, uint64_t + +cdef extern from "numpy/random/bitgen.h": + struct bitgen: + void *state + uint64_t (*next_uint64)(void *st) nogil + uint32_t (*next_uint32)(void *st) nogil + double (*next_double)(void *st) nogil + uint64_t (*next_raw)(void *st) nogil + + ctypedef bitgen bitgen_t + +from numpy.random.bit_generator cimport BitGenerator, SeedSequence diff --git a/local_packages/numpy/random/__init__.py b/local_packages/numpy/random/__init__.py new file mode 100644 index 0000000..3e21d59 --- /dev/null +++ b/local_packages/numpy/random/__init__.py @@ -0,0 +1,213 @@ +""" +======================== +Random Number Generation +======================== + +Use ``default_rng()`` to create a `Generator` and call its methods. + +=============== ========================================================= +Generator +--------------- --------------------------------------------------------- +Generator Class implementing all of the random number distributions +default_rng Default constructor for ``Generator`` +=============== ========================================================= + +============================================= === +BitGenerator Streams that work with Generator +--------------------------------------------- --- +MT19937 +PCG64 +PCG64DXSM +Philox +SFC64 +============================================= === + +============================================= === +Getting entropy to initialize a BitGenerator +--------------------------------------------- --- +SeedSequence +============================================= === + + +Legacy +------ + +For backwards compatibility with previous versions of numpy before 1.17, the +various aliases to the global `RandomState` methods are left alone and do not +use the new `Generator` API. + +==================== ========================================================= +Utility functions +-------------------- --------------------------------------------------------- +random Uniformly distributed floats over ``[0, 1)`` +bytes Uniformly distributed random bytes. +permutation Randomly permute a sequence / generate a random sequence. +shuffle Randomly permute a sequence in place. +choice Random sample from 1-D array. +==================== ========================================================= + +==================== ========================================================= +Compatibility +functions - removed +in the new API +-------------------- --------------------------------------------------------- +rand Uniformly distributed values. +randn Normally distributed values. +ranf Uniformly distributed floating point numbers. +random_integers Uniformly distributed integers in a given range. + (deprecated, use ``integers(..., closed=True)`` instead) +random_sample Alias for `random_sample` +randint Uniformly distributed integers in a given range +seed Seed the legacy random number generator. +==================== ========================================================= + +==================== ========================================================= +Univariate +distributions +-------------------- --------------------------------------------------------- +beta Beta distribution over ``[0, 1]``. +binomial Binomial distribution. +chisquare :math:`\\chi^2` distribution. +exponential Exponential distribution. +f F (Fisher-Snedecor) distribution. +gamma Gamma distribution. +geometric Geometric distribution. +gumbel Gumbel distribution. +hypergeometric Hypergeometric distribution. +laplace Laplace distribution. +logistic Logistic distribution. +lognormal Log-normal distribution. +logseries Logarithmic series distribution. +negative_binomial Negative binomial distribution. +noncentral_chisquare Non-central chi-square distribution. +noncentral_f Non-central F distribution. +normal Normal / Gaussian distribution. +pareto Pareto distribution. +poisson Poisson distribution. +power Power distribution. +rayleigh Rayleigh distribution. +triangular Triangular distribution. +uniform Uniform distribution. +vonmises Von Mises circular distribution. +wald Wald (inverse Gaussian) distribution. +weibull Weibull distribution. +zipf Zipf's distribution over ranked data. +==================== ========================================================= + +==================== ========================================================== +Multivariate +distributions +-------------------- ---------------------------------------------------------- +dirichlet Multivariate generalization of Beta distribution. +multinomial Multivariate generalization of the binomial distribution. +multivariate_normal Multivariate generalization of the normal distribution. +==================== ========================================================== + +==================== ========================================================= +Standard +distributions +-------------------- --------------------------------------------------------- +standard_cauchy Standard Cauchy-Lorentz distribution. +standard_exponential Standard exponential distribution. +standard_gamma Standard Gamma distribution. +standard_normal Standard normal distribution. +standard_t Standard Student's t-distribution. +==================== ========================================================= + +==================== ========================================================= +Internal functions +-------------------- --------------------------------------------------------- +get_state Get tuple representing internal state of generator. +set_state Set state of generator. +==================== ========================================================= + + +""" +__all__ = [ + 'beta', + 'binomial', + 'bytes', + 'chisquare', + 'choice', + 'dirichlet', + 'exponential', + 'f', + 'gamma', + 'geometric', + 'get_state', + 'gumbel', + 'hypergeometric', + 'laplace', + 'logistic', + 'lognormal', + 'logseries', + 'multinomial', + 'multivariate_normal', + 'negative_binomial', + 'noncentral_chisquare', + 'noncentral_f', + 'normal', + 'pareto', + 'permutation', + 'poisson', + 'power', + 'rand', + 'randint', + 'randn', + 'random', + 'random_integers', + 'random_sample', + 'ranf', + 'rayleigh', + 'sample', + 'seed', + 'set_state', + 'shuffle', + 'standard_cauchy', + 'standard_exponential', + 'standard_gamma', + 'standard_normal', + 'standard_t', + 'triangular', + 'uniform', + 'vonmises', + 'wald', + 'weibull', + 'zipf', +] + +# add these for module-freeze analysis (like PyInstaller) +from . import _bounded_integers, _common, _pickle +from ._generator import Generator, default_rng +from ._mt19937 import MT19937 +from ._pcg64 import PCG64, PCG64DXSM +from ._philox import Philox +from ._sfc64 import SFC64 +from .bit_generator import BitGenerator, SeedSequence +from .mtrand import * + +__all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937', + 'Philox', 'PCG64', 'PCG64DXSM', 'SFC64', 'default_rng', + 'BitGenerator'] + + +def __RandomState_ctor(): + """Return a RandomState instance. + + This function exists solely to assist (un)pickling. + + Note that the state of the RandomState returned here is irrelevant, as this + function's entire purpose is to return a newly allocated RandomState whose + state pickle can set. Consequently the RandomState returned by this function + is a freshly allocated copy with a seed=0. + + See https://github.com/numpy/numpy/issues/4763 for a detailed discussion + + """ + return RandomState(seed=0) + + +from numpy._pytesttester import PytestTester + +test = PytestTester(__name__) +del PytestTester diff --git a/local_packages/numpy/random/__init__.pyi b/local_packages/numpy/random/__init__.pyi new file mode 100644 index 0000000..e9b9fb5 --- /dev/null +++ b/local_packages/numpy/random/__init__.pyi @@ -0,0 +1,124 @@ +from ._generator import Generator, default_rng +from ._mt19937 import MT19937 +from ._pcg64 import PCG64, PCG64DXSM +from ._philox import Philox +from ._sfc64 import SFC64 +from .bit_generator import BitGenerator, SeedSequence +from .mtrand import ( + RandomState, + beta, + binomial, + bytes, + chisquare, + choice, + dirichlet, + exponential, + f, + gamma, + geometric, + get_bit_generator, # noqa: F401 + get_state, + gumbel, + hypergeometric, + laplace, + logistic, + lognormal, + logseries, + multinomial, + multivariate_normal, + negative_binomial, + noncentral_chisquare, + noncentral_f, + normal, + pareto, + permutation, + poisson, + power, + rand, + randint, + randn, + random, + random_integers, + random_sample, + ranf, + rayleigh, + sample, + seed, + set_bit_generator, # noqa: F401 + set_state, + shuffle, + standard_cauchy, + standard_exponential, + standard_gamma, + standard_normal, + standard_t, + triangular, + uniform, + vonmises, + wald, + weibull, + zipf, +) + +__all__ = [ + "beta", + "binomial", + "bytes", + "chisquare", + "choice", + "dirichlet", + "exponential", + "f", + "gamma", + "geometric", + "get_state", + "gumbel", + "hypergeometric", + "laplace", + "logistic", + "lognormal", + "logseries", + "multinomial", + "multivariate_normal", + "negative_binomial", + "noncentral_chisquare", + "noncentral_f", + "normal", + "pareto", + "permutation", + "poisson", + "power", + "rand", + "randint", + "randn", + "random", + "random_integers", + "random_sample", + "ranf", + "rayleigh", + "sample", + "seed", + "set_state", + "shuffle", + "standard_cauchy", + "standard_exponential", + "standard_gamma", + "standard_normal", + "standard_t", + "triangular", + "uniform", + "vonmises", + "wald", + "weibull", + "zipf", + "Generator", + "RandomState", + "SeedSequence", + "MT19937", + "Philox", + "PCG64", + "PCG64DXSM", + "SFC64", + "default_rng", + "BitGenerator", +] diff --git a/local_packages/numpy/random/_bounded_integers.cp312-win_amd64.lib b/local_packages/numpy/random/_bounded_integers.cp312-win_amd64.lib new file mode 100644 index 0000000..0673882 Binary files /dev/null and b/local_packages/numpy/random/_bounded_integers.cp312-win_amd64.lib differ diff --git a/local_packages/numpy/random/_bounded_integers.cp312-win_amd64.pyd b/local_packages/numpy/random/_bounded_integers.cp312-win_amd64.pyd new file mode 100644 index 0000000..8b4f453 Binary files /dev/null and b/local_packages/numpy/random/_bounded_integers.cp312-win_amd64.pyd differ diff --git a/local_packages/numpy/random/_bounded_integers.pxd b/local_packages/numpy/random/_bounded_integers.pxd new file mode 100644 index 0000000..e6ec4aa --- /dev/null +++ b/local_packages/numpy/random/_bounded_integers.pxd @@ -0,0 +1,38 @@ +from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t, + int8_t, int16_t, int32_t, int64_t, intptr_t) +import numpy as np +cimport numpy as np +ctypedef np.npy_bool bool_t + +from numpy.random cimport bitgen_t + +cdef inline uint64_t _gen_mask(uint64_t max_val) noexcept nogil: + """Mask generator for use in bounded random numbers""" + # Smallest bit mask >= max + cdef uint64_t mask = max_val + mask |= mask >> 1 + mask |= mask >> 2 + mask |= mask >> 4 + mask |= mask >> 8 + mask |= mask >> 16 + mask |= mask >> 32 + return mask + + +cdef object _rand_uint64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) + +cdef object _rand_uint32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) + +cdef object _rand_uint16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) + +cdef object _rand_uint8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) + +cdef object _rand_bool(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) + +cdef object _rand_int64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) + +cdef object _rand_int32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) + +cdef object _rand_int16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) + +cdef object _rand_int8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) diff --git a/local_packages/numpy/random/_bounded_integers.pyi b/local_packages/numpy/random/_bounded_integers.pyi new file mode 100644 index 0000000..c9c2ef6 --- /dev/null +++ b/local_packages/numpy/random/_bounded_integers.pyi @@ -0,0 +1 @@ +__all__: list[str] = [] diff --git a/local_packages/numpy/random/_common.cp312-win_amd64.lib b/local_packages/numpy/random/_common.cp312-win_amd64.lib new file mode 100644 index 0000000..a01495a Binary files /dev/null and b/local_packages/numpy/random/_common.cp312-win_amd64.lib differ diff --git a/local_packages/numpy/random/_common.cp312-win_amd64.pyd b/local_packages/numpy/random/_common.cp312-win_amd64.pyd new file mode 100644 index 0000000..f35d4de Binary files /dev/null and b/local_packages/numpy/random/_common.cp312-win_amd64.pyd differ diff --git a/local_packages/numpy/random/_common.pxd b/local_packages/numpy/random/_common.pxd new file mode 100644 index 0000000..7b6ae56 --- /dev/null +++ b/local_packages/numpy/random/_common.pxd @@ -0,0 +1,110 @@ +#cython: language_level=3 + +from libc.stdint cimport uint32_t, uint64_t, int32_t, int64_t + +import numpy as np +cimport numpy as np + +from numpy.random cimport bitgen_t + +cdef double POISSON_LAM_MAX +cdef double LEGACY_POISSON_LAM_MAX +cdef uint64_t MAXSIZE + +cdef enum ConstraintType: + CONS_NONE + CONS_NON_NEGATIVE + CONS_POSITIVE + CONS_POSITIVE_NOT_NAN + CONS_BOUNDED_0_1 + CONS_BOUNDED_GT_0_1 + CONS_BOUNDED_LT_0_1 + CONS_GT_1 + CONS_GTE_1 + CONS_POISSON + LEGACY_CONS_POISSON + LEGACY_CONS_NON_NEGATIVE_INBOUNDS_LONG + +ctypedef ConstraintType constraint_type +ctypedef fused double_or_int64: + double + int64_t + +cdef object benchmark(bitgen_t *bitgen, object lock, Py_ssize_t cnt, object method) +cdef object random_raw(bitgen_t *bitgen, object lock, object size, object output) +cdef object prepare_cffi(bitgen_t *bitgen) +cdef object prepare_ctypes(bitgen_t *bitgen) +cdef int check_constraint(double_or_int64 val, object name, constraint_type cons) except -1 +cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1 + +cdef extern from "include/aligned_malloc.h": + cdef void *PyArray_realloc_aligned(void *p, size_t n) + cdef void *PyArray_malloc_aligned(size_t n) + cdef void *PyArray_calloc_aligned(size_t n, size_t s) + cdef void PyArray_free_aligned(void *p) + +ctypedef void (*random_double_fill)(bitgen_t *state, np.npy_intp count, double* out) noexcept nogil +ctypedef double (*random_double_0)(void *state) noexcept nogil +ctypedef double (*random_double_1)(void *state, double a) noexcept nogil +ctypedef double (*random_double_2)(void *state, double a, double b) noexcept nogil +ctypedef double (*random_double_3)(void *state, double a, double b, double c) noexcept nogil + +ctypedef void (*random_float_fill)(bitgen_t *state, np.npy_intp count, float* out) noexcept nogil +ctypedef float (*random_float_0)(bitgen_t *state) noexcept nogil +ctypedef float (*random_float_1)(bitgen_t *state, float a) noexcept nogil + +ctypedef int64_t (*random_uint_0)(void *state) noexcept nogil +ctypedef int64_t (*random_uint_d)(void *state, double a) noexcept nogil +ctypedef int64_t (*random_uint_dd)(void *state, double a, double b) noexcept nogil +ctypedef int64_t (*random_uint_di)(void *state, double a, uint64_t b) noexcept nogil +ctypedef int64_t (*random_uint_i)(void *state, int64_t a) noexcept nogil +ctypedef int64_t (*random_uint_iii)(void *state, int64_t a, int64_t b, int64_t c) noexcept nogil + +ctypedef uint32_t (*random_uint_0_32)(bitgen_t *state) noexcept nogil +ctypedef uint32_t (*random_uint_1_i_32)(bitgen_t *state, uint32_t a) noexcept nogil + +ctypedef int32_t (*random_int_2_i_32)(bitgen_t *state, int32_t a, int32_t b) noexcept nogil +ctypedef int64_t (*random_int_2_i)(bitgen_t *state, int64_t a, int64_t b) noexcept nogil + +cdef double kahan_sum(double *darr, np.npy_intp n) noexcept + +cdef inline double uint64_to_double(uint64_t rnd) noexcept nogil: + return (rnd >> 11) * (1.0 / 9007199254740992.0) + +cdef object double_fill(void *func, bitgen_t *state, object size, object lock, object out) + +cdef object float_fill(void *func, bitgen_t *state, object size, object lock, object out) + +cdef object float_fill_from_double(void *func, bitgen_t *state, object size, object lock, object out) + +cdef object wrap_int(object val, object bits) + +cdef np.ndarray int_to_array(object value, object name, object bits, object uint_size) + +cdef validate_output_shape(iter_shape, np.ndarray output) + +cdef object cont(void *func, void *state, object size, object lock, int narg, + object a, object a_name, constraint_type a_constraint, + object b, object b_name, constraint_type b_constraint, + object c, object c_name, constraint_type c_constraint, + object out) + +cdef object disc(void *func, void *state, object size, object lock, + int narg_double, int narg_int64, + object a, object a_name, constraint_type a_constraint, + object b, object b_name, constraint_type b_constraint, + object c, object c_name, constraint_type c_constraint) + +cdef object cont_f(void *func, bitgen_t *state, object size, object lock, + object a, object a_name, constraint_type a_constraint, + object out) + +cdef object cont_broadcast_3(void *func, void *state, object size, object lock, + np.ndarray a_arr, object a_name, constraint_type a_constraint, + np.ndarray b_arr, object b_name, constraint_type b_constraint, + np.ndarray c_arr, object c_name, constraint_type c_constraint) + +cdef object discrete_broadcast_iii(void *func, void *state, object size, object lock, + np.ndarray a_arr, object a_name, constraint_type a_constraint, + np.ndarray b_arr, object b_name, constraint_type b_constraint, + np.ndarray c_arr, object c_name, constraint_type c_constraint) diff --git a/local_packages/numpy/random/_common.pyi b/local_packages/numpy/random/_common.pyi new file mode 100644 index 0000000..b667fd1 --- /dev/null +++ b/local_packages/numpy/random/_common.pyi @@ -0,0 +1,16 @@ +from collections.abc import Callable +from typing import Any, NamedTuple, TypeAlias + +import numpy as np + +__all__: list[str] = ["interface"] + +_CDataVoidPointer: TypeAlias = Any + +class interface(NamedTuple): + state_address: int + state: _CDataVoidPointer + next_uint64: Callable[..., np.uint64] + next_uint32: Callable[..., np.uint32] + next_double: Callable[..., np.float64] + bit_generator: _CDataVoidPointer diff --git a/local_packages/numpy/random/_examples/cffi/extending.py b/local_packages/numpy/random/_examples/cffi/extending.py new file mode 100644 index 0000000..ad4c9ac --- /dev/null +++ b/local_packages/numpy/random/_examples/cffi/extending.py @@ -0,0 +1,44 @@ +""" +Use cffi to access any of the underlying C functions from distributions.h +""" +import os + +import cffi + +import numpy as np + +from .parse import parse_distributions_h + +ffi = cffi.FFI() + +inc_dir = os.path.join(np.get_include(), 'numpy') + +# Basic numpy types +ffi.cdef(''' + typedef intptr_t npy_intp; + typedef unsigned char npy_bool; + +''') + +parse_distributions_h(ffi, inc_dir) + +lib = ffi.dlopen(np.random._generator.__file__) + +# Compare the distributions.h random_standard_normal_fill to +# Generator.standard_random +bit_gen = np.random.PCG64() +rng = np.random.Generator(bit_gen) +state = bit_gen.state + +interface = rng.bit_generator.cffi +n = 100 +vals_cffi = ffi.new('double[%d]' % n) +lib.random_standard_normal_fill(interface.bit_generator, n, vals_cffi) + +# reset the state +bit_gen.state = state + +vals = rng.standard_normal(n) + +for i in range(n): + assert vals[i] == vals_cffi[i] diff --git a/local_packages/numpy/random/_examples/cffi/parse.py b/local_packages/numpy/random/_examples/cffi/parse.py new file mode 100644 index 0000000..0f80adb --- /dev/null +++ b/local_packages/numpy/random/_examples/cffi/parse.py @@ -0,0 +1,53 @@ +import os + + +def parse_distributions_h(ffi, inc_dir): + """ + Parse distributions.h located in inc_dir for CFFI, filling in the ffi.cdef + + Read the function declarations without the "#define ..." macros that will + be filled in when loading the library. + """ + + with open(os.path.join(inc_dir, 'random', 'bitgen.h')) as fid: + s = [] + for line in fid: + # massage the include file + if line.strip().startswith('#'): + continue + s.append(line) + ffi.cdef('\n'.join(s)) + + with open(os.path.join(inc_dir, 'random', 'distributions.h')) as fid: + s = [] + in_skip = 0 + ignoring = False + for line in fid: + # check for and remove extern "C" guards + if ignoring: + if line.strip().startswith('#endif'): + ignoring = False + continue + if line.strip().startswith('#ifdef __cplusplus'): + ignoring = True + + # massage the include file + if line.strip().startswith('#'): + continue + + # skip any inlined function definition + # which starts with 'static inline xxx(...) {' + # and ends with a closing '}' + if line.strip().startswith('static inline'): + in_skip += line.count('{') + continue + elif in_skip > 0: + in_skip += line.count('{') + in_skip -= line.count('}') + continue + + # replace defines with their value or remove them + line = line.replace('DECLDIR', '') + line = line.replace('RAND_INT_TYPE', 'int64_t') + s.append(line) + ffi.cdef('\n'.join(s)) diff --git a/local_packages/numpy/random/_examples/cython/extending.pyx b/local_packages/numpy/random/_examples/cython/extending.pyx new file mode 100644 index 0000000..6a0f45e --- /dev/null +++ b/local_packages/numpy/random/_examples/cython/extending.pyx @@ -0,0 +1,77 @@ +#cython: language_level=3 + +from libc.stdint cimport uint32_t +from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer + +import numpy as np +cimport numpy as np +cimport cython + +from numpy.random cimport bitgen_t +from numpy.random import PCG64 + +np.import_array() + + +@cython.boundscheck(False) +@cython.wraparound(False) +def uniform_mean(Py_ssize_t n): + cdef Py_ssize_t i + cdef bitgen_t *rng + cdef const char *capsule_name = "BitGenerator" + cdef double[::1] random_values + cdef np.ndarray randoms + + x = PCG64() + capsule = x.capsule + if not PyCapsule_IsValid(capsule, capsule_name): + raise ValueError("Invalid pointer to anon_func_state") + rng = PyCapsule_GetPointer(capsule, capsule_name) + random_values = np.empty(n) + # Best practice is to acquire the lock whenever generating random values. + # This prevents other threads from modifying the state. Acquiring the lock + # is only necessary if the GIL is also released, as in this example. + with x.lock, nogil: + for i in range(n): + random_values[i] = rng.next_double(rng.state) + randoms = np.asarray(random_values) + return randoms.mean() + + +# This function is declared nogil so it can be used without the GIL below +cdef uint32_t bounded_uint(uint32_t lb, uint32_t ub, bitgen_t *rng) nogil: + cdef uint32_t mask, delta, val + mask = delta = ub - lb + mask |= mask >> 1 + mask |= mask >> 2 + mask |= mask >> 4 + mask |= mask >> 8 + mask |= mask >> 16 + + val = rng.next_uint32(rng.state) & mask + while val > delta: + val = rng.next_uint32(rng.state) & mask + + return lb + val + + +@cython.boundscheck(False) +@cython.wraparound(False) +def bounded_uints(uint32_t lb, uint32_t ub, Py_ssize_t n): + cdef Py_ssize_t i + cdef bitgen_t *rng + cdef uint32_t[::1] out + cdef const char *capsule_name = "BitGenerator" + + x = PCG64() + out = np.empty(n, dtype=np.uint32) + capsule = x.capsule + + if not PyCapsule_IsValid(capsule, capsule_name): + raise ValueError("Invalid pointer to anon_func_state") + rng = PyCapsule_GetPointer(capsule, capsule_name) + + with x.lock, nogil: + for i in range(n): + out[i] = bounded_uint(lb, ub, rng) + return np.asarray(out) diff --git a/local_packages/numpy/random/_examples/cython/extending_distributions.pyx b/local_packages/numpy/random/_examples/cython/extending_distributions.pyx new file mode 100644 index 0000000..8de7226 --- /dev/null +++ b/local_packages/numpy/random/_examples/cython/extending_distributions.pyx @@ -0,0 +1,117 @@ +#cython: language_level=3 +""" +This file shows how the to use a BitGenerator to create a distribution. +""" +import numpy as np +cimport numpy as np +cimport cython +from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer +from libc.stdint cimport uint16_t, uint64_t +from numpy.random cimport bitgen_t +from numpy.random import PCG64 +from numpy.random.c_distributions cimport ( + random_standard_uniform_fill, random_standard_uniform_fill_f) + +np.import_array() + + +@cython.boundscheck(False) +@cython.wraparound(False) +def uniforms(Py_ssize_t n): + """ + Create an array of `n` uniformly distributed doubles. + A 'real' distribution would want to process the values into + some non-uniform distribution + """ + cdef Py_ssize_t i + cdef bitgen_t *rng + cdef const char *capsule_name = "BitGenerator" + cdef double[::1] random_values + + x = PCG64() + capsule = x.capsule + # Optional check that the capsule if from a BitGenerator + if not PyCapsule_IsValid(capsule, capsule_name): + raise ValueError("Invalid pointer to anon_func_state") + # Cast the pointer + rng = PyCapsule_GetPointer(capsule, capsule_name) + random_values = np.empty(n, dtype='float64') + with x.lock, nogil: + for i in range(n): + # Call the function + random_values[i] = rng.next_double(rng.state) + randoms = np.asarray(random_values) + + return randoms + +# cython example 2 +@cython.boundscheck(False) +@cython.wraparound(False) +def uint10_uniforms(Py_ssize_t n): + """Uniform 10 bit integers stored as 16-bit unsigned integers""" + cdef Py_ssize_t i + cdef bitgen_t *rng + cdef const char *capsule_name = "BitGenerator" + cdef uint16_t[::1] random_values + cdef int bits_remaining + cdef int width = 10 + cdef uint64_t buff, mask = 0x3FF + + x = PCG64() + capsule = x.capsule + if not PyCapsule_IsValid(capsule, capsule_name): + raise ValueError("Invalid pointer to anon_func_state") + rng = PyCapsule_GetPointer(capsule, capsule_name) + random_values = np.empty(n, dtype='uint16') + # Best practice is to release GIL and acquire the lock + bits_remaining = 0 + with x.lock, nogil: + for i in range(n): + if bits_remaining < width: + buff = rng.next_uint64(rng.state) + random_values[i] = buff & mask + buff >>= width + + randoms = np.asarray(random_values) + return randoms + +# cython example 3 +def uniforms_ex(bit_generator, Py_ssize_t n, dtype=np.float64): + """ + Create an array of `n` uniformly distributed doubles via a "fill" function. + + A 'real' distribution would want to process the values into + some non-uniform distribution + + Parameters + ---------- + bit_generator: BitGenerator instance + n: int + Output vector length + dtype: {str, dtype}, optional + Desired dtype, either 'd' (or 'float64') or 'f' (or 'float32'). The + default dtype value is 'd' + """ + cdef bitgen_t *rng + cdef const char *capsule_name = "BitGenerator" + cdef np.ndarray randoms + + capsule = bit_generator.capsule + # Optional check that the capsule if from a BitGenerator + if not PyCapsule_IsValid(capsule, capsule_name): + raise ValueError("Invalid pointer to anon_func_state") + # Cast the pointer + rng = PyCapsule_GetPointer(capsule, capsule_name) + + _dtype = np.dtype(dtype) + randoms = np.empty(n, dtype=_dtype) + if _dtype == np.float32: + with bit_generator.lock: + random_standard_uniform_fill_f(rng, n, np.PyArray_DATA(randoms)) + elif _dtype == np.float64: + with bit_generator.lock: + random_standard_uniform_fill(rng, n, np.PyArray_DATA(randoms)) + else: + raise TypeError('Unsupported dtype %r for random' % _dtype) + return randoms + diff --git a/local_packages/numpy/random/_examples/cython/meson.build b/local_packages/numpy/random/_examples/cython/meson.build new file mode 100644 index 0000000..7aa367d --- /dev/null +++ b/local_packages/numpy/random/_examples/cython/meson.build @@ -0,0 +1,53 @@ +project('random-build-examples', 'c', 'cpp', 'cython') + +py_mod = import('python') +py3 = py_mod.find_installation(pure: false) + +cc = meson.get_compiler('c') +cy = meson.get_compiler('cython') + +# Keep synced with pyproject.toml +if not cy.version().version_compare('>=3.0.6') + error('tests requires Cython >= 3.0.6') +endif + +base_cython_args = [] +if cy.version().version_compare('>=3.1.0') + base_cython_args += ['-Xfreethreading_compatible=True'] +endif + +_numpy_abs = run_command(py3, ['-c', + 'import os; os.chdir(".."); import numpy; print(os.path.abspath(numpy.get_include() + "../../.."))'], + check: true).stdout().strip() + +npymath_path = _numpy_abs / '_core' / 'lib' +npy_include_path = _numpy_abs / '_core' / 'include' +npyrandom_path = _numpy_abs / 'random' / 'lib' +npymath_lib = cc.find_library('npymath', dirs: npymath_path) +npyrandom_lib = cc.find_library('npyrandom', dirs: npyrandom_path) + +py3.extension_module( + 'extending_distributions', + 'extending_distributions.pyx', + install: false, + include_directories: [npy_include_path], + dependencies: [npyrandom_lib, npymath_lib], + cython_args: base_cython_args, +) +py3.extension_module( + 'extending', + 'extending.pyx', + install: false, + include_directories: [npy_include_path], + dependencies: [npyrandom_lib, npymath_lib], + cython_args: base_cython_args, +) +py3.extension_module( + 'extending_cpp', + 'extending_distributions.pyx', + install: false, + override_options : ['cython_language=cpp'], + cython_args: base_cython_args + ['--module-name', 'extending_cpp'], + include_directories: [npy_include_path], + dependencies: [npyrandom_lib, npymath_lib], +) diff --git a/local_packages/numpy/random/_examples/numba/extending.py b/local_packages/numpy/random/_examples/numba/extending.py new file mode 100644 index 0000000..c1d0f4f --- /dev/null +++ b/local_packages/numpy/random/_examples/numba/extending.py @@ -0,0 +1,86 @@ +from timeit import timeit + +import numba as nb + +import numpy as np +from numpy.random import PCG64 + +bit_gen = PCG64() +next_d = bit_gen.cffi.next_double +state_addr = bit_gen.cffi.state_address + +def normals(n, state): + out = np.empty(n) + for i in range((n + 1) // 2): + x1 = 2.0 * next_d(state) - 1.0 + x2 = 2.0 * next_d(state) - 1.0 + r2 = x1 * x1 + x2 * x2 + while r2 >= 1.0 or r2 == 0.0: + x1 = 2.0 * next_d(state) - 1.0 + x2 = 2.0 * next_d(state) - 1.0 + r2 = x1 * x1 + x2 * x2 + f = np.sqrt(-2.0 * np.log(r2) / r2) + out[2 * i] = f * x1 + if 2 * i + 1 < n: + out[2 * i + 1] = f * x2 + return out + + +# Compile using Numba +normalsj = nb.jit(normals, nopython=True) +# Must use state address not state with numba +n = 10000 + +def numbacall(): + return normalsj(n, state_addr) + + +rg = np.random.Generator(PCG64()) + +def numpycall(): + return rg.normal(size=n) + + +# Check that the functions work +r1 = numbacall() +r2 = numpycall() +assert r1.shape == (n,) +assert r1.shape == r2.shape + +t1 = timeit(numbacall, number=1000) +print(f'{t1:.2f} secs for {n} PCG64 (Numba/PCG64) gaussian randoms') +t2 = timeit(numpycall, number=1000) +print(f'{t2:.2f} secs for {n} PCG64 (NumPy/PCG64) gaussian randoms') + +# example 2 + +next_u32 = bit_gen.ctypes.next_uint32 +ctypes_state = bit_gen.ctypes.state + +@nb.jit(nopython=True) +def bounded_uint(lb, ub, state): + mask = delta = ub - lb + mask |= mask >> 1 + mask |= mask >> 2 + mask |= mask >> 4 + mask |= mask >> 8 + mask |= mask >> 16 + + val = next_u32(state) & mask + while val > delta: + val = next_u32(state) & mask + + return lb + val + + +print(bounded_uint(323, 2394691, ctypes_state.value)) + + +@nb.jit(nopython=True) +def bounded_uints(lb, ub, n, state): + out = np.empty(n, dtype=np.uint32) + for i in range(n): + out[i] = bounded_uint(lb, ub, state) + + +bounded_uints(323, 2394691, 10000000, ctypes_state.value) diff --git a/local_packages/numpy/random/_examples/numba/extending_distributions.py b/local_packages/numpy/random/_examples/numba/extending_distributions.py new file mode 100644 index 0000000..d0462e7 --- /dev/null +++ b/local_packages/numpy/random/_examples/numba/extending_distributions.py @@ -0,0 +1,67 @@ +r""" +Building the required library in this example requires a source distribution +of NumPy or clone of the NumPy git repository since distributions.c is not +included in binary distributions. + +On *nix, execute in numpy/random/src/distributions + +export ${PYTHON_VERSION}=3.8 # Python version +export PYTHON_INCLUDE=#path to Python's include folder, usually \ + ${PYTHON_HOME}/include/python${PYTHON_VERSION}m +export NUMPY_INCLUDE=#path to numpy's include folder, usually \ + ${PYTHON_HOME}/lib/python${PYTHON_VERSION}/site-packages/numpy/_core/include +gcc -shared -o libdistributions.so -fPIC distributions.c \ + -I${NUMPY_INCLUDE} -I${PYTHON_INCLUDE} +mv libdistributions.so ../../_examples/numba/ + +On Windows + +rem PYTHON_HOME and PYTHON_VERSION are setup dependent, this is an example +set PYTHON_HOME=c:\Anaconda +set PYTHON_VERSION=38 +cl.exe /LD .\distributions.c -DDLL_EXPORT \ + -I%PYTHON_HOME%\lib\site-packages\numpy\_core\include \ + -I%PYTHON_HOME%\include %PYTHON_HOME%\libs\python%PYTHON_VERSION%.lib +move distributions.dll ../../_examples/numba/ +""" +import os + +import numba as nb +from cffi import FFI + +import numpy as np +from numpy.random import PCG64 + +ffi = FFI() +if os.path.exists('./distributions.dll'): + lib = ffi.dlopen('./distributions.dll') +elif os.path.exists('./libdistributions.so'): + lib = ffi.dlopen('./libdistributions.so') +else: + raise RuntimeError('Required DLL/so file was not found.') + +ffi.cdef(""" +double random_standard_normal(void *bitgen_state); +""") +x = PCG64() +xffi = x.cffi +bit_generator = xffi.bit_generator + +random_standard_normal = lib.random_standard_normal + + +def normals(n, bit_generator): + out = np.empty(n) + for i in range(n): + out[i] = random_standard_normal(bit_generator) + return out + + +normalsj = nb.jit(normals, nopython=True) + +# Numba requires a memory address for void * +# Can also get address from x.ctypes.bit_generator.value +bit_generator_address = int(ffi.cast('uintptr_t', bit_generator)) + +norm = normalsj(1000, bit_generator_address) +print(norm[:12]) diff --git a/local_packages/numpy/random/_generator.cp312-win_amd64.lib b/local_packages/numpy/random/_generator.cp312-win_amd64.lib new file mode 100644 index 0000000..398da35 Binary files /dev/null and b/local_packages/numpy/random/_generator.cp312-win_amd64.lib differ diff --git a/local_packages/numpy/random/_generator.cp312-win_amd64.pyd b/local_packages/numpy/random/_generator.cp312-win_amd64.pyd new file mode 100644 index 0000000..2f3ad2c Binary files /dev/null and b/local_packages/numpy/random/_generator.cp312-win_amd64.pyd differ diff --git a/local_packages/numpy/random/_generator.pyi b/local_packages/numpy/random/_generator.pyi new file mode 100644 index 0000000..1f7c342 --- /dev/null +++ b/local_packages/numpy/random/_generator.pyi @@ -0,0 +1,862 @@ +from collections.abc import Callable, MutableSequence +from typing import Any, Literal, TypeAlias, TypeVar, overload + +import numpy as np +from numpy import dtype, float32, float64, int64 +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _BoolCodes, + _DoubleCodes, + _DTypeLike, + _DTypeLikeBool, + _Float32Codes, + _Float64Codes, + _FloatLike_co, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _IntPCodes, + _ShapeLike, + _SingleCodes, + _SupportsDType, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _UIntPCodes, +) +from numpy.random import BitGenerator, RandomState, SeedSequence + +_IntegerT = TypeVar("_IntegerT", bound=np.integer) + +_DTypeLikeFloat32: TypeAlias = ( + dtype[float32] + | _SupportsDType[dtype[float32]] + | type[float32] + | _Float32Codes + | _SingleCodes +) + +_DTypeLikeFloat64: TypeAlias = ( + dtype[float64] + | _SupportsDType[dtype[float64]] + | type[float] + | type[float64] + | _Float64Codes + | _DoubleCodes +) + +class Generator: + def __init__(self, bit_generator: BitGenerator) -> None: ... + def __repr__(self) -> str: ... + def __str__(self) -> str: ... + def __getstate__(self) -> None: ... + def __setstate__(self, state: dict[str, Any] | None) -> None: ... + def __reduce__(self) -> tuple[ + Callable[[BitGenerator], Generator], + tuple[BitGenerator], + None]: ... + @property + def bit_generator(self) -> BitGenerator: ... + def spawn(self, n_children: int) -> list[Generator]: ... + def bytes(self, length: int) -> bytes: ... + @overload + def standard_normal( # type: ignore[misc] + self, + size: None = None, + dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., + out: None = None, + ) -> float: ... + @overload + def standard_normal( # type: ignore[misc] + self, + size: _ShapeLike | None = None, + ) -> NDArray[float64]: ... + @overload + def standard_normal( # type: ignore[misc] + self, + *, + out: NDArray[float64] | None = None, + ) -> NDArray[float64]: ... + @overload + def standard_normal( # type: ignore[misc] + self, + size: _ShapeLike | None = None, + dtype: _DTypeLikeFloat32 = ..., + out: NDArray[float32] | None = None, + ) -> NDArray[float32]: ... + @overload + def standard_normal( # type: ignore[misc] + self, + size: _ShapeLike | None = None, + dtype: _DTypeLikeFloat64 = ..., + out: NDArray[float64] | None = None, + ) -> NDArray[float64]: ... + @overload + def permutation(self, x: int, axis: int = 0) -> NDArray[int64]: ... + @overload + def permutation(self, x: ArrayLike, axis: int = 0) -> NDArray[Any]: ... + @overload + def standard_exponential( # type: ignore[misc] + self, + size: None = None, + dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., + method: Literal["zig", "inv"] = "zig", + out: None = None, + ) -> float: ... + @overload + def standard_exponential( + self, + size: _ShapeLike | None = None, + ) -> NDArray[float64]: ... + @overload + def standard_exponential( + self, + *, + out: NDArray[float64] | None = None, + ) -> NDArray[float64]: ... + @overload + def standard_exponential( + self, + size: _ShapeLike | None = None, + *, + method: Literal["zig", "inv"] = "zig", + out: NDArray[float64] | None = None, + ) -> NDArray[float64]: ... + @overload + def standard_exponential( + self, + size: _ShapeLike | None = None, + dtype: _DTypeLikeFloat32 = ..., + method: Literal["zig", "inv"] = "zig", + out: NDArray[float32] | None = None, + ) -> NDArray[float32]: ... + @overload + def standard_exponential( + self, + size: _ShapeLike | None = None, + dtype: _DTypeLikeFloat64 = ..., + method: Literal["zig", "inv"] = "zig", + out: NDArray[float64] | None = None, + ) -> NDArray[float64]: ... + @overload + def random( # type: ignore[misc] + self, + size: None = None, + dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., + out: None = None, + ) -> float: ... + @overload + def random( + self, + *, + out: NDArray[float64] | None = None, + ) -> NDArray[float64]: ... + @overload + def random( + self, + size: _ShapeLike | None = None, + *, + out: NDArray[float64] | None = None, + ) -> NDArray[float64]: ... + @overload + def random( + self, + size: _ShapeLike | None = None, + dtype: _DTypeLikeFloat32 = ..., + out: NDArray[float32] | None = None, + ) -> NDArray[float32]: ... + @overload + def random( + self, + size: _ShapeLike | None = None, + dtype: _DTypeLikeFloat64 = ..., + out: NDArray[float64] | None = None, + ) -> NDArray[float64]: ... + @overload + def beta( + self, + a: _FloatLike_co, + b: _FloatLike_co, + size: None = None, + ) -> float: ... # type: ignore[misc] + @overload + def beta( + self, + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + size: _ShapeLike | None = None + ) -> NDArray[float64]: ... + @overload + def exponential(self, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... # type: ignore[misc] + @overload + def exponential(self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None) -> NDArray[float64]: ... + + # + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + dtype: _DTypeLike[np.int64] | _Int64Codes = ..., + endpoint: bool = False, + ) -> np.int64: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: type[bool], + endpoint: bool = False, + ) -> bool: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: type[int], + endpoint: bool = False, + ) -> int: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _DTypeLike[np.bool] | _BoolCodes, + endpoint: bool = False, + ) -> np.bool: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _DTypeLike[_IntegerT], + endpoint: bool = False, + ) -> _IntegerT: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: _DTypeLike[np.int64] | _Int64Codes = ..., + endpoint: bool = False, + ) -> NDArray[np.int64]: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _DTypeLikeBool, + endpoint: bool = False, + ) -> NDArray[np.bool]: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _DTypeLike[_IntegerT], + endpoint: bool = False, + ) -> NDArray[_IntegerT]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _Int8Codes, + endpoint: bool = False, + ) -> np.int8: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _Int8Codes, + endpoint: bool = False, + ) -> NDArray[np.int8]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _UInt8Codes, + endpoint: bool = False, + ) -> np.uint8: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _UInt8Codes, + endpoint: bool = False, + ) -> NDArray[np.uint8]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _Int16Codes, + endpoint: bool = False, + ) -> np.int16: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _Int16Codes, + endpoint: bool = False, + ) -> NDArray[np.int16]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _UInt16Codes, + endpoint: bool = False, + ) -> np.uint16: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _UInt16Codes, + endpoint: bool = False, + ) -> NDArray[np.uint16]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _Int32Codes, + endpoint: bool = False, + ) -> np.int32: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _Int32Codes, + endpoint: bool = False, + ) -> NDArray[np.int32]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _UInt32Codes, + endpoint: bool = False, + ) -> np.uint32: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _UInt32Codes, + endpoint: bool = False, + ) -> NDArray[np.uint32]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _UInt64Codes, + endpoint: bool = False, + ) -> np.uint64: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _UInt64Codes, + endpoint: bool = False, + ) -> NDArray[np.uint64]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _IntPCodes, + endpoint: bool = False, + ) -> np.intp: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _IntPCodes, + endpoint: bool = False, + ) -> NDArray[np.intp]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _UIntPCodes, + endpoint: bool = False, + ) -> np.uintp: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _UIntPCodes, + endpoint: bool = False, + ) -> NDArray[np.uintp]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + dtype: DTypeLike | None = ..., + endpoint: bool = False, + ) -> Any: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: DTypeLike | None = ..., + endpoint: bool = False, + ) -> NDArray[Any]: ... + + # TODO: Use a TypeVar _T here to get away from Any output? + # Should be int->NDArray[int64], ArrayLike[_T] -> _T | NDArray[Any] + @overload + def choice( + self, + a: int, + size: None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, + ) -> int: ... + @overload + def choice( + self, + a: int, + size: _ShapeLike | None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, + ) -> NDArray[int64]: ... + @overload + def choice( + self, + a: ArrayLike, + size: None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, + ) -> Any: ... + @overload + def choice( + self, + a: ArrayLike, + size: _ShapeLike | None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + axis: int = 0, + shuffle: bool = True, + ) -> NDArray[Any]: ... + @overload + def uniform( + self, + low: _FloatLike_co = 0.0, + high: _FloatLike_co = 1.0, + size: None = None, + ) -> float: ... # type: ignore[misc] + @overload + def uniform( + self, + low: _ArrayLikeFloat_co = 0.0, + high: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, + ) -> NDArray[float64]: ... + @overload + def normal( + self, + loc: _FloatLike_co = 0.0, + scale: _FloatLike_co = 1.0, + size: None = None, + ) -> float: ... # type: ignore[misc] + @overload + def normal( + self, + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, + ) -> NDArray[float64]: ... + @overload + def standard_gamma( # type: ignore[misc] + self, + shape: _FloatLike_co, + size: None = None, + dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., + out: None = None, + ) -> float: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + size: _ShapeLike | None = None, + ) -> NDArray[float64]: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + *, + out: NDArray[float64] | None = None, + ) -> NDArray[float64]: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + size: _ShapeLike | None = None, + dtype: _DTypeLikeFloat32 = ..., + out: NDArray[float32] | None = None, + ) -> NDArray[float32]: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + size: _ShapeLike | None = None, + dtype: _DTypeLikeFloat64 = ..., + out: NDArray[float64] | None = None, + ) -> NDArray[float64]: ... + @overload + def gamma( + self, shape: _FloatLike_co, scale: _FloatLike_co = 1.0, size: None = None + ) -> float: ... # type: ignore[misc] + @overload + def gamma( + self, + shape: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, + ) -> NDArray[float64]: ... + @overload + def f( + self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = None + ) -> float: ... # type: ignore[misc] + @overload + def f( + self, + dfnum: _ArrayLikeFloat_co, + dfden: _ArrayLikeFloat_co, + size: _ShapeLike | None = None + ) -> NDArray[float64]: ... + @overload + def noncentral_f( + self, + dfnum: _FloatLike_co, + dfden: _FloatLike_co, + nonc: _FloatLike_co, + size: None = None, + ) -> float: ... # type: ignore[misc] + @overload + def noncentral_f( + self, + dfnum: _ArrayLikeFloat_co, + dfden: _ArrayLikeFloat_co, + nonc: _ArrayLikeFloat_co, + size: _ShapeLike | None = None, + ) -> NDArray[float64]: ... + @overload + def chisquare(self, df: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] + @overload + def chisquare( + self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None + ) -> NDArray[float64]: ... + @overload + def noncentral_chisquare( + self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = None + ) -> float: ... # type: ignore[misc] + @overload + def noncentral_chisquare( + self, + df: _ArrayLikeFloat_co, + nonc: _ArrayLikeFloat_co, + size: _ShapeLike | None = None + ) -> NDArray[float64]: ... + @overload + def standard_t(self, df: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] + @overload + def standard_t( + self, df: _ArrayLikeFloat_co, size: None = None + ) -> NDArray[float64]: ... + @overload + def standard_t( + self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None + ) -> NDArray[float64]: ... + @overload + def vonmises( + self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = None + ) -> float: ... # type: ignore[misc] + @overload + def vonmises( + self, + mu: _ArrayLikeFloat_co, + kappa: _ArrayLikeFloat_co, + size: _ShapeLike | None = None + ) -> NDArray[float64]: ... + @overload + def pareto(self, a: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] + @overload + def pareto( + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None + ) -> NDArray[float64]: ... + @overload + def weibull(self, a: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] + @overload + def weibull( + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None + ) -> NDArray[float64]: ... + @overload + def power(self, a: _FloatLike_co, size: None = None) -> float: ... # type: ignore[misc] + @overload + def power( + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None + ) -> NDArray[float64]: ... + @overload + def standard_cauchy(self, size: None = None) -> float: ... # type: ignore[misc] + @overload + def standard_cauchy(self, size: _ShapeLike | None = None) -> NDArray[float64]: ... + @overload + def laplace( + self, + loc: _FloatLike_co = 0.0, + scale: _FloatLike_co = 1.0, + size: None = None, + ) -> float: ... # type: ignore[misc] + @overload + def laplace( + self, + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, + ) -> NDArray[float64]: ... + @overload + def gumbel( + self, + loc: _FloatLike_co = 0.0, + scale: _FloatLike_co = 1.0, + size: None = None, + ) -> float: ... # type: ignore[misc] + @overload + def gumbel( + self, + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, + ) -> NDArray[float64]: ... + @overload + def logistic( + self, + loc: _FloatLike_co = 0.0, + scale: _FloatLike_co = 1.0, + size: None = None, + ) -> float: ... # type: ignore[misc] + @overload + def logistic( + self, + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, + ) -> NDArray[float64]: ... + @overload + def lognormal( + self, + mean: _FloatLike_co = 0.0, + sigma: _FloatLike_co = 1.0, + size: None = None, + ) -> float: ... # type: ignore[misc] + @overload + def lognormal( + self, + mean: _ArrayLikeFloat_co = 0.0, + sigma: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, + ) -> NDArray[float64]: ... + @overload + def rayleigh(self, scale: _FloatLike_co = 1.0, size: None = None) -> float: ... # type: ignore[misc] + @overload + def rayleigh( + self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None + ) -> NDArray[float64]: ... + @overload + def wald( + self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = None + ) -> float: ... # type: ignore[misc] + @overload + def wald( + self, + mean: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co, + size: _ShapeLike | None = None + ) -> NDArray[float64]: ... + @overload + def triangular( + self, + left: _FloatLike_co, + mode: _FloatLike_co, + right: _FloatLike_co, + size: None = None, + ) -> float: ... # type: ignore[misc] + @overload + def triangular( + self, + left: _ArrayLikeFloat_co, + mode: _ArrayLikeFloat_co, + right: _ArrayLikeFloat_co, + size: _ShapeLike | None = None, + ) -> NDArray[float64]: ... + @overload + def binomial(self, n: int, p: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc] + @overload + def binomial( + self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None + ) -> NDArray[int64]: ... + @overload + def negative_binomial( + self, n: _FloatLike_co, p: _FloatLike_co, size: None = None + ) -> int: ... # type: ignore[misc] + @overload + def negative_binomial( + self, + n: _ArrayLikeFloat_co, + p: _ArrayLikeFloat_co, + size: _ShapeLike | None = None + ) -> NDArray[int64]: ... + @overload + def poisson(self, lam: _FloatLike_co = 1.0, size: None = None) -> int: ... # type: ignore[misc] + @overload + def poisson( + self, lam: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None + ) -> NDArray[int64]: ... + @overload + def zipf(self, a: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc] + @overload + def zipf( + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None + ) -> NDArray[int64]: ... + @overload + def geometric(self, p: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc] + @overload + def geometric( + self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None + ) -> NDArray[int64]: ... + @overload + def hypergeometric( + self, ngood: int, nbad: int, nsample: int, size: None = None + ) -> int: ... # type: ignore[misc] + @overload + def hypergeometric( + self, + ngood: _ArrayLikeInt_co, + nbad: _ArrayLikeInt_co, + nsample: _ArrayLikeInt_co, + size: _ShapeLike | None = None, + ) -> NDArray[int64]: ... + @overload + def logseries(self, p: _FloatLike_co, size: None = None) -> int: ... # type: ignore[misc] + @overload + def logseries( + self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None + ) -> NDArray[int64]: ... + def multivariate_normal( + self, + mean: _ArrayLikeFloat_co, + cov: _ArrayLikeFloat_co, + size: _ShapeLike | None = None, + check_valid: Literal["warn", "raise", "ignore"] = "warn", + tol: float = 1e-8, + *, + method: Literal["svd", "eigh", "cholesky"] = "svd", + ) -> NDArray[float64]: ... + def multinomial( + self, n: _ArrayLikeInt_co, + pvals: _ArrayLikeFloat_co, + size: _ShapeLike | None = None + ) -> NDArray[int64]: ... + def multivariate_hypergeometric( + self, + colors: _ArrayLikeInt_co, + nsample: int, + size: _ShapeLike | None = None, + method: Literal["marginals", "count"] = "marginals", + ) -> NDArray[int64]: ... + def dirichlet( + self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = None + ) -> NDArray[float64]: ... + def permuted( + self, x: ArrayLike, *, axis: int | None = None, out: NDArray[Any] | None = None + ) -> NDArray[Any]: ... + + # axis must be 0 for MutableSequence + @overload + def shuffle(self, /, x: np.ndarray, axis: int = 0) -> None: ... + @overload + def shuffle(self, /, x: MutableSequence[Any], axis: Literal[0] = 0) -> None: ... + +def default_rng( + seed: _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator | RandomState | None = None +) -> Generator: ... diff --git a/local_packages/numpy/random/_mt19937.cp312-win_amd64.lib b/local_packages/numpy/random/_mt19937.cp312-win_amd64.lib new file mode 100644 index 0000000..160b1cf Binary files /dev/null and b/local_packages/numpy/random/_mt19937.cp312-win_amd64.lib differ diff --git a/local_packages/numpy/random/_mt19937.cp312-win_amd64.pyd b/local_packages/numpy/random/_mt19937.cp312-win_amd64.pyd new file mode 100644 index 0000000..e969edb Binary files /dev/null and b/local_packages/numpy/random/_mt19937.cp312-win_amd64.pyd differ diff --git a/local_packages/numpy/random/_mt19937.pyi b/local_packages/numpy/random/_mt19937.pyi new file mode 100644 index 0000000..03373a6 --- /dev/null +++ b/local_packages/numpy/random/_mt19937.pyi @@ -0,0 +1,27 @@ +from typing import TypedDict, type_check_only + +from numpy import uint32 +from numpy._typing import _ArrayLikeInt_co +from numpy.random.bit_generator import BitGenerator, SeedSequence +from numpy.typing import NDArray + +__all__ = ["MT19937"] + +@type_check_only +class _MT19937Internal(TypedDict): + key: NDArray[uint32] + pos: int + +@type_check_only +class _MT19937State(TypedDict): + bit_generator: str + state: _MT19937Internal + +class MT19937(BitGenerator): + def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... + def _legacy_seeding(self, seed: _ArrayLikeInt_co) -> None: ... + def jumped(self, jumps: int = 1) -> MT19937: ... + @property # type: ignore[override] + def state(self) -> _MT19937State: ... + @state.setter + def state(self, value: _MT19937State) -> None: ... diff --git a/local_packages/numpy/random/_pcg64.cp312-win_amd64.lib b/local_packages/numpy/random/_pcg64.cp312-win_amd64.lib new file mode 100644 index 0000000..26152ee Binary files /dev/null and b/local_packages/numpy/random/_pcg64.cp312-win_amd64.lib differ diff --git a/local_packages/numpy/random/_pcg64.cp312-win_amd64.pyd b/local_packages/numpy/random/_pcg64.cp312-win_amd64.pyd new file mode 100644 index 0000000..f7ddb6a Binary files /dev/null and b/local_packages/numpy/random/_pcg64.cp312-win_amd64.pyd differ diff --git a/local_packages/numpy/random/_pcg64.pyi b/local_packages/numpy/random/_pcg64.pyi new file mode 100644 index 0000000..a9e81f7 --- /dev/null +++ b/local_packages/numpy/random/_pcg64.pyi @@ -0,0 +1,41 @@ +from typing import TypedDict, type_check_only + +from numpy._typing import _ArrayLikeInt_co +from numpy.random.bit_generator import BitGenerator, SeedSequence + +__all__ = ["PCG64"] + +@type_check_only +class _PCG64Internal(TypedDict): + state: int + inc: int + +@type_check_only +class _PCG64State(TypedDict): + bit_generator: str + state: _PCG64Internal + has_uint32: int + uinteger: int + +class PCG64(BitGenerator): + def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... + def jumped(self, jumps: int = 1) -> PCG64: ... + @property # type: ignore[override] + def state( + self, + ) -> _PCG64State: ... + @state.setter + def state( + self, + value: _PCG64State, + ) -> None: ... + def advance(self, delta: int) -> PCG64: ... + +class PCG64DXSM(BitGenerator): + def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... + def jumped(self, jumps: int = 1) -> PCG64DXSM: ... + @property # type: ignore[override] + def state(self) -> _PCG64State: ... + @state.setter + def state(self, value: _PCG64State) -> None: ... + def advance(self, delta: int) -> PCG64DXSM: ... diff --git a/local_packages/numpy/random/_philox.cp312-win_amd64.lib b/local_packages/numpy/random/_philox.cp312-win_amd64.lib new file mode 100644 index 0000000..cf13d4e Binary files /dev/null and b/local_packages/numpy/random/_philox.cp312-win_amd64.lib differ diff --git a/local_packages/numpy/random/_philox.cp312-win_amd64.pyd b/local_packages/numpy/random/_philox.cp312-win_amd64.pyd new file mode 100644 index 0000000..06493de Binary files /dev/null and b/local_packages/numpy/random/_philox.cp312-win_amd64.pyd differ diff --git a/local_packages/numpy/random/_philox.pyi b/local_packages/numpy/random/_philox.pyi new file mode 100644 index 0000000..3089f11 --- /dev/null +++ b/local_packages/numpy/random/_philox.pyi @@ -0,0 +1,36 @@ +from typing import TypedDict, type_check_only + +from numpy import uint64 +from numpy._typing import _ArrayLikeInt_co +from numpy.random.bit_generator import BitGenerator, SeedSequence +from numpy.typing import NDArray + +__all__ = ["Philox"] + +@type_check_only +class _PhiloxInternal(TypedDict): + counter: NDArray[uint64] + key: NDArray[uint64] + +@type_check_only +class _PhiloxState(TypedDict): + bit_generator: str + state: _PhiloxInternal + buffer: NDArray[uint64] + buffer_pos: int + has_uint32: int + uinteger: int + +class Philox(BitGenerator): + def __init__( + self, + seed: _ArrayLikeInt_co | SeedSequence | None = ..., + counter: _ArrayLikeInt_co | None = ..., + key: _ArrayLikeInt_co | None = ..., + ) -> None: ... + @property # type: ignore[override] + def state(self) -> _PhiloxState: ... + @state.setter + def state(self, value: _PhiloxState) -> None: ... + def jumped(self, jumps: int = 1) -> Philox: ... + def advance(self, delta: int) -> Philox: ... diff --git a/local_packages/numpy/random/_pickle.py b/local_packages/numpy/random/_pickle.py new file mode 100644 index 0000000..05f7232 --- /dev/null +++ b/local_packages/numpy/random/_pickle.py @@ -0,0 +1,88 @@ +from ._generator import Generator +from ._mt19937 import MT19937 +from ._pcg64 import PCG64, PCG64DXSM +from ._philox import Philox +from ._sfc64 import SFC64 +from .bit_generator import BitGenerator +from .mtrand import RandomState + +BitGenerators = {'MT19937': MT19937, + 'PCG64': PCG64, + 'PCG64DXSM': PCG64DXSM, + 'Philox': Philox, + 'SFC64': SFC64, + } + + +def __bit_generator_ctor(bit_generator: str | type[BitGenerator] = 'MT19937'): + """ + Pickling helper function that returns a bit generator object + + Parameters + ---------- + bit_generator : type[BitGenerator] or str + BitGenerator class or string containing the name of the BitGenerator + + Returns + ------- + BitGenerator + BitGenerator instance + """ + if isinstance(bit_generator, type): + bit_gen_class = bit_generator + elif bit_generator in BitGenerators: + bit_gen_class = BitGenerators[bit_generator] + else: + raise ValueError( + str(bit_generator) + ' is not a known BitGenerator module.' + ) + + return bit_gen_class() + + +def __generator_ctor(bit_generator_name="MT19937", + bit_generator_ctor=__bit_generator_ctor): + """ + Pickling helper function that returns a Generator object + + Parameters + ---------- + bit_generator_name : str or BitGenerator + String containing the core BitGenerator's name or a + BitGenerator instance + bit_generator_ctor : callable, optional + Callable function that takes bit_generator_name as its only argument + and returns an instantized bit generator. + + Returns + ------- + rg : Generator + Generator using the named core BitGenerator + """ + if isinstance(bit_generator_name, BitGenerator): + return Generator(bit_generator_name) + # Legacy path that uses a bit generator name and ctor + return Generator(bit_generator_ctor(bit_generator_name)) + + +def __randomstate_ctor(bit_generator_name="MT19937", + bit_generator_ctor=__bit_generator_ctor): + """ + Pickling helper function that returns a legacy RandomState-like object + + Parameters + ---------- + bit_generator_name : str + String containing the core BitGenerator's name + bit_generator_ctor : callable, optional + Callable function that takes bit_generator_name as its only argument + and returns an instantized bit generator. + + Returns + ------- + rs : RandomState + Legacy RandomState using the named core BitGenerator + """ + if isinstance(bit_generator_name, BitGenerator): + return RandomState(bit_generator_name) + return RandomState(bit_generator_ctor(bit_generator_name)) diff --git a/local_packages/numpy/random/_pickle.pyi b/local_packages/numpy/random/_pickle.pyi new file mode 100644 index 0000000..b8b1b7b --- /dev/null +++ b/local_packages/numpy/random/_pickle.pyi @@ -0,0 +1,43 @@ +from collections.abc import Callable +from typing import Final, Literal, TypedDict, TypeVar, overload, type_check_only + +from numpy.random._generator import Generator +from numpy.random._mt19937 import MT19937 +from numpy.random._pcg64 import PCG64, PCG64DXSM +from numpy.random._philox import Philox +from numpy.random._sfc64 import SFC64 +from numpy.random.bit_generator import BitGenerator +from numpy.random.mtrand import RandomState + +_T = TypeVar("_T", bound=BitGenerator) + +@type_check_only +class _BitGenerators(TypedDict): + MT19937: type[MT19937] + PCG64: type[PCG64] + PCG64DXSM: type[PCG64DXSM] + Philox: type[Philox] + SFC64: type[SFC64] + +BitGenerators: Final[_BitGenerators] = ... + +@overload +def __bit_generator_ctor(bit_generator: Literal["MT19937"] = "MT19937") -> MT19937: ... +@overload +def __bit_generator_ctor(bit_generator: Literal["PCG64"]) -> PCG64: ... +@overload +def __bit_generator_ctor(bit_generator: Literal["PCG64DXSM"]) -> PCG64DXSM: ... +@overload +def __bit_generator_ctor(bit_generator: Literal["Philox"]) -> Philox: ... +@overload +def __bit_generator_ctor(bit_generator: Literal["SFC64"]) -> SFC64: ... +@overload +def __bit_generator_ctor(bit_generator: type[_T]) -> _T: ... +def __generator_ctor( + bit_generator_name: str | type[BitGenerator] | BitGenerator = "MT19937", + bit_generator_ctor: Callable[[str | type[BitGenerator]], BitGenerator] = ..., +) -> Generator: ... +def __randomstate_ctor( + bit_generator_name: str | type[BitGenerator] | BitGenerator = "MT19937", + bit_generator_ctor: Callable[[str | type[BitGenerator]], BitGenerator] = ..., +) -> RandomState: ... diff --git a/local_packages/numpy/random/_sfc64.cp312-win_amd64.lib b/local_packages/numpy/random/_sfc64.cp312-win_amd64.lib new file mode 100644 index 0000000..f94a5af Binary files /dev/null and b/local_packages/numpy/random/_sfc64.cp312-win_amd64.lib differ diff --git a/local_packages/numpy/random/_sfc64.cp312-win_amd64.pyd b/local_packages/numpy/random/_sfc64.cp312-win_amd64.pyd new file mode 100644 index 0000000..b33a20b Binary files /dev/null and b/local_packages/numpy/random/_sfc64.cp312-win_amd64.pyd differ diff --git a/local_packages/numpy/random/_sfc64.pyi b/local_packages/numpy/random/_sfc64.pyi new file mode 100644 index 0000000..f5f3fed --- /dev/null +++ b/local_packages/numpy/random/_sfc64.pyi @@ -0,0 +1,25 @@ +from typing import TypedDict, type_check_only + +from numpy import uint64 +from numpy._typing import NDArray, _ArrayLikeInt_co +from numpy.random.bit_generator import BitGenerator, SeedSequence + +__all__ = ["SFC64"] + +@type_check_only +class _SFC64Internal(TypedDict): + state: NDArray[uint64] + +@type_check_only +class _SFC64State(TypedDict): + bit_generator: str + state: _SFC64Internal + has_uint32: int + uinteger: int + +class SFC64(BitGenerator): + def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... + @property # type: ignore[override] + def state(self) -> _SFC64State: ... + @state.setter + def state(self, value: _SFC64State) -> None: ... diff --git a/local_packages/numpy/random/bit_generator.cp312-win_amd64.lib b/local_packages/numpy/random/bit_generator.cp312-win_amd64.lib new file mode 100644 index 0000000..6d12af7 Binary files /dev/null and b/local_packages/numpy/random/bit_generator.cp312-win_amd64.lib differ diff --git a/local_packages/numpy/random/bit_generator.cp312-win_amd64.pyd b/local_packages/numpy/random/bit_generator.cp312-win_amd64.pyd new file mode 100644 index 0000000..f685e67 Binary files /dev/null and b/local_packages/numpy/random/bit_generator.cp312-win_amd64.pyd differ diff --git a/local_packages/numpy/random/bit_generator.pxd b/local_packages/numpy/random/bit_generator.pxd new file mode 100644 index 0000000..4ba18f1 --- /dev/null +++ b/local_packages/numpy/random/bit_generator.pxd @@ -0,0 +1,40 @@ +cimport numpy as np +from libc.stdint cimport uint32_t, uint64_t + +cdef extern from "numpy/random/bitgen.h": + struct bitgen: + void *state + uint64_t (*next_uint64)(void *st) nogil + uint32_t (*next_uint32)(void *st) nogil + double (*next_double)(void *st) nogil + uint64_t (*next_raw)(void *st) nogil + + ctypedef bitgen bitgen_t + +cdef class BitGenerator(): + cdef readonly object _seed_seq + cdef readonly object lock + cdef bitgen_t _bitgen + cdef readonly object _ctypes + cdef readonly object _cffi + cdef readonly object capsule + + +cdef class SeedSequence(): + cdef readonly object entropy + cdef readonly tuple spawn_key + cdef readonly Py_ssize_t pool_size + cdef readonly object pool + cdef readonly uint32_t n_children_spawned + + cdef mix_entropy(self, np.ndarray[np.npy_uint32, ndim=1] mixer, + np.ndarray[np.npy_uint32, ndim=1] entropy_array) + cdef get_assembled_entropy(self) + +cdef class SeedlessSeedSequence: + pass + +# NOTE: This has no implementation and should not be used. It purely exists for +# backwards compatibility, see https://github.com/scipy/scipy/issues/24215. +cdef class SeedlessSequence: + pass diff --git a/local_packages/numpy/random/bit_generator.pyi b/local_packages/numpy/random/bit_generator.pyi new file mode 100644 index 0000000..ee4499d --- /dev/null +++ b/local_packages/numpy/random/bit_generator.pyi @@ -0,0 +1,123 @@ +import abc +from _typeshed import Incomplete +from collections.abc import Callable, Mapping, Sequence +from threading import Lock +from typing import ( + Any, + ClassVar, + Literal, + NamedTuple, + Self, + TypeAlias, + TypedDict, + overload, + type_check_only, +) +from typing_extensions import CapsuleType + +import numpy as np +from numpy._typing import ( + NDArray, + _ArrayLikeInt_co, + _DTypeLike, + _ShapeLike, + _UInt32Codes, + _UInt64Codes, +) + +__all__ = ["BitGenerator", "SeedSequence"] + +### + +_DTypeLikeUint_: TypeAlias = _DTypeLike[np.uint32 | np.uint64] | _UInt32Codes | _UInt64Codes + +@type_check_only +class _SeedSeqState(TypedDict): + entropy: int | Sequence[int] | None + spawn_key: tuple[int, ...] + pool_size: int + n_children_spawned: int + +@type_check_only +class _Interface(NamedTuple): + state_address: Incomplete + state: Incomplete + next_uint64: Incomplete + next_uint32: Incomplete + next_double: Incomplete + bit_generator: Incomplete + +@type_check_only +class _CythonMixin: + def __setstate_cython__(self, pyx_state: object, /) -> None: ... + def __reduce_cython__(self) -> Any: ... # noqa: ANN401 + +@type_check_only +class _GenerateStateMixin(_CythonMixin): + def generate_state(self, /, n_words: int, dtype: _DTypeLikeUint_ = ...) -> NDArray[np.uint32 | np.uint64]: ... + +### + +class ISeedSequence(abc.ABC): + @abc.abstractmethod + def generate_state(self, /, n_words: int, dtype: _DTypeLikeUint_ = ...) -> NDArray[np.uint32 | np.uint64]: ... + +class ISpawnableSeedSequence(ISeedSequence, abc.ABC): + @abc.abstractmethod + def spawn(self, /, n_children: int) -> list[Self]: ... + +class SeedlessSeedSequence(_GenerateStateMixin, ISpawnableSeedSequence): + def spawn(self, /, n_children: int) -> list[Self]: ... + +class SeedSequence(_GenerateStateMixin, ISpawnableSeedSequence): + __pyx_vtable__: ClassVar[CapsuleType] = ... + + entropy: int | Sequence[int] | None + spawn_key: tuple[int, ...] + pool_size: int + n_children_spawned: int + pool: NDArray[np.uint32] + + def __init__( + self, + /, + entropy: _ArrayLikeInt_co | None = None, + *, + spawn_key: Sequence[int] = (), + pool_size: int = 4, + n_children_spawned: int = ..., + ) -> None: ... + def spawn(self, /, n_children: int) -> list[Self]: ... + @property + def state(self) -> _SeedSeqState: ... + +class BitGenerator(_CythonMixin, abc.ABC): + lock: Lock + @property + def state(self) -> Mapping[str, Any]: ... + @state.setter + def state(self, value: Mapping[str, Any], /) -> None: ... + @property + def seed_seq(self) -> ISeedSequence: ... + @property + def ctypes(self) -> _Interface: ... + @property + def cffi(self) -> _Interface: ... + @property + def capsule(self) -> CapsuleType: ... + + # + def __init__(self, /, seed: _ArrayLikeInt_co | SeedSequence | None = None) -> None: ... + def __reduce__(self) -> tuple[Callable[[str], Self], tuple[str], tuple[Mapping[str, Any], ISeedSequence]]: ... + def spawn(self, /, n_children: int) -> list[Self]: ... + def _benchmark(self, /, cnt: int, method: str = "uint64") -> None: ... + + # + @overload + def random_raw(self, /, size: None = None, output: Literal[True] = True) -> int: ... + @overload + def random_raw(self, /, size: _ShapeLike, output: Literal[True] = True) -> NDArray[np.uint64]: ... + @overload + def random_raw(self, /, size: _ShapeLike | None, output: Literal[False]) -> None: ... + @overload + def random_raw(self, /, size: _ShapeLike | None = None, *, output: Literal[False]) -> None: ... diff --git a/local_packages/numpy/random/c_distributions.pxd b/local_packages/numpy/random/c_distributions.pxd new file mode 100644 index 0000000..da790ca --- /dev/null +++ b/local_packages/numpy/random/c_distributions.pxd @@ -0,0 +1,119 @@ +#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3 +from numpy cimport npy_intp + +from libc.stdint cimport (uint64_t, int32_t, int64_t) +from numpy.random cimport bitgen_t + +cdef extern from "numpy/random/distributions.h": + + struct s_binomial_t: + int has_binomial + double psave + int64_t nsave + double r + double q + double fm + int64_t m + double p1 + double xm + double xl + double xr + double c + double laml + double lamr + double p2 + double p3 + double p4 + + ctypedef s_binomial_t binomial_t + + float random_standard_uniform_f(bitgen_t *bitgen_state) nogil + double random_standard_uniform(bitgen_t *bitgen_state) nogil + void random_standard_uniform_fill(bitgen_t* bitgen_state, npy_intp cnt, double *out) nogil + void random_standard_uniform_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) nogil + + double random_standard_exponential(bitgen_t *bitgen_state) nogil + float random_standard_exponential_f(bitgen_t *bitgen_state) nogil + void random_standard_exponential_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil + void random_standard_exponential_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) nogil + void random_standard_exponential_inv_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil + void random_standard_exponential_inv_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) nogil + + double random_standard_normal(bitgen_t* bitgen_state) nogil + float random_standard_normal_f(bitgen_t *bitgen_state) nogil + void random_standard_normal_fill(bitgen_t *bitgen_state, npy_intp count, double *out) nogil + void random_standard_normal_fill_f(bitgen_t *bitgen_state, npy_intp count, float *out) nogil + double random_standard_gamma(bitgen_t *bitgen_state, double shape) nogil + float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) nogil + + float random_standard_uniform_f(bitgen_t *bitgen_state) nogil + void random_standard_uniform_fill_f(bitgen_t* bitgen_state, npy_intp cnt, float *out) nogil + float random_standard_normal_f(bitgen_t* bitgen_state) nogil + float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) nogil + + int64_t random_positive_int64(bitgen_t *bitgen_state) nogil + int32_t random_positive_int32(bitgen_t *bitgen_state) nogil + int64_t random_positive_int(bitgen_t *bitgen_state) nogil + uint64_t random_uint(bitgen_t *bitgen_state) nogil + + double random_normal(bitgen_t *bitgen_state, double loc, double scale) nogil + + double random_gamma(bitgen_t *bitgen_state, double shape, double scale) nogil + float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale) nogil + + double random_exponential(bitgen_t *bitgen_state, double scale) nogil + double random_uniform(bitgen_t *bitgen_state, double lower, double range) nogil + double random_beta(bitgen_t *bitgen_state, double a, double b) nogil + double random_chisquare(bitgen_t *bitgen_state, double df) nogil + double random_f(bitgen_t *bitgen_state, double dfnum, double dfden) nogil + double random_standard_cauchy(bitgen_t *bitgen_state) nogil + double random_pareto(bitgen_t *bitgen_state, double a) nogil + double random_weibull(bitgen_t *bitgen_state, double a) nogil + double random_power(bitgen_t *bitgen_state, double a) nogil + double random_laplace(bitgen_t *bitgen_state, double loc, double scale) nogil + double random_gumbel(bitgen_t *bitgen_state, double loc, double scale) nogil + double random_logistic(bitgen_t *bitgen_state, double loc, double scale) nogil + double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma) nogil + double random_rayleigh(bitgen_t *bitgen_state, double mode) nogil + double random_standard_t(bitgen_t *bitgen_state, double df) nogil + double random_noncentral_chisquare(bitgen_t *bitgen_state, double df, + double nonc) nogil + double random_noncentral_f(bitgen_t *bitgen_state, double dfnum, + double dfden, double nonc) nogil + double random_wald(bitgen_t *bitgen_state, double mean, double scale) nogil + double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) nogil + double random_triangular(bitgen_t *bitgen_state, double left, double mode, + double right) nogil + + int64_t random_poisson(bitgen_t *bitgen_state, double lam) nogil + int64_t random_negative_binomial(bitgen_t *bitgen_state, double n, double p) nogil + int64_t random_binomial(bitgen_t *bitgen_state, double p, int64_t n, binomial_t *binomial) nogil + int64_t random_logseries(bitgen_t *bitgen_state, double p) nogil + int64_t random_geometric_search(bitgen_t *bitgen_state, double p) nogil + int64_t random_geometric_inversion(bitgen_t *bitgen_state, double p) nogil + int64_t random_geometric(bitgen_t *bitgen_state, double p) nogil + int64_t random_zipf(bitgen_t *bitgen_state, double a) nogil + int64_t random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad, + int64_t sample) nogil + + uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max) nogil + + # Generate random uint64 numbers in closed interval [off, off + rng]. + uint64_t random_bounded_uint64(bitgen_t *bitgen_state, + uint64_t off, uint64_t rng, + uint64_t mask, bint use_masked) nogil + + void random_multinomial(bitgen_t *bitgen_state, int64_t n, int64_t *mnix, + double *pix, npy_intp d, binomial_t *binomial) nogil + + int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state, + int64_t total, + size_t num_colors, int64_t *colors, + int64_t nsample, + size_t num_variates, int64_t *variates) nogil + void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state, + int64_t total, + size_t num_colors, int64_t *colors, + int64_t nsample, + size_t num_variates, int64_t *variates) nogil + diff --git a/local_packages/numpy/random/lib/npyrandom.lib b/local_packages/numpy/random/lib/npyrandom.lib new file mode 100644 index 0000000..325bc60 Binary files /dev/null and b/local_packages/numpy/random/lib/npyrandom.lib differ diff --git a/local_packages/numpy/random/mtrand.cp312-win_amd64.lib b/local_packages/numpy/random/mtrand.cp312-win_amd64.lib new file mode 100644 index 0000000..579f395 Binary files /dev/null and b/local_packages/numpy/random/mtrand.cp312-win_amd64.lib differ diff --git a/local_packages/numpy/random/mtrand.cp312-win_amd64.pyd b/local_packages/numpy/random/mtrand.cp312-win_amd64.pyd new file mode 100644 index 0000000..fd7be96 Binary files /dev/null and b/local_packages/numpy/random/mtrand.cp312-win_amd64.pyd differ diff --git a/local_packages/numpy/random/mtrand.pyi b/local_packages/numpy/random/mtrand.pyi new file mode 100644 index 0000000..c20d351 --- /dev/null +++ b/local_packages/numpy/random/mtrand.pyi @@ -0,0 +1,759 @@ +import builtins +from collections.abc import Callable +from typing import Any, Literal, overload + +import numpy as np +from numpy import ( + dtype, + float64, + int8, + int16, + int32, + int64, + int_, + long, + uint, + uint8, + uint16, + uint32, + uint64, + ulong, +) +from numpy._typing import ( + ArrayLike, + NDArray, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _DTypeLikeBool, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _IntCodes, + _LongCodes, + _ShapeLike, + _SupportsDType, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _UIntCodes, + _ULongCodes, +) +from numpy.random.bit_generator import BitGenerator + +__all__ = [ + "RandomState", + "beta", + "binomial", + "bytes", + "chisquare", + "choice", + "dirichlet", + "exponential", + "f", + "gamma", + "geometric", + "get_bit_generator", + "get_state", + "gumbel", + "hypergeometric", + "laplace", + "logistic", + "lognormal", + "logseries", + "multinomial", + "multivariate_normal", + "negative_binomial", + "noncentral_chisquare", + "noncentral_f", + "normal", + "pareto", + "permutation", + "poisson", + "power", + "rand", + "randint", + "randn", + "random", + "random_integers", + "random_sample", + "ranf", + "rayleigh", + "sample", + "seed", + "set_bit_generator", + "set_state", + "shuffle", + "standard_cauchy", + "standard_exponential", + "standard_gamma", + "standard_normal", + "standard_t", + "triangular", + "uniform", + "vonmises", + "wald", + "weibull", + "zipf", +] + +class RandomState: + _bit_generator: BitGenerator + def __init__(self, seed: _ArrayLikeInt_co | BitGenerator | None = ...) -> None: ... + def __repr__(self) -> str: ... + def __str__(self) -> str: ... + def __getstate__(self) -> dict[str, Any]: ... + def __setstate__(self, state: dict[str, Any]) -> None: ... + def __reduce__(self) -> tuple[Callable[[BitGenerator], RandomState], tuple[BitGenerator], dict[str, Any]]: ... # noqa: E501 + def seed(self, seed: _ArrayLikeFloat_co | None = None) -> None: ... + @overload + def get_state(self, legacy: Literal[False] = False) -> dict[str, Any]: ... + @overload + def get_state( + self, legacy: Literal[True] = True + ) -> dict[str, Any] | tuple[str, NDArray[uint32], int, int, float]: ... + def set_state( + self, state: dict[str, Any] | tuple[str, NDArray[uint32], int, int, float] + ) -> None: ... + @overload + def random_sample(self, size: None = None) -> float: ... # type: ignore[misc] + @overload + def random_sample(self, size: _ShapeLike) -> NDArray[float64]: ... + @overload + def random(self, size: None = None) -> float: ... # type: ignore[misc] + @overload + def random(self, size: _ShapeLike) -> NDArray[float64]: ... + @overload + def beta(self, a: float, b: float, size: None = None) -> float: ... # type: ignore[misc] + @overload + def beta( + self, + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + size: _ShapeLike | None = None + ) -> NDArray[float64]: ... + @overload + def exponential(self, scale: float = 1.0, size: None = None) -> float: ... # type: ignore[misc] + @overload + def exponential( + self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None + ) -> NDArray[float64]: ... + @overload + def standard_exponential(self, size: None = None) -> float: ... # type: ignore[misc] + @overload + def standard_exponential(self, size: _ShapeLike) -> NDArray[float64]: ... + @overload + def tomaxint(self, size: None = None) -> int: ... # type: ignore[misc] + @overload + # Generates long values, but stores it in a 64bit int: + def tomaxint(self, size: _ShapeLike) -> NDArray[int64]: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = None, + size: None = None, + ) -> int: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = None, + size: None = None, + dtype: type[bool] = ..., + ) -> bool: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = None, + size: None = None, + dtype: type[np.bool] = ..., + ) -> np.bool: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = None, + size: None = None, + dtype: type[int] = ..., + ) -> int: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = None, + size: None = None, + dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., # noqa: E501 + ) -> uint8: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = None, + size: None = None, + dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., # noqa: E501 + ) -> uint16: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = None, + size: None = None, + dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., # noqa: E501 + ) -> uint32: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = None, + size: None = None, + dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., # noqa: E501 + ) -> uint: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = None, + size: None = None, + dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., # noqa: E501 + ) -> ulong: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = None, + size: None = None, + dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., # noqa: E501 + ) -> uint64: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = None, + size: None = None, + dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., # noqa: E501 + ) -> int8: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = None, + size: None = None, + dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., # noqa: E501 + ) -> int16: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = None, + size: None = None, + dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., # noqa: E501 + ) -> int32: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = None, + size: None = None, + dtype: dtype[int_] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., # noqa: E501 + ) -> int_: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = None, + size: None = None, + dtype: dtype[long] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., # noqa: E501 + ) -> long: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = None, + size: None = None, + dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., # noqa: E501 + ) -> int64: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + ) -> NDArray[long]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: _DTypeLikeBool = ..., + ) -> NDArray[np.bool]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., # noqa: E501 + ) -> NDArray[int8]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., # noqa: E501 + ) -> NDArray[int16]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., # noqa: E501 + ) -> NDArray[int32]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] | None = ..., # noqa: E501 + ) -> NDArray[int64]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., # noqa: E501 + ) -> NDArray[uint8]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., # noqa: E501 + ) -> NDArray[uint16]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., # noqa: E501 + ) -> NDArray[uint32]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., # noqa: E501 + ) -> NDArray[uint64]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: dtype[long] | type[int] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., # noqa: E501 + ) -> NDArray[long]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., # noqa: E501 + ) -> NDArray[ulong]: ... + def bytes(self, length: int) -> builtins.bytes: ... + @overload + def choice( + self, + a: int, + size: None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + ) -> int: ... + @overload + def choice( + self, + a: int, + size: _ShapeLike | None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + ) -> NDArray[long]: ... + @overload + def choice( + self, + a: ArrayLike, + size: None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + ) -> Any: ... + @overload + def choice( + self, + a: ArrayLike, + size: _ShapeLike | None = None, + replace: bool = True, + p: _ArrayLikeFloat_co | None = None, + ) -> NDArray[Any]: ... + @overload + def uniform( + self, low: float = 0.0, high: float = 1.0, size: None = None + ) -> float: ... # type: ignore[misc] + @overload + def uniform( + self, + low: _ArrayLikeFloat_co = 0.0, + high: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, + ) -> NDArray[float64]: ... + @overload + def rand(self) -> float: ... + @overload + def rand(self, *args: int) -> NDArray[float64]: ... + @overload + def randn(self) -> float: ... + @overload + def randn(self, *args: int) -> NDArray[float64]: ... + @overload + def random_integers( + self, low: int, high: int | None = None, size: None = None + ) -> int: ... # type: ignore[misc] + @overload + def random_integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + ) -> NDArray[long]: ... + @overload + def standard_normal(self, size: None = None) -> float: ... # type: ignore[misc] + @overload + def standard_normal( # type: ignore[misc] + self, size: _ShapeLike | None = None + ) -> NDArray[float64]: ... + @overload + def normal( + self, loc: float = 0.0, scale: float = 1.0, size: None = None + ) -> float: ... # type: ignore[misc] + @overload + def normal( + self, + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, + ) -> NDArray[float64]: ... + @overload + def standard_gamma( # type: ignore[misc] + self, + shape: float, + size: None = None, + ) -> float: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + size: _ShapeLike | None = None, + ) -> NDArray[float64]: ... + @overload + def gamma(self, shape: float, scale: float = 1.0, size: None = None) -> float: ... # type: ignore[misc] + @overload + def gamma( + self, + shape: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, + ) -> NDArray[float64]: ... + @overload + def f(self, dfnum: float, dfden: float, size: None = None) -> float: ... # type: ignore[misc] + @overload + def f( + self, + dfnum: _ArrayLikeFloat_co, + dfden: _ArrayLikeFloat_co, + size: _ShapeLike | None = None + ) -> NDArray[float64]: ... + @overload + def noncentral_f( + self, dfnum: float, dfden: float, nonc: float, size: None = None + ) -> float: ... # type: ignore[misc] + @overload + def noncentral_f( + self, + dfnum: _ArrayLikeFloat_co, + dfden: _ArrayLikeFloat_co, + nonc: _ArrayLikeFloat_co, + size: _ShapeLike | None = None, + ) -> NDArray[float64]: ... + @overload + def chisquare(self, df: float, size: None = None) -> float: ... # type: ignore[misc] + @overload + def chisquare( + self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None + ) -> NDArray[float64]: ... + @overload + def noncentral_chisquare( + self, df: float, nonc: float, size: None = None + ) -> float: ... # type: ignore[misc] + @overload + def noncentral_chisquare( + self, + df: _ArrayLikeFloat_co, + nonc: _ArrayLikeFloat_co, + size: _ShapeLike | None = None + ) -> NDArray[float64]: ... + @overload + def standard_t(self, df: float, size: None = None) -> float: ... # type: ignore[misc] + @overload + def standard_t( + self, df: _ArrayLikeFloat_co, size: None = None + ) -> NDArray[float64]: ... + @overload + def standard_t( + self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = None + ) -> NDArray[float64]: ... + @overload + def vonmises(self, mu: float, kappa: float, size: None = None) -> float: ... # type: ignore[misc] + @overload + def vonmises( + self, + mu: _ArrayLikeFloat_co, + kappa: _ArrayLikeFloat_co, + size: _ShapeLike | None = None + ) -> NDArray[float64]: ... + @overload + def pareto(self, a: float, size: None = None) -> float: ... # type: ignore[misc] + @overload + def pareto( + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None + ) -> NDArray[float64]: ... + @overload + def weibull(self, a: float, size: None = None) -> float: ... # type: ignore[misc] + @overload + def weibull( + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None + ) -> NDArray[float64]: ... + @overload + def power(self, a: float, size: None = None) -> float: ... # type: ignore[misc] + @overload + def power( + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None + ) -> NDArray[float64]: ... + @overload + def standard_cauchy(self, size: None = None) -> float: ... # type: ignore[misc] + @overload + def standard_cauchy(self, size: _ShapeLike | None = None) -> NDArray[float64]: ... + @overload + def laplace( + self, loc: float = 0.0, scale: float = 1.0, size: None = None + ) -> float: ... # type: ignore[misc] + @overload + def laplace( + self, + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, + ) -> NDArray[float64]: ... + @overload + def gumbel( + self, loc: float = 0.0, scale: float = 1.0, size: None = None + ) -> float: ... # type: ignore[misc] + @overload + def gumbel( + self, + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, + ) -> NDArray[float64]: ... + @overload + def logistic( + self, loc: float = 0.0, scale: float = 1.0, size: None = None + ) -> float: ... # type: ignore[misc] + @overload + def logistic( + self, + loc: _ArrayLikeFloat_co = 0.0, + scale: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, + ) -> NDArray[float64]: ... + @overload + def lognormal( + self, mean: float = 0.0, sigma: float = 1.0, size: None = None + ) -> float: ... # type: ignore[misc] + @overload + def lognormal( + self, + mean: _ArrayLikeFloat_co = 0.0, + sigma: _ArrayLikeFloat_co = 1.0, + size: _ShapeLike | None = None, + ) -> NDArray[float64]: ... + @overload + def rayleigh(self, scale: float = 1.0, size: None = None) -> float: ... # type: ignore[misc] + @overload + def rayleigh( + self, scale: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None + ) -> NDArray[float64]: ... + @overload + def wald(self, mean: float, scale: float, size: None = None) -> float: ... # type: ignore[misc] + @overload + def wald( + self, + mean: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co, + size: _ShapeLike | None = None + ) -> NDArray[float64]: ... + @overload + def triangular( + self, left: float, mode: float, right: float, size: None = None + ) -> float: ... # type: ignore[misc] + @overload + def triangular( + self, + left: _ArrayLikeFloat_co, + mode: _ArrayLikeFloat_co, + right: _ArrayLikeFloat_co, + size: _ShapeLike | None = None, + ) -> NDArray[float64]: ... + @overload + def binomial( + self, n: int, p: float, size: None = None + ) -> int: ... # type: ignore[misc] + @overload + def binomial( + self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None + ) -> NDArray[long]: ... + @overload + def negative_binomial( + self, n: float, p: float, size: None = None + ) -> int: ... # type: ignore[misc] + @overload + def negative_binomial( + self, + n: _ArrayLikeFloat_co, + p: _ArrayLikeFloat_co, + size: _ShapeLike | None = None + ) -> NDArray[long]: ... + @overload + def poisson( + self, lam: float = 1.0, size: None = None + ) -> int: ... # type: ignore[misc] + @overload + def poisson( + self, lam: _ArrayLikeFloat_co = 1.0, size: _ShapeLike | None = None + ) -> NDArray[long]: ... + @overload + def zipf(self, a: float, size: None = None) -> int: ... # type: ignore[misc] + @overload + def zipf( + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = None + ) -> NDArray[long]: ... + @overload + def geometric(self, p: float, size: None = None) -> int: ... # type: ignore[misc] + @overload + def geometric( + self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None + ) -> NDArray[long]: ... + @overload + def hypergeometric( + self, ngood: int, nbad: int, nsample: int, size: None = None + ) -> int: ... # type: ignore[misc] + @overload + def hypergeometric( + self, + ngood: _ArrayLikeInt_co, + nbad: _ArrayLikeInt_co, + nsample: _ArrayLikeInt_co, + size: _ShapeLike | None = None, + ) -> NDArray[long]: ... + @overload + def logseries(self, p: float, size: None = None) -> int: ... # type: ignore[misc] + @overload + def logseries( + self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = None + ) -> NDArray[long]: ... + def multivariate_normal( + self, + mean: _ArrayLikeFloat_co, + cov: _ArrayLikeFloat_co, + size: _ShapeLike | None = None, + check_valid: Literal["warn", "raise", "ignore"] = "warn", + tol: float = 1e-8, + ) -> NDArray[float64]: ... + def multinomial( + self, n: _ArrayLikeInt_co, + pvals: _ArrayLikeFloat_co, + size: _ShapeLike | None = None + ) -> NDArray[long]: ... + def dirichlet( + self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = None + ) -> NDArray[float64]: ... + def shuffle(self, x: ArrayLike) -> None: ... + @overload + def permutation(self, x: int) -> NDArray[long]: ... + @overload + def permutation(self, x: ArrayLike) -> NDArray[Any]: ... + +_rand: RandomState + +beta = _rand.beta +binomial = _rand.binomial +bytes = _rand.bytes +chisquare = _rand.chisquare +choice = _rand.choice +dirichlet = _rand.dirichlet +exponential = _rand.exponential +f = _rand.f +gamma = _rand.gamma +get_state = _rand.get_state +geometric = _rand.geometric +gumbel = _rand.gumbel +hypergeometric = _rand.hypergeometric +laplace = _rand.laplace +logistic = _rand.logistic +lognormal = _rand.lognormal +logseries = _rand.logseries +multinomial = _rand.multinomial +multivariate_normal = _rand.multivariate_normal +negative_binomial = _rand.negative_binomial +noncentral_chisquare = _rand.noncentral_chisquare +noncentral_f = _rand.noncentral_f +normal = _rand.normal +pareto = _rand.pareto +permutation = _rand.permutation +poisson = _rand.poisson +power = _rand.power +rand = _rand.rand +randint = _rand.randint +randn = _rand.randn +random = _rand.random +random_integers = _rand.random_integers +random_sample = _rand.random_sample +rayleigh = _rand.rayleigh +seed = _rand.seed +set_state = _rand.set_state +shuffle = _rand.shuffle +standard_cauchy = _rand.standard_cauchy +standard_exponential = _rand.standard_exponential +standard_gamma = _rand.standard_gamma +standard_normal = _rand.standard_normal +standard_t = _rand.standard_t +triangular = _rand.triangular +uniform = _rand.uniform +vonmises = _rand.vonmises +wald = _rand.wald +weibull = _rand.weibull +zipf = _rand.zipf +# Two legacy that are trivial wrappers around random_sample +sample = _rand.random_sample +ranf = _rand.random_sample + +def set_bit_generator(bitgen: BitGenerator) -> None: ... + +def get_bit_generator() -> BitGenerator: ... diff --git a/local_packages/numpy/random/tests/__init__.py b/local_packages/numpy/random/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/local_packages/numpy/random/tests/data/__init__.py b/local_packages/numpy/random/tests/data/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/local_packages/numpy/random/tests/data/generator_pcg64_np121.pkl.gz b/local_packages/numpy/random/tests/data/generator_pcg64_np121.pkl.gz new file mode 100644 index 0000000..b7ad03d Binary files /dev/null and b/local_packages/numpy/random/tests/data/generator_pcg64_np121.pkl.gz differ diff --git a/local_packages/numpy/random/tests/data/generator_pcg64_np126.pkl.gz b/local_packages/numpy/random/tests/data/generator_pcg64_np126.pkl.gz new file mode 100644 index 0000000..6c5130b Binary files /dev/null and b/local_packages/numpy/random/tests/data/generator_pcg64_np126.pkl.gz differ diff --git a/local_packages/numpy/random/tests/data/mt19937-testset-1.csv b/local_packages/numpy/random/tests/data/mt19937-testset-1.csv new file mode 100644 index 0000000..b97bfa6 --- /dev/null +++ b/local_packages/numpy/random/tests/data/mt19937-testset-1.csv @@ -0,0 +1,1001 @@ +seed, 0xdeadbeaf +0, 0xc816921f +1, 0xb3623c6d +2, 0x5fa391bb +3, 0x40178d9 +4, 0x7dcc9811 +5, 0x548eb8e6 +6, 0x92ba3125 +7, 0x65fde68d +8, 0x2f81ec95 +9, 0xbd94f7a2 +10, 0xdc4d9bcc +11, 0xa672bf13 +12, 0xb41113e +13, 0xec7e0066 +14, 0x50239372 +15, 0xd9d66b1d +16, 0xab72a161 +17, 0xddc2e29f +18, 0x7ea29ab4 +19, 0x80d141ba +20, 0xb1c7edf1 +21, 0x44d29203 +22, 0xe224d98 +23, 0x5b3e9d26 +24, 0x14fd567c +25, 0x27d98c96 +26, 0x838779fc +27, 0x92a138a +28, 0x5d08965b +29, 0x531e0ad6 +30, 0x984ee8f4 +31, 0x1ed78539 +32, 0x32bd6d8d +33, 0xc37c8516 +34, 0x9aef5c6b +35, 0x3aacd139 +36, 0xd96ed154 +37, 0x489cd1ed +38, 0x2cba4b3b +39, 0x76c6ae72 +40, 0x2dae02b9 +41, 0x52ac5fd6 +42, 0xc2b5e265 +43, 0x630e6a28 +44, 0x3f560d5d +45, 0x9315bdf3 +46, 0xf1055aba +47, 0x840e42c6 +48, 0xf2099c6b +49, 0x15ff7696 +50, 0x7948d146 +51, 0x97342961 +52, 0x7a7a21c +53, 0xc66f4fb1 +54, 0x23c4103e +55, 0xd7321f98 +56, 0xeb7efb75 +57, 0xe02490b5 +58, 0x2aa02de +59, 0x8bee0bf7 +60, 0xfc2da059 +61, 0xae835034 +62, 0x678f2075 +63, 0x6d03094b +64, 0x56455e05 +65, 0x18b32373 +66, 0x8ff0356b +67, 0x1fe442fb +68, 0x3f1ab6c3 +69, 0xb6fd21b +70, 0xfc310eb2 +71, 0xb19e9a4d +72, 0x17ddee72 +73, 0xfd534251 +74, 0x9e500564 +75, 0x9013a036 +76, 0xcf08f118 +77, 0x6b6d5969 +78, 0x3ccf1977 +79, 0x7cc11497 +80, 0x651c6ac9 +81, 0x4d6b104b +82, 0x9a28314e +83, 0x14c237be +84, 0x9cfc8d52 +85, 0x2947fad5 +86, 0xd71eff49 +87, 0x5188730e +88, 0x4b894614 +89, 0xf4fa2a34 +90, 0x42f7cc69 +91, 0x4089c9e8 +92, 0xbf0bbfe4 +93, 0x3cea65c +94, 0xc6221207 +95, 0x1bb71a8f +96, 0x54843fe7 +97, 0xbc59de4c +98, 0x79c6ee64 +99, 0x14e57a26 +100, 0x68d88fe +101, 0x2b86ef64 +102, 0x8ffff3c1 +103, 0x5bdd573f +104, 0x85671813 +105, 0xefe32ca2 +106, 0x105ded1e +107, 0x90ca2769 +108, 0xb33963ac +109, 0x363fbbc3 +110, 0x3b3763ae +111, 0x1d50ab88 +112, 0xc9ec01eb +113, 0xc8bbeada +114, 0x5d704692 +115, 0x5fd9e40 +116, 0xe61c125 +117, 0x2fe05792 +118, 0xda8afb72 +119, 0x4cbaa653 +120, 0xdd2243df +121, 0x896fd3f5 +122, 0x5bc23db +123, 0xa1c4e807 +124, 0x57d1a24d +125, 0x66503ddc +126, 0xcf7c0838 +127, 0x19e034fc +128, 0x66807450 +129, 0xfc219b3b +130, 0xe8a843e7 +131, 0x9ce61f08 +132, 0x92b950d6 +133, 0xce955ec4 +134, 0xda0d1f0d +135, 0x960c6250 +136, 0x39552432 +137, 0xde845e84 +138, 0xff3b4b11 +139, 0x5d918e6f +140, 0xbb930df2 +141, 0x7cfb0993 +142, 0x5400e1e9 +143, 0x3bfa0954 +144, 0x7e2605fb +145, 0x11941591 +146, 0x887e6994 +147, 0xdc8bed45 +148, 0x45b3fb50 +149, 0xfbdf8358 +150, 0x41507468 +151, 0x34c87166 +152, 0x17f64d77 +153, 0x3bbaf4f8 +154, 0x4f26f37e +155, 0x4a56ebf2 +156, 0x81100f1 +157, 0x96d94eae +158, 0xca88fda5 +159, 0x2eef3a60 +160, 0x952afbd3 +161, 0x2bec88c7 +162, 0x52335c4b +163, 0x8296db8e +164, 0x4da7d00a +165, 0xc00ac899 +166, 0xadff8c72 +167, 0xbecf26cf +168, 0x8835c83c +169, 0x1d13c804 +170, 0xaa940ddc +171, 0x68222cfe +172, 0x4569c0e1 +173, 0x29077976 +174, 0x32d4a5af +175, 0xd31fcdef +176, 0xdc60682b +177, 0x7c95c368 +178, 0x75a70213 +179, 0x43021751 +180, 0x5e52e0a6 +181, 0xf7e190b5 +182, 0xee3e4bb +183, 0x2fe3b150 +184, 0xcf419c07 +185, 0x478a4570 +186, 0xe5c3ea50 +187, 0x417f30a8 +188, 0xf0cfdaa0 +189, 0xd1f7f738 +190, 0x2c70fc23 +191, 0x54fc89f9 +192, 0x444dcf01 +193, 0xec2a002d +194, 0xef0c3a88 +195, 0xde21be9 +196, 0x88ab3296 +197, 0x3028897c +198, 0x264b200b +199, 0xd8ae0706 +200, 0x9eef901a +201, 0xbd1b96e0 +202, 0xea71366c +203, 0x1465b694 +204, 0x5a794650 +205, 0x83df52d4 +206, 0x8262413d +207, 0x5bc148c0 +208, 0xe0ecd80c +209, 0x40649571 +210, 0xb4d2ee5f +211, 0xedfd7d09 +212, 0xa082e25f +213, 0xc62992d1 +214, 0xbc7e65ee +215, 0x5499cf8a +216, 0xac28f775 +217, 0x649840fb +218, 0xd4c54805 +219, 0x1d166ba6 +220, 0xbeb1171f +221, 0x45b66703 +222, 0x78c03349 +223, 0x38d2a6ff +224, 0x935cae8b +225, 0x1d07dc3f +226, 0x6c1ed365 +227, 0x579fc585 +228, 0x1320c0ec +229, 0x632757eb +230, 0xd265a397 +231, 0x70e9b6c2 +232, 0xc81e322c +233, 0xa27153cf +234, 0x2118ba19 +235, 0x514ec400 +236, 0x2bd0ecd6 +237, 0xc3e7dae3 +238, 0xfa39355e +239, 0x48f23cc1 +240, 0xbcf75948 +241, 0x53ccc70c +242, 0x75346423 +243, 0x951181e0 +244, 0x348e90df +245, 0x14365d7f +246, 0xfbc95d7a +247, 0xdc98a9e6 +248, 0xed202df7 +249, 0xa59ec913 +250, 0x6b6e9ae2 +251, 0x1697f265 +252, 0x15d322d0 +253, 0xa2e7ee0a +254, 0x88860b7e +255, 0x455d8b9d +256, 0x2f5c59cb +257, 0xac49c9f1 +258, 0xa6a6a039 +259, 0xc057f56b +260, 0xf1ff1208 +261, 0x5eb8dc9d +262, 0xe6702509 +263, 0xe238b0ed +264, 0x5ae32e3d +265, 0xa88ebbdf +266, 0xef885ae7 +267, 0xafa6d49b +268, 0xc94499e0 +269, 0x1a196325 +270, 0x88938da3 +271, 0x14f4345 +272, 0xd8e33637 +273, 0xa3551bd5 +274, 0x73fe35c7 +275, 0x9561e94b +276, 0xd673bf68 +277, 0x16134872 +278, 0x68c42f9f +279, 0xdf7574c8 +280, 0x8809bab9 +281, 0x1432cf69 +282, 0xafb66bf1 +283, 0xc184aa7b +284, 0xedbf2007 +285, 0xbd420ce1 +286, 0x761033a0 +287, 0xff7e351f +288, 0xd6c3780e +289, 0x5844416f +290, 0xc6c0ee1c +291, 0xd2e147db +292, 0x92ac601a +293, 0x393e846b +294, 0x18196cca +295, 0x54a22be +296, 0x32bab1c4 +297, 0x60365183 +298, 0x64fa342 +299, 0xca24a493 +300, 0xd8cc8b83 +301, 0x3faf102b +302, 0x6e09bb58 +303, 0x812f0ea +304, 0x592c95d8 +305, 0xe45ea4c5 +306, 0x23aebf83 +307, 0xbd9691d4 +308, 0xf47b4baa +309, 0x4ac7b487 +310, 0xcce18803 +311, 0x3377556e +312, 0x3ff8e6b6 +313, 0x99d22063 +314, 0x23250bec +315, 0x4e1f9861 +316, 0x8554249b +317, 0x8635c2fc +318, 0xe8426e8a +319, 0x966c29d8 +320, 0x270b6082 +321, 0x3180a8a1 +322, 0xe7e1668b +323, 0x7f868dc +324, 0xcf4c17cf +325, 0xe31de4d1 +326, 0xc8c8aff4 +327, 0xae8db704 +328, 0x3c928cc2 +329, 0xe12cd48 +330, 0xb33ecd04 +331, 0xb93d7cbe +332, 0x49c69d6a +333, 0x7d3bce64 +334, 0x86bc219 +335, 0x8408233b +336, 0x44dc7479 +337, 0xdf80d538 +338, 0xf3db02c3 +339, 0xbbbd31d7 +340, 0x121281f +341, 0x7521e9a3 +342, 0x8859675a +343, 0x75aa6502 +344, 0x430ed15b +345, 0xecf0a28d +346, 0x659774fd +347, 0xd58a2311 +348, 0x512389a9 +349, 0xff65e1ff +350, 0xb6ddf222 +351, 0xe3458895 +352, 0x8b13cd6e +353, 0xd4a22870 +354, 0xe604c50c +355, 0x27f54f26 +356, 0x8f7f422f +357, 0x9735b4cf +358, 0x414072b0 +359, 0x76a1c6d5 +360, 0xa2208c06 +361, 0x83cd0f61 +362, 0x6c4f7ead +363, 0x6553cf76 +364, 0xeffcf44 +365, 0x7f434a3f +366, 0x9dc364bd +367, 0x3cdf52ed +368, 0xad597594 +369, 0x9c3e211b +370, 0x6c04a33f +371, 0x885dafa6 +372, 0xbbdaca71 +373, 0x7ae5dd5c +374, 0x37675644 +375, 0x251853c6 +376, 0x130b086b +377, 0x143fa54b +378, 0x54cdc282 +379, 0x9faff5b3 +380, 0x502a5c8b +381, 0xd9524550 +382, 0xae221aa6 +383, 0x55cf759b +384, 0x24782da4 +385, 0xd715d815 +386, 0x250ea09a +387, 0x4e0744ac +388, 0x11e15814 +389, 0xabe5f9df +390, 0xc8146350 +391, 0xfba67d9b +392, 0x2b82e42f +393, 0xd4ea96fc +394, 0x5ffc179e +395, 0x1598bafe +396, 0x7fb6d662 +397, 0x1a12a0db +398, 0x450cee4a +399, 0x85f8e12 +400, 0xce71b594 +401, 0xd4bb1d19 +402, 0x968f379d +403, 0x54cc1d52 +404, 0x467e6066 +405, 0x7da5f9a9 +406, 0x70977034 +407, 0x49e65c4b +408, 0xd08570d1 +409, 0x7acdf60b +410, 0xdffa038b +411, 0x9ce14e4c +412, 0x107cbbf8 +413, 0xdd746ca0 +414, 0xc6370a46 +415, 0xe7f83312 +416, 0x373fa9ce +417, 0xd822a2c6 +418, 0x1d4efea6 +419, 0xc53dcadb +420, 0x9b4e898f +421, 0x71daa6bf +422, 0x7a0bc78b +423, 0xd7b86f50 +424, 0x1b8b3286 +425, 0xcf9425dd +426, 0xd5263220 +427, 0x4ea0b647 +428, 0xc767fe64 +429, 0xcfc5e67 +430, 0xcc6a2942 +431, 0xa51eff00 +432, 0x76092e1b +433, 0xf606e80f +434, 0x824b5e20 +435, 0xebb55e14 +436, 0x783d96a6 +437, 0x10696512 +438, 0x17ee510a +439, 0x3ab70a1f +440, 0xcce6b210 +441, 0x8f72f0fb +442, 0xf0610b41 +443, 0x83d01fb5 +444, 0x6b3de36 +445, 0xe4c2e84f +446, 0x9c43bb15 +447, 0xddf2905 +448, 0x7dd63556 +449, 0x3662ca09 +450, 0xfb81f35b +451, 0xc2c8a72a +452, 0x8e93c37 +453, 0xa93da2d4 +454, 0xa03af8f1 +455, 0x8d75159a +456, 0x15f010b0 +457, 0xa296ab06 +458, 0xe55962ba +459, 0xeae700a9 +460, 0xe388964a +461, 0x917f2bec +462, 0x1c203fea +463, 0x792a01ba +464, 0xa93a80ac +465, 0x9eb8a197 +466, 0x56c0bc73 +467, 0xb8f05799 +468, 0xf429a8c8 +469, 0xb92cee42 +470, 0xf8864ec +471, 0x62f2518a +472, 0x3a7bfa3e +473, 0x12e56e6d +474, 0xd7a18313 +475, 0x41fa3899 +476, 0xa09c4956 +477, 0xebcfd94a +478, 0xc485f90b +479, 0x4391ce40 +480, 0x742a3333 +481, 0xc932f9e5 +482, 0x75c6c263 +483, 0x80937f0 +484, 0xcf21833c +485, 0x16027520 +486, 0xd42e669f +487, 0xb0f01fb7 +488, 0xb35896f1 +489, 0x763737a9 +490, 0x1bb20209 +491, 0x3551f189 +492, 0x56bc2602 +493, 0xb6eacf4 +494, 0x42ec4d11 +495, 0x245cc68 +496, 0xc27ac43b +497, 0x9d903466 +498, 0xce3f0c05 +499, 0xb708c31c +500, 0xc0fd37eb +501, 0x95938b2c +502, 0xf20175a7 +503, 0x4a86ee9b +504, 0xbe039a58 +505, 0xd41cabe7 +506, 0x83bc99ba +507, 0x761d60e1 +508, 0x7737cc2e +509, 0x2b82fc4b +510, 0x375aa401 +511, 0xfe9597a0 +512, 0x5543806a +513, 0x44f31238 +514, 0x7df31538 +515, 0x74cfa770 +516, 0x8755d881 +517, 0x1fde665a +518, 0xda8bf315 +519, 0x973d8e95 +520, 0x72205228 +521, 0x8fe59717 +522, 0x7bb90b34 +523, 0xef6ed945 +524, 0x16fd4a38 +525, 0x5db44de1 +526, 0xf09f93b3 +527, 0xe84824cc +528, 0x945bb50e +529, 0xd0be4aa5 +530, 0x47c277c2 +531, 0xd3800c28 +532, 0xac1c33ec +533, 0xd3dacce +534, 0x811c8387 +535, 0x6761b36 +536, 0x70d3882f +537, 0xd6e62e3a +538, 0xea25daa2 +539, 0xb07f39d1 +540, 0x391d89d7 +541, 0x84b6fb5e +542, 0x3dda3fca +543, 0x229e80a4 +544, 0x3d94a4b7 +545, 0x5d3d576a +546, 0xad7818a0 +547, 0xce23b03a +548, 0x7aa2079c +549, 0x9a6be555 +550, 0x83f3b34a +551, 0x1848f9d9 +552, 0xd8fefc1c +553, 0x48e6ce48 +554, 0x52e55750 +555, 0xf41a71cf +556, 0xba08e259 +557, 0xfaf06a15 +558, 0xeaaac0fb +559, 0x34f90098 +560, 0xb1dfffbb +561, 0x718daec2 +562, 0xab4dda21 +563, 0xd27cc1ee +564, 0x4aafbc4c +565, 0x356dfb4f +566, 0x83fcdfd6 +567, 0x8f0bcde0 +568, 0x4363f844 +569, 0xadc0f4d5 +570, 0x3bde994e +571, 0x3884d452 +572, 0x21876b4a +573, 0x9c985398 +574, 0xca55a226 +575, 0x3a88c583 +576, 0x916dc33c +577, 0x8f67d1d7 +578, 0x3b26a667 +579, 0xe4ddeb4b +580, 0x1a9d8c33 +581, 0x81c9b74f +582, 0x9ed1e9df +583, 0x6e61aecf +584, 0x95e95a5d +585, 0x68864ff5 +586, 0xb8fa5b9 +587, 0x72b1b3de +588, 0x5e18a86b +589, 0xd7f2337d +590, 0xd70e0925 +591, 0xb573a4c1 +592, 0xc77b3f8a +593, 0x389b20de +594, 0x16cf6afb +595, 0xa39bd275 +596, 0xf491cf01 +597, 0x6f88a802 +598, 0x8510af05 +599, 0xe7cd549a +600, 0x8603179a +601, 0xef43f191 +602, 0xf9b64c60 +603, 0xb00254a7 +604, 0xd7c06a2d +605, 0x17e9380b +606, 0x529e727b +607, 0xaaa8fe0a +608, 0xfb64ff4c +609, 0xcd75af26 +610, 0xfb717c87 +611, 0xa0789899 +612, 0x10391ec9 +613, 0x7e9b40b3 +614, 0x18536554 +615, 0x728c05f7 +616, 0x787dca98 +617, 0xad948d1 +618, 0x44c18def +619, 0x3303f2ec +620, 0xa15acb5 +621, 0xb58d38f4 +622, 0xfe041ef8 +623, 0xd151a956 +624, 0x7b9168e8 +625, 0x5ebeca06 +626, 0x90fe95df +627, 0xf76875aa +628, 0xb2e0d664 +629, 0x2e3253b7 +630, 0x68e34469 +631, 0x1f0c2d89 +632, 0x13a34ac2 +633, 0x5ffeb841 +634, 0xe381e91c +635, 0xb8549a92 +636, 0x3f35cf1 +637, 0xda0f9dcb +638, 0xdd9828a6 +639, 0xe1428f29 +640, 0xf4db80b5 +641, 0xdac30af5 +642, 0x1af1dd17 +643, 0x9a540254 +644, 0xcab68a38 +645, 0x33560361 +646, 0x2fbf3886 +647, 0xbc785923 +648, 0xe081cd10 +649, 0x8e473356 +650, 0xd102c357 +651, 0xeea4fe48 +652, 0x248d3453 +653, 0x1da79ac +654, 0x815a65ff +655, 0x27693e76 +656, 0xb7d5af40 +657, 0x6d245d30 +658, 0x9e06fa8f +659, 0xb0570dcb +660, 0x469f0005 +661, 0x3e0ca132 +662, 0xd89bbf3 +663, 0xd61ccd47 +664, 0x6383878 +665, 0x62b5956 +666, 0x4dc83675 +667, 0x93fd8492 +668, 0x5a0091f5 +669, 0xc9f9bc3 +670, 0xa26e7778 +671, 0xeabf2d01 +672, 0xe612dc06 +673, 0x85d89ff9 +674, 0xd1763179 +675, 0xcb88947b +676, 0x9e8757a5 +677, 0xe100e85c +678, 0x904166eb +679, 0x4996243d +680, 0x4038e1cb +681, 0x2be2c63d +682, 0x77017e81 +683, 0x3b1f556b +684, 0x1c785c77 +685, 0x6869b8bd +686, 0xe1217ed4 +687, 0x4012ab2f +688, 0xc06c0d8e +689, 0x2122eb68 +690, 0xad1783fd +691, 0x5f0c80e3 +692, 0x828f7efa +693, 0x29328399 +694, 0xeadf1087 +695, 0x85dc0037 +696, 0x9691ef26 +697, 0xc0947a53 +698, 0x2a178d2a +699, 0x2a2c7e8f +700, 0x90378380 +701, 0xaad8d326 +702, 0x9cf1c3c8 +703, 0x84eccd44 +704, 0x79e61808 +705, 0x8b3f454e +706, 0x209e6e1 +707, 0x51f88378 +708, 0xc210226f +709, 0xd982adb5 +710, 0x55d44a31 +711, 0x9817d443 +712, 0xa328c626 +713, 0x13455966 +714, 0xb8f681d3 +715, 0x2a3c713b +716, 0xc186959b +717, 0x814a74b0 +718, 0xed7bc90 +719, 0xa88d3d6d +720, 0x88a9f561 +721, 0x73aa1c0a +722, 0xdfeff404 +723, 0xec037e4b +724, 0xa5c209f0 +725, 0xb3a223b4 +726, 0x24ce3709 +727, 0x3184c790 +728, 0xa1398c62 +729, 0x2f92034e +730, 0xbb37a79a +731, 0x605287b4 +732, 0x8faa772c +733, 0x6ce56c1d +734, 0xc035fb4c +735, 0x7cf5b316 +736, 0x6502645 +737, 0xa283d810 +738, 0x778bc2f1 +739, 0xfdf99313 +740, 0x1f513265 +741, 0xbd3837e2 +742, 0x9b84a9a +743, 0x2139ce91 +744, 0x61a8e890 +745, 0xf9ff12db +746, 0xb43d2ea7 +747, 0x88532e61 +748, 0x175a6655 +749, 0x7a6c4f72 +750, 0x6dafc1b7 +751, 0x449b1459 +752, 0x514f654f +753, 0x9a6731e2 +754, 0x8632da43 +755, 0xc81b0422 +756, 0x81fe9005 +757, 0x15b79618 +758, 0xb5fa629f +759, 0x987a474f +760, 0x1c74f54e +761, 0xf9743232 +762, 0xec4b55f +763, 0x87d761e5 +764, 0xd1ad78b7 +765, 0x453d9350 +766, 0xc7a7d85 +767, 0xb2576ff5 +768, 0xcdde49b7 +769, 0x8e1f763e +770, 0x1338583e +771, 0xfd65b9dc +772, 0x4f19c4f4 +773, 0x3a52d73d +774, 0xd3509c4c +775, 0xda24fe31 +776, 0xe2de56ba +777, 0x2db5e540 +778, 0x23172734 +779, 0x4db572f +780, 0xeb941718 +781, 0x84c2649a +782, 0x3b1e5b6a +783, 0x4c9c61b9 +784, 0x3bccd11 +785, 0xb4d7b78e +786, 0x48580ae5 +787, 0xd273ab68 +788, 0x25c11615 +789, 0x470b53f6 +790, 0x329c2068 +791, 0x1693721b +792, 0xf8c9aacf +793, 0x4c3d5693 +794, 0xd778284e +795, 0xae1cb24f +796, 0x3c11d1b3 +797, 0xddd2b0c0 +798, 0x90269fa7 +799, 0x5666e0a2 +800, 0xf9f195a4 +801, 0x61d78eb2 +802, 0xada5a7c0 +803, 0xaa272fbe +804, 0xba3bae2f +805, 0xd0b70fc2 +806, 0x529f32b +807, 0xda7a3e21 +808, 0x9a776a20 +809, 0xb21f9635 +810, 0xb3acc14e +811, 0xac55f56 +812, 0x29dccf41 +813, 0x32dabdb3 +814, 0xaa032f58 +815, 0xfa406af4 +816, 0xce3c415d +817, 0xb44fb4d9 +818, 0x32248d1c +819, 0x680c6440 +820, 0xae2337b +821, 0x294cb597 +822, 0x5bca48fe +823, 0xaef19f40 +824, 0xad60406 +825, 0x4781f090 +826, 0xfd691ffc +827, 0xb6568268 +828, 0xa56c72cb +829, 0xf8a9e0fc +830, 0x9af4fd02 +831, 0x2cd30932 +832, 0x776cefd7 +833, 0xe31f476e +834, 0x6d94a437 +835, 0xb3cab598 +836, 0xf582d13f +837, 0x3bf8759d +838, 0xc3777dc +839, 0x5e425ea8 +840, 0x1c7ff4ed +841, 0x1c2e97d1 +842, 0xc062d2b4 +843, 0x46dc80e0 +844, 0xbcdb47e6 +845, 0x32282fe0 +846, 0xaba89063 +847, 0x5e94e9bb +848, 0x3e667f78 +849, 0xea6eb21a +850, 0xe56e54e8 +851, 0xa0383510 +852, 0x6768fe2b +853, 0xb53ac3e0 +854, 0x779569a0 +855, 0xeca83c6a +856, 0x24db4d2d +857, 0x4585f696 +858, 0xf84748b2 +859, 0xf6a4dd5b +860, 0x31fb524d +861, 0x67ab39fe +862, 0x5882a899 +863, 0x9a05fcf6 +864, 0x712b5674 +865, 0xe8c6958f +866, 0x4b448bb3 +867, 0x530b9abf +868, 0xb491f491 +869, 0x98352c62 +870, 0x2d0a50e3 +871, 0xeb4384da +872, 0x36246f07 +873, 0xcbc5c1a +874, 0xae24031d +875, 0x44d11ed6 +876, 0xf07f1608 +877, 0xf296aadd +878, 0x3bcfe3be +879, 0x8fa1e7df +880, 0xfd317a6e +881, 0xe4975c44 +882, 0x15205892 +883, 0xa762d4df +884, 0xf1167365 +885, 0x6811cc00 +886, 0x8315f23 +887, 0xe045b4b1 +888, 0xa8496414 +889, 0xbed313ae +890, 0xcdae3ddb +891, 0xa9c22c9 +892, 0x275fab1a +893, 0xedd65fa +894, 0x4c188229 +895, 0x63a83e58 +896, 0x18aa9207 +897, 0xa41f2e78 +898, 0xd9f63653 +899, 0xbe2be73b +900, 0xa3364d39 +901, 0x896d5428 +902, 0xc737539e +903, 0x745a78c6 +904, 0xf0b2b042 +905, 0x510773b4 +906, 0x92ad8e37 +907, 0x27f2f8c4 +908, 0x23704cc8 +909, 0x3d95a77f +910, 0xf08587a4 +911, 0xbd696a25 +912, 0x948924f3 +913, 0x8cddb634 +914, 0xcd2a4910 +915, 0x8e0e300e +916, 0x83815a9b +917, 0x67383510 +918, 0x3c18f0d0 +919, 0xc7a7bccc +920, 0x7cc2d3a2 +921, 0x52eb2eeb +922, 0xe4a257e5 +923, 0xec76160e +924, 0x63f9ad68 +925, 0x36d0bbbf +926, 0x957bc4e4 +927, 0xc9ed90ff +928, 0x4cb6059d +929, 0x2f86eca1 +930, 0x3e3665a3 +931, 0x9b7eb6f4 +932, 0x492e7e18 +933, 0xa098aa51 +934, 0x7eb568b2 +935, 0x3fd639ba +936, 0x7bebcf1 +937, 0x99c844ad +938, 0x43cb5ec7 +939, 0x8dfbbef5 +940, 0x5be413ff +941, 0xd93b976d +942, 0xc1c7a86d +943, 0x1f0e93d0 +944, 0x498204a2 +945, 0xe8fe832a +946, 0x2236bd7 +947, 0x89953769 +948, 0x2acc3491 +949, 0x2c4f22c6 +950, 0xd7996277 +951, 0x3bcdc349 +952, 0xfc286630 +953, 0x5f8909fd +954, 0x242677c0 +955, 0x4cb34104 +956, 0xa6ff8100 +957, 0x39ea47ec +958, 0x9bd54140 +959, 0x7502ffe8 +960, 0x7ebef8ae +961, 0x1ed8abe4 +962, 0xfaba8450 +963, 0xc197b65f +964, 0x19431455 +965, 0xe229c176 +966, 0xeb2967da +967, 0xe0c5dc05 +968, 0xa84e3227 +969, 0x10dd9e0f +970, 0xbdb70b02 +971, 0xce24808a +972, 0x423edab8 +973, 0x194caf71 +974, 0x144f150d +975, 0xf811c2d2 +976, 0xc224ee85 +977, 0x2b217a5b +978, 0xf78a5a79 +979, 0x6554a4b1 +980, 0x769582df +981, 0xf4b2cf93 +982, 0x89648483 +983, 0xb3283a3e +984, 0x82b895db +985, 0x79388ef0 +986, 0x54bc42a6 +987, 0xc4dd39d9 +988, 0x45b33b7d +989, 0x8703b2c1 +990, 0x1cc94806 +991, 0xe0f43e49 +992, 0xcaa7b6bc +993, 0x4f88e9af +994, 0x1477cce5 +995, 0x347dd115 +996, 0x36e335fa +997, 0xb93c9a31 +998, 0xaac3a175 +999, 0x68a19647 diff --git a/local_packages/numpy/random/tests/data/mt19937-testset-2.csv b/local_packages/numpy/random/tests/data/mt19937-testset-2.csv new file mode 100644 index 0000000..cdb8e47 --- /dev/null +++ b/local_packages/numpy/random/tests/data/mt19937-testset-2.csv @@ -0,0 +1,1001 @@ +seed, 0x0 +0, 0x7ab4ea94 +1, 0x9b561119 +2, 0x4957d02e +3, 0x7dd3fdc2 +4, 0x5affe54 +5, 0x5a01741c +6, 0x8b9e8c1f +7, 0xda5bf11a +8, 0x509226 +9, 0x64e2ea17 +10, 0x82c6dab5 +11, 0xe4302515 +12, 0x8198b873 +13, 0xc3ec9a82 +14, 0x829dff28 +15, 0x5278e44f +16, 0x994a7d2c +17, 0xf1c89398 +18, 0xaf2fddec +19, 0x22abc6ee +20, 0x963dbd43 +21, 0xc29edffb +22, 0x41c1ce07 +23, 0x9c90034d +24, 0x1f17a796 +25, 0x3833caa8 +26, 0xb8795528 +27, 0xebc595a2 +28, 0xf8f5b5dd +29, 0xc2881f72 +30, 0x18e5d3f0 +31, 0x9b19ac7a +32, 0xb9992436 +33, 0xc00052b3 +34, 0xb63f4475 +35, 0x962642d9 +36, 0x63506c10 +37, 0x2be6b127 +38, 0x569bdbc6 +39, 0x7f185e01 +40, 0xebb55f53 +41, 0x1c30198c +42, 0x7c8d75c6 +43, 0xd3f2186b +44, 0xaca5b9b1 +45, 0xbc49ff45 +46, 0xc4a802af +47, 0x2cecd86f +48, 0x8e0da529 +49, 0x1f22b00e +50, 0x4559ea80 +51, 0x60f587d8 +52, 0x7c7460e9 +53, 0x67be0a4a +54, 0x987a0183 +55, 0x7bd30f1 +56, 0xab18c4ac +57, 0xffdbfb64 +58, 0x9ea917f9 +59, 0x1239dab7 +60, 0x38efabeb +61, 0x5da91888 +62, 0x8f49ed62 +63, 0x83f60b1e +64, 0x5950a3fc +65, 0xd8911104 +66, 0x19e8859e +67, 0x1a4d89ec +68, 0x968ca180 +69, 0x9e1b6da3 +70, 0x3d99c2c +71, 0x55f76289 +72, 0x8fa28b9e +73, 0x9fe01d33 +74, 0xdade4e38 +75, 0x1ea04290 +76, 0xa7263313 +77, 0xaafc762e +78, 0x460476d6 +79, 0x31226e12 +80, 0x451d3f05 +81, 0xd0d2764b +82, 0xd06e1ab3 +83, 0x1394e3f4 +84, 0x2fc04ea3 +85, 0x5b8401c +86, 0xebd6c929 +87, 0xe881687c +88, 0x94bdd66a +89, 0xabf85983 +90, 0x223ad12d +91, 0x2aaeeaa3 +92, 0x1f704934 +93, 0x2db2efb6 +94, 0xf49b8dfb +95, 0x5bdbbb9d +96, 0xba0cd0db +97, 0x4ec4674e +98, 0xad0129e +99, 0x7a66129b +100, 0x50d12c5e +101, 0x85b1d335 +102, 0x3efda58a +103, 0xecd886fb +104, 0x8ecadd3d +105, 0x60ebac0f +106, 0x5e10fe79 +107, 0xa84f7e5d +108, 0x43931288 +109, 0xfacf448 +110, 0x4ee01997 +111, 0xcdc0a651 +112, 0x33c87037 +113, 0x8b50fc03 +114, 0xf52aad34 +115, 0xda6cd856 +116, 0x7585bea0 +117, 0xe947c762 +118, 0x4ddff5d8 +119, 0xe0e79b3b +120, 0xb804cf09 +121, 0x84765c44 +122, 0x3ff666b4 +123, 0xe31621ad +124, 0x816f2236 +125, 0x228176bc +126, 0xfdc14904 +127, 0x635f5077 +128, 0x6981a817 +129, 0xfd9a0300 +130, 0xd3fa8a24 +131, 0xd67c1a77 +132, 0x903fe97a +133, 0xf7c4a4d5 +134, 0x109f2058 +135, 0x48ab87fe +136, 0xfd6f1928 +137, 0x707e9452 +138, 0xf327db9e +139, 0x7b80d76d +140, 0xfb6ba193 +141, 0x454a1ad0 +142, 0xe20b51e +143, 0xb774d085 +144, 0x6b1ed574 +145, 0xb1e77de4 +146, 0xe2a83b37 +147, 0x33d3176f +148, 0x2f0ca0fc +149, 0x17f51e2 +150, 0x7c1fbf55 +151, 0xf09e9cd0 +152, 0xe3d9bacd +153, 0x4244db0a +154, 0x876c09fc +155, 0x9db4fc2f +156, 0xd3771d60 +157, 0x25fc6a75 +158, 0xb309915c +159, 0xc50ee027 +160, 0xaa5b7b38 +161, 0x4c650ded +162, 0x1acb2879 +163, 0x50db5887 +164, 0x90054847 +165, 0xfef23e5b +166, 0x2dd7b7d5 +167, 0x990b8c2e +168, 0x6001a601 +169, 0xb5d314c4 +170, 0xfbfb7bf9 +171, 0x1aba997d +172, 0x814e7304 +173, 0x989d956a +174, 0x86d5a29c +175, 0x70a9fa08 +176, 0xc4ccba87 +177, 0x7e9cb366 +178, 0xee18eb0a +179, 0x44f5be58 +180, 0x91d4af2d +181, 0x5ab6e593 +182, 0x9fd6bb4d +183, 0x85894ce +184, 0x728a2401 +185, 0xf006f6d4 +186, 0xd782741e +187, 0x842cd5bd +188, 0xfb5883aa +189, 0x7e5a471 +190, 0x83ff6965 +191, 0xc9675c6b +192, 0xb6ced3c7 +193, 0x3de6425b +194, 0x25e14db4 +195, 0x69ca3dec +196, 0x81342d13 +197, 0xd7cd8417 +198, 0x88d15e69 +199, 0xefba17c9 +200, 0x43d595e6 +201, 0x89d4cf25 +202, 0x7cae9b9b +203, 0x2242c621 +204, 0x27fc3598 +205, 0x467b1d84 +206, 0xe84d4622 +207, 0xa26bf980 +208, 0x80411010 +209, 0xe2c2bfea +210, 0xbc6ca25a +211, 0x3ddb592a +212, 0xdd46eb9e +213, 0xdfe8f657 +214, 0x2cedc974 +215, 0xf0dc546b +216, 0xd46be68f +217, 0x26d8a5aa +218, 0x76e96ba3 +219, 0x7d5b5353 +220, 0xf532237c +221, 0x6478b79 +222, 0x9b81a5e5 +223, 0x5fc68e5c +224, 0x68436e70 +225, 0x2a0043f9 +226, 0x108d523c +227, 0x7a4c32a3 +228, 0x9c84c742 +229, 0x6f813dae +230, 0xfcc5bbcc +231, 0x215b6f3a +232, 0x84cb321d +233, 0x7913a248 +234, 0xb1e6b585 +235, 0x49376b31 +236, 0x1dc896b0 +237, 0x347051ad +238, 0x5524c042 +239, 0xda0eef9d +240, 0xf2e73342 +241, 0xbeee2f9d +242, 0x7c702874 +243, 0x9eb3bd34 +244, 0x97b09700 +245, 0xcdbab1d4 +246, 0x4a2f6ed1 +247, 0x2047bda5 +248, 0x3ecc7005 +249, 0x8d0d5e67 +250, 0x40876fb5 +251, 0xb5fd2187 +252, 0xe915d8af +253, 0x9a2351c7 +254, 0xccc658ae +255, 0xebb1eddc +256, 0xc4a83671 +257, 0xffb2548f +258, 0xe4fe387a +259, 0x477aaab4 +260, 0x8475a4e4 +261, 0xf8823e46 +262, 0xe4130f71 +263, 0xbdb54482 +264, 0x98fe0462 +265, 0xf36b27b8 +266, 0xed7733da +267, 0x5f428afc +268, 0x43a3a21a +269, 0xf8370b55 +270, 0xfade1de1 +271, 0xd9a038ea +272, 0x3c69af23 +273, 0x24df7dd0 +274, 0xf66d9353 +275, 0x71d811be +276, 0xcc4d024b +277, 0xb8c30bf0 +278, 0x4198509d +279, 0x8b37ba36 +280, 0xa41ae29a +281, 0x8cf7799e +282, 0x5cd0136a +283, 0xa11324ef +284, 0x2f8b6d4b +285, 0x3657cf17 +286, 0x35b6873f +287, 0xee6e5bd7 +288, 0xbeeaa98 +289, 0x9ad3c581 +290, 0xe2376c3f +291, 0x738027cc +292, 0x536ac839 +293, 0xf066227 +294, 0x6c9cb0f9 +295, 0x84082ae6 +296, 0xab38ae9d +297, 0x493eade9 +298, 0xcb630b3a +299, 0x64d44250 +300, 0xe5efb557 +301, 0xea2424d9 +302, 0x11a690ba +303, 0x30a48ae4 +304, 0x58987e53 +305, 0x94ec6076 +306, 0x5d3308fa +307, 0xf1635ebb +308, 0x56a5ab90 +309, 0x2b2f2ee4 +310, 0x6f9e6483 +311, 0x8b93e327 +312, 0xa7ce140b +313, 0x4c8aa42 +314, 0x7657bb3f +315, 0xf250fd75 +316, 0x1edfcb0f +317, 0xdb42ace3 +318, 0xf8147e16 +319, 0xd1992bd +320, 0x64bb14d1 +321, 0x423e724d +322, 0x7b172f7c +323, 0x17171696 +324, 0x4acaf83b +325, 0x7a83527e +326, 0xfc980c60 +327, 0xc8b56bb +328, 0x2453f77f +329, 0x85ad1bf9 +330, 0x62a85dfe +331, 0x48238c4d +332, 0xbb3ec1eb +333, 0x4c1c039c +334, 0x1f37f571 +335, 0x98aecb63 +336, 0xc3b3ddd6 +337, 0xd22dad4 +338, 0xe49671a3 +339, 0xe3baf945 +340, 0xb9e21680 +341, 0xda562856 +342, 0xe8b88ce4 +343, 0x86f88de2 +344, 0x986faf76 +345, 0x6f0025c3 +346, 0x3fe21234 +347, 0xd8d3f729 +348, 0xc2d11c6f +349, 0xd4f9e8f +350, 0xf61a0aa +351, 0xc48bb313 +352, 0xe944e940 +353, 0xf1801b2e +354, 0x253590be +355, 0x981f069d +356, 0x891454d8 +357, 0xa4f824ad +358, 0x6dd2cc48 +359, 0x3018827e +360, 0x3fb329e6 +361, 0x65276517 +362, 0x8d2c0dd2 +363, 0xc965b48e +364, 0x85d14d90 +365, 0x5a51623c +366, 0xa9573d6a +367, 0x82d00edf +368, 0x5ed7ce07 +369, 0x1d946abc +370, 0x24fa567b +371, 0x83ef5ecc +372, 0x9001724a +373, 0xc4fe48f3 +374, 0x1e07c25c +375, 0xf4d5e65e +376, 0xb734f6e9 +377, 0x327a2df8 +378, 0x766d59b7 +379, 0x625e6b61 +380, 0xe82f32d7 +381, 0x1566c638 +382, 0x2e815871 +383, 0x606514aa +384, 0x36b7386e +385, 0xcaa8ce08 +386, 0xb453fe9c +387, 0x48574e23 +388, 0x71f0da06 +389, 0xa8a79463 +390, 0x6b590210 +391, 0x86e989db +392, 0x42899f4f +393, 0x7a654ef9 +394, 0x4c4fe932 +395, 0x77b2fd10 +396, 0xb6b4565c +397, 0xa2e537a3 +398, 0xef5a3dca +399, 0x41235ea8 +400, 0x95c90541 +401, 0x50ad32c4 +402, 0xc1b8e0a4 +403, 0x498e9aab +404, 0xffc965f1 +405, 0x72633485 +406, 0x3a731aef +407, 0x7cfddd0b +408, 0xb04d4129 +409, 0x184fc28e +410, 0x424369b0 +411, 0xf9ae13a1 +412, 0xaf357c8d +413, 0x7a19228e +414, 0xb46de2a8 +415, 0xeff2ac76 +416, 0xa6c9357b +417, 0x614f19c1 +418, 0x8ee1a53f +419, 0xbe1257b1 +420, 0xf72651fe +421, 0xd347c298 +422, 0x96dd2f23 +423, 0x5bb1d63e +424, 0x32e10887 +425, 0x36a144da +426, 0x9d70e791 +427, 0x5e535a25 +428, 0x214253da +429, 0x2e43dd40 +430, 0xfc0413f4 +431, 0x1f5ea409 +432, 0x1754c126 +433, 0xcdbeebbe +434, 0x1fb44a14 +435, 0xaec7926 +436, 0xb9d9a1e +437, 0x9e4a6577 +438, 0x8b1f04c5 +439, 0x19854e8a +440, 0x531080cd +441, 0xc0cbd73 +442, 0x20399d77 +443, 0x7d8e9ed5 +444, 0x66177598 +445, 0x4d18a5c2 +446, 0xe08ebf58 +447, 0xb1f9c87b +448, 0x66bedb10 +449, 0x26670d21 +450, 0x7a7892da +451, 0x69b69d86 +452, 0xd04f1d1c +453, 0xaf469625 +454, 0x7946b813 +455, 0x1ee596bd +456, 0x7f365d85 +457, 0x795b662b +458, 0x194ad02d +459, 0x5a9649b5 +460, 0x6085e278 +461, 0x2cf54550 +462, 0x9c77ea0b +463, 0x3c6ff8b +464, 0x2141cd34 +465, 0xb90bc671 +466, 0x35037c4b +467, 0xd04c0d76 +468, 0xc75bff8 +469, 0x8f52003b +470, 0xfad3d031 +471, 0x667024bc +472, 0xcb04ea36 +473, 0x3e03d587 +474, 0x2644d3a0 +475, 0xa8fe99ba +476, 0x2b9a55fc +477, 0x45c4d44a +478, 0xd059881 +479, 0xe07fcd20 +480, 0x4e22046c +481, 0x7c2cbf81 +482, 0xbf7f23de +483, 0x69d924c3 +484, 0xe53cd01 +485, 0x3879017c +486, 0xa590e558 +487, 0x263bc076 +488, 0x245465b1 +489, 0x449212c6 +490, 0x249dcb29 +491, 0x703d42d7 +492, 0x140eb9ec +493, 0xc86c5741 +494, 0x7992aa5b +495, 0xb8b76a91 +496, 0x771dac3d +497, 0x4ecd81e3 +498, 0xe5ac30b3 +499, 0xf4d7a5a6 +500, 0xac24b97 +501, 0x63494d78 +502, 0x627ffa89 +503, 0xfa4f330 +504, 0x8098a1aa +505, 0xcc0c61dc +506, 0x34749fa0 +507, 0x7f217822 +508, 0x418d6f15 +509, 0xa4b6e51e +510, 0x1036de68 +511, 0x1436986e +512, 0x44df961d +513, 0x368e4651 +514, 0x6a9e5d8c +515, 0x27d1597e +516, 0xa1926c62 +517, 0x8d1f2b55 +518, 0x5797eb42 +519, 0xa90f9e81 +520, 0x57547b10 +521, 0xdbbcca8e +522, 0x9edd2d86 +523, 0xbb0a7527 +524, 0x7662380c +525, 0xe7c98590 +526, 0x950fbf3f +527, 0xdc2b76b3 +528, 0x8a945102 +529, 0x3f0a1a85 +530, 0xeb215834 +531, 0xc59f2802 +532, 0xe2a4610 +533, 0x8b5a8665 +534, 0x8b2d9933 +535, 0x40a4f0bc +536, 0xaab5bc67 +537, 0x1442a69e +538, 0xdf531193 +539, 0x698d3db4 +540, 0x2d40324e +541, 0x1a25feb2 +542, 0xe8cc898f +543, 0xf12e98f5 +544, 0xc03ad34c +545, 0xf62fceff +546, 0xdd827e1e +547, 0x7d8ccb3b +548, 0xab2d6bc1 +549, 0xc323a124 +550, 0x8184a19a +551, 0xc3c4e934 +552, 0x5487424d +553, 0xd6a81a44 +554, 0x90a8689d +555, 0xe69c4c67 +556, 0xbdae02dd +557, 0x72a18a79 +558, 0x2a88e907 +559, 0x31cf4b5d +560, 0xb157772f +561, 0x206ba601 +562, 0x18529232 +563, 0x7dac90d8 +564, 0x3a5f8a09 +565, 0x9f4b64a3 +566, 0xae373af9 +567, 0x1d79447c +568, 0x2a23684b +569, 0x41fb7ba4 +570, 0x55e4bb9e +571, 0xd7619d3e +572, 0xc04e4dd8 +573, 0x8418d516 +574, 0x2b2ca585 +575, 0xfa8eedf +576, 0x5bafd977 +577, 0x31974fb0 +578, 0x9eb6697b +579, 0xc8be22f5 +580, 0x173b126a +581, 0x8809becf +582, 0x3e41efe1 +583, 0x3d6cbbb8 +584, 0x278c81d8 +585, 0xa6f08434 +586, 0xa0e6601d +587, 0x2fccd88d +588, 0x3cbc8beb +589, 0x5f65d864 +590, 0xa1ff8ddf +591, 0x609dcb7c +592, 0x4a4e1663 +593, 0xeae5531 +594, 0x962a7c85 +595, 0x1e110607 +596, 0x8c5db5d0 +597, 0xc7f2337e +598, 0xc94fcc9c +599, 0xe7f62629 +600, 0x6c9aa9f8 +601, 0x2e27fe0e +602, 0x4d0dae12 +603, 0x9eecf588 +604, 0x977ba3f2 +605, 0xed0a51af +606, 0x3f3ec633 +607, 0xc174b2ec +608, 0x590be8a9 +609, 0x4f630d18 +610, 0xf579e989 +611, 0xe2a55584 +612, 0xee11edcd +613, 0x150a4833 +614, 0xc0a0535c +615, 0xb5e00993 +616, 0xb6435700 +617, 0xa98dbff +618, 0x315716af +619, 0x94395776 +620, 0x6cbd48d9 +621, 0xab17f8fc +622, 0xa794ffb7 +623, 0x6b55e231 +624, 0x89ff5783 +625, 0x431dcb26 +626, 0x270f9bf8 +627, 0x2af1b8d0 +628, 0x881745ed +629, 0x17e1be4e +630, 0x132a0ec4 +631, 0x5712df17 +632, 0x2dfb3334 +633, 0xf5a35519 +634, 0xcafbdac6 +635, 0x73b6189d +636, 0x10107cac +637, 0x18c1045e +638, 0xbc19bbad +639, 0x8b4f05ac +640, 0x5830d038 +641, 0x468cd98a +642, 0x5b83a201 +643, 0xf0ccdd9c +644, 0xcb20c4bd +645, 0x1ff186c9 +646, 0xcdddb47f +647, 0x5c65ce6 +648, 0xb748c580 +649, 0x23b6f262 +650, 0xe2ba8e5c +651, 0x9a164a03 +652, 0x62d3322e +653, 0x918d8b43 +654, 0x45c8b49d +655, 0xce172c6e +656, 0x23febc6 +657, 0x84fdc5b7 +658, 0xe7d1fd82 +659, 0xf0ddf3a6 +660, 0x87050436 +661, 0x13d46375 +662, 0x5b191c78 +663, 0x2cbd99c0 +664, 0x7686c7f +665, 0xcff56c84 +666, 0x7f9b4486 +667, 0xefc997fe +668, 0x984d4588 +669, 0xfa44f36a +670, 0x7a5276c1 +671, 0xcfde6176 +672, 0xcacf7b1d +673, 0xcffae9a7 +674, 0xe98848d5 +675, 0xd4346001 +676, 0xa2196cac +677, 0x217f07dc +678, 0x42d5bef +679, 0x6f2e8838 +680, 0x4677a24 +681, 0x4ad9cd54 +682, 0x43df42af +683, 0x2dde417 +684, 0xaef5acb1 +685, 0xf377f4b3 +686, 0x7d870d40 +687, 0xe53df1c2 +688, 0xaeb5be50 +689, 0x7c92eac0 +690, 0x4f00838c +691, 0x91e05e84 +692, 0x23856c80 +693, 0xc4266fa6 +694, 0x912fddb +695, 0x34d42d22 +696, 0x6c02ffa +697, 0xe47d093 +698, 0x183c55b3 +699, 0xc161d142 +700, 0x3d43ff5f +701, 0xc944a36 +702, 0x27bb9fc6 +703, 0x75c91080 +704, 0x2460d0dc +705, 0xd2174558 +706, 0x68062dbf +707, 0x778e5c6e +708, 0xa4dc9a +709, 0x7a191e69 +710, 0xc084b2ba +711, 0xbb391d2 +712, 0x88849be +713, 0x69c02714 +714, 0x69d4a389 +715, 0x8f51854d +716, 0xaf10bb82 +717, 0x4d5d1c77 +718, 0x53b53109 +719, 0xa0a92aa0 +720, 0x83ecb757 +721, 0x5325752a +722, 0x114e466e +723, 0x4b3f2780 +724, 0xa7a6a39c +725, 0x5e723357 +726, 0xa6b8be9b +727, 0x157c32ff +728, 0x8b898012 +729, 0xd7ff2b1e +730, 0x69cd8444 +731, 0x6ad8030c +732, 0xa08a49ec +733, 0xfbc055d3 +734, 0xedf17e46 +735, 0xc9526200 +736, 0x3849b88a +737, 0x2746860b +738, 0xae13d0c1 +739, 0x4f15154f +740, 0xd65c3975 +741, 0x6a377278 +742, 0x54d501f7 +743, 0x81a054ea +744, 0x143592ba +745, 0x97714ad6 +746, 0x4f9926d9 +747, 0x4f7ac56d +748, 0xe87ca939 +749, 0x58b76f6f +750, 0x60901ad8 +751, 0x3e401bb6 +752, 0xa058468e +753, 0xc0bb14f6 +754, 0x2cb8f02a +755, 0x7c2cf756 +756, 0x34c31de5 +757, 0x9b243e83 +758, 0xa5c85ab4 +759, 0x2741e3b3 +760, 0x1249000e +761, 0x3fc4e72b +762, 0xa3e038a2 +763, 0x952dd92c +764, 0x2b821966 +765, 0xfa81b365 +766, 0x530919b9 +767, 0x4486d66f +768, 0xccf4f3c1 +769, 0xa8bddd1d +770, 0xcc295eb9 +771, 0xfccbe42f +772, 0x38bacd8d +773, 0x2261854f +774, 0x56068c62 +775, 0x9bdaeb8 +776, 0x555fa5b6 +777, 0x20fe615e +778, 0x49fb23d3 +779, 0xd093bad6 +780, 0x54919e86 +781, 0x7373eb24 +782, 0xfbaa7a98 +783, 0x5f62fb39 +784, 0xe03bc9ec +785, 0xa5074d41 +786, 0xa1cefb1 +787, 0x13912d74 +788, 0xf6421b8 +789, 0xfcb48812 +790, 0x8f1db50b +791, 0xc1654b87 +792, 0x948b43c2 +793, 0xf503ef77 +794, 0x117d891d +795, 0x5493ffa +796, 0x171313b1 +797, 0xa4b62e1e +798, 0x77454ea6 +799, 0xbea0aff0 +800, 0x13c36389 +801, 0xe3b60bac +802, 0xa176bed3 +803, 0x2863d428 +804, 0xe2314f46 +805, 0xa85cd3d4 +806, 0x7866e57 +807, 0x8f03f5bc +808, 0x239ae +809, 0x46f279fb +810, 0xcca00559 +811, 0xaa07a104 +812, 0x89123d08 +813, 0x2e6856ba +814, 0x43a9780d +815, 0x676cff25 +816, 0x6744b87d +817, 0xee260d4f +818, 0xb98d8b77 +819, 0x9b0ca455 +820, 0x659f6fe +821, 0x28d20d1c +822, 0x601f2657 +823, 0xdec3073e +824, 0x61263863 +825, 0x1a13435a +826, 0x27497d1e +827, 0x17a8458e +828, 0xdddc407d +829, 0x4bb2e8ac +830, 0x16b2aedb +831, 0x77ccd696 +832, 0x9d108fcd +833, 0x25ad233e +834, 0xaa9bc370 +835, 0xa873ab50 +836, 0xaf19c9d9 +837, 0x696e1e6b +838, 0x1fdc4bf4 +839, 0x4c2ebc81 +840, 0xde4929ed +841, 0xf4d0c10c +842, 0xb6595b76 +843, 0x75cbb1b3 +844, 0xbcb6de49 +845, 0xe23157fd +846, 0x5e596078 +847, 0xa69b0d29 +848, 0x2118a41 +849, 0x7088c16 +850, 0xc75e1e1 +851, 0x6a4af2d6 +852, 0xf19c6521 +853, 0xaff7b3b1 +854, 0x615295c7 +855, 0xbda3a8d7 +856, 0x5b5ca72e +857, 0xdad9d80f +858, 0xfa81c084 +859, 0xf4703fa +860, 0x3ca54540 +861, 0xa8961d51 +862, 0x53d1ecc2 +863, 0x808d83b6 +864, 0x68e8c48e +865, 0x89be2039 +866, 0x9088ea11 +867, 0xb8665d12 +868, 0x91272f9 +869, 0x53dddff2 +870, 0xb7a54ab +871, 0xd2b645ca +872, 0x99fb8590 +873, 0x5315c8e +874, 0x2a913806 +875, 0x7f15eb2b +876, 0xa7f1cc5d +877, 0xbb2ee836 +878, 0xd9fafd60 +879, 0x17448d6f +880, 0x999ec436 +881, 0x482ec606 +882, 0x9b403c0e +883, 0x569eb51b +884, 0xb275d1a6 +885, 0xadd29c31 +886, 0xb7ebdb15 +887, 0xdfef3662 +888, 0x51aba6db +889, 0x6d41946d +890, 0x77bf8896 +891, 0xcafa6fab +892, 0x976ab40f +893, 0x49a6d86b +894, 0x56639e55 +895, 0x9945b996 +896, 0x81459b50 +897, 0xbce97542 +898, 0xe397c9c9 +899, 0x247a5955 +900, 0xb72b1573 +901, 0x86306f86 +902, 0x34f65dc5 +903, 0x909360c0 +904, 0xf3f696ef +905, 0xcb9faae5 +906, 0x93daecd9 +907, 0xde1af7af +908, 0x43a1f2d +909, 0x6d75cde5 +910, 0x9e412b6 +911, 0x5673fed +912, 0x16bb511a +913, 0x35ef4cca +914, 0x4e615aca +915, 0x5cdaf47a +916, 0x26676047 +917, 0x8c199325 +918, 0x2adf0cb9 +919, 0x84f2e6fd +920, 0x5e627f64 +921, 0xb7cee354 +922, 0x542ab4a6 +923, 0xe59cd83b +924, 0x89cc3f10 +925, 0x92b0f5f +926, 0xc1328370 +927, 0x8208d9f7 +928, 0x68eb00cf +929, 0xfadd4ac4 +930, 0x2517784f +931, 0x4042b99 +932, 0x75ce0230 +933, 0x97c5a1b4 +934, 0x1a97f709 +935, 0x4c62781e +936, 0xf530a83 +937, 0x75776413 +938, 0x321c7240 +939, 0x6afe4e36 +940, 0xad00a2b4 +941, 0xbc05477d +942, 0xb0911e80 +943, 0x9935b87d +944, 0xd535eec5 +945, 0x149af45e +946, 0x786934b0 +947, 0xbc13cdac +948, 0x208bfa2e +949, 0xcf4b39cc +950, 0x6ac6c172 +951, 0xbfa9a37 +952, 0x42d28db6 +953, 0x2bf1ea63 +954, 0xbed6e677 +955, 0x50325d27 +956, 0xa79d3b8b +957, 0x52448bb1 +958, 0xefaad1bd +959, 0x833a2e54 +960, 0xd9de549a +961, 0x9f59672f +962, 0x9d5f5f16 +963, 0x1c914489 +964, 0xc08fa058 +965, 0xb188698b +966, 0xdc4672b5 +967, 0x594f720e +968, 0x56ed428f +969, 0x9b0898af +970, 0x8a64d3d5 +971, 0x773308d6 +972, 0x84d62098 +973, 0x46da7cf9 +974, 0x1114eae7 +975, 0xf9f2a092 +976, 0x5363a28 +977, 0xf2db7b3a +978, 0x102c71a9 +979, 0xe8e76aaf +980, 0x77a97b3b +981, 0x77b090d +982, 0x1099620e +983, 0xa6daaae6 +984, 0x86ff4713 +985, 0xc0ef85b8 +986, 0xf621d409 +987, 0xfd1561e2 +988, 0x4bcc687d +989, 0x596f760 +990, 0x7c8819f9 +991, 0x8cb865b8 +992, 0xadea115a +993, 0x56609348 +994, 0xb321ac14 +995, 0x1bac7db2 +996, 0x5fe6ee2 +997, 0xe9bfe072 +998, 0x15549e74 +999, 0xad8c191b diff --git a/local_packages/numpy/random/tests/data/pcg64-testset-1.csv b/local_packages/numpy/random/tests/data/pcg64-testset-1.csv new file mode 100644 index 0000000..0c8271f --- /dev/null +++ b/local_packages/numpy/random/tests/data/pcg64-testset-1.csv @@ -0,0 +1,1001 @@ +seed, 0xdeadbeaf +0, 0x60d24054e17a0698 +1, 0xd5e79d89856e4f12 +2, 0xd254972fe64bd782 +3, 0xf1e3072a53c72571 +4, 0xd7c1d7393d4115c9 +5, 0x77b75928b763e1e2 +6, 0xee6dee05190f7909 +7, 0x15f7b1c51d7fa319 +8, 0x27e44105f26ac2d7 +9, 0xcc0d88b29e5b415 +10, 0xe07b1a90c685e361 +11, 0xd2e430240de95e38 +12, 0x3260bca9a24ca9da +13, 0x9b3cf2e92385adb7 +14, 0x30b5514548271976 +15, 0xa3a1fa16c124faf9 +16, 0xf53e17e918e45bb6 +17, 0x26f19faaeb833bfc +18, 0x95e1d605730cce1b +19, 0xa7b520c5c093c1aa +20, 0x4b68c010c9b106a3 +21, 0x25e19fe91df703f0 +22, 0x898364bb0bf593cb +23, 0x5bd6ab7dbaa125db +24, 0xd1fe47f25152045c +25, 0x3bb11919addf2409 +26, 0x26a8cb7b3f54af8 +27, 0xe6a27ee11200aa24 +28, 0x7cb585ab01e22000 +29, 0x78e60028676d2ef3 +30, 0x5c32535e5a899528 +31, 0x83e8b6f8c4a46fb3 +32, 0xe56ef7668a161246 +33, 0x36dcbc15aeb73055 +34, 0x5ea247f0bd188acb +35, 0x438b547b84601a80 +36, 0x8acda2a1273e9e3d +37, 0x2b05e30a4b40c24c +38, 0xfd87236bd13af032 +39, 0x471df211d8d985ef +40, 0x18e8a5609a793292 +41, 0x46f0951fab6dc4e3 +42, 0x6c199c4e700f6795 +43, 0xf04aa16bfb7d22cb +44, 0xd763d269fbaffc89 +45, 0x9991930cefbe5c2b +46, 0xb2a11b953f824c96 +47, 0x63fd9f52172c44b0 +48, 0x183bdad907b1d848 +49, 0xe17953cddb931c52 +50, 0x515cf16726ec205a +51, 0x88c327605150711a +52, 0xc7090dd79cbc8dc3 +53, 0xcb487cedeb00a350 +54, 0xc8abf254d87b657 +55, 0xd43cc4cbfb493d1a +56, 0x8705452e5d9ed1e +57, 0xcecd11446769cf43 +58, 0xde72156c8d65bc69 +59, 0x796a8f0f47d52ee8 +60, 0xb4c0da443917d6c3 +61, 0xe07ad7568a8e3dc3 +62, 0xc24a8da39ce6dc21 +63, 0x92b21ea80a8556eb +64, 0x572f21e531edf3af +65, 0x9b917ed56bbed198 +66, 0xe65fd8ddc5ab3d7d +67, 0xf55a80a8ec84fa18 +68, 0x18fc22e1a5227b61 +69, 0x72305dc7eeaa79d3 +70, 0x47ce58a36e7592cf +71, 0x14c6374340c0f7cc +72, 0x6f98273d4eb5a2c +73, 0x59a8702c46fe8f8a +74, 0xb67cbd8113cfe57f +75, 0xaa03c5db5f5b7690 +76, 0x3fb0f77ea4568013 +77, 0x756530990398b26e +78, 0x4c1952b2a3a6a343 +79, 0x1da15c5383074582 +80, 0xb405b21c81c274f7 +81, 0xbe664677a16788b +82, 0x9d2e37550bcee656 +83, 0x8b4589f0d9defe02 +84, 0x2935f018ee06a59 +85, 0x3834bf88be97ed11 +86, 0xa610d049cea79b6d +87, 0xd49ffc0d09a59ea9 +88, 0x4073365b76567adf +89, 0x499eefb9bb7513e2 +90, 0x74a743ee6b0138a9 +91, 0x3bf0880f2d947594 +92, 0x555d1c0498600a99 +93, 0x923b32a88ef2ffa4 +94, 0x7325411065fbedea +95, 0x9f4129ff8b79d300 +96, 0xab2b0a9b8a3785dc +97, 0x11734bdfba3a1713 +98, 0xc8333398841ba585 +99, 0xee2409cc234e6742 +100, 0xf6638e700872ecd2 +101, 0x10875300c13cd284 +102, 0x27a9bbed7c15b2d3 +103, 0x3c87f8fef31ce9bd +104, 0x92be263cd0914a95 +105, 0xa7b0f11bc742307e +106, 0x4a56f788cc1c1a3c +107, 0x4a130fa32257a48b +108, 0x5d4d9eda16e90286 +109, 0x7cc2af564844bedc +110, 0x2532867bfe7cda1a +111, 0xb1c504676611fd17 +112, 0xce8e86cfb4189aee +113, 0x99685898980d1970 +114, 0x8c3b67db23bcf1e +115, 0x73e14c93905b135f +116, 0xf0271b64ac2bd4d3 +117, 0xf4beba82f3ec1b2d +118, 0x1cdbf3ee9f210af +119, 0x2e938557c09c3ea6 +120, 0x2d314ccfa6ffd81d +121, 0x31ad47079950ade4 +122, 0x342b27547b900872 +123, 0x171b0e20b9ef1a76 +124, 0xdf10ce6318b03654 +125, 0x1d625df4aa718897 +126, 0x8712715a9f6e02ec +127, 0xb4a072da725bca3b +128, 0x19d346cb7734bd42 +129, 0xfd4281d311cb2958 +130, 0x58274c9519fc8789 +131, 0x4cacf29d885fd544 +132, 0x784b14d1c2523b80 +133, 0x2d25242131bb2373 +134, 0xcd2a5e43a7d9abf9 +135, 0x15eda3806e650ecb +136, 0xdaac5e277d764d96 +137, 0xdc5a5dd59aaa94e0 +138, 0x40d00237a46d5999 +139, 0x6205dd35a692743f +140, 0xbbd8236740361f09 +141, 0x1625c9f4e7288bf9 +142, 0xb74f12df1479e3ce +143, 0xb2d72a51b43d7131 +144, 0xf006a324b3707c83 +145, 0x28e8ab4abe7655b8 +146, 0xfb480093ad7ab55 +147, 0x3f8abd0d6ff8d272 +148, 0xc81a94177ac26bb7 +149, 0x3cdc178307751b14 +150, 0x9de84cc2b10ba025 +151, 0x3f8ab5aefcd046e2 +152, 0x43bdb894e1ee83b2 +153, 0xe288a40f3f06ac9d +154, 0xdab62a7d04b4f30f +155, 0x49f4e20295e1a805 +156, 0x3643764805e0edef +157, 0x9449954618b6b +158, 0x6c87e0d4508e0ce0 +159, 0x3a334be688a9dd7b +160, 0xb35c39228776e499 +161, 0xc4118bfff938490e +162, 0x88cbde3dcbb034b2 +163, 0xf91b287793c417c3 +164, 0x42b15f731a59f5b3 +165, 0xffa27104bbe4814d +166, 0x1b6789d138beccde +167, 0x542c2c1440d0ceb9 +168, 0x367294504d18fa0d +169, 0xf918b60e804a1b58 +170, 0xd390964e33a9d0e3 +171, 0x23bb1be7c4030fe8 +172, 0x9731054d039a8afb +173, 0x1a6205026b9d139b +174, 0x2fa13b318254a07e +175, 0x69571de7d8520626 +176, 0x641a13d7c03332b7 +177, 0x76a6237818f7a441 +178, 0x4e77860d0c660d81 +179, 0x4441448a1c1cbdb2 +180, 0xccd7783a042046e5 +181, 0xf620d8e0805e3200 +182, 0x7de02971367fdd0c +183, 0x539c263c5914cab1 +184, 0x9c3b9ba1a87bbf08 +185, 0x6d95baa34cda215f +186, 0x2db3f83ace0bac5f +187, 0x7f5af1da2dc670a4 +188, 0xfcc098d16c891bfb +189, 0x81a33df1d7a5ab12 +190, 0x767b0f863c8e9882 +191, 0x7a92983830de483d +192, 0xfa7598c37a79ac25 +193, 0xb89b3ca42ce03053 +194, 0x457a542b8efed4f7 +195, 0x571b7737fd0eeda7 +196, 0xa0f59e524485c0a +197, 0x82dca766b7901efd +198, 0xa68243caf6a3bd5d +199, 0x1bac981c6c740e5e +200, 0xbcd51bedf9103e44 +201, 0x4e197efd3ae5a7bf +202, 0x523568efd782268b +203, 0x5ec4ef1191fef09 +204, 0xed751ed5e31c9ab +205, 0x44eac24de03e1b29 +206, 0x9237d57c011d3fb3 +207, 0xa8c6da0f7692f235 +208, 0x9f9eb6bc15d6cac7 +209, 0x34bb8e0c93427aad +210, 0x115febd738eaac4a +211, 0xa439991ed139d27a +212, 0x45c7c2633d8710a2 +213, 0x48b7475f3405a3ce +214, 0x80158497c77bd00b +215, 0x935c316a5b1657cb +216, 0x59c5d54440e9695e +217, 0x337c78c5b3d0ede2 +218, 0x8c46bb956b93790d +219, 0xbf1dd03e471d71c5 +220, 0x2d375e90a4bef583 +221, 0xd0365428331b3790 +222, 0xfcd3969ac827ecd4 +223, 0x392fb6c580498410 +224, 0x6d6db4ceab5ea6c0 +225, 0x9bf84f1972e24786 +226, 0x798dfd820959dcc5 +227, 0x2e425095e65e8bfb +228, 0x8c1aa11536b1c9c3 +229, 0xd28e2ef9b12f6f74 +230, 0x86583bc98c8f78d2 +231, 0x489877530e3f93e7 +232, 0xb1d9430631104a15 +233, 0x1814f6098e6263bd +234, 0x8e2658a4e0d4cd53 +235, 0x5afe20e2531cdb2a +236, 0x30d02f7c4755c9bf +237, 0xe1e217cda16ed2d2 +238, 0xccb4913a42e3b791 +239, 0xfff21363ac183226 +240, 0xe788690bbda147a7 +241, 0x76905cf5917bfc6a +242, 0x2a8fa58f7916f52c +243, 0xf903c0cc0357815a +244, 0x15d20f243a4998d2 +245, 0x5b7decee5a86ea44 +246, 0x114f7fc421211185 +247, 0x328eb21715764c50 +248, 0xaffaa3f45c0678fd +249, 0x2579e6ef50378393 +250, 0x7610ab7743c19795 +251, 0xf9923d2bd101b197 +252, 0x57e42e7a62ba7e53 +253, 0x9f1dc217b4f02901 +254, 0x88a9ebd86509b234 +255, 0x867fc926aecc8591 +256, 0xaf22c1bfef04c718 +257, 0x39f701f0313f4288 +258, 0x6171ad397e6faab2 +259, 0x239bb5b9abdec4fc +260, 0xd9a591e25dd01c6e +261, 0x826dc4a75b628e49 +262, 0xf112b152c408f47 +263, 0x6843a06110f86c0 +264, 0x965e56a7185c1332 +265, 0x8d84492edbc71710 +266, 0xeee8ec111cfd1319 +267, 0xf2858e94ad98e458 +268, 0xbc9589fdf5f3a97e +269, 0xaf0ceef3bc375130 +270, 0x48f4aaf13fa75c1e +271, 0x111e9db47bee758f +272, 0xea3171df130164ba +273, 0x2a7bbe30bf827ab6 +274, 0xc516c3fdbf758c35 +275, 0xec55097754b04be5 +276, 0x374a997d52b6d3e6 +277, 0x487df5456085ffbc +278, 0x528883b84df8eafe +279, 0x805f77ab5ba26f86 +280, 0x8eb81477dc04f213 +281, 0x471ea08ec6794d72 +282, 0x69d3667ecc4d2176 +283, 0x98b7b6e295548a66 +284, 0x3877713c173f8f2 +285, 0xa00542570d0e8de3 +286, 0xf534b1bfa4033e50 +287, 0x7e1fedeac8bf6b26 +288, 0x8043f37c89628af4 +289, 0x1dd7039ec295e86d +290, 0xce9c05b763a40cc4 +291, 0x246926481e61028f +292, 0xb7cb0f1babf5893b +293, 0xefe6b777f37fc63e +294, 0xebbcabb4cb35cdcb +295, 0x39fa63cd711eeea9 +296, 0xad5d3ba7aaf30c8d +297, 0x8e9e78fe46021990 +298, 0xc7eaef6e7d5a3c62 +299, 0xefccdd5495d3f386 +300, 0x2179557ee8cfc76a +301, 0x88a77f621f0885ce +302, 0xafda62674543d90c +303, 0xb8e6fbe2e13e56c0 +304, 0x8bfbbe26a14f9b1a +305, 0x1404f59f5851f8c3 +306, 0x1140c53a0489566d +307, 0x3edf2d138b5c3f1d +308, 0x75d6bb275d817dc +309, 0x8e660ae27107664e +310, 0x7a8021038ee303e1 +311, 0x2042ef5eefa9079f +312, 0xe3e7b90bbf6d457a +313, 0xf3f819d2bb9405b +314, 0x522e42155cae0c10 +315, 0xf5bfbb975b40e233 +316, 0x2cf82b614dd95cfa +317, 0x183ef4a96bc40e55 +318, 0x9f6e351c5ba4e752 +319, 0x37c1110683c90846 +320, 0x1d89b7a996d8a977 +321, 0x18a444f77c7cb4d9 +322, 0xd0a8a971b78dc893 +323, 0x860232fb9e6543f1 +324, 0x60b6097f51002555 +325, 0xca1e5214123e3894 +326, 0xe03fe695c95f99bb +327, 0x2c7c6779d5f03622 +328, 0xafeeee42f63055d1 +329, 0x670dde905515936a +330, 0x9a922f42b59fb094 +331, 0xddb5ff49af5a651a +332, 0xe61b04c9e58ebbf8 +333, 0x4e459dcf272e7fc4 +334, 0xd549e92c16adceeb +335, 0x7a17dba1299d4a9c +336, 0x825d756109f2b585 +337, 0xba142e61a9cb203e +338, 0xc2a19f00e9c04a30 +339, 0x2d0f8140d23d0652 +340, 0x8b866d4d4d6caaf4 +341, 0x4f11d90dd91f8217 +342, 0xf6efc37373b9e0d +343, 0x248493d6cd6a4736 +344, 0xd12b6ae74a951a3e +345, 0x56e34722070b70a7 +346, 0x22d3f201cc9fa0eb +347, 0xbfdcc320008291b7 +348, 0x1a7a6922e9204fbd +349, 0x831421e0c4945ae4 +350, 0x66316feddddf0e11 +351, 0xa8c86a1517456554 +352, 0x14a9049ad989e335 +353, 0x837022259f141ecd +354, 0xcb71793a06c261f7 +355, 0x4aeefc07ebe09a79 +356, 0x8982f15aa3b6594b +357, 0x67bccfa7ed9b0d5b +358, 0xb377463b523e9dec +359, 0x53d3d594870fecb7 +360, 0xa5274b1caec5a60a +361, 0xd6316d0cb643db39 +362, 0xabc1a9b536de88ce +363, 0xed2fdb1383d2a077 +364, 0x12319c6feb97221b +365, 0x7e0f6cd40ef47403 +366, 0x86135c84fe26dbf8 +367, 0xc96622d3fbbee19b +368, 0xe3989d8d8511573f +369, 0x42cc365554d1fdc7 +370, 0x4c1a1eb8bbce8b4f +371, 0xfc4e30e7ef2034c1 +372, 0xc490444317a91e76 +373, 0x7ccdf469ff5dc81c +374, 0xf5a0da4110cc09d7 +375, 0x505227baf34c0fb5 +376, 0xbe58737e8a35cc88 +377, 0xd449bee91b3e8c41 +378, 0x3e590e23299d0e6 +379, 0x291a7d9e0a64caf7 +380, 0xdc6fafbdfebd2293 +381, 0x8223f1e259fe8a65 +382, 0x6186fbc9efd9e3df +383, 0xfda39b07e4007ffb +384, 0xfc19aea98574dc02 +385, 0xd0e10d354fcacd8c +386, 0xc9619916544a55a5 +387, 0xd454d50a8c8558cd +388, 0xcd94a246712d91e +389, 0x76a771f5d1231cce +390, 0xdd20cb2b7b370ee5 +391, 0xa6f4f50feca57c49 +392, 0x78c8fb431f17ab9c +393, 0x1b692b79a59b43cc +394, 0x4c45045d287da7e6 +395, 0x522132e18bf43928 +396, 0x25c458983138b41c +397, 0x2a1fb426ef229796 +398, 0x74dc324c74e5dd3d +399, 0x6df75e3eb6eb5374 +400, 0xb63f2f4f9ca25b61 +401, 0xac72286112ee54d6 +402, 0x5a966f3d0a6863c4 +403, 0x8d7046bc64a46fc2 +404, 0xa7b740fd6e3087eb +405, 0xcdbcbe0340cfcdf5 +406, 0xcb632613bf312b65 +407, 0xa91b3f2c2aac238b +408, 0xa06deb3f5ae555a3 +409, 0x29d72e1f8db69 +410, 0x2d004bae09728ea6 +411, 0xc6eee5dce0736cc1 +412, 0xa7493145500ff60f +413, 0xc4d68c4aa18ab93c +414, 0x8210c29e79d48d7f +415, 0xd0999d7889ecbef6 +416, 0x6e3bd61e66e93566 +417, 0xe6cc13d47d7d7b1f +418, 0x3d6f181f42e03979 +419, 0xbed4e14fd867604a +420, 0xbe511c84067bd86d +421, 0x49a876d89e697d38 +422, 0xc04c3dde8f889c98 +423, 0xaf293eeab0f53e3f +424, 0x9f6291dd65732cd6 +425, 0xd7811ac01de78c01 +426, 0xe385cf0261d50ec2 +427, 0x5a64134b3542bbf +428, 0xf9d1302bc6f13a68 +429, 0x5d2aabbea37d8c31 +430, 0xd9842e99a5192970 +431, 0x713eadc4cd30e837 +432, 0xb7b002fc72abb413 +433, 0x276cfeea526af1cf +434, 0x8519fe79b633a0ce +435, 0x2f0e87363705a3e2 +436, 0x9adbac0be3c371e7 +437, 0xf3f44ba899a6173c +438, 0x782d6c29618fde2b +439, 0x7f61062acec408f +440, 0x6e79cd836359258f +441, 0x5c8e9b138df5785a +442, 0xa54359c9f39a9a84 +443, 0xeec3f033135084b0 +444, 0x883ee717787a535c +445, 0x9a2422b513a73b00 +446, 0x2dd4beddcdd64a58 +447, 0x90c8a13202239c7b +448, 0x85b352ab759646d9 +449, 0x139f5cb2e46c53aa +450, 0xe1d3ba6c721c66d1 +451, 0xaa66e0edc4b60a98 +452, 0x3521275c75be29b6 +453, 0x490a5190b3edfa5d +454, 0xd2abcdd2ccb2f14e +455, 0x9d9be8bef4a5857d +456, 0xde19676f13ef7755 +457, 0xdac2fee2e42615f3 +458, 0xf4239801cb02f2ab +459, 0xaa8bf923ed91875c +460, 0x61d18a1940e4c7c0 +461, 0x1eb6aa3d5f077a6d +462, 0xee7374c063bf29d8 +463, 0x2f0a59e34d76268d +464, 0xc92e80e17d1eb3e9 +465, 0xafd05b3ec3d2ca72 +466, 0x28a61ad8d6c497b8 +467, 0xa7094d6834ad7d47 +468, 0x57d80ea9eccbb4f +469, 0xb047e0fee6cdaf16 +470, 0x44f41b5eb48c00bb +471, 0xd6dc8e1eb9c8c9ba +472, 0x47adfd2c638c7849 +473, 0x365d63db7d526c68 +474, 0xc21cda439016135d +475, 0x14d10c3f0f98863c +476, 0xa93e56f74e037602 +477, 0x3b4e9c8915bdc9 +478, 0xb46f5ae155e54aa2 +479, 0x8e470d21ce1943e1 +480, 0x60b96301b5ba2e8d +481, 0x1b473a41d381f9ff +482, 0xabcf5a8e3269e73f +483, 0xd410f6e94fb21fa1 +484, 0x65d1a47eebf87e5e +485, 0x48eaa201c61cb843 +486, 0x212c1abc2499bfc5 +487, 0x4255ad8377d2d8d +488, 0x44caeef472010612 +489, 0xffae764524f572f2 +490, 0x78d374d20c9ee550 +491, 0x6e003206c0511cee +492, 0x7998a159145bfb82 +493, 0x921239650bda1d4d +494, 0xae05025509bcfdc5 +495, 0xc6430c980be407b4 +496, 0x78524f1744b153f1 +497, 0x84089e6f468181fe +498, 0x8d0d21d7dfb6c254 +499, 0x90bad90502a33603 +500, 0x3072a403cbd16315 +501, 0xdfadddf3f1c040c2 +502, 0x22f0b0639d9ff975 +503, 0xb49e48a4cad0765b +504, 0x95a0a04f8239709d +505, 0x56e147a24a4c481f +506, 0xacf16ef61dea4c7e +507, 0x424040afd2700de6 +508, 0xc67e8096a3c717a9 +509, 0x39f164181dd0a399 +510, 0x2449cedc1d62198c +511, 0x7a53df11a1f1a61c +512, 0x5596f1d4a3badae3 +513, 0x38ed4c822072b3d0 +514, 0xf07ef346b3fd730a +515, 0xfd349c35c3ed51fd +516, 0x2f15c9c7890f8f32 +517, 0x3b470df52b173c29 +518, 0xd31bfc8981281af7 +519, 0xbbcc9bdf561215bb +520, 0x5782fffea326574f +521, 0xb0ebdcfcc5e03290 +522, 0x7fd89d93d2b3fbef +523, 0x280ea1865d9ba2 +524, 0xe726959845b2c100 +525, 0xd0361f032cd7dbb1 +526, 0x3c65ec2028b81a22 +527, 0x5221e9b2188920bf +528, 0xeb5ab27c4125ec20 +529, 0x80a32dd48b54f0a4 +530, 0x369b5ced1012bebb +531, 0x582d35d76530bc6f +532, 0x7b50dc9b48e1e37d +533, 0x37fdfe8bbacf8dad +534, 0x7a0cb7e6e93840ea +535, 0xa1132c870be0b2ce +536, 0x9d8ac2c68267cd1a +537, 0x470969b647fa7df4 +538, 0xabcb7d8adf7e2d24 +539, 0xacdebec9bdf9eb1c +540, 0xe30f4cbf7eb6a59 +541, 0x746673836c4df41d +542, 0x75120a6b647bb326 +543, 0x2f4eab556c3f6878 +544, 0xd84651ab05405b7a +545, 0x9e695808b9622284 +546, 0xc93b71e56aa6e1a5 +547, 0x2be7f3be4a7b7050 +548, 0x6497e910b6733241 +549, 0xcf7050dfd08076fc +550, 0x4e3cc156eca183f7 +551, 0xf801a33d9326c265 +552, 0x6aa293c8a47d40e6 +553, 0x28c429755faa6230 +554, 0x82b818651f54e7bb +555, 0xa84d726d7acdbead +556, 0x5cfa535d5774965d +557, 0x4a34b7b1cb48d53 +558, 0x86a7b5bce426de84 +559, 0xfcd2307cecdb7318 +560, 0x16dbaaa71181a038 +561, 0x88e7e8cd261c2547 +562, 0x3c09ba6d1d5ea913 +563, 0x5dd3d643734ee5b6 +564, 0x326d725fe8cbb33 +565, 0x7bcca9ca2da8e784 +566, 0x482dcf6b11d7f9a4 +567, 0x1291b605b4cd3e04 +568, 0x6988181b50e2f4a8 +569, 0x649e3c37131fc292 +570, 0x4eeb67b9e21eba54 +571, 0xc051d39073dec45f +572, 0xc99c52e110270d67 +573, 0xcb813d5d77868add +574, 0x423a5f13573e7ac0 +575, 0x231ac4cc4fe73616 +576, 0x4c22b888a6e600ea +577, 0x8059a6dc7c9e25c6 +578, 0x49f498a5b8ad22de +579, 0xf1e812cc6d1826c8 +580, 0xbbaf60abe8b11e00 +581, 0x1d31d7f4d8be9a6a +582, 0xfeadce70a9a10c14 +583, 0xb47c635bc136996a +584, 0xd88e694c8da030cb +585, 0xc41bbe132aff1364 +586, 0x34249ab18a4b0800 +587, 0xf14b5c825aa736cc +588, 0x2710be6b08df78e +589, 0x2ab56bcc9bf9e740 +590, 0x9b7f6e591b5f648 +591, 0xfb665c3772f34135 +592, 0x628a0a5d2db5d8d5 +593, 0xb3e3f251e61b5259 +594, 0x82310ae33faf1b23 +595, 0x24af8723a65cbd0b +596, 0x671c93282fc4ad97 +597, 0x6cabeaac77270cad +598, 0xef4643fe38b02b7f +599, 0x7b011549d1ac6653 +600, 0xe2af87b9fccfe89 +601, 0x36b71ad67197ac8a +602, 0xdbba55d06f2fd93b +603, 0xf571dbd764b7f7e5 +604, 0x38ea402501cdbd45 +605, 0xb8ab5b5b1bab2913 +606, 0xfab973c4d45f32bd +607, 0x9364f1717c2636b9 +608, 0xfad00f4d983e00fe +609, 0xc90c532a11aef75a +610, 0x64a6eda96e44783c +611, 0x35891f2eb84520be +612, 0x28d216080caed43 +613, 0x129629cc5bd206f6 +614, 0x22c3d39822cbb4b3 +615, 0xf1efbf4cce1eaa2b +616, 0x7070cba12524ed08 +617, 0xa7ed0be9deabf20d +618, 0x8ddb4cd6b454f76b +619, 0xb82814b1db37b63 +620, 0x418e83b36de01876 +621, 0x9a538c7f39c6413 +622, 0xee0cd7abf8a2ecb9 +623, 0xa9222b07e95590f3 +624, 0x6296a415d68341e6 +625, 0x981e0a5a8f811929 +626, 0x4bb372d3b0de283d +627, 0xa9805b5971866e16 +628, 0xaf3b5f5183497657 +629, 0x2152b0fd23c3d9f +630, 0xb730c325b7173180 +631, 0x1e3439d231608c19 +632, 0x1c5ba6031379823c +633, 0x87f5d12d6d365cbc +634, 0xd3bc7f29614bc594 +635, 0x63102214bb391268 +636, 0x482bbd5bba648a44 +637, 0x6a23604690759dc4 +638, 0x4091d41408d3a39e +639, 0x7cd017f922101b15 +640, 0x7ce9004ac5f9231 +641, 0x978bc3d8ec7f7fdf +642, 0x5bd0c4d780580c11 +643, 0x4313c068bb040153 +644, 0x3ab7dab7bc38bf80 +645, 0x3aaf9c187728deea +646, 0x6633a4ce8efb88d9 +647, 0x7263b089878f00fc +648, 0xd0d767e96fe00eb8 +649, 0x184a7c0c01908028 +650, 0x1ebdf41e6f76e186 +651, 0xeb740ee1d0402083 +652, 0xfccf4974edb1c339 +653, 0x16e2707aa28306d +654, 0x1684f0bdb018c3a5 +655, 0x887b6b67b88aa862 +656, 0x923d7810a2bea33a +657, 0x56b3560babef5d6b +658, 0xb39a14614c54b8c6 +659, 0x33e4dc545a509fc8 +660, 0x26e21f84142da9b +661, 0xdd07598125756855 +662, 0x572d49a071d7ae0a +663, 0xba3c7e3baea28760 +664, 0x7ecdb2d714db4b61 +665, 0x1c62b4920e1b2fe2 +666, 0x71bfafb70092834a +667, 0xd710a4228f60d56a +668, 0xeb16277d4ce4e95b +669, 0x968168c90b16d3a1 +670, 0xac3439dfe8ad0062 +671, 0x5a8226f9dd5876ad +672, 0xb843affe917291b0 +673, 0xd76d1e67051f8259 +674, 0xb73a6638cce8ccde +675, 0xa0e6afd3c7295f9 +676, 0xff8857b4bbb5f4c6 +677, 0x99becf78938f0426 +678, 0xfcd17edc1e70f004 +679, 0x6223b8b23f2f50 +680, 0xca875f3e84587b4c +681, 0x7d1e81e589f87fb9 +682, 0x9eb621586aa826fc +683, 0xf46fb9ef5b9c2086 +684, 0x2882c9b7092725f3 +685, 0x5493f099bbedcd02 +686, 0x90c1ec979ffa811d +687, 0x963f765025bcc53 +688, 0x56194e3ec3d9d4e9 +689, 0x7ec4720954cac1f0 +690, 0xfab3145171af7f90 +691, 0x52a0b4e41a13b593 +692, 0x740e2d4d5909d126 +693, 0x98f5339c09c94a28 +694, 0x1700e462fe8dec76 +695, 0x3dbffc2aa4695ac3 +696, 0x5763edacabdfe2a1 +697, 0x7b5b623ce49ef21d +698, 0x30addc66f49860df +699, 0xcc7511a6c31bceda +700, 0x1b25b61ca75db43b +701, 0x416bc4c298e59046 +702, 0x4cd11fe2d74e4649 +703, 0xb54458a9229fc978 +704, 0x8c21a27882b6ca35 +705, 0x57887c8b5e01639b +706, 0xf4e893da996680bb +707, 0x8d601297702c9c0d +708, 0x2a27904a30aa53af +709, 0x497800f6917ea8d0 +710, 0xe96db3340ada9c00 +711, 0xcc23166f14c010ee +712, 0x782690d78fa65ec9 +713, 0xf3e00d74a0878eda +714, 0xa7cbb683decca0a3 +715, 0xdd2e038e683a94aa +716, 0xe2096ff8da896ca5 +717, 0xf7c83400afdabe11 +718, 0x395b8c6f6a4086a4 +719, 0x4a164ec05bee71d4 +720, 0xe87aa5d1ca0462fe +721, 0x8dbc5aed6dff9ceb +722, 0x12120d1e9552707b +723, 0x877dca6889b3e6cd +724, 0xbd65605c01e900fb +725, 0xbd6b82c4157c3115 +726, 0x8b60282732caf78a +727, 0x279fcf5e5de9e57f +728, 0x34b34ebfb6a37eae +729, 0xd258cc1a14e03b7b +730, 0x9a528ba3db4a13fb +731, 0xffa0aea59d057746 +732, 0x27fa7f456cd37c4e +733, 0xe1117a57a6fdce63 +734, 0xdc8fc903970a1551 +735, 0x492dd104f30faf29 +736, 0x110def0959e5652b +737, 0x7f8d1997636fdd15 +738, 0xfb77b05e538a9b59 +739, 0x2e41fa35b4b01fc6 +740, 0xbc35ae69a3374085 +741, 0x192c2a681c2d9b4b +742, 0x12566b8866c189d6 +743, 0x9d88ea785c5185c8 +744, 0x30a621ad5f983c4 +745, 0x8b875efe1206f587 +746, 0x224d25c3af6e3423 +747, 0x7503e976a1ac7bcc +748, 0x3c98aa869e823859 +749, 0x3d8835304b646892 +750, 0xf6353330ff970bc2 +751, 0x8a673f5e2edb8acb +752, 0xf2fdcc53493838b9 +753, 0x85ddcd526236af16 +754, 0x60afb99814c676c5 +755, 0x32a1c2749e281ca8 +756, 0x2367a92ae3bee9ca +757, 0x219fe082703743cc +758, 0x34d8b74dc85182a9 +759, 0xdd04164c72db23f +760, 0xe293ac28fe2671a9 +761, 0x9ca7d169cbda6f45 +762, 0x705c47972b4240ed +763, 0xc10eda9eeb536209 +764, 0xc36ddacd0c94e85d +765, 0x8eb592c27e8cd0d2 +766, 0x3e815991c76e7cc4 +767, 0xac9cfce31acf7580 +768, 0xbf7a4cb31c7aee94 +769, 0x663077444aceecf6 +770, 0xe7f614ff386eb568 +771, 0x79d7a229c66912c0 +772, 0x161ed4311f63e1f3 +773, 0x308a5faeb9982ede +774, 0x7b38ddb9b7efd10 +775, 0x1e103a2589b27ecf +776, 0x67b02baf4259f27e +777, 0x868921c115ea2eee +778, 0x959791912200f71e +779, 0x4dd55f36dec10557 +780, 0xe3464d90080cb99d +781, 0xfb2d4f6accce652f +782, 0x109900a9257d77ba +783, 0x3c4bda8e2c83684c +784, 0xc9ae040fb7f868c6 +785, 0x78098ffe994f4905 +786, 0x7a94c33eca77f0b4 +787, 0xbe6a2a95e9b5c0e8 +788, 0x797d39cf963f4837 +789, 0x8d2e249e4425d06d +790, 0x6ae2c30cd5da06f4 +791, 0x904489de762b179f +792, 0x84713e2dfb591e3b +793, 0x6405a40da3f6f51b +794, 0x976b560d663a2df1 +795, 0xed1c544784ba1e22 +796, 0xca658e995ed9344c +797, 0x2b1c6b8e4db49025 +798, 0x52b1513da528bad +799, 0x3c63406d256d9968 +800, 0x63a31ca3d423f85e +801, 0xb05a81f55789a720 +802, 0xd04412992c476c8e +803, 0x828ec2f77a150a3d +804, 0xee50926671bb60c6 +805, 0x5aa70f93e2df61b4 +806, 0x94d60fa2e8655858 +807, 0x3f5e5b770703cc7d +808, 0xc62dfb2688ca7784 +809, 0xaaf02e1e8ba89fe4 +810, 0x4ab74e0d8c047405 +811, 0x31ee04fbac6fcead +812, 0x1203b78b8228f5af +813, 0x412a70836f9aa71a +814, 0xab51cf98c03f1819 +815, 0x783a3ce9ce137f65 +816, 0x8897085b0a072cf2 +817, 0x685dd9bde8798cb +818, 0x9a1fac7b1705e2c1 +819, 0xf3e9ff98de48e9cb +820, 0x5c2d3eb1a1fbe917 +821, 0x3bda718b6b54d82e +822, 0x29f2dd18f22f0821 +823, 0xb992da1572ac3597 +824, 0xacb69e7aa14b34f7 +825, 0xcd36e3ad14f088d1 +826, 0x6aaacc96a1ec55e8 +827, 0xf8ac593f154fe68f +828, 0x18fc9cbff012339f +829, 0x2f3368ccbbb99899 +830, 0x7cec7d17f37031f7 +831, 0x96e86bfaadcb8fc2 +832, 0x74f9e7ee3d42a752 +833, 0xbd52f6c7d9b0733 +834, 0xa48e6d96bb6ce1c9 +835, 0xaefa058254b82133 +836, 0xb7a19edfd0929107 +837, 0x6160ce9125b26e26 +838, 0x6537dbbde1d2aed +839, 0xc567f9a6bec52dde +840, 0xca29fd3f22443342 +841, 0x7732aa6db6a1c476 +842, 0x8f5a4d7df6b11b3 +843, 0x76649262aa7e31e1 +844, 0x60a13eb125fbc829 +845, 0xc81e4d123dd21ac1 +846, 0x643cbb09bb72f86b +847, 0xf971a98fb25555a6 +848, 0xffa2774c66692d56 +849, 0xcb33c16c50b13ea9 +850, 0xfabf388dffda0e9b +851, 0x55d41ec12ca24b9f +852, 0x91cf693a3467e807 +853, 0x6be2c00b2c31d6dd +854, 0xc5cf513b5251ae28 +855, 0xffc4384212403dec +856, 0x45d4e1865255a69d +857, 0xfb1dcf956972086a +858, 0xcae946a55c4c55b8 +859, 0x7351ac7720e385c1 +860, 0x19aa8ffd86240254 +861, 0x8f515ae78f4040da +862, 0x1e1ed2058de50fce +863, 0x22d006dcdb374243 +864, 0x6e0f0ede7c95b441 +865, 0x70e8aa81b53b4d25 +866, 0x998f309ea41e3814 +867, 0x89ed6598fb66f390 +868, 0xb5997dc3278060df +869, 0xb2a021eac4f7e046 +870, 0x3705b60aa2fd0768 +871, 0xfc415079ab9200e +872, 0xf2871ac4cf45ecc9 +873, 0x24bf758d2246175f +874, 0xac503dd6f8141b3 +875, 0x4e879d12d9f03b3 +876, 0x82034af8cf93b644 +877, 0x59899dd7e478a6c7 +878, 0xae90addb6eb11507 +879, 0x1524ddf76730cdef +880, 0x6fd4afd5456b1c9d +881, 0xcddb9221ea001cbc +882, 0x64ff400bbf2e8604 +883, 0x6dda10549b06ed9b +884, 0xed2c85104c261527 +885, 0xc7e09217d29929a8 +886, 0x56284df611a428b1 +887, 0x1a7608289c0a61 +888, 0x7cb63db15166ff66 +889, 0xc6013c76fcdcdc72 +890, 0x8e5dd566c7a5a676 +891, 0x5a8e8565f40d133b +892, 0xe465973455848c44 +893, 0xf92eecbfe0f3c2c0 +894, 0x7d64155d4dcc5cac +895, 0xf17595706f988dad +896, 0xd590a001a6a19c5c +897, 0x82a164475758db3d +898, 0x6b144993ea1bbe32 +899, 0x22a81a7a6e453779 +900, 0x8e8c298df1a68a73 +901, 0x78056afd6d936b4c +902, 0xaaceef0325faaf62 +903, 0xe78bb7699f82266f +904, 0x523a2d283c5a5166 +905, 0x7076d87088f6c6db +906, 0x6087dd54cff5aeb2 +907, 0x7ef82e62cb851680 +908, 0x4e8bcc8ed84d03d8 +909, 0xd12fa0361df3cfd3 +910, 0xefb89c79f8127297 +911, 0xa9af4e2fbce0b1f8 +912, 0x462136685b70331e +913, 0xe9e74c93da699b77 +914, 0x9ec69215fb11d0c3 +915, 0xc10f229939e3e111 +916, 0x3f67fa79e41d2374 +917, 0xd5e7c1a9a7185162 +918, 0xa1dcce9ec91492fe +919, 0xd4e61f0727b5d21b +920, 0xdf6cdce46551800a +921, 0xa3f256ce906982d3 +922, 0x209742a6b9ffc27 +923, 0x4006c96958526a57 +924, 0x9606aebc75a1967e +925, 0x91b9f42fb64189df +926, 0xb27119defcb938bc +927, 0x128cc7a84ba05597 +928, 0x6c3df613c62d0d30 +929, 0x3adf69d48b629ec7 +930, 0xda42ee493837b128 +931, 0xb8e770480e760bb5 +932, 0x9feb55d57c99c626 +933, 0x29812d80afdae3ed +934, 0xae4222a64276a8c7 +935, 0xe3897212a5b4ed53 +936, 0x98bedfd13886e669 +937, 0xca858675d7fc0d0e +938, 0x28a359f665354234 +939, 0xfac2ccabe4128b35 +940, 0x61373cc5d11ca180 +941, 0x7007605a4512a87a +942, 0xe71f8eade7b30b3d +943, 0x3a9e77f9b99bd04d +944, 0x70d3e42488098866 +945, 0xd30fc159c7cd4d99 +946, 0xe4d3f6600d2e2d6f +947, 0x1088324dfa955c25 +948, 0x516437acd4764623 +949, 0x38a31abe50d0aa03 +950, 0x72e1054e9dc02ba +951, 0xe6971dd664d1a2e2 +952, 0xf6698cb095d3b702 +953, 0xad995a5a8c19bd92 +954, 0x34e53c6936f656e6 +955, 0x10de240bc07c757a +956, 0x3e3b9a6861c2bd1c +957, 0x9c0b0b97d3712ec9 +958, 0xabf1505a75043aed +959, 0xbdf93d3de3274179 +960, 0x28fa5904d3f62c28 +961, 0xc3b97b39ef6c5133 +962, 0xf2b2219225b8679d +963, 0x8be4ec0f930c0aaa +964, 0x47de5a56aa590643 +965, 0xb6f871b304129856 +966, 0x80a61c06233ab0f9 +967, 0x3ce6c3af8101b055 +968, 0x85b911708274e7d1 +969, 0x4cab65d093a488b7 +970, 0xaabc4b10661fe28e +971, 0x35b16dea64474a68 +972, 0x1d6eb5b093361223 +973, 0xc39107b92f0fe1fb +974, 0x1d09e048073c4841 +975, 0xc6a02f43aca8cb2f +976, 0xaf6613dbc7da909c +977, 0x5ac2a40c230aa756 +978, 0x33afb5e7c01c39a5 +979, 0xc7b0b20ea8b7d0ef +980, 0xdf7306c8ccb1bbea +981, 0x9710efc0c188b2a0 +982, 0xd6303eadb72c873e +983, 0xa38ca609b118f35a +984, 0x8390613065c6e535 +985, 0xdf9a0106757e431f +986, 0x8bcf77039788e143 +987, 0x6026806a986b378e +988, 0x482ff3b1394cb1dc +989, 0x2a27d0ccac9ede9c +990, 0x53c77f26e271b3ab +991, 0x1ba004cf276cf3f +992, 0xc135b0517dc81f7c +993, 0x5d137838db75e442 +994, 0x3fe505f93d1dbdd7 +995, 0x351654ae7d598294 +996, 0x173f8d182af9d84d +997, 0xf97dfcd164fe11c5 +998, 0xcda423e5ad43b290 +999, 0xa5cb380b8de10d10 diff --git a/local_packages/numpy/random/tests/data/pcg64-testset-2.csv b/local_packages/numpy/random/tests/data/pcg64-testset-2.csv new file mode 100644 index 0000000..7c13e31 --- /dev/null +++ b/local_packages/numpy/random/tests/data/pcg64-testset-2.csv @@ -0,0 +1,1001 @@ +seed, 0x0 +0, 0xa30febcfd9c2825f +1, 0x4510bdf882d9d721 +2, 0xa7d3da94ecde8b8 +3, 0x43b27b61342f01d +4, 0xd0327a782cde513b +5, 0xe9aa5979a6401c4e +6, 0x9b4c7b7180edb27f +7, 0xbac0495ff8829a45 +8, 0x8b2b01e7a1dc7fbf +9, 0xef60e8078f56bfed +10, 0xd0dbc74d4700374c +11, 0xb37868abbe90b0 +12, 0xdb7ed8bf64e6f5f0 +13, 0x89910738de7951f +14, 0xbacab307c3cfd379 +15, 0x2cf7c449d8b927a6 +16, 0xdcf94b3a16db7f0e +17, 0x8a9d33d905a8792e +18, 0x4cb9eb2014951238 +19, 0x6c353acf7b26d6f1 +20, 0x73ff53d673aa30c +21, 0x1fd10760015eca68 +22, 0xabae0aa9021eeba8 +23, 0xa5ae363a868ee2bb +24, 0x9d89e0f041de6631 +25, 0x6238b133c3991a65 +26, 0xff49267d75fef51a +27, 0xfb180656ce13c53f +28, 0xaf7fadf36128712d +29, 0xa6847fc6f339c63e +30, 0xb03e0b80d71ea5bc +31, 0x63905abcb43969af +32, 0x2295af3ee00a3bba +33, 0xb8b375b994330415 +34, 0x867d9ef1d8716a3b +35, 0x4f6c02f5601b4e18 +36, 0x7c5fb4c16c470d18 +37, 0xe3b57986b804b343 +38, 0xef1d79d212aca692 +39, 0x5b98774c8806209c +40, 0x924fc76bac38a5d1 +41, 0x5266084c412ddeed +42, 0x98240bf9b831d6a3 +43, 0x5681599e81219442 +44, 0x6441248fc2ba92bc +45, 0xe3e9051a540349ea +46, 0x3a2700034390baa3 +47, 0x9f893155b6d402bc +48, 0x158207910c6d8aef +49, 0xd5282ab7608c2cbc +50, 0xc97f4651669dee4f +51, 0x3d4750d95103ed60 +52, 0xe0614542caac1f04 +53, 0xefe5092144cfc6c +54, 0x560bc486abd7e9ae +55, 0x2678b71392daa4b8 +56, 0x734970d3dc2ba416 +57, 0xcbdbe849e51e4aaf +58, 0x3b0b5e28b491556c +59, 0xd51449ac45abd88 +60, 0x6790b59991f1b7ab +61, 0x32d1c039ff2415bc +62, 0x173b9772f24f72e0 +63, 0x9490a9ca9f883b1b +64, 0x4c775989e6214222 +65, 0xac07db37e6ee6114 +66, 0x331371b2e3f10aee +67, 0xf12e5326c21c28e4 +68, 0x5d77dc280c70d614 +69, 0x1b01bd17a2f281ec +70, 0xa10d3b5882938487 +71, 0xed5a0033c394ae8f +72, 0x70bc8ea568ea44b4 +73, 0xf4600ae77965e730 +74, 0x7ff92c0b321ce233 +75, 0x6cdbc87d0cc1d670 +76, 0x9ec64f0cf2000eb1 +77, 0xfebea50259800f68 +78, 0xf2edf9019a8fd343 +79, 0x75c584ac042e5468 +80, 0xc1fa8481d5bf9a1d +81, 0x7f57180168514ac2 +82, 0x878100716b94f81e +83, 0xc929406e3af17fd2 +84, 0x6a26e2c013e4bf4d +85, 0xbc071d8848280955 +86, 0xb60d75abbfd1bdac +87, 0xee9b76afeca9fa69 +88, 0x1d6c399d2f452810 +89, 0xbaa0bc1621e25c83 +90, 0xed6ba792f8671ba5 +91, 0xf7ca02c2ab11d8d7 +92, 0x3c3cadadf0b21e3 +93, 0xdd1784571e864e9c +94, 0xfb2f992015157509 +95, 0xf50bb9f0d3ced743 +96, 0x261565f75c3e185f +97, 0xf8fe33b284513e60 +98, 0xe3d2d10b5e024664 +99, 0xd28717566242cf35 +100, 0x7ae07d133ac5b789 +101, 0x3b7ccaaa53ac338e +102, 0xcd480bace4871650 +103, 0xec6c78f923c080e9 +104, 0x44211d0ff8919d59 +105, 0x89f79af76d2a45fe +106, 0x71583fd8a837548b +107, 0xee57269c261511f5 +108, 0xa5ee8f3b128c5d1 +109, 0xbb64c20ed0765a17 +110, 0x9d4790ab2eeaf7e4 +111, 0x742f3db806d9e98 +112, 0xb81ec97aed6a0d1b +113, 0x41808b34f6a8a23 +114, 0xc20913af175dfd4d +115, 0x834427db263b22bb +116, 0xedd9c632e611828a +117, 0x10eac8524496f571 +118, 0xd76091b97eb00ab7 +119, 0x111298ae9fe95666 +120, 0x5824b2e2a6719c43 +121, 0x6e280ec539e934ed +122, 0xf74fd832df90083e +123, 0x8fee6d0f241c2e97 +124, 0x4244f331c2f19c3c +125, 0x3dde75a845cce97f +126, 0xe35bb8e635a9915b +127, 0x39d2943037f7932e +128, 0x1fe2d134201d0970 +129, 0x49d00b63c749b804 +130, 0x960c2942cd4e4e04 +131, 0x8dd8e009dbc0435f +132, 0xcf493495c3a055cd +133, 0x8f7b5a1c0f9fe9cd +134, 0x49d5f90374641a25 +135, 0x69b3932073d3524c +136, 0xd170603e7de84ee2 +137, 0xa062ba3ed3539948 +138, 0xf5861cc5b5d56c82 +139, 0x5e914998a30c7e76 +140, 0x8d77f2ad1503c0f1 +141, 0x980b6a9e3b4181fb +142, 0xd9299cd50694c084 +143, 0x253dc0f8f1cec4c5 +144, 0x68110fb9d1b3e695 +145, 0xe8f3120d0aabc461 +146, 0xb066e7df0dfb042 +147, 0xd29ce0f797e6b60b +148, 0x6a569bb7ca33bd42 +149, 0xd46e08b2dc2385f8 +150, 0x28c61d11d055767 +151, 0x5d73aa3d1a2bb725 +152, 0x1421191e1c14829a +153, 0xa711bfb6423df35e +154, 0x461af97a86308006 +155, 0xb3e1018ff3519367 +156, 0xf19cf866a268ef2b +157, 0x207715eac9199d1d +158, 0xdd621c410975b78c +159, 0xf390aea68683610 +160, 0x617a2d107a0047d9 +161, 0x6e05ac416e5bebf0 +162, 0x7d253e70506c1bed +163, 0xf9f96f4a7dd53810 +164, 0xc693b29cb1573f73 +165, 0x4f1146b0020ea544 +166, 0x45140608fbd40579 +167, 0xdcf57219828ce6be +168, 0xe19d58cca37b5b32 +169, 0x82bda95b2a161235 +170, 0x5823c3d8a2b6c9ba +171, 0xfeb2e74092fdf89a +172, 0x50e1ad1abc8f869d +173, 0x2ec63d0c105eb8da +174, 0xe14e1c4845a3264a +175, 0xcff53670455eb6aa +176, 0xaafaccd24619fa3e +177, 0xf55a988486e2422a +178, 0xecfba16a90ff4d04 +179, 0xbf8d36c2f644757a +180, 0xdc56ed75a0dd6249 +181, 0x3f45023eff17c3bb +182, 0x2428bbfe90023fab +183, 0xab892c611adcb70c +184, 0xb6f13d8c0c2b9d74 +185, 0x2ac3fb11d224f2a8 +186, 0x65433dcfae2d9351 +187, 0xe906859ae4b45f82 +188, 0x8fb7f5f093d76a3b +189, 0x940dd290b5e88d1a +190, 0x31b27d21bef116e7 +191, 0x86a964e2c83b5296 +192, 0x85ffd17bc079a9e8 +193, 0x16c47c724e7ab7f1 +194, 0xfb6098a9867e7d7f +195, 0x9246fb69092c6cb2 +196, 0x1a4033572760f32 +197, 0xc5cc568a8b273b84 +198, 0xfa6f9f2fbdd44abc +199, 0x9701b8e087718ba3 +200, 0x51d6a7dcf73f8f3a +201, 0x30008172cc6a972d +202, 0xac2ab49a5ca6ac81 +203, 0x31f28ef79461e54c +204, 0x93e35a8da8cc6132 +205, 0x9a2c58beeba3d5b9 +206, 0xf6615c1de266ac39 +207, 0x127ff9f8166b766b +208, 0x7ffe380e80a69556 +209, 0xbe7d2c228e1542f7 +210, 0x2d5ebb4e50ba1746 +211, 0x63585761ae1bf684 +212, 0x1019eb5cee022fea +213, 0xb9d3540ab58da30d +214, 0x1677f4cb45620eb9 +215, 0x6524baee51783822 +216, 0xdf9f2ddcfabb0adc +217, 0x78e8acc43b287935 +218, 0xe9a1974e999222b5 +219, 0xc41324ec2291e780 +220, 0xea52abc9ecdcbc9f +221, 0x209d7bcd46ec6b04 +222, 0x12d504c09803db2e +223, 0x1200e6bf21475d81 +224, 0xde6d3c2b35fd2cfc +225, 0xa2526900ac33bd3c +226, 0x7f1f5290fc432bc5 +227, 0x29ddfb380a3d69c8 +228, 0xac79cb6942a2909d +229, 0x516996685b67a92a +230, 0xb5fc39041cb828bb +231, 0x75d9d8ca0644a276 +232, 0x81e98b76be92a3e9 +233, 0xca27888fafe12179 +234, 0x17be2ae039925765 +235, 0x9429846c0e6d0342 +236, 0x327dfd50439815e9 +237, 0xcee20cd7bc254aeb +238, 0x7d250389f453f29e +239, 0xfd1b232a85c95569 +240, 0x2ed55fac80f3e9e9 +241, 0xf6886c20417a1be7 +242, 0xcd08e61f0b0fdfde +243, 0x7b33e34da5c27bff +244, 0xd043c4b7d5603dd5 +245, 0x9a544e4c70a3b686 +246, 0xa7b60398c381f771 +247, 0xe9e7a3487c4bd4f2 +248, 0x10b58fdfe1ff112c +249, 0xd5c1c9748c0f4ceb +250, 0x61be9d09159d54ff +251, 0x5356f51e8239f510 +252, 0xfe7889d9b202ecef +253, 0xc7fc19ca5d263d5d +254, 0x7c4c07e61dfd9f69 +255, 0x6c315fe5015f300a +256, 0xe0a5bc00039747b4 +257, 0x16397fdcf829ee80 +258, 0xb55aee80d16a5169 +259, 0xca0609944d007eea +260, 0xcc982249f65a02ce +261, 0x528161feb149c148 +262, 0xcbf08ba49b41c006 +263, 0x39af1ff0b6f14138 +264, 0x5cc036be69799aec +265, 0x6adde125b1db21c5 +266, 0x8a99d83d6b613b67 +267, 0x1cd43fca9451f74c +268, 0x682dbb26ecc96365 +269, 0x13b4be2ceb43e3 +270, 0xbe8fbc3b6f4f581e +271, 0xda148a2f4bda5719 +272, 0x239106ca3319f393 +273, 0xb42b4dde641f0dd5 +274, 0xd233cfdf4cb0af74 +275, 0xfb5919d905589afc +276, 0xd802a8860c10b66a +277, 0x6c923e1d00e7b5bc +278, 0xfacce1134f383b89 +279, 0xf9570abda7a6d553 +280, 0x80f0f9796a208f18 +281, 0xc0e1df5280951c57 +282, 0xe9f143f08257bbe0 +283, 0x79e4c6463123d588 +284, 0xdd2118583f2b1684 +285, 0xb399ff5f2329fa18 +286, 0x4b3e9ebae96f813c +287, 0xc484dbf247787384 +288, 0x921865eb97603f2c +289, 0x18063c68e257d300 +290, 0x643181f345e7fc26 +291, 0x12e0b0e8eadf9fa7 +292, 0x79e613fe73dfa354 +293, 0x6db4c59203b7217a +294, 0x6c7a0e9ba6139eaf +295, 0x9617c7ac4e3f6d97 +296, 0x1f68a7b4fb1b4b75 +297, 0xef0b7ab24944f466 +298, 0xaf1dee1f4be1bc89 +299, 0xd2e355c959f5fd8d +300, 0xe594c3fb95d96efc +301, 0x9554766ca3342906 +302, 0xa4bbdc77d12842c +303, 0xb62400211ee489a8 +304, 0x91abadaaa3bbe67c +305, 0xd371eeb91deb42bb +306, 0x883bab35cbd2b6e5 +307, 0xd030c3d9411a9041 +308, 0xff3c110a858ff000 +309, 0x59bdf5ca47d0bde7 +310, 0x2bc80fa3cdba1853 +311, 0x6444ccb652662cb8 +312, 0xc0c7e256b9e90339 +313, 0x70714ea9c9d72302 +314, 0x96a0142f9d897d27 +315, 0x209a9097c5a91ef7 +316, 0xb9e33afc5171e009 +317, 0x47b37af433a58d40 +318, 0x30cc4ffbfa831d26 +319, 0xdcea4a85ff815466 +320, 0x907d5bd027f2e5cc +321, 0x7c081f6852e04a4b +322, 0xe61950749c1d502b +323, 0x1604e937ee69834a +324, 0xb2372d952dd25309 +325, 0x53f6a5b834c72577 +326, 0x2ce7a74395e0b694 +327, 0xacbf9ab4fe91f225 +328, 0x5ce1e63d3a2bb90f +329, 0x54740da3a5ed139b +330, 0xf194ddb39f29880b +331, 0x3305374f5d8ec08b +332, 0x831dd0164927ff4a +333, 0x625baa78e4458cf +334, 0x29d27dc0a4a71152 +335, 0xe227bae9a1401034 +336, 0xca0c209831846b2b +337, 0x8e8cc54b08b5a411 +338, 0x38f2b4acaac27db6 +339, 0x8ec88baac814e86b +340, 0x31c08e46b007bde +341, 0xb686c02722794c09 +342, 0xb77cf8fc682e3907 +343, 0xa56334e7f606f4b2 +344, 0x9c80b127bddd5f4f +345, 0x12df14834cd858bf +346, 0x3f14762a9cf5fb9f +347, 0x930a70941ef5779e +348, 0x64e96c849c30c080 +349, 0xfdf53bfba1300484 +350, 0xec7a9363c21bc616 +351, 0x26e9fd6a115ecb47 +352, 0x9707a84b5bc77fbb +353, 0xb23b2737b20d5903 +354, 0x22f4825ae80f6501 +355, 0x500644b12be6a01b +356, 0xb746645b2af082db +357, 0xe6af051f697892f8 +358, 0x577c724248a1cfc6 +359, 0x3d2b6a434c84eed3 +360, 0xd260f5efd7328314 +361, 0x95c16cc84bb3f55c +362, 0x7a01b2e4e0e80ca7 +363, 0x41930c3ce70a0935 +364, 0x1299bccf39d4e110 +365, 0x494883ba1a8a87f +366, 0x9478ecfe2d918e60 +367, 0x30ec9a5670cda8af +368, 0xf9bc877e833e2b99 +369, 0x1b83a0acfbb4a8db +370, 0x73bc1740c0d18880 +371, 0x65086ca9773cb3e1 +372, 0x3b78c3ccd63cff2e +373, 0xbfae748795acfb31 +374, 0xa4c9d5d56a15ba20 +375, 0xb9cb41721e52b71e +376, 0x1532f15d4dc47748 +377, 0x5a4d647a4b9ee632 +378, 0x8513c7c5a50898d9 +379, 0x6d3d98ccd5461b2e +380, 0xa65e99be2fe98d6 +381, 0x31abc8855334a0e5 +382, 0xf1ed22a661dca5b8 +383, 0x299e2b63229e03be +384, 0xda201a06687bce48 +385, 0xd27794b302142c55 +386, 0x642bd3e1c7898a9d +387, 0x777f1ff00afa1a87 +388, 0xd2f1c84fb3877baa +389, 0xae417583289191fd +390, 0xd641f1d88e0e2d55 +391, 0xc1f1d98fb5d18ebf +392, 0xb0f72aecdadce97b +393, 0xe9b8abc764f6018a +394, 0xd2a37cff8e890594 +395, 0x2dd70d631a528771 +396, 0xbf8ba0478c18e336 +397, 0x1630bf47f372ce0a +398, 0x6d04ea20dc3f46b8 +399, 0x6591881bf34337f2 +400, 0x33c149c7eb5b4103 +401, 0xf01a8c9857c86748 +402, 0x184348cdfc16d215 +403, 0x141168b253d2ed7 +404, 0x52aaf012ef50a6f1 +405, 0xfda1722387e16f4c +406, 0x43c30f57d6c038fa +407, 0xd4a8611f5f96d214 +408, 0x2c512ce17e987f2c +409, 0x961ce450f0fa2822 +410, 0xf55a506ec6cea9cd +411, 0xb76d694d9c7f5ef6 +412, 0xfb029216dbd8e988 +413, 0x93162501896a0081 +414, 0xfbbbd2c5ab300f5c +415, 0xd648b6da7387d491 +416, 0xc73b4697471d9d98 +417, 0xe37412bf1c93ee76 +418, 0xa1a96d96570e6637 +419, 0x5b3ab4f82428f65c +420, 0x873d849b188aa36f +421, 0x39fbee0ffc9fa9ff +422, 0xc70d21b744d677fe +423, 0x2b8a43c23043d209 +424, 0x93c33eaa37370d16 +425, 0x8930ac1880f2b0ef +426, 0xac01d27707036af0 +427, 0xc2af3fee504343a0 +428, 0x1c1dae2ad5535d97 +429, 0x9ffc21804b76a480 +430, 0x69f903412cc13563 +431, 0x9d3c4e2759a0c47d +432, 0xb1a8f894be6302b9 +433, 0x95e1fd7951479506 +434, 0xbb9e6c03cd4ae8e3 +435, 0x85206010c9b737cf +436, 0x767e813694d6238c +437, 0x4969af329ccbb30a +438, 0x3aa9af1075aaea5c +439, 0xb1ff519e8118a993 +440, 0xb21a23a3c91180fe +441, 0x320b24582ca3fd88 +442, 0xf8ca56415fb4e453 +443, 0xabd0899c07205e77 +444, 0x87fdc7a44b4ad50f +445, 0xd75744911641a278 +446, 0x7c8c9a65df6fcb95 +447, 0x79d785e3c7a5b695 +448, 0x421e4565ba1f592f +449, 0x27f87eb2517835cf +450, 0xb62cc4297441c83e +451, 0xd817a80ac815ca6d +452, 0xad84388130df2aa8 +453, 0x5e6b1640452d6ac8 +454, 0x936285e15edce2a3 +455, 0x903bccc4969768e8 +456, 0xefc2cb7b109d3140 +457, 0x633e9dfdda2d903a +458, 0x2a2f3225925678a1 +459, 0xe07eac91a27f8547 +460, 0xe50ced40eda78cb3 +461, 0xc5b22500e1c7441 +462, 0x32becf61bca3aa72 +463, 0xa2e37c4b30671344 +464, 0xc9f1c1910f45d544 +465, 0x9b50333b2dcdf730 +466, 0x310bfd53a1684b94 +467, 0x1e1dc21e66ac6455 +468, 0x81876c2bfb1ed5a1 +469, 0xd0c54a3e25eadc7b +470, 0x3791b6fbbd5c7ba0 +471, 0x133be57356c599fc +472, 0x8d1148eb8e83fdea +473, 0x311aedba0d8b42cc +474, 0x1142ae52745f94bb +475, 0xc5f4ab2fbde8c4a3 +476, 0xd23be827b5b24f6d +477, 0x65f95194cd122715 +478, 0x4b48969d73125922 +479, 0x46f165052b8ff988 +480, 0x5c689f94b9275ff4 +481, 0x93b03823ff2d536b +482, 0x871f3775aa4e3523 +483, 0x5af829f7cc0f66a5 +484, 0xa32e05739cbeac8c +485, 0xacff1856ddace0fe +486, 0x8eeb5e7f991a5322 +487, 0x6325c2720e0dbdea +488, 0x9fb817bc4fdf5200 +489, 0x9786f0d850e43d78 +490, 0x571f76dd7f9fb77a +491, 0x4d9e94e181cbc63f +492, 0x8bb632d3376c547a +493, 0x9cc26d9efd1c88b9 +494, 0x9c5d49579df52b0b +495, 0x6201abf7e1cda07b +496, 0x90d68f0c6c884963 +497, 0xfc5b66188ef7f561 +498, 0x6d9303cf2e0e0f95 +499, 0xd7cfcff535f5ed07 +500, 0x14d1a1228daa4ac6 +501, 0xe00ef5762f66ae50 +502, 0xf113a79471582978 +503, 0x430985281785dc7a +504, 0x31914108c206ed5 +505, 0x7ba6707b6419971c +506, 0x2ec63b033ce112e5 +507, 0xf8bcd36ced3b41e3 +508, 0xe5cf908c8010414b +509, 0xf5ee224b7c703e30 +510, 0x9a9733af0b12338b +511, 0x83e18cc00ace34f8 +512, 0xd52cff39e23008b8 +513, 0xa700578136b9c0c5 +514, 0x3fa179d32ac51f99 +515, 0xef2d5eab6d4ad380 +516, 0x709024a5abd032df +517, 0xc607c7ee349ede87 +518, 0x803d784e9731eb5f +519, 0x2ef06f4ba769282d +520, 0x4bc1dca1e9f07eb9 +521, 0x930c958a7a72f94d +522, 0x249bc8db2cc7a3bf +523, 0x3845305798f9a5d +524, 0x6f137eca9ab6f948 +525, 0xc31f5a963d31bd67 +526, 0x9d39693d5383626f +527, 0x52fb41c335a8b98e +528, 0xb79d1a29a06006ec +529, 0x7c0926a7a3eda2cc +530, 0xffdf5214406fd53e +531, 0xc6aa02a7e94282b9 +532, 0xd4a4431b4aa301ee +533, 0x4271cc0f9420d3ab +534, 0x26fccd7cc7fc2485 +535, 0x330594bb945b8d5a +536, 0x6ea8eaad12e5cb8c +537, 0x831c3467726bede3 +538, 0x31d1eb10017eaa61 +539, 0xc7aa75e41508f5cb +540, 0xde51810f0cadd0b5 +541, 0x50e5b3e73692f80b +542, 0x82107ec55636e188 +543, 0x9828ef175d843ab4 +544, 0xb8edc6a860dd421e +545, 0x25c0c138fd537ac3 +546, 0x47e72a771e8eb563 +547, 0xbb0f8c5333f4a2cc +548, 0x91750d2fb9b2d479 +549, 0xe662d8f6fe38df36 +550, 0x72a6d879fb5619f0 +551, 0x6817c7878dcbf077 +552, 0x4e7741cb484661e8 +553, 0x3b3b3ba0be5711bf +554, 0xa6989f5d25868765 +555, 0x43c276398997e4e0 +556, 0xdcbe16a94da28870 +557, 0x454936980a699c99 +558, 0xac614bfa8f0266c6 +559, 0x9174841392e213d5 +560, 0xa0e2acffc5fc9d1f +561, 0xe53a08a7a0e6521a +562, 0x2b845cf7c24172e0 +563, 0x265a4fc5f7adec0d +564, 0x1f34fbe5f1e49420 +565, 0x139181f6fb647f20 +566, 0x88c35d46e2fcd05e +567, 0x2a6d5b55903c0459 +568, 0xcea28eb621ad7bf1 +569, 0x5c9cdc13e7aaa30 +570, 0x5fe63e14746e7103 +571, 0x7923e53d73835db9 +572, 0x376e661210bf1b06 +573, 0x5b1cab85450efdd5 +574, 0x3908dc096c70b452 +575, 0x4825e303cd1f396f +576, 0xed476bfd702957c3 +577, 0x6acc013aff5db743 +578, 0x62c80b776343d488 +579, 0x9c75edcd5b012697 +580, 0xaa053362a3b9770a +581, 0xa907e236c7c07e94 +582, 0x15b2c380451692c0 +583, 0x94f79142697bd61f +584, 0xbc657d31ea98d44f +585, 0xcbaa5e52517a1f5e +586, 0x96aa2e44a7c4a03f +587, 0x216d3c66db2b515d +588, 0x157001807e3ca88a +589, 0x52b3a596bdd3859a +590, 0xed747e7fc5e3adac +591, 0x78fd765ddb2c448d +592, 0xe53dc7299ed8614e +593, 0x75ad41fb1d7a790a +594, 0xc14f6b944b0e6cb1 +595, 0x7c314b69fce3df1c +596, 0xb56d82eb740d7abc +597, 0x5132a93c41251fdb +598, 0xe3ce35bd2a82f958 +599, 0x440571a981c722f2 +600, 0x194cdfd9f186bc9 +601, 0xb89e522a5db00939 +602, 0xad35f339f68df3c8 +603, 0xa82ab18420322293 +604, 0xaffa6df9b72b27c4 +605, 0x9615694d23beaa2c +606, 0x1d82ebe563abad91 +607, 0xab50ef65fbd94385 +608, 0x1b070dbd70a9a14 +609, 0x2ececa796abbadf0 +610, 0x6bbeafe9e81ab2a2 +611, 0x60dcd0d2a9b76914 +612, 0x1e748039ef05c33f +613, 0x6d4d17f2213ccdff +614, 0x9fa56132957bc987 +615, 0x60a17185de2428eb +616, 0xb56038ddf306479c +617, 0x3b1db5df92d06d8b +618, 0x24d1bba8bdedf580 +619, 0xbfb7e6740ebaa4d9 +620, 0xab31c4473e46f61d +621, 0x6deb3cdd8fd5869f +622, 0x23032e47746d72d6 +623, 0xa9e72d734e10f2e8 +624, 0xbffd199b6157bc23 +625, 0x29f8254df273fb62 +626, 0xb076142130ee55ec +627, 0x5b0b08374126c309 +628, 0xea4536aae979521f +629, 0xc064e7abec91a174 +630, 0x46133ef80c59d935 +631, 0xf0227e2da1b14160 +632, 0x675a76641e1af5a +633, 0x2f50a069b33d198c +634, 0x3ded5a65e1d657eb +635, 0xbb6999b020694f6b +636, 0x86b2f2b33487aed7 +637, 0x76e14e85f8bfb4cf +638, 0x38f7f1e44bd4e0db +639, 0xc1a7d41b7e80d4ae +640, 0x1dfaaf80bbceb42e +641, 0x3f51c11497720c2b +642, 0xce6da1415ddb8b80 +643, 0x7377d8bcd359b5f3 +644, 0xe077208f3f810aca +645, 0x9a06a8a2dacbffce +646, 0xca1f99156b09b735 +647, 0x2ff9a93064d91451 +648, 0x50f3ea93f351a7ef +649, 0x606fceccb07054de +650, 0x7e83d6d2f8f6685d +651, 0x78f3995291c5d407 +652, 0xd28d2460e22d0228 +653, 0x2c5636f68a0054dd +654, 0xd9fafb1c56c8f6cb +655, 0xe39889b5f9d74464 +656, 0x1355372bf5db2cc1 +657, 0x26768426b9ac323 +658, 0x4af1dbdc1111fd89 +659, 0x66973587943b927f +660, 0xf86f5f50684dfb1d +661, 0x1247d574ff79b534 +662, 0xc8039f3259210fe2 +663, 0x79b573235c92a9f5 +664, 0x213f642d8450e2f0 +665, 0x5db7706973376566 +666, 0x6182c12e69b373d7 +667, 0x3e5ac47300aec07f +668, 0x4b5b6c57b1574376 +669, 0x6b7fcceefd56b17c +670, 0xf656c3455cb9d4b8 +671, 0x7577e2e13329721f +672, 0xf33c0c53ce956e8d +673, 0x7d0f328ee356174 +674, 0x10ec9a168088686e +675, 0x71ef1776d062dfa +676, 0xaa7b590a488a6bc4 +677, 0x38612b6dd8049a1c +678, 0x939045e36874f731 +679, 0xcb9d1d74c56d5ac9 +680, 0x54f1c1c8fef1d8ff +681, 0x3ee4b85c8c7e939e +682, 0xb9b4608e019f352c +683, 0x79d4701275d12e6a +684, 0x2632a2d9835c7f19 +685, 0x1662cd9fba293692 +686, 0xbcb70265115ee944 +687, 0xdc43fb9761468604 +688, 0xe3eec4e7d3871352 +689, 0x829531753226989d +690, 0x2748cc67f540e074 +691, 0x39c4af25d607837d +692, 0x741a243f4cb5df99 +693, 0xda1353287e18b49a +694, 0xa6735689d751ea74 +695, 0x46326d587340ce0b +696, 0xc18531df4550012b +697, 0x6f7901e05dd4b818 +698, 0xfb966afc4c001d63 +699, 0x6dc10fca67a9cfdb +700, 0xd6527ffadf0feaae +701, 0x3b900172045e25d +702, 0xb7dd594cdded6a46 +703, 0x6602aee7ec1599fc +704, 0x7fbf12f23747546a +705, 0x32e63f662bd2de0d +706, 0xedf47770b67ed641 +707, 0x331bef83481c5c2a +708, 0x8fc4256fdf05158c +709, 0x98eba48dabccf5e0 +710, 0xdbc2f2cdb7b1c154 +711, 0x7777755616517ad3 +712, 0xd473c147d2628ac1 +713, 0x861e15d1d760b5a7 +714, 0xf4d25926405ecb07 +715, 0xb7739c69effff86e +716, 0xe97fbafa6f96830c +717, 0xf13e8a334e8bede1 +718, 0xcd60010cba4ee4f9 +719, 0x1f537ac2b82e6008 +720, 0x1fda8d781a89140a +721, 0x9dc204f3f4a463f0 +722, 0x456dcd18eb56a1ab +723, 0x629957bc87bd16a1 +724, 0x2c8000ddb8c75253 +725, 0xc31dae9ec8449284 +726, 0xdac05c8baa2b691a +727, 0x21ff7be9ffa3e7ac +728, 0x844f4b5ed4ee08d0 +729, 0x651f913fd636c994 +730, 0xca3e71a2110b2d49 +731, 0x7709bc42253ed09d +732, 0xbb164d45b6569d43 +733, 0x90ec2f040c20a112 +734, 0xfa6e77e9166f5be4 +735, 0x6b6d12c1842d587d +736, 0xfcd7ff8466e25e2a +737, 0x6a5a2ed8bd971297 +738, 0x2ec35f6bba5adcbc +739, 0xc83676e16651249a +740, 0x458f6064cefe10ba +741, 0x90d54d527e6cd028 +742, 0xa5613e88db27c388 +743, 0x331e0c7d85aa1abc +744, 0x8cee4977e210358 +745, 0xfcae379aa6cbff8e +746, 0xd1407afc97a57e86 +747, 0x1fab25c864f094ae +748, 0xd914864a63004552 +749, 0x4214d226a20f1384 +750, 0x3f4e0d80c488b715 +751, 0xc5ca2f654024b7c8 +752, 0xc1e27a124e7c821c +753, 0xd890a915ffc7918c +754, 0x22fba040ce51a9f8 +755, 0xbf61cebd8891617a +756, 0x7846609ee228e319 +757, 0x536d1854375509b8 +758, 0xbbfb45fc6e666f50 +759, 0xd85b4c0527f9d7d6 +760, 0x528cc9c7fa2a84c8 +761, 0x27a1baece647f2cb +762, 0xfddf0cb92fe09dc3 +763, 0xeb5008fe965d8d96 +764, 0x4a3307937eb2e5c8 +765, 0xd07d74c240c6c363 +766, 0x16f62290179d1bbf +767, 0xe99c9bcc9cb1ece7 +768, 0xc64f9be03c8a93be +769, 0x32659effaf666c1f +770, 0x4bb228cfb30b6672 +771, 0x98764870842068a5 +772, 0x5b12ef2d2cd8bdcc +773, 0xbc79d1c1b41f28b8 +774, 0x97a517cf3279fc9a +775, 0x34ffd46c1d4d6025 +776, 0x9c302307ee25c8f0 +777, 0x399604eed1f18a8 +778, 0x1c9b813c2043142a +779, 0x2944ea5e55267fe9 +780, 0x5a8a9f5e728ea667 +781, 0x30c8440adb804a0 +782, 0xee0e6b627099a937 +783, 0x3d50757ada3c52da +784, 0x4548916b32c813ab +785, 0x602a186fe5bf109b +786, 0xf0d440a2227ba304 +787, 0x5a10d4e0ca9ea32b +788, 0x6e5eb90da13ba64c +789, 0x4c6af8fd04241ab2 +790, 0xf9eb31d26e093006 +791, 0x5d674878839fe3ea +792, 0x1562b55b2484e47c +793, 0xa87188c099c1cb61 +794, 0xb7736b8aa02a3392 +795, 0x5f4b301125abb20f +796, 0x361d566984637f44 +797, 0x68c4b3feac8bd0c3 +798, 0x7066c634dd2503c1 +799, 0xfecbf7c9441eb6ea +800, 0xdbc26ae0fc81436b +801, 0x9ef3e2b48252e7a4 +802, 0x31a49b4c339b37c7 +803, 0xb01b2a83cf346cf4 +804, 0xc24dc2347f82fbe3 +805, 0x134cad272dcd410f +806, 0x61260742823ba59c +807, 0x53ac4c193a97c730 +808, 0x9207c9833af34b52 +809, 0xa72e7ee77078d1f5 +810, 0x2e6f6e1b05936885 +811, 0x783b99ce5dbf9464 +812, 0xfdfeb6f0d027bb44 +813, 0x40eeb27096f92b0 +814, 0x5ef96ff5d4a4521f +815, 0x5595806ae873718a +816, 0x67d449eecf4ca1c3 +817, 0xde837ab611364f3f +818, 0x7034c24d2b139be9 +819, 0xe21166603e0a9c86 +820, 0x935694435c1f0d51 +821, 0x6cb3bec90c126088 +822, 0x4096ef662b7a9f89 +823, 0xd2d85b8d238d8c15 +824, 0xa4ea533ce3ec59b2 +825, 0x3654729d80a2db29 +826, 0x214c4cc3906d29d4 +827, 0x201c447e7588e373 +828, 0xe8b8f0ae25f683eb +829, 0x6744aaf5754e38af +830, 0xd1ffb10d6f27a061 +831, 0xe536733a7b3a6c30 +832, 0x39f0f66e47cbf2c9 +833, 0x856a9593526fde2 +834, 0x2e2a817a0098ea4b +835, 0xc5e1eeb551a0e3d3 +836, 0x3f21e2f5e2d50b2 +837, 0x906af56c66dd9f8c +838, 0x30f6dbd70329fac8 +839, 0xc443dfddf3c01a60 +840, 0x7ab85d9aa9675470 +841, 0x8c9080bd39717bfc +842, 0x4b1ccdb3c3597f6f +843, 0x74e2542d70ab5d67 +844, 0xbb3d236aad00f74 +845, 0xcf3cadf9a2804774 +846, 0xe851d9750e42bd07 +847, 0xc0ad82029b1c371f +848, 0x7ee119eb552d6c07 +849, 0xd8024049bd1d784a +850, 0xfa67a899760363 +851, 0xaa7c2f438b178197 +852, 0xc473674a47ffe064 +853, 0x539fbe3fc674c270 +854, 0xdb48484748a76f3b +855, 0xc73b2b092060d +856, 0xa1d2a15345016f5d +857, 0x4d0fe8599f9bba47 +858, 0xa0edc275e6f8f1d1 +859, 0x40590a8655bc8d72 +860, 0x35b4223161f05f75 +861, 0xa04c0c0f616752dc +862, 0x7f371ed2ca45432d +863, 0x2ff1a08f75ac6438 +864, 0xe2dc5c3682282f48 +865, 0xe1e4179fa98d9013 +866, 0x8cb083d6843a73d5 +867, 0xb4c2b5921b706854 +868, 0x738e14c0e7352445 +869, 0xcd2b646f91afd8c7 +870, 0xd5779a5b57a264fd +871, 0xc39ff855586c7d07 +872, 0x3e3f0098c631a859 +873, 0x644e02fae032110 +874, 0xa8834613c0a45278 +875, 0x69482f2c08e10657 +876, 0xe4ee475bdb87e69a +877, 0xdc1ef7b25c0d0019 +878, 0x88a3fa2be18d8744 +879, 0x60a02e0b21c5bec7 +880, 0xb6867b88aa19bc1a +881, 0xb599409affcf10eb +882, 0xaeaa1778a5e59daa +883, 0xd7a91a52c16663e3 +884, 0x93cb269affe07b1c +885, 0x841b6ced3a4ba815 +886, 0x84541768e1540a5c +887, 0xe3943c84f83b3020 +888, 0x5de366fbd7b45258 +889, 0xd787cc3bde91a661 +890, 0x814071446edecb57 +891, 0x15d8c602a1141514 +892, 0x72f07bc8002d1d0d +893, 0x4a8bd8dc9a1f0f3e +894, 0x8723796ae0f20d35 +895, 0xda7283c2051f73b2 +896, 0x2df0cc247f90bd3b +897, 0x79a8522b968f990a +898, 0x951ede190c8b9d02 +899, 0xc512f1a5b14b018a +900, 0xf0e3ddc03b9a4259 +901, 0x8cf4a35ad312e15f +902, 0xebef28926b11094b +903, 0x5628ba687325921c +904, 0xc3aa75e57edc49c3 +905, 0xc38382fa98e762ba +906, 0x8d209e896285848e +907, 0x2c7d6adf592b4a3e +908, 0x62de48e36f8338f3 +909, 0x4a752741e00de30e +910, 0xf7855b70f1f6ec2b +911, 0xa505fa4428199e43 +912, 0xe8b6b423b826bbac +913, 0x4bd1206cf8786d05 +914, 0x6dcf040391fe3bf4 +915, 0x913f500f87e1bba3 +916, 0x5acf775aa180a5d5 +917, 0x74dd28d9432ce739 +918, 0x996c2ff2f0dc2495 +919, 0x73dbfe6c56effe4 +920, 0x56fddd25196f5e40 +921, 0xe87810158f5b7 +922, 0x7b8795e996383f1f +923, 0x9ba5ee7c777c4c82 +924, 0x17ce3908d270fe1c +925, 0x3df9e613c1aedfae +926, 0xcdd26871b32fc8e1 +927, 0xd71cb13afc633979 +928, 0x63427c8ea9b1c79e +929, 0xd070f7664d3b405d +930, 0x46f2a9e32d9fb769 +931, 0xb4c3822a45e9fe9b +932, 0x8ba30b97fe6f5ec7 +933, 0x70aa554ee2fc11f9 +934, 0xa80c99dbe0cfcfaf +935, 0x36d9250cb2d68ed +936, 0x2995e4b9e1cd1db4 +937, 0x4b3803ba57fc570f +938, 0xae3959e7d740eaa5 +939, 0xb4cbd6662adbae08 +940, 0xae46576446e8dbc4 +941, 0xc4828e008a9a8a54 +942, 0x145d7db8e6554b2f +943, 0x1b1b8916a730c371 +944, 0xdaf84b2bebe31963 +945, 0x5b59b80ef23a2403 +946, 0x9180c7e89cab6fd3 +947, 0x80e58f5411babf34 +948, 0xa06cf55185b9b005 +949, 0x13b2c798424173ad +950, 0xc510f8e706311d49 +951, 0x1f974b83b6046d3a +952, 0xae6e8e85e822d1c3 +953, 0x66f2c8dc3274a31a +954, 0x7e04dbcbf65bd377 +955, 0xabf41ede01ec20a4 +956, 0x5efa0948f6bbb2ea +957, 0xbc91c99d8592255 +958, 0xf6d6917911d86d75 +959, 0x85ce273d54e9097a +960, 0xbdfd30f2420fff92 +961, 0x8802f02f610b537c +962, 0xd1d70037ed543229 +963, 0x908aaf97f9693a46 +964, 0x1f6cfeaa0834d53a +965, 0xa453fd1648ce04d2 +966, 0x2c38bb85ebc64af9 +967, 0xd2daff551c90c4f8 +968, 0xae5a0d949797d784 +969, 0xf0974c8552ac9593 +970, 0xa10b70499f65c693 +971, 0x39a449ebd594ddff +972, 0x8ea090f2b17b9b49 +973, 0xc592de318090fd83 +974, 0xb63e4fbc467b6912 +975, 0x57a0c1c5ce0e4dcc +976, 0xa7c517cf3d436b35 +977, 0xef6dcb0f3fad038b +978, 0xaf4fb60315b91287 +979, 0x5e0776f67304f331 +980, 0xe927753b8e6f7932 +981, 0xd3df2dd92559e304 +982, 0xdaed52aa6af44413 +983, 0x1b59f4dac1e181f8 +984, 0x4a73c2293877ef39 +985, 0xca45d0d015fe44de +986, 0x4659c8b7853735a8 +987, 0x12de6466bdf8adeb +988, 0xaeea857a09bfec15 +989, 0xcc9cf4b3c0b88a23 +990, 0xa44ae52396a5e1bf +991, 0x5847a724305d137f +992, 0x8f4d4de223956182 +993, 0x58254dfada867a8 +994, 0x900a98222c2f339e +995, 0xdb575260935d51d5 +996, 0x13fb4bfbbc0d7b53 +997, 0x62213850186bb92b +998, 0x2a34823312c00388 +999, 0x6148329042f743b0 diff --git a/local_packages/numpy/random/tests/data/pcg64dxsm-testset-1.csv b/local_packages/numpy/random/tests/data/pcg64dxsm-testset-1.csv new file mode 100644 index 0000000..39cef05 --- /dev/null +++ b/local_packages/numpy/random/tests/data/pcg64dxsm-testset-1.csv @@ -0,0 +1,1001 @@ +seed, 0xdeadbeaf +0, 0xdf1ddcf1e22521fe +1, 0xc71b2f9c706cf151 +2, 0x6922a8cc24ad96b2 +3, 0x82738c549beccc30 +4, 0x5e8415cdb1f17580 +5, 0x64c54ad0c09cb43 +6, 0x361a17a607dce278 +7, 0x4346f6afb7acad68 +8, 0x6e9f14d4f6398d6b +9, 0xf818d4343f8ed822 +10, 0x6327647daf508ed6 +11, 0xe1d1dbe5496a262a +12, 0xfc081e619076b2e0 +13, 0x37126563a956ab1 +14, 0x8bb46e155db16b9 +15, 0x56449f006c9f3fb4 +16, 0x34a9273550941803 +17, 0x5b4df62660f99462 +18, 0xb8665cad532e3018 +19, 0x72fc3e5f7f84216a +20, 0x71d3c47f6fd59939 +21, 0xfd4218afa1de463b +22, 0xc84054c78e0a9a71 +23, 0xae59034726be61a8 +24, 0xa6a5f21de983654d +25, 0x3b633acf572009da +26, 0x6a0884f347ab54c8 +27, 0x7a907ebe9adcab50 +28, 0xbe779be53d7b8d4a +29, 0xf5976e8c69b9dcd1 +30, 0x1d8302f114699e11 +31, 0x7d37e43042c038a0 +32, 0x2cc1d4edc2a40f35 +33, 0x83e3347bb2d581f1 +34, 0x253f8698651a844d +35, 0x4312dea0dd4e32f6 +36, 0x10f106439964ea3a +37, 0x810eb374844868cc +38, 0x366342a54b1978cc +39, 0x9fb39b13aaddfb5e +40, 0xdb91fd0d9482bed7 +41, 0x89f6ea4ca9c68204 +42, 0x146b31ccca461792 +43, 0x203fd9724deb2486 +44, 0x58a84f23748e25cb +45, 0x2f20eb6aeb94e88 +46, 0x14d3581460e473c +47, 0xad5bd0d25f37d047 +48, 0x1cf88fa16de258b2 +49, 0x3bcab6485b7a341 +50, 0xb2433b37f227d90c +51, 0x2cffd7e0a8360cc8 +52, 0x5d2eeff7c9ebc847 +53, 0x6fd7c7ae23f9f64b +54, 0x381650b2d00f175d +55, 0x9d93edcedc873cae +56, 0x56e369a033d4cb49 +57, 0x7547997116a3bac +58, 0x11debaa897fd4665 +59, 0xdf799d2b73bd6fb8 +60, 0x3747d299c66624d +61, 0xac9346701afd0cfa +62, 0xac90e150fa13c7bf +63, 0x85c56ad2248c2871 +64, 0xdea66bf35c45f195 +65, 0x59cf910ea079fb74 +66, 0x2f841bb782274586 +67, 0x9814df4384d92bd9 +68, 0x15bc70824be09925 +69, 0x16d4d0524c0503a3 +70, 0xf04ea249135c0cc7 +71, 0xa707ab509b7e3032 +72, 0x465459efa869e372 +73, 0x64cbf70a783fab67 +74, 0x36b3541a14ca8ed7 +75, 0x9a4dfae8f4c596bf +76, 0x11d9a04224281be3 +77, 0xe09bbe6d5e98ec32 +78, 0xa6c60d908973aa0d +79, 0x7c524c57dd5915c8 +80, 0xa810c170b27f1fdc +81, 0xce5d409819621583 +82, 0xfe2ee3d5332a3525 +83, 0x162fb7c8b32045eb +84, 0x4a3327156b0b2d83 +85, 0x808d0282f971064 +86, 0x2e6f04cf5ed27e60 +87, 0xaf6800699cca67a9 +88, 0xc7590aae7244c3bf +89, 0x7824345f4713f5f9 +90, 0x8f713505f8fd059b +91, 0x3d5b5b9bb6b1e80e +92, 0x8674f45e5dc40d79 +93, 0xcb1e36846aa14773 +94, 0xe0ae45b2b9b778c1 +95, 0xd7254ce931eefcfb +96, 0xef34e15e4f55ac0a +97, 0xf17cc0ba15a99bc4 +98, 0x77bb0f7ffe7b31f1 +99, 0x6ee86438d2e71d38 +100, 0x584890f86829a455 +101, 0x7baf0d8d30ba70fe +102, 0xb1ac8f326b8403ae +103, 0xcc1963435c874ba7 +104, 0x9c483b953d1334ce +105, 0xc0924bcbf3e10941 +106, 0x21bcc581558717b1 +107, 0x2c5ad1623f8d292b +108, 0xa8ea110f6124557e +109, 0x15f24a6c5c4c591 +110, 0x40fe0d9cd7629126 +111, 0xcfe8f2b3b081484d +112, 0x891383f4b4cac284 +113, 0x76f2fcdef7fa845 +114, 0x4edd12133aed0584 +115, 0xd53c06d12308873d +116, 0xf7f22882c17f86bf +117, 0xfbaa4aad72f35e10 +118, 0x627610da2e3c0cc3 +119, 0x582b16a143634d9a +120, 0x9b4a7f69ed38f4a0 +121, 0x2df694974d1e1cbe +122, 0xe5be6eaafed5d4b +123, 0xc48e2a288ad6605e +124, 0xbcb088149ce27c2b +125, 0x3cb6a7fb06ceecbe +126, 0x516735fff3b9e3ac +127, 0x5cbafc551ee5008d +128, 0xee27d1ab855c5fd5 +129, 0xc99fb341f6baf846 +130, 0x7ad8891b92058e6d +131, 0xf50310d03c1ac6c7 +132, 0x947e281d998cbd3e +133, 0x1d4d94a93824fe80 +134, 0x5568b77289e7ee73 +135, 0x7d82d1b2b41e3c8b +136, 0x1af462c7abc787b +137, 0xcfd8dfe80bfae1ef +138, 0xd314caeb723a63ea +139, 0x1c63ddcfc1145429 +140, 0x3801b7cc6cbf2437 +141, 0xc327d5b9fdafddd3 +142, 0xe140278430ca3c78 +143, 0x4d0345a685cb6ef8 +144, 0x47640dc86e261ff9 +145, 0xab817f158523ebf4 +146, 0x37c51e35fbe65a6b +147, 0xab090f475d30a178 +148, 0x4d3ec225bf599fc1 +149, 0xefd517b0041679b1 +150, 0x20ad50bca4da32c5 +151, 0x75e1f7cd07fad86d +152, 0x348cf781ee655f4b +153, 0x9375f0e5ffc2d2ec +154, 0x7689082fd5f7279c +155, 0x633e56f763561e77 +156, 0x9d1752d70861f9fd +157, 0xa3c994b4e70b0b0f +158, 0xabf7276a58701b88 +159, 0xbfa18d1a0540d000 +160, 0xc6a28a2475646d26 +161, 0x7cdf108583f65085 +162, 0x82dcefb9f32104be +163, 0xc6baadd0adc6b446 +164, 0x7a63cff01075b1b4 +165, 0x67ac62e575c89919 +166, 0x96fa4320a0942035 +167, 0xc4658859385b325f +168, 0xde22c17ff47808f6 +169, 0xbb952c4d89e2f2ec +170, 0x638251fbc55bdc37 +171, 0x38918b307a03b3ea +172, 0xccb60f2cedbb570b +173, 0x3c06f4086a28f012 +174, 0x4e8d238388986e33 +175, 0x1760b7793514a143 +176, 0xa3f924efe49ee7d6 +177, 0xaf6be2dbaebc0bdf +178, 0x6782682090dffe09 +179, 0xb63a4d90d848e8ef +180, 0x5f649c7eaf4c54c5 +181, 0xbe57582426a085ba +182, 0xb5dd825aa52fb76d +183, 0x74cb4e6ca4039617 +184, 0x382e578bf0a49588 +185, 0xc043e8ea6e1dcdae +186, 0xf902addd5c04fa7c +187, 0xf3337994612528db +188, 0x4e8fd48d6d15b4e6 +189, 0x7190a509927c07ab +190, 0x864c2dee5b7108ae +191, 0xbb9972ddc196f467 +192, 0x1ea02ab3ca10a448 +193, 0xe50a8ffde35ddef9 +194, 0x7bd2f59a67183541 +195, 0x5a940b30d8fcd27a +196, 0x82b4cea62623d4d3 +197, 0x6fbda76d4afef445 +198, 0x8b1f6880f418328e +199, 0x8b69a025c72c54b7 +200, 0xb71e0f3986a3835f +201, 0xa4a7ddb8b9816825 +202, 0x945dcda28228b1d8 +203, 0xb471abf2f8044d72 +204, 0xf07d4af64742b1ba +205, 0xfca5190bc4dd6a2a +206, 0xd681497262e11bc5 +207, 0xbe95d5f00c577028 +208, 0x56313439fd8bde19 +209, 0x3f3d9ac9b5ee6522 +210, 0x7b8d457dd2b49bbe +211, 0xe76b5747885d214b +212, 0xa8a695b3deb493ea +213, 0x5292446548c95d71 +214, 0xbf5cdf0d436412df +215, 0x7936abaed779d28d +216, 0x659c6e8073b3a06d +217, 0x86c9ff28f5543b71 +218, 0x6faa748445a99146 +219, 0xdcc1e6ab57904fd7 +220, 0x770bd61233addc5f +221, 0x16963e041e46d94f +222, 0x158e6cb2934157ac +223, 0xb65088a8fd246441 +224, 0x2b12ced6ce8a68c3 +225, 0x59a18d02cd6082b3 +226, 0x4ddbc318cb5488ee +227, 0x3d4cf520b3ed20a1 +228, 0x7028b3a92e2b292d +229, 0xf141da264a250e4d +230, 0x9788d53e86041c37 +231, 0x1bb91238a7c97dbf +232, 0x81953d0ddb634309 +233, 0xfa39ccfe14d2d46 +234, 0xf7c7861c9b7e8399 +235, 0x18d27ca50d9dc249 +236, 0x258dfdf38510d0d9 +237, 0x9e72d8af910ea76f +238, 0x4f8ef24b96de50ad +239, 0xb9d9c12297e03dc9 +240, 0x91994e41b4a1929c +241, 0x8defa79b2ccc83b9 +242, 0x948566748706dac5 +243, 0x7b0454946e70e4cf +244, 0x340b7cb298c70ed7 +245, 0x6602005330cebd95 +246, 0xf71cb803aa61f722 +247, 0x4683fb07fc70ae8a +248, 0xc6db9f0c4de3ed88 +249, 0x3e8dfae2a593cef9 +250, 0x615f7c38e3862b33 +251, 0x676c7996550d857 +252, 0xc6d520d54a5c266a +253, 0x202b1e8eef14aa2e +254, 0xa3a84891a27a582 +255, 0x84dbee451658d47f +256, 0x254c7cd97e777e3a +257, 0xf50b6e977f0eba50 +258, 0x2898b1d3062a4798 +259, 0x4096f7cbbb019773 +260, 0x9fb8e75548062c50 +261, 0x4647071e5ca318ec +262, 0x2b4750bdb3b3b01 +263, 0x88ac41cc69a39786 +264, 0x705e25476ef46fa3 +265, 0xc0c1db19884a48a6 +266, 0x1364c0afdbb465e5 +267, 0x58e98534701272a6 +268, 0x746a5ea9701517c0 +269, 0x523a70bc6b300b67 +270, 0x9b1c098eda8564ad +271, 0xfbaeb28d3637067f +272, 0xddd9a13551fdba65 +273, 0x56461a670559e832 +274, 0xab4fd79be85570ad +275, 0xd4b691ecaff8ca55 +276, 0x11a4495939e7f004 +277, 0x40d069d19477eb47 +278, 0xe790783d285cd81e +279, 0xde8218b16d935bc7 +280, 0x2635e8c65cd4182d +281, 0xeae402623e3454 +282, 0x9f99c833184e0279 +283, 0x3d0f79a0d52d84e7 +284, 0xc1f8edb10c625b90 +285, 0x9b4546363d1f0489 +286, 0x98d86d0b1212a282 +287, 0x386b53863161200d +288, 0xbe1165c7fe48a135 +289, 0xb9658b04dbbfdc8c +290, 0xcea14eddfe84d71a +291, 0x55d03298be74abe7 +292, 0x5be3b50d961ffd7e +293, 0xc76b1045dc4b78e1 +294, 0x7830e3ff3f6c3d4c +295, 0xb617adb36ca3729 +296, 0x4a51bdb194f14aa9 +297, 0x246024e54e6b682a +298, 0x33d42fc9c6d33083 +299, 0xadccba149f31e1d +300, 0x5183e66b9002f8b +301, 0x70eb2416404d51b7 +302, 0x26c25eb225535351 +303, 0xbc2d5b0d23076561 +304, 0x5823019ddead1da +305, 0x85cfa109fca69f62 +306, 0x26017933e7e1efd9 +307, 0x3ec7be9a32212753 +308, 0x697e8a0697cd6f60 +309, 0x44735f6cca03920f +310, 0x8cc655eb94ee212e +311, 0x8b8b74eba84929a0 +312, 0x7708ccedd0c98c80 +313, 0x1b6f21f19777cbe1 +314, 0x363e564bd5fadedb +315, 0x5921543a641591fe +316, 0xc390786d68ea8a1b +317, 0x9b293138dc033fca +318, 0x45447ca8dc843345 +319, 0xee6ef6755bc49c5e +320, 0x70a3a1f5163c3be5 +321, 0xf05e25448b6343b0 +322, 0x4739f4f8717b7e69 +323, 0xb006141975bf957 +324, 0x31874a91b707f452 +325, 0x3a07f2c90bae2869 +326, 0xb73dae5499a55c5e +327, 0x489070893bb51575 +328, 0x7129acf423940575 +329, 0x38c41f4b90130972 +330, 0xc5260ca65f5a84a1 +331, 0x6e76194f39563932 +332, 0x62ca1f9ca3de3ca6 +333, 0xb4a97874e640853f +334, 0x38ed0f71e311cc02 +335, 0xde183b81099e8f47 +336, 0x9bb8bf8e6694346 +337, 0xd15497b6bf81e0f2 +338, 0xaaae52536c00111 +339, 0x4e4e60d1435aaafd +340, 0x5a15512e5d6ea721 +341, 0xff0f1ffabfc6664f +342, 0xba3ffcedc5f97fec +343, 0xef87f391c0c6bfb6 +344, 0x4a888c5d31eb0f98 +345, 0x559a3fbfd7946e95 +346, 0xe45b44a0db5a9bad +347, 0x9457898964190af1 +348, 0xd9357dfaab76cd9e +349, 0xa60e907178d965a1 +350, 0x76b2dc3032dc2f4a +351, 0x13549b9c2802120 +352, 0x8656b965a66a1800 +353, 0x16802e6e22456a23 +354, 0x23b62edc60efaa9 +355, 0x6832a366e1e4ea3b +356, 0x46b1b41093ff2b1e +357, 0x55c857128143f219 +358, 0x7fc35ddf5e138200 +359, 0x790abe78be67467e +360, 0xa4446fc08babd466 +361, 0xc23d70327999b855 +362, 0x2e019d1597148196 +363, 0xfefd98e560403ab8 +364, 0xbe5f0a33da330d58 +365, 0x3078a4e9d43ca395 +366, 0x511bfedd6f12f2b3 +367, 0x8bc138e335be987c +368, 0x24640f803465716d +369, 0xf6530b04d0bd618f +370, 0x9b7833e5aa782716 +371, 0x778cd35aea5841b1 +372, 0xecea3c458cefbc60 +373, 0x5107ae83fc527f46 +374, 0x278ad83d44bd2d1a +375, 0x7014a382295aeb16 +376, 0xf326dd762048743f +377, 0x858633d56279e553 +378, 0x76408154085f01bc +379, 0x3e77d3364d02e746 +380, 0x2f26cea26cadd50b +381, 0x6d6846a4ecb84273 +382, 0x4847e96f2df5f76 +383, 0x5a8610f46e13ff61 +384, 0x4e7a7cac403e10dd +385, 0x754bdf2e20c7bc90 +386, 0x8bdd80e6c51bd0be +387, 0x61c655fae2b4bc52 +388, 0x60873ef48e3d2f03 +389, 0x9d7d8d3698a0b4a4 +390, 0xdf48e9c355cd5d4b +391, 0x69ecf03e20be99ac +392, 0xc1a0c5a339bd1815 +393, 0x2e3263a6a3adccb +394, 0x23557459719adbdc +395, 0xd1b709a3b330e5a +396, 0xade5ab00a5d88b9d +397, 0x69a6bd644120cfad +398, 0x40187ecceee92342 +399, 0x1c41964ba1ac78da +400, 0x9ac5c51cbecabe67 +401, 0xbdc075781cf36d55 +402, 0xeaf5a32246ded56 +403, 0xcda0b67e39c0fb71 +404, 0x4839ee456ef7cc95 +405, 0xf17092fdd41d5658 +406, 0x2b5d422e60ae3253 +407, 0x3effe71102008551 +408, 0x20a47108e83934b7 +409, 0xd02da65fe768a88f +410, 0xeb046bd56afa4026 +411, 0x70c0509c08e0fbe0 +412, 0x1d35c38d4f8bac6c +413, 0x9aa8eb6466f392e0 +414, 0x587bd4a430740f30 +415, 0x82978fe4bad4195 +416, 0xdc4ebc4c0feb50ab +417, 0xd3b7164d0240c06f +418, 0x6e2ad6e5a5003a63 +419, 0xa24b430e2ee6b59c +420, 0x2905f49fd5073094 +421, 0x5f209e4de03aa941 +422, 0x57b7da3e0bedb1dc +423, 0x5e054018875b01f5 +424, 0xb2f2da6145658db3 +425, 0xbd9c94a69a8eb651 +426, 0x9c5f9a07cd6ac749 +427, 0x2296c4af4d529c38 +428, 0x522ed800fafdefab +429, 0xe2a447ced0c66791 +430, 0x937f10d45e455fef +431, 0xc882987d9e29a24 +432, 0x4610bfd6a247ee1a +433, 0x562ba3e50870059 +434, 0x59d8d58793602189 +435, 0xfe9a606e3e34abe +436, 0x6825f7932a5e9282 +437, 0xe77f7061bab476ad +438, 0xbf42001da340ace3 +439, 0x9c3e9230f5e47960 +440, 0x2c0f700d96d5ad58 +441, 0x330048b7cd18f1f9 +442, 0xffc08785eca5cca9 +443, 0xb5879046915f07a5 +444, 0xef51fe26f83c988e +445, 0xfa4c2968e7881a9a +446, 0xc0a9744455a4aad +447, 0xbd2ad686d6313928 +448, 0x6b9f0984c127682a +449, 0xc9aaa00a5da59ed8 +450, 0x762a0c4b98980dbf +451, 0x52d1a2393d3ca2d1 +452, 0x1e9308f2861db15c +453, 0xe7b3c74fe4b4a844 +454, 0x485e15704a7fc594 +455, 0x9e7f67ea44c221f6 +456, 0xbab9ad47fde916e0 +457, 0x50e383912b7fc1f4 +458, 0xaad63db8abcef62d +459, 0xc2f0c5699f47f013 +460, 0xee15b36ada826812 +461, 0x2a1b1cf1e1777142 +462, 0x8adb03ede79e937d +463, 0xf14105ef65643bf3 +464, 0x752bbaefc374a3c7 +465, 0xa4980a08a5a21d23 +466, 0x418a1c05194b2db7 +467, 0xdd6ff32efe1c3cd6 +468, 0x272473ed1f0d3aa2 +469, 0x1e7fdebadabe6c06 +470, 0xd1baa90c17b3842f +471, 0xd3d3a778e9c8404a +472, 0x781ae7fda49fa1a0 +473, 0x61c44fdbdacc672d +474, 0x6d447d0a1404f257 +475, 0x9303e8bdfbfb894d +476, 0x3b3482cdec016244 +477, 0xb149bf245d062e7b +478, 0x96f8d54b14cf992d +479, 0x4741549a01f8c3d0 +480, 0x48270811b2992af +481, 0x7b58f175cd25d147 +482, 0x8f19a840b56f4be9 +483, 0x84a77f43c0951a93 +484, 0x34e1a69381f0c374 +485, 0xb158383c9b4040f +486, 0x372f1abc7cf3a9fa +487, 0x5439819a84571763 +488, 0xabf8515e9084e2fa +489, 0xb02312b9387ff99 +490, 0x238a85bb47a68b12 +491, 0x2068cb83857c49bb +492, 0xc6170e743083664c +493, 0x745cf8470bcb8467 +494, 0xe3a759a301670300 +495, 0x292c7686ad3e67da +496, 0x359efedaff192a45 +497, 0x511f2c31a2d8c475 +498, 0x97fd041bf21c20b3 +499, 0x25ef1fe841b7b3f6 +500, 0xbb71739e656f262d +501, 0x2729b0e989b6b7b8 +502, 0xd2142702ec7dbabf +503, 0x7008decd2488ee3f +504, 0x69daa95e303298d7 +505, 0xc35eca4efb8baa5a +506, 0xf3f16d261cec3b6c +507, 0x22371c1d75396bd3 +508, 0x7aefa08eccae857e +509, 0x255b493c5e3c2a2f +510, 0x779474a077d34241 +511, 0x5199c42686bea241 +512, 0x16c83931e293b8d3 +513, 0xa57fe8db8c0302c7 +514, 0xd7ace619e5312eb1 +515, 0x8740f013306d217c +516, 0xb6a1ad5e29f4d453 +517, 0x31abf7c964688597 +518, 0xbc3d791daed71e7 +519, 0x31ee4ca67b7056ed +520, 0x1ab5416bfe290ea3 +521, 0x93db416f6d3b843a +522, 0xed83bbe5b1dd2fed +523, 0xece38271470d9b6d +524, 0x3a620f42663cd8ae +525, 0x50c87e02acafee5d +526, 0xcabeb8bedbc6dab5 +527, 0x2880a6d09970c729 +528, 0x4aba5dd3bfc81bc +529, 0xaba54edf41080cec +530, 0xb86bb916fc85a169 +531, 0x4c41de87bc79d8ca +532, 0xcce2a202622945fe +533, 0x513f086fad94c107 +534, 0x18b3960c11f8cc96 +535, 0x2f0d1cfd1896e236 +536, 0x1702ae3880d79b15 +537, 0x88923749029ae81 +538, 0x84810d4bdec668eb +539, 0xf85b0a123f4fc68d +540, 0x93efd68974b6e4d1 +541, 0x5d16d6d993a071c9 +542, 0x94436858f94ca43b +543, 0xb3dbb9ed0cb180b6 +544, 0x6447030a010b8c99 +545, 0xd7224897c62925d8 +546, 0xb0c13c1d50605d3a +547, 0xdff02c7cb9d45f30 +548, 0xe8103179f983570d +549, 0xbc552037d6d0a24e +550, 0x775e500b01486b0d +551, 0x2050ac632c694dd6 +552, 0x218910387c4d7ae7 +553, 0xf83e8b68ff885d5d +554, 0xe3374ec25fca51a3 +555, 0xfa750ffa3a60f3af +556, 0x29ee40ba6df5592e +557, 0x70e21a68f48260d2 +558, 0x3805ca72cd40886e +559, 0x2f23e73f8eabf062 +560, 0x2296f80cdf6531ae +561, 0x903099ed968db43a +562, 0xf044445cf9f2929f +563, 0xcd47fdc2de1b7a1 +564, 0xaab1cbd4f849da99 +565, 0x5fc990688da01acb +566, 0xa9cee52ea7dab392 +567, 0xecefc3a4349283a8 +568, 0xdd6b572972e3fafc +569, 0xc1f0b1a2ffb155da +570, 0xc30d53fc17bd25c8 +571, 0x8afa89c77834db28 +572, 0x5569a596fb32896c +573, 0x36f207fc8df3e3d4 +574, 0x57c2bd58517d81db +575, 0xb524693e73d0061c +576, 0xb69f6eb233f5c48b +577, 0x4f0fb23cab8dc695 +578, 0x492c1ad0a48df8df +579, 0xf6dcc348ec8dec1f +580, 0xa4d8708d6eb2e262 +581, 0x4c2072c2c9766ff1 +582, 0xa9bf27c4304875f0 +583, 0xfc8fb8066d4f9ae2 +584, 0x188095f6235fec3c +585, 0x1d8227a2938c2864 +586, 0x89ea50c599010378 +587, 0xcac86df0a7c6d56d +588, 0x47a8c5df84c7d78 +589, 0xe607ae24ea228bfa +590, 0x36624a7996efe104 +591, 0x5d72881c1227d810 +592, 0x78694a6750374c8 +593, 0x7b9a217d4ab5ff45 +594, 0xd53e5d6f7504becc +595, 0x197a72d3f4889a0e +596, 0xfdc70c4755a8df36 +597, 0xd0fda83748c77f74 +598, 0x7ddc919ac9d6dcc9 +599, 0x785c810a6a2dc08b +600, 0xba4be83e7e36896c +601, 0x379d6fe80cf2bffe +602, 0x74cae2dabc429206 +603, 0x1efac32d5d34c917 +604, 0x3cb64e2f98d36e70 +605, 0xc0a7c3cdc3c60aa7 +606, 0x699dfadd38790ebe +607, 0x4861e61b3ecfbeac +608, 0x531744826c345baa +609, 0x5ec26427ad450cba +610, 0xf2c1741479abdcae +611, 0xe9328a78b2595458 +612, 0x30cd1bdf087acd7f +613, 0x7491ced4e009adbe +614, 0xdcd942df1e2e7023 +615, 0xfe63f01689fee35 +616, 0x80282dfe5eaedc42 +617, 0x6ecdea86495f8427 +618, 0xe0adfdd5e9ed31c3 +619, 0xf32bd2a7418127e +620, 0x8aabba078db6ee2 +621, 0xa8a8e60499145aca +622, 0xf76b086ac4e8a0f2 +623, 0x6e55b3c452ff27f8 +624, 0xe18fa7cd025a71bf +625, 0xeed7b685fde0fa25 +626, 0xba9b6c95867fa721 +627, 0x4c2603bc69de2df2 +628, 0xaac87eee1b58cd66 +629, 0x3c9af6656e01282c +630, 0x2dfa05ce8ff476b6 +631, 0xeae9143fcf92f23d +632, 0x3f0699f631be3bc8 +633, 0xa0f5f79f2492bd67 +634, 0x59c47722388131ed +635, 0x5f6e9d2941cef1de +636, 0xe9ad915c09788b7b +637, 0x92c6d37e4f9482f5 +638, 0x57d301b7fdadd911 +639, 0x7e952d23d2a8443 +640, 0xbb2fa5e0704b3871 +641, 0xe5642199be36e2d5 +642, 0x5020b60d54358291 +643, 0xa0b6317ec3f60343 +644, 0xb57b08b99540bc5c +645, 0x21f1890adc997a88 +646, 0xfcf824200dd9da2d +647, 0x8146293d83d425d1 +648, 0xdadfbf5fbb99d420 +649, 0x1eb9bbc5e6482b7d +650, 0xd40ff44f1bbd0f1c +651, 0xa9f948ba2d08afa5 +652, 0x638cc07c5301e601 +653, 0x1f984baa606e14e8 +654, 0x44e153671081f398 +655, 0xb17882eeb1d77a5d +656, 0x5fd8dbee995f14c +657, 0xff3533e87f81b7fe +658, 0x2f44124293c49795 +659, 0x3bf6b51e9360248 +660, 0x72d615edf1436371 +661, 0x8fc5cf4a38adab9d +662, 0xfa517e9022078374 +663, 0xf356733f3e26f4d8 +664, 0x20ea099cdc6aad40 +665, 0xe15b977deb37637d +666, 0xcc85601b89dae88d +667, 0x5768c62f8dd4905c +668, 0xa43cc632b4e56ea +669, 0xc4240cf980e82458 +670, 0xb194e8ffb4b3eeb6 +671, 0xee753cf2219c5fa1 +672, 0xfe2500192181d44d +673, 0x2d03d7d6493dd821 +674, 0xff0e787bb98e7f9b +675, 0xa05cf8d3bd810ce7 +676, 0x718d5d6dcbbdcd65 +677, 0x8d0b5343a06931c +678, 0xae3a00a932e7eaf9 +679, 0x7ed3d8f18f983e18 +680, 0x3bb778ee466dc143 +681, 0x711c685c4e9062c0 +682, 0x104c3af5d7ac9834 +683, 0x17bdbb671fb5d5cf +684, 0xabf26caead4d2292 +685, 0xa45f02866467c005 +686, 0xf3769a32dc945d2d +687, 0xe78d0007f6aabb66 +688, 0x34b60be4acbd8d4b +689, 0x58c0b04b69359084 +690, 0x3a8bb354c212b1 +691, 0x6b82a8f3d70058d5 +692, 0x405bdef80a276a4a +693, 0xe20ca40ee9195cad +694, 0xf5dd96ba2446fefd +695, 0xc1e180c55fe55e3c +696, 0xa329caf6daa952b3 +697, 0xb4809dd0c84a6b0a +698, 0xd27f82661070cee7 +699, 0xa7121f15ee2b0d8a +700, 0x4bdaea70d6b34583 +701, 0xe821dc2f310f7a49 +702, 0x4c00a5a68e76f647 +703, 0x331065b064a2d5ea +704, 0xac0c2ce3dc04fa37 +705, 0x56b32b37b8229008 +706, 0xe757cdb51534fcfa +707, 0xd3ff183576b2fad7 +708, 0x179e1f4190f197a7 +709, 0xf874c626a7c9aae5 +710, 0xd58514ffc37c80e4 +711, 0xc65de31d33fa7fd3 +712, 0x6f6637052025769b +713, 0xca1c6bdadb519cc0 +714, 0xd1f3534cde37828a +715, 0xc858c339eee4830a +716, 0x2371eacc215e02f4 +717, 0x84e5022db85bbbe9 +718, 0x5f71c50bba48610e +719, 0xe420192dad9c323f +720, 0x2889342721fca003 +721, 0x83e64f63334f501d +722, 0xac2617172953f2c +723, 0xfa1f78d8433938ff +724, 0x5578382760051462 +725, 0x375d7a2e3b90af16 +726, 0xb93ff44e6c07552d +727, 0xded1d5ad811e818c +728, 0x7cf256b3b29e3a8c +729, 0x78d581b8e7bf95e8 +730, 0x5b69192f2caa6ad3 +731, 0xa9e25855a52de3ce +732, 0x69d8e8fc45cc188d +733, 0x5dd012c139ad347d +734, 0xfcb01c07b77db606 +735, 0x56253e36ab3d1cce +736, 0x1181edbb3ea2192 +737, 0x325bef47ff19a08d +738, 0xd3e231ceb27e5f7 +739, 0x8e819dd2de7956d2 +740, 0x34a9689fe6f84a51 +741, 0x3e4eeb719a9c2927 +742, 0x5c3b3440581d0aaf +743, 0x57caf51897d7c920 +744, 0xec6a458130464b40 +745, 0xe98f044e0da40e9b +746, 0xbe38662020eeb8e7 +747, 0x7b8c407c632724ae +748, 0x16c7cfa97b33a544 +749, 0xd23359e2e978ae5a +750, 0x4fdba458250933dd +751, 0x3c9e0713cfe616ba +752, 0x6f0df87b13163b42 +753, 0xc460902cb852cc97 +754, 0x289df8fefd6b0bce +755, 0x4ac2a2a1c3fb8029 +756, 0x2fc3e24d8b68eef7 +757, 0x34564386a59aab9a +758, 0x31047391ebd67ce4 +759, 0x6c23d070a0564d41 +760, 0xba6387b2b72545f7 +761, 0xcdcf1008058387af +762, 0xc9308fa98db05192 +763, 0xdbdbb5abd01a9d84 +764, 0x937088275c7804ab +765, 0x6f6accfefe34ee81 +766, 0x5c33c74c49cfdb2c +767, 0x5e1a771edfb92bd3 +768, 0x6e89b009069ecae7 +769, 0x34d64e17ec0e8968 +770, 0x841203d0cde0c330 +771, 0x7642cc9d7eb9e9cb +772, 0xca01d2e8c128b97e +773, 0x5b8390617b3304ab +774, 0x52ec4ed10de1eb2d +775, 0xb90f288b9616f237 +776, 0x5bd43cd49617b2e2 +777, 0x1a53e21d25230596 +778, 0x36ccd15207a21cd6 +779, 0xc8263d780618fd3c +780, 0x6eb520598c6ce1cb +781, 0x493c99a3b341564f +782, 0xab999e9c5aa8764f +783, 0xab2fa4ceaba84b +784, 0xbbd2f17e5cb2331b +785, 0xc8b4d377c0cc4e81 +786, 0x31f71a6e165c4b1e +787, 0xd1011e55fb3addaa +788, 0x5f7ec34728dfa59 +789, 0x2aef59e60a84eb0f +790, 0x5dde6f09aec9ad5f +791, 0x968c6cdbc0ef0438 +792, 0x1957133afa15b13a +793, 0xbaf28f27573a64c2 +794, 0xc6f6ddd543ebf862 +795, 0xdd7534315ec9ae1e +796, 0xd2b80cd2758dd3b +797, 0xa38c3da00cc81538 +798, 0x15c95b82d3f9b0f9 +799, 0x6704930287ce2571 +800, 0x9c40cc2f6f4ecb0c +801, 0xc8de91f50b22e94e +802, 0x39272e8fddbfdf0a +803, 0x879e0aa810a117d +804, 0xa312fff4e9e5f3bd +805, 0x10dd747f2835dfec +806, 0xeb8466db7171cdae +807, 0xaa808d87b9ad040a +808, 0xab4d2229a329243a +809, 0x7c622f70d46f789c +810, 0x5d41cef5965b2a8e +811, 0xce97ec4702410d99 +812, 0x5beba2812c91211b +813, 0xf134b46c93a3fec7 +814, 0x76401d5630127226 +815, 0xc55fc9d9eacd4ec1 +816, 0xaec8cefaa12f813f +817, 0x2f845dcfd7b00722 +818, 0x3380ab4c20885921 +819, 0xdb68ad2597691b74 +820, 0x8a7e4951455f563f +821, 0x2372d007ed761c53 +822, 0xcab691907714c4f1 +823, 0x16bc31d6f3abec1a +824, 0x7dff639fbcf1824 +825, 0x6666985fbcff543d +826, 0xb618948e3d8e6d0c +827, 0x77b87837c794e068 +828, 0xcd48288d54fcb5a8 +829, 0x47a773ed6ae30dc3 +830, 0xba85ae44e203c942 +831, 0xa7a7b21791a25b2d +832, 0x4029dd92e63f19e0 +833, 0xc2ad66ab85e7d5aa +834, 0xa0f237c96fdab0db +835, 0xffefb0ab1ca18ed +836, 0x90cb4500785fd7d5 +837, 0xa7dd3120f4876435 +838, 0x53f7872624694300 +839, 0xea111326ff0040d9 +840, 0x5f83cb4cce40c83b +841, 0x918e04936c3b504d +842, 0x87a8db4c0e15e87c +843, 0x7cff39da6a0dedd0 +844, 0x36f7de2037f85381 +845, 0xd1d8d94022a1e9a7 +846, 0x2c9930127dc33ec9 +847, 0x6cb4719dcd0101c6 +848, 0xc01868cde76935f7 +849, 0x6b86f2ec1ab50143 +850, 0x68af607d8d94ae61 +851, 0xe216c5b95feedf34 +852, 0x4b866bd91efe2e4b +853, 0x4bff79df08f92c99 +854, 0x6ff664ea806acfd1 +855, 0x7fce0b3f9ece39bc +856, 0x29bc90b59cb3db97 +857, 0x833c4b419198607d +858, 0xf3573e36ca4d4768 +859, 0x50d71c0a3c2a3fa8 +860, 0xd754591aea2017e7 +861, 0x3f9126f1ee1ebf3 +862, 0xe775d7f4b1e43de8 +863, 0xe93d51628c263060 +864, 0x83e77f6fb32d6d82 +865, 0x43dd7eef823408e4 +866, 0x1c843c2c90180662 +867, 0xe924dafb9a16066b +868, 0x6af3ee96e7b7fbd9 +869, 0x94d5c4f37befcd1f +870, 0x40ffb04bedef4236 +871, 0x71c17bbc20e553e +872, 0x101f7a0a6208729f +873, 0x5ca34570cf923548 +874, 0x8e3139db2e96e814 +875, 0x3ab96d96263d048d +876, 0x97f3c0bbc6755c3c +877, 0x31fc72daedaef3dc +878, 0x71f8d7855d10789b +879, 0xce6dc97b4662333b +880, 0xfddc2aabd342bc61 +881, 0xefbd4007ff8c7d2e +882, 0xf72cd6c689ef8758 +883, 0x932c8b0c0e755137 +884, 0x94cc4dedd58ff69 +885, 0xde4dfd6890535979 +886, 0xdb00dcd2dcb4a50a +887, 0xb0466240b4548107 +888, 0x9cb9264c7b90d1a3 +889, 0x357e378e9be5766b +890, 0x6e0316ef03367bbf +891, 0x201ea18839544ca +892, 0x803ff3406be5f338 +893, 0xf9d5e82fd4144bb2 +894, 0x1b6b88ca701e9f47 +895, 0xd1fe5ab8e1f89cc0 +896, 0x14171fe176c4bece +897, 0x887948bdef78beaa +898, 0x80449ddc3eb9b977 +899, 0x5f4e1f900fb4bcf3 +900, 0xbe30f8701909f8e2 +901, 0xd1f2a2fb5503306d +902, 0x6b1c77238dc23803 +903, 0x102156a6c9860f66 +904, 0x4cd446e099edf4c1 +905, 0xc79ac6cbc911f33b +906, 0x3ee096ffe3384f1c +907, 0xb58f83b18a306dc7 +908, 0x9f76582141de56b2 +909, 0x9ddfa85e02c13866 +910, 0x4d9a19d4ce90a543 +911, 0xbf81ab39fd17d376 +912, 0x5327e5054c6a74f1 +913, 0xd5062dd31db1a9b7 +914, 0x645853735527edc +915, 0x485393967f91af08 +916, 0xeff9667dcf77ca68 +917, 0xd012313f5fbec464 +918, 0xbeae35bdfae55144 +919, 0x302c41ebac8444a0 +920, 0x9ccdb6c2fe58fba8 +921, 0x567753af68ed23f8 +922, 0xff90f790e43efec3 +923, 0x970cc756fb799696 +924, 0xe59239d1c44915 +925, 0x4d2d189fb3941f05 +926, 0x96f23085db165a9c +927, 0xa1202dec7a37b1a5 +928, 0xc0c1ee74bcd7dc1a +929, 0x9edcf2048b30333a +930, 0xd848588ba7e865fb +931, 0x8d9f0897317cab40 +932, 0x67b96f15e25924fb +933, 0xefc8d8536619ee42 +934, 0xf3f621d22bdde0c2 +935, 0x68610a0de862ae32 +936, 0xa22ca5142de24cbd +937, 0x8815452f4e6b4801 +938, 0x4e9c1b607b2750e5 +939, 0x19b3c09ba6fc9b25 +940, 0x9b2543c8836780ac +941, 0xe702b8f950e56431 +942, 0xb357cc329cac3917 +943, 0x387bf86a17a31e08 +944, 0x9940b983d331b163 +945, 0xf5d89d7fe9095e18 +946, 0x4362682329e5c4d1 +947, 0xd2132573f6ae7b42 +948, 0xc0a5849e23a61606 +949, 0xdadbddf47265bc02 +950, 0x1b96f00339a705f7 +951, 0x94e6642329288913 +952, 0x825ab3f10e6d330b +953, 0x1a1c31ac9d883ea0 +954, 0xb49076b7155c6f47 +955, 0x920cf3085dfe3ccb +956, 0x9743407c9f28e825 +957, 0x6ce8a28622402719 +958, 0xce2fe67e06baf8a6 +959, 0x3a16b34784ecf5e6 +960, 0x140467cc1d162a0c +961, 0x32d4772692ab625 +962, 0xa4f4b28562f43336 +963, 0x885b4335457bd84a +964, 0x499d3ed26c87ad8a +965, 0xc7328bcedb9a545e +966, 0xc6dd76a6cbf5d2b2 +967, 0xba9c22be404ee1aa +968, 0x70e6aee45f23521d +969, 0x61e03a798593c177 +970, 0x171671f809c68213 +971, 0x28d54872fc1d914c +972, 0x43c2fcd9bd098b53 +973, 0x172ad4c4a98b9d37 +974, 0x330860c9460f2516 +975, 0x49547f472df984f4 +976, 0x873b2436d3f0e114 +977, 0x6f99accf4ea050b6 +978, 0x5968ac874ed51613 +979, 0x4939d70d29a3c611 +980, 0x11f381ed28738d3d +981, 0xa97430d36ab3a869 +982, 0xe6fa880801129e22 +983, 0xf84decbd8f48c913 +984, 0x4425c0ed1e9a82a5 +985, 0x7a1f9485e9929d5a +986, 0xc7c51f155dfce1c6 +987, 0x9619a39501d74f2b +988, 0x7c7035955dbf4c1b +989, 0xc61ee569cf57c2c9 +990, 0x3eaf7c5b0df734e1 +991, 0xe71cb4064d1ede05 +992, 0x356e3cec80e418b2 +993, 0xca04306243a15be6 +994, 0x941cf3881fa18896 +995, 0x30dbb0e819d644e0 +996, 0xaae22c0bef02859a +997, 0x7bd30917bbaa8a94 +998, 0x2672547bc8d7d329 +999, 0x4955c92aaa231578 diff --git a/local_packages/numpy/random/tests/data/pcg64dxsm-testset-2.csv b/local_packages/numpy/random/tests/data/pcg64dxsm-testset-2.csv new file mode 100644 index 0000000..878c5ea --- /dev/null +++ b/local_packages/numpy/random/tests/data/pcg64dxsm-testset-2.csv @@ -0,0 +1,1001 @@ +seed, 0x0 +0, 0xd97e4a147f788a70 +1, 0x8dfa7bce56e3a253 +2, 0x13556ed9f53d3c10 +3, 0x55dbf1c241341e98 +4, 0xa2cd98f722eb0e0a +5, 0x83dfc407203ade8 +6, 0xeaa083df518f030d +7, 0x44968c87e432852b +8, 0x573107b9cb8d9ecc +9, 0x9eedd1da50b9daca +10, 0xb33a6735ca451e3c +11, 0x72830d2b39677262 +12, 0x9da8c512fd0207e8 +13, 0x1fc5c91954a2672b +14, 0xd33479437116e08 +15, 0x9ccdd9390cee46f3 +16, 0x1fd39bb01acd9e76 +17, 0xedc1869a42ff7fe5 +18, 0xbd68ca0b42a6e7e9 +19, 0x620b67df09621b1f +20, 0xfa11d51bd6950221 +21, 0xc8c45b36e7d28d08 +22, 0xe9c91272fbaad777 +23, 0x2dc87a143f220e90 +24, 0x6376a7c82361f49d +25, 0x552c5e434232fe75 +26, 0x468f7f872ac195bc +27, 0x32bed6858125cf89 +28, 0xe4f06111494d09d3 +29, 0xa5c166ffea248b80 +30, 0x4e26605b97064a3f +31, 0xceafd9f6fc5569d +32, 0xb772f2f9eed9e106 +33, 0x672c65e6a93534e2 +34, 0xcdc5e1a28d1bd6a0 +35, 0x1ed9c96daeebd3e3 +36, 0x4d189dcfc0c93c3f +37, 0x50df5a95c62f4b43 +38, 0xcccf4949fa65bbb8 +39, 0x19b8073d53cdc984 +40, 0x6fb40bba35483703 +41, 0xb02de4aef86b515a +42, 0x4d90c63655350310 +43, 0xea44e4089825b16c +44, 0x8d676958b1f9da2b +45, 0x6d313940917ae195 +46, 0x1b1d35a4c1dd19f4 +47, 0x117720f8397337ef +48, 0xcc073cf3ac11eeaa +49, 0x8331ec58a9ff8acb +50, 0xf3dc2a308b6b866f +51, 0x7eba1202663382b6 +52, 0x8269839debeb4e5a +53, 0x87fd3dc0f9181a8e +54, 0xabe62ddd3c925f03 +55, 0x7f56f146944fe8d4 +56, 0xc535972150852068 +57, 0x60b252d453bd3a68 +58, 0x4251f0134634490a +59, 0x338950da210dfeb2 +60, 0xcadfe932971c9471 +61, 0xfb7049457fab470e +62, 0x9bfb8145a4459dff +63, 0x4a89dda3898f9d8a +64, 0x88cc560151483929 +65, 0x277dc820f4b6796e +66, 0x3524bd07ea0afb88 +67, 0x92eb6ffb2bf14311 +68, 0xf6559be0783f3fe9 +69, 0xf0844f9af54af00d +70, 0xdd5e0b59adcef8a +71, 0x4ff7e4f2ab18554c +72, 0x3fa22c8a02634587 +73, 0x1db8e1a9442fe300 +74, 0x40cf15953ad3d3e7 +75, 0x92af15fe1a9f6f0a +76, 0xab4a0e466fb0cfd +77, 0x944f1555a06cca82 +78, 0x10cf48412f1f6066 +79, 0x7f51f9a455f9e8e1 +80, 0x47ee93530f024c7e +81, 0x36cf2f0413e0f6f2 +82, 0xa315e23731969407 +83, 0xd8e2796327cf5f87 +84, 0xa86072696a555c34 +85, 0xee3f0b8804feaab7 +86, 0x41e80dc858f8360b +87, 0x31ec2e9b78f5b29 +88, 0xd397fb9b8561344c +89, 0x28081e724e649b74 +90, 0x5c135fc3fc672348 +91, 0x9a276ca70ce9caa0 +92, 0x9216da059229050a +93, 0xcf7d375ed68007b0 +94, 0xa68ad1963724a770 +95, 0xd4350de8d3b6787c +96, 0xee7d2c2cc275b6d2 +97, 0x71645ec738749735 +98, 0x45abdf8c68d33dbb +99, 0xe71cadb692c705ea +100, 0x60af6f061fd90622 +101, 0x1eabe2072632c99d +102, 0x947dda995a402cb6 +103, 0xbb19f49a3454f3b +104, 0xe6e43e907407758c +105, 0xfe2b67016bd6873a +106, 0x7fdb4dd8ab30a722 +107, 0x39d3265b0ff1a45b +108, 0xed24c0e4fce8d0c2 +109, 0xf6e074f86faf669d +110, 0x9142040df8dc2a79 +111, 0x9682ab16bc939a9c +112, 0x6a4e80c378d971c8 +113, 0x31309c2c7fc2d3d6 +114, 0xb7237ec682993339 +115, 0x6a30c06bb83dccd9 +116, 0x21c8e9b6d8e7c382 +117, 0x258a24ae6f086a19 +118, 0xb76edb5be7df5c35 +119, 0x3c11d7d5c16e7175 +120, 0xbdfc34c31eff66e1 +121, 0x8af66e44be8bf3a2 +122, 0x3053292e193dec28 +123, 0xd0cc44545b454995 +124, 0x408ac01a9289d56 +125, 0x4e02d34318ec2e85 +126, 0x9413ff3777c6eb6b +127, 0xa3a301f8e37eb3df +128, 0x14e6306bd8d8f9f9 +129, 0xd3ea06ce16c4a653 +130, 0x170abe5429122982 +131, 0x7f9e6fddc6cacb85 +132, 0xa41b93e10a10a4c8 +133, 0x239216f9d5b6d0b5 +134, 0x985fcb6cb4190d98 +135, 0xb45e3e7c68f480c6 +136, 0xc1b2fc2e0446211c +137, 0x4596adb28858c498 +138, 0x2dd706f3458ddc75 +139, 0x29c988c86f75464 +140, 0xac33a65aa679a60 +141, 0xa28fef762d39d938 +142, 0x541e6fa48647f53 +143, 0x27838d56b2649735 +144, 0x8e143d318a796212 +145, 0xaea6097745f586b8 +146, 0x636143330f8ee2e6 +147, 0xc2d05fd8b945b172 +148, 0x6e355f9eb4353055 +149, 0xeb64ca42e8bf282e +150, 0xe8202dfd9da0fe5 +151, 0x7305689c9d790cba +152, 0xf122f8b1bef32970 +153, 0x9562887e38c32ba5 +154, 0xf9cd9be121b738d +155, 0x6238e0c398307913 +156, 0x5f2e79bb07c30f47 +157, 0x8ce8e45c465006e +158, 0x39281fe1e99e2441 +159, 0xafb10c2ca2874fea +160, 0x6e52f91633f83cf +161, 0x8ff12c1ac73c4494 +162, 0xe48608a09365af59 +163, 0xefd9bbc7e76e6a33 +164, 0xbe16a39d5c38ec92 +165, 0x6a6ffbcaf5a2330f +166, 0xdd5d6ac7d998d43d +167, 0x207bf978226d4f11 +168, 0xf8eec56bd2a0f62e +169, 0xa5bccf05dce0d975 +170, 0x93cf3ec1afe457a6 +171, 0x38651466d201f736 +172, 0x3ad21473985c9184 +173, 0xc6407a3bd38c92a6 +174, 0xb1ec42c7afa90a25 +175, 0xbdeca984df8b7dd3 +176, 0xb6926b1d00aa6c55 +177, 0x86141d0022352d49 +178, 0x169316256135ee09 +179, 0xffb1c7767af02a5c +180, 0x502af38ad19f5c91 +181, 0xfbf6cbc080086658 +182, 0x33cf9b219edae501 +183, 0x46e69bebd77b8862 +184, 0xf11e0cc91125d041 +185, 0xb4cd1649f85e078f +186, 0xb49be408db4e952 +187, 0xb0b8db46140cce3c +188, 0xba647f2174012be7 +189, 0x4f0a09e406970ac9 +190, 0xf868c7aec9890a5c +191, 0xde4c8fa7498ea090 +192, 0x872ceb197978c1d4 +193, 0x1eb5cd9c3269b258 +194, 0x3ea189f91724f014 +195, 0x41379656f7746f2c +196, 0x7bd18493aca60e51 +197, 0x5380c23b0cbbf15e +198, 0x920b72835f88246b +199, 0x24d7f734a4548b8e +200, 0x9944edb57e5aa145 +201, 0x4628e136ebb8afe1 +202, 0xb4ee6a776356e2a7 +203, 0x481cbe9744ccf7d7 +204, 0x7e8d67e8b0b995d9 +205, 0xeeacde100af7b47e +206, 0x103da08f2487dab7 +207, 0x6b9890a91d831459 +208, 0xd0c5beae37b572c7 +209, 0xfdccc371ee73fcc +210, 0x65438f0a367a2003 +211, 0x5d23b2c818a7e943 +212, 0x9a8ed45ac04b58b3 +213, 0xdaf3c3f1695dce10 +214, 0x5960eec706fa2bc0 +215, 0x98ca652facb80d40 +216, 0x72970ae5e2194143 +217, 0x18c6374d878c5c94 +218, 0x20fa51f997381900 +219, 0x3af253dba26d6e1d +220, 0x1b23d65db15c7f78 +221, 0x9f53ae976259b0e3 +222, 0x9a6addb28dc92d49 +223, 0x1e085c4accd0a7d7 +224, 0xe9d3f4cc9bad6ce5 +225, 0xe018fad78b5b1059 +226, 0x5ef7682232b4b95 +227, 0xb2242aa649f5de80 +228, 0x8f3e6d8dd99b9e4e +229, 0xb9be6cc22949d62a +230, 0xecbdc7beaa5ff1fe +231, 0xd388db43a855bdf0 +232, 0xd71ee3238852568d +233, 0x85ab3056304c04b5 +234, 0x2ed7ae7ad3cfc3cb +235, 0x781d1b03d40b6c48 +236, 0x7d3c740886657e6d +237, 0x982cfa6828daa6b0 +238, 0x278579599c529464 +239, 0x773adecfae9f0e08 +240, 0x63a243ea4b85c5d7 +241, 0x59940074fc3709e1 +242, 0xc914a2eed58a6363 +243, 0x2602b04274dd724c +244, 0xdf636eb7636c2c42 +245, 0x891a334d0d26c547 +246, 0xde8cd586d499e22d +247, 0x3ea1aa4d9b7035b6 +248, 0xd085cff6f9501523 +249, 0xe82a872f374959e +250, 0x55cb495bbd42cc53 +251, 0x5f42b3226e56ca97 +252, 0xea463f6f203493a3 +253, 0xeef3718e57731737 +254, 0x1bd4f9d62b7f9f3c +255, 0x19284f5e74817511 +256, 0xaf6e842c7450ca87 +257, 0x1d27d2b08a6b3600 +258, 0xfb4b912b396a52e3 +259, 0x30804d4c5c710121 +260, 0x4907e82564e36338 +261, 0x6441cf3b2900ddb7 +262, 0xd76de6f51988dc66 +263, 0x4f298ef96fd5e6d2 +264, 0x65432960c009f83d +265, 0x65ebed07e1d2e3df +266, 0xf83ee8078febca20 +267, 0x7bb18e9d74fc5b29 +268, 0x597b5fbc2261d91 +269, 0xea4f8ed0732b15b2 +270, 0xba2267f74f458268 +271, 0x3f304acabd746bbb +272, 0x7bd187af85659a82 +273, 0x88e20dbdb7a08ea3 +274, 0x2a2dc948c772fcb4 +275, 0x87784fec2993c867 +276, 0x89163933cd362d4e +277, 0xfd7b24f04302f957 +278, 0x9bdd544405dfb153 +279, 0xddee0fac58ffc611 +280, 0xa8e8993417e71ec1 +281, 0x55e0ab46ff7757af +282, 0x53e7645f08d3d7df +283, 0xbf78e563bc656ba2 +284, 0x1d162253b45ee2de +285, 0x15e2bfefedf29eb4 +286, 0x4e2a4584aa394702 +287, 0xa89fb12b01525897 +288, 0x825bd98f0544e4df +289, 0xfc6c50da6750700 +290, 0xc24aaabde7d28423 +291, 0x79d6f4660fcb19e5 +292, 0xee7d4fb40c8d659f +293, 0x70bc281b462e811d +294, 0x23ed4dc9636519a7 +295, 0xcb7c3f5a5711b935 +296, 0xe73090e0508c5d9d +297, 0xb25a331f375952a6 +298, 0xa64c86e0c04740f6 +299, 0xb8f3ffc8d56ac124 +300, 0x2479266fc5ee6b15 +301, 0x8d5792d27f5ffbcb +302, 0xb064298be946cd52 +303, 0xf0934a98912ffe26 +304, 0xbe805682c6634d98 +305, 0xe0e6e2c010012b4f +306, 0x58c47d475f75976 +307, 0x358c9a6e646b2b4a +308, 0x7e7c4ffca5b17ba7 +309, 0x43585c8c9a24a04c +310, 0x5154ddbcd68d5c2c +311, 0x4a2b062d3742a5e +312, 0xca5691191da2b946 +313, 0x696a542109457466 +314, 0x9eb5d658a5022ba5 +315, 0x8158cf6b599ab8dc +316, 0x1b95391eaa4af4a6 +317, 0x9953e79bd0fc3107 +318, 0x8639690086748123 +319, 0x2d35781c287c6842 +320, 0x393ef0001cd7bc8f +321, 0xe3a61be8c5f2c22a +322, 0x5e4ff21b847cc29b +323, 0x4c9c9389a370eb84 +324, 0xd43a25a8fc3635fa +325, 0xf6790e4a85385508 +326, 0x37edf0c81cb95e1d +327, 0x52db00d6e6e79af8 +328, 0x3b202bceeb7f096 +329, 0x2a164a1c776136bb +330, 0x73e03ee3fd80fd1b +331, 0xd2c58c0746b8d858 +332, 0x2ed2cb0038153d22 +333, 0x98996d0fc8ceeacc +334, 0xa4ed0589936b37f +335, 0x5f61cf41a6d2c172 +336, 0xa6d4afb538c110d7 +337, 0xe85834541baadf1a +338, 0x4c8967107fd49212 +339, 0x49bafb762ab1a8c1 +340, 0x45d540e2a834bf17 +341, 0x1c0ec8b4ed671dac +342, 0x3d503ce2c83fe883 +343, 0x437bfffd95f42022 +344, 0xc82d1e3d5c2bc8d2 +345, 0x7a0a9cbfcb0d3f24 +346, 0xc0a4f00251b7a3be +347, 0xb5be24e74bb6a1c6 +348, 0xa3104b94b57545b1 +349, 0x86de7d0c4b97b361 +350, 0x879c1483f26538a6 +351, 0xd74c87557f6accfb +352, 0x2f9be40dbf0fe8a1 +353, 0x445a93398f608d89 +354, 0x7b3cb8a7211d7fdc +355, 0xe86cc51290d031e7 +356, 0x33ef3594052ad79f +357, 0xc61911d241dbb590 +358, 0x37cccb0c0e3de461 +359, 0xb75259124080b48b +360, 0xd81e8961beb4abe5 +361, 0xf4542deb84a754e +362, 0x6ea036d00385f02e +363, 0xa7b60b0ac3b88681 +364, 0x108a6c36ca30baf5 +365, 0x4a2adc5bbfe2bf07 +366, 0x4079501f892a5342 +367, 0x55e113963c5448f0 +368, 0x8019ff4903b37242 +369, 0x109c6dcdb7ec6618 +370, 0x1239ac50944da450 +371, 0xe1399c7f94c651c1 +372, 0x5a6bbbae388d365a +373, 0x4d72be57b8810929 +374, 0x3f067df24384e1fb +375, 0x4f8b9e0f7f6c7be +376, 0x202492c342a3b08 +377, 0x250753192af93a3 +378, 0xfba1159d9de2cb8e +379, 0xba964497ab05505c +380, 0x1329ec5d8a709dca +381, 0x32927cacb6cd22bb +382, 0x6b4d7db904187d56 +383, 0xe76adccf8e841e02 +384, 0x8c4bf4b6a788202 +385, 0x3013a3b409831651 +386, 0x7427d125c475412f +387, 0x84dcc4bb2bf43202 +388, 0x117526f1101372a5 +389, 0xfe95d64b8984bd72 +390, 0x524e129934cc55c1 +391, 0xc3db4b0418c36d30 +392, 0xe1cb2047e9c19f7a +393, 0xea43d6c8d8982795 +394, 0xe80ac8a37df89ed +395, 0xfecc2104329ed306 +396, 0xa5c38aac9c1d51ea +397, 0x3abe5d1c01e4fe17 +398, 0x717a805d97fcc7ac +399, 0x94441f8207a1fb78 +400, 0x22d7869c5f002607 +401, 0x349e899f28c3a1b9 +402, 0x5639950cdea92b75 +403, 0x7e08450497c375b +404, 0x94bf898b475d211d +405, 0x75c761a402375104 +406, 0x1930920ec9d2a1e7 +407, 0xb774ba1bc6f6e4e2 +408, 0xf715602412e5d900 +409, 0x87bb995f4a13f0ba +410, 0xa3c787868dfa9c8d +411, 0xa17fd42a5a4f0987 +412, 0x4a9f7d435242b86 +413, 0x240364aff88f8aef +414, 0xe7cd4cf4bf39f144 +415, 0xd030f313ca4c2692 +416, 0xc46696f4e03ec1e9 +417, 0x22c60f1ec21060b3 +418, 0x16c88058fd68986f +419, 0x69ca448e8e6bde3f +420, 0x3466c2cdec218abd +421, 0x837ac4d05e6b117d +422, 0x911210e154690191 +423, 0x9ece851d6fa358b7 +424, 0x42f79cb0c45e7897 +425, 0xbf7583babd7c499b +426, 0x2059fe8031c6e0b9 +427, 0xabbec8fc00f7e51d +428, 0x88809d86a3a256e1 +429, 0xd36056df829fdcb5 +430, 0x515632b6cb914c64 +431, 0xba76d06c2558874 +432, 0x632c54ca4214d253 +433, 0xadec487adf2cb215 +434, 0x521e663e1940513d +435, 0xb1b638b548806694 +436, 0xbe2d5bfbe57d2c72 +437, 0x8b89e7719db02f7 +438, 0x90ba5281c1d56e63 +439, 0x899e1b92fceea102 +440, 0xf90d918e15182fa6 +441, 0x94a489ce96c948c4 +442, 0xad34db453517fcd4 +443, 0xc5264eb2de15930f +444, 0x101b4e6603a21cee +445, 0xef9b6258d6e85fff +446, 0x6075c7d6c048bd7a +447, 0x6f03232c64e438aa +448, 0x18c983d7105ee469 +449, 0x3ffc23f5c1375879 +450, 0xbc1b4a00afb1f9f +451, 0x5afa6b2bb8c6b46e +452, 0xe7fce4af2f2c152a +453, 0x5b00ab5c4b3982c7 +454, 0x2d4b0c9c0eb4bd0c +455, 0x61d926270642f1f2 +456, 0x7219c485c23a2377 +457, 0x7e471c752fecd895 +458, 0x23c4d30a4d17ba1f +459, 0x65cb277fe565ca22 +460, 0xcbb56ed9c701363b +461, 0xfd04ab3a6eba8282 +462, 0x19c9e5c8bab38500 +463, 0xea4c15227676b65b +464, 0x20f3412606c8da6f +465, 0xb06782d3bf61a239 +466, 0xf96e02d5276a9a31 +467, 0x835d256b42aa52a6 +468, 0x25b09151747f39c1 +469, 0x64507386e1103eda +470, 0x51cbc05716ef88e4 +471, 0x998cd9b7989e81cc +472, 0x9d7115416bec28d1 +473, 0xc992ca39de97906b +474, 0xd571e6f7ca598214 +475, 0xafc7fb6ccd9abbf8 +476, 0x88ef456febff7bf4 +477, 0xdbe87ccc55b157d2 +478, 0xaab95e405f8a4f6d +479, 0xad586a385e74af4f +480, 0x23cd15225c8485aa +481, 0x370940bf47900ac7 +482, 0xefd6afda1a4b0ead +483, 0x9cb1a4c90993dd7a +484, 0xff7893e8b2f70b11 +485, 0xb09e1807c0638e8e +486, 0xb10915dcb4978f74 +487, 0x88212ab0051a85eb +488, 0x7af41b76e1ec793f +489, 0x2e5c486406d3fefd +490, 0xebe54eff67f513cc +491, 0xab6c90d0876a79b8 +492, 0x224df82f93fe9089 +493, 0xc51c1ce053dc9cd2 +494, 0x5ef35a4d8a633ee7 +495, 0x4aca033459c2585f +496, 0xd066932c6eefb23d +497, 0x5309768aab9a7591 +498, 0xa2a3e33823df37f9 +499, 0xcec77ff6a359ee9 +500, 0x784dc62d999d3483 +501, 0x84e789fb8acc985d +502, 0xd590237e86aa60f +503, 0x737e2ffe1c8ad600 +504, 0xc019c3a39a99eab8 +505, 0x6a39e9836964c516 +506, 0xe0fe43129535d9da +507, 0xdfc5f603d639d4de +508, 0x7b9a7d048a9c03b6 +509, 0xbb5aa520faa27fdd +510, 0x2a09b4200f398fa2 +511, 0x38cc88107904064e +512, 0xa9a90d0b2d92bb25 +513, 0x9419762f87e987e3 +514, 0x1a52c525153dedcd +515, 0xc26d9973dd65ae99 +516, 0x8e89bd9d0dc6e6a1 +517, 0x2f30868dc01bfb53 +518, 0x20f09d99b46501c4 +519, 0x78b468a563b8f1e9 +520, 0xcccf34b0b6c380c7 +521, 0xf554e7dc815297e6 +522, 0x332a585cfb4a50ef +523, 0xa9fb64a2b6da41d7 +524, 0xdcd2a5a337391ce0 +525, 0x8a9bd3e324c6463d +526, 0x9f4487d725503bdd +527, 0xf72282d82f1d0ff +528, 0x308f4160abb72d42 +529, 0x648de1db3a601b08 +530, 0x36cab5192e7ebd39 +531, 0x7975fbe4ab6a1c66 +532, 0xd515b4d72243864e +533, 0x43a568f8b915e895 +534, 0x15fa9f2057bdb91d +535, 0x7a43858ef7a222dc +536, 0x17b4a9175ac074fe +537, 0xa932c833b8d0f8f8 +538, 0x1d2db93a9a587678 +539, 0x98abd1d146124d27 +540, 0xf0ab0431671740aa +541, 0xa9d182467540ad33 +542, 0x41c8a6cfc331b7fc +543, 0xa52c6bd0fcd1d228 +544, 0x2773c29a34dc6fa3 +545, 0x3098230746fc1f37 +546, 0xd63311bb4f23fabe +547, 0x6712bf530cd2faec +548, 0x342e8f342e42c4dd +549, 0xfbd83331851cdcad +550, 0xe903be1361bbc34d +551, 0xd94372e5077e3ef9 +552, 0x95aaa234f194bd8 +553, 0x20c0c8fb11e27538 +554, 0xfaf47dc90462b30b +555, 0x8ddc6d144147682a +556, 0xf626833fd926af55 +557, 0x5df93c34290d1793 +558, 0xb06a903e6e9fca5e +559, 0x10c792dc851d77ca +560, 0xd9b1b817b18e56cb +561, 0x3a81730c408eb408 +562, 0x65052c04a8d4b63c +563, 0x3328546598e33742 +564, 0xeca44a13f62d156d +565, 0x69f83d1d86b20170 +566, 0x937764200412027d +567, 0xc57eb1b58df0f191 +568, 0xa1c7d67dce81bc41 +569, 0x8e709c59a6a579ce +570, 0x776a2f5155d46c70 +571, 0xd92906fbbc373aa5 +572, 0xe97ad478a2a98bf6 +573, 0xc296c8819ac815f +574, 0x613ede67ba70e93e +575, 0xe145222498f99cde +576, 0xafcdfa7a3c1cf9bf +577, 0x1c89252176db670d +578, 0xad245eda5c0865ff +579, 0x249463d3053eb917 +580, 0xc9be16d337517c0b +581, 0xefcc82bf67b8f731 +582, 0x1e01577d029e0d00 +583, 0xad9c24b2a4f3d418 +584, 0xed2cceb510db4d0f +585, 0xbddadcdb92400c70 +586, 0x67d6b0476ef82186 +587, 0xbc7662ff7bf19f73 +588, 0x9d94452a729e6e92 +589, 0x6b278d8594f55428 +590, 0x6c4b31cceb1b2109 +591, 0xccc6c3a726701e9 +592, 0x6bc28ece07df8925 +593, 0xc0422b7bf150ccc4 +594, 0xab7158f044e73479 +595, 0xdf3347546d9ed83f +596, 0x3b3235a02c70dff4 +597, 0x2551c49c14ea8d77 +598, 0xee2f7f5bb3cc228e +599, 0x39b87bfe8c882d39 +600, 0x7dd420fad380b51c +601, 0xffe64976af093f96 +602, 0x4a4f48dc6e7eaa5f +603, 0x85f2514d32fdc8cc +604, 0x1ab1215fd7f94801 +605, 0x4cd1200fc795b774 +606, 0xcf8af463a38942ee +607, 0x319caa7ce3022721 +608, 0x8cd9798a76d1aea4 +609, 0x2bd3933ac7afd34e +610, 0x85d4c323403cf811 +611, 0xd7b956d3064efa30 +612, 0x67a078dbf1f13068 +613, 0x665fa6c83e87c290 +614, 0x9333ac2416d2469b +615, 0xdfb1fd21a0094977 +616, 0xa1962a6e2c25f8ff +617, 0x1f3b10a7ed5287cf +618, 0x70641efb3d362713 +619, 0xe527a2cf85d00918 +620, 0x9741e45d3f9890a3 +621, 0x6cb74b5d4d36db4b +622, 0xf24734d622bd2209 +623, 0xadd6d94f78e9d378 +624, 0xc3bbdb59225cca7f +625, 0x5ad36614275b30cd +626, 0x495568dd74eea434 +627, 0xf35de47e0ffe1f2d +628, 0xefa209dca719ab18 +629, 0x844ddcaeb5b99ae8 +630, 0x37449670a1dc7b19 +631, 0x5a4612c166f845c1 +632, 0xe70f7782f2087947 +633, 0x98d484deac365721 +634, 0x705302198cf52457 +635, 0x7135ae0f5b77df41 +636, 0x342ac6e44a9b6fc3 +637, 0x2713fd2a59af5826 +638, 0x6e1a3f90f84efa75 +639, 0x9fb3b4dd446ca040 +640, 0x530044ae91e6bd49 +641, 0xe984c4183974dc3e +642, 0x40c1fa961997d066 +643, 0xb7868250d8c21559 +644, 0x8bc929fa085fd1de +645, 0x7bdb63288dc8733e +646, 0xac4faad24326a468 +647, 0x1c6e799833aea0b1 +648, 0xcc8a749e94f20f36 +649, 0x4e7abfd0443547c5 +650, 0xb661c73bb8caa358 +651, 0x4a800f5728ff2351 +652, 0x8c15e15189b9f7ed +653, 0xab367846b811362c +654, 0x4ba7508f0851ca2a +655, 0xe9af891acbafc356 +656, 0xbdebe183989601f8 +657, 0x4c665ea496afc061 +658, 0x3ca1d14a5f2ed7c +659, 0xfbdff10a1027dd21 +660, 0xdfd28f77c8cff968 +661, 0xc4fbaadf8a3e9c77 +662, 0xdac7e448b218c589 +663, 0xb26390b5befd19e2 +664, 0xd2ef14916c66dba9 +665, 0xfab600284b0ff86b +666, 0xf04a1c229b58dabb +667, 0xc21c45637e452476 +668, 0xd1435966f75e0791 +669, 0xc1f28522eda4a2d0 +670, 0x52332ae8f1222185 +671, 0x81c6c0790c0bf47e +672, 0xfebd215e7d8ffb86 +673, 0x68c5dce55dbe962b +674, 0x231d09cb0d2531d1 +675, 0x3218fba199dbbc6b +676, 0x8f23c535f8ea0bf6 +677, 0x6c228963e1df8bd9 +678, 0x9843c7722ed153e3 +679, 0xd032d99e419bddec +680, 0xe2dca88aa7814cab +681, 0x4d53fb8c6a59cdc2 +682, 0x8fb3abc46157b68b +683, 0xa3e733087e09b8e +684, 0x6bdc1aee029d6b96 +685, 0x4089667a8906d65b +686, 0x8f3026a52d39dd03 +687, 0x6d2e0ccb567bae84 +688, 0x74bad450199e464 +689, 0xf114fb68a8f300d5 +690, 0xc7a5cc7b374c7d10 +691, 0xf0e93da639b279d1 +692, 0xb9943841ad493166 +693, 0x77a69290455a3664 +694, 0x41530da2ebea054b +695, 0xe8f9fab03ea24abf +696, 0xaa931f0c9f55a57a +697, 0xb4d68a75d56f97ae +698, 0x3d58ff898b6ba297 +699, 0x49d81e08faf5a3f5 +700, 0xfc5207b9f3697f3b +701, 0xa25911abb3cf19b7 +702, 0x6b8908eb67c3a41 +703, 0xd63ef402e2e3fa33 +704, 0x728e75d3f33b14c5 +705, 0x248cb1b8bc6f379a +706, 0x3aa3d6d2b8c72996 +707, 0x49cc50bd2d3d2860 +708, 0xb4e1387647c72075 +709, 0x435a1630a4a81ed3 +710, 0xa5ea13005d2460cf +711, 0xc7a613df37d159ec +712, 0x95721ccc218b857e +713, 0xd4b70d8c86b124d3 +714, 0x2b82bcc4b612d494 +715, 0xaf13062885276050 +716, 0xcbd8fcf571a33d9c +717, 0x3f7f67ca1125fc15 +718, 0xddf4bb45aac81b4c +719, 0x23606da62de9c040 +720, 0xa3a172375666b636 +721, 0x292f87387a6c6c3c +722, 0xd1d10d00c5496fe1 +723, 0x86b0411ce8a25550 +724, 0x38e0487872e33976 +725, 0x363e49f88ddfd42c +726, 0x45bdf1e9f6b66b0a +727, 0x8a6fff3de394f9b5 +728, 0x8502158bb03f6209 +729, 0x22e24d16dba42907 +730, 0x3fe3ba427cc2b779 +731, 0x77144793f66b3d7e +732, 0xcf8912ccb29b8af9 +733, 0xdc856caff2abd670 +734, 0xe6d3ae0b0d9d4c8b +735, 0xb8f5d40e454c539f +736, 0x79ca953114fbc6b7 +737, 0x478d6f4bbfa38837 +738, 0x9babae1a3ffdc340 +739, 0x40edd56802bae613 +740, 0x97a56c2dcccf0641 +741, 0xafc250257f027f8e +742, 0x8da41ef1edf69125 +743, 0x6574b0280ff9d309 +744, 0x197c776151b8f820 +745, 0x6b03e077c9dac3b6 +746, 0x24a40ebbc5c341c5 +747, 0x50e585169a6a1c4b +748, 0x37783a5a6a3e4e02 +749, 0xb3de81ee6fbad647 +750, 0xf4f292f57ca4591e +751, 0x6214e9e7d44d30a +752, 0x5920190c56d21c12 +753, 0x9ac163419b5e0c9b +754, 0xfc2328761ae8ed93 +755, 0xc68f945b545508c6 +756, 0x687c49a17ce0a5e2 +757, 0x276d8f53d30d4ab4 +758, 0x8201804970343ce1 +759, 0x1b5d323cc2e7fb7e +760, 0x6f351ef04fd904b +761, 0x6c793a7d455d5198 +762, 0x46f5d108430ae91f +763, 0xac16a15b2a0cf77f +764, 0xa0d479d9e4122b9d +765, 0x3afd94604307f19 +766, 0x2573ed6d39d38dbf +767, 0xa58e14ba60b4294b +768, 0xe69c1aed5840d156 +769, 0x4cf6fda7f04855c2 +770, 0x2fb65a56ef5f22da +771, 0xf95819434d5dc220 +772, 0x29c65133623dafba +773, 0x8e997bd018467523 +774, 0xfd08ba9d498461a7 +775, 0xdd52243bc78a5592 +776, 0x39c30108f6db88b3 +777, 0x38af8e1894f259b9 +778, 0x97eedf3b4ae5f6de +779, 0x757825add80c5ece +780, 0xf0fdd90ac14edb14 +781, 0xbbb19d4cc8cac6d4 +782, 0x9a82234edfae05e3 +783, 0x704401c61d1edf1c +784, 0x8b0eb481fb3a1fb2 +785, 0xef6f36e7cc06c002 +786, 0x7a208b17e04b8cd7 +787, 0xf20e33d498838fe9 +788, 0xc2bdb22117058326 +789, 0x6ec31939eb4ca543 +790, 0x6f1654838f507a21 +791, 0xc65ab81a955d2b93 +792, 0x40b1420fdd9531b8 +793, 0xe31f221cab9f4f40 +794, 0x798cdd414c1deb7a +795, 0x9c84e9c7d41cd983 +796, 0x63d6b1ae3b60b7fa +797, 0xb42bfdd1a2f78ffa +798, 0x37e431eaccaaa8e9 +799, 0x7508142a0f73eac9 +800, 0x91662a023df5893a +801, 0x59782070e2fe3031 +802, 0xb2acd589a8ce7961 +803, 0xa224743fa877b292 +804, 0xaa5362aa27e6ed9e +805, 0xa394a4e520c0c1c7 +806, 0xe49b16d2018ffb6f +807, 0xb8074b9f2f1e762b +808, 0xcf5f86143d5c23a7 +809, 0xfd838785db987087 +810, 0x31b1889df389aff8 +811, 0x30aaca876a4383b +812, 0x1731bb71c4c38d4f +813, 0x9a83a65395e05458 +814, 0x99cd0c8d67c8f4fc +815, 0xfbd9fdc849b761a5 +816, 0x82c04834fc466889 +817, 0xdeef9d6e715e8c97 +818, 0x549c281c16da6078 +819, 0x2d70661254ad599d +820, 0x57995793a72acac +821, 0xf1727005116183ba +822, 0xa22bb38945285de3 +823, 0x4f2d687fe45131ff +824, 0x5666c87ddbbc981f +825, 0xbcb4b2d4e7a517d0 +826, 0x5e794dd2e20b785d +827, 0x449ad020149e093c +828, 0x7704ee0412d106f5 +829, 0x83cbdf257b072ac1 +830, 0xae5c4fc9f638b0da +831, 0x7b9e5a64e372ed47 +832, 0x7eddbbb22c2cdf57 +833, 0x3f19ebfa155b08e +834, 0x91d991154dfd7177 +835, 0x611ae74b952d387f +836, 0x3fdf7a335bda36ee +837, 0xdf182433fc7a7c05 +838, 0x62c78598d1f8db0a +839, 0xc3750c69d2c5c1f0 +840, 0xf1318024709efdee +841, 0xaa3fd360d224dc29 +842, 0x62af53b2f307c19 +843, 0xdf527683c58120c2 +844, 0x3281deecc496f93d +845, 0x4f704ad31527ef08 +846, 0x127a14a5e07cfdfc +847, 0x90d0b1f549255c92 +848, 0xbc3406b212c5e1fc +849, 0x4e89f39379dba91d +850, 0x1290ef43c4998e6e +851, 0xecfeb1a1cb1c6e1b +852, 0x2067e90403003bf1 +853, 0x38ae04be30bdbeba +854, 0x8a3537f298baedda +855, 0xd07f3b825cdb2936 +856, 0xea020b5aebae8b45 +857, 0xfcd614ab031132b0 +858, 0x5fb682a4ff2268f5 +859, 0xd1c4662ce65596f4 +860, 0x7026b8270dd0b8dc +861, 0x8101ec4b4beae45a +862, 0xa0e9dc87940610a6 +863, 0x83ec33679d83165b +864, 0x981847ca82e86d41 +865, 0xda84c188a304a0b7 +866, 0x3c37529c5a5bbbb8 +867, 0x34a8491ce3e19a5a +868, 0xd36ad716a2fa6cb8 +869, 0xfd1d1d6a5189a15c +870, 0x9716eb47851e8d8d +871, 0x7dfb13ea3b15c5aa +872, 0xbdf6e707f45113a5 +873, 0xb8118261b04bd097 +874, 0x6191f9895881bec6 +875, 0x7aac257ae11acf9b +876, 0x35a491e1537ff120 +877, 0xe078943432efa71c +878, 0xb3338485dd3dc2b9 +879, 0x456060975d2bb3b5 +880, 0xaddc4c451bdfc44c +881, 0x18bfa7beacf96430 +882, 0x8802ebcaf0f67498 +883, 0xad922a5a825bd780 +884, 0x9fb4587d748f4efa +885, 0xdb2a445136cd5e7 +886, 0xb98b3676ea8e96ac +887, 0xb02d8d244d784878 +888, 0xa1a8442b18860abb +889, 0x6a3029ba1361e5d1 +890, 0xf426d5fac161eb1 +891, 0xfa5ac2b87acecb23 +892, 0xaa659896e50535df +893, 0xf40dd7a3d3c5c8ed +894, 0x3f8367abecb705bc +895, 0x2d60e7525873358f +896, 0xc4a9d3948a0c3937 +897, 0x5ecc04fef6003909 +898, 0x7a865004918cba2 +899, 0x47ae110a678ec10b +900, 0xa0f02f629d91aa67 +901, 0x4848b99e7fac9347 +902, 0xaa858346d63b80ac +903, 0xeb5bf42ee161eeef +904, 0x4d35d723d3c6ba37 +905, 0xdf22ca6ca93b64a7 +906, 0x9d198520f97b25b1 +907, 0x3068415350778efe +908, 0xf3709f2e8793c2fe +909, 0xd1517bac8dd9f16f +910, 0xfb99bccaa15861dc +911, 0xa9ad607d796a2521 +912, 0x55d3793d36bd22e4 +913, 0xf99270d891ff7401 +914, 0x401750a5c4aa8238 +915, 0xd84b3003e6f28309 +916, 0x8a23798b5fa7c98b +917, 0xadd58bbc8f43e399 +918, 0xbd8c741ada62c6a8 +919, 0xbdc6937bc55b49fa +920, 0x4aefa82201b8502 +921, 0x17adf29a717b303 +922, 0xa6ed2197be168f6c +923, 0x1ba47543f4359a95 +924, 0xe34299949ac01ae9 +925, 0x711c76cffc9b62f3 +926, 0xbac259895508a4b7 +927, 0x3c8b3b3626b0d900 +928, 0x1a8d23fbe2ae71bf +929, 0xca984fa3b5a5c3a1 +930, 0xb1986ab7521a9c93 +931, 0xd6b5b2c8d47a75b5 +932, 0xc7f1c4a88afb4957 +933, 0xdeb58033a3acd6cc +934, 0xabe49ddfe1167e67 +935, 0x8d559c10205c06e3 +936, 0xea07a1a7de67a651 +937, 0xcbef60db15b6fef8 +938, 0xbfca142cff280e7 +939, 0x362693eba0732221 +940, 0x7463237e134db103 +941, 0x45574ddb5035e17a +942, 0xfc65e0cb9b94a1aa +943, 0x3154c55f1d86b36d +944, 0x2d93a96dd6ab2d8b +945, 0xbe3bc1d1f2542a25 +946, 0xdd4b541f7385bdaa +947, 0x3b56b919d914e3f8 +948, 0x82fd51468a21895f +949, 0x8988cf120731b916 +950, 0xa06a61db5fb93e32 +951, 0x6ed66c1b36f68623 +952, 0x875ae844d2f01c59 +953, 0x17ccd7ac912e5925 +954, 0x12fe2a66b8e40cb1 +955, 0xf843e5e3923ad791 +956, 0xa17560f2fd4ef48 +957, 0x27a2968191a8ee07 +958, 0xa9aab4d22ff44a3c +959, 0x63cd0dcc3bb083ae +960, 0x7a30b48c6160bf85 +961, 0x956160fb572503b3 +962, 0xc47f6b7546640257 +963, 0xaf4b625f7f49153 +964, 0x2f5c86a790e0c7e8 +965, 0xb52e0610ae07f0b8 +966, 0x38a589292c3d849e +967, 0xc3e9ef655d30b4ef +968, 0xb5695f765cda998a +969, 0xde5d5e692a028e91 +970, 0x839476721555f72e +971, 0x48b20679b17d9ebf +972, 0xe3d4c6b2c26fb0df +973, 0xce5a9834f0b4e71f +974, 0x533abb253d5d420e +975, 0x9eac5ad9aed34627 +976, 0xc0f2a01ab3c90dbb +977, 0x6528eda93f6a066c +978, 0xc16a1b625e467ade +979, 0x1a4a320fb5e8b098 +980, 0x8819cccd8b4ab32f +981, 0x42daa88531fd0bfd +982, 0xcf732226409be17c +983, 0xfddcdb25ccbf378c +984, 0x9b15b603bf589fc1 +985, 0x2436066b95d366fe +986, 0x8d42eff2e9cbda90 +987, 0x694b2fc8a4e8303c +988, 0x8e207f98aaea3ccd +989, 0x4730d7a620f822d9 +990, 0x468dc9ca30fe2fd4 +991, 0x74b36d8a1c0f031b +992, 0x3c1aac1c488c1a94 +993, 0x19d0101042444585 +994, 0x8ec50c56d0c8adf4 +995, 0x721ec629e4d66394 +996, 0x3ca5ad93abeac4a4 +997, 0xaaebc76e71592623 +998, 0x969cc319e3ed6058 +999, 0xc0a277e3b2bfc3de diff --git a/local_packages/numpy/random/tests/data/philox-testset-1.csv b/local_packages/numpy/random/tests/data/philox-testset-1.csv new file mode 100644 index 0000000..e448cbf --- /dev/null +++ b/local_packages/numpy/random/tests/data/philox-testset-1.csv @@ -0,0 +1,1001 @@ +seed, 0xdeadbeaf +0, 0xedc95200e2bd66a5 +1, 0x581d4e43b7682352 +2, 0x4be7278f5e373eab +3, 0xee47f17991a9e7ea +4, 0x38a7d2ae422f2e2c +5, 0xe2a6730a3b4a8a15 +6, 0x1588b7a841486442 +7, 0x13ad777246700504 +8, 0x14d157e0f5e18204 +9, 0xd87c22a7ee8c13f1 +10, 0x30cc389ce3542ba1 +11, 0xb8a53348955bb2e9 +12, 0xc08802e3c454f74f +13, 0xb444f627671a5780 +14, 0x4b6dd42b29cbf567 +15, 0x6109c7dc0bc5f7d5 +16, 0x85c954715d6b5b1e +17, 0x646178d3d9a3a5d5 +18, 0xebbde42b1cd83465 +19, 0x3d015102f6bc9c1a +20, 0x720fe2ec3798d5fd +21, 0x93120961289ceb2e +22, 0xc9207e960a56fae2 +23, 0xa7f042f31d991b98 +24, 0x5fac117415fae74b +25, 0xd0a970ba8dddc287 +26, 0x84b4e7e51b43106 +27, 0x6ad02bf525ea265f +28, 0xcdc7e5992b36ef8f +29, 0x44d4985209261d60 +30, 0x628c02d50f4b902e +31, 0xc7b1914922d1e76d +32, 0xfde99ff895cba51d +33, 0x175a0be050fa985f +34, 0x47297d3699e03228 +35, 0xccf1e9aeaa3339cd +36, 0x9fdd18ebeeaf15b1 +37, 0x7c94c9ab68747011 +38, 0x612d8ef22c1fa80f +39, 0x13f52b860de89ab5 +40, 0x81f264b8c139c43b +41, 0x8d017ba4ef1e85ba +42, 0x6d0556f46219951e +43, 0x8ee7b85663cf67b6 +44, 0x2432fc707645fe67 +45, 0xaf814046051e5941 +46, 0x4d432a83739ac76f +47, 0x59e5060d0983ccdd +48, 0xdd20e828b83d9b53 +49, 0x1b891800d7385f4c +50, 0x10e86a026c52ff5e +51, 0xb932f11723f7b90c +52, 0xb2413d0a1f3582d0 +53, 0xe7cd4edda65fc6b5 +54, 0x6d3808848d56593b +55, 0x192a727c3c7f47d9 +56, 0x9659d8aea5db8c16 +57, 0x4242c79fe2c77c16 +58, 0x605f90c913827cea +59, 0x53e153c8bfc2138a +60, 0xed2158fbdef5910e +61, 0xae9e6e29d4cb5060 +62, 0x7dd51afaad3b11ce +63, 0x2b9ba533d01a5453 +64, 0x7e0e9cf2b6c72c8 +65, 0x1cc8b3c7747ed147 +66, 0x9b102651e2e11b48 +67, 0x30b0b53cbaac33ea +68, 0x70c28aec39b99b85 +69, 0x5f1417ff536fdb75 +70, 0x3a1d91abd53acf58 +71, 0xba116a1772168259 +72, 0xf5369bc9bd284151 +73, 0x67bf11373bf183ca +74, 0xef0b2d44dbd33dc7 +75, 0xbfd567ee1a2953ed +76, 0x7d373f2579b5e5c6 +77, 0x756eeae7bcdd99be +78, 0x75f16eb9faa56f3b +79, 0x96d55ded2b54b9a5 +80, 0x94495191db692c24 +81, 0x32358bdd56bab38c +82, 0x3f6b64078576579 +83, 0x7177e7948bc064c9 +84, 0x2cbf23f09ba9bc91 +85, 0x9b97cc31c26645f5 +86, 0x5af2d239ff9028b1 +87, 0x316fa920e0332abe +88, 0x46535b7d1cae10a0 +89, 0x21f0a6869298022c +90, 0xf395c623b12deb14 +91, 0x8573995180675aa7 +92, 0xc3076509f4dc42d5 +93, 0x15e11e49760c6066 +94, 0xe8a6d311e67a021d +95, 0x7482f389c883339b +96, 0xda6f881573cba403 +97, 0xb110ffb847e42f07 +98, 0x2c3393140605ccf9 +99, 0xba1c8ba37d8bdc33 +100, 0x59adf43db7a86fe0 +101, 0xb4fcbf6aa585ca85 +102, 0xd794a93c18033fa6 +103, 0x6e839c01985f9d4 +104, 0x64065bf28222b2c7 +105, 0x6a6359b293fa0640 +106, 0x5ff610969e383e44 +107, 0xa8172c263f05c7f7 +108, 0x62a0172e8bd75d07 +109, 0x7be66e3c453b65ac +110, 0x6a3b8d5a14014292 +111, 0xa2583e6087450020 +112, 0xd5d3ecc480c627d2 +113, 0xa24e83f1eec8a27c +114, 0xa23febd2a99ee75a +115, 0x9a5fbf91c7310366 +116, 0x5b63156932e039b +117, 0x942af3c569908505 +118, 0x89a850f71ab6a912 +119, 0xfeadc803ac132fe9 +120, 0x67bf60e758250f3 +121, 0x533c25103466a697 +122, 0xb7deede3482f9769 +123, 0x325e043b53bba915 +124, 0x9e8d9e7fde132006 +125, 0x6bacc6860bbc436e +126, 0xb3ea0534c42b1c53 +127, 0xb2389334db583172 +128, 0xa74b1bfbf5242ee4 +129, 0x53a487e2dc51d15c +130, 0xe5a3b538d2c7a82e +131, 0x7b6c70bb0c4cadaf +132, 0xae20791b2081df1 +133, 0xc685c12e3c61d32c +134, 0x60110e6b0286e882 +135, 0x49682119c774045c +136, 0x53dc11a3bbd072e +137, 0xbdc87c6e732d9c2d +138, 0xcc4620861ebac8fd +139, 0x7e9c3558759350cc +140, 0x157408dee34891ba +141, 0x9bcad1855b80651b +142, 0xd81b29141d636908 +143, 0x1ed041a9f319c69d +144, 0x805b2f541208b490 +145, 0x484ef3bba2eb7c66 +146, 0xb6b5e37d50a99691 +147, 0xabc26a7d9e97e85f +148, 0xcba2a3cce0417c2f +149, 0xa030dfffd701993c +150, 0x2bf2dc50582ebf33 +151, 0xd9df13dd3eb9993e +152, 0x31ca28b757232ae5 +153, 0x614562a0ccf37263 +154, 0x44d635b01725afbb +155, 0x5ae230bc9ca9cd +156, 0xb23a124eb98705c6 +157, 0x6395675444981b11 +158, 0xd97314c34119f9ca +159, 0x9de61048327dd980 +160, 0x16bac6bded819707 +161, 0xcea3700e3e84b8c7 +162, 0xaa96955e2ee9c408 +163, 0x95361dcc93b5bc99 +164, 0x306921aed3713287 +165, 0x4df87f3130cd302a +166, 0x37c451daeb6a4af5 +167, 0x8dbbe35f911d5cc1 +168, 0x518157ce61cb10f9 +169, 0x669f577aebc7b35b +170, 0x4b0a5824a8786040 +171, 0x519bc3528de379f5 +172, 0x6128012516b54e02 +173, 0x98e4f165e5e6a6dd +174, 0x6404d03618a9b882 +175, 0x15b6aeb3d9cd8dc5 +176, 0x87ed2c1bae83c35b +177, 0x8377fc0252d41278 +178, 0x843f89d257a9ba02 +179, 0xcdda696ea95d0180 +180, 0xcfc4b23a50a89def +181, 0xf37fd270d5e29902 +182, 0xafe14418f76b7efa +183, 0xf984b81577076842 +184, 0xe8c60649ccb5458d +185, 0x3b7be8e50f8ff27b +186, 0xaa7506f25cef1464 +187, 0x5e513da59f106688 +188, 0x3c585e1f21a90d91 +189, 0x1df0e2075af292a +190, 0x29fdd36d4f72795f +191, 0xb162fe6c24cb4741 +192, 0x45073a8c02bd12c4 +193, 0xcbaaa395c2106f34 +194, 0x5db3c4c6011bc21c +195, 0x1b02aac4f752e377 +196, 0xa2dfb583eb7bec5 +197, 0xfe1d728805d34bb1 +198, 0xf647fb78bb4601ec +199, 0xd17be06f0d1f51ef +200, 0x39ec97c26e3d18a0 +201, 0xb7117c6037e142c8 +202, 0xe3a6ce6e6c71a028 +203, 0xe70a265e5db90bb2 +204, 0x24da4480530def1e +205, 0xfd82b28ce11d9a90 +206, 0x5bf61ead55074a1d +207, 0xbe9899c61dec480d +208, 0xae7d66d21e51ec9e +209, 0x384ee62c26a08419 +210, 0x6648dccb7c2f4abf +211, 0xc72aa0c2c708bdc9 +212, 0x205c5946b2b5ba71 +213, 0xd4d8d0b01890a812 +214, 0x56f185493625378d +215, 0x92f8072c81d39bd0 +216, 0xa60b3ceecb3e4979 +217, 0xfcf41d88b63b5896 +218, 0xf5a49aa845c14003 +219, 0xffcc7e99eee1e705 +220, 0xdd98312a7a43b32d +221, 0xa6339bd7730b004 +222, 0xdac7874ba7e30386 +223, 0xadf6f0b0d321c8 +224, 0x126a173ae4ffa39f +225, 0x5c854b137385c1e7 +226, 0x8173d471b1e69c00 +227, 0x23fa34de43581e27 +228, 0x343b373aef4507b1 +229, 0xa482d262b4ea919c +230, 0xf7fbef1b6f7fbba +231, 0xd8ce559487976613 +232, 0xbf3c8dd1e6ebc654 +233, 0xda41ed375451e988 +234, 0xf54906371fd4b9b3 +235, 0x5b6bb41231a04230 +236, 0x866d816482b29c17 +237, 0x11315b96941f27dc +238, 0xff95c79205c47d50 +239, 0x19c4fff96fbdac98 +240, 0xbfb1ae6e4131d0f4 +241, 0x9d20923f3cdb82c9 +242, 0x282175507c865dff +243, 0xdfd5e58a40fe29be +244, 0xedbd906ff40c8e4f +245, 0x11b04fc82614ccb3 +246, 0xeceb8afda76ae49f +247, 0xa4856913847c2cdf +248, 0x6f1425f15a627f2a +249, 0xdf144ffedf60349e +250, 0x392d7ecfd77cc65f +251, 0x72b8e2531049b2c6 +252, 0x5a7eb2bdb0ec9529 +253, 0xdcfd4306443e78c1 +254, 0x89ad67ed86cd7583 +255, 0x276b06c0779a6c8f +256, 0xb2dbb723196a0ac3 +257, 0x66c86a3b65906016 +258, 0x938348768a730b47 +259, 0x5f5282de938d1a96 +260, 0xa4d4588c4b473b1f +261, 0x8daed5962be4796f +262, 0x9dde8d796985a56e +263, 0x46be06dbd9ed9543 +264, 0xdf98286ceb9c5955 +265, 0xa1da1f52d7a7ca2b +266, 0x5a7f1449f24bbd62 +267, 0x3aedc4e324e525fd +268, 0xced62464cd0154e1 +269, 0x148fc035e7d88ce3 +270, 0x82f8878948f40d4c +271, 0x4c04d9cdd6135c17 +272, 0xdf046948d86b3b93 +273, 0x2f0dec84f403fe40 +274, 0xa61954fb71e63c0d +275, 0x616d8496f00382e8 +276, 0x162c622472746e27 +277, 0x43bcfe48731d2ceb +278, 0xff22432f9ff16d85 +279, 0xc033ed32bb0ad5a4 +280, 0x5d3717cc91c0ce09 +281, 0x7a39a4852d251075 +282, 0x61cd73d71d6e6a6 +283, 0xe37e2ea4783ab1a5 +284, 0x60e1882162579ea8 +285, 0x9258ec33f1a88e00 +286, 0x24b32acf029f0407 +287, 0x1410fc9aea6d3fac +288, 0x6054cf2a3c71d8f7 +289, 0x82f7605157a66183 +290, 0x3b34c1c0dff9eac5 +291, 0xfebe01b6d5c61819 +292, 0x7372187c68b777f2 +293, 0xc6923812cda479f0 +294, 0x386613be41b45156 +295, 0x92cfebe8cc4014b +296, 0x8e13c4595849828b +297, 0x90e47390d412291f +298, 0x6b21a1d93d285138 +299, 0xbf5b1f5922f04b12 +300, 0x21e65d1643b3cb69 +301, 0xf7683b131948ac3c +302, 0xe5d99fc926196ed2 +303, 0x7b138debbec90116 +304, 0x8a2650a75c2c2a5c +305, 0x20689a768f9b347b +306, 0xdfa2900cfb72dc6e +307, 0x98959c3855611cc2 +308, 0x5fdb71b89596cc7c +309, 0x1c14ac5c49568c7b +310, 0x958c4293016091fe +311, 0x7484522eb0087243 +312, 0xc4018dfb34fc190f +313, 0xca638567e9888860 +314, 0x102cd4805f0c0e89 +315, 0xcc3bc438e04548f8 +316, 0xb808944bb56ea5be +317, 0xffd4778dbf945c57 +318, 0xfe42617784c0233b +319, 0x3eccbfeae9b42d3c +320, 0xd9f1b585fd0bfa60 +321, 0x5c063d1b2705d5dd +322, 0x8e8bec3519941b64 +323, 0x9e94c36cbec2a42 +324, 0x1cd19f5b64ffd3ad +325, 0x9632e3aebfc68e66 +326, 0x98960c2d9da4ae45 +327, 0xb76994b1f2bbfc1f +328, 0xca184a737d3971cc +329, 0x964d31b07183adfb +330, 0xe9e0ff351cd276d4 +331, 0xb5747c860b05bbe4 +332, 0x5549ddc3bd3862e2 +333, 0x495496677b27873b +334, 0x53910baa26e3ea18 +335, 0xaa07a07ad0a688d3 +336, 0xbb43bd1f09ecdb1e +337, 0xe2ebc105699dd84 +338, 0x6e815a2729584035 +339, 0x2caab1713b17948a +340, 0x43d39d209fa41c90 +341, 0xfe3e71089d5d1c3a +342, 0xa778646c32f81177 +343, 0x8d42bfb86e6e92d5 +344, 0x175571f70b4fcfbe +345, 0x2a66a6fe10dc3b5b +346, 0xd9545e85235ca709 +347, 0x5642781c77ced48a +348, 0x24facc40b72ccd09 +349, 0xa800fbacce33f6f8 +350, 0x675f58a0ff19fba +351, 0x35aedf57bb5cde1b +352, 0xe5535a6b63f6d068 +353, 0x84dffd0102aaa85d +354, 0x621faad65467aaa7 +355, 0x596ad85b556b112f +356, 0x837545fff8894c7a +357, 0x3d9a4ae1356bc6a6 +358, 0xcd8b7153205d4ad0 +359, 0x98afdd40f1ed09a6 +360, 0xa38b2dc55a5cf87f +361, 0x484aecce2b6838bc +362, 0x6af05c26bdab18d9 +363, 0xf418b7399dcf2e4b +364, 0x1cfa38789b0d2445 +365, 0xfbed23c34166ee67 +366, 0x38e6820039e4912a +367, 0x1fe94911e963591e +368, 0x1291c79aee29ad70 +369, 0x65eccfc89506f963 +370, 0x7d14de3b2f55b1f6 +371, 0x82eb79c36cd2a739 +372, 0x41ffe3b75ea0def5 +373, 0x9eba9156470a51d9 +374, 0xd17c00b981db37d1 +375, 0xf688769a75601aa7 +376, 0xbcf738e9e03d571e +377, 0x14712e56df8f919b +378, 0xab14e227d156e310 +379, 0xf53d193e993e351e +380, 0x857fae46bd312141 +381, 0xc2dd71e41b639966 +382, 0x74f8b987a3d00ad1 +383, 0x5bce8526dc527981 +384, 0x94910926c172a379 +385, 0x503c45557688a9d5 +386, 0x244d03834e05807f +387, 0x6e014cbab9c7a31f +388, 0xae544c638530facf +389, 0x9b853aaaf9cbc22d +390, 0xfb42ab7024d060ed +391, 0x74cc3fba0dfd7ff2 +392, 0x24ec9e8f62144ad5 +393, 0x72f082954307bbe7 +394, 0x36feda21bbf67577 +395, 0x3222191611b832f1 +396, 0xd0584e81bcac8b0b +397, 0xdce8d793ef75e771 +398, 0x978824c6c2578fc +399, 0x6e8f77503b3c2ee4 +400, 0xc85d2d86fecf5d03 +401, 0x3d35b4a5d4d723c4 +402, 0xd3987dfd4727fff3 +403, 0xd3cde63fb6a31add +404, 0xf6699e86165bdaeb +405, 0x9d60ba158ec364c4 +406, 0x920c3c18b346bfc9 +407, 0x770fd1fdfbc236ca +408, 0x45998cfc5fc12ddd +409, 0xd74a3454e888834b +410, 0xbf2aa68081a4a28f +411, 0xea41b26a6f1da1b3 +412, 0x5560a2d24b9d5903 +413, 0xe3791f652a228d8b +414, 0x365116d3b5a8520c +415, 0xb1b2bd46528f8969 +416, 0xfcfe14943ef16ae7 +417, 0xf4d43425e8a535dc +418, 0xe6cf10a78782a7e0 +419, 0x9c7ac0de46556e3e +420, 0xc667ae0856eed9ef +421, 0x47dbb532e16f9c7e +422, 0xdf4785a5d89ee82e +423, 0xbd014925ce79dbcf +424, 0xea0d663fb58fa5be +425, 0x51af07d5cc3821fb +426, 0x27a1bdcdc4159a9d +427, 0x520c986c59b1e140 +428, 0x50b73fd9bacd5b39 +429, 0xae5240641f51e4f3 +430, 0x71faecc164ed9681 +431, 0xda95aa35529a7ee +432, 0xe25ba29b853c1c6d +433, 0x9871a925cda53735 +434, 0xde481ad8540e114d +435, 0xa2997f540e8abca0 +436, 0xc9683c5035e28185 +437, 0x1082471b57182bac +438, 0xbd3ecf0f0b788988 +439, 0xf479760776fbb342 +440, 0x3730929200d91f44 +441, 0xc1762d79ae72809c +442, 0xfaa0a4c7b1686cb3 +443, 0xd581e6d55afdafcd +444, 0x6cf57bdfba2dcf6d +445, 0xdef79d9fe6a5bcef +446, 0x13ed376e18132bd3 +447, 0xbe67efd72defa2a +448, 0x5acc176c468966ea +449, 0x8b35b626af139187 +450, 0x446de3fac0d973ac +451, 0xe1d49e06dc890317 +452, 0x817bc3fd21fc09b7 +453, 0xb71c3958a13d5579 +454, 0x8746e010f73d7148 +455, 0x1b61c06009922e83 +456, 0xba17e62e6b092316 +457, 0x1375fa23c4db8290 +458, 0x3f071230f51245a6 +459, 0x51c99a086a61cd13 +460, 0x5f0f2ae78589e1fd +461, 0x604834e114bbbc27 +462, 0x5eb2a7a34814e9a9 +463, 0x77a6907f386bf11e +464, 0x99525de2bd407eeb +465, 0xb818348c57b3b98f +466, 0x25f5f9e702fbe78d +467, 0x8f66669e6f884473 +468, 0x1e47d46e2af4f919 +469, 0xf6a19df846476833 +470, 0xff00c67bcd06621f +471, 0xe3dfe069795d72d8 +472, 0x8affc88b2fea4d73 +473, 0x66df747e5f827168 +474, 0xf368ec338d898a0e +475, 0x9e1f1a739c5984a2 +476, 0x46a1c90e1ca32cbc +477, 0xc261bc305ed8d762 +478, 0x754d7949f7da9e72 +479, 0x4c8fbbb14ef47b17 +480, 0xccbdc67a3848d80d +481, 0x3c25e6f58bae751d +482, 0x7078b163b936d9b6 +483, 0x440e27463c134ecf +484, 0x6c83ee39f324db0f +485, 0x27cf901b22aea535 +486, 0x57262dec79a3f366 +487, 0x91db09f1dbb524fb +488, 0xd7436eefba865df2 +489, 0x16c86b0a275a3f43 +490, 0x689493e6681deaa9 +491, 0x7e1dc536c1a9ac42 +492, 0x1145beac3ac7f5cc +493, 0x3d05e211a104b2b0 +494, 0x4f9e77ced3c52f44 +495, 0x53de1369354add72 +496, 0x1fb60f835f47cdeb +497, 0x6ab36f089e40c106 +498, 0xaabffcb0d3d04c7 +499, 0xaa399686d921bd25 +500, 0x2bf8dd8b6d6fa7f0 +501, 0x1ddbf4e124329613 +502, 0x466a740241466a72 +503, 0x98d7381eb68a761 +504, 0x817691510bc4857a +505, 0x8837622c0171fe33 +506, 0xcba078873179ee16 +507, 0x13adad1ab7b75af4 +508, 0x3bac3f502428840c +509, 0xbeb3cce138de9a91 +510, 0x30ef556e40b5f0b4 +511, 0x19c22abdf3bbb108 +512, 0x977e66ea4ddc7cf +513, 0x9f4a505f223d3bf3 +514, 0x6bc3f42ac79ec87b +515, 0x31e77712158d6c23 +516, 0x6d8de4295a28af0d +517, 0xee1807dbda72adb7 +518, 0xda54140179cd038f +519, 0x715aa5cdac38e062 +520, 0x5a7e55e99a22fa16 +521, 0xf190c36aa8edbe4f +522, 0xccadd93a82c1d044 +523, 0x7070e6d5012c3f15 +524, 0x50a83341a26c1ba5 +525, 0x11bca7cc634142e5 +526, 0x623a0d27867d8b04 +527, 0x75c18acff54fbf6e +528, 0x455ae7d933497a6f +529, 0xf624cf27d030c3d3 +530, 0x7a852716f8758bac +531, 0xe7a497ac1fa2b5b4 +532, 0xf84f097498f57562 +533, 0xc4bb392f87f65943 +534, 0x618e79a5d499fbfb +535, 0xb3c0b61d82b48b8 +536, 0x4750a10815c78ea7 +537, 0x9cf09cca3ddece69 +538, 0x2a69f1c94cc901a2 +539, 0x347a0e446e1ce86d +540, 0xb06f3a5a5ab37bb1 +541, 0x8035bd0713d591db +542, 0x539c9637042c3a1f +543, 0xd7ba4dc6b273cbd7 +544, 0x12f3f99933444f85 +545, 0x4a9517b9783fb9a4 +546, 0x6422b2ea95093bc5 +547, 0x3a5ecff0f996c2a6 +548, 0x31de504efc76a723 +549, 0x7ccb7c5233c21a9f +550, 0xc687d9e6ce4186e8 +551, 0x6e40769d6940376a +552, 0xf51207314f1f7528 +553, 0x67ee3acb190865e3 +554, 0xe08d586270588761 +555, 0xe387fa489af1a75c +556, 0x73414a52d29d8375 +557, 0x671a38191cf2a357 +558, 0xe00fb25b1aa54008 +559, 0x11a0610e22cf549b +560, 0xc90cc865d57c75be +561, 0x90d0863cc15f2b79 +562, 0x8b3e60d32ebcb856 +563, 0xb28cc55af621e04a +564, 0xcf60bd3cb2a5ab1d +565, 0x212cb5d421948f86 +566, 0xee297b96e0a3363f +567, 0x4e9392ff998760d1 +568, 0x61940c8d0105ba3e +569, 0x14ebcbae72a59a16 +570, 0xdf0f39a3d10c02af +571, 0xfc047b2b3c1c549d +572, 0x91718b5b98e3b286 +573, 0x9ea9539b1547d326 +574, 0x7a5a624a89a165e6 +575, 0x145b37dcaa8c4166 +576, 0x63814bbb90e5616c +577, 0xc4bc3ca6c38bb739 +578, 0x853c3a61ddc6626c +579, 0xa7ce8481c433829a +580, 0x8aff426941cc07b +581, 0x2dc3347ca68d8b95 +582, 0xce69f44f349e9917 +583, 0x2fa5cb8aca009b11 +584, 0xf26bb012115d9aca +585, 0xafa01c2f2d27235a +586, 0xabcba21f1b40305e +587, 0xfec20c896c0c1128 +588, 0xc5f7a71ebacadfa0 +589, 0xc8479ad14bab4eef +590, 0xad86ec9a3e7d3dc +591, 0xbbecd65292b915c5 +592, 0xb1f9e28149e67446 +593, 0x708d081c03dad352 +594, 0xaa8a84dbd1de916c +595, 0x9aa3efb29ba9480b +596, 0xd3c63969ff11443e +597, 0x1e9e9ac861315919 +598, 0x4fe227f91e66b41d +599, 0xefc0212d43d253ab +600, 0x98341437727c42d1 +601, 0x5ea85c0fe9008adc +602, 0x7891b15faa808613 +603, 0x32db2d63989aacfd +604, 0xc92f7f28e88fd7bc +605, 0x3513545eb6549475 +606, 0x49abe0082906fbf8 +607, 0xcee1e1a6551e729c +608, 0x38556672b592a28e +609, 0xc3e61409c4ec2d45 +610, 0x96c67ce2995a0fd4 +611, 0x9b9b0cada870293 +612, 0x82d6dd5dada48037 +613, 0xeea4f415299f1706 +614, 0x371107895f152ab3 +615, 0x2f6686159f4396bb +616, 0x61005a2ff3680089 +617, 0x9d2f2cafb595e6b6 +618, 0x4a812a920f011672 +619, 0x317554d3a77385d7 +620, 0x24c01086727eb74b +621, 0xa15ff76d618a3a9e +622, 0x2121bfd983859940 +623, 0x384d11577eea8114 +624, 0xab0f4299f3c44d88 +625, 0x136fd4b07cfa14d9 +626, 0x665fe45cbfaa972a +627, 0x76c5a23398a314e9 +628, 0x5507036357ccda98 +629, 0xd9b8c5ac9dce632b +630, 0x366bc71781da6e27 +631, 0xdd2b2ba1d6be6d15 +632, 0xf33ed0d50ea6f1a6 +633, 0xf05a9b1900174c18 +634, 0x3947e1419e2787cf +635, 0x6c742b1e029637d0 +636, 0x32aba12196a0d2e8 +637, 0x1b94aab2e82e7df +638, 0x68b617db19229d6 +639, 0x6c88a95ac0a33f98 +640, 0xdc9b95fd60c2d23e +641, 0x999e6971d3afc8b3 +642, 0x7071fc6ad8b60129 +643, 0x41a8184ef62485f6 +644, 0xb68e0605c7d5e713 +645, 0x272b961a1d1bbee +646, 0x23f04e76446187b0 +647, 0x999a7a8f6d33f260 +648, 0xdbd6318df4f168d +649, 0x8f5e74c84c40711e +650, 0x8ccc6b04393a19d6 +651, 0xadcd24b782dd8d3d +652, 0x1a966b4f80ef9499 +653, 0xcb6d4f9ff5a280f0 +654, 0x8095ff2b8484018a +655, 0xbfd3389611b8e771 +656, 0x278eb670b7d12d51 +657, 0x31df54ca8d65c20f +658, 0x121c7fb38af6985e +659, 0x84fb94f38fe1d0a +660, 0x15ae8af1a6d48f02 +661, 0x8d51e4a62cba1a28 +662, 0x58e6b6b3ae0f9e42 +663, 0x9365a0a85669cc99 +664, 0xe56e92f65a2106df +665, 0x68fa299c66b428fc +666, 0x55e51bb0b0a832c6 +667, 0x48b565293f9bc494 +668, 0x73d8132b1cbabb57 +669, 0x9178ac3926c36cbc +670, 0xe2f22c7b28ea5e0f +671, 0x6af45322a99afb12 +672, 0x59072fcb486a46f4 +673, 0x166b717b08d3d8e +674, 0xd4e627a2dfacc4ab +675, 0x33dad6f2921dedaa +676, 0x4b13b806834a6704 +677, 0xe5f7971b398ed54d +678, 0x20bfae65e3e6899b +679, 0x881dab45d2b4fc98 +680, 0x6f248126b5b885be +681, 0x7aeb39e986f9deee +682, 0xf819f9574b8c3a03 +683, 0xff3d93ed6bd9781a +684, 0x3a31e2e24a2f6385 +685, 0x7888a88f8944a5e +686, 0x4faee12f5de95537 +687, 0x7f3e4efccdb2ed67 +688, 0x91e0f2fc12593af5 +689, 0xb5be8a4b886a40d3 +690, 0x998e8288ac3a9b1b +691, 0x85c48fc8b1349e7b +692, 0xf03af25222d8fae5 +693, 0x45467e805b242c2e +694, 0xa2350db793dbebdc +695, 0xfebe5b61d2174553 +696, 0xa9a331f02c54ad0b +697, 0xe94e49a0f905aef3 +698, 0xe54b4c812b55e3da +699, 0xdc454114c6bc0278 +700, 0x99c7765ab476baa2 +701, 0xccd9590e47fdff7c +702, 0xfa2bcae7afd6cb71 +703, 0x2c1bf1a433a6f0f7 +704, 0x53882c62ff0aab28 +705, 0x80ac900f844dacc +706, 0x27ba8eb5c4a44d54 +707, 0x78f3dfb072a46004 +708, 0x34e00e6ec629edce +709, 0x5b88d19b552d1fbd +710, 0xe4df375dc79df432 +711, 0x37446312ff79c3b4 +712, 0xb72256900a95fa6d +713, 0x89f3171fbdff0bfc +714, 0xd37885b048687eba +715, 0xbb033213b283b60e +716, 0xcf10b523ee769030 +717, 0xbf8070b6cfd7bafb +718, 0xb7194da81fd1763b +719, 0xbfc303de88e68d24 +720, 0xb949c7a5aea8a072 +721, 0x844216e7bae90455 +722, 0xf1e7f20840049a33 +723, 0x96e3263ad0cae794 +724, 0x10772d51f6e9ba49 +725, 0xcea24fccae9d23b3 +726, 0xefd378add9dde040 +727, 0xba0c7c5275805976 +728, 0x2e2a04608f64fa8c +729, 0xafb42ec43aa0fa7 +730, 0x30444b84241ac465 +731, 0x19ef384bac4493ab +732, 0xfd1ac615d3ba5ab9 +733, 0x6cc781ba38643aff +734, 0x30ff27ebed875cfd +735, 0xee1a261aca97ae62 +736, 0xc5a92715202bc940 +737, 0x9e6ec76f93c657ff +738, 0x9b9fd55f55191ca5 +739, 0x654b13af008d8f03 +740, 0x1b7f030d9bd0719f +741, 0x6d622e277550cb7f +742, 0x3f8ee6b8830d0538 +743, 0x475462bcd0de190f +744, 0x21380e8a513bdbcd +745, 0x629bf3771b1bd7a4 +746, 0x3b5fd0b62c353709 +747, 0xf95634006ec3867e +748, 0x1be8bb584a6653c2 +749, 0x2e2d3cfa85320ce8 +750, 0x5b904b692252d11d +751, 0x4bfd76631d527990 +752, 0xc019571ca2bec4a0 +753, 0xf2eb730cea4cd751 +754, 0xd4571d709530191a +755, 0x3b5bd947061f5a7d +756, 0x56e2322cd2d1d1c0 +757, 0xa8830a5f62019f83 +758, 0x901d130c1b873cf3 +759, 0xb5dd29b363c61299 +760, 0xbb710bec3a17b26d +761, 0xc0c464daca0f2328 +762, 0x4dc8055df02650f5 +763, 0x3d3cd9bbe8b957af +764, 0xdb79612c2635b828 +765, 0xe25b3a8ad8fa3040 +766, 0xd5875c563cbf236b +767, 0x46861c1c3849c9bc +768, 0xf84bf1a2814dff43 +769, 0x6d8103902e0ad5e6 +770, 0x99f51c9be8af79e5 +771, 0xb0bfa8540ff94a96 +772, 0xaf45109a4e06f7d0 +773, 0x281df3e55aea9bfc +774, 0x6a1155ca8aa40e60 +775, 0x754d32c5de1f5da +776, 0xce1eafb1c6ca916f +777, 0xc4f2185fa8577bd1 +778, 0x4a188e9bdb5501d9 +779, 0xbb14107e99bd5550 +780, 0xf0381d8425ec2962 +781, 0x213dbfffc16ec4f6 +782, 0x7a999c5a28ea65bc +783, 0x23758c2aba7709ff +784, 0xea7e4bb205e93b44 +785, 0x9c5a31e53911c658 +786, 0x7f04d0bbdc689ddc +787, 0xe3ed89ab8d78dcb3 +788, 0x73c38bfb43986210 +789, 0x740c7d787eb8e158 +790, 0x5284fafdfb3fb9ec +791, 0x2e91a58ac1fb1409 +792, 0xb94a600bf0a09af3 +793, 0x533ea4dbe07d81dd +794, 0x48c3f1a736b3c5fd +795, 0x56ae3499fa8720ce +796, 0x526f2def663ca818 +797, 0x2f085759c65665c4 +798, 0xf715f042c69e0db4 +799, 0x110889c399231e60 +800, 0x64584a244866f3a0 +801, 0xf02ec101a39405d3 +802, 0xe73cd5e9a7f17283 +803, 0xfea64869e7028234 +804, 0x97559974ad877891 +805, 0xc8695aba1dc9f2e5 +806, 0x7b62b76ffc2264ec +807, 0xf5e1df172ec5ccd +808, 0xafaeb68765e443bd +809, 0xd3870eb2e8337623 +810, 0x4f944d684138fb39 +811, 0x6977c575038916ad +812, 0x8ada1a225df95a56 +813, 0xe4044c6c58d15e54 +814, 0x4e5121366681cf2 +815, 0xcf8640b079357b0d +816, 0xcd5b157d44106fa3 +817, 0x9d7a5481279e25a1 +818, 0xe10e9db41fb4b34f +819, 0x1052607be1eadff9 +820, 0x3403d67232fe2265 +821, 0xac9358f498c34afc +822, 0x820172da0dc39c9 +823, 0xe186e91a3b826b6a +824, 0x1a838e2a40284445 +825, 0x1870b617ebd7bce6 +826, 0xcb7cba4424be1ed7 +827, 0x6a2e56e40fdf9041 +828, 0xace93bbe108f97ee +829, 0xfeb9bc74ac41ca08 +830, 0x8cb2d05b0f6a1f51 +831, 0x73792309f3fac0a9 +832, 0x2507343d431308ca +833, 0xd0ea1197be615412 +834, 0xb1870812f1d2fa94 +835, 0x6d067b6935dcd23e +836, 0xaf161014e5492c31 +837, 0xd4be0dce97064be4 +838, 0xf8edfe3fc75c20f1 +839, 0x894751dc442d2d9c +840, 0xb4a95f6a6663456c +841, 0x74e93162e2d805db +842, 0x784bc5f3a7a2f645 +843, 0xd234d7c5b0582ea9 +844, 0x491f28d0ab6cb97c +845, 0xa79419e5cf4336c3 +846, 0x66b00141978c849 +847, 0xa7ddbd64698d563f +848, 0xefc33a4a5d97d4b2 +849, 0x95075514a65aebdc +850, 0x40eca5b3e28cd25e +851, 0x90ec7d00e9c9e35d +852, 0x63e84104d5af417a +853, 0xdaca0ea32df5744 +854, 0x7ed54f2587795881 +855, 0x5a73931760af4ee0 +856, 0x857d1a185a3081ec +857, 0x6eac2aabe67fb463 +858, 0xd1f86155d8bfc55f +859, 0x6d56398f3e7877ef +860, 0x7642f61dfc62bc17 +861, 0x1d76b12843246ffa +862, 0xde7817809b8a31d0 +863, 0xbcca9cd091198f9d +864, 0xf71ca566dddcdfd4 +865, 0xea4386ee8b61d082 +866, 0xe351729d6010bac4 +867, 0xfd685d8a49910dd6 +868, 0xa7a20ea6c686bd3 +869, 0x1cdaf82f4dbd5536 +870, 0xa3da1d1e77dda3e0 +871, 0x4f723b3818ff8b2a +872, 0x1290669eca152469 +873, 0xb54158b52d30651b +874, 0xc06b74f2c7f0fee +875, 0x7d5840bcbf702379 +876, 0x19fa4c1254a82ed +877, 0xcf5ce090ad0b38ea +878, 0xd4edd6ac9437e16d +879, 0xc6ebf25eb623b426 +880, 0xd2b6dbdf00d8fea2 +881, 0x949cf98391cc59e1 +882, 0x380a0c7d0356f7b3 +883, 0x8ffefe32465473bf +884, 0x637b6542d27c861e +885, 0x347d12ffc664ecd9 +886, 0xea66e3a0c75a6b37 +887, 0xc3aff6f34fb537a1 +888, 0x67bdf3579959bf49 +889, 0xa17a348e3a74b723 +890, 0x93c9ef26ddadd569 +891, 0x483909059a5ac0b2 +892, 0x26ec9074b56d5a0d +893, 0x6216000d9a48403a +894, 0x79b43909eab1ec05 +895, 0xe4a8e8d03649e0de +896, 0x1435d666f3ccdc08 +897, 0xb9e22ba902650a0e +898, 0x44dffcccc68b41f8 +899, 0x23e60dcc7a559a17 +900, 0x6fd1735eacd81266 +901, 0xf6bda0745ea20c8e +902, 0x85efcaefe271e07c +903, 0x9be996ee931cef42 +904, 0xe78b41c158611d64 +905, 0xd6201df605839830 +906, 0x702e8e47d2769fd3 +907, 0xb8dcf70e18cf14c +908, 0xac2690bab1bf5c17 +909, 0x92b166b71205d696 +910, 0xb0e73c795fc6df28 +911, 0x4bf2322c8b6b6f0d +912, 0xa842fbe67918cea0 +913, 0xb01a8675d9294e54 +914, 0xfbe3c94f03ca5af2 +915, 0x51a5c089600c441f +916, 0x60f0fd7512d85ded +917, 0xef3113d3bc2cadb0 +918, 0xe1ea128ade300d60 +919, 0xde413b7f8d92d746 +920, 0xfc32c6d43f47c5d8 +921, 0x69d551d8c2b54c68 +922, 0xb9bc68c175777943 +923, 0xb9c79c687f0dae90 +924, 0xd799421ef883c06e +925, 0xbff553ca95a29a3e +926, 0xfc9ffac46bd0aca1 +927, 0x4f6c3a30c80c3e5a +928, 0x8b7245bc6dc4a0a +929, 0xaf4e191a4575ff60 +930, 0x41218c4a76b90f0b +931, 0x986052aa51b8e89b +932, 0x284b464ed5622f9 +933, 0xba6bded912626b40 +934, 0x43cad3ed7443cb5c +935, 0x21641fa95725f328 +936, 0x6d99d6d09d755822 +937, 0x8246dfa2d4838492 +938, 0xd2ee70b9056f4726 +939, 0x87db515a786fbb8b +940, 0x7c63e4c1d7786e7d +941, 0xd1a9d548f10b3e88 +942, 0xa00856475f3b74c9 +943, 0x7f1964ce67148bf4 +944, 0x446650ec71e6018c +945, 0xb1805ca07d1b6345 +946, 0x869c0a1625b7271b +947, 0x79d6da06ce2ecfe2 +948, 0xec7b3cafc5e3c85f +949, 0x1745ce21e39f2c3d +950, 0xd9a0a7af6ee97825 +951, 0x680e0e52a6e11d5c +952, 0xd86b3f344ff7f4cd +953, 0xab56af117c840b9c +954, 0x5c5404c7e333a10e +955, 0x4f1eb462f35d990d +956, 0xf857605a5644458e +957, 0x3bb87cdf09262f86 +958, 0xd57295baf6da64b +959, 0xb5993f48472f2894 +960, 0x7d1a501608c060b2 +961, 0x45fabe2d0e54adf0 +962, 0xbb41c3806afb4efe +963, 0xbfbc506049424c8 +964, 0xb7dd6b67f2203344 +965, 0x389ce52eff883b81 +966, 0xe259c55c0cf6d000 +967, 0x70fb3e3824f7d213 +968, 0x9f36d5599ed55f4b +969, 0xd14cf6f12f83c4f7 +970, 0x570a09d56aaa0b66 +971, 0x8accafd527f4598 +972, 0xa42d64c62175adfd +973, 0xddb9c6a87b6e1558 +974, 0xd80b6c69fa1cde2a +975, 0x44ebaac10082207b +976, 0xf99be8889552fa1a +977, 0x38253cd4b38b5dc5 +978, 0x85356c8b02675791 +979, 0xbf91677b2ecdcf55 +980, 0x2316cb85e93f366e +981, 0x9abf35954db6b053 +982, 0xf49f7425e086b45a +983, 0x8f5b625e074afde2 +984, 0xe0d614559791b080 +985, 0xbf7b866afab2a525 +986, 0xde89d7e1641a6412 +987, 0x1d10687d8ae5b86f +988, 0x1f034caa0e904cbd +989, 0x2086357aec8a7a2c +990, 0x22dc476b80c56e1e +991, 0xbef5a73cc0e3a493 +992, 0xddfa3829b26ed797 +993, 0x8917a87ec3d4dc78 +994, 0xfeabe390628c365e +995, 0x581b0c4f6fb2d642 +996, 0x1ef8c590adbf5b9a +997, 0x4d8e13aac0cce879 +998, 0xfe38f71e5977fad0 +999, 0x1f83a32d4adfd2ed diff --git a/local_packages/numpy/random/tests/data/philox-testset-2.csv b/local_packages/numpy/random/tests/data/philox-testset-2.csv new file mode 100644 index 0000000..69d24c3 --- /dev/null +++ b/local_packages/numpy/random/tests/data/philox-testset-2.csv @@ -0,0 +1,1001 @@ +seed, 0x0 +0, 0x399e5b222b82fa9 +1, 0x41fd08c1f00f3bc5 +2, 0x78b8824162ee4d04 +3, 0x176747919e02739d +4, 0xfaa88f002a8d3596 +5, 0x418eb6f592e6c227 +6, 0xef83020b8344dd45 +7, 0x30a74a1a6eaa064b +8, 0x93d43bf97a490c3 +9, 0xe4ba28b442194cc +10, 0xc829083a168a8656 +11, 0x73f45d50f8e22849 +12, 0xf912db57352824cc +13, 0xf524216927b12ada +14, 0x22b7697473b1dfda +15, 0x311e2a936414b39f +16, 0xb905abfdcc425be6 +17, 0x4b14630d031eac9c +18, 0x1cf0c4ae01222bc8 +19, 0xa6c33efc6e82ef3 +20, 0x43b3576937ba0948 +21, 0x1e483d17cdde108a +22, 0x6722784cac11ac88 +23, 0xee87569a48fc45d7 +24, 0xb821dcbe74d18661 +25, 0xa5d1876ef3da1a81 +26, 0xe4121c2af72a483 +27, 0x2d747e355a52cf43 +28, 0x609059957bd03725 +29, 0xc3327244b49e16c5 +30, 0xb5ae6cb000dde769 +31, 0x774315003209017 +32, 0xa2013397ba8db605 +33, 0x73b228945dbcd957 +34, 0x801af7190375d3c0 +35, 0xae6dca29f24c9c67 +36, 0xd1cc0bcb1ca26249 +37, 0x1defa62a5bd853be +38, 0x67c2f5557fa89462 +39, 0xf1729b58122fab02 +40, 0xb67eb71949ec6c42 +41, 0x5456366ec1f8f7d7 +42, 0x44492b32eb7966f5 +43, 0xa801804159f175f1 +44, 0x5a416f23cac70d84 +45, 0x186f55293302303d +46, 0x7339d5d7b6a43639 +47, 0xfc6df38d6a566121 +48, 0xed2fe018f150b39e +49, 0x508e0b04a781fa1b +50, 0x8bee9d50f32eaf50 +51, 0x9870015d37e63cc +52, 0x93c6b12309c14f2d +53, 0xb571cf798abe93ff +54, 0x85c35a297a88ae6e +55, 0x9b1b79afe497a2ae +56, 0x1ca02e5b95d96b8d +57, 0x5bb695a666c0a94a +58, 0x4e3caf9bbab0b208 +59, 0x44a44be1a89f2dc1 +60, 0x4ff37c33445758d1 +61, 0xd0e02875322f35da +62, 0xfd449a91fb92646b +63, 0xbe0b49096b95db4d +64, 0xffa3647cad13ef5d +65, 0x75c127a61acd10c8 +66, 0xd65f697756f5f98e +67, 0x3ced84be93d94434 +68, 0x4da3095c2fc46d68 +69, 0x67564e2a771ee9ac +70, 0x36944775180644a9 +71, 0xf458db1c177cdb60 +72, 0x5b58406dcd034c8 +73, 0x793301a3fdab2a73 +74, 0x1c2a1a16d6db6128 +75, 0xc2dacd4ddddbe56c +76, 0x2e7d15be2301a111 +77, 0xd4f4a6341b3bcd18 +78, 0x3622996bbe6a9e3b +79, 0xaf29aa9a7d6d47da +80, 0x6d7dbb74a4cd68ae +81, 0xc260a17e0f39f841 +82, 0xdee0170f2af66f0d +83, 0xf84ae780d7b5a06e +84, 0x8326247b73f43c3a +85, 0xd44eef44b4f98b84 +86, 0x3d10aee62ec895e3 +87, 0x4f23fef01bf703b3 +88, 0xf8e50aa57d888df6 +89, 0x7da67411e3bef261 +90, 0x1d00f2769b2f96d7 +91, 0x7ef9a15b7444b84e +92, 0xcfa16436cc2b7e21 +93, 0x29ab8cfac00460ff +94, 0x23613de8608b0e70 +95, 0xb1aa0980625798a8 +96, 0xb9256fd29db7df99 +97, 0xdacf311bf3e7fa18 +98, 0xa013c8f9fada20d8 +99, 0xaf5fd4fe8230fe3e +100, 0xd3d59ca55102bc5c +101, 0x9d08e2aa5242767f +102, 0x40278fe131e83b53 +103, 0x56397d03c7c14c98 +104, 0xe874b77b119359b3 +105, 0x926a1ba4304ab19f +106, 0x1e115d5aa695a91d +107, 0xc6a459df441f2fe3 +108, 0x2ca842bc1b0b3c6a +109, 0x24c804cf8e5eed16 +110, 0x7ca00fc4a4c3ebd3 +111, 0x546af7cecc4a4ba6 +112, 0x8faae1fa18fd6e3 +113, 0x40420b0089641a6a +114, 0x88175a35d9abcb83 +115, 0xf7d746d1b8b1357c +116, 0x7dae771a651be970 +117, 0x2f6485247ee4df84 +118, 0x6883702fab2d8ec5 +119, 0xeb7eea829a67f9a6 +120, 0x60d5880b485562ed +121, 0x7d4ca3d7e41a4e7e +122, 0xbb7fef961ab8de18 +123, 0x3b92452fb810c164 +124, 0x5f4b4755348b338 +125, 0xca45a715a7539806 +126, 0xc33efd9da5399dd +127, 0x593d665a51d4aedd +128, 0x75d6b8636563036b +129, 0x7b57caa55e262082 +130, 0x4ede7427969e0dd5 +131, 0xc3f19b6f78ea00b +132, 0xeea7bab9be2181ea +133, 0x652c45fe9c420c04 +134, 0x14ba9e3d175670ee +135, 0xd2ad156ba6490474 +136, 0x4d65ae41065f614 +137, 0x6ff911c8afa28eb1 +138, 0xedc2b33588f3cb68 +139, 0x437c8bc324666a2f +140, 0x828cee25457a3f0 +141, 0x530c986091f31b9b +142, 0x2f34671e8326ade7 +143, 0x4f686a8f4d77f6da +144, 0xa4c1987083498895 +145, 0xbce5a88b672b0fb1 +146, 0x8476115a9e6a00cc +147, 0x16de18a55dd2c238 +148, 0xdf38cf4c416232bc +149, 0x2cb837924e7559f3 +150, 0xfad4727484e982ed +151, 0x32a55d4b7801e4f +152, 0x8b9ef96804bd10a5 +153, 0xa1fd422c9b5cf2a9 +154, 0xf46ddb122eb7e442 +155, 0x6e3842547afa3b33 +156, 0x863dee1c34afe5c4 +157, 0x6a43a1935b6db171 +158, 0x1060a5c2f8145821 +159, 0xf783ec9ed34c4607 +160, 0x1da4a86bf5f8c0b0 +161, 0x4c7714041ba12af8 +162, 0x580da7010be2f192 +163, 0xad682fe795a7ea7a +164, 0x6687b6cb88a9ed2c +165, 0x3c8d4b175517cd18 +166, 0xe9247c3a524a6b6b +167, 0x337ca9cfaa02658 +168, 0xed95399481c6feec +169, 0x58726a088e606062 +170, 0xfe7588a5b4ee342a +171, 0xee434c7ed146fdee +172, 0xe2ade8b60fdc4ba5 +173, 0xd57e4c155de4eaab +174, 0xdefeae12de1137cb +175, 0xb7a276a241316ac1 +176, 0xeb838b1b1df4ca15 +177, 0x6f78965edea32f6f +178, 0x18bebd264d7a5d53 +179, 0x3641c691d77005ec +180, 0xbe70ed7efea8c24c +181, 0x33047fa8d03ca560 +182, 0x3bed0d2221ff0f87 +183, 0x23083a6ffbcf38a2 +184, 0xc23eb827073d3fa5 +185, 0xc873bb3415e9fb9b +186, 0xa4645179e54147fe +187, 0x2c72fb443f66e207 +188, 0x98084915dd89d8f4 +189, 0x88baa2de12c99037 +190, 0x85c74ab238cb795f +191, 0xe122186469ea3a26 +192, 0x4c3bba99b3249292 +193, 0x85d6845d9a015234 +194, 0x147ddd69c13e6a31 +195, 0x255f4d678c9a570b +196, 0x2d7c0c410bf962b4 +197, 0x58eb7649e0aa16ca +198, 0x9d240bf662fe0783 +199, 0x5f74f6fa32d293cc +200, 0x4928e52f0f79d9b9 +201, 0xe61c2b87146b706d +202, 0xcfcd90d100cf5431 +203, 0xf15ea8138e6aa178 +204, 0x6ab8287024f9a819 +205, 0xed8942593db74e01 +206, 0xefc00e4ec2ae36dd +207, 0xc21429fb9387f334 +208, 0xf9a3389e285a9bce +209, 0xacdee8c43aae49b3 +210, 0xefc382f02ad55c25 +211, 0x1153b50e8d406b72 +212, 0xb00d39ebcc2f89d8 +213, 0xde62f0b9831c8850 +214, 0xc076994662eef6c7 +215, 0x66f08f4752f1e3ef +216, 0x283b90619796249a +217, 0x4e4869bc4227499e +218, 0xb45ad78a49efd7ed +219, 0xffe19aa77abf5f4b +220, 0xfce11a0daf913aef +221, 0x7e4e64450d5cdceb +222, 0xe9621997cfd62762 +223, 0x4d2c9e156868081 +224, 0x4e2d96eb7cc9a08 +225, 0xda74849bba6e3bd3 +226, 0x6f4621da935e7fde +227, 0xb94b914aa0497259 +228, 0xd50d03e8b8db1563 +229, 0x1a45c1ce5dca422e +230, 0xc8d30d33276f843f +231, 0xb57245774e4176b4 +232, 0x8d36342c05abbbb1 +233, 0x3591ad893ecf9e78 +234, 0x62f4717239ee0ac8 +235, 0x9b71148a1a1d4200 +236, 0x65f8e0f56dd94463 +237, 0x453b1fcfd4fac8c2 +238, 0x4c25e48e54a55865 +239, 0xa866baa05112ace2 +240, 0x7741d3c69c6e79c5 +241, 0x7deb375e8f4f7a8a +242, 0xc242087ede42abd8 +243, 0x2fa9d1d488750c4b +244, 0xe8940137a935d3d3 +245, 0x1dab4918ca24b2f2 +246, 0xe2368c782168fe3e +247, 0x6e8b2d1d73695909 +248, 0x70455ebea268b33e +249, 0x656a919202e28da1 +250, 0x5a5a8935647da999 +251, 0x428c6f77e118c13c +252, 0xa87aee2b675bb083 +253, 0x3873a6412b239969 +254, 0x5f72c1e91cb8a2ee +255, 0xa25af80a1beb5679 +256, 0x1af65d27c7b4abc3 +257, 0x133437060670e067 +258, 0xb1990fa39a97d32e +259, 0x724adc89ae10ed17 +260, 0x3f682a3f2363a240 +261, 0x29198f8dbd343499 +262, 0xdfaeeaa42bc51105 +263, 0x5baff3901b9480c2 +264, 0x3f760a67043e77f5 +265, 0x610fa7aa355a43ba +266, 0x394856ac09c4f7a7 +267, 0x1d9229d058aee82e +268, 0x19c674804c41aeec +269, 0x74cf12372012f4aa +270, 0xa5d89b353fa2f6ca +271, 0x697e4f672ac363dd +272, 0xde6f55ba73df5af9 +273, 0x679cf537510bd68f +274, 0x3dc916114ae9ef7e +275, 0xd7e31a66ec2ee7ba +276, 0xc21bebb968728495 +277, 0xc5e0781414e2adfd +278, 0x71147b5412ddd4bd +279, 0x3b864b410625cca9 +280, 0x433d67c0036cdc6 +281, 0x48083afa0ae20b1b +282, 0x2d80beecd64ac4e8 +283, 0x2a753c27c3a3ee3e +284, 0xb2c5e6afd1fe051a +285, 0xea677930cd66c46b +286, 0x4c3960932f92810a +287, 0xf1b367a9e527eaba +288, 0xb7d92a8a9a69a98e +289, 0x9f9ad3210bd6b453 +290, 0x817f2889db2dcbd8 +291, 0x4270a665ac15813c +292, 0x90b85353bd2be4dd +293, 0x10c0460f7b2d68d +294, 0x11cef32b94f947f5 +295, 0x3cf29ed8e7d477e8 +296, 0x793aaa9bd50599ef +297, 0xbac15d1190014aad +298, 0x987944ae80b5cb13 +299, 0x460aa51f8d57c484 +300, 0xc77df0385f97c2d3 +301, 0x92e743b7293a3822 +302, 0xbc3458bcfbcbb8c0 +303, 0xe277bcf3d04b4ed7 +304, 0xa537ae5cf1c9a31c +305, 0x95eb00d30bd8cfb2 +306, 0x6376361c24e4f2dd +307, 0x374477fe87b9ea8e +308, 0x8210f1a9a039902e +309, 0xe7628f7031321f68 +310, 0x8b8e9c0888fc1d3d +311, 0x306be461fdc9e0ed +312, 0x510009372f9b56f5 +313, 0xa6e6fa486b7a027a +314, 0x9d3f002025203b5a +315, 0x7a46e0e81ecbef86 +316, 0x41e280c611d04df0 +317, 0xedcec10418a99e8a +318, 0x5c27b6327e0b9dbd +319, 0xa81ed2035b509f07 +320, 0x3581e855983a4cc4 +321, 0x4744594b25e9809d +322, 0xc737ac7c27fbd0ed +323, 0x1b523a307045433a +324, 0x8b4ce9171076f1d9 +325, 0x2db02d817cd5eec0 +326, 0x24a1f1229af50288 +327, 0x5550c0dcf583ff16 +328, 0x3587baaa122ec422 +329, 0xf9d3dc894229e510 +330, 0xf3100430d5cf8e87 +331, 0xc31af79862f8e2fb +332, 0xd20582063b9f3537 +333, 0xac5e90ac95fcc7ad +334, 0x107c4c704d5109d4 +335, 0xebc8628906dbfd70 +336, 0x215242776da8c531 +337, 0xa98002f1dcf08b51 +338, 0xbc3bdc07f3b09718 +339, 0x238677062495b512 +340, 0x53b4796f2a3c49e8 +341, 0x6424286467e22f0e +342, 0x14d0952a11a71bac +343, 0x2f97098149b82514 +344, 0x3777f2fdc425ad2 +345, 0xa32f2382938876d4 +346, 0xda8a39a021f20ae3 +347, 0x364361ef0a6ac32c +348, 0x4413eede008ff05a +349, 0x8dda8ace851aa327 +350, 0x4303cabbdcecd1ee +351, 0x2e69f06d74aa549f +352, 0x4797079cd4d9275c +353, 0xc7b1890917e98307 +354, 0x34031b0e822a4b4c +355, 0xfc79f76b566303ea +356, 0x77014adbe255a930 +357, 0xab6c43dd162f3be5 +358, 0xa430041f3463f6b9 +359, 0x5c191a32ada3f84a +360, 0xe8674a0781645a31 +361, 0x3a11cb667b8d0916 +362, 0xaedc73e80c39fd8a +363, 0xfde12c1b42328765 +364, 0x97abb7dcccdc1a0b +365, 0x52475c14d2167bc8 +366, 0x540e8811196d5aff +367, 0xa867e4ccdb2b4b77 +368, 0x2be04af61e5bcfb9 +369, 0x81b645102bfc5dfd +370, 0x96a52c9a66c6450f +371, 0x632ec2d136889234 +372, 0x4ed530c0b36a6c25 +373, 0x6f4851225546b75 +374, 0x2c065d6ba46a1144 +375, 0xf8a3613ff416551d +376, 0xb5f0fd60e9c971a9 +377, 0x339011a03bb4be65 +378, 0x9439f72b6995ded6 +379, 0xc1b03f3ef3b2292d +380, 0xad12fd221daab3ae +381, 0xf615b770f2cf996f +382, 0x269d0fdcb764172 +383, 0x67837025e8039256 +384, 0x6402831fc823fafa +385, 0x22854146a4abb964 +386, 0x7b5ad9b5a1bad7a8 +387, 0x67170e7beb6ac935 +388, 0xfc2d1e8e24adfaaa +389, 0x7ded4395345ff40d +390, 0x418981760a80dd07 +391, 0xc03bef38022c1d2 +392, 0x3a11850b26eade29 +393, 0xaa56d02c7175c5f4 +394, 0xd83b7917b9bfbff5 +395, 0x3c1df2f8fa6fced3 +396, 0xf3d6e2999c0bb760 +397, 0xc66d683a59a950e3 +398, 0x8e3972a9d73ffabf +399, 0x97720a0443edffd9 +400, 0xa85f5d2fe198444a +401, 0xfc5f0458e1b0de5e +402, 0xe3973f03df632b87 +403, 0xe151073c84c594b3 +404, 0x68eb4e22e7ff8ecf +405, 0x274f36eaed7cae27 +406, 0x3b87b1eb60896b13 +407, 0xbe0b2f831442d70a +408, 0x2782ed7a48a1b328 +409, 0xb3619d890310f704 +410, 0xb03926b11b55921a +411, 0xdb46fc44aa6a0ce4 +412, 0x4b063e2ef2e9453a +413, 0xe1584f1aeec60fb5 +414, 0x7092bd6a879c5a49 +415, 0xb84e1e7c7d52b0e6 +416, 0x29d09ca48db64dfb +417, 0x8f6c4a402066e905 +418, 0x77390795eabc36b +419, 0xcc2dc2e4141cc69f +420, 0x2727f83beb9e3c7c +421, 0x1b29868619331de0 +422, 0xd38c571e192c246f +423, 0x535327479fe37b6f +424, 0xaff9ce5758617eb3 +425, 0x5658539e9288a4e4 +426, 0x8df91d87126c4c6d +427, 0xe931cf8fdba6e255 +428, 0x815dfdf25fbee9e8 +429, 0x5c61f4c7cba91697 +430, 0xdd5f5512fe2313a1 +431, 0x499dd918a92a53cd +432, 0xa7e969d007c97dfd +433, 0xb8d39c6fc81ac0bb +434, 0x1d646983def5746c +435, 0x44d4b3b17432a60c +436, 0x65664232a14db1e3 +437, 0xda8fae6433e7500b +438, 0xbe51b94ff2a3fe94 +439, 0xe9b1bd9a9098ef9f +440, 0xfe47d54176297ef5 +441, 0xb8ab99bc03bb7135 +442, 0xcfad97f608565b38 +443, 0xf05da71f6760d9c1 +444, 0xef8da40a7c70e7b +445, 0xe0465d58dbd5d138 +446, 0xb54a2d70eb1a938 +447, 0xfdd50c905958f2d8 +448, 0x3c41933c90a57d43 +449, 0x678f6d894c6ad0bb +450, 0x403e8f4582274e8 +451, 0x5cbbe975668df6b0 +452, 0x297e6520a7902f03 +453, 0x8f6dded33cd1efd7 +454, 0x8e903c97be8d783b +455, 0x10bd015577e30f77 +456, 0x3fcd69d1c36eab0c +457, 0xb45989f3ca198d3 +458, 0x507655ce02b491a9 +459, 0xa92cf99bb78602ce +460, 0xebfb82055fbc2f0f +461, 0x3334256279289b7a +462, 0xc19d2a0f740ee0ac +463, 0x8bb070dea3934905 +464, 0xa4ab57d3a8d1b3eb +465, 0xfee1b09bcacf7ff4 +466, 0xccc7fb41ceec41fa +467, 0xd4da49094eb5a74d +468, 0xed5c693770af02ed +469, 0x369dabc9bbfaa8e4 +470, 0x7eab9f360d054199 +471, 0xe36dbebf5ee94076 +472, 0xd30840e499b23d7 +473, 0x8678e6cb545015ff +474, 0x3a47932ca0b336e +475, 0xeb7c742b6e93d6fe +476, 0x1404ea51fe5a62a9 +477, 0xa72cd49db978e288 +478, 0xfd7bada020173dcf +479, 0xc9e74fc7abe50054 +480, 0x93197847bb66808d +481, 0x25fd5f053dce5698 +482, 0xe198a9b18cc21f4 +483, 0x5cc27b1689452d5d +484, 0x8b3657af955a98dc +485, 0xc17f7584f54aa1c0 +486, 0xe821b088246b1427 +487, 0x32b5a9f6b45b6fa0 +488, 0x2aef7c315c2bae0c +489, 0xe1af8129846b705a +490, 0x4123b4c091b34614 +491, 0x6999d61ec341c073 +492, 0x14b9a8fcf86831ea +493, 0xfd4cff6548f46c9f +494, 0x350c3b7e6cc8d7d6 +495, 0x202a5047fecafcd5 +496, 0xa82509fe496bb57d +497, 0x835e4b2608b575fe +498, 0xf3abe3da919f54ec +499, 0x8705a21e2c9b8796 +500, 0xfd02d1427005c314 +501, 0xa38458faa637f49b +502, 0x61622f2360e7622a +503, 0xe89335a773c2963b +504, 0x481264b659b0e0d0 +505, 0x1e82ae94ebf62f15 +506, 0x8ea7812de49209d4 +507, 0xff963d764680584 +508, 0x418a68bef717f4af +509, 0x581f0e7621a8ab91 +510, 0x840337e9a0ec4150 +511, 0x951ef61b344be505 +512, 0xc8b1b899feb61ec2 +513, 0x8b78ca13c56f6ed9 +514, 0x3d2fd793715a946f +515, 0xf1c04fabcd0f4084 +516, 0x92b602614a9a9fcc +517, 0x7991bd7a94a65be7 +518, 0x5dead10b06cad2d7 +519, 0xda7719b33f722f06 +520, 0x9d87a722b7bff71e +521, 0xb038e479071409e9 +522, 0xf4e8bbec48054775 +523, 0x4fec2cd7a28a88ea +524, 0x839e28526aad3e56 +525, 0xd37ec57852a98bf0 +526, 0xdef2cbbe00f3a02d +527, 0x1aecfe01a9e4d801 +528, 0x59018d3c8beaf067 +529, 0x892753e6ac8bf3cd +530, 0xefdd3437023d2d1c +531, 0x447bfbd148c8cb88 +532, 0x282380221bd442b8 +533, 0xfce8658d1347384a +534, 0x60b211a7ec6bfa8 +535, 0xd21729cfcc692974 +536, 0x162087ecd5038a47 +537, 0x2b17000c4bce39d2 +538, 0x3a1f75ff6adcdce0 +539, 0x721a411d312f1a2c +540, 0x9c13b6133f66934d +541, 0xaa975d14978980e5 +542, 0x9403dbd4754203fa +543, 0x588c15762fdd643 +544, 0xdd1290f8d0ada73a +545, 0xd9b77380936103f4 +546, 0xb2e2047a356eb829 +547, 0x7019e5e7f76f7a47 +548, 0x3c29a461f62b001d +549, 0xa07dc6cfab59c116 +550, 0x9b97e278433f8eb +551, 0x6affc714e7236588 +552, 0x36170aeb32911a73 +553, 0x4a665104d364a789 +554, 0x4be01464ec276c9c +555, 0x71bb10271a8b4ecf +556, 0xbf62e1d068bc018 +557, 0xc9ada5db2cbbb413 +558, 0x2bded75e726650e5 +559, 0x33d5a7af2f34385d +560, 0x8179c46661d85657 +561, 0x324ebcfd29267359 +562, 0xac4c9311dc9f9110 +563, 0xc14bb6a52f9f9c0 +564, 0xc430abe15e7fb9db +565, 0xf1cce5c14df91c38 +566, 0x651e3efa2c0750d3 +567, 0x38a33604a8be5c75 +568, 0x7aaf77fe7ff56a49 +569, 0xc0d1cc56bbf27706 +570, 0x887aa47324e156c6 +571, 0x12547c004b085e8d +572, 0xd86a8d6fbbbfd011 +573, 0x57c860188c92d7b4 +574, 0xcd5d3843d361b8ca +575, 0x8f586ef05a9cb3ef +576, 0x174456e1ba6267d5 +577, 0xf5dc302c62fe583c +578, 0xa349442fabcdb71 +579, 0xe5123c1a8b6fd08e +580, 0x80681552aa318593 +581, 0xb295396deaef1e31 +582, 0xabb626e0b900e32b +583, 0xf024db8d3f19c15e +584, 0x1d04bb9548e2fb6c +585, 0xd8ed2b2214936c2b +586, 0x618ca1e430a52bc9 +587, 0xccbca44a6088136b +588, 0xd0481855c8b9ccbe +589, 0x3c92a2fade28bdf7 +590, 0x855e9fefc38c0816 +591, 0x1269bbfe55a7b27c +592, 0x1d6c853d83726d43 +593, 0xc8655511cc7fcafc +594, 0x301503eb125a9b0e +595, 0xb3108e4532016b11 +596, 0xbb7ab6245da9cb3d +597, 0x18004c49116d85eb +598, 0x3480849c20f61129 +599, 0xe28f45157463937b +600, 0x8e85e61060f2ce1 +601, 0x1673da4ec589ba5e +602, 0x74b9a6bd1b194712 +603, 0xed39e147fa8b7601 +604, 0x28ce54019102ca77 +605, 0x42e0347f6d7a2f30 +606, 0xb6a908d1c4814731 +607, 0x16c3435e4e9a126d +608, 0x8880190514c1ad54 +609, 0xfffd86229a6f773c +610, 0x4f2420cdb0aa1a93 +611, 0xf8e1acb4120fc1fa +612, 0x63a8c553ab36a2f2 +613, 0x86b88cf3c0a6a190 +614, 0x44d8b2801622c792 +615, 0xf6eae14e93082ff1 +616, 0xd9ed4f5d1b8fac61 +617, 0x1808ce17f4e1f70 +618, 0x446e83ea336f262f +619, 0xc7c802b04c0917b7 +620, 0x626f45fd64968b73 +621, 0x9ffa540edc9b2c5c +622, 0xa96a1e219e486af8 +623, 0x2bb8963884e887a1 +624, 0xba7f68a5d029e3c4 +625, 0xefc45f44392d9ca0 +626, 0x98d77762503c5eab +627, 0xd89bcf62f2da627c +628, 0xa3cab8347f833151 +629, 0xa095b7595907d5c7 +630, 0x3b3041274286181 +631, 0xb518db8919eb71fa +632, 0x187036c14fdc9a36 +633, 0xd06e28301e696f5d +634, 0xdbc71184e0c56492 +635, 0xfe51e9cae6125bfd +636, 0x3b12d17cd014df24 +637, 0x3b95e4e2c986ac1a +638, 0x29c1cce59fb2dea2 +639, 0x58c05793182a49d6 +640, 0xc016477e330d8c00 +641, 0x79ef335133ada5d +642, 0x168e2cad941203f3 +643, 0xf99d0f219d702ef0 +644, 0x655628068f8f135b +645, 0xdcdea51910ae3f92 +646, 0x8e4505039c567892 +647, 0x91a9ec7e947c89ae +648, 0x8717172530f93949 +649, 0x1c80aba9a440171a +650, 0x9c8f83f6ebe7441e +651, 0x6c05e1efea4aa7f9 +652, 0x10af696b777c01b +653, 0x5892e9d9a92fc309 +654, 0xd2ba7da71e709432 +655, 0x46378c7c3269a466 +656, 0x942c63dfe18e772c +657, 0x6245cf02ef2476f +658, 0x6f265b2759ea2aea +659, 0x5aa757f17d17f4a6 +660, 0x1ad6a3c44fa09be6 +661, 0xe861af14e7015fb8 +662, 0x86be2e7db388c77 +663, 0x5c7bba32b519e9a0 +664, 0x3feb314850c4437b +665, 0x97955add60cfb45b +666, 0xfdb536230a540bdc +667, 0xdac9d7bf6e58512e +668, 0x4894c00e474e8120 +669, 0xa1918a37739da366 +670, 0xa8097f2096532807 +671, 0x592afe50e6c5e643 +672, 0xd69050ee6dcb33dc +673, 0xa6956b262dd3c561 +674, 0x1a55c815555e63f7 +675, 0x2ec7fd37516de2bb +676, 0x8ec251d9c70e76ba +677, 0x9b76e4abafd2689 +678, 0x9ce3f5c751a57df1 +679, 0x915c4818bf287bc7 +680, 0x2293a0d1fe07c735 +681, 0x7627dcd5d5a66d3d +682, 0xb5e4f92cc49c7138 +683, 0x6fc51298731d268c +684, 0xd19800aa95441f87 +685, 0x14f70f31162fa115 +686, 0x41a3da3752936f59 +687, 0xbec0652be95652ee +688, 0x7aa4bdb1020a290f +689, 0x4382d0d9bee899ef +690, 0xe6d988ae4277d6ff +691, 0xe618088ccb2a32d1 +692, 0x411669dfaa899e90 +693, 0x234e2bf4ba76d9f +694, 0xe109fe4cb7828687 +695, 0x1fb96b5022b0b360 +696, 0x6b24ad76c061a716 +697, 0x7e1781d4d7ecee15 +698, 0xf20c2dbe82ba38ba +699, 0xeda8e8ae1d943655 +700, 0xa58d196e2a77eaec +701, 0x44564765a5995a0b +702, 0x11902fe871ecae21 +703, 0x2ea60279900e675d +704, 0x38427227c18a9a96 +705, 0xe0af01490a1b1b48 +706, 0x826f91997e057824 +707, 0x1e57308e6e50451 +708, 0xb42d469bbbfdc350 +709, 0xb9734cff1109c49b +710, 0x98967559bb9d364f +711, 0xd6be360041907c12 +712, 0xa86a1279122a1e21 +713, 0x26f99a8527bfc698 +714, 0xfa8b85758f28f5d6 +715, 0xe3057429940806ae +716, 0x4bee2d7e84f93b2b +717, 0x948350a76ea506f4 +718, 0xa139154488045e74 +719, 0x8893579ba5e78085 +720, 0x5f21c215c6a9e397 +721, 0x456134f3a59641dc +722, 0x92c0273f8e97a9c6 +723, 0xd2936c9c3f0c6936 +724, 0xcfa4221e752c4735 +725, 0x28cd5a7457355dca +726, 0xecdfdde23d90999f +727, 0x60631b2d494d032b +728, 0xf67289df269a827f +729, 0xcbe8011ef0f5b7ef +730, 0x20eea973c70a84f5 +731, 0xbe1fd200398557ce +732, 0xd2279ee030191bba +733, 0xf2bd4291dedaf819 +734, 0xfc6d167dbe8c402 +735, 0x39ac298da5d0044b +736, 0xceac026f5f561ce +737, 0x10a5b0bdd8ad60e6 +738, 0xdeb3c626df6d4bcb +739, 0x3c128962e77ff6ca +740, 0xc786262e9c67a0e5 +741, 0x4332855b3febcdc0 +742, 0x7bda9724d1c0e020 +743, 0x6a8c93399bc4df22 +744, 0xa9b20100ac707396 +745, 0xa11a3458502c4eb5 +746, 0xb185461c60478941 +747, 0x13131d56195b7ff6 +748, 0x8d55875ddbd4aa1c +749, 0xc09b67425f469aa5 +750, 0x39e33786cc7594c4 +751, 0x75e96db8e4b08b93 +752, 0xda01cd12a3275d1e +753, 0x2c49e7822344fab5 +754, 0x9bd5f10612514ca7 +755, 0x1c801a5c828e7332 +756, 0x29797d3f4f6c7b4c +757, 0xac992715e21e4e53 +758, 0xe40e89ee887ddb37 +759, 0x15189a2b265a783b +760, 0xa854159a52af5c5 +761, 0xb9d8a5a81c12bead +762, 0x3240cdc9d59e2a58 +763, 0x1d0b872234cf8e23 +764, 0xc01224cf6ce12cff +765, 0x2601e9f3905c8663 +766, 0xd4ecf9890168d6b4 +767, 0xa45db796d89bfdd5 +768, 0x9f389406dad64ab4 +769, 0xa5a851adce43ffe3 +770, 0xd0962c41c26e5aa9 +771, 0x8a671679e48510a4 +772, 0xc196dc0924a6bfeb +773, 0x3ead661043b549cb +774, 0x51af4ca737d405ac +775, 0xf4425b5c62275fb6 +776, 0x71e69d1f818c10f5 +777, 0xacaf4af2d3c70162 +778, 0x2e1f1d4fd7524244 +779, 0xe54fdd8f388890e8 +780, 0xfda0d33e84eb2b83 +781, 0x53965c5e392b81da +782, 0x5c92288267263097 +783, 0xcac1b431c878c66c +784, 0x36c0e1cf417241c6 +785, 0x5cc4d9cd1a36bf2c +786, 0x32e4257bb5d3e470 +787, 0x4aecff904adb44fb +788, 0x4d91a8e0d1d60cac +789, 0xa3b478388385b038 +790, 0x48d955f24eba70be +791, 0x310e4deb07f24f68 +792, 0x8853e73b1f30a5a +793, 0x278aee45c2a65c5 +794, 0xf6932eedbd62fb0b +795, 0xafb95958c82fafad +796, 0x78e807c18616c16c +797, 0xd7abadda7488ed9f +798, 0x2dd72e2572aa2ae6 +799, 0x6ec3791982c2be09 +800, 0x6865bb314fac478f +801, 0xa14dc0ce09000d1a +802, 0xb8081ad134da10f2 +803, 0xc4ac1534aa825ef5 +804, 0xd83aeb48ae2d538f +805, 0x38052027e3074be4 +806, 0xa9833e06ef136582 +807, 0x4f02d790ec9fd78 +808, 0xec2f60bc711c5bdc +809, 0x9253b0d12268e561 +810, 0xa8ac607fdd62c206 +811, 0x895e28ebc920289f +812, 0xe2fd42b154243ac7 +813, 0xc69cac2f776eee19 +814, 0xf4d4ac11db56d0dc +815, 0xa8d37049b9f39833 +816, 0x75abbf8a196c337c +817, 0xb115bb76750d27b8 +818, 0x39426d187839154 +819, 0xd488423e7f38bf83 +820, 0xbb92e0c76ecb6a62 +821, 0x3055a018ce39f4e3 +822, 0xc93fe0e907729bfb +823, 0x65985d17c5863340 +824, 0x2088ae081b2028e1 +825, 0x6e628de873314057 +826, 0x864377cccf573f0e +827, 0xae03f4c9aa63d132 +828, 0xb1db766d6404c66d +829, 0xdce5a22414a374b +830, 0x622155b777819997 +831, 0x69fe96e620371f3c +832, 0xa9c67dbc326d94fc +833, 0x932a84ae5dd43bab +834, 0xe2301a20f6c48c3f +835, 0x795d2e79c6477300 +836, 0xd8e3e631289521e7 +837, 0xae2684979002dfd6 +838, 0xc9c2392377550f89 +839, 0xa1b0c99d508ef7ec +840, 0x593aef3c5a5272ec +841, 0xe32e511a4b7162cd +842, 0xab3b81655f5a2857 +843, 0x1b535e1a0aaf053e +844, 0x5b33f56c1b6a07e2 +845, 0x782dc8cfcac4ef36 +846, 0xb3d4f256eecfd202 +847, 0xf73a6598f58c4f7e +848, 0xd5722189524870ae +849, 0x707878de6b995fc0 +850, 0xc3eb6ba73e3d7e8a +851, 0xca75c017655b75a7 +852, 0x1b29369ea3541e5f +853, 0x352e98858bdb58a3 +854, 0x1e4412d184b6b27d +855, 0x2d375ba0304b2d17 +856, 0x56c30fce69a5d08e +857, 0x6b8c2b0c06584bda +858, 0xde4dfff228c8c91f +859, 0xb7c9edd574e6287f +860, 0xf6078281c9fca2b2 +861, 0xb9b9a51de02a2f1e +862, 0xa411bef31c0103b0 +863, 0xc5facd8fc5e1d7a3 +864, 0x54e631c05ddf7359 +865, 0x815b42b3fd06c474 +866, 0xc9ac07566fda18ec +867, 0xd84ea62957bd8e15 +868, 0x5575f74b5cfd8803 +869, 0x5779a8d460c2e304 +870, 0xfd6e87e264a85587 +871, 0xa1d674daa320b26d +872, 0x2c3c3ec64b35afc4 +873, 0x393a274ff03e6935 +874, 0x1f40ecbac52c50ea +875, 0xc3de64fa324ffc0c +876, 0x56ae828b7f9deb04 +877, 0xe7c1a77b5c1f2cb3 +878, 0xa4c4aab19ea921cc +879, 0xec164c238825822c +880, 0xa6a3304770c03b03 +881, 0x3a63641d5b1e8123 +882, 0x42677be3a54617ef +883, 0xa2680423e3a200c0 +884, 0x8b17cf75f3f37277 +885, 0xe7ce65a49242be3d +886, 0x7f85934271323e4b +887, 0xcfb0f431f79a4fab +888, 0x392e4041a8505b65 +889, 0xd3e5daf0d8b25ea6 +890, 0x9447eff675d80f53 +891, 0xea27a9d53cfaeea8 +892, 0xe3f2335945a83ba +893, 0x8875a43ce216413b +894, 0xe49941f9eabce33e +895, 0x9357c1296683a5b1 +896, 0xf0f16439e81ee701 +897, 0x3181515295ffd79a +898, 0x9d7150fffd169ed8 +899, 0x2d6a1d281e255a72 +900, 0x81bf1286fb3a92b6 +901, 0x566d3079b499e279 +902, 0xc7939ca8f047341 +903, 0xb1f8050e7c2d59f6 +904, 0x605701045e7be192 +905, 0x51b73360e8e31a1c +906, 0x9f4ad54483ba9fe0 +907, 0xd3085b8fcf69d1c8 +908, 0xc3e7475026dc5f0b +909, 0x5800f8554b157354 +910, 0x37dfdf858cfcd963 +911, 0x3a1fce05ce385072 +912, 0xf495c062645c20c3 +913, 0xdcbeec2c3492c773 +914, 0xc38f427589d1d0b4 +915, 0x681ead60216a8184 +916, 0x4bd569c40cc88c41 +917, 0x49b0d442e130b7a2 +918, 0xee349156b7d1fa3f +919, 0x2bde2d2db055135b +920, 0xc6a460d2fbcb2378 +921, 0xd0f170494ff3dbb +922, 0xb294422492528a23 +923, 0xfc95873c854e7b86 +924, 0x6c9c3ad1797bb19c +925, 0xe0c06f2aab65062d +926, 0x58e32ce0f11e3a81 +927, 0xa745fcd729ff5036 +928, 0x599b249b2fc2cdb2 +929, 0x78f23b5b0dd5b082 +930, 0x6de3e957f549ecfc +931, 0x9d0712fa6d878756 +932, 0x9076e8554e4a413a +933, 0xf3185818c0294de8 +934, 0x5de7cdf4b455b9b6 +935, 0xb15f6908ed703f7d +936, 0x98c654dfedc6818 +937, 0x120502ab0e93ae42 +938, 0x67966a98a58dc120 +939, 0x1caa0fc628989482 +940, 0xd8b2c3cd480a8625 +941, 0x85c70071b3aed671 +942, 0xff385f8473714662 +943, 0xe2868e4bf3773b63 +944, 0x96cf8019b279298e +945, 0x8511cc930bd74800 +946, 0x5312e48fdd55f5ab +947, 0xfcdae564b52df78d +948, 0x9eee48373e652176 +949, 0x953788f6bcbc56b0 +950, 0xd1a3855dbd2f6b37 +951, 0x3ad32acf77f4d1e9 +952, 0x917c7be81b003e30 +953, 0x9ce817da1e2e9dfb +954, 0x2968983db162d44d +955, 0x1e005decef5828ad +956, 0xc38fe59d1aa4f3d5 +957, 0xf357f1710dc02f1d +958, 0x2613912a4c83ec67 +959, 0x832a11470b9a17cb +960, 0x5e85508a611f0dad +961, 0x2781131677f59d56 +962, 0xa82358d7d4b0237f +963, 0xfbf8b3cc030c3af6 +964, 0x68b2f68ac8a55adb +965, 0x3b6fcf353add0ada +966, 0xd1956049bcd15bd5 +967, 0x95b76f31c7f98b6d +968, 0x814b6690df971a84 +969, 0xdcf7959cddd819e4 +970, 0xcf8c72c5d804fc88 +971, 0x56883769c8945a22 +972, 0x1f034652f658cf46 +973, 0x41df1324cda235a1 +974, 0xeccd32524504a054 +975, 0x974e0910a04ec02c +976, 0x72104507b821f6db +977, 0x791f8d089f273044 +978, 0xe0f79a4f567f73c3 +979, 0x52fe5bea3997f024 +980, 0x5f8b9b446494f78 +981, 0xfd9f511947059190 +982, 0x3aea9dac6063bce3 +983, 0xbfdae4dfc24aee60 +984, 0xa82cdbbf0a280318 +985, 0xf460aae18d70aa9d +986, 0x997367cb204a57c4 +987, 0x616e21ab95ba05ef +988, 0x9bfc93bec116769f +989, 0x2b2ee27c37a3fa5b +990, 0xb25c6ed54006ee38 +991, 0xab04d4a5c69e69a5 +992, 0x6d2f6b45f2d8438f +993, 0x4ad2f32afc82f092 +994, 0x513d718908f709c0 +995, 0x5272aadc4fffca51 +996, 0xeb3f87e66156ef5d +997, 0xf8a3d5a46a86ba85 +998, 0xdb4548a86f27abfd +999, 0x57c05f47ff62380d diff --git a/local_packages/numpy/random/tests/data/sfc64-testset-1.csv b/local_packages/numpy/random/tests/data/sfc64-testset-1.csv new file mode 100644 index 0000000..4fffe69 --- /dev/null +++ b/local_packages/numpy/random/tests/data/sfc64-testset-1.csv @@ -0,0 +1,1001 @@ +seed, 0xdeadbeaf +0, 0xa475f55fbb6bc638 +1, 0xb2d594b6c29d971c +2, 0x275bc4ece4484fb1 +3, 0x569be72d9b3492fb +4, 0x89a5bb9b206a670c +5, 0xd951bfa06afdc3f9 +6, 0x7ee2e1029d52a265 +7, 0x12ef1d4de0cb4d4c +8, 0x41658ba8f0ef0280 +9, 0x5b650c82e4fe09c5 +10, 0x638a9f3e30ec4e94 +11, 0x147487fb2ba9233e +12, 0x89ef035603d2d1fb +13, 0xe66ca57a190e6cbe +14, 0x330f673740dd61fc +15, 0xc71d3dce2f8bb34e +16, 0x3c07c39ff150b185 +17, 0x5df952b6cae8f099 +18, 0x9f09f2b1f0ceac80 +19, 0x19598eee2d0c4c67 +20, 0x64e06483702e0ebd +21, 0xda04d1fdb545f7fa +22, 0xf2cf53b61a0c4f9b +23, 0xf0bb724ce196f66e +24, 0x71cefde55d9cf0f +25, 0x6323f62824a20048 +26, 0x1e93604680f14b4e +27, 0xd9d8fad1d4654025 +28, 0xf4ee25af2e76ca08 +29, 0x6af3325896befa98 +30, 0xad9e43abf5e04053 +31, 0xbf930e318ce09de3 +32, 0x61f9583b4f9ffe76 +33, 0x9b69d0b3d5ec8958 +34, 0xa608f250f9b2ca41 +35, 0x6fdba7073dc2bb5d +36, 0xa9d57601efea6d26 +37, 0xc24a88a994954105 +38, 0xc728b1f78d88fe5b +39, 0x88da88c2b083b3b2 +40, 0xa9e27f7303c76cfd +41, 0xc4c24608c29176eb +42, 0x5420b58466b972fd +43, 0xd2018a661b6756c8 +44, 0x7caed83d9573fc7 +45, 0x562a3d81b849a06a +46, 0x16588af120c21f2c +47, 0x658109a7e0eb4837 +48, 0x877aabb14d3822e1 +49, 0x95704c342c3745fe +50, 0xeeb8a0dc81603616 +51, 0x431bf94889290419 +52, 0xe4a9410ab92a5863 +53, 0xbc6be64ea60f12ba +54, 0x328a2da920015063 +55, 0x40f6b3bf8271ae07 +56, 0x4068ff00a0e854f8 +57, 0x1b287572ca13fa78 +58, 0xa11624a600490b99 +59, 0x4a04ef29eb7150fa +60, 0xcc9469ab5ffb739 +61, 0x99a6a9f8d95e782 +62, 0x8e90356573e7a070 +63, 0xa740b8fb415c81c4 +64, 0x47eccef67447f3da +65, 0x2c720afe3a62a49b +66, 0xe2a747f0a43eacf4 +67, 0xba063a87ab165576 +68, 0xbc1c78ed27feb5a3 +69, 0x285a19fa3974f9d +70, 0x489c61e704f5f0e3 +71, 0xf5ab04f6b03f238b +72, 0x7e25f88138a110dd +73, 0xc3d1cef3d7c1f1d1 +74, 0xc3de6ec64d0d8e00 +75, 0x73682a15b6cc5088 +76, 0x6fecbeb319163dc5 +77, 0x7e100d5defe570a1 +78, 0xad2af9af076dce57 +79, 0x3c65100e23cd3a9a +80, 0x4b442cc6cfe521bb +81, 0xe89dc50f8ab1ef75 +82, 0x8b3c6fdc2496566 +83, 0xdfc50042bc2c308c +84, 0xe39c5f158b33d2b2 +85, 0x92f6adefdfeb0ac +86, 0xdf5808a949c85b3e +87, 0x437384021c9dace9 +88, 0xa7b5ed0d3d67d8f +89, 0xe1408f8b21da3c34 +90, 0xa1bba125c1e80522 +91, 0x7611dc4710385264 +92, 0xb00a46ea84082917 +93, 0x51bf8002ffa87cef +94, 0x9bb81013e9810adc +95, 0xd28f6600013541cd +96, 0xc2ca3b1fa7791c1f +97, 0x47f9ad58f099c82c +98, 0x4d1bb9458469caf9 +99, 0xca0b165b2844257 +100, 0xc3b2e667d075dc66 +101, 0xde22f71136a3dbb1 +102, 0x23b4e3b6f219e4c3 +103, 0x327e0db4c9782f66 +104, 0x9365506a6c7a1807 +105, 0x3e868382dedd3be7 +106, 0xff04fa6534bcaa99 +107, 0x96621a8862995305 +108, 0x81bf39cb5f8e1df7 +109, 0x79b684bb8c37af7a +110, 0xae3bc073c3cde33c +111, 0x7805674112c899ac +112, 0xd95a27995abb20f2 +113, 0x71a503c57b105c40 +114, 0x5ff00d6a73ec8acc +115, 0x12f96391d91e47c2 +116, 0xd55ca097b3bd4947 +117, 0x794d79d20468b04 +118, 0x35d814efb0d7a07d +119, 0xfa9ac9bd0aae76d3 +120, 0xa77b8a3711e175cd +121, 0xe6694fbf421f9489 +122, 0xd8f1756525a1a0aa +123, 0xe38dfa8426277433 +124, 0x16b640c269bbcd44 +125, 0x2a7a5a67ca24cfeb +126, 0x669039c28d5344b4 +127, 0x2a445ee81fd596bb +128, 0x600df94cf25607e0 +129, 0x9358561a7579abff +130, 0xee1d52ea179fc274 +131, 0x21a8b325e89d31be +132, 0x36fc0917486eec0a +133, 0x3d99f40717a6be9f +134, 0x39ac140051ca55ff +135, 0xcef7447c26711575 +136, 0xf22666870eff441d +137, 0x4a53c6134e1c7268 +138, 0xd26de518ad6bdb1b +139, 0x1a736bf75b8b0e55 +140, 0xef1523f4e6bd0219 +141, 0xb287b32fd615ad92 +142, 0x2583d6af5e841dd5 +143, 0x4b9294aae7ca670c +144, 0xf5aa4a84174f3ca9 +145, 0x886300f9e0dc6376 +146, 0x3611401e475ef130 +147, 0x69b56432b367e1ac +148, 0x30c330e9ab36b7c4 +149, 0x1e0e73079a85b8d5 +150, 0x40fdfc7a5bfaecf +151, 0xd7760f3e8e75a085 +152, 0x1cc1891f7f625313 +153, 0xeece1fe6165b4272 +154, 0xe61111b0c166a3c1 +155, 0x2f1201563312f185 +156, 0xfd10e8ecdd2a57cb +157, 0x51cdc8c9dd3a89bf +158, 0xed13cc93938b5496 +159, 0x843816129750526b +160, 0xd09995cd6819ada +161, 0x4601e778d40607df +162, 0xef9df06bd66c2ea0 +163, 0xae0bdecd3db65d69 +164, 0xbb921a3c65a4ae9a +165, 0xd66698ce8e9361be +166, 0xacdc91647b6068f4 +167, 0xe505ef68f2a5c1c0 +168, 0xd6e62fd27c6ab137 +169, 0x6a2ba2c6a4641d86 +170, 0x9c89143715c3b81 +171, 0xe408c4e00362601a +172, 0x986155cbf5d4bd9d +173, 0xb9e6831728c893a7 +174, 0xb985497c3bf88d8c +175, 0xd0d729214b727bec +176, 0x4e557f75fece38a +177, 0x6572067fdfd623ca +178, 0x178d49bb4d5cd794 +179, 0xe6baf59f60445d82 +180, 0x5607d53518e3a8d2 +181, 0xba7931adb6ebbd61 +182, 0xe853576172611329 +183, 0xe945daff96000c44 +184, 0x565b9ba3d952a176 +185, 0xcdb54d4f88c584c8 +186, 0x482a7499bee9b5e5 +187, 0x76560dd0affe825b +188, 0x2a56221faa5ca22c +189, 0x7729be5b361f5a25 +190, 0xd6f2195795764876 +191, 0x59ef7f8f423f18c5 +192, 0x7ebefed6d02adde1 +193, 0xcfec7265329c73e5 +194, 0x4fd8606a5e59881c +195, 0x95860982ae370b73 +196, 0xdecfa33b1f902acc +197, 0xf9b8a57400b7c0a6 +198, 0xd20b822672ec857b +199, 0x4eb81084096c7364 +200, 0xe535c29a44d9b6ad +201, 0xdef8b48ebacb2e29 +202, 0x1063bc2b8ba0e915 +203, 0xe4e837fb53d76d02 +204, 0x4df935db53579fb8 +205, 0xa30a0c8053869a89 +206, 0xe891ee58a388a7b5 +207, 0x17931a0c64b8a985 +208, 0xaf2d350b494ce1b3 +209, 0x2ab9345ffbcfed82 +210, 0x7de3fe628a2592f0 +211, 0x85cf54fab8b7e79d +212, 0x42d221520edab71b +213, 0x17b695b3af36c233 +214, 0xa4ffe50fe53eb485 +215, 0x1102d242db800e4d +216, 0xc8dc01f0233b3b6 +217, 0x984a030321053d36 +218, 0x27fa8dc7b7112c0e +219, 0xba634dd8294e177f +220, 0xe67ce34b36332eb +221, 0x8f1351e1894fb41a +222, 0xb522a3048761fd30 +223, 0xc350ad9bc6729edc +224, 0xe0ed105bd3c805e1 +225, 0xa14043d2b0825aa7 +226, 0xee7779ce7fc11fdf +227, 0xc0fa8ba23a60ab25 +228, 0xb596d1ce259afbad +229, 0xaa9b8445537fdf62 +230, 0x770ab2c700762e13 +231, 0xe812f1183e40cc1 +232, 0x44bc898e57aefbbd +233, 0xdd8a871df785c996 +234, 0x88836a5e371eb36b +235, 0xb6081c9152623f27 +236, 0x895acbcd6528ca96 +237, 0xfb67e33ddfbed435 +238, 0xaf7af47d323ce26 +239, 0xe354a510c3c39b2d +240, 0x5cacdedda0672ba3 +241, 0xa440d9a2c6c22b09 +242, 0x6395099f48d64304 +243, 0xc11cf04c75f655b5 +244, 0x1c4e054d144ddb30 +245, 0x3e0c2db89d336636 +246, 0x127ecf18a5b0b9a7 +247, 0x3b50551a88ea7a73 +248, 0xbd27003e47f1f684 +249, 0xf32d657782baac9b +250, 0x727f5cabf020bc9 +251, 0x39c1c1c226197dc7 +252, 0x5552c87b35deeb69 +253, 0x64d54067b5ce493f +254, 0x3494b091fe28dda0 +255, 0xdf0278bc85ee2965 +256, 0xdef16fec25efbd66 +257, 0xe2be09f578c4ce28 +258, 0xd27a9271979d3019 +259, 0x427f6fcd71845e3 +260, 0x26b52c5f81ec142b +261, 0x98267efc3986ad46 +262, 0x7bf4165ddb7e4374 +263, 0xd05f7996d7941010 +264, 0x3b3991de97b45f14 +265, 0x9068217fb4f27a30 +266, 0xd8fe295160afc7f3 +267, 0x8a159fab4c3bc06f +268, 0x57855506d19080b6 +269, 0x7636df6b3f2367a4 +270, 0x2844ee3abd1d5ec9 +271, 0xe5788de061f51c16 +272, 0x69e78cc9132a164 +273, 0xacd53cde6d8cd421 +274, 0xb23f3100068e91da +275, 0x4140070a47f53891 +276, 0xe4a422225a96e53a +277, 0xb82a8925a272a2ac +278, 0x7c2f9573590fe3b7 +279, 0xbaf80764db170575 +280, 0x955abffa54358368 +281, 0x355ce7460614a869 +282, 0x3700ede779a4afbf +283, 0x10a6ec01d92d68cd +284, 0x3308f5a0a4c0afef +285, 0x97b892d7601136c9 +286, 0x4955c3b941b8552e +287, 0xca85aa67e941961d +288, 0xb1859ae5db28e9d2 +289, 0x305d072ac1521fbd +290, 0xed52a868996085bb +291, 0x723bfa6a76358852 +292, 0x78d946ecd97c5fb3 +293, 0x39205b30a8e23e79 +294, 0xb927e3d086baadbe +295, 0xa18d6946136e1ff5 +296, 0xdab6f0b51c1eb5ff +297, 0xf0a640bf7a1af60c +298, 0xf0e81db09004d0d4 +299, 0xfe76cebdbe5a4dde +300, 0x2dafe9cc3decc376 +301, 0x4c871fdf1af34205 +302, 0xe79617d0c8fa893b +303, 0xee658aaad3a141f7 +304, 0xfd91aa74863e19f1 +305, 0x841b8f55c103cc22 +306, 0x22766ed65444ad5d +307, 0x56d03d1beca6c17a +308, 0x5fd4c112c92036ae +309, 0x75466ae58a5616dc +310, 0xfbf98b1081e802a9 +311, 0xdc325e957bf6d8f5 +312, 0xb08da7015ebd19b7 +313, 0xf25a9c0944f0c073 +314, 0xf4625bafa0ced718 +315, 0x4349c9e093a9e692 +316, 0x75a9ccd4dd8935cb +317, 0x7e6cf9e539361e91 +318, 0x20fdd22fb6edd475 +319, 0x5973021b57c2311f +320, 0x75392403667edc15 +321, 0xed9b2156ea70d9f1 +322, 0xf40c114db50b64a0 +323, 0xe26bb2c9eef20c62 +324, 0x409c1e3037869f03 +325, 0xcdfd71fdda3b7f91 +326, 0xa0dfae46816777d6 +327, 0xde060a8f61a8deb8 +328, 0x890e082a8b0ca4fc +329, 0xb9f2958eddf2d0db +330, 0xd17c148020d20e30 +331, 0xffdc9cc176fe7201 +332, 0xffb83d925b764c1 +333, 0x817ea639e313da8d +334, 0xa4dd335dd891ca91 +335, 0x1342d25a5e81f488 +336, 0xfa7eb9c3cf466b03 +337, 0xfe0a423d44b185d0 +338, 0x101cfd430ab96049 +339, 0x7b5d3eda9c4504b +340, 0xe20ccc006e0193f1 +341, 0xf54ccddedebc5df0 +342, 0xc0edd142bd58f1db +343, 0x3831f40d378d2430 +344, 0x80132353f0a88289 +345, 0x688f23c419d03ef8 +346, 0x4c6837e697884066 +347, 0x699387bb2e9a3a8f +348, 0x8996f860342448d8 +349, 0xb0f80dff99bfa5cc +350, 0x3e927a7f9ea12c8e +351, 0xd7e498d1e5f9dff3 +352, 0x78ecb97bb3f864cc +353, 0x3c4ffd069a014d38 +354, 0xf8d5073a1e09b4d4 +355, 0x8717e854f9faef23 +356, 0xfbcc5478d8d0ad7 +357, 0xd3cd8b233ca274ff +358, 0x8bd8f11f79beb265 +359, 0xf64498a832d8fd0e +360, 0xb01bba75112131ec +361, 0x55572445a7869781 +362, 0x7b56622f18cb3d7a +363, 0x7f192c9e075bdb83 +364, 0xd9a112f836b83ff3 +365, 0x68673b37269653dc +366, 0xe46a9433fb6a0879 +367, 0x127d756ca4779001 +368, 0xc1378e8b1e8eab94 +369, 0x1006edb0f51d078c +370, 0xc6dd53961232d926 +371, 0x9a4aeef44038256d +372, 0xd357f4fa652d4f5f +373, 0x59f3d2cc3378598 +374, 0xe76e6207a824a7fc +375, 0x5fc5e33712ceffef +376, 0x77d24aeb0ccb1adc +377, 0x5be4b2826805659e +378, 0x257c69d787e64634 +379, 0x58dd52ca6bc727b1 +380, 0x3ab997767235ea33 +381, 0x986a2a7a966fad14 +382, 0xc900f8b27761dcc4 +383, 0x44991bdb13795700 +384, 0xe5c145a4fe733b2 +385, 0x56f041b56bffe0d3 +386, 0x5779c4fef8067996 +387, 0xa0fe8748e829532d +388, 0x840c1277d78d9dd4 +389, 0x37ebcb315432acbc +390, 0xf4bc8738433ba3be +391, 0x8b122993f2e10062 +392, 0xe1fe8481f2681ed5 +393, 0x8e23f1630d9f494a +394, 0xda24661a01b7d0b3 +395, 0x7a02942a179cee36 +396, 0xf1e08a3c09b71ac +397, 0x3dec2cc7ee0bd8fd +398, 0x1f3e480113d805d4 +399, 0xc061b973ad4e3f2c +400, 0x6bea750f17a66836 +401, 0xbc2add72eac84c25 +402, 0xcff058d3f97934ca +403, 0x54ccc30987778ec2 +404, 0x93449ec1e1469558 +405, 0xe2ff369eb0c6836 +406, 0x41c2df2d63bf8e55 +407, 0xf9302629b6c71be2 +408, 0xdd30376b8e5ab29a +409, 0x12db9e04f911d754 +410, 0x8d03d6cd359f1b97 +411, 0xe15956511abf1cee +412, 0x9b68e10e2c2fd940 +413, 0x2e28de6491c1ce53 +414, 0x52b329b72d0c109d +415, 0xc2c0b115f9da2a60 +416, 0x6ca084105271bbff +417, 0x49b92b8676058c1e +418, 0x767fc92a70f7e5a3 +419, 0x87ba4ed4b65a6aa0 +420, 0xf70b052e0a3975e9 +421, 0x3e925c3306db9eec +422, 0x43253f1d96ac9513 +423, 0xe3e04f1a1ea454c4 +424, 0x763e3f4cc81ba0c8 +425, 0x2a2721ac69265705 +426, 0xdf3b0ac6416ea214 +427, 0xa6a6b57450f3e000 +428, 0xc3d3b1ac7dbfe6ac +429, 0xb66e5e6f7d2e4ec0 +430, 0x43c65296f98f0f04 +431, 0xdb0f6e3ff974d842 +432, 0x3d6b48e02ebb203b +433, 0xd74674ebf09d8f27 +434, 0xbe65243c58fc1200 +435, 0x55eb210a68d42625 +436, 0x87badab097dbe883 +437, 0xada3fda85a53824f +438, 0xef2791e8f48cd37a +439, 0x3fe7fceb927a641a +440, 0xd3bffd3ff031ac78 +441, 0xb94efe03da4d18fb +442, 0x162a0ad8da65ea68 +443, 0x300f234ef5b7e4a6 +444, 0xa2a8b4c77024e4fb +445, 0x5950f095ddd7b109 +446, 0xded66dd2b1bb02ba +447, 0x8ec24b7fa509bcb6 +448, 0x9bede53d924bdad6 +449, 0xa9c3f46423be1930 +450, 0x6dfc90597f8de8b4 +451, 0xb7419ebc65b434f0 +452, 0xa6596949238f58b9 +453, 0x966cbade640829b8 +454, 0x58c74877bdcbf65e +455, 0xaa103b8f89b0c453 +456, 0x219f0a86e41179a4 +457, 0x90f534fc06ddc57f +458, 0x8db7cdd644f1affa +459, 0x38f91de0167127ac +460, 0xdcd2a65e4df43daa +461, 0x3e04f34a7e01f834 +462, 0x5b237eea68007768 +463, 0x7ff4d2b015921768 +464, 0xf786b286549d3d51 +465, 0xaefa053fc2c3884c +466, 0x8e6a8ff381515d36 +467, 0x35b94f3d0a1fce3c +468, 0x165266d19e9abb64 +469, 0x1deb5caa5f9d8076 +470, 0x13ab91290c7cfe9d +471, 0x3651ca9856be3e05 +472, 0xe7b705f6e9cccc19 +473, 0xd6e7f79668c127ed +474, 0xa9faf37154896f92 +475, 0x89fbf190603e0ab1 +476, 0xb34d155a86f942d0 +477, 0xb2d4400a78bfdd76 +478, 0x7c0946aca8cfb3f0 +479, 0x7492771591c9d0e8 +480, 0xd084d95c5ca2eb28 +481, 0xb18d12bd3a6023e +482, 0xea217ed7b864d80b +483, 0xe52f69a755dd5c6f +484, 0x127133993d81c4aa +485, 0xe07188fcf1670bfb +486, 0x178fbfe668e4661d +487, 0x1c9ee14bb0cda154 +488, 0x8d043b96b6668f98 +489, 0xbc858986ec96ca2b +490, 0x7660f779d528b6b7 +491, 0xd448c6a1f74ae1d3 +492, 0x178e122cfc2a6862 +493, 0x236f000abaf2d23b +494, 0x171b27f3f0921915 +495, 0x4c3ff07652f50a70 +496, 0x18663e5e7d3a66ca +497, 0xb38c97946c750cc9 +498, 0xc5031aae6f78f909 +499, 0x4d1514e2925e95c1 +500, 0x4c2184a741dabfbb +501, 0xfd410364edf77182 +502, 0xc228157f863ee873 +503, 0x9856fdc735cc09fc +504, 0x660496cd1e41d60e +505, 0x2edf1d7e01954c32 +506, 0xd32e94639bdd98cf +507, 0x8e153f48709a77d +508, 0x89357f332d2d6561 +509, 0x1840d512c97085e6 +510, 0x2f18d035c9e26a85 +511, 0x77b88b1448b26d5b +512, 0xc1ca6ef4cdae0799 +513, 0xcc203f9e4508165f +514, 0xeaf762fbc9e0cbbe +515, 0xc070c687f3c4a290 +516, 0xd49ed321068d5c15 +517, 0x84a55eec17ee64ee +518, 0x4d8ee685298a8871 +519, 0x9ff5f17d7e029793 +520, 0x791d7d0d62e46302 +521, 0xab218b9114e22bc6 +522, 0x4902b7ab3f7119a7 +523, 0x694930f2e29b049e +524, 0x1a3c90650848999f +525, 0x79f1b9d8499c932b +526, 0xfacb6d3d55e3c92f +527, 0x8fd8b4f25a5da9f5 +528, 0xd037dcc3a7e62ae7 +529, 0xfecf57300d8f84f4 +530, 0x32079b1e1dc12d48 +531, 0xe5f8f1e62b288f54 +532, 0x97feba3a9c108894 +533, 0xd279a51e1899a9a0 +534, 0xd68eea8e8e363fa8 +535, 0x7394cf2deeca9386 +536, 0x5f70b0c80f1dbf10 +537, 0x8d646916ed40462 +538, 0xd253bb1c8a12bbb6 +539, 0x38f399a821fbd73e +540, 0x947523a26333ac90 +541, 0xb52e90affbc52a37 +542, 0xcf899cd964654da4 +543, 0xdf66ae9cca8d99e7 +544, 0x6051478e57c21b6a +545, 0xffa7dc975af3c1da +546, 0x195c7bff2d1a8f5 +547, 0x64f12b6575cf984d +548, 0x536034cb842cf9e1 +549, 0x180f247ce5bbfad +550, 0x8ced45081b134867 +551, 0x532bbfdf426710f3 +552, 0x4747933e74c4f54d +553, 0x197a890dc4793401 +554, 0x76c7cc2bd42fae2 +555, 0xdabfd67f69675dd0 +556, 0x85c690a68cdb3197 +557, 0xe482cec89ce8f92 +558, 0x20bc9fb7797011b1 +559, 0x76dc85a2185782ad +560, 0x3df37c164422117a +561, 0x99211f5d231e0ab0 +562, 0xef7fd794a0a91f4 +563, 0x419577151915f5fe +564, 0x3ce14a0a7135dae3 +565, 0x389b57598a075d6a +566, 0x8cc2a9d51b5af9aa +567, 0xe80a9beffbd13f13 +568, 0x65e96b22ea8a54d8 +569, 0x79f38c4164138ede +570, 0xd1955846cba03d81 +571, 0x60359fe58e4f26d6 +572, 0x4ea724f585f8d13e +573, 0x316dfdbadc801a3c +574, 0x20aa29b7c6dd66fe +575, 0x65eaf83a6a008caa +576, 0x407000aff1b9e8cb +577, 0xb4d49bfb2b268c40 +578, 0xd4e6fe8a7a0f14a9 +579, 0xe34afef924e8f58e +580, 0xe377b0c891844824 +581, 0x29c2e20c112d30c8 +582, 0x906aad1fe0c18a95 +583, 0x308385f0efbb6474 +584, 0xf23900481bf70445 +585, 0xfdfe3ade7f937a55 +586, 0xf37aae71c33c4f97 +587, 0x1c81e3775a8bed85 +588, 0x7eb5013882ce35ea +589, 0x37a1c1692495818d +590, 0x3f90ae118622a0ba +591, 0x58e4fe6fea29b037 +592, 0xd10ff1d269808825 +593, 0xbce30edb60c21bba +594, 0x123732329afd6fee +595, 0x429b4059f797d840 +596, 0x421166568a8c4be1 +597, 0x88f895c424c1bd7f +598, 0x2adaf7a7b9f781cb +599, 0xa425644b26cb698 +600, 0x8cc44d2486cc5743 +601, 0xdb9f357a33abf6ba +602, 0x1a57c4ea77a4d70c +603, 0x1dea29be75239e44 +604, 0x463141a137121a06 +605, 0x8fecfbbe0b8a9517 +606, 0x92c83984b3566123 +607, 0x3b1c69180ed28665 +608, 0x14a6073425ea8717 +609, 0x71f4c2b3283238d7 +610, 0xb3d491e3152f19f +611, 0x3a0ba3a11ebac5d2 +612, 0xddb4d1dd4c0f54ac +613, 0xdb8f36fe02414035 +614, 0x1cf5df5031b1902c +615, 0x23a20ed12ef95870 +616, 0xf113e573b2dedcbb +617, 0x308e2395cde0a9fa +618, 0xd377a22581c3a7da +619, 0xe0ced97a947a66fb +620, 0xe44f4de9cd754b00 +621, 0x2344943337d9d1bf +622, 0x4b5ae5e2ea6e749c +623, 0x9b8d2e3ef41d1c01 +624, 0x59a5a53ebbd24c6b +625, 0x4f7611bf9e8a06fb +626, 0xea38c7b61361cd06 +627, 0xf125a2bfdd2c0c7 +628, 0x2df8dcb5926b9ebb +629, 0x233e18720cc56988 +630, 0x974c61379b4aa95e +631, 0xc7fe24c1c868910b +632, 0x818fd1affc82a842 +633, 0xcee92a952a26d38e +634, 0x8962f575ebcbf43 +635, 0x7770687e3678c460 +636, 0xdfb1db4ed1298117 +637, 0xb9db54cb03d434d3 +638, 0x34aebbf2244257ad +639, 0xd836db0cb210c490 +640, 0x935daed7138957cd +641, 0x3cd914b14e7948fd +642, 0xd0472e9ed0a0f7f0 +643, 0xa9df33dca697f75e +644, 0x15e9ea259398721a +645, 0x23eeba0f970abd60 +646, 0x2217fdf8bbe99a12 +647, 0x5ea490a95717b198 +648, 0xf4e2bfc28280b639 +649, 0x9d19916072d6f05c +650, 0x5e0387cab1734c6a +651, 0x93c2c8ac26e5f01e +652, 0xb0d934354d957eb1 +653, 0xee5099a1eef3188c +654, 0x8be0abca8edc1115 +655, 0x989a60845dbf5aa3 +656, 0x181c7ed964eee892 +657, 0x49838ea07481288d +658, 0x17dbc75d66116b2e +659, 0xa4cafb7a87c0117e +660, 0xab2d0ae44cdc2e6e +661, 0xdf802f2457e7da6 +662, 0x4b966c4b9187e124 +663, 0x62de9db6f4811e1a +664, 0x1e20485968bc62 +665, 0xe9ac288265caca94 +666, 0xc5c694d349aa8c1a +667, 0x3d67f2083d9bdf10 +668, 0x9a2468e503085486 +669, 0x9d6acd3dc152d1a3 +670, 0xca951e2aeee8df77 +671, 0x2707371af9cdd7b0 +672, 0x2347ae6a4eb5ecbd +673, 0x16abe5582cb426f +674, 0x523af4ff980bbccb +675, 0xb07a0f043e3694aa +676, 0x14d7c3da81b2de7 +677, 0xf471f1b8ac22305b +678, 0xdb087ffff9e18520 +679, 0x1a352db3574359e8 +680, 0x48d5431502cc7476 +681, 0x7c9b7e7003dfd1bf +682, 0x4f43a48aae987169 +683, 0x9a5d3eb66dedb3e9 +684, 0xa7b331af76a9f817 +685, 0xba440154b118ab2d +686, 0x64d22344ce24c9c6 +687, 0xa22377bd52bd043 +688, 0x9dfa1bb18ca6c5f7 +689, 0xdccf44a92f644c8b +690, 0xf623d0a49fd18145 +691, 0x556d5c37978e28b3 +692, 0xad96e32ce9d2bb8b +693, 0x2e479c120be52798 +694, 0x7501cf871af7b2f7 +695, 0xd02536a5d026a5b8 +696, 0x4b37ff53e76ab5a4 +697, 0xdb3a4039caaeab13 +698, 0x6cbd65e3b700c7be +699, 0x7367abd98761a147 +700, 0xf4f9ba216a35aa77 +701, 0xf88ca25ce921eb86 +702, 0xb211de082ec2cbf2 +703, 0xdd94aa46ec57e12e +704, 0xa967d74ad8210240 +705, 0xdaa1fada8cfa887 +706, 0x85901d081c4488ee +707, 0xcf67f79a699ef06 +708, 0x7f2f1f0de921ee14 +709, 0x28bc61e9d3f2328b +710, 0x3332f2963faf18e5 +711, 0x4167ac71fcf43a6 +712, 0x843c1746b0160b74 +713, 0xd9be80070c578a5e +714, 0xbd7250c9af1473e7 +715, 0x43f78afaa3647899 +716, 0x91c6b5dd715a75a5 +717, 0x29cc66c8a07bfef3 +718, 0x3f5c667311dc22be +719, 0x4f49cd47958260cd +720, 0xbef8be43d920b64e +721, 0x7a892a5f13061d8b +722, 0x9532f40125c819b1 +723, 0x924fca3045f8a564 +724, 0x9b2c6442453b0c20 +725, 0x7e21009085b8e793 +726, 0x9b98c17e17af59d2 +727, 0xba61acb73e3ae89a +728, 0xb9d61a710555c138 +729, 0xc2a425d80978974b +730, 0xa275e13592da7d67 +731, 0xe962103202d9ad0f +732, 0xbdf8367a4d6f33fd +733, 0xe59beb2f8648bdc8 +734, 0xb4c387d8fbc4ac1c +735, 0x5e3f276b63054b75 +736, 0xf27e616aa54d8464 +737, 0x3f271661d1cd7426 +738, 0x43a69dbee7502c78 +739, 0x8066fcea6df059a1 +740, 0x3c10f19409bdc993 +741, 0x6ba6f43fb21f23e0 +742, 0x9e182d70a5bccf09 +743, 0x1520783d2a63a199 +744, 0xba1dcc0c70b9cace +745, 0x1009e1e9b1032d8 +746, 0xf632f6a95fb0315 +747, 0x48e711c7114cbfff +748, 0xef281dcec67debf7 +749, 0x33789894d6abf59b +750, 0x6c8e541fffbe7f9c +751, 0x85417f13b08e0a88 +752, 0x9a581e36d589608f +753, 0x461dca50b1befd35 +754, 0x5a3231680dde6462 +755, 0xcc57acf729780b97 +756, 0x50301efef62e1054 +757, 0x675d042cd4f6bbc9 +758, 0x1652fdd3794384c9 +759, 0x1c93bbeeb763cd4d +760, 0x44b7240c4b105242 +761, 0x4c6af2a1b606ccfb +762, 0x18fc43ece2ec1a40 +763, 0x859a5511aeae8acb +764, 0x2f56826f1996ad2f +765, 0xa8e95ce8bb363bdf +766, 0xf4da396054e50e4b +767, 0x5493865e9895883c +768, 0x768e4c8b332ac0e3 +769, 0x32195d2aa583fca5 +770, 0xf2f353f21266bc15 +771, 0x43cddf1d021307d +772, 0x6031e3aa30300e4a +773, 0x4f1298469ac6088f +774, 0x4b4d450bafac574e +775, 0x23e1cf9c0582a22b +776, 0x2e9036980db49cd0 +777, 0xe4e228b113c411b2 +778, 0x8bddcdb82b51706 +779, 0xd2a7ea8288593629 +780, 0x67fe90e98fdda61 +781, 0x7b63494dba95717b +782, 0x105625904510d782 +783, 0xdf4aa2242454e50a +784, 0x32541d6cd7d6c7e3 +785, 0x5661fb432591cf3b +786, 0xce920a5ed047bce7 +787, 0xed4178a3c96eea8f +788, 0xe378cd996e39863b +789, 0x169e1fdc8e2b05e1 +790, 0xaee1812ef7149a96 +791, 0x648571c7453d12c5 +792, 0xb7b6bc9328573c43 +793, 0xe7fb969078e270d7 +794, 0xdfc2b1b8985f6e6f +795, 0x862b6527ee39a1aa +796, 0x1ee329aea91d7882 +797, 0x20d25324f2fe704 +798, 0xbfcc47401fc3bbfd +799, 0x1515cdc8d48b2904 +800, 0xbd6eefe86284261c +801, 0x9b1f28e3b35f22ee +802, 0x842a29d35e5aecda +803, 0xf2346109ad370765 +804, 0x24d68add5a71afd9 +805, 0x4a691421613d91e2 +806, 0x60e3058b3c244051 +807, 0x79194905cdaa5de8 +808, 0xe0e2df35c01e8987 +809, 0xe29b78beffbb5e4a +810, 0xcdcdbc020218c19e +811, 0x5ae0af8c16feae43 +812, 0x8109292feeaf14fa +813, 0x34113f7508dfa521 +814, 0xc062ac163f56730a +815, 0xf1660e66ec6d4c4c +816, 0x5966c55f60151c80 +817, 0x3865ae8ec934b17 +818, 0x472a7314afb055ec +819, 0x7a24277309a44a44 +820, 0x556e02dd35d38baa +821, 0x9849611a1bc96ec1 +822, 0xd176f5d5a8eb0843 +823, 0x44db12ec60510030 +824, 0x272e3a06a0030078 +825, 0x7c4764dbefc075ea +826, 0x910712f3735c1183 +827, 0xd49a2da74ae7aff6 +828, 0xcf9b3e6e8f776d71 +829, 0x27789fe3ec481a02 +830, 0x86659f82c6b5912b +831, 0xe044b3dbf339158c +832, 0x99d81f6bb62a37b0 +833, 0x5f5830c246fada9a +834, 0xe68abab1eeb432cb +835, 0x49c5c5ace04e104 +836, 0x1ac3871b3fc6771b +837, 0x773b39f32d070652 +838, 0x9c4138c2ae58b1f3 +839, 0xac41c63d7452ac60 +840, 0x9248826b245359e1 +841, 0x99bba1c7a64f1670 +842, 0xe0dc99ff4ebb92f2 +843, 0x113638652740f87c +844, 0xebf51e94da88cfc +845, 0x5441c344b81b2585 +846, 0xe1e69e0bc2de652a +847, 0xe9ab6d64ae42ed1e +848, 0x879af8730e305f31 +849, 0x36b9ad912c7e00d6 +850, 0x83ef5e9fca853886 +851, 0xda54d48bb20ea974 +852, 0x32c6d93aefa92aa2 +853, 0x4e887b2c3391847d +854, 0x50966e815f42b1b8 +855, 0x53411ac087832837 +856, 0x46f64fef79df4f29 +857, 0xb34aae3924cd272c +858, 0xf5ad455869a0adbe +859, 0x8351ded7144edac8 +860, 0xeb558af089677494 +861, 0x36ed71d69293a8d6 +862, 0x659f90bf5431b254 +863, 0x53349102b7519949 +864, 0x3db83e20b1713610 +865, 0x6d63f96090556254 +866, 0x4cc0467e8f45c645 +867, 0xb8840c4bd5cd4091 +868, 0xbd381463cc93d584 +869, 0x203410d878c2066d +870, 0x2ebea06213cf71c8 +871, 0x598e8fb75e3fceb4 +872, 0xdcca41ceba0fce02 +873, 0x61bf69212b56aae5 +874, 0x97eed7f70c9114fa +875, 0xf46f37a8b7a063f9 +876, 0x66c8f4ffe5bd6efa +877, 0xe43fd6efda2d4e32 +878, 0x12d6c799e5ad01de +879, 0x9ac83e7f8b709360 +880, 0xbbb7bb3c1957513d +881, 0x7f87c08d4b3796b0 +882, 0x9a7d1d74b6aa4a5c +883, 0xa4314530ff741b6f +884, 0x99a80c6b6f15fca8 +885, 0xd2fec81d6d5fc3ce +886, 0x15a98be1cc40cea +887, 0x98693eb7719366f3 +888, 0x36ccdc2a9e9d4de8 +889, 0x3c8208f63d77df25 +890, 0xca2e376e2343df6 +891, 0xcc9b17cbb54420c6 +892, 0x8724c44a64d7dcb8 +893, 0x9d00c6949ff33869 +894, 0xf4f8e584d2699372 +895, 0x88f4748cdd5a2d53 +896, 0xe215072a1205bc6d +897, 0x190934fe6d740442 +898, 0x7fac5c0ab2af106d +899, 0x1b86633a0bd84fa1 +900, 0x1293e54318492dfb +901, 0x433324fd390f34b9 +902, 0x4c5eb2c67a44643b +903, 0x59a6e281c388b0dd +904, 0xe78e03f9c44623b7 +905, 0x91307a93c768fc3d +906, 0xde8867b004d8e3ff +907, 0xdf52c3f57b7c5862 +908, 0x993f3e1d10358a92 +909, 0x9ccb10bc3e18662d +910, 0x45093ce48a114c73 +911, 0xd59d05979d26330a +912, 0x417c0e03300119a9 +913, 0x1c336500f90cde81 +914, 0x1c8ccd29ead9b85b +915, 0xb76baf3e55d4d950 +916, 0x133ad6196c75fd7e +917, 0x34200b0cde7ed560 +918, 0x9c7c3dacb213c8d9 +919, 0xd97563c4fd9bf1b6 +920, 0x5d910e871835b6cb +921, 0x7d46c4733a16bdf9 +922, 0xe41d73194ddc87b2 +923, 0x7d3d8a0855a465a9 +924, 0x70c2a8b5d3f90c0f +925, 0x9e7565ca5dccfe12 +926, 0x2c0acb4577aa51b1 +927, 0x3d2cd211145b79c7 +928, 0x15a7b17aa6da7732 +929, 0xab44a3730c27d780 +930, 0xf008bd6c802bde3a +931, 0x82ed86ddf3619f77 +932, 0xaabe982ab15c49f9 +933, 0x9bcad8fa6d8e58a4 +934, 0x8f39ed8243718aa1 +935, 0xe9489340e03e3cb6 +936, 0xc722314f5eefb8d0 +937, 0x870e8869a436df59 +938, 0x4dae75b8087a8204 +939, 0xe1d790f6ec6e425b +940, 0xafd39ea1b1d0ed09 +941, 0xdf2c99e464ddf08f +942, 0x74936d859ab9644d +943, 0x3871302164250e73 +944, 0x764b68921e911886 +945, 0x2a1d024b26bb9d66 +946, 0x797fba43918e75b4 +947, 0x62ec6d24ccca335b +948, 0xf4bd8b951762b520 +949, 0x9d450dede9119397 +950, 0x5393a26d10f8c124 +951, 0x6b74769392896b57 +952, 0x7f61dbcc0e328581 +953, 0x64e1df3884d0d94 +954, 0xba77dcdf23738c37 +955, 0xf8e288bc0a177475 +956, 0x4a8abfd1702ecb7d +957, 0x53f22886694736a7 +958, 0x8fc982597ced3e3 +959, 0x1bc46090f820fff7 +960, 0x8bd31f965d02229f +961, 0x65cd0cb29996ee53 +962, 0x702e0f4fcf8c2e9f +963, 0x293b77bff307a9a0 +964, 0x125a986b8b305788 +965, 0x416b0eea428ebf3c +966, 0xeac85421ab0e8469 +967, 0x7f5496095019aa68 +968, 0x1a96d7afbc708e0 +969, 0xb91262e6766e01e1 +970, 0xd0a549cc4ccc6954 +971, 0x75a9a073f50c8a0d +972, 0xae275d2c1c6cd23c +973, 0xcf159b5ec5d28fd4 +974, 0x75d0838ce9b92b +975, 0xd4eddcee6dc4677f +976, 0x6a0a8ad5df6b75b8 +977, 0x6f3fd0ef0f13ecc4 +978, 0xb75a5826c1a8f8a8 +979, 0xd47098bbc7943766 +980, 0x3d4ddd62d5f23dd1 +981, 0x760a904e4583841c +982, 0x2afeb5022b4cf1f +983, 0x66d5f653729f0a13 +984, 0x9a6a5ab62980d30f +985, 0xc332f5643bbf8d5b +986, 0x848fb702e4056a90 +987, 0xa057beaf3f9e8c5f +988, 0x6cc603e4560a6c6a +989, 0xec761811a7b23211 +990, 0xb14aa4090a82aaa5 +991, 0xe29d9d028a5b2dbb +992, 0x5564e53738d68f97 +993, 0xfabca36542eaaf3b +994, 0xb9912fcb782020a2 +995, 0xe865e01b349284fd +996, 0x540b5ff11c5f9274 +997, 0x3463f64e1e7451dc +998, 0xe15d3e2f33b735f8 +999, 0xf5433336eadef6e diff --git a/local_packages/numpy/random/tests/data/sfc64-testset-2.csv b/local_packages/numpy/random/tests/data/sfc64-testset-2.csv new file mode 100644 index 0000000..70aebd5 --- /dev/null +++ b/local_packages/numpy/random/tests/data/sfc64-testset-2.csv @@ -0,0 +1,1001 @@ +seed, 0x0 +0, 0x91959e5fb96a6332 +1, 0x3c1dd8a25a7e9f21 +2, 0x657bdffc99798d9e +3, 0x1a04de320b19e022 +4, 0x65b92af0e5f3c61c +5, 0x9c84070ce8f743c0 +6, 0xbb10e573693cdb25 +7, 0xd65ea9e76b37fb6b +8, 0x503efd0e76c8ae66 +9, 0xd711dcd04c26d0f +10, 0x12f53f435814ac8c +11, 0xb392cd402cfc82bd +12, 0x461764550e06c889 +13, 0x716a48b3514e6979 +14, 0xdd0a322213c18ad7 +15, 0x6673a8ca0a05c4d7 +16, 0x2992ef333437f844 +17, 0xc4aaf7e8240b2aad +18, 0x6ab0a1af1f41474f +19, 0xb0bae400c226941d +20, 0xe5f80c2eeeab48c6 +21, 0x3832c6a93a4024bf +22, 0x280bd824fabe8368 +23, 0x66b626228321e5ff +24, 0xe0bdfba5325a307e +25, 0x3a5f65c6ef254e05 +26, 0x99ea12503cb02f94 +27, 0x5d01fd2db77d420b +28, 0x6959bf5f36b2368d +29, 0xd856e30c62b5f5be +30, 0xe33233e1d8140e66 +31, 0xb78be619d415fa8d +32, 0x4f943bb2cc63d3b +33, 0x9b1460b290952d81 +34, 0x19205d794826740e +35, 0x64617bd9d7a6a1ff +36, 0x30442124b55ea76a +37, 0xebbbc3b29d0333fc +38, 0x39235a0fe359751c +39, 0xf9629768891121aa +40, 0x32052f53f366e05a +41, 0x60cc5b412c925bc8 +42, 0xf8b7ecda1c0e5a9 +43, 0x195f036e170a2568 +44, 0xfe06d0381a9ca782 +45, 0x919d89e8b88eebbf +46, 0xa47fb30148cf0d43 +47, 0x5c983e99d5f9fd56 +48, 0xe7492cdb6a1d42cd +49, 0xf9cfe5c865b0cfd8 +50, 0x35b653367bbc3b99 +51, 0xb1d92f6f4d4e440b +52, 0x737e1d5bd87ed9c0 +53, 0x7a880ca1498f8e17 +54, 0x687dae8494f9a3f7 +55, 0x6bae1989f441d5d7 +56, 0x71ad3fa5a9195c2e +57, 0x16b3969779f5d03 +58, 0xd1bce2ac973f15b3 +59, 0xa114b1ee2ce0dcdd +60, 0x270d75c11eb1b8d5 +61, 0xc48ffa087c0a7bc +62, 0xaaf9dc48cda9848d +63, 0x8111cf10ef6e584d +64, 0x6736df6af40ee6f4 +65, 0x1a1a111682fbf98d +66, 0xeb217658e1cb3b5d +67, 0xcaf58a8b79de9dec +68, 0x25d0ffd63c88d7a1 +69, 0x4c498cd871b7f176 +70, 0x4069a6156eb0cf3c +71, 0xdf012f12edcdd867 +72, 0x7734c0ac8edb1689 +73, 0xed6960ac53dbc245 +74, 0x305e20da8868c661 +75, 0x5f0c7a3719956f95 +76, 0x66842bbe3b28895 +77, 0xb608bc9a31eac410 +78, 0xfcb17d5529503abd +79, 0x829ae5cbc29b92ee +80, 0x17f2f0027bc24f3a +81, 0x435926c33d8f44cc +82, 0x3ab899327098dbec +83, 0xaf78573b27f8ead8 +84, 0xa8b334fabcf8dc60 +85, 0xcdf3b366a6a303db +86, 0x8da9379dd62b34c8 +87, 0xb0ba511955f264a7 +88, 0x9d72e21a644f961d +89, 0xfac28382e2e7e710 +90, 0xd457065f048410aa +91, 0x1cae57d952563969 +92, 0x5a160a6223253e03 +93, 0x2c45df736d73c8bd +94, 0x7f651ebc6ad9cec5 +95, 0x77a6be96c7d2e7e7 +96, 0x1721fb1dbfd6546a +97, 0xf73f433ecff3c997 +98, 0xed1e80f680965bfe +99, 0x6705ad67a3003b30 +100, 0xac21134efcadb9f7 +101, 0x4d2ba0a91d456ac +102, 0x59da7b59434eb52b +103, 0x26c1d070fd414b5f +104, 0xed7079ddfce83d9a +105, 0x9277d21f88e0fb7a +106, 0xfae16b9a8d53d282 +107, 0xb08a0e2e405fdf7d +108, 0x2ea20df44229d6ec +109, 0x80e4634cd3612825 +110, 0xbe62e8aeba8f8a1a +111, 0x4981209769c190fb +112, 0xcec96ef14c7e1f65 +113, 0x73fe4457b47e7b53 +114, 0x1d66300677315c31 +115, 0xe26821290498c4cc +116, 0xf6110248fd8fb1c5 +117, 0x30fd7fe32dbd8be3 +118, 0x534ec9b910a2bd72 +119, 0x8f9bfe878bbf7382 +120, 0x4f4eb5295c0c2193 +121, 0xdeb22f03a913be9e +122, 0x40f716f8e2a8886c +123, 0xc65007d0e386cdb1 +124, 0x9bdd26d92b143a14 +125, 0xf644b0b77ea44625 +126, 0x75f5a53f6b01993a +127, 0xfe803e347bf41010 +128, 0x594bff5fa17bc360 +129, 0x3551edfb450373c7 +130, 0x898f9dad433615db +131, 0x923d2406daa26d49 +132, 0x99e07faccbc33426 +133, 0x7389f9ff4470f807 +134, 0xdc2a25957c6df90b +135, 0x33c6d8965ef3053f +136, 0x51a8f07e838f1ab +137, 0x91c5db369380274f +138, 0xc37de65ac56b207e +139, 0xfcc6d2375dde7f14 +140, 0xa4e6418bff505958 +141, 0x4b8b9f78e46953c4 +142, 0x255ab2e0f93cf278 +143, 0xdf650717af3d96ef +144, 0x2caa21cba3aae2b2 +145, 0xce7e46c6f393daa4 +146, 0x1d5b3573f9997ac7 +147, 0x5280c556e850847d +148, 0x32edc31bef920ad7 +149, 0xefaa6b0b08cf2c6 +150, 0x5151c99d97b111c5 +151, 0x35ccf4bf53d17590 +152, 0xa210d7bd8697b385 +153, 0xa9419f95738fbe61 +154, 0xdeccf93a1a4fdc90 +155, 0xd0ea3365b18e7a05 +156, 0x84122df6dcd31b9a +157, 0x33040a2125cea5f5 +158, 0xfe18306a862f6d86 +159, 0xdb97c8392e5c4457 +160, 0xc3e0fa735e80e422 +161, 0x7d106ff36467a0c1 +162, 0xb9825eecc720a76d +163, 0x7fefc6f771647081 +164, 0xf5df3f5b3977bf13 +165, 0x18fb22736d36f1e0 +166, 0xadc4637b4953abfc +167, 0x174e66d3e17974bd +168, 0xf1614c51df4db5db +169, 0x6664ecde5717b293 +170, 0xd5bc5b6839265c26 +171, 0xf6ca9ce1af3f1832 +172, 0xca696789a9d506ea +173, 0x7399c246c8f9d53 +174, 0xadf49049626417e2 +175, 0xbcd84af37d09ab91 +176, 0xbb41c177f3a3fa45 +177, 0x592becc814d55302 +178, 0xa88b4e65f6cfe5f7 +179, 0xa0a55e34ff879426 +180, 0x3c2ea6aa725b42b7 +181, 0x65ac4a407b1f9521 +182, 0xde63d53f7e88b556 +183, 0x18bc76696d015f40 +184, 0xd1363f2cd4c116a8 +185, 0x2fe859be19a48e4a +186, 0x83d6099b1415e656 +187, 0x43f2cbc1a4ee6410 +188, 0xb2eca3d3421c533d +189, 0xc52b98ea3f031f5d +190, 0xfe57eb01da07e9d1 +191, 0xf9377883537a6031 +192, 0x364030c05dac7add +193, 0x6815cb06b35d4404 +194, 0xceae2d4ce31894be +195, 0xc602bcdf6062bf6a +196, 0xc8e4bd8dcc6062e3 +197, 0x9c29e87b92a1a791 +198, 0x41e626b871ca9651 +199, 0x325c3d1fb8efbcd8 +200, 0x7dbbacf8e3419fb3 +201, 0x3602e72516bb7319 +202, 0x537a008ebd94d24b +203, 0xda7714fc9d4d161d +204, 0x1c8c73700e1b621b +205, 0x2749b80937d6c939 +206, 0x76ee6abac5b14d33 +207, 0xf18d1e92cb6a8b5c +208, 0x6ce9579d9291c721 +209, 0x60523c745a40e58 +210, 0x637f837fcc901757 +211, 0x2ff71b19661dc5b3 +212, 0x393ab586326ad16f +213, 0xa0970ea30fe742b7 +214, 0x570222d7f27fe5ae +215, 0x3b5806d43fd38629 +216, 0x129a0ad7420180c5 +217, 0x1c4726355778d52c +218, 0x7c1459cf77656499 +219, 0xfe038a0932132069 +220, 0x4c4cc317a937483a +221, 0xa333d24067e926ba +222, 0x401d9b6ab37f6ef2 +223, 0x87ad0e491ebe4a2a +224, 0xfc02f312e72d121d +225, 0xfde715b3b99767b2 +226, 0xd111c342ba521c92 +227, 0x83b221b10879c617 +228, 0x6a1bf5c01fdf4277 +229, 0x166bfc0c3f5892ee +230, 0x4608d556d7c57856 +231, 0x8d786857c95ece49 +232, 0x2d357445a1aca4ac +233, 0x79620dae28ecd796 +234, 0x90e715dc0f2201c4 +235, 0x173b68b4c9f4b665 +236, 0x4e14d040ebac4eef +237, 0xbd25960b4b892e +238, 0x911a199db6f1989d +239, 0xfe822d7c601fd2e0 +240, 0x9b4c1d58d8223a69 +241, 0x907c1891283843b0 +242, 0xf4868bf54061c4b2 +243, 0x17f8cd1fc24efd85 +244, 0xd44253f9af14c3aa +245, 0x16d0da0cb911d43c +246, 0x3c6a46615828e79a +247, 0x498591c1138e11a5 +248, 0xcc0f26336d0d6141 +249, 0x4d3ebc873212309a +250, 0x16bad7792d5c2c6a +251, 0x474215a80b2bbd11 +252, 0x7159848abd8492fc +253, 0x359341c50973685f +254, 0x27512ee7bf784a4a +255, 0x45228ea080f70447 +256, 0x880cab616500d50e +257, 0x12fae93f9830d56e +258, 0x6744ee64348d9acd +259, 0x484dada28cd2a828 +260, 0x98491d0729e41863 +261, 0x2f15aac43c2863b0 +262, 0x5727a34d77a1da0f +263, 0xa435cebef6a62eed +264, 0xd211697d57b053b0 +265, 0x65aa757b68bd557 +266, 0xe3a1b7a2d8a3e06a +267, 0x2adf64e67252a7a9 +268, 0xadadcb75cadee276 +269, 0x7934bc57ac8d97bf +270, 0xccff0d0f412e0606 +271, 0x101a82aa3e8f3db9 +272, 0xb0f2498094b4575c +273, 0xba2561d9ef26ed8a +274, 0xfbcd1268fc3febe1 +275, 0x9aa10bb19eb152e0 +276, 0xf496217a601a6d72 +277, 0xe4be1e4f2fa91363 +278, 0x473a602bf3dd68eb +279, 0xfe8ed2a48c26f4b5 +280, 0x20e94b1a00159476 +281, 0x93e1cb1c6af86ec7 +282, 0x4fcba3898f7442ba +283, 0x5150c3a3d94891df +284, 0x91cfce6c85b033ea +285, 0x625e8a832a806491 +286, 0x28c97ba72e3ec0b2 +287, 0x8e172de217c71ea1 +288, 0x926b80216c732639 +289, 0x28b19431a649ae3d +290, 0x57c039a6e95a3795 +291, 0xfbc354182fe52718 +292, 0x819dfd7c7d534cef +293, 0xabb4093a619ed44f +294, 0xe785b7ac6f656745 +295, 0xb647b4588b2f942f +296, 0x64cf870a14c72d27 +297, 0x6d4a4a2a0ba9b37e +298, 0x78bfb0427d7ce6b0 +299, 0x8dcc72b8bfc79ac6 +300, 0x1c14d915d5e76c99 +301, 0xaf48ddea6f096d79 +302, 0x51b39b67aa130d8 +303, 0x1aeeb39d4def06de +304, 0xd678092ffedfdd27 +305, 0x8f54787f325111d3 +306, 0xf2ca2e827beaa6bc +307, 0x339d134099e98545 +308, 0x1f6a8a7b33942e43 +309, 0x952c8065dbef669a +310, 0xe066aeb6690147f7 +311, 0xed25aa92cf58ebb6 +312, 0x7601edce215ef521 +313, 0xed1c5b396abd9434 +314, 0x4fd1e407535de9d5 +315, 0xccc8315a0d4d1441 +316, 0x85753e250bb86976 +317, 0xf232e469378761c3 +318, 0x81d691b8e9aef3c6 +319, 0x224a2f9cab0ad0e +320, 0x978f3d3e50007f4e +321, 0xd3713e6a6c0cbe60 +322, 0xcce8f1eadd41f80d +323, 0x34bda028a97d469 +324, 0x90e242fdf0f59183 +325, 0x4d749754fbc5f092 +326, 0x4399f5b7851cc87b +327, 0xcb921a5f25f6c5d7 +328, 0x120bf5d0162101 +329, 0x1304cc2aa352735a +330, 0xf7236c5d0d5d417b +331, 0xc31b320fc1654306 +332, 0xb468c6b23f3fb4e7 +333, 0xb5985b5bfaca4166 +334, 0x898285a1cd2f8375 +335, 0xa13493da372aa7c9 +336, 0x15c80c09c12634e7 +337, 0x9b765c5cc9d438bd +338, 0xee7da816a9201dcb +339, 0x92e269f73b5a248e +340, 0xa8086c5de81400ce +341, 0xe0053901853d42be +342, 0x821df32c012f433e +343, 0x17a6d69ca37387c7 +344, 0x2b10044bfba3501f +345, 0x8dfd262afc2e8515 +346, 0xd68c2c7b60226371 +347, 0xe81ac114e4416774 +348, 0x5896d60061ebc471 +349, 0xa996e3147811dbd1 +350, 0xa819c7b80ecb3661 +351, 0x982ad71b38afbc01 +352, 0xab152b65aa17b7fe +353, 0x4582bc282ef187ef +354, 0xab5a17fe8d9bc669 +355, 0x83664fa9cb0284b7 +356, 0x234c4b0091968f52 +357, 0x8ab5f51805688d37 +358, 0xe9e11186e0c53eda +359, 0x10df37ef1de2eccf +360, 0x780f1b0d52db968f +361, 0x50bd4ff292872cd5 +362, 0x51e681c265f5ad0 +363, 0x842c49660a527566 +364, 0x6e56ee026e9eda87 +365, 0x4cf39e40d8c80393 +366, 0x13e466df371f7e1f +367, 0xf2ce1799f38e028e +368, 0x833c8db7adc6ff0e +369, 0xc6e189abc2ec98f +370, 0xafebb3721283fec5 +371, 0xb49bc1eb5cc17bdc +372, 0xf1d02e818f5e4488 +373, 0xe5e9d5b41a1dd815 +374, 0xce8aca6573b1bfe5 +375, 0x9b0a5d70e268b1d5 +376, 0xf3c0503a8358f4de +377, 0x2681605dd755669d +378, 0xea265ca7601efc70 +379, 0xa93747f0a159439f +380, 0x62a86ede78a23e50 +381, 0xac8a18935c3d063c +382, 0x729c0a298f5059f5 +383, 0xbbf195e5b54399f4 +384, 0x38aa9d551f968900 +385, 0x3b3e700c58778caa +386, 0x68e6e33c4443957a +387, 0x7c56fc13eb269815 +388, 0xaf7daca39711804a +389, 0x50fde6d10f9544b3 +390, 0xf3d37159f6f6c03d +391, 0x82d298f5c1a71685 +392, 0x478661ac54c5002c +393, 0x6053768e1a324ae0 +394, 0xde8fb4a7e56707ea +395, 0xaa2809301faa8cf4 +396, 0x690a8d49fedd0722 +397, 0xe17c481b9c217de9 +398, 0x60d1d8a2b57288e3 +399, 0x149adfaadc6b0886 +400, 0xa3c18b6eb79cd5fa +401, 0x5774e3a091af5f58 +402, 0x2acca57ff30e5712 +403, 0x94454d67367c4b0c +404, 0x581b2985ac2df5ca +405, 0x71618e50744f3e70 +406, 0x270a7f3bd9a94ae6 +407, 0x3ef81af9bb36cd7b +408, 0x8a4a2592875254aa +409, 0x704ac6086fbb414a +410, 0xda774d5d3f57414d +411, 0xe20d3358b918ae9e +412, 0x934a6b9f7b91e247 +413, 0xf91649cde87ec42c +414, 0x248cec5f9b6ced30 +415, 0x56791809fd8d64ba +416, 0xf502b2765c1395f +417, 0x6b04ec973d75aa7f +418, 0xb0339f2794bb26f +419, 0x4c524636efbaea49 +420, 0x6bbf3876e9738748 +421, 0xf686524e754e9e24 +422, 0x8dafa05a42d19cd3 +423, 0xc5f069ab2434008e +424, 0x4fd64cc713cba76 +425, 0xdbf93450c881ed5f +426, 0x492e278ebabb59a2 +427, 0x993fddfde4542642 +428, 0xecde68a72c8d4e52 +429, 0xe0760b3074c311fd +430, 0x68dc0e7e06528707 +431, 0x52b50edf49c0fdc7 +432, 0xb2bd4185c138f412 +433, 0x431496d7e1d86f3 +434, 0xa4e605b037e26c44 +435, 0x58236ae1f0aca2b5 +436, 0x26c72c420fc314d8 +437, 0x20134e982ab99a2b +438, 0x544b59b8b211374b +439, 0x1301c42f3a14d993 +440, 0x52a6ea740f763b0f +441, 0xf209d70c2bebf119 +442, 0xac66a4ebc2aa1be +443, 0x683713ed35878788 +444, 0x2b5578acec06b80c +445, 0x86428efa11c45b36 +446, 0xb49010adb17d291e +447, 0x73b686bd8664b6be +448, 0x6d28ebf57b6884cc +449, 0x9712091230ff58d9 +450, 0xc9c91f74c38b286 +451, 0x776310ac41dc008e +452, 0x2f3739df0bf6a88e +453, 0x5792dc62b94db675 +454, 0x5715910d024b06af +455, 0xeb1dd745458da08 +456, 0xfce7b07ccfa851a7 +457, 0xc305f1e983ac368 +458, 0x485aa9519ac00bb0 +459, 0xa5354f6589fb0ea0 +460, 0x32fee02dfdbf4454 +461, 0x4d1ddc304bbefaaa +462, 0x789a270a1737e57e +463, 0x9f3072f4b1ed8156 +464, 0x4de3c00e89058120 +465, 0xb00a02529e0a86fa +466, 0x539f6f0edd845d9a +467, 0x85e578fe15a8c001 +468, 0xa12c8e1a72cce7d8 +469, 0xc6908abbc2b1828 +470, 0xcf70090774cbb38c +471, 0x3b636a6977b45d4a +472, 0xf0a731b220680b57 +473, 0x18973929f51443a8 +474, 0xe93e1fbe7eadabe +475, 0x8233730f0a6dfa02 +476, 0x66e50b6919b0ab74 +477, 0xb1aba87c97fd08a2 +478, 0xd4dffc1fbc117ad6 +479, 0x6f7fa65724b96e6a +480, 0x4bd5800dee92e0fa +481, 0xe18a959db6256da +482, 0xe53a291bc66df487 +483, 0xb7ec306a08651806 +484, 0x1847a6b80d2821e1 +485, 0xda50391283b14d39 +486, 0xacc4d3cd7cceb97a +487, 0x57f70185165b7bc6 +488, 0x302b6d597c3aaba7 +489, 0xa47f32d037eab51e +490, 0xe1509b4408abc559 +491, 0x4f30a1d7c2934157 +492, 0x2ad03e6c60b650b2 +493, 0x334d9c337b0a9064 +494, 0xc7f442821e7aac12 +495, 0xbcdeb09298694cdd +496, 0xe42402389f8f0fb4 +497, 0xe5de56af539df727 +498, 0x7017f9b2101ee240 +499, 0x1ee5e68d5b10001d +500, 0x436229051836387a +501, 0xcd532d6d6ec38fb7 +502, 0x30a66606fdf38272 +503, 0xfdaa2ab9cf798496 +504, 0x4277b4adec70e7df +505, 0x72cfc30256e0eaef +506, 0x3c3359fd9bd34917 +507, 0xb7aa89598856efb0 +508, 0xf72226f8bf299ef5 +509, 0x258c499275a4356f +510, 0x999a56bfc7f20d76 +511, 0x2b3e7432e20c18b +512, 0x2d1251332f760cb5 +513, 0x7420e0eea62157c5 +514, 0xe85c895aa27cec3d +515, 0x27a0545c7020d57c +516, 0xc68638a65b4fff0d +517, 0xfda473983a4ea747 +518, 0xd19fe65fb4c06062 +519, 0x6b1374e050ee15e4 +520, 0x80065ecd49bc4bef +521, 0x4ee655954bc838de +522, 0xe8fb777504a72299 +523, 0x86b652ea70f4bdde +524, 0xcdc9e0fbde7e4f33 +525, 0x352c0a50cd3ac56 +526, 0x4b8605d368be75dc +527, 0x1ac9ea8129efbc37 +528, 0x470325faa99f39c5 +529, 0x25dd7ef9adccf7a1 +530, 0x5ae2c7a03e965816 +531, 0xf733d2df59dacc7d +532, 0xa05bbf0a8a1a7a70 +533, 0xe8aa3f102846ef5f +534, 0xc9b85ec49ae71789 +535, 0xb904c14ed1cb1936 +536, 0x5ae618230b5f0444 +537, 0x97987fe47b5d7467 +538, 0xabb3aca8865ca761 +539, 0x38bfdf29d4508228 +540, 0x353654f408353330 +541, 0xeb7e92930ae4ef0d +542, 0xec50f1a7ca526b96 +543, 0xd5e2dc08b5697544 +544, 0x24c7fd69d5ec32df +545, 0x6f7e1095568b8620 +546, 0x6ed9c16ca13b3c8 +547, 0xe676ef460002130f +548, 0xa3a01a3992c4b430 +549, 0xe2130406c3b1f202 +550, 0xa8f7263e2aedcd20 +551, 0xc45d71ef2e35f507 +552, 0x37155594021da7ba +553, 0x22dc94f19de73159 +554, 0x7969fc6bffc5443f +555, 0x97def7e44faa6bfe +556, 0x8b940f5e8931d71f +557, 0xd95b1dd3f1a3fdd5 +558, 0x1c83bfdca615701a +559, 0xb7fcb56279ceca6b +560, 0xd84f8950f20dcd0 +561, 0xb03343698de3cbe0 +562, 0xf64565d448d71f71 +563, 0xda52b4676e0ae662 +564, 0xda39c2c05b4ffb91 +565, 0xb35e2560421f6a85 +566, 0x1a7b108d48ac3646 +567, 0xc4e264dc390d79ed +568, 0xa10727dfd9813256 +569, 0x40d23154e720e4f7 +570, 0xd9fa7cd7e313e119 +571, 0xcbf29107859e6013 +572, 0xc357338553d940b7 +573, 0x2641b7ab0bdfcbaa +574, 0xd12f2b6060533ae7 +575, 0xd0435aa626411c56 +576, 0x44af4a488a9cec72 +577, 0xb934232ea8fa5696 +578, 0x760a8b12072b572d +579, 0xfab18f9942cfa9b3 +580, 0x5676834c1fe84d16 +581, 0x9c54e4fddb353236 +582, 0xab49edfc9551f293 +583, 0x567f1fb45a871d +584, 0x32a967c873998834 +585, 0x99240aad380ef8d1 +586, 0x7f66cbd432859a64 +587, 0x4cdc8a4658166822 +588, 0x984e3984a5766492 +589, 0xa3b2d0a3d64d3d94 +590, 0x177f667172f2affc +591, 0xb1a90607a73a303f +592, 0xe600b6c36427f878 +593, 0xf758f9834cb7f466 +594, 0x8ee9fce4a3f36449 +595, 0xcb8f11533e7da347 +596, 0xe7cf647794dabd7c +597, 0xc9d92cfe6110806 +598, 0xea1335fa9145a1ec +599, 0xbc6c29821d094552 +600, 0x37b9d6a858cc8bc3 +601, 0xf24e4c694929893e +602, 0x55d025ce2d7d0004 +603, 0xccdc69acccf4267b +604, 0xc491c04340c222eb +605, 0xba50f75ecec9befb +606, 0x1ec7bd85b8fe3bb9 +607, 0xe4de66498c59ae8a +608, 0x38aa9e912712c889 +609, 0xcee0e43c5cc31566 +610, 0x72b69aa708fc7ed +611, 0xdff70b7f6fa96679 +612, 0xd6d71d82112aadc3 +613, 0x365177892cb78531 +614, 0xa54852b39de4f72c +615, 0x11dd5832bf16dd59 +616, 0x248a0f3369c97097 +617, 0xa14cec0260e26792 +618, 0x3517616ff142bed1 +619, 0x9b693ad39dab7636 +620, 0x739dff825e994434 +621, 0x67711e7356098c9 +622, 0xa81f8515d2fdf458 +623, 0xdac2908113fe568e +624, 0xe99944ebc6e2806a +625, 0x671728ca5b030975 +626, 0xfdad20edb2b4a789 +627, 0xedc6e466bd0369d2 +628, 0x88b5d469821f7e1b +629, 0x2eabf94049a522a5 +630, 0x247794b7a2f5a8e3 +631, 0x278942bdbe02c649 +632, 0xbe5a9a9196ab99c1 +633, 0x75955060866da1b5 +634, 0xdedcfa149273c0b5 +635, 0xdbeb7a57758f3867 +636, 0x7b9053347a2c8d5a +637, 0xa059b3f2eed338a5 +638, 0x59401a46ded3b79f +639, 0x38044ba56a6d19fb +640, 0x72c7221b4e77e779 +641, 0x526df3491a3a34da +642, 0xc3b31184ba16c0c2 +643, 0xd94c7144488624af +644, 0xcf966ee4dc373f91 +645, 0x62049e65dd416266 +646, 0x7c2adccb925bf8f +647, 0xd5fa5c22ed4ef8e1 +648, 0xd00134ebd11f2cd1 +649, 0xfbdf81767bed3634 +650, 0x62e8cc8ff66b6e26 +651, 0x3a72d6bcd4f2dcf7 +652, 0xf1cd45b1b46a86ed +653, 0x1271f98e0938bb9a +654, 0x82e6927e83dc31fa +655, 0x7b9b0e0acb67b92d +656, 0x6df503e397b2e701 +657, 0x93888f6fb561e0c3 +658, 0x393fb6069a40291 +659, 0x967a7d894cc0754d +660, 0x6e298996ad866333 +661, 0x5ff3cf5559d6ab46 +662, 0xd0d70508c40349f5 +663, 0xc64c66c0dd426b33 +664, 0x8fea340ee35c64dd +665, 0xf9cd381eb3060005 +666, 0xfcc37c2799fc0b11 +667, 0x6a37c91d65b489fa +668, 0x57231000fa0a0c9d +669, 0x55f6e292c6703f9a +670, 0xd0508ffbfa55a7a6 +671, 0x885db543276bdac8 +672, 0xc26dbe6a26b0e704 +673, 0x21f884874ebd709e +674, 0x711f0b6c8f732220 +675, 0x354d0a361eaee195 +676, 0x721344d8d30b006a +677, 0xa0e090a0d3a56f07 +678, 0x16b3d5d823a4952b +679, 0x59d7874bc9eae7b6 +680, 0x9bbb32710076455f +681, 0xd4fb22242ffabafd +682, 0xe1d4ac6770be1d89 +683, 0xb259cedebc73dc8a +684, 0x35faaa3b4246ab69 +685, 0x5d26addefdaee89 +686, 0x8e7ec350da0f3545 +687, 0xd0f316eed9f8fc79 +688, 0x98b2a52c9bf291b2 +689, 0xe4d294a8aca6a314 +690, 0x25bd554e6aa7673c +691, 0xcfde5dcba5be2a6c +692, 0xb5e01fb48d2d2107 +693, 0xe1caf28948028536 +694, 0xd434aa0a26f3ee9b +695, 0xd17723381641b8f6 +696, 0xfe73bd1f3f3768a2 +697, 0x1cc6b1abd08d67e9 +698, 0x247e328371a28de0 +699, 0x502e7942e5a9104a +700, 0x6a030fd242eb4502 +701, 0xa2ffe02744014ce8 +702, 0x59290763b18fe04e +703, 0xcf14241564271436 +704, 0xb0fb73c3c1503aff +705, 0x94e27c622f82137a +706, 0x747a5b406ac3e1f0 +707, 0x9a914e96a732031d +708, 0x59f68c6c8f078835 +709, 0x809d012c73eb4724 +710, 0x5b3c3b73e1b37d74 +711, 0xdde60ef3ba49cdf7 +712, 0x87a14e1f9c761986 +713, 0x4109b960604522af +714, 0x122d0e1ed0eb6bb9 +715, 0xadc0d29e80bfe33 +716, 0xa25b1b44f5fc8e4e +717, 0xbab85d8a9b793f20 +718, 0x825f4cbced0e7d1e +719, 0x2d6ae8807acb37ea +720, 0x8234420adce2e39 +721, 0x4a8ad4da6b804807 +722, 0x1e19f9bc215e5245 +723, 0x1d6f4848a916dd5e +724, 0x9ac40dfcdc2d39cc +725, 0x9f3524e3086155ec +726, 0x861fffc43124b2ef +727, 0xe640e3b756396372 +728, 0x41cb0f0c5e149669 +729, 0xe0bd37e1192e4205 +730, 0x62917d3858f4ce47 +731, 0xa36e7eb4d855820a +732, 0x204b90255a3bf724 +733, 0x66ee83a0175535bc +734, 0x2c14ce7c6b0c1423 +735, 0x85d9495fa514f70d +736, 0x5a4fe45ead874dbc +737, 0xe72248dcb8cfc863 +738, 0xfc21ff2932ed98cd +739, 0xcbba1edd735b5cad +740, 0x91ddc32809679bf5 +741, 0x192cdf2c7631ea1f +742, 0xbbc451ddf2ea286f +743, 0xad9e80cae2397a64 +744, 0x6918f0119b95d0e5 +745, 0xa40379017a27d70a +746, 0x1aaeddb600e61e1 +747, 0x15afd93cbd7adda9 +748, 0x156719bc2b757ff4 +749, 0x13d9a59e2b2df49d +750, 0x9a490986eaddf0a +751, 0xef9a350f0b3eb6b4 +752, 0x5de7f6295ba4fa4d +753, 0x7f37fd087c3fdb49 +754, 0xa9fe3749d6f3f209 +755, 0x50912ac036d9bfb +756, 0x982cb4d726a441f8 +757, 0x8ca8d8af59b872d0 +758, 0x7f8adfb0ceeade8a +759, 0xdad390ec742be44 +760, 0xa637944d0045be5b +761, 0x3569a3b3af807061 +762, 0x9599da8eae14511d +763, 0xc333e8d19589b01a +764, 0xfb9b524a20b571e1 +765, 0xbd9dc8b37ce5c3e1 +766, 0x142333005fa389ac +767, 0x1368bc37cd5bcce1 +768, 0x16094907ad6ecf73 +769, 0xb32c90dbba4c1130 +770, 0x82761d97c1747dd0 +771, 0x599f9f267ae3444d +772, 0x79ad3382994852e1 +773, 0x2511f06d9ef06e54 +774, 0xb35e6ab7d5bbddae +775, 0xfca9fa83a2988732 +776, 0x7d4350f0394ac3ba +777, 0xa52a9527bb176ea3 +778, 0xb49fa0ceb2aa8353 +779, 0x1f62e504d1468cc0 +780, 0xe1a77bfccce6efc3 +781, 0x776cdff4dc0d6797 +782, 0x56612e39b652c1f2 +783, 0x5f096a29294eda04 +784, 0x7978abc3aabd8b23 +785, 0x79dd875e0485b979 +786, 0x8a98aa4d5735d778 +787, 0xcca43940f69d2388 +788, 0xb2d4b156f144f93a +789, 0xbd528a676e9a862 +790, 0x2a394939c8e7ec5e +791, 0xb1da900c6efe4abc +792, 0x9869af479de4c034 +793, 0x78dbdfb88ac7c1db +794, 0x18cb169143088041 +795, 0xe69e5461c51a3e13 +796, 0x5389fa16ea98183c +797, 0xed7c80d1be1ea520 +798, 0x87246fc359758ced +799, 0xab323eba95fae4ed +800, 0xbc4c0dde7f8a1828 +801, 0xdb739f7955610b1a +802, 0xecd8c68c3434cc +803, 0x138c2eb88c477f44 +804, 0x28a65f96727aae41 +805, 0xdee879f2cf5629d +806, 0x684f0c90ef20070f +807, 0xa24a819ef5621800 +808, 0x8d0054f870e4fdcb +809, 0x99e8c6e695b600b +810, 0x50b705245891f7c3 +811, 0xc02eed3a6e58e51a +812, 0x443d64e95443606c +813, 0xca24959cfbd2d120 +814, 0xe072609ea48815bc +815, 0xbcc715026590315b +816, 0x3e76df24d7aa5938 +817, 0xd8ff04940d9b79ae +818, 0x54474ce790059bcd +819, 0x278390dd6aa70e81 +820, 0xf4df619fe35414e4 +821, 0x757d71270264e615 +822, 0x1e8a373699c11b23 +823, 0xef68c82046e67dd6 +824, 0xe280006599972620 +825, 0x234e095183b0f4d6 +826, 0xe3b7560ed9839749 +827, 0xcd5ec4086572332e +828, 0xc41c0d4aaa279108 +829, 0x4b9cd6126bc16a6d +830, 0x4a7252734f3e3dd0 +831, 0xb3132df156cc103a +832, 0xf9e4abbf7b64464a +833, 0xf936df27fb3c47b7 +834, 0x9142960873f6d71a +835, 0x4ba6aa3235cdb10d +836, 0x3237a2e765ba7766 +837, 0xd62f0b94c8e99e54 +838, 0x26b682f90a3ae41b +839, 0x40ad5e82072b6f81 +840, 0xd0198101f5484000 +841, 0xe4fac60ba11c332 +842, 0x472d0b0a95ef9d38 +843, 0x8512557aec5a3d8f +844, 0xef83169d3efd4de9 +845, 0x53fe89283e7a7676 +846, 0x2f50933053d69fc4 +847, 0x76f5e4362e2e53a2 +848, 0x8676fdccce28874a +849, 0x2737764c1fb1f821 +850, 0x4a6f70afc066ab55 +851, 0x27f8e151e310fca4 +852, 0xd606960ccbe85161 +853, 0xcce51d7ddd270a32 +854, 0xb4235999794875c2 +855, 0x580084e358e884 +856, 0x2159d5e6dc8586d7 +857, 0x87bd54d8599b3ba4 +858, 0x3e9ade6a2181664 +859, 0x5e6e140406d97623 +860, 0x511545d5aa0080a2 +861, 0xf49d78ed219aac57 +862, 0xbece1f9c90b8ea87 +863, 0x1c741cac36a2c514 +864, 0x7453c141047db967 +865, 0xd751832a5037eba2 +866, 0x71370a3f30ada1f7 +867, 0x7c01cf2dcb408631 +868, 0x1052a4fbdccc0fa1 +869, 0x13d525c9df3fb6c +870, 0xa3aa8dbfee760c55 +871, 0xc0288d200f5155cf +872, 0x79f4bcd12af567c3 +873, 0x8160d163bb548755 +874, 0x5cf2995fb69fd2df +875, 0xcc98ed01396639df +876, 0xad95f1d9cfc8256e +877, 0xa3df27d9fbdbfb9d +878, 0x83e5f5dda4d52929 +879, 0x9adc05043009f55b +880, 0xdfe8329dfde1c001 +881, 0x9980ccdd5298e6a2 +882, 0x636a7bd134f6ef56 +883, 0xef5ff780c4be6ba4 +884, 0x290d71dc77a56d16 +885, 0x6d65db9ff58de1e6 +886, 0x944b063b3805a696 +887, 0xce468ca2cce33008 +888, 0x5ba1ccb840f80f48 +889, 0x28ddce36fc9ad268 +890, 0x4f77ef254d507a21 +891, 0xce9b4057fadf3ab +892, 0xb518bc68298730e6 +893, 0xd2eb5b8e2ec665b0 +894, 0xe1583303a4f87344 +895, 0x9d5a0df4fbe1bed5 +896, 0x2ba9bc03ec8cfd07 +897, 0x479ed880a96ca669 +898, 0xcedf96338324771a +899, 0x312f4fc2da41ffaa +900, 0xa0eb9cf23b5e1ed8 +901, 0xf8f88f975dc3f539 +902, 0x4a37e185d0e96e0f +903, 0xf829654a5c0b46f9 +904, 0x3909cca7a7f8c7fb +905, 0x4c2e1d66ceb45105 +906, 0xaffaa19e1db8af87 +907, 0x9ec498246bd18c76 +908, 0x21d51558edc089da +909, 0xe8984112cd1b1561 +910, 0x7de1d2cf54b0c0e1 +911, 0xa06729aed50bfb9d +912, 0xcf19f733e5db19e1 +913, 0x70edf2624ab777cd +914, 0x46685becad10e078 +915, 0x825e0f6add46785 +916, 0x66d4af3b15f70de4 +917, 0xc676614b0666b21 +918, 0x282a916c864f5cb7 +919, 0x2707283a3f512167 +920, 0x37ff3afda7461623 +921, 0xc767eb1205e4ca86 +922, 0x46b359aecc4ea25b +923, 0x67fbbb797a16dbb1 +924, 0x64fd4ba57122290e +925, 0x8acc2a8ae59d8fac +926, 0x64a49298599acc67 +927, 0xedf00de67177ce30 +928, 0x1ea9d8d7e76d2d2c +929, 0x363fcac323f70eb2 +930, 0x19e6e3ec8a9712eb +931, 0xca541e96b0961f09 +932, 0x4d8fd34c2822ec46 +933, 0x2fdd56a50b32f705 +934, 0xaac2fcf251e3fd3 +935, 0xb0c600299e57045c +936, 0xd951ec589e909e38 +937, 0x4dc8414390cae508 +938, 0x537ef9d5e2321344 +939, 0xa57bc21fd31aa2dc +940, 0xa3a60df564183750 +941, 0xbe69a5ce2e369fb6 +942, 0x7744601f4c053ec8 +943, 0x3838452af42f2612 +944, 0xd4f0dad7115a54e9 +945, 0x629cf68d8009a624 +946, 0x2211c8fa34cb98cb +947, 0x8040b19e2213db83 +948, 0xb2a86d3ba2384fd +949, 0x4b85cec4f93f0dab +950, 0xc8d212d21ea6845d +951, 0x5b271a03a4fe2be0 +952, 0xff4f671319ad8434 +953, 0x8e615a919d5afa96 +954, 0xea7f47c53161160a +955, 0x33273930b13c6efc +956, 0x98eedda27fb59c3c +957, 0x188dc5e92e939677 +958, 0x9dbd0fa0911430f1 +959, 0x5b3dcf3fa75dfd2b +960, 0x3f03846febdb275d +961, 0x20cc24faea9e9cf6 +962, 0x854f3ac66199ff5d +963, 0x31169ac99d341e6f +964, 0xa85daed3c0bc1bbe +965, 0x64633711e71ba5dd +966, 0x530e79978dc73334 +967, 0x636f2ee6e20aef13 +968, 0xf6220f8b6d9a58fb +969, 0x425db8fa32141a7b +970, 0xac7c210f4b02be95 +971, 0x5fe8cfbe197a7754 +972, 0xfff7d40c79420ea +973, 0x5f8bab9ef4697b77 +974, 0xaf6fe54e45b23fe8 +975, 0xce79456ccc70bbce +976, 0x645ef680f48f1c00 +977, 0xa4dfac46e2028595 +978, 0x6bece4c41effc5df +979, 0xd316df886442641f +980, 0xa4f6ff994edd2a6 +981, 0x30281ae3cc49abe4 +982, 0x39acb7b663dea974 +983, 0x5e8829b01a7c06fb +984, 0x87bdb08cf027f13e +985, 0xdfa5ede784e802f6 +986, 0x46d03d55711c38cc +987, 0xa55a961fc9788306 +988, 0xbf09ded495a2e57a +989, 0xcd601b29a639cc16 +990, 0x2193ce026bfd1085 +991, 0x25ba27f3f225be13 +992, 0x6f685be82f64f2fe +993, 0xec8454108229c450 +994, 0x6e79d8d205447a44 +995, 0x9ed7b6a96b9ccd68 +996, 0xae7134b3b7f8ee37 +997, 0x66963de0e5ebcc02 +998, 0x29c8dcd0d17c423f +999, 0xfb8482c827eb90bc diff --git a/local_packages/numpy/random/tests/data/sfc64_np126.pkl.gz b/local_packages/numpy/random/tests/data/sfc64_np126.pkl.gz new file mode 100644 index 0000000..94fbceb Binary files /dev/null and b/local_packages/numpy/random/tests/data/sfc64_np126.pkl.gz differ diff --git a/local_packages/numpy/random/tests/test_direct.py b/local_packages/numpy/random/tests/test_direct.py new file mode 100644 index 0000000..9916f8a --- /dev/null +++ b/local_packages/numpy/random/tests/test_direct.py @@ -0,0 +1,595 @@ +import os +import sys +from os.path import join + +import pytest + +import numpy as np +from numpy.random import ( + MT19937, + PCG64, + PCG64DXSM, + SFC64, + Generator, + Philox, + RandomState, + SeedSequence, + default_rng, +) +from numpy.random._common import interface +from numpy.testing import ( + assert_allclose, + assert_array_equal, + assert_equal, + assert_raises, +) + +try: + import cffi # noqa: F401 + + MISSING_CFFI = False +except ImportError: + MISSING_CFFI = True + +try: + import ctypes # noqa: F401 + + MISSING_CTYPES = False +except ImportError: + MISSING_CTYPES = False + +if sys.flags.optimize > 1: + # no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1 + # cffi cannot succeed + MISSING_CFFI = True + + +pwd = os.path.dirname(os.path.abspath(__file__)) + + +def assert_state_equal(actual, target): + for key in actual: + if isinstance(actual[key], dict): + assert_state_equal(actual[key], target[key]) + elif isinstance(actual[key], np.ndarray): + assert_array_equal(actual[key], target[key]) + else: + assert actual[key] == target[key] + + +def uint32_to_float32(u): + return ((u >> np.uint32(8)) * (1.0 / 2**24)).astype(np.float32) + + +def uniform32_from_uint64(x): + x = np.uint64(x) + upper = np.array(x >> np.uint64(32), dtype=np.uint32) + lower = np.uint64(0xffffffff) + lower = np.array(x & lower, dtype=np.uint32) + joined = np.column_stack([lower, upper]).ravel() + return uint32_to_float32(joined) + + +def uniform32_from_uint53(x): + x = np.uint64(x) >> np.uint64(16) + x = np.uint32(x & np.uint64(0xffffffff)) + return uint32_to_float32(x) + + +def uniform32_from_uint32(x): + return uint32_to_float32(x) + + +def uniform32_from_uint(x, bits): + if bits == 64: + return uniform32_from_uint64(x) + elif bits == 53: + return uniform32_from_uint53(x) + elif bits == 32: + return uniform32_from_uint32(x) + else: + raise NotImplementedError + + +def uniform_from_uint(x, bits): + if bits in (64, 63, 53): + return uniform_from_uint64(x) + elif bits == 32: + return uniform_from_uint32(x) + + +def uniform_from_uint64(x): + return (x >> np.uint64(11)) * (1.0 / 9007199254740992.0) + + +def uniform_from_uint32(x): + out = np.empty(len(x) // 2) + for i in range(0, len(x), 2): + a = x[i] >> 5 + b = x[i + 1] >> 6 + out[i // 2] = (a * 67108864.0 + b) / 9007199254740992.0 + return out + + +def uniform_from_dsfmt(x): + return x.view(np.double) - 1.0 + + +def gauss_from_uint(x, n, bits): + if bits in (64, 63): + doubles = uniform_from_uint64(x) + elif bits == 32: + doubles = uniform_from_uint32(x) + else: # bits == 'dsfmt' + doubles = uniform_from_dsfmt(x) + gauss = [] + loc = 0 + x1 = x2 = 0.0 + while len(gauss) < n: + r2 = 2 + while r2 >= 1.0 or r2 == 0.0: + x1 = 2.0 * doubles[loc] - 1.0 + x2 = 2.0 * doubles[loc + 1] - 1.0 + r2 = x1 * x1 + x2 * x2 + loc += 2 + + f = np.sqrt(-2.0 * np.log(r2) / r2) + gauss.append(f * x2) + gauss.append(f * x1) + + return gauss[:n] + + +def test_seedsequence(): + from numpy.random.bit_generator import ( + ISeedSequence, + ISpawnableSeedSequence, + SeedlessSeedSequence, + ) + + s1 = SeedSequence(range(10), spawn_key=(1, 2), pool_size=6) + s1.spawn(10) + s2 = SeedSequence(**s1.state) + assert_equal(s1.state, s2.state) + assert_equal(s1.n_children_spawned, s2.n_children_spawned) + + # The interfaces cannot be instantiated themselves. + assert_raises(TypeError, ISeedSequence) + assert_raises(TypeError, ISpawnableSeedSequence) + dummy = SeedlessSeedSequence() + assert_raises(NotImplementedError, dummy.generate_state, 10) + assert len(dummy.spawn(10)) == 10 + + +def test_generator_spawning(): + """ Test spawning new generators and bit_generators directly. + """ + rng = np.random.default_rng() + seq = rng.bit_generator.seed_seq + new_ss = seq.spawn(5) + expected_keys = [seq.spawn_key + (i,) for i in range(5)] + assert [c.spawn_key for c in new_ss] == expected_keys + + new_bgs = rng.bit_generator.spawn(5) + expected_keys = [seq.spawn_key + (i,) for i in range(5, 10)] + assert [bg.seed_seq.spawn_key for bg in new_bgs] == expected_keys + + new_rngs = rng.spawn(5) + expected_keys = [seq.spawn_key + (i,) for i in range(10, 15)] + found_keys = [rng.bit_generator.seed_seq.spawn_key for rng in new_rngs] + assert found_keys == expected_keys + + # Sanity check that streams are actually different: + assert new_rngs[0].uniform() != new_rngs[1].uniform() + + +def test_non_spawnable(): + from numpy.random.bit_generator import ISeedSequence + + class FakeSeedSequence: + def generate_state(self, n_words, dtype=np.uint32): + return np.zeros(n_words, dtype=dtype) + + ISeedSequence.register(FakeSeedSequence) + + rng = np.random.default_rng(FakeSeedSequence()) + + with pytest.raises(TypeError, match="The underlying SeedSequence"): + rng.spawn(5) + + with pytest.raises(TypeError, match="The underlying SeedSequence"): + rng.bit_generator.spawn(5) + + +class Base: + dtype = np.uint64 + data2 = data1 = {} + + @classmethod + def setup_class(cls): + cls.bit_generator = PCG64 + cls.bits = 64 + cls.dtype = np.uint64 + cls.seed_error_type = TypeError + cls.invalid_init_types = [] + cls.invalid_init_values = [] + + @classmethod + def _read_csv(cls, filename): + with open(filename) as csv: + seed = csv.readline() + seed = seed.split(',') + seed = [int(s.strip(), 0) for s in seed[1:]] + data = [] + for line in csv: + data.append(int(line.split(',')[-1].strip(), 0)) + return {'seed': seed, 'data': np.array(data, dtype=cls.dtype)} + + def test_raw(self): + bit_generator = self.bit_generator(*self.data1['seed']) + uints = bit_generator.random_raw(1000) + assert_equal(uints, self.data1['data']) + + bit_generator = self.bit_generator(*self.data1['seed']) + uints = bit_generator.random_raw() + assert_equal(uints, self.data1['data'][0]) + + bit_generator = self.bit_generator(*self.data2['seed']) + uints = bit_generator.random_raw(1000) + assert_equal(uints, self.data2['data']) + + def test_random_raw(self): + bit_generator = self.bit_generator(*self.data1['seed']) + uints = bit_generator.random_raw(output=False) + assert uints is None + uints = bit_generator.random_raw(1000, output=False) + assert uints is None + + def test_gauss_inv(self): + n = 25 + rs = RandomState(self.bit_generator(*self.data1['seed'])) + gauss = rs.standard_normal(n) + assert_allclose(gauss, + gauss_from_uint(self.data1['data'], n, self.bits)) + + rs = RandomState(self.bit_generator(*self.data2['seed'])) + gauss = rs.standard_normal(25) + assert_allclose(gauss, + gauss_from_uint(self.data2['data'], n, self.bits)) + + def test_uniform_double(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + vals = uniform_from_uint(self.data1['data'], self.bits) + uniforms = rs.random(len(vals)) + assert_allclose(uniforms, vals) + assert_equal(uniforms.dtype, np.float64) + + rs = Generator(self.bit_generator(*self.data2['seed'])) + vals = uniform_from_uint(self.data2['data'], self.bits) + uniforms = rs.random(len(vals)) + assert_allclose(uniforms, vals) + assert_equal(uniforms.dtype, np.float64) + + def test_uniform_float(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + vals = uniform32_from_uint(self.data1['data'], self.bits) + uniforms = rs.random(len(vals), dtype=np.float32) + assert_allclose(uniforms, vals) + assert_equal(uniforms.dtype, np.float32) + + rs = Generator(self.bit_generator(*self.data2['seed'])) + vals = uniform32_from_uint(self.data2['data'], self.bits) + uniforms = rs.random(len(vals), dtype=np.float32) + assert_allclose(uniforms, vals) + assert_equal(uniforms.dtype, np.float32) + + def test_repr(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + assert 'Generator' in repr(rs) + assert f'{id(rs):#x}'.upper().replace('X', 'x') in repr(rs) + + def test_str(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + assert 'Generator' in str(rs) + assert str(self.bit_generator.__name__) in str(rs) + assert f'{id(rs):#x}'.upper().replace('X', 'x') not in str(rs) + + def test_pickle(self): + import pickle + + bit_generator = self.bit_generator(*self.data1['seed']) + state = bit_generator.state + bitgen_pkl = pickle.dumps(bit_generator) + reloaded = pickle.loads(bitgen_pkl) + reloaded_state = reloaded.state + assert_array_equal(Generator(bit_generator).standard_normal(1000), + Generator(reloaded).standard_normal(1000)) + assert bit_generator is not reloaded + assert_state_equal(reloaded_state, state) + + ss = SeedSequence(100) + aa = pickle.loads(pickle.dumps(ss)) + assert_equal(ss.state, aa.state) + + def test_pickle_preserves_seed_sequence(self): + # GH 26234 + # Add explicit test that bit generators preserve seed sequences + import pickle + + bit_generator = self.bit_generator(*self.data1['seed']) + ss = bit_generator.seed_seq + bg_plk = pickle.loads(pickle.dumps(bit_generator)) + ss_plk = bg_plk.seed_seq + assert_equal(ss.state, ss_plk.state) + assert_equal(ss.pool, ss_plk.pool) + + bit_generator.seed_seq.spawn(10) + bg_plk = pickle.loads(pickle.dumps(bit_generator)) + ss_plk = bg_plk.seed_seq + assert_equal(ss.state, ss_plk.state) + assert_equal(ss.n_children_spawned, ss_plk.n_children_spawned) + + def test_invalid_state_type(self): + bit_generator = self.bit_generator(*self.data1['seed']) + with pytest.raises(TypeError): + bit_generator.state = {'1'} + + def test_invalid_state_value(self): + bit_generator = self.bit_generator(*self.data1['seed']) + state = bit_generator.state + state['bit_generator'] = 'otherBitGenerator' + with pytest.raises(ValueError): + bit_generator.state = state + + def test_invalid_init_type(self): + bit_generator = self.bit_generator + for st in self.invalid_init_types: + with pytest.raises(TypeError): + bit_generator(*st) + + def test_invalid_init_values(self): + bit_generator = self.bit_generator + for st in self.invalid_init_values: + with pytest.raises((ValueError, OverflowError)): + bit_generator(*st) + + def test_benchmark(self): + bit_generator = self.bit_generator(*self.data1['seed']) + bit_generator._benchmark(1) + bit_generator._benchmark(1, 'double') + with pytest.raises(ValueError): + bit_generator._benchmark(1, 'int32') + + @pytest.mark.skipif(MISSING_CFFI, reason='cffi not available') + def test_cffi(self): + bit_generator = self.bit_generator(*self.data1['seed']) + cffi_interface = bit_generator.cffi + assert isinstance(cffi_interface, interface) + other_cffi_interface = bit_generator.cffi + assert other_cffi_interface is cffi_interface + + @pytest.mark.skipif(MISSING_CTYPES, reason='ctypes not available') + def test_ctypes(self): + bit_generator = self.bit_generator(*self.data1['seed']) + ctypes_interface = bit_generator.ctypes + assert isinstance(ctypes_interface, interface) + other_ctypes_interface = bit_generator.ctypes + assert other_ctypes_interface is ctypes_interface + + def test_getstate(self): + bit_generator = self.bit_generator(*self.data1['seed']) + state = bit_generator.state + alt_state = bit_generator.__getstate__() + assert isinstance(alt_state, tuple) + assert_state_equal(state, alt_state[0]) + assert isinstance(alt_state[1], SeedSequence) + +class TestPhilox(Base): + @classmethod + def setup_class(cls): + cls.bit_generator = Philox + cls.bits = 64 + cls.dtype = np.uint64 + cls.data1 = cls._read_csv( + join(pwd, './data/philox-testset-1.csv')) + cls.data2 = cls._read_csv( + join(pwd, './data/philox-testset-2.csv')) + cls.seed_error_type = TypeError + cls.invalid_init_types = [] + cls.invalid_init_values = [(1, None, 1), (-1,), (None, None, 2 ** 257 + 1)] + + def test_set_key(self): + bit_generator = self.bit_generator(*self.data1['seed']) + state = bit_generator.state + keyed = self.bit_generator(counter=state['state']['counter'], + key=state['state']['key']) + assert_state_equal(bit_generator.state, keyed.state) + + +class TestPCG64(Base): + @classmethod + def setup_class(cls): + cls.bit_generator = PCG64 + cls.bits = 64 + cls.dtype = np.uint64 + cls.data1 = cls._read_csv(join(pwd, './data/pcg64-testset-1.csv')) + cls.data2 = cls._read_csv(join(pwd, './data/pcg64-testset-2.csv')) + cls.seed_error_type = (ValueError, TypeError) + cls.invalid_init_types = [(3.2,), ([None],), (1, None)] + cls.invalid_init_values = [(-1,)] + + def test_advance_symmetry(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + state = rs.bit_generator.state + step = -0x9e3779b97f4a7c150000000000000000 + rs.bit_generator.advance(step) + val_neg = rs.integers(10) + rs.bit_generator.state = state + rs.bit_generator.advance(2**128 + step) + val_pos = rs.integers(10) + rs.bit_generator.state = state + rs.bit_generator.advance(10 * 2**128 + step) + val_big = rs.integers(10) + assert val_neg == val_pos + assert val_big == val_pos + + def test_advange_large(self): + rs = Generator(self.bit_generator(38219308213743)) + pcg = rs.bit_generator + state = pcg.state["state"] + initial_state = 287608843259529770491897792873167516365 + assert state["state"] == initial_state + pcg.advance(sum(2**i for i in (96, 64, 32, 16, 8, 4, 2, 1))) + state = pcg.state["state"] + advanced_state = 135275564607035429730177404003164635391 + assert state["state"] == advanced_state + + +class TestPCG64DXSM(Base): + @classmethod + def setup_class(cls): + cls.bit_generator = PCG64DXSM + cls.bits = 64 + cls.dtype = np.uint64 + cls.data1 = cls._read_csv(join(pwd, './data/pcg64dxsm-testset-1.csv')) + cls.data2 = cls._read_csv(join(pwd, './data/pcg64dxsm-testset-2.csv')) + cls.seed_error_type = (ValueError, TypeError) + cls.invalid_init_types = [(3.2,), ([None],), (1, None)] + cls.invalid_init_values = [(-1,)] + + def test_advance_symmetry(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + state = rs.bit_generator.state + step = -0x9e3779b97f4a7c150000000000000000 + rs.bit_generator.advance(step) + val_neg = rs.integers(10) + rs.bit_generator.state = state + rs.bit_generator.advance(2**128 + step) + val_pos = rs.integers(10) + rs.bit_generator.state = state + rs.bit_generator.advance(10 * 2**128 + step) + val_big = rs.integers(10) + assert val_neg == val_pos + assert val_big == val_pos + + def test_advange_large(self): + rs = Generator(self.bit_generator(38219308213743)) + pcg = rs.bit_generator + state = pcg.state + initial_state = 287608843259529770491897792873167516365 + assert state["state"]["state"] == initial_state + pcg.advance(sum(2**i for i in (96, 64, 32, 16, 8, 4, 2, 1))) + state = pcg.state["state"] + advanced_state = 277778083536782149546677086420637664879 + assert state["state"] == advanced_state + + +class TestMT19937(Base): + @classmethod + def setup_class(cls): + cls.bit_generator = MT19937 + cls.bits = 32 + cls.dtype = np.uint32 + cls.data1 = cls._read_csv(join(pwd, './data/mt19937-testset-1.csv')) + cls.data2 = cls._read_csv(join(pwd, './data/mt19937-testset-2.csv')) + cls.seed_error_type = ValueError + cls.invalid_init_types = [] + cls.invalid_init_values = [(-1,)] + + def test_seed_float_array(self): + assert_raises(TypeError, self.bit_generator, np.array([np.pi])) + assert_raises(TypeError, self.bit_generator, np.array([-np.pi])) + assert_raises(TypeError, self.bit_generator, np.array([np.pi, -np.pi])) + assert_raises(TypeError, self.bit_generator, np.array([0, np.pi])) + assert_raises(TypeError, self.bit_generator, [np.pi]) + assert_raises(TypeError, self.bit_generator, [0, np.pi]) + + def test_state_tuple(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + bit_generator = rs.bit_generator + state = bit_generator.state + desired = rs.integers(2 ** 16) + tup = (state['bit_generator'], state['state']['key'], + state['state']['pos']) + bit_generator.state = tup + actual = rs.integers(2 ** 16) + assert_equal(actual, desired) + tup = tup + (0, 0.0) + bit_generator.state = tup + actual = rs.integers(2 ** 16) + assert_equal(actual, desired) + + +class TestSFC64(Base): + @classmethod + def setup_class(cls): + cls.bit_generator = SFC64 + cls.bits = 64 + cls.dtype = np.uint64 + cls.data1 = cls._read_csv( + join(pwd, './data/sfc64-testset-1.csv')) + cls.data2 = cls._read_csv( + join(pwd, './data/sfc64-testset-2.csv')) + cls.seed_error_type = (ValueError, TypeError) + cls.invalid_init_types = [(3.2,), ([None],), (1, None)] + cls.invalid_init_values = [(-1,)] + + def test_legacy_pickle(self): + # Pickling format was changed in 2.0.x + import gzip + import pickle + + expected_state = np.array( + [ + 9957867060933711493, + 532597980065565856, + 14769588338631205282, + 13 + ], + dtype=np.uint64 + ) + + base_path = os.path.split(os.path.abspath(__file__))[0] + pkl_file = os.path.join(base_path, "data", "sfc64_np126.pkl.gz") + with gzip.open(pkl_file) as gz: + sfc = pickle.load(gz) + + assert isinstance(sfc, SFC64) + assert_equal(sfc.state["state"]["state"], expected_state) + + +class TestDefaultRNG: + def test_seed(self): + for args in [(), (None,), (1234,), ([1234, 5678],)]: + rg = default_rng(*args) + assert isinstance(rg.bit_generator, PCG64) + + def test_passthrough(self): + bg = Philox() + rg = default_rng(bg) + assert rg.bit_generator is bg + rg2 = default_rng(rg) + assert rg2 is rg + assert rg2.bit_generator is bg + + @pytest.mark.thread_unsafe( + reason="np.random.set_bit_generator affects global state" + ) + def test_coercion_RandomState_Generator(self): + # use default_rng to coerce RandomState to Generator + rs = RandomState(1234) + rg = default_rng(rs) + assert isinstance(rg.bit_generator, MT19937) + assert rg.bit_generator is rs._bit_generator + + # RandomState with a non MT19937 bit generator + _original = np.random.get_bit_generator() + bg = PCG64(12342298) + np.random.set_bit_generator(bg) + rs = np.random.mtrand._rand + rg = default_rng(rs) + assert rg.bit_generator is bg + + # vital to get global state back to original, otherwise + # other tests start to fail. + np.random.set_bit_generator(_original) diff --git a/local_packages/numpy/random/tests/test_extending.py b/local_packages/numpy/random/tests/test_extending.py new file mode 100644 index 0000000..a1e64ec --- /dev/null +++ b/local_packages/numpy/random/tests/test_extending.py @@ -0,0 +1,131 @@ +import os +import shutil +import subprocess +import sys +import sysconfig +import warnings +from importlib.util import module_from_spec, spec_from_file_location + +import pytest + +import numpy as np +from numpy.testing import IS_EDITABLE, IS_WASM + +try: + import cffi +except ImportError: + cffi = None + +if sys.flags.optimize > 1: + # no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1 + # cffi cannot succeed + cffi = None + +try: + with warnings.catch_warnings(record=True) as w: + # numba issue gh-4733 + warnings.filterwarnings('always', '', DeprecationWarning) + import numba +except (ImportError, SystemError): + # Certain numpy/numba versions trigger a SystemError due to a numba bug + numba = None + +try: + import cython + from Cython.Compiler.Version import version as cython_version +except ImportError: + cython = None +else: + from numpy._utils import _pep440 + # Note: keep in sync with the one in pyproject.toml + required_version = '3.0.6' + if _pep440.parse(cython_version) < _pep440.Version(required_version): + # too old or wrong cython, skip the test + cython = None + + +@pytest.mark.skipif( + IS_EDITABLE, + reason='Editable install cannot find .pxd headers' +) +@pytest.mark.skipif( + sys.platform == "win32" and sys.maxsize < 2**32, + reason="Failing in 32-bit Windows wheel build job, skip for now" +) +@pytest.mark.skipif(IS_WASM, reason="Can't start subprocess") +@pytest.mark.skipif(cython is None, reason="requires cython") +@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', + reason='Meson unable to find MSVC linker on win-arm64') +@pytest.mark.slow +@pytest.mark.thread_unsafe( + reason="building cython code in a subprocess doesn't make sense to do in many " + "threads and sometimes crashes" +) +def test_cython(tmp_path): + import glob + # build the examples in a temporary directory + srcdir = os.path.join(os.path.dirname(__file__), '..') + shutil.copytree(srcdir, tmp_path / 'random') + build_dir = tmp_path / 'random' / '_examples' / 'cython' + target_dir = build_dir / "build" + os.makedirs(target_dir, exist_ok=True) + # Ensure we use the correct Python interpreter even when `meson` is + # installed in a different Python environment (see gh-24956) + native_file = str(build_dir / 'interpreter-native-file.ini') + with open(native_file, 'w') as f: + f.write("[binaries]\n") + f.write(f"python = '{sys.executable}'\n") + f.write(f"python3 = '{sys.executable}'") + if sys.platform == "win32": + subprocess.check_call(["meson", "setup", + "--buildtype=release", + "--vsenv", "--native-file", native_file, + str(build_dir)], + cwd=target_dir, + ) + else: + subprocess.check_call(["meson", "setup", + "--native-file", native_file, str(build_dir)], + cwd=target_dir + ) + subprocess.check_call(["meson", "compile", "-vv"], cwd=target_dir) + + # gh-16162: make sure numpy's __init__.pxd was used for cython + # not really part of this test, but it is a convenient place to check + + g = glob.glob(str(target_dir / "*" / "extending.pyx.c")) + with open(g[0]) as fid: + txt_to_find = 'NumPy API declarations from "numpy/__init__' + for line in fid: + if txt_to_find in line: + break + else: + assert False, f"Could not find '{txt_to_find}' in C file, wrong pxd used" + # import without adding the directory to sys.path + suffix = sysconfig.get_config_var('EXT_SUFFIX') + + def load(modname): + so = (target_dir / modname).with_suffix(suffix) + spec = spec_from_file_location(modname, so) + mod = module_from_spec(spec) + spec.loader.exec_module(mod) + return mod + + # test that the module can be imported + load("extending") + load("extending_cpp") + # actually test the cython c-extension + extending_distributions = load("extending_distributions") + from numpy.random import PCG64 + values = extending_distributions.uniforms_ex(PCG64(0), 10, 'd') + assert values.shape == (10,) + assert values.dtype == np.float64 + +@pytest.mark.skipif(numba is None or cffi is None, + reason="requires numba and cffi") +def test_numba(): + from numpy.random._examples.numba import extending # noqa: F401 + +@pytest.mark.skipif(cffi is None, reason="requires cffi") +def test_cffi(): + from numpy.random._examples.cffi import extending # noqa: F401 diff --git a/local_packages/numpy/random/tests/test_generator_mt19937.py b/local_packages/numpy/random/tests/test_generator_mt19937.py new file mode 100644 index 0000000..7d13c49 --- /dev/null +++ b/local_packages/numpy/random/tests/test_generator_mt19937.py @@ -0,0 +1,2825 @@ +import hashlib +import os.path +import sys +import warnings + +import pytest + +import numpy as np +from numpy.exceptions import AxisError +from numpy.linalg import LinAlgError +from numpy.random import MT19937, Generator, RandomState, SeedSequence +from numpy.testing import ( + IS_WASM, + assert_, + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_no_warnings, + assert_raises, +) + +random = Generator(MT19937()) + +JUMP_TEST_DATA = [ + { + "seed": 0, + "steps": 10, + "initial": {"key_sha256": "bb1636883c2707b51c5b7fc26c6927af4430f2e0785a8c7bc886337f919f9edf", "pos": 9}, # noqa: E501 + "jumped": {"key_sha256": "ff682ac12bb140f2d72fba8d3506cf4e46817a0db27aae1683867629031d8d55", "pos": 598}, # noqa: E501 + }, + { + "seed": 384908324, + "steps": 312, + "initial": {"key_sha256": "16b791a1e04886ccbbb4d448d6ff791267dc458ae599475d08d5cced29d11614", "pos": 311}, # noqa: E501 + "jumped": {"key_sha256": "a0110a2cf23b56be0feaed8f787a7fc84bef0cb5623003d75b26bdfa1c18002c", "pos": 276}, # noqa: E501 + }, + { + "seed": [839438204, 980239840, 859048019, 821], + "steps": 511, + "initial": {"key_sha256": "d306cf01314d51bd37892d874308200951a35265ede54d200f1e065004c3e9ea", "pos": 510}, # noqa: E501 + "jumped": {"key_sha256": "0e00ab449f01a5195a83b4aee0dfbc2ce8d46466a640b92e33977d2e42f777f8", "pos": 475}, # noqa: E501 + }, +] + + +@pytest.fixture(scope='module', params=[True, False]) +def endpoint(request): + return request.param + + +class TestSeed: + def test_scalar(self): + s = Generator(MT19937(0)) + assert_equal(s.integers(1000), 479) + s = Generator(MT19937(4294967295)) + assert_equal(s.integers(1000), 324) + + def test_array(self): + s = Generator(MT19937(range(10))) + assert_equal(s.integers(1000), 465) + s = Generator(MT19937(np.arange(10))) + assert_equal(s.integers(1000), 465) + s = Generator(MT19937([0])) + assert_equal(s.integers(1000), 479) + s = Generator(MT19937([4294967295])) + assert_equal(s.integers(1000), 324) + + def test_seedsequence(self): + s = MT19937(SeedSequence(0)) + assert_equal(s.random_raw(1), 2058676884) + + def test_invalid_scalar(self): + # seed must be an unsigned 32 bit integer + assert_raises(TypeError, MT19937, -0.5) + assert_raises(ValueError, MT19937, -1) + + def test_invalid_array(self): + # seed must be an unsigned integer + assert_raises(TypeError, MT19937, [-0.5]) + assert_raises(ValueError, MT19937, [-1]) + assert_raises(ValueError, MT19937, [1, -2, 4294967296]) + + def test_noninstantized_bitgen(self): + assert_raises(ValueError, Generator, MT19937) + + +class TestBinomial: + def test_n_zero(self): + # Tests the corner case of n == 0 for the binomial distribution. + # binomial(0, p) should be zero for any p in [0, 1]. + # This test addresses issue #3480. + zeros = np.zeros(2, dtype='int') + for p in [0, .5, 1]: + assert_(random.binomial(0, p) == 0) + assert_array_equal(random.binomial(zeros, p), zeros) + + def test_p_is_nan(self): + # Issue #4571. + assert_raises(ValueError, random.binomial, 1, np.nan) + + def test_p_extremely_small(self): + n = 50000000000 + p = 5e-17 + sample_size = 20000000 + x = random.binomial(n, p, size=sample_size) + sample_mean = x.mean() + expected_mean = n * p + sigma = np.sqrt(n * p * (1 - p) / sample_size) + # Note: the parameters were chosen so that expected_mean - 6*sigma + # is a positive value. The first `assert` below validates that + # assumption (in case someone edits the parameters in the future). + # The second `assert` is the actual test. + low_bound = expected_mean - 6 * sigma + assert low_bound > 0, "bad test params: 6-sigma lower bound is negative" + test_msg = (f"sample mean {sample_mean} deviates from the expected mean " + f"{expected_mean} by more than 6*sigma") + assert abs(expected_mean - sample_mean) < 6 * sigma, test_msg + + +class TestMultinomial: + def test_basic(self): + random.multinomial(100, [0.2, 0.8]) + + def test_zero_probability(self): + random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0]) + + def test_int_negative_interval(self): + assert_(-5 <= random.integers(-5, -1) < -1) + x = random.integers(-5, -1, 5) + assert_(np.all(-5 <= x)) + assert_(np.all(x < -1)) + + def test_size(self): + # gh-3173 + p = [0.5, 0.5] + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2)) + assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2)) + assert_equal(random.multinomial(1, p, np.array((2, 2))).shape, + (2, 2, 2)) + + assert_raises(TypeError, random.multinomial, 1, p, + float(1)) + + def test_invalid_prob(self): + assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2]) + assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9]) + + def test_invalid_n(self): + assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2]) + assert_raises(ValueError, random.multinomial, [-1] * 10, [0.8, 0.2]) + + def test_p_non_contiguous(self): + p = np.arange(15.) + p /= np.sum(p[1::3]) + pvals = p[1::3] + random = Generator(MT19937(1432985819)) + non_contig = random.multinomial(100, pvals=pvals) + random = Generator(MT19937(1432985819)) + contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals)) + assert_array_equal(non_contig, contig) + + def test_multinomial_pvals_float32(self): + x = np.array([9.9e-01, 9.9e-01, 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09, + 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09], dtype=np.float32) + pvals = x / x.sum() + random = Generator(MT19937(1432985819)) + match = r"[\w\s]*pvals array is cast to 64-bit floating" + with pytest.raises(ValueError, match=match): + random.multinomial(1, pvals) + + +class TestMultivariateHypergeometric: + + seed = 8675309 + + def test_argument_validation(self): + # Error cases... + + # `colors` must be a 1-d sequence + assert_raises(ValueError, random.multivariate_hypergeometric, + 10, 4) + + # Negative nsample + assert_raises(ValueError, random.multivariate_hypergeometric, + [2, 3, 4], -1) + + # Negative color + assert_raises(ValueError, random.multivariate_hypergeometric, + [-1, 2, 3], 2) + + # nsample exceeds sum(colors) + assert_raises(ValueError, random.multivariate_hypergeometric, + [2, 3, 4], 10) + + # nsample exceeds sum(colors) (edge case of empty colors) + assert_raises(ValueError, random.multivariate_hypergeometric, + [], 1) + + # Validation errors associated with very large values in colors. + assert_raises(ValueError, random.multivariate_hypergeometric, + [999999999, 101], 5, 1, 'marginals') + + int64_info = np.iinfo(np.int64) + max_int64 = int64_info.max + max_int64_index = max_int64 // int64_info.dtype.itemsize + assert_raises(ValueError, random.multivariate_hypergeometric, + [max_int64_index - 100, 101], 5, 1, 'count') + + @pytest.mark.parametrize('method', ['count', 'marginals']) + def test_edge_cases(self, method): + # Set the seed, but in fact, all the results in this test are + # deterministic, so we don't really need this. + random = Generator(MT19937(self.seed)) + + x = random.multivariate_hypergeometric([0, 0, 0], 0, method=method) + assert_array_equal(x, [0, 0, 0]) + + x = random.multivariate_hypergeometric([], 0, method=method) + assert_array_equal(x, []) + + x = random.multivariate_hypergeometric([], 0, size=1, method=method) + assert_array_equal(x, np.empty((1, 0), dtype=np.int64)) + + x = random.multivariate_hypergeometric([1, 2, 3], 0, method=method) + assert_array_equal(x, [0, 0, 0]) + + x = random.multivariate_hypergeometric([9, 0, 0], 3, method=method) + assert_array_equal(x, [3, 0, 0]) + + colors = [1, 1, 0, 1, 1] + x = random.multivariate_hypergeometric(colors, sum(colors), + method=method) + assert_array_equal(x, colors) + + x = random.multivariate_hypergeometric([3, 4, 5], 12, size=3, + method=method) + assert_array_equal(x, [[3, 4, 5]] * 3) + + # Cases for nsample: + # nsample < 10 + # 10 <= nsample < colors.sum()/2 + # colors.sum()/2 < nsample < colors.sum() - 10 + # colors.sum() - 10 < nsample < colors.sum() + @pytest.mark.parametrize('nsample', [8, 25, 45, 55]) + @pytest.mark.parametrize('method', ['count', 'marginals']) + @pytest.mark.parametrize('size', [5, (2, 3), 150000]) + def test_typical_cases(self, nsample, method, size): + random = Generator(MT19937(self.seed)) + + colors = np.array([10, 5, 20, 25]) + sample = random.multivariate_hypergeometric(colors, nsample, size, + method=method) + if isinstance(size, int): + expected_shape = (size,) + colors.shape + else: + expected_shape = size + colors.shape + assert_equal(sample.shape, expected_shape) + assert_((sample >= 0).all()) + assert_((sample <= colors).all()) + assert_array_equal(sample.sum(axis=-1), + np.full(size, fill_value=nsample, dtype=int)) + if isinstance(size, int) and size >= 100000: + # This sample is large enough to compare its mean to + # the expected values. + assert_allclose(sample.mean(axis=0), + nsample * colors / colors.sum(), + rtol=1e-3, atol=0.005) + + def test_repeatability1(self): + random = Generator(MT19937(self.seed)) + sample = random.multivariate_hypergeometric([3, 4, 5], 5, size=5, + method='count') + expected = np.array([[2, 1, 2], + [2, 1, 2], + [1, 1, 3], + [2, 0, 3], + [2, 1, 2]]) + assert_array_equal(sample, expected) + + def test_repeatability2(self): + random = Generator(MT19937(self.seed)) + sample = random.multivariate_hypergeometric([20, 30, 50], 50, + size=5, + method='marginals') + expected = np.array([[ 9, 17, 24], + [ 7, 13, 30], + [ 9, 15, 26], + [ 9, 17, 24], + [12, 14, 24]]) + assert_array_equal(sample, expected) + + def test_repeatability3(self): + random = Generator(MT19937(self.seed)) + sample = random.multivariate_hypergeometric([20, 30, 50], 12, + size=5, + method='marginals') + expected = np.array([[2, 3, 7], + [5, 3, 4], + [2, 5, 5], + [5, 3, 4], + [1, 5, 6]]) + assert_array_equal(sample, expected) + + +class TestSetState: + def _create_rng(self): + seed = 1234567890 + rg = Generator(MT19937(seed)) + bit_generator = rg.bit_generator + state = bit_generator.state + legacy_state = (state['bit_generator'], + state['state']['key'], + state['state']['pos']) + return rg, bit_generator, state + + def test_gaussian_reset(self): + # Make sure the cached every-other-Gaussian is reset. + rg, bit_generator, state = self._create_rng() + old = rg.standard_normal(size=3) + bit_generator.state = state + new = rg.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_gaussian_reset_in_media_res(self): + # When the state is saved with a cached Gaussian, make sure the + # cached Gaussian is restored. + rg, bit_generator, state = self._create_rng() + rg.standard_normal() + state = bit_generator.state + old = rg.standard_normal(size=3) + bit_generator.state = state + new = rg.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_negative_binomial(self): + # Ensure that the negative binomial results take floating point + # arguments without truncation. + rg, _, _ = self._create_rng() + rg.negative_binomial(0.5, 0.5) + + +class TestIntegers: + rfunc = random.integers + + # valid integer/boolean types + itype = [bool, np.int8, np.uint8, np.int16, np.uint16, + np.int32, np.uint32, np.int64, np.uint64] + + def test_unsupported_type(self, endpoint): + assert_raises(TypeError, self.rfunc, 1, endpoint=endpoint, dtype=float) + + def test_bounds_checking(self, endpoint): + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, ubnd, lbnd, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, 1, 0, endpoint=endpoint, + dtype=dt) + + assert_raises(ValueError, self.rfunc, [lbnd - 1], ubnd, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, [lbnd], [ubnd + 1], + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, [ubnd], [lbnd], + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, 1, [0], + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, [ubnd + 1], [ubnd], + endpoint=endpoint, dtype=dt) + + def test_bounds_checking_array(self, endpoint): + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + (not endpoint) + + assert_raises(ValueError, self.rfunc, [lbnd - 1] * 2, [ubnd] * 2, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, [lbnd] * 2, + [ubnd + 1] * 2, endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, ubnd, [lbnd] * 2, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, [1] * 2, 0, + endpoint=endpoint, dtype=dt) + + def test_rng_zero_and_extremes(self, endpoint): + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + is_open = not endpoint + + tgt = ubnd - 1 + assert_equal(self.rfunc(tgt, tgt + is_open, size=1000, + endpoint=endpoint, dtype=dt), tgt) + assert_equal(self.rfunc([tgt], tgt + is_open, size=1000, + endpoint=endpoint, dtype=dt), tgt) + + tgt = lbnd + assert_equal(self.rfunc(tgt, tgt + is_open, size=1000, + endpoint=endpoint, dtype=dt), tgt) + assert_equal(self.rfunc(tgt, [tgt + is_open], size=1000, + endpoint=endpoint, dtype=dt), tgt) + + tgt = (lbnd + ubnd) // 2 + assert_equal(self.rfunc(tgt, tgt + is_open, size=1000, + endpoint=endpoint, dtype=dt), tgt) + assert_equal(self.rfunc([tgt], [tgt + is_open], + size=1000, endpoint=endpoint, dtype=dt), + tgt) + + def test_rng_zero_and_extremes_array(self, endpoint): + size = 1000 + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + tgt = ubnd - 1 + assert_equal(self.rfunc([tgt], [tgt + 1], + size=size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * size, [tgt + 1] * size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt) + + tgt = lbnd + assert_equal(self.rfunc([tgt], [tgt + 1], + size=size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * size, [tgt + 1] * size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt) + + tgt = (lbnd + ubnd) // 2 + assert_equal(self.rfunc([tgt], [tgt + 1], + size=size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * size, [tgt + 1] * size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt) + + def test_full_range(self, endpoint): + # Test for ticket #1690 + + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + try: + self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt) + except Exception as e: + raise AssertionError("No error should have been raised, " + "but one was with the following " + "message:\n\n%s" % str(e)) + + def test_full_range_array(self, endpoint): + # Test for ticket #1690 + + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + try: + self.rfunc([lbnd] * 2, [ubnd], endpoint=endpoint, dtype=dt) + except Exception as e: + raise AssertionError("No error should have been raised, " + "but one was with the following " + "message:\n\n%s" % str(e)) + + def test_in_bounds_fuzz(self, endpoint): + # Don't use fixed seed + random = Generator(MT19937()) + + for dt in self.itype[1:]: + for ubnd in [4, 8, 16]: + vals = self.rfunc(2, ubnd - endpoint, size=2 ** 16, + endpoint=endpoint, dtype=dt) + assert_(vals.max() < ubnd) + assert_(vals.min() >= 2) + + vals = self.rfunc(0, 2 - endpoint, size=2 ** 16, endpoint=endpoint, + dtype=bool) + assert_(vals.max() < 2) + assert_(vals.min() >= 0) + + def test_scalar_array_equiv(self, endpoint): + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + size = 1000 + random = Generator(MT19937(1234)) + scalar = random.integers(lbnd, ubnd, size=size, endpoint=endpoint, + dtype=dt) + + random = Generator(MT19937(1234)) + scalar_array = random.integers([lbnd], [ubnd], size=size, + endpoint=endpoint, dtype=dt) + + random = Generator(MT19937(1234)) + array = random.integers([lbnd] * size, [ubnd] * + size, size=size, endpoint=endpoint, dtype=dt) + assert_array_equal(scalar, scalar_array) + assert_array_equal(scalar, array) + + def test_repeatability(self, endpoint): + # We use a sha256 hash of generated sequences of 1000 samples + # in the range [0, 6) for all but bool, where the range + # is [0, 2). Hashes are for little endian numbers. + tgt = {'bool': '053594a9b82d656f967c54869bc6970aa0358cf94ad469c81478459c6a90eee3', # noqa: E501 + 'int16': '54de9072b6ee9ff7f20b58329556a46a447a8a29d67db51201bf88baa6e4e5d4', # noqa: E501 + 'int32': 'd3a0d5efb04542b25ac712e50d21f39ac30f312a5052e9bbb1ad3baa791ac84b', # noqa: E501 + 'int64': '14e224389ac4580bfbdccb5697d6190b496f91227cf67df60989de3d546389b1', # noqa: E501 + 'int8': '0e203226ff3fbbd1580f15da4621e5f7164d0d8d6b51696dd42d004ece2cbec1', # noqa: E501 + 'uint16': '54de9072b6ee9ff7f20b58329556a46a447a8a29d67db51201bf88baa6e4e5d4', # noqa: E501 + 'uint32': 'd3a0d5efb04542b25ac712e50d21f39ac30f312a5052e9bbb1ad3baa791ac84b', # noqa: E501 + 'uint64': '14e224389ac4580bfbdccb5697d6190b496f91227cf67df60989de3d546389b1', # noqa: E501 + 'uint8': '0e203226ff3fbbd1580f15da4621e5f7164d0d8d6b51696dd42d004ece2cbec1'} # noqa: E501 + + for dt in self.itype[1:]: + random = Generator(MT19937(1234)) + + # view as little endian for hash + if sys.byteorder == 'little': + val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint, + dtype=dt) + else: + val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint, + dtype=dt).byteswap() + + res = hashlib.sha256(val).hexdigest() + assert_(tgt[np.dtype(dt).name] == res) + + # bools do not depend on endianness + random = Generator(MT19937(1234)) + val = random.integers(0, 2 - endpoint, size=1000, endpoint=endpoint, + dtype=bool).view(np.int8) + res = hashlib.sha256(val).hexdigest() + assert_(tgt[np.dtype(bool).name] == res) + + def test_repeatability_broadcasting(self, endpoint): + for dt in self.itype: + lbnd = 0 if dt in (bool, np.bool) else np.iinfo(dt).min + ubnd = 2 if dt in (bool, np.bool) else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + # view as little endian for hash + random = Generator(MT19937(1234)) + val = random.integers(lbnd, ubnd, size=1000, endpoint=endpoint, + dtype=dt) + + random = Generator(MT19937(1234)) + val_bc = random.integers([lbnd] * 1000, ubnd, endpoint=endpoint, + dtype=dt) + + assert_array_equal(val, val_bc) + + random = Generator(MT19937(1234)) + val_bc = random.integers([lbnd] * 1000, [ubnd] * 1000, + endpoint=endpoint, dtype=dt) + + assert_array_equal(val, val_bc) + + @pytest.mark.parametrize( + 'bound, expected', + [(2**32 - 1, np.array([517043486, 1364798665, 1733884389, 1353720612, + 3769704066, 1170797179, 4108474671])), + (2**32, np.array([517043487, 1364798666, 1733884390, 1353720613, + 3769704067, 1170797180, 4108474672])), + (2**32 + 1, np.array([517043487, 1733884390, 3769704068, 4108474673, + 1831631863, 1215661561, 3869512430]))] + ) + def test_repeatability_32bit_boundary(self, bound, expected): + for size in [None, len(expected)]: + random = Generator(MT19937(1234)) + x = random.integers(bound, size=size) + assert_equal(x, expected if size is not None else expected[0]) + + def test_repeatability_32bit_boundary_broadcasting(self): + desired = np.array([[[1622936284, 3620788691, 1659384060], + [1417365545, 760222891, 1909653332], + [3788118662, 660249498, 4092002593]], + [[3625610153, 2979601262, 3844162757], + [ 685800658, 120261497, 2694012896], + [1207779440, 1586594375, 3854335050]], + [[3004074748, 2310761796, 3012642217], + [2067714190, 2786677879, 1363865881], + [ 791663441, 1867303284, 2169727960]], + [[1939603804, 1250951100, 298950036], + [1040128489, 3791912209, 3317053765], + [3155528714, 61360675, 2305155588]], + [[ 817688762, 1335621943, 3288952434], + [1770890872, 1102951817, 1957607470], + [3099996017, 798043451, 48334215]]]) + for size in [None, (5, 3, 3)]: + random = Generator(MT19937(12345)) + x = random.integers([[-1], [0], [1]], + [2**32 - 1, 2**32, 2**32 + 1], + size=size) + assert_array_equal(x, desired if size is not None else desired[0]) + + def test_int64_uint64_broadcast_exceptions(self, endpoint): + configs = {np.uint64: ((0, 2**65), (-1, 2**62), (10, 9), (0, 0)), + np.int64: ((0, 2**64), (-(2**64), 2**62), (10, 9), (0, 0), + (-2**63 - 1, -2**63 - 1))} + for dtype in configs: + for config in configs[dtype]: + low, high = config + high = high - endpoint + low_a = np.array([[low] * 10]) + high_a = np.array([high] * 10) + assert_raises(ValueError, random.integers, low, high, + endpoint=endpoint, dtype=dtype) + assert_raises(ValueError, random.integers, low_a, high, + endpoint=endpoint, dtype=dtype) + assert_raises(ValueError, random.integers, low, high_a, + endpoint=endpoint, dtype=dtype) + assert_raises(ValueError, random.integers, low_a, high_a, + endpoint=endpoint, dtype=dtype) + + low_o = np.array([[low] * 10], dtype=object) + high_o = np.array([high] * 10, dtype=object) + assert_raises(ValueError, random.integers, low_o, high, + endpoint=endpoint, dtype=dtype) + assert_raises(ValueError, random.integers, low, high_o, + endpoint=endpoint, dtype=dtype) + assert_raises(ValueError, random.integers, low_o, high_o, + endpoint=endpoint, dtype=dtype) + + def test_int64_uint64_corner_case(self, endpoint): + # When stored in Numpy arrays, `lbnd` is casted + # as np.int64, and `ubnd` is casted as np.uint64. + # Checking whether `lbnd` >= `ubnd` used to be + # done solely via direct comparison, which is incorrect + # because when Numpy tries to compare both numbers, + # it casts both to np.float64 because there is + # no integer superset of np.int64 and np.uint64. However, + # `ubnd` is too large to be represented in np.float64, + # causing it be round down to np.iinfo(np.int64).max, + # leading to a ValueError because `lbnd` now equals + # the new `ubnd`. + + dt = np.int64 + tgt = np.iinfo(np.int64).max + lbnd = np.int64(np.iinfo(np.int64).max) + ubnd = np.uint64(np.iinfo(np.int64).max + 1 - endpoint) + + # None of these function calls should + # generate a ValueError now. + actual = random.integers(lbnd, ubnd, endpoint=endpoint, dtype=dt) + assert_equal(actual, tgt) + + def test_respect_dtype_singleton(self, endpoint): + # See gh-7203 + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + dt = np.bool if dt is bool else dt + + sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt) + assert_equal(sample.dtype, dt) + + for dt in (bool, int): + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + # gh-7284: Ensure that we get Python data types + sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt) + assert not hasattr(sample, 'dtype') + assert_equal(type(sample), dt) + + def test_respect_dtype_array(self, endpoint): + # See gh-7203 + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + dt = np.bool if dt is bool else dt + + sample = self.rfunc([lbnd], [ubnd], endpoint=endpoint, dtype=dt) + assert_equal(sample.dtype, dt) + sample = self.rfunc([lbnd] * 2, [ubnd] * 2, endpoint=endpoint, + dtype=dt) + assert_equal(sample.dtype, dt) + + def test_zero_size(self, endpoint): + # See gh-7203 + for dt in self.itype: + sample = self.rfunc(0, 0, (3, 0, 4), endpoint=endpoint, dtype=dt) + assert sample.shape == (3, 0, 4) + assert sample.dtype == dt + assert self.rfunc(0, -10, 0, endpoint=endpoint, + dtype=dt).shape == (0,) + assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, + (3, 0, 4)) + assert_equal(random.integers(0, -10, size=0).shape, (0,)) + assert_equal(random.integers(10, 10, size=0).shape, (0,)) + + def test_error_byteorder(self): + other_byteord_dt = 'i4' + with pytest.raises(ValueError): + random.integers(0, 200, size=10, dtype=other_byteord_dt) + + # chi2max is the maximum acceptable chi-squared value. + @pytest.mark.slow + @pytest.mark.parametrize('sample_size,high,dtype,chi2max', + [(5000000, 5, np.int8, 125.0), # p-value ~4.6e-25 + (5000000, 7, np.uint8, 150.0), # p-value ~7.7e-30 + (10000000, 2500, np.int16, 3300.0), # p-value ~3.0e-25 + (50000000, 5000, np.uint16, 6500.0), # p-value ~3.5e-25 + ]) + def test_integers_small_dtype_chisquared(self, sample_size, high, + dtype, chi2max): + # Regression test for gh-14774. + samples = random.integers(high, size=sample_size, dtype=dtype) + + values, counts = np.unique(samples, return_counts=True) + expected = sample_size / high + chi2 = ((counts - expected)**2 / expected).sum() + assert chi2 < chi2max + + +class TestRandomDist: + # Make sure the random distribution returns the correct value for a + # given seed + seed = 1234567890 + + def test_integers(self): + random = Generator(MT19937(self.seed)) + actual = random.integers(-99, 99, size=(3, 2)) + desired = np.array([[-80, -56], [41, 37], [-83, -16]]) + assert_array_equal(actual, desired) + + def test_integers_masked(self): + # Test masked rejection sampling algorithm to generate array of + # uint32 in an interval. + random = Generator(MT19937(self.seed)) + actual = random.integers(0, 99, size=(3, 2), dtype=np.uint32) + desired = np.array([[9, 21], [70, 68], [8, 41]], dtype=np.uint32) + assert_array_equal(actual, desired) + + def test_integers_closed(self): + random = Generator(MT19937(self.seed)) + actual = random.integers(-99, 99, size=(3, 2), endpoint=True) + desired = np.array([[-80, -56], [41, 38], [-83, -15]]) + assert_array_equal(actual, desired) + + def test_integers_max_int(self): + # Tests whether integers with closed=True can generate the + # maximum allowed Python int that can be converted + # into a C long. Previous implementations of this + # method have thrown an OverflowError when attempting + # to generate this integer. + actual = random.integers(np.iinfo('l').max, np.iinfo('l').max, + endpoint=True) + + desired = np.iinfo('l').max + assert_equal(actual, desired) + + def test_random(self): + random = Generator(MT19937(self.seed)) + actual = random.random((3, 2)) + desired = np.array([[0.096999199829214, 0.707517457682192], + [0.084364834598269, 0.767731206553125], + [0.665069021359413, 0.715487190596693]]) + assert_array_almost_equal(actual, desired, decimal=15) + + random = Generator(MT19937(self.seed)) + actual = random.random() + assert_array_almost_equal(actual, desired[0, 0], decimal=15) + + def test_random_float(self): + random = Generator(MT19937(self.seed)) + actual = random.random((3, 2)) + desired = np.array([[0.0969992 , 0.70751746], # noqa: E203 + [0.08436483, 0.76773121], + [0.66506902, 0.71548719]]) + assert_array_almost_equal(actual, desired, decimal=7) + + def test_random_float_scalar(self): + random = Generator(MT19937(self.seed)) + actual = random.random(dtype=np.float32) + desired = 0.0969992 + assert_array_almost_equal(actual, desired, decimal=7) + + @pytest.mark.parametrize('dtype, uint_view_type', + [(np.float32, np.uint32), + (np.float64, np.uint64)]) + def test_random_distribution_of_lsb(self, dtype, uint_view_type): + random = Generator(MT19937(self.seed)) + sample = random.random(100000, dtype=dtype) + num_ones_in_lsb = np.count_nonzero(sample.view(uint_view_type) & 1) + # The probability of a 1 in the least significant bit is 0.25. + # With a sample size of 100000, the probability that num_ones_in_lsb + # is outside the following range is less than 5e-11. + assert 24100 < num_ones_in_lsb < 25900 + + def test_random_unsupported_type(self): + assert_raises(TypeError, random.random, dtype='int32') + + def test_choice_uniform_replace(self): + random = Generator(MT19937(self.seed)) + actual = random.choice(4, 4) + desired = np.array([0, 0, 2, 2], dtype=np.int64) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_replace(self): + random = Generator(MT19937(self.seed)) + actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) + desired = np.array([0, 1, 0, 1], dtype=np.int64) + assert_array_equal(actual, desired) + + def test_choice_uniform_noreplace(self): + random = Generator(MT19937(self.seed)) + actual = random.choice(4, 3, replace=False) + desired = np.array([2, 0, 3], dtype=np.int64) + assert_array_equal(actual, desired) + actual = random.choice(4, 4, replace=False, shuffle=False) + desired = np.arange(4, dtype=np.int64) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_noreplace(self): + random = Generator(MT19937(self.seed)) + actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1]) + desired = np.array([0, 2, 3], dtype=np.int64) + assert_array_equal(actual, desired) + + def test_choice_noninteger(self): + random = Generator(MT19937(self.seed)) + actual = random.choice(['a', 'b', 'c', 'd'], 4) + desired = np.array(['a', 'a', 'c', 'c']) + assert_array_equal(actual, desired) + + def test_choice_multidimensional_default_axis(self): + random = Generator(MT19937(self.seed)) + actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 3) + desired = np.array([[0, 1], [0, 1], [4, 5]]) + assert_array_equal(actual, desired) + + def test_choice_multidimensional_custom_axis(self): + random = Generator(MT19937(self.seed)) + actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 1, axis=1) + desired = np.array([[0], [2], [4], [6]]) + assert_array_equal(actual, desired) + + def test_choice_exceptions(self): + sample = random.choice + assert_raises(ValueError, sample, -1, 3) + assert_raises(ValueError, sample, 3., 3) + assert_raises(ValueError, sample, [], 3) + assert_raises(ValueError, sample, [1, 2, 3, 4], 3, + p=[[0.25, 0.25], [0.25, 0.25]]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2]) + assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4]) + assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False) + # gh-13087 + assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False) + assert_raises(ValueError, sample, [1, 2, 3], 2, + replace=False, p=[1, 0, 0]) + + def test_choice_return_shape(self): + p = [0.1, 0.9] + # Check scalar + assert_(np.isscalar(random.choice(2, replace=True))) + assert_(np.isscalar(random.choice(2, replace=False))) + assert_(np.isscalar(random.choice(2, replace=True, p=p))) + assert_(np.isscalar(random.choice(2, replace=False, p=p))) + assert_(np.isscalar(random.choice([1, 2], replace=True))) + assert_(random.choice([None], replace=True) is None) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(random.choice(arr, replace=True) is a) + + # Check 0-d array + s = () + assert_(not np.isscalar(random.choice(2, s, replace=True))) + assert_(not np.isscalar(random.choice(2, s, replace=False))) + assert_(not np.isscalar(random.choice(2, s, replace=True, p=p))) + assert_(not np.isscalar(random.choice(2, s, replace=False, p=p))) + assert_(not np.isscalar(random.choice([1, 2], s, replace=True))) + assert_(random.choice([None], s, replace=True).ndim == 0) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(random.choice(arr, s, replace=True).item() is a) + + # Check multi dimensional array + s = (2, 3) + p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2] + assert_equal(random.choice(6, s, replace=True).shape, s) + assert_equal(random.choice(6, s, replace=False).shape, s) + assert_equal(random.choice(6, s, replace=True, p=p).shape, s) + assert_equal(random.choice(6, s, replace=False, p=p).shape, s) + assert_equal(random.choice(np.arange(6), s, replace=True).shape, s) + + # Check zero-size + assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, (3, 0, 4)) + assert_equal(random.integers(0, -10, size=0).shape, (0,)) + assert_equal(random.integers(10, 10, size=0).shape, (0,)) + assert_equal(random.choice(0, size=0).shape, (0,)) + assert_equal(random.choice([], size=(0,)).shape, (0,)) + assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape, + (3, 0, 4)) + assert_raises(ValueError, random.choice, [], 10) + + def test_choice_nan_probabilities(self): + a = np.array([42, 1, 2]) + p = [None, None, None] + assert_raises(ValueError, random.choice, a, p=p) + + def test_choice_p_non_contiguous(self): + p = np.ones(10) / 5 + p[1::2] = 3.0 + random = Generator(MT19937(self.seed)) + non_contig = random.choice(5, 3, p=p[::2]) + random = Generator(MT19937(self.seed)) + contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2])) + assert_array_equal(non_contig, contig) + + def test_choice_return_type(self): + # gh 9867 + p = np.ones(4) / 4. + actual = random.choice(4, 2) + assert actual.dtype == np.int64 + actual = random.choice(4, 2, replace=False) + assert actual.dtype == np.int64 + actual = random.choice(4, 2, p=p) + assert actual.dtype == np.int64 + actual = random.choice(4, 2, p=p, replace=False) + assert actual.dtype == np.int64 + + def test_choice_large_sample(self): + choice_hash = '4266599d12bfcfb815213303432341c06b4349f5455890446578877bb322e222' + random = Generator(MT19937(self.seed)) + actual = random.choice(10000, 5000, replace=False) + if sys.byteorder != 'little': + actual = actual.byteswap() + res = hashlib.sha256(actual.view(np.int8)).hexdigest() + assert_(choice_hash == res) + + def test_choice_array_size_empty_tuple(self): + random = Generator(MT19937(self.seed)) + assert_array_equal(random.choice([1, 2, 3], size=()), np.array(1), + strict=True) + assert_array_equal(random.choice([[1, 2, 3]], size=()), [1, 2, 3]) + assert_array_equal(random.choice([[1]], size=()), [1], strict=True) + assert_array_equal(random.choice([[1]], size=(), axis=1), [1], + strict=True) + + def test_bytes(self): + random = Generator(MT19937(self.seed)) + actual = random.bytes(10) + desired = b'\x86\xf0\xd4\x18\xe1\x81\t8%\xdd' + assert_equal(actual, desired) + + def test_shuffle(self): + # Test lists, arrays (of various dtypes), and multidimensional versions + # of both, c-contiguous or not: + for conv in [lambda x: np.array([]), + lambda x: x, + lambda x: np.asarray(x).astype(np.int8), + lambda x: np.asarray(x).astype(np.float32), + lambda x: np.asarray(x).astype(np.complex64), + lambda x: np.asarray(x).astype(object), + lambda x: [(i, i) for i in x], + lambda x: np.asarray([[i, i] for i in x]), + lambda x: np.vstack([x, x]).T, + # gh-11442 + lambda x: (np.asarray([(i, i) for i in x], + [("a", int), ("b", int)]) + .view(np.recarray)), + # gh-4270 + lambda x: np.asarray([(i, i) for i in x], + [("a", object, (1,)), + ("b", np.int32, (1,))])]: + random = Generator(MT19937(self.seed)) + alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) + random.shuffle(alist) + actual = alist + desired = conv([4, 1, 9, 8, 0, 5, 3, 6, 2, 7]) + assert_array_equal(actual, desired) + + def test_shuffle_custom_axis(self): + random = Generator(MT19937(self.seed)) + actual = np.arange(16).reshape((4, 4)) + random.shuffle(actual, axis=1) + desired = np.array([[ 0, 3, 1, 2], + [ 4, 7, 5, 6], + [ 8, 11, 9, 10], + [12, 15, 13, 14]]) + assert_array_equal(actual, desired) + random = Generator(MT19937(self.seed)) + actual = np.arange(16).reshape((4, 4)) + random.shuffle(actual, axis=-1) + assert_array_equal(actual, desired) + + def test_shuffle_custom_axis_empty(self): + random = Generator(MT19937(self.seed)) + desired = np.array([]).reshape((0, 6)) + for axis in (0, 1): + actual = np.array([]).reshape((0, 6)) + random.shuffle(actual, axis=axis) + assert_array_equal(actual, desired) + + def test_shuffle_axis_nonsquare(self): + y1 = np.arange(20).reshape(2, 10) + y2 = y1.copy() + random = Generator(MT19937(self.seed)) + random.shuffle(y1, axis=1) + random = Generator(MT19937(self.seed)) + random.shuffle(y2.T) + assert_array_equal(y1, y2) + + def test_shuffle_masked(self): + # gh-3263 + a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1) + b = np.ma.masked_values(np.arange(20) % 3 - 1, -1) + a_orig = a.copy() + b_orig = b.copy() + for i in range(50): + random.shuffle(a) + assert_equal( + sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask])) + random.shuffle(b) + assert_equal( + sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask])) + + def test_shuffle_exceptions(self): + random = Generator(MT19937(self.seed)) + arr = np.arange(10) + assert_raises(AxisError, random.shuffle, arr, 1) + arr = np.arange(9).reshape((3, 3)) + assert_raises(AxisError, random.shuffle, arr, 3) + assert_raises(TypeError, random.shuffle, arr, slice(1, 2, None)) + arr = [[1, 2, 3], [4, 5, 6]] + assert_raises(NotImplementedError, random.shuffle, arr, 1) + + arr = np.array(3) + assert_raises(TypeError, random.shuffle, arr) + arr = np.ones((3, 2)) + assert_raises(AxisError, random.shuffle, arr, 2) + + def test_shuffle_not_writeable(self): + random = Generator(MT19937(self.seed)) + a = np.zeros(5) + a.flags.writeable = False + with pytest.raises(ValueError, match='read-only'): + random.shuffle(a) + + def test_permutation(self): + random = Generator(MT19937(self.seed)) + alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] + actual = random.permutation(alist) + desired = [4, 1, 9, 8, 0, 5, 3, 6, 2, 7] + assert_array_equal(actual, desired) + + random = Generator(MT19937(self.seed)) + arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T + actual = random.permutation(arr_2d) + assert_array_equal(actual, np.atleast_2d(desired).T) + + bad_x_str = "abcd" + assert_raises(AxisError, random.permutation, bad_x_str) + + bad_x_float = 1.2 + assert_raises(AxisError, random.permutation, bad_x_float) + + random = Generator(MT19937(self.seed)) + integer_val = 10 + desired = [3, 0, 8, 7, 9, 4, 2, 5, 1, 6] + + actual = random.permutation(integer_val) + assert_array_equal(actual, desired) + + def test_permutation_custom_axis(self): + a = np.arange(16).reshape((4, 4)) + desired = np.array([[ 0, 3, 1, 2], + [ 4, 7, 5, 6], + [ 8, 11, 9, 10], + [12, 15, 13, 14]]) + random = Generator(MT19937(self.seed)) + actual = random.permutation(a, axis=1) + assert_array_equal(actual, desired) + random = Generator(MT19937(self.seed)) + actual = random.permutation(a, axis=-1) + assert_array_equal(actual, desired) + + def test_permutation_exceptions(self): + random = Generator(MT19937(self.seed)) + arr = np.arange(10) + assert_raises(AxisError, random.permutation, arr, 1) + arr = np.arange(9).reshape((3, 3)) + assert_raises(AxisError, random.permutation, arr, 3) + assert_raises(TypeError, random.permutation, arr, slice(1, 2, None)) + + @pytest.mark.parametrize("dtype", [int, object]) + @pytest.mark.parametrize("axis, expected", + [(None, np.array([[3, 7, 0, 9, 10, 11], + [8, 4, 2, 5, 1, 6]])), + (0, np.array([[6, 1, 2, 9, 10, 11], + [0, 7, 8, 3, 4, 5]])), + (1, np.array([[ 5, 3, 4, 0, 2, 1], + [11, 9, 10, 6, 8, 7]]))]) + def test_permuted(self, dtype, axis, expected): + random = Generator(MT19937(self.seed)) + x = np.arange(12).reshape(2, 6).astype(dtype) + random.permuted(x, axis=axis, out=x) + assert_array_equal(x, expected) + + random = Generator(MT19937(self.seed)) + x = np.arange(12).reshape(2, 6).astype(dtype) + y = random.permuted(x, axis=axis) + assert y.dtype == dtype + assert_array_equal(y, expected) + + def test_permuted_with_strides(self): + random = Generator(MT19937(self.seed)) + x0 = np.arange(22).reshape(2, 11) + x1 = x0.copy() + x = x0[:, ::3] + y = random.permuted(x, axis=1, out=x) + expected = np.array([[0, 9, 3, 6], + [14, 20, 11, 17]]) + assert_array_equal(y, expected) + x1[:, ::3] = expected + # Verify that the original x0 was modified in-place as expected. + assert_array_equal(x1, x0) + + def test_permuted_empty(self): + y = random.permuted([]) + assert_array_equal(y, []) + + @pytest.mark.parametrize('outshape', [(2, 3), 5]) + def test_permuted_out_with_wrong_shape(self, outshape): + a = np.array([1, 2, 3]) + out = np.zeros(outshape, dtype=a.dtype) + with pytest.raises(ValueError, match='same shape'): + random.permuted(a, out=out) + + def test_permuted_out_with_wrong_type(self): + out = np.zeros((3, 5), dtype=np.int32) + x = np.ones((3, 5)) + with pytest.raises(TypeError, match='Cannot cast'): + random.permuted(x, axis=1, out=out) + + def test_permuted_not_writeable(self): + x = np.zeros((2, 5)) + x.flags.writeable = False + with pytest.raises(ValueError, match='read-only'): + random.permuted(x, axis=1, out=x) + + def test_beta(self): + random = Generator(MT19937(self.seed)) + actual = random.beta(.1, .9, size=(3, 2)) + desired = np.array( + [[1.083029353267698e-10, 2.449965303168024e-11], + [2.397085162969853e-02, 3.590779671820755e-08], + [2.830254190078299e-04, 1.744709918330393e-01]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_binomial(self): + random = Generator(MT19937(self.seed)) + actual = random.binomial(100.123, .456, size=(3, 2)) + desired = np.array([[42, 41], + [42, 48], + [44, 50]]) + assert_array_equal(actual, desired) + + random = Generator(MT19937(self.seed)) + actual = random.binomial(100.123, .456) + desired = 42 + assert_array_equal(actual, desired) + + def test_chisquare(self): + random = Generator(MT19937(self.seed)) + actual = random.chisquare(50, size=(3, 2)) + desired = np.array([[32.9850547060149, 39.0219480493301], + [56.2006134779419, 57.3474165711485], + [55.4243733880198, 55.4209797925213]]) + assert_array_almost_equal(actual, desired, decimal=13) + + def test_dirichlet(self): + random = Generator(MT19937(self.seed)) + alpha = np.array([51.72840233779265162, 39.74494232180943953]) + actual = random.dirichlet(alpha, size=(3, 2)) + desired = np.array([[[0.5439892869558927, 0.45601071304410745], + [0.5588917345860708, 0.4411082654139292 ]], # noqa: E202 + [[0.5632074165063435, 0.43679258349365657], + [0.54862581112627, 0.45137418887373015]], + [[0.49961831357047226, 0.5003816864295278 ], # noqa: E202 + [0.52374806183482, 0.47625193816517997]]]) + assert_array_almost_equal(actual, desired, decimal=15) + bad_alpha = np.array([5.4e-01, -1.0e-16]) + assert_raises(ValueError, random.dirichlet, bad_alpha) + + random = Generator(MT19937(self.seed)) + alpha = np.array([51.72840233779265162, 39.74494232180943953]) + actual = random.dirichlet(alpha) + assert_array_almost_equal(actual, desired[0, 0], decimal=15) + + def test_dirichlet_size(self): + # gh-3173 + p = np.array([51.72840233779265162, 39.74494232180943953]) + assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2)) + assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2)) + assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2)) + + assert_raises(TypeError, random.dirichlet, p, float(1)) + + def test_dirichlet_bad_alpha(self): + # gh-2089 + alpha = np.array([5.4e-01, -1.0e-16]) + assert_raises(ValueError, random.dirichlet, alpha) + + # gh-15876 + assert_raises(ValueError, random.dirichlet, [[5, 1]]) + assert_raises(ValueError, random.dirichlet, [[5], [1]]) + assert_raises(ValueError, random.dirichlet, [[[5], [1]], [[1], [5]]]) + assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]])) + + def test_dirichlet_alpha_non_contiguous(self): + a = np.array([51.72840233779265162, -1.0, 39.74494232180943953]) + alpha = a[::2] + random = Generator(MT19937(self.seed)) + non_contig = random.dirichlet(alpha, size=(3, 2)) + random = Generator(MT19937(self.seed)) + contig = random.dirichlet(np.ascontiguousarray(alpha), + size=(3, 2)) + assert_array_almost_equal(non_contig, contig) + + def test_dirichlet_small_alpha(self): + eps = 1.0e-9 # 1.0e-10 -> runtime x 10; 1e-11 -> runtime x 200, etc. + alpha = eps * np.array([1., 1.0e-3]) + random = Generator(MT19937(self.seed)) + actual = random.dirichlet(alpha, size=(3, 2)) + expected = np.array([ + [[1., 0.], + [1., 0.]], + [[1., 0.], + [1., 0.]], + [[1., 0.], + [1., 0.]] + ]) + assert_array_almost_equal(actual, expected, decimal=15) + + @pytest.mark.slow + @pytest.mark.thread_unsafe(reason="crashes with low memory") + def test_dirichlet_moderately_small_alpha(self): + # Use alpha.max() < 0.1 to trigger stick breaking code path + alpha = np.array([0.02, 0.04, 0.03]) + exact_mean = alpha / alpha.sum() + random = Generator(MT19937(self.seed)) + sample = random.dirichlet(alpha, size=20000000) + sample_mean = sample.mean(axis=0) + assert_allclose(sample_mean, exact_mean, rtol=1e-3) + + # This set of parameters includes inputs with alpha.max() >= 0.1 and + # alpha.max() < 0.1 to exercise both generation methods within the + # dirichlet code. + @pytest.mark.parametrize( + 'alpha', + [[5, 9, 0, 8], + [0.5, 0, 0, 0], + [1, 5, 0, 0, 1.5, 0, 0, 0], + [0.01, 0.03, 0, 0.005], + [1e-5, 0, 0, 0], + [0.002, 0.015, 0, 0, 0.04, 0, 0, 0], + [0.0], + [0, 0, 0]], + ) + def test_dirichlet_multiple_zeros_in_alpha(self, alpha): + alpha = np.array(alpha) + y = random.dirichlet(alpha) + assert_equal(y[alpha == 0], 0.0) + + def test_exponential(self): + random = Generator(MT19937(self.seed)) + actual = random.exponential(1.1234, size=(3, 2)) + desired = np.array([[0.098845481066258, 1.560752510746964], + [0.075730916041636, 1.769098974710777], + [1.488602544592235, 2.49684815275751 ]]) # noqa: E202 + assert_array_almost_equal(actual, desired, decimal=15) + + def test_exponential_0(self): + assert_equal(random.exponential(scale=0), 0) + assert_raises(ValueError, random.exponential, scale=-0.) + + def test_f(self): + random = Generator(MT19937(self.seed)) + actual = random.f(12, 77, size=(3, 2)) + desired = np.array([[0.461720027077085, 1.100441958872451], + [1.100337455217484, 0.91421736740018 ], # noqa: E202 + [0.500811891303113, 0.826802454552058]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gamma(self): + random = Generator(MT19937(self.seed)) + actual = random.gamma(5, 3, size=(3, 2)) + desired = np.array([[ 5.03850858902096, 7.9228656732049 ], # noqa: E202 + [18.73983605132985, 19.57961681699238], + [18.17897755150825, 18.17653912505234]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_gamma_0(self): + assert_equal(random.gamma(shape=0, scale=0), 0) + assert_raises(ValueError, random.gamma, shape=-0., scale=-0.) + + def test_geometric(self): + random = Generator(MT19937(self.seed)) + actual = random.geometric(.123456789, size=(3, 2)) + desired = np.array([[1, 11], + [1, 12], + [11, 17]]) + assert_array_equal(actual, desired) + + def test_geometric_exceptions(self): + assert_raises(ValueError, random.geometric, 1.1) + assert_raises(ValueError, random.geometric, [1.1] * 10) + assert_raises(ValueError, random.geometric, -0.1) + assert_raises(ValueError, random.geometric, [-0.1] * 10) + with np.errstate(invalid='ignore'): + assert_raises(ValueError, random.geometric, np.nan) + assert_raises(ValueError, random.geometric, [np.nan] * 10) + + def test_gumbel(self): + random = Generator(MT19937(self.seed)) + actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[ 4.688397515056245, -0.289514845417841], + [ 4.981176042584683, -0.633224272589149], + [-0.055915275687488, -0.333962478257953]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gumbel_0(self): + assert_equal(random.gumbel(scale=0), 0) + assert_raises(ValueError, random.gumbel, scale=-0.) + + def test_hypergeometric(self): + random = Generator(MT19937(self.seed)) + actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2)) + desired = np.array([[ 9, 9], + [ 9, 9], + [10, 9]]) + assert_array_equal(actual, desired) + + # Test nbad = 0 + actual = random.hypergeometric(5, 0, 3, size=4) + desired = np.array([3, 3, 3, 3]) + assert_array_equal(actual, desired) + + actual = random.hypergeometric(15, 0, 12, size=4) + desired = np.array([12, 12, 12, 12]) + assert_array_equal(actual, desired) + + # Test ngood = 0 + actual = random.hypergeometric(0, 5, 3, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + actual = random.hypergeometric(0, 15, 12, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + def test_laplace(self): + random = Generator(MT19937(self.seed)) + actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[-3.156353949272393, 1.195863024830054], + [-3.435458081645966, 1.656882398925444], + [ 0.924824032467446, 1.251116432209336]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_laplace_0(self): + assert_equal(random.laplace(scale=0), 0) + assert_raises(ValueError, random.laplace, scale=-0.) + + def test_logistic(self): + random = Generator(MT19937(self.seed)) + actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[-4.338584631510999, 1.890171436749954], + [-4.64547787337966 , 2.514545562919217], # noqa: E203 + [ 1.495389489198666, 1.967827627577474]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_lognormal(self): + random = Generator(MT19937(self.seed)) + actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) + desired = np.array([[ 0.0268252166335, 13.9534486483053], + [ 0.1204014788936, 2.2422077497792], + [ 4.2484199496128, 12.0093343977523]]) + assert_array_almost_equal(actual, desired, decimal=13) + + def test_lognormal_0(self): + assert_equal(random.lognormal(sigma=0), 1) + assert_raises(ValueError, random.lognormal, sigma=-0.) + + def test_logseries(self): + random = Generator(MT19937(self.seed)) + actual = random.logseries(p=.923456789, size=(3, 2)) + desired = np.array([[14, 17], + [3, 18], + [5, 1]]) + assert_array_equal(actual, desired) + + def test_logseries_zero(self): + random = Generator(MT19937(self.seed)) + assert random.logseries(0) == 1 + + @pytest.mark.parametrize("value", [np.nextafter(0., -1), 1., np.nan, 5.]) + def test_logseries_exceptions(self, value): + random = Generator(MT19937(self.seed)) + with np.errstate(invalid="ignore"): + with pytest.raises(ValueError): + random.logseries(value) + with pytest.raises(ValueError): + # contiguous path: + random.logseries(np.array([value] * 10)) + with pytest.raises(ValueError): + # non-contiguous path: + random.logseries(np.array([value] * 10)[::2]) + + def test_multinomial(self): + random = Generator(MT19937(self.seed)) + actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2)) + desired = np.array([[[1, 5, 1, 6, 4, 3], + [4, 2, 6, 2, 4, 2]], + [[5, 3, 2, 6, 3, 1], + [4, 4, 0, 2, 3, 7]], + [[6, 3, 1, 5, 3, 2], + [5, 5, 3, 1, 2, 4]]]) + assert_array_equal(actual, desired) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"]) + def test_multivariate_normal(self, method): + random = Generator(MT19937(self.seed)) + mean = (.123456789, 10) + cov = [[1, 0], [0, 1]] + size = (3, 2) + actual = random.multivariate_normal(mean, cov, size, method=method) + desired = np.array([[[-1.747478062846581, 11.25613495182354 ], # noqa: E202 + [-0.9967333370066214, 10.342002097029821]], + [[ 0.7850019631242964, 11.181113712443013], + [ 0.8901349653255224, 8.873825399642492]], + [[ 0.7130260107430003, 9.551628690083056], + [ 0.7127098726541128, 11.991709234143173]]]) + + assert_array_almost_equal(actual, desired, decimal=15) + + # Check for default size, was raising deprecation warning + actual = random.multivariate_normal(mean, cov, method=method) + desired = np.array([0.233278563284287, 9.424140804347195]) + assert_array_almost_equal(actual, desired, decimal=15) + # Check that non symmetric covariance input raises exception when + # check_valid='raises' if using default svd method. + mean = [0, 0] + cov = [[1, 2], [1, 2]] + assert_raises(ValueError, random.multivariate_normal, mean, cov, + check_valid='raise') + + # Check that non positive-semidefinite covariance warns with + # RuntimeWarning + cov = [[1, 2], [2, 1]] + pytest.warns(RuntimeWarning, random.multivariate_normal, mean, cov) + pytest.warns(RuntimeWarning, random.multivariate_normal, mean, cov, + method='eigh') + assert_raises(LinAlgError, random.multivariate_normal, mean, cov, + method='cholesky') + + # and that it doesn't warn with RuntimeWarning check_valid='ignore' + assert_no_warnings(random.multivariate_normal, mean, cov, + check_valid='ignore') + + # and that it raises with RuntimeWarning check_valid='raises' + assert_raises(ValueError, random.multivariate_normal, mean, cov, + check_valid='raise') + assert_raises(ValueError, random.multivariate_normal, mean, cov, + check_valid='raise', method='eigh') + + # check degenerate samples from singular covariance matrix + cov = [[1, 1], [1, 1]] + if method in ('svd', 'eigh'): + samples = random.multivariate_normal(mean, cov, size=(3, 2), + method=method) + assert_array_almost_equal(samples[..., 0], samples[..., 1], + decimal=6) + else: + assert_raises(LinAlgError, random.multivariate_normal, mean, cov, + method='cholesky') + + cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) + with warnings.catch_warnings(): + warnings.simplefilter("error") + random.multivariate_normal(mean, cov, method=method) + + mu = np.zeros(2) + cov = np.eye(2) + assert_raises(ValueError, random.multivariate_normal, mean, cov, + check_valid='other') + assert_raises(ValueError, random.multivariate_normal, + np.zeros((2, 1, 1)), cov) + assert_raises(ValueError, random.multivariate_normal, + mu, np.empty((3, 2))) + assert_raises(ValueError, random.multivariate_normal, + mu, np.eye(3)) + + @pytest.mark.parametrize('mean, cov', [([0], [[1 + 1j]]), ([0j], [[1]])]) + def test_multivariate_normal_disallow_complex(self, mean, cov): + random = Generator(MT19937(self.seed)) + with pytest.raises(TypeError, match="must not be complex"): + random.multivariate_normal(mean, cov) + + @pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"]) + def test_multivariate_normal_basic_stats(self, method): + random = Generator(MT19937(self.seed)) + n_s = 1000 + mean = np.array([1, 2]) + cov = np.array([[2, 1], [1, 2]]) + s = random.multivariate_normal(mean, cov, size=(n_s,), method=method) + s_center = s - mean + cov_emp = (s_center.T @ s_center) / (n_s - 1) + # these are pretty loose and are only designed to detect major errors + assert np.all(np.abs(s_center.mean(-2)) < 0.1) + assert np.all(np.abs(cov_emp - cov) < 0.2) + + def test_negative_binomial(self): + random = Generator(MT19937(self.seed)) + actual = random.negative_binomial(n=100, p=.12345, size=(3, 2)) + desired = np.array([[543, 727], + [775, 760], + [600, 674]]) + assert_array_equal(actual, desired) + + def test_negative_binomial_exceptions(self): + with np.errstate(invalid='ignore'): + assert_raises(ValueError, random.negative_binomial, 100, np.nan) + assert_raises(ValueError, random.negative_binomial, 100, + [np.nan] * 10) + + def test_negative_binomial_p0_exception(self): + # Verify that p=0 raises an exception. + with assert_raises(ValueError): + x = random.negative_binomial(1, 0) + + def test_negative_binomial_invalid_p_n_combination(self): + # Verify that values of p and n that would result in an overflow + # or infinite loop raise an exception. + with np.errstate(invalid='ignore'): + assert_raises(ValueError, random.negative_binomial, 2**62, 0.1) + assert_raises(ValueError, random.negative_binomial, [2**62], [0.1]) + + def test_noncentral_chisquare(self): + random = Generator(MT19937(self.seed)) + actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) + desired = np.array([[ 1.70561552362133, 15.97378184942111], + [13.71483425173724, 20.17859633310629], + [11.3615477156643 , 3.67891108738029]]) # noqa: E203 + assert_array_almost_equal(actual, desired, decimal=14) + + actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) + desired = np.array([[9.41427665607629e-04, 1.70473157518850e-04], + [1.14554372041263e+00, 1.38187755933435e-03], + [1.90659181905387e+00, 1.21772577941822e+00]]) + assert_array_almost_equal(actual, desired, decimal=14) + + random = Generator(MT19937(self.seed)) + actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) + desired = np.array([[0.82947954590419, 1.80139670767078], + [6.58720057417794, 7.00491463609814], + [6.31101879073157, 6.30982307753005]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_noncentral_f(self): + random = Generator(MT19937(self.seed)) + actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1, + size=(3, 2)) + desired = np.array([[0.060310671139 , 0.23866058175939], # noqa: E203 + [0.86860246709073, 0.2668510459738 ], # noqa: E202 + [0.23375780078364, 1.88922102885943]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_noncentral_f_nan(self): + random = Generator(MT19937(self.seed)) + actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan) + assert np.isnan(actual) + + def test_normal(self): + random = Generator(MT19937(self.seed)) + actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[-3.618412914693162, 2.635726692647081], + [-2.116923463013243, 0.807460983059643], + [ 1.446547137248593, 2.485684213886024]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_normal_0(self): + assert_equal(random.normal(scale=0), 0) + assert_raises(ValueError, random.normal, scale=-0.) + + def test_pareto(self): + random = Generator(MT19937(self.seed)) + actual = random.pareto(a=.123456789, size=(3, 2)) + desired = np.array([[1.0394926776069018e+00, 7.7142534343505773e+04], + [7.2640150889064703e-01, 3.4650454783825594e+05], + [4.5852344481994740e+04, 6.5851383009539105e+07]]) + # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this + # matrix differs by 24 nulps. Discussion: + # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html + # Consensus is that this is probably some gcc quirk that affects + # rounding but not in any important way, so we just use a looser + # tolerance on this test: + np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30) + + def test_poisson(self): + random = Generator(MT19937(self.seed)) + actual = random.poisson(lam=.123456789, size=(3, 2)) + desired = np.array([[0, 0], + [0, 0], + [0, 0]]) + assert_array_equal(actual, desired) + + def test_poisson_exceptions(self): + lambig = np.iinfo('int64').max + lamneg = -1 + assert_raises(ValueError, random.poisson, lamneg) + assert_raises(ValueError, random.poisson, [lamneg] * 10) + assert_raises(ValueError, random.poisson, lambig) + assert_raises(ValueError, random.poisson, [lambig] * 10) + with np.errstate(invalid='ignore'): + assert_raises(ValueError, random.poisson, np.nan) + assert_raises(ValueError, random.poisson, [np.nan] * 10) + + def test_power(self): + random = Generator(MT19937(self.seed)) + actual = random.power(a=.123456789, size=(3, 2)) + desired = np.array([[1.977857368842754e-09, 9.806792196620341e-02], + [2.482442984543471e-10, 1.527108843266079e-01], + [8.188283434244285e-02, 3.950547209346948e-01]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_rayleigh(self): + random = Generator(MT19937(self.seed)) + actual = random.rayleigh(scale=10, size=(3, 2)) + desired = np.array([[4.19494429102666, 16.66920198906598], + [3.67184544902662, 17.74695521962917], + [16.27935397855501, 21.08355560691792]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_rayleigh_0(self): + assert_equal(random.rayleigh(scale=0), 0) + assert_raises(ValueError, random.rayleigh, scale=-0.) + + def test_standard_cauchy(self): + random = Generator(MT19937(self.seed)) + actual = random.standard_cauchy(size=(3, 2)) + desired = np.array([[-1.489437778266206, -3.275389641569784], + [ 0.560102864910406, -0.680780916282552], + [-1.314912905226277, 0.295852965660225]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_exponential(self): + random = Generator(MT19937(self.seed)) + actual = random.standard_exponential(size=(3, 2), method='inv') + desired = np.array([[0.102031839440643, 1.229350298474972], + [0.088137284693098, 1.459859985522667], + [1.093830802293668, 1.256977002164613]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_expoential_type_error(self): + assert_raises(TypeError, random.standard_exponential, dtype=np.int32) + + def test_standard_gamma(self): + random = Generator(MT19937(self.seed)) + actual = random.standard_gamma(shape=3, size=(3, 2)) + desired = np.array([[0.62970724056362, 1.22379851271008], + [3.899412530884 , 4.12479964250139], # noqa: E203 + [3.74994102464584, 3.74929307690815]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_standard_gammma_scalar_float(self): + random = Generator(MT19937(self.seed)) + actual = random.standard_gamma(3, dtype=np.float32) + desired = 2.9242148399353027 + assert_array_almost_equal(actual, desired, decimal=6) + + def test_standard_gamma_float(self): + random = Generator(MT19937(self.seed)) + actual = random.standard_gamma(shape=3, size=(3, 2)) + desired = np.array([[0.62971, 1.2238], + [3.89941, 4.1248], + [3.74994, 3.74929]]) + assert_array_almost_equal(actual, desired, decimal=5) + + def test_standard_gammma_float_out(self): + actual = np.zeros((3, 2), dtype=np.float32) + random = Generator(MT19937(self.seed)) + random.standard_gamma(10.0, out=actual, dtype=np.float32) + desired = np.array([[10.14987, 7.87012], + [ 9.46284, 12.56832], + [13.82495, 7.81533]], dtype=np.float32) + assert_array_almost_equal(actual, desired, decimal=5) + + random = Generator(MT19937(self.seed)) + random.standard_gamma(10.0, out=actual, size=(3, 2), dtype=np.float32) + assert_array_almost_equal(actual, desired, decimal=5) + + def test_standard_gamma_unknown_type(self): + assert_raises(TypeError, random.standard_gamma, 1., + dtype='int32') + + def test_out_size_mismatch(self): + out = np.zeros(10) + assert_raises(ValueError, random.standard_gamma, 10.0, size=20, + out=out) + assert_raises(ValueError, random.standard_gamma, 10.0, size=(10, 1), + out=out) + + def test_standard_gamma_0(self): + assert_equal(random.standard_gamma(shape=0), 0) + assert_raises(ValueError, random.standard_gamma, shape=-0.) + + def test_standard_normal(self): + random = Generator(MT19937(self.seed)) + actual = random.standard_normal(size=(3, 2)) + desired = np.array([[-1.870934851846581, 1.25613495182354 ], # noqa: E202 + [-1.120190126006621, 0.342002097029821], + [ 0.661545174124296, 1.181113712443012]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_normal_unsupported_type(self): + assert_raises(TypeError, random.standard_normal, dtype=np.int32) + + def test_standard_t(self): + random = Generator(MT19937(self.seed)) + actual = random.standard_t(df=10, size=(3, 2)) + desired = np.array([[-1.484666193042647, 0.30597891831161], + [ 1.056684299648085, -0.407312602088507], + [ 0.130704414281157, -2.038053410490321]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_triangular(self): + random = Generator(MT19937(self.seed)) + actual = random.triangular(left=5.12, mode=10.23, right=20.34, + size=(3, 2)) + desired = np.array([[ 7.86664070590917, 13.6313848513185 ], # noqa: E202 + [ 7.68152445215983, 14.36169131136546], + [13.16105603911429, 13.72341621856971]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_uniform(self): + random = Generator(MT19937(self.seed)) + actual = random.uniform(low=1.23, high=10.54, size=(3, 2)) + desired = np.array([[2.13306255040998 , 7.816987531021207], # noqa: E203 + [2.015436610109887, 8.377577533009589], + [7.421792588856135, 7.891185744455209]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_uniform_range_bounds(self): + fmin = np.finfo('float').min + fmax = np.finfo('float').max + + func = random.uniform + assert_raises(OverflowError, func, -np.inf, 0) + assert_raises(OverflowError, func, 0, np.inf) + assert_raises(OverflowError, func, fmin, fmax) + assert_raises(OverflowError, func, [-np.inf], [0]) + assert_raises(OverflowError, func, [0], [np.inf]) + + # (fmax / 1e17) - fmin is within range, so this should not throw + # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX > + # DBL_MAX by increasing fmin a bit + random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17) + + def test_uniform_zero_range(self): + func = random.uniform + result = func(1.5, 1.5) + assert_allclose(result, 1.5) + result = func([0.0, np.pi], [0.0, np.pi]) + assert_allclose(result, [0.0, np.pi]) + result = func([[2145.12], [2145.12]], [2145.12, 2145.12]) + assert_allclose(result, 2145.12 + np.zeros((2, 2))) + + def test_uniform_neg_range(self): + func = random.uniform + assert_raises(ValueError, func, 2, 1) + assert_raises(ValueError, func, [1, 2], [1, 1]) + assert_raises(ValueError, func, [[0, 1], [2, 3]], 2) + + def test_scalar_exception_propagation(self): + # Tests that exceptions are correctly propagated in distributions + # when called with objects that throw exceptions when converted to + # scalars. + # + # Regression test for gh: 8865 + + class ThrowingFloat(np.ndarray): + def __float__(self): + raise TypeError + + throwing_float = np.array(1.0).view(ThrowingFloat) + assert_raises(TypeError, random.uniform, throwing_float, + throwing_float) + + class ThrowingInteger(np.ndarray): + def __int__(self): + raise TypeError + + throwing_int = np.array(1).view(ThrowingInteger) + assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1) + + def test_vonmises(self): + random = Generator(MT19937(self.seed)) + actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) + desired = np.array([[ 1.107972248690106, 2.841536476232361], + [ 1.832602376042457, 1.945511926976032], + [-0.260147475776542, 2.058047492231698]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_vonmises_small(self): + # check infinite loop, gh-4720 + random = Generator(MT19937(self.seed)) + r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6) + assert_(np.isfinite(r).all()) + + def test_vonmises_nan(self): + random = Generator(MT19937(self.seed)) + r = random.vonmises(mu=0., kappa=np.nan) + assert_(np.isnan(r)) + + @pytest.mark.parametrize("kappa", [1e4, 1e15]) + def test_vonmises_large_kappa(self, kappa): + random = Generator(MT19937(self.seed)) + rs = RandomState(random.bit_generator) + state = random.bit_generator.state + + random_state_vals = rs.vonmises(0, kappa, size=10) + random.bit_generator.state = state + gen_vals = random.vonmises(0, kappa, size=10) + if kappa < 1e6: + assert_allclose(random_state_vals, gen_vals) + else: + assert np.all(random_state_vals != gen_vals) + + @pytest.mark.parametrize("mu", [-7., -np.pi, -3.1, np.pi, 3.2]) + @pytest.mark.parametrize("kappa", [1e-9, 1e-6, 1, 1e3, 1e15]) + def test_vonmises_large_kappa_range(self, mu, kappa): + random = Generator(MT19937(self.seed)) + r = random.vonmises(mu, kappa, 50) + assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) + + def test_wald(self): + random = Generator(MT19937(self.seed)) + actual = random.wald(mean=1.23, scale=1.54, size=(3, 2)) + desired = np.array([[0.26871721804551, 3.2233942732115 ], # noqa: E202 + [2.20328374987066, 2.40958405189353], + [2.07093587449261, 0.73073890064369]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_wald_nonnegative(self): + random = Generator(MT19937(self.seed)) + samples = random.wald(mean=1e9, scale=2.25, size=1000) + assert_(np.all(samples >= 0.0)) + + def test_weibull(self): + random = Generator(MT19937(self.seed)) + actual = random.weibull(a=1.23, size=(3, 2)) + desired = np.array([[0.138613914769468, 1.306463419753191], + [0.111623365934763, 1.446570494646721], + [1.257145775276011, 1.914247725027957]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_weibull_0(self): + random = Generator(MT19937(self.seed)) + assert_equal(random.weibull(a=0, size=12), np.zeros(12)) + assert_raises(ValueError, random.weibull, a=-0.) + + def test_zipf(self): + random = Generator(MT19937(self.seed)) + actual = random.zipf(a=1.23, size=(3, 2)) + desired = np.array([[ 1, 1], + [ 10, 867], + [354, 2]]) + assert_array_equal(actual, desired) + + +class TestBroadcast: + # tests that functions that broadcast behave + # correctly when presented with non-scalar arguments + seed = 123456789 + + def test_uniform(self): + random = Generator(MT19937(self.seed)) + low = [0] + high = [1] + uniform = random.uniform + desired = np.array([0.16693771389729, 0.19635129550675, 0.75563050964095]) + + random = Generator(MT19937(self.seed)) + actual = random.uniform(low * 3, high) + assert_array_almost_equal(actual, desired, decimal=14) + + random = Generator(MT19937(self.seed)) + actual = random.uniform(low, high * 3) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_normal(self): + loc = [0] + scale = [1] + bad_scale = [-1] + random = Generator(MT19937(self.seed)) + desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097]) + + random = Generator(MT19937(self.seed)) + actual = random.normal(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.normal, loc * 3, bad_scale) + + random = Generator(MT19937(self.seed)) + normal = random.normal + actual = normal(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, normal, loc, bad_scale * 3) + + def test_beta(self): + a = [1] + b = [2] + bad_a = [-1] + bad_b = [-2] + desired = np.array([0.18719338682602, 0.73234824491364, 0.17928615186455]) + + random = Generator(MT19937(self.seed)) + beta = random.beta + actual = beta(a * 3, b) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, beta, bad_a * 3, b) + assert_raises(ValueError, beta, a * 3, bad_b) + + random = Generator(MT19937(self.seed)) + actual = random.beta(a, b * 3) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_exponential(self): + scale = [1] + bad_scale = [-1] + desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629]) + + random = Generator(MT19937(self.seed)) + actual = random.exponential(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.exponential, bad_scale * 3) + + def test_standard_gamma(self): + shape = [1] + bad_shape = [-1] + desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629]) + + random = Generator(MT19937(self.seed)) + std_gamma = random.standard_gamma + actual = std_gamma(shape * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, std_gamma, bad_shape * 3) + + def test_gamma(self): + shape = [1] + scale = [2] + bad_shape = [-1] + bad_scale = [-2] + desired = np.array([1.34491986425611, 0.42760990636187, 1.4355697857258]) + + random = Generator(MT19937(self.seed)) + gamma = random.gamma + actual = gamma(shape * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape * 3, scale) + assert_raises(ValueError, gamma, shape * 3, bad_scale) + + random = Generator(MT19937(self.seed)) + gamma = random.gamma + actual = gamma(shape, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape, scale * 3) + assert_raises(ValueError, gamma, shape, bad_scale * 3) + + def test_f(self): + dfnum = [1] + dfden = [2] + bad_dfnum = [-1] + bad_dfden = [-2] + desired = np.array([0.07765056244107, 7.72951397913186, 0.05786093891763]) + + random = Generator(MT19937(self.seed)) + f = random.f + actual = f(dfnum * 3, dfden) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum * 3, dfden) + assert_raises(ValueError, f, dfnum * 3, bad_dfden) + + random = Generator(MT19937(self.seed)) + f = random.f + actual = f(dfnum, dfden * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum, dfden * 3) + assert_raises(ValueError, f, dfnum, bad_dfden * 3) + + def test_noncentral_f(self): + dfnum = [2] + dfden = [3] + nonc = [4] + bad_dfnum = [0] + bad_dfden = [-1] + bad_nonc = [-2] + desired = np.array([2.02434240411421, 12.91838601070124, 1.24395160354629]) + + random = Generator(MT19937(self.seed)) + nonc_f = random.noncentral_f + actual = nonc_f(dfnum * 3, dfden, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3))) + + assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc) + + random = Generator(MT19937(self.seed)) + nonc_f = random.noncentral_f + actual = nonc_f(dfnum, dfden * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc) + + random = Generator(MT19937(self.seed)) + nonc_f = random.noncentral_f + actual = nonc_f(dfnum, dfden, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3) + + def test_noncentral_f_small_df(self): + random = Generator(MT19937(self.seed)) + desired = np.array([0.04714867120827, 0.1239390327694]) + actual = random.noncentral_f(0.9, 0.9, 2, size=2) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_chisquare(self): + df = [1] + bad_df = [-1] + desired = np.array([0.05573640064251, 1.47220224353539, 2.9469379318589]) + + random = Generator(MT19937(self.seed)) + actual = random.chisquare(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.chisquare, bad_df * 3) + + def test_noncentral_chisquare(self): + df = [1] + nonc = [2] + bad_df = [-1] + bad_nonc = [-2] + desired = np.array([0.07710766249436, 5.27829115110304, 0.630732147399]) + + random = Generator(MT19937(self.seed)) + nonc_chi = random.noncentral_chisquare + actual = nonc_chi(df * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df * 3, nonc) + assert_raises(ValueError, nonc_chi, df * 3, bad_nonc) + + random = Generator(MT19937(self.seed)) + nonc_chi = random.noncentral_chisquare + actual = nonc_chi(df, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df, nonc * 3) + assert_raises(ValueError, nonc_chi, df, bad_nonc * 3) + + def test_standard_t(self): + df = [1] + bad_df = [-1] + desired = np.array([-1.39498829447098, -1.23058658835223, 0.17207021065983]) + + random = Generator(MT19937(self.seed)) + actual = random.standard_t(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.standard_t, bad_df * 3) + + def test_vonmises(self): + mu = [2] + kappa = [1] + bad_kappa = [-1] + desired = np.array([2.25935584988528, 2.23326261461399, -2.84152146503326]) + + random = Generator(MT19937(self.seed)) + actual = random.vonmises(mu * 3, kappa) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.vonmises, mu * 3, bad_kappa) + + random = Generator(MT19937(self.seed)) + actual = random.vonmises(mu, kappa * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.vonmises, mu, bad_kappa * 3) + + def test_pareto(self): + a = [1] + bad_a = [-1] + desired = np.array([0.95905052946317, 0.2383810889437, 1.04988745750013]) + + random = Generator(MT19937(self.seed)) + actual = random.pareto(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.pareto, bad_a * 3) + + def test_weibull(self): + a = [1] + bad_a = [-1] + desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629]) + + random = Generator(MT19937(self.seed)) + actual = random.weibull(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.weibull, bad_a * 3) + + def test_power(self): + a = [1] + bad_a = [-1] + desired = np.array([0.48954864361052, 0.19249412888486, 0.51216834058807]) + + random = Generator(MT19937(self.seed)) + actual = random.power(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.power, bad_a * 3) + + def test_laplace(self): + loc = [0] + scale = [1] + bad_scale = [-1] + desired = np.array([-1.09698732625119, -0.93470271947368, 0.71592671378202]) + + random = Generator(MT19937(self.seed)) + laplace = random.laplace + actual = laplace(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc * 3, bad_scale) + + random = Generator(MT19937(self.seed)) + laplace = random.laplace + actual = laplace(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc, bad_scale * 3) + + def test_gumbel(self): + loc = [0] + scale = [1] + bad_scale = [-1] + desired = np.array([1.70020068231762, 1.52054354273631, -0.34293267607081]) + + random = Generator(MT19937(self.seed)) + gumbel = random.gumbel + actual = gumbel(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gumbel, loc * 3, bad_scale) + + random = Generator(MT19937(self.seed)) + gumbel = random.gumbel + actual = gumbel(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gumbel, loc, bad_scale * 3) + + def test_logistic(self): + loc = [0] + scale = [1] + bad_scale = [-1] + desired = np.array([-1.607487640433, -1.40925686003678, 1.12887112820397]) + + random = Generator(MT19937(self.seed)) + actual = random.logistic(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.logistic, loc * 3, bad_scale) + + random = Generator(MT19937(self.seed)) + actual = random.logistic(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.logistic, loc, bad_scale * 3) + assert_equal(random.logistic(1.0, 0.0), 1.0) + + def test_lognormal(self): + mean = [0] + sigma = [1] + bad_sigma = [-1] + desired = np.array([0.67884390500697, 2.21653186290321, 1.01990310084276]) + + random = Generator(MT19937(self.seed)) + lognormal = random.lognormal + actual = lognormal(mean * 3, sigma) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, lognormal, mean * 3, bad_sigma) + + random = Generator(MT19937(self.seed)) + actual = random.lognormal(mean, sigma * 3) + assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3) + + def test_rayleigh(self): + scale = [1] + bad_scale = [-1] + desired = np.array( + [1.1597068009872629, + 0.6539188836253857, + 1.1981526554349398] + ) + + random = Generator(MT19937(self.seed)) + actual = random.rayleigh(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.rayleigh, bad_scale * 3) + + def test_wald(self): + mean = [0.5] + scale = [1] + bad_mean = [0] + bad_scale = [-2] + desired = np.array([0.38052407392905, 0.50701641508592, 0.484935249864]) + + random = Generator(MT19937(self.seed)) + actual = random.wald(mean * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.wald, bad_mean * 3, scale) + assert_raises(ValueError, random.wald, mean * 3, bad_scale) + + random = Generator(MT19937(self.seed)) + actual = random.wald(mean, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.wald, bad_mean, scale * 3) + assert_raises(ValueError, random.wald, mean, bad_scale * 3) + + def test_triangular(self): + left = [1] + right = [3] + mode = [2] + bad_left_one = [3] + bad_mode_one = [4] + bad_left_two, bad_mode_two = right * 2 + desired = np.array([1.57781954604754, 1.62665986867957, 2.30090130831326]) + + random = Generator(MT19937(self.seed)) + triangular = random.triangular + actual = triangular(left * 3, mode, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one * 3, mode, right) + assert_raises(ValueError, triangular, left * 3, bad_mode_one, right) + assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, + right) + + random = Generator(MT19937(self.seed)) + triangular = random.triangular + actual = triangular(left, mode * 3, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one, mode * 3, right) + assert_raises(ValueError, triangular, left, bad_mode_one * 3, right) + assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, + right) + + random = Generator(MT19937(self.seed)) + triangular = random.triangular + actual = triangular(left, mode, right * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one, mode, right * 3) + assert_raises(ValueError, triangular, left, bad_mode_one, right * 3) + assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, + right * 3) + + assert_raises(ValueError, triangular, 10., 0., 20.) + assert_raises(ValueError, triangular, 10., 25., 20.) + assert_raises(ValueError, triangular, 10., 10., 10.) + + def test_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + desired = np.array([0, 0, 1]) + + random = Generator(MT19937(self.seed)) + binom = random.binomial + actual = binom(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, binom, bad_n * 3, p) + assert_raises(ValueError, binom, n * 3, bad_p_one) + assert_raises(ValueError, binom, n * 3, bad_p_two) + + random = Generator(MT19937(self.seed)) + actual = random.binomial(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, binom, bad_n, p * 3) + assert_raises(ValueError, binom, n, bad_p_one * 3) + assert_raises(ValueError, binom, n, bad_p_two * 3) + + def test_negative_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + desired = np.array([0, 2, 1], dtype=np.int64) + + random = Generator(MT19937(self.seed)) + neg_binom = random.negative_binomial + actual = neg_binom(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, neg_binom, bad_n * 3, p) + assert_raises(ValueError, neg_binom, n * 3, bad_p_one) + assert_raises(ValueError, neg_binom, n * 3, bad_p_two) + + random = Generator(MT19937(self.seed)) + neg_binom = random.negative_binomial + actual = neg_binom(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, neg_binom, bad_n, p * 3) + assert_raises(ValueError, neg_binom, n, bad_p_one * 3) + assert_raises(ValueError, neg_binom, n, bad_p_two * 3) + + def test_poisson(self): + + lam = [1] + bad_lam_one = [-1] + desired = np.array([0, 0, 3]) + + random = Generator(MT19937(self.seed)) + max_lam = random._poisson_lam_max + bad_lam_two = [max_lam * 2] + poisson = random.poisson + actual = poisson(lam * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, poisson, bad_lam_one * 3) + assert_raises(ValueError, poisson, bad_lam_two * 3) + + def test_zipf(self): + a = [2] + bad_a = [0] + desired = np.array([1, 8, 1]) + + random = Generator(MT19937(self.seed)) + zipf = random.zipf + actual = zipf(a * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, zipf, bad_a * 3) + with np.errstate(invalid='ignore'): + assert_raises(ValueError, zipf, np.nan) + assert_raises(ValueError, zipf, [0, 0, np.nan]) + + def test_geometric(self): + p = [0.5] + bad_p_one = [-1] + bad_p_two = [1.5] + desired = np.array([1, 1, 3]) + + random = Generator(MT19937(self.seed)) + geometric = random.geometric + actual = geometric(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, geometric, bad_p_one * 3) + assert_raises(ValueError, geometric, bad_p_two * 3) + + def test_hypergeometric(self): + ngood = [1] + nbad = [2] + nsample = [2] + bad_ngood = [-1] + bad_nbad = [-2] + bad_nsample_one = [-1] + bad_nsample_two = [4] + desired = np.array([0, 0, 1]) + + random = Generator(MT19937(self.seed)) + actual = random.hypergeometric(ngood * 3, nbad, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, random.hypergeometric, bad_ngood * 3, nbad, nsample) + assert_raises(ValueError, random.hypergeometric, ngood * 3, bad_nbad, nsample) + assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one) # noqa: E501 + assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two) # noqa: E501 + + random = Generator(MT19937(self.seed)) + actual = random.hypergeometric(ngood, nbad * 3, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, random.hypergeometric, bad_ngood, nbad * 3, nsample) + assert_raises(ValueError, random.hypergeometric, ngood, bad_nbad * 3, nsample) + assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one) # noqa: E501 + assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two) # noqa: E501 + + random = Generator(MT19937(self.seed)) + hypergeom = random.hypergeometric + actual = hypergeom(ngood, nbad, nsample * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3) + assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3) + assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3) + assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3) + + assert_raises(ValueError, hypergeom, -1, 10, 20) + assert_raises(ValueError, hypergeom, 10, -1, 20) + assert_raises(ValueError, hypergeom, 10, 10, -1) + assert_raises(ValueError, hypergeom, 10, 10, 25) + + # ValueError for arguments that are too big. + assert_raises(ValueError, hypergeom, 2**30, 10, 20) + assert_raises(ValueError, hypergeom, 999, 2**31, 50) + assert_raises(ValueError, hypergeom, 999, [2**29, 2**30], 1000) + + def test_logseries(self): + p = [0.5] + bad_p_one = [2] + bad_p_two = [-1] + desired = np.array([1, 1, 1]) + + random = Generator(MT19937(self.seed)) + logseries = random.logseries + actual = logseries(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, logseries, bad_p_one * 3) + assert_raises(ValueError, logseries, bad_p_two * 3) + + def test_multinomial(self): + random = Generator(MT19937(self.seed)) + actual = random.multinomial([5, 20], [1 / 6.] * 6, size=(3, 2)) + desired = np.array([[[0, 0, 2, 1, 2, 0], + [2, 3, 6, 4, 2, 3]], + [[1, 0, 1, 0, 2, 1], + [7, 2, 2, 1, 4, 4]], + [[0, 2, 0, 1, 2, 0], + [3, 2, 3, 3, 4, 5]]], dtype=np.int64) + assert_array_equal(actual, desired) + + random = Generator(MT19937(self.seed)) + actual = random.multinomial([5, 20], [1 / 6.] * 6) + desired = np.array([[0, 0, 2, 1, 2, 0], + [2, 3, 6, 4, 2, 3]], dtype=np.int64) + assert_array_equal(actual, desired) + + random = Generator(MT19937(self.seed)) + actual = random.multinomial([5, 20], [[1 / 6.] * 6] * 2) + desired = np.array([[0, 0, 2, 1, 2, 0], + [2, 3, 6, 4, 2, 3]], dtype=np.int64) + assert_array_equal(actual, desired) + + random = Generator(MT19937(self.seed)) + actual = random.multinomial([[5], [20]], [[1 / 6.] * 6] * 2) + desired = np.array([[[0, 0, 2, 1, 2, 0], + [0, 0, 2, 1, 1, 1]], + [[4, 2, 3, 3, 5, 3], + [7, 2, 2, 1, 4, 4]]], dtype=np.int64) + assert_array_equal(actual, desired) + + @pytest.mark.parametrize("n", [10, + np.array([10, 10]), + np.array([[[10]], [[10]]]) + ] + ) + def test_multinomial_pval_broadcast(self, n): + random = Generator(MT19937(self.seed)) + pvals = np.array([1 / 4] * 4) + actual = random.multinomial(n, pvals) + n_shape = () if isinstance(n, int) else n.shape + expected_shape = n_shape + (4,) + assert actual.shape == expected_shape + pvals = np.vstack([pvals, pvals]) + actual = random.multinomial(n, pvals) + expected_shape = np.broadcast_shapes(n_shape, pvals.shape[:-1]) + (4,) + assert actual.shape == expected_shape + + pvals = np.vstack([[pvals], [pvals]]) + actual = random.multinomial(n, pvals) + expected_shape = np.broadcast_shapes(n_shape, pvals.shape[:-1]) + assert actual.shape == expected_shape + (4,) + actual = random.multinomial(n, pvals, size=(3, 2) + expected_shape) + assert actual.shape == (3, 2) + expected_shape + (4,) + + with pytest.raises(ValueError): + # Ensure that size is not broadcast + actual = random.multinomial(n, pvals, size=(1,) * 6) + + def test_invalid_pvals_broadcast(self): + random = Generator(MT19937(self.seed)) + pvals = [[1 / 6] * 6, [1 / 4] * 6] + assert_raises(ValueError, random.multinomial, 1, pvals) + assert_raises(ValueError, random.multinomial, 6, 0.5) + + def test_empty_outputs(self): + random = Generator(MT19937(self.seed)) + actual = random.multinomial(np.empty((10, 0, 6), "i8"), [1 / 6] * 6) + assert actual.shape == (10, 0, 6, 6) + actual = random.multinomial(12, np.empty((10, 0, 10))) + assert actual.shape == (10, 0, 10) + actual = random.multinomial(np.empty((3, 0, 7), "i8"), + np.empty((3, 0, 7, 4))) + assert actual.shape == (3, 0, 7, 4) + + +@pytest.mark.skipif(IS_WASM, reason="can't start thread") +class TestThread: + # make sure each state produces the same sequence even in threads + seeds = range(4) + + def check_function(self, function, sz): + from threading import Thread + + out1 = np.empty((len(self.seeds),) + sz) + out2 = np.empty((len(self.seeds),) + sz) + + # threaded generation + t = [Thread(target=function, args=(Generator(MT19937(s)), o)) + for s, o in zip(self.seeds, out1)] + [x.start() for x in t] + [x.join() for x in t] + + # the same serial + for s, o in zip(self.seeds, out2): + function(Generator(MT19937(s)), o) + + # these platforms change x87 fpu precision mode in threads + if np.intp().dtype.itemsize == 4 and sys.platform == "win32": + assert_array_almost_equal(out1, out2) + else: + assert_array_equal(out1, out2) + + def test_normal(self): + def gen_random(state, out): + out[...] = state.normal(size=10000) + + self.check_function(gen_random, sz=(10000,)) + + def test_exp(self): + def gen_random(state, out): + out[...] = state.exponential(scale=np.ones((100, 1000))) + + self.check_function(gen_random, sz=(100, 1000)) + + def test_multinomial(self): + def gen_random(state, out): + out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000) + + self.check_function(gen_random, sz=(10000, 6)) + + +# See Issue #4263 +class TestSingleEltArrayInput: + def _create_arrays(self): + return np.array([2]), np.array([3]), np.array([4]), (1,) + + def test_one_arg_funcs(self): + argOne, _, _, tgtShape = self._create_arrays() + funcs = (random.exponential, random.standard_gamma, + random.chisquare, random.standard_t, + random.pareto, random.weibull, + random.power, random.rayleigh, + random.poisson, random.zipf, + random.geometric, random.logseries) + + probfuncs = (random.geometric, random.logseries) + + for func in funcs: + if func in probfuncs: # p < 1.0 + out = func(np.array([0.5])) + + else: + out = func(argOne) + + assert_equal(out.shape, tgtShape) + + def test_two_arg_funcs(self): + argOne, argTwo, _, tgtShape = self._create_arrays() + funcs = (random.uniform, random.normal, + random.beta, random.gamma, + random.f, random.noncentral_chisquare, + random.vonmises, random.laplace, + random.gumbel, random.logistic, + random.lognormal, random.wald, + random.binomial, random.negative_binomial) + + probfuncs = (random.binomial, random.negative_binomial) + + for func in funcs: + if func in probfuncs: # p <= 1 + argTwo = np.array([0.5]) + + else: + argTwo = argTwo + + out = func(argOne, argTwo) + assert_equal(out.shape, tgtShape) + + out = func(argOne[0], argTwo) + assert_equal(out.shape, tgtShape) + + out = func(argOne, argTwo[0]) + assert_equal(out.shape, tgtShape) + + def test_integers(self, endpoint): + _, _, _, tgtShape = self._create_arrays() + itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16, + np.int32, np.uint32, np.int64, np.uint64] + func = random.integers + high = np.array([1]) + low = np.array([0]) + + for dt in itype: + out = func(low, high, endpoint=endpoint, dtype=dt) + assert_equal(out.shape, tgtShape) + + out = func(low[0], high, endpoint=endpoint, dtype=dt) + assert_equal(out.shape, tgtShape) + + out = func(low, high[0], endpoint=endpoint, dtype=dt) + assert_equal(out.shape, tgtShape) + + def test_three_arg_funcs(self): + argOne, argTwo, argThree, tgtShape = self._create_arrays() + funcs = [random.noncentral_f, random.triangular, + random.hypergeometric] + + for func in funcs: + out = func(argOne, argTwo, argThree) + assert_equal(out.shape, tgtShape) + + out = func(argOne[0], argTwo, argThree) + assert_equal(out.shape, tgtShape) + + out = func(argOne, argTwo[0], argThree) + assert_equal(out.shape, tgtShape) + + +@pytest.mark.parametrize("config", JUMP_TEST_DATA) +def test_jumped(config): + # Each config contains the initial seed, a number of raw steps + # the sha256 hashes of the initial and the final states' keys and + # the position of the initial and the final state. + # These were produced using the original C implementation. + seed = config["seed"] + steps = config["steps"] + + mt19937 = MT19937(seed) + # Burn step + mt19937.random_raw(steps) + key = mt19937.state["state"]["key"] + if sys.byteorder == 'big': + key = key.byteswap() + sha256 = hashlib.sha256(key) + assert mt19937.state["state"]["pos"] == config["initial"]["pos"] + assert sha256.hexdigest() == config["initial"]["key_sha256"] + + jumped = mt19937.jumped() + key = jumped.state["state"]["key"] + if sys.byteorder == 'big': + key = key.byteswap() + sha256 = hashlib.sha256(key) + assert jumped.state["state"]["pos"] == config["jumped"]["pos"] + assert sha256.hexdigest() == config["jumped"]["key_sha256"] + + +def test_broadcast_size_error(): + mu = np.ones(3) + sigma = np.ones((4, 3)) + size = (10, 4, 2) + assert random.normal(mu, sigma, size=(5, 4, 3)).shape == (5, 4, 3) + with pytest.raises(ValueError): + random.normal(mu, sigma, size=size) + with pytest.raises(ValueError): + random.normal(mu, sigma, size=(1, 3)) + with pytest.raises(ValueError): + random.normal(mu, sigma, size=(4, 1, 1)) + # 1 arg + shape = np.ones((4, 3)) + with pytest.raises(ValueError): + random.standard_gamma(shape, size=size) + with pytest.raises(ValueError): + random.standard_gamma(shape, size=(3,)) + with pytest.raises(ValueError): + random.standard_gamma(shape, size=3) + # Check out + out = np.empty(size) + with pytest.raises(ValueError): + random.standard_gamma(shape, out=out) + + # 2 arg + with pytest.raises(ValueError): + random.binomial(1, [0.3, 0.7], size=(2, 1)) + with pytest.raises(ValueError): + random.binomial([1, 2], 0.3, size=(2, 1)) + with pytest.raises(ValueError): + random.binomial([1, 2], [0.3, 0.7], size=(2, 1)) + with pytest.raises(ValueError): + random.multinomial([2, 2], [.3, .7], size=(2, 1)) + + # 3 arg + a = random.chisquare(5, size=3) + b = random.chisquare(5, size=(4, 3)) + c = random.chisquare(5, size=(5, 4, 3)) + assert random.noncentral_f(a, b, c).shape == (5, 4, 3) + with pytest.raises(ValueError, match=r"Output size \(6, 5, 1, 1\) is"): + random.noncentral_f(a, b, c, size=(6, 5, 1, 1)) + + +def test_broadcast_size_scalar(): + mu = np.ones(3) + sigma = np.ones(3) + random.normal(mu, sigma, size=3) + with pytest.raises(ValueError): + random.normal(mu, sigma, size=2) + + +def test_ragged_shuffle(): + # GH 18142 + seq = [[], [], 1] + gen = Generator(MT19937(0)) + assert_no_warnings(gen.shuffle, seq) + assert seq == [1, [], []] + + +@pytest.mark.parametrize("high", [-2, [-2]]) +@pytest.mark.parametrize("endpoint", [True, False]) +def test_single_arg_integer_exception(high, endpoint): + # GH 14333 + gen = Generator(MT19937(0)) + msg = 'high < 0' if endpoint else 'high <= 0' + with pytest.raises(ValueError, match=msg): + gen.integers(high, endpoint=endpoint) + msg = 'low > high' if endpoint else 'low >= high' + with pytest.raises(ValueError, match=msg): + gen.integers(-1, high, endpoint=endpoint) + with pytest.raises(ValueError, match=msg): + gen.integers([-1], high, endpoint=endpoint) + + +@pytest.mark.parametrize("dtype", ["f4", "f8"]) +def test_c_contig_req_out(dtype): + # GH 18704 + out = np.empty((2, 3), order="F", dtype=dtype) + shape = [1, 2, 3] + with pytest.raises(ValueError, match="Supplied output array"): + random.standard_gamma(shape, out=out, dtype=dtype) + with pytest.raises(ValueError, match="Supplied output array"): + random.standard_gamma(shape, out=out, size=out.shape, dtype=dtype) + + +@pytest.mark.parametrize("dtype", ["f4", "f8"]) +@pytest.mark.parametrize("order", ["F", "C"]) +@pytest.mark.parametrize("dist", [random.standard_normal, random.random]) +def test_contig_req_out(dist, order, dtype): + # GH 18704 + out = np.empty((2, 3), dtype=dtype, order=order) + variates = dist(out=out, dtype=dtype) + assert variates is out + variates = dist(out=out, dtype=dtype, size=out.shape) + assert variates is out + + +def test_generator_ctor_old_style_pickle(): + rg = np.random.Generator(np.random.PCG64DXSM(0)) + rg.standard_normal(1) + # Directly call reduce which is used in pickling + ctor, (bit_gen, ), _ = rg.__reduce__() + # Simulate unpickling an old pickle that only has the name + assert bit_gen.__class__.__name__ == "PCG64DXSM" + print(ctor) + b = ctor(*("PCG64DXSM",)) + print(b) + b.bit_generator.state = bit_gen.state + state_b = b.bit_generator.state + assert bit_gen.state == state_b + + +def test_pickle_preserves_seed_sequence(): + # GH 26234 + # Add explicit test that bit generators preserve seed sequences + import pickle + + rg = np.random.Generator(np.random.PCG64DXSM(20240411)) + ss = rg.bit_generator.seed_seq + rg_plk = pickle.loads(pickle.dumps(rg)) + ss_plk = rg_plk.bit_generator.seed_seq + assert_equal(ss.state, ss_plk.state) + assert_equal(ss.pool, ss_plk.pool) + + rg.bit_generator.seed_seq.spawn(10) + rg_plk = pickle.loads(pickle.dumps(rg)) + ss_plk = rg_plk.bit_generator.seed_seq + assert_equal(ss.state, ss_plk.state) + + +@pytest.mark.parametrize("version", [121, 126]) +def test_legacy_pickle(version): + # Pickling format was changes in 1.22.x and in 2.0.x + import gzip + import pickle + + base_path = os.path.split(os.path.abspath(__file__))[0] + pkl_file = os.path.join( + base_path, "data", f"generator_pcg64_np{version}.pkl.gz" + ) + with gzip.open(pkl_file) as gz: + rg = pickle.load(gz) + state = rg.bit_generator.state['state'] + + assert isinstance(rg, Generator) + assert isinstance(rg.bit_generator, np.random.PCG64) + assert state['state'] == 35399562948360463058890781895381311971 + assert state['inc'] == 87136372517582989555478159403783844777 diff --git a/local_packages/numpy/random/tests/test_generator_mt19937_regressions.py b/local_packages/numpy/random/tests/test_generator_mt19937_regressions.py new file mode 100644 index 0000000..21093ef --- /dev/null +++ b/local_packages/numpy/random/tests/test_generator_mt19937_regressions.py @@ -0,0 +1,221 @@ +import pytest + +import numpy as np +from numpy.random import MT19937, Generator +from numpy.testing import assert_, assert_array_equal + + +class TestRegression: + def _create_generator(self): + return Generator(MT19937(121263137472525314065)) + + def test_vonmises_range(self): + # Make sure generated random variables are in [-pi, pi]. + # Regression test for ticket #986. + mt19937 = self._create_generator() + for mu in np.linspace(-7., 7., 5): + r = mt19937.vonmises(mu, 1, 50) + assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) + + def test_hypergeometric_range(self): + # Test for ticket #921 + mt19937 = self._create_generator() + assert_(np.all(mt19937.hypergeometric(3, 18, 11, size=10) < 4)) + assert_(np.all(mt19937.hypergeometric(18, 3, 11, size=10) > 0)) + + # Test for ticket #5623 + args = (2**20 - 2, 2**20 - 2, 2**20 - 2) # Check for 32-bit systems + assert_(mt19937.hypergeometric(*args) > 0) + + def test_logseries_convergence(self): + # Test for ticket #923 + mt19937 = self._create_generator() + N = 1000 + rvsn = mt19937.logseries(0.8, size=N) + # these two frequency counts should be close to theoretical + # numbers with this large sample + # theoretical large N result is 0.49706795 + freq = np.sum(rvsn == 1) / N + msg = f'Frequency was {freq:f}, should be > 0.45' + assert_(freq > 0.45, msg) + # theoretical large N result is 0.19882718 + freq = np.sum(rvsn == 2) / N + msg = f'Frequency was {freq:f}, should be < 0.23' + assert_(freq < 0.23, msg) + + def test_shuffle_mixed_dimension(self): + # Test for trac ticket #2074 + for t in [[1, 2, 3, None], + [(1, 1), (2, 2), (3, 3), None], + [1, (2, 2), (3, 3), None], + [(1, 1), 2, 3, None]]: + mt19937 = Generator(MT19937(12345)) + shuffled = np.array(t, dtype=object) + mt19937.shuffle(shuffled) + expected = np.array([t[2], t[0], t[3], t[1]], dtype=object) + assert_array_equal(np.array(shuffled, dtype=object), expected) + + def test_call_within_randomstate(self): + # Check that custom BitGenerator does not call into global state + res = np.array([1, 8, 0, 1, 5, 3, 3, 8, 1, 4]) + for i in range(3): + mt19937 = Generator(MT19937(i)) + m = Generator(MT19937(4321)) + # If m.state is not honored, the result will change + assert_array_equal(m.choice(10, size=10, p=np.ones(10) / 10.), res) + + def test_multivariate_normal_size_types(self): + # Test for multivariate_normal issue with 'size' argument. + # Check that the multivariate_normal size argument can be a + # numpy integer. + mt19937 = self._create_generator() + mt19937.multivariate_normal([0], [[0]], size=1) + mt19937.multivariate_normal([0], [[0]], size=np.int_(1)) + mt19937.multivariate_normal([0], [[0]], size=np.int64(1)) + + def test_beta_small_parameters(self): + # Test that beta with small a and b parameters does not produce + # NaNs due to roundoff errors causing 0 / 0, gh-5851 + mt19937 = self._create_generator() + x = mt19937.beta(0.0001, 0.0001, size=100) + assert_(not np.any(np.isnan(x)), 'Nans in mt19937.beta') + + def test_beta_very_small_parameters(self): + # gh-24203: beta would hang with very small parameters. + mt19937 = self._create_generator() + mt19937.beta(1e-49, 1e-40) + + def test_beta_ridiculously_small_parameters(self): + # gh-24266: beta would generate nan when the parameters + # were subnormal or a small multiple of the smallest normal. + mt19937 = self._create_generator() + tiny = np.finfo(1.0).tiny + x = mt19937.beta(tiny / 32, tiny / 40, size=50) + assert not np.any(np.isnan(x)) + + def test_beta_expected_zero_frequency(self): + # gh-24475: For small a and b (e.g. a=0.0025, b=0.0025), beta + # would generate too many zeros. + mt19937 = self._create_generator() + a = 0.0025 + b = 0.0025 + n = 1000000 + x = mt19937.beta(a, b, size=n) + nzeros = np.count_nonzero(x == 0) + # beta CDF at x = np.finfo(np.double).smallest_subnormal/2 + # is p = 0.0776169083131899, e.g, + # + # import numpy as np + # from mpmath import mp + # mp.dps = 160 + # x = mp.mpf(np.finfo(np.float64).smallest_subnormal)/2 + # # CDF of the beta distribution at x: + # p = mp.betainc(a, b, x1=0, x2=x, regularized=True) + # n = 1000000 + # exprected_freq = float(n*p) + # + expected_freq = 77616.90831318991 + assert 0.95 * expected_freq < nzeros < 1.05 * expected_freq + + def test_choice_sum_of_probs_tolerance(self): + # The sum of probs should be 1.0 with some tolerance. + # For low precision dtypes the tolerance was too tight. + # See numpy github issue 6123. + mt19937 = self._create_generator() + a = [1, 2, 3] + counts = [4, 4, 2] + for dt in np.float16, np.float32, np.float64: + probs = np.array(counts, dtype=dt) / sum(counts) + c = mt19937.choice(a, p=probs) + assert_(c in a) + with pytest.raises(ValueError): + mt19937.choice(a, p=probs * 0.9) + + def test_shuffle_of_array_of_different_length_strings(self): + # Test that permuting an array of different length strings + # will not cause a segfault on garbage collection + # Tests gh-7710 + mt19937 = self._create_generator() + + a = np.array(['a', 'a' * 1000]) + + for _ in range(100): + mt19937.shuffle(a) + + # Force Garbage Collection - should not segfault. + import gc + gc.collect() + + def test_shuffle_of_array_of_objects(self): + # Test that permuting an array of objects will not cause + # a segfault on garbage collection. + # See gh-7719 + mt19937 = self._create_generator() + a = np.array([np.arange(1), np.arange(4)], dtype=object) + + for _ in range(1000): + mt19937.shuffle(a) + + # Force Garbage Collection - should not segfault. + import gc + gc.collect() + + def test_permutation_subclass(self): + + class N(np.ndarray): + pass + + mt19937 = Generator(MT19937(1)) + orig = np.arange(3).view(N) + perm = mt19937.permutation(orig) + assert_array_equal(perm, np.array([2, 0, 1])) + assert_array_equal(orig, np.arange(3).view(N)) + + class M: + a = np.arange(5) + + def __array__(self, dtype=None, copy=None): + return self.a + + mt19937 = Generator(MT19937(1)) + m = M() + perm = mt19937.permutation(m) + assert_array_equal(perm, np.array([4, 1, 3, 0, 2])) + assert_array_equal(m.__array__(), np.arange(5)) + + def test_gamma_0(self): + mt19937 = self._create_generator() + assert mt19937.standard_gamma(0.0) == 0.0 + assert_array_equal(mt19937.standard_gamma([0.0]), 0.0) + + actual = mt19937.standard_gamma([0.0], dtype='float') + expected = np.array([0.], dtype=np.float32) + assert_array_equal(actual, expected) + + def test_geometric_tiny_prob(self): + # Regression test for gh-17007. + # When p = 1e-30, the probability that a sample will exceed 2**63-1 + # is 0.9999999999907766, so we expect the result to be all 2**63-1. + mt19937 = self._create_generator() + assert_array_equal(mt19937.geometric(p=1e-30, size=3), + np.iinfo(np.int64).max) + + def test_zipf_large_parameter(self): + # Regression test for part of gh-9829: a call such as rng.zipf(10000) + # would hang. + mt19937 = self._create_generator() + n = 8 + sample = mt19937.zipf(10000, size=n) + assert_array_equal(sample, np.ones(n, dtype=np.int64)) + + def test_zipf_a_near_1(self): + # Regression test for gh-9829: a call such as rng.zipf(1.0000000000001) + # would hang. + mt19937 = self._create_generator() + n = 100000 + sample = mt19937.zipf(1.0000000000001, size=n) + # Not much of a test, but let's do something more than verify that + # it doesn't hang. Certainly for a monotonically decreasing + # discrete distribution truncated to signed 64 bit integers, more + # than half should be less than 2**62. + assert np.count_nonzero(sample < 2**62) > n / 2 diff --git a/local_packages/numpy/random/tests/test_random.py b/local_packages/numpy/random/tests/test_random.py new file mode 100644 index 0000000..f110aa8 --- /dev/null +++ b/local_packages/numpy/random/tests/test_random.py @@ -0,0 +1,1724 @@ +import sys +import warnings + +import pytest + +import numpy as np +from numpy import random +from numpy.testing import ( + IS_WASM, + assert_, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_no_warnings, + assert_raises, +) + + +class TestSeed: + def test_scalar(self): + s = np.random.RandomState(0) + assert_equal(s.randint(1000), 684) + s = np.random.RandomState(4294967295) + assert_equal(s.randint(1000), 419) + + def test_array(self): + s = np.random.RandomState(range(10)) + assert_equal(s.randint(1000), 468) + s = np.random.RandomState(np.arange(10)) + assert_equal(s.randint(1000), 468) + s = np.random.RandomState([0]) + assert_equal(s.randint(1000), 973) + s = np.random.RandomState([4294967295]) + assert_equal(s.randint(1000), 265) + + def test_invalid_scalar(self): + # seed must be an unsigned 32 bit integer + assert_raises(TypeError, np.random.RandomState, -0.5) + assert_raises(ValueError, np.random.RandomState, -1) + + def test_invalid_array(self): + # seed must be an unsigned 32 bit integer + assert_raises(TypeError, np.random.RandomState, [-0.5]) + assert_raises(ValueError, np.random.RandomState, [-1]) + assert_raises(ValueError, np.random.RandomState, [4294967296]) + assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296]) + assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296]) + + def test_invalid_array_shape(self): + # gh-9832 + assert_raises(ValueError, np.random.RandomState, + np.array([], dtype=np.int64)) + assert_raises(ValueError, np.random.RandomState, [[1, 2, 3]]) + assert_raises(ValueError, np.random.RandomState, [[1, 2, 3], + [4, 5, 6]]) + + +class TestBinomial: + def test_n_zero(self): + # Tests the corner case of n == 0 for the binomial distribution. + # binomial(0, p) should be zero for any p in [0, 1]. + # This test addresses issue #3480. + zeros = np.zeros(2, dtype='int') + for p in [0, .5, 1]: + assert_(random.binomial(0, p) == 0) + assert_array_equal(random.binomial(zeros, p), zeros) + + def test_p_is_nan(self): + # Issue #4571. + assert_raises(ValueError, random.binomial, 1, np.nan) + + +class TestMultinomial: + def test_basic(self): + random.multinomial(100, [0.2, 0.8]) + + def test_zero_probability(self): + random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0]) + + def test_int_negative_interval(self): + assert_(-5 <= random.randint(-5, -1) < -1) + x = random.randint(-5, -1, 5) + assert_(np.all(-5 <= x)) + assert_(np.all(x < -1)) + + def test_size(self): + # gh-3173 + p = [0.5, 0.5] + assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2)) + assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2)) + assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape, + (2, 2, 2)) + + assert_raises(TypeError, np.random.multinomial, 1, p, + float(1)) + + def test_multidimensional_pvals(self): + assert_raises(ValueError, np.random.multinomial, 10, [[0, 1]]) + assert_raises(ValueError, np.random.multinomial, 10, [[0], [1]]) + assert_raises(ValueError, np.random.multinomial, 10, [[[0], [1]], [[1], [0]]]) + assert_raises(ValueError, np.random.multinomial, 10, np.array([[0, 1], [1, 0]])) + + +class TestSetState: + def _create_rng(self): + seed = 1234567890 + prng = random.RandomState(seed) + state = prng.get_state() + return prng, state + + def test_basic(self): + prng, state = self._create_rng() + old = prng.tomaxint(16) + prng.set_state(state) + new = prng.tomaxint(16) + assert_(np.all(old == new)) + + def test_gaussian_reset(self): + # Make sure the cached every-other-Gaussian is reset. + prng, state = self._create_rng() + old = prng.standard_normal(size=3) + prng.set_state(state) + new = prng.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_gaussian_reset_in_media_res(self): + # When the state is saved with a cached Gaussian, make sure the + # cached Gaussian is restored. + prng, state = self._create_rng() + prng.standard_normal() + state = prng.get_state() + old = prng.standard_normal(size=3) + prng.set_state(state) + new = prng.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_backwards_compatibility(self): + # Make sure we can accept old state tuples that do not have the + # cached Gaussian value. + prng, state = self._create_rng() + old_state = state[:-2] + x1 = prng.standard_normal(size=16) + prng.set_state(old_state) + x2 = prng.standard_normal(size=16) + prng.set_state(state) + x3 = prng.standard_normal(size=16) + assert_(np.all(x1 == x2)) + assert_(np.all(x1 == x3)) + + def test_negative_binomial(self): + # Ensure that the negative binomial results take floating point + # arguments without truncation. + prng, _ = self._create_rng() + prng.negative_binomial(0.5, 0.5) + + def test_set_invalid_state(self): + # gh-25402 + prng, _ = self._create_rng() + with pytest.raises(IndexError): + prng.set_state(()) + + +class TestRandint: + + # valid integer/boolean types + itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16, + np.int32, np.uint32, np.int64, np.uint64] + + def test_unsupported_type(self): + rng = random.RandomState() + assert_raises(TypeError, rng.randint, 1, dtype=float) + + def test_bounds_checking(self): + rng = random.RandomState() + for dt in self.itype: + lbnd = 0 if dt is np.bool else np.iinfo(dt).min + ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 + assert_raises(ValueError, rng.randint, lbnd - 1, ubnd, dtype=dt) + assert_raises(ValueError, rng.randint, lbnd, ubnd + 1, dtype=dt) + assert_raises(ValueError, rng.randint, ubnd, lbnd, dtype=dt) + assert_raises(ValueError, rng.randint, 1, 0, dtype=dt) + + def test_rng_zero_and_extremes(self): + rng = random.RandomState() + for dt in self.itype: + lbnd = 0 if dt is np.bool else np.iinfo(dt).min + ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 + + tgt = ubnd - 1 + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + tgt = lbnd + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + tgt = (lbnd + ubnd) // 2 + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + def test_full_range(self): + # Test for ticket #1690 + rng = random.RandomState() + + for dt in self.itype: + lbnd = 0 if dt is np.bool else np.iinfo(dt).min + ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 + + try: + rng.randint(lbnd, ubnd, dtype=dt) + except Exception as e: + raise AssertionError("No error should have been raised, " + "but one was with the following " + "message:\n\n%s" % str(e)) + + def test_in_bounds_fuzz(self): + # Don't use fixed seed + rng = random.RandomState() + + for dt in self.itype[1:]: + for ubnd in [4, 8, 16]: + vals = rng.randint(2, ubnd, size=2**16, dtype=dt) + assert_(vals.max() < ubnd) + assert_(vals.min() >= 2) + + vals = rng.randint(0, 2, size=2**16, dtype=np.bool) + + assert_(vals.max() < 2) + assert_(vals.min() >= 0) + + def test_repeatability(self): + import hashlib + # We use a sha256 hash of generated sequences of 1000 samples + # in the range [0, 6) for all but bool, where the range + # is [0, 2). Hashes are for little endian numbers. + tgt = {'bool': '509aea74d792fb931784c4b0135392c65aec64beee12b0cc167548a2c3d31e71', # noqa: E501 + 'int16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', # noqa: E501 + 'int32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', # noqa: E501 + 'int64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', # noqa: E501 + 'int8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404', # noqa: E501 + 'uint16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', # noqa: E501 + 'uint32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', # noqa: E501 + 'uint64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', # noqa: E501 + 'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'} # noqa: E501 + + for dt in self.itype[1:]: + rng = random.RandomState(1234) + + # view as little endian for hash + if sys.byteorder == 'little': + val = rng.randint(0, 6, size=1000, dtype=dt) + else: + val = rng.randint(0, 6, size=1000, dtype=dt).byteswap() + + res = hashlib.sha256(val.view(np.int8)).hexdigest() + assert_(tgt[np.dtype(dt).name] == res) + + # bools do not depend on endianness + rng = random.RandomState(1234) + val = rng.randint(0, 2, size=1000, dtype=bool).view(np.int8) + res = hashlib.sha256(val).hexdigest() + assert_(tgt[np.dtype(bool).name] == res) + + def test_int64_uint64_corner_case(self): + # When stored in Numpy arrays, `lbnd` is casted + # as np.int64, and `ubnd` is casted as np.uint64. + # Checking whether `lbnd` >= `ubnd` used to be + # done solely via direct comparison, which is incorrect + # because when Numpy tries to compare both numbers, + # it casts both to np.float64 because there is + # no integer superset of np.int64 and np.uint64. However, + # `ubnd` is too large to be represented in np.float64, + # causing it be round down to np.iinfo(np.int64).max, + # leading to a ValueError because `lbnd` now equals + # the new `ubnd`. + + dt = np.int64 + tgt = np.iinfo(np.int64).max + lbnd = np.int64(np.iinfo(np.int64).max) + ubnd = np.uint64(np.iinfo(np.int64).max + 1) + + # None of these function calls should + # generate a ValueError now. + actual = np.random.randint(lbnd, ubnd, dtype=dt) + assert_equal(actual, tgt) + + def test_respect_dtype_singleton(self): + # See gh-7203 + rng = random.RandomState() + for dt in self.itype: + lbnd = 0 if dt is np.bool else np.iinfo(dt).min + ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 + + sample = rng.randint(lbnd, ubnd, dtype=dt) + assert_equal(sample.dtype, np.dtype(dt)) + + for dt in (bool, int): + # The legacy rng uses "long" as the default integer: + lbnd = 0 if dt is bool else np.iinfo("long").min + ubnd = 2 if dt is bool else np.iinfo("long").max + 1 + + # gh-7284: Ensure that we get Python data types + sample = rng.randint(lbnd, ubnd, dtype=dt) + assert_(not hasattr(sample, 'dtype')) + assert_equal(type(sample), dt) + + +class TestRandomDist: + # Make sure the random distribution returns the correct value for a + # given seed + seed = 1234567890 + + def test_rand(self): + rng = random.RandomState(self.seed) + actual = rng.rand(3, 2) + desired = np.array([[0.61879477158567997, 0.59162362775974664], + [0.88868358904449662, 0.89165480011560816], + [0.4575674820298663, 0.7781880808593471]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_randn(self): + rng = random.RandomState(self.seed) + actual = rng.randn(3, 2) + desired = np.array([[1.34016345771863121, 1.73759122771936081], + [1.498988344300628, -0.2286433324536169], + [2.031033998682787, 2.17032494605655257]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_randint(self): + rng = random.RandomState(self.seed) + actual = rng.randint(-99, 99, size=(3, 2)) + desired = np.array([[31, 3], + [-52, 41], + [-48, -66]]) + assert_array_equal(actual, desired) + + def test_random_integers(self): + rng = random.RandomState(self.seed) + with pytest.warns(DeprecationWarning): + actual = rng.random_integers(-99, 99, size=(3, 2)) + desired = np.array([[31, 3], + [-52, 41], + [-48, -66]]) + assert_array_equal(actual, desired) + + def test_random_integers_max_int(self): + # Tests whether random_integers can generate the + # maximum allowed Python int that can be converted + # into a C long. Previous implementations of this + # method have thrown an OverflowError when attempting + # to generate this integer. + with pytest.warns(DeprecationWarning): + actual = np.random.random_integers(np.iinfo('l').max, + np.iinfo('l').max) + + desired = np.iinfo('l').max + assert_equal(actual, desired) + + def test_random_integers_deprecated(self): + with warnings.catch_warnings(): + warnings.simplefilter("error", DeprecationWarning) + + # DeprecationWarning raised with high == None + assert_raises(DeprecationWarning, + np.random.random_integers, + np.iinfo('l').max) + + # DeprecationWarning raised with high != None + assert_raises(DeprecationWarning, + np.random.random_integers, + np.iinfo('l').max, np.iinfo('l').max) + + def test_random(self): + rng = random.RandomState(self.seed) + actual = rng.random((3, 2)) + desired = np.array([[0.61879477158567997, 0.59162362775974664], + [0.88868358904449662, 0.89165480011560816], + [0.4575674820298663, 0.7781880808593471]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_choice_uniform_replace(self): + rng = random.RandomState(self.seed) + actual = rng.choice(4, 4) + desired = np.array([2, 3, 2, 3]) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_replace(self): + rng = random.RandomState(self.seed) + actual = rng.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) + desired = np.array([1, 1, 2, 2]) + assert_array_equal(actual, desired) + + def test_choice_uniform_noreplace(self): + rng = random.RandomState(self.seed) + actual = rng.choice(4, 3, replace=False) + desired = np.array([0, 1, 3]) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_noreplace(self): + rng = random.RandomState(self.seed) + actual = rng.choice(4, 3, replace=False, + p=[0.1, 0.3, 0.5, 0.1]) + desired = np.array([2, 3, 1]) + assert_array_equal(actual, desired) + + def test_choice_noninteger(self): + rng = random.RandomState(self.seed) + actual = rng.choice(['a', 'b', 'c', 'd'], 4) + desired = np.array(['c', 'd', 'c', 'd']) + assert_array_equal(actual, desired) + + def test_choice_exceptions(self): + sample = np.random.choice + assert_raises(ValueError, sample, -1, 3) + assert_raises(ValueError, sample, 3., 3) + assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3) + assert_raises(ValueError, sample, [], 3) + assert_raises(ValueError, sample, [1, 2, 3, 4], 3, + p=[[0.25, 0.25], [0.25, 0.25]]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2]) + assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4]) + assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False) + # gh-13087 + assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False) + assert_raises(ValueError, sample, [1, 2, 3], 2, + replace=False, p=[1, 0, 0]) + + def test_choice_return_shape(self): + p = [0.1, 0.9] + # Check scalar + assert_(np.isscalar(np.random.choice(2, replace=True))) + assert_(np.isscalar(np.random.choice(2, replace=False))) + assert_(np.isscalar(np.random.choice(2, replace=True, p=p))) + assert_(np.isscalar(np.random.choice(2, replace=False, p=p))) + assert_(np.isscalar(np.random.choice([1, 2], replace=True))) + assert_(np.random.choice([None], replace=True) is None) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(np.random.choice(arr, replace=True) is a) + + # Check 0-d array + s = () + assert_(not np.isscalar(np.random.choice(2, s, replace=True))) + assert_(not np.isscalar(np.random.choice(2, s, replace=False))) + assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p))) + assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p))) + assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True))) + assert_(np.random.choice([None], s, replace=True).ndim == 0) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(np.random.choice(arr, s, replace=True).item() is a) + + # Check multi dimensional array + s = (2, 3) + p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2] + assert_equal(np.random.choice(6, s, replace=True).shape, s) + assert_equal(np.random.choice(6, s, replace=False).shape, s) + assert_equal(np.random.choice(6, s, replace=True, p=p).shape, s) + assert_equal(np.random.choice(6, s, replace=False, p=p).shape, s) + assert_equal(np.random.choice(np.arange(6), s, replace=True).shape, s) + + # Check zero-size + assert_equal(np.random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4)) + assert_equal(np.random.randint(0, -10, size=0).shape, (0,)) + assert_equal(np.random.randint(10, 10, size=0).shape, (0,)) + assert_equal(np.random.choice(0, size=0).shape, (0,)) + assert_equal(np.random.choice([], size=(0,)).shape, (0,)) + assert_equal(np.random.choice(['a', 'b'], size=(3, 0, 4)).shape, + (3, 0, 4)) + assert_raises(ValueError, np.random.choice, [], 10) + + def test_choice_nan_probabilities(self): + a = np.array([42, 1, 2]) + p = [None, None, None] + assert_raises(ValueError, np.random.choice, a, p=p) + + def test_bytes(self): + rng = random.RandomState(self.seed) + actual = rng.bytes(10) + desired = b'\x82Ui\x9e\xff\x97+Wf\xa5' + assert_equal(actual, desired) + + def test_shuffle(self): + # Test lists, arrays (of various dtypes), and multidimensional versions + # of both, c-contiguous or not: + for conv in [lambda x: np.array([]), + lambda x: x, + lambda x: np.asarray(x).astype(np.int8), + lambda x: np.asarray(x).astype(np.float32), + lambda x: np.asarray(x).astype(np.complex64), + lambda x: np.asarray(x).astype(object), + lambda x: [(i, i) for i in x], + lambda x: np.asarray([[i, i] for i in x]), + lambda x: np.vstack([x, x]).T, + # gh-11442 + lambda x: (np.asarray([(i, i) for i in x], + [("a", int), ("b", int)]) + .view(np.recarray)), + # gh-4270 + lambda x: np.asarray([(i, i) for i in x], + [("a", object), ("b", np.int32)])]: + rng = random.RandomState(self.seed) + alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) + rng.shuffle(alist) + actual = alist + desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3]) + assert_array_equal(actual, desired) + + def test_shuffle_masked(self): + # gh-3263 + a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1) + b = np.ma.masked_values(np.arange(20) % 3 - 1, -1) + a_orig = a.copy() + b_orig = b.copy() + for i in range(50): + np.random.shuffle(a) + assert_equal( + sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask])) + np.random.shuffle(b) + assert_equal( + sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask])) + + @pytest.mark.parametrize("random", + [np.random, np.random.RandomState(), np.random.default_rng()]) + def test_shuffle_untyped_warning(self, random): + # Create a dict works like a sequence but isn't one + values = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6} + with pytest.warns(UserWarning, + match="you are shuffling a 'dict' object") as rec: + random.shuffle(values) + assert "test_random" in rec[0].filename + + @pytest.mark.parametrize("random", + [np.random, np.random.RandomState(), np.random.default_rng()]) + @pytest.mark.parametrize("use_array_like", [True, False]) + def test_shuffle_no_object_unpacking(self, random, use_array_like): + class MyArr(np.ndarray): + pass + + items = [ + None, np.array([3]), np.float64(3), np.array(10), np.float64(7) + ] + arr = np.array(items, dtype=object) + item_ids = {id(i) for i in items} + if use_array_like: + arr = arr.view(MyArr) + + # The array was created fine, and did not modify any objects: + assert all(id(i) in item_ids for i in arr) + + if use_array_like and not isinstance(random, np.random.Generator): + # The old API gives incorrect results, but warns about it. + with pytest.warns(UserWarning, + match="Shuffling a one dimensional array.*"): + random.shuffle(arr) + else: + random.shuffle(arr) + assert all(id(i) in item_ids for i in arr) + + def test_shuffle_memoryview(self): + # gh-18273 + # allow graceful handling of memoryviews + # (treat the same as arrays) + rng = random.RandomState(self.seed) + a = np.arange(5).data + rng.shuffle(a) + assert_equal(np.asarray(a), [0, 1, 4, 3, 2]) + rng = random.RandomState(self.seed) + rng.shuffle(a) + assert_equal(np.asarray(a), [0, 1, 2, 3, 4]) + rng = np.random.default_rng(self.seed) + rng.shuffle(a) + assert_equal(np.asarray(a), [4, 1, 0, 3, 2]) + + def test_shuffle_not_writeable(self): + a = np.zeros(3) + a.flags.writeable = False + with pytest.raises(ValueError, match='read-only'): + np.random.shuffle(a) + + def test_beta(self): + rng = random.RandomState(self.seed) + actual = rng.beta(.1, .9, size=(3, 2)) + desired = np.array( + [[1.45341850513746058e-02, 5.31297615662868145e-04], + [1.85366619058432324e-06, 4.19214516800110563e-03], + [1.58405155108498093e-04, 1.26252891949397652e-04]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_binomial(self): + rng = random.RandomState(self.seed) + actual = rng.binomial(100, .456, size=(3, 2)) + desired = np.array([[37, 43], + [42, 48], + [46, 45]]) + assert_array_equal(actual, desired) + + def test_chisquare(self): + rng = random.RandomState(self.seed) + actual = rng.chisquare(50, size=(3, 2)) + desired = np.array([[63.87858175501090585, 68.68407748911370447], + [65.77116116901505904, 47.09686762438974483], + [72.3828403199695174, 74.18408615260374006]]) + assert_array_almost_equal(actual, desired, decimal=13) + + def test_dirichlet(self): + rng = random.RandomState(self.seed) + alpha = np.array([51.72840233779265162, 39.74494232180943953]) + actual = rng.dirichlet(alpha, size=(3, 2)) + desired = np.array([[[0.54539444573611562, 0.45460555426388438], + [0.62345816822039413, 0.37654183177960598]], + [[0.55206000085785778, 0.44793999914214233], + [0.58964023305154301, 0.41035976694845688]], + [[0.59266909280647828, 0.40733090719352177], + [0.56974431743975207, 0.43025568256024799]]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_dirichlet_size(self): + # gh-3173 + p = np.array([51.72840233779265162, 39.74494232180943953]) + assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2)) + assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2)) + assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2)) + + assert_raises(TypeError, np.random.dirichlet, p, float(1)) + + def test_dirichlet_bad_alpha(self): + # gh-2089 + alpha = np.array([5.4e-01, -1.0e-16]) + assert_raises(ValueError, np.random.mtrand.dirichlet, alpha) + + # gh-15876 + assert_raises(ValueError, random.dirichlet, [[5, 1]]) + assert_raises(ValueError, random.dirichlet, [[5], [1]]) + assert_raises(ValueError, random.dirichlet, [[[5], [1]], [[1], [5]]]) + assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]])) + + def test_exponential(self): + rng = random.RandomState(self.seed) + actual = rng.exponential(1.1234, size=(3, 2)) + desired = np.array([[1.08342649775011624, 1.00607889924557314], + [2.46628830085216721, 2.49668106809923884], + [0.68717433461363442, 1.69175666993575979]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_exponential_0(self): + assert_equal(np.random.exponential(scale=0), 0) + assert_raises(ValueError, np.random.exponential, scale=-0.) + + def test_f(self): + rng = random.RandomState(self.seed) + actual = rng.f(12, 77, size=(3, 2)) + desired = np.array([[1.21975394418575878, 1.75135759791559775], + [1.44803115017146489, 1.22108959480396262], + [1.02176975757740629, 1.34431827623300415]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gamma(self): + rng = random.RandomState(self.seed) + actual = rng.gamma(5, 3, size=(3, 2)) + desired = np.array([[24.60509188649287182, 28.54993563207210627], + [26.13476110204064184, 12.56988482927716078], + [31.71863275789960568, 33.30143302795922011]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_gamma_0(self): + assert_equal(np.random.gamma(shape=0, scale=0), 0) + assert_raises(ValueError, np.random.gamma, shape=-0., scale=-0.) + + def test_geometric(self): + rng = random.RandomState(self.seed) + actual = rng.geometric(.123456789, size=(3, 2)) + desired = np.array([[8, 7], + [17, 17], + [5, 12]]) + assert_array_equal(actual, desired) + + def test_gumbel(self): + rng = random.RandomState(self.seed) + actual = rng.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[0.19591898743416816, 0.34405539668096674], + [-1.4492522252274278, -1.47374816298446865], + [1.10651090478803416, -0.69535848626236174]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gumbel_0(self): + assert_equal(np.random.gumbel(scale=0), 0) + assert_raises(ValueError, np.random.gumbel, scale=-0.) + + def test_hypergeometric(self): + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(10, 5, 14, size=(3, 2)) + desired = np.array([[10, 10], + [10, 10], + [9, 9]]) + assert_array_equal(actual, desired) + + # Test nbad = 0 + actual = rng.hypergeometric(5, 0, 3, size=4) + desired = np.array([3, 3, 3, 3]) + assert_array_equal(actual, desired) + + actual = rng.hypergeometric(15, 0, 12, size=4) + desired = np.array([12, 12, 12, 12]) + assert_array_equal(actual, desired) + + # Test ngood = 0 + actual = rng.hypergeometric(0, 5, 3, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + actual = rng.hypergeometric(0, 15, 12, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + def test_laplace(self): + rng = random.RandomState(self.seed) + actual = rng.laplace(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[0.66599721112760157, 0.52829452552221945], + [3.12791959514407125, 3.18202813572992005], + [-0.05391065675859356, 1.74901336242837324]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_laplace_0(self): + assert_equal(np.random.laplace(scale=0), 0) + assert_raises(ValueError, np.random.laplace, scale=-0.) + + def test_logistic(self): + rng = random.RandomState(self.seed) + actual = rng.logistic(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[1.09232835305011444, 0.8648196662399954], + [4.27818590694950185, 4.33897006346929714], + [-0.21682183359214885, 2.63373365386060332]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_lognormal(self): + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) + desired = np.array([[16.50698631688883822, 36.54846706092654784], + [22.67886599981281748, 0.71617561058995771], + [65.72798501792723869, 86.84341601437161273]]) + assert_array_almost_equal(actual, desired, decimal=13) + + def test_lognormal_0(self): + assert_equal(np.random.lognormal(sigma=0), 1) + assert_raises(ValueError, np.random.lognormal, sigma=-0.) + + def test_logseries(self): + rng = random.RandomState(self.seed) + actual = rng.logseries(p=.923456789, size=(3, 2)) + desired = np.array([[2, 2], + [6, 17], + [3, 6]]) + assert_array_equal(actual, desired) + + def test_multinomial(self): + rng = random.RandomState(self.seed) + actual = rng.multinomial(20, [1 / 6.] * 6, size=(3, 2)) + desired = np.array([[[4, 3, 5, 4, 2, 2], + [5, 2, 8, 2, 2, 1]], + [[3, 4, 3, 6, 0, 4], + [2, 1, 4, 3, 6, 4]], + [[4, 4, 2, 5, 2, 3], + [4, 3, 4, 2, 3, 4]]]) + assert_array_equal(actual, desired) + + def test_multivariate_normal(self): + rng = random.RandomState(self.seed) + mean = (.123456789, 10) + cov = [[1, 0], [0, 1]] + size = (3, 2) + actual = rng.multivariate_normal(mean, cov, size) + desired = np.array([[[1.463620246718631, 11.73759122771936], + [1.622445133300628, 9.771356667546383]], + [[2.154490787682787, 12.170324946056553], + [1.719909438201865, 9.230548443648306]], + [[0.689515026297799, 9.880729819607714], + [-0.023054015651998, 9.201096623542879]]]) + + assert_array_almost_equal(actual, desired, decimal=15) + + # Check for default size, was raising deprecation warning + actual = rng.multivariate_normal(mean, cov) + desired = np.array([0.895289569463708, 9.17180864067987]) + assert_array_almost_equal(actual, desired, decimal=15) + + # Check that non positive-semidefinite covariance warns with + # RuntimeWarning + mean = [0, 0] + cov = [[1, 2], [2, 1]] + pytest.warns(RuntimeWarning, rng.multivariate_normal, mean, cov) + + # and that it doesn't warn with RuntimeWarning check_valid='ignore' + assert_no_warnings(rng.multivariate_normal, mean, cov, + check_valid='ignore') + + # and that it raises with RuntimeWarning check_valid='raises' + assert_raises(ValueError, rng.multivariate_normal, mean, cov, + check_valid='raise') + + cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) + with warnings.catch_warnings(): + warnings.simplefilter('error') + rng.multivariate_normal(mean, cov) + + def test_negative_binomial(self): + rng = random.RandomState(self.seed) + actual = rng.negative_binomial(n=100, p=.12345, size=(3, 2)) + desired = np.array([[848, 841], + [892, 611], + [779, 647]]) + assert_array_equal(actual, desired) + + def test_noncentral_chisquare(self): + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) + desired = np.array([[23.91905354498517511, 13.35324692733826346], + [31.22452661329736401, 16.60047399466177254], + [5.03461598262724586, 17.94973089023519464]]) + assert_array_almost_equal(actual, desired, decimal=14) + + actual = rng.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) + desired = np.array([[1.47145377828516666, 0.15052899268012659], + [0.00943803056963588, 1.02647251615666169], + [0.332334982684171, 0.15451287602753125]]) + assert_array_almost_equal(actual, desired, decimal=14) + + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) + desired = np.array([[9.597154162763948, 11.725484450296079], + [10.413711048138335, 3.694475922923986], + [13.484222138963087, 14.377255424602957]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_noncentral_f(self): + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum=5, dfden=2, nonc=1, + size=(3, 2)) + desired = np.array([[1.40598099674926669, 0.34207973179285761], + [3.57715069265772545, 7.92632662577829805], + [0.43741599463544162, 1.1774208752428319]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_normal(self): + rng = random.RandomState(self.seed) + actual = rng.normal(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[2.80378370443726244, 3.59863924443872163], + [3.121433477601256, -0.33382987590723379], + [4.18552478636557357, 4.46410668111310471]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_normal_0(self): + assert_equal(np.random.normal(scale=0), 0) + assert_raises(ValueError, np.random.normal, scale=-0.) + + def test_pareto(self): + rng = random.RandomState(self.seed) + actual = rng.pareto(a=.123456789, size=(3, 2)) + desired = np.array( + [[2.46852460439034849e+03, 1.41286880810518346e+03], + [5.28287797029485181e+07, 6.57720981047328785e+07], + [1.40840323350391515e+02, 1.98390255135251704e+05]]) + # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this + # matrix differs by 24 nulps. Discussion: + # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html + # Consensus is that this is probably some gcc quirk that affects + # rounding but not in any important way, so we just use a looser + # tolerance on this test: + np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30) + + def test_poisson(self): + rng = random.RandomState(self.seed) + actual = rng.poisson(lam=.123456789, size=(3, 2)) + desired = np.array([[0, 0], + [1, 0], + [0, 0]]) + assert_array_equal(actual, desired) + + def test_poisson_exceptions(self): + lambig = np.iinfo('l').max + lamneg = -1 + assert_raises(ValueError, np.random.poisson, lamneg) + assert_raises(ValueError, np.random.poisson, [lamneg] * 10) + assert_raises(ValueError, np.random.poisson, lambig) + assert_raises(ValueError, np.random.poisson, [lambig] * 10) + + def test_power(self): + rng = random.RandomState(self.seed) + actual = rng.power(a=.123456789, size=(3, 2)) + desired = np.array([[0.02048932883240791, 0.01424192241128213], + [0.38446073748535298, 0.39499689943484395], + [0.00177699707563439, 0.13115505880863756]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_rayleigh(self): + rng = random.RandomState(self.seed) + actual = rng.rayleigh(scale=10, size=(3, 2)) + desired = np.array([[13.8882496494248393, 13.383318339044731], + [20.95413364294492098, 21.08285015800712614], + [11.06066537006854311, 17.35468505778271009]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_rayleigh_0(self): + assert_equal(np.random.rayleigh(scale=0), 0) + assert_raises(ValueError, np.random.rayleigh, scale=-0.) + + def test_standard_cauchy(self): + rng = random.RandomState(self.seed) + actual = rng.standard_cauchy(size=(3, 2)) + desired = np.array([[0.77127660196445336, -6.55601161955910605], + [0.93582023391158309, -2.07479293013759447], + [-4.74601644297011926, 0.18338989290760804]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_exponential(self): + rng = random.RandomState(self.seed) + actual = rng.standard_exponential(size=(3, 2)) + desired = np.array([[0.96441739162374596, 0.89556604882105506], + [2.1953785836319808, 2.22243285392490542], + [0.6116915921431676, 1.50592546727413201]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_gamma(self): + rng = random.RandomState(self.seed) + actual = rng.standard_gamma(shape=3, size=(3, 2)) + desired = np.array([[5.50841531318455058, 6.62953470301903103], + [5.93988484943779227, 2.31044849402133989], + [7.54838614231317084, 8.012756093271868]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_standard_gamma_0(self): + assert_equal(np.random.standard_gamma(shape=0), 0) + assert_raises(ValueError, np.random.standard_gamma, shape=-0.) + + def test_standard_normal(self): + rng = random.RandomState(self.seed) + actual = rng.standard_normal(size=(3, 2)) + desired = np.array([[1.34016345771863121, 1.73759122771936081], + [1.498988344300628, -0.2286433324536169], + [2.031033998682787, 2.17032494605655257]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_t(self): + rng = random.RandomState(self.seed) + actual = rng.standard_t(df=10, size=(3, 2)) + desired = np.array([[0.97140611862659965, -0.08830486548450577], + [1.36311143689505321, -0.55317463909867071], + [-0.18473749069684214, 0.61181537341755321]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_triangular(self): + rng = random.RandomState(self.seed) + actual = rng.triangular(left=5.12, mode=10.23, right=20.34, + size=(3, 2)) + desired = np.array([[12.68117178949215784, 12.4129206149193152], + [16.20131377335158263, 16.25692138747600524], + [11.20400690911820263, 14.4978144835829923]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_uniform(self): + rng = random.RandomState(self.seed) + actual = rng.uniform(low=1.23, high=10.54, size=(3, 2)) + desired = np.array([[6.99097932346268003, 6.73801597444323974], + [9.50364421400426274, 9.53130618907631089], + [5.48995325769805476, 8.47493103280052118]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_uniform_range_bounds(self): + fmin = np.finfo('float').min + fmax = np.finfo('float').max + + func = np.random.uniform + assert_raises(OverflowError, func, -np.inf, 0) + assert_raises(OverflowError, func, 0, np.inf) + assert_raises(OverflowError, func, fmin, fmax) + assert_raises(OverflowError, func, [-np.inf], [0]) + assert_raises(OverflowError, func, [0], [np.inf]) + + # (fmax / 1e17) - fmin is within range, so this should not throw + # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX > + # DBL_MAX by increasing fmin a bit + np.random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17) + + def test_scalar_exception_propagation(self): + # Tests that exceptions are correctly propagated in distributions + # when called with objects that throw exceptions when converted to + # scalars. + # + # Regression test for gh: 8865 + + class ThrowingFloat(np.ndarray): + def __float__(self): + raise TypeError + + throwing_float = np.array(1.0).view(ThrowingFloat) + assert_raises(TypeError, np.random.uniform, throwing_float, + throwing_float) + + class ThrowingInteger(np.ndarray): + def __int__(self): + raise TypeError + + __index__ = __int__ + + throwing_int = np.array(1).view(ThrowingInteger) + assert_raises(TypeError, np.random.hypergeometric, throwing_int, 1, 1) + + def test_vonmises(self): + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) + desired = np.array([[2.28567572673902042, 2.89163838442285037], + [0.38198375564286025, 2.57638023113890746], + [1.19153771588353052, 1.83509849681825354]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_vonmises_small(self): + # check infinite loop, gh-4720 + np.random.seed(self.seed) + r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6) + np.testing.assert_(np.isfinite(r).all()) + + def test_wald(self): + rng = random.RandomState(self.seed) + actual = rng.wald(mean=1.23, scale=1.54, size=(3, 2)) + desired = np.array([[3.82935265715889983, 5.13125249184285526], + [0.35045403618358717, 1.50832396872003538], + [0.24124319895843183, 0.22031101461955038]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_weibull(self): + rng = random.RandomState(self.seed) + actual = rng.weibull(a=1.23, size=(3, 2)) + desired = np.array([[0.97097342648766727, 0.91422896443565516], + [1.89517770034962929, 1.91414357960479564], + [0.67057783752390987, 1.39494046635066793]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_weibull_0(self): + np.random.seed(self.seed) + assert_equal(np.random.weibull(a=0, size=12), np.zeros(12)) + assert_raises(ValueError, np.random.weibull, a=-0.) + + def test_zipf(self): + rng = random.RandomState(self.seed) + actual = rng.zipf(a=1.23, size=(3, 2)) + desired = np.array([[66, 29], + [1, 1], + [3, 13]]) + assert_array_equal(actual, desired) + + +class TestBroadcast: + # tests that functions that broadcast behave + # correctly when presented with non-scalar arguments + seed = 123456789 + + # TODO: Include test for randint once it can broadcast + # Can steal the test written in PR #6938 + + def test_uniform(self): + low = [0] + high = [1] + desired = np.array([0.53283302478975902, + 0.53413660089041659, + 0.50955303552646702]) + + rng = random.RandomState(self.seed) + actual = rng.uniform(low * 3, high) + assert_array_almost_equal(actual, desired, decimal=14) + + rng = random.RandomState(self.seed) + actual = rng.uniform(low, high * 3) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_normal(self): + loc = [0] + scale = [1] + bad_scale = [-1] + desired = np.array([2.2129019979039612, + 2.1283977976520019, + 1.8417114045748335]) + + rng = random.RandomState(self.seed) + actual = rng.normal(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.normal, loc * 3, bad_scale) + + rng = random.RandomState(self.seed) + actual = rng.normal(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.normal, loc, bad_scale * 3) + + def test_beta(self): + a = [1] + b = [2] + bad_a = [-1] + bad_b = [-2] + desired = np.array([0.19843558305989056, + 0.075230336409423643, + 0.24976865978980844]) + + rng = random.RandomState(self.seed) + actual = rng.beta(a * 3, b) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.beta, bad_a * 3, b) + assert_raises(ValueError, rng.beta, a * 3, bad_b) + + rng = random.RandomState(self.seed) + actual = rng.beta(a, b * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.beta, bad_a, b * 3) + assert_raises(ValueError, rng.beta, a, bad_b * 3) + + def test_exponential(self): + scale = [1] + bad_scale = [-1] + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + rng = random.RandomState(self.seed) + actual = rng.exponential(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.exponential, bad_scale * 3) + + def test_standard_gamma(self): + shape = [1] + bad_shape = [-1] + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + rng = random.RandomState(self.seed) + actual = rng.standard_gamma(shape * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.standard_gamma, bad_shape * 3) + + def test_gamma(self): + shape = [1] + scale = [2] + bad_shape = [-1] + bad_scale = [-2] + desired = np.array([1.5221370731769048, + 1.5277256455738331, + 1.4248762625178359]) + + rng = random.RandomState(self.seed) + actual = rng.gamma(shape * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.gamma, bad_shape * 3, scale) + assert_raises(ValueError, rng.gamma, shape * 3, bad_scale) + + rng = random.RandomState(self.seed) + actual = rng.gamma(shape, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.gamma, bad_shape, scale * 3) + assert_raises(ValueError, rng.gamma, shape, bad_scale * 3) + + def test_f(self): + dfnum = [1] + dfden = [2] + bad_dfnum = [-1] + bad_dfden = [-2] + desired = np.array([0.80038951638264799, + 0.86768719635363512, + 2.7251095168386801]) + + rng = random.RandomState(self.seed) + actual = rng.f(dfnum * 3, dfden) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.f, bad_dfnum * 3, dfden) + assert_raises(ValueError, rng.f, dfnum * 3, bad_dfden) + + rng = random.RandomState(self.seed) + actual = rng.f(dfnum, dfden * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.f, bad_dfnum, dfden * 3) + assert_raises(ValueError, rng.f, dfnum, bad_dfden * 3) + + def test_noncentral_f(self): + dfnum = [2] + dfden = [3] + nonc = [4] + bad_dfnum = [0] + bad_dfden = [-1] + bad_nonc = [-2] + desired = np.array([9.1393943263705211, + 13.025456344595602, + 8.8018098359100545]) + + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum * 3, dfden, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.noncentral_f, bad_dfnum * 3, dfden, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum * 3, bad_dfden, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum * 3, dfden, bad_nonc) + + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum, dfden * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.noncentral_f, bad_dfnum, dfden * 3, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum, bad_dfden * 3, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum, dfden * 3, bad_nonc) + + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum, dfden, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.noncentral_f, bad_dfnum, dfden, nonc * 3) + assert_raises(ValueError, rng.noncentral_f, dfnum, bad_dfden, nonc * 3) + assert_raises(ValueError, rng.noncentral_f, dfnum, dfden, bad_nonc * 3) + + def test_noncentral_f_small_df(self): + rng = random.RandomState(self.seed) + desired = np.array([6.869638627492048, 0.785880199263955]) + actual = rng.noncentral_f(0.9, 0.9, 2, size=2) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_chisquare(self): + df = [1] + bad_df = [-1] + desired = np.array([0.57022801133088286, + 0.51947702108840776, + 0.1320969254923558]) + + rng = random.RandomState(self.seed) + actual = rng.chisquare(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.chisquare, bad_df * 3) + + def test_noncentral_chisquare(self): + df = [1] + nonc = [2] + bad_df = [-1] + bad_nonc = [-2] + desired = np.array([9.0015599467913763, + 4.5804135049718742, + 6.0872302432834564]) + + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.noncentral_chisquare, bad_df * 3, nonc) + assert_raises(ValueError, rng.noncentral_chisquare, df * 3, bad_nonc) + + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.noncentral_chisquare, bad_df, nonc * 3) + assert_raises(ValueError, rng.noncentral_chisquare, df, bad_nonc * 3) + + def test_standard_t(self): + df = [1] + bad_df = [-1] + desired = np.array([3.0702872575217643, + 5.8560725167361607, + 1.0274791436474273]) + + rng = random.RandomState(self.seed) + actual = rng.standard_t(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.standard_t, bad_df * 3) + + def test_vonmises(self): + mu = [2] + kappa = [1] + bad_kappa = [-1] + desired = np.array([2.9883443664201312, + -2.7064099483995943, + -1.8672476700665914]) + + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu * 3, kappa) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.vonmises, mu * 3, bad_kappa) + + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu, kappa * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.vonmises, mu, bad_kappa * 3) + + def test_pareto(self): + a = [1] + bad_a = [-1] + desired = np.array([1.1405622680198362, + 1.1465519762044529, + 1.0389564467453547]) + + rng = random.RandomState(self.seed) + actual = rng.pareto(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.pareto, bad_a * 3) + + def test_weibull(self): + a = [1] + bad_a = [-1] + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + rng = random.RandomState(self.seed) + actual = rng.weibull(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.weibull, bad_a * 3) + + def test_power(self): + a = [1] + bad_a = [-1] + desired = np.array([0.53283302478975902, + 0.53413660089041659, + 0.50955303552646702]) + + rng = random.RandomState(self.seed) + actual = rng.power(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.power, bad_a * 3) + + def test_laplace(self): + loc = [0] + scale = [1] + bad_scale = [-1] + desired = np.array([0.067921356028507157, + 0.070715642226971326, + 0.019290950698972624]) + + rng = random.RandomState(self.seed) + actual = rng.laplace(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.laplace, loc * 3, bad_scale) + + rng = random.RandomState(self.seed) + actual = rng.laplace(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.laplace, loc, bad_scale * 3) + + def test_gumbel(self): + loc = [0] + scale = [1] + bad_scale = [-1] + desired = np.array([0.2730318639556768, + 0.26936705726291116, + 0.33906220393037939]) + + rng = random.RandomState(self.seed) + actual = rng.gumbel(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.gumbel, loc * 3, bad_scale) + + rng = random.RandomState(self.seed) + actual = rng.gumbel(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.gumbel, loc, bad_scale * 3) + + def test_logistic(self): + loc = [0] + scale = [1] + bad_scale = [-1] + desired = np.array([0.13152135837586171, + 0.13675915696285773, + 0.038216792802833396]) + + rng = random.RandomState(self.seed) + actual = rng.logistic(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.logistic, loc * 3, bad_scale) + + rng = random.RandomState(self.seed) + actual = rng.logistic(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.logistic, loc, bad_scale * 3) + + def test_lognormal(self): + mean = [0] + sigma = [1] + bad_sigma = [-1] + desired = np.array([9.1422086044848427, + 8.4013952870126261, + 6.3073234116578671]) + + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean * 3, sigma) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.lognormal, mean * 3, bad_sigma) + + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean, sigma * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.lognormal, mean, bad_sigma * 3) + + def test_rayleigh(self): + scale = [1] + bad_scale = [-1] + desired = np.array([1.2337491937897689, + 1.2360119924878694, + 1.1936818095781789]) + + rng = random.RandomState(self.seed) + actual = rng.rayleigh(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.rayleigh, bad_scale * 3) + + def test_wald(self): + mean = [0.5] + scale = [1] + bad_mean = [0] + bad_scale = [-2] + desired = np.array([0.11873681120271318, + 0.12450084820795027, + 0.9096122728408238]) + + rng = random.RandomState(self.seed) + actual = rng.wald(mean * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.wald, bad_mean * 3, scale) + assert_raises(ValueError, rng.wald, mean * 3, bad_scale) + + rng = random.RandomState(self.seed) + actual = rng.wald(mean, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.wald, bad_mean, scale * 3) + assert_raises(ValueError, rng.wald, mean, bad_scale * 3) + assert_raises(ValueError, rng.wald, 0.0, 1) + assert_raises(ValueError, rng.wald, 0.5, 0.0) + + def test_triangular(self): + left = [1] + right = [3] + mode = [2] + bad_left_one = [3] + bad_mode_one = [4] + bad_left_two, bad_mode_two = right * 2 + desired = np.array([2.03339048710429, + 2.0347400359389356, + 2.0095991069536208]) + + rng = random.RandomState(self.seed) + actual = rng.triangular(left * 3, mode, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.triangular, bad_left_one * 3, mode, right) + assert_raises(ValueError, rng.triangular, left * 3, bad_mode_one, right) + assert_raises(ValueError, rng.triangular, bad_left_two * 3, bad_mode_two, + right) + + rng = random.RandomState(self.seed) + actual = rng.triangular(left, mode * 3, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.triangular, bad_left_one, mode * 3, right) + assert_raises(ValueError, rng.triangular, left, bad_mode_one * 3, right) + assert_raises(ValueError, rng.triangular, bad_left_two, bad_mode_two * 3, + right) + + rng = random.RandomState(self.seed) + actual = rng.triangular(left, mode, right * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.triangular, bad_left_one, mode, right * 3) + assert_raises(ValueError, rng.triangular, left, bad_mode_one, right * 3) + assert_raises(ValueError, rng.triangular, bad_left_two, bad_mode_two, + right * 3) + + def test_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + desired = np.array([1, 1, 1]) + + rng = random.RandomState(self.seed) + actual = rng.binomial(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, rng.binomial, bad_n * 3, p) + assert_raises(ValueError, rng.binomial, n * 3, bad_p_one) + assert_raises(ValueError, rng.binomial, n * 3, bad_p_two) + + rng = random.RandomState(self.seed) + actual = rng.binomial(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, rng.binomial, bad_n, p * 3) + assert_raises(ValueError, rng.binomial, n, bad_p_one * 3) + assert_raises(ValueError, rng.binomial, n, bad_p_two * 3) + + def test_negative_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + desired = np.array([1, 0, 1]) + + rng = random.RandomState(self.seed) + actual = rng.negative_binomial(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, rng.negative_binomial, bad_n * 3, p) + assert_raises(ValueError, rng.negative_binomial, n * 3, bad_p_one) + assert_raises(ValueError, rng.negative_binomial, n * 3, bad_p_two) + + rng = random.RandomState(self.seed) + actual = rng.negative_binomial(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, rng.negative_binomial, bad_n, p * 3) + assert_raises(ValueError, rng.negative_binomial, n, bad_p_one * 3) + assert_raises(ValueError, rng.negative_binomial, n, bad_p_two * 3) + + def test_poisson(self): + max_lam = np.random.RandomState()._poisson_lam_max + + lam = [1] + bad_lam_one = [-1] + bad_lam_two = [max_lam * 2] + desired = np.array([1, 1, 0]) + + rng = random.RandomState(self.seed) + actual = rng.poisson(lam * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, rng.poisson, bad_lam_one * 3) + assert_raises(ValueError, rng.poisson, bad_lam_two * 3) + + def test_zipf(self): + a = [2] + bad_a = [0] + desired = np.array([2, 2, 1]) + + rng = random.RandomState(self.seed) + actual = rng.zipf(a * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, rng.zipf, bad_a * 3) + with np.errstate(invalid='ignore'): + assert_raises(ValueError, rng.zipf, np.nan) + assert_raises(ValueError, rng.zipf, [0, 0, np.nan]) + + def test_geometric(self): + p = [0.5] + bad_p_one = [-1] + bad_p_two = [1.5] + desired = np.array([2, 2, 2]) + + rng = random.RandomState(self.seed) + actual = rng.geometric(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, rng.geometric, bad_p_one * 3) + assert_raises(ValueError, rng.geometric, bad_p_two * 3) + + def test_hypergeometric(self): + ngood = [1] + nbad = [2] + nsample = [2] + bad_ngood = [-1] + bad_nbad = [-2] + bad_nsample_one = [0] + bad_nsample_two = [4] + desired = np.array([1, 1, 1]) + + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood * 3, nbad, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, rng.hypergeometric, bad_ngood * 3, nbad, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, bad_nbad, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, nbad, bad_nsample_one) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, nbad, bad_nsample_two) + + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood, nbad * 3, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, rng.hypergeometric, bad_ngood, nbad * 3, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood, bad_nbad * 3, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad * 3, bad_nsample_one) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad * 3, bad_nsample_two) + + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood, nbad, nsample * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, rng.hypergeometric, bad_ngood, nbad, nsample * 3) + assert_raises(ValueError, rng.hypergeometric, ngood, bad_nbad, nsample * 3) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad, bad_nsample_one * 3) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad, bad_nsample_two * 3) + + def test_logseries(self): + p = [0.5] + bad_p_one = [2] + bad_p_two = [-1] + desired = np.array([1, 1, 1]) + + rng = random.RandomState(self.seed) + actual = rng.logseries(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, rng.logseries, bad_p_one * 3) + assert_raises(ValueError, rng.logseries, bad_p_two * 3) + + +@pytest.mark.skipif(IS_WASM, reason="can't start thread") +class TestThread: + # make sure each state produces the same sequence even in threads + seeds = range(4) + + def check_function(self, function, sz): + from threading import Thread + + out1 = np.empty((len(self.seeds),) + sz) + out2 = np.empty((len(self.seeds),) + sz) + + # threaded generation + t = [Thread(target=function, args=(np.random.RandomState(s), o)) + for s, o in zip(self.seeds, out1)] + [x.start() for x in t] + [x.join() for x in t] + + # the same serial + for s, o in zip(self.seeds, out2): + function(np.random.RandomState(s), o) + + # these platforms change x87 fpu precision mode in threads + if np.intp().dtype.itemsize == 4 and sys.platform == "win32": + assert_array_almost_equal(out1, out2) + else: + assert_array_equal(out1, out2) + + def test_normal(self): + def gen_random(state, out): + out[...] = state.normal(size=10000) + self.check_function(gen_random, sz=(10000,)) + + def test_exp(self): + def gen_random(state, out): + out[...] = state.exponential(scale=np.ones((100, 1000))) + self.check_function(gen_random, sz=(100, 1000)) + + def test_multinomial(self): + def gen_random(state, out): + out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000) + self.check_function(gen_random, sz=(10000, 6)) + + +# See Issue #4263 +class TestSingleEltArrayInput: + def _create_arrays(self): + return np.array([2]), np.array([3]), np.array([4]), (1,) + + def test_one_arg_funcs(self): + argOne, _, _, tgtShape = self._create_arrays() + funcs = (np.random.exponential, np.random.standard_gamma, + np.random.chisquare, np.random.standard_t, + np.random.pareto, np.random.weibull, + np.random.power, np.random.rayleigh, + np.random.poisson, np.random.zipf, + np.random.geometric, np.random.logseries) + + probfuncs = (np.random.geometric, np.random.logseries) + + for func in funcs: + if func in probfuncs: # p < 1.0 + out = func(np.array([0.5])) + + else: + out = func(argOne) + + assert_equal(out.shape, tgtShape) + + def test_two_arg_funcs(self): + argOne, argTwo, _, tgtShape = self._create_arrays() + funcs = (np.random.uniform, np.random.normal, + np.random.beta, np.random.gamma, + np.random.f, np.random.noncentral_chisquare, + np.random.vonmises, np.random.laplace, + np.random.gumbel, np.random.logistic, + np.random.lognormal, np.random.wald, + np.random.binomial, np.random.negative_binomial) + + probfuncs = (np.random.binomial, np.random.negative_binomial) + + for func in funcs: + if func in probfuncs: # p <= 1 + argTwo = np.array([0.5]) + + else: + argTwo = argTwo + + out = func(argOne, argTwo) + assert_equal(out.shape, tgtShape) + + out = func(argOne[0], argTwo) + assert_equal(out.shape, tgtShape) + + out = func(argOne, argTwo[0]) + assert_equal(out.shape, tgtShape) + + def test_randint(self): + _, _, _, tgtShape = self._create_arrays() + itype = [bool, np.int8, np.uint8, np.int16, np.uint16, + np.int32, np.uint32, np.int64, np.uint64] + func = np.random.randint + high = np.array([1]) + low = np.array([0]) + + for dt in itype: + out = func(low, high, dtype=dt) + assert_equal(out.shape, tgtShape) + + out = func(low[0], high, dtype=dt) + assert_equal(out.shape, tgtShape) + + out = func(low, high[0], dtype=dt) + assert_equal(out.shape, tgtShape) + + def test_three_arg_funcs(self): + argOne, argTwo, argThree, tgtShape = self._create_arrays() + funcs = [np.random.noncentral_f, np.random.triangular, + np.random.hypergeometric] + + for func in funcs: + out = func(argOne, argTwo, argThree) + assert_equal(out.shape, tgtShape) + + out = func(argOne[0], argTwo, argThree) + assert_equal(out.shape, tgtShape) + + out = func(argOne, argTwo[0], argThree) + assert_equal(out.shape, tgtShape) diff --git a/local_packages/numpy/random/tests/test_randomstate.py b/local_packages/numpy/random/tests/test_randomstate.py new file mode 100644 index 0000000..63ffb5a --- /dev/null +++ b/local_packages/numpy/random/tests/test_randomstate.py @@ -0,0 +1,2099 @@ +import hashlib +import pickle +import sys +import warnings + +import pytest + +import numpy as np +from numpy import random +from numpy.random import MT19937, PCG64 +from numpy.testing import ( + IS_WASM, + assert_, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_no_warnings, + assert_raises, +) + +INT_FUNCS = {'binomial': (100.0, 0.6), + 'geometric': (.5,), + 'hypergeometric': (20, 20, 10), + 'logseries': (.5,), + 'multinomial': (20, np.ones(6) / 6.0), + 'negative_binomial': (100, .5), + 'poisson': (10.0,), + 'zipf': (2,), + } + +if np.iinfo(np.long).max < 2**32: + # Windows and some 32-bit platforms, e.g., ARM + INT_FUNC_HASHES = {'binomial': '2fbead005fc63942decb5326d36a1f32fe2c9d32c904ee61e46866b88447c263', # noqa: E501 + 'logseries': '23ead5dcde35d4cfd4ef2c105e4c3d43304b45dc1b1444b7823b9ee4fa144ebb', # noqa: E501 + 'geometric': '0d764db64f5c3bad48c8c33551c13b4d07a1e7b470f77629bef6c985cac76fcf', # noqa: E501 + 'hypergeometric': '7b59bf2f1691626c5815cdcd9a49e1dd68697251d4521575219e4d2a1b8b2c67', # noqa: E501 + 'multinomial': 'd754fa5b92943a38ec07630de92362dd2e02c43577fc147417dc5b9db94ccdd3', # noqa: E501 + 'negative_binomial': '8eb216f7cb2a63cf55605422845caaff002fddc64a7dc8b2d45acd477a49e824', # noqa: E501 + 'poisson': '70c891d76104013ebd6f6bcf30d403a9074b886ff62e4e6b8eb605bf1a4673b7', # noqa: E501 + 'zipf': '01f074f97517cd5d21747148ac6ca4074dde7fcb7acbaec0a936606fecacd93f', # noqa: E501 + } +else: + INT_FUNC_HASHES = {'binomial': '8626dd9d052cb608e93d8868de0a7b347258b199493871a1dc56e2a26cacb112', # noqa: E501 + 'geometric': '8edd53d272e49c4fc8fbbe6c7d08d563d62e482921f3131d0a0e068af30f0db9', # noqa: E501 + 'hypergeometric': '83496cc4281c77b786c9b7ad88b74d42e01603a55c60577ebab81c3ba8d45657', # noqa: E501 + 'logseries': '65878a38747c176bc00e930ebafebb69d4e1e16cd3a704e264ea8f5e24f548db', # noqa: E501 + 'multinomial': '7a984ae6dca26fd25374479e118b22f55db0aedccd5a0f2584ceada33db98605', # noqa: E501 + 'negative_binomial': 'd636d968e6a24ae92ab52fe11c46ac45b0897e98714426764e820a7d77602a61', # noqa: E501 + 'poisson': '956552176f77e7c9cb20d0118fc9cf690be488d790ed4b4c4747b965e61b0bb4', # noqa: E501 + 'zipf': 'f84ba7feffda41e606e20b28dfc0f1ea9964a74574513d4a4cbc98433a8bfa45', # noqa: E501 + } + + +@pytest.fixture(scope='module', params=INT_FUNCS) +def int_func(request): + return (request.param, INT_FUNCS[request.param], + INT_FUNC_HASHES[request.param]) + + +@pytest.fixture +def restore_singleton_bitgen(): + """Ensures that the singleton bitgen is restored after a test""" + orig_bitgen = np.random.get_bit_generator() + yield + np.random.set_bit_generator(orig_bitgen) + + +def assert_mt19937_state_equal(a, b): + assert_equal(a['bit_generator'], b['bit_generator']) + assert_array_equal(a['state']['key'], b['state']['key']) + assert_array_equal(a['state']['pos'], b['state']['pos']) + assert_equal(a['has_gauss'], b['has_gauss']) + assert_equal(a['gauss'], b['gauss']) + + +class TestSeed: + def test_scalar(self): + s = random.RandomState(0) + assert_equal(s.randint(1000), 684) + s = random.RandomState(4294967295) + assert_equal(s.randint(1000), 419) + + def test_array(self): + s = random.RandomState(range(10)) + assert_equal(s.randint(1000), 468) + s = random.RandomState(np.arange(10)) + assert_equal(s.randint(1000), 468) + s = random.RandomState([0]) + assert_equal(s.randint(1000), 973) + s = random.RandomState([4294967295]) + assert_equal(s.randint(1000), 265) + + def test_invalid_scalar(self): + # seed must be an unsigned 32 bit integer + assert_raises(TypeError, random.RandomState, -0.5) + assert_raises(ValueError, random.RandomState, -1) + + def test_invalid_array(self): + # seed must be an unsigned 32 bit integer + assert_raises(TypeError, random.RandomState, [-0.5]) + assert_raises(ValueError, random.RandomState, [-1]) + assert_raises(ValueError, random.RandomState, [4294967296]) + assert_raises(ValueError, random.RandomState, [1, 2, 4294967296]) + assert_raises(ValueError, random.RandomState, [1, -2, 4294967296]) + + def test_invalid_array_shape(self): + # gh-9832 + assert_raises(ValueError, random.RandomState, np.array([], + dtype=np.int64)) + assert_raises(ValueError, random.RandomState, [[1, 2, 3]]) + assert_raises(ValueError, random.RandomState, [[1, 2, 3], + [4, 5, 6]]) + + def test_cannot_seed(self): + rs = random.RandomState(PCG64(0)) + with assert_raises(TypeError): + rs.seed(1234) + + def test_invalid_initialization(self): + assert_raises(ValueError, random.RandomState, MT19937) + + +class TestBinomial: + def test_n_zero(self): + # Tests the corner case of n == 0 for the binomial distribution. + # binomial(0, p) should be zero for any p in [0, 1]. + # This test addresses issue #3480. + zeros = np.zeros(2, dtype='int') + for p in [0, .5, 1]: + assert_(random.binomial(0, p) == 0) + assert_array_equal(random.binomial(zeros, p), zeros) + + def test_p_is_nan(self): + # Issue #4571. + assert_raises(ValueError, random.binomial, 1, np.nan) + + +class TestMultinomial: + def test_basic(self): + random.multinomial(100, [0.2, 0.8]) + + def test_zero_probability(self): + random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0]) + + def test_int_negative_interval(self): + assert_(-5 <= random.randint(-5, -1) < -1) + x = random.randint(-5, -1, 5) + assert_(np.all(-5 <= x)) + assert_(np.all(x < -1)) + + def test_size(self): + # gh-3173 + p = [0.5, 0.5] + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2)) + assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2)) + assert_equal(random.multinomial(1, p, np.array((2, 2))).shape, + (2, 2, 2)) + + assert_raises(TypeError, random.multinomial, 1, p, + float(1)) + + def test_invalid_prob(self): + assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2]) + assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9]) + + def test_invalid_n(self): + assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2]) + + def test_p_non_contiguous(self): + p = np.arange(15.) + p /= np.sum(p[1::3]) + pvals = p[1::3] + rng = random.RandomState(1432985819) + non_contig = rng.multinomial(100, pvals=pvals) + rng = random.RandomState(1432985819) + contig = rng.multinomial(100, pvals=np.ascontiguousarray(pvals)) + assert_array_equal(non_contig, contig) + + def test_multinomial_pvals_float32(self): + x = np.array([9.9e-01, 9.9e-01, 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09, + 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09], dtype=np.float32) + pvals = x / x.sum() + match = r"[\w\s]*pvals array is cast to 64-bit floating" + with pytest.raises(ValueError, match=match): + random.multinomial(1, pvals) + + def test_multinomial_n_float(self): + # Non-index integer types should gracefully truncate floats + random.multinomial(100.5, [0.2, 0.8]) + + +class TestSetState: + def _create_state(self): + seed = 1234567890 + random_state = random.RandomState(seed) + state = random_state.get_state() + return random_state, state + + def test_basic(self): + random_state, state = self._create_state() + old = random_state.tomaxint(16) + random_state.set_state(state) + new = random_state.tomaxint(16) + assert_(np.all(old == new)) + + def test_gaussian_reset(self): + # Make sure the cached every-other-Gaussian is reset. + random_state, state = self._create_state() + old = random_state.standard_normal(size=3) + random_state.set_state(state) + new = random_state.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_gaussian_reset_in_media_res(self): + # When the state is saved with a cached Gaussian, make sure the + # cached Gaussian is restored. + random_state, state = self._create_state() + random_state.standard_normal() + state = random_state.get_state() + old = random_state.standard_normal(size=3) + random_state.set_state(state) + new = random_state.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_backwards_compatibility(self): + # Make sure we can accept old state tuples that do not have the + # cached Gaussian value. + random_state, state = self._create_state() + old_state = state[:-2] + x1 = random_state.standard_normal(size=16) + random_state.set_state(old_state) + x2 = random_state.standard_normal(size=16) + random_state.set_state(state) + x3 = random_state.standard_normal(size=16) + assert_(np.all(x1 == x2)) + assert_(np.all(x1 == x3)) + + def test_negative_binomial(self): + # Ensure that the negative binomial results take floating point + # arguments without truncation. + random_state, _ = self._create_state() + random_state.negative_binomial(0.5, 0.5) + + def test_get_state_warning(self): + rs = random.RandomState(PCG64()) + with pytest.warns(RuntimeWarning): + state = rs.get_state() + assert isinstance(state, dict) + assert state['bit_generator'] == 'PCG64' + + def test_invalid_legacy_state_setting(self): + random_state, state = self._create_state() + state = random_state.get_state() + new_state = ('Unknown', ) + state[1:] + assert_raises(ValueError, random_state.set_state, new_state) + assert_raises(TypeError, random_state.set_state, + np.array(new_state, dtype=object)) + state = random_state.get_state(legacy=False) + del state['bit_generator'] + assert_raises(ValueError, random_state.set_state, state) + + def test_pickle(self): + random_state, _ = self._create_state() + random_state.seed(0) + random_state.random_sample(100) + random_state.standard_normal() + pickled = random_state.get_state(legacy=False) + assert_equal(pickled['has_gauss'], 1) + rs_unpick = pickle.loads(pickle.dumps(random_state)) + unpickled = rs_unpick.get_state(legacy=False) + assert_mt19937_state_equal(pickled, unpickled) + + def test_state_setting(self): + random_state, state = self._create_state() + attr_state = random_state.__getstate__() + random_state.standard_normal() + random_state.__setstate__(attr_state) + state = random_state.get_state(legacy=False) + assert_mt19937_state_equal(attr_state, state) + + def test_repr(self): + random_state, _ = self._create_state() + assert repr(random_state).startswith('RandomState(MT19937)') + + +class TestRandint: + + # valid integer/boolean types + itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16, + np.int32, np.uint32, np.int64, np.uint64] + + def test_unsupported_type(self): + rng = np.random.RandomState() + assert_raises(TypeError, rng.randint, 1, dtype=float) + + def test_bounds_checking(self): + rng = np.random.RandomState() + for dt in self.itype: + lbnd = 0 if dt is np.bool else np.iinfo(dt).min + ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 + assert_raises(ValueError, rng.randint, lbnd - 1, ubnd, dtype=dt) + assert_raises(ValueError, rng.randint, lbnd, ubnd + 1, dtype=dt) + assert_raises(ValueError, rng.randint, ubnd, lbnd, dtype=dt) + assert_raises(ValueError, rng.randint, 1, 0, dtype=dt) + + def test_rng_zero_and_extremes(self): + rng = np.random.RandomState() + for dt in self.itype: + lbnd = 0 if dt is np.bool else np.iinfo(dt).min + ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 + + tgt = ubnd - 1 + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + tgt = lbnd + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + tgt = (lbnd + ubnd) // 2 + assert_equal(rng.randint(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + def test_full_range(self): + # Test for ticket #1690 + rng = np.random.RandomState() + + for dt in self.itype: + lbnd = 0 if dt is np.bool else np.iinfo(dt).min + ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 + + try: + rng.randint(lbnd, ubnd, dtype=dt) + except Exception as e: + raise AssertionError("No error should have been raised, " + "but one was with the following " + "message:\n\n%s" % str(e)) + + def test_in_bounds_fuzz(self): + # Don't use fixed seed + rng = np.random.RandomState() + + for dt in self.itype[1:]: + for ubnd in [4, 8, 16]: + vals = rng.randint(2, ubnd, size=2**16, dtype=dt) + assert_(vals.max() < ubnd) + assert_(vals.min() >= 2) + + vals = rng.randint(0, 2, size=2**16, dtype=np.bool) + + assert_(vals.max() < 2) + assert_(vals.min() >= 0) + + def test_repeatability(self): + # We use a sha256 hash of generated sequences of 1000 samples + # in the range [0, 6) for all but bool, where the range + # is [0, 2). Hashes are for little endian numbers. + tgt = {'bool': '509aea74d792fb931784c4b0135392c65aec64beee12b0cc167548a2c3d31e71', # noqa: E501 + 'int16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', # noqa: E501 + 'int32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', # noqa: E501 + 'int64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', # noqa: E501 + 'int8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404', # noqa: E501 + 'uint16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', # noqa: E501 + 'uint32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', # noqa: E501 + 'uint64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', # noqa: E501 + 'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'} # noqa: E501 + + for dt in self.itype[1:]: + rng = random.RandomState(1234) + + # view as little endian for hash + if sys.byteorder == 'little': + val = rng.randint(0, 6, size=1000, dtype=dt) + else: + val = rng.randint(0, 6, size=1000, dtype=dt).byteswap() + + res = hashlib.sha256(val.view(np.int8)).hexdigest() + assert_(tgt[np.dtype(dt).name] == res) + + # bools do not depend on endianness + rng = random.RandomState(1234) + val = rng.randint(0, 2, size=1000, dtype=bool).view(np.int8) + res = hashlib.sha256(val).hexdigest() + assert_(tgt[np.dtype(bool).name] == res) + + @pytest.mark.skipif(np.iinfo('l').max < 2**32, + reason='Cannot test with 32-bit C long') + def test_repeatability_32bit_boundary_broadcasting(self): + desired = np.array([[[3992670689, 2438360420, 2557845020], + [4107320065, 4142558326, 3216529513], + [1605979228, 2807061240, 665605495]], + [[3211410639, 4128781000, 457175120], + [1712592594, 1282922662, 3081439808], + [3997822960, 2008322436, 1563495165]], + [[1398375547, 4269260146, 115316740], + [3414372578, 3437564012, 2112038651], + [3572980305, 2260248732, 3908238631]], + [[2561372503, 223155946, 3127879445], + [ 441282060, 3514786552, 2148440361], + [1629275283, 3479737011, 3003195987]], + [[ 412181688, 940383289, 3047321305], + [2978368172, 764731833, 2282559898], + [ 105711276, 720447391, 3596512484]]]) + for size in [None, (5, 3, 3)]: + rng = random.RandomState(12345) + x = rng.randint([[-1], [0], [1]], [2**32 - 1, 2**32, 2**32 + 1], + size=size) + assert_array_equal(x, desired if size is not None else desired[0]) + + def test_int64_uint64_corner_case(self): + # When stored in Numpy arrays, `lbnd` is casted + # as np.int64, and `ubnd` is casted as np.uint64. + # Checking whether `lbnd` >= `ubnd` used to be + # done solely via direct comparison, which is incorrect + # because when Numpy tries to compare both numbers, + # it casts both to np.float64 because there is + # no integer superset of np.int64 and np.uint64. However, + # `ubnd` is too large to be represented in np.float64, + # causing it be round down to np.iinfo(np.int64).max, + # leading to a ValueError because `lbnd` now equals + # the new `ubnd`. + + dt = np.int64 + tgt = np.iinfo(np.int64).max + lbnd = np.int64(np.iinfo(np.int64).max) + ubnd = np.uint64(np.iinfo(np.int64).max + 1) + + # None of these function calls should + # generate a ValueError now. + actual = random.randint(lbnd, ubnd, dtype=dt) + assert_equal(actual, tgt) + + def test_respect_dtype_singleton(self): + # See gh-7203 + rng = np.random.RandomState() + + for dt in self.itype: + lbnd = 0 if dt is np.bool else np.iinfo(dt).min + ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 + + sample = rng.randint(lbnd, ubnd, dtype=dt) + assert_equal(sample.dtype, np.dtype(dt)) + + for dt in (bool, int): + # The legacy random generation forces the use of "long" on this + # branch even when the input is `int` and the default dtype + # for int changed (dtype=int is also the functions default) + op_dtype = "long" if dt is int else "bool" + lbnd = 0 if dt is bool else np.iinfo(op_dtype).min + ubnd = 2 if dt is bool else np.iinfo(op_dtype).max + 1 + + sample = rng.randint(lbnd, ubnd, dtype=dt) + assert_(not hasattr(sample, 'dtype')) + assert_equal(type(sample), dt) + + +class TestRandomDist: + # Make sure the random distribution returns the correct value for a + # given seed + seed = 1234567890 + + def test_rand(self): + rng = random.RandomState(self.seed) + actual = rng.rand(3, 2) + desired = np.array([[0.61879477158567997, 0.59162362775974664], + [0.88868358904449662, 0.89165480011560816], + [0.4575674820298663, 0.7781880808593471]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_rand_singleton(self): + rng = random.RandomState(self.seed) + actual = rng.rand() + desired = 0.61879477158567997 + assert_array_almost_equal(actual, desired, decimal=15) + + def test_randn(self): + rng = random.RandomState(self.seed) + actual = rng.randn(3, 2) + desired = np.array([[1.34016345771863121, 1.73759122771936081], + [1.498988344300628, -0.2286433324536169], + [2.031033998682787, 2.17032494605655257]]) + assert_array_almost_equal(actual, desired, decimal=15) + + rng = random.RandomState(self.seed) + actual = rng.randn() + assert_array_almost_equal(actual, desired[0, 0], decimal=15) + + def test_randint(self): + rng = random.RandomState(self.seed) + actual = rng.randint(-99, 99, size=(3, 2)) + desired = np.array([[31, 3], + [-52, 41], + [-48, -66]]) + assert_array_equal(actual, desired) + + def test_random_integers(self): + rng = random.RandomState(self.seed) + with pytest.warns(DeprecationWarning): + actual = rng.random_integers(-99, 99, size=(3, 2)) + desired = np.array([[31, 3], + [-52, 41], + [-48, -66]]) + assert_array_equal(actual, desired) + + rng = random.RandomState(self.seed) + with pytest.warns(DeprecationWarning): + actual = rng.random_integers(198, size=(3, 2)) + assert_array_equal(actual, desired + 100) + + def test_tomaxint(self): + rs = random.RandomState(self.seed) + actual = rs.tomaxint(size=(3, 2)) + if np.iinfo(np.long).max == 2147483647: + desired = np.array([[1328851649, 731237375], + [1270502067, 320041495], + [1908433478, 499156889]], dtype=np.int64) + else: + desired = np.array([[5707374374421908479, 5456764827585442327], + [8196659375100692377, 8224063923314595285], + [4220315081820346526, 7177518203184491332]], + dtype=np.int64) + + assert_equal(actual, desired) + + rs.seed(self.seed) + actual = rs.tomaxint() + assert_equal(actual, desired[0, 0]) + + def test_random_integers_max_int(self): + # Tests whether random_integers can generate the + # maximum allowed Python int that can be converted + # into a C long. Previous implementations of this + # method have thrown an OverflowError when attempting + # to generate this integer. + with pytest.warns(DeprecationWarning): + actual = random.random_integers(np.iinfo('l').max, + np.iinfo('l').max) + + desired = np.iinfo('l').max + assert_equal(actual, desired) + with pytest.warns(DeprecationWarning): + typer = np.dtype('l').type + actual = random.random_integers(typer(np.iinfo('l').max), + typer(np.iinfo('l').max)) + assert_equal(actual, desired) + + def test_random_integers_deprecated(self): + with warnings.catch_warnings(): + warnings.simplefilter("error", DeprecationWarning) + + # DeprecationWarning raised with high == None + assert_raises(DeprecationWarning, + random.random_integers, + np.iinfo('l').max) + + # DeprecationWarning raised with high != None + assert_raises(DeprecationWarning, + random.random_integers, + np.iinfo('l').max, np.iinfo('l').max) + + def test_random_sample(self): + rng = random.RandomState(self.seed) + actual = rng.random_sample((3, 2)) + desired = np.array([[0.61879477158567997, 0.59162362775974664], + [0.88868358904449662, 0.89165480011560816], + [0.4575674820298663, 0.7781880808593471]]) + assert_array_almost_equal(actual, desired, decimal=15) + + rng = random.RandomState(self.seed) + actual = rng.random_sample() + assert_array_almost_equal(actual, desired[0, 0], decimal=15) + + def test_choice_uniform_replace(self): + rng = random.RandomState(self.seed) + actual = rng.choice(4, 4) + desired = np.array([2, 3, 2, 3]) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_replace(self): + rng = random.RandomState(self.seed) + actual = rng.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) + desired = np.array([1, 1, 2, 2]) + assert_array_equal(actual, desired) + + def test_choice_uniform_noreplace(self): + rng = random.RandomState(self.seed) + actual = rng.choice(4, 3, replace=False) + desired = np.array([0, 1, 3]) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_noreplace(self): + rng = random.RandomState(self.seed) + actual = rng.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1]) + desired = np.array([2, 3, 1]) + assert_array_equal(actual, desired) + + def test_choice_noninteger(self): + rng = random.RandomState(self.seed) + actual = rng.choice(['a', 'b', 'c', 'd'], 4) + desired = np.array(['c', 'd', 'c', 'd']) + assert_array_equal(actual, desired) + + def test_choice_exceptions(self): + sample = random.choice + assert_raises(ValueError, sample, -1, 3) + assert_raises(ValueError, sample, 3., 3) + assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3) + assert_raises(ValueError, sample, [], 3) + assert_raises(ValueError, sample, [1, 2, 3, 4], 3, + p=[[0.25, 0.25], [0.25, 0.25]]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2]) + assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4]) + assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False) + # gh-13087 + assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False) + assert_raises(ValueError, sample, [1, 2, 3], 2, + replace=False, p=[1, 0, 0]) + + def test_choice_return_shape(self): + p = [0.1, 0.9] + # Check scalar + assert_(np.isscalar(random.choice(2, replace=True))) + assert_(np.isscalar(random.choice(2, replace=False))) + assert_(np.isscalar(random.choice(2, replace=True, p=p))) + assert_(np.isscalar(random.choice(2, replace=False, p=p))) + assert_(np.isscalar(random.choice([1, 2], replace=True))) + assert_(random.choice([None], replace=True) is None) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(random.choice(arr, replace=True) is a) + + # Check 0-d array + s = () + assert_(not np.isscalar(random.choice(2, s, replace=True))) + assert_(not np.isscalar(random.choice(2, s, replace=False))) + assert_(not np.isscalar(random.choice(2, s, replace=True, p=p))) + assert_(not np.isscalar(random.choice(2, s, replace=False, p=p))) + assert_(not np.isscalar(random.choice([1, 2], s, replace=True))) + assert_(random.choice([None], s, replace=True).ndim == 0) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(random.choice(arr, s, replace=True).item() is a) + + # Check multi dimensional array + s = (2, 3) + p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2] + assert_equal(random.choice(6, s, replace=True).shape, s) + assert_equal(random.choice(6, s, replace=False).shape, s) + assert_equal(random.choice(6, s, replace=True, p=p).shape, s) + assert_equal(random.choice(6, s, replace=False, p=p).shape, s) + assert_equal(random.choice(np.arange(6), s, replace=True).shape, s) + + # Check zero-size + assert_equal(random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4)) + assert_equal(random.randint(0, -10, size=0).shape, (0,)) + assert_equal(random.randint(10, 10, size=0).shape, (0,)) + assert_equal(random.choice(0, size=0).shape, (0,)) + assert_equal(random.choice([], size=(0,)).shape, (0,)) + assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape, + (3, 0, 4)) + assert_raises(ValueError, random.choice, [], 10) + + def test_choice_nan_probabilities(self): + a = np.array([42, 1, 2]) + p = [None, None, None] + assert_raises(ValueError, random.choice, a, p=p) + + def test_choice_p_non_contiguous(self): + p = np.ones(10) / 5 + p[1::2] = 3.0 + rng = random.RandomState(self.seed) + non_contig = rng.choice(5, 3, p=p[::2]) + rng = random.RandomState(self.seed) + contig = rng.choice(5, 3, p=np.ascontiguousarray(p[::2])) + assert_array_equal(non_contig, contig) + + def test_bytes(self): + rng = random.RandomState(self.seed) + actual = rng.bytes(10) + desired = b'\x82Ui\x9e\xff\x97+Wf\xa5' + assert_equal(actual, desired) + + def test_shuffle(self): + # Test lists, arrays (of various dtypes), and multidimensional versions + # of both, c-contiguous or not: + for conv in [lambda x: np.array([]), + lambda x: x, + lambda x: np.asarray(x).astype(np.int8), + lambda x: np.asarray(x).astype(np.float32), + lambda x: np.asarray(x).astype(np.complex64), + lambda x: np.asarray(x).astype(object), + lambda x: [(i, i) for i in x], + lambda x: np.asarray([[i, i] for i in x]), + lambda x: np.vstack([x, x]).T, + # gh-11442 + lambda x: (np.asarray([(i, i) for i in x], + [("a", int), ("b", int)]) + .view(np.recarray)), + # gh-4270 + lambda x: np.asarray([(i, i) for i in x], + [("a", object, (1,)), + ("b", np.int32, (1,))])]: + rng = random.RandomState(self.seed) + alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) + rng.shuffle(alist) + actual = alist + desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3]) + assert_array_equal(actual, desired) + + def test_shuffle_masked(self): + # gh-3263 + a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1) + b = np.ma.masked_values(np.arange(20) % 3 - 1, -1) + a_orig = a.copy() + b_orig = b.copy() + for i in range(50): + random.shuffle(a) + assert_equal( + sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask])) + random.shuffle(b) + assert_equal( + sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask])) + + def test_shuffle_invalid_objects(self): + x = np.array(3) + assert_raises(TypeError, random.shuffle, x) + + def test_permutation(self): + rng = random.RandomState(self.seed) + alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] + actual = rng.permutation(alist) + desired = [0, 1, 9, 6, 2, 4, 5, 8, 7, 3] + assert_array_equal(actual, desired) + + rng = random.RandomState(self.seed) + arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T + actual = rng.permutation(arr_2d) + assert_array_equal(actual, np.atleast_2d(desired).T) + + rng = random.RandomState(self.seed) + bad_x_str = "abcd" + assert_raises(IndexError, random.permutation, bad_x_str) + + rng = random.RandomState(self.seed) + bad_x_float = 1.2 + assert_raises(IndexError, random.permutation, bad_x_float) + + integer_val = 10 + desired = [9, 0, 8, 5, 1, 3, 4, 7, 6, 2] + + rng = random.RandomState(self.seed) + actual = rng.permutation(integer_val) + assert_array_equal(actual, desired) + + def test_beta(self): + rng = random.RandomState(self.seed) + actual = rng.beta(.1, .9, size=(3, 2)) + desired = np.array( + [[1.45341850513746058e-02, 5.31297615662868145e-04], + [1.85366619058432324e-06, 4.19214516800110563e-03], + [1.58405155108498093e-04, 1.26252891949397652e-04]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_binomial(self): + rng = random.RandomState(self.seed) + actual = rng.binomial(100.123, .456, size=(3, 2)) + desired = np.array([[37, 43], + [42, 48], + [46, 45]]) + assert_array_equal(actual, desired) + + rng = random.RandomState(self.seed) + actual = rng.binomial(100.123, .456) + desired = 37 + assert_array_equal(actual, desired) + + def test_chisquare(self): + rng = random.RandomState(self.seed) + actual = rng.chisquare(50, size=(3, 2)) + desired = np.array([[63.87858175501090585, 68.68407748911370447], + [65.77116116901505904, 47.09686762438974483], + [72.3828403199695174, 74.18408615260374006]]) + assert_array_almost_equal(actual, desired, decimal=13) + + def test_dirichlet(self): + rng = random.RandomState(self.seed) + alpha = np.array([51.72840233779265162, 39.74494232180943953]) + actual = rng.dirichlet(alpha, size=(3, 2)) + desired = np.array([[[0.54539444573611562, 0.45460555426388438], + [0.62345816822039413, 0.37654183177960598]], + [[0.55206000085785778, 0.44793999914214233], + [0.58964023305154301, 0.41035976694845688]], + [[0.59266909280647828, 0.40733090719352177], + [0.56974431743975207, 0.43025568256024799]]]) + assert_array_almost_equal(actual, desired, decimal=15) + bad_alpha = np.array([5.4e-01, -1.0e-16]) + assert_raises(ValueError, random.dirichlet, bad_alpha) + + rng = random.RandomState(self.seed) + alpha = np.array([51.72840233779265162, 39.74494232180943953]) + actual = rng.dirichlet(alpha) + assert_array_almost_equal(actual, desired[0, 0], decimal=15) + + def test_dirichlet_size(self): + # gh-3173 + p = np.array([51.72840233779265162, 39.74494232180943953]) + assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2)) + assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2)) + assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2)) + + assert_raises(TypeError, random.dirichlet, p, float(1)) + + def test_dirichlet_bad_alpha(self): + # gh-2089 + alpha = np.array([5.4e-01, -1.0e-16]) + assert_raises(ValueError, random.dirichlet, alpha) + + def test_dirichlet_alpha_non_contiguous(self): + a = np.array([51.72840233779265162, -1.0, 39.74494232180943953]) + alpha = a[::2] + rng = random.RandomState(self.seed) + non_contig = rng.dirichlet(alpha, size=(3, 2)) + rng = random.RandomState(self.seed) + contig = rng.dirichlet(np.ascontiguousarray(alpha), + size=(3, 2)) + assert_array_almost_equal(non_contig, contig) + + def test_exponential(self): + rng = random.RandomState(self.seed) + actual = rng.exponential(1.1234, size=(3, 2)) + desired = np.array([[1.08342649775011624, 1.00607889924557314], + [2.46628830085216721, 2.49668106809923884], + [0.68717433461363442, 1.69175666993575979]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_exponential_0(self): + assert_equal(random.exponential(scale=0), 0) + assert_raises(ValueError, random.exponential, scale=-0.) + + def test_f(self): + rng = random.RandomState(self.seed) + actual = rng.f(12, 77, size=(3, 2)) + desired = np.array([[1.21975394418575878, 1.75135759791559775], + [1.44803115017146489, 1.22108959480396262], + [1.02176975757740629, 1.34431827623300415]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gamma(self): + rng = random.RandomState(self.seed) + actual = rng.gamma(5, 3, size=(3, 2)) + desired = np.array([[24.60509188649287182, 28.54993563207210627], + [26.13476110204064184, 12.56988482927716078], + [31.71863275789960568, 33.30143302795922011]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_gamma_0(self): + assert_equal(random.gamma(shape=0, scale=0), 0) + assert_raises(ValueError, random.gamma, shape=-0., scale=-0.) + + def test_geometric(self): + rng = random.RandomState(self.seed) + actual = rng.geometric(.123456789, size=(3, 2)) + desired = np.array([[8, 7], + [17, 17], + [5, 12]]) + assert_array_equal(actual, desired) + + def test_geometric_exceptions(self): + assert_raises(ValueError, random.geometric, 1.1) + assert_raises(ValueError, random.geometric, [1.1] * 10) + assert_raises(ValueError, random.geometric, -0.1) + assert_raises(ValueError, random.geometric, [-0.1] * 10) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) + assert_raises(ValueError, random.geometric, np.nan) + assert_raises(ValueError, random.geometric, [np.nan] * 10) + + def test_gumbel(self): + rng = random.RandomState(self.seed) + actual = rng.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[0.19591898743416816, 0.34405539668096674], + [-1.4492522252274278, -1.47374816298446865], + [1.10651090478803416, -0.69535848626236174]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gumbel_0(self): + assert_equal(random.gumbel(scale=0), 0) + assert_raises(ValueError, random.gumbel, scale=-0.) + + def test_hypergeometric(self): + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(10.1, 5.5, 14, size=(3, 2)) + desired = np.array([[10, 10], + [10, 10], + [9, 9]]) + assert_array_equal(actual, desired) + + # Test nbad = 0 + actual = rng.hypergeometric(5, 0, 3, size=4) + desired = np.array([3, 3, 3, 3]) + assert_array_equal(actual, desired) + + actual = rng.hypergeometric(15, 0, 12, size=4) + desired = np.array([12, 12, 12, 12]) + assert_array_equal(actual, desired) + + # Test ngood = 0 + actual = rng.hypergeometric(0, 5, 3, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + actual = rng.hypergeometric(0, 15, 12, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + def test_laplace(self): + rng = random.RandomState(self.seed) + actual = rng.laplace(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[0.66599721112760157, 0.52829452552221945], + [3.12791959514407125, 3.18202813572992005], + [-0.05391065675859356, 1.74901336242837324]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_laplace_0(self): + assert_equal(random.laplace(scale=0), 0) + assert_raises(ValueError, random.laplace, scale=-0.) + + def test_logistic(self): + rng = random.RandomState(self.seed) + actual = rng.logistic(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[1.09232835305011444, 0.8648196662399954], + [4.27818590694950185, 4.33897006346929714], + [-0.21682183359214885, 2.63373365386060332]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_lognormal(self): + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) + desired = np.array([[16.50698631688883822, 36.54846706092654784], + [22.67886599981281748, 0.71617561058995771], + [65.72798501792723869, 86.84341601437161273]]) + assert_array_almost_equal(actual, desired, decimal=13) + + def test_lognormal_0(self): + assert_equal(random.lognormal(sigma=0), 1) + assert_raises(ValueError, random.lognormal, sigma=-0.) + + def test_logseries(self): + rng = random.RandomState(self.seed) + actual = rng.logseries(p=.923456789, size=(3, 2)) + desired = np.array([[2, 2], + [6, 17], + [3, 6]]) + assert_array_equal(actual, desired) + + def test_logseries_zero(self): + assert random.logseries(0) == 1 + + @pytest.mark.parametrize("value", [np.nextafter(0., -1), 1., np.nan, 5.]) + def test_logseries_exceptions(self, value): + with np.errstate(invalid="ignore"): + with pytest.raises(ValueError): + random.logseries(value) + with pytest.raises(ValueError): + # contiguous path: + random.logseries(np.array([value] * 10)) + with pytest.raises(ValueError): + # non-contiguous path: + random.logseries(np.array([value] * 10)[::2]) + + def test_multinomial(self): + rng = random.RandomState(self.seed) + actual = rng.multinomial(20, [1 / 6.] * 6, size=(3, 2)) + desired = np.array([[[4, 3, 5, 4, 2, 2], + [5, 2, 8, 2, 2, 1]], + [[3, 4, 3, 6, 0, 4], + [2, 1, 4, 3, 6, 4]], + [[4, 4, 2, 5, 2, 3], + [4, 3, 4, 2, 3, 4]]]) + assert_array_equal(actual, desired) + + def test_multivariate_normal(self): + rng = random.RandomState(self.seed) + mean = (.123456789, 10) + cov = [[1, 0], [0, 1]] + size = (3, 2) + actual = rng.multivariate_normal(mean, cov, size) + desired = np.array([[[1.463620246718631, 11.73759122771936], + [1.622445133300628, 9.771356667546383]], + [[2.154490787682787, 12.170324946056553], + [1.719909438201865, 9.230548443648306]], + [[0.689515026297799, 9.880729819607714], + [-0.023054015651998, 9.201096623542879]]]) + + assert_array_almost_equal(actual, desired, decimal=15) + + # Check for default size, was raising deprecation warning + actual = rng.multivariate_normal(mean, cov) + desired = np.array([0.895289569463708, 9.17180864067987]) + assert_array_almost_equal(actual, desired, decimal=15) + + # Check that non positive-semidefinite covariance warns with + # RuntimeWarning + mean = [0, 0] + cov = [[1, 2], [2, 1]] + pytest.warns(RuntimeWarning, rng.multivariate_normal, mean, cov) + + # and that it doesn't warn with RuntimeWarning check_valid='ignore' + assert_no_warnings(rng.multivariate_normal, mean, cov, + check_valid='ignore') + + # and that it raises with RuntimeWarning check_valid='raises' + assert_raises(ValueError, rng.multivariate_normal, mean, cov, + check_valid='raise') + + cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) + with warnings.catch_warnings(): + warnings.simplefilter('error', RuntimeWarning) + rng.multivariate_normal(mean, cov) + + mu = np.zeros(2) + cov = np.eye(2) + assert_raises(ValueError, rng.multivariate_normal, mean, cov, + check_valid='other') + assert_raises(ValueError, rng.multivariate_normal, + np.zeros((2, 1, 1)), cov) + assert_raises(ValueError, rng.multivariate_normal, + mu, np.empty((3, 2))) + assert_raises(ValueError, rng.multivariate_normal, + mu, np.eye(3)) + + def test_negative_binomial(self): + rng = random.RandomState(self.seed) + actual = rng.negative_binomial(n=100, p=.12345, size=(3, 2)) + desired = np.array([[848, 841], + [892, 611], + [779, 647]]) + assert_array_equal(actual, desired) + + def test_negative_binomial_exceptions(self): + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) + assert_raises(ValueError, random.negative_binomial, 100, np.nan) + assert_raises(ValueError, random.negative_binomial, 100, + [np.nan] * 10) + + def test_noncentral_chisquare(self): + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) + desired = np.array([[23.91905354498517511, 13.35324692733826346], + [31.22452661329736401, 16.60047399466177254], + [5.03461598262724586, 17.94973089023519464]]) + assert_array_almost_equal(actual, desired, decimal=14) + + actual = rng.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) + desired = np.array([[1.47145377828516666, 0.15052899268012659], + [0.00943803056963588, 1.02647251615666169], + [0.332334982684171, 0.15451287602753125]]) + assert_array_almost_equal(actual, desired, decimal=14) + + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) + desired = np.array([[9.597154162763948, 11.725484450296079], + [10.413711048138335, 3.694475922923986], + [13.484222138963087, 14.377255424602957]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_noncentral_f(self): + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum=5, dfden=2, nonc=1, + size=(3, 2)) + desired = np.array([[1.40598099674926669, 0.34207973179285761], + [3.57715069265772545, 7.92632662577829805], + [0.43741599463544162, 1.1774208752428319]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_noncentral_f_nan(self): + random.seed(self.seed) + actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan) + assert np.isnan(actual) + + def test_normal(self): + rng = random.RandomState(self.seed) + actual = rng.normal(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[2.80378370443726244, 3.59863924443872163], + [3.121433477601256, -0.33382987590723379], + [4.18552478636557357, 4.46410668111310471]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_normal_0(self): + assert_equal(random.normal(scale=0), 0) + assert_raises(ValueError, random.normal, scale=-0.) + + def test_pareto(self): + rng = random.RandomState(self.seed) + actual = rng.pareto(a=.123456789, size=(3, 2)) + desired = np.array( + [[2.46852460439034849e+03, 1.41286880810518346e+03], + [5.28287797029485181e+07, 6.57720981047328785e+07], + [1.40840323350391515e+02, 1.98390255135251704e+05]]) + # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this + # matrix differs by 24 nulps. Discussion: + # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html + # Consensus is that this is probably some gcc quirk that affects + # rounding but not in any important way, so we just use a looser + # tolerance on this test: + np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30) + + def test_poisson(self): + rng = random.RandomState(self.seed) + actual = rng.poisson(lam=.123456789, size=(3, 2)) + desired = np.array([[0, 0], + [1, 0], + [0, 0]]) + assert_array_equal(actual, desired) + + def test_poisson_exceptions(self): + lambig = np.iinfo('l').max + lamneg = -1 + assert_raises(ValueError, random.poisson, lamneg) + assert_raises(ValueError, random.poisson, [lamneg] * 10) + assert_raises(ValueError, random.poisson, lambig) + assert_raises(ValueError, random.poisson, [lambig] * 10) + with warnings.catch_warnings(): + warnings.simplefilter('ignore', RuntimeWarning) + assert_raises(ValueError, random.poisson, np.nan) + assert_raises(ValueError, random.poisson, [np.nan] * 10) + + def test_power(self): + rng = random.RandomState(self.seed) + actual = rng.power(a=.123456789, size=(3, 2)) + desired = np.array([[0.02048932883240791, 0.01424192241128213], + [0.38446073748535298, 0.39499689943484395], + [0.00177699707563439, 0.13115505880863756]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_rayleigh(self): + rng = random.RandomState(self.seed) + actual = rng.rayleigh(scale=10, size=(3, 2)) + desired = np.array([[13.8882496494248393, 13.383318339044731], + [20.95413364294492098, 21.08285015800712614], + [11.06066537006854311, 17.35468505778271009]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_rayleigh_0(self): + assert_equal(random.rayleigh(scale=0), 0) + assert_raises(ValueError, random.rayleigh, scale=-0.) + + def test_standard_cauchy(self): + rng = random.RandomState(self.seed) + actual = rng.standard_cauchy(size=(3, 2)) + desired = np.array([[0.77127660196445336, -6.55601161955910605], + [0.93582023391158309, -2.07479293013759447], + [-4.74601644297011926, 0.18338989290760804]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_exponential(self): + rng = random.RandomState(self.seed) + actual = rng.standard_exponential(size=(3, 2)) + desired = np.array([[0.96441739162374596, 0.89556604882105506], + [2.1953785836319808, 2.22243285392490542], + [0.6116915921431676, 1.50592546727413201]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_gamma(self): + rng = random.RandomState(self.seed) + actual = rng.standard_gamma(shape=3, size=(3, 2)) + desired = np.array([[5.50841531318455058, 6.62953470301903103], + [5.93988484943779227, 2.31044849402133989], + [7.54838614231317084, 8.012756093271868]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_standard_gamma_0(self): + assert_equal(random.standard_gamma(shape=0), 0) + assert_raises(ValueError, random.standard_gamma, shape=-0.) + + def test_standard_normal(self): + rng = random.RandomState(self.seed) + actual = rng.standard_normal(size=(3, 2)) + desired = np.array([[1.34016345771863121, 1.73759122771936081], + [1.498988344300628, -0.2286433324536169], + [2.031033998682787, 2.17032494605655257]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_randn_singleton(self): + rng = random.RandomState(self.seed) + actual = rng.randn() + desired = np.array(1.34016345771863121) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_t(self): + rng = random.RandomState(self.seed) + actual = rng.standard_t(df=10, size=(3, 2)) + desired = np.array([[0.97140611862659965, -0.08830486548450577], + [1.36311143689505321, -0.55317463909867071], + [-0.18473749069684214, 0.61181537341755321]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_triangular(self): + rng = random.RandomState(self.seed) + actual = rng.triangular(left=5.12, mode=10.23, right=20.34, + size=(3, 2)) + desired = np.array([[12.68117178949215784, 12.4129206149193152], + [16.20131377335158263, 16.25692138747600524], + [11.20400690911820263, 14.4978144835829923]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_uniform(self): + rng = random.RandomState(self.seed) + actual = rng.uniform(low=1.23, high=10.54, size=(3, 2)) + desired = np.array([[6.99097932346268003, 6.73801597444323974], + [9.50364421400426274, 9.53130618907631089], + [5.48995325769805476, 8.47493103280052118]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_uniform_range_bounds(self): + fmin = np.finfo('float').min + fmax = np.finfo('float').max + + func = random.uniform + assert_raises(OverflowError, func, -np.inf, 0) + assert_raises(OverflowError, func, 0, np.inf) + assert_raises(OverflowError, func, fmin, fmax) + assert_raises(OverflowError, func, [-np.inf], [0]) + assert_raises(OverflowError, func, [0], [np.inf]) + + # (fmax / 1e17) - fmin is within range, so this should not throw + # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX > + # DBL_MAX by increasing fmin a bit + random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17) + + def test_scalar_exception_propagation(self): + # Tests that exceptions are correctly propagated in distributions + # when called with objects that throw exceptions when converted to + # scalars. + # + # Regression test for gh: 8865 + + class ThrowingFloat(np.ndarray): + def __float__(self): + raise TypeError + + throwing_float = np.array(1.0).view(ThrowingFloat) + assert_raises(TypeError, random.uniform, throwing_float, + throwing_float) + + class ThrowingInteger(np.ndarray): + def __int__(self): + raise TypeError + + throwing_int = np.array(1).view(ThrowingInteger) + assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1) + + def test_vonmises(self): + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) + desired = np.array([[2.28567572673902042, 2.89163838442285037], + [0.38198375564286025, 2.57638023113890746], + [1.19153771588353052, 1.83509849681825354]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_vonmises_small(self): + # check infinite loop, gh-4720 + random.seed(self.seed) + r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6) + assert_(np.isfinite(r).all()) + + def test_vonmises_large(self): + # guard against changes in RandomState when Generator is fixed + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu=0., kappa=1e7, size=3) + desired = np.array([4.634253748521111e-04, + 3.558873596114509e-04, + -2.337119622577433e-04]) + assert_array_almost_equal(actual, desired, decimal=8) + + def test_vonmises_nan(self): + random.seed(self.seed) + r = random.vonmises(mu=0., kappa=np.nan) + assert_(np.isnan(r)) + + def test_wald(self): + rng = random.RandomState(self.seed) + actual = rng.wald(mean=1.23, scale=1.54, size=(3, 2)) + desired = np.array([[3.82935265715889983, 5.13125249184285526], + [0.35045403618358717, 1.50832396872003538], + [0.24124319895843183, 0.22031101461955038]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_weibull(self): + rng = random.RandomState(self.seed) + actual = rng.weibull(a=1.23, size=(3, 2)) + desired = np.array([[0.97097342648766727, 0.91422896443565516], + [1.89517770034962929, 1.91414357960479564], + [0.67057783752390987, 1.39494046635066793]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_weibull_0(self): + random.seed(self.seed) + assert_equal(random.weibull(a=0, size=12), np.zeros(12)) + assert_raises(ValueError, random.weibull, a=-0.) + + def test_zipf(self): + rng = random.RandomState(self.seed) + actual = rng.zipf(a=1.23, size=(3, 2)) + desired = np.array([[66, 29], + [1, 1], + [3, 13]]) + assert_array_equal(actual, desired) + + +class TestBroadcast: + # tests that functions that broadcast behave + # correctly when presented with non-scalar arguments + seed = 123456789 + + def test_uniform(self): + low = [0] + high = [1] + desired = np.array([0.53283302478975902, + 0.53413660089041659, + 0.50955303552646702]) + + rng = random.RandomState(self.seed) + actual = rng.uniform(low * 3, high) + assert_array_almost_equal(actual, desired, decimal=14) + + rng = random.RandomState(self.seed) + actual = rng.uniform(low, high * 3) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_normal(self): + loc = [0] + scale = [1] + bad_scale = [-1] + desired = np.array([2.2129019979039612, + 2.1283977976520019, + 1.8417114045748335]) + + rng = random.RandomState(self.seed) + actual = rng.normal(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.normal, loc * 3, bad_scale) + + rng = random.RandomState(self.seed) + actual = rng.normal(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.normal, loc, bad_scale * 3) + + def test_beta(self): + a = [1] + b = [2] + bad_a = [-1] + bad_b = [-2] + desired = np.array([0.19843558305989056, + 0.075230336409423643, + 0.24976865978980844]) + + rng = random.RandomState(self.seed) + actual = rng.beta(a * 3, b) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.beta, bad_a * 3, b) + assert_raises(ValueError, rng.beta, a * 3, bad_b) + + rng = random.RandomState(self.seed) + actual = rng.beta(a, b * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.beta, bad_a, b * 3) + assert_raises(ValueError, rng.beta, a, bad_b * 3) + + def test_exponential(self): + scale = [1] + bad_scale = [-1] + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + rng = random.RandomState(self.seed) + actual = rng.exponential(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.exponential, bad_scale * 3) + + def test_standard_gamma(self): + shape = [1] + bad_shape = [-1] + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + rng = random.RandomState(self.seed) + actual = rng.standard_gamma(shape * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.standard_gamma, bad_shape * 3) + + def test_gamma(self): + shape = [1] + scale = [2] + bad_shape = [-1] + bad_scale = [-2] + desired = np.array([1.5221370731769048, + 1.5277256455738331, + 1.4248762625178359]) + + rng = random.RandomState(self.seed) + actual = rng.gamma(shape * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.gamma, bad_shape * 3, scale) + assert_raises(ValueError, rng.gamma, shape * 3, bad_scale) + + rng = random.RandomState(self.seed) + actual = rng.gamma(shape, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.gamma, bad_shape, scale * 3) + assert_raises(ValueError, rng.gamma, shape, bad_scale * 3) + + def test_f(self): + dfnum = [1] + dfden = [2] + bad_dfnum = [-1] + bad_dfden = [-2] + desired = np.array([0.80038951638264799, + 0.86768719635363512, + 2.7251095168386801]) + + rng = random.RandomState(self.seed) + actual = rng.f(dfnum * 3, dfden) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.f, bad_dfnum * 3, dfden) + assert_raises(ValueError, rng.f, dfnum * 3, bad_dfden) + + rng = random.RandomState(self.seed) + actual = rng.f(dfnum, dfden * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.f, bad_dfnum, dfden * 3) + assert_raises(ValueError, rng.f, dfnum, bad_dfden * 3) + + def test_noncentral_f(self): + dfnum = [2] + dfden = [3] + nonc = [4] + bad_dfnum = [0] + bad_dfden = [-1] + bad_nonc = [-2] + desired = np.array([9.1393943263705211, + 13.025456344595602, + 8.8018098359100545]) + + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum * 3, dfden, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert np.all(np.isnan(rng.noncentral_f(dfnum, dfden, [np.nan] * 3))) + + assert_raises(ValueError, rng.noncentral_f, bad_dfnum * 3, dfden, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum * 3, bad_dfden, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum * 3, dfden, bad_nonc) + + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum, dfden * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.noncentral_f, bad_dfnum, dfden * 3, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum, bad_dfden * 3, nonc) + assert_raises(ValueError, rng.noncentral_f, dfnum, dfden * 3, bad_nonc) + + rng = random.RandomState(self.seed) + actual = rng.noncentral_f(dfnum, dfden, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.noncentral_f, bad_dfnum, dfden, nonc * 3) + assert_raises(ValueError, rng.noncentral_f, dfnum, bad_dfden, nonc * 3) + assert_raises(ValueError, rng.noncentral_f, dfnum, dfden, bad_nonc * 3) + + def test_noncentral_f_small_df(self): + rng = random.RandomState(self.seed) + desired = np.array([6.869638627492048, 0.785880199263955]) + actual = rng.noncentral_f(0.9, 0.9, 2, size=2) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_chisquare(self): + df = [1] + bad_df = [-1] + desired = np.array([0.57022801133088286, + 0.51947702108840776, + 0.1320969254923558]) + + rng = random.RandomState(self.seed) + actual = rng.chisquare(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.chisquare, bad_df * 3) + + def test_noncentral_chisquare(self): + df = [1] + nonc = [2] + bad_df = [-1] + bad_nonc = [-2] + desired = np.array([9.0015599467913763, + 4.5804135049718742, + 6.0872302432834564]) + + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.noncentral_chisquare, bad_df * 3, nonc) + assert_raises(ValueError, rng.noncentral_chisquare, df * 3, bad_nonc) + + rng = random.RandomState(self.seed) + actual = rng.noncentral_chisquare(df, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.noncentral_chisquare, bad_df, nonc * 3) + assert_raises(ValueError, rng.noncentral_chisquare, df, bad_nonc * 3) + + def test_standard_t(self): + df = [1] + bad_df = [-1] + desired = np.array([3.0702872575217643, + 5.8560725167361607, + 1.0274791436474273]) + + rng = random.RandomState(self.seed) + actual = rng.standard_t(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.standard_t, bad_df * 3) + assert_raises(ValueError, random.standard_t, bad_df * 3) + + def test_vonmises(self): + mu = [2] + kappa = [1] + bad_kappa = [-1] + desired = np.array([2.9883443664201312, + -2.7064099483995943, + -1.8672476700665914]) + + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu * 3, kappa) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.vonmises, mu * 3, bad_kappa) + + rng = random.RandomState(self.seed) + actual = rng.vonmises(mu, kappa * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.vonmises, mu, bad_kappa * 3) + + def test_pareto(self): + a = [1] + bad_a = [-1] + desired = np.array([1.1405622680198362, + 1.1465519762044529, + 1.0389564467453547]) + + rng = random.RandomState(self.seed) + actual = rng.pareto(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.pareto, bad_a * 3) + assert_raises(ValueError, random.pareto, bad_a * 3) + + def test_weibull(self): + a = [1] + bad_a = [-1] + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + rng = random.RandomState(self.seed) + actual = rng.weibull(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.weibull, bad_a * 3) + assert_raises(ValueError, random.weibull, bad_a * 3) + + def test_power(self): + a = [1] + bad_a = [-1] + desired = np.array([0.53283302478975902, + 0.53413660089041659, + 0.50955303552646702]) + + rng = random.RandomState(self.seed) + actual = rng.power(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.power, bad_a * 3) + assert_raises(ValueError, random.power, bad_a * 3) + + def test_laplace(self): + loc = [0] + scale = [1] + bad_scale = [-1] + desired = np.array([0.067921356028507157, + 0.070715642226971326, + 0.019290950698972624]) + + rng = random.RandomState(self.seed) + actual = rng.laplace(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.laplace, loc * 3, bad_scale) + + rng = random.RandomState(self.seed) + actual = rng.laplace(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.laplace, loc, bad_scale * 3) + + def test_gumbel(self): + loc = [0] + scale = [1] + bad_scale = [-1] + desired = np.array([0.2730318639556768, + 0.26936705726291116, + 0.33906220393037939]) + + rng = random.RandomState(self.seed) + actual = rng.gumbel(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.gumbel, loc * 3, bad_scale) + + rng = random.RandomState(self.seed) + actual = rng.gumbel(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.gumbel, loc, bad_scale * 3) + + def test_logistic(self): + loc = [0] + scale = [1] + bad_scale = [-1] + desired = np.array([0.13152135837586171, + 0.13675915696285773, + 0.038216792802833396]) + + rng = random.RandomState(self.seed) + actual = rng.logistic(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.logistic, loc * 3, bad_scale) + + rng = random.RandomState(self.seed) + actual = rng.logistic(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.logistic, loc, bad_scale * 3) + assert_equal(rng.logistic(1.0, 0.0), 1.0) + + def test_lognormal(self): + mean = [0] + sigma = [1] + bad_sigma = [-1] + desired = np.array([9.1422086044848427, + 8.4013952870126261, + 6.3073234116578671]) + + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean * 3, sigma) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.lognormal, mean * 3, bad_sigma) + assert_raises(ValueError, random.lognormal, mean * 3, bad_sigma) + + rng = random.RandomState(self.seed) + actual = rng.lognormal(mean, sigma * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.lognormal, mean, bad_sigma * 3) + assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3) + + def test_rayleigh(self): + scale = [1] + bad_scale = [-1] + desired = np.array([1.2337491937897689, + 1.2360119924878694, + 1.1936818095781789]) + + rng = random.RandomState(self.seed) + actual = rng.rayleigh(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.rayleigh, bad_scale * 3) + + def test_wald(self): + mean = [0.5] + scale = [1] + bad_mean = [0] + bad_scale = [-2] + desired = np.array([0.11873681120271318, + 0.12450084820795027, + 0.9096122728408238]) + + rng = random.RandomState(self.seed) + actual = rng.wald(mean * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.wald, bad_mean * 3, scale) + assert_raises(ValueError, rng.wald, mean * 3, bad_scale) + assert_raises(ValueError, random.wald, bad_mean * 3, scale) + assert_raises(ValueError, random.wald, mean * 3, bad_scale) + + rng = random.RandomState(self.seed) + actual = rng.wald(mean, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.wald, bad_mean, scale * 3) + assert_raises(ValueError, rng.wald, mean, bad_scale * 3) + assert_raises(ValueError, rng.wald, 0.0, 1) + assert_raises(ValueError, rng.wald, 0.5, 0.0) + + def test_triangular(self): + left = [1] + right = [3] + mode = [2] + bad_left_one = [3] + bad_mode_one = [4] + bad_left_two, bad_mode_two = right * 2 + desired = np.array([2.03339048710429, + 2.0347400359389356, + 2.0095991069536208]) + + rng = random.RandomState(self.seed) + actual = rng.triangular(left * 3, mode, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.triangular, bad_left_one * 3, mode, right) + assert_raises(ValueError, rng.triangular, left * 3, bad_mode_one, right) + assert_raises(ValueError, rng.triangular, bad_left_two * 3, bad_mode_two, + right) + + rng = random.RandomState(self.seed) + actual = rng.triangular(left, mode * 3, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.triangular, bad_left_one, mode * 3, right) + assert_raises(ValueError, rng.triangular, left, bad_mode_one * 3, right) + assert_raises(ValueError, rng.triangular, bad_left_two, bad_mode_two * 3, + right) + + rng = random.RandomState(self.seed) + actual = rng.triangular(left, mode, right * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rng.triangular, bad_left_one, mode, right * 3) + assert_raises(ValueError, rng.triangular, left, bad_mode_one, right * 3) + assert_raises(ValueError, rng.triangular, bad_left_two, bad_mode_two, + right * 3) + + assert_raises(ValueError, rng.triangular, 10., 0., 20.) + assert_raises(ValueError, rng.triangular, 10., 25., 20.) + assert_raises(ValueError, rng.triangular, 10., 10., 10.) + + def test_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + desired = np.array([1, 1, 1]) + + rng = random.RandomState(self.seed) + actual = rng.binomial(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, rng.binomial, bad_n * 3, p) + assert_raises(ValueError, rng.binomial, n * 3, bad_p_one) + assert_raises(ValueError, rng.binomial, n * 3, bad_p_two) + + rng = random.RandomState(self.seed) + actual = rng.binomial(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, rng.binomial, bad_n, p * 3) + assert_raises(ValueError, rng.binomial, n, bad_p_one * 3) + assert_raises(ValueError, rng.binomial, n, bad_p_two * 3) + + def test_negative_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + desired = np.array([1, 0, 1]) + + rng = random.RandomState(self.seed) + actual = rng.negative_binomial(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, rng.negative_binomial, bad_n * 3, p) + assert_raises(ValueError, rng.negative_binomial, n * 3, bad_p_one) + assert_raises(ValueError, rng.negative_binomial, n * 3, bad_p_two) + + rng = random.RandomState(self.seed) + actual = rng.negative_binomial(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, rng.negative_binomial, bad_n, p * 3) + assert_raises(ValueError, rng.negative_binomial, n, bad_p_one * 3) + assert_raises(ValueError, rng.negative_binomial, n, bad_p_two * 3) + + def test_poisson(self): + max_lam = random.RandomState()._poisson_lam_max + + lam = [1] + bad_lam_one = [-1] + bad_lam_two = [max_lam * 2] + desired = np.array([1, 1, 0]) + + rng = random.RandomState(self.seed) + actual = rng.poisson(lam * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, rng.poisson, bad_lam_one * 3) + assert_raises(ValueError, rng.poisson, bad_lam_two * 3) + + def test_zipf(self): + a = [2] + bad_a = [0] + desired = np.array([2, 2, 1]) + + rng = random.RandomState(self.seed) + actual = rng.zipf(a * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, rng.zipf, bad_a * 3) + with np.errstate(invalid='ignore'): + assert_raises(ValueError, rng.zipf, np.nan) + assert_raises(ValueError, rng.zipf, [0, 0, np.nan]) + + def test_geometric(self): + p = [0.5] + bad_p_one = [-1] + bad_p_two = [1.5] + desired = np.array([2, 2, 2]) + + rng = random.RandomState(self.seed) + actual = rng.geometric(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, rng.geometric, bad_p_one * 3) + assert_raises(ValueError, rng.geometric, bad_p_two * 3) + + def test_hypergeometric(self): + ngood = [1] + nbad = [2] + nsample = [2] + bad_ngood = [-1] + bad_nbad = [-2] + bad_nsample_one = [0] + bad_nsample_two = [4] + desired = np.array([1, 1, 1]) + + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood * 3, nbad, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, rng.hypergeometric, bad_ngood * 3, nbad, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, bad_nbad, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, nbad, bad_nsample_one) + assert_raises(ValueError, rng.hypergeometric, ngood * 3, nbad, bad_nsample_two) + + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood, nbad * 3, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, rng.hypergeometric, bad_ngood, nbad * 3, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood, bad_nbad * 3, nsample) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad * 3, bad_nsample_one) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad * 3, bad_nsample_two) + + rng = random.RandomState(self.seed) + actual = rng.hypergeometric(ngood, nbad, nsample * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, rng.hypergeometric, bad_ngood, nbad, nsample * 3) + assert_raises(ValueError, rng.hypergeometric, ngood, bad_nbad, nsample * 3) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad, bad_nsample_one * 3) + assert_raises(ValueError, rng.hypergeometric, ngood, nbad, bad_nsample_two * 3) + + assert_raises(ValueError, rng.hypergeometric, -1, 10, 20) + assert_raises(ValueError, rng.hypergeometric, 10, -1, 20) + assert_raises(ValueError, rng.hypergeometric, 10, 10, 0) + assert_raises(ValueError, rng.hypergeometric, 10, 10, 25) + + def test_logseries(self): + p = [0.5] + bad_p_one = [2] + bad_p_two = [-1] + desired = np.array([1, 1, 1]) + + rng = random.RandomState(self.seed) + actual = rng.logseries(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, rng.logseries, bad_p_one * 3) + assert_raises(ValueError, rng.logseries, bad_p_two * 3) + + +@pytest.mark.skipif(IS_WASM, reason="can't start thread") +class TestThread: + # make sure each state produces the same sequence even in threads + seeds = range(4) + + def check_function(self, function, sz): + from threading import Thread + + out1 = np.empty((len(self.seeds),) + sz) + out2 = np.empty((len(self.seeds),) + sz) + + # threaded generation + t = [Thread(target=function, args=(random.RandomState(s), o)) + for s, o in zip(self.seeds, out1)] + [x.start() for x in t] + [x.join() for x in t] + + # the same serial + for s, o in zip(self.seeds, out2): + function(random.RandomState(s), o) + + # these platforms change x87 fpu precision mode in threads + if np.intp().dtype.itemsize == 4 and sys.platform == "win32": + assert_array_almost_equal(out1, out2) + else: + assert_array_equal(out1, out2) + + def test_normal(self): + def gen_random(state, out): + out[...] = state.normal(size=10000) + + self.check_function(gen_random, sz=(10000,)) + + def test_exp(self): + def gen_random(state, out): + out[...] = state.exponential(scale=np.ones((100, 1000))) + + self.check_function(gen_random, sz=(100, 1000)) + + def test_multinomial(self): + def gen_random(state, out): + out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000) + + self.check_function(gen_random, sz=(10000, 6)) + + +# See Issue #4263 +class TestSingleEltArrayInput: + def _create_arrays(self): + return np.array([2]), np.array([3]), np.array([4]), (1,) + + def test_one_arg_funcs(self): + argOne, _, _, tgtShape = self._create_arrays() + funcs = (random.exponential, random.standard_gamma, + random.chisquare, random.standard_t, + random.pareto, random.weibull, + random.power, random.rayleigh, + random.poisson, random.zipf, + random.geometric, random.logseries) + + probfuncs = (random.geometric, random.logseries) + + for func in funcs: + if func in probfuncs: # p < 1.0 + out = func(np.array([0.5])) + + else: + out = func(argOne) + + assert_equal(out.shape, tgtShape) + + def test_two_arg_funcs(self): + argOne, argTwo, _, tgtShape = self._create_arrays() + funcs = (random.uniform, random.normal, + random.beta, random.gamma, + random.f, random.noncentral_chisquare, + random.vonmises, random.laplace, + random.gumbel, random.logistic, + random.lognormal, random.wald, + random.binomial, random.negative_binomial) + + probfuncs = (random.binomial, random.negative_binomial) + + for func in funcs: + if func in probfuncs: # p <= 1 + argTwo = np.array([0.5]) + + else: + argTwo = argTwo + + out = func(argOne, argTwo) + assert_equal(out.shape, tgtShape) + + out = func(argOne[0], argTwo) + assert_equal(out.shape, tgtShape) + + out = func(argOne, argTwo[0]) + assert_equal(out.shape, tgtShape) + + def test_three_arg_funcs(self): + argOne, argTwo, argThree, tgtShape = self._create_arrays() + funcs = [random.noncentral_f, random.triangular, + random.hypergeometric] + + for func in funcs: + out = func(argOne, argTwo, argThree) + assert_equal(out.shape, tgtShape) + + out = func(argOne[0], argTwo, argThree) + assert_equal(out.shape, tgtShape) + + out = func(argOne, argTwo[0], argThree) + assert_equal(out.shape, tgtShape) + + +# Ensure returned array dtype is correct for platform +def test_integer_dtype(int_func): + random.seed(123456789) + fname, args, sha256 = int_func + f = getattr(random, fname) + actual = f(*args, size=2) + assert_(actual.dtype == np.dtype('l')) + + +def test_integer_repeat(int_func): + rng = random.RandomState(123456789) + fname, args, sha256 = int_func + f = getattr(rng, fname) + val = f(*args, size=1000000) + if sys.byteorder != 'little': + val = val.byteswap() + res = hashlib.sha256(val.view(np.int8)).hexdigest() + assert_(res == sha256) + + +def test_broadcast_size_error(): + # GH-16833 + with pytest.raises(ValueError): + random.binomial(1, [0.3, 0.7], size=(2, 1)) + with pytest.raises(ValueError): + random.binomial([1, 2], 0.3, size=(2, 1)) + with pytest.raises(ValueError): + random.binomial([1, 2], [0.3, 0.7], size=(2, 1)) + + +def test_randomstate_ctor_old_style_pickle(): + rs = np.random.RandomState(MT19937(0)) + rs.standard_normal(1) + # Directly call reduce which is used in pickling + ctor, args, state_a = rs.__reduce__() + # Simulate unpickling an old pickle that only has the name + assert args[0].__class__.__name__ == "MT19937" + b = ctor(*("MT19937",)) + b.set_state(state_a) + state_b = b.get_state(legacy=False) + + assert_equal(state_a['bit_generator'], state_b['bit_generator']) + assert_array_equal(state_a['state']['key'], state_b['state']['key']) + assert_array_equal(state_a['state']['pos'], state_b['state']['pos']) + assert_equal(state_a['has_gauss'], state_b['has_gauss']) + assert_equal(state_a['gauss'], state_b['gauss']) + + +@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state") +def test_hot_swap(restore_singleton_bitgen): + # GH 21808 + def_bg = np.random.default_rng(0) + bg = def_bg.bit_generator + np.random.set_bit_generator(bg) + assert isinstance(np.random.mtrand._rand._bit_generator, type(bg)) + + second_bg = np.random.get_bit_generator() + assert bg is second_bg + + +@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state") +def test_seed_alt_bit_gen(restore_singleton_bitgen): + # GH 21808 + bg = PCG64(0) + np.random.set_bit_generator(bg) + state = np.random.get_state(legacy=False) + np.random.seed(1) + new_state = np.random.get_state(legacy=False) + print(state) + print(new_state) + assert state["bit_generator"] == "PCG64" + assert state["state"]["state"] != new_state["state"]["state"] + assert state["state"]["inc"] != new_state["state"]["inc"] + + +@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state") +def test_state_error_alt_bit_gen(restore_singleton_bitgen): + # GH 21808 + state = np.random.get_state() + bg = PCG64(0) + np.random.set_bit_generator(bg) + with pytest.raises(ValueError, match="state must be for a PCG64"): + np.random.set_state(state) + + +@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state") +def test_swap_worked(restore_singleton_bitgen): + # GH 21808 + np.random.seed(98765) + vals = np.random.randint(0, 2 ** 30, 10) + bg = PCG64(0) + state = bg.state + np.random.set_bit_generator(bg) + state_direct = np.random.get_state(legacy=False) + for field in state: + assert state[field] == state_direct[field] + np.random.seed(98765) + pcg_vals = np.random.randint(0, 2 ** 30, 10) + assert not np.all(vals == pcg_vals) + new_state = bg.state + assert new_state["state"]["state"] != state["state"]["state"] + assert new_state["state"]["inc"] == new_state["state"]["inc"] + + +@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state") +def test_swapped_singleton_against_direct(restore_singleton_bitgen): + np.random.set_bit_generator(PCG64(98765)) + singleton_vals = np.random.randint(0, 2 ** 30, 10) + rg = np.random.RandomState(PCG64(98765)) + non_singleton_vals = rg.randint(0, 2 ** 30, 10) + assert_equal(non_singleton_vals, singleton_vals) diff --git a/local_packages/numpy/random/tests/test_randomstate_regression.py b/local_packages/numpy/random/tests/test_randomstate_regression.py new file mode 100644 index 0000000..1c8882d --- /dev/null +++ b/local_packages/numpy/random/tests/test_randomstate_regression.py @@ -0,0 +1,213 @@ +import sys + +import pytest + +import numpy as np +from numpy import random +from numpy.testing import assert_, assert_array_equal, assert_raises + + +class TestRegression: + + def test_VonMises_range(self): + # Make sure generated random variables are in [-pi, pi]. + # Regression test for ticket #986. + for mu in np.linspace(-7., 7., 5): + r = random.vonmises(mu, 1, 50) + assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) + + def test_hypergeometric_range(self): + # Test for ticket #921 + assert_(np.all(random.hypergeometric(3, 18, 11, size=10) < 4)) + assert_(np.all(random.hypergeometric(18, 3, 11, size=10) > 0)) + + # Test for ticket #5623 + args = [ + (2**20 - 2, 2**20 - 2, 2**20 - 2), # Check for 32-bit systems + ] + is_64bits = sys.maxsize > 2**32 + if is_64bits and sys.platform != 'win32': + # Check for 64-bit systems + args.append((2**40 - 2, 2**40 - 2, 2**40 - 2)) + for arg in args: + assert_(random.hypergeometric(*arg) > 0) + + def test_logseries_convergence(self): + # Test for ticket #923 + N = 1000 + random.seed(0) + rvsn = random.logseries(0.8, size=N) + # these two frequency counts should be close to theoretical + # numbers with this large sample + # theoretical large N result is 0.49706795 + freq = np.sum(rvsn == 1) / N + msg = f'Frequency was {freq:f}, should be > 0.45' + assert_(freq > 0.45, msg) + # theoretical large N result is 0.19882718 + freq = np.sum(rvsn == 2) / N + msg = f'Frequency was {freq:f}, should be < 0.23' + assert_(freq < 0.23, msg) + + def test_shuffle_mixed_dimension(self): + # Test for trac ticket #2074 + for t in [[1, 2, 3, None], + [(1, 1), (2, 2), (3, 3), None], + [1, (2, 2), (3, 3), None], + [(1, 1), 2, 3, None]]: + rng = random.RandomState(12345) + shuffled = list(t) + rng.shuffle(shuffled) + expected = np.array([t[0], t[3], t[1], t[2]], dtype=object) + assert_array_equal(np.array(shuffled, dtype=object), expected) + + def test_call_within_randomstate(self): + # Check that custom RandomState does not call into global state + m = random.RandomState() + res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3]) + for i in range(3): + random.seed(i) + m.seed(4321) + # If m.state is not honored, the result will change + assert_array_equal(m.choice(10, size=10, p=np.ones(10) / 10.), res) + + def test_multivariate_normal_size_types(self): + # Test for multivariate_normal issue with 'size' argument. + # Check that the multivariate_normal size argument can be a + # numpy integer. + random.multivariate_normal([0], [[0]], size=1) + random.multivariate_normal([0], [[0]], size=np.int_(1)) + random.multivariate_normal([0], [[0]], size=np.int64(1)) + + def test_beta_small_parameters(self): + # Test that beta with small a and b parameters does not produce + # NaNs due to roundoff errors causing 0 / 0, gh-5851 + random.seed(1234567890) + x = random.beta(0.0001, 0.0001, size=100) + assert_(not np.any(np.isnan(x)), 'Nans in random.beta') + + def test_choice_sum_of_probs_tolerance(self): + # The sum of probs should be 1.0 with some tolerance. + # For low precision dtypes the tolerance was too tight. + # See numpy github issue 6123. + random.seed(1234) + a = [1, 2, 3] + counts = [4, 4, 2] + for dt in np.float16, np.float32, np.float64: + probs = np.array(counts, dtype=dt) / sum(counts) + c = random.choice(a, p=probs) + assert_(c in a) + assert_raises(ValueError, random.choice, a, p=probs * 0.9) + + def test_shuffle_of_array_of_different_length_strings(self): + # Test that permuting an array of different length strings + # will not cause a segfault on garbage collection + # Tests gh-7710 + random.seed(1234) + + a = np.array(['a', 'a' * 1000]) + + for _ in range(100): + random.shuffle(a) + + # Force Garbage Collection - should not segfault. + import gc + gc.collect() + + def test_shuffle_of_array_of_objects(self): + # Test that permuting an array of objects will not cause + # a segfault on garbage collection. + # See gh-7719 + random.seed(1234) + a = np.array([np.arange(1), np.arange(4)], dtype=object) + + for _ in range(1000): + random.shuffle(a) + + # Force Garbage Collection - should not segfault. + import gc + gc.collect() + + def test_permutation_subclass(self): + class N(np.ndarray): + pass + + rng = random.RandomState(1) + orig = np.arange(3).view(N) + perm = rng.permutation(orig) + assert_array_equal(perm, np.array([0, 2, 1])) + assert_array_equal(orig, np.arange(3).view(N)) + + class M: + a = np.arange(5) + + def __array__(self, dtype=None, copy=None): + return self.a + + rng = random.RandomState(1) + m = M() + perm = rng.permutation(m) + assert_array_equal(perm, np.array([2, 1, 4, 0, 3])) + assert_array_equal(m.__array__(), np.arange(5)) + + def test_warns_byteorder(self): + # GH 13159 + other_byteord_dt = 'i4' + with pytest.deprecated_call(match='non-native byteorder is not'): + random.randint(0, 200, size=10, dtype=other_byteord_dt) + + def test_named_argument_initialization(self): + # GH 13669 + rs1 = np.random.RandomState(123456789) + rs2 = np.random.RandomState(seed=123456789) + assert rs1.randint(0, 100) == rs2.randint(0, 100) + + def test_choice_retun_dtype(self): + # GH 9867, now long since the NumPy default changed. + c = np.random.choice(10, p=[.1] * 10, size=2) + assert c.dtype == np.dtype(np.long) + c = np.random.choice(10, p=[.1] * 10, replace=False, size=2) + assert c.dtype == np.dtype(np.long) + c = np.random.choice(10, size=2) + assert c.dtype == np.dtype(np.long) + c = np.random.choice(10, replace=False, size=2) + assert c.dtype == np.dtype(np.long) + + @pytest.mark.skipif(np.iinfo('l').max < 2**32, + reason='Cannot test with 32-bit C long') + def test_randint_117(self): + # GH 14189 + rng = random.RandomState(0) + expected = np.array([2357136044, 2546248239, 3071714933, 3626093760, + 2588848963, 3684848379, 2340255427, 3638918503, + 1819583497, 2678185683], dtype='int64') + actual = rng.randint(2**32, size=10) + assert_array_equal(actual, expected) + + def test_p_zero_stream(self): + # Regression test for gh-14522. Ensure that future versions + # generate the same variates as version 1.16. + rng = random.RandomState(12345) + assert_array_equal(rng.binomial(1, [0, 0.25, 0.5, 0.75, 1]), + [0, 0, 0, 1, 1]) + + def test_n_zero_stream(self): + # Regression test for gh-14522. Ensure that future versions + # generate the same variates as version 1.16. + rng = random.RandomState(8675309) + expected = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [3, 4, 2, 3, 3, 1, 5, 3, 1, 3]]) + assert_array_equal(rng.binomial([[0], [10]], 0.25, size=(2, 10)), + expected) + + +def test_multinomial_empty(): + # gh-20483 + # Ensure that empty p-vals are correctly handled + assert random.multinomial(10, []).shape == (0,) + assert random.multinomial(3, [], size=(7, 5, 3)).shape == (7, 5, 3, 0) + + +def test_multinomial_1d_pval(): + # gh-20483 + with pytest.raises(TypeError, match="pvals must be a 1-d"): + random.multinomial(10, 0.3) diff --git a/local_packages/numpy/random/tests/test_regression.py b/local_packages/numpy/random/tests/test_regression.py new file mode 100644 index 0000000..eeaf6d2 --- /dev/null +++ b/local_packages/numpy/random/tests/test_regression.py @@ -0,0 +1,175 @@ +import inspect +import sys + +import pytest + +import numpy as np +from numpy import random +from numpy.testing import IS_PYPY, assert_, assert_array_equal, assert_raises + + +class TestRegression: + + def test_VonMises_range(self): + # Make sure generated random variables are in [-pi, pi]. + # Regression test for ticket #986. + for mu in np.linspace(-7., 7., 5): + r = random.mtrand.vonmises(mu, 1, 50) + assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) + + def test_hypergeometric_range(self): + # Test for ticket #921 + assert_(np.all(np.random.hypergeometric(3, 18, 11, size=10) < 4)) + assert_(np.all(np.random.hypergeometric(18, 3, 11, size=10) > 0)) + + # Test for ticket #5623 + args = [ + (2**20 - 2, 2**20 - 2, 2**20 - 2), # Check for 32-bit systems + ] + is_64bits = sys.maxsize > 2**32 + if is_64bits and sys.platform != 'win32': + # Check for 64-bit systems + args.append((2**40 - 2, 2**40 - 2, 2**40 - 2)) + for arg in args: + assert_(np.random.hypergeometric(*arg) > 0) + + def test_logseries_convergence(self): + # Test for ticket #923 + N = 1000 + np.random.seed(0) + rvsn = np.random.logseries(0.8, size=N) + # these two frequency counts should be close to theoretical + # numbers with this large sample + # theoretical large N result is 0.49706795 + freq = np.sum(rvsn == 1) / N + msg = f'Frequency was {freq:f}, should be > 0.45' + assert_(freq > 0.45, msg) + # theoretical large N result is 0.19882718 + freq = np.sum(rvsn == 2) / N + msg = f'Frequency was {freq:f}, should be < 0.23' + assert_(freq < 0.23, msg) + + def test_shuffle_mixed_dimension(self): + # Test for trac ticket #2074 + for t in [[1, 2, 3, None], + [(1, 1), (2, 2), (3, 3), None], + [1, (2, 2), (3, 3), None], + [(1, 1), 2, 3, None]]: + rng = np.random.RandomState(12345) + shuffled = list(t) + rng.shuffle(shuffled) + expected = np.array([t[0], t[3], t[1], t[2]], dtype=object) + assert_array_equal(np.array(shuffled, dtype=object), expected) + + def test_call_within_randomstate(self): + # Check that custom RandomState does not call into global state + m = np.random.RandomState() + res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3]) + for i in range(3): + np.random.seed(i) + m.seed(4321) + # If m.state is not honored, the result will change + assert_array_equal(m.choice(10, size=10, p=np.ones(10) / 10.), res) + + def test_multivariate_normal_size_types(self): + # Test for multivariate_normal issue with 'size' argument. + # Check that the multivariate_normal size argument can be a + # numpy integer. + np.random.multivariate_normal([0], [[0]], size=1) + np.random.multivariate_normal([0], [[0]], size=np.int_(1)) + np.random.multivariate_normal([0], [[0]], size=np.int64(1)) + + def test_beta_small_parameters(self): + # Test that beta with small a and b parameters does not produce + # NaNs due to roundoff errors causing 0 / 0, gh-5851 + np.random.seed(1234567890) + x = np.random.beta(0.0001, 0.0001, size=100) + assert_(not np.any(np.isnan(x)), 'Nans in np.random.beta') + + def test_choice_sum_of_probs_tolerance(self): + # The sum of probs should be 1.0 with some tolerance. + # For low precision dtypes the tolerance was too tight. + # See numpy github issue 6123. + np.random.seed(1234) + a = [1, 2, 3] + counts = [4, 4, 2] + for dt in np.float16, np.float32, np.float64: + probs = np.array(counts, dtype=dt) / sum(counts) + c = np.random.choice(a, p=probs) + assert_(c in a) + assert_raises(ValueError, np.random.choice, a, p=probs * 0.9) + + def test_shuffle_of_array_of_different_length_strings(self): + # Test that permuting an array of different length strings + # will not cause a segfault on garbage collection + # Tests gh-7710 + np.random.seed(1234) + + a = np.array(['a', 'a' * 1000]) + + for _ in range(100): + np.random.shuffle(a) + + # Force Garbage Collection - should not segfault. + import gc + gc.collect() + + def test_shuffle_of_array_of_objects(self): + # Test that permuting an array of objects will not cause + # a segfault on garbage collection. + # See gh-7719 + np.random.seed(1234) + a = np.array([np.arange(1), np.arange(4)], dtype=object) + + for _ in range(1000): + np.random.shuffle(a) + + # Force Garbage Collection - should not segfault. + import gc + gc.collect() + + def test_permutation_subclass(self): + class N(np.ndarray): + pass + + rng = np.random.RandomState(1) + orig = np.arange(3).view(N) + perm = rng.permutation(orig) + assert_array_equal(perm, np.array([0, 2, 1])) + assert_array_equal(orig, np.arange(3).view(N)) + + class M: + a = np.arange(5) + + def __array__(self, dtype=None, copy=None): + return self.a + + rng = np.random.RandomState(1) + m = M() + perm = rng.permutation(m) + assert_array_equal(perm, np.array([2, 1, 4, 0, 3])) + assert_array_equal(m.__array__(), np.arange(5)) + + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + @pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") + @pytest.mark.parametrize( + "cls", + [ + random.Generator, + random.MT19937, + random.PCG64, + random.PCG64DXSM, + random.Philox, + random.RandomState, + random.SFC64, + random.BitGenerator, + random.SeedSequence, + random.bit_generator.SeedlessSeedSequence, + ], + ) + def test_inspect_signature(self, cls: type) -> None: + assert hasattr(cls, "__text_signature__") + try: + inspect.signature(cls) + except ValueError: + pytest.fail(f"invalid signature: {cls.__module__}.{cls.__qualname__}") diff --git a/local_packages/numpy/random/tests/test_seed_sequence.py b/local_packages/numpy/random/tests/test_seed_sequence.py new file mode 100644 index 0000000..87ae4ff --- /dev/null +++ b/local_packages/numpy/random/tests/test_seed_sequence.py @@ -0,0 +1,79 @@ +import numpy as np +from numpy.random import SeedSequence +from numpy.testing import assert_array_compare, assert_array_equal + + +def test_reference_data(): + """ Check that SeedSequence generates data the same as the C++ reference. + + https://gist.github.com/imneme/540829265469e673d045 + """ + inputs = [ + [3735928559, 195939070, 229505742, 305419896], + [3668361503, 4165561550, 1661411377, 3634257570], + [164546577, 4166754639, 1765190214, 1303880213], + [446610472, 3941463886, 522937693, 1882353782], + [1864922766, 1719732118, 3882010307, 1776744564], + [4141682960, 3310988675, 553637289, 902896340], + [1134851934, 2352871630, 3699409824, 2648159817], + [1240956131, 3107113773, 1283198141, 1924506131], + [2669565031, 579818610, 3042504477, 2774880435], + [2766103236, 2883057919, 4029656435, 862374500], + ] + outputs = [ + [3914649087, 576849849, 3593928901, 2229911004], + [2240804226, 3691353228, 1365957195, 2654016646], + [3562296087, 3191708229, 1147942216, 3726991905], + [1403443605, 3591372999, 1291086759, 441919183], + [1086200464, 2191331643, 560336446, 3658716651], + [3249937430, 2346751812, 847844327, 2996632307], + [2584285912, 4034195531, 3523502488, 169742686], + [959045797, 3875435559, 1886309314, 359682705], + [3978441347, 432478529, 3223635119, 138903045], + [296367413, 4262059219, 13109864, 3283683422], + ] + outputs64 = [ + [2477551240072187391, 9577394838764454085], + [15854241394484835714, 11398914698975566411], + [13708282465491374871, 16007308345579681096], + [15424829579845884309, 1898028439751125927], + [9411697742461147792, 15714068361935982142], + [10079222287618677782, 12870437757549876199], + [17326737873898640088, 729039288628699544], + [16644868984619524261, 1544825456798124994], + [1857481142255628931, 596584038813451439], + [18305404959516669237, 14103312907920476776], + ] + for seed, expected, expected64 in zip(inputs, outputs, outputs64): + expected = np.array(expected, dtype=np.uint32) + ss = SeedSequence(seed) + state = ss.generate_state(len(expected)) + assert_array_equal(state, expected) + state64 = ss.generate_state(len(expected64), dtype=np.uint64) + assert_array_equal(state64, expected64) + + +def test_zero_padding(): + """ Ensure that the implicit zero-padding does not cause problems. + """ + # Ensure that large integers are inserted in little-endian fashion to avoid + # trailing 0s. + ss0 = SeedSequence(42) + ss1 = SeedSequence(42 << 32) + assert_array_compare( + np.not_equal, + ss0.generate_state(4), + ss1.generate_state(4)) + + # Ensure backwards compatibility with the original 0.17 release for small + # integers and no spawn key. + expected42 = np.array([3444837047, 2669555309, 2046530742, 3581440988], + dtype=np.uint32) + assert_array_equal(SeedSequence(42).generate_state(4), expected42) + + # Regression test for gh-16539 to ensure that the implicit 0s don't + # conflict with spawn keys. + assert_array_compare( + np.not_equal, + SeedSequence(42, spawn_key=(0,)).generate_state(4), + expected42) diff --git a/local_packages/numpy/random/tests/test_smoke.py b/local_packages/numpy/random/tests/test_smoke.py new file mode 100644 index 0000000..5353a72 --- /dev/null +++ b/local_packages/numpy/random/tests/test_smoke.py @@ -0,0 +1,882 @@ +import pickle +from dataclasses import dataclass +from functools import partial + +import pytest + +import numpy as np +from numpy.random import MT19937, PCG64, PCG64DXSM, SFC64, Generator, Philox +from numpy.testing import assert_, assert_array_equal, assert_equal + +DTYPES_BOOL_INT_UINT = (np.bool, np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64) + + +def params_0(f): + val = f() + assert_(np.isscalar(val)) + val = f(10) + assert_(val.shape == (10,)) + val = f((10, 10)) + assert_(val.shape == (10, 10)) + val = f((10, 10, 10)) + assert_(val.shape == (10, 10, 10)) + val = f(size=(5, 5)) + assert_(val.shape == (5, 5)) + + +def params_1(f, bounded=False): + a = 5.0 + b = np.arange(2.0, 12.0) + c = np.arange(2.0, 102.0).reshape((10, 10)) + d = np.arange(2.0, 1002.0).reshape((10, 10, 10)) + e = np.array([2.0, 3.0]) + g = np.arange(2.0, 12.0).reshape((1, 10, 1)) + if bounded: + a = 0.5 + b = b / (1.5 * b.max()) + c = c / (1.5 * c.max()) + d = d / (1.5 * d.max()) + e = e / (1.5 * e.max()) + g = g / (1.5 * g.max()) + + # Scalar + f(a) + # Scalar - size + f(a, size=(10, 10)) + # 1d + f(b) + # 2d + f(c) + # 3d + f(d) + # 1d size + f(b, size=10) + # 2d - size - broadcast + f(e, size=(10, 2)) + # 3d - size + f(g, size=(10, 10, 10)) + + +def comp_state(state1, state2): + identical = True + if isinstance(state1, dict): + for key in state1: + identical &= comp_state(state1[key], state2[key]) + elif type(state1) != type(state2): + identical &= type(state1) == type(state2) + elif (isinstance(state1, (list, tuple, np.ndarray)) and isinstance( + state2, (list, tuple, np.ndarray))): + for s1, s2 in zip(state1, state2): + identical &= comp_state(s1, s2) + else: + identical &= state1 == state2 + return identical + + +def warmup(rg, n=None): + if n is None: + n = 11 + np.random.randint(0, 20) + rg.standard_normal(n) + rg.standard_normal(n) + rg.standard_normal(n, dtype=np.float32) + rg.standard_normal(n, dtype=np.float32) + rg.integers(0, 2 ** 24, n, dtype=np.uint64) + rg.integers(0, 2 ** 48, n, dtype=np.uint64) + rg.standard_gamma(11.0, n) + rg.standard_gamma(11.0, n, dtype=np.float32) + rg.random(n, dtype=np.float64) + rg.random(n, dtype=np.float32) + + +@dataclass +class RNGData: + bit_generator: type[np.random.BitGenerator] + advance: int + seed: list[int] + rg: Generator + seed_vector_bits: int + + +class RNG: + @classmethod + def _create_rng(cls): + # Overridden in test classes. Place holder to silence IDE noise + bit_generator = PCG64 + advance = None + seed = [12345] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 64 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) + + def test_init(self): + data = self._create_rng() + data.rg = Generator(data.bit_generator()) + state = data.rg.bit_generator.state + data.rg.standard_normal(1) + data.rg.standard_normal(1) + data.rg.bit_generator.state = state + new_state = data.rg.bit_generator.state + assert_(comp_state(state, new_state)) + + def test_advance(self): + data = self._create_rng() + state = data.rg.bit_generator.state + if hasattr(data.rg.bit_generator, 'advance'): + data.rg.bit_generator.advance(data.advance) + assert_(not comp_state(state, data.rg.bit_generator.state)) + else: + bitgen_name = data.rg.bit_generator.__class__.__name__ + pytest.skip(f'Advance is not supported by {bitgen_name}') + + def test_jump(self): + rg = self._create_rng().rg + state = rg.bit_generator.state + if hasattr(rg.bit_generator, 'jumped'): + bit_gen2 = rg.bit_generator.jumped() + jumped_state = bit_gen2.state + assert_(not comp_state(state, jumped_state)) + rg.random(2 * 3 * 5 * 7 * 11 * 13 * 17) + rg.bit_generator.state = state + bit_gen3 = rg.bit_generator.jumped() + rejumped_state = bit_gen3.state + assert_(comp_state(jumped_state, rejumped_state)) + else: + bitgen_name = rg.bit_generator.__class__.__name__ + if bitgen_name not in ('SFC64',): + raise AttributeError(f'no "jumped" in {bitgen_name}') + pytest.skip(f'Jump is not supported by {bitgen_name}') + + def test_uniform(self): + rg = self._create_rng().rg + r = rg.uniform(-1.0, 0.0, size=10) + assert_(len(r) == 10) + assert_((r > -1).all()) + assert_((r <= 0).all()) + + def test_uniform_array(self): + rg = self._create_rng().rg + r = rg.uniform(np.array([-1.0] * 10), 0.0, size=10) + assert_(len(r) == 10) + assert_((r > -1).all()) + assert_((r <= 0).all()) + r = rg.uniform(np.array([-1.0] * 10), + np.array([0.0] * 10), size=10) + assert_(len(r) == 10) + assert_((r > -1).all()) + assert_((r <= 0).all()) + r = rg.uniform(-1.0, np.array([0.0] * 10), size=10) + assert_(len(r) == 10) + assert_((r > -1).all()) + assert_((r <= 0).all()) + + def test_random(self): + rg = self._create_rng().rg + assert_(len(rg.random(10)) == 10) + params_0(rg.random) + + def test_standard_normal_zig(self): + rg = self._create_rng().rg + assert_(len(rg.standard_normal(10)) == 10) + + def test_standard_normal(self): + rg = self._create_rng().rg + assert_(len(rg.standard_normal(10)) == 10) + params_0(rg.standard_normal) + + def test_standard_gamma(self): + rg = self._create_rng().rg + assert_(len(rg.standard_gamma(10, 10)) == 10) + assert_(len(rg.standard_gamma(np.array([10] * 10), 10)) == 10) + params_1(rg.standard_gamma) + + def test_standard_exponential(self): + rg = self._create_rng().rg + assert_(len(rg.standard_exponential(10)) == 10) + params_0(rg.standard_exponential) + + def test_standard_exponential_float(self): + rg = self._create_rng().rg + randoms = rg.standard_exponential(10, dtype='float32') + assert_(len(randoms) == 10) + assert randoms.dtype == np.float32 + params_0(partial(rg.standard_exponential, dtype='float32')) + + def test_standard_exponential_float_log(self): + rg = self._create_rng().rg + randoms = rg.standard_exponential(10, dtype='float32', + method='inv') + assert_(len(randoms) == 10) + assert randoms.dtype == np.float32 + params_0(partial(rg.standard_exponential, dtype='float32', + method='inv')) + + def test_standard_cauchy(self): + rg = self._create_rng().rg + assert_(len(rg.standard_cauchy(10)) == 10) + params_0(rg.standard_cauchy) + + def test_standard_t(self): + rg = self._create_rng().rg + assert_(len(rg.standard_t(10, 10)) == 10) + params_1(rg.standard_t) + + def test_binomial(self): + rg = self._create_rng().rg + assert_(rg.binomial(10, .5) >= 0) + assert_(rg.binomial(1000, .5) >= 0) + + def test_reset_state(self): + rg = self._create_rng().rg + state = rg.bit_generator.state + int_1 = rg.integers(2**31) + rg.bit_generator.state = state + int_2 = rg.integers(2**31) + assert_(int_1 == int_2) + + def test_entropy_init(self): + bit_generator = self._create_rng().bit_generator + rg = Generator(bit_generator()) + rg2 = Generator(bit_generator()) + assert_(not comp_state(rg.bit_generator.state, + rg2.bit_generator.state)) + + def test_seed(self): + data = self._create_rng() + rg = Generator(data.bit_generator(*data.seed)) + rg2 = Generator(data.bit_generator(*data.seed)) + rg.random() + rg2.random() + assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) + + def test_reset_state_gauss(self): + data = self._create_rng() + rg = Generator(data.bit_generator(*data.seed)) + rg.standard_normal() + state = rg.bit_generator.state + n1 = rg.standard_normal(size=10) + rg2 = Generator(data.bit_generator()) + rg2.bit_generator.state = state + n2 = rg2.standard_normal(size=10) + assert_array_equal(n1, n2) + + def test_reset_state_uint32(self): + data = self._create_rng() + rg = Generator(data.bit_generator(*data.seed)) + rg.integers(0, 2 ** 24, 120, dtype=np.uint32) + state = rg.bit_generator.state + n1 = rg.integers(0, 2 ** 24, 10, dtype=np.uint32) + rg2 = Generator(data.bit_generator()) + rg2.bit_generator.state = state + n2 = rg2.integers(0, 2 ** 24, 10, dtype=np.uint32) + assert_array_equal(n1, n2) + + def test_reset_state_float(self): + data = self._create_rng() + rg = Generator(data.bit_generator(*data.seed)) + rg.random(dtype='float32') + state = rg.bit_generator.state + n1 = rg.random(size=10, dtype='float32') + rg2 = Generator(data.bit_generator()) + rg2.bit_generator.state = state + n2 = rg2.random(size=10, dtype='float32') + assert_((n1 == n2).all()) + + def test_shuffle(self): + rg = self._create_rng().rg + original = np.arange(200, 0, -1) + permuted = rg.permutation(original) + assert_((original != permuted).any()) + + def test_permutation(self): + rg = self._create_rng().rg + original = np.arange(200, 0, -1) + permuted = rg.permutation(original) + assert_((original != permuted).any()) + + def test_beta(self): + rg = self._create_rng().rg + vals = rg.beta(2.0, 2.0, 10) + assert_(len(vals) == 10) + vals = rg.beta(np.array([2.0] * 10), 2.0) + assert_(len(vals) == 10) + vals = rg.beta(2.0, np.array([2.0] * 10)) + assert_(len(vals) == 10) + vals = rg.beta(np.array([2.0] * 10), np.array([2.0] * 10)) + assert_(len(vals) == 10) + vals = rg.beta(np.array([2.0] * 10), np.array([[2.0]] * 10)) + assert_(vals.shape == (10, 10)) + + def test_bytes(self): + rg = self._create_rng().rg + vals = rg.bytes(10) + assert_(len(vals) == 10) + + def test_chisquare(self): + rg = self._create_rng().rg + vals = rg.chisquare(2.0, 10) + assert_(len(vals) == 10) + params_1(rg.chisquare) + + def test_exponential(self): + rg = self._create_rng().rg + vals = rg.exponential(2.0, 10) + assert_(len(vals) == 10) + params_1(rg.exponential) + + def test_f(self): + rg = self._create_rng().rg + vals = rg.f(3, 1000, 10) + assert_(len(vals) == 10) + + def test_gamma(self): + rg = self._create_rng().rg + vals = rg.gamma(3, 2, 10) + assert_(len(vals) == 10) + + def test_geometric(self): + rg = self._create_rng().rg + vals = rg.geometric(0.5, 10) + assert_(len(vals) == 10) + params_1(rg.exponential, bounded=True) + + def test_gumbel(self): + rg = self._create_rng().rg + vals = rg.gumbel(2.0, 2.0, 10) + assert_(len(vals) == 10) + + def test_laplace(self): + rg = self._create_rng().rg + vals = rg.laplace(2.0, 2.0, 10) + assert_(len(vals) == 10) + + def test_logitic(self): + rg = self._create_rng().rg + vals = rg.logistic(2.0, 2.0, 10) + assert_(len(vals) == 10) + + def test_logseries(self): + rg = self._create_rng().rg + vals = rg.logseries(0.5, 10) + assert_(len(vals) == 10) + + def test_negative_binomial(self): + rg = self._create_rng().rg + vals = rg.negative_binomial(10, 0.2, 10) + assert_(len(vals) == 10) + + def test_noncentral_chisquare(self): + rg = self._create_rng().rg + vals = rg.noncentral_chisquare(10, 2, 10) + assert_(len(vals) == 10) + + def test_noncentral_f(self): + rg = self._create_rng().rg + vals = rg.noncentral_f(3, 1000, 2, 10) + assert_(len(vals) == 10) + vals = rg.noncentral_f(np.array([3] * 10), 1000, 2) + assert_(len(vals) == 10) + vals = rg.noncentral_f(3, np.array([1000] * 10), 2) + assert_(len(vals) == 10) + vals = rg.noncentral_f(3, 1000, np.array([2] * 10)) + assert_(len(vals) == 10) + + def test_normal(self): + rg = self._create_rng().rg + vals = rg.normal(10, 0.2, 10) + assert_(len(vals) == 10) + + def test_pareto(self): + rg = self._create_rng().rg + vals = rg.pareto(3.0, 10) + assert_(len(vals) == 10) + + def test_poisson(self): + rg = self._create_rng().rg + vals = rg.poisson(10, 10) + assert_(len(vals) == 10) + vals = rg.poisson(np.array([10] * 10)) + assert_(len(vals) == 10) + params_1(rg.poisson) + + def test_power(self): + rg = self._create_rng().rg + vals = rg.power(0.2, 10) + assert_(len(vals) == 10) + + def test_integers(self): + rg = self._create_rng().rg + vals = rg.integers(10, 20, 10) + assert_(len(vals) == 10) + + def test_rayleigh(self): + rg = self._create_rng().rg + vals = rg.rayleigh(0.2, 10) + assert_(len(vals) == 10) + params_1(rg.rayleigh, bounded=True) + + def test_vonmises(self): + rg = self._create_rng().rg + vals = rg.vonmises(10, 0.2, 10) + assert_(len(vals) == 10) + + def test_wald(self): + rg = self._create_rng().rg + vals = rg.wald(1.0, 1.0, 10) + assert_(len(vals) == 10) + + def test_weibull(self): + rg = self._create_rng().rg + vals = rg.weibull(1.0, 10) + assert_(len(vals) == 10) + + def test_zipf(self): + rg = self._create_rng().rg + vec_1d = np.arange(2.0, 102.0) + vec_2d = np.arange(2.0, 102.0)[None, :] + mat = np.arange(2.0, 102.0, 0.01).reshape((100, 100)) + vals = rg.zipf(10, 10) + assert_(len(vals) == 10) + vals = rg.zipf(vec_1d) + assert_(len(vals) == 100) + vals = rg.zipf(vec_2d) + assert_(vals.shape == (1, 100)) + vals = rg.zipf(mat) + assert_(vals.shape == (100, 100)) + + def test_hypergeometric(self): + rg = self._create_rng().rg + vals = rg.hypergeometric(25, 25, 20) + assert_(np.isscalar(vals)) + vals = rg.hypergeometric(np.array([25] * 10), 25, 20) + assert_(vals.shape == (10,)) + + def test_triangular(self): + rg = self._create_rng().rg + vals = rg.triangular(-5, 0, 5) + assert_(np.isscalar(vals)) + vals = rg.triangular(-5, np.array([0] * 10), 5) + assert_(vals.shape == (10,)) + + def test_multivariate_normal(self): + rg = self._create_rng().rg + mean = [0, 0] + cov = [[1, 0], [0, 100]] # diagonal covariance + x = rg.multivariate_normal(mean, cov, 5000) + assert_(x.shape == (5000, 2)) + x_zig = rg.multivariate_normal(mean, cov, 5000) + assert_(x.shape == (5000, 2)) + x_inv = rg.multivariate_normal(mean, cov, 5000) + assert_(x.shape == (5000, 2)) + assert_((x_zig != x_inv).any()) + + def test_multinomial(self): + rg = self._create_rng().rg + vals = rg.multinomial(100, [1.0 / 3, 2.0 / 3]) + assert_(vals.shape == (2,)) + vals = rg.multinomial(100, [1.0 / 3, 2.0 / 3], size=10) + assert_(vals.shape == (10, 2)) + + def test_dirichlet(self): + rg = self._create_rng().rg + s = rg.dirichlet((10, 5, 3), 20) + assert_(s.shape == (20, 3)) + + def test_pickle(self): + rg = self._create_rng().rg + pick = pickle.dumps(rg) + unpick = pickle.loads(pick) + assert_(type(rg) == type(unpick)) + assert_(comp_state(rg.bit_generator.state, + unpick.bit_generator.state)) + + pick = pickle.dumps(rg) + unpick = pickle.loads(pick) + assert_(type(rg) == type(unpick)) + assert_(comp_state(rg.bit_generator.state, + unpick.bit_generator.state)) + + def test_seed_array(self): + data = self._create_rng() + if data.seed_vector_bits is None: + bitgen_name = data.bit_generator.__name__ + pytest.skip(f'Vector seeding is not supported by {bitgen_name}') + + if data.seed_vector_bits == 32: + dtype = np.uint32 + else: + dtype = np.uint64 + seed = np.array([1], dtype=dtype) + bg = data.bit_generator(seed) + state1 = bg.state + bg = data.bit_generator(1) + state2 = bg.state + assert_(comp_state(state1, state2)) + + seed = np.arange(4, dtype=dtype) + bg = data.bit_generator(seed) + state1 = bg.state + bg = data.bit_generator(seed[0]) + state2 = bg.state + assert_(not comp_state(state1, state2)) + + seed = np.arange(1500, dtype=dtype) + bg = data.bit_generator(seed) + state1 = bg.state + bg = data.bit_generator(seed[0]) + state2 = bg.state + assert_(not comp_state(state1, state2)) + + seed = 2 ** np.mod(np.arange(1500, dtype=dtype), + data.seed_vector_bits - 1) + 1 + bg = data.bit_generator(seed) + state1 = bg.state + bg = data.bit_generator(seed[0]) + state2 = bg.state + assert_(not comp_state(state1, state2)) + + def test_uniform_float(self): + bit_generator = self._create_rng().bit_generator + rg = Generator(bit_generator(12345)) + warmup(rg) + state = rg.bit_generator.state + r1 = rg.random(11, dtype=np.float32) + rg2 = Generator(bit_generator()) + warmup(rg2) + rg2.bit_generator.state = state + r2 = rg2.random(11, dtype=np.float32) + assert_array_equal(r1, r2) + assert_equal(r1.dtype, np.float32) + assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) + + def test_gamma_floats(self): + bit_generator = self._create_rng().bit_generator + rg = Generator(bit_generator()) + warmup(rg) + state = rg.bit_generator.state + r1 = rg.standard_gamma(4.0, 11, dtype=np.float32) + rg2 = Generator(bit_generator()) + warmup(rg2) + rg2.bit_generator.state = state + r2 = rg2.standard_gamma(4.0, 11, dtype=np.float32) + assert_array_equal(r1, r2) + assert_equal(r1.dtype, np.float32) + assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) + + def test_normal_floats(self): + bit_generator = self._create_rng().bit_generator + rg = Generator(bit_generator()) + warmup(rg) + state = rg.bit_generator.state + r1 = rg.standard_normal(11, dtype=np.float32) + rg2 = Generator(bit_generator()) + warmup(rg2) + rg2.bit_generator.state = state + r2 = rg2.standard_normal(11, dtype=np.float32) + assert_array_equal(r1, r2) + assert_equal(r1.dtype, np.float32) + assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) + + def test_normal_zig_floats(self): + bit_generator = self._create_rng().bit_generator + rg = Generator(bit_generator()) + warmup(rg) + state = rg.bit_generator.state + r1 = rg.standard_normal(11, dtype=np.float32) + rg2 = Generator(bit_generator()) + warmup(rg2) + rg2.bit_generator.state = state + r2 = rg2.standard_normal(11, dtype=np.float32) + assert_array_equal(r1, r2) + assert_equal(r1.dtype, np.float32) + assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) + + def test_output_fill(self): + rg = self._create_rng().rg + state = rg.bit_generator.state + size = (31, 7, 97) + existing = np.empty(size) + rg.bit_generator.state = state + rg.standard_normal(out=existing) + rg.bit_generator.state = state + direct = rg.standard_normal(size=size) + assert_equal(direct, existing) + + sized = np.empty(size) + rg.bit_generator.state = state + rg.standard_normal(out=sized, size=sized.shape) + + existing = np.empty(size, dtype=np.float32) + rg.bit_generator.state = state + rg.standard_normal(out=existing, dtype=np.float32) + rg.bit_generator.state = state + direct = rg.standard_normal(size=size, dtype=np.float32) + assert_equal(direct, existing) + + def test_output_filling_uniform(self): + rg = self._create_rng().rg + state = rg.bit_generator.state + size = (31, 7, 97) + existing = np.empty(size) + rg.bit_generator.state = state + rg.random(out=existing) + rg.bit_generator.state = state + direct = rg.random(size=size) + assert_equal(direct, existing) + + existing = np.empty(size, dtype=np.float32) + rg.bit_generator.state = state + rg.random(out=existing, dtype=np.float32) + rg.bit_generator.state = state + direct = rg.random(size=size, dtype=np.float32) + assert_equal(direct, existing) + + def test_output_filling_exponential(self): + rg = self._create_rng().rg + state = rg.bit_generator.state + size = (31, 7, 97) + existing = np.empty(size) + rg.bit_generator.state = state + rg.standard_exponential(out=existing) + rg.bit_generator.state = state + direct = rg.standard_exponential(size=size) + assert_equal(direct, existing) + + existing = np.empty(size, dtype=np.float32) + rg.bit_generator.state = state + rg.standard_exponential(out=existing, dtype=np.float32) + rg.bit_generator.state = state + direct = rg.standard_exponential(size=size, dtype=np.float32) + assert_equal(direct, existing) + + def test_output_filling_gamma(self): + rg = self._create_rng().rg + state = rg.bit_generator.state + size = (31, 7, 97) + existing = np.zeros(size) + rg.bit_generator.state = state + rg.standard_gamma(1.0, out=existing) + rg.bit_generator.state = state + direct = rg.standard_gamma(1.0, size=size) + assert_equal(direct, existing) + + existing = np.zeros(size, dtype=np.float32) + rg.bit_generator.state = state + rg.standard_gamma(1.0, out=existing, dtype=np.float32) + rg.bit_generator.state = state + direct = rg.standard_gamma(1.0, size=size, dtype=np.float32) + assert_equal(direct, existing) + + def test_output_filling_gamma_broadcast(self): + rg = self._create_rng().rg + state = rg.bit_generator.state + size = (31, 7, 97) + mu = np.arange(97.0) + 1.0 + existing = np.zeros(size) + rg.bit_generator.state = state + rg.standard_gamma(mu, out=existing) + rg.bit_generator.state = state + direct = rg.standard_gamma(mu, size=size) + assert_equal(direct, existing) + + existing = np.zeros(size, dtype=np.float32) + rg.bit_generator.state = state + rg.standard_gamma(mu, out=existing, dtype=np.float32) + rg.bit_generator.state = state + direct = rg.standard_gamma(mu, size=size, dtype=np.float32) + assert_equal(direct, existing) + + def test_output_fill_error(self): + rg = self._create_rng().rg + size = (31, 7, 97) + existing = np.empty(size) + with pytest.raises(TypeError): + rg.standard_normal(out=existing, dtype=np.float32) + with pytest.raises(ValueError): + rg.standard_normal(out=existing[::3]) + existing = np.empty(size, dtype=np.float32) + with pytest.raises(TypeError): + rg.standard_normal(out=existing, dtype=np.float64) + + existing = np.zeros(size, dtype=np.float32) + with pytest.raises(TypeError): + rg.standard_gamma(1.0, out=existing, dtype=np.float64) + with pytest.raises(ValueError): + rg.standard_gamma(1.0, out=existing[::3], dtype=np.float32) + existing = np.zeros(size, dtype=np.float64) + with pytest.raises(TypeError): + rg.standard_gamma(1.0, out=existing, dtype=np.float32) + with pytest.raises(ValueError): + rg.standard_gamma(1.0, out=existing[::3]) + + @pytest.mark.parametrize("dtype", DTYPES_BOOL_INT_UINT) + def test_integers_broadcast(self, dtype): + rg = self._create_rng().rg + initial_state = rg.bit_generator.state + + def reset_state(rng): + rng.bit_generator.state = initial_state + + if dtype == np.bool: + upper = 2 + lower = 0 + else: + info = np.iinfo(dtype) + upper = int(info.max) + 1 + lower = info.min + reset_state(rg) + rg.bit_generator.state = initial_state + a = rg.integers(lower, [upper] * 10, dtype=dtype) + reset_state(rg) + b = rg.integers([lower] * 10, upper, dtype=dtype) + assert_equal(a, b) + reset_state(rg) + c = rg.integers(lower, upper, size=10, dtype=dtype) + assert_equal(a, c) + reset_state(rg) + d = rg.integers(np.array( + [lower] * 10), np.array([upper], dtype=object), size=10, + dtype=dtype) + assert_equal(a, d) + reset_state(rg) + e = rg.integers( + np.array([lower] * 10), np.array([upper] * 10), size=10, + dtype=dtype) + assert_equal(a, e) + + reset_state(rg) + a = rg.integers(0, upper, size=10, dtype=dtype) + reset_state(rg) + b = rg.integers([upper] * 10, dtype=dtype) + assert_equal(a, b) + + @pytest.mark.parametrize("dtype", DTYPES_BOOL_INT_UINT) + def test_integers_numpy(self, dtype): + rg = self._create_rng().rg + high = np.array([1]) + low = np.array([0]) + + out = rg.integers(low, high, dtype=dtype) + assert out.shape == (1,) + + out = rg.integers(low[0], high, dtype=dtype) + assert out.shape == (1,) + + out = rg.integers(low, high[0], dtype=dtype) + assert out.shape == (1,) + + @pytest.mark.parametrize("dtype", DTYPES_BOOL_INT_UINT) + def test_integers_broadcast_errors(self, dtype): + rg = self._create_rng().rg + if dtype == np.bool: + upper = 2 + lower = 0 + else: + info = np.iinfo(dtype) + upper = int(info.max) + 1 + lower = info.min + with pytest.raises(ValueError): + rg.integers(lower, [upper + 1] * 10, dtype=dtype) + with pytest.raises(ValueError): + rg.integers(lower - 1, [upper] * 10, dtype=dtype) + with pytest.raises(ValueError): + rg.integers([lower - 1], [upper] * 10, dtype=dtype) + with pytest.raises(ValueError): + rg.integers([0], [0], dtype=dtype) + + +class TestMT19937(RNG): + @classmethod + def _create_rng(cls): + bit_generator = MT19937 + advance = None + seed = [2 ** 21 + 2 ** 16 + 2 ** 5 + 1] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 32 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) + + def test_numpy_state(self): + rg = self._create_rng().rg + nprg = np.random.RandomState() + nprg.standard_normal(99) + state = nprg.get_state() + rg.bit_generator.state = state + state2 = rg.bit_generator.state + assert_((state[1] == state2['state']['key']).all()) + assert_(state[2] == state2['state']['pos']) + + +class TestPhilox(RNG): + @classmethod + def _create_rng(cls): + bit_generator = Philox + advance = 2**63 + 2**31 + 2**15 + 1 + seed = [12345] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 64 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) + + +class TestSFC64(RNG): + @classmethod + def _create_rng(cls): + bit_generator = SFC64 + advance = None + seed = [12345] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 192 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) + + +class TestPCG64(RNG): + @classmethod + def _create_rng(cls): + bit_generator = PCG64 + advance = 2**63 + 2**31 + 2**15 + 1 + seed = [12345] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 64 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) + + +class TestPCG64DXSM(RNG): + @classmethod + def _create_rng(cls): + bit_generator = PCG64DXSM + advance = 2**63 + 2**31 + 2**15 + 1 + seed = [12345] + rg = Generator(bit_generator(*seed)) + seed_vector_bits = 64 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) + + +class TestDefaultRNG(RNG): + @classmethod + def _create_rng(cls): + # This will duplicate some tests that directly instantiate a fresh + # Generator(), but that's okay. + bit_generator = PCG64 + advance = 2**63 + 2**31 + 2**15 + 1 + seed = [12345] + rg = np.random.default_rng(*seed) + seed_vector_bits = 64 + return RNGData(bit_generator, advance, seed, rg, seed_vector_bits) + + def test_default_is_pcg64(self): + # In order to change the default BitGenerator, we'll go through + # a deprecation cycle to move to a different function. + rg = self._create_rng().rg + assert_(isinstance(rg.bit_generator, PCG64)) + + def test_seed(self): + np.random.default_rng() + np.random.default_rng(None) + np.random.default_rng(12345) + np.random.default_rng(0) + np.random.default_rng(43660444402423911716352051725018508569) + np.random.default_rng([43660444402423911716352051725018508569, + 279705150948142787361475340226491943209]) + with pytest.raises(ValueError): + np.random.default_rng(-1) + with pytest.raises(ValueError): + np.random.default_rng([12345, -1]) diff --git a/local_packages/numpy/rec/__init__.py b/local_packages/numpy/rec/__init__.py new file mode 100644 index 0000000..420240c --- /dev/null +++ b/local_packages/numpy/rec/__init__.py @@ -0,0 +1,2 @@ +from numpy._core.records import * +from numpy._core.records import __all__, __doc__ diff --git a/local_packages/numpy/rec/__init__.pyi b/local_packages/numpy/rec/__init__.pyi new file mode 100644 index 0000000..6a78c66 --- /dev/null +++ b/local_packages/numpy/rec/__init__.pyi @@ -0,0 +1,23 @@ +from numpy._core.records import ( + array, + find_duplicate, + format_parser, + fromarrays, + fromfile, + fromrecords, + fromstring, + recarray, + record, +) + +__all__ = [ + "record", + "recarray", + "format_parser", + "fromarrays", + "fromrecords", + "fromstring", + "fromfile", + "array", + "find_duplicate", +] diff --git a/local_packages/numpy/strings/__init__.py b/local_packages/numpy/strings/__init__.py new file mode 100644 index 0000000..561dadc --- /dev/null +++ b/local_packages/numpy/strings/__init__.py @@ -0,0 +1,2 @@ +from numpy._core.strings import * +from numpy._core.strings import __all__, __doc__ diff --git a/local_packages/numpy/strings/__init__.pyi b/local_packages/numpy/strings/__init__.pyi new file mode 100644 index 0000000..b2fb363 --- /dev/null +++ b/local_packages/numpy/strings/__init__.pyi @@ -0,0 +1,97 @@ +from numpy._core.strings import ( + add, + capitalize, + center, + count, + decode, + encode, + endswith, + equal, + expandtabs, + find, + greater, + greater_equal, + index, + isalnum, + isalpha, + isdecimal, + isdigit, + islower, + isnumeric, + isspace, + istitle, + isupper, + less, + less_equal, + ljust, + lower, + lstrip, + mod, + multiply, + not_equal, + partition, + replace, + rfind, + rindex, + rjust, + rpartition, + rstrip, + slice, + startswith, + str_len, + strip, + swapcase, + title, + translate, + upper, + zfill, +) + +__all__ = [ + "equal", + "not_equal", + "less", + "less_equal", + "greater", + "greater_equal", + "add", + "multiply", + "isalpha", + "isdigit", + "isspace", + "isalnum", + "islower", + "isupper", + "istitle", + "isdecimal", + "isnumeric", + "str_len", + "find", + "rfind", + "index", + "rindex", + "count", + "startswith", + "endswith", + "lstrip", + "rstrip", + "strip", + "replace", + "expandtabs", + "center", + "ljust", + "rjust", + "zfill", + "partition", + "rpartition", + "upper", + "lower", + "swapcase", + "capitalize", + "title", + "mod", + "decode", + "encode", + "translate", + "slice", +] diff --git a/local_packages/numpy/testing/__init__.py b/local_packages/numpy/testing/__init__.py new file mode 100644 index 0000000..fe0c4f2 --- /dev/null +++ b/local_packages/numpy/testing/__init__.py @@ -0,0 +1,22 @@ +"""Common test support for all numpy test scripts. + +This single module should provide all the common functionality for numpy tests +in a single location, so that test scripts can just import it and work right +away. + +""" +from unittest import TestCase + +from . import _private, overrides +from ._private import extbuild +from ._private.utils import * +from ._private.utils import _assert_valid_refcount, _gen_alignment_data + +__all__ = ( + _private.utils.__all__ + ['TestCase', 'overrides'] +) + +from numpy._pytesttester import PytestTester + +test = PytestTester(__name__) +del PytestTester diff --git a/local_packages/numpy/testing/__init__.pyi b/local_packages/numpy/testing/__init__.pyi new file mode 100644 index 0000000..684a7c3 --- /dev/null +++ b/local_packages/numpy/testing/__init__.pyi @@ -0,0 +1,107 @@ +from unittest import TestCase + +from . import _private as _private, overrides +from ._private import extbuild as extbuild +from ._private.utils import ( + BLAS_SUPPORTS_FPE, + HAS_LAPACK64, + HAS_REFCOUNT, + IS_64BIT, + IS_EDITABLE, + IS_INSTALLED, + IS_MUSL, + IS_PYPY, + IS_PYSTON, + IS_WASM, + NOGIL_BUILD, + NUMPY_ROOT, + IgnoreException, + KnownFailureException, + SkipTest, + assert_, + assert_allclose, + assert_almost_equal, + assert_approx_equal, + assert_array_almost_equal, + assert_array_almost_equal_nulp, + assert_array_compare, + assert_array_equal, + assert_array_less, + assert_array_max_ulp, + assert_equal, + assert_no_gc_cycles, + assert_no_warnings, + assert_raises, + assert_raises_regex, + assert_string_equal, + assert_warns, + break_cycles, + build_err_msg, + check_support_sve, + clear_and_catch_warnings, + decorate_methods, + jiffies, + measure, + memusage, + print_assert_equal, + run_threaded, + rundocs, + runstring, + suppress_warnings, + tempdir, + temppath, + verbose, +) + +__all__ = [ + "BLAS_SUPPORTS_FPE", + "HAS_LAPACK64", + "HAS_REFCOUNT", + "IS_64BIT", + "IS_EDITABLE", + "IS_INSTALLED", + "IS_MUSL", + "IS_PYPY", + "IS_PYSTON", + "IS_WASM", + "NOGIL_BUILD", + "NUMPY_ROOT", + "IgnoreException", + "KnownFailureException", + "SkipTest", + "TestCase", + "assert_", + "assert_allclose", + "assert_almost_equal", + "assert_approx_equal", + "assert_array_almost_equal", + "assert_array_almost_equal_nulp", + "assert_array_compare", + "assert_array_equal", + "assert_array_less", + "assert_array_max_ulp", + "assert_equal", + "assert_no_gc_cycles", + "assert_no_warnings", + "assert_raises", + "assert_raises_regex", + "assert_string_equal", + "assert_warns", + "break_cycles", + "build_err_msg", + "check_support_sve", + "clear_and_catch_warnings", + "decorate_methods", + "jiffies", + "measure", + "memusage", + "overrides", + "print_assert_equal", + "run_threaded", + "rundocs", + "runstring", + "suppress_warnings", + "tempdir", + "temppath", + "verbose", +] diff --git a/local_packages/numpy/testing/_private/__init__.py b/local_packages/numpy/testing/_private/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/local_packages/numpy/testing/_private/__init__.pyi b/local_packages/numpy/testing/_private/__init__.pyi new file mode 100644 index 0000000..e69de29 diff --git a/local_packages/numpy/testing/_private/extbuild.py b/local_packages/numpy/testing/_private/extbuild.py new file mode 100644 index 0000000..2a724b7 --- /dev/null +++ b/local_packages/numpy/testing/_private/extbuild.py @@ -0,0 +1,250 @@ +""" +Build a c-extension module on-the-fly in tests. +See build_and_import_extensions for usage hints + +""" + +import os +import pathlib +import subprocess +import sys +import sysconfig +import textwrap + +__all__ = ['build_and_import_extension', 'compile_extension_module'] + + +def build_and_import_extension( + modname, functions, *, prologue="", build_dir=None, + include_dirs=None, more_init=""): + """ + Build and imports a c-extension module `modname` from a list of function + fragments `functions`. + + + Parameters + ---------- + functions : list of fragments + Each fragment is a sequence of func_name, calling convention, snippet. + prologue : string + Code to precede the rest, usually extra ``#include`` or ``#define`` + macros. + build_dir : pathlib.Path + Where to build the module, usually a temporary directory + include_dirs : list + Extra directories to find include files when compiling + more_init : string + Code to appear in the module PyMODINIT_FUNC + + Returns + ------- + out: module + The module will have been loaded and is ready for use + + Examples + -------- + >>> functions = [("test_bytes", "METH_O", \"\"\" + if ( !PyBytesCheck(args)) { + Py_RETURN_FALSE; + } + Py_RETURN_TRUE; + \"\"\")] + >>> mod = build_and_import_extension("testme", functions) + >>> assert not mod.test_bytes('abc') + >>> assert mod.test_bytes(b'abc') + """ + if include_dirs is None: + include_dirs = [] + body = prologue + _make_methods(functions, modname) + init = """ + PyObject *mod = PyModule_Create(&moduledef); + #ifdef Py_GIL_DISABLED + PyUnstable_Module_SetGIL(mod, Py_MOD_GIL_NOT_USED); + #endif + """ + if not build_dir: + build_dir = pathlib.Path('.') + if more_init: + init += """#define INITERROR return NULL + """ + init += more_init + init += "\nreturn mod;" + source_string = _make_source(modname, init, body) + mod_so = compile_extension_module( + modname, build_dir, include_dirs, source_string) + import importlib.util + spec = importlib.util.spec_from_file_location(modname, mod_so) + foo = importlib.util.module_from_spec(spec) + spec.loader.exec_module(foo) + return foo + + +def compile_extension_module( + name, builddir, include_dirs, + source_string, libraries=None, library_dirs=None): + """ + Build an extension module and return the filename of the resulting + native code file. + + Parameters + ---------- + name : string + name of the module, possibly including dots if it is a module inside a + package. + builddir : pathlib.Path + Where to build the module, usually a temporary directory + include_dirs : list + Extra directories to find include files when compiling + libraries : list + Libraries to link into the extension module + library_dirs: list + Where to find the libraries, ``-L`` passed to the linker + """ + modname = name.split('.')[-1] + dirname = builddir / name + dirname.mkdir(exist_ok=True) + cfile = _convert_str_to_file(source_string, dirname) + include_dirs = include_dirs or [] + libraries = libraries or [] + library_dirs = library_dirs or [] + + return _c_compile( + cfile, outputfilename=dirname / modname, + include_dirs=include_dirs, libraries=libraries, + library_dirs=library_dirs, + ) + + +def _convert_str_to_file(source, dirname): + """Helper function to create a file ``source.c`` in `dirname` that contains + the string in `source`. Returns the file name + """ + filename = dirname / 'source.c' + with filename.open('w') as f: + f.write(str(source)) + return filename + + +def _make_methods(functions, modname): + """ Turns the name, signature, code in functions into complete functions + and lists them in a methods_table. Then turns the methods_table into a + ``PyMethodDef`` structure and returns the resulting code fragment ready + for compilation + """ + methods_table = [] + codes = [] + for funcname, flags, code in functions: + cfuncname = f"{modname}_{funcname}" + if 'METH_KEYWORDS' in flags: + signature = '(PyObject *self, PyObject *args, PyObject *kwargs)' + else: + signature = '(PyObject *self, PyObject *args)' + methods_table.append( + "{\"%s\", (PyCFunction)%s, %s}," % (funcname, cfuncname, flags)) + func_code = f""" + static PyObject* {cfuncname}{signature} + {{ + {code} + }} + """ + codes.append(func_code) + + body = "\n".join(codes) + """ + static PyMethodDef methods[] = { + %(methods)s + { NULL } + }; + static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "%(modname)s", /* m_name */ + NULL, /* m_doc */ + -1, /* m_size */ + methods, /* m_methods */ + }; + """ % {'methods': '\n'.join(methods_table), 'modname': modname} + return body + + +def _make_source(name, init, body): + """ Combines the code fragments into source code ready to be compiled + """ + code = """ + #include + + %(body)s + + PyMODINIT_FUNC + PyInit_%(name)s(void) { + %(init)s + } + """ % { + 'name': name, 'init': init, 'body': body, + } + return code + + +def _c_compile(cfile, outputfilename, include_dirs, libraries, + library_dirs): + link_extra = [] + if sys.platform == 'win32': + compile_extra = ["/we4013"] + link_extra.append('/DEBUG') # generate .pdb file + elif sys.platform.startswith('linux'): + compile_extra = [ + "-O0", "-g", "-Werror=implicit-function-declaration", "-fPIC"] + else: + compile_extra = [] + + return build( + cfile, outputfilename, + compile_extra, link_extra, + include_dirs, libraries, library_dirs) + + +def build(cfile, outputfilename, compile_extra, link_extra, + include_dirs, libraries, library_dirs): + "use meson to build" + + build_dir = cfile.parent / "build" + os.makedirs(build_dir, exist_ok=True) + with open(cfile.parent / "meson.build", "wt") as fid: + link_dirs = ['-L' + d for d in library_dirs] + fid.write(textwrap.dedent(f"""\ + project('foo', 'c') + py = import('python').find_installation(pure: false) + py.extension_module( + '{outputfilename.parts[-1]}', + '{cfile.parts[-1]}', + c_args: {compile_extra}, + link_args: {link_dirs}, + include_directories: {include_dirs}, + ) + """)) + native_file_name = cfile.parent / ".mesonpy-native-file.ini" + with open(native_file_name, "wt") as fid: + fid.write(textwrap.dedent(f"""\ + [binaries] + python = '{sys.executable}' + """)) + if sys.platform == "win32": + subprocess.check_call(["meson", "setup", + "--buildtype=release", + "--vsenv", ".."], + cwd=build_dir, + ) + else: + subprocess.check_call(["meson", "setup", "--vsenv", + "..", f'--native-file={os.fspath(native_file_name)}'], + cwd=build_dir + ) + + so_name = outputfilename.parts[-1] + get_so_suffix() + subprocess.check_call(["meson", "compile"], cwd=build_dir) + os.rename(str(build_dir / so_name), cfile.parent / so_name) + return cfile.parent / so_name + + +def get_so_suffix(): + ret = sysconfig.get_config_var('EXT_SUFFIX') + assert ret + return ret diff --git a/local_packages/numpy/testing/_private/extbuild.pyi b/local_packages/numpy/testing/_private/extbuild.pyi new file mode 100644 index 0000000..c1ae507 --- /dev/null +++ b/local_packages/numpy/testing/_private/extbuild.pyi @@ -0,0 +1,25 @@ +import pathlib +import types +from collections.abc import Sequence + +__all__ = ["build_and_import_extension", "compile_extension_module"] + +def build_and_import_extension( + modname: str, + functions: Sequence[tuple[str, str, str]], + *, + prologue: str = "", + build_dir: pathlib.Path | None = None, + include_dirs: Sequence[str] | None = None, + more_init: str = "", +) -> types.ModuleType: ... + +# +def compile_extension_module( + name: str, + builddir: pathlib.Path, + include_dirs: Sequence[str], + source_string: str, + libraries: Sequence[str] | None = None, + library_dirs: Sequence[str] | None = None, +) -> pathlib.Path: ... diff --git a/local_packages/numpy/testing/_private/utils.py b/local_packages/numpy/testing/_private/utils.py new file mode 100644 index 0000000..e1a687b --- /dev/null +++ b/local_packages/numpy/testing/_private/utils.py @@ -0,0 +1,2830 @@ +""" +Utility function to facilitate testing. + +""" +import concurrent.futures +import contextlib +import gc +import importlib.metadata +import operator +import os +import pathlib +import platform +import pprint +import re +import shutil +import sys +import sysconfig +import threading +import warnings +from functools import partial, wraps +from io import StringIO +from tempfile import mkdtemp, mkstemp +from unittest.case import SkipTest +from warnings import WarningMessage + +import numpy as np +import numpy.linalg._umath_linalg +from numpy import isfinite, isnan +from numpy._core import arange, array, array_repr, empty, float32, intp, isnat, ndarray + +__all__ = [ + 'assert_equal', 'assert_almost_equal', 'assert_approx_equal', + 'assert_array_equal', 'assert_array_less', 'assert_string_equal', + 'assert_array_almost_equal', 'assert_raises', 'build_err_msg', + 'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal', + 'rundocs', 'runstring', 'verbose', 'measure', + 'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex', + 'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings', + 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings', + 'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY', + 'HAS_REFCOUNT', "IS_WASM", 'suppress_warnings', 'assert_array_compare', + 'assert_no_gc_cycles', 'break_cycles', 'HAS_LAPACK64', 'IS_PYSTON', + 'IS_MUSL', 'check_support_sve', 'NOGIL_BUILD', + 'IS_EDITABLE', 'IS_INSTALLED', 'NUMPY_ROOT', 'run_threaded', 'IS_64BIT', + 'BLAS_SUPPORTS_FPE', + ] + + +class KnownFailureException(Exception): + '''Raise this exception to mark a test as a known failing test.''' + pass + + +KnownFailureTest = KnownFailureException # backwards compat +verbose = 0 + +NUMPY_ROOT = pathlib.Path(np.__file__).parent + +try: + np_dist = importlib.metadata.distribution('numpy') +except importlib.metadata.PackageNotFoundError: + IS_INSTALLED = IS_EDITABLE = False +else: + IS_INSTALLED = True + try: + if sys.version_info >= (3, 13): + IS_EDITABLE = np_dist.origin.dir_info.editable + else: + # Backport importlib.metadata.Distribution.origin + import json # noqa: E401 + import types + origin = json.loads( + np_dist.read_text('direct_url.json') or '{}', + object_hook=lambda data: types.SimpleNamespace(**data), + ) + IS_EDITABLE = origin.dir_info.editable + except AttributeError: + IS_EDITABLE = False + + # spin installs numpy directly via meson, instead of using meson-python, and + # runs the module by setting PYTHONPATH. This is problematic because the + # resulting installation lacks the Python metadata (.dist-info), and numpy + # might already be installed on the environment, causing us to find its + # metadata, even though we are not actually loading that package. + # Work around this issue by checking if the numpy root matches. + if not IS_EDITABLE and np_dist.locate_file('numpy') != NUMPY_ROOT: + IS_INSTALLED = False + +IS_WASM = platform.machine() in ["wasm32", "wasm64"] +IS_PYPY = sys.implementation.name == 'pypy' +IS_PYSTON = hasattr(sys, "pyston_version_info") +HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None and not IS_PYSTON +BLAS_SUPPORTS_FPE = np._core._multiarray_umath._blas_supports_fpe(None) + +HAS_LAPACK64 = numpy.linalg._umath_linalg._ilp64 + +IS_MUSL = False +# alternate way is +# from packaging.tags import sys_tags +# _tags = list(sys_tags()) +# if 'musllinux' in _tags[0].platform: +_v = sysconfig.get_config_var('HOST_GNU_TYPE') or '' +if 'musl' in _v: + IS_MUSL = True + +NOGIL_BUILD = bool(sysconfig.get_config_var("Py_GIL_DISABLED")) +IS_64BIT = np.dtype(np.intp).itemsize == 8 + +def assert_(val, msg=''): + """ + Assert that works in release mode. + Accepts callable msg to allow deferring evaluation until failure. + + The Python built-in ``assert`` does not work when executing code in + optimized mode (the ``-O`` flag) - no byte-code is generated for it. + + For documentation on usage, refer to the Python documentation. + + """ + __tracebackhide__ = True # Hide traceback for py.test + if not val: + try: + smsg = msg() + except TypeError: + smsg = msg + raise AssertionError(smsg) + + +if os.name == 'nt': + # Code "stolen" from enthought/debug/memusage.py + def GetPerformanceAttributes(object, counter, instance=None, + inum=-1, format=None, machine=None): + # NOTE: Many counters require 2 samples to give accurate results, + # including "% Processor Time" (as by definition, at any instant, a + # thread's CPU usage is either 0 or 100). To read counters like this, + # you should copy this function, but keep the counter open, and call + # CollectQueryData() each time you need to know. + # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp + # (dead link) + # My older explanation for this was that the "AddCounter" process + # forced the CPU to 100%, but the above makes more sense :) + import win32pdh + if format is None: + format = win32pdh.PDH_FMT_LONG + path = win32pdh.MakeCounterPath((machine, object, instance, None, + inum, counter)) + hq = win32pdh.OpenQuery() + try: + hc = win32pdh.AddCounter(hq, path) + try: + win32pdh.CollectQueryData(hq) + type, val = win32pdh.GetFormattedCounterValue(hc, format) + return val + finally: + win32pdh.RemoveCounter(hc) + finally: + win32pdh.CloseQuery(hq) + + def memusage(processName="python", instance=0): + # from win32pdhutil, part of the win32all package + import win32pdh + return GetPerformanceAttributes("Process", "Virtual Bytes", + processName, instance, + win32pdh.PDH_FMT_LONG, None) +elif sys.platform[:5] == 'linux': + + def memusage(_proc_pid_stat=None): + """ + Return virtual memory size in bytes of the running python. + + """ + _proc_pid_stat = _proc_pid_stat or f'/proc/{os.getpid()}/stat' + try: + with open(_proc_pid_stat) as f: + l = f.readline().split(' ') + return int(l[22]) + except Exception: + return +else: + def memusage(): + """ + Return memory usage of running python. [Not implemented] + + """ + raise NotImplementedError + + +if sys.platform[:5] == 'linux': + def jiffies(_proc_pid_stat=None, _load_time=None): + """ + Return number of jiffies elapsed. + + Return number of jiffies (1/100ths of a second) that this + process has been scheduled in user mode. See man 5 proc. + + """ + _proc_pid_stat = _proc_pid_stat or f'/proc/{os.getpid()}/stat' + _load_time = _load_time or [] + import time + if not _load_time: + _load_time.append(time.time()) + try: + with open(_proc_pid_stat) as f: + l = f.readline().split(' ') + return int(l[13]) + except Exception: + return int(100 * (time.time() - _load_time[0])) +else: + # os.getpid is not in all platforms available. + # Using time is safe but inaccurate, especially when process + # was suspended or sleeping. + def jiffies(_load_time=[]): + """ + Return number of jiffies elapsed. + + Return number of jiffies (1/100ths of a second) that this + process has been scheduled in user mode. See man 5 proc. + + """ + import time + if not _load_time: + _load_time.append(time.time()) + return int(100 * (time.time() - _load_time[0])) + + +def build_err_msg(arrays, err_msg, header='Items are not equal:', + verbose=True, names=('ACTUAL', 'DESIRED'), precision=8): + msg = ['\n' + header] + err_msg = str(err_msg) + if err_msg: + if err_msg.find('\n') == -1 and len(err_msg) < 79 - len(header): + msg = [msg[0] + ' ' + err_msg] + else: + msg.append(err_msg) + if verbose: + for i, a in enumerate(arrays): + + if isinstance(a, ndarray): + # precision argument is only needed if the objects are ndarrays + r_func = partial(array_repr, precision=precision) + else: + r_func = repr + + try: + r = r_func(a) + except Exception as exc: + r = f'[repr failed for <{type(a).__name__}>: {exc}]' + if r.count('\n') > 3: + r = '\n'.join(r.splitlines()[:3]) + r += '...' + msg.append(f' {names[i]}: {r}') + return '\n'.join(msg) + + +def assert_equal(actual, desired, err_msg='', verbose=True, *, strict=False): + """ + Raises an AssertionError if two objects are not equal. + + Given two objects (scalars, lists, tuples, dictionaries or numpy arrays), + check that all elements of these objects are equal. An exception is raised + at the first conflicting values. + + This function handles NaN comparisons as if NaN was a "normal" number. + That is, AssertionError is not raised if both objects have NaNs in the same + positions. This is in contrast to the IEEE standard on NaNs, which says + that NaN compared to anything must return False. + + Parameters + ---------- + actual : array_like + The object to check. + desired : array_like + The expected object. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + strict : bool, optional + If True and either of the `actual` and `desired` arguments is an array, + raise an ``AssertionError`` when either the shape or the data type of + the arguments does not match. If neither argument is an array, this + parameter has no effect. + + .. versionadded:: 2.0.0 + + Raises + ------ + AssertionError + If actual and desired are not equal. + + See Also + -------- + assert_allclose + assert_array_almost_equal_nulp, + assert_array_max_ulp, + + Notes + ----- + When one of `actual` and `desired` is a scalar and the other is array_like, the + function checks that each element of the array_like is equal to the scalar. + Note that empty arrays are therefore considered equal to scalars. + This behaviour can be disabled by setting ``strict==True``. + + Examples + -------- + >>> np.testing.assert_equal([4, 5], [4, 6]) + Traceback (most recent call last): + ... + AssertionError: + Items are not equal: + item=1 + ACTUAL: 5 + DESIRED: 6 + + The following comparison does not raise an exception. There are NaNs + in the inputs, but they are in the same positions. + + >>> np.testing.assert_equal(np.array([1.0, 2.0, np.nan]), [1, 2, np.nan]) + + As mentioned in the Notes section, `assert_equal` has special + handling for scalars when one of the arguments is an array. + Here, the test checks that each value in `x` is 3: + + >>> x = np.full((2, 5), fill_value=3) + >>> np.testing.assert_equal(x, 3) + + Use `strict` to raise an AssertionError when comparing a scalar with an + array of a different shape: + + >>> np.testing.assert_equal(x, 3, strict=True) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not equal + + (shapes (2, 5), () mismatch) + ACTUAL: array([[3, 3, 3, 3, 3], + [3, 3, 3, 3, 3]]) + DESIRED: array(3) + + The `strict` parameter also ensures that the array data types match: + + >>> x = np.array([2, 2, 2]) + >>> y = np.array([2., 2., 2.], dtype=np.float32) + >>> np.testing.assert_equal(x, y, strict=True) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not equal + + (dtypes int64, float32 mismatch) + ACTUAL: array([2, 2, 2]) + DESIRED: array([2., 2., 2.], dtype=float32) + """ + __tracebackhide__ = True # Hide traceback for py.test + if isinstance(desired, dict): + if not isinstance(actual, dict): + raise AssertionError(repr(type(actual))) + assert_equal(len(actual), len(desired), err_msg, verbose) + for k in desired: + if k not in actual: + raise AssertionError(repr(k)) + assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}', + verbose) + return + if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): + assert_equal(len(actual), len(desired), err_msg, verbose) + for k in range(len(desired)): + assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}', + verbose) + return + from numpy import imag, iscomplexobj, real + from numpy._core import isscalar, ndarray, signbit + if isinstance(actual, ndarray) or isinstance(desired, ndarray): + return assert_array_equal(actual, desired, err_msg, verbose, + strict=strict) + msg = build_err_msg([actual, desired], err_msg, verbose=verbose) + + # Handle complex numbers: separate into real/imag to handle + # nan/inf/negative zero correctly + # XXX: catch ValueError for subclasses of ndarray where iscomplex fail + try: + usecomplex = iscomplexobj(actual) or iscomplexobj(desired) + except (ValueError, TypeError): + usecomplex = False + + if usecomplex: + if iscomplexobj(actual): + actualr = real(actual) + actuali = imag(actual) + else: + actualr = actual + actuali = 0 + if iscomplexobj(desired): + desiredr = real(desired) + desiredi = imag(desired) + else: + desiredr = desired + desiredi = 0 + try: + assert_equal(actualr, desiredr) + assert_equal(actuali, desiredi) + except AssertionError: + raise AssertionError(msg) + + # isscalar test to check cases such as [np.nan] != np.nan + if isscalar(desired) != isscalar(actual): + raise AssertionError(msg) + + try: + isdesnat = isnat(desired) + isactnat = isnat(actual) + dtypes_match = (np.asarray(desired).dtype.type == + np.asarray(actual).dtype.type) + if isdesnat and isactnat: + # If both are NaT (and have the same dtype -- datetime or + # timedelta) they are considered equal. + if dtypes_match: + return + else: + raise AssertionError(msg) + + except (TypeError, ValueError, NotImplementedError): + pass + + # Inf/nan/negative zero handling + try: + isdesnan = isnan(desired) + isactnan = isnan(actual) + if isdesnan and isactnan: + return # both nan, so equal + + # handle signed zero specially for floats + array_actual = np.asarray(actual) + array_desired = np.asarray(desired) + if (array_actual.dtype.char in 'Mm' or + array_desired.dtype.char in 'Mm'): + # version 1.18 + # until this version, isnan failed for datetime64 and timedelta64. + # Now it succeeds but comparison to scalar with a different type + # emits a DeprecationWarning. + # Avoid that by skipping the next check + raise NotImplementedError('cannot compare to a scalar ' + 'with a different type') + + if desired == 0 and actual == 0: + if not signbit(desired) == signbit(actual): + raise AssertionError(msg) + + except (TypeError, ValueError, NotImplementedError): + pass + + try: + # Explicitly use __eq__ for comparison, gh-2552 + if not (desired == actual): + raise AssertionError(msg) + + except (DeprecationWarning, FutureWarning) as e: + # this handles the case when the two types are not even comparable + if 'elementwise == comparison' in e.args[0]: + raise AssertionError(msg) + else: + raise + + +def print_assert_equal(test_string, actual, desired): + """ + Test if two objects are equal, and print an error message if test fails. + + The test is performed with ``actual == desired``. + + Parameters + ---------- + test_string : str + The message supplied to AssertionError. + actual : object + The object to test for equality against `desired`. + desired : object + The expected result. + + Examples + -------- + >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1]) + >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2]) + Traceback (most recent call last): + ... + AssertionError: Test XYZ of func xyz failed + ACTUAL: + [0, 1] + DESIRED: + [0, 2] + + """ + __tracebackhide__ = True # Hide traceback for py.test + import pprint + + if not (actual == desired): + msg = StringIO() + msg.write(test_string) + msg.write(' failed\nACTUAL: \n') + pprint.pprint(actual, msg) + msg.write('DESIRED: \n') + pprint.pprint(desired, msg) + raise AssertionError(msg.getvalue()) + + +def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True): + """ + Raises an AssertionError if two items are not equal up to desired + precision. + + .. note:: It is recommended to use one of `assert_allclose`, + `assert_array_almost_equal_nulp` or `assert_array_max_ulp` + instead of this function for more consistent floating point + comparisons. + + The test verifies that the elements of `actual` and `desired` satisfy:: + + abs(desired-actual) < float64(1.5 * 10**(-decimal)) + + That is a looser test than originally documented, but agrees with what the + actual implementation in `assert_array_almost_equal` did up to rounding + vagaries. An exception is raised at conflicting values. For ndarrays this + delegates to assert_array_almost_equal + + Parameters + ---------- + actual : array_like + The object to check. + desired : array_like + The expected object. + decimal : int, optional + Desired precision, default is 7. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Examples + -------- + >>> from numpy.testing import assert_almost_equal + >>> assert_almost_equal(2.3333333333333, 2.33333334) + >>> assert_almost_equal(2.3333333333333, 2.33333334, decimal=10) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not almost equal to 10 decimals + ACTUAL: 2.3333333333333 + DESIRED: 2.33333334 + + >>> assert_almost_equal(np.array([1.0,2.3333333333333]), + ... np.array([1.0,2.33333334]), decimal=9) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not almost equal to 9 decimals + + Mismatched elements: 1 / 2 (50%) + Mismatch at index: + [1]: 2.3333333333333 (ACTUAL), 2.33333334 (DESIRED) + Max absolute difference among violations: 6.66669964e-09 + Max relative difference among violations: 2.85715698e-09 + ACTUAL: array([1. , 2.333333333]) + DESIRED: array([1. , 2.33333334]) + + """ + __tracebackhide__ = True # Hide traceback for py.test + from numpy import imag, iscomplexobj, real + from numpy._core import ndarray + + # Handle complex numbers: separate into real/imag to handle + # nan/inf/negative zero correctly + # XXX: catch ValueError for subclasses of ndarray where iscomplex fail + try: + usecomplex = iscomplexobj(actual) or iscomplexobj(desired) + except ValueError: + usecomplex = False + + def _build_err_msg(): + header = ('Arrays are not almost equal to %d decimals' % decimal) + return build_err_msg([actual, desired], err_msg, verbose=verbose, + header=header) + + if usecomplex: + if iscomplexobj(actual): + actualr = real(actual) + actuali = imag(actual) + else: + actualr = actual + actuali = 0 + if iscomplexobj(desired): + desiredr = real(desired) + desiredi = imag(desired) + else: + desiredr = desired + desiredi = 0 + try: + assert_almost_equal(actualr, desiredr, decimal=decimal) + assert_almost_equal(actuali, desiredi, decimal=decimal) + except AssertionError: + raise AssertionError(_build_err_msg()) + + if isinstance(actual, (ndarray, tuple, list)) \ + or isinstance(desired, (ndarray, tuple, list)): + return assert_array_almost_equal(actual, desired, decimal, err_msg) + try: + # If one of desired/actual is not finite, handle it specially here: + # check that both are nan if any is a nan, and test for equality + # otherwise + if not (isfinite(desired) and isfinite(actual)): + if isnan(desired) or isnan(actual): + if not (isnan(desired) and isnan(actual)): + raise AssertionError(_build_err_msg()) + elif not desired == actual: + raise AssertionError(_build_err_msg()) + return + except (NotImplementedError, TypeError): + pass + if abs(desired - actual) >= np.float64(1.5 * 10.0**(-decimal)): + raise AssertionError(_build_err_msg()) + + +def assert_approx_equal(actual, desired, significant=7, err_msg='', + verbose=True): + """ + Raises an AssertionError if two items are not equal up to significant + digits. + + .. note:: It is recommended to use one of `assert_allclose`, + `assert_array_almost_equal_nulp` or `assert_array_max_ulp` + instead of this function for more consistent floating point + comparisons. + + Given two numbers, check that they are approximately equal. + Approximately equal is defined as the number of significant digits + that agree. + + Parameters + ---------- + actual : scalar + The object to check. + desired : scalar + The expected object. + significant : int, optional + Desired precision, default is 7. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Examples + -------- + >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20) + >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20, + ... significant=8) + >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20, + ... significant=8) + Traceback (most recent call last): + ... + AssertionError: + Items are not equal to 8 significant digits: + ACTUAL: 1.234567e-21 + DESIRED: 1.2345672e-21 + + the evaluated condition that raises the exception is + + >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1) + True + + """ + __tracebackhide__ = True # Hide traceback for py.test + import numpy as np + + (actual, desired) = map(float, (actual, desired)) + if desired == actual: + return + # Normalized the numbers to be in range (-10.0,10.0) + # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual)))))) + with np.errstate(invalid='ignore'): + scale = 0.5 * (np.abs(desired) + np.abs(actual)) + scale = np.power(10, np.floor(np.log10(scale))) + try: + sc_desired = desired / scale + except ZeroDivisionError: + sc_desired = 0.0 + try: + sc_actual = actual / scale + except ZeroDivisionError: + sc_actual = 0.0 + msg = build_err_msg( + [actual, desired], err_msg, + header='Items are not equal to %d significant digits:' % significant, + verbose=verbose) + try: + # If one of desired/actual is not finite, handle it specially here: + # check that both are nan if any is a nan, and test for equality + # otherwise + if not (isfinite(desired) and isfinite(actual)): + if isnan(desired) or isnan(actual): + if not (isnan(desired) and isnan(actual)): + raise AssertionError(msg) + elif not desired == actual: + raise AssertionError(msg) + return + except (TypeError, NotImplementedError): + pass + if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant - 1)): + raise AssertionError(msg) + + +def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='', + precision=6, equal_nan=True, equal_inf=True, + *, strict=False, names=('ACTUAL', 'DESIRED')): + __tracebackhide__ = True # Hide traceback for py.test + from numpy._core import all, array2string, errstate, inf, isnan, max, object_ + + x = np.asanyarray(x) + y = np.asanyarray(y) + + # original array for output formatting + ox, oy = x, y + + def isnumber(x): + return type(x.dtype)._is_numeric + + def istime(x): + return x.dtype.char in "Mm" + + def isvstring(x): + return x.dtype.char == "T" + + def robust_any_difference(x, y): + # We include work-arounds here to handle three types of slightly + # pathological ndarray subclasses: + # (1) all() on fully masked arrays returns np.ma.masked, so we use != True + # (np.ma.masked != True evaluates as np.ma.masked, which is falsy). + # (2) __eq__ on some ndarray subclasses returns Python booleans + # instead of element-wise comparisons, so we cast to np.bool() in + # that case (or in case __eq__ returns some other value with no + # all() method). + # (3) subclasses with bare-bones __array_function__ implementations may + # not implement np.all(), so favor using the .all() method + # We are not committed to supporting cases (2) and (3), but it's nice to + # support them if possible. + result = x == y + if not hasattr(result, "all") or not callable(result.all): + result = np.bool(result) + return result.all() != True + + def func_assert_same_pos(x, y, func=isnan, hasval='nan'): + """Handling nan/inf. + + Combine results of running func on x and y, checking that they are True + at the same locations. + + """ + __tracebackhide__ = True # Hide traceback for py.test + + x_id = func(x) + y_id = func(y) + if robust_any_difference(x_id, y_id): + msg = build_err_msg( + [x, y], + err_msg + '\n%s location mismatch:' + % (hasval), verbose=verbose, header=header, + names=names, + precision=precision) + raise AssertionError(msg) + # If there is a scalar, then here we know the array has the same + # flag as it everywhere, so we should return the scalar flag. + # np.ma.masked is also handled and converted to np.False_ (even if the other + # array has nans/infs etc.; that's OK given the handling later of fully-masked + # results). + if isinstance(x_id, bool) or x_id.ndim == 0: + return np.bool(x_id) + elif isinstance(y_id, bool) or y_id.ndim == 0: + return np.bool(y_id) + else: + return y_id + + def assert_same_inf_values(x, y, infs_mask): + """ + Verify all inf values match in the two arrays + """ + __tracebackhide__ = True # Hide traceback for py.test + + if not infs_mask.any(): + return + if x.ndim > 0 and y.ndim > 0: + x = x[infs_mask] + y = y[infs_mask] + else: + assert infs_mask.all() + + if robust_any_difference(x, y): + msg = build_err_msg( + [x, y], + err_msg + '\ninf values mismatch:', + verbose=verbose, header=header, + names=names, + precision=precision) + raise AssertionError(msg) + + try: + if strict: + cond = x.shape == y.shape and x.dtype == y.dtype + else: + cond = (x.shape == () or y.shape == ()) or x.shape == y.shape + if not cond: + if x.shape != y.shape: + reason = f'\n(shapes {x.shape}, {y.shape} mismatch)' + else: + reason = f'\n(dtypes {x.dtype}, {y.dtype} mismatch)' + msg = build_err_msg([x, y], + err_msg + + reason, + verbose=verbose, header=header, + names=names, + precision=precision) + raise AssertionError(msg) + + flagged = np.bool(False) + if isnumber(x) and isnumber(y): + if equal_nan: + flagged = func_assert_same_pos(x, y, func=isnan, hasval='nan') + + if equal_inf: + # If equal_nan=True, skip comparing nans below for equality if they are + # also infs (e.g. inf+nanj) since that would always fail. + isinf_func = lambda xy: np.logical_and(np.isinf(xy), np.invert(flagged)) + infs_mask = func_assert_same_pos( + x, y, + func=isinf_func, + hasval='inf') + assert_same_inf_values(x, y, infs_mask) + flagged |= infs_mask + + elif istime(x) and istime(y): + # If one is datetime64 and the other timedelta64 there is no point + if equal_nan and x.dtype.type == y.dtype.type: + flagged = func_assert_same_pos(x, y, func=isnat, hasval="NaT") + + elif isvstring(x) and isvstring(y): + dt = x.dtype + if equal_nan and dt == y.dtype and hasattr(dt, 'na_object'): + is_nan = (isinstance(dt.na_object, float) and + np.isnan(dt.na_object)) + bool_errors = 0 + try: + bool(dt.na_object) + except TypeError: + bool_errors = 1 + if is_nan or bool_errors: + # nan-like NA object + flagged = func_assert_same_pos( + x, y, func=isnan, hasval=x.dtype.na_object) + + if flagged.ndim > 0: + x, y = x[~flagged], y[~flagged] + # Only do the comparison if actual values are left + if x.size == 0: + return + elif flagged: + # no sense doing comparison if everything is flagged. + return + + val = comparison(x, y) + invalids = np.logical_not(val) + + if isinstance(val, bool): + cond = val + reduced = array([val]) + else: + reduced = val.ravel() + cond = reduced.all() + + # The below comparison is a hack to ensure that fully masked + # results, for which val.ravel().all() returns np.ma.masked, + # do not trigger a failure (np.ma.masked != True evaluates as + # np.ma.masked, which is falsy). + if cond != True: + n_mismatch = reduced.size - reduced.sum(dtype=intp) + n_elements = flagged.size if flagged.ndim != 0 else reduced.size + percent_mismatch = 100 * n_mismatch / n_elements + remarks = [f'Mismatched elements: {n_mismatch} / {n_elements} ' + f'({percent_mismatch:.3g}%)'] + if invalids.ndim != 0: + if flagged.ndim > 0: + positions = np.argwhere(np.asarray(~flagged))[invalids] + else: + positions = np.argwhere(np.asarray(invalids)) + s = "\n".join( + [ + f" {p.tolist()}: {ox if ox.ndim == 0 else ox[tuple(p)]} " + f"({names[0]}), {oy if oy.ndim == 0 else oy[tuple(p)]} " + f"({names[1]})" + for p in positions[:5] + ] + ) + if len(positions) == 1: + remarks.append( + f"Mismatch at index:\n{s}" + ) + elif len(positions) <= 5: + remarks.append( + f"Mismatch at indices:\n{s}" + ) + else: + remarks.append( + f"First 5 mismatches are at indices:\n{s}" + ) + + with errstate(all='ignore'): + # ignore errors for non-numeric types + with contextlib.suppress(TypeError): + error = abs(x - y) + if np.issubdtype(x.dtype, np.unsignedinteger): + error2 = abs(y - x) + np.minimum(error, error2, out=error) + + reduced_error = error[invalids] + max_abs_error = max(reduced_error) + if getattr(error, 'dtype', object_) == object_: + remarks.append( + 'Max absolute difference among violations: ' + + str(max_abs_error)) + else: + remarks.append( + 'Max absolute difference among violations: ' + + array2string(max_abs_error)) + + # note: this definition of relative error matches that one + # used by assert_allclose (found in np.isclose) + # Filter values where the divisor would be zero + nonzero = np.bool(y != 0) + nonzero_and_invalid = np.logical_and(invalids, nonzero) + + if all(~nonzero_and_invalid): + max_rel_error = array(inf) + else: + nonzero_invalid_error = error[nonzero_and_invalid] + broadcasted_y = np.broadcast_to(y, error.shape) + nonzero_invalid_y = broadcasted_y[nonzero_and_invalid] + max_rel_error = max(nonzero_invalid_error + / abs(nonzero_invalid_y)) + + if getattr(error, 'dtype', object_) == object_: + remarks.append( + 'Max relative difference among violations: ' + + str(max_rel_error)) + else: + remarks.append( + 'Max relative difference among violations: ' + + array2string(max_rel_error)) + err_msg = str(err_msg) + err_msg += '\n' + '\n'.join(remarks) + msg = build_err_msg([ox, oy], err_msg, + verbose=verbose, header=header, + names=names, + precision=precision) + raise AssertionError(msg) + except ValueError: + import traceback + efmt = traceback.format_exc() + header = f'error during assertion:\n\n{efmt}\n\n{header}' + + msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header, + names=names, precision=precision) + raise ValueError(msg) + + +def assert_array_equal(actual, desired, err_msg='', verbose=True, *, + strict=False): + """ + Raises an AssertionError if two array_like objects are not equal. + + Given two array_like objects, check that the shape is equal and all + elements of these objects are equal (but see the Notes for the special + handling of a scalar). An exception is raised at shape mismatch or + conflicting values. In contrast to the standard usage in numpy, NaNs + are compared like numbers, no assertion is raised if both objects have + NaNs in the same positions. + + The usual caution for verifying equality with floating point numbers is + advised. + + .. note:: When either `actual` or `desired` is already an instance of + `numpy.ndarray` and `desired` is not a ``dict``, the behavior of + ``assert_equal(actual, desired)`` is identical to the behavior of this + function. Otherwise, this function performs `np.asanyarray` on the + inputs before comparison, whereas `assert_equal` defines special + comparison rules for common Python types. For example, only + `assert_equal` can be used to compare nested Python lists. In new code, + consider using only `assert_equal`, explicitly converting either + `actual` or `desired` to arrays if the behavior of `assert_array_equal` + is desired. + + Parameters + ---------- + actual : array_like + The actual object to check. + desired : array_like + The desired, expected object. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + strict : bool, optional + If True, raise an AssertionError when either the shape or the data + type of the array_like objects does not match. The special + handling for scalars mentioned in the Notes section is disabled. + + .. versionadded:: 1.24.0 + + Raises + ------ + AssertionError + If actual and desired objects are not equal. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Notes + ----- + When one of `actual` and `desired` is a scalar and the other is array_like, the + function checks that each element of the array_like is equal to the scalar. + Note that empty arrays are therefore considered equal to scalars. + This behaviour can be disabled by setting ``strict==True``. + + Examples + -------- + The first assert does not raise an exception: + + >>> np.testing.assert_array_equal([1.0,2.33333,np.nan], + ... [np.exp(0),2.33333, np.nan]) + + Assert fails with numerical imprecision with floats: + + >>> np.testing.assert_array_equal([1.0,np.pi,np.nan], + ... [1, np.sqrt(np.pi)**2, np.nan]) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not equal + + Mismatched elements: 1 / 3 (33.3%) + Mismatch at index: + [1]: 3.141592653589793 (ACTUAL), 3.1415926535897927 (DESIRED) + Max absolute difference among violations: 4.4408921e-16 + Max relative difference among violations: 1.41357986e-16 + ACTUAL: array([1. , 3.141593, nan]) + DESIRED: array([1. , 3.141593, nan]) + + Use `assert_allclose` or one of the nulp (number of floating point values) + functions for these cases instead: + + >>> np.testing.assert_allclose([1.0,np.pi,np.nan], + ... [1, np.sqrt(np.pi)**2, np.nan], + ... rtol=1e-10, atol=0) + + As mentioned in the Notes section, `assert_array_equal` has special + handling for scalars. Here the test checks that each value in `x` is 3: + + >>> x = np.full((2, 5), fill_value=3) + >>> np.testing.assert_array_equal(x, 3) + + Use `strict` to raise an AssertionError when comparing a scalar with an + array: + + >>> np.testing.assert_array_equal(x, 3, strict=True) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not equal + + (shapes (2, 5), () mismatch) + ACTUAL: array([[3, 3, 3, 3, 3], + [3, 3, 3, 3, 3]]) + DESIRED: array(3) + + The `strict` parameter also ensures that the array data types match: + + >>> x = np.array([2, 2, 2]) + >>> y = np.array([2., 2., 2.], dtype=np.float32) + >>> np.testing.assert_array_equal(x, y, strict=True) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not equal + + (dtypes int64, float32 mismatch) + ACTUAL: array([2, 2, 2]) + DESIRED: array([2., 2., 2.], dtype=float32) + """ + __tracebackhide__ = True # Hide traceback for py.test + assert_array_compare(operator.__eq__, actual, desired, err_msg=err_msg, + verbose=verbose, header='Arrays are not equal', + strict=strict) + + +def assert_array_almost_equal(actual, desired, decimal=6, err_msg='', + verbose=True): + """ + Raises an AssertionError if two objects are not equal up to desired + precision. + + .. note:: It is recommended to use one of `assert_allclose`, + `assert_array_almost_equal_nulp` or `assert_array_max_ulp` + instead of this function for more consistent floating point + comparisons. + + The test verifies identical shapes and that the elements of ``actual`` and + ``desired`` satisfy:: + + abs(desired-actual) < 1.5 * 10**(-decimal) + + That is a looser test than originally documented, but agrees with what the + actual implementation did up to rounding vagaries. An exception is raised + at shape mismatch or conflicting values. In contrast to the standard usage + in numpy, NaNs are compared like numbers, no assertion is raised if both + objects have NaNs in the same positions. + + Parameters + ---------- + actual : array_like + The actual object to check. + desired : array_like + The desired, expected object. + decimal : int, optional + Desired precision, default is 6. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Examples + -------- + the first assert does not raise an exception + + >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan], + ... [1.0,2.333,np.nan]) + + >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan], + ... [1.0,2.33339,np.nan], decimal=5) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not almost equal to 5 decimals + + Mismatched elements: 1 / 3 (33.3%) + Mismatch at index: + [1]: 2.33333 (ACTUAL), 2.33339 (DESIRED) + Max absolute difference among violations: 6.e-05 + Max relative difference among violations: 2.57136612e-05 + ACTUAL: array([1. , 2.33333, nan]) + DESIRED: array([1. , 2.33339, nan]) + + >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan], + ... [1.0,2.33333, 5], decimal=5) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not almost equal to 5 decimals + + nan location mismatch: + ACTUAL: array([1. , 2.33333, nan]) + DESIRED: array([1. , 2.33333, 5. ]) + + """ + __tracebackhide__ = True # Hide traceback for py.test + from numpy._core import number, result_type + from numpy._core.numerictypes import issubdtype + + def compare(x, y): + # make sure y is an inexact type to avoid abs(MIN_INT); will cause + # casting of x later. + dtype = result_type(y, 1.) + y = np.asanyarray(y, dtype) + z = abs(x - y) + + if not issubdtype(z.dtype, number): + z = z.astype(np.float64) # handle object arrays + + return z < 1.5 * 10.0**(-decimal) + + assert_array_compare(compare, actual, desired, err_msg=err_msg, + verbose=verbose, + header=('Arrays are not almost equal to %d decimals' % decimal), + precision=decimal) + + +def assert_array_less(x, y, err_msg='', verbose=True, *, strict=False): + """ + Raises an AssertionError if two array_like objects are not ordered by less + than. + + Given two array_like objects `x` and `y`, check that the shape is equal and + all elements of `x` are strictly less than the corresponding elements of + `y` (but see the Notes for the special handling of a scalar). An exception + is raised at shape mismatch or values that are not correctly ordered. In + contrast to the standard usage in NumPy, no assertion is raised if both + objects have NaNs in the same positions. + + Parameters + ---------- + x : array_like + The smaller object to check. + y : array_like + The larger object to compare. + err_msg : string + The error message to be printed in case of failure. + verbose : bool + If True, the conflicting values are appended to the error message. + strict : bool, optional + If True, raise an AssertionError when either the shape or the data + type of the array_like objects does not match. The special + handling for scalars mentioned in the Notes section is disabled. + + .. versionadded:: 2.0.0 + + Raises + ------ + AssertionError + If x is not strictly smaller than y, element-wise. + + See Also + -------- + assert_array_equal: tests objects for equality + assert_array_almost_equal: test objects for equality up to precision + + Notes + ----- + When one of `x` and `y` is a scalar and the other is array_like, the + function performs the comparison as though the scalar were broadcasted + to the shape of the array. This behaviour can be disabled with the `strict` + parameter. + + Examples + -------- + The following assertion passes because each finite element of `x` is + strictly less than the corresponding element of `y`, and the NaNs are in + corresponding locations. + + >>> x = [1.0, 1.0, np.nan] + >>> y = [1.1, 2.0, np.nan] + >>> np.testing.assert_array_less(x, y) + + The following assertion fails because the zeroth element of `x` is no + longer strictly less than the zeroth element of `y`. + + >>> y[0] = 1 + >>> np.testing.assert_array_less(x, y) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not strictly ordered `x < y` + + Mismatched elements: 1 / 3 (33.3%) + Mismatch at index: + [0]: 1.0 (x), 1.0 (y) + Max absolute difference among violations: 0. + Max relative difference among violations: 0. + x: array([ 1., 1., nan]) + y: array([ 1., 2., nan]) + + Here, `y` is a scalar, so each element of `x` is compared to `y`, and + the assertion passes. + + >>> x = [1.0, 4.0] + >>> y = 5.0 + >>> np.testing.assert_array_less(x, y) + + However, with ``strict=True``, the assertion will fail because the shapes + do not match. + + >>> np.testing.assert_array_less(x, y, strict=True) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not strictly ordered `x < y` + + (shapes (2,), () mismatch) + x: array([1., 4.]) + y: array(5.) + + With ``strict=True``, the assertion also fails if the dtypes of the two + arrays do not match. + + >>> y = [5, 5] + >>> np.testing.assert_array_less(x, y, strict=True) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not strictly ordered `x < y` + + (dtypes float64, int64 mismatch) + x: array([1., 4.]) + y: array([5, 5]) + """ + __tracebackhide__ = True # Hide traceback for py.test + assert_array_compare(operator.__lt__, x, y, err_msg=err_msg, + verbose=verbose, + header='Arrays are not strictly ordered `x < y`', + equal_inf=False, + strict=strict, + names=('x', 'y')) + + +def runstring(astr, dict): + exec(astr, dict) + + +def assert_string_equal(actual, desired): + """ + Test if two strings are equal. + + If the given strings are equal, `assert_string_equal` does nothing. + If they are not equal, an AssertionError is raised, and the diff + between the strings is shown. + + Parameters + ---------- + actual : str + The string to test for equality against the expected string. + desired : str + The expected string. + + Examples + -------- + >>> np.testing.assert_string_equal('abc', 'abc') + >>> np.testing.assert_string_equal('abc', 'abcd') + Traceback (most recent call last): + File "", line 1, in + ... + AssertionError: Differences in strings: + - abc+ abcd? + + + """ + # delay import of difflib to reduce startup time + __tracebackhide__ = True # Hide traceback for py.test + import difflib + + if not isinstance(actual, str): + raise AssertionError(repr(type(actual))) + if not isinstance(desired, str): + raise AssertionError(repr(type(desired))) + if desired == actual: + return + + diff = list(difflib.Differ().compare(actual.splitlines(True), + desired.splitlines(True))) + diff_list = [] + while diff: + d1 = diff.pop(0) + if d1.startswith(' '): + continue + if d1.startswith('- '): + l = [d1] + d2 = diff.pop(0) + if d2.startswith('? '): + l.append(d2) + d2 = diff.pop(0) + if not d2.startswith('+ '): + raise AssertionError(repr(d2)) + l.append(d2) + if diff: + d3 = diff.pop(0) + if d3.startswith('? '): + l.append(d3) + else: + diff.insert(0, d3) + if d2[2:] == d1[2:]: + continue + diff_list.extend(l) + continue + raise AssertionError(repr(d1)) + if not diff_list: + return + msg = f"Differences in strings:\n{''.join(diff_list).rstrip()}" + if actual != desired: + raise AssertionError(msg) + + +def rundocs(filename=None, raise_on_error=True): + """ + Run doctests found in the given file. + + By default `rundocs` raises an AssertionError on failure. + + Parameters + ---------- + filename : str + The path to the file for which the doctests are run. + raise_on_error : bool + Whether to raise an AssertionError when a doctest fails. Default is + True. + + Notes + ----- + The doctests can be run by the user/developer by adding the ``doctests`` + argument to the ``test()`` call. For example, to run all tests (including + doctests) for ``numpy.lib``: + + >>> np.lib.test(doctests=True) # doctest: +SKIP + """ + import doctest + + from numpy.distutils.misc_util import exec_mod_from_location + if filename is None: + f = sys._getframe(1) + filename = f.f_globals['__file__'] + name = os.path.splitext(os.path.basename(filename))[0] + m = exec_mod_from_location(name, filename) + + tests = doctest.DocTestFinder().find(m) + runner = doctest.DocTestRunner(verbose=False) + + msg = [] + if raise_on_error: + out = msg.append + else: + out = None + + for test in tests: + runner.run(test, out=out) + + if runner.failures > 0 and raise_on_error: + raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg)) + + +def check_support_sve(__cache=[]): + """ + gh-22982 + """ + + if __cache: + return __cache[0] + + import subprocess + cmd = 'lscpu' + try: + output = subprocess.run(cmd, capture_output=True, text=True) + result = 'sve' in output.stdout + except (OSError, subprocess.SubprocessError): + result = False + __cache.append(result) + return __cache[0] + + +# +# assert_raises and assert_raises_regex are taken from unittest. +# +import unittest + + +class _Dummy(unittest.TestCase): + def nop(self): + pass + + +_d = _Dummy('nop') + + +def assert_raises(*args, **kwargs): + """ + assert_raises(exception_class, callable, *args, **kwargs) + assert_raises(exception_class) + + Fail unless an exception of class exception_class is thrown + by callable when invoked with arguments args and keyword + arguments kwargs. If a different type of exception is + thrown, it will not be caught, and the test case will be + deemed to have suffered an error, exactly as for an + unexpected exception. + + Alternatively, `assert_raises` can be used as a context manager: + + >>> from numpy.testing import assert_raises + >>> with assert_raises(ZeroDivisionError): + ... 1 / 0 + + is equivalent to + + >>> def div(x, y): + ... return x / y + >>> assert_raises(ZeroDivisionError, div, 1, 0) + + """ + __tracebackhide__ = True # Hide traceback for py.test + return _d.assertRaises(*args, **kwargs) + + +def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs): + """ + assert_raises_regex(exception_class, expected_regexp, callable, *args, + **kwargs) + assert_raises_regex(exception_class, expected_regexp) + + Fail unless an exception of class exception_class and with message that + matches expected_regexp is thrown by callable when invoked with arguments + args and keyword arguments kwargs. + + Alternatively, can be used as a context manager like `assert_raises`. + """ + __tracebackhide__ = True # Hide traceback for py.test + return _d.assertRaisesRegex(exception_class, expected_regexp, *args, **kwargs) + + +def decorate_methods(cls, decorator, testmatch=None): + """ + Apply a decorator to all methods in a class matching a regular expression. + + The given decorator is applied to all public methods of `cls` that are + matched by the regular expression `testmatch` + (``testmatch.search(methodname)``). Methods that are private, i.e. start + with an underscore, are ignored. + + Parameters + ---------- + cls : class + Class whose methods to decorate. + decorator : function + Decorator to apply to methods + testmatch : compiled regexp or str, optional + The regular expression. Default value is None, in which case the + nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``) + is used. + If `testmatch` is a string, it is compiled to a regular expression + first. + + """ + if testmatch is None: + testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep) + else: + testmatch = re.compile(testmatch) + cls_attr = cls.__dict__ + + # delayed import to reduce startup time + from inspect import isfunction + + methods = [_m for _m in cls_attr.values() if isfunction(_m)] + for function in methods: + try: + if hasattr(function, 'compat_func_name'): + funcname = function.compat_func_name + else: + funcname = function.__name__ + except AttributeError: + # not a function + continue + if testmatch.search(funcname) and not funcname.startswith('_'): + setattr(cls, funcname, decorator(function)) + + +def measure(code_str, times=1, label=None): + """ + Return elapsed time for executing code in the namespace of the caller. + + The supplied code string is compiled with the Python builtin ``compile``. + The precision of the timing is 10 milli-seconds. If the code will execute + fast on this timescale, it can be executed many times to get reasonable + timing accuracy. + + Parameters + ---------- + code_str : str + The code to be timed. + times : int, optional + The number of times the code is executed. Default is 1. The code is + only compiled once. + label : str, optional + A label to identify `code_str` with. This is passed into ``compile`` + as the second argument (for run-time error messages). + + Returns + ------- + elapsed : float + Total elapsed time in seconds for executing `code_str` `times` times. + + Examples + -------- + >>> times = 10 + >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)', times=times) + >>> print("Time for a single execution : ", etime / times, "s") # doctest: +SKIP + Time for a single execution : 0.005 s + + """ + frame = sys._getframe(1) + locs, globs = frame.f_locals, frame.f_globals + + code = compile(code_str, f'Test name: {label} ', 'exec') + i = 0 + elapsed = jiffies() + while i < times: + i += 1 + exec(code, globs, locs) + elapsed = jiffies() - elapsed + return 0.01 * elapsed + + +def _assert_valid_refcount(op): + """ + Check that ufuncs don't mishandle refcount of object `1`. + Used in a few regression tests. + """ + if not HAS_REFCOUNT: + return True + + import gc + + import numpy as np + + b = np.arange(100 * 100).reshape(100, 100) + c = b + i = 1 + + gc.disable() + try: + rc = sys.getrefcount(i) + for j in range(15): + d = op(b, c) + assert_(sys.getrefcount(i) >= rc) + finally: + gc.enable() + + +def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True, + err_msg='', verbose=True, *, strict=False): + """ + Raises an AssertionError if two objects are not equal up to desired + tolerance. + + Given two array_like objects, check that their shapes and all elements + are equal (but see the Notes for the special handling of a scalar). An + exception is raised if the shapes mismatch or any values conflict. In + contrast to the standard usage in numpy, NaNs are compared like numbers, + no assertion is raised if both objects have NaNs in the same positions. + + The test is equivalent to ``allclose(actual, desired, rtol, atol)``, + except that it is stricter: it doesn't broadcast its operands, and has + tighter default tolerance values. It compares the difference between + `actual` and `desired` to ``atol + rtol * abs(desired)``. + + Parameters + ---------- + actual : array_like + Array obtained. + desired : array_like + Array desired. + rtol : float, optional + Relative tolerance. + atol : float, optional + Absolute tolerance. + equal_nan : bool, optional. + If True, NaNs will compare equal. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + strict : bool, optional + If True, raise an ``AssertionError`` when either the shape or the data + type of the arguments does not match. The special handling of scalars + mentioned in the Notes section is disabled. + + .. versionadded:: 2.0.0 + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_array_almost_equal_nulp, assert_array_max_ulp + + Notes + ----- + When one of `actual` and `desired` is a scalar and the other is array_like, the + function performs the comparison as if the scalar were broadcasted to the shape + of the array. Note that empty arrays are therefore considered equal to scalars. + This behaviour can be disabled by setting ``strict==True``. + + Examples + -------- + >>> x = [1e-5, 1e-3, 1e-1] + >>> y = np.arccos(np.cos(x)) + >>> np.testing.assert_allclose(x, y, rtol=1e-5, atol=0) + + As mentioned in the Notes section, `assert_allclose` has special + handling for scalars. Here, the test checks that the value of `numpy.sin` + is nearly zero at integer multiples of π. + + >>> x = np.arange(3) * np.pi + >>> np.testing.assert_allclose(np.sin(x), 0, atol=1e-15) + + Use `strict` to raise an ``AssertionError`` when comparing an array + with one or more dimensions against a scalar. + + >>> np.testing.assert_allclose(np.sin(x), 0, atol=1e-15, strict=True) + Traceback (most recent call last): + ... + AssertionError: + Not equal to tolerance rtol=1e-07, atol=1e-15 + + (shapes (3,), () mismatch) + ACTUAL: array([ 0.000000e+00, 1.224647e-16, -2.449294e-16]) + DESIRED: array(0) + + The `strict` parameter also ensures that the array data types match: + + >>> y = np.zeros(3, dtype=np.float32) + >>> np.testing.assert_allclose(np.sin(x), y, atol=1e-15, strict=True) + Traceback (most recent call last): + ... + AssertionError: + Not equal to tolerance rtol=1e-07, atol=1e-15 + + (dtypes float64, float32 mismatch) + ACTUAL: array([ 0.000000e+00, 1.224647e-16, -2.449294e-16]) + DESIRED: array([0., 0., 0.], dtype=float32) + + """ + __tracebackhide__ = True # Hide traceback for py.test + import numpy as np + + def compare(x, y): + return np._core.numeric.isclose(x, y, rtol=rtol, atol=atol, + equal_nan=equal_nan) + + actual, desired = np.asanyarray(actual), np.asanyarray(desired) + header = f'Not equal to tolerance rtol={rtol:g}, atol={atol:g}' + assert_array_compare(compare, actual, desired, err_msg=str(err_msg), + verbose=verbose, header=header, equal_nan=equal_nan, + strict=strict) + + +def assert_array_almost_equal_nulp(x, y, nulp=1): + """ + Compare two arrays relatively to their spacing. + + This is a relatively robust method to compare two arrays whose amplitude + is variable. + + Parameters + ---------- + x, y : array_like + Input arrays. + nulp : int, optional + The maximum number of unit in the last place for tolerance (see Notes). + Default is 1. + + Returns + ------- + None + + Raises + ------ + AssertionError + If the spacing between `x` and `y` for one or more elements is larger + than `nulp`. + + See Also + -------- + assert_array_max_ulp : Check that all items of arrays differ in at most + N Units in the Last Place. + spacing : Return the distance between x and the nearest adjacent number. + + Notes + ----- + An assertion is raised if the following condition is not met:: + + abs(x - y) <= nulp * spacing(maximum(abs(x), abs(y))) + + Examples + -------- + >>> x = np.array([1., 1e-10, 1e-20]) + >>> eps = np.finfo(x.dtype).eps + >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x) + + >>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x) + Traceback (most recent call last): + ... + AssertionError: Arrays are not equal to 1 ULP (max is 2) + + """ + __tracebackhide__ = True # Hide traceback for py.test + import numpy as np + ax = np.abs(x) + ay = np.abs(y) + ref = nulp * np.spacing(np.where(ax > ay, ax, ay)) + if not np.all(np.abs(x - y) <= ref): + if np.iscomplexobj(x) or np.iscomplexobj(y): + msg = f"Arrays are not equal to {nulp} ULP" + else: + max_nulp = np.max(nulp_diff(x, y)) + msg = f"Arrays are not equal to {nulp} ULP (max is {max_nulp:g})" + raise AssertionError(msg) + + +def assert_array_max_ulp(a, b, maxulp=1, dtype=None): + """ + Check that all items of arrays differ in at most N Units in the Last Place. + + Parameters + ---------- + a, b : array_like + Input arrays to be compared. + maxulp : int, optional + The maximum number of units in the last place that elements of `a` and + `b` can differ. Default is 1. + dtype : dtype, optional + Data-type to convert `a` and `b` to if given. Default is None. + + Returns + ------- + ret : ndarray + Array containing number of representable floating point numbers between + items in `a` and `b`. + + Raises + ------ + AssertionError + If one or more elements differ by more than `maxulp`. + + Notes + ----- + For computing the ULP difference, this API does not differentiate between + various representations of NAN (ULP difference between 0x7fc00000 and 0xffc00000 + is zero). + + See Also + -------- + assert_array_almost_equal_nulp : Compare two arrays relatively to their + spacing. + + Examples + -------- + >>> a = np.linspace(0., 1., 100) + >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a))) + + """ + __tracebackhide__ = True # Hide traceback for py.test + import numpy as np + ret = nulp_diff(a, b, dtype) + if not np.all(ret <= maxulp): + raise AssertionError("Arrays are not almost equal up to %g " + "ULP (max difference is %g ULP)" % + (maxulp, np.max(ret))) + return ret + + +def nulp_diff(x, y, dtype=None): + """For each item in x and y, return the number of representable floating + points between them. + + Parameters + ---------- + x : array_like + first input array + y : array_like + second input array + dtype : dtype, optional + Data-type to convert `x` and `y` to if given. Default is None. + + Returns + ------- + nulp : array_like + number of representable floating point numbers between each item in x + and y. + + Notes + ----- + For computing the ULP difference, this API does not differentiate between + various representations of NAN (ULP difference between 0x7fc00000 and 0xffc00000 + is zero). + + Examples + -------- + # By definition, epsilon is the smallest number such as 1 + eps != 1, so + # there should be exactly one ULP between 1 and 1 + eps + >>> nulp_diff(1, 1 + np.finfo(x.dtype).eps) + 1.0 + """ + import numpy as np + if dtype: + x = np.asarray(x, dtype=dtype) + y = np.asarray(y, dtype=dtype) + else: + x = np.asarray(x) + y = np.asarray(y) + + t = np.common_type(x, y) + if np.iscomplexobj(x) or np.iscomplexobj(y): + raise NotImplementedError("_nulp not implemented for complex array") + + x = np.array([x], dtype=t) + y = np.array([y], dtype=t) + + x[np.isnan(x)] = np.nan + y[np.isnan(y)] = np.nan + + if not x.shape == y.shape: + raise ValueError(f"Arrays do not have the same shape: {x.shape} - {y.shape}") + + def _diff(rx, ry, vdt): + diff = np.asarray(rx - ry, dtype=vdt) + return np.abs(diff) + + rx = integer_repr(x) + ry = integer_repr(y) + return _diff(rx, ry, t) + + +def _integer_repr(x, vdt, comp): + # Reinterpret binary representation of the float as sign-magnitude: + # take into account two-complement representation + # See also + # https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/ + rx = x.view(vdt) + if not (rx.size == 1): + rx[rx < 0] = comp - rx[rx < 0] + elif rx < 0: + rx = comp - rx + + return rx + + +def integer_repr(x): + """Return the signed-magnitude interpretation of the binary representation + of x.""" + import numpy as np + if x.dtype == np.float16: + return _integer_repr(x, np.int16, np.int16(-2**15)) + elif x.dtype == np.float32: + return _integer_repr(x, np.int32, np.int32(-2**31)) + elif x.dtype == np.float64: + return _integer_repr(x, np.int64, np.int64(-2**63)) + else: + raise ValueError(f'Unsupported dtype {x.dtype}') + + +@contextlib.contextmanager +def _assert_warns_context(warning_class, name=None): + __tracebackhide__ = True # Hide traceback for py.test + with suppress_warnings(_warn=False) as sup: + l = sup.record(warning_class) + yield + if not len(l) > 0: + name_str = f' when calling {name}' if name is not None else '' + raise AssertionError("No warning raised" + name_str) + + +def assert_warns(warning_class, *args, **kwargs): + """ + Fail unless the given callable throws the specified warning. + + A warning of class warning_class should be thrown by the callable when + invoked with arguments args and keyword arguments kwargs. + If a different type of warning is thrown, it will not be caught. + + If called with all arguments other than the warning class omitted, may be + used as a context manager:: + + with assert_warns(SomeWarning): + do_something() + + The ability to be used as a context manager is new in NumPy v1.11.0. + + .. deprecated:: 2.4 + + This is deprecated. Use `warnings.catch_warnings` or + ``pytest.warns`` instead. + + Parameters + ---------- + warning_class : class + The class defining the warning that `func` is expected to throw. + func : callable, optional + Callable to test + *args : Arguments + Arguments for `func`. + **kwargs : Kwargs + Keyword arguments for `func`. + + Returns + ------- + The value returned by `func`. + + Examples + -------- + >>> import warnings + >>> def deprecated_func(num): + ... warnings.warn("Please upgrade", DeprecationWarning) + ... return num*num + >>> with np.testing.assert_warns(DeprecationWarning): + ... assert deprecated_func(4) == 16 + >>> # or passing a func + >>> ret = np.testing.assert_warns(DeprecationWarning, deprecated_func, 4) + >>> assert ret == 16 + """ + warnings.warn( + "NumPy warning suppression and assertion utilities are deprecated. " + "Use warnings.catch_warnings, warnings.filterwarnings, pytest.warns, " + "or pytest.filterwarnings instead. (Deprecated NumPy 2.4)", + DeprecationWarning, stacklevel=2) + if not args and not kwargs: + return _assert_warns_context(warning_class) + elif len(args) < 1: + if "match" in kwargs: + raise RuntimeError( + "assert_warns does not use 'match' kwarg, " + "use pytest.warns instead" + ) + raise RuntimeError("assert_warns(...) needs at least one arg") + + func = args[0] + args = args[1:] + with _assert_warns_context(warning_class, name=func.__name__): + return func(*args, **kwargs) + + +@contextlib.contextmanager +def _assert_no_warnings_context(name=None): + __tracebackhide__ = True # Hide traceback for py.test + with warnings.catch_warnings(record=True) as l: + warnings.simplefilter('always') + yield + if len(l) > 0: + name_str = f' when calling {name}' if name is not None else '' + raise AssertionError(f'Got warnings{name_str}: {l}') + + +def assert_no_warnings(*args, **kwargs): + """ + Fail if the given callable produces any warnings. + + If called with all arguments omitted, may be used as a context manager:: + + with assert_no_warnings(): + do_something() + + The ability to be used as a context manager is new in NumPy v1.11.0. + + Parameters + ---------- + func : callable + The callable to test. + \\*args : Arguments + Arguments passed to `func`. + \\*\\*kwargs : Kwargs + Keyword arguments passed to `func`. + + Returns + ------- + The value returned by `func`. + + """ + if not args: + return _assert_no_warnings_context() + + func = args[0] + args = args[1:] + with _assert_no_warnings_context(name=func.__name__): + return func(*args, **kwargs) + + +def _gen_alignment_data(dtype=float32, type='binary', max_size=24): + """ + generator producing data with different alignment and offsets + to test simd vectorization + + Parameters + ---------- + dtype : dtype + data type to produce + type : string + 'unary': create data for unary operations, creates one input + and output array + 'binary': create data for unary operations, creates two input + and output array + max_size : integer + maximum size of data to produce + + Returns + ------- + if type is 'unary' yields one output, one input array and a message + containing information on the data + if type is 'binary' yields one output array, two input array and a message + containing information on the data + + """ + ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s' + bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s' + for o in range(3): + for s in range(o + 2, max(o + 3, max_size)): + if type == 'unary': + inp = lambda: arange(s, dtype=dtype)[o:] + out = empty((s,), dtype=dtype)[o:] + yield out, inp(), ufmt % (o, o, s, dtype, 'out of place') + d = inp() + yield d, d, ufmt % (o, o, s, dtype, 'in place') + yield out[1:], inp()[:-1], ufmt % \ + (o + 1, o, s - 1, dtype, 'out of place') + yield out[:-1], inp()[1:], ufmt % \ + (o, o + 1, s - 1, dtype, 'out of place') + yield inp()[:-1], inp()[1:], ufmt % \ + (o, o + 1, s - 1, dtype, 'aliased') + yield inp()[1:], inp()[:-1], ufmt % \ + (o + 1, o, s - 1, dtype, 'aliased') + if type == 'binary': + inp1 = lambda: arange(s, dtype=dtype)[o:] + inp2 = lambda: arange(s, dtype=dtype)[o:] + out = empty((s,), dtype=dtype)[o:] + yield out, inp1(), inp2(), bfmt % \ + (o, o, o, s, dtype, 'out of place') + d = inp1() + yield d, d, inp2(), bfmt % \ + (o, o, o, s, dtype, 'in place1') + d = inp2() + yield d, inp1(), d, bfmt % \ + (o, o, o, s, dtype, 'in place2') + yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % \ + (o + 1, o, o, s - 1, dtype, 'out of place') + yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \ + (o, o + 1, o, s - 1, dtype, 'out of place') + yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \ + (o, o, o + 1, s - 1, dtype, 'out of place') + yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \ + (o + 1, o, o, s - 1, dtype, 'aliased') + yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \ + (o, o + 1, o, s - 1, dtype, 'aliased') + yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \ + (o, o, o + 1, s - 1, dtype, 'aliased') + + +class IgnoreException(Exception): + "Ignoring this exception due to disabled feature" + pass + + +@contextlib.contextmanager +def tempdir(*args, **kwargs): + """Context manager to provide a temporary test folder. + + All arguments are passed as this to the underlying tempfile.mkdtemp + function. + + """ + tmpdir = mkdtemp(*args, **kwargs) + try: + yield tmpdir + finally: + shutil.rmtree(tmpdir) + + +@contextlib.contextmanager +def temppath(*args, **kwargs): + """Context manager for temporary files. + + Context manager that returns the path to a closed temporary file. Its + parameters are the same as for tempfile.mkstemp and are passed directly + to that function. The underlying file is removed when the context is + exited, so it should be closed at that time. + + Windows does not allow a temporary file to be opened if it is already + open, so the underlying file must be closed after opening before it + can be opened again. + + """ + fd, path = mkstemp(*args, **kwargs) + os.close(fd) + try: + yield path + finally: + os.remove(path) + + +class clear_and_catch_warnings(warnings.catch_warnings): + """ Context manager that resets warning registry for catching warnings + + Warnings can be slippery, because, whenever a warning is triggered, Python + adds a ``__warningregistry__`` member to the *calling* module. This makes + it impossible to retrigger the warning in this module, whatever you put in + the warnings filters. This context manager accepts a sequence of `modules` + as a keyword argument to its constructor and: + + * stores and removes any ``__warningregistry__`` entries in given `modules` + on entry; + * resets ``__warningregistry__`` to its previous state on exit. + + This makes it possible to trigger any warning afresh inside the context + manager without disturbing the state of warnings outside. + + For compatibility with Python, please consider all arguments to be + keyword-only. + + Parameters + ---------- + record : bool, optional + Specifies whether warnings should be captured by a custom + implementation of ``warnings.showwarning()`` and be appended to a list + returned by the context manager. Otherwise None is returned by the + context manager. The objects appended to the list are arguments whose + attributes mirror the arguments to ``showwarning()``. + modules : sequence, optional + Sequence of modules for which to reset warnings registry on entry and + restore on exit. To work correctly, all 'ignore' filters should + filter by one of these modules. + + Examples + -------- + >>> import warnings + >>> with np.testing.clear_and_catch_warnings( + ... modules=[np._core.fromnumeric]): + ... warnings.simplefilter('always') + ... warnings.filterwarnings('ignore', module='np._core.fromnumeric') + ... # do something that raises a warning but ignore those in + ... # np._core.fromnumeric + """ + class_modules = () + + def __init__(self, record=False, modules=()): + self.modules = set(modules).union(self.class_modules) + self._warnreg_copies = {} + super().__init__(record=record) + + def __enter__(self): + for mod in self.modules: + if hasattr(mod, '__warningregistry__'): + mod_reg = mod.__warningregistry__ + self._warnreg_copies[mod] = mod_reg.copy() + mod_reg.clear() + return super().__enter__() + + def __exit__(self, *exc_info): + super().__exit__(*exc_info) + for mod in self.modules: + if hasattr(mod, '__warningregistry__'): + mod.__warningregistry__.clear() + if mod in self._warnreg_copies: + mod.__warningregistry__.update(self._warnreg_copies[mod]) + + +class suppress_warnings: + """ + Context manager and decorator doing much the same as + ``warnings.catch_warnings``. + + However, it also provides a filter mechanism to work around + https://bugs.python.org/issue4180. + + This bug causes Python before 3.4 to not reliably show warnings again + after they have been ignored once (even within catch_warnings). It + means that no "ignore" filter can be used easily, since following + tests might need to see the warning. Additionally it allows easier + specificity for testing warnings and can be nested. + + .. deprecated:: 2.4 + + This is deprecated. Use `warnings.filterwarnings` or + ``pytest.filterwarnings`` instead. + + Parameters + ---------- + forwarding_rule : str, optional + One of "always", "once", "module", or "location". Analogous to + the usual warnings module filter mode, it is useful to reduce + noise mostly on the outmost level. Unsuppressed and unrecorded + warnings will be forwarded based on this rule. Defaults to "always". + "location" is equivalent to the warnings "default", match by exact + location the warning warning originated from. + + Notes + ----- + Filters added inside the context manager will be discarded again + when leaving it. Upon entering all filters defined outside a + context will be applied automatically. + + When a recording filter is added, matching warnings are stored in the + ``log`` attribute as well as in the list returned by ``record``. + + If filters are added and the ``module`` keyword is given, the + warning registry of this module will additionally be cleared when + applying it, entering the context, or exiting it. This could cause + warnings to appear a second time after leaving the context if they + were configured to be printed once (default) and were already + printed before the context was entered. + + Nesting this context manager will work as expected when the + forwarding rule is "always" (default). Unfiltered and unrecorded + warnings will be passed out and be matched by the outer level. + On the outmost level they will be printed (or caught by another + warnings context). The forwarding rule argument can modify this + behaviour. + + Like ``catch_warnings`` this context manager is not threadsafe. + + Examples + -------- + + With a context manager:: + + with np.testing.suppress_warnings() as sup: + sup.filter(DeprecationWarning, "Some text") + sup.filter(module=np.ma.core) + log = sup.record(FutureWarning, "Does this occur?") + command_giving_warnings() + # The FutureWarning was given once, the filtered warnings were + # ignored. All other warnings abide outside settings (may be + # printed/error) + assert_(len(log) == 1) + assert_(len(sup.log) == 1) # also stored in log attribute + + Or as a decorator:: + + sup = np.testing.suppress_warnings() + sup.filter(module=np.ma.core) # module must match exactly + @sup + def some_function(): + # do something which causes a warning in np.ma.core + pass + """ + def __init__(self, forwarding_rule="always", _warn=True): + if _warn: + warnings.warn( + "NumPy warning suppression and assertion utilities are deprecated. " + "Use warnings.catch_warnings, warnings.filterwarnings, pytest.warns, " + "or pytest.filterwarnings instead. (Deprecated NumPy 2.4)", + DeprecationWarning, stacklevel=2) + self._entered = False + + # Suppressions are either instance or defined inside one with block: + self._suppressions = [] + + if forwarding_rule not in {"always", "module", "once", "location"}: + raise ValueError("unsupported forwarding rule.") + self._forwarding_rule = forwarding_rule + + def _clear_registries(self): + if hasattr(warnings, "_filters_mutated"): + # clearing the registry should not be necessary on new pythons, + # instead the filters should be mutated. + warnings._filters_mutated() + return + # Simply clear the registry, this should normally be harmless, + # note that on new pythons it would be invalidated anyway. + for module in self._tmp_modules: + if hasattr(module, "__warningregistry__"): + module.__warningregistry__.clear() + + def _filter(self, category=Warning, message="", module=None, record=False): + if record: + record = [] # The log where to store warnings + else: + record = None + if self._entered: + if module is None: + warnings.filterwarnings( + "always", category=category, message=message) + else: + module_regex = module.__name__.replace('.', r'\.') + '$' + warnings.filterwarnings( + "always", category=category, message=message, + module=module_regex) + self._tmp_modules.add(module) + self._clear_registries() + + self._tmp_suppressions.append( + (category, message, re.compile(message, re.I), module, record)) + else: + self._suppressions.append( + (category, message, re.compile(message, re.I), module, record)) + + return record + + def filter(self, category=Warning, message="", module=None): + """ + Add a new suppressing filter or apply it if the state is entered. + + Parameters + ---------- + category : class, optional + Warning class to filter + message : string, optional + Regular expression matching the warning message. + module : module, optional + Module to filter for. Note that the module (and its file) + must match exactly and cannot be a submodule. This may make + it unreliable for external modules. + + Notes + ----- + When added within a context, filters are only added inside + the context and will be forgotten when the context is exited. + """ + self._filter(category=category, message=message, module=module, + record=False) + + def record(self, category=Warning, message="", module=None): + """ + Append a new recording filter or apply it if the state is entered. + + All warnings matching will be appended to the ``log`` attribute. + + Parameters + ---------- + category : class, optional + Warning class to filter + message : string, optional + Regular expression matching the warning message. + module : module, optional + Module to filter for. Note that the module (and its file) + must match exactly and cannot be a submodule. This may make + it unreliable for external modules. + + Returns + ------- + log : list + A list which will be filled with all matched warnings. + + Notes + ----- + When added within a context, filters are only added inside + the context and will be forgotten when the context is exited. + """ + return self._filter(category=category, message=message, module=module, + record=True) + + def __enter__(self): + if self._entered: + raise RuntimeError("cannot enter suppress_warnings twice.") + + self._orig_show = warnings.showwarning + self._filters = warnings.filters + warnings.filters = self._filters[:] + + self._entered = True + self._tmp_suppressions = [] + self._tmp_modules = set() + self._forwarded = set() + + self.log = [] # reset global log (no need to keep same list) + + for cat, mess, _, mod, log in self._suppressions: + if log is not None: + del log[:] # clear the log + if mod is None: + warnings.filterwarnings( + "always", category=cat, message=mess) + else: + module_regex = mod.__name__.replace('.', r'\.') + '$' + warnings.filterwarnings( + "always", category=cat, message=mess, + module=module_regex) + self._tmp_modules.add(mod) + warnings.showwarning = self._showwarning + self._clear_registries() + + return self + + def __exit__(self, *exc_info): + warnings.showwarning = self._orig_show + warnings.filters = self._filters + self._clear_registries() + self._entered = False + del self._orig_show + del self._filters + + def _showwarning(self, message, category, filename, lineno, + *args, use_warnmsg=None, **kwargs): + for cat, _, pattern, mod, rec in ( + self._suppressions + self._tmp_suppressions)[::-1]: + if (issubclass(category, cat) and + pattern.match(message.args[0]) is not None): + if mod is None: + # Message and category match, either recorded or ignored + if rec is not None: + msg = WarningMessage(message, category, filename, + lineno, **kwargs) + self.log.append(msg) + rec.append(msg) + return + # Use startswith, because warnings strips the c or o from + # .pyc/.pyo files. + elif mod.__file__.startswith(filename): + # The message and module (filename) match + if rec is not None: + msg = WarningMessage(message, category, filename, + lineno, **kwargs) + self.log.append(msg) + rec.append(msg) + return + + # There is no filter in place, so pass to the outside handler + # unless we should only pass it once + if self._forwarding_rule == "always": + if use_warnmsg is None: + self._orig_show(message, category, filename, lineno, + *args, **kwargs) + else: + self._orig_showmsg(use_warnmsg) + return + + if self._forwarding_rule == "once": + signature = (message.args, category) + elif self._forwarding_rule == "module": + signature = (message.args, category, filename) + elif self._forwarding_rule == "location": + signature = (message.args, category, filename, lineno) + + if signature in self._forwarded: + return + self._forwarded.add(signature) + if use_warnmsg is None: + self._orig_show(message, category, filename, lineno, *args, + **kwargs) + else: + self._orig_showmsg(use_warnmsg) + + def __call__(self, func): + """ + Function decorator to apply certain suppressions to a whole + function. + """ + @wraps(func) + def new_func(*args, **kwargs): + with self: + return func(*args, **kwargs) + + return new_func + + +@contextlib.contextmanager +def _assert_no_gc_cycles_context(name=None): + __tracebackhide__ = True # Hide traceback for py.test + + # not meaningful to test if there is no refcounting + if not HAS_REFCOUNT: + yield + return + + assert_(gc.isenabled()) + gc.disable() + gc_debug = gc.get_debug() + try: + for i in range(100): + if gc.collect() == 0: + break + else: + raise RuntimeError( + "Unable to fully collect garbage - perhaps a __del__ method " + "is creating more reference cycles?") + + gc.set_debug(gc.DEBUG_SAVEALL) + yield + # gc.collect returns the number of unreachable objects in cycles that + # were found -- we are checking that no cycles were created in the context + n_objects_in_cycles = gc.collect() + objects_in_cycles = gc.garbage[:] + finally: + del gc.garbage[:] + gc.set_debug(gc_debug) + gc.enable() + + if n_objects_in_cycles: + name_str = f' when calling {name}' if name is not None else '' + raise AssertionError( + "Reference cycles were found{}: {} objects were collected, " + "of which {} are shown below:{}" + .format( + name_str, + n_objects_in_cycles, + len(objects_in_cycles), + ''.join( + "\n {} object with id={}:\n {}".format( + type(o).__name__, + id(o), + pprint.pformat(o).replace('\n', '\n ') + ) for o in objects_in_cycles + ) + ) + ) + + +def assert_no_gc_cycles(*args, **kwargs): + """ + Fail if the given callable produces any reference cycles. + + If called with all arguments omitted, may be used as a context manager:: + + with assert_no_gc_cycles(): + do_something() + + Parameters + ---------- + func : callable + The callable to test. + \\*args : Arguments + Arguments passed to `func`. + \\*\\*kwargs : Kwargs + Keyword arguments passed to `func`. + + Returns + ------- + Nothing. The result is deliberately discarded to ensure that all cycles + are found. + + """ + if not args: + return _assert_no_gc_cycles_context() + + func = args[0] + args = args[1:] + with _assert_no_gc_cycles_context(name=func.__name__): + func(*args, **kwargs) + + +def break_cycles(): + """ + Break reference cycles by calling gc.collect + Objects can call other objects' methods (for instance, another object's + __del__) inside their own __del__. On PyPy, the interpreter only runs + between calls to gc.collect, so multiple calls are needed to completely + release all cycles. + """ + + gc.collect() + if IS_PYPY: + # a few more, just to make sure all the finalizers are called + gc.collect() + gc.collect() + gc.collect() + gc.collect() + + +def requires_memory(free_bytes): + """Decorator to skip a test if not enough memory is available""" + import pytest + + def decorator(func): + @wraps(func) + def wrapper(*a, **kw): + msg = check_free_memory(free_bytes) + if msg is not None: + pytest.skip(msg) + + try: + return func(*a, **kw) + except MemoryError: + # Probably ran out of memory regardless: don't regard as failure + pytest.xfail("MemoryError raised") + + return wrapper + + return decorator + + +def check_free_memory(free_bytes): + """ + Check whether `free_bytes` amount of memory is currently free. + Returns: None if enough memory available, otherwise error message + """ + env_var = 'NPY_AVAILABLE_MEM' + env_value = os.environ.get(env_var) + if env_value is not None: + try: + mem_free = _parse_size(env_value) + except ValueError as exc: + raise ValueError(f'Invalid environment variable {env_var}: {exc}') + + msg = (f'{free_bytes / 1e9} GB memory required, but environment variable ' + f'NPY_AVAILABLE_MEM={env_value} set') + else: + mem_free = _get_mem_available() + + if mem_free is None: + msg = ("Could not determine available memory; set NPY_AVAILABLE_MEM " + "environment variable (e.g. NPY_AVAILABLE_MEM=16GB) to run " + "the test.") + mem_free = -1 + else: + free_bytes_gb = free_bytes / 1e9 + mem_free_gb = mem_free / 1e9 + msg = f'{free_bytes_gb} GB memory required, but {mem_free_gb} GB available' + + return msg if mem_free < free_bytes else None + + +def _parse_size(size_str): + """Convert memory size strings ('12 GB' etc.) to float""" + suffixes = {'': 1, 'b': 1, + 'k': 1000, 'm': 1000**2, 'g': 1000**3, 't': 1000**4, + 'kb': 1000, 'mb': 1000**2, 'gb': 1000**3, 'tb': 1000**4, + 'kib': 1024, 'mib': 1024**2, 'gib': 1024**3, 'tib': 1024**4} + + pipe_suffixes = "|".join(suffixes.keys()) + + size_re = re.compile(fr'^\s*(\d+|\d+\.\d+)\s*({pipe_suffixes})\s*$', re.I) + + m = size_re.match(size_str.lower()) + if not m or m.group(2) not in suffixes: + raise ValueError(f'value {size_str!r} not a valid size') + return int(float(m.group(1)) * suffixes[m.group(2)]) + + +def _get_mem_available(): + """Return available memory in bytes, or None if unknown.""" + try: + import psutil + return psutil.virtual_memory().available + except (ImportError, AttributeError): + pass + + if sys.platform.startswith('linux'): + info = {} + with open('/proc/meminfo') as f: + for line in f: + p = line.split() + info[p[0].strip(':').lower()] = int(p[1]) * 1024 + + if 'memavailable' in info: + # Linux >= 3.14 + return info['memavailable'] + else: + return info['memfree'] + info['cached'] + + return None + + +def _no_tracing(func): + """ + Decorator to temporarily turn off tracing for the duration of a test. + Needed in tests that check refcounting, otherwise the tracing itself + influences the refcounts + """ + if not hasattr(sys, 'gettrace'): + return func + else: + @wraps(func) + def wrapper(*args, **kwargs): + original_trace = sys.gettrace() + try: + sys.settrace(None) + return func(*args, **kwargs) + finally: + sys.settrace(original_trace) + return wrapper + + +def _get_glibc_version(): + try: + ver = os.confstr('CS_GNU_LIBC_VERSION').rsplit(' ')[1] + except Exception: + ver = '0.0' + + return ver + + +_glibcver = _get_glibc_version() +_glibc_older_than = lambda x: (_glibcver != '0.0' and _glibcver < x) + + +def run_threaded(func, max_workers=8, pass_count=False, + pass_barrier=False, outer_iterations=1, + prepare_args=None): + """Runs a function many times in parallel""" + for _ in range(outer_iterations): + with (concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) + as tpe): + if prepare_args is None: + args = [] + else: + args = prepare_args() + if pass_barrier: + barrier = threading.Barrier(max_workers) + args.append(barrier) + if pass_count: + all_args = [(func, i, *args) for i in range(max_workers)] + else: + all_args = [(func, *args) for i in range(max_workers)] + try: + futures = [] + for arg in all_args: + futures.append(tpe.submit(*arg)) + except RuntimeError as e: + import pytest + pytest.skip(f"Spawning {max_workers} threads failed with " + f"error {e!r} (likely due to resource limits on the " + "system running the tests)") + finally: + if len(futures) < max_workers and pass_barrier: + barrier.abort() + for f in futures: + f.result() diff --git a/local_packages/numpy/testing/_private/utils.pyi b/local_packages/numpy/testing/_private/utils.pyi new file mode 100644 index 0000000..016bbec --- /dev/null +++ b/local_packages/numpy/testing/_private/utils.pyi @@ -0,0 +1,505 @@ +import ast +import sys +import types +import unittest +import warnings +from _typeshed import ConvertibleToFloat, GenericPath, StrOrBytesPath, StrPath +from collections.abc import Callable, Iterable, Sequence +from contextlib import _GeneratorContextManager +from pathlib import Path +from re import Pattern +from typing import ( + Any, + AnyStr, + ClassVar, + Final, + Generic, + Literal as L, + NoReturn, + ParamSpec, + Self, + SupportsIndex, + TypeAlias, + TypeVarTuple, + overload, + type_check_only, +) +from typing_extensions import TypeVar, deprecated +from unittest.case import SkipTest + +import numpy as np +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _ArrayLikeDT64_co, + _ArrayLikeNumber_co, + _ArrayLikeObject_co, + _ArrayLikeTD64_co, +) + +__all__ = [ # noqa: RUF022 + "IS_EDITABLE", + "IS_MUSL", + "IS_PYPY", + "IS_PYSTON", + "IS_WASM", + "IS_INSTALLED", + "IS_64BIT", + "HAS_LAPACK64", + "HAS_REFCOUNT", + "BLAS_SUPPORTS_FPE", + "NOGIL_BUILD", + "NUMPY_ROOT", + "assert_", + "assert_array_almost_equal_nulp", + "assert_raises_regex", + "assert_array_max_ulp", + "assert_warns", + "assert_no_warnings", + "assert_allclose", + "assert_equal", + "assert_almost_equal", + "assert_approx_equal", + "assert_array_equal", + "assert_array_less", + "assert_string_equal", + "assert_array_almost_equal", + "assert_raises", + "build_err_msg", + "decorate_methods", + "jiffies", + "memusage", + "print_assert_equal", + "rundocs", + "runstring", + "verbose", + "measure", + "IgnoreException", + "clear_and_catch_warnings", + "SkipTest", + "KnownFailureException", + "temppath", + "tempdir", + "suppress_warnings", + "assert_array_compare", + "assert_no_gc_cycles", + "break_cycles", + "check_support_sve", + "run_threaded", +] + +### + +_T = TypeVar("_T") +_Ts = TypeVarTuple("_Ts") +_Tss = ParamSpec("_Tss") +_ET = TypeVar("_ET", bound=BaseException, default=BaseException) +_FT = TypeVar("_FT", bound=Callable[..., Any]) +_W_co = TypeVar("_W_co", bound=_WarnLog | None, default=_WarnLog | None, covariant=True) + +_StrLike: TypeAlias = str | bytes +_RegexLike: TypeAlias = _StrLike | Pattern[Any] +_NumericArrayLike: TypeAlias = _ArrayLikeNumber_co | _ArrayLikeObject_co + +_ExceptionSpec: TypeAlias = type[_ET] | tuple[type[_ET], ...] +_WarningSpec: TypeAlias = type[Warning] +_WarnLog: TypeAlias = list[warnings.WarningMessage] +_ToModules: TypeAlias = Iterable[types.ModuleType] + +# Must return a bool or an ndarray/generic type that is supported by `np.logical_and.reduce` +_ComparisonFunc: TypeAlias = Callable[ + [NDArray[Any], NDArray[Any]], + bool | np.bool | np.number | NDArray[np.bool | np.number | np.object_], +] + +# Type-check only `clear_and_catch_warnings` subclasses for both values of the +# `record` parameter. Copied from the stdlib `warnings` stubs. +@type_check_only +class _clear_and_catch_warnings_with_records(clear_and_catch_warnings): + def __enter__(self) -> list[warnings.WarningMessage]: ... + +@type_check_only +class _clear_and_catch_warnings_without_records(clear_and_catch_warnings): + def __enter__(self) -> None: ... + +### + +verbose: int = 0 +NUMPY_ROOT: Final[Path] = ... +IS_INSTALLED: Final[bool] = ... +IS_EDITABLE: Final[bool] = ... +IS_MUSL: Final[bool] = ... +IS_PYPY: Final[bool] = ... +IS_PYSTON: Final[bool] = ... +IS_WASM: Final[bool] = ... +IS_64BIT: Final[bool] = ... +HAS_REFCOUNT: Final[bool] = ... +HAS_LAPACK64: Final[bool] = ... +BLAS_SUPPORTS_FPE: Final[bool] = ... +NOGIL_BUILD: Final[bool] = ... + +class KnownFailureException(Exception): ... +class IgnoreException(Exception): ... + +class clear_and_catch_warnings(warnings.catch_warnings[_W_co], Generic[_W_co]): + class_modules: ClassVar[tuple[types.ModuleType, ...]] = () + modules: Final[set[types.ModuleType]] + @overload # record: True + def __init__(self: clear_and_catch_warnings[_WarnLog], /, record: L[True], modules: _ToModules = ()) -> None: ... + @overload # record: False (default) + def __init__(self: clear_and_catch_warnings[None], /, record: L[False] = False, modules: _ToModules = ()) -> None: ... + @overload # record; bool + def __init__(self, /, record: bool, modules: _ToModules = ()) -> None: ... + +@deprecated("Please use warnings.filterwarnings or pytest.mark.filterwarnings instead") +class suppress_warnings: + log: Final[_WarnLog] + def __init__(self, /, forwarding_rule: L["always", "module", "once", "location"] = "always") -> None: ... + def __enter__(self) -> Self: ... + def __exit__(self, cls: type[BaseException] | None, exc: BaseException | None, tb: types.TracebackType | None, /) -> None: ... + def __call__(self, /, func: _FT) -> _FT: ... + + # + def filter(self, /, category: type[Warning] = ..., message: str = "", module: types.ModuleType | None = None) -> None: ... + def record(self, /, category: type[Warning] = ..., message: str = "", module: types.ModuleType | None = None) -> _WarnLog: ... + +# Contrary to runtime we can't do `os.name` checks while type checking, +# only `sys.platform` checks +if sys.platform == "win32" or sys.platform == "cygwin": + def memusage(processName: str = "python", instance: int = 0) -> int: ... +elif sys.platform == "linux": + def memusage(_proc_pid_stat: StrOrBytesPath | None = None) -> int | None: ... +else: + def memusage() -> NoReturn: ... + +if sys.platform == "linux": + def jiffies(_proc_pid_stat: StrOrBytesPath | None = None, _load_time: list[float] | None = None) -> int: ... +else: + def jiffies(_load_time: list[float] = []) -> int: ... + +# +def build_err_msg( + arrays: Iterable[object], + err_msg: object, + header: str = "Items are not equal:", + verbose: bool = True, + names: Sequence[str] = ("ACTUAL", "DESIRED"), # = ('ACTUAL', 'DESIRED') + precision: SupportsIndex | None = 8, +) -> str: ... + +# +def print_assert_equal(test_string: str, actual: object, desired: object) -> None: ... + +# +def assert_(val: object, msg: str | Callable[[], str] = "") -> None: ... + +# +def assert_equal( + actual: object, + desired: object, + err_msg: object = "", + verbose: bool = True, + *, + strict: bool = False, +) -> None: ... + +def assert_almost_equal( + actual: _NumericArrayLike, + desired: _NumericArrayLike, + decimal: int = 7, + err_msg: object = "", + verbose: bool = True, +) -> None: ... + +# +def assert_approx_equal( + actual: ConvertibleToFloat, + desired: ConvertibleToFloat, + significant: int = 7, + err_msg: object = "", + verbose: bool = True, +) -> None: ... + +# +def assert_array_compare( + comparison: _ComparisonFunc, + x: ArrayLike, + y: ArrayLike, + err_msg: object = "", + verbose: bool = True, + header: str = "", + precision: SupportsIndex = 6, + equal_nan: bool = True, + equal_inf: bool = True, + *, + strict: bool = False, + names: tuple[str, str] = ("ACTUAL", "DESIRED"), +) -> None: ... + +# +def assert_array_equal( + actual: object, + desired: object, + err_msg: object = "", + verbose: bool = True, + *, + strict: bool = False, +) -> None: ... + +# +def assert_array_almost_equal( + actual: _NumericArrayLike, + desired: _NumericArrayLike, + decimal: float = 6, + err_msg: object = "", + verbose: bool = True, +) -> None: ... + +@overload +def assert_array_less( + x: _ArrayLikeDT64_co, + y: _ArrayLikeDT64_co, + err_msg: object = "", + verbose: bool = True, + *, + strict: bool = False, +) -> None: ... +@overload +def assert_array_less( + x: _ArrayLikeTD64_co, + y: _ArrayLikeTD64_co, + err_msg: object = "", + verbose: bool = True, + *, + strict: bool = False, +) -> None: ... +@overload +def assert_array_less( + x: _NumericArrayLike, + y: _NumericArrayLike, + err_msg: object = "", + verbose: bool = True, + *, + strict: bool = False, +) -> None: ... + +# +def assert_string_equal(actual: str, desired: str) -> None: ... + +# +@overload +def assert_raises( + exception_class: _ExceptionSpec[_ET], + /, + *, + msg: str | None = None, +) -> unittest.case._AssertRaisesContext[_ET]: ... +@overload +def assert_raises( + exception_class: _ExceptionSpec, + callable: Callable[_Tss, Any], + /, + *args: _Tss.args, + **kwargs: _Tss.kwargs, +) -> None: ... + +# +@overload +def assert_raises_regex( + exception_class: _ExceptionSpec[_ET], + expected_regexp: _RegexLike, + *, + msg: str | None = None, +) -> unittest.case._AssertRaisesContext[_ET]: ... +@overload +def assert_raises_regex( + exception_class: _ExceptionSpec, + expected_regexp: _RegexLike, + callable: Callable[_Tss, Any], + *args: _Tss.args, + **kwargs: _Tss.kwargs, +) -> None: ... + +# +@overload +def assert_allclose( + actual: _ArrayLikeTD64_co, + desired: _ArrayLikeTD64_co, + rtol: float = 1e-7, + atol: float = 0, + equal_nan: bool = True, + err_msg: object = "", + verbose: bool = True, + *, + strict: bool = False, +) -> None: ... +@overload +def assert_allclose( + actual: _NumericArrayLike, + desired: _NumericArrayLike, + rtol: float = 1e-7, + atol: float = 0, + equal_nan: bool = True, + err_msg: object = "", + verbose: bool = True, + *, + strict: bool = False, +) -> None: ... + +# +def assert_array_almost_equal_nulp( + x: _ArrayLikeNumber_co, + y: _ArrayLikeNumber_co, + nulp: float = 1, +) -> None: ... + +# +def assert_array_max_ulp( + a: _ArrayLikeNumber_co, + b: _ArrayLikeNumber_co, + maxulp: float = 1, + dtype: DTypeLike | None = None, +) -> NDArray[Any]: ... + +# +@overload +@deprecated("Please use warnings.catch_warnings or pytest.warns instead") +def assert_warns(warning_class: _WarningSpec) -> _GeneratorContextManager[None]: ... +@overload +@deprecated("Please use warnings.catch_warnings or pytest.warns instead") +def assert_warns(warning_class: _WarningSpec, func: Callable[_Tss, _T], *args: _Tss.args, **kwargs: _Tss.kwargs) -> _T: ... + +# +@overload +def assert_no_warnings() -> _GeneratorContextManager[None]: ... +@overload +def assert_no_warnings(func: Callable[_Tss, _T], /, *args: _Tss.args, **kwargs: _Tss.kwargs) -> _T: ... + +# +@overload +def assert_no_gc_cycles() -> _GeneratorContextManager[None]: ... +@overload +def assert_no_gc_cycles(func: Callable[_Tss, Any], /, *args: _Tss.args, **kwargs: _Tss.kwargs) -> None: ... + +### + +# +@overload +def tempdir( + suffix: None = None, + prefix: None = None, + dir: None = None, +) -> _GeneratorContextManager[str]: ... +@overload +def tempdir( + suffix: AnyStr | None = None, + prefix: AnyStr | None = None, + *, + dir: GenericPath[AnyStr], +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def tempdir( + suffix: AnyStr | None = None, + *, + prefix: AnyStr, + dir: GenericPath[AnyStr] | None = None, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def tempdir( + suffix: AnyStr, + prefix: AnyStr | None = None, + dir: GenericPath[AnyStr] | None = None, +) -> _GeneratorContextManager[AnyStr]: ... + +# +@overload +def temppath( + suffix: None = None, + prefix: None = None, + dir: None = None, + text: bool = False, +) -> _GeneratorContextManager[str]: ... +@overload +def temppath( + suffix: AnyStr | None, + prefix: AnyStr | None, + dir: GenericPath[AnyStr], + text: bool = False, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def temppath( + suffix: AnyStr | None = None, + prefix: AnyStr | None = None, + *, + dir: GenericPath[AnyStr], + text: bool = False, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def temppath( + suffix: AnyStr | None, + prefix: AnyStr, + dir: GenericPath[AnyStr] | None = None, + text: bool = False, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def temppath( + suffix: AnyStr | None = None, + *, + prefix: AnyStr, + dir: GenericPath[AnyStr] | None = None, + text: bool = False, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def temppath( + suffix: AnyStr, + prefix: AnyStr | None = None, + dir: GenericPath[AnyStr] | None = None, + text: bool = False, +) -> _GeneratorContextManager[AnyStr]: ... + +# +def check_support_sve(__cache: list[bool] = ..., /) -> bool: ... # stubdefaulter: ignore[missing-default] + +# +def decorate_methods( + cls: type, + decorator: Callable[[Callable[..., Any]], Any], + testmatch: _RegexLike | None = None, +) -> None: ... + +# +@overload +def run_threaded( + func: Callable[[], None], + max_workers: int = 8, + pass_count: bool = False, + pass_barrier: bool = False, + outer_iterations: int = 1, + prepare_args: None = None, +) -> None: ... +@overload +def run_threaded( + func: Callable[[*_Ts], None], + max_workers: int, + pass_count: bool, + pass_barrier: bool, + outer_iterations: int, + prepare_args: tuple[*_Ts], +) -> None: ... +@overload +def run_threaded( + func: Callable[[*_Ts], None], + max_workers: int = 8, + pass_count: bool = False, + pass_barrier: bool = False, + outer_iterations: int = 1, + *, + prepare_args: tuple[*_Ts], +) -> None: ... + +# +def runstring(astr: _StrLike | types.CodeType, dict: dict[str, Any] | None) -> Any: ... # noqa: ANN401 +def rundocs(filename: StrPath | None = None, raise_on_error: bool = True) -> None: ... +def measure(code_str: _StrLike | ast.AST, times: int = 1, label: str | None = None) -> float: ... +def break_cycles() -> None: ... diff --git a/local_packages/numpy/testing/overrides.py b/local_packages/numpy/testing/overrides.py new file mode 100644 index 0000000..61771c4 --- /dev/null +++ b/local_packages/numpy/testing/overrides.py @@ -0,0 +1,84 @@ +"""Tools for testing implementations of __array_function__ and ufunc overrides + + +""" + +import numpy._core.umath as _umath +from numpy import ufunc as _ufunc +from numpy._core.overrides import ARRAY_FUNCTIONS as _array_functions + + +def get_overridable_numpy_ufuncs(): + """List all numpy ufuncs overridable via `__array_ufunc__` + + Parameters + ---------- + None + + Returns + ------- + set + A set containing all overridable ufuncs in the public numpy API. + """ + ufuncs = {obj for obj in _umath.__dict__.values() + if isinstance(obj, _ufunc)} + return ufuncs + + +def allows_array_ufunc_override(func): + """Determine if a function can be overridden via `__array_ufunc__` + + Parameters + ---------- + func : callable + Function that may be overridable via `__array_ufunc__` + + Returns + ------- + bool + `True` if `func` is overridable via `__array_ufunc__` and + `False` otherwise. + + Notes + ----- + This function is equivalent to ``isinstance(func, np.ufunc)`` and + will work correctly for ufuncs defined outside of Numpy. + + """ + return isinstance(func, _ufunc) + + +def get_overridable_numpy_array_functions(): + """List all numpy functions overridable via `__array_function__` + + Parameters + ---------- + None + + Returns + ------- + set + A set containing all functions in the public numpy API that are + overridable via `__array_function__`. + + """ + # 'import numpy' doesn't import recfunctions, so make sure it's imported + # so ufuncs defined there show up in the ufunc listing + from numpy.lib import recfunctions # noqa: F401 + return _array_functions.copy() + +def allows_array_function_override(func): + """Determine if a Numpy function can be overridden via `__array_function__` + + Parameters + ---------- + func : callable + Function that may be overridable via `__array_function__` + + Returns + ------- + bool + `True` if `func` is a function in the Numpy API that is + overridable via `__array_function__` and `False` otherwise. + """ + return func in _array_functions diff --git a/local_packages/numpy/testing/overrides.pyi b/local_packages/numpy/testing/overrides.pyi new file mode 100644 index 0000000..916154c --- /dev/null +++ b/local_packages/numpy/testing/overrides.pyi @@ -0,0 +1,10 @@ +from collections.abc import Callable, Hashable +from typing import Any +from typing_extensions import TypeIs + +import numpy as np + +def get_overridable_numpy_ufuncs() -> set[np.ufunc]: ... +def get_overridable_numpy_array_functions() -> set[Callable[..., Any]]: ... +def allows_array_ufunc_override(func: object) -> TypeIs[np.ufunc]: ... +def allows_array_function_override(func: Hashable) -> bool: ... diff --git a/local_packages/numpy/testing/print_coercion_tables.py b/local_packages/numpy/testing/print_coercion_tables.py new file mode 100644 index 0000000..89f0de3 --- /dev/null +++ b/local_packages/numpy/testing/print_coercion_tables.py @@ -0,0 +1,207 @@ +#!/usr/bin/env python3 +"""Prints type-coercion tables for the built-in NumPy types + +""" +from collections import namedtuple + +import numpy as np +from numpy._core.numerictypes import obj2sctype + + +# Generic object that can be added, but doesn't do anything else +class GenericObject: + def __init__(self, v): + self.v = v + + def __add__(self, other): + return self + + def __radd__(self, other): + return self + + dtype = np.dtype('O') + +def print_cancast_table(ntypes): + print('X', end=' ') + for char in ntypes: + print(char, end=' ') + print() + for row in ntypes: + print(row, end=' ') + for col in ntypes: + if np.can_cast(row, col, "equiv"): + cast = "#" + elif np.can_cast(row, col, "safe"): + cast = "=" + elif np.can_cast(row, col, "same_kind"): + cast = "~" + elif np.can_cast(row, col, "unsafe"): + cast = "." + else: + cast = " " + print(cast, end=' ') + print() + +def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray, + use_promote_types=False): + print('+', end=' ') + for char in ntypes: + print(char, end=' ') + print() + for row in ntypes: + if row == 'O': + rowtype = GenericObject + else: + rowtype = obj2sctype(row) + + print(row, end=' ') + for col in ntypes: + if col == 'O': + coltype = GenericObject + else: + coltype = obj2sctype(col) + try: + if firstarray: + rowvalue = np.array([rowtype(inputfirstvalue)], dtype=rowtype) + else: + rowvalue = rowtype(inputfirstvalue) + colvalue = coltype(inputsecondvalue) + if use_promote_types: + char = np.promote_types(rowvalue.dtype, colvalue.dtype).char + else: + value = np.add(rowvalue, colvalue) + if isinstance(value, np.ndarray): + char = value.dtype.char + else: + char = np.dtype(type(value)).char + except ValueError: + char = '!' + except OverflowError: + char = '@' + except TypeError: + char = '#' + print(char, end=' ') + print() + + +def print_new_cast_table(*, can_cast=True, legacy=False, flags=False): + """Prints new casts, the values given are default "can-cast" values, not + actual ones. + """ + from numpy._core._multiarray_tests import get_all_cast_information + + cast_table = { + -1: " ", + 0: "#", # No cast (classify as equivalent here) + 1: "#", # equivalent casting + 2: "=", # safe casting + 3: "~", # same-kind casting + 4: ".", # unsafe casting + } + flags_table = { + 0: "▗", 7: "█", + 1: "▚", 2: "▐", 4: "▄", + 3: "▜", 5: "▙", + 6: "▟", + } + + cast_info = namedtuple("cast_info", ["can_cast", "legacy", "flags"]) + no_cast_info = cast_info(" ", " ", " ") + + casts = get_all_cast_information() + table = {} + dtypes = set() + for cast in casts: + dtypes.add(cast["from"]) + dtypes.add(cast["to"]) + + if cast["from"] not in table: + table[cast["from"]] = {} + to_dict = table[cast["from"]] + + can_cast = cast_table[cast["casting"]] + legacy = "L" if cast["legacy"] else "." + flags = 0 + if cast["requires_pyapi"]: + flags |= 1 + if cast["supports_unaligned"]: + flags |= 2 + if cast["no_floatingpoint_errors"]: + flags |= 4 + + flags = flags_table[flags] + to_dict[cast["to"]] = cast_info(can_cast=can_cast, legacy=legacy, flags=flags) + + # The np.dtype(x.type) is a bit strange, because dtype classes do + # not expose much yet. + types = np.typecodes["All"] + + def sorter(x): + # This is a bit weird hack, to get a table as close as possible to + # the one printing all typecodes (but expecting user-dtypes). + dtype = np.dtype(x.type) + try: + indx = types.index(dtype.char) + except ValueError: + indx = np.inf + return (indx, dtype.char) + + dtypes = sorted(dtypes, key=sorter) + + def print_table(field="can_cast"): + print('X', end=' ') + for dt in dtypes: + print(np.dtype(dt.type).char, end=' ') + print() + for from_dt in dtypes: + print(np.dtype(from_dt.type).char, end=' ') + row = table.get(from_dt, {}) + for to_dt in dtypes: + print(getattr(row.get(to_dt, no_cast_info), field), end=' ') + print() + + if can_cast: + # Print the actual table: + print() + print("Casting: # is equivalent, = is safe, ~ is same-kind, and . is unsafe") + print() + print_table("can_cast") + + if legacy: + print() + print("L denotes a legacy cast . a non-legacy one.") + print() + print_table("legacy") + + if flags: + print() + print(f"{flags_table[0]}: no flags, " + f"{flags_table[1]}: PyAPI, " + f"{flags_table[2]}: supports unaligned, " + f"{flags_table[4]}: no-float-errors") + print() + print_table("flags") + + +if __name__ == '__main__': + print("can cast") + print_cancast_table(np.typecodes['All']) + print() + print("In these tables, ValueError is '!', OverflowError is '@', TypeError is '#'") + print() + print("scalar + scalar") + print_coercion_table(np.typecodes['All'], 0, 0, False) + print() + print("scalar + neg scalar") + print_coercion_table(np.typecodes['All'], 0, -1, False) + print() + print("array + scalar") + print_coercion_table(np.typecodes['All'], 0, 0, True) + print() + print("array + neg scalar") + print_coercion_table(np.typecodes['All'], 0, -1, True) + print() + print("promote_types") + print_coercion_table(np.typecodes['All'], 0, 0, False, True) + print("New casting type promotion:") + print_new_cast_table(can_cast=True, legacy=True, flags=True) diff --git a/local_packages/numpy/testing/print_coercion_tables.pyi b/local_packages/numpy/testing/print_coercion_tables.pyi new file mode 100644 index 0000000..f463a18 --- /dev/null +++ b/local_packages/numpy/testing/print_coercion_tables.pyi @@ -0,0 +1,26 @@ +from collections.abc import Iterable +from typing import ClassVar, Generic, Self +from typing_extensions import TypeVar + +import numpy as np + +_VT_co = TypeVar("_VT_co", default=object, covariant=True) + +# undocumented +class GenericObject(Generic[_VT_co]): + dtype: ClassVar[np.dtype[np.object_]] = ... + v: _VT_co + + def __init__(self, /, v: _VT_co) -> None: ... + def __add__(self, other: object, /) -> Self: ... + def __radd__(self, other: object, /) -> Self: ... + +def print_cancast_table(ntypes: Iterable[str]) -> None: ... +def print_coercion_table( + ntypes: Iterable[str], + inputfirstvalue: int, + inputsecondvalue: int, + firstarray: bool, + use_promote_types: bool = False, +) -> None: ... +def print_new_cast_table(*, can_cast: bool = True, legacy: bool = False, flags: bool = False) -> None: ... diff --git a/local_packages/numpy/testing/tests/__init__.py b/local_packages/numpy/testing/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/local_packages/numpy/testing/tests/test_utils.py b/local_packages/numpy/testing/tests/test_utils.py new file mode 100644 index 0000000..6d43343 --- /dev/null +++ b/local_packages/numpy/testing/tests/test_utils.py @@ -0,0 +1,2123 @@ +import itertools +import os +import re +import sys +import warnings +import weakref + +import pytest + +import numpy as np +import numpy._core._multiarray_umath as ncu +from numpy.testing import ( + HAS_REFCOUNT, + assert_, + assert_allclose, + assert_almost_equal, + assert_approx_equal, + assert_array_almost_equal, + assert_array_almost_equal_nulp, + assert_array_equal, + assert_array_less, + assert_array_max_ulp, + assert_equal, + assert_no_gc_cycles, + assert_no_warnings, + assert_raises, + assert_string_equal, + assert_warns, + build_err_msg, + clear_and_catch_warnings, + suppress_warnings, + tempdir, + temppath, +) + + +class _GenericTest: + + def _assert_func(self, *args, **kwargs): + pass + + def _test_equal(self, a, b): + self._assert_func(a, b) + + def _test_not_equal(self, a, b): + with assert_raises(AssertionError): + self._assert_func(a, b) + + def test_array_rank1_eq(self): + """Test two equal array of rank 1 are found equal.""" + a = np.array([1, 2]) + b = np.array([1, 2]) + + self._test_equal(a, b) + + def test_array_rank1_noteq(self): + """Test two different array of rank 1 are found not equal.""" + a = np.array([1, 2]) + b = np.array([2, 2]) + + self._test_not_equal(a, b) + + def test_array_rank2_eq(self): + """Test two equal array of rank 2 are found equal.""" + a = np.array([[1, 2], [3, 4]]) + b = np.array([[1, 2], [3, 4]]) + + self._test_equal(a, b) + + def test_array_diffshape(self): + """Test two arrays with different shapes are found not equal.""" + a = np.array([1, 2]) + b = np.array([[1, 2], [1, 2]]) + + self._test_not_equal(a, b) + + def test_objarray(self): + """Test object arrays.""" + a = np.array([1, 1], dtype=object) + self._test_equal(a, 1) + + def test_array_likes(self): + self._test_equal([1, 2, 3], (1, 2, 3)) + + +class TestArrayEqual(_GenericTest): + + def _assert_func(self, *args, **kwargs): + assert_array_equal(*args, **kwargs) + + def test_generic_rank1(self): + """Test rank 1 array for all dtypes.""" + def foo(t): + a = np.empty(2, t) + a.fill(1) + b = a.copy() + c = a.copy() + c.fill(0) + self._test_equal(a, b) + self._test_not_equal(c, b) + + # Test numeric types and object + for t in '?bhilqpBHILQPfdgFDG': + foo(t) + + # Test strings + for t in ['S1', 'U1']: + foo(t) + + def test_0_ndim_array(self): + x = np.array(473963742225900817127911193656584771) + y = np.array(18535119325151578301457182298393896) + + with pytest.raises(AssertionError) as exc_info: + self._assert_func(x, y) + msg = str(exc_info.value) + assert_('Mismatched elements: 1 / 1 (100%)\n' + in msg) + + y = x + self._assert_func(x, y) + + x = np.array(4395065348745.5643764887869876) + y = np.array(0) + expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Max absolute difference among violations: ' + '4.39506535e+12\n' + 'Max relative difference among violations: inf\n') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(x, y) + + x = y + self._assert_func(x, y) + + def test_generic_rank3(self): + """Test rank 3 array for all dtypes.""" + def foo(t): + a = np.empty((4, 2, 3), t) + a.fill(1) + b = a.copy() + c = a.copy() + c.fill(0) + self._test_equal(a, b) + self._test_not_equal(c, b) + + # Test numeric types and object + for t in '?bhilqpBHILQPfdgFDG': + foo(t) + + # Test strings + for t in ['S1', 'U1']: + foo(t) + + def test_nan_array(self): + """Test arrays with nan values in them.""" + a = np.array([1, 2, np.nan]) + b = np.array([1, 2, np.nan]) + + self._test_equal(a, b) + + c = np.array([1, 2, 3]) + self._test_not_equal(c, b) + + def test_string_arrays(self): + """Test two arrays with different shapes are found not equal.""" + a = np.array(['floupi', 'floupa']) + b = np.array(['floupi', 'floupa']) + + self._test_equal(a, b) + + c = np.array(['floupipi', 'floupa']) + + self._test_not_equal(c, b) + + def test_recarrays(self): + """Test record arrays.""" + a = np.empty(2, [('floupi', float), ('floupa', float)]) + a['floupi'] = [1, 2] + a['floupa'] = [1, 2] + b = a.copy() + + self._test_equal(a, b) + + c = np.empty(2, [('floupipi', float), + ('floupi', float), ('floupa', float)]) + c['floupipi'] = a['floupi'].copy() + c['floupa'] = a['floupa'].copy() + + with pytest.raises(TypeError): + self._test_not_equal(c, b) + + def test_masked_nan_inf(self): + # Regression test for gh-11121 + a = np.ma.MaskedArray([3., 4., 6.5], mask=[False, True, False]) + b = np.array([3., np.nan, 6.5]) + self._test_equal(a, b) + self._test_equal(b, a) + a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, False, False]) + b = np.array([np.inf, 4., 6.5]) + self._test_equal(a, b) + self._test_equal(b, a) + + # Also provides test cases for gh-11121 + def test_masked_scalar(self): + # Test masked scalar vs. plain/masked scalar + for a_val, b_val, b_masked in itertools.product( + [3., np.nan, np.inf], + [3., 4., np.nan, np.inf, -np.inf], + [False, True], + ): + a = np.ma.MaskedArray(a_val, mask=True) + b = np.ma.MaskedArray(b_val, mask=True) if b_masked else np.array(b_val) + self._test_equal(a, b) + self._test_equal(b, a) + + # Test masked scalar vs. plain array + for a_val, b_val in itertools.product( + [3., np.nan, -np.inf], + itertools.product([3., 4., np.nan, np.inf, -np.inf], repeat=2), + ): + a = np.ma.MaskedArray(a_val, mask=True) + b = np.array(b_val) + self._test_equal(a, b) + self._test_equal(b, a) + + # Test masked scalar vs. masked array + for a_val, b_val, b_mask in itertools.product( + [3., np.nan, np.inf], + itertools.product([3., 4., np.nan, np.inf, -np.inf], repeat=2), + itertools.product([False, True], repeat=2), + ): + a = np.ma.MaskedArray(a_val, mask=True) + b = np.ma.MaskedArray(b_val, mask=b_mask) + self._test_equal(a, b) + self._test_equal(b, a) + + def test_subclass_that_overrides_eq(self): + # While we cannot guarantee testing functions will always work for + # subclasses, the tests should ideally rely only on subclasses having + # comparison operators, not on them being able to store booleans + # (which, e.g., astropy Quantity cannot usefully do). See gh-8452. + class MyArray(np.ndarray): + def __eq__(self, other): + return bool(np.equal(self, other).all()) + + def __ne__(self, other): + return not self == other + + a = np.array([1., 2.]).view(MyArray) + b = np.array([2., 3.]).view(MyArray) + assert_(type(a == a), bool) + assert_(a == a) + assert_(a != b) + self._test_equal(a, a) + self._test_not_equal(a, b) + self._test_not_equal(b, a) + + expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Max absolute difference among violations: 1.\n' + 'Max relative difference among violations: 0.5') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._test_equal(a, b) + + c = np.array([0., 2.9]).view(MyArray) + expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Max absolute difference among violations: 2.\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._test_equal(b, c) + + def test_subclass_that_does_not_implement_npall(self): + class MyArray(np.ndarray): + def __array_function__(self, *args, **kwargs): + return NotImplemented + + a = np.array([1., 2.]).view(MyArray) + b = np.array([2., 3.]).view(MyArray) + with assert_raises(TypeError): + np.all(a) + self._test_equal(a, a) + self._test_not_equal(a, b) + self._test_not_equal(b, a) + + def test_suppress_overflow_warnings(self): + # Based on issue #18992 + with pytest.raises(AssertionError): + with np.errstate(all="raise"): + np.testing.assert_array_equal( + np.array([1, 2, 3], np.float32), + np.array([1, 1e-40, 3], np.float32)) + + def test_array_vs_scalar_is_equal(self): + """Test comparing an array with a scalar when all values are equal.""" + a = np.array([1., 1., 1.]) + b = 1. + + self._test_equal(a, b) + + def test_array_vs_array_not_equal(self): + """Test comparing an array with a scalar when not all values equal.""" + a = np.array([34986, 545676, 439655, 563766]) + b = np.array([34986, 545676, 439655, 0]) + + expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Mismatch at index:\n' + ' [3]: 563766 (ACTUAL), 0 (DESIRED)\n' + 'Max absolute difference among violations: 563766\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(a, b) + + a = np.array([34986, 545676, 439655.2, 563766]) + expected_msg = ('Mismatched elements: 2 / 4 (50%)\n' + 'Mismatch at indices:\n' + ' [2]: 439655.2 (ACTUAL), 439655 (DESIRED)\n' + ' [3]: 563766.0 (ACTUAL), 0 (DESIRED)\n' + 'Max absolute difference among violations: ' + '563766.\n' + 'Max relative difference among violations: ' + '4.54902139e-07') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(a, b) + + def test_array_vs_scalar_strict(self): + """Test comparing an array with a scalar with strict option.""" + a = np.array([1., 1., 1.]) + b = 1. + + with pytest.raises(AssertionError): + self._assert_func(a, b, strict=True) + + def test_array_vs_array_strict(self): + """Test comparing two arrays with strict option.""" + a = np.array([1., 1., 1.]) + b = np.array([1., 1., 1.]) + + self._assert_func(a, b, strict=True) + + def test_array_vs_float_array_strict(self): + """Test comparing two arrays with strict option.""" + a = np.array([1, 1, 1]) + b = np.array([1., 1., 1.]) + + with pytest.raises(AssertionError): + self._assert_func(a, b, strict=True) + + +class TestBuildErrorMessage: + + def test_build_err_msg_defaults(self): + x = np.array([1.00001, 2.00002, 3.00003]) + y = np.array([1.00002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg) + b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array([' + '1.00001, 2.00002, 3.00003])\n DESIRED: array([1.00002, ' + '2.00003, 3.00004])') + assert_equal(a, b) + + def test_build_err_msg_no_verbose(self): + x = np.array([1.00001, 2.00002, 3.00003]) + y = np.array([1.00002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg, verbose=False) + b = '\nItems are not equal: There is a mismatch' + assert_equal(a, b) + + def test_build_err_msg_custom_names(self): + x = np.array([1.00001, 2.00002, 3.00003]) + y = np.array([1.00002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg, names=('FOO', 'BAR')) + b = ('\nItems are not equal: There is a mismatch\n FOO: array([' + '1.00001, 2.00002, 3.00003])\n BAR: array([1.00002, 2.00003, ' + '3.00004])') + assert_equal(a, b) + + def test_build_err_msg_custom_precision(self): + x = np.array([1.000000001, 2.00002, 3.00003]) + y = np.array([1.000000002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg, precision=10) + b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array([' + '1.000000001, 2.00002 , 3.00003 ])\n DESIRED: array([' + '1.000000002, 2.00003 , 3.00004 ])') + assert_equal(a, b) + + +class TestEqual(TestArrayEqual): + + def _assert_func(self, *args, **kwargs): + assert_equal(*args, **kwargs) + + def test_nan_items(self): + self._assert_func(np.nan, np.nan) + self._assert_func([np.nan], [np.nan]) + self._test_not_equal(np.nan, [np.nan]) + self._test_not_equal(np.nan, 1) + + def test_inf_items(self): + self._assert_func(np.inf, np.inf) + self._assert_func([np.inf], [np.inf]) + self._test_not_equal(np.inf, [np.inf]) + + def test_datetime(self): + self._test_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-01", "s") + ) + self._test_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-01", "m") + ) + + # gh-10081 + self._test_not_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-02", "s") + ) + self._test_not_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-02", "m") + ) + + def test_nat_items(self): + # not a datetime + nadt_no_unit = np.datetime64("NaT") + nadt_s = np.datetime64("NaT", "s") + nadt_d = np.datetime64("NaT", "ns") + # not a timedelta + natd_no_unit = np.timedelta64("NaT") + natd_s = np.timedelta64("NaT", "s") + natd_d = np.timedelta64("NaT", "ns") + + dts = [nadt_no_unit, nadt_s, nadt_d] + tds = [natd_no_unit, natd_s, natd_d] + for a, b in itertools.product(dts, dts): + self._assert_func(a, b) + self._assert_func([a], [b]) + self._test_not_equal([a], b) + + for a, b in itertools.product(tds, tds): + self._assert_func(a, b) + self._assert_func([a], [b]) + self._test_not_equal([a], b) + + for a, b in itertools.product(tds, dts): + self._test_not_equal(a, b) + self._test_not_equal(a, [b]) + self._test_not_equal([a], [b]) + self._test_not_equal([a], np.datetime64("2017-01-01", "s")) + self._test_not_equal([b], np.datetime64("2017-01-01", "s")) + self._test_not_equal([a], np.timedelta64(123, "s")) + self._test_not_equal([b], np.timedelta64(123, "s")) + + def test_non_numeric(self): + self._assert_func('ab', 'ab') + self._test_not_equal('ab', 'abb') + + def test_complex_item(self): + self._assert_func(complex(1, 2), complex(1, 2)) + self._assert_func(complex(1, np.nan), complex(1, np.nan)) + self._test_not_equal(complex(1, np.nan), complex(1, 2)) + self._test_not_equal(complex(np.nan, 1), complex(1, np.nan)) + self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2)) + + def test_negative_zero(self): + self._test_not_equal(ncu.PZERO, ncu.NZERO) + + def test_complex(self): + x = np.array([complex(1, 2), complex(1, np.nan)]) + y = np.array([complex(1, 2), complex(1, 2)]) + self._assert_func(x, x) + self._test_not_equal(x, y) + + def test_object(self): + # gh-12942 + import datetime + a = np.array([datetime.datetime(2000, 1, 1), + datetime.datetime(2000, 1, 2)]) + self._test_not_equal(a, a[::-1]) + + +class TestArrayAlmostEqual(_GenericTest): + + def _assert_func(self, *args, **kwargs): + assert_array_almost_equal(*args, **kwargs) + + def test_closeness(self): + # Note that in the course of time we ended up with + # `abs(x - y) < 1.5 * 10**(-decimal)` + # instead of the previously documented + # `abs(x - y) < 0.5 * 10**(-decimal)` + # so this check serves to preserve the wrongness. + + # test scalars + expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Max absolute difference among violations: 1.5\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(1.5, 0.0, decimal=0) + + # test arrays + self._assert_func([1.499999], [0.0], decimal=0) + + expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Mismatch at index:\n' + ' [0]: 1.5 (ACTUAL), 0.0 (DESIRED)\n' + 'Max absolute difference among violations: 1.5\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func([1.5], [0.0], decimal=0) + + a = [1.4999999, 0.00003] + b = [1.49999991, 0] + expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 3e-05 (ACTUAL), 0.0 (DESIRED)\n' + 'Max absolute difference among violations: 3.e-05\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(a, b, decimal=7) + + expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 0.0 (ACTUAL), 3e-05 (DESIRED)\n' + 'Max absolute difference among violations: 3.e-05\n' + 'Max relative difference among violations: 1.') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(b, a, decimal=7) + + def test_simple(self): + x = np.array([1234.2222]) + y = np.array([1234.2223]) + + self._assert_func(x, y, decimal=3) + self._assert_func(x, y, decimal=4) + + expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Mismatch at index:\n' + ' [0]: 1234.2222 (ACTUAL), 1234.2223 (DESIRED)\n' + 'Max absolute difference among violations: ' + '1.e-04\n' + 'Max relative difference among violations: ' + '8.10226812e-08') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(x, y, decimal=5) + + def test_array_vs_scalar(self): + a = [5498.42354, 849.54345, 0.00] + b = 5498.42354 + expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n' + 'Mismatch at indices:\n' + ' [1]: 849.54345 (ACTUAL), 5498.42354 (DESIRED)\n' + ' [2]: 0.0 (ACTUAL), 5498.42354 (DESIRED)\n' + 'Max absolute difference among violations: ' + '5498.42354\n' + 'Max relative difference among violations: 1.') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(a, b, decimal=9) + + expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n' + 'Mismatch at indices:\n' + ' [1]: 5498.42354 (ACTUAL), 849.54345 (DESIRED)\n' + ' [2]: 5498.42354 (ACTUAL), 0.0 (DESIRED)\n' + 'Max absolute difference among violations: ' + '5498.42354\n' + 'Max relative difference among violations: 5.4722099') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(b, a, decimal=9) + + a = [5498.42354, 0.00] + expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 5498.42354 (ACTUAL), 0.0 (DESIRED)\n' + 'Max absolute difference among violations: ' + '5498.42354\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(b, a, decimal=7) + + b = 0 + expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [0]: 5498.42354 (ACTUAL), 0 (DESIRED)\n' + 'Max absolute difference among violations: ' + '5498.42354\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(a, b, decimal=7) + + def test_nan(self): + anan = np.array([np.nan]) + aone = np.array([1]) + ainf = np.array([np.inf]) + self._assert_func(anan, anan) + assert_raises(AssertionError, + lambda: self._assert_func(anan, aone)) + assert_raises(AssertionError, + lambda: self._assert_func(anan, ainf)) + assert_raises(AssertionError, + lambda: self._assert_func(ainf, anan)) + + def test_inf(self): + a = np.array([[1., 2.], [3., 4.]]) + b = a.copy() + a[0, 0] = np.inf + assert_raises(AssertionError, + lambda: self._assert_func(a, b)) + b[0, 0] = -np.inf + assert_raises(AssertionError, + lambda: self._assert_func(a, b)) + + def test_complex_inf(self): + a = np.array([np.inf + 1.j, 2. + 1.j, 3. + 1.j]) + b = a.copy() + self._assert_func(a, b) + b[1] = 3. + 1.j + expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n' + 'Mismatch at index:\n' + ' [1]: (2+1j) (ACTUAL), (3+1j) (DESIRED)\n' + 'Max absolute difference among violations: 1.\n') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(a, b) + + def test_subclass(self): + a = np.array([[1., 2.], [3., 4.]]) + b = np.ma.masked_array([[1., 2.], [0., 4.]], + [[False, False], [True, False]]) + self._assert_func(a, b) + self._assert_func(b, a) + self._assert_func(b, b) + + # Test fully masked as well (see gh-11123). + a = np.ma.MaskedArray(3.5, mask=True) + b = np.array([3., 4., 6.5]) + self._test_equal(a, b) + self._test_equal(b, a) + a = np.ma.masked + b = np.array([3., 4., 6.5]) + self._test_equal(a, b) + self._test_equal(b, a) + a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True]) + b = np.array([1., 2., 3.]) + self._test_equal(a, b) + self._test_equal(b, a) + a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True]) + b = np.array(1.) + self._test_equal(a, b) + self._test_equal(b, a) + + def test_subclass_2(self): + # While we cannot guarantee testing functions will always work for + # subclasses, the tests should ideally rely only on subclasses having + # comparison operators, not on them being able to store booleans + # (which, e.g., astropy Quantity cannot usefully do). See gh-8452. + class MyArray(np.ndarray): + def __eq__(self, other): + return super().__eq__(other).view(np.ndarray) + + def __lt__(self, other): + return super().__lt__(other).view(np.ndarray) + + def all(self, *args, **kwargs): + return all(self) + + a = np.array([1., 2.]).view(MyArray) + self._assert_func(a, a) + + z = np.array([True, True]).view(MyArray) + all(z) + b = np.array([1., 202]).view(MyArray) + expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 2.0 (ACTUAL), 202.0 (DESIRED)\n' + 'Max absolute difference among violations: 200.\n' + 'Max relative difference among violations: 0.99009') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(a, b) + + def test_subclass_that_cannot_be_bool(self): + # While we cannot guarantee testing functions will always work for + # subclasses, the tests should ideally rely only on subclasses having + # comparison operators, not on them being able to store booleans + # (which, e.g., astropy Quantity cannot usefully do). See gh-8452. + class MyArray(np.ndarray): + def __eq__(self, other): + return super().__eq__(other).view(np.ndarray) + + def __lt__(self, other): + return super().__lt__(other).view(np.ndarray) + + def all(self, *args, **kwargs): + raise NotImplementedError + + a = np.array([1., 2.]).view(MyArray) + self._assert_func(a, a) + + +class TestAlmostEqual(_GenericTest): + + def _assert_func(self, *args, **kwargs): + assert_almost_equal(*args, **kwargs) + + def test_closeness(self): + # Note that in the course of time we ended up with + # `abs(x - y) < 1.5 * 10**(-decimal)` + # instead of the previously documented + # `abs(x - y) < 0.5 * 10**(-decimal)` + # so this check serves to preserve the wrongness. + + # test scalars + self._assert_func(1.499999, 0.0, decimal=0) + assert_raises(AssertionError, + lambda: self._assert_func(1.5, 0.0, decimal=0)) + + # test arrays + self._assert_func([1.499999], [0.0], decimal=0) + assert_raises(AssertionError, + lambda: self._assert_func([1.5], [0.0], decimal=0)) + + def test_nan_item(self): + self._assert_func(np.nan, np.nan) + assert_raises(AssertionError, + lambda: self._assert_func(np.nan, 1)) + assert_raises(AssertionError, + lambda: self._assert_func(np.nan, np.inf)) + assert_raises(AssertionError, + lambda: self._assert_func(np.inf, np.nan)) + + def test_inf_item(self): + self._assert_func(np.inf, np.inf) + self._assert_func(-np.inf, -np.inf) + assert_raises(AssertionError, + lambda: self._assert_func(np.inf, 1)) + assert_raises(AssertionError, + lambda: self._assert_func(-np.inf, np.inf)) + + def test_simple_item(self): + self._test_not_equal(1, 2) + + def test_complex_item(self): + self._assert_func(complex(1, 2), complex(1, 2)) + self._assert_func(complex(1, np.nan), complex(1, np.nan)) + self._assert_func(complex(np.inf, np.nan), complex(np.inf, np.nan)) + self._test_not_equal(complex(1, np.nan), complex(1, 2)) + self._test_not_equal(complex(np.nan, 1), complex(1, np.nan)) + self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2)) + + def test_complex(self): + x = np.array([complex(1, 2), complex(1, np.nan)]) + z = np.array([complex(1, 2), complex(np.nan, 1)]) + y = np.array([complex(1, 2), complex(1, 2)]) + self._assert_func(x, x) + self._test_not_equal(x, y) + self._test_not_equal(x, z) + + def test_error_message(self): + """Check the message is formatted correctly for the decimal value. + Also check the message when input includes inf or nan (gh12200)""" + x = np.array([1.00000000001, 2.00000000002, 3.00003]) + y = np.array([1.00000000002, 2.00000000003, 3.00004]) + + # Test with a different amount of decimal digits + expected_msg = ('Mismatched elements: 3 / 3 (100%)\n' + 'Mismatch at indices:\n' + ' [0]: 1.00000000001 (ACTUAL), 1.00000000002 (DESIRED)\n' + ' [1]: 2.00000000002 (ACTUAL), 2.00000000003 (DESIRED)\n' + ' [2]: 3.00003 (ACTUAL), 3.00004 (DESIRED)\n' + 'Max absolute difference among violations: 1.e-05\n' + 'Max relative difference among violations: ' + '3.33328889e-06\n' + ' ACTUAL: array([1.00000000001, ' + '2.00000000002, ' + '3.00003 ])\n' + ' DESIRED: array([1.00000000002, 2.00000000003, ' + '3.00004 ])') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(x, y, decimal=12) + + # With the default value of decimal digits, only the 3rd element + # differs. Note that we only check for the formatting of the arrays + # themselves. + expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n' + 'Mismatch at index:\n' + ' [2]: 3.00003 (ACTUAL), 3.00004 (DESIRED)\n' + 'Max absolute difference among violations: 1.e-05\n' + 'Max relative difference among violations: ' + '3.33328889e-06\n' + ' ACTUAL: array([1. , 2. , 3.00003])\n' + ' DESIRED: array([1. , 2. , 3.00004])') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(x, y) + + # Check the error message when input includes inf + x = np.array([np.inf, 0]) + y = np.array([np.inf, 1]) + expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Mismatch at index:\n' + ' [1]: 0.0 (ACTUAL), 1.0 (DESIRED)\n' + 'Max absolute difference among violations: 1.\n' + 'Max relative difference among violations: 1.\n' + ' ACTUAL: array([inf, 0.])\n' + ' DESIRED: array([inf, 1.])') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(x, y) + + # Check the error message when dividing by zero + x = np.array([1, 2]) + y = np.array([0, 0]) + expected_msg = ('Mismatched elements: 2 / 2 (100%)\n' + 'Mismatch at indices:\n' + ' [0]: 1 (ACTUAL), 0 (DESIRED)\n' + ' [1]: 2 (ACTUAL), 0 (DESIRED)\n' + 'Max absolute difference among violations: 2\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(x, y) + + def test_error_message_2(self): + """Check the message is formatted correctly """ + """when either x or y is a scalar.""" + x = 2 + y = np.ones(20) + expected_msg = ('Mismatched elements: 20 / 20 (100%)\n' + 'First 5 mismatches are at indices:\n' + ' [0]: 2 (ACTUAL), 1.0 (DESIRED)\n' + ' [1]: 2 (ACTUAL), 1.0 (DESIRED)\n' + ' [2]: 2 (ACTUAL), 1.0 (DESIRED)\n' + ' [3]: 2 (ACTUAL), 1.0 (DESIRED)\n' + ' [4]: 2 (ACTUAL), 1.0 (DESIRED)\n' + 'Max absolute difference among violations: 1.\n' + 'Max relative difference among violations: 1.') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(x, y) + + y = 2 + x = np.ones(20) + expected_msg = ('Mismatched elements: 20 / 20 (100%)\n' + 'First 5 mismatches are at indices:\n' + ' [0]: 1.0 (ACTUAL), 2 (DESIRED)\n' + ' [1]: 1.0 (ACTUAL), 2 (DESIRED)\n' + ' [2]: 1.0 (ACTUAL), 2 (DESIRED)\n' + ' [3]: 1.0 (ACTUAL), 2 (DESIRED)\n' + ' [4]: 1.0 (ACTUAL), 2 (DESIRED)\n' + 'Max absolute difference among violations: 1.\n' + 'Max relative difference among violations: 0.5') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(x, y) + + def test_subclass_that_cannot_be_bool(self): + # While we cannot guarantee testing functions will always work for + # subclasses, the tests should ideally rely only on subclasses having + # comparison operators, not on them being able to store booleans + # (which, e.g., astropy Quantity cannot usefully do). See gh-8452. + class MyArray(np.ndarray): + def __eq__(self, other): + return super().__eq__(other).view(np.ndarray) + + def __lt__(self, other): + return super().__lt__(other).view(np.ndarray) + + def all(self, *args, **kwargs): + raise NotImplementedError + + a = np.array([1., 2.]).view(MyArray) + self._assert_func(a, a) + + +class TestApproxEqual: + + def _assert_func(self, *args, **kwargs): + assert_approx_equal(*args, **kwargs) + + def test_simple_0d_arrays(self): + x = np.array(1234.22) + y = np.array(1234.23) + + self._assert_func(x, y, significant=5) + self._assert_func(x, y, significant=6) + assert_raises(AssertionError, + lambda: self._assert_func(x, y, significant=7)) + + def test_simple_items(self): + x = 1234.22 + y = 1234.23 + + self._assert_func(x, y, significant=4) + self._assert_func(x, y, significant=5) + self._assert_func(x, y, significant=6) + assert_raises(AssertionError, + lambda: self._assert_func(x, y, significant=7)) + + def test_nan_array(self): + anan = np.array(np.nan) + aone = np.array(1) + ainf = np.array(np.inf) + self._assert_func(anan, anan) + assert_raises(AssertionError, lambda: self._assert_func(anan, aone)) + assert_raises(AssertionError, lambda: self._assert_func(anan, ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, anan)) + + def test_nan_items(self): + anan = np.array(np.nan) + aone = np.array(1) + ainf = np.array(np.inf) + self._assert_func(anan, anan) + assert_raises(AssertionError, lambda: self._assert_func(anan, aone)) + assert_raises(AssertionError, lambda: self._assert_func(anan, ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, anan)) + + +class TestArrayAssertLess: + + def _assert_func(self, *args, **kwargs): + assert_array_less(*args, **kwargs) + + def test_simple_arrays(self): + x = np.array([1.1, 2.2]) + y = np.array([1.2, 2.3]) + + self._assert_func(x, y) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + y = np.array([1.0, 2.3]) + + assert_raises(AssertionError, lambda: self._assert_func(x, y)) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + a = np.array([1, 3, 6, 20]) + b = np.array([2, 4, 6, 8]) + + expected_msg = ('Mismatched elements: 2 / 4 (50%)\n' + 'Mismatch at indices:\n' + ' [2]: 6 (x), 6 (y)\n' + ' [3]: 20 (x), 8 (y)\n' + 'Max absolute difference among violations: 12\n' + 'Max relative difference among violations: 1.5') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(a, b) + + def test_rank2(self): + x = np.array([[1.1, 2.2], [3.3, 4.4]]) + y = np.array([[1.2, 2.3], [3.4, 4.5]]) + + self._assert_func(x, y) + expected_msg = ('Mismatched elements: 4 / 4 (100%)\n' + 'Mismatch at indices:\n' + ' [0, 0]: 1.2 (x), 1.1 (y)\n' + ' [0, 1]: 2.3 (x), 2.2 (y)\n' + ' [1, 0]: 3.4 (x), 3.3 (y)\n' + ' [1, 1]: 4.5 (x), 4.4 (y)\n' + 'Max absolute difference among violations: 0.1\n' + 'Max relative difference among violations: 0.09090909') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(y, x) + + y = np.array([[1.0, 2.3], [3.4, 4.5]]) + assert_raises(AssertionError, lambda: self._assert_func(x, y)) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + def test_rank3(self): + x = np.ones(shape=(2, 2, 2)) + y = np.ones(shape=(2, 2, 2)) + 1 + + self._assert_func(x, y) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + y[0, 0, 0] = 0 + expected_msg = ('Mismatched elements: 1 / 8 (12.5%)\n' + 'Mismatch at index:\n' + ' [0, 0, 0]: 1.0 (x), 0.0 (y)\n' + 'Max absolute difference among violations: 1.\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(x, y) + + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + def test_simple_items(self): + x = 1.1 + y = 2.2 + + self._assert_func(x, y) + expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Max absolute difference among violations: 1.1\n' + 'Max relative difference among violations: 1.') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(y, x) + + y = np.array([2.2, 3.3]) + + self._assert_func(x, y) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + y = np.array([1.0, 3.3]) + + assert_raises(AssertionError, lambda: self._assert_func(x, y)) + + def test_simple_items_and_array(self): + x = np.array([[621.345454, 390.5436, 43.54657, 626.4535], + [54.54, 627.3399, 13., 405.5435], + [543.545, 8.34, 91.543, 333.3]]) + y = 627.34 + self._assert_func(x, y) + + y = 8.339999 + self._assert_func(y, x) + + x = np.array([[3.4536, 2390.5436, 435.54657, 324525.4535], + [5449.54, 999090.54, 130303.54, 405.5435], + [543.545, 8.34, 91.543, 999090.53999]]) + y = 999090.54 + + expected_msg = ('Mismatched elements: 1 / 12 (8.33%)\n' + 'Mismatch at index:\n' + ' [1, 1]: 999090.54 (x), 999090.54 (y)\n' + 'Max absolute difference among violations: 0.\n' + 'Max relative difference among violations: 0.') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(x, y) + + expected_msg = ('Mismatched elements: 12 / 12 (100%)\n' + 'First 5 mismatches are at indices:\n' + ' [0, 0]: 999090.54 (x), 3.4536 (y)\n' + ' [0, 1]: 999090.54 (x), 2390.5436 (y)\n' + ' [0, 2]: 999090.54 (x), 435.54657 (y)\n' + ' [0, 3]: 999090.54 (x), 324525.4535 (y)\n' + ' [1, 0]: 999090.54 (x), 5449.54 (y)\n' + 'Max absolute difference among violations: ' + '999087.0864\n' + 'Max relative difference among violations: ' + '289288.5934676') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(y, x) + + def test_zeroes(self): + x = np.array([546456., 0, 15.455]) + y = np.array(87654.) + + expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n' + 'Mismatch at index:\n' + ' [0]: 546456.0 (x), 87654.0 (y)\n' + 'Max absolute difference among violations: 458802.\n' + 'Max relative difference among violations: 5.23423917') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(x, y) + + expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n' + 'Mismatch at indices:\n' + ' [1]: 87654.0 (x), 0.0 (y)\n' + ' [2]: 87654.0 (x), 15.455 (y)\n' + 'Max absolute difference among violations: 87654.\n' + 'Max relative difference among violations: ' + '5670.5626011') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(y, x) + + y = 0 + + expected_msg = ('Mismatched elements: 3 / 3 (100%)\n' + 'Mismatch at indices:\n' + ' [0]: 546456.0 (x), 0 (y)\n' + ' [1]: 0.0 (x), 0 (y)\n' + ' [2]: 15.455 (x), 0 (y)\n' + 'Max absolute difference among violations: 546456.\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(x, y) + + expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n' + 'Mismatch at index:\n' + ' [1]: 0 (x), 0.0 (y)\n' + 'Max absolute difference among violations: 0.\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(y, x) + + def test_nan_noncompare(self): + anan = np.array(np.nan) + aone = np.array(1) + ainf = np.array(np.inf) + self._assert_func(anan, anan) + assert_raises(AssertionError, lambda: self._assert_func(aone, anan)) + assert_raises(AssertionError, lambda: self._assert_func(anan, aone)) + assert_raises(AssertionError, lambda: self._assert_func(anan, ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, anan)) + + def test_nan_noncompare_array(self): + x = np.array([1.1, 2.2, 3.3]) + anan = np.array(np.nan) + + assert_raises(AssertionError, lambda: self._assert_func(x, anan)) + assert_raises(AssertionError, lambda: self._assert_func(anan, x)) + + x = np.array([1.1, 2.2, np.nan]) + + assert_raises(AssertionError, lambda: self._assert_func(x, anan)) + assert_raises(AssertionError, lambda: self._assert_func(anan, x)) + + y = np.array([1.0, 2.0, np.nan]) + + self._assert_func(y, x) + assert_raises(AssertionError, lambda: self._assert_func(x, y)) + + def test_inf_compare(self): + aone = np.array(1) + ainf = np.array(np.inf) + + self._assert_func(aone, ainf) + self._assert_func(-ainf, aone) + self._assert_func(-ainf, ainf) + assert_raises(AssertionError, lambda: self._assert_func(ainf, aone)) + assert_raises(AssertionError, lambda: self._assert_func(aone, -ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, -ainf)) + assert_raises(AssertionError, lambda: self._assert_func(-ainf, -ainf)) + + def test_inf_compare_array(self): + x = np.array([1.1, 2.2, np.inf]) + ainf = np.array(np.inf) + + assert_raises(AssertionError, lambda: self._assert_func(x, ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, x)) + assert_raises(AssertionError, lambda: self._assert_func(x, -ainf)) + assert_raises(AssertionError, lambda: self._assert_func(-x, -ainf)) + assert_raises(AssertionError, lambda: self._assert_func(-ainf, -x)) + self._assert_func(-ainf, x) + + def test_strict(self): + """Test the behavior of the `strict` option.""" + x = np.zeros(3) + y = np.ones(()) + self._assert_func(x, y) + with pytest.raises(AssertionError): + self._assert_func(x, y, strict=True) + y = np.broadcast_to(y, x.shape) + self._assert_func(x, y) + with pytest.raises(AssertionError): + self._assert_func(x, y.astype(np.float32), strict=True) + +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") +@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings") +class TestWarns: + + def test_warn(self): + def f(): + warnings.warn("yo") + return 3 + + before_filters = sys.modules['warnings'].filters[:] + assert_equal(assert_warns(UserWarning, f), 3) + after_filters = sys.modules['warnings'].filters + + assert_raises(AssertionError, assert_no_warnings, f) + assert_equal(assert_no_warnings(lambda x: x, 1), 1) + + # Check that the warnings state is unchanged + assert_equal(before_filters, after_filters, + "assert_warns does not preserver warnings state") + + def test_context_manager(self): + + before_filters = sys.modules['warnings'].filters[:] + with assert_warns(UserWarning): + warnings.warn("yo") + after_filters = sys.modules['warnings'].filters + + def no_warnings(): + with assert_no_warnings(): + warnings.warn("yo") + + assert_raises(AssertionError, no_warnings) + assert_equal(before_filters, after_filters, + "assert_warns does not preserver warnings state") + + def test_args(self): + def f(a=0, b=1): + warnings.warn("yo") + return a + b + + assert assert_warns(UserWarning, f, b=20) == 20 + + with pytest.raises(RuntimeError) as exc: + # assert_warns cannot do regexp matching, use pytest.warns + with assert_warns(UserWarning, match="A"): + warnings.warn("B", UserWarning) + assert "assert_warns" in str(exc) + assert "pytest.warns" in str(exc) + + with pytest.raises(RuntimeError) as exc: + # assert_warns cannot do regexp matching, use pytest.warns + with assert_warns(UserWarning, wrong="A"): + warnings.warn("B", UserWarning) + assert "assert_warns" in str(exc) + assert "pytest.warns" not in str(exc) + + def test_warn_wrong_warning(self): + def f(): + warnings.warn("yo", DeprecationWarning) + + failed = False + with warnings.catch_warnings(): + warnings.simplefilter("error", DeprecationWarning) + try: + # Should raise a DeprecationWarning + assert_warns(UserWarning, f) + failed = True + except DeprecationWarning: + pass + + if failed: + raise AssertionError("wrong warning caught by assert_warn") + + +class TestAssertAllclose: + + def test_simple(self): + x = 1e-3 + y = 1e-9 + + assert_allclose(x, y, atol=1) + assert_raises(AssertionError, assert_allclose, x, y) + + expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Max absolute difference among violations: 0.001\n' + 'Max relative difference among violations: 999999.') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(x, y) + + z = 0 + expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Max absolute difference among violations: 1.e-09\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(y, z) + + expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Max absolute difference among violations: 1.e-09\n' + 'Max relative difference among violations: 1.') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(z, y) + + a = np.array([x, y, x, y]) + b = np.array([x, y, x, x]) + + assert_allclose(a, b, atol=1) + assert_raises(AssertionError, assert_allclose, a, b) + + b[-1] = y * (1 + 1e-8) + assert_allclose(a, b) + assert_raises(AssertionError, assert_allclose, a, b, rtol=1e-9) + + assert_allclose(6, 10, rtol=0.5) + assert_raises(AssertionError, assert_allclose, 10, 6, rtol=0.5) + + b = np.array([x, y, x, x]) + c = np.array([x, y, x, z]) + expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Mismatch at index:\n' + ' [3]: 0.001 (ACTUAL), 0.0 (DESIRED)\n' + 'Max absolute difference among violations: 0.001\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(b, c) + + expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Mismatch at index:\n' + ' [3]: 0.0 (ACTUAL), 0.001 (DESIRED)\n' + 'Max absolute difference among violations: 0.001\n' + 'Max relative difference among violations: 1.') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(c, b) + + def test_min_int(self): + a = np.array([np.iinfo(np.int_).min], dtype=np.int_) + # Should not raise: + assert_allclose(a, a) + + def test_report_fail_percentage(self): + a = np.array([1, 1, 1, 1]) + b = np.array([1, 1, 1, 2]) + + expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Mismatch at index:\n' + ' [3]: 1 (ACTUAL), 2 (DESIRED)\n' + 'Max absolute difference among violations: 1\n' + 'Max relative difference among violations: 0.5') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) + + def test_equal_nan(self): + a = np.array([np.nan]) + b = np.array([np.nan]) + # Should not raise: + assert_allclose(a, b, equal_nan=True) + + a = np.array([complex(np.nan, np.inf)]) + b = np.array([complex(np.nan, np.inf)]) + assert_allclose(a, b, equal_nan=True) + b = np.array([complex(np.nan, -np.inf)]) + assert_allclose(a, b, equal_nan=True) + + def test_not_equal_nan(self): + a = np.array([np.nan]) + b = np.array([np.nan]) + assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False) + + a = np.array([complex(np.nan, np.inf)]) + b = np.array([complex(np.nan, np.inf)]) + assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False) + + def test_equal_nan_default(self): + # Make sure equal_nan default behavior remains unchanged. (All + # of these functions use assert_array_compare under the hood.) + # None of these should raise. + a = np.array([np.nan]) + b = np.array([np.nan]) + assert_array_equal(a, b) + assert_array_almost_equal(a, b) + assert_array_less(a, b) + assert_allclose(a, b) + + def test_report_max_relative_error(self): + a = np.array([0, 1]) + b = np.array([0, 2]) + + expected_msg = 'Max relative difference among violations: 0.5' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) + + def test_timedelta(self): + # see gh-18286 + a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]") + assert_allclose(a, a) + + def test_error_message_unsigned(self): + """Check the message is formatted correctly when overflow can occur + (gh21768)""" + # Ensure to test for potential overflow in the case of: + # x - y + # and + # y - x + x = np.asarray([0, 1, 8], dtype='uint8') + y = np.asarray([4, 4, 4], dtype='uint8') + expected_msg = 'Max absolute difference among violations: 4' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(x, y, atol=3) + + def test_strict(self): + """Test the behavior of the `strict` option.""" + x = np.ones(3) + y = np.ones(()) + assert_allclose(x, y) + with pytest.raises(AssertionError): + assert_allclose(x, y, strict=True) + assert_allclose(x, x) + with pytest.raises(AssertionError): + assert_allclose(x, x.astype(np.float32), strict=True) + + def test_infs(self): + a = np.array([np.inf]) + b = np.array([np.inf]) + assert_allclose(a, b) + + b = np.array([3.]) + expected_msg = 'inf location mismatch:' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) + + b = np.array([-np.inf]) + expected_msg = 'inf values mismatch:' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) + b = np.array([complex(np.inf, 1.)]) + expected_msg = 'inf values mismatch:' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) + + a = np.array([complex(np.inf, 1.)]) + b = np.array([complex(np.inf, 1.)]) + assert_allclose(a, b) + + b = np.array([complex(np.inf, 2.)]) + expected_msg = 'inf values mismatch:' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) + +class TestArrayAlmostEqualNulp: + + def test_float64_pass(self): + # The number of units of least precision + # In this case, use a few places above the lowest level (ie nulp=1) + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float64) + x = 10**x + x = np.r_[-x, x] + + # Addition + eps = np.finfo(x.dtype).eps + y = x + x * eps * nulp / 2. + assert_array_almost_equal_nulp(x, y, nulp) + + # Subtraction + epsneg = np.finfo(x.dtype).epsneg + y = x - x * epsneg * nulp / 2. + assert_array_almost_equal_nulp(x, y, nulp) + + def test_float64_fail(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float64) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x * eps * nulp * 2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x * epsneg * nulp * 2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + def test_float64_ignore_nan(self): + # Ignore ULP differences between various NAN's + # Note that MIPS may reverse quiet and signaling nans + # so we use the builtin version as a base. + offset = np.uint64(0xffffffff) + nan1_i64 = np.array(np.nan, dtype=np.float64).view(np.uint64) + nan2_i64 = nan1_i64 ^ offset # nan payload on MIPS is all ones. + nan1_f64 = nan1_i64.view(np.float64) + nan2_f64 = nan2_i64.view(np.float64) + assert_array_max_ulp(nan1_f64, nan2_f64, 0) + + def test_float32_pass(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float32) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x * eps * nulp / 2. + assert_array_almost_equal_nulp(x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x * epsneg * nulp / 2. + assert_array_almost_equal_nulp(x, y, nulp) + + def test_float32_fail(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float32) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x * eps * nulp * 2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x * epsneg * nulp * 2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + def test_float32_ignore_nan(self): + # Ignore ULP differences between various NAN's + # Note that MIPS may reverse quiet and signaling nans + # so we use the builtin version as a base. + offset = np.uint32(0xffff) + nan1_i32 = np.array(np.nan, dtype=np.float32).view(np.uint32) + nan2_i32 = nan1_i32 ^ offset # nan payload on MIPS is all ones. + nan1_f32 = nan1_i32.view(np.float32) + nan2_f32 = nan2_i32.view(np.float32) + assert_array_max_ulp(nan1_f32, nan2_f32, 0) + + def test_float16_pass(self): + nulp = 5 + x = np.linspace(-4, 4, 10, dtype=np.float16) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x * eps * nulp / 2. + assert_array_almost_equal_nulp(x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x * epsneg * nulp / 2. + assert_array_almost_equal_nulp(x, y, nulp) + + def test_float16_fail(self): + nulp = 5 + x = np.linspace(-4, 4, 10, dtype=np.float16) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x * eps * nulp * 2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x * epsneg * nulp * 2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + def test_float16_ignore_nan(self): + # Ignore ULP differences between various NAN's + # Note that MIPS may reverse quiet and signaling nans + # so we use the builtin version as a base. + offset = np.uint16(0xff) + nan1_i16 = np.array(np.nan, dtype=np.float16).view(np.uint16) + nan2_i16 = nan1_i16 ^ offset # nan payload on MIPS is all ones. + nan1_f16 = nan1_i16.view(np.float16) + nan2_f16 = nan2_i16.view(np.float16) + assert_array_max_ulp(nan1_f16, nan2_f16, 0) + + def test_complex128_pass(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float64) + x = 10**x + x = np.r_[-x, x] + xi = x + x * 1j + + eps = np.finfo(x.dtype).eps + y = x + x * eps * nulp / 2. + assert_array_almost_equal_nulp(xi, x + y * 1j, nulp) + assert_array_almost_equal_nulp(xi, y + x * 1j, nulp) + # The test condition needs to be at least a factor of sqrt(2) smaller + # because the real and imaginary parts both change + y = x + x * eps * nulp / 4. + assert_array_almost_equal_nulp(xi, y + y * 1j, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x * epsneg * nulp / 2. + assert_array_almost_equal_nulp(xi, x + y * 1j, nulp) + assert_array_almost_equal_nulp(xi, y + x * 1j, nulp) + y = x - x * epsneg * nulp / 4. + assert_array_almost_equal_nulp(xi, y + y * 1j, nulp) + + def test_complex128_fail(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float64) + x = 10**x + x = np.r_[-x, x] + xi = x + x * 1j + + eps = np.finfo(x.dtype).eps + y = x + x * eps * nulp * 2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, x + y * 1j, nulp) + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + x * 1j, nulp) + # The test condition needs to be at least a factor of sqrt(2) smaller + # because the real and imaginary parts both change + y = x + x * eps * nulp + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + y * 1j, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x * epsneg * nulp * 2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, x + y * 1j, nulp) + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + x * 1j, nulp) + y = x - x * epsneg * nulp + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + y * 1j, nulp) + + def test_complex64_pass(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float32) + x = 10**x + x = np.r_[-x, x] + xi = x + x * 1j + + eps = np.finfo(x.dtype).eps + y = x + x * eps * nulp / 2. + assert_array_almost_equal_nulp(xi, x + y * 1j, nulp) + assert_array_almost_equal_nulp(xi, y + x * 1j, nulp) + y = x + x * eps * nulp / 4. + assert_array_almost_equal_nulp(xi, y + y * 1j, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x * epsneg * nulp / 2. + assert_array_almost_equal_nulp(xi, x + y * 1j, nulp) + assert_array_almost_equal_nulp(xi, y + x * 1j, nulp) + y = x - x * epsneg * nulp / 4. + assert_array_almost_equal_nulp(xi, y + y * 1j, nulp) + + def test_complex64_fail(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float32) + x = 10**x + x = np.r_[-x, x] + xi = x + x * 1j + + eps = np.finfo(x.dtype).eps + y = x + x * eps * nulp * 2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, x + y * 1j, nulp) + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + x * 1j, nulp) + y = x + x * eps * nulp + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + y * 1j, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x * epsneg * nulp * 2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, x + y * 1j, nulp) + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + x * 1j, nulp) + y = x - x * epsneg * nulp + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + y * 1j, nulp) + + +class TestULP: + + def test_equal(self): + x = np.random.randn(10) + assert_array_max_ulp(x, x, maxulp=0) + + def test_single(self): + # Generate 1 + small deviation, check that adding eps gives a few UNL + x = np.ones(10).astype(np.float32) + x += 0.01 * np.random.randn(10).astype(np.float32) + eps = np.finfo(np.float32).eps + assert_array_max_ulp(x, x + eps, maxulp=20) + + def test_double(self): + # Generate 1 + small deviation, check that adding eps gives a few UNL + x = np.ones(10).astype(np.float64) + x += 0.01 * np.random.randn(10).astype(np.float64) + eps = np.finfo(np.float64).eps + assert_array_max_ulp(x, x + eps, maxulp=200) + + def test_inf(self): + for dt in [np.float32, np.float64]: + inf = np.array([np.inf]).astype(dt) + big = np.array([np.finfo(dt).max]) + assert_array_max_ulp(inf, big, maxulp=200) + + def test_nan(self): + # Test that nan is 'far' from small, tiny, inf, max and min + for dt in [np.float32, np.float64]: + if dt == np.float32: + maxulp = 1e6 + else: + maxulp = 1e12 + inf = np.array([np.inf]).astype(dt) + nan = np.array([np.nan]).astype(dt) + big = np.array([np.finfo(dt).max]) + tiny = np.array([np.finfo(dt).tiny]) + zero = np.array([0.0]).astype(dt) + nzero = np.array([-0.0]).astype(dt) + assert_raises(AssertionError, + lambda: assert_array_max_ulp(nan, inf, + maxulp=maxulp)) + assert_raises(AssertionError, + lambda: assert_array_max_ulp(nan, big, + maxulp=maxulp)) + assert_raises(AssertionError, + lambda: assert_array_max_ulp(nan, tiny, + maxulp=maxulp)) + assert_raises(AssertionError, + lambda: assert_array_max_ulp(nan, zero, + maxulp=maxulp)) + assert_raises(AssertionError, + lambda: assert_array_max_ulp(nan, nzero, + maxulp=maxulp)) + + +class TestStringEqual: + def test_simple(self): + assert_string_equal("hello", "hello") + assert_string_equal("hello\nmultiline", "hello\nmultiline") + + with pytest.raises(AssertionError) as exc_info: + assert_string_equal("foo\nbar", "hello\nbar") + msg = str(exc_info.value) + assert_equal(msg, "Differences in strings:\n- foo\n+ hello") + + assert_raises(AssertionError, + lambda: assert_string_equal("foo", "hello")) + + def test_regex(self): + assert_string_equal("a+*b", "a+*b") + + assert_raises(AssertionError, + lambda: assert_string_equal("aaa", "a+b")) + + +def assert_warn_len_equal(mod, n_in_context): + try: + mod_warns = mod.__warningregistry__ + except AttributeError: + # the lack of a __warningregistry__ + # attribute means that no warning has + # occurred; this can be triggered in + # a parallel test scenario, while in + # a serial test scenario an initial + # warning (and therefore the attribute) + # are always created first + mod_warns = {} + + num_warns = len(mod_warns) + + if 'version' in mod_warns: + # Python adds a 'version' entry to the registry, + # do not count it. + num_warns -= 1 + + assert_equal(num_warns, n_in_context) + + +def test_warn_len_equal_call_scenarios(): + # assert_warn_len_equal is called under + # varying circumstances depending on serial + # vs. parallel test scenarios; this test + # simply aims to probe both code paths and + # check that no assertion is uncaught + + # parallel scenario -- no warning issued yet + class mod: + pass + + mod_inst = mod() + + assert_warn_len_equal(mod=mod_inst, + n_in_context=0) + + # serial test scenario -- the __warningregistry__ + # attribute should be present + class mod: + def __init__(self): + self.__warningregistry__ = {'warning1': 1, + 'warning2': 2} + + mod_inst = mod() + assert_warn_len_equal(mod=mod_inst, + n_in_context=2) + + +def _get_fresh_mod(): + # Get this module, with warning registry empty + my_mod = sys.modules[__name__] + try: + my_mod.__warningregistry__.clear() + except AttributeError: + # will not have a __warningregistry__ unless warning has been + # raised in the module at some point + pass + return my_mod + + +@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings") +def test_clear_and_catch_warnings(): + # Initial state of module, no warnings + my_mod = _get_fresh_mod() + assert_equal(getattr(my_mod, '__warningregistry__', {}), {}) + with clear_and_catch_warnings(modules=[my_mod]): + warnings.simplefilter('ignore') + warnings.warn('Some warning') + assert_equal(my_mod.__warningregistry__, {}) + # Without specified modules, don't clear warnings during context. + # catch_warnings doesn't make an entry for 'ignore'. + with clear_and_catch_warnings(): + warnings.simplefilter('ignore') + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + + # Manually adding two warnings to the registry: + my_mod.__warningregistry__ = {'warning1': 1, + 'warning2': 2} + + # Confirm that specifying module keeps old warning, does not add new + with clear_and_catch_warnings(modules=[my_mod]): + warnings.simplefilter('ignore') + warnings.warn('Another warning') + assert_warn_len_equal(my_mod, 2) + + # Another warning, no module spec it clears up registry + with clear_and_catch_warnings(): + warnings.simplefilter('ignore') + warnings.warn('Another warning') + assert_warn_len_equal(my_mod, 0) + + +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") +@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings") +def test_suppress_warnings_module(): + # Initial state of module, no warnings + my_mod = _get_fresh_mod() + assert_equal(getattr(my_mod, '__warningregistry__', {}), {}) + + def warn_other_module(): + # Apply along axis is implemented in python; stacklevel=2 means + # we end up inside its module, not ours. + def warn(arr): + warnings.warn("Some warning 2", stacklevel=2) + return arr + np.apply_along_axis(warn, 0, [0]) + + # Test module based warning suppression: + assert_warn_len_equal(my_mod, 0) + with suppress_warnings() as sup: + sup.record(UserWarning) + # suppress warning from other module (may have .pyc ending), + # if apply_along_axis is moved, had to be changed. + sup.filter(module=np.lib._shape_base_impl) + warnings.warn("Some warning") + warn_other_module() + # Check that the suppression did test the file correctly (this module + # got filtered) + assert_equal(len(sup.log), 1) + assert_equal(sup.log[0].message.args[0], "Some warning") + assert_warn_len_equal(my_mod, 0) + sup = suppress_warnings() + # Will have to be changed if apply_along_axis is moved: + sup.filter(module=my_mod) + with sup: + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + # And test repeat works: + sup.filter(module=my_mod) + with sup: + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + + # Without specified modules + with suppress_warnings(): + warnings.simplefilter('ignore') + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + + +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") +@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings") +def test_suppress_warnings_type(): + # Initial state of module, no warnings + my_mod = _get_fresh_mod() + assert_equal(getattr(my_mod, '__warningregistry__', {}), {}) + + # Test module based warning suppression: + with suppress_warnings() as sup: + sup.filter(UserWarning) + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + sup = suppress_warnings() + sup.filter(UserWarning) + with sup: + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + # And test repeat works: + sup.filter(module=my_mod) + with sup: + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + + # Without specified modules + with suppress_warnings(): + warnings.simplefilter('ignore') + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + + +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") +@pytest.mark.thread_unsafe( + reason="uses deprecated thread-unsafe warnings control utilities" +) +def test_suppress_warnings_decorate_no_record(): + sup = suppress_warnings() + sup.filter(UserWarning) + + @sup + def warn(category): + warnings.warn('Some warning', category) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + warn(UserWarning) # should be suppressed + warn(RuntimeWarning) + assert_equal(len(w), 1) + + +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") +@pytest.mark.thread_unsafe( + reason="uses deprecated thread-unsafe warnings control utilities" +) +def test_suppress_warnings_record(): + sup = suppress_warnings() + log1 = sup.record() + + with sup: + log2 = sup.record(message='Some other warning 2') + sup.filter(message='Some warning') + warnings.warn('Some warning') + warnings.warn('Some other warning') + warnings.warn('Some other warning 2') + + assert_equal(len(sup.log), 2) + assert_equal(len(log1), 1) + assert_equal(len(log2), 1) + assert_equal(log2[0].message.args[0], 'Some other warning 2') + + # Do it again, with the same context to see if some warnings survived: + with sup: + log2 = sup.record(message='Some other warning 2') + sup.filter(message='Some warning') + warnings.warn('Some warning') + warnings.warn('Some other warning') + warnings.warn('Some other warning 2') + + assert_equal(len(sup.log), 2) + assert_equal(len(log1), 1) + assert_equal(len(log2), 1) + assert_equal(log2[0].message.args[0], 'Some other warning 2') + + # Test nested: + with suppress_warnings() as sup: + sup.record() + with suppress_warnings() as sup2: + sup2.record(message='Some warning') + warnings.warn('Some warning') + warnings.warn('Some other warning') + assert_equal(len(sup2.log), 1) + # includes a DeprecationWarning for suppress_warnings + assert_equal(len(sup.log), 2) + + +@pytest.mark.filterwarnings( + "ignore:.*NumPy warning suppression and assertion utilities are deprecated" + ".*:DeprecationWarning") +@pytest.mark.thread_unsafe( + reason="uses deprecated thread-unsafe warnings control utilities" +) +def test_suppress_warnings_forwarding(): + def warn_other_module(): + # Apply along axis is implemented in python; stacklevel=2 means + # we end up inside its module, not ours. + def warn(arr): + warnings.warn("Some warning", stacklevel=2) + return arr + np.apply_along_axis(warn, 0, [0]) + + with suppress_warnings() as sup: + sup.record() + with suppress_warnings("always"): + for i in range(2): + warnings.warn("Some warning") + + # includes a DeprecationWarning for suppress_warnings + assert_equal(len(sup.log), 3) + + with suppress_warnings() as sup: + sup.record() + with suppress_warnings("location"): + for i in range(2): + warnings.warn("Some warning") + warnings.warn("Some warning") + + # includes a DeprecationWarning for suppress_warnings + assert_equal(len(sup.log), 3) + + with suppress_warnings() as sup: + sup.record() + with suppress_warnings("module"): + for i in range(2): + warnings.warn("Some warning") + warnings.warn("Some warning") + warn_other_module() + + # includes a DeprecationWarning for suppress_warnings + assert_equal(len(sup.log), 3) + + with suppress_warnings() as sup: + sup.record() + with suppress_warnings("once"): + for i in range(2): + warnings.warn("Some warning") + warnings.warn("Some other warning") + warn_other_module() + + # includes a DeprecationWarning for suppress_warnings + assert_equal(len(sup.log), 3) + + +def test_tempdir(): + with tempdir() as tdir: + fpath = os.path.join(tdir, 'tmp') + with open(fpath, 'w'): + pass + assert_(not os.path.isdir(tdir)) + + raised = False + try: + with tempdir() as tdir: + raise ValueError + except ValueError: + raised = True + assert_(raised) + assert_(not os.path.isdir(tdir)) + + +def test_temppath(): + with temppath() as fpath: + with open(fpath, 'w'): + pass + assert_(not os.path.isfile(fpath)) + + raised = False + try: + with temppath() as fpath: + raise ValueError + except ValueError: + raised = True + assert_(raised) + assert_(not os.path.isfile(fpath)) + + +class my_cacw(clear_and_catch_warnings): + + class_modules = (sys.modules[__name__],) + + +@pytest.mark.thread_unsafe(reason="checks global module & deprecated warnings") +def test_clear_and_catch_warnings_inherit(): + # Test can subclass and add default modules + my_mod = _get_fresh_mod() + with my_cacw(): + warnings.simplefilter('ignore') + warnings.warn('Some warning') + assert_equal(my_mod.__warningregistry__, {}) + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +@pytest.mark.thread_unsafe(reason="garbage collector is global state") +class TestAssertNoGcCycles: + """ Test assert_no_gc_cycles """ + + def test_passes(self): + def no_cycle(): + b = [] + b.append([]) + return b + + with assert_no_gc_cycles(): + no_cycle() + + assert_no_gc_cycles(no_cycle) + + def test_asserts(self): + def make_cycle(): + a = [] + a.append(a) + a.append(a) + return a + + with assert_raises(AssertionError): + with assert_no_gc_cycles(): + make_cycle() + + with assert_raises(AssertionError): + assert_no_gc_cycles(make_cycle) + + @pytest.mark.slow + def test_fails(self): + """ + Test that in cases where the garbage cannot be collected, we raise an + error, instead of hanging forever trying to clear it. + """ + + class ReferenceCycleInDel: + """ + An object that not only contains a reference cycle, but creates new + cycles whenever it's garbage-collected and its __del__ runs + """ + make_cycle = True + + def __init__(self): + self.cycle = self + + def __del__(self): + # break the current cycle so that `self` can be freed + self.cycle = None + + if ReferenceCycleInDel.make_cycle: + # but create a new one so that the garbage collector (GC) has more + # work to do. + ReferenceCycleInDel() + + try: + w = weakref.ref(ReferenceCycleInDel()) + try: + with assert_raises(RuntimeError): + # this will be unable to get a baseline empty garbage + assert_no_gc_cycles(lambda: None) + except AssertionError: + # the above test is only necessary if the GC actually tried to free + # our object anyway. + if w() is not None: + pytest.skip("GC does not call __del__ on cyclic objects") + raise + + finally: + # make sure that we stop creating reference cycles + ReferenceCycleInDel.make_cycle = False diff --git a/local_packages/numpy/tests/__init__.py b/local_packages/numpy/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/local_packages/numpy/tests/test__all__.py b/local_packages/numpy/tests/test__all__.py new file mode 100644 index 0000000..2dc8166 --- /dev/null +++ b/local_packages/numpy/tests/test__all__.py @@ -0,0 +1,10 @@ + +import collections + +import numpy as np + + +def test_no_duplicates_in_np__all__(): + # Regression test for gh-10198. + dups = {k: v for k, v in collections.Counter(np.__all__).items() if v > 1} + assert len(dups) == 0 diff --git a/local_packages/numpy/tests/test_configtool.py b/local_packages/numpy/tests/test_configtool.py new file mode 100644 index 0000000..917bbf5 --- /dev/null +++ b/local_packages/numpy/tests/test_configtool.py @@ -0,0 +1,51 @@ +import importlib.metadata +import os +import pathlib +import subprocess + +import pytest + +import numpy as np +import numpy._core.include +import numpy._core.lib.pkgconfig +from numpy.testing import IS_EDITABLE, IS_INSTALLED, IS_WASM, NUMPY_ROOT + +INCLUDE_DIR = NUMPY_ROOT / '_core' / 'include' +PKG_CONFIG_DIR = NUMPY_ROOT / '_core' / 'lib' / 'pkgconfig' + + +@pytest.mark.skipif(not IS_INSTALLED, + reason="`numpy-config` not expected to be installed") +@pytest.mark.skipif(IS_WASM, + reason="wasm interpreter cannot start subprocess") +class TestNumpyConfig: + def check_numpyconfig(self, arg): + p = subprocess.run(['numpy-config', arg], capture_output=True, text=True) + p.check_returncode() + return p.stdout.strip() + + def test_configtool_version(self): + stdout = self.check_numpyconfig('--version') + assert stdout == np.__version__ + + def test_configtool_cflags(self): + stdout = self.check_numpyconfig('--cflags') + assert f'-I{os.fspath(INCLUDE_DIR)}' in stdout + + def test_configtool_pkgconfigdir(self): + stdout = self.check_numpyconfig('--pkgconfigdir') + assert pathlib.Path(stdout) == PKG_CONFIG_DIR.resolve() + + +@pytest.mark.skipif(not IS_INSTALLED, + reason="numpy must be installed to check its entrypoints") +def test_pkg_config_entrypoint(): + (entrypoint,) = importlib.metadata.entry_points(group='pkg_config', name='numpy') + assert entrypoint.value == numpy._core.lib.pkgconfig.__name__ + + +@pytest.mark.skipif(not IS_INSTALLED, + reason="numpy.pc is only available when numpy is installed") +@pytest.mark.skipif(IS_EDITABLE, reason="editable installs don't have a numpy.pc") +def test_pkg_config_config_exists(): + assert PKG_CONFIG_DIR.joinpath('numpy.pc').is_file() diff --git a/local_packages/numpy/tests/test_ctypeslib.py b/local_packages/numpy/tests/test_ctypeslib.py new file mode 100644 index 0000000..b88910c --- /dev/null +++ b/local_packages/numpy/tests/test_ctypeslib.py @@ -0,0 +1,383 @@ +import sys +import sysconfig +import weakref +from pathlib import Path + +import pytest + +import numpy as np +from numpy.ctypeslib import as_array, load_library, ndpointer +from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises + +try: + import ctypes +except ImportError: + ctypes = None +else: + cdll = None + test_cdll = None + if hasattr(sys, 'gettotalrefcount'): + try: + cdll = load_library( + '_multiarray_umath_d', np._core._multiarray_umath.__file__ + ) + except OSError: + pass + try: + test_cdll = load_library( + '_multiarray_tests', np._core._multiarray_tests.__file__ + ) + except OSError: + pass + if cdll is None: + cdll = load_library( + '_multiarray_umath', np._core._multiarray_umath.__file__) + if test_cdll is None: + test_cdll = load_library( + '_multiarray_tests', np._core._multiarray_tests.__file__ + ) + + c_forward_pointer = test_cdll.forward_pointer + + +@pytest.mark.skipif(ctypes is None, + reason="ctypes not available in this python") +@pytest.mark.skipif(sys.platform == 'cygwin', + reason="Known to fail on cygwin") +class TestLoadLibrary: + def test_basic(self): + loader_path = np._core._multiarray_umath.__file__ + + out1 = load_library('_multiarray_umath', loader_path) + out2 = load_library(Path('_multiarray_umath'), loader_path) + out3 = load_library('_multiarray_umath', Path(loader_path)) + out4 = load_library(b'_multiarray_umath', loader_path) + + assert isinstance(out1, ctypes.CDLL) + assert out1 is out2 is out3 is out4 + + def test_basic2(self): + # Regression for #801: load_library with a full library name + # (including extension) does not work. + try: + so_ext = sysconfig.get_config_var('EXT_SUFFIX') + load_library(f'_multiarray_umath{so_ext}', + np._core._multiarray_umath.__file__) + except ImportError as e: + msg = ("ctypes is not available on this python: skipping the test" + " (import error was: %s)" % str(e)) + print(msg) + + +class TestNdpointer: + def test_dtype(self): + dt = np.intc + p = ndpointer(dtype=dt) + assert_(p.from_param(np.array([1], dt))) + dt = 'i4') + p = ndpointer(dtype=dt) + p.from_param(np.array([1], dt)) + assert_raises(TypeError, p.from_param, + np.array([1], dt.newbyteorder('swap'))) + dtnames = ['x', 'y'] + dtformats = [np.intc, np.float64] + dtdescr = {'names': dtnames, 'formats': dtformats} + dt = np.dtype(dtdescr) + p = ndpointer(dtype=dt) + assert_(p.from_param(np.zeros((10,), dt))) + samedt = np.dtype(dtdescr) + p = ndpointer(dtype=samedt) + assert_(p.from_param(np.zeros((10,), dt))) + dt2 = np.dtype(dtdescr, align=True) + if dt.itemsize != dt2.itemsize: + assert_raises(TypeError, p.from_param, np.zeros((10,), dt2)) + else: + assert_(p.from_param(np.zeros((10,), dt2))) + + def test_ndim(self): + p = ndpointer(ndim=0) + assert_(p.from_param(np.array(1))) + assert_raises(TypeError, p.from_param, np.array([1])) + p = ndpointer(ndim=1) + assert_raises(TypeError, p.from_param, np.array(1)) + assert_(p.from_param(np.array([1]))) + p = ndpointer(ndim=2) + assert_(p.from_param(np.array([[1]]))) + + def test_shape(self): + p = ndpointer(shape=(1, 2)) + assert_(p.from_param(np.array([[1, 2]]))) + assert_raises(TypeError, p.from_param, np.array([[1], [2]])) + p = ndpointer(shape=()) + assert_(p.from_param(np.array(1))) + + def test_flags(self): + x = np.array([[1, 2], [3, 4]], order='F') + p = ndpointer(flags='FORTRAN') + assert_(p.from_param(x)) + p = ndpointer(flags='CONTIGUOUS') + assert_raises(TypeError, p.from_param, x) + p = ndpointer(flags=x.flags.num) + assert_(p.from_param(x)) + assert_raises(TypeError, p.from_param, np.array([[1, 2], [3, 4]])) + + @pytest.mark.thread_unsafe(reason="checks that global ndpointer cache is updating") + def test_cache(self): + assert_(ndpointer(dtype=np.float64) is ndpointer(dtype=np.float64)) + + # shapes are normalized + assert_(ndpointer(shape=2) is ndpointer(shape=(2,))) + + # 1.12 <= v < 1.16 had a bug that made these fail + assert_(ndpointer(shape=2) is not ndpointer(ndim=2)) + assert_(ndpointer(ndim=2) is not ndpointer(shape=2)) + +@pytest.mark.skipif(ctypes is None, + reason="ctypes not available on this python installation") +class TestNdpointerCFunc: + def test_arguments(self): + """ Test that arguments are coerced from arrays """ + c_forward_pointer.restype = ctypes.c_void_p + c_forward_pointer.argtypes = (ndpointer(ndim=2),) + + c_forward_pointer(np.zeros((2, 3))) + # too many dimensions + assert_raises( + ctypes.ArgumentError, c_forward_pointer, np.zeros((2, 3, 4))) + + @pytest.mark.parametrize( + 'dt', [ + float, + np.dtype({ + 'formats': ['u2') + ct = np.ctypeslib.as_ctypes_type(dt) + assert_equal(ct, ctypes.c_uint16.__ctype_be__) + + dt = np.dtype('u2') + ct = np.ctypeslib.as_ctypes_type(dt) + assert_equal(ct, ctypes.c_uint16) + + @pytest.mark.thread_unsafe(reason="some sort of data race? (gh-29943)") + def test_subarray(self): + dt = np.dtype((np.int32, (2, 3))) + ct = np.ctypeslib.as_ctypes_type(dt) + assert_equal(ct, 2 * (3 * ctypes.c_int32)) + + def test_structure(self): + dt = np.dtype([ + ('a', np.uint16), + ('b', np.uint32), + ]) + + ct = np.ctypeslib.as_ctypes_type(dt) + assert_(issubclass(ct, ctypes.Structure)) + assert_equal(ctypes.sizeof(ct), dt.itemsize) + assert_equal(ct._fields_, [ + ('a', ctypes.c_uint16), + ('b', ctypes.c_uint32), + ]) + + @pytest.mark.thread_unsafe(reason="some sort of data race? (gh-29943)") + def test_structure_aligned(self): + dt = np.dtype([ + ('a', np.uint16), + ('b', np.uint32), + ], align=True) + + ct = np.ctypeslib.as_ctypes_type(dt) + assert_(issubclass(ct, ctypes.Structure)) + assert_equal(ctypes.sizeof(ct), dt.itemsize) + assert_equal(ct._fields_, [ + ('a', ctypes.c_uint16), + ('', ctypes.c_char * 2), # padding + ('b', ctypes.c_uint32), + ]) + + def test_union(self): + dt = np.dtype({ + 'names': ['a', 'b'], + 'offsets': [0, 0], + 'formats': [np.uint16, np.uint32] + }) + + ct = np.ctypeslib.as_ctypes_type(dt) + assert_(issubclass(ct, ctypes.Union)) + assert_equal(ctypes.sizeof(ct), dt.itemsize) + assert_equal(ct._fields_, [ + ('a', ctypes.c_uint16), + ('b', ctypes.c_uint32), + ]) + + @pytest.mark.thread_unsafe(reason="some sort of data race? (gh-29943)") + def test_padded_union(self): + dt = np.dtype({ + 'names': ['a', 'b'], + 'offsets': [0, 0], + 'formats': [np.uint16, np.uint32], + 'itemsize': 5, + }) + + ct = np.ctypeslib.as_ctypes_type(dt) + assert_(issubclass(ct, ctypes.Union)) + assert_equal(ctypes.sizeof(ct), dt.itemsize) + assert_equal(ct._fields_, [ + ('a', ctypes.c_uint16), + ('b', ctypes.c_uint32), + ('', ctypes.c_char * 5), # padding + ]) + + def test_overlapping(self): + dt = np.dtype({ + 'names': ['a', 'b'], + 'offsets': [0, 2], + 'formats': [np.uint32, np.uint32] + }) + assert_raises(NotImplementedError, np.ctypeslib.as_ctypes_type, dt) diff --git a/local_packages/numpy/tests/test_lazyloading.py b/local_packages/numpy/tests/test_lazyloading.py new file mode 100644 index 0000000..7b03248 --- /dev/null +++ b/local_packages/numpy/tests/test_lazyloading.py @@ -0,0 +1,42 @@ +import subprocess +import sys +import textwrap + +import pytest + +from numpy.testing import IS_WASM + + +@pytest.mark.skipif(IS_WASM, reason="can't start subprocess") +def test_lazy_load(): + # gh-22045. lazyload doesn't import submodule names into the namespace + + # Test within a new process, to ensure that we do not mess with the + # global state during the test run (could lead to cryptic test failures). + # This is generally unsafe, especially, since we also reload the C-modules. + code = textwrap.dedent(r""" + import sys + from importlib.util import LazyLoader, find_spec, module_from_spec + + # create lazy load of numpy as np + spec = find_spec("numpy") + module = module_from_spec(spec) + sys.modules["numpy"] = module + loader = LazyLoader(spec.loader) + loader.exec_module(module) + np = module + + # test a subpackage import + from numpy.lib import recfunctions # noqa: F401 + + # test triggering the import of the package + np.ndarray + """) + p = subprocess.run( + (sys.executable, '-c', code), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + encoding='utf-8', + check=False, + ) + assert p.returncode == 0, p.stdout diff --git a/local_packages/numpy/tests/test_matlib.py b/local_packages/numpy/tests/test_matlib.py new file mode 100644 index 0000000..2aac1f2 --- /dev/null +++ b/local_packages/numpy/tests/test_matlib.py @@ -0,0 +1,59 @@ +import numpy as np +import numpy.matlib +from numpy.testing import assert_, assert_array_equal + + +def test_empty(): + x = numpy.matlib.empty((2,)) + assert_(isinstance(x, np.matrix)) + assert_(x.shape, (1, 2)) + +def test_ones(): + assert_array_equal(numpy.matlib.ones((2, 3)), + np.matrix([[ 1., 1., 1.], + [ 1., 1., 1.]])) + + assert_array_equal(numpy.matlib.ones(2), np.matrix([[ 1., 1.]])) + +def test_zeros(): + assert_array_equal(numpy.matlib.zeros((2, 3)), + np.matrix([[ 0., 0., 0.], + [ 0., 0., 0.]])) + + assert_array_equal(numpy.matlib.zeros(2), np.matrix([[0., 0.]])) + +def test_identity(): + x = numpy.matlib.identity(2, dtype=int) + assert_array_equal(x, np.matrix([[1, 0], [0, 1]])) + +def test_eye(): + xc = numpy.matlib.eye(3, k=1, dtype=int) + assert_array_equal(xc, np.matrix([[ 0, 1, 0], + [ 0, 0, 1], + [ 0, 0, 0]])) + assert xc.flags.c_contiguous + assert not xc.flags.f_contiguous + + xf = numpy.matlib.eye(3, 4, dtype=int, order='F') + assert_array_equal(xf, np.matrix([[ 1, 0, 0, 0], + [ 0, 1, 0, 0], + [ 0, 0, 1, 0]])) + assert not xf.flags.c_contiguous + assert xf.flags.f_contiguous + +def test_rand(): + x = numpy.matlib.rand(3) + # check matrix type, array would have shape (3,) + assert_(x.ndim == 2) + +def test_randn(): + x = np.matlib.randn(3) + # check matrix type, array would have shape (3,) + assert_(x.ndim == 2) + +def test_repmat(): + a1 = np.arange(4) + x = numpy.matlib.repmat(a1, 2, 2) + y = np.array([[0, 1, 2, 3, 0, 1, 2, 3], + [0, 1, 2, 3, 0, 1, 2, 3]]) + assert_array_equal(x, y) diff --git a/local_packages/numpy/tests/test_numpy_config.py b/local_packages/numpy/tests/test_numpy_config.py new file mode 100644 index 0000000..9219379 --- /dev/null +++ b/local_packages/numpy/tests/test_numpy_config.py @@ -0,0 +1,47 @@ +""" +Check the numpy config is valid. +""" +from unittest.mock import patch + +import pytest + +import numpy as np + +pytestmark = pytest.mark.skipif( + not hasattr(np.__config__, "_built_with_meson"), + reason="Requires Meson builds", +) + + +class TestNumPyConfigs: + REQUIRED_CONFIG_KEYS = [ + "Compilers", + "Machine Information", + "Python Information", + ] + + @patch("numpy.__config__._check_pyyaml") + @pytest.mark.thread_unsafe(reason="unittest.mock.patch updates global state") + def test_pyyaml_not_found(self, mock_yaml_importer): + mock_yaml_importer.side_effect = ModuleNotFoundError() + with pytest.warns(UserWarning): + np.show_config() + + def test_dict_mode(self): + config = np.show_config(mode="dicts") + + assert isinstance(config, dict) + assert all(key in config for key in self.REQUIRED_CONFIG_KEYS), ( + "Required key missing," + " see index of `False` with `REQUIRED_CONFIG_KEYS`" + ) + + def test_invalid_mode(self): + with pytest.raises(AttributeError): + np.show_config(mode="foo") + + def test_warn_to_add_tests(self): + assert len(np.__config__.DisplayModes) == 2, ( + "New mode detected," + " please add UT if applicable and increment this count" + ) diff --git a/local_packages/numpy/tests/test_numpy_version.py b/local_packages/numpy/tests/test_numpy_version.py new file mode 100644 index 0000000..ea16422 --- /dev/null +++ b/local_packages/numpy/tests/test_numpy_version.py @@ -0,0 +1,54 @@ +""" +Check the numpy version is valid. + +Note that a development version is marked by the presence of 'dev0' or '+' +in the version string, all else is treated as a release. The version string +itself is set from the output of ``git describe`` which relies on tags. + +Examples +-------- + +Valid Development: 1.22.0.dev0 1.22.0.dev0+5-g7999db4df2 1.22.0+5-g7999db4df2 +Valid Release: 1.21.0.rc1, 1.21.0.b1, 1.21.0 +Invalid: 1.22.0.dev, 1.22.0.dev0-5-g7999db4dfB, 1.21.0.d1, 1.21.a + +Note that a release is determined by the version string, which in turn +is controlled by the result of the ``git describe`` command. +""" +import re + +import numpy as np +from numpy.testing import assert_ + + +def test_valid_numpy_version(): + # Verify that the numpy version is a valid one (no .post suffix or other + # nonsense). See gh-6431 for an issue caused by an invalid version. + version_pattern = r"^[0-9]+\.[0-9]+\.[0-9]+(a[0-9]|b[0-9]|rc[0-9])?" + dev_suffix = r"(\.dev[0-9]+(\+git[0-9]+\.[0-9a-f]+)?)?" + res = re.match(version_pattern + dev_suffix + '$', np.__version__) + + assert_(res is not None, np.__version__) + + +def test_short_version(): + # Check numpy.short_version actually exists + if np.version.release: + assert_(np.__version__ == np.version.short_version, + "short_version mismatch in release version") + else: + assert_(np.__version__.split("+")[0] == np.version.short_version, + "short_version mismatch in development version") + + +def test_version_module(): + contents = {s for s in dir(np.version) if not s.startswith('_')} + expected = { + 'full_version', + 'git_revision', + 'release', + 'short_version', + 'version', + } + + assert contents == expected diff --git a/local_packages/numpy/tests/test_public_api.py b/local_packages/numpy/tests/test_public_api.py new file mode 100644 index 0000000..1771054 --- /dev/null +++ b/local_packages/numpy/tests/test_public_api.py @@ -0,0 +1,807 @@ +import functools +import importlib +import inspect +import pkgutil +import subprocess +import sys +import sysconfig +import types +import warnings + +import pytest + +import numpy +import numpy as np +from numpy.testing import IS_WASM + +try: + import ctypes +except ImportError: + ctypes = None + + +def check_dir(module, module_name=None): + """Returns a mapping of all objects with the wrong __module__ attribute.""" + if module_name is None: + module_name = module.__name__ + results = {} + for name in dir(module): + if name == "core": + continue + item = getattr(module, name) + if (hasattr(item, '__module__') and hasattr(item, '__name__') + and item.__module__ != module_name): + results[name] = item.__module__ + '.' + item.__name__ + return results + + +def test_numpy_namespace(): + # We override dir to not show these members + allowlist = { + 'recarray': 'numpy.rec.recarray', + } + bad_results = check_dir(np) + # pytest gives better error messages with the builtin assert than with + # assert_equal + assert bad_results == allowlist + + +@pytest.mark.skipif(IS_WASM, reason="can't start subprocess") +@pytest.mark.parametrize('name', ['testing']) +def test_import_lazy_import(name): + """Make sure we can actually use the modules we lazy load. + + While not exported as part of the public API, it was accessible. With the + use of __getattr__ and __dir__, this isn't always true It can happen that + an infinite recursion may happen. + + This is the only way I found that would force the failure to appear on the + badly implemented code. + + We also test for the presence of the lazily imported modules in dir + + """ + exe = (sys.executable, '-c', "import numpy; numpy." + name) + result = subprocess.check_output(exe) + assert not result + + # Make sure they are still in the __dir__ + assert name in dir(np) + + +def test_dir_testing(): + """Assert that output of dir has only one "testing/tester" + attribute without duplicate""" + assert len(dir(np)) == len(set(dir(np))) + + +def test_numpy_linalg(): + bad_results = check_dir(np.linalg) + assert bad_results == {} + + +def test_numpy_fft(): + bad_results = check_dir(np.fft) + assert bad_results == {} + + +@pytest.mark.skipif(ctypes is None, + reason="ctypes not available in this python") +def test_NPY_NO_EXPORT(): + cdll = ctypes.CDLL(np._core._multiarray_tests.__file__) + # Make sure an arbitrary NPY_NO_EXPORT function is actually hidden + f = getattr(cdll, 'test_not_exported', None) + assert f is None, ("'test_not_exported' is mistakenly exported, " + "NPY_NO_EXPORT does not work") + + +# Historically NumPy has not used leading underscores for private submodules +# much. This has resulted in lots of things that look like public modules +# (i.e. things that can be imported as `import numpy.somesubmodule.somefile`), +# but were never intended to be public. The PUBLIC_MODULES list contains +# modules that are either public because they were meant to be, or because they +# contain public functions/objects that aren't present in any other namespace +# for whatever reason and therefore should be treated as public. +# +# The PRIVATE_BUT_PRESENT_MODULES list contains modules that look public (lack +# of underscores) but should not be used. For many of those modules the +# current status is fine. For others it may make sense to work on making them +# private, to clean up our public API and avoid confusion. +PUBLIC_MODULES = ['numpy.' + s for s in [ + "ctypeslib", + "dtypes", + "exceptions", + "f2py", + "fft", + "lib", + "lib.array_utils", + "lib.format", + "lib.introspect", + "lib.mixins", + "lib.npyio", + "lib.recfunctions", # note: still needs cleaning, was forgotten for 2.0 + "lib.scimath", + "lib.stride_tricks", + "linalg", + "ma", + "ma.extras", + "ma.mrecords", + "polynomial", + "polynomial.chebyshev", + "polynomial.hermite", + "polynomial.hermite_e", + "polynomial.laguerre", + "polynomial.legendre", + "polynomial.polynomial", + "random", + "strings", + "testing", + "testing.overrides", + "typing", + "typing.mypy_plugin", + "version", +]] +if sys.version_info < (3, 12): + PUBLIC_MODULES += [ + 'numpy.' + s for s in [ + "distutils", + "distutils.cpuinfo", + "distutils.exec_command", + "distutils.misc_util", + "distutils.log", + "distutils.system_info", + ] + ] + + +PUBLIC_ALIASED_MODULES = [ + "numpy.char", + "numpy.emath", + "numpy.rec", +] + + +PRIVATE_BUT_PRESENT_MODULES = ['numpy.' + s for s in [ + "conftest", + "core", + "core.multiarray", + "core.numeric", + "core.umath", + "core.arrayprint", + "core.defchararray", + "core.einsumfunc", + "core.fromnumeric", + "core.function_base", + "core.getlimits", + "core.numerictypes", + "core.overrides", + "core.records", + "core.shape_base", + "f2py.auxfuncs", + "f2py.capi_maps", + "f2py.cb_rules", + "f2py.cfuncs", + "f2py.common_rules", + "f2py.crackfortran", + "f2py.diagnose", + "f2py.f2py2e", + "f2py.f90mod_rules", + "f2py.func2subr", + "f2py.rules", + "f2py.symbolic", + "f2py.use_rules", + "lib.user_array", # note: not in np.lib, but probably should just be deleted + "linalg.lapack_lite", + "ma.core", + "ma.testutils", + "matlib", + "matrixlib", + "matrixlib.defmatrix", + "polynomial.polyutils", + "random.mtrand", + "random.bit_generator", + "testing.print_coercion_tables", +]] +if sys.version_info < (3, 12): + PRIVATE_BUT_PRESENT_MODULES += [ + 'numpy.' + s for s in [ + "distutils.armccompiler", + "distutils.fujitsuccompiler", + "distutils.ccompiler", + 'distutils.ccompiler_opt', + "distutils.command", + "distutils.command.autodist", + "distutils.command.bdist_rpm", + "distutils.command.build", + "distutils.command.build_clib", + "distutils.command.build_ext", + "distutils.command.build_py", + "distutils.command.build_scripts", + "distutils.command.build_src", + "distutils.command.config", + "distutils.command.config_compiler", + "distutils.command.develop", + "distutils.command.egg_info", + "distutils.command.install", + "distutils.command.install_clib", + "distutils.command.install_data", + "distutils.command.install_headers", + "distutils.command.sdist", + "distutils.conv_template", + "distutils.core", + "distutils.extension", + "distutils.fcompiler", + "distutils.fcompiler.absoft", + "distutils.fcompiler.arm", + "distutils.fcompiler.compaq", + "distutils.fcompiler.environment", + "distutils.fcompiler.g95", + "distutils.fcompiler.gnu", + "distutils.fcompiler.hpux", + "distutils.fcompiler.ibm", + "distutils.fcompiler.intel", + "distutils.fcompiler.lahey", + "distutils.fcompiler.mips", + "distutils.fcompiler.nag", + "distutils.fcompiler.none", + "distutils.fcompiler.pathf95", + "distutils.fcompiler.pg", + "distutils.fcompiler.nv", + "distutils.fcompiler.sun", + "distutils.fcompiler.vast", + "distutils.fcompiler.fujitsu", + "distutils.from_template", + "distutils.intelccompiler", + "distutils.lib2def", + "distutils.line_endings", + "distutils.mingw32ccompiler", + "distutils.msvccompiler", + "distutils.npy_pkg_config", + "distutils.numpy_distribution", + "distutils.pathccompiler", + "distutils.unixccompiler", + ] + ] + + +def is_unexpected(name): + """Check if this needs to be considered.""" + return ( + '._' not in name and '.tests' not in name and '.setup' not in name + and name not in PUBLIC_MODULES + and name not in PUBLIC_ALIASED_MODULES + and name not in PRIVATE_BUT_PRESENT_MODULES + ) + + +if sys.version_info >= (3, 12): + SKIP_LIST = [] +else: + SKIP_LIST = ["numpy.distutils.msvc9compiler"] + + +def test_all_modules_are_expected(): + """ + Test that we don't add anything that looks like a new public module by + accident. Check is based on filenames. + """ + + modnames = [] + for _, modname, ispkg in pkgutil.walk_packages(path=np.__path__, + prefix=np.__name__ + '.', + onerror=None): + if is_unexpected(modname) and modname not in SKIP_LIST: + # We have a name that is new. If that's on purpose, add it to + # PUBLIC_MODULES. We don't expect to have to add anything to + # PRIVATE_BUT_PRESENT_MODULES. Use an underscore in the name! + modnames.append(modname) + + if modnames: + raise AssertionError(f'Found unexpected modules: {modnames}') + + +# Stuff that clearly shouldn't be in the API and is detected by the next test +# below +SKIP_LIST_2 = [ + 'numpy.lib.math', + 'numpy.matlib.char', + 'numpy.matlib.rec', + 'numpy.matlib.emath', + 'numpy.matlib.exceptions', + 'numpy.matlib.math', + 'numpy.matlib.linalg', + 'numpy.matlib.fft', + 'numpy.matlib.random', + 'numpy.matlib.ctypeslib', + 'numpy.matlib.ma', +] +if sys.version_info < (3, 12): + SKIP_LIST_2 += [ + 'numpy.distutils.log.sys', + 'numpy.distutils.log.logging', + 'numpy.distutils.log.warnings', + ] + + +def test_all_modules_are_expected_2(): + """ + Method checking all objects. The pkgutil-based method in + `test_all_modules_are_expected` does not catch imports into a namespace, + only filenames. So this test is more thorough, and checks this like: + + import .lib.scimath as emath + + To check if something in a module is (effectively) public, one can check if + there's anything in that namespace that's a public function/object but is + not exposed in a higher-level namespace. For example for a `numpy.lib` + submodule:: + + mod = np.lib.mixins + for obj in mod.__all__: + if obj in np.__all__: + continue + elif obj in np.lib.__all__: + continue + + else: + print(obj) + + """ + + def find_unexpected_members(mod_name): + members = [] + module = importlib.import_module(mod_name) + if hasattr(module, '__all__'): + objnames = module.__all__ + else: + objnames = dir(module) + + for objname in objnames: + if not objname.startswith('_'): + fullobjname = mod_name + '.' + objname + if isinstance(getattr(module, objname), types.ModuleType): + if is_unexpected(fullobjname): + if fullobjname not in SKIP_LIST_2: + members.append(fullobjname) + + return members + + unexpected_members = find_unexpected_members("numpy") + for modname in PUBLIC_MODULES: + unexpected_members.extend(find_unexpected_members(modname)) + + if unexpected_members: + raise AssertionError("Found unexpected object(s) that look like " + f"modules: {unexpected_members}") + + +def test_api_importable(): + """ + Check that all submodules listed higher up in this file can be imported + + Note that if a PRIVATE_BUT_PRESENT_MODULES entry goes missing, it may + simply need to be removed from the list (deprecation may or may not be + needed - apply common sense). + """ + def check_importable(module_name): + try: + importlib.import_module(module_name) + except (ImportError, AttributeError): + return False + + return True + + module_names = [] + for module_name in PUBLIC_MODULES: + if not check_importable(module_name): + module_names.append(module_name) + + if module_names: + raise AssertionError("Modules in the public API that cannot be " + f"imported: {module_names}") + + for module_name in PUBLIC_ALIASED_MODULES: + try: + eval(module_name) + except AttributeError: + module_names.append(module_name) + + if module_names: + raise AssertionError("Modules in the public API that were not " + f"found: {module_names}") + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', category=DeprecationWarning) + warnings.filterwarnings('always', category=ImportWarning) + for module_name in PRIVATE_BUT_PRESENT_MODULES: + if not check_importable(module_name): + # Nasty hack to avoid new FreeBSD failures. This + # is only needed for NumPy 2.4.x, so go with it + if not module_name == 'numpy.distutils.msvccompiler': + module_names.append(module_name) + + if module_names: + raise AssertionError("Modules that are not really public but looked " + "public and can not be imported: " + f"{module_names}") + + +@pytest.mark.xfail( + sysconfig.get_config_var("Py_DEBUG") not in (None, 0, "0"), + reason=( + "NumPy possibly built with `USE_DEBUG=True ./tools/travis-test.sh`, " + "which does not expose the `array_api` entry point. " + "See https://github.com/numpy/numpy/pull/19800" + ), +) +def test_array_api_entry_point(): + """ + Entry point for Array API implementation can be found with importlib and + returns the main numpy namespace. + """ + # For a development install that did not go through meson-python, + # the entrypoint will not have been installed. So ensure this test fails + # only if numpy is inside site-packages. + numpy_in_sitepackages = sysconfig.get_path('platlib') in np.__file__ + + eps = importlib.metadata.entry_points() + xp_eps = eps.select(group="array_api") + if len(xp_eps) == 0: + if numpy_in_sitepackages: + msg = "No entry points for 'array_api' found" + raise AssertionError(msg) from None + return + + try: + ep = next(ep for ep in xp_eps if ep.name == "numpy") + except StopIteration: + if numpy_in_sitepackages: + msg = "'numpy' not in array_api entry points" + raise AssertionError(msg) from None + return + + if ep.value == 'numpy.array_api': + # Looks like the entrypoint for the current numpy build isn't + # installed, but an older numpy is also installed and hence the + # entrypoint is pointing to the old (no longer existing) location. + # This isn't a problem except for when running tests with `spin` or an + # in-place build. + return + + xp = ep.load() + msg = ( + f"numpy entry point value '{ep.value}' " + "does not point to our Array API implementation" + ) + assert xp is numpy, msg + + +def test_main_namespace_all_dir_coherence(): + """ + Checks if `dir(np)` and `np.__all__` are consistent and return + the same content, excluding exceptions and private members. + """ + def _remove_private_members(member_set): + return {m for m in member_set if not m.startswith('_')} + + def _remove_exceptions(member_set): + return member_set.difference({ + "bool" # included only in __dir__ + }) + + all_members = _remove_private_members(np.__all__) + all_members = _remove_exceptions(all_members) + + dir_members = _remove_private_members(np.__dir__()) + dir_members = _remove_exceptions(dir_members) + + assert all_members == dir_members, ( + "Members that break symmetry: " + f"{all_members.symmetric_difference(dir_members)}" + ) + + +@pytest.mark.filterwarnings( + r"ignore:numpy.core(\.\w+)? is deprecated:DeprecationWarning" +) +def test_core_shims_coherence(): + """ + Check that all "semi-public" members of `numpy._core` are also accessible + from `numpy.core` shims. + """ + import numpy.core as core + + for member_name in dir(np._core): + # Skip private and test members. Also if a module is aliased, + # no need to add it to np.core + if ( + member_name.startswith("_") + or member_name in ["tests", "strings"] + or f"numpy.{member_name}" in PUBLIC_ALIASED_MODULES + ): + continue + + member = getattr(np._core, member_name) + + # np.core is a shim and all submodules of np.core are shims + # but we should be able to import everything in those shims + # that are available in the "real" modules in np._core, with + # the exception of the namespace packages (__spec__.origin is None), + # like numpy._core.include, or numpy._core.lib.pkgconfig. + if ( + inspect.ismodule(member) + and member.__spec__ and member.__spec__.origin is not None + ): + submodule = member + submodule_name = member_name + for submodule_member_name in dir(submodule): + # ignore dunder names + if submodule_member_name.startswith("__"): + continue + submodule_member = getattr(submodule, submodule_member_name) + + core_submodule = __import__( + f"numpy.core.{submodule_name}", + fromlist=[submodule_member_name] + ) + + assert submodule_member is getattr( + core_submodule, submodule_member_name + ) + + else: + assert member is getattr(core, member_name) + + +def test_functions_single_location(): + """ + Check that each public function is available from one location only. + + Test performs BFS search traversing NumPy's public API. It flags + any function-like object that is accessible from more that one place. + """ + from collections.abc import Callable + from typing import Any + + from numpy._core._multiarray_umath import ( + _ArrayFunctionDispatcher as dispatched_function, + ) + + visited_modules: set[types.ModuleType] = {np} + visited_functions: set[Callable[..., Any]] = set() + # Functions often have `__name__` overridden, therefore we need + # to keep track of locations where functions have been found. + functions_original_paths: dict[Callable[..., Any], str] = {} + + # Here we aggregate functions with more than one location. + # It must be empty for the test to pass. + duplicated_functions: list[tuple] = [] + + modules_queue = [np] + + while len(modules_queue) > 0: + + module = modules_queue.pop() + + for member_name in dir(module): + member = getattr(module, member_name) + + # first check if we got a module + if ( + inspect.ismodule(member) and # it's a module + "numpy" in member.__name__ and # inside NumPy + not member_name.startswith("_") and # not private + "numpy._core" not in member.__name__ and # outside _core + # not a legacy or testing module + member_name not in ["f2py", "ma", "testing", "tests"] and + member not in visited_modules # not visited yet + ): + modules_queue.append(member) + visited_modules.add(member) + + # else check if we got a function-like object + elif ( + inspect.isfunction(member) or + isinstance(member, (dispatched_function, np.ufunc)) + ): + if member in visited_functions: + + # skip main namespace functions with aliases + if ( + member.__name__ in [ + "absolute", # np.abs + "arccos", # np.acos + "arccosh", # np.acosh + "arcsin", # np.asin + "arcsinh", # np.asinh + "arctan", # np.atan + "arctan2", # np.atan2 + "arctanh", # np.atanh + "left_shift", # np.bitwise_left_shift + "right_shift", # np.bitwise_right_shift + "conjugate", # np.conj + "invert", # np.bitwise_not & np.bitwise_invert + "remainder", # np.mod + "divide", # np.true_divide + "concatenate", # np.concat + "power", # np.pow + "transpose", # np.permute_dims + ] and + module.__name__ == "numpy" + ): + continue + # skip trimcoef from numpy.polynomial as it is + # duplicated by design. + if ( + member.__name__ == "trimcoef" and + module.__name__.startswith("numpy.polynomial") + ): + continue + + # skip ufuncs that are exported in np.strings as well + if member.__name__ in ( + "add", + "equal", + "not_equal", + "greater", + "greater_equal", + "less", + "less_equal", + ) and module.__name__ == "numpy.strings": + continue + + # numpy.char reexports all numpy.strings functions for + # backwards-compatibility + if module.__name__ == "numpy.char": + continue + + # function is present in more than one location! + duplicated_functions.append( + (member.__name__, + module.__name__, + functions_original_paths[member]) + ) + else: + visited_functions.add(member) + functions_original_paths[member] = module.__name__ + + del visited_functions, visited_modules, functions_original_paths + + assert len(duplicated_functions) == 0, duplicated_functions + + +def test___module___attribute(): + modules_queue = [np] + visited_modules = {np} + visited_functions = set() + incorrect_entries = [] + + while len(modules_queue) > 0: + module = modules_queue.pop() + for member_name in dir(module): + member = getattr(module, member_name) + # first check if we got a module + if ( + inspect.ismodule(member) and # it's a module + "numpy" in member.__name__ and # inside NumPy + not member_name.startswith("_") and # not private + "numpy._core" not in member.__name__ and # outside _core + # not in a skip module list + member_name not in [ + "char", "core", "f2py", "ma", "lapack_lite", "mrecords", + "testing", "tests", "polynomial", "typing", "mtrand", + "bit_generator", + ] and + member not in visited_modules # not visited yet + ): + modules_queue.append(member) + visited_modules.add(member) + elif ( + not inspect.ismodule(member) and + hasattr(member, "__name__") and + not member.__name__.startswith("_") and + member.__module__ != module.__name__ and + member not in visited_functions + ): + # skip ufuncs that are exported in np.strings as well + if member.__name__ in ( + "add", "equal", "not_equal", "greater", "greater_equal", + "less", "less_equal", + ) and module.__name__ == "numpy.strings": + continue + + # recarray and record are exported in np and np.rec + if ( + (member.__name__ == "recarray" and module.__name__ == "numpy") or + (member.__name__ == "record" and module.__name__ == "numpy.rec") + ): + continue + + # ctypeslib exports ctypes c_long/c_longlong + if ( + member.__name__ in ("c_long", "c_longlong") and + module.__name__ == "numpy.ctypeslib" + ): + continue + + # skip cdef classes + if member.__name__ in ( + "BitGenerator", "Generator", "MT19937", "PCG64", "PCG64DXSM", + "Philox", "RandomState", "SFC64", "SeedSequence", + ): + continue + + incorrect_entries.append( + { + "Func": member.__name__, + "actual": member.__module__, + "expected": module.__name__, + } + ) + visited_functions.add(member) + + if incorrect_entries: + assert len(incorrect_entries) == 0, incorrect_entries + + +def _check_correct_qualname_and_module(obj) -> bool: + qualname = obj.__qualname__ + name = obj.__name__ + module_name = obj.__module__ + assert name == qualname.split(".")[-1] + + module = sys.modules[module_name] + actual_obj = functools.reduce(getattr, qualname.split("."), module) + return ( + actual_obj is obj or + # `obj` may be a bound method/property of `actual_obj`: + ( + hasattr(actual_obj, "__get__") and hasattr(obj, "__self__") and + actual_obj.__module__ == obj.__module__ and + actual_obj.__qualname__ == qualname + ) + ) + + +def test___qualname___and___module___attribute(): + # NumPy messes with module and name/qualname attributes, but any object + # should be discoverable based on its module and qualname, so test that. + # We do this for anything with a name (ensuring qualname is also set). + modules_queue = [np] + visited_modules = {np} + visited_functions = set() + incorrect_entries = [] + + while len(modules_queue) > 0: + module = modules_queue.pop() + for member_name in dir(module): + member = getattr(module, member_name) + # first check if we got a module + if ( + inspect.ismodule(member) and # it's a module + "numpy" in member.__name__ and # inside NumPy + not member_name.startswith("_") and # not private + member_name not in {"tests", "typing"} and # type names don't match + "numpy._core" not in member.__name__ and # outside _core + member not in visited_modules # not visited yet + ): + modules_queue.append(member) + visited_modules.add(member) + elif ( + not inspect.ismodule(member) and + hasattr(member, "__name__") and + not member.__name__.startswith("_") and + not member_name.startswith("_") and + not _check_correct_qualname_and_module(member) and + member not in visited_functions + ): + incorrect_entries.append( + { + "found_at": f"{module.__name__}:{member_name}", + "advertises": f"{member.__module__}:{member.__qualname__}", + } + ) + visited_functions.add(member) + + if incorrect_entries: + assert len(incorrect_entries) == 0, incorrect_entries diff --git a/local_packages/numpy/tests/test_reloading.py b/local_packages/numpy/tests/test_reloading.py new file mode 100644 index 0000000..aa87ae1 --- /dev/null +++ b/local_packages/numpy/tests/test_reloading.py @@ -0,0 +1,76 @@ +import pickle +import subprocess +import sys +import textwrap +from importlib import reload + +import pytest + +import numpy.exceptions as ex +from numpy.testing import IS_WASM, assert_, assert_equal, assert_raises + + +@pytest.mark.thread_unsafe(reason="reloads global module") +def test_numpy_reloading(): + # gh-7844. Also check that relevant globals retain their identity. + import numpy as np + import numpy._globals + + _NoValue = np._NoValue + VisibleDeprecationWarning = ex.VisibleDeprecationWarning + ModuleDeprecationWarning = ex.ModuleDeprecationWarning + + with pytest.warns(UserWarning): + reload(np) + assert_(_NoValue is np._NoValue) + assert_(ModuleDeprecationWarning is ex.ModuleDeprecationWarning) + assert_(VisibleDeprecationWarning is ex.VisibleDeprecationWarning) + + assert_raises(RuntimeError, reload, numpy._globals) + with pytest.warns(UserWarning): + reload(np) + assert_(_NoValue is np._NoValue) + assert_(ModuleDeprecationWarning is ex.ModuleDeprecationWarning) + assert_(VisibleDeprecationWarning is ex.VisibleDeprecationWarning) + +def test_novalue(): + import numpy as np + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + assert_equal(repr(np._NoValue), '') + assert_(pickle.loads(pickle.dumps(np._NoValue, + protocol=proto)) is np._NoValue) + + +@pytest.mark.skipif(IS_WASM, reason="can't start subprocess") +def test_full_reimport(): + # Reimporting numpy like this is not safe due to use of global C state, + # and has unexpected side effects. Test that an ImportError is raised. + # When all extension modules are isolated, this should test that clearing + # sys.modules and reimporting numpy works without error. + + # Test within a new process, to ensure that we do not mess with the + # global state during the test run (could lead to cryptic test failures). + # This is generally unsafe, especially, since we also reload the C-modules. + code = textwrap.dedent(r""" + import sys + import numpy as np + + for k in [k for k in sys.modules if k.startswith('numpy')]: + del sys.modules[k] + + try: + import numpy as np + except ImportError as err: + if str(err) != "cannot load module more than once per process": + raise SystemExit(f"Unexpected ImportError: {err}") + else: + raise SystemExit("DID NOT RAISE ImportError") + """) + p = subprocess.run( + (sys.executable, '-c', code), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + encoding='utf-8', + check=False, + ) + assert p.returncode == 0, p.stdout diff --git a/local_packages/numpy/tests/test_scripts.py b/local_packages/numpy/tests/test_scripts.py new file mode 100644 index 0000000..01b7439 --- /dev/null +++ b/local_packages/numpy/tests/test_scripts.py @@ -0,0 +1,48 @@ +""" Test scripts + +Test that we can run executable scripts that have been installed with numpy. +""" +import os +import subprocess +import sys +from os.path import dirname, isfile, join as pathjoin + +import pytest + +import numpy as np +from numpy.testing import IS_WASM, assert_equal + +is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py')) + + +def find_f2py_commands(): + if sys.platform == 'win32': + exe_dir = dirname(sys.executable) + if exe_dir.endswith('Scripts'): # virtualenv + return [os.path.join(exe_dir, 'f2py')] + else: + return [os.path.join(exe_dir, "Scripts", 'f2py')] + else: + # Three scripts are installed in Unix-like systems: + # 'f2py', 'f2py{major}', and 'f2py{major.minor}'. For example, + # if installed with python3.9 the scripts would be named + # 'f2py', 'f2py3', and 'f2py3.9'. + version = sys.version_info + major = str(version.major) + minor = str(version.minor) + return ['f2py', 'f2py' + major, 'f2py' + major + '.' + minor] + + +@pytest.mark.skipif(is_inplace, reason="Cannot test f2py command inplace") +@pytest.mark.xfail(reason="Test is unreliable") +@pytest.mark.parametrize('f2py_cmd', find_f2py_commands()) +def test_f2py(f2py_cmd): + # test that we can run f2py script + stdout = subprocess.check_output([f2py_cmd, '-v']) + assert_equal(stdout.strip(), np.__version__.encode('ascii')) + + +@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") +def test_pep338(): + stdout = subprocess.check_output([sys.executable, '-mnumpy.f2py', '-v']) + assert_equal(stdout.strip(), np.__version__.encode('ascii')) diff --git a/local_packages/numpy/tests/test_warnings.py b/local_packages/numpy/tests/test_warnings.py new file mode 100644 index 0000000..7efa2a1 --- /dev/null +++ b/local_packages/numpy/tests/test_warnings.py @@ -0,0 +1,79 @@ +""" +Tests which scan for certain occurrences in the code, they may not find +all of these occurrences but should catch almost all. +""" +import ast +import tokenize +from pathlib import Path + +import pytest + +import numpy + + +class ParseCall(ast.NodeVisitor): + def __init__(self): + self.ls = [] + + def visit_Attribute(self, node): + ast.NodeVisitor.generic_visit(self, node) + self.ls.append(node.attr) + + def visit_Name(self, node): + self.ls.append(node.id) + + +class FindFuncs(ast.NodeVisitor): + def __init__(self, filename): + super().__init__() + self.__filename = filename + + def visit_Call(self, node): + p = ParseCall() + p.visit(node.func) + ast.NodeVisitor.generic_visit(self, node) + + if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings': + if getattr(node.args[0], "value", None) == "ignore": + if not self.__filename.name.startswith("test_"): + raise AssertionError( + "ignore filters should only be used in tests; " + f"found in {self.__filename} on line {node.lineno}") + + if p.ls[-1] == 'warn' and ( + len(p.ls) == 1 or p.ls[-2] == 'warnings'): + + if "testing/tests/test_warnings.py" == self.__filename: + # This file + return + + # See if stacklevel exists: + if len(node.args) == 3: + return + args = {kw.arg for kw in node.keywords} + if "stacklevel" in args: + return + raise AssertionError( + "warnings should have an appropriate stacklevel; " + f"found in {self.__filename} on line {node.lineno}") + + +@pytest.mark.slow +def test_warning_calls(): + # combined "ignore" and stacklevel error + base = Path(numpy.__file__).parent + + for path in base.rglob("*.py"): + if base / "testing" in path.parents: + continue + if path == base / "__init__.py": + continue + if path == base / "random" / "__init__.py": + continue + if path == base / "conftest.py": + continue + # use tokenize to auto-detect encoding on systems where no + # default encoding is defined (e.g. LANG='C') + with tokenize.open(str(path)) as file: + tree = ast.parse(file.read()) + FindFuncs(path).visit(tree) diff --git a/local_packages/numpy/typing/__init__.py b/local_packages/numpy/typing/__init__.py new file mode 100644 index 0000000..ef4c088 --- /dev/null +++ b/local_packages/numpy/typing/__init__.py @@ -0,0 +1,233 @@ +""" +============================ +Typing (:mod:`numpy.typing`) +============================ + +.. versionadded:: 1.20 + +Large parts of the NumPy API have :pep:`484`-style type annotations. In +addition a number of type aliases are available to users, most prominently +the two below: + +- `ArrayLike`: objects that can be converted to arrays +- `DTypeLike`: objects that can be converted to dtypes + +.. _typing-extensions: https://pypi.org/project/typing-extensions/ + +Mypy plugin +----------- + +.. versionadded:: 1.21 + +.. automodule:: numpy.typing.mypy_plugin + +.. currentmodule:: numpy.typing + +Differences from the runtime NumPy API +-------------------------------------- + +NumPy is very flexible. Trying to describe the full range of +possibilities statically would result in types that are not very +helpful. For that reason, the typed NumPy API is often stricter than +the runtime NumPy API. This section describes some notable +differences. + +ArrayLike +~~~~~~~~~ + +The `ArrayLike` type tries to avoid creating object arrays. For +example, + +.. code-block:: python + + >>> np.array(x**2 for x in range(10)) + array( at ...>, dtype=object) + +is valid NumPy code which will create a 0-dimensional object +array. Type checkers will complain about the above example when using +the NumPy types however. If you really intended to do the above, then +you can either use a ``# type: ignore`` comment: + +.. code-block:: python + + >>> np.array(x**2 for x in range(10)) # type: ignore + +or explicitly type the array like object as `~typing.Any`: + +.. code-block:: python + + >>> from typing import Any + >>> array_like: Any = (x**2 for x in range(10)) + >>> np.array(array_like) + array( at ...>, dtype=object) + +ndarray +~~~~~~~ + +It's possible to mutate the dtype of an array at runtime. For example, +the following code is valid: + +.. code-block:: python + + >>> x = np.array([1, 2]) + >>> x.dtype = np.bool + +This sort of mutation is not allowed by the types. Users who want to +write statically typed code should instead use the `numpy.ndarray.view` +method to create a view of the array with a different dtype. + +DTypeLike +~~~~~~~~~ + +The `DTypeLike` type tries to avoid creation of dtype objects using +dictionary of fields like below: + +.. code-block:: python + + >>> x = np.dtype({"field1": (float, 1), "field2": (int, 3)}) + +Although this is valid NumPy code, the type checker will complain about it, +since its usage is discouraged. +Please see : :ref:`Data type objects ` + +Number precision +~~~~~~~~~~~~~~~~ + +The precision of `numpy.number` subclasses is treated as a invariant generic +parameter (see :class:`~NBitBase`), simplifying the annotating of processes +involving precision-based casting. + +.. code-block:: python + + >>> from typing import TypeVar + >>> import numpy as np + >>> import numpy.typing as npt + + >>> T = TypeVar("T", bound=npt.NBitBase) + >>> def func(a: np.floating[T], b: np.floating[T]) -> np.floating[T]: + ... ... + +Consequently, the likes of `~numpy.float16`, `~numpy.float32` and +`~numpy.float64` are still sub-types of `~numpy.floating`, but, contrary to +runtime, they're not necessarily considered as sub-classes. + +.. deprecated:: 2.3 + The :class:`~numpy.typing.NBitBase` helper is deprecated and will be + removed in a future release. Prefer expressing precision relationships via + ``typing.overload`` or ``TypeVar`` definitions bounded by concrete scalar + classes. For example: + + .. code-block:: python + + from typing import TypeVar + import numpy as np + + S = TypeVar("S", bound=np.floating) + + def func(a: S, b: S) -> S: + ... + + or in the case of different input types mapping to different output types: + + .. code-block:: python + + from typing import overload + import numpy as np + + @overload + def phase(x: np.complex64) -> np.float32: ... + @overload + def phase(x: np.complex128) -> np.float64: ... + @overload + def phase(x: np.clongdouble) -> np.longdouble: ... + def phase(x: np.complexfloating) -> np.floating: + ... + +Timedelta64 +~~~~~~~~~~~ + +The `~numpy.timedelta64` class is not considered a subclass of +`~numpy.signedinteger`, the former only inheriting from `~numpy.generic` +while static type checking. + +0D arrays +~~~~~~~~~ + +During runtime numpy aggressively casts any passed 0D arrays into their +corresponding `~numpy.generic` instance. Until the introduction of shape +typing (see :pep:`646`) it is unfortunately not possible to make the +necessary distinction between 0D and >0D arrays. While thus not strictly +correct, all operations that can potentially perform a 0D-array -> scalar +cast are currently annotated as exclusively returning an `~numpy.ndarray`. + +If it is known in advance that an operation *will* perform a +0D-array -> scalar cast, then one can consider manually remedying the +situation with either `typing.cast` or a ``# type: ignore`` comment. + +Record array dtypes +~~~~~~~~~~~~~~~~~~~ + +The dtype of `numpy.recarray`, and the :ref:`routines.array-creation.rec` +functions in general, can be specified in one of two ways: + +* Directly via the ``dtype`` argument. +* With up to five helper arguments that operate via `numpy.rec.format_parser`: + ``formats``, ``names``, ``titles``, ``aligned`` and ``byteorder``. + +These two approaches are currently typed as being mutually exclusive, +*i.e.* if ``dtype`` is specified than one may not specify ``formats``. +While this mutual exclusivity is not (strictly) enforced during runtime, +combining both dtype specifiers can lead to unexpected or even downright +buggy behavior. + +API +--- + +""" +# NOTE: The API section will be appended with additional entries +# further down in this file + +# pyright: reportDeprecated=false + +from numpy._typing import ArrayLike, DTypeLike, NBitBase, NDArray + +__all__ = ["ArrayLike", "DTypeLike", "NBitBase", "NDArray"] + + +__DIR = __all__ + [k for k in globals() if k.startswith("__") and k.endswith("__")] +__DIR_SET = frozenset(__DIR) + + +def __dir__() -> list[str]: + return __DIR + +def __getattr__(name: str) -> object: + if name == "NBitBase": + import warnings + + # Deprecated in NumPy 2.3, 2025-05-01 + warnings.warn( + "`NBitBase` is deprecated and will be removed from numpy.typing in the " + "future. Use `@typing.overload` or a `TypeVar` with a scalar-type as upper " + "bound, instead. (deprecated in NumPy 2.3)", + DeprecationWarning, + stacklevel=2, + ) + return NBitBase + + if name in __DIR_SET: + return globals()[name] + + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") + + +if __doc__ is not None: + from numpy._typing._add_docstring import _docstrings + __doc__ += _docstrings + __doc__ += '\n.. autoclass:: numpy.typing.NBitBase\n' + del _docstrings + +from numpy._pytesttester import PytestTester + +test = PytestTester(__name__) +del PytestTester diff --git a/local_packages/numpy/typing/__init__.pyi b/local_packages/numpy/typing/__init__.pyi new file mode 100644 index 0000000..7a4c7b4 --- /dev/null +++ b/local_packages/numpy/typing/__init__.pyi @@ -0,0 +1,3 @@ +from numpy._typing import ArrayLike, DTypeLike, NBitBase, NDArray + +__all__ = ["ArrayLike", "DTypeLike", "NBitBase", "NDArray"] diff --git a/local_packages/numpy/typing/mypy_plugin.py b/local_packages/numpy/typing/mypy_plugin.py new file mode 100644 index 0000000..fb78eb0 --- /dev/null +++ b/local_packages/numpy/typing/mypy_plugin.py @@ -0,0 +1,200 @@ +"""A mypy_ plugin for managing a number of platform-specific annotations. +Its functionality can be split into three distinct parts: + +* Assigning the (platform-dependent) precisions of certain `~numpy.number` + subclasses, including the likes of `~numpy.int_`, `~numpy.intp` and + `~numpy.longlong`. See the documentation on + :ref:`scalar types ` for a comprehensive overview + of the affected classes. Without the plugin the precision of all relevant + classes will be inferred as `~typing.Any`. +* Removing all extended-precision `~numpy.number` subclasses that are + unavailable for the platform in question. Most notably this includes the + likes of `~numpy.float128` and `~numpy.complex256`. Without the plugin *all* + extended-precision types will, as far as mypy is concerned, be available + to all platforms. +* Assigning the (platform-dependent) precision of `~numpy.ctypeslib.c_intp`. + Without the plugin the type will default to `ctypes.c_int64`. + + .. versionadded:: 1.22 + +.. deprecated:: 2.3 + The :mod:`numpy.typing.mypy_plugin` entry-point is deprecated in favor of + platform-agnostic static type inference. Remove + ``numpy.typing.mypy_plugin`` from the ``plugins`` section of your mypy + configuration; if that surfaces new errors, please open an issue with a + minimal reproducer. + +Examples +-------- +To enable the plugin, one must add it to their mypy `configuration file`_: + +.. code-block:: ini + + [mypy] + plugins = numpy.typing.mypy_plugin + +.. _mypy: https://mypy-lang.org/ +.. _configuration file: https://mypy.readthedocs.io/en/stable/config_file.html + +""" + +from collections.abc import Callable, Iterable +from typing import TYPE_CHECKING, Final, TypeAlias, cast + +import numpy as np + +__all__: list[str] = [] + + +def _get_precision_dict() -> dict[str, str]: + names = [ + ("_NBitByte", np.byte), + ("_NBitShort", np.short), + ("_NBitIntC", np.intc), + ("_NBitIntP", np.intp), + ("_NBitInt", np.int_), + ("_NBitLong", np.long), + ("_NBitLongLong", np.longlong), + + ("_NBitHalf", np.half), + ("_NBitSingle", np.single), + ("_NBitDouble", np.double), + ("_NBitLongDouble", np.longdouble), + ] + ret: dict[str, str] = {} + for name, typ in names: + n = 8 * np.dtype(typ).itemsize + ret[f"{_MODULE}._nbit.{name}"] = f"{_MODULE}._nbit_base._{n}Bit" + return ret + + +def _get_extended_precision_list() -> list[str]: + extended_names = [ + "float96", + "float128", + "complex192", + "complex256", + ] + return [i for i in extended_names if hasattr(np, i)] + +def _get_c_intp_name() -> str: + # Adapted from `np.core._internal._getintp_ctype` + return { + "i": "c_int", + "l": "c_long", + "q": "c_longlong", + }.get(np.dtype("n").char, "c_long") + + +_MODULE: Final = "numpy._typing" + +#: A dictionary mapping type-aliases in `numpy._typing._nbit` to +#: concrete `numpy.typing.NBitBase` subclasses. +_PRECISION_DICT: Final = _get_precision_dict() + +#: A list with the names of all extended precision `np.number` subclasses. +_EXTENDED_PRECISION_LIST: Final = _get_extended_precision_list() + +#: The name of the ctypes equivalent of `np.intp` +_C_INTP: Final = _get_c_intp_name() + + +try: + if TYPE_CHECKING: + from mypy.typeanal import TypeAnalyser + + import mypy.types + from mypy.build import PRI_MED + from mypy.nodes import ImportFrom, MypyFile, Statement + from mypy.plugin import AnalyzeTypeContext, Plugin + +except ModuleNotFoundError as e: + + def plugin(version: str) -> type: + raise e + +else: + + _HookFunc: TypeAlias = Callable[[AnalyzeTypeContext], mypy.types.Type] + + def _hook(ctx: AnalyzeTypeContext) -> mypy.types.Type: + """Replace a type-alias with a concrete ``NBitBase`` subclass.""" + typ, _, api = ctx + name = typ.name.split(".")[-1] + name_new = _PRECISION_DICT[f"{_MODULE}._nbit.{name}"] + return cast("TypeAnalyser", api).named_type(name_new) + + def _index(iterable: Iterable[Statement], id: str) -> int: + """Identify the first ``ImportFrom`` instance the specified `id`.""" + for i, value in enumerate(iterable): + if getattr(value, "id", None) == id: + return i + raise ValueError("Failed to identify a `ImportFrom` instance " + f"with the following id: {id!r}") + + def _override_imports( + file: MypyFile, + module: str, + imports: list[tuple[str, str | None]], + ) -> None: + """Override the first `module`-based import with new `imports`.""" + # Construct a new `from module import y` statement + import_obj = ImportFrom(module, 0, names=imports) + import_obj.is_top_level = True + + # Replace the first `module`-based import statement with `import_obj` + for lst in [file.defs, cast("list[Statement]", file.imports)]: + i = _index(lst, module) + lst[i] = import_obj + + class _NumpyPlugin(Plugin): + """A mypy plugin for handling versus numpy-specific typing tasks.""" + + def get_type_analyze_hook(self, fullname: str) -> _HookFunc | None: + """Set the precision of platform-specific `numpy.number` + subclasses. + + For example: `numpy.int_`, `numpy.longlong` and `numpy.longdouble`. + """ + if fullname in _PRECISION_DICT: + return _hook + return None + + def get_additional_deps( + self, file: MypyFile + ) -> list[tuple[int, str, int]]: + """Handle all import-based overrides. + + * Import platform-specific extended-precision `numpy.number` + subclasses (*e.g.* `numpy.float96` and `numpy.float128`). + * Import the appropriate `ctypes` equivalent to `numpy.intp`. + + """ + fullname = file.fullname + if fullname == "numpy": + _override_imports( + file, + f"{_MODULE}._extended_precision", + imports=[(v, v) for v in _EXTENDED_PRECISION_LIST], + ) + elif fullname == "numpy.ctypeslib": + _override_imports( + file, + "ctypes", + imports=[(_C_INTP, "_c_intp")], + ) + return [(PRI_MED, fullname, -1)] + + def plugin(version: str) -> type: + import warnings + + plugin = "numpy.typing.mypy_plugin" + # Deprecated 2025-01-10, NumPy 2.3 + warn_msg = ( + f"`{plugin}` is deprecated, and will be removed in a future " + f"release. Please remove `plugins = {plugin}` in your mypy config." + f"(deprecated in NumPy 2.3)" + ) + warnings.warn(warn_msg, DeprecationWarning, stacklevel=3) + + return _NumpyPlugin diff --git a/local_packages/numpy/typing/tests/__init__.py b/local_packages/numpy/typing/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/local_packages/numpy/typing/tests/data/fail/arithmetic.pyi b/local_packages/numpy/typing/tests/data/fail/arithmetic.pyi new file mode 100644 index 0000000..a68df2e --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/arithmetic.pyi @@ -0,0 +1,126 @@ +from typing import Any + +import numpy as np +import numpy.typing as npt + +b_ = np.bool() +dt = np.datetime64(0, "D") +td = np.timedelta64(0, "D") + +AR_b: npt.NDArray[np.bool] +AR_u: npt.NDArray[np.uint32] +AR_i: npt.NDArray[np.int64] +AR_f: npt.NDArray[np.longdouble] +AR_c: npt.NDArray[np.complex128] +AR_m: npt.NDArray[np.timedelta64] +AR_M: npt.NDArray[np.datetime64] + +ANY: Any + +AR_LIKE_b: list[bool] +AR_LIKE_u: list[np.uint32] +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_c: list[complex] +AR_LIKE_m: list[np.timedelta64] +AR_LIKE_M: list[np.datetime64] + +# Array subtraction + +# NOTE: mypys `NoReturn` errors are, unfortunately, not that great +_1 = AR_b - AR_LIKE_b # type: ignore[var-annotated] +_2 = AR_LIKE_b - AR_b # type: ignore[var-annotated] +AR_i - b"" # type: ignore[operator] + +AR_f - AR_LIKE_m # type: ignore[operator] +AR_f - AR_LIKE_M # type: ignore[operator] +AR_c - AR_LIKE_m # type: ignore[operator] +AR_c - AR_LIKE_M # type: ignore[operator] + +AR_m - AR_LIKE_f # type: ignore[operator] +AR_M - AR_LIKE_f # type: ignore[operator] +AR_m - AR_LIKE_c # type: ignore[operator] +AR_M - AR_LIKE_c # type: ignore[operator] + +AR_m - AR_LIKE_M # type: ignore[operator] +AR_LIKE_m - AR_M # type: ignore[operator] + +# array floor division + +AR_M // AR_LIKE_b # type: ignore[operator] +AR_M // AR_LIKE_u # type: ignore[operator] +AR_M // AR_LIKE_i # type: ignore[operator] +AR_M // AR_LIKE_f # type: ignore[operator] +AR_M // AR_LIKE_c # type: ignore[operator] +AR_M // AR_LIKE_m # type: ignore[operator] +AR_M // AR_LIKE_M # type: ignore[operator] + +AR_b // AR_LIKE_M # type: ignore[operator] +AR_u // AR_LIKE_M # type: ignore[operator] +AR_i // AR_LIKE_M # type: ignore[operator] +AR_f // AR_LIKE_M # type: ignore[operator] +AR_c // AR_LIKE_M # type: ignore[operator] +AR_m // AR_LIKE_M # type: ignore[operator] +AR_M // AR_LIKE_M # type: ignore[operator] + +_3 = AR_m // AR_LIKE_b # type: ignore[var-annotated] +AR_m // AR_LIKE_c # type: ignore[operator] + +AR_b // AR_LIKE_m # type: ignore[operator] +AR_u // AR_LIKE_m # type: ignore[operator] +AR_i // AR_LIKE_m # type: ignore[operator] +AR_f // AR_LIKE_m # type: ignore[operator] +AR_c // AR_LIKE_m # type: ignore[operator] + +# regression tests for https://github.com/numpy/numpy/issues/28957 +AR_c // 2 # type: ignore[operator] +AR_c // AR_i # type: ignore[operator] +AR_c // AR_c # type: ignore[operator] + +# Array multiplication + +AR_b *= AR_LIKE_u # type: ignore[arg-type] +AR_b *= AR_LIKE_i # type: ignore[arg-type] +AR_b *= AR_LIKE_f # type: ignore[arg-type] +AR_b *= AR_LIKE_c # type: ignore[arg-type] +AR_b *= AR_LIKE_m # type: ignore[arg-type] + +AR_u *= AR_LIKE_f # type: ignore[arg-type] +AR_u *= AR_LIKE_c # type: ignore[arg-type] +AR_u *= AR_LIKE_m # type: ignore[arg-type] + +AR_i *= AR_LIKE_f # type: ignore[arg-type] +AR_i *= AR_LIKE_c # type: ignore[arg-type] +AR_i *= AR_LIKE_m # type: ignore[arg-type] + +AR_f *= AR_LIKE_c # type: ignore[arg-type] +AR_f *= AR_LIKE_m # type: ignore[arg-type] + +# Array power + +AR_b **= AR_LIKE_b # type: ignore[misc] +AR_b **= AR_LIKE_u # type: ignore[misc] +AR_b **= AR_LIKE_i # type: ignore[misc] +AR_b **= AR_LIKE_f # type: ignore[misc] +AR_b **= AR_LIKE_c # type: ignore[misc] + +AR_u **= AR_LIKE_f # type: ignore[arg-type] +AR_u **= AR_LIKE_c # type: ignore[arg-type] + +AR_i **= AR_LIKE_f # type: ignore[arg-type] +AR_i **= AR_LIKE_c # type: ignore[arg-type] + +AR_f **= AR_LIKE_c # type: ignore[arg-type] + +# Scalars + +b_ - b_ # type: ignore[operator] + +dt + dt # type: ignore[operator] +td - dt # type: ignore[operator] +td % 1 # type: ignore[operator] +td / dt # type: ignore[operator] +td % dt # type: ignore[operator] + +-b_ # type: ignore[operator] ++b_ # type: ignore[operator] diff --git a/local_packages/numpy/typing/tests/data/fail/array_constructors.pyi b/local_packages/numpy/typing/tests/data/fail/array_constructors.pyi new file mode 100644 index 0000000..6ed6199 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/array_constructors.pyi @@ -0,0 +1,34 @@ +import numpy as np +import numpy.typing as npt + +a: npt.NDArray[np.float64] +generator = (i for i in range(10)) + +np.require(a, requirements=1) # type: ignore[call-overload] +np.require(a, requirements="TEST") # type: ignore[arg-type] + +np.zeros("test") # type: ignore[arg-type] +np.zeros() # type: ignore[call-overload] + +np.ones("test") # type: ignore[arg-type] +np.ones() # type: ignore[call-overload] + +np.array(0, float, True) # type: ignore[call-overload] + +np.linspace(None, "bob") # type: ignore[call-overload] +np.linspace(0, 2, num=10.0) # type: ignore[call-overload] +np.linspace(0, 2, endpoint="True") # type: ignore[call-overload] +np.linspace(0, 2, retstep=b"False") # type: ignore[call-overload] +np.linspace(0, 2, dtype=0) # type: ignore[call-overload] +np.linspace(0, 2, axis=None) # type: ignore[call-overload] + +np.logspace(None, "bob") # type: ignore[call-overload] +np.logspace(0, 2, base=None) # type: ignore[call-overload] + +np.geomspace(None, "bob") # type: ignore[call-overload] + +np.stack(generator) # type: ignore[call-overload] +np.hstack({1, 2}) # type: ignore[call-overload] +np.vstack(1) # type: ignore[call-overload] + +np.array([1], like=1) # type: ignore[call-overload] diff --git a/local_packages/numpy/typing/tests/data/fail/array_like.pyi b/local_packages/numpy/typing/tests/data/fail/array_like.pyi new file mode 100644 index 0000000..4e37354 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/array_like.pyi @@ -0,0 +1,15 @@ +import numpy as np +from numpy._typing import ArrayLike + +class A: ... + +x1: ArrayLike = (i for i in range(10)) # type: ignore[assignment] +x2: ArrayLike = A() # type: ignore[assignment] +x3: ArrayLike = {1: "foo", 2: "bar"} # type: ignore[assignment] + +scalar = np.int64(1) +scalar.__array__(dtype=np.float64) # type: ignore[call-overload] +array = np.array([1]) +array.__array__(dtype=np.float64) # type: ignore[call-overload] + +array.setfield(np.eye(1), np.int32, (0, 1)) # type: ignore[arg-type] diff --git a/local_packages/numpy/typing/tests/data/fail/array_pad.pyi b/local_packages/numpy/typing/tests/data/fail/array_pad.pyi new file mode 100644 index 0000000..42e61c8 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/array_pad.pyi @@ -0,0 +1,6 @@ +import numpy as np +import numpy.typing as npt + +AR_i8: npt.NDArray[np.int64] + +np.pad(AR_i8, 2, mode="bob") # type: ignore[call-overload] diff --git a/local_packages/numpy/typing/tests/data/fail/arrayprint.pyi b/local_packages/numpy/typing/tests/data/fail/arrayprint.pyi new file mode 100644 index 0000000..3c9dc93 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/arrayprint.pyi @@ -0,0 +1,15 @@ +from collections.abc import Callable +from typing import Any + +import numpy as np +import numpy.typing as npt + +AR: npt.NDArray[np.float64] +func1: Callable[[Any], str] +func2: Callable[[np.integer], str] + +np.array2string(AR, legacy="1.14") # type: ignore[arg-type] +np.array2string(AR, sign="*") # type: ignore[arg-type] +np.array2string(AR, floatmode="default") # type: ignore[arg-type] +np.array2string(AR, formatter={"A": func1}) # type: ignore[arg-type] +np.array2string(AR, formatter={"float": func2}) # type: ignore[arg-type] diff --git a/local_packages/numpy/typing/tests/data/fail/arrayterator.pyi b/local_packages/numpy/typing/tests/data/fail/arrayterator.pyi new file mode 100644 index 0000000..8d2295a --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/arrayterator.pyi @@ -0,0 +1,14 @@ +import numpy as np +import numpy.typing as npt + +AR_i8: npt.NDArray[np.int64] +ar_iter = np.lib.Arrayterator(AR_i8) + +np.lib.Arrayterator(np.int64()) # type: ignore[arg-type] +ar_iter.shape = (10, 5) # type: ignore[misc] +ar_iter[None] # type: ignore[index] +ar_iter[None, 1] # type: ignore[index] +ar_iter[np.intp()] # type: ignore[index] +ar_iter[np.intp(), ...] # type: ignore[index] +ar_iter[AR_i8] # type: ignore[index] +ar_iter[AR_i8, :] # type: ignore[index] diff --git a/local_packages/numpy/typing/tests/data/fail/bitwise_ops.pyi b/local_packages/numpy/typing/tests/data/fail/bitwise_ops.pyi new file mode 100644 index 0000000..f4de292 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/bitwise_ops.pyi @@ -0,0 +1,17 @@ +import numpy as np + +i8 = np.int64() +i4 = np.int32() +u8 = np.uint64() +b_ = np.bool() +i = 0 + +f8 = np.float64() + +b_ >> f8 # type: ignore[operator] +i8 << f8 # type: ignore[operator] +i | f8 # type: ignore[operator] +i8 ^ f8 # type: ignore[operator] +u8 & f8 # type: ignore[operator] +~f8 # type: ignore[operator] +# TODO: Certain mixes like i4 << u8 go to float and thus should fail diff --git a/local_packages/numpy/typing/tests/data/fail/char.pyi b/local_packages/numpy/typing/tests/data/fail/char.pyi new file mode 100644 index 0000000..3dbe5ed --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/char.pyi @@ -0,0 +1,63 @@ +import numpy as np +import numpy.typing as npt + +AR_U: npt.NDArray[np.str_] +AR_S: npt.NDArray[np.bytes_] + +np.char.equal(AR_U, AR_S) # type: ignore[arg-type] +np.char.not_equal(AR_U, AR_S) # type: ignore[arg-type] + +np.char.greater_equal(AR_U, AR_S) # type: ignore[arg-type] +np.char.less_equal(AR_U, AR_S) # type: ignore[arg-type] +np.char.greater(AR_U, AR_S) # type: ignore[arg-type] +np.char.less(AR_U, AR_S) # type: ignore[arg-type] + +np.char.encode(AR_S) # type: ignore[arg-type] +np.char.decode(AR_U) # type: ignore[arg-type] + +np.char.join(AR_U, b"_") # type: ignore[arg-type] +np.char.join(AR_S, "_") # type: ignore[arg-type] + +np.char.ljust(AR_U, 5, fillchar=b"a") # type: ignore[arg-type] +np.char.rjust(AR_U, 5, fillchar=b"a") # type: ignore[arg-type] + +np.char.lstrip(AR_U, chars=b"a") # type: ignore[arg-type] +np.char.lstrip(AR_S, chars="a") # type: ignore[arg-type] +np.char.strip(AR_U, chars=b"a") # type: ignore[arg-type] +np.char.strip(AR_S, chars="a") # type: ignore[arg-type] +np.char.rstrip(AR_U, chars=b"a") # type: ignore[arg-type] +np.char.rstrip(AR_S, chars="a") # type: ignore[arg-type] + +np.char.partition(AR_U, b"a") # type: ignore[arg-type] +np.char.partition(AR_S, "a") # type: ignore[arg-type] +np.char.rpartition(AR_U, b"a") # type: ignore[arg-type] +np.char.rpartition(AR_S, "a") # type: ignore[arg-type] + +np.char.replace(AR_U, b"_", b"-") # type: ignore[arg-type] +np.char.replace(AR_S, "_", "-") # type: ignore[arg-type] + +np.char.split(AR_U, b"_") # type: ignore[arg-type] +np.char.split(AR_S, "_") # type: ignore[arg-type] +np.char.rsplit(AR_U, b"_") # type: ignore[arg-type] +np.char.rsplit(AR_S, "_") # type: ignore[arg-type] + +np.char.count(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.count(AR_S, "a", end=9) # type: ignore[arg-type] + +np.char.endswith(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.endswith(AR_S, "a", end=9) # type: ignore[arg-type] +np.char.startswith(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.startswith(AR_S, "a", end=9) # type: ignore[arg-type] + +np.char.find(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.find(AR_S, "a", end=9) # type: ignore[arg-type] +np.char.rfind(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.rfind(AR_S, "a", end=9) # type: ignore[arg-type] + +np.char.index(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.index(AR_S, "a", end=9) # type: ignore[arg-type] +np.char.rindex(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.rindex(AR_S, "a", end=9) # type: ignore[arg-type] + +np.char.isdecimal(AR_S) # type: ignore[arg-type] +np.char.isnumeric(AR_S) # type: ignore[arg-type] diff --git a/local_packages/numpy/typing/tests/data/fail/chararray.pyi b/local_packages/numpy/typing/tests/data/fail/chararray.pyi new file mode 100644 index 0000000..5898955 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/chararray.pyi @@ -0,0 +1,61 @@ +from typing import Any + +import numpy as np + +AR_U: np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] +AR_S: np.char.chararray[tuple[Any, ...], np.dtype[np.bytes_]] + +AR_S.encode() # type: ignore[misc] +AR_U.decode() # type: ignore[misc] + +AR_U.join(b"_") # type: ignore[arg-type] +AR_S.join("_") # type: ignore[arg-type] + +AR_U.ljust(5, fillchar=b"a") # type: ignore[arg-type] +AR_U.rjust(5, fillchar=b"a") # type: ignore[arg-type] + +AR_U.lstrip(chars=b"a") # type: ignore[arg-type] +AR_S.lstrip(chars="a") # type: ignore[arg-type] +AR_U.strip(chars=b"a") # type: ignore[arg-type] +AR_S.strip(chars="a") # type: ignore[arg-type] +AR_U.rstrip(chars=b"a") # type: ignore[arg-type] +AR_S.rstrip(chars="a") # type: ignore[arg-type] + +AR_U.partition(b"a") # type: ignore[arg-type] +AR_S.partition("a") # type: ignore[arg-type] +AR_U.rpartition(b"a") # type: ignore[arg-type] +AR_S.rpartition("a") # type: ignore[arg-type] + +AR_U.replace(b"_", b"-") # type: ignore[arg-type] +AR_S.replace("_", "-") # type: ignore[arg-type] + +AR_U.split(b"_") # type: ignore[arg-type] +AR_S.split("_") # type: ignore[arg-type] +AR_S.split(1) # type: ignore[arg-type] +AR_U.rsplit(b"_") # type: ignore[arg-type] +AR_S.rsplit("_") # type: ignore[arg-type] + +AR_U.count(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.count("a", end=9) # type: ignore[arg-type] + +AR_U.endswith(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.endswith("a", end=9) # type: ignore[arg-type] +AR_U.startswith(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.startswith("a", end=9) # type: ignore[arg-type] + +AR_U.find(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.find("a", end=9) # type: ignore[arg-type] +AR_U.rfind(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.rfind("a", end=9) # type: ignore[arg-type] + +AR_U.index(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.index("a", end=9) # type: ignore[arg-type] +AR_U.rindex(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.rindex("a", end=9) # type: ignore[arg-type] + +AR_U == AR_S # type: ignore[operator] +AR_U != AR_S # type: ignore[operator] +AR_U >= AR_S # type: ignore[operator] +AR_U <= AR_S # type: ignore[operator] +AR_U > AR_S # type: ignore[operator] +AR_U < AR_S # type: ignore[operator] diff --git a/local_packages/numpy/typing/tests/data/fail/comparisons.pyi b/local_packages/numpy/typing/tests/data/fail/comparisons.pyi new file mode 100644 index 0000000..d2965b5 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/comparisons.pyi @@ -0,0 +1,27 @@ +import numpy as np +import numpy.typing as npt + +AR_i: npt.NDArray[np.int64] +AR_f: npt.NDArray[np.float64] +AR_c: npt.NDArray[np.complex128] +AR_m: npt.NDArray[np.timedelta64] +AR_M: npt.NDArray[np.datetime64] + +AR_f > AR_m # type: ignore[operator] +AR_c > AR_m # type: ignore[operator] + +AR_m > AR_f # type: ignore[operator] +AR_m > AR_c # type: ignore[operator] + +AR_i > AR_M # type: ignore[operator] +AR_f > AR_M # type: ignore[operator] +AR_m > AR_M # type: ignore[operator] + +AR_M > AR_i # type: ignore[operator] +AR_M > AR_f # type: ignore[operator] +AR_M > AR_m # type: ignore[operator] + +AR_i > "" # type: ignore[operator] +AR_i > b"" # type: ignore[operator] +"" > AR_M # type: ignore[operator] +b"" > AR_M # type: ignore[operator] diff --git a/local_packages/numpy/typing/tests/data/fail/constants.pyi b/local_packages/numpy/typing/tests/data/fail/constants.pyi new file mode 100644 index 0000000..10717f6 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/constants.pyi @@ -0,0 +1,3 @@ +import numpy as np + +np.little_endian = np.little_endian # type: ignore[misc] diff --git a/local_packages/numpy/typing/tests/data/fail/datasource.pyi b/local_packages/numpy/typing/tests/data/fail/datasource.pyi new file mode 100644 index 0000000..4c603cf --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/datasource.pyi @@ -0,0 +1,16 @@ +from pathlib import Path + +import numpy as np + +path: Path +d1: np.lib.npyio.DataSource + +d1.abspath(path) # type: ignore[arg-type] +d1.abspath(b"...") # type: ignore[arg-type] + +d1.exists(path) # type: ignore[arg-type] +d1.exists(b"...") # type: ignore[arg-type] + +d1.open(path, "r") # type: ignore[arg-type] +d1.open(b"...", encoding="utf8") # type: ignore[arg-type] +d1.open(None, newline="/n") # type: ignore[arg-type] diff --git a/local_packages/numpy/typing/tests/data/fail/dtype.pyi b/local_packages/numpy/typing/tests/data/fail/dtype.pyi new file mode 100644 index 0000000..64a7c3f --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/dtype.pyi @@ -0,0 +1,17 @@ +import numpy as np + +class Test1: + not_dtype = np.dtype(float) + +class Test2: + dtype = float + +np.dtype(Test1()) # type: ignore[call-overload] +np.dtype(Test2()) # type: ignore[arg-type] + +np.dtype( # type: ignore[call-overload] + { + "field1": (float, 1), + "field2": (int, 3), + } +) diff --git a/local_packages/numpy/typing/tests/data/fail/einsumfunc.pyi b/local_packages/numpy/typing/tests/data/fail/einsumfunc.pyi new file mode 100644 index 0000000..982ad98 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/einsumfunc.pyi @@ -0,0 +1,12 @@ +import numpy as np +import numpy.typing as npt + +AR_i: npt.NDArray[np.int64] +AR_f: npt.NDArray[np.float64] +AR_m: npt.NDArray[np.timedelta64] +AR_U: npt.NDArray[np.str_] + +np.einsum("i,i->i", AR_i, AR_m) # type: ignore[arg-type] +np.einsum("i,i->i", AR_f, AR_f, dtype=np.int32) # type: ignore[arg-type] +np.einsum("i,i->i", AR_i, AR_i, out=AR_U) # type: ignore[type-var] +np.einsum("i,i->i", AR_i, AR_i, out=AR_U, casting="unsafe") # type: ignore[call-overload] diff --git a/local_packages/numpy/typing/tests/data/fail/flatiter.pyi b/local_packages/numpy/typing/tests/data/fail/flatiter.pyi new file mode 100644 index 0000000..2c6e912 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/flatiter.pyi @@ -0,0 +1,38 @@ +from typing import Any + +import numpy as np +import numpy.typing as npt + +class _Index: + def __index__(self) -> int: ... + +class _MyArray: + def __array__(self) -> np.ndarray[tuple[int], np.dtypes.Float64DType]: ... + +_index: _Index +_my_array: _MyArray +_something: Any +_dtype: np.dtype[np.int8] + +_a_nd: np.flatiter[npt.NDArray[np.float64]] + +### + +_a_nd.base = _something # type: ignore[misc] +_a_nd.coords = _something # type: ignore[misc] +_a_nd.index = _something # type: ignore[misc] + +_a_nd.copy("C") # type: ignore[call-arg] +_a_nd.copy(order="C") # type: ignore[call-arg] + +# NOTE: Contrary to `ndarray.__getitem__` its counterpart in `flatiter` +# does not accept objects with the `__array__` or `__index__` protocols; +# boolean indexing is just plain broken (gh-17175) +_a_nd[np.True_] # type: ignore[call-overload] +_a_nd[_index] # type: ignore[call-overload] +_a_nd[_my_array] # type: ignore[call-overload] + +# `dtype` and `copy` are no-ops in `flatiter.__array__` +_a_nd.__array__(_dtype) # type: ignore[arg-type] +_a_nd.__array__(dtype=_dtype) # type: ignore[call-arg] +_a_nd.__array__(copy=True) # type: ignore[arg-type] diff --git a/local_packages/numpy/typing/tests/data/fail/fromnumeric.pyi b/local_packages/numpy/typing/tests/data/fail/fromnumeric.pyi new file mode 100644 index 0000000..92b0cb3 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/fromnumeric.pyi @@ -0,0 +1,148 @@ +"""Tests for :mod:`numpy._core.fromnumeric`.""" + +import numpy as np +import numpy.typing as npt + +A = np.array(True, ndmin=2, dtype=bool) +A.setflags(write=False) +AR_U: npt.NDArray[np.str_] +AR_M: npt.NDArray[np.datetime64] +AR_f4: npt.NDArray[np.float32] + +a = np.bool(True) + +np.take(a, None) # type: ignore[call-overload] +np.take(a, axis=1.0) # type: ignore[call-overload] +np.take(A, out=1) # type: ignore[call-overload] +np.take(A, mode="bob") # type: ignore[call-overload] + +np.reshape(a, None) # type: ignore[call-overload] +np.reshape(A, 1, order="bob") # type: ignore[call-overload] + +np.choose(a, None) # type: ignore[call-overload] +np.choose(a, out=1.0) # type: ignore[call-overload] +np.choose(A, mode="bob") # type: ignore[call-overload] + +np.repeat(a, None) # type: ignore[call-overload] +np.repeat(A, 1, axis=1.0) # type: ignore[call-overload] + +np.swapaxes(A, None, 1) # type: ignore[call-overload] +np.swapaxes(A, 1, [0]) # type: ignore[call-overload] + +np.transpose(A, axes=1.0) # type: ignore[call-overload] + +np.partition(a, None) # type: ignore[call-overload] +np.partition(a, 0, axis="bob") # type: ignore[call-overload] +np.partition(A, 0, kind="bob") # type: ignore[call-overload] +np.partition(A, 0, order=range(5)) # type: ignore[arg-type] + +np.argpartition(a, None) # type: ignore[arg-type] +np.argpartition(a, 0, axis="bob") # type: ignore[arg-type] +np.argpartition(A, 0, kind="bob") # type: ignore[arg-type] +np.argpartition(A, 0, order=range(5)) # type: ignore[arg-type] + +np.sort(A, axis="bob") # type: ignore[call-overload] +np.sort(A, kind="bob") # type: ignore[call-overload] +np.sort(A, order=range(5)) # type: ignore[arg-type] + +np.argsort(A, axis="bob") # type: ignore[arg-type] +np.argsort(A, kind="bob") # type: ignore[arg-type] +np.argsort(A, order=range(5)) # type: ignore[arg-type] + +np.argmax(A, axis="bob") # type: ignore[call-overload] +np.argmax(A, kind="bob") # type: ignore[call-overload] +np.argmax(A, out=AR_f4) # type: ignore[type-var] + +np.argmin(A, axis="bob") # type: ignore[call-overload] +np.argmin(A, kind="bob") # type: ignore[call-overload] +np.argmin(A, out=AR_f4) # type: ignore[type-var] + +np.searchsorted(A[0], 0, side="bob") # type: ignore[call-overload] +np.searchsorted(A[0], 0, sorter=1.0) # type: ignore[call-overload] + +np.resize(A, 1.0) # type: ignore[call-overload] + +np.squeeze(A, 1.0) # type: ignore[call-overload] + +np.diagonal(A, offset=None) # type: ignore[call-overload] +np.diagonal(A, axis1="bob") # type: ignore[call-overload] +np.diagonal(A, axis2=[]) # type: ignore[call-overload] + +np.trace(A, offset=None) # type: ignore[call-overload] +np.trace(A, axis1="bob") # type: ignore[call-overload] +np.trace(A, axis2=[]) # type: ignore[call-overload] + +np.ravel(a, order="bob") # type: ignore[call-overload] + +np.nonzero(0) # type: ignore[arg-type] + +np.compress([True], A, axis=1.0) # type: ignore[call-overload] + +np.clip(a, 1, 2, out=1) # type: ignore[call-overload] + +np.sum(a, axis=1.0) # type: ignore[call-overload] +np.sum(a, keepdims=1.0) # type: ignore[call-overload] +np.sum(a, initial=[1]) # type: ignore[call-overload] + +np.all(a, axis=1.0) # type: ignore[call-overload] +np.all(a, keepdims=1.0) # type: ignore[call-overload] +np.all(a, out=1.0) # type: ignore[call-overload] + +np.any(a, axis=1.0) # type: ignore[call-overload] +np.any(a, keepdims=1.0) # type: ignore[call-overload] +np.any(a, out=1.0) # type: ignore[call-overload] + +np.cumsum(a, axis=1.0) # type: ignore[call-overload] +np.cumsum(a, dtype=1.0) # type: ignore[call-overload] +np.cumsum(a, out=1.0) # type: ignore[call-overload] + +np.ptp(a, axis=1.0) # type: ignore[call-overload] +np.ptp(a, keepdims=1.0) # type: ignore[call-overload] +np.ptp(a, out=1.0) # type: ignore[call-overload] + +np.amax(a, axis=1.0) # type: ignore[call-overload] +np.amax(a, keepdims=1.0) # type: ignore[call-overload] +np.amax(a, out=1.0) # type: ignore[call-overload] +np.amax(a, initial=[1.0]) # type: ignore[call-overload] +np.amax(a, where=[1.0]) # type: ignore[arg-type] + +np.amin(a, axis=1.0) # type: ignore[call-overload] +np.amin(a, keepdims=1.0) # type: ignore[call-overload] +np.amin(a, out=1.0) # type: ignore[call-overload] +np.amin(a, initial=[1.0]) # type: ignore[call-overload] +np.amin(a, where=[1.0]) # type: ignore[arg-type] + +np.prod(a, axis=1.0) # type: ignore[call-overload] +np.prod(a, out=False) # type: ignore[call-overload] +np.prod(a, keepdims=1.0) # type: ignore[call-overload] +np.prod(a, initial=int) # type: ignore[call-overload] +np.prod(a, where=1.0) # type: ignore[call-overload] +np.prod(AR_U) # type: ignore[arg-type] + +np.cumprod(a, axis=1.0) # type: ignore[call-overload] +np.cumprod(a, out=False) # type: ignore[call-overload] +np.cumprod(AR_U) # type: ignore[arg-type] + +np.size(a, axis=1.0) # type: ignore[arg-type] + +np.around(a, decimals=1.0) # type: ignore[call-overload] +np.around(a, out=type) # type: ignore[call-overload] +np.around(AR_U) # type: ignore[arg-type] + +np.mean(a, axis=1.0) # type: ignore[call-overload] +np.mean(a, out=False) # type: ignore[call-overload] +np.mean(a, keepdims=1.0) # type: ignore[call-overload] +np.mean(AR_U) # type: ignore[arg-type] +np.mean(AR_M) # type: ignore[arg-type] + +np.std(a, axis=1.0) # type: ignore[call-overload] +np.std(a, out=False) # type: ignore[call-overload] +np.std(a, ddof="test") # type: ignore[call-overload] +np.std(a, keepdims=1.0) # type: ignore[call-overload] +np.std(AR_U) # type: ignore[arg-type] + +np.var(a, axis=1.0) # type: ignore[call-overload] +np.var(a, out=False) # type: ignore[call-overload] +np.var(a, ddof="test") # type: ignore[call-overload] +np.var(a, keepdims=1.0) # type: ignore[call-overload] +np.var(AR_U) # type: ignore[arg-type] diff --git a/local_packages/numpy/typing/tests/data/fail/histograms.pyi b/local_packages/numpy/typing/tests/data/fail/histograms.pyi new file mode 100644 index 0000000..5f78927 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/histograms.pyi @@ -0,0 +1,12 @@ +import numpy as np +import numpy.typing as npt + +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] + +np.histogram_bin_edges(AR_i8, range=(0, 1, 2)) # type: ignore[arg-type] + +np.histogram(AR_i8, range=(0, 1, 2)) # type: ignore[arg-type] + +np.histogramdd(AR_i8, range=(0, 1)) # type: ignore[arg-type] +np.histogramdd(AR_i8, range=[(0, 1, 2)]) # type: ignore[list-item] diff --git a/local_packages/numpy/typing/tests/data/fail/index_tricks.pyi b/local_packages/numpy/typing/tests/data/fail/index_tricks.pyi new file mode 100644 index 0000000..8b7b1ae --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/index_tricks.pyi @@ -0,0 +1,14 @@ +import numpy as np + +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] + +np.ndindex([1, 2, 3]) # type: ignore[call-overload] +np.unravel_index(AR_LIKE_f, (1, 2, 3)) # type: ignore[arg-type] +np.ravel_multi_index(AR_LIKE_i, (1, 2, 3), mode="bob") # type: ignore[call-overload] +np.mgrid[1] # type: ignore[index] +np.mgrid[...] # type: ignore[index] +np.ogrid[1] # type: ignore[index] +np.ogrid[...] # type: ignore[index] +np.fill_diagonal(AR_LIKE_f, 2) # type: ignore[arg-type] +np.diag_indices(1.0) # type: ignore[arg-type] diff --git a/local_packages/numpy/typing/tests/data/fail/lib_function_base.pyi b/local_packages/numpy/typing/tests/data/fail/lib_function_base.pyi new file mode 100644 index 0000000..d7be993 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/lib_function_base.pyi @@ -0,0 +1,60 @@ +from typing import Any + +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +AR_m: npt.NDArray[np.timedelta64] +AR_M: npt.NDArray[np.datetime64] +AR_O: npt.NDArray[np.object_] +AR_b_list: list[npt.NDArray[np.bool]] + +def fn_none_i(a: None, /) -> npt.NDArray[Any]: ... +def fn_ar_i(a: npt.NDArray[np.float64], posarg: int, /) -> npt.NDArray[Any]: ... + +np.average(AR_m) # type: ignore[type-var] +np.select(1, [AR_f8]) # type: ignore[call-overload] +np.angle(AR_m) # type: ignore[type-var] +np.unwrap(AR_m) # type: ignore[type-var] +np.unwrap(AR_c16) # type: ignore[type-var] +np.trim_zeros(1) # type: ignore[arg-type] +np.place(1, [True], 1.5) # type: ignore[arg-type] +np.vectorize(1) # type: ignore[arg-type] +np.place(AR_f8, slice(None), 5) # type: ignore[arg-type] + +np.piecewise(AR_f8, True, [fn_ar_i], "wrong") # type: ignore[call-overload] +np.piecewise(AR_f8, AR_b_list, [fn_none_i]) # type: ignore[call-overload] +np.piecewise(AR_f8, AR_b_list, [fn_ar_i]) # type: ignore[call-overload] +np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 3.14) # type: ignore[call-overload] +np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 42, None) # type: ignore[call-overload] +np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 42, _=None) # type: ignore[list-item] + +np.interp(AR_f8, AR_c16, AR_f8) # type: ignore[arg-type] +np.interp(AR_c16, AR_f8, AR_f8) # type: ignore[arg-type] +np.interp(AR_f8, AR_f8, AR_f8, period=AR_c16) # type: ignore[call-overload] +np.interp(AR_f8, AR_f8, AR_O) # type: ignore[arg-type] + +np.cov(AR_m) # type: ignore[type-var] +np.cov(AR_O) # type: ignore[type-var] +np.corrcoef(AR_m) # type: ignore[type-var] +np.corrcoef(AR_O) # type: ignore[type-var] +np.corrcoef(AR_f8, bias=True) # type: ignore[call-overload] +np.corrcoef(AR_f8, ddof=2) # type: ignore[call-overload] +np.blackman(1j) # type: ignore[arg-type] +np.bartlett(1j) # type: ignore[arg-type] +np.hanning(1j) # type: ignore[arg-type] +np.hamming(1j) # type: ignore[arg-type] +np.hamming(AR_c16) # type: ignore[arg-type] +np.kaiser(1j, 1) # type: ignore[arg-type] +np.sinc(AR_O) # type: ignore[type-var] +np.median(AR_M) # type: ignore[type-var] + +np.percentile(AR_f8, 50j) # type: ignore[call-overload] +np.percentile(AR_f8, 50, interpolation="bob") # type: ignore[call-overload] +np.quantile(AR_f8, 0.5j) # type: ignore[call-overload] +np.quantile(AR_f8, 0.5, interpolation="bob") # type: ignore[call-overload] +np.meshgrid(AR_f8, AR_f8, indexing="bob") # type: ignore[call-overload] +np.delete(AR_f8, AR_f8) # type: ignore[arg-type] +np.insert(AR_f8, AR_f8, 1.5) # type: ignore[arg-type] +np.digitize(AR_f8, 1j) # type: ignore[call-overload] diff --git a/local_packages/numpy/typing/tests/data/fail/lib_polynomial.pyi b/local_packages/numpy/typing/tests/data/fail/lib_polynomial.pyi new file mode 100644 index 0000000..727eb7f --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/lib_polynomial.pyi @@ -0,0 +1,29 @@ +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +AR_O: npt.NDArray[np.object_] +AR_U: npt.NDArray[np.str_] + +poly_obj: np.poly1d + +np.polymul(AR_f8, AR_U) # type: ignore[arg-type] +np.polydiv(AR_f8, AR_U) # type: ignore[arg-type] + +5**poly_obj # type: ignore[operator] + +np.polyint(AR_U) # type: ignore[arg-type] +np.polyint(AR_f8, m=1j) # type: ignore[call-overload] + +np.polyder(AR_U) # type: ignore[arg-type] +np.polyder(AR_f8, m=1j) # type: ignore[call-overload] + +np.polyfit(AR_O, AR_f8, 1) # type: ignore[arg-type] +np.polyfit(AR_f8, AR_f8, 1, rcond=1j) # type: ignore[call-overload] +np.polyfit(AR_f8, AR_f8, 1, w=AR_c16) # type: ignore[arg-type] +np.polyfit(AR_f8, AR_f8, 1, cov="bob") # type: ignore[call-overload] + +np.polyval(AR_f8, AR_U) # type: ignore[arg-type] +np.polyadd(AR_f8, AR_U) # type: ignore[arg-type] +np.polysub(AR_f8, AR_U) # type: ignore[arg-type] diff --git a/local_packages/numpy/typing/tests/data/fail/lib_utils.pyi b/local_packages/numpy/typing/tests/data/fail/lib_utils.pyi new file mode 100644 index 0000000..25af32b --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/lib_utils.pyi @@ -0,0 +1,3 @@ +import numpy.lib.array_utils as array_utils + +array_utils.byte_bounds(1) # type: ignore[arg-type] diff --git a/local_packages/numpy/typing/tests/data/fail/lib_version.pyi b/local_packages/numpy/typing/tests/data/fail/lib_version.pyi new file mode 100644 index 0000000..62011a8 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/lib_version.pyi @@ -0,0 +1,6 @@ +from numpy.lib import NumpyVersion + +version: NumpyVersion + +NumpyVersion(b"1.8.0") # type: ignore[arg-type] +version >= b"1.8.0" # type: ignore[operator] diff --git a/local_packages/numpy/typing/tests/data/fail/linalg.pyi b/local_packages/numpy/typing/tests/data/fail/linalg.pyi new file mode 100644 index 0000000..78aceb2 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/linalg.pyi @@ -0,0 +1,52 @@ +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] +AR_O: npt.NDArray[np.object_] +AR_M: npt.NDArray[np.datetime64] + +np.linalg.tensorsolve(AR_O, AR_O) # type: ignore[arg-type] + +np.linalg.solve(AR_O, AR_O) # type: ignore[arg-type] + +np.linalg.tensorinv(AR_O) # type: ignore[arg-type] + +np.linalg.inv(AR_O) # type: ignore[arg-type] + +np.linalg.matrix_power(AR_M, 5) # type: ignore[arg-type] + +np.linalg.cholesky(AR_O) # type: ignore[arg-type] + +np.linalg.qr(AR_O) # type: ignore[arg-type] +np.linalg.qr(AR_f8, mode="bob") # type: ignore[call-overload] + +np.linalg.eigvals(AR_O) # type: ignore[arg-type] + +np.linalg.eigvalsh(AR_O) # type: ignore[arg-type] +np.linalg.eigvalsh(AR_O, UPLO="bob") # type: ignore[call-overload] + +np.linalg.eig(AR_O) # type: ignore[arg-type] + +np.linalg.eigh(AR_O) # type: ignore[arg-type] +np.linalg.eigh(AR_O, UPLO="bob") # type: ignore[call-overload] + +np.linalg.svd(AR_O) # type: ignore[arg-type] + +np.linalg.svdvals(AR_O) # type: ignore[arg-type] +np.linalg.svdvals(AR_M) # type: ignore[arg-type] +np.linalg.svdvals(x=AR_f8) # type: ignore[call-overload] + +np.linalg.cond(AR_O) # type: ignore[arg-type] +np.linalg.cond(AR_f8, p="bob") # type: ignore[arg-type] + +np.linalg.matrix_rank(AR_O) # type: ignore[arg-type] + +np.linalg.pinv(AR_O) # type: ignore[arg-type] + +np.linalg.slogdet(AR_O) # type: ignore[arg-type] + +np.linalg.det(AR_O) # type: ignore[arg-type] + +np.linalg.norm(AR_f8, ord="bob") # type: ignore[call-overload] + +np.linalg.multi_dot([AR_M]) # type: ignore[list-item] diff --git a/local_packages/numpy/typing/tests/data/fail/ma.pyi b/local_packages/numpy/typing/tests/data/fail/ma.pyi new file mode 100644 index 0000000..084ae97 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/ma.pyi @@ -0,0 +1,155 @@ +from typing import TypeAlias, TypeVar + +import numpy as np +import numpy.typing as npt +from numpy._typing import _AnyShape + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +MaskedArray: TypeAlias = np.ma.MaskedArray[_AnyShape, np.dtype[_ScalarT]] + +MAR_1d_f8: np.ma.MaskedArray[tuple[int], np.dtype[np.float64]] +MAR_b: MaskedArray[np.bool] +MAR_c: MaskedArray[np.complex128] +MAR_td64: MaskedArray[np.timedelta64] + +AR_b: npt.NDArray[np.bool] + +MAR_1d_f8.shape = (3, 1) # type: ignore[assignment] +MAR_1d_f8.dtype = np.bool # type: ignore[assignment] + +def invalid_recordmask_setter() -> None: + # We make an inner function for this one to avoid the + # `NoReturn` causing an early exit for type checkers. + MAR_1d_f8.recordmask = [True] # type: ignore[assignment] + +np.ma.min(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.min(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] +np.ma.min(MAR_1d_f8, out=1.0) # type: ignore[call-overload] +np.ma.min(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.min(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.min(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.min(out=1.0) # type: ignore[call-overload] +MAR_1d_f8.min(fill_value=lambda x: 27) # type: ignore[call-overload] + +np.ma.max(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.max(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] +np.ma.max(MAR_1d_f8, out=1.0) # type: ignore[call-overload] +np.ma.max(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.max(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.max(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.max(out=1.0) # type: ignore[call-overload] +MAR_1d_f8.max(fill_value=lambda x: 27) # type: ignore[call-overload] + +np.ma.ptp(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.ptp(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] +np.ma.ptp(MAR_1d_f8, out=1.0) # type: ignore[call-overload] +np.ma.ptp(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.ptp(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.ptp(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.ptp(out=1.0) # type: ignore[call-overload] +MAR_1d_f8.ptp(fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.argmin(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmin(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmin(out=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmin(fill_value=lambda x: 27) # type: ignore[call-overload] + +np.ma.argmin(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.argmin(MAR_1d_f8, axis=(1,)) # type: ignore[call-overload] +np.ma.argmin(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] +np.ma.argmin(MAR_1d_f8, out=1.0) # type: ignore[call-overload] +np.ma.argmin(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.argmax(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmax(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmax(out=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmax(fill_value=lambda x: 27) # type: ignore[call-overload] + +np.ma.argmax(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.argmax(MAR_1d_f8, axis=(0,)) # type: ignore[call-overload] +np.ma.argmax(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] +np.ma.argmax(MAR_1d_f8, out=1.0) # type: ignore[call-overload] +np.ma.argmax(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.all(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.all(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.all(out=1.0) # type: ignore[call-overload] + +MAR_1d_f8.any(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.any(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.any(out=1.0) # type: ignore[call-overload] + +MAR_1d_f8.sort(axis=(0, 1)) # type: ignore[arg-type] +MAR_1d_f8.sort(axis=None) # type: ignore[arg-type] +MAR_1d_f8.sort(kind="cabbage") # type: ignore[arg-type] +MAR_1d_f8.sort(order=lambda: "cabbage") # type: ignore[arg-type] +MAR_1d_f8.sort(endwith="cabbage") # type: ignore[arg-type] +MAR_1d_f8.sort(fill_value=lambda: "cabbage") # type: ignore[arg-type] +MAR_1d_f8.sort(stable="cabbage") # type: ignore[arg-type] +MAR_1d_f8.sort(stable=True) # type: ignore[arg-type] + +MAR_1d_f8.take(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.take(out=1) # type: ignore[call-overload] +MAR_1d_f8.take(mode="bob") # type: ignore[call-overload] + +np.ma.take(None) # type: ignore[call-overload] +np.ma.take(axis=1.0) # type: ignore[call-overload] +np.ma.take(out=1) # type: ignore[call-overload] +np.ma.take(mode="bob") # type: ignore[call-overload] + +MAR_1d_f8.partition(["cabbage"]) # type: ignore[arg-type] +MAR_1d_f8.partition(axis=(0, 1)) # type: ignore[arg-type, call-arg] +MAR_1d_f8.partition(kind="cabbage") # type: ignore[arg-type, call-arg] +MAR_1d_f8.partition(order=lambda: "cabbage") # type: ignore[arg-type, call-arg] +MAR_1d_f8.partition(AR_b) # type: ignore[arg-type] + +MAR_1d_f8.argpartition(["cabbage"]) # type: ignore[arg-type] +MAR_1d_f8.argpartition(axis=(0, 1)) # type: ignore[arg-type, call-arg] +MAR_1d_f8.argpartition(kind="cabbage") # type: ignore[arg-type, call-arg] +MAR_1d_f8.argpartition(order=lambda: "cabbage") # type: ignore[arg-type, call-arg] +MAR_1d_f8.argpartition(AR_b) # type: ignore[arg-type] + +np.ma.ndim(lambda: "lambda") # type: ignore[arg-type] + +np.ma.size(AR_b, axis="0") # type: ignore[arg-type] + +MAR_1d_f8 >= (lambda x: "mango") # type: ignore[operator] +MAR_1d_f8 > (lambda x: "mango") # type: ignore[operator] +MAR_1d_f8 <= (lambda x: "mango") # type: ignore[operator] +MAR_1d_f8 < (lambda x: "mango") # type: ignore[operator] + +MAR_1d_f8.count(axis=0.) # type: ignore[call-overload] + +np.ma.count(MAR_1d_f8, axis=0.) # type: ignore[call-overload] + +MAR_1d_f8.put(4, 999, mode="flip") # type: ignore[arg-type] + +np.ma.put(MAR_1d_f8, 4, 999, mode="flip") # type: ignore[arg-type] + +np.ma.put([1, 1, 3], 0, 999) # type: ignore[arg-type] + +np.ma.compressed(lambda: "compress me") # type: ignore[call-overload] + +np.ma.allequal(MAR_1d_f8, [1, 2, 3], fill_value=1.5) # type: ignore[arg-type] + +np.ma.allclose(MAR_1d_f8, [1, 2, 3], masked_equal=4.5) # type: ignore[arg-type] +np.ma.allclose(MAR_1d_f8, [1, 2, 3], rtol=".4") # type: ignore[arg-type] +np.ma.allclose(MAR_1d_f8, [1, 2, 3], atol=".5") # type: ignore[arg-type] + +MAR_1d_f8.__setmask__("mask") # type: ignore[arg-type] + +MAR_b *= 2 # type: ignore[arg-type] +MAR_c //= 2 # type: ignore[misc] +MAR_td64 **= 2 # type: ignore[misc] + +MAR_1d_f8.swapaxes(axis1=1, axis2=0) # type: ignore[call-arg] + +MAR_1d_f8.argsort(axis=(1, 0)) # type: ignore[arg-type] + +np.ma.MaskedArray(np.array([1, 2, 3]), keep_mask="yes") # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1, 2, 3]), subok=None) # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1, 2, 3]), ndim=None) # type: ignore[call-overload] +np.ma.MaskedArray(np.array([1, 2, 3]), order="Corinthian") # type: ignore[call-overload] diff --git a/local_packages/numpy/typing/tests/data/fail/memmap.pyi b/local_packages/numpy/typing/tests/data/fail/memmap.pyi new file mode 100644 index 0000000..3a4fc7d --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/memmap.pyi @@ -0,0 +1,5 @@ +import numpy as np + +with open("file.txt", "r") as f: + np.memmap(f) # type: ignore[call-overload] +np.memmap("test.txt", shape=[10, 5]) # type: ignore[call-overload] diff --git a/local_packages/numpy/typing/tests/data/fail/modules.pyi b/local_packages/numpy/typing/tests/data/fail/modules.pyi new file mode 100644 index 0000000..c12a182 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/modules.pyi @@ -0,0 +1,17 @@ +import numpy as np + +np.testing.bob # type: ignore[attr-defined] +np.bob # type: ignore[attr-defined] + +# Stdlib modules in the namespace by accident +np.warnings # type: ignore[attr-defined] +np.sys # type: ignore[attr-defined] +np.os # type: ignore[attr-defined] +np.math # type: ignore[attr-defined] + +# Public sub-modules that are not imported to their parent module by default; +# e.g. one must first execute `import numpy.lib.recfunctions` +np.lib.recfunctions # type: ignore[attr-defined] + +np.__deprecated_attrs__ # type: ignore[attr-defined] +np.__expired_functions__ # type: ignore[attr-defined] diff --git a/local_packages/numpy/typing/tests/data/fail/multiarray.pyi b/local_packages/numpy/typing/tests/data/fail/multiarray.pyi new file mode 100644 index 0000000..51128df --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/multiarray.pyi @@ -0,0 +1,52 @@ +import numpy as np +import numpy.typing as npt + +i8: np.int64 + +AR_b: npt.NDArray[np.bool] +AR_u1: npt.NDArray[np.uint8] +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] +AR_M: npt.NDArray[np.datetime64] + +M: np.datetime64 + +AR_LIKE_f: list[float] + +def func(a: int) -> None: ... + +np.where(AR_b, 1) # type: ignore[call-overload] + +np.can_cast(AR_f8, 1) # type: ignore[arg-type] + +np.vdot(AR_M, AR_M) # type: ignore[arg-type] + +np.copyto(AR_LIKE_f, AR_f8) # type: ignore[arg-type] + +np.putmask(AR_LIKE_f, [True, True, False], 1.5) # type: ignore[arg-type] + +np.packbits(AR_f8) # type: ignore[arg-type] +np.packbits(AR_u1, bitorder=">") # type: ignore[call-overload] + +np.unpackbits(AR_i8) # type: ignore[arg-type] +np.unpackbits(AR_u1, bitorder=">") # type: ignore[call-overload] + +np.shares_memory(1, 1, max_work=i8) # type: ignore[arg-type] +np.may_share_memory(1, 1, max_work=i8) # type: ignore[arg-type] + +np.arange(stop=10) # type: ignore[call-overload] + +np.datetime_data(int) # type: ignore[arg-type] + +np.busday_offset("2012", 10) # type: ignore[call-overload] + +np.datetime_as_string("2012") # type: ignore[call-overload] + +np.char.compare_chararrays("a", b"a", "==", False) # type: ignore[call-overload] + +np.nested_iters([AR_i8, AR_i8]) # type: ignore[call-arg] +np.nested_iters([AR_i8, AR_i8], 0) # type: ignore[arg-type] +np.nested_iters([AR_i8, AR_i8], [0]) # type: ignore[list-item] +np.nested_iters([AR_i8, AR_i8], [[0], [1]], flags=["test"]) # type: ignore[list-item] +np.nested_iters([AR_i8, AR_i8], [[0], [1]], op_flags=[["test"]]) # type: ignore[list-item] +np.nested_iters([AR_i8, AR_i8], [[0], [1]], buffersize=1.0) # type: ignore[arg-type] diff --git a/local_packages/numpy/typing/tests/data/fail/ndarray.pyi b/local_packages/numpy/typing/tests/data/fail/ndarray.pyi new file mode 100644 index 0000000..2aeec08 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/ndarray.pyi @@ -0,0 +1,11 @@ +import numpy as np + +# Ban setting dtype since mutating the type of the array in place +# makes having ndarray be generic over dtype impossible. Generally +# users should use `ndarray.view` in this situation anyway. See +# +# https://github.com/numpy/numpy-stubs/issues/7 +# +# for more context. +float_array = np.array([1.0]) +float_array.dtype = np.bool # type: ignore[assignment, misc] diff --git a/local_packages/numpy/typing/tests/data/fail/ndarray_misc.pyi b/local_packages/numpy/typing/tests/data/fail/ndarray_misc.pyi new file mode 100644 index 0000000..2941893 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/ndarray_misc.pyi @@ -0,0 +1,49 @@ +""" +Tests for miscellaneous (non-magic) ``np.ndarray``/``np.generic`` methods. + +More extensive tests are performed for the methods' +function-based counterpart in `../from_numeric.py`. + +""" +from typing import Never + +import numpy as np +import numpy.typing as npt + +f8: np.float64 +AR_f8: npt.NDArray[np.float64] +AR_M: npt.NDArray[np.datetime64] +AR_b: npt.NDArray[np.bool] + +ctypes_obj = AR_f8.ctypes + +f8.argpartition(0) # type: ignore[attr-defined] +f8.partition(0) # type: ignore[attr-defined] +f8.dot(1) # type: ignore[attr-defined] + +# NOTE: The following functions retur `Never`, causing mypy to stop analysis at that +# point, which we circumvent by wrapping them in a function. + +def f8_diagonal(x: np.float64) -> Never: + return x.diagonal() # type: ignore[misc] + +def f8_nonzero(x: np.float64) -> Never: + return x.nonzero() # type: ignore[misc] + +def f8_setfield(x: np.float64) -> Never: + return x.setfield(2, np.float64) # type: ignore[misc] + +def f8_sort(x: np.float64) -> Never: + return x.sort() # type: ignore[misc] + +def f8_trace(x: np.float64) -> Never: + return x.trace() # type: ignore[misc] + +AR_M.__complex__() # type: ignore[misc] +AR_b.__index__() # type: ignore[misc] + +AR_f8[1.5] # type: ignore[call-overload] +AR_f8["field_a"] # type: ignore[call-overload] +AR_f8[["field_a", "field_b"]] # type: ignore[index] + +AR_f8.__array_finalize__(object()) # type: ignore[arg-type] diff --git a/local_packages/numpy/typing/tests/data/fail/nditer.pyi b/local_packages/numpy/typing/tests/data/fail/nditer.pyi new file mode 100644 index 0000000..fae728d --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/nditer.pyi @@ -0,0 +1,8 @@ +import numpy as np + +class Test(np.nditer): ... # type: ignore[misc] + +np.nditer([0, 1], flags=["test"]) # type: ignore[list-item] +np.nditer([0, 1], op_flags=[["test"]]) # type: ignore[list-item] +np.nditer([0, 1], itershape=(1.0,)) # type: ignore[arg-type] +np.nditer([0, 1], buffersize=1.0) # type: ignore[call-overload] diff --git a/local_packages/numpy/typing/tests/data/fail/nested_sequence.pyi b/local_packages/numpy/typing/tests/data/fail/nested_sequence.pyi new file mode 100644 index 0000000..1004a36 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/nested_sequence.pyi @@ -0,0 +1,17 @@ +from collections.abc import Sequence + +from numpy._typing import _NestedSequence + +a: Sequence[float] +b: list[complex] +c: tuple[str, ...] +d: int +e: str + +def func(a: _NestedSequence[int]) -> None: ... + +reveal_type(func(a)) # type: ignore[arg-type, misc] +reveal_type(func(b)) # type: ignore[arg-type, misc] +reveal_type(func(c)) # type: ignore[arg-type, misc] +reveal_type(func(d)) # type: ignore[arg-type, misc] +reveal_type(func(e)) # type: ignore[arg-type, misc] diff --git a/local_packages/numpy/typing/tests/data/fail/npyio.pyi b/local_packages/numpy/typing/tests/data/fail/npyio.pyi new file mode 100644 index 0000000..e20be3a --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/npyio.pyi @@ -0,0 +1,24 @@ +import pathlib +from typing import IO + +import numpy as np +import numpy.typing as npt + +str_path: str +bytes_path: bytes +pathlib_path: pathlib.Path +str_file: IO[str] +AR_i8: npt.NDArray[np.int64] + +np.load(str_file) # type: ignore[arg-type] + +np.save(bytes_path, AR_i8) # type: ignore[arg-type] +np.save(str_path, AR_i8, fix_imports=True) # type: ignore[call-arg] + +np.savez(bytes_path, AR_i8) # type: ignore[arg-type] + +np.savez_compressed(bytes_path, AR_i8) # type: ignore[arg-type] + +np.loadtxt(bytes_path) # type: ignore[arg-type] + +np.fromregex(bytes_path, ".", np.int64) # type: ignore[call-overload] diff --git a/local_packages/numpy/typing/tests/data/fail/numerictypes.pyi b/local_packages/numpy/typing/tests/data/fail/numerictypes.pyi new file mode 100644 index 0000000..a1fd47a --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/numerictypes.pyi @@ -0,0 +1,5 @@ +import numpy as np + +np.isdtype(1, np.int64) # type: ignore[arg-type] + +np.issubdtype(1, np.int64) # type: ignore[arg-type] diff --git a/local_packages/numpy/typing/tests/data/fail/random.pyi b/local_packages/numpy/typing/tests/data/fail/random.pyi new file mode 100644 index 0000000..1abf4b7 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/random.pyi @@ -0,0 +1,62 @@ +import numpy as np +import numpy.typing as npt + +SEED_FLOAT: float = 457.3 +SEED_ARR_FLOAT: npt.NDArray[np.float64] = np.array([1.0, 2, 3, 4]) +SEED_ARRLIKE_FLOAT: list[float] = [1.0, 2.0, 3.0, 4.0] +SEED_SEED_SEQ: np.random.SeedSequence = np.random.SeedSequence(0) +SEED_STR: str = "String seeding not allowed" + +# default rng +np.random.default_rng(SEED_FLOAT) # type: ignore[arg-type] +np.random.default_rng(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.default_rng(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.default_rng(SEED_STR) # type: ignore[arg-type] + +# Seed Sequence +np.random.SeedSequence(SEED_FLOAT) # type: ignore[arg-type] +np.random.SeedSequence(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.SeedSequence(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.SeedSequence(SEED_SEED_SEQ) # type: ignore[arg-type] +np.random.SeedSequence(SEED_STR) # type: ignore[arg-type] + +seed_seq: np.random.bit_generator.SeedSequence = np.random.SeedSequence() +seed_seq.spawn(11.5) # type: ignore[arg-type] +seed_seq.generate_state(3.14) # type: ignore[arg-type] +seed_seq.generate_state(3, np.uint8) # type: ignore[arg-type] +seed_seq.generate_state(3, "uint8") # type: ignore[arg-type] +seed_seq.generate_state(3, "u1") # type: ignore[arg-type] +seed_seq.generate_state(3, np.uint16) # type: ignore[arg-type] +seed_seq.generate_state(3, "uint16") # type: ignore[arg-type] +seed_seq.generate_state(3, "u2") # type: ignore[arg-type] +seed_seq.generate_state(3, np.int32) # type: ignore[arg-type] +seed_seq.generate_state(3, "int32") # type: ignore[arg-type] +seed_seq.generate_state(3, "i4") # type: ignore[arg-type] + +# Bit Generators +np.random.MT19937(SEED_FLOAT) # type: ignore[arg-type] +np.random.MT19937(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.MT19937(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.MT19937(SEED_STR) # type: ignore[arg-type] + +np.random.PCG64(SEED_FLOAT) # type: ignore[arg-type] +np.random.PCG64(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.PCG64(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.PCG64(SEED_STR) # type: ignore[arg-type] + +np.random.Philox(SEED_FLOAT) # type: ignore[arg-type] +np.random.Philox(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.Philox(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.Philox(SEED_STR) # type: ignore[arg-type] + +np.random.SFC64(SEED_FLOAT) # type: ignore[arg-type] +np.random.SFC64(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.SFC64(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.SFC64(SEED_STR) # type: ignore[arg-type] + +# Generator +np.random.Generator(None) # type: ignore[arg-type] +np.random.Generator(12333283902830213) # type: ignore[arg-type] +np.random.Generator("OxFEEDF00D") # type: ignore[arg-type] +np.random.Generator([123, 234]) # type: ignore[arg-type] +np.random.Generator(np.array([123, 234], dtype="u4")) # type: ignore[arg-type] diff --git a/local_packages/numpy/typing/tests/data/fail/rec.pyi b/local_packages/numpy/typing/tests/data/fail/rec.pyi new file mode 100644 index 0000000..c9d43dd --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/rec.pyi @@ -0,0 +1,17 @@ +import numpy as np +import numpy.typing as npt + +AR_i8: npt.NDArray[np.int64] + +np.rec.fromarrays(1) # type: ignore[call-overload] +np.rec.fromarrays([1, 2, 3], dtype=[("f8", "f8")], formats=["f8", "f8"]) # type: ignore[call-overload] + +np.rec.fromrecords(AR_i8) # type: ignore[arg-type] +np.rec.fromrecords([(1.5,)], dtype=[("f8", "f8")], formats=["f8", "f8"]) # type: ignore[call-overload] + +np.rec.fromstring("string", dtype=[("f8", "f8")]) # type: ignore[call-overload] +np.rec.fromstring(b"bytes") # type: ignore[call-overload] +np.rec.fromstring(b"(1.5,)", dtype=[("f8", "f8")], formats=["f8", "f8"]) # type: ignore[call-overload] + +with open("test", "r") as f: + np.rec.fromfile(f, dtype=[("f8", "f8")]) # type: ignore[call-overload] diff --git a/local_packages/numpy/typing/tests/data/fail/scalars.pyi b/local_packages/numpy/typing/tests/data/fail/scalars.pyi new file mode 100644 index 0000000..018a88e --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/scalars.pyi @@ -0,0 +1,86 @@ +import numpy as np + +f2: np.float16 +f8: np.float64 +c8: np.complex64 + +# Construction + +np.float32(3j) # type: ignore[arg-type] + +# Technically the following examples are valid NumPy code. But they +# are not considered a best practice, and people who wish to use the +# stubs should instead do +# +# np.array([1.0, 0.0, 0.0], dtype=np.float32) +# np.array([], dtype=np.complex64) +# +# See e.g. the discussion on the mailing list +# +# https://mail.python.org/pipermail/numpy-discussion/2020-April/080566.html +# +# and the issue +# +# https://github.com/numpy/numpy-stubs/issues/41 +# +# for more context. +np.float32([1.0, 0.0, 0.0]) # type: ignore[arg-type] +np.complex64([]) # type: ignore[call-overload] + +# TODO: protocols (can't check for non-existent protocols w/ __getattr__) + +np.datetime64(0) # type: ignore[call-overload] + +class A: + def __float__(self) -> float: ... + +np.int8(A()) # type: ignore[arg-type] +np.int16(A()) # type: ignore[arg-type] +np.int32(A()) # type: ignore[arg-type] +np.int64(A()) # type: ignore[arg-type] +np.uint8(A()) # type: ignore[arg-type] +np.uint16(A()) # type: ignore[arg-type] +np.uint32(A()) # type: ignore[arg-type] +np.uint64(A()) # type: ignore[arg-type] + +np.void("test") # type: ignore[call-overload] +np.void("test", dtype=None) # type: ignore[call-overload] + +np.generic(1) # type: ignore[abstract] +np.number(1) # type: ignore[abstract] +np.integer(1) # type: ignore[abstract] +np.inexact(1) # type: ignore[abstract] +np.character("test") # type: ignore[abstract] +np.flexible(b"test") # type: ignore[abstract] + +np.float64(value=0.0) # type: ignore[call-arg] +np.int64(value=0) # type: ignore[call-arg] +np.uint64(value=0) # type: ignore[call-arg] +np.complex128(value=0.0j) # type: ignore[call-overload] +np.str_(value="bob") # type: ignore[call-overload] +np.bytes_(value=b"test") # type: ignore[call-overload] +np.void(value=b"test") # type: ignore[call-overload] +np.bool(value=True) # type: ignore[call-overload] +np.datetime64(value="2019") # type: ignore[call-overload] +np.timedelta64(value=0) # type: ignore[call-overload] + +np.bytes_(b"hello", encoding="utf-8") # type: ignore[call-overload] +np.str_("hello", encoding="utf-8") # type: ignore[call-overload] + +f8.item(1) # type: ignore[call-overload] +f8.item((0, 1)) # type: ignore[arg-type] +f8.squeeze(axis=1) # type: ignore[arg-type] +f8.squeeze(axis=(0, 1)) # type: ignore[arg-type] +f8.transpose(1) # type: ignore[arg-type] + +def func(a: np.float32) -> None: ... + +func(f2) # type: ignore[arg-type] +func(f8) # type: ignore[arg-type] + +c8.__getnewargs__() # type: ignore[attr-defined] +f2.__getnewargs__() # type: ignore[attr-defined] +f2.hex() # type: ignore[attr-defined] +np.float16.fromhex("0x0.0p+0") # type: ignore[attr-defined] +f2.__trunc__() # type: ignore[attr-defined] +f2.__getformat__("float") # type: ignore[attr-defined] diff --git a/local_packages/numpy/typing/tests/data/fail/shape.pyi b/local_packages/numpy/typing/tests/data/fail/shape.pyi new file mode 100644 index 0000000..a024d7b --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/shape.pyi @@ -0,0 +1,7 @@ +from typing import Any + +import numpy as np + +# test bounds of _ShapeT_co + +np.ndarray[tuple[str, str], Any] # type: ignore[type-var] diff --git a/local_packages/numpy/typing/tests/data/fail/shape_base.pyi b/local_packages/numpy/typing/tests/data/fail/shape_base.pyi new file mode 100644 index 0000000..652b24b --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/shape_base.pyi @@ -0,0 +1,8 @@ +import numpy as np + +class DTypeLike: + dtype: np.dtype[np.int_] + +dtype_like: DTypeLike + +np.expand_dims(dtype_like, (5, 10)) # type: ignore[call-overload] diff --git a/local_packages/numpy/typing/tests/data/fail/stride_tricks.pyi b/local_packages/numpy/typing/tests/data/fail/stride_tricks.pyi new file mode 100644 index 0000000..7f9a26b --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/stride_tricks.pyi @@ -0,0 +1,9 @@ +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] + +np.lib.stride_tricks.as_strided(AR_f8, shape=8) # type: ignore[call-overload] +np.lib.stride_tricks.as_strided(AR_f8, strides=8) # type: ignore[call-overload] + +np.lib.stride_tricks.sliding_window_view(AR_f8, axis=(1,)) # type: ignore[call-overload] diff --git a/local_packages/numpy/typing/tests/data/fail/strings.pyi b/local_packages/numpy/typing/tests/data/fail/strings.pyi new file mode 100644 index 0000000..328a521 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/strings.pyi @@ -0,0 +1,52 @@ +import numpy as np +import numpy.typing as npt + +AR_U: npt.NDArray[np.str_] +AR_S: npt.NDArray[np.bytes_] + +np.strings.equal(AR_U, AR_S) # type: ignore[arg-type] +np.strings.not_equal(AR_U, AR_S) # type: ignore[arg-type] + +np.strings.greater_equal(AR_U, AR_S) # type: ignore[arg-type] +np.strings.less_equal(AR_U, AR_S) # type: ignore[arg-type] +np.strings.greater(AR_U, AR_S) # type: ignore[arg-type] +np.strings.less(AR_U, AR_S) # type: ignore[arg-type] + +np.strings.encode(AR_S) # type: ignore[arg-type] +np.strings.decode(AR_U) # type: ignore[arg-type] + +np.strings.lstrip(AR_U, b"a") # type: ignore[arg-type] +np.strings.lstrip(AR_S, "a") # type: ignore[arg-type] +np.strings.strip(AR_U, b"a") # type: ignore[arg-type] +np.strings.strip(AR_S, "a") # type: ignore[arg-type] +np.strings.rstrip(AR_U, b"a") # type: ignore[arg-type] +np.strings.rstrip(AR_S, "a") # type: ignore[arg-type] + +np.strings.partition(AR_U, b"a") # type: ignore[arg-type] +np.strings.partition(AR_S, "a") # type: ignore[arg-type] +np.strings.rpartition(AR_U, b"a") # type: ignore[arg-type] +np.strings.rpartition(AR_S, "a") # type: ignore[arg-type] + +np.strings.count(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] +np.strings.count(AR_S, "a", 0, 9) # type: ignore[arg-type] + +np.strings.endswith(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] +np.strings.endswith(AR_S, "a", 0, 9) # type: ignore[arg-type] +np.strings.startswith(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] +np.strings.startswith(AR_S, "a", 0, 9) # type: ignore[arg-type] + +np.strings.find(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] +np.strings.find(AR_S, "a", 0, 9) # type: ignore[arg-type] +np.strings.rfind(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] +np.strings.rfind(AR_S, "a", 0, 9) # type: ignore[arg-type] + +np.strings.index(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.strings.index(AR_S, "a", end=9) # type: ignore[arg-type] +np.strings.rindex(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.strings.rindex(AR_S, "a", end=9) # type: ignore[arg-type] + +np.strings.isdecimal(AR_S) # type: ignore[arg-type] +np.strings.isnumeric(AR_S) # type: ignore[arg-type] + +np.strings.replace(AR_U, b"_", b"-", 10) # type: ignore[arg-type] +np.strings.replace(AR_S, "_", "-", 1) # type: ignore[arg-type] diff --git a/local_packages/numpy/typing/tests/data/fail/testing.pyi b/local_packages/numpy/typing/tests/data/fail/testing.pyi new file mode 100644 index 0000000..517062c --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/testing.pyi @@ -0,0 +1,28 @@ +import numpy as np +import numpy.typing as npt + +AR_U: npt.NDArray[np.str_] + +def func(x: object) -> bool: ... + +np.testing.assert_(True, msg=1) # type: ignore[arg-type] +np.testing.build_err_msg(1, "test") # type: ignore[arg-type] +np.testing.assert_almost_equal(AR_U, AR_U) # type: ignore[arg-type] +np.testing.assert_approx_equal([1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] +np.testing.assert_array_almost_equal(AR_U, AR_U) # type: ignore[arg-type] +np.testing.assert_array_less(AR_U, AR_U) # type: ignore[arg-type] +np.testing.assert_string_equal(b"a", b"a") # type: ignore[arg-type] + +np.testing.assert_raises(expected_exception=TypeError, callable=func) # type: ignore[call-overload] +np.testing.assert_raises_regex(expected_exception=TypeError, expected_regex="T", callable=func) # type: ignore[call-overload] + +np.testing.assert_allclose(AR_U, AR_U) # type: ignore[arg-type] +np.testing.assert_array_almost_equal_nulp(AR_U, AR_U) # type: ignore[arg-type] +np.testing.assert_array_max_ulp(AR_U, AR_U) # type: ignore[arg-type] + +np.testing.assert_warns(RuntimeWarning, func) # type: ignore[call-overload] +np.testing.assert_no_warnings(func=func) # type: ignore[call-overload] +np.testing.assert_no_warnings(func) # type: ignore[call-overload] +np.testing.assert_no_warnings(func, y=None) # type: ignore[call-overload] + +np.testing.assert_no_gc_cycles(func=func) # type: ignore[call-overload] diff --git a/local_packages/numpy/typing/tests/data/fail/twodim_base.pyi b/local_packages/numpy/typing/tests/data/fail/twodim_base.pyi new file mode 100644 index 0000000..473419c --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/twodim_base.pyi @@ -0,0 +1,39 @@ +from typing import type_check_only + +import numpy as np +import numpy.typing as npt + +_0d_bool: np.bool +_nd_bool: npt.NDArray[np.bool] +_nd_td64: npt.NDArray[np.timedelta64] +_to_2d_bool: list[list[bool]] + +@type_check_only +def func1(ar: np.ndarray, a: int) -> npt.NDArray[np.str_]: ... +@type_check_only +def func2(ar: np.ndarray, a: float) -> float: ... + +### + +np.eye(10, M=20.0) # type: ignore[call-overload] +np.eye(10, k=2.5, dtype=int) # type: ignore[call-overload] + +np.diag(_nd_bool, k=0.5) # type: ignore[call-overload] +np.diagflat(_nd_bool, k=0.5) # type: ignore[call-overload] + +np.tri(10, M=20.0) # type: ignore[call-overload] +np.tri(10, k=2.5, dtype=int) # type: ignore[call-overload] + +np.tril(_nd_bool, k=0.5) # type: ignore[call-overload] +np.triu(_nd_bool, k=0.5) # type: ignore[call-overload] + +np.vander(_nd_td64) # type: ignore[type-var] + +np.histogram2d(_nd_td64) # type: ignore[call-overload] + +np.mask_indices(10, func1) # type: ignore[arg-type] +np.mask_indices(10, func2, 10.5) # type: ignore[arg-type] + +np.tril_indices(3.14) # type: ignore[arg-type] + +np.tril_indices_from(_to_2d_bool) # type: ignore[arg-type] diff --git a/local_packages/numpy/typing/tests/data/fail/type_check.pyi b/local_packages/numpy/typing/tests/data/fail/type_check.pyi new file mode 100644 index 0000000..8b68e99 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/type_check.pyi @@ -0,0 +1,12 @@ +import numpy as np + +DTYPE_i8: np.dtype[np.int64] + +np.mintypecode(DTYPE_i8) # type: ignore[arg-type] +np.iscomplexobj(DTYPE_i8) # type: ignore[arg-type] +np.isrealobj(DTYPE_i8) # type: ignore[arg-type] + +np.typename(DTYPE_i8) # type: ignore[call-overload] +np.typename("invalid") # type: ignore[call-overload] + +np.common_type(np.timedelta64()) # type: ignore[arg-type] diff --git a/local_packages/numpy/typing/tests/data/fail/ufunc_config.pyi b/local_packages/numpy/typing/tests/data/fail/ufunc_config.pyi new file mode 100644 index 0000000..c67b6a3 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/ufunc_config.pyi @@ -0,0 +1,21 @@ +"""Typing tests for `numpy._core._ufunc_config`.""" + +import numpy as np + +def func1(a: str, b: int, c: float) -> None: ... +def func2(a: str, *, b: int) -> None: ... + +class Write1: + def write1(self, a: str) -> None: ... + +class Write2: + def write(self, a: str, b: str) -> None: ... + +class Write3: + def write(self, *, a: str) -> None: ... + +np.seterrcall(func1) # type: ignore[arg-type] +np.seterrcall(func2) # type: ignore[arg-type] +np.seterrcall(Write1()) # type: ignore[arg-type] +np.seterrcall(Write2()) # type: ignore[arg-type] +np.seterrcall(Write3()) # type: ignore[arg-type] diff --git a/local_packages/numpy/typing/tests/data/fail/ufunclike.pyi b/local_packages/numpy/typing/tests/data/fail/ufunclike.pyi new file mode 100644 index 0000000..e556e40 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/ufunclike.pyi @@ -0,0 +1,21 @@ +import numpy as np +import numpy.typing as npt + +AR_c: npt.NDArray[np.complex128] +AR_m: npt.NDArray[np.timedelta64] +AR_M: npt.NDArray[np.datetime64] +AR_O: npt.NDArray[np.object_] + +np.fix(AR_c) # type: ignore[arg-type] +np.fix(AR_m) # type: ignore[arg-type] +np.fix(AR_M) # type: ignore[arg-type] + +np.isposinf(AR_c) # type: ignore[arg-type] +np.isposinf(AR_m) # type: ignore[arg-type] +np.isposinf(AR_M) # type: ignore[arg-type] +np.isposinf(AR_O) # type: ignore[arg-type] + +np.isneginf(AR_c) # type: ignore[arg-type] +np.isneginf(AR_m) # type: ignore[arg-type] +np.isneginf(AR_M) # type: ignore[arg-type] +np.isneginf(AR_O) # type: ignore[arg-type] diff --git a/local_packages/numpy/typing/tests/data/fail/ufuncs.pyi b/local_packages/numpy/typing/tests/data/fail/ufuncs.pyi new file mode 100644 index 0000000..1b1628d --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/ufuncs.pyi @@ -0,0 +1,17 @@ +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] + +np.sin.nin + "foo" # type: ignore[operator] +np.sin(1, foo="bar") # type: ignore[call-overload] + +np.abs(None) # type: ignore[call-overload] + +np.add(1, 1, 1) # type: ignore[call-overload] +np.add(1, 1, axis=0) # type: ignore[call-overload] + +np.matmul(AR_f8, AR_f8, where=True) # type: ignore[call-overload] + +np.frexp(AR_f8, out=None) # type: ignore[call-overload] +np.frexp(AR_f8, out=AR_f8) # type: ignore[call-overload] diff --git a/local_packages/numpy/typing/tests/data/fail/warnings_and_errors.pyi b/local_packages/numpy/typing/tests/data/fail/warnings_and_errors.pyi new file mode 100644 index 0000000..8ba34f6 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/fail/warnings_and_errors.pyi @@ -0,0 +1,5 @@ +import numpy.exceptions as ex + +ex.AxisError(1.0) # type: ignore[call-overload] +ex.AxisError(1, ndim=2.0) # type: ignore[call-overload] +ex.AxisError(2, msg_prefix=404) # type: ignore[call-overload] diff --git a/local_packages/numpy/typing/tests/data/misc/extended_precision.pyi b/local_packages/numpy/typing/tests/data/misc/extended_precision.pyi new file mode 100644 index 0000000..7978faf --- /dev/null +++ b/local_packages/numpy/typing/tests/data/misc/extended_precision.pyi @@ -0,0 +1,9 @@ +from typing import assert_type + +import numpy as np +from numpy._typing import _96Bit, _128Bit + +assert_type(np.float96(), np.floating[_96Bit]) +assert_type(np.float128(), np.floating[_128Bit]) +assert_type(np.complex192(), np.complexfloating[_96Bit, _96Bit]) +assert_type(np.complex256(), np.complexfloating[_128Bit, _128Bit]) diff --git a/local_packages/numpy/typing/tests/data/mypy.ini b/local_packages/numpy/typing/tests/data/mypy.ini new file mode 100644 index 0000000..4aa465a --- /dev/null +++ b/local_packages/numpy/typing/tests/data/mypy.ini @@ -0,0 +1,8 @@ +[mypy] +strict = True +enable_error_code = deprecated, ignore-without-code, truthy-bool +disallow_any_unimported = True +allow_redefinition_new = True +local_partial_types = True +show_absolute_path = True +pretty = True diff --git a/local_packages/numpy/typing/tests/data/pass/arithmetic.py b/local_packages/numpy/typing/tests/data/pass/arithmetic.py new file mode 100644 index 0000000..e347ec0 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/arithmetic.py @@ -0,0 +1,614 @@ +from __future__ import annotations + +from typing import Any, cast + +import pytest + +import numpy as np +import numpy.typing as npt + +c16 = np.complex128(1) +f8 = np.float64(1) +i8 = np.int64(1) +u8 = np.uint64(1) + +c8 = np.complex64(1) +f4 = np.float32(1) +i4 = np.int32(1) +u4 = np.uint32(1) + +dt = np.datetime64(1, "D") +td = np.timedelta64(1, "D") + +b_ = np.bool(1) + +b = bool(1) +c = complex(1) +f = float(1) +i = 1 + + +class Object: + def __array__(self, dtype: np.typing.DTypeLike | None = None, + copy: bool | None = None) -> np.ndarray[Any, np.dtype[np.object_]]: + ret = np.empty((), dtype=object) + ret[()] = self + return ret + + def __sub__(self, value: Any) -> Object: + return self + + def __rsub__(self, value: Any) -> Object: + return self + + def __floordiv__(self, value: Any) -> Object: + return self + + def __rfloordiv__(self, value: Any) -> Object: + return self + + def __mul__(self, value: Any) -> Object: + return self + + def __rmul__(self, value: Any) -> Object: + return self + + def __pow__(self, value: Any) -> Object: + return self + + def __rpow__(self, value: Any) -> Object: + return self + + +AR_b: npt.NDArray[np.bool] = np.array([True]) +AR_u: npt.NDArray[np.uint32] = np.array([1], dtype=np.uint32) +AR_i: npt.NDArray[np.int64] = np.array([1]) +AR_integer: npt.NDArray[np.integer] = cast(npt.NDArray[np.integer], AR_i) +AR_f: npt.NDArray[np.float64] = np.array([1.0]) +AR_c: npt.NDArray[np.complex128] = np.array([1j]) +AR_m: npt.NDArray[np.timedelta64] = np.array([np.timedelta64(1, "D")]) +AR_M: npt.NDArray[np.datetime64] = np.array([np.datetime64(1, "D")]) +AR_O: npt.NDArray[np.object_] = np.array([Object()]) + +AR_LIKE_b = [True] +AR_LIKE_u = [np.uint32(1)] +AR_LIKE_i = [1] +AR_LIKE_f = [1.0] +AR_LIKE_c = [1j] +AR_LIKE_m = [np.timedelta64(1, "D")] +AR_LIKE_M = [np.datetime64(1, "D")] +AR_LIKE_O = [Object()] + +# Array subtractions + +AR_b - AR_LIKE_u +AR_b - AR_LIKE_i +AR_b - AR_LIKE_f +AR_b - AR_LIKE_c +AR_b - AR_LIKE_m +AR_b - AR_LIKE_O + +AR_LIKE_u - AR_b +AR_LIKE_i - AR_b +AR_LIKE_f - AR_b +AR_LIKE_c - AR_b +AR_LIKE_m - AR_b +AR_LIKE_M - AR_b +AR_LIKE_O - AR_b + +AR_u - AR_LIKE_b +AR_u - AR_LIKE_u +AR_u - AR_LIKE_i +AR_u - AR_LIKE_f +AR_u - AR_LIKE_c +AR_u - AR_LIKE_m +AR_u - AR_LIKE_O + +AR_LIKE_b - AR_u +AR_LIKE_u - AR_u +AR_LIKE_i - AR_u +AR_LIKE_f - AR_u +AR_LIKE_c - AR_u +AR_LIKE_m - AR_u +AR_LIKE_M - AR_u +AR_LIKE_O - AR_u + +AR_i - AR_LIKE_b +AR_i - AR_LIKE_u +AR_i - AR_LIKE_i +AR_i - AR_LIKE_f +AR_i - AR_LIKE_c +AR_i - AR_LIKE_m +AR_i - AR_LIKE_O + +AR_LIKE_b - AR_i +AR_LIKE_u - AR_i +AR_LIKE_i - AR_i +AR_LIKE_f - AR_i +AR_LIKE_c - AR_i +AR_LIKE_m - AR_i +AR_LIKE_M - AR_i +AR_LIKE_O - AR_i + +AR_f - AR_LIKE_b +AR_f - AR_LIKE_u +AR_f - AR_LIKE_i +AR_f - AR_LIKE_f +AR_f - AR_LIKE_c +AR_f - AR_LIKE_O + +AR_LIKE_b - AR_f +AR_LIKE_u - AR_f +AR_LIKE_i - AR_f +AR_LIKE_f - AR_f +AR_LIKE_c - AR_f +AR_LIKE_O - AR_f + +AR_c - AR_LIKE_b +AR_c - AR_LIKE_u +AR_c - AR_LIKE_i +AR_c - AR_LIKE_f +AR_c - AR_LIKE_c +AR_c - AR_LIKE_O + +AR_LIKE_b - AR_c +AR_LIKE_u - AR_c +AR_LIKE_i - AR_c +AR_LIKE_f - AR_c +AR_LIKE_c - AR_c +AR_LIKE_O - AR_c + +AR_m - AR_LIKE_b +AR_m - AR_LIKE_u +AR_m - AR_LIKE_i +AR_m - AR_LIKE_m + +AR_LIKE_b - AR_m +AR_LIKE_u - AR_m +AR_LIKE_i - AR_m +AR_LIKE_m - AR_m +AR_LIKE_M - AR_m + +AR_M - AR_LIKE_b +AR_M - AR_LIKE_u +AR_M - AR_LIKE_i +AR_M - AR_LIKE_m +AR_M - AR_LIKE_M + +AR_LIKE_M - AR_M + +AR_O - AR_LIKE_b +AR_O - AR_LIKE_u +AR_O - AR_LIKE_i +AR_O - AR_LIKE_f +AR_O - AR_LIKE_c +AR_O - AR_LIKE_O + +AR_LIKE_b - AR_O +AR_LIKE_u - AR_O +AR_LIKE_i - AR_O +AR_LIKE_f - AR_O +AR_LIKE_c - AR_O +AR_LIKE_O - AR_O + +AR_u += AR_b +AR_u += AR_u +AR_u += 1 # Allowed during runtime as long as the object is 0D and >=0 + +# Array floor division + +AR_b // AR_LIKE_b +AR_b // AR_LIKE_u +AR_b // AR_LIKE_i +AR_b // AR_LIKE_f +AR_b // AR_LIKE_O + +AR_LIKE_b // AR_b +AR_LIKE_u // AR_b +AR_LIKE_i // AR_b +AR_LIKE_f // AR_b +AR_LIKE_O // AR_b + +AR_u // AR_LIKE_b +AR_u // AR_LIKE_u +AR_u // AR_LIKE_i +AR_u // AR_LIKE_f +AR_u // AR_LIKE_O + +AR_LIKE_b // AR_u +AR_LIKE_u // AR_u +AR_LIKE_i // AR_u +AR_LIKE_f // AR_u +AR_LIKE_m // AR_u +AR_LIKE_O // AR_u + +AR_i // AR_LIKE_b +AR_i // AR_LIKE_u +AR_i // AR_LIKE_i +AR_i // AR_LIKE_f +AR_i // AR_LIKE_O + +AR_LIKE_b // AR_i +AR_LIKE_u // AR_i +AR_LIKE_i // AR_i +AR_LIKE_f // AR_i +AR_LIKE_m // AR_i +AR_LIKE_O // AR_i + +AR_f // AR_LIKE_b +AR_f // AR_LIKE_u +AR_f // AR_LIKE_i +AR_f // AR_LIKE_f +AR_f // AR_LIKE_O + +AR_LIKE_b // AR_f +AR_LIKE_u // AR_f +AR_LIKE_i // AR_f +AR_LIKE_f // AR_f +AR_LIKE_m // AR_f +AR_LIKE_O // AR_f + +AR_m // AR_LIKE_u +AR_m // AR_LIKE_i +AR_m // AR_LIKE_f +AR_m // AR_LIKE_m + +AR_LIKE_m // AR_m + +AR_m /= f +AR_m //= f +AR_m /= AR_f +AR_m /= AR_LIKE_f +AR_m //= AR_f +AR_m //= AR_LIKE_f + +AR_O // AR_LIKE_b +AR_O // AR_LIKE_u +AR_O // AR_LIKE_i +AR_O // AR_LIKE_f +AR_O // AR_LIKE_O + +AR_LIKE_b // AR_O +AR_LIKE_u // AR_O +AR_LIKE_i // AR_O +AR_LIKE_f // AR_O +AR_LIKE_O // AR_O + +# Inplace multiplication + +AR_b *= AR_LIKE_b + +AR_u *= AR_LIKE_b +AR_u *= AR_LIKE_u + +AR_i *= AR_LIKE_b +AR_i *= AR_LIKE_u +AR_i *= AR_LIKE_i + +AR_integer *= AR_LIKE_b +AR_integer *= AR_LIKE_u +AR_integer *= AR_LIKE_i + +AR_f *= AR_LIKE_b +AR_f *= AR_LIKE_u +AR_f *= AR_LIKE_i +AR_f *= AR_LIKE_f + +AR_c *= AR_LIKE_b +AR_c *= AR_LIKE_u +AR_c *= AR_LIKE_i +AR_c *= AR_LIKE_f +AR_c *= AR_LIKE_c + +AR_m *= AR_LIKE_b +AR_m *= AR_LIKE_u +AR_m *= AR_LIKE_i +AR_m *= AR_LIKE_f + +AR_O *= AR_LIKE_b +AR_O *= AR_LIKE_u +AR_O *= AR_LIKE_i +AR_O *= AR_LIKE_f +AR_O *= AR_LIKE_c +AR_O *= AR_LIKE_O + +# Inplace power + +AR_u **= AR_LIKE_b +AR_u **= AR_LIKE_u + +AR_i **= AR_LIKE_b +AR_i **= AR_LIKE_u +AR_i **= AR_LIKE_i + +AR_integer **= AR_LIKE_b +AR_integer **= AR_LIKE_u +AR_integer **= AR_LIKE_i + +AR_f **= AR_LIKE_b +AR_f **= AR_LIKE_u +AR_f **= AR_LIKE_i +AR_f **= AR_LIKE_f + +AR_c **= AR_LIKE_b +AR_c **= AR_LIKE_u +AR_c **= AR_LIKE_i +AR_c **= AR_LIKE_f +AR_c **= AR_LIKE_c + +AR_O **= AR_LIKE_b +AR_O **= AR_LIKE_u +AR_O **= AR_LIKE_i +AR_O **= AR_LIKE_f +AR_O **= AR_LIKE_c +AR_O **= AR_LIKE_O + +# unary ops + +-c16 +-c8 +-f8 +-f4 +-i8 +-i4 +with pytest.warns(RuntimeWarning): + -u8 + -u4 +-td +-AR_f + ++c16 ++c8 ++f8 ++f4 ++i8 ++i4 ++u8 ++u4 ++td ++AR_f + +abs(c16) +abs(c8) +abs(f8) +abs(f4) +abs(i8) +abs(i4) +abs(u8) +abs(u4) +abs(td) +abs(b_) +abs(AR_f) + +# Time structures + +dt + td +dt + i +dt + i4 +dt + i8 +dt - dt +dt - i +dt - i4 +dt - i8 + +td + td +td + i +td + i4 +td + i8 +td - td +td - i +td - i4 +td - i8 +td / f +td / f4 +td / f8 +td / td +td // td +td % td + + +# boolean + +b_ / b +b_ / b_ +b_ / i +b_ / i8 +b_ / i4 +b_ / u8 +b_ / u4 +b_ / f +b_ / f8 +b_ / f4 +b_ / c +b_ / c16 +b_ / c8 + +b / b_ +b_ / b_ +i / b_ +i8 / b_ +i4 / b_ +u8 / b_ +u4 / b_ +f / b_ +f8 / b_ +f4 / b_ +c / b_ +c16 / b_ +c8 / b_ + +# Complex + +c16 + c16 +c16 + f8 +c16 + i8 +c16 + c8 +c16 + f4 +c16 + i4 +c16 + b_ +c16 + b +c16 + c +c16 + f +c16 + i +c16 + AR_f + +c16 + c16 +f8 + c16 +i8 + c16 +c8 + c16 +f4 + c16 +i4 + c16 +b_ + c16 +b + c16 +c + c16 +f + c16 +i + c16 +AR_f + c16 + +c8 + c16 +c8 + f8 +c8 + i8 +c8 + c8 +c8 + f4 +c8 + i4 +c8 + b_ +c8 + b +c8 + c +c8 + f +c8 + i +c8 + AR_f + +c16 + c8 +f8 + c8 +i8 + c8 +c8 + c8 +f4 + c8 +i4 + c8 +b_ + c8 +b + c8 +c + c8 +f + c8 +i + c8 +AR_f + c8 + +# Float + +f8 + f8 +f8 + i8 +f8 + f4 +f8 + i4 +f8 + b_ +f8 + b +f8 + c +f8 + f +f8 + i +f8 + AR_f + +f8 + f8 +i8 + f8 +f4 + f8 +i4 + f8 +b_ + f8 +b + f8 +c + f8 +f + f8 +i + f8 +AR_f + f8 + +f4 + f8 +f4 + i8 +f4 + f4 +f4 + i4 +f4 + b_ +f4 + b +f4 + c +f4 + f +f4 + i +f4 + AR_f + +f8 + f4 +i8 + f4 +f4 + f4 +i4 + f4 +b_ + f4 +b + f4 +c + f4 +f + f4 +i + f4 +AR_f + f4 + +# Int + +i8 + i8 +i8 + u8 +i8 + i4 +i8 + u4 +i8 + b_ +i8 + b +i8 + c +i8 + f +i8 + i +i8 + AR_f + +u8 + u8 +u8 + i4 +u8 + u4 +u8 + b_ +u8 + b +u8 + c +u8 + f +u8 + i +u8 + AR_f + +i8 + i8 +u8 + i8 +i4 + i8 +u4 + i8 +b_ + i8 +b + i8 +c + i8 +f + i8 +i + i8 +AR_f + i8 + +u8 + u8 +i4 + u8 +u4 + u8 +b_ + u8 +b + u8 +c + u8 +f + u8 +i + u8 +AR_f + u8 + +i4 + i8 +i4 + i4 +i4 + i +i4 + b_ +i4 + b +i4 + AR_f + +u4 + i8 +u4 + i4 +u4 + u8 +u4 + u4 +u4 + i +u4 + b_ +u4 + b +u4 + AR_f + +i8 + i4 +i4 + i4 +i + i4 +b_ + i4 +b + i4 +AR_f + i4 + +i8 + u4 +i4 + u4 +u8 + u4 +u4 + u4 +b_ + u4 +b + u4 +i + u4 +AR_f + u4 diff --git a/local_packages/numpy/typing/tests/data/pass/array_constructors.py b/local_packages/numpy/typing/tests/data/pass/array_constructors.py new file mode 100644 index 0000000..d91d257 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/array_constructors.py @@ -0,0 +1,138 @@ +from typing import Any + +import numpy as np +import numpy.typing as npt + + +class Index: + def __index__(self) -> int: + return 0 + + +class SubClass(npt.NDArray[np.float64]): + pass + + +def func(i: int, j: int, **kwargs: Any) -> SubClass: + return B + + +i8 = np.int64(1) + +A = np.array([1]) +B = A.view(SubClass).copy() +B_stack = np.array([[1], [1]]).view(SubClass) +C = [1] + +np.ndarray(Index()) +np.ndarray([Index()]) + +np.array(1, dtype=float) +np.array(1, copy=None) +np.array(1, order='F') +np.array(1, order=None) +np.array(1, subok=True) +np.array(1, ndmin=3) +np.array(1, str, copy=True, order='C', subok=False, ndmin=2) + +np.asarray(A) +np.asarray(B) +np.asarray(C) + +np.asanyarray(A) +np.asanyarray(B) +np.asanyarray(B, dtype=int) +np.asanyarray(C) + +np.ascontiguousarray(A) +np.ascontiguousarray(B) +np.ascontiguousarray(C) + +np.asfortranarray(A) +np.asfortranarray(B) +np.asfortranarray(C) + +np.require(A) +np.require(B) +np.require(B, dtype=int) +np.require(B, requirements=None) +np.require(B, requirements="E") +np.require(B, requirements=["ENSUREARRAY"]) +np.require(B, requirements={"F", "E"}) +np.require(B, requirements=["C", "OWNDATA"]) +np.require(B, requirements="W") +np.require(B, requirements="A") +np.require(C) + +np.linspace(0, 2) +np.linspace(0.5, [0, 1, 2]) +np.linspace([0, 1, 2], 3) +np.linspace(0j, 2) +np.linspace(0, 2, num=10) +np.linspace(0, 2, endpoint=True) +np.linspace(0, 2, retstep=True) +np.linspace(0j, 2j, retstep=True) +np.linspace(0, 2, dtype=bool) +np.linspace([0, 1], [2, 3], axis=Index()) + +np.logspace(0, 2, base=2) +np.logspace(0, 2, base=2) +np.logspace(0, 2, base=[1j, 2j], num=2) + +np.geomspace(1, 2) + +np.zeros_like(A) +np.zeros_like(C) +np.zeros_like(B) +np.zeros_like(B, dtype=np.int64) + +np.ones_like(A) +np.ones_like(C) +np.ones_like(B) +np.ones_like(B, dtype=np.int64) + +np.empty_like(A) +np.empty_like(C) +np.empty_like(B) +np.empty_like(B, dtype=np.int64) + +np.full_like(A, i8) +np.full_like(C, i8) +np.full_like(B, i8) +np.full_like(B, i8, dtype=np.int64) + +np.ones(1) +np.ones([1, 1, 1]) + +np.full(1, i8) +np.full([1, 1, 1], i8) + +np.indices([1, 2, 3]) +np.indices([1, 2, 3], sparse=True) + +np.fromfunction(func, (3, 5)) + +np.identity(10) + +np.atleast_1d(C) +np.atleast_1d(A) +np.atleast_1d(C, C) +np.atleast_1d(C, A) +np.atleast_1d(A, A) + +np.atleast_2d(C) + +np.atleast_3d(C) + +np.vstack([C, C]) +np.vstack([C, A]) +np.vstack([A, A]) + +np.hstack([C, C]) + +np.stack([C, C]) +np.stack([C, C], axis=0) +np.stack([C, C], out=B_stack) + +np.block([[C, C], [C, C]]) +np.block(A) diff --git a/local_packages/numpy/typing/tests/data/pass/array_like.py b/local_packages/numpy/typing/tests/data/pass/array_like.py new file mode 100644 index 0000000..f922bea --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/array_like.py @@ -0,0 +1,43 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +if TYPE_CHECKING: + from numpy._typing import ArrayLike, NDArray, _SupportsArray + +x1: ArrayLike = True +x2: ArrayLike = 5 +x3: ArrayLike = 1.0 +x4: ArrayLike = 1 + 1j +x5: ArrayLike = np.int8(1) +x6: ArrayLike = np.float64(1) +x7: ArrayLike = np.complex128(1) +x8: ArrayLike = np.array([1, 2, 3]) +x9: ArrayLike = [1, 2, 3] +x10: ArrayLike = (1, 2, 3) +x11: ArrayLike = "foo" +x12: ArrayLike = memoryview(b'foo') + + +class A: + def __array__(self, dtype: np.dtype | None = None) -> NDArray[np.float64]: + return np.array([1.0, 2.0, 3.0]) + + +x13: ArrayLike = A() + +scalar: _SupportsArray[np.dtype[np.int64]] = np.int64(1) +scalar.__array__() +array: _SupportsArray[np.dtype[np.int_]] = np.array(1) +array.__array__() + +a: _SupportsArray[np.dtype[np.float64]] = A() +a.__array__() +a.__array__() + +# Escape hatch for when you mean to make something like an object +# array. +object_array_scalar: object = (i for i in range(10)) +np.array(object_array_scalar) diff --git a/local_packages/numpy/typing/tests/data/pass/arrayprint.py b/local_packages/numpy/typing/tests/data/pass/arrayprint.py new file mode 100644 index 0000000..6c704c7 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/arrayprint.py @@ -0,0 +1,37 @@ +import numpy as np + +AR = np.arange(10) +AR.setflags(write=False) + +with np.printoptions(): + np.set_printoptions( + precision=1, + threshold=2, + edgeitems=3, + linewidth=4, + suppress=False, + nanstr="Bob", + infstr="Bill", + formatter={}, + sign="+", + floatmode="unique", + ) + np.get_printoptions() + str(AR) + + np.array2string( + AR, + max_line_width=5, + precision=2, + suppress_small=True, + separator=";", + prefix="test", + threshold=5, + floatmode="fixed", + suffix="?", + legacy="1.13", + ) + np.format_float_scientific(1, precision=5) + np.format_float_positional(1, trim="k") + np.array_repr(AR) + np.array_str(AR) diff --git a/local_packages/numpy/typing/tests/data/pass/arrayterator.py b/local_packages/numpy/typing/tests/data/pass/arrayterator.py new file mode 100644 index 0000000..a99c09a --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/arrayterator.py @@ -0,0 +1,28 @@ + +from __future__ import annotations + +from typing import Any + +import numpy as np + +AR_i8: np.ndarray[Any, np.dtype[np.int_]] = np.arange(10) +ar_iter = np.lib.Arrayterator(AR_i8) + +ar_iter.var +ar_iter.buf_size +ar_iter.start +ar_iter.stop +ar_iter.step +ar_iter.shape +ar_iter.flat + +ar_iter.__array__() + +for i in ar_iter: + pass + +ar_iter[0] +ar_iter[...] +ar_iter[:] +ar_iter[0, 0, 0] +ar_iter[..., 0, :] diff --git a/local_packages/numpy/typing/tests/data/pass/bitwise_ops.py b/local_packages/numpy/typing/tests/data/pass/bitwise_ops.py new file mode 100644 index 0000000..2d4815b --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/bitwise_ops.py @@ -0,0 +1,131 @@ +import numpy as np + +i8 = np.int64(1) +u8 = np.uint64(1) + +i4 = np.int32(1) +u4 = np.uint32(1) + +b_ = np.bool(1) + +b = bool(1) +i = 1 + +AR = np.array([0, 1, 2], dtype=np.int32) +AR.setflags(write=False) + + +i8 << i8 +i8 >> i8 +i8 | i8 +i8 ^ i8 +i8 & i8 + +i << AR +i >> AR +i | AR +i ^ AR +i & AR + +i8 << AR +i8 >> AR +i8 | AR +i8 ^ AR +i8 & AR + +i4 << i4 +i4 >> i4 +i4 | i4 +i4 ^ i4 +i4 & i4 + +i8 << i4 +i8 >> i4 +i8 | i4 +i8 ^ i4 +i8 & i4 + +i8 << i +i8 >> i +i8 | i +i8 ^ i +i8 & i + +i8 << b_ +i8 >> b_ +i8 | b_ +i8 ^ b_ +i8 & b_ + +i8 << b +i8 >> b +i8 | b +i8 ^ b +i8 & b + +u8 << u8 +u8 >> u8 +u8 | u8 +u8 ^ u8 +u8 & u8 + +u4 << u4 +u4 >> u4 +u4 | u4 +u4 ^ u4 +u4 & u4 + +u4 << i4 +u4 >> i4 +u4 | i4 +u4 ^ i4 +u4 & i4 + +u4 << i +u4 >> i +u4 | i +u4 ^ i +u4 & i + +u8 << b_ +u8 >> b_ +u8 | b_ +u8 ^ b_ +u8 & b_ + +u8 << b +u8 >> b +u8 | b +u8 ^ b +u8 & b + +b_ << b_ +b_ >> b_ +b_ | b_ +b_ ^ b_ +b_ & b_ + +b_ << AR +b_ >> AR +b_ | AR +b_ ^ AR +b_ & AR + +b_ << b +b_ >> b +b_ | b +b_ ^ b +b_ & b + +b_ << i +b_ >> i +b_ | i +b_ ^ i +b_ & i + +~i8 +~i4 +~u8 +~u4 +~b_ +~AR diff --git a/local_packages/numpy/typing/tests/data/pass/comparisons.py b/local_packages/numpy/typing/tests/data/pass/comparisons.py new file mode 100644 index 0000000..b2e5276 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/comparisons.py @@ -0,0 +1,316 @@ +from __future__ import annotations + +from typing import Any, cast + +import numpy as np + +c16 = np.complex128() +f8 = np.float64() +i8 = np.int64() +u8 = np.uint64() + +c8 = np.complex64() +f4 = np.float32() +i4 = np.int32() +u4 = np.uint32() + +dt = np.datetime64(0, "D") +td = np.timedelta64(0, "D") + +b_ = np.bool() + +b = False +c = complex() +f = 0.0 +i = 0 + +SEQ = (0, 1, 2, 3, 4) + +AR_b: np.ndarray[Any, np.dtype[np.bool]] = np.array([True]) +AR_u: np.ndarray[Any, np.dtype[np.uint32]] = np.array([1], dtype=np.uint32) +AR_i: np.ndarray[Any, np.dtype[np.int_]] = np.array([1]) +AR_f: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.0]) +AR_c: np.ndarray[Any, np.dtype[np.complex128]] = np.array([1.0j]) +AR_S: np.ndarray[Any, np.dtype[np.bytes_]] = np.array([b"a"], "S") +AR_T = cast(np.ndarray[Any, np.dtypes.StringDType], np.array(["a"], "T")) +AR_U: np.ndarray[Any, np.dtype[np.str_]] = np.array(["a"], "U") +AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] = np.array([np.timedelta64("1")]) +AR_M: np.ndarray[Any, np.dtype[np.datetime64]] = np.array([np.datetime64("1")]) +AR_O: np.ndarray[Any, np.dtype[np.object_]] = np.array([1], dtype=object) + +# Arrays + +AR_b > AR_b +AR_b > AR_u +AR_b > AR_i +AR_b > AR_f +AR_b > AR_c + +AR_u > AR_b +AR_u > AR_u +AR_u > AR_i +AR_u > AR_f +AR_u > AR_c + +AR_i > AR_b +AR_i > AR_u +AR_i > AR_i +AR_i > AR_f +AR_i > AR_c + +AR_f > AR_b +AR_f > AR_u +AR_f > AR_i +AR_f > AR_f +AR_f > AR_c + +AR_c > AR_b +AR_c > AR_u +AR_c > AR_i +AR_c > AR_f +AR_c > AR_c + +AR_S > AR_S +AR_S > b"" + +AR_T > AR_T +AR_T > AR_U +AR_T > "" + +AR_U > AR_U +AR_U > AR_T +AR_U > "" + +AR_m > AR_b +AR_m > AR_u +AR_m > AR_i +AR_b > AR_m +AR_u > AR_m +AR_i > AR_m + +AR_M > AR_M + +AR_O > AR_O +1 > AR_O +AR_O > 1 + +# Time structures + +dt > dt + +td > td +td > i +td > i4 +td > i8 +td > AR_i +td > SEQ + +# boolean + +b_ > b +b_ > b_ +b_ > i +b_ > i8 +b_ > i4 +b_ > u8 +b_ > u4 +b_ > f +b_ > f8 +b_ > f4 +b_ > c +b_ > c16 +b_ > c8 +b_ > AR_i +b_ > SEQ + +# Complex + +c16 > c16 +c16 > f8 +c16 > i8 +c16 > c8 +c16 > f4 +c16 > i4 +c16 > b_ +c16 > b +c16 > c +c16 > f +c16 > i +c16 > AR_i +c16 > SEQ + +c16 > c16 +f8 > c16 +i8 > c16 +c8 > c16 +f4 > c16 +i4 > c16 +b_ > c16 +b > c16 +c > c16 +f > c16 +i > c16 +AR_i > c16 +SEQ > c16 + +c8 > c16 +c8 > f8 +c8 > i8 +c8 > c8 +c8 > f4 +c8 > i4 +c8 > b_ +c8 > b +c8 > c +c8 > f +c8 > i +c8 > AR_i +c8 > SEQ + +c16 > c8 +f8 > c8 +i8 > c8 +c8 > c8 +f4 > c8 +i4 > c8 +b_ > c8 +b > c8 +c > c8 +f > c8 +i > c8 +AR_i > c8 +SEQ > c8 + +# Float + +f8 > f8 +f8 > i8 +f8 > f4 +f8 > i4 +f8 > b_ +f8 > b +f8 > c +f8 > f +f8 > i +f8 > AR_i +f8 > SEQ + +f8 > f8 +i8 > f8 +f4 > f8 +i4 > f8 +b_ > f8 +b > f8 +c > f8 +f > f8 +i > f8 +AR_i > f8 +SEQ > f8 + +f4 > f8 +f4 > i8 +f4 > f4 +f4 > i4 +f4 > b_ +f4 > b +f4 > c +f4 > f +f4 > i +f4 > AR_i +f4 > SEQ + +f8 > f4 +i8 > f4 +f4 > f4 +i4 > f4 +b_ > f4 +b > f4 +c > f4 +f > f4 +i > f4 +AR_i > f4 +SEQ > f4 + +# Int + +i8 > i8 +i8 > u8 +i8 > i4 +i8 > u4 +i8 > b_ +i8 > b +i8 > c +i8 > f +i8 > i +i8 > AR_i +i8 > SEQ + +u8 > u8 +u8 > i4 +u8 > u4 +u8 > b_ +u8 > b +u8 > c +u8 > f +u8 > i +u8 > AR_i +u8 > SEQ + +i8 > i8 +u8 > i8 +i4 > i8 +u4 > i8 +b_ > i8 +b > i8 +c > i8 +f > i8 +i > i8 +AR_i > i8 +SEQ > i8 + +u8 > u8 +i4 > u8 +u4 > u8 +b_ > u8 +b > u8 +c > u8 +f > u8 +i > u8 +AR_i > u8 +SEQ > u8 + +i4 > i8 +i4 > i4 +i4 > i +i4 > b_ +i4 > b +i4 > AR_i +i4 > SEQ + +u4 > i8 +u4 > i4 +u4 > u8 +u4 > u4 +u4 > i +u4 > b_ +u4 > b +u4 > AR_i +u4 > SEQ + +i8 > i4 +i4 > i4 +i > i4 +b_ > i4 +b > i4 +AR_i > i4 +SEQ > i4 + +i8 > u4 +i4 > u4 +u8 > u4 +u4 > u4 +b_ > u4 +b > u4 +i > u4 +AR_i > u4 +SEQ > u4 diff --git a/local_packages/numpy/typing/tests/data/pass/dtype.py b/local_packages/numpy/typing/tests/data/pass/dtype.py new file mode 100644 index 0000000..9f11518 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/dtype.py @@ -0,0 +1,57 @@ +import numpy as np + +dtype_obj = np.dtype(np.str_) +void_dtype_obj = np.dtype([("f0", np.float64), ("f1", np.float32)]) + +np.dtype(dtype=np.int64) +np.dtype(int) +np.dtype("int") +np.dtype(None) + +np.dtype((int, 2)) +np.dtype((int, (1,))) + +np.dtype({"names": ["a", "b"], "formats": [int, float]}) +np.dtype({"names": ["a"], "formats": [int], "titles": [object]}) +np.dtype({"names": ["a"], "formats": [int], "titles": [object()]}) + +np.dtype([("name", np.str_, 16), ("grades", np.float64, (2,)), ("age", "int32")]) + +np.dtype( + { + "names": ["a", "b"], + "formats": [int, float], + "itemsize": 9, + "aligned": False, + "titles": ["x", "y"], + "offsets": [0, 1], + } +) + +np.dtype((np.float64, float)) + + +class Test: + dtype = np.dtype(float) + + +np.dtype(Test()) + +# Methods and attributes +dtype_obj.base +dtype_obj.subdtype +dtype_obj.newbyteorder() +dtype_obj.type +dtype_obj.name +dtype_obj.names + +dtype_obj * 0 +dtype_obj * 2 + +0 * dtype_obj +2 * dtype_obj + +void_dtype_obj["f0"] +void_dtype_obj[0] +void_dtype_obj[["f0", "f1"]] +void_dtype_obj[["f0"]] diff --git a/local_packages/numpy/typing/tests/data/pass/einsumfunc.py b/local_packages/numpy/typing/tests/data/pass/einsumfunc.py new file mode 100644 index 0000000..429764e --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/einsumfunc.py @@ -0,0 +1,36 @@ +from __future__ import annotations + +from typing import Any + +import numpy as np + +AR_LIKE_b = [True, True, True] +AR_LIKE_u = [np.uint32(1), np.uint32(2), np.uint32(3)] +AR_LIKE_i = [1, 2, 3] +AR_LIKE_f = [1.0, 2.0, 3.0] +AR_LIKE_c = [1j, 2j, 3j] +AR_LIKE_U = ["1", "2", "3"] + +OUT_f: np.ndarray[Any, np.dtype[np.float64]] = np.empty(3, dtype=np.float64) +OUT_c: np.ndarray[Any, np.dtype[np.complex128]] = np.empty(3, dtype=np.complex128) + +np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_b) +np.einsum("i,i->i", AR_LIKE_u, AR_LIKE_u) +np.einsum("i,i->i", AR_LIKE_i, AR_LIKE_i) +np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f) +np.einsum("i,i->i", AR_LIKE_c, AR_LIKE_c) +np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_i) +np.einsum("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c) + +np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f, dtype="c16") +np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsafe") +np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f, out=OUT_c) +np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=int, casting="unsafe", out=OUT_f) + +np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_b) +np.einsum_path("i,i->i", AR_LIKE_u, AR_LIKE_u) +np.einsum_path("i,i->i", AR_LIKE_i, AR_LIKE_i) +np.einsum_path("i,i->i", AR_LIKE_f, AR_LIKE_f) +np.einsum_path("i,i->i", AR_LIKE_c, AR_LIKE_c) +np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_i) +np.einsum_path("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c) diff --git a/local_packages/numpy/typing/tests/data/pass/flatiter.py b/local_packages/numpy/typing/tests/data/pass/flatiter.py new file mode 100644 index 0000000..70de3a6 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/flatiter.py @@ -0,0 +1,26 @@ +import numpy as np + +a = np.empty((2, 2)).flat + +a.base +a.copy() +a.coords +a.index +iter(a) +next(a) +a[0] +a[...] +a[:] +a.__array__() + +b = np.array([1]).flat +a[b] + +a[0] = "1" +a[:] = "2" +a[...] = "3" +a[[]] = "4" +a[[0]] = "5" +a[[[0]]] = "6" +a[[[[[0]]]]] = "7" +a[b] = "8" diff --git a/local_packages/numpy/typing/tests/data/pass/fromnumeric.py b/local_packages/numpy/typing/tests/data/pass/fromnumeric.py new file mode 100644 index 0000000..7cc2bcf --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/fromnumeric.py @@ -0,0 +1,272 @@ +"""Tests for :mod:`numpy._core.fromnumeric`.""" + +import numpy as np + +A = np.array(True, ndmin=2, dtype=bool) +B = np.array(1.0, ndmin=2, dtype=np.float32) +A.setflags(write=False) +B.setflags(write=False) + +a = np.bool(True) +b = np.float32(1.0) +c = 1.0 +d = np.array(1.0, dtype=np.float32) # writeable + +np.take(a, 0) +np.take(b, 0) +np.take(c, 0) +np.take(A, 0) +np.take(B, 0) +np.take(A, [0]) +np.take(B, [0]) + +np.reshape(a, 1) +np.reshape(b, 1) +np.reshape(c, 1) +np.reshape(A, 1) +np.reshape(B, 1) + +np.choose(a, [True, True]) +np.choose(A, [1.0, 1.0]) + +np.repeat(a, 1) +np.repeat(b, 1) +np.repeat(c, 1) +np.repeat(A, 1) +np.repeat(B, 1) + +np.swapaxes(A, 0, 0) +np.swapaxes(B, 0, 0) + +np.transpose(a) +np.transpose(b) +np.transpose(c) +np.transpose(A) +np.transpose(B) + +np.partition(a, 0, axis=None) +np.partition(b, 0, axis=None) +np.partition(c, 0, axis=None) +np.partition(A, 0) +np.partition(B, 0) + +np.argpartition(a, 0) +np.argpartition(b, 0) +np.argpartition(c, 0) +np.argpartition(A, 0) +np.argpartition(B, 0) + +np.sort(A, 0) +np.sort(B, 0) + +np.argsort(A, 0) +np.argsort(B, 0) + +np.argmax(A) +np.argmax(B) +np.argmax(A, axis=0) +np.argmax(B, axis=0) + +np.argmin(A) +np.argmin(B) +np.argmin(A, axis=0) +np.argmin(B, axis=0) + +np.searchsorted(A[0], 0) +np.searchsorted(B[0], 0) +np.searchsorted(A[0], [0]) +np.searchsorted(B[0], [0]) + +np.resize(a, (5, 5)) +np.resize(b, (5, 5)) +np.resize(c, (5, 5)) +np.resize(A, (5, 5)) +np.resize(B, (5, 5)) + +np.squeeze(a) +np.squeeze(b) +np.squeeze(c) +np.squeeze(A) +np.squeeze(B) + +np.diagonal(A) +np.diagonal(B) + +np.trace(A) +np.trace(B) + +np.ravel(a) +np.ravel(b) +np.ravel(c) +np.ravel(A) +np.ravel(B) + +np.nonzero(A) +np.nonzero(B) + +np.shape(a) +np.shape(b) +np.shape(c) +np.shape(A) +np.shape(B) + +np.compress([True], a) +np.compress([True], b) +np.compress([True], c) +np.compress([True], A) +np.compress([True], B) + +np.clip(a, 0, 1.0) +np.clip(b, -1, 1) +np.clip(a, 0, None) +np.clip(b, None, 1) +np.clip(c, 0, 1) +np.clip(A, 0, 1) +np.clip(B, 0, 1) +np.clip(B, [0, 1], [1, 2]) + +np.sum(a) +np.sum(b) +np.sum(c) +np.sum(A) +np.sum(B) +np.sum(A, axis=0) +np.sum(B, axis=0) + +np.all(a) +np.all(b) +np.all(c) +np.all(A) +np.all(B) +np.all(A, axis=0) +np.all(B, axis=0) +np.all(A, keepdims=True) +np.all(B, keepdims=True) + +np.any(a) +np.any(b) +np.any(c) +np.any(A) +np.any(B) +np.any(A, axis=0) +np.any(B, axis=0) +np.any(A, keepdims=True) +np.any(B, keepdims=True) + +np.cumsum(a) +np.cumsum(b) +np.cumsum(c) +np.cumsum(A) +np.cumsum(B) + +np.cumulative_sum(a) +np.cumulative_sum(b) +np.cumulative_sum(c) +np.cumulative_sum(A, axis=0) +np.cumulative_sum(B, axis=0) + +np.ptp(b) +np.ptp(c) +np.ptp(B) +np.ptp(B, axis=0) +np.ptp(B, keepdims=True) + +np.amax(a) +np.amax(b) +np.amax(c) +np.amax(A) +np.amax(B) +np.amax(A, axis=0) +np.amax(B, axis=0) +np.amax(A, keepdims=True) +np.amax(B, keepdims=True) + +np.amin(a) +np.amin(b) +np.amin(c) +np.amin(A) +np.amin(B) +np.amin(A, axis=0) +np.amin(B, axis=0) +np.amin(A, keepdims=True) +np.amin(B, keepdims=True) + +np.prod(a) +np.prod(b) +np.prod(c) +np.prod(A) +np.prod(B) +np.prod(a, dtype=None) +np.prod(A, dtype=None) +np.prod(A, axis=0) +np.prod(B, axis=0) +np.prod(A, keepdims=True) +np.prod(B, keepdims=True) +np.prod(b, out=d) +np.prod(B, out=d) + +np.cumprod(a) +np.cumprod(b) +np.cumprod(c) +np.cumprod(A) +np.cumprod(B) + +np.cumulative_prod(a) +np.cumulative_prod(b) +np.cumulative_prod(c) +np.cumulative_prod(A, axis=0) +np.cumulative_prod(B, axis=0) + +np.ndim(a) +np.ndim(b) +np.ndim(c) +np.ndim(A) +np.ndim(B) + +np.size(a) +np.size(b) +np.size(c) +np.size(A) +np.size(B) + +np.around(a) +np.around(b) +np.around(c) +np.around(A) +np.around(B) + +np.mean(a) +np.mean(b) +np.mean(c) +np.mean(A) +np.mean(B) +np.mean(A, axis=0) +np.mean(B, axis=0) +np.mean(A, keepdims=True) +np.mean(B, keepdims=True) +np.mean(b, out=d) +np.mean(B, out=d) + +np.std(a) +np.std(b) +np.std(c) +np.std(A) +np.std(B) +np.std(A, axis=0) +np.std(B, axis=0) +np.std(A, keepdims=True) +np.std(B, keepdims=True) +np.std(b, out=d) +np.std(B, out=d) + +np.var(a) +np.var(b) +np.var(c) +np.var(A) +np.var(B) +np.var(A, axis=0) +np.var(B, axis=0) +np.var(A, keepdims=True) +np.var(B, keepdims=True) +np.var(b, out=d) +np.var(B, out=d) diff --git a/local_packages/numpy/typing/tests/data/pass/index_tricks.py b/local_packages/numpy/typing/tests/data/pass/index_tricks.py new file mode 100644 index 0000000..ea98156 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/index_tricks.py @@ -0,0 +1,62 @@ +from __future__ import annotations + +from typing import Any + +import numpy as np + +AR_LIKE_b = [[True, True], [True, True]] +AR_LIKE_i = [[1, 2], [3, 4]] +AR_LIKE_f = [[1.0, 2.0], [3.0, 4.0]] +AR_LIKE_U = [["1", "2"], ["3", "4"]] + +AR_i8: np.ndarray[Any, np.dtype[np.int64]] = np.array(AR_LIKE_i, dtype=np.int64) + +np.ndenumerate(AR_i8) +np.ndenumerate(AR_LIKE_f) +np.ndenumerate(AR_LIKE_U) + +next(np.ndenumerate(AR_i8)) +next(np.ndenumerate(AR_LIKE_f)) +next(np.ndenumerate(AR_LIKE_U)) + +iter(np.ndenumerate(AR_i8)) +iter(np.ndenumerate(AR_LIKE_f)) +iter(np.ndenumerate(AR_LIKE_U)) + +iter(np.ndindex(1, 2, 3)) +next(np.ndindex(1, 2, 3)) + +np.unravel_index([22, 41, 37], (7, 6)) +np.unravel_index([31, 41, 13], (7, 6), order='F') +np.unravel_index(1621, (6, 7, 8, 9)) + +np.ravel_multi_index(AR_LIKE_i, (7, 6)) +np.ravel_multi_index(AR_LIKE_i, (7, 6), order='F') +np.ravel_multi_index(AR_LIKE_i, (4, 6), mode='clip') +np.ravel_multi_index(AR_LIKE_i, (4, 4), mode=('clip', 'wrap')) +np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)) + +np.mgrid[1:1:2] +np.mgrid[1:1:2, None:10] + +np.ogrid[1:1:2] +np.ogrid[1:1:2, None:10] + +np.index_exp[0:1] +np.index_exp[0:1, None:3] +np.index_exp[0, 0:1, ..., [0, 1, 3]] + +np.s_[0:1] +np.s_[0:1, None:3] +np.s_[0, 0:1, ..., [0, 1, 3]] + +np.ix_(AR_LIKE_b[0]) +np.ix_(AR_LIKE_i[0], AR_LIKE_f[0]) +np.ix_(AR_i8[0]) + +np.fill_diagonal(AR_i8, 5) + +np.diag_indices(4) +np.diag_indices(2, 3) + +np.diag_indices_from(AR_i8) diff --git a/local_packages/numpy/typing/tests/data/pass/lib_user_array.py b/local_packages/numpy/typing/tests/data/pass/lib_user_array.py new file mode 100644 index 0000000..f79dc38 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/lib_user_array.py @@ -0,0 +1,22 @@ +"""Based on the `if __name__ == "__main__"` test code in `lib/_user_array_impl.py`.""" + +from __future__ import annotations + +import numpy as np +from numpy.lib.user_array import container # type: ignore[deprecated] + +N = 10_000 +W = H = int(N**0.5) + +a: np.ndarray[tuple[int, int], np.dtype[np.int32]] +ua: container[tuple[int, int], np.dtype[np.int32]] + +a = np.arange(N, dtype=np.int32).reshape(W, H) +ua = container(a) + +ua_small: container[tuple[int, int], np.dtype[np.int32]] = ua[:3, :5] +ua_small[0, 0] = 10 + +ua_bool: container[tuple[int, int], np.dtype[np.bool]] = ua_small > 1 + +# shape: tuple[int, int] = np.shape(ua) diff --git a/local_packages/numpy/typing/tests/data/pass/lib_utils.py b/local_packages/numpy/typing/tests/data/pass/lib_utils.py new file mode 100644 index 0000000..f9b3381 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/lib_utils.py @@ -0,0 +1,19 @@ +from __future__ import annotations + +from io import StringIO + +import numpy as np +import numpy.lib.array_utils as array_utils + +FILE = StringIO() +AR = np.arange(10, dtype=np.float64) + + +def func(a: int) -> bool: + return True + + +array_utils.byte_bounds(AR) +array_utils.byte_bounds(np.float64()) + +np.info(1, output=FILE) diff --git a/local_packages/numpy/typing/tests/data/pass/lib_version.py b/local_packages/numpy/typing/tests/data/pass/lib_version.py new file mode 100644 index 0000000..f3825ec --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/lib_version.py @@ -0,0 +1,18 @@ +from numpy.lib import NumpyVersion + +version = NumpyVersion("1.8.0") + +version.vstring +version.version +version.major +version.minor +version.bugfix +version.pre_release +version.is_devversion + +version == version +version != version +version < "1.8.0" +version <= version +version > version +version >= "1.8.0" diff --git a/local_packages/numpy/typing/tests/data/pass/literal.py b/local_packages/numpy/typing/tests/data/pass/literal.py new file mode 100644 index 0000000..f1e0cb2 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/literal.py @@ -0,0 +1,52 @@ +from __future__ import annotations + +from functools import partial +from typing import TYPE_CHECKING, Any + +import pytest + +import numpy as np + +if TYPE_CHECKING: + from collections.abc import Callable + +AR = np.array(0) +AR.setflags(write=False) + +KACF = frozenset({None, "K", "A", "C", "F"}) +ACF = frozenset({None, "A", "C", "F"}) +CF = frozenset({None, "C", "F"}) + +order_list: list[tuple[frozenset[str | None], Callable[..., Any]]] = [ + (KACF, AR.tobytes), + (KACF, partial(AR.astype, int)), + (KACF, AR.copy), + (ACF, partial(AR.reshape, 1)), + (KACF, AR.flatten), + (KACF, AR.ravel), + (KACF, partial(np.array, 1)), + # NOTE: __call__ is needed due to mypy bugs (#17620, #17631) + (KACF, partial(np.ndarray.__call__, 1)), + (CF, partial(np.zeros.__call__, 1)), + (CF, partial(np.ones.__call__, 1)), + (CF, partial(np.empty.__call__, 1)), + (CF, partial(np.full, 1, 1)), + (KACF, partial(np.zeros_like, AR)), + (KACF, partial(np.ones_like, AR)), + (KACF, partial(np.empty_like, AR)), + (KACF, partial(np.full_like, AR, 1)), + (KACF, partial(np.add.__call__, 1, 1)), # i.e. np.ufunc.__call__ + (ACF, partial(np.reshape, AR, 1)), + (KACF, partial(np.ravel, AR)), + (KACF, partial(np.asarray, 1)), + (KACF, partial(np.asanyarray, 1)), +] + +for order_set, func in order_list: + for order in order_set: + func(order=order) + + invalid_orders = KACF - order_set + for order in invalid_orders: + with pytest.raises(ValueError): + func(order=order) diff --git a/local_packages/numpy/typing/tests/data/pass/ma.py b/local_packages/numpy/typing/tests/data/pass/ma.py new file mode 100644 index 0000000..3ccea66 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/ma.py @@ -0,0 +1,199 @@ +import datetime as dt +from typing import Any, TypeAlias, TypeVar, cast + +import numpy as np +import numpy.typing as npt +from numpy._typing import _Shape + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +MaskedArray: TypeAlias = np.ma.MaskedArray[_Shape, np.dtype[_ScalarT]] + +# mypy: disable-error-code=no-untyped-call + +MAR_b: MaskedArray[np.bool] = np.ma.MaskedArray([True]) +MAR_u: MaskedArray[np.uint32] = np.ma.MaskedArray([1], dtype=np.uint32) +MAR_i: MaskedArray[np.int64] = np.ma.MaskedArray([1]) +MAR_f: MaskedArray[np.float64] = np.ma.MaskedArray([1.0]) +MAR_c: MaskedArray[np.complex128] = np.ma.MaskedArray([1j]) +MAR_td64: MaskedArray[np.timedelta64] = np.ma.MaskedArray([np.timedelta64(1, "D")]) +MAR_dt64: MaskedArray[np.datetime64] = np.ma.MaskedArray([np.datetime64(1, "D")]) +MAR_S: MaskedArray[np.bytes_] = np.ma.MaskedArray([b'foo'], dtype=np.bytes_) +MAR_U: MaskedArray[np.str_] = np.ma.MaskedArray(['foo'], dtype=np.str_) +MAR_T = cast(np.ma.MaskedArray[Any, np.dtypes.StringDType], + np.ma.MaskedArray(["a"], dtype="T")) +MAR_V: MaskedArray[np.void] = np.ma.MaskedArray( + [(1, 1)], + mask=[(False, False)], + dtype=[('a', int), ('b', int)] +) + +AR_b: npt.NDArray[np.bool] = np.array([True, False, True]) + +AR_LIKE_b = [True] +AR_LIKE_u = [np.uint32(1)] +AR_LIKE_i = [1] +AR_LIKE_f = [1.0] +AR_LIKE_c = [1j] +AR_LIKE_m = [np.timedelta64(1, "D")] +AR_LIKE_M = [np.datetime64(1, "D")] + +MAR_f.mask = AR_b +MAR_f.mask = np.False_ + +MAR_i.fill_value = 0 + +MAR_b.flat[MAR_i > 0] = False +MAR_i.flat[:] = 1 +MAR_f.flat[[0]] = AR_LIKE_f +MAR_c.flat[[0, 0]] = [3, 4 + 3j] +MAR_td64.flat[0] = dt.timedelta(1) +MAR_dt64.flat[0] = dt.datetime(2020, 1, 1) + +MAR_b[MAR_i > 0] = False +MAR_i[:] = 1 +MAR_f[[0]] = AR_LIKE_f +MAR_c[[0, 0]] = [3, 4 + 3j] +MAR_td64[0] = dt.timedelta(1) +MAR_dt64[0] = dt.datetime(2020, 1, 1) +MAR_V['a'] = [2] + +# Inplace addition + +MAR_b += AR_LIKE_b + +MAR_u += AR_LIKE_b +MAR_u += AR_LIKE_u + +MAR_i += AR_LIKE_b +MAR_i += 2 +MAR_i += AR_LIKE_i + +MAR_f += AR_LIKE_b +MAR_f += 2 +MAR_f += AR_LIKE_u +MAR_f += AR_LIKE_i +MAR_f += AR_LIKE_f + +MAR_c += AR_LIKE_b +MAR_c += AR_LIKE_u +MAR_c += AR_LIKE_i +MAR_c += AR_LIKE_f +MAR_c += AR_LIKE_c + +MAR_td64 += AR_LIKE_b +MAR_td64 += AR_LIKE_u +MAR_td64 += AR_LIKE_i +MAR_td64 += AR_LIKE_m +MAR_dt64 += AR_LIKE_b +MAR_dt64 += AR_LIKE_u +MAR_dt64 += AR_LIKE_i +MAR_dt64 += AR_LIKE_m + +MAR_S += b'snakes' +MAR_U += 'snakes' +MAR_T += 'snakes' + +# Inplace subtraction + +MAR_u -= AR_LIKE_b +MAR_u -= AR_LIKE_u + +MAR_i -= AR_LIKE_b +MAR_i -= AR_LIKE_i + +MAR_f -= AR_LIKE_b +MAR_f -= AR_LIKE_u +MAR_f -= AR_LIKE_i +MAR_f -= AR_LIKE_f + +MAR_c -= AR_LIKE_b +MAR_c -= AR_LIKE_u +MAR_c -= AR_LIKE_i +MAR_c -= AR_LIKE_f +MAR_c -= AR_LIKE_c + +MAR_td64 -= AR_LIKE_b +MAR_td64 -= AR_LIKE_u +MAR_td64 -= AR_LIKE_i +MAR_td64 -= AR_LIKE_m +MAR_dt64 -= AR_LIKE_b +MAR_dt64 -= AR_LIKE_u +MAR_dt64 -= AR_LIKE_i +MAR_dt64 -= AR_LIKE_m + +# Inplace floor division + +MAR_f //= AR_LIKE_b +MAR_f //= 2 +MAR_f //= AR_LIKE_u +MAR_f //= AR_LIKE_i +MAR_f //= AR_LIKE_f + +MAR_td64 //= AR_LIKE_i + +# Inplace true division + +MAR_f /= AR_LIKE_b +MAR_f /= 2 +MAR_f /= AR_LIKE_u +MAR_f /= AR_LIKE_i +MAR_f /= AR_LIKE_f + +MAR_c /= AR_LIKE_b +MAR_c /= AR_LIKE_u +MAR_c /= AR_LIKE_i +MAR_c /= AR_LIKE_f +MAR_c /= AR_LIKE_c + +MAR_td64 /= AR_LIKE_i + +# Inplace multiplication + +MAR_b *= AR_LIKE_b + +MAR_u *= AR_LIKE_b +MAR_u *= AR_LIKE_u + +MAR_i *= AR_LIKE_b +MAR_i *= 2 +MAR_i *= AR_LIKE_i + +MAR_f *= AR_LIKE_b +MAR_f *= 2 +MAR_f *= AR_LIKE_u +MAR_f *= AR_LIKE_i +MAR_f *= AR_LIKE_f + +MAR_c *= AR_LIKE_b +MAR_c *= AR_LIKE_u +MAR_c *= AR_LIKE_i +MAR_c *= AR_LIKE_f +MAR_c *= AR_LIKE_c + +MAR_td64 *= AR_LIKE_b +MAR_td64 *= AR_LIKE_u +MAR_td64 *= AR_LIKE_i +MAR_td64 *= AR_LIKE_f + +MAR_S *= 2 +MAR_U *= 2 +MAR_T *= 2 + +# Inplace power + +MAR_u **= AR_LIKE_b +MAR_u **= AR_LIKE_u + +MAR_i **= AR_LIKE_b +MAR_i **= AR_LIKE_i + +MAR_f **= AR_LIKE_b +MAR_f **= AR_LIKE_u +MAR_f **= AR_LIKE_i +MAR_f **= AR_LIKE_f + +MAR_c **= AR_LIKE_b +MAR_c **= AR_LIKE_u +MAR_c **= AR_LIKE_i +MAR_c **= AR_LIKE_f +MAR_c **= AR_LIKE_c diff --git a/local_packages/numpy/typing/tests/data/pass/mod.py b/local_packages/numpy/typing/tests/data/pass/mod.py new file mode 100644 index 0000000..4643264 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/mod.py @@ -0,0 +1,149 @@ +import numpy as np + +f8 = np.float64(1) +i8 = np.int64(1) +u8 = np.uint64(1) + +f4 = np.float32(1) +i4 = np.int32(1) +u4 = np.uint32(1) + +td = np.timedelta64(1, "D") +b_ = np.bool(1) + +b = bool(1) +f = float(1) +i = 1 + +AR = np.array([1], dtype=np.bool) +AR.setflags(write=False) + +AR2 = np.array([1], dtype=np.timedelta64) +AR2.setflags(write=False) + +# Time structures + +td % td +td % AR2 +AR2 % td + +divmod(td, td) +divmod(td, AR2) +divmod(AR2, td) + +# Bool + +b_ % b +b_ % i +b_ % f +b_ % b_ +b_ % i8 +b_ % u8 +b_ % f8 +b_ % AR + +divmod(b_, b) +divmod(b_, i) +divmod(b_, f) +divmod(b_, b_) +divmod(b_, i8) +divmod(b_, u8) +divmod(b_, f8) +divmod(b_, AR) + +b % b_ +i % b_ +f % b_ +b_ % b_ +i8 % b_ +u8 % b_ +f8 % b_ +AR % b_ + +divmod(b, b_) +divmod(i, b_) +divmod(f, b_) +divmod(b_, b_) +divmod(i8, b_) +divmod(u8, b_) +divmod(f8, b_) +divmod(AR, b_) + +# int + +i8 % b +i8 % i +i8 % f +i8 % i8 +i8 % f8 +i4 % i8 +i4 % f8 +i4 % i4 +i4 % f4 +i8 % AR + +divmod(i8, b) +divmod(i8, i) +divmod(i8, f) +divmod(i8, i8) +divmod(i8, f8) +divmod(i8, i4) +divmod(i8, f4) +divmod(i4, i4) +divmod(i4, f4) +divmod(i8, AR) + +b % i8 +i % i8 +f % i8 +i8 % i8 +f8 % i8 +i8 % i4 +f8 % i4 +i4 % i4 +f4 % i4 +AR % i8 + +divmod(b, i8) +divmod(i, i8) +divmod(f, i8) +divmod(i8, i8) +divmod(f8, i8) +divmod(i4, i8) +divmod(f4, i8) +divmod(i4, i4) +divmod(f4, i4) +divmod(AR, i8) + +# float + +f8 % b +f8 % i +f8 % f +i8 % f4 +f4 % f4 +f8 % AR + +divmod(f8, b) +divmod(f8, i) +divmod(f8, f) +divmod(f8, f8) +divmod(f8, f4) +divmod(f4, f4) +divmod(f8, AR) + +b % f8 +i % f8 +f % f8 +f8 % f8 +f8 % f8 +f4 % f4 +AR % f8 + +divmod(b, f8) +divmod(i, f8) +divmod(f, f8) +divmod(f8, f8) +divmod(f4, f8) +divmod(f4, f4) +divmod(AR, f8) diff --git a/local_packages/numpy/typing/tests/data/pass/modules.py b/local_packages/numpy/typing/tests/data/pass/modules.py new file mode 100644 index 0000000..0c2fd4b --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/modules.py @@ -0,0 +1,45 @@ +import numpy as np +from numpy import f2py + +np.char +np.ctypeslib +np.emath +np.fft +np.lib +np.linalg +np.ma +np.matrixlib +np.polynomial +np.random +np.rec +np.strings +np.testing +np.version + +np.lib.format +np.lib.mixins +np.lib.scimath +np.lib.stride_tricks +np.lib.array_utils +np.ma.extras +np.polynomial.chebyshev +np.polynomial.hermite +np.polynomial.hermite_e +np.polynomial.laguerre +np.polynomial.legendre +np.polynomial.polynomial + +np.__path__ +np.__version__ + +np.__all__ +np.char.__all__ +np.ctypeslib.__all__ +np.emath.__all__ +np.lib.__all__ +np.ma.__all__ +np.random.__all__ +np.rec.__all__ +np.strings.__all__ +np.testing.__all__ +f2py.__all__ diff --git a/local_packages/numpy/typing/tests/data/pass/multiarray.py b/local_packages/numpy/typing/tests/data/pass/multiarray.py new file mode 100644 index 0000000..3a50559 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/multiarray.py @@ -0,0 +1,77 @@ +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] = np.array([1.0]) +AR_i4 = np.array([1], dtype=np.int32) +AR_u1 = np.array([1], dtype=np.uint8) + +AR_LIKE_f = [1.5] +AR_LIKE_i = [1] + +b_f8 = np.broadcast(AR_f8) +b_i4_f8_f8 = np.broadcast(AR_i4, AR_f8, AR_f8) + +next(b_f8) +b_f8.reset() +b_f8.index +b_f8.iters +b_f8.nd +b_f8.ndim +b_f8.numiter +b_f8.shape +b_f8.size + +next(b_i4_f8_f8) +b_i4_f8_f8.reset() +b_i4_f8_f8.ndim +b_i4_f8_f8.index +b_i4_f8_f8.iters +b_i4_f8_f8.nd +b_i4_f8_f8.numiter +b_i4_f8_f8.shape +b_i4_f8_f8.size + +np.inner(AR_f8, AR_i4) + +np.where([True, True, False]) +np.where([True, True, False], 1, 0) + +np.lexsort([0, 1, 2]) + +np.can_cast(np.dtype("i8"), int) +np.can_cast(AR_f8, "f8") +np.can_cast(AR_f8, np.complex128, casting="unsafe") + +np.min_scalar_type([1]) +np.min_scalar_type(AR_f8) + +np.result_type(int, AR_i4) +np.result_type(AR_f8, AR_u1) +np.result_type(AR_f8, np.complex128) + +np.dot(AR_LIKE_f, AR_i4) +np.dot(AR_u1, 1) +np.dot(1.5j, 1) +np.dot(AR_u1, 1, out=AR_f8) + +np.vdot(AR_LIKE_f, AR_i4) +np.vdot(AR_u1, 1) +np.vdot(1.5j, 1) + +np.bincount(AR_i4) + +np.copyto(AR_f8, [1.6]) + +np.putmask(AR_f8, [True], 1.5) + +np.packbits(AR_i4) +np.packbits(AR_u1) + +np.unpackbits(AR_u1) + +np.shares_memory(1, 2) +np.shares_memory(AR_f8, AR_f8, max_work=-1) + +np.may_share_memory(1, 2) +np.may_share_memory(AR_f8, AR_f8, max_work=0) +np.may_share_memory(AR_f8, AR_f8, max_work=-1) diff --git a/local_packages/numpy/typing/tests/data/pass/ndarray_conversion.py b/local_packages/numpy/typing/tests/data/pass/ndarray_conversion.py new file mode 100644 index 0000000..a4e0bcf --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/ndarray_conversion.py @@ -0,0 +1,81 @@ +import os +import tempfile + +import numpy as np + +nd = np.array([[1, 2], [3, 4]]) +scalar_array = np.array(1) + +# item +scalar_array.item() +nd.item(1) +nd.item(0, 1) +nd.item((0, 1)) + +# tobytes +nd.tobytes() +nd.tobytes("C") +nd.tobytes(None) + +# tofile +if os.name != "nt": + with tempfile.NamedTemporaryFile(suffix=".txt") as tmp: + nd.tofile(tmp.name) + nd.tofile(tmp.name, "") + nd.tofile(tmp.name, sep="") + + nd.tofile(tmp.name, "", "%s") + nd.tofile(tmp.name, format="%s") + + nd.tofile(tmp) + +# dump is pretty simple +# dumps is pretty simple + +# astype +nd.astype("float") +nd.astype(float) + +nd.astype(float, "K") +nd.astype(float, order="K") + +nd.astype(float, "K", "unsafe") +nd.astype(float, casting="unsafe") + +nd.astype(float, "K", "unsafe", True) +nd.astype(float, subok=True) + +nd.astype(float, "K", "unsafe", True, True) +nd.astype(float, copy=True) + +# byteswap +nd.byteswap() +nd.byteswap(True) + +# copy +nd.copy() +nd.copy("C") + +# view +nd.view() +nd.view(np.int64) +nd.view(dtype=np.int64) +nd.view(np.int64, np.matrix) +nd.view(type=np.matrix) + +# getfield +complex_array = np.array([[1 + 1j, 0], [0, 1 - 1j]], dtype=np.complex128) + +complex_array.getfield("float") +complex_array.getfield(float) + +complex_array.getfield("float", 8) +complex_array.getfield(float, offset=8) + +# setflags +nd.setflags() +nd.setflags(write=True) +nd.setflags(write=True, align=True) +nd.setflags(write=True, align=True, uic=False) + +# fill is pretty simple diff --git a/local_packages/numpy/typing/tests/data/pass/ndarray_misc.py b/local_packages/numpy/typing/tests/data/pass/ndarray_misc.py new file mode 100644 index 0000000..3437338 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/ndarray_misc.py @@ -0,0 +1,199 @@ +""" +Tests for miscellaneous (non-magic) ``np.ndarray``/``np.generic`` methods. + +More extensive tests are performed for the methods' +function-based counterpart in `../from_numeric.py`. + +""" + +from __future__ import annotations + +import operator +from collections.abc import Hashable +from typing import Any, cast + +import numpy as np +import numpy.typing as npt + + +class SubClass(npt.NDArray[np.float64]): ... +class IntSubClass(npt.NDArray[np.intp]): ... + + +i4 = np.int32(1) +A: np.ndarray[Any, np.dtype[np.int32]] = np.array([[1]], dtype=np.int32) +B0 = np.empty((), dtype=np.int32).view(SubClass) +B1 = np.empty((1,), dtype=np.int32).view(SubClass) +B2 = np.empty((1, 1), dtype=np.int32).view(SubClass) +B_int0: IntSubClass = np.empty((), dtype=np.intp).view(IntSubClass) +C: np.ndarray[Any, np.dtype[np.int32]] = np.array([0, 1, 2], dtype=np.int32) +D = np.ones(3).view(SubClass) + +ctypes_obj = A.ctypes + +i4.all() +A.all() +A.all(axis=0) +A.all(keepdims=True) +A.all(out=B0) + +i4.any() +A.any() +A.any(axis=0) +A.any(keepdims=True) +A.any(out=B0) + +i4.argmax() +A.argmax() +A.argmax(axis=0) +A.argmax(out=B_int0) + +i4.argmin() +A.argmin() +A.argmin(axis=0) +A.argmin(out=B_int0) + +i4.argsort() +i4.argsort(stable=True) +A.argsort() +A.argsort(stable=True) + +A.sort() +A.sort(stable=True) + +i4.choose([()]) +_choices = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype=np.int32) +C.choose(_choices) +C.choose(_choices, out=D) + +i4.clip(1) +A.clip(1) +A.clip(None, 1) +A.clip(1, out=B2) +A.clip(None, 1, out=B2) + +i4.compress([1]) +A.compress([1]) +A.compress([1], out=B1) + +i4.conj() +A.conj() +B0.conj() + +i4.conjugate() +A.conjugate() +B0.conjugate() + +i4.cumprod() +A.cumprod() +A.cumprod(out=B1) + +i4.cumsum() +A.cumsum() +A.cumsum(out=B1) + +i4.max() +A.max() +A.max(axis=0) +A.max(keepdims=True) +A.max(out=B0) + +i4.mean() +A.mean() +A.mean(axis=0) +A.mean(keepdims=True) +A.mean(out=B0) + +i4.min() +A.min() +A.min(axis=0) +A.min(keepdims=True) +A.min(out=B0) + +i4.prod() +A.prod() +A.prod(axis=0) +A.prod(keepdims=True) +A.prod(out=B0) + +i4.round() +A.round() +A.round(out=B2) + +i4.repeat(1) +A.repeat(1) +B0.repeat(1) + +i4.std() +A.std() +A.std(axis=0) +A.std(keepdims=True, mean=0.) +A.std(out=B0.astype(np.float64)) + +i4.sum() +A.sum() +A.sum(axis=0) +A.sum(keepdims=True) +A.sum(out=B0) + +i4.take(0) +A.take(0) +A.take([0]) +A.take(0, out=B0) +A.take([0], out=B1) + +i4.var() +A.var() +A.var(axis=0) +A.var(keepdims=True, mean=0.) +A.var(out=B0) + +A.argpartition([0]) + +A.diagonal() + +A.dot(1) +A.dot(1, out=B2) + +A.nonzero() + +C.searchsorted(1) + +A.trace() +A.trace(out=B0) + +void = cast(np.void, np.array(1, dtype=[("f", np.float64)]).take(0)) +void.setfield(10, np.float64) + +A.item(0) +C.item(0) + +A.ravel() +C.ravel() + +A.flatten() +C.flatten() + +A.reshape(1) +C.reshape(3) + +int(np.array(1.0, dtype=np.float64)) +int(np.array("1", dtype=np.str_)) + +float(np.array(1.0, dtype=np.float64)) +float(np.array("1", dtype=np.str_)) + +complex(np.array(1.0, dtype=np.float64)) + +operator.index(np.array(1, dtype=np.int64)) + +# this fails on numpy 2.2.1 +# https://github.com/scipy/scipy/blob/a755ee77ec47a64849abe42c349936475a6c2f24/scipy/io/arff/tests/test_arffread.py#L41-L44 +A_float = np.array([[1, 5], [2, 4], [np.nan, np.nan]]) +A_void: npt.NDArray[np.void] = np.empty(3, [("yop", float), ("yap", float)]) +A_void["yop"] = A_float[:, 0] +A_void["yap"] = A_float[:, 1] + +# regression test for https://github.com/numpy/numpy/issues/30445 +def f(x: np.generic) -> Hashable: + return x diff --git a/local_packages/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py b/local_packages/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py new file mode 100644 index 0000000..0ca3dff --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py @@ -0,0 +1,47 @@ +import numpy as np + +nd1 = np.array([[1, 2], [3, 4]]) + +# reshape +nd1.reshape(4) +nd1.reshape(2, 2) +nd1.reshape((2, 2)) + +nd1.reshape((2, 2), order="C") +nd1.reshape(4, order="C") + +# resize +nd1.resize() +nd1.resize(4) +nd1.resize(2, 2) +nd1.resize((2, 2)) + +nd1.resize((2, 2), refcheck=True) +nd1.resize(4, refcheck=True) + +nd2 = np.array([[1, 2], [3, 4]]) + +# transpose +nd2.transpose() +nd2.transpose(1, 0) +nd2.transpose((1, 0)) + +# swapaxes +nd2.swapaxes(0, 1) + +# flatten +nd2.flatten() +nd2.flatten("C") + +# ravel +nd2.ravel() +nd2.ravel("C") + +# squeeze +nd2.squeeze() + +nd3 = np.array([[1, 2]]) +nd3.squeeze(0) + +nd4 = np.array([[[1, 2]]]) +nd4.squeeze((0, 1)) diff --git a/local_packages/numpy/typing/tests/data/pass/nditer.py b/local_packages/numpy/typing/tests/data/pass/nditer.py new file mode 100644 index 0000000..25a5b44 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/nditer.py @@ -0,0 +1,4 @@ +import numpy as np + +arr = np.array([1]) +np.nditer([arr, None]) diff --git a/local_packages/numpy/typing/tests/data/pass/numeric.py b/local_packages/numpy/typing/tests/data/pass/numeric.py new file mode 100644 index 0000000..825a6dd --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/numeric.py @@ -0,0 +1,90 @@ +""" +Tests for :mod:`numpy._core.numeric`. + +Does not include tests which fall under ``array_constructors``. +""" + +from typing import Any + +import numpy as np + + +class SubClass(np.ndarray[tuple[Any, ...], np.dtype[np.float64]]): ... + + +i8 = np.int64(1) + +A = np.arange(27).reshape(3, 3, 3) +B = A.tolist() +C = np.empty((27, 27)).view(SubClass) + +np.count_nonzero(i8) +np.count_nonzero(A) +np.count_nonzero(B) +np.count_nonzero(A, keepdims=True) +np.count_nonzero(A, axis=0) + +np.isfortran(i8) +np.isfortran(A) + +np.argwhere(i8) +np.argwhere(A) + +np.flatnonzero(i8) +np.flatnonzero(A) + +np.correlate(B[0][0], A.ravel(), mode="valid") +np.correlate(A.ravel(), A.ravel(), mode="same") + +np.convolve(B[0][0], A.ravel(), mode="valid") +np.convolve(A.ravel(), A.ravel(), mode="same") + +np.outer(i8, A) +np.outer(B, A) +np.outer(A, A) +np.outer(A, A, out=C) + +np.tensordot(B, A) +np.tensordot(A, A) +np.tensordot(A, A, axes=0) +np.tensordot(A, A, axes=(0, 1)) + +np.isscalar(i8) +np.isscalar(A) +np.isscalar(B) + +np.roll(A, 1) +np.roll(A, (1, 2)) +np.roll(B, 1) + +np.rollaxis(A, 0, 1) + +np.moveaxis(A, 0, 1) +np.moveaxis(A, (0, 1), (1, 2)) + +np.cross(B, A) +np.cross(A, A) + +np.indices([0, 1, 2]) +np.indices([0, 1, 2], sparse=False) +np.indices([0, 1, 2], sparse=True) + +np.binary_repr(1) + +np.base_repr(1) + +np.allclose(i8, A) +np.allclose(B, A) +np.allclose(A, A) + +np.isclose(i8, A) +np.isclose(B, A) +np.isclose(A, A) + +np.array_equal(i8, A) +np.array_equal(B, A) +np.array_equal(A, A) + +np.array_equiv(i8, A) +np.array_equiv(B, A) +np.array_equiv(A, A) diff --git a/local_packages/numpy/typing/tests/data/pass/numerictypes.py b/local_packages/numpy/typing/tests/data/pass/numerictypes.py new file mode 100644 index 0000000..24e1a99 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/numerictypes.py @@ -0,0 +1,17 @@ +import numpy as np + +np.isdtype(np.float64, (np.int64, np.float64)) +np.isdtype(np.int64, "signed integer") + +np.issubdtype("S1", np.bytes_) +np.issubdtype(np.float64, np.float32) + +np.ScalarType +np.ScalarType[0] +np.ScalarType[3] +np.ScalarType[8] +np.ScalarType[10] + +np.typecodes["Character"] +np.typecodes["Complex"] +np.typecodes["All"] diff --git a/local_packages/numpy/typing/tests/data/pass/random.py b/local_packages/numpy/typing/tests/data/pass/random.py new file mode 100644 index 0000000..fd07e37 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/random.py @@ -0,0 +1,1498 @@ +from __future__ import annotations + +from typing import Any + +import numpy as np + +SEED_NONE = None +SEED_INT = 4579435749574957634658964293569 +SEED_ARR: np.ndarray[Any, np.dtype[np.int64]] = np.array([1, 2, 3, 4], dtype=np.int64) +SEED_ARRLIKE: list[int] = [1, 2, 3, 4] +SEED_SEED_SEQ: np.random.SeedSequence = np.random.SeedSequence(0) +SEED_MT19937: np.random.MT19937 = np.random.MT19937(0) +SEED_PCG64: np.random.PCG64 = np.random.PCG64(0) +SEED_PHILOX: np.random.Philox = np.random.Philox(0) +SEED_SFC64: np.random.SFC64 = np.random.SFC64(0) + +# default rng +np.random.default_rng() +np.random.default_rng(SEED_NONE) +np.random.default_rng(SEED_INT) +np.random.default_rng(SEED_ARR) +np.random.default_rng(SEED_ARRLIKE) +np.random.default_rng(SEED_SEED_SEQ) +np.random.default_rng(SEED_MT19937) +np.random.default_rng(SEED_PCG64) +np.random.default_rng(SEED_PHILOX) +np.random.default_rng(SEED_SFC64) + +# Seed Sequence +np.random.SeedSequence(SEED_NONE) +np.random.SeedSequence(SEED_INT) +np.random.SeedSequence(SEED_ARR) +np.random.SeedSequence(SEED_ARRLIKE) + +# Bit Generators +np.random.MT19937(SEED_NONE) +np.random.MT19937(SEED_INT) +np.random.MT19937(SEED_ARR) +np.random.MT19937(SEED_ARRLIKE) +np.random.MT19937(SEED_SEED_SEQ) + +np.random.PCG64(SEED_NONE) +np.random.PCG64(SEED_INT) +np.random.PCG64(SEED_ARR) +np.random.PCG64(SEED_ARRLIKE) +np.random.PCG64(SEED_SEED_SEQ) + +np.random.Philox(SEED_NONE) +np.random.Philox(SEED_INT) +np.random.Philox(SEED_ARR) +np.random.Philox(SEED_ARRLIKE) +np.random.Philox(SEED_SEED_SEQ) + +np.random.SFC64(SEED_NONE) +np.random.SFC64(SEED_INT) +np.random.SFC64(SEED_ARR) +np.random.SFC64(SEED_ARRLIKE) +np.random.SFC64(SEED_SEED_SEQ) + +seed_seq: np.random.bit_generator.SeedSequence = np.random.SeedSequence(SEED_NONE) +seed_seq.spawn(10) +seed_seq.generate_state(3) +seed_seq.generate_state(3, "u4") +seed_seq.generate_state(3, "uint32") +seed_seq.generate_state(3, "u8") +seed_seq.generate_state(3, "uint64") +seed_seq.generate_state(3, np.uint32) +seed_seq.generate_state(3, np.uint64) + + +def_gen: np.random.Generator = np.random.default_rng() + +D_arr_0p1: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.1]) +D_arr_0p5: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.5]) +D_arr_0p9: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.9]) +D_arr_1p5: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.5]) +I_arr_10: np.ndarray[Any, np.dtype[np.int_]] = np.array([10], dtype=np.int_) +I_arr_20: np.ndarray[Any, np.dtype[np.int_]] = np.array([20], dtype=np.int_) +D_arr_like_0p1: list[float] = [0.1] +D_arr_like_0p5: list[float] = [0.5] +D_arr_like_0p9: list[float] = [0.9] +D_arr_like_1p5: list[float] = [1.5] +I_arr_like_10: list[int] = [10] +I_arr_like_20: list[int] = [20] +D_2D_like: list[list[float]] = [[1, 2], [2, 3], [3, 4], [4, 5.1]] +D_2D: np.ndarray[Any, np.dtype[np.float64]] = np.array(D_2D_like) + +S_out: np.ndarray[Any, np.dtype[np.float32]] = np.empty(1, dtype=np.float32) +D_out: np.ndarray[Any, np.dtype[np.float64]] = np.empty(1) + +def_gen.standard_normal() +def_gen.standard_normal(dtype=np.float32) +def_gen.standard_normal(dtype="float32") +def_gen.standard_normal(dtype="double") +def_gen.standard_normal(dtype=np.float64) +def_gen.standard_normal(size=None) +def_gen.standard_normal(size=1) +def_gen.standard_normal(size=1, dtype=np.float32) +def_gen.standard_normal(size=1, dtype="f4") +def_gen.standard_normal(size=1, dtype="float32", out=S_out) +def_gen.standard_normal(dtype=np.float32, out=S_out) +def_gen.standard_normal(size=1, dtype=np.float64) +def_gen.standard_normal(size=1, dtype="float64") +def_gen.standard_normal(size=1, dtype="f8") +def_gen.standard_normal(out=D_out) +def_gen.standard_normal(size=1, dtype="float64") +def_gen.standard_normal(size=1, dtype="float64", out=D_out) + +def_gen.random() +def_gen.random(dtype=np.float32) +def_gen.random(dtype="float32") +def_gen.random(dtype="double") +def_gen.random(dtype=np.float64) +def_gen.random(size=None) +def_gen.random(size=1) +def_gen.random(size=1, dtype=np.float32) +def_gen.random(size=1, dtype="f4") +def_gen.random(size=1, dtype="float32", out=S_out) +def_gen.random(dtype=np.float32, out=S_out) +def_gen.random(size=1, dtype=np.float64) +def_gen.random(size=1, dtype="float64") +def_gen.random(size=1, dtype="f8") +def_gen.random(out=D_out) +def_gen.random(size=1, dtype="float64") +def_gen.random(size=1, dtype="float64", out=D_out) + +def_gen.standard_cauchy() +def_gen.standard_cauchy(size=None) +def_gen.standard_cauchy(size=1) + +def_gen.standard_exponential() +def_gen.standard_exponential(method="inv") +def_gen.standard_exponential(dtype=np.float32) +def_gen.standard_exponential(dtype="float32") +def_gen.standard_exponential(dtype="double") +def_gen.standard_exponential(dtype=np.float64) +def_gen.standard_exponential(size=None) +def_gen.standard_exponential(size=None, method="inv") +def_gen.standard_exponential(size=1, method="inv") +def_gen.standard_exponential(size=1, dtype=np.float32) +def_gen.standard_exponential(size=1, dtype="f4", method="inv") +def_gen.standard_exponential(size=1, dtype="float32", out=S_out) +def_gen.standard_exponential(dtype=np.float32, out=S_out) +def_gen.standard_exponential(size=1, dtype=np.float64, method="inv") +def_gen.standard_exponential(size=1, dtype="float64") +def_gen.standard_exponential(size=1, dtype="f8") +def_gen.standard_exponential(out=D_out) +def_gen.standard_exponential(size=1, dtype="float64") +def_gen.standard_exponential(size=1, dtype="float64", out=D_out) + +def_gen.zipf(1.5) +def_gen.zipf(1.5, size=None) +def_gen.zipf(1.5, size=1) +def_gen.zipf(D_arr_1p5) +def_gen.zipf(D_arr_1p5, size=1) +def_gen.zipf(D_arr_like_1p5) +def_gen.zipf(D_arr_like_1p5, size=1) + +def_gen.weibull(0.5) +def_gen.weibull(0.5, size=None) +def_gen.weibull(0.5, size=1) +def_gen.weibull(D_arr_0p5) +def_gen.weibull(D_arr_0p5, size=1) +def_gen.weibull(D_arr_like_0p5) +def_gen.weibull(D_arr_like_0p5, size=1) + +def_gen.standard_t(0.5) +def_gen.standard_t(0.5, size=None) +def_gen.standard_t(0.5, size=1) +def_gen.standard_t(D_arr_0p5) +def_gen.standard_t(D_arr_0p5, size=1) +def_gen.standard_t(D_arr_like_0p5) +def_gen.standard_t(D_arr_like_0p5, size=1) + +def_gen.poisson(0.5) +def_gen.poisson(0.5, size=None) +def_gen.poisson(0.5, size=1) +def_gen.poisson(D_arr_0p5) +def_gen.poisson(D_arr_0p5, size=1) +def_gen.poisson(D_arr_like_0p5) +def_gen.poisson(D_arr_like_0p5, size=1) + +def_gen.power(0.5) +def_gen.power(0.5, size=None) +def_gen.power(0.5, size=1) +def_gen.power(D_arr_0p5) +def_gen.power(D_arr_0p5, size=1) +def_gen.power(D_arr_like_0p5) +def_gen.power(D_arr_like_0p5, size=1) + +def_gen.pareto(0.5) +def_gen.pareto(0.5, size=None) +def_gen.pareto(0.5, size=1) +def_gen.pareto(D_arr_0p5) +def_gen.pareto(D_arr_0p5, size=1) +def_gen.pareto(D_arr_like_0p5) +def_gen.pareto(D_arr_like_0p5, size=1) + +def_gen.chisquare(0.5) +def_gen.chisquare(0.5, size=None) +def_gen.chisquare(0.5, size=1) +def_gen.chisquare(D_arr_0p5) +def_gen.chisquare(D_arr_0p5, size=1) +def_gen.chisquare(D_arr_like_0p5) +def_gen.chisquare(D_arr_like_0p5, size=1) + +def_gen.exponential(0.5) +def_gen.exponential(0.5, size=None) +def_gen.exponential(0.5, size=1) +def_gen.exponential(D_arr_0p5) +def_gen.exponential(D_arr_0p5, size=1) +def_gen.exponential(D_arr_like_0p5) +def_gen.exponential(D_arr_like_0p5, size=1) + +def_gen.geometric(0.5) +def_gen.geometric(0.5, size=None) +def_gen.geometric(0.5, size=1) +def_gen.geometric(D_arr_0p5) +def_gen.geometric(D_arr_0p5, size=1) +def_gen.geometric(D_arr_like_0p5) +def_gen.geometric(D_arr_like_0p5, size=1) + +def_gen.logseries(0.5) +def_gen.logseries(0.5, size=None) +def_gen.logseries(0.5, size=1) +def_gen.logseries(D_arr_0p5) +def_gen.logseries(D_arr_0p5, size=1) +def_gen.logseries(D_arr_like_0p5) +def_gen.logseries(D_arr_like_0p5, size=1) + +def_gen.rayleigh(0.5) +def_gen.rayleigh(0.5, size=None) +def_gen.rayleigh(0.5, size=1) +def_gen.rayleigh(D_arr_0p5) +def_gen.rayleigh(D_arr_0p5, size=1) +def_gen.rayleigh(D_arr_like_0p5) +def_gen.rayleigh(D_arr_like_0p5, size=1) + +def_gen.standard_gamma(0.5) +def_gen.standard_gamma(0.5, size=None) +def_gen.standard_gamma(0.5, dtype="float32") +def_gen.standard_gamma(0.5, size=None, dtype="float32") +def_gen.standard_gamma(0.5, size=1) +def_gen.standard_gamma(D_arr_0p5) +def_gen.standard_gamma(D_arr_0p5, dtype="f4") +def_gen.standard_gamma(0.5, size=1, dtype="float32", out=S_out) +def_gen.standard_gamma(D_arr_0p5, dtype=np.float32, out=S_out) +def_gen.standard_gamma(D_arr_0p5, size=1) +def_gen.standard_gamma(D_arr_like_0p5) +def_gen.standard_gamma(D_arr_like_0p5, size=1) +def_gen.standard_gamma(0.5, out=D_out) +def_gen.standard_gamma(D_arr_like_0p5, out=D_out) +def_gen.standard_gamma(D_arr_like_0p5, size=1) +def_gen.standard_gamma(D_arr_like_0p5, size=1, out=D_out, dtype=np.float64) + +def_gen.vonmises(0.5, 0.5) +def_gen.vonmises(0.5, 0.5, size=None) +def_gen.vonmises(0.5, 0.5, size=1) +def_gen.vonmises(D_arr_0p5, 0.5) +def_gen.vonmises(0.5, D_arr_0p5) +def_gen.vonmises(D_arr_0p5, 0.5, size=1) +def_gen.vonmises(0.5, D_arr_0p5, size=1) +def_gen.vonmises(D_arr_like_0p5, 0.5) +def_gen.vonmises(0.5, D_arr_like_0p5) +def_gen.vonmises(D_arr_0p5, D_arr_0p5) +def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5) +def_gen.vonmises(D_arr_0p5, D_arr_0p5, size=1) +def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.wald(0.5, 0.5) +def_gen.wald(0.5, 0.5, size=None) +def_gen.wald(0.5, 0.5, size=1) +def_gen.wald(D_arr_0p5, 0.5) +def_gen.wald(0.5, D_arr_0p5) +def_gen.wald(D_arr_0p5, 0.5, size=1) +def_gen.wald(0.5, D_arr_0p5, size=1) +def_gen.wald(D_arr_like_0p5, 0.5) +def_gen.wald(0.5, D_arr_like_0p5) +def_gen.wald(D_arr_0p5, D_arr_0p5) +def_gen.wald(D_arr_like_0p5, D_arr_like_0p5) +def_gen.wald(D_arr_0p5, D_arr_0p5, size=1) +def_gen.wald(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.uniform(0.5, 0.5) +def_gen.uniform(0.5, 0.5, size=None) +def_gen.uniform(0.5, 0.5, size=1) +def_gen.uniform(D_arr_0p5, 0.5) +def_gen.uniform(0.5, D_arr_0p5) +def_gen.uniform(D_arr_0p5, 0.5, size=1) +def_gen.uniform(0.5, D_arr_0p5, size=1) +def_gen.uniform(D_arr_like_0p5, 0.5) +def_gen.uniform(0.5, D_arr_like_0p5) +def_gen.uniform(D_arr_0p5, D_arr_0p5) +def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5) +def_gen.uniform(D_arr_0p5, D_arr_0p5, size=1) +def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.beta(0.5, 0.5) +def_gen.beta(0.5, 0.5, size=None) +def_gen.beta(0.5, 0.5, size=1) +def_gen.beta(D_arr_0p5, 0.5) +def_gen.beta(0.5, D_arr_0p5) +def_gen.beta(D_arr_0p5, 0.5, size=1) +def_gen.beta(0.5, D_arr_0p5, size=1) +def_gen.beta(D_arr_like_0p5, 0.5) +def_gen.beta(0.5, D_arr_like_0p5) +def_gen.beta(D_arr_0p5, D_arr_0p5) +def_gen.beta(D_arr_like_0p5, D_arr_like_0p5) +def_gen.beta(D_arr_0p5, D_arr_0p5, size=1) +def_gen.beta(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.f(0.5, 0.5) +def_gen.f(0.5, 0.5, size=None) +def_gen.f(0.5, 0.5, size=1) +def_gen.f(D_arr_0p5, 0.5) +def_gen.f(0.5, D_arr_0p5) +def_gen.f(D_arr_0p5, 0.5, size=1) +def_gen.f(0.5, D_arr_0p5, size=1) +def_gen.f(D_arr_like_0p5, 0.5) +def_gen.f(0.5, D_arr_like_0p5) +def_gen.f(D_arr_0p5, D_arr_0p5) +def_gen.f(D_arr_like_0p5, D_arr_like_0p5) +def_gen.f(D_arr_0p5, D_arr_0p5, size=1) +def_gen.f(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.gamma(0.5, 0.5) +def_gen.gamma(0.5, 0.5, size=None) +def_gen.gamma(0.5, 0.5, size=1) +def_gen.gamma(D_arr_0p5, 0.5) +def_gen.gamma(0.5, D_arr_0p5) +def_gen.gamma(D_arr_0p5, 0.5, size=1) +def_gen.gamma(0.5, D_arr_0p5, size=1) +def_gen.gamma(D_arr_like_0p5, 0.5) +def_gen.gamma(0.5, D_arr_like_0p5) +def_gen.gamma(D_arr_0p5, D_arr_0p5) +def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5) +def_gen.gamma(D_arr_0p5, D_arr_0p5, size=1) +def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.gumbel(0.5, 0.5) +def_gen.gumbel(0.5, 0.5, size=None) +def_gen.gumbel(0.5, 0.5, size=1) +def_gen.gumbel(D_arr_0p5, 0.5) +def_gen.gumbel(0.5, D_arr_0p5) +def_gen.gumbel(D_arr_0p5, 0.5, size=1) +def_gen.gumbel(0.5, D_arr_0p5, size=1) +def_gen.gumbel(D_arr_like_0p5, 0.5) +def_gen.gumbel(0.5, D_arr_like_0p5) +def_gen.gumbel(D_arr_0p5, D_arr_0p5) +def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5) +def_gen.gumbel(D_arr_0p5, D_arr_0p5, size=1) +def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.laplace(0.5, 0.5) +def_gen.laplace(0.5, 0.5, size=None) +def_gen.laplace(0.5, 0.5, size=1) +def_gen.laplace(D_arr_0p5, 0.5) +def_gen.laplace(0.5, D_arr_0p5) +def_gen.laplace(D_arr_0p5, 0.5, size=1) +def_gen.laplace(0.5, D_arr_0p5, size=1) +def_gen.laplace(D_arr_like_0p5, 0.5) +def_gen.laplace(0.5, D_arr_like_0p5) +def_gen.laplace(D_arr_0p5, D_arr_0p5) +def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5) +def_gen.laplace(D_arr_0p5, D_arr_0p5, size=1) +def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.logistic(0.5, 0.5) +def_gen.logistic(0.5, 0.5, size=None) +def_gen.logistic(0.5, 0.5, size=1) +def_gen.logistic(D_arr_0p5, 0.5) +def_gen.logistic(0.5, D_arr_0p5) +def_gen.logistic(D_arr_0p5, 0.5, size=1) +def_gen.logistic(0.5, D_arr_0p5, size=1) +def_gen.logistic(D_arr_like_0p5, 0.5) +def_gen.logistic(0.5, D_arr_like_0p5) +def_gen.logistic(D_arr_0p5, D_arr_0p5) +def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5) +def_gen.logistic(D_arr_0p5, D_arr_0p5, size=1) +def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.lognormal(0.5, 0.5) +def_gen.lognormal(0.5, 0.5, size=None) +def_gen.lognormal(0.5, 0.5, size=1) +def_gen.lognormal(D_arr_0p5, 0.5) +def_gen.lognormal(0.5, D_arr_0p5) +def_gen.lognormal(D_arr_0p5, 0.5, size=1) +def_gen.lognormal(0.5, D_arr_0p5, size=1) +def_gen.lognormal(D_arr_like_0p5, 0.5) +def_gen.lognormal(0.5, D_arr_like_0p5) +def_gen.lognormal(D_arr_0p5, D_arr_0p5) +def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5) +def_gen.lognormal(D_arr_0p5, D_arr_0p5, size=1) +def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.noncentral_chisquare(0.5, 0.5) +def_gen.noncentral_chisquare(0.5, 0.5, size=None) +def_gen.noncentral_chisquare(0.5, 0.5, size=1) +def_gen.noncentral_chisquare(D_arr_0p5, 0.5) +def_gen.noncentral_chisquare(0.5, D_arr_0p5) +def_gen.noncentral_chisquare(D_arr_0p5, 0.5, size=1) +def_gen.noncentral_chisquare(0.5, D_arr_0p5, size=1) +def_gen.noncentral_chisquare(D_arr_like_0p5, 0.5) +def_gen.noncentral_chisquare(0.5, D_arr_like_0p5) +def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5) +def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5) +def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1) +def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.normal(0.5, 0.5) +def_gen.normal(0.5, 0.5, size=None) +def_gen.normal(0.5, 0.5, size=1) +def_gen.normal(D_arr_0p5, 0.5) +def_gen.normal(0.5, D_arr_0p5) +def_gen.normal(D_arr_0p5, 0.5, size=1) +def_gen.normal(0.5, D_arr_0p5, size=1) +def_gen.normal(D_arr_like_0p5, 0.5) +def_gen.normal(0.5, D_arr_like_0p5) +def_gen.normal(D_arr_0p5, D_arr_0p5) +def_gen.normal(D_arr_like_0p5, D_arr_like_0p5) +def_gen.normal(D_arr_0p5, D_arr_0p5, size=1) +def_gen.normal(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.triangular(0.1, 0.5, 0.9) +def_gen.triangular(0.1, 0.5, 0.9, size=None) +def_gen.triangular(0.1, 0.5, 0.9, size=1) +def_gen.triangular(D_arr_0p1, 0.5, 0.9) +def_gen.triangular(0.1, D_arr_0p5, 0.9) +def_gen.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1) +def_gen.triangular(0.1, D_arr_0p5, 0.9, size=1) +def_gen.triangular(D_arr_like_0p1, 0.5, D_arr_0p9) +def_gen.triangular(0.5, D_arr_like_0p5, 0.9) +def_gen.triangular(D_arr_0p1, D_arr_0p5, 0.9) +def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9) +def_gen.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1) +def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1) + +def_gen.noncentral_f(0.1, 0.5, 0.9) +def_gen.noncentral_f(0.1, 0.5, 0.9, size=None) +def_gen.noncentral_f(0.1, 0.5, 0.9, size=1) +def_gen.noncentral_f(D_arr_0p1, 0.5, 0.9) +def_gen.noncentral_f(0.1, D_arr_0p5, 0.9) +def_gen.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1) +def_gen.noncentral_f(0.1, D_arr_0p5, 0.9, size=1) +def_gen.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9) +def_gen.noncentral_f(0.5, D_arr_like_0p5, 0.9) +def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9) +def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9) +def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1) +def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1) + +def_gen.binomial(10, 0.5) +def_gen.binomial(10, 0.5, size=None) +def_gen.binomial(10, 0.5, size=1) +def_gen.binomial(I_arr_10, 0.5) +def_gen.binomial(10, D_arr_0p5) +def_gen.binomial(I_arr_10, 0.5, size=1) +def_gen.binomial(10, D_arr_0p5, size=1) +def_gen.binomial(I_arr_like_10, 0.5) +def_gen.binomial(10, D_arr_like_0p5) +def_gen.binomial(I_arr_10, D_arr_0p5) +def_gen.binomial(I_arr_like_10, D_arr_like_0p5) +def_gen.binomial(I_arr_10, D_arr_0p5, size=1) +def_gen.binomial(I_arr_like_10, D_arr_like_0p5, size=1) + +def_gen.negative_binomial(10, 0.5) +def_gen.negative_binomial(10, 0.5, size=None) +def_gen.negative_binomial(10, 0.5, size=1) +def_gen.negative_binomial(I_arr_10, 0.5) +def_gen.negative_binomial(10, D_arr_0p5) +def_gen.negative_binomial(I_arr_10, 0.5, size=1) +def_gen.negative_binomial(10, D_arr_0p5, size=1) +def_gen.negative_binomial(I_arr_like_10, 0.5) +def_gen.negative_binomial(10, D_arr_like_0p5) +def_gen.negative_binomial(I_arr_10, D_arr_0p5) +def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5) +def_gen.negative_binomial(I_arr_10, D_arr_0p5, size=1) +def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1) + +def_gen.hypergeometric(20, 20, 10) +def_gen.hypergeometric(20, 20, 10, size=None) +def_gen.hypergeometric(20, 20, 10, size=1) +def_gen.hypergeometric(I_arr_20, 20, 10) +def_gen.hypergeometric(20, I_arr_20, 10) +def_gen.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1) +def_gen.hypergeometric(20, I_arr_20, 10, size=1) +def_gen.hypergeometric(I_arr_like_20, 20, I_arr_10) +def_gen.hypergeometric(20, I_arr_like_20, 10) +def_gen.hypergeometric(I_arr_20, I_arr_20, 10) +def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, 10) +def_gen.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1) +def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1) + +I_int64_100: np.ndarray[Any, np.dtype[np.int64]] = np.array([100], dtype=np.int64) + +def_gen.integers(0, 100) +def_gen.integers(100) +def_gen.integers([100]) +def_gen.integers(0, [100]) + +I_bool_low: np.ndarray[Any, np.dtype[np.bool]] = np.array([0], dtype=np.bool) +I_bool_low_like: list[int] = [0] +I_bool_high_open: np.ndarray[Any, np.dtype[np.bool]] = np.array([1], dtype=np.bool) +I_bool_high_closed: np.ndarray[Any, np.dtype[np.bool]] = np.array([1], dtype=np.bool) + +def_gen.integers(2, dtype=bool) +def_gen.integers(0, 2, dtype=bool) +def_gen.integers(1, dtype=bool, endpoint=True) +def_gen.integers(0, 1, dtype=bool, endpoint=True) +def_gen.integers(I_bool_low_like, 1, dtype=bool, endpoint=True) +def_gen.integers(I_bool_high_open, dtype=bool) +def_gen.integers(I_bool_low, I_bool_high_open, dtype=bool) +def_gen.integers(0, I_bool_high_open, dtype=bool) +def_gen.integers(I_bool_high_closed, dtype=bool, endpoint=True) +def_gen.integers(I_bool_low, I_bool_high_closed, dtype=bool, endpoint=True) +def_gen.integers(0, I_bool_high_closed, dtype=bool, endpoint=True) + +def_gen.integers(2, dtype=np.bool) +def_gen.integers(0, 2, dtype=np.bool) +def_gen.integers(1, dtype=np.bool, endpoint=True) +def_gen.integers(0, 1, dtype=np.bool, endpoint=True) +def_gen.integers(I_bool_low_like, 1, dtype=np.bool, endpoint=True) +def_gen.integers(I_bool_high_open, dtype=np.bool) +def_gen.integers(I_bool_low, I_bool_high_open, dtype=np.bool) +def_gen.integers(0, I_bool_high_open, dtype=np.bool) +def_gen.integers(I_bool_high_closed, dtype=np.bool, endpoint=True) +def_gen.integers(I_bool_low, I_bool_high_closed, dtype=np.bool, endpoint=True) +def_gen.integers(0, I_bool_high_closed, dtype=np.bool, endpoint=True) + +I_u1_low: np.ndarray[Any, np.dtype[np.uint8]] = np.array([0], dtype=np.uint8) +I_u1_low_like: list[int] = [0] +I_u1_high_open: np.ndarray[Any, np.dtype[np.uint8]] = np.array([255], dtype=np.uint8) +I_u1_high_closed: np.ndarray[Any, np.dtype[np.uint8]] = np.array([255], dtype=np.uint8) + +def_gen.integers(256, dtype="u1") +def_gen.integers(0, 256, dtype="u1") +def_gen.integers(255, dtype="u1", endpoint=True) +def_gen.integers(0, 255, dtype="u1", endpoint=True) +def_gen.integers(I_u1_low_like, 255, dtype="u1", endpoint=True) +def_gen.integers(I_u1_high_open, dtype="u1") +def_gen.integers(I_u1_low, I_u1_high_open, dtype="u1") +def_gen.integers(0, I_u1_high_open, dtype="u1") +def_gen.integers(I_u1_high_closed, dtype="u1", endpoint=True) +def_gen.integers(I_u1_low, I_u1_high_closed, dtype="u1", endpoint=True) +def_gen.integers(0, I_u1_high_closed, dtype="u1", endpoint=True) + +def_gen.integers(256, dtype="uint8") +def_gen.integers(0, 256, dtype="uint8") +def_gen.integers(255, dtype="uint8", endpoint=True) +def_gen.integers(0, 255, dtype="uint8", endpoint=True) +def_gen.integers(I_u1_low_like, 255, dtype="uint8", endpoint=True) +def_gen.integers(I_u1_high_open, dtype="uint8") +def_gen.integers(I_u1_low, I_u1_high_open, dtype="uint8") +def_gen.integers(0, I_u1_high_open, dtype="uint8") +def_gen.integers(I_u1_high_closed, dtype="uint8", endpoint=True) +def_gen.integers(I_u1_low, I_u1_high_closed, dtype="uint8", endpoint=True) +def_gen.integers(0, I_u1_high_closed, dtype="uint8", endpoint=True) + +def_gen.integers(256, dtype=np.uint8) +def_gen.integers(0, 256, dtype=np.uint8) +def_gen.integers(255, dtype=np.uint8, endpoint=True) +def_gen.integers(0, 255, dtype=np.uint8, endpoint=True) +def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True) +def_gen.integers(I_u1_high_open, dtype=np.uint8) +def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8) +def_gen.integers(0, I_u1_high_open, dtype=np.uint8) +def_gen.integers(I_u1_high_closed, dtype=np.uint8, endpoint=True) +def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoint=True) +def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True) + +I_u2_low: np.ndarray[Any, np.dtype[np.uint16]] = np.array([0], dtype=np.uint16) +I_u2_low_like: list[int] = [0] +I_u2_high_open: np.ndarray[Any, np.dtype[np.uint16]] = np.array([65535], dtype=np.uint16) +I_u2_high_closed: np.ndarray[Any, np.dtype[np.uint16]] = np.array([65535], dtype=np.uint16) + +def_gen.integers(65536, dtype="u2") +def_gen.integers(0, 65536, dtype="u2") +def_gen.integers(65535, dtype="u2", endpoint=True) +def_gen.integers(0, 65535, dtype="u2", endpoint=True) +def_gen.integers(I_u2_low_like, 65535, dtype="u2", endpoint=True) +def_gen.integers(I_u2_high_open, dtype="u2") +def_gen.integers(I_u2_low, I_u2_high_open, dtype="u2") +def_gen.integers(0, I_u2_high_open, dtype="u2") +def_gen.integers(I_u2_high_closed, dtype="u2", endpoint=True) +def_gen.integers(I_u2_low, I_u2_high_closed, dtype="u2", endpoint=True) +def_gen.integers(0, I_u2_high_closed, dtype="u2", endpoint=True) + +def_gen.integers(65536, dtype="uint16") +def_gen.integers(0, 65536, dtype="uint16") +def_gen.integers(65535, dtype="uint16", endpoint=True) +def_gen.integers(0, 65535, dtype="uint16", endpoint=True) +def_gen.integers(I_u2_low_like, 65535, dtype="uint16", endpoint=True) +def_gen.integers(I_u2_high_open, dtype="uint16") +def_gen.integers(I_u2_low, I_u2_high_open, dtype="uint16") +def_gen.integers(0, I_u2_high_open, dtype="uint16") +def_gen.integers(I_u2_high_closed, dtype="uint16", endpoint=True) +def_gen.integers(I_u2_low, I_u2_high_closed, dtype="uint16", endpoint=True) +def_gen.integers(0, I_u2_high_closed, dtype="uint16", endpoint=True) + +def_gen.integers(65536, dtype=np.uint16) +def_gen.integers(0, 65536, dtype=np.uint16) +def_gen.integers(65535, dtype=np.uint16, endpoint=True) +def_gen.integers(0, 65535, dtype=np.uint16, endpoint=True) +def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True) +def_gen.integers(I_u2_high_open, dtype=np.uint16) +def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16) +def_gen.integers(0, I_u2_high_open, dtype=np.uint16) +def_gen.integers(I_u2_high_closed, dtype=np.uint16, endpoint=True) +def_gen.integers(I_u2_low, I_u2_high_closed, dtype=np.uint16, endpoint=True) +def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True) + +I_u4_low: np.ndarray[Any, np.dtype[np.uint32]] = np.array([0], dtype=np.uint32) +I_u4_low_like: list[int] = [0] +I_u4_high_open: np.ndarray[Any, np.dtype[np.uint32]] = np.array([4294967295], dtype=np.uint32) +I_u4_high_closed: np.ndarray[Any, np.dtype[np.uint32]] = np.array([4294967295], dtype=np.uint32) + +def_gen.integers(4294967296, dtype="u4") +def_gen.integers(0, 4294967296, dtype="u4") +def_gen.integers(4294967295, dtype="u4", endpoint=True) +def_gen.integers(0, 4294967295, dtype="u4", endpoint=True) +def_gen.integers(I_u4_low_like, 4294967295, dtype="u4", endpoint=True) +def_gen.integers(I_u4_high_open, dtype="u4") +def_gen.integers(I_u4_low, I_u4_high_open, dtype="u4") +def_gen.integers(0, I_u4_high_open, dtype="u4") +def_gen.integers(I_u4_high_closed, dtype="u4", endpoint=True) +def_gen.integers(I_u4_low, I_u4_high_closed, dtype="u4", endpoint=True) +def_gen.integers(0, I_u4_high_closed, dtype="u4", endpoint=True) + +def_gen.integers(4294967296, dtype="uint32") +def_gen.integers(0, 4294967296, dtype="uint32") +def_gen.integers(4294967295, dtype="uint32", endpoint=True) +def_gen.integers(0, 4294967295, dtype="uint32", endpoint=True) +def_gen.integers(I_u4_low_like, 4294967295, dtype="uint32", endpoint=True) +def_gen.integers(I_u4_high_open, dtype="uint32") +def_gen.integers(I_u4_low, I_u4_high_open, dtype="uint32") +def_gen.integers(0, I_u4_high_open, dtype="uint32") +def_gen.integers(I_u4_high_closed, dtype="uint32", endpoint=True) +def_gen.integers(I_u4_low, I_u4_high_closed, dtype="uint32", endpoint=True) +def_gen.integers(0, I_u4_high_closed, dtype="uint32", endpoint=True) + +def_gen.integers(4294967296, dtype=np.uint32) +def_gen.integers(0, 4294967296, dtype=np.uint32) +def_gen.integers(4294967295, dtype=np.uint32, endpoint=True) +def_gen.integers(0, 4294967295, dtype=np.uint32, endpoint=True) +def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True) +def_gen.integers(I_u4_high_open, dtype=np.uint32) +def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32) +def_gen.integers(0, I_u4_high_open, dtype=np.uint32) +def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True) +def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True) +def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True) + +I_u8_low: np.ndarray[Any, np.dtype[np.uint64]] = np.array([0], dtype=np.uint64) +I_u8_low_like: list[int] = [0] +I_u8_high_open: np.ndarray[Any, np.dtype[np.uint64]] = np.array([18446744073709551615], dtype=np.uint64) +I_u8_high_closed: np.ndarray[Any, np.dtype[np.uint64]] = np.array([18446744073709551615], dtype=np.uint64) + +def_gen.integers(18446744073709551616, dtype="u8") +def_gen.integers(0, 18446744073709551616, dtype="u8") +def_gen.integers(18446744073709551615, dtype="u8", endpoint=True) +def_gen.integers(0, 18446744073709551615, dtype="u8", endpoint=True) +def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="u8", endpoint=True) +def_gen.integers(I_u8_high_open, dtype="u8") +def_gen.integers(I_u8_low, I_u8_high_open, dtype="u8") +def_gen.integers(0, I_u8_high_open, dtype="u8") +def_gen.integers(I_u8_high_closed, dtype="u8", endpoint=True) +def_gen.integers(I_u8_low, I_u8_high_closed, dtype="u8", endpoint=True) +def_gen.integers(0, I_u8_high_closed, dtype="u8", endpoint=True) + +def_gen.integers(18446744073709551616, dtype="uint64") +def_gen.integers(0, 18446744073709551616, dtype="uint64") +def_gen.integers(18446744073709551615, dtype="uint64", endpoint=True) +def_gen.integers(0, 18446744073709551615, dtype="uint64", endpoint=True) +def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="uint64", endpoint=True) +def_gen.integers(I_u8_high_open, dtype="uint64") +def_gen.integers(I_u8_low, I_u8_high_open, dtype="uint64") +def_gen.integers(0, I_u8_high_open, dtype="uint64") +def_gen.integers(I_u8_high_closed, dtype="uint64", endpoint=True) +def_gen.integers(I_u8_low, I_u8_high_closed, dtype="uint64", endpoint=True) +def_gen.integers(0, I_u8_high_closed, dtype="uint64", endpoint=True) + +def_gen.integers(18446744073709551616, dtype=np.uint64) +def_gen.integers(0, 18446744073709551616, dtype=np.uint64) +def_gen.integers(18446744073709551615, dtype=np.uint64, endpoint=True) +def_gen.integers(0, 18446744073709551615, dtype=np.uint64, endpoint=True) +def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True) +def_gen.integers(I_u8_high_open, dtype=np.uint64) +def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64) +def_gen.integers(0, I_u8_high_open, dtype=np.uint64) +def_gen.integers(I_u8_high_closed, dtype=np.uint64, endpoint=True) +def_gen.integers(I_u8_low, I_u8_high_closed, dtype=np.uint64, endpoint=True) +def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True) + +I_i1_low: np.ndarray[Any, np.dtype[np.int8]] = np.array([-128], dtype=np.int8) +I_i1_low_like: list[int] = [-128] +I_i1_high_open: np.ndarray[Any, np.dtype[np.int8]] = np.array([127], dtype=np.int8) +I_i1_high_closed: np.ndarray[Any, np.dtype[np.int8]] = np.array([127], dtype=np.int8) + +def_gen.integers(128, dtype="i1") +def_gen.integers(-128, 128, dtype="i1") +def_gen.integers(127, dtype="i1", endpoint=True) +def_gen.integers(-128, 127, dtype="i1", endpoint=True) +def_gen.integers(I_i1_low_like, 127, dtype="i1", endpoint=True) +def_gen.integers(I_i1_high_open, dtype="i1") +def_gen.integers(I_i1_low, I_i1_high_open, dtype="i1") +def_gen.integers(-128, I_i1_high_open, dtype="i1") +def_gen.integers(I_i1_high_closed, dtype="i1", endpoint=True) +def_gen.integers(I_i1_low, I_i1_high_closed, dtype="i1", endpoint=True) +def_gen.integers(-128, I_i1_high_closed, dtype="i1", endpoint=True) + +def_gen.integers(128, dtype="int8") +def_gen.integers(-128, 128, dtype="int8") +def_gen.integers(127, dtype="int8", endpoint=True) +def_gen.integers(-128, 127, dtype="int8", endpoint=True) +def_gen.integers(I_i1_low_like, 127, dtype="int8", endpoint=True) +def_gen.integers(I_i1_high_open, dtype="int8") +def_gen.integers(I_i1_low, I_i1_high_open, dtype="int8") +def_gen.integers(-128, I_i1_high_open, dtype="int8") +def_gen.integers(I_i1_high_closed, dtype="int8", endpoint=True) +def_gen.integers(I_i1_low, I_i1_high_closed, dtype="int8", endpoint=True) +def_gen.integers(-128, I_i1_high_closed, dtype="int8", endpoint=True) + +def_gen.integers(128, dtype=np.int8) +def_gen.integers(-128, 128, dtype=np.int8) +def_gen.integers(127, dtype=np.int8, endpoint=True) +def_gen.integers(-128, 127, dtype=np.int8, endpoint=True) +def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True) +def_gen.integers(I_i1_high_open, dtype=np.int8) +def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8) +def_gen.integers(-128, I_i1_high_open, dtype=np.int8) +def_gen.integers(I_i1_high_closed, dtype=np.int8, endpoint=True) +def_gen.integers(I_i1_low, I_i1_high_closed, dtype=np.int8, endpoint=True) +def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True) + +I_i2_low: np.ndarray[Any, np.dtype[np.int16]] = np.array([-32768], dtype=np.int16) +I_i2_low_like: list[int] = [-32768] +I_i2_high_open: np.ndarray[Any, np.dtype[np.int16]] = np.array([32767], dtype=np.int16) +I_i2_high_closed: np.ndarray[Any, np.dtype[np.int16]] = np.array([32767], dtype=np.int16) + +def_gen.integers(32768, dtype="i2") +def_gen.integers(-32768, 32768, dtype="i2") +def_gen.integers(32767, dtype="i2", endpoint=True) +def_gen.integers(-32768, 32767, dtype="i2", endpoint=True) +def_gen.integers(I_i2_low_like, 32767, dtype="i2", endpoint=True) +def_gen.integers(I_i2_high_open, dtype="i2") +def_gen.integers(I_i2_low, I_i2_high_open, dtype="i2") +def_gen.integers(-32768, I_i2_high_open, dtype="i2") +def_gen.integers(I_i2_high_closed, dtype="i2", endpoint=True) +def_gen.integers(I_i2_low, I_i2_high_closed, dtype="i2", endpoint=True) +def_gen.integers(-32768, I_i2_high_closed, dtype="i2", endpoint=True) + +def_gen.integers(32768, dtype="int16") +def_gen.integers(-32768, 32768, dtype="int16") +def_gen.integers(32767, dtype="int16", endpoint=True) +def_gen.integers(-32768, 32767, dtype="int16", endpoint=True) +def_gen.integers(I_i2_low_like, 32767, dtype="int16", endpoint=True) +def_gen.integers(I_i2_high_open, dtype="int16") +def_gen.integers(I_i2_low, I_i2_high_open, dtype="int16") +def_gen.integers(-32768, I_i2_high_open, dtype="int16") +def_gen.integers(I_i2_high_closed, dtype="int16", endpoint=True) +def_gen.integers(I_i2_low, I_i2_high_closed, dtype="int16", endpoint=True) +def_gen.integers(-32768, I_i2_high_closed, dtype="int16", endpoint=True) + +def_gen.integers(32768, dtype=np.int16) +def_gen.integers(-32768, 32768, dtype=np.int16) +def_gen.integers(32767, dtype=np.int16, endpoint=True) +def_gen.integers(-32768, 32767, dtype=np.int16, endpoint=True) +def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True) +def_gen.integers(I_i2_high_open, dtype=np.int16) +def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16) +def_gen.integers(-32768, I_i2_high_open, dtype=np.int16) +def_gen.integers(I_i2_high_closed, dtype=np.int16, endpoint=True) +def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoint=True) +def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True) + +I_i4_low: np.ndarray[Any, np.dtype[np.int32]] = np.array([-2147483648], dtype=np.int32) +I_i4_low_like: list[int] = [-2147483648] +I_i4_high_open: np.ndarray[Any, np.dtype[np.int32]] = np.array([2147483647], dtype=np.int32) +I_i4_high_closed: np.ndarray[Any, np.dtype[np.int32]] = np.array([2147483647], dtype=np.int32) + +def_gen.integers(2147483648, dtype="i4") +def_gen.integers(-2147483648, 2147483648, dtype="i4") +def_gen.integers(2147483647, dtype="i4", endpoint=True) +def_gen.integers(-2147483648, 2147483647, dtype="i4", endpoint=True) +def_gen.integers(I_i4_low_like, 2147483647, dtype="i4", endpoint=True) +def_gen.integers(I_i4_high_open, dtype="i4") +def_gen.integers(I_i4_low, I_i4_high_open, dtype="i4") +def_gen.integers(-2147483648, I_i4_high_open, dtype="i4") +def_gen.integers(I_i4_high_closed, dtype="i4", endpoint=True) +def_gen.integers(I_i4_low, I_i4_high_closed, dtype="i4", endpoint=True) +def_gen.integers(-2147483648, I_i4_high_closed, dtype="i4", endpoint=True) + +def_gen.integers(2147483648, dtype="int32") +def_gen.integers(-2147483648, 2147483648, dtype="int32") +def_gen.integers(2147483647, dtype="int32", endpoint=True) +def_gen.integers(-2147483648, 2147483647, dtype="int32", endpoint=True) +def_gen.integers(I_i4_low_like, 2147483647, dtype="int32", endpoint=True) +def_gen.integers(I_i4_high_open, dtype="int32") +def_gen.integers(I_i4_low, I_i4_high_open, dtype="int32") +def_gen.integers(-2147483648, I_i4_high_open, dtype="int32") +def_gen.integers(I_i4_high_closed, dtype="int32", endpoint=True) +def_gen.integers(I_i4_low, I_i4_high_closed, dtype="int32", endpoint=True) +def_gen.integers(-2147483648, I_i4_high_closed, dtype="int32", endpoint=True) + +def_gen.integers(2147483648, dtype=np.int32) +def_gen.integers(-2147483648, 2147483648, dtype=np.int32) +def_gen.integers(2147483647, dtype=np.int32, endpoint=True) +def_gen.integers(-2147483648, 2147483647, dtype=np.int32, endpoint=True) +def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True) +def_gen.integers(I_i4_high_open, dtype=np.int32) +def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32) +def_gen.integers(-2147483648, I_i4_high_open, dtype=np.int32) +def_gen.integers(I_i4_high_closed, dtype=np.int32, endpoint=True) +def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoint=True) +def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True) + +I_i8_low: np.ndarray[Any, np.dtype[np.int64]] = np.array([-9223372036854775808], dtype=np.int64) +I_i8_low_like: list[int] = [-9223372036854775808] +I_i8_high_open: np.ndarray[Any, np.dtype[np.int64]] = np.array([9223372036854775807], dtype=np.int64) +I_i8_high_closed: np.ndarray[Any, np.dtype[np.int64]] = np.array([9223372036854775807], dtype=np.int64) + +def_gen.integers(9223372036854775808, dtype="i8") +def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="i8") +def_gen.integers(9223372036854775807, dtype="i8", endpoint=True) +def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="i8", endpoint=True) +def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="i8", endpoint=True) +def_gen.integers(I_i8_high_open, dtype="i8") +def_gen.integers(I_i8_low, I_i8_high_open, dtype="i8") +def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="i8") +def_gen.integers(I_i8_high_closed, dtype="i8", endpoint=True) +def_gen.integers(I_i8_low, I_i8_high_closed, dtype="i8", endpoint=True) +def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="i8", endpoint=True) + +def_gen.integers(9223372036854775808, dtype="int64") +def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="int64") +def_gen.integers(9223372036854775807, dtype="int64", endpoint=True) +def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="int64", endpoint=True) +def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="int64", endpoint=True) +def_gen.integers(I_i8_high_open, dtype="int64") +def_gen.integers(I_i8_low, I_i8_high_open, dtype="int64") +def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="int64") +def_gen.integers(I_i8_high_closed, dtype="int64", endpoint=True) +def_gen.integers(I_i8_low, I_i8_high_closed, dtype="int64", endpoint=True) +def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="int64", endpoint=True) + +def_gen.integers(9223372036854775808, dtype=np.int64) +def_gen.integers(-9223372036854775808, 9223372036854775808, dtype=np.int64) +def_gen.integers(9223372036854775807, dtype=np.int64, endpoint=True) +def_gen.integers(-9223372036854775808, 9223372036854775807, dtype=np.int64, endpoint=True) +def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True) +def_gen.integers(I_i8_high_open, dtype=np.int64) +def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64) +def_gen.integers(-9223372036854775808, I_i8_high_open, dtype=np.int64) +def_gen.integers(I_i8_high_closed, dtype=np.int64, endpoint=True) +def_gen.integers(I_i8_low, I_i8_high_closed, dtype=np.int64, endpoint=True) +def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype=np.int64, endpoint=True) + + +def_gen.bit_generator + +def_gen.bytes(2) + +def_gen.choice(5) +def_gen.choice(5, 3) +def_gen.choice(5, 3, replace=True) +def_gen.choice(5, 3, p=[1 / 5] * 5) +def_gen.choice(5, 3, p=[1 / 5] * 5, replace=False) + +def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"]) +def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3) +def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4) +def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True) +def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4])) + +def_gen.dirichlet([0.5, 0.5]) +def_gen.dirichlet(np.array([0.5, 0.5])) +def_gen.dirichlet(np.array([0.5, 0.5]), size=3) + +def_gen.multinomial(20, [1 / 6.0] * 6) +def_gen.multinomial(20, np.array([0.5, 0.5])) +def_gen.multinomial(20, [1 / 6.0] * 6, size=2) +def_gen.multinomial([[10], [20]], [1 / 6.0] * 6, size=(2, 2)) +def_gen.multinomial(np.array([[10], [20]]), np.array([0.5, 0.5]), size=(2, 2)) + +def_gen.multivariate_hypergeometric([3, 5, 7], 2) +def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2) +def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=4) +def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=(4, 7)) +def_gen.multivariate_hypergeometric([3, 5, 7], 2, method="count") +def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, method="marginals") + +def_gen.multivariate_normal([0.0], [[1.0]]) +def_gen.multivariate_normal([0.0], np.array([[1.0]])) +def_gen.multivariate_normal(np.array([0.0]), [[1.0]]) +def_gen.multivariate_normal([0.0], np.array([[1.0]])) + +def_gen.permutation(10) +def_gen.permutation([1, 2, 3, 4]) +def_gen.permutation(np.array([1, 2, 3, 4])) +def_gen.permutation(D_2D, axis=1) +def_gen.permuted(D_2D) +def_gen.permuted(D_2D_like) +def_gen.permuted(D_2D, axis=1) +def_gen.permuted(D_2D, out=D_2D) +def_gen.permuted(D_2D_like, out=D_2D) +def_gen.permuted(D_2D_like, out=D_2D) +def_gen.permuted(D_2D, axis=1, out=D_2D) + +def_gen.shuffle(np.arange(10)) +def_gen.shuffle([1, 2, 3, 4, 5]) +def_gen.shuffle(D_2D, axis=1) + +def_gen.__str__() +def_gen.__repr__() +def_gen.__setstate__(dict(def_gen.bit_generator.state)) + +# RandomState +random_st: np.random.RandomState = np.random.RandomState() + +random_st.standard_normal() +random_st.standard_normal(size=None) +random_st.standard_normal(size=1) + +random_st.random() +random_st.random(size=None) +random_st.random(size=1) + +random_st.standard_cauchy() +random_st.standard_cauchy(size=None) +random_st.standard_cauchy(size=1) + +random_st.standard_exponential() +random_st.standard_exponential(size=None) +random_st.standard_exponential(size=1) + +random_st.zipf(1.5) +random_st.zipf(1.5, size=None) +random_st.zipf(1.5, size=1) +random_st.zipf(D_arr_1p5) +random_st.zipf(D_arr_1p5, size=1) +random_st.zipf(D_arr_like_1p5) +random_st.zipf(D_arr_like_1p5, size=1) + +random_st.weibull(0.5) +random_st.weibull(0.5, size=None) +random_st.weibull(0.5, size=1) +random_st.weibull(D_arr_0p5) +random_st.weibull(D_arr_0p5, size=1) +random_st.weibull(D_arr_like_0p5) +random_st.weibull(D_arr_like_0p5, size=1) + +random_st.standard_t(0.5) +random_st.standard_t(0.5, size=None) +random_st.standard_t(0.5, size=1) +random_st.standard_t(D_arr_0p5) +random_st.standard_t(D_arr_0p5, size=1) +random_st.standard_t(D_arr_like_0p5) +random_st.standard_t(D_arr_like_0p5, size=1) + +random_st.poisson(0.5) +random_st.poisson(0.5, size=None) +random_st.poisson(0.5, size=1) +random_st.poisson(D_arr_0p5) +random_st.poisson(D_arr_0p5, size=1) +random_st.poisson(D_arr_like_0p5) +random_st.poisson(D_arr_like_0p5, size=1) + +random_st.power(0.5) +random_st.power(0.5, size=None) +random_st.power(0.5, size=1) +random_st.power(D_arr_0p5) +random_st.power(D_arr_0p5, size=1) +random_st.power(D_arr_like_0p5) +random_st.power(D_arr_like_0p5, size=1) + +random_st.pareto(0.5) +random_st.pareto(0.5, size=None) +random_st.pareto(0.5, size=1) +random_st.pareto(D_arr_0p5) +random_st.pareto(D_arr_0p5, size=1) +random_st.pareto(D_arr_like_0p5) +random_st.pareto(D_arr_like_0p5, size=1) + +random_st.chisquare(0.5) +random_st.chisquare(0.5, size=None) +random_st.chisquare(0.5, size=1) +random_st.chisquare(D_arr_0p5) +random_st.chisquare(D_arr_0p5, size=1) +random_st.chisquare(D_arr_like_0p5) +random_st.chisquare(D_arr_like_0p5, size=1) + +random_st.exponential(0.5) +random_st.exponential(0.5, size=None) +random_st.exponential(0.5, size=1) +random_st.exponential(D_arr_0p5) +random_st.exponential(D_arr_0p5, size=1) +random_st.exponential(D_arr_like_0p5) +random_st.exponential(D_arr_like_0p5, size=1) + +random_st.geometric(0.5) +random_st.geometric(0.5, size=None) +random_st.geometric(0.5, size=1) +random_st.geometric(D_arr_0p5) +random_st.geometric(D_arr_0p5, size=1) +random_st.geometric(D_arr_like_0p5) +random_st.geometric(D_arr_like_0p5, size=1) + +random_st.logseries(0.5) +random_st.logseries(0.5, size=None) +random_st.logseries(0.5, size=1) +random_st.logseries(D_arr_0p5) +random_st.logseries(D_arr_0p5, size=1) +random_st.logseries(D_arr_like_0p5) +random_st.logseries(D_arr_like_0p5, size=1) + +random_st.rayleigh(0.5) +random_st.rayleigh(0.5, size=None) +random_st.rayleigh(0.5, size=1) +random_st.rayleigh(D_arr_0p5) +random_st.rayleigh(D_arr_0p5, size=1) +random_st.rayleigh(D_arr_like_0p5) +random_st.rayleigh(D_arr_like_0p5, size=1) + +random_st.standard_gamma(0.5) +random_st.standard_gamma(0.5, size=None) +random_st.standard_gamma(0.5, size=1) +random_st.standard_gamma(D_arr_0p5) +random_st.standard_gamma(D_arr_0p5, size=1) +random_st.standard_gamma(D_arr_like_0p5) +random_st.standard_gamma(D_arr_like_0p5, size=1) +random_st.standard_gamma(D_arr_like_0p5, size=1) + +random_st.vonmises(0.5, 0.5) +random_st.vonmises(0.5, 0.5, size=None) +random_st.vonmises(0.5, 0.5, size=1) +random_st.vonmises(D_arr_0p5, 0.5) +random_st.vonmises(0.5, D_arr_0p5) +random_st.vonmises(D_arr_0p5, 0.5, size=1) +random_st.vonmises(0.5, D_arr_0p5, size=1) +random_st.vonmises(D_arr_like_0p5, 0.5) +random_st.vonmises(0.5, D_arr_like_0p5) +random_st.vonmises(D_arr_0p5, D_arr_0p5) +random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5) +random_st.vonmises(D_arr_0p5, D_arr_0p5, size=1) +random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.wald(0.5, 0.5) +random_st.wald(0.5, 0.5, size=None) +random_st.wald(0.5, 0.5, size=1) +random_st.wald(D_arr_0p5, 0.5) +random_st.wald(0.5, D_arr_0p5) +random_st.wald(D_arr_0p5, 0.5, size=1) +random_st.wald(0.5, D_arr_0p5, size=1) +random_st.wald(D_arr_like_0p5, 0.5) +random_st.wald(0.5, D_arr_like_0p5) +random_st.wald(D_arr_0p5, D_arr_0p5) +random_st.wald(D_arr_like_0p5, D_arr_like_0p5) +random_st.wald(D_arr_0p5, D_arr_0p5, size=1) +random_st.wald(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.uniform(0.5, 0.5) +random_st.uniform(0.5, 0.5, size=None) +random_st.uniform(0.5, 0.5, size=1) +random_st.uniform(D_arr_0p5, 0.5) +random_st.uniform(0.5, D_arr_0p5) +random_st.uniform(D_arr_0p5, 0.5, size=1) +random_st.uniform(0.5, D_arr_0p5, size=1) +random_st.uniform(D_arr_like_0p5, 0.5) +random_st.uniform(0.5, D_arr_like_0p5) +random_st.uniform(D_arr_0p5, D_arr_0p5) +random_st.uniform(D_arr_like_0p5, D_arr_like_0p5) +random_st.uniform(D_arr_0p5, D_arr_0p5, size=1) +random_st.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.beta(0.5, 0.5) +random_st.beta(0.5, 0.5, size=None) +random_st.beta(0.5, 0.5, size=1) +random_st.beta(D_arr_0p5, 0.5) +random_st.beta(0.5, D_arr_0p5) +random_st.beta(D_arr_0p5, 0.5, size=1) +random_st.beta(0.5, D_arr_0p5, size=1) +random_st.beta(D_arr_like_0p5, 0.5) +random_st.beta(0.5, D_arr_like_0p5) +random_st.beta(D_arr_0p5, D_arr_0p5) +random_st.beta(D_arr_like_0p5, D_arr_like_0p5) +random_st.beta(D_arr_0p5, D_arr_0p5, size=1) +random_st.beta(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.f(0.5, 0.5) +random_st.f(0.5, 0.5, size=None) +random_st.f(0.5, 0.5, size=1) +random_st.f(D_arr_0p5, 0.5) +random_st.f(0.5, D_arr_0p5) +random_st.f(D_arr_0p5, 0.5, size=1) +random_st.f(0.5, D_arr_0p5, size=1) +random_st.f(D_arr_like_0p5, 0.5) +random_st.f(0.5, D_arr_like_0p5) +random_st.f(D_arr_0p5, D_arr_0p5) +random_st.f(D_arr_like_0p5, D_arr_like_0p5) +random_st.f(D_arr_0p5, D_arr_0p5, size=1) +random_st.f(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.gamma(0.5, 0.5) +random_st.gamma(0.5, 0.5, size=None) +random_st.gamma(0.5, 0.5, size=1) +random_st.gamma(D_arr_0p5, 0.5) +random_st.gamma(0.5, D_arr_0p5) +random_st.gamma(D_arr_0p5, 0.5, size=1) +random_st.gamma(0.5, D_arr_0p5, size=1) +random_st.gamma(D_arr_like_0p5, 0.5) +random_st.gamma(0.5, D_arr_like_0p5) +random_st.gamma(D_arr_0p5, D_arr_0p5) +random_st.gamma(D_arr_like_0p5, D_arr_like_0p5) +random_st.gamma(D_arr_0p5, D_arr_0p5, size=1) +random_st.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.gumbel(0.5, 0.5) +random_st.gumbel(0.5, 0.5, size=None) +random_st.gumbel(0.5, 0.5, size=1) +random_st.gumbel(D_arr_0p5, 0.5) +random_st.gumbel(0.5, D_arr_0p5) +random_st.gumbel(D_arr_0p5, 0.5, size=1) +random_st.gumbel(0.5, D_arr_0p5, size=1) +random_st.gumbel(D_arr_like_0p5, 0.5) +random_st.gumbel(0.5, D_arr_like_0p5) +random_st.gumbel(D_arr_0p5, D_arr_0p5) +random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5) +random_st.gumbel(D_arr_0p5, D_arr_0p5, size=1) +random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.laplace(0.5, 0.5) +random_st.laplace(0.5, 0.5, size=None) +random_st.laplace(0.5, 0.5, size=1) +random_st.laplace(D_arr_0p5, 0.5) +random_st.laplace(0.5, D_arr_0p5) +random_st.laplace(D_arr_0p5, 0.5, size=1) +random_st.laplace(0.5, D_arr_0p5, size=1) +random_st.laplace(D_arr_like_0p5, 0.5) +random_st.laplace(0.5, D_arr_like_0p5) +random_st.laplace(D_arr_0p5, D_arr_0p5) +random_st.laplace(D_arr_like_0p5, D_arr_like_0p5) +random_st.laplace(D_arr_0p5, D_arr_0p5, size=1) +random_st.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.logistic(0.5, 0.5) +random_st.logistic(0.5, 0.5, size=None) +random_st.logistic(0.5, 0.5, size=1) +random_st.logistic(D_arr_0p5, 0.5) +random_st.logistic(0.5, D_arr_0p5) +random_st.logistic(D_arr_0p5, 0.5, size=1) +random_st.logistic(0.5, D_arr_0p5, size=1) +random_st.logistic(D_arr_like_0p5, 0.5) +random_st.logistic(0.5, D_arr_like_0p5) +random_st.logistic(D_arr_0p5, D_arr_0p5) +random_st.logistic(D_arr_like_0p5, D_arr_like_0p5) +random_st.logistic(D_arr_0p5, D_arr_0p5, size=1) +random_st.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.lognormal(0.5, 0.5) +random_st.lognormal(0.5, 0.5, size=None) +random_st.lognormal(0.5, 0.5, size=1) +random_st.lognormal(D_arr_0p5, 0.5) +random_st.lognormal(0.5, D_arr_0p5) +random_st.lognormal(D_arr_0p5, 0.5, size=1) +random_st.lognormal(0.5, D_arr_0p5, size=1) +random_st.lognormal(D_arr_like_0p5, 0.5) +random_st.lognormal(0.5, D_arr_like_0p5) +random_st.lognormal(D_arr_0p5, D_arr_0p5) +random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5) +random_st.lognormal(D_arr_0p5, D_arr_0p5, size=1) +random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.noncentral_chisquare(0.5, 0.5) +random_st.noncentral_chisquare(0.5, 0.5, size=None) +random_st.noncentral_chisquare(0.5, 0.5, size=1) +random_st.noncentral_chisquare(D_arr_0p5, 0.5) +random_st.noncentral_chisquare(0.5, D_arr_0p5) +random_st.noncentral_chisquare(D_arr_0p5, 0.5, size=1) +random_st.noncentral_chisquare(0.5, D_arr_0p5, size=1) +random_st.noncentral_chisquare(D_arr_like_0p5, 0.5) +random_st.noncentral_chisquare(0.5, D_arr_like_0p5) +random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5) +random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5) +random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1) +random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.normal(0.5, 0.5) +random_st.normal(0.5, 0.5, size=None) +random_st.normal(0.5, 0.5, size=1) +random_st.normal(D_arr_0p5, 0.5) +random_st.normal(0.5, D_arr_0p5) +random_st.normal(D_arr_0p5, 0.5, size=1) +random_st.normal(0.5, D_arr_0p5, size=1) +random_st.normal(D_arr_like_0p5, 0.5) +random_st.normal(0.5, D_arr_like_0p5) +random_st.normal(D_arr_0p5, D_arr_0p5) +random_st.normal(D_arr_like_0p5, D_arr_like_0p5) +random_st.normal(D_arr_0p5, D_arr_0p5, size=1) +random_st.normal(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.triangular(0.1, 0.5, 0.9) +random_st.triangular(0.1, 0.5, 0.9, size=None) +random_st.triangular(0.1, 0.5, 0.9, size=1) +random_st.triangular(D_arr_0p1, 0.5, 0.9) +random_st.triangular(0.1, D_arr_0p5, 0.9) +random_st.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1) +random_st.triangular(0.1, D_arr_0p5, 0.9, size=1) +random_st.triangular(D_arr_like_0p1, 0.5, D_arr_0p9) +random_st.triangular(0.5, D_arr_like_0p5, 0.9) +random_st.triangular(D_arr_0p1, D_arr_0p5, 0.9) +random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9) +random_st.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1) +random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1) + +random_st.noncentral_f(0.1, 0.5, 0.9) +random_st.noncentral_f(0.1, 0.5, 0.9, size=None) +random_st.noncentral_f(0.1, 0.5, 0.9, size=1) +random_st.noncentral_f(D_arr_0p1, 0.5, 0.9) +random_st.noncentral_f(0.1, D_arr_0p5, 0.9) +random_st.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1) +random_st.noncentral_f(0.1, D_arr_0p5, 0.9, size=1) +random_st.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9) +random_st.noncentral_f(0.5, D_arr_like_0p5, 0.9) +random_st.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9) +random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9) +random_st.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1) +random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1) + +random_st.binomial(10, 0.5) +random_st.binomial(10, 0.5, size=None) +random_st.binomial(10, 0.5, size=1) +random_st.binomial(I_arr_10, 0.5) +random_st.binomial(10, D_arr_0p5) +random_st.binomial(I_arr_10, 0.5, size=1) +random_st.binomial(10, D_arr_0p5, size=1) +random_st.binomial(I_arr_like_10, 0.5) +random_st.binomial(10, D_arr_like_0p5) +random_st.binomial(I_arr_10, D_arr_0p5) +random_st.binomial(I_arr_like_10, D_arr_like_0p5) +random_st.binomial(I_arr_10, D_arr_0p5, size=1) +random_st.binomial(I_arr_like_10, D_arr_like_0p5, size=1) + +random_st.negative_binomial(10, 0.5) +random_st.negative_binomial(10, 0.5, size=None) +random_st.negative_binomial(10, 0.5, size=1) +random_st.negative_binomial(I_arr_10, 0.5) +random_st.negative_binomial(10, D_arr_0p5) +random_st.negative_binomial(I_arr_10, 0.5, size=1) +random_st.negative_binomial(10, D_arr_0p5, size=1) +random_st.negative_binomial(I_arr_like_10, 0.5) +random_st.negative_binomial(10, D_arr_like_0p5) +random_st.negative_binomial(I_arr_10, D_arr_0p5) +random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5) +random_st.negative_binomial(I_arr_10, D_arr_0p5, size=1) +random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1) + +random_st.hypergeometric(20, 20, 10) +random_st.hypergeometric(20, 20, 10, size=None) +random_st.hypergeometric(20, 20, 10, size=1) +random_st.hypergeometric(I_arr_20, 20, 10) +random_st.hypergeometric(20, I_arr_20, 10) +random_st.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1) +random_st.hypergeometric(20, I_arr_20, 10, size=1) +random_st.hypergeometric(I_arr_like_20, 20, I_arr_10) +random_st.hypergeometric(20, I_arr_like_20, 10) +random_st.hypergeometric(I_arr_20, I_arr_20, 10) +random_st.hypergeometric(I_arr_like_20, I_arr_like_20, 10) +random_st.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1) +random_st.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1) + +random_st.randint(0, 100) +random_st.randint(100) +random_st.randint([100]) +random_st.randint(0, [100]) + +random_st.randint(2, dtype=bool) +random_st.randint(0, 2, dtype=bool) +random_st.randint(I_bool_high_open, dtype=bool) +random_st.randint(I_bool_low, I_bool_high_open, dtype=bool) +random_st.randint(0, I_bool_high_open, dtype=bool) + +random_st.randint(2, dtype=np.bool) +random_st.randint(0, 2, dtype=np.bool) +random_st.randint(I_bool_high_open, dtype=np.bool) +random_st.randint(I_bool_low, I_bool_high_open, dtype=np.bool) +random_st.randint(0, I_bool_high_open, dtype=np.bool) + +random_st.randint(256, dtype="u1") +random_st.randint(0, 256, dtype="u1") +random_st.randint(I_u1_high_open, dtype="u1") +random_st.randint(I_u1_low, I_u1_high_open, dtype="u1") +random_st.randint(0, I_u1_high_open, dtype="u1") + +random_st.randint(256, dtype="uint8") +random_st.randint(0, 256, dtype="uint8") +random_st.randint(I_u1_high_open, dtype="uint8") +random_st.randint(I_u1_low, I_u1_high_open, dtype="uint8") +random_st.randint(0, I_u1_high_open, dtype="uint8") + +random_st.randint(256, dtype=np.uint8) +random_st.randint(0, 256, dtype=np.uint8) +random_st.randint(I_u1_high_open, dtype=np.uint8) +random_st.randint(I_u1_low, I_u1_high_open, dtype=np.uint8) +random_st.randint(0, I_u1_high_open, dtype=np.uint8) + +random_st.randint(65536, dtype="u2") +random_st.randint(0, 65536, dtype="u2") +random_st.randint(I_u2_high_open, dtype="u2") +random_st.randint(I_u2_low, I_u2_high_open, dtype="u2") +random_st.randint(0, I_u2_high_open, dtype="u2") + +random_st.randint(65536, dtype="uint16") +random_st.randint(0, 65536, dtype="uint16") +random_st.randint(I_u2_high_open, dtype="uint16") +random_st.randint(I_u2_low, I_u2_high_open, dtype="uint16") +random_st.randint(0, I_u2_high_open, dtype="uint16") + +random_st.randint(65536, dtype=np.uint16) +random_st.randint(0, 65536, dtype=np.uint16) +random_st.randint(I_u2_high_open, dtype=np.uint16) +random_st.randint(I_u2_low, I_u2_high_open, dtype=np.uint16) +random_st.randint(0, I_u2_high_open, dtype=np.uint16) + +random_st.randint(4294967296, dtype="u4") +random_st.randint(0, 4294967296, dtype="u4") +random_st.randint(I_u4_high_open, dtype="u4") +random_st.randint(I_u4_low, I_u4_high_open, dtype="u4") +random_st.randint(0, I_u4_high_open, dtype="u4") + +random_st.randint(4294967296, dtype="uint32") +random_st.randint(0, 4294967296, dtype="uint32") +random_st.randint(I_u4_high_open, dtype="uint32") +random_st.randint(I_u4_low, I_u4_high_open, dtype="uint32") +random_st.randint(0, I_u4_high_open, dtype="uint32") + +random_st.randint(4294967296, dtype=np.uint32) +random_st.randint(0, 4294967296, dtype=np.uint32) +random_st.randint(I_u4_high_open, dtype=np.uint32) +random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint32) +random_st.randint(0, I_u4_high_open, dtype=np.uint32) + + +random_st.randint(18446744073709551616, dtype="u8") +random_st.randint(0, 18446744073709551616, dtype="u8") +random_st.randint(I_u8_high_open, dtype="u8") +random_st.randint(I_u8_low, I_u8_high_open, dtype="u8") +random_st.randint(0, I_u8_high_open, dtype="u8") + +random_st.randint(18446744073709551616, dtype="uint64") +random_st.randint(0, 18446744073709551616, dtype="uint64") +random_st.randint(I_u8_high_open, dtype="uint64") +random_st.randint(I_u8_low, I_u8_high_open, dtype="uint64") +random_st.randint(0, I_u8_high_open, dtype="uint64") + +random_st.randint(18446744073709551616, dtype=np.uint64) +random_st.randint(0, 18446744073709551616, dtype=np.uint64) +random_st.randint(I_u8_high_open, dtype=np.uint64) +random_st.randint(I_u8_low, I_u8_high_open, dtype=np.uint64) +random_st.randint(0, I_u8_high_open, dtype=np.uint64) + +random_st.randint(128, dtype="i1") +random_st.randint(-128, 128, dtype="i1") +random_st.randint(I_i1_high_open, dtype="i1") +random_st.randint(I_i1_low, I_i1_high_open, dtype="i1") +random_st.randint(-128, I_i1_high_open, dtype="i1") + +random_st.randint(128, dtype="int8") +random_st.randint(-128, 128, dtype="int8") +random_st.randint(I_i1_high_open, dtype="int8") +random_st.randint(I_i1_low, I_i1_high_open, dtype="int8") +random_st.randint(-128, I_i1_high_open, dtype="int8") + +random_st.randint(128, dtype=np.int8) +random_st.randint(-128, 128, dtype=np.int8) +random_st.randint(I_i1_high_open, dtype=np.int8) +random_st.randint(I_i1_low, I_i1_high_open, dtype=np.int8) +random_st.randint(-128, I_i1_high_open, dtype=np.int8) + +random_st.randint(32768, dtype="i2") +random_st.randint(-32768, 32768, dtype="i2") +random_st.randint(I_i2_high_open, dtype="i2") +random_st.randint(I_i2_low, I_i2_high_open, dtype="i2") +random_st.randint(-32768, I_i2_high_open, dtype="i2") +random_st.randint(32768, dtype="int16") +random_st.randint(-32768, 32768, dtype="int16") +random_st.randint(I_i2_high_open, dtype="int16") +random_st.randint(I_i2_low, I_i2_high_open, dtype="int16") +random_st.randint(-32768, I_i2_high_open, dtype="int16") +random_st.randint(32768, dtype=np.int16) +random_st.randint(-32768, 32768, dtype=np.int16) +random_st.randint(I_i2_high_open, dtype=np.int16) +random_st.randint(I_i2_low, I_i2_high_open, dtype=np.int16) +random_st.randint(-32768, I_i2_high_open, dtype=np.int16) + +random_st.randint(2147483648, dtype="i4") +random_st.randint(-2147483648, 2147483648, dtype="i4") +random_st.randint(I_i4_high_open, dtype="i4") +random_st.randint(I_i4_low, I_i4_high_open, dtype="i4") +random_st.randint(-2147483648, I_i4_high_open, dtype="i4") + +random_st.randint(2147483648, dtype="int32") +random_st.randint(-2147483648, 2147483648, dtype="int32") +random_st.randint(I_i4_high_open, dtype="int32") +random_st.randint(I_i4_low, I_i4_high_open, dtype="int32") +random_st.randint(-2147483648, I_i4_high_open, dtype="int32") + +random_st.randint(2147483648, dtype=np.int32) +random_st.randint(-2147483648, 2147483648, dtype=np.int32) +random_st.randint(I_i4_high_open, dtype=np.int32) +random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int32) +random_st.randint(-2147483648, I_i4_high_open, dtype=np.int32) + +random_st.randint(9223372036854775808, dtype="i8") +random_st.randint(-9223372036854775808, 9223372036854775808, dtype="i8") +random_st.randint(I_i8_high_open, dtype="i8") +random_st.randint(I_i8_low, I_i8_high_open, dtype="i8") +random_st.randint(-9223372036854775808, I_i8_high_open, dtype="i8") + +random_st.randint(9223372036854775808, dtype="int64") +random_st.randint(-9223372036854775808, 9223372036854775808, dtype="int64") +random_st.randint(I_i8_high_open, dtype="int64") +random_st.randint(I_i8_low, I_i8_high_open, dtype="int64") +random_st.randint(-9223372036854775808, I_i8_high_open, dtype="int64") + +random_st.randint(9223372036854775808, dtype=np.int64) +random_st.randint(-9223372036854775808, 9223372036854775808, dtype=np.int64) +random_st.randint(I_i8_high_open, dtype=np.int64) +random_st.randint(I_i8_low, I_i8_high_open, dtype=np.int64) +random_st.randint(-9223372036854775808, I_i8_high_open, dtype=np.int64) + +bg: np.random.BitGenerator = random_st._bit_generator + +random_st.bytes(2) + +random_st.choice(5) +random_st.choice(5, 3) +random_st.choice(5, 3, replace=True) +random_st.choice(5, 3, p=[1 / 5] * 5) +random_st.choice(5, 3, p=[1 / 5] * 5, replace=False) + +random_st.choice(["pooh", "rabbit", "piglet", "Christopher"]) +random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3) +random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4) +random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True) +random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4])) + +random_st.dirichlet([0.5, 0.5]) +random_st.dirichlet(np.array([0.5, 0.5])) +random_st.dirichlet(np.array([0.5, 0.5]), size=3) + +random_st.multinomial(20, [1 / 6.0] * 6) +random_st.multinomial(20, np.array([0.5, 0.5])) +random_st.multinomial(20, [1 / 6.0] * 6, size=2) + +random_st.multivariate_normal([0.0], [[1.0]]) +random_st.multivariate_normal([0.0], np.array([[1.0]])) +random_st.multivariate_normal(np.array([0.0]), [[1.0]]) +random_st.multivariate_normal([0.0], np.array([[1.0]])) + +random_st.permutation(10) +random_st.permutation([1, 2, 3, 4]) +random_st.permutation(np.array([1, 2, 3, 4])) +random_st.permutation(D_2D) + +random_st.shuffle(np.arange(10)) +random_st.shuffle([1, 2, 3, 4, 5]) +random_st.shuffle(D_2D) + +np.random.RandomState(SEED_PCG64) +np.random.RandomState(0) +np.random.RandomState([0, 1, 2]) +random_st.__str__() +random_st.__repr__() +random_st_state = random_st.__getstate__() +random_st.__setstate__(random_st_state) +random_st.seed() +random_st.seed(1) +random_st.seed([0, 1]) +random_st_get_state = random_st.get_state() +random_st_get_state_legacy = random_st.get_state(legacy=True) +random_st.set_state(random_st_get_state) + +random_st.rand() +random_st.rand(1) +random_st.rand(1, 2) +random_st.randn() +random_st.randn(1) +random_st.randn(1, 2) +random_st.random_sample() +random_st.random_sample(1) +random_st.random_sample(size=(1, 2)) + +random_st.tomaxint() +random_st.tomaxint(1) +random_st.tomaxint((1,)) + +np.random.mtrand.set_bit_generator(SEED_PCG64) +np.random.mtrand.get_bit_generator() diff --git a/local_packages/numpy/typing/tests/data/pass/recfunctions.py b/local_packages/numpy/typing/tests/data/pass/recfunctions.py new file mode 100644 index 0000000..586f050 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/recfunctions.py @@ -0,0 +1,164 @@ +"""These tests are based on the doctests from `numpy/lib/recfunctions.py`.""" + +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt +from numpy.lib import recfunctions as rfn + + +def test_recursive_fill_fields() -> None: + a: npt.NDArray[np.void] = np.array( + [(1, 10.0), (2, 20.0)], + dtype=[("A", np.int64), ("B", np.float64)], + ) + b = np.zeros((3,), dtype=a.dtype) + out = rfn.recursive_fill_fields(a, b) + assert_type(out, np.ndarray[tuple[int], np.dtype[np.void]]) + + +def test_get_names() -> None: + names: tuple[str | Any, ...] + names = rfn.get_names(np.empty((1,), dtype=[("A", int)]).dtype) + names = rfn.get_names(np.empty((1,), dtype=[("A", int), ("B", float)]).dtype) + + adtype = np.dtype([("a", int), ("b", [("b_a", int), ("b_b", int)])]) + names = rfn.get_names(adtype) + + +def test_get_names_flat() -> None: + names: tuple[str, ...] + names = rfn.get_names_flat(np.empty((1,), dtype=[("A", int)]).dtype) + names = rfn.get_names_flat(np.empty((1,), dtype=[("A", int), ("B", float)]).dtype) + + adtype = np.dtype([("a", int), ("b", [("b_a", int), ("b_b", int)])]) + names = rfn.get_names_flat(adtype) + + +def test_flatten_descr() -> None: + ndtype = np.dtype([("a", " None: + ndtype = np.dtype([ + ("A", int), + ("B", [("B_A", int), ("B_B", [("B_B_A", int), ("B_B_B", int)])]), + ]) + assert_type(rfn.get_fieldstructure(ndtype), dict[str, list[str]]) + + +def test_merge_arrays() -> None: + assert_type( + rfn.merge_arrays(( + np.ones((2,), np.int_), + np.ones((3,), np.float64), + )), + np.recarray[tuple[int], np.dtype[np.void]], + ) + + +def test_drop_fields() -> None: + ndtype = [("a", np.int64), ("b", [("b_a", np.double), ("b_b", np.int64)])] + a = np.ones((3,), dtype=ndtype) + + assert_type( + rfn.drop_fields(a, "a"), + np.ndarray[tuple[int], np.dtype[np.void]], + ) + assert_type( + rfn.drop_fields(a, "a", asrecarray=True), + np.rec.recarray[tuple[int], np.dtype[np.void]], + ) + assert_type( + rfn.rec_drop_fields(a, "a"), + np.rec.recarray[tuple[int], np.dtype[np.void]], + ) + + +def test_rename_fields() -> None: + ndtype = [("a", np.int64), ("b", [("b_a", np.double), ("b_b", np.int64)])] + a = np.ones((3,), dtype=ndtype) + + assert_type( + rfn.rename_fields(a, {"a": "A", "b_b": "B_B"}), + np.ndarray[tuple[int], np.dtype[np.void]], + ) + + +def test_repack_fields() -> None: + dt: np.dtype[np.void] = np.dtype("u1, None: + a = np.zeros(4, dtype=[("a", "i4"), ("b", "f4,u2"), ("c", "f4", 2)]) + assert_type(rfn.structured_to_unstructured(a), npt.NDArray[Any]) + + +def unstructured_to_structured() -> None: + dt: np.dtype[np.void] = np.dtype([("a", "i4"), ("b", "f4,u2"), ("c", "f4", 2)]) + a = np.arange(20, dtype=np.int32).reshape((4, 5)) + assert_type(rfn.unstructured_to_structured(a, dt), npt.NDArray[np.void]) + + +def test_apply_along_fields() -> None: + b = np.ones(4, dtype=[("x", "i4"), ("y", "f4"), ("z", "f8")]) + assert_type( + rfn.apply_along_fields(np.mean, b), + np.ndarray[tuple[int], np.dtype[np.void]], + ) + + +def test_assign_fields_by_name() -> None: + b = np.ones(4, dtype=[("x", "i4"), ("y", "f4"), ("z", "f8")]) + assert_type( + rfn.apply_along_fields(np.mean, b), + np.ndarray[tuple[int], np.dtype[np.void]], + ) + + +def test_require_fields() -> None: + a = np.ones(4, dtype=[("a", "i4"), ("b", "f8"), ("c", "u1")]) + assert_type( + rfn.require_fields(a, [("b", "f4"), ("c", "u1")]), + np.ndarray[tuple[int], np.dtype[np.void]], + ) + + +def test_stack_arrays() -> None: + x = np.zeros((2,), np.int32) + assert_type( + rfn.stack_arrays(x), + np.ndarray[tuple[int], np.dtype[np.int32]], + ) + + z = np.ones((2,), [("A", "|S3"), ("B", float)]) + zz = np.ones((2,), [("A", "|S3"), ("B", np.float64), ("C", np.float64)]) + assert_type( + rfn.stack_arrays((z, zz)), + np.ma.MaskedArray[tuple[Any, ...], np.dtype[np.void]], + ) + + +def test_find_duplicates() -> None: + ndtype = np.dtype([("a", int)]) + + a = np.ma.ones(7).view(ndtype) + assert_type( + rfn.find_duplicates(a), + np.ma.MaskedArray[tuple[int], np.dtype[np.void]], + ) + assert_type( + rfn.find_duplicates(a, ignoremask=True, return_index=True), + tuple[ + np.ma.MaskedArray[tuple[int], np.dtype[np.void]], + np.ndarray[tuple[int], np.dtype[np.int_]], + ], + ) diff --git a/local_packages/numpy/typing/tests/data/pass/scalars.py b/local_packages/numpy/typing/tests/data/pass/scalars.py new file mode 100644 index 0000000..eeb707b --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/scalars.py @@ -0,0 +1,249 @@ +import datetime as dt + +import pytest + +import numpy as np + +b = np.bool() +b_ = np.bool_() +u8 = np.uint64() +i8 = np.int64() +f8 = np.float64() +c16 = np.complex128() +U = np.str_() +S = np.bytes_() + + +# Construction +class D: + def __index__(self) -> int: + return 0 + + +class C: + def __complex__(self) -> complex: + return 3j + + +class B: + def __int__(self) -> int: + return 4 + + +class A: + def __float__(self) -> float: + return 4.0 + + +np.complex64(3j) +np.complex64(A()) +np.complex64(C()) +np.complex128(3j) +np.complex128(C()) +np.complex128(None) +np.complex64("1.2") +np.complex128(b"2j") + +np.int8(4) +np.int16(3.4) +np.int32(4) +np.int64(-1) +np.uint8(B()) +np.uint32() +np.int32("1") +np.int64(b"2") + +np.float16(A()) +np.float32(16) +np.float64(3.0) +np.float64(None) +np.float32("1") +np.float16(b"2.5") + +np.uint64(D()) +np.float32(D()) +np.complex64(D()) + +np.bytes_(b"hello") +np.bytes_("hello", 'utf-8') +np.bytes_("hello", encoding='utf-8') +np.str_("hello") +np.str_(b"hello", 'utf-8') +np.str_(b"hello", encoding='utf-8') + +# Array-ish semantics +np.int8().real +np.int16().imag +np.int32().data +np.int64().flags + +np.uint8().itemsize * 2 +np.uint16().ndim + 1 +np.uint32().strides +np.uint64().shape + +# Time structures +np.datetime64() +np.datetime64(0, "D") +np.datetime64(0, b"D") +np.datetime64(0, ('ms', 3)) +np.datetime64("2019") +np.datetime64(b"2019") +np.datetime64("2019", "D") +np.datetime64("2019", "us") +np.datetime64("2019", "as") +np.datetime64(np.datetime64()) +np.datetime64(np.datetime64()) +np.datetime64(dt.datetime(2000, 5, 3)) +np.datetime64(dt.datetime(2000, 5, 3), "D") +np.datetime64(dt.datetime(2000, 5, 3), "us") +np.datetime64(dt.datetime(2000, 5, 3), "as") +np.datetime64(dt.date(2000, 5, 3)) +np.datetime64(dt.date(2000, 5, 3), "D") +np.datetime64(dt.date(2000, 5, 3), "us") +np.datetime64(dt.date(2000, 5, 3), "as") +np.datetime64(None) +np.datetime64(None, "D") + +np.timedelta64() +np.timedelta64(0) +np.timedelta64(0, "D") +np.timedelta64(0, ('ms', 3)) +np.timedelta64(0, b"D") +np.timedelta64("3") +np.timedelta64(b"5") +np.timedelta64(np.timedelta64(2)) +np.timedelta64(dt.timedelta(2)) +np.timedelta64(None) +np.timedelta64(None, "D") + +np.void(1) +np.void(np.int64(1)) +np.void(True) +np.void(np.bool(True)) +np.void(b"test") +np.void(np.bytes_("test")) +np.void(object(), [("a", "O"), ("b", "O")]) +np.void(object(), dtype=[("a", "O"), ("b", "O")]) + +# Protocols +i8 = np.int64() +u8 = np.uint64() +f8 = np.float64() +c16 = np.complex128() +b = np.bool() +td = np.timedelta64() +U = np.str_("1") +S = np.bytes_("1") +AR = np.array(1, dtype=np.float64) + +int(i8) +int(u8) +int(f8) +int(b) +int(td) +int(U) +int(S) +int(AR) +with pytest.warns(np.exceptions.ComplexWarning): + int(c16) + +float(i8) +float(u8) +float(f8) +float(b_) +float(td) +float(U) +float(S) +float(AR) +with pytest.warns(np.exceptions.ComplexWarning): + float(c16) + +complex(i8) +complex(u8) +complex(f8) +complex(c16) +complex(b_) +complex(td) +complex(U) +complex(AR) + + +# Misc +c16.dtype +c16.real +c16.imag +c16.real.real +c16.real.imag +c16.ndim +c16.size +c16.itemsize +c16.shape +c16.strides +c16.squeeze() +c16.byteswap() +c16.transpose() + +# Aliases +np.byte() +np.short() +np.intc() +np.intp() +np.int_() +np.longlong() + +np.ubyte() +np.ushort() +np.uintc() +np.uintp() +np.uint() +np.ulonglong() + +np.half() +np.single() +np.double() +np.longdouble() + +np.csingle() +np.cdouble() +np.clongdouble() + +b.item() +i8.item() +u8.item() +f8.item() +c16.item() +U.item() +S.item() + +b.tolist() +i8.tolist() +u8.tolist() +f8.tolist() +c16.tolist() +U.tolist() +S.tolist() + +b.ravel() +i8.ravel() +u8.ravel() +f8.ravel() +c16.ravel() +U.ravel() +S.ravel() + +b.flatten() +i8.flatten() +u8.flatten() +f8.flatten() +c16.flatten() +U.flatten() +S.flatten() + +b.reshape(1) +i8.reshape(1) +u8.reshape(1) +f8.reshape(1) +c16.reshape(1) +U.reshape(1) +S.reshape(1) diff --git a/local_packages/numpy/typing/tests/data/pass/shape.py b/local_packages/numpy/typing/tests/data/pass/shape.py new file mode 100644 index 0000000..e3b497b --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/shape.py @@ -0,0 +1,19 @@ +from typing import Any, NamedTuple + +import numpy as np + + +# Subtype of tuple[int, int] +class XYGrid(NamedTuple): + x_axis: int + y_axis: int + +# Test variance of _ShapeT_co +def accepts_2d(a: np.ndarray[tuple[int, int], Any]) -> None: + return None + + +accepts_2d(np.empty(XYGrid(2, 2))) +accepts_2d(np.zeros(XYGrid(2, 2), dtype=int)) +accepts_2d(np.ones(XYGrid(2, 2), dtype=int)) +accepts_2d(np.full(XYGrid(2, 2), fill_value=5, dtype=int)) diff --git a/local_packages/numpy/typing/tests/data/pass/simple.py b/local_packages/numpy/typing/tests/data/pass/simple.py new file mode 100644 index 0000000..003e9ee --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/simple.py @@ -0,0 +1,170 @@ +"""Simple expression that should pass with mypy.""" +import operator +from collections.abc import Iterable + +import numpy as np +import numpy.typing as npt + +# Basic checks +array = np.array([1, 2]) + + +def ndarray_func(x: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: + return x + + +ndarray_func(np.array([1, 2], dtype=np.float64)) +array == 1 +array.dtype == float + +# Dtype construction +np.dtype(float) +np.dtype(np.float64) +np.dtype(None) +np.dtype("float64") +np.dtype(np.dtype(float)) +np.dtype(("U", 10)) +np.dtype((np.int32, (2, 2))) +# Define the arguments on the previous line to prevent bidirectional +# type inference in mypy from broadening the types. +two_tuples_dtype = [("R", "u1"), ("G", "u1"), ("B", "u1")] +np.dtype(two_tuples_dtype) + +three_tuples_dtype = [("R", "u1", 2)] +np.dtype(three_tuples_dtype) + +mixed_tuples_dtype = [("R", "u1"), ("G", np.str_, 1)] +np.dtype(mixed_tuples_dtype) + +shape_tuple_dtype = [("R", "u1", (2, 2))] +np.dtype(shape_tuple_dtype) + +shape_like_dtype = [("R", "u1", (2, 2)), ("G", np.str_, 1)] +np.dtype(shape_like_dtype) + +object_dtype = [("field1", object)] +np.dtype(object_dtype) + +np.dtype((np.int32, (np.int8, 4))) + +# Dtype comparison +np.dtype(float) == float +np.dtype(float) != np.float64 +np.dtype(float) < None +np.dtype(float) <= "float64" +np.dtype(float) > np.dtype(float) +np.dtype(float) >= np.dtype(("U", 10)) + +# Iteration and indexing +def iterable_func(x: Iterable[object]) -> Iterable[object]: + return x + + +iterable_func(array) +list(array) +iter(array) +zip(array, array) +array[1] +array[:] +array[...] +array[:] = 0 + +array_2d = np.ones((3, 3)) +array_2d[:2, :2] +array_2d[:2, :2] = 0 +array_2d[..., 0] +array_2d[..., 0] = 2 +array_2d[-1, -1] = None + +array_obj = np.zeros(1, dtype=np.object_) +array_obj[0] = slice(None) + +# Other special methods +len(array) +str(array) +array_scalar = np.array(1) +int(array_scalar) +float(array_scalar) +complex(array_scalar) +bytes(array_scalar) +operator.index(array_scalar) +bool(array_scalar) + +# comparisons +array < 1 +array <= 1 +array == 1 +array != 1 +array > 1 +array >= 1 +1 < array +1 <= array +1 == array +1 != array +1 > array +1 >= array + +# binary arithmetic +array + 1 +1 + array +array += 1 + +array - 1 +1 - array +array -= 1 + +array * 1 +1 * array +array *= 1 + +nonzero_array = np.array([1, 2]) +array / 1 +1 / nonzero_array +float_array = np.array([1.0, 2.0]) +float_array /= 1 + +array // 1 +1 // nonzero_array +array //= 1 + +array % 1 +1 % nonzero_array +array %= 1 + +divmod(array, 1) +divmod(1, nonzero_array) + +array ** 1 +1 ** array +array **= 1 + +array << 1 +1 << array +array <<= 1 + +array >> 1 +1 >> array +array >>= 1 + +array & 1 +1 & array +array &= 1 + +array ^ 1 +1 ^ array +array ^= 1 + +array | 1 +1 | array +array |= 1 + +# unary arithmetic +-array ++array +abs(array) +~array + +# Other methods +array.transpose() + +array @ array diff --git a/local_packages/numpy/typing/tests/data/pass/ufunc_config.py b/local_packages/numpy/typing/tests/data/pass/ufunc_config.py new file mode 100644 index 0000000..778e1b5 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/ufunc_config.py @@ -0,0 +1,64 @@ +"""Typing tests for `numpy._core._ufunc_config`.""" + +import numpy as np + + +def func1(a: str, b: int) -> None: + return None + + +def func2(a: str, b: int, c: float = 1.0) -> None: + return None + + +def func3(a: str, b: int) -> int: + return 0 + + +class Write1: + def write(self, a: str) -> None: + return None + + +class Write2: + def write(self, a: str, b: int = 1) -> None: + return None + + +class Write3: + def write(self, a: str) -> int: + return 0 + + +_err_default = np.geterr() +_bufsize_default = np.getbufsize() +_errcall_default = np.geterrcall() + +try: + np.seterr(all=None) + np.seterr(divide="ignore") + np.seterr(over="warn") + np.seterr(under="call") + np.seterr(invalid="raise") + np.geterr() + + np.setbufsize(4096) + np.getbufsize() + + np.seterrcall(func1) + np.seterrcall(func2) + np.seterrcall(func3) + np.seterrcall(Write1()) + np.seterrcall(Write2()) + np.seterrcall(Write3()) + np.geterrcall() + + with np.errstate(call=func1, all="call"): + pass + with np.errstate(call=Write1(), divide="log", over="log"): + pass + +finally: + np.seterr(**_err_default) + np.setbufsize(_bufsize_default) + np.seterrcall(_errcall_default) diff --git a/local_packages/numpy/typing/tests/data/pass/ufunclike.py b/local_packages/numpy/typing/tests/data/pass/ufunclike.py new file mode 100644 index 0000000..7e556d1 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/ufunclike.py @@ -0,0 +1,52 @@ +from __future__ import annotations + +from typing import Any + +import numpy as np + + +class Object: + def __ceil__(self) -> Object: + return self + + def __floor__(self) -> Object: + return self + + def __trunc__(self) -> Object: + return self + + def __ge__(self, value: object) -> bool: + return True + + def __array__(self, dtype: np.typing.DTypeLike | None = None, + copy: bool | None = None) -> np.ndarray[Any, np.dtype[np.object_]]: + ret = np.empty((), dtype=object) + ret[()] = self + return ret + + +AR_LIKE_b = [True, True, False] +AR_LIKE_u = [np.uint32(1), np.uint32(2), np.uint32(3)] +AR_LIKE_i = [1, 2, 3] +AR_LIKE_f = [1.0, 2.0, 3.0] +AR_LIKE_O = [Object(), Object(), Object()] +AR_U: np.ndarray[Any, np.dtype[np.str_]] = np.zeros(3, dtype="U5") + +np.fix(AR_LIKE_b) # type: ignore[deprecated] +np.fix(AR_LIKE_u) # type: ignore[deprecated] +np.fix(AR_LIKE_i) # type: ignore[deprecated] +np.fix(AR_LIKE_f) # type: ignore[deprecated] +np.fix(AR_LIKE_O) # type: ignore[deprecated] +np.fix(AR_LIKE_f, out=AR_U) # type: ignore[deprecated] + +np.isposinf(AR_LIKE_b) +np.isposinf(AR_LIKE_u) +np.isposinf(AR_LIKE_i) +np.isposinf(AR_LIKE_f) +np.isposinf(AR_LIKE_f, out=AR_U) + +np.isneginf(AR_LIKE_b) +np.isneginf(AR_LIKE_u) +np.isneginf(AR_LIKE_i) +np.isneginf(AR_LIKE_f) +np.isneginf(AR_LIKE_f, out=AR_U) diff --git a/local_packages/numpy/typing/tests/data/pass/ufuncs.py b/local_packages/numpy/typing/tests/data/pass/ufuncs.py new file mode 100644 index 0000000..dbc61bb --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/ufuncs.py @@ -0,0 +1,16 @@ +import numpy as np + +np.sin(1) +np.sin([1, 2, 3]) +np.sin(1, out=np.empty(1)) +np.matmul(np.ones((2, 2, 2)), np.ones((2, 2, 2)), axes=[(0, 1), (0, 1), (0, 1)]) +np.sin(1, signature="D->D") +# NOTE: `np.generic` subclasses are not guaranteed to support addition; +# re-enable this we can infer the exact return type of `np.sin(...)`. +# +# np.sin(1) + np.sin(1) +np.sin.types[0] +np.sin.__name__ +np.sin.__doc__ + +np.abs(np.array([1])) diff --git a/local_packages/numpy/typing/tests/data/pass/warnings_and_errors.py b/local_packages/numpy/typing/tests/data/pass/warnings_and_errors.py new file mode 100644 index 0000000..c351afb --- /dev/null +++ b/local_packages/numpy/typing/tests/data/pass/warnings_and_errors.py @@ -0,0 +1,6 @@ +import numpy.exceptions as ex + +ex.AxisError("test") +ex.AxisError(1, ndim=2) +ex.AxisError(1, ndim=2, msg_prefix="error") +ex.AxisError(1, ndim=2, msg_prefix=None) diff --git a/local_packages/numpy/typing/tests/data/reveal/arithmetic.pyi b/local_packages/numpy/typing/tests/data/reveal/arithmetic.pyi new file mode 100644 index 0000000..491bce4 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -0,0 +1,719 @@ +import datetime as dt +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt +from numpy._typing import _64Bit, _128Bit + +b: bool +c: complex +f: float +i: int + +c16: np.complex128 +c8: np.complex64 + +# Can't directly import `np.float128` as it is not available on all platforms +f16: np.floating[_128Bit] +f8: np.float64 +f4: np.float32 + +i8: np.int64 +i4: np.int32 + +u8: np.uint64 +u4: np.uint32 + +b_: np.bool + +M8: np.datetime64 +M8_none: np.datetime64[None] +M8_date: np.datetime64[dt.date] +M8_time: np.datetime64[dt.datetime] +M8_int: np.datetime64[int] +date: dt.date +time: dt.datetime + +m8: np.timedelta64 +m8_none: np.timedelta64[None] +m8_int: np.timedelta64[int] +m8_delta: np.timedelta64[dt.timedelta] +delta: dt.timedelta + +AR_b: npt.NDArray[np.bool] +AR_u: npt.NDArray[np.uint32] +AR_i: npt.NDArray[np.int64] +AR_f: npt.NDArray[np.float64] +AR_c: npt.NDArray[np.complex128] +AR_m: npt.NDArray[np.timedelta64] +AR_M: npt.NDArray[np.datetime64] +AR_O: npt.NDArray[np.object_] +AR_S: npt.NDArray[np.bytes_] +AR_U: npt.NDArray[np.str_] +AR_T: np.ndarray[tuple[Any, ...], np.dtypes.StringDType] +AR_floating: npt.NDArray[np.floating] +AR_number: npt.NDArray[np.number] +AR_Any: npt.NDArray[Any] + +AR_LIKE_b: list[bool] +AR_LIKE_u: list[np.uint32] +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_c: list[complex] +AR_LIKE_m: list[np.timedelta64] +AR_LIKE_M: list[np.datetime64] +AR_LIKE_O: list[np.object_] + +# Array subtraction + +assert_type(AR_number - AR_number, npt.NDArray[np.number]) + +assert_type(AR_b - AR_LIKE_u, npt.NDArray[np.uint32]) +assert_type(AR_b - AR_LIKE_i, npt.NDArray[np.signedinteger]) +assert_type(AR_b - AR_LIKE_f, npt.NDArray[np.floating]) +assert_type(AR_b - AR_LIKE_c, npt.NDArray[np.complexfloating]) +assert_type(AR_b - AR_LIKE_m, npt.NDArray[np.timedelta64]) +assert_type(AR_b - AR_LIKE_O, Any) + +assert_type(AR_LIKE_u - AR_b, npt.NDArray[np.uint32]) +assert_type(AR_LIKE_i - AR_b, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_f - AR_b, npt.NDArray[np.floating]) +assert_type(AR_LIKE_c - AR_b, npt.NDArray[np.complexfloating]) +assert_type(AR_LIKE_m - AR_b, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_M - AR_b, npt.NDArray[np.datetime64]) +assert_type(AR_LIKE_O - AR_b, Any) + +assert_type(AR_u - AR_LIKE_b, npt.NDArray[np.uint32]) +assert_type(AR_u - AR_LIKE_u, npt.NDArray[np.unsignedinteger]) +assert_type(AR_u - AR_LIKE_i, npt.NDArray[np.signedinteger]) +assert_type(AR_u - AR_LIKE_f, npt.NDArray[np.floating]) +assert_type(AR_u - AR_LIKE_c, npt.NDArray[np.complexfloating]) +assert_type(AR_u - AR_LIKE_m, npt.NDArray[np.timedelta64]) +assert_type(AR_u - AR_LIKE_O, Any) + +assert_type(AR_LIKE_b - AR_u, npt.NDArray[np.uint32]) +assert_type(AR_LIKE_u - AR_u, npt.NDArray[np.unsignedinteger]) +assert_type(AR_LIKE_i - AR_u, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_f - AR_u, npt.NDArray[np.floating]) +assert_type(AR_LIKE_c - AR_u, npt.NDArray[np.complexfloating]) +assert_type(AR_LIKE_m - AR_u, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_M - AR_u, npt.NDArray[np.datetime64]) +assert_type(AR_LIKE_O - AR_u, Any) + +assert_type(AR_i - AR_LIKE_b, npt.NDArray[np.int64]) +assert_type(AR_i - AR_LIKE_u, npt.NDArray[np.signedinteger]) +assert_type(AR_i - AR_LIKE_i, npt.NDArray[np.signedinteger]) +assert_type(AR_i - AR_LIKE_f, npt.NDArray[np.floating]) +assert_type(AR_i - AR_LIKE_c, npt.NDArray[np.complexfloating]) +assert_type(AR_i - AR_LIKE_m, npt.NDArray[np.timedelta64]) +assert_type(AR_i - AR_LIKE_O, Any) + +assert_type(AR_LIKE_b - AR_i, npt.NDArray[np.int64]) +assert_type(AR_LIKE_u - AR_i, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_i - AR_i, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_f - AR_i, npt.NDArray[np.floating]) +assert_type(AR_LIKE_c - AR_i, npt.NDArray[np.complexfloating]) +assert_type(AR_LIKE_m - AR_i, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_M - AR_i, npt.NDArray[np.datetime64]) +assert_type(AR_LIKE_O - AR_i, Any) + +assert_type(AR_f - AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_f - AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_f - AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_f - AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_f - AR_LIKE_c, npt.NDArray[np.complexfloating]) +assert_type(AR_f - AR_LIKE_O, Any) + +assert_type(AR_LIKE_b - AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u - AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i - AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f - AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_c - AR_f, npt.NDArray[np.complexfloating]) +assert_type(AR_LIKE_O - AR_f, Any) + +assert_type(AR_c - AR_LIKE_b, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_u, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_i, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_f, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_c, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_O, Any) + +assert_type(AR_LIKE_b - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_u - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_i - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_f - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_c - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_O - AR_c, Any) + +assert_type(AR_m - AR_LIKE_b, npt.NDArray[np.timedelta64]) +assert_type(AR_m - AR_LIKE_u, npt.NDArray[np.timedelta64]) +assert_type(AR_m - AR_LIKE_i, npt.NDArray[np.timedelta64]) +assert_type(AR_m - AR_LIKE_m, npt.NDArray[np.timedelta64]) +assert_type(AR_m - AR_LIKE_O, Any) + +assert_type(AR_LIKE_b - AR_m, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_u - AR_m, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_i - AR_m, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_m - AR_m, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_M - AR_m, npt.NDArray[np.datetime64]) +assert_type(AR_LIKE_O - AR_m, Any) + +assert_type(AR_M - AR_LIKE_b, npt.NDArray[np.datetime64]) +assert_type(AR_M - AR_LIKE_u, npt.NDArray[np.datetime64]) +assert_type(AR_M - AR_LIKE_i, npt.NDArray[np.datetime64]) +assert_type(AR_M - AR_LIKE_m, npt.NDArray[np.datetime64]) +assert_type(AR_M - AR_LIKE_M, npt.NDArray[np.timedelta64]) +assert_type(AR_M - AR_LIKE_O, Any) + +assert_type(AR_LIKE_M - AR_M, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O - AR_M, Any) + +assert_type(AR_O - AR_LIKE_b, Any) +assert_type(AR_O - AR_LIKE_u, Any) +assert_type(AR_O - AR_LIKE_i, Any) +assert_type(AR_O - AR_LIKE_f, Any) +assert_type(AR_O - AR_LIKE_c, Any) +assert_type(AR_O - AR_LIKE_m, Any) +assert_type(AR_O - AR_LIKE_M, Any) +assert_type(AR_O - AR_LIKE_O, Any) + +assert_type(AR_LIKE_b - AR_O, Any) +assert_type(AR_LIKE_u - AR_O, Any) +assert_type(AR_LIKE_i - AR_O, Any) +assert_type(AR_LIKE_f - AR_O, Any) +assert_type(AR_LIKE_c - AR_O, Any) +assert_type(AR_LIKE_m - AR_O, Any) +assert_type(AR_LIKE_M - AR_O, Any) +assert_type(AR_LIKE_O - AR_O, Any) + +# Array "true" division + +assert_type(AR_f / b, npt.NDArray[np.float64]) +assert_type(AR_f / i, npt.NDArray[np.float64]) +assert_type(AR_f / f, npt.NDArray[np.float64]) + +assert_type(b / AR_f, npt.NDArray[np.float64]) +assert_type(i / AR_f, npt.NDArray[np.float64]) +assert_type(f / AR_f, npt.NDArray[np.float64]) + +assert_type(AR_b / AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_b / AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_b / AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_b / AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_b / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_b, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u / AR_b, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i / AR_b, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f / AR_b, npt.NDArray[np.float64]) +assert_type(AR_LIKE_O / AR_b, Any) + +assert_type(AR_u / AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_u / AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_u / AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_u / AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_u / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_u, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u / AR_u, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i / AR_u, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f / AR_u, npt.NDArray[np.float64]) +assert_type(AR_LIKE_m / AR_u, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O / AR_u, Any) + +assert_type(AR_i / AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_i / AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_i / AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_i / AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_i / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_i, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u / AR_i, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i / AR_i, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f / AR_i, npt.NDArray[np.float64]) +assert_type(AR_LIKE_m / AR_i, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O / AR_i, Any) + +assert_type(AR_f / AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_f / AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_f / AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_f / AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_f / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u / AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i / AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f / AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_m / AR_f, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O / AR_f, Any) + +assert_type(AR_m / AR_LIKE_u, npt.NDArray[np.timedelta64]) +assert_type(AR_m / AR_LIKE_i, npt.NDArray[np.timedelta64]) +assert_type(AR_m / AR_LIKE_f, npt.NDArray[np.timedelta64]) +assert_type(AR_m / AR_LIKE_m, npt.NDArray[np.float64]) +assert_type(AR_m / AR_LIKE_O, Any) + +assert_type(AR_LIKE_m / AR_m, npt.NDArray[np.float64]) +assert_type(AR_LIKE_O / AR_m, Any) + +assert_type(AR_O / AR_LIKE_b, Any) +assert_type(AR_O / AR_LIKE_u, Any) +assert_type(AR_O / AR_LIKE_i, Any) +assert_type(AR_O / AR_LIKE_f, Any) +assert_type(AR_O / AR_LIKE_m, Any) +assert_type(AR_O / AR_LIKE_M, Any) +assert_type(AR_O / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_O, Any) +assert_type(AR_LIKE_u / AR_O, Any) +assert_type(AR_LIKE_i / AR_O, Any) +assert_type(AR_LIKE_f / AR_O, Any) +assert_type(AR_LIKE_m / AR_O, Any) +assert_type(AR_LIKE_M / AR_O, Any) +assert_type(AR_LIKE_O / AR_O, Any) + +# Array floor division + +assert_type(AR_b // AR_LIKE_b, npt.NDArray[np.int8]) +assert_type(AR_b // AR_LIKE_u, npt.NDArray[np.uint32]) +assert_type(AR_b // AR_LIKE_i, npt.NDArray[np.signedinteger]) +assert_type(AR_b // AR_LIKE_f, npt.NDArray[np.floating]) +assert_type(AR_b // AR_LIKE_O, Any) + +assert_type(AR_LIKE_b // AR_b, npt.NDArray[np.int8]) +assert_type(AR_LIKE_u // AR_b, npt.NDArray[np.uint32]) +assert_type(AR_LIKE_i // AR_b, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_f // AR_b, npt.NDArray[np.floating]) +assert_type(AR_LIKE_O // AR_b, Any) + +assert_type(AR_u // AR_LIKE_b, npt.NDArray[np.uint32]) +assert_type(AR_u // AR_LIKE_u, npt.NDArray[np.unsignedinteger]) +assert_type(AR_u // AR_LIKE_i, npt.NDArray[np.signedinteger]) +assert_type(AR_u // AR_LIKE_f, npt.NDArray[np.floating]) +assert_type(AR_u // AR_LIKE_O, Any) + +assert_type(AR_LIKE_b // AR_u, npt.NDArray[np.uint32]) +assert_type(AR_LIKE_u // AR_u, npt.NDArray[np.unsignedinteger]) +assert_type(AR_LIKE_i // AR_u, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_f // AR_u, npt.NDArray[np.floating]) +assert_type(AR_LIKE_m // AR_u, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O // AR_u, Any) + +assert_type(AR_i // AR_LIKE_b, npt.NDArray[np.int64]) +assert_type(AR_i // AR_LIKE_u, npt.NDArray[np.signedinteger]) +assert_type(AR_i // AR_LIKE_i, npt.NDArray[np.signedinteger]) +assert_type(AR_i // AR_LIKE_f, npt.NDArray[np.floating]) +assert_type(AR_i // AR_LIKE_O, Any) + +assert_type(AR_LIKE_b // AR_i, npt.NDArray[np.int64]) +assert_type(AR_LIKE_u // AR_i, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_i // AR_i, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_f // AR_i, npt.NDArray[np.floating]) +assert_type(AR_LIKE_m // AR_i, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O // AR_i, Any) + +assert_type(AR_f // AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_f // AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_f // AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_f // AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_f // AR_LIKE_O, Any) + +assert_type(AR_LIKE_b // AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u // AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i // AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f // AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_m // AR_f, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O // AR_f, Any) + +assert_type(AR_m // AR_LIKE_u, npt.NDArray[np.timedelta64]) +assert_type(AR_m // AR_LIKE_i, npt.NDArray[np.timedelta64]) +assert_type(AR_m // AR_LIKE_f, npt.NDArray[np.timedelta64]) +assert_type(AR_m // AR_LIKE_m, npt.NDArray[np.int64]) +assert_type(AR_m // AR_LIKE_O, Any) + +assert_type(AR_LIKE_m // AR_m, npt.NDArray[np.int64]) +assert_type(AR_LIKE_O // AR_m, Any) + +assert_type(AR_O // AR_LIKE_b, Any) +assert_type(AR_O // AR_LIKE_u, Any) +assert_type(AR_O // AR_LIKE_i, Any) +assert_type(AR_O // AR_LIKE_f, Any) +assert_type(AR_O // AR_LIKE_m, Any) +assert_type(AR_O // AR_LIKE_M, Any) +assert_type(AR_O // AR_LIKE_O, Any) + +assert_type(AR_LIKE_b // AR_O, Any) +assert_type(AR_LIKE_u // AR_O, Any) +assert_type(AR_LIKE_i // AR_O, Any) +assert_type(AR_LIKE_f // AR_O, Any) +assert_type(AR_LIKE_m // AR_O, Any) +assert_type(AR_LIKE_M // AR_O, Any) +assert_type(AR_LIKE_O // AR_O, Any) + +# unary ops + +assert_type(-f16, np.floating[_128Bit]) +assert_type(-c16, np.complex128) +assert_type(-c8, np.complex64) +assert_type(-f8, np.float64) +assert_type(-f4, np.float32) +assert_type(-i8, np.int64) +assert_type(-i4, np.int32) +assert_type(-u8, np.uint64) +assert_type(-u4, np.uint32) +assert_type(-m8, np.timedelta64) +assert_type(-m8_none, np.timedelta64[None]) +assert_type(-m8_int, np.timedelta64[int]) +assert_type(-m8_delta, np.timedelta64[dt.timedelta]) +assert_type(-AR_f, npt.NDArray[np.float64]) + +assert_type(+f16, np.floating[_128Bit]) +assert_type(+c16, np.complex128) +assert_type(+c8, np.complex64) +assert_type(+f8, np.float64) +assert_type(+f4, np.float32) +assert_type(+i8, np.int64) +assert_type(+i4, np.int32) +assert_type(+u8, np.uint64) +assert_type(+u4, np.uint32) +assert_type(+m8_none, np.timedelta64[None]) +assert_type(+m8_int, np.timedelta64[int]) +assert_type(+m8_delta, np.timedelta64[dt.timedelta]) +assert_type(+AR_f, npt.NDArray[np.float64]) + +assert_type(abs(f16), np.floating[_128Bit]) +assert_type(abs(c16), np.float64) +assert_type(abs(c8), np.float32) +assert_type(abs(f8), np.float64) +assert_type(abs(f4), np.float32) +assert_type(abs(i8), np.int64) +assert_type(abs(i4), np.int32) +assert_type(abs(u8), np.uint64) +assert_type(abs(u4), np.uint32) +assert_type(abs(m8), np.timedelta64) +assert_type(abs(m8_none), np.timedelta64[None]) +assert_type(abs(m8_int), np.timedelta64[int]) +assert_type(abs(m8_delta), np.timedelta64[dt.timedelta]) +assert_type(abs(b_), np.bool) +assert_type(abs(AR_O), npt.NDArray[np.object_]) + +# Time structures + +assert_type(M8 + m8, np.datetime64) +assert_type(M8 + i, np.datetime64) +assert_type(M8 + i8, np.datetime64) +assert_type(M8 - M8, np.timedelta64) +assert_type(M8 - i, np.datetime64) +assert_type(M8 - i8, np.datetime64) + +assert_type(M8_none + m8, np.datetime64[None]) +assert_type(M8_none + i, np.datetime64[None]) +assert_type(M8_none + i8, np.datetime64[None]) +assert_type(M8_none - M8, np.timedelta64[None]) +assert_type(M8_none - m8, np.datetime64[None]) +assert_type(M8_none - i, np.datetime64[None]) +assert_type(M8_none - i8, np.datetime64[None]) + +assert_type(m8 + m8, np.timedelta64) +assert_type(m8 + i, np.timedelta64) +assert_type(m8 + i8, np.timedelta64) +assert_type(m8 - m8, np.timedelta64) +assert_type(m8 - i, np.timedelta64) +assert_type(m8 - i8, np.timedelta64) +assert_type(m8 * f, np.timedelta64) +assert_type(m8 * f4, np.timedelta64) +assert_type(m8 * np.True_, np.timedelta64) +assert_type(m8 / f, np.timedelta64) +assert_type(m8 / f4, np.timedelta64) +assert_type(m8 / m8, np.float64) +assert_type(m8 // m8, np.int64) +assert_type(m8 % m8, np.timedelta64) +assert_type(divmod(m8, m8), tuple[np.int64, np.timedelta64]) + +assert_type(m8_none + m8, np.timedelta64[None]) +assert_type(m8_none + i, np.timedelta64[None]) +assert_type(m8_none + i8, np.timedelta64[None]) +assert_type(m8_none - i, np.timedelta64[None]) +assert_type(m8_none - i8, np.timedelta64[None]) + +assert_type(m8_int + i, np.timedelta64[int]) +assert_type(m8_int + m8_delta, np.timedelta64[int]) +assert_type(m8_int + m8, np.timedelta64[int | None]) +assert_type(m8_int - i, np.timedelta64[int]) +assert_type(m8_int - m8_delta, np.timedelta64[int]) +assert_type(m8_int - m8, np.timedelta64[int | None]) + +assert_type(m8_delta + date, dt.date) +assert_type(m8_delta + time, dt.datetime) +assert_type(m8_delta + delta, dt.timedelta) +assert_type(m8_delta - delta, dt.timedelta) +assert_type(m8_delta / delta, float) +assert_type(m8_delta // delta, int) +assert_type(m8_delta % delta, dt.timedelta) +assert_type(divmod(m8_delta, delta), tuple[int, dt.timedelta]) + +# boolean + +assert_type(b_ / b, np.float64) +assert_type(b_ / b_, np.float64) +assert_type(b_ / i, np.float64) +assert_type(b_ / i8, np.float64) +assert_type(b_ / i4, np.float64) +assert_type(b_ / u8, np.float64) +assert_type(b_ / u4, np.float64) +assert_type(b_ / f, np.float64) +assert_type(b_ / f16, np.floating[_128Bit]) +assert_type(b_ / f8, np.float64) +assert_type(b_ / f4, np.float32) +assert_type(b_ / c, np.complex128) +assert_type(b_ / c16, np.complex128) +assert_type(b_ / c8, np.complex64) + +assert_type(b / b_, np.float64) +assert_type(b_ / b_, np.float64) +assert_type(i / b_, np.float64) +assert_type(i8 / b_, np.float64) +assert_type(i4 / b_, np.float64) +assert_type(u8 / b_, np.float64) +assert_type(u4 / b_, np.float64) +assert_type(f / b_, np.float64) +assert_type(f16 / b_, np.floating[_128Bit]) +assert_type(f8 / b_, np.float64) +assert_type(f4 / b_, np.float32) +assert_type(c / b_, np.complex128) +assert_type(c16 / b_, np.complex128) +assert_type(c8 / b_, np.complex64) + +# Complex + +assert_type(c16 + f16, np.complexfloating) +assert_type(c16 + c16, np.complex128) +assert_type(c16 + f8, np.complex128) +assert_type(c16 + i8, np.complex128) +assert_type(c16 + c8, np.complex128) +assert_type(c16 + f4, np.complex128) +assert_type(c16 + i4, np.complex128) +assert_type(c16 + b_, np.complex128) +assert_type(c16 + b, np.complex128) +assert_type(c16 + c, np.complex128) +assert_type(c16 + f, np.complex128) +assert_type(c16 + AR_f, npt.NDArray[np.complex128]) + +assert_type(f16 + c16, np.complexfloating) +assert_type(c16 + c16, np.complex128) +assert_type(f8 + c16, np.complex128) +assert_type(i8 + c16, np.complex128) +assert_type(c8 + c16, np.complex128 | np.complex64) +assert_type(f4 + c16, np.complexfloating) +assert_type(i4 + c16, np.complex128) +assert_type(b_ + c16, np.complex128) +assert_type(b + c16, np.complex128) +assert_type(c + c16, np.complex128) +assert_type(f + c16, np.complex128) +assert_type(AR_f + c16, npt.NDArray[np.complex128]) + +assert_type(c8 + f16, np.complex64 | np.complexfloating[_128Bit, _128Bit]) +assert_type(c8 + c16, np.complex64 | np.complex128) +assert_type(c8 + f8, np.complex64 | np.complex128) +assert_type(c8 + i8, np.complex64 | np.complexfloating[_64Bit, _64Bit]) +assert_type(c8 + c8, np.complex64) +assert_type(c8 + f4, np.complex64) +assert_type(c8 + i4, np.complex64) +assert_type(c8 + b_, np.complex64) +assert_type(c8 + b, np.complex64) +assert_type(c8 + c, np.complex64 | np.complex128) +assert_type(c8 + f, np.complex64 | np.complex128) +assert_type(c8 + AR_f, npt.NDArray[np.complexfloating]) + +assert_type(f16 + c8, np.complexfloating[_128Bit, _128Bit] | np.complex64) +assert_type(c16 + c8, np.complex128) +assert_type(f8 + c8, np.complexfloating[_64Bit, _64Bit]) +assert_type(i8 + c8, np.complexfloating[_64Bit, _64Bit] | np.complex64) +assert_type(c8 + c8, np.complex64) +assert_type(f4 + c8, np.complex64) +assert_type(i4 + c8, np.complex64) +assert_type(b_ + c8, np.complex64) +assert_type(b + c8, np.complex64) +assert_type(c + c8, np.complex64 | np.complex128) +assert_type(f + c8, np.complex64 | np.complex128) +assert_type(AR_f + c8, npt.NDArray[np.complexfloating]) + +# Float + +assert_type(f8 + f16, np.floating) +assert_type(f8 + f8, np.float64) +assert_type(f8 + i8, np.float64) +assert_type(f8 + f4, np.float64) +assert_type(f8 + i4, np.float64) +assert_type(f8 + b_, np.float64) +assert_type(f8 + b, np.float64) +assert_type(f8 + c, np.float64 | np.complex128) +assert_type(f8 + f, np.float64) +assert_type(f8 + AR_f, npt.NDArray[np.float64]) + +assert_type(f16 + f8, np.floating) +assert_type(f8 + f8, np.float64) +assert_type(i8 + f8, np.float64) +assert_type(f4 + f8, np.floating) +assert_type(i4 + f8, np.float64) +assert_type(b_ + f8, np.float64) +assert_type(b + f8, np.float64) +assert_type(c + f8, np.complex128 | np.float64) +assert_type(f + f8, np.float64) +assert_type(AR_f + f8, npt.NDArray[np.float64]) + +assert_type(f4 + f16, np.floating) +assert_type(f4 + f8, np.floating) +assert_type(f4 + i8, np.floating) +assert_type(f4 + f4, np.float32) +assert_type(f4 + i4, np.floating) +assert_type(f4 + b_, np.float32) +assert_type(f4 + b, np.float32) +assert_type(f4 + c, np.complexfloating) +assert_type(f4 + f, np.float32) +assert_type(f4 + AR_f, npt.NDArray[np.float64]) + +assert_type(f16 + f4, np.floating) +assert_type(f8 + f4, np.float64) +assert_type(i8 + f4, np.floating) +assert_type(f4 + f4, np.float32) +assert_type(i4 + f4, np.floating) +assert_type(b_ + f4, np.float32) +assert_type(b + f4, np.float32) +assert_type(c + f4, np.complexfloating) +assert_type(f + f4, np.float32) +assert_type(AR_f + f4, npt.NDArray[np.float64]) + +# Int + +assert_type(i8 + i8, np.int64) +assert_type(i8 + u8, Any) +assert_type(i8 + i4, np.signedinteger) +assert_type(i8 + u4, Any) +assert_type(i8 + b_, np.int64) +assert_type(i8 + b, np.int64) +assert_type(i8 + c, np.complex128) +assert_type(i8 + f, np.float64) +assert_type(i8 + AR_f, npt.NDArray[np.float64]) + +assert_type(u8 + u8, np.uint64) +assert_type(u8 + i4, Any) +assert_type(u8 + u4, np.unsignedinteger) +assert_type(u8 + b_, np.uint64) +assert_type(u8 + b, np.uint64) +assert_type(u8 + c, np.complex128) +assert_type(u8 + f, np.float64) +assert_type(u8 + AR_f, npt.NDArray[np.float64]) + +assert_type(i8 + i8, np.int64) +assert_type(u8 + i8, Any) +assert_type(i4 + i8, np.signedinteger) +assert_type(u4 + i8, Any) +assert_type(b_ + i8, np.int64) +assert_type(b + i8, np.int64) +assert_type(c + i8, np.complex128) +assert_type(f + i8, np.float64) +assert_type(AR_f + i8, npt.NDArray[np.float64]) + +assert_type(u8 + u8, np.uint64) +assert_type(i4 + u8, Any) +assert_type(u4 + u8, np.unsignedinteger) +assert_type(b_ + u8, np.uint64) +assert_type(b + u8, np.uint64) +assert_type(c + u8, np.complex128) +assert_type(f + u8, np.float64) +assert_type(AR_f + u8, npt.NDArray[np.float64]) + +assert_type(i4 + i8, np.signedinteger) +assert_type(i4 + i4, np.int32) +assert_type(i4 + b_, np.int32) +assert_type(i4 + b, np.int32) +assert_type(i4 + AR_f, npt.NDArray[np.float64]) + +assert_type(u4 + i8, Any) +assert_type(u4 + i4, Any) +assert_type(u4 + u8, np.unsignedinteger) +assert_type(u4 + u4, np.uint32) +assert_type(u4 + b_, np.uint32) +assert_type(u4 + b, np.uint32) +assert_type(u4 + AR_f, npt.NDArray[np.float64]) + +assert_type(i8 + i4, np.signedinteger) +assert_type(i4 + i4, np.int32) +assert_type(b_ + i4, np.int32) +assert_type(b + i4, np.int32) +assert_type(AR_f + i4, npt.NDArray[np.float64]) + +assert_type(i8 + u4, Any) +assert_type(i4 + u4, Any) +assert_type(u8 + u4, np.unsignedinteger) +assert_type(u4 + u4, np.uint32) +assert_type(b_ + u4, np.uint32) +assert_type(b + u4, np.uint32) +assert_type(AR_f + u4, npt.NDArray[np.float64]) + +# Any + +assert_type(AR_Any + 2, npt.NDArray[Any]) + +# regression tests for https://github.com/numpy/numpy/issues/28805 + +assert_type(AR_floating + f, npt.NDArray[np.floating]) +assert_type(AR_floating - f, npt.NDArray[np.floating]) +assert_type(AR_floating * f, npt.NDArray[np.floating]) +assert_type(AR_floating ** f, npt.NDArray[np.floating]) +assert_type(AR_floating / f, npt.NDArray[np.floating]) +assert_type(AR_floating // f, npt.NDArray[np.floating]) +assert_type(AR_floating % f, npt.NDArray[np.floating]) +assert_type(divmod(AR_floating, f), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) + +assert_type(f + AR_floating, npt.NDArray[np.floating]) +assert_type(f - AR_floating, npt.NDArray[np.floating]) +assert_type(f * AR_floating, npt.NDArray[np.floating]) +assert_type(f ** AR_floating, npt.NDArray[np.floating]) +assert_type(f / AR_floating, npt.NDArray[np.floating]) +assert_type(f // AR_floating, npt.NDArray[np.floating]) +assert_type(f % AR_floating, npt.NDArray[np.floating]) +assert_type(divmod(f, AR_floating), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) + +# character-like + +assert_type(AR_S + b"", npt.NDArray[np.bytes_]) +assert_type(AR_S + [b""], npt.NDArray[np.bytes_]) +assert_type([b""] + AR_S, npt.NDArray[np.bytes_]) +assert_type(AR_S + AR_S, npt.NDArray[np.bytes_]) + +assert_type(AR_U + "", npt.NDArray[np.str_]) +assert_type(AR_U + [""], npt.NDArray[np.str_]) +assert_type("" + AR_U, npt.NDArray[np.str_]) +assert_type([""] + AR_U, npt.NDArray[np.str_]) +assert_type(AR_U + AR_U, npt.NDArray[np.str_]) + +assert_type(AR_T + "", np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T + [""], np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type("" + AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type([""] + AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T + AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T + AR_U, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_U + AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) + +assert_type(AR_S * i, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) +assert_type(AR_S * AR_LIKE_i, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) +assert_type(AR_S * AR_i, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) +assert_type(i * AR_S, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) +# mypy incorrectly infers `AR_LIKE_i * AR_S` as `list[int]` +assert_type(AR_i * AR_S, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) + +assert_type(AR_U * i, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) +assert_type(AR_U * AR_LIKE_i, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) +assert_type(AR_U * AR_i, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) +assert_type(i * AR_U, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) +# mypy incorrectly infers `AR_LIKE_i * AR_U` as `list[int]` +assert_type(AR_i * AR_U, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) + +assert_type(AR_T * i, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T * AR_LIKE_i, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T * AR_i, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(i * AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +# mypy incorrectly infers `AR_LIKE_i * AR_T` as `list[int]` +assert_type(AR_i * AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) diff --git a/local_packages/numpy/typing/tests/data/reveal/array_api_info.pyi b/local_packages/numpy/typing/tests/data/reveal/array_api_info.pyi new file mode 100644 index 0000000..765f9ef --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/array_api_info.pyi @@ -0,0 +1,70 @@ +from typing import Literal, Never, assert_type + +import numpy as np + +info = np.__array_namespace_info__() + +assert_type(info.__module__, Literal["numpy"]) + +assert_type(info.default_device(), Literal["cpu"]) +assert_type(info.devices()[0], Literal["cpu"]) +assert_type(info.devices()[-1], Literal["cpu"]) + +assert_type(info.capabilities()["boolean indexing"], Literal[True]) +assert_type(info.capabilities()["data-dependent shapes"], Literal[True]) + +assert_type(info.default_dtypes()["real floating"], np.dtype[np.float64]) +assert_type(info.default_dtypes()["complex floating"], np.dtype[np.complex128]) +assert_type(info.default_dtypes()["integral"], np.dtype[np.int_]) +assert_type(info.default_dtypes()["indexing"], np.dtype[np.intp]) + +assert_type(info.dtypes()["bool"], np.dtype[np.bool]) +assert_type(info.dtypes()["int8"], np.dtype[np.int8]) +assert_type(info.dtypes()["uint8"], np.dtype[np.uint8]) +assert_type(info.dtypes()["float32"], np.dtype[np.float32]) +assert_type(info.dtypes()["complex64"], np.dtype[np.complex64]) + +assert_type(info.dtypes(kind="bool")["bool"], np.dtype[np.bool]) +assert_type(info.dtypes(kind="signed integer")["int64"], np.dtype[np.int64]) +assert_type(info.dtypes(kind="unsigned integer")["uint64"], np.dtype[np.uint64]) +assert_type(info.dtypes(kind="integral")["int32"], np.dtype[np.int32]) +assert_type(info.dtypes(kind="integral")["uint32"], np.dtype[np.uint32]) +assert_type(info.dtypes(kind="real floating")["float64"], np.dtype[np.float64]) +assert_type(info.dtypes(kind="complex floating")["complex128"], np.dtype[np.complex128]) +assert_type(info.dtypes(kind="numeric")["int16"], np.dtype[np.int16]) +assert_type(info.dtypes(kind="numeric")["uint16"], np.dtype[np.uint16]) +assert_type(info.dtypes(kind="numeric")["float64"], np.dtype[np.float64]) +assert_type(info.dtypes(kind="numeric")["complex128"], np.dtype[np.complex128]) + +assert_type(info.dtypes(kind=()), dict[Never, Never]) + +assert_type(info.dtypes(kind=("bool",))["bool"], np.dtype[np.bool]) +assert_type(info.dtypes(kind=("signed integer",))["int64"], np.dtype[np.int64]) +assert_type(info.dtypes(kind=("integral",))["uint32"], np.dtype[np.uint32]) +assert_type(info.dtypes(kind=("complex floating",))["complex128"], np.dtype[np.complex128]) +assert_type(info.dtypes(kind=("numeric",))["float64"], np.dtype[np.float64]) + +assert_type( + info.dtypes(kind=("signed integer", "unsigned integer"))["int8"], + np.dtype[np.int8], +) +assert_type( + info.dtypes(kind=("signed integer", "unsigned integer"))["uint8"], + np.dtype[np.uint8], +) +assert_type( + info.dtypes(kind=("integral", "real floating", "complex floating"))["int16"], + np.dtype[np.int16], +) +assert_type( + info.dtypes(kind=("integral", "real floating", "complex floating"))["uint16"], + np.dtype[np.uint16], +) +assert_type( + info.dtypes(kind=("integral", "real floating", "complex floating"))["float32"], + np.dtype[np.float32], +) +assert_type( + info.dtypes(kind=("integral", "real floating", "complex floating"))["complex64"], + np.dtype[np.complex64], +) diff --git a/local_packages/numpy/typing/tests/data/reveal/array_constructors.pyi b/local_packages/numpy/typing/tests/data/reveal/array_constructors.pyi new file mode 100644 index 0000000..bfdbe87 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -0,0 +1,279 @@ +import sys +from collections import deque +from pathlib import Path +from typing import Any, Generic, TypeVar, assert_type + +import numpy as np +import numpy.typing as npt + +_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) + +class SubClass(npt.NDArray[_ScalarT_co]): ... + +class IntoSubClass(Generic[_ScalarT_co]): + def __array__(self) -> SubClass[_ScalarT_co]: ... + +i8: np.int64 + +A: npt.NDArray[np.float64] +B: SubClass[np.float64] +C: list[int] +D: SubClass[np.float64 | np.int64] +E: IntoSubClass[np.float64 | np.int64] + +mixed_shape: tuple[int, np.int64] + +def func(i: int, j: int, **kwargs: Any) -> SubClass[np.float64]: ... + +assert_type(np.empty_like(A), npt.NDArray[np.float64]) +assert_type(np.empty_like(B), SubClass[np.float64]) +assert_type(np.empty_like([1, 1.0]), npt.NDArray[Any]) +assert_type(np.empty_like(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.empty_like(A, dtype="c16"), npt.NDArray[Any]) + +assert_type(np.array(A), npt.NDArray[np.float64]) +assert_type(np.array(B), npt.NDArray[np.float64]) +assert_type(np.array([1, 1.0]), npt.NDArray[Any]) +assert_type(np.array(deque([1, 2, 3])), npt.NDArray[Any]) +assert_type(np.array(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.array(A, dtype="c16"), npt.NDArray[Any]) +assert_type(np.array(A, like=A), npt.NDArray[np.float64]) +assert_type(np.array(A, subok=True), npt.NDArray[np.float64]) +assert_type(np.array(B, subok=True), SubClass[np.float64]) +assert_type(np.array(B, subok=True, ndmin=0), SubClass[np.float64]) +assert_type(np.array(B, subok=True, ndmin=1), SubClass[np.float64]) +assert_type(np.array(D), npt.NDArray[np.float64 | np.int64]) +assert_type(np.array(E, subok=True), SubClass[np.float64 | np.int64]) +# https://github.com/numpy/numpy/issues/29245 +assert_type(np.array([], dtype=np.bool), npt.NDArray[np.bool]) + +assert_type(np.zeros([1, 5, 6]), npt.NDArray[np.float64]) +assert_type(np.zeros([1, 5, 6], dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.zeros([1, 5, 6], dtype="c16"), npt.NDArray[Any]) +assert_type(np.zeros(mixed_shape), npt.NDArray[np.float64]) + +assert_type(np.empty([1, 5, 6]), npt.NDArray[np.float64]) +assert_type(np.empty([1, 5, 6], dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.empty([1, 5, 6], dtype="c16"), npt.NDArray[Any]) +assert_type(np.empty(mixed_shape), npt.NDArray[np.float64]) + +assert_type(np.concatenate(A), npt.NDArray[np.float64]) +assert_type(np.concatenate([A, A]), npt.NDArray[Any]) # pyright correctly infers this as NDArray[float64] +assert_type(np.concatenate([[1], A]), npt.NDArray[Any]) +assert_type(np.concatenate([[1], [1]]), npt.NDArray[Any]) +assert_type(np.concatenate((A, A)), npt.NDArray[np.float64]) +assert_type(np.concatenate(([1], [1])), npt.NDArray[Any]) +assert_type(np.concatenate([1, 1.0]), npt.NDArray[Any]) +assert_type(np.concatenate(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.concatenate(A, dtype="c16"), npt.NDArray[Any]) +assert_type(np.concatenate([1, 1.0], out=A), npt.NDArray[np.float64]) + +assert_type(np.asarray(A), npt.NDArray[np.float64]) +assert_type(np.asarray(B), npt.NDArray[np.float64]) +assert_type(np.asarray([1, 1.0]), npt.NDArray[Any]) +assert_type(np.asarray(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.asarray(A, dtype="c16"), npt.NDArray[Any]) + +assert_type(np.asanyarray(A), npt.NDArray[np.float64]) +assert_type(np.asanyarray(B), SubClass[np.float64]) +assert_type(np.asanyarray([1, 1.0]), npt.NDArray[Any]) +assert_type(np.asanyarray(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.asanyarray(A, dtype="c16"), npt.NDArray[Any]) + +assert_type(np.ascontiguousarray(A), npt.NDArray[np.float64]) +assert_type(np.ascontiguousarray(B), npt.NDArray[np.float64]) +assert_type(np.ascontiguousarray([1, 1.0]), npt.NDArray[Any]) +assert_type(np.ascontiguousarray(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.ascontiguousarray(A, dtype="c16"), npt.NDArray[Any]) + +assert_type(np.asfortranarray(A), npt.NDArray[np.float64]) +assert_type(np.asfortranarray(B), npt.NDArray[np.float64]) +assert_type(np.asfortranarray([1, 1.0]), npt.NDArray[Any]) +assert_type(np.asfortranarray(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.asfortranarray(A, dtype="c16"), npt.NDArray[Any]) + +assert_type(np.fromstring("1 1 1", sep=" "), npt.NDArray[np.float64]) +assert_type(np.fromstring(b"1 1 1", sep=" "), npt.NDArray[np.float64]) +assert_type(np.fromstring("1 1 1", dtype=np.int64, sep=" "), npt.NDArray[np.int64]) +assert_type(np.fromstring(b"1 1 1", dtype=np.int64, sep=" "), npt.NDArray[np.int64]) +assert_type(np.fromstring("1 1 1", dtype="c16", sep=" "), npt.NDArray[Any]) +assert_type(np.fromstring(b"1 1 1", dtype="c16", sep=" "), npt.NDArray[Any]) + +assert_type(np.fromfile("test.txt", sep=" "), npt.NDArray[np.float64]) +assert_type(np.fromfile("test.txt", dtype=np.int64, sep=" "), npt.NDArray[np.int64]) +assert_type(np.fromfile("test.txt", dtype="c16", sep=" "), npt.NDArray[Any]) +with open("test.txt") as f: + assert_type(np.fromfile(f, sep=" "), npt.NDArray[np.float64]) + assert_type(np.fromfile(b"test.txt", sep=" "), npt.NDArray[np.float64]) + assert_type(np.fromfile(Path("test.txt"), sep=" "), npt.NDArray[np.float64]) + +assert_type(np.fromiter("12345", np.float64), npt.NDArray[np.float64]) +assert_type(np.fromiter("12345", float), npt.NDArray[Any]) + +assert_type(np.frombuffer(A), npt.NDArray[np.float64]) +assert_type(np.frombuffer(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.frombuffer(A, dtype="c16"), npt.NDArray[Any]) + +_x_bool: bool +_x_int: int +_x_float: float +_x_timedelta: np.timedelta64 +_x_datetime: np.datetime64 + +assert_type(np.arange(False, True), np.ndarray[tuple[int], np.dtype[np.int_]]) +assert_type(np.arange(10), np.ndarray[tuple[int], np.dtype[np.int_]]) +assert_type(np.arange(0, 10, step=2), np.ndarray[tuple[int], np.dtype[np.int_]]) +assert_type(np.arange(10.0), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.arange(0, stop=10.0), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.arange(_x_timedelta), np.ndarray[tuple[int], np.dtype[np.timedelta64[Any]]]) +assert_type(np.arange(0, _x_timedelta), np.ndarray[tuple[int], np.dtype[np.timedelta64[Any]]]) +assert_type(np.arange(_x_datetime, _x_datetime), np.ndarray[tuple[int], np.dtype[np.datetime64[Any]]]) +assert_type(np.arange(10, dtype=np.float64), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.arange(0, 10, step=2, dtype=np.int16), np.ndarray[tuple[int], np.dtype[np.int16]]) +assert_type(np.arange(10, dtype=int), np.ndarray[tuple[int], np.dtype[np.int_]]) +assert_type(np.arange(0, 10, dtype="f8"), np.ndarray[tuple[int], np.dtype]) +# https://github.com/numpy/numpy/issues/30628 +assert_type(np.arange("2025-12-20", "2025-12-23", dtype="datetime64[D]"), np.ndarray[tuple[int], np.dtype[np.datetime64[Any]]]) + +assert_type(np.require(A), npt.NDArray[np.float64]) +assert_type(np.require(B), SubClass[np.float64]) +assert_type(np.require(B, requirements=None), SubClass[np.float64]) +assert_type(np.require(B, dtype=int), npt.NDArray[Any]) +assert_type(np.require(B, requirements="E"), npt.NDArray[Any]) +assert_type(np.require(B, requirements=["ENSUREARRAY"]), npt.NDArray[Any]) +assert_type(np.require(B, requirements={"F", "E"}), npt.NDArray[Any]) +assert_type(np.require(B, requirements=["C", "OWNDATA"]), SubClass[np.float64]) +assert_type(np.require(B, requirements="W"), SubClass[np.float64]) +assert_type(np.require(B, requirements="A"), SubClass[np.float64]) +assert_type(np.require(C), npt.NDArray[Any]) + +assert_type(np.linspace(0, 10), npt.NDArray[np.float64]) +assert_type(np.linspace(0, 10j), npt.NDArray[np.complexfloating]) +assert_type(np.linspace(0, 10, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.linspace(0, 10, dtype=int), npt.NDArray[Any]) +assert_type(np.linspace(0, 10, retstep=True), tuple[npt.NDArray[np.float64], np.float64]) +assert_type(np.linspace(0j, 10, retstep=True), tuple[npt.NDArray[np.complexfloating], np.complexfloating]) +assert_type(np.linspace(0, 10, retstep=True, dtype=np.int64), tuple[npt.NDArray[np.int64], np.int64]) +assert_type(np.linspace(0j, 10, retstep=True, dtype=int), tuple[npt.NDArray[Any], Any]) + +assert_type(np.logspace(0, 10), npt.NDArray[np.float64]) +assert_type(np.logspace(0, 10j), npt.NDArray[np.complexfloating]) +assert_type(np.logspace(0, 10, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.logspace(0, 10, dtype=int), npt.NDArray[Any]) + +assert_type(np.geomspace(0, 10), npt.NDArray[np.float64]) +assert_type(np.geomspace(0, 10j), npt.NDArray[np.complexfloating]) +assert_type(np.geomspace(0, 10, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.geomspace(0, 10, dtype=int), npt.NDArray[Any]) + +assert_type(np.zeros_like(A), npt.NDArray[np.float64]) +assert_type(np.zeros_like(C), npt.NDArray[Any]) +assert_type(np.zeros_like(A, dtype=float), npt.NDArray[Any]) +assert_type(np.zeros_like(B), SubClass[np.float64]) +assert_type(np.zeros_like(B, dtype=np.int64), npt.NDArray[np.int64]) + +assert_type(np.ones_like(A), npt.NDArray[np.float64]) +assert_type(np.ones_like(C), npt.NDArray[Any]) +assert_type(np.ones_like(A, dtype=float), npt.NDArray[Any]) +assert_type(np.ones_like(B), SubClass[np.float64]) +assert_type(np.ones_like(B, dtype=np.int64), npt.NDArray[np.int64]) + +assert_type(np.full_like(A, i8), npt.NDArray[np.float64]) +assert_type(np.full_like(C, i8), npt.NDArray[Any]) +assert_type(np.full_like(A, i8, dtype=int), npt.NDArray[Any]) +assert_type(np.full_like(B, i8), SubClass[np.float64]) +assert_type(np.full_like(B, i8, dtype=np.int64), npt.NDArray[np.int64]) + +_size: int +_shape_0d: tuple[()] +_shape_1d: tuple[int] +_shape_2d: tuple[int, int] +_shape_nd: tuple[int, ...] +_shape_like: list[int] + +assert_type(np.ones(_shape_0d), np.ndarray[tuple[()], np.dtype[np.float64]]) +assert_type(np.ones(_size), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.ones(_shape_2d), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.ones(_shape_nd), np.ndarray[tuple[int, ...], np.dtype[np.float64]]) +assert_type(np.ones(_shape_1d, dtype=np.int64), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.ones(_shape_like), npt.NDArray[np.float64]) +assert_type( + np.ones(_shape_like, dtype=np.dtypes.Int64DType()), + np.ndarray[tuple[Any, ...], np.dtypes.Int64DType], +) +assert_type(np.ones(_shape_like, dtype=int), npt.NDArray[Any]) +assert_type(np.ones(mixed_shape), npt.NDArray[np.float64]) + +assert_type(np.full(_size, i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.full(_shape_2d, i8), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(np.full(_shape_like, i8), npt.NDArray[np.int64]) +assert_type(np.full(_shape_like, 42), npt.NDArray[Any]) +assert_type(np.full(_size, i8, dtype=np.float64), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.full(_size, i8, dtype=float), np.ndarray[tuple[int], np.dtype]) +assert_type(np.full(_shape_like, 42, dtype=float), npt.NDArray[Any]) +assert_type(np.full(_shape_0d, i8, dtype=object), np.ndarray[tuple[()], np.dtype]) + +assert_type(np.indices([1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.indices([1, 2, 3], sparse=True), tuple[npt.NDArray[np.int_], ...]) + +assert_type(np.fromfunction(func, (3, 5)), SubClass[np.float64]) + +assert_type(np.identity(3), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.identity(3, dtype=np.int8), np.ndarray[tuple[int, int], np.dtype[np.int8]]) +assert_type(np.identity(3, dtype=bool), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.identity(3, dtype="bool"), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.identity(3, dtype="b1"), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.identity(3, dtype="?"), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.identity(3, dtype=int), np.ndarray[tuple[int, int], np.dtype[np.int_ | Any]]) +assert_type(np.identity(3, dtype="int"), np.ndarray[tuple[int, int], np.dtype[np.int_ | Any]]) +assert_type(np.identity(3, dtype="n"), np.ndarray[tuple[int, int], np.dtype[np.int_ | Any]]) +assert_type(np.identity(3, dtype=float), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.identity(3, dtype="float"), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.identity(3, dtype="f8"), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.identity(3, dtype="d"), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.identity(3, dtype=complex), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) +assert_type(np.identity(3, dtype="complex"), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) +assert_type(np.identity(3, dtype="c16"), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) +assert_type(np.identity(3, dtype="D"), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) + +assert_type(np.atleast_1d(A), npt.NDArray[np.float64]) +assert_type(np.atleast_1d(C), npt.NDArray[Any]) +assert_type(np.atleast_1d(A, A), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.atleast_1d(A, C), tuple[npt.NDArray[Any], npt.NDArray[Any]]) +assert_type(np.atleast_1d(C, C), tuple[npt.NDArray[Any], npt.NDArray[Any]]) +assert_type(np.atleast_1d(A, A, A), tuple[npt.NDArray[np.float64], ...]) +assert_type(np.atleast_1d(C, C, C), tuple[npt.NDArray[Any], ...]) + +assert_type(np.atleast_2d(A), npt.NDArray[np.float64]) +assert_type(np.atleast_2d(A, A), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.atleast_2d(A, A, A), tuple[npt.NDArray[np.float64], ...]) + +assert_type(np.atleast_3d(A), npt.NDArray[np.float64]) +assert_type(np.atleast_3d(A, A), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.atleast_3d(A, A, A), tuple[npt.NDArray[np.float64], ...]) + +assert_type(np.vstack([A, A]), npt.NDArray[np.float64]) +assert_type(np.vstack([A, A], dtype=np.float32), npt.NDArray[np.float32]) +assert_type(np.vstack([A, C]), npt.NDArray[Any]) +assert_type(np.vstack([C, C]), npt.NDArray[Any]) + +assert_type(np.hstack([A, A]), npt.NDArray[np.float64]) +assert_type(np.hstack([A, A], dtype=np.float32), npt.NDArray[np.float32]) + +assert_type(np.stack([A, A]), npt.NDArray[np.float64]) +assert_type(np.stack([A, A], dtype=np.float32), npt.NDArray[np.float32]) +assert_type(np.stack([A, C]), npt.NDArray[Any]) +assert_type(np.stack([C, C]), npt.NDArray[Any]) +assert_type(np.stack([A, A], axis=0), npt.NDArray[np.float64]) +assert_type(np.stack([A, A], out=B), SubClass[np.float64]) + +assert_type(np.block([[A, A], [A, A]]), npt.NDArray[Any]) # pyright correctly infers this as NDArray[float64] +assert_type(np.block(C), npt.NDArray[Any]) + +if sys.version_info >= (3, 12): + from collections.abc import Buffer + + def create_array(obj: npt.ArrayLike) -> npt.NDArray[Any]: ... + + buffer: Buffer + assert_type(create_array(buffer), npt.NDArray[Any]) diff --git a/local_packages/numpy/typing/tests/data/reveal/arraypad.pyi b/local_packages/numpy/typing/tests/data/reveal/arraypad.pyi new file mode 100644 index 0000000..3d53d91 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/arraypad.pyi @@ -0,0 +1,27 @@ +from collections.abc import Mapping +from typing import Any, SupportsIndex, assert_type + +import numpy as np +import numpy.typing as npt + +def mode_func( + ar: npt.NDArray[np.number], + width: tuple[int, int], + iaxis: SupportsIndex, + kwargs: Mapping[str, Any], +) -> None: ... + +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] +AR_LIKE: list[int] + +assert_type(np.pad(AR_i8, (2, 3), "constant"), npt.NDArray[np.int64]) +assert_type(np.pad(AR_LIKE, (2, 3), "constant"), npt.NDArray[Any]) + +assert_type(np.pad(AR_f8, (2, 3), mode_func), npt.NDArray[np.float64]) +assert_type(np.pad(AR_f8, (2, 3), mode_func, a=1, b=2), npt.NDArray[np.float64]) + +assert_type(np.pad(AR_i8, {-1: (2, 3)}), npt.NDArray[np.int64]) +assert_type(np.pad(AR_i8, {-2: 4}), npt.NDArray[np.int64]) +pad_width: dict[int, int | tuple[int, int]] = {-1: (2, 3), -2: 4} +assert_type(np.pad(AR_i8, pad_width), npt.NDArray[np.int64]) diff --git a/local_packages/numpy/typing/tests/data/reveal/arrayprint.pyi b/local_packages/numpy/typing/tests/data/reveal/arrayprint.pyi new file mode 100644 index 0000000..17e175e --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/arrayprint.pyi @@ -0,0 +1,25 @@ +import contextlib +from collections.abc import Callable +from typing import assert_type + +import numpy as np +import numpy.typing as npt +from numpy._core.arrayprint import _FormatOptions + +AR: npt.NDArray[np.int64] +func_float: Callable[[np.floating], str] +func_int: Callable[[np.integer], str] + +assert_type(np.get_printoptions(), _FormatOptions) +assert_type( + np.array2string(AR, formatter={"float_kind": func_float, "int_kind": func_int}), + str, +) +assert_type(np.format_float_scientific(1.0), str) +assert_type(np.format_float_positional(1), str) +assert_type(np.array_repr(AR), str) +assert_type(np.array_str(AR), str) + +assert_type(np.printoptions(), contextlib._GeneratorContextManager[_FormatOptions]) +with np.printoptions() as dct: + assert_type(dct, _FormatOptions) diff --git a/local_packages/numpy/typing/tests/data/reveal/arraysetops.pyi b/local_packages/numpy/typing/tests/data/reveal/arraysetops.pyi new file mode 100644 index 0000000..7e5ca5c --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/arraysetops.pyi @@ -0,0 +1,74 @@ +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt +from numpy.lib._arraysetops_impl import ( + UniqueAllResult, + UniqueCountsResult, + UniqueInverseResult, +) + +AR_b: npt.NDArray[np.bool] +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] +AR_M: npt.NDArray[np.datetime64] +AR_O: npt.NDArray[np.object_] + +AR_LIKE_f8: list[float] + +assert_type(np.ediff1d(AR_b), npt.NDArray[np.int8]) +assert_type(np.ediff1d(AR_i8, to_end=[1, 2, 3]), npt.NDArray[np.int64]) +assert_type(np.ediff1d(AR_M), npt.NDArray[np.timedelta64]) +assert_type(np.ediff1d(AR_O), npt.NDArray[np.object_]) +assert_type(np.ediff1d(AR_LIKE_f8, to_begin=[1, 1.5]), npt.NDArray[Any]) + +assert_type(np.intersect1d(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.intersect1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) +assert_type(np.intersect1d(AR_f8, AR_i8), npt.NDArray[Any]) +assert_type( + np.intersect1d(AR_f8, AR_f8, return_indices=True), + tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp]], +) + +assert_type(np.setxor1d(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.setxor1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) +assert_type(np.setxor1d(AR_f8, AR_i8), npt.NDArray[Any]) + +assert_type(np.isin(AR_i8, AR_i8), npt.NDArray[np.bool]) +assert_type(np.isin(AR_M, AR_M, assume_unique=True), npt.NDArray[np.bool]) +assert_type(np.isin(AR_f8, AR_i8), npt.NDArray[np.bool]) +assert_type(np.isin(AR_f8, AR_LIKE_f8, invert=True), npt.NDArray[np.bool]) + +assert_type(np.union1d(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.union1d(AR_M, AR_M), npt.NDArray[np.datetime64]) +assert_type(np.union1d(AR_f8, AR_i8), npt.NDArray[Any]) + +assert_type(np.setdiff1d(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.setdiff1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) +assert_type(np.setdiff1d(AR_f8, AR_i8), npt.NDArray[Any]) + +assert_type(np.unique(AR_f8), npt.NDArray[np.float64]) +assert_type(np.unique(AR_LIKE_f8, axis=0), npt.NDArray[Any]) +assert_type(np.unique(AR_f8, return_index=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_LIKE_f8, return_index=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_f8, return_inverse=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_LIKE_f8, return_inverse=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_f8, return_counts=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_LIKE_f8, return_counts=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_f8, return_index=True, return_inverse=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_LIKE_f8, return_index=True, return_inverse=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_f8, return_index=True, return_counts=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_LIKE_f8, return_index=True, return_counts=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_f8, return_inverse=True, return_counts=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_LIKE_f8, return_inverse=True, return_counts=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_f8, return_index=True, return_inverse=True, return_counts=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_LIKE_f8, return_index=True, return_inverse=True, return_counts=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp], npt.NDArray[np.intp], npt.NDArray[np.intp]]) + +assert_type(np.unique_all(AR_f8), UniqueAllResult[np.float64]) +assert_type(np.unique_all(AR_LIKE_f8), UniqueAllResult[Any]) +assert_type(np.unique_counts(AR_f8), UniqueCountsResult[np.float64]) +assert_type(np.unique_counts(AR_LIKE_f8), UniqueCountsResult[Any]) +assert_type(np.unique_inverse(AR_f8), UniqueInverseResult[np.float64]) +assert_type(np.unique_inverse(AR_LIKE_f8), UniqueInverseResult[Any]) +assert_type(np.unique_values(AR_f8), npt.NDArray[np.float64]) +assert_type(np.unique_values(AR_LIKE_f8), npt.NDArray[Any]) diff --git a/local_packages/numpy/typing/tests/data/reveal/arrayterator.pyi b/local_packages/numpy/typing/tests/data/reveal/arrayterator.pyi new file mode 100644 index 0000000..85eeff4 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/arrayterator.pyi @@ -0,0 +1,27 @@ +from collections.abc import Generator +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +AR_i8: npt.NDArray[np.int64] +ar_iter = np.lib.Arrayterator(AR_i8) + +assert_type(ar_iter.var, npt.NDArray[np.int64]) +assert_type(ar_iter.buf_size, int | None) +assert_type(ar_iter.start, list[int]) +assert_type(ar_iter.stop, list[int]) +assert_type(ar_iter.step, list[int]) +assert_type(ar_iter.shape, tuple[Any, ...]) +assert_type(ar_iter.flat, Generator[np.int64]) + +assert_type(ar_iter.__array__(), npt.NDArray[np.int64]) + +for i in ar_iter: + assert_type(i, npt.NDArray[np.int64]) + +assert_type(ar_iter[0], np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) +assert_type(ar_iter[...], np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) +assert_type(ar_iter[:], np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) +assert_type(ar_iter[0, 0, 0], np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) +assert_type(ar_iter[..., 0, :], np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) diff --git a/local_packages/numpy/typing/tests/data/reveal/bitwise_ops.pyi b/local_packages/numpy/typing/tests/data/reveal/bitwise_ops.pyi new file mode 100644 index 0000000..49986bd --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/bitwise_ops.pyi @@ -0,0 +1,166 @@ +from typing import Literal as L, TypeAlias, assert_type + +import numpy as np +import numpy.typing as npt + +FalseType: TypeAlias = L[False] +TrueType: TypeAlias = L[True] + +i4: np.int32 +i8: np.int64 + +u4: np.uint32 +u8: np.uint64 + +b_: np.bool[bool] +b0_: np.bool[FalseType] +b1_: np.bool[TrueType] + +b: bool +b0: FalseType +b1: TrueType + +i: int + +AR: npt.NDArray[np.int32] + +assert_type(i8 << i8, np.int64) +assert_type(i8 >> i8, np.int64) +assert_type(i8 | i8, np.int64) +assert_type(i8 ^ i8, np.int64) +assert_type(i8 & i8, np.int64) + +assert_type(i8 << AR, npt.NDArray[np.signedinteger]) +assert_type(i8 >> AR, npt.NDArray[np.signedinteger]) +assert_type(i8 | AR, npt.NDArray[np.signedinteger]) +assert_type(i8 ^ AR, npt.NDArray[np.signedinteger]) +assert_type(i8 & AR, npt.NDArray[np.signedinteger]) + +assert_type(i4 << i4, np.int32) +assert_type(i4 >> i4, np.int32) +assert_type(i4 | i4, np.int32) +assert_type(i4 ^ i4, np.int32) +assert_type(i4 & i4, np.int32) + +assert_type(i8 << i4, np.signedinteger) +assert_type(i8 >> i4, np.signedinteger) +assert_type(i8 | i4, np.signedinteger) +assert_type(i8 ^ i4, np.signedinteger) +assert_type(i8 & i4, np.signedinteger) + +assert_type(i8 << b_, np.int64) +assert_type(i8 >> b_, np.int64) +assert_type(i8 | b_, np.int64) +assert_type(i8 ^ b_, np.int64) +assert_type(i8 & b_, np.int64) + +assert_type(i8 << b, np.int64) +assert_type(i8 >> b, np.int64) +assert_type(i8 | b, np.int64) +assert_type(i8 ^ b, np.int64) +assert_type(i8 & b, np.int64) + +assert_type(u8 << u8, np.uint64) +assert_type(u8 >> u8, np.uint64) +assert_type(u8 | u8, np.uint64) +assert_type(u8 ^ u8, np.uint64) +assert_type(u8 & u8, np.uint64) + +assert_type(u8 << AR, npt.NDArray[np.signedinteger]) +assert_type(u8 >> AR, npt.NDArray[np.signedinteger]) +assert_type(u8 | AR, npt.NDArray[np.signedinteger]) +assert_type(u8 ^ AR, npt.NDArray[np.signedinteger]) +assert_type(u8 & AR, npt.NDArray[np.signedinteger]) + +assert_type(u4 << u4, np.uint32) +assert_type(u4 >> u4, np.uint32) +assert_type(u4 | u4, np.uint32) +assert_type(u4 ^ u4, np.uint32) +assert_type(u4 & u4, np.uint32) + +assert_type(u4 << i4, np.signedinteger) +assert_type(u4 >> i4, np.signedinteger) +assert_type(u4 | i4, np.signedinteger) +assert_type(u4 ^ i4, np.signedinteger) +assert_type(u4 & i4, np.signedinteger) + +assert_type(u4 << i, np.uint32) +assert_type(u4 >> i, np.uint32) +assert_type(u4 | i, np.uint32) +assert_type(u4 ^ i, np.uint32) +assert_type(u4 & i, np.uint32) + +assert_type(u8 << b_, np.uint64) +assert_type(u8 >> b_, np.uint64) +assert_type(u8 | b_, np.uint64) +assert_type(u8 ^ b_, np.uint64) +assert_type(u8 & b_, np.uint64) + +assert_type(u8 << b, np.uint64) +assert_type(u8 >> b, np.uint64) +assert_type(u8 | b, np.uint64) +assert_type(u8 ^ b, np.uint64) +assert_type(u8 & b, np.uint64) + +assert_type(b_ << b_, np.int8) +assert_type(b_ >> b_, np.int8) +assert_type(b_ | b_, np.bool) +assert_type(b_ ^ b_, np.bool) +assert_type(b_ & b_, np.bool) + +assert_type(b_ << AR, npt.NDArray[np.signedinteger]) +assert_type(b_ >> AR, npt.NDArray[np.signedinteger]) +assert_type(b_ | AR, npt.NDArray[np.signedinteger]) +assert_type(b_ ^ AR, npt.NDArray[np.signedinteger]) +assert_type(b_ & AR, npt.NDArray[np.signedinteger]) + +assert_type(b_ << b, np.int8) +assert_type(b_ >> b, np.int8) +assert_type(b_ | b, np.bool) +assert_type(b_ ^ b, np.bool) +assert_type(b_ & b, np.bool) + +assert_type(b_ << i, np.int_) +assert_type(b_ >> i, np.int_) +assert_type(b_ | i, np.bool | np.int_) +assert_type(b_ ^ i, np.bool | np.int_) +assert_type(b_ & i, np.bool | np.int_) + +assert_type(~i8, np.int64) +assert_type(~i4, np.int32) +assert_type(~u8, np.uint64) +assert_type(~u4, np.uint32) +assert_type(~b_, np.bool) +assert_type(~b0_, np.bool[TrueType]) +assert_type(~b1_, np.bool[FalseType]) +assert_type(~AR, npt.NDArray[np.int32]) + +assert_type(b_ | b0_, np.bool) +assert_type(b0_ | b_, np.bool) +assert_type(b_ | b1_, np.bool[TrueType]) +assert_type(b1_ | b_, np.bool[TrueType]) + +assert_type(b_ ^ b0_, np.bool) +assert_type(b0_ ^ b_, np.bool) +assert_type(b_ ^ b1_, np.bool) +assert_type(b1_ ^ b_, np.bool) + +assert_type(b_ & b0_, np.bool[FalseType]) +assert_type(b0_ & b_, np.bool[FalseType]) +assert_type(b_ & b1_, np.bool) +assert_type(b1_ & b_, np.bool) + +assert_type(b0_ | b0_, np.bool[FalseType]) +assert_type(b0_ | b1_, np.bool[TrueType]) +assert_type(b1_ | b0_, np.bool[TrueType]) +assert_type(b1_ | b1_, np.bool[TrueType]) + +assert_type(b0_ ^ b0_, np.bool[FalseType]) +assert_type(b0_ ^ b1_, np.bool[TrueType]) +assert_type(b1_ ^ b0_, np.bool[TrueType]) +assert_type(b1_ ^ b1_, np.bool[FalseType]) + +assert_type(b0_ & b0_, np.bool[FalseType]) +assert_type(b0_ & b1_, np.bool[FalseType]) +assert_type(b1_ & b0_, np.bool[FalseType]) +assert_type(b1_ & b1_, np.bool[TrueType]) diff --git a/local_packages/numpy/typing/tests/data/reveal/char.pyi b/local_packages/numpy/typing/tests/data/reveal/char.pyi new file mode 100644 index 0000000..2fba2fe --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/char.pyi @@ -0,0 +1,225 @@ +from typing import TypeAlias, assert_type + +import numpy as np +import numpy._typing as np_t +import numpy.typing as npt + +AR_T_alias: TypeAlias = np.ndarray[np_t._AnyShape, np.dtypes.StringDType] +AR_TU_alias: TypeAlias = AR_T_alias | npt.NDArray[np.str_] + +AR_U: npt.NDArray[np.str_] +AR_S: npt.NDArray[np.bytes_] +AR_T: AR_T_alias + +assert_type(np.char.equal(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.char.equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.char.equal(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.not_equal(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.char.not_equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.char.not_equal(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.greater_equal(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.char.greater_equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.char.greater_equal(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.less_equal(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.char.less_equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.char.less_equal(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.greater(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.char.greater(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.char.greater(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.less(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.char.less(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.char.less(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.multiply(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.char.multiply(AR_S, [5, 4, 3]), npt.NDArray[np.bytes_]) +assert_type(np.char.multiply(AR_T, 5), AR_T_alias) + +assert_type(np.char.mod(AR_U, "test"), npt.NDArray[np.str_]) +assert_type(np.char.mod(AR_S, "test"), npt.NDArray[np.bytes_]) +assert_type(np.char.mod(AR_T, "test"), AR_T_alias) + +assert_type(np.char.capitalize(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.capitalize(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.char.capitalize(AR_T), AR_T_alias) + +assert_type(np.char.center(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.char.center(AR_S, [2, 3, 4], b"a"), npt.NDArray[np.bytes_]) +assert_type(np.char.center(AR_T, 5), AR_T_alias) + +assert_type(np.char.encode(AR_U), npt.NDArray[np.bytes_]) +assert_type(np.char.encode(AR_T), npt.NDArray[np.bytes_]) +assert_type(np.char.decode(AR_S), npt.NDArray[np.str_]) + +assert_type(np.char.expandtabs(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.expandtabs(AR_S, tabsize=4), npt.NDArray[np.bytes_]) +assert_type(np.char.expandtabs(AR_T), AR_T_alias) + +assert_type(np.char.join(AR_U, "_"), npt.NDArray[np.str_]) +assert_type(np.char.join(AR_S, [b"_", b""]), npt.NDArray[np.bytes_]) +assert_type(np.char.join(AR_T, "_"), AR_TU_alias) + +assert_type(np.char.ljust(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.char.ljust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.char.ljust(AR_S, [4, 3, 1], fillchar="a"), npt.NDArray[np.bytes_]) +assert_type(np.char.ljust(AR_T, 5), AR_T_alias) +assert_type(np.char.ljust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_TU_alias) + +assert_type(np.char.rjust(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.char.rjust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.char.rjust(AR_T, 5), AR_T_alias) +assert_type(np.char.rjust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_TU_alias) + +assert_type(np.char.lstrip(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.lstrip(AR_S, b"_"), npt.NDArray[np.bytes_]) +assert_type(np.char.lstrip(AR_T), AR_T_alias) +assert_type(np.char.lstrip(AR_T, "_"), AR_TU_alias) + +assert_type(np.char.rstrip(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.rstrip(AR_S, b"_"), npt.NDArray[np.bytes_]) +assert_type(np.char.rstrip(AR_T), AR_T_alias) +assert_type(np.char.rstrip(AR_T, "_"), AR_TU_alias) + +assert_type(np.char.strip(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.strip(AR_S, b"_"), npt.NDArray[np.bytes_]) +assert_type(np.char.strip(AR_T), AR_T_alias) +assert_type(np.char.strip(AR_T, "_"), AR_TU_alias) + +assert_type(np.char.count(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.char.count(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.char.count(AR_T, AR_T, start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.char.count(AR_T, ["a", "b", "c"], end=9), npt.NDArray[np.int_]) + +assert_type(np.char.partition(AR_U, "\n"), npt.NDArray[np.str_]) +assert_type(np.char.partition(AR_S, [b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.char.partition(AR_T, "\n"), AR_TU_alias) + +assert_type(np.char.rpartition(AR_U, "\n"), npt.NDArray[np.str_]) +assert_type(np.char.rpartition(AR_S, [b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.char.rpartition(AR_T, "\n"), AR_TU_alias) + +assert_type(np.char.replace(AR_U, "_", "-"), npt.NDArray[np.str_]) +assert_type(np.char.replace(AR_S, [b"_", b""], [b"a", b"b"]), npt.NDArray[np.bytes_]) +assert_type(np.char.replace(AR_T, "_", "_"), AR_TU_alias) + +assert_type(np.char.split(AR_U, "_"), npt.NDArray[np.object_]) +assert_type(np.char.split(AR_S, maxsplit=[1, 2, 3]), npt.NDArray[np.object_]) +assert_type(np.char.split(AR_T, "_"), npt.NDArray[np.object_]) + +assert_type(np.char.rsplit(AR_U, "_"), npt.NDArray[np.object_]) +assert_type(np.char.rsplit(AR_S, maxsplit=[1, 2, 3]), npt.NDArray[np.object_]) +assert_type(np.char.rsplit(AR_T, "_"), npt.NDArray[np.object_]) + +assert_type(np.char.splitlines(AR_U), npt.NDArray[np.object_]) +assert_type(np.char.splitlines(AR_S, keepends=[True, True, False]), npt.NDArray[np.object_]) +assert_type(np.char.splitlines(AR_T), npt.NDArray[np.object_]) + +assert_type(np.char.lower(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.lower(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.char.lower(AR_T), AR_T_alias) + +assert_type(np.char.upper(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.upper(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.char.upper(AR_T), AR_T_alias) + +assert_type(np.char.swapcase(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.swapcase(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.char.swapcase(AR_T), AR_T_alias) + +assert_type(np.char.title(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.title(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.char.title(AR_T), AR_T_alias) + +assert_type(np.char.zfill(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.char.zfill(AR_S, [2, 3, 4]), npt.NDArray[np.bytes_]) +assert_type(np.char.zfill(AR_T, 5), AR_T_alias) + +assert_type(np.char.endswith(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) +assert_type(np.char.endswith(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.bool]) +assert_type(np.char.endswith(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) + +assert_type(np.char.startswith(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) +assert_type(np.char.startswith(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.bool]) +assert_type(np.char.startswith(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) + +assert_type(np.char.find(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.char.find(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.char.find(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) + +assert_type(np.char.rfind(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.char.rfind(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.char.rfind(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) + +assert_type(np.char.index(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.char.index(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.char.index(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) + +assert_type(np.char.rindex(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.char.rindex(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.char.rindex(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) + +assert_type(np.char.isalpha(AR_U), npt.NDArray[np.bool]) +assert_type(np.char.isalpha(AR_S), npt.NDArray[np.bool]) +assert_type(np.char.isalpha(AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.isalnum(AR_U), npt.NDArray[np.bool]) +assert_type(np.char.isalnum(AR_S), npt.NDArray[np.bool]) +assert_type(np.char.isalnum(AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.isdecimal(AR_U), npt.NDArray[np.bool]) +assert_type(np.char.isdecimal(AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.isdigit(AR_U), npt.NDArray[np.bool]) +assert_type(np.char.isdigit(AR_S), npt.NDArray[np.bool]) +assert_type(np.char.isdigit(AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.islower(AR_U), npt.NDArray[np.bool]) +assert_type(np.char.islower(AR_S), npt.NDArray[np.bool]) +assert_type(np.char.islower(AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.isnumeric(AR_U), npt.NDArray[np.bool]) +assert_type(np.char.isnumeric(AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.isspace(AR_U), npt.NDArray[np.bool]) +assert_type(np.char.isspace(AR_S), npt.NDArray[np.bool]) +assert_type(np.char.isspace(AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.istitle(AR_U), npt.NDArray[np.bool]) +assert_type(np.char.istitle(AR_S), npt.NDArray[np.bool]) +assert_type(np.char.istitle(AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.isupper(AR_U), npt.NDArray[np.bool]) +assert_type(np.char.isupper(AR_S), npt.NDArray[np.bool]) +assert_type(np.char.isupper(AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.str_len(AR_U), npt.NDArray[np.int_]) +assert_type(np.char.str_len(AR_S), npt.NDArray[np.int_]) +assert_type(np.char.str_len(AR_T), npt.NDArray[np.int_]) + +assert_type(np.char.translate(AR_U, ""), npt.NDArray[np.str_]) +assert_type(np.char.translate(AR_S, ""), npt.NDArray[np.bytes_]) +assert_type(np.char.translate(AR_T, ""), AR_T_alias) + +assert_type(np.char.array(AR_U), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.array(AR_S, order="K"), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array("bob", copy=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.array(b"bob", itemsize=5), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(1, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(1, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.array(1), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]] | np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(AR_U, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(AR_S, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) + +assert_type(np.char.asarray(AR_U), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.asarray(AR_S, order="K"), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray("bob"), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.asarray(b"bob", itemsize=5), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(1, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(1, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.asarray(1), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]] | np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(AR_U, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(AR_S, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) diff --git a/local_packages/numpy/typing/tests/data/reveal/chararray.pyi b/local_packages/numpy/typing/tests/data/reveal/chararray.pyi new file mode 100644 index 0000000..5c3dc85 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/chararray.pyi @@ -0,0 +1,138 @@ +from typing import Any, TypeAlias, assert_type + +import numpy as np +import numpy.typing as npt + +_BytesCharArray: TypeAlias = np.char.chararray[tuple[Any, ...], np.dtype[np.bytes_]] +_StrCharArray: TypeAlias = np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] + +AR_U: _StrCharArray +AR_S: _BytesCharArray + +assert_type(AR_U == AR_U, npt.NDArray[np.bool]) +assert_type(AR_S == AR_S, npt.NDArray[np.bool]) + +assert_type(AR_U != AR_U, npt.NDArray[np.bool]) +assert_type(AR_S != AR_S, npt.NDArray[np.bool]) + +assert_type(AR_U >= AR_U, npt.NDArray[np.bool]) +assert_type(AR_S >= AR_S, npt.NDArray[np.bool]) + +assert_type(AR_U <= AR_U, npt.NDArray[np.bool]) +assert_type(AR_S <= AR_S, npt.NDArray[np.bool]) + +assert_type(AR_U > AR_U, npt.NDArray[np.bool]) +assert_type(AR_S > AR_S, npt.NDArray[np.bool]) + +assert_type(AR_U < AR_U, npt.NDArray[np.bool]) +assert_type(AR_S < AR_S, npt.NDArray[np.bool]) + +assert_type(AR_U * 5, _StrCharArray) +assert_type(AR_S * [5], _BytesCharArray) + +assert_type(AR_U % "test", _StrCharArray) +assert_type(AR_S % b"test", _BytesCharArray) + +assert_type(AR_U.capitalize(), _StrCharArray) +assert_type(AR_S.capitalize(), _BytesCharArray) + +assert_type(AR_U.center(5), _StrCharArray) +assert_type(AR_S.center([2, 3, 4], b"a"), _BytesCharArray) + +assert_type(AR_U.encode(), _BytesCharArray) +assert_type(AR_S.decode(), _StrCharArray) + +assert_type(AR_U.expandtabs(), _StrCharArray) +assert_type(AR_S.expandtabs(tabsize=4), _BytesCharArray) + +assert_type(AR_U.join("_"), _StrCharArray) +assert_type(AR_S.join([b"_", b""]), _BytesCharArray) + +assert_type(AR_U.ljust(5), _StrCharArray) +assert_type(AR_S.ljust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), _BytesCharArray) +assert_type(AR_S.ljust([4, 3, 1], fillchar="a"), _BytesCharArray) +assert_type(AR_U.rjust(5), _StrCharArray) +assert_type(AR_S.rjust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), _BytesCharArray) + +assert_type(AR_U.lstrip(), _StrCharArray) +assert_type(AR_S.lstrip(chars=b"_"), _BytesCharArray) +assert_type(AR_U.rstrip(), _StrCharArray) +assert_type(AR_S.rstrip(chars=b"_"), _BytesCharArray) +assert_type(AR_U.strip(), _StrCharArray) +assert_type(AR_S.strip(chars=b"_"), _BytesCharArray) + +assert_type(AR_U.partition("\n"), _StrCharArray) +assert_type(AR_S.partition([b"a", b"b", b"c"]), _BytesCharArray) +assert_type(AR_U.rpartition("\n"), _StrCharArray) +assert_type(AR_S.rpartition([b"a", b"b", b"c"]), _BytesCharArray) + +assert_type(AR_U.replace("_", "-"), _StrCharArray) +assert_type(AR_S.replace([b"_", b""], [b"a", b"b"]), _BytesCharArray) + +assert_type(AR_U.split("_"), npt.NDArray[np.object_]) +assert_type(AR_S.split(maxsplit=[1, 2, 3]), npt.NDArray[np.object_]) +assert_type(AR_U.rsplit("_"), npt.NDArray[np.object_]) +assert_type(AR_S.rsplit(maxsplit=[1, 2, 3]), npt.NDArray[np.object_]) + +assert_type(AR_U.splitlines(), npt.NDArray[np.object_]) +assert_type(AR_S.splitlines(keepends=[True, True, False]), npt.NDArray[np.object_]) + +assert_type(AR_U.swapcase(), _StrCharArray) +assert_type(AR_S.swapcase(), _BytesCharArray) + +assert_type(AR_U.title(), _StrCharArray) +assert_type(AR_S.title(), _BytesCharArray) + +assert_type(AR_U.upper(), _StrCharArray) +assert_type(AR_S.upper(), _BytesCharArray) + +assert_type(AR_U.zfill(5), _StrCharArray) +assert_type(AR_S.zfill([2, 3, 4]), _BytesCharArray) + +assert_type(AR_U.count("a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(AR_S.count([b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) + +assert_type(AR_U.endswith("a", start=[1, 2, 3]), npt.NDArray[np.bool]) +assert_type(AR_S.endswith([b"a", b"b", b"c"], end=9), npt.NDArray[np.bool]) +assert_type(AR_U.startswith("a", start=[1, 2, 3]), npt.NDArray[np.bool]) +assert_type(AR_S.startswith([b"a", b"b", b"c"], end=9), npt.NDArray[np.bool]) + +assert_type(AR_U.find("a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(AR_S.find([b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(AR_U.rfind("a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(AR_S.rfind([b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) + +assert_type(AR_U.index("a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(AR_S.index([b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(AR_U.rindex("a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(AR_S.rindex([b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) + +assert_type(AR_U.isalpha(), npt.NDArray[np.bool]) +assert_type(AR_S.isalpha(), npt.NDArray[np.bool]) + +assert_type(AR_U.isalnum(), npt.NDArray[np.bool]) +assert_type(AR_S.isalnum(), npt.NDArray[np.bool]) + +assert_type(AR_U.isdecimal(), npt.NDArray[np.bool]) +assert_type(AR_S.isdecimal(), npt.NDArray[np.bool]) + +assert_type(AR_U.isdigit(), npt.NDArray[np.bool]) +assert_type(AR_S.isdigit(), npt.NDArray[np.bool]) + +assert_type(AR_U.islower(), npt.NDArray[np.bool]) +assert_type(AR_S.islower(), npt.NDArray[np.bool]) + +assert_type(AR_U.isnumeric(), npt.NDArray[np.bool]) +assert_type(AR_S.isnumeric(), npt.NDArray[np.bool]) + +assert_type(AR_U.isspace(), npt.NDArray[np.bool]) +assert_type(AR_S.isspace(), npt.NDArray[np.bool]) + +assert_type(AR_U.istitle(), npt.NDArray[np.bool]) +assert_type(AR_S.istitle(), npt.NDArray[np.bool]) + +assert_type(AR_U.isupper(), npt.NDArray[np.bool]) +assert_type(AR_S.isupper(), npt.NDArray[np.bool]) + +assert_type(AR_U.__array_finalize__(object()), None) +assert_type(AR_S.__array_finalize__(object()), None) diff --git a/local_packages/numpy/typing/tests/data/reveal/comparisons.pyi b/local_packages/numpy/typing/tests/data/reveal/comparisons.pyi new file mode 100644 index 0000000..6df5a3d --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/comparisons.pyi @@ -0,0 +1,264 @@ +import decimal +import fractions +from typing import assert_type + +import numpy as np +import numpy.typing as npt + +c16 = np.complex128() +f8 = np.float64() +i8 = np.int64() +u8 = np.uint64() + +c8 = np.complex64() +f4 = np.float32() +i4 = np.int32() +u4 = np.uint32() + +dt = np.datetime64(0, "D") +td = np.timedelta64(0, "D") + +b_ = np.bool() + +b = False +c = complex() +f = 0.0 +i = 0 + +AR = np.array([0], dtype=np.int64) +AR.setflags(write=False) + +SEQ = (0, 1, 2, 3, 4) + +# object-like comparisons + +assert_type(i8 > fractions.Fraction(1, 5), np.bool) +assert_type(i8 > [fractions.Fraction(1, 5)], npt.NDArray[np.bool]) +assert_type(i8 > decimal.Decimal("1.5"), np.bool) +assert_type(i8 > [decimal.Decimal("1.5")], npt.NDArray[np.bool]) + +# Time structures + +assert_type(dt > dt, np.bool) + +assert_type(td > td, np.bool) +assert_type(td > i, np.bool) +assert_type(td > i4, np.bool) +assert_type(td > i8, np.bool) + +assert_type(td > AR, npt.NDArray[np.bool]) +assert_type(td > SEQ, npt.NDArray[np.bool]) +assert_type(AR > SEQ, npt.NDArray[np.bool]) +assert_type(AR > td, npt.NDArray[np.bool]) +assert_type(SEQ > td, npt.NDArray[np.bool]) +assert_type(SEQ > AR, npt.NDArray[np.bool]) + +# boolean + +assert_type(b_ > b, np.bool) +assert_type(b_ > b_, np.bool) +assert_type(b_ > i, np.bool) +assert_type(b_ > i8, np.bool) +assert_type(b_ > i4, np.bool) +assert_type(b_ > u8, np.bool) +assert_type(b_ > u4, np.bool) +assert_type(b_ > f, np.bool) +assert_type(b_ > f8, np.bool) +assert_type(b_ > f4, np.bool) +assert_type(b_ > c, np.bool) +assert_type(b_ > c16, np.bool) +assert_type(b_ > c8, np.bool) +assert_type(b_ > AR, npt.NDArray[np.bool]) +assert_type(b_ > SEQ, npt.NDArray[np.bool]) + +# Complex + +assert_type(c16 > c16, np.bool) +assert_type(c16 > f8, np.bool) +assert_type(c16 > i8, np.bool) +assert_type(c16 > c8, np.bool) +assert_type(c16 > f4, np.bool) +assert_type(c16 > i4, np.bool) +assert_type(c16 > b_, np.bool) +assert_type(c16 > b, np.bool) +assert_type(c16 > c, np.bool) +assert_type(c16 > f, np.bool) +assert_type(c16 > i, np.bool) +assert_type(c16 > AR, npt.NDArray[np.bool]) +assert_type(c16 > SEQ, npt.NDArray[np.bool]) + +assert_type(c16 > c16, np.bool) +assert_type(f8 > c16, np.bool) +assert_type(i8 > c16, np.bool) +assert_type(c8 > c16, np.bool) +assert_type(f4 > c16, np.bool) +assert_type(i4 > c16, np.bool) +assert_type(b_ > c16, np.bool) +assert_type(b > c16, np.bool) +assert_type(c > c16, np.bool) +assert_type(f > c16, np.bool) +assert_type(i > c16, np.bool) +assert_type(AR > c16, npt.NDArray[np.bool]) +assert_type(SEQ > c16, npt.NDArray[np.bool]) + +assert_type(c8 > c16, np.bool) +assert_type(c8 > f8, np.bool) +assert_type(c8 > i8, np.bool) +assert_type(c8 > c8, np.bool) +assert_type(c8 > f4, np.bool) +assert_type(c8 > i4, np.bool) +assert_type(c8 > b_, np.bool) +assert_type(c8 > b, np.bool) +assert_type(c8 > c, np.bool) +assert_type(c8 > f, np.bool) +assert_type(c8 > i, np.bool) +assert_type(c8 > AR, npt.NDArray[np.bool]) +assert_type(c8 > SEQ, npt.NDArray[np.bool]) + +assert_type(c16 > c8, np.bool) +assert_type(f8 > c8, np.bool) +assert_type(i8 > c8, np.bool) +assert_type(c8 > c8, np.bool) +assert_type(f4 > c8, np.bool) +assert_type(i4 > c8, np.bool) +assert_type(b_ > c8, np.bool) +assert_type(b > c8, np.bool) +assert_type(c > c8, np.bool) +assert_type(f > c8, np.bool) +assert_type(i > c8, np.bool) +assert_type(AR > c8, npt.NDArray[np.bool]) +assert_type(SEQ > c8, npt.NDArray[np.bool]) + +# Float + +assert_type(f8 > f8, np.bool) +assert_type(f8 > i8, np.bool) +assert_type(f8 > f4, np.bool) +assert_type(f8 > i4, np.bool) +assert_type(f8 > b_, np.bool) +assert_type(f8 > b, np.bool) +assert_type(f8 > c, np.bool) +assert_type(f8 > f, np.bool) +assert_type(f8 > i, np.bool) +assert_type(f8 > AR, npt.NDArray[np.bool]) +assert_type(f8 > SEQ, npt.NDArray[np.bool]) + +assert_type(f8 > f8, np.bool) +assert_type(i8 > f8, np.bool) +assert_type(f4 > f8, np.bool) +assert_type(i4 > f8, np.bool) +assert_type(b_ > f8, np.bool) +assert_type(b > f8, np.bool) +assert_type(c > f8, np.bool) +assert_type(f > f8, np.bool) +assert_type(i > f8, np.bool) +assert_type(AR > f8, npt.NDArray[np.bool]) +assert_type(SEQ > f8, npt.NDArray[np.bool]) + +assert_type(f4 > f8, np.bool) +assert_type(f4 > i8, np.bool) +assert_type(f4 > f4, np.bool) +assert_type(f4 > i4, np.bool) +assert_type(f4 > b_, np.bool) +assert_type(f4 > b, np.bool) +assert_type(f4 > c, np.bool) +assert_type(f4 > f, np.bool) +assert_type(f4 > i, np.bool) +assert_type(f4 > AR, npt.NDArray[np.bool]) +assert_type(f4 > SEQ, npt.NDArray[np.bool]) + +assert_type(f8 > f4, np.bool) +assert_type(i8 > f4, np.bool) +assert_type(f4 > f4, np.bool) +assert_type(i4 > f4, np.bool) +assert_type(b_ > f4, np.bool) +assert_type(b > f4, np.bool) +assert_type(c > f4, np.bool) +assert_type(f > f4, np.bool) +assert_type(i > f4, np.bool) +assert_type(AR > f4, npt.NDArray[np.bool]) +assert_type(SEQ > f4, npt.NDArray[np.bool]) + +# Int + +assert_type(i8 > i8, np.bool) +assert_type(i8 > u8, np.bool) +assert_type(i8 > i4, np.bool) +assert_type(i8 > u4, np.bool) +assert_type(i8 > b_, np.bool) +assert_type(i8 > b, np.bool) +assert_type(i8 > c, np.bool) +assert_type(i8 > f, np.bool) +assert_type(i8 > i, np.bool) +assert_type(i8 > AR, npt.NDArray[np.bool]) +assert_type(i8 > SEQ, npt.NDArray[np.bool]) + +assert_type(u8 > u8, np.bool) +assert_type(u8 > i4, np.bool) +assert_type(u8 > u4, np.bool) +assert_type(u8 > b_, np.bool) +assert_type(u8 > b, np.bool) +assert_type(u8 > c, np.bool) +assert_type(u8 > f, np.bool) +assert_type(u8 > i, np.bool) +assert_type(u8 > AR, npt.NDArray[np.bool]) +assert_type(u8 > SEQ, npt.NDArray[np.bool]) + +assert_type(i8 > i8, np.bool) +assert_type(u8 > i8, np.bool) +assert_type(i4 > i8, np.bool) +assert_type(u4 > i8, np.bool) +assert_type(b_ > i8, np.bool) +assert_type(b > i8, np.bool) +assert_type(c > i8, np.bool) +assert_type(f > i8, np.bool) +assert_type(i > i8, np.bool) +assert_type(AR > i8, npt.NDArray[np.bool]) +assert_type(SEQ > i8, npt.NDArray[np.bool]) + +assert_type(u8 > u8, np.bool) +assert_type(i4 > u8, np.bool) +assert_type(u4 > u8, np.bool) +assert_type(b_ > u8, np.bool) +assert_type(b > u8, np.bool) +assert_type(c > u8, np.bool) +assert_type(f > u8, np.bool) +assert_type(i > u8, np.bool) +assert_type(AR > u8, npt.NDArray[np.bool]) +assert_type(SEQ > u8, npt.NDArray[np.bool]) + +assert_type(i4 > i8, np.bool) +assert_type(i4 > i4, np.bool) +assert_type(i4 > i, np.bool) +assert_type(i4 > b_, np.bool) +assert_type(i4 > b, np.bool) +assert_type(i4 > AR, npt.NDArray[np.bool]) +assert_type(i4 > SEQ, npt.NDArray[np.bool]) + +assert_type(u4 > i8, np.bool) +assert_type(u4 > i4, np.bool) +assert_type(u4 > u8, np.bool) +assert_type(u4 > u4, np.bool) +assert_type(u4 > i, np.bool) +assert_type(u4 > b_, np.bool) +assert_type(u4 > b, np.bool) +assert_type(u4 > AR, npt.NDArray[np.bool]) +assert_type(u4 > SEQ, npt.NDArray[np.bool]) + +assert_type(i8 > i4, np.bool) +assert_type(i4 > i4, np.bool) +assert_type(i > i4, np.bool) +assert_type(b_ > i4, np.bool) +assert_type(b > i4, np.bool) +assert_type(AR > i4, npt.NDArray[np.bool]) +assert_type(SEQ > i4, npt.NDArray[np.bool]) + +assert_type(i8 > u4, np.bool) +assert_type(i4 > u4, np.bool) +assert_type(u8 > u4, np.bool) +assert_type(u4 > u4, np.bool) +assert_type(b_ > u4, np.bool) +assert_type(b > u4, np.bool) +assert_type(i > u4, np.bool) +assert_type(AR > u4, npt.NDArray[np.bool]) +assert_type(SEQ > u4, npt.NDArray[np.bool]) diff --git a/local_packages/numpy/typing/tests/data/reveal/constants.pyi b/local_packages/numpy/typing/tests/data/reveal/constants.pyi new file mode 100644 index 0000000..d4474f4 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/constants.pyi @@ -0,0 +1,14 @@ +from typing import Literal, assert_type + +import numpy as np + +assert_type(np.e, float) +assert_type(np.euler_gamma, float) +assert_type(np.inf, float) +assert_type(np.nan, float) +assert_type(np.pi, float) + +assert_type(np.little_endian, bool) + +assert_type(np.True_, np.bool[Literal[True]]) +assert_type(np.False_, np.bool[Literal[False]]) diff --git a/local_packages/numpy/typing/tests/data/reveal/ctypeslib.pyi b/local_packages/numpy/typing/tests/data/reveal/ctypeslib.pyi new file mode 100644 index 0000000..0564d72 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/ctypeslib.pyi @@ -0,0 +1,81 @@ +import ctypes as ct +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt +from numpy import ctypeslib + +AR_bool: npt.NDArray[np.bool] +AR_ubyte: npt.NDArray[np.ubyte] +AR_ushort: npt.NDArray[np.ushort] +AR_uintc: npt.NDArray[np.uintc] +AR_ulong: npt.NDArray[np.ulong] +AR_ulonglong: npt.NDArray[np.ulonglong] +AR_byte: npt.NDArray[np.byte] +AR_short: npt.NDArray[np.short] +AR_intc: npt.NDArray[np.intc] +AR_long: npt.NDArray[np.long] +AR_longlong: npt.NDArray[np.longlong] +AR_single: npt.NDArray[np.single] +AR_double: npt.NDArray[np.double] +AR_longdouble: npt.NDArray[np.longdouble] +AR_void: npt.NDArray[np.void] + +pointer: ct._Pointer[Any] + +assert_type(np.ctypeslib.c_intp(), ctypeslib.c_intp) + +assert_type(np.ctypeslib.ndpointer(), type[ctypeslib._ndptr[None]]) +assert_type(np.ctypeslib.ndpointer(dtype=np.float64), type[ctypeslib._ndptr[np.dtype[np.float64]]]) +assert_type(np.ctypeslib.ndpointer(dtype=float), type[ctypeslib._ndptr[np.dtype]]) +assert_type(np.ctypeslib.ndpointer(shape=(10, 3)), type[ctypeslib._ndptr[None]]) +assert_type(np.ctypeslib.ndpointer(np.int64, shape=(10, 3)), type[ctypeslib._concrete_ndptr[np.dtype[np.int64]]]) +assert_type(np.ctypeslib.ndpointer(int, shape=(1,)), type[np.ctypeslib._concrete_ndptr[np.dtype]]) + +assert_type(np.ctypeslib.as_ctypes_type(np.bool), type[ct.c_bool]) +assert_type(np.ctypeslib.as_ctypes_type(np.ubyte), type[ct.c_ubyte]) +assert_type(np.ctypeslib.as_ctypes_type(np.ushort), type[ct.c_ushort]) +assert_type(np.ctypeslib.as_ctypes_type(np.uintc), type[ct.c_uint]) +assert_type(np.ctypeslib.as_ctypes_type(np.byte), type[ct.c_byte]) +assert_type(np.ctypeslib.as_ctypes_type(np.short), type[ct.c_short]) +assert_type(np.ctypeslib.as_ctypes_type(np.intc), type[ct.c_int]) +assert_type(np.ctypeslib.as_ctypes_type(np.single), type[ct.c_float]) +assert_type(np.ctypeslib.as_ctypes_type(np.double), type[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes_type(ct.c_double), type[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes_type("q"), type[ct.c_longlong]) +assert_type(np.ctypeslib.as_ctypes_type([("i8", np.int64), ("f8", np.float64)]), type[Any]) +assert_type(np.ctypeslib.as_ctypes_type("i8"), type[Any]) +assert_type(np.ctypeslib.as_ctypes_type("f8"), type[Any]) + +assert_type(np.ctypeslib.as_ctypes(AR_bool.take(0)), ct.c_bool) +assert_type(np.ctypeslib.as_ctypes(AR_ubyte.take(0)), ct.c_ubyte) +assert_type(np.ctypeslib.as_ctypes(AR_ushort.take(0)), ct.c_ushort) +assert_type(np.ctypeslib.as_ctypes(AR_uintc.take(0)), ct.c_uint) + +assert_type(np.ctypeslib.as_ctypes(AR_byte.take(0)), ct.c_byte) +assert_type(np.ctypeslib.as_ctypes(AR_short.take(0)), ct.c_short) +assert_type(np.ctypeslib.as_ctypes(AR_intc.take(0)), ct.c_int) +assert_type(np.ctypeslib.as_ctypes(AR_single.take(0)), ct.c_float) +assert_type(np.ctypeslib.as_ctypes(AR_double.take(0)), ct.c_double) +assert_type(np.ctypeslib.as_ctypes(AR_void.take(0)), Any) +assert_type(np.ctypeslib.as_ctypes(AR_bool), ct.Array[ct.c_bool]) +assert_type(np.ctypeslib.as_ctypes(AR_ubyte), ct.Array[ct.c_ubyte]) +assert_type(np.ctypeslib.as_ctypes(AR_ushort), ct.Array[ct.c_ushort]) +assert_type(np.ctypeslib.as_ctypes(AR_uintc), ct.Array[ct.c_uint]) +assert_type(np.ctypeslib.as_ctypes(AR_byte), ct.Array[ct.c_byte]) +assert_type(np.ctypeslib.as_ctypes(AR_short), ct.Array[ct.c_short]) +assert_type(np.ctypeslib.as_ctypes(AR_intc), ct.Array[ct.c_int]) +assert_type(np.ctypeslib.as_ctypes(AR_single), ct.Array[ct.c_float]) +assert_type(np.ctypeslib.as_ctypes(AR_double), ct.Array[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes(AR_void), ct.Array[Any]) + +assert_type(np.ctypeslib.as_array(AR_ubyte), npt.NDArray[np.ubyte]) +assert_type(np.ctypeslib.as_array(1), npt.NDArray[Any]) +assert_type(np.ctypeslib.as_array(pointer), npt.NDArray[Any]) + +assert_type(np.ctypeslib.as_ctypes_type(np.long), type[ct.c_long]) +assert_type(np.ctypeslib.as_ctypes_type(np.ulong), type[ct.c_ulong]) +assert_type(np.ctypeslib.as_ctypes(AR_ulong), ct.Array[ct.c_ulong]) +assert_type(np.ctypeslib.as_ctypes(AR_long), ct.Array[ct.c_long]) +assert_type(np.ctypeslib.as_ctypes(AR_long.take(0)), ct.c_long) +assert_type(np.ctypeslib.as_ctypes(AR_ulong.take(0)), ct.c_ulong) diff --git a/local_packages/numpy/typing/tests/data/reveal/datasource.pyi b/local_packages/numpy/typing/tests/data/reveal/datasource.pyi new file mode 100644 index 0000000..9f01791 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/datasource.pyi @@ -0,0 +1,23 @@ +from pathlib import Path +from typing import IO, Any, assert_type + +import numpy as np + +path1: Path +path2: str + +d1 = np.lib.npyio.DataSource(path1) +d2 = np.lib.npyio.DataSource(path2) +d3 = np.lib.npyio.DataSource(None) + +assert_type(d1.abspath("..."), str) +assert_type(d2.abspath("..."), str) +assert_type(d3.abspath("..."), str) + +assert_type(d1.exists("..."), bool) +assert_type(d2.exists("..."), bool) +assert_type(d3.exists("..."), bool) + +assert_type(d1.open("...", "r"), IO[Any]) +assert_type(d2.open("...", encoding="utf8"), IO[Any]) +assert_type(d3.open("...", newline="/n"), IO[Any]) diff --git a/local_packages/numpy/typing/tests/data/reveal/dtype.pyi b/local_packages/numpy/typing/tests/data/reveal/dtype.pyi new file mode 100644 index 0000000..7f670b3 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/dtype.pyi @@ -0,0 +1,132 @@ +import ctypes as ct +import datetime as dt +from decimal import Decimal +from fractions import Fraction +from typing import Any, Literal, LiteralString, TypeAlias, assert_type + +import numpy as np +from numpy.dtypes import StringDType + +# a combination of likely `object` dtype-like candidates (no `_co`) +_PyObjectLike: TypeAlias = Decimal | Fraction | dt.datetime | dt.timedelta + +dtype_U: np.dtype[np.str_] +dtype_V: np.dtype[np.void] +dtype_i8: np.dtype[np.int64] + +py_object: type[_PyObjectLike] +py_character: type[str | bytes] + +ct_floating: type[ct.c_float | ct.c_double | ct.c_longdouble] +ct_number: type[ct.c_uint8 | ct.c_float] +ct_generic: type[ct.c_bool | ct.c_char] + +cs_integer: Literal["u1", "V", "S"] +cs_generic: Literal["H", "U", "h", "|M8[Y]", "?"] + +dt_inexact: np.dtype[np.inexact] +dt_string: StringDType + +assert_type(np.dtype(np.float64), np.dtype[np.float64]) +assert_type(np.dtype(np.float64, metadata={"test": "test"}), np.dtype[np.float64]) +assert_type(np.dtype(np.int64), np.dtype[np.int64]) + +# String aliases +assert_type(np.dtype("float64"), np.dtype[np.float64]) +assert_type(np.dtype("float32"), np.dtype[np.float32]) +assert_type(np.dtype("int64"), np.dtype[np.int64]) +assert_type(np.dtype("int32"), np.dtype[np.int32]) +assert_type(np.dtype("bool"), np.dtype[np.bool]) +assert_type(np.dtype("bytes"), np.dtype[np.bytes_]) +assert_type(np.dtype("str"), np.dtype[np.str_]) + +# Python types +assert_type(np.dtype(bool), np.dtype[np.bool]) +assert_type(np.dtype(int), np.dtype[np.int_ | np.bool]) +assert_type(np.dtype(float), np.dtype[np.float64 | np.int_ | np.bool]) +assert_type(np.dtype(complex), np.dtype[np.complex128 | np.float64 | np.int_ | np.bool]) +assert_type(np.dtype(py_object), np.dtype[np.object_]) +assert_type(np.dtype(str), np.dtype[np.str_]) +assert_type(np.dtype(bytes), np.dtype[np.bytes_]) +assert_type(np.dtype(memoryview), np.dtype[np.void]) +assert_type(np.dtype(py_character), np.dtype[np.character]) + +# object types +assert_type(np.dtype(list), np.dtype[np.object_]) +assert_type(np.dtype(dt.datetime), np.dtype[np.object_]) +assert_type(np.dtype(dt.timedelta), np.dtype[np.object_]) +assert_type(np.dtype(Decimal), np.dtype[np.object_]) +assert_type(np.dtype(Fraction), np.dtype[np.object_]) + +# char-codes +assert_type(np.dtype("?"), np.dtype[np.bool]) +assert_type(np.dtype("|b1"), np.dtype[np.bool]) +assert_type(np.dtype("u1"), np.dtype[np.uint8]) +assert_type(np.dtype("l"), np.dtype[np.long]) +assert_type(np.dtype("longlong"), np.dtype[np.longlong]) +assert_type(np.dtype(">g"), np.dtype[np.longdouble]) +assert_type(np.dtype(cs_integer), np.dtype[np.integer]) + +# ctypes +assert_type(np.dtype(ct.c_double), np.dtype[np.float64]) # see numpy/numpy#29155 +assert_type(np.dtype(ct.c_longlong), np.dtype[np.longlong]) +assert_type(np.dtype(ct.c_uint32), np.dtype[np.uint32]) +assert_type(np.dtype(ct.c_bool), np.dtype[np.bool]) +assert_type(np.dtype(ct.c_char), np.dtype[np.bytes_]) +assert_type(np.dtype(ct.py_object), np.dtype[np.object_]) + +# Special case for None +assert_type(np.dtype(None), np.dtype[np.float64]) + +# dtypes of dtypes +assert_type(np.dtype(np.dtype(np.float64)), np.dtype[np.float64]) +assert_type(np.dtype(dt_inexact), np.dtype[np.inexact]) + +# Parameterized dtypes +assert_type(np.dtype("S8"), np.dtype) + +# Void +assert_type(np.dtype(("U", 10)), np.dtype[np.void]) +assert_type(np.dtype({"formats": (int, "u8"), "names": ("n", "B")}), np.dtype[np.void]) + +# StringDType +assert_type(np.dtype(dt_string), StringDType) +assert_type(np.dtype("T"), StringDType) +assert_type(np.dtype("=T"), StringDType) +assert_type(np.dtype("|T"), StringDType) + +# Methods and attributes +assert_type(dtype_U.base, np.dtype) +assert_type(dtype_U.subdtype, tuple[np.dtype, tuple[Any, ...]] | None) +assert_type(dtype_U.newbyteorder(), np.dtype[np.str_]) +assert_type(dtype_U.type, type[np.str_]) +assert_type(dtype_U.name, LiteralString) +assert_type(dtype_U.names, tuple[str, ...] | None) + +assert_type(dtype_U * 0, np.dtype[np.str_]) +assert_type(dtype_U * 1, np.dtype[np.str_]) +assert_type(dtype_U * 2, np.dtype[np.str_]) + +assert_type(dtype_i8 * 0, np.dtype[np.void]) +assert_type(dtype_i8 * 1, np.dtype[np.int64]) +assert_type(dtype_i8 * 2, np.dtype[np.void]) + +assert_type(0 * dtype_U, np.dtype[np.str_]) +assert_type(1 * dtype_U, np.dtype[np.str_]) +assert_type(2 * dtype_U, np.dtype[np.str_]) + +assert_type(0 * dtype_i8, np.dtype) +assert_type(1 * dtype_i8, np.dtype) +assert_type(2 * dtype_i8, np.dtype) + +assert_type(dtype_V["f0"], np.dtype) +assert_type(dtype_V[0], np.dtype) +assert_type(dtype_V[["f0", "f1"]], np.dtype[np.void]) +assert_type(dtype_V[["f0"]], np.dtype[np.void]) + +class _D: + __numpy_dtype__: np.dtype[np.int8] + +assert_type(np.dtype(_D()), np.dtype[np.int8]) diff --git a/local_packages/numpy/typing/tests/data/reveal/einsumfunc.pyi b/local_packages/numpy/typing/tests/data/reveal/einsumfunc.pyi new file mode 100644 index 0000000..cc58f00 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/einsumfunc.pyi @@ -0,0 +1,39 @@ +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +AR_LIKE_b: list[bool] +AR_LIKE_u: list[np.uint32] +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_c: list[complex] +AR_LIKE_U: list[str] +AR_o: npt.NDArray[np.object_] + +OUT_f: npt.NDArray[np.float64] + +assert_type(np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_b), Any) +assert_type(np.einsum("i,i->i", AR_o, AR_o), Any) +assert_type(np.einsum("i,i->i", AR_LIKE_u, AR_LIKE_u), Any) +assert_type(np.einsum("i,i->i", AR_LIKE_i, AR_LIKE_i), Any) +assert_type(np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f), Any) +assert_type(np.einsum("i,i->i", AR_LIKE_c, AR_LIKE_c), Any) +assert_type(np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_i), Any) +assert_type(np.einsum("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c), Any) + +assert_type(np.einsum("i,i->i", AR_LIKE_c, AR_LIKE_c, out=OUT_f), npt.NDArray[np.float64]) +assert_type(np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsafe", out=OUT_f), npt.NDArray[np.float64]) +assert_type(np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f, dtype="c16"), Any) +assert_type(np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsafe"), Any) + +assert_type(np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_b), tuple[list[Any], str]) +assert_type(np.einsum_path("i,i->i", AR_LIKE_u, AR_LIKE_u), tuple[list[Any], str]) +assert_type(np.einsum_path("i,i->i", AR_LIKE_i, AR_LIKE_i), tuple[list[Any], str]) +assert_type(np.einsum_path("i,i->i", AR_LIKE_f, AR_LIKE_f), tuple[list[Any], str]) +assert_type(np.einsum_path("i,i->i", AR_LIKE_c, AR_LIKE_c), tuple[list[Any], str]) +assert_type(np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_i), tuple[list[Any], str]) +assert_type(np.einsum_path("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c), tuple[list[Any], str]) + +assert_type(np.einsum([[1, 1], [1, 1]], AR_LIKE_i, AR_LIKE_i), Any) +assert_type(np.einsum_path([[1, 1], [1, 1]], AR_LIKE_i, AR_LIKE_i), tuple[list[Any], str]) diff --git a/local_packages/numpy/typing/tests/data/reveal/emath.pyi b/local_packages/numpy/typing/tests/data/reveal/emath.pyi new file mode 100644 index 0000000..1d7bff8 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/emath.pyi @@ -0,0 +1,54 @@ +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +f8: np.float64 +c16: np.complex128 + +assert_type(np.emath.sqrt(f8), Any) +assert_type(np.emath.sqrt(AR_f8), npt.NDArray[Any]) +assert_type(np.emath.sqrt(c16), np.complexfloating) +assert_type(np.emath.sqrt(AR_c16), npt.NDArray[np.complexfloating]) + +assert_type(np.emath.log(f8), Any) +assert_type(np.emath.log(AR_f8), npt.NDArray[Any]) +assert_type(np.emath.log(c16), np.complexfloating) +assert_type(np.emath.log(AR_c16), npt.NDArray[np.complexfloating]) + +assert_type(np.emath.log10(f8), Any) +assert_type(np.emath.log10(AR_f8), npt.NDArray[Any]) +assert_type(np.emath.log10(c16), np.complexfloating) +assert_type(np.emath.log10(AR_c16), npt.NDArray[np.complexfloating]) + +assert_type(np.emath.log2(f8), Any) +assert_type(np.emath.log2(AR_f8), npt.NDArray[Any]) +assert_type(np.emath.log2(c16), np.complexfloating) +assert_type(np.emath.log2(AR_c16), npt.NDArray[np.complexfloating]) + +assert_type(np.emath.logn(f8, 2), Any) +assert_type(np.emath.logn(AR_f8, 4), npt.NDArray[Any]) +assert_type(np.emath.logn(f8, 1j), np.complexfloating) +assert_type(np.emath.logn(AR_c16, 1.5), npt.NDArray[np.complexfloating]) + +assert_type(np.emath.power(f8, 2), Any) +assert_type(np.emath.power(AR_f8, 4), npt.NDArray[Any]) +assert_type(np.emath.power(f8, 2j), np.complexfloating) +assert_type(np.emath.power(AR_c16, 1.5), npt.NDArray[np.complexfloating]) + +assert_type(np.emath.arccos(f8), Any) +assert_type(np.emath.arccos(AR_f8), npt.NDArray[Any]) +assert_type(np.emath.arccos(c16), np.complexfloating) +assert_type(np.emath.arccos(AR_c16), npt.NDArray[np.complexfloating]) + +assert_type(np.emath.arcsin(f8), Any) +assert_type(np.emath.arcsin(AR_f8), npt.NDArray[Any]) +assert_type(np.emath.arcsin(c16), np.complexfloating) +assert_type(np.emath.arcsin(AR_c16), npt.NDArray[np.complexfloating]) + +assert_type(np.emath.arctanh(f8), Any) +assert_type(np.emath.arctanh(AR_f8), npt.NDArray[Any]) +assert_type(np.emath.arctanh(c16), np.complexfloating) +assert_type(np.emath.arctanh(AR_c16), npt.NDArray[np.complexfloating]) diff --git a/local_packages/numpy/typing/tests/data/reveal/fft.pyi b/local_packages/numpy/typing/tests/data/reveal/fft.pyi new file mode 100644 index 0000000..dacd2b8 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/fft.pyi @@ -0,0 +1,37 @@ +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +AR_LIKE_f8: list[float] + +assert_type(np.fft.fftshift(AR_f8), npt.NDArray[np.float64]) +assert_type(np.fft.fftshift(AR_LIKE_f8, axes=0), npt.NDArray[Any]) + +assert_type(np.fft.ifftshift(AR_f8), npt.NDArray[np.float64]) +assert_type(np.fft.ifftshift(AR_LIKE_f8, axes=0), npt.NDArray[Any]) + +assert_type(np.fft.fftfreq(5, AR_f8), npt.NDArray[np.floating]) +assert_type(np.fft.fftfreq(np.int64(), AR_c16), npt.NDArray[np.complexfloating]) + +assert_type(np.fft.fftfreq(5, AR_f8), npt.NDArray[np.floating]) +assert_type(np.fft.fftfreq(np.int64(), AR_c16), npt.NDArray[np.complexfloating]) + +assert_type(np.fft.fft(AR_f8), npt.NDArray[np.complex128]) +assert_type(np.fft.ifft(AR_f8, axis=1), npt.NDArray[np.complex128]) +assert_type(np.fft.rfft(AR_f8, n=None), npt.NDArray[np.complex128]) +assert_type(np.fft.irfft(AR_f8, norm="ortho"), npt.NDArray[np.float64]) +assert_type(np.fft.hfft(AR_f8, n=2), npt.NDArray[np.float64]) +assert_type(np.fft.ihfft(AR_f8), npt.NDArray[np.complex128]) + +assert_type(np.fft.fftn(AR_f8), npt.NDArray[np.complex128]) +assert_type(np.fft.ifftn(AR_f8), npt.NDArray[np.complex128]) +assert_type(np.fft.rfftn(AR_f8), npt.NDArray[np.complex128]) +assert_type(np.fft.irfftn(AR_f8), npt.NDArray[np.float64]) + +assert_type(np.fft.rfft2(AR_f8), npt.NDArray[np.complex128]) +assert_type(np.fft.ifft2(AR_f8), npt.NDArray[np.complex128]) +assert_type(np.fft.fft2(AR_f8), npt.NDArray[np.complex128]) +assert_type(np.fft.irfft2(AR_f8), npt.NDArray[np.float64]) diff --git a/local_packages/numpy/typing/tests/data/reveal/flatiter.pyi b/local_packages/numpy/typing/tests/data/reveal/flatiter.pyi new file mode 100644 index 0000000..98d61a6 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/flatiter.pyi @@ -0,0 +1,86 @@ +from typing import Any, TypeAlias, assert_type + +import numpy as np + +_ArrayND: TypeAlias = np.ndarray[tuple[Any, ...], np.dtypes.StrDType] +_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtypes.BytesDType] +_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtypes.Int8DType] + +_a_nd: np.flatiter[_ArrayND] +_a_1d: np.flatiter[_Array1D] +_a_2d: np.flatiter[_Array2D] + +### + +# .base +assert_type(_a_nd.base, _ArrayND) +assert_type(_a_1d.base, _Array1D) +assert_type(_a_2d.base, _Array2D) + +# .coords +assert_type(_a_nd.coords, tuple[Any, ...]) +assert_type(_a_1d.coords, tuple[int]) +assert_type(_a_2d.coords, tuple[int, int]) + +# .index +assert_type(_a_nd.index, int) +assert_type(_a_1d.index, int) +assert_type(_a_2d.index, int) + +# .__len__() +assert_type(len(_a_nd), int) +assert_type(len(_a_1d), int) +assert_type(len(_a_2d), int) + +# .__iter__() +assert_type(iter(_a_nd), np.flatiter[_ArrayND]) +assert_type(iter(_a_1d), np.flatiter[_Array1D]) +assert_type(iter(_a_2d), np.flatiter[_Array2D]) + +# .__next__() +assert_type(next(_a_nd), np.str_) +assert_type(next(_a_1d), np.bytes_) +assert_type(next(_a_2d), np.int8) + +# .__getitem__(()) +assert_type(_a_nd[()], _ArrayND) +assert_type(_a_1d[()], _Array1D) +assert_type(_a_2d[()], _Array2D) +# .__getitem__(int) +assert_type(_a_nd[0], np.str_) +assert_type(_a_1d[0], np.bytes_) +assert_type(_a_2d[0], np.int8) +# .__getitem__(slice) +assert_type(_a_nd[::], np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d[::], np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d[::], np.ndarray[tuple[int], np.dtypes.Int8DType]) +# .__getitem__(EllipsisType) +assert_type(_a_nd[...], np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d[...], np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d[...], np.ndarray[tuple[int], np.dtypes.Int8DType]) +# .__getitem__(list[!]) +assert_type(_a_nd[[]], np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d[[]], np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d[[]], np.ndarray[tuple[int], np.dtypes.Int8DType]) +# .__getitem__(list[int]) +assert_type(_a_nd[[0]], np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d[[0]], np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d[[0]], np.ndarray[tuple[int], np.dtypes.Int8DType]) +# .__getitem__(list[list[int]]) +assert_type(_a_nd[[[0]]], np.ndarray[tuple[int, int], np.dtypes.StrDType]) +assert_type(_a_1d[[[0]]], np.ndarray[tuple[int, int], np.dtypes.BytesDType]) +assert_type(_a_2d[[[0]]], np.ndarray[tuple[int, int], np.dtypes.Int8DType]) +# .__getitem__(list[list[list[list[int]]]]) +assert_type(_a_nd[[[[[0]]]]], np.ndarray[tuple[Any, ...], np.dtypes.StrDType]) +assert_type(_a_1d[[[[[0]]]]], np.ndarray[tuple[Any, ...], np.dtypes.BytesDType]) +assert_type(_a_2d[[[[[0]]]]], np.ndarray[tuple[Any, ...], np.dtypes.Int8DType]) + +# __array__() +assert_type(_a_nd.__array__(), np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d.__array__(), np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d.__array__(), np.ndarray[tuple[int], np.dtypes.Int8DType]) + +# .copy() +assert_type(_a_nd.copy(), np.ndarray[tuple[int], np.dtypes.StrDType]) +assert_type(_a_1d.copy(), np.ndarray[tuple[int], np.dtypes.BytesDType]) +assert_type(_a_2d.copy(), np.ndarray[tuple[int], np.dtypes.Int8DType]) diff --git a/local_packages/numpy/typing/tests/data/reveal/fromnumeric.pyi b/local_packages/numpy/typing/tests/data/reveal/fromnumeric.pyi new file mode 100644 index 0000000..58ea2c5 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -0,0 +1,347 @@ +"""Tests for :mod:`_core.fromnumeric`.""" + +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +class NDArraySubclass(npt.NDArray[np.complex128]): ... + +AR_b: npt.NDArray[np.bool] +AR_f4: npt.NDArray[np.float32] +AR_c16: npt.NDArray[np.complex128] +AR_u8: npt.NDArray[np.uint64] +AR_i8: npt.NDArray[np.int64] +AR_O: npt.NDArray[np.object_] +AR_subclass: NDArraySubclass +AR_m: npt.NDArray[np.timedelta64] +AR_0d: np.ndarray[tuple[()]] +AR_1d: np.ndarray[tuple[int]] +AR_nd: np.ndarray + +b: np.bool +f4: np.float32 +i8: np.int64 +f: float + +# integer‑dtype subclass for argmin/argmax +class NDArrayIntSubclass(npt.NDArray[np.intp]): ... +AR_sub_i: NDArrayIntSubclass + +assert_type(np.take(b, 0), np.bool) +assert_type(np.take(f4, 0), np.float32) +assert_type(np.take(f, 0), Any) +assert_type(np.take(AR_b, 0), np.bool) +assert_type(np.take(AR_f4, 0), np.float32) +assert_type(np.take(AR_b, [0]), npt.NDArray[np.bool]) +assert_type(np.take(AR_f4, [0]), npt.NDArray[np.float32]) +assert_type(np.take([1], [0]), npt.NDArray[Any]) +assert_type(np.take(AR_f4, [0], out=AR_subclass), NDArraySubclass) + +assert_type(np.reshape(b, 1), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.reshape(f4, 1), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.reshape(f, 1), np.ndarray[tuple[int], np.dtype]) +assert_type(np.reshape(AR_b, 1), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.reshape(AR_f4, 1), np.ndarray[tuple[int], np.dtype[np.float32]]) + +assert_type(np.choose(1, [True, True]), Any) +assert_type(np.choose([1], [True, True]), npt.NDArray[Any]) +assert_type(np.choose([1], AR_b), npt.NDArray[np.bool]) +assert_type(np.choose([1], AR_b, out=AR_f4), npt.NDArray[np.float32]) + +assert_type(np.repeat(b, 1), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.repeat(b, 1, axis=0), npt.NDArray[np.bool]) +assert_type(np.repeat(f4, 1), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.repeat(f, 1), np.ndarray[tuple[int], np.dtype[Any]]) +assert_type(np.repeat(AR_b, 1), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.repeat(AR_f4, 1), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.repeat(AR_f4, 1, axis=0), npt.NDArray[np.float32]) + +# TODO: array_bdd tests for np.put() + +assert_type(np.swapaxes([[0, 1]], 0, 0), npt.NDArray[Any]) +assert_type(np.swapaxes(AR_b, 0, 0), npt.NDArray[np.bool]) +assert_type(np.swapaxes(AR_f4, 0, 0), npt.NDArray[np.float32]) + +assert_type(np.transpose(b), npt.NDArray[np.bool]) +assert_type(np.transpose(f4), npt.NDArray[np.float32]) +assert_type(np.transpose(f), npt.NDArray[Any]) +assert_type(np.transpose(AR_b), npt.NDArray[np.bool]) +assert_type(np.transpose(AR_f4), npt.NDArray[np.float32]) + +assert_type(np.partition(b, 0, axis=None), npt.NDArray[np.bool]) +assert_type(np.partition(f4, 0, axis=None), npt.NDArray[np.float32]) +assert_type(np.partition(f, 0, axis=None), npt.NDArray[Any]) +assert_type(np.partition(AR_b, 0), npt.NDArray[np.bool]) +assert_type(np.partition(AR_f4, 0), npt.NDArray[np.float32]) + +assert_type(np.argpartition(b, 0), npt.NDArray[np.intp]) +assert_type(np.argpartition(f4, 0), npt.NDArray[np.intp]) +assert_type(np.argpartition(f, 0), npt.NDArray[np.intp]) +assert_type(np.argpartition(AR_b, 0), npt.NDArray[np.intp]) +assert_type(np.argpartition(AR_f4, 0), npt.NDArray[np.intp]) + +assert_type(np.sort([2, 1], 0), npt.NDArray[Any]) +assert_type(np.sort(AR_b, 0), npt.NDArray[np.bool]) +assert_type(np.sort(AR_f4, 0), npt.NDArray[np.float32]) + +assert_type(np.argsort(AR_b, 0), npt.NDArray[np.intp]) +assert_type(np.argsort(AR_f4, 0), npt.NDArray[np.intp]) + +assert_type(np.argmax(AR_b), np.intp) +assert_type(np.argmax(AR_f4), np.intp) +assert_type(np.argmax(AR_b, axis=0), Any) +assert_type(np.argmax(AR_f4, axis=0), Any) +assert_type(np.argmax(AR_f4, out=AR_sub_i), NDArrayIntSubclass) + +assert_type(np.argmin(AR_b), np.intp) +assert_type(np.argmin(AR_f4), np.intp) +assert_type(np.argmin(AR_b, axis=0), Any) +assert_type(np.argmin(AR_f4, axis=0), Any) +assert_type(np.argmin(AR_f4, out=AR_sub_i), NDArrayIntSubclass) + +assert_type(np.searchsorted(AR_b[0], 0), np.intp) +assert_type(np.searchsorted(AR_f4[0], 0), np.intp) +assert_type(np.searchsorted(AR_b[0], [0]), npt.NDArray[np.intp]) +assert_type(np.searchsorted(AR_f4[0], [0]), npt.NDArray[np.intp]) + +assert_type(np.resize(b, (5, 5)), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.resize(f4, (5, 5)), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.resize(f, (5, 5)), np.ndarray[tuple[int, int], np.dtype]) +assert_type(np.resize(AR_b, (5, 5)), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.resize(AR_f4, (5, 5)), np.ndarray[tuple[int, int], np.dtype[np.float32]]) + +assert_type(np.squeeze(b), np.bool) +assert_type(np.squeeze(f4), np.float32) +assert_type(np.squeeze(f), npt.NDArray[Any]) +assert_type(np.squeeze(AR_b), npt.NDArray[np.bool]) +assert_type(np.squeeze(AR_f4), npt.NDArray[np.float32]) + +assert_type(np.diagonal(AR_b), npt.NDArray[np.bool]) +assert_type(np.diagonal(AR_f4), npt.NDArray[np.float32]) + +assert_type(np.trace(AR_b), Any) +assert_type(np.trace(AR_f4), Any) +assert_type(np.trace(AR_f4, out=AR_subclass), NDArraySubclass) +assert_type(np.trace(AR_f4, out=AR_subclass, dtype=None), NDArraySubclass) + +assert_type(np.ravel(b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.ravel(f4), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.ravel(f), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.ravel(AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.ravel(AR_f4), np.ndarray[tuple[int], np.dtype[np.float32]]) + +assert_type(np.nonzero(AR_b), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) +assert_type(np.nonzero(AR_f4), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) +assert_type(np.nonzero(AR_1d), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) +assert_type(np.nonzero(AR_nd), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) + +assert_type(np.shape(b), tuple[()]) +assert_type(np.shape(f), tuple[()]) +assert_type(np.shape([1]), tuple[int]) +assert_type(np.shape([[2]]), tuple[int, int]) +assert_type(np.shape([[[3]]]), tuple[Any, ...]) +assert_type(np.shape(AR_b), tuple[Any, ...]) +assert_type(np.shape(AR_nd), tuple[Any, ...]) +# these fail on mypy, but it works as expected with pyright/pylance +# assert_type(np.shape(AR_0d), tuple[()]) +# assert_type(np.shape(AR_1d), tuple[int]) +# assert_type(np.shape(AR_2d), tuple[int, int]) + +assert_type(np.compress([True], b), npt.NDArray[np.bool]) +assert_type(np.compress([True], f4), npt.NDArray[np.float32]) +assert_type(np.compress([True], f), npt.NDArray[Any]) +assert_type(np.compress([True], AR_b), npt.NDArray[np.bool]) +assert_type(np.compress([True], AR_f4), npt.NDArray[np.float32]) + +assert_type(np.clip(b, 0, 1.0), np.bool) +assert_type(np.clip(f4, -1, 1), np.float32) +assert_type(np.clip(f, 0, 1), Any) +assert_type(np.clip(AR_b, 0, 1), npt.NDArray[np.bool]) +assert_type(np.clip(AR_f4, 0, 1), npt.NDArray[np.float32]) +assert_type(np.clip([0], 0, 1), npt.NDArray[Any]) +assert_type(np.clip(AR_b, 0, 1, out=AR_subclass), NDArraySubclass) + +assert_type(np.sum(b), np.bool) +assert_type(np.sum(f4), np.float32) +assert_type(np.sum(f), Any) +assert_type(np.sum(AR_b), np.bool) +assert_type(np.sum(AR_f4), np.float32) +assert_type(np.sum(AR_b, axis=0), Any) +assert_type(np.sum(AR_f4, axis=0), Any) +assert_type(np.sum(AR_f4, out=AR_subclass), NDArraySubclass) +assert_type(np.sum(AR_f4, dtype=np.float64), np.float64) +assert_type(np.sum(AR_f4, None, np.float64), np.float64) +assert_type(np.sum(AR_f4, dtype=np.float64, keepdims=False), np.float64) +assert_type(np.sum(AR_f4, None, np.float64, keepdims=False), np.float64) +assert_type(np.sum(AR_f4, dtype=np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64]) +assert_type(np.sum(AR_f4, None, np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64]) + +assert_type(np.all(b), np.bool) +assert_type(np.all(f4), np.bool) +assert_type(np.all(f), np.bool) +assert_type(np.all(AR_b), np.bool) +assert_type(np.all(AR_f4), np.bool) +assert_type(np.all(AR_b, axis=0), Any) +assert_type(np.all(AR_f4, axis=0), Any) +assert_type(np.all(AR_b, keepdims=True), Any) +assert_type(np.all(AR_f4, keepdims=True), Any) +assert_type(np.all(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.any(b), np.bool) +assert_type(np.any(f4), np.bool) +assert_type(np.any(f), np.bool) +assert_type(np.any(AR_b), np.bool) +assert_type(np.any(AR_f4), np.bool) +assert_type(np.any(AR_b, axis=0), Any) +assert_type(np.any(AR_f4, axis=0), Any) +assert_type(np.any(AR_b, keepdims=True), Any) +assert_type(np.any(AR_f4, keepdims=True), Any) +assert_type(np.any(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.cumsum(b), npt.NDArray[np.bool]) +assert_type(np.cumsum(f4), npt.NDArray[np.float32]) +assert_type(np.cumsum(f), npt.NDArray[Any]) +assert_type(np.cumsum(AR_b), npt.NDArray[np.bool]) +assert_type(np.cumsum(AR_f4), npt.NDArray[np.float32]) +assert_type(np.cumsum(f, dtype=float), npt.NDArray[Any]) +assert_type(np.cumsum(f, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.cumsum(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.cumulative_sum(b), npt.NDArray[np.bool]) +assert_type(np.cumulative_sum(f4), npt.NDArray[np.float32]) +assert_type(np.cumulative_sum(f), npt.NDArray[Any]) +assert_type(np.cumulative_sum(AR_b), npt.NDArray[np.bool]) +assert_type(np.cumulative_sum(AR_f4), npt.NDArray[np.float32]) +assert_type(np.cumulative_sum(f, dtype=float), npt.NDArray[Any]) +assert_type(np.cumulative_sum(f, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.cumulative_sum(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.ptp(b), np.bool) +assert_type(np.ptp(f4), np.float32) +assert_type(np.ptp(f), Any) +assert_type(np.ptp(AR_b), np.bool) +assert_type(np.ptp(AR_f4), np.float32) +assert_type(np.ptp(AR_b, axis=0), Any) +assert_type(np.ptp(AR_f4, axis=0), Any) +assert_type(np.ptp(AR_b, keepdims=True), Any) +assert_type(np.ptp(AR_f4, keepdims=True), Any) +assert_type(np.ptp(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.amax(b), np.bool) +assert_type(np.amax(f4), np.float32) +assert_type(np.amax(f), Any) +assert_type(np.amax(AR_b), np.bool) +assert_type(np.amax(AR_f4), np.float32) +assert_type(np.amax(AR_b, axis=0), Any) +assert_type(np.amax(AR_f4, axis=0), Any) +assert_type(np.amax(AR_b, keepdims=True), Any) +assert_type(np.amax(AR_f4, keepdims=True), Any) +assert_type(np.amax(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.amin(b), np.bool) +assert_type(np.amin(f4), np.float32) +assert_type(np.amin(f), Any) +assert_type(np.amin(AR_b), np.bool) +assert_type(np.amin(AR_f4), np.float32) +assert_type(np.amin(AR_b, axis=0), Any) +assert_type(np.amin(AR_f4, axis=0), Any) +assert_type(np.amin(AR_b, keepdims=True), Any) +assert_type(np.amin(AR_f4, keepdims=True), Any) +assert_type(np.amin(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.prod(AR_b), np.int_) +assert_type(np.prod(AR_u8), np.uint64) +assert_type(np.prod(AR_i8), np.int64) +assert_type(np.prod(AR_f4), np.floating) +assert_type(np.prod(AR_c16), np.complexfloating) +assert_type(np.prod(AR_O), Any) +assert_type(np.prod(AR_f4, axis=0), Any) +assert_type(np.prod(AR_f4, keepdims=True), Any) +assert_type(np.prod(AR_f4, dtype=np.float64), np.float64) +assert_type(np.prod(AR_f4, dtype=float), Any) +assert_type(np.prod(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.cumprod(AR_b), npt.NDArray[np.int_]) +assert_type(np.cumprod(AR_u8), npt.NDArray[np.uint64]) +assert_type(np.cumprod(AR_i8), npt.NDArray[np.int64]) +assert_type(np.cumprod(AR_f4), npt.NDArray[np.floating]) +assert_type(np.cumprod(AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.cumprod(AR_O), npt.NDArray[np.object_]) +assert_type(np.cumprod(AR_f4, axis=0), npt.NDArray[np.floating]) +assert_type(np.cumprod(AR_f4, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.cumprod(AR_f4, dtype=float), npt.NDArray[Any]) +assert_type(np.cumprod(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.cumulative_prod(AR_b), npt.NDArray[np.int_]) +assert_type(np.cumulative_prod(AR_u8), npt.NDArray[np.uint64]) +assert_type(np.cumulative_prod(AR_i8), npt.NDArray[np.int64]) +assert_type(np.cumulative_prod(AR_f4), npt.NDArray[np.floating]) +assert_type(np.cumulative_prod(AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.cumulative_prod(AR_O), npt.NDArray[np.object_]) +assert_type(np.cumulative_prod(AR_f4, axis=0), npt.NDArray[np.floating]) +assert_type(np.cumulative_prod(AR_f4, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.cumulative_prod(AR_f4, dtype=float), npt.NDArray[Any]) +assert_type(np.cumulative_prod(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.ndim(b), int) +assert_type(np.ndim(f4), int) +assert_type(np.ndim(f), int) +assert_type(np.ndim(AR_b), int) +assert_type(np.ndim(AR_f4), int) + +assert_type(np.size(b), int) +assert_type(np.size(f4), int) +assert_type(np.size(f), int) +assert_type(np.size(AR_b), int) +assert_type(np.size(AR_f4), int) + +assert_type(np.around(b), np.float16) +assert_type(np.around(f), Any) +assert_type(np.around(i8), np.int64) +assert_type(np.around(f4), np.float32) +assert_type(np.around(AR_b), npt.NDArray[np.float16]) +assert_type(np.around(AR_i8), npt.NDArray[np.int64]) +assert_type(np.around(AR_f4), npt.NDArray[np.float32]) +assert_type(np.around([1.5]), npt.NDArray[Any]) +assert_type(np.around(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.mean(AR_b), np.floating) +assert_type(np.mean(AR_i8), np.floating) +assert_type(np.mean(AR_f4), np.floating) +assert_type(np.mean(AR_m), np.timedelta64) +assert_type(np.mean(AR_c16), np.complexfloating) +assert_type(np.mean(AR_O), Any) +assert_type(np.mean(AR_f4, axis=0), Any) +assert_type(np.mean(AR_f4, keepdims=True), Any) +assert_type(np.mean(AR_f4, dtype=float), Any) +assert_type(np.mean(AR_f4, dtype=np.float64), np.float64) +assert_type(np.mean(AR_f4, out=AR_subclass), NDArraySubclass) +assert_type(np.mean(AR_f4, dtype=np.float64), np.float64) +assert_type(np.mean(AR_f4, None, np.float64), np.float64) +assert_type(np.mean(AR_f4, dtype=np.float64, keepdims=False), np.float64) +assert_type(np.mean(AR_f4, None, np.float64, keepdims=False), np.float64) +assert_type(np.mean(AR_f4, dtype=np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64]) +assert_type(np.mean(AR_f4, None, np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64]) + +assert_type(np.std(AR_b), np.floating) +assert_type(np.std(AR_i8), np.floating) +assert_type(np.std(AR_f4), np.floating) +assert_type(np.std(AR_c16), np.floating) +assert_type(np.std(AR_O), Any) +assert_type(np.std(AR_f4, axis=0), Any) +assert_type(np.std(AR_f4, keepdims=True), Any) +assert_type(np.std(AR_f4, dtype=float), Any) +assert_type(np.std(AR_f4, dtype=np.float64), np.float64) +assert_type(np.std(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.var(AR_b), np.floating) +assert_type(np.var(AR_i8), np.floating) +assert_type(np.var(AR_f4), np.floating) +assert_type(np.var(AR_c16), np.floating) +assert_type(np.var(AR_O), Any) +assert_type(np.var(AR_f4, axis=0), Any) +assert_type(np.var(AR_f4, keepdims=True), Any) +assert_type(np.var(AR_f4, dtype=float), Any) +assert_type(np.var(AR_f4, dtype=np.float64), np.float64) +assert_type(np.var(AR_f4, out=AR_subclass), NDArraySubclass) diff --git a/local_packages/numpy/typing/tests/data/reveal/getlimits.pyi b/local_packages/numpy/typing/tests/data/reveal/getlimits.pyi new file mode 100644 index 0000000..cc964d7 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/getlimits.pyi @@ -0,0 +1,53 @@ +from typing import assert_type + +import numpy as np + +f: float +f8: np.float64 +c8: np.complex64 +c16: np.complex128 + +i: int +i8: np.int64 +u4: np.uint32 + +finfo_f8: np.finfo[np.float64] +iinfo_i8: np.iinfo[np.int64] + +assert_type(np.finfo(f), np.finfo[np.float64]) +assert_type(np.finfo(f8), np.finfo[np.float64]) +assert_type(np.finfo(c8), np.finfo[np.float32]) +assert_type(np.finfo(c16), np.finfo[np.float64]) +assert_type(np.finfo("f2"), np.finfo[np.float16]) + +assert_type(finfo_f8.dtype, np.dtype[np.float64]) +assert_type(finfo_f8.bits, int) +assert_type(finfo_f8.eps, np.float64) +assert_type(finfo_f8.epsneg, np.float64) +assert_type(finfo_f8.iexp, int) +assert_type(finfo_f8.machep, int) +assert_type(finfo_f8.max, np.float64) +assert_type(finfo_f8.maxexp, int) +assert_type(finfo_f8.min, np.float64) +assert_type(finfo_f8.minexp, int) +assert_type(finfo_f8.negep, int) +assert_type(finfo_f8.nexp, int) +assert_type(finfo_f8.nmant, int) +assert_type(finfo_f8.precision, int) +assert_type(finfo_f8.resolution, np.float64) +assert_type(finfo_f8.tiny, np.float64) +assert_type(finfo_f8.smallest_normal, np.float64) +assert_type(finfo_f8.smallest_subnormal, np.float64) + +assert_type(np.iinfo(i), np.iinfo[np.int_]) +assert_type(np.iinfo(i8), np.iinfo[np.int64]) +assert_type(np.iinfo(u4), np.iinfo[np.uint32]) +assert_type(np.iinfo("i2"), np.iinfo[np.int16]) +assert_type(np.iinfo("u2"), np.iinfo[np.uint16]) + +assert_type(iinfo_i8.dtype, np.dtype[np.int64]) +assert_type(iinfo_i8.kind, str) +assert_type(iinfo_i8.bits, int) +assert_type(iinfo_i8.key, str) +assert_type(iinfo_i8.min, int) +assert_type(iinfo_i8.max, int) diff --git a/local_packages/numpy/typing/tests/data/reveal/histograms.pyi b/local_packages/numpy/typing/tests/data/reveal/histograms.pyi new file mode 100644 index 0000000..c1c63d5 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/histograms.pyi @@ -0,0 +1,25 @@ +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] + +assert_type(np.histogram_bin_edges(AR_i8, bins="auto"), npt.NDArray[Any]) +assert_type(np.histogram_bin_edges(AR_i8, bins="rice", range=(0, 3)), npt.NDArray[Any]) +assert_type(np.histogram_bin_edges(AR_i8, bins="scott", weights=AR_f8), npt.NDArray[Any]) + +assert_type(np.histogram(AR_i8, bins="auto"), tuple[npt.NDArray[Any], npt.NDArray[Any]]) +assert_type(np.histogram(AR_i8, bins="rice", range=(0, 3)), tuple[npt.NDArray[Any], npt.NDArray[Any]]) +assert_type(np.histogram(AR_i8, bins="scott", weights=AR_f8), tuple[npt.NDArray[Any], npt.NDArray[Any]]) +assert_type(np.histogram(AR_f8, bins=1, density=True), tuple[npt.NDArray[Any], npt.NDArray[Any]]) + +assert_type(np.histogramdd(AR_i8, bins=[1]), + tuple[npt.NDArray[Any], tuple[npt.NDArray[Any], ...]]) +assert_type(np.histogramdd(AR_i8, range=[(0, 3)]), + tuple[npt.NDArray[Any], tuple[npt.NDArray[Any], ...]]) +assert_type(np.histogramdd(AR_i8, weights=AR_f8), + tuple[npt.NDArray[Any], tuple[npt.NDArray[Any], ...]]) +assert_type(np.histogramdd(AR_f8, density=True), + tuple[npt.NDArray[Any], tuple[npt.NDArray[Any], ...]]) diff --git a/local_packages/numpy/typing/tests/data/reveal/index_tricks.pyi b/local_packages/numpy/typing/tests/data/reveal/index_tricks.pyi new file mode 100644 index 0000000..f6067c3 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/index_tricks.pyi @@ -0,0 +1,70 @@ +from types import EllipsisType +from typing import Any, Literal, assert_type + +import numpy as np +import numpy.typing as npt + +AR_LIKE_b: list[bool] +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_U: list[str] +AR_LIKE_O: list[object] + +AR_i8: npt.NDArray[np.int64] +AR_O: npt.NDArray[np.object_] + +assert_type(np.ndenumerate(AR_i8), np.ndenumerate[np.int64]) +assert_type(np.ndenumerate(AR_LIKE_f), np.ndenumerate[np.float64]) +assert_type(np.ndenumerate(AR_LIKE_U), np.ndenumerate[np.str_]) +assert_type(np.ndenumerate(AR_LIKE_O), np.ndenumerate[Any]) + +assert_type(next(np.ndenumerate(AR_i8)), tuple[tuple[Any, ...], np.int64]) +assert_type(next(np.ndenumerate(AR_LIKE_f)), tuple[tuple[Any, ...], np.float64]) +assert_type(next(np.ndenumerate(AR_LIKE_U)), tuple[tuple[Any, ...], np.str_]) +assert_type(next(np.ndenumerate(AR_LIKE_O)), tuple[tuple[Any, ...], Any]) + +assert_type(iter(np.ndenumerate(AR_i8)), np.ndenumerate[np.int64]) +assert_type(iter(np.ndenumerate(AR_LIKE_f)), np.ndenumerate[np.float64]) +assert_type(iter(np.ndenumerate(AR_LIKE_U)), np.ndenumerate[np.str_]) +assert_type(iter(np.ndenumerate(AR_LIKE_O)), np.ndenumerate[Any]) + +assert_type(np.ndindex(1, 2, 3), np.ndindex) +assert_type(np.ndindex((1, 2, 3)), np.ndindex) +assert_type(iter(np.ndindex(1, 2, 3)), np.ndindex) +assert_type(next(np.ndindex(1, 2, 3)), tuple[Any, ...]) + +assert_type(np.unravel_index([22, 41, 37], (7, 6)), tuple[npt.NDArray[np.intp], ...]) +assert_type(np.unravel_index([31, 41, 13], (7, 6), order="F"), tuple[npt.NDArray[np.intp], ...]) +assert_type(np.unravel_index(1621, (6, 7, 8, 9)), tuple[np.intp, ...]) + +assert_type(np.ravel_multi_index([[1]], (7, 6)), npt.NDArray[np.intp]) +assert_type(np.ravel_multi_index(AR_LIKE_i, (7, 6)), np.intp) +assert_type(np.ravel_multi_index(AR_LIKE_i, (7, 6), order="F"), np.intp) +assert_type(np.ravel_multi_index(AR_LIKE_i, (4, 6), mode="clip"), np.intp) +assert_type(np.ravel_multi_index(AR_LIKE_i, (4, 4), mode=("clip", "wrap")), np.intp) +assert_type(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)), np.intp) + +assert_type(np.mgrid[1:1:2], npt.NDArray[Any]) +assert_type(np.mgrid[1:1:2, None:10], npt.NDArray[Any]) + +assert_type(np.ogrid[1:1:2], tuple[npt.NDArray[Any], ...]) +assert_type(np.ogrid[1:1:2, None:10], tuple[npt.NDArray[Any], ...]) + +assert_type(np.index_exp[0:1], tuple[slice[int, int, None]]) +assert_type(np.index_exp[0:1, None:3], tuple[slice[int, int, None], slice[None, int, None]]) +assert_type(np.index_exp[0, 0:1, ..., [0, 1, 3]], tuple[Literal[0], slice[int, int, None], EllipsisType, list[int]]) + +assert_type(np.s_[0:1], slice[int, int, None]) +assert_type(np.s_[0:1, None:3], tuple[slice[int, int, None], slice[None, int, None]]) +assert_type(np.s_[0, 0:1, ..., [0, 1, 3]], tuple[Literal[0], slice[int, int, None], EllipsisType, list[int]]) + +assert_type(np.ix_(AR_LIKE_b), tuple[npt.NDArray[np.bool], ...]) +assert_type(np.ix_(AR_LIKE_i, AR_LIKE_f), tuple[npt.NDArray[np.float64], ...]) +assert_type(np.ix_(AR_i8), tuple[npt.NDArray[np.int64], ...]) + +assert_type(np.fill_diagonal(AR_i8, 5), None) + +assert_type(np.diag_indices(4), tuple[npt.NDArray[np.int_], ...]) +assert_type(np.diag_indices(2, 3), tuple[npt.NDArray[np.int_], ...]) + +assert_type(np.diag_indices_from(AR_i8), tuple[npt.NDArray[np.int_], ...]) diff --git a/local_packages/numpy/typing/tests/data/reveal/lib_function_base.pyi b/local_packages/numpy/typing/tests/data/reveal/lib_function_base.pyi new file mode 100644 index 0000000..345635d --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/lib_function_base.pyi @@ -0,0 +1,409 @@ +from collections.abc import Callable +from fractions import Fraction +from typing import Any, LiteralString, assert_type, type_check_only + +import numpy as np +import numpy.typing as npt + +f8: np.float64 +AR_LIKE_b: list[bool] +AR_LIKE_i8: list[int] +AR_LIKE_f8: list[float] +AR_LIKE_c16: list[complex] +AR_LIKE_O: list[Fraction] + +AR_u1: npt.NDArray[np.uint8] +AR_i8: npt.NDArray[np.int64] +AR_f2: npt.NDArray[np.float16] +AR_f4: npt.NDArray[np.float32] +AR_f8: npt.NDArray[np.float64] +AR_f10: npt.NDArray[np.longdouble] +AR_c8: npt.NDArray[np.complex64] +AR_c16: npt.NDArray[np.complex128] +AR_c20: npt.NDArray[np.clongdouble] +AR_m: npt.NDArray[np.timedelta64] +AR_M: npt.NDArray[np.datetime64] +AR_O: npt.NDArray[np.object_] +AR_b: npt.NDArray[np.bool] +AR_U: npt.NDArray[np.str_] +CHAR_AR_U: np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] + +AR_f8_1d: np.ndarray[tuple[int], np.dtype[np.float64]] +AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] +AR_f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] +AR_c16_1d: np.ndarray[tuple[int], np.dtype[np.complex128]] + +AR_b_list: list[npt.NDArray[np.bool]] + +@type_check_only +def func(a: np.ndarray, posarg: bool = ..., /, arg: int = ..., *, kwarg: str = ...) -> np.ndarray: ... +@type_check_only +def func_f8(a: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: ... + +### + +# vectorize +vectorized_func: np.vectorize +assert_type(vectorized_func.pyfunc, Callable[..., Any]) +assert_type(vectorized_func.cache, bool) +assert_type(vectorized_func.signature, LiteralString | None) +assert_type(vectorized_func.otypes, LiteralString | None) +assert_type(vectorized_func.excluded, set[int | str]) +assert_type(vectorized_func.__doc__, str | None) +assert_type(vectorized_func([1]), Any) +assert_type(np.vectorize(int), np.vectorize) +assert_type( + np.vectorize(int, otypes="i", doc="doc", excluded=(), cache=True, signature=None), + np.vectorize, +) + +# rot90 +assert_type(np.rot90(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.rot90(AR_f8, k=2), npt.NDArray[np.float64]) +assert_type(np.rot90(AR_LIKE_f8, axes=(0, 1)), np.ndarray) + +# flip +assert_type(np.flip(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.flip(AR_f8, axis=(0, 1)), npt.NDArray[np.float64]) +assert_type(np.flip(AR_LIKE_f8, axis=0), np.ndarray) + +# iterable +assert_type(np.iterable(1), bool) +assert_type(np.iterable([1]), bool) + +# average +assert_type(np.average(AR_f8_2d), np.float64) +assert_type(np.average(AR_f8_2d, axis=1), npt.NDArray[np.float64]) +assert_type(np.average(AR_f8_2d, keepdims=True), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.average(AR_f8), np.float64) +assert_type(np.average(AR_f8, axis=1), npt.NDArray[np.float64]) +assert_type(np.average(AR_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.average(AR_f8, returned=True), tuple[np.float64, np.float64]) +assert_type(np.average(AR_f8, axis=1, returned=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.average(AR_f8, keepdims=True, returned=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.average(AR_LIKE_f8), np.float64) +assert_type(np.average(AR_LIKE_f8, weights=AR_f8), np.float64) +assert_type(np.average(AR_LIKE_f8, axis=1), npt.NDArray[np.float64]) +assert_type(np.average(AR_LIKE_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.average(AR_LIKE_f8, returned=True), tuple[np.float64, np.float64]) +assert_type(np.average(AR_LIKE_f8, axis=1, returned=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.average(AR_LIKE_f8, keepdims=True, returned=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.average(AR_O), Any) +assert_type(np.average(AR_O, axis=1), np.ndarray) +assert_type(np.average(AR_O, keepdims=True), np.ndarray) +assert_type(np.average(AR_O, returned=True), tuple[Any, Any]) +assert_type(np.average(AR_O, axis=1, returned=True), tuple[np.ndarray, np.ndarray]) +assert_type(np.average(AR_O, keepdims=True, returned=True), tuple[np.ndarray, np.ndarray]) + +# asarray_chkfinite +assert_type(np.asarray_chkfinite(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.asarray_chkfinite(AR_f8), npt.NDArray[np.float64]) +assert_type(np.asarray_chkfinite(AR_LIKE_f8), np.ndarray) +assert_type(np.asarray_chkfinite(AR_f8, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.asarray_chkfinite(AR_f8, dtype=float), np.ndarray) + +# piecewise +assert_type(np.piecewise(AR_f8_1d, AR_b, [func]), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.piecewise(AR_f8, AR_b, [func]), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_f8, AR_b, [func_f8]), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_f8, AR_b_list, [func]), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_f8, AR_b_list, [func_f8]), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, -1, kwarg=""), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, arg=-1, kwarg=""), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_LIKE_f8, AR_b_list, [func]), np.ndarray) +assert_type(np.piecewise(AR_LIKE_f8, AR_b_list, [func_f8]), npt.NDArray[np.float64]) + +# extract +assert_type(np.extract(AR_i8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.extract(AR_i8, AR_LIKE_b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.extract(AR_i8, AR_LIKE_i8), np.ndarray[tuple[int], np.dtype[np.int_]]) +assert_type(np.extract(AR_i8, AR_LIKE_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.extract(AR_i8, AR_LIKE_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# select +assert_type(np.select([AR_b], [AR_f8_1d]), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.select([AR_b], [AR_f8]), npt.NDArray[np.float64]) + +# places +assert_type(np.place(AR_f8, mask=AR_i8, vals=5.0), None) + +# copy +assert_type(np.copy(AR_LIKE_f8), np.ndarray) +assert_type(np.copy(AR_U), npt.NDArray[np.str_]) +assert_type(np.copy(CHAR_AR_U, "K", subok=True), np.char.chararray[tuple[Any, ...], np.dtype[np.str_]]) +assert_type(np.copy(CHAR_AR_U, subok=True), np.char.chararray[tuple[Any, ...], np.dtype[np.str_]]) +# pyright correctly infers `NDArray[str_]` here +assert_type(np.copy(CHAR_AR_U), np.ndarray[Any, Any]) # pyright: ignore[reportAssertTypeFailure] + +# gradient +assert_type(np.gradient(AR_f8_1d, 1), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type( + np.gradient(AR_f8_2d, [1, 2], [2, 3.5, 4]), + tuple[ + np.ndarray[tuple[int, int], np.dtype[np.float64]], + np.ndarray[tuple[int, int], np.dtype[np.float64]], + ], +) +assert_type( + np.gradient(AR_f8_3d), + tuple[ + np.ndarray[tuple[int, int, int], np.dtype[np.float64]], + np.ndarray[tuple[int, int, int], np.dtype[np.float64]], + np.ndarray[tuple[int, int, int], np.dtype[np.float64]], + ], +) +assert_type(np.gradient(AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]] | Any) +assert_type(np.gradient(AR_LIKE_f8, edge_order=2), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.gradient(AR_LIKE_c16, axis=0), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# diff +assert_type(np.diff("git", n=0), str) +assert_type(np.diff(AR_f8), npt.NDArray[np.float64]) +assert_type(np.diff(AR_f8_1d, axis=0), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.diff(AR_f8_2d, axis=0), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.diff(AR_LIKE_f8, prepend=1.5), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.diff(AR_c16), npt.NDArray[np.complex128]) +assert_type(np.diff(AR_c16_1d), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.diff(AR_LIKE_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# interp +assert_type(np.interp(1, [1], AR_f8), np.float64) +assert_type(np.interp(1, [1], [1]), np.float64) +assert_type(np.interp(1, [1], AR_c16), np.complex128) +assert_type(np.interp(1, [1], [1j]), np.complex128) +assert_type(np.interp([1], [1], AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.interp([1], [1], [1]), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.interp([1], [1], AR_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.interp([1], [1], [1j]), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# angle +assert_type(np.angle(1), np.float64) +assert_type(np.angle(1, deg=True), np.float64) +assert_type(np.angle(1j), np.float64) +assert_type(np.angle(f8), np.float64) +assert_type(np.angle(AR_b), npt.NDArray[np.float64]) +assert_type(np.angle(AR_u1), npt.NDArray[np.float64]) +assert_type(np.angle(AR_i8), npt.NDArray[np.float64]) +assert_type(np.angle(AR_f2), npt.NDArray[np.float16]) +assert_type(np.angle(AR_f4), npt.NDArray[np.float32]) +assert_type(np.angle(AR_c8), npt.NDArray[np.float32]) +assert_type(np.angle(AR_f8), npt.NDArray[np.float64]) +assert_type(np.angle(AR_c16), npt.NDArray[np.float64]) +assert_type(np.angle(AR_f10), npt.NDArray[np.longdouble]) +assert_type(np.angle(AR_c20), npt.NDArray[np.longdouble]) +assert_type(np.angle(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.angle(AR_c16_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.angle(AR_LIKE_b), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.angle(AR_LIKE_i8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.angle(AR_LIKE_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.angle(AR_LIKE_c16), np.ndarray[tuple[int], np.dtype[np.float64]]) + +# unwrap +assert_type(np.unwrap(AR_f2), npt.NDArray[np.float16]) +assert_type(np.unwrap(AR_f8), npt.NDArray[np.float64]) +assert_type(np.unwrap(AR_f10), npt.NDArray[np.longdouble]) +assert_type(np.unwrap(AR_O), npt.NDArray[np.object_]) +assert_type(np.unwrap(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.unwrap(AR_f8_2d), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.unwrap(AR_f8_3d), np.ndarray[tuple[int, int, int], np.dtype[np.float64]]) +assert_type(np.unwrap(AR_LIKE_b), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.unwrap(AR_LIKE_i8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.unwrap(AR_LIKE_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) + +# sort_complex +assert_type(np.sort_complex(AR_u1), npt.NDArray[np.complex64]) +assert_type(np.sort_complex(AR_f8), npt.NDArray[np.complex128]) +assert_type(np.sort_complex(AR_f10), npt.NDArray[np.clongdouble]) +assert_type(np.sort_complex(AR_f8_1d), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.sort_complex(AR_c16_1d), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# trim_zeros +assert_type(np.trim_zeros(AR_f8), npt.NDArray[np.float64]) +assert_type(np.trim_zeros(AR_LIKE_f8), list[float]) + +# cov +assert_type(np.cov(AR_f8_1d), np.ndarray[tuple[()], np.dtype[np.float64]]) +assert_type(np.cov(AR_f8_2d), npt.NDArray[np.float64]) +assert_type(np.cov(AR_f8), npt.NDArray[np.float64]) +assert_type(np.cov(AR_f8, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.cov(AR_c16, AR_c16), np.ndarray[tuple[int, int], np.dtype[np.complex128]]) +assert_type(np.cov(AR_LIKE_f8), np.ndarray[tuple[()], np.dtype[np.float64]]) +assert_type(np.cov(AR_LIKE_f8, AR_LIKE_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.cov(AR_LIKE_f8, dtype=np.float16), np.ndarray[tuple[()], np.dtype[np.float16]]) +assert_type(np.cov(AR_LIKE_f8, AR_LIKE_f8, dtype=np.float32), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.cov(AR_f8, AR_f8, dtype=float), np.ndarray[tuple[int, int]]) +assert_type(np.cov(AR_LIKE_f8, dtype=float), np.ndarray[tuple[()]]) +assert_type(np.cov(AR_LIKE_f8, AR_LIKE_f8, dtype=float), np.ndarray[tuple[int, int]]) + +# corrcoef +assert_type(np.corrcoef(AR_f8_1d), np.float64) +assert_type(np.corrcoef(AR_f8_2d), np.ndarray[tuple[int, int], np.dtype[np.float64]] | np.float64) +assert_type(np.corrcoef(AR_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]] | np.float64) +assert_type(np.corrcoef(AR_f8, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.corrcoef(AR_c16, AR_c16), np.ndarray[tuple[int, int], np.dtype[np.complex128]]) +assert_type(np.corrcoef(AR_LIKE_f8), np.float64) +assert_type(np.corrcoef(AR_LIKE_f8, AR_LIKE_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.corrcoef(AR_LIKE_f8, dtype=np.float16), np.float16) +assert_type(np.corrcoef(AR_LIKE_f8, AR_LIKE_f8, dtype=np.float32), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.corrcoef(AR_f8, AR_f8, dtype=float), np.ndarray[tuple[int, int]]) +assert_type(np.corrcoef(AR_LIKE_f8, dtype=float), Any) +assert_type(np.corrcoef(AR_LIKE_f8, AR_LIKE_f8, dtype=float), np.ndarray[tuple[int, int]]) + +# window functions +assert_type(np.blackman(5), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.bartlett(6), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.hanning(4.5), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.hamming(0), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.kaiser(4, 5.9), np.ndarray[tuple[int], np.dtype[np.float64]]) + +# i0 (bessel function) +assert_type(np.i0(AR_i8), npt.NDArray[np.float64]) + +# sinc (cardinal sine function) +assert_type(np.sinc(1.0), np.float64) +assert_type(np.sinc(1j), np.complex128 | Any) +assert_type(np.sinc(AR_f8), npt.NDArray[np.float64]) +assert_type(np.sinc(AR_c16), npt.NDArray[np.complex128]) +assert_type(np.sinc(AR_LIKE_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.sinc(AR_LIKE_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) + +# median +assert_type(np.median(AR_f8, keepdims=False), np.float64) +assert_type(np.median(AR_c16, overwrite_input=True), np.complex128) +assert_type(np.median(AR_m), np.timedelta64) +assert_type(np.median(AR_O), Any) +assert_type(np.median(AR_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.median(AR_f8, axis=0), npt.NDArray[np.float64]) +assert_type(np.median(AR_c16, keepdims=True), npt.NDArray[np.complex128]) +assert_type(np.median(AR_c16, axis=0), npt.NDArray[np.complex128]) +assert_type(np.median(AR_LIKE_f8, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.median(AR_LIKE_c16, keepdims=True), npt.NDArray[np.complex128]) +assert_type(np.median(AR_LIKE_f8, out=AR_c16), npt.NDArray[np.complex128]) + +# percentile +assert_type(np.percentile(AR_f8, 50), np.float64) +assert_type(np.percentile(AR_f8, 50, axis=1), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_f8, 50, axis=(1, 0)), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_f8, 50, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_f8, 50, axis=0, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_c16, 50), np.complex128) +assert_type(np.percentile(AR_m, 50), np.timedelta64) +assert_type(np.percentile(AR_M, 50, overwrite_input=True), np.datetime64) +assert_type(np.percentile(AR_O, 50), Any) +assert_type(np.percentile(AR_f8, [50]), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_f8, [50], axis=1), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_f8, [50], keepdims=True), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_c16, [50]), npt.NDArray[np.complex128]) +assert_type(np.percentile(AR_m, [50]), npt.NDArray[np.timedelta64]) +assert_type(np.percentile(AR_M, [50], method="nearest"), npt.NDArray[np.datetime64]) +assert_type(np.percentile(AR_O, [50]), npt.NDArray[np.object_]) +assert_type(np.percentile(AR_f8, [50], keepdims=True), npt.NDArray[np.float64]) +assert_type(np.percentile(AR_f8, [50], out=AR_c16), npt.NDArray[np.complex128]) + +# quantile +assert_type(np.quantile(AR_f8, 0.50), np.float64) +assert_type(np.quantile(AR_f8, 0.50, axis=1), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, 0.50, axis=(1, 0)), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, 0.50, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, 0.50, axis=0, keepdims=True), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_c16, 0.50), np.complex128) +assert_type(np.quantile(AR_m, 0.50), np.timedelta64) +assert_type(np.quantile(AR_M, 0.50, overwrite_input=True), np.datetime64) +assert_type(np.quantile(AR_O, 0.50), Any) +assert_type(np.quantile(AR_f8, [0.50]), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, [0.50], axis=1), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, [0.50], keepdims=True), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_c16, [0.50]), npt.NDArray[np.complex128]) +assert_type(np.quantile(AR_m, [0.50]), npt.NDArray[np.timedelta64]) +assert_type(np.quantile(AR_M, [0.50], method="nearest"), npt.NDArray[np.datetime64]) +assert_type(np.quantile(AR_O, [0.50]), npt.NDArray[np.object_]) +assert_type(np.quantile(AR_f8, [0.50], keepdims=True), npt.NDArray[np.float64]) +assert_type(np.quantile(AR_f8, [0.50], out=AR_c16), npt.NDArray[np.complex128]) + +# trapezoid +assert_type(np.trapezoid(AR_LIKE_f8), np.float64) +assert_type(np.trapezoid(AR_LIKE_f8, AR_LIKE_f8), np.float64) +assert_type(np.trapezoid(AR_LIKE_c16), np.complex128) +assert_type(np.trapezoid(AR_LIKE_c16, AR_LIKE_f8), np.complex128) +assert_type(np.trapezoid(AR_LIKE_f8, AR_LIKE_c16), np.complex128) +assert_type(np.trapezoid(AR_LIKE_O), float) +assert_type(np.trapezoid(AR_LIKE_O, AR_LIKE_f8), float) +assert_type(np.trapezoid(AR_f8), np.float64 | npt.NDArray[np.float64]) +assert_type(np.trapezoid(AR_f8, AR_f8), np.float64 | npt.NDArray[np.float64]) +assert_type(np.trapezoid(AR_c16), np.complex128 | npt.NDArray[np.complex128]) +assert_type(np.trapezoid(AR_c16, AR_c16), np.complex128 | npt.NDArray[np.complex128]) +assert_type(np.trapezoid(AR_m), np.timedelta64 | npt.NDArray[np.timedelta64]) +assert_type(np.trapezoid(AR_O), npt.NDArray[np.object_] | Any) +assert_type(np.trapezoid(AR_O, AR_LIKE_f8), npt.NDArray[np.object_] | Any) + +# meshgrid +assert_type(np.meshgrid(), tuple[()]) +assert_type( + np.meshgrid(AR_f8), + tuple[ + np.ndarray[tuple[int], np.dtype[np.float64]], + ], +) +assert_type( + np.meshgrid(AR_c16, indexing="ij"), + tuple[ + np.ndarray[tuple[int], np.dtype[np.complex128]], + ], +) +assert_type( + np.meshgrid(AR_i8, AR_f8, copy=False), + tuple[ + np.ndarray[tuple[int, int], np.dtype[np.int64]], + np.ndarray[tuple[int, int], np.dtype[np.float64]], + ], +) +assert_type( + np.meshgrid(AR_LIKE_f8, AR_f8), + tuple[ + np.ndarray[tuple[int, int]], + np.ndarray[tuple[int, int], np.dtype[np.float64]], + ], +) +assert_type( + np.meshgrid(AR_f8, AR_LIKE_f8), + tuple[ + np.ndarray[tuple[int, int], np.dtype[np.float64]], + np.ndarray[tuple[int, int]], + ], +) +assert_type( + np.meshgrid(AR_LIKE_f8, AR_LIKE_f8), + tuple[ + np.ndarray[tuple[int, int]], + np.ndarray[tuple[int, int]], + ], +) +assert_type( + np.meshgrid(AR_f8, AR_i8, AR_c16), + tuple[ + np.ndarray[tuple[int, int, int], np.dtype[np.float64]], + np.ndarray[tuple[int, int, int], np.dtype[np.int64]], + np.ndarray[tuple[int, int, int], np.dtype[np.complex128]], + ], +) +assert_type(np.meshgrid(AR_f8, AR_f8, AR_f8, AR_f8), tuple[npt.NDArray[np.float64], ...]) +assert_type(np.meshgrid(AR_f8, AR_f8, AR_f8, AR_LIKE_f8), tuple[np.ndarray, ...]) +assert_type(np.meshgrid(*AR_LIKE_f8), tuple[np.ndarray, ...]) + +# delete +assert_type(np.delete(AR_f8, np.s_[:5]), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.delete(AR_LIKE_f8, [0, 4, 9], axis=0), np.ndarray) + +# insert +assert_type(np.insert(AR_f8, np.s_[:5], 5), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.insert(AR_LIKE_f8, [0, 4, 9], [0.5, 9.2, 7], axis=0), np.ndarray) + +# append +assert_type(np.append(f8, f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.append(AR_f8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.append(AR_LIKE_f8, AR_LIKE_c16, axis=0), np.ndarray) +assert_type(np.append(AR_f8, AR_LIKE_f8, axis=0), np.ndarray) + +# digitize +assert_type(np.digitize(4.5, [1]), np.intp) +assert_type(np.digitize(AR_f8, [1, 2, 3]), npt.NDArray[np.intp]) diff --git a/local_packages/numpy/typing/tests/data/reveal/lib_polynomial.pyi b/local_packages/numpy/typing/tests/data/reveal/lib_polynomial.pyi new file mode 100644 index 0000000..b7cbade --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/lib_polynomial.pyi @@ -0,0 +1,147 @@ +from collections.abc import Iterator +from typing import Any, NoReturn, assert_type + +import numpy as np +import numpy.typing as npt + +AR_b: npt.NDArray[np.bool] +AR_u4: npt.NDArray[np.uint32] +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +AR_O: npt.NDArray[np.object_] + +poly_obj: np.poly1d + +assert_type(poly_obj.variable, str) +assert_type(poly_obj.order, int) +assert_type(poly_obj.o, int) +assert_type(poly_obj.roots, npt.NDArray[Any]) +assert_type(poly_obj.r, npt.NDArray[Any]) +assert_type(poly_obj.coeffs, npt.NDArray[Any]) +assert_type(poly_obj.c, npt.NDArray[Any]) +assert_type(poly_obj.coef, npt.NDArray[Any]) +assert_type(poly_obj.coefficients, npt.NDArray[Any]) +assert_type(poly_obj.__hash__, None) + +assert_type(poly_obj(1), Any) +assert_type(poly_obj([1]), npt.NDArray[Any]) +assert_type(poly_obj(poly_obj), np.poly1d) + +assert_type(len(poly_obj), int) +assert_type(-poly_obj, np.poly1d) +assert_type(+poly_obj, np.poly1d) + +assert_type(poly_obj * 5, np.poly1d) +assert_type(5 * poly_obj, np.poly1d) +assert_type(poly_obj + 5, np.poly1d) +assert_type(5 + poly_obj, np.poly1d) +assert_type(poly_obj - 5, np.poly1d) +assert_type(5 - poly_obj, np.poly1d) +assert_type(poly_obj**1, np.poly1d) +assert_type(poly_obj**1.0, np.poly1d) +assert_type(poly_obj / 5, np.poly1d) +assert_type(5 / poly_obj, np.poly1d) + +assert_type(poly_obj[0], Any) +poly_obj[0] = 5 +assert_type(iter(poly_obj), Iterator[Any]) +assert_type(poly_obj.deriv(), np.poly1d) +assert_type(poly_obj.integ(), np.poly1d) + +assert_type(np.poly(poly_obj), npt.NDArray[np.floating]) +assert_type(np.poly(AR_f8), npt.NDArray[np.floating]) +assert_type(np.poly(AR_c16), npt.NDArray[np.floating]) + +assert_type(np.polyint(poly_obj), np.poly1d) +assert_type(np.polyint(AR_f8), npt.NDArray[np.floating]) +assert_type(np.polyint(AR_f8, k=AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.polyint(AR_O, m=2), npt.NDArray[np.object_]) + +assert_type(np.polyder(poly_obj), np.poly1d) +assert_type(np.polyder(AR_f8), npt.NDArray[np.floating]) +assert_type(np.polyder(AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.polyder(AR_O, m=2), npt.NDArray[np.object_]) + +assert_type(np.polyfit(AR_f8, AR_f8, 2), npt.NDArray[np.float64]) +assert_type( + np.polyfit(AR_f8, AR_i8, 1, full=True), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.float64], + npt.NDArray[np.int32], + npt.NDArray[np.float64], + npt.NDArray[np.float64], + ], +) +assert_type( + np.polyfit(AR_u4, AR_f8, 1.0, cov="unscaled"), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.float64], + ], +) +assert_type(np.polyfit(AR_c16, AR_f8, 2), npt.NDArray[np.complex128]) +assert_type( + np.polyfit(AR_f8, AR_c16, 1, full=True), + tuple[ + npt.NDArray[np.complex128], + npt.NDArray[np.float64], + npt.NDArray[np.int32], + npt.NDArray[np.float64], + npt.NDArray[np.float64], + ], +) +assert_type( + np.polyfit(AR_u4, AR_c16, 1.0, cov=True), + tuple[ + npt.NDArray[np.complex128], + npt.NDArray[np.complex128], + ], +) + +assert_type(np.polyval(AR_b, AR_b), npt.NDArray[np.int64]) +assert_type(np.polyval(AR_u4, AR_b), npt.NDArray[np.unsignedinteger]) +assert_type(np.polyval(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.polyval(AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(np.polyval(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.polyval(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.polyadd(poly_obj, AR_i8), np.poly1d) +assert_type(np.polyadd(AR_f8, poly_obj), np.poly1d) +assert_type(np.polyadd(AR_b, AR_b), npt.NDArray[np.bool]) +assert_type(np.polyadd(AR_u4, AR_b), npt.NDArray[np.unsignedinteger]) +assert_type(np.polyadd(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.polyadd(AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(np.polyadd(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.polyadd(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.polysub(poly_obj, AR_i8), np.poly1d) +assert_type(np.polysub(AR_f8, poly_obj), np.poly1d) + +def test_invalid_polysub() -> None: + assert_type(np.polysub(AR_b, AR_b), NoReturn) + +assert_type(np.polysub(AR_u4, AR_b), npt.NDArray[np.unsignedinteger]) +assert_type(np.polysub(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.polysub(AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(np.polysub(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.polysub(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.polymul(poly_obj, AR_i8), np.poly1d) +assert_type(np.polymul(AR_f8, poly_obj), np.poly1d) +assert_type(np.polymul(AR_b, AR_b), npt.NDArray[np.bool]) +assert_type(np.polymul(AR_u4, AR_b), npt.NDArray[np.unsignedinteger]) +assert_type(np.polymul(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.polymul(AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(np.polymul(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.polymul(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.polydiv(poly_obj, AR_i8), tuple[np.poly1d, np.poly1d]) +assert_type(np.polydiv(AR_f8, poly_obj), tuple[np.poly1d, np.poly1d]) +assert_type(np.polydiv(AR_b, AR_b), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) +assert_type(np.polydiv(AR_u4, AR_b), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) +assert_type(np.polydiv(AR_i8, AR_i8), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) +assert_type(np.polydiv(AR_f8, AR_i8), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) +assert_type(np.polydiv(AR_i8, AR_c16), tuple[npt.NDArray[np.complexfloating], npt.NDArray[np.complexfloating]]) +assert_type(np.polydiv(AR_O, AR_O), tuple[npt.NDArray[Any], npt.NDArray[Any]]) diff --git a/local_packages/numpy/typing/tests/data/reveal/lib_utils.pyi b/local_packages/numpy/typing/tests/data/reveal/lib_utils.pyi new file mode 100644 index 0000000..c9470e0 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/lib_utils.pyi @@ -0,0 +1,17 @@ +from io import StringIO +from typing import assert_type + +import numpy as np +import numpy.lib.array_utils as array_utils +import numpy.typing as npt + +AR: npt.NDArray[np.float64] +AR_DICT: dict[str, npt.NDArray[np.float64]] +FILE: StringIO + +def func(a: int) -> bool: ... + +assert_type(array_utils.byte_bounds(AR), tuple[int, int]) +assert_type(array_utils.byte_bounds(np.float64()), tuple[int, int]) + +assert_type(np.info(1, output=FILE), None) diff --git a/local_packages/numpy/typing/tests/data/reveal/lib_version.pyi b/local_packages/numpy/typing/tests/data/reveal/lib_version.pyi new file mode 100644 index 0000000..0373537 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/lib_version.pyi @@ -0,0 +1,20 @@ +from typing import assert_type + +from numpy.lib import NumpyVersion + +version = NumpyVersion("1.8.0") + +assert_type(version.vstring, str) +assert_type(version.version, str) +assert_type(version.major, int) +assert_type(version.minor, int) +assert_type(version.bugfix, int) +assert_type(version.pre_release, str) +assert_type(version.is_devversion, bool) + +assert_type(version == version, bool) +assert_type(version != version, bool) +assert_type(version < "1.8.0", bool) +assert_type(version <= version, bool) +assert_type(version > version, bool) +assert_type(version >= "1.8.0", bool) diff --git a/local_packages/numpy/typing/tests/data/reveal/linalg.pyi b/local_packages/numpy/typing/tests/data/reveal/linalg.pyi new file mode 100644 index 0000000..6005651 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/linalg.pyi @@ -0,0 +1,154 @@ +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt +from numpy.linalg._linalg import ( + EighResult, + EigResult, + QRResult, + SlogdetResult, + SVDResult, +) + +float_list_2d: list[list[float]] +AR_i8: npt.NDArray[np.int64] +AR_f4: npt.NDArray[np.float32] +AR_f8: npt.NDArray[np.float64] +AR_c8: npt.NDArray[np.complex64] +AR_c16: npt.NDArray[np.complex128] +AR_O: npt.NDArray[np.object_] +AR_m: npt.NDArray[np.timedelta64] +AR_S: npt.NDArray[np.str_] +AR_b: npt.NDArray[np.bool] + +assert_type(np.linalg.tensorsolve(AR_i8, AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.tensorsolve(AR_i8, AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.tensorsolve(AR_c16, AR_f8), npt.NDArray[np.complexfloating]) + +assert_type(np.linalg.solve(AR_i8, AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.solve(AR_i8, AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.solve(AR_c16, AR_f8), npt.NDArray[np.complexfloating]) + +assert_type(np.linalg.tensorinv(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.tensorinv(AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.tensorinv(AR_c16), npt.NDArray[np.complexfloating]) + +assert_type(np.linalg.inv(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.inv(AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.inv(AR_c16), npt.NDArray[np.complexfloating]) + +assert_type(np.linalg.matrix_power(AR_i8, -1), npt.NDArray[Any]) +assert_type(np.linalg.matrix_power(AR_f8, 0), npt.NDArray[Any]) +assert_type(np.linalg.matrix_power(AR_c16, 1), npt.NDArray[Any]) +assert_type(np.linalg.matrix_power(AR_O, 2), npt.NDArray[Any]) + +assert_type(np.linalg.cholesky(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.cholesky(AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.cholesky(AR_c16), npt.NDArray[np.complexfloating]) + +assert_type(np.linalg.outer(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.linalg.outer(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.outer(AR_c16, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.linalg.outer(AR_b, AR_b), npt.NDArray[np.bool]) +assert_type(np.linalg.outer(AR_O, AR_O), npt.NDArray[np.object_]) +assert_type(np.linalg.outer(AR_i8, AR_m), npt.NDArray[np.timedelta64]) + +assert_type(np.linalg.qr(AR_i8), QRResult) +assert_type(np.linalg.qr(AR_f8), QRResult) +assert_type(np.linalg.qr(AR_c16), QRResult) + +assert_type(np.linalg.eigvals(AR_i8), npt.NDArray[np.float64] | npt.NDArray[np.complex128]) +assert_type(np.linalg.eigvals(AR_f8), npt.NDArray[np.floating] | npt.NDArray[np.complexfloating]) +assert_type(np.linalg.eigvals(AR_c16), npt.NDArray[np.complexfloating]) + +assert_type(np.linalg.eigvalsh(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.eigvalsh(AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.eigvalsh(AR_c16), npt.NDArray[np.floating]) + +assert_type(np.linalg.eig(AR_i8), EigResult) +assert_type(np.linalg.eig(AR_f8), EigResult) +assert_type(np.linalg.eig(AR_c16), EigResult) + +assert_type(np.linalg.eigh(AR_i8), EighResult) +assert_type(np.linalg.eigh(AR_f8), EighResult) +assert_type(np.linalg.eigh(AR_c16), EighResult) + +assert_type(np.linalg.svd(AR_i8), SVDResult) +assert_type(np.linalg.svd(AR_f8), SVDResult) +assert_type(np.linalg.svd(AR_c16), SVDResult) +assert_type(np.linalg.svd(AR_i8, compute_uv=False), npt.NDArray[np.float64]) +assert_type(np.linalg.svd(AR_i8, True, False), npt.NDArray[np.float64]) +assert_type(np.linalg.svd(AR_f8, compute_uv=False), npt.NDArray[np.floating]) +assert_type(np.linalg.svd(AR_c16, compute_uv=False), npt.NDArray[np.floating]) +assert_type(np.linalg.svd(AR_c16, True, False), npt.NDArray[np.floating]) + +assert_type(np.linalg.svdvals(AR_b), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(AR_f4), npt.NDArray[np.float32]) +assert_type(np.linalg.svdvals(AR_c8), npt.NDArray[np.float32]) +assert_type(np.linalg.svdvals(AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals(AR_c16), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals([[1, 2], [3, 4]]), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals([[1.0, 2.0], [3.0, 4.0]]), npt.NDArray[np.float64]) +assert_type(np.linalg.svdvals([[1j, 2j], [3j, 4j]]), npt.NDArray[np.float64]) + +assert_type(np.linalg.cond(AR_i8), Any) +assert_type(np.linalg.cond(AR_f8), Any) +assert_type(np.linalg.cond(AR_c16), Any) + +assert_type(np.linalg.matrix_rank(AR_i8), Any) +assert_type(np.linalg.matrix_rank(AR_f8), Any) +assert_type(np.linalg.matrix_rank(AR_c16), Any) + +assert_type(np.linalg.pinv(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.pinv(AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.pinv(AR_c16), npt.NDArray[np.complexfloating]) + +assert_type(np.linalg.slogdet(AR_i8), SlogdetResult) +assert_type(np.linalg.slogdet(AR_f8), SlogdetResult) +assert_type(np.linalg.slogdet(AR_c16), SlogdetResult) + +assert_type(np.linalg.det(AR_i8), Any) +assert_type(np.linalg.det(AR_f8), Any) +assert_type(np.linalg.det(AR_c16), Any) + +assert_type(np.linalg.lstsq(AR_i8, AR_i8), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64], np.int32, npt.NDArray[np.float64]]) +assert_type(np.linalg.lstsq(AR_i8, AR_f8), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating], np.int32, npt.NDArray[np.floating]]) +assert_type(np.linalg.lstsq(AR_f8, AR_c16), tuple[npt.NDArray[np.complexfloating], npt.NDArray[np.floating], np.int32, npt.NDArray[np.floating]]) + +assert_type(np.linalg.norm(AR_i8), np.floating) +assert_type(np.linalg.norm(AR_f8), np.floating) +assert_type(np.linalg.norm(AR_c16), np.floating) +assert_type(np.linalg.norm(AR_S), np.floating) +assert_type(np.linalg.norm(AR_f8, axis=0), Any) + +assert_type(np.linalg.matrix_norm(AR_i8), np.floating) +assert_type(np.linalg.matrix_norm(AR_f8), np.floating) +assert_type(np.linalg.matrix_norm(AR_c16), np.floating) +assert_type(np.linalg.matrix_norm(AR_S), np.floating) + +assert_type(np.linalg.vector_norm(AR_i8), np.floating) +assert_type(np.linalg.vector_norm(AR_f8), np.floating) +assert_type(np.linalg.vector_norm(AR_c16), np.floating) +assert_type(np.linalg.vector_norm(AR_S), np.floating) + +assert_type(np.linalg.tensordot(AR_b, AR_b), npt.NDArray[np.bool]) +assert_type(np.linalg.tensordot(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.linalg.tensordot(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.tensordot(AR_c16, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.linalg.tensordot(AR_m, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.linalg.tensordot(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.linalg.multi_dot([AR_i8, AR_i8]), Any) +assert_type(np.linalg.multi_dot([AR_i8, AR_f8]), Any) +assert_type(np.linalg.multi_dot([AR_f8, AR_c16]), Any) +assert_type(np.linalg.multi_dot([AR_O, AR_O]), Any) +assert_type(np.linalg.multi_dot([AR_m, AR_m]), Any) + +assert_type(np.linalg.cross(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.linalg.cross(AR_f8, AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.cross(AR_c16, AR_c16), npt.NDArray[np.complexfloating]) + +assert_type(np.linalg.matmul(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.linalg.matmul(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.matmul(AR_c16, AR_c16), npt.NDArray[np.complex128]) diff --git a/local_packages/numpy/typing/tests/data/reveal/ma.pyi b/local_packages/numpy/typing/tests/data/reveal/ma.pyi new file mode 100644 index 0000000..8eef32d --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/ma.pyi @@ -0,0 +1,1098 @@ +from typing import Any, Generic, Literal, NoReturn, TypeAlias, TypeVar, assert_type + +import numpy as np +from numpy import dtype, generic +from numpy._typing import NDArray, _AnyShape + +_ScalarT = TypeVar("_ScalarT", bound=generic) +_ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) + +MaskedArray: TypeAlias = np.ma.MaskedArray[_AnyShape, dtype[_ScalarT]] +_NoMaskType: TypeAlias = np.bool[Literal[False]] +_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] + +class MaskedArraySubclass(MaskedArray[_ScalarT_co]): ... + +class IntoMaskedArraySubClass(Generic[_ScalarT_co]): + def __array__(self) -> MaskedArraySubclass[_ScalarT_co]: ... + +MaskedArraySubclassC: TypeAlias = MaskedArraySubclass[np.complex128] + +AR_b: NDArray[np.bool] +AR_f4: NDArray[np.float32] +AR_i8: NDArray[np.int64] +AR_u4: NDArray[np.uint32] +AR_dt64: NDArray[np.datetime64] +AR_td64: NDArray[np.timedelta64] +AR_o: NDArray[np.timedelta64] + +AR_LIKE_b: list[bool] +AR_LIKE_u: list[np.uint32] +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_c: list[complex] +AR_LIKE_td64: list[np.timedelta64] +AR_LIKE_dt64: list[np.datetime64] +AR_LIKE_o: list[np.object_] +AR_number: NDArray[np.number] + +MAR_c8: MaskedArray[np.complex64] +MAR_c16: MaskedArray[np.complex128] +MAR_b: MaskedArray[np.bool] +MAR_f4: MaskedArray[np.float32] +MAR_f8: MaskedArray[np.float64] +MAR_i8: MaskedArray[np.int64] +MAR_u4: MaskedArray[np.uint32] +MAR_dt64: MaskedArray[np.datetime64] +MAR_td64: MaskedArray[np.timedelta64] +MAR_o: MaskedArray[np.object_] +MAR_s: MaskedArray[np.str_] +MAR_byte: MaskedArray[np.bytes_] +MAR_V: MaskedArray[np.void] +MAR_floating: MaskedArray[np.floating] +MAR_number: MaskedArray[np.number] + +MAR_subclass: MaskedArraySubclassC +MAR_into_subclass: IntoMaskedArraySubClass[np.float32] + +MAR_1d: np.ma.MaskedArray[tuple[int], np.dtype] +MAR_2d_f4: np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]] +MAR_2d_V: np.ma.MaskedArray[tuple[int, int], np.dtype[np.void]] + +b: np.bool +f4: np.float32 +f: float +i: int + +assert_type(MAR_1d.shape, tuple[int]) + +assert_type(MAR_f4.dtype, np.dtype[np.float32]) + +assert_type(int(MAR_i8), int) +assert_type(float(MAR_f4), float) + +assert_type(np.ma.min(MAR_b), np.bool) +assert_type(np.ma.min(MAR_f4), np.float32) +assert_type(np.ma.min(MAR_b, axis=0), Any) +assert_type(np.ma.min(MAR_f4, axis=0), Any) +assert_type(np.ma.min(MAR_b, keepdims=True), Any) +assert_type(np.ma.min(MAR_f4, keepdims=True), Any) +assert_type(np.ma.min(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.min(MAR_f4, 0, MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.min(MAR_f4, None, MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_b.min(), np.bool) +assert_type(MAR_f4.min(), np.float32) +assert_type(MAR_b.min(axis=0), Any) +assert_type(MAR_f4.min(axis=0), Any) +assert_type(MAR_b.min(keepdims=True), Any) +assert_type(MAR_f4.min(keepdims=True), Any) +assert_type(MAR_f4.min(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.min(0, MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.min(None, MAR_subclass), MaskedArraySubclassC) + +assert_type(np.ma.max(MAR_b), np.bool) +assert_type(np.ma.max(MAR_f4), np.float32) +assert_type(np.ma.max(MAR_b, axis=0), Any) +assert_type(np.ma.max(MAR_f4, axis=0), Any) +assert_type(np.ma.max(MAR_b, keepdims=True), Any) +assert_type(np.ma.max(MAR_f4, keepdims=True), Any) +assert_type(np.ma.max(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.max(MAR_f4, 0, MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.max(MAR_f4, None, MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_b.max(), np.bool) +assert_type(MAR_f4.max(), np.float32) +assert_type(MAR_b.max(axis=0), Any) +assert_type(MAR_f4.max(axis=0), Any) +assert_type(MAR_b.max(keepdims=True), Any) +assert_type(MAR_f4.max(keepdims=True), Any) +assert_type(MAR_f4.max(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.max(0, MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.max(None, MAR_subclass), MaskedArraySubclassC) + +assert_type(np.ma.ptp(MAR_b), np.bool) +assert_type(np.ma.ptp(MAR_f4), np.float32) +assert_type(np.ma.ptp(MAR_b, axis=0), Any) +assert_type(np.ma.ptp(MAR_f4, axis=0), Any) +assert_type(np.ma.ptp(MAR_b, keepdims=True), Any) +assert_type(np.ma.ptp(MAR_f4, keepdims=True), Any) +assert_type(np.ma.ptp(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.ptp(MAR_f4, 0, MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.ptp(MAR_f4, None, MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_b.ptp(), np.bool) +assert_type(MAR_f4.ptp(), np.float32) +assert_type(MAR_b.ptp(axis=0), Any) +assert_type(MAR_f4.ptp(axis=0), Any) +assert_type(MAR_b.ptp(keepdims=True), Any) +assert_type(MAR_f4.ptp(keepdims=True), Any) +assert_type(MAR_f4.ptp(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.ptp(0, MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.ptp(None, MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_b.argmin(), np.intp) +assert_type(MAR_f4.argmin(), np.intp) +assert_type(MAR_f4.argmax(fill_value=6.28318, keepdims=False), np.intp) +assert_type(MAR_b.argmin(axis=0), Any) +assert_type(MAR_f4.argmin(axis=0), Any) +assert_type(MAR_b.argmin(keepdims=True), Any) +assert_type(MAR_f4.argmin(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.argmin(None, None, out=MAR_subclass), MaskedArraySubclassC) + +assert_type(np.ma.argmin(MAR_b), np.intp) +assert_type(np.ma.argmin(MAR_f4), np.intp) +assert_type(np.ma.argmin(MAR_f4, fill_value=6.28318, keepdims=False), np.intp) +assert_type(np.ma.argmin(MAR_b, axis=0), Any) +assert_type(np.ma.argmin(MAR_f4, axis=0), Any) +assert_type(np.ma.argmin(MAR_b, keepdims=True), Any) +assert_type(np.ma.argmin(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.argmin(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_b.argmax(), np.intp) +assert_type(MAR_f4.argmax(), np.intp) +assert_type(MAR_f4.argmax(fill_value=6.28318, keepdims=False), np.intp) +assert_type(MAR_b.argmax(axis=0), Any) +assert_type(MAR_f4.argmax(axis=0), Any) +assert_type(MAR_b.argmax(keepdims=True), Any) +assert_type(MAR_f4.argmax(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.argmax(None, None, out=MAR_subclass), MaskedArraySubclassC) + +assert_type(np.ma.argmax(MAR_b), np.intp) +assert_type(np.ma.argmax(MAR_f4), np.intp) +assert_type(np.ma.argmax(MAR_f4, fill_value=6.28318, keepdims=False), np.intp) +assert_type(np.ma.argmax(MAR_b, axis=0), Any) +assert_type(np.ma.argmax(MAR_f4, axis=0), Any) +assert_type(np.ma.argmax(MAR_b, keepdims=True), Any) +assert_type(np.ma.argmax(MAR_f4, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.argmax(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_b.all(), np.bool) +assert_type(MAR_f4.all(), np.bool) +assert_type(MAR_f4.all(keepdims=False), np.bool) +assert_type(MAR_b.all(axis=0), np.bool | MaskedArray[np.bool]) +assert_type(MAR_b.all(axis=0, keepdims=True), MaskedArray[np.bool]) +assert_type(MAR_b.all(0, None, True), MaskedArray[np.bool]) +assert_type(MAR_f4.all(axis=0), np.bool | MaskedArray[np.bool]) +assert_type(MAR_b.all(keepdims=True), MaskedArray[np.bool]) +assert_type(MAR_f4.all(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.all(None, out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_b.any(), np.bool) +assert_type(MAR_f4.any(), np.bool) +assert_type(MAR_f4.any(keepdims=False), np.bool) +assert_type(MAR_b.any(axis=0), np.bool | MaskedArray[np.bool]) +assert_type(MAR_b.any(axis=0, keepdims=True), MaskedArray[np.bool]) +assert_type(MAR_b.any(0, None, True), MaskedArray[np.bool]) +assert_type(MAR_f4.any(axis=0), np.bool | MaskedArray[np.bool]) +assert_type(MAR_b.any(keepdims=True), MaskedArray[np.bool]) +assert_type(MAR_f4.any(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.any(None, out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_f4.sort(), None) +assert_type(MAR_f4.sort(axis=0, kind="quicksort", order="K", endwith=False, fill_value=42., stable=False), None) + +assert_type(np.ma.sort(MAR_f4), MaskedArray[np.float32]) +assert_type(np.ma.sort(MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.sort([[0, 1], [2, 3]]), NDArray[Any]) +assert_type(np.ma.sort(AR_f4), NDArray[np.float32]) + +assert_type(MAR_f8.take(0), np.float64) +assert_type(MAR_1d.take(0), Any) +assert_type(MAR_f8.take([0]), MaskedArray[np.float64]) +assert_type(MAR_f8.take(0, out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f8.take([0], out=MAR_subclass), MaskedArraySubclassC) + +assert_type(np.ma.take(f, 0), Any) +assert_type(np.ma.take(f4, 0), np.float32) +assert_type(np.ma.take(MAR_f8, 0), np.float64) +assert_type(np.ma.take(AR_f4, 0), np.float32) +assert_type(np.ma.take(MAR_1d, 0), Any) +assert_type(np.ma.take(MAR_f8, [0]), MaskedArray[np.float64]) +assert_type(np.ma.take(AR_f4, [0]), MaskedArray[np.float32]) +assert_type(np.ma.take(MAR_f8, 0, out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.take(MAR_f8, [0], out=MAR_subclass), MaskedArraySubclassC) +assert_type(np.ma.take([1], [0]), MaskedArray[Any]) +assert_type(np.ma.take(np.eye(2), 1, axis=0), MaskedArray[np.float64]) + +assert_type(MAR_f4.partition(1), None) +assert_type(MAR_V.partition(1, axis=0, kind="introselect", order="K"), None) + +assert_type(MAR_f4.argpartition(1), MaskedArray[np.intp]) +assert_type(MAR_1d.argpartition(1, axis=0, kind="introselect", order="K"), MaskedArray[np.intp]) + +assert_type(np.ma.ndim(f4), int) +assert_type(np.ma.ndim(MAR_b), int) +assert_type(np.ma.ndim(AR_f4), int) + +assert_type(np.ma.size(b), int) +assert_type(np.ma.size(MAR_f4, axis=0), int) +assert_type(np.ma.size(AR_f4), int) + +assert_type(np.ma.is_masked(MAR_f4), bool) + +assert_type(MAR_f4.ids(), tuple[int, int]) + +assert_type(MAR_f4.iscontiguous(), bool) + +assert_type(MAR_f4 >= 3, MaskedArray[np.bool]) +assert_type(MAR_i8 >= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b >= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 >= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_dt64 >= AR_dt64, MaskedArray[np.bool]) +assert_type(MAR_o >= AR_o, MaskedArray[np.bool]) +assert_type(MAR_1d >= 0, MaskedArray[np.bool]) +assert_type(MAR_s >= MAR_s, MaskedArray[np.bool]) +assert_type(MAR_byte >= MAR_byte, MaskedArray[np.bool]) + +assert_type(MAR_f4 > 3, MaskedArray[np.bool]) +assert_type(MAR_i8 > AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b > AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 > AR_td64, MaskedArray[np.bool]) +assert_type(MAR_dt64 > AR_dt64, MaskedArray[np.bool]) +assert_type(MAR_o > AR_o, MaskedArray[np.bool]) +assert_type(MAR_1d > 0, MaskedArray[np.bool]) +assert_type(MAR_s > MAR_s, MaskedArray[np.bool]) +assert_type(MAR_byte > MAR_byte, MaskedArray[np.bool]) + +assert_type(MAR_f4 <= 3, MaskedArray[np.bool]) +assert_type(MAR_i8 <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_dt64 <= AR_dt64, MaskedArray[np.bool]) +assert_type(MAR_o <= AR_o, MaskedArray[np.bool]) +assert_type(MAR_1d <= 0, MaskedArray[np.bool]) +assert_type(MAR_s <= MAR_s, MaskedArray[np.bool]) +assert_type(MAR_byte <= MAR_byte, MaskedArray[np.bool]) + +assert_type(MAR_f4 < 3, MaskedArray[np.bool]) +assert_type(MAR_i8 < AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b < AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 < AR_td64, MaskedArray[np.bool]) +assert_type(MAR_dt64 < AR_dt64, MaskedArray[np.bool]) +assert_type(MAR_o < AR_o, MaskedArray[np.bool]) +assert_type(MAR_1d < 0, MaskedArray[np.bool]) +assert_type(MAR_s < MAR_s, MaskedArray[np.bool]) +assert_type(MAR_byte < MAR_byte, MaskedArray[np.bool]) + +assert_type(MAR_f4 <= 3, MaskedArray[np.bool]) +assert_type(MAR_i8 <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_dt64 <= AR_dt64, MaskedArray[np.bool]) +assert_type(MAR_o <= AR_o, MaskedArray[np.bool]) +assert_type(MAR_1d <= 0, MaskedArray[np.bool]) +assert_type(MAR_s <= MAR_s, MaskedArray[np.bool]) +assert_type(MAR_byte <= MAR_byte, MaskedArray[np.bool]) + +assert_type(MAR_byte.count(), int) +assert_type(MAR_f4.count(axis=None), int) +assert_type(MAR_f4.count(axis=0), NDArray[np.int_]) +assert_type(MAR_b.count(axis=(0, 1)), NDArray[np.int_]) +assert_type(MAR_o.count(keepdims=True), NDArray[np.int_]) +assert_type(MAR_o.count(axis=None, keepdims=True), NDArray[np.int_]) +assert_type(MAR_o.count(None, True), NDArray[np.int_]) + +assert_type(np.ma.count(MAR_byte), int) +assert_type(np.ma.count(MAR_byte, axis=None), int) +assert_type(np.ma.count(MAR_f4, axis=0), NDArray[np.int_]) +assert_type(np.ma.count(MAR_b, axis=(0, 1)), NDArray[np.int_]) +assert_type(np.ma.count(MAR_o, keepdims=True), NDArray[np.int_]) +assert_type(np.ma.count(MAR_o, axis=None, keepdims=True), NDArray[np.int_]) +assert_type(np.ma.count(MAR_o, None, True), NDArray[np.int_]) + +assert_type(MAR_f4.compressed(), np.ndarray[tuple[int], np.dtype[np.float32]]) + +assert_type(MAR_f4.compress([True, False]), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_f4.compress([True, False], axis=0), MaskedArray[np.float32]) +assert_type(MAR_f4.compress([True, False], axis=0, out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f4.compress([True, False], 0, MAR_subclass), MaskedArraySubclassC) + +assert_type(np.ma.compressed(MAR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.ma.compressed([[1, 2, 3]]), np.ndarray[tuple[int], np.dtype]) + +assert_type(MAR_f4.put([0, 4, 8], [10, 20, 30]), None) +assert_type(MAR_f4.put(4, 999), None) +assert_type(MAR_f4.put(4, 999, mode="clip"), None) + +assert_type(MAR_c8.__array_wrap__(AR_b), MaskedArray[np.bool]) + +assert_type(np.ma.put(MAR_f4, [0, 4, 8], [10, 20, 30]), None) +assert_type(np.ma.put(MAR_f4, 4, 999), None) +assert_type(np.ma.put(MAR_f4, 4, 999, mode="clip"), None) + +assert_type(np.ma.putmask(MAR_f4, [True, False], [0, 1]), None) +assert_type(np.ma.putmask(MAR_f4, np.False_, [0, 1]), None) + +assert_type(MAR_f4.filled(float("nan")), NDArray[np.float32]) +assert_type(MAR_i8.filled(), NDArray[np.int64]) +assert_type(MAR_1d.filled(), np.ndarray[tuple[int], np.dtype]) + +assert_type(np.ma.filled(MAR_f4, float("nan")), NDArray[np.float32]) +assert_type(np.ma.filled([[1, 2, 3]]), NDArray[Any]) +# PyRight detects this one correctly, but mypy doesn't. +# https://github.com/numpy/numpy/pull/28742#discussion_r2048968375 +assert_type(np.ma.filled(MAR_1d), np.ndarray[tuple[int], np.dtype]) # type: ignore[assert-type] + +assert_type(MAR_b.repeat(3), np.ma.MaskedArray[tuple[int], np.dtype[np.bool]]) +assert_type(MAR_2d_f4.repeat(MAR_i8), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.repeat(MAR_i8, axis=None), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.repeat(MAR_i8, axis=0), MaskedArray[np.float32]) + +assert_type(np.ma.allequal(AR_f4, MAR_f4), bool) +assert_type(np.ma.allequal(AR_f4, MAR_f4, fill_value=False), bool) + +assert_type(np.ma.allclose(AR_f4, MAR_f4), bool) +assert_type(np.ma.allclose(AR_f4, MAR_f4, masked_equal=False), bool) +assert_type(np.ma.allclose(AR_f4, MAR_f4, rtol=.4, atol=.3), bool) + +assert_type(MAR_2d_f4.ravel(), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_1d.ravel(order="A"), np.ma.MaskedArray[tuple[int], np.dtype[Any]]) + +assert_type(np.ma.getmask(MAR_f4), NDArray[np.bool] | _NoMaskType) +# PyRight detects this one correctly, but mypy doesn't: +# `Revealed type is "Union[numpy.ndarray[Any, Any], numpy.bool[Any]]"` +assert_type(np.ma.getmask(MAR_1d), np.ndarray[tuple[int], np.dtype[np.bool]] | np.bool) # type: ignore[assert-type] +assert_type(np.ma.getmask(MAR_2d_f4), np.ndarray[tuple[int, int], np.dtype[np.bool]] | _NoMaskType) +assert_type(np.ma.getmask([1, 2]), NDArray[np.bool] | _NoMaskType) +assert_type(np.ma.getmask(np.int64(1)), _NoMaskType) + +assert_type(np.ma.is_mask(MAR_1d), bool) +assert_type(np.ma.is_mask(AR_b), bool) + +def func(x: object) -> None: + if np.ma.is_mask(x): + assert_type(x, NDArray[np.bool]) + else: + assert_type(x, object) + +assert_type(MAR_2d_f4.mT, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) + +assert_type(MAR_c16.real, MaskedArray[np.float64]) +assert_type(MAR_c16.imag, MaskedArray[np.float64]) + +assert_type(MAR_2d_f4.baseclass, type[NDArray[Any]]) + +assert_type(MAR_b.swapaxes(0, 1), MaskedArray[np.bool]) +assert_type(MAR_2d_f4.swapaxes(1, 0), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) + +assert_type(MAR_2d_f4[AR_i8], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[[1, 2, 3]], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[1:], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[:], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[0, 0], Any) +assert_type(MAR_2d_f4[:, np.newaxis], MaskedArray[np.float32]) +assert_type(MAR_2d_f4[..., -1], MaskedArray[np.float32]) +assert_type(MAR_2d_V["field_0"], np.ma.MaskedArray[tuple[int, int], np.dtype]) +assert_type(MAR_2d_V[["field_0", "field_1"]], np.ma.MaskedArray[tuple[int, int], np.dtype[np.void]]) + +assert_type(np.ma.nomask, np.bool[Literal[False]]) +assert_type(np.ma.MaskType, type[np.bool]) + +assert_type(MAR_1d.__setmask__([True, False]), None) +assert_type(MAR_1d.__setmask__(np.False_), None) + +assert_type(MAR_2d_f4.harden_mask(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_i8.harden_mask(), MaskedArray[np.int64]) +assert_type(MAR_2d_f4.soften_mask(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_i8.soften_mask(), MaskedArray[np.int64]) +assert_type(MAR_f4.unshare_mask(), MaskedArray[np.float32]) +assert_type(MAR_b.shrink_mask(), MaskedArray[np.bool_]) + +assert_type(MAR_i8.hardmask, bool) +assert_type(MAR_i8.sharedmask, bool) + +assert_type(MAR_i8.recordmask, np.ma.MaskType | NDArray[np.ma.MaskType]) +assert_type(MAR_2d_f4.recordmask, np.ma.MaskType | np.ndarray[tuple[int, int], np.dtype[np.ma.MaskType]]) + +assert_type(MAR_2d_f4.anom(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.anom(axis=0, dtype=np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype]) +assert_type(MAR_2d_f4.anom(0, np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype]) +assert_type(MAR_2d_f4.anom(0, "float16"), np.ma.MaskedArray[tuple[int, int], np.dtype]) + +assert_type(MAR_i8.fill_value, np.int64) + +assert_type(MAR_b.transpose(), MaskedArray[np.bool]) +assert_type(MAR_2d_f4.transpose(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.transpose(1, 0), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.transpose((1, 0)), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_b.T, MaskedArray[np.bool]) +assert_type(MAR_2d_f4.T, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) + +assert_type(MAR_2d_f4.dot(1), MaskedArray[Any]) +assert_type(MAR_2d_f4.dot([1]), MaskedArray[Any]) +assert_type(MAR_2d_f4.dot(1, out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], ...]) +assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) + +assert_type(MAR_f8.trace(), Any) +assert_type(MAR_f8.trace(out=MAR_subclass), MaskedArraySubclassC) +assert_type(MAR_f8.trace(out=MAR_subclass, dtype=None), MaskedArraySubclassC) + +assert_type(MAR_f8.round(), MaskedArray[np.float64]) +assert_type(MAR_f8.round(out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_i8.reshape(None), MaskedArray[np.int64]) +assert_type(MAR_f8.reshape(-1), np.ma.MaskedArray[tuple[int], np.dtype[np.float64]]) +assert_type(MAR_c8.reshape(2, 3, 4, 5), np.ma.MaskedArray[tuple[int, int, int, int], np.dtype[np.complex64]]) +assert_type(MAR_td64.reshape(()), np.ma.MaskedArray[tuple[()], np.dtype[np.timedelta64]]) +assert_type(MAR_s.reshape([]), np.ma.MaskedArray[tuple[()], np.dtype[np.str_]]) +assert_type(MAR_V.reshape((480, 720, 4)), np.ma.MaskedArray[tuple[int, int, int], np.dtype[np.void]]) + +assert_type(MAR_f8.cumprod(), MaskedArray[Any]) +assert_type(MAR_f8.cumprod(out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_f8.cumsum(), MaskedArray[Any]) +assert_type(MAR_f8.cumsum(out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_f8.view(), MaskedArray[np.float64]) +assert_type(MAR_f8.view(dtype=np.float32), MaskedArray[np.float32]) +assert_type(MAR_f8.view(dtype=np.dtype(np.float32)), MaskedArray[np.float32]) +assert_type(MAR_f8.view(dtype=np.float32, fill_value=0), MaskedArray[np.float32]) +assert_type(MAR_f8.view(type=np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_f8.view(None, np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_f8.view(dtype=np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_f8.view(dtype="float32"), MaskedArray[Any]) +assert_type(MAR_f8.view(dtype="float32", type=np.ndarray), np.ndarray[Any, Any]) +assert_type(MAR_2d_f4.view(dtype=np.float16), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float16]]) +assert_type(MAR_2d_f4.view(dtype=np.dtype(np.float16)), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float16]]) + +assert_type(MAR_f8.__deepcopy__(), MaskedArray[np.float64]) + +assert_type(MAR_f8.argsort(), MaskedArray[np.intp]) +assert_type(MAR_f8.argsort(axis=0, kind="heap", order=("x", "y")), MaskedArray[np.intp]) +assert_type(MAR_f8.argsort(endwith=True, fill_value=1.5, stable=False), MaskedArray[np.intp]) + +assert_type(MAR_2d_f4.flat, np.ma.core.MaskedIterator[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.flat.ma, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.flat[AR_i8], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[[1, 2, 3]], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[1:], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[:], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[0, 0], Any) +assert_type(MAR_2d_f4.flat[:, np.newaxis], MaskedArray[np.float32]) +assert_type(MAR_2d_f4.flat[..., -1], MaskedArray[np.float32]) + +def invalid_resize() -> None: + assert_type(MAR_f8.resize((1, 1)), NoReturn) # type: ignore[arg-type] + +assert_type(np.ma.MaskedArray(AR_f4), MaskedArray[np.float32]) +assert_type(np.ma.MaskedArray(np.array([1, 2, 3]), [True, True, False], np.float16), MaskedArray[np.float16]) +assert_type(np.ma.MaskedArray(np.array([1, 2, 3]), dtype=np.float16), MaskedArray[np.float16]) +assert_type(np.ma.MaskedArray(np.array([1, 2, 3]), copy=True), MaskedArray[Any]) +# TODO: This one could be made more precise, the return type could be `MaskedArraySubclassC` +assert_type(np.ma.MaskedArray(MAR_subclass), MaskedArray[np.complex128]) +# TODO: This one could be made more precise, the return type could be `MaskedArraySubclass[np.float32]` +assert_type(np.ma.MaskedArray(MAR_into_subclass), MaskedArray[np.float32]) + +# Masked Array addition + +assert_type(MAR_b + AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b + AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b + AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_b + AR_LIKE_o, Any) + +assert_type(AR_LIKE_u + MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i + MAR_b, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f + MAR_b, MaskedArray[np.floating]) +assert_type(AR_LIKE_c + MAR_b, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 + MAR_b, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_b, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_b, Any) + +assert_type(MAR_u4 + AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 + AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 + AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 + AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_u4 + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_u4 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u + MAR_u4, MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i + MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f + MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_c + MAR_u4, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 + MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_u4, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_u4, Any) + +assert_type(MAR_i8 + AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 + AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 + AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 + AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_i8 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_i8, MaskedArray[np.int64]) +assert_type(AR_LIKE_u + MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i + MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f + MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_c + MAR_i8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 + MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_i8, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_i8, Any) + +assert_type(MAR_f8 + AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_c + MAR_f8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o + MAR_f8, Any) + +assert_type(MAR_c16 + AR_LIKE_b, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_u + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_i + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_f + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_c + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_o + MAR_c16, Any) + +assert_type(MAR_td64 + AR_LIKE_b, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_u + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_i + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_td64 + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_td64, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_td64, Any) + +assert_type(MAR_dt64 + AR_LIKE_b, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_u, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_i, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_td64, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_o + MAR_dt64, Any) + +assert_type(MAR_o + AR_LIKE_b, Any) +assert_type(MAR_o + AR_LIKE_u, Any) +assert_type(MAR_o + AR_LIKE_i, Any) +assert_type(MAR_o + AR_LIKE_f, Any) +assert_type(MAR_o + AR_LIKE_c, Any) +assert_type(MAR_o + AR_LIKE_td64, Any) +assert_type(MAR_o + AR_LIKE_dt64, Any) +assert_type(MAR_o + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_o, Any) +assert_type(AR_LIKE_u + MAR_o, Any) +assert_type(AR_LIKE_i + MAR_o, Any) +assert_type(AR_LIKE_f + MAR_o, Any) +assert_type(AR_LIKE_c + MAR_o, Any) +assert_type(AR_LIKE_td64 + MAR_o, Any) +assert_type(AR_LIKE_dt64 + MAR_o, Any) +assert_type(AR_LIKE_o + MAR_o, Any) + +# Masked Array subtraction +# Keep in sync with numpy/typing/tests/data/reveal/arithmetic.pyi + +assert_type(MAR_number - AR_number, MaskedArray[np.number]) + +assert_type(MAR_b - AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b - AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b - AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_b - AR_LIKE_o, Any) + +assert_type(AR_LIKE_u - MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i - MAR_b, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f - MAR_b, MaskedArray[np.floating]) +assert_type(AR_LIKE_c - MAR_b, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 - MAR_b, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_b, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_b, Any) + +assert_type(MAR_u4 - AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 - AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 - AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 - AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_u4 - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_u4 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u - MAR_u4, MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i - MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f - MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_c - MAR_u4, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 - MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_u4, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_u4, Any) + +assert_type(MAR_i8 - AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 - AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 - AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 - AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_i8 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_i8, MaskedArray[np.int64]) +assert_type(AR_LIKE_u - MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i - MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f - MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_c - MAR_i8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 - MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_i8, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_i8, Any) + +assert_type(MAR_f8 - AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_c - MAR_f8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o - MAR_f8, Any) + +assert_type(MAR_c16 - AR_LIKE_b, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_u - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_i - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_f - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_c - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_o - MAR_c16, Any) + +assert_type(MAR_td64 - AR_LIKE_b, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_u - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_i - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_td64 - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_td64, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_td64, Any) + +assert_type(MAR_dt64 - AR_LIKE_b, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_u, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_i, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_td64, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_dt64, MaskedArray[np.timedelta64]) +assert_type(MAR_dt64 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_dt64 - MAR_dt64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o - MAR_dt64, Any) + +assert_type(MAR_o - AR_LIKE_b, Any) +assert_type(MAR_o - AR_LIKE_u, Any) +assert_type(MAR_o - AR_LIKE_i, Any) +assert_type(MAR_o - AR_LIKE_f, Any) +assert_type(MAR_o - AR_LIKE_c, Any) +assert_type(MAR_o - AR_LIKE_td64, Any) +assert_type(MAR_o - AR_LIKE_dt64, Any) +assert_type(MAR_o - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_o, Any) +assert_type(AR_LIKE_u - MAR_o, Any) +assert_type(AR_LIKE_i - MAR_o, Any) +assert_type(AR_LIKE_f - MAR_o, Any) +assert_type(AR_LIKE_c - MAR_o, Any) +assert_type(AR_LIKE_td64 - MAR_o, Any) +assert_type(AR_LIKE_dt64 - MAR_o, Any) +assert_type(AR_LIKE_o - MAR_o, Any) + +# Masked Array multiplication + +assert_type(MAR_b * AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b * AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b * AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b * AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b * AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_b * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_u * MAR_b, MaskedArray[np.uint32]) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_b, MaskedArray[np.signedinteger]) # type: ignore[assert-type] +assert_type(AR_LIKE_f * MAR_b, MaskedArray[np.floating]) # type: ignore[assert-type] +assert_type(AR_LIKE_c * MAR_b, MaskedArray[np.complexfloating]) # type: ignore[assert-type] +assert_type(AR_LIKE_td64 * MAR_b, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_b, Any) # type: ignore[assert-type] + +assert_type(MAR_u4 * AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 * AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 * AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 * AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 * AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_u4 * AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_u4 * AR_LIKE_o, Any) + +assert_type(MAR_i8 * AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 * AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 * AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 * AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 * AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 * AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_i8 * AR_LIKE_o, Any) + +assert_type(MAR_f8 * AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 * AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 * AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 * AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 * AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_b * MAR_f8, MaskedArray[np.float64]) # type: ignore[assert-type] +assert_type(AR_LIKE_u * MAR_f8, MaskedArray[np.float64]) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_f8, MaskedArray[np.float64]) # type: ignore[assert-type] +assert_type(AR_LIKE_f * MAR_f8, MaskedArray[np.float64]) # type: ignore[assert-type] +assert_type(AR_LIKE_c * MAR_f8, MaskedArray[np.complexfloating]) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_f8, Any) # type: ignore[assert-type] + +assert_type(MAR_c16 * AR_LIKE_b, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_b * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_u * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_f * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_c * MAR_c16, MaskedArray[np.complex128]) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_c16, Any) # type: ignore[assert-type] + +assert_type(MAR_td64 * AR_LIKE_b, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 * AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 * AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_b * MAR_td64, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_u * MAR_td64, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_td64, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_td64 * MAR_td64, MaskedArray[np.timedelta64]) # type: ignore[assert-type] +assert_type(AR_LIKE_dt64 * MAR_td64, MaskedArray[np.datetime64]) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_td64, Any) # type: ignore[assert-type] + +assert_type(AR_LIKE_o * MAR_dt64, Any) # type: ignore[assert-type] + +assert_type(MAR_o * AR_LIKE_b, Any) +assert_type(MAR_o * AR_LIKE_u, Any) +assert_type(MAR_o * AR_LIKE_i, Any) +assert_type(MAR_o * AR_LIKE_f, Any) +assert_type(MAR_o * AR_LIKE_c, Any) +assert_type(MAR_o * AR_LIKE_td64, Any) +assert_type(MAR_o * AR_LIKE_dt64, Any) +assert_type(MAR_o * AR_LIKE_o, Any) + +# Ignore due to https://github.com/python/mypy/issues/19341 +assert_type(AR_LIKE_b * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_u * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_i * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_f * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_c * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_td64 * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_dt64 * MAR_o, Any) # type: ignore[assert-type] +assert_type(AR_LIKE_o * MAR_o, Any) # type: ignore[assert-type] + +assert_type(MAR_f8.sum(), Any) +assert_type(MAR_f8.sum(axis=0), Any) +assert_type(MAR_f8.sum(keepdims=True), Any) +assert_type(MAR_f8.sum(out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_f8.std(), Any) +assert_type(MAR_f8.std(axis=0), Any) +assert_type(MAR_f8.std(keepdims=True, mean=0.), Any) +assert_type(MAR_f8.std(out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_f8.var(), Any) +assert_type(MAR_f8.var(axis=0), Any) +assert_type(MAR_f8.var(keepdims=True, mean=0.), Any) +assert_type(MAR_f8.var(out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_f8.mean(), Any) +assert_type(MAR_f8.mean(axis=0), Any) +assert_type(MAR_f8.mean(keepdims=True), Any) +assert_type(MAR_f8.mean(out=MAR_subclass), MaskedArraySubclassC) + +assert_type(MAR_f8.prod(), Any) +assert_type(MAR_f8.prod(axis=0), Any) +assert_type(MAR_f8.prod(keepdims=True), Any) +assert_type(MAR_f8.prod(out=MAR_subclass), MaskedArraySubclassC) + +# MaskedArray "true" division + +assert_type(MAR_f8 / b, MaskedArray[np.float64]) +assert_type(MAR_f8 / i, MaskedArray[np.float64]) +assert_type(MAR_f8 / f, MaskedArray[np.float64]) + +assert_type(b / MAR_f8, MaskedArray[np.float64]) +assert_type(i / MAR_f8, MaskedArray[np.float64]) +assert_type(f / MAR_f8, MaskedArray[np.float64]) + +assert_type(MAR_b / AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_b / AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_b / AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_b / AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_b / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_b, MaskedArray[np.float64]) +assert_type(AR_LIKE_u / MAR_b, MaskedArray[np.float64]) +assert_type(AR_LIKE_i / MAR_b, MaskedArray[np.float64]) +assert_type(AR_LIKE_f / MAR_b, MaskedArray[np.float64]) +assert_type(AR_LIKE_o / MAR_b, Any) + +assert_type(MAR_u4 / AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_u4 / AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_u4 / AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_u4 / AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_u4 / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_u4, MaskedArray[np.float64]) +assert_type(AR_LIKE_u / MAR_u4, MaskedArray[np.float64]) +assert_type(AR_LIKE_i / MAR_u4, MaskedArray[np.float64]) +assert_type(AR_LIKE_f / MAR_u4, MaskedArray[np.float64]) +assert_type(AR_LIKE_td64 / MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o / MAR_u4, Any) + +assert_type(MAR_i8 / AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_i8 / AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_i8 / AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_i8 / AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_i8 / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_i8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u / MAR_i8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i / MAR_i8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f / MAR_i8, MaskedArray[np.float64]) +assert_type(AR_LIKE_td64 / MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o / MAR_i8, Any) + +assert_type(MAR_f8 / AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 / AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 / AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 / AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u / MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i / MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f / MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_td64 / MAR_f8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o / MAR_f8, Any) + +assert_type(MAR_td64 / AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 / AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 / AR_LIKE_f, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 / AR_LIKE_td64, MaskedArray[np.float64]) +assert_type(MAR_td64 / AR_LIKE_o, Any) + +assert_type(AR_LIKE_td64 / MAR_td64, MaskedArray[np.float64]) +assert_type(AR_LIKE_o / MAR_td64, Any) + +assert_type(MAR_o / AR_LIKE_b, Any) +assert_type(MAR_o / AR_LIKE_u, Any) +assert_type(MAR_o / AR_LIKE_i, Any) +assert_type(MAR_o / AR_LIKE_f, Any) +assert_type(MAR_o / AR_LIKE_td64, Any) +assert_type(MAR_o / AR_LIKE_dt64, Any) +assert_type(MAR_o / AR_LIKE_o, Any) + +assert_type(AR_LIKE_b / MAR_o, Any) +assert_type(AR_LIKE_u / MAR_o, Any) +assert_type(AR_LIKE_i / MAR_o, Any) +assert_type(AR_LIKE_f / MAR_o, Any) +assert_type(AR_LIKE_td64 / MAR_o, Any) +assert_type(AR_LIKE_dt64 / MAR_o, Any) +assert_type(AR_LIKE_o / MAR_o, Any) + +# MaskedArray floor division + +assert_type(MAR_b // AR_LIKE_b, MaskedArray[np.int8]) +assert_type(MAR_b // AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b // AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b // AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_b, MaskedArray[np.int8]) +assert_type(AR_LIKE_u // MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i // MAR_b, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f // MAR_b, MaskedArray[np.floating]) +assert_type(AR_LIKE_o // MAR_b, Any) + +assert_type(MAR_u4 // AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 // AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 // AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 // AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u // MAR_u4, MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i // MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f // MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_td64 // MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o // MAR_u4, Any) + +assert_type(MAR_i8 // AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 // AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 // AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 // AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_i8, MaskedArray[np.int64]) +assert_type(AR_LIKE_u // MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i // MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f // MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_td64 // MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o // MAR_i8, Any) + +assert_type(MAR_f8 // AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 // AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 // AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 // AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u // MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i // MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f // MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_td64 // MAR_f8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_o // MAR_f8, Any) + +assert_type(MAR_td64 // AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 // AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 // AR_LIKE_f, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 // AR_LIKE_td64, MaskedArray[np.int64]) +assert_type(MAR_td64 // AR_LIKE_o, Any) + +assert_type(AR_LIKE_td64 // MAR_td64, MaskedArray[np.int64]) +assert_type(AR_LIKE_o // MAR_td64, Any) + +assert_type(MAR_o // AR_LIKE_b, Any) +assert_type(MAR_o // AR_LIKE_u, Any) +assert_type(MAR_o // AR_LIKE_i, Any) +assert_type(MAR_o // AR_LIKE_f, Any) +assert_type(MAR_o // AR_LIKE_td64, Any) +assert_type(MAR_o // AR_LIKE_dt64, Any) +assert_type(MAR_o // AR_LIKE_o, Any) + +assert_type(AR_LIKE_b // MAR_o, Any) +assert_type(AR_LIKE_u // MAR_o, Any) +assert_type(AR_LIKE_i // MAR_o, Any) +assert_type(AR_LIKE_f // MAR_o, Any) +assert_type(AR_LIKE_td64 // MAR_o, Any) +assert_type(AR_LIKE_dt64 // MAR_o, Any) +assert_type(AR_LIKE_o // MAR_o, Any) + +# Masked Array power + +assert_type(MAR_b ** AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b ** AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b ** AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b ** AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_u ** MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i ** MAR_b, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f ** MAR_b, MaskedArray[np.floating]) +assert_type(AR_LIKE_c ** MAR_b, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_b, Any) + +assert_type(MAR_u4 ** AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 ** AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 ** AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 ** AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 ** AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_u4 ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_b ** MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u ** MAR_u4, MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i ** MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f ** MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_c ** MAR_u4, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_u4, Any) + +assert_type(MAR_i8 ** AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 ** AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 ** AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 ** AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 ** AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 ** AR_LIKE_o, Any) +assert_type(MAR_i8 ** AR_LIKE_b, MaskedArray[np.int64]) + +assert_type(AR_LIKE_u ** MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i ** MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f ** MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_c ** MAR_i8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_i8, Any) + +assert_type(MAR_f8 ** AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 ** AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 ** AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 ** AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 ** AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_b ** MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u ** MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i ** MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f ** MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_c ** MAR_f8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o ** MAR_f8, Any) + +assert_type(MAR_c16 ** AR_LIKE_b, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_b ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_u ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_i ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_f ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_c ** MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_o ** MAR_c16, Any) + +assert_type(MAR_o ** AR_LIKE_b, Any) +assert_type(MAR_o ** AR_LIKE_u, Any) +assert_type(MAR_o ** AR_LIKE_i, Any) +assert_type(MAR_o ** AR_LIKE_f, Any) +assert_type(MAR_o ** AR_LIKE_c, Any) +assert_type(MAR_o ** AR_LIKE_o, Any) + +assert_type(AR_LIKE_b ** MAR_o, Any) +assert_type(AR_LIKE_u ** MAR_o, Any) +assert_type(AR_LIKE_i ** MAR_o, Any) +assert_type(AR_LIKE_f ** MAR_o, Any) +assert_type(AR_LIKE_c ** MAR_o, Any) +assert_type(AR_LIKE_o ** MAR_o, Any) diff --git a/local_packages/numpy/typing/tests/data/reveal/matrix.pyi b/local_packages/numpy/typing/tests/data/reveal/matrix.pyi new file mode 100644 index 0000000..3a32b3d --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/matrix.pyi @@ -0,0 +1,73 @@ +from typing import Any, TypeAlias, assert_type + +import numpy as np +import numpy.typing as npt + +_Shape2D: TypeAlias = tuple[int, int] + +mat: np.matrix[_Shape2D, np.dtype[np.int64]] +ar_f8: npt.NDArray[np.float64] +ar_ip: npt.NDArray[np.intp] + +assert_type(mat * 5, np.matrix) +assert_type(5 * mat, np.matrix) +mat *= 5 + +assert_type(mat**5, np.matrix) +mat **= 5 + +assert_type(mat.sum(), Any) +assert_type(mat.mean(), Any) +assert_type(mat.std(), Any) +assert_type(mat.var(), Any) +assert_type(mat.prod(), Any) +assert_type(mat.any(), np.bool) +assert_type(mat.all(), np.bool) +assert_type(mat.max(), np.int64) +assert_type(mat.min(), np.int64) +assert_type(mat.argmax(), np.intp) +assert_type(mat.argmin(), np.intp) +assert_type(mat.ptp(), np.int64) + +assert_type(mat.sum(axis=0), np.matrix) +assert_type(mat.mean(axis=0), np.matrix) +assert_type(mat.std(axis=0), np.matrix) +assert_type(mat.var(axis=0), np.matrix) +assert_type(mat.prod(axis=0), np.matrix) +assert_type(mat.any(axis=0), np.matrix[_Shape2D, np.dtype[np.bool]]) +assert_type(mat.all(axis=0), np.matrix[_Shape2D, np.dtype[np.bool]]) +assert_type(mat.max(axis=0), np.matrix[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.min(axis=0), np.matrix[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.argmax(axis=0), np.matrix[_Shape2D, np.dtype[np.intp]]) +assert_type(mat.argmin(axis=0), np.matrix[_Shape2D, np.dtype[np.intp]]) +assert_type(mat.ptp(axis=0), np.matrix[_Shape2D, np.dtype[np.int64]]) + +assert_type(mat.sum(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.mean(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.std(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.var(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.prod(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.any(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.all(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.max(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.min(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.argmax(out=ar_ip), npt.NDArray[np.intp]) +assert_type(mat.argmin(out=ar_ip), npt.NDArray[np.intp]) +assert_type(mat.ptp(out=ar_f8), npt.NDArray[np.float64]) + +assert_type(mat.T, np.matrix[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.I, np.matrix) +assert_type(mat.A, np.ndarray[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.A1, npt.NDArray[np.int64]) +assert_type(mat.H, np.matrix[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.getT(), np.matrix[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.getI(), np.matrix) +assert_type(mat.getA(), np.ndarray[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.getA1(), npt.NDArray[np.int64]) +assert_type(mat.getH(), np.matrix[_Shape2D, np.dtype[np.int64]]) + +assert_type(np.bmat(ar_f8), np.matrix) +assert_type(np.bmat([[0, 1, 2]]), np.matrix) +assert_type(np.bmat("mat"), np.matrix) + +assert_type(np.asmatrix(ar_f8, dtype=np.int64), np.matrix) diff --git a/local_packages/numpy/typing/tests/data/reveal/memmap.pyi b/local_packages/numpy/typing/tests/data/reveal/memmap.pyi new file mode 100644 index 0000000..f3e20ed --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/memmap.pyi @@ -0,0 +1,19 @@ +from typing import Any, assert_type + +import numpy as np + +memmap_obj: np.memmap[Any, np.dtype[np.str_]] + +assert_type(np.memmap.__array_priority__, float) +assert_type(memmap_obj.__array_priority__, float) +assert_type(memmap_obj.filename, str | None) +assert_type(memmap_obj.offset, int) +assert_type(memmap_obj.mode, str) +assert_type(memmap_obj.flush(), None) + +assert_type(np.memmap("file.txt", offset=5), np.memmap[Any, np.dtype[np.uint8]]) +assert_type(np.memmap(b"file.txt", dtype=np.float64, shape=(10, 3)), np.memmap[Any, np.dtype[np.float64]]) +with open("file.txt", "rb") as f: + assert_type(np.memmap(f, dtype=float, order="K"), np.memmap[Any, np.dtype]) + +assert_type(memmap_obj.__array_finalize__(object()), None) diff --git a/local_packages/numpy/typing/tests/data/reveal/mod.pyi b/local_packages/numpy/typing/tests/data/reveal/mod.pyi new file mode 100644 index 0000000..ef07dc0 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/mod.pyi @@ -0,0 +1,178 @@ +import datetime as dt +from typing import Literal as L, assert_type + +import numpy as np +import numpy.typing as npt + +f8: np.float64 +i8: np.int64 +u8: np.uint64 + +f4: np.float32 +i4: np.int32 +u4: np.uint32 + +m: np.timedelta64 +m_nat: np.timedelta64[None] +m_int0: np.timedelta64[L[0]] +m_int: np.timedelta64[int] +m_td: np.timedelta64[dt.timedelta] + +b_: np.bool + +b: bool +i: int +f: float + +AR_b: npt.NDArray[np.bool] +AR_m: npt.NDArray[np.timedelta64] + +# Time structures + +assert_type(m % m, np.timedelta64) +assert_type(m % m_nat, np.timedelta64[None]) +assert_type(m % m_int0, np.timedelta64[None]) +assert_type(m % m_int, np.timedelta64[int | None]) +assert_type(m_nat % m, np.timedelta64[None]) +assert_type(m_int % m_nat, np.timedelta64[None]) +assert_type(m_int % m_int0, np.timedelta64[None]) +assert_type(m_int % m_int, np.timedelta64[int | None]) +assert_type(m_int % m_td, np.timedelta64[int | None]) +assert_type(m_td % m_nat, np.timedelta64[None]) +assert_type(m_td % m_int0, np.timedelta64[None]) +assert_type(m_td % m_int, np.timedelta64[int | None]) +assert_type(m_td % m_td, np.timedelta64[dt.timedelta | None]) + +assert_type(AR_m % m, npt.NDArray[np.timedelta64]) +assert_type(m % AR_m, npt.NDArray[np.timedelta64]) + +assert_type(divmod(m, m), tuple[np.int64, np.timedelta64]) +assert_type(divmod(m, m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m, m_int0), tuple[np.int64, np.timedelta64[None]]) +# workarounds for https://github.com/microsoft/pyright/issues/9663 +assert_type(m.__divmod__(m_int), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(divmod(m_nat, m), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m_int, m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m_int, m_int0), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m_int, m_int), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(divmod(m_int, m_td), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(divmod(m_td, m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m_td, m_int0), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m_td, m_int), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(divmod(m_td, m_td), tuple[np.int64, np.timedelta64[dt.timedelta | None]]) + +assert_type(divmod(AR_m, m), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) +assert_type(divmod(m, AR_m), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) + +# Bool + +assert_type(b_ % b, np.int8) +assert_type(b_ % i, np.int_) +assert_type(b_ % f, np.float64) +assert_type(b_ % b_, np.int8) +assert_type(b_ % i8, np.int64) +assert_type(b_ % u8, np.uint64) +assert_type(b_ % f8, np.float64) +assert_type(b_ % AR_b, npt.NDArray[np.int8]) + +assert_type(divmod(b_, b), tuple[np.int8, np.int8]) +assert_type(divmod(b_, b_), tuple[np.int8, np.int8]) +# workarounds for https://github.com/microsoft/pyright/issues/9663 +assert_type(b_.__divmod__(i), tuple[np.int_, np.int_]) +assert_type(b_.__divmod__(f), tuple[np.float64, np.float64]) +assert_type(b_.__divmod__(i8), tuple[np.int64, np.int64]) +assert_type(b_.__divmod__(u8), tuple[np.uint64, np.uint64]) +assert_type(divmod(b_, f8), tuple[np.float64, np.float64]) +assert_type(divmod(b_, AR_b), tuple[npt.NDArray[np.int8], npt.NDArray[np.int8]]) + +assert_type(b % b_, np.int8) +assert_type(i % b_, np.int_) +assert_type(f % b_, np.float64) +assert_type(b_ % b_, np.int8) +assert_type(i8 % b_, np.int64) +assert_type(u8 % b_, np.uint64) +assert_type(f8 % b_, np.float64) +assert_type(AR_b % b_, npt.NDArray[np.int8]) + +assert_type(divmod(b, b_), tuple[np.int8, np.int8]) +assert_type(divmod(i, b_), tuple[np.int_, np.int_]) +assert_type(divmod(f, b_), tuple[np.float64, np.float64]) +assert_type(divmod(b_, b_), tuple[np.int8, np.int8]) +assert_type(divmod(i8, b_), tuple[np.int64, np.int64]) +assert_type(divmod(u8, b_), tuple[np.uint64, np.uint64]) +assert_type(divmod(f8, b_), tuple[np.float64, np.float64]) +assert_type(divmod(AR_b, b_), tuple[npt.NDArray[np.int8], npt.NDArray[np.int8]]) + +# int + +assert_type(i8 % b, np.int64) +assert_type(i8 % i8, np.int64) +assert_type(i8 % f, np.float64) +assert_type(i8 % f8, np.float64) +assert_type(i4 % i8, np.signedinteger) +assert_type(i4 % f8, np.float64) +assert_type(i4 % i4, np.int32) +assert_type(i4 % f4, np.floating) +assert_type(i8 % AR_b, npt.NDArray[np.int64]) + +assert_type(divmod(i8, b), tuple[np.int64, np.int64]) +assert_type(divmod(i8, i4), tuple[np.signedinteger, np.signedinteger]) +assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) +# workarounds for https://github.com/microsoft/pyright/issues/9663 +assert_type(i8.__divmod__(f), tuple[np.float64, np.float64]) +assert_type(i8.__divmod__(f8), tuple[np.float64, np.float64]) +assert_type(divmod(i8, f4), tuple[np.floating, np.floating]) +assert_type(divmod(i4, i4), tuple[np.int32, np.int32]) +assert_type(divmod(i4, f4), tuple[np.floating, np.floating]) +assert_type(divmod(i8, AR_b), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]) + +assert_type(b % i8, np.int64) +assert_type(f % i8, np.float64) +assert_type(i8 % i8, np.int64) +assert_type(f8 % i8, np.float64) +assert_type(i8 % i4, np.signedinteger) +assert_type(f8 % i4, np.float64) +assert_type(i4 % i4, np.int32) +assert_type(f4 % i4, np.floating) +assert_type(AR_b % i8, npt.NDArray[np.int64]) + +assert_type(divmod(b, i8), tuple[np.int64, np.int64]) +assert_type(divmod(f, i8), tuple[np.float64, np.float64]) +assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) +assert_type(divmod(f8, i8), tuple[np.float64, np.float64]) +assert_type(divmod(i4, i8), tuple[np.signedinteger, np.signedinteger]) +assert_type(divmod(i4, i4), tuple[np.int32, np.int32]) +# workarounds for https://github.com/microsoft/pyright/issues/9663 +assert_type(f4.__divmod__(i8), tuple[np.floating, np.floating]) +assert_type(f4.__divmod__(i4), tuple[np.floating, np.floating]) +assert_type(AR_b.__divmod__(i8), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]) + +# float + +assert_type(f8 % b, np.float64) +assert_type(f8 % f, np.float64) +assert_type(i8 % f4, np.floating) +assert_type(f4 % f4, np.float32) +assert_type(f8 % AR_b, npt.NDArray[np.float64]) + +assert_type(divmod(f8, b), tuple[np.float64, np.float64]) +assert_type(divmod(f8, f), tuple[np.float64, np.float64]) +assert_type(divmod(f8, f8), tuple[np.float64, np.float64]) +assert_type(divmod(f8, f4), tuple[np.float64, np.float64]) +assert_type(divmod(f4, f4), tuple[np.float32, np.float32]) +assert_type(divmod(f8, AR_b), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) + +assert_type(b % f8, np.float64) +assert_type(f % f8, np.float64) # pyright: ignore[reportAssertTypeFailure] # pyright incorrectly infers `builtins.float` +assert_type(f8 % f8, np.float64) +assert_type(f8 % f8, np.float64) +assert_type(f4 % f4, np.float32) +assert_type(AR_b % f8, npt.NDArray[np.float64]) + +assert_type(divmod(b, f8), tuple[np.float64, np.float64]) +assert_type(divmod(f8, f8), tuple[np.float64, np.float64]) +assert_type(divmod(f4, f4), tuple[np.float32, np.float32]) +# workarounds for https://github.com/microsoft/pyright/issues/9663 +assert_type(f8.__rdivmod__(f), tuple[np.float64, np.float64]) +assert_type(f8.__rdivmod__(f4), tuple[np.float64, np.float64]) +assert_type(AR_b.__divmod__(f8), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) diff --git a/local_packages/numpy/typing/tests/data/reveal/modules.pyi b/local_packages/numpy/typing/tests/data/reveal/modules.pyi new file mode 100644 index 0000000..628fb50 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/modules.pyi @@ -0,0 +1,51 @@ +import types +from typing import assert_type + +import numpy as np +from numpy import f2py + +assert_type(np, types.ModuleType) + +assert_type(np.char, types.ModuleType) +assert_type(np.ctypeslib, types.ModuleType) +assert_type(np.emath, types.ModuleType) +assert_type(np.fft, types.ModuleType) +assert_type(np.lib, types.ModuleType) +assert_type(np.linalg, types.ModuleType) +assert_type(np.ma, types.ModuleType) +assert_type(np.matrixlib, types.ModuleType) +assert_type(np.polynomial, types.ModuleType) +assert_type(np.random, types.ModuleType) +assert_type(np.rec, types.ModuleType) +assert_type(np.testing, types.ModuleType) +assert_type(np.version, types.ModuleType) +assert_type(np.exceptions, types.ModuleType) +assert_type(np.dtypes, types.ModuleType) + +assert_type(np.lib.format, types.ModuleType) +assert_type(np.lib.mixins, types.ModuleType) +assert_type(np.lib.scimath, types.ModuleType) +assert_type(np.lib.stride_tricks, types.ModuleType) +assert_type(np.ma.extras, types.ModuleType) +assert_type(np.polynomial.chebyshev, types.ModuleType) +assert_type(np.polynomial.hermite, types.ModuleType) +assert_type(np.polynomial.hermite_e, types.ModuleType) +assert_type(np.polynomial.laguerre, types.ModuleType) +assert_type(np.polynomial.legendre, types.ModuleType) +assert_type(np.polynomial.polynomial, types.ModuleType) + +assert_type(np.__path__, list[str]) +assert_type(np.__version__, str) +assert_type(np.test, np._pytesttester.PytestTester) +assert_type(np.test.module_name, str) + +assert_type(np.__all__, list[str]) +assert_type(np.char.__all__, list[str]) +assert_type(np.ctypeslib.__all__, list[str]) +assert_type(np.emath.__all__, list[str]) +assert_type(np.lib.__all__, list[str]) +assert_type(np.ma.__all__, list[str]) +assert_type(np.random.__all__, list[str]) +assert_type(np.rec.__all__, list[str]) +assert_type(np.testing.__all__, list[str]) +assert_type(f2py.__all__, list[str]) diff --git a/local_packages/numpy/typing/tests/data/reveal/multiarray.pyi b/local_packages/numpy/typing/tests/data/reveal/multiarray.pyi new file mode 100644 index 0000000..424f60d --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/multiarray.pyi @@ -0,0 +1,197 @@ +import datetime as dt +from typing import Any, Literal, TypeVar, assert_type + +import numpy as np +import numpy.typing as npt + +_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) + +class SubClass(npt.NDArray[_ScalarT_co]): ... + +subclass: SubClass[np.float64] + +AR_f8: npt.NDArray[np.float64] +AR_i8: npt.NDArray[np.int64] +AR_u1: npt.NDArray[np.uint8] +AR_m: npt.NDArray[np.timedelta64] +AR_M: npt.NDArray[np.datetime64] + +AR_LIKE_f: list[float] +AR_LIKE_i: list[int] + +m: np.timedelta64 +M: np.datetime64 + +b_f8 = np.broadcast(AR_f8) +b_i8_f8_f8 = np.broadcast(AR_i8, AR_f8, AR_f8) + +nditer_obj: np.nditer + +date_scalar: dt.date +date_seq: list[dt.date] +timedelta_seq: list[dt.timedelta] + +n1: Literal[1] +n2: Literal[2] +n3: Literal[3] + +f8: np.float64 + +def func11(a: int) -> bool: ... +def func21(a: int, b: int) -> int: ... +def func12(a: int) -> tuple[complex, bool]: ... + +assert_type(next(b_f8), tuple[Any, ...]) +assert_type(b_f8.reset(), None) +assert_type(b_f8.index, int) +assert_type(b_f8.iters, tuple[np.flatiter[Any], ...]) +assert_type(b_f8.nd, int) +assert_type(b_f8.ndim, int) +assert_type(b_f8.numiter, int) +assert_type(b_f8.shape, tuple[Any, ...]) +assert_type(b_f8.size, int) + +assert_type(next(b_i8_f8_f8), tuple[Any, ...]) +assert_type(b_i8_f8_f8.reset(), None) +assert_type(b_i8_f8_f8.index, int) +assert_type(b_i8_f8_f8.iters, tuple[np.flatiter[Any], ...]) +assert_type(b_i8_f8_f8.nd, int) +assert_type(b_i8_f8_f8.ndim, int) +assert_type(b_i8_f8_f8.numiter, int) +assert_type(b_i8_f8_f8.shape, tuple[Any, ...]) +assert_type(b_i8_f8_f8.size, int) + +assert_type(np.inner(AR_f8, AR_i8), Any) + +assert_type(np.where([True, True, False]), tuple[npt.NDArray[np.intp], ...]) +assert_type(np.where([True, True, False], 1, 0), npt.NDArray[Any]) + +assert_type(np.lexsort([0, 1, 2]), npt.NDArray[np.intp]) + +assert_type(np.can_cast(np.dtype("i8"), int), bool) +assert_type(np.can_cast(AR_f8, "f8"), bool) +assert_type(np.can_cast(AR_f8, np.complex128, casting="unsafe"), bool) + +assert_type(np.min_scalar_type([1]), np.dtype) +assert_type(np.min_scalar_type(AR_f8), np.dtype) + +assert_type(np.result_type(int, [1]), np.dtype) +assert_type(np.result_type(AR_f8, AR_u1), np.dtype) +assert_type(np.result_type(AR_f8, np.complex128), np.dtype) + +assert_type(np.dot(AR_LIKE_f, AR_i8), Any) +assert_type(np.dot(AR_u1, 1), Any) +assert_type(np.dot(1.5j, 1), Any) +assert_type(np.dot(AR_u1, 1, out=AR_f8), npt.NDArray[np.float64]) + +assert_type(np.vdot(AR_LIKE_f, AR_i8), np.floating) +assert_type(np.vdot(AR_u1, 1), np.signedinteger) +assert_type(np.vdot(1.5j, 1), np.complexfloating) + +assert_type(np.bincount(AR_i8), npt.NDArray[np.intp]) + +assert_type(np.copyto(AR_f8, [1., 1.5, 1.6]), None) + +assert_type(np.putmask(AR_f8, [True, True, False], 1.5), None) + +assert_type(np.packbits(AR_i8), np.ndarray[tuple[int], np.dtype[np.uint8]]) +assert_type(np.packbits(AR_u1), np.ndarray[tuple[int], np.dtype[np.uint8]]) +assert_type(np.packbits(AR_i8, axis=1), npt.NDArray[np.uint8]) +assert_type(np.packbits(AR_u1, axis=1), npt.NDArray[np.uint8]) + +assert_type(np.unpackbits(AR_u1), np.ndarray[tuple[int], np.dtype[np.uint8]]) +assert_type(np.unpackbits(AR_u1, axis=1), npt.NDArray[np.uint8]) + +assert_type(np.shares_memory(1, 2), bool) +assert_type(np.shares_memory(AR_f8, AR_f8, max_work=-1), bool) + +assert_type(np.may_share_memory(1, 2), bool) +assert_type(np.may_share_memory(AR_f8, AR_f8, max_work=0), bool) + +assert_type(np.promote_types(np.int32, np.int64), np.dtype) +assert_type(np.promote_types("f4", float), np.dtype) + +assert_type(np.frompyfunc(func11, n1, n1).nin, Literal[1]) +assert_type(np.frompyfunc(func11, n1, n1).nout, Literal[1]) +assert_type(np.frompyfunc(func11, n1, n1).nargs, Literal[2]) +assert_type(np.frompyfunc(func11, n1, n1).ntypes, Literal[1]) +assert_type(np.frompyfunc(func11, n1, n1).identity, None) +assert_type(np.frompyfunc(func11, n1, n1).signature, None) +assert_type(np.frompyfunc(func11, n1, n1)(f8), bool) +assert_type(np.frompyfunc(func11, n1, n1)(AR_f8), bool | npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func11, n1, n1).at(AR_f8, AR_i8), None) + +assert_type(np.frompyfunc(func21, n2, n1).nin, Literal[2]) +assert_type(np.frompyfunc(func21, n2, n1).nout, Literal[1]) +assert_type(np.frompyfunc(func21, n2, n1).nargs, Literal[3]) +assert_type(np.frompyfunc(func21, n2, n1).ntypes, Literal[1]) +assert_type(np.frompyfunc(func21, n2, n1).identity, None) +assert_type(np.frompyfunc(func21, n2, n1).signature, None) +assert_type(np.frompyfunc(func21, n2, n1)(f8, f8), int) +assert_type(np.frompyfunc(func21, n2, n1)(AR_f8, f8), int | npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func21, n2, n1)(f8, AR_f8), int | npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func21, n2, n1).reduce(AR_f8, axis=0), int | npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func21, n2, n1).accumulate(AR_f8), npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func21, n2, n1).reduceat(AR_f8, AR_i8), npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func21, n2, n1).outer(f8, f8), int) +assert_type(np.frompyfunc(func21, n2, n1).outer(AR_f8, f8), int | npt.NDArray[np.object_]) + +assert_type(np.frompyfunc(func21, n2, n1, identity=0).nin, Literal[2]) +assert_type(np.frompyfunc(func21, n2, n1, identity=0).nout, Literal[1]) +assert_type(np.frompyfunc(func21, n2, n1, identity=0).nargs, Literal[3]) +assert_type(np.frompyfunc(func21, n2, n1, identity=0).ntypes, Literal[1]) +assert_type(np.frompyfunc(func21, n2, n1, identity=0).identity, int) +assert_type(np.frompyfunc(func21, n2, n1, identity=0).signature, None) + +assert_type(np.frompyfunc(func12, n1, n2).nin, Literal[1]) +assert_type(np.frompyfunc(func12, n1, n2).nout, Literal[2]) +assert_type(np.frompyfunc(func12, n1, n2).nargs, int) +assert_type(np.frompyfunc(func12, n1, n2).ntypes, Literal[1]) +assert_type(np.frompyfunc(func12, n1, n2).identity, None) +assert_type(np.frompyfunc(func12, n1, n2).signature, None) +assert_type( + np.frompyfunc(func12, n2, n2)(f8, f8), + tuple[complex, complex, *tuple[complex, ...]], +) +assert_type( + np.frompyfunc(func12, n2, n2)(AR_f8, f8), + tuple[ + complex | npt.NDArray[np.object_], + complex | npt.NDArray[np.object_], + *tuple[complex | npt.NDArray[np.object_], ...], + ], +) + +assert_type(np.datetime_data("m8[D]"), tuple[str, int]) +assert_type(np.datetime_data(np.datetime64), tuple[str, int]) +assert_type(np.datetime_data(np.dtype(np.timedelta64)), tuple[str, int]) + +assert_type(np.busday_count("2011-01", "2011-02"), np.int_) +assert_type(np.busday_count(["2011-01"], "2011-02"), npt.NDArray[np.int_]) +assert_type(np.busday_count(["2011-01"], date_scalar), npt.NDArray[np.int_]) + +assert_type(np.busday_offset(M, m), np.datetime64) +assert_type(np.busday_offset(date_scalar, m), np.datetime64) +assert_type(np.busday_offset(M, 5), np.datetime64) +assert_type(np.busday_offset(AR_M, m), npt.NDArray[np.datetime64]) +assert_type(np.busday_offset(M, timedelta_seq), npt.NDArray[np.datetime64]) +assert_type(np.busday_offset("2011-01", "2011-02", roll="forward"), np.datetime64) +assert_type(np.busday_offset(["2011-01"], "2011-02", roll="forward"), npt.NDArray[np.datetime64]) + +assert_type(np.is_busday("2012"), np.bool) +assert_type(np.is_busday(date_scalar), np.bool) +assert_type(np.is_busday(["2012"]), npt.NDArray[np.bool]) + +assert_type(np.datetime_as_string(M), np.str_) +assert_type(np.datetime_as_string(AR_M), npt.NDArray[np.str_]) + +assert_type(np.busdaycalendar(holidays=date_seq), np.busdaycalendar) +assert_type(np.busdaycalendar(holidays=[M]), np.busdaycalendar) + +assert_type(np.char.compare_chararrays("a", "b", "!=", rstrip=False), npt.NDArray[np.bool]) +assert_type(np.char.compare_chararrays(b"a", b"a", "==", True), npt.NDArray[np.bool]) + +assert_type(np.nested_iters([AR_i8, AR_i8], [[0], [1]], flags=["c_index"]), tuple[np.nditer, ...]) +assert_type(np.nested_iters([AR_i8, AR_i8], [[0], [1]], op_flags=[["readonly", "readonly"]]), tuple[np.nditer, ...]) +assert_type(np.nested_iters([AR_i8, AR_i8], [[0], [1]], op_dtypes=np.int_), tuple[np.nditer, ...]) +assert_type(np.nested_iters([AR_i8, AR_i8], [[0], [1]], order="C", casting="no"), tuple[np.nditer, ...]) diff --git a/local_packages/numpy/typing/tests/data/reveal/nbit_base_example.pyi b/local_packages/numpy/typing/tests/data/reveal/nbit_base_example.pyi new file mode 100644 index 0000000..66470b9 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/nbit_base_example.pyi @@ -0,0 +1,20 @@ +from typing import TypeVar, assert_type + +import numpy as np +import numpy.typing as npt +from numpy._typing import _32Bit, _64Bit + +T1 = TypeVar("T1", bound=npt.NBitBase) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +T2 = TypeVar("T2", bound=npt.NBitBase) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] + +def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]: ... + +i8: np.int64 +i4: np.int32 +f8: np.float64 +f4: np.float32 + +assert_type(add(f8, i8), np.floating[_64Bit]) +assert_type(add(f4, i8), np.floating[_32Bit | _64Bit]) +assert_type(add(f8, i4), np.floating[_32Bit | _64Bit]) +assert_type(add(f4, i4), np.floating[_32Bit]) diff --git a/local_packages/numpy/typing/tests/data/reveal/ndarray_assignability.pyi b/local_packages/numpy/typing/tests/data/reveal/ndarray_assignability.pyi new file mode 100644 index 0000000..feaccf2 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/ndarray_assignability.pyi @@ -0,0 +1,82 @@ +from typing import Any, Protocol, TypeAlias, TypeVar, assert_type + +import numpy as np +from numpy._typing import _64Bit + +_T = TypeVar("_T") +_T_co = TypeVar("_T_co", covariant=True) + +class CanAbs(Protocol[_T_co]): + def __abs__(self, /) -> _T_co: ... + +class CanInvert(Protocol[_T_co]): + def __invert__(self, /) -> _T_co: ... + +class CanNeg(Protocol[_T_co]): + def __neg__(self, /) -> _T_co: ... + +class CanPos(Protocol[_T_co]): + def __pos__(self, /) -> _T_co: ... + +def do_abs(x: CanAbs[_T]) -> _T: ... +def do_invert(x: CanInvert[_T]) -> _T: ... +def do_neg(x: CanNeg[_T]) -> _T: ... +def do_pos(x: CanPos[_T]) -> _T: ... + +_Bool_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.bool]] +_UInt8_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.uint8]] +_Int16_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.int16]] +_LongLong_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.longlong]] +_Float32_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.float32]] +_Float64_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.float64]] +_LongDouble_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.longdouble]] +_Complex64_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex64]] +_Complex128_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex128]] +_CLongDouble_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.clongdouble]] +_Void_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.void]] + +b1_1d: _Bool_1d +u1_1d: _UInt8_1d +i2_1d: _Int16_1d +q_1d: _LongLong_1d +f4_1d: _Float32_1d +f8_1d: _Float64_1d +g_1d: _LongDouble_1d +c8_1d: _Complex64_1d +c16_1d: _Complex128_1d +G_1d: _CLongDouble_1d +V_1d: _Void_1d + +assert_type(do_abs(b1_1d), _Bool_1d) +assert_type(do_abs(u1_1d), _UInt8_1d) +assert_type(do_abs(i2_1d), _Int16_1d) +assert_type(do_abs(q_1d), _LongLong_1d) +assert_type(do_abs(f4_1d), _Float32_1d) +assert_type(do_abs(f8_1d), _Float64_1d) +assert_type(do_abs(g_1d), _LongDouble_1d) + +assert_type(do_abs(c8_1d), _Float32_1d) +# NOTE: Unfortunately it's not possible to have this return a `float64` sctype, see +# https://github.com/python/mypy/issues/14070 +assert_type(do_abs(c16_1d), np.ndarray[tuple[int], np.dtype[np.floating[_64Bit]]]) +assert_type(do_abs(G_1d), _LongDouble_1d) + +assert_type(do_invert(b1_1d), _Bool_1d) +assert_type(do_invert(u1_1d), _UInt8_1d) +assert_type(do_invert(i2_1d), _Int16_1d) +assert_type(do_invert(q_1d), _LongLong_1d) + +assert_type(do_neg(u1_1d), _UInt8_1d) +assert_type(do_neg(i2_1d), _Int16_1d) +assert_type(do_neg(q_1d), _LongLong_1d) +assert_type(do_neg(f4_1d), _Float32_1d) +assert_type(do_neg(c16_1d), _Complex128_1d) + +assert_type(do_pos(u1_1d), _UInt8_1d) +assert_type(do_pos(i2_1d), _Int16_1d) +assert_type(do_pos(q_1d), _LongLong_1d) +assert_type(do_pos(f4_1d), _Float32_1d) +assert_type(do_pos(c16_1d), _Complex128_1d) + +# this shape is effectively equivalent to `tuple[int, *tuple[Any, ...]]`, i.e. ndim >= 1 +assert_type(V_1d["field"], np.ndarray[tuple[int] | tuple[Any, ...]]) diff --git a/local_packages/numpy/typing/tests/data/reveal/ndarray_conversion.pyi b/local_packages/numpy/typing/tests/data/reveal/ndarray_conversion.pyi new file mode 100644 index 0000000..2af6164 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/ndarray_conversion.pyi @@ -0,0 +1,83 @@ +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +b1_0d: np.ndarray[tuple[()], np.dtype[np.bool]] +u2_1d: np.ndarray[tuple[int], np.dtype[np.uint16]] +i4_2d: np.ndarray[tuple[int, int], np.dtype[np.int32]] +f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] +cG_4d: np.ndarray[tuple[int, int, int, int], np.dtype[np.clongdouble]] +i0_nd: npt.NDArray[np.int_] +uncertain_dtype: np.int32 | np.float64 | np.str_ + +# item +assert_type(i0_nd.item(), int) +assert_type(i0_nd.item(1), int) +assert_type(i0_nd.item(0, 1), int) +assert_type(i0_nd.item((0, 1)), int) + +assert_type(b1_0d.item(()), bool) +assert_type(u2_1d.item((0,)), int) +assert_type(i4_2d.item(-1, 2), int) +assert_type(f8_3d.item(2, 1, -1), float) +assert_type(cG_4d.item(-0xEd_fed_Deb_a_dead_bee), complex) # c'mon Ed, we talked about this... + +# tolist +assert_type(b1_0d.tolist(), bool) +assert_type(u2_1d.tolist(), list[int]) +assert_type(i4_2d.tolist(), list[list[int]]) +assert_type(f8_3d.tolist(), list[list[list[float]]]) +assert_type(cG_4d.tolist(), Any) +assert_type(i0_nd.tolist(), Any) + +# regression tests for numpy/numpy#27944 +any_dtype: np.ndarray[Any, Any] +any_sctype: np.ndarray[Any, Any] +assert_type(any_dtype.tolist(), Any) +assert_type(any_sctype.tolist(), Any) + +# tobytes is pretty simple +# tofile does not return a value +# dump does not return a value +# dumps is pretty simple + +# astype +assert_type(i0_nd.astype("float"), npt.NDArray[Any]) +assert_type(i0_nd.astype(float), npt.NDArray[Any]) +assert_type(i0_nd.astype(np.float64), npt.NDArray[np.float64]) +assert_type(i0_nd.astype(np.float64, "K"), npt.NDArray[np.float64]) +assert_type(i0_nd.astype(np.float64, "K", "unsafe"), npt.NDArray[np.float64]) +assert_type(i0_nd.astype(np.float64, "K", "unsafe", True), npt.NDArray[np.float64]) +assert_type(i0_nd.astype(np.float64, "K", "unsafe", True, True), npt.NDArray[np.float64]) + +assert_type(np.astype(i0_nd, np.float64), npt.NDArray[np.float64]) + +assert_type(i4_2d.astype(np.uint16), np.ndarray[tuple[int, int], np.dtype[np.uint16]]) +assert_type(np.astype(i4_2d, np.uint16), np.ndarray[tuple[int, int], np.dtype[np.uint16]]) +assert_type(f8_3d.astype(np.int16), np.ndarray[tuple[int, int, int], np.dtype[np.int16]]) +assert_type(np.astype(f8_3d, np.int16), np.ndarray[tuple[int, int, int], np.dtype[np.int16]]) +assert_type(i4_2d.astype(uncertain_dtype), np.ndarray[tuple[int, int], np.dtype[np.generic]]) +assert_type(np.astype(i4_2d, uncertain_dtype), np.ndarray[tuple[int, int], np.dtype]) + +# byteswap +assert_type(i0_nd.byteswap(), npt.NDArray[np.int_]) +assert_type(i0_nd.byteswap(True), npt.NDArray[np.int_]) + +# copy +assert_type(i0_nd.copy(), npt.NDArray[np.int_]) +assert_type(i0_nd.copy("C"), npt.NDArray[np.int_]) + +assert_type(i0_nd.view(), npt.NDArray[np.int_]) +assert_type(i0_nd.view(np.float64), npt.NDArray[np.float64]) +assert_type(i0_nd.view(float), npt.NDArray[Any]) +assert_type(i0_nd.view(np.float64, np.matrix), np.matrix) + +# getfield +assert_type(i0_nd.getfield("float"), npt.NDArray[Any]) +assert_type(i0_nd.getfield(float), npt.NDArray[Any]) +assert_type(i0_nd.getfield(np.float64), npt.NDArray[np.float64]) +assert_type(i0_nd.getfield(np.float64, 8), npt.NDArray[np.float64]) + +# setflags does not return a value +# fill does not return a value diff --git a/local_packages/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/local_packages/numpy/typing/tests/data/reveal/ndarray_misc.pyi new file mode 100644 index 0000000..2972a58 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -0,0 +1,246 @@ +""" +Tests for miscellaneous (non-magic) ``np.ndarray``/``np.generic`` methods. + +More extensive tests are performed for the methods' +function-based counterpart in `../from_numeric.py`. + +""" + +import ctypes as ct +import operator +from collections.abc import Iterator +from types import ModuleType +from typing import Any, Literal, assert_type +from typing_extensions import CapsuleType + +import numpy as np +import numpy.typing as npt + +class SubClass(npt.NDArray[np.object_]): ... + +f8: np.float64 +i8: np.int64 +B: SubClass +AR_f8: npt.NDArray[np.float64] +AR_i8: npt.NDArray[np.int64] +AR_u1: npt.NDArray[np.uint8] +AR_c8: npt.NDArray[np.complex64] +AR_m: npt.NDArray[np.timedelta64] +AR_U: npt.NDArray[np.str_] +AR_V: npt.NDArray[np.void] + +AR_f8_1d: np.ndarray[tuple[int], np.dtype[np.float64]] +AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] +AR_f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] + +ctypes_obj = AR_f8.ctypes + +assert_type(AR_f8.__dlpack__(), CapsuleType) +assert_type(AR_f8.__dlpack_device__(), tuple[Literal[1], Literal[0]]) + +assert_type(ctypes_obj.data, int) +assert_type(ctypes_obj.shape, ct.Array[np.ctypeslib.c_intp]) +assert_type(ctypes_obj.strides, ct.Array[np.ctypeslib.c_intp]) +assert_type(ctypes_obj._as_parameter_, ct.c_void_p) + +assert_type(ctypes_obj.data_as(ct.c_void_p), ct.c_void_p) +assert_type(ctypes_obj.shape_as(ct.c_longlong), ct.Array[ct.c_longlong]) +assert_type(ctypes_obj.strides_as(ct.c_ubyte), ct.Array[ct.c_ubyte]) + +assert_type(f8.all(), np.bool) +assert_type(AR_f8.all(), np.bool) +assert_type(AR_f8.all(axis=0), np.bool | npt.NDArray[np.bool]) +assert_type(AR_f8.all(keepdims=True), np.bool | npt.NDArray[np.bool]) +assert_type(AR_f8.all(out=B), SubClass) + +assert_type(f8.any(), np.bool) +assert_type(AR_f8.any(), np.bool) +assert_type(AR_f8.any(axis=0), np.bool | npt.NDArray[np.bool]) +assert_type(AR_f8.any(keepdims=True), np.bool | npt.NDArray[np.bool]) +assert_type(AR_f8.any(out=B), SubClass) + +assert_type(f8.argmax(), np.intp) +assert_type(AR_f8.argmax(), np.intp) +assert_type(AR_f8.argmax(axis=0), Any) +assert_type(AR_f8.argmax(out=AR_i8), npt.NDArray[np.intp]) + +assert_type(f8.argmin(), np.intp) +assert_type(AR_f8.argmin(), np.intp) +assert_type(AR_f8.argmin(axis=0), Any) +assert_type(AR_f8.argmin(out=AR_i8), npt.NDArray[np.intp]) + +assert_type(f8.argsort(), npt.NDArray[np.intp]) +assert_type(AR_f8.argsort(), npt.NDArray[np.intp]) + +assert_type(f8.astype(np.int64).choose([()]), npt.NDArray[Any]) +assert_type(AR_f8.choose([0]), npt.NDArray[Any]) +assert_type(AR_f8.choose([0], out=B), SubClass) + +assert_type(f8.clip(1), npt.NDArray[Any]) +assert_type(AR_f8.clip(1), npt.NDArray[Any]) +assert_type(AR_f8.clip(None, 1), npt.NDArray[Any]) +assert_type(AR_f8.clip(1, out=B), SubClass) +assert_type(AR_f8.clip(None, 1, out=B), SubClass) + +assert_type(f8.compress([0]), npt.NDArray[Any]) +assert_type(AR_f8.compress([0]), npt.NDArray[Any]) +assert_type(AR_f8.compress([0], out=B), SubClass) + +assert_type(f8.conj(), np.float64) +assert_type(AR_f8.conj(), npt.NDArray[np.float64]) +assert_type(B.conj(), SubClass) + +assert_type(f8.conjugate(), np.float64) +assert_type(AR_f8.conjugate(), npt.NDArray[np.float64]) +assert_type(B.conjugate(), SubClass) + +assert_type(f8.cumprod(), npt.NDArray[Any]) +assert_type(AR_f8.cumprod(), npt.NDArray[Any]) +assert_type(AR_f8.cumprod(out=B), SubClass) + +assert_type(f8.cumsum(), npt.NDArray[Any]) +assert_type(AR_f8.cumsum(), npt.NDArray[Any]) +assert_type(AR_f8.cumsum(out=B), SubClass) + +assert_type(f8.max(), Any) +assert_type(AR_f8.max(), Any) +assert_type(AR_f8.max(axis=0), Any) +assert_type(AR_f8.max(keepdims=True), Any) +assert_type(AR_f8.max(out=B), SubClass) + +assert_type(f8.mean(), Any) +assert_type(AR_f8.mean(), Any) +assert_type(AR_f8.mean(axis=0), Any) +assert_type(AR_f8.mean(keepdims=True), Any) +assert_type(AR_f8.mean(out=B), SubClass) + +assert_type(f8.min(), Any) +assert_type(AR_f8.min(), Any) +assert_type(AR_f8.min(axis=0), Any) +assert_type(AR_f8.min(keepdims=True), Any) +assert_type(AR_f8.min(out=B), SubClass) + +assert_type(f8.prod(), Any) +assert_type(AR_f8.prod(), Any) +assert_type(AR_f8.prod(axis=0), Any) +assert_type(AR_f8.prod(keepdims=True), Any) +assert_type(AR_f8.prod(out=B), SubClass) + +assert_type(f8.round(), np.float64) +assert_type(AR_f8.round(), npt.NDArray[np.float64]) +assert_type(AR_f8.round(out=B), SubClass) + +assert_type(f8.repeat(1), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(f8.repeat(1, axis=0), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_f8.repeat(1), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_f8.repeat(1, axis=0), npt.NDArray[np.float64]) +assert_type(B.repeat(1), np.ndarray[tuple[int], np.dtype[np.object_]]) +assert_type(B.repeat(1, axis=0), npt.NDArray[np.object_]) + +assert_type(f8.std(), Any) +assert_type(AR_f8.std(), Any) +assert_type(AR_f8.std(axis=0), Any) +assert_type(AR_f8.std(keepdims=True), Any) +assert_type(AR_f8.std(out=B), SubClass) + +assert_type(f8.sum(), Any) +assert_type(AR_f8.sum(), Any) +assert_type(AR_f8.sum(axis=0), Any) +assert_type(AR_f8.sum(keepdims=True), Any) +assert_type(AR_f8.sum(out=B), SubClass) + +assert_type(f8.take(0), np.float64) +assert_type(AR_f8.take(0), np.float64) +assert_type(AR_f8.take([0]), npt.NDArray[np.float64]) +assert_type(AR_f8.take(0, out=B), SubClass) +assert_type(AR_f8.take([0], out=B), SubClass) + +assert_type(f8.var(), Any) +assert_type(AR_f8.var(), Any) +assert_type(AR_f8.var(axis=0), Any) +assert_type(AR_f8.var(keepdims=True), Any) +assert_type(AR_f8.var(out=B), SubClass) + +assert_type(AR_f8.argpartition([0]), npt.NDArray[np.intp]) + +assert_type(AR_f8.diagonal(), npt.NDArray[np.float64]) + +assert_type(AR_f8.dot(1), npt.NDArray[Any]) +assert_type(AR_f8.dot([1]), Any) +assert_type(AR_f8.dot(1, out=B), SubClass) + +assert_type(AR_f8.nonzero(), tuple[np.ndarray[tuple[int], np.dtype[np.intp]], ...]) + +assert_type(AR_f8.searchsorted(1), np.intp) +assert_type(AR_f8.searchsorted([1]), npt.NDArray[np.intp]) + +assert_type(AR_f8.trace(), Any) +assert_type(AR_f8.trace(out=B), SubClass) + +assert_type(AR_f8.item(), float) +assert_type(AR_U.item(), str) + +assert_type(AR_f8.ravel(), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_U.ravel(), np.ndarray[tuple[int], np.dtype[np.str_]]) + +assert_type(AR_f8.flatten(), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_U.flatten(), np.ndarray[tuple[int], np.dtype[np.str_]]) + +assert_type(AR_i8.reshape(None), npt.NDArray[np.int64]) +assert_type(AR_f8.reshape(-1), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_c8.reshape(2, 3, 4, 5), np.ndarray[tuple[int, int, int, int], np.dtype[np.complex64]]) +assert_type(AR_m.reshape(()), np.ndarray[tuple[()], np.dtype[np.timedelta64]]) +assert_type(AR_U.reshape([]), np.ndarray[tuple[()], np.dtype[np.str_]]) +assert_type(AR_V.reshape((480, 720, 4)), np.ndarray[tuple[int, int, int], np.dtype[np.void]]) + +assert_type(int(AR_f8), int) +assert_type(int(AR_U), int) + +assert_type(float(AR_f8), float) +assert_type(float(AR_U), float) + +assert_type(complex(AR_f8), complex) + +assert_type(operator.index(AR_i8), int) + +assert_type(AR_f8.__array_wrap__(B), npt.NDArray[np.object_]) + +assert_type(AR_V[0], Any) +assert_type(AR_V[0, 0], Any) +assert_type(AR_V[AR_i8], npt.NDArray[np.void]) +assert_type(AR_V[AR_i8, AR_i8], npt.NDArray[np.void]) +assert_type(AR_V[AR_i8, None], npt.NDArray[np.void]) +assert_type(AR_V[0, ...], npt.NDArray[np.void]) +assert_type(AR_V[[0]], npt.NDArray[np.void]) +assert_type(AR_V[[0], [0]], npt.NDArray[np.void]) +assert_type(AR_V[:], npt.NDArray[np.void]) +assert_type(AR_V["a"], npt.NDArray[Any]) +assert_type(AR_V[["a", "b"]], npt.NDArray[np.void]) + +assert_type(AR_f8.dump("test_file"), None) +assert_type(AR_f8.dump(b"test_file"), None) +with open("test_file", "wb") as f: + assert_type(AR_f8.dump(f), None) + +assert_type(AR_f8.__array_finalize__(None), None) +assert_type(AR_f8.__array_finalize__(B), None) +assert_type(AR_f8.__array_finalize__(AR_f8), None) + +assert_type(f8.device, Literal["cpu"]) +assert_type(AR_f8.device, Literal["cpu"]) + +assert_type(f8.to_device("cpu"), np.float64) +assert_type(i8.to_device("cpu"), np.int64) +assert_type(AR_f8.to_device("cpu"), npt.NDArray[np.float64]) +assert_type(AR_i8.to_device("cpu"), npt.NDArray[np.int64]) +assert_type(AR_u1.to_device("cpu"), npt.NDArray[np.uint8]) +assert_type(AR_c8.to_device("cpu"), npt.NDArray[np.complex64]) +assert_type(AR_m.to_device("cpu"), npt.NDArray[np.timedelta64]) + +assert_type(f8.__array_namespace__(), ModuleType) +assert_type(AR_f8.__array_namespace__(), ModuleType) + +assert_type(iter(AR_f8), Iterator[Any]) # any-D +assert_type(iter(AR_f8_1d), Iterator[np.float64]) # 1-D +assert_type(iter(AR_f8_2d), Iterator[npt.NDArray[np.float64]]) # 2-D +assert_type(iter(AR_f8_3d), Iterator[npt.NDArray[np.float64]]) # 3-D diff --git a/local_packages/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi b/local_packages/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi new file mode 100644 index 0000000..0ce599a --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi @@ -0,0 +1,47 @@ +from typing import TypeAlias, assert_type + +import numpy as np +import numpy.typing as npt + +_ArrayND: TypeAlias = npt.NDArray[np.int64] +_Array2D: TypeAlias = np.ndarray[tuple[int, int], np.dtype[np.int8]] +_Array3D: TypeAlias = np.ndarray[tuple[int, int, int], np.dtype[np.bool]] + +_nd: _ArrayND +_2d: _Array2D +_3d: _Array3D + +# reshape +assert_type(_nd.reshape(None), npt.NDArray[np.int64]) +assert_type(_nd.reshape(4), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.reshape((4,)), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.reshape(2, 2), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(_nd.reshape((2, 2)), np.ndarray[tuple[int, int], np.dtype[np.int64]]) + +assert_type(_nd.reshape((2, 2), order="C"), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(_nd.reshape(4, order="C"), np.ndarray[tuple[int], np.dtype[np.int64]]) + +# resize does not return a value + +# transpose +assert_type(_nd.transpose(), npt.NDArray[np.int64]) +assert_type(_nd.transpose(1, 0), npt.NDArray[np.int64]) +assert_type(_nd.transpose((1, 0)), npt.NDArray[np.int64]) + +# swapaxes +assert_type(_nd.swapaxes(0, 1), _ArrayND) +assert_type(_2d.swapaxes(0, 1), _Array2D) +assert_type(_3d.swapaxes(0, 1), _Array3D) + +# flatten +assert_type(_nd.flatten(), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.flatten("C"), np.ndarray[tuple[int], np.dtype[np.int64]]) + +# ravel +assert_type(_nd.ravel(), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(_nd.ravel("C"), np.ndarray[tuple[int], np.dtype[np.int64]]) + +# squeeze +assert_type(_nd.squeeze(), npt.NDArray[np.int64]) +assert_type(_nd.squeeze(0), npt.NDArray[np.int64]) +assert_type(_nd.squeeze((0, 2)), npt.NDArray[np.int64]) diff --git a/local_packages/numpy/typing/tests/data/reveal/nditer.pyi b/local_packages/numpy/typing/tests/data/reveal/nditer.pyi new file mode 100644 index 0000000..8965f3c --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/nditer.pyi @@ -0,0 +1,49 @@ +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +nditer_obj: np.nditer + +assert_type(np.nditer([0, 1], flags=["c_index"]), np.nditer) +assert_type(np.nditer([0, 1], op_flags=[["readonly", "readonly"]]), np.nditer) +assert_type(np.nditer([0, 1], op_dtypes=np.int_), np.nditer) +assert_type(np.nditer([0, 1], order="C", casting="no"), np.nditer) + +assert_type(nditer_obj.dtypes, tuple[np.dtype, ...]) +assert_type(nditer_obj.finished, bool) +assert_type(nditer_obj.has_delayed_bufalloc, bool) +assert_type(nditer_obj.has_index, bool) +assert_type(nditer_obj.has_multi_index, bool) +assert_type(nditer_obj.index, int) +assert_type(nditer_obj.iterationneedsapi, bool) +assert_type(nditer_obj.iterindex, int) +assert_type(nditer_obj.iterrange, tuple[int, ...]) +assert_type(nditer_obj.itersize, int) +assert_type(nditer_obj.itviews, tuple[npt.NDArray[Any], ...]) +assert_type(nditer_obj.multi_index, tuple[int, ...]) +assert_type(nditer_obj.ndim, int) +assert_type(nditer_obj.nop, int) +assert_type(nditer_obj.operands, tuple[npt.NDArray[Any], ...]) +assert_type(nditer_obj.shape, tuple[int, ...]) +assert_type(nditer_obj.value, tuple[npt.NDArray[Any], ...]) + +assert_type(nditer_obj.close(), None) +assert_type(nditer_obj.copy(), np.nditer) +assert_type(nditer_obj.debug_print(), None) +assert_type(nditer_obj.enable_external_loop(), None) +assert_type(nditer_obj.iternext(), bool) +assert_type(nditer_obj.remove_axis(0), None) +assert_type(nditer_obj.remove_multi_index(), None) +assert_type(nditer_obj.reset(), None) + +assert_type(len(nditer_obj), int) +assert_type(iter(nditer_obj), np.nditer) +assert_type(next(nditer_obj), tuple[npt.NDArray[Any], ...]) +assert_type(nditer_obj.__copy__(), np.nditer) +with nditer_obj as f: + assert_type(f, np.nditer) +assert_type(nditer_obj[0], npt.NDArray[Any]) +assert_type(nditer_obj[:], tuple[npt.NDArray[Any], ...]) +nditer_obj[0] = 0 +nditer_obj[:] = [0, 1] diff --git a/local_packages/numpy/typing/tests/data/reveal/nested_sequence.pyi b/local_packages/numpy/typing/tests/data/reveal/nested_sequence.pyi new file mode 100644 index 0000000..b4f98b7 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/nested_sequence.pyi @@ -0,0 +1,25 @@ +from collections.abc import Sequence +from typing import Any, assert_type + +from numpy._typing import _NestedSequence + +a: Sequence[int] +b: Sequence[Sequence[int]] +c: Sequence[Sequence[Sequence[int]]] +d: Sequence[Sequence[Sequence[Sequence[int]]]] +e: Sequence[bool] +f: tuple[int, ...] +g: list[int] +h: Sequence[Any] + +def func(a: _NestedSequence[int]) -> None: ... + +assert_type(func(a), None) +assert_type(func(b), None) +assert_type(func(c), None) +assert_type(func(d), None) +assert_type(func(e), None) +assert_type(func(f), None) +assert_type(func(g), None) +assert_type(func(h), None) +assert_type(func(range(15)), None) diff --git a/local_packages/numpy/typing/tests/data/reveal/npyio.pyi b/local_packages/numpy/typing/tests/data/reveal/npyio.pyi new file mode 100644 index 0000000..e3eaa45 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/npyio.pyi @@ -0,0 +1,83 @@ +import pathlib +import re +import zipfile +from collections.abc import Mapping +from typing import IO, Any, assert_type + +import numpy as np +import numpy.typing as npt +from numpy.lib._npyio_impl import BagObj + +str_path: str +pathlib_path: pathlib.Path +str_file: IO[str] +bytes_file: IO[bytes] + +npz_file: np.lib.npyio.NpzFile + +AR_i8: npt.NDArray[np.int64] +AR_LIKE_f8: list[float] + +class BytesWriter: + def write(self, data: bytes) -> None: ... + +class BytesReader: + def read(self, n: int = ...) -> bytes: ... + def seek(self, offset: int, whence: int = ...) -> int: ... + +bytes_writer: BytesWriter +bytes_reader: BytesReader + +assert_type(npz_file.zip, zipfile.ZipFile | None) +assert_type(npz_file.fid, IO[str] | None) +assert_type(npz_file.files, list[str]) +assert_type(npz_file.allow_pickle, bool) +assert_type(npz_file.pickle_kwargs, Mapping[str, Any] | None) +assert_type(npz_file.f, BagObj[np.lib.npyio.NpzFile]) +assert_type(npz_file["test"], npt.NDArray[Any]) +assert_type(len(npz_file), int) +with npz_file as f: + assert_type(f, np.lib.npyio.NpzFile) + +assert_type(np.load(bytes_file), Any) +assert_type(np.load(pathlib_path, allow_pickle=True), Any) +assert_type(np.load(str_path, encoding="bytes"), Any) +assert_type(np.load(bytes_reader), Any) + +assert_type(np.save(bytes_file, AR_LIKE_f8), None) +assert_type(np.save(pathlib_path, AR_i8, allow_pickle=True), None) +assert_type(np.save(str_path, AR_LIKE_f8), None) +assert_type(np.save(bytes_writer, AR_LIKE_f8), None) + +assert_type(np.savez(bytes_file, AR_LIKE_f8), None) +assert_type(np.savez(pathlib_path, ar1=AR_i8, ar2=AR_i8), None) +assert_type(np.savez(str_path, AR_LIKE_f8, ar1=AR_i8), None) +assert_type(np.savez(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None) + +assert_type(np.savez_compressed(bytes_file, AR_LIKE_f8), None) +assert_type(np.savez_compressed(pathlib_path, ar1=AR_i8, ar2=AR_i8), None) +assert_type(np.savez_compressed(str_path, AR_LIKE_f8, ar1=AR_i8), None) +assert_type(np.savez_compressed(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None) + +assert_type(np.loadtxt(bytes_file), npt.NDArray[np.float64]) +assert_type(np.loadtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_]) +assert_type(np.loadtxt(str_path, dtype=str, skiprows=2), npt.NDArray[Any]) +assert_type(np.loadtxt(str_file, comments="test"), npt.NDArray[np.float64]) +assert_type(np.loadtxt(str_file, comments=None), npt.NDArray[np.float64]) +assert_type(np.loadtxt(str_path, delimiter="\n"), npt.NDArray[np.float64]) +assert_type(np.loadtxt(str_path, ndmin=2), npt.NDArray[np.float64]) +assert_type(np.loadtxt(["1", "2", "3"]), npt.NDArray[np.float64]) + +assert_type(np.fromregex(bytes_file, "test", np.float64), npt.NDArray[np.float64]) +assert_type(np.fromregex(str_file, b"test", dtype=float), npt.NDArray[Any]) +assert_type(np.fromregex(str_path, re.compile("test"), dtype=np.str_, encoding="utf8"), npt.NDArray[np.str_]) +assert_type(np.fromregex(pathlib_path, "test", np.float64), npt.NDArray[np.float64]) +assert_type(np.fromregex(bytes_reader, "test", np.float64), npt.NDArray[np.float64]) + +assert_type(np.genfromtxt(bytes_file), npt.NDArray[Any]) +assert_type(np.genfromtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_]) +assert_type(np.genfromtxt(str_path, dtype=str, skip_header=2), npt.NDArray[Any]) +assert_type(np.genfromtxt(str_file, comments="test"), npt.NDArray[Any]) +assert_type(np.genfromtxt(str_path, delimiter="\n"), npt.NDArray[Any]) +assert_type(np.genfromtxt(str_path, ndmin=2), npt.NDArray[Any]) +assert_type(np.genfromtxt(["1", "2", "3"], ndmin=2), npt.NDArray[Any]) diff --git a/local_packages/numpy/typing/tests/data/reveal/numeric.pyi b/local_packages/numpy/typing/tests/data/reveal/numeric.pyi new file mode 100644 index 0000000..24f97d2 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/numeric.pyi @@ -0,0 +1,170 @@ +""" +Tests for :mod:`_core.numeric`. + +Does not include tests which fall under ``array_constructors``. + +""" + +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +class SubClass(npt.NDArray[np.int64]): ... + +i8: np.int64 + +AR_b: npt.NDArray[np.bool] +AR_u8: npt.NDArray[np.uint64] +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +AR_m: npt.NDArray[np.timedelta64] +AR_O: npt.NDArray[np.object_] + +_sub_nd_i8: SubClass + +_to_1d_bool: list[bool] +_to_1d_int: list[int] +_to_1d_float: list[float] +_to_1d_complex: list[complex] + +### + +assert_type(np.count_nonzero(i8), np.intp) +assert_type(np.count_nonzero(AR_i8), np.intp) +assert_type(np.count_nonzero(_to_1d_int), np.intp) +assert_type(np.count_nonzero(AR_i8, keepdims=True), npt.NDArray[np.intp]) +assert_type(np.count_nonzero(AR_i8, axis=0), Any) + +assert_type(np.isfortran(i8), bool) +assert_type(np.isfortran(AR_i8), bool) + +assert_type(np.argwhere(i8), np.ndarray[tuple[int, int], np.dtype[np.intp]]) +assert_type(np.argwhere(AR_i8), np.ndarray[tuple[int, int], np.dtype[np.intp]]) + +assert_type(np.flatnonzero(i8), np.ndarray[tuple[int], np.dtype[np.intp]]) +assert_type(np.flatnonzero(AR_i8), np.ndarray[tuple[int], np.dtype[np.intp]]) + +# correlate +assert_type(np.correlate(AR_i8, AR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.correlate(AR_b, AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.correlate(AR_u8, AR_u8), np.ndarray[tuple[int], np.dtype[np.uint64]]) +assert_type(np.correlate(AR_i8, AR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.correlate(AR_f8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.correlate(AR_f8, AR_i8), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.correlate(AR_c16, AR_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.correlate(AR_c16, AR_f8), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) +assert_type(np.correlate(AR_m, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) +assert_type(np.correlate(AR_i8, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64 | Any]]) +assert_type(np.correlate(AR_O, AR_O), np.ndarray[tuple[int], np.dtype[np.object_]]) +assert_type(np.correlate(_to_1d_bool, _to_1d_bool), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.correlate(_to_1d_int, _to_1d_int), np.ndarray[tuple[int], np.dtype[np.int_ | Any]]) +assert_type(np.correlate(_to_1d_float, _to_1d_float), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.correlate(_to_1d_complex, _to_1d_complex), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) + +# convolve (same as correlate) +assert_type(np.convolve(AR_i8, AR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.convolve(AR_b, AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.convolve(AR_u8, AR_u8), np.ndarray[tuple[int], np.dtype[np.uint64]]) +assert_type(np.convolve(AR_i8, AR_i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.convolve(AR_f8, AR_f8), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.convolve(AR_f8, AR_i8), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.convolve(AR_c16, AR_c16), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(np.convolve(AR_c16, AR_f8), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) +assert_type(np.convolve(AR_m, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) +assert_type(np.convolve(AR_i8, AR_m), np.ndarray[tuple[int], np.dtype[np.timedelta64 | Any]]) +assert_type(np.convolve(AR_O, AR_O), np.ndarray[tuple[int], np.dtype[np.object_]]) +assert_type(np.convolve(_to_1d_bool, _to_1d_bool), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.convolve(_to_1d_int, _to_1d_int), np.ndarray[tuple[int], np.dtype[np.int_ | Any]]) +assert_type(np.convolve(_to_1d_float, _to_1d_float), np.ndarray[tuple[int], np.dtype[np.float64 | Any]]) +assert_type(np.convolve(_to_1d_complex, _to_1d_complex), np.ndarray[tuple[int], np.dtype[np.complex128 | Any]]) + +# outer (very similar to above, but 2D output) +assert_type(np.outer(AR_i8, AR_i8), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(np.outer(AR_b, AR_b), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.outer(AR_u8, AR_u8), np.ndarray[tuple[int, int], np.dtype[np.uint64]]) +assert_type(np.outer(AR_i8, AR_i8), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(np.outer(AR_f8, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.outer(AR_f8, AR_i8), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.outer(AR_c16, AR_c16), np.ndarray[tuple[int, int], np.dtype[np.complex128]]) +assert_type(np.outer(AR_c16, AR_f8), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) +assert_type(np.outer(AR_m, AR_m), np.ndarray[tuple[int, int], np.dtype[np.timedelta64]]) +assert_type(np.outer(AR_i8, AR_m), np.ndarray[tuple[int, int], np.dtype[np.timedelta64 | Any]]) +assert_type(np.outer(AR_O, AR_O), np.ndarray[tuple[int, int], np.dtype[np.object_]]) +assert_type(np.outer(AR_i8, AR_i8, out=_sub_nd_i8), SubClass) +assert_type(np.outer(_to_1d_bool, _to_1d_bool), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.outer(_to_1d_int, _to_1d_int), np.ndarray[tuple[int, int], np.dtype[np.int_ | Any]]) +assert_type(np.outer(_to_1d_float, _to_1d_float), np.ndarray[tuple[int, int], np.dtype[np.float64 | Any]]) +assert_type(np.outer(_to_1d_complex, _to_1d_complex), np.ndarray[tuple[int, int], np.dtype[np.complex128 | Any]]) + +# tensordot +assert_type(np.tensordot(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.tensordot(AR_b, AR_b), npt.NDArray[np.bool]) +assert_type(np.tensordot(AR_u8, AR_u8), npt.NDArray[np.uint64]) +assert_type(np.tensordot(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.tensordot(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.tensordot(AR_f8, AR_i8), npt.NDArray[np.float64 | Any]) +assert_type(np.tensordot(AR_c16, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.tensordot(AR_c16, AR_f8), npt.NDArray[np.complex128 | Any]) +assert_type(np.tensordot(AR_m, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.tensordot(AR_O, AR_O), npt.NDArray[np.object_]) +assert_type(np.tensordot(_to_1d_bool, _to_1d_bool), npt.NDArray[np.bool]) +assert_type(np.tensordot(_to_1d_int, _to_1d_int), npt.NDArray[np.int_ | Any]) +assert_type(np.tensordot(_to_1d_float, _to_1d_float), npt.NDArray[np.float64 | Any]) +assert_type(np.tensordot(_to_1d_complex, _to_1d_complex), npt.NDArray[np.complex128 | Any]) + +# cross +assert_type(np.cross(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.cross(AR_u8, AR_u8), npt.NDArray[np.uint64]) +assert_type(np.cross(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.cross(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.cross(AR_f8, AR_i8), npt.NDArray[np.float64 | Any]) +assert_type(np.cross(AR_c16, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.cross(AR_c16, AR_f8), npt.NDArray[np.complex128 | Any]) +assert_type(np.cross(AR_m, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.cross(AR_O, AR_O), npt.NDArray[np.object_]) +assert_type(np.cross(_to_1d_int, _to_1d_int), npt.NDArray[np.int_ | Any]) +assert_type(np.cross(_to_1d_float, _to_1d_float), npt.NDArray[np.float64 | Any]) +assert_type(np.cross(_to_1d_complex, _to_1d_complex), npt.NDArray[np.complex128 | Any]) + +assert_type(np.isscalar(i8), bool) +assert_type(np.isscalar(AR_i8), bool) +assert_type(np.isscalar(_to_1d_int), bool) + +assert_type(np.roll(AR_i8, 1), npt.NDArray[np.int64]) +assert_type(np.roll(AR_i8, (1, 2)), npt.NDArray[np.int64]) +assert_type(np.roll(_to_1d_int, 1), npt.NDArray[Any]) + +assert_type(np.rollaxis(AR_i8, 0, 1), npt.NDArray[np.int64]) + +assert_type(np.moveaxis(AR_i8, 0, 1), npt.NDArray[np.int64]) +assert_type(np.moveaxis(AR_i8, (0, 1), (1, 2)), npt.NDArray[np.int64]) + +assert_type(np.indices([0, 1, 2]), npt.NDArray[np.int_]) +assert_type(np.indices([0, 1, 2], sparse=True), tuple[npt.NDArray[np.int_], ...]) +assert_type(np.indices([0, 1, 2], dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.indices([0, 1, 2], sparse=True, dtype=np.float64), tuple[npt.NDArray[np.float64], ...]) +assert_type(np.indices([0, 1, 2], dtype=float), npt.NDArray[Any]) +assert_type(np.indices([0, 1, 2], sparse=True, dtype=float), tuple[npt.NDArray[Any], ...]) + +assert_type(np.binary_repr(1), str) + +assert_type(np.base_repr(1), str) + +assert_type(np.allclose(i8, AR_i8), bool) +assert_type(np.allclose(_to_1d_int, AR_i8), bool) +assert_type(np.allclose(AR_i8, AR_i8), bool) + +assert_type(np.isclose(i8, i8), np.bool) +assert_type(np.isclose(i8, AR_i8), npt.NDArray[np.bool]) +assert_type(np.isclose(_to_1d_int, _to_1d_int), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.isclose(AR_i8, AR_i8), npt.NDArray[np.bool]) + +assert_type(np.array_equal(i8, AR_i8), bool) +assert_type(np.array_equal(_to_1d_int, AR_i8), bool) +assert_type(np.array_equal(AR_i8, AR_i8), bool) + +assert_type(np.array_equiv(i8, AR_i8), bool) +assert_type(np.array_equiv(_to_1d_int, AR_i8), bool) +assert_type(np.array_equiv(AR_i8, AR_i8), bool) diff --git a/local_packages/numpy/typing/tests/data/reveal/numerictypes.pyi b/local_packages/numpy/typing/tests/data/reveal/numerictypes.pyi new file mode 100644 index 0000000..75d108c --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/numerictypes.pyi @@ -0,0 +1,16 @@ +from typing import Literal, assert_type + +import numpy as np + +assert_type(np.ScalarType[0], type[int]) +assert_type(np.ScalarType[3], type[bool]) +assert_type(np.ScalarType[8], type[np.complex64]) +assert_type(np.ScalarType[9], type[np.complex128]) +assert_type(np.ScalarType[-1], type[np.void]) +assert_type(np.bool_(object()), np.bool) + +assert_type(np.typecodes["Character"], Literal["c"]) +assert_type(np.typecodes["Complex"], Literal["FDG"]) +assert_type(np.typecodes["All"], Literal["?bhilqnpBHILQNPefdgFDGSUVOMm"]) + +assert_type(np.sctypeDict["uint8"], type[np.generic]) diff --git a/local_packages/numpy/typing/tests/data/reveal/polynomial_polybase.pyi b/local_packages/numpy/typing/tests/data/reveal/polynomial_polybase.pyi new file mode 100644 index 0000000..4c4899a --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/polynomial_polybase.pyi @@ -0,0 +1,217 @@ +from collections.abc import Sequence +from decimal import Decimal +from typing import Any, Literal as L, TypeAlias, TypeVar, assert_type + +import numpy as np +import numpy.polynomial as npp +import numpy.typing as npt + +_Ar_x: TypeAlias = npt.NDArray[np.inexact | np.object_] +_Ar_f: TypeAlias = npt.NDArray[np.floating] +_Ar_c: TypeAlias = npt.NDArray[np.complexfloating] +_Ar_O: TypeAlias = npt.NDArray[np.object_] + +_Ar_x_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.inexact | np.object_]] +_Ar_f_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating]] +_Ar_c_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating]] +_Ar_O_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] + +_Ar_x_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.float64 | Any]] +_Ar_f_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.floating]] +_Ar_c_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.complexfloating]] +_Ar_O_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.object_]] + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_Ar_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] + +_BasisName: TypeAlias = L["X"] + +SC_i: np.int_ +SC_i_co: int | np.int_ +SC_f: np.float64 +SC_f_co: float | np.float64 | np.int_ +SC_c: np.complex128 +SC_c_co: complex | np.complex128 +SC_O: Decimal + +AR_i: npt.NDArray[np.int_] +AR_f: npt.NDArray[np.float64] +AR_f_co: npt.NDArray[np.float64] | npt.NDArray[np.int_] +AR_c: npt.NDArray[np.complex128] +AR_c_co: npt.NDArray[np.complex128] | npt.NDArray[np.float64] | npt.NDArray[np.int_] +AR_O: npt.NDArray[np.object_] +AR_O_co: npt.NDArray[np.object_ | np.number] + +SQ_i: Sequence[int] +SQ_f: Sequence[float] +SQ_c: Sequence[complex] +SQ_O: Sequence[Decimal] + +PS_poly: npp.Polynomial +PS_cheb: npp.Chebyshev +PS_herm: npp.Hermite +PS_herme: npp.HermiteE +PS_lag: npp.Laguerre +PS_leg: npp.Legendre +PS_all: ( + npp.Polynomial + | npp.Chebyshev + | npp.Hermite + | npp.HermiteE + | npp.Laguerre + | npp.Legendre +) + +# static- and classmethods + +assert_type(type(PS_poly).basis_name, None) +assert_type(type(PS_cheb).basis_name, L["T"]) +assert_type(type(PS_herm).basis_name, L["H"]) +assert_type(type(PS_herme).basis_name, L["He"]) +assert_type(type(PS_lag).basis_name, L["L"]) +assert_type(type(PS_leg).basis_name, L["P"]) + +assert_type(type(PS_all).__hash__, None) +assert_type(type(PS_all).__array_ufunc__, None) +assert_type(type(PS_all).maxpower, L[100]) + +assert_type(type(PS_poly).fromroots(SC_i), npp.Polynomial) +assert_type(type(PS_poly).fromroots(SQ_i), npp.Polynomial) +assert_type(type(PS_poly).fromroots(AR_i), npp.Polynomial) +assert_type(type(PS_cheb).fromroots(SC_f), npp.Chebyshev) +assert_type(type(PS_cheb).fromroots(SQ_f), npp.Chebyshev) +assert_type(type(PS_cheb).fromroots(AR_f_co), npp.Chebyshev) +assert_type(type(PS_herm).fromroots(SC_c), npp.Hermite) +assert_type(type(PS_herm).fromroots(SQ_c), npp.Hermite) +assert_type(type(PS_herm).fromroots(AR_c_co), npp.Hermite) +assert_type(type(PS_leg).fromroots(SC_O), npp.Legendre) +assert_type(type(PS_leg).fromroots(SQ_O), npp.Legendre) +assert_type(type(PS_leg).fromroots(AR_O_co), npp.Legendre) + +assert_type(type(PS_poly).identity(), npp.Polynomial) +assert_type(type(PS_cheb).identity(symbol="z"), npp.Chebyshev) + +assert_type(type(PS_lag).basis(SC_i), npp.Laguerre) +assert_type(type(PS_leg).basis(32, symbol="u"), npp.Legendre) + +assert_type(type(PS_herm).cast(PS_poly), npp.Hermite) +assert_type(type(PS_herme).cast(PS_leg), npp.HermiteE) + +# attributes / properties + +assert_type(PS_all.coef, _Ar_x_n) +assert_type(PS_all.domain, _Ar_x_2) +assert_type(PS_all.window, _Ar_x_2) +assert_type(PS_all.symbol, str) + +# instance methods + +assert_type(PS_all.has_samecoef(PS_all), bool) +assert_type(PS_all.has_samedomain(PS_all), bool) +assert_type(PS_all.has_samewindow(PS_all), bool) +assert_type(PS_all.has_sametype(PS_all), bool) +assert_type(PS_poly.has_sametype(PS_poly), bool) +assert_type(PS_poly.has_sametype(PS_leg), bool) +assert_type(PS_poly.has_sametype(NotADirectoryError), bool) + +assert_type(PS_poly.copy(), npp.Polynomial) +assert_type(PS_cheb.copy(), npp.Chebyshev) +assert_type(PS_herm.copy(), npp.Hermite) +assert_type(PS_herme.copy(), npp.HermiteE) +assert_type(PS_lag.copy(), npp.Laguerre) +assert_type(PS_leg.copy(), npp.Legendre) + +assert_type(PS_leg.cutdeg(3), npp.Legendre) +assert_type(PS_leg.trim(), npp.Legendre) +assert_type(PS_leg.trim(tol=SC_f_co), npp.Legendre) +assert_type(PS_leg.truncate(SC_i_co), npp.Legendre) + +assert_type(PS_all.convert(None, npp.Chebyshev), npp.Chebyshev) +assert_type(PS_all.convert((0, 1), npp.Laguerre), npp.Laguerre) +assert_type(PS_all.convert([0, 1], npp.Hermite, [-1, 1]), npp.Hermite) + +assert_type(PS_all.degree(), int) +assert_type(PS_all.mapparms(), tuple[Any, Any]) + +assert_type(PS_poly.integ(), npp.Polynomial) +assert_type(PS_herme.integ(SC_i_co), npp.HermiteE) +assert_type(PS_lag.integ(SC_i_co, SC_f_co), npp.Laguerre) +assert_type(PS_poly.deriv(), npp.Polynomial) +assert_type(PS_herm.deriv(SC_i_co), npp.Hermite) + +assert_type(PS_poly.roots(), _Ar_x_n) + +assert_type( + PS_poly.linspace(), + tuple[_Ar_1d[np.float64 | np.complex128], _Ar_1d[np.float64 | np.complex128]], +) + +assert_type( + PS_poly.linspace(9), + tuple[_Ar_1d[np.float64 | np.complex128], _Ar_1d[np.float64 | np.complex128]], +) + +assert_type(PS_cheb.fit(AR_c_co, AR_c_co, SC_i_co), npp.Chebyshev) +assert_type(PS_leg.fit(AR_c_co, AR_c_co, AR_i), npp.Legendre) +assert_type(PS_herm.fit(AR_c_co, AR_c_co, SQ_i), npp.Hermite) +assert_type(PS_poly.fit(AR_c_co, SQ_c, SQ_i), npp.Polynomial) +assert_type(PS_lag.fit(SQ_c, SQ_c, SQ_i, full=False), npp.Laguerre) +assert_type( + PS_herme.fit(SQ_c, AR_c_co, SC_i_co, full=True), + tuple[npp.HermiteE, Sequence[np.inexact | np.int32]], +) + +# custom operations + +assert_type(PS_all.__hash__, None) +assert_type(PS_all.__array_ufunc__, None) + +assert_type(str(PS_all), str) +assert_type(repr(PS_all), str) +assert_type(format(PS_all), str) + +assert_type(len(PS_all), int) +assert_type(next(iter(PS_all)), np.float64 | Any) + +assert_type(PS_all(SC_f_co), np.float64 | Any) +assert_type(PS_all(SC_c_co), np.complex128 | Any) +assert_type(PS_all(Decimal()), np.float64 | Any) +assert_type(PS_poly(SQ_f), npt.NDArray[np.float64 | Any]) +assert_type(PS_poly(SQ_c), npt.NDArray[np.complex128 | Any]) +assert_type(PS_poly(SQ_O), npt.NDArray[np.object_]) +assert_type(PS_poly(AR_f), npt.NDArray[np.float64 | Any]) +assert_type(PS_poly(AR_c), npt.NDArray[np.complex128 | Any]) +assert_type(PS_poly(AR_O), npt.NDArray[np.object_]) +assert_type(PS_all(PS_poly), npp.Polynomial) + +assert_type(PS_poly == PS_poly, bool) +assert_type(PS_poly != PS_poly, bool) + +assert_type(-PS_poly, npp.Polynomial) +assert_type(+PS_poly, npp.Polynomial) + +assert_type(PS_poly + 5, npp.Polynomial) +assert_type(PS_poly - 5, npp.Polynomial) +assert_type(PS_poly * 5, npp.Polynomial) +assert_type(PS_poly / 5, npp.Polynomial) +assert_type(PS_poly // 5, npp.Polynomial) +assert_type(PS_poly % 5, npp.Polynomial) + +assert_type(PS_poly + PS_leg, npp.Polynomial) +assert_type(PS_poly - PS_leg, npp.Polynomial) +assert_type(PS_poly * PS_leg, npp.Polynomial) +assert_type(PS_poly / PS_leg, npp.Polynomial) +assert_type(PS_poly // PS_leg, npp.Polynomial) +assert_type(PS_poly % PS_leg, npp.Polynomial) + +assert_type(5 + PS_poly, npp.Polynomial) +assert_type(5 - PS_poly, npp.Polynomial) +assert_type(5 * PS_poly, npp.Polynomial) +assert_type(5 / PS_poly, npp.Polynomial) +assert_type(5 // PS_poly, npp.Polynomial) +assert_type(5 % PS_poly, npp.Polynomial) +assert_type(divmod(PS_poly, 5), tuple[npp.Polynomial, npp.Polynomial]) +assert_type(divmod(5, PS_poly), tuple[npp.Polynomial, npp.Polynomial]) + +assert_type(PS_poly**1, npp.Polynomial) +assert_type(PS_poly**1.0, npp.Polynomial) diff --git a/local_packages/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi b/local_packages/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi new file mode 100644 index 0000000..07d6c9d --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi @@ -0,0 +1,218 @@ +from collections.abc import Sequence +from decimal import Decimal +from fractions import Fraction +from typing import Literal as L, TypeAlias, assert_type + +import numpy as np +import numpy.polynomial.polyutils as pu +import numpy.typing as npt +from numpy.polynomial._polytypes import _Tuple2 + +_ArrFloat1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating]] +_ArrComplex1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating]] +_ArrObject1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] + +_ArrFloat1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.float64]] +_ArrComplex1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.complex128]] +_ArrObject1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.object_]] + +num_int: int +num_float: float +num_complex: complex +# will result in an `object_` dtype +num_object: Decimal | Fraction + +sct_int: np.int_ +sct_float: np.float64 +sct_complex: np.complex128 +sct_object: np.object_ # doesn't exist at runtime + +arr_int: npt.NDArray[np.int_] +arr_float: npt.NDArray[np.float64] +arr_complex: npt.NDArray[np.complex128] +arr_object: npt.NDArray[np.object_] + +seq_num_int: Sequence[int] +seq_num_float: Sequence[float] +seq_num_complex: Sequence[complex] +seq_num_object: Sequence[Decimal | Fraction] + +seq_sct_int: Sequence[np.int_] +seq_sct_float: Sequence[np.float64] +seq_sct_complex: Sequence[np.complex128] +seq_sct_object: Sequence[np.object_] + +seq_arr_int: Sequence[npt.NDArray[np.int_]] +seq_arr_float: Sequence[npt.NDArray[np.float64]] +seq_arr_complex: Sequence[npt.NDArray[np.complex128]] +seq_arr_object: Sequence[npt.NDArray[np.object_]] + +seq_seq_num_int: Sequence[Sequence[int]] +seq_seq_num_float: Sequence[Sequence[float]] +seq_seq_num_complex: Sequence[Sequence[complex]] +seq_seq_num_object: Sequence[Sequence[Decimal | Fraction]] + +seq_seq_sct_int: Sequence[Sequence[np.int_]] +seq_seq_sct_float: Sequence[Sequence[np.float64]] +seq_seq_sct_complex: Sequence[Sequence[np.complex128]] +seq_seq_sct_object: Sequence[Sequence[np.object_]] # doesn't exist at runtime + +# as_series + +assert_type(pu.as_series(arr_int), list[_ArrFloat1D]) +assert_type(pu.as_series(arr_float), list[_ArrFloat1D]) +assert_type(pu.as_series(arr_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(arr_object), list[_ArrObject1D]) + +assert_type(pu.as_series(seq_num_int), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_num_float), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_num_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(seq_num_object), list[_ArrObject1D]) + +assert_type(pu.as_series(seq_sct_int), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_sct_float), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_sct_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(seq_sct_object), list[_ArrObject1D]) + +assert_type(pu.as_series(seq_arr_int), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_arr_float), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_arr_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(seq_arr_object), list[_ArrObject1D]) + +assert_type(pu.as_series(seq_seq_num_int), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_seq_num_float), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_seq_num_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(seq_seq_num_object), list[_ArrObject1D]) + +assert_type(pu.as_series(seq_seq_sct_int), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_seq_sct_float), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_seq_sct_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(seq_seq_sct_object), list[_ArrObject1D]) + +# trimcoef + +assert_type(pu.trimcoef(num_int), _ArrFloat1D) +assert_type(pu.trimcoef(num_float), _ArrFloat1D) +assert_type(pu.trimcoef(num_complex), _ArrComplex1D) +assert_type(pu.trimcoef(num_object), _ArrObject1D) +assert_type(pu.trimcoef(num_object), _ArrObject1D) + +assert_type(pu.trimcoef(sct_int), _ArrFloat1D) +assert_type(pu.trimcoef(sct_float), _ArrFloat1D) +assert_type(pu.trimcoef(sct_complex), _ArrComplex1D) +assert_type(pu.trimcoef(sct_object), _ArrObject1D) + +assert_type(pu.trimcoef(arr_int), _ArrFloat1D) +assert_type(pu.trimcoef(arr_float), _ArrFloat1D) +assert_type(pu.trimcoef(arr_complex), _ArrComplex1D) +assert_type(pu.trimcoef(arr_object), _ArrObject1D) + +assert_type(pu.trimcoef(seq_num_int), _ArrFloat1D) +assert_type(pu.trimcoef(seq_num_float), _ArrFloat1D) +assert_type(pu.trimcoef(seq_num_complex), _ArrComplex1D) +assert_type(pu.trimcoef(seq_num_object), _ArrObject1D) + +assert_type(pu.trimcoef(seq_sct_int), _ArrFloat1D) +assert_type(pu.trimcoef(seq_sct_float), _ArrFloat1D) +assert_type(pu.trimcoef(seq_sct_complex), _ArrComplex1D) +assert_type(pu.trimcoef(seq_sct_object), _ArrObject1D) + +# getdomain + +assert_type(pu.getdomain(num_int), _ArrFloat1D_2) +assert_type(pu.getdomain(num_float), _ArrFloat1D_2) +assert_type(pu.getdomain(num_complex), _ArrComplex1D_2) +assert_type(pu.getdomain(num_object), _ArrObject1D_2) +assert_type(pu.getdomain(num_object), _ArrObject1D_2) + +assert_type(pu.getdomain(sct_int), _ArrFloat1D_2) +assert_type(pu.getdomain(sct_float), _ArrFloat1D_2) +assert_type(pu.getdomain(sct_complex), _ArrComplex1D_2) +assert_type(pu.getdomain(sct_object), _ArrObject1D_2) + +assert_type(pu.getdomain(arr_int), _ArrFloat1D_2) +assert_type(pu.getdomain(arr_float), _ArrFloat1D_2) +assert_type(pu.getdomain(arr_complex), _ArrComplex1D_2) +assert_type(pu.getdomain(arr_object), _ArrObject1D_2) + +assert_type(pu.getdomain(seq_num_int), _ArrFloat1D_2) +assert_type(pu.getdomain(seq_num_float), _ArrFloat1D_2) +assert_type(pu.getdomain(seq_num_complex), _ArrComplex1D_2) +assert_type(pu.getdomain(seq_num_object), _ArrObject1D_2) + +assert_type(pu.getdomain(seq_sct_int), _ArrFloat1D_2) +assert_type(pu.getdomain(seq_sct_float), _ArrFloat1D_2) +assert_type(pu.getdomain(seq_sct_complex), _ArrComplex1D_2) +assert_type(pu.getdomain(seq_sct_object), _ArrObject1D_2) + +# mapparms + +assert_type(pu.mapparms(seq_num_int, seq_num_int), _Tuple2[float]) +assert_type(pu.mapparms(seq_num_int, seq_num_float), _Tuple2[float]) +assert_type(pu.mapparms(seq_num_float, seq_num_float), _Tuple2[float]) +assert_type(pu.mapparms(seq_num_float, seq_num_complex), _Tuple2[complex]) +assert_type(pu.mapparms(seq_num_complex, seq_num_complex), _Tuple2[complex]) +assert_type(pu.mapparms(seq_num_complex, seq_num_object), _Tuple2[object]) +assert_type(pu.mapparms(seq_num_object, seq_num_object), _Tuple2[object]) + +assert_type(pu.mapparms(seq_sct_int, seq_sct_int), _Tuple2[np.floating]) +assert_type(pu.mapparms(seq_sct_int, seq_sct_float), _Tuple2[np.floating]) +assert_type(pu.mapparms(seq_sct_float, seq_sct_float), _Tuple2[float]) +assert_type(pu.mapparms(seq_sct_float, seq_sct_complex), _Tuple2[complex]) +assert_type(pu.mapparms(seq_sct_complex, seq_sct_complex), _Tuple2[complex]) +assert_type(pu.mapparms(seq_sct_complex, seq_sct_object), _Tuple2[object]) +assert_type(pu.mapparms(seq_sct_object, seq_sct_object), _Tuple2[object]) + +assert_type(pu.mapparms(arr_int, arr_int), _Tuple2[np.floating]) +assert_type(pu.mapparms(arr_int, arr_float), _Tuple2[np.floating]) +assert_type(pu.mapparms(arr_float, arr_float), _Tuple2[np.floating]) +assert_type(pu.mapparms(arr_float, arr_complex), _Tuple2[np.complexfloating]) +assert_type(pu.mapparms(arr_complex, arr_complex), _Tuple2[np.complexfloating]) +assert_type(pu.mapparms(arr_complex, arr_object), _Tuple2[object]) +assert_type(pu.mapparms(arr_object, arr_object), _Tuple2[object]) + +# mapdomain + +assert_type(pu.mapdomain(num_int, seq_num_int, seq_num_int), np.floating) +assert_type(pu.mapdomain(num_int, seq_num_int, seq_num_float), np.floating) +assert_type(pu.mapdomain(num_int, seq_num_float, seq_num_float), np.floating) +assert_type(pu.mapdomain(num_float, seq_num_float, seq_num_float), np.floating) +assert_type(pu.mapdomain(num_float, seq_num_float, seq_num_complex), np.complexfloating) +assert_type(pu.mapdomain(num_float, seq_num_complex, seq_num_complex), np.complexfloating) +assert_type(pu.mapdomain(num_complex, seq_num_complex, seq_num_complex), np.complexfloating) +assert_type(pu.mapdomain(num_complex, seq_num_complex, seq_num_object), object) +assert_type(pu.mapdomain(num_complex, seq_num_object, seq_num_object), object) +assert_type(pu.mapdomain(num_object, seq_num_object, seq_num_object), object) + +assert_type(pu.mapdomain(seq_num_int, seq_num_int, seq_num_int), _ArrFloat1D) +assert_type(pu.mapdomain(seq_num_int, seq_num_int, seq_num_float), _ArrFloat1D) +assert_type(pu.mapdomain(seq_num_int, seq_num_float, seq_num_float), _ArrFloat1D) +assert_type(pu.mapdomain(seq_num_float, seq_num_float, seq_num_float), _ArrFloat1D) +assert_type(pu.mapdomain(seq_num_float, seq_num_float, seq_num_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_num_float, seq_num_complex, seq_num_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_num_complex, seq_num_complex, seq_num_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_num_complex, seq_num_complex, seq_num_object), _ArrObject1D) +assert_type(pu.mapdomain(seq_num_complex, seq_num_object, seq_num_object), _ArrObject1D) +assert_type(pu.mapdomain(seq_num_object, seq_num_object, seq_num_object), _ArrObject1D) + +assert_type(pu.mapdomain(seq_sct_int, seq_sct_int, seq_sct_int), _ArrFloat1D) +assert_type(pu.mapdomain(seq_sct_int, seq_sct_int, seq_sct_float), _ArrFloat1D) +assert_type(pu.mapdomain(seq_sct_int, seq_sct_float, seq_sct_float), _ArrFloat1D) +assert_type(pu.mapdomain(seq_sct_float, seq_sct_float, seq_sct_float), _ArrFloat1D) +assert_type(pu.mapdomain(seq_sct_float, seq_sct_float, seq_sct_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_sct_float, seq_sct_complex, seq_sct_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_sct_complex, seq_sct_complex, seq_sct_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_sct_complex, seq_sct_complex, seq_sct_object), _ArrObject1D) +assert_type(pu.mapdomain(seq_sct_complex, seq_sct_object, seq_sct_object), _ArrObject1D) +assert_type(pu.mapdomain(seq_sct_object, seq_sct_object, seq_sct_object), _ArrObject1D) + +assert_type(pu.mapdomain(arr_int, arr_int, arr_int), _ArrFloat1D) +assert_type(pu.mapdomain(arr_int, arr_int, arr_float), _ArrFloat1D) +assert_type(pu.mapdomain(arr_int, arr_float, arr_float), _ArrFloat1D) +assert_type(pu.mapdomain(arr_float, arr_float, arr_float), _ArrFloat1D) +assert_type(pu.mapdomain(arr_float, arr_float, arr_complex), _ArrComplex1D) +assert_type(pu.mapdomain(arr_float, arr_complex, arr_complex), _ArrComplex1D) +assert_type(pu.mapdomain(arr_complex, arr_complex, arr_complex), _ArrComplex1D) +assert_type(pu.mapdomain(arr_complex, arr_complex, arr_object), _ArrObject1D) +assert_type(pu.mapdomain(arr_complex, arr_object, arr_object), _ArrObject1D) +assert_type(pu.mapdomain(arr_object, arr_object, arr_object), _ArrObject1D) diff --git a/local_packages/numpy/typing/tests/data/reveal/polynomial_series.pyi b/local_packages/numpy/typing/tests/data/reveal/polynomial_series.pyi new file mode 100644 index 0000000..3188ad9 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/polynomial_series.pyi @@ -0,0 +1,138 @@ +from collections.abc import Sequence +from typing import Any, TypeAlias, assert_type + +import numpy as np +import numpy.polynomial as npp +import numpy.typing as npt + +_ArrFloat1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating]] +_ArrFloat1D64: TypeAlias = np.ndarray[tuple[int], np.dtype[np.float64]] +_ArrComplex1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating]] +_ArrComplex1D128: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex128]] +_ArrObject1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] + +AR_b: npt.NDArray[np.bool] +AR_u4: npt.NDArray[np.uint32] +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +AR_O: npt.NDArray[np.object_] + +PS_poly: npp.Polynomial +PS_cheb: npp.Chebyshev + +assert_type(npp.polynomial.polyroots(AR_f8), _ArrFloat1D64) +assert_type(npp.polynomial.polyroots(AR_c16), _ArrComplex1D128) +assert_type(npp.polynomial.polyroots(AR_O), _ArrObject1D) + +assert_type(npp.polynomial.polyfromroots(AR_f8), _ArrFloat1D) +assert_type(npp.polynomial.polyfromroots(AR_c16), _ArrComplex1D) +assert_type(npp.polynomial.polyfromroots(AR_O), _ArrObject1D) + +# assert_type(npp.polynomial.polyadd(AR_b, AR_b), NoReturn) +assert_type(npp.polynomial.polyadd(AR_u4, AR_b), _ArrFloat1D) +assert_type(npp.polynomial.polyadd(AR_i8, AR_i8), _ArrFloat1D) +assert_type(npp.polynomial.polyadd(AR_f8, AR_i8), _ArrFloat1D) +assert_type(npp.polynomial.polyadd(AR_i8, AR_c16), _ArrComplex1D) +assert_type(npp.polynomial.polyadd(AR_O, AR_O), _ArrObject1D) + +assert_type(npp.polynomial.polymulx(AR_u4), _ArrFloat1D) +assert_type(npp.polynomial.polymulx(AR_i8), _ArrFloat1D) +assert_type(npp.polynomial.polymulx(AR_f8), _ArrFloat1D) +assert_type(npp.polynomial.polymulx(AR_c16), _ArrComplex1D) +assert_type(npp.polynomial.polymulx(AR_O), _ArrObject1D) + +assert_type(npp.polynomial.polypow(AR_u4, 2), _ArrFloat1D) +assert_type(npp.polynomial.polypow(AR_i8, 2), _ArrFloat1D) +assert_type(npp.polynomial.polypow(AR_f8, 2), _ArrFloat1D) +assert_type(npp.polynomial.polypow(AR_c16, 2), _ArrComplex1D) +assert_type(npp.polynomial.polypow(AR_O, 2), _ArrObject1D) + +# assert_type(npp.polynomial.polyder(PS_poly), npt.NDArray[np.object_]) +assert_type(npp.polynomial.polyder(AR_f8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyder(AR_c16), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyder(AR_O, m=2), npt.NDArray[np.object_]) + +# assert_type(npp.polynomial.polyint(PS_poly), npt.NDArray[np.object_]) +assert_type(npp.polynomial.polyint(AR_f8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyint(AR_f8, k=AR_c16), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyint(AR_O, m=2), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyval(AR_b, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval(AR_u4, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval(AR_i8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval(AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyval(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyval2d(AR_b, AR_b, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval2d(AR_u4, AR_u4, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval2d(AR_i8, AR_i8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval2d(AR_f8, AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval2d(AR_i8, AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyval2d(AR_O, AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyval3d(AR_b, AR_b, AR_b, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval3d(AR_u4, AR_u4, AR_u4, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval3d(AR_i8, AR_i8, AR_i8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval3d(AR_f8, AR_f8, AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval3d(AR_i8, AR_i8, AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyval3d(AR_O, AR_O, AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyvalfromroots(AR_b, AR_b), npt.NDArray[np.float64 | Any]) +assert_type(npp.polynomial.polyvalfromroots(AR_u4, AR_b), npt.NDArray[np.float64 | Any]) +assert_type(npp.polynomial.polyvalfromroots(AR_i8, AR_i8), npt.NDArray[np.float64 | Any]) +assert_type(npp.polynomial.polyvalfromroots(AR_f8, AR_i8), npt.NDArray[np.float64 | Any]) +assert_type(npp.polynomial.polyvalfromroots(AR_i8, AR_c16), npt.NDArray[np.complex128 | Any]) +assert_type(npp.polynomial.polyvalfromroots(AR_O, AR_O), npt.NDArray[np.object_ | Any]) + +assert_type(npp.polynomial.polyvander(AR_f8, 3), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyvander(AR_c16, 3), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyvander(AR_O, 3), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyvander2d(AR_f8, AR_f8, [4, 2]), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyvander2d(AR_c16, AR_c16, [4, 2]), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyvander2d(AR_O, AR_O, [4, 2]), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyvander3d(AR_f8, AR_f8, AR_f8, [4, 3, 2]), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyvander3d(AR_c16, AR_c16, AR_c16, [4, 3, 2]), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyvander3d(AR_O, AR_O, AR_O, [4, 3, 2]), npt.NDArray[np.object_]) + +assert_type( + npp.polynomial.polyfit(AR_f8, AR_f8, 2), + npt.NDArray[np.floating], +) +assert_type( + npp.polynomial.polyfit(AR_f8, AR_i8, 1, full=True), + tuple[npt.NDArray[np.floating], Sequence[np.inexact | np.int32]], +) +assert_type( + npp.polynomial.polyfit(AR_c16, AR_f8, 2), + npt.NDArray[np.complexfloating], +) +assert_type( + npp.polynomial.polyfit(AR_f8, AR_c16, 1, full=True)[0], + npt.NDArray[np.complexfloating], +) + +assert_type(npp.chebyshev.chebgauss(2), tuple[_ArrFloat1D64, _ArrFloat1D64]) + +assert_type(npp.chebyshev.chebweight(AR_f8), npt.NDArray[np.float64]) +assert_type(npp.chebyshev.chebweight(AR_c16), npt.NDArray[np.complex128]) +assert_type(npp.chebyshev.chebweight(AR_O), npt.NDArray[np.object_]) + +assert_type(npp.chebyshev.poly2cheb(AR_f8), _ArrFloat1D) +assert_type(npp.chebyshev.poly2cheb(AR_c16), _ArrComplex1D) +assert_type(npp.chebyshev.poly2cheb(AR_O), _ArrObject1D) + +assert_type(npp.chebyshev.cheb2poly(AR_f8), _ArrFloat1D) +assert_type(npp.chebyshev.cheb2poly(AR_c16), _ArrComplex1D) +assert_type(npp.chebyshev.cheb2poly(AR_O), _ArrObject1D) + +assert_type(npp.chebyshev.chebpts1(6), _ArrFloat1D64) +assert_type(npp.chebyshev.chebpts2(6), _ArrFloat1D64) + +assert_type( + npp.chebyshev.chebinterpolate(np.tanh, 3), + npt.NDArray[np.float64 | np.complex128 | np.object_], +) diff --git a/local_packages/numpy/typing/tests/data/reveal/random.pyi b/local_packages/numpy/typing/tests/data/reveal/random.pyi new file mode 100644 index 0000000..e188eb0 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/random.pyi @@ -0,0 +1,1546 @@ +import threading +from collections.abc import Sequence +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt +from numpy.random._generator import Generator +from numpy.random._mt19937 import MT19937 +from numpy.random._pcg64 import PCG64 +from numpy.random._philox import Philox +from numpy.random._sfc64 import SFC64 +from numpy.random.bit_generator import SeedlessSeedSequence, SeedSequence + +def_rng = np.random.default_rng() +seed_seq = np.random.SeedSequence() +mt19937 = np.random.MT19937() +pcg64 = np.random.PCG64() +sfc64 = np.random.SFC64() +philox = np.random.Philox() +seedless_seq = SeedlessSeedSequence() + +assert_type(def_rng, Generator) +assert_type(mt19937, MT19937) +assert_type(pcg64, PCG64) +assert_type(sfc64, SFC64) +assert_type(philox, Philox) +assert_type(seed_seq, SeedSequence) +assert_type(seedless_seq, SeedlessSeedSequence) + +mt19937_jumped = mt19937.jumped() +mt19937_jumped3 = mt19937.jumped(3) +mt19937_raw = mt19937.random_raw() +mt19937_raw_arr = mt19937.random_raw(5) + +assert_type(mt19937_jumped, MT19937) +assert_type(mt19937_jumped3, MT19937) +assert_type(mt19937_raw, int) +assert_type(mt19937_raw_arr, npt.NDArray[np.uint64]) +assert_type(mt19937.lock, threading.Lock) + +pcg64_jumped = pcg64.jumped() +pcg64_jumped3 = pcg64.jumped(3) +pcg64_adv = pcg64.advance(3) +pcg64_raw = pcg64.random_raw() +pcg64_raw_arr = pcg64.random_raw(5) + +assert_type(pcg64_jumped, PCG64) +assert_type(pcg64_jumped3, PCG64) +assert_type(pcg64_adv, PCG64) +assert_type(pcg64_raw, int) +assert_type(pcg64_raw_arr, npt.NDArray[np.uint64]) +assert_type(pcg64.lock, threading.Lock) + +philox_jumped = philox.jumped() +philox_jumped3 = philox.jumped(3) +philox_adv = philox.advance(3) +philox_raw = philox.random_raw() +philox_raw_arr = philox.random_raw(5) + +assert_type(philox_jumped, Philox) +assert_type(philox_jumped3, Philox) +assert_type(philox_adv, Philox) +assert_type(philox_raw, int) +assert_type(philox_raw_arr, npt.NDArray[np.uint64]) +assert_type(philox.lock, threading.Lock) + +sfc64_raw = sfc64.random_raw() +sfc64_raw_arr = sfc64.random_raw(5) + +assert_type(sfc64_raw, int) +assert_type(sfc64_raw_arr, npt.NDArray[np.uint64]) +assert_type(sfc64.lock, threading.Lock) + +assert_type(seed_seq.pool, npt.NDArray[np.uint32]) +assert_type(seed_seq.entropy, int | Sequence[int] | None) +assert_type(seed_seq.spawn(1), list[np.random.SeedSequence]) +assert_type(seed_seq.generate_state(8, "uint32"), npt.NDArray[np.uint32 | np.uint64]) +assert_type(seed_seq.generate_state(8, "uint64"), npt.NDArray[np.uint32 | np.uint64]) + +def_gen: np.random.Generator = np.random.default_rng() + +D_arr_0p1: npt.NDArray[np.float64] = np.array([0.1]) +D_arr_0p5: npt.NDArray[np.float64] = np.array([0.5]) +D_arr_0p9: npt.NDArray[np.float64] = np.array([0.9]) +D_arr_1p5: npt.NDArray[np.float64] = np.array([1.5]) +I_arr_10: npt.NDArray[np.int_] = np.array([10], dtype=np.int_) +I_arr_20: npt.NDArray[np.int_] = np.array([20], dtype=np.int_) +D_arr_like_0p1: list[float] = [0.1] +D_arr_like_0p5: list[float] = [0.5] +D_arr_like_0p9: list[float] = [0.9] +D_arr_like_1p5: list[float] = [1.5] +I_arr_like_10: list[int] = [10] +I_arr_like_20: list[int] = [20] +D_2D_like: list[list[float]] = [[1, 2], [2, 3], [3, 4], [4, 5.1]] +D_2D: npt.NDArray[np.float64] = np.array(D_2D_like) +S_out: npt.NDArray[np.float32] = np.empty(1, dtype=np.float32) +D_out: npt.NDArray[np.float64] = np.empty(1) + +assert_type(def_gen.standard_normal(), float) +assert_type(def_gen.standard_normal(dtype=np.float32), float) +assert_type(def_gen.standard_normal(dtype="float32"), float) +assert_type(def_gen.standard_normal(dtype="double"), float) +assert_type(def_gen.standard_normal(dtype=np.float64), float) +assert_type(def_gen.standard_normal(size=None), float) +assert_type(def_gen.standard_normal(size=1), npt.NDArray[np.float64]) +assert_type(def_gen.standard_normal(size=1, dtype=np.float32), npt.NDArray[np.float32]) +assert_type(def_gen.standard_normal(size=1, dtype="f4"), npt.NDArray[np.float32]) +assert_type(def_gen.standard_normal(size=1, dtype="float32", out=S_out), npt.NDArray[np.float32]) +assert_type(def_gen.standard_normal(dtype=np.float32, out=S_out), npt.NDArray[np.float32]) +assert_type(def_gen.standard_normal(size=1, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(def_gen.standard_normal(size=1, dtype="float64"), npt.NDArray[np.float64]) +assert_type(def_gen.standard_normal(size=1, dtype="f8"), npt.NDArray[np.float64]) +assert_type(def_gen.standard_normal(out=D_out), npt.NDArray[np.float64]) +assert_type(def_gen.standard_normal(size=1, dtype="float64"), npt.NDArray[np.float64]) +assert_type(def_gen.standard_normal(size=1, dtype="float64", out=D_out), npt.NDArray[np.float64]) + +assert_type(def_gen.random(), float) +assert_type(def_gen.random(dtype=np.float32), float) +assert_type(def_gen.random(dtype="float32"), float) +assert_type(def_gen.random(dtype="double"), float) +assert_type(def_gen.random(dtype=np.float64), float) +assert_type(def_gen.random(size=None), float) +assert_type(def_gen.random(size=1), npt.NDArray[np.float64]) +assert_type(def_gen.random(size=1, dtype=np.float32), npt.NDArray[np.float32]) +assert_type(def_gen.random(size=1, dtype="f4"), npt.NDArray[np.float32]) +assert_type(def_gen.random(size=1, dtype="float32", out=S_out), npt.NDArray[np.float32]) +assert_type(def_gen.random(dtype=np.float32, out=S_out), npt.NDArray[np.float32]) +assert_type(def_gen.random(size=1, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(def_gen.random(size=1, dtype="float64"), npt.NDArray[np.float64]) +assert_type(def_gen.random(size=1, dtype="f8"), npt.NDArray[np.float64]) +assert_type(def_gen.random(out=D_out), npt.NDArray[np.float64]) +assert_type(def_gen.random(size=1, dtype="float64"), npt.NDArray[np.float64]) +assert_type(def_gen.random(size=1, dtype="float64", out=D_out), npt.NDArray[np.float64]) + +assert_type(def_gen.standard_cauchy(), float) +assert_type(def_gen.standard_cauchy(size=None), float) +assert_type(def_gen.standard_cauchy(size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.standard_exponential(), float) +assert_type(def_gen.standard_exponential(method="inv"), float) +assert_type(def_gen.standard_exponential(dtype=np.float32), float) +assert_type(def_gen.standard_exponential(dtype="float32"), float) +assert_type(def_gen.standard_exponential(dtype="double"), float) +assert_type(def_gen.standard_exponential(dtype=np.float64), float) +assert_type(def_gen.standard_exponential(size=None), float) +assert_type(def_gen.standard_exponential(size=None, method="inv"), float) +assert_type(def_gen.standard_exponential(size=1, method="inv"), npt.NDArray[np.float64]) +assert_type(def_gen.standard_exponential(size=1, dtype=np.float32), npt.NDArray[np.float32]) +assert_type(def_gen.standard_exponential(size=1, dtype="f4", method="inv"), npt.NDArray[np.float32]) +assert_type(def_gen.standard_exponential(size=1, dtype="float32", out=S_out), npt.NDArray[np.float32]) +assert_type(def_gen.standard_exponential(dtype=np.float32, out=S_out), npt.NDArray[np.float32]) +assert_type(def_gen.standard_exponential(size=1, dtype=np.float64, method="inv"), npt.NDArray[np.float64]) +assert_type(def_gen.standard_exponential(size=1, dtype="float64"), npt.NDArray[np.float64]) +assert_type(def_gen.standard_exponential(size=1, dtype="f8"), npt.NDArray[np.float64]) +assert_type(def_gen.standard_exponential(out=D_out), npt.NDArray[np.float64]) +assert_type(def_gen.standard_exponential(size=1, dtype="float64"), npt.NDArray[np.float64]) +assert_type(def_gen.standard_exponential(size=1, dtype="float64", out=D_out), npt.NDArray[np.float64]) + +assert_type(def_gen.zipf(1.5), int) +assert_type(def_gen.zipf(1.5, size=None), int) +assert_type(def_gen.zipf(1.5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.zipf(D_arr_1p5), npt.NDArray[np.int64]) +assert_type(def_gen.zipf(D_arr_1p5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.zipf(D_arr_like_1p5), npt.NDArray[np.int64]) +assert_type(def_gen.zipf(D_arr_like_1p5, size=1), npt.NDArray[np.int64]) + +assert_type(def_gen.weibull(0.5), float) +assert_type(def_gen.weibull(0.5, size=None), float) +assert_type(def_gen.weibull(0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.weibull(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.weibull(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.weibull(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.weibull(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.standard_t(0.5), float) +assert_type(def_gen.standard_t(0.5, size=None), float) +assert_type(def_gen.standard_t(0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.standard_t(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.standard_t(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.standard_t(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.standard_t(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.poisson(0.5), int) +assert_type(def_gen.poisson(0.5, size=None), int) +assert_type(def_gen.poisson(0.5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.poisson(D_arr_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.poisson(D_arr_0p5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.poisson(D_arr_like_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.poisson(D_arr_like_0p5, size=1), npt.NDArray[np.int64]) + +assert_type(def_gen.power(0.5), float) +assert_type(def_gen.power(0.5, size=None), float) +assert_type(def_gen.power(0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.power(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.power(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.power(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.power(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.pareto(0.5), float) +assert_type(def_gen.pareto(0.5, size=None), float) +assert_type(def_gen.pareto(0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.pareto(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.pareto(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.pareto(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.pareto(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.chisquare(0.5), float) +assert_type(def_gen.chisquare(0.5, size=None), float) +assert_type(def_gen.chisquare(0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.chisquare(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.chisquare(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.chisquare(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.chisquare(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.exponential(0.5), float) +assert_type(def_gen.exponential(0.5, size=None), float) +assert_type(def_gen.exponential(0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.exponential(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.exponential(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.exponential(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.exponential(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.geometric(0.5), int) +assert_type(def_gen.geometric(0.5, size=None), int) +assert_type(def_gen.geometric(0.5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.geometric(D_arr_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.geometric(D_arr_0p5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.geometric(D_arr_like_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.geometric(D_arr_like_0p5, size=1), npt.NDArray[np.int64]) + +assert_type(def_gen.logseries(0.5), int) +assert_type(def_gen.logseries(0.5, size=None), int) +assert_type(def_gen.logseries(0.5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.logseries(D_arr_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.logseries(D_arr_0p5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.logseries(D_arr_like_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.logseries(D_arr_like_0p5, size=1), npt.NDArray[np.int64]) + +assert_type(def_gen.rayleigh(0.5), float) +assert_type(def_gen.rayleigh(0.5, size=None), float) +assert_type(def_gen.rayleigh(0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.rayleigh(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.rayleigh(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.rayleigh(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.rayleigh(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.standard_gamma(0.5), float) +assert_type(def_gen.standard_gamma(0.5, size=None), float) +assert_type(def_gen.standard_gamma(0.5, dtype="float32"), float) +assert_type(def_gen.standard_gamma(0.5, size=None, dtype="float32"), float) +assert_type(def_gen.standard_gamma(0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(D_arr_0p5, dtype="f4"), npt.NDArray[np.float32]) +assert_type(def_gen.standard_gamma(0.5, size=1, dtype="float32", out=S_out), npt.NDArray[np.float32]) +assert_type(def_gen.standard_gamma(D_arr_0p5, dtype=np.float32, out=S_out), npt.NDArray[np.float32]) +assert_type(def_gen.standard_gamma(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(0.5, out=D_out), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(D_arr_like_0p5, out=D_out), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(D_arr_like_0p5, size=1, out=D_out, dtype=np.float64), npt.NDArray[np.float64]) + +assert_type(def_gen.vonmises(0.5, 0.5), float) +assert_type(def_gen.vonmises(0.5, 0.5, size=None), float) +assert_type(def_gen.vonmises(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.wald(0.5, 0.5), float) +assert_type(def_gen.wald(0.5, 0.5, size=None), float) +assert_type(def_gen.wald(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.wald(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.wald(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.wald(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.wald(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.wald(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.wald(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.wald(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.wald(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.wald(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.wald(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.uniform(0.5, 0.5), float) +assert_type(def_gen.uniform(0.5, 0.5, size=None), float) +assert_type(def_gen.uniform(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.beta(0.5, 0.5), float) +assert_type(def_gen.beta(0.5, 0.5, size=None), float) +assert_type(def_gen.beta(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.beta(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.beta(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.beta(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.beta(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.beta(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.beta(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.beta(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.beta(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.beta(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.beta(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.f(0.5, 0.5), float) +assert_type(def_gen.f(0.5, 0.5, size=None), float) +assert_type(def_gen.f(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.f(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.f(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.f(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.f(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.f(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.f(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.f(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.f(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.f(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.f(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.gamma(0.5, 0.5), float) +assert_type(def_gen.gamma(0.5, 0.5, size=None), float) +assert_type(def_gen.gamma(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.gamma(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.gamma(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gamma(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.gamma(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.gamma(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.gamma(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gamma(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gamma(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.gumbel(0.5, 0.5), float) +assert_type(def_gen.gumbel(0.5, 0.5, size=None), float) +assert_type(def_gen.gumbel(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.laplace(0.5, 0.5), float) +assert_type(def_gen.laplace(0.5, 0.5, size=None), float) +assert_type(def_gen.laplace(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.logistic(0.5, 0.5), float) +assert_type(def_gen.logistic(0.5, 0.5, size=None), float) +assert_type(def_gen.logistic(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.lognormal(0.5, 0.5), float) +assert_type(def_gen.lognormal(0.5, 0.5, size=None), float) +assert_type(def_gen.lognormal(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.lognormal(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.lognormal(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.lognormal(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.lognormal(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.lognormal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.lognormal(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.lognormal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.lognormal(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.noncentral_chisquare(0.5, 0.5), float) +assert_type(def_gen.noncentral_chisquare(0.5, 0.5, size=None), float) +assert_type(def_gen.noncentral_chisquare(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.normal(0.5, 0.5), float) +assert_type(def_gen.normal(0.5, 0.5, size=None), float) +assert_type(def_gen.normal(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.normal(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.normal(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.normal(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.normal(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.normal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.normal(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.normal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.normal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.normal(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.normal(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.triangular(0.1, 0.5, 0.9), float) +assert_type(def_gen.triangular(0.1, 0.5, 0.9, size=None), float) +assert_type(def_gen.triangular(0.1, 0.5, 0.9, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.triangular(D_arr_0p1, 0.5, 0.9), npt.NDArray[np.float64]) +assert_type(def_gen.triangular(0.1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(def_gen.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.triangular(0.1, D_arr_0p5, 0.9, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.triangular(D_arr_like_0p1, 0.5, D_arr_0p9), npt.NDArray[np.float64]) +assert_type(def_gen.triangular(0.5, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(def_gen.triangular(D_arr_0p1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(def_gen.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.noncentral_f(0.1, 0.5, 0.9), float) +assert_type(def_gen.noncentral_f(0.1, 0.5, 0.9, size=None), float) +assert_type(def_gen.noncentral_f(0.1, 0.5, 0.9, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(D_arr_0p1, 0.5, 0.9), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(0.1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(0.1, D_arr_0p5, 0.9, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(0.5, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.binomial(10, 0.5), int) +assert_type(def_gen.binomial(10, 0.5, size=None), int) +assert_type(def_gen.binomial(10, 0.5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(I_arr_10, 0.5), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(10, D_arr_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(I_arr_10, 0.5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(10, D_arr_0p5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(I_arr_like_10, 0.5), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(10, D_arr_like_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(I_arr_10, D_arr_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(I_arr_10, D_arr_0p5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(I_arr_like_10, D_arr_like_0p5, size=1), npt.NDArray[np.int64]) + +assert_type(def_gen.negative_binomial(10, 0.5), int) +assert_type(def_gen.negative_binomial(10, 0.5, size=None), int) +assert_type(def_gen.negative_binomial(10, 0.5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(I_arr_10, 0.5), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(10, D_arr_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(I_arr_10, 0.5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(10, D_arr_0p5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(I_arr_like_10, 0.5), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(10, D_arr_like_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(I_arr_10, D_arr_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(I_arr_10, D_arr_0p5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1), npt.NDArray[np.int64]) + +assert_type(def_gen.hypergeometric(20, 20, 10), int) +assert_type(def_gen.hypergeometric(20, 20, 10, size=None), int) +assert_type(def_gen.hypergeometric(20, 20, 10, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_20, 20, 10), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(20, I_arr_20, 10), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(20, I_arr_20, 10, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_like_20, 20, I_arr_10), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(20, I_arr_like_20, 10), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_20, I_arr_20, 10), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, 10), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1), npt.NDArray[np.int64]) + +I_int64_100: npt.NDArray[np.int64] = np.array([100], dtype=np.int64) + +assert_type(def_gen.integers(0, 100), np.int64) +assert_type(def_gen.integers(100), np.int64) +assert_type(def_gen.integers([100]), npt.NDArray[np.int64]) +assert_type(def_gen.integers(0, [100]), npt.NDArray[np.int64]) + +I_bool_low: npt.NDArray[np.bool] = np.array([0], dtype=np.bool) +I_bool_low_like: list[int] = [0] +I_bool_high_open: npt.NDArray[np.bool] = np.array([1], dtype=np.bool) +I_bool_high_closed: npt.NDArray[np.bool] = np.array([1], dtype=np.bool) + +assert_type(def_gen.integers(2, dtype=bool), bool) +assert_type(def_gen.integers(0, 2, dtype=bool), bool) +assert_type(def_gen.integers(1, dtype=bool, endpoint=True), bool) +assert_type(def_gen.integers(0, 1, dtype=bool, endpoint=True), bool) +assert_type(def_gen.integers(I_bool_low_like, 1, dtype=bool, endpoint=True), npt.NDArray[np.bool]) +assert_type(def_gen.integers(I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) +assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) +assert_type(def_gen.integers(0, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) +assert_type(def_gen.integers(I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool]) +assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool]) +assert_type(def_gen.integers(0, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool]) + +assert_type(def_gen.integers(2, dtype=np.bool), np.bool) +assert_type(def_gen.integers(0, 2, dtype=np.bool), np.bool) +assert_type(def_gen.integers(1, dtype=np.bool, endpoint=True), np.bool) +assert_type(def_gen.integers(0, 1, dtype=np.bool, endpoint=True), np.bool) +assert_type(def_gen.integers(I_bool_low_like, 1, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) +assert_type(def_gen.integers(I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) +assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) +assert_type(def_gen.integers(0, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) +assert_type(def_gen.integers(I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) +assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) +assert_type(def_gen.integers(0, I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) + +I_u1_low: npt.NDArray[np.uint8] = np.array([0], dtype=np.uint8) +I_u1_low_like: list[int] = [0] +I_u1_high_open: npt.NDArray[np.uint8] = np.array([255], dtype=np.uint8) +I_u1_high_closed: npt.NDArray[np.uint8] = np.array([255], dtype=np.uint8) + +assert_type(def_gen.integers(256, dtype="u1"), np.uint8) +assert_type(def_gen.integers(0, 256, dtype="u1"), np.uint8) +assert_type(def_gen.integers(255, dtype="u1", endpoint=True), np.uint8) +assert_type(def_gen.integers(0, 255, dtype="u1", endpoint=True), np.uint8) +assert_type(def_gen.integers(I_u1_low_like, 255, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(0, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(0, I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) + +assert_type(def_gen.integers(256, dtype="uint8"), np.uint8) +assert_type(def_gen.integers(0, 256, dtype="uint8"), np.uint8) +assert_type(def_gen.integers(255, dtype="uint8", endpoint=True), np.uint8) +assert_type(def_gen.integers(0, 255, dtype="uint8", endpoint=True), np.uint8) +assert_type(def_gen.integers(I_u1_low_like, 255, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(0, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(0, I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) + +assert_type(def_gen.integers(256, dtype=np.uint8), np.uint8) +assert_type(def_gen.integers(0, 256, dtype=np.uint8), np.uint8) +assert_type(def_gen.integers(255, dtype=np.uint8, endpoint=True), np.uint8) +assert_type(def_gen.integers(0, 255, dtype=np.uint8, endpoint=True), np.uint8) +assert_type(def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(0, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) + +I_u2_low: npt.NDArray[np.uint16] = np.array([0], dtype=np.uint16) +I_u2_low_like: list[int] = [0] +I_u2_high_open: npt.NDArray[np.uint16] = np.array([65535], dtype=np.uint16) +I_u2_high_closed: npt.NDArray[np.uint16] = np.array([65535], dtype=np.uint16) + +assert_type(def_gen.integers(65536, dtype="u2"), np.uint16) +assert_type(def_gen.integers(0, 65536, dtype="u2"), np.uint16) +assert_type(def_gen.integers(65535, dtype="u2", endpoint=True), np.uint16) +assert_type(def_gen.integers(0, 65535, dtype="u2", endpoint=True), np.uint16) +assert_type(def_gen.integers(I_u2_low_like, 65535, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(0, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(0, I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) + +assert_type(def_gen.integers(65536, dtype="uint16"), np.uint16) +assert_type(def_gen.integers(0, 65536, dtype="uint16"), np.uint16) +assert_type(def_gen.integers(65535, dtype="uint16", endpoint=True), np.uint16) +assert_type(def_gen.integers(0, 65535, dtype="uint16", endpoint=True), np.uint16) +assert_type(def_gen.integers(I_u2_low_like, 65535, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(0, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(0, I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) + +assert_type(def_gen.integers(65536, dtype=np.uint16), np.uint16) +assert_type(def_gen.integers(0, 65536, dtype=np.uint16), np.uint16) +assert_type(def_gen.integers(65535, dtype=np.uint16, endpoint=True), np.uint16) +assert_type(def_gen.integers(0, 65535, dtype=np.uint16, endpoint=True), np.uint16) +assert_type(def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(0, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) + +I_u4_low: npt.NDArray[np.uint32] = np.array([0], dtype=np.uint32) +I_u4_low_like: list[int] = [0] +I_u4_high_open: npt.NDArray[np.uint32] = np.array([4294967295], dtype=np.uint32) +I_u4_high_closed: npt.NDArray[np.uint32] = np.array([4294967295], dtype=np.uint32) + +assert_type(def_gen.integers(4294967296, dtype=np.int_), np.int_) +assert_type(def_gen.integers(0, 4294967296, dtype=np.int_), np.int_) +assert_type(def_gen.integers(4294967295, dtype=np.int_, endpoint=True), np.int_) +assert_type(def_gen.integers(0, 4294967295, dtype=np.int_, endpoint=True), np.int_) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) +assert_type(def_gen.integers(I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) +assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) +assert_type(def_gen.integers(I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) +assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) + +assert_type(def_gen.integers(4294967296, dtype="u4"), np.uint32) +assert_type(def_gen.integers(0, 4294967296, dtype="u4"), np.uint32) +assert_type(def_gen.integers(4294967295, dtype="u4", endpoint=True), np.uint32) +assert_type(def_gen.integers(0, 4294967295, dtype="u4", endpoint=True), np.uint32) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(0, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(0, I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) + +assert_type(def_gen.integers(4294967296, dtype="uint32"), np.uint32) +assert_type(def_gen.integers(0, 4294967296, dtype="uint32"), np.uint32) +assert_type(def_gen.integers(4294967295, dtype="uint32", endpoint=True), np.uint32) +assert_type(def_gen.integers(0, 4294967295, dtype="uint32", endpoint=True), np.uint32) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(0, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(0, I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) + +assert_type(def_gen.integers(4294967296, dtype=np.uint32), np.uint32) +assert_type(def_gen.integers(0, 4294967296, dtype=np.uint32), np.uint32) +assert_type(def_gen.integers(4294967295, dtype=np.uint32, endpoint=True), np.uint32) +assert_type(def_gen.integers(0, 4294967295, dtype=np.uint32, endpoint=True), np.uint32) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) + +assert_type(def_gen.integers(4294967296, dtype=np.uint), np.uint) +assert_type(def_gen.integers(0, 4294967296, dtype=np.uint), np.uint) +assert_type(def_gen.integers(4294967295, dtype=np.uint, endpoint=True), np.uint) +assert_type(def_gen.integers(0, 4294967295, dtype=np.uint, endpoint=True), np.uint) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) +assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) +assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) +assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) +assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) + +I_u8_low: npt.NDArray[np.uint64] = np.array([0], dtype=np.uint64) +I_u8_low_like: list[int] = [0] +I_u8_high_open: npt.NDArray[np.uint64] = np.array([18446744073709551615], dtype=np.uint64) +I_u8_high_closed: npt.NDArray[np.uint64] = np.array([18446744073709551615], dtype=np.uint64) + +assert_type(def_gen.integers(18446744073709551616, dtype="u8"), np.uint64) +assert_type(def_gen.integers(0, 18446744073709551616, dtype="u8"), np.uint64) +assert_type(def_gen.integers(18446744073709551615, dtype="u8", endpoint=True), np.uint64) +assert_type(def_gen.integers(0, 18446744073709551615, dtype="u8", endpoint=True), np.uint64) +assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(0, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(0, I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) + +assert_type(def_gen.integers(18446744073709551616, dtype="uint64"), np.uint64) +assert_type(def_gen.integers(0, 18446744073709551616, dtype="uint64"), np.uint64) +assert_type(def_gen.integers(18446744073709551615, dtype="uint64", endpoint=True), np.uint64) +assert_type(def_gen.integers(0, 18446744073709551615, dtype="uint64", endpoint=True), np.uint64) +assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(0, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(0, I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) + +assert_type(def_gen.integers(18446744073709551616, dtype=np.uint64), np.uint64) +assert_type(def_gen.integers(0, 18446744073709551616, dtype=np.uint64), np.uint64) +assert_type(def_gen.integers(18446744073709551615, dtype=np.uint64, endpoint=True), np.uint64) +assert_type(def_gen.integers(0, 18446744073709551615, dtype=np.uint64, endpoint=True), np.uint64) +assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(0, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) + +I_i1_low: npt.NDArray[np.int8] = np.array([-128], dtype=np.int8) +I_i1_low_like: list[int] = [-128] +I_i1_high_open: npt.NDArray[np.int8] = np.array([127], dtype=np.int8) +I_i1_high_closed: npt.NDArray[np.int8] = np.array([127], dtype=np.int8) + +assert_type(def_gen.integers(128, dtype="i1"), np.int8) +assert_type(def_gen.integers(-128, 128, dtype="i1"), np.int8) +assert_type(def_gen.integers(127, dtype="i1", endpoint=True), np.int8) +assert_type(def_gen.integers(-128, 127, dtype="i1", endpoint=True), np.int8) +assert_type(def_gen.integers(I_i1_low_like, 127, dtype="i1", endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) +assert_type(def_gen.integers(-128, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(-128, I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) + +assert_type(def_gen.integers(128, dtype="int8"), np.int8) +assert_type(def_gen.integers(-128, 128, dtype="int8"), np.int8) +assert_type(def_gen.integers(127, dtype="int8", endpoint=True), np.int8) +assert_type(def_gen.integers(-128, 127, dtype="int8", endpoint=True), np.int8) +assert_type(def_gen.integers(I_i1_low_like, 127, dtype="int8", endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) +assert_type(def_gen.integers(-128, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(-128, I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8]) + +assert_type(def_gen.integers(128, dtype=np.int8), np.int8) +assert_type(def_gen.integers(-128, 128, dtype=np.int8), np.int8) +assert_type(def_gen.integers(127, dtype=np.int8, endpoint=True), np.int8) +assert_type(def_gen.integers(-128, 127, dtype=np.int8, endpoint=True), np.int8) +assert_type(def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) +assert_type(def_gen.integers(-128, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) + +I_i2_low: npt.NDArray[np.int16] = np.array([-32768], dtype=np.int16) +I_i2_low_like: list[int] = [-32768] +I_i2_high_open: npt.NDArray[np.int16] = np.array([32767], dtype=np.int16) +I_i2_high_closed: npt.NDArray[np.int16] = np.array([32767], dtype=np.int16) + +assert_type(def_gen.integers(32768, dtype="i2"), np.int16) +assert_type(def_gen.integers(-32768, 32768, dtype="i2"), np.int16) +assert_type(def_gen.integers(32767, dtype="i2", endpoint=True), np.int16) +assert_type(def_gen.integers(-32768, 32767, dtype="i2", endpoint=True), np.int16) +assert_type(def_gen.integers(I_i2_low_like, 32767, dtype="i2", endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) +assert_type(def_gen.integers(-32768, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) + +assert_type(def_gen.integers(32768, dtype="int16"), np.int16) +assert_type(def_gen.integers(-32768, 32768, dtype="int16"), np.int16) +assert_type(def_gen.integers(32767, dtype="int16", endpoint=True), np.int16) +assert_type(def_gen.integers(-32768, 32767, dtype="int16", endpoint=True), np.int16) +assert_type(def_gen.integers(I_i2_low_like, 32767, dtype="int16", endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) +assert_type(def_gen.integers(-32768, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16]) + +assert_type(def_gen.integers(32768, dtype=np.int16), np.int16) +assert_type(def_gen.integers(-32768, 32768, dtype=np.int16), np.int16) +assert_type(def_gen.integers(32767, dtype=np.int16, endpoint=True), np.int16) +assert_type(def_gen.integers(-32768, 32767, dtype=np.int16, endpoint=True), np.int16) +assert_type(def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) +assert_type(def_gen.integers(-32768, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) + +I_i4_low: npt.NDArray[np.int32] = np.array([-2147483648], dtype=np.int32) +I_i4_low_like: list[int] = [-2147483648] +I_i4_high_open: npt.NDArray[np.int32] = np.array([2147483647], dtype=np.int32) +I_i4_high_closed: npt.NDArray[np.int32] = np.array([2147483647], dtype=np.int32) + +assert_type(def_gen.integers(2147483648, dtype="i4"), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483648, dtype="i4"), np.int32) +assert_type(def_gen.integers(2147483647, dtype="i4", endpoint=True), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483647, dtype="i4", endpoint=True), np.int32) +assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="i4", endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) +assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) + +assert_type(def_gen.integers(2147483648, dtype="int32"), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483648, dtype="int32"), np.int32) +assert_type(def_gen.integers(2147483647, dtype="int32", endpoint=True), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483647, dtype="int32", endpoint=True), np.int32) +assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="int32", endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) +assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32]) + +assert_type(def_gen.integers(2147483648, dtype=np.int32), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483648, dtype=np.int32), np.int32) +assert_type(def_gen.integers(2147483647, dtype=np.int32, endpoint=True), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483647, dtype=np.int32, endpoint=True), np.int32) +assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) +assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) + +I_i8_low: npt.NDArray[np.int64] = np.array([-9223372036854775808], dtype=np.int64) +I_i8_low_like: list[int] = [-9223372036854775808] +I_i8_high_open: npt.NDArray[np.int64] = np.array([9223372036854775807], dtype=np.int64) +I_i8_high_closed: npt.NDArray[np.int64] = np.array([9223372036854775807], dtype=np.int64) + +assert_type(def_gen.integers(9223372036854775808, dtype="i8"), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="i8"), np.int64) +assert_type(def_gen.integers(9223372036854775807, dtype="i8", endpoint=True), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="i8", endpoint=True), np.int64) +assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="i8", endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) +assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64]) + +assert_type(def_gen.integers(9223372036854775808, dtype="int64"), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="int64"), np.int64) +assert_type(def_gen.integers(9223372036854775807, dtype="int64", endpoint=True), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="int64", endpoint=True), np.int64) +assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="int64", endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) +assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) + +assert_type(def_gen.integers(9223372036854775808, dtype=np.int64), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype=np.int64), np.int64) +assert_type(def_gen.integers(9223372036854775807, dtype=np.int64, endpoint=True), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype=np.int64, endpoint=True), np.int64) +assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) + +assert_type(def_gen.bit_generator, np.random.BitGenerator) + +assert_type(def_gen.bytes(2), bytes) + +assert_type(def_gen.choice(5), int) +assert_type(def_gen.choice(5, 3), npt.NDArray[np.int64]) +assert_type(def_gen.choice(5, 3, replace=True), npt.NDArray[np.int64]) +assert_type(def_gen.choice(5, 3, p=[1 / 5] * 5), npt.NDArray[np.int64]) +assert_type(def_gen.choice(5, 3, p=[1 / 5] * 5, replace=False), npt.NDArray[np.int64]) + +assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"]), Any) +assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3), npt.NDArray[Any]) +assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4), npt.NDArray[Any]) +assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True), npt.NDArray[Any]) +assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4])), npt.NDArray[Any]) + +assert_type(def_gen.dirichlet([0.5, 0.5]), npt.NDArray[np.float64]) +assert_type(def_gen.dirichlet(np.array([0.5, 0.5])), npt.NDArray[np.float64]) +assert_type(def_gen.dirichlet(np.array([0.5, 0.5]), size=3), npt.NDArray[np.float64]) + +assert_type(def_gen.multinomial(20, [1 / 6.0] * 6), npt.NDArray[np.int64]) +assert_type(def_gen.multinomial(20, np.array([0.5, 0.5])), npt.NDArray[np.int64]) +assert_type(def_gen.multinomial(20, [1 / 6.0] * 6, size=2), npt.NDArray[np.int64]) +assert_type(def_gen.multinomial([[10], [20]], [1 / 6.0] * 6, size=(2, 2)), npt.NDArray[np.int64]) +assert_type(def_gen.multinomial(np.array([[10], [20]]), np.array([0.5, 0.5]), size=(2, 2)), npt.NDArray[np.int64]) + +assert_type(def_gen.multivariate_hypergeometric([3, 5, 7], 2), npt.NDArray[np.int64]) +assert_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2), npt.NDArray[np.int64]) +assert_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=4), npt.NDArray[np.int64]) +assert_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=(4, 7)), npt.NDArray[np.int64]) +assert_type(def_gen.multivariate_hypergeometric([3, 5, 7], 2, method="count"), npt.NDArray[np.int64]) +assert_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, method="marginals"), npt.NDArray[np.int64]) + +assert_type(def_gen.multivariate_normal([0.0], [[1.0]]), npt.NDArray[np.float64]) +assert_type(def_gen.multivariate_normal([0.0], np.array([[1.0]])), npt.NDArray[np.float64]) +assert_type(def_gen.multivariate_normal(np.array([0.0]), [[1.0]]), npt.NDArray[np.float64]) +assert_type(def_gen.multivariate_normal([0.0], np.array([[1.0]])), npt.NDArray[np.float64]) + +assert_type(def_gen.permutation(10), npt.NDArray[np.int64]) +assert_type(def_gen.permutation([1, 2, 3, 4]), npt.NDArray[Any]) +assert_type(def_gen.permutation(np.array([1, 2, 3, 4])), npt.NDArray[Any]) +assert_type(def_gen.permutation(D_2D, axis=1), npt.NDArray[Any]) +assert_type(def_gen.permuted(D_2D), npt.NDArray[Any]) +assert_type(def_gen.permuted(D_2D_like), npt.NDArray[Any]) +assert_type(def_gen.permuted(D_2D, axis=1), npt.NDArray[Any]) +assert_type(def_gen.permuted(D_2D, out=D_2D), npt.NDArray[Any]) +assert_type(def_gen.permuted(D_2D_like, out=D_2D), npt.NDArray[Any]) +assert_type(def_gen.permuted(D_2D_like, out=D_2D), npt.NDArray[Any]) +assert_type(def_gen.permuted(D_2D, axis=1, out=D_2D), npt.NDArray[Any]) + +assert_type(def_gen.shuffle(np.arange(10)), None) +assert_type(def_gen.shuffle([1, 2, 3, 4, 5]), None) +assert_type(def_gen.shuffle(D_2D, axis=1), None) + +assert_type(np.random.Generator(pcg64), np.random.Generator) +assert_type(def_gen.__str__(), str) +assert_type(def_gen.__repr__(), str) +assert_type(def_gen.__setstate__(dict(def_gen.bit_generator.state)), None) + +# RandomState +random_st: np.random.RandomState = np.random.RandomState() + +assert_type(random_st.standard_normal(), float) +assert_type(random_st.standard_normal(size=None), float) +assert_type(random_st.standard_normal(size=1), npt.NDArray[np.float64]) + +assert_type(random_st.random(), float) +assert_type(random_st.random(size=None), float) +assert_type(random_st.random(size=1), npt.NDArray[np.float64]) + +assert_type(random_st.standard_cauchy(), float) +assert_type(random_st.standard_cauchy(size=None), float) +assert_type(random_st.standard_cauchy(size=1), npt.NDArray[np.float64]) + +assert_type(random_st.standard_exponential(), float) +assert_type(random_st.standard_exponential(size=None), float) +assert_type(random_st.standard_exponential(size=1), npt.NDArray[np.float64]) + +assert_type(random_st.zipf(1.5), int) +assert_type(random_st.zipf(1.5, size=None), int) +assert_type(random_st.zipf(1.5, size=1), npt.NDArray[np.long]) +assert_type(random_st.zipf(D_arr_1p5), npt.NDArray[np.long]) +assert_type(random_st.zipf(D_arr_1p5, size=1), npt.NDArray[np.long]) +assert_type(random_st.zipf(D_arr_like_1p5), npt.NDArray[np.long]) +assert_type(random_st.zipf(D_arr_like_1p5, size=1), npt.NDArray[np.long]) + +assert_type(random_st.weibull(0.5), float) +assert_type(random_st.weibull(0.5, size=None), float) +assert_type(random_st.weibull(0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.weibull(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.weibull(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.weibull(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.weibull(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.standard_t(0.5), float) +assert_type(random_st.standard_t(0.5, size=None), float) +assert_type(random_st.standard_t(0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.standard_t(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.standard_t(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.standard_t(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.standard_t(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.poisson(0.5), int) +assert_type(random_st.poisson(0.5, size=None), int) +assert_type(random_st.poisson(0.5, size=1), npt.NDArray[np.long]) +assert_type(random_st.poisson(D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.poisson(D_arr_0p5, size=1), npt.NDArray[np.long]) +assert_type(random_st.poisson(D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.poisson(D_arr_like_0p5, size=1), npt.NDArray[np.long]) + +assert_type(random_st.power(0.5), float) +assert_type(random_st.power(0.5, size=None), float) +assert_type(random_st.power(0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.power(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.power(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.power(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.power(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.pareto(0.5), float) +assert_type(random_st.pareto(0.5, size=None), float) +assert_type(random_st.pareto(0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.pareto(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.pareto(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.pareto(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.pareto(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.chisquare(0.5), float) +assert_type(random_st.chisquare(0.5, size=None), float) +assert_type(random_st.chisquare(0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.chisquare(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.chisquare(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.chisquare(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.chisquare(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.exponential(0.5), float) +assert_type(random_st.exponential(0.5, size=None), float) +assert_type(random_st.exponential(0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.exponential(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.exponential(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.exponential(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.exponential(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.geometric(0.5), int) +assert_type(random_st.geometric(0.5, size=None), int) +assert_type(random_st.geometric(0.5, size=1), npt.NDArray[np.long]) +assert_type(random_st.geometric(D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.geometric(D_arr_0p5, size=1), npt.NDArray[np.long]) +assert_type(random_st.geometric(D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.geometric(D_arr_like_0p5, size=1), npt.NDArray[np.long]) + +assert_type(random_st.logseries(0.5), int) +assert_type(random_st.logseries(0.5, size=None), int) +assert_type(random_st.logseries(0.5, size=1), npt.NDArray[np.long]) +assert_type(random_st.logseries(D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.logseries(D_arr_0p5, size=1), npt.NDArray[np.long]) +assert_type(random_st.logseries(D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.logseries(D_arr_like_0p5, size=1), npt.NDArray[np.long]) + +assert_type(random_st.rayleigh(0.5), float) +assert_type(random_st.rayleigh(0.5, size=None), float) +assert_type(random_st.rayleigh(0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.rayleigh(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.rayleigh(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.rayleigh(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.rayleigh(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.standard_gamma(0.5), float) +assert_type(random_st.standard_gamma(0.5, size=None), float) +assert_type(random_st.standard_gamma(0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.standard_gamma(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.standard_gamma(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.standard_gamma(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.standard_gamma(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.standard_gamma(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.vonmises(0.5, 0.5), float) +assert_type(random_st.vonmises(0.5, 0.5, size=None), float) +assert_type(random_st.vonmises(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.wald(0.5, 0.5), float) +assert_type(random_st.wald(0.5, 0.5, size=None), float) +assert_type(random_st.wald(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.wald(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.wald(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.wald(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.wald(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.wald(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.wald(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.wald(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.wald(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.wald(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.wald(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.uniform(0.5, 0.5), float) +assert_type(random_st.uniform(0.5, 0.5, size=None), float) +assert_type(random_st.uniform(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.uniform(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.uniform(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.uniform(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.uniform(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.uniform(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.uniform(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.uniform(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.uniform(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.uniform(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.beta(0.5, 0.5), float) +assert_type(random_st.beta(0.5, 0.5, size=None), float) +assert_type(random_st.beta(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.beta(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.beta(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.beta(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.beta(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.beta(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.beta(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.beta(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.beta(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.beta(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.beta(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.f(0.5, 0.5), float) +assert_type(random_st.f(0.5, 0.5, size=None), float) +assert_type(random_st.f(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.f(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.f(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.f(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.f(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.f(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.f(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.f(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.f(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.f(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.f(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.gamma(0.5, 0.5), float) +assert_type(random_st.gamma(0.5, 0.5, size=None), float) +assert_type(random_st.gamma(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.gamma(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.gamma(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gamma(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.gamma(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.gamma(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.gamma(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gamma(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gamma(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gamma(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.gumbel(0.5, 0.5), float) +assert_type(random_st.gumbel(0.5, 0.5, size=None), float) +assert_type(random_st.gumbel(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.laplace(0.5, 0.5), float) +assert_type(random_st.laplace(0.5, 0.5, size=None), float) +assert_type(random_st.laplace(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.laplace(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.laplace(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.laplace(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.laplace(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.laplace(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.laplace(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.laplace(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.laplace(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.laplace(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.logistic(0.5, 0.5), float) +assert_type(random_st.logistic(0.5, 0.5, size=None), float) +assert_type(random_st.logistic(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.logistic(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.logistic(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.logistic(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.logistic(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.logistic(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.logistic(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.logistic(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.logistic(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.logistic(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.lognormal(0.5, 0.5), float) +assert_type(random_st.lognormal(0.5, 0.5, size=None), float) +assert_type(random_st.lognormal(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.noncentral_chisquare(0.5, 0.5), float) +assert_type(random_st.noncentral_chisquare(0.5, 0.5, size=None), float) +assert_type(random_st.noncentral_chisquare(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.normal(0.5, 0.5), float) +assert_type(random_st.normal(0.5, 0.5, size=None), float) +assert_type(random_st.normal(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.normal(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.normal(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.normal(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.normal(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.normal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.normal(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.normal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.normal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.normal(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.normal(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.triangular(0.1, 0.5, 0.9), float) +assert_type(random_st.triangular(0.1, 0.5, 0.9, size=None), float) +assert_type(random_st.triangular(0.1, 0.5, 0.9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_0p1, 0.5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.triangular(0.1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.triangular(0.1, D_arr_0p5, 0.9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_like_0p1, 0.5, D_arr_0p9), npt.NDArray[np.float64]) +assert_type(random_st.triangular(0.5, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_0p1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.noncentral_f(0.1, 0.5, 0.9), float) +assert_type(random_st.noncentral_f(0.1, 0.5, 0.9, size=None), float) +assert_type(random_st.noncentral_f(0.1, 0.5, 0.9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_0p1, 0.5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(0.1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(0.1, D_arr_0p5, 0.9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(0.5, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.binomial(10, 0.5), int) +assert_type(random_st.binomial(10, 0.5, size=None), int) +assert_type(random_st.binomial(10, 0.5, size=1), npt.NDArray[np.long]) +assert_type(random_st.binomial(I_arr_10, 0.5), npt.NDArray[np.long]) +assert_type(random_st.binomial(10, D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.binomial(I_arr_10, 0.5, size=1), npt.NDArray[np.long]) +assert_type(random_st.binomial(10, D_arr_0p5, size=1), npt.NDArray[np.long]) +assert_type(random_st.binomial(I_arr_like_10, 0.5), npt.NDArray[np.long]) +assert_type(random_st.binomial(10, D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.binomial(I_arr_10, D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.binomial(I_arr_10, D_arr_0p5, size=1), npt.NDArray[np.long]) +assert_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5, size=1), npt.NDArray[np.long]) + +assert_type(random_st.negative_binomial(10, 0.5), int) +assert_type(random_st.negative_binomial(10, 0.5, size=None), int) +assert_type(random_st.negative_binomial(10, 0.5, size=1), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(I_arr_10, 0.5), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(10, D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(I_arr_10, 0.5, size=1), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(10, D_arr_0p5, size=1), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(I_arr_like_10, 0.5), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(10, D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(I_arr_10, D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(I_arr_10, D_arr_0p5, size=1), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1), npt.NDArray[np.long]) + +assert_type(random_st.hypergeometric(20, 20, 10), int) +assert_type(random_st.hypergeometric(20, 20, 10, size=None), int) +assert_type(random_st.hypergeometric(20, 20, 10, size=1), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(I_arr_20, 20, 10), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(20, I_arr_20, 10), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(20, I_arr_20, 10, size=1), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(I_arr_like_20, 20, I_arr_10), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(20, I_arr_like_20, 10), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(I_arr_20, I_arr_20, 10), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, 10), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1), npt.NDArray[np.long]) + +assert_type(random_st.randint(0, 100), int) +assert_type(random_st.randint(100), int) +assert_type(random_st.randint([100]), npt.NDArray[np.long]) +assert_type(random_st.randint(0, [100]), npt.NDArray[np.long]) + +assert_type(random_st.randint(2, dtype=bool), bool) +assert_type(random_st.randint(0, 2, dtype=bool), bool) +assert_type(random_st.randint(I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) +assert_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) +assert_type(random_st.randint(0, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) + +assert_type(random_st.randint(2, dtype=np.bool), np.bool) +assert_type(random_st.randint(0, 2, dtype=np.bool), np.bool) +assert_type(random_st.randint(I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) +assert_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) +assert_type(random_st.randint(0, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) + +assert_type(random_st.randint(256, dtype="u1"), np.uint8) +assert_type(random_st.randint(0, 256, dtype="u1"), np.uint8) +assert_type(random_st.randint(I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) +assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) +assert_type(random_st.randint(0, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) + +assert_type(random_st.randint(256, dtype="uint8"), np.uint8) +assert_type(random_st.randint(0, 256, dtype="uint8"), np.uint8) +assert_type(random_st.randint(I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) +assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) +assert_type(random_st.randint(0, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) + +assert_type(random_st.randint(256, dtype=np.uint8), np.uint8) +assert_type(random_st.randint(0, 256, dtype=np.uint8), np.uint8) +assert_type(random_st.randint(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) +assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) +assert_type(random_st.randint(0, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) + +assert_type(random_st.randint(65536, dtype="u2"), np.uint16) +assert_type(random_st.randint(0, 65536, dtype="u2"), np.uint16) +assert_type(random_st.randint(I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) +assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) +assert_type(random_st.randint(0, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) + +assert_type(random_st.randint(65536, dtype="uint16"), np.uint16) +assert_type(random_st.randint(0, 65536, dtype="uint16"), np.uint16) +assert_type(random_st.randint(I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) +assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) +assert_type(random_st.randint(0, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) + +assert_type(random_st.randint(65536, dtype=np.uint16), np.uint16) +assert_type(random_st.randint(0, 65536, dtype=np.uint16), np.uint16) +assert_type(random_st.randint(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) +assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) +assert_type(random_st.randint(0, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) + +assert_type(random_st.randint(4294967296, dtype="u4"), np.uint32) +assert_type(random_st.randint(0, 4294967296, dtype="u4"), np.uint32) +assert_type(random_st.randint(I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) +assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) +assert_type(random_st.randint(0, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) + +assert_type(random_st.randint(4294967296, dtype="uint32"), np.uint32) +assert_type(random_st.randint(0, 4294967296, dtype="uint32"), np.uint32) +assert_type(random_st.randint(I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) +assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) +assert_type(random_st.randint(0, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) + +assert_type(random_st.randint(4294967296, dtype=np.uint32), np.uint32) +assert_type(random_st.randint(0, 4294967296, dtype=np.uint32), np.uint32) +assert_type(random_st.randint(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) +assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) +assert_type(random_st.randint(0, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) + +assert_type(random_st.randint(4294967296, dtype=np.uint), np.uint) +assert_type(random_st.randint(0, 4294967296, dtype=np.uint), np.uint) +assert_type(random_st.randint(I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) +assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) +assert_type(random_st.randint(0, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) + +assert_type(random_st.randint(18446744073709551616, dtype="u8"), np.uint64) +assert_type(random_st.randint(0, 18446744073709551616, dtype="u8"), np.uint64) +assert_type(random_st.randint(I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) +assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) +assert_type(random_st.randint(0, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) + +assert_type(random_st.randint(18446744073709551616, dtype="uint64"), np.uint64) +assert_type(random_st.randint(0, 18446744073709551616, dtype="uint64"), np.uint64) +assert_type(random_st.randint(I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) +assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) +assert_type(random_st.randint(0, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) + +assert_type(random_st.randint(18446744073709551616, dtype=np.uint64), np.uint64) +assert_type(random_st.randint(0, 18446744073709551616, dtype=np.uint64), np.uint64) +assert_type(random_st.randint(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) +assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) +assert_type(random_st.randint(0, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) + +assert_type(random_st.randint(128, dtype="i1"), np.int8) +assert_type(random_st.randint(-128, 128, dtype="i1"), np.int8) +assert_type(random_st.randint(I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) +assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) +assert_type(random_st.randint(-128, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) + +assert_type(random_st.randint(128, dtype="int8"), np.int8) +assert_type(random_st.randint(-128, 128, dtype="int8"), np.int8) +assert_type(random_st.randint(I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) +assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) +assert_type(random_st.randint(-128, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) + +assert_type(random_st.randint(128, dtype=np.int8), np.int8) +assert_type(random_st.randint(-128, 128, dtype=np.int8), np.int8) +assert_type(random_st.randint(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) +assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) +assert_type(random_st.randint(-128, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) + +assert_type(random_st.randint(32768, dtype="i2"), np.int16) +assert_type(random_st.randint(-32768, 32768, dtype="i2"), np.int16) +assert_type(random_st.randint(I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) +assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) +assert_type(random_st.randint(-32768, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) + +assert_type(random_st.randint(32768, dtype="int16"), np.int16) +assert_type(random_st.randint(-32768, 32768, dtype="int16"), np.int16) +assert_type(random_st.randint(I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) +assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) +assert_type(random_st.randint(-32768, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) + +assert_type(random_st.randint(32768, dtype=np.int16), np.int16) +assert_type(random_st.randint(-32768, 32768, dtype=np.int16), np.int16) +assert_type(random_st.randint(I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) +assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) +assert_type(random_st.randint(-32768, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) + +assert_type(random_st.randint(2147483648, dtype="i4"), np.int32) +assert_type(random_st.randint(-2147483648, 2147483648, dtype="i4"), np.int32) +assert_type(random_st.randint(I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) +assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) +assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) + +assert_type(random_st.randint(2147483648, dtype="int32"), np.int32) +assert_type(random_st.randint(-2147483648, 2147483648, dtype="int32"), np.int32) +assert_type(random_st.randint(I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) +assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) +assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) + +assert_type(random_st.randint(2147483648, dtype=np.int32), np.int32) +assert_type(random_st.randint(-2147483648, 2147483648, dtype=np.int32), np.int32) +assert_type(random_st.randint(I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) +assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) +assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) + +assert_type(random_st.randint(2147483648, dtype=np.int_), np.int_) +assert_type(random_st.randint(-2147483648, 2147483648, dtype=np.int_), np.int_) +assert_type(random_st.randint(I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_]) +assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_]) +assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_]) + +assert_type(random_st.randint(9223372036854775808, dtype="i8"), np.int64) +assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="i8"), np.int64) +assert_type(random_st.randint(I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) +assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) +assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) + +assert_type(random_st.randint(9223372036854775808, dtype="int64"), np.int64) +assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="int64"), np.int64) +assert_type(random_st.randint(I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) +assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) +assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) + +assert_type(random_st.randint(9223372036854775808, dtype=np.int64), np.int64) +assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype=np.int64), np.int64) +assert_type(random_st.randint(I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) + +assert_type(random_st._bit_generator, np.random.BitGenerator) + +assert_type(random_st.bytes(2), bytes) + +assert_type(random_st.choice(5), int) +assert_type(random_st.choice(5, 3), npt.NDArray[np.long]) +assert_type(random_st.choice(5, 3, replace=True), npt.NDArray[np.long]) +assert_type(random_st.choice(5, 3, p=[1 / 5] * 5), npt.NDArray[np.long]) +assert_type(random_st.choice(5, 3, p=[1 / 5] * 5, replace=False), npt.NDArray[np.long]) + +assert_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"]), Any) +assert_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3), npt.NDArray[Any]) +assert_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4), npt.NDArray[Any]) +assert_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True), npt.NDArray[Any]) +assert_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4])), npt.NDArray[Any]) + +assert_type(random_st.dirichlet([0.5, 0.5]), npt.NDArray[np.float64]) +assert_type(random_st.dirichlet(np.array([0.5, 0.5])), npt.NDArray[np.float64]) +assert_type(random_st.dirichlet(np.array([0.5, 0.5]), size=3), npt.NDArray[np.float64]) + +assert_type(random_st.multinomial(20, [1 / 6.0] * 6), npt.NDArray[np.long]) +assert_type(random_st.multinomial(20, np.array([0.5, 0.5])), npt.NDArray[np.long]) +assert_type(random_st.multinomial(20, [1 / 6.0] * 6, size=2), npt.NDArray[np.long]) + +assert_type(random_st.multivariate_normal([0.0], [[1.0]]), npt.NDArray[np.float64]) +assert_type(random_st.multivariate_normal([0.0], np.array([[1.0]])), npt.NDArray[np.float64]) +assert_type(random_st.multivariate_normal(np.array([0.0]), [[1.0]]), npt.NDArray[np.float64]) +assert_type(random_st.multivariate_normal([0.0], np.array([[1.0]])), npt.NDArray[np.float64]) + +assert_type(random_st.permutation(10), npt.NDArray[np.long]) +assert_type(random_st.permutation([1, 2, 3, 4]), npt.NDArray[Any]) +assert_type(random_st.permutation(np.array([1, 2, 3, 4])), npt.NDArray[Any]) +assert_type(random_st.permutation(D_2D), npt.NDArray[Any]) + +assert_type(random_st.shuffle(np.arange(10)), None) +assert_type(random_st.shuffle([1, 2, 3, 4, 5]), None) +assert_type(random_st.shuffle(D_2D), None) + +assert_type(np.random.RandomState(pcg64), np.random.RandomState) +assert_type(np.random.RandomState(0), np.random.RandomState) +assert_type(np.random.RandomState([0, 1, 2]), np.random.RandomState) +assert_type(random_st.__str__(), str) +assert_type(random_st.__repr__(), str) +random_st_state = random_st.__getstate__() +assert_type(random_st_state, dict[str, Any]) +assert_type(random_st.__setstate__(random_st_state), None) +assert_type(random_st.seed(), None) +assert_type(random_st.seed(1), None) +assert_type(random_st.seed([0, 1]), None) +random_st_get_state = random_st.get_state() +assert_type(random_st_state, dict[str, Any]) +random_st_get_state_legacy = random_st.get_state(legacy=True) +assert_type(random_st_get_state_legacy, dict[str, Any] | tuple[str, npt.NDArray[np.uint32], int, int, float]) +assert_type(random_st.set_state(random_st_get_state), None) + +assert_type(random_st.rand(), float) +assert_type(random_st.rand(1), npt.NDArray[np.float64]) +assert_type(random_st.rand(1, 2), npt.NDArray[np.float64]) +assert_type(random_st.randn(), float) +assert_type(random_st.randn(1), npt.NDArray[np.float64]) +assert_type(random_st.randn(1, 2), npt.NDArray[np.float64]) +assert_type(random_st.random_sample(), float) +assert_type(random_st.random_sample(1), npt.NDArray[np.float64]) +assert_type(random_st.random_sample(size=(1, 2)), npt.NDArray[np.float64]) + +assert_type(random_st.tomaxint(), int) +assert_type(random_st.tomaxint(1), npt.NDArray[np.int64]) +assert_type(random_st.tomaxint((1,)), npt.NDArray[np.int64]) + +assert_type(np.random.mtrand.set_bit_generator(pcg64), None) +assert_type(np.random.mtrand.get_bit_generator(), np.random.BitGenerator) diff --git a/local_packages/numpy/typing/tests/data/reveal/rec.pyi b/local_packages/numpy/typing/tests/data/reveal/rec.pyi new file mode 100644 index 0000000..aacf217 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/rec.pyi @@ -0,0 +1,171 @@ +import io +from typing import Any, TypeAlias, assert_type + +import numpy as np +import numpy.typing as npt + +_RecArray: TypeAlias = np.recarray[tuple[Any, ...], np.dtype[np.record]] + +AR_i8: npt.NDArray[np.int64] +REC_AR_V: _RecArray +AR_LIST: list[npt.NDArray[np.int64]] + +record: np.record +file_obj: io.BufferedIOBase + +assert_type(np.rec.format_parser( + formats=[np.float64, np.int64, np.bool], + names=["f8", "i8", "?"], + titles=None, + aligned=True, +), np.rec.format_parser) +assert_type(np.rec.format_parser.dtype, np.dtype[np.void]) + +assert_type(record.field_a, Any) +assert_type(record.field_b, Any) +assert_type(record["field_a"], Any) +assert_type(record["field_b"], Any) +assert_type(record.pprint(), str) +record.field_c = 5 + +assert_type(REC_AR_V.field(0), Any) +assert_type(REC_AR_V.field("field_a"), Any) +assert_type(REC_AR_V.field(0, AR_i8), None) +assert_type(REC_AR_V.field("field_a", AR_i8), None) +assert_type(REC_AR_V["field_a"], npt.NDArray[Any]) +assert_type(REC_AR_V.field_a, Any) +assert_type(REC_AR_V.__array_finalize__(object()), None) + +assert_type( + np.recarray( + shape=(10, 5), + formats=[np.float64, np.int64, np.bool], + order="K", + byteorder="|", + ), + _RecArray, +) + +assert_type( + np.recarray( + shape=(10, 5), + dtype=[("f8", np.float64), ("i8", np.int64)], + strides=(5, 5), + ), + np.recarray, +) + +assert_type(np.rec.fromarrays(AR_LIST), np.recarray) +assert_type( + np.rec.fromarrays(AR_LIST, dtype=np.int64), + np.recarray, +) +assert_type( + np.rec.fromarrays( + AR_LIST, + formats=[np.int64, np.float64], + names=["i8", "f8"] + ), + _RecArray, +) + +assert_type( + np.rec.fromrecords((1, 1.5)), + _RecArray +) + +assert_type( + np.rec.fromrecords( + [(1, 1.5)], + dtype=[("i8", np.int64), ("f8", np.float64)], + ), + _RecArray, +) + +assert_type( + np.rec.fromrecords( + REC_AR_V, + formats=[np.int64, np.float64], + names=["i8", "f8"] + ), + _RecArray, +) + +assert_type( + np.rec.fromstring( + b"(1, 1.5)", + dtype=[("i8", np.int64), ("f8", np.float64)], + ), + _RecArray, +) + +assert_type( + np.rec.fromstring( + REC_AR_V, + formats=[np.int64, np.float64], + names=["i8", "f8"] + ), + _RecArray, +) + +assert_type( + np.rec.fromfile( + "test_file.txt", + dtype=[("i8", np.int64), ("f8", np.float64)], + ), + np.recarray, +) + +assert_type( + np.rec.fromfile( + file_obj, + formats=[np.int64, np.float64], + names=["i8", "f8"] + ), + _RecArray, +) + +assert_type(np.rec.array(AR_i8), np.recarray[tuple[Any, ...], np.dtype[np.int64]]) + +assert_type( + np.rec.array([(1, 1.5)], dtype=[("i8", np.int64), ("f8", np.float64)]), + np.recarray, +) + +assert_type( + np.rec.array( + [(1, 1.5)], + formats=[np.int64, np.float64], + names=["i8", "f8"] + ), + _RecArray, +) + +assert_type( + np.rec.array( + None, + dtype=np.float64, + shape=(10, 3), + ), + np.recarray, +) + +assert_type( + np.rec.array( + None, + formats=[np.int64, np.float64], + names=["i8", "f8"], + shape=(10, 3), + ), + _RecArray, +) + +assert_type( + np.rec.array(file_obj, dtype=np.float64), + np.recarray, +) + +assert_type( + np.rec.array(file_obj, formats=[np.int64, np.float64], names=["i8", "f8"]), + _RecArray, +) diff --git a/local_packages/numpy/typing/tests/data/reveal/scalars.pyi b/local_packages/numpy/typing/tests/data/reveal/scalars.pyi new file mode 100644 index 0000000..67444e3 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/scalars.pyi @@ -0,0 +1,191 @@ +from typing import Any, Literal, TypeAlias, assert_type + +import numpy as np + +_1: TypeAlias = Literal[1] + +b: np.bool +u8: np.uint64 +i8: np.int64 +f8: np.float64 +c8: np.complex64 +c16: np.complex128 +m: np.timedelta64 +U: np.str_ +S: np.bytes_ +V: np.void +O: np.object_ # cannot exists at runtime + +array_nd: np.ndarray[Any, Any] +array_0d: np.ndarray[tuple[()], Any] +array_2d_2x2: np.ndarray[tuple[Literal[2], Literal[2]], Any] + +assert_type(c8.real, np.float32) +assert_type(c8.imag, np.float32) + +assert_type(c8.real.real, np.float32) +assert_type(c8.real.imag, np.float32) + +assert_type(c8.itemsize, int) +assert_type(c8.shape, tuple[()]) +assert_type(c8.strides, tuple[()]) + +assert_type(c8.ndim, Literal[0]) +assert_type(c8.size, Literal[1]) + +assert_type(c8.squeeze(), np.complex64) +assert_type(c8.byteswap(), np.complex64) +assert_type(c8.transpose(), np.complex64) + +assert_type(c8.dtype, np.dtype[np.complex64]) + +assert_type(c8.real, np.float32) +assert_type(c16.imag, np.float64) + +assert_type(np.str_("foo"), np.str_) + +assert_type(V[0], Any) +assert_type(V["field1"], Any) +assert_type(V[["field1", "field2"]], np.void) +V[0] = 5 + +# Aliases +assert_type(np.bool_(), np.bool[Literal[False]]) +assert_type(np.byte(), np.byte) +assert_type(np.short(), np.short) +assert_type(np.intc(), np.intc) +assert_type(np.intp(), np.intp) +assert_type(np.int_(), np.int_) +assert_type(np.long(), np.long) +assert_type(np.longlong(), np.longlong) + +assert_type(np.ubyte(), np.ubyte) +assert_type(np.ushort(), np.ushort) +assert_type(np.uintc(), np.uintc) +assert_type(np.uintp(), np.uintp) +assert_type(np.uint(), np.uint) +assert_type(np.ulong(), np.ulong) +assert_type(np.ulonglong(), np.ulonglong) + +assert_type(np.half(), np.half) +assert_type(np.single(), np.single) +assert_type(np.double(), np.double) +assert_type(np.longdouble(), np.longdouble) + +assert_type(np.csingle(), np.csingle) +assert_type(np.cdouble(), np.cdouble) +assert_type(np.clongdouble(), np.clongdouble) + +assert_type(b.item(), bool) +assert_type(i8.item(), int) +assert_type(u8.item(), int) +assert_type(f8.item(), float) +assert_type(c16.item(), complex) +assert_type(U.item(), str) +assert_type(S.item(), bytes) + +assert_type(b.tolist(), bool) +assert_type(i8.tolist(), int) +assert_type(u8.tolist(), int) +assert_type(f8.tolist(), float) +assert_type(c16.tolist(), complex) +assert_type(U.tolist(), str) +assert_type(S.tolist(), bytes) + +assert_type(b.ravel(), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(i8.ravel(), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(u8.ravel(), np.ndarray[tuple[int], np.dtype[np.uint64]]) +assert_type(f8.ravel(), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(c16.ravel(), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(U.ravel(), np.ndarray[tuple[int], np.dtype[np.str_]]) +assert_type(S.ravel(), np.ndarray[tuple[int], np.dtype[np.bytes_]]) + +assert_type(b.flatten(), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(i8.flatten(), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(u8.flatten(), np.ndarray[tuple[int], np.dtype[np.uint64]]) +assert_type(f8.flatten(), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(c16.flatten(), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(U.flatten(), np.ndarray[tuple[int], np.dtype[np.str_]]) +assert_type(S.flatten(), np.ndarray[tuple[int], np.dtype[np.bytes_]]) + +assert_type(b.reshape(()), np.bool) +assert_type(i8.reshape([]), np.int64) +assert_type(b.reshape(1), np.ndarray[tuple[_1], np.dtype[np.bool]]) +assert_type(i8.reshape(-1), np.ndarray[tuple[_1], np.dtype[np.int64]]) +assert_type(u8.reshape(1, 1), np.ndarray[tuple[_1, _1], np.dtype[np.uint64]]) +assert_type(f8.reshape(1, -1), np.ndarray[tuple[_1, _1], np.dtype[np.float64]]) +assert_type(c16.reshape(1, 1, 1), np.ndarray[tuple[_1, _1, _1], np.dtype[np.complex128]]) +assert_type(U.reshape(1, 1, 1, 1), np.ndarray[tuple[_1, _1, _1, _1], np.dtype[np.str_]]) +assert_type( + S.reshape(1, 1, 1, 1, 1), + np.ndarray[ + # len(shape) >= 5 + tuple[_1, _1, _1, _1, _1, *tuple[_1, ...]], + np.dtype[np.bytes_], + ], +) + +assert_type(i8.astype(float), Any) +assert_type(i8.astype(np.float64), np.float64) + +assert_type(i8.view(), np.int64) +assert_type(i8.view(np.float64), np.float64) +assert_type(i8.view(float), Any) +assert_type(i8.view(np.float64, np.ndarray), np.float64) + +assert_type(i8.getfield(float), Any) +assert_type(i8.getfield(np.float64), np.float64) +assert_type(i8.getfield(np.float64, 8), np.float64) + +assert_type(f8.as_integer_ratio(), tuple[int, int]) +assert_type(f8.is_integer(), bool) +assert_type(f8.__trunc__(), int) +assert_type(f8.__getformat__("float"), str) +assert_type(f8.hex(), str) +assert_type(np.float64.fromhex("0x0.0p+0"), np.float64) + +assert_type(f8.__getnewargs__(), tuple[float]) +assert_type(c16.__getnewargs__(), tuple[float, float]) + +assert_type(i8.numerator, np.int64) +assert_type(i8.denominator, Literal[1]) +assert_type(u8.numerator, np.uint64) +assert_type(u8.denominator, Literal[1]) +assert_type(m.numerator, np.timedelta64) +assert_type(m.denominator, Literal[1]) + +assert_type(round(i8), int) +assert_type(round(i8, 3), np.int64) +assert_type(round(u8), int) +assert_type(round(u8, 3), np.uint64) +assert_type(round(f8), int) +assert_type(round(f8, 3), np.float64) + +assert_type(f8.__ceil__(), int) +assert_type(f8.__floor__(), int) + +assert_type(i8.is_integer(), Literal[True]) + +assert_type(O.real, np.object_) +assert_type(O.imag, np.object_) +assert_type(int(O), int) +assert_type(float(O), float) +assert_type(complex(O), complex) + +# These fail fail because of a mypy __new__ bug: +# https://github.com/python/mypy/issues/15182 +# According to the typing spec, the following statements are valid, see +# https://typing.readthedocs.io/en/latest/spec/constructors.html#new-method + +# assert_type(np.object_(), None) +# assert_type(np.object_(None), None) +# assert_type(np.object_(array_nd), np.ndarray[Any, np.dtype[np.object_]]) +# assert_type(np.object_([]), npt.NDArray[np.object_]) +# assert_type(np.object_(()), npt.NDArray[np.object_]) +# assert_type(np.object_(range(4)), npt.NDArray[np.object_]) +# assert_type(np.object_(+42), int) +# assert_type(np.object_(1 / 137), float) +# assert_type(np.object_('Developers! ' * (1 << 6)), str) +# assert_type(np.object_(object()), object) +# assert_type(np.object_({False, True, NotADirectoryError}), set[Any]) +# assert_type(np.object_({'spam': 'food', 'ham': 'food'}), dict[str, str]) diff --git a/local_packages/numpy/typing/tests/data/reveal/shape.pyi b/local_packages/numpy/typing/tests/data/reveal/shape.pyi new file mode 100644 index 0000000..2406a39 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/shape.pyi @@ -0,0 +1,13 @@ +from typing import Any, NamedTuple, assert_type + +import numpy as np + +# Subtype of tuple[int, int] +class XYGrid(NamedTuple): + x_axis: int + y_axis: int + +arr: np.ndarray[XYGrid, Any] + +# Test shape property matches shape typevar +assert_type(arr.shape, XYGrid) diff --git a/local_packages/numpy/typing/tests/data/reveal/shape_base.pyi b/local_packages/numpy/typing/tests/data/reveal/shape_base.pyi new file mode 100644 index 0000000..e409a53 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/shape_base.pyi @@ -0,0 +1,52 @@ +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +i8: np.int64 +f8: np.float64 + +AR_b: npt.NDArray[np.bool] +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] + +AR_LIKE_f8: list[float] + +assert_type(np.take_along_axis(AR_f8, AR_i8, axis=1), npt.NDArray[np.float64]) +assert_type(np.take_along_axis(f8, AR_i8, axis=None), npt.NDArray[np.float64]) + +assert_type(np.put_along_axis(AR_f8, AR_i8, "1.0", axis=1), None) + +assert_type(np.expand_dims(AR_i8, 2), npt.NDArray[np.int64]) +assert_type(np.expand_dims(AR_LIKE_f8, 2), npt.NDArray[Any]) + +assert_type(np.column_stack([AR_i8]), npt.NDArray[np.int64]) +assert_type(np.column_stack([AR_LIKE_f8]), npt.NDArray[Any]) + +assert_type(np.dstack([AR_i8]), npt.NDArray[np.int64]) +assert_type(np.dstack([AR_LIKE_f8]), npt.NDArray[Any]) + +assert_type(np.array_split(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) +assert_type(np.array_split(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) + +assert_type(np.split(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) +assert_type(np.split(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) + +assert_type(np.hsplit(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) +assert_type(np.hsplit(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) + +assert_type(np.vsplit(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) +assert_type(np.vsplit(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) + +assert_type(np.dsplit(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) +assert_type(np.dsplit(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) + +assert_type(np.kron(AR_b, AR_b), npt.NDArray[np.bool]) +assert_type(np.kron(AR_b, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.kron(AR_f8, AR_f8), npt.NDArray[np.floating]) + +assert_type(np.tile(AR_i8, 5), npt.NDArray[np.int64]) +assert_type(np.tile(AR_LIKE_f8, [2, 2]), npt.NDArray[Any]) + +assert_type(np.unstack(AR_i8, axis=0), tuple[npt.NDArray[np.int64], ...]) +assert_type(np.unstack(AR_LIKE_f8, axis=0), tuple[npt.NDArray[Any], ...]) diff --git a/local_packages/numpy/typing/tests/data/reveal/stride_tricks.pyi b/local_packages/numpy/typing/tests/data/reveal/stride_tricks.pyi new file mode 100644 index 0000000..8fde9b8 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/stride_tricks.pyi @@ -0,0 +1,27 @@ +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] +AR_LIKE_f: list[float] +interface_dict: dict[str, Any] + +assert_type(np.lib.stride_tricks.as_strided(AR_f8), npt.NDArray[np.float64]) +assert_type(np.lib.stride_tricks.as_strided(AR_LIKE_f), npt.NDArray[Any]) +assert_type(np.lib.stride_tricks.as_strided(AR_f8, strides=(1, 5)), npt.NDArray[np.float64]) +assert_type(np.lib.stride_tricks.as_strided(AR_f8, shape=[9, 20]), npt.NDArray[np.float64]) + +assert_type(np.lib.stride_tricks.sliding_window_view(AR_f8, 5), npt.NDArray[np.float64]) +assert_type(np.lib.stride_tricks.sliding_window_view(AR_LIKE_f, (1, 5)), npt.NDArray[Any]) +assert_type(np.lib.stride_tricks.sliding_window_view(AR_f8, [9], axis=1), npt.NDArray[np.float64]) + +assert_type(np.broadcast_to(AR_f8, 5), npt.NDArray[np.float64]) +assert_type(np.broadcast_to(AR_LIKE_f, (1, 5)), npt.NDArray[Any]) +assert_type(np.broadcast_to(AR_f8, [4, 6], subok=True), npt.NDArray[np.float64]) + +assert_type(np.broadcast_shapes((1, 2), [3, 1], (3, 2)), tuple[Any, ...]) +assert_type(np.broadcast_shapes((6, 7), (5, 6, 1), 7, (5, 1, 7)), tuple[Any, ...]) + +assert_type(np.broadcast_arrays(AR_f8, AR_f8), tuple[npt.NDArray[Any], ...]) +assert_type(np.broadcast_arrays(AR_f8, AR_LIKE_f), tuple[npt.NDArray[Any], ...]) diff --git a/local_packages/numpy/typing/tests/data/reveal/strings.pyi b/local_packages/numpy/typing/tests/data/reveal/strings.pyi new file mode 100644 index 0000000..18bd252 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/strings.pyi @@ -0,0 +1,196 @@ +from typing import TypeAlias, assert_type + +import numpy as np +import numpy._typing as np_t +import numpy.typing as npt + +AR_T_alias: TypeAlias = np.ndarray[np_t._AnyShape, np.dtypes.StringDType] +AR_TU_alias: TypeAlias = AR_T_alias | npt.NDArray[np.str_] + +AR_U: npt.NDArray[np.str_] +AR_S: npt.NDArray[np.bytes_] +AR_T: AR_T_alias + +assert_type(np.strings.equal(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.equal(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.not_equal(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.not_equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.not_equal(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.greater_equal(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.greater_equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.greater_equal(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.less_equal(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.less_equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.less_equal(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.greater(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.greater(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.greater(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.less(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.less(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.less(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.add(AR_U, AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.add(AR_S, AR_S), npt.NDArray[np.bytes_]) +assert_type(np.strings.add(AR_T, AR_T), AR_T_alias) + +assert_type(np.strings.multiply(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.strings.multiply(AR_S, [5, 4, 3]), npt.NDArray[np.bytes_]) +assert_type(np.strings.multiply(AR_T, 5), AR_T_alias) + +assert_type(np.strings.mod(AR_U, "test"), npt.NDArray[np.str_]) +assert_type(np.strings.mod(AR_S, "test"), npt.NDArray[np.bytes_]) +assert_type(np.strings.mod(AR_T, "test"), AR_T_alias) + +assert_type(np.strings.capitalize(AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.capitalize(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.strings.capitalize(AR_T), AR_T_alias) + +assert_type(np.strings.center(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.strings.center(AR_S, [2, 3, 4], b"a"), npt.NDArray[np.bytes_]) +assert_type(np.strings.center(AR_T, 5), AR_T_alias) + +assert_type(np.strings.encode(AR_U), npt.NDArray[np.bytes_]) +assert_type(np.strings.encode(AR_T), npt.NDArray[np.bytes_]) +assert_type(np.strings.decode(AR_S), npt.NDArray[np.str_]) + +assert_type(np.strings.expandtabs(AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.expandtabs(AR_S, tabsize=4), npt.NDArray[np.bytes_]) +assert_type(np.strings.expandtabs(AR_T), AR_T_alias) + +assert_type(np.strings.ljust(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.strings.ljust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.strings.ljust(AR_T, 5), AR_T_alias) +assert_type(np.strings.ljust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_T_alias) + +assert_type(np.strings.rjust(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.strings.rjust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.strings.rjust(AR_T, 5), AR_T_alias) +assert_type(np.strings.rjust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_T_alias) + +assert_type(np.strings.lstrip(AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.lstrip(AR_S, b"_"), npt.NDArray[np.bytes_]) +assert_type(np.strings.lstrip(AR_T), AR_T_alias) +assert_type(np.strings.lstrip(AR_T, "_"), AR_T_alias) + +assert_type(np.strings.rstrip(AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.rstrip(AR_S, b"_"), npt.NDArray[np.bytes_]) +assert_type(np.strings.rstrip(AR_T), AR_T_alias) +assert_type(np.strings.rstrip(AR_T, "_"), AR_T_alias) + +assert_type(np.strings.strip(AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.strip(AR_S, b"_"), npt.NDArray[np.bytes_]) +assert_type(np.strings.strip(AR_T), AR_T_alias) +assert_type(np.strings.strip(AR_T, "_"), AR_T_alias) + +assert_type(np.strings.count(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.strings.count(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.strings.count(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.strings.count(AR_T, ["a", "b", "c"], end=9), npt.NDArray[np.int_]) + +assert_type(np.strings.partition(AR_U, "\n"), npt.NDArray[np.str_]) +assert_type(np.strings.partition(AR_S, [b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.strings.partition(AR_T, "\n"), AR_TU_alias) + +assert_type(np.strings.rpartition(AR_U, "\n"), npt.NDArray[np.str_]) +assert_type(np.strings.rpartition(AR_S, [b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.strings.rpartition(AR_T, "\n"), AR_TU_alias) + +assert_type(np.strings.replace(AR_U, "_", "-"), npt.NDArray[np.str_]) +assert_type(np.strings.replace(AR_S, [b"_", b""], [b"a", b"b"]), npt.NDArray[np.bytes_]) +assert_type(np.strings.replace(AR_T, "_", "_"), AR_TU_alias) + +assert_type(np.strings.lower(AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.lower(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.strings.lower(AR_T), AR_T_alias) + +assert_type(np.strings.upper(AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.upper(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.strings.upper(AR_T), AR_T_alias) + +assert_type(np.strings.swapcase(AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.swapcase(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.strings.swapcase(AR_T), AR_T_alias) + +assert_type(np.strings.title(AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.title(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.strings.title(AR_T), AR_T_alias) + +assert_type(np.strings.zfill(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.strings.zfill(AR_S, [2, 3, 4]), npt.NDArray[np.bytes_]) +assert_type(np.strings.zfill(AR_T, 5), AR_T_alias) + +assert_type(np.strings.endswith(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) +assert_type(np.strings.endswith(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.bool]) +assert_type(np.strings.endswith(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) + +assert_type(np.strings.startswith(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) +assert_type(np.strings.startswith(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.bool]) +assert_type(np.strings.startswith(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) + +assert_type(np.strings.find(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.strings.find(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.strings.find(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) + +assert_type(np.strings.rfind(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.strings.rfind(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.strings.rfind(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) + +assert_type(np.strings.index(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.strings.index(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.strings.index(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) + +assert_type(np.strings.rindex(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.strings.rindex(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.strings.rindex(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) + +assert_type(np.strings.isalpha(AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.isalpha(AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.isalpha(AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.isalnum(AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.isalnum(AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.isalnum(AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.isdecimal(AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.isdecimal(AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.isdigit(AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.isdigit(AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.isdigit(AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.islower(AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.islower(AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.islower(AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.isnumeric(AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.isnumeric(AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.isspace(AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.isspace(AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.isspace(AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.istitle(AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.istitle(AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.istitle(AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.isupper(AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.isupper(AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.isupper(AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.str_len(AR_U), npt.NDArray[np.int_]) +assert_type(np.strings.str_len(AR_S), npt.NDArray[np.int_]) +assert_type(np.strings.str_len(AR_T), npt.NDArray[np.int_]) + +assert_type(np.strings.translate(AR_U, ""), npt.NDArray[np.str_]) +assert_type(np.strings.translate(AR_S, ""), npt.NDArray[np.bytes_]) +assert_type(np.strings.translate(AR_T, ""), AR_T_alias) + +assert_type(np.strings.slice(AR_U, 1, 5, 2), npt.NDArray[np.str_]) +assert_type(np.strings.slice(AR_S, 1, 5, 2), npt.NDArray[np.bytes_]) +assert_type(np.strings.slice(AR_T, 1, 5, 2), AR_T_alias) diff --git a/local_packages/numpy/typing/tests/data/reveal/testing.pyi b/local_packages/numpy/typing/tests/data/reveal/testing.pyi new file mode 100644 index 0000000..34fbc5f --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/testing.pyi @@ -0,0 +1,198 @@ +import contextlib +import re +import sys +import types +import unittest +import warnings +from collections.abc import Callable +from pathlib import Path +from typing import Any, TypeVar, assert_type + +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] +AR_i8: npt.NDArray[np.int64] + +bool_obj: bool +suppress_obj: np.testing.suppress_warnings # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +FT = TypeVar("FT", bound=Callable[..., Any]) + +def func() -> int: ... + +def func2( + x: npt.NDArray[np.number], + y: npt.NDArray[np.number], +) -> npt.NDArray[np.bool]: ... + +assert_type(np.testing.KnownFailureException(), np.testing.KnownFailureException) +assert_type(np.testing.IgnoreException(), np.testing.IgnoreException) + +assert_type( + np.testing.clear_and_catch_warnings(modules=[np.testing]), + np.testing.clear_and_catch_warnings[None], +) +assert_type( + np.testing.clear_and_catch_warnings(True), + np.testing.clear_and_catch_warnings[list[warnings.WarningMessage]], +) +assert_type( + np.testing.clear_and_catch_warnings(False), + np.testing.clear_and_catch_warnings[None], +) +assert_type( + np.testing.clear_and_catch_warnings(bool_obj), + np.testing.clear_and_catch_warnings, +) +assert_type( + np.testing.clear_and_catch_warnings.class_modules, + tuple[types.ModuleType, ...], +) +assert_type( + np.testing.clear_and_catch_warnings.modules, + set[types.ModuleType], +) + +with np.testing.clear_and_catch_warnings(True) as c1: + assert_type(c1, list[warnings.WarningMessage]) +with np.testing.clear_and_catch_warnings() as c2: + assert_type(c2, None) + +assert_type(np.testing.suppress_warnings("once"), np.testing.suppress_warnings) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +assert_type(np.testing.suppress_warnings()(func), Callable[[], int]) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +assert_type(suppress_obj.filter(RuntimeWarning), None) +assert_type(suppress_obj.record(RuntimeWarning), list[warnings.WarningMessage]) +with suppress_obj as c3: + assert_type(c3, np.testing.suppress_warnings) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] + +assert_type(np.testing.verbose, int) +assert_type(np.testing.IS_PYPY, bool) +assert_type(np.testing.HAS_REFCOUNT, bool) +assert_type(np.testing.HAS_LAPACK64, bool) + +assert_type(np.testing.assert_(1, msg="test"), None) +assert_type(np.testing.assert_(2, msg=lambda: "test"), None) + +if sys.platform == "win32" or sys.platform == "cygwin": + assert_type(np.testing.memusage(), int) +elif sys.platform == "linux": + assert_type(np.testing.memusage(), int | None) + +assert_type(np.testing.jiffies(), int) + +assert_type(np.testing.build_err_msg([0, 1, 2], "test"), str) +assert_type(np.testing.build_err_msg(range(2), "test", header="header"), str) +assert_type(np.testing.build_err_msg(np.arange(9).reshape(3, 3), "test", verbose=False), str) +assert_type(np.testing.build_err_msg("abc", "test", names=["x", "y"]), str) +assert_type(np.testing.build_err_msg([1.0, 2.0], "test", precision=5), str) + +assert_type(np.testing.assert_equal({1}, {1}), None) +assert_type(np.testing.assert_equal([1, 2, 3], [1, 2, 3], err_msg="fail"), None) +assert_type(np.testing.assert_equal(1, 1.0, verbose=True), None) + +assert_type(np.testing.print_assert_equal("Test XYZ of func xyz", [0, 1], [0, 1]), None) + +assert_type(np.testing.assert_almost_equal(1.0, 1.1), None) +assert_type(np.testing.assert_almost_equal([1, 2, 3], [1, 2, 3], err_msg="fail"), None) +assert_type(np.testing.assert_almost_equal(1, 1.0, verbose=True), None) +assert_type(np.testing.assert_almost_equal(1, 1.0001, decimal=2), None) + +assert_type(np.testing.assert_approx_equal(1.0, 1.1), None) +assert_type(np.testing.assert_approx_equal("1", "2", err_msg="fail"), None) +assert_type(np.testing.assert_approx_equal(1, 1.0, verbose=True), None) +assert_type(np.testing.assert_approx_equal(1, 1.0001, significant=2), None) + +assert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, err_msg="test"), None) +assert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, verbose=True), None) +assert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, header="header"), None) +assert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, precision=np.int64()), None) +assert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, equal_nan=False), None) +assert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, equal_inf=True), None) + +assert_type(np.testing.assert_array_equal(AR_i8, AR_f8), None) +assert_type(np.testing.assert_array_equal(AR_i8, AR_f8, err_msg="test"), None) +assert_type(np.testing.assert_array_equal(AR_i8, AR_f8, verbose=True), None) + +assert_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8), None) +assert_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8, err_msg="test"), None) +assert_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8, verbose=True), None) +assert_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8, decimal=1), None) + +assert_type(np.testing.assert_array_less(AR_i8, AR_f8), None) +assert_type(np.testing.assert_array_less(AR_i8, AR_f8, err_msg="test"), None) +assert_type(np.testing.assert_array_less(AR_i8, AR_f8, verbose=True), None) + +assert_type(np.testing.runstring("1 + 1", {}), Any) +assert_type(np.testing.runstring("int64() + 1", {"int64": np.int64}), Any) + +assert_type(np.testing.assert_string_equal("1", "1"), None) + +assert_type(np.testing.rundocs(), None) +assert_type(np.testing.rundocs("test.py"), None) +assert_type(np.testing.rundocs(Path("test.py"), raise_on_error=True), None) + +def func3(a: int) -> bool: ... + +assert_type( + np.testing.assert_raises(RuntimeWarning), + unittest.case._AssertRaisesContext[RuntimeWarning], +) +assert_type(np.testing.assert_raises(RuntimeWarning, func3, 5), None) + +assert_type( + np.testing.assert_raises_regex(RuntimeWarning, r"test"), + unittest.case._AssertRaisesContext[RuntimeWarning], +) +assert_type(np.testing.assert_raises_regex(RuntimeWarning, b"test", func3, 5), None) +assert_type(np.testing.assert_raises_regex(RuntimeWarning, re.compile(b"test"), func3, 5), None) + +class Test: ... + +def decorate(a: FT) -> FT: + return a + +assert_type(np.testing.decorate_methods(Test, decorate), None) +assert_type(np.testing.decorate_methods(Test, decorate, None), None) +assert_type(np.testing.decorate_methods(Test, decorate, "test"), None) +assert_type(np.testing.decorate_methods(Test, decorate, b"test"), None) +assert_type(np.testing.decorate_methods(Test, decorate, re.compile("test")), None) + +assert_type(np.testing.measure("for i in range(1000): np.sqrt(i**2)"), float) +assert_type(np.testing.measure(b"for i in range(1000): np.sqrt(i**2)", times=5), float) + +assert_type(np.testing.assert_allclose(AR_i8, AR_f8), None) +assert_type(np.testing.assert_allclose(AR_i8, AR_f8, rtol=0.005), None) +assert_type(np.testing.assert_allclose(AR_i8, AR_f8, atol=1), None) +assert_type(np.testing.assert_allclose(AR_i8, AR_f8, equal_nan=True), None) +assert_type(np.testing.assert_allclose(AR_i8, AR_f8, err_msg="err"), None) +assert_type(np.testing.assert_allclose(AR_i8, AR_f8, verbose=False), None) + +assert_type(np.testing.assert_array_almost_equal_nulp(AR_i8, AR_f8, nulp=2), None) + +assert_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, maxulp=2), npt.NDArray[Any]) +assert_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, dtype=np.float32), npt.NDArray[Any]) + +assert_type(np.testing.assert_warns(RuntimeWarning), contextlib._GeneratorContextManager[None]) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +assert_type(np.testing.assert_warns(RuntimeWarning, func3, 5), bool) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] + +def func4(a: int, b: str) -> bool: ... + +assert_type(np.testing.assert_no_warnings(), contextlib._GeneratorContextManager[None]) +assert_type(np.testing.assert_no_warnings(func3, 5), bool) +assert_type(np.testing.assert_no_warnings(func4, a=1, b="test"), bool) +assert_type(np.testing.assert_no_warnings(func4, 1, "test"), bool) + +assert_type(np.testing.tempdir("test_dir"), contextlib._GeneratorContextManager[str]) +assert_type(np.testing.tempdir(prefix=b"test"), contextlib._GeneratorContextManager[bytes]) +assert_type(np.testing.tempdir("test_dir", dir=Path("here")), contextlib._GeneratorContextManager[str]) + +assert_type(np.testing.temppath("test_dir", text=True), contextlib._GeneratorContextManager[str]) +assert_type(np.testing.temppath(prefix=b"test"), contextlib._GeneratorContextManager[bytes]) +assert_type(np.testing.temppath("test_dir", dir=Path("here")), contextlib._GeneratorContextManager[str]) + +assert_type(np.testing.assert_no_gc_cycles(), contextlib._GeneratorContextManager[None]) +assert_type(np.testing.assert_no_gc_cycles(func3, 5), None) + +assert_type(np.testing.break_cycles(), None) + +assert_type(np.testing.TestCase(), unittest.case.TestCase) diff --git a/local_packages/numpy/typing/tests/data/reveal/twodim_base.pyi b/local_packages/numpy/typing/tests/data/reveal/twodim_base.pyi new file mode 100644 index 0000000..1fd17e3 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/twodim_base.pyi @@ -0,0 +1,225 @@ +from typing import Any, TypeAlias, TypeVar, assert_type, type_check_only + +import numpy as np +import numpy.typing as npt + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) + +_1D: TypeAlias = tuple[int] +_2D: TypeAlias = tuple[int, int] +_ND: TypeAlias = tuple[Any, ...] + +_Indices2D: TypeAlias = tuple[ + np.ndarray[_1D, np.dtype[np.intp]], + np.ndarray[_1D, np.dtype[np.intp]], +] + +### + +_nd_bool: np.ndarray[_ND, np.dtype[np.bool]] +_1d_bool: np.ndarray[_1D, np.dtype[np.bool]] +_2d_bool: np.ndarray[_2D, np.dtype[np.bool]] +_nd_u64: np.ndarray[_ND, np.dtype[np.uint64]] +_nd_i64: np.ndarray[_ND, np.dtype[np.int64]] +_nd_f64: np.ndarray[_ND, np.dtype[np.float64]] +_nd_c128: np.ndarray[_ND, np.dtype[np.complex128]] +_nd_obj: np.ndarray[_ND, np.dtype[np.object_]] + +_to_nd_bool: list[bool] | list[list[bool]] +_to_1d_bool: list[bool] +_to_2d_bool: list[list[bool]] + +_to_1d_f64: list[float] +_to_1d_c128: list[complex] + +@type_check_only +def func1(ar: npt.NDArray[_ScalarT], a: int) -> npt.NDArray[_ScalarT]: ... +@type_check_only +def func2(ar: npt.NDArray[np.number], a: str) -> npt.NDArray[np.float64]: ... + +@type_check_only +class _Cube: + shape = 3, 4 + ndim = 2 + +### + +# fliplr +assert_type(np.fliplr(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.fliplr(_1d_bool), np.ndarray[_1D, np.dtype[np.bool]]) +assert_type(np.fliplr(_2d_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.fliplr(_to_nd_bool), np.ndarray) +assert_type(np.fliplr(_to_1d_bool), np.ndarray) +assert_type(np.fliplr(_to_2d_bool), np.ndarray) + +# flipud +assert_type(np.flipud(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.flipud(_1d_bool), np.ndarray[_1D, np.dtype[np.bool]]) +assert_type(np.flipud(_2d_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.flipud(_to_nd_bool), np.ndarray) +assert_type(np.flipud(_to_1d_bool), np.ndarray) +assert_type(np.flipud(_to_2d_bool), np.ndarray) + +# eye +assert_type(np.eye(10), np.ndarray[_2D, np.dtype[np.float64]]) +assert_type(np.eye(10, M=20, dtype=np.int64), np.ndarray[_2D, np.dtype[np.int64]]) +assert_type(np.eye(10, k=2, dtype=int), np.ndarray[_2D]) + +# diag +assert_type(np.diag(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.diag(_1d_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.diag(_2d_bool), np.ndarray[_1D, np.dtype[np.bool]]) +assert_type(np.diag(_to_nd_bool, k=0), np.ndarray) +assert_type(np.diag(_to_1d_bool, k=0), np.ndarray[_2D]) +assert_type(np.diag(_to_2d_bool, k=0), np.ndarray[_1D]) + +# diagflat +assert_type(np.diagflat(_nd_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.diagflat(_1d_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.diagflat(_2d_bool), np.ndarray[_2D, np.dtype[np.bool]]) +assert_type(np.diagflat(_to_nd_bool, k=0), np.ndarray[_2D]) +assert_type(np.diagflat(_to_1d_bool, k=0), np.ndarray[_2D]) +assert_type(np.diagflat(_to_2d_bool, k=0), np.ndarray[_2D]) + +# tri +assert_type(np.tri(10), np.ndarray[_2D, np.dtype[np.float64]]) +assert_type(np.tri(10, M=20, dtype=np.int64), np.ndarray[_2D, np.dtype[np.int64]]) +assert_type(np.tri(10, k=2, dtype=int), np.ndarray[_2D]) + +# tril +assert_type(np.tril(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.tril(_to_nd_bool, k=0), np.ndarray) +assert_type(np.tril(_to_1d_bool, k=0), np.ndarray) +assert_type(np.tril(_to_2d_bool, k=0), np.ndarray) + +# triu +assert_type(np.triu(_nd_bool), np.ndarray[_ND, np.dtype[np.bool]]) +assert_type(np.triu(_to_nd_bool, k=0), np.ndarray) +assert_type(np.triu(_to_1d_bool, k=0), np.ndarray) +assert_type(np.triu(_to_2d_bool, k=0), np.ndarray) + +# vander +assert_type(np.vander(_nd_bool), np.ndarray[_2D, np.dtype[np.int_]]) +assert_type(np.vander(_nd_u64), np.ndarray[_2D, np.dtype[np.uint64]]) +assert_type(np.vander(_nd_i64, N=2), np.ndarray[_2D, np.dtype[np.int64]]) +assert_type(np.vander(_nd_f64, increasing=True), np.ndarray[_2D, np.dtype[np.float64]]) +assert_type(np.vander(_nd_c128), np.ndarray[_2D, np.dtype[np.complex128]]) +assert_type(np.vander(_nd_obj), np.ndarray[_2D, np.dtype[np.object_]]) + +# histogram2d +assert_type( + np.histogram2d(_to_1d_f64, _to_1d_f64), + tuple[ + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + ], +) +assert_type( + np.histogram2d(_to_1d_c128, _to_1d_c128), + tuple[ + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128 | Any]], + np.ndarray[_1D, np.dtype[np.complex128 | Any]], + ], +) +assert_type( + np.histogram2d(_nd_i64, _nd_bool), + tuple[ + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + ], +) +assert_type( + np.histogram2d(_nd_f64, _nd_i64), + tuple[ + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + ], +) +assert_type( + np.histogram2d(_nd_i64, _nd_f64), + tuple[ + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.float64]], + ], +) +assert_type( + np.histogram2d(_nd_f64, _nd_c128, weights=_to_1d_bool), + tuple[ + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128]], + np.ndarray[_1D, np.dtype[np.complex128]], + ], +) +assert_type( + np.histogram2d(_nd_f64, _nd_c128, bins=8), + tuple[ + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128]], + np.ndarray[_1D, np.dtype[np.complex128]], + ], +) +assert_type( + np.histogram2d(_nd_c128, _nd_f64, bins=(8, 5)), + tuple[ + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128]], + np.ndarray[_1D, np.dtype[np.complex128]], + ], +) +assert_type( + np.histogram2d(_nd_c128, _nd_i64, bins=_nd_u64), + tuple[ + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.uint64]], + np.ndarray[_1D, np.dtype[np.uint64]], + ], +) +assert_type( + np.histogram2d(_nd_c128, _nd_c128, bins=(_nd_u64, _nd_u64)), + tuple[ + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.uint64]], + np.ndarray[_1D, np.dtype[np.uint64]], + ], +) +assert_type( + np.histogram2d(_nd_c128, _nd_c128, bins=(_nd_bool, 8)), + tuple[ + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128 | np.bool]], + np.ndarray[_1D, np.dtype[np.complex128 | np.bool]], + ], +) +assert_type( + np.histogram2d(_nd_c128, _nd_c128, bins=(_to_1d_f64, 8)), + tuple[ + np.ndarray[_2D, np.dtype[np.float64]], + np.ndarray[_1D, np.dtype[np.complex128 | Any]], + np.ndarray[_1D, np.dtype[np.complex128 | Any]], + ], +) + +# mask_indices +assert_type(np.mask_indices(10, func1), _Indices2D) +assert_type(np.mask_indices(8, func2, "0"), _Indices2D) + +# tril_indices +assert_type(np.tril_indices(3), _Indices2D) +assert_type(np.tril_indices(3, 1), _Indices2D) +assert_type(np.tril_indices(3, 1, 2), _Indices2D) +# tril_indices +assert_type(np.triu_indices(3), _Indices2D) +assert_type(np.triu_indices(3, 1), _Indices2D) +assert_type(np.triu_indices(3, 1, 2), _Indices2D) + +# tril_indices_from +assert_type(np.tril_indices_from(_2d_bool), _Indices2D) +assert_type(np.tril_indices_from(_Cube()), _Indices2D) +# triu_indices_from +assert_type(np.triu_indices_from(_2d_bool), _Indices2D) +assert_type(np.triu_indices_from(_Cube()), _Indices2D) diff --git a/local_packages/numpy/typing/tests/data/reveal/type_check.pyi b/local_packages/numpy/typing/tests/data/reveal/type_check.pyi new file mode 100644 index 0000000..df95da7 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/type_check.pyi @@ -0,0 +1,67 @@ +from typing import Any, Literal, assert_type + +import numpy as np +import numpy.typing as npt + +f8: np.float64 +f: float + +# NOTE: Avoid importing the platform specific `np.float128` type +AR_i8: npt.NDArray[np.int64] +AR_i4: npt.NDArray[np.int32] +AR_f2: npt.NDArray[np.float16] +AR_f8: npt.NDArray[np.float64] +AR_f16: npt.NDArray[np.longdouble] +AR_c8: npt.NDArray[np.complex64] +AR_c16: npt.NDArray[np.complex128] + +AR_LIKE_f: list[float] + +class ComplexObj: + real: slice + imag: slice + +assert_type(np.mintypecode(["f8"], typeset="qfQF"), str) + +assert_type(np.real(ComplexObj()), slice) +assert_type(np.real(AR_f8), npt.NDArray[np.float64]) +assert_type(np.real(AR_c16), npt.NDArray[np.float64]) +assert_type(np.real(AR_LIKE_f), npt.NDArray[Any]) + +assert_type(np.imag(ComplexObj()), slice) +assert_type(np.imag(AR_f8), npt.NDArray[np.float64]) +assert_type(np.imag(AR_c16), npt.NDArray[np.float64]) +assert_type(np.imag(AR_LIKE_f), npt.NDArray[Any]) + +assert_type(np.iscomplex(f8), np.bool) +assert_type(np.iscomplex(AR_f8), npt.NDArray[np.bool]) +assert_type(np.iscomplex(AR_LIKE_f), npt.NDArray[np.bool]) + +assert_type(np.isreal(f8), np.bool) +assert_type(np.isreal(AR_f8), npt.NDArray[np.bool]) +assert_type(np.isreal(AR_LIKE_f), npt.NDArray[np.bool]) + +assert_type(np.iscomplexobj(f8), bool) +assert_type(np.isrealobj(f8), bool) + +assert_type(np.nan_to_num(f8), np.float64) +assert_type(np.nan_to_num(f, copy=True), Any) +assert_type(np.nan_to_num(AR_f8, nan=1.5), npt.NDArray[np.float64]) +assert_type(np.nan_to_num(AR_LIKE_f, posinf=9999), npt.NDArray[Any]) + +assert_type(np.real_if_close(AR_f8), npt.NDArray[np.float64]) +assert_type(np.real_if_close(AR_c16), npt.NDArray[np.float64 | np.complex128]) +assert_type(np.real_if_close(AR_c8), npt.NDArray[np.float32 | np.complex64]) +assert_type(np.real_if_close(AR_LIKE_f), npt.NDArray[Any]) + +assert_type(np.typename("h"), Literal["short"]) +assert_type(np.typename("B"), Literal["unsigned char"]) +assert_type(np.typename("V"), Literal["void"]) +assert_type(np.typename("S1"), Literal["character"]) + +assert_type(np.common_type(AR_i4), type[np.float64]) +assert_type(np.common_type(AR_f2), type[np.float16]) +assert_type(np.common_type(AR_f2, AR_i4), type[np.float64]) +assert_type(np.common_type(AR_f16, AR_i4), type[np.longdouble]) +assert_type(np.common_type(AR_c8, AR_f2), type[np.complex64]) +assert_type(np.common_type(AR_f2, AR_c8, AR_i4), type[np.complexfloating]) diff --git a/local_packages/numpy/typing/tests/data/reveal/ufunc_config.pyi b/local_packages/numpy/typing/tests/data/reveal/ufunc_config.pyi new file mode 100644 index 0000000..77c27eb --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/ufunc_config.pyi @@ -0,0 +1,29 @@ +"""Typing tests for `_core._ufunc_config`.""" + +from _typeshed import SupportsWrite +from collections.abc import Callable +from typing import Any, assert_type + +import numpy as np + +def func(a: str, b: int) -> None: ... + +class Write: + def write(self, value: str) -> None: ... + +assert_type(np.seterr(all=None), np._core._ufunc_config._ErrDict) +assert_type(np.seterr(divide="ignore"), np._core._ufunc_config._ErrDict) +assert_type(np.seterr(over="warn"), np._core._ufunc_config._ErrDict) +assert_type(np.seterr(under="call"), np._core._ufunc_config._ErrDict) +assert_type(np.seterr(invalid="raise"), np._core._ufunc_config._ErrDict) +assert_type(np.geterr(), np._core._ufunc_config._ErrDict) + +assert_type(np.setbufsize(4096), int) +assert_type(np.getbufsize(), int) + +assert_type(np.seterrcall(func), Callable[[str, int], Any] | SupportsWrite[str] | None) +assert_type(np.seterrcall(Write()), Callable[[str, int], Any] | SupportsWrite[str] | None) +assert_type(np.geterrcall(), Callable[[str, int], Any] | SupportsWrite[str] | None) + +assert_type(np.errstate(call=func, all="call"), np.errstate) +assert_type(np.errstate(call=Write(), divide="log", over="log"), np.errstate) diff --git a/local_packages/numpy/typing/tests/data/reveal/ufunclike.pyi b/local_packages/numpy/typing/tests/data/reveal/ufunclike.pyi new file mode 100644 index 0000000..c679b82 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/ufunclike.pyi @@ -0,0 +1,31 @@ +from typing import assert_type + +import numpy as np +import numpy.typing as npt + +AR_LIKE_b: list[bool] +AR_LIKE_u: list[np.uint32] +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_O: list[np.object_] + +AR_U: npt.NDArray[np.str_] + +assert_type(np.fix(AR_LIKE_b), npt.NDArray[np.floating]) # type: ignore[deprecated] +assert_type(np.fix(AR_LIKE_u), npt.NDArray[np.floating]) # type: ignore[deprecated] +assert_type(np.fix(AR_LIKE_i), npt.NDArray[np.floating]) # type: ignore[deprecated] +assert_type(np.fix(AR_LIKE_f), npt.NDArray[np.floating]) # type: ignore[deprecated] +assert_type(np.fix(AR_LIKE_O), npt.NDArray[np.object_]) # type: ignore[deprecated] +assert_type(np.fix(AR_LIKE_f, out=AR_U), npt.NDArray[np.str_]) # type: ignore[deprecated] + +assert_type(np.isposinf(AR_LIKE_b), npt.NDArray[np.bool]) +assert_type(np.isposinf(AR_LIKE_u), npt.NDArray[np.bool]) +assert_type(np.isposinf(AR_LIKE_i), npt.NDArray[np.bool]) +assert_type(np.isposinf(AR_LIKE_f), npt.NDArray[np.bool]) +assert_type(np.isposinf(AR_LIKE_f, out=AR_U), npt.NDArray[np.str_]) + +assert_type(np.isneginf(AR_LIKE_b), npt.NDArray[np.bool]) +assert_type(np.isneginf(AR_LIKE_u), npt.NDArray[np.bool]) +assert_type(np.isneginf(AR_LIKE_i), npt.NDArray[np.bool]) +assert_type(np.isneginf(AR_LIKE_f), npt.NDArray[np.bool]) +assert_type(np.isneginf(AR_LIKE_f, out=AR_U), npt.NDArray[np.str_]) diff --git a/local_packages/numpy/typing/tests/data/reveal/ufuncs.pyi b/local_packages/numpy/typing/tests/data/reveal/ufuncs.pyi new file mode 100644 index 0000000..eda92f2 --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/ufuncs.pyi @@ -0,0 +1,142 @@ +from typing import Any, Literal, NoReturn, assert_type + +import numpy as np +import numpy.typing as npt + +i8: np.int64 +f8: np.float64 +AR_f8: npt.NDArray[np.float64] +AR_i8: npt.NDArray[np.int64] + +assert_type(np.absolute.__doc__, str) +assert_type(np.absolute.types, list[str]) + +assert_type(np.absolute.__name__, Literal["absolute"]) +assert_type(np.absolute.__qualname__, Literal["absolute"]) +assert_type(np.absolute.ntypes, Literal[20]) +assert_type(np.absolute.identity, None) +assert_type(np.absolute.nin, Literal[1]) +assert_type(np.absolute.nin, Literal[1]) +assert_type(np.absolute.nout, Literal[1]) +assert_type(np.absolute.nargs, Literal[2]) +assert_type(np.absolute.signature, None) +assert_type(np.absolute(f8), Any) +assert_type(np.absolute(AR_f8), npt.NDArray[Any]) +assert_type(np.absolute.at(AR_f8, AR_i8), None) + +assert_type(np.add.__name__, Literal["add"]) +assert_type(np.add.__qualname__, Literal["add"]) +assert_type(np.add.ntypes, Literal[22]) +assert_type(np.add.identity, Literal[0]) +assert_type(np.add.nin, Literal[2]) +assert_type(np.add.nout, Literal[1]) +assert_type(np.add.nargs, Literal[3]) +assert_type(np.add.signature, None) +assert_type(np.add(f8, f8), Any) +assert_type(np.add(AR_f8, f8), npt.NDArray[Any]) +assert_type(np.add.at(AR_f8, AR_i8, f8), None) +assert_type(np.add.reduce(AR_f8, axis=0), Any) +assert_type(np.add.accumulate(AR_f8), npt.NDArray[Any]) +assert_type(np.add.reduceat(AR_f8, AR_i8), npt.NDArray[Any]) +assert_type(np.add.outer(f8, f8), Any) +assert_type(np.add.outer(AR_f8, f8), npt.NDArray[Any]) + +assert_type(np.frexp.__name__, Literal["frexp"]) +assert_type(np.frexp.__qualname__, Literal["frexp"]) +assert_type(np.frexp.ntypes, Literal[4]) +assert_type(np.frexp.identity, None) +assert_type(np.frexp.nin, Literal[1]) +assert_type(np.frexp.nout, Literal[2]) +assert_type(np.frexp.nargs, Literal[3]) +assert_type(np.frexp.signature, None) +assert_type(np.frexp(f8), tuple[Any, Any]) +assert_type(np.frexp(AR_f8), tuple[npt.NDArray[Any], npt.NDArray[Any]]) + +assert_type(np.divmod.__name__, Literal["divmod"]) +assert_type(np.divmod.__qualname__, Literal["divmod"]) +assert_type(np.divmod.ntypes, Literal[15]) +assert_type(np.divmod.identity, None) +assert_type(np.divmod.nin, Literal[2]) +assert_type(np.divmod.nout, Literal[2]) +assert_type(np.divmod.nargs, Literal[4]) +assert_type(np.divmod.signature, None) +assert_type(np.divmod(f8, f8), tuple[Any, Any]) +assert_type(np.divmod(AR_f8, f8), tuple[npt.NDArray[Any], npt.NDArray[Any]]) + +assert_type(np.matmul.__name__, Literal["matmul"]) +assert_type(np.matmul.__qualname__, Literal["matmul"]) +assert_type(np.matmul.ntypes, Literal[19]) +assert_type(np.matmul.identity, None) +assert_type(np.matmul.nin, Literal[2]) +assert_type(np.matmul.nout, Literal[1]) +assert_type(np.matmul.nargs, Literal[3]) +assert_type(np.matmul.signature, Literal["(n?,k),(k,m?)->(n?,m?)"]) +assert_type(np.matmul.identity, None) +assert_type(np.matmul(AR_f8, AR_f8), Any) +assert_type(np.matmul(AR_f8, AR_f8, axes=[(0, 1), (0, 1), (0, 1)]), Any) + +assert_type(np.vecdot.__name__, Literal["vecdot"]) +assert_type(np.vecdot.__qualname__, Literal["vecdot"]) +assert_type(np.vecdot.ntypes, Literal[19]) +assert_type(np.vecdot.identity, None) +assert_type(np.vecdot.nin, Literal[2]) +assert_type(np.vecdot.nout, Literal[1]) +assert_type(np.vecdot.nargs, Literal[3]) +assert_type(np.vecdot.signature, Literal["(n),(n)->()"]) +assert_type(np.vecdot.identity, None) +assert_type(np.vecdot(AR_f8, AR_f8), Any) + +assert_type(np.bitwise_count.__name__, Literal["bitwise_count"]) +assert_type(np.bitwise_count.__qualname__, Literal["bitwise_count"]) +assert_type(np.bitwise_count.ntypes, Literal[11]) +assert_type(np.bitwise_count.identity, None) +assert_type(np.bitwise_count.nin, Literal[1]) +assert_type(np.bitwise_count.nout, Literal[1]) +assert_type(np.bitwise_count.nargs, Literal[2]) +assert_type(np.bitwise_count.signature, None) +assert_type(np.bitwise_count.identity, None) +assert_type(np.bitwise_count(i8), Any) +assert_type(np.bitwise_count(AR_i8), npt.NDArray[Any]) + +def test_absolute_outer_invalid() -> None: + assert_type(np.absolute.outer(AR_f8, AR_f8), NoReturn) # type: ignore[arg-type] +def test_frexp_outer_invalid() -> None: + assert_type(np.frexp.outer(AR_f8, AR_f8), NoReturn) # type: ignore[arg-type] +def test_divmod_outer_invalid() -> None: + assert_type(np.divmod.outer(AR_f8, AR_f8), NoReturn) # type: ignore[arg-type] +def test_matmul_outer_invalid() -> None: + assert_type(np.matmul.outer(AR_f8, AR_f8), NoReturn) # type: ignore[arg-type] + +def test_absolute_reduceat_invalid() -> None: + assert_type(np.absolute.reduceat(AR_f8, AR_i8), NoReturn) # type: ignore[arg-type] +def test_frexp_reduceat_invalid() -> None: + assert_type(np.frexp.reduceat(AR_f8, AR_i8), NoReturn) # type: ignore[arg-type] +def test_divmod_reduceat_invalid() -> None: + assert_type(np.divmod.reduceat(AR_f8, AR_i8), NoReturn) # type: ignore[arg-type] +def test_matmul_reduceat_invalid() -> None: + assert_type(np.matmul.reduceat(AR_f8, AR_i8), NoReturn) # type: ignore[arg-type] + +def test_absolute_reduce_invalid() -> None: + assert_type(np.absolute.reduce(AR_f8), NoReturn) # type: ignore[arg-type] +def test_frexp_reduce_invalid() -> None: + assert_type(np.frexp.reduce(AR_f8), NoReturn) # type: ignore[arg-type] +def test_divmod_reduce_invalid() -> None: + assert_type(np.divmod.reduce(AR_f8), NoReturn) # type: ignore[arg-type] +def test_matmul_reduce_invalid() -> None: + assert_type(np.matmul.reduce(AR_f8), NoReturn) # type: ignore[arg-type] + +def test_absolute_accumulate_invalid() -> None: + assert_type(np.absolute.accumulate(AR_f8), NoReturn) # type: ignore[arg-type] +def test_frexp_accumulate_invalid() -> None: + assert_type(np.frexp.accumulate(AR_f8), NoReturn) # type: ignore[arg-type] +def test_divmod_accumulate_invalid() -> None: + assert_type(np.divmod.accumulate(AR_f8), NoReturn) # type: ignore[arg-type] +def test_matmul_accumulate_invalid() -> None: + assert_type(np.matmul.accumulate(AR_f8), NoReturn) # type: ignore[arg-type] + +def test_frexp_at_invalid() -> None: + assert_type(np.frexp.at(AR_f8, i8), NoReturn) # type: ignore[arg-type] +def test_divmod_at_invalid() -> None: + assert_type(np.divmod.at(AR_f8, i8, AR_f8), NoReturn) # type: ignore[arg-type] +def test_matmul_at_invalid() -> None: + assert_type(np.matmul.at(AR_f8, i8, AR_f8), NoReturn) # type: ignore[arg-type] diff --git a/local_packages/numpy/typing/tests/data/reveal/warnings_and_errors.pyi b/local_packages/numpy/typing/tests/data/reveal/warnings_and_errors.pyi new file mode 100644 index 0000000..f756a8e --- /dev/null +++ b/local_packages/numpy/typing/tests/data/reveal/warnings_and_errors.pyi @@ -0,0 +1,11 @@ +from typing import assert_type + +import numpy.exceptions as ex + +assert_type(ex.ModuleDeprecationWarning(), ex.ModuleDeprecationWarning) +assert_type(ex.VisibleDeprecationWarning(), ex.VisibleDeprecationWarning) +assert_type(ex.ComplexWarning(), ex.ComplexWarning) +assert_type(ex.RankWarning(), ex.RankWarning) +assert_type(ex.TooHardError(), ex.TooHardError) +assert_type(ex.AxisError("test"), ex.AxisError) +assert_type(ex.AxisError(5, 1), ex.AxisError) diff --git a/local_packages/numpy/typing/tests/test_isfile.py b/local_packages/numpy/typing/tests/test_isfile.py new file mode 100644 index 0000000..0e3157a --- /dev/null +++ b/local_packages/numpy/typing/tests/test_isfile.py @@ -0,0 +1,38 @@ +import os +import sys +from pathlib import Path + +import pytest + +import numpy as np +from numpy.testing import assert_ + +ROOT = Path(np.__file__).parents[0] +FILES = [ + ROOT / "py.typed", + ROOT / "__init__.pyi", + ROOT / "ctypeslib" / "__init__.pyi", + ROOT / "_core" / "__init__.pyi", + ROOT / "f2py" / "__init__.pyi", + ROOT / "fft" / "__init__.pyi", + ROOT / "lib" / "__init__.pyi", + ROOT / "linalg" / "__init__.pyi", + ROOT / "ma" / "__init__.pyi", + ROOT / "matrixlib" / "__init__.pyi", + ROOT / "polynomial" / "__init__.pyi", + ROOT / "random" / "__init__.pyi", + ROOT / "testing" / "__init__.pyi", +] +if sys.version_info < (3, 12): + FILES += [ROOT / "distutils" / "__init__.pyi"] + + +@pytest.mark.thread_unsafe( + reason="os.path has a thread-safety bug (python/cpython#140054). " + "Expected to only be a problem in 3.14.0" +) +class TestIsFile: + def test_isfile(self): + """Test if all ``.pyi`` files are properly installed.""" + for file in FILES: + assert_(os.path.isfile(file)) diff --git a/local_packages/numpy/typing/tests/test_runtime.py b/local_packages/numpy/typing/tests/test_runtime.py new file mode 100644 index 0000000..462fe4e --- /dev/null +++ b/local_packages/numpy/typing/tests/test_runtime.py @@ -0,0 +1,110 @@ +"""Test the runtime usage of `numpy.typing`.""" + +from typing import ( + Any, + NamedTuple, + Union, # pyright: ignore[reportDeprecated] + get_args, + get_origin, + get_type_hints, +) + +import pytest + +import numpy as np +import numpy._typing as _npt +import numpy.typing as npt + + +class TypeTup(NamedTuple): + typ: type + args: tuple[type, ...] + origin: type | None + + +def _flatten_type_alias(t: Any) -> Any: + # "flattens" a TypeAliasType to its underlying type alias + return getattr(t, "__value__", t) + + +NDArrayTup = TypeTup(npt.NDArray, npt.NDArray.__args__, np.ndarray) + +TYPES = { + "ArrayLike": TypeTup( + _flatten_type_alias(npt.ArrayLike), + _flatten_type_alias(npt.ArrayLike).__args__, + Union, + ), + "DTypeLike": TypeTup( + _flatten_type_alias(npt.DTypeLike), + _flatten_type_alias(npt.DTypeLike).__args__, + Union, + ), + "NBitBase": TypeTup(npt.NBitBase, (), None), # type: ignore[deprecated] # pyright: ignore[reportDeprecated] + "NDArray": NDArrayTup, +} + + +@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys()) +def test_get_args(name: type, tup: TypeTup) -> None: + """Test `typing.get_args`.""" + typ, ref = tup.typ, tup.args + out = get_args(typ) + assert out == ref + + +@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys()) +def test_get_origin(name: type, tup: TypeTup) -> None: + """Test `typing.get_origin`.""" + typ, ref = tup.typ, tup.origin + out = get_origin(typ) + assert out == ref + + +@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys()) +def test_get_type_hints(name: type, tup: TypeTup) -> None: + """Test `typing.get_type_hints`.""" + typ = tup.typ + + def func(a: typ) -> None: pass + + out = get_type_hints(func) + ref = {"a": typ, "return": type(None)} + assert out == ref + + +@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys()) +def test_get_type_hints_str(name: type, tup: TypeTup) -> None: + """Test `typing.get_type_hints` with string-representation of types.""" + typ_str, typ = f"npt.{name}", tup.typ + + def func(a: typ_str) -> None: pass + + out = get_type_hints(func) + ref = {"a": getattr(npt, str(name)), "return": type(None)} + assert out == ref + + +def test_keys() -> None: + """Test that ``TYPES.keys()`` and ``numpy.typing.__all__`` are synced.""" + keys = TYPES.keys() + ref = set(npt.__all__) + assert keys == ref + + +PROTOCOLS: dict[str, tuple[type[Any], object]] = { + "_SupportsArray": (_npt._SupportsArray, np.arange(10)), + "_SupportsArrayFunc": (_npt._SupportsArrayFunc, np.arange(10)), + "_NestedSequence": (_npt._NestedSequence, [1]), +} + + +@pytest.mark.parametrize("cls,obj", PROTOCOLS.values(), ids=PROTOCOLS.keys()) +class TestRuntimeProtocol: + def test_isinstance(self, cls: type[Any], obj: object) -> None: + assert isinstance(obj, cls) + assert not isinstance(None, cls) + + def test_issubclass(self, cls: type[Any], obj: object) -> None: + assert issubclass(type(obj), cls) + assert not issubclass(type(None), cls) diff --git a/local_packages/numpy/typing/tests/test_typing.py b/local_packages/numpy/typing/tests/test_typing.py new file mode 100644 index 0000000..ca4cf37 --- /dev/null +++ b/local_packages/numpy/typing/tests/test_typing.py @@ -0,0 +1,205 @@ +import importlib.util +import os +import re +import shutil +import textwrap +from collections import defaultdict +from typing import TYPE_CHECKING + +import pytest + +# Only trigger a full `mypy` run if this environment variable is set +# Note that these tests tend to take over a minute even on a macOS M1 CPU, +# and more than that in CI. +RUN_MYPY = "NPY_RUN_MYPY_IN_TESTSUITE" in os.environ +if RUN_MYPY and RUN_MYPY not in ('0', '', 'false'): + RUN_MYPY = True + +# Skips all functions in this file +pytestmark = pytest.mark.skipif( + not RUN_MYPY, + reason="`NPY_RUN_MYPY_IN_TESTSUITE` not set" +) + + +try: + from mypy import api +except ImportError: + NO_MYPY = True +else: + NO_MYPY = False + +if TYPE_CHECKING: + from collections.abc import Iterator + + # We need this as annotation, but it's located in a private namespace. + # As a compromise, do *not* import it during runtime + from _pytest.mark.structures import ParameterSet + +DATA_DIR = os.path.join(os.path.dirname(__file__), "data") +PASS_DIR = os.path.join(DATA_DIR, "pass") +FAIL_DIR = os.path.join(DATA_DIR, "fail") +REVEAL_DIR = os.path.join(DATA_DIR, "reveal") +MISC_DIR = os.path.join(DATA_DIR, "misc") +MYPY_INI = os.path.join(DATA_DIR, "mypy.ini") +CACHE_DIR = os.path.join(DATA_DIR, ".mypy_cache") + +#: A dictionary with file names as keys and lists of the mypy stdout as values. +#: To-be populated by `run_mypy`. +OUTPUT_MYPY: defaultdict[str, list[str]] = defaultdict(list) + + +def _key_func(key: str) -> str: + """Split at the first occurrence of the ``:`` character. + + Windows drive-letters (*e.g.* ``C:``) are ignored herein. + """ + drive, tail = os.path.splitdrive(key) + return os.path.join(drive, tail.split(":", 1)[0]) + + +def _strip_filename(msg: str) -> tuple[int, str]: + """Strip the filename and line number from a mypy message.""" + _, tail = os.path.splitdrive(msg) + _, lineno, msg = tail.split(":", 2) + return int(lineno), msg.strip() + + +def strip_func(match: re.Match[str]) -> str: + """`re.sub` helper function for stripping module names.""" + return match.groups()[1] + + +@pytest.fixture(scope="module", autouse=True) +def run_mypy() -> None: + """Clears the cache and run mypy before running any of the typing tests. + + The mypy results are cached in `OUTPUT_MYPY` for further use. + + The cache refresh can be skipped using + + NUMPY_TYPING_TEST_CLEAR_CACHE=0 pytest numpy/typing/tests + """ + if ( + os.path.isdir(CACHE_DIR) + and bool(os.environ.get("NUMPY_TYPING_TEST_CLEAR_CACHE", True)) # noqa: PLW1508 + ): + shutil.rmtree(CACHE_DIR) + + split_pattern = re.compile(r"(\s+)?\^(\~+)?") + for directory in (PASS_DIR, REVEAL_DIR, FAIL_DIR, MISC_DIR): + # Run mypy + stdout, stderr, exit_code = api.run([ + "--config-file", + MYPY_INI, + "--cache-dir", + CACHE_DIR, + directory, + ]) + if stderr: + pytest.fail(f"Unexpected mypy standard error\n\n{stderr}", False) + elif exit_code not in {0, 1}: + pytest.fail(f"Unexpected mypy exit code: {exit_code}\n\n{stdout}", False) + + str_concat = "" + filename: str | None = None + for i in stdout.split("\n"): + if "note:" in i: + continue + if filename is None: + filename = _key_func(i) + + str_concat += f"{i}\n" + if split_pattern.match(i) is not None: + OUTPUT_MYPY[filename].append(str_concat) + str_concat = "" + filename = None + + +def get_test_cases(*directories: str) -> "Iterator[ParameterSet]": + for directory in directories: + for root, _, files in os.walk(directory): + for fname in files: + short_fname, ext = os.path.splitext(fname) + if ext not in (".pyi", ".py"): + continue + + fullpath = os.path.join(root, fname) + yield pytest.param(fullpath, id=short_fname) + + +_FAIL_INDENT = " " * 4 +_FAIL_SEP = "\n" + "_" * 79 + "\n\n" + +_FAIL_MSG_REVEAL = """{}:{} - reveal mismatch: + +{}""" + + +@pytest.mark.slow +@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") +@pytest.mark.parametrize("path", get_test_cases(PASS_DIR, FAIL_DIR)) +def test_pass(path) -> None: + # Alias `OUTPUT_MYPY` so that it appears in the local namespace + output_mypy = OUTPUT_MYPY + + if path not in output_mypy: + return + + relpath = os.path.relpath(path) + + # collect any reported errors, and clean up the output + messages = [] + for message in output_mypy[path]: + lineno, content = _strip_filename(message) + content = content.removeprefix("error:").lstrip() + messages.append(f"{relpath}:{lineno} - {content}") + + if messages: + pytest.fail("\n".join(messages), pytrace=False) + + +@pytest.mark.slow +@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") +@pytest.mark.parametrize("path", get_test_cases(REVEAL_DIR)) +def test_reveal(path: str) -> None: + """Validate that mypy correctly infers the return-types of + the expressions in `path`. + """ + __tracebackhide__ = True + + output_mypy = OUTPUT_MYPY + if path not in output_mypy: + return + + relpath = os.path.relpath(path) + + # collect any reported errors, and clean up the output + failures = [] + for error_line in output_mypy[path]: + lineno, error_msg = _strip_filename(error_line) + error_msg = textwrap.indent(error_msg, _FAIL_INDENT) + reason = _FAIL_MSG_REVEAL.format(relpath, lineno, error_msg) + failures.append(reason) + + if failures: + reasons = _FAIL_SEP.join(failures) + pytest.fail(reasons, pytrace=False) + + +@pytest.mark.slow +@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") +@pytest.mark.parametrize("path", get_test_cases(PASS_DIR)) +def test_code_runs(path: str) -> None: + """Validate that the code in `path` properly during runtime.""" + path_without_extension, _ = os.path.splitext(path) + dirname, filename = path.split(os.sep)[-2:] + + spec = importlib.util.spec_from_file_location( + f"{dirname}.{filename}", path + ) + assert spec is not None + assert spec.loader is not None + + test_module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(test_module) diff --git a/local_packages/numpy/version.py b/local_packages/numpy/version.py new file mode 100644 index 0000000..3892665 --- /dev/null +++ b/local_packages/numpy/version.py @@ -0,0 +1,11 @@ + +""" +Module to expose more detailed version info for the installed `numpy` +""" +version = "2.4.4" +__version__ = version +full_version = version + +git_revision = "be93fe2960dbf49b4647f5783c66d967fb2c65b5" +release = 'dev' not in version and '+' not in version +short_version = version.split("+")[0] diff --git a/local_packages/numpy/version.pyi b/local_packages/numpy/version.pyi new file mode 100644 index 0000000..073885c --- /dev/null +++ b/local_packages/numpy/version.pyi @@ -0,0 +1,9 @@ +from typing import Final, LiteralString + +version: Final[LiteralString] = ... +__version__: Final[LiteralString] = ... +full_version: Final[LiteralString] = ... + +git_revision: Final[LiteralString] = ... +release: Final[bool] = ... +short_version: Final[LiteralString] = ... diff --git a/local_packages/pygments-2.20.0.dist-info/INSTALLER b/local_packages/pygments-2.20.0.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/local_packages/pygments-2.20.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/local_packages/pygments-2.20.0.dist-info/METADATA b/local_packages/pygments-2.20.0.dist-info/METADATA new file mode 100644 index 0000000..3dea620 --- /dev/null +++ b/local_packages/pygments-2.20.0.dist-info/METADATA @@ -0,0 +1,57 @@ +Metadata-Version: 2.4 +Name: Pygments +Version: 2.20.0 +Summary: Pygments is a syntax highlighting package written in Python. +Project-URL: Homepage, https://pygments.org +Project-URL: Documentation, https://pygments.org/docs +Project-URL: Source, https://github.com/pygments/pygments +Project-URL: Bug Tracker, https://github.com/pygments/pygments/issues +Project-URL: Changelog, https://github.com/pygments/pygments/blob/master/CHANGES +Author-email: Georg Brandl +Maintainer: Matthäus G. Chajdas +Maintainer-email: Georg Brandl , Jean Abou Samra +License-Expression: BSD-2-Clause +License-File: AUTHORS +License-File: LICENSE +Keywords: syntax highlighting +Classifier: Development Status :: 6 - Mature +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: End Users/Desktop +Classifier: Intended Audience :: System Administrators +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Text Processing :: Filters +Classifier: Topic :: Utilities +Requires-Python: >=3.9 +Provides-Extra: plugins +Provides-Extra: windows-terminal +Requires-Dist: colorama>=0.4.6; extra == 'windows-terminal' +Description-Content-Type: text/x-rst + +Pygments +~~~~~~~~ + +Pygments is a syntax highlighting package written in Python. + +It is a generic syntax highlighter suitable for use in code hosting, forums, +wikis or other applications that need to prettify source code. Highlights +are: + +* a wide range of over 500 languages and other text formats is supported +* special attention is paid to details, increasing quality by a fair amount +* support for new languages and formats are added easily +* a number of output formats, presently HTML, LaTeX, RTF, SVG, all image + formats that PIL supports and ANSI sequences +* it is usable as a command-line tool and as a library + +Copyright 2006-present by the Pygments team, see ``AUTHORS``. +Licensed under the BSD, see ``LICENSE`` for details. diff --git a/local_packages/pygments-2.20.0.dist-info/RECORD b/local_packages/pygments-2.20.0.dist-info/RECORD new file mode 100644 index 0000000..263b06f --- /dev/null +++ b/local_packages/pygments-2.20.0.dist-info/RECORD @@ -0,0 +1,686 @@ +../../bin/pygmentize.exe,sha256=mkcQ8__X51DIFP3qv_fjlWBX78se7YnY_-EkY9ZRcTs,108388 +pygments-2.20.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pygments-2.20.0.dist-info/METADATA,sha256=4FKPUbMEJ_rpRyNmK6Yi-NjbKk2NPxNlaY1npSRQqEU,2476 +pygments-2.20.0.dist-info/RECORD,, +pygments-2.20.0.dist-info/WHEEL,sha256=QccIxa26bgl1E6uMy58deGWi-0aeIkkangHcxk2kWfw,87 +pygments-2.20.0.dist-info/entry_points.txt,sha256=uUXw-XhMKBEX4pWcCtpuTTnPhL3h7OEE2jWi51VQsa8,53 +pygments-2.20.0.dist-info/licenses/AUTHORS,sha256=DbYDpfRJn2kMRCVHf_ZkwWbaMl06zDtajU3j2wckQ9A,10873 +pygments-2.20.0.dist-info/licenses/LICENSE,sha256=qdZvHVJt8C4p3Oc0NtNOVuhjL0bCdbvf_HBWnogvnxc,1331 +pygments/__init__.py,sha256=ZzpnXpvnv0c7r_OIS8h7UeaESqYNKMy__pV7KCiWXxw,2962 +pygments/__main__.py,sha256=QZWj0T6TTRsqr-w-0YvVILo1DyAvzzydGCRONwdEzqo,351 +pygments/__pycache__/__init__.cpython-312.pyc,, +pygments/__pycache__/__main__.cpython-312.pyc,, +pygments/__pycache__/cmdline.cpython-312.pyc,, +pygments/__pycache__/console.cpython-312.pyc,, +pygments/__pycache__/filter.cpython-312.pyc,, +pygments/__pycache__/formatter.cpython-312.pyc,, +pygments/__pycache__/lexer.cpython-312.pyc,, +pygments/__pycache__/modeline.cpython-312.pyc,, +pygments/__pycache__/plugin.cpython-312.pyc,, +pygments/__pycache__/regexopt.cpython-312.pyc,, +pygments/__pycache__/scanner.cpython-312.pyc,, +pygments/__pycache__/sphinxext.cpython-312.pyc,, +pygments/__pycache__/style.cpython-312.pyc,, +pygments/__pycache__/token.cpython-312.pyc,, +pygments/__pycache__/unistring.cpython-312.pyc,, +pygments/__pycache__/util.cpython-312.pyc,, +pygments/cmdline.py,sha256=_dOnrta_2GIe8Jg-1Y0pb5vWi8L6QTiJzyoTuHrYrbM,23542 +pygments/console.py,sha256=C189JAwhC1Qh0AKgshzb1wVDzFqBnv1VZ45kTEDIO30,1721 +pygments/filter.py,sha256=1dnbkq2AdC3AkHt3DaXwnOkTBLChl1kR6naSLwnr_tc,1913 +pygments/filters/__init__.py,sha256=03ZYdIYmxnWCh7gNXD7lEQ869Df4kzHnOGM44iJxan8,40349 +pygments/filters/__pycache__/__init__.cpython-312.pyc,, +pygments/formatter.py,sha256=PTBnTW0EHke2vzlAKuvHJkj3-_CMj8duIx-3yVUie-o,4369 +pygments/formatters/__init__.py,sha256=qPG4q5cuaZRGBglmEJVLP4SDv43QI3tAUskj30M-mOY,5352 +pygments/formatters/__pycache__/__init__.cpython-312.pyc,, +pygments/formatters/__pycache__/_mapping.cpython-312.pyc,, +pygments/formatters/__pycache__/bbcode.cpython-312.pyc,, +pygments/formatters/__pycache__/groff.cpython-312.pyc,, +pygments/formatters/__pycache__/html.cpython-312.pyc,, +pygments/formatters/__pycache__/img.cpython-312.pyc,, +pygments/formatters/__pycache__/irc.cpython-312.pyc,, +pygments/formatters/__pycache__/latex.cpython-312.pyc,, +pygments/formatters/__pycache__/other.cpython-312.pyc,, +pygments/formatters/__pycache__/pangomarkup.cpython-312.pyc,, +pygments/formatters/__pycache__/rtf.cpython-312.pyc,, +pygments/formatters/__pycache__/svg.cpython-312.pyc,, +pygments/formatters/__pycache__/terminal.cpython-312.pyc,, +pygments/formatters/__pycache__/terminal256.cpython-312.pyc,, +pygments/formatters/_mapping.py,sha256=1Cw37FuQlNacnxRKmtlPX4nyLoX9_ttko5ZwscNUZZ4,4176 +pygments/formatters/bbcode.py,sha256=lvG1REZJv0pM6VMe-QwLSuo0Yl1Ra_X51wh93fW8A2k,3299 +pygments/formatters/groff.py,sha256=anF3fNbDYwwOlppOUoKTZg4FzzPbZrE4jcJCMd49_1g,5085 +pygments/formatters/html.py,sha256=qe4P6qIV462HkZovS8s5xKKyYXPQQvUjz6Wj7HX1CxY,36053 +pygments/formatters/img.py,sha256=41uSY0pKg9VmKJYrHuavCVgg0z-mg81p28QEMktwf1o,23304 +pygments/formatters/irc.py,sha256=hdOqAvF02bI8CY8_tutnUptBZpWEBLzVAqS4_YA0tew,4907 +pygments/formatters/latex.py,sha256=cQmE1Nj4E9q5N0XQJBqpc6dLtu5UNOtYq3plPyCX1Hk,19261 +pygments/formatters/other.py,sha256=Hq6qY4POBZ_llAWRr1gzSO9UoeYPONlLMBK60RkW4bg,4989 +pygments/formatters/pangomarkup.py,sha256=L4jU6oO18UqEMqXeN5FkPtkisXrXuXq9wcecgy_6dzo,2209 +pygments/formatters/rtf.py,sha256=YQYW8NTrB4XfOjLbQzutyD4j35TimFNZ0T39dET9DOo,11924 +pygments/formatters/svg.py,sha256=oksXT-ZnTHDsn1WtWgBs9V7qmKFswAKx7jV37tb2tYY,7141 +pygments/formatters/terminal.py,sha256=q7jLLanle33eCZkDr4CoHAN-dHBFf1DBhi4FcBQ8_1E,4629 +pygments/formatters/terminal256.py,sha256=PpA_oATHCih3UTjrbfKqIRUcvsyp_TeBaGw3kpIxeBA,11717 +pygments/lexer.py,sha256=gNMYzmdSkTNyWfqiLJ37oUd1KrN_dMXtsPyaX2-n9EA,35154 +pygments/lexers/__init__.py,sha256=G4dtqE5QMEAqoaaD1rwSZZeuqKhMyS4utlMz1szkrTg,12070 +pygments/lexers/__pycache__/__init__.cpython-312.pyc,, +pygments/lexers/__pycache__/_ada_builtins.cpython-312.pyc,, +pygments/lexers/__pycache__/_asy_builtins.cpython-312.pyc,, +pygments/lexers/__pycache__/_cl_builtins.cpython-312.pyc,, +pygments/lexers/__pycache__/_cocoa_builtins.cpython-312.pyc,, +pygments/lexers/__pycache__/_csound_builtins.cpython-312.pyc,, +pygments/lexers/__pycache__/_css_builtins.cpython-312.pyc,, +pygments/lexers/__pycache__/_googlesql_builtins.cpython-312.pyc,, +pygments/lexers/__pycache__/_julia_builtins.cpython-312.pyc,, +pygments/lexers/__pycache__/_lasso_builtins.cpython-312.pyc,, +pygments/lexers/__pycache__/_lilypond_builtins.cpython-312.pyc,, +pygments/lexers/__pycache__/_lua_builtins.cpython-312.pyc,, +pygments/lexers/__pycache__/_luau_builtins.cpython-312.pyc,, +pygments/lexers/__pycache__/_mapping.cpython-312.pyc,, +pygments/lexers/__pycache__/_mql_builtins.cpython-312.pyc,, +pygments/lexers/__pycache__/_mysql_builtins.cpython-312.pyc,, +pygments/lexers/__pycache__/_openedge_builtins.cpython-312.pyc,, +pygments/lexers/__pycache__/_php_builtins.cpython-312.pyc,, +pygments/lexers/__pycache__/_postgres_builtins.cpython-312.pyc,, +pygments/lexers/__pycache__/_qlik_builtins.cpython-312.pyc,, +pygments/lexers/__pycache__/_scheme_builtins.cpython-312.pyc,, +pygments/lexers/__pycache__/_scilab_builtins.cpython-312.pyc,, +pygments/lexers/__pycache__/_sourcemod_builtins.cpython-312.pyc,, +pygments/lexers/__pycache__/_sql_builtins.cpython-312.pyc,, +pygments/lexers/__pycache__/_stan_builtins.cpython-312.pyc,, +pygments/lexers/__pycache__/_stata_builtins.cpython-312.pyc,, +pygments/lexers/__pycache__/_tsql_builtins.cpython-312.pyc,, +pygments/lexers/__pycache__/_usd_builtins.cpython-312.pyc,, +pygments/lexers/__pycache__/_vbscript_builtins.cpython-312.pyc,, +pygments/lexers/__pycache__/_vim_builtins.cpython-312.pyc,, +pygments/lexers/__pycache__/actionscript.cpython-312.pyc,, +pygments/lexers/__pycache__/ada.cpython-312.pyc,, +pygments/lexers/__pycache__/agile.cpython-312.pyc,, +pygments/lexers/__pycache__/algebra.cpython-312.pyc,, +pygments/lexers/__pycache__/ambient.cpython-312.pyc,, +pygments/lexers/__pycache__/amdgpu.cpython-312.pyc,, +pygments/lexers/__pycache__/ampl.cpython-312.pyc,, +pygments/lexers/__pycache__/apdlexer.cpython-312.pyc,, +pygments/lexers/__pycache__/apl.cpython-312.pyc,, +pygments/lexers/__pycache__/archetype.cpython-312.pyc,, +pygments/lexers/__pycache__/arrow.cpython-312.pyc,, +pygments/lexers/__pycache__/arturo.cpython-312.pyc,, +pygments/lexers/__pycache__/asc.cpython-312.pyc,, +pygments/lexers/__pycache__/asm.cpython-312.pyc,, +pygments/lexers/__pycache__/asn1.cpython-312.pyc,, +pygments/lexers/__pycache__/automation.cpython-312.pyc,, +pygments/lexers/__pycache__/bare.cpython-312.pyc,, +pygments/lexers/__pycache__/basic.cpython-312.pyc,, +pygments/lexers/__pycache__/bdd.cpython-312.pyc,, +pygments/lexers/__pycache__/berry.cpython-312.pyc,, +pygments/lexers/__pycache__/bibtex.cpython-312.pyc,, +pygments/lexers/__pycache__/blueprint.cpython-312.pyc,, +pygments/lexers/__pycache__/boa.cpython-312.pyc,, +pygments/lexers/__pycache__/bqn.cpython-312.pyc,, +pygments/lexers/__pycache__/business.cpython-312.pyc,, +pygments/lexers/__pycache__/c_cpp.cpython-312.pyc,, +pygments/lexers/__pycache__/c_like.cpython-312.pyc,, +pygments/lexers/__pycache__/capnproto.cpython-312.pyc,, +pygments/lexers/__pycache__/carbon.cpython-312.pyc,, +pygments/lexers/__pycache__/cddl.cpython-312.pyc,, +pygments/lexers/__pycache__/chapel.cpython-312.pyc,, +pygments/lexers/__pycache__/clean.cpython-312.pyc,, +pygments/lexers/__pycache__/codeql.cpython-312.pyc,, +pygments/lexers/__pycache__/comal.cpython-312.pyc,, +pygments/lexers/__pycache__/compiled.cpython-312.pyc,, +pygments/lexers/__pycache__/configs.cpython-312.pyc,, +pygments/lexers/__pycache__/console.cpython-312.pyc,, +pygments/lexers/__pycache__/cplint.cpython-312.pyc,, +pygments/lexers/__pycache__/crystal.cpython-312.pyc,, +pygments/lexers/__pycache__/csound.cpython-312.pyc,, +pygments/lexers/__pycache__/css.cpython-312.pyc,, +pygments/lexers/__pycache__/d.cpython-312.pyc,, +pygments/lexers/__pycache__/dalvik.cpython-312.pyc,, +pygments/lexers/__pycache__/data.cpython-312.pyc,, +pygments/lexers/__pycache__/dax.cpython-312.pyc,, +pygments/lexers/__pycache__/devicetree.cpython-312.pyc,, +pygments/lexers/__pycache__/diff.cpython-312.pyc,, +pygments/lexers/__pycache__/dns.cpython-312.pyc,, +pygments/lexers/__pycache__/dotnet.cpython-312.pyc,, +pygments/lexers/__pycache__/dsls.cpython-312.pyc,, +pygments/lexers/__pycache__/dylan.cpython-312.pyc,, +pygments/lexers/__pycache__/ecl.cpython-312.pyc,, +pygments/lexers/__pycache__/eiffel.cpython-312.pyc,, +pygments/lexers/__pycache__/elm.cpython-312.pyc,, +pygments/lexers/__pycache__/elpi.cpython-312.pyc,, +pygments/lexers/__pycache__/email.cpython-312.pyc,, +pygments/lexers/__pycache__/erlang.cpython-312.pyc,, +pygments/lexers/__pycache__/esoteric.cpython-312.pyc,, +pygments/lexers/__pycache__/ezhil.cpython-312.pyc,, +pygments/lexers/__pycache__/factor.cpython-312.pyc,, +pygments/lexers/__pycache__/fantom.cpython-312.pyc,, +pygments/lexers/__pycache__/felix.cpython-312.pyc,, +pygments/lexers/__pycache__/fift.cpython-312.pyc,, +pygments/lexers/__pycache__/floscript.cpython-312.pyc,, +pygments/lexers/__pycache__/forth.cpython-312.pyc,, +pygments/lexers/__pycache__/fortran.cpython-312.pyc,, +pygments/lexers/__pycache__/foxpro.cpython-312.pyc,, +pygments/lexers/__pycache__/freefem.cpython-312.pyc,, +pygments/lexers/__pycache__/func.cpython-312.pyc,, +pygments/lexers/__pycache__/functional.cpython-312.pyc,, +pygments/lexers/__pycache__/futhark.cpython-312.pyc,, +pygments/lexers/__pycache__/gcodelexer.cpython-312.pyc,, +pygments/lexers/__pycache__/gdscript.cpython-312.pyc,, +pygments/lexers/__pycache__/gleam.cpython-312.pyc,, +pygments/lexers/__pycache__/go.cpython-312.pyc,, +pygments/lexers/__pycache__/grammar_notation.cpython-312.pyc,, +pygments/lexers/__pycache__/graph.cpython-312.pyc,, +pygments/lexers/__pycache__/graphics.cpython-312.pyc,, +pygments/lexers/__pycache__/graphql.cpython-312.pyc,, +pygments/lexers/__pycache__/graphviz.cpython-312.pyc,, +pygments/lexers/__pycache__/gsql.cpython-312.pyc,, +pygments/lexers/__pycache__/hare.cpython-312.pyc,, +pygments/lexers/__pycache__/haskell.cpython-312.pyc,, +pygments/lexers/__pycache__/haxe.cpython-312.pyc,, +pygments/lexers/__pycache__/hdl.cpython-312.pyc,, +pygments/lexers/__pycache__/hexdump.cpython-312.pyc,, +pygments/lexers/__pycache__/html.cpython-312.pyc,, +pygments/lexers/__pycache__/idl.cpython-312.pyc,, +pygments/lexers/__pycache__/igor.cpython-312.pyc,, +pygments/lexers/__pycache__/inferno.cpython-312.pyc,, +pygments/lexers/__pycache__/installers.cpython-312.pyc,, +pygments/lexers/__pycache__/int_fiction.cpython-312.pyc,, +pygments/lexers/__pycache__/iolang.cpython-312.pyc,, +pygments/lexers/__pycache__/j.cpython-312.pyc,, +pygments/lexers/__pycache__/javascript.cpython-312.pyc,, +pygments/lexers/__pycache__/jmespath.cpython-312.pyc,, +pygments/lexers/__pycache__/jslt.cpython-312.pyc,, +pygments/lexers/__pycache__/json5.cpython-312.pyc,, +pygments/lexers/__pycache__/jsonnet.cpython-312.pyc,, +pygments/lexers/__pycache__/jsx.cpython-312.pyc,, +pygments/lexers/__pycache__/julia.cpython-312.pyc,, +pygments/lexers/__pycache__/jvm.cpython-312.pyc,, +pygments/lexers/__pycache__/kuin.cpython-312.pyc,, +pygments/lexers/__pycache__/kusto.cpython-312.pyc,, +pygments/lexers/__pycache__/ldap.cpython-312.pyc,, +pygments/lexers/__pycache__/lean.cpython-312.pyc,, +pygments/lexers/__pycache__/lilypond.cpython-312.pyc,, +pygments/lexers/__pycache__/lisp.cpython-312.pyc,, +pygments/lexers/__pycache__/macaulay2.cpython-312.pyc,, +pygments/lexers/__pycache__/make.cpython-312.pyc,, +pygments/lexers/__pycache__/maple.cpython-312.pyc,, +pygments/lexers/__pycache__/markup.cpython-312.pyc,, +pygments/lexers/__pycache__/math.cpython-312.pyc,, +pygments/lexers/__pycache__/matlab.cpython-312.pyc,, +pygments/lexers/__pycache__/maxima.cpython-312.pyc,, +pygments/lexers/__pycache__/meson.cpython-312.pyc,, +pygments/lexers/__pycache__/mime.cpython-312.pyc,, +pygments/lexers/__pycache__/minecraft.cpython-312.pyc,, +pygments/lexers/__pycache__/mips.cpython-312.pyc,, +pygments/lexers/__pycache__/ml.cpython-312.pyc,, +pygments/lexers/__pycache__/modeling.cpython-312.pyc,, +pygments/lexers/__pycache__/modula2.cpython-312.pyc,, +pygments/lexers/__pycache__/mojo.cpython-312.pyc,, +pygments/lexers/__pycache__/monte.cpython-312.pyc,, +pygments/lexers/__pycache__/mosel.cpython-312.pyc,, +pygments/lexers/__pycache__/ncl.cpython-312.pyc,, +pygments/lexers/__pycache__/nimrod.cpython-312.pyc,, +pygments/lexers/__pycache__/nit.cpython-312.pyc,, +pygments/lexers/__pycache__/nix.cpython-312.pyc,, +pygments/lexers/__pycache__/numbair.cpython-312.pyc,, +pygments/lexers/__pycache__/oberon.cpython-312.pyc,, +pygments/lexers/__pycache__/objective.cpython-312.pyc,, +pygments/lexers/__pycache__/ooc.cpython-312.pyc,, +pygments/lexers/__pycache__/openscad.cpython-312.pyc,, +pygments/lexers/__pycache__/other.cpython-312.pyc,, +pygments/lexers/__pycache__/parasail.cpython-312.pyc,, +pygments/lexers/__pycache__/parsers.cpython-312.pyc,, +pygments/lexers/__pycache__/pascal.cpython-312.pyc,, +pygments/lexers/__pycache__/pawn.cpython-312.pyc,, +pygments/lexers/__pycache__/pddl.cpython-312.pyc,, +pygments/lexers/__pycache__/perl.cpython-312.pyc,, +pygments/lexers/__pycache__/phix.cpython-312.pyc,, +pygments/lexers/__pycache__/php.cpython-312.pyc,, +pygments/lexers/__pycache__/pointless.cpython-312.pyc,, +pygments/lexers/__pycache__/pony.cpython-312.pyc,, +pygments/lexers/__pycache__/praat.cpython-312.pyc,, +pygments/lexers/__pycache__/procfile.cpython-312.pyc,, +pygments/lexers/__pycache__/prolog.cpython-312.pyc,, +pygments/lexers/__pycache__/promql.cpython-312.pyc,, +pygments/lexers/__pycache__/prql.cpython-312.pyc,, +pygments/lexers/__pycache__/ptx.cpython-312.pyc,, +pygments/lexers/__pycache__/python.cpython-312.pyc,, +pygments/lexers/__pycache__/q.cpython-312.pyc,, +pygments/lexers/__pycache__/qlik.cpython-312.pyc,, +pygments/lexers/__pycache__/qvt.cpython-312.pyc,, +pygments/lexers/__pycache__/r.cpython-312.pyc,, +pygments/lexers/__pycache__/rdf.cpython-312.pyc,, +pygments/lexers/__pycache__/rebol.cpython-312.pyc,, +pygments/lexers/__pycache__/rego.cpython-312.pyc,, +pygments/lexers/__pycache__/rell.cpython-312.pyc,, +pygments/lexers/__pycache__/resource.cpython-312.pyc,, +pygments/lexers/__pycache__/ride.cpython-312.pyc,, +pygments/lexers/__pycache__/rita.cpython-312.pyc,, +pygments/lexers/__pycache__/rnc.cpython-312.pyc,, +pygments/lexers/__pycache__/roboconf.cpython-312.pyc,, +pygments/lexers/__pycache__/robotframework.cpython-312.pyc,, +pygments/lexers/__pycache__/ruby.cpython-312.pyc,, +pygments/lexers/__pycache__/rust.cpython-312.pyc,, +pygments/lexers/__pycache__/sas.cpython-312.pyc,, +pygments/lexers/__pycache__/savi.cpython-312.pyc,, +pygments/lexers/__pycache__/scdoc.cpython-312.pyc,, +pygments/lexers/__pycache__/scripting.cpython-312.pyc,, +pygments/lexers/__pycache__/sgf.cpython-312.pyc,, +pygments/lexers/__pycache__/shell.cpython-312.pyc,, +pygments/lexers/__pycache__/sieve.cpython-312.pyc,, +pygments/lexers/__pycache__/slash.cpython-312.pyc,, +pygments/lexers/__pycache__/smalltalk.cpython-312.pyc,, +pygments/lexers/__pycache__/smithy.cpython-312.pyc,, +pygments/lexers/__pycache__/smv.cpython-312.pyc,, +pygments/lexers/__pycache__/snobol.cpython-312.pyc,, +pygments/lexers/__pycache__/solidity.cpython-312.pyc,, +pygments/lexers/__pycache__/soong.cpython-312.pyc,, +pygments/lexers/__pycache__/sophia.cpython-312.pyc,, +pygments/lexers/__pycache__/special.cpython-312.pyc,, +pygments/lexers/__pycache__/spice.cpython-312.pyc,, +pygments/lexers/__pycache__/sql.cpython-312.pyc,, +pygments/lexers/__pycache__/srcinfo.cpython-312.pyc,, +pygments/lexers/__pycache__/stata.cpython-312.pyc,, +pygments/lexers/__pycache__/supercollider.cpython-312.pyc,, +pygments/lexers/__pycache__/tablegen.cpython-312.pyc,, +pygments/lexers/__pycache__/tact.cpython-312.pyc,, +pygments/lexers/__pycache__/tal.cpython-312.pyc,, +pygments/lexers/__pycache__/tcl.cpython-312.pyc,, +pygments/lexers/__pycache__/teal.cpython-312.pyc,, +pygments/lexers/__pycache__/templates.cpython-312.pyc,, +pygments/lexers/__pycache__/teraterm.cpython-312.pyc,, +pygments/lexers/__pycache__/testing.cpython-312.pyc,, +pygments/lexers/__pycache__/text.cpython-312.pyc,, +pygments/lexers/__pycache__/textedit.cpython-312.pyc,, +pygments/lexers/__pycache__/textfmts.cpython-312.pyc,, +pygments/lexers/__pycache__/theorem.cpython-312.pyc,, +pygments/lexers/__pycache__/thingsdb.cpython-312.pyc,, +pygments/lexers/__pycache__/tlb.cpython-312.pyc,, +pygments/lexers/__pycache__/tls.cpython-312.pyc,, +pygments/lexers/__pycache__/tnt.cpython-312.pyc,, +pygments/lexers/__pycache__/trafficscript.cpython-312.pyc,, +pygments/lexers/__pycache__/typoscript.cpython-312.pyc,, +pygments/lexers/__pycache__/typst.cpython-312.pyc,, +pygments/lexers/__pycache__/ul4.cpython-312.pyc,, +pygments/lexers/__pycache__/unicon.cpython-312.pyc,, +pygments/lexers/__pycache__/urbi.cpython-312.pyc,, +pygments/lexers/__pycache__/usd.cpython-312.pyc,, +pygments/lexers/__pycache__/varnish.cpython-312.pyc,, +pygments/lexers/__pycache__/verification.cpython-312.pyc,, +pygments/lexers/__pycache__/verifpal.cpython-312.pyc,, +pygments/lexers/__pycache__/vip.cpython-312.pyc,, +pygments/lexers/__pycache__/vyper.cpython-312.pyc,, +pygments/lexers/__pycache__/web.cpython-312.pyc,, +pygments/lexers/__pycache__/webassembly.cpython-312.pyc,, +pygments/lexers/__pycache__/webidl.cpython-312.pyc,, +pygments/lexers/__pycache__/webmisc.cpython-312.pyc,, +pygments/lexers/__pycache__/wgsl.cpython-312.pyc,, +pygments/lexers/__pycache__/whiley.cpython-312.pyc,, +pygments/lexers/__pycache__/wowtoc.cpython-312.pyc,, +pygments/lexers/__pycache__/wren.cpython-312.pyc,, +pygments/lexers/__pycache__/x10.cpython-312.pyc,, +pygments/lexers/__pycache__/xorg.cpython-312.pyc,, +pygments/lexers/__pycache__/yang.cpython-312.pyc,, +pygments/lexers/__pycache__/yara.cpython-312.pyc,, +pygments/lexers/__pycache__/zig.cpython-312.pyc,, +pygments/lexers/_ada_builtins.py,sha256=dZb-lodsSM6L5emLlgLg-rClm2HumvnHK72CetnSRdA,1546 +pygments/lexers/_asy_builtins.py,sha256=zg54fGhgzWXQUk6-qZ6OW5GTL9d4OrnB8SJQkjrD0xs,27290 +pygments/lexers/_cl_builtins.py,sha256=oBF00ZkJyD14LkYuR603EDFIMyitkze12aT7UzDYLYs,13997 +pygments/lexers/_cocoa_builtins.py,sha256=ab6sq-iy5LapE1cNYyl8PJXLg6EINXx-lMRbwERdKYs,105176 +pygments/lexers/_csound_builtins.py,sha256=wuWiQjmEMhaVvsV8b_efsebgU0YKIoMC0ECpOavTCIM,18417 +pygments/lexers/_css_builtins.py,sha256=qhmC4tRGG53zvzMQE1qn794LvbVODRtVacxWCP3kIkA,12449 +pygments/lexers/_googlesql_builtins.py,sha256=mGfOGuKKjZoHHAAZq6Hpc68x0qKycx-SX_wGDScSOYI,16135 +pygments/lexers/_julia_builtins.py,sha256=u6v0yAzZjqUENjiIQ5qMfcso1r_6ERqGbj3aHwFnyqA,11886 +pygments/lexers/_lasso_builtins.py,sha256=YO_c_f05ZoxspHxCR3oWQCpZhbAJUzBJBhVuuwTjr5c,134513 +pygments/lexers/_lilypond_builtins.py,sha256=-_4i4gpgDgcFHvaZMJL2s1rI9dSNY2ZC6vO6ul7Hml0,115114 +pygments/lexers/_lua_builtins.py,sha256=MsDV9sEbJngKoXSZu0xXawGvd6XjFf125nuXBNoffdU,8111 +pygments/lexers/_luau_builtins.py,sha256=YLUj2bcZ0Cb0SBkIlNXWHFGDUFkpddOJx-ukrZx1cMc,958 +pygments/lexers/_mapping.py,sha256=YbQJB1eqeGk7ol5NgMMPLrD3VGdTwgQCl3TvLwtxwxA,70758 +pygments/lexers/_mql_builtins.py,sha256=CkcvMyHYh4U5rGgV6AgShxRnTOv0tmqEbOiVdpaFLqs,24716 +pygments/lexers/_mysql_builtins.py,sha256=SqiVYVVirtnN4lSgtfeRPxtVpcumjUvvUq9n8JObuQ4,26876 +pygments/lexers/_openedge_builtins.py,sha256=AzF1o6eAkMc6vnvmeVqNnUZUVijzbNYkjzrUdtCQe3M,49401 +pygments/lexers/_php_builtins.py,sha256=9g-WLT3qbSGolQJB_4PS6FW_r7HVXBWvI5OWxKtbwok,108054 +pygments/lexers/_postgres_builtins.py,sha256=QGg2mBTThgv4LdXjFm5wrKaKnCU9DubmJi6lXB7y2Mg,13346 +pygments/lexers/_qlik_builtins.py,sha256=3ZkCSDjtxSxXDI9erigWTLSUct_ap_W0LqfZjFeKQYI,12598 +pygments/lexers/_scheme_builtins.py,sha256=O3BRtt5suTESbZ_bSReQ6D0r0G_Sq_i4wCcUkdUrdS0,32567 +pygments/lexers/_scilab_builtins.py,sha256=E33R0mknNc-5eZ9Ugu7u7Av_vUxAaQoMTtdx3MceyxU,52414 +pygments/lexers/_sourcemod_builtins.py,sha256=8qspLxCLsfN9J7N0SWZRNs0A-qJOIGRBAJaNMeWaOBQ,26780 +pygments/lexers/_sql_builtins.py,sha256=fLphonB6wv6Oit8zanJiExgRoLvZoFW4l9lNH-PTYvw,6770 +pygments/lexers/_stan_builtins.py,sha256=sIoONg4TjLkGN7Ab0tZB1HM7tQ071_Z2c41n9lXZl04,16623 +pygments/lexers/_stata_builtins.py,sha256=ymh8GxEca6eSCgHnqsLqsi9OrfRqYnt5O8yBeinJ7DE,27230 +pygments/lexers/_tsql_builtins.py,sha256=Mw6UnMByju0U-OBgT8af3A4utfrS00Tn5lDnaxBtj9c,15463 +pygments/lexers/_usd_builtins.py,sha256=SGp_ePf2VuktrFV59q6df4mTyxZV7AZYs0TJFSWoBAI,1661 +pygments/lexers/_vbscript_builtins.py,sha256=TFtyc11yvpD_OlNrrFbcCOJoUYkWMq3EeHHCpw_zJNc,4228 +pygments/lexers/_vim_builtins.py,sha256=bB6kdLFM33uoY-sAtsRuUZOoccaa4gDAsdFg4WAEsoU,57069 +pygments/lexers/actionscript.py,sha256=kbHhoDl7JjVzREMv1UuxYBRJaPrxHDkmEAC5COmOIVw,11737 +pygments/lexers/ada.py,sha256=m2O7dYzJVc3iuSgaC-Ti-Gh3CG-JmpZit_DPZpu1IIM,5356 +pygments/lexers/agile.py,sha256=Du-vjnGZEPHuE40XxTMO-XLt56pYcIaKIAxIwcYHFH0,899 +pygments/lexers/algebra.py,sha256=2iQSGxfVuBXBHAJdzbbPvtpvROYTfxfMivDagk9yfh8,10032 +pygments/lexers/ambient.py,sha256=8Nk8WX4qrId9zj_zgjJBuFT40yQXG2OXxFhhF3R00g8,2608 +pygments/lexers/amdgpu.py,sha256=KMDLdb-1aK9RJaNsSkPyiU3DQE_HdjBF0paw_yiJMRA,1726 +pygments/lexers/ampl.py,sha256=IKuMVE6aXsa4S6qpuc_DvEsJmJpyKDNga09BPFLvYxA,4179 +pygments/lexers/apdlexer.py,sha256=puSpLBWevciSc8G0wg2Y_yQIoGgqPes0UBkpFImqvO4,30803 +pygments/lexers/apl.py,sha256=-jhPYsBVT4ckL7CZmukIZNi6tfaJJbBk2vutld372rs,3407 +pygments/lexers/archetype.py,sha256=buhH2WHtxBowy_KD5SPm5vgHpmLoKMdV9tgMs_bE4xU,11577 +pygments/lexers/arrow.py,sha256=e6mb3Ix4jL7TMGNUpk8iRbJy_VF7VY2J0VUknlo1EvA,3567 +pygments/lexers/arturo.py,sha256=GkklOaiEJTOz8E0zdxd_hd1zXwv7lYgp8IJl7uh13hg,11417 +pygments/lexers/asc.py,sha256=CvFSi7FYDW6eu-5oiKGc0MN3aF1XVZDoFI5-3mHpBDk,1696 +pygments/lexers/asm.py,sha256=jHJ3CDtvHgu4hX4kVvvJyciFLWM6FAYFDPG_zjBTyrc,42219 +pygments/lexers/asn1.py,sha256=MLakOeBFkdhKKr42eHFzonpGITEJxR_God8vqjWPva4,4267 +pygments/lexers/automation.py,sha256=gdSYslkW9sblDcvMeK1ay15rb9vRuK56pPOJzdxlIvg,19834 +pygments/lexers/bare.py,sha256=AdqL9Z20nd0bhNGWielLhAHej6V8_8m1OWUkCOrSBx8,3023 +pygments/lexers/basic.py,sha256=NqNxIeHhjBUtenwCdJ88WWEE3N7a16DA9g1_D1mdmF0,27992 +pygments/lexers/bdd.py,sha256=KeuOzXBLJRxxM2gV7aMuYsdV_aJFHyBuF8ptBpWxD5M,1644 +pygments/lexers/berry.py,sha256=vIfT4sBtm6ao64WdNkJTVJmS00BiFdEWvb3obBXBeX8,3212 +pygments/lexers/bibtex.py,sha256=sgfFqXyNyCxROHIe8C0gre69rzbADALnpDtTaqHfSas,4814 +pygments/lexers/blueprint.py,sha256=dW-L5bFIiWxRPDashee2EOYmXIRHdwuCZ1y3iN-a6_0,6191 +pygments/lexers/boa.py,sha256=VqYF1Yg_XdDA5tK_Eiuo1rr7ediBsKUTpB6xza_B5d4,3924 +pygments/lexers/bqn.py,sha256=se4W2XPsNiT36z0Am9X5F4yNLrQvMI5X8miY6R9SEM8,3674 +pygments/lexers/business.py,sha256=MXYomjQiYaTmnUgjQ7LUxjcU1i2iVDjZdXPTeqOlDjM,28348 +pygments/lexers/c_cpp.py,sha256=2ViQoLY22Y6xdzXDmwz30rMcij7EemNq4vpTEsfl7sg,18321 +pygments/lexers/c_like.py,sha256=rafLNQqcEbf-97VpDtSIQZSSmdiMD4oBYeEs7DZyByg,32024 +pygments/lexers/capnproto.py,sha256=kc3rT95GkeQmrW1LeP5HMVb7Nk8sqXkW6OJkkWIHYbA,2177 +pygments/lexers/carbon.py,sha256=_M-0YbofjZMrJIaiXKoT6wpkd3_6fFq-OdSZ7V961I4,3214 +pygments/lexers/cddl.py,sha256=7Wri2q2yKexWo0CD7s2c7AoCAMbrwyEE6f_gz5B6x1Y,5079 +pygments/lexers/chapel.py,sha256=SvZXNJijW0greFToDHZQ0yeo4_3niyTx31Iz_yLctck,5159 +pygments/lexers/clean.py,sha256=_oV3Tbmrpl3S1phPMS6gyMWyGZGj6Wy0q8adpf5vunY,6421 +pygments/lexers/codeql.py,sha256=hAX51uWeG5Zk9JhXSnOxPMaznFOpaX4ZnZHZ2krrPGk,2579 +pygments/lexers/comal.py,sha256=0fcTyV5h36zPlxoQPWWGet2yaYi5ESXiFpx6EbsqP00,3182 +pygments/lexers/compiled.py,sha256=PRimy7eb2weP7X6bEukz1JaSLuQAETO34K7maHLGbxA,1429 +pygments/lexers/configs.py,sha256=YDBIWV_R2SiRb0Cbxol-S8RRieDfAg_YcThc1BmZ90I,50925 +pygments/lexers/console.py,sha256=MrTIkXDYQ71z0yDepwbpO9WwXPQRPHAFAOvt_ngscxU,4183 +pygments/lexers/cplint.py,sha256=wPs_Zh0OJ2_UUeF9_mu_r-X_65DJSWMCTKOmOyjWLz8,1392 +pygments/lexers/crystal.py,sha256=k8Xy8TpfGKTPvsP1WikxILScILD8iXZ5qceLZopsLYE,15757 +pygments/lexers/csound.py,sha256=7KHoJv8wtuIe9dques4mFFkQLULcGQ23dgZ_KBd0f_k,17001 +pygments/lexers/css.py,sha256=OVjsd0Kn9pNH6iltS4ikjy9nnilPlCVx-VAZ26n3UJ8,26439 +pygments/lexers/d.py,sha256=ee5oImP1BcHYZ93UwXoHIzw6BztgR3WCCv3ksu2J-o0,9923 +pygments/lexers/dalvik.py,sha256=0f5mB5y8g0l1ID2liJ_5qM2NjyR_SBs7ckuIDeNbbj8,4609 +pygments/lexers/data.py,sha256=MhLXoZXYMZFmmKlqL8RV5zp-eUwVb0yFzsyzXYrpKiw,27049 +pygments/lexers/dax.py,sha256=N-5fxukBfFtFCT2LJoyg1VJsfldWHxXKLY3ZPLnPrig,8101 +pygments/lexers/devicetree.py,sha256=Tig-tTSrpmGzbGxN2ka4MMTiC344roav9A4hoc9o_kw,4826 +pygments/lexers/diff.py,sha256=-627lmuYpJ2uUVUM_orVGgXJGXIBGSVHbmlqflCEeaM,5385 +pygments/lexers/dns.py,sha256=ElzSN3IEoRs1g6iWMnzVIV8H0x9WSEF1wBjm2a0ByxE,3894 +pygments/lexers/dotnet.py,sha256=GmoaOxSoITkYdAUaVJ0Z1RQa6MvcRxh1gQ3XxgEs4X8,39444 +pygments/lexers/dsls.py,sha256=a6Jdv3w1aF3vpQJrnF32MNHAl6pEZkkeGB1cYv-x2Hw,36753 +pygments/lexers/dylan.py,sha256=fi8mSyni4dnGP5x1crE6BKNqLWJs4dDWdmkur-TbbNc,10412 +pygments/lexers/ecl.py,sha256=XJ66PU9EjkXJwA4f1OfC0RdfP49v8N62RZXnFVVAYNo,6374 +pygments/lexers/eiffel.py,sha256=m2eVPDG9J-VXieCeOy0S7zHPjiFHQB9F6bOIy-K3MDQ,2693 +pygments/lexers/elm.py,sha256=Xj3HlbUTEkEUFwhem4NCujx55tQ2TS61_Kcf6BCPHAI,3155 +pygments/lexers/elpi.py,sha256=RZqAVzdxfA_BqBmtMHj8ATw5f9e24SKL-QeO14QuKk4,7904 +pygments/lexers/email.py,sha256=NPlGc2L6F3KWWDop3ZOJGpUM6LDnOEqKZj1YJG3vP2M,4807 +pygments/lexers/erlang.py,sha256=2aUlVBVFtQdR2QISsDeKTOPjGrV7r1ckyXL17KJot9c,19150 +pygments/lexers/esoteric.py,sha256=tmJ9Pa1wYtYgR6yDqgp3zDYuXPFzXw7SLTHOH_eE3p0,10503 +pygments/lexers/ezhil.py,sha256=tbGYtGnqZAB8ffnyPdeMMJHtKYJ7OAciUFRrR_8QpXM,3275 +pygments/lexers/factor.py,sha256=HBUqNcwd4VplYw30ScuC8YgdipFAYiqjRgWsyT0s4xQ,19533 +pygments/lexers/fantom.py,sha256=w9IU1x61Codd7Oxv5DpvVlkVPn_bfvVLiP8Ae3660zM,10234 +pygments/lexers/felix.py,sha256=DvPOs_Em6fFtUBQCaZWWHWgnvAFeBrDe97Cy9XyQ8ps,9658 +pygments/lexers/fift.py,sha256=Q9OLJivp4ngqFso7APCvX85h-ghdbp9ZqpP8GKHws1M,1647 +pygments/lexers/floscript.py,sha256=TcyuCiv9bAb89x1_7_Zq6nUhsdsHAF2DPsKZ6m1W1NU,2670 +pygments/lexers/forth.py,sha256=4e0OCfJGcElkQrHInmQMucvsI9dljuPx4FYg38dyjno,7196 +pygments/lexers/fortran.py,sha256=ouvOHdMbj56j40Nrj21WCRf4rNKpuwphxmtlUVsnzhc,10385 +pygments/lexers/foxpro.py,sha256=nV8yyES39QoFmTNlM8xl3MteOsvmt6yV1l6x6xnXqIU,26298 +pygments/lexers/freefem.py,sha256=ZmZLIYLXtCK2kWqGzBmrkp7ychOy7vgfaorafUkUl1I,26916 +pygments/lexers/func.py,sha256=ZjOerj3njJcaGnHWc_npWjxb4D5O-udxBRK7RLHEibU,3703 +pygments/lexers/functional.py,sha256=4m9JlAnfpXxSkQQIrNOob2VTwin6hPLAqWuhv9aVkAg,697 +pygments/lexers/futhark.py,sha256=mCU2yvloJF5LN7uRE_8rCVqA8HAINny04HXVxjSiM4g,3746 +pygments/lexers/gcodelexer.py,sha256=t8JMWaaDjWZwdGiRGwrpsYtfjKTmibwlRyOLCiJZ8qw,877 +pygments/lexers/gdscript.py,sha256=VNOwd7KcDREvoBAfw66GjxxQygPsHH2akdzRbPNKqoQ,7569 +pygments/lexers/gleam.py,sha256=Jx6LnUpWV9pmjdSYLUMt16c3UamZ-v5X6CcuEhWxf-8,2395 +pygments/lexers/go.py,sha256=ynHVStw2XUJFlUiTS4UqY65fLkvcJBMNzoYtSNSl_BU,3786 +pygments/lexers/grammar_notation.py,sha256=ZGYcwhSvOolw1J7cA9Pc7hVpcIselaCgxwsvAf5yRgc,8046 +pygments/lexers/graph.py,sha256=X_IsdjxXZn5p9Dq4cLXyqwo4C8emcYA8NP6VZ9ZTBaU,4111 +pygments/lexers/graphics.py,sha256=IeruxvEQR1Wsu-YyLpAjkUnisLDzUrk2cd0xXrDCbjw,39148 +pygments/lexers/graphql.py,sha256=RWE_kfVUDEAnv_-SYyOi9IJUks61NvOakmUfWOlorzo,5604 +pygments/lexers/graphviz.py,sha256=DRNWwetEWasJDtiLkdqkKsQHDryJ5W80wUD2PE6oKCs,1937 +pygments/lexers/gsql.py,sha256=pzSj5Pd4vtNLES-pIdxaG0vOVTqUWE9UuOUw0RrY-oc,3993 +pygments/lexers/hare.py,sha256=eCrRPwcswXL-ZHTWNlhE1A05G9rxzS20_IqSddv2M9Y,2652 +pygments/lexers/haskell.py,sha256=sr5gq3U7xt0D2Bi41xdTsBaponX6otP1qPyRMCuxM7E,33323 +pygments/lexers/haxe.py,sha256=_MzQJTWx18kD9bP_sniBshKbtOZDRm4OZnifaOgw_aY,31169 +pygments/lexers/hdl.py,sha256=2jYX3fRWZ9mAZ_QER1WH58SvJaOdYQt0OmOxo3X2JK0,22741 +pygments/lexers/hexdump.py,sha256=VvYp_NTaE-6NUuSG4FMezwcjxbPnAz0fOWMzr3Y2Gm4,3656 +pygments/lexers/html.py,sha256=O6qkpyOylH2n1Zi1qzlmBsIEW31RfvzBn7jKTaAR2LQ,21999 +pygments/lexers/idl.py,sha256=W2QRH1j9LMaNvC5jb6MkP4dLYo_OQU6lQlCAlQveFH4,15452 +pygments/lexers/igor.py,sha256=Sv5EGBJeKy0UOyl6-GF3lBeeoWArVcMDJtQtl2RXRYo,31636 +pygments/lexers/inferno.py,sha256=cjVMyOg6jhJ6nYX333Z6KaCCeS0pNVHv4WTxyFGY8Tg,3138 +pygments/lexers/installers.py,sha256=ioK2JUPSJk9vcsoe3NASvMt-paXd1_Nn3e8WmaqJ7CE,14494 +pygments/lexers/int_fiction.py,sha256=BUjvXilUiuMF-euXEQDzQZs45jn_aLs17eTKcfEptrI,56547 +pygments/lexers/iolang.py,sha256=kMUBUqVvdHlB79ez0pFZPRf0NgiEC2SdtYUwdjnbE90,1908 +pygments/lexers/j.py,sha256=56qUbs1C7wKdZ6-WpNnxHntNqsxWroqiO7keI2yoR1o,4856 +pygments/lexers/javascript.py,sha256=muyCVZQAsXbhT7IHZkoqAkI5myhRaHze6JMAR4zILNs,63246 +pygments/lexers/jmespath.py,sha256=726PDpRr1g2Ky_x5pZORhbXofXxkfr5HXQ8eHMhX5xg,2085 +pygments/lexers/jslt.py,sha256=UOVw1J3uK-hVRWomoFK2jMS_yeuo5iqga-LhXUJI660,3703 +pygments/lexers/json5.py,sha256=GyeZ58AxKkJJiiH7JuPmakDZXropa6bDkFRguJHifqU,2505 +pygments/lexers/jsonnet.py,sha256=_T7Y16sW7nFuAoGFuyWUKCrUyJAC4FTWZVoeSt0mTXk,5639 +pygments/lexers/jsx.py,sha256=O0evHioyudtWLhVmAXRrIfkEd5KRiokgHu8l9_Kn8sM,2696 +pygments/lexers/julia.py,sha256=F0twMg0iLBNDdedsoJG4Rn5LoMciVu9cufyUVdxKQPY,11713 +pygments/lexers/jvm.py,sha256=cf5V7NOLIvB2s7RM9wghESYRCoCDyrGjRuPmY8bfGAw,72936 +pygments/lexers/kuin.py,sha256=n2lMina8SskpfA2KbPYQFwVJOSHgNYu10U5pz5YiajE,11408 +pygments/lexers/kusto.py,sha256=pyUfWLf9Q0Qwqu74DyPAbRh20lkPDLhxnExD1FVl8Z4,3480 +pygments/lexers/ldap.py,sha256=_ya4_InnSIRj-RYjGagWFZJDy1NdgQUocQzVWjiRp68,6554 +pygments/lexers/lean.py,sha256=qP_iwUQ7MJZ1C5s3KYBW8Wu6-uuVj4PAG3a2Ghjp99U,8588 +pygments/lexers/lilypond.py,sha256=o264ovGZFrcx8ZwpKRX-gjUECK1w5T8T0n6LoTjP92U,9755 +pygments/lexers/lisp.py,sha256=a5MGcmtrWTaeEj_5Uv8YMODr3dVV6s1v1_SMBh_Ch9Q,157903 +pygments/lexers/macaulay2.py,sha256=hLNDs1TdubudvqqovYN6a4Ne9JjnU4FvNE6EE2hPn8Q,34139 +pygments/lexers/make.py,sha256=wAV0KRRXTAFMliLfAKXDihQIE2VAVUvWjFZ-yuM7a08,7834 +pygments/lexers/maple.py,sha256=OBjODNLgqanyw522CE7n9thZctWi47Uua2hPBfj7KF4,7963 +pygments/lexers/markup.py,sha256=4TMRfujXvOE9xiWwTiT_fq1lau4dKSoXYac1hWMLYRs,65264 +pygments/lexers/math.py,sha256=Nspl6IZtCyh9egiYUahboWSmy4cJCVZUBxyVnCuF1ag,698 +pygments/lexers/matlab.py,sha256=QYVBdA-IRNcuWggBJNC9lpq3jgX_1w9GQwpv907VfZM,133030 +pygments/lexers/maxima.py,sha256=ha-f-JzGkjggAr5kxmVq7_uJTCcpHstQWnsLHYIhNwU,2718 +pygments/lexers/meson.py,sha256=ZpNVp7lSHwJHEj7pwXlGcy9Lftdc6MMgJiOoY2TnfM8,4345 +pygments/lexers/mime.py,sha256=l5BsFkad3agJ6KDg_IHI6nU0Ao0Y9EcwjYoYP8vQD-g,7585 +pygments/lexers/minecraft.py,sha256=bBbMbqvsVgTfO7CBxf03oGP_qOAgrw06PSzH1QF7txs,13701 +pygments/lexers/mips.py,sha256=QpgBoMPzsk1t90cIF0W_EV-QZ8LwgTapS-fSISxZcFc,4659 +pygments/lexers/ml.py,sha256=shDPgWARTzTEhGDeblV7ZW2j6yZEeBiYqx2z24SDP7A,35393 +pygments/lexers/modeling.py,sha256=2_ucVFy7Z4yoKx90E9-ASI9t4t1LIoeQ3yHarhghz-c,13764 +pygments/lexers/modula2.py,sha256=dXW0KNFu3ETgABsPMsIhdJI0DKF-KWtT831FKsz2VXw,53075 +pygments/lexers/mojo.py,sha256=imOlg8mQCoKwi_rsuMiifZrzsAsSqT2KckCs_MV5uVE,24236 +pygments/lexers/monte.py,sha256=JTx-jSrFKlWRwA1qzhPd647ETGSElwhP4YMlVrfXyX8,6292 +pygments/lexers/mosel.py,sha256=HEYOmZiANhe9VdkTpBt7mhPi_tPGBUkVsnzTh2HVdVI,9300 +pygments/lexers/ncl.py,sha256=RZXkxOK-iHspLX_sFVT0wc5H6v-pa4kKghP79fJGQPQ,64002 +pygments/lexers/nimrod.py,sha256=L8Ww5CUaP-ojywoQZXnjC08m5Be64fBynSf5aWGIaPw,6416 +pygments/lexers/nit.py,sha256=xwu2P49Hu8YxEL7mgpV5WWFw6OYF1iJg9DVnc7H1SXk,2728 +pygments/lexers/nix.py,sha256=M2E-k--F7pneuMAOae9ag4srJsKhWJ1f1fRueO6PtKM,4424 +pygments/lexers/numbair.py,sha256=KZOw96Tj7Gly-f9F8NA-tdGd53SIt5UgbpokZsfnzWM,1761 +pygments/lexers/oberon.py,sha256=uH1FkPeXCfdd0IQ_--S8SHUNaf2dnjiQZjtIT-jxu4A,4216 +pygments/lexers/objective.py,sha256=KME-J0UL2HJYAk2fHNhf7ApQe4XF8GH6qGOvYA1wPS4,23300 +pygments/lexers/ooc.py,sha256=HEjWHdQDVk7tRb_TuEb1_C5qi-peJwwyYBVAhf49MS0,3005 +pygments/lexers/openscad.py,sha256=te2iL8VkfXul_PYXlz8UK3_QtMjdmNgt0YHCYEvdEtM,3703 +pygments/lexers/other.py,sha256=OAlXzsrVDUx4Ma25fyG98U5LaBEHyt-LJZ2IHvMJJWY,1766 +pygments/lexers/parasail.py,sha256=oPcs7fRYNkV0isPSRmw-2cPfMgzTtNPi6bJOm7vq-9o,2722 +pygments/lexers/parsers.py,sha256=g4tVvf36yhT_aH_b737o0o1joojcf-XckD2HdNoNbsQ,26598 +pygments/lexers/pascal.py,sha256=4dewXkwc12f_iiMfdNDqbxb_oqAEX2dCzb5VIZ62V54,30992 +pygments/lexers/pawn.py,sha256=adsa-7sPuPk7mO7lB08auabEpeZdNewVqTBg6FS8mCk,8256 +pygments/lexers/pddl.py,sha256=a8A2keCF9qNQvVZQirgBV5fH4u3cjrta8-eZJ8tsxmY,2992 +pygments/lexers/perl.py,sha256=uYMj6amZPawLf-KjICg9LLU5-ADKzqEYKqi7VXnvm0k,39195 +pygments/lexers/phix.py,sha256=nEWWt5-OoIDapw4IJ_cNL19Th8Ztt6OIQgeuogVed10,23252 +pygments/lexers/php.py,sha256=U6wmxPM-pS-SY1N3qdv0ebUDBxLH6-EFIFcatUvg74w,13171 +pygments/lexers/pointless.py,sha256=gNcuhhOY9cEpIPyGFjMh2DItNB1UD8CmjceXkDRModE,1977 +pygments/lexers/pony.py,sha256=HKqf5AngUOdXN1N1VvSg0jTj3NUAzGBOPdXwSFamvmM,3282 +pygments/lexers/praat.py,sha256=sfVAd7zfRsI-TcCouuYMWlcU8EBxAwJ4wkh0ZuNsZ34,12679 +pygments/lexers/procfile.py,sha256=fhRTtscyMMPcPbvzjxVtXix1mYlpO-4JuWUL7Db4ENI,1158 +pygments/lexers/prolog.py,sha256=4YvPZbAFLWNZMbZhvtIulWib0PQLA_TB93gjkNywFRA,12869 +pygments/lexers/promql.py,sha256=04fS_R6HBWhpKNe3WPp_QXgIdFvJr5p6KxStZYw7yHU,4741 +pygments/lexers/prql.py,sha256=XJhd8dpEWPBIKmQXx_09-z1NyotLXIskpuzX36HykHo,8750 +pygments/lexers/ptx.py,sha256=dEaNSReAjAymIwVM_MnbdY4hd3muK79q200hYwN72cw,4504 +pygments/lexers/python.py,sha256=79A_yJqjVHp_ZeS1rY8Pcc4cwZ_7-zI2WWkeUmZgUrA,54202 +pygments/lexers/q.py,sha256=2CbJYgRu8uz8wOSp5FMr086KCC7IjSRmIODVY0uKrCA,6939 +pygments/lexers/qlik.py,sha256=9b6Q-6jXeeraIRcWtsKsYWCOlBhfmjNzIN49pUvMR-I,3696 +pygments/lexers/qvt.py,sha256=rpT5oD4awEKMs3uBCbNlzY1zCz7AVeqM-BikLTQHo8k,6106 +pygments/lexers/r.py,sha256=hzgUUH9gCsqqwTG9eCt1JVMd9RW0dy6IY_jTvRntF-U,6477 +pygments/lexers/rdf.py,sha256=FM154fB1lxfOpSpeW6bWOVld5kJBgEuYyT9udt-EcK0,16063 +pygments/lexers/rebol.py,sha256=CQ3pMaAz64UMqiQVrYoREnOgjU5QPeF_H3aaHNdVRC8,18262 +pygments/lexers/rego.py,sha256=Yi0G4secTWOL7CUhLLCzJ9gA6SJzDJpPJNij3hXixC0,1751 +pygments/lexers/rell.py,sha256=0gZStI953aFjFMyMVX_UKfwZmzV3uj3Tnl_5F20BPx0,2487 +pygments/lexers/resource.py,sha256=RDEY7iSv2hgQ3V-I1DeOKVOumNKgFqbV2Me_9Y21o10,2930 +pygments/lexers/ride.py,sha256=1zg7kGYPKRIekyuZY3f6OVqRlXyrX-sR-tHEX1M9nz4,5038 +pygments/lexers/rita.py,sha256=fCNElPik6dDIZzGd96kKgdlY4Qqp_zVI87waTBHMBfg,1130 +pygments/lexers/rnc.py,sha256=PNfnnTlnZjNvvG33BNMCqPzdA9LZEYvDsGoaDd_2mn8,1975 +pygments/lexers/roboconf.py,sha256=l6BeJIS-ZAUb3zc5GEuVIb0edrRpPFmKdffEBx74Zxg,2077 +pygments/lexers/robotframework.py,sha256=8s1U7GldhzRPGR7d8_rMSG0ZJz9ZOm-LGqBTGILwt2U,18451 +pygments/lexers/ruby.py,sha256=BxndG-3gLg7wEGvICiNDAMygQR9Qs0xrmq7DwfoLvMs,22756 +pygments/lexers/rust.py,sha256=7qD3KGVir-tUWf_bPBt4EX8l9aIWe8TblMCFI75oorc,8263 +pygments/lexers/sas.py,sha256=9hmGYhmKo7ri9oCmZdsoEFMz6z3AwKD4LcTUIn52lYU,9459 +pygments/lexers/savi.py,sha256=HCllgBe3rzP_7-2b1hSUEbE2dYz08Iysxc6Dcxj3f2c,4881 +pygments/lexers/scdoc.py,sha256=9n64S-bO1dI4x-2Kzh2jrDM6gEmB5YI7ch6ndPB4-cw,2527 +pygments/lexers/scripting.py,sha256=c-i_cMFhYEHQZZbLgJn0jyiD52vWeZVxUK6wFV2Fib4,82959 +pygments/lexers/sgf.py,sha256=ya_sG4TOvDWwzEM6cIi40XSbhkTuHCqvo-uHnNYfi5Q,1988 +pygments/lexers/shell.py,sha256=si6MAn6S7pHfyX65NkwQdjxtLtEu1M4l7K2lUHvyzWw,36384 +pygments/lexers/sieve.py,sha256=CXR9S1nGeVTln2u5R600hxgkAmJFVVTTTBh_k3aw-pk,2517 +pygments/lexers/slash.py,sha256=molh5sNG8UtheHffQGjjHLxVtyg2oCp4B8cvgDyf004,8487 +pygments/lexers/smalltalk.py,sha256=q52NHegl3pjn1jMkJWA9J0nOA_A5VzUULZAPgb-6uNU,7207 +pygments/lexers/smithy.py,sha256=hqEImo4B-i0hddNo1r0k5eHLOlArtRW4W5PsLBdllk4,2662 +pygments/lexers/smv.py,sha256=D51In9Qr2nWFicYAOX_bJpeFgmJ3BUkEXnvXPmA04D4,2808 +pygments/lexers/snobol.py,sha256=BX_1VPUZi-ckKCYlF2sttQprF2bLUHOi8blhInFHv9s,2781 +pygments/lexers/solidity.py,sha256=pz0DZ0xiHwufhbQ9hdCPrHMjA7NMsa6hxZ1nO-A4Y3E,3166 +pygments/lexers/soong.py,sha256=TVqBJzJxLwCEDmb5Aix2_AVuZILYyPWcAzFbzINNQAM,2342 +pygments/lexers/sophia.py,sha256=MAkWLYxhHJNcc3UUzWD3VDLzVRbf6IAd2uz4q7DY3wI,3379 +pygments/lexers/special.py,sha256=8gpTiLICFNwIcahm282mi8Nqi_6gwYMsAfjJIM2Bq6k,3588 +pygments/lexers/spice.py,sha256=UcrjK2KJDDKSEgEOeLXTayqRwirWdsywrfH9MqwPPIY,2801 +pygments/lexers/sql.py,sha256=zz_TFZtf5R29cvkBFG1PDJ-0KKrY7FjA-RDky-gfNlk,41656 +pygments/lexers/srcinfo.py,sha256=MHj02VB7WP3AcoOz1WVYAqNM_3oFLjEaAda075kXVBU,1749 +pygments/lexers/stata.py,sha256=KojmkxWHlEk-HvmxI313a52nzH_JHNqfv6r3jJoFDy8,6418 +pygments/lexers/supercollider.py,sha256=H-qwP6sUaotemsXxuugpui6KnU3jtIdDfS3bGt3Jvh4,3700 +pygments/lexers/tablegen.py,sha256=ryuzw-ArLdvlY55YJBn6ebOzDo3L8UnbR5xcZB8xMso,4012 +pygments/lexers/tact.py,sha256=YbOWYNp302ZBPM5affzM4-CudJ4xRZyJb-_vSDCozCo,10812 +pygments/lexers/tal.py,sha256=xZYmhv8mBr-A3oeoFkcSP7nDbX1r21kKOnhsLTtL3bE,2907 +pygments/lexers/tcl.py,sha256=MXMAo2wCZeq5oR7VzsNbamawun_vMOk2gOEDnGk6yOc,5515 +pygments/lexers/teal.py,sha256=pgpPi9xWPumSr-heSpGBGDcU5s3qN0aFUj2maHLE3MM,3525 +pygments/lexers/templates.py,sha256=ndJMdue33qQ_thsortAra1fm_-szMN40S5bZXBLY54w,75734 +pygments/lexers/teraterm.py,sha256=YCdvILRq-FdOJ_HfiIkYVToH3c6I2QRbQoH17Q49Hx8,10045 +pygments/lexers/testing.py,sha256=GF5SpanGwjgKKoYari32SdtdOWWXXje8xVX5-ZzTjLg,10813 +pygments/lexers/text.py,sha256=yq6mOLz3PizKNMm4_Y8UHn9vEeBfqEY5W-3M7dk1jYs,1071 +pygments/lexers/textedit.py,sha256=V-Ijh0eULTWtx3S_6vT7JcyjQO-CNirpAbEpkczLGCE,7763 +pygments/lexers/textfmts.py,sha256=WSiJDzNKCcOsBoTU4NCYU33ti5ZHGi0Px29eGjT6pB0,15527 +pygments/lexers/theorem.py,sha256=c-eg6tIYWUbxSYgrOtLZfFxro4g91JcAIwrIJGLPooU,17903 +pygments/lexers/thingsdb.py,sha256=FNKjArS1vadHEN3ZXaZdDLqvcG59GbF6Ak5M8RMsTLU,6257 +pygments/lexers/tlb.py,sha256=vWsFq_MrIrzgG2q-qdzpcoNiv9KkNow4x9ebs9_HAwA,1453 +pygments/lexers/tls.py,sha256=GZ1lvZ8PUk1-LThq1KuiqSEnMqJVqiHo_A-16aKuJOc,1543 +pygments/lexers/tnt.py,sha256=MNYTfkix0x5L1pDV8V8zGLS7GU8z7bt-wpiBap2W09U,10459 +pygments/lexers/trafficscript.py,sha256=1Tawom6bKJM-u1kYey21xFaHep3D9kl62rV3RaHSvcI,1509 +pygments/lexers/typoscript.py,sha256=dWAuOYYk0X4xE_fxKgcXz-sMYlG2wxQR4m_d7PW2PYs,8335 +pygments/lexers/typst.py,sha256=ZtV37NCQCoSqEKrAhffuZtUp1kDahKQx8PxMLrR2G6k,7170 +pygments/lexers/ul4.py,sha256=joM-US0-2BEWRHHK6eyZ-Y--306YgbMrevMY0hg8QTY,10502 +pygments/lexers/unicon.py,sha256=9D-GilKOqIEaowGHmdvHRjhqyigRk320ARAr-G_snew,18628 +pygments/lexers/urbi.py,sha256=j87k6fe60kZdbJ6AJA4g9E0no1EP0bxJY06UVP0YgnU,6085 +pygments/lexers/usd.py,sha256=tP9kHPZUJcE_flPvKEsF7wTck4FuQW75S8ELh4IYJEI,3307 +pygments/lexers/varnish.py,sha256=INCtYbol1yV_NXMh51CMDZDacEHgiB3SmR3H5uW-Fkc,7476 +pygments/lexers/verification.py,sha256=QISJmmz7cmYaXZphW5so4PfdKJo0rdZWuj_de64mafk,3937 +pygments/lexers/verifpal.py,sha256=0iZTQWawF9f7lFZnFNPTwYRiu6L_25PtEI83tuCAmKY,2664 +pygments/lexers/vip.py,sha256=pM5xFoeu-2GPWqAVzZJe6xPYr6WJPlozVI5084SHQ80,5714 +pygments/lexers/vyper.py,sha256=rw2yD9c2L7yPtXqKPd0RdxJSZK-aJbW5jEa0gHfKEO0,5618 +pygments/lexers/web.py,sha256=YMrxoHlKlQwfFJlae9_T9F1Vp03YA7IWsmgqLzh6pgI,916 +pygments/lexers/webassembly.py,sha256=Y48VdBp8b4SI10PEt8biOm-gteYgmNc_bRR-KvulfnQ,5701 +pygments/lexers/webidl.py,sha256=MQMaZFskluB29lMGpiGP2f6fzoja4vV8FtoCGFQH6fM,10519 +pygments/lexers/webmisc.py,sha256=GnSTSHfsAUGy5DYF_qwFmMsyabnzTstaEG96Fub53fk,40567 +pygments/lexers/wgsl.py,sha256=vgDtY_q42TGRvcsyS7viSadU8D3eHvatfoCNNnEP0zE,11883 +pygments/lexers/whiley.py,sha256=MC2V7o1s7LIoLy0Fk-i8OymBoN0dpK2BPjIrdQwsnVk,4020 +pygments/lexers/wowtoc.py,sha256=XnLSSX7p_RkwZIzlmYXO873d4byX9-A3dfvzNL5Eocw,4079 +pygments/lexers/wren.py,sha256=2lhzwpS27xW6nERY_NeODgNN9X6mB7vl3odXSPngqY8,3232 +pygments/lexers/x10.py,sha256=77pAyohtv9PFwPvJPGgq-Vw8tEXo9Sny1MnTchFpTTI,1946 +pygments/lexers/xorg.py,sha256=LUKU91t1Opc3W_F64UGzx0nB-HqKlN3Cn_6tFzt5IYw,928 +pygments/lexers/yang.py,sha256=yIvwteHWBWL2-8zZscnCPJOnq3rMOXh5lxJujywA_Zo,4502 +pygments/lexers/yara.py,sha256=_ISzhko7v9AhJ-1cWINZSyG3my2vJOyFSHxSjm4VdTY,2430 +pygments/lexers/zig.py,sha256=q0tuplpRFAUSS29hg_XiRH22Ctve2EFAT5xo7TNAlrk,3975 +pygments/modeline.py,sha256=me8g5rySidvPtMBOrDy2O1sMqZd02lBHMY5KDZefUws,1008 +pygments/plugin.py,sha256=P6zIw-vkSQ0k1WKHrzbBTjpwlCZPbR-6Qe_dO9Bjdu0,1928 +pygments/regexopt.py,sha256=d2hTvazlow5zzZIOCVnfeEG2CY0GrY_igH1kCSSf7ow,3308 +pygments/scanner.py,sha256=DtoLi1pOKpNu-6jiakJpSQLUi_ep29SQQhv7oAwYWME,3095 +pygments/sphinxext.py,sha256=qmiWv5b7qq6bNUT2Y2wZejV2ImQmvtzGJFg6LcGENl0,7901 +pygments/style.py,sha256=Hrie373bgWU81ZNwVjZM-GNBoxBxVe1W1Tr3MzJLdkY,6411 +pygments/styles/__init__.py,sha256=2vgGKnbyt0nf_CSDEtMhHxppPSAPr2jafzl5FiXFuGs,2009 +pygments/styles/__pycache__/__init__.cpython-312.pyc,, +pygments/styles/__pycache__/_mapping.cpython-312.pyc,, +pygments/styles/__pycache__/abap.cpython-312.pyc,, +pygments/styles/__pycache__/algol.cpython-312.pyc,, +pygments/styles/__pycache__/algol_nu.cpython-312.pyc,, +pygments/styles/__pycache__/arduino.cpython-312.pyc,, +pygments/styles/__pycache__/autumn.cpython-312.pyc,, +pygments/styles/__pycache__/borland.cpython-312.pyc,, +pygments/styles/__pycache__/bw.cpython-312.pyc,, +pygments/styles/__pycache__/coffee.cpython-312.pyc,, +pygments/styles/__pycache__/colorful.cpython-312.pyc,, +pygments/styles/__pycache__/default.cpython-312.pyc,, +pygments/styles/__pycache__/dracula.cpython-312.pyc,, +pygments/styles/__pycache__/emacs.cpython-312.pyc,, +pygments/styles/__pycache__/friendly.cpython-312.pyc,, +pygments/styles/__pycache__/friendly_grayscale.cpython-312.pyc,, +pygments/styles/__pycache__/fruity.cpython-312.pyc,, +pygments/styles/__pycache__/gh_dark.cpython-312.pyc,, +pygments/styles/__pycache__/gruvbox.cpython-312.pyc,, +pygments/styles/__pycache__/igor.cpython-312.pyc,, +pygments/styles/__pycache__/inkpot.cpython-312.pyc,, +pygments/styles/__pycache__/lightbulb.cpython-312.pyc,, +pygments/styles/__pycache__/lilypond.cpython-312.pyc,, +pygments/styles/__pycache__/lovelace.cpython-312.pyc,, +pygments/styles/__pycache__/manni.cpython-312.pyc,, +pygments/styles/__pycache__/material.cpython-312.pyc,, +pygments/styles/__pycache__/monokai.cpython-312.pyc,, +pygments/styles/__pycache__/murphy.cpython-312.pyc,, +pygments/styles/__pycache__/native.cpython-312.pyc,, +pygments/styles/__pycache__/nord.cpython-312.pyc,, +pygments/styles/__pycache__/onedark.cpython-312.pyc,, +pygments/styles/__pycache__/paraiso_dark.cpython-312.pyc,, +pygments/styles/__pycache__/paraiso_light.cpython-312.pyc,, +pygments/styles/__pycache__/pastie.cpython-312.pyc,, +pygments/styles/__pycache__/perldoc.cpython-312.pyc,, +pygments/styles/__pycache__/rainbow_dash.cpython-312.pyc,, +pygments/styles/__pycache__/rrt.cpython-312.pyc,, +pygments/styles/__pycache__/sas.cpython-312.pyc,, +pygments/styles/__pycache__/solarized.cpython-312.pyc,, +pygments/styles/__pycache__/staroffice.cpython-312.pyc,, +pygments/styles/__pycache__/stata_dark.cpython-312.pyc,, +pygments/styles/__pycache__/stata_light.cpython-312.pyc,, +pygments/styles/__pycache__/tango.cpython-312.pyc,, +pygments/styles/__pycache__/trac.cpython-312.pyc,, +pygments/styles/__pycache__/vim.cpython-312.pyc,, +pygments/styles/__pycache__/vs.cpython-312.pyc,, +pygments/styles/__pycache__/xcode.cpython-312.pyc,, +pygments/styles/__pycache__/zenburn.cpython-312.pyc,, +pygments/styles/_mapping.py,sha256=6lovFUE29tz6EsV3XYY4hgozJ7q1JL7cfO3UOlgnS8w,3312 +pygments/styles/abap.py,sha256=T7Ad121Kjz8ZbuphyhulULwXyvXubSS6Czc0MDOFiJ8,752 +pygments/styles/algol.py,sha256=6v6ZXLxPJsm_1qPqOu5ihAuWhVk644NdPXPcZqiOTTI,2265 +pygments/styles/algol_nu.py,sha256=581Lf5db303g7zDb7z-VlvGlFNpbXMIlAnrRBxtjGts,2286 +pygments/styles/arduino.py,sha256=oFF5gjfbQNBigKxFhC7giUIHWl0ieCshhsm9UxCIzqg,4560 +pygments/styles/autumn.py,sha256=P9IX5utjDvdJgevM0kNGwE-wvVysRh1lgMkjO1dhDDE,2198 +pygments/styles/borland.py,sha256=V8qTD8SrS0wLohp2udIirlijbHfPoIlWZkYpqxSdkSw,1614 +pygments/styles/bw.py,sha256=e4Fo6Kyax2aRhqKsjsqvi2CW6lkU9QnjKoPrESZAj-I,1409 +pygments/styles/coffee.py,sha256=0jxdctCEKbR1AqzA59fFQK4sVBdErrRkGCUn6fPF4sg,2311 +pygments/styles/colorful.py,sha256=opkfOcjFTtDn9Fty6ogM7okFMB6bA0iR0VtTdBPE9NE,2835 +pygments/styles/default.py,sha256=WFLDucKJS_ac5Jqutmglbz4ITc4jTTQEOnqLIMpSttA,2591 +pygments/styles/dracula.py,sha256=H-EM1WM3Ixd2OBY9bzoObKl4IzdmhirfDVFbJ9Wsi0Q,2185 +pygments/styles/emacs.py,sha256=iEYWPgQrDQk4RdDf8-g7j4mtoFRzo2xcKvZE1ys9mJU,2538 +pygments/styles/friendly.py,sha256=S43XMczW53tKftmarEL4-nTroPVdN6k8fbNrINzR_QU,2607 +pygments/styles/friendly_grayscale.py,sha256=el2E804DEkKZ6ApXt3N5FTmHAkIhqkJj-omhE1E8mdE,2831 +pygments/styles/fruity.py,sha256=mrNDixp39QIoitRBo-yMOr8tnYLAwGd0EIMw2A6Gd8g,1327 +pygments/styles/gh_dark.py,sha256=yb4EOBAYsQ9g5IGEOqyZ5LKI-MUbwz32l9WB5EcMLaA,3593 +pygments/styles/gruvbox.py,sha256=_QZtRq9y1s3NYADHqek-DXELQqueOnVDXvWJ_1aTGgo,3390 +pygments/styles/igor.py,sha256=QO_M5-Z1xIxVnusEU4LZr4VT--nTERVt-cpfbnOv4yk,740 +pygments/styles/inkpot.py,sha256=bJvNWikqnAWLjx2AH4t_tW4jR0TsrUyVFXFVPAfOgkM,2407 +pygments/styles/lightbulb.py,sha256=CnDv1mF1X5k1msVvRS_diR4tkp06E_jg0tK9cdHEFCQ,3175 +pygments/styles/lilypond.py,sha256=gSCiazPPWfR_9-BV5BYHe6-2gSor4vOHnz3nyFQI2QA,2069 +pygments/styles/lovelace.py,sha256=vZ-S9tS-QUOwfA0RHYBhSTRE3h1ZU-bZnuMW7O6ksyw,3181 +pygments/styles/manni.py,sha256=dpsJC1Zees0e9IcWirqWuSzQxwD0jIbjhFquM23dmWM,2446 +pygments/styles/material.py,sha256=eyyAaJgp1Fnjl4-_D39_l1wH5CW4W0VWScQ3Q0jtAgU,4204 +pygments/styles/monokai.py,sha256=3CwJJm_YVGibzFi8nhK_b5d4cNzgV8z-L6RzC1UVA7Q,5187 +pygments/styles/murphy.py,sha256=HsPd80nObb3Ov3-7yJQbnJr3z9mCyz30Pjn_nkWkMYo,2808 +pygments/styles/native.py,sha256=oazEJUbuaHqqOZR5vx-jIhTPIw7dC6QZnKxW-xLr7fs,2046 +pygments/styles/nord.py,sha256=t7fZj04LUgsLaHpio66FqOGMKb7a2VC3teUlmInPfRg,5394 +pygments/styles/onedark.py,sha256=UXrMzVvA9OVP7OhCQ5gL0CaZrk5jo_I84HJm7LEN5GQ,2126 +pygments/styles/paraiso_dark.py,sha256=rx5_j4gnZRy2j-3hAhFmWpnjnd-Vfz3Z7fcwEXRUGm0,5665 +pygments/styles/paraiso_light.py,sha256=iERjbYRemqgsisv_9TjanBig4bJ_-80xRz5B5puhMc0,5671 +pygments/styles/pastie.py,sha256=mnKebFiJ2do9A_wTPq4tKtgw-TQqpb0lP3kNbemQ2to,2528 +pygments/styles/perldoc.py,sha256=-e1T4QBEQNE_l17RYW-WBowNprg2uL_hFjhSDcaMiDM,2233 +pygments/styles/rainbow_dash.py,sha256=YP2HPVXJLmOy8Yz8p8u5GbkAyuKxLlNmu6ITZBMa1Mc,2393 +pygments/styles/rrt.py,sha256=vs4pnWwmMIQR0eGczp06AYazsYiR0KFcRiRUzSf3rXc,1295 +pygments/styles/sas.py,sha256=ER-JjZgU2wEJWk5K06iHAFPjVYD1iMkvjjE0rlNOt2Y,1443 +pygments/styles/solarized.py,sha256=VdtLYhQ7xDj_iTHP_Ekr6rAdMcE3fRSrglngVSal63M,4250 +pygments/styles/staroffice.py,sha256=peExkIBqPkRhqXFpW99n49t6pMwehI_pDYVUmQwkgdA,834 +pygments/styles/stata_dark.py,sha256=N6imWykqHoT5drOn-sVRUzt1AZMKG3HHU-Q7LKz2d50,1260 +pygments/styles/stata_light.py,sha256=YW7ih4teXoEuUHImOK2rGSCVxfnj43tlcKPXUsrDkRg,1292 +pygments/styles/tango.py,sha256=Af0WVTxpH_cUdHr_ua69JDijTLI5aG1RmBTF7o2KTgk,7140 +pygments/styles/trac.py,sha256=-h5iU-LjSmt-dQ7y5jXwJ5LQ9ABR5QD5RlPRjhnzmv8,1984 +pygments/styles/vim.py,sha256=dWqCVC2YT45dLSpMTwLBPkKYgRsT1-cFf7ZHoc2PQpA,2022 +pygments/styles/vs.py,sha256=7HJehhtiHE5nibjC3r3X-aYHBm5BccL33LgO_4anEiU,1133 +pygments/styles/xcode.py,sha256=HmB6aPkxvmo4za5DijhLb5AXSyv6UbWnwOUoRUNBaZA,1507 +pygments/styles/zenburn.py,sha256=Ax7iBbMzvVQNTpT3YdMwCEkL-nIXnpr60aVrR4Lpa6o,2206 +pygments/token.py,sha256=DVil5T2ltHkTgQTUYy1dMtgyigjavCs1ypE4Qf2CrGo,6229 +pygments/unistring.py,sha256=Z4w4HfOVUhueCURRkfhAqQ2b-69UzkhY4xuHevYEy5g,63211 +pygments/util.py,sha256=zk935tJSpwSA9zxNmV1TuhcEP0MfboXiRZSGQnLPEqk,10046 diff --git a/local_packages/pygments-2.20.0.dist-info/WHEEL b/local_packages/pygments-2.20.0.dist-info/WHEEL new file mode 100644 index 0000000..b1b94fd --- /dev/null +++ b/local_packages/pygments-2.20.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.29.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/local_packages/pygments-2.20.0.dist-info/entry_points.txt b/local_packages/pygments-2.20.0.dist-info/entry_points.txt new file mode 100644 index 0000000..15498e3 --- /dev/null +++ b/local_packages/pygments-2.20.0.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[console_scripts] +pygmentize = pygments.cmdline:main diff --git a/local_packages/pygments-2.20.0.dist-info/licenses/AUTHORS b/local_packages/pygments-2.20.0.dist-info/licenses/AUTHORS new file mode 100644 index 0000000..909089d --- /dev/null +++ b/local_packages/pygments-2.20.0.dist-info/licenses/AUTHORS @@ -0,0 +1,292 @@ +Pygments is written and maintained by Georg Brandl . + +Major developers are Tim Hatch and Armin Ronacher +. + +Other contributors, listed alphabetically, are: + +* Sam Aaron -- Ioke lexer +* Jean Abou Samra -- LilyPond lexer +* João Abecasis -- JSLT lexer +* Ali Afshar -- image formatter +* Thomas Aglassinger -- Easytrieve, JCL, Rexx, Transact-SQL and VBScript + lexers +* Maxence Ahlouche -- PostgreSQL Explain lexer +* Muthiah Annamalai -- Ezhil lexer +* Nikolay Antipov -- OpenSCAD lexer +* Kumar Appaiah -- Debian control lexer +* Andreas Amann -- AppleScript lexer +* Timothy Armstrong -- Dart lexer fixes +* Jeffrey Arnold -- R/S, Rd, BUGS, Jags, and Stan lexers +* Eiríkr Åsheim -- Uxntal lexer +* Jeremy Ashkenas -- CoffeeScript lexer +* José Joaquín Atria -- Praat lexer +* Stefan Matthias Aust -- Smalltalk lexer +* Lucas Bajolet -- Nit lexer +* Ben Bangert -- Mako lexers +* Max Battcher -- Darcs patch lexer +* Thomas Baruchel -- APL lexer +* Tim Baumann -- (Literate) Agda lexer +* Paul Baumgart, 280 North, Inc. -- Objective-J lexer +* Michael Bayer -- Myghty lexers +* Thomas Beale -- Archetype lexers +* John Benediktsson -- Factor lexer +* David Benjamin, Google LLC -- TLS lexer +* Trevor Bergeron -- mIRC formatter +* Vincent Bernat -- LessCSS lexer +* Christopher Bertels -- Fancy lexer +* Sébastien Bigaret -- QVT Operational lexer +* Jarrett Billingsley -- MiniD lexer +* Adam Blinkinsop -- Haskell, Redcode lexers +* Stéphane Blondon -- Procfile, SGF and Sieve lexers +* Frits van Bommel -- assembler lexers +* Pierre Bourdon -- bugfixes +* Martijn Braam -- Kernel log lexer, BARE lexer +* JD Browne, Google LLC -- GoogleSQL lexer +* Matthias Bussonnier -- ANSI style handling for terminal-256 formatter +* chebee7i -- Python traceback lexer improvements +* Hiram Chirino -- Scaml and Jade lexers +* Mauricio Caceres -- SAS and Stata lexers. +* Michael Camilleri, John Gabriele, sogaiu -- Janet lexer +* Daren Chandisingh -- Gleam lexer +* Ian Cooper -- VGL lexer +* David Corbett -- Inform, Jasmin, JSGF, Snowball, and TADS 3 lexers +* Leaf Corcoran -- MoonScript lexer +* Fraser Cormack -- TableGen lexer +* Gabriel Corona -- ASN.1 lexer +* Christopher Creutzig -- MuPAD lexer +* Daniël W. Crompton -- Pike lexer +* Pete Curry -- bugfixes +* Bryan Davis -- EBNF lexer +* Bruno Deferrari -- Shen lexer +* Walter Dörwald -- UL4 lexer +* Luke Drummond -- Meson lexer +* Giedrius Dubinskas -- HTML formatter improvements +* Owen Durni -- Haxe lexer +* Alexander Dutton, Oxford University Computing Services -- SPARQL lexer +* James Edwards -- Terraform lexer +* Nick Efford -- Python 3 lexer +* Sven Efftinge -- Xtend lexer +* Artem Egorkine -- terminal256 formatter +* Matthew Fernandez -- CAmkES lexer +* Paweł Fertyk -- GDScript lexer, HTML formatter improvements +* Michael Ficarra -- CPSA lexer +* James H. Fisher -- PostScript lexer +* Amanda Fitch, Google LLC -- GoogleSQL lexer +* William S. Fulton -- SWIG lexer +* Carlos Galdino -- Elixir and Elixir Console lexers +* Michael Galloy -- IDL lexer +* Naveen Garg -- Autohotkey lexer +* Simon Garnotel -- FreeFem++ lexer +* Laurent Gautier -- R/S lexer +* Alex Gaynor -- PyPy log lexer +* Richard Gerkin -- Igor Pro lexer +* Alain Gilbert -- TypeScript lexer +* Alex Gilding -- BlitzBasic lexer +* GitHub, Inc -- DASM16, Augeas, TOML, and Slash lexers +* Bertrand Goetzmann -- Groovy lexer +* Krzysiek Goj -- Scala lexer +* Rostyslav Golda -- FloScript lexer +* Andrey Golovizin -- BibTeX lexers +* Matt Good -- Genshi, Cheetah lexers +* Michał Górny -- vim modeline support +* Alex Gosse -- TrafficScript lexer +* Patrick Gotthardt -- PHP namespaces support +* Hubert Gruniaux -- C and C++ lexer improvements +* Olivier Guibe -- Asymptote lexer +* Phil Hagelberg -- Fennel lexer +* Florian Hahn -- Boogie lexer +* Martin Harriman -- SNOBOL lexer +* Matthew Harrison -- SVG formatter +* Steven Hazel -- Tcl lexer +* Dan Michael Heggø -- Turtle lexer +* Aslak Hellesøy -- Gherkin lexer +* Greg Hendershott -- Racket lexer +* Justin Hendrick -- ParaSail lexer +* Jordi Gutiérrez Hermoso -- Octave lexer +* David Hess, Fish Software, Inc. -- Objective-J lexer +* Ken Hilton -- Typographic Number Theory and Arrow lexers +* Varun Hiremath -- Debian control lexer +* Rob Hoelz -- Perl 6 lexer +* Doug Hogan -- Mscgen lexer +* Ben Hollis -- Mason lexer +* Max Horn -- GAP lexer +* Fred Hornsey -- OMG IDL Lexer +* Alastair Houghton -- Lexer inheritance facility +* Tim Howard -- BlitzMax lexer +* Dustin Howett -- Logos lexer +* Ivan Inozemtsev -- Fantom lexer +* Hiroaki Itoh -- Shell console rewrite, Lexers for PowerShell session, + MSDOS session, BC, WDiff +* Brian R. Jackson -- Tea lexer +* Alex Jeffery, ChromaWay AB -- Rell lexer +* Christian Jann -- ShellSession lexer +* Jonas Camillus Jeppesen -- Line numbers and line highlighting for + RTF-formatter +* Dennis Kaarsemaker -- sources.list lexer +* Dmitri Kabak -- Inferno Limbo lexer +* Igor Kalnitsky -- vhdl lexer +* Colin Kennedy - USD lexer +* Alexander Kit -- MaskJS lexer +* Pekka Klärck -- Robot Framework lexer +* Gerwin Klein -- Isabelle lexer +* Eric Knibbe -- Lasso lexer +* Stepan Koltsov -- Clay lexer +* Oliver Kopp - Friendly grayscale style +* Adam Koprowski -- Opa lexer +* Benjamin Kowarsch -- Modula-2 lexer +* Domen Kožar -- Nix lexer +* Oleh Krekel -- Emacs Lisp lexer +* Alexander Kriegisch -- Kconfig and AspectJ lexers +* Marek Kubica -- Scheme lexer +* Jochen Kupperschmidt -- Markdown processor +* Gerd Kurzbach -- Modelica lexer +* Jon Larimer, Google Inc. -- Smali lexer +* Olov Lassus -- Dart lexer +* Matt Layman -- TAP lexer +* Dan Lazin, Google LLC -- GoogleSQL lexer +* Kristian Lyngstøl -- Varnish lexers +* Sylvestre Ledru -- Scilab lexer +* Chee Sing Lee -- Flatline lexer +* Mark Lee -- Vala lexer +* Thomas Linder Puls -- Visual Prolog lexer +* Pete Lomax -- Phix lexer +* Valentin Lorentz -- C++ lexer improvements +* Ben Mabey -- Gherkin lexer +* Angus MacArthur -- QML lexer +* Louis Mandel -- X10 lexer +* Louis Marchand -- Eiffel lexer +* Simone Margaritelli -- Hybris lexer +* Tim Martin - World of Warcraft TOC lexer +* Kirk McDonald -- D lexer +* Gordon McGregor -- SystemVerilog lexer +* Stephen McKamey -- Duel/JBST lexer +* Brian McKenna -- F# lexer +* Charles McLaughlin -- Puppet lexer +* Kurt McKee -- Tera Term macro lexer, PostgreSQL updates, MySQL overhaul, JSON lexer +* Joe Eli McIlvain -- Savi lexer +* Lukas Meuser -- BBCode formatter, Lua lexer +* Cat Miller -- Pig lexer +* Paul Miller -- LiveScript lexer +* Hong Minhee -- HTTP lexer +* Michael Mior -- Awk lexer +* Bruce Mitchener -- Dylan lexer rewrite +* Reuben Morais -- SourcePawn lexer +* Jon Morton -- Rust lexer +* Paulo Moura -- Logtalk lexer +* Mher Movsisyan -- DTD lexer +* Dejan Muhamedagic -- Crmsh lexer +* Adrien Nayrat -- PostgreSQL Explain lexer +* Ana Nelson -- Ragel, ANTLR, R console lexers +* David Neto, Google LLC -- WebGPU Shading Language lexer +* Kurt Neufeld -- Markdown lexer +* Nam T. Nguyen -- Monokai style +* Jesper Noehr -- HTML formatter "anchorlinenos" +* Mike Nolta -- Julia lexer +* Avery Nortonsmith -- Pointless lexer +* Jonas Obrist -- BBCode lexer +* Edward O'Callaghan -- Cryptol lexer +* David Oliva -- Rebol lexer +* Pat Pannuto -- nesC lexer +* Jon Parise -- Protocol buffers and Thrift lexers +* Benjamin Peterson -- Test suite refactoring +* Ronny Pfannschmidt -- BBCode lexer +* Dominik Picheta -- Nimrod lexer +* Andrew Pinkham -- RTF Formatter Refactoring +* Clément Prévost -- UrbiScript lexer +* Tanner Prynn -- cmdline -x option and loading lexers from files +* Oleh Prypin -- Crystal lexer (based on Ruby lexer) +* Nick Psaris -- K and Q lexers +* Xidorn Quan -- Web IDL lexer +* Elias Rabel -- Fortran fixed form lexer +* raichoo -- Idris lexer +* Daniel Ramirez -- GDScript lexer +* Kashif Rasul -- CUDA lexer +* Nathan Reed -- HLSL lexer +* Justin Reidy -- MXML lexer +* Jonathon Reinhart, Google LLC -- Soong lexer +* Norman Richards -- JSON lexer +* Corey Richardson -- Rust lexer updates +* Fabrizio Riguzzi -- cplint leder +* Lubomir Rintel -- GoodData MAQL and CL lexers +* Andre Roberge -- Tango style +* Georg Rollinger -- HSAIL lexer +* Michiel Roos -- TypoScript lexer +* Konrad Rudolph -- LaTeX formatter enhancements +* Mario Ruggier -- Evoque lexers +* Miikka Salminen -- Lovelace style, Hexdump lexer, lexer enhancements +* Stou Sandalski -- NumPy, FORTRAN, tcsh and XSLT lexers +* Matteo Sasso -- Common Lisp lexer +* Joe Schafer -- Ada lexer +* Max Schillinger -- TiddlyWiki5 lexer +* Andrew Schmidt -- X++ lexer +* Ken Schutte -- Matlab lexers +* René Schwaiger -- Rainbow Dash style +* Sebastian Schweizer -- Whiley lexer +* Tassilo Schweyer -- Io, MOOCode lexers +* Pablo Seminario -- PromQL lexer +* Ted Shaw -- AutoIt lexer +* Joerg Sieker -- ABAP lexer +* Robert Simmons -- Standard ML lexer +* Kirill Simonov -- YAML lexer +* Corbin Simpson -- Monte lexer +* Ville Skyttä -- ASCII armored lexer +* Alexander Smishlajev -- Visual FoxPro lexer +* Steve Spigarelli -- XQuery lexer +* Jerome St-Louis -- eC lexer +* Camil Staps -- Clean and NuSMV lexers; Solarized style +* James Strachan -- Kotlin lexer +* Tom Stuart -- Treetop lexer +* Colin Sullivan -- SuperCollider lexer +* Ben Swift -- Extempore lexer +* tatt61880 -- Kuin lexer +* Edoardo Tenani -- Arduino lexer +* Tiberius Teng -- default style overhaul +* Jeremy Thurgood -- Erlang, Squid config lexers +* Brian Tiffin -- OpenCOBOL lexer +* Bob Tolbert -- Hy lexer +* Doug Torrance -- Macaulay2 lexer +* Matthias Trute -- Forth lexer +* Tuoa Spi T4 -- Bdd lexer +* Erick Tryzelaar -- Felix lexer +* Alexander Udalov -- Kotlin lexer improvements +* Thomas Van Doren -- Chapel lexer +* Dave Van Ee -- Uxntal lexer updates +* Daniele Varrazzo -- PostgreSQL lexers +* Abe Voelker -- OpenEdge ABL lexer +* Pepijn de Vos -- HTML formatter CTags support +* Matthias Vallentin -- Bro lexer +* Benoît Vinot -- AMPL lexer +* Linh Vu Hong -- RSL lexer +* Taavi Väänänen -- Debian control, PHP lexers +* Immanuel Washington -- Smithy lexer +* Nathan Weizenbaum -- Haml and Sass lexers +* Nathan Whetsell -- Csound lexers +* Dietmar Winkler -- Modelica lexer +* Nils Winter -- Smalltalk lexer +* Davy Wybiral -- Clojure lexer +* Whitney Young -- ObjectiveC lexer +* Diego Zamboni -- CFengine3 lexer +* Enrique Zamudio -- Ceylon lexer +* Alex Zimin -- Nemerle lexer +* Rob Zimmerman -- Kal lexer +* Evgenii Zheltonozhskii -- Maple lexer +* Vincent Zurczak -- Roboconf lexer +* Hubert Gruniaux -- C and C++ lexer improvements +* Thomas Symalla -- AMDGPU Lexer +* 15b3 -- Image Formatter improvements +* Fabian Neumann -- CDDL lexer +* Thomas Duboucher -- CDDL lexer +* Philipp Imhof -- Pango Markup formatter +* Thomas Voss -- Sed lexer +* Martin Fischer -- WCAG contrast testing +* Marc Auberer -- Spice lexer +* Amr Hesham -- Carbon lexer +* diskdance -- Wikitext lexer +* vanillajonathan -- PRQL lexer +* Nikolay Antipov -- OpenSCAD lexer +* Markus Meyer, Nextron Systems -- YARA lexer +* Hannes Römer -- Mojo lexer +* Jan Frederik Schaefer -- PDDL lexer + +Many thanks for all contributions! diff --git a/local_packages/pygments-2.20.0.dist-info/licenses/LICENSE b/local_packages/pygments-2.20.0.dist-info/licenses/LICENSE new file mode 100644 index 0000000..446a1a8 --- /dev/null +++ b/local_packages/pygments-2.20.0.dist-info/licenses/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2006-2022 by the respective authors (see AUTHORS file). +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/local_packages/pygments/__init__.py b/local_packages/pygments/__init__.py new file mode 100644 index 0000000..3a5ba53 --- /dev/null +++ b/local_packages/pygments/__init__.py @@ -0,0 +1,82 @@ +""" + Pygments + ~~~~~~~~ + + Pygments is a syntax highlighting package written in Python. + + It is a generic syntax highlighter for general use in all kinds of software + such as forum systems, wikis or other applications that need to prettify + source code. Highlights are: + + * a wide range of common languages and markup formats is supported + * special attention is paid to details, increasing quality by a fair amount + * support for new languages and formats are added easily + * a number of output formats, presently HTML, LaTeX, RTF, SVG, all image + formats that PIL supports, and ANSI sequences + * it is usable as a command-line tool and as a library + * ... and it highlights even Brainfuck! + + The `Pygments master branch`_ is installable with ``easy_install Pygments==dev``. + + .. _Pygments master branch: + https://github.com/pygments/pygments/archive/master.zip#egg=Pygments-dev + + :copyright: Copyright 2006-present by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" +from io import StringIO, BytesIO + +__version__ = '2.20.0' +__docformat__ = 'restructuredtext' + +__all__ = ['lex', 'format', 'highlight'] + + +def lex(code, lexer): + """ + Lex `code` with the `lexer` (must be a `Lexer` instance) + and return an iterable of tokens. Currently, this only calls + `lexer.get_tokens()`. + """ + try: + return lexer.get_tokens(code) + except TypeError: + # Heuristic to catch a common mistake. + from pygments.lexer import RegexLexer + if isinstance(lexer, type) and issubclass(lexer, RegexLexer): + raise TypeError('lex() argument must be a lexer instance, ' + 'not a class') + raise + + +def format(tokens, formatter, outfile=None): # pylint: disable=redefined-builtin + """ + Format ``tokens`` (an iterable of tokens) with the formatter ``formatter`` + (a `Formatter` instance). + + If ``outfile`` is given and a valid file object (an object with a + ``write`` method), the result will be written to it, otherwise it + is returned as a string. + """ + try: + if not outfile: + realoutfile = getattr(formatter, 'encoding', None) and BytesIO() or StringIO() + formatter.format(tokens, realoutfile) + return realoutfile.getvalue() + else: + formatter.format(tokens, outfile) + except TypeError: + # Heuristic to catch a common mistake. + from pygments.formatter import Formatter + if isinstance(formatter, type) and issubclass(formatter, Formatter): + raise TypeError('format() argument must be a formatter instance, ' + 'not a class') + raise + + +def highlight(code, lexer, formatter, outfile=None): + """ + This is the most high-level highlighting function. It combines `lex` and + `format` in one function. + """ + return format(lex(code, lexer), formatter, outfile) diff --git a/local_packages/pygments/__main__.py b/local_packages/pygments/__main__.py new file mode 100644 index 0000000..818bfc3 --- /dev/null +++ b/local_packages/pygments/__main__.py @@ -0,0 +1,17 @@ +""" + pygments.__main__ + ~~~~~~~~~~~~~~~~~ + + Main entry point for ``python -m pygments``. + + :copyright: Copyright 2006-present by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import sys +import pygments.cmdline + +try: + sys.exit(pygments.cmdline.main(sys.argv)) +except KeyboardInterrupt: + sys.exit(1) diff --git a/local_packages/pygments/cmdline.py b/local_packages/pygments/cmdline.py new file mode 100644 index 0000000..bd4a1fb --- /dev/null +++ b/local_packages/pygments/cmdline.py @@ -0,0 +1,668 @@ +""" + pygments.cmdline + ~~~~~~~~~~~~~~~~ + + Command line interface. + + :copyright: Copyright 2006-present by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import os +import sys +import shutil +import argparse +from textwrap import dedent + +from pygments import __version__, highlight +from pygments.util import ClassNotFound, OptionError, docstring_headline, \ + guess_decode, guess_decode_from_terminal, terminal_encoding, \ + UnclosingTextIOWrapper +from pygments.lexers import get_all_lexers, get_lexer_by_name, guess_lexer, \ + load_lexer_from_file, get_lexer_for_filename, find_lexer_class_for_filename +from pygments.lexers.special import TextLexer +from pygments.formatters.latex import LatexEmbeddedLexer, LatexFormatter +from pygments.formatters import get_all_formatters, get_formatter_by_name, \ + load_formatter_from_file, get_formatter_for_filename, find_formatter_class +from pygments.formatters.terminal import TerminalFormatter +from pygments.formatters.terminal256 import Terminal256Formatter, TerminalTrueColorFormatter +from pygments.filters import get_all_filters, find_filter_class +from pygments.styles import get_all_styles, get_style_by_name + + +def _parse_options(o_strs): + opts = {} + if not o_strs: + return opts + for o_str in o_strs: + if not o_str.strip(): + continue + o_args = o_str.split(',') + for o_arg in o_args: + o_arg = o_arg.strip() + try: + o_key, o_val = o_arg.split('=', 1) + o_key = o_key.strip() + o_val = o_val.strip() + except ValueError: + opts[o_arg] = True + else: + opts[o_key] = o_val + return opts + + +def _parse_filters(f_strs): + filters = [] + if not f_strs: + return filters + for f_str in f_strs: + if ':' in f_str: + fname, fopts = f_str.split(':', 1) + filters.append((fname, _parse_options([fopts]))) + else: + filters.append((f_str, {})) + return filters + + +def _print_help(what, name): + try: + if what == 'lexer': + cls = get_lexer_by_name(name) + print(f"Help on the {cls.name} lexer:") + print(dedent(cls.__doc__)) + elif what == 'formatter': + cls = find_formatter_class(name) + print(f"Help on the {cls.name} formatter:") + print(dedent(cls.__doc__)) + elif what == 'filter': + cls = find_filter_class(name) + print(f"Help on the {name} filter:") + print(dedent(cls.__doc__)) + return 0 + except (AttributeError, ValueError): + print(f"{what} not found!", file=sys.stderr) + return 1 + + +def _print_list(what): + if what == 'lexer': + print() + print("Lexers:") + print("~~~~~~~") + + info = [] + for fullname, names, exts, _ in get_all_lexers(): + tup = (', '.join(names)+':', fullname, + exts and '(filenames ' + ', '.join(exts) + ')' or '') + info.append(tup) + info.sort() + for i in info: + print(('* {}\n {} {}').format(*i)) + + elif what == 'formatter': + print() + print("Formatters:") + print("~~~~~~~~~~~") + + info = [] + for cls in get_all_formatters(): + doc = docstring_headline(cls) + tup = (', '.join(cls.aliases) + ':', doc, cls.filenames and + '(filenames ' + ', '.join(cls.filenames) + ')' or '') + info.append(tup) + info.sort() + for i in info: + print(('* {}\n {} {}').format(*i)) + + elif what == 'filter': + print() + print("Filters:") + print("~~~~~~~~") + + for name in get_all_filters(): + cls = find_filter_class(name) + print("* " + name + ':') + print(f" {docstring_headline(cls)}") + + elif what == 'style': + print() + print("Styles:") + print("~~~~~~~") + + for name in get_all_styles(): + cls = get_style_by_name(name) + print("* " + name + ':') + print(f" {docstring_headline(cls)}") + + +def _print_list_as_json(requested_items): + import json + result = {} + if 'lexer' in requested_items: + info = {} + for fullname, names, filenames, mimetypes in get_all_lexers(): + info[fullname] = { + 'aliases': names, + 'filenames': filenames, + 'mimetypes': mimetypes + } + result['lexers'] = info + + if 'formatter' in requested_items: + info = {} + for cls in get_all_formatters(): + doc = docstring_headline(cls) + info[cls.name] = { + 'aliases': cls.aliases, + 'filenames': cls.filenames, + 'doc': doc + } + result['formatters'] = info + + if 'filter' in requested_items: + info = {} + for name in get_all_filters(): + cls = find_filter_class(name) + info[name] = { + 'doc': docstring_headline(cls) + } + result['filters'] = info + + if 'style' in requested_items: + info = {} + for name in get_all_styles(): + cls = get_style_by_name(name) + info[name] = { + 'doc': docstring_headline(cls) + } + result['styles'] = info + + json.dump(result, sys.stdout) + +def main_inner(parser, argns): + if argns.help: + parser.print_help() + return 0 + + if argns.V: + print(f'Pygments version {__version__}, (c) 2006-present by Georg Brandl, Matthäus ' + 'Chajdas and contributors.') + return 0 + + def is_only_option(opt): + return not any(v for (k, v) in vars(argns).items() if k != opt) + + # handle ``pygmentize -L`` + if argns.L is not None: + arg_set = set() + for k, v in vars(argns).items(): + if v: + arg_set.add(k) + + arg_set.discard('L') + arg_set.discard('json') + + if arg_set: + parser.print_help(sys.stderr) + return 2 + + # print version + if not argns.json: + main(['', '-V']) + allowed_types = {'lexer', 'formatter', 'filter', 'style'} + largs = [arg.rstrip('s') for arg in argns.L] + if any(arg not in allowed_types for arg in largs): + parser.print_help(sys.stderr) + return 0 + if not largs: + largs = allowed_types + if not argns.json: + for arg in largs: + _print_list(arg) + else: + _print_list_as_json(largs) + return 0 + + # handle ``pygmentize -H`` + if argns.H: + if not is_only_option('H'): + parser.print_help(sys.stderr) + return 2 + what, name = argns.H + if what not in ('lexer', 'formatter', 'filter'): + parser.print_help(sys.stderr) + return 2 + return _print_help(what, name) + + # parse -O options + parsed_opts = _parse_options(argns.O or []) + + # parse -P options + for p_opt in argns.P or []: + try: + name, value = p_opt.split('=', 1) + except ValueError: + parsed_opts[p_opt] = True + else: + parsed_opts[name] = value + + # encodings + inencoding = parsed_opts.get('inencoding', parsed_opts.get('encoding')) + outencoding = parsed_opts.get('outencoding', parsed_opts.get('encoding')) + + # handle ``pygmentize -N`` + if argns.N: + lexer = find_lexer_class_for_filename(argns.N) + if lexer is None: + lexer = TextLexer + + print(lexer.aliases[0]) + return 0 + + # handle ``pygmentize -C`` + if argns.C: + inp = sys.stdin.buffer.read() + try: + lexer = guess_lexer(inp, inencoding=inencoding) + except ClassNotFound: + lexer = TextLexer + + print(lexer.aliases[0]) + return 0 + + # handle ``pygmentize -S`` + S_opt = argns.S + a_opt = argns.a + if S_opt is not None: + f_opt = argns.f + if not f_opt: + parser.print_help(sys.stderr) + return 2 + if argns.l or argns.INPUTFILE: + parser.print_help(sys.stderr) + return 2 + + try: + parsed_opts['style'] = S_opt + fmter = get_formatter_by_name(f_opt, **parsed_opts) + except ClassNotFound as err: + print(err, file=sys.stderr) + return 1 + + print(fmter.get_style_defs(a_opt or '')) + return 0 + + # if no -S is given, -a is not allowed + if argns.a is not None: + parser.print_help(sys.stderr) + return 2 + + # parse -F options + F_opts = _parse_filters(argns.F or []) + + # -x: allow custom (eXternal) lexers and formatters + allow_custom_lexer_formatter = bool(argns.x) + + # select lexer + lexer = None + + # given by name? + lexername = argns.l + if lexername: + # custom lexer, located relative to user's cwd + if allow_custom_lexer_formatter and '.py' in lexername: + try: + filename = None + name = None + if ':' in lexername: + filename, name = lexername.rsplit(':', 1) + + if '.py' in name: + # This can happen on Windows: If the lexername is + # C:\lexer.py -- return to normal load path in that case + name = None + + if filename and name: + lexer = load_lexer_from_file(filename, name, + **parsed_opts) + else: + lexer = load_lexer_from_file(lexername, **parsed_opts) + except ClassNotFound as err: + print('Error:', err, file=sys.stderr) + return 1 + else: + try: + lexer = get_lexer_by_name(lexername, **parsed_opts) + except (OptionError, ClassNotFound) as err: + print('Error:', err, file=sys.stderr) + return 1 + + # read input code + code = None + + if argns.INPUTFILE: + if argns.s: + print('Error: -s option not usable when input file specified', + file=sys.stderr) + return 2 + + infn = argns.INPUTFILE + try: + with open(infn, 'rb') as infp: + code = infp.read() + except Exception as err: + print('Error: cannot read infile:', err, file=sys.stderr) + return 1 + if not inencoding: + code, inencoding = guess_decode(code) + + # do we have to guess the lexer? + if not lexer: + try: + lexer = get_lexer_for_filename(infn, code, **parsed_opts) + except ClassNotFound as err: + if argns.g: + try: + lexer = guess_lexer(code, **parsed_opts) + except ClassNotFound: + lexer = TextLexer(**parsed_opts) + else: + print('Error:', err, file=sys.stderr) + return 1 + except OptionError as err: + print('Error:', err, file=sys.stderr) + return 1 + + elif not argns.s: # treat stdin as full file (-s support is later) + # read code from terminal, always in binary mode since we want to + # decode ourselves and be tolerant with it + code = sys.stdin.buffer.read() # use .buffer to get a binary stream + if not inencoding: + code, inencoding = guess_decode_from_terminal(code, sys.stdin) + # else the lexer will do the decoding + if not lexer: + try: + lexer = guess_lexer(code, **parsed_opts) + except ClassNotFound: + lexer = TextLexer(**parsed_opts) + + else: # -s option needs a lexer with -l + if not lexer: + print('Error: when using -s a lexer has to be selected with -l', + file=sys.stderr) + return 2 + + # process filters + for fname, fopts in F_opts: + try: + lexer.add_filter(fname, **fopts) + except ClassNotFound as err: + print('Error:', err, file=sys.stderr) + return 1 + + # select formatter + outfn = argns.o + fmter = argns.f + if fmter: + # custom formatter, located relative to user's cwd + if allow_custom_lexer_formatter and '.py' in fmter: + try: + filename = None + name = None + if ':' in fmter: + # Same logic as above for custom lexer + filename, name = fmter.rsplit(':', 1) + + if '.py' in name: + name = None + + if filename and name: + fmter = load_formatter_from_file(filename, name, + **parsed_opts) + else: + fmter = load_formatter_from_file(fmter, **parsed_opts) + except ClassNotFound as err: + print('Error:', err, file=sys.stderr) + return 1 + else: + try: + fmter = get_formatter_by_name(fmter, **parsed_opts) + except (OptionError, ClassNotFound) as err: + print('Error:', err, file=sys.stderr) + return 1 + + if outfn: + if not fmter: + try: + fmter = get_formatter_for_filename(outfn, **parsed_opts) + except (OptionError, ClassNotFound) as err: + print('Error:', err, file=sys.stderr) + return 1 + try: + outfile = open(outfn, 'wb') + except Exception as err: + print('Error: cannot open outfile:', err, file=sys.stderr) + return 1 + else: + if not fmter: + if os.environ.get('COLORTERM','') in ('truecolor', '24bit'): + fmter = TerminalTrueColorFormatter(**parsed_opts) + elif '256' in os.environ.get('TERM', ''): + fmter = Terminal256Formatter(**parsed_opts) + else: + fmter = TerminalFormatter(**parsed_opts) + outfile = sys.stdout.buffer + + # determine output encoding if not explicitly selected + if not outencoding: + if outfn: + # output file? use lexer encoding for now (can still be None) + fmter.encoding = inencoding + else: + # else use terminal encoding + fmter.encoding = terminal_encoding(sys.stdout) + + # provide coloring under Windows, if possible + if not outfn and sys.platform in ('win32', 'cygwin') and \ + fmter.name in ('Terminal', 'Terminal256'): # pragma: no cover + # unfortunately colorama doesn't support binary streams on Py3 + outfile = UnclosingTextIOWrapper(outfile, encoding=fmter.encoding) + fmter.encoding = None + try: + import colorama.initialise + except ImportError: + pass + else: + outfile = colorama.initialise.wrap_stream( + outfile, convert=None, strip=None, autoreset=False, wrap=True) + + # When using the LaTeX formatter and the option `escapeinside` is + # specified, we need a special lexer which collects escaped text + # before running the chosen language lexer. + escapeinside = parsed_opts.get('escapeinside', '') + if len(escapeinside) == 2 and isinstance(fmter, LatexFormatter): + left = escapeinside[0] + right = escapeinside[1] + lexer = LatexEmbeddedLexer(left, right, lexer) + + # ... and do it! + if not argns.s: + # process whole input as per normal... + try: + highlight(code, lexer, fmter, outfile) + finally: + if outfn: + outfile.close() + return 0 + else: + # line by line processing of stdin (eg: for 'tail -f')... + try: + while 1: + line = sys.stdin.buffer.readline() + if not line: + break + if not inencoding: + line = guess_decode_from_terminal(line, sys.stdin)[0] + highlight(line, lexer, fmter, outfile) + if hasattr(outfile, 'flush'): + outfile.flush() + return 0 + except KeyboardInterrupt: # pragma: no cover + return 0 + finally: + if outfn: + outfile.close() + + +class HelpFormatter(argparse.HelpFormatter): + def __init__(self, prog, indent_increment=2, max_help_position=16, width=None): + if width is None: + try: + width = shutil.get_terminal_size().columns - 2 + except Exception: + pass + argparse.HelpFormatter.__init__(self, prog, indent_increment, + max_help_position, width) + + +def main(args=sys.argv): + """ + Main command line entry point. + """ + desc = "Highlight an input file and write the result to an output file." + parser = argparse.ArgumentParser(description=desc, add_help=False, + formatter_class=HelpFormatter) + + operation = parser.add_argument_group('Main operation') + lexersel = operation.add_mutually_exclusive_group() + lexersel.add_argument( + '-l', metavar='LEXER', + help='Specify the lexer to use. (Query names with -L.) If not ' + 'given and -g is not present, the lexer is guessed from the filename.') + lexersel.add_argument( + '-g', action='store_true', + help='Guess the lexer from the file contents, or pass through ' + 'as plain text if nothing can be guessed.') + operation.add_argument( + '-F', metavar='FILTER[:options]', action='append', + help='Add a filter to the token stream. (Query names with -L.) ' + 'Filter options are given after a colon if necessary.') + operation.add_argument( + '-f', metavar='FORMATTER', + help='Specify the formatter to use. (Query names with -L.) ' + 'If not given, the formatter is guessed from the output filename, ' + 'and defaults to the terminal formatter if the output is to the ' + 'terminal or an unknown file extension.') + operation.add_argument( + '-O', metavar='OPTION=value[,OPTION=value,...]', action='append', + help='Give options to the lexer and formatter as a comma-separated ' + 'list of key-value pairs. ' + 'Example: `-O bg=light,python=cool`.') + operation.add_argument( + '-P', metavar='OPTION=value', action='append', + help='Give a single option to the lexer and formatter - with this ' + 'you can pass options whose value contains commas and equal signs. ' + 'Example: `-P "heading=Pygments, the Python highlighter"`.') + operation.add_argument( + '-o', metavar='OUTPUTFILE', + help='Where to write the output. Defaults to standard output.') + + operation.add_argument( + 'INPUTFILE', nargs='?', + help='Where to read the input. Defaults to standard input.') + + flags = parser.add_argument_group('Operation flags') + flags.add_argument( + '-v', action='store_true', + help='Print a detailed traceback on unhandled exceptions, which ' + 'is useful for debugging and bug reports.') + flags.add_argument( + '-s', action='store_true', + help='Process lines one at a time until EOF, rather than waiting to ' + 'process the entire file. This only works for stdin, only for lexers ' + 'with no line-spanning constructs, and is intended for streaming ' + 'input such as you get from `tail -f`. ' + 'Example usage: `tail -f sql.log | pygmentize -s -l sql`.') + flags.add_argument( + '-x', action='store_true', + help='Allow custom lexers and formatters to be loaded from a .py file ' + 'relative to the current working directory. For example, ' + '`-l ./customlexer.py -x`. By default, this option expects a file ' + 'with a class named CustomLexer or CustomFormatter; you can also ' + 'specify your own class name with a colon (`-l ./lexer.py:MyLexer`). ' + 'Users should be very careful not to use this option with untrusted ' + 'files, because it will import and run them.') + flags.add_argument('--json', help='Output as JSON. This can ' + 'be only used in conjunction with -L.', + default=False, + action='store_true') + + special_modes_group = parser.add_argument_group( + 'Special modes - do not do any highlighting') + special_modes = special_modes_group.add_mutually_exclusive_group() + special_modes.add_argument( + '-S', metavar='STYLE -f formatter', + help='Print style definitions for STYLE for a formatter ' + 'given with -f. The argument given by -a is formatter ' + 'dependent.') + special_modes.add_argument( + '-L', nargs='*', metavar='WHAT', + help='List lexers, formatters, styles or filters -- ' + 'give additional arguments for the thing(s) you want to list ' + '(e.g. "styles"), or omit them to list everything.') + special_modes.add_argument( + '-N', metavar='FILENAME', + help='Guess and print out a lexer name based solely on the given ' + 'filename. Does not take input or highlight anything. If no specific ' + 'lexer can be determined, "text" is printed.') + special_modes.add_argument( + '-C', action='store_true', + help='Like -N, but print out a lexer name based solely on ' + 'a given content from standard input.') + special_modes.add_argument( + '-H', action='store', nargs=2, metavar=('NAME', 'TYPE'), + help='Print detailed help for the object of type , ' + 'where is one of "lexer", "formatter" or "filter".') + special_modes.add_argument( + '-V', action='store_true', + help='Print the package version.') + special_modes.add_argument( + '-h', '--help', action='store_true', + help='Print this help.') + special_modes_group.add_argument( + '-a', metavar='ARG', + help='Formatter-specific additional argument for the -S (print ' + 'style sheet) mode.') + + argns = parser.parse_args(args[1:]) + + try: + return main_inner(parser, argns) + except BrokenPipeError: + # someone closed our stdout, e.g. by quitting a pager. + return 0 + except Exception: + if argns.v: + print(file=sys.stderr) + print('*' * 65, file=sys.stderr) + print('An unhandled exception occurred while highlighting.', + file=sys.stderr) + print('Please report the whole traceback to the issue tracker at', + file=sys.stderr) + print('.', + file=sys.stderr) + print('*' * 65, file=sys.stderr) + print(file=sys.stderr) + raise + import traceback + info = traceback.format_exception(*sys.exc_info()) + msg = info[-1].strip() + if len(info) >= 3: + # extract relevant file and position info + msg += '\n (f{})'.format(info[-2].split('\n')[0].strip()[1:]) + print(file=sys.stderr) + print('*** Error while highlighting:', file=sys.stderr) + print(msg, file=sys.stderr) + print('*** If this is a bug you want to report, please rerun with -v.', + file=sys.stderr) + return 1 diff --git a/local_packages/pygments/console.py b/local_packages/pygments/console.py new file mode 100644 index 0000000..d900916 --- /dev/null +++ b/local_packages/pygments/console.py @@ -0,0 +1,70 @@ +""" + pygments.console + ~~~~~~~~~~~~~~~~ + + Format colored console output. + + :copyright: Copyright 2006-present by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +esc = "\x1b[" + +codes = {} +codes[""] = "" +codes["reset"] = esc + "39;49;00m" + +codes["bold"] = esc + "01m" +codes["faint"] = esc + "02m" +codes["standout"] = esc + "03m" +codes["underline"] = esc + "04m" +codes["blink"] = esc + "05m" +codes["overline"] = esc + "06m" + +dark_colors = ["black", "red", "green", "yellow", "blue", + "magenta", "cyan", "gray"] +light_colors = ["brightblack", "brightred", "brightgreen", "brightyellow", "brightblue", + "brightmagenta", "brightcyan", "white"] + +x = 30 +for dark, light in zip(dark_colors, light_colors): + codes[dark] = esc + "%im" % x + codes[light] = esc + "%im" % (60 + x) + x += 1 + +del dark, light, x + +codes["white"] = codes["bold"] + + +def reset_color(): + return codes["reset"] + + +def colorize(color_key, text): + return codes[color_key] + text + codes["reset"] + + +def ansiformat(attr, text): + """ + Format ``text`` with a color and/or some attributes:: + + color normal color + *color* bold color + _color_ underlined color + +color+ blinking color + """ + result = [] + if attr[:1] == attr[-1:] == '+': + result.append(codes['blink']) + attr = attr[1:-1] + if attr[:1] == attr[-1:] == '*': + result.append(codes['bold']) + attr = attr[1:-1] + if attr[:1] == attr[-1:] == '_': + result.append(codes['underline']) + attr = attr[1:-1] + result.append(codes[attr]) + result.append(text) + result.append(codes['reset']) + return ''.join(result) diff --git a/local_packages/pygments/filter.py b/local_packages/pygments/filter.py new file mode 100644 index 0000000..6c926c2 --- /dev/null +++ b/local_packages/pygments/filter.py @@ -0,0 +1,70 @@ +""" + pygments.filter + ~~~~~~~~~~~~~~~ + + Module that implements the default filter. + + :copyright: Copyright 2006-present by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + + +def apply_filters(stream, filters, lexer=None): + """ + Use this method to apply an iterable of filters to + a stream. If lexer is given it's forwarded to the + filter, otherwise the filter receives `None`. + """ + def _apply(filter_, stream): + yield from filter_.filter(lexer, stream) + for filter_ in filters: + stream = _apply(filter_, stream) + return stream + + +def simplefilter(f): + """ + Decorator that converts a function into a filter:: + + @simplefilter + def lowercase(self, lexer, stream, options): + for ttype, value in stream: + yield ttype, value.lower() + """ + return type(f.__name__, (FunctionFilter,), { + '__module__': getattr(f, '__module__'), + '__doc__': f.__doc__, + 'function': f, + }) + + +class Filter: + """ + Default filter. Subclass this class or use the `simplefilter` + decorator to create own filters. + """ + + def __init__(self, **options): + self.options = options + + def filter(self, lexer, stream): + raise NotImplementedError() + + +class FunctionFilter(Filter): + """ + Abstract class used by `simplefilter` to create simple + function filters on the fly. The `simplefilter` decorator + automatically creates subclasses of this class for + functions passed to it. + """ + function = None + + def __init__(self, **options): + if not hasattr(self, 'function'): + raise TypeError(f'{self.__class__.__name__!r} used without bound function') + Filter.__init__(self, **options) + + def filter(self, lexer, stream): + # pylint: disable=not-callable + yield from self.function(lexer, stream, self.options) diff --git a/local_packages/pygments/filters/__init__.py b/local_packages/pygments/filters/__init__.py new file mode 100644 index 0000000..c7ba6ad --- /dev/null +++ b/local_packages/pygments/filters/__init__.py @@ -0,0 +1,942 @@ +""" + pygments.filters + ~~~~~~~~~~~~~~~~ + + Module containing filter lookup functions and default + filters. + + :copyright: Copyright 2006-present by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \ + string_to_tokentype +from pygments.filter import Filter +from pygments.util import get_list_opt, get_int_opt, get_bool_opt, \ + get_choice_opt, ClassNotFound, OptionError +from pygments.plugin import find_plugin_filters + + +def find_filter_class(filtername): + """Lookup a filter by name. Return None if not found.""" + if filtername in FILTERS: + return FILTERS[filtername] + for name, cls in find_plugin_filters(): + if name == filtername: + return cls + return None + + +def get_filter_by_name(filtername, **options): + """Return an instantiated filter. + + Options are passed to the filter initializer if wanted. + Raise a ClassNotFound if not found. + """ + cls = find_filter_class(filtername) + if cls: + return cls(**options) + else: + raise ClassNotFound(f'filter {filtername!r} not found') + + +def get_all_filters(): + """Return a generator of all filter names.""" + yield from FILTERS + for name, _ in find_plugin_filters(): + yield name + + +def _replace_special(ttype, value, regex, specialttype, + replacefunc=lambda x: x): + last = 0 + for match in regex.finditer(value): + start, end = match.start(), match.end() + if start != last: + yield ttype, value[last:start] + yield specialttype, replacefunc(value[start:end]) + last = end + if last != len(value): + yield ttype, value[last:] + + +class CodeTagFilter(Filter): + """Highlight special code tags in comments and docstrings. + + Options accepted: + + `codetags` : list of strings + A list of strings that are flagged as code tags. The default is to + highlight ``XXX``, ``TODO``, ``FIXME``, ``BUG`` and ``NOTE``. + + .. versionchanged:: 2.13 + Now recognizes ``FIXME`` by default. + """ + + def __init__(self, **options): + Filter.__init__(self, **options) + tags = get_list_opt(options, 'codetags', + ['XXX', 'TODO', 'FIXME', 'BUG', 'NOTE']) + self.tag_re = re.compile(r'\b({})\b'.format('|'.join([ + re.escape(tag) for tag in tags if tag + ]))) + + def filter(self, lexer, stream): + regex = self.tag_re + for ttype, value in stream: + if ttype in String.Doc or \ + ttype in Comment and \ + ttype not in Comment.Preproc: + yield from _replace_special(ttype, value, regex, Comment.Special) + else: + yield ttype, value + + +class SymbolFilter(Filter): + """Convert mathematical symbols into Unicode characters. + + Examples are ``\\`` in Isabelle or + ``\\longrightarrow`` in LaTeX. + + This is mostly useful for HTML or console output when you want to + approximate the source rendering you'd see in an IDE. + + Options accepted: + + `lang` : string + The symbol language. Must be one of ``'isabelle'`` or + ``'latex'``. The default is ``'isabelle'``. + """ + + latex_symbols = { + '\\alpha' : '\U000003b1', + '\\beta' : '\U000003b2', + '\\gamma' : '\U000003b3', + '\\delta' : '\U000003b4', + '\\varepsilon' : '\U000003b5', + '\\zeta' : '\U000003b6', + '\\eta' : '\U000003b7', + '\\vartheta' : '\U000003b8', + '\\iota' : '\U000003b9', + '\\kappa' : '\U000003ba', + '\\lambda' : '\U000003bb', + '\\mu' : '\U000003bc', + '\\nu' : '\U000003bd', + '\\xi' : '\U000003be', + '\\pi' : '\U000003c0', + '\\varrho' : '\U000003c1', + '\\sigma' : '\U000003c3', + '\\tau' : '\U000003c4', + '\\upsilon' : '\U000003c5', + '\\varphi' : '\U000003c6', + '\\chi' : '\U000003c7', + '\\psi' : '\U000003c8', + '\\omega' : '\U000003c9', + '\\Gamma' : '\U00000393', + '\\Delta' : '\U00000394', + '\\Theta' : '\U00000398', + '\\Lambda' : '\U0000039b', + '\\Xi' : '\U0000039e', + '\\Pi' : '\U000003a0', + '\\Sigma' : '\U000003a3', + '\\Upsilon' : '\U000003a5', + '\\Phi' : '\U000003a6', + '\\Psi' : '\U000003a8', + '\\Omega' : '\U000003a9', + '\\leftarrow' : '\U00002190', + '\\longleftarrow' : '\U000027f5', + '\\rightarrow' : '\U00002192', + '\\longrightarrow' : '\U000027f6', + '\\Leftarrow' : '\U000021d0', + '\\Longleftarrow' : '\U000027f8', + '\\Rightarrow' : '\U000021d2', + '\\Longrightarrow' : '\U000027f9', + '\\leftrightarrow' : '\U00002194', + '\\longleftrightarrow' : '\U000027f7', + '\\Leftrightarrow' : '\U000021d4', + '\\Longleftrightarrow' : '\U000027fa', + '\\mapsto' : '\U000021a6', + '\\longmapsto' : '\U000027fc', + '\\relbar' : '\U00002500', + '\\Relbar' : '\U00002550', + '\\hookleftarrow' : '\U000021a9', + '\\hookrightarrow' : '\U000021aa', + '\\leftharpoondown' : '\U000021bd', + '\\rightharpoondown' : '\U000021c1', + '\\leftharpoonup' : '\U000021bc', + '\\rightharpoonup' : '\U000021c0', + '\\rightleftharpoons' : '\U000021cc', + '\\leadsto' : '\U0000219d', + '\\downharpoonleft' : '\U000021c3', + '\\downharpoonright' : '\U000021c2', + '\\upharpoonleft' : '\U000021bf', + '\\upharpoonright' : '\U000021be', + '\\restriction' : '\U000021be', + '\\uparrow' : '\U00002191', + '\\Uparrow' : '\U000021d1', + '\\downarrow' : '\U00002193', + '\\Downarrow' : '\U000021d3', + '\\updownarrow' : '\U00002195', + '\\Updownarrow' : '\U000021d5', + '\\langle' : '\U000027e8', + '\\rangle' : '\U000027e9', + '\\lceil' : '\U00002308', + '\\rceil' : '\U00002309', + '\\lfloor' : '\U0000230a', + '\\rfloor' : '\U0000230b', + '\\flqq' : '\U000000ab', + '\\frqq' : '\U000000bb', + '\\bot' : '\U000022a5', + '\\top' : '\U000022a4', + '\\wedge' : '\U00002227', + '\\bigwedge' : '\U000022c0', + '\\vee' : '\U00002228', + '\\bigvee' : '\U000022c1', + '\\forall' : '\U00002200', + '\\exists' : '\U00002203', + '\\nexists' : '\U00002204', + '\\neg' : '\U000000ac', + '\\Box' : '\U000025a1', + '\\Diamond' : '\U000025c7', + '\\vdash' : '\U000022a2', + '\\models' : '\U000022a8', + '\\dashv' : '\U000022a3', + '\\surd' : '\U0000221a', + '\\le' : '\U00002264', + '\\ge' : '\U00002265', + '\\ll' : '\U0000226a', + '\\gg' : '\U0000226b', + '\\lesssim' : '\U00002272', + '\\gtrsim' : '\U00002273', + '\\lessapprox' : '\U00002a85', + '\\gtrapprox' : '\U00002a86', + '\\in' : '\U00002208', + '\\notin' : '\U00002209', + '\\subset' : '\U00002282', + '\\supset' : '\U00002283', + '\\subseteq' : '\U00002286', + '\\supseteq' : '\U00002287', + '\\sqsubset' : '\U0000228f', + '\\sqsupset' : '\U00002290', + '\\sqsubseteq' : '\U00002291', + '\\sqsupseteq' : '\U00002292', + '\\cap' : '\U00002229', + '\\bigcap' : '\U000022c2', + '\\cup' : '\U0000222a', + '\\bigcup' : '\U000022c3', + '\\sqcup' : '\U00002294', + '\\bigsqcup' : '\U00002a06', + '\\sqcap' : '\U00002293', + '\\Bigsqcap' : '\U00002a05', + '\\setminus' : '\U00002216', + '\\propto' : '\U0000221d', + '\\uplus' : '\U0000228e', + '\\bigplus' : '\U00002a04', + '\\sim' : '\U0000223c', + '\\doteq' : '\U00002250', + '\\simeq' : '\U00002243', + '\\approx' : '\U00002248', + '\\asymp' : '\U0000224d', + '\\cong' : '\U00002245', + '\\equiv' : '\U00002261', + '\\Join' : '\U000022c8', + '\\bowtie' : '\U00002a1d', + '\\prec' : '\U0000227a', + '\\succ' : '\U0000227b', + '\\preceq' : '\U0000227c', + '\\succeq' : '\U0000227d', + '\\parallel' : '\U00002225', + '\\mid' : '\U000000a6', + '\\pm' : '\U000000b1', + '\\mp' : '\U00002213', + '\\times' : '\U000000d7', + '\\div' : '\U000000f7', + '\\cdot' : '\U000022c5', + '\\star' : '\U000022c6', + '\\circ' : '\U00002218', + '\\dagger' : '\U00002020', + '\\ddagger' : '\U00002021', + '\\lhd' : '\U000022b2', + '\\rhd' : '\U000022b3', + '\\unlhd' : '\U000022b4', + '\\unrhd' : '\U000022b5', + '\\triangleleft' : '\U000025c3', + '\\triangleright' : '\U000025b9', + '\\triangle' : '\U000025b3', + '\\triangleq' : '\U0000225c', + '\\oplus' : '\U00002295', + '\\bigoplus' : '\U00002a01', + '\\otimes' : '\U00002297', + '\\bigotimes' : '\U00002a02', + '\\odot' : '\U00002299', + '\\bigodot' : '\U00002a00', + '\\ominus' : '\U00002296', + '\\oslash' : '\U00002298', + '\\dots' : '\U00002026', + '\\cdots' : '\U000022ef', + '\\sum' : '\U00002211', + '\\prod' : '\U0000220f', + '\\coprod' : '\U00002210', + '\\infty' : '\U0000221e', + '\\int' : '\U0000222b', + '\\oint' : '\U0000222e', + '\\clubsuit' : '\U00002663', + '\\diamondsuit' : '\U00002662', + '\\heartsuit' : '\U00002661', + '\\spadesuit' : '\U00002660', + '\\aleph' : '\U00002135', + '\\emptyset' : '\U00002205', + '\\nabla' : '\U00002207', + '\\partial' : '\U00002202', + '\\flat' : '\U0000266d', + '\\natural' : '\U0000266e', + '\\sharp' : '\U0000266f', + '\\angle' : '\U00002220', + '\\copyright' : '\U000000a9', + '\\textregistered' : '\U000000ae', + '\\textonequarter' : '\U000000bc', + '\\textonehalf' : '\U000000bd', + '\\textthreequarters' : '\U000000be', + '\\textordfeminine' : '\U000000aa', + '\\textordmasculine' : '\U000000ba', + '\\euro' : '\U000020ac', + '\\pounds' : '\U000000a3', + '\\yen' : '\U000000a5', + '\\textcent' : '\U000000a2', + '\\textcurrency' : '\U000000a4', + '\\textdegree' : '\U000000b0', + } + + isabelle_symbols = { + '\\' : '\U0001d7ec', + '\\' : '\U0001d7ed', + '\\' : '\U0001d7ee', + '\\' : '\U0001d7ef', + '\\' : '\U0001d7f0', + '\\' : '\U0001d7f1', + '\\' : '\U0001d7f2', + '\\' : '\U0001d7f3', + '\\' : '\U0001d7f4', + '\\' : '\U0001d7f5', + '\\' : '\U0001d49c', + '\\' : '\U0000212c', + '\\' : '\U0001d49e', + '\\' : '\U0001d49f', + '\\' : '\U00002130', + '\\' : '\U00002131', + '\\' : '\U0001d4a2', + '\\' : '\U0000210b', + '\\' : '\U00002110', + '\\' : '\U0001d4a5', + '\\' : '\U0001d4a6', + '\\' : '\U00002112', + '\\' : '\U00002133', + '\\' : '\U0001d4a9', + '\\' : '\U0001d4aa', + '\\

' : '\U0001d5c9', + '\\' : '\U0001d5ca', + '\\' : '\U0001d5cb', + '\\' : '\U0001d5cc', + '\\' : '\U0001d5cd', + '\\' : '\U0001d5ce', + '\\' : '\U0001d5cf', + '\\' : '\U0001d5d0', + '\\' : '\U0001d5d1', + '\\' : '\U0001d5d2', + '\\' : '\U0001d5d3', + '\\' : '\U0001d504', + '\\' : '\U0001d505', + '\\' : '\U0000212d', + '\\

' : '\U0001d507', + '\\' : '\U0001d508', + '\\' : '\U0001d509', + '\\' : '\U0001d50a', + '\\' : '\U0000210c', + '\\' : '\U00002111', + '\\' : '\U0001d50d', + '\\' : '\U0001d50e', + '\\' : '\U0001d50f', + '\\' : '\U0001d510', + '\\' : '\U0001d511', + '\\' : '\U0001d512', + '\\' : '\U0001d513', + '\\' : '\U0001d514', + '\\' : '\U0000211c', + '\\' : '\U0001d516', + '\\' : '\U0001d517', + '\\' : '\U0001d518', + '\\' : '\U0001d519', + '\\' : '\U0001d51a', + '\\' : '\U0001d51b', + '\\' : '\U0001d51c', + '\\' : '\U00002128', + '\\' : '\U0001d51e', + '\\' : '\U0001d51f', + '\\' : '\U0001d520', + '\\
' : '\U0001d521', + '\\' : '\U0001d522', + '\\' : '\U0001d523', + '\\' : '\U0001d524', + '\\' : '\U0001d525', + '\\' : '\U0001d526', + '\\' : '\U0001d527', + '\\' : '\U0001d528', + '\\' : '\U0001d529', + '\\' : '\U0001d52a', + '\\' : '\U0001d52b', + '\\' : '\U0001d52c', + '\\' : '\U0001d52d', + '\\' : '\U0001d52e', + '\\' : '\U0001d52f', + '\\' : '\U0001d530', + '\\' : '\U0001d531', + '\\' : '\U0001d532', + '\\' : '\U0001d533', + '\\' : '\U0001d534', + '\\' : '\U0001d535', + '\\' : '\U0001d536', + '\\' : '\U0001d537', + '\\' : '\U000003b1', + '\\' : '\U000003b2', + '\\' : '\U000003b3', + '\\' : '\U000003b4', + '\\' : '\U000003b5', + '\\' : '\U000003b6', + '\\' : '\U000003b7', + '\\' : '\U000003b8', + '\\' : '\U000003b9', + '\\' : '\U000003ba', + '\\' : '\U000003bb', + '\\' : '\U000003bc', + '\\' : '\U000003bd', + '\\' : '\U000003be', + '\\' : '\U000003c0', + '\\' : '\U000003c1', + '\\' : '\U000003c3', + '\\' : '\U000003c4', + '\\' : '\U000003c5', + '\\' : '\U000003c6', + '\\' : '\U000003c7', + '\\' : '\U000003c8', + '\\' : '\U000003c9', + '\\' : '\U00000393', + '\\' : '\U00000394', + '\\' : '\U00000398', + '\\' : '\U0000039b', + '\\' : '\U0000039e', + '\\' : '\U000003a0', + '\\' : '\U000003a3', + '\\' : '\U000003a5', + '\\' : '\U000003a6', + '\\' : '\U000003a8', + '\\' : '\U000003a9', + '\\' : '\U0001d539', + '\\' : '\U00002102', + '\\' : '\U00002115', + '\\' : '\U0000211a', + '\\' : '\U0000211d', + '\\' : '\U00002124', + '\\' : '\U00002190', + '\\' : '\U000027f5', + '\\' : '\U00002192', + '\\' : '\U000027f6', + '\\' : '\U000021d0', + '\\' : '\U000027f8', + '\\' : '\U000021d2', + '\\' : '\U000027f9', + '\\' : '\U00002194', + '\\' : '\U000027f7', + '\\' : '\U000021d4', + '\\' : '\U000027fa', + '\\' : '\U000021a6', + '\\' : '\U000027fc', + '\\' : '\U00002500', + '\\' : '\U00002550', + '\\' : '\U000021a9', + '\\' : '\U000021aa', + '\\' : '\U000021bd', + '\\' : '\U000021c1', + '\\' : '\U000021bc', + '\\' : '\U000021c0', + '\\' : '\U000021cc', + '\\' : '\U0000219d', + '\\' : '\U000021c3', + '\\' : '\U000021c2', + '\\' : '\U000021bf', + '\\' : '\U000021be', + '\\' : '\U000021be', + '\\' : '\U00002237', + '\\' : '\U00002191', + '\\' : '\U000021d1', + '\\' : '\U00002193', + '\\' : '\U000021d3', + '\\' : '\U00002195', + '\\' : '\U000021d5', + '\\' : '\U000027e8', + '\\' : '\U000027e9', + '\\' : '\U00002308', + '\\' : '\U00002309', + '\\' : '\U0000230a', + '\\' : '\U0000230b', + '\\' : '\U00002987', + '\\' : '\U00002988', + '\\' : '\U000027e6', + '\\' : '\U000027e7', + '\\' : '\U00002983', + '\\' : '\U00002984', + '\\' : '\U000000ab', + '\\' : '\U000000bb', + '\\' : '\U000022a5', + '\\' : '\U000022a4', + '\\' : '\U00002227', + '\\' : '\U000022c0', + '\\' : '\U00002228', + '\\' : '\U000022c1', + '\\' : '\U00002200', + '\\' : '\U00002203', + '\\' : '\U00002204', + '\\' : '\U000000ac', + '\\' : '\U000025a1', + '\\' : '\U000025c7', + '\\' : '\U000022a2', + '\\' : '\U000022a8', + '\\' : '\U000022a9', + '\\' : '\U000022ab', + '\\' : '\U000022a3', + '\\' : '\U0000221a', + '\\' : '\U00002264', + '\\' : '\U00002265', + '\\' : '\U0000226a', + '\\' : '\U0000226b', + '\\' : '\U00002272', + '\\' : '\U00002273', + '\\' : '\U00002a85', + '\\' : '\U00002a86', + '\\' : '\U00002208', + '\\' : '\U00002209', + '\\' : '\U00002282', + '\\' : '\U00002283', + '\\' : '\U00002286', + '\\' : '\U00002287', + '\\' : '\U0000228f', + '\\' : '\U00002290', + '\\' : '\U00002291', + '\\' : '\U00002292', + '\\' : '\U00002229', + '\\' : '\U000022c2', + '\\' : '\U0000222a', + '\\' : '\U000022c3', + '\\' : '\U00002294', + '\\' : '\U00002a06', + '\\' : '\U00002293', + '\\' : '\U00002a05', + '\\' : '\U00002216', + '\\' : '\U0000221d', + '\\' : '\U0000228e', + '\\' : '\U00002a04', + '\\' : '\U00002260', + '\\' : '\U0000223c', + '\\' : '\U00002250', + '\\' : '\U00002243', + '\\' : '\U00002248', + '\\' : '\U0000224d', + '\\' : '\U00002245', + '\\' : '\U00002323', + '\\' : '\U00002261', + '\\' : '\U00002322', + '\\' : '\U000022c8', + '\\' : '\U00002a1d', + '\\' : '\U0000227a', + '\\' : '\U0000227b', + '\\' : '\U0000227c', + '\\' : '\U0000227d', + '\\' : '\U00002225', + '\\' : '\U000000a6', + '\\' : '\U000000b1', + '\\' : '\U00002213', + '\\' : '\U000000d7', + '\\
' : '\U000000f7', + '\\' : '\U000022c5', + '\\' : '\U000022c6', + '\\' : '\U00002219', + '\\' : '\U00002218', + '\\' : '\U00002020', + '\\' : '\U00002021', + '\\' : '\U000022b2', + '\\' : '\U000022b3', + '\\' : '\U000022b4', + '\\' : '\U000022b5', + '\\' : '\U000025c3', + '\\' : '\U000025b9', + '\\' : '\U000025b3', + '\\' : '\U0000225c', + '\\' : '\U00002295', + '\\' : '\U00002a01', + '\\' : '\U00002297', + '\\' : '\U00002a02', + '\\' : '\U00002299', + '\\' : '\U00002a00', + '\\' : '\U00002296', + '\\' : '\U00002298', + '\\' : '\U00002026', + '\\' : '\U000022ef', + '\\' : '\U00002211', + '\\' : '\U0000220f', + '\\' : '\U00002210', + '\\' : '\U0000221e', + '\\' : '\U0000222b', + '\\' : '\U0000222e', + '\\' : '\U00002663', + '\\' : '\U00002662', + '\\' : '\U00002661', + '\\' : '\U00002660', + '\\' : '\U00002135', + '\\' : '\U00002205', + '\\' : '\U00002207', + '\\' : '\U00002202', + '\\' : '\U0000266d', + '\\' : '\U0000266e', + '\\' : '\U0000266f', + '\\' : '\U00002220', + '\\' : '\U000000a9', + '\\' : '\U000000ae', + '\\' : '\U000000ad', + '\\' : '\U000000af', + '\\' : '\U000000bc', + '\\' : '\U000000bd', + '\\' : '\U000000be', + '\\' : '\U000000aa', + '\\' : '\U000000ba', + '\\
' : '\U000000a7', + '\\' : '\U000000b6', + '\\' : '\U000000a1', + '\\' : '\U000000bf', + '\\' : '\U000020ac', + '\\' : '\U000000a3', + '\\' : '\U000000a5', + '\\' : '\U000000a2', + '\\' : '\U000000a4', + '\\' : '\U000000b0', + '\\' : '\U00002a3f', + '\\' : '\U00002127', + '\\' : '\U000025ca', + '\\' : '\U00002118', + '\\' : '\U00002240', + '\\' : '\U000022c4', + '\\' : '\U000000b4', + '\\' : '\U00000131', + '\\' : '\U000000a8', + '\\' : '\U000000b8', + '\\' : '\U000002dd', + '\\' : '\U000003f5', + '\\' : '\U000023ce', + '\\' : '\U00002039', + '\\' : '\U0000203a', + '\\' : '\U00002302', + '\\<^sub>' : '\U000021e9', + '\\<^sup>' : '\U000021e7', + '\\<^bold>' : '\U00002759', + '\\<^bsub>' : '\U000021d8', + '\\<^esub>' : '\U000021d9', + '\\<^bsup>' : '\U000021d7', + '\\<^esup>' : '\U000021d6', + } + + lang_map = {'isabelle' : isabelle_symbols, 'latex' : latex_symbols} + + def __init__(self, **options): + Filter.__init__(self, **options) + lang = get_choice_opt(options, 'lang', + ['isabelle', 'latex'], 'isabelle') + self.symbols = self.lang_map[lang] + + def filter(self, lexer, stream): + for ttype, value in stream: + if value in self.symbols: + yield ttype, self.symbols[value] + else: + yield ttype, value + + +class KeywordCaseFilter(Filter): + """Convert keywords to lowercase or uppercase or capitalize them. + + This means first letter uppercase, rest lowercase. + + This can be useful e.g. if you highlight Pascal code and want to adapt the + code to your styleguide. + + Options accepted: + + `case` : string + The casing to convert keywords to. Must be one of ``'lower'``, + ``'upper'`` or ``'capitalize'``. The default is ``'lower'``. + """ + + def __init__(self, **options): + Filter.__init__(self, **options) + case = get_choice_opt(options, 'case', + ['lower', 'upper', 'capitalize'], 'lower') + self.convert = getattr(str, case) + + def filter(self, lexer, stream): + for ttype, value in stream: + if ttype in Keyword: + yield ttype, self.convert(value) + else: + yield ttype, value + + +class NameHighlightFilter(Filter): + """Highlight a normal Name (and Name.*) token with a different token type. + + Example:: + + filter = NameHighlightFilter( + names=['foo', 'bar', 'baz'], + tokentype=Name.Function, + ) + + This would highlight the names "foo", "bar" and "baz" + as functions. `Name.Function` is the default token type. + + Options accepted: + + `names` : list of strings + A list of names that should be given the different token type. + There is no default. + `tokentype` : TokenType or string + A token type or a string containing a token type name that is + used for highlighting the strings in `names`. The default is + `Name.Function`. + """ + + def __init__(self, **options): + Filter.__init__(self, **options) + self.names = set(get_list_opt(options, 'names', [])) + tokentype = options.get('tokentype') + if tokentype: + self.tokentype = string_to_tokentype(tokentype) + else: + self.tokentype = Name.Function + + def filter(self, lexer, stream): + for ttype, value in stream: + if ttype in Name and value in self.names: + yield self.tokentype, value + else: + yield ttype, value + + +class ErrorToken(Exception): + pass + + +class RaiseOnErrorTokenFilter(Filter): + """Raise an exception when the lexer generates an error token. + + Options accepted: + + `excclass` : Exception class + The exception class to raise. + The default is `pygments.filters.ErrorToken`. + + .. versionadded:: 0.8 + """ + + def __init__(self, **options): + Filter.__init__(self, **options) + self.exception = options.get('excclass', ErrorToken) + try: + # issubclass() will raise TypeError if first argument is not a class + if not issubclass(self.exception, Exception): + raise TypeError + except TypeError: + raise OptionError('excclass option is not an exception class') + + def filter(self, lexer, stream): + for ttype, value in stream: + if ttype is Error: + raise self.exception(value) + yield ttype, value + + +class VisibleWhitespaceFilter(Filter): + """Convert tabs, newlines and/or spaces to visible characters. + + Options accepted: + + `spaces` : string or bool + If this is a one-character string, spaces will be replaces by this string. + If it is another true value, spaces will be replaced by ``·`` (unicode + MIDDLE DOT). If it is a false value, spaces will not be replaced. The + default is ``False``. + `tabs` : string or bool + The same as for `spaces`, but the default replacement character is ``»`` + (unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). The default value + is ``False``. Note: this will not work if the `tabsize` option for the + lexer is nonzero, as tabs will already have been expanded then. + `tabsize` : int + If tabs are to be replaced by this filter (see the `tabs` option), this + is the total number of characters that a tab should be expanded to. + The default is ``8``. + `newlines` : string or bool + The same as for `spaces`, but the default replacement character is ``¶`` + (unicode PILCROW SIGN). The default value is ``False``. + `wstokentype` : bool + If true, give whitespace the special `Whitespace` token type. This allows + styling the visible whitespace differently (e.g. greyed out), but it can + disrupt background colors. The default is ``True``. + + .. versionadded:: 0.8 + """ + + def __init__(self, **options): + Filter.__init__(self, **options) + for name, default in [('spaces', '·'), + ('tabs', '»'), + ('newlines', '¶')]: + opt = options.get(name, False) + if isinstance(opt, str) and len(opt) == 1: + setattr(self, name, opt) + else: + setattr(self, name, (opt and default or '')) + tabsize = get_int_opt(options, 'tabsize', 8) + if self.tabs: + self.tabs += ' ' * (tabsize - 1) + if self.newlines: + self.newlines += '\n' + self.wstt = get_bool_opt(options, 'wstokentype', True) + + def filter(self, lexer, stream): + if self.wstt: + spaces = self.spaces or ' ' + tabs = self.tabs or '\t' + newlines = self.newlines or '\n' + regex = re.compile(r'\s') + + def replacefunc(wschar): + if wschar == ' ': + return spaces + elif wschar == '\t': + return tabs + elif wschar == '\n': + return newlines + return wschar + + for ttype, value in stream: + yield from _replace_special(ttype, value, regex, Whitespace, + replacefunc) + else: + spaces, tabs, newlines = self.spaces, self.tabs, self.newlines + # simpler processing + for ttype, value in stream: + if spaces: + value = value.replace(' ', spaces) + if tabs: + value = value.replace('\t', tabs) + if newlines: + value = value.replace('\n', newlines) + yield ttype, value + + +class GobbleFilter(Filter): + """Gobble source code lines (eats initial characters). + + This filter drops the first ``n`` characters off every line of code. This + may be useful when the source code fed to the lexer is indented by a fixed + amount of space that isn't desired in the output. + + Options accepted: + + `n` : int + The number of characters to gobble. + + .. versionadded:: 1.2 + """ + def __init__(self, **options): + Filter.__init__(self, **options) + self.n = get_int_opt(options, 'n', 0) + + def gobble(self, value, left): + if left < len(value): + return value[left:], 0 + else: + return '', left - len(value) + + def filter(self, lexer, stream): + n = self.n + left = n # How many characters left to gobble. + for ttype, value in stream: + # Remove ``left`` tokens from first line, ``n`` from all others. + parts = value.split('\n') + (parts[0], left) = self.gobble(parts[0], left) + for i in range(1, len(parts)): + (parts[i], left) = self.gobble(parts[i], n) + value = '\n'.join(parts) + + if value != '': + yield ttype, value + + +class TokenMergeFilter(Filter): + """Merge consecutive tokens with the same token type in the output stream. + + .. versionadded:: 1.2 + """ + def __init__(self, **options): + Filter.__init__(self, **options) + + def filter(self, lexer, stream): + current_type = None + current_value = None + for ttype, value in stream: + if ttype is current_type: + current_value += value + else: + if current_type is not None: + yield current_type, current_value + current_type = ttype + current_value = value + if current_type is not None: + yield current_type, current_value + + +FILTERS = { + 'codetagify': CodeTagFilter, + 'keywordcase': KeywordCaseFilter, + 'highlight': NameHighlightFilter, + 'raiseonerror': RaiseOnErrorTokenFilter, + 'whitespace': VisibleWhitespaceFilter, + 'gobble': GobbleFilter, + 'tokenmerge': TokenMergeFilter, + 'symbols': SymbolFilter, +} diff --git a/local_packages/pygments/formatter.py b/local_packages/pygments/formatter.py new file mode 100644 index 0000000..4fa603d --- /dev/null +++ b/local_packages/pygments/formatter.py @@ -0,0 +1,129 @@ +""" + pygments.formatter + ~~~~~~~~~~~~~~~~~~ + + Base formatter class. + + :copyright: Copyright 2006-present by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import codecs + +from pygments.util import get_bool_opt +from pygments.styles import get_style_by_name + +__all__ = ['Formatter'] + + +def _lookup_style(style): + if isinstance(style, str): + return get_style_by_name(style) + return style + + +class Formatter: + """ + Converts a token stream to text. + + Formatters should have attributes to help selecting them. These + are similar to the corresponding :class:`~pygments.lexer.Lexer` + attributes. + + .. autoattribute:: name + :no-value: + + .. autoattribute:: aliases + :no-value: + + .. autoattribute:: filenames + :no-value: + + You can pass options as keyword arguments to the constructor. + All formatters accept these basic options: + + ``style`` + The style to use, can be a string or a Style subclass + (default: "default"). Not used by e.g. the + TerminalFormatter. + ``full`` + Tells the formatter to output a "full" document, i.e. + a complete self-contained document. This doesn't have + any effect for some formatters (default: false). + ``title`` + If ``full`` is true, the title that should be used to + caption the document (default: ''). + ``encoding`` + If given, must be an encoding name. This will be used to + convert the Unicode token strings to byte strings in the + output. If it is "" or None, Unicode strings will be written + to the output file, which most file-like objects do not + support (default: None). + ``outencoding`` + Overrides ``encoding`` if given. + + """ + + #: Full name for the formatter, in human-readable form. + name = None + + #: A list of short, unique identifiers that can be used to lookup + #: the formatter from a list, e.g. using :func:`.get_formatter_by_name()`. + aliases = [] + + #: A list of fnmatch patterns that match filenames for which this + #: formatter can produce output. The patterns in this list should be unique + #: among all formatters. + filenames = [] + + #: If True, this formatter outputs Unicode strings when no encoding + #: option is given. + unicodeoutput = True + + def __init__(self, **options): + """ + As with lexers, this constructor takes arbitrary optional arguments, + and if you override it, you should first process your own options, then + call the base class implementation. + """ + self.style = _lookup_style(options.get('style', 'default')) + self.full = get_bool_opt(options, 'full', False) + self.title = options.get('title', '') + self.encoding = options.get('encoding', None) or None + if self.encoding in ('guess', 'chardet'): + # can happen for e.g. pygmentize -O encoding=guess + self.encoding = 'utf-8' + self.encoding = options.get('outencoding') or self.encoding + self.options = options + + def get_style_defs(self, arg=''): + """ + This method must return statements or declarations suitable to define + the current style for subsequent highlighted text (e.g. CSS classes + in the `HTMLFormatter`). + + The optional argument `arg` can be used to modify the generation and + is formatter dependent (it is standardized because it can be given on + the command line). + + This method is called by the ``-S`` :doc:`command-line option `, + the `arg` is then given by the ``-a`` option. + """ + return '' + + def format(self, tokensource, outfile): + """ + This method must format the tokens from the `tokensource` iterable and + write the formatted version to the file object `outfile`. + + Formatter options can control how exactly the tokens are converted. + """ + if self.encoding: + # wrap the outfile in a StreamWriter + outfile = codecs.lookup(self.encoding)[3](outfile) + return self.format_unencoded(tokensource, outfile) + + # Allow writing Formatter[str] or Formatter[bytes]. That's equivalent to + # Formatter. This helps when using third-party type stubs from typeshed. + def __class_getitem__(cls, name): + return cls diff --git a/local_packages/pygments/formatters/__init__.py b/local_packages/pygments/formatters/__init__.py new file mode 100644 index 0000000..829496d --- /dev/null +++ b/local_packages/pygments/formatters/__init__.py @@ -0,0 +1,157 @@ +""" + pygments.formatters + ~~~~~~~~~~~~~~~~~~~ + + Pygments formatters. + + :copyright: Copyright 2006-present by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re +import sys +import types +import fnmatch +from os.path import basename + +from pygments.formatters._mapping import FORMATTERS +from pygments.plugin import find_plugin_formatters +from pygments.util import ClassNotFound + +__all__ = ['get_formatter_by_name', 'get_formatter_for_filename', + 'get_all_formatters', 'load_formatter_from_file'] + list(FORMATTERS) + +_formatter_cache = {} # classes by name +_pattern_cache = {} + + +def _fn_matches(fn, glob): + """Return whether the supplied file name fn matches pattern filename.""" + if glob not in _pattern_cache: + pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob)) + return pattern.match(fn) + return _pattern_cache[glob].match(fn) + + +def _load_formatters(module_name): + """Load a formatter (and all others in the module too).""" + mod = __import__(module_name, None, None, ['__all__']) + for formatter_name in mod.__all__: + cls = getattr(mod, formatter_name) + _formatter_cache[cls.name] = cls + + +def get_all_formatters(): + """Return a generator for all formatter classes.""" + # NB: this returns formatter classes, not info like get_all_lexers(). + for info in FORMATTERS.values(): + if info[1] not in _formatter_cache: + _load_formatters(info[0]) + yield _formatter_cache[info[1]] + for _, formatter in find_plugin_formatters(): + yield formatter + + +def find_formatter_class(alias): + """Lookup a formatter by alias. + + Returns None if not found. + """ + for module_name, name, aliases, _, _ in FORMATTERS.values(): + if alias in aliases: + if name not in _formatter_cache: + _load_formatters(module_name) + return _formatter_cache[name] + for _, cls in find_plugin_formatters(): + if alias in cls.aliases: + return cls + + +def get_formatter_by_name(_alias, **options): + """ + Return an instance of a :class:`.Formatter` subclass that has `alias` in its + aliases list. The formatter is given the `options` at its instantiation. + + Will raise :exc:`pygments.util.ClassNotFound` if no formatter with that + alias is found. + """ + cls = find_formatter_class(_alias) + if cls is None: + raise ClassNotFound(f"no formatter found for name {_alias!r}") + return cls(**options) + + +def load_formatter_from_file(filename, formattername="CustomFormatter", **options): + """ + Return a `Formatter` subclass instance loaded from the provided file, relative + to the current directory. + + The file is expected to contain a Formatter class named ``formattername`` + (by default, CustomFormatter). Users should be very careful with the input, because + this method is equivalent to running ``eval()`` on the input file. The formatter is + given the `options` at its instantiation. + + :exc:`pygments.util.ClassNotFound` is raised if there are any errors loading + the formatter. + + .. versionadded:: 2.2 + """ + try: + # This empty dict will contain the namespace for the exec'd file + custom_namespace = {} + with open(filename, 'rb') as f: + exec(f.read(), custom_namespace) + # Retrieve the class `formattername` from that namespace + if formattername not in custom_namespace: + raise ClassNotFound(f'no valid {formattername} class found in {filename}') + formatter_class = custom_namespace[formattername] + # And finally instantiate it with the options + return formatter_class(**options) + except OSError as err: + raise ClassNotFound(f'cannot read {filename}: {err}') + except ClassNotFound: + raise + except Exception as err: + raise ClassNotFound(f'error when loading custom formatter: {err}') + + +def get_formatter_for_filename(fn, **options): + """ + Return a :class:`.Formatter` subclass instance that has a filename pattern + matching `fn`. The formatter is given the `options` at its instantiation. + + Will raise :exc:`pygments.util.ClassNotFound` if no formatter for that filename + is found. + """ + fn = basename(fn) + for modname, name, _, filenames, _ in FORMATTERS.values(): + for filename in filenames: + if _fn_matches(fn, filename): + if name not in _formatter_cache: + _load_formatters(modname) + return _formatter_cache[name](**options) + for _name, cls in find_plugin_formatters(): + for filename in cls.filenames: + if _fn_matches(fn, filename): + return cls(**options) + raise ClassNotFound(f"no formatter found for file name {fn!r}") + + +class _automodule(types.ModuleType): + """Automatically import formatters.""" + + def __getattr__(self, name): + info = FORMATTERS.get(name) + if info: + _load_formatters(info[0]) + cls = _formatter_cache[info[1]] + setattr(self, name, cls) + return cls + raise AttributeError(name) + + +oldmod = sys.modules[__name__] +newmod = _automodule(__name__) +newmod.__dict__.update(oldmod.__dict__) +sys.modules[__name__] = newmod +del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types diff --git a/local_packages/pygments/formatters/_mapping.py b/local_packages/pygments/formatters/_mapping.py new file mode 100644 index 0000000..72ca840 --- /dev/null +++ b/local_packages/pygments/formatters/_mapping.py @@ -0,0 +1,23 @@ +# Automatically generated by scripts/gen_mapfiles.py. +# DO NOT EDIT BY HAND; run `tox -e mapfiles` instead. + +FORMATTERS = { + 'BBCodeFormatter': ('pygments.formatters.bbcode', 'BBCode', ('bbcode', 'bb'), (), 'Format tokens with BBcodes. These formatting codes are used by many bulletin boards, so you can highlight your sourcecode with pygments before posting it there.'), + 'BmpImageFormatter': ('pygments.formatters.img', 'img_bmp', ('bmp', 'bitmap'), ('*.bmp',), 'Create a bitmap image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'), + 'GifImageFormatter': ('pygments.formatters.img', 'img_gif', ('gif',), ('*.gif',), 'Create a GIF image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'), + 'GroffFormatter': ('pygments.formatters.groff', 'groff', ('groff', 'troff', 'roff'), (), 'Format tokens with groff escapes to change their color and font style.'), + 'HtmlFormatter': ('pygments.formatters.html', 'HTML', ('html',), ('*.html', '*.htm'), "Format tokens as HTML 4 ```` tags. By default, the content is enclosed in a ``
`` tag, itself wrapped in a ``
`` tag (but see the `nowrap` option). The ``
``'s CSS class can be set by the `cssclass` option."), + 'IRCFormatter': ('pygments.formatters.irc', 'IRC', ('irc', 'IRC'), (), 'Format tokens with IRC color sequences'), + 'ImageFormatter': ('pygments.formatters.img', 'img', ('img', 'IMG', 'png'), ('*.png',), 'Create a PNG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'), + 'JpgImageFormatter': ('pygments.formatters.img', 'img_jpg', ('jpg', 'jpeg'), ('*.jpg',), 'Create a JPEG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'), + 'LatexFormatter': ('pygments.formatters.latex', 'LaTeX', ('latex', 'tex'), ('*.tex',), 'Format tokens as LaTeX code. This needs the `fancyvrb` and `color` standard packages.'), + 'NullFormatter': ('pygments.formatters.other', 'Text only', ('text', 'null'), ('*.txt',), 'Output the text unchanged without any formatting.'), + 'PangoMarkupFormatter': ('pygments.formatters.pangomarkup', 'Pango Markup', ('pango', 'pangomarkup'), (), 'Format tokens as Pango Markup code. It can then be rendered to an SVG.'), + 'RawTokenFormatter': ('pygments.formatters.other', 'Raw tokens', ('raw', 'tokens'), ('*.raw',), 'Format tokens as a raw representation for storing token streams.'), + 'RtfFormatter': ('pygments.formatters.rtf', 'RTF', ('rtf',), ('*.rtf',), 'Format tokens as RTF markup. This formatter automatically outputs full RTF documents with color information and other useful stuff. Perfect for Copy and Paste into Microsoft(R) Word(R) documents.'), + 'SvgFormatter': ('pygments.formatters.svg', 'SVG', ('svg',), ('*.svg',), 'Format tokens as an SVG graphics file. This formatter is still experimental. Each line of code is a ```` element with explicit ``x`` and ``y`` coordinates containing ```` elements with the individual token styles.'), + 'Terminal256Formatter': ('pygments.formatters.terminal256', 'Terminal256', ('terminal256', 'console256', '256'), (), 'Format tokens with ANSI color sequences, for output in a 256-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'), + 'TerminalFormatter': ('pygments.formatters.terminal', 'Terminal', ('terminal', 'console'), (), 'Format tokens with ANSI color sequences, for output in a text console. Color sequences are terminated at newlines, so that paging the output works correctly.'), + 'TerminalTrueColorFormatter': ('pygments.formatters.terminal256', 'TerminalTrueColor', ('terminal16m', 'console16m', '16m'), (), 'Format tokens with ANSI color sequences, for output in a true-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'), + 'TestcaseFormatter': ('pygments.formatters.other', 'Testcase', ('testcase',), (), 'Format tokens as appropriate for a new testcase.'), +} diff --git a/local_packages/pygments/formatters/bbcode.py b/local_packages/pygments/formatters/bbcode.py new file mode 100644 index 0000000..90d8d80 --- /dev/null +++ b/local_packages/pygments/formatters/bbcode.py @@ -0,0 +1,108 @@ +""" + pygments.formatters.bbcode + ~~~~~~~~~~~~~~~~~~~~~~~~~~ + + BBcode formatter. + + :copyright: Copyright 2006-present by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + + +from pygments.formatter import Formatter +from pygments.util import get_bool_opt + +__all__ = ['BBCodeFormatter'] + + +class BBCodeFormatter(Formatter): + """ + Format tokens with BBcodes. These formatting codes are used by many + bulletin boards, so you can highlight your sourcecode with pygments before + posting it there. + + This formatter has no support for background colors and borders, as there + are no common BBcode tags for that. + + Some board systems (e.g. phpBB) don't support colors in their [code] tag, + so you can't use the highlighting together with that tag. + Text in a [code] tag usually is shown with a monospace font (which this + formatter can do with the ``monofont`` option) and no spaces (which you + need for indentation) are removed. + + Additional options accepted: + + `style` + The style to use, can be a string or a Style subclass (default: + ``'default'``). + + `codetag` + If set to true, put the output into ``[code]`` tags (default: + ``false``) + + `monofont` + If set to true, add a tag to show the code with a monospace font + (default: ``false``). + """ + name = 'BBCode' + aliases = ['bbcode', 'bb'] + filenames = [] + + def __init__(self, **options): + Formatter.__init__(self, **options) + self._code = get_bool_opt(options, 'codetag', False) + self._mono = get_bool_opt(options, 'monofont', False) + + self.styles = {} + self._make_styles() + + def _make_styles(self): + for ttype, ndef in self.style: + start = end = '' + if ndef['color']: + start += '[color=#{}]'.format(ndef['color']) + end = '[/color]' + end + if ndef['bold']: + start += '[b]' + end = '[/b]' + end + if ndef['italic']: + start += '[i]' + end = '[/i]' + end + if ndef['underline']: + start += '[u]' + end = '[/u]' + end + # there are no common BBcodes for background-color and border + + self.styles[ttype] = start, end + + def format_unencoded(self, tokensource, outfile): + if self._code: + outfile.write('[code]') + if self._mono: + outfile.write('[font=monospace]') + + lastval = '' + lasttype = None + + for ttype, value in tokensource: + while ttype not in self.styles: + ttype = ttype.parent + if ttype == lasttype: + lastval += value + else: + if lastval: + start, end = self.styles[lasttype] + outfile.write(''.join((start, lastval, end))) + lastval = value + lasttype = ttype + + if lastval: + start, end = self.styles[lasttype] + outfile.write(''.join((start, lastval, end))) + + if self._mono: + outfile.write('[/font]') + if self._code: + outfile.write('[/code]') + if self._code or self._mono: + outfile.write('\n') diff --git a/local_packages/pygments/formatters/groff.py b/local_packages/pygments/formatters/groff.py new file mode 100644 index 0000000..f5d3146 --- /dev/null +++ b/local_packages/pygments/formatters/groff.py @@ -0,0 +1,170 @@ +""" + pygments.formatters.groff + ~~~~~~~~~~~~~~~~~~~~~~~~~ + + Formatter for groff output. + + :copyright: Copyright 2006-present by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import math +from pygments.formatter import Formatter +from pygments.util import get_bool_opt, get_int_opt + +__all__ = ['GroffFormatter'] + + +class GroffFormatter(Formatter): + """ + Format tokens with groff escapes to change their color and font style. + + .. versionadded:: 2.11 + + Additional options accepted: + + `style` + The style to use, can be a string or a Style subclass (default: + ``'default'``). + + `monospaced` + If set to true, monospace font will be used (default: ``true``). + + `linenos` + If set to true, print the line numbers (default: ``false``). + + `wrap` + Wrap lines to the specified number of characters. Disabled if set to 0 + (default: ``0``). + """ + + name = 'groff' + aliases = ['groff','troff','roff'] + filenames = [] + + def __init__(self, **options): + Formatter.__init__(self, **options) + + self.monospaced = get_bool_opt(options, 'monospaced', True) + self.linenos = get_bool_opt(options, 'linenos', False) + self._lineno = 0 + self.wrap = get_int_opt(options, 'wrap', 0) + self._linelen = 0 + + self.styles = {} + self._make_styles() + + + def _make_styles(self): + regular = '\\f[CR]' if self.monospaced else '\\f[R]' + bold = '\\f[CB]' if self.monospaced else '\\f[B]' + italic = '\\f[CI]' if self.monospaced else '\\f[I]' + + for ttype, ndef in self.style: + start = end = '' + if ndef['color']: + start += '\\m[{}]'.format(ndef['color']) + end = '\\m[]' + end + if ndef['bold']: + start += bold + end = regular + end + if ndef['italic']: + start += italic + end = regular + end + if ndef['bgcolor']: + start += '\\M[{}]'.format(ndef['bgcolor']) + end = '\\M[]' + end + + self.styles[ttype] = start, end + + + def _define_colors(self, outfile): + colors = set() + for _, ndef in self.style: + if ndef['color'] is not None: + colors.add(ndef['color']) + + for color in sorted(colors): + outfile.write('.defcolor ' + color + ' rgb #' + color + '\n') + + + def _write_lineno(self, outfile): + self._lineno += 1 + outfile.write("%s% 4d " % (self._lineno != 1 and '\n' or '', self._lineno)) + + + def _wrap_line(self, line): + length = len(line.rstrip('\n')) + space = ' ' if self.linenos else '' + newline = '' + + if length > self.wrap: + for i in range(0, math.floor(length / self.wrap)): + chunk = line[i*self.wrap:i*self.wrap+self.wrap] + newline += (chunk + '\n' + space) + remainder = length % self.wrap + if remainder > 0: + newline += line[-remainder-1:] + self._linelen = remainder + elif self._linelen + length > self.wrap: + newline = ('\n' + space) + line + self._linelen = length + else: + newline = line + self._linelen += length + + return newline + + + def _escape_chars(self, text): + text = text.replace('\\', '\\[u005C]'). \ + replace('.', '\\[char46]'). \ + replace('\'', '\\[u0027]'). \ + replace('`', '\\[u0060]'). \ + replace('~', '\\[u007E]') + copy = text + + for char in copy: + if len(char) != len(char.encode()): + uni = char.encode('unicode_escape') \ + .decode()[1:] \ + .replace('x', 'u00') \ + .upper() + text = text.replace(char, '\\[u' + uni[1:] + ']') + + return text + + + def format_unencoded(self, tokensource, outfile): + self._define_colors(outfile) + + outfile.write('.nf\n\\f[CR]\n') + + if self.linenos: + self._write_lineno(outfile) + + for ttype, value in tokensource: + while ttype not in self.styles: + ttype = ttype.parent + start, end = self.styles[ttype] + + for line in value.splitlines(True): + if self.wrap > 0: + line = self._wrap_line(line) + + if start and end: + text = self._escape_chars(line.rstrip('\n')) + if text != '': + outfile.write(''.join((start, text, end))) + else: + outfile.write(self._escape_chars(line.rstrip('\n'))) + + if line.endswith('\n'): + if self.linenos: + self._write_lineno(outfile) + self._linelen = 0 + else: + outfile.write('\n') + self._linelen = 0 + + outfile.write('\n.fi') diff --git a/local_packages/pygments/formatters/html.py b/local_packages/pygments/formatters/html.py new file mode 100644 index 0000000..17e6741 --- /dev/null +++ b/local_packages/pygments/formatters/html.py @@ -0,0 +1,997 @@ +""" + pygments.formatters.html + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Formatter for HTML output. + + :copyright: Copyright 2006-present by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import functools +import os +import sys +import os.path +from io import StringIO + +from pygments.formatter import Formatter +from pygments.token import Token, Text, STANDARD_TYPES +from pygments.util import get_bool_opt, get_int_opt, get_list_opt + +import html + +try: + import ctags +except ImportError: + ctags = None + +__all__ = ['HtmlFormatter'] + + +_escape_html_table = { + ord('&'): '&', + ord('<'): '<', + ord('>'): '>', + ord('"'): '"', + ord("'"): ''', +} + + +def escape_html(text, table=_escape_html_table): + """Escape &, <, > as well as single and double quotes for HTML.""" + return text.translate(table) + + +def webify(color): + if color.startswith('calc') or color.startswith('var'): + return color + else: + # Check if the color can be shortened from 6 to 3 characters + color = color.upper() + if (len(color) == 6 and + ( color[0] == color[1] + and color[2] == color[3] + and color[4] == color[5])): + return f'#{color[0]}{color[2]}{color[4]}' + else: + return f'#{color}' + + +def _get_ttype_class(ttype): + fname = STANDARD_TYPES.get(ttype) + if fname: + return fname + aname = '' + while fname is None: + aname = '-' + ttype[-1] + aname + ttype = ttype.parent + fname = STANDARD_TYPES.get(ttype) + return fname + aname + + +CSSFILE_TEMPLATE = '''\ +/* +generated by Pygments +Copyright 2006-present by the Pygments team. +Licensed under the BSD license, see LICENSE for details. +*/ +%(styledefs)s +''' + +DOC_HEADER = '''\ + + + + + %(title)s + + + + +

%(title)s

+ +''' + +DOC_HEADER_EXTERNALCSS = '''\ + + + + + %(title)s + + + + +

%(title)s

+ +''' + +DOC_FOOTER = '''\ + + +''' + + +class HtmlFormatter(Formatter): + r""" + Format tokens as HTML 4 ```` tags. By default, the content is enclosed + in a ``
`` tag, itself wrapped in a ``
`` tag (but see the `nowrap` option). + The ``
``'s CSS class can be set by the `cssclass` option. + + If the `linenos` option is set to ``"table"``, the ``
`` is
+    additionally wrapped inside a ```` which has one row and two
+    cells: one containing the line numbers and one containing the code.
+    Example:
+
+    .. sourcecode:: html
+
+        
+
+ + +
+
1
+            2
+
+
def foo(bar):
+              pass
+            
+
+ + (whitespace added to improve clarity). + + A list of lines can be specified using the `hl_lines` option to make these + lines highlighted (as of Pygments 0.11). + + With the `full` option, a complete HTML 4 document is output, including + the style definitions inside a ``$)', _handle_cssblock), + + include('keywords'), + include('inline'), + ], + 'keywords': [ + (words(( + '\\define', '\\end', 'caption', 'created', 'modified', 'tags', + 'title', 'type'), prefix=r'^', suffix=r'\b'), + Keyword), + ], + 'inline': [ + # escape + (r'\\.', Text), + # created or modified date + (r'\d{17}', Number.Integer), + # italics + (r'(\s)(//[^/]+//)((?=\W|\n))', + bygroups(Text, Generic.Emph, Text)), + # superscript + (r'(\s)(\^\^[^\^]+\^\^)', bygroups(Text, Generic.Emph)), + # subscript + (r'(\s)(,,[^,]+,,)', bygroups(Text, Generic.Emph)), + # underscore + (r'(\s)(__[^_]+__)', bygroups(Text, Generic.Strong)), + # bold + (r"(\s)(''[^']+'')((?=\W|\n))", + bygroups(Text, Generic.Strong, Text)), + # strikethrough + (r'(\s)(~~[^~]+~~)((?=\W|\n))', + bygroups(Text, Generic.Deleted, Text)), + # TiddlyWiki variables + (r'<<[^>]+>>', Name.Tag), + (r'\$\$[^$]+\$\$', Name.Tag), + (r'\$\([^)]+\)\$', Name.Tag), + # TiddlyWiki style or class + (r'^@@.*$', Name.Tag), + # HTML tags + (r']+>', Name.Tag), + # inline code + (r'`[^`]+`', String.Backtick), + # HTML escaped symbols + (r'&\S*?;', String.Regex), + # Wiki links + (r'(\[{2})([^]\|]+)(\]{2})', bygroups(Text, Name.Tag, Text)), + # External links + (r'(\[{2})([^]\|]+)(\|)([^]\|]+)(\]{2})', + bygroups(Text, Name.Tag, Text, Name.Attribute, Text)), + # Transclusion + (r'(\{{2})([^}]+)(\}{2})', bygroups(Text, Name.Tag, Text)), + # URLs + (r'(\b.?.?tps?://[^\s"]+)', bygroups(Name.Attribute)), + + # general text, must come last! + (r'[\w]+', Text), + (r'.', Text) + ], + } + + def __init__(self, **options): + self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True) + RegexLexer.__init__(self, **options) + + +class WikitextLexer(RegexLexer): + """ + For MediaWiki Wikitext. + + Parsing Wikitext is tricky, and results vary between different MediaWiki + installations, so we only highlight common syntaxes (built-in or from + popular extensions), and also assume templates produce no unbalanced + syntaxes. + """ + name = 'Wikitext' + url = 'https://www.mediawiki.org/wiki/Wikitext' + aliases = ['wikitext', 'mediawiki'] + filenames = [] + mimetypes = ['text/x-wiki'] + version_added = '2.15' + flags = re.MULTILINE + + def nowiki_tag_rules(tag_name): + return [ + (rf'(?i)()', bygroups(Punctuation, + Name.Tag, Whitespace, Punctuation), '#pop'), + include('entity'), + include('text'), + ] + + def plaintext_tag_rules(tag_name): + return [ + (rf'(?si)(.*?)()', bygroups(Text, + Punctuation, Name.Tag, Whitespace, Punctuation), '#pop'), + ] + + def delegate_tag_rules(tag_name, lexer, **lexer_kwargs): + return [ + (rf'(?i)()', bygroups(Punctuation, + Name.Tag, Whitespace, Punctuation), '#pop'), + (rf'(?si).+?(?=)', using(lexer, **lexer_kwargs)), + ] + + def text_rules(token): + return [ + (r'\w+', token), + (r'[^\S\n]+', token), + (r'(?s).', token), + ] + + def handle_syntaxhighlight(self, match, ctx): + from pygments.lexers import get_lexer_by_name + + attr_content = match.group() + start = 0 + index = 0 + while True: + index = attr_content.find('>', start) + # Exclude comment end (-->) + if attr_content[index-2:index] != '--': + break + start = index + 1 + + if index == -1: + # No tag end + yield from self.get_tokens_unprocessed(attr_content, stack=['root', 'attr']) + return + attr = attr_content[:index] + yield from self.get_tokens_unprocessed(attr, stack=['root', 'attr']) + yield match.start(3) + index, Punctuation, '>' + + lexer = None + content = attr_content[index+1:] + lang_match = re.findall(r'\blang=("|\'|)(\w+)(\1)', attr) + + if len(lang_match) >= 1: + # Pick the last match in case of multiple matches + lang = lang_match[-1][1] + try: + lexer = get_lexer_by_name(lang) + except ClassNotFound: + pass + + if lexer is None: + yield match.start() + index + 1, Text, content + else: + yield from lexer.get_tokens_unprocessed(content) + + def handle_score(self, match, ctx): + attr_content = match.group() + start = 0 + index = 0 + while True: + index = attr_content.find('>', start) + # Exclude comment end (-->) + if attr_content[index-2:index] != '--': + break + start = index + 1 + + if index == -1: + # No tag end + yield from self.get_tokens_unprocessed(attr_content, stack=['root', 'attr']) + return + attr = attr_content[:index] + content = attr_content[index+1:] + yield from self.get_tokens_unprocessed(attr, stack=['root', 'attr']) + yield match.start(3) + index, Punctuation, '>' + + lang_match = re.findall(r'\blang=("|\'|)(\w+)(\1)', attr) + # Pick the last match in case of multiple matches + lang = lang_match[-1][1] if len(lang_match) >= 1 else 'lilypond' + + if lang == 'lilypond': # Case sensitive + yield from LilyPondLexer().get_tokens_unprocessed(content) + else: # ABC + # FIXME: Use ABC lexer in the future + yield match.start() + index + 1, Text, content + + # a-z removed to prevent linter from complaining, REMEMBER to use (?i) + title_char = r' %!"$&\'()*,\-./0-9:;=?@A-Z\\\^_`~+\u0080-\uFFFF' + nbsp_char = r'(?:\t| |&\#0*160;|&\#[Xx]0*[Aa]0;|[ \xA0\u1680\u2000-\u200A\u202F\u205F\u3000])' + link_address = r'(?:[0-9.]+|\[[0-9a-f:.]+\]|[^\x00-\x20"<>\[\]\x7F\xA0\u1680\u2000-\u200A\u202F\u205F\u3000\uFFFD])' + link_char_class = r'[^\x00-\x20"<>\[\]\x7F\xA0\u1680\u2000-\u200A\u202F\u205F\u3000\uFFFD]' + double_slashes_i = { + '__FORCETOC__', '__NOCONTENTCONVERT__', '__NOCC__', '__NOEDITSECTION__', '__NOGALLERY__', + '__NOTITLECONVERT__', '__NOTC__', '__NOTOC__', '__TOC__', + } + double_slashes = { + '__EXPECTUNUSEDCATEGORY__', '__HIDDENCAT__', '__INDEX__', '__NEWSECTIONLINK__', + '__NOINDEX__', '__NONEWSECTIONLINK__', '__STATICREDIRECT__', '__NOGLOBAL__', + '__DISAMBIG__', '__EXPECTED_UNCONNECTED_PAGE__', + } + protocols = { + 'bitcoin:', 'ftp://', 'ftps://', 'geo:', 'git://', 'gopher://', 'http://', 'https://', + 'irc://', 'ircs://', 'magnet:', 'mailto:', 'mms://', 'news:', 'nntp://', 'redis://', + 'sftp://', 'sip:', 'sips:', 'sms:', 'ssh://', 'svn://', 'tel:', 'telnet://', 'urn:', + 'worldwind://', 'xmpp:', '//', + } + non_relative_protocols = protocols - {'//'} + html_tags = { + 'abbr', 'b', 'bdi', 'bdo', 'big', 'blockquote', 'br', 'caption', 'center', 'cite', 'code', + 'data', 'dd', 'del', 'dfn', 'div', 'dl', 'dt', 'em', 'font', 'h1', 'h2', 'h3', 'h4', 'h5', + 'h6', 'hr', 'i', 'ins', 'kbd', 'li', 'link', 'mark', 'meta', 'ol', 'p', 'q', 'rb', 'rp', + 'rt', 'rtc', 'ruby', 's', 'samp', 'small', 'span', 'strike', 'strong', 'sub', 'sup', + 'table', 'td', 'th', 'time', 'tr', 'tt', 'u', 'ul', 'var', 'wbr', + } + parser_tags = { + 'graph', 'charinsert', 'rss', 'chem', 'categorytree', 'nowiki', 'inputbox', 'math', + 'hiero', 'score', 'pre', 'ref', 'translate', 'imagemap', 'templatestyles', 'languages', + 'noinclude', 'mapframe', 'section', 'poem', 'syntaxhighlight', 'includeonly', 'tvar', + 'onlyinclude', 'templatedata', 'langconvert', 'timeline', 'dynamicpagelist', 'gallery', + 'maplink', 'ce', 'references', + } + variant_langs = { + # ZhConverter.php + 'zh', 'zh-hans', 'zh-hant', 'zh-cn', 'zh-hk', 'zh-mo', 'zh-my', 'zh-sg', 'zh-tw', + # WuuConverter.php + 'wuu', 'wuu-hans', 'wuu-hant', + # UzConverter.php + 'uz', 'uz-latn', 'uz-cyrl', + # TlyConverter.php + 'tly', 'tly-cyrl', + # TgConverter.php + 'tg', 'tg-latn', + # SrConverter.php + 'sr', 'sr-ec', 'sr-el', + # ShiConverter.php + 'shi', 'shi-tfng', 'shi-latn', + # ShConverter.php + 'sh-latn', 'sh-cyrl', + # KuConverter.php + 'ku', 'ku-arab', 'ku-latn', + # IuConverter.php + 'iu', 'ike-cans', 'ike-latn', + # GanConverter.php + 'gan', 'gan-hans', 'gan-hant', + # EnConverter.php + 'en', 'en-x-piglatin', + # CrhConverter.php + 'crh', 'crh-cyrl', 'crh-latn', + # BanConverter.php + 'ban', 'ban-bali', 'ban-x-dharma', 'ban-x-palmleaf', 'ban-x-pku', + } + magic_vars_i = { + 'ARTICLEPATH', 'INT', 'PAGEID', 'SCRIPTPATH', 'SERVER', 'SERVERNAME', 'STYLEPATH', + } + magic_vars = { + '!', '=', 'BASEPAGENAME', 'BASEPAGENAMEE', 'CASCADINGSOURCES', 'CONTENTLANGUAGE', + 'CONTENTLANG', 'CURRENTDAY', 'CURRENTDAY2', 'CURRENTDAYNAME', 'CURRENTDOW', 'CURRENTHOUR', + 'CURRENTMONTH', 'CURRENTMONTH2', 'CURRENTMONTH1', 'CURRENTMONTHABBREV', 'CURRENTMONTHNAME', + 'CURRENTMONTHNAMEGEN', 'CURRENTTIME', 'CURRENTTIMESTAMP', 'CURRENTVERSION', 'CURRENTWEEK', + 'CURRENTYEAR', 'DIRECTIONMARK', 'DIRMARK', 'FULLPAGENAME', 'FULLPAGENAMEE', 'LOCALDAY', + 'LOCALDAY2', 'LOCALDAYNAME', 'LOCALDOW', 'LOCALHOUR', 'LOCALMONTH', 'LOCALMONTH2', + 'LOCALMONTH1', 'LOCALMONTHABBREV', 'LOCALMONTHNAME', 'LOCALMONTHNAMEGEN', 'LOCALTIME', + 'LOCALTIMESTAMP', 'LOCALWEEK', 'LOCALYEAR', 'NAMESPACE', 'NAMESPACEE', 'NAMESPACENUMBER', + 'NUMBEROFACTIVEUSERS', 'NUMBEROFADMINS', 'NUMBEROFARTICLES', 'NUMBEROFEDITS', + 'NUMBEROFFILES', 'NUMBEROFPAGES', 'NUMBEROFUSERS', 'PAGELANGUAGE', 'PAGENAME', 'PAGENAMEE', + 'REVISIONDAY', 'REVISIONDAY2', 'REVISIONID', 'REVISIONMONTH', 'REVISIONMONTH1', + 'REVISIONSIZE', 'REVISIONTIMESTAMP', 'REVISIONUSER', 'REVISIONYEAR', 'ROOTPAGENAME', + 'ROOTPAGENAMEE', 'SITENAME', 'SUBJECTPAGENAME', 'ARTICLEPAGENAME', 'SUBJECTPAGENAMEE', + 'ARTICLEPAGENAMEE', 'SUBJECTSPACE', 'ARTICLESPACE', 'SUBJECTSPACEE', 'ARTICLESPACEE', + 'SUBPAGENAME', 'SUBPAGENAMEE', 'TALKPAGENAME', 'TALKPAGENAMEE', 'TALKSPACE', 'TALKSPACEE', + } + parser_functions_i = { + 'ANCHORENCODE', 'BIDI', 'CANONICALURL', 'CANONICALURLE', 'FILEPATH', 'FORMATNUM', + 'FULLURL', 'FULLURLE', 'GENDER', 'GRAMMAR', 'INT', r'\#LANGUAGE', 'LC', 'LCFIRST', 'LOCALURL', + 'LOCALURLE', 'NS', 'NSE', 'PADLEFT', 'PADRIGHT', 'PAGEID', 'PLURAL', 'UC', 'UCFIRST', + 'URLENCODE', + } + parser_functions = { + 'BASEPAGENAME', 'BASEPAGENAMEE', 'CASCADINGSOURCES', 'DEFAULTSORT', 'DEFAULTSORTKEY', + 'DEFAULTCATEGORYSORT', 'FULLPAGENAME', 'FULLPAGENAMEE', 'NAMESPACE', 'NAMESPACEE', + 'NAMESPACENUMBER', 'NUMBERINGROUP', 'NUMINGROUP', 'NUMBEROFACTIVEUSERS', 'NUMBEROFADMINS', + 'NUMBEROFARTICLES', 'NUMBEROFEDITS', 'NUMBEROFFILES', 'NUMBEROFPAGES', 'NUMBEROFUSERS', + 'PAGENAME', 'PAGENAMEE', 'PAGESINCATEGORY', 'PAGESINCAT', 'PAGESIZE', 'PROTECTIONEXPIRY', + 'PROTECTIONLEVEL', 'REVISIONDAY', 'REVISIONDAY2', 'REVISIONID', 'REVISIONMONTH', + 'REVISIONMONTH1', 'REVISIONTIMESTAMP', 'REVISIONUSER', 'REVISIONYEAR', 'ROOTPAGENAME', + 'ROOTPAGENAMEE', 'SUBJECTPAGENAME', 'ARTICLEPAGENAME', 'SUBJECTPAGENAMEE', + 'ARTICLEPAGENAMEE', 'SUBJECTSPACE', 'ARTICLESPACE', 'SUBJECTSPACEE', 'ARTICLESPACEE', + 'SUBPAGENAME', 'SUBPAGENAMEE', 'TALKPAGENAME', 'TALKPAGENAMEE', 'TALKSPACE', 'TALKSPACEE', + 'INT', 'DISPLAYTITLE', 'PAGESINNAMESPACE', 'PAGESINNS', + } + + tokens = { + 'root': [ + # Redirects + (r"""(?xi) + (\A\s*?)(\#REDIRECT:?) # may contain a colon + (\s+)(\[\[) (?=[^\]\n]* \]\]$) + """, + bygroups(Whitespace, Keyword, Whitespace, Punctuation), 'redirect-inner'), + # Subheadings + (r'^(={2,6})(.+?)(\1)(\s*$\n)', + bygroups(Generic.Subheading, Generic.Subheading, Generic.Subheading, Whitespace)), + # Headings + (r'^(=.+?=)(\s*$\n)', + bygroups(Generic.Heading, Whitespace)), + # Double-slashed magic words + (words(double_slashes_i, prefix=r'(?i)'), Name.Function.Magic), + (words(double_slashes), Name.Function.Magic), + # Raw URLs + (r'(?i)\b(?:{}){}{}*'.format('|'.join(protocols), + link_address, link_char_class), Name.Label), + # Magic links + (rf'\b(?:RFC|PMID){nbsp_char}+[0-9]+\b', + Name.Function.Magic), + (r"""(?x) + \bISBN {nbsp_char} + (?: 97[89] {nbsp_dash}? )? + (?: [0-9] {nbsp_dash}? ){{9}} # escape format() + [0-9Xx]\b + """.format(nbsp_char=nbsp_char, nbsp_dash=f'(?:-|{nbsp_char})'), Name.Function.Magic), + include('list'), + include('inline'), + include('text'), + ], + 'redirect-inner': [ + (r'(\]\])(\s*?\n)', bygroups(Punctuation, Whitespace), '#pop'), + (r'(\#)([^#]*?)', bygroups(Punctuation, Name.Label)), + (rf'(?i)[{title_char}]+', Name.Tag), + ], + 'list': [ + # Description lists + (r'^;', Keyword, 'dt'), + # Ordered lists, unordered lists and indents + (r'^[#:*]+', Keyword), + # Horizontal rules + (r'^-{4,}', Keyword), + ], + 'inline': [ + # Signatures + (r'~{3,5}', Keyword), + # Entities + include('entity'), + # Bold & italic + (r"('')(''')(?!')", bygroups(Generic.Emph, + Generic.EmphStrong), 'inline-italic-bold'), + (r"'''(?!')", Generic.Strong, 'inline-bold'), + (r"''(?!')", Generic.Emph, 'inline-italic'), + # Comments & parameters & templates + include('replaceable'), + # Media links + ( + r"""(?xi) + (\[\[) + (File|Image) (:) + ((?: [{}] | \{{{{2,3}}[^{{}}]*?\}}{{2,3}} | )*) + (?: (\#) ([{}]*?) )? + """.format(title_char, f'{title_char}#'), + bygroups(Punctuation, Name.Namespace, Punctuation, + using(this, state=['wikilink-name']), Punctuation, Name.Label), + 'medialink-inner' + ), + # Wikilinks + ( + r"""(?xi) + (\[\[)(?!{}) # Should not contain URLs + (?: ([{}]*) (:))? + ((?: [{}] | \{{{{2,3}}[^{{}}]*?\}}{{2,3}} | )*?) + (?: (\#) ([{}]*?) )? + (\]\]) + """.format('|'.join(protocols), title_char.replace('/', ''), + title_char, f'{title_char}#'), + bygroups(Punctuation, Name.Namespace, Punctuation, + using(this, state=['wikilink-name']), Punctuation, Name.Label, Punctuation) + ), + ( + r"""(?xi) + (\[\[)(?!{}) + (?: ([{}]*) (:))? + ((?: [{}] | \{{{{2,3}}[^{{}}]*?\}}{{2,3}} | )*?) + (?: (\#) ([{}]*?) )? + (\|) + """.format('|'.join(protocols), title_char.replace('/', ''), + title_char, f'{title_char}#'), + bygroups(Punctuation, Name.Namespace, Punctuation, + using(this, state=['wikilink-name']), Punctuation, Name.Label, Punctuation), + 'wikilink-inner' + ), + # External links + ( + r"""(?xi) + (\[) + ((?:{}) {} {}*) + (\s*) + """.format('|'.join(protocols), link_address, link_char_class), + bygroups(Punctuation, Name.Label, Whitespace), + 'extlink-inner' + ), + # Tables + (r'^(:*)(\s*?)(\{\|)([^\n]*)$', bygroups(Keyword, + Whitespace, Punctuation, using(this, state=['root', 'attr'])), 'table'), + # HTML tags + (r'(?i)(<)({})\b'.format('|'.join(html_tags)), + bygroups(Punctuation, Name.Tag), 'tag-inner-ordinary'), + (r'(?i)()'.format('|'.join(html_tags)), + bygroups(Punctuation, Name.Tag, Whitespace, Punctuation)), + # + (r'(?i)(<)(nowiki)\b', bygroups(Punctuation, + Name.Tag), ('tag-nowiki', 'tag-inner')), + #
+            (r'(?i)(<)(pre)\b', bygroups(Punctuation,
+             Name.Tag), ('tag-pre', 'tag-inner')),
+            # 
+            (r'(?i)(<)(categorytree)\b', bygroups(
+                Punctuation, Name.Tag), ('tag-categorytree', 'tag-inner')),
+            # 
+            (r'(?i)(<)(hiero)\b', bygroups(Punctuation,
+             Name.Tag), ('tag-hiero', 'tag-inner')),
+            # 
+            (r'(?i)(<)(math)\b', bygroups(Punctuation,
+             Name.Tag), ('tag-math', 'tag-inner')),
+            # 
+            (r'(?i)(<)(chem)\b', bygroups(Punctuation,
+             Name.Tag), ('tag-chem', 'tag-inner')),
+            # 
+            (r'(?i)(<)(ce)\b', bygroups(Punctuation,
+             Name.Tag), ('tag-ce', 'tag-inner')),
+            # 
+            (r'(?i)(<)(charinsert)\b', bygroups(
+                Punctuation, Name.Tag), ('tag-charinsert', 'tag-inner')),
+            # 
+            (r'(?i)(<)(templatedata)\b', bygroups(
+                Punctuation, Name.Tag), ('tag-templatedata', 'tag-inner')),
+            # 
+            (r'(?i)(<)(gallery)\b', bygroups(
+                Punctuation, Name.Tag), ('tag-gallery', 'tag-inner')),
+            # 
+            (r'(?i)(<)(gallery)\b', bygroups(
+                Punctuation, Name.Tag), ('tag-graph', 'tag-inner')),
+            # 
+            (r'(?i)(<)(dynamicpagelist)\b', bygroups(
+                Punctuation, Name.Tag), ('tag-dynamicpagelist', 'tag-inner')),
+            # 
+            (r'(?i)(<)(inputbox)\b', bygroups(
+                Punctuation, Name.Tag), ('tag-inputbox', 'tag-inner')),
+            # 
+            (r'(?i)(<)(rss)\b', bygroups(
+                Punctuation, Name.Tag), ('tag-rss', 'tag-inner')),
+            # 
+            (r'(?i)(<)(imagemap)\b', bygroups(
+                Punctuation, Name.Tag), ('tag-imagemap', 'tag-inner')),
+            # 
+            (r'(?i)()',
+             bygroups(Punctuation, Name.Tag, Whitespace, Punctuation)),
+            (r'(?si)(<)(syntaxhighlight)\b([^>]*?(?.*?)(?=)',
+             bygroups(Punctuation, Name.Tag, handle_syntaxhighlight)),
+            # : Fallback case for self-closing tags
+            (r'(?i)(<)(syntaxhighlight)\b(\s*?)((?:[^>]|-->)*?)(/\s*?(?)*?)(/\s*?(?)*?)(/\s*?(?|\Z)', Comment.Multiline),
+            # Parameters
+            (
+                r"""(?x)
+                (\{{3})
+                    ([^|]*?)
+                    (?=\}{3}|\|)
+                """,
+                bygroups(Punctuation, Name.Variable),
+                'parameter-inner',
+            ),
+            # Magic variables
+            (r'(?i)(\{{\{{)(\s*)({})(\s*)(\}}\}})'.format('|'.join(magic_vars_i)),
+             bygroups(Punctuation, Whitespace, Name.Function, Whitespace, Punctuation)),
+            (r'(\{{\{{)(\s*)({})(\s*)(\}}\}})'.format('|'.join(magic_vars)),
+                bygroups(Punctuation, Whitespace, Name.Function, Whitespace, Punctuation)),
+            # Parser functions & templates
+            (r'\{\{', Punctuation, 'template-begin-space'),
+            #  legacy syntax
+            (r'(?i)(<)(tvar)\b(\|)([^>]*?)(>)', bygroups(Punctuation,
+             Name.Tag, Punctuation, String, Punctuation)),
+            (r'', Punctuation, '#pop'),
+            # 
+            (r'(?i)(<)(tvar)\b', bygroups(Punctuation, Name.Tag), 'tag-inner-ordinary'),
+            (r'(?i)()',
+             bygroups(Punctuation, Name.Tag, Whitespace, Punctuation)),
+        ],
+        'parameter-inner': [
+            (r'\}{3}', Punctuation, '#pop'),
+            (r'\|', Punctuation),
+            include('inline'),
+            include('text'),
+        ],
+        'template-begin-space': [
+            # Templates allow line breaks at the beginning, and due to how MediaWiki handles
+            # comments, an extra state is required to handle things like {{\n\n name}}
+            (r'|\Z)', Comment.Multiline),
+            (r'\s+', Whitespace),
+            # Parser functions
+            (
+                r'(?i)(\#[{}]*?|{})(:)'.format(title_char,
+                                           '|'.join(parser_functions_i)),
+                bygroups(Name.Function, Punctuation), ('#pop', 'template-inner')
+            ),
+            (
+                r'({})(:)'.format('|'.join(parser_functions)),
+                bygroups(Name.Function, Punctuation), ('#pop', 'template-inner')
+            ),
+            # Templates
+            (
+                rf'(?i)([{title_char}]*?)(:)',
+                bygroups(Name.Namespace, Punctuation), ('#pop', 'template-name')
+            ),
+            default(('#pop', 'template-name'),),
+        ],
+        'template-name': [
+            (r'(\s*?)(\|)', bygroups(Text, Punctuation), ('#pop', 'template-inner')),
+            (r'\}\}', Punctuation, '#pop'),
+            (r'\n', Text, '#pop'),
+            include('replaceable'),
+            *text_rules(Name.Tag),
+        ],
+        'template-inner': [
+            (r'\}\}', Punctuation, '#pop'),
+            (r'\|', Punctuation),
+            (
+                r"""(?x)
+                    (?<=\|)
+                    ( (?: (?! \{\{ | \}\} )[^=\|<])*? ) # Exclude templates and tags
+                    (=)
+                """,
+                bygroups(Name.Label, Operator)
+            ),
+            include('inline'),
+            include('text'),
+        ],
+        'table': [
+            # Use [ \t\n\r\0\x0B] instead of \s to follow PHP trim() behavior
+            # Endings
+            (r'^([ \t\n\r\0\x0B]*?)(\|\})',
+             bygroups(Whitespace, Punctuation), '#pop'),
+            # Table rows
+            (r'^([ \t\n\r\0\x0B]*?)(\|-+)(.*)$', bygroups(Whitespace, Punctuation,
+             using(this, state=['root', 'attr']))),
+            # Captions
+            (
+                r"""(?x)
+                ^([ \t\n\r\0\x0B]*?)(\|\+)
+                # Exclude links, template and tags
+                (?: ( (?: (?! \[\[ | \{\{ )[^|\n<] )*? )(\|) )?
+                (.*?)$
+                """,
+                bygroups(Whitespace, Punctuation, using(this, state=[
+                         'root', 'attr']), Punctuation, Generic.Heading),
+            ),
+            # Table data
+            (
+                r"""(?x)
+                ( ^(?:[ \t\n\r\0\x0B]*?)\| | \|\| )
+                (?: ( (?: (?! \[\[ | \{\{ )[^|\n<] )*? )(\|)(?!\|) )?
+                """,
+                bygroups(Punctuation, using(this, state=[
+                         'root', 'attr']), Punctuation),
+            ),
+            # Table headers
+            (
+                r"""(?x)
+                ( ^(?:[ \t\n\r\0\x0B]*?)!  )
+                (?: ( (?: (?! \[\[ | \{\{ )[^|\n<] )*? )(\|)(?!\|) )?
+                """,
+                bygroups(Punctuation, using(this, state=[
+                         'root', 'attr']), Punctuation),
+                'table-header',
+            ),
+            include('list'),
+            include('inline'),
+            include('text'),
+        ],
+        'table-header': [
+            # Requires another state for || handling inside headers
+            (r'\n', Text, '#pop'),
+            (
+                r"""(?x)
+                (!!|\|\|)
+                (?:
+                    ( (?: (?! \[\[ | \{\{ )[^|\n<] )*? )
+                    (\|)(?!\|)
+                )?
+                """,
+                bygroups(Punctuation, using(this, state=[
+                         'root', 'attr']), Punctuation)
+            ),
+            *text_rules(Generic.Subheading),
+        ],
+        'entity': [
+            (r'&\S*?;', Name.Entity),
+        ],
+        'dt': [
+            (r'\n', Text, '#pop'),
+            include('inline'),
+            (r':', Keyword, '#pop'),
+            include('text'),
+        ],
+        'extlink-inner': [
+            (r'\]', Punctuation, '#pop'),
+            include('inline'),
+            include('text'),
+        ],
+        'nowiki-ish': [
+            include('entity'),
+            include('text'),
+        ],
+        'attr': [
+            include('replaceable'),
+            (r'\s+', Whitespace),
+            (r'(=)(\s*)(")', bygroups(Operator, Whitespace, String.Double), 'attr-val-2'),
+            (r"(=)(\s*)(')", bygroups(Operator, Whitespace, String.Single), 'attr-val-1'),
+            (r'(=)(\s*)', bygroups(Operator, Whitespace), 'attr-val-0'),
+            (r'[\w:-]+', Name.Attribute),
+
+        ],
+        'attr-val-0': [
+            (r'\s', Whitespace, '#pop'),
+            include('replaceable'),
+            *text_rules(String),
+        ],
+        'attr-val-1': [
+            (r"'", String.Single, '#pop'),
+            include('replaceable'),
+            *text_rules(String.Single),
+        ],
+        'attr-val-2': [
+            (r'"', String.Double, '#pop'),
+            include('replaceable'),
+            *text_rules(String.Double),
+        ],
+        'tag-inner-ordinary': [
+            (r'/?\s*>', Punctuation, '#pop'),
+            include('tag-attr'),
+        ],
+        'tag-inner': [
+            # Return to root state for self-closing tags
+            (r'/\s*>', Punctuation, '#pop:2'),
+            (r'\s*>', Punctuation, '#pop'),
+            include('tag-attr'),
+        ],
+        # There states below are just like their non-tag variants, the key difference is
+        # they forcibly quit when encountering tag closing markup
+        'tag-attr': [
+            include('replaceable'),
+            (r'\s+', Whitespace),
+            (r'(=)(\s*)(")', bygroups(Operator,
+             Whitespace, String.Double), 'tag-attr-val-2'),
+            (r"(=)(\s*)(')", bygroups(Operator,
+             Whitespace, String.Single), 'tag-attr-val-1'),
+            (r'(=)(\s*)', bygroups(Operator, Whitespace), 'tag-attr-val-0'),
+            (r'[\w:-]+', Name.Attribute),
+
+        ],
+        'tag-attr-val-0': [
+            (r'\s', Whitespace, '#pop'),
+            (r'/?>', Punctuation, '#pop:2'),
+            include('replaceable'),
+            *text_rules(String),
+        ],
+        'tag-attr-val-1': [
+            (r"'", String.Single, '#pop'),
+            (r'/?>', Punctuation, '#pop:2'),
+            include('replaceable'),
+            *text_rules(String.Single),
+        ],
+        'tag-attr-val-2': [
+            (r'"', String.Double, '#pop'),
+            (r'/?>', Punctuation, '#pop:2'),
+            include('replaceable'),
+            *text_rules(String.Double),
+        ],
+        'tag-nowiki': nowiki_tag_rules('nowiki'),
+        'tag-pre': nowiki_tag_rules('pre'),
+        'tag-categorytree': plaintext_tag_rules('categorytree'),
+        'tag-dynamicpagelist': plaintext_tag_rules('dynamicpagelist'),
+        'tag-hiero': plaintext_tag_rules('hiero'),
+        'tag-inputbox': plaintext_tag_rules('inputbox'),
+        'tag-imagemap': plaintext_tag_rules('imagemap'),
+        'tag-charinsert': plaintext_tag_rules('charinsert'),
+        'tag-timeline': plaintext_tag_rules('timeline'),
+        'tag-gallery': plaintext_tag_rules('gallery'),
+        'tag-graph': plaintext_tag_rules('graph'),
+        'tag-rss': plaintext_tag_rules('rss'),
+        'tag-math': delegate_tag_rules('math', TexLexer, state='math'),
+        'tag-chem': delegate_tag_rules('chem', TexLexer, state='math'),
+        'tag-ce': delegate_tag_rules('ce', TexLexer, state='math'),
+        'tag-templatedata': delegate_tag_rules('templatedata', JsonLexer),
+        'text-italic': text_rules(Generic.Emph),
+        'text-bold': text_rules(Generic.Strong),
+        'text-bold-italic': text_rules(Generic.EmphStrong),
+        'text': text_rules(Text),
+    }
diff --git a/local_packages/pygments/lexers/math.py b/local_packages/pygments/lexers/math.py
new file mode 100644
index 0000000..b38c13e
--- /dev/null
+++ b/local_packages/pygments/lexers/math.py
@@ -0,0 +1,21 @@
+"""
+    pygments.lexers.math
+    ~~~~~~~~~~~~~~~~~~~~
+
+    Just export lexers that were contained in this module.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+# ruff: noqa: F401
+from pygments.lexers.python import NumPyLexer
+from pygments.lexers.matlab import MatlabLexer, MatlabSessionLexer, \
+    OctaveLexer, ScilabLexer
+from pygments.lexers.julia import JuliaLexer, JuliaConsoleLexer
+from pygments.lexers.r import RConsoleLexer, SLexer, RdLexer
+from pygments.lexers.modeling import BugsLexer, JagsLexer, StanLexer
+from pygments.lexers.idl import IDLLexer
+from pygments.lexers.algebra import MuPADLexer
+
+__all__ = []
diff --git a/local_packages/pygments/lexers/matlab.py b/local_packages/pygments/lexers/matlab.py
new file mode 100644
index 0000000..b27f4db
--- /dev/null
+++ b/local_packages/pygments/lexers/matlab.py
@@ -0,0 +1,3307 @@
+"""
+    pygments.lexers.matlab
+    ~~~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for Matlab and related languages.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import Lexer, RegexLexer, bygroups, default, words, \
+    do_insertions, include
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+    Number, Punctuation, Generic, Whitespace
+
+from pygments.lexers import _scilab_builtins
+
+__all__ = ['MatlabLexer', 'MatlabSessionLexer', 'OctaveLexer', 'ScilabLexer']
+
+
+class MatlabLexer(RegexLexer):
+    """
+    For Matlab source code.
+    """
+    name = 'Matlab'
+    aliases = ['matlab']
+    filenames = ['*.m']
+    mimetypes = ['text/matlab']
+    url = 'https://www.mathworks.com/products/matlab.html'
+    version_added = '0.10'
+
+    _operators = r'-|==|~=|<=|>=|<|>|&&|&|~|\|\|?|\.\*|\*|\+|\.\^|\^|\.\\|\./|/|\\'
+
+    tokens = {
+        'expressions': [
+            # operators:
+            (_operators, Operator),
+
+            # numbers (must come before punctuation to handle `.5`; cannot use
+            # `\b` due to e.g. `5. + .5`).  The negative lookahead on operators
+            # avoids including the dot in `1./x` (the dot is part of `./`).
+            (rf'(? and then
+            # (equal | open-parenthesis |  | ).
+            (rf'(?:^|(?<=;))(\s*)(\w+)(\s+)(?!=|\(|{_operators}\s|\s)',
+             bygroups(Whitespace, Name, Whitespace), 'commandargs'),
+
+            include('expressions')
+        ],
+        'blockcomment': [
+            (r'^\s*%\}', Comment.Multiline, '#pop'),
+            (r'^.*\n', Comment.Multiline),
+            (r'.', Comment.Multiline),
+        ],
+        'deffunc': [
+            (r'(\s*)(?:(\S+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
+             bygroups(Whitespace, Text, Whitespace, Punctuation,
+                      Whitespace, Name.Function, Punctuation, Text,
+                      Punctuation, Whitespace), '#pop'),
+            # function with no args
+            (r'(\s*)([a-zA-Z_]\w*)',
+             bygroups(Whitespace, Name.Function), '#pop'),
+        ],
+        'propattrs': [
+            (r'(\w+)(\s*)(=)(\s*)(\d+)',
+             bygroups(Name.Builtin, Whitespace, Punctuation, Whitespace,
+                      Number)),
+            (r'(\w+)(\s*)(=)(\s*)([a-zA-Z]\w*)',
+             bygroups(Name.Builtin, Whitespace, Punctuation, Whitespace,
+                      Keyword)),
+            (r',', Punctuation),
+            (r'\)', Punctuation, '#pop'),
+            (r'\s+', Whitespace),
+            (r'.', Text),
+        ],
+        'defprops': [
+            (r'%\{\s*\n', Comment.Multiline, 'blockcomment'),
+            (r'%.*$', Comment),
+            (r'(?.
+    """
+    name = 'Matlab session'
+    aliases = ['matlabsession']
+    url = 'https://www.mathworks.com/products/matlab.html'
+    version_added = '0.10'
+    _example = "matlabsession/matlabsession_sample.txt"
+
+    def get_tokens_unprocessed(self, text):
+        mlexer = MatlabLexer(**self.options)
+
+        curcode = ''
+        insertions = []
+        continuation = False
+
+        for match in line_re.finditer(text):
+            line = match.group()
+
+            if line.startswith('>> '):
+                insertions.append((len(curcode),
+                                   [(0, Generic.Prompt, line[:3])]))
+                curcode += line[3:]
+
+            elif line.startswith('>>'):
+                insertions.append((len(curcode),
+                                   [(0, Generic.Prompt, line[:2])]))
+                curcode += line[2:]
+
+            elif line.startswith('???'):
+
+                idx = len(curcode)
+
+                # without is showing error on same line as before...?
+                # line = "\n" + line
+                token = (0, Generic.Traceback, line)
+                insertions.append((idx, [token]))
+            elif continuation and insertions:
+                # line_start is the length of the most recent prompt symbol
+                line_start = len(insertions[-1][-1][-1])
+                # Set leading spaces with the length of the prompt to be a generic prompt
+                # This keeps code aligned when prompts are removed, say with some Javascript
+                if line.startswith(' '*line_start):
+                    insertions.append(
+                        (len(curcode), [(0, Generic.Prompt, line[:line_start])]))
+                    curcode += line[line_start:]
+                else:
+                    curcode += line
+            else:
+                if curcode:
+                    yield from do_insertions(
+                        insertions, mlexer.get_tokens_unprocessed(curcode))
+                    curcode = ''
+                    insertions = []
+
+                yield match.start(), Generic.Output, line
+
+            # Does not allow continuation if a comment is included after the ellipses.
+            # Continues any line that ends with ..., even comments (lines that start with %)
+            if line.strip().endswith('...'):
+                continuation = True
+            else:
+                continuation = False
+
+        if curcode:  # or item:
+            yield from do_insertions(
+                insertions, mlexer.get_tokens_unprocessed(curcode))
+
+
+class OctaveLexer(RegexLexer):
+    """
+    For GNU Octave source code.
+    """
+    name = 'Octave'
+    url = 'https://www.gnu.org/software/octave/index'
+    aliases = ['octave']
+    filenames = ['*.m']
+    mimetypes = ['text/octave']
+    version_added = '1.5'
+
+    # These lists are generated automatically.
+    # Run the following in bash shell:
+    #
+    # First dump all of the Octave manual into a plain text file:
+    #
+    #   $ info octave --subnodes -o octave-manual
+    #
+    # Now grep through it:
+
+    # for i in \
+    #     "Built-in Function" "Command" "Function File" \
+    #     "Loadable Function" "Mapping Function";
+    # do
+    #     perl -e '@name = qw('"$i"');
+    #              print lc($name[0]),"_kw = [\n"';
+    #
+    #     perl -n -e 'print "\"$1\",\n" if /-- '"$i"': .* (\w*) \(/;' \
+    #         octave-manual | sort | uniq ;
+    #     echo "]" ;
+    #     echo;
+    # done
+
+    # taken from Octave Mercurial changeset 8cc154f45e37 (30-jan-2011)
+
+    builtin_kw = (
+        "addlistener", "addpath", "addproperty", "all",
+        "and", "any", "argnames", "argv", "assignin",
+        "atexit", "autoload",
+        "available_graphics_toolkits", "beep_on_error",
+        "bitand", "bitmax", "bitor", "bitshift", "bitxor",
+        "cat", "cell", "cellstr", "char", "class", "clc",
+        "columns", "command_line_path",
+        "completion_append_char", "completion_matches",
+        "complex", "confirm_recursive_rmdir", "cputime",
+        "crash_dumps_octave_core", "ctranspose", "cumprod",
+        "cumsum", "debug_on_error", "debug_on_interrupt",
+        "debug_on_warning", "default_save_options",
+        "dellistener", "diag", "diff", "disp",
+        "doc_cache_file", "do_string_escapes", "double",
+        "drawnow", "e", "echo_executing_commands", "eps",
+        "eq", "errno", "errno_list", "error", "eval",
+        "evalin", "exec", "exist", "exit", "eye", "false",
+        "fclear", "fclose", "fcntl", "fdisp", "feof",
+        "ferror", "feval", "fflush", "fgetl", "fgets",
+        "fieldnames", "file_in_loadpath", "file_in_path",
+        "filemarker", "filesep", "find_dir_in_path",
+        "fixed_point_format", "fnmatch", "fopen", "fork",
+        "formula", "fprintf", "fputs", "fread", "freport",
+        "frewind", "fscanf", "fseek", "fskipl", "ftell",
+        "functions", "fwrite", "ge", "genpath", "get",
+        "getegid", "getenv", "geteuid", "getgid",
+        "getpgrp", "getpid", "getppid", "getuid", "glob",
+        "gt", "gui_mode", "history_control",
+        "history_file", "history_size",
+        "history_timestamp_format_string", "home",
+        "horzcat", "hypot", "ifelse",
+        "ignore_function_time_stamp", "inferiorto",
+        "info_file", "info_program", "inline", "input",
+        "intmax", "intmin", "ipermute",
+        "is_absolute_filename", "isargout", "isbool",
+        "iscell", "iscellstr", "ischar", "iscomplex",
+        "isempty", "isfield", "isfloat", "isglobal",
+        "ishandle", "isieee", "isindex", "isinteger",
+        "islogical", "ismatrix", "ismethod", "isnull",
+        "isnumeric", "isobject", "isreal",
+        "is_rooted_relative_filename", "issorted",
+        "isstruct", "isvarname", "kbhit", "keyboard",
+        "kill", "lasterr", "lasterror", "lastwarn",
+        "ldivide", "le", "length", "link", "linspace",
+        "logical", "lstat", "lt", "make_absolute_filename",
+        "makeinfo_program", "max_recursion_depth", "merge",
+        "methods", "mfilename", "minus", "mislocked",
+        "mkdir", "mkfifo", "mkstemp", "mldivide", "mlock",
+        "mouse_wheel_zoom", "mpower", "mrdivide", "mtimes",
+        "munlock", "nargin", "nargout",
+        "native_float_format", "ndims", "ne", "nfields",
+        "nnz", "norm", "not", "numel", "nzmax",
+        "octave_config_info", "octave_core_file_limit",
+        "octave_core_file_name",
+        "octave_core_file_options", "ones", "or",
+        "output_max_field_width", "output_precision",
+        "page_output_immediately", "page_screen_output",
+        "path", "pathsep", "pause", "pclose", "permute",
+        "pi", "pipe", "plus", "popen", "power",
+        "print_empty_dimensions", "printf",
+        "print_struct_array_contents", "prod",
+        "program_invocation_name", "program_name",
+        "putenv", "puts", "pwd", "quit", "rats", "rdivide",
+        "readdir", "readlink", "read_readline_init_file",
+        "realmax", "realmin", "rehash", "rename",
+        "repelems", "re_read_readline_init_file", "reset",
+        "reshape", "resize", "restoredefaultpath",
+        "rethrow", "rmdir", "rmfield", "rmpath", "rows",
+        "save_header_format_string", "save_precision",
+        "saving_history", "scanf", "set", "setenv",
+        "shell_cmd", "sighup_dumps_octave_core",
+        "sigterm_dumps_octave_core", "silent_functions",
+        "single", "size", "size_equal", "sizemax",
+        "sizeof", "sleep", "source", "sparse_auto_mutate",
+        "split_long_rows", "sprintf", "squeeze", "sscanf",
+        "stat", "stderr", "stdin", "stdout", "strcmp",
+        "strcmpi", "string_fill_char", "strncmp",
+        "strncmpi", "struct", "struct_levels_to_print",
+        "strvcat", "subsasgn", "subsref", "sum", "sumsq",
+        "superiorto", "suppress_verbose_help_message",
+        "symlink", "system", "tic", "tilde_expand",
+        "times", "tmpfile", "tmpnam", "toc", "toupper",
+        "transpose", "true", "typeinfo", "umask", "uminus",
+        "uname", "undo_string_escapes", "unlink", "uplus",
+        "upper", "usage", "usleep", "vec", "vectorize",
+        "vertcat", "waitpid", "warning", "warranty",
+        "whos_line_format", "yes_or_no", "zeros",
+        "inf", "Inf", "nan", "NaN")
+
+    command_kw = ("close", "load", "who", "whos")
+
+    function_kw = (
+        "accumarray", "accumdim", "acosd", "acotd",
+        "acscd", "addtodate", "allchild", "ancestor",
+        "anova", "arch_fit", "arch_rnd", "arch_test",
+        "area", "arma_rnd", "arrayfun", "ascii", "asctime",
+        "asecd", "asind", "assert", "atand",
+        "autoreg_matrix", "autumn", "axes", "axis", "bar",
+        "barh", "bartlett", "bartlett_test", "beep",
+        "betacdf", "betainv", "betapdf", "betarnd",
+        "bicgstab", "bicubic", "binary", "binocdf",
+        "binoinv", "binopdf", "binornd", "bitcmp",
+        "bitget", "bitset", "blackman", "blanks",
+        "blkdiag", "bone", "box", "brighten", "calendar",
+        "cast", "cauchy_cdf", "cauchy_inv", "cauchy_pdf",
+        "cauchy_rnd", "caxis", "celldisp", "center", "cgs",
+        "chisquare_test_homogeneity",
+        "chisquare_test_independence", "circshift", "cla",
+        "clabel", "clf", "clock", "cloglog", "closereq",
+        "colon", "colorbar", "colormap", "colperm",
+        "comet", "common_size", "commutation_matrix",
+        "compan", "compare_versions", "compass",
+        "computer", "cond", "condest", "contour",
+        "contourc", "contourf", "contrast", "conv",
+        "convhull", "cool", "copper", "copyfile", "cor",
+        "corrcoef", "cor_test", "cosd", "cotd", "cov",
+        "cplxpair", "cross", "cscd", "cstrcat", "csvread",
+        "csvwrite", "ctime", "cumtrapz", "curl", "cut",
+        "cylinder", "date", "datenum", "datestr",
+        "datetick", "datevec", "dblquad", "deal",
+        "deblank", "deconv", "delaunay", "delaunayn",
+        "delete", "demo", "detrend", "diffpara", "diffuse",
+        "dir", "discrete_cdf", "discrete_inv",
+        "discrete_pdf", "discrete_rnd", "display",
+        "divergence", "dlmwrite", "dos", "dsearch",
+        "dsearchn", "duplication_matrix", "durbinlevinson",
+        "ellipsoid", "empirical_cdf", "empirical_inv",
+        "empirical_pdf", "empirical_rnd", "eomday",
+        "errorbar", "etime", "etreeplot", "example",
+        "expcdf", "expinv", "expm", "exppdf", "exprnd",
+        "ezcontour", "ezcontourf", "ezmesh", "ezmeshc",
+        "ezplot", "ezpolar", "ezsurf", "ezsurfc", "factor",
+        "factorial", "fail", "fcdf", "feather", "fftconv",
+        "fftfilt", "fftshift", "figure", "fileattrib",
+        "fileparts", "fill", "findall", "findobj",
+        "findstr", "finv", "flag", "flipdim", "fliplr",
+        "flipud", "fpdf", "fplot", "fractdiff", "freqz",
+        "freqz_plot", "frnd", "fsolve",
+        "f_test_regression", "ftp", "fullfile", "fzero",
+        "gamcdf", "gaminv", "gampdf", "gamrnd", "gca",
+        "gcbf", "gcbo", "gcf", "genvarname", "geocdf",
+        "geoinv", "geopdf", "geornd", "getfield", "ginput",
+        "glpk", "gls", "gplot", "gradient",
+        "graphics_toolkit", "gray", "grid", "griddata",
+        "griddatan", "gtext", "gunzip", "gzip", "hadamard",
+        "hamming", "hankel", "hanning", "hggroup",
+        "hidden", "hilb", "hist", "histc", "hold", "hot",
+        "hotelling_test", "housh", "hsv", "hurst",
+        "hygecdf", "hygeinv", "hygepdf", "hygernd",
+        "idivide", "ifftshift", "image", "imagesc",
+        "imfinfo", "imread", "imshow", "imwrite", "index",
+        "info", "inpolygon", "inputname", "interpft",
+        "interpn", "intersect", "invhilb", "iqr", "isa",
+        "isdefinite", "isdir", "is_duplicate_entry",
+        "isequal", "isequalwithequalnans", "isfigure",
+        "ishermitian", "ishghandle", "is_leap_year",
+        "isletter", "ismac", "ismember", "ispc", "isprime",
+        "isprop", "isscalar", "issquare", "isstrprop",
+        "issymmetric", "isunix", "is_valid_file_id",
+        "isvector", "jet", "kendall",
+        "kolmogorov_smirnov_cdf",
+        "kolmogorov_smirnov_test", "kruskal_wallis_test",
+        "krylov", "kurtosis", "laplace_cdf", "laplace_inv",
+        "laplace_pdf", "laplace_rnd", "legend", "legendre",
+        "license", "line", "linkprop", "list_primes",
+        "loadaudio", "loadobj", "logistic_cdf",
+        "logistic_inv", "logistic_pdf", "logistic_rnd",
+        "logit", "loglog", "loglogerr", "logm", "logncdf",
+        "logninv", "lognpdf", "lognrnd", "logspace",
+        "lookfor", "ls_command", "lsqnonneg", "magic",
+        "mahalanobis", "manova", "matlabroot",
+        "mcnemar_test", "mean", "meansq", "median", "menu",
+        "mesh", "meshc", "meshgrid", "meshz", "mexext",
+        "mget", "mkpp", "mode", "moment", "movefile",
+        "mpoles", "mput", "namelengthmax", "nargchk",
+        "nargoutchk", "nbincdf", "nbininv", "nbinpdf",
+        "nbinrnd", "nchoosek", "ndgrid", "newplot", "news",
+        "nonzeros", "normcdf", "normest", "norminv",
+        "normpdf", "normrnd", "now", "nthroot", "null",
+        "ocean", "ols", "onenormest", "optimget",
+        "optimset", "orderfields", "orient", "orth",
+        "pack", "pareto", "parseparams", "pascal", "patch",
+        "pathdef", "pcg", "pchip", "pcolor", "pcr",
+        "peaks", "periodogram", "perl", "perms", "pie",
+        "pink", "planerot", "playaudio", "plot",
+        "plotmatrix", "plotyy", "poisscdf", "poissinv",
+        "poisspdf", "poissrnd", "polar", "poly",
+        "polyaffine", "polyarea", "polyderiv", "polyfit",
+        "polygcd", "polyint", "polyout", "polyreduce",
+        "polyval", "polyvalm", "postpad", "powerset",
+        "ppder", "ppint", "ppjumps", "ppplot", "ppval",
+        "pqpnonneg", "prepad", "primes", "print",
+        "print_usage", "prism", "probit", "qp", "qqplot",
+        "quadcc", "quadgk", "quadl", "quadv", "quiver",
+        "qzhess", "rainbow", "randi", "range", "rank",
+        "ranks", "rat", "reallog", "realpow", "realsqrt",
+        "record", "rectangle_lw", "rectangle_sw",
+        "rectint", "refresh", "refreshdata",
+        "regexptranslate", "repmat", "residue", "ribbon",
+        "rindex", "roots", "rose", "rosser", "rotdim",
+        "rref", "run", "run_count", "rundemos", "run_test",
+        "runtests", "saveas", "saveaudio", "saveobj",
+        "savepath", "scatter", "secd", "semilogx",
+        "semilogxerr", "semilogy", "semilogyerr",
+        "setaudio", "setdiff", "setfield", "setxor",
+        "shading", "shift", "shiftdim", "sign_test",
+        "sinc", "sind", "sinetone", "sinewave", "skewness",
+        "slice", "sombrero", "sortrows", "spaugment",
+        "spconvert", "spdiags", "spearman", "spectral_adf",
+        "spectral_xdf", "specular", "speed", "spencer",
+        "speye", "spfun", "sphere", "spinmap", "spline",
+        "spones", "sprand", "sprandn", "sprandsym",
+        "spring", "spstats", "spy", "sqp", "stairs",
+        "statistics", "std", "stdnormal_cdf",
+        "stdnormal_inv", "stdnormal_pdf", "stdnormal_rnd",
+        "stem", "stft", "strcat", "strchr", "strjust",
+        "strmatch", "strread", "strsplit", "strtok",
+        "strtrim", "strtrunc", "structfun", "studentize",
+        "subplot", "subsindex", "subspace", "substr",
+        "substruct", "summer", "surf", "surface", "surfc",
+        "surfl", "surfnorm", "svds", "swapbytes",
+        "sylvester_matrix", "symvar", "synthesis", "table",
+        "tand", "tar", "tcdf", "tempdir", "tempname",
+        "test", "text", "textread", "textscan", "tinv",
+        "title", "toeplitz", "tpdf", "trace", "trapz",
+        "treelayout", "treeplot", "triangle_lw",
+        "triangle_sw", "tril", "trimesh", "triplequad",
+        "triplot", "trisurf", "triu", "trnd", "tsearchn",
+        "t_test", "t_test_regression", "type", "unidcdf",
+        "unidinv", "unidpdf", "unidrnd", "unifcdf",
+        "unifinv", "unifpdf", "unifrnd", "union", "unique",
+        "unix", "unmkpp", "unpack", "untabify", "untar",
+        "unwrap", "unzip", "u_test", "validatestring",
+        "vander", "var", "var_test", "vech", "ver",
+        "version", "view", "voronoi", "voronoin",
+        "waitforbuttonpress", "wavread", "wavwrite",
+        "wblcdf", "wblinv", "wblpdf", "wblrnd", "weekday",
+        "welch_test", "what", "white", "whitebg",
+        "wienrnd", "wilcoxon_test", "wilkinson", "winter",
+        "xlabel", "xlim", "ylabel", "yulewalker", "zip",
+        "zlabel", "z_test")
+
+    loadable_kw = (
+        "airy", "amd", "balance", "besselh", "besseli",
+        "besselj", "besselk", "bessely", "bitpack",
+        "bsxfun", "builtin", "ccolamd", "cellfun",
+        "cellslices", "chol", "choldelete", "cholinsert",
+        "cholinv", "cholshift", "cholupdate", "colamd",
+        "colloc", "convhulln", "convn", "csymamd",
+        "cummax", "cummin", "daspk", "daspk_options",
+        "dasrt", "dasrt_options", "dassl", "dassl_options",
+        "dbclear", "dbdown", "dbstack", "dbstatus",
+        "dbstop", "dbtype", "dbup", "dbwhere", "det",
+        "dlmread", "dmperm", "dot", "eig", "eigs",
+        "endgrent", "endpwent", "etree", "fft", "fftn",
+        "fftw", "filter", "find", "full", "gcd",
+        "getgrent", "getgrgid", "getgrnam", "getpwent",
+        "getpwnam", "getpwuid", "getrusage", "givens",
+        "gmtime", "gnuplot_binary", "hess", "ifft",
+        "ifftn", "inv", "isdebugmode", "issparse", "kron",
+        "localtime", "lookup", "lsode", "lsode_options",
+        "lu", "luinc", "luupdate", "matrix_type", "max",
+        "min", "mktime", "pinv", "qr", "qrdelete",
+        "qrinsert", "qrshift", "qrupdate", "quad",
+        "quad_options", "qz", "rand", "rande", "randg",
+        "randn", "randp", "randperm", "rcond", "regexp",
+        "regexpi", "regexprep", "schur", "setgrent",
+        "setpwent", "sort", "spalloc", "sparse", "spparms",
+        "sprank", "sqrtm", "strfind", "strftime",
+        "strptime", "strrep", "svd", "svd_driver", "syl",
+        "symamd", "symbfact", "symrcm", "time", "tsearch",
+        "typecast", "urlread", "urlwrite")
+
+    mapping_kw = (
+        "abs", "acos", "acosh", "acot", "acoth", "acsc",
+        "acsch", "angle", "arg", "asec", "asech", "asin",
+        "asinh", "atan", "atanh", "beta", "betainc",
+        "betaln", "bincoeff", "cbrt", "ceil", "conj", "cos",
+        "cosh", "cot", "coth", "csc", "csch", "erf", "erfc",
+        "erfcx", "erfinv", "exp", "finite", "fix", "floor",
+        "fmod", "gamma", "gammainc", "gammaln", "imag",
+        "isalnum", "isalpha", "isascii", "iscntrl",
+        "isdigit", "isfinite", "isgraph", "isinf",
+        "islower", "isna", "isnan", "isprint", "ispunct",
+        "isspace", "isupper", "isxdigit", "lcm", "lgamma",
+        "log", "lower", "mod", "real", "rem", "round",
+        "roundb", "sec", "sech", "sign", "sin", "sinh",
+        "sqrt", "tan", "tanh", "toascii", "tolower", "xor")
+
+    builtin_consts = (
+        "EDITOR", "EXEC_PATH", "I", "IMAGE_PATH", "NA",
+        "OCTAVE_HOME", "OCTAVE_VERSION", "PAGER",
+        "PAGER_FLAGS", "SEEK_CUR", "SEEK_END", "SEEK_SET",
+        "SIG", "S_ISBLK", "S_ISCHR", "S_ISDIR", "S_ISFIFO",
+        "S_ISLNK", "S_ISREG", "S_ISSOCK", "WCONTINUE",
+        "WCOREDUMP", "WEXITSTATUS", "WIFCONTINUED",
+        "WIFEXITED", "WIFSIGNALED", "WIFSTOPPED", "WNOHANG",
+        "WSTOPSIG", "WTERMSIG", "WUNTRACED")
+
+    tokens = {
+        'root': [
+            (r'%\{\s*\n', Comment.Multiline, 'percentblockcomment'),
+            (r'#\{\s*\n', Comment.Multiline, 'hashblockcomment'),
+            (r'[%#].*$', Comment),
+            (r'^\s*function\b', Keyword, 'deffunc'),
+
+            # from 'iskeyword' on hg changeset 8cc154f45e37
+            (words((
+                '__FILE__', '__LINE__', 'break', 'case', 'catch', 'classdef',
+                'continue', 'do', 'else', 'elseif', 'end', 'end_try_catch',
+                'end_unwind_protect', 'endclassdef', 'endevents', 'endfor',
+                'endfunction', 'endif', 'endmethods', 'endproperties', 'endswitch',
+                'endwhile', 'events', 'for', 'function', 'get', 'global', 'if',
+                'methods', 'otherwise', 'persistent', 'properties', 'return',
+                'set', 'static', 'switch', 'try', 'until', 'unwind_protect',
+                'unwind_protect_cleanup', 'while'), suffix=r'\b'),
+             Keyword),
+
+            (words(builtin_kw + command_kw + function_kw + loadable_kw + mapping_kw,
+                   suffix=r'\b'),  Name.Builtin),
+
+            (words(builtin_consts, suffix=r'\b'), Name.Constant),
+
+            # operators in Octave but not Matlab:
+            (r'-=|!=|!|/=|--', Operator),
+            # operators:
+            (r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator),
+            # operators in Octave but not Matlab requiring escape for re:
+            (r'\*=|\+=|\^=|\/=|\\=|\*\*|\+\+|\.\*\*', Operator),
+            # operators requiring escape for re:
+            (r'\.\*|\*|\+|\.\^|\^|\.\\|\.\/|\/|\\', Operator),
+
+
+            # punctuation:
+            (r'[\[\](){}:@.,]', Punctuation),
+            (r'=|:|;', Punctuation),
+
+            (r'"[^"]*"', String),
+
+            (r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float),
+            (r'\d+[eEf][+-]?[0-9]+', Number.Float),
+            (r'\d+', Number.Integer),
+
+            # quote can be transpose, instead of string:
+            # (not great, but handles common cases...)
+            (r'(?<=[\w)\].])\'+', Operator),
+            (r'(?|<=|>=|&&|&|~|\|\|?', Operator),
+            # operators requiring escape for re:
+            (r'\.\*|\*|\+|\.\^|\^|\.\\|\.\/|\/|\\', Operator),
+
+            # punctuation:
+            (r'[\[\](){}@.,=:;]+', Punctuation),
+
+            (r'"[^"]*"', String),
+
+            # quote can be transpose, instead of string:
+            # (not great, but handles common cases...)
+            (r'(?<=[\w)\].])\'+', Operator),
+            (r'(?', r'<', r'|', r'!', r"'")
+
+    operator_words = ('and', 'or', 'not')
+
+    tokens = {
+        'root': [
+            (r'/\*', Comment.Multiline, 'comment'),
+            (r'"(?:[^"\\]|\\.)*"', String),
+            (r'\(|\)|\[|\]|\{|\}', Punctuation),
+            (r'[,;$]', Punctuation),
+            (words (constants), Name.Constant),
+            (words (keywords), Keyword),
+            (words (operators), Operator),
+            (words (operator_words), Operator.Word),
+            (r'''(?x)
+              ((?:[a-zA-Z_#][\w#]*|`[^`]*`)
+              (?:::[a-zA-Z_#][\w#]*|`[^`]*`)*)(\s*)([(])''',
+             bygroups(Name.Function, Text.Whitespace, Punctuation)),
+            (r'''(?x)
+              (?:[a-zA-Z_#%][\w#%]*|`[^`]*`)
+              (?:::[a-zA-Z_#%][\w#%]*|`[^`]*`)*''', Name.Variable),
+            (r'[-+]?(\d*\.\d+([bdefls][-+]?\d+)?|\d+(\.\d*)?[bdefls][-+]?\d+)', Number.Float),
+            (r'[-+]?\d+', Number.Integer),
+            (r'\s+', Text.Whitespace),
+            (r'.', Text)
+        ],
+        'comment': [
+            (r'[^*/]+', Comment.Multiline),
+            (r'/\*', Comment.Multiline, '#push'),
+            (r'\*/', Comment.Multiline, '#pop'),
+            (r'[*/]', Comment.Multiline)
+        ]
+    }
+
+    def analyse_text (text):
+        strength = 0.0
+        # Input expression terminator.
+        if re.search (r'\$\s*$', text, re.MULTILINE):
+            strength += 0.05
+        # Function definition operator.
+        if ':=' in text:
+            strength += 0.02
+        return strength
diff --git a/local_packages/pygments/lexers/meson.py b/local_packages/pygments/lexers/meson.py
new file mode 100644
index 0000000..154a2de
--- /dev/null
+++ b/local_packages/pygments/lexers/meson.py
@@ -0,0 +1,139 @@
+"""
+    pygments.lexers.meson
+    ~~~~~~~~~~~~~~~~~~~~~
+
+    Pygments lexer for the Meson build system
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words, include
+from pygments.token import Comment, Name, Number, Punctuation, Operator, \
+    Keyword, String, Whitespace
+
+__all__ = ['MesonLexer']
+
+
+class MesonLexer(RegexLexer):
+    """Meson language lexer.
+
+    The grammar definition use to transcribe the syntax was retrieved from
+    https://mesonbuild.com/Syntax.html#grammar for version 0.58.
+    Some of those definitions are improperly transcribed, so the Meson++
+    implementation was also checked: https://github.com/dcbaker/meson-plus-plus.
+    """
+
+    # TODO String interpolation @VARNAME@ inner matches
+    # TODO keyword_arg: value inner matches
+
+    name = 'Meson'
+    url = 'https://mesonbuild.com/'
+    aliases = ['meson', 'meson.build']
+    filenames = ['meson.build', 'meson.options', 'meson_options.txt']
+    mimetypes = ['text/x-meson']
+    version_added = '2.10'
+
+    tokens = {
+        'root': [
+            (r'#.*?$', Comment),
+            (r"'''.*'''", String.Single),
+            (r'[1-9][0-9]*', Number.Integer),
+            (r'0o[0-7]+', Number.Oct),
+            (r'0x[a-fA-F0-9]+', Number.Hex),
+            include('string'),
+            include('keywords'),
+            include('expr'),
+            (r'[a-zA-Z_][a-zA-Z_0-9]*', Name),
+            (r'\s+', Whitespace),
+        ],
+        'string': [
+            (r"[']{3}([']{0,2}([^\\']|\\(.|\n)))*[']{3}", String),
+            (r"'.*?(?`_.
+    """
+
+    name = "MCFunction"
+    url = "https://minecraft.wiki/w/Commands"
+    aliases = ["mcfunction", "mcf"]
+    filenames = ["*.mcfunction"]
+    mimetypes = ["text/mcfunction"]
+    version_added = '2.12'
+
+    # Used to denotate the start of a block comment, borrowed from Github's mcfunction
+    _block_comment_prefix = "[>!]"
+
+    tokens = {
+        "root": [
+            include("names"),
+            include("comments"),
+            include("literals"),
+            include("whitespace"),
+            include("property"),
+            include("operators"),
+            include("selectors"),
+        ],
+
+        "names": [
+            # The start of a command (either beginning of line OR after the run keyword)
+            #  We don't encode a list of keywords since mods, plugins, or even pre-processors
+            #  may add new commands, so we have a 'close-enough' regex which catches them.
+            (r"^(\s*)([a-z_]+)", bygroups(Whitespace, Name.Builtin)),
+            (r"(?<=run)\s+[a-z_]+", Name.Builtin),
+
+            # UUID
+            (r"\b[0-9a-fA-F]+(?:-[0-9a-fA-F]+){4}\b", Name.Variable),
+            include("resource-name"),
+            # normal command names and scoreboards
+            #  there's no way to know the differences unfortuntely
+            (r"[A-Za-z_][\w.#%$]+", Keyword.Constant),
+            (r"[#%$][\w.#%$]+", Name.Variable.Magic),
+        ],
+
+        "resource-name": [
+            # resource names have to be lowercase
+            (r"#?[a-z_][a-z_.-]*:[a-z0-9_./-]+", Name.Function),
+            # similar to above except optional `:``
+            #  a `/` must be present "somewhere"
+            (r"#?[a-z0-9_\.\-]+\/[a-z0-9_\.\-\/]+", Name.Function),
+        ],
+
+        "whitespace": [
+            (r"\s+", Whitespace),
+        ],
+
+        "comments": [
+            (rf"^\s*(#{_block_comment_prefix})", Comment.Multiline,
+             ("comments.block", "comments.block.emphasized")),
+            (r"#.*$", Comment.Single),
+        ],
+        "comments.block": [
+            (rf"^\s*#{_block_comment_prefix}", Comment.Multiline,
+             "comments.block.emphasized"),
+            (r"^\s*#", Comment.Multiline, "comments.block.normal"),
+            default("#pop"),
+        ],
+        "comments.block.normal": [
+            include("comments.block.special"),
+            (r"\S+", Comment.Multiline),
+            (r"\n", Text, "#pop"),
+            include("whitespace"),
+        ],
+        "comments.block.emphasized": [
+            include("comments.block.special"),
+            (r"\S+", String.Doc),
+            (r"\n", Text, "#pop"),
+            include("whitespace"),
+        ],
+        "comments.block.special": [
+            # Params
+            (r"@\S+", Name.Decorator),
+
+            include("resource-name"),
+
+            # Scoreboard player names
+            (r"[#%$][\w.#%$]+", Name.Variable.Magic),
+        ],
+
+        "operators": [
+            (r"[\-~%^?!+*<>\\/|&=.]", Operator),
+        ],
+
+        "literals": [
+            (r"\.\.", Literal),
+            (r"(true|false)", Keyword.Pseudo),
+
+            # these are like unquoted strings and appear in many places
+            (r"[A-Za-z_]+", Name.Variable.Class),
+
+            (r"[0-7]b", Number.Byte),
+            (r"[+-]?\d*\.?\d+([eE]?[+-]?\d+)?[df]?\b", Number.Float),
+            (r"[+-]?\d+\b", Number.Integer),
+            (r'"', String.Double, "literals.string-double"),
+            (r"'", String.Single, "literals.string-single"),
+        ],
+        "literals.string-double": [
+            (r"\\.", String.Escape),
+            (r'[^\\"\n]+', String.Double),
+            (r'"', String.Double, "#pop"),
+        ],
+        "literals.string-single": [
+            (r"\\.", String.Escape),
+            (r"[^\\'\n]+", String.Single),
+            (r"'", String.Single, "#pop"),
+        ],
+
+        "selectors": [
+            (r"@[a-z]", Name.Variable),
+        ],
+
+
+        ## Generic Property Container
+        # There are several, differing instances where the language accepts
+        #  specific contained keys or contained key, value pairings.
+        #
+        # Property Maps:
+        # - Starts with either `[` or `{`
+        # - Key separated by `:` or `=`
+        # - Deliminated by `,`
+        #
+        # Property Lists:
+        # - Starts with `[`
+        # - Deliminated by `,`
+        #
+        # For simplicity, these patterns match a generic, nestable structure
+        #  which follow a key, value pattern. For normal lists, there's only keys.
+        # This allow some "illegal" structures, but we'll accept those for
+        #  sake of simplicity
+        #
+        # Examples:
+        # - `[facing=up, powered=true]` (blockstate)
+        # - `[name="hello world", nbt={key: 1b}]` (selector + nbt)
+        # - `[{"text": "value"}, "literal"]` (json)
+        ##
+        "property": [
+            # This state gets included in root and also several substates
+            # We do this to shortcut the starting of new properties
+            #  within other properties. Lists can have sublists and compounds
+            #  and values can start a new property (see the `difficult_1.txt`
+            #  snippet).
+            (r"\{", Punctuation, ("property.curly", "property.key")),
+            (r"\[", Punctuation, ("property.square", "property.key")),
+        ],
+        "property.curly": [
+            include("whitespace"),
+            include("property"),
+            (r"\}", Punctuation, "#pop"),
+        ],
+        "property.square": [
+            include("whitespace"),
+            include("property"),
+            (r"\]", Punctuation, "#pop"),
+
+            # lists can have sequences of items
+            (r",", Punctuation),
+        ],
+        "property.key": [
+            include("whitespace"),
+
+            # resource names (for advancements)
+            #  can omit `:` to default `minecraft:`
+            # must check if there is a future equals sign if `:` is in the name
+            (r"#?[a-z_][a-z_\.\-]*\:[a-z0-9_\.\-/]+(?=\s*\=)", Name.Attribute, "property.delimiter"),
+            (r"#?[a-z_][a-z0-9_\.\-/]+", Name.Attribute, "property.delimiter"),
+
+            # unquoted NBT key
+            (r"[A-Za-z_\-\+]+", Name.Attribute, "property.delimiter"),
+
+            # quoted JSON or NBT key
+            (r'"', Name.Attribute, "property.delimiter", "literals.string-double"),
+            (r"'", Name.Attribute, "property.delimiter", "literals.string-single"),
+
+            # index for a list
+            (r"-?\d+", Number.Integer, "property.delimiter"),
+
+            default("#pop"),
+        ],
+        "property.key.string-double": [
+            (r"\\.", String.Escape),
+            (r'[^\\"\n]+', Name.Attribute),
+            (r'"', Name.Attribute, "#pop"),
+        ],
+        "property.key.string-single": [
+            (r"\\.", String.Escape),
+            (r"[^\\'\n]+", Name.Attribute),
+            (r"'", Name.Attribute, "#pop"),
+        ],
+        "property.delimiter": [
+            include("whitespace"),
+
+            (r"[:=]!?", Punctuation, "property.value"),
+            (r",", Punctuation),
+
+            default("#pop"),
+        ],
+        "property.value": [
+            include("whitespace"),
+
+            # unquoted resource names are valid literals here
+            (r"#?[a-z_][a-z_\.\-]*\:[a-z0-9_\.\-/]+", Name.Tag),
+            (r"#?[a-z_][a-z0-9_\.\-/]+", Name.Tag),
+
+            include("literals"),
+            include("property"),
+
+            default("#pop"),
+        ],
+    }
+
+
+class MCSchemaLexer(RegexLexer):
+    """Lexer for Minecraft Add-ons data Schemas, an interface structure standard used in Minecraft
+    """
+
+    name = 'MCSchema'
+    url = 'https://learn.microsoft.com/en-us/minecraft/creator/reference/content/schemasreference/'
+    aliases = ['mcschema']
+    filenames = ['*.mcschema']
+    mimetypes = ['text/mcschema']
+    version_added = '2.14'
+
+    tokens = {
+        'commentsandwhitespace': [
+            (r'\s+', Whitespace),
+            (r'//.*?$', Comment.Single),
+            (r'/\*.*?\*/', Comment.Multiline)
+        ],
+        'slashstartsregex': [
+            include('commentsandwhitespace'),
+            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
+             r'([gimuysd]+\b|\B)', String.Regex, '#pop'),
+            (r'(?=/)', Text, ('#pop', 'badregex')),
+            default('#pop')
+        ],
+        'badregex': [
+            (r'\n', Whitespace, '#pop')
+        ],
+        'singlestring': [
+            (r'\\.', String.Escape),
+            (r"'", String.Single, '#pop'),
+            (r"[^\\']+", String.Single),
+        ],
+        'doublestring': [
+            (r'\\.', String.Escape),
+            (r'"', String.Double, '#pop'),
+            (r'[^\\"]+', String.Double),
+        ],
+        'root': [
+            (r'^(?=\s|/|', Comment, '#pop'),
+            (r'[^\-]+|-', Comment),
+        ],
+    }
+
+
+class ReasonLexer(RegexLexer):
+    """
+    For the ReasonML language.
+    """
+
+    name = 'ReasonML'
+    url = 'https://reasonml.github.io/'
+    aliases = ['reasonml', 'reason']
+    filenames = ['*.re', '*.rei']
+    mimetypes = ['text/x-reasonml']
+    version_added = '2.6'
+
+    keywords = (
+        'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done', 'downto',
+        'else', 'end', 'exception', 'external', 'false', 'for', 'fun', 'esfun',
+        'function', 'functor', 'if', 'in', 'include', 'inherit', 'initializer', 'lazy',
+        'let', 'switch', 'module', 'pub', 'mutable', 'new', 'nonrec', 'object', 'of',
+        'open', 'pri', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try',
+        'type', 'val', 'virtual', 'when', 'while', 'with',
+    )
+    keyopts = (
+        '!=', '#', '&', '&&', r'\(', r'\)', r'\*', r'\+', ',', '-',
+        r'-\.', '=>', r'\.', r'\.\.', r'\.\.\.', ':', '::', ':=', ':>', ';', ';;', '<',
+        '<-', '=', '>', '>]', r'>\}', r'\?', r'\?\?', r'\[', r'\[<', r'\[>',
+        r'\[\|', ']', '_', '`', r'\{', r'\{<', r'\|', r'\|\|', r'\|]', r'\}', '~'
+    )
+
+    operators = r'[!$%&*+\./:<=>?@^|~-]'
+    word_operators = ('and', 'asr', 'land', 'lor', 'lsl', 'lsr', 'lxor', 'mod', 'or')
+    prefix_syms = r'[!?~]'
+    infix_syms = r'[=<>@^|&+\*/$%-]'
+    primitives = ('unit', 'int', 'float', 'bool', 'string', 'char', 'list', 'array')
+
+    tokens = {
+        'escape-sequence': [
+            (r'\\[\\"\'ntbr]', String.Escape),
+            (r'\\[0-9]{3}', String.Escape),
+            (r'\\x[0-9a-fA-F]{2}', String.Escape),
+        ],
+        'root': [
+            (r'\s+', Text),
+            (r'false|true|\(\)|\[\]', Name.Builtin.Pseudo),
+            (r'\b([A-Z][\w\']*)(?=\s*\.)', Name.Namespace, 'dotted'),
+            (r'\b([A-Z][\w\']*)', Name.Class),
+            (r'//.*?\n', Comment.Single),
+            (r'\/\*(?!/)', Comment.Multiline, 'comment'),
+            (r'\b({})\b'.format('|'.join(keywords)), Keyword),
+            (r'({})'.format('|'.join(keyopts[::-1])), Operator.Word),
+            (rf'({infix_syms}|{prefix_syms})?{operators}', Operator),
+            (r'\b({})\b'.format('|'.join(word_operators)), Operator.Word),
+            (r'\b({})\b'.format('|'.join(primitives)), Keyword.Type),
+
+            (r"[^\W\d][\w']*", Name),
+
+            (r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float),
+            (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
+            (r'0[oO][0-7][0-7_]*', Number.Oct),
+            (r'0[bB][01][01_]*', Number.Bin),
+            (r'\d[\d_]*', Number.Integer),
+
+            (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'",
+             String.Char),
+            (r"'.'", String.Char),
+            (r"'", Keyword),
+
+            (r'"', String.Double, 'string'),
+
+            (r'[~?][a-z][\w\']*:', Name.Variable),
+        ],
+        'comment': [
+            (r'[^/*]+', Comment.Multiline),
+            (r'\/\*', Comment.Multiline, '#push'),
+            (r'\*\/', Comment.Multiline, '#pop'),
+            (r'\*', Comment.Multiline),
+        ],
+        'string': [
+            (r'[^\\"]+', String.Double),
+            include('escape-sequence'),
+            (r'\\\n', String.Double),
+            (r'"', String.Double, '#pop'),
+        ],
+        'dotted': [
+            (r'\s+', Text),
+            (r'\.', Punctuation),
+            (r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace),
+            (r'[A-Z][\w\']*', Name.Class, '#pop'),
+            (r'[a-z_][\w\']*', Name, '#pop'),
+            default('#pop'),
+        ],
+    }
+
+
+class FStarLexer(RegexLexer):
+    """
+    For the F* language.
+    """
+
+    name = 'FStar'
+    url = 'https://www.fstar-lang.org/'
+    aliases = ['fstar']
+    filenames = ['*.fst', '*.fsti']
+    mimetypes = ['text/x-fstar']
+    version_added = '2.7'
+
+    keywords = (
+        'abstract', 'attributes', 'noeq', 'unopteq', 'and'
+        'begin', 'by', 'default', 'effect', 'else', 'end', 'ensures',
+        'exception', 'exists', 'false', 'forall', 'fun', 'function', 'if',
+        'in', 'include', 'inline', 'inline_for_extraction', 'irreducible',
+        'logic', 'match', 'module', 'mutable', 'new', 'new_effect', 'noextract',
+        'of', 'open', 'opaque', 'private', 'range_of', 'reifiable',
+        'reify', 'reflectable', 'requires', 'set_range_of', 'sub_effect',
+        'synth', 'then', 'total', 'true', 'try', 'type', 'unfold', 'unfoldable',
+        'val', 'when', 'with', 'not'
+    )
+    decl_keywords = ('let', 'rec')
+    assume_keywords = ('assume', 'admit', 'assert', 'calc')
+    keyopts = (
+        r'~', r'-', r'/\\', r'\\/', r'<:', r'<@', r'\(\|', r'\|\)', r'#', r'u#',
+        r'&', r'\(', r'\)', r'\(\)', r',', r'~>', r'->', r'<-', r'<--', r'<==>',
+        r'==>', r'\.', r'\?', r'\?\.', r'\.\[', r'\.\(', r'\.\(\|', r'\.\[\|',
+        r'\{:pattern', r':', r'::', r':=', r';', r';;', r'=', r'%\[', r'!\{',
+        r'\[', r'\[@', r'\[\|', r'\|>', r'\]', r'\|\]', r'\{', r'\|', r'\}', r'\$'
+    )
+
+    operators = r'[!$%&*+\./:<=>?@^|~-]'
+    prefix_syms = r'[!?~]'
+    infix_syms = r'[=<>@^|&+\*/$%-]'
+    primitives = ('unit', 'int', 'float', 'bool', 'string', 'char', 'list', 'array')
+
+    tokens = {
+        'escape-sequence': [
+            (r'\\[\\"\'ntbr]', String.Escape),
+            (r'\\[0-9]{3}', String.Escape),
+            (r'\\x[0-9a-fA-F]{2}', String.Escape),
+        ],
+        'root': [
+            (r'\s+', Text),
+            (r'false|true|False|True|\(\)|\[\]', Name.Builtin.Pseudo),
+            (r'\b([A-Z][\w\']*)(?=\s*\.)', Name.Namespace, 'dotted'),
+            (r'\b([A-Z][\w\']*)', Name.Class),
+            (r'\(\*(?![)])', Comment, 'comment'),
+            (r'\/\/.+$', Comment),
+            (r'\b({})\b'.format('|'.join(keywords)), Keyword),
+            (r'\b({})\b'.format('|'.join(assume_keywords)), Name.Exception),
+            (r'\b({})\b'.format('|'.join(decl_keywords)), Keyword.Declaration),
+            (r'({})'.format('|'.join(keyopts[::-1])), Operator),
+            (rf'({infix_syms}|{prefix_syms})?{operators}', Operator),
+            (r'\b({})\b'.format('|'.join(primitives)), Keyword.Type),
+
+            (r"[^\W\d][\w']*", Name),
+
+            (r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float),
+            (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
+            (r'0[oO][0-7][0-7_]*', Number.Oct),
+            (r'0[bB][01][01_]*', Number.Bin),
+            (r'\d[\d_]*', Number.Integer),
+
+            (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'",
+             String.Char),
+            (r"'.'", String.Char),
+            (r"'", Keyword),  # a stray quote is another syntax element
+            (r"\`([\w\'.]+)\`", Operator.Word),  # for infix applications
+            (r"\`", Keyword),  # for quoting
+            (r'"', String.Double, 'string'),
+
+            (r'[~?][a-z][\w\']*:', Name.Variable),
+        ],
+        'comment': [
+            (r'[^(*)]+', Comment),
+            (r'\(\*', Comment, '#push'),
+            (r'\*\)', Comment, '#pop'),
+            (r'[(*)]', Comment),
+        ],
+        'string': [
+            (r'[^\\"]+', String.Double),
+            include('escape-sequence'),
+            (r'\\\n', String.Double),
+            (r'"', String.Double, '#pop'),
+        ],
+        'dotted': [
+            (r'\s+', Text),
+            (r'\.', Punctuation),
+            (r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace),
+            (r'[A-Z][\w\']*', Name.Class, '#pop'),
+            (r'[a-z_][\w\']*', Name, '#pop'),
+            default('#pop'),
+        ],
+    }
diff --git a/local_packages/pygments/lexers/modeling.py b/local_packages/pygments/lexers/modeling.py
new file mode 100644
index 0000000..4c22aeb
--- /dev/null
+++ b/local_packages/pygments/lexers/modeling.py
@@ -0,0 +1,368 @@
+"""
+    pygments.lexers.modeling
+    ~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for modeling languages.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups, using, default
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+    Number, Punctuation, Whitespace
+
+from pygments.lexers.html import HtmlLexer
+from pygments.lexers import _stan_builtins
+
+__all__ = ['ModelicaLexer', 'BugsLexer', 'JagsLexer', 'StanLexer']
+
+
+class ModelicaLexer(RegexLexer):
+    """
+    For Modelica source code.
+    """
+    name = 'Modelica'
+    url = 'http://www.modelica.org/'
+    aliases = ['modelica']
+    filenames = ['*.mo']
+    mimetypes = ['text/x-modelica']
+    version_added = '1.1'
+
+    flags = re.DOTALL | re.MULTILINE
+
+    _name = r"(?:'(?:[^\\']|\\.)+'|[a-zA-Z_]\w*)"
+
+    tokens = {
+        'whitespace': [
+            (r'[\s\ufeff]+', Text),
+            (r'//[^\n]*\n?', Comment.Single),
+            (r'/\*.*?\*/', Comment.Multiline)
+        ],
+        'root': [
+            include('whitespace'),
+            (r'"', String.Double, 'string'),
+            (r'[()\[\]{},;]+', Punctuation),
+            (r'\.?[*^/+-]|\.|<>|[<>:=]=?', Operator),
+            (r'\d+(\.?\d*[eE][-+]?\d+|\.\d*)', Number.Float),
+            (r'\d+', Number.Integer),
+            (r'(abs|acos|actualStream|array|asin|assert|AssertionLevel|atan|'
+             r'atan2|backSample|Boolean|cardinality|cat|ceil|change|Clock|'
+             r'Connections|cos|cosh|cross|delay|diagonal|div|edge|exp|'
+             r'ExternalObject|fill|floor|getInstanceName|hold|homotopy|'
+             r'identity|inStream|integer|Integer|interval|inverse|isPresent|'
+             r'linspace|log|log10|matrix|max|min|mod|ndims|noClock|noEvent|'
+             r'ones|outerProduct|pre|previous|product|Real|reinit|rem|rooted|'
+             r'sample|scalar|semiLinear|shiftSample|sign|sin|sinh|size|skew|'
+             r'smooth|spatialDistribution|sqrt|StateSelect|String|subSample|'
+             r'sum|superSample|symmetric|tan|tanh|terminal|terminate|time|'
+             r'transpose|vector|zeros)\b', Name.Builtin),
+            (r'(algorithm|annotation|break|connect|constant|constrainedby|der|'
+             r'discrete|each|else|elseif|elsewhen|encapsulated|enumeration|'
+             r'equation|exit|expandable|extends|external|firstTick|final|flow|for|if|'
+             r'import|impure|in|initial|inner|input|interval|loop|nondiscrete|outer|'
+             r'output|parameter|partial|protected|public|pure|redeclare|'
+             r'replaceable|return|stream|then|when|while)\b',
+             Keyword.Reserved),
+            (r'(and|not|or)\b', Operator.Word),
+            (r'(block|class|connector|end|function|model|operator|package|'
+             r'record|type)\b', Keyword.Reserved, 'class'),
+            (r'(false|true)\b', Keyword.Constant),
+            (r'within\b', Keyword.Reserved, 'package-prefix'),
+            (_name, Name)
+        ],
+        'class': [
+            include('whitespace'),
+            (r'(function|record)\b', Keyword.Reserved),
+            (r'(if|for|when|while)\b', Keyword.Reserved, '#pop'),
+            (_name, Name.Class, '#pop'),
+            default('#pop')
+        ],
+        'package-prefix': [
+            include('whitespace'),
+            (_name, Name.Namespace, '#pop'),
+            default('#pop')
+        ],
+        'string': [
+            (r'"', String.Double, '#pop'),
+            (r'\\[\'"?\\abfnrtv]', String.Escape),
+            (r'(?i)<\s*html\s*>([^\\"]|\\.)+?(<\s*/\s*html\s*>|(?="))',
+             using(HtmlLexer)),
+            (r'<|\\?[^"\\<]+', String.Double)
+        ]
+    }
+
+
+class BugsLexer(RegexLexer):
+    """
+    Pygments Lexer for OpenBugs and WinBugs
+    models.
+    """
+
+    name = 'BUGS'
+    aliases = ['bugs', 'winbugs', 'openbugs']
+    filenames = ['*.bug']
+    url = 'https://www.mrc-bsu.cam.ac.uk/software/bugs/openbugs'
+    version_added = '1.6'
+
+    _FUNCTIONS = (
+        # Scalar functions
+        'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh',
+        'cloglog', 'cos', 'cosh', 'cumulative', 'cut', 'density', 'deviance',
+        'equals', 'expr', 'gammap', 'ilogit', 'icloglog', 'integral', 'log',
+        'logfact', 'loggam', 'logit', 'max', 'min', 'phi', 'post.p.value',
+        'pow', 'prior.p.value', 'probit', 'replicate.post', 'replicate.prior',
+        'round', 'sin', 'sinh', 'solution', 'sqrt', 'step', 'tan', 'tanh',
+        'trunc',
+        # Vector functions
+        'inprod', 'interp.lin', 'inverse', 'logdet', 'mean', 'eigen.vals',
+        'ode', 'prod', 'p.valueM', 'rank', 'ranked', 'replicate.postM',
+        'sd', 'sort', 'sum',
+        # Special
+        'D', 'I', 'F', 'T', 'C')
+    """ OpenBUGS built-in functions
+
+    From http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAII
+
+    This also includes
+
+    - T, C, I : Truncation and censoring.
+      ``T`` and ``C`` are in OpenBUGS. ``I`` in WinBUGS.
+    - D : ODE
+    - F : Functional http://www.openbugs.info/Examples/Functionals.html
+
+    """
+
+    _DISTRIBUTIONS = ('dbern', 'dbin', 'dcat', 'dnegbin', 'dpois',
+                      'dhyper', 'dbeta', 'dchisqr', 'ddexp', 'dexp',
+                      'dflat', 'dgamma', 'dgev', 'df', 'dggamma', 'dgpar',
+                      'dloglik', 'dlnorm', 'dlogis', 'dnorm', 'dpar',
+                      'dt', 'dunif', 'dweib', 'dmulti', 'ddirch', 'dmnorm',
+                      'dmt', 'dwish')
+    """ OpenBUGS built-in distributions
+
+    Functions from
+    http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAI
+    """
+
+    tokens = {
+        'whitespace': [
+            (r"\s+", Text),
+        ],
+        'comments': [
+            # Comments
+            (r'#.*$', Comment.Single),
+        ],
+        'root': [
+            # Comments
+            include('comments'),
+            include('whitespace'),
+            # Block start
+            (r'(model)(\s+)(\{)',
+             bygroups(Keyword.Namespace, Text, Punctuation)),
+            # Reserved Words
+            (r'(for|in)(?![\w.])', Keyword.Reserved),
+            # Built-in Functions
+            (r'({})(?=\s*\()'.format(r'|'.join(_FUNCTIONS + _DISTRIBUTIONS)),
+             Name.Builtin),
+            # Regular variable names
+            (r'[A-Za-z][\w.]*', Name),
+            # Number Literals
+            (r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number),
+            # Punctuation
+            (r'\[|\]|\(|\)|:|,|;', Punctuation),
+            # Assignment operators
+            # SLexer makes these tokens Operators.
+            (r'<-|~', Operator),
+            # Infix and prefix operators
+            (r'\+|-|\*|/', Operator),
+            # Block
+            (r'[{}]', Punctuation),
+        ]
+    }
+
+    def analyse_text(text):
+        if re.search(r"^\s*model\s*{", text, re.M):
+            return 0.7
+        else:
+            return 0.0
+
+
+class JagsLexer(RegexLexer):
+    """
+    Pygments Lexer for JAGS.
+    """
+
+    name = 'JAGS'
+    aliases = ['jags']
+    filenames = ['*.jag', '*.bug']
+    url = 'https://mcmc-jags.sourceforge.io'
+    version_added = '1.6'
+
+    # JAGS
+    _FUNCTIONS = (
+        'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh',
+        'cos', 'cosh', 'cloglog',
+        'equals', 'exp', 'icloglog', 'ifelse', 'ilogit', 'log', 'logfact',
+        'loggam', 'logit', 'phi', 'pow', 'probit', 'round', 'sin', 'sinh',
+        'sqrt', 'step', 'tan', 'tanh', 'trunc', 'inprod', 'interp.lin',
+        'logdet', 'max', 'mean', 'min', 'prod', 'sum', 'sd', 'inverse',
+        'rank', 'sort', 't', 'acos', 'acosh', 'asin', 'asinh', 'atan',
+        # Truncation/Censoring (should I include)
+        'T', 'I')
+    # Distributions with density, probability and quartile functions
+    _DISTRIBUTIONS = tuple(f'[dpq]{x}' for x in
+                           ('bern', 'beta', 'dchiqsqr', 'ddexp', 'dexp',
+                            'df', 'gamma', 'gen.gamma', 'logis', 'lnorm',
+                            'negbin', 'nchisqr', 'norm', 'par', 'pois', 'weib'))
+    # Other distributions without density and probability
+    _OTHER_DISTRIBUTIONS = (
+        'dt', 'dunif', 'dbetabin', 'dbern', 'dbin', 'dcat', 'dhyper',
+        'ddirch', 'dmnorm', 'dwish', 'dmt', 'dmulti', 'dbinom', 'dchisq',
+        'dnbinom', 'dweibull', 'ddirich')
+
+    tokens = {
+        'whitespace': [
+            (r"\s+", Text),
+        ],
+        'names': [
+            # Regular variable names
+            (r'[a-zA-Z][\w.]*\b', Name),
+        ],
+        'comments': [
+            # do not use stateful comments
+            (r'(?s)/\*.*?\*/', Comment.Multiline),
+            # Comments
+            (r'#.*$', Comment.Single),
+        ],
+        'root': [
+            # Comments
+            include('comments'),
+            include('whitespace'),
+            # Block start
+            (r'(model|data)(\s+)(\{)',
+             bygroups(Keyword.Namespace, Text, Punctuation)),
+            (r'var(?![\w.])', Keyword.Declaration),
+            # Reserved Words
+            (r'(for|in)(?![\w.])', Keyword.Reserved),
+            # Builtins
+            # Need to use lookahead because . is a valid char
+            (r'({})(?=\s*\()'.format(r'|'.join(_FUNCTIONS
+                                          + _DISTRIBUTIONS
+                                          + _OTHER_DISTRIBUTIONS)),
+             Name.Builtin),
+            # Names
+            include('names'),
+            # Number Literals
+            (r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number),
+            (r'\[|\]|\(|\)|:|,|;', Punctuation),
+            # Assignment operators
+            (r'<-|~', Operator),
+            # # JAGS includes many more than OpenBUGS
+            (r'\+|-|\*|\/|\|\|[&]{2}|[<>=]=?|\^|%.*?%', Operator),
+            (r'[{}]', Punctuation),
+        ]
+    }
+
+    def analyse_text(text):
+        if re.search(r'^\s*model\s*\{', text, re.M):
+            if re.search(r'^\s*data\s*\{', text, re.M):
+                return 0.9
+            elif re.search(r'^\s*var', text, re.M):
+                return 0.9
+            else:
+                return 0.3
+        else:
+            return 0
+
+
+class StanLexer(RegexLexer):
+    """Pygments Lexer for Stan models.
+
+    The Stan modeling language is specified in the *Stan Modeling Language
+    User's Guide and Reference Manual, v2.17.0*,
+    `pdf `__.
+    """
+
+    name = 'Stan'
+    aliases = ['stan']
+    filenames = ['*.stan']
+    url = 'https://mc-stan.org'
+    version_added = '1.6'
+
+    tokens = {
+        'whitespace': [
+            (r"\s+", Text),
+        ],
+        'comments': [
+            (r'(?s)/\*.*?\*/', Comment.Multiline),
+            # Comments
+            (r'(//|#).*$', Comment.Single),
+        ],
+        'root': [
+            (r'"[^"]*"', String),
+            # Comments
+            include('comments'),
+            # block start
+            include('whitespace'),
+            # Block start
+            (r'({})(\s*)(\{{)'.format(r'|'.join(('functions', 'data', r'transformed\s+?data',
+                        'parameters', r'transformed\s+parameters',
+                        'model', r'generated\s+quantities'))),
+             bygroups(Keyword.Namespace, Text, Punctuation)),
+            # target keyword
+            (r'target\s*\+=', Keyword),
+            # jacobian += statement
+            (r'jacobian\s*\+=', Keyword),
+            # Reserved Words
+            (r'({})\b'.format(r'|'.join(_stan_builtins.KEYWORDS)), Keyword),
+            # Truncation
+            (r'T(?=\s*\[)', Keyword),
+            # Data types
+            (r'({})\b'.format(r'|'.join(_stan_builtins.TYPES)), Keyword.Type),
+             # < should be punctuation, but elsewhere I can't tell if it is in
+             # a range constraint
+            (r'(<)(\s*)(upper|lower|offset|multiplier)(\s*)(=)',
+             bygroups(Operator, Whitespace, Keyword, Whitespace, Punctuation)),
+            (r'(,)(\s*)(upper)(\s*)(=)',
+             bygroups(Punctuation, Whitespace, Keyword, Whitespace, Punctuation)),
+            # Punctuation
+            (r"[;,\[\]()]", Punctuation),
+            # Builtin
+            (r'({})(?=\s*\()'.format('|'.join(_stan_builtins.FUNCTIONS)), Name.Builtin),
+            (r'(~)(\s*)({})(?=\s*\()'.format('|'.join(_stan_builtins.DISTRIBUTIONS)),
+                bygroups(Operator, Whitespace, Name.Builtin)),
+            # Special names ending in __, like lp__
+            (r'[A-Za-z]\w*__\b', Name.Builtin.Pseudo),
+            (r'({})\b'.format(r'|'.join(_stan_builtins.RESERVED)), Keyword.Reserved),
+            # user-defined functions
+            (r'[A-Za-z]\w*(?=\s*\()]', Name.Function),
+            # Imaginary Literals
+            (r'[0-9]+(\.[0-9]*)?([eE][+-]?[0-9]+)?i', Number.Float),
+            (r'\.[0-9]+([eE][+-]?[0-9]+)?i', Number.Float),
+            (r'[0-9]+i', Number.Float),
+            # Real Literals
+            (r'[0-9]+(\.[0-9]*)?([eE][+-]?[0-9]+)?', Number.Float),
+            (r'\.[0-9]+([eE][+-]?[0-9]+)?', Number.Float),
+            # Integer Literals
+            (r'[0-9]+', Number.Integer),
+            # Regular variable names
+            (r'[A-Za-z]\w*\b', Name),
+            # Assignment operators
+            (r'<-|(?:\+|-|\.?/|\.?\*|=)?=|~', Operator),
+            # Infix, prefix and postfix operators (and = )
+            (r"\+|-|\.?\*|\.?/|\\|'|\.?\^|!=?|<=?|>=?|\|\||&&|%|\?|:|%/%|!", Operator),
+            # Block delimiters
+            (r'[{}]', Punctuation),
+            # Distribution |
+            (r'\|', Punctuation)
+        ]
+    }
+
+    def analyse_text(text):
+        if re.search(r'^\s*parameters\s*\{', text, re.M):
+            return 1.0
+        else:
+            return 0.0
diff --git a/local_packages/pygments/lexers/modula2.py b/local_packages/pygments/lexers/modula2.py
new file mode 100644
index 0000000..c0ebd5c
--- /dev/null
+++ b/local_packages/pygments/lexers/modula2.py
@@ -0,0 +1,1579 @@
+"""
+    pygments.lexers.modula2
+    ~~~~~~~~~~~~~~~~~~~~~~~
+
+    Multi-Dialect Lexer for Modula-2.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include
+from pygments.util import get_bool_opt, get_list_opt
+from pygments.token import Text, Comment, Operator, Keyword, Name, \
+    String, Number, Punctuation, Error
+
+__all__ = ['Modula2Lexer']
+
+
+# Multi-Dialect Modula-2 Lexer
+class Modula2Lexer(RegexLexer):
+    """
+    For Modula-2 source code.
+
+    The Modula-2 lexer supports several dialects.  By default, it operates in
+    fallback mode, recognising the *combined* literals, punctuation symbols
+    and operators of all supported dialects, and the *combined* reserved words
+    and builtins of PIM Modula-2, ISO Modula-2 and Modula-2 R10, while not
+    differentiating between library defined identifiers.
+
+    To select a specific dialect, a dialect option may be passed
+    or a dialect tag may be embedded into a source file.
+
+    Dialect Options:
+
+    `m2pim`
+        Select PIM Modula-2 dialect.
+    `m2iso`
+        Select ISO Modula-2 dialect.
+    `m2r10`
+        Select Modula-2 R10 dialect.
+    `objm2`
+        Select Objective Modula-2 dialect.
+
+    The PIM and ISO dialect options may be qualified with a language extension.
+
+    Language Extensions:
+
+    `+aglet`
+        Select Aglet Modula-2 extensions, available with m2iso.
+    `+gm2`
+        Select GNU Modula-2 extensions, available with m2pim.
+    `+p1`
+        Select p1 Modula-2 extensions, available with m2iso.
+    `+xds`
+        Select XDS Modula-2 extensions, available with m2iso.
+
+
+    Passing a Dialect Option via Unix Commandline Interface
+
+    Dialect options may be passed to the lexer using the `dialect` key.
+    Only one such option should be passed. If multiple dialect options are
+    passed, the first valid option is used, any subsequent options are ignored.
+
+    Examples:
+
+    `$ pygmentize -O full,dialect=m2iso -f html -o /path/to/output /path/to/input`
+        Use ISO dialect to render input to HTML output
+    `$ pygmentize -O full,dialect=m2iso+p1 -f rtf -o /path/to/output /path/to/input`
+        Use ISO dialect with p1 extensions to render input to RTF output
+
+
+    Embedding a Dialect Option within a source file
+
+    A dialect option may be embedded in a source file in form of a dialect
+    tag, a specially formatted comment that specifies a dialect option.
+
+    Dialect Tag EBNF::
+
+       dialectTag :
+           OpeningCommentDelim Prefix dialectOption ClosingCommentDelim ;
+
+       dialectOption :
+           'm2pim' | 'm2iso' | 'm2r10' | 'objm2' |
+           'm2iso+aglet' | 'm2pim+gm2' | 'm2iso+p1' | 'm2iso+xds' ;
+
+       Prefix : '!' ;
+
+       OpeningCommentDelim : '(*' ;
+
+       ClosingCommentDelim : '*)' ;
+
+    No whitespace is permitted between the tokens of a dialect tag.
+
+    In the event that a source file contains multiple dialect tags, the first
+    tag that contains a valid dialect option will be used and any subsequent
+    dialect tags will be ignored.  Ideally, a dialect tag should be placed
+    at the beginning of a source file.
+
+    An embedded dialect tag overrides a dialect option set via command line.
+
+    Examples:
+
+    ``(*!m2r10*) DEFINITION MODULE Foobar; ...``
+        Use Modula2 R10 dialect to render this source file.
+    ``(*!m2pim+gm2*) DEFINITION MODULE Bazbam; ...``
+        Use PIM dialect with GNU extensions to render this source file.
+
+
+    Algol Publication Mode:
+
+    In Algol publication mode, source text is rendered for publication of
+    algorithms in scientific papers and academic texts, following the format
+    of the Revised Algol-60 Language Report.  It is activated by passing
+    one of two corresponding styles as an option:
+
+    `algol`
+        render reserved words lowercase underline boldface
+        and builtins lowercase boldface italic
+    `algol_nu`
+        render reserved words lowercase boldface (no underlining)
+        and builtins lowercase boldface italic
+
+    The lexer automatically performs the required lowercase conversion when
+    this mode is activated.
+
+    Example:
+
+    ``$ pygmentize -O full,style=algol -f latex -o /path/to/output /path/to/input``
+        Render input file in Algol publication mode to LaTeX output.
+
+
+    Rendering Mode of First Class ADT Identifiers:
+
+    The rendering of standard library first class ADT identifiers is controlled
+    by option flag "treat_stdlib_adts_as_builtins".
+
+    When this option is turned on, standard library ADT identifiers are rendered
+    as builtins.  When it is turned off, they are rendered as ordinary library
+    identifiers.
+
+    `treat_stdlib_adts_as_builtins` (default: On)
+
+    The option is useful for dialects that support ADTs as first class objects
+    and provide ADTs in the standard library that would otherwise be built-in.
+
+    At present, only Modula-2 R10 supports library ADTs as first class objects
+    and therefore, no ADT identifiers are defined for any other dialects.
+
+    Example:
+
+    ``$ pygmentize -O full,dialect=m2r10,treat_stdlib_adts_as_builtins=Off ...``
+        Render standard library ADTs as ordinary library types.
+
+    .. versionchanged:: 2.1
+       Added multi-dialect support.
+    """
+    name = 'Modula-2'
+    url = 'http://www.modula2.org/'
+    aliases = ['modula2', 'm2']
+    filenames = ['*.def', '*.mod']
+    mimetypes = ['text/x-modula2']
+    version_added = '1.3'
+
+    flags = re.MULTILINE | re.DOTALL
+
+    tokens = {
+        'whitespace': [
+            (r'\n+', Text),  # blank lines
+            (r'\s+', Text),  # whitespace
+        ],
+        'dialecttags': [
+            # PIM Dialect Tag
+            (r'\(\*!m2pim\*\)', Comment.Special),
+            # ISO Dialect Tag
+            (r'\(\*!m2iso\*\)', Comment.Special),
+            # M2R10 Dialect Tag
+            (r'\(\*!m2r10\*\)', Comment.Special),
+            # ObjM2 Dialect Tag
+            (r'\(\*!objm2\*\)', Comment.Special),
+            # Aglet Extensions Dialect Tag
+            (r'\(\*!m2iso\+aglet\*\)', Comment.Special),
+            # GNU Extensions Dialect Tag
+            (r'\(\*!m2pim\+gm2\*\)', Comment.Special),
+            # p1 Extensions Dialect Tag
+            (r'\(\*!m2iso\+p1\*\)', Comment.Special),
+            # XDS Extensions Dialect Tag
+            (r'\(\*!m2iso\+xds\*\)', Comment.Special),
+        ],
+        'identifiers': [
+            (r'([a-zA-Z_$][\w$]*)', Name),
+        ],
+        'prefixed_number_literals': [
+            #
+            # Base-2, whole number
+            (r'0b[01]+(\'[01]+)*', Number.Bin),
+            #
+            # Base-16, whole number
+            (r'0[ux][0-9A-F]+(\'[0-9A-F]+)*', Number.Hex),
+        ],
+        'plain_number_literals': [
+            #
+            # Base-10, real number with exponent
+            (r'[0-9]+(\'[0-9]+)*'  # integral part
+             r'\.[0-9]+(\'[0-9]+)*'  # fractional part
+             r'[eE][+-]?[0-9]+(\'[0-9]+)*',  # exponent
+             Number.Float),
+            #
+            # Base-10, real number without exponent
+            (r'[0-9]+(\'[0-9]+)*'  # integral part
+             r'\.[0-9]+(\'[0-9]+)*',  # fractional part
+             Number.Float),
+            #
+            # Base-10, whole number
+            (r'[0-9]+(\'[0-9]+)*', Number.Integer),
+        ],
+        'suffixed_number_literals': [
+            #
+            # Base-8, whole number
+            (r'[0-7]+B', Number.Oct),
+            #
+            # Base-8, character code
+            (r'[0-7]+C', Number.Oct),
+            #
+            # Base-16, number
+            (r'[0-9A-F]+H', Number.Hex),
+        ],
+        'string_literals': [
+            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
+            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
+        ],
+        'digraph_operators': [
+            # Dot Product Operator
+            (r'\*\.', Operator),
+            # Array Concatenation Operator
+            (r'\+>', Operator),  # M2R10 + ObjM2
+            # Inequality Operator
+            (r'<>', Operator),  # ISO + PIM
+            # Less-Or-Equal, Subset
+            (r'<=', Operator),
+            # Greater-Or-Equal, Superset
+            (r'>=', Operator),
+            # Identity Operator
+            (r'==', Operator),  # M2R10 + ObjM2
+            # Type Conversion Operator
+            (r'::', Operator),  # M2R10 + ObjM2
+            # Assignment Symbol
+            (r':=', Operator),
+            # Postfix Increment Mutator
+            (r'\+\+', Operator),  # M2R10 + ObjM2
+            # Postfix Decrement Mutator
+            (r'--', Operator),  # M2R10 + ObjM2
+        ],
+        'unigraph_operators': [
+            # Arithmetic Operators
+            (r'[+-]', Operator),
+            (r'[*/]', Operator),
+            # ISO 80000-2 compliant Set Difference Operator
+            (r'\\', Operator),  # M2R10 + ObjM2
+            # Relational Operators
+            (r'[=#<>]', Operator),
+            # Dereferencing Operator
+            (r'\^', Operator),
+            # Dereferencing Operator Synonym
+            (r'@', Operator),  # ISO
+            # Logical AND Operator Synonym
+            (r'&', Operator),  # PIM + ISO
+            # Logical NOT Operator Synonym
+            (r'~', Operator),  # PIM + ISO
+            # Smalltalk Message Prefix
+            (r'`', Operator),  # ObjM2
+        ],
+        'digraph_punctuation': [
+            # Range Constructor
+            (r'\.\.', Punctuation),
+            # Opening Chevron Bracket
+            (r'<<', Punctuation),  # M2R10 + ISO
+            # Closing Chevron Bracket
+            (r'>>', Punctuation),  # M2R10 + ISO
+            # Blueprint Punctuation
+            (r'->', Punctuation),  # M2R10 + ISO
+            # Distinguish |# and # in M2 R10
+            (r'\|#', Punctuation),
+            # Distinguish ## and # in M2 R10
+            (r'##', Punctuation),
+            # Distinguish |* and * in M2 R10
+            (r'\|\*', Punctuation),
+        ],
+        'unigraph_punctuation': [
+            # Common Punctuation
+            (r'[()\[\]{},.:;|]', Punctuation),
+            # Case Label Separator Synonym
+            (r'!', Punctuation),  # ISO
+            # Blueprint Punctuation
+            (r'\?', Punctuation),  # M2R10 + ObjM2
+        ],
+        'comments': [
+            # Single Line Comment
+            (r'^//.*?\n', Comment.Single),  # M2R10 + ObjM2
+            # Block Comment
+            (r'\(\*([^$].*?)\*\)', Comment.Multiline),
+            # Template Block Comment
+            (r'/\*(.*?)\*/', Comment.Multiline),  # M2R10 + ObjM2
+        ],
+        'pragmas': [
+            # ISO Style Pragmas
+            (r'<\*.*?\*>', Comment.Preproc),  # ISO, M2R10 + ObjM2
+            # Pascal Style Pragmas
+            (r'\(\*\$.*?\*\)', Comment.Preproc),  # PIM
+        ],
+        'root': [
+            include('whitespace'),
+            include('dialecttags'),
+            include('pragmas'),
+            include('comments'),
+            include('identifiers'),
+            include('suffixed_number_literals'),  # PIM + ISO
+            include('prefixed_number_literals'),  # M2R10 + ObjM2
+            include('plain_number_literals'),
+            include('string_literals'),
+            include('digraph_punctuation'),
+            include('digraph_operators'),
+            include('unigraph_punctuation'),
+            include('unigraph_operators'),
+        ]
+    }
+
+#  C o m m o n   D a t a s e t s
+
+    # Common Reserved Words Dataset
+    common_reserved_words = (
+        # 37 common reserved words
+        'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV',
+        'DO', 'ELSE', 'ELSIF', 'END', 'EXIT', 'FOR', 'FROM', 'IF',
+        'IMPLEMENTATION', 'IMPORT', 'IN', 'LOOP', 'MOD', 'MODULE', 'NOT',
+        'OF', 'OR', 'POINTER', 'PROCEDURE', 'RECORD', 'REPEAT', 'RETURN',
+        'SET', 'THEN', 'TO', 'TYPE', 'UNTIL', 'VAR', 'WHILE',
+    )
+
+    # Common Builtins Dataset
+    common_builtins = (
+        # 16 common builtins
+        'ABS', 'BOOLEAN', 'CARDINAL', 'CHAR', 'CHR', 'FALSE', 'INTEGER',
+        'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NIL', 'ODD', 'ORD', 'REAL',
+        'TRUE',
+    )
+
+    # Common Pseudo-Module Builtins Dataset
+    common_pseudo_builtins = (
+        # 4 common pseudo builtins
+        'ADDRESS', 'BYTE', 'WORD', 'ADR'
+    )
+
+#  P I M   M o d u l a - 2   D a t a s e t s
+
+    # Lexemes to Mark as Error Tokens for PIM Modula-2
+    pim_lexemes_to_reject = (
+        '!', '`', '@', '$', '%', '?', '\\', '==', '++', '--', '::', '*.',
+        '+>', '->', '<<', '>>', '|#', '##',
+    )
+
+    # PIM Modula-2 Additional Reserved Words Dataset
+    pim_additional_reserved_words = (
+        # 3 additional reserved words
+        'EXPORT', 'QUALIFIED', 'WITH',
+    )
+
+    # PIM Modula-2 Additional Builtins Dataset
+    pim_additional_builtins = (
+        # 16 additional builtins
+        'BITSET', 'CAP', 'DEC', 'DISPOSE', 'EXCL', 'FLOAT', 'HALT', 'HIGH',
+        'INC', 'INCL', 'NEW', 'NIL', 'PROC', 'SIZE', 'TRUNC', 'VAL',
+    )
+
+    # PIM Modula-2 Additional Pseudo-Module Builtins Dataset
+    pim_additional_pseudo_builtins = (
+        # 5 additional pseudo builtins
+        'SYSTEM', 'PROCESS', 'TSIZE', 'NEWPROCESS', 'TRANSFER',
+    )
+
+#  I S O   M o d u l a - 2   D a t a s e t s
+
+    # Lexemes to Mark as Error Tokens for ISO Modula-2
+    iso_lexemes_to_reject = (
+        '`', '$', '%', '?', '\\', '==', '++', '--', '::', '*.', '+>', '->',
+        '<<', '>>', '|#', '##',
+    )
+
+    # ISO Modula-2 Additional Reserved Words Dataset
+    iso_additional_reserved_words = (
+        # 9 additional reserved words (ISO 10514-1)
+        'EXCEPT', 'EXPORT', 'FINALLY', 'FORWARD', 'PACKEDSET', 'QUALIFIED',
+        'REM', 'RETRY', 'WITH',
+        # 10 additional reserved words (ISO 10514-2 & ISO 10514-3)
+        'ABSTRACT', 'AS', 'CLASS', 'GUARD', 'INHERIT', 'OVERRIDE', 'READONLY',
+        'REVEAL', 'TRACED', 'UNSAFEGUARDED',
+    )
+
+    # ISO Modula-2 Additional Builtins Dataset
+    iso_additional_builtins = (
+        # 26 additional builtins (ISO 10514-1)
+        'BITSET', 'CAP', 'CMPLX', 'COMPLEX', 'DEC', 'DISPOSE', 'EXCL', 'FLOAT',
+        'HALT', 'HIGH', 'IM', 'INC', 'INCL', 'INT', 'INTERRUPTIBLE',  'LENGTH',
+        'LFLOAT', 'LONGCOMPLEX', 'NEW', 'PROC', 'PROTECTION', 'RE', 'SIZE',
+        'TRUNC', 'UNINTERRUBTIBLE', 'VAL',
+        # 5 additional builtins (ISO 10514-2 & ISO 10514-3)
+        'CREATE', 'DESTROY', 'EMPTY', 'ISMEMBER', 'SELF',
+    )
+
+    # ISO Modula-2 Additional Pseudo-Module Builtins Dataset
+    iso_additional_pseudo_builtins = (
+        # 14 additional builtins (SYSTEM)
+        'SYSTEM', 'BITSPERLOC', 'LOCSPERBYTE', 'LOCSPERWORD', 'LOC',
+        'ADDADR', 'SUBADR', 'DIFADR', 'MAKEADR', 'ADR',
+        'ROTATE', 'SHIFT', 'CAST', 'TSIZE',
+        # 13 additional builtins (COROUTINES)
+        'COROUTINES', 'ATTACH', 'COROUTINE', 'CURRENT', 'DETACH', 'HANDLER',
+        'INTERRUPTSOURCE', 'IOTRANSFER', 'IsATTACHED', 'LISTEN',
+        'NEWCOROUTINE', 'PROT', 'TRANSFER',
+        # 9 additional builtins (EXCEPTIONS)
+        'EXCEPTIONS', 'AllocateSource', 'CurrentNumber', 'ExceptionNumber',
+        'ExceptionSource', 'GetMessage', 'IsCurrentSource',
+        'IsExceptionalExecution', 'RAISE',
+        # 3 additional builtins (TERMINATION)
+        'TERMINATION', 'IsTerminating', 'HasHalted',
+        # 4 additional builtins (M2EXCEPTION)
+        'M2EXCEPTION', 'M2Exceptions', 'M2Exception', 'IsM2Exception',
+        'indexException', 'rangeException', 'caseSelectException',
+        'invalidLocation', 'functionException', 'wholeValueException',
+        'wholeDivException', 'realValueException', 'realDivException',
+        'complexValueException', 'complexDivException', 'protException',
+        'sysException', 'coException', 'exException',
+    )
+
+#  M o d u l a - 2   R 1 0   D a t a s e t s
+
+    # Lexemes to Mark as Error Tokens for Modula-2 R10
+    m2r10_lexemes_to_reject = (
+        '!', '`', '@', '$', '%', '&', '<>',
+    )
+
+    # Modula-2 R10 reserved words in addition to the common set
+    m2r10_additional_reserved_words = (
+        # 12 additional reserved words
+        'ALIAS', 'ARGLIST', 'BLUEPRINT', 'COPY', 'GENLIB', 'INDETERMINATE',
+        'NEW', 'NONE', 'OPAQUE', 'REFERENTIAL', 'RELEASE', 'RETAIN',
+        # 2 additional reserved words with symbolic assembly option
+        'ASM', 'REG',
+    )
+
+    # Modula-2 R10 builtins in addition to the common set
+    m2r10_additional_builtins = (
+        # 26 additional builtins
+        'CARDINAL', 'COUNT', 'EMPTY', 'EXISTS', 'INSERT', 'LENGTH', 'LONGCARD',
+        'OCTET', 'PTR', 'PRED', 'READ', 'READNEW', 'REMOVE', 'RETRIEVE', 'SORT',
+        'STORE', 'SUBSET', 'SUCC', 'TLIMIT', 'TMAX', 'TMIN', 'TRUE', 'TSIZE',
+        'UNICHAR', 'WRITE', 'WRITEF',
+    )
+
+    # Modula-2 R10 Additional Pseudo-Module Builtins Dataset
+    m2r10_additional_pseudo_builtins = (
+        # 13 additional builtins (TPROPERTIES)
+        'TPROPERTIES', 'PROPERTY', 'LITERAL', 'TPROPERTY', 'TLITERAL',
+        'TBUILTIN', 'TDYN', 'TREFC', 'TNIL', 'TBASE', 'TPRECISION',
+        'TMAXEXP', 'TMINEXP',
+        # 4 additional builtins (CONVERSION)
+        'CONVERSION', 'TSXFSIZE', 'SXF', 'VAL',
+        # 35 additional builtins (UNSAFE)
+        'UNSAFE', 'CAST', 'INTRINSIC', 'AVAIL', 'ADD', 'SUB', 'ADDC', 'SUBC',
+        'FETCHADD', 'FETCHSUB', 'SHL', 'SHR', 'ASHR', 'ROTL', 'ROTR', 'ROTLC',
+        'ROTRC', 'BWNOT', 'BWAND', 'BWOR', 'BWXOR', 'BWNAND', 'BWNOR',
+        'SETBIT', 'TESTBIT', 'LSBIT', 'MSBIT', 'CSBITS', 'BAIL', 'HALT',
+        'TODO', 'FFI', 'ADDR', 'VARGLIST', 'VARGC',
+        # 11 additional builtins (ATOMIC)
+        'ATOMIC', 'INTRINSIC', 'AVAIL', 'SWAP', 'CAS', 'INC', 'DEC', 'BWAND',
+        'BWNAND', 'BWOR', 'BWXOR',
+        # 7 additional builtins (COMPILER)
+        'COMPILER', 'DEBUG', 'MODNAME', 'PROCNAME', 'LINENUM', 'DEFAULT',
+        'HASH',
+        # 5 additional builtins (ASSEMBLER)
+        'ASSEMBLER', 'REGISTER', 'SETREG', 'GETREG', 'CODE',
+    )
+
+#  O b j e c t i v e   M o d u l a - 2   D a t a s e t s
+
+    # Lexemes to Mark as Error Tokens for Objective Modula-2
+    objm2_lexemes_to_reject = (
+        '!', '$', '%', '&', '<>',
+    )
+
+    # Objective Modula-2 Extensions
+    # reserved words in addition to Modula-2 R10
+    objm2_additional_reserved_words = (
+        # 16 additional reserved words
+        'BYCOPY', 'BYREF', 'CLASS', 'CONTINUE', 'CRITICAL', 'INOUT', 'METHOD',
+        'ON', 'OPTIONAL', 'OUT', 'PRIVATE', 'PROTECTED', 'PROTOCOL', 'PUBLIC',
+        'SUPER', 'TRY',
+    )
+
+    # Objective Modula-2 Extensions
+    # builtins in addition to Modula-2 R10
+    objm2_additional_builtins = (
+        # 3 additional builtins
+        'OBJECT', 'NO', 'YES',
+    )
+
+    # Objective Modula-2 Extensions
+    # pseudo-module builtins in addition to Modula-2 R10
+    objm2_additional_pseudo_builtins = (
+        # None
+    )
+
+#  A g l e t   M o d u l a - 2   D a t a s e t s
+
+    # Aglet Extensions
+    # reserved words in addition to ISO Modula-2
+    aglet_additional_reserved_words = (
+        # None
+    )
+
+    # Aglet Extensions
+    # builtins in addition to ISO Modula-2
+    aglet_additional_builtins = (
+        # 9 additional builtins
+        'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16',
+        'CARDINAL32', 'INTEGER8', 'INTEGER16', 'INTEGER32',
+    )
+
+    # Aglet Modula-2 Extensions
+    # pseudo-module builtins in addition to ISO Modula-2
+    aglet_additional_pseudo_builtins = (
+        # None
+    )
+
+#  G N U   M o d u l a - 2   D a t a s e t s
+
+    # GNU Extensions
+    # reserved words in addition to PIM Modula-2
+    gm2_additional_reserved_words = (
+        # 10 additional reserved words
+        'ASM', '__ATTRIBUTE__', '__BUILTIN__', '__COLUMN__', '__DATE__',
+        '__FILE__', '__FUNCTION__', '__LINE__', '__MODULE__', 'VOLATILE',
+    )
+
+    # GNU Extensions
+    # builtins in addition to PIM Modula-2
+    gm2_additional_builtins = (
+        # 21 additional builtins
+        'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16',
+        'CARDINAL32', 'CARDINAL64', 'COMPLEX32', 'COMPLEX64', 'COMPLEX96',
+        'COMPLEX128', 'INTEGER8', 'INTEGER16', 'INTEGER32', 'INTEGER64',
+        'REAL8', 'REAL16', 'REAL32', 'REAL96', 'REAL128', 'THROW',
+    )
+
+    # GNU Extensions
+    # pseudo-module builtins in addition to PIM Modula-2
+    gm2_additional_pseudo_builtins = (
+        # None
+    )
+
+#  p 1   M o d u l a - 2   D a t a s e t s
+
+    # p1 Extensions
+    # reserved words in addition to ISO Modula-2
+    p1_additional_reserved_words = (
+        # None
+    )
+
+    # p1 Extensions
+    # builtins in addition to ISO Modula-2
+    p1_additional_builtins = (
+        # None
+    )
+
+    # p1 Modula-2 Extensions
+    # pseudo-module builtins in addition to ISO Modula-2
+    p1_additional_pseudo_builtins = (
+        # 1 additional builtin
+        'BCD',
+    )
+
+#  X D S   M o d u l a - 2   D a t a s e t s
+
+    # XDS Extensions
+    # reserved words in addition to ISO Modula-2
+    xds_additional_reserved_words = (
+        # 1 additional reserved word
+        'SEQ',
+    )
+
+    # XDS Extensions
+    # builtins in addition to ISO Modula-2
+    xds_additional_builtins = (
+        # 9 additional builtins
+        'ASH', 'ASSERT', 'DIFFADR_TYPE', 'ENTIER', 'INDEX', 'LEN',
+        'LONGCARD', 'SHORTCARD', 'SHORTINT',
+    )
+
+    # XDS Modula-2 Extensions
+    # pseudo-module builtins in addition to ISO Modula-2
+    xds_additional_pseudo_builtins = (
+        # 22 additional builtins (SYSTEM)
+        'PROCESS', 'NEWPROCESS', 'BOOL8', 'BOOL16', 'BOOL32', 'CARD8',
+        'CARD16', 'CARD32', 'INT8', 'INT16', 'INT32', 'REF', 'MOVE',
+        'FILL', 'GET', 'PUT', 'CC', 'int', 'unsigned', 'size_t', 'void'
+        # 3 additional builtins (COMPILER)
+        'COMPILER', 'OPTION', 'EQUATION'
+    )
+
+#  P I M   S t a n d a r d   L i b r a r y   D a t a s e t s
+
+    # PIM Modula-2 Standard Library Modules Dataset
+    pim_stdlib_module_identifiers = (
+        'Terminal', 'FileSystem', 'InOut', 'RealInOut', 'MathLib0', 'Storage',
+    )
+
+    # PIM Modula-2 Standard Library Types Dataset
+    pim_stdlib_type_identifiers = (
+        'Flag', 'FlagSet', 'Response', 'Command', 'Lock', 'Permission',
+        'MediumType', 'File', 'FileProc', 'DirectoryProc', 'FileCommand',
+        'DirectoryCommand',
+    )
+
+    # PIM Modula-2 Standard Library Procedures Dataset
+    pim_stdlib_proc_identifiers = (
+        'Read', 'BusyRead', 'ReadAgain', 'Write', 'WriteString', 'WriteLn',
+        'Create', 'Lookup', 'Close', 'Delete', 'Rename', 'SetRead', 'SetWrite',
+        'SetModify', 'SetOpen', 'Doio', 'SetPos', 'GetPos', 'Length', 'Reset',
+        'Again', 'ReadWord', 'WriteWord', 'ReadChar', 'WriteChar',
+        'CreateMedium', 'DeleteMedium', 'AssignName', 'DeassignName',
+        'ReadMedium', 'LookupMedium', 'OpenInput', 'OpenOutput', 'CloseInput',
+        'CloseOutput', 'ReadString', 'ReadInt', 'ReadCard', 'ReadWrd',
+        'WriteInt', 'WriteCard', 'WriteOct', 'WriteHex', 'WriteWrd',
+        'ReadReal', 'WriteReal', 'WriteFixPt', 'WriteRealOct', 'sqrt', 'exp',
+        'ln', 'sin', 'cos', 'arctan', 'entier', 'ALLOCATE', 'DEALLOCATE',
+    )
+
+    # PIM Modula-2 Standard Library Variables Dataset
+    pim_stdlib_var_identifiers = (
+        'Done', 'termCH', 'in', 'out'
+    )
+
+    # PIM Modula-2 Standard Library Constants Dataset
+    pim_stdlib_const_identifiers = (
+        'EOL',
+    )
+
+#  I S O   S t a n d a r d   L i b r a r y   D a t a s e t s
+
+    # ISO Modula-2 Standard Library Modules Dataset
+    iso_stdlib_module_identifiers = (
+        # TO DO
+    )
+
+    # ISO Modula-2 Standard Library Types Dataset
+    iso_stdlib_type_identifiers = (
+        # TO DO
+    )
+
+    # ISO Modula-2 Standard Library Procedures Dataset
+    iso_stdlib_proc_identifiers = (
+        # TO DO
+    )
+
+    # ISO Modula-2 Standard Library Variables Dataset
+    iso_stdlib_var_identifiers = (
+        # TO DO
+    )
+
+    # ISO Modula-2 Standard Library Constants Dataset
+    iso_stdlib_const_identifiers = (
+        # TO DO
+    )
+
+#  M 2   R 1 0   S t a n d a r d   L i b r a r y   D a t a s e t s
+
+    # Modula-2 R10 Standard Library ADTs Dataset
+    m2r10_stdlib_adt_identifiers = (
+        'BCD', 'LONGBCD', 'BITSET', 'SHORTBITSET', 'LONGBITSET',
+        'LONGLONGBITSET', 'COMPLEX', 'LONGCOMPLEX', 'SHORTCARD', 'LONGLONGCARD',
+        'SHORTINT', 'LONGLONGINT', 'POSINT', 'SHORTPOSINT', 'LONGPOSINT',
+        'LONGLONGPOSINT', 'BITSET8', 'BITSET16', 'BITSET32', 'BITSET64',
+        'BITSET128', 'BS8', 'BS16', 'BS32', 'BS64', 'BS128', 'CARDINAL8',
+        'CARDINAL16', 'CARDINAL32', 'CARDINAL64', 'CARDINAL128', 'CARD8',
+        'CARD16', 'CARD32', 'CARD64', 'CARD128', 'INTEGER8', 'INTEGER16',
+        'INTEGER32', 'INTEGER64', 'INTEGER128', 'INT8', 'INT16', 'INT32',
+        'INT64', 'INT128', 'STRING', 'UNISTRING',
+    )
+
+    # Modula-2 R10 Standard Library Blueprints Dataset
+    m2r10_stdlib_blueprint_identifiers = (
+        'ProtoRoot', 'ProtoComputational', 'ProtoNumeric', 'ProtoScalar',
+        'ProtoNonScalar', 'ProtoCardinal', 'ProtoInteger', 'ProtoReal',
+        'ProtoComplex', 'ProtoVector', 'ProtoTuple', 'ProtoCompArray',
+        'ProtoCollection', 'ProtoStaticArray', 'ProtoStaticSet',
+        'ProtoStaticString', 'ProtoArray', 'ProtoString', 'ProtoSet',
+        'ProtoMultiSet', 'ProtoDictionary', 'ProtoMultiDict', 'ProtoExtension',
+        'ProtoIO', 'ProtoCardMath', 'ProtoIntMath', 'ProtoRealMath',
+    )
+
+    # Modula-2 R10 Standard Library Modules Dataset
+    m2r10_stdlib_module_identifiers = (
+        'ASCII', 'BooleanIO', 'CharIO', 'UnicharIO', 'OctetIO',
+        'CardinalIO', 'LongCardIO', 'IntegerIO', 'LongIntIO', 'RealIO',
+        'LongRealIO', 'BCDIO', 'LongBCDIO', 'CardMath', 'LongCardMath',
+        'IntMath', 'LongIntMath', 'RealMath', 'LongRealMath', 'BCDMath',
+        'LongBCDMath', 'FileIO', 'FileSystem', 'Storage', 'IOSupport',
+    )
+
+    # Modula-2 R10 Standard Library Types Dataset
+    m2r10_stdlib_type_identifiers = (
+        'File', 'Status',
+        # TO BE COMPLETED
+    )
+
+    # Modula-2 R10 Standard Library Procedures Dataset
+    m2r10_stdlib_proc_identifiers = (
+        'ALLOCATE', 'DEALLOCATE', 'SIZE',
+        # TO BE COMPLETED
+    )
+
+    # Modula-2 R10 Standard Library Variables Dataset
+    m2r10_stdlib_var_identifiers = (
+        'stdIn', 'stdOut', 'stdErr',
+    )
+
+    # Modula-2 R10 Standard Library Constants Dataset
+    m2r10_stdlib_const_identifiers = (
+        'pi', 'tau',
+    )
+
+#  D i a l e c t s
+
+    # Dialect modes
+    dialects = (
+        'unknown',
+        'm2pim', 'm2iso', 'm2r10', 'objm2',
+        'm2iso+aglet', 'm2pim+gm2', 'm2iso+p1', 'm2iso+xds',
+    )
+
+#   D a t a b a s e s
+
+    # Lexemes to Mark as Errors Database
+    lexemes_to_reject_db = {
+        # Lexemes to reject for unknown dialect
+        'unknown': (
+            # LEAVE THIS EMPTY
+        ),
+        # Lexemes to reject for PIM Modula-2
+        'm2pim': (
+            pim_lexemes_to_reject,
+        ),
+        # Lexemes to reject for ISO Modula-2
+        'm2iso': (
+            iso_lexemes_to_reject,
+        ),
+        # Lexemes to reject for Modula-2 R10
+        'm2r10': (
+            m2r10_lexemes_to_reject,
+        ),
+        # Lexemes to reject for Objective Modula-2
+        'objm2': (
+            objm2_lexemes_to_reject,
+        ),
+        # Lexemes to reject for Aglet Modula-2
+        'm2iso+aglet': (
+            iso_lexemes_to_reject,
+        ),
+        # Lexemes to reject for GNU Modula-2
+        'm2pim+gm2': (
+            pim_lexemes_to_reject,
+        ),
+        # Lexemes to reject for p1 Modula-2
+        'm2iso+p1': (
+            iso_lexemes_to_reject,
+        ),
+        # Lexemes to reject for XDS Modula-2
+        'm2iso+xds': (
+            iso_lexemes_to_reject,
+        ),
+    }
+
+    # Reserved Words Database
+    reserved_words_db = {
+        # Reserved words for unknown dialect
+        'unknown': (
+            common_reserved_words,
+            pim_additional_reserved_words,
+            iso_additional_reserved_words,
+            m2r10_additional_reserved_words,
+        ),
+
+        # Reserved words for PIM Modula-2
+        'm2pim': (
+            common_reserved_words,
+            pim_additional_reserved_words,
+        ),
+
+        # Reserved words for Modula-2 R10
+        'm2iso': (
+            common_reserved_words,
+            iso_additional_reserved_words,
+        ),
+
+        # Reserved words for ISO Modula-2
+        'm2r10': (
+            common_reserved_words,
+            m2r10_additional_reserved_words,
+        ),
+
+        # Reserved words for Objective Modula-2
+        'objm2': (
+            common_reserved_words,
+            m2r10_additional_reserved_words,
+            objm2_additional_reserved_words,
+        ),
+
+        # Reserved words for Aglet Modula-2 Extensions
+        'm2iso+aglet': (
+            common_reserved_words,
+            iso_additional_reserved_words,
+            aglet_additional_reserved_words,
+        ),
+
+        # Reserved words for GNU Modula-2 Extensions
+        'm2pim+gm2': (
+            common_reserved_words,
+            pim_additional_reserved_words,
+            gm2_additional_reserved_words,
+        ),
+
+        # Reserved words for p1 Modula-2 Extensions
+        'm2iso+p1': (
+            common_reserved_words,
+            iso_additional_reserved_words,
+            p1_additional_reserved_words,
+        ),
+
+        # Reserved words for XDS Modula-2 Extensions
+        'm2iso+xds': (
+            common_reserved_words,
+            iso_additional_reserved_words,
+            xds_additional_reserved_words,
+        ),
+    }
+
+    # Builtins Database
+    builtins_db = {
+        # Builtins for unknown dialect
+        'unknown': (
+            common_builtins,
+            pim_additional_builtins,
+            iso_additional_builtins,
+            m2r10_additional_builtins,
+        ),
+
+        # Builtins for PIM Modula-2
+        'm2pim': (
+            common_builtins,
+            pim_additional_builtins,
+        ),
+
+        # Builtins for ISO Modula-2
+        'm2iso': (
+            common_builtins,
+            iso_additional_builtins,
+        ),
+
+        # Builtins for ISO Modula-2
+        'm2r10': (
+            common_builtins,
+            m2r10_additional_builtins,
+        ),
+
+        # Builtins for Objective Modula-2
+        'objm2': (
+            common_builtins,
+            m2r10_additional_builtins,
+            objm2_additional_builtins,
+        ),
+
+        # Builtins for Aglet Modula-2 Extensions
+        'm2iso+aglet': (
+            common_builtins,
+            iso_additional_builtins,
+            aglet_additional_builtins,
+        ),
+
+        # Builtins for GNU Modula-2 Extensions
+        'm2pim+gm2': (
+            common_builtins,
+            pim_additional_builtins,
+            gm2_additional_builtins,
+        ),
+
+        # Builtins for p1 Modula-2 Extensions
+        'm2iso+p1': (
+            common_builtins,
+            iso_additional_builtins,
+            p1_additional_builtins,
+        ),
+
+        # Builtins for XDS Modula-2 Extensions
+        'm2iso+xds': (
+            common_builtins,
+            iso_additional_builtins,
+            xds_additional_builtins,
+        ),
+    }
+
+    # Pseudo-Module Builtins Database
+    pseudo_builtins_db = {
+        # Builtins for unknown dialect
+        'unknown': (
+            common_pseudo_builtins,
+            pim_additional_pseudo_builtins,
+            iso_additional_pseudo_builtins,
+            m2r10_additional_pseudo_builtins,
+        ),
+
+        # Builtins for PIM Modula-2
+        'm2pim': (
+            common_pseudo_builtins,
+            pim_additional_pseudo_builtins,
+        ),
+
+        # Builtins for ISO Modula-2
+        'm2iso': (
+            common_pseudo_builtins,
+            iso_additional_pseudo_builtins,
+        ),
+
+        # Builtins for ISO Modula-2
+        'm2r10': (
+            common_pseudo_builtins,
+            m2r10_additional_pseudo_builtins,
+        ),
+
+        # Builtins for Objective Modula-2
+        'objm2': (
+            common_pseudo_builtins,
+            m2r10_additional_pseudo_builtins,
+            objm2_additional_pseudo_builtins,
+        ),
+
+        # Builtins for Aglet Modula-2 Extensions
+        'm2iso+aglet': (
+            common_pseudo_builtins,
+            iso_additional_pseudo_builtins,
+            aglet_additional_pseudo_builtins,
+        ),
+
+        # Builtins for GNU Modula-2 Extensions
+        'm2pim+gm2': (
+            common_pseudo_builtins,
+            pim_additional_pseudo_builtins,
+            gm2_additional_pseudo_builtins,
+        ),
+
+        # Builtins for p1 Modula-2 Extensions
+        'm2iso+p1': (
+            common_pseudo_builtins,
+            iso_additional_pseudo_builtins,
+            p1_additional_pseudo_builtins,
+        ),
+
+        # Builtins for XDS Modula-2 Extensions
+        'm2iso+xds': (
+            common_pseudo_builtins,
+            iso_additional_pseudo_builtins,
+            xds_additional_pseudo_builtins,
+        ),
+    }
+
+    # Standard Library ADTs Database
+    stdlib_adts_db = {
+        # Empty entry for unknown dialect
+        'unknown': (
+            # LEAVE THIS EMPTY
+        ),
+        # Standard Library ADTs for PIM Modula-2
+        'm2pim': (
+            # No first class library types
+        ),
+
+        # Standard Library ADTs for ISO Modula-2
+        'm2iso': (
+            # No first class library types
+        ),
+
+        # Standard Library ADTs for Modula-2 R10
+        'm2r10': (
+            m2r10_stdlib_adt_identifiers,
+        ),
+
+        # Standard Library ADTs for Objective Modula-2
+        'objm2': (
+            m2r10_stdlib_adt_identifiers,
+        ),
+
+        # Standard Library ADTs for Aglet Modula-2
+        'm2iso+aglet': (
+            # No first class library types
+        ),
+
+        # Standard Library ADTs for GNU Modula-2
+        'm2pim+gm2': (
+            # No first class library types
+        ),
+
+        # Standard Library ADTs for p1 Modula-2
+        'm2iso+p1': (
+            # No first class library types
+        ),
+
+        # Standard Library ADTs for XDS Modula-2
+        'm2iso+xds': (
+            # No first class library types
+        ),
+    }
+
+    # Standard Library Modules Database
+    stdlib_modules_db = {
+        # Empty entry for unknown dialect
+        'unknown': (
+            # LEAVE THIS EMPTY
+        ),
+        # Standard Library Modules for PIM Modula-2
+        'm2pim': (
+            pim_stdlib_module_identifiers,
+        ),
+
+        # Standard Library Modules for ISO Modula-2
+        'm2iso': (
+            iso_stdlib_module_identifiers,
+        ),
+
+        # Standard Library Modules for Modula-2 R10
+        'm2r10': (
+            m2r10_stdlib_blueprint_identifiers,
+            m2r10_stdlib_module_identifiers,
+            m2r10_stdlib_adt_identifiers,
+        ),
+
+        # Standard Library Modules for Objective Modula-2
+        'objm2': (
+            m2r10_stdlib_blueprint_identifiers,
+            m2r10_stdlib_module_identifiers,
+        ),
+
+        # Standard Library Modules for Aglet Modula-2
+        'm2iso+aglet': (
+            iso_stdlib_module_identifiers,
+        ),
+
+        # Standard Library Modules for GNU Modula-2
+        'm2pim+gm2': (
+            pim_stdlib_module_identifiers,
+        ),
+
+        # Standard Library Modules for p1 Modula-2
+        'm2iso+p1': (
+            iso_stdlib_module_identifiers,
+        ),
+
+        # Standard Library Modules for XDS Modula-2
+        'm2iso+xds': (
+            iso_stdlib_module_identifiers,
+        ),
+    }
+
+    # Standard Library Types Database
+    stdlib_types_db = {
+        # Empty entry for unknown dialect
+        'unknown': (
+            # LEAVE THIS EMPTY
+        ),
+        # Standard Library Types for PIM Modula-2
+        'm2pim': (
+            pim_stdlib_type_identifiers,
+        ),
+
+        # Standard Library Types for ISO Modula-2
+        'm2iso': (
+            iso_stdlib_type_identifiers,
+        ),
+
+        # Standard Library Types for Modula-2 R10
+        'm2r10': (
+            m2r10_stdlib_type_identifiers,
+        ),
+
+        # Standard Library Types for Objective Modula-2
+        'objm2': (
+            m2r10_stdlib_type_identifiers,
+        ),
+
+        # Standard Library Types for Aglet Modula-2
+        'm2iso+aglet': (
+            iso_stdlib_type_identifiers,
+        ),
+
+        # Standard Library Types for GNU Modula-2
+        'm2pim+gm2': (
+            pim_stdlib_type_identifiers,
+        ),
+
+        # Standard Library Types for p1 Modula-2
+        'm2iso+p1': (
+            iso_stdlib_type_identifiers,
+        ),
+
+        # Standard Library Types for XDS Modula-2
+        'm2iso+xds': (
+            iso_stdlib_type_identifiers,
+        ),
+    }
+
+    # Standard Library Procedures Database
+    stdlib_procedures_db = {
+        # Empty entry for unknown dialect
+        'unknown': (
+            # LEAVE THIS EMPTY
+        ),
+        # Standard Library Procedures for PIM Modula-2
+        'm2pim': (
+            pim_stdlib_proc_identifiers,
+        ),
+
+        # Standard Library Procedures for ISO Modula-2
+        'm2iso': (
+            iso_stdlib_proc_identifiers,
+        ),
+
+        # Standard Library Procedures for Modula-2 R10
+        'm2r10': (
+            m2r10_stdlib_proc_identifiers,
+        ),
+
+        # Standard Library Procedures for Objective Modula-2
+        'objm2': (
+            m2r10_stdlib_proc_identifiers,
+        ),
+
+        # Standard Library Procedures for Aglet Modula-2
+        'm2iso+aglet': (
+            iso_stdlib_proc_identifiers,
+        ),
+
+        # Standard Library Procedures for GNU Modula-2
+        'm2pim+gm2': (
+            pim_stdlib_proc_identifiers,
+        ),
+
+        # Standard Library Procedures for p1 Modula-2
+        'm2iso+p1': (
+            iso_stdlib_proc_identifiers,
+        ),
+
+        # Standard Library Procedures for XDS Modula-2
+        'm2iso+xds': (
+            iso_stdlib_proc_identifiers,
+        ),
+    }
+
+    # Standard Library Variables Database
+    stdlib_variables_db = {
+        # Empty entry for unknown dialect
+        'unknown': (
+            # LEAVE THIS EMPTY
+        ),
+        # Standard Library Variables for PIM Modula-2
+        'm2pim': (
+            pim_stdlib_var_identifiers,
+        ),
+
+        # Standard Library Variables for ISO Modula-2
+        'm2iso': (
+            iso_stdlib_var_identifiers,
+        ),
+
+        # Standard Library Variables for Modula-2 R10
+        'm2r10': (
+            m2r10_stdlib_var_identifiers,
+        ),
+
+        # Standard Library Variables for Objective Modula-2
+        'objm2': (
+            m2r10_stdlib_var_identifiers,
+        ),
+
+        # Standard Library Variables for Aglet Modula-2
+        'm2iso+aglet': (
+            iso_stdlib_var_identifiers,
+        ),
+
+        # Standard Library Variables for GNU Modula-2
+        'm2pim+gm2': (
+            pim_stdlib_var_identifiers,
+        ),
+
+        # Standard Library Variables for p1 Modula-2
+        'm2iso+p1': (
+            iso_stdlib_var_identifiers,
+        ),
+
+        # Standard Library Variables for XDS Modula-2
+        'm2iso+xds': (
+            iso_stdlib_var_identifiers,
+        ),
+    }
+
+    # Standard Library Constants Database
+    stdlib_constants_db = {
+        # Empty entry for unknown dialect
+        'unknown': (
+            # LEAVE THIS EMPTY
+        ),
+        # Standard Library Constants for PIM Modula-2
+        'm2pim': (
+            pim_stdlib_const_identifiers,
+        ),
+
+        # Standard Library Constants for ISO Modula-2
+        'm2iso': (
+            iso_stdlib_const_identifiers,
+        ),
+
+        # Standard Library Constants for Modula-2 R10
+        'm2r10': (
+            m2r10_stdlib_const_identifiers,
+        ),
+
+        # Standard Library Constants for Objective Modula-2
+        'objm2': (
+            m2r10_stdlib_const_identifiers,
+        ),
+
+        # Standard Library Constants for Aglet Modula-2
+        'm2iso+aglet': (
+            iso_stdlib_const_identifiers,
+        ),
+
+        # Standard Library Constants for GNU Modula-2
+        'm2pim+gm2': (
+            pim_stdlib_const_identifiers,
+        ),
+
+        # Standard Library Constants for p1 Modula-2
+        'm2iso+p1': (
+            iso_stdlib_const_identifiers,
+        ),
+
+        # Standard Library Constants for XDS Modula-2
+        'm2iso+xds': (
+            iso_stdlib_const_identifiers,
+        ),
+    }
+
+#   M e t h o d s
+
+    # initialise a lexer instance
+    def __init__(self, **options):
+        #
+        # check dialect options
+        #
+        dialects = get_list_opt(options, 'dialect', [])
+        #
+        for dialect_option in dialects:
+            if dialect_option in self.dialects[1:-1]:
+                # valid dialect option found
+                self.set_dialect(dialect_option)
+                break
+        #
+        # Fallback Mode (DEFAULT)
+        else:
+            # no valid dialect option
+            self.set_dialect('unknown')
+        #
+        self.dialect_set_by_tag = False
+        #
+        # check style options
+        #
+        styles = get_list_opt(options, 'style', [])
+        #
+        # use lowercase mode for Algol style
+        if 'algol' in styles or 'algol_nu' in styles:
+            self.algol_publication_mode = True
+        else:
+            self.algol_publication_mode = False
+        #
+        # Check option flags
+        #
+        self.treat_stdlib_adts_as_builtins = get_bool_opt(
+            options, 'treat_stdlib_adts_as_builtins', True)
+        #
+        # call superclass initialiser
+        RegexLexer.__init__(self, **options)
+
+    # Set lexer to a specified dialect
+    def set_dialect(self, dialect_id):
+        #
+        # if __debug__:
+        #    print 'entered set_dialect with arg: ', dialect_id
+        #
+        # check dialect name against known dialects
+        if dialect_id not in self.dialects:
+            dialect = 'unknown'  # default
+        else:
+            dialect = dialect_id
+        #
+        # compose lexemes to reject set
+        lexemes_to_reject_set = set()
+        # add each list of reject lexemes for this dialect
+        for list in self.lexemes_to_reject_db[dialect]:
+            lexemes_to_reject_set.update(set(list))
+        #
+        # compose reserved words set
+        reswords_set = set()
+        # add each list of reserved words for this dialect
+        for list in self.reserved_words_db[dialect]:
+            reswords_set.update(set(list))
+        #
+        # compose builtins set
+        builtins_set = set()
+        # add each list of builtins for this dialect excluding reserved words
+        for list in self.builtins_db[dialect]:
+            builtins_set.update(set(list).difference(reswords_set))
+        #
+        # compose pseudo-builtins set
+        pseudo_builtins_set = set()
+        # add each list of builtins for this dialect excluding reserved words
+        for list in self.pseudo_builtins_db[dialect]:
+            pseudo_builtins_set.update(set(list).difference(reswords_set))
+        #
+        # compose ADTs set
+        adts_set = set()
+        # add each list of ADTs for this dialect excluding reserved words
+        for list in self.stdlib_adts_db[dialect]:
+            adts_set.update(set(list).difference(reswords_set))
+        #
+        # compose modules set
+        modules_set = set()
+        # add each list of builtins for this dialect excluding builtins
+        for list in self.stdlib_modules_db[dialect]:
+            modules_set.update(set(list).difference(builtins_set))
+        #
+        # compose types set
+        types_set = set()
+        # add each list of types for this dialect excluding builtins
+        for list in self.stdlib_types_db[dialect]:
+            types_set.update(set(list).difference(builtins_set))
+        #
+        # compose procedures set
+        procedures_set = set()
+        # add each list of procedures for this dialect excluding builtins
+        for list in self.stdlib_procedures_db[dialect]:
+            procedures_set.update(set(list).difference(builtins_set))
+        #
+        # compose variables set
+        variables_set = set()
+        # add each list of variables for this dialect excluding builtins
+        for list in self.stdlib_variables_db[dialect]:
+            variables_set.update(set(list).difference(builtins_set))
+        #
+        # compose constants set
+        constants_set = set()
+        # add each list of constants for this dialect excluding builtins
+        for list in self.stdlib_constants_db[dialect]:
+            constants_set.update(set(list).difference(builtins_set))
+        #
+        # update lexer state
+        self.dialect = dialect
+        self.lexemes_to_reject = lexemes_to_reject_set
+        self.reserved_words = reswords_set
+        self.builtins = builtins_set
+        self.pseudo_builtins = pseudo_builtins_set
+        self.adts = adts_set
+        self.modules = modules_set
+        self.types = types_set
+        self.procedures = procedures_set
+        self.variables = variables_set
+        self.constants = constants_set
+        #
+        # if __debug__:
+        #    print 'exiting set_dialect'
+        #    print ' self.dialect: ', self.dialect
+        #    print ' self.lexemes_to_reject: ', self.lexemes_to_reject
+        #    print ' self.reserved_words: ', self.reserved_words
+        #    print ' self.builtins: ', self.builtins
+        #    print ' self.pseudo_builtins: ', self.pseudo_builtins
+        #    print ' self.adts: ', self.adts
+        #    print ' self.modules: ', self.modules
+        #    print ' self.types: ', self.types
+        #    print ' self.procedures: ', self.procedures
+        #    print ' self.variables: ', self.variables
+        #    print ' self.types: ', self.types
+        #    print ' self.constants: ', self.constants
+
+    # Extracts a dialect name from a dialect tag comment string  and checks
+    # the extracted name against known dialects.  If a match is found,  the
+    # matching name is returned, otherwise dialect id 'unknown' is returned
+    def get_dialect_from_dialect_tag(self, dialect_tag):
+        #
+        # if __debug__:
+        #    print 'entered get_dialect_from_dialect_tag with arg: ', dialect_tag
+        #
+        # constants
+        left_tag_delim = '(*!'
+        right_tag_delim = '*)'
+        left_tag_delim_len = len(left_tag_delim)
+        right_tag_delim_len = len(right_tag_delim)
+        indicator_start = left_tag_delim_len
+        indicator_end = -(right_tag_delim_len)
+        #
+        # check comment string for dialect indicator
+        if len(dialect_tag) > (left_tag_delim_len + right_tag_delim_len) \
+           and dialect_tag.startswith(left_tag_delim) \
+           and dialect_tag.endswith(right_tag_delim):
+            #
+            # if __debug__:
+            #    print 'dialect tag found'
+            #
+            # extract dialect indicator
+            indicator = dialect_tag[indicator_start:indicator_end]
+            #
+            # if __debug__:
+            #    print 'extracted: ', indicator
+            #
+            # check against known dialects
+            for index in range(1, len(self.dialects)):
+                #
+                # if __debug__:
+                #    print 'dialects[', index, ']: ', self.dialects[index]
+                #
+                if indicator == self.dialects[index]:
+                    #
+                    # if __debug__:
+                    #    print 'matching dialect found'
+                    #
+                    # indicator matches known dialect
+                    return indicator
+            else:
+                # indicator does not match any dialect
+                return 'unknown'  # default
+        else:
+            # invalid indicator string
+            return 'unknown'  # default
+
+    # intercept the token stream, modify token attributes and return them
+    def get_tokens_unprocessed(self, text):
+        for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
+            #
+            # check for dialect tag if dialect has not been set by tag
+            if not self.dialect_set_by_tag and token == Comment.Special:
+                indicated_dialect = self.get_dialect_from_dialect_tag(value)
+                if indicated_dialect != 'unknown':
+                    # token is a dialect indicator
+                    # reset reserved words and builtins
+                    self.set_dialect(indicated_dialect)
+                    self.dialect_set_by_tag = True
+            #
+            # check for reserved words, predefined and stdlib identifiers
+            if token is Name:
+                if value in self.reserved_words:
+                    token = Keyword.Reserved
+                    if self.algol_publication_mode:
+                        value = value.lower()
+                #
+                elif value in self.builtins:
+                    token = Name.Builtin
+                    if self.algol_publication_mode:
+                        value = value.lower()
+                #
+                elif value in self.pseudo_builtins:
+                    token = Name.Builtin.Pseudo
+                    if self.algol_publication_mode:
+                        value = value.lower()
+                #
+                elif value in self.adts:
+                    if not self.treat_stdlib_adts_as_builtins:
+                        token = Name.Namespace
+                    else:
+                        token = Name.Builtin.Pseudo
+                        if self.algol_publication_mode:
+                            value = value.lower()
+                #
+                elif value in self.modules:
+                    token = Name.Namespace
+                #
+                elif value in self.types:
+                    token = Name.Class
+                #
+                elif value in self.procedures:
+                    token = Name.Function
+                #
+                elif value in self.variables:
+                    token = Name.Variable
+                #
+                elif value in self.constants:
+                    token = Name.Constant
+            #
+            elif token in Number:
+                #
+                # mark prefix number literals as error for PIM and ISO dialects
+                if self.dialect not in ('unknown', 'm2r10', 'objm2'):
+                    if "'" in value or value[0:2] in ('0b', '0x', '0u'):
+                        token = Error
+                #
+                elif self.dialect in ('m2r10', 'objm2'):
+                    # mark base-8 number literals as errors for M2 R10 and ObjM2
+                    if token is Number.Oct:
+                        token = Error
+                    # mark suffix base-16 literals as errors for M2 R10 and ObjM2
+                    elif token is Number.Hex and 'H' in value:
+                        token = Error
+                    # mark real numbers with E as errors for M2 R10 and ObjM2
+                    elif token is Number.Float and 'E' in value:
+                        token = Error
+            #
+            elif token in Comment:
+                #
+                # mark single line comment as error for PIM and ISO dialects
+                if token is Comment.Single:
+                    if self.dialect not in ('unknown', 'm2r10', 'objm2'):
+                        token = Error
+                #
+                if token is Comment.Preproc:
+                    # mark ISO pragma as error for PIM dialects
+                    if value.startswith('<*') and \
+                       self.dialect.startswith('m2pim'):
+                        token = Error
+                    # mark PIM pragma as comment for other dialects
+                    elif value.startswith('(*$') and \
+                            self.dialect != 'unknown' and \
+                            not self.dialect.startswith('m2pim'):
+                        token = Comment.Multiline
+            #
+            else:  # token is neither Name nor Comment
+                #
+                # mark lexemes matching the dialect's error token set as errors
+                if value in self.lexemes_to_reject:
+                    token = Error
+                #
+                # substitute lexemes when in Algol mode
+                if self.algol_publication_mode:
+                    if value == '#':
+                        value = '≠'
+                    elif value == '<=':
+                        value = '≤'
+                    elif value == '>=':
+                        value = '≥'
+                    elif value == '==':
+                        value = '≡'
+                    elif value == '*.':
+                        value = '•'
+
+            # return result
+            yield index, token, value
+
+    def analyse_text(text):
+        """It's Pascal-like, but does not use FUNCTION -- uses PROCEDURE
+        instead."""
+
+        # Check if this looks like Pascal, if not, bail out early
+        if not ('(*' in text and '*)' in text and ':=' in text):
+            return
+
+        result = 0
+        # Procedure is in Modula2
+        if re.search(r'\bPROCEDURE\b', text):
+            result += 0.6
+
+        # FUNCTION is only valid in Pascal, but not in Modula2
+        if re.search(r'\bFUNCTION\b', text):
+            result = 0.0
+
+        return result
diff --git a/local_packages/pygments/lexers/mojo.py b/local_packages/pygments/lexers/mojo.py
new file mode 100644
index 0000000..84aac46
--- /dev/null
+++ b/local_packages/pygments/lexers/mojo.py
@@ -0,0 +1,707 @@
+"""
+    pygments.lexers.mojo
+    ~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for Mojo and related languages.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import keyword
+
+from pygments import unistring as uni
+from pygments.lexer import (
+    RegexLexer,
+    bygroups,
+    combined,
+    default,
+    include,
+    this,
+    using,
+    words,
+)
+from pygments.token import (
+    Comment,
+    # Error,
+    Keyword,
+    Name,
+    Number,
+    Operator,
+    Punctuation,
+    String,
+    Text,
+    Whitespace,
+)
+from pygments.util import shebang_matches
+
+__all__ = ["MojoLexer"]
+
+
+class MojoLexer(RegexLexer):
+    """
+    For Mojo source code (version 24.2.1).
+    """
+
+    name = "Mojo"
+    url = "https://docs.modular.com/mojo/"
+    aliases = ["mojo", "🔥"]
+    filenames = [
+        "*.mojo",
+        "*.🔥",
+    ]
+    mimetypes = [
+        "text/x-mojo",
+        "application/x-mojo",
+    ]
+    version_added = "2.18"
+
+    uni_name = f"[{uni.xid_start}][{uni.xid_continue}]*"
+
+    def innerstring_rules(ttype):
+        return [
+            # the old style '%s' % (...) string formatting (still valid in Py3)
+            (
+                r"%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?"
+                "[hlL]?[E-GXc-giorsaux%]",
+                String.Interpol,
+            ),
+            # the new style '{}'.format(...) string formatting
+            (
+                r"\{"
+                r"((\w+)((\.\w+)|(\[[^\]]+\]))*)?"  # field name
+                r"(\![sra])?"  # conversion
+                r"(\:(.?[<>=\^])?[-+ ]?#?0?(\d+)?,?(\.\d+)?[E-GXb-gnosx%]?)?"
+                r"\}",
+                String.Interpol,
+            ),
+            # backslashes, quotes and formatting signs must be parsed one at a time
+            (r'[^\\\'"%{\n]+', ttype),
+            (r'[\'"\\]', ttype),
+            # unhandled string formatting sign
+            (r"%|(\{{1,2})", ttype),
+            # newlines are an error (use "nl" state)
+        ]
+
+    def fstring_rules(ttype):
+        return [
+            # Assuming that a '}' is the closing brace after format specifier.
+            # Sadly, this means that we won't detect syntax error. But it's
+            # more important to parse correct syntax correctly, than to
+            # highlight invalid syntax.
+            (r"\}", String.Interpol),
+            (r"\{", String.Interpol, "expr-inside-fstring"),
+            # backslashes, quotes and formatting signs must be parsed one at a time
+            (r'[^\\\'"{}\n]+', ttype),
+            (r'[\'"\\]', ttype),
+            # newlines are an error (use "nl" state)
+        ]
+
+    tokens = {
+        "root": [
+            (r"\s+", Whitespace),
+            (
+                r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")',
+                bygroups(Whitespace, String.Affix, String.Doc),
+            ),
+            (
+                r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')",
+                bygroups(Whitespace, String.Affix, String.Doc),
+            ),
+            (r"\A#!.+$", Comment.Hashbang),
+            (r"#.*$", Comment.Single),
+            (r"\\\n", Whitespace),
+            (r"\\", Whitespace),
+            include("keywords"),
+            include("soft-keywords"),
+            # In the original PR, all the below here used ((?:\s|\\\s)+) to
+            # designate whitespace, but I can't find any example of this being
+            # needed in the example file, so we're replacing it with `\s+`.
+            (
+                r"(alias)(\s+)",
+                bygroups(Keyword, Whitespace),
+                "varname",  # TODO varname the right fit?
+            ),
+            (r"(var)(\s+)", bygroups(Keyword, Whitespace), "varname"),
+            (r"(def)(\s+)", bygroups(Keyword, Whitespace), "funcname"),
+            (r"(fn)(\s+)", bygroups(Keyword, Whitespace), "funcname"),
+            (
+                r"(class)(\s+)",
+                bygroups(Keyword, Whitespace),
+                "classname",
+            ),  # not implemented yet
+            (r"(struct)(\s+)", bygroups(Keyword, Whitespace), "structname"),
+            (r"(trait)(\s+)", bygroups(Keyword, Whitespace), "structname"),
+            (r"(from)(\s+)", bygroups(Keyword.Namespace, Whitespace), "fromimport"),
+            (r"(import)(\s+)", bygroups(Keyword.Namespace, Whitespace), "import"),
+            include("expr"),
+        ],
+        "expr": [
+            # raw f-strings
+            (
+                '(?i)(rf|fr)(""")',
+                bygroups(String.Affix, String.Double),
+                combined("rfstringescape", "tdqf"),
+            ),
+            (
+                "(?i)(rf|fr)(''')",
+                bygroups(String.Affix, String.Single),
+                combined("rfstringescape", "tsqf"),
+            ),
+            (
+                '(?i)(rf|fr)(")',
+                bygroups(String.Affix, String.Double),
+                combined("rfstringescape", "dqf"),
+            ),
+            (
+                "(?i)(rf|fr)(')",
+                bygroups(String.Affix, String.Single),
+                combined("rfstringescape", "sqf"),
+            ),
+            # non-raw f-strings
+            (
+                '([fF])(""")',
+                bygroups(String.Affix, String.Double),
+                combined("fstringescape", "tdqf"),
+            ),
+            (
+                "([fF])(''')",
+                bygroups(String.Affix, String.Single),
+                combined("fstringescape", "tsqf"),
+            ),
+            (
+                '([fF])(")',
+                bygroups(String.Affix, String.Double),
+                combined("fstringescape", "dqf"),
+            ),
+            (
+                "([fF])(')",
+                bygroups(String.Affix, String.Single),
+                combined("fstringescape", "sqf"),
+            ),
+            # raw bytes and strings
+            ('(?i)(rb|br|r)(""")', bygroups(String.Affix, String.Double), "tdqs"),
+            ("(?i)(rb|br|r)(''')", bygroups(String.Affix, String.Single), "tsqs"),
+            ('(?i)(rb|br|r)(")', bygroups(String.Affix, String.Double), "dqs"),
+            ("(?i)(rb|br|r)(')", bygroups(String.Affix, String.Single), "sqs"),
+            # non-raw strings
+            (
+                '([uU]?)(""")',
+                bygroups(String.Affix, String.Double),
+                combined("stringescape", "tdqs"),
+            ),
+            (
+                "([uU]?)(''')",
+                bygroups(String.Affix, String.Single),
+                combined("stringescape", "tsqs"),
+            ),
+            (
+                '([uU]?)(")',
+                bygroups(String.Affix, String.Double),
+                combined("stringescape", "dqs"),
+            ),
+            (
+                "([uU]?)(')",
+                bygroups(String.Affix, String.Single),
+                combined("stringescape", "sqs"),
+            ),
+            # non-raw bytes
+            (
+                '([bB])(""")',
+                bygroups(String.Affix, String.Double),
+                combined("bytesescape", "tdqs"),
+            ),
+            (
+                "([bB])(''')",
+                bygroups(String.Affix, String.Single),
+                combined("bytesescape", "tsqs"),
+            ),
+            (
+                '([bB])(")',
+                bygroups(String.Affix, String.Double),
+                combined("bytesescape", "dqs"),
+            ),
+            (
+                "([bB])(')",
+                bygroups(String.Affix, String.Single),
+                combined("bytesescape", "sqs"),
+            ),
+            (r"[^\S\n]+", Text),
+            include("numbers"),
+            (r"!=|==|<<|>>|:=|[-~+/*%=<>&^|.]", Operator),
+            (r"([]{}:\(\),;[])+", Punctuation),
+            (r"(in|is|and|or|not)\b", Operator.Word),
+            include("expr-keywords"),
+            include("builtins"),
+            include("magicfuncs"),
+            include("magicvars"),
+            include("name"),
+        ],
+        "expr-inside-fstring": [
+            (r"[{([]", Punctuation, "expr-inside-fstring-inner"),
+            # without format specifier
+            (
+                r"(=\s*)?"  # debug (https://bugs.python.org/issue36817)
+                r"(\![sraf])?"  # conversion
+                r"\}",
+                String.Interpol,
+                "#pop",
+            ),
+            # with format specifier
+            # we'll catch the remaining '}' in the outer scope
+            (
+                r"(=\s*)?"  # debug (https://bugs.python.org/issue36817)
+                r"(\![sraf])?"  # conversion
+                r":",
+                String.Interpol,
+                "#pop",
+            ),
+            (r"\s+", Whitespace),  # allow new lines
+            include("expr"),
+        ],
+        "expr-inside-fstring-inner": [
+            (r"[{([]", Punctuation, "expr-inside-fstring-inner"),
+            (r"[])}]", Punctuation, "#pop"),
+            (r"\s+", Whitespace),  # allow new lines
+            include("expr"),
+        ],
+        "expr-keywords": [
+            # Based on https://docs.python.org/3/reference/expressions.html
+            (
+                words(
+                    (
+                        "async for",  # TODO https://docs.modular.com/mojo/roadmap#no-async-for-or-async-with
+                        "async with",  # TODO https://docs.modular.com/mojo/roadmap#no-async-for-or-async-with
+                        "await",
+                        "else",
+                        "for",
+                        "if",
+                        "lambda",
+                        "yield",
+                        "yield from",
+                    ),
+                    suffix=r"\b",
+                ),
+                Keyword,
+            ),
+            (words(("True", "False", "None"), suffix=r"\b"), Keyword.Constant),
+        ],
+        "keywords": [
+            (
+                words(
+                    (
+                        "assert",
+                        "async",
+                        "await",
+                        "borrowed",
+                        "break",
+                        "continue",
+                        "del",
+                        "elif",
+                        "else",
+                        "except",
+                        "finally",
+                        "for",
+                        "global",
+                        "if",
+                        "lambda",
+                        "pass",
+                        "raise",
+                        "nonlocal",
+                        "return",
+                        "try",
+                        "while",
+                        "yield",
+                        "yield from",
+                        "as",
+                        "with",
+                    ),
+                    suffix=r"\b",
+                ),
+                Keyword,
+            ),
+            (words(("True", "False", "None"), suffix=r"\b"), Keyword.Constant),
+        ],
+        "soft-keywords": [
+            # `match`, `case` and `_` soft keywords
+            (
+                r"(^[ \t]*)"  # at beginning of line + possible indentation
+                r"(match|case)\b"  # a possible keyword
+                r"(?![ \t]*(?:"  # not followed by...
+                r"[:,;=^&|@~)\]}]|(?:" +  # characters and keywords that mean this isn't
+                # pattern matching (but None/True/False is ok)
+                r"|".join(k for k in keyword.kwlist if k[0].islower())
+                + r")\b))",
+                bygroups(Whitespace, Keyword),
+                "soft-keywords-inner",
+            ),
+        ],
+        "soft-keywords-inner": [
+            # optional `_` keyword
+            (r"(\s+)([^\n_]*)(_\b)", bygroups(Whitespace, using(this), Keyword)),
+            default("#pop"),
+        ],
+        "builtins": [
+            (
+                words(
+                    (
+                        "__import__",
+                        "abs",
+                        "aiter",
+                        "all",
+                        "any",
+                        "bin",
+                        "bool",
+                        "bytearray",
+                        "breakpoint",
+                        "bytes",
+                        "callable",
+                        "chr",
+                        "classmethod",
+                        "compile",
+                        "complex",
+                        "delattr",
+                        "dict",
+                        "dir",
+                        "divmod",
+                        "enumerate",
+                        "eval",
+                        "filter",
+                        "float",
+                        "format",
+                        "frozenset",
+                        "getattr",
+                        "globals",
+                        "hasattr",
+                        "hash",
+                        "hex",
+                        "id",
+                        "input",
+                        "int",
+                        "isinstance",
+                        "issubclass",
+                        "iter",
+                        "len",
+                        "list",
+                        "locals",
+                        "map",
+                        "max",
+                        "memoryview",
+                        "min",
+                        "next",
+                        "object",
+                        "oct",
+                        "open",
+                        "ord",
+                        "pow",
+                        "print",
+                        "property",
+                        "range",
+                        "repr",
+                        "reversed",
+                        "round",
+                        "set",
+                        "setattr",
+                        "slice",
+                        "sorted",
+                        "staticmethod",
+                        "str",
+                        "sum",
+                        "super",
+                        "tuple",
+                        "type",
+                        "vars",
+                        "zip",
+                        # Mojo builtin types: https://docs.modular.com/mojo/stdlib/builtin/
+                        "AnyType",
+                        "Coroutine",
+                        "DType",
+                        "Error",
+                        "Int",
+                        "List",
+                        "ListLiteral",
+                        "Scalar",
+                        "Int8",
+                        "UInt8",
+                        "Int16",
+                        "UInt16",
+                        "Int32",
+                        "UInt32",
+                        "Int64",
+                        "UInt64",
+                        "BFloat16",
+                        "Float16",
+                        "Float32",
+                        "Float64",
+                        "SIMD",
+                        "String",
+                        "Tensor",
+                        "Tuple",
+                        "Movable",
+                        "Copyable",
+                        "CollectionElement",
+                    ),
+                    prefix=r"(?>',
+    # Binary augmented
+    '+=', '-=', '*=', '/=', '%=', '**=', '&=', '|=', '^=', '<<=', '>>=',
+    # Comparison
+    '==', '!=', '<', '<=', '>', '>=', '<=>',
+    # Patterns and assignment
+    ':=', '?', '=~', '!~', '=>',
+    # Calls and sends
+    '.', '<-', '->',
+]
+_escape_pattern = (
+    r'(?:\\x[0-9a-fA-F]{2}|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|'
+    r'\\["\'\\bftnr])')
+# _char = _escape_chars + [('.', String.Char)]
+_identifier = r'[_a-zA-Z]\w*'
+
+_constants = [
+    # Void constants
+    'null',
+    # Bool constants
+    'false', 'true',
+    # Double constants
+    'Infinity', 'NaN',
+    # Special objects
+    'M', 'Ref', 'throw', 'traceln',
+]
+
+_guards = [
+    'Any', 'Binding', 'Bool', 'Bytes', 'Char', 'DeepFrozen', 'Double',
+    'Empty', 'Int', 'List', 'Map', 'Near', 'NullOk', 'Same', 'Selfless',
+    'Set', 'Str', 'SubrangeGuard', 'Transparent', 'Void',
+]
+
+_safeScope = [
+    '_accumulateList', '_accumulateMap', '_auditedBy', '_bind',
+    '_booleanFlow', '_comparer', '_equalizer', '_iterForever', '_loop',
+    '_makeBytes', '_makeDouble', '_makeFinalSlot', '_makeInt', '_makeList',
+    '_makeMap', '_makeMessageDesc', '_makeOrderedSpace', '_makeParamDesc',
+    '_makeProtocolDesc', '_makeSourceSpan', '_makeString', '_makeVarSlot',
+    '_makeVerbFacet', '_mapExtract', '_matchSame', '_quasiMatcher',
+    '_slotToBinding', '_splitList', '_suchThat', '_switchFailed',
+    '_validateFor', 'b__quasiParser', 'eval', 'import', 'm__quasiParser',
+    'makeBrandPair', 'makeLazySlot', 'safeScope', 'simple__quasiParser',
+]
+
+
+class MonteLexer(RegexLexer):
+    """
+    Lexer for the Monte programming language.
+    """
+    name = 'Monte'
+    url = 'https://monte.readthedocs.io/'
+    aliases = ['monte']
+    filenames = ['*.mt']
+    version_added = '2.2'
+
+    tokens = {
+        'root': [
+            # Comments
+            (r'#[^\n]*\n', Comment),
+
+            # Docstrings
+            # Apologies for the non-greedy matcher here.
+            (r'/\*\*.*?\*/', String.Doc),
+
+            # `var` declarations
+            (r'\bvar\b', Keyword.Declaration, 'var'),
+
+            # `interface` declarations
+            (r'\binterface\b', Keyword.Declaration, 'interface'),
+
+            # method declarations
+            (words(_methods, prefix='\\b', suffix='\\b'),
+             Keyword, 'method'),
+
+            # All other declarations
+            (words(_declarations, prefix='\\b', suffix='\\b'),
+             Keyword.Declaration),
+
+            # Keywords
+            (words(_keywords, prefix='\\b', suffix='\\b'), Keyword),
+
+            # Literals
+            ('[+-]?0x[_0-9a-fA-F]+', Number.Hex),
+            (r'[+-]?[_0-9]+\.[_0-9]*([eE][+-]?[_0-9]+)?', Number.Float),
+            ('[+-]?[_0-9]+', Number.Integer),
+            ("'", String.Double, 'char'),
+            ('"', String.Double, 'string'),
+
+            # Quasiliterals
+            ('`', String.Backtick, 'ql'),
+
+            # Operators
+            (words(_operators), Operator),
+
+            # Verb operators
+            (_identifier + '=', Operator.Word),
+
+            # Safe scope constants
+            (words(_constants, prefix='\\b', suffix='\\b'),
+             Keyword.Pseudo),
+
+            # Safe scope guards
+            (words(_guards, prefix='\\b', suffix='\\b'), Keyword.Type),
+
+            # All other safe scope names
+            (words(_safeScope, prefix='\\b', suffix='\\b'),
+             Name.Builtin),
+
+            # Identifiers
+            (_identifier, Name),
+
+            # Punctuation
+            (r'\(|\)|\{|\}|\[|\]|:|,', Punctuation),
+
+            # Whitespace
+            (' +', Whitespace),
+
+            # Definite lexer errors
+            ('=', Error),
+        ],
+        'char': [
+            # It is definitely an error to have a char of width == 0.
+            ("'", Error, 'root'),
+            (_escape_pattern, String.Escape, 'charEnd'),
+            ('.', String.Char, 'charEnd'),
+        ],
+        'charEnd': [
+            ("'", String.Char, '#pop:2'),
+            # It is definitely an error to have a char of width > 1.
+            ('.', Error),
+        ],
+        # The state of things coming into an interface.
+        'interface': [
+            (' +', Whitespace),
+            (_identifier, Name.Class, '#pop'),
+            include('root'),
+        ],
+        # The state of things coming into a method.
+        'method': [
+            (' +', Whitespace),
+            (_identifier, Name.Function, '#pop'),
+            include('root'),
+        ],
+        'string': [
+            ('"', String.Double, 'root'),
+            (_escape_pattern, String.Escape),
+            (r'\n', String.Double),
+            ('.', String.Double),
+        ],
+        'ql': [
+            ('`', String.Backtick, 'root'),
+            (r'\$' + _escape_pattern, String.Escape),
+            (r'\$\$', String.Escape),
+            (r'@@', String.Escape),
+            (r'\$\{', String.Interpol, 'qlNest'),
+            (r'@\{', String.Interpol, 'qlNest'),
+            (r'\$' + _identifier, Name),
+            ('@' + _identifier, Name),
+            ('.', String.Backtick),
+        ],
+        'qlNest': [
+            (r'\}', String.Interpol, '#pop'),
+            include('root'),
+        ],
+        # The state of things immediately following `var`.
+        'var': [
+            (' +', Whitespace),
+            (_identifier, Name.Variable, '#pop'),
+            include('root'),
+        ],
+    }
diff --git a/local_packages/pygments/lexers/mosel.py b/local_packages/pygments/lexers/mosel.py
new file mode 100644
index 0000000..912f478
--- /dev/null
+++ b/local_packages/pygments/lexers/mosel.py
@@ -0,0 +1,447 @@
+"""
+    pygments.lexers.mosel
+    ~~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for the mosel language.
+    http://www.fico.com/en/products/fico-xpress-optimization
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+    Number, Punctuation
+
+__all__ = ['MoselLexer']
+
+FUNCTIONS = (
+    # core functions
+    '_',
+    'abs',
+    'arctan',
+    'asproc',
+    'assert',
+    'bitflip',
+    'bitneg',
+    'bitset',
+    'bitshift',
+    'bittest',
+    'bitval',
+    'ceil',
+    'cos',
+    'create',
+    'currentdate',
+    'currenttime',
+    'cutelt',
+    'cutfirst',
+    'cuthead',
+    'cutlast',
+    'cuttail',
+    'datablock',
+    'delcell',
+    'exists',
+    'exit',
+    'exp',
+    'exportprob',
+    'fclose',
+    'fflush',
+    'finalize',
+    'findfirst',
+    'findlast',
+    'floor',
+    'fopen',
+    'fselect',
+    'fskipline',
+    'fwrite',
+    'fwrite_',
+    'fwriteln',
+    'fwriteln_',
+    'getact',
+    'getcoeff',
+    'getcoeffs',
+    'getdual',
+    'getelt',
+    'getfid',
+    'getfirst',
+    'getfname',
+    'gethead',
+    'getlast',
+    'getobjval',
+    'getparam',
+    'getrcost',
+    'getreadcnt',
+    'getreverse',
+    'getsize',
+    'getslack',
+    'getsol',
+    'gettail',
+    'gettype',
+    'getvars',
+    'isdynamic',
+    'iseof',
+    'isfinite',
+    'ishidden',
+    'isinf',
+    'isnan',
+    'isodd',
+    'ln',
+    'localsetparam',
+    'log',
+    'makesos1',
+    'makesos2',
+    'maxlist',
+    'memoryuse',
+    'minlist',
+    'newmuid',
+    'publish',
+    'random',
+    'read',
+    'readln',
+    'reset',
+    'restoreparam',
+    'reverse',
+    'round',
+    'setcoeff',
+    'sethidden',
+    'setioerr',
+    'setmatherr',
+    'setname',
+    'setparam',
+    'setrandseed',
+    'setrange',
+    'settype',
+    'sin',
+    'splithead',
+    'splittail',
+    'sqrt',
+    'strfmt',
+    'substr',
+    'timestamp',
+    'unpublish',
+    'versionnum',
+    'versionstr',
+    'write',
+    'write_',
+    'writeln',
+    'writeln_',
+
+    # mosel exam mmxprs | sed -n -e "s/ [pf][a-z]* \([a-zA-Z0-9_]*\).*/'\1',/p" | sort -u
+    'addcut',
+    'addcuts',
+    'addmipsol',
+    'basisstability',
+    'calcsolinfo',
+    'clearmipdir',
+    'clearmodcut',
+    'command',
+    'copysoltoinit',
+    'crossoverlpsol',
+    'defdelayedrows',
+    'defsecurevecs',
+    'delcuts',
+    'dropcuts',
+    'estimatemarginals',
+    'fixglobal',
+    'flushmsgq',
+    'getbstat',
+    'getcnlist',
+    'getcplist',
+    'getdualray',
+    'getiis',
+    'getiissense',
+    'getiistype',
+    'getinfcause',
+    'getinfeas',
+    'getlb',
+    'getlct',
+    'getleft',
+    'getloadedlinctrs',
+    'getloadedmpvars',
+    'getname',
+    'getprimalray',
+    'getprobstat',
+    'getrange',
+    'getright',
+    'getsensrng',
+    'getsize',
+    'getsol',
+    'gettype',
+    'getub',
+    'getvars',
+    'gety',
+    'hasfeature',
+    'implies',
+    'indicator',
+    'initglobal',
+    'ishidden',
+    'isiisvalid',
+    'isintegral',
+    'loadbasis',
+    'loadcuts',
+    'loadlpsol',
+    'loadmipsol',
+    'loadprob',
+    'maximise',
+    'maximize',
+    'minimise',
+    'minimize',
+    'postsolve',
+    'readbasis',
+    'readdirs',
+    'readsol',
+    'refinemipsol',
+    'rejectintsol',
+    'repairinfeas',
+    'repairinfeas_deprec',
+    'resetbasis',
+    'resetiis',
+    'resetsol',
+    'savebasis',
+    'savemipsol',
+    'savesol',
+    'savestate',
+    'selectsol',
+    'setarchconsistency',
+    'setbstat',
+    'setcallback',
+    'setcbcutoff',
+    'setgndata',
+    'sethidden',
+    'setlb',
+    'setmipdir',
+    'setmodcut',
+    'setsol',
+    'setub',
+    'setucbdata',
+    'stopoptimise',
+    'stopoptimize',
+    'storecut',
+    'storecuts',
+    'unloadprob',
+    'uselastbarsol',
+    'writebasis',
+    'writedirs',
+    'writeprob',
+    'writesol',
+    'xor',
+    'xprs_addctr',
+    'xprs_addindic',
+
+    # mosel exam mmsystem | sed -n -e "s/ [pf][a-z]* \([a-zA-Z0-9_]*\).*/'\1',/p" | sort -u
+    'addmonths',
+    'copytext',
+    'cuttext',
+    'deltext',
+    'endswith',
+    'erase',
+    'expandpath',
+    'fcopy',
+    'fdelete',
+    'findfiles',
+    'findtext',
+    'fmove',
+    'formattext',
+    'getasnumber',
+    'getchar',
+    'getcwd',
+    'getdate',
+    'getday',
+    'getdaynum',
+    'getdays',
+    'getdirsep',
+    'getdsoparam',
+    'getendparse',
+    'getenv',
+    'getfsize',
+    'getfstat',
+    'getftime',
+    'gethour',
+    'getminute',
+    'getmonth',
+    'getmsec',
+    'getoserrmsg',
+    'getoserror',
+    'getpathsep',
+    'getqtype',
+    'getsecond',
+    'getsepchar',
+    'getsize',
+    'getstart',
+    'getsucc',
+    'getsysinfo',
+    'getsysstat',
+    'gettime',
+    'gettmpdir',
+    'gettrim',
+    'getweekday',
+    'getyear',
+    'inserttext',
+    'isvalid',
+    'jointext',
+    'makedir',
+    'makepath',
+    'newtar',
+    'newzip',
+    'nextfield',
+    'openpipe',
+    'parseextn',
+    'parseint',
+    'parsereal',
+    'parsetext',
+    'pastetext',
+    'pathmatch',
+    'pathsplit',
+    'qsort',
+    'quote',
+    'readtextline',
+    'regmatch',
+    'regreplace',
+    'removedir',
+    'removefiles',
+    'setchar',
+    'setdate',
+    'setday',
+    'setdsoparam',
+    'setendparse',
+    'setenv',
+    'sethour',
+    'setminute',
+    'setmonth',
+    'setmsec',
+    'setoserror',
+    'setqtype',
+    'setsecond',
+    'setsepchar',
+    'setstart',
+    'setsucc',
+    'settime',
+    'settrim',
+    'setyear',
+    'sleep',
+    'splittext',
+    'startswith',
+    'system',
+    'tarlist',
+    'textfmt',
+    'tolower',
+    'toupper',
+    'trim',
+    'untar',
+    'unzip',
+    'ziplist',
+
+    # mosel exam mmjobs | sed -n -e "s/ [pf][a-z]* \([a-zA-Z0-9_]*\).*/'\1',/p" | sort -u
+    'canceltimer',
+    'clearaliases',
+    'compile',
+    'connect',
+    'detach',
+    'disconnect',
+    'dropnextevent',
+    'findxsrvs',
+    'getaliases',
+    'getannidents',
+    'getannotations',
+    'getbanner',
+    'getclass',
+    'getdsoprop',
+    'getdsopropnum',
+    'getexitcode',
+    'getfromgid',
+    'getfromid',
+    'getfromuid',
+    'getgid',
+    'gethostalias',
+    'getid',
+    'getmodprop',
+    'getmodpropnum',
+    'getnextevent',
+    'getnode',
+    'getrmtid',
+    'getstatus',
+    'getsysinfo',
+    'gettimer',
+    'getuid',
+    'getvalue',
+    'isqueueempty',
+    'load',
+    'nullevent',
+    'peeknextevent',
+    'resetmodpar',
+    'run',
+    'send',
+    'setcontrol',
+    'setdefstream',
+    'setgid',
+    'sethostalias',
+    'setmodpar',
+    'settimer',
+    'setuid',
+    'setworkdir',
+    'stop',
+    'unload',
+    'wait',
+    'waitexpired',
+    'waitfor',
+    'waitforend',
+)
+
+
+class MoselLexer(RegexLexer):
+    """
+    For the Mosel optimization language.
+    """
+    name = 'Mosel'
+    aliases = ['mosel']
+    filenames = ['*.mos']
+    url = 'https://www.fico.com/fico-xpress-optimization/docs/latest/mosel/mosel_lang/dhtml/moselreflang.html'
+    version_added = '2.6'
+
+    tokens = {
+        'root': [
+            (r'\n', Text),
+            (r'\s+', Text.Whitespace),
+            (r'!.*?\n', Comment.Single),
+            (r'\(!(.|\n)*?!\)', Comment.Multiline),
+            (words((
+                'and', 'as', 'break', 'case', 'count', 'declarations', 'do',
+                'dynamic', 'elif', 'else', 'end-', 'end', 'evaluation', 'false',
+                'forall', 'forward', 'from', 'function', 'hashmap', 'if',
+                'imports', 'include', 'initialisations', 'initializations', 'inter',
+                'max', 'min', 'model', 'namespace', 'next', 'not', 'nsgroup',
+                'nssearch', 'of', 'options', 'or', 'package', 'parameters',
+                'procedure', 'public', 'prod', 'record', 'repeat', 'requirements',
+                'return', 'sum', 'then', 'to', 'true', 'union', 'until', 'uses',
+                'version', 'while', 'with'), prefix=r'\b', suffix=r'\b'),
+             Keyword.Builtin),
+            (words((
+                'range', 'array', 'set', 'list', 'mpvar', 'mpproblem', 'linctr',
+                'nlctr', 'integer', 'string', 'real', 'boolean', 'text', 'time',
+                'date', 'datetime', 'returned', 'Model', 'Mosel', 'counter',
+                'xmldoc', 'is_sos1', 'is_sos2', 'is_integer', 'is_binary',
+                'is_continuous', 'is_free', 'is_semcont', 'is_semint',
+                'is_partint'), prefix=r'\b', suffix=r'\b'),
+             Keyword.Type),
+            (r'(\+|\-|\*|/|=|<=|>=|\||\^|<|>|<>|\.\.|\.|:=|::|:|in|mod|div)',
+             Operator),
+            (r'[()\[\]{},;]+', Punctuation),
+            (words(FUNCTIONS,  prefix=r'\b', suffix=r'\b'), Name.Function),
+            (r'(\d+\.(?!\.)\d*|\.(?!.)\d+)([eE][+-]?\d+)?', Number.Float),
+            (r'\d+([eE][+-]?\d+)?', Number.Integer),
+            (r'[+-]?Infinity', Number.Integer),
+            (r'0[xX][0-9a-fA-F]+', Number),
+            (r'"', String.Double, 'double_quote'),
+            (r'\'', String.Single, 'single_quote'),
+            (r'(\w+|(\.(?!\.)))', Text),
+        ],
+        'single_quote': [
+            (r'\'', String.Single, '#pop'),
+            (r'[^\']+', String.Single),
+        ],
+        'double_quote': [
+            (r'(\\"|\\[0-7]{1,3}\D|\\[abfnrtv]|\\\\)', String.Escape),
+            (r'\"', String.Double, '#pop'),
+            (r'[^"\\]+', String.Double),
+        ],
+    }
diff --git a/local_packages/pygments/lexers/ncl.py b/local_packages/pygments/lexers/ncl.py
new file mode 100644
index 0000000..a2729ef
--- /dev/null
+++ b/local_packages/pygments/lexers/ncl.py
@@ -0,0 +1,894 @@
+"""
+    pygments.lexers.ncl
+    ~~~~~~~~~~~~~~~~~~~
+
+    Lexers for NCAR Command Language.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+    Number, Punctuation
+
+__all__ = ['NCLLexer']
+
+
+class NCLLexer(RegexLexer):
+    """
+    Lexer for NCL code.
+    """
+    name = 'NCL'
+    aliases = ['ncl']
+    filenames = ['*.ncl']
+    mimetypes = ['text/ncl']
+    url = 'https://www.ncl.ucar.edu'
+    version_added = '2.2'
+
+    flags = re.MULTILINE
+
+    tokens = {
+        'root': [
+            (r';.*\n', Comment),
+            include('strings'),
+            include('core'),
+            (r'[a-zA-Z_]\w*', Name),
+            include('nums'),
+            (r'[\s]+', Text),
+        ],
+        'core': [
+            # Statements
+            (words((
+                'begin', 'break', 'continue', 'create', 'defaultapp', 'do',
+                'else', 'end', 'external', 'exit', 'True', 'False', 'file', 'function',
+                'getvalues', 'graphic', 'group', 'if', 'list', 'load', 'local',
+                'new', '_Missing', 'Missing', 'noparent', 'procedure',
+                'quit', 'QUIT', 'Quit', 'record', 'return', 'setvalues', 'stop',
+                'then', 'while'), prefix=r'\b', suffix=r'\s*\b'),
+             Keyword),
+
+            # Data Types
+            (words((
+                'ubyte', 'uint', 'uint64', 'ulong', 'string', 'byte',
+                'character', 'double', 'float', 'integer', 'int64', 'logical',
+                'long', 'short', 'ushort', 'enumeric', 'numeric', 'snumeric'),
+                prefix=r'\b', suffix=r'\s*\b'),
+             Keyword.Type),
+
+            # Operators
+            (r'[\%^*+\-/<>]', Operator),
+
+            # punctuation:
+            (r'[\[\]():@$!&|.,\\{}]', Punctuation),
+            (r'[=:]', Punctuation),
+
+            # Intrinsics
+            (words((
+                'abs', 'acos', 'addfile', 'addfiles', 'all', 'angmom_atm', 'any',
+                'area_conserve_remap', 'area_hi2lores', 'area_poly_sphere',
+                'asciiread', 'asciiwrite', 'asin', 'atan', 'atan2', 'attsetvalues',
+                'avg', 'betainc', 'bin_avg', 'bin_sum', 'bw_bandpass_filter',
+                'cancor', 'cbinread', 'cbinwrite', 'cd_calendar', 'cd_inv_calendar',
+                'cdfbin_p', 'cdfbin_pr', 'cdfbin_s', 'cdfbin_xn', 'cdfchi_p',
+                'cdfchi_x', 'cdfgam_p', 'cdfgam_x', 'cdfnor_p', 'cdfnor_x',
+                'cdft_p', 'cdft_t', 'ceil', 'center_finite_diff',
+                'center_finite_diff_n', 'cfftb', 'cfftf', 'cfftf_frq_reorder',
+                'charactertodouble', 'charactertofloat', 'charactertointeger',
+                'charactertolong', 'charactertoshort', 'charactertostring',
+                'chartodouble', 'chartofloat', 'chartoint', 'chartointeger',
+                'chartolong', 'chartoshort', 'chartostring', 'chiinv', 'clear',
+                'color_index_to_rgba', 'conform', 'conform_dims', 'cos', 'cosh',
+                'count_unique_values', 'covcorm', 'covcorm_xy', 'craybinnumrec',
+                'craybinrecread', 'create_graphic', 'csa1', 'csa1d', 'csa1s',
+                'csa1x', 'csa1xd', 'csa1xs', 'csa2', 'csa2d', 'csa2l', 'csa2ld',
+                'csa2ls', 'csa2lx', 'csa2lxd', 'csa2lxs', 'csa2s', 'csa2x',
+                'csa2xd', 'csa2xs', 'csa3', 'csa3d', 'csa3l', 'csa3ld', 'csa3ls',
+                'csa3lx', 'csa3lxd', 'csa3lxs', 'csa3s', 'csa3x', 'csa3xd',
+                'csa3xs', 'csc2s', 'csgetp', 'css2c', 'cssetp', 'cssgrid', 'csstri',
+                'csvoro', 'cumsum', 'cz2ccm', 'datatondc', 'day_of_week',
+                'day_of_year', 'days_in_month', 'default_fillvalue', 'delete',
+                'depth_to_pres', 'destroy', 'determinant', 'dewtemp_trh',
+                'dgeevx_lapack', 'dim_acumrun_n', 'dim_avg', 'dim_avg_n',
+                'dim_avg_wgt', 'dim_avg_wgt_n', 'dim_cumsum', 'dim_cumsum_n',
+                'dim_gamfit_n', 'dim_gbits', 'dim_max', 'dim_max_n', 'dim_median',
+                'dim_median_n', 'dim_min', 'dim_min_n', 'dim_num', 'dim_num_n',
+                'dim_numrun_n', 'dim_pqsort', 'dim_pqsort_n', 'dim_product',
+                'dim_product_n', 'dim_rmsd', 'dim_rmsd_n', 'dim_rmvmean',
+                'dim_rmvmean_n', 'dim_rmvmed', 'dim_rmvmed_n', 'dim_spi_n',
+                'dim_standardize', 'dim_standardize_n', 'dim_stat4', 'dim_stat4_n',
+                'dim_stddev', 'dim_stddev_n', 'dim_sum', 'dim_sum_n', 'dim_sum_wgt',
+                'dim_sum_wgt_n', 'dim_variance', 'dim_variance_n', 'dimsizes',
+                'doubletobyte', 'doubletochar', 'doubletocharacter',
+                'doubletofloat', 'doubletoint', 'doubletointeger', 'doubletolong',
+                'doubletoshort', 'dpres_hybrid_ccm', 'dpres_plevel', 'draw',
+                'draw_color_palette', 'dsgetp', 'dsgrid2', 'dsgrid2d', 'dsgrid2s',
+                'dsgrid3', 'dsgrid3d', 'dsgrid3s', 'dspnt2', 'dspnt2d', 'dspnt2s',
+                'dspnt3', 'dspnt3d', 'dspnt3s', 'dssetp', 'dtrend', 'dtrend_msg',
+                'dtrend_msg_n', 'dtrend_n', 'dtrend_quadratic',
+                'dtrend_quadratic_msg_n', 'dv2uvf', 'dv2uvg', 'dz_height',
+                'echo_off', 'echo_on', 'eof2data', 'eof_varimax', 'eofcor',
+                'eofcor_pcmsg', 'eofcor_ts', 'eofcov', 'eofcov_pcmsg', 'eofcov_ts',
+                'eofunc', 'eofunc_ts', 'eofunc_varimax', 'equiv_sample_size', 'erf',
+                'erfc', 'esacr', 'esacv', 'esccr', 'esccv', 'escorc', 'escorc_n',
+                'escovc', 'exit', 'exp', 'exp_tapersh', 'exp_tapersh_wgts',
+                'exp_tapershC', 'ezfftb', 'ezfftb_n', 'ezfftf', 'ezfftf_n',
+                'f2fosh', 'f2foshv', 'f2fsh', 'f2fshv', 'f2gsh', 'f2gshv', 'fabs',
+                'fbindirread', 'fbindirwrite', 'fbinnumrec', 'fbinread',
+                'fbinrecread', 'fbinrecwrite', 'fbinwrite', 'fft2db', 'fft2df',
+                'fftshift', 'fileattdef', 'filechunkdimdef', 'filedimdef',
+                'fileexists', 'filegrpdef', 'filevarattdef', 'filevarchunkdef',
+                'filevarcompressleveldef', 'filevardef', 'filevardimsizes',
+                'filwgts_lancos', 'filwgts_lanczos', 'filwgts_normal',
+                'floattobyte', 'floattochar', 'floattocharacter', 'floattoint',
+                'floattointeger', 'floattolong', 'floattoshort', 'floor',
+                'fluxEddy', 'fo2fsh', 'fo2fshv', 'fourier_info', 'frame', 'fspan',
+                'ftcurv', 'ftcurvd', 'ftcurvi', 'ftcurvp', 'ftcurvpi', 'ftcurvps',
+                'ftcurvs', 'ftest', 'ftgetp', 'ftkurv', 'ftkurvd', 'ftkurvp',
+                'ftkurvpd', 'ftsetp', 'ftsurf', 'g2fsh', 'g2fshv', 'g2gsh',
+                'g2gshv', 'gamma', 'gammainc', 'gaus', 'gaus_lobat',
+                'gaus_lobat_wgt', 'gc_aangle', 'gc_clkwise', 'gc_dangle',
+                'gc_inout', 'gc_latlon', 'gc_onarc', 'gc_pnt2gc', 'gc_qarea',
+                'gc_tarea', 'generate_2d_array', 'get_color_index',
+                'get_color_rgba', 'get_cpu_time', 'get_isolines', 'get_ncl_version',
+                'get_script_name', 'get_script_prefix_name', 'get_sphere_radius',
+                'get_unique_values', 'getbitsone', 'getenv', 'getfiledimsizes',
+                'getfilegrpnames', 'getfilepath', 'getfilevaratts',
+                'getfilevarchunkdimsizes', 'getfilevardims', 'getfilevardimsizes',
+                'getfilevarnames', 'getfilevartypes', 'getvaratts', 'getvardims',
+                'gradsf', 'gradsg', 'greg2jul', 'grid2triple', 'hlsrgb', 'hsvrgb',
+                'hydro', 'hyi2hyo', 'idsfft', 'igradsf', 'igradsg', 'ilapsf',
+                'ilapsg', 'ilapvf', 'ilapvg', 'ind', 'ind_resolve', 'int2p',
+                'int2p_n', 'integertobyte', 'integertochar', 'integertocharacter',
+                'integertoshort', 'inttobyte', 'inttochar', 'inttoshort',
+                'inverse_matrix', 'isatt', 'isbigendian', 'isbyte', 'ischar',
+                'iscoord', 'isdefined', 'isdim', 'isdimnamed', 'isdouble',
+                'isenumeric', 'isfile', 'isfilepresent', 'isfilevar',
+                'isfilevaratt', 'isfilevarcoord', 'isfilevardim', 'isfloat',
+                'isfunc', 'isgraphic', 'isint', 'isint64', 'isinteger',
+                'isleapyear', 'islogical', 'islong', 'ismissing', 'isnan_ieee',
+                'isnumeric', 'ispan', 'isproc', 'isshort', 'issnumeric', 'isstring',
+                'isubyte', 'isuint', 'isuint64', 'isulong', 'isunlimited',
+                'isunsigned', 'isushort', 'isvar', 'jul2greg', 'kmeans_as136',
+                'kolsm2_n', 'kron_product', 'lapsf', 'lapsg', 'lapvf', 'lapvg',
+                'latlon2utm', 'lclvl', 'lderuvf', 'lderuvg', 'linint1', 'linint1_n',
+                'linint2', 'linint2_points', 'linmsg', 'linmsg_n', 'linrood_latwgt',
+                'linrood_wgt', 'list_files', 'list_filevars', 'list_hlus',
+                'list_procfuncs', 'list_vars', 'ListAppend', 'ListCount',
+                'ListGetType', 'ListIndex', 'ListIndexFromName', 'ListPop',
+                'ListPush', 'ListSetType', 'loadscript', 'local_max', 'local_min',
+                'log', 'log10', 'longtobyte', 'longtochar', 'longtocharacter',
+                'longtoint', 'longtointeger', 'longtoshort', 'lspoly', 'lspoly_n',
+                'mask', 'max', 'maxind', 'min', 'minind', 'mixed_layer_depth',
+                'mixhum_ptd', 'mixhum_ptrh', 'mjo_cross_coh2pha',
+                'mjo_cross_segment', 'moc_globe_atl', 'monthday', 'natgrid',
+                'natgridd', 'natgrids', 'ncargpath', 'ncargversion', 'ndctodata',
+                'ndtooned', 'new', 'NewList', 'ngezlogo', 'nggcog', 'nggetp',
+                'nglogo', 'ngsetp', 'NhlAddAnnotation', 'NhlAddData',
+                'NhlAddOverlay', 'NhlAddPrimitive', 'NhlAppGetDefaultParentId',
+                'NhlChangeWorkstation', 'NhlClassName', 'NhlClearWorkstation',
+                'NhlDataPolygon', 'NhlDataPolyline', 'NhlDataPolymarker',
+                'NhlDataToNDC', 'NhlDestroy', 'NhlDraw', 'NhlFrame', 'NhlFreeColor',
+                'NhlGetBB', 'NhlGetClassResources', 'NhlGetErrorObjectId',
+                'NhlGetNamedColorIndex', 'NhlGetParentId',
+                'NhlGetParentWorkstation', 'NhlGetWorkspaceObjectId',
+                'NhlIsAllocatedColor', 'NhlIsApp', 'NhlIsDataComm', 'NhlIsDataItem',
+                'NhlIsDataSpec', 'NhlIsTransform', 'NhlIsView', 'NhlIsWorkstation',
+                'NhlName', 'NhlNDCPolygon', 'NhlNDCPolyline', 'NhlNDCPolymarker',
+                'NhlNDCToData', 'NhlNewColor', 'NhlNewDashPattern', 'NhlNewMarker',
+                'NhlPalGetDefined', 'NhlRemoveAnnotation', 'NhlRemoveData',
+                'NhlRemoveOverlay', 'NhlRemovePrimitive', 'NhlSetColor',
+                'NhlSetDashPattern', 'NhlSetMarker', 'NhlUpdateData',
+                'NhlUpdateWorkstation', 'nice_mnmxintvl', 'nngetaspectd',
+                'nngetaspects', 'nngetp', 'nngetsloped', 'nngetslopes', 'nngetwts',
+                'nngetwtsd', 'nnpnt', 'nnpntd', 'nnpntend', 'nnpntendd',
+                'nnpntinit', 'nnpntinitd', 'nnpntinits', 'nnpnts', 'nnsetp', 'num',
+                'obj_anal_ic', 'omega_ccm', 'onedtond', 'overlay', 'paleo_outline',
+                'pdfxy_bin', 'poisson_grid_fill', 'pop_remap', 'potmp_insitu_ocn',
+                'prcwater_dp', 'pres2hybrid', 'pres_hybrid_ccm', 'pres_sigma',
+                'print', 'print_table', 'printFileVarSummary', 'printVarSummary',
+                'product', 'pslec', 'pslhor', 'pslhyp', 'qsort', 'rand',
+                'random_chi', 'random_gamma', 'random_normal', 'random_setallseed',
+                'random_uniform', 'rcm2points', 'rcm2rgrid', 'rdsstoi',
+                'read_colormap_file', 'reg_multlin', 'regcoef', 'regCoef_n',
+                'regline', 'relhum', 'replace_ieeenan', 'reshape', 'reshape_ind',
+                'rgba_to_color_index', 'rgbhls', 'rgbhsv', 'rgbyiq', 'rgrid2rcm',
+                'rhomb_trunc', 'rip_cape_2d', 'rip_cape_3d', 'round', 'rtest',
+                'runave', 'runave_n', 'set_default_fillvalue', 'set_sphere_radius',
+                'setfileoption', 'sfvp2uvf', 'sfvp2uvg', 'shaec', 'shagc',
+                'shgetnp', 'shgetp', 'shgrid', 'shorttobyte', 'shorttochar',
+                'shorttocharacter', 'show_ascii', 'shsec', 'shsetp', 'shsgc',
+                'shsgc_R42', 'sigma2hybrid', 'simpeq', 'simpne', 'sin',
+                'sindex_yrmo', 'sinh', 'sizeof', 'sleep', 'smth9', 'snindex_yrmo',
+                'solve_linsys', 'span_color_indexes', 'span_color_rgba',
+                'sparse_matrix_mult', 'spcorr', 'spcorr_n', 'specx_anal',
+                'specxy_anal', 'spei', 'sprintf', 'sprinti', 'sqrt', 'sqsort',
+                'srand', 'stat2', 'stat4', 'stat_medrng', 'stat_trim',
+                'status_exit', 'stdatmus_p2tdz', 'stdatmus_z2tdp', 'stddev',
+                'str_capital', 'str_concat', 'str_fields_count', 'str_get_cols',
+                'str_get_dq', 'str_get_field', 'str_get_nl', 'str_get_sq',
+                'str_get_tab', 'str_index_of_substr', 'str_insert', 'str_is_blank',
+                'str_join', 'str_left_strip', 'str_lower', 'str_match',
+                'str_match_ic', 'str_match_ic_regex', 'str_match_ind',
+                'str_match_ind_ic', 'str_match_ind_ic_regex', 'str_match_ind_regex',
+                'str_match_regex', 'str_right_strip', 'str_split',
+                'str_split_by_length', 'str_split_csv', 'str_squeeze', 'str_strip',
+                'str_sub_str', 'str_switch', 'str_upper', 'stringtochar',
+                'stringtocharacter', 'stringtodouble', 'stringtofloat',
+                'stringtoint', 'stringtointeger', 'stringtolong', 'stringtoshort',
+                'strlen', 'student_t', 'sum', 'svd_lapack', 'svdcov', 'svdcov_sv',
+                'svdstd', 'svdstd_sv', 'system', 'systemfunc', 'tan', 'tanh',
+                'taper', 'taper_n', 'tdclrs', 'tdctri', 'tdcudp', 'tdcurv',
+                'tddtri', 'tdez2d', 'tdez3d', 'tdgetp', 'tdgrds', 'tdgrid',
+                'tdgtrs', 'tdinit', 'tditri', 'tdlbla', 'tdlblp', 'tdlbls',
+                'tdline', 'tdlndp', 'tdlnpa', 'tdlpdp', 'tdmtri', 'tdotri',
+                'tdpara', 'tdplch', 'tdprpa', 'tdprpi', 'tdprpt', 'tdsetp',
+                'tdsort', 'tdstri', 'tdstrs', 'tdttri', 'thornthwaite', 'tobyte',
+                'tochar', 'todouble', 'tofloat', 'toint', 'toint64', 'tointeger',
+                'tolong', 'toshort', 'tosigned', 'tostring', 'tostring_with_format',
+                'totype', 'toubyte', 'touint', 'touint64', 'toulong', 'tounsigned',
+                'toushort', 'trend_manken', 'tri_trunc', 'triple2grid',
+                'triple2grid2d', 'trop_wmo', 'ttest', 'typeof', 'undef',
+                'unique_string', 'update', 'ushorttoint', 'ut_calendar',
+                'ut_inv_calendar', 'utm2latlon', 'uv2dv_cfd', 'uv2dvf', 'uv2dvg',
+                'uv2sfvpf', 'uv2sfvpg', 'uv2vr_cfd', 'uv2vrdvf', 'uv2vrdvg',
+                'uv2vrf', 'uv2vrg', 'v5d_close', 'v5d_create', 'v5d_setLowLev',
+                'v5d_setUnits', 'v5d_write', 'v5d_write_var', 'variance', 'vhaec',
+                'vhagc', 'vhsec', 'vhsgc', 'vibeta', 'vinth2p', 'vinth2p_ecmwf',
+                'vinth2p_ecmwf_nodes', 'vinth2p_nodes', 'vintp2p_ecmwf', 'vr2uvf',
+                'vr2uvg', 'vrdv2uvf', 'vrdv2uvg', 'wavelet', 'wavelet_default',
+                'weibull', 'wgt_area_smooth', 'wgt_areaave', 'wgt_areaave2',
+                'wgt_arearmse', 'wgt_arearmse2', 'wgt_areasum2', 'wgt_runave',
+                'wgt_runave_n', 'wgt_vert_avg_beta', 'wgt_volave', 'wgt_volave_ccm',
+                'wgt_volrmse', 'wgt_volrmse_ccm', 'where', 'wk_smooth121', 'wmbarb',
+                'wmbarbmap', 'wmdrft', 'wmgetp', 'wmlabs', 'wmsetp', 'wmstnm',
+                'wmvect', 'wmvectmap', 'wmvlbl', 'wrf_avo', 'wrf_cape_2d',
+                'wrf_cape_3d', 'wrf_dbz', 'wrf_eth', 'wrf_helicity', 'wrf_ij_to_ll',
+                'wrf_interp_1d', 'wrf_interp_2d_xy', 'wrf_interp_3d_z',
+                'wrf_latlon_to_ij', 'wrf_ll_to_ij', 'wrf_omega', 'wrf_pvo',
+                'wrf_rh', 'wrf_slp', 'wrf_smooth_2d', 'wrf_td', 'wrf_tk',
+                'wrf_updraft_helicity', 'wrf_uvmet', 'wrf_virtual_temp',
+                'wrf_wetbulb', 'wrf_wps_close_int', 'wrf_wps_open_int',
+                'wrf_wps_rddata_int', 'wrf_wps_rdhead_int', 'wrf_wps_read_int',
+                'wrf_wps_write_int', 'write_matrix', 'write_table', 'yiqrgb',
+                'z2geouv', 'zonal_mpsi', 'addfiles_GetVar', 'advect_variable',
+                'area_conserve_remap_Wrap', 'area_hi2lores_Wrap',
+                'array_append_record', 'assignFillValue', 'byte2flt',
+                'byte2flt_hdf', 'calcDayAnomTLL', 'calcMonAnomLLLT',
+                'calcMonAnomLLT', 'calcMonAnomTLL', 'calcMonAnomTLLL',
+                'calculate_monthly_values', 'cd_convert', 'changeCase',
+                'changeCaseChar', 'clmDayTLL', 'clmDayTLLL', 'clmMon2clmDay',
+                'clmMonLLLT', 'clmMonLLT', 'clmMonTLL', 'clmMonTLLL', 'closest_val',
+                'copy_VarAtts', 'copy_VarCoords', 'copy_VarCoords_1',
+                'copy_VarCoords_2', 'copy_VarMeta', 'copyatt', 'crossp3',
+                'cshstringtolist', 'cssgrid_Wrap', 'dble2flt', 'decimalPlaces',
+                'delete_VarAtts', 'dim_avg_n_Wrap', 'dim_avg_wgt_n_Wrap',
+                'dim_avg_wgt_Wrap', 'dim_avg_Wrap', 'dim_cumsum_n_Wrap',
+                'dim_cumsum_Wrap', 'dim_max_n_Wrap', 'dim_min_n_Wrap',
+                'dim_rmsd_n_Wrap', 'dim_rmsd_Wrap', 'dim_rmvmean_n_Wrap',
+                'dim_rmvmean_Wrap', 'dim_rmvmed_n_Wrap', 'dim_rmvmed_Wrap',
+                'dim_standardize_n_Wrap', 'dim_standardize_Wrap',
+                'dim_stddev_n_Wrap', 'dim_stddev_Wrap', 'dim_sum_n_Wrap',
+                'dim_sum_wgt_n_Wrap', 'dim_sum_wgt_Wrap', 'dim_sum_Wrap',
+                'dim_variance_n_Wrap', 'dim_variance_Wrap', 'dpres_plevel_Wrap',
+                'dtrend_leftdim', 'dv2uvF_Wrap', 'dv2uvG_Wrap', 'eof_north',
+                'eofcor_Wrap', 'eofcov_Wrap', 'eofunc_north', 'eofunc_ts_Wrap',
+                'eofunc_varimax_reorder', 'eofunc_varimax_Wrap', 'eofunc_Wrap',
+                'epsZero', 'f2fosh_Wrap', 'f2foshv_Wrap', 'f2fsh_Wrap',
+                'f2fshv_Wrap', 'f2gsh_Wrap', 'f2gshv_Wrap', 'fbindirSwap',
+                'fbinseqSwap1', 'fbinseqSwap2', 'flt2dble', 'flt2string',
+                'fo2fsh_Wrap', 'fo2fshv_Wrap', 'g2fsh_Wrap', 'g2fshv_Wrap',
+                'g2gsh_Wrap', 'g2gshv_Wrap', 'generate_resample_indices',
+                'generate_sample_indices', 'generate_unique_indices',
+                'genNormalDist', 'get1Dindex', 'get1Dindex_Collapse',
+                'get1Dindex_Exclude', 'get_file_suffix', 'GetFillColor',
+                'GetFillColorIndex', 'getFillValue', 'getind_latlon2d',
+                'getVarDimNames', 'getVarFillValue', 'grib_stime2itime',
+                'hyi2hyo_Wrap', 'ilapsF_Wrap', 'ilapsG_Wrap', 'ind_nearest_coord',
+                'indStrSubset', 'int2dble', 'int2flt', 'int2p_n_Wrap', 'int2p_Wrap',
+                'isMonotonic', 'isStrSubset', 'latGau', 'latGauWgt', 'latGlobeF',
+                'latGlobeFo', 'latRegWgt', 'linint1_n_Wrap', 'linint1_Wrap',
+                'linint2_points_Wrap', 'linint2_Wrap', 'local_max_1d',
+                'local_min_1d', 'lonFlip', 'lonGlobeF', 'lonGlobeFo', 'lonPivot',
+                'merge_levels_sfc', 'mod', 'month_to_annual',
+                'month_to_annual_weighted', 'month_to_season', 'month_to_season12',
+                'month_to_seasonN', 'monthly_total_to_daily_mean', 'nameDim',
+                'natgrid_Wrap', 'NewCosWeight', 'niceLatLon2D', 'NormCosWgtGlobe',
+                'numAsciiCol', 'numAsciiRow', 'numeric2int',
+                'obj_anal_ic_deprecated', 'obj_anal_ic_Wrap', 'omega_ccm_driver',
+                'omega_to_w', 'oneDtostring', 'pack_values', 'pattern_cor', 'pdfx',
+                'pdfxy', 'pdfxy_conform', 'pot_temp', 'pot_vort_hybrid',
+                'pot_vort_isobaric', 'pres2hybrid_Wrap', 'print_clock',
+                'printMinMax', 'quadroots', 'rcm2points_Wrap', 'rcm2rgrid_Wrap',
+                'readAsciiHead', 'readAsciiTable', 'reg_multlin_stats',
+                'region_ind', 'regline_stats', 'relhum_ttd', 'replaceSingleChar',
+                'RGBtoCmap', 'rgrid2rcm_Wrap', 'rho_mwjf', 'rm_single_dims',
+                'rmAnnCycle1D', 'rmInsufData', 'rmMonAnnCycLLLT', 'rmMonAnnCycLLT',
+                'rmMonAnnCycTLL', 'runave_n_Wrap', 'runave_Wrap', 'short2flt',
+                'short2flt_hdf', 'shsgc_R42_Wrap', 'sign_f90', 'sign_matlab',
+                'smth9_Wrap', 'smthClmDayTLL', 'smthClmDayTLLL', 'SqrtCosWeight',
+                'stat_dispersion', 'static_stability', 'stdMonLLLT', 'stdMonLLT',
+                'stdMonTLL', 'stdMonTLLL', 'symMinMaxPlt', 'table_attach_columns',
+                'table_attach_rows', 'time_to_newtime', 'transpose',
+                'triple2grid_Wrap', 'ut_convert', 'uv2dvF_Wrap', 'uv2dvG_Wrap',
+                'uv2vrF_Wrap', 'uv2vrG_Wrap', 'vr2uvF_Wrap', 'vr2uvG_Wrap',
+                'w_to_omega', 'wallClockElapseTime', 'wave_number_spc',
+                'wgt_areaave_Wrap', 'wgt_runave_leftdim', 'wgt_runave_n_Wrap',
+                'wgt_runave_Wrap', 'wgt_vertical_n', 'wind_component',
+                'wind_direction', 'yyyyddd_to_yyyymmdd', 'yyyymm_time',
+                'yyyymm_to_yyyyfrac', 'yyyymmdd_time', 'yyyymmdd_to_yyyyddd',
+                'yyyymmdd_to_yyyyfrac', 'yyyymmddhh_time', 'yyyymmddhh_to_yyyyfrac',
+                'zonal_mpsi_Wrap', 'zonalAve', 'calendar_decode2', 'cd_string',
+                'kf_filter', 'run_cor', 'time_axis_labels', 'ut_string',
+                'wrf_contour', 'wrf_map', 'wrf_map_overlay', 'wrf_map_overlays',
+                'wrf_map_resources', 'wrf_map_zoom', 'wrf_overlay', 'wrf_overlays',
+                'wrf_user_getvar', 'wrf_user_ij_to_ll', 'wrf_user_intrp2d',
+                'wrf_user_intrp3d', 'wrf_user_latlon_to_ij', 'wrf_user_list_times',
+                'wrf_user_ll_to_ij', 'wrf_user_unstagger', 'wrf_user_vert_interp',
+                'wrf_vector', 'gsn_add_annotation', 'gsn_add_polygon',
+                'gsn_add_polyline', 'gsn_add_polymarker',
+                'gsn_add_shapefile_polygons', 'gsn_add_shapefile_polylines',
+                'gsn_add_shapefile_polymarkers', 'gsn_add_text', 'gsn_attach_plots',
+                'gsn_blank_plot', 'gsn_contour', 'gsn_contour_map',
+                'gsn_contour_shade', 'gsn_coordinates', 'gsn_create_labelbar',
+                'gsn_create_legend', 'gsn_create_text',
+                'gsn_csm_attach_zonal_means', 'gsn_csm_blank_plot',
+                'gsn_csm_contour', 'gsn_csm_contour_map', 'gsn_csm_contour_map_ce',
+                'gsn_csm_contour_map_overlay', 'gsn_csm_contour_map_polar',
+                'gsn_csm_hov', 'gsn_csm_lat_time', 'gsn_csm_map', 'gsn_csm_map_ce',
+                'gsn_csm_map_polar', 'gsn_csm_pres_hgt',
+                'gsn_csm_pres_hgt_streamline', 'gsn_csm_pres_hgt_vector',
+                'gsn_csm_streamline', 'gsn_csm_streamline_contour_map',
+                'gsn_csm_streamline_contour_map_ce',
+                'gsn_csm_streamline_contour_map_polar', 'gsn_csm_streamline_map',
+                'gsn_csm_streamline_map_ce', 'gsn_csm_streamline_map_polar',
+                'gsn_csm_streamline_scalar', 'gsn_csm_streamline_scalar_map',
+                'gsn_csm_streamline_scalar_map_ce',
+                'gsn_csm_streamline_scalar_map_polar', 'gsn_csm_time_lat',
+                'gsn_csm_vector', 'gsn_csm_vector_map', 'gsn_csm_vector_map_ce',
+                'gsn_csm_vector_map_polar', 'gsn_csm_vector_scalar',
+                'gsn_csm_vector_scalar_map', 'gsn_csm_vector_scalar_map_ce',
+                'gsn_csm_vector_scalar_map_polar', 'gsn_csm_x2y', 'gsn_csm_x2y2',
+                'gsn_csm_xy', 'gsn_csm_xy2', 'gsn_csm_xy3', 'gsn_csm_y',
+                'gsn_define_colormap', 'gsn_draw_colormap', 'gsn_draw_named_colors',
+                'gsn_histogram', 'gsn_labelbar_ndc', 'gsn_legend_ndc', 'gsn_map',
+                'gsn_merge_colormaps', 'gsn_open_wks', 'gsn_panel', 'gsn_polygon',
+                'gsn_polygon_ndc', 'gsn_polyline', 'gsn_polyline_ndc',
+                'gsn_polymarker', 'gsn_polymarker_ndc', 'gsn_retrieve_colormap',
+                'gsn_reverse_colormap', 'gsn_streamline', 'gsn_streamline_map',
+                'gsn_streamline_scalar', 'gsn_streamline_scalar_map', 'gsn_table',
+                'gsn_text', 'gsn_text_ndc', 'gsn_vector', 'gsn_vector_map',
+                'gsn_vector_scalar', 'gsn_vector_scalar_map', 'gsn_xy', 'gsn_y',
+                'hsv2rgb', 'maximize_output', 'namedcolor2rgb', 'namedcolor2rgba',
+                'reset_device_coordinates', 'span_named_colors'), prefix=r'\b'),
+             Name.Builtin),
+
+            # Resources
+            (words((
+                'amDataXF', 'amDataYF', 'amJust', 'amOn', 'amOrthogonalPosF',
+                'amParallelPosF', 'amResizeNotify', 'amSide', 'amTrackData',
+                'amViewId', 'amZone', 'appDefaultParent', 'appFileSuffix',
+                'appResources', 'appSysDir', 'appUsrDir', 'caCopyArrays',
+                'caXArray', 'caXCast', 'caXMaxV', 'caXMinV', 'caXMissingV',
+                'caYArray', 'caYCast', 'caYMaxV', 'caYMinV', 'caYMissingV',
+                'cnCellFillEdgeColor', 'cnCellFillMissingValEdgeColor',
+                'cnConpackParams', 'cnConstFEnableFill', 'cnConstFLabelAngleF',
+                'cnConstFLabelBackgroundColor', 'cnConstFLabelConstantSpacingF',
+                'cnConstFLabelFont', 'cnConstFLabelFontAspectF',
+                'cnConstFLabelFontColor', 'cnConstFLabelFontHeightF',
+                'cnConstFLabelFontQuality', 'cnConstFLabelFontThicknessF',
+                'cnConstFLabelFormat', 'cnConstFLabelFuncCode', 'cnConstFLabelJust',
+                'cnConstFLabelOn', 'cnConstFLabelOrthogonalPosF',
+                'cnConstFLabelParallelPosF', 'cnConstFLabelPerimColor',
+                'cnConstFLabelPerimOn', 'cnConstFLabelPerimSpaceF',
+                'cnConstFLabelPerimThicknessF', 'cnConstFLabelSide',
+                'cnConstFLabelString', 'cnConstFLabelTextDirection',
+                'cnConstFLabelZone', 'cnConstFUseInfoLabelRes',
+                'cnExplicitLabelBarLabelsOn', 'cnExplicitLegendLabelsOn',
+                'cnExplicitLineLabelsOn', 'cnFillBackgroundColor', 'cnFillColor',
+                'cnFillColors', 'cnFillDotSizeF', 'cnFillDrawOrder', 'cnFillMode',
+                'cnFillOn', 'cnFillOpacityF', 'cnFillPalette', 'cnFillPattern',
+                'cnFillPatterns', 'cnFillScaleF', 'cnFillScales', 'cnFixFillBleed',
+                'cnGridBoundFillColor', 'cnGridBoundFillPattern',
+                'cnGridBoundFillScaleF', 'cnGridBoundPerimColor',
+                'cnGridBoundPerimDashPattern', 'cnGridBoundPerimOn',
+                'cnGridBoundPerimThicknessF', 'cnHighLabelAngleF',
+                'cnHighLabelBackgroundColor', 'cnHighLabelConstantSpacingF',
+                'cnHighLabelCount', 'cnHighLabelFont', 'cnHighLabelFontAspectF',
+                'cnHighLabelFontColor', 'cnHighLabelFontHeightF',
+                'cnHighLabelFontQuality', 'cnHighLabelFontThicknessF',
+                'cnHighLabelFormat', 'cnHighLabelFuncCode', 'cnHighLabelPerimColor',
+                'cnHighLabelPerimOn', 'cnHighLabelPerimSpaceF',
+                'cnHighLabelPerimThicknessF', 'cnHighLabelString', 'cnHighLabelsOn',
+                'cnHighLowLabelOverlapMode', 'cnHighUseLineLabelRes',
+                'cnInfoLabelAngleF', 'cnInfoLabelBackgroundColor',
+                'cnInfoLabelConstantSpacingF', 'cnInfoLabelFont',
+                'cnInfoLabelFontAspectF', 'cnInfoLabelFontColor',
+                'cnInfoLabelFontHeightF', 'cnInfoLabelFontQuality',
+                'cnInfoLabelFontThicknessF', 'cnInfoLabelFormat',
+                'cnInfoLabelFuncCode', 'cnInfoLabelJust', 'cnInfoLabelOn',
+                'cnInfoLabelOrthogonalPosF', 'cnInfoLabelParallelPosF',
+                'cnInfoLabelPerimColor', 'cnInfoLabelPerimOn',
+                'cnInfoLabelPerimSpaceF', 'cnInfoLabelPerimThicknessF',
+                'cnInfoLabelSide', 'cnInfoLabelString', 'cnInfoLabelTextDirection',
+                'cnInfoLabelZone', 'cnLabelBarEndLabelsOn', 'cnLabelBarEndStyle',
+                'cnLabelDrawOrder', 'cnLabelMasking', 'cnLabelScaleFactorF',
+                'cnLabelScaleValueF', 'cnLabelScalingMode', 'cnLegendLevelFlags',
+                'cnLevelCount', 'cnLevelFlag', 'cnLevelFlags', 'cnLevelSelectionMode',
+                'cnLevelSpacingF', 'cnLevels', 'cnLineColor', 'cnLineColors',
+                'cnLineDashPattern', 'cnLineDashPatterns', 'cnLineDashSegLenF',
+                'cnLineDrawOrder', 'cnLineLabelAngleF', 'cnLineLabelBackgroundColor',
+                'cnLineLabelConstantSpacingF', 'cnLineLabelCount',
+                'cnLineLabelDensityF', 'cnLineLabelFont', 'cnLineLabelFontAspectF',
+                'cnLineLabelFontColor', 'cnLineLabelFontColors',
+                'cnLineLabelFontHeightF', 'cnLineLabelFontQuality',
+                'cnLineLabelFontThicknessF', 'cnLineLabelFormat',
+                'cnLineLabelFuncCode', 'cnLineLabelInterval', 'cnLineLabelPerimColor',
+                'cnLineLabelPerimOn', 'cnLineLabelPerimSpaceF',
+                'cnLineLabelPerimThicknessF', 'cnLineLabelPlacementMode',
+                'cnLineLabelStrings', 'cnLineLabelsOn', 'cnLinePalette',
+                'cnLineThicknessF', 'cnLineThicknesses', 'cnLinesOn',
+                'cnLowLabelAngleF', 'cnLowLabelBackgroundColor',
+                'cnLowLabelConstantSpacingF', 'cnLowLabelCount', 'cnLowLabelFont',
+                'cnLowLabelFontAspectF', 'cnLowLabelFontColor',
+                'cnLowLabelFontHeightF', 'cnLowLabelFontQuality',
+                'cnLowLabelFontThicknessF', 'cnLowLabelFormat', 'cnLowLabelFuncCode',
+                'cnLowLabelPerimColor', 'cnLowLabelPerimOn', 'cnLowLabelPerimSpaceF',
+                'cnLowLabelPerimThicknessF', 'cnLowLabelString', 'cnLowLabelsOn',
+                'cnLowUseHighLabelRes', 'cnMaxDataValueFormat', 'cnMaxLevelCount',
+                'cnMaxLevelValF', 'cnMaxPointDistanceF', 'cnMinLevelValF',
+                'cnMissingValFillColor', 'cnMissingValFillPattern',
+                'cnMissingValFillScaleF', 'cnMissingValPerimColor',
+                'cnMissingValPerimDashPattern', 'cnMissingValPerimGridBoundOn',
+                'cnMissingValPerimOn', 'cnMissingValPerimThicknessF',
+                'cnMonoFillColor', 'cnMonoFillPattern', 'cnMonoFillScale',
+                'cnMonoLevelFlag', 'cnMonoLineColor', 'cnMonoLineDashPattern',
+                'cnMonoLineLabelFontColor', 'cnMonoLineThickness', 'cnNoDataLabelOn',
+                'cnNoDataLabelString', 'cnOutOfRangeFillColor',
+                'cnOutOfRangeFillPattern', 'cnOutOfRangeFillScaleF',
+                'cnOutOfRangePerimColor', 'cnOutOfRangePerimDashPattern',
+                'cnOutOfRangePerimOn', 'cnOutOfRangePerimThicknessF',
+                'cnRasterCellSizeF', 'cnRasterMinCellSizeF', 'cnRasterModeOn',
+                'cnRasterSampleFactorF', 'cnRasterSmoothingOn', 'cnScalarFieldData',
+                'cnSmoothingDistanceF', 'cnSmoothingOn', 'cnSmoothingTensionF',
+                'cnSpanFillPalette', 'cnSpanLinePalette', 'ctCopyTables',
+                'ctXElementSize', 'ctXMaxV', 'ctXMinV', 'ctXMissingV', 'ctXTable',
+                'ctXTableLengths', 'ctXTableType', 'ctYElementSize', 'ctYMaxV',
+                'ctYMinV', 'ctYMissingV', 'ctYTable', 'ctYTableLengths',
+                'ctYTableType', 'dcDelayCompute', 'errBuffer',
+                'errFileName', 'errFilePtr', 'errLevel', 'errPrint', 'errUnitNumber',
+                'gsClipOn', 'gsColors', 'gsEdgeColor', 'gsEdgeDashPattern',
+                'gsEdgeDashSegLenF', 'gsEdgeThicknessF', 'gsEdgesOn',
+                'gsFillBackgroundColor', 'gsFillColor', 'gsFillDotSizeF',
+                'gsFillIndex', 'gsFillLineThicknessF', 'gsFillOpacityF',
+                'gsFillScaleF', 'gsFont', 'gsFontAspectF', 'gsFontColor',
+                'gsFontHeightF', 'gsFontOpacityF', 'gsFontQuality',
+                'gsFontThicknessF', 'gsLineColor', 'gsLineDashPattern',
+                'gsLineDashSegLenF', 'gsLineLabelConstantSpacingF', 'gsLineLabelFont',
+                'gsLineLabelFontAspectF', 'gsLineLabelFontColor',
+                'gsLineLabelFontHeightF', 'gsLineLabelFontQuality',
+                'gsLineLabelFontThicknessF', 'gsLineLabelFuncCode',
+                'gsLineLabelString', 'gsLineOpacityF', 'gsLineThicknessF',
+                'gsMarkerColor', 'gsMarkerIndex', 'gsMarkerOpacityF', 'gsMarkerSizeF',
+                'gsMarkerThicknessF', 'gsSegments', 'gsTextAngleF',
+                'gsTextConstantSpacingF', 'gsTextDirection', 'gsTextFuncCode',
+                'gsTextJustification', 'gsnAboveYRefLineBarColors',
+                'gsnAboveYRefLineBarFillScales', 'gsnAboveYRefLineBarPatterns',
+                'gsnAboveYRefLineColor', 'gsnAddCyclic', 'gsnAttachBorderOn',
+                'gsnAttachPlotsXAxis', 'gsnBelowYRefLineBarColors',
+                'gsnBelowYRefLineBarFillScales', 'gsnBelowYRefLineBarPatterns',
+                'gsnBelowYRefLineColor', 'gsnBoxMargin', 'gsnCenterString',
+                'gsnCenterStringFontColor', 'gsnCenterStringFontHeightF',
+                'gsnCenterStringFuncCode', 'gsnCenterStringOrthogonalPosF',
+                'gsnCenterStringParallelPosF', 'gsnContourLineThicknessesScale',
+                'gsnContourNegLineDashPattern', 'gsnContourPosLineDashPattern',
+                'gsnContourZeroLineThicknessF', 'gsnDebugWriteFileName', 'gsnDraw',
+                'gsnFrame', 'gsnHistogramBarWidthPercent', 'gsnHistogramBinIntervals',
+                'gsnHistogramBinMissing', 'gsnHistogramBinWidth',
+                'gsnHistogramClassIntervals', 'gsnHistogramCompare',
+                'gsnHistogramComputePercentages',
+                'gsnHistogramComputePercentagesNoMissing',
+                'gsnHistogramDiscreteBinValues', 'gsnHistogramDiscreteClassValues',
+                'gsnHistogramHorizontal', 'gsnHistogramMinMaxBinsOn',
+                'gsnHistogramNumberOfBins', 'gsnHistogramPercentSign',
+                'gsnHistogramSelectNiceIntervals', 'gsnLeftString',
+                'gsnLeftStringFontColor', 'gsnLeftStringFontHeightF',
+                'gsnLeftStringFuncCode', 'gsnLeftStringOrthogonalPosF',
+                'gsnLeftStringParallelPosF', 'gsnMajorLatSpacing',
+                'gsnMajorLonSpacing', 'gsnMaskLambertConformal',
+                'gsnMaskLambertConformalOutlineOn', 'gsnMaximize',
+                'gsnMinorLatSpacing', 'gsnMinorLonSpacing', 'gsnPanelBottom',
+                'gsnPanelCenter', 'gsnPanelDebug', 'gsnPanelFigureStrings',
+                'gsnPanelFigureStringsBackgroundFillColor',
+                'gsnPanelFigureStringsFontHeightF', 'gsnPanelFigureStringsJust',
+                'gsnPanelFigureStringsPerimOn', 'gsnPanelLabelBar', 'gsnPanelLeft',
+                'gsnPanelMainFont', 'gsnPanelMainFontColor',
+                'gsnPanelMainFontHeightF', 'gsnPanelMainString', 'gsnPanelRight',
+                'gsnPanelRowSpec', 'gsnPanelScalePlotIndex', 'gsnPanelTop',
+                'gsnPanelXF', 'gsnPanelXWhiteSpacePercent', 'gsnPanelYF',
+                'gsnPanelYWhiteSpacePercent', 'gsnPaperHeight', 'gsnPaperMargin',
+                'gsnPaperOrientation', 'gsnPaperWidth', 'gsnPolar',
+                'gsnPolarLabelDistance', 'gsnPolarLabelFont',
+                'gsnPolarLabelFontHeightF', 'gsnPolarLabelSpacing', 'gsnPolarTime',
+                'gsnPolarUT', 'gsnRightString', 'gsnRightStringFontColor',
+                'gsnRightStringFontHeightF', 'gsnRightStringFuncCode',
+                'gsnRightStringOrthogonalPosF', 'gsnRightStringParallelPosF',
+                'gsnScalarContour', 'gsnScale', 'gsnShape', 'gsnSpreadColorEnd',
+                'gsnSpreadColorStart', 'gsnSpreadColors', 'gsnStringFont',
+                'gsnStringFontColor', 'gsnStringFontHeightF', 'gsnStringFuncCode',
+                'gsnTickMarksOn', 'gsnXAxisIrregular2Linear', 'gsnXAxisIrregular2Log',
+                'gsnXRefLine', 'gsnXRefLineColor', 'gsnXRefLineDashPattern',
+                'gsnXRefLineThicknessF', 'gsnXYAboveFillColors', 'gsnXYBarChart',
+                'gsnXYBarChartBarWidth', 'gsnXYBarChartColors',
+                'gsnXYBarChartColors2', 'gsnXYBarChartFillDotSizeF',
+                'gsnXYBarChartFillLineThicknessF', 'gsnXYBarChartFillOpacityF',
+                'gsnXYBarChartFillScaleF', 'gsnXYBarChartOutlineOnly',
+                'gsnXYBarChartOutlineThicknessF', 'gsnXYBarChartPatterns',
+                'gsnXYBarChartPatterns2', 'gsnXYBelowFillColors', 'gsnXYFillColors',
+                'gsnXYFillOpacities', 'gsnXYLeftFillColors', 'gsnXYRightFillColors',
+                'gsnYAxisIrregular2Linear', 'gsnYAxisIrregular2Log', 'gsnYRefLine',
+                'gsnYRefLineColor', 'gsnYRefLineColors', 'gsnYRefLineDashPattern',
+                'gsnYRefLineDashPatterns', 'gsnYRefLineThicknessF',
+                'gsnYRefLineThicknesses', 'gsnZonalMean', 'gsnZonalMeanXMaxF',
+                'gsnZonalMeanXMinF', 'gsnZonalMeanYRefLine', 'lbAutoManage',
+                'lbBottomMarginF', 'lbBoxCount', 'lbBoxEndCapStyle', 'lbBoxFractions',
+                'lbBoxLineColor', 'lbBoxLineDashPattern', 'lbBoxLineDashSegLenF',
+                'lbBoxLineThicknessF', 'lbBoxLinesOn', 'lbBoxMajorExtentF',
+                'lbBoxMinorExtentF', 'lbBoxSeparatorLinesOn', 'lbBoxSizing',
+                'lbFillBackground', 'lbFillColor', 'lbFillColors', 'lbFillDotSizeF',
+                'lbFillLineThicknessF', 'lbFillPattern', 'lbFillPatterns',
+                'lbFillScaleF', 'lbFillScales', 'lbJustification', 'lbLabelAlignment',
+                'lbLabelAngleF', 'lbLabelAutoStride', 'lbLabelBarOn',
+                'lbLabelConstantSpacingF', 'lbLabelDirection', 'lbLabelFont',
+                'lbLabelFontAspectF', 'lbLabelFontColor', 'lbLabelFontHeightF',
+                'lbLabelFontQuality', 'lbLabelFontThicknessF', 'lbLabelFuncCode',
+                'lbLabelJust', 'lbLabelOffsetF', 'lbLabelPosition', 'lbLabelStride',
+                'lbLabelStrings', 'lbLabelsOn', 'lbLeftMarginF', 'lbMaxLabelLenF',
+                'lbMinLabelSpacingF', 'lbMonoFillColor', 'lbMonoFillPattern',
+                'lbMonoFillScale', 'lbOrientation', 'lbPerimColor',
+                'lbPerimDashPattern', 'lbPerimDashSegLenF', 'lbPerimFill',
+                'lbPerimFillColor', 'lbPerimOn', 'lbPerimThicknessF',
+                'lbRasterFillOn', 'lbRightMarginF', 'lbTitleAngleF',
+                'lbTitleConstantSpacingF', 'lbTitleDirection', 'lbTitleExtentF',
+                'lbTitleFont', 'lbTitleFontAspectF', 'lbTitleFontColor',
+                'lbTitleFontHeightF', 'lbTitleFontQuality', 'lbTitleFontThicknessF',
+                'lbTitleFuncCode', 'lbTitleJust', 'lbTitleOffsetF', 'lbTitleOn',
+                'lbTitlePosition', 'lbTitleString', 'lbTopMarginF', 'lgAutoManage',
+                'lgBottomMarginF', 'lgBoxBackground', 'lgBoxLineColor',
+                'lgBoxLineDashPattern', 'lgBoxLineDashSegLenF', 'lgBoxLineThicknessF',
+                'lgBoxLinesOn', 'lgBoxMajorExtentF', 'lgBoxMinorExtentF',
+                'lgDashIndex', 'lgDashIndexes', 'lgItemCount', 'lgItemOrder',
+                'lgItemPlacement', 'lgItemPositions', 'lgItemType', 'lgItemTypes',
+                'lgJustification', 'lgLabelAlignment', 'lgLabelAngleF',
+                'lgLabelAutoStride', 'lgLabelConstantSpacingF', 'lgLabelDirection',
+                'lgLabelFont', 'lgLabelFontAspectF', 'lgLabelFontColor',
+                'lgLabelFontHeightF', 'lgLabelFontQuality', 'lgLabelFontThicknessF',
+                'lgLabelFuncCode', 'lgLabelJust', 'lgLabelOffsetF', 'lgLabelPosition',
+                'lgLabelStride', 'lgLabelStrings', 'lgLabelsOn', 'lgLeftMarginF',
+                'lgLegendOn', 'lgLineColor', 'lgLineColors', 'lgLineDashSegLenF',
+                'lgLineDashSegLens', 'lgLineLabelConstantSpacingF', 'lgLineLabelFont',
+                'lgLineLabelFontAspectF', 'lgLineLabelFontColor',
+                'lgLineLabelFontColors', 'lgLineLabelFontHeightF',
+                'lgLineLabelFontHeights', 'lgLineLabelFontQuality',
+                'lgLineLabelFontThicknessF', 'lgLineLabelFuncCode',
+                'lgLineLabelStrings', 'lgLineLabelsOn', 'lgLineThicknessF',
+                'lgLineThicknesses', 'lgMarkerColor', 'lgMarkerColors',
+                'lgMarkerIndex', 'lgMarkerIndexes', 'lgMarkerSizeF', 'lgMarkerSizes',
+                'lgMarkerThicknessF', 'lgMarkerThicknesses', 'lgMonoDashIndex',
+                'lgMonoItemType', 'lgMonoLineColor', 'lgMonoLineDashSegLen',
+                'lgMonoLineLabelFontColor', 'lgMonoLineLabelFontHeight',
+                'lgMonoLineThickness', 'lgMonoMarkerColor', 'lgMonoMarkerIndex',
+                'lgMonoMarkerSize', 'lgMonoMarkerThickness', 'lgOrientation',
+                'lgPerimColor', 'lgPerimDashPattern', 'lgPerimDashSegLenF',
+                'lgPerimFill', 'lgPerimFillColor', 'lgPerimOn', 'lgPerimThicknessF',
+                'lgRightMarginF', 'lgTitleAngleF', 'lgTitleConstantSpacingF',
+                'lgTitleDirection', 'lgTitleExtentF', 'lgTitleFont',
+                'lgTitleFontAspectF', 'lgTitleFontColor', 'lgTitleFontHeightF',
+                'lgTitleFontQuality', 'lgTitleFontThicknessF', 'lgTitleFuncCode',
+                'lgTitleJust', 'lgTitleOffsetF', 'lgTitleOn', 'lgTitlePosition',
+                'lgTitleString', 'lgTopMarginF', 'mpAreaGroupCount',
+                'mpAreaMaskingOn', 'mpAreaNames', 'mpAreaTypes', 'mpBottomAngleF',
+                'mpBottomMapPosF', 'mpBottomNDCF', 'mpBottomNPCF',
+                'mpBottomPointLatF', 'mpBottomPointLonF', 'mpBottomWindowF',
+                'mpCenterLatF', 'mpCenterLonF', 'mpCenterRotF', 'mpCountyLineColor',
+                'mpCountyLineDashPattern', 'mpCountyLineDashSegLenF',
+                'mpCountyLineThicknessF', 'mpDataBaseVersion', 'mpDataResolution',
+                'mpDataSetName', 'mpDefaultFillColor', 'mpDefaultFillPattern',
+                'mpDefaultFillScaleF', 'mpDynamicAreaGroups', 'mpEllipticalBoundary',
+                'mpFillAreaSpecifiers', 'mpFillBoundarySets', 'mpFillColor',
+                'mpFillColors', 'mpFillColors-default', 'mpFillDotSizeF',
+                'mpFillDrawOrder', 'mpFillOn', 'mpFillPatternBackground',
+                'mpFillPattern', 'mpFillPatterns', 'mpFillPatterns-default',
+                'mpFillScaleF', 'mpFillScales', 'mpFillScales-default',
+                'mpFixedAreaGroups', 'mpGeophysicalLineColor',
+                'mpGeophysicalLineDashPattern', 'mpGeophysicalLineDashSegLenF',
+                'mpGeophysicalLineThicknessF', 'mpGreatCircleLinesOn',
+                'mpGridAndLimbDrawOrder', 'mpGridAndLimbOn', 'mpGridLatSpacingF',
+                'mpGridLineColor', 'mpGridLineDashPattern', 'mpGridLineDashSegLenF',
+                'mpGridLineThicknessF', 'mpGridLonSpacingF', 'mpGridMaskMode',
+                'mpGridMaxLatF', 'mpGridPolarLonSpacingF', 'mpGridSpacingF',
+                'mpInlandWaterFillColor', 'mpInlandWaterFillPattern',
+                'mpInlandWaterFillScaleF', 'mpLabelDrawOrder', 'mpLabelFontColor',
+                'mpLabelFontHeightF', 'mpLabelsOn', 'mpLambertMeridianF',
+                'mpLambertParallel1F', 'mpLambertParallel2F', 'mpLandFillColor',
+                'mpLandFillPattern', 'mpLandFillScaleF', 'mpLeftAngleF',
+                'mpLeftCornerLatF', 'mpLeftCornerLonF', 'mpLeftMapPosF',
+                'mpLeftNDCF', 'mpLeftNPCF', 'mpLeftPointLatF',
+                'mpLeftPointLonF', 'mpLeftWindowF', 'mpLimbLineColor',
+                'mpLimbLineDashPattern', 'mpLimbLineDashSegLenF',
+                'mpLimbLineThicknessF', 'mpLimitMode', 'mpMaskAreaSpecifiers',
+                'mpMaskOutlineSpecifiers', 'mpMaxLatF', 'mpMaxLonF',
+                'mpMinLatF', 'mpMinLonF', 'mpMonoFillColor', 'mpMonoFillPattern',
+                'mpMonoFillScale', 'mpNationalLineColor', 'mpNationalLineDashPattern',
+                'mpNationalLineThicknessF', 'mpOceanFillColor', 'mpOceanFillPattern',
+                'mpOceanFillScaleF', 'mpOutlineBoundarySets', 'mpOutlineDrawOrder',
+                'mpOutlineMaskingOn', 'mpOutlineOn', 'mpOutlineSpecifiers',
+                'mpPerimDrawOrder', 'mpPerimLineColor', 'mpPerimLineDashPattern',
+                'mpPerimLineDashSegLenF', 'mpPerimLineThicknessF', 'mpPerimOn',
+                'mpPolyMode', 'mpProjection', 'mpProvincialLineColor',
+                'mpProvincialLineDashPattern', 'mpProvincialLineDashSegLenF',
+                'mpProvincialLineThicknessF', 'mpRelativeCenterLat',
+                'mpRelativeCenterLon', 'mpRightAngleF', 'mpRightCornerLatF',
+                'mpRightCornerLonF', 'mpRightMapPosF', 'mpRightNDCF',
+                'mpRightNPCF', 'mpRightPointLatF', 'mpRightPointLonF',
+                'mpRightWindowF', 'mpSatelliteAngle1F', 'mpSatelliteAngle2F',
+                'mpSatelliteDistF', 'mpShapeMode', 'mpSpecifiedFillColors',
+                'mpSpecifiedFillDirectIndexing', 'mpSpecifiedFillPatterns',
+                'mpSpecifiedFillPriority', 'mpSpecifiedFillScales',
+                'mpTopAngleF', 'mpTopMapPosF', 'mpTopNDCF', 'mpTopNPCF',
+                'mpTopPointLatF', 'mpTopPointLonF', 'mpTopWindowF',
+                'mpUSStateLineColor', 'mpUSStateLineDashPattern',
+                'mpUSStateLineDashSegLenF', 'mpUSStateLineThicknessF',
+                'pmAnnoManagers', 'pmAnnoViews', 'pmLabelBarDisplayMode',
+                'pmLabelBarHeightF', 'pmLabelBarKeepAspect', 'pmLabelBarOrthogonalPosF',
+                'pmLabelBarParallelPosF', 'pmLabelBarSide', 'pmLabelBarWidthF',
+                'pmLabelBarZone', 'pmLegendDisplayMode', 'pmLegendHeightF',
+                'pmLegendKeepAspect', 'pmLegendOrthogonalPosF',
+                'pmLegendParallelPosF', 'pmLegendSide', 'pmLegendWidthF',
+                'pmLegendZone', 'pmOverlaySequenceIds', 'pmTickMarkDisplayMode',
+                'pmTickMarkZone', 'pmTitleDisplayMode', 'pmTitleZone',
+                'prGraphicStyle', 'prPolyType', 'prXArray', 'prYArray',
+                'sfCopyData', 'sfDataArray', 'sfDataMaxV', 'sfDataMinV',
+                'sfElementNodes', 'sfExchangeDimensions', 'sfFirstNodeIndex',
+                'sfMissingValueV', 'sfXArray', 'sfXCActualEndF', 'sfXCActualStartF',
+                'sfXCEndIndex', 'sfXCEndSubsetV', 'sfXCEndV', 'sfXCStartIndex',
+                'sfXCStartSubsetV', 'sfXCStartV', 'sfXCStride', 'sfXCellBounds',
+                'sfYArray', 'sfYCActualEndF', 'sfYCActualStartF', 'sfYCEndIndex',
+                'sfYCEndSubsetV', 'sfYCEndV', 'sfYCStartIndex', 'sfYCStartSubsetV',
+                'sfYCStartV', 'sfYCStride', 'sfYCellBounds', 'stArrowLengthF',
+                'stArrowStride', 'stCrossoverCheckCount',
+                'stExplicitLabelBarLabelsOn', 'stLabelBarEndLabelsOn',
+                'stLabelFormat', 'stLengthCheckCount', 'stLevelColors',
+                'stLevelCount', 'stLevelPalette', 'stLevelSelectionMode',
+                'stLevelSpacingF', 'stLevels', 'stLineColor', 'stLineOpacityF',
+                'stLineStartStride', 'stLineThicknessF', 'stMapDirection',
+                'stMaxLevelCount', 'stMaxLevelValF', 'stMinArrowSpacingF',
+                'stMinDistanceF', 'stMinLevelValF', 'stMinLineSpacingF',
+                'stMinStepFactorF', 'stMonoLineColor', 'stNoDataLabelOn',
+                'stNoDataLabelString', 'stScalarFieldData', 'stScalarMissingValColor',
+                'stSpanLevelPalette', 'stStepSizeF', 'stStreamlineDrawOrder',
+                'stUseScalarArray', 'stVectorFieldData', 'stZeroFLabelAngleF',
+                'stZeroFLabelBackgroundColor', 'stZeroFLabelConstantSpacingF',
+                'stZeroFLabelFont', 'stZeroFLabelFontAspectF',
+                'stZeroFLabelFontColor', 'stZeroFLabelFontHeightF',
+                'stZeroFLabelFontQuality', 'stZeroFLabelFontThicknessF',
+                'stZeroFLabelFuncCode', 'stZeroFLabelJust', 'stZeroFLabelOn',
+                'stZeroFLabelOrthogonalPosF', 'stZeroFLabelParallelPosF',
+                'stZeroFLabelPerimColor', 'stZeroFLabelPerimOn',
+                'stZeroFLabelPerimSpaceF', 'stZeroFLabelPerimThicknessF',
+                'stZeroFLabelSide', 'stZeroFLabelString', 'stZeroFLabelTextDirection',
+                'stZeroFLabelZone', 'tfDoNDCOverlay', 'tfPlotManagerOn',
+                'tfPolyDrawList', 'tfPolyDrawOrder', 'tiDeltaF', 'tiMainAngleF',
+                'tiMainConstantSpacingF', 'tiMainDirection', 'tiMainFont',
+                'tiMainFontAspectF', 'tiMainFontColor', 'tiMainFontHeightF',
+                'tiMainFontQuality', 'tiMainFontThicknessF', 'tiMainFuncCode',
+                'tiMainJust', 'tiMainOffsetXF', 'tiMainOffsetYF', 'tiMainOn',
+                'tiMainPosition', 'tiMainSide', 'tiMainString', 'tiUseMainAttributes',
+                'tiXAxisAngleF', 'tiXAxisConstantSpacingF', 'tiXAxisDirection',
+                'tiXAxisFont', 'tiXAxisFontAspectF', 'tiXAxisFontColor',
+                'tiXAxisFontHeightF', 'tiXAxisFontQuality', 'tiXAxisFontThicknessF',
+                'tiXAxisFuncCode', 'tiXAxisJust', 'tiXAxisOffsetXF',
+                'tiXAxisOffsetYF', 'tiXAxisOn', 'tiXAxisPosition', 'tiXAxisSide',
+                'tiXAxisString', 'tiYAxisAngleF', 'tiYAxisConstantSpacingF',
+                'tiYAxisDirection', 'tiYAxisFont', 'tiYAxisFontAspectF',
+                'tiYAxisFontColor', 'tiYAxisFontHeightF', 'tiYAxisFontQuality',
+                'tiYAxisFontThicknessF', 'tiYAxisFuncCode', 'tiYAxisJust',
+                'tiYAxisOffsetXF', 'tiYAxisOffsetYF', 'tiYAxisOn', 'tiYAxisPosition',
+                'tiYAxisSide', 'tiYAxisString', 'tmBorderLineColor',
+                'tmBorderThicknessF', 'tmEqualizeXYSizes', 'tmLabelAutoStride',
+                'tmSciNoteCutoff', 'tmXBAutoPrecision', 'tmXBBorderOn',
+                'tmXBDataLeftF', 'tmXBDataRightF', 'tmXBFormat', 'tmXBIrrTensionF',
+                'tmXBIrregularPoints', 'tmXBLabelAngleF', 'tmXBLabelConstantSpacingF',
+                'tmXBLabelDeltaF', 'tmXBLabelDirection', 'tmXBLabelFont',
+                'tmXBLabelFontAspectF', 'tmXBLabelFontColor', 'tmXBLabelFontHeightF',
+                'tmXBLabelFontQuality', 'tmXBLabelFontThicknessF',
+                'tmXBLabelFuncCode', 'tmXBLabelJust', 'tmXBLabelStride', 'tmXBLabels',
+                'tmXBLabelsOn', 'tmXBMajorLengthF', 'tmXBMajorLineColor',
+                'tmXBMajorOutwardLengthF', 'tmXBMajorThicknessF', 'tmXBMaxLabelLenF',
+                'tmXBMaxTicks', 'tmXBMinLabelSpacingF', 'tmXBMinorLengthF',
+                'tmXBMinorLineColor', 'tmXBMinorOn', 'tmXBMinorOutwardLengthF',
+                'tmXBMinorPerMajor', 'tmXBMinorThicknessF', 'tmXBMinorValues',
+                'tmXBMode', 'tmXBOn', 'tmXBPrecision', 'tmXBStyle', 'tmXBTickEndF',
+                'tmXBTickSpacingF', 'tmXBTickStartF', 'tmXBValues', 'tmXMajorGrid',
+                'tmXMajorGridLineColor', 'tmXMajorGridLineDashPattern',
+                'tmXMajorGridThicknessF', 'tmXMinorGrid', 'tmXMinorGridLineColor',
+                'tmXMinorGridLineDashPattern', 'tmXMinorGridThicknessF',
+                'tmXTAutoPrecision', 'tmXTBorderOn', 'tmXTDataLeftF',
+                'tmXTDataRightF', 'tmXTFormat', 'tmXTIrrTensionF',
+                'tmXTIrregularPoints', 'tmXTLabelAngleF', 'tmXTLabelConstantSpacingF',
+                'tmXTLabelDeltaF', 'tmXTLabelDirection', 'tmXTLabelFont',
+                'tmXTLabelFontAspectF', 'tmXTLabelFontColor', 'tmXTLabelFontHeightF',
+                'tmXTLabelFontQuality', 'tmXTLabelFontThicknessF',
+                'tmXTLabelFuncCode', 'tmXTLabelJust', 'tmXTLabelStride', 'tmXTLabels',
+                'tmXTLabelsOn', 'tmXTMajorLengthF', 'tmXTMajorLineColor',
+                'tmXTMajorOutwardLengthF', 'tmXTMajorThicknessF', 'tmXTMaxLabelLenF',
+                'tmXTMaxTicks', 'tmXTMinLabelSpacingF', 'tmXTMinorLengthF',
+                'tmXTMinorLineColor', 'tmXTMinorOn', 'tmXTMinorOutwardLengthF',
+                'tmXTMinorPerMajor', 'tmXTMinorThicknessF', 'tmXTMinorValues',
+                'tmXTMode', 'tmXTOn', 'tmXTPrecision', 'tmXTStyle', 'tmXTTickEndF',
+                'tmXTTickSpacingF', 'tmXTTickStartF', 'tmXTValues', 'tmXUseBottom',
+                'tmYLAutoPrecision', 'tmYLBorderOn', 'tmYLDataBottomF',
+                'tmYLDataTopF', 'tmYLFormat', 'tmYLIrrTensionF',
+                'tmYLIrregularPoints', 'tmYLLabelAngleF', 'tmYLLabelConstantSpacingF',
+                'tmYLLabelDeltaF', 'tmYLLabelDirection', 'tmYLLabelFont',
+                'tmYLLabelFontAspectF', 'tmYLLabelFontColor', 'tmYLLabelFontHeightF',
+                'tmYLLabelFontQuality', 'tmYLLabelFontThicknessF',
+                'tmYLLabelFuncCode', 'tmYLLabelJust', 'tmYLLabelStride', 'tmYLLabels',
+                'tmYLLabelsOn', 'tmYLMajorLengthF', 'tmYLMajorLineColor',
+                'tmYLMajorOutwardLengthF', 'tmYLMajorThicknessF', 'tmYLMaxLabelLenF',
+                'tmYLMaxTicks', 'tmYLMinLabelSpacingF', 'tmYLMinorLengthF',
+                'tmYLMinorLineColor', 'tmYLMinorOn', 'tmYLMinorOutwardLengthF',
+                'tmYLMinorPerMajor', 'tmYLMinorThicknessF', 'tmYLMinorValues',
+                'tmYLMode', 'tmYLOn', 'tmYLPrecision', 'tmYLStyle', 'tmYLTickEndF',
+                'tmYLTickSpacingF', 'tmYLTickStartF', 'tmYLValues', 'tmYMajorGrid',
+                'tmYMajorGridLineColor', 'tmYMajorGridLineDashPattern',
+                'tmYMajorGridThicknessF', 'tmYMinorGrid', 'tmYMinorGridLineColor',
+                'tmYMinorGridLineDashPattern', 'tmYMinorGridThicknessF',
+                'tmYRAutoPrecision', 'tmYRBorderOn', 'tmYRDataBottomF',
+                'tmYRDataTopF', 'tmYRFormat', 'tmYRIrrTensionF',
+                'tmYRIrregularPoints', 'tmYRLabelAngleF', 'tmYRLabelConstantSpacingF',
+                'tmYRLabelDeltaF', 'tmYRLabelDirection', 'tmYRLabelFont',
+                'tmYRLabelFontAspectF', 'tmYRLabelFontColor', 'tmYRLabelFontHeightF',
+                'tmYRLabelFontQuality', 'tmYRLabelFontThicknessF',
+                'tmYRLabelFuncCode', 'tmYRLabelJust', 'tmYRLabelStride', 'tmYRLabels',
+                'tmYRLabelsOn', 'tmYRMajorLengthF', 'tmYRMajorLineColor',
+                'tmYRMajorOutwardLengthF', 'tmYRMajorThicknessF', 'tmYRMaxLabelLenF',
+                'tmYRMaxTicks', 'tmYRMinLabelSpacingF', 'tmYRMinorLengthF',
+                'tmYRMinorLineColor', 'tmYRMinorOn', 'tmYRMinorOutwardLengthF',
+                'tmYRMinorPerMajor', 'tmYRMinorThicknessF', 'tmYRMinorValues',
+                'tmYRMode', 'tmYROn', 'tmYRPrecision', 'tmYRStyle', 'tmYRTickEndF',
+                'tmYRTickSpacingF', 'tmYRTickStartF', 'tmYRValues', 'tmYUseLeft',
+                'trGridType', 'trLineInterpolationOn',
+                'trXAxisType', 'trXCoordPoints', 'trXInterPoints', 'trXLog',
+                'trXMaxF', 'trXMinF', 'trXReverse', 'trXSamples', 'trXTensionF',
+                'trYAxisType', 'trYCoordPoints', 'trYInterPoints', 'trYLog',
+                'trYMaxF', 'trYMinF', 'trYReverse', 'trYSamples', 'trYTensionF',
+                'txAngleF', 'txBackgroundFillColor', 'txConstantSpacingF', 'txDirection',
+                'txFont', 'HLU-Fonts', 'txFontAspectF', 'txFontColor',
+                'txFontHeightF', 'txFontOpacityF', 'txFontQuality',
+                'txFontThicknessF', 'txFuncCode', 'txJust', 'txPerimColor',
+                'txPerimDashLengthF', 'txPerimDashPattern', 'txPerimOn',
+                'txPerimSpaceF', 'txPerimThicknessF', 'txPosXF', 'txPosYF',
+                'txString', 'vcExplicitLabelBarLabelsOn', 'vcFillArrowEdgeColor',
+                'vcFillArrowEdgeThicknessF', 'vcFillArrowFillColor',
+                'vcFillArrowHeadInteriorXF', 'vcFillArrowHeadMinFracXF',
+                'vcFillArrowHeadMinFracYF', 'vcFillArrowHeadXF', 'vcFillArrowHeadYF',
+                'vcFillArrowMinFracWidthF', 'vcFillArrowWidthF', 'vcFillArrowsOn',
+                'vcFillOverEdge', 'vcGlyphOpacityF', 'vcGlyphStyle',
+                'vcLabelBarEndLabelsOn', 'vcLabelFontColor', 'vcLabelFontHeightF',
+                'vcLabelsOn', 'vcLabelsUseVectorColor', 'vcLevelColors',
+                'vcLevelCount', 'vcLevelPalette', 'vcLevelSelectionMode',
+                'vcLevelSpacingF', 'vcLevels', 'vcLineArrowColor',
+                'vcLineArrowHeadMaxSizeF', 'vcLineArrowHeadMinSizeF',
+                'vcLineArrowThicknessF', 'vcMagnitudeFormat',
+                'vcMagnitudeScaleFactorF', 'vcMagnitudeScaleValueF',
+                'vcMagnitudeScalingMode', 'vcMapDirection', 'vcMaxLevelCount',
+                'vcMaxLevelValF', 'vcMaxMagnitudeF', 'vcMinAnnoAngleF',
+                'vcMinAnnoArrowAngleF', 'vcMinAnnoArrowEdgeColor',
+                'vcMinAnnoArrowFillColor', 'vcMinAnnoArrowLineColor',
+                'vcMinAnnoArrowMinOffsetF', 'vcMinAnnoArrowSpaceF',
+                'vcMinAnnoArrowUseVecColor', 'vcMinAnnoBackgroundColor',
+                'vcMinAnnoConstantSpacingF', 'vcMinAnnoExplicitMagnitudeF',
+                'vcMinAnnoFont', 'vcMinAnnoFontAspectF', 'vcMinAnnoFontColor',
+                'vcMinAnnoFontHeightF', 'vcMinAnnoFontQuality',
+                'vcMinAnnoFontThicknessF', 'vcMinAnnoFuncCode', 'vcMinAnnoJust',
+                'vcMinAnnoOn', 'vcMinAnnoOrientation', 'vcMinAnnoOrthogonalPosF',
+                'vcMinAnnoParallelPosF', 'vcMinAnnoPerimColor', 'vcMinAnnoPerimOn',
+                'vcMinAnnoPerimSpaceF', 'vcMinAnnoPerimThicknessF', 'vcMinAnnoSide',
+                'vcMinAnnoString1', 'vcMinAnnoString1On', 'vcMinAnnoString2',
+                'vcMinAnnoString2On', 'vcMinAnnoTextDirection', 'vcMinAnnoZone',
+                'vcMinDistanceF', 'vcMinFracLengthF', 'vcMinLevelValF',
+                'vcMinMagnitudeF', 'vcMonoFillArrowEdgeColor',
+                'vcMonoFillArrowFillColor', 'vcMonoLineArrowColor',
+                'vcMonoWindBarbColor', 'vcNoDataLabelOn', 'vcNoDataLabelString',
+                'vcPositionMode', 'vcRefAnnoAngleF', 'vcRefAnnoArrowAngleF',
+                'vcRefAnnoArrowEdgeColor', 'vcRefAnnoArrowFillColor',
+                'vcRefAnnoArrowLineColor', 'vcRefAnnoArrowMinOffsetF',
+                'vcRefAnnoArrowSpaceF', 'vcRefAnnoArrowUseVecColor',
+                'vcRefAnnoBackgroundColor', 'vcRefAnnoConstantSpacingF',
+                'vcRefAnnoExplicitMagnitudeF', 'vcRefAnnoFont',
+                'vcRefAnnoFontAspectF', 'vcRefAnnoFontColor', 'vcRefAnnoFontHeightF',
+                'vcRefAnnoFontQuality', 'vcRefAnnoFontThicknessF',
+                'vcRefAnnoFuncCode', 'vcRefAnnoJust', 'vcRefAnnoOn',
+                'vcRefAnnoOrientation', 'vcRefAnnoOrthogonalPosF',
+                'vcRefAnnoParallelPosF', 'vcRefAnnoPerimColor', 'vcRefAnnoPerimOn',
+                'vcRefAnnoPerimSpaceF', 'vcRefAnnoPerimThicknessF', 'vcRefAnnoSide',
+                'vcRefAnnoString1', 'vcRefAnnoString1On', 'vcRefAnnoString2',
+                'vcRefAnnoString2On', 'vcRefAnnoTextDirection', 'vcRefAnnoZone',
+                'vcRefLengthF', 'vcRefMagnitudeF', 'vcScalarFieldData',
+                'vcScalarMissingValColor', 'vcScalarValueFormat',
+                'vcScalarValueScaleFactorF', 'vcScalarValueScaleValueF',
+                'vcScalarValueScalingMode', 'vcSpanLevelPalette', 'vcUseRefAnnoRes',
+                'vcUseScalarArray', 'vcVectorDrawOrder', 'vcVectorFieldData',
+                'vcWindBarbCalmCircleSizeF', 'vcWindBarbColor',
+                'vcWindBarbLineThicknessF', 'vcWindBarbScaleFactorF',
+                'vcWindBarbTickAngleF', 'vcWindBarbTickLengthF',
+                'vcWindBarbTickSpacingF', 'vcZeroFLabelAngleF',
+                'vcZeroFLabelBackgroundColor', 'vcZeroFLabelConstantSpacingF',
+                'vcZeroFLabelFont', 'vcZeroFLabelFontAspectF',
+                'vcZeroFLabelFontColor', 'vcZeroFLabelFontHeightF',
+                'vcZeroFLabelFontQuality', 'vcZeroFLabelFontThicknessF',
+                'vcZeroFLabelFuncCode', 'vcZeroFLabelJust', 'vcZeroFLabelOn',
+                'vcZeroFLabelOrthogonalPosF', 'vcZeroFLabelParallelPosF',
+                'vcZeroFLabelPerimColor', 'vcZeroFLabelPerimOn',
+                'vcZeroFLabelPerimSpaceF', 'vcZeroFLabelPerimThicknessF',
+                'vcZeroFLabelSide', 'vcZeroFLabelString', 'vcZeroFLabelTextDirection',
+                'vcZeroFLabelZone', 'vfCopyData', 'vfDataArray',
+                'vfExchangeDimensions', 'vfExchangeUVData', 'vfMagMaxV', 'vfMagMinV',
+                'vfMissingUValueV', 'vfMissingVValueV', 'vfPolarData',
+                'vfSingleMissingValue', 'vfUDataArray', 'vfUMaxV', 'vfUMinV',
+                'vfVDataArray', 'vfVMaxV', 'vfVMinV', 'vfXArray', 'vfXCActualEndF',
+                'vfXCActualStartF', 'vfXCEndIndex', 'vfXCEndSubsetV', 'vfXCEndV',
+                'vfXCStartIndex', 'vfXCStartSubsetV', 'vfXCStartV', 'vfXCStride',
+                'vfYArray', 'vfYCActualEndF', 'vfYCActualStartF', 'vfYCEndIndex',
+                'vfYCEndSubsetV', 'vfYCEndV', 'vfYCStartIndex', 'vfYCStartSubsetV',
+                'vfYCStartV', 'vfYCStride', 'vpAnnoManagerId', 'vpClipOn',
+                'vpHeightF', 'vpKeepAspect', 'vpOn', 'vpUseSegments', 'vpWidthF',
+                'vpXF', 'vpYF', 'wkAntiAlias', 'wkBackgroundColor', 'wkBackgroundOpacityF',
+                'wkColorMapLen', 'wkColorMap', 'wkColorModel', 'wkDashTableLength',
+                'wkDefGraphicStyleId', 'wkDeviceLowerX', 'wkDeviceLowerY',
+                'wkDeviceUpperX', 'wkDeviceUpperY', 'wkFileName', 'wkFillTableLength',
+                'wkForegroundColor', 'wkFormat', 'wkFullBackground', 'wkGksWorkId',
+                'wkHeight', 'wkMarkerTableLength', 'wkMetaName', 'wkOrientation',
+                'wkPDFFileName', 'wkPDFFormat', 'wkPDFResolution', 'wkPSFileName',
+                'wkPSFormat', 'wkPSResolution', 'wkPaperHeightF', 'wkPaperSize',
+                'wkPaperWidthF', 'wkPause', 'wkTopLevelViews', 'wkViews',
+                'wkVisualType', 'wkWidth', 'wkWindowId', 'wkXColorMode', 'wsCurrentSize',
+                'wsMaximumSize', 'wsThresholdSize', 'xyComputeXMax',
+                'xyComputeXMin', 'xyComputeYMax', 'xyComputeYMin', 'xyCoordData',
+                'xyCoordDataSpec', 'xyCurveDrawOrder', 'xyDashPattern',
+                'xyDashPatterns', 'xyExplicitLabels', 'xyExplicitLegendLabels',
+                'xyLabelMode', 'xyLineColor', 'xyLineColors', 'xyLineDashSegLenF',
+                'xyLineLabelConstantSpacingF', 'xyLineLabelFont',
+                'xyLineLabelFontAspectF', 'xyLineLabelFontColor',
+                'xyLineLabelFontColors', 'xyLineLabelFontHeightF',
+                'xyLineLabelFontQuality', 'xyLineLabelFontThicknessF',
+                'xyLineLabelFuncCode', 'xyLineThicknessF', 'xyLineThicknesses',
+                'xyMarkLineMode', 'xyMarkLineModes', 'xyMarker', 'xyMarkerColor',
+                'xyMarkerColors', 'xyMarkerSizeF', 'xyMarkerSizes',
+                'xyMarkerThicknessF', 'xyMarkerThicknesses', 'xyMarkers',
+                'xyMonoDashPattern', 'xyMonoLineColor', 'xyMonoLineLabelFontColor',
+                'xyMonoLineThickness', 'xyMonoMarkLineMode', 'xyMonoMarker',
+                'xyMonoMarkerColor', 'xyMonoMarkerSize', 'xyMonoMarkerThickness',
+                'xyXIrrTensionF', 'xyXIrregularPoints', 'xyXStyle', 'xyYIrrTensionF',
+                'xyYIrregularPoints', 'xyYStyle'), prefix=r'\b'),
+             Name.Builtin),
+
+            # Booleans
+            (r'\.(True|False)\.', Name.Builtin),
+            # Comparing Operators
+            (r'\.(eq|ne|lt|le|gt|ge|not|and|or|xor)\.', Operator.Word),
+        ],
+
+        'strings': [
+            (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
+        ],
+
+        'nums': [
+            (r'\d+(?![.e])(_[a-z]\w+)?', Number.Integer),
+            (r'[+-]?\d*\.\d+(e[-+]?\d+)?(_[a-z]\w+)?', Number.Float),
+            (r'[+-]?\d+\.\d*(e[-+]?\d+)?(_[a-z]\w+)?', Number.Float),
+        ],
+    }
diff --git a/local_packages/pygments/lexers/nimrod.py b/local_packages/pygments/lexers/nimrod.py
new file mode 100644
index 0000000..b99e246
--- /dev/null
+++ b/local_packages/pygments/lexers/nimrod.py
@@ -0,0 +1,199 @@
+"""
+    pygments.lexers.nimrod
+    ~~~~~~~~~~~~~~~~~~~~~~
+
+    Lexer for the Nim language (formerly known as Nimrod).
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, default, bygroups
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+    Number, Punctuation, Error
+
+__all__ = ['NimrodLexer']
+
+
+class NimrodLexer(RegexLexer):
+    """
+    For Nim source code.
+    """
+
+    name = 'Nimrod'
+    url = 'http://nim-lang.org/'
+    aliases = ['nimrod', 'nim']
+    filenames = ['*.nim', '*.nimrod']
+    mimetypes = ['text/x-nim']
+    version_added = '1.5'
+
+    flags = re.MULTILINE | re.IGNORECASE
+
+    def underscorize(words):
+        newWords = []
+        new = []
+        for word in words:
+            for ch in word:
+                new.append(ch)
+                new.append("_?")
+            newWords.append(''.join(new))
+            new = []
+        return "|".join(newWords)
+
+    keywords = [
+        'addr', 'and', 'as', 'asm', 'bind', 'block', 'break', 'case',
+        'cast', 'concept', 'const', 'continue', 'converter', 'defer', 'discard',
+        'distinct', 'div', 'do', 'elif', 'else', 'end', 'enum', 'except',
+        'export', 'finally', 'for', 'if', 'in', 'yield', 'interface',
+        'is', 'isnot', 'iterator', 'let', 'mixin', 'mod',
+        'not', 'notin', 'object', 'of', 'or', 'out', 'ptr', 'raise',
+        'ref', 'return', 'shl', 'shr', 'static', 'try',
+        'tuple', 'type', 'using', 'when', 'while', 'xor'
+    ]
+
+    keywordsPseudo = [
+        'nil', 'true', 'false'
+    ]
+
+    opWords = [
+        'and', 'or', 'not', 'xor', 'shl', 'shr', 'div', 'mod', 'in',
+        'notin', 'is', 'isnot'
+    ]
+
+    types = [
+        'int', 'int8', 'int16', 'int32', 'int64', 'float', 'float32', 'float64',
+        'bool', 'char', 'range', 'array', 'seq', 'set', 'string'
+    ]
+
+    tokens = {
+        'root': [
+            # Comments
+            (r'##\[', String.Doc, 'doccomment'),
+            (r'##.*$', String.Doc),
+            (r'#\[', Comment.Multiline, 'comment'),
+            (r'#.*$', Comment),
+
+            # Pragmas
+            (r'\{\.', String.Other, 'pragma'),
+
+            # Operators
+            (r'[*=><+\-/@$~&%!?|\\\[\]]', Operator),
+            (r'\.\.|\.|,|\[\.|\.\]|\{\.|\.\}|\(\.|\.\)|\{|\}|\(|\)|:|\^|`|;',
+             Punctuation),
+
+            # Case statement branch
+            (r'(\n\s*)(of)(\s)', bygroups(Text.Whitespace, Keyword,
+                                          Text.Whitespace), 'casebranch'),
+
+            # Strings
+            (r'(?:[\w]+)"', String, 'rdqs'),
+            (r'"""', String.Double, 'tdqs'),
+            ('"', String, 'dqs'),
+
+            # Char
+            ("'", String.Char, 'chars'),
+
+            # Keywords
+            (rf'({underscorize(opWords)})\b', Operator.Word),
+            (r'(proc|func|method|macro|template)(\s)(?![(\[\]])',
+             bygroups(Keyword, Text.Whitespace), 'funcname'),
+            (rf'({underscorize(keywords)})\b', Keyword),
+            (r'({})\b'.format(underscorize(['from', 'import', 'include', 'export'])),
+             Keyword.Namespace),
+            (r'(v_?a_?r)\b', Keyword.Declaration),
+            (rf'({underscorize(types)})\b', Name.Builtin),
+            (rf'({underscorize(keywordsPseudo)})\b', Keyword.Pseudo),
+
+            # Identifiers
+            (r'\b((?![_\d])\w)(((?!_)\w)|(_(?!_)\w))*', Name),
+
+            # Numbers
+            (r'[0-9][0-9_]*(?=([e.]|\'f(32|64)))',
+             Number.Float, ('float-suffix', 'float-number')),
+            (r'0x[a-f0-9][a-f0-9_]*', Number.Hex, 'int-suffix'),
+            (r'0b[01][01_]*', Number.Bin, 'int-suffix'),
+            (r'0o[0-7][0-7_]*', Number.Oct, 'int-suffix'),
+            (r'[0-9][0-9_]*', Number.Integer, 'int-suffix'),
+
+            # Whitespace
+            (r'\s+', Text.Whitespace),
+            (r'.+$', Error),
+        ],
+        'chars': [
+            (r'\\([\\abcefnrtvl"\']|x[a-f0-9]{2}|[0-9]{1,3})', String.Escape),
+            (r"'", String.Char, '#pop'),
+            (r".", String.Char)
+        ],
+        'strings': [
+            (r'(?|>=|>>|>|<=|<<|<|\+|-|=|/|\*|%|\+=|-=|!|@', Operator),
+            (r'\(|\)|\[|\]|,|\.\.\.|\.\.|\.|::|:', Punctuation),
+            (r'`\{[^`]*`\}', Text),  # Extern blocks won't be Lexed by Nit
+            (r'[\r\n\t ]+', Text),
+        ],
+    }
diff --git a/local_packages/pygments/lexers/nix.py b/local_packages/pygments/lexers/nix.py
new file mode 100644
index 0000000..16572af
--- /dev/null
+++ b/local_packages/pygments/lexers/nix.py
@@ -0,0 +1,144 @@
+"""
+    pygments.lexers.nix
+    ~~~~~~~~~~~~~~~~~~~
+
+    Lexers for the NixOS Nix language.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+    Number, Punctuation, Literal
+
+__all__ = ['NixLexer']
+
+
+class NixLexer(RegexLexer):
+    """
+    For the Nix language.
+    """
+
+    name = 'Nix'
+    url = 'http://nixos.org/nix/'
+    aliases = ['nixos', 'nix']
+    filenames = ['*.nix']
+    mimetypes = ['text/x-nix']
+    version_added = '2.0'
+
+    keywords = ['rec', 'with', 'let', 'in', 'inherit', 'assert', 'if',
+                'else', 'then', '...']
+    builtins = ['import', 'abort', 'baseNameOf', 'dirOf', 'isNull', 'builtins',
+                'map', 'removeAttrs', 'throw', 'toString', 'derivation']
+    operators = ['++', '+', '?', '.', '!', '//', '==', '/',
+                 '!=', '&&', '||', '->', '=', '<', '>', '*', '-']
+
+    punctuations = ["(", ")", "[", "]", ";", "{", "}", ":", ",", "@"]
+
+    tokens = {
+        'root': [
+            # comments starting with #
+            (r'#.*$', Comment.Single),
+
+            # multiline comments
+            (r'/\*', Comment.Multiline, 'comment'),
+
+            # whitespace
+            (r'\s+', Text),
+
+            # keywords
+            ('({})'.format('|'.join(re.escape(entry) + '\\b' for entry in keywords)), Keyword),
+
+            # highlight the builtins
+            ('({})'.format('|'.join(re.escape(entry) + '\\b' for entry in builtins)),
+             Name.Builtin),
+
+            (r'\b(true|false|null)\b', Name.Constant),
+
+            # floats
+            (r'-?(\d+\.\d*|\.\d+)([eE][-+]?\d+)?', Number.Float),
+
+            # integers
+            (r'-?[0-9]+', Number.Integer),
+
+            # paths
+            (r'[\w.+-]*(\/[\w.+-]+)+', Literal),
+            (r'~(\/[\w.+-]+)+', Literal),
+            (r'\<[\w.+-]+(\/[\w.+-]+)*\>', Literal),
+
+            # operators
+            ('({})'.format('|'.join(re.escape(entry) for entry in operators)),
+             Operator),
+
+            # word operators
+            (r'\b(or|and)\b', Operator.Word),
+
+            (r'\{', Punctuation, 'block'),
+
+            # punctuations
+            ('({})'.format('|'.join(re.escape(entry) for entry in punctuations)), Punctuation),
+
+            # strings
+            (r'"', String.Double, 'doublequote'),
+            (r"''", String.Multiline, 'multiline'),
+
+            # urls
+            (r'[a-zA-Z][a-zA-Z0-9\+\-\.]*\:[\w%/?:@&=+$,\\.!~*\'-]+', Literal),
+
+            # names of variables
+            (r'[\w-]+(?=\s*=)', String.Symbol),
+            (r'[a-zA-Z_][\w\'-]*', Text),
+
+            (r"\$\{", String.Interpol, 'antiquote'),
+        ],
+        'comment': [
+            (r'[^/*]+', Comment.Multiline),
+            (r'/\*', Comment.Multiline, '#push'),
+            (r'\*/', Comment.Multiline, '#pop'),
+            (r'[*/]', Comment.Multiline),
+        ],
+        'multiline': [
+            (r"''(\$|'|\\n|\\r|\\t|\\)", String.Escape),
+            (r"''", String.Multiline, '#pop'),
+            (r'\$\{', String.Interpol, 'antiquote'),
+            (r"[^'\$]+", String.Multiline),
+            (r"\$[^\{']", String.Multiline),
+            (r"'[^']", String.Multiline),
+            (r"\$(?=')", String.Multiline),
+        ],
+        'doublequote': [
+            (r'\\(\\|"|\$|n)', String.Escape),
+            (r'"', String.Double, '#pop'),
+            (r'\$\{', String.Interpol, 'antiquote'),
+            (r'[^"\\\$]+', String.Double),
+            (r'\$[^\{"]', String.Double),
+            (r'\$(?=")', String.Double),
+            (r'\\', String.Double),
+        ],
+        'antiquote': [
+            (r"\}", String.Interpol, '#pop'),
+            # TODO: we should probably escape also here ''${ \${
+            (r"\$\{", String.Interpol, '#push'),
+            include('root'),
+        ],
+        'block': [
+            (r"\}", Punctuation, '#pop'),
+            include('root'),
+        ],
+    }
+
+    def analyse_text(text):
+        rv = 0.0
+        # TODO: let/in
+        if re.search(r'import.+?<[^>]+>', text):
+            rv += 0.4
+        if re.search(r'mkDerivation\s+(\(|\{|rec)', text):
+            rv += 0.4
+        if re.search(r'=\s+mkIf\s+', text):
+            rv += 0.4
+        if re.search(r'\{[a-zA-Z,\s]+\}:', text):
+            rv += 0.1
+        return rv
diff --git a/local_packages/pygments/lexers/numbair.py b/local_packages/pygments/lexers/numbair.py
new file mode 100644
index 0000000..28395b4
--- /dev/null
+++ b/local_packages/pygments/lexers/numbair.py
@@ -0,0 +1,63 @@
+"""
+    pygments.lexers.numbair
+    ~~~~~~~~~~~~~~~~~~~~~~~
+
+    Lexer for other Numba Intermediate Representation.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, include, bygroups, words
+from pygments.token import Whitespace, Name, String,  Punctuation, Keyword, \
+    Operator, Number
+
+__all__ = ["NumbaIRLexer"]
+
+class NumbaIRLexer(RegexLexer):
+    """
+    Lexer for Numba IR
+    """
+    name = 'Numba_IR'
+    url = "https://numba.readthedocs.io/en/stable/developer/architecture.html#stage-2-generate-the-numba-ir"
+    aliases = ['numba_ir', 'numbair']
+    filenames = ['*.numba_ir']
+    mimetypes = ['text/x-numba_ir', 'text/x-numbair']
+    version_added = '2.19'
+
+    identifier = r'\$[a-zA-Z0-9._]+'
+    fun_or_var = r'([a-zA-Z_]+[a-zA-Z0-9]*)'
+
+    tokens = {
+        'root' : [
+            (r'(label)(\ [0-9]+)(:)$',
+                bygroups(Keyword, Name.Label, Punctuation)),
+
+            (r'=', Operator),
+            include('whitespace'),
+            include('keyword'),
+
+            (identifier, Name.Variable),
+            (fun_or_var + r'(\()',
+                bygroups(Name.Function, Punctuation)),
+            (fun_or_var + r'(\=)',
+                bygroups(Name.Attribute, Punctuation)),
+            (fun_or_var, Name.Constant),
+            (r'[0-9]+', Number),
+
+            # 
+            (r'<[^>\n]*>', String),
+
+            (r'[=<>{}\[\]()*.,!\':]|x\b', Punctuation)
+        ],
+
+        'keyword':[
+            (words((
+                'del', 'jump', 'call', 'branch',
+            ), suffix=' '), Keyword),
+        ],
+
+        'whitespace': [
+            (r'(\n|\s)+', Whitespace),
+        ],
+    }
diff --git a/local_packages/pygments/lexers/oberon.py b/local_packages/pygments/lexers/oberon.py
new file mode 100644
index 0000000..11cf571
--- /dev/null
+++ b/local_packages/pygments/lexers/oberon.py
@@ -0,0 +1,120 @@
+"""
+    pygments.lexers.oberon
+    ~~~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for Oberon family languages.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+    Number, Punctuation
+
+__all__ = ['ComponentPascalLexer']
+
+
+class ComponentPascalLexer(RegexLexer):
+    """
+    For Component Pascal source code.
+    """
+    name = 'Component Pascal'
+    aliases = ['componentpascal', 'cp']
+    filenames = ['*.cp', '*.cps']
+    mimetypes = ['text/x-component-pascal']
+    url = 'https://blackboxframework.org'
+    version_added = '2.1'
+
+    flags = re.MULTILINE | re.DOTALL
+
+    tokens = {
+        'root': [
+            include('whitespace'),
+            include('comments'),
+            include('punctuation'),
+            include('numliterals'),
+            include('strings'),
+            include('operators'),
+            include('builtins'),
+            include('identifiers'),
+        ],
+        'whitespace': [
+            (r'\n+', Text),  # blank lines
+            (r'\s+', Text),  # whitespace
+        ],
+        'comments': [
+            (r'\(\*([^$].*?)\*\)', Comment.Multiline),
+            # TODO: nested comments (* (* ... *) ... (* ... *) *) not supported!
+        ],
+        'punctuation': [
+            (r'[()\[\]{},.:;|]', Punctuation),
+        ],
+        'numliterals': [
+            (r'[0-9A-F]+X\b', Number.Hex),                 # char code
+            (r'[0-9A-F]+[HL]\b', Number.Hex),              # hexadecimal number
+            (r'[0-9]+\.[0-9]+E[+-][0-9]+', Number.Float),  # real number
+            (r'[0-9]+\.[0-9]+', Number.Float),             # real number
+            (r'[0-9]+', Number.Integer),                   # decimal whole number
+        ],
+        'strings': [
+            (r"'[^\n']*'", String),  # single quoted string
+            (r'"[^\n"]*"', String),  # double quoted string
+        ],
+        'operators': [
+            # Arithmetic Operators
+            (r'[+-]', Operator),
+            (r'[*/]', Operator),
+            # Relational Operators
+            (r'[=#<>]', Operator),
+            # Dereferencing Operator
+            (r'\^', Operator),
+            # Logical AND Operator
+            (r'&', Operator),
+            # Logical NOT Operator
+            (r'~', Operator),
+            # Assignment Symbol
+            (r':=', Operator),
+            # Range Constructor
+            (r'\.\.', Operator),
+            (r'\$', Operator),
+        ],
+        'identifiers': [
+            (r'([a-zA-Z_$][\w$]*)', Name),
+        ],
+        'builtins': [
+            (words((
+                'ANYPTR', 'ANYREC', 'BOOLEAN', 'BYTE', 'CHAR', 'INTEGER', 'LONGINT',
+                'REAL', 'SET', 'SHORTCHAR', 'SHORTINT', 'SHORTREAL'
+                ), suffix=r'\b'), Keyword.Type),
+            (words((
+                'ABS', 'ABSTRACT', 'ARRAY', 'ASH', 'ASSERT', 'BEGIN', 'BITS', 'BY',
+                'CAP', 'CASE', 'CHR', 'CLOSE', 'CONST', 'DEC', 'DIV', 'DO', 'ELSE',
+                'ELSIF', 'EMPTY', 'END', 'ENTIER', 'EXCL', 'EXIT', 'EXTENSIBLE', 'FOR',
+                'HALT', 'IF', 'IMPORT', 'IN', 'INC', 'INCL', 'IS', 'LEN', 'LIMITED',
+                'LONG', 'LOOP', 'MAX', 'MIN', 'MOD', 'MODULE', 'NEW', 'ODD', 'OF',
+                'OR', 'ORD', 'OUT', 'POINTER', 'PROCEDURE', 'RECORD', 'REPEAT', 'RETURN',
+                'SHORT', 'SHORTCHAR', 'SHORTINT', 'SIZE', 'THEN', 'TYPE', 'TO', 'UNTIL',
+                'VAR', 'WHILE', 'WITH'
+                ), suffix=r'\b'), Keyword.Reserved),
+            (r'(TRUE|FALSE|NIL|INF)\b', Keyword.Constant),
+        ]
+    }
+
+    def analyse_text(text):
+        """The only other lexer using .cp is the C++ one, so we check if for
+        a few common Pascal keywords here. Those are unfortunately quite
+        common across various business languages as well."""
+        result = 0
+        if 'BEGIN' in text:
+            result += 0.01
+        if 'END' in text:
+            result += 0.01
+        if 'PROCEDURE' in text:
+            result += 0.01
+        if 'MODULE' in text:
+            result += 0.01
+
+        return result
diff --git a/local_packages/pygments/lexers/objective.py b/local_packages/pygments/lexers/objective.py
new file mode 100644
index 0000000..ce281fb
--- /dev/null
+++ b/local_packages/pygments/lexers/objective.py
@@ -0,0 +1,513 @@
+"""
+    pygments.lexers.objective
+    ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for Objective-C family languages.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups, using, this, words, \
+    inherit, default
+from pygments.token import Text, Keyword, Name, String, Operator, \
+    Number, Punctuation, Literal, Comment, Whitespace
+
+from pygments.lexers.c_cpp import CLexer, CppLexer
+
+__all__ = ['ObjectiveCLexer', 'ObjectiveCppLexer', 'LogosLexer', 'SwiftLexer']
+
+
+def objective(baselexer):
+    """
+    Generate a subclass of baselexer that accepts the Objective-C syntax
+    extensions.
+    """
+
+    # Have to be careful not to accidentally match JavaDoc/Doxygen syntax here,
+    # since that's quite common in ordinary C/C++ files.  It's OK to match
+    # JavaDoc/Doxygen keywords that only apply to Objective-C, mind.
+    #
+    # The upshot of this is that we CANNOT match @class or @interface
+    _oc_keywords = re.compile(r'@(?:end|implementation|protocol)')
+
+    # Matches [ ? identifier  ( identifier ? ] |  identifier? : )
+    # (note the identifier is *optional* when there is a ':'!)
+    _oc_message = re.compile(r'\[\s*[a-zA-Z_]\w*\s+'
+                             r'(?:[a-zA-Z_]\w*\s*\]|'
+                             r'(?:[a-zA-Z_]\w*)?:)')
+
+    class GeneratedObjectiveCVariant(baselexer):
+        """
+        Implements Objective-C syntax on top of an existing C family lexer.
+        """
+
+        tokens = {
+            'statements': [
+                (r'@"', String, 'string'),
+                (r'@(YES|NO)', Number),
+                (r"@'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
+                (r'@(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
+                (r'@(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
+                (r'@0x[0-9a-fA-F]+[Ll]?', Number.Hex),
+                (r'@0[0-7]+[Ll]?', Number.Oct),
+                (r'@\d+[Ll]?', Number.Integer),
+                (r'@\(', Literal, 'literal_number'),
+                (r'@\[', Literal, 'literal_array'),
+                (r'@\{', Literal, 'literal_dictionary'),
+                (words((
+                    '@selector', '@private', '@protected', '@public', '@encode',
+                    '@synchronized', '@try', '@throw', '@catch', '@finally',
+                    '@end', '@property', '@synthesize', '__bridge', '__bridge_transfer',
+                    '__autoreleasing', '__block', '__weak', '__strong', 'weak', 'strong',
+                    'copy', 'retain', 'assign', 'unsafe_unretained', 'atomic', 'nonatomic',
+                    'readonly', 'readwrite', 'setter', 'getter', 'typeof', 'in',
+                    'out', 'inout', 'release', 'class', '@dynamic', '@optional',
+                    '@required', '@autoreleasepool', '@import'), suffix=r'\b'),
+                 Keyword),
+                (words(('id', 'instancetype', 'Class', 'IMP', 'SEL', 'BOOL',
+                        'IBOutlet', 'IBAction', 'unichar'), suffix=r'\b'),
+                 Keyword.Type),
+                (r'@(true|false|YES|NO)\n', Name.Builtin),
+                (r'(YES|NO|nil|self|super)\b', Name.Builtin),
+                # Carbon types
+                (r'(Boolean|UInt8|SInt8|UInt16|SInt16|UInt32|SInt32)\b', Keyword.Type),
+                # Carbon built-ins
+                (r'(TRUE|FALSE)\b', Name.Builtin),
+                (r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text),
+                 ('#pop', 'oc_classname')),
+                (r'(@class|@protocol)(\s+)', bygroups(Keyword, Text),
+                 ('#pop', 'oc_forward_classname')),
+                # @ can also prefix other expressions like @{...} or @(...)
+                (r'@', Punctuation),
+                inherit,
+            ],
+            'oc_classname': [
+                # interface definition that inherits
+                (r'([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?(\s*)(\{)',
+                 bygroups(Name.Class, Text, Name.Class, Text, Punctuation),
+                 ('#pop', 'oc_ivars')),
+                (r'([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?',
+                 bygroups(Name.Class, Text, Name.Class), '#pop'),
+                # interface definition for a category
+                (r'([a-zA-Z$_][\w$]*)(\s*)(\([a-zA-Z$_][\w$]*\))(\s*)(\{)',
+                 bygroups(Name.Class, Text, Name.Label, Text, Punctuation),
+                 ('#pop', 'oc_ivars')),
+                (r'([a-zA-Z$_][\w$]*)(\s*)(\([a-zA-Z$_][\w$]*\))',
+                 bygroups(Name.Class, Text, Name.Label), '#pop'),
+                # simple interface / implementation
+                (r'([a-zA-Z$_][\w$]*)(\s*)(\{)',
+                 bygroups(Name.Class, Text, Punctuation), ('#pop', 'oc_ivars')),
+                (r'([a-zA-Z$_][\w$]*)', Name.Class, '#pop')
+            ],
+            'oc_forward_classname': [
+                (r'([a-zA-Z$_][\w$]*)(\s*,\s*)',
+                 bygroups(Name.Class, Text), 'oc_forward_classname'),
+                (r'([a-zA-Z$_][\w$]*)(\s*;?)',
+                 bygroups(Name.Class, Text), '#pop')
+            ],
+            'oc_ivars': [
+                include('whitespace'),
+                include('statements'),
+                (';', Punctuation),
+                (r'\{', Punctuation, '#push'),
+                (r'\}', Punctuation, '#pop'),
+            ],
+            'root': [
+                # methods
+                (r'^([-+])(\s*)'                         # method marker
+                 r'(\(.*?\))?(\s*)'                      # return type
+                 r'([a-zA-Z$_][\w$]*:?)',        # begin of method name
+                 bygroups(Punctuation, Text, using(this),
+                          Text, Name.Function),
+                 'method'),
+                inherit,
+            ],
+            'method': [
+                include('whitespace'),
+                # TODO unsure if ellipses are allowed elsewhere, see
+                # discussion in Issue 789
+                (r',', Punctuation),
+                (r'\.\.\.', Punctuation),
+                (r'(\(.*?\))(\s*)([a-zA-Z$_][\w$]*)',
+                 bygroups(using(this), Text, Name.Variable)),
+                (r'[a-zA-Z$_][\w$]*:', Name.Function),
+                (';', Punctuation, '#pop'),
+                (r'\{', Punctuation, 'function'),
+                default('#pop'),
+            ],
+            'literal_number': [
+                (r'\(', Punctuation, 'literal_number_inner'),
+                (r'\)', Literal, '#pop'),
+                include('statement'),
+            ],
+            'literal_number_inner': [
+                (r'\(', Punctuation, '#push'),
+                (r'\)', Punctuation, '#pop'),
+                include('statement'),
+            ],
+            'literal_array': [
+                (r'\[', Punctuation, 'literal_array_inner'),
+                (r'\]', Literal, '#pop'),
+                include('statement'),
+            ],
+            'literal_array_inner': [
+                (r'\[', Punctuation, '#push'),
+                (r'\]', Punctuation, '#pop'),
+                include('statement'),
+            ],
+            'literal_dictionary': [
+                (r'\}', Literal, '#pop'),
+                include('statement'),
+            ],
+        }
+
+        def analyse_text(text):
+            if _oc_keywords.search(text):
+                return 1.0
+            elif '@"' in text:  # strings
+                return 0.8
+            elif re.search('@[0-9]+', text):
+                return 0.7
+            elif _oc_message.search(text):
+                return 0.8
+            return 0
+
+        def get_tokens_unprocessed(self, text, stack=('root',)):
+            from pygments.lexers._cocoa_builtins import COCOA_INTERFACES, \
+                COCOA_PROTOCOLS, COCOA_PRIMITIVES
+
+            for index, token, value in \
+                    baselexer.get_tokens_unprocessed(self, text, stack):
+                if token is Name or token is Name.Class:
+                    if value in COCOA_INTERFACES or value in COCOA_PROTOCOLS \
+                       or value in COCOA_PRIMITIVES:
+                        token = Name.Builtin.Pseudo
+
+                yield index, token, value
+
+    return GeneratedObjectiveCVariant
+
+
+class ObjectiveCLexer(objective(CLexer)):
+    """
+    For Objective-C source code with preprocessor directives.
+    """
+
+    name = 'Objective-C'
+    url = 'https://developer.apple.com/library/archive/documentation/Cocoa/Conceptual/ProgrammingWithObjectiveC/Introduction/Introduction.html'
+    aliases = ['objective-c', 'objectivec', 'obj-c', 'objc']
+    filenames = ['*.m', '*.h']
+    mimetypes = ['text/x-objective-c']
+    version_added = ''
+    priority = 0.05    # Lower than C
+
+
+class ObjectiveCppLexer(objective(CppLexer)):
+    """
+    For Objective-C++ source code with preprocessor directives.
+    """
+
+    name = 'Objective-C++'
+    aliases = ['objective-c++', 'objectivec++', 'obj-c++', 'objc++']
+    filenames = ['*.mm', '*.hh']
+    mimetypes = ['text/x-objective-c++']
+    version_added = ''
+    priority = 0.05    # Lower than C++
+
+
+class LogosLexer(ObjectiveCppLexer):
+    """
+    For Logos + Objective-C source code with preprocessor directives.
+    """
+
+    name = 'Logos'
+    aliases = ['logos']
+    filenames = ['*.x', '*.xi', '*.xm', '*.xmi']
+    mimetypes = ['text/x-logos']
+    version_added = '1.6'
+    priority = 0.25
+
+    tokens = {
+        'statements': [
+            (r'(%orig|%log)\b', Keyword),
+            (r'(%c)\b(\()(\s*)([a-zA-Z$_][\w$]*)(\s*)(\))',
+             bygroups(Keyword, Punctuation, Text, Name.Class, Text, Punctuation)),
+            (r'(%init)\b(\()',
+             bygroups(Keyword, Punctuation), 'logos_init_directive'),
+            (r'(%init)(?=\s*;)', bygroups(Keyword)),
+            (r'(%hook|%group)(\s+)([a-zA-Z$_][\w$]+)',
+             bygroups(Keyword, Text, Name.Class), '#pop'),
+            (r'(%subclass)(\s+)', bygroups(Keyword, Text),
+             ('#pop', 'logos_classname')),
+            inherit,
+        ],
+        'logos_init_directive': [
+            (r'\s+', Text),
+            (',', Punctuation, ('logos_init_directive', '#pop')),
+            (r'([a-zA-Z$_][\w$]*)(\s*)(=)(\s*)([^);]*)',
+             bygroups(Name.Class, Text, Punctuation, Text, Text)),
+            (r'([a-zA-Z$_][\w$]*)', Name.Class),
+            (r'\)', Punctuation, '#pop'),
+        ],
+        'logos_classname': [
+            (r'([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?',
+             bygroups(Name.Class, Text, Name.Class), '#pop'),
+            (r'([a-zA-Z$_][\w$]*)', Name.Class, '#pop')
+        ],
+        'root': [
+            (r'(%subclass)(\s+)', bygroups(Keyword, Text),
+             'logos_classname'),
+            (r'(%hook|%group)(\s+)([a-zA-Z$_][\w$]+)',
+             bygroups(Keyword, Text, Name.Class)),
+            (r'(%config)(\s*\(\s*)(\w+)(\s*=)(.*?)(\)\s*)',
+             bygroups(Keyword, Text, Name.Variable, Text, String, Text)),
+            (r'(%ctor)(\s*)(\{)', bygroups(Keyword, Text, Punctuation),
+             'function'),
+            (r'(%new)(\s*)(\()(.*?)(\))',
+             bygroups(Keyword, Text, Keyword, String, Keyword)),
+            (r'(\s*)(%end)(\s*)', bygroups(Text, Keyword, Text)),
+            inherit,
+        ],
+    }
+
+    _logos_keywords = re.compile(r'%(?:hook|ctor|init|c\()')
+
+    def analyse_text(text):
+        if LogosLexer._logos_keywords.search(text):
+            return 1.0
+        return 0
+
+
+class SwiftLexer(RegexLexer):
+    """
+    For Swift source.
+    """
+    name = 'Swift'
+    url = 'https://www.swift.org/'
+    filenames = ['*.swift']
+    aliases = ['swift']
+    mimetypes = ['text/x-swift']
+    version_added = '2.0'
+
+    tokens = {
+        'root': [
+            # Whitespace and Comments
+            (r'\n', Text),
+            (r'\s+', Whitespace),
+            (r'//', Comment.Single, 'comment-single'),
+            (r'/\*', Comment.Multiline, 'comment-multi'),
+            (r'#(if|elseif|else|endif|available)\b', Comment.Preproc, 'preproc'),
+
+            # Keywords
+            include('keywords'),
+
+            # Global Types
+            (words((
+                'Array', 'AutoreleasingUnsafeMutablePointer', 'BidirectionalReverseView',
+                'Bit', 'Bool', 'CFunctionPointer', 'COpaquePointer', 'CVaListPointer',
+                'Character', 'ClosedInterval', 'CollectionOfOne', 'ContiguousArray',
+                'Dictionary', 'DictionaryGenerator', 'DictionaryIndex', 'Double',
+                'EmptyCollection', 'EmptyGenerator', 'EnumerateGenerator',
+                'EnumerateSequence', 'FilterCollectionView',
+                'FilterCollectionViewIndex', 'FilterGenerator', 'FilterSequenceView',
+                'Float', 'Float80', 'FloatingPointClassification', 'GeneratorOf',
+                'GeneratorOfOne', 'GeneratorSequence', 'HalfOpenInterval', 'HeapBuffer',
+                'HeapBufferStorage', 'ImplicitlyUnwrappedOptional', 'IndexingGenerator',
+                'Int', 'Int16', 'Int32', 'Int64', 'Int8', 'LazyBidirectionalCollection',
+                'LazyForwardCollection', 'LazyRandomAccessCollection',
+                'LazySequence', 'MapCollectionView', 'MapSequenceGenerator',
+                'MapSequenceView', 'MirrorDisposition', 'ObjectIdentifier', 'OnHeap',
+                'Optional', 'PermutationGenerator', 'QuickLookObject',
+                'RandomAccessReverseView', 'Range', 'RangeGenerator', 'RawByte', 'Repeat',
+                'ReverseBidirectionalIndex', 'ReverseRandomAccessIndex', 'SequenceOf',
+                'SinkOf', 'Slice', 'StaticString', 'StrideThrough', 'StrideThroughGenerator',
+                'StrideTo', 'StrideToGenerator', 'String', 'UInt', 'UInt16', 'UInt32',
+                'UInt64', 'UInt8', 'UTF16', 'UTF32', 'UTF8', 'UnicodeDecodingResult',
+                'UnicodeScalar', 'Unmanaged', 'UnsafeBufferPointer',
+                'UnsafeBufferPointerGenerator', 'UnsafeMutableBufferPointer',
+                'UnsafeMutablePointer', 'UnsafePointer', 'Zip2', 'ZipGenerator2',
+                # Protocols
+                'AbsoluteValuable', 'AnyObject', 'ArrayLiteralConvertible',
+                'BidirectionalIndexType', 'BitwiseOperationsType',
+                'BooleanLiteralConvertible', 'BooleanType', 'CVarArgType',
+                'CollectionType', 'Comparable', 'DebugPrintable',
+                'DictionaryLiteralConvertible', 'Equatable',
+                'ExtendedGraphemeClusterLiteralConvertible',
+                'ExtensibleCollectionType', 'FloatLiteralConvertible',
+                'FloatingPointType', 'ForwardIndexType', 'GeneratorType', 'Hashable',
+                'IntegerArithmeticType', 'IntegerLiteralConvertible', 'IntegerType',
+                'IntervalType', 'MirrorType', 'MutableCollectionType', 'MutableSliceable',
+                'NilLiteralConvertible', 'OutputStreamType', 'Printable',
+                'RandomAccessIndexType', 'RangeReplaceableCollectionType',
+                'RawOptionSetType', 'RawRepresentable', 'Reflectable', 'SequenceType',
+                'SignedIntegerType', 'SignedNumberType', 'SinkType', 'Sliceable',
+                'Streamable', 'Strideable', 'StringInterpolationConvertible',
+                'StringLiteralConvertible', 'UnicodeCodecType',
+                'UnicodeScalarLiteralConvertible', 'UnsignedIntegerType',
+                '_ArrayBufferType', '_BidirectionalIndexType', '_CocoaStringType',
+                '_CollectionType', '_Comparable', '_ExtensibleCollectionType',
+                '_ForwardIndexType', '_Incrementable', '_IntegerArithmeticType',
+                '_IntegerType', '_ObjectiveCBridgeable', '_RandomAccessIndexType',
+                '_RawOptionSetType', '_SequenceType', '_Sequence_Type',
+                '_SignedIntegerType', '_SignedNumberType', '_Sliceable', '_Strideable',
+                '_SwiftNSArrayRequiredOverridesType', '_SwiftNSArrayType',
+                '_SwiftNSCopyingType', '_SwiftNSDictionaryRequiredOverridesType',
+                '_SwiftNSDictionaryType', '_SwiftNSEnumeratorType',
+                '_SwiftNSFastEnumerationType', '_SwiftNSStringRequiredOverridesType',
+                '_SwiftNSStringType', '_UnsignedIntegerType',
+                # Variables
+                'C_ARGC', 'C_ARGV', 'Process',
+                # Typealiases
+                'Any', 'AnyClass', 'BooleanLiteralType', 'CBool', 'CChar', 'CChar16',
+                'CChar32', 'CDouble', 'CFloat', 'CInt', 'CLong', 'CLongLong', 'CShort',
+                'CSignedChar', 'CUnsignedInt', 'CUnsignedLong', 'CUnsignedShort',
+                'CWideChar', 'ExtendedGraphemeClusterType', 'Float32', 'Float64',
+                'FloatLiteralType', 'IntMax', 'IntegerLiteralType', 'StringLiteralType',
+                'UIntMax', 'UWord', 'UnicodeScalarType', 'Void', 'Word',
+                # Foundation/Cocoa
+                'NSErrorPointer', 'NSObjectProtocol', 'Selector'), suffix=r'\b'),
+             Name.Builtin),
+            # Functions
+            (words((
+                'abs', 'advance', 'alignof', 'alignofValue', 'assert', 'assertionFailure',
+                'contains', 'count', 'countElements', 'debugPrint', 'debugPrintln',
+                'distance', 'dropFirst', 'dropLast', 'dump', 'enumerate', 'equal',
+                'extend', 'fatalError', 'filter', 'find', 'first', 'getVaList', 'indices',
+                'insert', 'isEmpty', 'join', 'last', 'lazy', 'lexicographicalCompare',
+                'map', 'max', 'maxElement', 'min', 'minElement', 'numericCast', 'overlaps',
+                'partition', 'precondition', 'preconditionFailure', 'prefix', 'print',
+                'println', 'reduce', 'reflect', 'removeAll', 'removeAtIndex', 'removeLast',
+                'removeRange', 'reverse', 'sizeof', 'sizeofValue', 'sort', 'sorted',
+                'splice', 'split', 'startsWith', 'stride', 'strideof', 'strideofValue',
+                'suffix', 'swap', 'toDebugString', 'toString', 'transcode',
+                'underestimateCount', 'unsafeAddressOf', 'unsafeBitCast', 'unsafeDowncast',
+                'withExtendedLifetime', 'withUnsafeMutablePointer',
+                'withUnsafeMutablePointers', 'withUnsafePointer', 'withUnsafePointers',
+                'withVaList'), suffix=r'\b'),
+             Name.Builtin.Pseudo),
+
+            # Implicit Block Variables
+            (r'\$\d+', Name.Variable),
+
+            # Binary Literal
+            (r'0b[01_]+', Number.Bin),
+            # Octal Literal
+            (r'0o[0-7_]+', Number.Oct),
+            # Hexadecimal Literal
+            (r'0x[0-9a-fA-F_]+', Number.Hex),
+            # Decimal Literal
+            (r'[0-9][0-9_]*(\.[0-9_]+[eE][+\-]?[0-9_]+|'
+             r'\.[0-9_]*|[eE][+\-]?[0-9_]+)', Number.Float),
+            (r'[0-9][0-9_]*', Number.Integer),
+            # String Literal
+            (r'"""', String, 'string-multi'),
+            (r'"', String, 'string'),
+
+            # Operators and Punctuation
+            (r'[(){}\[\].,:;=@#`?]|->|[<&?](?=\w)|(?<=\w)[>!?]', Punctuation),
+            (r'[/=\-+!*%<>&|^?~]+', Operator),
+
+            # Identifier
+            (r'[a-zA-Z_]\w*', Name)
+        ],
+        'keywords': [
+            (words((
+                'as', 'async', 'await', 'break', 'case', 'catch', 'continue', 'default', 'defer',
+                'do', 'else', 'fallthrough', 'for', 'guard', 'if', 'in', 'is',
+                'repeat', 'return', '#selector', 'switch', 'throw', 'try',
+                'where', 'while'), suffix=r'\b'),
+             Keyword),
+            (r'@availability\([^)]+\)', Keyword.Reserved),
+            (words((
+                'associativity', 'convenience', 'dynamic', 'didSet', 'final',
+                'get', 'indirect', 'infix', 'inout', 'lazy', 'left', 'mutating',
+                'none', 'nonmutating', 'optional', 'override', 'postfix',
+                'precedence', 'prefix', 'Protocol', 'required', 'rethrows',
+                'right', 'set', 'throws', 'Type', 'unowned', 'weak', 'willSet',
+                '@availability', '@autoclosure', '@noreturn',
+                '@NSApplicationMain', '@NSCopying', '@NSManaged', '@objc',
+                '@UIApplicationMain', '@IBAction', '@IBDesignable',
+                '@IBInspectable', '@IBOutlet'), suffix=r'\b'),
+             Keyword.Reserved),
+            (r'(as|dynamicType|false|is|nil|self|Self|super|true|__COLUMN__'
+             r'|__FILE__|__FUNCTION__|__LINE__|_'
+             r'|#(?:file|line|column|function))\b', Keyword.Constant),
+            (r'import\b', Keyword.Declaration, 'module'),
+            (r'(class|enum|extension|struct|protocol)(\s+)([a-zA-Z_]\w*)',
+             bygroups(Keyword.Declaration, Whitespace, Name.Class)),
+            (r'(func)(\s+)([a-zA-Z_]\w*)',
+             bygroups(Keyword.Declaration, Whitespace, Name.Function)),
+            (r'(var|let)(\s+)([a-zA-Z_]\w*)', bygroups(Keyword.Declaration,
+             Whitespace, Name.Variable)),
+            (words((
+                'actor', 'associatedtype', 'class', 'deinit', 'enum', 'extension', 'func', 'import',
+                'init', 'internal', 'let', 'operator', 'private', 'protocol', 'public',
+                'static', 'struct', 'subscript', 'typealias', 'var'), suffix=r'\b'),
+             Keyword.Declaration)
+        ],
+        'comment': [
+            (r':param: [a-zA-Z_]\w*|:returns?:|(FIXME|MARK|TODO):',
+             Comment.Special)
+        ],
+
+        # Nested
+        'comment-single': [
+            (r'\n', Whitespace, '#pop'),
+            include('comment'),
+            (r'[^\n]+', Comment.Single)
+        ],
+        'comment-multi': [
+            include('comment'),
+            (r'[^*/]+', Comment.Multiline),
+            (r'/\*', Comment.Multiline, '#push'),
+            (r'\*/', Comment.Multiline, '#pop'),
+            (r'[*/]+', Comment.Multiline)
+        ],
+        'module': [
+            (r'\n', Whitespace, '#pop'),
+            (r'[a-zA-Z_]\w*', Name.Class),
+            include('root')
+        ],
+        'preproc': [
+            (r'\n', Whitespace, '#pop'),
+            include('keywords'),
+            (r'[A-Za-z]\w*', Comment.Preproc),
+            include('root')
+        ],
+        'string': [
+            (r'"', String, '#pop'),
+            include("string-common"),
+        ],
+        'string-multi': [
+            (r'"""', String, '#pop'),
+            include("string-common"),
+        ],
+        'string-common': [
+            (r'\\\(', String.Interpol, 'string-intp'),
+            (r"""\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
+             r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}""", String.Escape),
+            (r'[^\\"]+', String),
+            (r'\\', String)
+        ],
+        'string-intp': [
+            (r'\(', String.Interpol, '#push'),
+            (r'\)', String.Interpol, '#pop'),
+            include('root')
+        ]
+    }
+
+    def get_tokens_unprocessed(self, text):
+        from pygments.lexers._cocoa_builtins import COCOA_INTERFACES, \
+            COCOA_PROTOCOLS, COCOA_PRIMITIVES
+
+        for index, token, value in \
+                RegexLexer.get_tokens_unprocessed(self, text):
+            if token is Name or token is Name.Class:
+                if value in COCOA_INTERFACES or value in COCOA_PROTOCOLS \
+                   or value in COCOA_PRIMITIVES:
+                    token = Name.Builtin.Pseudo
+
+            yield index, token, value
diff --git a/local_packages/pygments/lexers/ooc.py b/local_packages/pygments/lexers/ooc.py
new file mode 100644
index 0000000..f6aea8c
--- /dev/null
+++ b/local_packages/pygments/lexers/ooc.py
@@ -0,0 +1,84 @@
+"""
+    pygments.lexers.ooc
+    ~~~~~~~~~~~~~~~~~~~
+
+    Lexers for the Ooc language.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+    Number, Punctuation
+
+__all__ = ['OocLexer']
+
+
+class OocLexer(RegexLexer):
+    """
+    For Ooc source code
+    """
+    name = 'Ooc'
+    url = 'https://ooc-lang.github.io/'
+    aliases = ['ooc']
+    filenames = ['*.ooc']
+    mimetypes = ['text/x-ooc']
+    version_added = '1.2'
+
+    tokens = {
+        'root': [
+            (words((
+                'class', 'interface', 'implement', 'abstract', 'extends', 'from',
+                'this', 'super', 'new', 'const', 'final', 'static', 'import',
+                'use', 'extern', 'inline', 'proto', 'break', 'continue',
+                'fallthrough', 'operator', 'if', 'else', 'for', 'while', 'do',
+                'switch', 'case', 'as', 'in', 'version', 'return', 'true',
+                'false', 'null'), prefix=r'\b', suffix=r'\b'),
+             Keyword),
+            (r'include\b', Keyword, 'include'),
+            (r'(cover)([ \t]+)(from)([ \t]+)(\w+[*@]?)',
+             bygroups(Keyword, Text, Keyword, Text, Name.Class)),
+            (r'(func)((?:[ \t]|\\\n)+)(~[a-z_]\w*)',
+             bygroups(Keyword, Text, Name.Function)),
+            (r'\bfunc\b', Keyword),
+            # Note: %= not listed on https://ooc-lang.github.io/docs/lang/operators/
+            (r'//.*', Comment),
+            (r'(?s)/\*.*?\*/', Comment.Multiline),
+            (r'(==?|\+=?|-[=>]?|\*=?|/=?|:=|!=?|%=?|\?|>{1,3}=?|<{1,3}=?|\.\.|'
+             r'&&?|\|\|?|\^=?)', Operator),
+            (r'(\.)([ \t]*)([a-z]\w*)', bygroups(Operator, Text,
+                                                 Name.Function)),
+            (r'[A-Z][A-Z0-9_]+', Name.Constant),
+            (r'[A-Z]\w*([@*]|\[[ \t]*\])?', Name.Class),
+
+            (r'([a-z]\w*(?:~[a-z]\w*)?)((?:[ \t]|\\\n)*)(?=\()',
+             bygroups(Name.Function, Text)),
+            (r'[a-z]\w*', Name.Variable),
+
+            # : introduces types
+            (r'[:(){}\[\];,]', Punctuation),
+
+            (r'0x[0-9a-fA-F]+', Number.Hex),
+            (r'0c[0-9]+', Number.Oct),
+            (r'0b[01]+', Number.Bin),
+            (r'[0-9_]\.[0-9_]*(?!\.)', Number.Float),
+            (r'[0-9_]+', Number.Decimal),
+
+            (r'"(?:\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\"])*"',
+             String.Double),
+            (r"'(?:\\.|\\[0-9]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
+             String.Char),
+            (r'@', Punctuation),  # pointer dereference
+            (r'\.', Punctuation),  # imports or chain operator
+
+            (r'\\[ \t\n]', Text),
+            (r'[ \t]+', Text),
+        ],
+        'include': [
+            (r'[\w/]+', Name),
+            (r',', Punctuation),
+            (r'[ \t]', Text),
+            (r'[;\n]', Text, '#pop'),
+        ],
+    }
diff --git a/local_packages/pygments/lexers/openscad.py b/local_packages/pygments/lexers/openscad.py
new file mode 100644
index 0000000..1ec8fea
--- /dev/null
+++ b/local_packages/pygments/lexers/openscad.py
@@ -0,0 +1,96 @@
+"""
+    pygments.lexers.openscad
+    ~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for the OpenSCAD languages.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, words, include
+from pygments.token import Text, Comment, Punctuation, Operator, Keyword, Name, Number, Whitespace, Literal, String
+
+__all__ = ['OpenScadLexer']
+
+
+class OpenScadLexer(RegexLexer):
+    """For openSCAD code.
+    """
+    name = "OpenSCAD"
+    url = "https://openscad.org/"
+    aliases = ["openscad"]
+    filenames = ["*.scad"]
+    mimetypes = ["application/x-openscad"]
+    version_added = '2.16'
+
+    tokens = {
+        "root": [
+            (r"[^\S\n]+", Whitespace),
+            (r'//', Comment.Single, 'comment-single'),
+            (r'/\*', Comment.Multiline, 'comment-multi'),
+            (r"[{}\[\]\(\),;:]", Punctuation),
+            (r"[*!#%\-+=?/]", Operator),
+            (r"<=|<|==|!=|>=|>|&&|\|\|", Operator),
+            (r"\$(f[asn]|t|vp[rtd]|children)", Operator),
+            (r"(undef|PI)\b", Keyword.Constant),
+            (
+                r"(use|include)((?:\s|\\\\s)+)",
+                bygroups(Keyword.Namespace, Text),
+                "includes",
+            ),
+            (r"(module)(\s*)([^\s\(]+)",
+             bygroups(Keyword.Namespace, Whitespace, Name.Namespace)),
+            (r"(function)(\s*)([^\s\(]+)",
+             bygroups(Keyword.Declaration, Whitespace, Name.Function)),
+            (words(("true", "false"), prefix=r"\b", suffix=r"\b"), Literal),
+            (words((
+                "function", "module", "include", "use", "for",
+                "intersection_for", "if", "else", "return"
+                ), prefix=r"\b", suffix=r"\b"), Keyword
+            ),
+            (words((
+                "circle", "square", "polygon", "text", "sphere", "cube",
+                "cylinder", "polyhedron", "translate", "rotate", "scale",
+                "resize", "mirror", "multmatrix", "color", "offset", "hull",
+                "minkowski", "union", "difference", "intersection", "abs",
+                "sign", "sin", "cos", "tan", "acos", "asin", "atan", "atan2",
+                "floor", "round", "ceil", "ln", "log", "pow", "sqrt", "exp",
+                "rands", "min", "max", "concat", "lookup", "str", "chr",
+                "search", "version", "version_num", "norm", "cross",
+                "parent_module", "echo", "import", "import_dxf",
+                "dxf_linear_extrude", "linear_extrude", "rotate_extrude",
+                "surface", "projection", "render", "dxf_cross",
+                "dxf_dim", "let", "assign", "len"
+                ), prefix=r"\b", suffix=r"\b"),
+                Name.Builtin
+            ),
+            (r"\bchildren\b", Name.Builtin.Pseudo),
+            (r'""".*?"""', String.Double),
+            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
+            (r"-?\d+(\.\d+)?(e[+-]?\d+)?", Number),
+            (r"\w+", Name),
+        ],
+        "includes": [
+            (
+                r"(<)([^>]*)(>)",
+                bygroups(Punctuation, Comment.PreprocFile, Punctuation),
+            ),
+        ],
+        'comment': [
+            (r':param: [a-zA-Z_]\w*|:returns?:|(FIXME|MARK|TODO):',
+             Comment.Special)
+        ],
+        'comment-single': [
+            (r'\n', Text, '#pop'),
+            include('comment'),
+            (r'[^\n]+', Comment.Single)
+        ],
+        'comment-multi': [
+            include('comment'),
+            (r'[^*/]+', Comment.Multiline),
+            (r'/\*', Comment.Multiline, '#push'),
+            (r'\*/', Comment.Multiline, '#pop'),
+            (r'[*/]', Comment.Multiline)
+        ],
+    }
diff --git a/local_packages/pygments/lexers/other.py b/local_packages/pygments/lexers/other.py
new file mode 100644
index 0000000..8767d73
--- /dev/null
+++ b/local_packages/pygments/lexers/other.py
@@ -0,0 +1,41 @@
+"""
+    pygments.lexers.other
+    ~~~~~~~~~~~~~~~~~~~~~
+
+    Just export lexer classes previously contained in this module.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+# ruff: noqa: F401
+from pygments.lexers.sql import SqlLexer, MySqlLexer, SqliteConsoleLexer
+from pygments.lexers.shell import BashLexer, BashSessionLexer, BatchLexer, \
+    TcshLexer
+from pygments.lexers.robotframework import RobotFrameworkLexer
+from pygments.lexers.testing import GherkinLexer
+from pygments.lexers.esoteric import BrainfuckLexer, BefungeLexer, RedcodeLexer
+from pygments.lexers.prolog import LogtalkLexer
+from pygments.lexers.snobol import SnobolLexer
+from pygments.lexers.rebol import RebolLexer
+from pygments.lexers.configs import KconfigLexer, Cfengine3Lexer
+from pygments.lexers.modeling import ModelicaLexer
+from pygments.lexers.scripting import AppleScriptLexer, MOOCodeLexer, \
+    HybrisLexer
+from pygments.lexers.graphics import PostScriptLexer, GnuplotLexer, \
+    AsymptoteLexer, PovrayLexer
+from pygments.lexers.business import ABAPLexer, OpenEdgeLexer, \
+    GoodDataCLLexer, MaqlLexer
+from pygments.lexers.automation import AutoItLexer, AutohotkeyLexer
+from pygments.lexers.dsls import ProtoBufLexer, BroLexer, PuppetLexer, \
+    MscgenLexer, VGLLexer
+from pygments.lexers.basic import CbmBasicV2Lexer
+from pygments.lexers.pawn import SourcePawnLexer, PawnLexer
+from pygments.lexers.ecl import ECLLexer
+from pygments.lexers.urbi import UrbiscriptLexer
+from pygments.lexers.smalltalk import SmalltalkLexer, NewspeakLexer
+from pygments.lexers.installers import NSISLexer, RPMSpecLexer
+from pygments.lexers.textedit import AwkLexer
+from pygments.lexers.smv import NuSMVLexer
+
+__all__ = []
diff --git a/local_packages/pygments/lexers/parasail.py b/local_packages/pygments/lexers/parasail.py
new file mode 100644
index 0000000..9b40960
--- /dev/null
+++ b/local_packages/pygments/lexers/parasail.py
@@ -0,0 +1,78 @@
+"""
+    pygments.lexers.parasail
+    ~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Lexer for ParaSail.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+    Number, Punctuation, Literal
+
+__all__ = ['ParaSailLexer']
+
+
+class ParaSailLexer(RegexLexer):
+    """
+    For ParaSail source code.
+    """
+
+    name = 'ParaSail'
+    url = 'http://www.parasail-lang.org'
+    aliases = ['parasail']
+    filenames = ['*.psi', '*.psl']
+    mimetypes = ['text/x-parasail']
+    version_added = '2.1'
+
+    flags = re.MULTILINE
+
+    tokens = {
+        'root': [
+            (r'[^\S\n]+', Text),
+            (r'//.*?\n', Comment.Single),
+            (r'\b(and|or|xor)=', Operator.Word),
+            (r'\b(and(\s+then)?|or(\s+else)?|xor|rem|mod|'
+             r'(is|not)\s+null)\b',
+             Operator.Word),
+            # Keywords
+            (r'\b(abs|abstract|all|block|class|concurrent|const|continue|'
+             r'each|end|exit|extends|exports|forward|func|global|implements|'
+             r'import|in|interface|is|lambda|locked|new|not|null|of|op|'
+             r'optional|private|queued|ref|return|reverse|separate|some|'
+             r'type|until|var|with|'
+             # Control flow
+             r'if|then|else|elsif|case|for|while|loop)\b',
+             Keyword.Reserved),
+            (r'(abstract\s+)?(interface|class|op|func|type)',
+             Keyword.Declaration),
+            # Literals
+            (r'"[^"]*"', String),
+            (r'\\[\'ntrf"0]', String.Escape),
+            (r'#[a-zA-Z]\w*', Literal),       # Enumeration
+            include('numbers'),
+            (r"'[^']'", String.Char),
+            (r'[a-zA-Z]\w*', Name),
+            # Operators and Punctuation
+            (r'(<==|==>|<=>|\*\*=|<\|=|<<=|>>=|==|!=|=\?|<=|>=|'
+             r'\*\*|<<|>>|=>|:=|\+=|-=|\*=|\|=|\||/=|\+|-|\*|/|'
+             r'\.\.|<\.\.|\.\.<|<\.\.<)',
+             Operator),
+            (r'(<|>|\[|\]|\(|\)|\||:|;|,|.|\{|\}|->)',
+             Punctuation),
+            (r'\n+', Text),
+        ],
+        'numbers': [
+            (r'\d[0-9_]*#[0-9a-fA-F][0-9a-fA-F_]*#', Number.Hex),  # any base
+            (r'0[xX][0-9a-fA-F][0-9a-fA-F_]*', Number.Hex),        # C-like hex
+            (r'0[bB][01][01_]*', Number.Bin),                      # C-like bin
+            (r'\d[0-9_]*\.\d[0-9_]*[eE][+-]\d[0-9_]*',             # float exp
+             Number.Float),
+            (r'\d[0-9_]*\.\d[0-9_]*', Number.Float),               # float
+            (r'\d[0-9_]*', Number.Integer),                        # integer
+        ],
+    }
diff --git a/local_packages/pygments/lexers/parsers.py b/local_packages/pygments/lexers/parsers.py
new file mode 100644
index 0000000..baa49b0
--- /dev/null
+++ b/local_packages/pygments/lexers/parsers.py
@@ -0,0 +1,798 @@
+"""
+    pygments.lexers.parsers
+    ~~~~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for parser generators.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, DelegatingLexer, \
+    include, bygroups, using
+from pygments.token import Punctuation, Other, Text, Comment, Operator, \
+    Keyword, Name, String, Number, Whitespace
+from pygments.lexers.jvm import JavaLexer
+from pygments.lexers.c_cpp import CLexer, CppLexer
+from pygments.lexers.objective import ObjectiveCLexer
+from pygments.lexers.d import DLexer
+from pygments.lexers.dotnet import CSharpLexer
+from pygments.lexers.ruby import RubyLexer
+from pygments.lexers.python import PythonLexer
+from pygments.lexers.perl import PerlLexer
+
+__all__ = ['RagelLexer', 'RagelEmbeddedLexer', 'RagelCLexer', 'RagelDLexer',
+           'RagelCppLexer', 'RagelObjectiveCLexer', 'RagelRubyLexer',
+           'RagelJavaLexer', 'AntlrLexer', 'AntlrPythonLexer',
+           'AntlrPerlLexer', 'AntlrRubyLexer', 'AntlrCppLexer',
+           'AntlrCSharpLexer', 'AntlrObjectiveCLexer',
+           'AntlrJavaLexer', 'AntlrActionScriptLexer',
+           'TreetopLexer', 'EbnfLexer']
+
+
+class RagelLexer(RegexLexer):
+    """A pure `Ragel `_ lexer.
+
+    Use this for fragments of Ragel.  For ``.rl`` files, use
+    :class:`RagelEmbeddedLexer` instead (or one of the language-specific
+    subclasses).
+    """
+
+    name = 'Ragel'
+    url = 'http://www.colm.net/open-source/ragel/'
+    aliases = ['ragel']
+    filenames = []
+    version_added = '1.1'
+
+    tokens = {
+        'whitespace': [
+            (r'\s+', Whitespace)
+        ],
+        'comments': [
+            (r'\#.*$', Comment),
+        ],
+        'keywords': [
+            (r'(access|action|alphtype)\b', Keyword),
+            (r'(getkey|write|machine|include)\b', Keyword),
+            (r'(any|ascii|extend|alpha|digit|alnum|lower|upper)\b', Keyword),
+            (r'(xdigit|cntrl|graph|print|punct|space|zlen|empty)\b', Keyword)
+        ],
+        'numbers': [
+            (r'0x[0-9A-Fa-f]+', Number.Hex),
+            (r'[+-]?[0-9]+', Number.Integer),
+        ],
+        'literals': [
+            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
+            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
+            (r'\[(\\\\|\\[^\\]|[^\\\]])*\]', String),          # square bracket literals
+            (r'/(?!\*)(\\\\|\\[^\\]|[^/\\])*/', String.Regex),  # regular expressions
+        ],
+        'identifiers': [
+            (r'[a-zA-Z_]\w*', Name.Variable),
+        ],
+        'operators': [
+            (r',', Operator),                           # Join
+            (r'\||&|--?', Operator),                    # Union, Intersection and Subtraction
+            (r'\.|<:|:>>?', Operator),                  # Concatention
+            (r':', Operator),                           # Label
+            (r'->', Operator),                          # Epsilon Transition
+            (r'(>|\$|%|<|@|<>)(/|eof\b)', Operator),    # EOF Actions
+            (r'(>|\$|%|<|@|<>)(!|err\b)', Operator),    # Global Error Actions
+            (r'(>|\$|%|<|@|<>)(\^|lerr\b)', Operator),  # Local Error Actions
+            (r'(>|\$|%|<|@|<>)(~|to\b)', Operator),     # To-State Actions
+            (r'(>|\$|%|<|@|<>)(\*|from\b)', Operator),  # From-State Actions
+            (r'>|@|\$|%', Operator),                    # Transition Actions and Priorities
+            (r'\*|\?|\+|\{[0-9]*,[0-9]*\}', Operator),  # Repetition
+            (r'!|\^', Operator),                        # Negation
+            (r'\(|\)', Operator),                       # Grouping
+        ],
+        'root': [
+            include('literals'),
+            include('whitespace'),
+            include('comments'),
+            include('keywords'),
+            include('numbers'),
+            include('identifiers'),
+            include('operators'),
+            (r'\{', Punctuation, 'host'),
+            (r'=', Operator),
+            (r';', Punctuation),
+        ],
+        'host': [
+            (r'(' + r'|'.join((  # keep host code in largest possible chunks
+                r'[^{}\'"/#]+',  # exclude unsafe characters
+                r'[^\\]\\[{}]',  # allow escaped { or }
+
+                # strings and comments may safely contain unsafe characters
+                r'"(\\\\|\\[^\\]|[^"\\])*"',
+                r"'(\\\\|\\[^\\]|[^'\\])*'",
+                r'//.*$\n?',            # single line comment
+                r'/\*(.|\n)*?\*/',      # multi-line javadoc-style comment
+                r'\#.*$\n?',            # ruby comment
+
+                # regular expression: There's no reason for it to start
+                # with a * and this stops confusion with comments.
+                r'/(?!\*)(\\\\|\\[^\\]|[^/\\])*/',
+
+                # / is safe now that we've handled regex and javadoc comments
+                r'/',
+            )) + r')+', Other),
+
+            (r'\{', Punctuation, '#push'),
+            (r'\}', Punctuation, '#pop'),
+        ],
+    }
+
+
+class RagelEmbeddedLexer(RegexLexer):
+    """
+    A lexer for Ragel embedded in a host language file.
+
+    This will only highlight Ragel statements. If you want host language
+    highlighting then call the language-specific Ragel lexer.
+    """
+
+    name = 'Embedded Ragel'
+    aliases = ['ragel-em']
+    filenames = ['*.rl']
+    url = 'http://www.colm.net/open-source/ragel/'
+    version_added = '1.1'
+
+    tokens = {
+        'root': [
+            (r'(' + r'|'.join((   # keep host code in largest possible chunks
+                r'[^%\'"/#]+',    # exclude unsafe characters
+                r'%(?=[^%]|$)',   # a single % sign is okay, just not 2 of them
+
+                # strings and comments may safely contain unsafe characters
+                r'"(\\\\|\\[^\\]|[^"\\])*"',
+                r"'(\\\\|\\[^\\]|[^'\\])*'",
+                r'/\*(.|\n)*?\*/',      # multi-line javadoc-style comment
+                r'//.*$\n?',  # single line comment
+                r'\#.*$\n?',  # ruby/ragel comment
+                r'/(?!\*)(\\\\|\\[^\\]|[^/\\])*/',  # regular expression
+
+                # / is safe now that we've handled regex and javadoc comments
+                r'/',
+            )) + r')+', Other),
+
+            # Single Line FSM.
+            # Please don't put a quoted newline in a single line FSM.
+            # That's just mean. It will break this.
+            (r'(%%)(?![{%])(.*)($|;)(\n?)', bygroups(Punctuation,
+                                                     using(RagelLexer),
+                                                     Punctuation, Text)),
+
+            # Multi Line FSM.
+            (r'(%%%%|%%)\{', Punctuation, 'multi-line-fsm'),
+        ],
+        'multi-line-fsm': [
+            (r'(' + r'|'.join((  # keep ragel code in largest possible chunks.
+                r'(' + r'|'.join((
+                    r'[^}\'"\[/#]',   # exclude unsafe characters
+                    r'\}(?=[^%]|$)',   # } is okay as long as it's not followed by %
+                    r'\}%(?=[^%]|$)',  # ...well, one %'s okay, just not two...
+                    r'[^\\]\\[{}]',   # ...and } is okay if it's escaped
+
+                    # allow / if it's preceded with one of these symbols
+                    # (ragel EOF actions)
+                    r'(>|\$|%|<|@|<>)/',
+
+                    # specifically allow regex followed immediately by *
+                    # so it doesn't get mistaken for a comment
+                    r'/(?!\*)(\\\\|\\[^\\]|[^/\\])*/\*',
+
+                    # allow / as long as it's not followed by another / or by a *
+                    r'/(?=[^/*]|$)',
+
+                    # We want to match as many of these as we can in one block.
+                    # Not sure if we need the + sign here,
+                    # does it help performance?
+                )) + r')+',
+
+                # strings and comments may safely contain unsafe characters
+                r'"(\\\\|\\[^\\]|[^"\\])*"',
+                r"'(\\\\|\\[^\\]|[^'\\])*'",
+                r"\[(\\\\|\\[^\\]|[^\]\\])*\]",  # square bracket literal
+                r'/\*(.|\n)*?\*/',          # multi-line javadoc-style comment
+                r'//.*$\n?',                # single line comment
+                r'\#.*$\n?',                # ruby/ragel comment
+            )) + r')+', using(RagelLexer)),
+
+            (r'\}%%', Punctuation, '#pop'),
+        ]
+    }
+
+    def analyse_text(text):
+        return '@LANG: indep' in text
+
+
+class RagelRubyLexer(DelegatingLexer):
+    """
+    A lexer for Ragel in a Ruby host file.
+    """
+
+    name = 'Ragel in Ruby Host'
+    aliases = ['ragel-ruby', 'ragel-rb']
+    filenames = ['*.rl']
+    url = 'http://www.colm.net/open-source/ragel/'
+    version_added = '1.1'
+
+    def __init__(self, **options):
+        super().__init__(RubyLexer, RagelEmbeddedLexer, **options)
+
+    def analyse_text(text):
+        return '@LANG: ruby' in text
+
+
+class RagelCLexer(DelegatingLexer):
+    """
+    A lexer for Ragel in a C host file.
+    """
+
+    name = 'Ragel in C Host'
+    aliases = ['ragel-c']
+    filenames = ['*.rl']
+    url = 'http://www.colm.net/open-source/ragel/'
+    version_added = '1.1'
+
+    def __init__(self, **options):
+        super().__init__(CLexer, RagelEmbeddedLexer, **options)
+
+    def analyse_text(text):
+        return '@LANG: c' in text
+
+
+class RagelDLexer(DelegatingLexer):
+    """
+    A lexer for Ragel in a D host file.
+    """
+
+    name = 'Ragel in D Host'
+    aliases = ['ragel-d']
+    filenames = ['*.rl']
+    url = 'http://www.colm.net/open-source/ragel/'
+    version_added = '1.1'
+
+    def __init__(self, **options):
+        super().__init__(DLexer, RagelEmbeddedLexer, **options)
+
+    def analyse_text(text):
+        return '@LANG: d' in text
+
+
+class RagelCppLexer(DelegatingLexer):
+    """
+    A lexer for Ragel in a C++ host file.
+    """
+
+    name = 'Ragel in CPP Host'
+    aliases = ['ragel-cpp']
+    filenames = ['*.rl']
+    url = 'http://www.colm.net/open-source/ragel/'
+    version_added = '1.1'
+
+    def __init__(self, **options):
+        super().__init__(CppLexer, RagelEmbeddedLexer, **options)
+
+    def analyse_text(text):
+        return '@LANG: c++' in text
+
+
+class RagelObjectiveCLexer(DelegatingLexer):
+    """
+    A lexer for Ragel in an Objective C host file.
+    """
+
+    name = 'Ragel in Objective C Host'
+    aliases = ['ragel-objc']
+    filenames = ['*.rl']
+    url = 'http://www.colm.net/open-source/ragel/'
+    version_added = '1.1'
+
+    def __init__(self, **options):
+        super().__init__(ObjectiveCLexer, RagelEmbeddedLexer, **options)
+
+    def analyse_text(text):
+        return '@LANG: objc' in text
+
+
+class RagelJavaLexer(DelegatingLexer):
+    """
+    A lexer for Ragel in a Java host file.
+    """
+
+    name = 'Ragel in Java Host'
+    aliases = ['ragel-java']
+    filenames = ['*.rl']
+    url = 'http://www.colm.net/open-source/ragel/'
+    version_added = '1.1'
+
+    def __init__(self, **options):
+        super().__init__(JavaLexer, RagelEmbeddedLexer, **options)
+
+    def analyse_text(text):
+        return '@LANG: java' in text
+
+
+class AntlrLexer(RegexLexer):
+    """
+    Generic ANTLR Lexer.
+    Should not be called directly, instead
+    use DelegatingLexer for your target language.
+    """
+
+    name = 'ANTLR'
+    aliases = ['antlr']
+    filenames = []
+    url = 'https://www.antlr.org'
+    version_added = '1.1'
+
+    _id = r'[A-Za-z]\w*'
+    _TOKEN_REF = r'[A-Z]\w*'
+    _RULE_REF = r'[a-z]\w*'
+    _STRING_LITERAL = r'\'(?:\\\\|\\\'|[^\']*)\''
+    _INT = r'[0-9]+'
+
+    tokens = {
+        'whitespace': [
+            (r'\s+', Whitespace),
+        ],
+        'comments': [
+            (r'//.*$', Comment),
+            (r'/\*(.|\n)*?\*/', Comment),
+        ],
+        'root': [
+            include('whitespace'),
+            include('comments'),
+
+            (r'(lexer|parser|tree)?(\s*)(grammar\b)(\s*)(' + _id + ')(;)',
+             bygroups(Keyword, Whitespace, Keyword, Whitespace, Name.Class,
+                      Punctuation)),
+            # optionsSpec
+            (r'options\b', Keyword, 'options'),
+            # tokensSpec
+            (r'tokens\b', Keyword, 'tokens'),
+            # attrScope
+            (r'(scope)(\s*)(' + _id + r')(\s*)(\{)',
+             bygroups(Keyword, Whitespace, Name.Variable, Whitespace,
+                      Punctuation), 'action'),
+            # exception
+            (r'(catch|finally)\b', Keyword, 'exception'),
+            # action
+            (r'(@' + _id + r')(\s*)(::)?(\s*)(' + _id + r')(\s*)(\{)',
+             bygroups(Name.Label, Whitespace, Punctuation, Whitespace,
+                      Name.Label, Whitespace, Punctuation), 'action'),
+            # rule
+            (r'((?:protected|private|public|fragment)\b)?(\s*)(' + _id + ')(!)?',
+             bygroups(Keyword, Whitespace, Name.Label, Punctuation),
+             ('rule-alts', 'rule-prelims')),
+        ],
+        'exception': [
+            (r'\n', Whitespace, '#pop'),
+            (r'\s', Whitespace),
+            include('comments'),
+
+            (r'\[', Punctuation, 'nested-arg-action'),
+            (r'\{', Punctuation, 'action'),
+        ],
+        'rule-prelims': [
+            include('whitespace'),
+            include('comments'),
+
+            (r'returns\b', Keyword),
+            (r'\[', Punctuation, 'nested-arg-action'),
+            (r'\{', Punctuation, 'action'),
+            # throwsSpec
+            (r'(throws)(\s+)(' + _id + ')',
+             bygroups(Keyword, Whitespace, Name.Label)),
+            (r'(,)(\s*)(' + _id + ')',
+             bygroups(Punctuation, Whitespace, Name.Label)),  # Additional throws
+            # optionsSpec
+            (r'options\b', Keyword, 'options'),
+            # ruleScopeSpec - scope followed by target language code or name of action
+            # TODO finish implementing other possibilities for scope
+            # L173 ANTLRv3.g from ANTLR book
+            (r'(scope)(\s+)(\{)', bygroups(Keyword, Whitespace, Punctuation),
+             'action'),
+            (r'(scope)(\s+)(' + _id + r')(\s*)(;)',
+             bygroups(Keyword, Whitespace, Name.Label, Whitespace, Punctuation)),
+            # ruleAction
+            (r'(@' + _id + r')(\s*)(\{)',
+             bygroups(Name.Label, Whitespace, Punctuation), 'action'),
+            # finished prelims, go to rule alts!
+            (r':', Punctuation, '#pop')
+        ],
+        'rule-alts': [
+            include('whitespace'),
+            include('comments'),
+
+            # These might need to go in a separate 'block' state triggered by (
+            (r'options\b', Keyword, 'options'),
+            (r':', Punctuation),
+
+            # literals
+            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
+            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
+            (r'<<([^>]|>[^>])>>', String),
+            # identifiers
+            # Tokens start with capital letter.
+            (r'\$?[A-Z_]\w*', Name.Constant),
+            # Rules start with small letter.
+            (r'\$?[a-z_]\w*', Name.Variable),
+            # operators
+            (r'(\+|\||->|=>|=|\(|\)|\.\.|\.|\?|\*|\^|!|\#|~)', Operator),
+            (r',', Punctuation),
+            (r'\[', Punctuation, 'nested-arg-action'),
+            (r'\{', Punctuation, 'action'),
+            (r';', Punctuation, '#pop')
+        ],
+        'tokens': [
+            include('whitespace'),
+            include('comments'),
+            (r'\{', Punctuation),
+            (r'(' + _TOKEN_REF + r')(\s*)(=)?(\s*)(' + _STRING_LITERAL
+             + r')?(\s*)(;)',
+             bygroups(Name.Label, Whitespace, Punctuation, Whitespace,
+                      String, Whitespace, Punctuation)),
+            (r'\}', Punctuation, '#pop'),
+        ],
+        'options': [
+            include('whitespace'),
+            include('comments'),
+            (r'\{', Punctuation),
+            (r'(' + _id + r')(\s*)(=)(\s*)(' +
+             '|'.join((_id, _STRING_LITERAL, _INT, r'\*')) + r')(\s*)(;)',
+             bygroups(Name.Variable, Whitespace, Punctuation, Whitespace,
+                      Text, Whitespace, Punctuation)),
+            (r'\}', Punctuation, '#pop'),
+        ],
+        'action': [
+            (r'(' + r'|'.join((    # keep host code in largest possible chunks
+                r'[^${}\'"/\\]+',  # exclude unsafe characters
+
+                # strings and comments may safely contain unsafe characters
+                r'"(\\\\|\\[^\\]|[^"\\])*"',
+                r"'(\\\\|\\[^\\]|[^'\\])*'",
+                r'//.*$\n?',            # single line comment
+                r'/\*(.|\n)*?\*/',      # multi-line javadoc-style comment
+
+                # regular expression: There's no reason for it to start
+                # with a * and this stops confusion with comments.
+                r'/(?!\*)(\\\\|\\[^\\]|[^/\\])*/',
+
+                # backslashes are okay, as long as we are not backslashing a %
+                r'\\(?!%)',
+
+                # Now that we've handled regex and javadoc comments
+                # it's safe to let / through.
+                r'/',
+            )) + r')+', Other),
+            (r'(\\)(%)', bygroups(Punctuation, Other)),
+            (r'(\$[a-zA-Z]+)(\.?)(text|value)?',
+             bygroups(Name.Variable, Punctuation, Name.Property)),
+            (r'\{', Punctuation, '#push'),
+            (r'\}', Punctuation, '#pop'),
+        ],
+        'nested-arg-action': [
+            (r'(' + r'|'.join((    # keep host code in largest possible chunks.
+                r'[^$\[\]\'"/]+',  # exclude unsafe characters
+
+                # strings and comments may safely contain unsafe characters
+                r'"(\\\\|\\[^\\]|[^"\\])*"',
+                r"'(\\\\|\\[^\\]|[^'\\])*'",
+                r'//.*$\n?',            # single line comment
+                r'/\*(.|\n)*?\*/',      # multi-line javadoc-style comment
+
+                # regular expression: There's no reason for it to start
+                # with a * and this stops confusion with comments.
+                r'/(?!\*)(\\\\|\\[^\\]|[^/\\])*/',
+
+                # Now that we've handled regex and javadoc comments
+                # it's safe to let / through.
+                r'/',
+            )) + r')+', Other),
+
+
+            (r'\[', Punctuation, '#push'),
+            (r'\]', Punctuation, '#pop'),
+            (r'(\$[a-zA-Z]+)(\.?)(text|value)?',
+             bygroups(Name.Variable, Punctuation, Name.Property)),
+            (r'(\\\\|\\\]|\\\[|[^\[\]])+', Other),
+        ]
+    }
+
+    def analyse_text(text):
+        return re.search(r'^\s*grammar\s+[a-zA-Z0-9]+\s*;', text, re.M)
+
+
+# http://www.antlr.org/wiki/display/ANTLR3/Code+Generation+Targets
+
+class AntlrCppLexer(DelegatingLexer):
+    """
+    ANTLR with C++ Target
+    """
+
+    name = 'ANTLR With CPP Target'
+    aliases = ['antlr-cpp']
+    filenames = ['*.G', '*.g']
+    url = 'https://www.antlr.org'
+    version_added = '1.1'
+
+    def __init__(self, **options):
+        super().__init__(CppLexer, AntlrLexer, **options)
+
+    def analyse_text(text):
+        return AntlrLexer.analyse_text(text) and \
+            re.search(r'^\s*language\s*=\s*C\s*;', text, re.M)
+
+
+class AntlrObjectiveCLexer(DelegatingLexer):
+    """
+    ANTLR with Objective-C Target
+    """
+
+    name = 'ANTLR With ObjectiveC Target'
+    aliases = ['antlr-objc']
+    filenames = ['*.G', '*.g']
+    url = 'https://www.antlr.org'
+    version_added = '1.1'
+
+    def __init__(self, **options):
+        super().__init__(ObjectiveCLexer, AntlrLexer, **options)
+
+    def analyse_text(text):
+        return AntlrLexer.analyse_text(text) and \
+            re.search(r'^\s*language\s*=\s*ObjC\s*;', text)
+
+
+class AntlrCSharpLexer(DelegatingLexer):
+    """
+    ANTLR with C# Target
+    """
+
+    name = 'ANTLR With C# Target'
+    aliases = ['antlr-csharp', 'antlr-c#']
+    filenames = ['*.G', '*.g']
+    url = 'https://www.antlr.org'
+    version_added = '1.1'
+
+    def __init__(self, **options):
+        super().__init__(CSharpLexer, AntlrLexer, **options)
+
+    def analyse_text(text):
+        return AntlrLexer.analyse_text(text) and \
+            re.search(r'^\s*language\s*=\s*CSharp2\s*;', text, re.M)
+
+
+class AntlrPythonLexer(DelegatingLexer):
+    """
+    ANTLR with Python Target
+    """
+
+    name = 'ANTLR With Python Target'
+    aliases = ['antlr-python']
+    filenames = ['*.G', '*.g']
+    url = 'https://www.antlr.org'
+    version_added = '1.1'
+
+    def __init__(self, **options):
+        super().__init__(PythonLexer, AntlrLexer, **options)
+
+    def analyse_text(text):
+        return AntlrLexer.analyse_text(text) and \
+            re.search(r'^\s*language\s*=\s*Python\s*;', text, re.M)
+
+
+class AntlrJavaLexer(DelegatingLexer):
+    """
+    ANTLR with Java Target
+    """
+
+    name = 'ANTLR With Java Target'
+    aliases = ['antlr-java']
+    filenames = ['*.G', '*.g']
+    url = 'https://www.antlr.org'
+    version_added = '1.1'
+
+    def __init__(self, **options):
+        super().__init__(JavaLexer, AntlrLexer, **options)
+
+    def analyse_text(text):
+        # Antlr language is Java by default
+        return AntlrLexer.analyse_text(text) and 0.9
+
+
+class AntlrRubyLexer(DelegatingLexer):
+    """
+    ANTLR with Ruby Target
+    """
+
+    name = 'ANTLR With Ruby Target'
+    aliases = ['antlr-ruby', 'antlr-rb']
+    filenames = ['*.G', '*.g']
+    url = 'https://www.antlr.org'
+    version_added = '1.1'
+
+    def __init__(self, **options):
+        super().__init__(RubyLexer, AntlrLexer, **options)
+
+    def analyse_text(text):
+        return AntlrLexer.analyse_text(text) and \
+            re.search(r'^\s*language\s*=\s*Ruby\s*;', text, re.M)
+
+
+class AntlrPerlLexer(DelegatingLexer):
+    """
+    ANTLR with Perl Target
+    """
+
+    name = 'ANTLR With Perl Target'
+    aliases = ['antlr-perl']
+    filenames = ['*.G', '*.g']
+    url = 'https://www.antlr.org'
+    version_added = '1.1'
+
+    def __init__(self, **options):
+        super().__init__(PerlLexer, AntlrLexer, **options)
+
+    def analyse_text(text):
+        return AntlrLexer.analyse_text(text) and \
+            re.search(r'^\s*language\s*=\s*Perl5\s*;', text, re.M)
+
+
+class AntlrActionScriptLexer(DelegatingLexer):
+    """
+    ANTLR with ActionScript Target
+    """
+
+    name = 'ANTLR With ActionScript Target'
+    aliases = ['antlr-actionscript', 'antlr-as']
+    filenames = ['*.G', '*.g']
+    url = 'https://www.antlr.org'
+    version_added = '1.1'
+
+    def __init__(self, **options):
+        from pygments.lexers.actionscript import ActionScriptLexer
+        super().__init__(ActionScriptLexer, AntlrLexer, **options)
+
+    def analyse_text(text):
+        return AntlrLexer.analyse_text(text) and \
+            re.search(r'^\s*language\s*=\s*ActionScript\s*;', text, re.M)
+
+
+class TreetopBaseLexer(RegexLexer):
+    """
+    A base lexer for `Treetop `_ grammars.
+    Not for direct use; use :class:`TreetopLexer` instead.
+
+    .. versionadded:: 1.6
+    """
+
+    tokens = {
+        'root': [
+            include('space'),
+            (r'require[ \t]+[^\n\r]+[\n\r]', Other),
+            (r'module\b', Keyword.Namespace, 'module'),
+            (r'grammar\b', Keyword, 'grammar'),
+        ],
+        'module': [
+            include('space'),
+            include('end'),
+            (r'module\b', Keyword, '#push'),
+            (r'grammar\b', Keyword, 'grammar'),
+            (r'[A-Z]\w*(?:::[A-Z]\w*)*', Name.Namespace),
+        ],
+        'grammar': [
+            include('space'),
+            include('end'),
+            (r'rule\b', Keyword, 'rule'),
+            (r'include\b', Keyword, 'include'),
+            (r'[A-Z]\w*', Name),
+        ],
+        'include': [
+            include('space'),
+            (r'[A-Z]\w*(?:::[A-Z]\w*)*', Name.Class, '#pop'),
+        ],
+        'rule': [
+            include('space'),
+            include('end'),
+            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
+            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
+            (r'([A-Za-z_]\w*)(:)', bygroups(Name.Label, Punctuation)),
+            (r'[A-Za-z_]\w*', Name),
+            (r'[()]', Punctuation),
+            (r'[?+*/&!~]', Operator),
+            (r'\[(?:\\.|\[:\^?[a-z]+:\]|[^\\\]])+\]', String.Regex),
+            (r'([0-9]*)(\.\.)([0-9]*)',
+             bygroups(Number.Integer, Operator, Number.Integer)),
+            (r'(<)([^>]+)(>)', bygroups(Punctuation, Name.Class, Punctuation)),
+            (r'\{', Punctuation, 'inline_module'),
+            (r'\.', String.Regex),
+        ],
+        'inline_module': [
+            (r'\{', Other, 'ruby'),
+            (r'\}', Punctuation, '#pop'),
+            (r'[^{}]+', Other),
+        ],
+        'ruby': [
+            (r'\{', Other, '#push'),
+            (r'\}', Other, '#pop'),
+            (r'[^{}]+', Other),
+        ],
+        'space': [
+            (r'[ \t\n\r]+', Whitespace),
+            (r'#[^\n]*', Comment.Single),
+        ],
+        'end': [
+            (r'end\b', Keyword, '#pop'),
+        ],
+    }
+
+
+class TreetopLexer(DelegatingLexer):
+    """
+    A lexer for Treetop grammars.
+    """
+
+    name = 'Treetop'
+    aliases = ['treetop']
+    filenames = ['*.treetop', '*.tt']
+    url = 'https://cjheath.github.io/treetop'
+    version_added = '1.6'
+
+    def __init__(self, **options):
+        super().__init__(RubyLexer, TreetopBaseLexer, **options)
+
+
+class EbnfLexer(RegexLexer):
+    """
+    Lexer for `ISO/IEC 14977 EBNF
+    `_
+    grammars.
+    """
+
+    name = 'EBNF'
+    aliases = ['ebnf']
+    filenames = ['*.ebnf']
+    mimetypes = ['text/x-ebnf']
+    url = 'https://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_Form'
+    version_added = '2.0'
+
+    tokens = {
+        'root': [
+            include('whitespace'),
+            include('comment_start'),
+            include('identifier'),
+            (r'=', Operator, 'production'),
+        ],
+        'production': [
+            include('whitespace'),
+            include('comment_start'),
+            include('identifier'),
+            (r'"[^"]*"', String.Double),
+            (r"'[^']*'", String.Single),
+            (r'(\?[^?]*\?)', Name.Entity),
+            (r'[\[\]{}(),|]', Punctuation),
+            (r'-', Operator),
+            (r';', Punctuation, '#pop'),
+            (r'\.', Punctuation, '#pop'),
+        ],
+        'whitespace': [
+            (r'\s+', Text),
+        ],
+        'comment_start': [
+            (r'\(\*', Comment.Multiline, 'comment'),
+        ],
+        'comment': [
+            (r'[^*)]', Comment.Multiline),
+            include('comment_start'),
+            (r'\*\)', Comment.Multiline, '#pop'),
+            (r'[*)]', Comment.Multiline),
+        ],
+        'identifier': [
+            (r'([a-zA-Z][\w \-]*)', Keyword),
+        ],
+    }
diff --git a/local_packages/pygments/lexers/pascal.py b/local_packages/pygments/lexers/pascal.py
new file mode 100644
index 0000000..ad8a0ae
--- /dev/null
+++ b/local_packages/pygments/lexers/pascal.py
@@ -0,0 +1,644 @@
+"""
+    pygments.lexers.pascal
+    ~~~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for Pascal family languages.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import Lexer
+from pygments.util import get_bool_opt, get_list_opt
+from pygments.token import Comment, Operator, Keyword, Name, String, \
+    Number, Punctuation, Error, Whitespace
+from pygments.scanner import Scanner
+
+# compatibility import
+from pygments.lexers.modula2 import Modula2Lexer # noqa: F401
+
+__all__ = ['DelphiLexer', 'PortugolLexer']
+
+
+class PortugolLexer(Lexer):
+    """For Portugol, a Pascal dialect with keywords in Portuguese."""
+    name = 'Portugol'
+    aliases = ['portugol']
+    filenames = ['*.alg', '*.portugol']
+    mimetypes = []
+    url = "https://www.apoioinformatica.inf.br/produtos/visualg/linguagem"
+    version_added = ''
+
+    def __init__(self, **options):
+        Lexer.__init__(self, **options)
+        self.lexer = DelphiLexer(**options, portugol=True)
+
+    def get_tokens_unprocessed(self, text):
+        return self.lexer.get_tokens_unprocessed(text)
+
+
+class DelphiLexer(Lexer):
+    """
+    For Delphi (Borland Object Pascal),
+    Turbo Pascal and Free Pascal source code.
+
+    Additional options accepted:
+
+    `turbopascal`
+        Highlight Turbo Pascal specific keywords (default: ``True``).
+    `delphi`
+        Highlight Borland Delphi specific keywords (default: ``True``).
+    `freepascal`
+        Highlight Free Pascal specific keywords (default: ``True``).
+    `units`
+        A list of units that should be considered builtin, supported are
+        ``System``, ``SysUtils``, ``Classes`` and ``Math``.
+        Default is to consider all of them builtin.
+    """
+    name = 'Delphi'
+    aliases = ['delphi', 'pas', 'pascal', 'objectpascal']
+    filenames = ['*.pas', '*.dpr']
+    mimetypes = ['text/x-pascal']
+    url = 'https://www.embarcadero.com/products/delphi'
+    version_added = ''
+
+    TURBO_PASCAL_KEYWORDS = (
+        'absolute', 'and', 'array', 'asm', 'begin', 'break', 'case',
+        'const', 'constructor', 'continue', 'destructor', 'div', 'do',
+        'downto', 'else', 'end', 'file', 'for', 'function', 'goto',
+        'if', 'implementation', 'in', 'inherited', 'inline', 'interface',
+        'label', 'mod', 'nil', 'not', 'object', 'of', 'on', 'operator',
+        'or', 'packed', 'procedure', 'program', 'record', 'reintroduce',
+        'repeat', 'self', 'set', 'shl', 'shr', 'string', 'then', 'to',
+        'type', 'unit', 'until', 'uses', 'var', 'while', 'with', 'xor'
+    )
+
+    DELPHI_KEYWORDS = (
+        'as', 'class', 'except', 'exports', 'finalization', 'finally',
+        'initialization', 'is', 'library', 'on', 'property', 'raise',
+        'threadvar', 'try'
+    )
+
+    FREE_PASCAL_KEYWORDS = (
+        'dispose', 'exit', 'false', 'new', 'true'
+    )
+
+    BLOCK_KEYWORDS = {
+        'begin', 'class', 'const', 'constructor', 'destructor', 'end',
+        'finalization', 'function', 'implementation', 'initialization',
+        'label', 'library', 'operator', 'procedure', 'program', 'property',
+        'record', 'threadvar', 'type', 'unit', 'uses', 'var'
+    }
+
+    FUNCTION_MODIFIERS = {
+        'alias', 'cdecl', 'export', 'inline', 'interrupt', 'nostackframe',
+        'pascal', 'register', 'safecall', 'softfloat', 'stdcall',
+        'varargs', 'name', 'dynamic', 'near', 'virtual', 'external',
+        'override', 'assembler'
+    }
+
+    # XXX: those aren't global. but currently we know no way for defining
+    #      them just for the type context.
+    DIRECTIVES = {
+        'absolute', 'abstract', 'assembler', 'cppdecl', 'default', 'far',
+        'far16', 'forward', 'index', 'oldfpccall', 'private', 'protected',
+        'published', 'public'
+    }
+
+    BUILTIN_TYPES = {
+        'ansichar', 'ansistring', 'bool', 'boolean', 'byte', 'bytebool',
+        'cardinal', 'char', 'comp', 'currency', 'double', 'dword',
+        'extended', 'int64', 'integer', 'iunknown', 'longbool', 'longint',
+        'longword', 'pansichar', 'pansistring', 'pbool', 'pboolean',
+        'pbyte', 'pbytearray', 'pcardinal', 'pchar', 'pcomp', 'pcurrency',
+        'pdate', 'pdatetime', 'pdouble', 'pdword', 'pextended', 'phandle',
+        'pint64', 'pinteger', 'plongint', 'plongword', 'pointer',
+        'ppointer', 'pshortint', 'pshortstring', 'psingle', 'psmallint',
+        'pstring', 'pvariant', 'pwidechar', 'pwidestring', 'pword',
+        'pwordarray', 'pwordbool', 'real', 'real48', 'shortint',
+        'shortstring', 'single', 'smallint', 'string', 'tclass', 'tdate',
+        'tdatetime', 'textfile', 'thandle', 'tobject', 'ttime', 'variant',
+        'widechar', 'widestring', 'word', 'wordbool'
+    }
+
+    BUILTIN_UNITS = {
+        'System': (
+            'abs', 'acquireexceptionobject', 'addr', 'ansitoutf8',
+            'append', 'arctan', 'assert', 'assigned', 'assignfile',
+            'beginthread', 'blockread', 'blockwrite', 'break', 'chdir',
+            'chr', 'close', 'closefile', 'comptocurrency', 'comptodouble',
+            'concat', 'continue', 'copy', 'cos', 'dec', 'delete',
+            'dispose', 'doubletocomp', 'endthread', 'enummodules',
+            'enumresourcemodules', 'eof', 'eoln', 'erase', 'exceptaddr',
+            'exceptobject', 'exclude', 'exit', 'exp', 'filepos', 'filesize',
+            'fillchar', 'finalize', 'findclasshinstance', 'findhinstance',
+            'findresourcehinstance', 'flush', 'frac', 'freemem',
+            'get8087cw', 'getdir', 'getlasterror', 'getmem',
+            'getmemorymanager', 'getmodulefilename', 'getvariantmanager',
+            'halt', 'hi', 'high', 'inc', 'include', 'initialize', 'insert',
+            'int', 'ioresult', 'ismemorymanagerset', 'isvariantmanagerset',
+            'length', 'ln', 'lo', 'low', 'mkdir', 'move', 'new', 'odd',
+            'olestrtostring', 'olestrtostrvar', 'ord', 'paramcount',
+            'paramstr', 'pi', 'pos', 'pred', 'ptr', 'pucs4chars', 'random',
+            'randomize', 'read', 'readln', 'reallocmem',
+            'releaseexceptionobject', 'rename', 'reset', 'rewrite', 'rmdir',
+            'round', 'runerror', 'seek', 'seekeof', 'seekeoln',
+            'set8087cw', 'setlength', 'setlinebreakstyle',
+            'setmemorymanager', 'setstring', 'settextbuf',
+            'setvariantmanager', 'sin', 'sizeof', 'slice', 'sqr', 'sqrt',
+            'str', 'stringofchar', 'stringtoolestr', 'stringtowidechar',
+            'succ', 'swap', 'trunc', 'truncate', 'typeinfo',
+            'ucs4stringtowidestring', 'unicodetoutf8', 'uniquestring',
+            'upcase', 'utf8decode', 'utf8encode', 'utf8toansi',
+            'utf8tounicode', 'val', 'vararrayredim', 'varclear',
+            'widecharlentostring', 'widecharlentostrvar',
+            'widechartostring', 'widechartostrvar',
+            'widestringtoucs4string', 'write', 'writeln'
+        ),
+        'SysUtils': (
+            'abort', 'addexitproc', 'addterminateproc', 'adjustlinebreaks',
+            'allocmem', 'ansicomparefilename', 'ansicomparestr',
+            'ansicomparetext', 'ansidequotedstr', 'ansiextractquotedstr',
+            'ansilastchar', 'ansilowercase', 'ansilowercasefilename',
+            'ansipos', 'ansiquotedstr', 'ansisamestr', 'ansisametext',
+            'ansistrcomp', 'ansistricomp', 'ansistrlastchar', 'ansistrlcomp',
+            'ansistrlicomp', 'ansistrlower', 'ansistrpos', 'ansistrrscan',
+            'ansistrscan', 'ansistrupper', 'ansiuppercase',
+            'ansiuppercasefilename', 'appendstr', 'assignstr', 'beep',
+            'booltostr', 'bytetocharindex', 'bytetocharlen', 'bytetype',
+            'callterminateprocs', 'changefileext', 'charlength',
+            'chartobyteindex', 'chartobytelen', 'comparemem', 'comparestr',
+            'comparetext', 'createdir', 'createguid', 'currentyear',
+            'currtostr', 'currtostrf', 'date', 'datetimetofiledate',
+            'datetimetostr', 'datetimetostring', 'datetimetosystemtime',
+            'datetimetotimestamp', 'datetostr', 'dayofweek', 'decodedate',
+            'decodedatefully', 'decodetime', 'deletefile', 'directoryexists',
+            'diskfree', 'disksize', 'disposestr', 'encodedate', 'encodetime',
+            'exceptionerrormessage', 'excludetrailingbackslash',
+            'excludetrailingpathdelimiter', 'expandfilename',
+            'expandfilenamecase', 'expanduncfilename', 'extractfiledir',
+            'extractfiledrive', 'extractfileext', 'extractfilename',
+            'extractfilepath', 'extractrelativepath', 'extractshortpathname',
+            'fileage', 'fileclose', 'filecreate', 'filedatetodatetime',
+            'fileexists', 'filegetattr', 'filegetdate', 'fileisreadonly',
+            'fileopen', 'fileread', 'filesearch', 'fileseek', 'filesetattr',
+            'filesetdate', 'filesetreadonly', 'filewrite', 'finalizepackage',
+            'findclose', 'findcmdlineswitch', 'findfirst', 'findnext',
+            'floattocurr', 'floattodatetime', 'floattodecimal', 'floattostr',
+            'floattostrf', 'floattotext', 'floattotextfmt', 'fmtloadstr',
+            'fmtstr', 'forcedirectories', 'format', 'formatbuf', 'formatcurr',
+            'formatdatetime', 'formatfloat', 'freeandnil', 'getcurrentdir',
+            'getenvironmentvariable', 'getfileversion', 'getformatsettings',
+            'getlocaleformatsettings', 'getmodulename', 'getpackagedescription',
+            'getpackageinfo', 'gettime', 'guidtostring', 'incamonth',
+            'includetrailingbackslash', 'includetrailingpathdelimiter',
+            'incmonth', 'initializepackage', 'interlockeddecrement',
+            'interlockedexchange', 'interlockedexchangeadd',
+            'interlockedincrement', 'inttohex', 'inttostr', 'isdelimiter',
+            'isequalguid', 'isleapyear', 'ispathdelimiter', 'isvalidident',
+            'languages', 'lastdelimiter', 'loadpackage', 'loadstr',
+            'lowercase', 'msecstotimestamp', 'newstr', 'nextcharindex', 'now',
+            'outofmemoryerror', 'quotedstr', 'raiselastoserror',
+            'raiselastwin32error', 'removedir', 'renamefile', 'replacedate',
+            'replacetime', 'safeloadlibrary', 'samefilename', 'sametext',
+            'setcurrentdir', 'showexception', 'sleep', 'stralloc', 'strbufsize',
+            'strbytetype', 'strcat', 'strcharlength', 'strcomp', 'strcopy',
+            'strdispose', 'strecopy', 'strend', 'strfmt', 'stricomp',
+            'stringreplace', 'stringtoguid', 'strlcat', 'strlcomp', 'strlcopy',
+            'strlen', 'strlfmt', 'strlicomp', 'strlower', 'strmove', 'strnew',
+            'strnextchar', 'strpas', 'strpcopy', 'strplcopy', 'strpos',
+            'strrscan', 'strscan', 'strtobool', 'strtobooldef', 'strtocurr',
+            'strtocurrdef', 'strtodate', 'strtodatedef', 'strtodatetime',
+            'strtodatetimedef', 'strtofloat', 'strtofloatdef', 'strtoint',
+            'strtoint64', 'strtoint64def', 'strtointdef', 'strtotime',
+            'strtotimedef', 'strupper', 'supports', 'syserrormessage',
+            'systemtimetodatetime', 'texttofloat', 'time', 'timestamptodatetime',
+            'timestamptomsecs', 'timetostr', 'trim', 'trimleft', 'trimright',
+            'tryencodedate', 'tryencodetime', 'tryfloattocurr', 'tryfloattodatetime',
+            'trystrtobool', 'trystrtocurr', 'trystrtodate', 'trystrtodatetime',
+            'trystrtofloat', 'trystrtoint', 'trystrtoint64', 'trystrtotime',
+            'unloadpackage', 'uppercase', 'widecomparestr', 'widecomparetext',
+            'widefmtstr', 'wideformat', 'wideformatbuf', 'widelowercase',
+            'widesamestr', 'widesametext', 'wideuppercase', 'win32check',
+            'wraptext'
+        ),
+        'Classes': (
+            'activateclassgroup', 'allocatehwnd', 'bintohex', 'checksynchronize',
+            'collectionsequal', 'countgenerations', 'deallocatehwnd', 'equalrect',
+            'extractstrings', 'findclass', 'findglobalcomponent', 'getclass',
+            'groupdescendantswith', 'hextobin', 'identtoint',
+            'initinheritedcomponent', 'inttoident', 'invalidpoint',
+            'isuniqueglobalcomponentname', 'linestart', 'objectbinarytotext',
+            'objectresourcetotext', 'objecttexttobinary', 'objecttexttoresource',
+            'pointsequal', 'readcomponentres', 'readcomponentresex',
+            'readcomponentresfile', 'rect', 'registerclass', 'registerclassalias',
+            'registerclasses', 'registercomponents', 'registerintegerconsts',
+            'registernoicon', 'registernonactivex', 'smallpoint', 'startclassgroup',
+            'teststreamformat', 'unregisterclass', 'unregisterclasses',
+            'unregisterintegerconsts', 'unregistermoduleclasses',
+            'writecomponentresfile'
+        ),
+        'Math': (
+            'arccos', 'arccosh', 'arccot', 'arccoth', 'arccsc', 'arccsch', 'arcsec',
+            'arcsech', 'arcsin', 'arcsinh', 'arctan2', 'arctanh', 'ceil',
+            'comparevalue', 'cosecant', 'cosh', 'cot', 'cotan', 'coth', 'csc',
+            'csch', 'cycletodeg', 'cycletograd', 'cycletorad', 'degtocycle',
+            'degtograd', 'degtorad', 'divmod', 'doubledecliningbalance',
+            'ensurerange', 'floor', 'frexp', 'futurevalue', 'getexceptionmask',
+            'getprecisionmode', 'getroundmode', 'gradtocycle', 'gradtodeg',
+            'gradtorad', 'hypot', 'inrange', 'interestpayment', 'interestrate',
+            'internalrateofreturn', 'intpower', 'isinfinite', 'isnan', 'iszero',
+            'ldexp', 'lnxp1', 'log10', 'log2', 'logn', 'max', 'maxintvalue',
+            'maxvalue', 'mean', 'meanandstddev', 'min', 'minintvalue', 'minvalue',
+            'momentskewkurtosis', 'netpresentvalue', 'norm', 'numberofperiods',
+            'payment', 'periodpayment', 'poly', 'popnstddev', 'popnvariance',
+            'power', 'presentvalue', 'radtocycle', 'radtodeg', 'radtograd',
+            'randg', 'randomrange', 'roundto', 'samevalue', 'sec', 'secant',
+            'sech', 'setexceptionmask', 'setprecisionmode', 'setroundmode',
+            'sign', 'simpleroundto', 'sincos', 'sinh', 'slndepreciation', 'stddev',
+            'sum', 'sumint', 'sumofsquares', 'sumsandsquares', 'syddepreciation',
+            'tan', 'tanh', 'totalvariance', 'variance'
+        )
+    }
+
+    ASM_REGISTERS = {
+        'ah', 'al', 'ax', 'bh', 'bl', 'bp', 'bx', 'ch', 'cl', 'cr0',
+        'cr1', 'cr2', 'cr3', 'cr4', 'cs', 'cx', 'dh', 'di', 'dl', 'dr0',
+        'dr1', 'dr2', 'dr3', 'dr4', 'dr5', 'dr6', 'dr7', 'ds', 'dx',
+        'eax', 'ebp', 'ebx', 'ecx', 'edi', 'edx', 'es', 'esi', 'esp',
+        'fs', 'gs', 'mm0', 'mm1', 'mm2', 'mm3', 'mm4', 'mm5', 'mm6',
+        'mm7', 'si', 'sp', 'ss', 'st0', 'st1', 'st2', 'st3', 'st4', 'st5',
+        'st6', 'st7', 'xmm0', 'xmm1', 'xmm2', 'xmm3', 'xmm4', 'xmm5',
+        'xmm6', 'xmm7'
+    }
+
+    ASM_INSTRUCTIONS = {
+        'aaa', 'aad', 'aam', 'aas', 'adc', 'add', 'and', 'arpl', 'bound',
+        'bsf', 'bsr', 'bswap', 'bt', 'btc', 'btr', 'bts', 'call', 'cbw',
+        'cdq', 'clc', 'cld', 'cli', 'clts', 'cmc', 'cmova', 'cmovae',
+        'cmovb', 'cmovbe', 'cmovc', 'cmovcxz', 'cmove', 'cmovg',
+        'cmovge', 'cmovl', 'cmovle', 'cmovna', 'cmovnae', 'cmovnb',
+        'cmovnbe', 'cmovnc', 'cmovne', 'cmovng', 'cmovnge', 'cmovnl',
+        'cmovnle', 'cmovno', 'cmovnp', 'cmovns', 'cmovnz', 'cmovo',
+        'cmovp', 'cmovpe', 'cmovpo', 'cmovs', 'cmovz', 'cmp', 'cmpsb',
+        'cmpsd', 'cmpsw', 'cmpxchg', 'cmpxchg486', 'cmpxchg8b', 'cpuid',
+        'cwd', 'cwde', 'daa', 'das', 'dec', 'div', 'emms', 'enter', 'hlt',
+        'ibts', 'icebp', 'idiv', 'imul', 'in', 'inc', 'insb', 'insd',
+        'insw', 'int', 'int01', 'int03', 'int1', 'int3', 'into', 'invd',
+        'invlpg', 'iret', 'iretd', 'iretw', 'ja', 'jae', 'jb', 'jbe',
+        'jc', 'jcxz', 'jcxz', 'je', 'jecxz', 'jg', 'jge', 'jl', 'jle',
+        'jmp', 'jna', 'jnae', 'jnb', 'jnbe', 'jnc', 'jne', 'jng', 'jnge',
+        'jnl', 'jnle', 'jno', 'jnp', 'jns', 'jnz', 'jo', 'jp', 'jpe',
+        'jpo', 'js', 'jz', 'lahf', 'lar', 'lcall', 'lds', 'lea', 'leave',
+        'les', 'lfs', 'lgdt', 'lgs', 'lidt', 'ljmp', 'lldt', 'lmsw',
+        'loadall', 'loadall286', 'lock', 'lodsb', 'lodsd', 'lodsw',
+        'loop', 'loope', 'loopne', 'loopnz', 'loopz', 'lsl', 'lss', 'ltr',
+        'mov', 'movd', 'movq', 'movsb', 'movsd', 'movsw', 'movsx',
+        'movzx', 'mul', 'neg', 'nop', 'not', 'or', 'out', 'outsb', 'outsd',
+        'outsw', 'pop', 'popa', 'popad', 'popaw', 'popf', 'popfd', 'popfw',
+        'push', 'pusha', 'pushad', 'pushaw', 'pushf', 'pushfd', 'pushfw',
+        'rcl', 'rcr', 'rdmsr', 'rdpmc', 'rdshr', 'rdtsc', 'rep', 'repe',
+        'repne', 'repnz', 'repz', 'ret', 'retf', 'retn', 'rol', 'ror',
+        'rsdc', 'rsldt', 'rsm', 'sahf', 'sal', 'salc', 'sar', 'sbb',
+        'scasb', 'scasd', 'scasw', 'seta', 'setae', 'setb', 'setbe',
+        'setc', 'setcxz', 'sete', 'setg', 'setge', 'setl', 'setle',
+        'setna', 'setnae', 'setnb', 'setnbe', 'setnc', 'setne', 'setng',
+        'setnge', 'setnl', 'setnle', 'setno', 'setnp', 'setns', 'setnz',
+        'seto', 'setp', 'setpe', 'setpo', 'sets', 'setz', 'sgdt', 'shl',
+        'shld', 'shr', 'shrd', 'sidt', 'sldt', 'smi', 'smint', 'smintold',
+        'smsw', 'stc', 'std', 'sti', 'stosb', 'stosd', 'stosw', 'str',
+        'sub', 'svdc', 'svldt', 'svts', 'syscall', 'sysenter', 'sysexit',
+        'sysret', 'test', 'ud1', 'ud2', 'umov', 'verr', 'verw', 'wait',
+        'wbinvd', 'wrmsr', 'wrshr', 'xadd', 'xbts', 'xchg', 'xlat',
+        'xlatb', 'xor'
+    }
+
+    PORTUGOL_KEYWORDS = (
+        'aleatorio',
+        'algoritmo',
+        'arquivo',
+        'ate',
+        'caso',
+        'cronometro',
+        'debug',
+        'e',
+        'eco',
+        'enquanto',
+        'entao',
+        'escolha',
+        'escreva',
+        'escreval',
+        'faca',
+        'falso',
+        'fimalgoritmo',
+        'fimenquanto',
+        'fimescolha',
+        'fimfuncao',
+        'fimpara',
+        'fimprocedimento',
+        'fimrepita',
+        'fimse',
+        'funcao',
+        'inicio',
+        'int',
+        'interrompa',
+        'leia',
+        'limpatela',
+        'mod',
+        'nao',
+        'ou',
+        'outrocaso',
+        'para',
+        'passo',
+        'pausa',
+        'procedimento',
+        'repita',
+        'retorne',
+        'se',
+        'senao',
+        'timer',
+        'var',
+        'vetor',
+        'verdadeiro',
+        'xou',
+        'div',
+        'mod',
+        'abs',
+        'arccos',
+        'arcsen',
+        'arctan',
+        'cos',
+        'cotan',
+        'Exp',
+        'grauprad',
+        'int',
+        'log',
+        'logn',
+        'pi',
+        'quad',
+        'radpgrau',
+        'raizq',
+        'rand',
+        'randi',
+        'sen',
+        'Tan',
+        'asc',
+        'carac',
+        'caracpnum',
+        'compr',
+        'copia',
+        'maiusc',
+        'minusc',
+        'numpcarac',
+        'pos',
+    )
+
+    PORTUGOL_BUILTIN_TYPES = {
+        'inteiro', 'real', 'caractere', 'logico'
+    }
+
+    def __init__(self, **options):
+        Lexer.__init__(self, **options)
+        self.keywords = set()
+        self.builtins = set()
+        if get_bool_opt(options, 'portugol', False):
+            self.keywords.update(self.PORTUGOL_KEYWORDS)
+            self.builtins.update(self.PORTUGOL_BUILTIN_TYPES)
+            self.is_portugol = True
+        else:
+            self.is_portugol = False
+
+            if get_bool_opt(options, 'turbopascal', True):
+                self.keywords.update(self.TURBO_PASCAL_KEYWORDS)
+            if get_bool_opt(options, 'delphi', True):
+                self.keywords.update(self.DELPHI_KEYWORDS)
+            if get_bool_opt(options, 'freepascal', True):
+                self.keywords.update(self.FREE_PASCAL_KEYWORDS)
+            for unit in get_list_opt(options, 'units', list(self.BUILTIN_UNITS)):
+                self.builtins.update(self.BUILTIN_UNITS[unit])
+
+    def get_tokens_unprocessed(self, text):
+        scanner = Scanner(text, re.DOTALL | re.MULTILINE | re.IGNORECASE)
+        stack = ['initial']
+        in_function_block = False
+        in_property_block = False
+        was_dot = False
+        next_token_is_function = False
+        next_token_is_property = False
+        collect_labels = False
+        block_labels = set()
+        brace_balance = [0, 0]
+
+        while not scanner.eos:
+            token = Error
+
+            if stack[-1] == 'initial':
+                if scanner.scan(r'\s+'):
+                    token = Whitespace
+                elif not self.is_portugol and scanner.scan(r'\{.*?\}|\(\*.*?\*\)'):
+                    if scanner.match.startswith('$'):
+                        token = Comment.Preproc
+                    else:
+                        token = Comment.Multiline
+                elif scanner.scan(r'//.*?$'):
+                    token = Comment.Single
+                elif self.is_portugol and scanner.scan(r'(<\-)|(>=)|(<=)|%|<|>|-|\+|\*|\=|(<>)|\/|\.|:|,'):
+                    token = Operator
+                elif not self.is_portugol and scanner.scan(r'[-+*\/=<>:;,.@\^]'):
+                    token = Operator
+                    # stop label highlighting on next ";"
+                    if collect_labels and scanner.match == ';':
+                        collect_labels = False
+                elif scanner.scan(r'[\(\)\[\]]+'):
+                    token = Punctuation
+                    # abort function naming ``foo = Function(...)``
+                    next_token_is_function = False
+                    # if we are in a function block we count the open
+                    # braces because ootherwise it's impossible to
+                    # determine the end of the modifier context
+                    if in_function_block or in_property_block:
+                        if scanner.match == '(':
+                            brace_balance[0] += 1
+                        elif scanner.match == ')':
+                            brace_balance[0] -= 1
+                        elif scanner.match == '[':
+                            brace_balance[1] += 1
+                        elif scanner.match == ']':
+                            brace_balance[1] -= 1
+                elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'):
+                    lowercase_name = scanner.match.lower()
+                    if lowercase_name == 'result':
+                        token = Name.Builtin.Pseudo
+                    elif lowercase_name in self.keywords:
+                        token = Keyword
+                        # if we are in a special block and a
+                        # block ending keyword occurs (and the parenthesis
+                        # is balanced) we end the current block context
+                        if self.is_portugol:
+                            if lowercase_name in ('funcao', 'procedimento'):
+                                in_function_block = True
+                                next_token_is_function = True
+                        else:
+                            if (in_function_block or in_property_block) and \
+                                    lowercase_name in self.BLOCK_KEYWORDS and \
+                                    brace_balance[0] <= 0 and \
+                                    brace_balance[1] <= 0:
+                                in_function_block = False
+                                in_property_block = False
+                                brace_balance = [0, 0]
+                                block_labels = set()
+                            if lowercase_name in ('label', 'goto'):
+                                collect_labels = True
+                            elif lowercase_name == 'asm':
+                                stack.append('asm')
+                            elif lowercase_name == 'property':
+                                in_property_block = True
+                                next_token_is_property = True
+                            elif lowercase_name in ('procedure', 'operator',
+                                                    'function', 'constructor',
+                                                    'destructor'):
+                                in_function_block = True
+                                next_token_is_function = True
+                    # we are in a function block and the current name
+                    # is in the set of registered modifiers. highlight
+                    # it as pseudo keyword
+                    elif not self.is_portugol and in_function_block and \
+                            lowercase_name in self.FUNCTION_MODIFIERS:
+                        token = Keyword.Pseudo
+                    # if we are in a property highlight some more
+                    # modifiers
+                    elif not self.is_portugol and in_property_block and \
+                            lowercase_name in ('read', 'write'):
+                        token = Keyword.Pseudo
+                        next_token_is_function = True
+                    # if the last iteration set next_token_is_function
+                    # to true we now want this name highlighted as
+                    # function. so do that and reset the state
+                    elif next_token_is_function:
+                        # Look if the next token is a dot. If yes it's
+                        # not a function, but a class name and the
+                        # part after the dot a function name
+                        if not self.is_portugol and scanner.test(r'\s*\.\s*'):
+                            token = Name.Class
+                        # it's not a dot, our job is done
+                        else:
+                            token = Name.Function
+                            next_token_is_function = False
+
+                            if self.is_portugol:
+                                block_labels.add(scanner.match.lower())
+
+                    # same for properties
+                    elif not self.is_portugol and next_token_is_property:
+                        token = Name.Property
+                        next_token_is_property = False
+                    # Highlight this token as label and add it
+                    # to the list of known labels
+                    elif not self.is_portugol and collect_labels:
+                        token = Name.Label
+                        block_labels.add(scanner.match.lower())
+                    # name is in list of known labels
+                    elif lowercase_name in block_labels:
+                        token = Name.Label
+                    elif self.is_portugol and lowercase_name in self.PORTUGOL_BUILTIN_TYPES:
+                        token = Keyword.Type
+                    elif not self.is_portugol and lowercase_name in self.BUILTIN_TYPES:
+                        token = Keyword.Type
+                    elif not self.is_portugol and lowercase_name in self.DIRECTIVES:
+                        token = Keyword.Pseudo
+                    # builtins are just builtins if the token
+                    # before isn't a dot
+                    elif not self.is_portugol and not was_dot and lowercase_name in self.builtins:
+                        token = Name.Builtin
+                    else:
+                        token = Name
+                elif self.is_portugol and scanner.scan(r"\""):
+                    token = String
+                    stack.append('string')
+                elif not self.is_portugol and scanner.scan(r"'"):
+                    token = String
+                    stack.append('string')
+                elif not self.is_portugol and scanner.scan(r'\#(\d+|\$[0-9A-Fa-f]+)'):
+                    token = String.Char
+                elif not self.is_portugol and scanner.scan(r'\$[0-9A-Fa-f]+'):
+                    token = Number.Hex
+                elif scanner.scan(r'\d+(?![eE]|\.[^.])'):
+                    token = Number.Integer
+                elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'):
+                    token = Number.Float
+                else:
+                    # if the stack depth is deeper than once, pop
+                    if len(stack) > 1:
+                        stack.pop()
+                    scanner.get_char()
+
+            elif stack[-1] == 'string':
+                if self.is_portugol:
+                    if scanner.scan(r"''"):
+                        token = String.Escape
+                    elif scanner.scan(r"\""):
+                        token = String
+                        stack.pop()
+                    elif scanner.scan(r"[^\"]*"):
+                        token = String
+                    else:
+                        scanner.get_char()
+                        stack.pop()
+                else:
+                    if scanner.scan(r"''"):
+                        token = String.Escape
+                    elif scanner.scan(r"'"):
+                        token = String
+                        stack.pop()
+                    elif scanner.scan(r"[^']*"):
+                        token = String
+                    else:
+                        scanner.get_char()
+                        stack.pop()
+            elif not self.is_portugol and stack[-1] == 'asm':
+                if scanner.scan(r'\s+'):
+                    token = Whitespace
+                elif scanner.scan(r'end'):
+                    token = Keyword
+                    stack.pop()
+                elif scanner.scan(r'\{.*?\}|\(\*.*?\*\)'):
+                    if scanner.match.startswith('$'):
+                        token = Comment.Preproc
+                    else:
+                        token = Comment.Multiline
+                elif scanner.scan(r'//.*?$'):
+                    token = Comment.Single
+                elif scanner.scan(r"'"):
+                    token = String
+                    stack.append('string')
+                elif scanner.scan(r'@@[A-Za-z_][A-Za-z_0-9]*'):
+                    token = Name.Label
+                elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'):
+                    lowercase_name = scanner.match.lower()
+                    if lowercase_name in self.ASM_INSTRUCTIONS:
+                        token = Keyword
+                    elif lowercase_name in self.ASM_REGISTERS:
+                        token = Name.Builtin
+                    else:
+                        token = Name
+                elif scanner.scan(r'[-+*\/=<>:;,.@\^]+'):
+                    token = Operator
+                elif scanner.scan(r'[\(\)\[\]]+'):
+                    token = Punctuation
+                elif scanner.scan(r'\$[0-9A-Fa-f]+'):
+                    token = Number.Hex
+                elif scanner.scan(r'\d+(?![eE]|\.[^.])'):
+                    token = Number.Integer
+                elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'):
+                    token = Number.Float
+                else:
+                    scanner.get_char()
+                    stack.pop()
+
+            # save the dot!!!11
+            if not self.is_portugol and scanner.match.strip():
+                was_dot = scanner.match == '.'
+
+            yield scanner.start_pos, token, scanner.match or ''
diff --git a/local_packages/pygments/lexers/pawn.py b/local_packages/pygments/lexers/pawn.py
new file mode 100644
index 0000000..9b4234c
--- /dev/null
+++ b/local_packages/pygments/lexers/pawn.py
@@ -0,0 +1,202 @@
+"""
+    pygments.lexers.pawn
+    ~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for the Pawn languages.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+    Number, Punctuation
+from pygments.util import get_bool_opt
+
+__all__ = ['SourcePawnLexer', 'PawnLexer']
+
+
+class SourcePawnLexer(RegexLexer):
+    """
+    For SourcePawn source code with preprocessor directives.
+    """
+    name = 'SourcePawn'
+    aliases = ['sp']
+    filenames = ['*.sp']
+    mimetypes = ['text/x-sourcepawn']
+    url = 'https://github.com/alliedmodders/sourcepawn'
+    version_added = '1.6'
+
+    #: optional Comment or Whitespace
+    _ws = r'(?:\s|//.*?\n|/\*.*?\*/)+'
+    #: only one /* */ style comment
+    _ws1 = r'\s*(?:/[*].*?[*]/\s*)*'
+
+    tokens = {
+        'root': [
+            # preprocessor directives: without whitespace
+            (r'^#if\s+0', Comment.Preproc, 'if0'),
+            ('^#', Comment.Preproc, 'macro'),
+            # or with whitespace
+            ('^' + _ws1 + r'#if\s+0', Comment.Preproc, 'if0'),
+            ('^' + _ws1 + '#', Comment.Preproc, 'macro'),
+            (r'\n', Text),
+            (r'\s+', Text),
+            (r'\\\n', Text),  # line continuation
+            (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
+            (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
+            (r'[{}]', Punctuation),
+            (r'L?"', String, 'string'),
+            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
+            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
+            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
+            (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
+            (r'0[0-7]+[LlUu]*', Number.Oct),
+            (r'\d+[LlUu]*', Number.Integer),
+            (r'[~!%^&*+=|?:<>/-]', Operator),
+            (r'[()\[\],.;]', Punctuation),
+            (r'(case|const|continue|native|'
+             r'default|else|enum|for|if|new|operator|'
+             r'public|return|sizeof|static|decl|struct|switch)\b', Keyword),
+            (r'(bool|Float)\b', Keyword.Type),
+            (r'(true|false)\b', Keyword.Constant),
+            (r'[a-zA-Z_]\w*', Name),
+        ],
+        'string': [
+            (r'"', String, '#pop'),
+            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
+            (r'[^\\"\n]+', String),  # all other characters
+            (r'\\\n', String),       # line continuation
+            (r'\\', String),         # stray backslash
+        ],
+        'macro': [
+            (r'[^/\n]+', Comment.Preproc),
+            (r'/\*(.|\n)*?\*/', Comment.Multiline),
+            (r'//.*?\n', Comment.Single, '#pop'),
+            (r'/', Comment.Preproc),
+            (r'(?<=\\)\n', Comment.Preproc),
+            (r'\n', Comment.Preproc, '#pop'),
+        ],
+        'if0': [
+            (r'^\s*#if.*?(?/-]', Operator),
+            (r'[()\[\],.;]', Punctuation),
+            (r'(switch|case|default|const|new|static|char|continue|break|'
+             r'if|else|for|while|do|operator|enum|'
+             r'public|return|sizeof|tagof|state|goto)\b', Keyword),
+            (r'(bool|Float)\b', Keyword.Type),
+            (r'(true|false)\b', Keyword.Constant),
+            (r'[a-zA-Z_]\w*', Name),
+        ],
+        'string': [
+            (r'"', String, '#pop'),
+            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
+            (r'[^\\"\n]+', String),  # all other characters
+            (r'\\\n', String),       # line continuation
+            (r'\\', String),         # stray backslash
+        ],
+        'macro': [
+            (r'[^/\n]+', Comment.Preproc),
+            (r'/\*(.|\n)*?\*/', Comment.Multiline),
+            (r'//.*?\n', Comment.Single, '#pop'),
+            (r'/', Comment.Preproc),
+            (r'(?<=\\)\n', Comment.Preproc),
+            (r'\n', Comment.Preproc, '#pop'),
+        ],
+        'if0': [
+            (r'^\s*#if.*?(?<-]', Operator),
+            (r'[a-zA-Z][a-zA-Z0-9_-]*', Name),
+            (r'\?[a-zA-Z][a-zA-Z0-9_-]*', Name.Variable),
+            (r'[0-9]+\.[0-9]+', Number.Float),
+            (r'[0-9]+', Number.Integer),
+        ],
+        'keywords': [
+            (words((
+                ':requirements', ':types', ':constants',
+                ':predicates', ':functions', ':action', ':agent',
+                ':parameters', ':precondition', ':effect',
+                ':durative-action', ':duration', ':condition',
+                ':derived', ':domain', ':objects', ':init',
+                ':goal', ':metric', ':length', ':serial', ':parallel',
+                # the following are requirements
+                ':strips', ':typing', ':negative-preconditions',
+                ':disjunctive-preconditions', ':equality',
+                ':existential-preconditions', ':universal-preconditions',
+                ':conditional-effects', ':fluents', ':numeric-fluents',
+                ':object-fluents', ':adl', ':durative-actions',
+                ':continuous-effects', ':derived-predicates',
+                ':time-intial-literals', ':preferences',
+                ':constraints', ':action-costs', ':multi-agent',
+                ':unfactored-privacy', ':factored-privacy',
+                ':non-deterministic'
+                ), suffix=r'\b'), Keyword)
+        ],
+        'builtins': [
+            (words((
+                'define', 'domain', 'object', 'either', 'and',
+                'forall', 'preference', 'imply', 'or', 'exists',
+                'not', 'when', 'assign', 'scale-up', 'scale-down',
+                'increase', 'decrease', 'at', 'over', 'start',
+                'end', 'all', 'problem', 'always', 'sometime',
+                'within', 'at-most-once', 'sometime-after',
+                'sometime-before', 'always-within', 'hold-during',
+                'hold-after', 'minimize', 'maximize',
+                'total-time', 'is-violated'), suffix=r'\b'),
+                Name.Builtin)
+        ]
+    }
+
diff --git a/local_packages/pygments/lexers/perl.py b/local_packages/pygments/lexers/perl.py
new file mode 100644
index 0000000..7448d2a
--- /dev/null
+++ b/local_packages/pygments/lexers/perl.py
@@ -0,0 +1,733 @@
+"""
+    pygments.lexers.perl
+    ~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for Perl, Raku and related languages.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, bygroups, \
+    using, this, default, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+    Number, Punctuation, Whitespace
+from pygments.util import shebang_matches
+
+__all__ = ['PerlLexer', 'Perl6Lexer']
+
+
+class PerlLexer(RegexLexer):
+    """
+    For Perl source code.
+    """
+
+    name = 'Perl'
+    url = 'https://www.perl.org'
+    aliases = ['perl', 'pl']
+    filenames = ['*.pl', '*.pm', '*.t', '*.perl']
+    mimetypes = ['text/x-perl', 'application/x-perl']
+    version_added = ''
+
+    flags = re.DOTALL | re.MULTILINE
+    # TODO: give this to a perl guy who knows how to parse perl...
+    tokens = {
+        'balanced-regex': [
+            (r'/(\\\\|\\[^\\]|[^\\/])*/[egimosx]*', String.Regex, '#pop'),
+            (r'!(\\\\|\\[^\\]|[^\\!])*![egimosx]*', String.Regex, '#pop'),
+            (r'\\(\\\\|[^\\])*\\[egimosx]*', String.Regex, '#pop'),
+            (r'\{(\\\\|\\[^\\]|[^\\}])*\}[egimosx]*', String.Regex, '#pop'),
+            (r'<(\\\\|\\[^\\]|[^\\>])*>[egimosx]*', String.Regex, '#pop'),
+            (r'\[(\\\\|\\[^\\]|[^\\\]])*\][egimosx]*', String.Regex, '#pop'),
+            (r'\((\\\\|\\[^\\]|[^\\)])*\)[egimosx]*', String.Regex, '#pop'),
+            (r'@(\\\\|\\[^\\]|[^\\@])*@[egimosx]*', String.Regex, '#pop'),
+            (r'%(\\\\|\\[^\\]|[^\\%])*%[egimosx]*', String.Regex, '#pop'),
+            (r'\$(\\\\|\\[^\\]|[^\\$])*\$[egimosx]*', String.Regex, '#pop'),
+        ],
+        'root': [
+            (r'\A\#!.+?$', Comment.Hashbang),
+            (r'\#.*?$', Comment.Single),
+            (r'^=[a-zA-Z0-9]+\s+.*?\n=cut', Comment.Multiline),
+            (words((
+                'case', 'continue', 'do', 'else', 'elsif', 'for', 'foreach',
+                'if', 'last', 'my', 'next', 'our', 'redo', 'reset', 'then',
+                'unless', 'until', 'while', 'print', 'new', 'BEGIN',
+                'CHECK', 'INIT', 'END', 'return'), suffix=r'\b'),
+             Keyword),
+            (r'(format)(\s+)(\w+)(\s*)(=)(\s*\n)',
+             bygroups(Keyword, Whitespace, Name, Whitespace, Punctuation, Whitespace), 'format'),
+            (r'(eq|lt|gt|le|ge|ne|not|and|or|cmp)\b', Operator.Word),
+            # common delimiters
+            (r's/(\\\\|\\[^\\]|[^\\/])*/(\\\\|\\[^\\]|[^\\/])*/[egimosx]*',
+                String.Regex),
+            (r's!(\\\\|\\!|[^!])*!(\\\\|\\!|[^!])*![egimosx]*', String.Regex),
+            (r's\\(\\\\|[^\\])*\\(\\\\|[^\\])*\\[egimosx]*', String.Regex),
+            (r's@(\\\\|\\[^\\]|[^\\@])*@(\\\\|\\[^\\]|[^\\@])*@[egimosx]*',
+                String.Regex),
+            (r's%(\\\\|\\[^\\]|[^\\%])*%(\\\\|\\[^\\]|[^\\%])*%[egimosx]*',
+                String.Regex),
+            # balanced delimiters
+            (r's\{(\\\\|\\[^\\]|[^\\}])*\}\s*', String.Regex, 'balanced-regex'),
+            (r's<(\\\\|\\[^\\]|[^\\>])*>\s*', String.Regex, 'balanced-regex'),
+            (r's\[(\\\\|\\[^\\]|[^\\\]])*\]\s*', String.Regex,
+                'balanced-regex'),
+            (r's\((\\\\|\\[^\\]|[^\\)])*\)\s*', String.Regex,
+                'balanced-regex'),
+
+            (r'm?/(\\\\|\\[^\\]|[^\\/\n])*/[gcimosx]*', String.Regex),
+            (r'm(?=[/!\\{<\[(@%$])', String.Regex, 'balanced-regex'),
+            (r'((?<==~)|(?<=\())\s*/(\\\\|\\[^\\]|[^\\/])*/[gcimosx]*',
+                String.Regex),
+            (r'\s+', Whitespace),
+            (words((
+                'abs', 'accept', 'alarm', 'atan2', 'bind', 'binmode', 'bless', 'caller', 'chdir',
+                'chmod', 'chomp', 'chop', 'chown', 'chr', 'chroot', 'close', 'closedir', 'connect',
+                'continue', 'cos', 'crypt', 'dbmclose', 'dbmopen', 'defined', 'delete', 'die',
+                'dump', 'each', 'endgrent', 'endhostent', 'endnetent', 'endprotoent',
+                'endpwent', 'endservent', 'eof', 'eval', 'exec', 'exists', 'exit', 'exp', 'fcntl',
+                'fileno', 'flock', 'fork', 'format', 'formline', 'getc', 'getgrent', 'getgrgid',
+                'getgrnam', 'gethostbyaddr', 'gethostbyname', 'gethostent', 'getlogin',
+                'getnetbyaddr', 'getnetbyname', 'getnetent', 'getpeername', 'getpgrp',
+                'getppid', 'getpriority', 'getprotobyname', 'getprotobynumber',
+                'getprotoent', 'getpwent', 'getpwnam', 'getpwuid', 'getservbyname',
+                'getservbyport', 'getservent', 'getsockname', 'getsockopt', 'glob', 'gmtime',
+                'goto', 'grep', 'hex', 'import', 'index', 'int', 'ioctl', 'join', 'keys', 'kill', 'last',
+                'lc', 'lcfirst', 'length', 'link', 'listen', 'local', 'localtime', 'log', 'lstat',
+                'map', 'mkdir', 'msgctl', 'msgget', 'msgrcv', 'msgsnd', 'my', 'next', 'oct', 'open',
+                'opendir', 'ord', 'our', 'pack', 'pipe', 'pop', 'pos', 'printf',
+                'prototype', 'push', 'quotemeta', 'rand', 'read', 'readdir',
+                'readline', 'readlink', 'readpipe', 'recv', 'redo', 'ref', 'rename',
+                'reverse', 'rewinddir', 'rindex', 'rmdir', 'scalar', 'seek', 'seekdir',
+                'select', 'semctl', 'semget', 'semop', 'send', 'setgrent', 'sethostent', 'setnetent',
+                'setpgrp', 'setpriority', 'setprotoent', 'setpwent', 'setservent',
+                'setsockopt', 'shift', 'shmctl', 'shmget', 'shmread', 'shmwrite', 'shutdown',
+                'sin', 'sleep', 'socket', 'socketpair', 'sort', 'splice', 'split', 'sprintf', 'sqrt',
+                'srand', 'stat', 'study', 'substr', 'symlink', 'syscall', 'sysopen', 'sysread',
+                'sysseek', 'system', 'syswrite', 'tell', 'telldir', 'tie', 'tied', 'time', 'times', 'tr',
+                'truncate', 'uc', 'ucfirst', 'umask', 'undef', 'unlink', 'unpack', 'unshift', 'untie',
+                'utime', 'values', 'vec', 'wait', 'waitpid', 'wantarray', 'warn', 'write'), suffix=r'\b'),
+             Name.Builtin),
+            (r'((__(DATA|DIE|WARN)__)|(STD(IN|OUT|ERR)))\b', Name.Builtin.Pseudo),
+            (r'(<<)([\'"]?)([a-zA-Z_]\w*)(\2;?\n.*?\n)(\3)(\n)',
+             bygroups(String, String, String.Delimiter, String, String.Delimiter, Whitespace)),
+            (r'__END__', Comment.Preproc, 'end-part'),
+            (r'\$\^[ADEFHILMOPSTWX]', Name.Variable.Global),
+            (r"\$[\\\"\[\]'&`+*.,;=%~?@$!<>(^|/-](?!\w)", Name.Variable.Global),
+            (r'[$@%#]+', Name.Variable, 'varname'),
+            (r'0_?[0-7]+(_[0-7]+)*', Number.Oct),
+            (r'0x[0-9A-Fa-f]+(_[0-9A-Fa-f]+)*', Number.Hex),
+            (r'0b[01]+(_[01]+)*', Number.Bin),
+            (r'(?i)(\d*(_\d*)*\.\d+(_\d*)*|\d+(_\d*)*\.\d+(_\d*)*)(e[+-]?\d+)?',
+             Number.Float),
+            (r'(?i)\d+(_\d*)*e[+-]?\d+(_\d*)*', Number.Float),
+            (r'\d+(_\d+)*', Number.Integer),
+            (r"'(\\\\|\\[^\\]|[^'\\])*'", String),
+            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
+            (r'`(\\\\|\\[^\\]|[^`\\])*`', String.Backtick),
+            (r'<([^\s>]+)>', String.Regex),
+            (r'(q|qq|qw|qr|qx)\{', String.Other, 'cb-string'),
+            (r'(q|qq|qw|qr|qx)\(', String.Other, 'rb-string'),
+            (r'(q|qq|qw|qr|qx)\[', String.Other, 'sb-string'),
+            (r'(q|qq|qw|qr|qx)\<', String.Other, 'lt-string'),
+            (r'(q|qq|qw|qr|qx)([\W_])(.|\n)*?\2', String.Other),
+            (r'(package)(\s+)([a-zA-Z_]\w*(?:::[a-zA-Z_]\w*)*)',
+             bygroups(Keyword, Whitespace, Name.Namespace)),
+            (r'(use|require|no)(\s+)([a-zA-Z_]\w*(?:::[a-zA-Z_]\w*)*)',
+             bygroups(Keyword, Whitespace, Name.Namespace)),
+            (r'(sub)(\s+)', bygroups(Keyword, Whitespace), 'funcname'),
+            (words((
+                'no', 'package', 'require', 'use'), suffix=r'\b'),
+             Keyword),
+            (r'(\[\]|\*\*|::|<<|>>|>=|<=>|<=|={3}|!=|=~|'
+             r'!~|&&?|\|\||\.{1,3})', Operator),
+            (r'[-+/*%=<>&^|!\\~]=?', Operator),
+            (r'[()\[\]:;,<>/?{}]', Punctuation),  # yes, there's no shortage
+                                                  # of punctuation in Perl!
+            (r'(?=\w)', Name, 'name'),
+        ],
+        'format': [
+            (r'\.\n', String.Interpol, '#pop'),
+            (r'[^\n]*\n', String.Interpol),
+        ],
+        'varname': [
+            (r'\s+', Whitespace),
+            (r'\{', Punctuation, '#pop'),    # hash syntax?
+            (r'\)|,', Punctuation, '#pop'),  # argument specifier
+            (r'\w+::', Name.Namespace),
+            (r'[\w:]+', Name.Variable, '#pop'),
+        ],
+        'name': [
+            (r'[a-zA-Z_]\w*(::[a-zA-Z_]\w*)*(::)?(?=\s*->)', Name.Namespace, '#pop'),
+            (r'[a-zA-Z_]\w*(::[a-zA-Z_]\w*)*::', Name.Namespace, '#pop'),
+            (r'[\w:]+', Name, '#pop'),
+            (r'[A-Z_]+(?=\W)', Name.Constant, '#pop'),
+            (r'(?=\W)', Text, '#pop'),
+        ],
+        'funcname': [
+            (r'[a-zA-Z_]\w*[!?]?', Name.Function),
+            (r'\s+', Whitespace),
+            # argument declaration
+            (r'(\([$@%]*\))(\s*)', bygroups(Punctuation, Whitespace)),
+            (r';', Punctuation, '#pop'),
+            (r'.*?\{', Punctuation, '#pop'),
+        ],
+        'cb-string': [
+            (r'\\[{}\\]', String.Other),
+            (r'\\', String.Other),
+            (r'\{', String.Other, 'cb-string'),
+            (r'\}', String.Other, '#pop'),
+            (r'[^{}\\]+', String.Other)
+        ],
+        'rb-string': [
+            (r'\\[()\\]', String.Other),
+            (r'\\', String.Other),
+            (r'\(', String.Other, 'rb-string'),
+            (r'\)', String.Other, '#pop'),
+            (r'[^()]+', String.Other)
+        ],
+        'sb-string': [
+            (r'\\[\[\]\\]', String.Other),
+            (r'\\', String.Other),
+            (r'\[', String.Other, 'sb-string'),
+            (r'\]', String.Other, '#pop'),
+            (r'[^\[\]]+', String.Other)
+        ],
+        'lt-string': [
+            (r'\\[<>\\]', String.Other),
+            (r'\\', String.Other),
+            (r'\<', String.Other, 'lt-string'),
+            (r'\>', String.Other, '#pop'),
+            (r'[^<>]+', String.Other)
+        ],
+        'end-part': [
+            (r'.+', Comment.Preproc, '#pop')
+        ]
+    }
+
+    def analyse_text(text):
+        if shebang_matches(text, r'perl'):
+            return True
+
+        result = 0
+
+        if re.search(r'(?:my|our)\s+[$@%(]', text):
+            result += 0.9
+
+        if ':=' in text:
+            # := is not valid Perl, but it appears in unicon, so we should
+            # become less confident if we think we found Perl with :=
+            result /= 2
+
+        return result
+
+
+class Perl6Lexer(ExtendedRegexLexer):
+    """
+    For Raku (a.k.a. Perl 6) source code.
+    """
+
+    name = 'Perl6'
+    url = 'https://www.raku.org'
+    aliases = ['perl6', 'pl6', 'raku']
+    filenames = ['*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6',
+                 '*.6pm', '*.p6m', '*.pm6', '*.t', '*.raku', '*.rakumod',
+                 '*.rakutest', '*.rakudoc']
+    mimetypes = ['text/x-perl6', 'application/x-perl6']
+    version_added = '2.0'
+    flags = re.MULTILINE | re.DOTALL
+
+    PERL6_IDENTIFIER_RANGE = r"['\w:-]"
+
+    PERL6_KEYWORDS = (
+        #Phasers
+        'BEGIN','CATCH','CHECK','CLOSE','CONTROL','DOC','END','ENTER','FIRST',
+        'INIT','KEEP','LAST','LEAVE','NEXT','POST','PRE','QUIT','UNDO',
+        #Keywords
+        'anon','augment','but','class','constant','default','does','else',
+        'elsif','enum','for','gather','given','grammar','has','if','import',
+        'is','let','loop','made','make','method','module','multi','my','need',
+        'orwith','our','proceed','proto','repeat','require','return',
+        'return-rw','returns','role','rule','state','sub','submethod','subset',
+        'succeed','supersede','token','try','unit','unless','until','use',
+        'when','while','with','without',
+        #Traits
+        'export','native','repr','required','rw','symbol',
+    )
+
+    PERL6_BUILTINS = (
+        'ACCEPTS','abs','abs2rel','absolute','accept','accessed','acos',
+        'acosec','acosech','acosh','acotan','acotanh','acquire','act','action',
+        'actions','add','add_attribute','add_enum_value','add_fallback',
+        'add_method','add_parent','add_private_method','add_role','add_trustee',
+        'adverb','after','all','allocate','allof','allowed','alternative-names',
+        'annotations','antipair','antipairs','any','anyof','app_lifetime',
+        'append','arch','archname','args','arity','Array','asec','asech','asin',
+        'asinh','ASSIGN-KEY','ASSIGN-POS','assuming','ast','at','atan','atan2',
+        'atanh','AT-KEY','atomic-assign','atomic-dec-fetch','atomic-fetch',
+        'atomic-fetch-add','atomic-fetch-dec','atomic-fetch-inc',
+        'atomic-fetch-sub','atomic-inc-fetch','AT-POS','attributes','auth',
+        'await','backtrace','Bag','BagHash','bail-out','base','basename',
+        'base-repeating','batch','BIND-KEY','BIND-POS','bind-stderr',
+        'bind-stdin','bind-stdout','bind-udp','bits','bless','block','Bool',
+        'bool-only','bounds','break','Bridge','broken','BUILD','build-date',
+        'bytes','cache','callframe','calling-package','CALL-ME','callsame',
+        'callwith','can','cancel','candidates','cando','can-ok','canonpath',
+        'caps','caption','Capture','cas','catdir','categorize','categorize-list',
+        'catfile','catpath','cause','ceiling','cglobal','changed','Channel',
+        'chars','chdir','child','child-name','child-typename','chmod','chomp',
+        'chop','chr','chrs','chunks','cis','classify','classify-list','cleanup',
+        'clone','close','closed','close-stdin','cmp-ok','code','codes','collate',
+        'column','comb','combinations','command','comment','compiler','Complex',
+        'compose','compose_type','composer','condition','config',
+        'configure_destroy','configure_type_checking','conj','connect',
+        'constraints','construct','contains','contents','copy','cos','cosec',
+        'cosech','cosh','cotan','cotanh','count','count-only','cpu-cores',
+        'cpu-usage','CREATE','create_type','cross','cue','curdir','curupdir','d',
+        'Date','DateTime','day','daycount','day-of-month','day-of-week',
+        'day-of-year','days-in-month','declaration','decode','decoder','deepmap',
+        'default','defined','DEFINITE','delayed','DELETE-KEY','DELETE-POS',
+        'denominator','desc','DESTROY','destroyers','devnull','diag',
+        'did-you-mean','die','dies-ok','dir','dirname','dir-sep','DISTROnames',
+        'do','does','does-ok','done','done-testing','duckmap','dynamic','e',
+        'eager','earlier','elems','emit','enclosing','encode','encoder',
+        'encoding','end','ends-with','enum_from_value','enum_value_list',
+        'enum_values','enums','eof','EVAL','eval-dies-ok','EVALFILE',
+        'eval-lives-ok','exception','excludes-max','excludes-min','EXISTS-KEY',
+        'EXISTS-POS','exit','exitcode','exp','expected','explicitly-manage',
+        'expmod','extension','f','fail','fails-like','fc','feature','file',
+        'filename','find_method','find_method_qualified','finish','first','flat',
+        'flatmap','flip','floor','flunk','flush','fmt','format','formatter',
+        'freeze','from','from-list','from-loop','from-posix','full',
+        'full-barrier','get','get_value','getc','gist','got','grab','grabpairs',
+        'grep','handle','handled','handles','hardware','has_accessor','Hash',
+        'head','headers','hh-mm-ss','hidden','hides','hour','how','hyper','id',
+        'illegal','im','in','indent','index','indices','indir','infinite',
+        'infix','infix:<+>','infix:<->','install_method_cache','Instant',
+        'instead','Int','int-bounds','interval','in-timezone','invalid-str',
+        'invert','invocant','IO','IO::Notification.watch-path','is_trusted',
+        'is_type','isa','is-absolute','isa-ok','is-approx','is-deeply',
+        'is-hidden','is-initial-thread','is-int','is-lazy','is-leap-year',
+        'isNaN','isnt','is-prime','is-relative','is-routine','is-setting',
+        'is-win','item','iterator','join','keep','kept','KERNELnames','key',
+        'keyof','keys','kill','kv','kxxv','l','lang','last','lastcall','later',
+        'lazy','lc','leading','level','like','line','lines','link','List',
+        'listen','live','lives-ok','local','lock','log','log10','lookup','lsb',
+        'made','MAIN','make','Map','match','max','maxpairs','merge','message',
+        'method','method_table','methods','migrate','min','minmax','minpairs',
+        'minute','misplaced','Mix','MixHash','mkdir','mode','modified','month',
+        'move','mro','msb','multi','multiness','my','name','named','named_names',
+        'narrow','nativecast','native-descriptor','nativesizeof','new','new_type',
+        'new-from-daycount','new-from-pairs','next','nextcallee','next-handle',
+        'nextsame','nextwith','NFC','NFD','NFKC','NFKD','nl-in','nl-out',
+        'nodemap','nok','none','norm','not','note','now','nude','Num',
+        'numerator','Numeric','of','offset','offset-in-hours','offset-in-minutes',
+        'ok','old','on-close','one','on-switch','open','opened','operation',
+        'optional','ord','ords','orig','os-error','osname','out-buffer','pack',
+        'package','package-kind','package-name','packages','pair','pairs',
+        'pairup','parameter','params','parent','parent-name','parents','parse',
+        'parse-base','parsefile','parse-names','parts','pass','path','path-sep',
+        'payload','peer-host','peer-port','periods','perl','permutations','phaser',
+        'pick','pickpairs','pid','placeholder','plan','plus','polar','poll',
+        'polymod','pop','pos','positional','posix','postfix','postmatch',
+        'precomp-ext','precomp-target','pred','prefix','prematch','prepend',
+        'print','printf','print-nl','print-to','private','private_method_table',
+        'proc','produce','Promise','prompt','protect','pull-one','push',
+        'push-all','push-at-least','push-exactly','push-until-lazy','put',
+        'qualifier-type','quit','r','race','radix','rand','range','Rat','raw',
+        're','read','readchars','readonly','ready','Real','reallocate','reals',
+        'reason','rebless','receive','recv','redispatcher','redo','reduce',
+        'rel2abs','relative','release','rename','repeated','replacement',
+        'report','reserved','resolve','restore','result','resume','rethrow',
+        'reverse','right','rindex','rmdir','role','roles_to_compose','rolish',
+        'roll','rootdir','roots','rotate','rotor','round','roundrobin',
+        'routine-type','run','rwx','s','samecase','samemark','samewith','say',
+        'schedule-on','scheduler','scope','sec','sech','second','seek','self',
+        'send','Set','set_hidden','set_name','set_package','set_rw','set_value',
+        'SetHash','set-instruments','setup_finalization','shape','share','shell',
+        'shift','sibling','sigil','sign','signal','signals','signature','sin',
+        'sinh','sink','sink-all','skip','skip-at-least','skip-at-least-pull-one',
+        'skip-one','skip-rest','sleep','sleep-timer','sleep-until','Slip','slurp',
+        'slurp-rest','slurpy','snap','snapper','so','socket-host','socket-port',
+        'sort','source','source-package','spawn','SPEC','splice','split',
+        'splitdir','splitpath','sprintf','spurt','sqrt','squish','srand','stable',
+        'start','started','starts-with','status','stderr','stdout','Str',
+        'sub_signature','subbuf','subbuf-rw','subname','subparse','subst',
+        'subst-mutate','substr','substr-eq','substr-rw','subtest','succ','sum',
+        'Supply','symlink','t','tail','take','take-rw','tan','tanh','tap',
+        'target','target-name','tc','tclc','tell','then','throttle','throw',
+        'throws-like','timezone','tmpdir','to','today','todo','toggle','to-posix',
+        'total','trailing','trans','tree','trim','trim-leading','trim-trailing',
+        'truncate','truncated-to','trusts','try_acquire','trying','twigil','type',
+        'type_captures','typename','uc','udp','uncaught_handler','unimatch',
+        'uniname','uninames','uniparse','uniprop','uniprops','unique','unival',
+        'univals','unlike','unlink','unlock','unpack','unpolar','unshift',
+        'unwrap','updir','USAGE','use-ok','utc','val','value','values','VAR',
+        'variable','verbose-config','version','VMnames','volume','vow','w','wait',
+        'warn','watch','watch-path','week','weekday-of-month','week-number',
+        'week-year','WHAT','when','WHERE','WHEREFORE','WHICH','WHO',
+        'whole-second','WHY','wordcase','words','workaround','wrap','write',
+        'write-to','x','yada','year','yield','yyyy-mm-dd','z','zip','zip-latest',
+
+    )
+
+    PERL6_BUILTIN_CLASSES = (
+        #Booleans
+        'False','True',
+        #Classes
+        'Any','Array','Associative','AST','atomicint','Attribute','Backtrace',
+        'Backtrace::Frame','Bag','Baggy','BagHash','Blob','Block','Bool','Buf',
+        'Callable','CallFrame','Cancellation','Capture','CArray','Channel','Code',
+        'compiler','Complex','ComplexStr','Cool','CurrentThreadScheduler',
+        'Cursor','Date','Dateish','DateTime','Distro','Duration','Encoding',
+        'Exception','Failure','FatRat','Grammar','Hash','HyperWhatever','Instant',
+        'Int','int16','int32','int64','int8','IntStr','IO','IO::ArgFiles',
+        'IO::CatHandle','IO::Handle','IO::Notification','IO::Path',
+        'IO::Path::Cygwin','IO::Path::QNX','IO::Path::Unix','IO::Path::Win32',
+        'IO::Pipe','IO::Socket','IO::Socket::Async','IO::Socket::INET','IO::Spec',
+        'IO::Spec::Cygwin','IO::Spec::QNX','IO::Spec::Unix','IO::Spec::Win32',
+        'IO::Special','Iterable','Iterator','Junction','Kernel','Label','List',
+        'Lock','Lock::Async','long','longlong','Macro','Map','Match',
+        'Metamodel::AttributeContainer','Metamodel::C3MRO','Metamodel::ClassHOW',
+        'Metamodel::EnumHOW','Metamodel::Finalization','Metamodel::MethodContainer',
+        'Metamodel::MROBasedMethodDispatch','Metamodel::MultipleInheritance',
+        'Metamodel::Naming','Metamodel::Primitives','Metamodel::PrivateMethodContainer',
+        'Metamodel::RoleContainer','Metamodel::Trusting','Method','Mix','MixHash',
+        'Mixy','Mu','NFC','NFD','NFKC','NFKD','Nil','Num','num32','num64',
+        'Numeric','NumStr','ObjAt','Order','Pair','Parameter','Perl','Pod::Block',
+        'Pod::Block::Code','Pod::Block::Comment','Pod::Block::Declarator',
+        'Pod::Block::Named','Pod::Block::Para','Pod::Block::Table','Pod::Heading',
+        'Pod::Item','Pointer','Positional','PositionalBindFailover','Proc',
+        'Proc::Async','Promise','Proxy','PseudoStash','QuantHash','Range','Rat',
+        'Rational','RatStr','Real','Regex','Routine','Scalar','Scheduler',
+        'Semaphore','Seq','Set','SetHash','Setty','Signature','size_t','Slip',
+        'Stash','Str','StrDistance','Stringy','Sub','Submethod','Supplier',
+        'Supplier::Preserving','Supply','Systemic','Tap','Telemetry',
+        'Telemetry::Instrument::Thread','Telemetry::Instrument::Usage',
+        'Telemetry::Period','Telemetry::Sampler','Thread','ThreadPoolScheduler',
+        'UInt','uint16','uint32','uint64','uint8','Uni','utf8','Variable',
+        'Version','VM','Whatever','WhateverCode','WrapHandle'
+    )
+
+    PERL6_OPERATORS = (
+        'X', 'Z', 'after', 'also', 'and', 'andthen', 'before', 'cmp', 'div',
+        'eq', 'eqv', 'extra', 'ff', 'fff', 'ge', 'gt', 'le', 'leg', 'lt', 'm',
+        'mm', 'mod', 'ne', 'or', 'orelse', 'rx', 's', 'tr', 'x', 'xor', 'xx',
+        '++', '--', '**', '!', '+', '-', '~', '?', '|', '||', '+^', '~^', '?^',
+        '^', '*', '/', '%', '%%', '+&', '+<', '+>', '~&', '~<', '~>', '?&',
+        'gcd', 'lcm', '+', '-', '+|', '+^', '~|', '~^', '?|', '?^',
+        '~', '&', '^', 'but', 'does', '<=>', '..', '..^', '^..', '^..^',
+        '!=', '==', '<', '<=', '>', '>=', '~~', '===', '!eqv',
+        '&&', '||', '^^', '//', 'min', 'max', '??', '!!', 'ff', 'fff', 'so',
+        'not', '<==', '==>', '<<==', '==>>','unicmp',
+    )
+
+    # Perl 6 has a *lot* of possible bracketing characters
+    # this list was lifted from STD.pm6 (https://github.com/perl6/std)
+    PERL6_BRACKETS = {
+        '\u0028': '\u0029', '\u003c': '\u003e', '\u005b': '\u005d',
+        '\u007b': '\u007d', '\u00ab': '\u00bb', '\u0f3a': '\u0f3b',
+        '\u0f3c': '\u0f3d', '\u169b': '\u169c', '\u2018': '\u2019',
+        '\u201a': '\u2019', '\u201b': '\u2019', '\u201c': '\u201d',
+        '\u201e': '\u201d', '\u201f': '\u201d', '\u2039': '\u203a',
+        '\u2045': '\u2046', '\u207d': '\u207e', '\u208d': '\u208e',
+        '\u2208': '\u220b', '\u2209': '\u220c', '\u220a': '\u220d',
+        '\u2215': '\u29f5', '\u223c': '\u223d', '\u2243': '\u22cd',
+        '\u2252': '\u2253', '\u2254': '\u2255', '\u2264': '\u2265',
+        '\u2266': '\u2267', '\u2268': '\u2269', '\u226a': '\u226b',
+        '\u226e': '\u226f', '\u2270': '\u2271', '\u2272': '\u2273',
+        '\u2274': '\u2275', '\u2276': '\u2277', '\u2278': '\u2279',
+        '\u227a': '\u227b', '\u227c': '\u227d', '\u227e': '\u227f',
+        '\u2280': '\u2281', '\u2282': '\u2283', '\u2284': '\u2285',
+        '\u2286': '\u2287', '\u2288': '\u2289', '\u228a': '\u228b',
+        '\u228f': '\u2290', '\u2291': '\u2292', '\u2298': '\u29b8',
+        '\u22a2': '\u22a3', '\u22a6': '\u2ade', '\u22a8': '\u2ae4',
+        '\u22a9': '\u2ae3', '\u22ab': '\u2ae5', '\u22b0': '\u22b1',
+        '\u22b2': '\u22b3', '\u22b4': '\u22b5', '\u22b6': '\u22b7',
+        '\u22c9': '\u22ca', '\u22cb': '\u22cc', '\u22d0': '\u22d1',
+        '\u22d6': '\u22d7', '\u22d8': '\u22d9', '\u22da': '\u22db',
+        '\u22dc': '\u22dd', '\u22de': '\u22df', '\u22e0': '\u22e1',
+        '\u22e2': '\u22e3', '\u22e4': '\u22e5', '\u22e6': '\u22e7',
+        '\u22e8': '\u22e9', '\u22ea': '\u22eb', '\u22ec': '\u22ed',
+        '\u22f0': '\u22f1', '\u22f2': '\u22fa', '\u22f3': '\u22fb',
+        '\u22f4': '\u22fc', '\u22f6': '\u22fd', '\u22f7': '\u22fe',
+        '\u2308': '\u2309', '\u230a': '\u230b', '\u2329': '\u232a',
+        '\u23b4': '\u23b5', '\u2768': '\u2769', '\u276a': '\u276b',
+        '\u276c': '\u276d', '\u276e': '\u276f', '\u2770': '\u2771',
+        '\u2772': '\u2773', '\u2774': '\u2775', '\u27c3': '\u27c4',
+        '\u27c5': '\u27c6', '\u27d5': '\u27d6', '\u27dd': '\u27de',
+        '\u27e2': '\u27e3', '\u27e4': '\u27e5', '\u27e6': '\u27e7',
+        '\u27e8': '\u27e9', '\u27ea': '\u27eb', '\u2983': '\u2984',
+        '\u2985': '\u2986', '\u2987': '\u2988', '\u2989': '\u298a',
+        '\u298b': '\u298c', '\u298d': '\u298e', '\u298f': '\u2990',
+        '\u2991': '\u2992', '\u2993': '\u2994', '\u2995': '\u2996',
+        '\u2997': '\u2998', '\u29c0': '\u29c1', '\u29c4': '\u29c5',
+        '\u29cf': '\u29d0', '\u29d1': '\u29d2', '\u29d4': '\u29d5',
+        '\u29d8': '\u29d9', '\u29da': '\u29db', '\u29f8': '\u29f9',
+        '\u29fc': '\u29fd', '\u2a2b': '\u2a2c', '\u2a2d': '\u2a2e',
+        '\u2a34': '\u2a35', '\u2a3c': '\u2a3d', '\u2a64': '\u2a65',
+        '\u2a79': '\u2a7a', '\u2a7d': '\u2a7e', '\u2a7f': '\u2a80',
+        '\u2a81': '\u2a82', '\u2a83': '\u2a84', '\u2a8b': '\u2a8c',
+        '\u2a91': '\u2a92', '\u2a93': '\u2a94', '\u2a95': '\u2a96',
+        '\u2a97': '\u2a98', '\u2a99': '\u2a9a', '\u2a9b': '\u2a9c',
+        '\u2aa1': '\u2aa2', '\u2aa6': '\u2aa7', '\u2aa8': '\u2aa9',
+        '\u2aaa': '\u2aab', '\u2aac': '\u2aad', '\u2aaf': '\u2ab0',
+        '\u2ab3': '\u2ab4', '\u2abb': '\u2abc', '\u2abd': '\u2abe',
+        '\u2abf': '\u2ac0', '\u2ac1': '\u2ac2', '\u2ac3': '\u2ac4',
+        '\u2ac5': '\u2ac6', '\u2acd': '\u2ace', '\u2acf': '\u2ad0',
+        '\u2ad1': '\u2ad2', '\u2ad3': '\u2ad4', '\u2ad5': '\u2ad6',
+        '\u2aec': '\u2aed', '\u2af7': '\u2af8', '\u2af9': '\u2afa',
+        '\u2e02': '\u2e03', '\u2e04': '\u2e05', '\u2e09': '\u2e0a',
+        '\u2e0c': '\u2e0d', '\u2e1c': '\u2e1d', '\u2e20': '\u2e21',
+        '\u3008': '\u3009', '\u300a': '\u300b', '\u300c': '\u300d',
+        '\u300e': '\u300f', '\u3010': '\u3011', '\u3014': '\u3015',
+        '\u3016': '\u3017', '\u3018': '\u3019', '\u301a': '\u301b',
+        '\u301d': '\u301e', '\ufd3e': '\ufd3f', '\ufe17': '\ufe18',
+        '\ufe35': '\ufe36', '\ufe37': '\ufe38', '\ufe39': '\ufe3a',
+        '\ufe3b': '\ufe3c', '\ufe3d': '\ufe3e', '\ufe3f': '\ufe40',
+        '\ufe41': '\ufe42', '\ufe43': '\ufe44', '\ufe47': '\ufe48',
+        '\ufe59': '\ufe5a', '\ufe5b': '\ufe5c', '\ufe5d': '\ufe5e',
+        '\uff08': '\uff09', '\uff1c': '\uff1e', '\uff3b': '\uff3d',
+        '\uff5b': '\uff5d', '\uff5f': '\uff60', '\uff62': '\uff63',
+    }
+
+    def _build_word_match(words, boundary_regex_fragment=None, prefix='', suffix=''):
+        if boundary_regex_fragment is None:
+            return r'\b(' + prefix + r'|'.join(re.escape(x) for x in words) + \
+                suffix + r')\b'
+        else:
+            return r'(? 0:
+                    next_open_pos = text.find(opening_chars, search_pos + n_chars)
+                    next_close_pos = text.find(closing_chars, search_pos + n_chars)
+
+                    if next_close_pos == -1:
+                        next_close_pos = len(text)
+                        nesting_level = 0
+                    elif next_open_pos != -1 and next_open_pos < next_close_pos:
+                        nesting_level += 1
+                        search_pos = next_open_pos
+                    else:  # next_close_pos < next_open_pos
+                        nesting_level -= 1
+                        search_pos = next_close_pos
+
+                end_pos = next_close_pos
+
+            if end_pos < 0:     # if we didn't find a closer, just highlight the
+                                # rest of the text in this class
+                end_pos = len(text)
+
+            if adverbs is not None and re.search(r':to\b', adverbs):
+                heredoc_terminator = text[match.start('delimiter') + n_chars:end_pos]
+                end_heredoc = re.search(r'^\s*' + re.escape(heredoc_terminator) +
+                                        r'\s*$', text[end_pos:], re.MULTILINE)
+
+                if end_heredoc:
+                    end_pos += end_heredoc.end()
+                else:
+                    end_pos = len(text)
+
+            yield match.start(), token_class, text[match.start():end_pos + n_chars]
+            context.pos = end_pos + n_chars
+
+        return callback
+
+    def opening_brace_callback(lexer, match, context):
+        stack = context.stack
+
+        yield match.start(), Text, context.text[match.start():match.end()]
+        context.pos = match.end()
+
+        # if we encounter an opening brace and we're one level
+        # below a token state, it means we need to increment
+        # the nesting level for braces so we know later when
+        # we should return to the token rules.
+        if len(stack) > 2 and stack[-2] == 'token':
+            context.perl6_token_nesting_level += 1
+
+    def closing_brace_callback(lexer, match, context):
+        stack = context.stack
+
+        yield match.start(), Text, context.text[match.start():match.end()]
+        context.pos = match.end()
+
+        # if we encounter a free closing brace and we're one level
+        # below a token state, it means we need to check the nesting
+        # level to see if we need to return to the token state.
+        if len(stack) > 2 and stack[-2] == 'token':
+            context.perl6_token_nesting_level -= 1
+            if context.perl6_token_nesting_level == 0:
+                stack.pop()
+
+    def embedded_perl6_callback(lexer, match, context):
+        context.perl6_token_nesting_level = 1
+        yield match.start(), Text, context.text[match.start():match.end()]
+        context.pos = match.end()
+        context.stack.append('root')
+
+    # If you're modifying these rules, be careful if you need to process '{' or '}'
+    # characters. We have special logic for processing these characters (due to the fact
+    # that you can nest Perl 6 code in regex blocks), so if you need to process one of
+    # them, make sure you also process the corresponding one!
+    tokens = {
+        'common': [
+            (r'#[`|=](?P(?P[' + ''.join(PERL6_BRACKETS) + r'])(?P=first_char)*)',
+             brackets_callback(Comment.Multiline)),
+            (r'#[^\n]*$', Comment.Single),
+            (r'^(\s*)=begin\s+(\w+)\b.*?^\1=end\s+\2', Comment.Multiline),
+            (r'^(\s*)=for.*?\n\s*?\n', Comment.Multiline),
+            (r'^=.*?\n\s*?\n', Comment.Multiline),
+            (r'(regex|token|rule)(\s*' + PERL6_IDENTIFIER_RANGE + '+:sym)',
+             bygroups(Keyword, Name), 'token-sym-brackets'),
+            (r'(regex|token|rule)(?!' + PERL6_IDENTIFIER_RANGE + r')(\s*' + PERL6_IDENTIFIER_RANGE + '+)?',
+             bygroups(Keyword, Name), 'pre-token'),
+            # deal with a special case in the Perl 6 grammar (role q { ... })
+            (r'(role)(\s+)(q)(\s*)', bygroups(Keyword, Whitespace, Name, Whitespace)),
+            (_build_word_match(PERL6_KEYWORDS, PERL6_IDENTIFIER_RANGE), Keyword),
+            (_build_word_match(PERL6_BUILTIN_CLASSES, PERL6_IDENTIFIER_RANGE, suffix='(?::[UD])?'),
+             Name.Builtin),
+            (_build_word_match(PERL6_BUILTINS, PERL6_IDENTIFIER_RANGE), Name.Builtin),
+            # copied from PerlLexer
+            (r'[$@%&][.^:?=!~]?' + PERL6_IDENTIFIER_RANGE + '+(?:<<.*?>>|<.*?>|«.*?»)*',
+             Name.Variable),
+            (r'\$[!/](?:<<.*?>>|<.*?>|«.*?»)*', Name.Variable.Global),
+            (r'::\?\w+', Name.Variable.Global),
+            (r'[$@%&]\*' + PERL6_IDENTIFIER_RANGE + '+(?:<<.*?>>|<.*?>|«.*?»)*',
+             Name.Variable.Global),
+            (r'\$(?:<.*?>)+', Name.Variable),
+            (r'(?:q|qq|Q)[a-zA-Z]?\s*(?P:[\w\s:]+)?\s*(?P(?P[^0-9a-zA-Z:\s])'
+             r'(?P=first_char)*)', brackets_callback(String)),
+            # copied from PerlLexer
+            (r'0_?[0-7]+(_[0-7]+)*', Number.Oct),
+            (r'0x[0-9A-Fa-f]+(_[0-9A-Fa-f]+)*', Number.Hex),
+            (r'0b[01]+(_[01]+)*', Number.Bin),
+            (r'(?i)(\d*(_\d*)*\.\d+(_\d*)*|\d+(_\d*)*\.\d+(_\d*)*)(e[+-]?\d+)?',
+             Number.Float),
+            (r'(?i)\d+(_\d*)*e[+-]?\d+(_\d*)*', Number.Float),
+            (r'\d+(_\d+)*', Number.Integer),
+            (r'(?<=~~)\s*/(?:\\\\|\\/|.)*?/', String.Regex),
+            (r'(?<=[=(,])\s*/(?:\\\\|\\/|.)*?/', String.Regex),
+            (r'm\w+(?=\()', Name),
+            (r'(?:m|ms|rx)\s*(?P:[\w\s:]+)?\s*(?P(?P[^\w:\s])'
+             r'(?P=first_char)*)', brackets_callback(String.Regex)),
+            (r'(?:s|ss|tr)\s*(?::[\w\s:]+)?\s*/(?:\\\\|\\/|.)*?/(?:\\\\|\\/|.)*?/',
+             String.Regex),
+            (r'<[^\s=].*?\S>', String),
+            (_build_word_match(PERL6_OPERATORS), Operator),
+            (r'\w' + PERL6_IDENTIFIER_RANGE + '*', Name),
+            (r"'(\\\\|\\[^\\]|[^'\\])*'", String),
+            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
+        ],
+        'root': [
+            include('common'),
+            (r'\{', opening_brace_callback),
+            (r'\}', closing_brace_callback),
+            (r'.+?', Text),
+        ],
+        'pre-token': [
+            include('common'),
+            (r'\{', Text, ('#pop', 'token')),
+            (r'.+?', Text),
+        ],
+        'token-sym-brackets': [
+            (r'(?P(?P[' + ''.join(PERL6_BRACKETS) + '])(?P=first_char)*)',
+             brackets_callback(Name), ('#pop', 'pre-token')),
+            default(('#pop', 'pre-token')),
+        ],
+        'token': [
+            (r'\}', Text, '#pop'),
+            (r'(?<=:)(?:my|our|state|constant|temp|let).*?;', using(this)),
+            # make sure that quotes in character classes aren't treated as strings
+            (r'<(?:[-!?+.]\s*)?\[.*?\]>', String.Regex),
+            # make sure that '#' characters in quotes aren't treated as comments
+            (r"(?my|our)\s+)?(?:module|class|role|enum|grammar)', line)
+            if class_decl:
+                if saw_perl_decl or class_decl.group('scope') is not None:
+                    return True
+                rating = 0.05
+                continue
+            break
+
+        if ':=' in text:
+            # Same logic as above for PerlLexer
+            rating /= 2
+
+        return rating
+
+    def __init__(self, **options):
+        super().__init__(**options)
+        self.encoding = options.get('encoding', 'utf-8')
diff --git a/local_packages/pygments/lexers/phix.py b/local_packages/pygments/lexers/phix.py
new file mode 100644
index 0000000..c4cb35d
--- /dev/null
+++ b/local_packages/pygments/lexers/phix.py
@@ -0,0 +1,363 @@
+"""
+    pygments.lexers.phix
+    ~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for Phix.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+    Whitespace
+
+__all__ = ['PhixLexer']
+
+
+class PhixLexer(RegexLexer):
+    """
+    Pygments Lexer for Phix files (.exw).
+    See http://phix.x10.mx
+    """
+
+    name = 'Phix'
+    url = 'http://phix.x10.mx'
+    aliases = ['phix']
+    filenames = ['*.exw']
+    mimetypes = ['text/x-phix']
+    version_added = '2.14'
+
+    flags = re.MULTILINE    # nb: **NOT** re.DOTALL! (totally spanners comment handling)
+
+    preproc = (
+        'ifdef', 'elsifdef', 'elsedef'
+    )
+    # Note these lists are auto-generated by pwa/p2js.exw, when pwa\src\p2js_keywords.e (etc)
+    #     change, though of course subsequent copy/commit/pull requests are all manual steps.
+    types = (
+        'string', 'nullable_string', 'atom_string', 'atom', 'bool', 'boolean',
+        'cdCanvan', 'cdCanvas', 'complex', 'CURLcode', 'dictionary', 'int',
+        'integer', 'Ihandle', 'Ihandles', 'Ihandln', 'mpfr', 'mpq', 'mpz',
+        'mpz_or_string', 'number', 'rid_string', 'seq', 'sequence', 'timedate',
+        'object'
+    )
+    keywords = (
+        'abstract', 'class', 'continue', 'export', 'extends', 'nullable',
+        'private', 'public', 'static', 'struct', 'trace',
+        'and', 'break', 'by', 'case', 'catch', 'const', 'constant', 'debug',
+        'default', 'do', 'else', 'elsif', 'end', 'enum', 'exit', 'fallthru',
+        'fallthrough', 'for', 'forward', 'function', 'global', 'if', 'in',
+        'include', 'js', 'javascript', 'javascript_semantics', 'let', 'not',
+        'or', 'procedure', 'profile', 'profile_time', 'return', 'safe_mode',
+        'switch', 'then', 'to', 'try', 'type', 'type_check', 'until', 'warning',
+        'while', 'with', 'without', 'xor'
+    )
+    routines = (
+        'abort', 'abs', 'adjust_timedate', 'and_bits', 'and_bitsu', 'apply',
+        'append', 'arccos', 'arcsin', 'arctan', 'assert', 'atan2',
+        'atom_to_float32', 'atom_to_float64', 'bankers_rounding', 'beep',
+        'begins', 'binary_search', 'bits_to_int', 'bk_color', 'bytes_to_int',
+        'call_func', 'call_proc', 'cdCanvasActivate', 'cdCanvasArc',
+        'cdCanvasBegin', 'cdCanvasBox', 'cdCanvasChord', 'cdCanvasCircle',
+        'cdCanvasClear', 'cdCanvasEnd', 'cdCanvasFlush', 'cdCanvasFont',
+        'cdCanvasGetImageRGB', 'cdCanvasGetSize', 'cdCanvasGetTextAlignment',
+        'cdCanvasGetTextSize', 'cdCanvasLine', 'cdCanvasMark',
+        'cdCanvasMarkSize', 'cdCanvasMultiLineVectorText', 'cdCanvasPixel',
+        'cdCanvasRect', 'cdCanvasRoundedBox', 'cdCanvasRoundedRect',
+        'cdCanvasSector', 'cdCanvasSetAttribute', 'cdCanvasSetBackground',
+        'cdCanvasSetFillMode', 'cdCanvasSetForeground',
+        'cdCanvasSetInteriorStyle', 'cdCanvasSetLineStyle',
+        'cdCanvasSetLineWidth', 'cdCanvasSetTextAlignment', 'cdCanvasText',
+        'cdCanvasSetTextOrientation', 'cdCanvasGetTextOrientation',
+        'cdCanvasVectorText', 'cdCanvasVectorTextDirection',
+        'cdCanvasVectorTextSize', 'cdCanvasVertex', 'cdCreateCanvas',
+        'cdDecodeAlpha', 'cdDecodeColor', 'cdDecodeColorAlpha', 'cdEncodeAlpha',
+        'cdEncodeColor', 'cdEncodeColorAlpha', 'cdKillCanvas', 'cdVersion',
+        'cdVersionDate', 'ceil', 'change_timezone', 'choose', 'clear_screen',
+        'columnize', 'command_line', 'compare', 'complex_abs', 'complex_add',
+        'complex_arg', 'complex_conjugate', 'complex_cos', 'complex_cosh',
+        'complex_div', 'complex_exp', 'complex_imag', 'complex_inv',
+        'complex_log', 'complex_mul', 'complex_neg', 'complex_new',
+        'complex_norm', 'complex_power', 'complex_rho', 'complex_real',
+        'complex_round', 'complex_sin', 'complex_sinh', 'complex_sprint',
+        'complex_sqrt', 'complex_sub', 'complex_theta', 'concat', 'cos',
+        'crash', 'custom_sort', 'date', 'day_of_week', 'day_of_year',
+        'days_in_month', 'decode_base64', 'decode_flags', 'deep_copy', 'deld',
+        'deserialize', 'destroy_dict', 'destroy_queue', 'destroy_stack',
+        'dict_name', 'dict_size', 'elapsed', 'elapsed_short', 'encode_base64',
+        'equal', 'even', 'exp', 'extract', 'factorial', 'factors',
+        'file_size_k', 'find', 'find_all', 'find_any', 'find_replace', 'filter',
+        'flatten', 'float32_to_atom', 'float64_to_atom', 'floor',
+        'format_timedate', 'free_console', 'from_polar', 'gcd', 'get_file_base',
+        'get_file_extension', 'get_file_name', 'get_file_name_and_path',
+        'get_file_path', 'get_file_path_and_name', 'get_maxprime', 'get_prime',
+        'get_primes', 'get_primes_le', 'get_proper_dir', 'get_proper_path',
+        'get_rand', 'get_routine_info', 'get_test_abort', 'get_test_logfile',
+        'get_test_pause', 'get_test_verbosity', 'get_tzid', 'getd', 'getdd',
+        'getd_all_keys', 'getd_by_index', 'getd_index', 'getd_partial_key',
+        'glAttachShader', 'glBindBuffer', 'glBindTexture', 'glBufferData',
+        'glCanvasSpecialText', 'glClear', 'glClearColor', 'glColor',
+        'glCompileShader', 'glCreateBuffer', 'glCreateProgram',
+        'glCreateShader', 'glCreateTexture', 'glDeleteProgram',
+        'glDeleteShader', 'glDrawArrays', 'glEnable',
+        'glEnableVertexAttribArray', 'glFloat32Array', 'glInt32Array',
+        'glFlush', 'glGetAttribLocation', 'glGetError', 'glGetProgramInfoLog',
+        'glGetProgramParameter', 'glGetShaderInfoLog', 'glGetShaderParameter',
+        'glGetUniformLocation', 'glLinkProgram', 'glLoadIdentity',
+        'glMatrixMode', 'glOrtho', 'glRotatef', 'glShadeModel',
+        'glShaderSource', 'glSimpleA7texcoords', 'glTexImage2Dc',
+        'glTexParameteri', 'glTranslate', 'glUniform1f', 'glUniform1i',
+        'glUniformMatrix4fv', 'glUseProgram', 'glVertex',
+        'glVertexAttribPointer', 'glViewport', 'head', 'hsv_to_rgb', 'iff',
+        'iif', 'include_file', 'incl0de_file', 'insert', 'instance',
+        'int_to_bits', 'int_to_bytes', 'is_dict', 'is_integer', 's_leap_year',
+        'is_prime', 'is_prime2', 'islower', 'isupper', 'Icallback',
+        'iup_isdouble', 'iup_isprint', 'iup_XkeyBase', 'IupAppend', 'IupAlarm',
+        'IupBackgroundBox', 'IupButton', 'IupCalendar', 'IupCanvas',
+        'IupClipboard', 'IupClose', 'IupCloseOnEscape', 'IupControlsOpen',
+        'IupDatePick', 'IupDestroy', 'IupDialog', 'IupDrawArc', 'IupDrawBegin',
+        'IupDrawEnd', 'IupDrawGetSize', 'IupDrawGetTextSize', 'IupDrawLine',
+        'IupDrawRectangle', 'IupDrawText', 'IupExpander', 'IupFill',
+        'IupFlatLabel', 'IupFlatList', 'IupFlatTree', 'IupFlush', 'IupFrame',
+        'IupGetAttribute', 'IupGetAttributeId', 'IupGetAttributePtr',
+        'IupGetBrother', 'IupGetChild', 'IupGetChildCount', 'IupGetClassName',
+        'IupGetDialog', 'IupGetDialogChild', 'IupGetDouble', 'IupGetFocus',
+        'IupGetGlobal', 'IupGetGlobalInt', 'IupGetGlobalIntInt', 'IupGetInt',
+        'IupGetInt2', 'IupGetIntId', 'IupGetIntInt', 'IupGetParent',
+        'IupGLCanvas', 'IupGLCanvasOpen', 'IupGLMakeCurrent', 'IupGraph',
+        'IupHbox', 'IupHide', 'IupImage', 'IupImageRGBA', 'IupItem',
+        'iupKeyCodeToName', 'IupLabel', 'IupLink', 'IupList', 'IupMap',
+        'IupMenu', 'IupMenuItem', 'IupMessage', 'IupMessageDlg', 'IupMultiBox',
+        'IupMultiLine', 'IupNextField', 'IupNormaliser', 'IupOpen',
+        'IupPlayInput', 'IupPopup', 'IupPreviousField', 'IupProgressBar',
+        'IupRadio', 'IupRecordInput', 'IupRedraw', 'IupRefresh',
+        'IupRefreshChildren', 'IupSeparator', 'IupSetAttribute',
+        'IupSetAttributes', 'IupSetAttributeHandle', 'IupSetAttributeId',
+        'IupSetAttributePtr', 'IupSetCallback', 'IupSetCallbacks',
+        'IupSetDouble', 'IupSetFocus', 'IupSetGlobal', 'IupSetGlobalInt',
+        'IupSetGlobalFunction', 'IupSetHandle', 'IupSetInt',
+        'IupSetStrAttribute', 'IupSetStrGlobal', 'IupShow', 'IupShowXY',
+        'IupSplit', 'IupStoreAttribute', 'IupSubmenu', 'IupTable',
+        'IupTableClearSelected', 'IupTableClick_cb', 'IupTableGetSelected',
+        'IupTableResize_cb', 'IupTableSetData', 'IupTabs', 'IupText',
+        'IupTimer', 'IupToggle', 'IupTreeAddNodes', 'IupTreeView', 'IupUpdate',
+        'IupValuator', 'IupVbox', 'join', 'join_by', 'join_path', 'k_perm',
+        'largest', 'lcm', 'length', 'log', 'log10', 'log2', 'lower',
+        'm4_crossProduct', 'm4_inverse', 'm4_lookAt', 'm4_multiply',
+        'm4_normalize', 'm4_perspective', 'm4_subtractVectors', 'm4_xRotate',
+        'm4_yRotate', 'machine_bits', 'machine_word', 'match', 'match_all',
+        'match_replace', 'max', 'maxsq', 'min', 'minsq', 'mod', 'mpfr_add',
+        'mpfr_ceil', 'mpfr_cmp', 'mpfr_cmp_si', 'mpfr_const_pi', 'mpfr_div',
+        'mpfr_div_si', 'mpfr_div_z', 'mpfr_floor', 'mpfr_free', 'mpfr_get_d',
+        'mpfr_get_default_precision', 'mpfr_get_default_rounding_mode',
+        'mpfr_get_fixed', 'mpfr_get_precision', 'mpfr_get_si', 'mpfr_init',
+        'mpfr_inits', 'mpfr_init_set', 'mpfr_init_set_q', 'mpfr_init_set_z',
+        'mpfr_mul', 'mpfr_mul_si', 'mpfr_pow_si', 'mpfr_set', 'mpfr_set_d',
+        'mpfr_set_default_precision', 'mpfr_set_default_rounding_mode',
+        'mpfr_set_precision', 'mpfr_set_q', 'mpfr_set_si', 'mpfr_set_str',
+        'mpfr_set_z', 'mpfr_si_div', 'mpfr_si_sub', 'mpfr_sqrt', 'mpfr_sub',
+        'mpfr_sub_si', 'mpq_abs', 'mpq_add', 'mpq_add_si', 'mpq_canonicalize',
+        'mpq_cmp', 'mpq_cmp_si', 'mpq_div', 'mpq_div_2exp', 'mpq_free',
+        'mpq_get_den', 'mpq_get_num', 'mpq_get_str', 'mpq_init', 'mpq_init_set',
+        'mpq_init_set_si', 'mpq_init_set_str', 'mpq_init_set_z', 'mpq_inits',
+        'mpq_inv', 'mpq_mul', 'mpq_neg', 'mpq_set', 'mpq_set_si', 'mpq_set_str',
+        'mpq_set_z', 'mpq_sub', 'mpz_abs', 'mpz_add', 'mpz_addmul',
+        'mpz_addmul_ui', 'mpz_addmul_si', 'mpz_add_si', 'mpz_add_ui', 'mpz_and',
+        'mpz_bin_uiui', 'mpz_cdiv_q', 'mpz_cmp', 'mpz_cmp_si', 'mpz_divexact',
+        'mpz_divexact_ui', 'mpz_divisible_p', 'mpz_divisible_ui_p', 'mpz_even',
+        'mpz_fac_ui', 'mpz_factorstring', 'mpz_fdiv_q', 'mpz_fdiv_q_2exp',
+        'mpz_fdiv_q_ui', 'mpz_fdiv_qr', 'mpz_fdiv_r', 'mpz_fdiv_ui',
+        'mpz_fib_ui', 'mpz_fib2_ui', 'mpz_fits_atom', 'mpz_fits_integer',
+        'mpz_free', 'mpz_gcd', 'mpz_gcd_ui', 'mpz_get_atom', 'mpz_get_integer',
+        'mpz_get_short_str', 'mpz_get_str', 'mpz_init', 'mpz_init_set',
+        'mpz_inits', 'mpz_invert', 'mpz_lcm', 'mpz_lcm_ui', 'mpz_max',
+        'mpz_min', 'mpz_mod', 'mpz_mod_ui', 'mpz_mul', 'mpz_mul_2exp',
+        'mpz_mul_d', 'mpz_mul_si', 'mpz_neg', 'mpz_nthroot', 'mpz_odd',
+        'mpz_pollard_rho', 'mpz_pow_ui', 'mpz_powm', 'mpz_powm_ui', 'mpz_prime',
+        'mpz_prime_factors', 'mpz_prime_mr', 'mpz_rand', 'mpz_rand_ui',
+        'mpz_re_compose', 'mpz_remove', 'mpz_scan0', 'mpz_scan1', 'mpz_set',
+        'mpz_set_d', 'mpz_set_si', 'mpz_set_str', 'mpz_set_v', 'mpz_sign',
+        'mpz_sizeinbase', 'mpz_sqrt', 'mpz_sub', 'mpz_sub_si', 'mpz_sub_ui',
+        'mpz_si_sub', 'mpz_tdiv_q_2exp', 'mpz_tdiv_r_2exp', 'mpz_tstbit',
+        'mpz_ui_pow_ui', 'mpz_xor', 'named_dict', 'new_dict', 'new_queue',
+        'new_stack', 'not_bits', 'not_bitsu', 'odd', 'or_all', 'or_allu',
+        'or_bits', 'or_bitsu', 'ord', 'ordinal', 'ordinant',
+        'override_timezone', 'pad', 'pad_head', 'pad_tail', 'parse_date_string',
+        'papply', 'peep', 'peepn', 'peep_dict', 'permute', 'permutes',
+        'platform', 'pop', 'popn', 'pop_dict', 'power', 'pp', 'ppEx', 'ppExf',
+        'ppf', 'ppOpt', 'pq_add', 'pq_destroy', 'pq_empty', 'pq_new', 'pq_peek',
+        'pq_pop', 'pq_pop_data', 'pq_size', 'prepend', 'prime_factors',
+        'printf', 'product', 'proper', 'push', 'pushn', 'putd', 'puts',
+        'queue_empty', 'queue_size', 'rand', 'rand_range', 'reinstate',
+        'remainder', 'remove', 'remove_all', 'repeat', 'repeatch', 'replace',
+        'requires', 'reverse', 'rfind', 'rgb', 'rmatch', 'rmdr', 'rnd', 'round',
+        'routine_id', 'scanf', 'serialize', 'series', 'set_rand',
+        'set_test_abort', 'set_test_logfile', 'set_test_module',
+        'set_test_pause', 'set_test_verbosity', 'set_timedate_formats',
+        'set_timezone', 'setd', 'setd_default', 'shorten', 'sha256',
+        'shift_bits', 'shuffle', 'sign', 'sin', 'smallest', 'sort',
+        'sort_columns', 'speak', 'splice', 'split', 'split_any', 'split_by',
+        'sprint', 'sprintf', 'sq_abs', 'sq_add', 'sq_and', 'sq_and_bits',
+        'sq_arccos', 'sq_arcsin', 'sq_arctan', 'sq_atom', 'sq_ceil', 'sq_cmp',
+        'sq_cos', 'sq_div', 'sq_even', 'sq_eq', 'sq_floor', 'sq_floor_div',
+        'sq_ge', 'sq_gt', 'sq_int', 'sq_le', 'sq_log', 'sq_log10', 'sq_log2',
+        'sq_lt', 'sq_max', 'sq_min', 'sq_mod', 'sq_mul', 'sq_ne', 'sq_not',
+        'sq_not_bits', 'sq_odd', 'sq_or', 'sq_or_bits', 'sq_power', 'sq_rand',
+        'sq_remainder', 'sq_rmdr', 'sq_rnd', 'sq_round', 'sq_seq', 'sq_sign',
+        'sq_sin', 'sq_sqrt', 'sq_str', 'sq_sub', 'sq_tan', 'sq_trunc',
+        'sq_uminus', 'sq_xor', 'sq_xor_bits', 'sqrt', 'square_free',
+        'stack_empty', 'stack_size', 'substitute', 'substitute_all', 'sum',
+        'tail', 'tan', 'test_equal', 'test_fail', 'test_false',
+        'test_not_equal', 'test_pass', 'test_summary', 'test_true',
+        'text_color', 'throw', 'time', 'timedate_diff', 'timedelta',
+        'to_integer', 'to_number', 'to_rgb', 'to_string', 'traverse_dict',
+        'traverse_dict_partial_key', 'trim', 'trim_head', 'trim_tail', 'trunc',
+        'tagset', 'tagstart', 'typeof', 'unique', 'unix_dict', 'upper',
+        'utf8_to_utf32', 'utf32_to_utf8', 'version', 'vlookup', 'vslice',
+        'wglGetProcAddress', 'wildcard_file', 'wildcard_match', 'with_rho',
+        'with_theta', 'xml_new_doc', 'xml_new_element', 'xml_set_attribute',
+        'xml_sprint', 'xor_bits', 'xor_bitsu',
+        'accept', 'allocate', 'allocate_string', 'allow_break', 'ARM',
+        'atom_to_float80', 'c_func', 'c_proc', 'call_back', 'chdir',
+        'check_break', 'clearDib', 'close', 'closesocket', 'console',
+        'copy_file', 'create', 'create_directory', 'create_thread',
+        'curl_easy_cleanup', 'curl_easy_get_file', 'curl_easy_init',
+        'curl_easy_perform', 'curl_easy_perform_ex', 'curl_easy_setopt',
+        'curl_easy_strerror', 'curl_global_cleanup', 'curl_global_init',
+        'curl_slist_append', 'curl_slist_free_all', 'current_dir', 'cursor',
+        'define_c_func', 'define_c_proc', 'delete', 'delete_cs', 'delete_file',
+        'dir', 'DLL', 'drawDib', 'drawShadedPolygonToDib', 'ELF32', 'ELF64',
+        'enter_cs', 'eval', 'exit_thread', 'free', 'file_exists', 'final',
+        'float80_to_atom', 'format', 'get_bytes', 'get_file_date',
+        'get_file_size', 'get_file_type', 'get_interpreter', 'get_key',
+        'get_socket_error', 'get_text', 'get_thread_exitcode', 'get_thread_id',
+        'getc', 'getenv', 'gets', 'getsockaddr', 'glBegin', 'glCallList',
+        'glFrustum', 'glGenLists', 'glGetString', 'glLight', 'glMaterial',
+        'glNewList', 'glNormal', 'glPopMatrix', 'glPushMatrix', 'glRotate',
+        'glEnd', 'glEndList', 'glTexImage2D', 'goto', 'GUI', 'icons', 'ilASM',
+        'include_files', 'include_paths', 'init_cs', 'ip_to_string',
+        'IupConfig', 'IupConfigDialogClosed', 'IupConfigDialogShow',
+        'IupConfigGetVariableInt', 'IupConfigLoad', 'IupConfigSave',
+        'IupConfigSetVariableInt', 'IupExitLoop', 'IupFileDlg', 'IupFileList',
+        'IupGLSwapBuffers', 'IupHelp', 'IupLoopStep', 'IupMainLoop',
+        'IupNormalizer', 'IupPlot', 'IupPlotAdd', 'IupPlotBegin', 'IupPlotEnd',
+        'IupPlotInsert', 'IupSaveImage', 'IupTreeGetUserId', 'IupUser',
+        'IupVersion', 'IupVersionDate', 'IupVersionNumber', 'IupVersionShow',
+        'killDib', 'leave_cs', 'listen', 'manifest', 'mem_copy', 'mem_set',
+        'mpfr_gamma', 'mpfr_printf', 'mpfr_sprintf', 'mpz_export', 'mpz_import',
+        'namespace', 'new', 'newDib', 'open', 'open_dll', 'PE32', 'PE64',
+        'peek', 'peek_string', 'peek1s', 'peek1u', 'peek2s', 'peek2u', 'peek4s',
+        'peek4u', 'peek8s', 'peek8u', 'peekNS', 'peekns', 'peeknu', 'poke',
+        'poke2', 'poke4', 'poke8', 'pokeN', 'poke_string', 'poke_wstring',
+        'position', 'progress', 'prompt_number', 'prompt_string', 'read_file',
+        'read_lines', 'recv', 'resume_thread', 'seek', 'select', 'send',
+        'setHandler', 'shutdown', 'sleep', 'SO', 'sockaddr_in', 'socket',
+        'split_path', 'suspend_thread', 'system', 'system_exec', 'system_open',
+        'system_wait', 'task_clock_start', 'task_clock_stop', 'task_create',
+        'task_delay', 'task_list', 'task_schedule', 'task_self', 'task_status',
+        'task_suspend', 'task_yield', 'thread_safe_string', 'try_cs',
+        'utf8_to_utf16', 'utf16_to_utf8', 'utf16_to_utf32', 'utf32_to_utf16',
+        'video_config', 'WSACleanup', 'wait_thread', 'walk_dir', 'where',
+        'write_lines', 'wait_key'
+    )
+    constants = (
+        'ANY_QUEUE', 'ASCENDING', 'BLACK', 'BLOCK_CURSOR', 'BLUE',
+        'BRIGHT_CYAN', 'BRIGHT_BLUE', 'BRIGHT_GREEN', 'BRIGHT_MAGENTA',
+        'BRIGHT_RED', 'BRIGHT_WHITE', 'BROWN', 'C_DWORD', 'C_INT', 'C_POINTER',
+        'C_USHORT', 'C_WORD', 'CD_AMBER', 'CD_BLACK', 'CD_BLUE', 'CD_BOLD',
+        'CD_BOLD_ITALIC', 'CD_BOX', 'CD_CENTER', 'CD_CIRCLE', 'CD_CLOSED_LINES',
+        'CD_CONTINUOUS', 'CD_CUSTOM', 'CD_CYAN', 'CD_DARK_BLUE', 'CD_DARK_CYAN',
+        'CD_DARK_GRAY', 'CD_DARK_GREY', 'CD_DARK_GREEN', 'CD_DARK_MAGENTA',
+        'CD_DARK_RED', 'CD_DARK_YELLOW', 'CD_DASH_DOT', 'CD_DASH_DOT_DOT',
+        'CD_DASHED', 'CD_DBUFFER', 'CD_DEG2RAD', 'CD_DIAMOND', 'CD_DOTTED',
+        'CD_EAST', 'CD_EVENODD', 'CD_FILL', 'CD_GL', 'CD_GRAY', 'CD_GREY',
+        'CD_GREEN', 'CD_HATCH', 'CD_HOLLOW', 'CD_HOLLOW_BOX',
+        'CD_HOLLOW_CIRCLE', 'CD_HOLLOW_DIAMOND', 'CD_INDIGO', 'CD_ITALIC',
+        'CD_IUP', 'CD_IUPDBUFFER', 'CD_LIGHT_BLUE', 'CD_LIGHT_GRAY',
+        'CD_LIGHT_GREY', 'CD_LIGHT_GREEN', 'CD_LIGHT_PARCHMENT', 'CD_MAGENTA',
+        'CD_NAVY', 'CD_NORTH', 'CD_NORTH_EAST', 'CD_NORTH_WEST', 'CD_OLIVE',
+        'CD_OPEN_LINES', 'CD_ORANGE', 'CD_PARCHMENT', 'CD_PATTERN',
+        'CD_PRINTER', 'CD_PURPLE', 'CD_PLAIN', 'CD_PLUS', 'CD_QUERY',
+        'CD_RAD2DEG', 'CD_RED', 'CD_SILVER', 'CD_SOLID', 'CD_SOUTH_EAST',
+        'CD_SOUTH_WEST', 'CD_STAR', 'CD_STIPPLE', 'CD_STRIKEOUT',
+        'CD_UNDERLINE', 'CD_WEST', 'CD_WHITE', 'CD_WINDING', 'CD_VIOLET',
+        'CD_X', 'CD_YELLOW', 'CURLE_OK', 'CURLOPT_MAIL_FROM',
+        'CURLOPT_MAIL_RCPT', 'CURLOPT_PASSWORD', 'CURLOPT_READDATA',
+        'CURLOPT_READFUNCTION', 'CURLOPT_SSL_VERIFYPEER',
+        'CURLOPT_SSL_VERIFYHOST', 'CURLOPT_UPLOAD', 'CURLOPT_URL',
+        'CURLOPT_USE_SSL', 'CURLOPT_USERNAME', 'CURLOPT_VERBOSE',
+        'CURLOPT_WRITEFUNCTION', 'CURLUSESSL_ALL', 'CYAN', 'D_NAME',
+        'D_ATTRIBUTES', 'D_SIZE', 'D_YEAR', 'D_MONTH', 'D_DAY', 'D_HOUR',
+        'D_MINUTE', 'D_SECOND', 'D_CREATION', 'D_LASTACCESS', 'D_MODIFICATION',
+        'DT_YEAR', 'DT_MONTH', 'DT_DAY', 'DT_HOUR', 'DT_MINUTE', 'DT_SECOND',
+        'DT_DOW', 'DT_MSEC', 'DT_DOY', 'DT_GMT', 'EULER', 'E_CODE', 'E_ADDR',
+        'E_LINE', 'E_RTN', 'E_NAME', 'E_FILE', 'E_PATH', 'E_USER', 'false',
+        'False', 'FALSE', 'FIFO_QUEUE', 'FILETYPE_DIRECTORY', 'FILETYPE_FILE',
+        'GET_EOF', 'GET_FAIL', 'GET_IGNORE', 'GET_SUCCESS',
+        'GL_AMBIENT_AND_DIFFUSE', 'GL_ARRAY_BUFFER', 'GL_CLAMP',
+        'GL_CLAMP_TO_BORDER', 'GL_CLAMP_TO_EDGE', 'GL_COLOR_BUFFER_BIT',
+        'GL_COMPILE', 'GL_COMPILE_STATUS', 'GL_CULL_FACE',
+        'GL_DEPTH_BUFFER_BIT', 'GL_DEPTH_TEST', 'GL_EXTENSIONS', 'GL_FLAT',
+        'GL_FLOAT', 'GL_FRAGMENT_SHADER', 'GL_FRONT', 'GL_LIGHT0',
+        'GL_LIGHTING', 'GL_LINEAR', 'GL_LINK_STATUS', 'GL_MODELVIEW',
+        'GL_NEAREST', 'GL_NO_ERROR', 'GL_NORMALIZE', 'GL_POSITION',
+        'GL_PROJECTION', 'GL_QUAD_STRIP', 'GL_QUADS', 'GL_RENDERER',
+        'GL_REPEAT', 'GL_RGB', 'GL_RGBA', 'GL_SMOOTH', 'GL_STATIC_DRAW',
+        'GL_TEXTURE_2D', 'GL_TEXTURE_MAG_FILTER', 'GL_TEXTURE_MIN_FILTER',
+        'GL_TEXTURE_WRAP_S', 'GL_TEXTURE_WRAP_T', 'GL_TRIANGLES',
+        'GL_UNSIGNED_BYTE', 'GL_VENDOR', 'GL_VERSION', 'GL_VERTEX_SHADER',
+        'GRAY', 'GREEN', 'GT_LF_STRIPPED', 'GT_WHOLE_FILE', 'INVLN10',
+        'IUP_CLOSE', 'IUP_CONTINUE', 'IUP_DEFAULT', 'IUP_BLACK', 'IUP_BLUE',
+        'IUP_BUTTON1', 'IUP_BUTTON3', 'IUP_CENTER', 'IUP_CYAN', 'IUP_DARK_BLUE',
+        'IUP_DARK_CYAN', 'IUP_DARK_GRAY', 'IUP_DARK_GREY', 'IUP_DARK_GREEN',
+        'IUP_DARK_MAGENTA', 'IUP_DARK_RED', 'IUP_GRAY', 'IUP_GREY', 'IUP_GREEN',
+        'IUP_IGNORE', 'IUP_INDIGO', 'IUP_MAGENTA', 'IUP_MASK_INT',
+        'IUP_MASK_UINT', 'IUP_MOUSEPOS', 'IUP_NAVY', 'IUP_OLIVE', 'IUP_RECTEXT',
+        'IUP_RED', 'IUP_LIGHT_BLUE', 'IUP_LIGHT_GRAY', 'IUP_LIGHT_GREY',
+        'IUP_LIGHT_GREEN', 'IUP_ORANGE', 'IUP_PARCHMENT', 'IUP_PURPLE',
+        'IUP_SILVER', 'IUP_TEAL', 'IUP_VIOLET', 'IUP_WHITE', 'IUP_YELLOW',
+        'K_BS', 'K_cA', 'K_cC', 'K_cD', 'K_cF5', 'K_cK', 'K_cM', 'K_cN', 'K_cO',
+        'K_cP', 'K_cR', 'K_cS', 'K_cT', 'K_cW', 'K_CR', 'K_DEL', 'K_DOWN',
+        'K_END', 'K_ESC', 'K_F1', 'K_F2', 'K_F3', 'K_F4', 'K_F5', 'K_F6',
+        'K_F7', 'K_F8', 'K_F9', 'K_F10', 'K_F11', 'K_F12', 'K_HOME', 'K_INS',
+        'K_LEFT', 'K_MIDDLE', 'K_PGDN', 'K_PGUP', 'K_RIGHT', 'K_SP', 'K_TAB',
+        'K_UP', 'K_h', 'K_i', 'K_j', 'K_p', 'K_r', 'K_s', 'JS', 'LIFO_QUEUE',
+        'LINUX', 'MAX_HEAP', 'MAGENTA', 'MIN_HEAP', 'Nan', 'NO_CURSOR', 'null',
+        'NULL', 'PI', 'pp_Ascii', 'pp_Brkt', 'pp_Date', 'pp_File', 'pp_FltFmt',
+        'pp_Indent', 'pp_IntCh', 'pp_IntFmt', 'pp_Maxlen', 'pp_Nest',
+        'pp_Pause', 'pp_Q22', 'pp_StrFmt', 'RED', 'SEEK_OK', 'SLASH',
+        'TEST_ABORT', 'TEST_CRASH', 'TEST_PAUSE', 'TEST_PAUSE_FAIL',
+        'TEST_QUIET', 'TEST_SHOW_ALL', 'TEST_SHOW_FAILED', 'TEST_SUMMARY',
+        'true', 'True', 'TRUE', 'VC_SCRNLINES', 'WHITE', 'WINDOWS', 'YELLOW'
+    )
+
+    tokens = {
+        'root': [
+            (r"\s+", Whitespace),
+            (r'/\*|--/\*|#\[', Comment.Multiline, 'comment'),
+            (r'(?://|--|#!).*$', Comment.Single),
+#Alt:
+#           (r'//.*$|--.*$|#!.*$', Comment.Single),
+            (r'"([^"\\]|\\.)*"', String.Other),
+            (r'\'[^\']*\'', String.Other),
+            (r'`[^`]*`', String.Other),
+
+            (words(types, prefix=r'\b', suffix=r'\b'), Name.Function),
+            (words(routines, prefix=r'\b', suffix=r'\b'), Name.Function),
+            (words(preproc, prefix=r'\b', suffix=r'\b'), Keyword.Declaration),
+            (words(keywords, prefix=r'\b', suffix=r'\b'), Keyword.Declaration),
+            (words(constants, prefix=r'\b', suffix=r'\b'), Name.Constant),
+            # Aside: Phix only supports/uses the ascii/non-unicode tilde
+            (r'!=|==|<<|>>|:=|[-~+/*%=<>&^|\.(){},?:\[\]$\\;#]', Operator),
+            (r'[\w-]+', Text)
+        ],
+        'comment': [
+            (r'[^*/#]+', Comment.Multiline),
+            (r'/\*|#\[', Comment.Multiline, '#push'),
+            (r'\*/|#\]', Comment.Multiline, '#pop'),
+            (r'[*/#]', Comment.Multiline)
+        ]
+    }
diff --git a/local_packages/pygments/lexers/php.py b/local_packages/pygments/lexers/php.py
new file mode 100644
index 0000000..ac935c4
--- /dev/null
+++ b/local_packages/pygments/lexers/php.py
@@ -0,0 +1,335 @@
+"""
+    pygments.lexers.php
+    ~~~~~~~~~~~~~~~~~~~
+
+    Lexers for PHP and related languages.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import Lexer, RegexLexer, include, bygroups, default, \
+    using, this, words, do_insertions, line_re
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+    Number, Punctuation, Other, Generic
+from pygments.util import get_bool_opt, get_list_opt, shebang_matches
+
+__all__ = ['ZephirLexer', 'PsyshConsoleLexer', 'PhpLexer']
+
+
+class ZephirLexer(RegexLexer):
+    """
+    For Zephir language source code.
+
+    Zephir is a compiled high level language aimed
+    to the creation of C-extensions for PHP.
+    """
+
+    name = 'Zephir'
+    url = 'http://zephir-lang.com/'
+    aliases = ['zephir']
+    filenames = ['*.zep']
+    version_added = '2.0'
+
+    zephir_keywords = ['fetch', 'echo', 'isset', 'empty']
+    zephir_type = ['bit', 'bits', 'string']
+
+    flags = re.DOTALL | re.MULTILINE
+
+    tokens = {
+        'commentsandwhitespace': [
+            (r'\s+', Text),
+            (r'//.*?\n', Comment.Single),
+            (r'/\*.*?\*/', Comment.Multiline)
+        ],
+        'slashstartsregex': [
+            include('commentsandwhitespace'),
+            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
+             r'([gim]+\b|\B)', String.Regex, '#pop'),
+            (r'/', Operator, '#pop'),
+            default('#pop')
+        ],
+        'badregex': [
+            (r'\n', Text, '#pop')
+        ],
+        'root': [
+            (r'^(?=\s|/)', Text, 'slashstartsregex'),
+            include('commentsandwhitespace'),
+            (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
+             r'(<<|>>>?|==?|!=?|->|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'),
+            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
+            (r'[})\].]', Punctuation),
+            (r'(for|in|while|do|break|return|continue|switch|case|default|if|else|loop|'
+             r'require|inline|throw|try|catch|finally|new|delete|typeof|instanceof|void|'
+             r'namespace|use|extends|this|fetch|isset|unset|echo|fetch|likely|unlikely|'
+             r'empty)\b', Keyword, 'slashstartsregex'),
+            (r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
+            (r'(abstract|boolean|bool|char|class|const|double|enum|export|extends|final|'
+             r'native|goto|implements|import|int|string|interface|long|ulong|char|uchar|'
+             r'float|unsigned|private|protected|public|short|static|self|throws|reverse|'
+             r'transient|volatile|readonly)\b', Keyword.Reserved),
+            (r'(true|false|null|undefined)\b', Keyword.Constant),
+            (r'(Array|Boolean|Date|_REQUEST|_COOKIE|_SESSION|'
+             r'_GET|_POST|_SERVER|this|stdClass|range|count|iterator|'
+             r'window)\b', Name.Builtin),
+            (r'[$a-zA-Z_][\w\\]*', Name.Other),
+            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
+            (r'0x[0-9a-fA-F]+', Number.Hex),
+            (r'[0-9]+', Number.Integer),
+            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
+            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
+        ]
+    }
+
+
+class PsyshConsoleLexer(Lexer):
+    """
+    For PsySH console output, such as:
+
+    .. sourcecode:: psysh
+
+        >>> $greeting = function($name): string {
+        ...     return "Hello, {$name}";
+        ... };
+        => Closure($name): string {#2371 …3}
+        >>> $greeting('World')
+        => "Hello, World"
+    """
+    name = 'PsySH console session for PHP'
+    url = 'https://psysh.org/'
+    aliases = ['psysh']
+    version_added = '2.7'
+
+    def __init__(self, **options):
+        options['startinline'] = True
+        Lexer.__init__(self, **options)
+
+    def get_tokens_unprocessed(self, text):
+        phplexer = PhpLexer(**self.options)
+        curcode = ''
+        insertions = []
+        for match in line_re.finditer(text):
+            line = match.group()
+            if line.startswith('>>> ') or line.startswith('... '):
+                insertions.append((len(curcode),
+                                   [(0, Generic.Prompt, line[:4])]))
+                curcode += line[4:]
+            elif line.rstrip() == '...':
+                insertions.append((len(curcode),
+                                   [(0, Generic.Prompt, '...')]))
+                curcode += line[3:]
+            else:
+                if curcode:
+                    yield from do_insertions(
+                        insertions, phplexer.get_tokens_unprocessed(curcode))
+                    curcode = ''
+                    insertions = []
+                yield match.start(), Generic.Output, line
+        if curcode:
+            yield from do_insertions(insertions,
+                                     phplexer.get_tokens_unprocessed(curcode))
+
+
+class PhpLexer(RegexLexer):
+    """
+    For PHP source code.
+    For PHP embedded in HTML, use the `HtmlPhpLexer`.
+
+    Additional options accepted:
+
+    `startinline`
+        If given and ``True`` the lexer starts highlighting with
+        php code (i.e.: no starting ``>> from pygments.lexers._php_builtins import MODULES
+            >>> MODULES.keys()
+            ['PHP Options/Info', 'Zip', 'dba', ...]
+
+        In fact the names of those modules match the module names from
+        the php documentation.
+    """
+
+    name = 'PHP'
+    url = 'https://www.php.net/'
+    aliases = ['php', 'php3', 'php4', 'php5']
+    filenames = ['*.php', '*.php[345]', '*.inc']
+    mimetypes = ['text/x-php']
+    version_added = ''
+
+    # Note that a backslash is included, PHP uses a backslash as a namespace
+    # separator.
+    _ident_inner = r'(?:[\\_a-z]|[^\x00-\x7f])(?:[\\\w]|[^\x00-\x7f])*'
+    # But not inside strings.
+    _ident_nons = r'(?:[_a-z]|[^\x00-\x7f])(?:\w|[^\x00-\x7f])*'
+
+    flags = re.IGNORECASE | re.DOTALL | re.MULTILINE
+    tokens = {
+        'root': [
+            (r'<\?(php)?', Comment.Preproc, 'php'),
+            (r'[^<]+', Other),
+            (r'<', Other)
+        ],
+        'php': [
+            (r'\?>', Comment.Preproc, '#pop'),
+            (r'(<<<)([\'"]?)(' + _ident_nons + r')(\2\n.*?\n\s*)(\3)(;?)(\n)',
+             bygroups(String, String, String.Delimiter, String, String.Delimiter,
+                      Punctuation, Text)),
+            (r'\s+', Text),
+            (r'#\[', Punctuation, 'attribute'),
+            (r'#.*?\n', Comment.Single),
+            (r'//.*?\n', Comment.Single),
+            # put the empty comment here, it is otherwise seen as
+            # the start of a docstring
+            (r'/\*\*/', Comment.Multiline),
+            (r'/\*\*.*?\*/', String.Doc),
+            (r'/\*.*?\*/', Comment.Multiline),
+            (r'(->|::)(\s*)(' + _ident_nons + ')',
+             bygroups(Operator, Text, Name.Attribute)),
+            (r'[~!%^&*+=|:.<>/@-]+', Operator),
+            (r'\?', Operator),  # don't add to the charclass above!
+            (r'[\[\]{}();,]+', Punctuation),
+            (r'(new)(\s+)(class)\b', bygroups(Keyword, Text, Keyword)),
+            (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
+            (r'(function)(\s*)(?=\()', bygroups(Keyword, Text)),
+            (r'(function)(\s+)(&?)(\s*)',
+             bygroups(Keyword, Text, Operator, Text), 'functionname'),
+            (r'(const)(\s+)(' + _ident_inner + ')',
+             bygroups(Keyword, Text, Name.Constant)),
+            # source: https://www.php.net/manual/en/reserved.keywords.php
+            (r'(and|E_PARSE|old_function|E_ERROR|or|as|E_WARNING|parent|'
+             r'eval|PHP_OS|break|exit|case|extends|PHP_VERSION|cfunction|'
+             r'FALSE|print|for|require|continue|foreach|require_once|'
+             r'declare|return|default|static|do|switch|die|stdClass|'
+             r'echo|else|TRUE|elseif|var|empty|if|xor|enddeclare|include|'
+             r'virtual|endfor|include_once|while|endforeach|global|'
+             r'endif|list|endswitch|new|endwhile|not|'
+             r'array|E_ALL|NULL|final|php_user_filter|interface|'
+             r'implements|public|private|protected|abstract|clone|try|'
+             r'catch|throw|this|use|namespace|trait|yield( from)?|'
+             r'finally|match|readonly)\b', Keyword),
+            (r'(true|false|null)\b', Keyword.Constant),
+            include('magicconstants'),
+            (r'\$\{', Name.Variable, 'variablevariable'),
+            (r'\$+' + _ident_inner, Name.Variable),
+            (_ident_inner, Name.Other),
+            (r'(\d+\.\d*|\d*\.\d+)(e[+-]?[0-9]+)?', Number.Float),
+            (r'\d+e[+-]?[0-9]+', Number.Float),
+            (r'0[0-7]+', Number.Oct),
+            (r'0x[a-f0-9]+', Number.Hex),
+            (r'\d+', Number.Integer),
+            (r'0b[01]+', Number.Bin),
+            (r"'([^'\\]*(?:\\.[^'\\]*)*)'", String.Single),
+            (r'`([^`\\]*(?:\\.[^`\\]*)*)`', String.Backtick),
+            (r'"', String.Double, 'string'),
+        ],
+        'variablevariable': [
+            (r'\}', Name.Variable, '#pop'),
+            include('php')
+        ],
+        'magicfuncs': [
+            # source: http://php.net/manual/en/language.oop5.magic.php
+            (words((
+                '__construct', '__destruct', '__call', '__callStatic', '__get', '__set',
+                '__isset', '__unset', '__sleep', '__wakeup', '__toString', '__invoke',
+                '__set_state', '__clone', '__debugInfo',), suffix=r'\b'),
+             Name.Function.Magic),
+        ],
+        'magicconstants': [
+            # source: https://www.php.net/manual/en/language.constants.magic.php
+            (words((
+                '__LINE__', '__FILE__', '__DIR__', '__FUNCTION__', '__CLASS__',
+                '__TRAIT__', '__METHOD__', '__NAMESPACE__', '__PROPERTY__',),
+                suffix=r'\b'),
+             Name.Constant),
+        ],
+        'classname': [
+            (_ident_inner, Name.Class, '#pop')
+        ],
+        'functionname': [
+            include('magicfuncs'),
+            (_ident_inner, Name.Function, '#pop'),
+            default('#pop')
+        ],
+        'string': [
+            (r'"', String.Double, '#pop'),
+            (r'[^{$"\\]+', String.Double),
+            (r'\\([nrt"$\\]|[0-7]{1,3}|x[0-9a-f]{1,2})', String.Escape),
+            (r'\$' + _ident_nons + r'(\[\S+?\]|->' + _ident_nons + ')?',
+             String.Interpol),
+            (r'(\{\$\{)(.*?)(\}\})',
+             bygroups(String.Interpol, using(this, _startinline=True),
+                      String.Interpol)),
+            (r'(\{)(\$.*?)(\})',
+             bygroups(String.Interpol, using(this, _startinline=True),
+                      String.Interpol)),
+            (r'(\$\{)(\S+)(\})',
+             bygroups(String.Interpol, Name.Variable, String.Interpol)),
+            (r'[${\\]', String.Double)
+        ],
+        'attribute': [
+            (r'\]', Punctuation, '#pop'),
+            (r'\(', Punctuation, 'attributeparams'),
+            (_ident_inner, Name.Decorator),
+            include('php')
+        ],
+        'attributeparams': [
+            (r'\)', Punctuation, '#pop'),
+            include('php')
+        ],
+    }
+
+    def __init__(self, **options):
+        self.funcnamehighlighting = get_bool_opt(
+            options, 'funcnamehighlighting', True)
+        self.disabledmodules = get_list_opt(
+            options, 'disabledmodules', ['unknown'])
+        self.startinline = get_bool_opt(options, 'startinline', False)
+
+        # private option argument for the lexer itself
+        if '_startinline' in options:
+            self.startinline = options.pop('_startinline')
+
+        # collect activated functions in a set
+        self._functions = set()
+        if self.funcnamehighlighting:
+            from pygments.lexers._php_builtins import MODULES
+            for key, value in MODULES.items():
+                if key not in self.disabledmodules:
+                    self._functions.update(value)
+        RegexLexer.__init__(self, **options)
+
+    def get_tokens_unprocessed(self, text):
+        stack = ['root']
+        if self.startinline:
+            stack.append('php')
+        for index, token, value in \
+                RegexLexer.get_tokens_unprocessed(self, text, stack):
+            if token is Name.Other:
+                if value in self._functions:
+                    yield index, Name.Builtin, value
+                    continue
+            yield index, token, value
+
+    def analyse_text(text):
+        if shebang_matches(text, r'php'):
+            return True
+        rv = 0.0
+        if re.search(r'<\?(?!xml)', text):
+            rv += 0.3
+        return rv
diff --git a/local_packages/pygments/lexers/pointless.py b/local_packages/pygments/lexers/pointless.py
new file mode 100644
index 0000000..aab0a56
--- /dev/null
+++ b/local_packages/pygments/lexers/pointless.py
@@ -0,0 +1,70 @@
+"""
+    pygments.lexers.pointless
+    ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for Pointless.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words
+from pygments.token import Comment, Error, Keyword, Name, Number, Operator, \
+    Punctuation, String, Text
+
+__all__ = ['PointlessLexer']
+
+
+class PointlessLexer(RegexLexer):
+    """
+    For Pointless source code.
+    """
+
+    name = 'Pointless'
+    url = 'https://ptls.dev'
+    aliases = ['pointless']
+    filenames = ['*.ptls']
+    version_added = '2.7'
+
+    ops = words([
+        "+", "-", "*", "/", "**", "%", "+=", "-=", "*=",
+        "/=", "**=", "%=", "|>", "=", "==", "!=", "<", ">",
+        "<=", ">=", "=>", "$", "++",
+    ])
+
+    keywords = words([
+        "if", "then", "else", "where", "with", "cond",
+        "case", "and", "or", "not", "in", "as", "for",
+        "requires", "throw", "try", "catch", "when",
+        "yield", "upval",
+    ], suffix=r'\b')
+
+    tokens = {
+        'root': [
+            (r'[ \n\r]+', Text),
+            (r'--.*$', Comment.Single),
+            (r'"""', String, 'multiString'),
+            (r'"', String, 'string'),
+            (r'[\[\](){}:;,.]', Punctuation),
+            (ops, Operator),
+            (keywords, Keyword),
+            (r'\d+|\d*\.\d+', Number),
+            (r'(true|false)\b', Name.Builtin),
+            (r'[A-Z][a-zA-Z0-9]*\b', String.Symbol),
+            (r'output\b', Name.Variable.Magic),
+            (r'(export|import)\b', Keyword.Namespace),
+            (r'[a-z][a-zA-Z0-9]*\b', Name.Variable)
+        ],
+        'multiString': [
+            (r'\\.', String.Escape),
+            (r'"""', String, '#pop'),
+            (r'"', String),
+            (r'[^\\"]+', String),
+        ],
+        'string': [
+            (r'\\.', String.Escape),
+            (r'"', String, '#pop'),
+            (r'\n', Error),
+            (r'[^\\"]+', String),
+        ],
+    }
diff --git a/local_packages/pygments/lexers/pony.py b/local_packages/pygments/lexers/pony.py
new file mode 100644
index 0000000..62d16d7
--- /dev/null
+++ b/local_packages/pygments/lexers/pony.py
@@ -0,0 +1,93 @@
+"""
+    pygments.lexers.pony
+    ~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for Pony and related languages.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+    Number, Punctuation
+
+__all__ = ['PonyLexer']
+
+
+class PonyLexer(RegexLexer):
+    """
+    For Pony source code.
+    """
+
+    name = 'Pony'
+    aliases = ['pony']
+    filenames = ['*.pony']
+    url = 'https://www.ponylang.io'
+    version_added = '2.4'
+
+    _caps = r'(iso|trn|ref|val|box|tag)'
+
+    tokens = {
+        'root': [
+            (r'\n', Text),
+            (r'[^\S\n]+', Text),
+            (r'//.*\n', Comment.Single),
+            (r'/\*', Comment.Multiline, 'nested_comment'),
+            (r'"""(?:.|\n)*?"""', String.Doc),
+            (r'"', String, 'string'),
+            (r'\'.*\'', String.Char),
+            (r'=>|[]{}:().~;,|&!^?[]', Punctuation),
+            (words((
+                'addressof', 'and', 'as', 'consume', 'digestof', 'is', 'isnt',
+                'not', 'or'),
+                suffix=r'\b'),
+             Operator.Word),
+            (r'!=|==|<<|>>|[-+/*%=<>]', Operator),
+            (words((
+                'box', 'break', 'compile_error', 'compile_intrinsic',
+                'continue', 'do', 'else', 'elseif', 'embed', 'end', 'error',
+                'for', 'if', 'ifdef', 'in', 'iso', 'lambda', 'let', 'match',
+                'object', 'recover', 'ref', 'repeat', 'return', 'tag', 'then',
+                'this', 'trn', 'try', 'until', 'use', 'var', 'val', 'where',
+                'while', 'with', '#any', '#read', '#send', '#share'),
+                suffix=r'\b'),
+             Keyword),
+            (r'(actor|class|struct|primitive|interface|trait|type)((?:\s)+)',
+             bygroups(Keyword, Text), 'typename'),
+            (r'(new|fun|be)((?:\s)+)', bygroups(Keyword, Text), 'methodname'),
+            (words((
+                'I8', 'U8', 'I16', 'U16', 'I32', 'U32', 'I64', 'U64', 'I128',
+                'U128', 'ILong', 'ULong', 'ISize', 'USize', 'F32', 'F64',
+                'Bool', 'Pointer', 'None', 'Any', 'Array', 'String',
+                'Iterator'),
+                suffix=r'\b'),
+             Name.Builtin.Type),
+            (r'_?[A-Z]\w*', Name.Type),
+            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
+            (r'0x[0-9a-fA-F]+', Number.Hex),
+            (r'\d+', Number.Integer),
+            (r'(true|false)\b', Name.Builtin),
+            (r'_\d*', Name),
+            (r'_?[a-z][\w\']*', Name)
+        ],
+        'typename': [
+            (_caps + r'?((?:\s)*)(_?[A-Z]\w*)',
+             bygroups(Keyword, Text, Name.Class), '#pop')
+        ],
+        'methodname': [
+            (_caps + r'?((?:\s)*)(_?[a-z]\w*)',
+             bygroups(Keyword, Text, Name.Function), '#pop')
+        ],
+        'nested_comment': [
+            (r'[^*/]+', Comment.Multiline),
+            (r'/\*', Comment.Multiline, '#push'),
+            (r'\*/', Comment.Multiline, '#pop'),
+            (r'[*/]', Comment.Multiline)
+        ],
+        'string': [
+            (r'"', String, '#pop'),
+            (r'\\"', String),
+            (r'[^\\"]+', String)
+        ]
+    }
diff --git a/local_packages/pygments/lexers/praat.py b/local_packages/pygments/lexers/praat.py
new file mode 100644
index 0000000..c50e927
--- /dev/null
+++ b/local_packages/pygments/lexers/praat.py
@@ -0,0 +1,303 @@
+"""
+    pygments.lexers.praat
+    ~~~~~~~~~~~~~~~~~~~~~
+
+    Lexer for Praat
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words, bygroups, include
+from pygments.token import Name, Text, Comment, Keyword, String, Punctuation, \
+    Number, Operator, Whitespace
+
+__all__ = ['PraatLexer']
+
+
+class PraatLexer(RegexLexer):
+    """
+    For Praat scripts.
+    """
+
+    name = 'Praat'
+    url = 'http://www.praat.org'
+    aliases = ['praat']
+    filenames = ['*.praat', '*.proc', '*.psc']
+    version_added = '2.1'
+
+    keywords = (
+        'if', 'then', 'else', 'elsif', 'elif', 'endif', 'fi', 'for', 'from', 'to',
+        'endfor', 'endproc', 'while', 'endwhile', 'repeat', 'until', 'select', 'plus',
+        'minus', 'demo', 'assert', 'stopwatch', 'nocheck', 'nowarn', 'noprogress',
+        'editor', 'endeditor', 'clearinfo',
+    )
+
+    functions_string = (
+        'backslashTrigraphsToUnicode', 'chooseDirectory', 'chooseReadFile',
+        'chooseWriteFile', 'date', 'demoKey', 'do', 'environment', 'extractLine',
+        'extractWord', 'fixed', 'info', 'left', 'mid', 'percent', 'readFile', 'replace',
+        'replace_regex', 'right', 'selected', 'string', 'unicodeToBackslashTrigraphs',
+    )
+
+    functions_numeric = (
+        'abs', 'appendFile', 'appendFileLine', 'appendInfo', 'appendInfoLine', 'arccos',
+        'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'barkToHertz',
+        'beginPause', 'beginSendPraat', 'besselI', 'besselK', 'beta', 'beta2',
+        'binomialP', 'binomialQ', 'boolean', 'ceiling', 'chiSquareP', 'chiSquareQ',
+        'choice', 'comment', 'cos', 'cosh', 'createDirectory', 'deleteFile',
+        'demoClicked', 'demoClickedIn', 'demoCommandKeyPressed',
+        'demoExtraControlKeyPressed', 'demoInput', 'demoKeyPressed',
+        'demoOptionKeyPressed', 'demoShiftKeyPressed', 'demoShow', 'demoWaitForInput',
+        'demoWindowTitle', 'demoX', 'demoY', 'differenceLimensToPhon', 'do', 'editor',
+        'endPause', 'endSendPraat', 'endsWith', 'erb', 'erbToHertz', 'erf', 'erfc',
+        'exitScript', 'exp', 'extractNumber', 'fileReadable', 'fisherP', 'fisherQ',
+        'floor', 'gaussP', 'gaussQ', 'hertzToBark', 'hertzToErb', 'hertzToMel',
+        'hertzToSemitones', 'imax', 'imin', 'incompleteBeta', 'incompleteGammaP', 'index',
+        'index_regex', 'integer', 'invBinomialP', 'invBinomialQ', 'invChiSquareQ', 'invFisherQ',
+        'invGaussQ', 'invSigmoid', 'invStudentQ', 'length', 'ln', 'lnBeta', 'lnGamma',
+        'log10', 'log2', 'max', 'melToHertz', 'min', 'minusObject', 'natural', 'number',
+        'numberOfColumns', 'numberOfRows', 'numberOfSelected', 'objectsAreIdentical',
+        'option', 'optionMenu', 'pauseScript', 'phonToDifferenceLimens', 'plusObject',
+        'positive', 'randomBinomial', 'randomGauss', 'randomInteger', 'randomPoisson',
+        'randomUniform', 'real', 'readFile', 'removeObject', 'rindex', 'rindex_regex',
+        'round', 'runScript', 'runSystem', 'runSystem_nocheck', 'selectObject',
+        'selected', 'semitonesToHertz', 'sentence', 'sentencetext', 'sigmoid', 'sin', 'sinc',
+        'sincpi', 'sinh', 'soundPressureToPhon', 'sqrt', 'startsWith', 'studentP',
+        'studentQ', 'tan', 'tanh', 'text', 'variableExists', 'word', 'writeFile', 'writeFileLine',
+        'writeInfo', 'writeInfoLine',
+    )
+
+    functions_array = (
+        'linear', 'randomGauss', 'randomInteger', 'randomUniform', 'zero',
+    )
+
+    objects = (
+        'Activation', 'AffineTransform', 'AmplitudeTier', 'Art', 'Artword',
+        'Autosegment', 'BarkFilter', 'BarkSpectrogram', 'CCA', 'Categories',
+        'Cepstrogram', 'Cepstrum', 'Cepstrumc', 'ChebyshevSeries', 'ClassificationTable',
+        'Cochleagram', 'Collection', 'ComplexSpectrogram', 'Configuration', 'Confusion',
+        'ContingencyTable', 'Corpus', 'Correlation', 'Covariance',
+        'CrossCorrelationTable', 'CrossCorrelationTables', 'DTW', 'DataModeler',
+        'Diagonalizer', 'Discriminant', 'Dissimilarity', 'Distance', 'Distributions',
+        'DurationTier', 'EEG', 'ERP', 'ERPTier', 'EditCostsTable', 'EditDistanceTable',
+        'Eigen', 'Excitation', 'Excitations', 'ExperimentMFC', 'FFNet', 'FeatureWeights',
+        'FileInMemory', 'FilesInMemory', 'Formant', 'FormantFilter', 'FormantGrid',
+        'FormantModeler', 'FormantPoint', 'FormantTier', 'GaussianMixture', 'HMM',
+        'HMM_Observation', 'HMM_ObservationSequence', 'HMM_State', 'HMM_StateSequence',
+        'Harmonicity', 'ISpline', 'Index', 'Intensity', 'IntensityTier', 'IntervalTier',
+        'KNN', 'KlattGrid', 'KlattTable', 'LFCC', 'LPC', 'Label', 'LegendreSeries',
+        'LinearRegression', 'LogisticRegression', 'LongSound', 'Ltas', 'MFCC', 'MSpline',
+        'ManPages', 'Manipulation', 'Matrix', 'MelFilter', 'MelSpectrogram',
+        'MixingMatrix', 'Movie', 'Network', 'Object', 'OTGrammar', 'OTHistory', 'OTMulti',
+        'PCA', 'PairDistribution', 'ParamCurve', 'Pattern', 'Permutation', 'Photo',
+        'Pitch', 'PitchModeler', 'PitchTier', 'PointProcess', 'Polygon', 'Polynomial',
+        'PowerCepstrogram', 'PowerCepstrum', 'Procrustes', 'RealPoint', 'RealTier',
+        'ResultsMFC', 'Roots', 'SPINET', 'SSCP', 'SVD', 'Salience', 'ScalarProduct',
+        'Similarity', 'SimpleString', 'SortedSetOfString', 'Sound', 'Speaker',
+        'Spectrogram', 'Spectrum', 'SpectrumTier', 'SpeechSynthesizer', 'SpellingChecker',
+        'Strings', 'StringsIndex', 'Table', 'TableOfReal', 'TextGrid', 'TextInterval',
+        'TextPoint', 'TextTier', 'Tier', 'Transition', 'VocalTract', 'VocalTractTier',
+        'Weight', 'WordList',
+    )
+
+    variables_numeric = (
+        'macintosh', 'windows', 'unix', 'praatVersion', 'pi', 'e', 'undefined',
+    )
+
+    variables_string = (
+        'praatVersion', 'tab', 'shellDirectory', 'homeDirectory',
+        'preferencesDirectory', 'newline', 'temporaryDirectory',
+        'defaultDirectory',
+    )
+
+    object_attributes = (
+        'ncol', 'nrow', 'xmin', 'ymin', 'xmax', 'ymax', 'nx', 'ny', 'dx', 'dy',
+    )
+
+    tokens = {
+        'root': [
+            (r'(\s+)(#.*?$)',  bygroups(Whitespace, Comment.Single)),
+            (r'^#.*?$',        Comment.Single),
+            (r';[^\n]*',       Comment.Single),
+            (r'\s+',           Whitespace),
+
+            (r'\bprocedure\b', Keyword,       'procedure_definition'),
+            (r'\bcall\b',      Keyword,       'procedure_call'),
+            (r'@',             Name.Function, 'procedure_call'),
+
+            include('function_call'),
+
+            (words(keywords, suffix=r'\b'), Keyword),
+
+            (r'(\bform\b)(\s+)([^\n]+)',
+             bygroups(Keyword, Whitespace, String), 'old_form'),
+
+            (r'(print(?:line|tab)?|echo|exit|asserterror|pause|send(?:praat|socket)|'
+             r'include|execute|system(?:_nocheck)?)(\s+)',
+             bygroups(Keyword, Whitespace), 'string_unquoted'),
+
+            (r'(goto|label)(\s+)(\w+)', bygroups(Keyword, Whitespace, Name.Label)),
+
+            include('variable_name'),
+            include('number'),
+
+            (r'"', String, 'string'),
+
+            (words((objects), suffix=r'(?=\s+\S+\n)'), Name.Class, 'string_unquoted'),
+
+            (r'\b[A-Z]', Keyword, 'command'),
+            (r'(\.{3}|[)(,])', Punctuation),
+        ],
+        'command': [
+            (r'( ?[\w()-]+ ?)', Keyword),
+
+            include('string_interpolated'),
+
+            (r'\.{3}', Keyword, ('#pop', 'old_arguments')),
+            (r':', Keyword, ('#pop', 'comma_list')),
+            (r'\s', Whitespace, '#pop'),
+        ],
+        'procedure_call': [
+            (r'\s+', Whitespace),
+            (r'([\w.]+)(?:(:)|(?:(\s*)(\()))',
+             bygroups(Name.Function, Punctuation,
+                      Text.Whitespace, Punctuation), '#pop'),
+            (r'([\w.]+)', Name.Function, ('#pop', 'old_arguments')),
+        ],
+        'procedure_definition': [
+            (r'\s', Whitespace),
+            (r'([\w.]+)(\s*?[(:])',
+             bygroups(Name.Function, Whitespace), '#pop'),
+            (r'([\w.]+)([^\n]*)',
+             bygroups(Name.Function, Text), '#pop'),
+        ],
+        'function_call': [
+            (words(functions_string, suffix=r'\$(?=\s*[:(])'), Name.Function, 'function'),
+            (words(functions_array, suffix=r'#(?=\s*[:(])'),   Name.Function, 'function'),
+            (words(functions_numeric, suffix=r'(?=\s*[:(])'),  Name.Function, 'function'),
+        ],
+        'function': [
+            (r'\s+',   Whitespace),
+            (r':',     Punctuation, ('#pop', 'comma_list')),
+            (r'\s*\(', Punctuation, ('#pop', 'comma_list')),
+        ],
+        'comma_list': [
+            (r'(\s*\n\s*)(\.{3})', bygroups(Whitespace, Punctuation)),
+
+            (r'(\s*)(?:([)\]])|(\n))', bygroups(
+                Whitespace, Punctuation, Whitespace), '#pop'),
+
+            (r'\s+', Whitespace),
+            (r'"',   String, 'string'),
+            (r'\b(if|then|else|fi|endif)\b', Keyword),
+
+            include('function_call'),
+            include('variable_name'),
+            include('operator'),
+            include('number'),
+
+            (r'[()]', Text),
+            (r',', Punctuation),
+        ],
+        'old_arguments': [
+            (r'\n', Whitespace, '#pop'),
+
+            include('variable_name'),
+            include('operator'),
+            include('number'),
+
+            (r'"', String, 'string'),
+            (r'[^\n]', Text),
+        ],
+        'number': [
+            (r'\n', Whitespace, '#pop'),
+            (r'\b\d+(\.\d*)?([eE][-+]?\d+)?%?', Number),
+        ],
+        'object_reference': [
+            include('string_interpolated'),
+            (r'([a-z][a-zA-Z0-9_]*|\d+)', Name.Builtin),
+
+            (words(object_attributes, prefix=r'\.'), Name.Builtin, '#pop'),
+
+            (r'\$', Name.Builtin),
+            (r'\[', Text, '#pop'),
+        ],
+        'variable_name': [
+            include('operator'),
+            include('number'),
+
+            (words(variables_string,  suffix=r'\$'), Name.Variable.Global),
+            (words(variables_numeric,
+             suffix=r'(?=[^a-zA-Z0-9_."\'$#\[:(]|\s|^|$)'),
+             Name.Variable.Global),
+
+            (words(objects, prefix=r'\b', suffix=r"(_)"),
+             bygroups(Name.Builtin, Name.Builtin),
+             'object_reference'),
+
+            (r'\.?_?[a-z][\w.]*(\$|#)?', Text),
+            (r'[\[\]]', Punctuation, 'comma_list'),
+
+            include('string_interpolated'),
+        ],
+        'operator': [
+            (r'([+\/*<>=!-]=?|[&*|][&*|]?|\^|<>)',       Operator),
+            (r'(?', Punctuation),
+            (r'"(?:\\x[0-9a-fA-F]+\\|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|'
+             r'\\[0-7]+\\|\\["\\abcefnrstv]|[^\\"])*"', String.Double),
+            (r"'(?:''|[^'])*'", String.Atom),  # quoted atom
+            # Needs to not be followed by an atom.
+            # (r'=(?=\s|[a-zA-Z\[])', Operator),
+            (r'is\b', Operator),
+            (r'(<|>|=<|>=|==|=:=|=|/|//|\*|\+|-)(?=\s|[a-zA-Z0-9\[])',
+             Operator),
+            (r'(mod|div|not)\b', Operator),
+            (r'_', Keyword),  # The don't-care variable
+            (r'([a-z]+)(:)', bygroups(Name.Namespace, Punctuation)),
+            (r'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
+             r'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)'
+             r'(\s*)(:-|-->)',
+             bygroups(Name.Function, Text, Operator)),  # function defn
+            (r'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
+             r'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)'
+             r'(\s*)(\()',
+             bygroups(Name.Function, Text, Punctuation)),
+            (r'[a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
+             r'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*',
+             String.Atom),  # atom, characters
+            # This one includes !
+            (r'[#&*+\-./:<=>?@\\^~\u00a1-\u00bf\u2010-\u303f]+',
+             String.Atom),  # atom, graphics
+            (r'[A-Z_]\w*', Name.Variable),
+            (r'\s+|[\u2000-\u200f\ufff0-\ufffe\uffef]', Text),
+        ],
+        'nested-comment': [
+            (r'\*/', Comment.Multiline, '#pop'),
+            (r'/\*', Comment.Multiline, '#push'),
+            (r'[^*/]+', Comment.Multiline),
+            (r'[*/]', Comment.Multiline),
+        ],
+    }
+
+    def analyse_text(text):
+        """Competes with IDL and Visual Prolog on *.pro"""
+        if ':-' in text:
+            # Visual Prolog also uses :-
+            return 0.5
+        else:
+            return 0
+
+
+class LogtalkLexer(RegexLexer):
+    """
+    For Logtalk source code.
+    """
+
+    name = 'Logtalk'
+    url = 'http://logtalk.org/'
+    aliases = ['logtalk']
+    filenames = ['*.lgt', '*.logtalk']
+    mimetypes = ['text/x-logtalk']
+    version_added = '0.10'
+
+    tokens = {
+        'root': [
+            # Directives
+            (r'^\s*:-\s', Punctuation, 'directive'),
+            # Comments
+            (r'%.*?\n', Comment),
+            (r'/\*(.|\n)*?\*/', Comment),
+            # Whitespace
+            (r'\n', Text),
+            (r'\s+', Text),
+            # Numbers
+            (r"0'[\\]?.", Number),
+            (r'0b[01]+', Number.Bin),
+            (r'0o[0-7]+', Number.Oct),
+            (r'0x[0-9a-fA-F]+', Number.Hex),
+            (r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number),
+            # Variables
+            (r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable),
+            # Event handlers
+            (r'(after|before)(?=[(])', Keyword),
+            # Message forwarding handler
+            (r'forward(?=[(])', Keyword),
+            # Execution-context methods
+            (r'(context|parameter|this|se(lf|nder))(?=[(])', Keyword),
+            # Reflection
+            (r'(current_predicate|predicate_property)(?=[(])', Keyword),
+            # DCGs and term expansion
+            (r'(expand_(goal|term)|(goal|term)_expansion|phrase)(?=[(])', Keyword),
+            # Entity
+            (r'(abolish|c(reate|urrent))_(object|protocol|category)(?=[(])', Keyword),
+            (r'(object|protocol|category)_property(?=[(])', Keyword),
+            # Entity relations
+            (r'co(mplements_object|nforms_to_protocol)(?=[(])', Keyword),
+            (r'extends_(object|protocol|category)(?=[(])', Keyword),
+            (r'imp(lements_protocol|orts_category)(?=[(])', Keyword),
+            (r'(instantiat|specializ)es_class(?=[(])', Keyword),
+            # Events
+            (r'(current_event|(abolish|define)_events)(?=[(])', Keyword),
+            # Flags
+            (r'(create|current|set)_logtalk_flag(?=[(])', Keyword),
+            # Compiling, loading, and library paths
+            (r'logtalk_(compile|l(ibrary_path|oad|oad_context)|make(_target_action)?)(?=[(])', Keyword),
+            (r'\blogtalk_make\b', Keyword),
+            # Database
+            (r'(clause|retract(all)?)(?=[(])', Keyword),
+            (r'a(bolish|ssert(a|z))(?=[(])', Keyword),
+            # Control constructs
+            (r'(ca(ll|tch)|throw)(?=[(])', Keyword),
+            (r'(fa(il|lse)|true|(instantiation|system)_error)\b', Keyword),
+            (r'(uninstantiation|type|domain|existence|permission|representation|evaluation|resource|syntax)_error(?=[(])', Keyword),
+            # All solutions
+            (r'((bag|set)of|f(ind|or)all)(?=[(])', Keyword),
+            # Multi-threading predicates
+            (r'threaded(_(ca(ll|ncel)|once|ignore|exit|peek|wait|notify))?(?=[(])', Keyword),
+            # Engine predicates
+            (r'threaded_engine(_(create|destroy|self|next|next_reified|yield|post|fetch))?(?=[(])', Keyword),
+            # Term unification
+            (r'(subsumes_term|unify_with_occurs_check)(?=[(])', Keyword),
+            # Term creation and decomposition
+            (r'(functor|arg|copy_term|numbervars|term_variables)(?=[(])', Keyword),
+            # Evaluable functors
+            (r'(div|rem|m(ax|in|od)|abs|sign)(?=[(])', Keyword),
+            (r'float(_(integer|fractional)_part)?(?=[(])', Keyword),
+            (r'(floor|t(an|runcate)|round|ceiling)(?=[(])', Keyword),
+            # Other arithmetic functors
+            (r'(cos|a(cos|sin|tan|tan2)|exp|log|s(in|qrt)|xor)(?=[(])', Keyword),
+            # Term testing
+            (r'(var|atom(ic)?|integer|float|c(allable|ompound)|n(onvar|umber)|ground|acyclic_term)(?=[(])', Keyword),
+            # Term comparison
+            (r'compare(?=[(])', Keyword),
+            # Stream selection and control
+            (r'(curren|se)t_(in|out)put(?=[(])', Keyword),
+            (r'(open|close)(?=[(])', Keyword),
+            (r'flush_output(?=[(])', Keyword),
+            (r'(at_end_of_stream|flush_output)\b', Keyword),
+            (r'(stream_property|at_end_of_stream|set_stream_position)(?=[(])', Keyword),
+            # Character and byte input/output
+            (r'(nl|(get|peek|put)_(byte|c(har|ode)))(?=[(])', Keyword),
+            (r'\bnl\b', Keyword),
+            # Term input/output
+            (r'read(_term)?(?=[(])', Keyword),
+            (r'write(q|_(canonical|term))?(?=[(])', Keyword),
+            (r'(current_)?op(?=[(])', Keyword),
+            (r'(current_)?char_conversion(?=[(])', Keyword),
+            # Atomic term processing
+            (r'atom_(length|c(hars|o(ncat|des)))(?=[(])', Keyword),
+            (r'(char_code|sub_atom)(?=[(])', Keyword),
+            (r'number_c(har|ode)s(?=[(])', Keyword),
+            # Implementation defined hooks functions
+            (r'(se|curren)t_prolog_flag(?=[(])', Keyword),
+            (r'\bhalt\b', Keyword),
+            (r'halt(?=[(])', Keyword),
+            # Message sending operators
+            (r'(::|:|\^\^)', Operator),
+            # External call
+            (r'[{}]', Keyword),
+            # Logic and control
+            (r'(ignore|once)(?=[(])', Keyword),
+            (r'\brepeat\b', Keyword),
+            # Sorting
+            (r'(key)?sort(?=[(])', Keyword),
+            # Bitwise functors
+            (r'(>>|<<|/\\|\\\\|\\)', Operator),
+            # Predicate aliases
+            (r'\bas\b', Operator),
+            # Arithmetic evaluation
+            (r'\bis\b', Keyword),
+            # Arithmetic comparison
+            (r'(=:=|=\\=|<|=<|>=|>)', Operator),
+            # Term creation and decomposition
+            (r'=\.\.', Operator),
+            # Term unification
+            (r'(=|\\=)', Operator),
+            # Term comparison
+            (r'(==|\\==|@=<|@<|@>=|@>)', Operator),
+            # Evaluable functors
+            (r'(//|[-+*/])', Operator),
+            (r'\b(e|pi|div|mod|rem)\b', Operator),
+            # Other arithmetic functors
+            (r'\b\*\*\b', Operator),
+            # DCG rules
+            (r'-->', Operator),
+            # Control constructs
+            (r'([!;]|->)', Operator),
+            # Logic and control
+            (r'\\+', Operator),
+            # Mode operators
+            (r'[?@]', Operator),
+            # Existential quantifier
+            (r'\^', Operator),
+            # Punctuation
+            (r'[()\[\],.|]', Text),
+            # Atoms
+            (r"[a-z][a-zA-Z0-9_]*", Text),
+            (r"'", String, 'quoted_atom'),
+            # Double-quoted terms
+            (r'"', String, 'double_quoted_term'),
+        ],
+
+        'quoted_atom': [
+            (r"''", String),
+            (r"'", String, '#pop'),
+            (r'\\([\\abfnrtv"\']|(x[a-fA-F0-9]+|[0-7]+)\\)', String.Escape),
+            (r"[^\\'\n]+", String),
+            (r'\\', String),
+        ],
+
+        'double_quoted_term': [
+            (r'""', String),
+            (r'"', String, '#pop'),
+            (r'\\([\\abfnrtv"\']|(x[a-fA-F0-9]+|[0-7]+)\\)', String.Escape),
+            (r'[^\\"\n]+', String),
+            (r'\\', String),
+        ],
+
+        'directive': [
+            # Conditional compilation directives
+            (r'(el)?if(?=[(])', Keyword, 'root'),
+            (r'(e(lse|ndif))(?=[.])', Keyword, 'root'),
+            # Entity directives
+            (r'(category|object|protocol)(?=[(])', Keyword, 'entityrelations'),
+            (r'(end_(category|object|protocol))(?=[.])', Keyword, 'root'),
+            # Predicate scope directives
+            (r'(public|protected|private)(?=[(])', Keyword, 'root'),
+            # Other directives
+            (r'e(n(coding|sure_loaded)|xport)(?=[(])', Keyword, 'root'),
+            (r'in(clude|itialization|fo)(?=[(])', Keyword, 'root'),
+            (r'(built_in|dynamic|synchronized|threaded)(?=[.])', Keyword, 'root'),
+            (r'(alias|d(ynamic|iscontiguous)|m(eta_(non_terminal|predicate)|ode|ultifile)|s(et_(logtalk|prolog)_flag|ynchronized))(?=[(])', Keyword, 'root'),
+            (r'op(?=[(])', Keyword, 'root'),
+            (r'(c(alls|oinductive)|module|reexport|use(s|_module))(?=[(])', Keyword, 'root'),
+            (r'[a-z][a-zA-Z0-9_]*(?=[(])', Text, 'root'),
+            (r'[a-z][a-zA-Z0-9_]*(?=[.])', Text, 'root'),
+        ],
+
+        'entityrelations': [
+            (r'(complements|extends|i(nstantiates|mp(lements|orts))|specializes)(?=[(])', Keyword),
+            # Numbers
+            (r"0'[\\]?.", Number),
+            (r'0b[01]+', Number.Bin),
+            (r'0o[0-7]+', Number.Oct),
+            (r'0x[0-9a-fA-F]+', Number.Hex),
+            (r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number),
+            # Variables
+            (r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable),
+            # Atoms
+            (r"[a-z][a-zA-Z0-9_]*", Text),
+            (r"'", String, 'quoted_atom'),
+            # Double-quoted terms
+            (r'"', String, 'double_quoted_term'),
+            # End of entity-opening directive
+            (r'([)]\.)', Text, 'root'),
+            # Scope operator
+            (r'(::)', Operator),
+            # Punctuation
+            (r'[()\[\],.|]', Text),
+            # Comments
+            (r'%.*?\n', Comment),
+            (r'/\*(.|\n)*?\*/', Comment),
+            # Whitespace
+            (r'\n', Text),
+            (r'\s+', Text),
+        ]
+    }
+
+    def analyse_text(text):
+        if ':- object(' in text:
+            return 1.0
+        elif ':- protocol(' in text:
+            return 1.0
+        elif ':- category(' in text:
+            return 1.0
+        elif re.search(r'^:-\s[a-z]', text, re.M):
+            return 0.9
+        else:
+            return 0.0
diff --git a/local_packages/pygments/lexers/promql.py b/local_packages/pygments/lexers/promql.py
new file mode 100644
index 0000000..5cfa73f
--- /dev/null
+++ b/local_packages/pygments/lexers/promql.py
@@ -0,0 +1,176 @@
+"""
+    pygments.lexers.promql
+    ~~~~~~~~~~~~~~~~~~~~~~
+
+    Lexer for Prometheus Query Language.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, default, words
+from pygments.token import Comment, Keyword, Name, Number, Operator, \
+    Punctuation, String, Whitespace
+
+__all__ = ["PromQLLexer"]
+
+
+class PromQLLexer(RegexLexer):
+    """
+    For PromQL queries.
+
+    For details about the grammar see:
+    https://github.com/prometheus/prometheus/tree/master/promql/parser
+
+    .. versionadded: 2.7
+    """
+
+    name = "PromQL"
+    url = 'https://prometheus.io/docs/prometheus/latest/querying/basics/'
+    aliases = ["promql"]
+    filenames = ["*.promql"]
+    version_added = ''
+
+    base_keywords = (
+        words(
+            (
+                "bool",
+                "by",
+                "group_left",
+                "group_right",
+                "ignoring",
+                "offset",
+                "on",
+                "without",
+            ),
+            suffix=r"\b",
+        ),
+        Keyword,
+    )
+
+    aggregator_keywords = (
+        words(
+            (
+                "sum",
+                "min",
+                "max",
+                "avg",
+                "group",
+                "stddev",
+                "stdvar",
+                "count",
+                "count_values",
+                "bottomk",
+                "topk",
+                "quantile",
+            ),
+            suffix=r"\b",
+        ),
+        Keyword,
+    )
+
+    function_keywords = (
+        words(
+            (
+                "abs",
+                "absent",
+                "absent_over_time",
+                "avg_over_time",
+                "ceil",
+                "changes",
+                "clamp_max",
+                "clamp_min",
+                "count_over_time",
+                "day_of_month",
+                "day_of_week",
+                "days_in_month",
+                "delta",
+                "deriv",
+                "exp",
+                "floor",
+                "histogram_quantile",
+                "holt_winters",
+                "hour",
+                "idelta",
+                "increase",
+                "irate",
+                "label_join",
+                "label_replace",
+                "ln",
+                "log10",
+                "log2",
+                "max_over_time",
+                "min_over_time",
+                "minute",
+                "month",
+                "predict_linear",
+                "quantile_over_time",
+                "rate",
+                "resets",
+                "round",
+                "scalar",
+                "sort",
+                "sort_desc",
+                "sqrt",
+                "stddev_over_time",
+                "stdvar_over_time",
+                "sum_over_time",
+                "time",
+                "timestamp",
+                "vector",
+                "year",
+            ),
+            suffix=r"\b",
+        ),
+        Keyword.Reserved,
+    )
+
+    tokens = {
+        "root": [
+            (r"\n", Whitespace),
+            (r"\s+", Whitespace),
+            (r",", Punctuation),
+            # Keywords
+            base_keywords,
+            aggregator_keywords,
+            function_keywords,
+            # Offsets
+            (r"[1-9][0-9]*[smhdwy]", String),
+            # Numbers
+            (r"-?[0-9]+\.[0-9]+", Number.Float),
+            (r"-?[0-9]+", Number.Integer),
+            # Comments
+            (r"#.*?$", Comment.Single),
+            # Operators
+            (r"(\+|\-|\*|\/|\%|\^)", Operator),
+            (r"==|!=|>=|<=|<|>", Operator),
+            (r"and|or|unless", Operator.Word),
+            # Metrics
+            (r"[_a-zA-Z][a-zA-Z0-9_]+", Name.Variable),
+            # Params
+            (r'(["\'])(.*?)(["\'])', bygroups(Punctuation, String, Punctuation)),
+            # Other states
+            (r"\(", Operator, "function"),
+            (r"\)", Operator),
+            (r"\{", Punctuation, "labels"),
+            (r"\[", Punctuation, "range"),
+        ],
+        "labels": [
+            (r"\}", Punctuation, "#pop"),
+            (r"\n", Whitespace),
+            (r"\s+", Whitespace),
+            (r",", Punctuation),
+            (r'([_a-zA-Z][a-zA-Z0-9_]*?)(\s*?)(=~|!=|=|!~)(\s*?)("|\')(.*?)("|\')',
+             bygroups(Name.Label, Whitespace, Operator, Whitespace,
+                      Punctuation, String, Punctuation)),
+        ],
+        "range": [
+            (r"\]", Punctuation, "#pop"),
+            (r"[1-9][0-9]*[smhdwy]", String),
+        ],
+        "function": [
+            (r"\)", Operator, "#pop"),
+            (r"\(", Operator, "#push"),
+            default("#pop"),
+        ],
+    }
diff --git a/local_packages/pygments/lexers/prql.py b/local_packages/pygments/lexers/prql.py
new file mode 100644
index 0000000..a62cb6b
--- /dev/null
+++ b/local_packages/pygments/lexers/prql.py
@@ -0,0 +1,251 @@
+"""
+    pygments.lexers.prql
+    ~~~~~~~~~~~~~~~~~~~~
+
+    Lexer for the PRQL query language.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, combined, words, include, bygroups
+from pygments.token import Comment, Literal, Keyword, Name, Number, Operator, \
+    Punctuation, String, Text, Whitespace
+
+__all__ = ['PrqlLexer']
+
+
+class PrqlLexer(RegexLexer):
+    """
+    For PRQL source code.
+
+    grammar: https://github.com/PRQL/prql/tree/main/grammars
+    """
+
+    name = 'PRQL'
+    url = 'https://prql-lang.org/'
+    aliases = ['prql']
+    filenames = ['*.prql']
+    mimetypes = ['application/prql', 'application/x-prql']
+    version_added = '2.17'
+
+    builtinTypes = words((
+        "bool",
+        "int",
+        "int8", "int16", "int32", "int64", "int128",
+        "float",
+        "text",
+        "set"), suffix=r'\b')
+
+    def innerstring_rules(ttype):
+        return [
+            # the new style '{}'.format(...) string formatting
+            (r'\{'
+             r'((\w+)((\.\w+)|(\[[^\]]+\]))*)?'  # field name
+             r'(\:(.?[<>=\^])?[-+ ]?#?0?(\d+)?,?(\.\d+)?[E-GXb-gnosx%]?)?'
+             r'\}', String.Interpol),
+
+            (r'[^\\\'"%{\n]+', ttype),
+            (r'[\'"\\]', ttype),
+            (r'%|(\{{1,2})', ttype)
+        ]
+
+    def fstring_rules(ttype):
+        return [
+            (r'\}', String.Interpol),
+            (r'\{', String.Interpol, 'expr-inside-fstring'),
+            (r'[^\\\'"{}\n]+', ttype),
+            (r'[\'"\\]', ttype),
+        ]
+
+    tokens = {
+        'root': [
+
+            # Comments
+            (r'#!.*', String.Doc),
+            (r'#.*', Comment.Single),
+
+            # Whitespace
+            (r'\s+', Whitespace),
+
+            # Modules
+            (r'^(\s*)(module)(\s*)',
+             bygroups(Whitespace, Keyword.Namespace, Whitespace),
+             'imports'),
+
+            (builtinTypes, Keyword.Type),
+
+            # Main
+            (r'^prql ', Keyword.Reserved),
+
+            ('let', Keyword.Declaration),
+
+            include('keywords'),
+            include('expr'),
+
+            # Transforms
+            (r'^[A-Za-z_][a-zA-Z0-9_]*', Keyword),
+        ],
+        'expr': [
+            # non-raw f-strings
+            ('(f)(""")', bygroups(String.Affix, String.Double),
+             combined('fstringescape', 'tdqf')),
+            ("(f)(''')", bygroups(String.Affix, String.Single),
+             combined('fstringescape', 'tsqf')),
+            ('(f)(")', bygroups(String.Affix, String.Double),
+             combined('fstringescape', 'dqf')),
+            ("(f)(')", bygroups(String.Affix, String.Single),
+             combined('fstringescape', 'sqf')),
+
+            # non-raw s-strings
+            ('(s)(""")', bygroups(String.Affix, String.Double),
+             combined('stringescape', 'tdqf')),
+            ("(s)(''')", bygroups(String.Affix, String.Single),
+             combined('stringescape', 'tsqf')),
+            ('(s)(")', bygroups(String.Affix, String.Double),
+             combined('stringescape', 'dqf')),
+            ("(s)(')", bygroups(String.Affix, String.Single),
+             combined('stringescape', 'sqf')),
+
+            # raw strings
+            ('(?i)(r)(""")',
+             bygroups(String.Affix, String.Double), 'tdqs'),
+            ("(?i)(r)(''')",
+             bygroups(String.Affix, String.Single), 'tsqs'),
+            ('(?i)(r)(")',
+             bygroups(String.Affix, String.Double), 'dqs'),
+            ("(?i)(r)(')",
+             bygroups(String.Affix, String.Single), 'sqs'),
+
+            # non-raw strings
+            ('"""', String.Double, combined('stringescape', 'tdqs')),
+            ("'''", String.Single, combined('stringescape', 'tsqs')),
+            ('"', String.Double, combined('stringescape', 'dqs')),
+            ("'", String.Single, combined('stringescape', 'sqs')),
+
+            # Time and dates
+            (r'@\d{4}-\d{2}-\d{2}T\d{2}(:\d{2})?(:\d{2})?(\.\d{1,6})?(Z|[+-]\d{1,2}(:\d{1,2})?)?', Literal.Date),
+            (r'@\d{4}-\d{2}-\d{2}', Literal.Date),
+            (r'@\d{2}(:\d{2})?(:\d{2})?(\.\d{1,6})?(Z|[+-]\d{1,2}(:\d{1,2})?)?', Literal.Date),
+
+            (r'[^\S\n]+', Text),
+            include('numbers'),
+            (r'->|=>|==|!=|>=|<=|~=|&&|\|\||\?\?|\/\/', Operator),
+            (r'[-~+/*%=<>&^|.@]', Operator),
+            (r'[]{}:(),;[]', Punctuation),
+            include('functions'),
+
+            # Variable Names
+            (r'[A-Za-z_][a-zA-Z0-9_]*', Name.Variable),
+        ],
+        'numbers': [
+            (r'(\d(?:_?\d)*\.(?:\d(?:_?\d)*)?|(?:\d(?:_?\d)*)?\.\d(?:_?\d)*)'
+             r'([eE][+-]?\d(?:_?\d)*)?', Number.Float),
+            (r'\d(?:_?\d)*[eE][+-]?\d(?:_?\d)*j?', Number.Float),
+            (r'0[oO](?:_?[0-7])+', Number.Oct),
+            (r'0[bB](?:_?[01])+', Number.Bin),
+            (r'0[xX](?:_?[a-fA-F0-9])+', Number.Hex),
+            (r'\d(?:_?\d)*', Number.Integer),
+        ],
+        'fstringescape': [
+            include('stringescape'),
+        ],
+        'bytesescape': [
+            (r'\\([\\bfnrt"\']|\n|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
+        ],
+        'stringescape': [
+            (r'\\(N\{.*?\}|u\{[a-fA-F0-9]{1,6}\})', String.Escape),
+            include('bytesescape')
+        ],
+        'fstrings-single': fstring_rules(String.Single),
+        'fstrings-double': fstring_rules(String.Double),
+        'strings-single': innerstring_rules(String.Single),
+        'strings-double': innerstring_rules(String.Double),
+        'dqf': [
+            (r'"', String.Double, '#pop'),
+            (r'\\\\|\\"|\\\n', String.Escape),  # included here for raw strings
+            include('fstrings-double')
+        ],
+        'sqf': [
+            (r"'", String.Single, '#pop'),
+            (r"\\\\|\\'|\\\n", String.Escape),  # included here for raw strings
+            include('fstrings-single')
+        ],
+        'dqs': [
+            (r'"', String.Double, '#pop'),
+            (r'\\\\|\\"|\\\n', String.Escape),  # included here for raw strings
+            include('strings-double')
+        ],
+        'sqs': [
+            (r"'", String.Single, '#pop'),
+            (r"\\\\|\\'|\\\n", String.Escape),  # included here for raw strings
+            include('strings-single')
+        ],
+        'tdqf': [
+            (r'"""', String.Double, '#pop'),
+            include('fstrings-double'),
+            (r'\n', String.Double)
+        ],
+        'tsqf': [
+            (r"'''", String.Single, '#pop'),
+            include('fstrings-single'),
+            (r'\n', String.Single)
+        ],
+        'tdqs': [
+            (r'"""', String.Double, '#pop'),
+            include('strings-double'),
+            (r'\n', String.Double)
+        ],
+        'tsqs': [
+            (r"'''", String.Single, '#pop'),
+            include('strings-single'),
+            (r'\n', String.Single)
+        ],
+
+        'expr-inside-fstring': [
+            (r'[{([]', Punctuation, 'expr-inside-fstring-inner'),
+            # without format specifier
+            (r'(=\s*)?'         # debug (https://bugs.python.org/issue36817)
+             r'\}', String.Interpol, '#pop'),
+            # with format specifier
+            # we'll catch the remaining '}' in the outer scope
+            (r'(=\s*)?'         # debug (https://bugs.python.org/issue36817)
+             r':', String.Interpol, '#pop'),
+            (r'\s+', Whitespace),  # allow new lines
+            include('expr'),
+        ],
+        'expr-inside-fstring-inner': [
+            (r'[{([]', Punctuation, 'expr-inside-fstring-inner'),
+            (r'[])}]', Punctuation, '#pop'),
+            (r'\s+', Whitespace),  # allow new lines
+            include('expr'),
+        ],
+        'keywords': [
+            (words((
+                'into', 'case', 'type', 'module', 'internal',
+            ), suffix=r'\b'),
+                Keyword),
+            (words(('true', 'false', 'null'), suffix=r'\b'), Keyword.Constant),
+        ],
+        'functions': [
+            (words((
+                "min", "max", "sum", "average", "stddev", "every", "any",
+                "concat_array", "count", "lag", "lead", "first", "last",
+                "rank", "rank_dense", "row_number", "round", "as", "in",
+                "tuple_every", "tuple_map", "tuple_zip", "_eq", "_is_null",
+                "from_text", "lower", "upper", "read_parquet", "read_csv"),
+                suffix=r'\b'),
+             Name.Function),
+        ],
+
+        'comment': [
+            (r'-(?!\})', Comment.Multiline),
+            (r'\{-', Comment.Multiline, 'comment'),
+            (r'[^-}]', Comment.Multiline),
+            (r'-\}', Comment.Multiline, '#pop'),
+        ],
+
+        'imports': [
+            (r'\w+(\.\w+)*', Name.Class, '#pop'),
+        ],
+    }
diff --git a/local_packages/pygments/lexers/ptx.py b/local_packages/pygments/lexers/ptx.py
new file mode 100644
index 0000000..685c735
--- /dev/null
+++ b/local_packages/pygments/lexers/ptx.py
@@ -0,0 +1,119 @@
+"""
+    pygments.lexers.ptx
+    ~~~~~~~~~~~~~~~~~~~
+
+    Lexer for other PTX language.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, include, words
+from pygments.token import Comment, Keyword, Name, String, Number, \
+    Punctuation, Whitespace, Operator
+
+__all__ = ["PtxLexer"]
+
+
+class PtxLexer(RegexLexer):
+    """
+    For NVIDIA `PTX `_
+    source.
+    """
+    name = 'PTX'
+    url = "https://docs.nvidia.com/cuda/parallel-thread-execution/"
+    filenames = ['*.ptx']
+    aliases = ['ptx']
+    mimetypes = ['text/x-ptx']
+    version_added = '2.16'
+
+    #: optional Comment or Whitespace
+    string = r'"[^"]*?"'
+    followsym = r'[a-zA-Z0-9_$]'
+    identifier = r'([-a-zA-Z$._][\w\-$.]*|' + string + ')'
+    block_label = r'(' + identifier + r'|(\d+))'
+
+    tokens = {
+        'root': [
+            include('whitespace'),
+
+            (block_label + r'\s*:', Name.Label),
+
+            include('keyword'),
+
+            (r'%' + identifier, Name.Variable),
+            (r'%\d+', Name.Variable.Anonymous),
+            (r'c?' + string, String),
+            (identifier, Name.Variable),
+            (r';', Punctuation),
+            (r'[*+-/]', Operator),
+
+            (r'0[xX][a-fA-F0-9]+', Number),
+            (r'-?\d+(?:[.]\d+)?(?:[eE][-+]?\d+(?:[.]\d+)?)?', Number),
+
+            (r'[=<>{}\[\]()*.,!]|x\b', Punctuation)
+
+        ],
+        'whitespace': [
+            (r'(\n|\s+)+', Whitespace),
+            (r'//.*?\n', Comment)
+        ],
+
+        'keyword': [
+            # Instruction keywords
+            (words((
+                'abs', 'discard', 'min', 'shf', 'vadd',
+                'activemask', 'div', 'mma', 'shfl', 'vadd2',
+                'add', 'dp2a', 'mov', 'shl', 'vadd4',
+                'addc', 'dp4a', 'movmatrix', 'shr', 'vavrg2',
+                'alloca', 'elect', 'mul', 'sin', 'vavrg4',
+                'and', 'ex2', 'mul24', 'slct', 'vmad',
+                'applypriority', 'exit', 'multimem', 'sqrt', 'vmax',
+                'atom', 'fence', 'nanosleep', 'st', 'vmax2',
+                'bar', 'fma', 'neg', 'stackrestore', 'vmax4',
+                'barrier', 'fns', 'not', 'stacksave', 'vmin',
+                'bfe', 'getctarank', 'or', 'stmatrix', 'vmin2',
+                'bfi', 'griddepcontrol', 'pmevent', 'sub', 'vmin4',
+                'bfind', 'isspacep', 'popc', 'subc', 'vote',
+                'bmsk', 'istypep', 'prefetch', 'suld', 'vset',
+                'bra', 'ld', 'prefetchu', 'suq', 'vset2',
+                'brev', 'ldmatrix', 'prmt', 'sured', 'vset4',
+                'brkpt', 'ldu', 'rcp', 'sust', 'vshl',
+                'brx', 'lg2', 'red', 'szext', 'vshr',
+                'call', 'lop3', 'redux', 'tanh', 'vsub',
+                'clz', 'mad', 'rem', 'testp', 'vsub2',
+                'cnot', 'mad24', 'ret', 'tex', 'vsub4',
+                'copysign', 'madc', 'rsqrt', 'tld4', 'wgmma',
+                'cos', 'mapa', 'sad', 'trap', 'wmma',
+                'cp', 'match', 'selp', 'txq', 'xor',
+                'createpolicy', 'max', 'set', 'vabsdiff', 'cvt',
+                'mbarrier', 'setmaxnreg', 'vabsdiff2', 'cvta',
+                'membar', 'setp', 'vabsdiff4')), Keyword),
+            # State Spaces and Suffixes
+            (words((
+                'reg', '.sreg', '.const', '.global',
+                '.local', '.param', '.shared', '.tex',
+                '.wide', '.loc'
+            )), Keyword.Pseudo),
+            # PTX Directives
+            (words((
+                '.address_size', '.explicitcluster', '.maxnreg', '.section',
+                '.alias', '.extern', '.maxntid', '.shared',
+                '.align', '.file', '.minnctapersm', '.sreg',
+                '.branchtargets', '.func', '.noreturn', '.target',
+                '.callprototype', '.global', '.param', '.tex',
+                '.calltargets', '.loc', '.pragma', '.version',
+                '.common', '.local', '.reg', '.visible',
+                '.const', '.maxclusterrank', '.reqnctapercluster', '.weak',
+                '.entry', '.maxnctapersm', '.reqntid')), Keyword.Reserved),
+            # Fundamental Types
+            (words((
+                '.s8', '.s16', '.s32', '.s64',
+                '.u8', '.u16', '.u32', '.u64',
+                '.f16', '.f16x2', '.f32', '.f64',
+                '.b8', '.b16', '.b32', '.b64',
+                '.pred'
+            )), Keyword.Type)
+        ],
+
+    }
diff --git a/local_packages/pygments/lexers/python.py b/local_packages/pygments/lexers/python.py
new file mode 100644
index 0000000..b296c8d
--- /dev/null
+++ b/local_packages/pygments/lexers/python.py
@@ -0,0 +1,1204 @@
+"""
+    pygments.lexers.python
+    ~~~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for Python and related languages.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import keyword
+
+from pygments.lexer import DelegatingLexer, RegexLexer, include, \
+    bygroups, using, default, words, combined, this
+from pygments.util import get_bool_opt, shebang_matches
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+    Number, Punctuation, Generic, Other, Error, Whitespace
+from pygments import unistring as uni
+
+__all__ = ['PythonLexer', 'PythonConsoleLexer', 'PythonTracebackLexer',
+           'Python2Lexer', 'Python2TracebackLexer',
+           'CythonLexer', 'DgLexer', 'NumPyLexer']
+
+
+class PythonLexer(RegexLexer):
+    """
+    For Python source code (version 3.x).
+
+    .. versionchanged:: 2.5
+       This is now the default ``PythonLexer``.  It is still available as the
+       alias ``Python3Lexer``.
+    """
+
+    name = 'Python'
+    url = 'https://www.python.org'
+    aliases = ['python', 'py', 'sage', 'python3', 'py3', 'bazel', 'starlark', 'pyi']
+    filenames = [
+        '*.py',
+        '*.pyw',
+        # Type stubs
+        '*.pyi',
+        # Jython
+        '*.jy',
+        # Sage
+        '*.sage',
+        # SCons
+        '*.sc',
+        'SConstruct',
+        'SConscript',
+        # Skylark/Starlark (used by Bazel, Buck, and Pants)
+        '*.bzl',
+        'BUCK',
+        'BUILD',
+        'BUILD.bazel',
+        'WORKSPACE',
+        # Twisted Application infrastructure
+        '*.tac',
+        # Execubot level format
+        '*.pye',
+    ]
+    mimetypes = ['text/x-python', 'application/x-python',
+                 'text/x-python3', 'application/x-python3']
+    version_added = '0.10'
+
+    uni_name = f"[{uni.xid_start}][{uni.xid_continue}]*"
+
+    def innerstring_rules(ttype):
+        return [
+            # the old style '%s' % (...) string formatting (still valid in Py3)
+            (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
+             '[hlL]?[E-GXc-giorsaux%]', String.Interpol),
+            # the new style '{}'.format(...) string formatting
+            (r'\{'
+             r'((\w+)((\.\w+)|(\[[^\]]+\]))*)?'  # field name
+             r'(\![sra])?'                       # conversion
+             r'(\:(.?[<>=\^])?[-+ ]?#?0?(\d+)?,?(\.\d+)?[E-GXb-gnosx%]?)?'
+             r'\}', String.Interpol),
+
+            # backslashes, quotes and formatting signs must be parsed one at a time
+            (r'[^\\\'"%{\n]+', ttype),
+            (r'[\'"\\]', ttype),
+            # unhandled string formatting sign
+            (r'%|(\{{1,2})', ttype)
+            # newlines are an error (use "nl" state)
+        ]
+
+    def fstring_rules(ttype):
+        return [
+            # Assuming that a '}' is the closing brace after format specifier.
+            # Sadly, this means that we won't detect syntax error. But it's
+            # more important to parse correct syntax correctly, than to
+            # highlight invalid syntax.
+            (r'\}', String.Interpol),
+            (r'\{', String.Interpol, 'expr-inside-fstring'),
+            # backslashes, quotes and formatting signs must be parsed one at a time
+            (r'[^\\\'"{}\n]+', ttype),
+            (r'[\'"\\]', ttype),
+            # newlines are an error (use "nl" state)
+        ]
+
+    tokens = {
+        'root': [
+            (r'\n', Whitespace),
+            (r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")',
+             bygroups(Whitespace, String.Affix, String.Doc)),
+            (r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')",
+             bygroups(Whitespace, String.Affix, String.Doc)),
+            (r'\A#!.+$', Comment.Hashbang),
+            (r'#.*$', Comment.Single),
+            (r'\\\n', Text),
+            (r'\\', Text),
+            include('keywords'),
+            include('soft-keywords'),
+            (r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Whitespace), 'funcname'),
+            (r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Whitespace), 'classname'),
+            (r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Whitespace),
+             'fromimport'),
+            (r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Whitespace),
+             'import'),
+            include('expr'),
+        ],
+        'expr': [
+            # raw f-strings and t-strings
+            ('(?i)(r[ft]|[ft]r)(""")',
+             bygroups(String.Affix, String.Double),
+             combined('rfstringescape', 'tdqf')),
+            ("(?i)(r[ft]|[ft]r)(''')",
+             bygroups(String.Affix, String.Single),
+             combined('rfstringescape', 'tsqf')),
+            ('(?i)(r[ft]|[ft]r)(")',
+             bygroups(String.Affix, String.Double),
+             combined('rfstringescape', 'dqf')),
+            ("(?i)(r[ft]|[ft]r)(')",
+             bygroups(String.Affix, String.Single),
+             combined('rfstringescape', 'sqf')),
+            # non-raw f-strings and t-strings
+            ('([fFtT])(""")', bygroups(String.Affix, String.Double),
+             combined('fstringescape', 'tdqf')),
+            ("([fFtT])(''')", bygroups(String.Affix, String.Single),
+             combined('fstringescape', 'tsqf')),
+            ('([fFtT])(")', bygroups(String.Affix, String.Double),
+             combined('fstringescape', 'dqf')),
+            ("([fFtT])(')", bygroups(String.Affix, String.Single),
+             combined('fstringescape', 'sqf')),
+            # raw bytes and strings
+            ('(?i)(rb|br|r)(""")',
+             bygroups(String.Affix, String.Double), 'tdqs'),
+            ("(?i)(rb|br|r)(''')",
+             bygroups(String.Affix, String.Single), 'tsqs'),
+            ('(?i)(rb|br|r)(")',
+             bygroups(String.Affix, String.Double), 'dqs'),
+            ("(?i)(rb|br|r)(')",
+             bygroups(String.Affix, String.Single), 'sqs'),
+            # non-raw strings
+            ('([uU]?)(""")', bygroups(String.Affix, String.Double),
+             combined('stringescape', 'tdqs')),
+            ("([uU]?)(''')", bygroups(String.Affix, String.Single),
+             combined('stringescape', 'tsqs')),
+            ('([uU]?)(")', bygroups(String.Affix, String.Double),
+             combined('stringescape', 'dqs')),
+            ("([uU]?)(')", bygroups(String.Affix, String.Single),
+             combined('stringescape', 'sqs')),
+            # non-raw bytes
+            ('([bB])(""")', bygroups(String.Affix, String.Double),
+             combined('bytesescape', 'tdqs')),
+            ("([bB])(''')", bygroups(String.Affix, String.Single),
+             combined('bytesescape', 'tsqs')),
+            ('([bB])(")', bygroups(String.Affix, String.Double),
+             combined('bytesescape', 'dqs')),
+            ("([bB])(')", bygroups(String.Affix, String.Single),
+             combined('bytesescape', 'sqs')),
+
+            (r'[^\S\n]+', Text),
+            include('numbers'),
+            (r'!=|==|<<|>>|:=|[-~+/*%=<>&^|.]', Operator),
+            (r'[]{}:(),;[]', Punctuation),
+            (r'(in|is|and|or|not)\b', Operator.Word),
+            include('expr-keywords'),
+            include('builtins'),
+            include('magicfuncs'),
+            include('magicvars'),
+            include('name'),
+        ],
+        'expr-inside-fstring': [
+            (r'[{([]', Punctuation, 'expr-inside-fstring-inner'),
+            # without format specifier
+            (r'(=\s*)?'         # debug (https://bugs.python.org/issue36817)
+             r'(\![sraf])?'     # conversion
+             r'\}', String.Interpol, '#pop'),
+            # with format specifier
+            # we'll catch the remaining '}' in the outer scope
+            (r'(=\s*)?'         # debug (https://bugs.python.org/issue36817)
+             r'(\![sraf])?'     # conversion
+             r':', String.Interpol, '#pop'),
+            (r'\s+', Whitespace),  # allow new lines
+            include('expr'),
+        ],
+        'expr-inside-fstring-inner': [
+            (r'[{([]', Punctuation, 'expr-inside-fstring-inner'),
+            (r'[])}]', Punctuation, '#pop'),
+            (r'\s+', Whitespace),  # allow new lines
+            include('expr'),
+        ],
+        'expr-keywords': [
+            # Based on https://docs.python.org/3/reference/expressions.html
+            (words((
+                'async for', 'await', 'else', 'for', 'if', 'lambda',
+                'yield', 'yield from'), suffix=r'\b'),
+             Keyword),
+            (words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant),
+        ],
+        'keywords': [
+            (words((
+                'assert', 'async', 'await', 'break', 'continue', 'del', 'elif',
+                'else', 'except', 'finally', 'for', 'global', 'if', 'lambda',
+                'pass', 'raise', 'nonlocal', 'return', 'try', 'while', 'yield',
+                'yield from', 'as', 'with'), suffix=r'\b'),
+             Keyword),
+            (words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant),
+        ],
+        'soft-keywords': [
+            # `match`, `case` and `_` soft keywords
+            (r'(^[ \t]*)'              # at beginning of line + possible indentation
+             r'(match|case)\b'         # a possible keyword
+             r'(?![ \t]*(?:'           # not followed by...
+             r'[:,;=^&|@~)\]}]|(?:' +  # characters and keywords that mean this isn't
+                                       # pattern matching (but None/True/False is ok)
+             r'|'.join(k for k in keyword.kwlist if k[0].islower()) + r')\b))',
+             bygroups(Text, Keyword), 'soft-keywords-inner'),
+        ],
+        'soft-keywords-inner': [
+            # optional `_` keyword
+            (r'(\s+)([^\n_]*)(_\b)', bygroups(Whitespace, using(this), Keyword)),
+            default('#pop')
+        ],
+        'builtins': [
+            (words((
+                '__import__', 'abs', 'aiter', 'all', 'any', 'bin', 'bool', 'bytearray',
+                'breakpoint', 'bytes', 'callable', 'chr', 'classmethod', 'compile',
+                'complex', 'delattr', 'dict', 'dir', 'divmod', 'enumerate', 'eval',
+                'filter', 'float', 'format', 'frozenset', 'getattr', 'globals',
+                'hasattr', 'hash', 'hex', 'id', 'input', 'int', 'isinstance',
+                'issubclass', 'iter', 'len', 'list', 'locals', 'map', 'max',
+                'memoryview', 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow',
+                'print', 'property', 'range', 'repr', 'reversed', 'round', 'set',
+                'setattr', 'slice', 'sorted', 'staticmethod', 'str', 'sum', 'super',
+                'tuple', 'type', 'vars', 'zip'), prefix=r'(?>|[-~+/*%=<>&^|.]', Operator),
+            include('keywords'),
+            (r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Whitespace), 'funcname'),
+            (r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Whitespace), 'classname'),
+            (r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Whitespace),
+             'fromimport'),
+            (r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Whitespace),
+             'import'),
+            include('builtins'),
+            include('magicfuncs'),
+            include('magicvars'),
+            include('backtick'),
+            ('([rR]|[uUbB][rR]|[rR][uUbB])(""")',
+             bygroups(String.Affix, String.Double), 'tdqs'),
+            ("([rR]|[uUbB][rR]|[rR][uUbB])(''')",
+             bygroups(String.Affix, String.Single), 'tsqs'),
+            ('([rR]|[uUbB][rR]|[rR][uUbB])(")',
+             bygroups(String.Affix, String.Double), 'dqs'),
+            ("([rR]|[uUbB][rR]|[rR][uUbB])(')",
+             bygroups(String.Affix, String.Single), 'sqs'),
+            ('([uUbB]?)(""")', bygroups(String.Affix, String.Double),
+             combined('stringescape', 'tdqs')),
+            ("([uUbB]?)(''')", bygroups(String.Affix, String.Single),
+             combined('stringescape', 'tsqs')),
+            ('([uUbB]?)(")', bygroups(String.Affix, String.Double),
+             combined('stringescape', 'dqs')),
+            ("([uUbB]?)(')", bygroups(String.Affix, String.Single),
+             combined('stringescape', 'sqs')),
+            include('name'),
+            include('numbers'),
+        ],
+        'keywords': [
+            (words((
+                'assert', 'break', 'continue', 'del', 'elif', 'else', 'except',
+                'exec', 'finally', 'for', 'global', 'if', 'lambda', 'pass',
+                'print', 'raise', 'return', 'try', 'while', 'yield',
+                'yield from', 'as', 'with'), suffix=r'\b'),
+             Keyword),
+        ],
+        'builtins': [
+            (words((
+                '__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin',
+                'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr', 'classmethod',
+                'cmp', 'coerce', 'compile', 'complex', 'delattr', 'dict', 'dir', 'divmod',
+                'enumerate', 'eval', 'execfile', 'exit', 'file', 'filter', 'float',
+                'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id',
+                'input', 'int', 'intern', 'isinstance', 'issubclass', 'iter', 'len',
+                'list', 'locals', 'long', 'map', 'max', 'min', 'next', 'object',
+                'oct', 'open', 'ord', 'pow', 'property', 'range', 'raw_input', 'reduce',
+                'reload', 'repr', 'reversed', 'round', 'set', 'setattr', 'slice',
+                'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', 'type',
+                'unichr', 'unicode', 'vars', 'xrange', 'zip'),
+                prefix=r'(?>> )(.*\n)', bygroups(Generic.Prompt, Other.Code), 'continuations'),
+            # This happens, e.g., when tracebacks are embedded in documentation;
+            # trailing whitespaces are often stripped in such contexts.
+            (r'(>>>)(\n)', bygroups(Generic.Prompt, Whitespace)),
+            (r'(\^C)?Traceback \(most recent call last\):\n', Other.Traceback, 'traceback'),
+            # SyntaxError starts with this
+            (r'  File "[^"]+", line \d+', Other.Traceback, 'traceback'),
+            (r'.*\n', Generic.Output),
+        ],
+        'continuations': [
+            (r'(\.\.\. )(.*\n)', bygroups(Generic.Prompt, Other.Code)),
+            # See above.
+            (r'(\.\.\.)(\n)', bygroups(Generic.Prompt, Whitespace)),
+            default('#pop'),
+        ],
+        'traceback': [
+            # As soon as we see a traceback, consume everything until the next
+            # >>> prompt.
+            (r'(?=>>>( |$))', Text, '#pop'),
+            (r'(KeyboardInterrupt)(\n)', bygroups(Name.Class, Whitespace)),
+            (r'.*\n', Other.Traceback),
+        ],
+    }
+
+
+class PythonConsoleLexer(DelegatingLexer):
+    """
+    For Python console output or doctests, such as:
+
+    .. sourcecode:: pycon
+
+        >>> a = 'foo'
+        >>> print(a)
+        foo
+        >>> 1 / 0
+        Traceback (most recent call last):
+          File "", line 1, in 
+        ZeroDivisionError: integer division or modulo by zero
+
+    Additional options:
+
+    `python3`
+        Use Python 3 lexer for code.  Default is ``True``.
+
+        .. versionadded:: 1.0
+        .. versionchanged:: 2.5
+           Now defaults to ``True``.
+    """
+
+    name = 'Python console session'
+    aliases = ['pycon', 'python-console']
+    mimetypes = ['text/x-python-doctest']
+    url = 'https://python.org'
+    version_added = ''
+
+    def __init__(self, **options):
+        python3 = get_bool_opt(options, 'python3', True)
+        if python3:
+            pylexer = PythonLexer
+            tblexer = PythonTracebackLexer
+        else:
+            pylexer = Python2Lexer
+            tblexer = Python2TracebackLexer
+        # We have two auxiliary lexers. Use DelegatingLexer twice with
+        # different tokens.  TODO: DelegatingLexer should support this
+        # directly, by accepting a tuplet of auxiliary lexers and a tuple of
+        # distinguishing tokens. Then we wouldn't need this intermediary
+        # class.
+        class _ReplaceInnerCode(DelegatingLexer):
+            def __init__(self, **options):
+                super().__init__(pylexer, _PythonConsoleLexerBase, Other.Code, **options)
+        super().__init__(tblexer, _ReplaceInnerCode, Other.Traceback, **options)
+
+
+class PythonTracebackLexer(RegexLexer):
+    """
+    For Python 3.x tracebacks, with support for chained exceptions.
+
+    .. versionchanged:: 2.5
+       This is now the default ``PythonTracebackLexer``.  It is still available
+       as the alias ``Python3TracebackLexer``.
+    """
+
+    name = 'Python Traceback'
+    aliases = ['pytb', 'py3tb']
+    filenames = ['*.pytb', '*.py3tb']
+    mimetypes = ['text/x-python-traceback', 'text/x-python3-traceback']
+    url = 'https://python.org'
+    version_added = '1.0'
+
+    tokens = {
+        'root': [
+            (r'\n', Whitespace),
+            (r'^(\^C)?Traceback \(most recent call last\):\n', Generic.Traceback, 'intb'),
+            (r'^During handling of the above exception, another '
+             r'exception occurred:\n\n', Generic.Traceback),
+            (r'^The above exception was the direct cause of the '
+             r'following exception:\n\n', Generic.Traceback),
+            (r'^(?=  File "[^"]+", line \d+)', Generic.Traceback, 'intb'),
+            (r'^.*\n', Other),
+        ],
+        'intb': [
+            (r'^(  File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
+             bygroups(Text, Name.Builtin, Text, Number, Text, Name, Whitespace)),
+            (r'^(  File )("[^"]+")(, line )(\d+)(\n)',
+             bygroups(Text, Name.Builtin, Text, Number, Whitespace)),
+            (r'^(    )(.+)(\n)',
+             bygroups(Whitespace, using(PythonLexer), Whitespace), 'markers'),
+            (r'^([ \t]*)(\.\.\.)(\n)',
+             bygroups(Whitespace, Comment, Whitespace)),  # for doctests...
+            (r'^([^:]+)(: )(.+)(\n)',
+             bygroups(Generic.Error, Text, Name, Whitespace), '#pop'),
+            (r'^([a-zA-Z_][\w.]*)(:?\n)',
+             bygroups(Generic.Error, Whitespace), '#pop'),
+            default('#pop'),
+        ],
+        'markers': [
+            # Either `PEP 657 `
+            # error locations in Python 3.11+, or single-caret markers
+            # for syntax errors before that.
+            (r'^( {4,})([~^]+)(\n)',
+             bygroups(Whitespace, Punctuation.Marker, Whitespace),
+             '#pop'),
+            default('#pop'),
+        ],
+    }
+
+
+Python3TracebackLexer = PythonTracebackLexer
+
+
+class Python2TracebackLexer(RegexLexer):
+    """
+    For Python tracebacks.
+
+    .. versionchanged:: 2.5
+       This class has been renamed from ``PythonTracebackLexer``.
+       ``PythonTracebackLexer`` now refers to the Python 3 variant.
+    """
+
+    name = 'Python 2.x Traceback'
+    aliases = ['py2tb']
+    filenames = ['*.py2tb']
+    mimetypes = ['text/x-python2-traceback']
+    url = 'https://python.org'
+    version_added = '0.7'
+
+    tokens = {
+        'root': [
+            # Cover both (most recent call last) and (innermost last)
+            # The optional ^C allows us to catch keyboard interrupt signals.
+            (r'^(\^C)?(Traceback.*\n)',
+             bygroups(Text, Generic.Traceback), 'intb'),
+            # SyntaxError starts with this.
+            (r'^(?=  File "[^"]+", line \d+)', Generic.Traceback, 'intb'),
+            (r'^.*\n', Other),
+        ],
+        'intb': [
+            (r'^(  File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
+             bygroups(Text, Name.Builtin, Text, Number, Text, Name, Whitespace)),
+            (r'^(  File )("[^"]+")(, line )(\d+)(\n)',
+             bygroups(Text, Name.Builtin, Text, Number, Whitespace)),
+            (r'^(    )(.+)(\n)',
+             bygroups(Text, using(Python2Lexer), Whitespace), 'marker'),
+            (r'^([ \t]*)(\.\.\.)(\n)',
+             bygroups(Text, Comment, Whitespace)),  # for doctests...
+            (r'^([^:]+)(: )(.+)(\n)',
+             bygroups(Generic.Error, Text, Name, Whitespace), '#pop'),
+            (r'^([a-zA-Z_]\w*)(:?\n)',
+             bygroups(Generic.Error, Whitespace), '#pop')
+        ],
+        'marker': [
+            # For syntax errors.
+            (r'( {4,})(\^)', bygroups(Text, Punctuation.Marker), '#pop'),
+            default('#pop'),
+        ],
+    }
+
+
+class CythonLexer(RegexLexer):
+    """
+    For Pyrex and Cython source code.
+    """
+
+    name = 'Cython'
+    url = 'https://cython.org'
+    aliases = ['cython', 'pyx', 'pyrex']
+    filenames = ['*.pyx', '*.pxd', '*.pxi']
+    mimetypes = ['text/x-cython', 'application/x-cython']
+    version_added = '1.1'
+
+    tokens = {
+        'root': [
+            (r'\n', Whitespace),
+            (r'^(\s*)("""(?:.|\n)*?""")', bygroups(Whitespace, String.Doc)),
+            (r"^(\s*)('''(?:.|\n)*?''')", bygroups(Whitespace, String.Doc)),
+            (r'[^\S\n]+', Text),
+            (r'#.*$', Comment),
+            (r'[]{}:(),;[]', Punctuation),
+            (r'\\\n', Whitespace),
+            (r'\\', Text),
+            (r'(in|is|and|or|not)\b', Operator.Word),
+            (r'(<)([a-zA-Z0-9.?]+)(>)',
+             bygroups(Punctuation, Keyword.Type, Punctuation)),
+            (r'!=|==|<<|>>|[-~+/*%=<>&^|.?]', Operator),
+            (r'(from)(\d+)(<=)(\s+)(<)(\d+)(:)',
+             bygroups(Keyword, Number.Integer, Operator, Whitespace, Operator,
+                      Name, Punctuation)),
+            include('keywords'),
+            (r'(def|property)(\s+)', bygroups(Keyword, Whitespace), 'funcname'),
+            (r'(cp?def)(\s+)', bygroups(Keyword, Whitespace), 'cdef'),
+            # (should actually start a block with only cdefs)
+            (r'(cdef)(:)', bygroups(Keyword, Punctuation)),
+            (r'(class|cppclass|struct)(\s+)', bygroups(Keyword, Whitespace), 'classname'),
+            (r'(from)(\s+)', bygroups(Keyword, Whitespace), 'fromimport'),
+            (r'(c?import)(\s+)', bygroups(Keyword, Whitespace), 'import'),
+            include('builtins'),
+            include('backtick'),
+            ('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'),
+            ("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'),
+            ('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'),
+            ("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'),
+            ('[uU]?"""', String, combined('stringescape', 'tdqs')),
+            ("[uU]?'''", String, combined('stringescape', 'tsqs')),
+            ('[uU]?"', String, combined('stringescape', 'dqs')),
+            ("[uU]?'", String, combined('stringescape', 'sqs')),
+            include('name'),
+            include('numbers'),
+        ],
+        'keywords': [
+            (words((
+                'assert', 'async', 'await', 'break', 'by', 'continue', 'ctypedef', 'del',
+                'elif', 'else', 'except', 'except?', 'exec', 'finally', 'for', 'fused', 'gil',
+                'global', 'if', 'include', 'lambda', 'namespace', 'new', 'noexcept','nogil',
+                'pass', 'print', 'raise', 'return', 'try', 'while', 'yield', 'as', 'with'),
+             suffix=r'\b'),
+             Keyword),
+             (words(('True', 'False', 'None', 'NULL'), suffix=r'\b'), Keyword.Constant),
+            (r'(DEF|IF|ELIF|ELSE)\b', Comment.Preproc),
+        ],
+        'builtins': [
+            (words((
+                '__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin', 'bint',
+                'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'char', 'chr',
+                'classmethod', 'cmp', 'coerce', 'compile', 'complex', 'delattr',
+                'dict', 'dir', 'divmod', 'double', 'enumerate', 'eval', 'execfile', 'exit',
+                'file', 'filter', 'float', 'frozenset', 'getattr', 'globals',
+                'hasattr', 'hash', 'hex', 'id', 'input', 'int', 'intern', 'isinstance',
+                'issubclass', 'iter', 'len', 'list', 'locals', 'long', 'map', 'max',
+                'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'property',
+                'Py_ssize_t', 'range', 'raw_input', 'reduce', 'reload', 'repr', 'reversed',
+                'round', 'set', 'setattr', 'size_t', 'slice', 'sorted', 'staticmethod',
+                'ssize_t', 'str', 'sum', 'super', 'tuple', 'type', 'unichr', 'unicode',
+                'unsigned', 'vars', 'xrange', 'zip'), prefix=r'(??/\\:']?:)(\s*)(\{)",
+             bygroups(Name.Function, Whitespace, Operator, Whitespace, Punctuation),
+             "functions"),
+            # Variable Names
+            (r"([.]?[a-zA-Z][\w.]*)(\s*)([-.~=!@#$%^&*_+|,<>?/\\:']?:)",
+             bygroups(Name.Variable, Whitespace, Operator)),
+            # Functions
+            (r"\{", Punctuation, "functions"),
+            # Parentheses
+            (r"\(", Punctuation, "parentheses"),
+            # Brackets
+            (r"\[", Punctuation, "brackets"),
+            # Errors
+            (r"'`([a-zA-Z][\w.]*)?", Name.Exception),
+            # File Symbols
+            (r"`:([a-zA-Z/][\w./]*)?", String.Symbol),
+            # Symbols
+            (r"`([a-zA-Z][\w.]*)?", String.Symbol),
+            # Numbers
+            include("numbers"),
+            # Variable Names
+            (r"[a-zA-Z][\w.]*", Name),
+            # Operators
+            (r"[-=+*#$%@!~^&:.,<>'\\|/?_]", Operator),
+            # Punctuation
+            (r";", Punctuation),
+        ],
+        "functions": [
+            include("root"),
+            (r"\}", Punctuation, "#pop"),
+        ],
+        "parentheses": [
+            include("root"),
+            (r"\)", Punctuation, "#pop"),
+        ],
+        "brackets": [
+            include("root"),
+            (r"\]", Punctuation, "#pop"),
+        ],
+        "numbers": [
+            # Binary Values
+            (r"[01]+b", Number.Bin),
+            # Nulls/Infinities
+            (r"0[nNwW][cefghijmndzuvtp]?", Number),
+            # Timestamps
+            ((r"(?:[0-9]{4}[.][0-9]{2}[.][0-9]{2}|[0-9]+)"
+              "D(?:[0-9](?:[0-9](?::[0-9]{2}"
+              "(?::[0-9]{2}(?:[.][0-9]*)?)?)?)?)?"), Literal.Date),
+            # Datetimes
+            ((r"[0-9]{4}[.][0-9]{2}"
+              "(?:m|[.][0-9]{2}(?:T(?:[0-9]{2}:[0-9]{2}"
+              "(?::[0-9]{2}(?:[.][0-9]*)?)?)?)?)"), Literal.Date),
+            # Times
+            (r"[0-9]{2}:[0-9]{2}(?::[0-9]{2}(?:[.][0-9]{1,3})?)?",
+             Literal.Date),
+            # GUIDs
+            (r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}",
+             Number.Hex),
+            # Byte Vectors
+            (r"0x[0-9a-fA-F]+", Number.Hex),
+            # Floats
+            (r"([0-9]*[.]?[0-9]+|[0-9]+[.]?[0-9]*)[eE][+-]?[0-9]+[ef]?",
+             Number.Float),
+            (r"([0-9]*[.][0-9]+|[0-9]+[.][0-9]*)[ef]?", Number.Float),
+            (r"[0-9]+[ef]", Number.Float),
+            # Characters
+            (r"[0-9]+c", Number),
+            # Integers
+            (r"[0-9]+[ihtuv]", Number.Integer),
+            # Long Integers
+            (r"[0-9]+[jnp]?", Number.Integer.Long),
+        ],
+        "comments": [
+            (r"[^\\]+", Comment.Multiline),
+            (r"^\\", Comment.Multiline, "#pop"),
+            (r"\\", Comment.Multiline),
+        ],
+        "strings": [
+            (r'[^"\\]+', String.Double),
+            (r"\\.", String.Escape),
+            (r'"', String.Double, "#pop"),
+        ],
+    }
+
+
+class QLexer(KLexer):
+    """
+    For `Q `_ source code.
+    """
+
+    name = "Q"
+    aliases = ["q"]
+    filenames = ["*.q"]
+    version_added = '2.12'
+
+    tokens = {
+        "root": [
+            (words(("aj", "aj0", "ajf", "ajf0", "all", "and", "any", "asc",
+                    "asof", "attr", "avgs", "ceiling", "cols", "count", "cross",
+                    "csv", "cut", "deltas", "desc", "differ", "distinct", "dsave",
+                    "each", "ej", "ema", "eval", "except", "fby", "fills", "first",
+                    "fkeys", "flip", "floor", "get", "group", "gtime", "hclose",
+                    "hcount", "hdel", "hsym", "iasc", "idesc", "ij", "ijf",
+                    "inter", "inv", "key", "keys", "lj", "ljf", "load", "lower",
+                    "lsq", "ltime", "ltrim", "mavg", "maxs", "mcount", "md5",
+                    "mdev", "med", "meta", "mins", "mmax", "mmin", "mmu", "mod",
+                    "msum", "neg", "next", "not", "null", "or", "over", "parse",
+                    "peach", "pj", "prds", "prior", "prev", "rand", "rank", "ratios",
+                    "raze", "read0", "read1", "reciprocal", "reval", "reverse",
+                    "rload", "rotate", "rsave", "rtrim", "save", "scan", "scov",
+                    "sdev", "set", "show", "signum", "ssr", "string", "sublist",
+                    "sums", "sv", "svar", "system", "tables", "til", "trim", "txf",
+                    "type", "uj", "ujf", "ungroup", "union", "upper", "upsert",
+                    "value", "view", "views", "vs", "where", "wj", "wj1", "ww",
+                    "xasc", "xbar", "xcol", "xcols", "xdesc", "xgroup", "xkey",
+                    "xlog", "xprev", "xrank"),
+                    suffix=r"\b"), Name.Builtin,
+            ),
+            inherit,
+        ],
+    }
diff --git a/local_packages/pygments/lexers/qlik.py b/local_packages/pygments/lexers/qlik.py
new file mode 100644
index 0000000..6972ed9
--- /dev/null
+++ b/local_packages/pygments/lexers/qlik.py
@@ -0,0 +1,117 @@
+"""
+    pygments.lexers.qlik
+    ~~~~~~~~~~~~~~~~~~~~
+
+    Lexer for the qlik scripting language
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups, words
+from pygments.token import Comment, Keyword, Name, Number, Operator, \
+    Punctuation, String, Text
+from pygments.lexers._qlik_builtins import OPERATORS_LIST, STATEMENT_LIST, \
+    SCRIPT_FUNCTIONS, CONSTANT_LIST
+
+__all__ = ["QlikLexer"]
+
+
+class QlikLexer(RegexLexer):
+    """
+    Lexer for qlik code, including .qvs files
+    """
+
+    name = "Qlik"
+    aliases = ["qlik", "qlikview", "qliksense", "qlikscript"]
+    filenames = ["*.qvs", "*.qvw"]
+    url = "https://qlik.com"
+    version_added = '2.12'
+
+    flags = re.IGNORECASE
+
+    tokens = {
+        # Handle multi-line comments
+        "comment": [
+            (r"\*/", Comment.Multiline, "#pop"),
+            (r"[^*]+", Comment.Multiline),
+        ],
+        # Handle numbers
+        "numerics": [
+            (r"\b\d+\.\d+(e\d+)?[fd]?\b", Number.Float),
+            (r"\b\d+\b", Number.Integer),
+        ],
+        # Handle variable names in things
+        "interp": [
+            (
+                r"(\$\()(\w+)(\))",
+                bygroups(String.Interpol, Name.Variable, String.Interpol),
+            ),
+        ],
+        # Handle strings
+        "string": [
+            (r"'", String, "#pop"),
+            include("interp"),
+            (r"[^'$]+", String),
+            (r"\$", String),
+        ],
+        #
+        "assignment": [
+            (r";", Punctuation, "#pop"),
+            include("root"),
+        ],
+        "field_name_quote": [
+            (r'"', String.Symbol, "#pop"),
+            include("interp"),
+            (r"[^\"$]+", String.Symbol),
+            (r"\$", String.Symbol),
+        ],
+        "field_name_bracket": [
+            (r"\]", String.Symbol, "#pop"),
+            include("interp"),
+            (r"[^\]$]+", String.Symbol),
+            (r"\$", String.Symbol),
+        ],
+        "function": [(r"\)", Punctuation, "#pop"), include("root")],
+        "root": [
+            # Whitespace and comments
+            (r"\s+", Text.Whitespace),
+            (r"/\*", Comment.Multiline, "comment"),
+            (r"//.*\n", Comment.Single),
+            # variable assignment
+            (r"(let|set)(\s+)", bygroups(Keyword.Declaration, Text.Whitespace),
+             "assignment"),
+            # Word operators
+            (words(OPERATORS_LIST["words"], prefix=r"\b", suffix=r"\b"),
+             Operator.Word),
+            # Statements
+            (words(STATEMENT_LIST, suffix=r"\b"), Keyword),
+            # Table names
+            (r"[a-z]\w*:", Keyword.Declaration),
+            # Constants
+            (words(CONSTANT_LIST, suffix=r"\b"), Keyword.Constant),
+            # Functions
+            (words(SCRIPT_FUNCTIONS, suffix=r"(?=\s*\()"), Name.Builtin,
+             "function"),
+            # interpolation - e.g. $(variableName)
+            include("interp"),
+            # Quotes denote a field/file name
+            (r'"', String.Symbol, "field_name_quote"),
+            # Square brackets denote a field/file name
+            (r"\[", String.Symbol, "field_name_bracket"),
+            # Strings
+            (r"'", String, "string"),
+            # Numbers
+            include("numerics"),
+            # Operator symbols
+            (words(OPERATORS_LIST["symbols"]), Operator),
+            # Strings denoted by single quotes
+            (r"'.+?'", String),
+            # Words as text
+            (r"\b\w+\b", Text),
+            # Basic punctuation
+            (r"[,;.()\\/]", Punctuation),
+        ],
+    }
diff --git a/local_packages/pygments/lexers/qvt.py b/local_packages/pygments/lexers/qvt.py
new file mode 100644
index 0000000..02d6fcf
--- /dev/null
+++ b/local_packages/pygments/lexers/qvt.py
@@ -0,0 +1,153 @@
+"""
+    pygments.lexers.qvt
+    ~~~~~~~~~~~~~~~~~~~
+
+    Lexer for QVT Operational language.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, include, combined, default, \
+    words
+from pygments.token import Text, Comment, Operator, Keyword, Punctuation, \
+    Name, String, Number
+
+__all__ = ['QVToLexer']
+
+
+class QVToLexer(RegexLexer):
+    """
+    For the QVT Operational Mapping language.
+
+    Reference for implementing this: «Meta Object Facility (MOF) 2.0
+    Query/View/Transformation Specification», Version 1.1 - January 2011
+    (https://www.omg.org/spec/QVT/1.1/), see §8.4, «Concrete Syntax» in
+    particular.
+
+    Notable tokens assignments:
+
+    - Name.Class is assigned to the identifier following any of the following
+      keywords: metamodel, class, exception, primitive, enum, transformation
+      or library
+
+    - Name.Function is assigned to the names of mappings and queries
+
+    - Name.Builtin.Pseudo is assigned to the pre-defined variables 'this',
+      'self' and 'result'.
+    """
+    # With obvious borrowings & inspiration from the Java, Python and C lexers
+
+    name = 'QVTO'
+    aliases = ['qvto', 'qvt']
+    filenames = ['*.qvto']
+    url = 'https://www.omg.org/spec/QVT/1.1'
+    version_added = ''
+
+    tokens = {
+        'root': [
+            (r'\n', Text),
+            (r'[^\S\n]+', Text),
+            (r'(--|//)(\s*)(directive:)?(.*)$',
+             bygroups(Comment, Comment, Comment.Preproc, Comment)),
+            # Uncomment the following if you want to distinguish between
+            # '/*' and '/**', à la javadoc
+            # (r'/[*]{2}(.|\n)*?[*]/', Comment.Multiline),
+            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
+            (r'\\\n', Text),
+            (r'(and|not|or|xor|##?)\b', Operator.Word),
+            (r'(:{1,2}=|[-+]=)\b', Operator.Word),
+            (r'(@|<<|>>)\b', Keyword),  # stereotypes
+            (r'!=|<>|==|=|!->|->|>=|<=|[.]{3}|[+/*%=<>&|.~]', Operator),
+            (r'[]{}:(),;[]', Punctuation),
+            (r'(true|false|unlimited|null)\b', Keyword.Constant),
+            (r'(this|self|result)\b', Name.Builtin.Pseudo),
+            (r'(var)\b', Keyword.Declaration),
+            (r'(from|import)\b', Keyword.Namespace, 'fromimport'),
+            (r'(metamodel|class|exception|primitive|enum|transformation|'
+             r'library)(\s+)(\w+)',
+             bygroups(Keyword.Word, Text, Name.Class)),
+            (r'(exception)(\s+)(\w+)',
+             bygroups(Keyword.Word, Text, Name.Exception)),
+            (r'(main)\b', Name.Function),
+            (r'(mapping|helper|query)(\s+)',
+             bygroups(Keyword.Declaration, Text), 'operation'),
+            (r'(assert)(\s+)\b', bygroups(Keyword, Text), 'assert'),
+            (r'(Bag|Collection|Dict|OrderedSet|Sequence|Set|Tuple|List)\b',
+             Keyword.Type),
+            include('keywords'),
+            ('"', String, combined('stringescape', 'dqs')),
+            ("'", String, combined('stringescape', 'sqs')),
+            include('name'),
+            include('numbers'),
+            # (r'([a-zA-Z_]\w*)(::)([a-zA-Z_]\w*)',
+            # bygroups(Text, Text, Text)),
+        ],
+
+        'fromimport': [
+            (r'(?:[ \t]|\\\n)+', Text),
+            (r'[a-zA-Z_][\w.]*', Name.Namespace),
+            default('#pop'),
+        ],
+
+        'operation': [
+            (r'::', Text),
+            (r'(.*::)([a-zA-Z_]\w*)([ \t]*)(\()',
+             bygroups(Text, Name.Function, Text, Punctuation), '#pop')
+        ],
+
+        'assert': [
+            (r'(warning|error|fatal)\b', Keyword, '#pop'),
+            default('#pop'),  # all else: go back
+        ],
+
+        'keywords': [
+            (words((
+                'abstract', 'access', 'any', 'assert', 'blackbox', 'break',
+                'case', 'collect', 'collectNested', 'collectOne', 'collectselect',
+                'collectselectOne', 'composes', 'compute', 'configuration',
+                'constructor', 'continue', 'datatype', 'default', 'derived',
+                'disjuncts', 'do', 'elif', 'else', 'end', 'endif', 'except',
+                'exists', 'extends', 'forAll', 'forEach', 'forOne', 'from', 'if',
+                'implies', 'in', 'inherits', 'init', 'inout', 'intermediate',
+                'invresolve', 'invresolveIn', 'invresolveone', 'invresolveoneIn',
+                'isUnique', 'iterate', 'late', 'let', 'literal', 'log', 'map',
+                'merges', 'modeltype', 'new', 'object', 'one', 'ordered', 'out',
+                'package', 'population', 'property', 'raise', 'readonly',
+                'references', 'refines', 'reject', 'resolve', 'resolveIn',
+                'resolveone', 'resolveoneIn', 'return', 'select', 'selectOne',
+                'sortedBy', 'static', 'switch', 'tag', 'then', 'try', 'typedef',
+                'unlimited', 'uses', 'when', 'where', 'while', 'with', 'xcollect',
+                'xmap', 'xselect'), suffix=r'\b'), Keyword),
+        ],
+
+        # There is no need to distinguish between String.Single and
+        # String.Double: 'strings' is factorised for 'dqs' and 'sqs'
+        'strings': [
+            (r'[^\\\'"\n]+', String),
+            # quotes, percents and backslashes must be parsed one at a time
+            (r'[\'"\\]', String),
+        ],
+        'stringescape': [
+            (r'\\([\\btnfr"\']|u[0-3][0-7]{2}|u[0-7]{1,2})', String.Escape)
+        ],
+        'dqs': [  # double-quoted string
+            (r'"', String, '#pop'),
+            (r'\\\\|\\"', String.Escape),
+            include('strings')
+        ],
+        'sqs': [  # single-quoted string
+            (r"'", String, '#pop'),
+            (r"\\\\|\\'", String.Escape),
+            include('strings')
+        ],
+        'name': [
+            (r'[a-zA-Z_]\w*', Name),
+        ],
+        # numbers: excerpt taken from the python lexer
+        'numbers': [
+            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
+            (r'\d+[eE][+-]?[0-9]+', Number.Float),
+            (r'\d+', Number.Integer)
+        ],
+    }
diff --git a/local_packages/pygments/lexers/r.py b/local_packages/pygments/lexers/r.py
new file mode 100644
index 0000000..7dcc148
--- /dev/null
+++ b/local_packages/pygments/lexers/r.py
@@ -0,0 +1,196 @@
+"""
+    pygments.lexers.r
+    ~~~~~~~~~~~~~~~~~
+
+    Lexers for the R/S languages.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import Lexer, RegexLexer, include, do_insertions
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+    Number, Punctuation, Generic, Whitespace
+
+__all__ = ['RConsoleLexer', 'SLexer', 'RdLexer']
+
+
+line_re  = re.compile('.*?\n')
+
+
+class RConsoleLexer(Lexer):
+    """
+    For R console transcripts or R CMD BATCH output files.
+    """
+
+    name = 'RConsole'
+    aliases = ['rconsole', 'rout']
+    filenames = ['*.Rout']
+    url = 'https://www.r-project.org'
+    version_added = ''
+    _example = "rconsole/r-console-transcript.Rout"
+
+    def get_tokens_unprocessed(self, text):
+        slexer = SLexer(**self.options)
+
+        current_code_block = ''
+        insertions = []
+
+        for match in line_re.finditer(text):
+            line = match.group()
+            if line.startswith('>') or line.startswith('+'):
+                # Colorize the prompt as such,
+                # then put rest of line into current_code_block
+                insertions.append((len(current_code_block),
+                                   [(0, Generic.Prompt, line[:2])]))
+                current_code_block += line[2:]
+            else:
+                # We have reached a non-prompt line!
+                # If we have stored prompt lines, need to process them first.
+                if current_code_block:
+                    # Weave together the prompts and highlight code.
+                    yield from do_insertions(
+                        insertions, slexer.get_tokens_unprocessed(current_code_block))
+                    # Reset vars for next code block.
+                    current_code_block = ''
+                    insertions = []
+                # Now process the actual line itself, this is output from R.
+                yield match.start(), Generic.Output, line
+
+        # If we happen to end on a code block with nothing after it, need to
+        # process the last code block. This is neither elegant nor DRY so
+        # should be changed.
+        if current_code_block:
+            yield from do_insertions(
+                insertions, slexer.get_tokens_unprocessed(current_code_block))
+
+
+class SLexer(RegexLexer):
+    """
+    For S, S-plus, and R source code.
+    """
+
+    name = 'S'
+    aliases = ['splus', 's', 'r']
+    filenames = ['*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron']
+    mimetypes = ['text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r',
+                 'text/x-R', 'text/x-r-history', 'text/x-r-profile']
+    url = 'https://www.r-project.org'
+    version_added = '0.10'
+
+    valid_name = r'`[^`\\]*(?:\\.[^`\\]*)*`|(?:[a-zA-Z]|\.[A-Za-z_.])[\w.]*|\.'
+    tokens = {
+        'comments': [
+            (r'#.*$', Comment.Single),
+        ],
+        'valid_name': [
+            (valid_name, Name),
+        ],
+        'function_name': [
+            (rf'({valid_name})\s*(?=\()', Name.Function),
+        ],
+        'punctuation': [
+            (r'\[{1,2}|\]{1,2}|\(|\)|;|,', Punctuation),
+        ],
+        'keywords': [
+            (r'(if|else|for|while|repeat|in|next|break|return|switch|function)'
+             r'(?![\w.])',
+             Keyword.Reserved),
+        ],
+        'operators': [
+            (r'<>?|-|==|<=|>=|\|>|<|>|&&?|!=|\|\|?|\?', Operator),
+            (r'\*|\+|\^|/|!|%[^%]*%|=|~|\$|@|:{1,3}', Operator),
+        ],
+        'builtin_symbols': [
+            (r'(NULL|NA(_(integer|real|complex|character)_)?|'
+             r'letters|LETTERS|Inf|TRUE|FALSE|NaN|pi|\.\.(\.|[0-9]+))'
+             r'(?![\w.])',
+             Keyword.Constant),
+            (r'(T|F)\b', Name.Builtin.Pseudo),
+        ],
+        'numbers': [
+            # hex number
+            (r'0[xX][a-fA-F0-9]+([pP][0-9]+)?[Li]?', Number.Hex),
+            # decimal number
+            (r'[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)([eE][+-]?[0-9]+)?[Li]?',
+             Number),
+        ],
+        'statements': [
+            include('comments'),
+            # whitespaces
+            (r'\s+', Whitespace),
+            (r'\'', String, 'string_squote'),
+            (r'\"', String, 'string_dquote'),
+            include('builtin_symbols'),
+            include('keywords'),
+            include('function_name'),
+            include('valid_name'),
+            include('numbers'),
+            include('punctuation'),
+            include('operators'),
+        ],
+        'root': [
+            # calls:
+            include('statements'),
+            # blocks:
+            (r'\{|\}', Punctuation),
+            # (r'\{', Punctuation, 'block'),
+            (r'.', Text),
+        ],
+        # 'block': [
+        #    include('statements'),
+        #    ('\{', Punctuation, '#push'),
+        #    ('\}', Punctuation, '#pop')
+        # ],
+        'string_squote': [
+            (r'([^\'\\]|\\.)*\'', String, '#pop'),
+        ],
+        'string_dquote': [
+            (r'([^"\\]|\\.)*"', String, '#pop'),
+        ],
+    }
+
+    def analyse_text(text):
+        if re.search(r'[a-z0-9_\])\s]<-(?!-)', text):
+            return 0.11
+
+
+class RdLexer(RegexLexer):
+    """
+    Pygments Lexer for R documentation (Rd) files
+
+    This is a very minimal implementation, highlighting little more
+    than the macros. A description of Rd syntax is found in `Writing R
+    Extensions `_
+    and `Parsing Rd files `_.
+    """
+    name = 'Rd'
+    aliases = ['rd']
+    filenames = ['*.Rd']
+    mimetypes = ['text/x-r-doc']
+    url = 'http://cran.r-project.org/doc/manuals/R-exts.html'
+    version_added = '1.6'
+
+    # To account for verbatim / LaTeX-like / and R-like areas
+    # would require parsing.
+    tokens = {
+        'root': [
+            # catch escaped brackets and percent sign
+            (r'\\[\\{}%]', String.Escape),
+            # comments
+            (r'%.*$', Comment),
+            # special macros with no arguments
+            (r'\\(?:cr|l?dots|R|tab)\b', Keyword.Constant),
+            # macros
+            (r'\\[a-zA-Z]+\b', Keyword),
+            # special preprocessor macros
+            (r'^\s*#(?:ifn?def|endif).*\b', Comment.Preproc),
+            # non-escaped brackets
+            (r'[{}]', Name.Builtin),
+            # everything else
+            (r'[^\\%\n{}]+', Text),
+            (r'.', Text),
+        ]
+    }
diff --git a/local_packages/pygments/lexers/rdf.py b/local_packages/pygments/lexers/rdf.py
new file mode 100644
index 0000000..f2ecf7a
--- /dev/null
+++ b/local_packages/pygments/lexers/rdf.py
@@ -0,0 +1,468 @@
+"""
+    pygments.lexers.rdf
+    ~~~~~~~~~~~~~~~~~~~
+
+    Lexers for semantic web and RDF query languages and markup.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, bygroups, default
+from pygments.token import Keyword, Punctuation, String, Number, Operator, \
+    Generic, Whitespace, Name, Literal, Comment, Text
+
+__all__ = ['SparqlLexer', 'TurtleLexer', 'ShExCLexer']
+
+
+class SparqlLexer(RegexLexer):
+    """
+    Lexer for SPARQL query language.
+    """
+    name = 'SPARQL'
+    aliases = ['sparql']
+    filenames = ['*.rq', '*.sparql']
+    mimetypes = ['application/sparql-query']
+    url = 'https://www.w3.org/TR/sparql11-query'
+    version_added = '2.0'
+
+    # character group definitions ::
+
+    PN_CHARS_BASE_GRP = ('a-zA-Z'
+                         '\u00c0-\u00d6'
+                         '\u00d8-\u00f6'
+                         '\u00f8-\u02ff'
+                         '\u0370-\u037d'
+                         '\u037f-\u1fff'
+                         '\u200c-\u200d'
+                         '\u2070-\u218f'
+                         '\u2c00-\u2fef'
+                         '\u3001-\ud7ff'
+                         '\uf900-\ufdcf'
+                         '\ufdf0-\ufffd')
+
+    PN_CHARS_U_GRP = (PN_CHARS_BASE_GRP + '_')
+
+    PN_CHARS_GRP = (PN_CHARS_U_GRP +
+                    r'\-' +
+                    r'0-9' +
+                    '\u00b7' +
+                    '\u0300-\u036f' +
+                    '\u203f-\u2040')
+
+    HEX_GRP = '0-9A-Fa-f'
+
+    PN_LOCAL_ESC_CHARS_GRP = r' _~.\-!$&"()*+,;=/?#@%'
+
+    # terminal productions ::
+
+    PN_CHARS_BASE = '[' + PN_CHARS_BASE_GRP + ']'
+
+    PN_CHARS_U = '[' + PN_CHARS_U_GRP + ']'
+
+    PN_CHARS = '[' + PN_CHARS_GRP + ']'
+
+    HEX = '[' + HEX_GRP + ']'
+
+    PN_LOCAL_ESC_CHARS = '[' + PN_LOCAL_ESC_CHARS_GRP + ']'
+
+    IRIREF = r'<(?:[^<>"{}|^`\\\x00-\x20])*>'
+
+    BLANK_NODE_LABEL = '_:[0-9' + PN_CHARS_U_GRP + '](?:[' + PN_CHARS_GRP + \
+                       '.]*' + PN_CHARS + ')?'
+
+    PN_PREFIX = PN_CHARS_BASE + '(?:[' + PN_CHARS_GRP + '.]*' + PN_CHARS + ')?'
+
+    VARNAME = '[0-9' + PN_CHARS_U_GRP + '][' + PN_CHARS_U_GRP + \
+              '0-9\u00b7\u0300-\u036f\u203f-\u2040]*'
+
+    PERCENT = '%' + HEX + HEX
+
+    PN_LOCAL_ESC = r'\\' + PN_LOCAL_ESC_CHARS
+
+    PLX = '(?:' + PERCENT + ')|(?:' + PN_LOCAL_ESC + ')'
+
+    PN_LOCAL = ('(?:[' + PN_CHARS_U_GRP + ':0-9' + ']|' + PLX + ')' +
+                '(?:(?:[' + PN_CHARS_GRP + '.:]|' + PLX + ')*(?:[' +
+                PN_CHARS_GRP + ':]|' + PLX + '))?')
+
+    EXPONENT = r'[eE][+-]?\d+'
+
+    # Lexer token definitions ::
+
+    tokens = {
+        'root': [
+            (r'\s+', Text),
+            # keywords ::
+            (r'(?i)(select|construct|describe|ask|where|filter|group\s+by|minus|'
+             r'distinct|reduced|from\s+named|from|order\s+by|desc|asc|limit|'
+             r'offset|values|bindings|load|into|clear|drop|create|add|move|copy|'
+             r'insert\s+data|delete\s+data|delete\s+where|with|delete|insert|'
+             r'using\s+named|using|graph|default|named|all|optional|service|'
+             r'silent|bind|undef|union|not\s+in|in|as|having|to|prefix|base)\b', Keyword),
+            (r'(a)\b', Keyword),
+            # IRIs ::
+            ('(' + IRIREF + ')', Name.Label),
+            # blank nodes ::
+            ('(' + BLANK_NODE_LABEL + ')', Name.Label),
+            #  # variables ::
+            ('[?$]' + VARNAME, Name.Variable),
+            # prefixed names ::
+            (r'(' + PN_PREFIX + r')?(\:)(' + PN_LOCAL + r')?',
+             bygroups(Name.Namespace, Punctuation, Name.Tag)),
+            # function names ::
+            (r'(?i)(str|lang|langmatches|datatype|bound|iri|uri|bnode|rand|abs|'
+             r'ceil|floor|round|concat|strlen|ucase|lcase|encode_for_uri|'
+             r'contains|strstarts|strends|strbefore|strafter|year|month|day|'
+             r'hours|minutes|seconds|timezone|tz|now|uuid|struuid|md5|sha1|sha256|sha384|'
+             r'sha512|coalesce|if|strlang|strdt|sameterm|isiri|isuri|isblank|'
+             r'isliteral|isnumeric|regex|substr|replace|exists|not\s+exists|'
+             r'count|sum|min|max|avg|sample|group_concat|separator)\b',
+             Name.Function),
+            # boolean literals ::
+            (r'(true|false)', Keyword.Constant),
+            # double literals ::
+            (r'[+\-]?(\d+\.\d*' + EXPONENT + r'|\.?\d+' + EXPONENT + ')', Number.Float),
+            # decimal literals ::
+            (r'[+\-]?(\d+\.\d*|\.\d+)', Number.Float),
+            # integer literals ::
+            (r'[+\-]?\d+', Number.Integer),
+            # operators ::
+            (r'(\|\||&&|=|\*|\-|\+|/|!=|<=|>=|!|<|>)', Operator),
+            # punctuation characters ::
+            (r'[(){}.;,:^\[\]]', Punctuation),
+            # line comments ::
+            (r'#[^\n]*', Comment),
+            # strings ::
+            (r'"""', String, 'triple-double-quoted-string'),
+            (r'"', String, 'single-double-quoted-string'),
+            (r"'''", String, 'triple-single-quoted-string'),
+            (r"'", String, 'single-single-quoted-string'),
+        ],
+        'triple-double-quoted-string': [
+            (r'"""', String, 'end-of-string'),
+            (r'[^\\]+', String),
+            (r'\\', String, 'string-escape'),
+        ],
+        'single-double-quoted-string': [
+            (r'"', String, 'end-of-string'),
+            (r'[^"\\\n]+', String),
+            (r'\\', String, 'string-escape'),
+        ],
+        'triple-single-quoted-string': [
+            (r"'''", String, 'end-of-string'),
+            (r'[^\\]+', String),
+            (r'\\', String.Escape, 'string-escape'),
+        ],
+        'single-single-quoted-string': [
+            (r"'", String, 'end-of-string'),
+            (r"[^'\\\n]+", String),
+            (r'\\', String, 'string-escape'),
+        ],
+        'string-escape': [
+            (r'u' + HEX + '{4}', String.Escape, '#pop'),
+            (r'U' + HEX + '{8}', String.Escape, '#pop'),
+            (r'.', String.Escape, '#pop'),
+        ],
+        'end-of-string': [
+            (r'(@)([a-zA-Z]+(?:-[a-zA-Z0-9]+)*)',
+             bygroups(Operator, Name.Function), '#pop:2'),
+            (r'\^\^', Operator, '#pop:2'),
+            default('#pop:2'),
+        ],
+    }
+
+
+class TurtleLexer(RegexLexer):
+    """
+    Lexer for Turtle data language.
+    """
+    name = 'Turtle'
+    aliases = ['turtle']
+    filenames = ['*.ttl']
+    mimetypes = ['text/turtle', 'application/x-turtle']
+    url = 'https://www.w3.org/TR/turtle'
+    version_added = '2.1'
+
+    # character group definitions ::
+    PN_CHARS_BASE_GRP = ('a-zA-Z'
+                         '\u00c0-\u00d6'
+                         '\u00d8-\u00f6'
+                         '\u00f8-\u02ff'
+                         '\u0370-\u037d'
+                         '\u037f-\u1fff'
+                         '\u200c-\u200d'
+                         '\u2070-\u218f'
+                         '\u2c00-\u2fef'
+                         '\u3001-\ud7ff'
+                         '\uf900-\ufdcf'
+                         '\ufdf0-\ufffd')
+
+    PN_CHARS_U_GRP = (PN_CHARS_BASE_GRP + '_')
+
+    PN_CHARS_GRP = (PN_CHARS_U_GRP +
+                    r'\-' +
+                    r'0-9' +
+                    '\u00b7' +
+                    '\u0300-\u036f' +
+                    '\u203f-\u2040')
+
+    PN_CHARS = '[' + PN_CHARS_GRP + ']'
+
+    PN_CHARS_BASE = '[' + PN_CHARS_BASE_GRP + ']'
+
+    PN_PREFIX = PN_CHARS_BASE + '(?:[' + PN_CHARS_GRP + '.]*' + PN_CHARS + ')?'
+
+    HEX_GRP = '0-9A-Fa-f'
+
+    HEX = '[' + HEX_GRP + ']'
+
+    PERCENT = '%' + HEX + HEX
+
+    PN_LOCAL_ESC_CHARS_GRP = r' _~.\-!$&"()*+,;=/?#@%'
+
+    PN_LOCAL_ESC_CHARS = '[' + PN_LOCAL_ESC_CHARS_GRP + ']'
+
+    PN_LOCAL_ESC = r'\\' + PN_LOCAL_ESC_CHARS
+
+    PLX = '(?:' + PERCENT + ')|(?:' + PN_LOCAL_ESC + ')'
+
+    PN_LOCAL = ('(?:[' + PN_CHARS_U_GRP + ':0-9' + ']|' + PLX + ')' +
+                '(?:(?:[' + PN_CHARS_GRP + '.:]|' + PLX + ')*(?:[' +
+                PN_CHARS_GRP + ':]|' + PLX + '))?')
+
+    patterns = {
+        'PNAME_NS': r'((?:[a-zA-Z][\w-]*)?\:)',  # Simplified character range
+        'IRIREF': r'(<[^<>"{}|^`\\\x00-\x20]*>)'
+    }
+
+    tokens = {
+        'root': [
+            (r'\s+', Text),
+
+            # Base / prefix
+            (r'(@base|BASE)(\s+){IRIREF}(\s*)(\.?)'.format(**patterns),
+             bygroups(Keyword, Whitespace, Name.Variable, Whitespace,
+                      Punctuation)),
+            (r'(@prefix|PREFIX)(\s+){PNAME_NS}(\s+){IRIREF}(\s*)(\.?)'.format(**patterns),
+             bygroups(Keyword, Whitespace, Name.Namespace, Whitespace,
+                      Name.Variable, Whitespace, Punctuation)),
+
+            # The shorthand predicate 'a'
+            (r'(?<=\s)a(?=\s)', Keyword.Type),
+
+            # IRIREF
+            (r'{IRIREF}'.format(**patterns), Name.Variable),
+
+            # PrefixedName
+            (r'(' + PN_PREFIX + r')?(\:)(' + PN_LOCAL + r')?',
+             bygroups(Name.Namespace, Punctuation, Name.Tag)),
+
+            # BlankNodeLabel
+            (r'(_)(:)([' + PN_CHARS_U_GRP + r'0-9]([' + PN_CHARS_GRP + r'.]*' + PN_CHARS + ')?)',
+             bygroups(Name.Namespace, Punctuation, Name.Tag)),
+
+            # Comment
+            (r'#([^\n]+|$)', Comment),
+
+            (r'\b(true|false)\b', Literal),
+            (r'[+\-]?\d*\.\d+', Number.Float),
+            (r'[+\-]?\d*(:?\.\d+)?E[+\-]?\d+', Number.Float),
+            (r'[+\-]?\d+', Number.Integer),
+            (r'[\[\](){}.;,:^]', Punctuation),
+
+            (r'"""', String, 'triple-double-quoted-string'),
+            (r'"', String, 'single-double-quoted-string'),
+            (r"'''", String, 'triple-single-quoted-string'),
+            (r"'", String, 'single-single-quoted-string'),
+        ],
+        'triple-double-quoted-string': [
+            (r'"""', String, 'end-of-string'),
+            (r'[^\\]+(?=""")', String),
+            (r'\\', String, 'string-escape'),
+        ],
+        'single-double-quoted-string': [
+            (r'"', String, 'end-of-string'),
+            (r'[^"\\\n]+', String),
+            (r'\\', String, 'string-escape'),
+        ],
+        'triple-single-quoted-string': [
+            (r"'''", String, 'end-of-string'),
+            (r"[^\\]+(?=''')", String),
+            (r'\\', String, 'string-escape'),
+        ],
+        'single-single-quoted-string': [
+            (r"'", String, 'end-of-string'),
+            (r"[^'\\\n]+", String),
+            (r'\\', String, 'string-escape'),
+        ],
+        'string-escape': [
+            (r'.', String, '#pop'),
+        ],
+        'end-of-string': [
+            (r'(@)([a-zA-Z]+(?:-[a-zA-Z0-9]+)*)',
+             bygroups(Operator, Generic.Emph), '#pop:2'),
+
+            (r'(\^\^){IRIREF}'.format(**patterns), bygroups(Operator, Generic.Emph), '#pop:2'),
+
+            default('#pop:2'),
+
+        ],
+    }
+
+    # Turtle and Tera Term macro files share the same file extension
+    # but each has a recognizable and distinct syntax.
+    def analyse_text(text):
+        for t in ('@base ', 'BASE ', '@prefix ', 'PREFIX '):
+            if re.search(rf'^\s*{t}', text):
+                return 0.80
+
+
+class ShExCLexer(RegexLexer):
+    """
+    Lexer for ShExC shape expressions language syntax.
+    """
+    name = 'ShExC'
+    aliases = ['shexc', 'shex']
+    filenames = ['*.shex']
+    mimetypes = ['text/shex']
+    url = 'https://shex.io/shex-semantics/#shexc'
+    version_added = ''
+
+    # character group definitions ::
+
+    PN_CHARS_BASE_GRP = ('a-zA-Z'
+                         '\u00c0-\u00d6'
+                         '\u00d8-\u00f6'
+                         '\u00f8-\u02ff'
+                         '\u0370-\u037d'
+                         '\u037f-\u1fff'
+                         '\u200c-\u200d'
+                         '\u2070-\u218f'
+                         '\u2c00-\u2fef'
+                         '\u3001-\ud7ff'
+                         '\uf900-\ufdcf'
+                         '\ufdf0-\ufffd')
+
+    PN_CHARS_U_GRP = (PN_CHARS_BASE_GRP + '_')
+
+    PN_CHARS_GRP = (PN_CHARS_U_GRP +
+                    r'\-' +
+                    r'0-9' +
+                    '\u00b7' +
+                    '\u0300-\u036f' +
+                    '\u203f-\u2040')
+
+    HEX_GRP = '0-9A-Fa-f'
+
+    PN_LOCAL_ESC_CHARS_GRP = r"_~.\-!$&'()*+,;=/?#@%"
+
+    # terminal productions ::
+
+    PN_CHARS_BASE = '[' + PN_CHARS_BASE_GRP + ']'
+
+    PN_CHARS_U = '[' + PN_CHARS_U_GRP + ']'
+
+    PN_CHARS = '[' + PN_CHARS_GRP + ']'
+
+    HEX = '[' + HEX_GRP + ']'
+
+    PN_LOCAL_ESC_CHARS = '[' + PN_LOCAL_ESC_CHARS_GRP + ']'
+
+    UCHAR_NO_BACKSLASH = '(?:u' + HEX + '{4}|U' + HEX + '{8})'
+
+    UCHAR = r'\\' + UCHAR_NO_BACKSLASH
+
+    IRIREF = r'<(?:[^\x00-\x20<>"{}|^`\\]|' + UCHAR + ')*>'
+
+    BLANK_NODE_LABEL = '_:[0-9' + PN_CHARS_U_GRP + '](?:[' + PN_CHARS_GRP + \
+                       '.]*' + PN_CHARS + ')?'
+
+    PN_PREFIX = PN_CHARS_BASE + '(?:[' + PN_CHARS_GRP + '.]*' + PN_CHARS + ')?'
+
+    PERCENT = '%' + HEX + HEX
+
+    PN_LOCAL_ESC = r'\\' + PN_LOCAL_ESC_CHARS
+
+    PLX = '(?:' + PERCENT + ')|(?:' + PN_LOCAL_ESC + ')'
+
+    PN_LOCAL = ('(?:[' + PN_CHARS_U_GRP + ':0-9' + ']|' + PLX + ')' +
+                '(?:(?:[' + PN_CHARS_GRP + '.:]|' + PLX + ')*(?:[' +
+                PN_CHARS_GRP + ':]|' + PLX + '))?')
+
+    EXPONENT = r'[eE][+-]?\d+'
+
+    # Lexer token definitions ::
+
+    tokens = {
+        'root': [
+            (r'\s+', Text),
+            # keywords ::
+            (r'(?i)(base|prefix|start|external|'
+             r'literal|iri|bnode|nonliteral|length|minlength|maxlength|'
+             r'mininclusive|minexclusive|maxinclusive|maxexclusive|'
+             r'totaldigits|fractiondigits|'
+             r'closed|extra)\b', Keyword),
+            (r'(a)\b', Keyword),
+            # IRIs ::
+            ('(' + IRIREF + ')', Name.Label),
+            # blank nodes ::
+            ('(' + BLANK_NODE_LABEL + ')', Name.Label),
+            # prefixed names ::
+            (r'(' + PN_PREFIX + r')?(\:)(' + PN_LOCAL + ')?',
+             bygroups(Name.Namespace, Punctuation, Name.Tag)),
+            # boolean literals ::
+            (r'(true|false)', Keyword.Constant),
+            # double literals ::
+            (r'[+\-]?(\d+\.\d*' + EXPONENT + r'|\.?\d+' + EXPONENT + ')', Number.Float),
+            # decimal literals ::
+            (r'[+\-]?(\d+\.\d*|\.\d+)', Number.Float),
+            # integer literals ::
+            (r'[+\-]?\d+', Number.Integer),
+            # operators ::
+            (r'[@|$&=*+?^\-~]', Operator),
+            # operator keywords ::
+            (r'(?i)(and|or|not)\b', Operator.Word),
+            # punctuation characters ::
+            (r'[(){}.;,:^\[\]]', Punctuation),
+            # line comments ::
+            (r'#[^\n]*', Comment),
+            # strings ::
+            (r'"""', String, 'triple-double-quoted-string'),
+            (r'"', String, 'single-double-quoted-string'),
+            (r"'''", String, 'triple-single-quoted-string'),
+            (r"'", String, 'single-single-quoted-string'),
+        ],
+        'triple-double-quoted-string': [
+            (r'"""', String, 'end-of-string'),
+            (r'[^\\]+', String),
+            (r'\\', String, 'string-escape'),
+        ],
+        'single-double-quoted-string': [
+            (r'"', String, 'end-of-string'),
+            (r'[^"\\\n]+', String),
+            (r'\\', String, 'string-escape'),
+        ],
+        'triple-single-quoted-string': [
+            (r"'''", String, 'end-of-string'),
+            (r'[^\\]+', String),
+            (r'\\', String.Escape, 'string-escape'),
+        ],
+        'single-single-quoted-string': [
+            (r"'", String, 'end-of-string'),
+            (r"[^'\\\n]+", String),
+            (r'\\', String, 'string-escape'),
+        ],
+        'string-escape': [
+            (UCHAR_NO_BACKSLASH, String.Escape, '#pop'),
+            (r'.', String.Escape, '#pop'),
+        ],
+        'end-of-string': [
+            (r'(@)([a-zA-Z]+(?:-[a-zA-Z0-9]+)*)',
+             bygroups(Operator, Name.Function), '#pop:2'),
+            (r'\^\^', Operator, '#pop:2'),
+            default('#pop:2'),
+        ],
+    }
diff --git a/local_packages/pygments/lexers/rebol.py b/local_packages/pygments/lexers/rebol.py
new file mode 100644
index 0000000..fcff62b
--- /dev/null
+++ b/local_packages/pygments/lexers/rebol.py
@@ -0,0 +1,419 @@
+"""
+    pygments.lexers.rebol
+    ~~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for the REBOL and related languages.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, bygroups
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+    Number, Generic, Whitespace
+
+__all__ = ['RebolLexer', 'RedLexer']
+
+
+class RebolLexer(RegexLexer):
+    """
+    A REBOL lexer.
+    """
+    name = 'REBOL'
+    aliases = ['rebol']
+    filenames = ['*.r', '*.r3', '*.reb']
+    mimetypes = ['text/x-rebol']
+    url = 'http://www.rebol.com'
+    version_added = '1.1'
+
+    flags = re.IGNORECASE | re.MULTILINE
+
+    escape_re = r'(?:\^\([0-9a-f]{1,4}\)*)'
+
+    def word_callback(lexer, match):
+        word = match.group()
+
+        if re.match(".*:$", word):
+            yield match.start(), Generic.Subheading, word
+        elif re.match(
+            r'(native|alias|all|any|as-string|as-binary|bind|bound\?|case|'
+            r'catch|checksum|comment|debase|dehex|exclude|difference|disarm|'
+            r'either|else|enbase|foreach|remove-each|form|free|get|get-env|if|'
+            r'in|intersect|loop|minimum-of|maximum-of|mold|new-line|'
+            r'new-line\?|not|now|prin|print|reduce|compose|construct|repeat|'
+            r'reverse|save|script\?|set|shift|switch|throw|to-hex|trace|try|'
+            r'type\?|union|unique|unless|unprotect|unset|until|use|value\?|'
+            r'while|compress|decompress|secure|open|close|read|read-io|'
+            r'write-io|write|update|query|wait|input\?|exp|log-10|log-2|'
+            r'log-e|square-root|cosine|sine|tangent|arccosine|arcsine|'
+            r'arctangent|protect|lowercase|uppercase|entab|detab|connected\?|'
+            r'browse|launch|stats|get-modes|set-modes|to-local-file|'
+            r'to-rebol-file|encloak|decloak|create-link|do-browser|bind\?|'
+            r'hide|draw|show|size-text|textinfo|offset-to-caret|'
+            r'caret-to-offset|local-request-file|rgb-to-hsv|hsv-to-rgb|'
+            r'crypt-strength\?|dh-make-key|dh-generate-key|dh-compute-key|'
+            r'dsa-make-key|dsa-generate-key|dsa-make-signature|'
+            r'dsa-verify-signature|rsa-make-key|rsa-generate-key|'
+            r'rsa-encrypt)$', word):
+            yield match.start(), Name.Builtin, word
+        elif re.match(
+            r'(add|subtract|multiply|divide|remainder|power|and~|or~|xor~|'
+            r'minimum|maximum|negate|complement|absolute|random|head|tail|'
+            r'next|back|skip|at|pick|first|second|third|fourth|fifth|sixth|'
+            r'seventh|eighth|ninth|tenth|last|path|find|select|make|to|copy\*|'
+            r'insert|remove|change|poke|clear|trim|sort|min|max|abs|cp|'
+            r'copy)$', word):
+            yield match.start(), Name.Function, word
+        elif re.match(
+            r'(error|source|input|license|help|install|echo|Usage|with|func|'
+            r'throw-on-error|function|does|has|context|probe|\?\?|as-pair|'
+            r'mod|modulo|round|repend|about|set-net|append|join|rejoin|reform|'
+            r'remold|charset|array|replace|move|extract|forskip|forall|alter|'
+            r'first+|also|take|for|forever|dispatch|attempt|what-dir|'
+            r'change-dir|clean-path|list-dir|dirize|rename|split-path|delete|'
+            r'make-dir|delete-dir|in-dir|confirm|dump-obj|upgrade|what|'
+            r'build-tag|process-source|build-markup|decode-cgi|read-cgi|'
+            r'write-user|save-user|set-user-name|protect-system|parse-xml|'
+            r'cvs-date|cvs-version|do-boot|get-net-info|desktop|layout|'
+            r'scroll-para|get-face|alert|set-face|uninstall|unfocus|'
+            r'request-dir|center-face|do-events|net-error|decode-url|'
+            r'parse-header|parse-header-date|parse-email-addrs|import-email|'
+            r'send|build-attach-body|resend|show-popup|hide-popup|open-events|'
+            r'find-key-face|do-face|viewtop|confine|find-window|'
+            r'insert-event-func|remove-event-func|inform|dump-pane|dump-face|'
+            r'flag-face|deflag-face|clear-fields|read-net|vbug|path-thru|'
+            r'read-thru|load-thru|do-thru|launch-thru|load-image|'
+            r'request-download|do-face-alt|set-font|set-para|get-style|'
+            r'set-style|make-face|stylize|choose|hilight-text|hilight-all|'
+            r'unlight-text|focus|scroll-drag|clear-face|reset-face|scroll-face|'
+            r'resize-face|load-stock|load-stock-block|notify|request|flash|'
+            r'request-color|request-pass|request-text|request-list|'
+            r'request-date|request-file|dbug|editor|link-relative-path|'
+            r'emailer|parse-error)$', word):
+            yield match.start(), Keyword.Namespace, word
+        elif re.match(
+            r'(halt|quit|do|load|q|recycle|call|run|ask|parse|view|unview|'
+            r'return|exit|break)$', word):
+            yield match.start(), Name.Exception, word
+        elif re.match('REBOL$', word):
+            yield match.start(), Generic.Heading, word
+        elif re.match("to-.*", word):
+            yield match.start(), Keyword, word
+        elif re.match(r'(\+|-|\*|/|//|\*\*|and|or|xor|=\?|=|==|<>|<|>|<=|>=)$',
+                      word):
+            yield match.start(), Operator, word
+        elif re.match(r".*\?$", word):
+            yield match.start(), Keyword, word
+        elif re.match(r".*\!$", word):
+            yield match.start(), Keyword.Type, word
+        elif re.match("'.*", word):
+            yield match.start(), Name.Variable.Instance, word  # lit-word
+        elif re.match("#.*", word):
+            yield match.start(), Name.Label, word  # issue
+        elif re.match("%.*", word):
+            yield match.start(), Name.Decorator, word  # file
+        else:
+            yield match.start(), Name.Variable, word
+
+    tokens = {
+        'root': [
+            (r'\s+', Text),
+            (r'#"', String.Char, 'char'),
+            (r'#\{[0-9a-f]*\}', Number.Hex),
+            (r'2#\{', Number.Hex, 'bin2'),
+            (r'64#\{[0-9a-z+/=\s]*\}', Number.Hex),
+            (r'"', String, 'string'),
+            (r'\{', String, 'string2'),
+            (r';#+.*\n', Comment.Special),
+            (r';\*+.*\n', Comment.Preproc),
+            (r';.*\n', Comment),
+            (r'%"', Name.Decorator, 'stringFile'),
+            (r'%[^(^{")\s\[\]]+', Name.Decorator),
+            (r'[+-]?([a-z]{1,3})?\$\d+(\.\d+)?', Number.Float),  # money
+            (r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other),    # time
+            (r'\d+[\-/][0-9a-z]+[\-/]\d+(\/\d+\:\d+((\:\d+)?'
+             r'([.\d+]?([+-]?\d+:\d+)?)?)?)?', String.Other),   # date
+            (r'\d+(\.\d+)+\.\d+', Keyword.Constant),             # tuple
+            (r'\d+X\d+', Keyword.Constant),                   # pair
+            (r'[+-]?\d+(\'\d+)?([.,]\d*)?E[+-]?\d+', Number.Float),
+            (r'[+-]?\d+(\'\d+)?[.,]\d*', Number.Float),
+            (r'[+-]?\d+(\'\d+)?', Number),
+            (r'[\[\]()]', Generic.Strong),
+            (r'[a-z]+[^(^{"\s:)]*://[^(^{"\s)]*', Name.Decorator),  # url
+            (r'mailto:[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator),  # url
+            (r'[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator),         # email
+            (r'comment\s"', Comment, 'commentString1'),
+            (r'comment\s\{', Comment, 'commentString2'),
+            (r'comment\s\[', Comment, 'commentBlock'),
+            (r'comment\s[^(\s{"\[]+', Comment),
+            (r'/[^(^{")\s/[\]]*', Name.Attribute),
+            (r'([^(^{")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback),
+            (r'<[\w:.-]*>', Name.Tag),
+            (r'<[^(<>\s")]+', Name.Tag, 'tag'),
+            (r'([^(^{")\s]+)', Text),
+        ],
+        'string': [
+            (r'[^(^")]+', String),
+            (escape_re, String.Escape),
+            (r'[(|)]+', String),
+            (r'\^.', String.Escape),
+            (r'"', String, '#pop'),
+        ],
+        'string2': [
+            (r'[^(^{})]+', String),
+            (escape_re, String.Escape),
+            (r'[(|)]+', String),
+            (r'\^.', String.Escape),
+            (r'\{', String, '#push'),
+            (r'\}', String, '#pop'),
+        ],
+        'stringFile': [
+            (r'[^(^")]+', Name.Decorator),
+            (escape_re, Name.Decorator),
+            (r'\^.', Name.Decorator),
+            (r'"', Name.Decorator, '#pop'),
+        ],
+        'char': [
+            (escape_re + '"', String.Char, '#pop'),
+            (r'\^."', String.Char, '#pop'),
+            (r'."', String.Char, '#pop'),
+        ],
+        'tag': [
+            (escape_re, Name.Tag),
+            (r'"', Name.Tag, 'tagString'),
+            (r'[^(<>\r\n")]+', Name.Tag),
+            (r'>', Name.Tag, '#pop'),
+        ],
+        'tagString': [
+            (r'[^(^")]+', Name.Tag),
+            (escape_re, Name.Tag),
+            (r'[(|)]+', Name.Tag),
+            (r'\^.', Name.Tag),
+            (r'"', Name.Tag, '#pop'),
+        ],
+        'tuple': [
+            (r'(\d+\.)+', Keyword.Constant),
+            (r'\d+', Keyword.Constant, '#pop'),
+        ],
+        'bin2': [
+            (r'\s+', Number.Hex),
+            (r'([01]\s*){8}', Number.Hex),
+            (r'\}', Number.Hex, '#pop'),
+        ],
+        'commentString1': [
+            (r'[^(^")]+', Comment),
+            (escape_re, Comment),
+            (r'[(|)]+', Comment),
+            (r'\^.', Comment),
+            (r'"', Comment, '#pop'),
+        ],
+        'commentString2': [
+            (r'[^(^{})]+', Comment),
+            (escape_re, Comment),
+            (r'[(|)]+', Comment),
+            (r'\^.', Comment),
+            (r'\{', Comment, '#push'),
+            (r'\}', Comment, '#pop'),
+        ],
+        'commentBlock': [
+            (r'\[', Comment, '#push'),
+            (r'\]', Comment, '#pop'),
+            (r'"', Comment, "commentString1"),
+            (r'\{', Comment, "commentString2"),
+            (r'[^(\[\]"{)]+', Comment),
+        ],
+    }
+
+    def analyse_text(text):
+        """
+        Check if code contains REBOL header and so it probably not R code
+        """
+        if re.match(r'^\s*REBOL\s*\[', text, re.IGNORECASE):
+            # The code starts with REBOL header
+            return 1.0
+        elif re.search(r'\s*REBOL\s*\[', text, re.IGNORECASE):
+            # The code contains REBOL header but also some text before it
+            return 0.5
+
+
+class RedLexer(RegexLexer):
+    """
+    A Red-language lexer.
+    """
+    name = 'Red'
+    aliases = ['red', 'red/system']
+    filenames = ['*.red', '*.reds']
+    mimetypes = ['text/x-red', 'text/x-red-system']
+    url = 'https://www.red-lang.org'
+    version_added = '2.0'
+
+    flags = re.IGNORECASE | re.MULTILINE
+
+    escape_re = r'(?:\^\([0-9a-f]{1,4}\)*)'
+
+    def word_callback(lexer, match):
+        word = match.group()
+
+        if re.match(".*:$", word):
+            yield match.start(), Generic.Subheading, word
+        elif re.match(r'(if|unless|either|any|all|while|until|loop|repeat|'
+                      r'foreach|forall|func|function|does|has|switch|'
+                      r'case|reduce|compose|get|set|print|prin|equal\?|'
+                      r'not-equal\?|strict-equal\?|lesser\?|greater\?|lesser-or-equal\?|'
+                      r'greater-or-equal\?|same\?|not|type\?|stats|'
+                      r'bind|union|replace|charset|routine)$', word):
+            yield match.start(), Name.Builtin, word
+        elif re.match(r'(make|random|reflect|to|form|mold|absolute|add|divide|multiply|negate|'
+                      r'power|remainder|round|subtract|even\?|odd\?|and~|complement|or~|xor~|'
+                      r'append|at|back|change|clear|copy|find|head|head\?|index\?|insert|'
+                      r'length\?|next|pick|poke|remove|reverse|select|sort|skip|swap|tail|tail\?|'
+                      r'take|trim|create|close|delete|modify|open|open\?|query|read|rename|'
+                      r'update|write)$', word):
+            yield match.start(), Name.Function, word
+        elif re.match(r'(yes|on|no|off|true|false|tab|cr|lf|newline|escape|slash|sp|space|null|'
+                      r'none|crlf|dot|null-byte)$', word):
+            yield match.start(), Name.Builtin.Pseudo, word
+        elif re.match(r'(#system-global|#include|#enum|#define|#either|#if|#import|#export|'
+                      r'#switch|#default|#get-definition)$', word):
+            yield match.start(), Keyword.Namespace, word
+        elif re.match(r'(system|halt|quit|quit-return|do|load|q|recycle|call|run|ask|parse|'
+                      r'raise-error|return|exit|break|alias|push|pop|probe|\?\?|spec-of|body-of|'
+                      r'quote|forever)$', word):
+            yield match.start(), Name.Exception, word
+        elif re.match(r'(action\?|block\?|char\?|datatype\?|file\?|function\?|get-path\?|zero\?|'
+                      r'get-word\?|integer\?|issue\?|lit-path\?|lit-word\?|logic\?|native\?|'
+                      r'op\?|paren\?|path\?|refinement\?|set-path\?|set-word\?|string\?|unset\?|'
+                      r'any-struct\?|none\?|word\?|any-series\?)$', word):
+            yield match.start(), Keyword, word
+        elif re.match(r'(JNICALL|stdcall|cdecl|infix)$', word):
+            yield match.start(), Keyword.Namespace, word
+        elif re.match("to-.*", word):
+            yield match.start(), Keyword, word
+        elif re.match(r'(\+|-\*\*|-|\*\*|//|/|\*|and|or|xor|=\?|===|==|=|<>|<=|>=|'
+                      r'<<<|>>>|<<|>>|<|>%)$', word):
+            yield match.start(), Operator, word
+        elif re.match(r".*\!$", word):
+            yield match.start(), Keyword.Type, word
+        elif re.match("'.*", word):
+            yield match.start(), Name.Variable.Instance, word  # lit-word
+        elif re.match("#.*", word):
+            yield match.start(), Name.Label, word  # issue
+        elif re.match("%.*", word):
+            yield match.start(), Name.Decorator, word  # file
+        elif re.match(":.*", word):
+            yield match.start(), Generic.Subheading, word  # get-word
+        else:
+            yield match.start(), Name.Variable, word
+
+    tokens = {
+        'root': [
+            (r'\s+', Text),
+            (r'#"', String.Char, 'char'),
+            (r'#\{[0-9a-f\s]*\}', Number.Hex),
+            (r'2#\{', Number.Hex, 'bin2'),
+            (r'64#\{[0-9a-z+/=\s]*\}', Number.Hex),
+            (r'([0-9a-f]+)(h)((\s)|(?=[\[\]{}"()]))',
+             bygroups(Number.Hex, Name.Variable, Whitespace)),
+            (r'"', String, 'string'),
+            (r'\{', String, 'string2'),
+            (r';#+.*\n', Comment.Special),
+            (r';\*+.*\n', Comment.Preproc),
+            (r';.*\n', Comment),
+            (r'%"', Name.Decorator, 'stringFile'),
+            (r'%[^(^{")\s\[\]]+', Name.Decorator),
+            (r'[+-]?([a-z]{1,3})?\$\d+(\.\d+)?', Number.Float),  # money
+            (r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other),    # time
+            (r'\d+[\-/][0-9a-z]+[\-/]\d+(/\d+:\d+((:\d+)?'
+             r'([\.\d+]?([+-]?\d+:\d+)?)?)?)?', String.Other),   # date
+            (r'\d+(\.\d+)+\.\d+', Keyword.Constant),             # tuple
+            (r'\d+X\d+', Keyword.Constant),                   # pair
+            (r'[+-]?\d+(\'\d+)?([.,]\d*)?E[+-]?\d+', Number.Float),
+            (r'[+-]?\d+(\'\d+)?[.,]\d*', Number.Float),
+            (r'[+-]?\d+(\'\d+)?', Number),
+            (r'[\[\]()]', Generic.Strong),
+            (r'[a-z]+[^(^{"\s:)]*://[^(^{"\s)]*', Name.Decorator),  # url
+            (r'mailto:[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator),  # url
+            (r'[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator),         # email
+            (r'comment\s"', Comment, 'commentString1'),
+            (r'comment\s\{', Comment, 'commentString2'),
+            (r'comment\s\[', Comment, 'commentBlock'),
+            (r'comment\s[^(\s{"\[]+', Comment),
+            (r'/[^(^{^")\s/[\]]*', Name.Attribute),
+            (r'([^(^{^")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback),
+            (r'<[\w:.-]*>', Name.Tag),
+            (r'<[^(<>\s")]+', Name.Tag, 'tag'),
+            (r'([^(^{")\s]+)', Text),
+        ],
+        'string': [
+            (r'[^(^")]+', String),
+            (escape_re, String.Escape),
+            (r'[(|)]+', String),
+            (r'\^.', String.Escape),
+            (r'"', String, '#pop'),
+        ],
+        'string2': [
+            (r'[^(^{})]+', String),
+            (escape_re, String.Escape),
+            (r'[(|)]+', String),
+            (r'\^.', String.Escape),
+            (r'\{', String, '#push'),
+            (r'\}', String, '#pop'),
+        ],
+        'stringFile': [
+            (r'[^(^")]+', Name.Decorator),
+            (escape_re, Name.Decorator),
+            (r'\^.', Name.Decorator),
+            (r'"', Name.Decorator, '#pop'),
+        ],
+        'char': [
+            (escape_re + '"', String.Char, '#pop'),
+            (r'\^."', String.Char, '#pop'),
+            (r'."', String.Char, '#pop'),
+        ],
+        'tag': [
+            (escape_re, Name.Tag),
+            (r'"', Name.Tag, 'tagString'),
+            (r'[^(<>\r\n")]+', Name.Tag),
+            (r'>', Name.Tag, '#pop'),
+        ],
+        'tagString': [
+            (r'[^(^")]+', Name.Tag),
+            (escape_re, Name.Tag),
+            (r'[(|)]+', Name.Tag),
+            (r'\^.', Name.Tag),
+            (r'"', Name.Tag, '#pop'),
+        ],
+        'tuple': [
+            (r'(\d+\.)+', Keyword.Constant),
+            (r'\d+', Keyword.Constant, '#pop'),
+        ],
+        'bin2': [
+            (r'\s+', Number.Hex),
+            (r'([01]\s*){8}', Number.Hex),
+            (r'\}', Number.Hex, '#pop'),
+        ],
+        'commentString1': [
+            (r'[^(^")]+', Comment),
+            (escape_re, Comment),
+            (r'[(|)]+', Comment),
+            (r'\^.', Comment),
+            (r'"', Comment, '#pop'),
+        ],
+        'commentString2': [
+            (r'[^(^{})]+', Comment),
+            (escape_re, Comment),
+            (r'[(|)]+', Comment),
+            (r'\^.', Comment),
+            (r'\{', Comment, '#push'),
+            (r'\}', Comment, '#pop'),
+        ],
+        'commentBlock': [
+            (r'\[', Comment, '#push'),
+            (r'\]', Comment, '#pop'),
+            (r'"', Comment, "commentString1"),
+            (r'\{', Comment, "commentString2"),
+            (r'[^(\[\]"{)]+', Comment),
+        ],
+    }
diff --git a/local_packages/pygments/lexers/rego.py b/local_packages/pygments/lexers/rego.py
new file mode 100644
index 0000000..b2e8b75
--- /dev/null
+++ b/local_packages/pygments/lexers/rego.py
@@ -0,0 +1,57 @@
+"""
+    pygments.lexers.rego
+    ~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for the Rego policy languages.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words
+from pygments.token import Comment, Operator, Keyword, Name, String, Number, Punctuation, Whitespace
+
+class RegoLexer(RegexLexer):
+    """
+    For Rego source.
+    """
+    name = 'Rego'
+    url = 'https://www.openpolicyagent.org/docs/latest/policy-language/'
+    filenames = ['*.rego']
+    aliases = ['rego']
+    mimetypes = ['text/x-rego']
+    version_added = '2.19'
+
+    reserved_words = (
+        'as', 'contains', 'data', 'default', 'else', 'every', 'false',
+        'if', 'in', 'import', 'package', 'not', 'null',
+        'some', 'true', 'with'
+    )
+
+    builtins = (
+        # https://www.openpolicyagent.org/docs/latest/philosophy/#the-opa-document-model
+        'data',  # Global variable for accessing base and virtual documents
+        'input', # Represents synchronously pushed base documents
+    )
+
+    tokens = {
+        'root': [
+            (r'\n', Whitespace),
+            (r'\s+', Whitespace),
+            (r'#.*?$', Comment.Single),
+            (words(reserved_words, suffix=r'\b'), Keyword),
+            (words(builtins, suffix=r'\b'), Name.Builtin),
+            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
+            (r'"(\\\\|\\"|[^"])*"', String.Double),
+            (r'`[^`]*`', String.Backtick),
+            (r'-?\d+(\.\d+)?', Number),
+            (r'(==|!=|<=|>=|:=)', Operator),  # Compound operators
+            (r'[=<>+\-*/%&|]', Operator),     # Single-character operators
+            (r'[\[\]{}(),.:;]', Punctuation),
+        ]
+    }
+
+__all__ = ['RegoLexer']
+
+
+
diff --git a/local_packages/pygments/lexers/rell.py b/local_packages/pygments/lexers/rell.py
new file mode 100644
index 0000000..8d54ed2
--- /dev/null
+++ b/local_packages/pygments/lexers/rell.py
@@ -0,0 +1,68 @@
+"""
+    pygments.lexers.rell
+    ~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for the Rell language.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, default, words
+from pygments.token import Comment, Keyword, Name, String, Number, \
+        Punctuation, Whitespace
+
+__all__ = ['RellLexer']
+
+
+class RellLexer(RegexLexer):
+    """
+    A Lexer for Rell.
+    """
+    name = 'Rell'
+    url = 'https://docs.chromia.com/rell/rell-intro'
+    aliases = ['rell']
+    filenames = ['*.rell']
+    mimetypes = ['text/x-rell']
+    version_added = '2.20'
+
+    ident = r'[a-zA-Z_][a-zA-Z0-9_]*'
+
+    tokens = {
+        'root': [
+            (words((
+                'big_integer', 'boolean', 'byte_array', 'decimal', 'gtv',
+                'integer', 'json', 'list', 'map', 'mutable', 'set', 'text',
+                'virtual'), suffix=r'\b'),
+             Keyword.Type),
+            (r'(false|true|null)\b', Keyword.Constant),
+            (r'(entity|enum|namespace|object|struct)\b', Keyword.Declaration),
+            (r'(function|operation|query)\b', Keyword.Declaration, 'function'),
+            (words((
+                'abstract', 'and', 'break', 'continue', 'create', 'delete',
+                'else', 'for', 'if', 'import', 'in', 'index', 'key', 'limit',
+                'module', 'not', 'offset', 'or', 'override', 'return', 'update',
+                'val', 'var', 'when', 'while'), suffix=r'\b'),
+             Keyword.Reserved),
+            (r'//.*?$', Comment.Single),
+            (r'/\*(.|\n|\r)*?\*/', Comment.Multiline),
+            (r'"(\\\\|\\"|[^"])*"', String.Double),
+            (r'\'(\\\\|\\\'|[^\\\'])*\'', String.Single),
+            (r'-?[0-9]*\.[0-9]+([eE][+-][0-9]+)?', Number.Float),
+            (r'-?[0-9]+([eE][+-][0-9]+|[lL])?', Number.Integer),
+            (r'x(\'[a-fA-F0-9]*\'|"[a-fA-F0-9]*")', String.Binary),
+            (r'(\.)([ \n\t\r]*)(' + ident + ')',
+                bygroups(Punctuation, Whitespace, Name.Attribute)),
+            (r'[{}():;,]+', Punctuation),
+            (r'[ \n\t\r]+', Whitespace),
+            (r'@[a-zA-Z_][a-zA-Z0-9_]*', Name.Decorator),
+            (r'[~^*!%&\[\]<>|+=/?\-@\$]', Punctuation.Marker),
+            (ident, Name),
+            (r'(\.)+', Punctuation),
+        ],
+        'function': [
+            (r'[ \n\t\r]+', Whitespace),
+            (ident, Name.Function, '#pop'),
+            default('#pop'),
+        ],
+    }
diff --git a/local_packages/pygments/lexers/resource.py b/local_packages/pygments/lexers/resource.py
new file mode 100644
index 0000000..eb6cb4a
--- /dev/null
+++ b/local_packages/pygments/lexers/resource.py
@@ -0,0 +1,83 @@
+"""
+    pygments.lexers.resource
+    ~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Lexer for resource definition files.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, bygroups, words
+from pygments.token import Comment, String, Number, Operator, Text, \
+    Keyword, Name
+
+__all__ = ['ResourceLexer']
+
+
+class ResourceLexer(RegexLexer):
+    """Lexer for ICU Resource bundles.
+    """
+    name = 'ResourceBundle'
+    aliases = ['resourcebundle', 'resource']
+    filenames = []
+    url = 'https://unicode-org.github.io/icu/userguide/locale/resources.html'
+    version_added = '2.0'
+
+    _types = (':table', ':array', ':string', ':bin', ':import', ':intvector',
+              ':int', ':alias')
+
+    flags = re.MULTILINE | re.IGNORECASE
+    tokens = {
+        'root': [
+            (r'//.*?$', Comment),
+            (r'"', String, 'string'),
+            (r'-?\d+', Number.Integer),
+            (r'[,{}]', Operator),
+            (r'([^\s{{:]+)(\s*)({}?)'.format('|'.join(_types)),
+             bygroups(Name, Text, Keyword)),
+            (r'\s+', Text),
+            (words(_types), Keyword),
+        ],
+        'string': [
+            (r'(\\x[0-9a-f]{2}|\\u[0-9a-f]{4}|\\U00[0-9a-f]{6}|'
+             r'\\[0-7]{1,3}|\\c.|\\[abtnvfre\'"?\\]|\\\{|[^"{\\])+', String),
+            (r'\{', String.Escape, 'msgname'),
+            (r'"', String, '#pop')
+        ],
+        'msgname': [
+            (r'([^{},]+)(\s*)', bygroups(Name, String.Escape), ('#pop', 'message'))
+        ],
+        'message': [
+            (r'\{', String.Escape, 'msgname'),
+            (r'\}', String.Escape, '#pop'),
+            (r'(,)(\s*)([a-z]+)(\s*\})',
+             bygroups(Operator, String.Escape, Keyword, String.Escape), '#pop'),
+            (r'(,)(\s*)([a-z]+)(\s*)(,)(\s*)(offset)(\s*)(:)(\s*)(-?\d+)(\s*)',
+             bygroups(Operator, String.Escape, Keyword, String.Escape, Operator,
+                      String.Escape, Operator.Word, String.Escape, Operator,
+                      String.Escape, Number.Integer, String.Escape), 'choice'),
+            (r'(,)(\s*)([a-z]+)(\s*)(,)(\s*)',
+             bygroups(Operator, String.Escape, Keyword, String.Escape, Operator,
+                      String.Escape), 'choice'),
+            (r'\s+', String.Escape)
+        ],
+        'choice': [
+            (r'(=|<|>|<=|>=|!=)(-?\d+)(\s*\{)',
+             bygroups(Operator, Number.Integer, String.Escape), 'message'),
+            (r'([a-z]+)(\s*\{)', bygroups(Keyword.Type, String.Escape), 'str'),
+            (r'\}', String.Escape, ('#pop', '#pop')),
+            (r'\s+', String.Escape)
+        ],
+        'str': [
+            (r'\}', String.Escape, '#pop'),
+            (r'\{', String.Escape, 'msgname'),
+            (r'[^{}]+', String)
+        ]
+    }
+
+    def analyse_text(text):
+        if text.startswith('root:table'):
+            return 1.0
diff --git a/local_packages/pygments/lexers/ride.py b/local_packages/pygments/lexers/ride.py
new file mode 100644
index 0000000..1ed8f38
--- /dev/null
+++ b/local_packages/pygments/lexers/ride.py
@@ -0,0 +1,138 @@
+"""
+    pygments.lexers.ride
+    ~~~~~~~~~~~~~~~~~~~~
+
+    Lexer for the Ride programming language.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words, include
+from pygments.token import Comment, Keyword, Name, Number, Punctuation, \
+    String, Text
+
+__all__ = ['RideLexer']
+
+
+class RideLexer(RegexLexer):
+    """
+    For Ride source code.
+    """
+
+    name = 'Ride'
+    aliases = ['ride']
+    filenames = ['*.ride']
+    mimetypes = ['text/x-ride']
+    url = 'https://docs.waves.tech/en/ride'
+    version_added = '2.6'
+
+    validName = r'[a-zA-Z_][a-zA-Z0-9_\']*'
+
+    builtinOps = (
+        '||', '|', '>=', '>', '==', '!',
+        '=', '<=', '<', '::', ':+', ':', '!=', '/',
+        '.', '=>', '-', '+', '*', '&&', '%', '++',
+    )
+
+    globalVariablesName = (
+        'NOALG', 'MD5', 'SHA1', 'SHA224', 'SHA256', 'SHA384', 'SHA512',
+        'SHA3224', 'SHA3256', 'SHA3384', 'SHA3512', 'nil', 'this', 'unit',
+        'height', 'lastBlock', 'Buy', 'Sell', 'CEILING', 'FLOOR', 'DOWN',
+        'HALFDOWN', 'HALFEVEN', 'HALFUP', 'UP',
+    )
+
+    typesName = (
+        'Unit', 'Int', 'Boolean', 'ByteVector', 'String', 'Address', 'Alias',
+        'Transfer', 'AssetPair', 'DataEntry', 'Order', 'Transaction',
+        'GenesisTransaction', 'PaymentTransaction', 'ReissueTransaction',
+        'BurnTransaction', 'MassTransferTransaction', 'ExchangeTransaction',
+        'TransferTransaction', 'SetAssetScriptTransaction',
+        'InvokeScriptTransaction', 'IssueTransaction', 'LeaseTransaction',
+        'LeaseCancelTransaction', 'CreateAliasTransaction',
+        'SetScriptTransaction', 'SponsorFeeTransaction', 'DataTransaction',
+        'WriteSet', 'AttachedPayment', 'ScriptTransfer', 'TransferSet',
+        'ScriptResult', 'Invocation', 'Asset', 'BlockInfo', 'Issue', 'Reissue',
+        'Burn', 'NoAlg', 'Md5', 'Sha1', 'Sha224', 'Sha256', 'Sha384', 'Sha512',
+        'Sha3224', 'Sha3256', 'Sha3384', 'Sha3512', 'BinaryEntry',
+        'BooleanEntry', 'IntegerEntry', 'StringEntry', 'List', 'Ceiling',
+        'Down', 'Floor', 'HalfDown', 'HalfEven', 'HalfUp', 'Up',
+    )
+
+    functionsName = (
+        'fraction', 'size', 'toBytes', 'take', 'drop', 'takeRight', 'dropRight',
+        'toString', 'isDefined', 'extract', 'throw', 'getElement', 'value',
+        'cons', 'toUtf8String', 'toInt', 'indexOf', 'lastIndexOf', 'split',
+        'parseInt', 'parseIntValue', 'keccak256', 'blake2b256', 'sha256',
+        'sigVerify', 'toBase58String', 'fromBase58String', 'toBase64String',
+        'fromBase64String', 'transactionById', 'transactionHeightById',
+        'getInteger', 'getBoolean', 'getBinary', 'getString',
+        'addressFromPublicKey', 'addressFromString', 'addressFromRecipient',
+        'assetBalance', 'wavesBalance', 'getIntegerValue', 'getBooleanValue',
+        'getBinaryValue', 'getStringValue', 'addressFromStringValue',
+        'assetInfo', 'rsaVerify', 'checkMerkleProof', 'median',
+        'valueOrElse', 'valueOrErrorMessage', 'contains', 'log', 'pow',
+        'toBase16String', 'fromBase16String', 'blockInfoByHeight',
+        'transferTransactionById',
+    )
+
+    reservedWords = words((
+        'match', 'case', 'else', 'func', 'if',
+        'let', 'then', '@Callable', '@Verifier',
+    ), suffix=r'\b')
+
+    tokens = {
+        'root': [
+            # Comments
+            (r'#.*', Comment.Single),
+            # Whitespace
+            (r'\s+', Text),
+            # Strings
+            (r'"', String, 'doublequote'),
+            (r'utf8\'', String, 'utf8quote'),
+            (r'base(58|64|16)\'', String, 'singlequote'),
+            # Keywords
+            (reservedWords, Keyword.Reserved),
+            (r'\{-#.*?#-\}', Keyword.Reserved),
+            (r'FOLD<\d+>', Keyword.Reserved),
+            # Types
+            (words(typesName), Keyword.Type),
+            # Main
+            # (specialName, Keyword.Reserved),
+            # Prefix Operators
+            (words(builtinOps, prefix=r'\(', suffix=r'\)'), Name.Function),
+            # Infix Operators
+            (words(builtinOps), Name.Function),
+            (words(globalVariablesName), Name.Function),
+            (words(functionsName), Name.Function),
+            # Numbers
+            include('numbers'),
+            # Variable Names
+            (validName, Name.Variable),
+            # Parens
+            (r'[,()\[\]{}]', Punctuation),
+        ],
+
+        'doublequote': [
+            (r'\\u[0-9a-fA-F]{4}', String.Escape),
+            (r'\\[nrfvb\\"]', String.Escape),
+            (r'[^"]', String),
+            (r'"', String, '#pop'),
+        ],
+
+        'utf8quote': [
+            (r'\\u[0-9a-fA-F]{4}', String.Escape),
+            (r'\\[nrfvb\\\']', String.Escape),
+            (r'[^\']', String),
+            (r'\'', String, '#pop'),
+        ],
+
+        'singlequote': [
+            (r'[^\']', String),
+            (r'\'', String, '#pop'),
+        ],
+
+        'numbers': [
+            (r'_?\d+', Number.Integer),
+        ],
+    }
diff --git a/local_packages/pygments/lexers/rita.py b/local_packages/pygments/lexers/rita.py
new file mode 100644
index 0000000..99a574c
--- /dev/null
+++ b/local_packages/pygments/lexers/rita.py
@@ -0,0 +1,42 @@
+"""
+    pygments.lexers.rita
+    ~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for RITA language
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer
+from pygments.token import Comment, Operator, Keyword, Name, Literal, \
+    Punctuation, Whitespace
+
+__all__ = ['RitaLexer']
+
+
+class RitaLexer(RegexLexer):
+    """
+    Lexer for RITA.
+    """
+    name = 'Rita'
+    url = 'https://github.com/zaibacu/rita-dsl'
+    filenames = ['*.rita']
+    aliases = ['rita']
+    mimetypes = ['text/rita']
+    version_added = '2.11'
+
+    tokens = {
+        'root': [
+            (r'\n', Whitespace),
+            (r'\s+', Whitespace),
+            (r'#(.*?)\n', Comment.Single),
+            (r'@(.*?)\n', Operator),  # Yes, whole line as an operator
+            (r'"(\w|\d|\s|(\\")|[\'_\-./,\?\!])+?"', Literal),
+            (r'\'(\w|\d|\s|(\\\')|["_\-./,\?\!])+?\'', Literal),
+            (r'([A-Z_]+)', Keyword),
+            (r'([a-z0-9_]+)', Name),
+            (r'((->)|[!?+*|=])', Operator),
+            (r'[\(\),\{\}]', Punctuation)
+        ]
+    }
diff --git a/local_packages/pygments/lexers/rnc.py b/local_packages/pygments/lexers/rnc.py
new file mode 100644
index 0000000..7ddd33e
--- /dev/null
+++ b/local_packages/pygments/lexers/rnc.py
@@ -0,0 +1,66 @@
+"""
+    pygments.lexers.rnc
+    ~~~~~~~~~~~~~~~~~~~
+
+    Lexer for Relax-NG Compact syntax
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+    Punctuation
+
+__all__ = ['RNCCompactLexer']
+
+
+class RNCCompactLexer(RegexLexer):
+    """
+    For RelaxNG-compact syntax.
+    """
+
+    name = 'Relax-NG Compact'
+    url = 'http://relaxng.org'
+    aliases = ['rng-compact', 'rnc']
+    filenames = ['*.rnc']
+    version_added = '2.2'
+
+    tokens = {
+        'root': [
+            (r'namespace\b', Keyword.Namespace),
+            (r'(?:default|datatypes)\b', Keyword.Declaration),
+            (r'##.*$', Comment.Preproc),
+            (r'#.*$', Comment.Single),
+            (r'"[^"]*"', String.Double),
+            # TODO single quoted strings and escape sequences outside of
+            # double-quoted strings
+            (r'(?:element|attribute|mixed)\b', Keyword.Declaration, 'variable'),
+            (r'(text\b|xsd:[^ ]+)', Keyword.Type, 'maybe_xsdattributes'),
+            (r'[,?&*=|~]|>>', Operator),
+            (r'[(){}]', Punctuation),
+            (r'.', Text),
+        ],
+
+        # a variable has been declared using `element` or `attribute`
+        'variable': [
+            (r'[^{]+', Name.Variable),
+            (r'\{', Punctuation, '#pop'),
+        ],
+
+        # after an xsd: declaration there may be attributes
+        'maybe_xsdattributes': [
+            (r'\{', Punctuation, 'xsdattributes'),
+            (r'\}', Punctuation, '#pop'),
+            (r'.', Text),
+        ],
+
+        # attributes take the form { key1 = value1 key2 = value2 ... }
+        'xsdattributes': [
+            (r'[^ =}]', Name.Attribute),
+            (r'=', Operator),
+            (r'"[^"]*"', String.Double),
+            (r'\}', Punctuation, '#pop'),
+            (r'.', Text),
+        ],
+    }
diff --git a/local_packages/pygments/lexers/roboconf.py b/local_packages/pygments/lexers/roboconf.py
new file mode 100644
index 0000000..bf0570c
--- /dev/null
+++ b/local_packages/pygments/lexers/roboconf.py
@@ -0,0 +1,81 @@
+"""
+    pygments.lexers.roboconf
+    ~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for Roboconf DSL.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words, re
+from pygments.token import Text, Operator, Keyword, Name, Comment
+
+__all__ = ['RoboconfGraphLexer', 'RoboconfInstancesLexer']
+
+
+class RoboconfGraphLexer(RegexLexer):
+    """
+    Lexer for Roboconf graph files.
+    """
+    name = 'Roboconf Graph'
+    aliases = ['roboconf-graph']
+    filenames = ['*.graph']
+    url = 'https://roboconf.github.io/en/user-guide/graph-definition.html'
+    version_added = '2.1'
+
+    flags = re.IGNORECASE | re.MULTILINE
+    tokens = {
+        'root': [
+            # Skip white spaces
+            (r'\s+', Text),
+
+            # There is one operator
+            (r'=', Operator),
+
+            # Keywords
+            (words(('facet', 'import'), suffix=r'\s*\b', prefix=r'\b'), Keyword),
+            (words((
+                'installer', 'extends', 'exports', 'imports', 'facets',
+                'children'), suffix=r'\s*:?', prefix=r'\b'), Name),
+
+            # Comments
+            (r'#.*\n', Comment),
+
+            # Default
+            (r'[^#]', Text),
+            (r'.*\n', Text)
+        ]
+    }
+
+
+class RoboconfInstancesLexer(RegexLexer):
+    """
+    Lexer for Roboconf instances files.
+    """
+    name = 'Roboconf Instances'
+    aliases = ['roboconf-instances']
+    filenames = ['*.instances']
+    url = 'https://roboconf.github.io'
+    version_added = '2.1'
+
+    flags = re.IGNORECASE | re.MULTILINE
+    tokens = {
+        'root': [
+
+            # Skip white spaces
+            (r'\s+', Text),
+
+            # Keywords
+            (words(('instance of', 'import'), suffix=r'\s*\b', prefix=r'\b'), Keyword),
+            (words(('name', 'count'), suffix=r's*:?', prefix=r'\b'), Name),
+            (r'\s*[\w.-]+\s*:', Name),
+
+            # Comments
+            (r'#.*\n', Comment),
+
+            # Default
+            (r'[^#]', Text),
+            (r'.*\n', Text)
+        ]
+    }
diff --git a/local_packages/pygments/lexers/robotframework.py b/local_packages/pygments/lexers/robotframework.py
new file mode 100644
index 0000000..a481136
--- /dev/null
+++ b/local_packages/pygments/lexers/robotframework.py
@@ -0,0 +1,551 @@
+"""
+    pygments.lexers.robotframework
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Lexer for Robot Framework.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+#  Copyright 2012 Nokia Siemens Networks Oyj
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+import re
+
+from pygments.lexer import Lexer
+from pygments.token import Token
+
+__all__ = ['RobotFrameworkLexer']
+
+
+HEADING = Token.Generic.Heading
+SETTING = Token.Keyword.Namespace
+IMPORT = Token.Name.Namespace
+TC_KW_NAME = Token.Generic.Subheading
+KEYWORD = Token.Name.Function
+ARGUMENT = Token.String
+VARIABLE = Token.Name.Variable
+COMMENT = Token.Comment
+SEPARATOR = Token.Punctuation
+SYNTAX = Token.Punctuation
+GHERKIN = Token.Generic.Emph
+ERROR = Token.Error
+
+
+def normalize(string, remove=''):
+    string = string.lower()
+    for char in remove + ' ':
+        if char in string:
+            string = string.replace(char, '')
+    return string
+
+
+class RobotFrameworkLexer(Lexer):
+    """
+    For Robot Framework test data.
+
+    Supports both space and pipe separated plain text formats.
+    """
+    name = 'RobotFramework'
+    url = 'http://robotframework.org'
+    aliases = ['robotframework']
+    filenames = ['*.robot', '*.resource']
+    mimetypes = ['text/x-robotframework']
+    version_added = '1.6'
+
+    def __init__(self, **options):
+        options['tabsize'] = 2
+        options['encoding'] = 'UTF-8'
+        Lexer.__init__(self, **options)
+
+    def get_tokens_unprocessed(self, text):
+        row_tokenizer = RowTokenizer()
+        var_tokenizer = VariableTokenizer()
+        index = 0
+        for row in text.splitlines():
+            for value, token in row_tokenizer.tokenize(row):
+                for value, token in var_tokenizer.tokenize(value, token):
+                    if value:
+                        yield index, token, str(value)
+                        index += len(value)
+
+
+class VariableTokenizer:
+
+    def tokenize(self, string, token):
+        var = VariableSplitter(string, identifiers='$@%&')
+        if var.start < 0 or token in (COMMENT, ERROR):
+            yield string, token
+            return
+        for value, token in self._tokenize(var, string, token):
+            if value:
+                yield value, token
+
+    def _tokenize(self, var, string, orig_token):
+        before = string[:var.start]
+        yield before, orig_token
+        yield var.identifier + '{', SYNTAX
+        yield from self.tokenize(var.base, VARIABLE)
+        yield '}', SYNTAX
+        if var.index is not None:
+            yield '[', SYNTAX
+            yield from self.tokenize(var.index, VARIABLE)
+            yield ']', SYNTAX
+        yield from self.tokenize(string[var.end:], orig_token)
+
+
+class RowTokenizer:
+
+    def __init__(self):
+        self._table = UnknownTable()
+        self._splitter = RowSplitter()
+        testcases = TestCaseTable()
+        settings = SettingTable(testcases.set_default_template)
+        variables = VariableTable()
+        keywords = KeywordTable()
+        self._tables = {'settings': settings, 'setting': settings,
+                        'metadata': settings,
+                        'variables': variables, 'variable': variables,
+                        'testcases': testcases, 'testcase': testcases,
+                        'tasks': testcases, 'task': testcases,
+                        'keywords': keywords, 'keyword': keywords,
+                        'userkeywords': keywords, 'userkeyword': keywords}
+
+    def tokenize(self, row):
+        commented = False
+        heading = False
+        for index, value in enumerate(self._splitter.split(row)):
+            # First value, and every second after that, is a separator.
+            index, separator = divmod(index-1, 2)
+            if value.startswith('#'):
+                commented = True
+            elif index == 0 and value.startswith('*'):
+                self._table = self._start_table(value)
+                heading = True
+            yield from self._tokenize(value, index, commented,
+                                      separator, heading)
+        self._table.end_row()
+
+    def _start_table(self, header):
+        name = normalize(header, remove='*')
+        return self._tables.get(name, UnknownTable())
+
+    def _tokenize(self, value, index, commented, separator, heading):
+        if commented:
+            yield value, COMMENT
+        elif separator:
+            yield value, SEPARATOR
+        elif heading:
+            yield value, HEADING
+        else:
+            yield from self._table.tokenize(value, index)
+
+
+class RowSplitter:
+    _space_splitter = re.compile('( {2,})')
+    _pipe_splitter = re.compile(r'((?:^| +)\|(?: +|$))')
+
+    def split(self, row):
+        splitter = (row.startswith('| ') and self._split_from_pipes
+                    or self._split_from_spaces)
+        yield from splitter(row)
+        yield '\n'
+
+    def _split_from_spaces(self, row):
+        yield ''  # Start with (pseudo)separator similarly as with pipes
+        yield from self._space_splitter.split(row)
+
+    def _split_from_pipes(self, row):
+        _, separator, rest = self._pipe_splitter.split(row, 1)
+        yield separator
+        while self._pipe_splitter.search(rest):
+            cell, separator, rest = self._pipe_splitter.split(rest, 1)
+            yield cell
+            yield separator
+        yield rest
+
+
+class Tokenizer:
+    _tokens = None
+
+    def __init__(self):
+        self._index = 0
+
+    def tokenize(self, value):
+        values_and_tokens = self._tokenize(value, self._index)
+        self._index += 1
+        if isinstance(values_and_tokens, type(Token)):
+            values_and_tokens = [(value, values_and_tokens)]
+        return values_and_tokens
+
+    def _tokenize(self, value, index):
+        index = min(index, len(self._tokens) - 1)
+        return self._tokens[index]
+
+    def _is_assign(self, value):
+        if value.endswith('='):
+            value = value[:-1].strip()
+        var = VariableSplitter(value, identifiers='$@&')
+        return var.start == 0 and var.end == len(value)
+
+
+class Comment(Tokenizer):
+    _tokens = (COMMENT,)
+
+
+class Setting(Tokenizer):
+    _tokens = (SETTING, ARGUMENT)
+    _keyword_settings = ('suitesetup', 'suiteprecondition', 'suiteteardown',
+                         'suitepostcondition', 'testsetup', 'tasksetup', 'testprecondition',
+                         'testteardown','taskteardown', 'testpostcondition', 'testtemplate', 'tasktemplate')
+    _import_settings = ('library', 'resource', 'variables')
+    _other_settings = ('documentation', 'metadata', 'forcetags', 'defaulttags',
+                       'testtimeout','tasktimeout')
+    _custom_tokenizer = None
+
+    def __init__(self, template_setter=None):
+        Tokenizer.__init__(self)
+        self._template_setter = template_setter
+
+    def _tokenize(self, value, index):
+        if index == 1 and self._template_setter:
+            self._template_setter(value)
+        if index == 0:
+            normalized = normalize(value)
+            if normalized in self._keyword_settings:
+                self._custom_tokenizer = KeywordCall(support_assign=False)
+            elif normalized in self._import_settings:
+                self._custom_tokenizer = ImportSetting()
+            elif normalized not in self._other_settings:
+                return ERROR
+        elif self._custom_tokenizer:
+            return self._custom_tokenizer.tokenize(value)
+        return Tokenizer._tokenize(self, value, index)
+
+
+class ImportSetting(Tokenizer):
+    _tokens = (IMPORT, ARGUMENT)
+
+
+class TestCaseSetting(Setting):
+    _keyword_settings = ('setup', 'precondition', 'teardown', 'postcondition',
+                         'template')
+    _import_settings = ()
+    _other_settings = ('documentation', 'tags', 'timeout')
+
+    def _tokenize(self, value, index):
+        if index == 0:
+            type = Setting._tokenize(self, value[1:-1], index)
+            return [('[', SYNTAX), (value[1:-1], type), (']', SYNTAX)]
+        return Setting._tokenize(self, value, index)
+
+
+class KeywordSetting(TestCaseSetting):
+    _keyword_settings = ('teardown',)
+    _other_settings = ('documentation', 'arguments', 'return', 'timeout', 'tags')
+
+
+class Variable(Tokenizer):
+    _tokens = (SYNTAX, ARGUMENT)
+
+    def _tokenize(self, value, index):
+        if index == 0 and not self._is_assign(value):
+            return ERROR
+        return Tokenizer._tokenize(self, value, index)
+
+
+class KeywordCall(Tokenizer):
+    _tokens = (KEYWORD, ARGUMENT)
+
+    def __init__(self, support_assign=True):
+        Tokenizer.__init__(self)
+        self._keyword_found = not support_assign
+        self._assigns = 0
+
+    def _tokenize(self, value, index):
+        if not self._keyword_found and self._is_assign(value):
+            self._assigns += 1
+            return SYNTAX  # VariableTokenizer tokenizes this later.
+        if self._keyword_found:
+            return Tokenizer._tokenize(self, value, index - self._assigns)
+        self._keyword_found = True
+        return GherkinTokenizer().tokenize(value, KEYWORD)
+
+
+class GherkinTokenizer:
+    _gherkin_prefix = re.compile('^(Given|When|Then|And|But) ', re.IGNORECASE)
+
+    def tokenize(self, value, token):
+        match = self._gherkin_prefix.match(value)
+        if not match:
+            return [(value, token)]
+        end = match.end()
+        return [(value[:end], GHERKIN), (value[end:], token)]
+
+
+class TemplatedKeywordCall(Tokenizer):
+    _tokens = (ARGUMENT,)
+
+
+class ForLoop(Tokenizer):
+
+    def __init__(self):
+        Tokenizer.__init__(self)
+        self._in_arguments = False
+
+    def _tokenize(self, value, index):
+        token = self._in_arguments and ARGUMENT or SYNTAX
+        if value.upper() in ('IN', 'IN RANGE'):
+            self._in_arguments = True
+        return token
+
+
+class _Table:
+    _tokenizer_class = None
+
+    def __init__(self, prev_tokenizer=None):
+        self._tokenizer = self._tokenizer_class()
+        self._prev_tokenizer = prev_tokenizer
+        self._prev_values_on_row = []
+
+    def tokenize(self, value, index):
+        if self._continues(value, index):
+            self._tokenizer = self._prev_tokenizer
+            yield value, SYNTAX
+        else:
+            yield from self._tokenize(value, index)
+        self._prev_values_on_row.append(value)
+
+    def _continues(self, value, index):
+        return value == '...' and all(self._is_empty(t)
+                                      for t in self._prev_values_on_row)
+
+    def _is_empty(self, value):
+        return value in ('', '\\')
+
+    def _tokenize(self, value, index):
+        return self._tokenizer.tokenize(value)
+
+    def end_row(self):
+        self.__init__(prev_tokenizer=self._tokenizer)
+
+
+class UnknownTable(_Table):
+    _tokenizer_class = Comment
+
+    def _continues(self, value, index):
+        return False
+
+
+class VariableTable(_Table):
+    _tokenizer_class = Variable
+
+
+class SettingTable(_Table):
+    _tokenizer_class = Setting
+
+    def __init__(self, template_setter, prev_tokenizer=None):
+        _Table.__init__(self, prev_tokenizer)
+        self._template_setter = template_setter
+
+    def _tokenize(self, value, index):
+        if index == 0 and normalize(value) == 'testtemplate':
+            self._tokenizer = Setting(self._template_setter)
+        return _Table._tokenize(self, value, index)
+
+    def end_row(self):
+        self.__init__(self._template_setter, prev_tokenizer=self._tokenizer)
+
+
+class TestCaseTable(_Table):
+    _setting_class = TestCaseSetting
+    _test_template = None
+    _default_template = None
+
+    @property
+    def _tokenizer_class(self):
+        if self._test_template or (self._default_template and
+                                   self._test_template is not False):
+            return TemplatedKeywordCall
+        return KeywordCall
+
+    def _continues(self, value, index):
+        return index > 0 and _Table._continues(self, value, index)
+
+    def _tokenize(self, value, index):
+        if index == 0:
+            if value:
+                self._test_template = None
+            return GherkinTokenizer().tokenize(value, TC_KW_NAME)
+        if index == 1 and self._is_setting(value):
+            if self._is_template(value):
+                self._test_template = False
+                self._tokenizer = self._setting_class(self.set_test_template)
+            else:
+                self._tokenizer = self._setting_class()
+        if index == 1 and self._is_for_loop(value):
+            self._tokenizer = ForLoop()
+        if index == 1 and self._is_empty(value):
+            return [(value, SYNTAX)]
+        return _Table._tokenize(self, value, index)
+
+    def _is_setting(self, value):
+        return value.startswith('[') and value.endswith(']')
+
+    def _is_template(self, value):
+        return normalize(value) == '[template]'
+
+    def _is_for_loop(self, value):
+        return value.startswith(':') and normalize(value, remove=':') == 'for'
+
+    def set_test_template(self, template):
+        self._test_template = self._is_template_set(template)
+
+    def set_default_template(self, template):
+        self._default_template = self._is_template_set(template)
+
+    def _is_template_set(self, template):
+        return normalize(template) not in ('', '\\', 'none', '${empty}')
+
+
+class KeywordTable(TestCaseTable):
+    _tokenizer_class = KeywordCall
+    _setting_class = KeywordSetting
+
+    def _is_template(self, value):
+        return False
+
+
+# Following code copied directly from Robot Framework 2.7.5.
+
+class VariableSplitter:
+
+    def __init__(self, string, identifiers):
+        self.identifier = None
+        self.base = None
+        self.index = None
+        self.start = -1
+        self.end = -1
+        self._identifiers = identifiers
+        self._may_have_internal_variables = False
+        try:
+            self._split(string)
+        except ValueError:
+            pass
+        else:
+            self._finalize()
+
+    def get_replaced_base(self, variables):
+        if self._may_have_internal_variables:
+            return variables.replace_string(self.base)
+        return self.base
+
+    def _finalize(self):
+        self.identifier = self._variable_chars[0]
+        self.base = ''.join(self._variable_chars[2:-1])
+        self.end = self.start + len(self._variable_chars)
+        if self._has_list_or_dict_variable_index():
+            self.index = ''.join(self._list_and_dict_variable_index_chars[1:-1])
+            self.end += len(self._list_and_dict_variable_index_chars)
+
+    def _has_list_or_dict_variable_index(self):
+        return self._list_and_dict_variable_index_chars\
+        and self._list_and_dict_variable_index_chars[-1] == ']'
+
+    def _split(self, string):
+        start_index, max_index = self._find_variable(string)
+        self.start = start_index
+        self._open_curly = 1
+        self._state = self._variable_state
+        self._variable_chars = [string[start_index], '{']
+        self._list_and_dict_variable_index_chars = []
+        self._string = string
+        start_index += 2
+        for index, char in enumerate(string[start_index:]):
+            index += start_index  # Giving start to enumerate only in Py 2.6+
+            try:
+                self._state(char, index)
+            except StopIteration:
+                return
+            if index  == max_index and not self._scanning_list_variable_index():
+                return
+
+    def _scanning_list_variable_index(self):
+        return self._state in [self._waiting_list_variable_index_state,
+                               self._list_variable_index_state]
+
+    def _find_variable(self, string):
+        max_end_index = string.rfind('}')
+        if max_end_index == -1:
+            raise ValueError('No variable end found')
+        if self._is_escaped(string, max_end_index):
+            return self._find_variable(string[:max_end_index])
+        start_index = self._find_start_index(string, 1, max_end_index)
+        if start_index == -1:
+            raise ValueError('No variable start found')
+        return start_index, max_end_index
+
+    def _find_start_index(self, string, start, end):
+        index = string.find('{', start, end) - 1
+        if index < 0:
+            return -1
+        if self._start_index_is_ok(string, index):
+            return index
+        return self._find_start_index(string, index+2, end)
+
+    def _start_index_is_ok(self, string, index):
+        return string[index] in self._identifiers\
+        and not self._is_escaped(string, index)
+
+    def _is_escaped(self, string, index):
+        escaped = False
+        while index > 0 and string[index-1] == '\\':
+            index -= 1
+            escaped = not escaped
+        return escaped
+
+    def _variable_state(self, char, index):
+        self._variable_chars.append(char)
+        if char == '}' and not self._is_escaped(self._string, index):
+            self._open_curly -= 1
+            if self._open_curly == 0:
+                if not self._is_list_or_dict_variable():
+                    raise StopIteration
+                self._state = self._waiting_list_variable_index_state
+        elif char in self._identifiers:
+            self._state = self._internal_variable_start_state
+
+    def _is_list_or_dict_variable(self):
+        return self._variable_chars[0] in ('@','&')
+
+    def _internal_variable_start_state(self, char, index):
+        self._state = self._variable_state
+        if char == '{':
+            self._variable_chars.append(char)
+            self._open_curly += 1
+            self._may_have_internal_variables = True
+        else:
+            self._variable_state(char, index)
+
+    def _waiting_list_variable_index_state(self, char, index):
+        if char != '[':
+            raise StopIteration
+        self._list_and_dict_variable_index_chars.append(char)
+        self._state = self._list_variable_index_state
+
+    def _list_variable_index_state(self, char, index):
+        self._list_and_dict_variable_index_chars.append(char)
+        if char == ']':
+            raise StopIteration
diff --git a/local_packages/pygments/lexers/ruby.py b/local_packages/pygments/lexers/ruby.py
new file mode 100644
index 0000000..6832652
--- /dev/null
+++ b/local_packages/pygments/lexers/ruby.py
@@ -0,0 +1,518 @@
+"""
+    pygments.lexers.ruby
+    ~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for Ruby and related languages.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import Lexer, RegexLexer, ExtendedRegexLexer, include, \
+    bygroups, default, LexerContext, do_insertions, words, line_re
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+    Number, Punctuation, Error, Generic, Whitespace
+from pygments.util import shebang_matches
+
+__all__ = ['RubyLexer', 'RubyConsoleLexer', 'FancyLexer']
+
+
+RUBY_OPERATORS = (
+    '*', '**', '-', '+', '-@', '+@', '/', '%', '&', '|', '^', '`', '~',
+    '[]', '[]=', '<<', '>>', '<', '<>', '<=>', '>', '>=', '==', '==='
+)
+
+
+class RubyLexer(ExtendedRegexLexer):
+    """
+    For Ruby source code.
+    """
+
+    name = 'Ruby'
+    url = 'http://www.ruby-lang.org'
+    aliases = ['ruby', 'rb', 'duby']
+    filenames = ['*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec',
+                 '*.rbx', '*.duby', 'Gemfile', 'Vagrantfile']
+    mimetypes = ['text/x-ruby', 'application/x-ruby']
+    version_added = ''
+
+    flags = re.DOTALL | re.MULTILINE
+
+    def heredoc_callback(self, match, ctx):
+        # okay, this is the hardest part of parsing Ruby...
+        # match: 1 = <<[-~]?, 2 = quote? 3 = name 4 = quote? 5 = rest of line
+
+        start = match.start(1)
+        yield start, Operator, match.group(1)        # <<[-~]?
+        yield match.start(2), String.Heredoc, match.group(2)   # quote ", ', `
+        yield match.start(3), String.Delimiter, match.group(3) # heredoc name
+        yield match.start(4), String.Heredoc, match.group(4)   # quote again
+
+        heredocstack = ctx.__dict__.setdefault('heredocstack', [])
+        outermost = not bool(heredocstack)
+        heredocstack.append((match.group(1) in ('<<-', '<<~'), match.group(3)))
+
+        ctx.pos = match.start(5)
+        ctx.end = match.end(5)
+        # this may find other heredocs, so limit the recursion depth
+        if len(heredocstack) < 100:
+            yield from self.get_tokens_unprocessed(context=ctx)
+        else:
+            yield ctx.pos, String.Heredoc, match.group(5)
+        ctx.pos = match.end()
+
+        if outermost:
+            # this is the outer heredoc again, now we can process them all
+            for tolerant, hdname in heredocstack:
+                lines = []
+                for match in line_re.finditer(ctx.text, ctx.pos):
+                    if tolerant:
+                        check = match.group().strip()
+                    else:
+                        check = match.group().rstrip()
+                    if check == hdname:
+                        for amatch in lines:
+                            yield amatch.start(), String.Heredoc, amatch.group()
+                        yield match.start(), String.Delimiter, match.group()
+                        ctx.pos = match.end()
+                        break
+                    else:
+                        lines.append(match)
+                else:
+                    # end of heredoc not found -- error!
+                    for amatch in lines:
+                        yield amatch.start(), Error, amatch.group()
+            ctx.end = len(ctx.text)
+            del heredocstack[:]
+
+    def gen_rubystrings_rules():
+        def intp_regex_callback(self, match, ctx):
+            yield match.start(1), String.Regex, match.group(1)  # begin
+            nctx = LexerContext(match.group(3), 0, ['interpolated-regex'])
+            for i, t, v in self.get_tokens_unprocessed(context=nctx):
+                yield match.start(3)+i, t, v
+            yield match.start(4), String.Regex, match.group(4)  # end[mixounse]*
+            ctx.pos = match.end()
+
+        def intp_string_callback(self, match, ctx):
+            yield match.start(1), String.Other, match.group(1)
+            nctx = LexerContext(match.group(3), 0, ['interpolated-string'])
+            for i, t, v in self.get_tokens_unprocessed(context=nctx):
+                yield match.start(3)+i, t, v
+            yield match.start(4), String.Other, match.group(4)  # end
+            ctx.pos = match.end()
+
+        states = {}
+        states['strings'] = [
+            # easy ones
+            (r'\:@{0,2}[a-zA-Z_]\w*[!?]?', String.Symbol),
+            (words(RUBY_OPERATORS, prefix=r'\:@{0,2}'), String.Symbol),
+            (r":'(\\\\|\\[^\\]|[^'\\])*'", String.Symbol),
+            (r':"', String.Symbol, 'simple-sym'),
+            (r'([a-zA-Z_]\w*)(:)(?!:)',
+             bygroups(String.Symbol, Punctuation)),  # Since Ruby 1.9
+            (r'"', String.Double, 'simple-string-double'),
+            (r"'", String.Single, 'simple-string-single'),
+            (r'(?', '<>', 'ab'):
+            states[name+'-intp-string'] = [
+                (r'\\[\\' + bracecc + ']', String.Other),
+                (lbrace, String.Other, '#push'),
+                (rbrace, String.Other, '#pop'),
+                include('string-intp-escaped'),
+                (r'[\\#' + bracecc + ']', String.Other),
+                (r'[^\\#' + bracecc + ']+', String.Other),
+            ]
+            states['strings'].append((r'%[QWx]?' + lbrace, String.Other,
+                                      name+'-intp-string'))
+            states[name+'-string'] = [
+                (r'\\[\\' + bracecc + ']', String.Other),
+                (lbrace, String.Other, '#push'),
+                (rbrace, String.Other, '#pop'),
+                (r'[\\#' + bracecc + ']', String.Other),
+                (r'[^\\#' + bracecc + ']+', String.Other),
+            ]
+            states['strings'].append((r'%[qsw]' + lbrace, String.Other,
+                                      name+'-string'))
+            states[name+'-regex'] = [
+                (r'\\[\\' + bracecc + ']', String.Regex),
+                (lbrace, String.Regex, '#push'),
+                (rbrace + '[mixounse]*', String.Regex, '#pop'),
+                include('string-intp'),
+                (r'[\\#' + bracecc + ']', String.Regex),
+                (r'[^\\#' + bracecc + ']+', String.Regex),
+            ]
+            states['strings'].append((r'%r' + lbrace, String.Regex,
+                                      name+'-regex'))
+
+        # these must come after %!
+        states['strings'] += [
+            # %r regex
+            (r'(%r([\W_]))((?:\\\2|(?!\2).)*)(\2[mixounse]*)',
+             intp_regex_callback),
+            # regular fancy strings with qsw
+            (r'%[qsw]([\W_])((?:\\\1|(?!\1).)*)\1', String.Other),
+            (r'(%[QWx]([\W_]))((?:\\\2|(?!\2).)*)(\2)',
+             intp_string_callback),
+            # special forms of fancy strings after operators or
+            # in method calls with braces
+            (r'(?<=[-+/*%=<>&!^|~,(])(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)',
+             bygroups(Whitespace, String.Other, None)),
+            # and because of fixed width lookbehinds the whole thing a
+            # second time for line startings...
+            (r'^(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)',
+             bygroups(Whitespace, String.Other, None)),
+            # all regular fancy strings without qsw
+            (r'(%([^a-zA-Z0-9\s]))((?:\\\2|(?!\2).)*)(\2)',
+             intp_string_callback),
+        ]
+
+        return states
+
+    tokens = {
+        'root': [
+            (r'\A#!.+?$', Comment.Hashbang),
+            (r'#.*?$', Comment.Single),
+            (r'=begin\s.*?\n=end.*?$', Comment.Multiline),
+            # keywords
+            (words((
+                'BEGIN', 'END', 'alias', 'begin', 'break', 'case', 'defined?',
+                'do', 'else', 'elsif', 'end', 'ensure', 'for', 'if', 'in', 'next', 'redo',
+                'rescue', 'raise', 'retry', 'return', 'super', 'then', 'undef',
+                'unless', 'until', 'when', 'while', 'yield'), suffix=r'\b'),
+             Keyword),
+            # start of function, class and module names
+            (r'(module)(\s+)([a-zA-Z_]\w*'
+             r'(?:::[a-zA-Z_]\w*)*)',
+             bygroups(Keyword, Whitespace, Name.Namespace)),
+            (r'(def)(\s+)', bygroups(Keyword, Whitespace), 'funcname'),
+            (r'def(?=[*%&^`~+-/\[<>=])', Keyword, 'funcname'),
+            (r'(class)(\s+)', bygroups(Keyword, Whitespace), 'classname'),
+            # special methods
+            (words((
+                'initialize', 'new', 'loop', 'include', 'extend', 'raise', 'attr_reader',
+                'attr_writer', 'attr_accessor', 'attr', 'catch', 'throw', 'private',
+                'module_function', 'public', 'protected', 'true', 'false', 'nil'),
+                suffix=r'\b'),
+             Keyword.Pseudo),
+            (r'(not|and|or)\b', Operator.Word),
+            (words((
+                'autoload', 'block_given', 'const_defined', 'eql', 'equal', 'frozen', 'include',
+                'instance_of', 'is_a', 'iterator', 'kind_of', 'method_defined', 'nil',
+                'private_method_defined', 'protected_method_defined',
+                'public_method_defined', 'respond_to', 'tainted'), suffix=r'\?'),
+             Name.Builtin),
+            (r'(chomp|chop|exit|gsub|sub)!', Name.Builtin),
+            (words((
+                'Array', 'Float', 'Integer', 'String', '__id__', '__send__', 'abort',
+                'ancestors', 'at_exit', 'autoload', 'binding', 'callcc', 'caller',
+                'catch', 'chomp', 'chop', 'class_eval', 'class_variables',
+                'clone', 'const_defined?', 'const_get', 'const_missing', 'const_set',
+                'constants', 'display', 'dup', 'eval', 'exec', 'exit', 'extend', 'fail', 'fork',
+                'format', 'freeze', 'getc', 'gets', 'global_variables', 'gsub',
+                'hash', 'id', 'included_modules', 'inspect', 'instance_eval',
+                'instance_method', 'instance_methods',
+                'instance_variable_get', 'instance_variable_set', 'instance_variables',
+                'lambda', 'load', 'local_variables', 'loop',
+                'method', 'method_missing', 'methods', 'module_eval', 'name',
+                'object_id', 'open', 'p', 'print', 'printf', 'private_class_method',
+                'private_instance_methods',
+                'private_methods', 'proc', 'protected_instance_methods',
+                'protected_methods', 'public_class_method',
+                'public_instance_methods', 'public_methods',
+                'putc', 'puts', 'raise', 'rand', 'readline', 'readlines', 'require',
+                'scan', 'select', 'self', 'send', 'set_trace_func', 'singleton_methods', 'sleep',
+                'split', 'sprintf', 'srand', 'sub', 'syscall', 'system', 'taint',
+                'test', 'throw', 'to_a', 'to_s', 'trace_var', 'trap', 'untaint',
+                'untrace_var', 'warn'), prefix=r'(?~!:])|'
+             r'(?<=(?:\s|;)when\s)|'
+             r'(?<=(?:\s|;)or\s)|'
+             r'(?<=(?:\s|;)and\s)|'
+             r'(?<=\.index\s)|'
+             r'(?<=\.scan\s)|'
+             r'(?<=\.sub\s)|'
+             r'(?<=\.sub!\s)|'
+             r'(?<=\.gsub\s)|'
+             r'(?<=\.gsub!\s)|'
+             r'(?<=\.match\s)|'
+             r'(?<=(?:\s|;)if\s)|'
+             r'(?<=(?:\s|;)elsif\s)|'
+             r'(?<=^when\s)|'
+             r'(?<=^index\s)|'
+             r'(?<=^scan\s)|'
+             r'(?<=^sub\s)|'
+             r'(?<=^gsub\s)|'
+             r'(?<=^sub!\s)|'
+             r'(?<=^gsub!\s)|'
+             r'(?<=^match\s)|'
+             r'(?<=^if\s)|'
+             r'(?<=^elsif\s)'
+             r')(\s*)(/)', bygroups(Text, String.Regex), 'multiline-regex'),
+            # multiline regex (in method calls or subscripts)
+            (r'(?<=\(|,|\[)/', String.Regex, 'multiline-regex'),
+            # multiline regex (this time the funny no whitespace rule)
+            (r'(\s+)(/)(?![\s=])', bygroups(Whitespace, String.Regex),
+             'multiline-regex'),
+            # lex numbers and ignore following regular expressions which
+            # are division operators in fact (grrrr. i hate that. any
+            # better ideas?)
+            # since pygments 0.7 we also eat a "?" operator after numbers
+            # so that the char operator does not work. Chars are not allowed
+            # there so that you can use the ternary operator.
+            # stupid example:
+            #   x>=0?n[x]:""
+            (r'(0_?[0-7]+(?:_[0-7]+)*)(\s*)([/?])?',
+             bygroups(Number.Oct, Whitespace, Operator)),
+            (r'(0x[0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*)(\s*)([/?])?',
+             bygroups(Number.Hex, Whitespace, Operator)),
+            (r'(0b[01]+(?:_[01]+)*)(\s*)([/?])?',
+             bygroups(Number.Bin, Whitespace, Operator)),
+            (r'([\d]+(?:_\d+)*)(\s*)([/?])?',
+             bygroups(Number.Integer, Whitespace, Operator)),
+            # Names
+            (r'@@[a-zA-Z_]\w*', Name.Variable.Class),
+            (r'@[a-zA-Z_]\w*', Name.Variable.Instance),
+            (r'\$\w+', Name.Variable.Global),
+            (r'\$[!@&`\'+~=/\\,;.<>_*$?:"^-]', Name.Variable.Global),
+            (r'\$-[0adFiIlpvw]', Name.Variable.Global),
+            (r'::', Operator),
+            include('strings'),
+            # chars
+            (r'\?(\\[MC]-)*'  # modifiers
+             r'(\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})|\S)'
+             r'(?!\w)',
+             String.Char),
+            (r'[A-Z]\w+', Name.Constant),
+            # this is needed because ruby attributes can look
+            # like keywords (class) or like this: ` ?!?
+            (words(RUBY_OPERATORS, prefix=r'(\.|::)'),
+             bygroups(Operator, Name.Operator)),
+            (r'(\.|::)([a-zA-Z_]\w*[!?]?|[*%&^`~+\-/\[<>=])',
+             bygroups(Operator, Name)),
+            (r'[a-zA-Z_]\w*[!?]?', Name),
+            (r'(\[|\]|\*\*|<>?|>=|<=|<=>|=~|={3}|'
+             r'!~|&&?|\|\||\.{1,3})', Operator),
+            (r'[-+/*%=<>&!^|~]=?', Operator),
+            (r'[(){};,/?:\\]', Punctuation),
+            (r'\s+', Whitespace)
+        ],
+        'funcname': [
+            (r'\(', Punctuation, 'defexpr'),
+            (r'(?:([a-zA-Z_]\w*)(\.))?'  # optional scope name, like "self."
+             r'('
+                r'[a-zA-Z\u0080-\uffff][a-zA-Z0-9_\u0080-\uffff]*[!?=]?'  # method name
+                r'|!=|!~|=~|\*\*?|[-+!~]@?|[/%&|^]|<=>|<[<=]?|>[>=]?|===?'  # or operator override
+                r'|\[\]=?'  # or element reference/assignment override
+                r'|`'  # or the undocumented backtick override
+             r')',
+             bygroups(Name.Class, Operator, Name.Function), '#pop'),
+            default('#pop')
+        ],
+        'classname': [
+            (r'\(', Punctuation, 'defexpr'),
+            (r'<<', Operator, '#pop'),
+            (r'[A-Z_]\w*', Name.Class, '#pop'),
+            default('#pop')
+        ],
+        'defexpr': [
+            (r'(\))(\.|::)?', bygroups(Punctuation, Operator), '#pop'),
+            (r'\(', Operator, '#push'),
+            include('root')
+        ],
+        'in-intp': [
+            (r'\{', String.Interpol, '#push'),
+            (r'\}', String.Interpol, '#pop'),
+            include('root'),
+        ],
+        'string-intp': [
+            (r'#\{', String.Interpol, 'in-intp'),
+            (r'#@@?[a-zA-Z_]\w*', String.Interpol),
+            (r'#\$[a-zA-Z_]\w*', String.Interpol)
+        ],
+        'string-intp-escaped': [
+            include('string-intp'),
+            (r'\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})',
+             String.Escape)
+        ],
+        'interpolated-regex': [
+            include('string-intp'),
+            (r'[\\#]', String.Regex),
+            (r'[^\\#]+', String.Regex),
+        ],
+        'interpolated-string': [
+            include('string-intp'),
+            (r'[\\#]', String.Other),
+            (r'[^\\#]+', String.Other),
+        ],
+        'multiline-regex': [
+            include('string-intp'),
+            (r'\\\\', String.Regex),
+            (r'\\/', String.Regex),
+            (r'[\\#]', String.Regex),
+            (r'[^\\/#]+', String.Regex),
+            (r'/[mixounse]*', String.Regex, '#pop'),
+        ],
+        'end-part': [
+            (r'.+', Comment.Preproc, '#pop')
+        ]
+    }
+    tokens.update(gen_rubystrings_rules())
+
+    def analyse_text(text):
+        return shebang_matches(text, r'ruby(1\.\d)?')
+
+
+class RubyConsoleLexer(Lexer):
+    """
+    For Ruby interactive console (**irb**) output.
+    """
+    name = 'Ruby irb session'
+    aliases = ['rbcon', 'irb']
+    mimetypes = ['text/x-ruby-shellsession']
+    url = 'https://www.ruby-lang.org'
+    version_added = ''
+    _example = 'rbcon/console'
+
+    _prompt_re = re.compile(r'irb\([a-zA-Z_]\w*\):\d{3}:\d+[>*"\'] '
+                            r'|>> |\?> ')
+
+    def get_tokens_unprocessed(self, text):
+        rblexer = RubyLexer(**self.options)
+
+        curcode = ''
+        insertions = []
+        for match in line_re.finditer(text):
+            line = match.group()
+            m = self._prompt_re.match(line)
+            if m is not None:
+                end = m.end()
+                insertions.append((len(curcode),
+                                   [(0, Generic.Prompt, line[:end])]))
+                curcode += line[end:]
+            else:
+                if curcode:
+                    yield from do_insertions(
+                        insertions, rblexer.get_tokens_unprocessed(curcode))
+                    curcode = ''
+                    insertions = []
+                yield match.start(), Generic.Output, line
+        if curcode:
+            yield from do_insertions(
+                insertions, rblexer.get_tokens_unprocessed(curcode))
+
+
+class FancyLexer(RegexLexer):
+    """
+    Pygments Lexer For Fancy.
+
+    Fancy is a self-hosted, pure object-oriented, dynamic,
+    class-based, concurrent general-purpose programming language
+    running on Rubinius, the Ruby VM.
+    """
+    name = 'Fancy'
+    url = 'https://github.com/bakkdoor/fancy'
+    filenames = ['*.fy', '*.fancypack']
+    aliases = ['fancy', 'fy']
+    mimetypes = ['text/x-fancysrc']
+    version_added = '1.5'
+
+    tokens = {
+        # copied from PerlLexer:
+        'balanced-regex': [
+            (r'/(\\\\|\\[^\\]|[^/\\])*/[egimosx]*', String.Regex, '#pop'),
+            (r'!(\\\\|\\[^\\]|[^!\\])*![egimosx]*', String.Regex, '#pop'),
+            (r'\\(\\\\|[^\\])*\\[egimosx]*', String.Regex, '#pop'),
+            (r'\{(\\\\|\\[^\\]|[^}\\])*\}[egimosx]*', String.Regex, '#pop'),
+            (r'<(\\\\|\\[^\\]|[^>\\])*>[egimosx]*', String.Regex, '#pop'),
+            (r'\[(\\\\|\\[^\\]|[^\]\\])*\][egimosx]*', String.Regex, '#pop'),
+            (r'\((\\\\|\\[^\\]|[^)\\])*\)[egimosx]*', String.Regex, '#pop'),
+            (r'@(\\\\|\\[^\\]|[^@\\])*@[egimosx]*', String.Regex, '#pop'),
+            (r'%(\\\\|\\[^\\]|[^%\\])*%[egimosx]*', String.Regex, '#pop'),
+            (r'\$(\\\\|\\[^\\]|[^$\\])*\$[egimosx]*', String.Regex, '#pop'),
+        ],
+        'root': [
+            (r'\s+', Whitespace),
+
+            # balanced delimiters (copied from PerlLexer):
+            (r's\{(\\\\|\\[^\\]|[^}\\])*\}\s*', String.Regex, 'balanced-regex'),
+            (r's<(\\\\|\\[^\\]|[^>\\])*>\s*', String.Regex, 'balanced-regex'),
+            (r's\[(\\\\|\\[^\\]|[^\]\\])*\]\s*', String.Regex, 'balanced-regex'),
+            (r's\((\\\\|\\[^\\]|[^)\\])*\)\s*', String.Regex, 'balanced-regex'),
+            (r'm?/(\\\\|\\[^\\]|[^///\n])*/[gcimosx]*', String.Regex),
+            (r'm(?=[/!\\{<\[(@%$])', String.Regex, 'balanced-regex'),
+
+            # Comments
+            (r'#(.*?)\n', Comment.Single),
+            # Symbols
+            (r'\'([^\'\s\[\](){}]+|\[\])', String.Symbol),
+            # Multi-line DoubleQuotedString
+            (r'"""(\\\\|\\[^\\]|[^\\])*?"""', String),
+            # DoubleQuotedString
+            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
+            # keywords
+            (r'(def|class|try|catch|finally|retry|return|return_local|match|'
+             r'case|->|=>)\b', Keyword),
+            # constants
+            (r'(self|super|nil|false|true)\b', Name.Constant),
+            (r'[(){};,/?|:\\]', Punctuation),
+            # names
+            (words((
+                'Object', 'Array', 'Hash', 'Directory', 'File', 'Class', 'String',
+                'Number', 'Enumerable', 'FancyEnumerable', 'Block', 'TrueClass',
+                'NilClass', 'FalseClass', 'Tuple', 'Symbol', 'Stack', 'Set',
+                'FancySpec', 'Method', 'Package', 'Range'), suffix=r'\b'),
+             Name.Builtin),
+            # functions
+            (r'[a-zA-Z](\w|[-+?!=*/^><%])*:', Name.Function),
+            # operators, must be below functions
+            (r'[-+*/~,<>=&!?%^\[\].$]+', Operator),
+            (r'[A-Z]\w*', Name.Constant),
+            (r'@[a-zA-Z_]\w*', Name.Variable.Instance),
+            (r'@@[a-zA-Z_]\w*', Name.Variable.Class),
+            ('@@?', Operator),
+            (r'[a-zA-Z_]\w*', Name),
+            # numbers - / checks are necessary to avoid mismarking regexes,
+            # see comment in RubyLexer
+            (r'(0[oO]?[0-7]+(?:_[0-7]+)*)(\s*)([/?])?',
+             bygroups(Number.Oct, Whitespace, Operator)),
+            (r'(0[xX][0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*)(\s*)([/?])?',
+             bygroups(Number.Hex, Whitespace, Operator)),
+            (r'(0[bB][01]+(?:_[01]+)*)(\s*)([/?])?',
+             bygroups(Number.Bin, Whitespace, Operator)),
+            (r'([\d]+(?:_\d+)*)(\s*)([/?])?',
+             bygroups(Number.Integer, Whitespace, Operator)),
+            (r'\d+([eE][+-]?[0-9]+)|\d+\.\d+([eE][+-]?[0-9]+)?', Number.Float),
+            (r'\d+', Number.Integer)
+        ]
+    }
diff --git a/local_packages/pygments/lexers/rust.py b/local_packages/pygments/lexers/rust.py
new file mode 100644
index 0000000..6c9e621
--- /dev/null
+++ b/local_packages/pygments/lexers/rust.py
@@ -0,0 +1,222 @@
+"""
+    pygments.lexers.rust
+    ~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for the Rust language.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, include, bygroups, words, default
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+    Number, Punctuation, Whitespace
+
+__all__ = ['RustLexer']
+
+
+class RustLexer(RegexLexer):
+    """
+    Lexer for the Rust programming language (version 1.47).
+    """
+    name = 'Rust'
+    url = 'https://www.rust-lang.org/'
+    filenames = ['*.rs', '*.rs.in']
+    aliases = ['rust', 'rs']
+    mimetypes = ['text/rust', 'text/x-rust']
+    version_added = '1.6'
+
+    keyword_types = (words((
+        'u8', 'u16', 'u32', 'u64', 'u128', 'i8', 'i16', 'i32', 'i64', 'i128',
+        'usize', 'isize', 'f32', 'f64', 'char', 'str', 'bool',
+    ), suffix=r'\b'), Keyword.Type)
+
+    builtin_funcs_types = (words((
+        'Copy', 'Send', 'Sized', 'Sync', 'Unpin',
+        'Drop', 'Fn', 'FnMut', 'FnOnce', 'drop',
+        'Box', 'ToOwned', 'Clone',
+        'PartialEq', 'PartialOrd', 'Eq', 'Ord',
+        'AsRef', 'AsMut', 'Into', 'From', 'Default',
+        'Iterator', 'Extend', 'IntoIterator', 'DoubleEndedIterator',
+        'ExactSizeIterator',
+        'Option', 'Some', 'None',
+        'Result', 'Ok', 'Err',
+        'String', 'ToString', 'Vec',
+    ), suffix=r'\b'), Name.Builtin)
+
+    builtin_macros = (words((
+        'asm', 'assert', 'assert_eq', 'assert_ne', 'cfg', 'column',
+        'compile_error', 'concat', 'concat_idents', 'dbg', 'debug_assert',
+        'debug_assert_eq', 'debug_assert_ne', 'env', 'eprint', 'eprintln',
+        'file', 'format', 'format_args', 'format_args_nl', 'global_asm',
+        'include', 'include_bytes', 'include_str',
+        'is_aarch64_feature_detected',
+        'is_arm_feature_detected',
+        'is_mips64_feature_detected',
+        'is_mips_feature_detected',
+        'is_powerpc64_feature_detected',
+        'is_powerpc_feature_detected',
+        'is_x86_feature_detected',
+        'line', 'llvm_asm', 'log_syntax', 'macro_rules', 'matches',
+        'module_path', 'option_env', 'panic', 'print', 'println', 'stringify',
+        'thread_local', 'todo', 'trace_macros', 'unimplemented', 'unreachable',
+        'vec', 'write', 'writeln',
+    ), suffix=r'!'), Name.Function.Magic)
+
+    tokens = {
+        'root': [
+            # rust allows a file to start with a shebang, but if the first line
+            # starts with #![ then it's not a shebang but a crate attribute.
+            (r'#![^[\r\n].*$', Comment.Preproc),
+            default('base'),
+        ],
+        'base': [
+            # Whitespace and Comments
+            (r'\n', Whitespace),
+            (r'\s+', Whitespace),
+            (r'//!.*?\n', String.Doc),
+            (r'///(\n|[^/].*?\n)', String.Doc),
+            (r'//(.*?)\n', Comment.Single),
+            (r'/\*\*(\n|[^/*])', String.Doc, 'doccomment'),
+            (r'/\*!', String.Doc, 'doccomment'),
+            (r'/\*', Comment.Multiline, 'comment'),
+
+            # Macro parameters
+            (r"""\$([a-zA-Z_]\w*|\(,?|\),?|,?)""", Comment.Preproc),
+            # Keywords
+            (words(('as', 'async', 'await', 'box', 'const', 'crate', 'dyn',
+                    'else', 'extern', 'for', 'if', 'impl', 'in', 'loop',
+                    'match', 'move', 'mut', 'pub', 'ref', 'return', 'static',
+                    'super', 'trait', 'unsafe', 'use', 'where', 'while'),
+                   suffix=r'\b'), Keyword),
+            (words(('abstract', 'become', 'do', 'final', 'macro', 'override',
+                    'priv', 'typeof', 'try', 'unsized', 'virtual', 'yield'),
+                   suffix=r'\b'), Keyword.Reserved),
+            (r'(true|false)\b', Keyword.Constant),
+            (r'self\b', Name.Builtin.Pseudo),
+            (r'mod\b', Keyword, 'modname'),
+            (r'let\b', Keyword.Declaration),
+            (r'fn\b', Keyword, 'funcname'),
+            (r'(struct|enum|type|union)\b', Keyword, 'typename'),
+            (r'(default)(\s+)(type|fn)\b', bygroups(Keyword, Whitespace, Keyword)),
+            keyword_types,
+            (r'[sS]elf\b', Name.Builtin.Pseudo),
+            # Prelude (taken from Rust's src/libstd/prelude.rs)
+            builtin_funcs_types,
+            builtin_macros,
+            # Path separators, so types don't catch them.
+            (r'::\b', Punctuation),
+            # Types in positions.
+            (r'(?::|->)', Punctuation, 'typename'),
+            # Labels
+            (r'(break|continue)(\b\s*)(\'[A-Za-z_]\w*)?',
+             bygroups(Keyword, Text.Whitespace, Name.Label)),
+
+            # Character literals
+            (r"""'(\\['"\\nrt]|\\x[0-7][0-9a-fA-F]|\\0"""
+             r"""|\\u\{[0-9a-fA-F]{1,6}\}|.)'""",
+             String.Char),
+            (r"""b'(\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\0"""
+             r"""|\\u\{[0-9a-fA-F]{1,6}\}|.)'""",
+             String.Char),
+
+            # Binary literals
+            (r'0b[01_]+', Number.Bin, 'number_lit'),
+            # Octal literals
+            (r'0o[0-7_]+', Number.Oct, 'number_lit'),
+            # Hexadecimal literals
+            (r'0[xX][0-9a-fA-F_]+', Number.Hex, 'number_lit'),
+            # Decimal literals
+            (r'[0-9][0-9_]*(\.[0-9_]+[eE][+\-]?[0-9_]+|'
+             r'\.[0-9_]*(?!\.)|[eE][+\-]?[0-9_]+)', Number.Float,
+             'number_lit'),
+            (r'[0-9][0-9_]*', Number.Integer, 'number_lit'),
+
+            # String literals
+            (r'b"', String, 'bytestring'),
+            (r'"', String, 'string'),
+            (r'(?s)b?r(#*)".*?"\1', String),
+
+            # Lifetime names
+            (r"'", Operator, 'lifetime'),
+
+            # Operators and Punctuation
+            (r'\.\.=?', Operator),
+            (r'[{}()\[\],.;]', Punctuation),
+            (r'[+\-*/%&|<>^!~@=:?]', Operator),
+
+            # Identifiers
+            (r'[a-zA-Z_]\w*', Name),
+            # Raw identifiers
+            (r'r#[a-zA-Z_]\w*', Name),
+
+            # Attributes
+            (r'#!?\[', Comment.Preproc, 'attribute['),
+
+            # Misc
+            # Lone hashes: not used in Rust syntax, but allowed in macro
+            # arguments, most famously for quote::quote!()
+            (r'#', Punctuation),
+        ],
+        'comment': [
+            (r'[^*/]+', Comment.Multiline),
+            (r'/\*', Comment.Multiline, '#push'),
+            (r'\*/', Comment.Multiline, '#pop'),
+            (r'[*/]', Comment.Multiline),
+        ],
+        'doccomment': [
+            (r'[^*/]+', String.Doc),
+            (r'/\*', String.Doc, '#push'),
+            (r'\*/', String.Doc, '#pop'),
+            (r'[*/]', String.Doc),
+        ],
+        'modname': [
+            (r'\s+', Whitespace),
+            (r'[a-zA-Z_]\w*', Name.Namespace, '#pop'),
+            default('#pop'),
+        ],
+        'funcname': [
+            (r'\s+', Whitespace),
+            (r'[a-zA-Z_]\w*', Name.Function, '#pop'),
+            default('#pop'),
+        ],
+        'typename': [
+            (r'\s+', Whitespace),
+            (r'&', Keyword.Pseudo),
+            (r"'", Operator, 'lifetime'),
+            builtin_funcs_types,
+            keyword_types,
+            (r'[a-zA-Z_]\w*', Name.Class, '#pop'),
+            default('#pop'),
+        ],
+        'lifetime': [
+            (r"(static|_)", Name.Builtin),
+            (r"[a-zA-Z_]+\w*", Name.Attribute),
+            default('#pop'),
+        ],
+        'number_lit': [
+            (r'[ui](8|16|32|64|size)', Keyword, '#pop'),
+            (r'f(32|64)', Keyword, '#pop'),
+            default('#pop'),
+        ],
+        'string': [
+            (r'"', String, '#pop'),
+            (r"""\\['"\\nrt]|\\x[0-7][0-9a-fA-F]|\\0"""
+             r"""|\\u\{[0-9a-fA-F]{1,6}\}""", String.Escape),
+            (r'[^\\"]+', String),
+            (r'\\', String),
+        ],
+        'bytestring': [
+            (r"""\\x[89a-fA-F][0-9a-fA-F]""", String.Escape),
+            include('string'),
+        ],
+        'attribute_common': [
+            (r'"', String, 'string'),
+            (r'\[', Comment.Preproc, 'attribute['),
+        ],
+        'attribute[': [
+            include('attribute_common'),
+            (r'\]', Comment.Preproc, '#pop'),
+            (r'[^"\]\[]+', Comment.Preproc),
+        ],
+    }
diff --git a/local_packages/pygments/lexers/sas.py b/local_packages/pygments/lexers/sas.py
new file mode 100644
index 0000000..35ee854
--- /dev/null
+++ b/local_packages/pygments/lexers/sas.py
@@ -0,0 +1,227 @@
+"""
+    pygments.lexers.sas
+    ~~~~~~~~~~~~~~~~~~~
+
+    Lexer for SAS.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import re
+from pygments.lexer import RegexLexer, include, words
+from pygments.token import Comment, Keyword, Name, Number, String, Text, \
+    Other, Generic
+
+__all__ = ['SASLexer']
+
+
+class SASLexer(RegexLexer):
+    """
+    For SAS files.
+    """
+    # Syntax from syntax/sas.vim by James Kidd 
+
+    name      = 'SAS'
+    aliases   = ['sas']
+    filenames = ['*.SAS', '*.sas']
+    mimetypes = ['text/x-sas', 'text/sas', 'application/x-sas']
+    url = 'https://en.wikipedia.org/wiki/SAS_(software)'
+    version_added = '2.2'
+    flags     = re.IGNORECASE | re.MULTILINE
+
+    builtins_macros = (
+        "bquote", "nrbquote", "cmpres", "qcmpres", "compstor", "datatyp",
+        "display", "do", "else", "end", "eval", "global", "goto", "if",
+        "index", "input", "keydef", "label", "left", "length", "let",
+        "local", "lowcase", "macro", "mend", "nrquote",
+        "nrstr", "put", "qleft", "qlowcase", "qscan",
+        "qsubstr", "qsysfunc", "qtrim", "quote", "qupcase", "scan",
+        "str", "substr", "superq", "syscall", "sysevalf", "sysexec",
+        "sysfunc", "sysget", "syslput", "sysprod", "sysrc", "sysrput",
+        "then", "to", "trim", "unquote", "until", "upcase", "verify",
+        "while", "window"
+    )
+
+    builtins_conditionals = (
+        "do", "if", "then", "else", "end", "until", "while"
+    )
+
+    builtins_statements = (
+        "abort", "array", "attrib", "by", "call", "cards", "cards4",
+        "catname", "continue", "datalines", "datalines4", "delete", "delim",
+        "delimiter", "display", "dm", "drop", "endsas", "error", "file",
+        "filename", "footnote", "format", "goto", "in", "infile", "informat",
+        "input", "keep", "label", "leave", "length", "libname", "link",
+        "list", "lostcard", "merge", "missing", "modify", "options", "output",
+        "out", "page", "put", "redirect", "remove", "rename", "replace",
+        "retain", "return", "select", "set", "skip", "startsas", "stop",
+        "title", "update", "waitsas", "where", "window", "x", "systask"
+    )
+
+    builtins_sql = (
+        "add", "and", "alter", "as", "cascade", "check", "create",
+        "delete", "describe", "distinct", "drop", "foreign", "from",
+        "group", "having", "index", "insert", "into", "in", "key", "like",
+        "message", "modify", "msgtype", "not", "null", "on", "or",
+        "order", "primary", "references", "reset", "restrict", "select",
+        "set", "table", "unique", "update", "validate", "view", "where"
+    )
+
+    builtins_functions = (
+        "abs", "addr", "airy", "arcos", "arsin", "atan", "attrc",
+        "attrn", "band", "betainv", "blshift", "bnot", "bor",
+        "brshift", "bxor", "byte", "cdf", "ceil", "cexist", "cinv",
+        "close", "cnonct", "collate", "compbl", "compound",
+        "compress", "cos", "cosh", "css", "curobs", "cv", "daccdb",
+        "daccdbsl", "daccsl", "daccsyd", "dacctab", "dairy", "date",
+        "datejul", "datepart", "datetime", "day", "dclose", "depdb",
+        "depdbsl", "depsl", "depsyd",
+        "deptab", "dequote", "dhms", "dif", "digamma",
+        "dim", "dinfo", "dnum", "dopen", "doptname", "doptnum",
+        "dread", "dropnote", "dsname", "erf", "erfc", "exist", "exp",
+        "fappend", "fclose", "fcol", "fdelete", "fetch", "fetchobs",
+        "fexist", "fget", "fileexist", "filename", "fileref",
+        "finfo", "finv", "fipname", "fipnamel", "fipstate", "floor",
+        "fnonct", "fnote", "fopen", "foptname", "foptnum", "fpoint",
+        "fpos", "fput", "fread", "frewind", "frlen", "fsep", "fuzz",
+        "fwrite", "gaminv", "gamma", "getoption", "getvarc", "getvarn",
+        "hbound", "hms", "hosthelp", "hour", "ibessel", "index",
+        "indexc", "indexw", "input", "inputc", "inputn", "int",
+        "intck", "intnx", "intrr", "irr", "jbessel", "juldate",
+        "kurtosis", "lag", "lbound", "left", "length", "lgamma",
+        "libname", "libref", "log", "log10", "log2", "logpdf", "logpmf",
+        "logsdf", "lowcase", "max", "mdy", "mean", "min", "minute",
+        "mod", "month", "mopen", "mort", "n", "netpv", "nmiss",
+        "normal", "note", "npv", "open", "ordinal", "pathname",
+        "pdf", "peek", "peekc", "pmf", "point", "poisson", "poke",
+        "probbeta", "probbnml", "probchi", "probf", "probgam",
+        "probhypr", "probit", "probnegb", "probnorm", "probt",
+        "put", "putc", "putn", "qtr", "quote", "ranbin", "rancau",
+        "ranexp", "rangam", "range", "rank", "rannor", "ranpoi",
+        "rantbl", "rantri", "ranuni", "repeat", "resolve", "reverse",
+        "rewind", "right", "round", "saving", "scan", "sdf", "second",
+        "sign", "sin", "sinh", "skewness", "soundex", "spedis",
+        "sqrt", "std", "stderr", "stfips", "stname", "stnamel",
+        "substr", "sum", "symget", "sysget", "sysmsg", "sysprod",
+        "sysrc", "system", "tan", "tanh", "time", "timepart", "tinv",
+        "tnonct", "today", "translate", "tranwrd", "trigamma",
+        "trim", "trimn", "trunc", "uniform", "upcase", "uss", "var",
+        "varfmt", "varinfmt", "varlabel", "varlen", "varname",
+        "varnum", "varray", "varrayx", "vartype", "verify", "vformat",
+        "vformatd", "vformatdx", "vformatn", "vformatnx", "vformatw",
+        "vformatwx", "vformatx", "vinarray", "vinarrayx", "vinformat",
+        "vinformatd", "vinformatdx", "vinformatn", "vinformatnx",
+        "vinformatw", "vinformatwx", "vinformatx", "vlabel",
+        "vlabelx", "vlength", "vlengthx", "vname", "vnamex", "vtype",
+        "vtypex", "weekday", "year", "yyq", "zipfips", "zipname",
+        "zipnamel", "zipstate"
+    )
+
+    tokens = {
+        'root': [
+            include('comments'),
+            include('proc-data'),
+            include('cards-datalines'),
+            include('logs'),
+            include('general'),
+            (r'.', Text),
+        ],
+        # SAS is multi-line regardless, but * is ended by ;
+        'comments': [
+            (r'^\s*\*.*?;', Comment),
+            (r'/\*.*?\*/', Comment),
+            (r'^\s*\*(.|\n)*?;', Comment.Multiline),
+            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
+        ],
+        # Special highlight for proc, data, quit, run
+        'proc-data': [
+            (r'(^|;)\s*(proc \w+|data|run|quit)[\s;]',
+             Keyword.Reserved),
+        ],
+        # Special highlight cards and datalines
+        'cards-datalines': [
+            (r'^\s*(datalines|cards)\s*;\s*$', Keyword, 'data'),
+        ],
+        'data': [
+            (r'(.|\n)*^\s*;\s*$', Other, '#pop'),
+        ],
+        # Special highlight for put NOTE|ERROR|WARNING (order matters)
+        'logs': [
+            (r'\n?^\s*%?put ', Keyword, 'log-messages'),
+        ],
+        'log-messages': [
+            (r'NOTE(:|-).*', Generic, '#pop'),
+            (r'WARNING(:|-).*', Generic.Emph, '#pop'),
+            (r'ERROR(:|-).*', Generic.Error, '#pop'),
+            include('general'),
+        ],
+        'general': [
+            include('keywords'),
+            include('vars-strings'),
+            include('special'),
+            include('numbers'),
+        ],
+        # Keywords, statements, functions, macros
+        'keywords': [
+            (words(builtins_statements,
+                   prefix = r'\b',
+                   suffix = r'\b'),
+             Keyword),
+            (words(builtins_sql,
+                   prefix = r'\b',
+                   suffix = r'\b'),
+             Keyword),
+            (words(builtins_conditionals,
+                   prefix = r'\b',
+                   suffix = r'\b'),
+             Keyword),
+            (words(builtins_macros,
+                   prefix = r'%',
+                   suffix = r'\b'),
+             Name.Builtin),
+            (words(builtins_functions,
+                   prefix = r'\b',
+                   suffix = r'\('),
+             Name.Builtin),
+        ],
+        # Strings and user-defined variables and macros (order matters)
+        'vars-strings': [
+            (r'&[a-z_]\w{0,31}\.?', Name.Variable),
+            (r'%[a-z_]\w{0,31}', Name.Function),
+            (r'\'', String, 'string_squote'),
+            (r'"', String, 'string_dquote'),
+        ],
+        'string_squote': [
+            ('\'', String, '#pop'),
+            (r'\\\\|\\"|\\\n', String.Escape),
+            # AFAIK, macro variables are not evaluated in single quotes
+            # (r'&', Name.Variable, 'validvar'),
+            (r'[^$\'\\]+', String),
+            (r'[$\'\\]', String),
+        ],
+        'string_dquote': [
+            (r'"', String, '#pop'),
+            (r'\\\\|\\"|\\\n', String.Escape),
+            (r'&', Name.Variable, 'validvar'),
+            (r'[^$&"\\]+', String),
+            (r'[$"\\]', String),
+        ],
+        'validvar': [
+            (r'[a-z_]\w{0,31}\.?', Name.Variable, '#pop'),
+        ],
+        # SAS numbers and special variables
+        'numbers': [
+            (r'\b[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)(E[+-]?[0-9]+)?i?\b',
+             Number),
+        ],
+        'special': [
+            (r'(null|missing|_all_|_automatic_|_character_|_n_|'
+             r'_infile_|_name_|_null_|_numeric_|_user_|_webout_)',
+             Keyword.Constant),
+        ],
+        # 'operators': [
+        #     (r'(-|=|<=|>=|<|>|<>|&|!=|'
+        #      r'\||\*|\+|\^|/|!|~|~=)', Operator)
+        # ],
+    }
diff --git a/local_packages/pygments/lexers/savi.py b/local_packages/pygments/lexers/savi.py
new file mode 100644
index 0000000..548a056
--- /dev/null
+++ b/local_packages/pygments/lexers/savi.py
@@ -0,0 +1,171 @@
+"""
+    pygments.lexers.savi
+    ~~~~~~~~~~~~~~~~~~~~
+
+    Lexer for Savi.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, include
+from pygments.token import Whitespace, Keyword, Name, String, Number, \
+  Operator, Punctuation, Comment, Generic, Error
+
+__all__ = ['SaviLexer']
+
+
+# The canonical version of this file can be found in the following repository,
+# where it is kept in sync with any language changes, as well as the other
+# pygments-like lexers that are maintained for use with other tools:
+# - https://github.com/savi-lang/savi/blob/main/tooling/pygments/lexers/savi.py
+#
+# If you're changing this file in the pygments repository, please ensure that
+# any changes you make are also propagated to the official Savi repository,
+# in order to avoid accidental clobbering of your changes later when an update
+# from the Savi repository flows forward into the pygments repository.
+#
+# If you're changing this file in the Savi repository, please ensure that
+# any changes you make are also reflected in the other pygments-like lexers
+# (rouge, vscode, etc) so that all of the lexers can be kept cleanly in sync.
+
+class SaviLexer(RegexLexer):
+    """
+    For Savi source code.
+
+    .. versionadded: 2.10
+    """
+
+    name = 'Savi'
+    url = 'https://github.com/savi-lang/savi'
+    aliases = ['savi']
+    filenames = ['*.savi']
+    version_added = ''
+
+    tokens = {
+      "root": [
+        # Line Comment
+        (r'//.*?$', Comment.Single),
+
+        # Doc Comment
+        (r'::.*?$', Comment.Single),
+
+        # Capability Operator
+        (r'(\')(\w+)(?=[^\'])', bygroups(Operator, Name)),
+
+        # Double-Quote String
+        (r'\w?"', String.Double, "string.double"),
+
+        # Single-Char String
+        (r"'", String.Char, "string.char"),
+
+        # Type Name
+        (r'(_?[A-Z]\w*)', Name.Class),
+
+        # Nested Type Name
+        (r'(\.)(\s*)(_?[A-Z]\w*)', bygroups(Punctuation, Whitespace, Name.Class)),
+
+        # Declare
+        (r'^([ \t]*)(:\w+)',
+          bygroups(Whitespace, Name.Tag),
+          "decl"),
+
+        # Error-Raising Calls/Names
+        (r'((\w+|\+|\-|\*)\!)', Generic.Deleted),
+
+        # Numeric Values
+        (r'\b\d([\d_]*(\.[\d_]+)?)\b', Number),
+
+        # Hex Numeric Values
+        (r'\b0x([0-9a-fA-F_]+)\b', Number.Hex),
+
+        # Binary Numeric Values
+        (r'\b0b([01_]+)\b', Number.Bin),
+
+        # Function Call (with braces)
+        (r'\w+(?=\()', Name.Function),
+
+        # Function Call (with receiver)
+        (r'(\.)(\s*)(\w+)', bygroups(Punctuation, Whitespace, Name.Function)),
+
+        # Function Call (with self receiver)
+        (r'(@)(\w+)', bygroups(Punctuation, Name.Function)),
+
+        # Parenthesis
+        (r'\(', Punctuation, "root"),
+        (r'\)', Punctuation, "#pop"),
+
+        # Brace
+        (r'\{', Punctuation, "root"),
+        (r'\}', Punctuation, "#pop"),
+
+        # Bracket
+        (r'\[', Punctuation, "root"),
+        (r'(\])(\!)', bygroups(Punctuation, Generic.Deleted), "#pop"),
+        (r'\]', Punctuation, "#pop"),
+
+        # Punctuation
+        (r'[,;:\.@]', Punctuation),
+
+        # Piping Operators
+        (r'(\|\>)', Operator),
+
+        # Branching Operators
+        (r'(\&\&|\|\||\?\?|\&\?|\|\?|\.\?)', Operator),
+
+        # Comparison Operators
+        (r'(\<\=\>|\=\~|\=\=|\<\=|\>\=|\<|\>)', Operator),
+
+        # Arithmetic Operators
+        (r'(\+|\-|\/|\*|\%)', Operator),
+
+        # Assignment Operators
+        (r'(\=)', Operator),
+
+        # Other Operators
+        (r'(\!|\<\<|\<|\&|\|)', Operator),
+
+        # Identifiers
+        (r'\b\w+\b', Name),
+
+        # Whitespace
+        (r'[ \t\r]+\n*|\n+', Whitespace),
+      ],
+
+      # Declare (nested rules)
+      "decl": [
+        (r'\b[a-z_]\w*\b(?!\!)', Keyword.Declaration),
+        (r':', Punctuation, "#pop"),
+        (r'\n', Whitespace, "#pop"),
+        include("root"),
+      ],
+
+      # Double-Quote String (nested rules)
+      "string.double": [
+        (r'\\\(', String.Interpol, "string.interpolation"),
+        (r'\\u[0-9a-fA-F]{4}', String.Escape),
+        (r'\\x[0-9a-fA-F]{2}', String.Escape),
+        (r'\\[bfnrt\\\']', String.Escape),
+        (r'\\"', String.Escape),
+        (r'"', String.Double, "#pop"),
+        (r'[^\\"]+', String.Double),
+        (r'.', Error),
+      ],
+
+      # Single-Char String (nested rules)
+      "string.char": [
+        (r'\\u[0-9a-fA-F]{4}', String.Escape),
+        (r'\\x[0-9a-fA-F]{2}', String.Escape),
+        (r'\\[bfnrt\\\']', String.Escape),
+        (r"\\'", String.Escape),
+        (r"'", String.Char, "#pop"),
+        (r"[^\\']+", String.Char),
+        (r'.', Error),
+      ],
+
+      # Interpolation inside String (nested rules)
+      "string.interpolation": [
+        (r"\)", String.Interpol, "#pop"),
+        include("root"),
+      ]
+    }
diff --git a/local_packages/pygments/lexers/scdoc.py b/local_packages/pygments/lexers/scdoc.py
new file mode 100644
index 0000000..9b2e01d
--- /dev/null
+++ b/local_packages/pygments/lexers/scdoc.py
@@ -0,0 +1,85 @@
+"""
+    pygments.lexers.scdoc
+    ~~~~~~~~~~~~~~~~~~~~~
+
+    Lexer for scdoc, a simple man page generator.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups, using, this
+from pygments.token import Text, Comment, Keyword, String, Generic
+
+__all__ = ['ScdocLexer']
+
+
+class ScdocLexer(RegexLexer):
+    """
+    `scdoc` is a simple man page generator for POSIX systems written in C99.
+    """
+    name = 'scdoc'
+    url = 'https://git.sr.ht/~sircmpwn/scdoc'
+    aliases = ['scdoc', 'scd']
+    filenames = ['*.scd', '*.scdoc']
+    version_added = '2.5'
+    flags = re.MULTILINE
+
+    tokens = {
+        'root': [
+            # comment
+            (r'^(;.+\n)', bygroups(Comment)),
+
+            # heading with pound prefix
+            (r'^(#)([^#].+\n)', bygroups(Generic.Heading, Text)),
+            (r'^(#{2})(.+\n)', bygroups(Generic.Subheading, Text)),
+            # bulleted lists
+            (r'^(\s*)([*-])(\s)(.+\n)',
+            bygroups(Text, Keyword, Text, using(this, state='inline'))),
+            # numbered lists
+            (r'^(\s*)(\.+\.)( .+\n)',
+            bygroups(Text, Keyword, using(this, state='inline'))),
+            # quote
+            (r'^(\s*>\s)(.+\n)', bygroups(Keyword, Generic.Emph)),
+            # text block
+            (r'^(```\n)([\w\W]*?)(^```$)', bygroups(String, Text, String)),
+
+            include('inline'),
+        ],
+        'inline': [
+            # escape
+            (r'\\.', Text),
+            # underlines
+            (r'(\s)(_[^_]+_)(\W|\n)', bygroups(Text, Generic.Emph, Text)),
+            # bold
+            (r'(\s)(\*[^*]+\*)(\W|\n)', bygroups(Text, Generic.Strong, Text)),
+            # inline code
+            (r'`[^`]+`', String.Backtick),
+
+            # general text, must come last!
+            (r'[^\\\s]+', Text),
+            (r'.', Text),
+        ],
+    }
+
+    def analyse_text(text):
+        """We checks for bold and underline text with * and _. Also
+        every scdoc file must start with a strictly defined first line."""
+        result = 0
+
+        if '*' in text:
+            result += 0.01
+
+        if '_' in text:
+            result += 0.01
+
+        # name(section) ["left_footer" ["center_header"]]
+        first_line = text.partition('\n')[0]
+        scdoc_preamble_pattern = r'^.*\([1-7]\)( "[^"]+"){0,2}$'
+
+        if re.search(scdoc_preamble_pattern, first_line):
+            result += 0.5
+
+        return result
diff --git a/local_packages/pygments/lexers/scripting.py b/local_packages/pygments/lexers/scripting.py
new file mode 100644
index 0000000..2adb7bb
--- /dev/null
+++ b/local_packages/pygments/lexers/scripting.py
@@ -0,0 +1,1638 @@
+"""
+    pygments.lexers.scripting
+    ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Lexer for scripting and embedded languages.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups, default, combined, \
+    words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+    Number, Punctuation, Error, Whitespace, Other
+from pygments.util import get_bool_opt, get_list_opt
+
+__all__ = ['LuaLexer', 'LuauLexer', 'MoonScriptLexer', 'ChaiscriptLexer', 'LSLLexer',
+           'AppleScriptLexer', 'RexxLexer', 'MOOCodeLexer', 'HybrisLexer',
+           'EasytrieveLexer', 'JclLexer', 'MiniScriptLexer']
+
+
+def all_lua_builtins():
+    from pygments.lexers._lua_builtins import MODULES
+    return [w for values in MODULES.values() for w in values]
+
+class LuaLexer(RegexLexer):
+    """
+    For Lua source code.
+
+    Additional options accepted:
+
+    `func_name_highlighting`
+        If given and ``True``, highlight builtin function names
+        (default: ``True``).
+    `disabled_modules`
+        If given, must be a list of module names whose function names
+        should not be highlighted. By default all modules are highlighted.
+
+        To get a list of allowed modules have a look into the
+        `_lua_builtins` module:
+
+        .. sourcecode:: pycon
+
+            >>> from pygments.lexers._lua_builtins import MODULES
+            >>> MODULES.keys()
+            ['string', 'coroutine', 'modules', 'io', 'basic', ...]
+    """
+
+    name = 'Lua'
+    url = 'https://www.lua.org/'
+    aliases = ['lua']
+    filenames = ['*.lua', '*.wlua']
+    mimetypes = ['text/x-lua', 'application/x-lua']
+    version_added = ''
+
+    _comment_multiline = r'(?:--\[(?P=*)\[[\w\W]*?\](?P=level)\])'
+    _comment_single = r'(?:--.*$)'
+    _space = r'(?:\s+(?!\s))'
+    _s = rf'(?:{_comment_multiline}|{_comment_single}|{_space})'
+    # A lookahead-safe version of _s that avoids catastrophic backtracking.
+    # The _comment_multiline pattern contains [\w\W]*? which, when used
+    # inside a lookahead with a * quantifier, causes exponential blowup.
+    # This version skips only whitespace; comments between an identifier
+    # and a following [.:] or ( are rare enough to sacrifice.
+    _s_la = r'\s'
+    _name = r'(?:[^\W\d]\w*)'
+
+    tokens = {
+        'root': [
+            # Lua allows a file to start with a shebang.
+            (r'#!.*', Comment.Preproc),
+            default('base'),
+        ],
+        'ws': [
+            (_comment_multiline, Comment.Multiline),
+            (_comment_single, Comment.Single),
+            (_space, Whitespace),
+        ],
+        'base': [
+            include('ws'),
+
+            (r'(?i)0x[\da-f]*(\.[\da-f]*)?(p[+-]?\d+)?', Number.Hex),
+            (r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number.Float),
+            (r'(?i)\d+e[+-]?\d+', Number.Float),
+            (r'\d+', Number.Integer),
+
+            # multiline strings
+            (r'(?s)\[(=*)\[.*?\]\1\]', String),
+
+            (r'::', Punctuation, 'label'),
+            (r'\.{3}', Punctuation),
+            (r'[=<>|~&+\-*/%#^]+|\.\.', Operator),
+            (r'[\[\]{}().,:;]+', Punctuation),
+            (r'(and|or|not)\b', Operator.Word),
+
+            (words([
+                'break', 'do', 'else', 'elseif', 'end', 'for', 'if', 'in',
+                'repeat', 'return', 'then', 'until', 'while'
+            ], suffix=r'\b'), Keyword.Reserved),
+            (r'goto\b', Keyword.Reserved, 'goto'),
+            (r'(local)\b', Keyword.Declaration),
+            (r'(true|false|nil)\b', Keyword.Constant),
+
+            (r'(function)\b', Keyword.Reserved, 'funcname'),
+
+            (words(all_lua_builtins(), suffix=r"\b"), Name.Builtin),
+            (fr'[A-Za-z_]\w*(?={_s_la}*[.:])', Name.Variable, 'varname'),
+            (fr'[A-Za-z_]\w*(?={_s_la}*\()', Name.Function),
+            (r'[A-Za-z_]\w*', Name.Variable),
+
+            ("'", String.Single, combined('stringescape', 'sqs')),
+            ('"', String.Double, combined('stringescape', 'dqs'))
+        ],
+
+        'varname': [
+            include('ws'),
+            (r'\.\.', Operator, '#pop'),
+            (r'[.:]', Punctuation),
+            (rf'{_name}(?={_s_la}*[.:])', Name.Property),
+            (rf'{_name}(?={_s_la}*\()', Name.Function, '#pop'),
+            (_name, Name.Property, '#pop'),
+        ],
+
+        'funcname': [
+            include('ws'),
+            (r'[.:]', Punctuation),
+            (rf'{_name}(?={_s_la}*[.:])', Name.Class),
+            (_name, Name.Function, '#pop'),
+            # inline function
+            (r'\(', Punctuation, '#pop'),
+        ],
+
+        'goto': [
+            include('ws'),
+            (_name, Name.Label, '#pop'),
+        ],
+
+        'label': [
+            include('ws'),
+            (r'::', Punctuation, '#pop'),
+            (_name, Name.Label),
+        ],
+
+        'stringescape': [
+            (r'\\([abfnrtv\\"\']|[\r\n]{1,2}|z\s*|x[0-9a-fA-F]{2}|\d{1,3}|'
+             r'u\{[0-9a-fA-F]+\})', String.Escape),
+        ],
+
+        'sqs': [
+            (r"'", String.Single, '#pop'),
+            (r"[^\\']+", String.Single),
+        ],
+
+        'dqs': [
+            (r'"', String.Double, '#pop'),
+            (r'[^\\"]+', String.Double),
+        ]
+    }
+
+    def __init__(self, **options):
+        self.func_name_highlighting = get_bool_opt(
+            options, 'func_name_highlighting', True)
+        self.disabled_modules = get_list_opt(options, 'disabled_modules', [])
+
+        self._functions = set()
+        if self.func_name_highlighting:
+            from pygments.lexers._lua_builtins import MODULES
+            for mod, func in MODULES.items():
+                if mod not in self.disabled_modules:
+                    self._functions.update(func)
+        RegexLexer.__init__(self, **options)
+
+    def get_tokens_unprocessed(self, text):
+        for index, token, value in \
+                RegexLexer.get_tokens_unprocessed(self, text):
+            if token is Name.Builtin and value not in self._functions:
+                if '.' in value:
+                    a, b = value.split('.')
+                    yield index, Name, a
+                    yield index + len(a), Punctuation, '.'
+                    yield index + len(a) + 1, Name, b
+                else:
+                    yield index, Name, value
+                continue
+            yield index, token, value
+
+def _luau_make_expression(should_pop, _s, _s_la):
+    temp_list = [
+        (r'0[xX][\da-fA-F_]*', Number.Hex, '#pop'),
+        (r'0[bB][\d_]*', Number.Bin, '#pop'),
+        (r'\.?\d[\d_]*(?:\.[\d_]*)?(?:[eE][+-]?[\d_]+)?', Number.Float, '#pop'),
+
+        (words((
+            'true', 'false', 'nil'
+        ), suffix=r'\b'), Keyword.Constant, '#pop'),
+
+        (r'\[(=*)\[[.\n]*?\]\1\]', String, '#pop'),
+
+        (r'(\.)([a-zA-Z_]\w*)(?=%s*[({"\'])', bygroups(Punctuation, Name.Function), '#pop'),
+        (r'(\.)([a-zA-Z_]\w*)', bygroups(Punctuation, Name.Variable), '#pop'),
+
+        (rf'[a-zA-Z_]\w*(?:\.[a-zA-Z_]\w*)*(?={_s_la}*[({{"\'])', Name.Other, '#pop'),
+        (r'[a-zA-Z_]\w*(?:\.[a-zA-Z_]\w*)*', Name, '#pop'),
+    ]
+    if should_pop:
+        return temp_list
+    return [entry[:2] for entry in temp_list]
+
+def _luau_make_expression_special(should_pop):
+    temp_list = [
+        (r'\{', Punctuation, ('#pop', 'closing_brace_base', 'expression')),
+        (r'\(', Punctuation, ('#pop', 'closing_parenthesis_base', 'expression')),
+
+        (r'::?', Punctuation, ('#pop', 'type_end', 'type_start')),
+
+        (r"'", String.Single, ('#pop', 'string_single')),
+        (r'"', String.Double, ('#pop', 'string_double')),
+        (r'`', String.Backtick, ('#pop', 'string_interpolated')),
+    ]
+    if should_pop:
+        return temp_list
+    return [(entry[0], entry[1], entry[2][1:]) for entry in temp_list]
+
+class LuauLexer(RegexLexer):
+    """
+    For Luau source code.
+
+    Additional options accepted:
+
+    `include_luau_builtins`
+        If given and ``True``, automatically highlight Luau builtins
+        (default: ``True``).
+    `include_roblox_builtins`
+        If given and ``True``, automatically highlight Roblox-specific builtins
+        (default: ``False``).
+    `additional_builtins`
+        If given, must be a list of additional builtins to highlight.
+    `disabled_builtins`
+        If given, must be a list of builtins that will not be highlighted.
+    """
+
+    name = 'Luau'
+    url = 'https://luau-lang.org/'
+    aliases = ['luau']
+    filenames = ['*.luau']
+    version_added = '2.18'
+
+    _comment_multiline = r'(?:--\[(?P=*)\[[\w\W]*?\](?P=level)\])'
+    _comment_single = r'(?:--.*$)'
+    _s = r'(?:{}|{}|{})'.format(_comment_multiline, _comment_single, r'\s+')
+    # Lookahead-safe version — avoids catastrophic backtracking from
+    # [\w\W]*? inside _comment_multiline when combined with * quantifier.
+    _s_la = r'\s'
+
+    tokens = {
+        'root': [
+            (r'#!.*', Comment.Hashbang, 'base'),
+            default('base'),
+        ],
+
+        'ws': [
+            (_comment_multiline, Comment.Multiline),
+            (_comment_single, Comment.Single),
+            (r'\s+', Whitespace),
+        ],
+
+        'base': [
+            include('ws'),
+
+            *_luau_make_expression_special(False),
+            (r'\.\.\.', Punctuation),
+
+            (rf'type\b(?={_s}+[a-zA-Z_])', Keyword.Reserved, 'type_declaration'),
+            (rf'export\b(?={_s}+[a-zA-Z_])', Keyword.Reserved),
+
+            (r'(?:\.\.|//|[+\-*\/%^<>=])=?', Operator, 'expression'),
+            (r'~=', Operator, 'expression'),
+
+            (words((
+                'and', 'or', 'not'
+            ), suffix=r'\b'), Operator.Word, 'expression'),
+
+            (words((
+                'elseif', 'for', 'if', 'in', 'repeat', 'return', 'until',
+                'while'), suffix=r'\b'), Keyword.Reserved, 'expression'),
+            (r'local\b', Keyword.Declaration, 'expression'),
+
+            (r'function\b', Keyword.Reserved, ('expression', 'func_name')),
+
+            (r'[\])};]+', Punctuation),
+
+            include('expression_static'),
+            *_luau_make_expression(False, _s, _s_la),
+
+            (r'[\[.,]', Punctuation, 'expression'),
+        ],
+        'expression_static': [
+            (words((
+                'break', 'continue', 'do', 'else', 'elseif', 'end', 'for',
+                'if', 'in', 'repeat', 'return', 'then', 'until', 'while'),
+                suffix=r'\b'), Keyword.Reserved),
+        ],
+        'expression': [
+            include('ws'),
+
+            (r'if\b', Keyword.Reserved, ('ternary', 'expression')),
+
+            (r'local\b', Keyword.Declaration),
+            *_luau_make_expression_special(True),
+            (r'\.\.\.', Punctuation, '#pop'),
+
+            (r'function\b', Keyword.Reserved, 'func_name'),
+
+            include('expression_static'),
+            *_luau_make_expression(True, _s, _s_la),
+
+            default('#pop'),
+        ],
+        'ternary': [
+            include('ws'),
+
+            (r'else\b', Keyword.Reserved, '#pop'),
+            (words((
+                'then', 'elseif',
+            ), suffix=r'\b'), Operator.Reserved, 'expression'),
+
+            default('#pop'),
+        ],
+
+        'closing_brace_pop': [
+            (r'\}', Punctuation, '#pop'),
+        ],
+        'closing_parenthesis_pop': [
+            (r'\)', Punctuation, '#pop'),
+        ],
+        'closing_gt_pop': [
+            (r'>', Punctuation, '#pop'),
+        ],
+
+        'closing_parenthesis_base': [
+            include('closing_parenthesis_pop'),
+            include('base'),
+        ],
+        'closing_parenthesis_type': [
+            include('closing_parenthesis_pop'),
+            include('type'),
+        ],
+        'closing_brace_base': [
+            include('closing_brace_pop'),
+            include('base'),
+        ],
+        'closing_brace_type': [
+            include('closing_brace_pop'),
+            include('type'),
+        ],
+        'closing_gt_type': [
+            include('closing_gt_pop'),
+            include('type'),
+        ],
+
+        'string_escape': [
+            (r'\\z\s*', String.Escape),
+            (r'\\(?:[abfnrtvz\\"\'`\{\n])|[\r\n]{1,2}|x[\da-fA-F]{2}|\d{1,3}|'
+             r'u\{\}[\da-fA-F]*\}', String.Escape),
+        ],
+        'string_single': [
+            include('string_escape'),
+
+            (r"'", String.Single, "#pop"),
+            (r"[^\\']+", String.Single),
+        ],
+        'string_double': [
+            include('string_escape'),
+
+            (r'"', String.Double, "#pop"),
+            (r'[^\\"]+', String.Double),
+        ],
+        'string_interpolated': [
+            include('string_escape'),
+
+            (r'\{', Punctuation, ('closing_brace_base', 'expression')),
+
+            (r'`', String.Backtick, "#pop"),
+            (r'[^\\`\{]+', String.Backtick),
+        ],
+
+        'func_name': [
+            include('ws'),
+
+            (r'[.:]', Punctuation),
+            (rf'[a-zA-Z_]\w*(?={_s_la}*[.:])', Name.Class),
+            (r'[a-zA-Z_]\w*', Name.Function),
+
+            (r'<', Punctuation, 'closing_gt_type'),
+
+            (r'\(', Punctuation, '#pop'),
+        ],
+
+        'type': [
+            include('ws'),
+
+            (r'\(', Punctuation, 'closing_parenthesis_type'),
+            (r'\{', Punctuation, 'closing_brace_type'),
+            (r'<', Punctuation, 'closing_gt_type'),
+
+            (r"'", String.Single, 'string_single'),
+            (r'"', String.Double, 'string_double'),
+
+            (r'[|&\.,\[\]:=]+', Punctuation),
+            (r'->', Punctuation),
+
+            (r'typeof\(', Name.Builtin, ('closing_parenthesis_base',
+                                         'expression')),
+            (r'[a-zA-Z_]\w*', Name.Class),
+        ],
+        'type_start': [
+            include('ws'),
+
+            (r'\(', Punctuation, ('#pop', 'closing_parenthesis_type')),
+            (r'\{', Punctuation, ('#pop', 'closing_brace_type')),
+            (r'<', Punctuation, ('#pop', 'closing_gt_type')),
+
+            (r"'", String.Single, ('#pop', 'string_single')),
+            (r'"', String.Double, ('#pop', 'string_double')),
+
+            (r'typeof\(', Name.Builtin, ('#pop', 'closing_parenthesis_base',
+                                         'expression')),
+            (r'[a-zA-Z_]\w*', Name.Class, '#pop'),
+        ],
+        'type_end': [
+            include('ws'),
+
+            (r'[|&\.]', Punctuation, 'type_start'),
+            (r'->', Punctuation, 'type_start'),
+
+            (r'<', Punctuation, 'closing_gt_type'),
+
+            default('#pop'),
+        ],
+        'type_declaration': [
+            include('ws'),
+
+            (r'[a-zA-Z_]\w*', Name.Class),
+            (r'<', Punctuation, 'closing_gt_type'),
+
+            (r'=', Punctuation, ('#pop', 'type_end', 'type_start')),
+        ],
+    }
+
+    def __init__(self, **options):
+        self.include_luau_builtins = get_bool_opt(
+            options, 'include_luau_builtins', True)
+        self.include_roblox_builtins = get_bool_opt(
+            options, 'include_roblox_builtins', False)
+        self.additional_builtins = get_list_opt(options, 'additional_builtins', [])
+        self.disabled_builtins = get_list_opt(options, 'disabled_builtins', [])
+
+        self._builtins = set(self.additional_builtins)
+        if self.include_luau_builtins:
+            from pygments.lexers._luau_builtins import LUAU_BUILTINS
+            self._builtins.update(LUAU_BUILTINS)
+        if self.include_roblox_builtins:
+            from pygments.lexers._luau_builtins import ROBLOX_BUILTINS
+            self._builtins.update(ROBLOX_BUILTINS)
+        if self.additional_builtins:
+            self._builtins.update(self.additional_builtins)
+        self._builtins.difference_update(self.disabled_builtins)
+
+        RegexLexer.__init__(self, **options)
+
+    def get_tokens_unprocessed(self, text):
+        for index, token, value in \
+                RegexLexer.get_tokens_unprocessed(self, text):
+            if token is Name or token is Name.Other:
+                split_value = value.split('.')
+                complete_value = []
+                new_index = index
+                for position in range(len(split_value), 0, -1):
+                    potential_string = '.'.join(split_value[:position])
+                    if potential_string in self._builtins:
+                        yield index, Name.Builtin, potential_string
+                        new_index += len(potential_string)
+
+                        if complete_value:
+                            yield new_index, Punctuation, '.'
+                            new_index += 1
+                        break
+                    complete_value.insert(0, split_value[position - 1])
+
+                for position, substring in enumerate(complete_value):
+                    if position + 1 == len(complete_value):
+                        if token is Name:
+                            yield new_index, Name.Variable, substring
+                            continue
+                        yield new_index, Name.Function, substring
+                        continue
+                    yield new_index, Name.Variable, substring
+                    new_index += len(substring)
+                    yield new_index, Punctuation, '.'
+                    new_index += 1
+
+                continue
+            yield index, token, value
+
+class MoonScriptLexer(LuaLexer):
+    """
+    For MoonScript source code.
+    """
+
+    name = 'MoonScript'
+    url = 'http://moonscript.org'
+    aliases = ['moonscript', 'moon']
+    filenames = ['*.moon']
+    mimetypes = ['text/x-moonscript', 'application/x-moonscript']
+    version_added = '1.5'
+
+    tokens = {
+        'root': [
+            (r'#!(.*?)$', Comment.Preproc),
+            default('base'),
+        ],
+        'base': [
+            ('--.*$', Comment.Single),
+            (r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number.Float),
+            (r'(?i)\d+e[+-]?\d+', Number.Float),
+            (r'(?i)0x[0-9a-f]*', Number.Hex),
+            (r'\d+', Number.Integer),
+            (r'\n', Whitespace),
+            (r'[^\S\n]+', Text),
+            (r'(?s)\[(=*)\[.*?\]\1\]', String),
+            (r'(->|=>)', Name.Function),
+            (r':[a-zA-Z_]\w*', Name.Variable),
+            (r'(==|!=|~=|<=|>=|\.\.\.|\.\.|[=+\-*/%^<>#!.\\:])', Operator),
+            (r'[;,]', Punctuation),
+            (r'[\[\]{}()]', Keyword.Type),
+            (r'[a-zA-Z_]\w*:', Name.Variable),
+            (words((
+                'class', 'extends', 'if', 'then', 'super', 'do', 'with',
+                'import', 'export', 'while', 'elseif', 'return', 'for', 'in',
+                'from', 'when', 'using', 'else', 'and', 'or', 'not', 'switch',
+                'break'), suffix=r'\b'),
+             Keyword),
+            (r'(true|false|nil)\b', Keyword.Constant),
+            (r'(and|or|not)\b', Operator.Word),
+            (r'(self)\b', Name.Builtin.Pseudo),
+            (r'@@?([a-zA-Z_]\w*)?', Name.Variable.Class),
+            (r'[A-Z]\w*', Name.Class),  # proper name
+            (words(all_lua_builtins(), suffix=r"\b"), Name.Builtin),
+            (r'[A-Za-z_]\w*', Name),
+            ("'", String.Single, combined('stringescape', 'sqs')),
+            ('"', String.Double, combined('stringescape', 'dqs'))
+        ],
+        'stringescape': [
+            (r'''\\([abfnrtv\\"']|\d{1,3})''', String.Escape)
+        ],
+        'strings': [
+            (r'[^#\\\'"]+', String),
+            # note that strings are multi-line.
+            # hashmarks, quotes and backslashes must be parsed one at a time
+        ],
+        'interpoling_string': [
+            (r'\}', String.Interpol, "#pop"),
+            include('base')
+        ],
+        'dqs': [
+            (r'"', String.Double, '#pop'),
+            (r'\\.|\'', String),  # double-quoted string don't need ' escapes
+            (r'#\{', String.Interpol, "interpoling_string"),
+            (r'#', String),
+            include('strings')
+        ],
+        'sqs': [
+            (r"'", String.Single, '#pop'),
+            (r'#|\\.|"', String),  # single quoted strings don't need " escapses
+            include('strings')
+        ]
+    }
+
+    def get_tokens_unprocessed(self, text):
+        # set . as Operator instead of Punctuation
+        for index, token, value in LuaLexer.get_tokens_unprocessed(self, text):
+            if token == Punctuation and value == ".":
+                token = Operator
+            yield index, token, value
+
+
+class ChaiscriptLexer(RegexLexer):
+    """
+    For ChaiScript source code.
+    """
+
+    name = 'ChaiScript'
+    url = 'http://chaiscript.com/'
+    aliases = ['chaiscript', 'chai']
+    filenames = ['*.chai']
+    mimetypes = ['text/x-chaiscript', 'application/x-chaiscript']
+    version_added = '2.0'
+
+    flags = re.DOTALL | re.MULTILINE
+
+    tokens = {
+        'commentsandwhitespace': [
+            (r'\s+', Text),
+            (r'//.*?\n', Comment.Single),
+            (r'/\*.*?\*/', Comment.Multiline),
+            (r'^\#.*?\n', Comment.Single)
+        ],
+        'slashstartsregex': [
+            include('commentsandwhitespace'),
+            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
+             r'([gim]+\b|\B)', String.Regex, '#pop'),
+            (r'(?=/)', Text, ('#pop', 'badregex')),
+            default('#pop')
+        ],
+        'badregex': [
+            (r'\n', Text, '#pop')
+        ],
+        'root': [
+            include('commentsandwhitespace'),
+            (r'\n', Text),
+            (r'[^\S\n]+', Text),
+            (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|\.\.'
+             r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'),
+            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
+            (r'[})\].]', Punctuation),
+            (r'[=+\-*/]', Operator),
+            (r'(for|in|while|do|break|return|continue|if|else|'
+             r'throw|try|catch'
+             r')\b', Keyword, 'slashstartsregex'),
+            (r'(var)\b', Keyword.Declaration, 'slashstartsregex'),
+            (r'(attr|def|fun)\b', Keyword.Reserved),
+            (r'(true|false)\b', Keyword.Constant),
+            (r'(eval|throw)\b', Name.Builtin),
+            (r'`\S+`', Name.Builtin),
+            (r'[$a-zA-Z_]\w*', Name.Other),
+            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
+            (r'0x[0-9a-fA-F]+', Number.Hex),
+            (r'[0-9]+', Number.Integer),
+            (r'"', String.Double, 'dqstring'),
+            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
+        ],
+        'dqstring': [
+            (r'\$\{[^"}]+?\}', String.Interpol),
+            (r'\$', String.Double),
+            (r'\\\\', String.Double),
+            (r'\\"', String.Double),
+            (r'[^\\"$]+', String.Double),
+            (r'"', String.Double, '#pop'),
+        ],
+    }
+
+
+class LSLLexer(RegexLexer):
+    """
+    For Second Life's Linden Scripting Language source code.
+    """
+
+    name = 'LSL'
+    aliases = ['lsl']
+    filenames = ['*.lsl']
+    mimetypes = ['text/x-lsl']
+    url = 'https://wiki.secondlife.com/wiki/Linden_Scripting_Language'
+    version_added = '2.0'
+
+    flags = re.MULTILINE
+
+    lsl_keywords = r'\b(?:do|else|for|if|jump|return|while)\b'
+    lsl_types = r'\b(?:float|integer|key|list|quaternion|rotation|string|vector)\b'
+    lsl_states = r'\b(?:(?:state)\s+\w+|default)\b'
+    lsl_events = r'\b(?:state_(?:entry|exit)|touch(?:_(?:start|end))?|(?:land_)?collision(?:_(?:start|end))?|timer|listen|(?:no_)?sensor|control|(?:not_)?at_(?:rot_)?target|money|email|run_time_permissions|changed|attach|dataserver|moving_(?:start|end)|link_message|(?:on|object)_rez|remote_data|http_re(?:sponse|quest)|path_update|transaction_result)\b'
+    lsl_functions_builtin = r'\b(?:ll(?:ReturnObjectsBy(?:ID|Owner)|Json(?:2List|[GS]etValue|ValueType)|Sin|Cos|Tan|Atan2|Sqrt|Pow|Abs|Fabs|Frand|Floor|Ceil|Round|Vec(?:Mag|Norm|Dist)|Rot(?:Between|2(?:Euler|Fwd|Left|Up))|(?:Euler|Axes)2Rot|Whisper|(?:Region|Owner)?Say|Shout|Listen(?:Control|Remove)?|Sensor(?:Repeat|Remove)?|Detected(?:Name|Key|Owner|Type|Pos|Vel|Grab|Rot|Group|LinkNumber)|Die|Ground|Wind|(?:[GS]et)(?:AnimationOverride|MemoryLimit|PrimMediaParams|ParcelMusicURL|Object(?:Desc|Name)|PhysicsMaterial|Status|Scale|Color|Alpha|Texture|Pos|Rot|Force|Torque)|ResetAnimationOverride|(?:Scale|Offset|Rotate)Texture|(?:Rot)?Target(?:Remove)?|(?:Stop)?MoveToTarget|Apply(?:Rotational)?Impulse|Set(?:KeyframedMotion|ContentType|RegionPos|(?:Angular)?Velocity|Buoyancy|HoverHeight|ForceAndTorque|TimerEvent|ScriptState|Damage|TextureAnim|Sound(?:Queueing|Radius)|Vehicle(?:Type|(?:Float|Vector|Rotation)Param)|(?:Touch|Sit)?Text|Camera(?:Eye|At)Offset|PrimitiveParams|ClickAction|Link(?:Alpha|Color|PrimitiveParams(?:Fast)?|Texture(?:Anim)?|Camera|Media)|RemoteScriptAccessPin|PayPrice|LocalRot)|ScaleByFactor|Get(?:(?:Max|Min)ScaleFactor|ClosestNavPoint|StaticPath|SimStats|Env|PrimitiveParams|Link(?:PrimitiveParams|Number(?:OfSides)?|Key|Name|Media)|HTTPHeader|FreeURLs|Object(?:Details|PermMask|PrimCount)|Parcel(?:MaxPrims|Details|Prim(?:Count|Owners))|Attached|(?:SPMax|Free|Used)Memory|Region(?:Name|TimeDilation|FPS|Corner|AgentCount)|Root(?:Position|Rotation)|UnixTime|(?:Parcel|Region)Flags|(?:Wall|GMT)clock|SimulatorHostname|BoundingBox|GeometricCenter|Creator|NumberOf(?:Prims|NotecardLines|Sides)|Animation(?:List)?|(?:Camera|Local)(?:Pos|Rot)|Vel|Accel|Omega|Time(?:stamp|OfDay)|(?:Object|CenterOf)?Mass|MassMKS|Energy|Owner|(?:Owner)?Key|SunDirection|Texture(?:Offset|Scale|Rot)|Inventory(?:Number|Name|Key|Type|Creator|PermMask)|Permissions(?:Key)?|StartParameter|List(?:Length|EntryType)|Date|Agent(?:Size|Info|Language|List)|LandOwnerAt|NotecardLine|Script(?:Name|State))|(?:Get|Reset|GetAndReset)Time|PlaySound(?:Slave)?|LoopSound(?:Master|Slave)?|(?:Trigger|Stop|Preload)Sound|(?:(?:Get|Delete)Sub|Insert)String|To(?:Upper|Lower)|Give(?:InventoryList|Money)|RezObject|(?:Stop)?LookAt|Sleep|CollisionFilter|(?:Take|Release)Controls|DetachFromAvatar|AttachToAvatar(?:Temp)?|InstantMessage|(?:GetNext)?Email|StopHover|MinEventDelay|RotLookAt|String(?:Length|Trim)|(?:Start|Stop)Animation|TargetOmega|RequestPermissions|(?:Create|Break)Link|BreakAllLinks|(?:Give|Remove)Inventory|Water|PassTouches|Request(?:Agent|Inventory)Data|TeleportAgent(?:Home|GlobalCoords)?|ModifyLand|CollisionSound|ResetScript|MessageLinked|PushObject|PassCollisions|AxisAngle2Rot|Rot2(?:Axis|Angle)|A(?:cos|sin)|AngleBetween|AllowInventoryDrop|SubStringIndex|List2(?:CSV|Integer|Json|Float|String|Key|Vector|Rot|List(?:Strided)?)|DeleteSubList|List(?:Statistics|Sort|Randomize|(?:Insert|Find|Replace)List)|EdgeOfWorld|AdjustSoundVolume|Key2Name|TriggerSoundLimited|EjectFromLand|(?:CSV|ParseString)2List|OverMyLand|SameGroup|UnSit|Ground(?:Slope|Normal|Contour)|GroundRepel|(?:Set|Remove)VehicleFlags|(?:AvatarOn)?(?:Link)?SitTarget|Script(?:Danger|Profiler)|Dialog|VolumeDetect|ResetOtherScript|RemoteLoadScriptPin|(?:Open|Close)RemoteDataChannel|SendRemoteData|RemoteDataReply|(?:Integer|String)ToBase64|XorBase64|Log(?:10)?|Base64To(?:String|Integer)|ParseStringKeepNulls|RezAtRoot|RequestSimulatorData|ForceMouselook|(?:Load|Release|(?:E|Une)scape)URL|ParcelMedia(?:CommandList|Query)|ModPow|MapDestination|(?:RemoveFrom|AddTo|Reset)Land(?:Pass|Ban)List|(?:Set|Clear)CameraParams|HTTP(?:Request|Response)|TextBox|DetectedTouch(?:UV|Face|Pos|(?:N|Bin)ormal|ST)|(?:MD5|SHA1|DumpList2)String|Request(?:Secure)?URL|Clear(?:Prim|Link)Media|(?:Link)?ParticleSystem|(?:Get|Request)(?:Username|DisplayName)|RegionSayTo|CastRay|GenerateKey|TransferLindenDollars|ManageEstateAccess|(?:Create|Delete)Character|ExecCharacterCmd|Evade|FleeFrom|NavigateTo|PatrolPoints|Pursue|UpdateCharacter|WanderWithin))\b'
+    lsl_constants_float = r'\b(?:DEG_TO_RAD|PI(?:_BY_TWO)?|RAD_TO_DEG|SQRT2|TWO_PI)\b'
+    lsl_constants_integer = r'\b(?:JSON_APPEND|STATUS_(?:PHYSICS|ROTATE_[XYZ]|PHANTOM|SANDBOX|BLOCK_GRAB(?:_OBJECT)?|(?:DIE|RETURN)_AT_EDGE|CAST_SHADOWS|OK|MALFORMED_PARAMS|TYPE_MISMATCH|BOUNDS_ERROR|NOT_(?:FOUND|SUPPORTED)|INTERNAL_ERROR|WHITELIST_FAILED)|AGENT(?:_(?:BY_(?:LEGACY_|USER)NAME|FLYING|ATTACHMENTS|SCRIPTED|MOUSELOOK|SITTING|ON_OBJECT|AWAY|WALKING|IN_AIR|TYPING|CROUCHING|BUSY|ALWAYS_RUN|AUTOPILOT|LIST_(?:PARCEL(?:_OWNER)?|REGION)))?|CAMERA_(?:PITCH|DISTANCE|BEHINDNESS_(?:ANGLE|LAG)|(?:FOCUS|POSITION)(?:_(?:THRESHOLD|LOCKED|LAG))?|FOCUS_OFFSET|ACTIVE)|ANIM_ON|LOOP|REVERSE|PING_PONG|SMOOTH|ROTATE|SCALE|ALL_SIDES|LINK_(?:ROOT|SET|ALL_(?:OTHERS|CHILDREN)|THIS)|ACTIVE|PASSIVE|SCRIPTED|CONTROL_(?:FWD|BACK|(?:ROT_)?(?:LEFT|RIGHT)|UP|DOWN|(?:ML_)?LBUTTON)|PERMISSION_(?:RETURN_OBJECTS|DEBIT|OVERRIDE_ANIMATIONS|SILENT_ESTATE_MANAGEMENT|TAKE_CONTROLS|TRIGGER_ANIMATION|ATTACH|CHANGE_LINKS|(?:CONTROL|TRACK)_CAMERA|TELEPORT)|INVENTORY_(?:TEXTURE|SOUND|OBJECT|SCRIPT|LANDMARK|CLOTHING|NOTECARD|BODYPART|ANIMATION|GESTURE|ALL|NONE)|CHANGED_(?:INVENTORY|COLOR|SHAPE|SCALE|TEXTURE|LINK|ALLOWED_DROP|OWNER|REGION(?:_START)?|TELEPORT|MEDIA)|OBJECT_(?:(?:PHYSICS|SERVER|STREAMING)_COST|UNKNOWN_DETAIL|CHARACTER_TIME|PHANTOM|PHYSICS|TEMP_ON_REZ|NAME|DESC|POS|PRIM_EQUIVALENCE|RETURN_(?:PARCEL(?:_OWNER)?|REGION)|ROO?T|VELOCITY|OWNER|GROUP|CREATOR|ATTACHED_POINT|RENDER_WEIGHT|PATHFINDING_TYPE|(?:RUNNING|TOTAL)_SCRIPT_COUNT|SCRIPT_(?:MEMORY|TIME))|TYPE_(?:INTEGER|FLOAT|STRING|KEY|VECTOR|ROTATION|INVALID)|(?:DEBUG|PUBLIC)_CHANNEL|ATTACH_(?:AVATAR_CENTER|CHEST|HEAD|BACK|PELVIS|MOUTH|CHIN|NECK|NOSE|BELLY|[LR](?:SHOULDER|HAND|FOOT|EAR|EYE|[UL](?:ARM|LEG)|HIP)|(?:LEFT|RIGHT)_PEC|HUD_(?:CENTER_[12]|TOP_(?:RIGHT|CENTER|LEFT)|BOTTOM(?:_(?:RIGHT|LEFT))?))|LAND_(?:LEVEL|RAISE|LOWER|SMOOTH|NOISE|REVERT)|DATA_(?:ONLINE|NAME|BORN|SIM_(?:POS|STATUS|RATING)|PAYINFO)|PAYMENT_INFO_(?:ON_FILE|USED)|REMOTE_DATA_(?:CHANNEL|REQUEST|REPLY)|PSYS_(?:PART_(?:BF_(?:ZERO|ONE(?:_MINUS_(?:DEST_COLOR|SOURCE_(ALPHA|COLOR)))?|DEST_COLOR|SOURCE_(ALPHA|COLOR))|BLEND_FUNC_(DEST|SOURCE)|FLAGS|(?:START|END)_(?:COLOR|ALPHA|SCALE|GLOW)|MAX_AGE|(?:RIBBON|WIND|INTERP_(?:COLOR|SCALE)|BOUNCE|FOLLOW_(?:SRC|VELOCITY)|TARGET_(?:POS|LINEAR)|EMISSIVE)_MASK)|SRC_(?:MAX_AGE|PATTERN|ANGLE_(?:BEGIN|END)|BURST_(?:RATE|PART_COUNT|RADIUS|SPEED_(?:MIN|MAX))|ACCEL|TEXTURE|TARGET_KEY|OMEGA|PATTERN_(?:DROP|EXPLODE|ANGLE(?:_CONE(?:_EMPTY)?)?)))|VEHICLE_(?:REFERENCE_FRAME|TYPE_(?:NONE|SLED|CAR|BOAT|AIRPLANE|BALLOON)|(?:LINEAR|ANGULAR)_(?:FRICTION_TIMESCALE|MOTOR_DIRECTION)|LINEAR_MOTOR_OFFSET|HOVER_(?:HEIGHT|EFFICIENCY|TIMESCALE)|BUOYANCY|(?:LINEAR|ANGULAR)_(?:DEFLECTION_(?:EFFICIENCY|TIMESCALE)|MOTOR_(?:DECAY_)?TIMESCALE)|VERTICAL_ATTRACTION_(?:EFFICIENCY|TIMESCALE)|BANKING_(?:EFFICIENCY|MIX|TIMESCALE)|FLAG_(?:NO_DEFLECTION_UP|LIMIT_(?:ROLL_ONLY|MOTOR_UP)|HOVER_(?:(?:WATER|TERRAIN|UP)_ONLY|GLOBAL_HEIGHT)|MOUSELOOK_(?:STEER|BANK)|CAMERA_DECOUPLED))|PRIM_(?:TYPE(?:_(?:BOX|CYLINDER|PRISM|SPHERE|TORUS|TUBE|RING|SCULPT))?|HOLE_(?:DEFAULT|CIRCLE|SQUARE|TRIANGLE)|MATERIAL(?:_(?:STONE|METAL|GLASS|WOOD|FLESH|PLASTIC|RUBBER))?|SHINY_(?:NONE|LOW|MEDIUM|HIGH)|BUMP_(?:NONE|BRIGHT|DARK|WOOD|BARK|BRICKS|CHECKER|CONCRETE|TILE|STONE|DISKS|GRAVEL|BLOBS|SIDING|LARGETILE|STUCCO|SUCTION|WEAVE)|TEXGEN_(?:DEFAULT|PLANAR)|SCULPT_(?:TYPE_(?:SPHERE|TORUS|PLANE|CYLINDER|MASK)|FLAG_(?:MIRROR|INVERT))|PHYSICS(?:_(?:SHAPE_(?:CONVEX|NONE|PRIM|TYPE)))?|(?:POS|ROT)_LOCAL|SLICE|TEXT|FLEXIBLE|POINT_LIGHT|TEMP_ON_REZ|PHANTOM|POSITION|SIZE|ROTATION|TEXTURE|NAME|OMEGA|DESC|LINK_TARGET|COLOR|BUMP_SHINY|FULLBRIGHT|TEXGEN|GLOW|MEDIA_(?:ALT_IMAGE_ENABLE|CONTROLS|(?:CURRENT|HOME)_URL|AUTO_(?:LOOP|PLAY|SCALE|ZOOM)|FIRST_CLICK_INTERACT|(?:WIDTH|HEIGHT)_PIXELS|WHITELIST(?:_ENABLE)?|PERMS_(?:INTERACT|CONTROL)|PARAM_MAX|CONTROLS_(?:STANDARD|MINI)|PERM_(?:NONE|OWNER|GROUP|ANYONE)|MAX_(?:URL_LENGTH|WHITELIST_(?:SIZE|COUNT)|(?:WIDTH|HEIGHT)_PIXELS)))|MASK_(?:BASE|OWNER|GROUP|EVERYONE|NEXT)|PERM_(?:TRANSFER|MODIFY|COPY|MOVE|ALL)|PARCEL_(?:MEDIA_COMMAND_(?:STOP|PAUSE|PLAY|LOOP|TEXTURE|URL|TIME|AGENT|UNLOAD|AUTO_ALIGN|TYPE|SIZE|DESC|LOOP_SET)|FLAG_(?:ALLOW_(?:FLY|(?:GROUP_)?SCRIPTS|LANDMARK|TERRAFORM|DAMAGE|CREATE_(?:GROUP_)?OBJECTS)|USE_(?:ACCESS_(?:GROUP|LIST)|BAN_LIST|LAND_PASS_LIST)|LOCAL_SOUND_ONLY|RESTRICT_PUSHOBJECT|ALLOW_(?:GROUP|ALL)_OBJECT_ENTRY)|COUNT_(?:TOTAL|OWNER|GROUP|OTHER|SELECTED|TEMP)|DETAILS_(?:NAME|DESC|OWNER|GROUP|AREA|ID|SEE_AVATARS))|LIST_STAT_(?:MAX|MIN|MEAN|MEDIAN|STD_DEV|SUM(?:_SQUARES)?|NUM_COUNT|GEOMETRIC_MEAN|RANGE)|PAY_(?:HIDE|DEFAULT)|REGION_FLAG_(?:ALLOW_DAMAGE|FIXED_SUN|BLOCK_TERRAFORM|SANDBOX|DISABLE_(?:COLLISIONS|PHYSICS)|BLOCK_FLY|ALLOW_DIRECT_TELEPORT|RESTRICT_PUSHOBJECT)|HTTP_(?:METHOD|MIMETYPE|BODY_(?:MAXLENGTH|TRUNCATED)|CUSTOM_HEADER|PRAGMA_NO_CACHE|VERBOSE_THROTTLE|VERIFY_CERT)|STRING_(?:TRIM(?:_(?:HEAD|TAIL))?)|CLICK_ACTION_(?:NONE|TOUCH|SIT|BUY|PAY|OPEN(?:_MEDIA)?|PLAY|ZOOM)|TOUCH_INVALID_FACE|PROFILE_(?:NONE|SCRIPT_MEMORY)|RC_(?:DATA_FLAGS|DETECT_PHANTOM|GET_(?:LINK_NUM|NORMAL|ROOT_KEY)|MAX_HITS|REJECT_(?:TYPES|AGENTS|(?:NON)?PHYSICAL|LAND))|RCERR_(?:CAST_TIME_EXCEEDED|SIM_PERF_LOW|UNKNOWN)|ESTATE_ACCESS_(?:ALLOWED_(?:AGENT|GROUP)_(?:ADD|REMOVE)|BANNED_AGENT_(?:ADD|REMOVE))|DENSITY|FRICTION|RESTITUTION|GRAVITY_MULTIPLIER|KFM_(?:COMMAND|CMD_(?:PLAY|STOP|PAUSE|SET_MODE)|MODE|FORWARD|LOOP|PING_PONG|REVERSE|DATA|ROTATION|TRANSLATION)|ERR_(?:GENERIC|PARCEL_PERMISSIONS|MALFORMED_PARAMS|RUNTIME_PERMISSIONS|THROTTLED)|CHARACTER_(?:CMD_(?:(?:SMOOTH_)?STOP|JUMP)|DESIRED_(?:TURN_)?SPEED|RADIUS|STAY_WITHIN_PARCEL|LENGTH|ORIENTATION|ACCOUNT_FOR_SKIPPED_FRAMES|AVOIDANCE_MODE|TYPE(?:_(?:[A-D]|NONE))?|MAX_(?:DECEL|TURN_RADIUS|(?:ACCEL|SPEED)))|PURSUIT_(?:OFFSET|FUZZ_FACTOR|GOAL_TOLERANCE|INTERCEPT)|REQUIRE_LINE_OF_SIGHT|FORCE_DIRECT_PATH|VERTICAL|HORIZONTAL|AVOID_(?:CHARACTERS|DYNAMIC_OBSTACLES|NONE)|PU_(?:EVADE_(?:HIDDEN|SPOTTED)|FAILURE_(?:DYNAMIC_PATHFINDING_DISABLED|INVALID_(?:GOAL|START)|NO_(?:NAVMESH|VALID_DESTINATION)|OTHER|TARGET_GONE|(?:PARCEL_)?UNREACHABLE)|(?:GOAL|SLOWDOWN_DISTANCE)_REACHED)|TRAVERSAL_TYPE(?:_(?:FAST|NONE|SLOW))?|CONTENT_TYPE_(?:ATOM|FORM|HTML|JSON|LLSD|RSS|TEXT|XHTML|XML)|GCNP_(?:RADIUS|STATIC)|(?:PATROL|WANDER)_PAUSE_AT_WAYPOINTS|OPT_(?:AVATAR|CHARACTER|EXCLUSION_VOLUME|LEGACY_LINKSET|MATERIAL_VOLUME|OTHER|STATIC_OBSTACLE|WALKABLE)|SIM_STAT_PCT_CHARS_STEPPED)\b'
+    lsl_constants_integer_boolean = r'\b(?:FALSE|TRUE)\b'
+    lsl_constants_rotation = r'\b(?:ZERO_ROTATION)\b'
+    lsl_constants_string = r'\b(?:EOF|JSON_(?:ARRAY|DELETE|FALSE|INVALID|NULL|NUMBER|OBJECT|STRING|TRUE)|NULL_KEY|TEXTURE_(?:BLANK|DEFAULT|MEDIA|PLYWOOD|TRANSPARENT)|URL_REQUEST_(?:GRANTED|DENIED))\b'
+    lsl_constants_vector = r'\b(?:TOUCH_INVALID_(?:TEXCOORD|VECTOR)|ZERO_VECTOR)\b'
+    lsl_invalid_broken = r'\b(?:LAND_(?:LARGE|MEDIUM|SMALL)_BRUSH)\b'
+    lsl_invalid_deprecated = r'\b(?:ATTACH_[LR]PEC|DATA_RATING|OBJECT_ATTACHMENT_(?:GEOMETRY_BYTES|SURFACE_AREA)|PRIM_(?:CAST_SHADOWS|MATERIAL_LIGHT|TYPE_LEGACY)|PSYS_SRC_(?:INNER|OUTER)ANGLE|VEHICLE_FLAG_NO_FLY_UP|ll(?:Cloud|Make(?:Explosion|Fountain|Smoke|Fire)|RemoteDataSetRegion|Sound(?:Preload)?|XorBase64Strings(?:Correct)?))\b'
+    lsl_invalid_illegal = r'\b(?:event)\b'
+    lsl_invalid_unimplemented = r'\b(?:CHARACTER_(?:MAX_ANGULAR_(?:ACCEL|SPEED)|TURN_SPEED_MULTIPLIER)|PERMISSION_(?:CHANGE_(?:JOINTS|PERMISSIONS)|RELEASE_OWNERSHIP|REMAP_CONTROLS)|PRIM_PHYSICS_MATERIAL|PSYS_SRC_OBJ_REL_MASK|ll(?:CollisionSprite|(?:Stop)?PointAt|(?:(?:Refresh|Set)Prim)URL|(?:Take|Release)Camera|RemoteLoadScript))\b'
+    lsl_reserved_godmode = r'\b(?:ll(?:GodLikeRezObject|Set(?:Inventory|Object)PermMask))\b'
+    lsl_reserved_log = r'\b(?:print)\b'
+    lsl_operators = r'\+\+|\-\-|<<|>>|&&?|\|\|?|\^|~|[!%<>=*+\-/]=?'
+
+    tokens = {
+        'root':
+        [
+            (r'//.*?\n',                          Comment.Single),
+            (r'/\*',                              Comment.Multiline, 'comment'),
+            (r'"',                                String.Double, 'string'),
+            (lsl_keywords,                        Keyword),
+            (lsl_types,                           Keyword.Type),
+            (lsl_states,                          Name.Class),
+            (lsl_events,                          Name.Builtin),
+            (lsl_functions_builtin,               Name.Function),
+            (lsl_constants_float,                 Keyword.Constant),
+            (lsl_constants_integer,               Keyword.Constant),
+            (lsl_constants_integer_boolean,       Keyword.Constant),
+            (lsl_constants_rotation,              Keyword.Constant),
+            (lsl_constants_string,                Keyword.Constant),
+            (lsl_constants_vector,                Keyword.Constant),
+            (lsl_invalid_broken,                  Error),
+            (lsl_invalid_deprecated,              Error),
+            (lsl_invalid_illegal,                 Error),
+            (lsl_invalid_unimplemented,           Error),
+            (lsl_reserved_godmode,                Keyword.Reserved),
+            (lsl_reserved_log,                    Keyword.Reserved),
+            (r'\b([a-zA-Z_]\w*)\b',     Name.Variable),
+            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d*', Number.Float),
+            (r'(\d+\.\d*|\.\d+)',                 Number.Float),
+            (r'0[xX][0-9a-fA-F]+',                Number.Hex),
+            (r'\d+',                              Number.Integer),
+            (lsl_operators,                       Operator),
+            (r':=?',                              Error),
+            (r'[,;{}()\[\]]',                     Punctuation),
+            (r'\n+',                              Whitespace),
+            (r'\s+',                              Whitespace)
+        ],
+        'comment':
+        [
+            (r'[^*/]+',                           Comment.Multiline),
+            (r'/\*',                              Comment.Multiline, '#push'),
+            (r'\*/',                              Comment.Multiline, '#pop'),
+            (r'[*/]',                             Comment.Multiline)
+        ],
+        'string':
+        [
+            (r'\\([nt"\\])',                      String.Escape),
+            (r'"',                                String.Double, '#pop'),
+            (r'\\.',                              Error),
+            (r'[^"\\]+',                          String.Double),
+        ]
+    }
+
+
+class AppleScriptLexer(RegexLexer):
+    """
+    For AppleScript source code,
+    including `AppleScript Studio
+    `_.
+    Contributed by Andreas Amann .
+    """
+
+    name = 'AppleScript'
+    url = 'https://developer.apple.com/library/archive/documentation/AppleScript/Conceptual/AppleScriptLangGuide/introduction/ASLR_intro.html'
+    aliases = ['applescript']
+    filenames = ['*.applescript']
+    version_added = '1.0'
+
+    flags = re.MULTILINE | re.DOTALL
+
+    Identifiers = r'[a-zA-Z]\w*'
+
+    # XXX: use words() for all of these
+    Literals = ('AppleScript', 'current application', 'false', 'linefeed',
+                'missing value', 'pi', 'quote', 'result', 'return', 'space',
+                'tab', 'text item delimiters', 'true', 'version')
+    Classes = ('alias ', 'application ', 'boolean ', 'class ', 'constant ',
+               'date ', 'file ', 'integer ', 'list ', 'number ', 'POSIX file ',
+               'real ', 'record ', 'reference ', 'RGB color ', 'script ',
+               'text ', 'unit types', '(?:Unicode )?text', 'string')
+    BuiltIn = ('attachment', 'attribute run', 'character', 'day', 'month',
+               'paragraph', 'word', 'year')
+    HandlerParams = ('about', 'above', 'against', 'apart from', 'around',
+                     'aside from', 'at', 'below', 'beneath', 'beside',
+                     'between', 'for', 'given', 'instead of', 'on', 'onto',
+                     'out of', 'over', 'since')
+    Commands = ('ASCII (character|number)', 'activate', 'beep', 'choose URL',
+                'choose application', 'choose color', 'choose file( name)?',
+                'choose folder', 'choose from list',
+                'choose remote application', 'clipboard info',
+                'close( access)?', 'copy', 'count', 'current date', 'delay',
+                'delete', 'display (alert|dialog)', 'do shell script',
+                'duplicate', 'exists', 'get eof', 'get volume settings',
+                'info for', 'launch', 'list (disks|folder)', 'load script',
+                'log', 'make', 'mount volume', 'new', 'offset',
+                'open( (for access|location))?', 'path to', 'print', 'quit',
+                'random number', 'read', 'round', 'run( script)?',
+                'say', 'scripting components',
+                'set (eof|the clipboard to|volume)', 'store script',
+                'summarize', 'system attribute', 'system info',
+                'the clipboard', 'time to GMT', 'write', 'quoted form')
+    References = ('(in )?back of', '(in )?front of', '[0-9]+(st|nd|rd|th)',
+                  'first', 'second', 'third', 'fourth', 'fifth', 'sixth',
+                  'seventh', 'eighth', 'ninth', 'tenth', 'after', 'back',
+                  'before', 'behind', 'every', 'front', 'index', 'last',
+                  'middle', 'some', 'that', 'through', 'thru', 'where', 'whose')
+    Operators = ("and", "or", "is equal", "equals", "(is )?equal to", "is not",
+                 "isn't", "isn't equal( to)?", "is not equal( to)?",
+                 "doesn't equal", "does not equal", "(is )?greater than",
+                 "comes after", "is not less than or equal( to)?",
+                 "isn't less than or equal( to)?", "(is )?less than",
+                 "comes before", "is not greater than or equal( to)?",
+                 "isn't greater than or equal( to)?",
+                 "(is  )?greater than or equal( to)?", "is not less than",
+                 "isn't less than", "does not come before",
+                 "doesn't come before", "(is )?less than or equal( to)?",
+                 "is not greater than", "isn't greater than",
+                 "does not come after", "doesn't come after", "starts? with",
+                 "begins? with", "ends? with", "contains?", "does not contain",
+                 "doesn't contain", "is in", "is contained by", "is not in",
+                 "is not contained by", "isn't contained by", "div", "mod",
+                 "not", "(a  )?(ref( to)?|reference to)", "is", "does")
+    Control = ('considering', 'else', 'error', 'exit', 'from', 'if',
+               'ignoring', 'in', 'repeat', 'tell', 'then', 'times', 'to',
+               'try', 'until', 'using terms from', 'while', 'whith',
+               'with timeout( of)?', 'with transaction', 'by', 'continue',
+               'end', 'its?', 'me', 'my', 'return', 'of', 'as')
+    Declarations = ('global', 'local', 'prop(erty)?', 'set', 'get')
+    Reserved = ('but', 'put', 'returning', 'the')
+    StudioClasses = ('action cell', 'alert reply', 'application', 'box',
+                     'browser( cell)?', 'bundle', 'button( cell)?', 'cell',
+                     'clip view', 'color well', 'color-panel',
+                     'combo box( item)?', 'control',
+                     'data( (cell|column|item|row|source))?', 'default entry',
+                     'dialog reply', 'document', 'drag info', 'drawer',
+                     'event', 'font(-panel)?', 'formatter',
+                     'image( (cell|view))?', 'matrix', 'menu( item)?', 'item',
+                     'movie( view)?', 'open-panel', 'outline view', 'panel',
+                     'pasteboard', 'plugin', 'popup button',
+                     'progress indicator', 'responder', 'save-panel',
+                     'scroll view', 'secure text field( cell)?', 'slider',
+                     'sound', 'split view', 'stepper', 'tab view( item)?',
+                     'table( (column|header cell|header view|view))',
+                     'text( (field( cell)?|view))?', 'toolbar( item)?',
+                     'user-defaults', 'view', 'window')
+    StudioEvents = ('accept outline drop', 'accept table drop', 'action',
+                    'activated', 'alert ended', 'awake from nib', 'became key',
+                    'became main', 'begin editing', 'bounds changed',
+                    'cell value', 'cell value changed', 'change cell value',
+                    'change item value', 'changed', 'child of item',
+                    'choose menu item', 'clicked', 'clicked toolbar item',
+                    'closed', 'column clicked', 'column moved',
+                    'column resized', 'conclude drop', 'data representation',
+                    'deminiaturized', 'dialog ended', 'document nib name',
+                    'double clicked', 'drag( (entered|exited|updated))?',
+                    'drop', 'end editing', 'exposed', 'idle', 'item expandable',
+                    'item value', 'item value changed', 'items changed',
+                    'keyboard down', 'keyboard up', 'launched',
+                    'load data representation', 'miniaturized', 'mouse down',
+                    'mouse dragged', 'mouse entered', 'mouse exited',
+                    'mouse moved', 'mouse up', 'moved',
+                    'number of browser rows', 'number of items',
+                    'number of rows', 'open untitled', 'opened', 'panel ended',
+                    'parameters updated', 'plugin loaded', 'prepare drop',
+                    'prepare outline drag', 'prepare outline drop',
+                    'prepare table drag', 'prepare table drop',
+                    'read from file', 'resigned active', 'resigned key',
+                    'resigned main', 'resized( sub views)?',
+                    'right mouse down', 'right mouse dragged',
+                    'right mouse up', 'rows changed', 'scroll wheel',
+                    'selected tab view item', 'selection changed',
+                    'selection changing', 'should begin editing',
+                    'should close', 'should collapse item',
+                    'should end editing', 'should expand item',
+                    'should open( untitled)?',
+                    'should quit( after last window closed)?',
+                    'should select column', 'should select item',
+                    'should select row', 'should select tab view item',
+                    'should selection change', 'should zoom', 'shown',
+                    'update menu item', 'update parameters',
+                    'update toolbar item', 'was hidden', 'was miniaturized',
+                    'will become active', 'will close', 'will dismiss',
+                    'will display browser cell', 'will display cell',
+                    'will display item cell', 'will display outline cell',
+                    'will finish launching', 'will hide', 'will miniaturize',
+                    'will move', 'will open', 'will pop up', 'will quit',
+                    'will resign active', 'will resize( sub views)?',
+                    'will select tab view item', 'will show', 'will zoom',
+                    'write to file', 'zoomed')
+    StudioCommands = ('animate', 'append', 'call method', 'center',
+                      'close drawer', 'close panel', 'display',
+                      'display alert', 'display dialog', 'display panel', 'go',
+                      'hide', 'highlight', 'increment', 'item for',
+                      'load image', 'load movie', 'load nib', 'load panel',
+                      'load sound', 'localized string', 'lock focus', 'log',
+                      'open drawer', 'path for', 'pause', 'perform action',
+                      'play', 'register', 'resume', 'scroll', 'select( all)?',
+                      'show', 'size to fit', 'start', 'step back',
+                      'step forward', 'stop', 'synchronize', 'unlock focus',
+                      'update')
+    StudioProperties = ('accepts arrow key', 'action method', 'active',
+                        'alignment', 'allowed identifiers',
+                        'allows branch selection', 'allows column reordering',
+                        'allows column resizing', 'allows column selection',
+                        'allows customization',
+                        'allows editing text attributes',
+                        'allows empty selection', 'allows mixed state',
+                        'allows multiple selection', 'allows reordering',
+                        'allows undo', 'alpha( value)?', 'alternate image',
+                        'alternate increment value', 'alternate title',
+                        'animation delay', 'associated file name',
+                        'associated object', 'auto completes', 'auto display',
+                        'auto enables items', 'auto repeat',
+                        'auto resizes( outline column)?',
+                        'auto save expanded items', 'auto save name',
+                        'auto save table columns', 'auto saves configuration',
+                        'auto scroll', 'auto sizes all columns to fit',
+                        'auto sizes cells', 'background color', 'bezel state',
+                        'bezel style', 'bezeled', 'border rect', 'border type',
+                        'bordered', 'bounds( rotation)?', 'box type',
+                        'button returned', 'button type',
+                        'can choose directories', 'can choose files',
+                        'can draw', 'can hide',
+                        'cell( (background color|size|type))?', 'characters',
+                        'class', 'click count', 'clicked( data)? column',
+                        'clicked data item', 'clicked( data)? row',
+                        'closeable', 'collating', 'color( (mode|panel))',
+                        'command key down', 'configuration',
+                        'content(s| (size|view( margins)?))?', 'context',
+                        'continuous', 'control key down', 'control size',
+                        'control tint', 'control view',
+                        'controller visible', 'coordinate system',
+                        'copies( on scroll)?', 'corner view', 'current cell',
+                        'current column', 'current( field)?  editor',
+                        'current( menu)? item', 'current row',
+                        'current tab view item', 'data source',
+                        'default identifiers', 'delta (x|y|z)',
+                        'destination window', 'directory', 'display mode',
+                        'displayed cell', 'document( (edited|rect|view))?',
+                        'double value', 'dragged column', 'dragged distance',
+                        'dragged items', 'draws( cell)? background',
+                        'draws grid', 'dynamically scrolls', 'echos bullets',
+                        'edge', 'editable', 'edited( data)? column',
+                        'edited data item', 'edited( data)? row', 'enabled',
+                        'enclosing scroll view', 'ending page',
+                        'error handling', 'event number', 'event type',
+                        'excluded from windows menu', 'executable path',
+                        'expanded', 'fax number', 'field editor', 'file kind',
+                        'file name', 'file type', 'first responder',
+                        'first visible column', 'flipped', 'floating',
+                        'font( panel)?', 'formatter', 'frameworks path',
+                        'frontmost', 'gave up', 'grid color', 'has data items',
+                        'has horizontal ruler', 'has horizontal scroller',
+                        'has parent data item', 'has resize indicator',
+                        'has shadow', 'has sub menu', 'has vertical ruler',
+                        'has vertical scroller', 'header cell', 'header view',
+                        'hidden', 'hides when deactivated', 'highlights by',
+                        'horizontal line scroll', 'horizontal page scroll',
+                        'horizontal ruler view', 'horizontally resizable',
+                        'icon image', 'id', 'identifier',
+                        'ignores multiple clicks',
+                        'image( (alignment|dims when disabled|frame style|scaling))?',
+                        'imports graphics', 'increment value',
+                        'indentation per level', 'indeterminate', 'index',
+                        'integer value', 'intercell spacing', 'item height',
+                        'key( (code|equivalent( modifier)?|window))?',
+                        'knob thickness', 'label', 'last( visible)? column',
+                        'leading offset', 'leaf', 'level', 'line scroll',
+                        'loaded', 'localized sort', 'location', 'loop mode',
+                        'main( (bunde|menu|window))?', 'marker follows cell',
+                        'matrix mode', 'maximum( content)? size',
+                        'maximum visible columns',
+                        'menu( form representation)?', 'miniaturizable',
+                        'miniaturized', 'minimized image', 'minimized title',
+                        'minimum column width', 'minimum( content)? size',
+                        'modal', 'modified', 'mouse down state',
+                        'movie( (controller|file|rect))?', 'muted', 'name',
+                        'needs display', 'next state', 'next text',
+                        'number of tick marks', 'only tick mark values',
+                        'opaque', 'open panel', 'option key down',
+                        'outline table column', 'page scroll', 'pages across',
+                        'pages down', 'palette label', 'pane splitter',
+                        'parent data item', 'parent window', 'pasteboard',
+                        'path( (names|separator))?', 'playing',
+                        'plays every frame', 'plays selection only', 'position',
+                        'preferred edge', 'preferred type', 'pressure',
+                        'previous text', 'prompt', 'properties',
+                        'prototype cell', 'pulls down', 'rate',
+                        'released when closed', 'repeated',
+                        'requested print time', 'required file type',
+                        'resizable', 'resized column', 'resource path',
+                        'returns records', 'reuses columns', 'rich text',
+                        'roll over', 'row height', 'rulers visible',
+                        'save panel', 'scripts path', 'scrollable',
+                        'selectable( identifiers)?', 'selected cell',
+                        'selected( data)? columns?', 'selected data items?',
+                        'selected( data)? rows?', 'selected item identifier',
+                        'selection by rect', 'send action on arrow key',
+                        'sends action when done editing', 'separates columns',
+                        'separator item', 'sequence number', 'services menu',
+                        'shared frameworks path', 'shared support path',
+                        'sheet', 'shift key down', 'shows alpha',
+                        'shows state by', 'size( mode)?',
+                        'smart insert delete enabled', 'sort case sensitivity',
+                        'sort column', 'sort order', 'sort type',
+                        'sorted( data rows)?', 'sound', 'source( mask)?',
+                        'spell checking enabled', 'starting page', 'state',
+                        'string value', 'sub menu', 'super menu', 'super view',
+                        'tab key traverses cells', 'tab state', 'tab type',
+                        'tab view', 'table view', 'tag', 'target( printer)?',
+                        'text color', 'text container insert',
+                        'text container origin', 'text returned',
+                        'tick mark position', 'time stamp',
+                        'title(d| (cell|font|height|position|rect))?',
+                        'tool tip', 'toolbar', 'trailing offset', 'transparent',
+                        'treat packages as directories', 'truncated labels',
+                        'types', 'unmodified characters', 'update views',
+                        'use sort indicator', 'user defaults',
+                        'uses data source', 'uses ruler',
+                        'uses threaded animation',
+                        'uses title from previous column', 'value wraps',
+                        'version',
+                        'vertical( (line scroll|page scroll|ruler view))?',
+                        'vertically resizable', 'view',
+                        'visible( document rect)?', 'volume', 'width', 'window',
+                        'windows menu', 'wraps', 'zoomable', 'zoomed')
+
+    tokens = {
+        'root': [
+            (r'\s+', Text),
+            (r'¬\n', String.Escape),
+            (r"'s\s+", Text),  # This is a possessive, consider moving
+            (r'(--|#).*?$', Comment),
+            (r'\(\*', Comment.Multiline, 'comment'),
+            (r'[(){}!,.:]', Punctuation),
+            (r'(«)([^»]+)(»)',
+             bygroups(Text, Name.Builtin, Text)),
+            (r'\b((?:considering|ignoring)\s*)'
+             r'(application responses|case|diacriticals|hyphens|'
+             r'numeric strings|punctuation|white space)',
+             bygroups(Keyword, Name.Builtin)),
+            (r'(-|\*|\+|&|≠|>=?|<=?|=|≥|≤|/|÷|\^)', Operator),
+            (r"\b({})\b".format('|'.join(Operators)), Operator.Word),
+            (r'^(\s*(?:on|end)\s+)'
+             r'({})'.format('|'.join(StudioEvents[::-1])),
+             bygroups(Keyword, Name.Function)),
+            (r'^(\s*)(in|on|script|to)(\s+)', bygroups(Text, Keyword, Text)),
+            (r'\b(as )({})\b'.format('|'.join(Classes)),
+             bygroups(Keyword, Name.Class)),
+            (r'\b({})\b'.format('|'.join(Literals)), Name.Constant),
+            (r'\b({})\b'.format('|'.join(Commands)), Name.Builtin),
+            (r'\b({})\b'.format('|'.join(Control)), Keyword),
+            (r'\b({})\b'.format('|'.join(Declarations)), Keyword),
+            (r'\b({})\b'.format('|'.join(Reserved)), Name.Builtin),
+            (r'\b({})s?\b'.format('|'.join(BuiltIn)), Name.Builtin),
+            (r'\b({})\b'.format('|'.join(HandlerParams)), Name.Builtin),
+            (r'\b({})\b'.format('|'.join(StudioProperties)), Name.Attribute),
+            (r'\b({})s?\b'.format('|'.join(StudioClasses)), Name.Builtin),
+            (r'\b({})\b'.format('|'.join(StudioCommands)), Name.Builtin),
+            (r'\b({})\b'.format('|'.join(References)), Name.Builtin),
+            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
+            (rf'\b({Identifiers})\b', Name.Variable),
+            (r'[-+]?(\d+\.\d*|\d*\.\d+)(E[-+][0-9]+)?', Number.Float),
+            (r'[-+]?\d+', Number.Integer),
+        ],
+        'comment': [
+            (r'\(\*', Comment.Multiline, '#push'),
+            (r'\*\)', Comment.Multiline, '#pop'),
+            ('[^*(]+', Comment.Multiline),
+            ('[*(]', Comment.Multiline),
+        ],
+    }
+
+
+class RexxLexer(RegexLexer):
+    """
+    Rexx is a scripting language available for
+    a wide range of different platforms with its roots found on mainframe
+    systems. It is popular for I/O- and data based tasks and can act as glue
+    language to bind different applications together.
+    """
+    name = 'Rexx'
+    url = 'http://www.rexxinfo.org/'
+    aliases = ['rexx', 'arexx']
+    filenames = ['*.rexx', '*.rex', '*.rx', '*.arexx']
+    mimetypes = ['text/x-rexx']
+    version_added = '2.0'
+    flags = re.IGNORECASE
+
+    tokens = {
+        'root': [
+            (r'\s+', Whitespace),
+            (r'/\*', Comment.Multiline, 'comment'),
+            (r'"', String, 'string_double'),
+            (r"'", String, 'string_single'),
+            (r'[0-9]+(\.[0-9]+)?(e[+-]?[0-9])?', Number),
+            (r'([a-z_]\w*)(\s*)(:)(\s*)(procedure)\b',
+             bygroups(Name.Function, Whitespace, Operator, Whitespace,
+                      Keyword.Declaration)),
+            (r'([a-z_]\w*)(\s*)(:)',
+             bygroups(Name.Label, Whitespace, Operator)),
+            include('function'),
+            include('keyword'),
+            include('operator'),
+            (r'[a-z_]\w*', Text),
+        ],
+        'function': [
+            (words((
+                'abbrev', 'abs', 'address', 'arg', 'b2x', 'bitand', 'bitor', 'bitxor',
+                'c2d', 'c2x', 'center', 'charin', 'charout', 'chars', 'compare',
+                'condition', 'copies', 'd2c', 'd2x', 'datatype', 'date', 'delstr',
+                'delword', 'digits', 'errortext', 'form', 'format', 'fuzz', 'insert',
+                'lastpos', 'left', 'length', 'linein', 'lineout', 'lines', 'max',
+                'min', 'overlay', 'pos', 'queued', 'random', 'reverse', 'right', 'sign',
+                'sourceline', 'space', 'stream', 'strip', 'substr', 'subword', 'symbol',
+                'time', 'trace', 'translate', 'trunc', 'value', 'verify', 'word',
+                'wordindex', 'wordlength', 'wordpos', 'words', 'x2b', 'x2c', 'x2d',
+                'xrange'), suffix=r'(\s*)(\()'),
+             bygroups(Name.Builtin, Whitespace, Operator)),
+        ],
+        'keyword': [
+            (r'(address|arg|by|call|do|drop|else|end|exit|for|forever|if|'
+             r'interpret|iterate|leave|nop|numeric|off|on|options|parse|'
+             r'pull|push|queue|return|say|select|signal|to|then|trace|until|'
+             r'while)\b', Keyword.Reserved),
+        ],
+        'operator': [
+            (r'(-|//|/|\(|\)|\*\*|\*|\\<<|\\<|\\==|\\=|\\>>|\\>|\\|\|\||\||'
+             r'&&|&|%|\+|<<=|<<|<=|<>|<|==|=|><|>=|>>=|>>|>|¬<<|¬<|¬==|¬=|'
+             r'¬>>|¬>|¬|\.|,)', Operator),
+        ],
+        'string_double': [
+            (r'[^"\n]+', String),
+            (r'""', String),
+            (r'"', String, '#pop'),
+            (r'\n', Text, '#pop'),  # Stray linefeed also terminates strings.
+        ],
+        'string_single': [
+            (r'[^\'\n]+', String),
+            (r'\'\'', String),
+            (r'\'', String, '#pop'),
+            (r'\n', Text, '#pop'),  # Stray linefeed also terminates strings.
+        ],
+        'comment': [
+            (r'[^*]+', Comment.Multiline),
+            (r'\*/', Comment.Multiline, '#pop'),
+            (r'\*', Comment.Multiline),
+        ]
+    }
+
+    def _c(s):
+        return re.compile(s, re.MULTILINE)
+    _ADDRESS_COMMAND_PATTERN = _c(r'^\s*address\s+command\b')
+    _ADDRESS_PATTERN = _c(r'^\s*address\s+')
+    _DO_WHILE_PATTERN = _c(r'^\s*do\s+while\b')
+    _IF_THEN_DO_PATTERN = _c(r'^\s*if\b.+\bthen\s+do\s*$')
+    _PROCEDURE_PATTERN = _c(r'^\s*([a-z_]\w*)(\s*)(:)(\s*)(procedure)\b')
+    _ELSE_DO_PATTERN = _c(r'\belse\s+do\s*$')
+    _PARSE_ARG_PATTERN = _c(r'^\s*parse\s+(upper\s+)?(arg|value)\b')
+    PATTERNS_AND_WEIGHTS = (
+        (_ADDRESS_COMMAND_PATTERN, 0.2),
+        (_ADDRESS_PATTERN, 0.05),
+        (_DO_WHILE_PATTERN, 0.1),
+        (_ELSE_DO_PATTERN, 0.1),
+        (_IF_THEN_DO_PATTERN, 0.1),
+        (_PROCEDURE_PATTERN, 0.5),
+        (_PARSE_ARG_PATTERN, 0.2),
+    )
+
+    def analyse_text(text):
+        """
+        Check for initial comment and patterns that distinguish Rexx from other
+        C-like languages.
+        """
+        if re.search(r'/\*\**\s*rexx', text, re.IGNORECASE):
+            # Header matches MVS Rexx requirements, this is certainly a Rexx
+            # script.
+            return 1.0
+        elif text.startswith('/*'):
+            # Header matches general Rexx requirements; the source code might
+            # still be any language using C comments such as C++, C# or Java.
+            lowerText = text.lower()
+            result = sum(weight
+                         for (pattern, weight) in RexxLexer.PATTERNS_AND_WEIGHTS
+                         if pattern.search(lowerText)) + 0.01
+            return min(result, 1.0)
+
+
+class MOOCodeLexer(RegexLexer):
+    """
+    For MOOCode (the MOO scripting language).
+    """
+    name = 'MOOCode'
+    url = 'http://www.moo.mud.org/'
+    filenames = ['*.moo']
+    aliases = ['moocode', 'moo']
+    mimetypes = ['text/x-moocode']
+    version_added = '0.9'
+
+    tokens = {
+        'root': [
+            # Numbers
+            (r'(0|[1-9][0-9_]*)', Number.Integer),
+            # Strings
+            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
+            # exceptions
+            (r'(E_PERM|E_DIV)', Name.Exception),
+            # db-refs
+            (r'((#[-0-9]+)|(\$\w+))', Name.Entity),
+            # Keywords
+            (r'\b(if|else|elseif|endif|for|endfor|fork|endfork|while'
+             r'|endwhile|break|continue|return|try'
+             r'|except|endtry|finally|in)\b', Keyword),
+            # builtins
+            (r'(random|length)', Name.Builtin),
+            # special variables
+            (r'(player|caller|this|args)', Name.Variable.Instance),
+            # skip whitespace
+            (r'\s+', Text),
+            (r'\n', Text),
+            # other operators
+            (r'([!;=,{}&|:.\[\]@()<>?]+)', Operator),
+            # function call
+            (r'(\w+)(\()', bygroups(Name.Function, Operator)),
+            # variables
+            (r'(\w+)', Text),
+        ]
+    }
+
+
+class HybrisLexer(RegexLexer):
+    """
+    For Hybris source code.
+    """
+
+    name = 'Hybris'
+    aliases = ['hybris']
+    filenames = ['*.hyb']
+    mimetypes = ['text/x-hybris', 'application/x-hybris']
+    url = 'https://github.com/evilsocket/hybris'
+    version_added = '1.4'
+
+    flags = re.MULTILINE | re.DOTALL
+
+    tokens = {
+        'root': [
+            # method names
+            (r'^(\s*(?:function|method|operator\s+)+?)'
+             r'([a-zA-Z_]\w*)'
+             r'(\s*)(\()', bygroups(Keyword, Name.Function, Text, Operator)),
+            (r'[^\S\n]+', Text),
+            (r'//.*?\n', Comment.Single),
+            (r'/\*.*?\*/', Comment.Multiline),
+            (r'@[a-zA-Z_][\w.]*', Name.Decorator),
+            (r'(break|case|catch|next|default|do|else|finally|for|foreach|of|'
+             r'unless|if|new|return|switch|me|throw|try|while)\b', Keyword),
+            (r'(extends|private|protected|public|static|throws|function|method|'
+             r'operator)\b', Keyword.Declaration),
+            (r'(true|false|null|__FILE__|__LINE__|__VERSION__|__LIB_PATH__|'
+             r'__INC_PATH__)\b', Keyword.Constant),
+            (r'(class|struct)(\s+)',
+             bygroups(Keyword.Declaration, Text), 'class'),
+            (r'(import|include)(\s+)',
+             bygroups(Keyword.Namespace, Text), 'import'),
+            (words((
+                'gc_collect', 'gc_mm_items', 'gc_mm_usage', 'gc_collect_threshold',
+                'urlencode', 'urldecode', 'base64encode', 'base64decode', 'sha1', 'crc32',
+                'sha2', 'md5', 'md5_file', 'acos', 'asin', 'atan', 'atan2', 'ceil', 'cos',
+                'cosh', 'exp', 'fabs', 'floor', 'fmod', 'log', 'log10', 'pow', 'sin',
+                'sinh', 'sqrt', 'tan', 'tanh', 'isint', 'isfloat', 'ischar', 'isstring',
+                'isarray', 'ismap', 'isalias', 'typeof', 'sizeof', 'toint', 'tostring',
+                'fromxml', 'toxml', 'binary', 'pack', 'load', 'eval', 'var_names',
+                'var_values', 'user_functions', 'dyn_functions', 'methods', 'call',
+                'call_method', 'mknod', 'mkfifo', 'mount', 'umount2', 'umount', 'ticks',
+                'usleep', 'sleep', 'time', 'strtime', 'strdate', 'dllopen', 'dlllink',
+                'dllcall', 'dllcall_argv', 'dllclose', 'env', 'exec', 'fork', 'getpid',
+                'wait', 'popen', 'pclose', 'exit', 'kill', 'pthread_create',
+                'pthread_create_argv', 'pthread_exit', 'pthread_join', 'pthread_kill',
+                'smtp_send', 'http_get', 'http_post', 'http_download', 'socket', 'bind',
+                'listen', 'accept', 'getsockname', 'getpeername', 'settimeout', 'connect',
+                'server', 'recv', 'send', 'close', 'print', 'println', 'printf', 'input',
+                'readline', 'serial_open', 'serial_fcntl', 'serial_get_attr',
+                'serial_get_ispeed', 'serial_get_ospeed', 'serial_set_attr',
+                'serial_set_ispeed', 'serial_set_ospeed', 'serial_write', 'serial_read',
+                'serial_close', 'xml_load', 'xml_parse', 'fopen', 'fseek', 'ftell',
+                'fsize', 'fread', 'fwrite', 'fgets', 'fclose', 'file', 'readdir',
+                'pcre_replace', 'size', 'pop', 'unmap', 'has', 'keys', 'values',
+                'length', 'find', 'substr', 'replace', 'split', 'trim', 'remove',
+                'contains', 'join'), suffix=r'\b'),
+             Name.Builtin),
+            (words((
+                'MethodReference', 'Runner', 'Dll', 'Thread', 'Pipe', 'Process',
+                'Runnable', 'CGI', 'ClientSocket', 'Socket', 'ServerSocket',
+                'File', 'Console', 'Directory', 'Exception'), suffix=r'\b'),
+             Keyword.Type),
+            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
+            (r"'\\.'|'[^\\]'|'\\u[0-9a-f]{4}'", String.Char),
+            (r'(\.)([a-zA-Z_]\w*)',
+             bygroups(Operator, Name.Attribute)),
+            (r'[a-zA-Z_]\w*:', Name.Label),
+            (r'[a-zA-Z_$]\w*', Name),
+            (r'[~^*!%&\[\](){}<>|+=:;,./?\-@]+', Operator),
+            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
+            (r'0x[0-9a-f]+', Number.Hex),
+            (r'[0-9]+L?', Number.Integer),
+            (r'\n', Text),
+        ],
+        'class': [
+            (r'[a-zA-Z_]\w*', Name.Class, '#pop')
+        ],
+        'import': [
+            (r'[\w.]+\*?', Name.Namespace, '#pop')
+        ],
+    }
+
+    def analyse_text(text):
+        """public method and private method don't seem to be quite common
+        elsewhere."""
+        result = 0
+        if re.search(r'\b(?:public|private)\s+method\b', text):
+            result += 0.01
+        return result
+
+
+
+class EasytrieveLexer(RegexLexer):
+    """
+    Easytrieve Plus is a programming language for extracting, filtering and
+    converting sequential data. Furthermore it can layout data for reports.
+    It is mainly used on mainframe platforms and can access several of the
+    mainframe's native file formats. It is somewhat comparable to awk.
+    """
+    name = 'Easytrieve'
+    aliases = ['easytrieve']
+    filenames = ['*.ezt', '*.mac']
+    mimetypes = ['text/x-easytrieve']
+    url = 'https://www.broadcom.com/products/mainframe/application-development/easytrieve-report-generator'
+    version_added = '2.1'
+    flags = 0
+
+    # Note: We cannot use r'\b' at the start and end of keywords because
+    # Easytrieve Plus delimiter characters are:
+    #
+    #   * space ( )
+    #   * apostrophe (')
+    #   * period (.)
+    #   * comma (,)
+    #   * parenthesis ( and )
+    #   * colon (:)
+    #
+    # Additionally words end once a '*' appears, indicatins a comment.
+    _DELIMITERS = r' \'.,():\n'
+    _DELIMITERS_OR_COMENT = _DELIMITERS + '*'
+    _DELIMITER_PATTERN = '[' + _DELIMITERS + ']'
+    _DELIMITER_PATTERN_CAPTURE = '(' + _DELIMITER_PATTERN + ')'
+    _NON_DELIMITER_OR_COMMENT_PATTERN = '[^' + _DELIMITERS_OR_COMENT + ']'
+    _OPERATORS_PATTERN = '[.+\\-/=\\[\\](){}<>;,&%¬]'
+    _KEYWORDS = [
+        'AFTER-BREAK', 'AFTER-LINE', 'AFTER-SCREEN', 'AIM', 'AND', 'ATTR',
+        'BEFORE', 'BEFORE-BREAK', 'BEFORE-LINE', 'BEFORE-SCREEN', 'BUSHU',
+        'BY', 'CALL', 'CASE', 'CHECKPOINT', 'CHKP', 'CHKP-STATUS', 'CLEAR',
+        'CLOSE', 'COL', 'COLOR', 'COMMIT', 'CONTROL', 'COPY', 'CURSOR', 'D',
+        'DECLARE', 'DEFAULT', 'DEFINE', 'DELETE', 'DENWA', 'DISPLAY', 'DLI',
+        'DO', 'DUPLICATE', 'E', 'ELSE', 'ELSE-IF', 'END', 'END-CASE',
+        'END-DO', 'END-IF', 'END-PROC', 'ENDPAGE', 'ENDTABLE', 'ENTER', 'EOF',
+        'EQ', 'ERROR', 'EXIT', 'EXTERNAL', 'EZLIB', 'F1', 'F10', 'F11', 'F12',
+        'F13', 'F14', 'F15', 'F16', 'F17', 'F18', 'F19', 'F2', 'F20', 'F21',
+        'F22', 'F23', 'F24', 'F25', 'F26', 'F27', 'F28', 'F29', 'F3', 'F30',
+        'F31', 'F32', 'F33', 'F34', 'F35', 'F36', 'F4', 'F5', 'F6', 'F7',
+        'F8', 'F9', 'FETCH', 'FILE-STATUS', 'FILL', 'FINAL', 'FIRST',
+        'FIRST-DUP', 'FOR', 'GE', 'GET', 'GO', 'GOTO', 'GQ', 'GR', 'GT',
+        'HEADING', 'HEX', 'HIGH-VALUES', 'IDD', 'IDMS', 'IF', 'IN', 'INSERT',
+        'JUSTIFY', 'KANJI-DATE', 'KANJI-DATE-LONG', 'KANJI-TIME', 'KEY',
+        'KEY-PRESSED', 'KOKUGO', 'KUN', 'LAST-DUP', 'LE', 'LEVEL', 'LIKE',
+        'LINE', 'LINE-COUNT', 'LINE-NUMBER', 'LINK', 'LIST', 'LOW-VALUES',
+        'LQ', 'LS', 'LT', 'MACRO', 'MASK', 'MATCHED', 'MEND', 'MESSAGE',
+        'MOVE', 'MSTART', 'NE', 'NEWPAGE', 'NOMASK', 'NOPRINT', 'NOT',
+        'NOTE', 'NOVERIFY', 'NQ', 'NULL', 'OF', 'OR', 'OTHERWISE', 'PA1',
+        'PA2', 'PA3', 'PAGE-COUNT', 'PAGE-NUMBER', 'PARM-REGISTER',
+        'PATH-ID', 'PATTERN', 'PERFORM', 'POINT', 'POS', 'PRIMARY', 'PRINT',
+        'PROCEDURE', 'PROGRAM', 'PUT', 'READ', 'RECORD', 'RECORD-COUNT',
+        'RECORD-LENGTH', 'REFRESH', 'RELEASE', 'RENUM', 'REPEAT', 'REPORT',
+        'REPORT-INPUT', 'RESHOW', 'RESTART', 'RETRIEVE', 'RETURN-CODE',
+        'ROLLBACK', 'ROW', 'S', 'SCREEN', 'SEARCH', 'SECONDARY', 'SELECT',
+        'SEQUENCE', 'SIZE', 'SKIP', 'SOKAKU', 'SORT', 'SQL', 'STOP', 'SUM',
+        'SYSDATE', 'SYSDATE-LONG', 'SYSIN', 'SYSIPT', 'SYSLST', 'SYSPRINT',
+        'SYSSNAP', 'SYSTIME', 'TALLY', 'TERM-COLUMNS', 'TERM-NAME',
+        'TERM-ROWS', 'TERMINATION', 'TITLE', 'TO', 'TRANSFER', 'TRC',
+        'UNIQUE', 'UNTIL', 'UPDATE', 'UPPERCASE', 'USER', 'USERID', 'VALUE',
+        'VERIFY', 'W', 'WHEN', 'WHILE', 'WORK', 'WRITE', 'X', 'XDM', 'XRST'
+    ]
+
+    tokens = {
+        'root': [
+            (r'\*.*\n', Comment.Single),
+            (r'\n+', Whitespace),
+            # Macro argument
+            (r'&' + _NON_DELIMITER_OR_COMMENT_PATTERN + r'+\.', Name.Variable,
+             'after_macro_argument'),
+            # Macro call
+            (r'%' + _NON_DELIMITER_OR_COMMENT_PATTERN + r'+', Name.Variable),
+            (r'(FILE|MACRO|REPORT)(\s+)',
+             bygroups(Keyword.Declaration, Whitespace), 'after_declaration'),
+            (r'(JOB|PARM)' + r'(' + _DELIMITER_PATTERN + r')',
+             bygroups(Keyword.Declaration, Operator)),
+            (words(_KEYWORDS, suffix=_DELIMITER_PATTERN_CAPTURE),
+             bygroups(Keyword.Reserved, Operator)),
+            (_OPERATORS_PATTERN, Operator),
+            # Procedure declaration
+            (r'(' + _NON_DELIMITER_OR_COMMENT_PATTERN + r'+)(\s*)(\.?)(\s*)(PROC)(\s*\n)',
+             bygroups(Name.Function, Whitespace, Operator, Whitespace,
+                      Keyword.Declaration, Whitespace)),
+            (r'[0-9]+\.[0-9]*', Number.Float),
+            (r'[0-9]+', Number.Integer),
+            (r"'(''|[^'])*'", String),
+            (r'\s+', Whitespace),
+            # Everything else just belongs to a name
+            (_NON_DELIMITER_OR_COMMENT_PATTERN + r'+', Name),
+         ],
+        'after_declaration': [
+            (_NON_DELIMITER_OR_COMMENT_PATTERN + r'+', Name.Function),
+            default('#pop'),
+        ],
+        'after_macro_argument': [
+            (r'\*.*\n', Comment.Single, '#pop'),
+            (r'\s+', Whitespace, '#pop'),
+            (_OPERATORS_PATTERN, Operator, '#pop'),
+            (r"'(''|[^'])*'", String, '#pop'),
+            # Everything else just belongs to a name
+            (_NON_DELIMITER_OR_COMMENT_PATTERN + r'+', Name),
+        ],
+    }
+    _COMMENT_LINE_REGEX = re.compile(r'^\s*\*')
+    _MACRO_HEADER_REGEX = re.compile(r'^\s*MACRO')
+
+    def analyse_text(text):
+        """
+        Perform a structural analysis for basic Easytrieve constructs.
+        """
+        result = 0.0
+        lines = text.split('\n')
+        hasEndProc = False
+        hasHeaderComment = False
+        hasFile = False
+        hasJob = False
+        hasProc = False
+        hasParm = False
+        hasReport = False
+
+        def isCommentLine(line):
+            return EasytrieveLexer._COMMENT_LINE_REGEX.match(lines[0]) is not None
+
+        def isEmptyLine(line):
+            return not bool(line.strip())
+
+        # Remove possible empty lines and header comments.
+        while lines and (isEmptyLine(lines[0]) or isCommentLine(lines[0])):
+            if not isEmptyLine(lines[0]):
+                hasHeaderComment = True
+            del lines[0]
+
+        if EasytrieveLexer._MACRO_HEADER_REGEX.match(lines[0]):
+            # Looks like an Easytrieve macro.
+            result = 0.4
+            if hasHeaderComment:
+                result += 0.4
+        else:
+            # Scan the source for lines starting with indicators.
+            for line in lines:
+                words = line.split()
+                if (len(words) >= 2):
+                    firstWord = words[0]
+                    if not hasReport:
+                        if not hasJob:
+                            if not hasFile:
+                                if not hasParm:
+                                    if firstWord == 'PARM':
+                                        hasParm = True
+                                if firstWord == 'FILE':
+                                    hasFile = True
+                            if firstWord == 'JOB':
+                                hasJob = True
+                        elif firstWord == 'PROC':
+                            hasProc = True
+                        elif firstWord == 'END-PROC':
+                            hasEndProc = True
+                        elif firstWord == 'REPORT':
+                            hasReport = True
+
+            # Weight the findings.
+            if hasJob and (hasProc == hasEndProc):
+                if hasHeaderComment:
+                    result += 0.1
+                if hasParm:
+                    if hasProc:
+                        # Found PARM, JOB and PROC/END-PROC:
+                        # pretty sure this is Easytrieve.
+                        result += 0.8
+                    else:
+                        # Found PARAM and  JOB: probably this is Easytrieve
+                        result += 0.5
+                else:
+                    # Found JOB and possibly other keywords: might be Easytrieve
+                    result += 0.11
+                    if hasParm:
+                        # Note: PARAM is not a proper English word, so this is
+                        # regarded a much better indicator for Easytrieve than
+                        # the other words.
+                        result += 0.2
+                    if hasFile:
+                        result += 0.01
+                    if hasReport:
+                        result += 0.01
+        assert 0.0 <= result <= 1.0
+        return result
+
+
+class JclLexer(RegexLexer):
+    """
+    Job Control Language (JCL)
+    is a scripting language used on mainframe platforms to instruct the system
+    on how to run a batch job or start a subsystem. It is somewhat
+    comparable to MS DOS batch and Unix shell scripts.
+    """
+    name = 'JCL'
+    aliases = ['jcl']
+    filenames = ['*.jcl']
+    mimetypes = ['text/x-jcl']
+    url = 'https://en.wikipedia.org/wiki/Job_Control_Language'
+    version_added = '2.1'
+
+    flags = re.IGNORECASE
+
+    tokens = {
+        'root': [
+            (r'//\*.*\n', Comment.Single),
+            (r'//', Keyword.Pseudo, 'statement'),
+            (r'/\*', Keyword.Pseudo, 'jes2_statement'),
+            # TODO: JES3 statement
+            (r'.*\n', Other)  # Input text or inline code in any language.
+        ],
+        'statement': [
+            (r'\s*\n', Whitespace, '#pop'),
+            (r'([a-z]\w*)(\s+)(exec|job)(\s*)',
+             bygroups(Name.Label, Whitespace, Keyword.Reserved, Whitespace),
+             'option'),
+            (r'[a-z]\w*', Name.Variable, 'statement_command'),
+            (r'\s+', Whitespace, 'statement_command'),
+        ],
+        'statement_command': [
+            (r'\s+(command|cntl|dd|endctl|endif|else|include|jcllib|'
+             r'output|pend|proc|set|then|xmit)\s+', Keyword.Reserved, 'option'),
+            include('option')
+        ],
+        'jes2_statement': [
+            (r'\s*\n', Whitespace, '#pop'),
+            (r'\$', Keyword, 'option'),
+            (r'\b(jobparam|message|netacct|notify|output|priority|route|'
+             r'setup|signoff|xeq|xmit)\b', Keyword, 'option'),
+        ],
+        'option': [
+            # (r'\n', Text, 'root'),
+            (r'\*', Name.Builtin),
+            (r'[\[\](){}<>;,]', Punctuation),
+            (r'[-+*/=&%]', Operator),
+            (r'[a-z_]\w*', Name),
+            (r'\d+\.\d*', Number.Float),
+            (r'\.\d+', Number.Float),
+            (r'\d+', Number.Integer),
+            (r"'", String, 'option_string'),
+            (r'[ \t]+', Whitespace, 'option_comment'),
+            (r'\.', Punctuation),
+        ],
+        'option_string': [
+            (r"(\n)(//)", bygroups(Text, Keyword.Pseudo)),
+            (r"''", String),
+            (r"[^']", String),
+            (r"'", String, '#pop'),
+        ],
+        'option_comment': [
+            # (r'\n', Text, 'root'),
+            (r'.+', Comment.Single),
+        ]
+    }
+
+    _JOB_HEADER_PATTERN = re.compile(r'^//[a-z#$@][a-z0-9#$@]{0,7}\s+job(\s+.*)?$',
+                                     re.IGNORECASE)
+
+    def analyse_text(text):
+        """
+        Recognize JCL job by header.
+        """
+        result = 0.0
+        lines = text.split('\n')
+        if len(lines) > 0:
+            if JclLexer._JOB_HEADER_PATTERN.match(lines[0]):
+                result = 1.0
+        assert 0.0 <= result <= 1.0
+        return result
+
+
+class MiniScriptLexer(RegexLexer):
+    """
+    For MiniScript source code.
+    """
+
+    name = 'MiniScript'
+    url = 'https://miniscript.org'
+    aliases = ['miniscript', 'ms']
+    filenames = ['*.ms']
+    mimetypes = ['text/x-minicript', 'application/x-miniscript']
+    version_added = '2.6'
+
+    tokens = {
+        'root': [
+            (r'#!(.*?)$', Comment.Preproc),
+            default('base'),
+        ],
+        'base': [
+            ('//.*$', Comment.Single),
+            (r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number),
+            (r'(?i)\d+e[+-]?\d+', Number),
+            (r'\d+', Number),
+            (r'\n', Text),
+            (r'[^\S\n]+', Text),
+            (r'"', String, 'string_double'),
+            (r'(==|!=|<=|>=|[=+\-*/%^<>.:])', Operator),
+            (r'[;,\[\]{}()]', Punctuation),
+            (words((
+                'break', 'continue', 'else', 'end', 'for', 'function', 'if',
+                'in', 'isa', 'then', 'repeat', 'return', 'while'), suffix=r'\b'),
+             Keyword),
+            (words((
+                'abs', 'acos', 'asin', 'atan', 'ceil', 'char', 'cos', 'floor',
+                'log', 'round', 'rnd', 'pi', 'sign', 'sin', 'sqrt', 'str', 'tan',
+                'hasIndex', 'indexOf', 'len', 'val', 'code', 'remove', 'lower',
+                'upper', 'replace', 'split', 'indexes', 'values', 'join', 'sum',
+                'sort', 'shuffle', 'push', 'pop', 'pull', 'range',
+                'print', 'input', 'time', 'wait', 'locals', 'globals', 'outer',
+                'yield'), suffix=r'\b'),
+             Name.Builtin),
+            (r'(true|false|null)\b', Keyword.Constant),
+            (r'(and|or|not|new)\b', Operator.Word),
+            (r'(self|super|__isa)\b', Name.Builtin.Pseudo),
+            (r'[a-zA-Z_]\w*', Name.Variable)
+        ],
+        'string_double': [
+            (r'[^"\n]+', String),
+            (r'""', String),
+            (r'"', String, '#pop'),
+            (r'\n', Text, '#pop'),  # Stray linefeed also terminates strings.
+        ]
+    }
diff --git a/local_packages/pygments/lexers/sgf.py b/local_packages/pygments/lexers/sgf.py
new file mode 100644
index 0000000..6d7c304
--- /dev/null
+++ b/local_packages/pygments/lexers/sgf.py
@@ -0,0 +1,59 @@
+"""
+    pygments.lexers.sgf
+    ~~~~~~~~~~~~~~~~~~~
+
+    Lexer for Smart Game Format (sgf) file format.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups
+from pygments.token import Name, Literal, String, Punctuation, Whitespace
+
+__all__ = ["SmartGameFormatLexer"]
+
+
+class SmartGameFormatLexer(RegexLexer):
+    """
+    Lexer for Smart Game Format (sgf) file format.
+
+    The format is used to store game records of board games for two players
+    (mainly Go game).
+    """
+    name = 'SmartGameFormat'
+    url = 'https://www.red-bean.com/sgf/'
+    aliases = ['sgf']
+    filenames = ['*.sgf']
+    version_added = '2.4'
+
+    tokens = {
+        'root': [
+            (r'[():;]+', Punctuation),
+            # tokens:
+            (r'(A[BW]|AE|AN|AP|AR|AS|[BW]L|BM|[BW]R|[BW]S|[BW]T|CA|CH|CP|CR|'
+             r'DD|DM|DO|DT|EL|EV|EX|FF|FG|G[BW]|GC|GM|GN|HA|HO|ID|IP|IT|IY|KM|'
+             r'KO|LB|LN|LT|L|MA|MN|M|N|OB|OM|ON|OP|OT|OV|P[BW]|PC|PL|PM|RE|RG|'
+             r'RO|RU|SO|SC|SE|SI|SL|SO|SQ|ST|SU|SZ|T[BW]|TC|TE|TM|TR|UC|US|VW|'
+             r'V|[BW]|C)',
+             Name.Builtin),
+            # number:
+            (r'(\[)([0-9.]+)(\])',
+             bygroups(Punctuation, Literal.Number, Punctuation)),
+            # date:
+            (r'(\[)([0-9]{4}-[0-9]{2}-[0-9]{2})(\])',
+             bygroups(Punctuation, Literal.Date, Punctuation)),
+            # point:
+            (r'(\[)([a-z]{2})(\])',
+             bygroups(Punctuation, String, Punctuation)),
+            # double points:
+            (r'(\[)([a-z]{2})(:)([a-z]{2})(\])',
+             bygroups(Punctuation, String, Punctuation, String, Punctuation)),
+
+            (r'(\[)([\w\s#()+,\-.:?]+)(\])',
+             bygroups(Punctuation, String, Punctuation)),
+            (r'(\[)(\s.*)(\])',
+             bygroups(Punctuation, Whitespace, Punctuation)),
+            (r'\s+', Whitespace)
+        ],
+    }
diff --git a/local_packages/pygments/lexers/shell.py b/local_packages/pygments/lexers/shell.py
new file mode 100644
index 0000000..c1ce07f
--- /dev/null
+++ b/local_packages/pygments/lexers/shell.py
@@ -0,0 +1,902 @@
+"""
+    pygments.lexers.shell
+    ~~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for various shells.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import Lexer, RegexLexer, do_insertions, bygroups, \
+    include, default, this, using, words, line_re
+from pygments.token import Punctuation, Whitespace, \
+    Text, Comment, Operator, Keyword, Name, String, Number, Generic
+from pygments.util import shebang_matches
+
+__all__ = ['BashLexer', 'BashSessionLexer', 'TcshLexer', 'BatchLexer',
+           'SlurmBashLexer', 'MSDOSSessionLexer', 'PowerShellLexer',
+           'PowerShellSessionLexer', 'TcshSessionLexer', 'FishShellLexer',
+           'ExeclineLexer']
+
+
+class BashLexer(RegexLexer):
+    """
+    Lexer for (ba|k|z|)sh shell scripts.
+    """
+
+    name = 'Bash'
+    aliases = ['bash', 'sh', 'ksh', 'zsh', 'shell', 'openrc']
+    filenames = ['*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass',
+                 '*.exheres-0', '*.exlib', '*.zsh',
+                 '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'zshrc', '.zshrc',
+                 '.kshrc', 'kshrc',
+                 'PKGBUILD']
+    mimetypes = ['application/x-sh', 'application/x-shellscript', 'text/x-shellscript']
+    url = 'https://en.wikipedia.org/wiki/Unix_shell'
+    version_added = '0.6'
+
+    tokens = {
+        'root': [
+            include('basic'),
+            (r'`', String.Backtick, 'backticks'),
+            include('data'),
+            include('interp'),
+        ],
+        'interp': [
+            (r'\$\(\(', Keyword, 'math'),
+            (r'\$\(', Keyword, 'paren'),
+            (r'\$\{#?', String.Interpol, 'curly'),
+            (r'\$[a-zA-Z_]\w*', Name.Variable),  # user variable
+            (r'\$(?:\d+|[#$?!_*@-])', Name.Variable),      # builtin
+            (r'\$', Text),
+        ],
+        'basic': [
+            (r'\b(if|fi|else|while|in|do|done|for|then|return|function|case|'
+             r'select|break|continue|until|esac|elif)(\s*)\b',
+             bygroups(Keyword, Whitespace)),
+            (r'\b(alias|bg|bind|builtin|caller|cd|command|compgen|'
+             r'complete|declare|dirs|disown|echo|enable|eval|exec|exit|'
+             r'export|false|fc|fg|getopts|hash|help|history|jobs|kill|let|'
+             r'local|logout|popd|printf|pushd|pwd|read|readonly|set|shift|'
+             r'shopt|source|suspend|test|time|times|trap|true|type|typeset|'
+             r'ulimit|umask|unalias|unset|wait)(?=[\s)`])',
+             Name.Builtin),
+            (r'\A#!.+\n', Comment.Hashbang),
+            (r'#.*\n', Comment.Single),
+            (r'\\[\w\W]', String.Escape),
+            (r'(\b\w+)(\s*)(\+?=)', bygroups(Name.Variable, Whitespace, Operator)),
+            (r'[\[\]{}()=]', Operator),
+            (r'<<<', Operator),  # here-string
+            (r'<<-?\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
+            (r'&&|\|\|', Operator),
+        ],
+        'data': [
+            (r'(?s)\$?"(\\.|[^"\\$])*"', String.Double),
+            (r'"', String.Double, 'string'),
+            (r"(?s)\$'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
+            (r"(?s)'.*?'", String.Single),
+            (r';', Punctuation),
+            (r'&', Punctuation),
+            (r'\|', Punctuation),
+            (r'\s+', Whitespace),
+            (r'\d+\b', Number),
+            (r'[^=\s\[\]{}()$"\'`\\<&|;]+', Text),
+            (r'<', Text),
+        ],
+        'string': [
+            (r'"', String.Double, '#pop'),
+            (r'(?s)(\\\\|\\[0-7]+|\\.|[^"\\$])+', String.Double),
+            include('interp'),
+        ],
+        'curly': [
+            (r'\}', String.Interpol, '#pop'),
+            (r':-', Keyword),
+            (r'\w+', Name.Variable),
+            (r'[^}:"\'`$\\]+', Punctuation),
+            (r':', Punctuation),
+            include('root'),
+        ],
+        'paren': [
+            (r'\)', Keyword, '#pop'),
+            include('root'),
+        ],
+        'math': [
+            (r'\)\)', Keyword, '#pop'),
+            (r'\*\*|\|\||<<|>>|[-+*/%^|&<>]', Operator),
+            (r'\d+#[\da-zA-Z]+', Number),
+            (r'\d+#(?! )', Number),
+            (r'0[xX][\da-fA-F]+', Number),
+            (r'\d+', Number),
+            (r'[a-zA-Z_]\w*', Name.Variable),  # user variable
+            include('root'),
+        ],
+        'backticks': [
+            (r'`', String.Backtick, '#pop'),
+            include('root'),
+        ],
+    }
+
+    def analyse_text(text):
+        if shebang_matches(text, r'(ba|z|)sh'):
+            return 1
+        if text.startswith('$ '):
+            return 0.2
+
+
+class SlurmBashLexer(BashLexer):
+    """
+    Lexer for (ba|k|z|)sh Slurm scripts.
+    """
+
+    name = 'Slurm'
+    aliases = ['slurm', 'sbatch']
+    filenames = ['*.sl']
+    mimetypes = []
+    version_added = '2.4'
+    EXTRA_KEYWORDS = {'srun'}
+
+    def get_tokens_unprocessed(self, text):
+        for index, token, value in BashLexer.get_tokens_unprocessed(self, text):
+            if token is Text and value in self.EXTRA_KEYWORDS:
+                yield index, Name.Builtin, value
+            elif token is Comment.Single and 'SBATCH' in value:
+                yield index, Keyword.Pseudo, value
+            else:
+                yield index, token, value
+
+
+class ShellSessionBaseLexer(Lexer):
+    """
+    Base lexer for shell sessions.
+
+    .. versionadded:: 2.1
+    """
+
+    _bare_continuation = False
+    _venv = re.compile(r'^(\([^)]*\))(\s*)')
+
+    def get_tokens_unprocessed(self, text):
+        innerlexer = self._innerLexerCls(**self.options)
+
+        pos = 0
+        curcode = ''
+        insertions = []
+        backslash_continuation = False
+
+        for match in line_re.finditer(text):
+            line = match.group()
+
+            venv_match = self._venv.match(line)
+            if venv_match:
+                venv = venv_match.group(1)
+                venv_whitespace = venv_match.group(2)
+                insertions.append((len(curcode),
+                                   [(0, Generic.Prompt.VirtualEnv, venv)]))
+                if venv_whitespace:
+                    insertions.append((len(curcode),
+                                       [(0, Text, venv_whitespace)]))
+                line = line[venv_match.end():]
+
+            m = self._ps1rgx.match(line)
+            if m:
+                # To support output lexers (say diff output), the output
+                # needs to be broken by prompts whenever the output lexer
+                # changes.
+                if not insertions:
+                    pos = match.start()
+
+                insertions.append((len(curcode),
+                                   [(0, Generic.Prompt, m.group(1))]))
+                curcode += m.group(2)
+                backslash_continuation = curcode.endswith('\\\n')
+            elif backslash_continuation:
+                if line.startswith(self._ps2):
+                    insertions.append((len(curcode),
+                                       [(0, Generic.Prompt,
+                                         line[:len(self._ps2)])]))
+                    curcode += line[len(self._ps2):]
+                else:
+                    curcode += line
+                backslash_continuation = curcode.endswith('\\\n')
+            elif self._bare_continuation and line.startswith(self._ps2):
+                insertions.append((len(curcode),
+                                   [(0, Generic.Prompt,
+                                     line[:len(self._ps2)])]))
+                curcode += line[len(self._ps2):]
+            else:
+                if insertions:
+                    toks = innerlexer.get_tokens_unprocessed(curcode)
+                    for i, t, v in do_insertions(insertions, toks):
+                        yield pos+i, t, v
+                yield match.start(), Generic.Output, line
+                insertions = []
+                curcode = ''
+        if insertions:
+            for i, t, v in do_insertions(insertions,
+                                         innerlexer.get_tokens_unprocessed(curcode)):
+                yield pos+i, t, v
+
+
+class BashSessionLexer(ShellSessionBaseLexer):
+    """
+    Lexer for Bash shell sessions, i.e. command lines, including a
+    prompt, interspersed with output.
+    """
+
+    name = 'Bash Session'
+    aliases = ['console', 'shell-session']
+    filenames = ['*.sh-session', '*.shell-session']
+    mimetypes = ['application/x-shell-session', 'application/x-sh-session']
+    url = 'https://en.wikipedia.org/wiki/Unix_shell'
+    version_added = '1.1'
+    _example = "console/example.sh-session"
+
+    _innerLexerCls = BashLexer
+    _ps1rgx = re.compile(
+        r'^((?:(?:\[.*?\])|(?:\(\S+\))?(?:| |sh\S*?|\w+\S+[@:]\S+(?:\s+\S+)' \
+        r'?|\[\S+[@:][^\n]+\].+))\s*[$#%]\s*)(.*\n?)')
+    _ps2 = '> '
+
+
+class BatchLexer(RegexLexer):
+    """
+    Lexer for the DOS/Windows Batch file format.
+    """
+    name = 'Batchfile'
+    aliases = ['batch', 'bat', 'dosbatch', 'winbatch']
+    filenames = ['*.bat', '*.cmd']
+    mimetypes = ['application/x-dos-batch']
+    url = 'https://en.wikipedia.org/wiki/Batch_file'
+    version_added = '0.7'
+
+    flags = re.MULTILINE | re.IGNORECASE
+
+    _nl = r'\n\x1a'
+    _punct = r'&<>|'
+    _ws = r'\t\v\f\r ,;=\xa0'
+    _nlws = r'\s\x1a\xa0,;='
+    _space = rf'(?:(?:(?:\^[{_nl}])?[{_ws}])+)'
+    _keyword_terminator = (rf'(?=(?:\^[{_nl}]?)?[{_ws}+./:[\\\]]|[{_nl}{_punct}(])')
+    _token_terminator = rf'(?=\^?[{_ws}]|[{_punct}{_nl}])'
+    _start_label = rf'((?:(?<=^[^:])|^[^:]?)[{_ws}]*)(:)'
+    _label = rf'(?:(?:[^{_nlws}{_punct}+:^]|\^[{_nl}]?[\w\W])*)'
+    _label_compound = rf'(?:(?:[^{_nlws}{_punct}+:^)]|\^[{_nl}]?[^)])*)'
+    _number = rf'(?:-?(?:0[0-7]+|0x[\da-f]+|\d+){_token_terminator})'
+    _opword = r'(?:equ|geq|gtr|leq|lss|neq)'
+    _string = rf'(?:"[^{_nl}"]*(?:"|(?=[{_nl}])))'
+    _variable = (r'(?:(?:%(?:\*|(?:~[a-z]*(?:\$[^:]+:)?)?\d|'
+                 rf'[^%:{_nl}]+(?::(?:~(?:-?\d+)?(?:,(?:-?\d+)?)?|(?:[^%{_nl}^]|'
+                 rf'\^[^%{_nl}])[^={_nl}]*=(?:[^%{_nl}^]|\^[^%{_nl}])*)?)?%))|'
+                 rf'(?:\^?![^!:{_nl}]+(?::(?:~(?:-?\d+)?(?:,(?:-?\d+)?)?|(?:'
+                 rf'[^!{_nl}^]|\^[^!{_nl}])[^={_nl}]*=(?:[^!{_nl}^]|\^[^!{_nl}])*)?)?\^?!))')
+    _core_token = rf'(?:(?:(?:\^[{_nl}]?)?[^"{_nlws}{_punct}])+)'
+    _core_token_compound = rf'(?:(?:(?:\^[{_nl}]?)?[^"{_nlws}{_punct})])+)'
+    _token = rf'(?:[{_punct}]+|{_core_token})'
+    _token_compound = rf'(?:[{_punct}]+|{_core_token_compound})'
+    _stoken = (rf'(?:[{_punct}]+|(?:{_string}|{_variable}|{_core_token})+)')
+
+    def _make_begin_state(compound, _core_token=_core_token,
+                          _core_token_compound=_core_token_compound,
+                          _keyword_terminator=_keyword_terminator,
+                          _nl=_nl, _punct=_punct, _string=_string,
+                          _space=_space, _start_label=_start_label,
+                          _stoken=_stoken, _token_terminator=_token_terminator,
+                          _variable=_variable, _ws=_ws):
+        rest = '(?:{}|{}|[^"%{}{}{}])*'.format(_string, _variable, _nl, _punct,
+                                            ')' if compound else '')
+        rest_of_line = rf'(?:(?:[^{_nl}^]|\^[{_nl}]?[\w\W])*)'
+        rest_of_line_compound = rf'(?:(?:[^{_nl}^)]|\^[{_nl}]?[^)])*)'
+        set_space = rf'((?:(?:\^[{_nl}]?)?[^\S\n])*)'
+        suffix = ''
+        if compound:
+            _keyword_terminator = rf'(?:(?=\))|{_keyword_terminator})'
+            _token_terminator = rf'(?:(?=\))|{_token_terminator})'
+            suffix = '/compound'
+        return [
+            ((r'\)', Punctuation, '#pop') if compound else
+             (rf'\)((?=\()|{_token_terminator}){rest_of_line}',
+              Comment.Single)),
+            (rf'(?={_start_label})', Text, f'follow{suffix}'),
+            (_space, using(this, state='text')),
+            include(f'redirect{suffix}'),
+            (rf'[{_nl}]+', Text),
+            (r'\(', Punctuation, 'root/compound'),
+            (r'@+', Punctuation),
+            (rf'((?:for|if|rem)(?:(?=(?:\^[{_nl}]?)?/)|(?:(?!\^)|'
+             rf'(?<=m))(?:(?=\()|{_token_terminator})))({_space}?{_core_token_compound if compound else _core_token}?(?:\^[{_nl}]?)?/(?:\^[{_nl}]?)?\?)',
+             bygroups(Keyword, using(this, state='text')),
+             f'follow{suffix}'),
+            (rf'(goto{_keyword_terminator})({rest}(?:\^[{_nl}]?)?/(?:\^[{_nl}]?)?\?{rest})',
+             bygroups(Keyword, using(this, state='text')),
+             f'follow{suffix}'),
+            (words(('assoc', 'break', 'cd', 'chdir', 'cls', 'color', 'copy',
+                    'date', 'del', 'dir', 'dpath', 'echo', 'endlocal', 'erase',
+                    'exit', 'ftype', 'keys', 'md', 'mkdir', 'mklink', 'move',
+                    'path', 'pause', 'popd', 'prompt', 'pushd', 'rd', 'ren',
+                    'rename', 'rmdir', 'setlocal', 'shift', 'start', 'time',
+                    'title', 'type', 'ver', 'verify', 'vol'),
+                   suffix=_keyword_terminator), Keyword, f'follow{suffix}'),
+            (rf'(call)({_space}?)(:)',
+             bygroups(Keyword, using(this, state='text'), Punctuation),
+             f'call{suffix}'),
+            (rf'call{_keyword_terminator}', Keyword),
+            (rf'(for{_token_terminator}(?!\^))({_space})(/f{_token_terminator})',
+             bygroups(Keyword, using(this, state='text'), Keyword),
+             ('for/f', 'for')),
+            (rf'(for{_token_terminator}(?!\^))({_space})(/l{_token_terminator})',
+             bygroups(Keyword, using(this, state='text'), Keyword),
+             ('for/l', 'for')),
+            (rf'for{_token_terminator}(?!\^)', Keyword, ('for2', 'for')),
+            (rf'(goto{_keyword_terminator})({_space}?)(:?)',
+             bygroups(Keyword, using(this, state='text'), Punctuation),
+             f'label{suffix}'),
+            (rf'(if(?:(?=\()|{_token_terminator})(?!\^))({_space}?)((?:/i{_token_terminator})?)({_space}?)((?:not{_token_terminator})?)({_space}?)',
+             bygroups(Keyword, using(this, state='text'), Keyword,
+                      using(this, state='text'), Keyword,
+                      using(this, state='text')), ('(?', 'if')),
+            (rf'rem(((?=\()|{_token_terminator}){_space}?{_stoken}?.*|{_keyword_terminator}{rest_of_line_compound if compound else rest_of_line})',
+             Comment.Single, f'follow{suffix}'),
+            (rf'(set{_keyword_terminator}){set_space}(/a)',
+             bygroups(Keyword, using(this, state='text'), Keyword),
+             f'arithmetic{suffix}'),
+            (r'(set{}){}((?:/p)?){}((?:(?:(?:\^[{}]?)?[^"{}{}^={}]|'
+             r'\^[{}]?[^"=])+)?)((?:(?:\^[{}]?)?=)?)'.format(_keyword_terminator, set_space, set_space, _nl, _nl, _punct,
+              ')' if compound else '', _nl, _nl),
+             bygroups(Keyword, using(this, state='text'), Keyword,
+                      using(this, state='text'), using(this, state='variable'),
+                      Punctuation),
+             f'follow{suffix}'),
+            default(f'follow{suffix}')
+        ]
+
+    def _make_follow_state(compound, _label=_label,
+                           _label_compound=_label_compound, _nl=_nl,
+                           _space=_space, _start_label=_start_label,
+                           _token=_token, _token_compound=_token_compound,
+                           _ws=_ws):
+        suffix = '/compound' if compound else ''
+        state = []
+        if compound:
+            state.append((r'(?=\))', Text, '#pop'))
+        state += [
+            (rf'{_start_label}([{_ws}]*)({_label_compound if compound else _label})(.*)',
+             bygroups(Text, Punctuation, Text, Name.Label, Comment.Single)),
+            include(f'redirect{suffix}'),
+            (rf'(?=[{_nl}])', Text, '#pop'),
+            (r'\|\|?|&&?', Punctuation, '#pop'),
+            include('text')
+        ]
+        return state
+
+    def _make_arithmetic_state(compound, _nl=_nl, _punct=_punct,
+                               _string=_string, _variable=_variable,
+                               _ws=_ws, _nlws=_nlws):
+        op = r'=+\-*/!~'
+        state = []
+        if compound:
+            state.append((r'(?=\))', Text, '#pop'))
+        state += [
+            (r'0[0-7]+', Number.Oct),
+            (r'0x[\da-f]+', Number.Hex),
+            (r'\d+', Number.Integer),
+            (r'[(),]+', Punctuation),
+            (rf'([{op}]|%|\^\^)+', Operator),
+            (r'({}|{}|(\^[{}]?)?[^(){}%\^"{}{}]|\^[{}]?{})+'.format(_string, _variable, _nl, op, _nlws, _punct, _nlws,
+              r'[^)]' if compound else r'[\w\W]'),
+             using(this, state='variable')),
+            (r'(?=[\x00|&])', Text, '#pop'),
+            include('follow')
+        ]
+        return state
+
+    def _make_call_state(compound, _label=_label,
+                         _label_compound=_label_compound):
+        state = []
+        if compound:
+            state.append((r'(?=\))', Text, '#pop'))
+        state.append((r'(:?)(%s)' % (_label_compound if compound else _label),
+                      bygroups(Punctuation, Name.Label), '#pop'))
+        return state
+
+    def _make_label_state(compound, _label=_label,
+                          _label_compound=_label_compound, _nl=_nl,
+                          _punct=_punct, _string=_string, _variable=_variable):
+        state = []
+        if compound:
+            state.append((r'(?=\))', Text, '#pop'))
+        state.append((r'({}?)((?:{}|{}|\^[{}]?{}|[^"%^{}{}{}])*)'.format(_label_compound if compound else _label, _string,
+                       _variable, _nl, r'[^)]' if compound else r'[\w\W]', _nl,
+                       _punct, r')' if compound else ''),
+                      bygroups(Name.Label, Comment.Single), '#pop'))
+        return state
+
+    def _make_redirect_state(compound,
+                             _core_token_compound=_core_token_compound,
+                             _nl=_nl, _punct=_punct, _stoken=_stoken,
+                             _string=_string, _space=_space,
+                             _variable=_variable, _nlws=_nlws):
+        stoken_compound = (rf'(?:[{_punct}]+|(?:{_string}|{_variable}|{_core_token_compound})+)')
+        return [
+            (rf'((?:(?<=[{_nlws}])\d)?)(>>?&|<&)([{_nlws}]*)(\d)',
+             bygroups(Number.Integer, Punctuation, Text, Number.Integer)),
+            (rf'((?:(?<=[{_nlws}])(?>?|<)({_space}?{stoken_compound if compound else _stoken})',
+             bygroups(Number.Integer, Punctuation, using(this, state='text')))
+        ]
+
+    tokens = {
+        'root': _make_begin_state(False),
+        'follow': _make_follow_state(False),
+        'arithmetic': _make_arithmetic_state(False),
+        'call': _make_call_state(False),
+        'label': _make_label_state(False),
+        'redirect': _make_redirect_state(False),
+        'root/compound': _make_begin_state(True),
+        'follow/compound': _make_follow_state(True),
+        'arithmetic/compound': _make_arithmetic_state(True),
+        'call/compound': _make_call_state(True),
+        'label/compound': _make_label_state(True),
+        'redirect/compound': _make_redirect_state(True),
+        'variable-or-escape': [
+            (_variable, Name.Variable),
+            (rf'%%|\^[{_nl}]?(\^!|[\w\W])', String.Escape)
+        ],
+        'string': [
+            (r'"', String.Double, '#pop'),
+            (_variable, Name.Variable),
+            (r'\^!|%%', String.Escape),
+            (rf'[^"%^{_nl}]+|[%^]', String.Double),
+            default('#pop')
+        ],
+        'sqstring': [
+            include('variable-or-escape'),
+            (r'[^%]+|%', String.Single)
+        ],
+        'bqstring': [
+            include('variable-or-escape'),
+            (r'[^%]+|%', String.Backtick)
+        ],
+        'text': [
+            (r'"', String.Double, 'string'),
+            include('variable-or-escape'),
+            (rf'[^"%^{_nlws}{_punct}\d)]+|.', Text)
+        ],
+        'variable': [
+            (r'"', String.Double, 'string'),
+            include('variable-or-escape'),
+            (rf'[^"%^{_nl}]+|.', Name.Variable)
+        ],
+        'for': [
+            (rf'({_space})(in)({_space})(\()',
+             bygroups(using(this, state='text'), Keyword,
+                      using(this, state='text'), Punctuation), '#pop'),
+            include('follow')
+        ],
+        'for2': [
+            (r'\)', Punctuation),
+            (rf'({_space})(do{_token_terminator})',
+             bygroups(using(this, state='text'), Keyword), '#pop'),
+            (rf'[{_nl}]+', Text),
+            include('follow')
+        ],
+        'for/f': [
+            (rf'(")((?:{_variable}|[^"])*?")([{_nlws}]*)(\))',
+             bygroups(String.Double, using(this, state='string'), Text,
+                      Punctuation)),
+            (r'"', String.Double, ('#pop', 'for2', 'string')),
+            (rf"('(?:%%|{_variable}|[\w\W])*?')([{_nlws}]*)(\))",
+             bygroups(using(this, state='sqstring'), Text, Punctuation)),
+            (rf'(`(?:%%|{_variable}|[\w\W])*?`)([{_nlws}]*)(\))',
+             bygroups(using(this, state='bqstring'), Text, Punctuation)),
+            include('for2')
+        ],
+        'for/l': [
+            (r'-?\d+', Number.Integer),
+            include('for2')
+        ],
+        'if': [
+            (rf'((?:cmdextversion|errorlevel){_token_terminator})({_space})(\d+)',
+             bygroups(Keyword, using(this, state='text'),
+                      Number.Integer), '#pop'),
+            (rf'(defined{_token_terminator})({_space})({_stoken})',
+             bygroups(Keyword, using(this, state='text'),
+                      using(this, state='variable')), '#pop'),
+            (rf'(exist{_token_terminator})({_space}{_stoken})',
+             bygroups(Keyword, using(this, state='text')), '#pop'),
+            (rf'({_number}{_space})({_opword})({_space}{_number})',
+             bygroups(using(this, state='arithmetic'), Operator.Word,
+                      using(this, state='arithmetic')), '#pop'),
+            (_stoken, using(this, state='text'), ('#pop', 'if2')),
+        ],
+        'if2': [
+            (rf'({_space}?)(==)({_space}?{_stoken})',
+             bygroups(using(this, state='text'), Operator,
+                      using(this, state='text')), '#pop'),
+            (rf'({_space})({_opword})({_space}{_stoken})',
+             bygroups(using(this, state='text'), Operator.Word,
+                      using(this, state='text')), '#pop')
+        ],
+        '(?': [
+            (_space, using(this, state='text')),
+            (r'\(', Punctuation, ('#pop', 'else?', 'root/compound')),
+            default('#pop')
+        ],
+        'else?': [
+            (_space, using(this, state='text')),
+            (rf'else{_token_terminator}', Keyword, '#pop'),
+            default('#pop')
+        ]
+    }
+
+
+class MSDOSSessionLexer(ShellSessionBaseLexer):
+    """
+    Lexer for MS DOS shell sessions, i.e. command lines, including a
+    prompt, interspersed with output.
+    """
+
+    name = 'MSDOS Session'
+    aliases = ['doscon']
+    filenames = []
+    mimetypes = []
+    url = 'https://en.wikipedia.org/wiki/MS-DOS'
+    version_added = '2.1'
+    _example = "doscon/session"
+
+    _innerLexerCls = BatchLexer
+    _ps1rgx = re.compile(r'^([^>]*>)(.*\n?)')
+    _ps2 = 'More? '
+
+
+class TcshLexer(RegexLexer):
+    """
+    Lexer for tcsh scripts.
+    """
+
+    name = 'Tcsh'
+    aliases = ['tcsh', 'csh']
+    filenames = ['*.tcsh', '*.csh']
+    mimetypes = ['application/x-csh']
+    url = 'https://www.tcsh.org'
+    version_added = '0.10'
+
+    tokens = {
+        'root': [
+            include('basic'),
+            (r'\$\(', Keyword, 'paren'),
+            (r'\$\{#?', Keyword, 'curly'),
+            (r'`', String.Backtick, 'backticks'),
+            include('data'),
+        ],
+        'basic': [
+            (r'\b(if|endif|else|while|then|foreach|case|default|'
+             r'break|continue|goto|breaksw|end|switch|endsw)\s*\b',
+             Keyword),
+            (r'\b(alias|alloc|bg|bindkey|builtins|bye|caller|cd|chdir|'
+             r'complete|dirs|echo|echotc|eval|exec|exit|fg|filetest|getxvers|'
+             r'glob|getspath|hashstat|history|hup|inlib|jobs|kill|'
+             r'limit|log|login|logout|ls-F|migrate|newgrp|nice|nohup|notify|'
+             r'onintr|popd|printenv|pushd|rehash|repeat|rootnode|popd|pushd|'
+             r'set|shift|sched|setenv|setpath|settc|setty|setxvers|shift|'
+             r'source|stop|suspend|source|suspend|telltc|time|'
+             r'umask|unalias|uncomplete|unhash|universe|unlimit|unset|unsetenv|'
+             r'ver|wait|warp|watchlog|where|which)\s*\b',
+             Name.Builtin),
+            (r'#.*', Comment),
+            (r'\\[\w\W]', String.Escape),
+            (r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
+            (r'[\[\]{}()=]+', Operator),
+            (r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
+            (r';', Punctuation),
+        ],
+        'data': [
+            (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
+            (r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
+            (r'\s+', Text),
+            (r'[^=\s\[\]{}()$"\'`\\;#]+', Text),
+            (r'\d+(?= |\Z)', Number),
+            (r'\$#?(\w+|.)', Name.Variable),
+        ],
+        'curly': [
+            (r'\}', Keyword, '#pop'),
+            (r':-', Keyword),
+            (r'\w+', Name.Variable),
+            (r'[^}:"\'`$]+', Punctuation),
+            (r':', Punctuation),
+            include('root'),
+        ],
+        'paren': [
+            (r'\)', Keyword, '#pop'),
+            include('root'),
+        ],
+        'backticks': [
+            (r'`', String.Backtick, '#pop'),
+            include('root'),
+        ],
+    }
+
+
+class TcshSessionLexer(ShellSessionBaseLexer):
+    """
+    Lexer for Tcsh sessions, i.e. command lines, including a
+    prompt, interspersed with output.
+    """
+
+    name = 'Tcsh Session'
+    aliases = ['tcshcon']
+    filenames = []
+    mimetypes = []
+    url = 'https://www.tcsh.org'
+    version_added = '2.1'
+    _example = "tcshcon/session"
+
+    _innerLexerCls = TcshLexer
+    _ps1rgx = re.compile(r'^([^>]+>)(.*\n?)')
+    _ps2 = '? '
+
+
+class PowerShellLexer(RegexLexer):
+    """
+    For Windows PowerShell code.
+    """
+    name = 'PowerShell'
+    aliases = ['powershell', 'pwsh', 'posh', 'ps1', 'psm1']
+    filenames = ['*.ps1', '*.psm1']
+    mimetypes = ['text/x-powershell']
+    url = 'https://learn.microsoft.com/en-us/powershell'
+    version_added = '1.5'
+
+    flags = re.DOTALL | re.IGNORECASE | re.MULTILINE
+
+    keywords = (
+        'while validateset validaterange validatepattern validatelength '
+        'validatecount until trap switch return ref process param parameter in '
+        'if global: local: function foreach for finally filter end elseif else '
+        'dynamicparam do default continue cmdletbinding break begin alias \\? '
+        '% #script #private #local #global mandatory parametersetname position '
+        'valuefrompipeline valuefrompipelinebypropertyname '
+        'valuefromremainingarguments helpmessage try catch throw').split()
+
+    operators = (
+        'and as band bnot bor bxor casesensitive ccontains ceq cge cgt cle '
+        'clike clt cmatch cne cnotcontains cnotlike cnotmatch contains '
+        'creplace eq exact f file ge gt icontains ieq ige igt ile ilike ilt '
+        'imatch ine inotcontains inotlike inotmatch ireplace is isnot le like '
+        'lt match ne not notcontains notlike notmatch or regex replace '
+        'wildcard').split()
+
+    verbs = (
+        'write where watch wait use update unregister unpublish unprotect '
+        'unlock uninstall undo unblock trace test tee take sync switch '
+        'suspend submit stop step start split sort skip show set send select '
+        'search scroll save revoke resume restore restart resolve resize '
+        'reset request repair rename remove register redo receive read push '
+        'publish protect pop ping out optimize open new move mount merge '
+        'measure lock limit join invoke install initialize import hide group '
+        'grant get format foreach find export expand exit enter enable edit '
+        'dismount disconnect disable deny debug cxnew copy convertto '
+        'convertfrom convert connect confirm compress complete compare close '
+        'clear checkpoint block backup assert approve aggregate add').split()
+
+    aliases_ = (
+        'ac asnp cat cd cfs chdir clc clear clhy cli clp cls clv cnsn '
+        'compare copy cp cpi cpp curl cvpa dbp del diff dir dnsn ebp echo epal '
+        'epcsv epsn erase etsn exsn fc fhx fl foreach ft fw gal gbp gc gci gcm '
+        'gcs gdr ghy gi gjb gl gm gmo gp gps gpv group gsn gsnp gsv gu gv gwmi '
+        'h history icm iex ihy ii ipal ipcsv ipmo ipsn irm ise iwmi iwr kill lp '
+        'ls man md measure mi mount move mp mv nal ndr ni nmo npssc nsn nv ogv '
+        'oh popd ps pushd pwd r rbp rcjb rcsn rd rdr ren ri rjb rm rmdir rmo '
+        'rni rnp rp rsn rsnp rujb rv rvpa rwmi sajb sal saps sasv sbp sc select '
+        'set shcm si sl sleep sls sort sp spjb spps spsv start sujb sv swmi tee '
+        'trcm type wget where wjb write').split()
+
+    commenthelp = (
+        'component description example externalhelp forwardhelpcategory '
+        'forwardhelptargetname functionality inputs link '
+        'notes outputs parameter remotehelprunspace role synopsis').split()
+
+    tokens = {
+        'root': [
+            # we need to count pairs of parentheses for correct highlight
+            # of '$(...)' blocks in strings
+            (r'\(', Punctuation, 'child'),
+            (r'\s+', Text),
+            (r'^(\s*#[#\s]*)(\.(?:{}))([^\n]*$)'.format('|'.join(commenthelp)),
+             bygroups(Comment, String.Doc, Comment)),
+            (r'#[^\n]*?$', Comment),
+            (r'(<|<)#', Comment.Multiline, 'multline'),
+            (r'@"\n', String.Heredoc, 'heredoc-double'),
+            (r"@'\n.*?\n'@", String.Heredoc),
+            # escaped syntax
+            (r'`[\'"$@-]', Punctuation),
+            (r'"', String.Double, 'string'),
+            (r"'([^']|'')*'", String.Single),
+            (r'(\$|@@|@)((global|script|private|env):)?\w+',
+             Name.Variable),
+            (r'({})\b'.format('|'.join(keywords)), Keyword),
+            (r'-({})\b'.format('|'.join(operators)), Operator),
+            (r'({})-[a-z_]\w*\b'.format('|'.join(verbs)), Name.Builtin),
+            (r'({})\s'.format('|'.join(aliases_)), Name.Builtin),
+            (r'\[[a-z_\[][\w. `,\[\]]*\]', Name.Constant),  # .net [type]s
+            (r'-[a-z_]\w*', Name),
+            (r'\w+', Name),
+            (r'[.,;:@{}\[\]$()=+*/\\&%!~?^`|<>-]', Punctuation),
+        ],
+        'child': [
+            (r'\)', Punctuation, '#pop'),
+            include('root'),
+        ],
+        'multline': [
+            (r'[^#&.]+', Comment.Multiline),
+            (r'#(>|>)', Comment.Multiline, '#pop'),
+            (r'\.({})'.format('|'.join(commenthelp)), String.Doc),
+            (r'[#&.]', Comment.Multiline),
+        ],
+        'string': [
+            (r"`[0abfnrtv'\"$`]", String.Escape),
+            (r'[^$`"]+', String.Double),
+            (r'\$\(', Punctuation, 'child'),
+            (r'""', String.Double),
+            (r'[`$]', String.Double),
+            (r'"', String.Double, '#pop'),
+        ],
+        'heredoc-double': [
+            (r'\n"@', String.Heredoc, '#pop'),
+            (r'\$\(', Punctuation, 'child'),
+            (r'[^@\n]+"]', String.Heredoc),
+            (r".", String.Heredoc),
+        ]
+    }
+
+
+class PowerShellSessionLexer(ShellSessionBaseLexer):
+    """
+    Lexer for PowerShell sessions, i.e. command lines, including a
+    prompt, interspersed with output.
+    """
+
+    name = 'PowerShell Session'
+    aliases = ['pwsh-session', 'ps1con']
+    filenames = []
+    mimetypes = []
+    url = 'https://learn.microsoft.com/en-us/powershell'
+    version_added = '2.1'
+    _example = "pwsh-session/session"
+
+    _innerLexerCls = PowerShellLexer
+    _bare_continuation = True
+    _ps1rgx = re.compile(r'^((?:\[[^]]+\]: )?PS[^>]*> ?)(.*\n?)')
+    _ps2 = '> '
+
+
+class FishShellLexer(RegexLexer):
+    """
+    Lexer for Fish shell scripts.
+    """
+
+    name = 'Fish'
+    aliases = ['fish', 'fishshell']
+    filenames = ['*.fish', '*.load']
+    mimetypes = ['application/x-fish']
+    url = 'https://fishshell.com'
+    version_added = '2.1'
+
+    tokens = {
+        'root': [
+            include('basic'),
+            include('data'),
+            include('interp'),
+        ],
+        'interp': [
+            (r'\$\(\(', Keyword, 'math'),
+            (r'\(', Keyword, 'paren'),
+            (r'\$#?(\w+|.)', Name.Variable),
+        ],
+        'basic': [
+            (r'\b(begin|end|if|else|while|break|for|in|return|function|block|'
+             r'case|continue|switch|not|and|or|set|echo|exit|pwd|true|false|'
+             r'cd|count|test)(\s*)\b',
+             bygroups(Keyword, Text)),
+            (r'\b(alias|bg|bind|breakpoint|builtin|command|commandline|'
+             r'complete|contains|dirh|dirs|emit|eval|exec|fg|fish|fish_config|'
+             r'fish_indent|fish_pager|fish_prompt|fish_right_prompt|'
+             r'fish_update_completions|fishd|funced|funcsave|functions|help|'
+             r'history|isatty|jobs|math|mimedb|nextd|open|popd|prevd|psub|'
+             r'pushd|random|read|set_color|source|status|trap|type|ulimit|'
+             r'umask|vared|fc|getopts|hash|kill|printf|time|wait)\s*\b(?!\.)',
+             Name.Builtin),
+            (r'#.*\n', Comment),
+            (r'\\[\w\W]', String.Escape),
+            (r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Whitespace, Operator)),
+            (r'[\[\]()=]', Operator),
+            (r'<<-?\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
+        ],
+        'data': [
+            (r'(?s)\$?"(\\\\|\\[0-7]+|\\.|[^"\\$])*"', String.Double),
+            (r'"', String.Double, 'string'),
+            (r"(?s)\$'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
+            (r"(?s)'.*?'", String.Single),
+            (r';', Punctuation),
+            (r'&|\||\^|<|>', Operator),
+            (r'\s+', Text),
+            (r'\d+(?= |\Z)', Number),
+            (r'[^=\s\[\]{}()$"\'`\\<&|;]+', Text),
+        ],
+        'string': [
+            (r'"', String.Double, '#pop'),
+            (r'(?s)(\\\\|\\[0-7]+|\\.|[^"\\$])+', String.Double),
+            include('interp'),
+        ],
+        'paren': [
+            (r'\)', Keyword, '#pop'),
+            include('root'),
+        ],
+        'math': [
+            (r'\)\)', Keyword, '#pop'),
+            (r'[-+*/%^|&]|\*\*|\|\|', Operator),
+            (r'\d+#\d+', Number),
+            (r'\d+#(?! )', Number),
+            (r'\d+', Number),
+            include('root'),
+        ],
+    }
+
+class ExeclineLexer(RegexLexer):
+    """
+    Lexer for Laurent Bercot's execline language.
+    """
+
+    name = 'execline'
+    aliases = ['execline']
+    filenames = ['*.exec']
+    url = 'https://skarnet.org/software/execline'
+    version_added = '2.7'
+
+    tokens = {
+        'root': [
+            include('basic'),
+            include('data'),
+            include('interp')
+        ],
+        'interp': [
+            (r'\$\{', String.Interpol, 'curly'),
+            (r'\$[\w@#]+', Name.Variable),  # user variable
+            (r'\$', Text),
+        ],
+        'basic': [
+            (r'\b(background|backtick|cd|define|dollarat|elgetopt|'
+             r'elgetpositionals|elglob|emptyenv|envfile|exec|execlineb|'
+             r'exit|export|fdblock|fdclose|fdmove|fdreserve|fdswap|'
+             r'forbacktickx|foreground|forstdin|forx|getcwd|getpid|heredoc|'
+             r'homeof|if|ifelse|ifte|ifthenelse|importas|loopwhilex|'
+             r'multidefine|multisubstitute|pipeline|piperw|posix-cd|'
+             r'redirfd|runblock|shift|trap|tryexec|umask|unexport|wait|'
+             r'withstdinas)\b', Name.Builtin),
+            (r'\A#!.+\n', Comment.Hashbang),
+            (r'#.*\n', Comment.Single),
+            (r'[{}]', Operator)
+        ],
+        'data': [
+            (r'(?s)"(\\.|[^"\\$])*"', String.Double),
+            (r'"', String.Double, 'string'),
+            (r'\s+', Text),
+            (r'[^\s{}$"\\]+', Text)
+        ],
+        'string': [
+            (r'"', String.Double, '#pop'),
+            (r'(?s)(\\\\|\\.|[^"\\$])+', String.Double),
+            include('interp'),
+        ],
+        'curly': [
+            (r'\}', String.Interpol, '#pop'),
+            (r'[\w#@]+', Name.Variable),
+            include('root')
+        ]
+
+    }
+
+    def analyse_text(text):
+        if shebang_matches(text, r'execlineb'):
+            return 1
diff --git a/local_packages/pygments/lexers/sieve.py b/local_packages/pygments/lexers/sieve.py
new file mode 100644
index 0000000..4bda351
--- /dev/null
+++ b/local_packages/pygments/lexers/sieve.py
@@ -0,0 +1,78 @@
+"""
+    pygments.lexers.sieve
+    ~~~~~~~~~~~~~~~~~~~~~
+
+    Lexer for Sieve file format.
+
+    https://tools.ietf.org/html/rfc5228
+    https://tools.ietf.org/html/rfc5173
+    https://tools.ietf.org/html/rfc5229
+    https://tools.ietf.org/html/rfc5230
+    https://tools.ietf.org/html/rfc5232
+    https://tools.ietf.org/html/rfc5235
+    https://tools.ietf.org/html/rfc5429
+    https://tools.ietf.org/html/rfc8580
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups
+from pygments.token import Comment, Name, Literal, String, Text, Punctuation, \
+    Keyword
+
+__all__ = ["SieveLexer"]
+
+
+class SieveLexer(RegexLexer):
+    """
+    Lexer for sieve format.
+    """
+    name = 'Sieve'
+    filenames = ['*.siv', '*.sieve']
+    aliases = ['sieve']
+    url = 'https://en.wikipedia.org/wiki/Sieve_(mail_filtering_language)'
+    version_added = '2.6'
+
+    tokens = {
+        'root': [
+            (r'\s+', Text),
+            (r'[();,{}\[\]]', Punctuation),
+            # import:
+            (r'(?i)require',
+             Keyword.Namespace),
+            # tags:
+            (r'(?i)(:)(addresses|all|contains|content|create|copy|comparator|'
+             r'count|days|detail|domain|fcc|flags|from|handle|importance|is|'
+             r'localpart|length|lowerfirst|lower|matches|message|mime|options|'
+             r'over|percent|quotewildcard|raw|regex|specialuse|subject|text|'
+             r'under|upperfirst|upper|value)',
+             bygroups(Name.Tag, Name.Tag)),
+            # tokens:
+            (r'(?i)(address|addflag|allof|anyof|body|discard|elsif|else|envelope|'
+             r'ereject|exists|false|fileinto|if|hasflag|header|keep|'
+             r'notify_method_capability|notify|not|redirect|reject|removeflag|'
+             r'setflag|size|spamtest|stop|string|true|vacation|virustest)',
+             Name.Builtin),
+            (r'(?i)set',
+             Keyword.Declaration),
+            # number:
+            (r'([0-9.]+)([kmgKMG])?',
+             bygroups(Literal.Number, Literal.Number)),
+            # comment:
+            (r'#.*$',
+             Comment.Single),
+            (r'/\*.*\*/',
+             Comment.Multiline),
+            # string:
+            (r'"[^"]*?"',
+             String),
+            # text block:
+            (r'text:',
+             Name.Tag, 'text'),
+        ],
+        'text': [
+            (r'[^.].*?\n', String),
+            (r'^\.', Punctuation, "#pop"),
+        ]
+    }
diff --git a/local_packages/pygments/lexers/slash.py b/local_packages/pygments/lexers/slash.py
new file mode 100644
index 0000000..2bcbdf8
--- /dev/null
+++ b/local_packages/pygments/lexers/slash.py
@@ -0,0 +1,183 @@
+"""
+    pygments.lexers.slash
+    ~~~~~~~~~~~~~~~~~~~~~
+
+    Lexer for the Slash programming language.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import ExtendedRegexLexer, bygroups, DelegatingLexer
+from pygments.token import Name, Number, String, Comment, Punctuation, \
+    Other, Keyword, Operator, Whitespace
+
+__all__ = ['SlashLexer']
+
+
+class SlashLanguageLexer(ExtendedRegexLexer):
+    _nkw = r'(?=[^a-zA-Z_0-9])'
+
+    def move_state(new_state):
+        return ("#pop", new_state)
+
+    def right_angle_bracket(lexer, match, ctx):
+        if len(ctx.stack) > 1 and ctx.stack[-2] == "string":
+            ctx.stack.pop()
+        yield match.start(), String.Interpol, '}'
+        ctx.pos = match.end()
+        pass
+
+    tokens = {
+        "root": [
+            (r"<%=",        Comment.Preproc,    move_state("slash")),
+            (r"<%!!",       Comment.Preproc,    move_state("slash")),
+            (r"<%#.*?%>",   Comment.Multiline),
+            (r"<%",         Comment.Preproc,    move_state("slash")),
+            (r".|\n",       Other),
+        ],
+        "string": [
+            (r"\\",         String.Escape,      move_state("string_e")),
+            (r"\"",         String,             move_state("slash")),
+            (r"#\{",        String.Interpol,    "slash"),
+            (r'.|\n',       String),
+        ],
+        "string_e": [
+            (r'n',                  String.Escape,      move_state("string")),
+            (r't',                  String.Escape,      move_state("string")),
+            (r'r',                  String.Escape,      move_state("string")),
+            (r'e',                  String.Escape,      move_state("string")),
+            (r'x[a-fA-F0-9]{2}',    String.Escape,      move_state("string")),
+            (r'.',                  String.Escape,      move_state("string")),
+        ],
+        "regexp": [
+            (r'}[a-z]*',            String.Regex,       move_state("slash")),
+            (r'\\(.|\n)',           String.Regex),
+            (r'{',                  String.Regex,       "regexp_r"),
+            (r'.|\n',               String.Regex),
+        ],
+        "regexp_r": [
+            (r'}[a-z]*',            String.Regex,       "#pop"),
+            (r'\\(.|\n)',           String.Regex),
+            (r'{',                  String.Regex,       "regexp_r"),
+        ],
+        "slash": [
+            (r"%>",                     Comment.Preproc,    move_state("root")),
+            (r"\"",                     String,             move_state("string")),
+            (r"'[a-zA-Z0-9_]+",         String),
+            (r'%r{',                    String.Regex,       move_state("regexp")),
+            (r'/\*.*?\*/',              Comment.Multiline),
+            (r"(#|//).*?\n",            Comment.Single),
+            (r'-?[0-9]+e[+-]?[0-9]+',   Number.Float),
+            (r'-?[0-9]+\.[0-9]+(e[+-]?[0-9]+)?', Number.Float),
+            (r'-?[0-9]+',               Number.Integer),
+            (r'nil'+_nkw,               Name.Builtin),
+            (r'true'+_nkw,              Name.Builtin),
+            (r'false'+_nkw,             Name.Builtin),
+            (r'self'+_nkw,              Name.Builtin),
+            (r'(class)(\s+)([A-Z][a-zA-Z0-9_\']*)',
+                bygroups(Keyword, Whitespace, Name.Class)),
+            (r'class'+_nkw,             Keyword),
+            (r'extends'+_nkw,           Keyword),
+            (r'(def)(\s+)(self)(\s*)(\.)(\s*)([a-z_][a-zA-Z0-9_\']*=?|<<|>>|==|<=>|<=|<|>=|>|\+|-(self)?|~(self)?|\*|/|%|^|&&|&|\||\[\]=?)',
+                bygroups(Keyword, Whitespace, Name.Builtin, Whitespace, Punctuation, Whitespace, Name.Function)),
+            (r'(def)(\s+)([a-z_][a-zA-Z0-9_\']*=?|<<|>>|==|<=>|<=|<|>=|>|\+|-(self)?|~(self)?|\*|/|%|^|&&|&|\||\[\]=?)',
+                bygroups(Keyword, Whitespace, Name.Function)),
+            (r'def'+_nkw,               Keyword),
+            (r'if'+_nkw,                Keyword),
+            (r'elsif'+_nkw,             Keyword),
+            (r'else'+_nkw,              Keyword),
+            (r'unless'+_nkw,            Keyword),
+            (r'for'+_nkw,               Keyword),
+            (r'in'+_nkw,                Keyword),
+            (r'while'+_nkw,             Keyword),
+            (r'until'+_nkw,             Keyword),
+            (r'and'+_nkw,               Keyword),
+            (r'or'+_nkw,                Keyword),
+            (r'not'+_nkw,               Keyword),
+            (r'lambda'+_nkw,            Keyword),
+            (r'try'+_nkw,               Keyword),
+            (r'catch'+_nkw,             Keyword),
+            (r'return'+_nkw,            Keyword),
+            (r'next'+_nkw,              Keyword),
+            (r'last'+_nkw,              Keyword),
+            (r'throw'+_nkw,             Keyword),
+            (r'use'+_nkw,               Keyword),
+            (r'switch'+_nkw,            Keyword),
+            (r'\\',                     Keyword),
+            (r'λ',                      Keyword),
+            (r'__FILE__'+_nkw,          Name.Builtin.Pseudo),
+            (r'__LINE__'+_nkw,          Name.Builtin.Pseudo),
+            (r'[A-Z][a-zA-Z0-9_\']*'+_nkw, Name.Constant),
+            (r'[a-z_][a-zA-Z0-9_\']*'+_nkw, Name),
+            (r'@[a-z_][a-zA-Z0-9_\']*'+_nkw, Name.Variable.Instance),
+            (r'@@[a-z_][a-zA-Z0-9_\']*'+_nkw, Name.Variable.Class),
+            (r'\(',                     Punctuation),
+            (r'\)',                     Punctuation),
+            (r'\[',                     Punctuation),
+            (r'\]',                     Punctuation),
+            (r'\{',                     Punctuation),
+            (r'\}',                     right_angle_bracket),
+            (r';',                      Punctuation),
+            (r',',                      Punctuation),
+            (r'<<=',                    Operator),
+            (r'>>=',                    Operator),
+            (r'<<',                     Operator),
+            (r'>>',                     Operator),
+            (r'==',                     Operator),
+            (r'!=',                     Operator),
+            (r'=>',                     Operator),
+            (r'=',                      Operator),
+            (r'<=>',                    Operator),
+            (r'<=',                     Operator),
+            (r'>=',                     Operator),
+            (r'<',                      Operator),
+            (r'>',                      Operator),
+            (r'\+\+',                   Operator),
+            (r'\+=',                    Operator),
+            (r'-=',                     Operator),
+            (r'\*\*=',                  Operator),
+            (r'\*=',                    Operator),
+            (r'\*\*',                   Operator),
+            (r'\*',                     Operator),
+            (r'/=',                     Operator),
+            (r'\+',                     Operator),
+            (r'-',                      Operator),
+            (r'/',                      Operator),
+            (r'%=',                     Operator),
+            (r'%',                      Operator),
+            (r'^=',                     Operator),
+            (r'&&=',                    Operator),
+            (r'&=',                     Operator),
+            (r'&&',                     Operator),
+            (r'&',                      Operator),
+            (r'\|\|=',                  Operator),
+            (r'\|=',                    Operator),
+            (r'\|\|',                   Operator),
+            (r'\|',                     Operator),
+            (r'!',                      Operator),
+            (r'\.\.\.',                 Operator),
+            (r'\.\.',                   Operator),
+            (r'\.',                     Operator),
+            (r'::',                     Operator),
+            (r':',                      Operator),
+            (r'(\s|\n)+',               Whitespace),
+            (r'[a-z_][a-zA-Z0-9_\']*',  Name.Variable),
+        ],
+    }
+
+
+class SlashLexer(DelegatingLexer):
+    """
+    Lexer for the Slash programming language.
+    """
+
+    name = 'Slash'
+    aliases = ['slash']
+    filenames = ['*.sla']
+    url = 'https://github.com/arturadib/Slash-A'
+    version_added = '2.4'
+
+    def __init__(self, **options):
+        from pygments.lexers.web import HtmlLexer
+        super().__init__(HtmlLexer, SlashLanguageLexer, **options)
diff --git a/local_packages/pygments/lexers/smalltalk.py b/local_packages/pygments/lexers/smalltalk.py
new file mode 100644
index 0000000..69e0519
--- /dev/null
+++ b/local_packages/pygments/lexers/smalltalk.py
@@ -0,0 +1,194 @@
+"""
+    pygments.lexers.smalltalk
+    ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for Smalltalk and related languages.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, include, bygroups, default
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+    Number, Punctuation
+
+__all__ = ['SmalltalkLexer', 'NewspeakLexer']
+
+
+class SmalltalkLexer(RegexLexer):
+    """
+    For Smalltalk syntax.
+    Contributed by Stefan Matthias Aust.
+    Rewritten by Nils Winter.
+    """
+    name = 'Smalltalk'
+    url = 'http://www.smalltalk.org/'
+    filenames = ['*.st']
+    aliases = ['smalltalk', 'squeak', 'st']
+    mimetypes = ['text/x-smalltalk']
+    version_added = '0.10'
+
+    tokens = {
+        'root': [
+            (r'(<)(\w+:)(.*?)(>)', bygroups(Text, Keyword, Text, Text)),
+            include('squeak fileout'),
+            include('whitespaces'),
+            include('method definition'),
+            (r'(\|)([\w\s]*)(\|)', bygroups(Operator, Name.Variable, Operator)),
+            include('objects'),
+            (r'\^|\:=|\_', Operator),
+            # temporaries
+            (r'[\]({}.;!]', Text),
+        ],
+        'method definition': [
+            # Not perfect can't allow whitespaces at the beginning and the
+            # without breaking everything
+            (r'([a-zA-Z]+\w*:)(\s*)(\w+)',
+             bygroups(Name.Function, Text, Name.Variable)),
+            (r'^(\b[a-zA-Z]+\w*\b)(\s*)$', bygroups(Name.Function, Text)),
+            (r'^([-+*/\\~<>=|&!?,@%]+)(\s*)(\w+)(\s*)$',
+             bygroups(Name.Function, Text, Name.Variable, Text)),
+        ],
+        'blockvariables': [
+            include('whitespaces'),
+            (r'(:)(\s*)(\w+)',
+             bygroups(Operator, Text, Name.Variable)),
+            (r'\|', Operator, '#pop'),
+            default('#pop'),  # else pop
+        ],
+        'literals': [
+            (r"'(''|[^'])*'", String, 'afterobject'),
+            (r'\$.', String.Char, 'afterobject'),
+            (r'#\(', String.Symbol, 'parenth'),
+            (r'\)', Text, 'afterobject'),
+            (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number, 'afterobject'),
+        ],
+        '_parenth_helper': [
+            include('whitespaces'),
+            (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number),
+            (r'[-+*/\\~<>=|&#!?,@%\w:]+', String.Symbol),
+            # literals
+            (r"'(''|[^'])*'", String),
+            (r'\$.', String.Char),
+            (r'#*\(', String.Symbol, 'inner_parenth'),
+        ],
+        'parenth': [
+            # This state is a bit tricky since
+            # we can't just pop this state
+            (r'\)', String.Symbol, ('root', 'afterobject')),
+            include('_parenth_helper'),
+        ],
+        'inner_parenth': [
+            (r'\)', String.Symbol, '#pop'),
+            include('_parenth_helper'),
+        ],
+        'whitespaces': [
+            # skip whitespace and comments
+            (r'\s+', Text),
+            (r'"(""|[^"])*"', Comment),
+        ],
+        'objects': [
+            (r'\[', Text, 'blockvariables'),
+            (r'\]', Text, 'afterobject'),
+            (r'\b(self|super|true|false|nil|thisContext)\b',
+             Name.Builtin.Pseudo, 'afterobject'),
+            (r'\b[A-Z]\w*(?!:)\b', Name.Class, 'afterobject'),
+            (r'\b[a-z]\w*(?!:)\b', Name.Variable, 'afterobject'),
+            (r'#("(""|[^"])*"|[-+*/\\~<>=|&!?,@%]+|[\w:]+)',
+             String.Symbol, 'afterobject'),
+            include('literals'),
+        ],
+        'afterobject': [
+            (r'! !$', Keyword, '#pop'),  # squeak chunk delimiter
+            include('whitespaces'),
+            (r'\b(ifTrue:|ifFalse:|whileTrue:|whileFalse:|timesRepeat:)',
+             Name.Builtin, '#pop'),
+            (r'\b(new\b(?!:))', Name.Builtin),
+            (r'\:=|\_', Operator, '#pop'),
+            (r'\b[a-zA-Z]+\w*:', Name.Function, '#pop'),
+            (r'\b[a-zA-Z]+\w*', Name.Function),
+            (r'\w+:?|[-+*/\\~<>=|&!?,@%]+', Name.Function, '#pop'),
+            (r'\.', Punctuation, '#pop'),
+            (r';', Punctuation),
+            (r'[\])}]', Text),
+            (r'[\[({]', Text, '#pop'),
+        ],
+        'squeak fileout': [
+            # Squeak fileout format (optional)
+            (r'^"(""|[^"])*"!', Keyword),
+            (r"^'(''|[^'])*'!", Keyword),
+            (r'^(!)(\w+)( commentStamp: )(.*?)( prior: .*?!\n)(.*?)(!)',
+                bygroups(Keyword, Name.Class, Keyword, String, Keyword, Text, Keyword)),
+            (r"^(!)(\w+(?: class)?)( methodsFor: )('(?:''|[^'])*')(.*?!)",
+                bygroups(Keyword, Name.Class, Keyword, String, Keyword)),
+            (r'^(\w+)( subclass: )(#\w+)'
+             r'(\s+instanceVariableNames: )(.*?)'
+             r'(\s+classVariableNames: )(.*?)'
+             r'(\s+poolDictionaries: )(.*?)'
+             r'(\s+category: )(.*?)(!)',
+                bygroups(Name.Class, Keyword, String.Symbol, Keyword, String, Keyword,
+                         String, Keyword, String, Keyword, String, Keyword)),
+            (r'^(\w+(?: class)?)(\s+instanceVariableNames: )(.*?)(!)',
+                bygroups(Name.Class, Keyword, String, Keyword)),
+            (r'(!\n)(\].*)(! !)$', bygroups(Keyword, Text, Keyword)),
+            (r'! !$', Keyword),
+        ],
+    }
+
+
+class NewspeakLexer(RegexLexer):
+    """
+    For Newspeak syntax.
+    """
+    name = 'Newspeak'
+    url = 'http://newspeaklanguage.org/'
+    filenames = ['*.ns2']
+    aliases = ['newspeak', ]
+    mimetypes = ['text/x-newspeak']
+    version_added = '1.1'
+
+    tokens = {
+        'root': [
+            (r'\b(Newsqueak2)\b', Keyword.Declaration),
+            (r"'[^']*'", String),
+            (r'\b(class)(\s+)(\w+)(\s*)',
+             bygroups(Keyword.Declaration, Text, Name.Class, Text)),
+            (r'\b(mixin|self|super|private|public|protected|nil|true|false)\b',
+             Keyword),
+            (r'(\w+\:)(\s*)([a-zA-Z_]\w+)',
+             bygroups(Name.Function, Text, Name.Variable)),
+            (r'(\w+)(\s*)(=)',
+             bygroups(Name.Attribute, Text, Operator)),
+            (r'<\w+>', Comment.Special),
+            include('expressionstat'),
+            include('whitespace')
+        ],
+
+        'expressionstat': [
+            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
+            (r'\d+', Number.Integer),
+            (r':\w+', Name.Variable),
+            (r'(\w+)(::)', bygroups(Name.Variable, Operator)),
+            (r'\w+:', Name.Function),
+            (r'\w+', Name.Variable),
+            (r'\(|\)', Punctuation),
+            (r'\[|\]', Punctuation),
+            (r'\{|\}', Punctuation),
+
+            (r'(\^|\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-|:)', Operator),
+            (r'\.|;', Punctuation),
+            include('whitespace'),
+            include('literals'),
+        ],
+        'literals': [
+            (r'\$.', String),
+            (r"'[^']*'", String),
+            (r"#'[^']*'", String.Symbol),
+            (r"#\w+:?", String.Symbol),
+            (r"#(\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-)+", String.Symbol)
+        ],
+        'whitespace': [
+            (r'\s+', Text),
+            (r'"[^"]*"', Comment)
+        ],
+    }
diff --git a/local_packages/pygments/lexers/smithy.py b/local_packages/pygments/lexers/smithy.py
new file mode 100644
index 0000000..2642bb8
--- /dev/null
+++ b/local_packages/pygments/lexers/smithy.py
@@ -0,0 +1,77 @@
+"""
+    pygments.lexers.smithy
+    ~~~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for the Smithy IDL.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, words
+from pygments.token import Text, Comment, Keyword, Name, String, \
+    Number, Whitespace, Punctuation
+
+__all__ = ['SmithyLexer']
+
+
+class SmithyLexer(RegexLexer):
+    """
+    For Smithy IDL
+    """
+    name = 'Smithy'
+    url = 'https://awslabs.github.io/smithy/'
+    filenames = ['*.smithy']
+    aliases = ['smithy']
+    version_added = '2.10'
+
+    unquoted = r'[A-Za-z0-9_\.#$-]+'
+    identifier = r"[A-Za-z0-9_\.#$-]+"
+
+    simple_shapes = (
+        'use', 'byte', 'short', 'integer', 'long', 'float', 'document',
+        'double', 'bigInteger', 'bigDecimal', 'boolean', 'blob', 'string',
+        'timestamp',
+    )
+
+    aggregate_shapes = (
+       'apply', 'list', 'map', 'set', 'structure', 'union', 'resource',
+       'operation', 'service', 'trait'
+    )
+
+    tokens = {
+        'root': [
+            (r'///.*$', Comment.Multiline),
+            (r'//.*$', Comment),
+            (r'@[0-9a-zA-Z\.#-]*', Name.Decorator),
+            (r'(=)', Name.Decorator),
+            (r'^(\$version)(:)(.+)',
+                bygroups(Keyword.Declaration, Name.Decorator, Name.Class)),
+            (r'^(namespace)(\s+' + identifier + r')\b',
+                bygroups(Keyword.Declaration, Name.Class)),
+            (words(simple_shapes,
+                   prefix=r'^', suffix=r'(\s+' + identifier + r')\b'),
+                bygroups(Keyword.Declaration, Name.Class)),
+            (words(aggregate_shapes,
+                   prefix=r'^', suffix=r'(\s+' + identifier + r')'),
+                bygroups(Keyword.Declaration, Name.Class)),
+            (r'^(metadata)(\s+)((?:\S+)|(?:\"[^"]+\"))(\s*)(=)',
+                bygroups(Keyword.Declaration, Whitespace, Name.Class,
+                         Whitespace, Name.Decorator)),
+            (r"(true|false|null)", Keyword.Constant),
+            (r"(-?(?:0|[1-9]\d*)(?:\.\d+)?(?:[eE][+-]?\d+)?)", Number),
+            (identifier + ":", Name.Label),
+            (identifier, Name.Variable.Class),
+            (r'\[', Text, "#push"),
+            (r'\]', Text, "#pop"),
+            (r'\(', Text, "#push"),
+            (r'\)', Text, "#pop"),
+            (r'\{', Text, "#push"),
+            (r'\}', Text, "#pop"),
+            (r'"{3}(\\\\|\n|\\")*"{3}', String.Doc),
+            (r'"(\\\\|\n|\\"|[^"])*"', String.Double),
+            (r"'(\\\\|\n|\\'|[^'])*'", String.Single),
+            (r'[:,]+', Punctuation),
+            (r'\s+', Whitespace),
+        ]
+    }
diff --git a/local_packages/pygments/lexers/smv.py b/local_packages/pygments/lexers/smv.py
new file mode 100644
index 0000000..3d06f99
--- /dev/null
+++ b/local_packages/pygments/lexers/smv.py
@@ -0,0 +1,78 @@
+"""
+    pygments.lexers.smv
+    ~~~~~~~~~~~~~~~~~~~
+
+    Lexers for the SMV languages.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, words
+from pygments.token import Comment, Keyword, Name, Number, Operator, \
+    Punctuation, Text
+
+__all__ = ['NuSMVLexer']
+
+
+class NuSMVLexer(RegexLexer):
+    """
+    Lexer for the NuSMV language.
+    """
+
+    name = 'NuSMV'
+    aliases = ['nusmv']
+    filenames = ['*.smv']
+    mimetypes = []
+    url = 'https://nusmv.fbk.eu'
+    version_added = '2.2'
+
+    tokens = {
+        'root': [
+            # Comments
+            (r'(?s)\/\-\-.*?\-\-/', Comment),
+            (r'--.*\n', Comment),
+
+            # Reserved
+            (words(('MODULE', 'DEFINE', 'MDEFINE', 'CONSTANTS', 'VAR', 'IVAR',
+                    'FROZENVAR', 'INIT', 'TRANS', 'INVAR', 'SPEC', 'CTLSPEC',
+                    'LTLSPEC', 'PSLSPEC', 'COMPUTE', 'NAME', 'INVARSPEC',
+                    'FAIRNESS', 'JUSTICE', 'COMPASSION', 'ISA', 'ASSIGN',
+                    'CONSTRAINT', 'SIMPWFF', 'CTLWFF', 'LTLWFF', 'PSLWFF',
+                    'COMPWFF', 'IN', 'MIN', 'MAX', 'MIRROR', 'PRED',
+                    'PREDICATES'), suffix=r'(?![\w$#-])'),
+             Keyword.Declaration),
+            (r'process(?![\w$#-])', Keyword),
+            (words(('array', 'of', 'boolean', 'integer', 'real', 'word'),
+                   suffix=r'(?![\w$#-])'), Keyword.Type),
+            (words(('case', 'esac'), suffix=r'(?![\w$#-])'), Keyword),
+            (words(('word1', 'bool', 'signed', 'unsigned', 'extend', 'resize',
+                    'sizeof', 'uwconst', 'swconst', 'init', 'self', 'count',
+                    'abs', 'max', 'min'), suffix=r'(?![\w$#-])'),
+             Name.Builtin),
+            (words(('EX', 'AX', 'EF', 'AF', 'EG', 'AG', 'E', 'F', 'O', 'G',
+                    'H', 'X', 'Y', 'Z', 'A', 'U', 'S', 'V', 'T', 'BU', 'EBF',
+                    'ABF', 'EBG', 'ABG', 'next', 'mod', 'union', 'in', 'xor',
+                    'xnor'), suffix=r'(?![\w$#-])'),
+                Operator.Word),
+            (words(('TRUE', 'FALSE'), suffix=r'(?![\w$#-])'), Keyword.Constant),
+
+            # Names
+            (r'[a-zA-Z_][\w$#-]*', Name.Variable),
+
+            # Operators
+            (r':=', Operator),
+            (r'[-&|+*/<>!=]', Operator),
+
+            # Literals
+            (r'\-?\d+\b', Number.Integer),
+            (r'0[su][bB]\d*_[01_]+', Number.Bin),
+            (r'0[su][oO]\d*_[0-7_]+', Number.Oct),
+            (r'0[su][dD]\d*_[\d_]+', Number.Decimal),
+            (r'0[su][hH]\d*_[\da-fA-F_]+', Number.Hex),
+
+            # Whitespace, punctuation and the rest
+            (r'\s+', Text.Whitespace),
+            (r'[()\[\]{};?:.,]', Punctuation),
+        ],
+    }
diff --git a/local_packages/pygments/lexers/snobol.py b/local_packages/pygments/lexers/snobol.py
new file mode 100644
index 0000000..7016c96
--- /dev/null
+++ b/local_packages/pygments/lexers/snobol.py
@@ -0,0 +1,82 @@
+"""
+    pygments.lexers.snobol
+    ~~~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for the SNOBOL language.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+    Number, Punctuation
+
+__all__ = ['SnobolLexer']
+
+
+class SnobolLexer(RegexLexer):
+    """
+    Lexer for the SNOBOL4 programming language.
+
+    Recognizes the common ASCII equivalents of the original SNOBOL4 operators.
+    Does not require spaces around binary operators.
+    """
+
+    name = "Snobol"
+    aliases = ["snobol"]
+    filenames = ['*.snobol']
+    mimetypes = ['text/x-snobol']
+    url = 'https://www.regressive.org/snobol4'
+    version_added = '1.5'
+
+    tokens = {
+        # root state, start of line
+        # comments, continuation lines, and directives start in column 1
+        # as do labels
+        'root': [
+            (r'\*.*\n', Comment),
+            (r'[+.] ', Punctuation, 'statement'),
+            (r'-.*\n', Comment),
+            (r'END\s*\n', Name.Label, 'heredoc'),
+            (r'[A-Za-z$][\w$]*', Name.Label, 'statement'),
+            (r'\s+', Text, 'statement'),
+        ],
+        # statement state, line after continuation or label
+        'statement': [
+            (r'\s*\n', Text, '#pop'),
+            (r'\s+', Text),
+            (r'(?<=[^\w.])(LT|LE|EQ|NE|GE|GT|INTEGER|IDENT|DIFFER|LGT|SIZE|'
+             r'REPLACE|TRIM|DUPL|REMDR|DATE|TIME|EVAL|APPLY|OPSYN|LOAD|UNLOAD|'
+             r'LEN|SPAN|BREAK|ANY|NOTANY|TAB|RTAB|REM|POS|RPOS|FAIL|FENCE|'
+             r'ABORT|ARB|ARBNO|BAL|SUCCEED|INPUT|OUTPUT|TERMINAL)(?=[^\w.])',
+             Name.Builtin),
+            (r'[A-Za-z][\w.]*', Name),
+            # ASCII equivalents of original operators
+            # | for the EBCDIC equivalent, ! likewise
+            # \ for EBCDIC negation
+            (r'\*\*|[?$.!%*/#+\-@|&\\=]', Operator),
+            (r'"[^"]*"', String),
+            (r"'[^']*'", String),
+            # Accept SPITBOL syntax for real numbers
+            # as well as Macro SNOBOL4
+            (r'[0-9]+(?=[^.EeDd])', Number.Integer),
+            (r'[0-9]+(\.[0-9]*)?([EDed][-+]?[0-9]+)?', Number.Float),
+            # Goto
+            (r':', Punctuation, 'goto'),
+            (r'[()<>,;]', Punctuation),
+        ],
+        # Goto block
+        'goto': [
+            (r'\s*\n', Text, "#pop:2"),
+            (r'\s+', Text),
+            (r'F|S', Keyword),
+            (r'(\()([A-Za-z][\w.]*)(\))',
+             bygroups(Punctuation, Name.Label, Punctuation))
+        ],
+        # everything after the END statement is basically one
+        # big heredoc.
+        'heredoc': [
+            (r'.*\n', String.Heredoc)
+        ]
+    }
diff --git a/local_packages/pygments/lexers/solidity.py b/local_packages/pygments/lexers/solidity.py
new file mode 100644
index 0000000..3f2d1dd
--- /dev/null
+++ b/local_packages/pygments/lexers/solidity.py
@@ -0,0 +1,87 @@
+"""
+    pygments.lexers.solidity
+    ~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for Solidity.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, include, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+    Number, Punctuation, Whitespace
+
+__all__ = ['SolidityLexer']
+
+
+class SolidityLexer(RegexLexer):
+    """
+    For Solidity source code.
+    """
+
+    name = 'Solidity'
+    aliases = ['solidity']
+    filenames = ['*.sol']
+    mimetypes = []
+    url = 'https://soliditylang.org'
+    version_added = '2.5'
+
+    datatype = (
+        r'\b(address|bool|(?:(?:bytes|hash|int|string|uint)(?:8|16|24|32|40|48|56|64'
+        r'|72|80|88|96|104|112|120|128|136|144|152|160|168|176|184|192|200|208'
+        r'|216|224|232|240|248|256)?))\b'
+    )
+
+    tokens = {
+        'root': [
+            include('whitespace'),
+            include('comments'),
+            (r'\bpragma\s+solidity\b', Keyword, 'pragma'),
+            (r'\b(contract)(\s+)([a-zA-Z_]\w*)',
+             bygroups(Keyword, Whitespace, Name.Entity)),
+            (datatype + r'(\s+)((?:external|public|internal|private)\s+)?' +
+             r'([a-zA-Z_]\w*)',
+             bygroups(Keyword.Type, Whitespace, Keyword, Name.Variable)),
+            (r'\b(enum|event|function|struct)(\s+)([a-zA-Z_]\w*)',
+             bygroups(Keyword.Type, Whitespace, Name.Variable)),
+            (r'\b(msg|block|tx)\.([A-Za-z_][a-zA-Z0-9_]*)\b', Keyword),
+            (words((
+                'block', 'break', 'constant', 'constructor', 'continue',
+                'contract', 'do', 'else', 'external', 'false', 'for',
+                'function', 'if', 'import', 'inherited', 'internal', 'is',
+                'library', 'mapping', 'memory', 'modifier', 'msg', 'new',
+                'payable', 'private', 'public', 'require', 'return',
+                'returns', 'struct', 'suicide', 'throw', 'this', 'true',
+                'tx', 'var', 'while'), prefix=r'\b', suffix=r'\b'),
+             Keyword.Type),
+            (words(('keccak256',), prefix=r'\b', suffix=r'\b'), Name.Builtin),
+            (datatype, Keyword.Type),
+            include('constants'),
+            (r'[a-zA-Z_]\w*', Text),
+            (r'[~!%^&*+=|?:<>/-]', Operator),
+            (r'[.;{}(),\[\]]', Punctuation)
+        ],
+        'comments': [
+            (r'//(\n|[\w\W]*?[^\\]\n)', Comment.Single),
+            (r'/(\\\n)?[*][\w\W]*?[*](\\\n)?/', Comment.Multiline),
+            (r'/(\\\n)?[*][\w\W]*', Comment.Multiline)
+        ],
+        'constants': [
+            (r'("(\\"|.)*?")', String.Double),
+            (r"('(\\'|.)*?')", String.Single),
+            (r'\b0[xX][0-9a-fA-F]+\b', Number.Hex),
+            (r'\b\d+\b', Number.Decimal),
+        ],
+        'pragma': [
+            include('whitespace'),
+            include('comments'),
+            (r'(\^|>=|<)(\s*)(\d+\.\d+\.\d+)',
+             bygroups(Operator, Whitespace, Keyword)),
+            (r';', Punctuation, '#pop')
+        ],
+        'whitespace': [
+            (r'\s+', Whitespace),
+            (r'\n', Whitespace)
+        ]
+    }
diff --git a/local_packages/pygments/lexers/soong.py b/local_packages/pygments/lexers/soong.py
new file mode 100644
index 0000000..0d7f12c
--- /dev/null
+++ b/local_packages/pygments/lexers/soong.py
@@ -0,0 +1,78 @@
+"""
+    pygments.lexers.soong
+    ~~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for Soong (Android.bp Blueprint) files.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, include
+from pygments.token import Comment, Name, Number, Operator, Punctuation, \
+        String, Whitespace
+
+__all__ = ['SoongLexer']
+
+class SoongLexer(RegexLexer):
+    name = 'Soong'
+    version_added = '2.18'
+    url = 'https://source.android.com/docs/setup/reference/androidbp'
+    aliases = ['androidbp', 'bp', 'soong']
+    filenames = ['Android.bp']
+
+    tokens = {
+        'root': [
+            # A variable assignment
+            (r'(\w*)(\s*)(\+?=)(\s*)',
+             bygroups(Name.Variable, Whitespace, Operator, Whitespace),
+             'assign-rhs'),
+
+            # A top-level module
+            (r'(\w*)(\s*)(\{)',
+             bygroups(Name.Function, Whitespace, Punctuation),
+             'in-rule'),
+
+            # Everything else
+            include('comments'),
+            (r'\s+', Whitespace),  # newlines okay
+        ],
+        'assign-rhs': [
+            include('expr'),
+            (r'\n', Whitespace, '#pop'),
+        ],
+        'in-list': [
+            include('expr'),
+            include('comments'),
+            (r'\s+', Whitespace),  # newlines okay in a list
+            (r',', Punctuation),
+            (r'\]', Punctuation, '#pop'),
+        ],
+        'in-map': [
+            # A map key
+            (r'(\w+)(:)(\s*)', bygroups(Name, Punctuation, Whitespace)),
+
+            include('expr'),
+            include('comments'),
+            (r'\s+', Whitespace),  # newlines okay in a map
+            (r',', Punctuation),
+            (r'\}', Punctuation, '#pop'),
+        ],
+        'in-rule': [
+            # Just re-use map syntax
+            include('in-map'),
+        ],
+        'comments': [
+            (r'//.*', Comment.Single),
+            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
+        ],
+        'expr': [
+            (r'(true|false)\b', Name.Builtin),
+            (r'0x[0-9a-fA-F]+', Number.Hex),
+            (r'\d+', Number.Integer),
+            (r'".*?"', String),
+            (r'\{', Punctuation, 'in-map'),
+            (r'\[', Punctuation, 'in-list'),
+            (r'\w+', Name),
+        ],
+    }
diff --git a/local_packages/pygments/lexers/sophia.py b/local_packages/pygments/lexers/sophia.py
new file mode 100644
index 0000000..86435f3
--- /dev/null
+++ b/local_packages/pygments/lexers/sophia.py
@@ -0,0 +1,102 @@
+"""
+    pygments.lexers.sophia
+    ~~~~~~~~~~~~~~~~~~~~~~
+
+    Lexer for Sophia.
+
+    Derived from pygments/lexers/reason.py.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, include, default, words
+from pygments.token import Comment, Keyword, Name, Number, Operator, \
+    Punctuation, String, Text
+
+__all__ = ['SophiaLexer']
+
+class SophiaLexer(RegexLexer):
+    """
+    A Sophia lexer.
+    """
+
+    name = 'Sophia'
+    aliases = ['sophia']
+    filenames = ['*.aes']
+    mimetypes = []
+    url = 'https://docs.aeternity.com/aesophia'
+    version_added = '2.11'
+
+    keywords = (
+        'contract', 'include', 'let', 'switch', 'type', 'record', 'datatype',
+        'if', 'elif', 'else', 'function', 'stateful', 'payable', 'public',
+        'entrypoint', 'private', 'indexed', 'namespace', 'interface', 'main',
+        'using', 'as', 'for', 'hiding',
+    )
+
+    builtins = ('state', 'put', 'abort', 'require')
+
+    word_operators = ('mod', 'band', 'bor', 'bxor', 'bnot')
+
+    primitive_types = ('int', 'address', 'bool', 'bits', 'bytes', 'string',
+                       'list', 'option', 'char', 'unit', 'map', 'event',
+                       'hash', 'signature', 'oracle', 'oracle_query')
+
+    tokens = {
+        'escape-sequence': [
+            (r'\\[\\"\'ntbr]', String.Escape),
+            (r'\\[0-9]{3}', String.Escape),
+            (r'\\x[0-9a-fA-F]{2}', String.Escape),
+        ],
+        'root': [
+            (r'\s+', Text.Whitespace),
+            (r'(true|false)\b', Keyword.Constant),
+            (r'\b([A-Z][\w\']*)(?=\s*\.)', Name.Class, 'dotted'),
+            (r'\b([A-Z][\w\']*)', Name.Function),
+            (r'//.*?\n', Comment.Single),
+            (r'\/\*(?!/)', Comment.Multiline, 'comment'),
+
+            (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
+            (r'#[\da-fA-F][\da-fA-F_]*', Name.Label),
+            (r'\d[\d_]*', Number.Integer),
+
+            (words(keywords, suffix=r'\b'), Keyword),
+            (words(builtins, suffix=r'\b'), Name.Builtin),
+            (words(word_operators, prefix=r'\b', suffix=r'\b'), Operator.Word),
+            (words(primitive_types, prefix=r'\b', suffix=r'\b'), Keyword.Type),
+
+            (r'[=!<>+\\*/:&|?~@^-]', Operator.Word),
+            (r'[.;:{}(),\[\]]', Punctuation),
+
+            (r"(ak_|ok_|oq_|ct_)[\w']*", Name.Label),
+            (r"[^\W\d][\w']*", Name),
+
+            (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'",
+             String.Char),
+            (r"'.'", String.Char),
+            (r"'[a-z][\w]*", Name.Variable),
+
+            (r'"', String.Double, 'string')
+        ],
+        'comment': [
+            (r'[^/*]+', Comment.Multiline),
+            (r'\/\*', Comment.Multiline, '#push'),
+            (r'\*\/', Comment.Multiline, '#pop'),
+            (r'\*', Comment.Multiline),
+        ],
+        'string': [
+            (r'[^\\"]+', String.Double),
+            include('escape-sequence'),
+            (r'\\\n', String.Double),
+            (r'"', String.Double, '#pop'),
+        ],
+        'dotted': [
+            (r'\s+', Text),
+            (r'\.', Punctuation),
+            (r'[A-Z][\w\']*(?=\s*\.)', Name.Function),
+            (r'[A-Z][\w\']*', Name.Function, '#pop'),
+            (r'[a-z_][\w\']*', Name, '#pop'),
+            default('#pop'),
+        ],
+    }
diff --git a/local_packages/pygments/lexers/special.py b/local_packages/pygments/lexers/special.py
new file mode 100644
index 0000000..ea34522
--- /dev/null
+++ b/local_packages/pygments/lexers/special.py
@@ -0,0 +1,122 @@
+"""
+    pygments.lexers.special
+    ~~~~~~~~~~~~~~~~~~~~~~~
+
+    Special lexers.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import ast
+
+from pygments.lexer import Lexer, line_re
+from pygments.token import Token, Error, Text, Generic
+from pygments.util import get_choice_opt
+
+
+__all__ = ['TextLexer', 'OutputLexer', 'RawTokenLexer']
+
+
+class TextLexer(Lexer):
+    """
+    "Null" lexer, doesn't highlight anything.
+    """
+    name = 'Text only'
+    aliases = ['text']
+    filenames = ['*.txt']
+    mimetypes = ['text/plain']
+    url = ""
+    version_added = ''
+
+    priority = 0.01
+
+    def get_tokens_unprocessed(self, text):
+        yield 0, Text, text
+
+    def analyse_text(text):
+        return TextLexer.priority
+
+
+class OutputLexer(Lexer):
+    """
+    Simple lexer that highlights everything as ``Token.Generic.Output``.
+    """
+    name = 'Text output'
+    aliases = ['output']
+    url = ""
+    version_added = '2.10'
+    _example = "output/output"
+
+    def get_tokens_unprocessed(self, text):
+        yield 0, Generic.Output, text
+
+
+_ttype_cache = {}
+
+
+class RawTokenLexer(Lexer):
+    """
+    Recreate a token stream formatted with the `RawTokenFormatter`.
+
+    Additional options accepted:
+
+    `compress`
+        If set to ``"gz"`` or ``"bz2"``, decompress the token stream with
+        the given compression algorithm before lexing (default: ``""``).
+    """
+    name = 'Raw token data'
+    aliases = []
+    filenames = []
+    mimetypes = ['application/x-pygments-tokens']
+    url = 'https://pygments.org/docs/formatters/#RawTokenFormatter'
+    version_added = ''
+
+    def __init__(self, **options):
+        self.compress = get_choice_opt(options, 'compress',
+                                       ['', 'none', 'gz', 'bz2'], '')
+        Lexer.__init__(self, **options)
+
+    def get_tokens(self, text):
+        if self.compress:
+            if isinstance(text, str):
+                text = text.encode('latin1')
+            try:
+                if self.compress == 'gz':
+                    import gzip
+                    text = gzip.decompress(text)
+                elif self.compress == 'bz2':
+                    import bz2
+                    text = bz2.decompress(text)
+            except OSError:
+                yield Error, text.decode('latin1')
+        if isinstance(text, bytes):
+            text = text.decode('latin1')
+
+        # do not call Lexer.get_tokens() because stripping is not optional.
+        text = text.strip('\n') + '\n'
+        for i, t, v in self.get_tokens_unprocessed(text):
+            yield t, v
+
+    def get_tokens_unprocessed(self, text):
+        length = 0
+        for match in line_re.finditer(text):
+            try:
+                ttypestr, val = match.group().rstrip().split('\t', 1)
+                ttype = _ttype_cache.get(ttypestr)
+                if not ttype:
+                    ttype = Token
+                    ttypes = ttypestr.split('.')[1:]
+                    for ttype_ in ttypes:
+                        if not ttype_ or not ttype_[0].isupper():
+                            raise ValueError('malformed token name')
+                        ttype = getattr(ttype, ttype_)
+                    _ttype_cache[ttypestr] = ttype
+                val = ast.literal_eval(val)
+                if not isinstance(val, str):
+                    raise ValueError('expected str')
+            except (SyntaxError, ValueError):
+                val = match.group()
+                ttype = Error
+            yield length, ttype, val
+            length += len(val)
diff --git a/local_packages/pygments/lexers/spice.py b/local_packages/pygments/lexers/spice.py
new file mode 100644
index 0000000..978f0a7
--- /dev/null
+++ b/local_packages/pygments/lexers/spice.py
@@ -0,0 +1,70 @@
+"""
+    pygments.lexers.spice
+    ~~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for the Spice programming language.
+
+    :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, words
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+    Number, Punctuation, Whitespace
+
+__all__ = ['SpiceLexer']
+
+
+class SpiceLexer(RegexLexer):
+    """
+    For Spice source.
+    """
+    name = 'Spice'
+    url = 'https://www.spicelang.com'
+    filenames = ['*.spice']
+    aliases = ['spice', 'spicelang']
+    mimetypes = ['text/x-spice']
+    version_added = '2.11'
+
+    tokens = {
+        'root': [
+            (r'\n', Whitespace),
+            (r'\s+', Whitespace),
+            (r'\\\n', Text),
+            # comments
+            (r'//(.*?)\n', Comment.Single),
+            (r'/(\\\n)?[*]{2}(.|\n)*?[*](\\\n)?/', String.Doc),
+            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
+            # keywords
+            (r'(import|as)\b', Keyword.Namespace),
+            (r'(f|p|type|struct|interface|enum|alias|operator)\b', Keyword.Declaration),
+            (words(('if', 'else', 'switch', 'case', 'default', 'for', 'foreach', 'do',
+                    'while', 'break', 'continue', 'fallthrough', 'return', 'assert',
+                    'unsafe', 'ext', 'cast'), suffix=r'\b'), Keyword),
+            (words(('const', 'signed', 'unsigned', 'inline', 'public', 'heap', 'compose'),
+                   suffix=r'\b'), Keyword.Pseudo),
+            (words(('new', 'yield', 'stash', 'pick', 'sync', 'class'), suffix=r'\b'),
+                   Keyword.Reserved),
+            (r'(true|false|nil)\b', Keyword.Constant),
+            (words(('double', 'int', 'short', 'long', 'byte', 'char', 'string',
+                    'bool', 'dyn'), suffix=r'\b'), Keyword.Type),
+            (words(('printf', 'sizeof', 'alignof', 'len', 'panic'), suffix=r'\b(\()'),
+             bygroups(Name.Builtin, Punctuation)),
+            # numeric literals
+            (r'[-]?[0-9]*[.][0-9]+([eE][+-]?[0-9]+)?', Number.Double),
+            (r'0[bB][01]+[slu]?', Number.Bin),
+            (r'0[oO][0-7]+[slu]?', Number.Oct),
+            (r'0[xXhH][0-9a-fA-F]+[slu]?', Number.Hex),
+            (r'(0[dD])?[0-9]+[slu]?', Number.Integer),
+            # string literal
+            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
+            # char literal
+            (r'\'(\\\\|\\[^\\]|[^\'\\])\'', String.Char),
+            # tokens
+            (r'<<=|>>=|<<|>>|<=|>=|\+=|-=|\*=|/=|\%=|\|=|&=|\^=|&&|\|\||&|\||'
+             r'\+\+|--|\%|\^|\~|==|!=|->|::|[.]{3}|#!|#|[+\-*/&]', Operator),
+            (r'[|<>=!()\[\]{}.,;:\?]', Punctuation),
+            # identifiers
+            (r'[^\W\d]\w*', Name.Other),
+        ]
+    }
diff --git a/local_packages/pygments/lexers/sql.py b/local_packages/pygments/lexers/sql.py
new file mode 100644
index 0000000..1d3458e
--- /dev/null
+++ b/local_packages/pygments/lexers/sql.py
@@ -0,0 +1,1111 @@
+"""
+    pygments.lexers.sql
+    ~~~~~~~~~~~~~~~~~~~
+
+    Lexers for various SQL dialects and related interactive sessions.
+
+    Postgres specific lexers:
+
+    `PostgresLexer`
+        A SQL lexer for the PostgreSQL dialect. Differences w.r.t. the SQL
+        lexer are:
+
+        - keywords and data types list parsed from the PG docs (run the
+          `_postgres_builtins` module to update them);
+        - Content of $-strings parsed using a specific lexer, e.g. the content
+          of a PL/Python function is parsed using the Python lexer;
+        - parse PG specific constructs: E-strings, $-strings, U&-strings,
+          different operators and punctuation.
+
+    `PlPgsqlLexer`
+        A lexer for the PL/pgSQL language. Adds a few specific construct on
+        top of the PG SQL lexer (such as <{text}' if rule else text
+                    append(text)
+            else:
+                styles: Dict[str, int] = {}
+                for text, style, _ in Segment.filter_control(
+                    Segment.simplify(self._record_buffer)
+                ):
+                    text = escape(text)
+                    if style:
+                        rule = style.get_html_style(_theme)
+                        style_number = styles.setdefault(rule, len(styles) + 1)
+                        if style.link:
+                            text = f'{text}'
+                        else:
+                            text = f'{text}'
+                    append(text)
+                stylesheet_rules: List[str] = []
+                stylesheet_append = stylesheet_rules.append
+                for style_rule, style_number in styles.items():
+                    if style_rule:
+                        stylesheet_append(f".r{style_number} {{{style_rule}}}")
+                stylesheet = "\n".join(stylesheet_rules)
+
+            rendered_code = render_code_format.format(
+                code="".join(fragments),
+                stylesheet=stylesheet,
+                foreground=_theme.foreground_color.hex,
+                background=_theme.background_color.hex,
+            )
+            if clear:
+                del self._record_buffer[:]
+        return rendered_code
+
+    def save_html(
+        self,
+        path: Union[str, PathLike[str]],
+        *,
+        theme: Optional[TerminalTheme] = None,
+        clear: bool = True,
+        code_format: str = CONSOLE_HTML_FORMAT,
+        inline_styles: bool = False,
+    ) -> None:
+        """Generate HTML from console contents and write to a file (requires record=True argument in constructor).
+
+        Args:
+            path (Union[str, PathLike[str]]): Path to write html file.
+            theme (TerminalTheme, optional): TerminalTheme object containing console colors.
+            clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``.
+            code_format (str, optional): Format string to render HTML. In addition to '{foreground}',
+                '{background}', and '{code}', should contain '{stylesheet}' if inline_styles is ``False``.
+            inline_styles (bool, optional): If ``True`` styles will be inlined in to spans, which makes files
+                larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag.
+                Defaults to False.
+
+        """
+        html = self.export_html(
+            theme=theme,
+            clear=clear,
+            code_format=code_format,
+            inline_styles=inline_styles,
+        )
+        with open(path, "w", encoding="utf-8") as write_file:
+            write_file.write(html)
+
+    def export_svg(
+        self,
+        *,
+        title: str = "Rich",
+        theme: Optional[TerminalTheme] = None,
+        clear: bool = True,
+        code_format: str = CONSOLE_SVG_FORMAT,
+        font_aspect_ratio: float = 0.61,
+        unique_id: Optional[str] = None,
+    ) -> str:
+        """
+        Generate an SVG from the console contents (requires record=True in Console constructor).
+
+        Args:
+            title (str, optional): The title of the tab in the output image
+            theme (TerminalTheme, optional): The ``TerminalTheme`` object to use to style the terminal
+            clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``
+            code_format (str, optional): Format string used to generate the SVG. Rich will inject a number of variables
+                into the string in order to form the final SVG output. The default template used and the variables
+                injected by Rich can be found by inspecting the ``console.CONSOLE_SVG_FORMAT`` variable.
+            font_aspect_ratio (float, optional): The width to height ratio of the font used in the ``code_format``
+                string. Defaults to 0.61, which is the width to height ratio of Fira Code (the default font).
+                If you aren't specifying a different font inside ``code_format``, you probably don't need this.
+            unique_id (str, optional): unique id that is used as the prefix for various elements (CSS styles, node
+                ids). If not set, this defaults to a computed value based on the recorded content.
+        """
+
+        import zlib
+        from html import escape
+
+        from rich.cells import cell_len
+
+        style_cache: Dict[Style, str] = {}
+
+        def get_svg_style(style: Style) -> str:
+            """Convert a Style to CSS rules for SVG."""
+            if style in style_cache:
+                return style_cache[style]
+            css_rules = []
+            color = (
+                _theme.foreground_color
+                if (style.color is None or style.color.is_default)
+                else style.color.get_truecolor(_theme)
+            )
+            bgcolor = (
+                _theme.background_color
+                if (style.bgcolor is None or style.bgcolor.is_default)
+                else style.bgcolor.get_truecolor(_theme)
+            )
+            if style.reverse:
+                color, bgcolor = bgcolor, color
+            if style.dim:
+                color = blend_rgb(color, bgcolor, 0.4)
+            css_rules.append(f"fill: {color.hex}")
+            if style.bold:
+                css_rules.append("font-weight: bold")
+            if style.italic:
+                css_rules.append("font-style: italic;")
+            if style.underline:
+                css_rules.append("text-decoration: underline;")
+            if style.strike:
+                css_rules.append("text-decoration: line-through;")
+
+            css = ";".join(css_rules)
+            style_cache[style] = css
+            return css
+
+        _theme = theme or SVG_EXPORT_THEME
+
+        width = self.width
+        char_height = 20
+        char_width = char_height * font_aspect_ratio
+        line_height = char_height * 1.22
+
+        margin_top = 1
+        margin_right = 1
+        margin_bottom = 1
+        margin_left = 1
+
+        padding_top = 40
+        padding_right = 8
+        padding_bottom = 8
+        padding_left = 8
+
+        padding_width = padding_left + padding_right
+        padding_height = padding_top + padding_bottom
+        margin_width = margin_left + margin_right
+        margin_height = margin_top + margin_bottom
+
+        text_backgrounds: List[str] = []
+        text_group: List[str] = []
+        classes: Dict[str, int] = {}
+        style_no = 1
+
+        def escape_text(text: str) -> str:
+            """HTML escape text and replace spaces with nbsp."""
+            return escape(text).replace(" ", " ")
+
+        def make_tag(
+            name: str, content: Optional[str] = None, **attribs: object
+        ) -> str:
+            """Make a tag from name, content, and attributes."""
+
+            def stringify(value: object) -> str:
+                if isinstance(value, (float)):
+                    return format(value, "g")
+                return str(value)
+
+            tag_attribs = " ".join(
+                f'{k.lstrip("_").replace("_", "-")}="{stringify(v)}"'
+                for k, v in attribs.items()
+            )
+            return (
+                f"<{name} {tag_attribs}>{content}"
+                if content
+                else f"<{name} {tag_attribs}/>"
+            )
+
+        with self._record_buffer_lock:
+            segments = list(Segment.filter_control(self._record_buffer))
+            if clear:
+                self._record_buffer.clear()
+
+        if unique_id is None:
+            unique_id = "terminal-" + str(
+                zlib.adler32(
+                    ("".join(repr(segment) for segment in segments)).encode(
+                        "utf-8",
+                        "ignore",
+                    )
+                    + title.encode("utf-8", "ignore")
+                )
+            )
+        y = 0
+        for y, line in enumerate(Segment.split_and_crop_lines(segments, length=width)):
+            x = 0
+            for text, style, _control in line:
+                style = style or Style()
+                rules = get_svg_style(style)
+                if rules not in classes:
+                    classes[rules] = style_no
+                    style_no += 1
+                class_name = f"r{classes[rules]}"
+
+                if style.reverse:
+                    has_background = True
+                    background = (
+                        _theme.foreground_color.hex
+                        if style.color is None
+                        else style.color.get_truecolor(_theme).hex
+                    )
+                else:
+                    bgcolor = style.bgcolor
+                    has_background = bgcolor is not None and not bgcolor.is_default
+                    background = (
+                        _theme.background_color.hex
+                        if style.bgcolor is None
+                        else style.bgcolor.get_truecolor(_theme).hex
+                    )
+
+                text_length = cell_len(text)
+                if has_background:
+                    text_backgrounds.append(
+                        make_tag(
+                            "rect",
+                            fill=background,
+                            x=x * char_width,
+                            y=y * line_height + 1.5,
+                            width=char_width * text_length,
+                            height=line_height + 0.25,
+                            shape_rendering="crispEdges",
+                        )
+                    )
+
+                if text != " " * len(text):
+                    text_group.append(
+                        make_tag(
+                            "text",
+                            escape_text(text),
+                            _class=f"{unique_id}-{class_name}",
+                            x=x * char_width,
+                            y=y * line_height + char_height,
+                            textLength=char_width * len(text),
+                            clip_path=f"url(#{unique_id}-line-{y})",
+                        )
+                    )
+                x += cell_len(text)
+
+        line_offsets = [line_no * line_height + 1.5 for line_no in range(y)]
+        lines = "\n".join(
+            f"""
+    {make_tag("rect", x=0, y=offset, width=char_width * width, height=line_height + 0.25)}
+            """
+            for line_no, offset in enumerate(line_offsets)
+        )
+
+        styles = "\n".join(
+            f".{unique_id}-r{rule_no} {{ {css} }}" for css, rule_no in classes.items()
+        )
+        backgrounds = "".join(text_backgrounds)
+        matrix = "".join(text_group)
+
+        terminal_width = ceil(width * char_width + padding_width)
+        terminal_height = (y + 1) * line_height + padding_height
+        chrome = make_tag(
+            "rect",
+            fill=_theme.background_color.hex,
+            stroke="rgba(255,255,255,0.35)",
+            stroke_width="1",
+            x=margin_left,
+            y=margin_top,
+            width=terminal_width,
+            height=terminal_height,
+            rx=8,
+        )
+
+        title_color = _theme.foreground_color.hex
+        if title:
+            chrome += make_tag(
+                "text",
+                escape_text(title),
+                _class=f"{unique_id}-title",
+                fill=title_color,
+                text_anchor="middle",
+                x=terminal_width // 2,
+                y=margin_top + char_height + 6,
+            )
+        chrome += f"""
+            
+            
+            
+            
+            
+        """
+
+        svg = code_format.format(
+            unique_id=unique_id,
+            char_width=char_width,
+            char_height=char_height,
+            line_height=line_height,
+            terminal_width=char_width * width - 1,
+            terminal_height=(y + 1) * line_height - 1,
+            width=terminal_width + margin_width,
+            height=terminal_height + margin_height,
+            terminal_x=margin_left + padding_left,
+            terminal_y=margin_top + padding_top,
+            styles=styles,
+            chrome=chrome,
+            backgrounds=backgrounds,
+            matrix=matrix,
+            lines=lines,
+        )
+        return svg
+
+    def save_svg(
+        self,
+        path: Union[str, PathLike[str]],
+        *,
+        title: str = "Rich",
+        theme: Optional[TerminalTheme] = None,
+        clear: bool = True,
+        code_format: str = CONSOLE_SVG_FORMAT,
+        font_aspect_ratio: float = 0.61,
+        unique_id: Optional[str] = None,
+    ) -> None:
+        """Generate an SVG file from the console contents (requires record=True in Console constructor).
+
+        Args:
+            path (Union[str, PathLike[str]]): The path to write the SVG to.
+            title (str, optional): The title of the tab in the output image
+            theme (TerminalTheme, optional): The ``TerminalTheme`` object to use to style the terminal
+            clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``
+            code_format (str, optional): Format string used to generate the SVG. Rich will inject a number of variables
+                into the string in order to form the final SVG output. The default template used and the variables
+                injected by Rich can be found by inspecting the ``console.CONSOLE_SVG_FORMAT`` variable.
+            font_aspect_ratio (float, optional): The width to height ratio of the font used in the ``code_format``
+                string. Defaults to 0.61, which is the width to height ratio of Fira Code (the default font).
+                If you aren't specifying a different font inside ``code_format``, you probably don't need this.
+            unique_id (str, optional): unique id that is used as the prefix for various elements (CSS styles, node
+                ids). If not set, this defaults to a computed value based on the recorded content.
+        """
+        svg = self.export_svg(
+            title=title,
+            theme=theme,
+            clear=clear,
+            code_format=code_format,
+            font_aspect_ratio=font_aspect_ratio,
+            unique_id=unique_id,
+        )
+        with open(path, "w", encoding="utf-8") as write_file:
+            write_file.write(svg)
+
+
+if __name__ == "__main__":  # pragma: no cover
+    console = Console(record=True)
+
+    console.log(
+        "JSONRPC [i]request[/i]",
+        5,
+        1.3,
+        True,
+        False,
+        None,
+        {
+            "jsonrpc": "2.0",
+            "method": "subtract",
+            "params": {"minuend": 42, "subtrahend": 23},
+            "id": 3,
+        },
+    )
+
+    console.log("Hello, World!", "{'a': 1}", repr(console))
+
+    console.print(
+        {
+            "name": None,
+            "empty": [],
+            "quiz": {
+                "sport": {
+                    "answered": True,
+                    "q1": {
+                        "question": "Which one is correct team name in NBA?",
+                        "options": [
+                            "New York Bulls",
+                            "Los Angeles Kings",
+                            "Golden State Warriors",
+                            "Huston Rocket",
+                        ],
+                        "answer": "Huston Rocket",
+                    },
+                },
+                "maths": {
+                    "answered": False,
+                    "q1": {
+                        "question": "5 + 7 = ?",
+                        "options": [10, 11, 12, 13],
+                        "answer": 12,
+                    },
+                    "q2": {
+                        "question": "12 - 8 = ?",
+                        "options": [1, 2, 3, 4],
+                        "answer": 4,
+                    },
+                },
+            },
+        }
+    )
diff --git a/local_packages/rich/constrain.py b/local_packages/rich/constrain.py
new file mode 100644
index 0000000..65fdf56
--- /dev/null
+++ b/local_packages/rich/constrain.py
@@ -0,0 +1,37 @@
+from typing import Optional, TYPE_CHECKING
+
+from .jupyter import JupyterMixin
+from .measure import Measurement
+
+if TYPE_CHECKING:
+    from .console import Console, ConsoleOptions, RenderableType, RenderResult
+
+
+class Constrain(JupyterMixin):
+    """Constrain the width of a renderable to a given number of characters.
+
+    Args:
+        renderable (RenderableType): A renderable object.
+        width (int, optional): The maximum width (in characters) to render. Defaults to 80.
+    """
+
+    def __init__(self, renderable: "RenderableType", width: Optional[int] = 80) -> None:
+        self.renderable = renderable
+        self.width = width
+
+    def __rich_console__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> "RenderResult":
+        if self.width is None:
+            yield self.renderable
+        else:
+            child_options = options.update_width(min(self.width, options.max_width))
+            yield from console.render(self.renderable, child_options)
+
+    def __rich_measure__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> "Measurement":
+        if self.width is not None:
+            options = options.update_width(self.width)
+        measurement = Measurement.get(console, options, self.renderable)
+        return measurement
diff --git a/local_packages/rich/containers.py b/local_packages/rich/containers.py
new file mode 100644
index 0000000..901ff8b
--- /dev/null
+++ b/local_packages/rich/containers.py
@@ -0,0 +1,167 @@
+from itertools import zip_longest
+from typing import (
+    TYPE_CHECKING,
+    Iterable,
+    Iterator,
+    List,
+    Optional,
+    TypeVar,
+    Union,
+    overload,
+)
+
+if TYPE_CHECKING:
+    from .console import (
+        Console,
+        ConsoleOptions,
+        JustifyMethod,
+        OverflowMethod,
+        RenderResult,
+        RenderableType,
+    )
+    from .text import Text
+
+from .cells import cell_len
+from .measure import Measurement
+
+T = TypeVar("T")
+
+
+class Renderables:
+    """A list subclass which renders its contents to the console."""
+
+    def __init__(
+        self, renderables: Optional[Iterable["RenderableType"]] = None
+    ) -> None:
+        self._renderables: List["RenderableType"] = (
+            list(renderables) if renderables is not None else []
+        )
+
+    def __rich_console__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> "RenderResult":
+        """Console render method to insert line-breaks."""
+        yield from self._renderables
+
+    def __rich_measure__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> "Measurement":
+        dimensions = [
+            Measurement.get(console, options, renderable)
+            for renderable in self._renderables
+        ]
+        if not dimensions:
+            return Measurement(1, 1)
+        _min = max(dimension.minimum for dimension in dimensions)
+        _max = max(dimension.maximum for dimension in dimensions)
+        return Measurement(_min, _max)
+
+    def append(self, renderable: "RenderableType") -> None:
+        self._renderables.append(renderable)
+
+    def __iter__(self) -> Iterable["RenderableType"]:
+        return iter(self._renderables)
+
+
+class Lines:
+    """A list subclass which can render to the console."""
+
+    def __init__(self, lines: Iterable["Text"] = ()) -> None:
+        self._lines: List["Text"] = list(lines)
+
+    def __repr__(self) -> str:
+        return f"Lines({self._lines!r})"
+
+    def __iter__(self) -> Iterator["Text"]:
+        return iter(self._lines)
+
+    @overload
+    def __getitem__(self, index: int) -> "Text":
+        ...
+
+    @overload
+    def __getitem__(self, index: slice) -> List["Text"]:
+        ...
+
+    def __getitem__(self, index: Union[slice, int]) -> Union["Text", List["Text"]]:
+        return self._lines[index]
+
+    def __setitem__(self, index: int, value: "Text") -> "Lines":
+        self._lines[index] = value
+        return self
+
+    def __len__(self) -> int:
+        return self._lines.__len__()
+
+    def __rich_console__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> "RenderResult":
+        """Console render method to insert line-breaks."""
+        yield from self._lines
+
+    def append(self, line: "Text") -> None:
+        self._lines.append(line)
+
+    def extend(self, lines: Iterable["Text"]) -> None:
+        self._lines.extend(lines)
+
+    def pop(self, index: int = -1) -> "Text":
+        return self._lines.pop(index)
+
+    def justify(
+        self,
+        console: "Console",
+        width: int,
+        justify: "JustifyMethod" = "left",
+        overflow: "OverflowMethod" = "fold",
+    ) -> None:
+        """Justify and overflow text to a given width.
+
+        Args:
+            console (Console): Console instance.
+            width (int): Number of cells available per line.
+            justify (str, optional): Default justify method for text: "left", "center", "full" or "right". Defaults to "left".
+            overflow (str, optional): Default overflow for text: "crop", "fold", or "ellipsis". Defaults to "fold".
+
+        """
+        from .text import Text
+
+        if justify == "left":
+            for line in self._lines:
+                line.truncate(width, overflow=overflow, pad=True)
+        elif justify == "center":
+            for line in self._lines:
+                line.rstrip()
+                line.truncate(width, overflow=overflow)
+                line.pad_left((width - cell_len(line.plain)) // 2)
+                line.pad_right(width - cell_len(line.plain))
+        elif justify == "right":
+            for line in self._lines:
+                line.rstrip()
+                line.truncate(width, overflow=overflow)
+                line.pad_left(width - cell_len(line.plain))
+        elif justify == "full":
+            for line_index, line in enumerate(self._lines):
+                if line_index == len(self._lines) - 1:
+                    break
+                words = line.split(" ")
+                words_size = sum(cell_len(word.plain) for word in words)
+                num_spaces = len(words) - 1
+                spaces = [1 for _ in range(num_spaces)]
+                index = 0
+                if spaces:
+                    while words_size + num_spaces < width:
+                        spaces[len(spaces) - index - 1] += 1
+                        num_spaces += 1
+                        index = (index + 1) % len(spaces)
+                tokens: List[Text] = []
+                for index, (word, next_word) in enumerate(
+                    zip_longest(words, words[1:])
+                ):
+                    tokens.append(word)
+                    if index < len(spaces):
+                        style = word.get_style_at_offset(console, -1)
+                        next_style = next_word.get_style_at_offset(console, 0)
+                        space_style = style if style == next_style else line.style
+                        tokens.append(Text(" " * spaces[index], style=space_style))
+                self[line_index] = Text("").join(tokens)
diff --git a/local_packages/rich/control.py b/local_packages/rich/control.py
new file mode 100644
index 0000000..248b0f5
--- /dev/null
+++ b/local_packages/rich/control.py
@@ -0,0 +1,219 @@
+import time
+from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Union, Final
+
+from .segment import ControlCode, ControlType, Segment
+
+if TYPE_CHECKING:
+    from .console import Console, ConsoleOptions, RenderResult
+
+STRIP_CONTROL_CODES: Final = [
+    7,  # Bell
+    8,  # Backspace
+    11,  # Vertical tab
+    12,  # Form feed
+    13,  # Carriage return
+]
+_CONTROL_STRIP_TRANSLATE: Final = {
+    _codepoint: None for _codepoint in STRIP_CONTROL_CODES
+}
+
+CONTROL_ESCAPE: Final = {
+    7: "\\a",
+    8: "\\b",
+    11: "\\v",
+    12: "\\f",
+    13: "\\r",
+}
+
+CONTROL_CODES_FORMAT: Dict[int, Callable[..., str]] = {
+    ControlType.BELL: lambda: "\x07",
+    ControlType.CARRIAGE_RETURN: lambda: "\r",
+    ControlType.HOME: lambda: "\x1b[H",
+    ControlType.CLEAR: lambda: "\x1b[2J",
+    ControlType.ENABLE_ALT_SCREEN: lambda: "\x1b[?1049h",
+    ControlType.DISABLE_ALT_SCREEN: lambda: "\x1b[?1049l",
+    ControlType.SHOW_CURSOR: lambda: "\x1b[?25h",
+    ControlType.HIDE_CURSOR: lambda: "\x1b[?25l",
+    ControlType.CURSOR_UP: lambda param: f"\x1b[{param}A",
+    ControlType.CURSOR_DOWN: lambda param: f"\x1b[{param}B",
+    ControlType.CURSOR_FORWARD: lambda param: f"\x1b[{param}C",
+    ControlType.CURSOR_BACKWARD: lambda param: f"\x1b[{param}D",
+    ControlType.CURSOR_MOVE_TO_COLUMN: lambda param: f"\x1b[{param+1}G",
+    ControlType.ERASE_IN_LINE: lambda param: f"\x1b[{param}K",
+    ControlType.CURSOR_MOVE_TO: lambda x, y: f"\x1b[{y+1};{x+1}H",
+    ControlType.SET_WINDOW_TITLE: lambda title: f"\x1b]0;{title}\x07",
+}
+
+
+class Control:
+    """A renderable that inserts a control code (non printable but may move cursor).
+
+    Args:
+        *codes (str): Positional arguments are either a :class:`~rich.segment.ControlType` enum or a
+            tuple of ControlType and an integer parameter
+    """
+
+    __slots__ = ["segment"]
+
+    def __init__(self, *codes: Union[ControlType, ControlCode]) -> None:
+        control_codes: List[ControlCode] = [
+            (code,) if isinstance(code, ControlType) else code for code in codes
+        ]
+        _format_map = CONTROL_CODES_FORMAT
+        rendered_codes = "".join(
+            _format_map[code](*parameters) for code, *parameters in control_codes
+        )
+        self.segment = Segment(rendered_codes, None, control_codes)
+
+    @classmethod
+    def bell(cls) -> "Control":
+        """Ring the 'bell'."""
+        return cls(ControlType.BELL)
+
+    @classmethod
+    def home(cls) -> "Control":
+        """Move cursor to 'home' position."""
+        return cls(ControlType.HOME)
+
+    @classmethod
+    def move(cls, x: int = 0, y: int = 0) -> "Control":
+        """Move cursor relative to current position.
+
+        Args:
+            x (int): X offset.
+            y (int): Y offset.
+
+        Returns:
+            ~Control: Control object.
+
+        """
+
+        def get_codes() -> Iterable[ControlCode]:
+            control = ControlType
+            if x:
+                yield (
+                    control.CURSOR_FORWARD if x > 0 else control.CURSOR_BACKWARD,
+                    abs(x),
+                )
+            if y:
+                yield (
+                    control.CURSOR_DOWN if y > 0 else control.CURSOR_UP,
+                    abs(y),
+                )
+
+        control = cls(*get_codes())
+        return control
+
+    @classmethod
+    def move_to_column(cls, x: int, y: int = 0) -> "Control":
+        """Move to the given column, optionally add offset to row.
+
+        Returns:
+            x (int): absolute x (column)
+            y (int): optional y offset (row)
+
+        Returns:
+            ~Control: Control object.
+        """
+
+        return (
+            cls(
+                (ControlType.CURSOR_MOVE_TO_COLUMN, x),
+                (
+                    ControlType.CURSOR_DOWN if y > 0 else ControlType.CURSOR_UP,
+                    abs(y),
+                ),
+            )
+            if y
+            else cls((ControlType.CURSOR_MOVE_TO_COLUMN, x))
+        )
+
+    @classmethod
+    def move_to(cls, x: int, y: int) -> "Control":
+        """Move cursor to absolute position.
+
+        Args:
+            x (int): x offset (column)
+            y (int): y offset (row)
+
+        Returns:
+            ~Control: Control object.
+        """
+        return cls((ControlType.CURSOR_MOVE_TO, x, y))
+
+    @classmethod
+    def clear(cls) -> "Control":
+        """Clear the screen."""
+        return cls(ControlType.CLEAR)
+
+    @classmethod
+    def show_cursor(cls, show: bool) -> "Control":
+        """Show or hide the cursor."""
+        return cls(ControlType.SHOW_CURSOR if show else ControlType.HIDE_CURSOR)
+
+    @classmethod
+    def alt_screen(cls, enable: bool) -> "Control":
+        """Enable or disable alt screen."""
+        if enable:
+            return cls(ControlType.ENABLE_ALT_SCREEN, ControlType.HOME)
+        else:
+            return cls(ControlType.DISABLE_ALT_SCREEN)
+
+    @classmethod
+    def title(cls, title: str) -> "Control":
+        """Set the terminal window title
+
+        Args:
+            title (str): The new terminal window title
+        """
+        return cls((ControlType.SET_WINDOW_TITLE, title))
+
+    def __str__(self) -> str:
+        return self.segment.text
+
+    def __rich_console__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> "RenderResult":
+        if self.segment.text:
+            yield self.segment
+
+
+def strip_control_codes(
+    text: str, _translate_table: Dict[int, None] = _CONTROL_STRIP_TRANSLATE
+) -> str:
+    """Remove control codes from text.
+
+    Args:
+        text (str): A string possibly contain control codes.
+
+    Returns:
+        str: String with control codes removed.
+    """
+    return text.translate(_translate_table)
+
+
+def escape_control_codes(
+    text: str,
+    _translate_table: Dict[int, str] = CONTROL_ESCAPE,
+) -> str:
+    """Replace control codes with their "escaped" equivalent in the given text.
+    (e.g. "\b" becomes "\\b")
+
+    Args:
+        text (str): A string possibly containing control codes.
+
+    Returns:
+        str: String with control codes replaced with their escaped version.
+    """
+    return text.translate(_translate_table)
+
+
+if __name__ == "__main__":  # pragma: no cover
+    from rich.console import Console
+
+    console = Console()
+    console.print("Look at the title of your terminal window ^")
+    # console.print(Control((ControlType.SET_WINDOW_TITLE, "Hello, world!")))
+    for i in range(10):
+        console.set_window_title("🚀 Loading" + "." * i)
+        time.sleep(0.5)
diff --git a/local_packages/rich/default_styles.py b/local_packages/rich/default_styles.py
new file mode 100644
index 0000000..f2e4cd1
--- /dev/null
+++ b/local_packages/rich/default_styles.py
@@ -0,0 +1,196 @@
+from typing import Dict
+
+from .style import Style
+
+DEFAULT_STYLES: Dict[str, Style] = {
+    "none": Style.null(),
+    "reset": Style(
+        color="default",
+        bgcolor="default",
+        dim=False,
+        bold=False,
+        italic=False,
+        underline=False,
+        blink=False,
+        blink2=False,
+        reverse=False,
+        conceal=False,
+        strike=False,
+    ),
+    "dim": Style(dim=True),
+    "bright": Style(dim=False),
+    "bold": Style(bold=True),
+    "strong": Style(bold=True),
+    "code": Style(reverse=True, bold=True),
+    "italic": Style(italic=True),
+    "emphasize": Style(italic=True),
+    "underline": Style(underline=True),
+    "blink": Style(blink=True),
+    "blink2": Style(blink2=True),
+    "reverse": Style(reverse=True),
+    "strike": Style(strike=True),
+    "black": Style(color="black"),
+    "red": Style(color="red"),
+    "green": Style(color="green"),
+    "yellow": Style(color="yellow"),
+    "magenta": Style(color="magenta"),
+    "cyan": Style(color="cyan"),
+    "white": Style(color="white"),
+    "inspect.attr": Style(color="yellow", italic=True),
+    "inspect.attr.dunder": Style(color="yellow", italic=True, dim=True),
+    "inspect.callable": Style(bold=True, color="red"),
+    "inspect.async_def": Style(italic=True, color="bright_cyan"),
+    "inspect.def": Style(italic=True, color="bright_cyan"),
+    "inspect.class": Style(italic=True, color="bright_cyan"),
+    "inspect.error": Style(bold=True, color="red"),
+    "inspect.equals": Style(),
+    "inspect.help": Style(color="cyan"),
+    "inspect.doc": Style(dim=True),
+    "inspect.value.border": Style(color="green"),
+    "live.ellipsis": Style(bold=True, color="red"),
+    "layout.tree.row": Style(dim=False, color="red"),
+    "layout.tree.column": Style(dim=False, color="blue"),
+    "logging.keyword": Style(bold=True, color="yellow"),
+    "logging.level.notset": Style(dim=True),
+    "logging.level.debug": Style(color="green"),
+    "logging.level.info": Style(color="blue"),
+    "logging.level.warning": Style(color="yellow"),
+    "logging.level.error": Style(color="red", bold=True),
+    "logging.level.critical": Style(color="red", bold=True, reverse=True),
+    "log.level": Style.null(),
+    "log.time": Style(color="cyan", dim=True),
+    "log.message": Style.null(),
+    "log.path": Style(dim=True),
+    "repr.ellipsis": Style(color="yellow"),
+    "repr.indent": Style(color="green", dim=True),
+    "repr.error": Style(color="red", bold=True),
+    "repr.str": Style(color="green", italic=False, bold=False),
+    "repr.brace": Style(bold=True),
+    "repr.comma": Style(bold=True),
+    "repr.ipv4": Style(bold=True, color="bright_green"),
+    "repr.ipv6": Style(bold=True, color="bright_green"),
+    "repr.eui48": Style(bold=True, color="bright_green"),
+    "repr.eui64": Style(bold=True, color="bright_green"),
+    "repr.tag_start": Style(bold=True),
+    "repr.tag_name": Style(color="bright_magenta", bold=True),
+    "repr.tag_contents": Style(color="default"),
+    "repr.tag_end": Style(bold=True),
+    "repr.attrib_name": Style(color="yellow", italic=False),
+    "repr.attrib_equal": Style(bold=True),
+    "repr.attrib_value": Style(color="magenta", italic=False),
+    "repr.number": Style(color="cyan", bold=True, italic=False),
+    "repr.number_complex": Style(color="cyan", bold=True, italic=False),  # same
+    "repr.bool_true": Style(color="bright_green", italic=True),
+    "repr.bool_false": Style(color="bright_red", italic=True),
+    "repr.none": Style(color="magenta", italic=True),
+    "repr.url": Style(underline=True, color="bright_blue", italic=False, bold=False),
+    "repr.uuid": Style(color="bright_yellow", bold=False),
+    "repr.call": Style(color="magenta", bold=True),
+    "repr.path": Style(color="magenta"),
+    "repr.filename": Style(color="bright_magenta"),
+    "rule.line": Style(color="bright_green"),
+    "rule.text": Style.null(),
+    "json.brace": Style(bold=True),
+    "json.bool_true": Style(color="bright_green", italic=True),
+    "json.bool_false": Style(color="bright_red", italic=True),
+    "json.null": Style(color="magenta", italic=True),
+    "json.number": Style(color="cyan", bold=True, italic=False),
+    "json.str": Style(color="green", italic=False, bold=False),
+    "json.key": Style(color="blue", bold=True),
+    "prompt": Style.null(),
+    "prompt.choices": Style(color="magenta", bold=True),
+    "prompt.default": Style(color="cyan", bold=True),
+    "prompt.invalid": Style(color="red"),
+    "prompt.invalid.choice": Style(color="red"),
+    "pretty": Style.null(),
+    "scope.border": Style(color="blue"),
+    "scope.key": Style(color="yellow", italic=True),
+    "scope.key.special": Style(color="yellow", italic=True, dim=True),
+    "scope.equals": Style(color="red"),
+    "table.header": Style(bold=True),
+    "table.footer": Style(bold=True),
+    "table.cell": Style.null(),
+    "table.title": Style(italic=True),
+    "table.caption": Style(italic=True, dim=True),
+    "traceback.error": Style(color="red", italic=True),
+    "traceback.border.syntax_error": Style(color="bright_red"),
+    "traceback.border": Style(color="red"),
+    "traceback.text": Style.null(),
+    "traceback.title": Style(color="red", bold=True),
+    "traceback.exc_type": Style(color="bright_red", bold=True),
+    "traceback.exc_value": Style.null(),
+    "traceback.offset": Style(color="bright_red", bold=True),
+    "traceback.error_range": Style(underline=True, bold=True),
+    "traceback.note": Style(color="green", bold=True),
+    "traceback.group.border": Style(color="magenta"),
+    "bar.back": Style(color="grey23"),
+    "bar.complete": Style(color="rgb(249,38,114)"),
+    "bar.finished": Style(color="rgb(114,156,31)"),
+    "bar.pulse": Style(color="rgb(249,38,114)"),
+    "progress.description": Style.null(),
+    "progress.filesize": Style(color="green"),
+    "progress.filesize.total": Style(color="green"),
+    "progress.download": Style(color="green"),
+    "progress.elapsed": Style(color="yellow"),
+    "progress.percentage": Style(color="magenta"),
+    "progress.remaining": Style(color="cyan"),
+    "progress.data.speed": Style(color="red"),
+    "progress.spinner": Style(color="green"),
+    "status.spinner": Style(color="green"),
+    "tree": Style(),
+    "tree.line": Style(),
+    "markdown.paragraph": Style(),
+    "markdown.text": Style(),
+    "markdown.em": Style(italic=True),
+    "markdown.emph": Style(italic=True),  # For commonmark backwards compatibility
+    "markdown.strong": Style(bold=True),
+    "markdown.code": Style(bold=True, color="cyan", bgcolor="black"),
+    "markdown.code_block": Style(color="cyan", bgcolor="black"),
+    "markdown.block_quote": Style(color="magenta"),
+    "markdown.list": Style(color="cyan"),
+    "markdown.item": Style(),
+    "markdown.item.bullet": Style(bold=True),
+    "markdown.item.number": Style(color="cyan"),
+    "markdown.hr": Style(dim=True),
+    "markdown.h1.border": Style(),
+    "markdown.h1": Style(bold=True, underline=True),
+    "markdown.h2": Style(color="magenta", underline=True),
+    "markdown.h3": Style(color="magenta", bold=True),
+    "markdown.h4": Style(color="magenta", italic=True),
+    "markdown.h5": Style(italic=True),
+    "markdown.h6": Style(dim=True),
+    "markdown.h7": Style(italic=True, dim=True),
+    "markdown.link": Style(color="bright_blue"),
+    "markdown.link_url": Style(color="blue", underline=True),
+    "markdown.s": Style(strike=True),
+    "markdown.table.border": Style(color="cyan"),
+    "markdown.table.header": Style(color="cyan", bold=False),
+    "markdown.kbd": Style(bold=True, color="bright_yellow"),
+    "iso8601.date": Style(color="blue"),
+    "iso8601.time": Style(color="magenta"),
+    "iso8601.timezone": Style(color="yellow"),
+}
+
+
+if __name__ == "__main__":  # pragma: no cover
+    import argparse
+    import io
+
+    from rich.console import Console
+    from rich.table import Table
+    from rich.text import Text
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--html", action="store_true", help="Export as HTML table")
+    args = parser.parse_args()
+    html: bool = args.html
+    console = Console(record=True, width=70, file=io.StringIO()) if html else Console()
+
+    table = Table("Name", "Styling")
+
+    for style_name, style in DEFAULT_STYLES.items():
+        table.add_row(Text(style_name, style=style), str(style))
+
+    console.print(table)
+    if html:
+        print(console.export_html(inline_styles=True))
diff --git a/local_packages/rich/diagnose.py b/local_packages/rich/diagnose.py
new file mode 100644
index 0000000..9d5ff3e
--- /dev/null
+++ b/local_packages/rich/diagnose.py
@@ -0,0 +1,39 @@
+import os
+import platform
+
+from rich import inspect
+from rich.console import Console, get_windows_console_features
+from rich.panel import Panel
+from rich.pretty import Pretty
+
+
+def report() -> None:  # pragma: no cover
+    """Print a report to the terminal with debugging information"""
+    console = Console()
+    inspect(console)
+    features = get_windows_console_features()
+    inspect(features)
+
+    env_names = (
+        "CLICOLOR",
+        "COLORTERM",
+        "COLUMNS",
+        "JPY_PARENT_PID",
+        "JUPYTER_COLUMNS",
+        "JUPYTER_LINES",
+        "LINES",
+        "NO_COLOR",
+        "TERM_PROGRAM",
+        "TERM",
+        "TTY_COMPATIBLE",
+        "TTY_INTERACTIVE",
+        "VSCODE_VERBOSE_LOGGING",
+    )
+    env = {name: os.getenv(name) for name in env_names}
+    console.print(Panel.fit((Pretty(env)), title="[b]Environment Variables"))
+
+    console.print(f'platform="{platform.system()}"')
+
+
+if __name__ == "__main__":  # pragma: no cover
+    report()
diff --git a/local_packages/rich/emoji.py b/local_packages/rich/emoji.py
new file mode 100644
index 0000000..067f93c
--- /dev/null
+++ b/local_packages/rich/emoji.py
@@ -0,0 +1,93 @@
+import sys
+from typing import TYPE_CHECKING, Literal, Optional, Union
+
+from ._emoji_replace import _emoji_replace
+from .jupyter import JupyterMixin
+from .segment import Segment
+from .style import Style
+
+if TYPE_CHECKING:
+    from .console import Console, ConsoleOptions, RenderResult
+
+
+EmojiVariant = Literal["emoji", "text"]
+
+
+class NoEmoji(Exception):
+    """No emoji by that name."""
+
+
+class Emoji(JupyterMixin):
+    __slots__ = ["name", "style", "_char", "variant"]
+
+    VARIANTS = {"text": "\ufe0e", "emoji": "\ufe0f"}
+
+    def __init__(
+        self,
+        name: str,
+        style: Union[str, Style] = "none",
+        variant: Optional[EmojiVariant] = None,
+    ) -> None:
+        """A single emoji character.
+
+        Args:
+            name (str): Name of emoji.
+            style (Union[str, Style], optional): Optional style. Defaults to None.
+
+        Raises:
+            NoEmoji: If the emoji doesn't exist.
+        """
+        from ._emoji_codes import EMOJI
+
+        self.name = name
+        self.style = style
+        self.variant = variant
+        try:
+            self._char = EMOJI[name]
+        except KeyError:
+            raise NoEmoji(f"No emoji called {name!r}")
+        if variant is not None:
+            self._char += self.VARIANTS.get(variant, "")
+
+    @classmethod
+    def replace(cls, text: str) -> str:
+        """Replace emoji markup with corresponding unicode characters.
+
+        Args:
+            text (str): A string with emojis codes, e.g. "Hello :smiley:!"
+
+        Returns:
+            str: A string with emoji codes replaces with actual emoji.
+        """
+        return _emoji_replace(text)
+
+    def __repr__(self) -> str:
+        return f""
+
+    def __str__(self) -> str:
+        return self._char
+
+    def __rich_console__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> "RenderResult":
+        yield Segment(self._char, console.get_style(self.style))
+
+
+if __name__ == "__main__":  # pragma: no cover
+    import sys
+
+    from rich.columns import Columns
+    from rich.console import Console
+
+    console = Console(record=True)
+
+    from ._emoji_codes import EMOJI
+
+    columns = Columns(
+        (f":{name}: {name}" for name in sorted(EMOJI.keys()) if "\u200d" not in name),
+        column_first=True,
+    )
+
+    console.print(columns)
+    if len(sys.argv) > 1:
+        console.save_html(sys.argv[1])
diff --git a/local_packages/rich/errors.py b/local_packages/rich/errors.py
new file mode 100644
index 0000000..0bcbe53
--- /dev/null
+++ b/local_packages/rich/errors.py
@@ -0,0 +1,34 @@
+class ConsoleError(Exception):
+    """An error in console operation."""
+
+
+class StyleError(Exception):
+    """An error in styles."""
+
+
+class StyleSyntaxError(ConsoleError):
+    """Style was badly formatted."""
+
+
+class MissingStyle(StyleError):
+    """No such style."""
+
+
+class StyleStackError(ConsoleError):
+    """Style stack is invalid."""
+
+
+class NotRenderableError(ConsoleError):
+    """Object is not renderable."""
+
+
+class MarkupError(ConsoleError):
+    """Markup was badly formatted."""
+
+
+class LiveError(ConsoleError):
+    """Error related to Live display."""
+
+
+class NoAltScreen(ConsoleError):
+    """Alt screen mode was required."""
diff --git a/local_packages/rich/file_proxy.py b/local_packages/rich/file_proxy.py
new file mode 100644
index 0000000..e32523b
--- /dev/null
+++ b/local_packages/rich/file_proxy.py
@@ -0,0 +1,60 @@
+import io
+from typing import IO, TYPE_CHECKING, Any, List
+
+from .ansi import AnsiDecoder
+from .text import Text
+
+if TYPE_CHECKING:
+    from .console import Console
+
+
+class FileProxy(io.TextIOBase):
+    """Wraps a file (e.g. sys.stdout) and redirects writes to a console."""
+
+    def __init__(self, console: "Console", file: IO[str]) -> None:
+        self.__console = console
+        self.__file = file
+        self.__buffer: List[str] = []
+        self.__ansi_decoder = AnsiDecoder()
+
+    @property
+    def rich_proxied_file(self) -> IO[str]:
+        """Get proxied file."""
+        return self.__file
+
+    def __getattr__(self, name: str) -> Any:
+        return getattr(self.__file, name)
+
+    def write(self, text: str) -> int:
+        if not isinstance(text, str):
+            raise TypeError(f"write() argument must be str, not {type(text).__name__}")
+        buffer = self.__buffer
+        lines: List[str] = []
+        while text:
+            line, new_line, text = text.partition("\n")
+            if new_line:
+                lines.append("".join(buffer) + line)
+                buffer.clear()
+            else:
+                buffer.append(line)
+                break
+        if lines:
+            console = self.__console
+            with console:
+                output = Text("\n").join(
+                    self.__ansi_decoder.decode_line(line) for line in lines
+                )
+                console.print(output)
+        return len(text)
+
+    def flush(self) -> None:
+        output = "".join(self.__buffer)
+        if output:
+            self.__console.print(output)
+        del self.__buffer[:]
+
+    def fileno(self) -> int:
+        return self.__file.fileno()
+
+    def isatty(self) -> bool:
+        return self.__file.isatty()
diff --git a/local_packages/rich/filesize.py b/local_packages/rich/filesize.py
new file mode 100644
index 0000000..83bc911
--- /dev/null
+++ b/local_packages/rich/filesize.py
@@ -0,0 +1,88 @@
+"""Functions for reporting filesizes. Borrowed from https://github.com/PyFilesystem/pyfilesystem2
+
+The functions declared in this module should cover the different
+use cases needed to generate a string representation of a file size
+using several different units. Since there are many standards regarding
+file size units, three different functions have been implemented.
+
+See Also:
+    * `Wikipedia: Binary prefix `_
+
+"""
+
+__all__ = ["decimal"]
+
+from typing import Iterable, List, Optional, Tuple
+
+
+def _to_str(
+    size: int,
+    suffixes: Iterable[str],
+    base: int,
+    *,
+    precision: Optional[int] = 1,
+    separator: Optional[str] = " ",
+) -> str:
+    if size == 1:
+        return "1 byte"
+    elif size < base:
+        return f"{size:,} bytes"
+
+    for i, suffix in enumerate(suffixes, 2):  # noqa: B007
+        unit = base**i
+        if size < unit:
+            break
+    return "{:,.{precision}f}{separator}{}".format(
+        (base * size / unit),
+        suffix,
+        precision=precision,
+        separator=separator,
+    )
+
+
+def pick_unit_and_suffix(size: int, suffixes: List[str], base: int) -> Tuple[int, str]:
+    """Pick a suffix and base for the given size."""
+    for i, suffix in enumerate(suffixes):
+        unit = base**i
+        if size < unit * base:
+            break
+    return unit, suffix
+
+
+def decimal(
+    size: int,
+    *,
+    precision: Optional[int] = 1,
+    separator: Optional[str] = " ",
+) -> str:
+    """Convert a filesize in to a string (powers of 1000, SI prefixes).
+
+    In this convention, ``1000 B = 1 kB``.
+
+    This is typically the format used to advertise the storage
+    capacity of USB flash drives and the like (*256 MB* meaning
+    actually a storage capacity of more than *256 000 000 B*),
+    or used by **Mac OS X** since v10.6 to report file sizes.
+
+    Arguments:
+        int (size): A file size.
+        int (precision): The number of decimal places to include (default = 1).
+        str (separator): The string to separate the value from the units (default = " ").
+
+    Returns:
+        `str`: A string containing a abbreviated file size and units.
+
+    Example:
+        >>> filesize.decimal(30000)
+        '30.0 kB'
+        >>> filesize.decimal(30000, precision=2, separator="")
+        '30.00kB'
+
+    """
+    return _to_str(
+        size,
+        ("kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"),
+        1000,
+        precision=precision,
+        separator=separator,
+    )
diff --git a/local_packages/rich/highlighter.py b/local_packages/rich/highlighter.py
new file mode 100644
index 0000000..df28048
--- /dev/null
+++ b/local_packages/rich/highlighter.py
@@ -0,0 +1,232 @@
+import re
+from abc import ABC, abstractmethod
+from typing import ClassVar, Sequence, Union
+
+from .text import Span, Text
+
+
+def _combine_regex(*regexes: str) -> str:
+    """Combine a number of regexes in to a single regex.
+
+    Returns:
+        str: New regex with all regexes ORed together.
+    """
+    return "|".join(regexes)
+
+
+class Highlighter(ABC):
+    """Abstract base class for highlighters."""
+
+    def __call__(self, text: Union[str, Text]) -> Text:
+        """Highlight a str or Text instance.
+
+        Args:
+            text (Union[str, ~Text]): Text to highlight.
+
+        Raises:
+            TypeError: If not called with text or str.
+
+        Returns:
+            Text: A test instance with highlighting applied.
+        """
+        if isinstance(text, str):
+            highlight_text = Text(text)
+        elif isinstance(text, Text):
+            highlight_text = text.copy()
+        else:
+            raise TypeError(f"str or Text instance required, not {text!r}")
+        self.highlight(highlight_text)
+        return highlight_text
+
+    @abstractmethod
+    def highlight(self, text: Text) -> None:
+        """Apply highlighting in place to text.
+
+        Args:
+            text (~Text): A text object highlight.
+        """
+
+
+class NullHighlighter(Highlighter):
+    """A highlighter object that doesn't highlight.
+
+    May be used to disable highlighting entirely.
+
+    """
+
+    def highlight(self, text: Text) -> None:
+        """Nothing to do"""
+
+
+class RegexHighlighter(Highlighter):
+    """Applies highlighting from a list of regular expressions."""
+
+    highlights: ClassVar[Sequence[str]] = []
+    base_style: ClassVar[str] = ""
+
+    def highlight(self, text: Text) -> None:
+        """Highlight :class:`rich.text.Text` using regular expressions.
+
+        Args:
+            text (~Text): Text to highlighted.
+
+        """
+
+        highlight_regex = text.highlight_regex
+        for re_highlight in self.highlights:
+            highlight_regex(re_highlight, style_prefix=self.base_style)
+
+
+class ReprHighlighter(RegexHighlighter):
+    """Highlights the text typically produced from ``__repr__`` methods."""
+
+    base_style = "repr."
+    highlights: ClassVar[Sequence[str]] = [
+        r"(?P<)(?P[-\w.:|]*)(?P[\w\W]*)(?P>)",
+        r'(?P[\w_]{1,50})=(?P"?[\w_]+"?)?',
+        r"(?P[][{}()])",
+        _combine_regex(
+            r"(?P[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})",
+            r"(?P([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4})",
+            r"(?P(?:[0-9A-Fa-f]{1,2}-){7}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{1,2}:){7}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{4}\.){3}[0-9A-Fa-f]{4})",
+            r"(?P(?:[0-9A-Fa-f]{1,2}-){5}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{1,2}:){5}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{4}\.){2}[0-9A-Fa-f]{4})",
+            r"(?P[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12})",
+            r"(?P[\w.]*?)\(",
+            r"\b(?PTrue)\b|\b(?PFalse)\b|\b(?PNone)\b",
+            r"(?P\.\.\.)",
+            r"(?P(?(?\B(/[-\w._+]+)*\/)(?P[-\w._+]*)?",
+            r"(?b?'''.*?(?(file|https|http|ws|wss)://[-0-9a-zA-Z$_+!`(),.?/;:&=%#~@]*)",
+        ),
+    ]
+
+
+class JSONHighlighter(RegexHighlighter):
+    """Highlights JSON"""
+
+    # Captures the start and end of JSON strings, handling escaped quotes
+    JSON_STR = r"(?b?\".*?(?[\{\[\(\)\]\}])",
+            r"\b(?Ptrue)\b|\b(?Pfalse)\b|\b(?Pnull)\b",
+            r"(?P(? None:
+        super().highlight(text)
+
+        # Additional work to handle highlighting JSON keys
+        plain = text.plain
+        append = text.spans.append
+        whitespace = self.JSON_WHITESPACE
+        for match in re.finditer(self.JSON_STR, plain):
+            start, end = match.span()
+            cursor = end
+            while cursor < len(plain):
+                char = plain[cursor]
+                cursor += 1
+                if char == ":":
+                    append(Span(start, end, "json.key"))
+                elif char in whitespace:
+                    continue
+                break
+
+
+class ISO8601Highlighter(RegexHighlighter):
+    """Highlights the ISO8601 date time strings.
+    Regex reference: https://www.oreilly.com/library/view/regular-expressions-cookbook/9781449327453/ch04s07.html
+    """
+
+    base_style: ClassVar[str] = "iso8601."
+    highlights: ClassVar[Sequence[str]] = [
+        #
+        # Dates
+        #
+        # Calendar month (e.g. 2008-08). The hyphen is required
+        r"^(?P[0-9]{4})-(?P1[0-2]|0[1-9])$",
+        # Calendar date w/o hyphens (e.g. 20080830)
+        r"^(?P(?P[0-9]{4})(?P1[0-2]|0[1-9])(?P3[01]|0[1-9]|[12][0-9]))$",
+        # Ordinal date (e.g. 2008-243). The hyphen is optional
+        r"^(?P(?P[0-9]{4})-?(?P36[0-6]|3[0-5][0-9]|[12][0-9]{2}|0[1-9][0-9]|00[1-9]))$",
+        #
+        # Weeks
+        #
+        # Week of the year (e.g., 2008-W35). The hyphen is optional
+        r"^(?P(?P[0-9]{4})-?W(?P5[0-3]|[1-4][0-9]|0[1-9]))$",
+        # Week date (e.g., 2008-W35-6). The hyphens are optional
+        r"^(?P(?P[0-9]{4})-?W(?P5[0-3]|[1-4][0-9]|0[1-9])-?(?P[1-7]))$",
+        #
+        # Times
+        #
+        # Hours and minutes (e.g., 17:21). The colon is optional
+        r"^(?P{text}'
+        append_fragment(text)
+
+    code = "".join(fragments)
+    html = JUPYTER_HTML_FORMAT.format(code=code)
+
+    return html
+
+
+def display(segments: Iterable[Segment], text: str) -> None:
+    """Render segments to Jupyter."""
+    html = _render_segments(segments)
+    jupyter_renderable = JupyterRenderable(html, text)
+    try:
+        from IPython.display import display as ipython_display
+
+        ipython_display(jupyter_renderable)
+    except ModuleNotFoundError:
+        # Handle the case where the Console has force_jupyter=True,
+        # but IPython is not installed.
+        pass
+
+
+def print(*args: Any, **kwargs: Any) -> None:
+    """Proxy for Console print."""
+    console = get_console()
+    return console.print(*args, **kwargs)
diff --git a/local_packages/rich/layout.py b/local_packages/rich/layout.py
new file mode 100644
index 0000000..7fa2852
--- /dev/null
+++ b/local_packages/rich/layout.py
@@ -0,0 +1,442 @@
+from abc import ABC, abstractmethod
+from itertools import islice
+from operator import itemgetter
+from threading import RLock
+from typing import (
+    TYPE_CHECKING,
+    Dict,
+    Iterable,
+    List,
+    NamedTuple,
+    Optional,
+    Sequence,
+    Tuple,
+    Union,
+)
+
+from ._ratio import ratio_resolve
+from .align import Align
+from .console import Console, ConsoleOptions, RenderableType, RenderResult
+from .highlighter import ReprHighlighter
+from .panel import Panel
+from .pretty import Pretty
+from .region import Region
+from .repr import Result, rich_repr
+from .segment import Segment
+from .style import StyleType
+
+if TYPE_CHECKING:
+    from rich.tree import Tree
+
+
+class LayoutRender(NamedTuple):
+    """An individual layout render."""
+
+    region: Region
+    render: List[List[Segment]]
+
+
+RegionMap = Dict["Layout", Region]
+RenderMap = Dict["Layout", LayoutRender]
+
+
+class LayoutError(Exception):
+    """Layout related error."""
+
+
+class NoSplitter(LayoutError):
+    """Requested splitter does not exist."""
+
+
+class _Placeholder:
+    """An internal renderable used as a Layout placeholder."""
+
+    highlighter = ReprHighlighter()
+
+    def __init__(self, layout: "Layout", style: StyleType = "") -> None:
+        self.layout = layout
+        self.style = style
+
+    def __rich_console__(
+        self, console: Console, options: ConsoleOptions
+    ) -> RenderResult:
+        width = options.max_width
+        height = options.height or options.size.height
+        layout = self.layout
+        title = (
+            f"{layout.name!r} ({width} x {height})"
+            if layout.name
+            else f"({width} x {height})"
+        )
+        yield Panel(
+            Align.center(Pretty(layout), vertical="middle"),
+            style=self.style,
+            title=self.highlighter(title),
+            border_style="blue",
+            height=height,
+        )
+
+
+class Splitter(ABC):
+    """Base class for a splitter."""
+
+    name: str = ""
+
+    @abstractmethod
+    def get_tree_icon(self) -> str:
+        """Get the icon (emoji) used in layout.tree"""
+
+    @abstractmethod
+    def divide(
+        self, children: Sequence["Layout"], region: Region
+    ) -> Iterable[Tuple["Layout", Region]]:
+        """Divide a region amongst several child layouts.
+
+        Args:
+            children (Sequence(Layout)): A number of child layouts.
+            region (Region): A rectangular region to divide.
+        """
+
+
+class RowSplitter(Splitter):
+    """Split a layout region in to rows."""
+
+    name = "row"
+
+    def get_tree_icon(self) -> str:
+        return "[layout.tree.row]⬌"
+
+    def divide(
+        self, children: Sequence["Layout"], region: Region
+    ) -> Iterable[Tuple["Layout", Region]]:
+        x, y, width, height = region
+        render_widths = ratio_resolve(width, children)
+        offset = 0
+        _Region = Region
+        for child, child_width in zip(children, render_widths):
+            yield child, _Region(x + offset, y, child_width, height)
+            offset += child_width
+
+
+class ColumnSplitter(Splitter):
+    """Split a layout region in to columns."""
+
+    name = "column"
+
+    def get_tree_icon(self) -> str:
+        return "[layout.tree.column]⬍"
+
+    def divide(
+        self, children: Sequence["Layout"], region: Region
+    ) -> Iterable[Tuple["Layout", Region]]:
+        x, y, width, height = region
+        render_heights = ratio_resolve(height, children)
+        offset = 0
+        _Region = Region
+        for child, child_height in zip(children, render_heights):
+            yield child, _Region(x, y + offset, width, child_height)
+            offset += child_height
+
+
+@rich_repr
+class Layout:
+    """A renderable to divide a fixed height in to rows or columns.
+
+    Args:
+        renderable (RenderableType, optional): Renderable content, or None for placeholder. Defaults to None.
+        name (str, optional): Optional identifier for Layout. Defaults to None.
+        size (int, optional): Optional fixed size of layout. Defaults to None.
+        minimum_size (int, optional): Minimum size of layout. Defaults to 1.
+        ratio (int, optional): Optional ratio for flexible layout. Defaults to 1.
+        visible (bool, optional): Visibility of layout. Defaults to True.
+    """
+
+    splitters = {"row": RowSplitter, "column": ColumnSplitter}
+
+    def __init__(
+        self,
+        renderable: Optional[RenderableType] = None,
+        *,
+        name: Optional[str] = None,
+        size: Optional[int] = None,
+        minimum_size: int = 1,
+        ratio: int = 1,
+        visible: bool = True,
+    ) -> None:
+        self._renderable = renderable or _Placeholder(self)
+        self.size = size
+        self.minimum_size = minimum_size
+        self.ratio = ratio
+        self.name = name
+        self.visible = visible
+        self.splitter: Splitter = self.splitters["column"]()
+        self._children: List[Layout] = []
+        self._render_map: RenderMap = {}
+        self._lock = RLock()
+
+    def __rich_repr__(self) -> Result:
+        yield "name", self.name, None
+        yield "size", self.size, None
+        yield "minimum_size", self.minimum_size, 1
+        yield "ratio", self.ratio, 1
+
+    @property
+    def renderable(self) -> RenderableType:
+        """Layout renderable."""
+        return self if self._children else self._renderable
+
+    @property
+    def children(self) -> List["Layout"]:
+        """Gets (visible) layout children."""
+        return [child for child in self._children if child.visible]
+
+    @property
+    def map(self) -> RenderMap:
+        """Get a map of the last render."""
+        return self._render_map
+
+    def get(self, name: str) -> Optional["Layout"]:
+        """Get a named layout, or None if it doesn't exist.
+
+        Args:
+            name (str): Name of layout.
+
+        Returns:
+            Optional[Layout]: Layout instance or None if no layout was found.
+        """
+        if self.name == name:
+            return self
+        else:
+            for child in self._children:
+                named_layout = child.get(name)
+                if named_layout is not None:
+                    return named_layout
+        return None
+
+    def __getitem__(self, name: str) -> "Layout":
+        layout = self.get(name)
+        if layout is None:
+            raise KeyError(f"No layout with name {name!r}")
+        return layout
+
+    @property
+    def tree(self) -> "Tree":
+        """Get a tree renderable to show layout structure."""
+        from rich.styled import Styled
+        from rich.table import Table
+        from rich.tree import Tree
+
+        def summary(layout: "Layout") -> Table:
+            icon = layout.splitter.get_tree_icon()
+
+            table = Table.grid(padding=(0, 1, 0, 0))
+
+            text: RenderableType = (
+                Pretty(layout) if layout.visible else Styled(Pretty(layout), "dim")
+            )
+            table.add_row(icon, text)
+            _summary = table
+            return _summary
+
+        layout = self
+        tree = Tree(
+            summary(layout),
+            guide_style=f"layout.tree.{layout.splitter.name}",
+            highlight=True,
+        )
+
+        def recurse(tree: "Tree", layout: "Layout") -> None:
+            for child in layout._children:
+                recurse(
+                    tree.add(
+                        summary(child),
+                        guide_style=f"layout.tree.{child.splitter.name}",
+                    ),
+                    child,
+                )
+
+        recurse(tree, self)
+        return tree
+
+    def split(
+        self,
+        *layouts: Union["Layout", RenderableType],
+        splitter: Union[Splitter, str] = "column",
+    ) -> None:
+        """Split the layout in to multiple sub-layouts.
+
+        Args:
+            *layouts (Layout): Positional arguments should be (sub) Layout instances.
+            splitter (Union[Splitter, str]): Splitter instance or name of splitter.
+        """
+        _layouts = [
+            layout if isinstance(layout, Layout) else Layout(layout)
+            for layout in layouts
+        ]
+        try:
+            self.splitter = (
+                splitter
+                if isinstance(splitter, Splitter)
+                else self.splitters[splitter]()
+            )
+        except KeyError:
+            raise NoSplitter(f"No splitter called {splitter!r}")
+        self._children[:] = _layouts
+
+    def add_split(self, *layouts: Union["Layout", RenderableType]) -> None:
+        """Add a new layout(s) to existing split.
+
+        Args:
+            *layouts (Union[Layout, RenderableType]): Positional arguments should be renderables or (sub) Layout instances.
+
+        """
+        _layouts = (
+            layout if isinstance(layout, Layout) else Layout(layout)
+            for layout in layouts
+        )
+        self._children.extend(_layouts)
+
+    def split_row(self, *layouts: Union["Layout", RenderableType]) -> None:
+        """Split the layout in to a row (layouts side by side).
+
+        Args:
+            *layouts (Layout): Positional arguments should be (sub) Layout instances.
+        """
+        self.split(*layouts, splitter="row")
+
+    def split_column(self, *layouts: Union["Layout", RenderableType]) -> None:
+        """Split the layout in to a column (layouts stacked on top of each other).
+
+        Args:
+            *layouts (Layout): Positional arguments should be (sub) Layout instances.
+        """
+        self.split(*layouts, splitter="column")
+
+    def unsplit(self) -> None:
+        """Reset splits to initial state."""
+        del self._children[:]
+
+    def update(self, renderable: RenderableType) -> None:
+        """Update renderable.
+
+        Args:
+            renderable (RenderableType): New renderable object.
+        """
+        with self._lock:
+            self._renderable = renderable
+
+    def refresh_screen(self, console: "Console", layout_name: str) -> None:
+        """Refresh a sub-layout.
+
+        Args:
+            console (Console): Console instance where Layout is to be rendered.
+            layout_name (str): Name of layout.
+        """
+        with self._lock:
+            layout = self[layout_name]
+            region, _lines = self._render_map[layout]
+            (x, y, width, height) = region
+            lines = console.render_lines(
+                layout, console.options.update_dimensions(width, height)
+            )
+            self._render_map[layout] = LayoutRender(region, lines)
+            console.update_screen_lines(lines, x, y)
+
+    def _make_region_map(self, width: int, height: int) -> RegionMap:
+        """Create a dict that maps layout on to Region."""
+        stack: List[Tuple[Layout, Region]] = [(self, Region(0, 0, width, height))]
+        push = stack.append
+        pop = stack.pop
+        layout_regions: List[Tuple[Layout, Region]] = []
+        append_layout_region = layout_regions.append
+        while stack:
+            append_layout_region(pop())
+            layout, region = layout_regions[-1]
+            children = layout.children
+            if children:
+                for child_and_region in layout.splitter.divide(children, region):
+                    push(child_and_region)
+
+        region_map = {
+            layout: region
+            for layout, region in sorted(layout_regions, key=itemgetter(1))
+        }
+        return region_map
+
+    def render(self, console: Console, options: ConsoleOptions) -> RenderMap:
+        """Render the sub_layouts.
+
+        Args:
+            console (Console): Console instance.
+            options (ConsoleOptions): Console options.
+
+        Returns:
+            RenderMap: A dict that maps Layout on to a tuple of Region, lines
+        """
+        render_width = options.max_width
+        render_height = options.height or console.height
+        region_map = self._make_region_map(render_width, render_height)
+        layout_regions = [
+            (layout, region)
+            for layout, region in region_map.items()
+            if not layout.children
+        ]
+        render_map: Dict["Layout", "LayoutRender"] = {}
+        render_lines = console.render_lines
+        update_dimensions = options.update_dimensions
+
+        for layout, region in layout_regions:
+            lines = render_lines(
+                layout.renderable, update_dimensions(region.width, region.height)
+            )
+            render_map[layout] = LayoutRender(region, lines)
+        return render_map
+
+    def __rich_console__(
+        self, console: Console, options: ConsoleOptions
+    ) -> RenderResult:
+        with self._lock:
+            width = options.max_width or console.width
+            height = options.height or console.height
+            render_map = self.render(console, options.update_dimensions(width, height))
+            self._render_map = render_map
+            layout_lines: List[List[Segment]] = [[] for _ in range(height)]
+            _islice = islice
+            for region, lines in render_map.values():
+                _x, y, _layout_width, layout_height = region
+                for row, line in zip(
+                    _islice(layout_lines, y, y + layout_height), lines
+                ):
+                    row.extend(line)
+
+            new_line = Segment.line()
+            for layout_row in layout_lines:
+                yield from layout_row
+                yield new_line
+
+
+if __name__ == "__main__":
+    from rich.console import Console
+
+    console = Console()
+    layout = Layout()
+
+    layout.split_column(
+        Layout(name="header", size=3),
+        Layout(ratio=1, name="main"),
+        Layout(size=10, name="footer"),
+    )
+
+    layout["main"].split_row(Layout(name="side"), Layout(name="body", ratio=2))
+
+    layout["body"].split_row(Layout(name="content", ratio=2), Layout(name="s2"))
+
+    layout["s2"].split_column(
+        Layout(name="top"), Layout(name="middle"), Layout(name="bottom")
+    )
+
+    layout["side"].split_column(Layout(layout.tree, name="left1"), Layout(name="left2"))
+
+    layout["content"].update("foo")
+
+    console.print(layout)
diff --git a/local_packages/rich/live.py b/local_packages/rich/live.py
new file mode 100644
index 0000000..2fd893b
--- /dev/null
+++ b/local_packages/rich/live.py
@@ -0,0 +1,404 @@
+from __future__ import annotations
+
+import sys
+from threading import Event, RLock, Thread
+from types import TracebackType
+from typing import IO, TYPE_CHECKING, Any, Callable, List, Optional, TextIO, Type, cast
+
+from . import get_console
+from .console import Console, ConsoleRenderable, Group, RenderableType, RenderHook
+from .control import Control
+from .file_proxy import FileProxy
+from .jupyter import JupyterMixin
+from .live_render import LiveRender, VerticalOverflowMethod
+from .screen import Screen
+from .text import Text
+
+if TYPE_CHECKING:
+    # Can be replaced with `from typing import Self` in Python 3.11+
+    from typing_extensions import Self  # pragma: no cover
+
+
+class _RefreshThread(Thread):
+    """A thread that calls refresh() at regular intervals."""
+
+    def __init__(self, live: "Live", refresh_per_second: float) -> None:
+        self.live = live
+        self.refresh_per_second = refresh_per_second
+        self.done = Event()
+        super().__init__(daemon=True)
+
+    def stop(self) -> None:
+        self.done.set()
+
+    def run(self) -> None:
+        while not self.done.wait(1 / self.refresh_per_second):
+            with self.live._lock:
+                if not self.done.is_set():
+                    self.live.refresh()
+
+
+class Live(JupyterMixin, RenderHook):
+    """Renders an auto-updating live display of any given renderable.
+
+    Args:
+        renderable (RenderableType, optional): The renderable to live display. Defaults to displaying nothing.
+        console (Console, optional): Optional Console instance. Defaults to an internal Console instance writing to stdout.
+        screen (bool, optional): Enable alternate screen mode. Defaults to False.
+        auto_refresh (bool, optional): Enable auto refresh. If disabled, you will need to call `refresh()` or `update()` with refresh flag. Defaults to True
+        refresh_per_second (float, optional): Number of times per second to refresh the live display. Defaults to 4.
+        transient (bool, optional): Clear the renderable on exit (has no effect when screen=True). Defaults to False.
+        redirect_stdout (bool, optional): Enable redirection of stdout, so ``print`` may be used. Defaults to True.
+        redirect_stderr (bool, optional): Enable redirection of stderr. Defaults to True.
+        vertical_overflow (VerticalOverflowMethod, optional): How to handle renderable when it is too tall for the console. Defaults to "ellipsis".
+        get_renderable (Callable[[], RenderableType], optional): Optional callable to get renderable. Defaults to None.
+    """
+
+    def __init__(
+        self,
+        renderable: Optional[RenderableType] = None,
+        *,
+        console: Optional[Console] = None,
+        screen: bool = False,
+        auto_refresh: bool = True,
+        refresh_per_second: float = 4,
+        transient: bool = False,
+        redirect_stdout: bool = True,
+        redirect_stderr: bool = True,
+        vertical_overflow: VerticalOverflowMethod = "ellipsis",
+        get_renderable: Optional[Callable[[], RenderableType]] = None,
+    ) -> None:
+        assert refresh_per_second > 0, "refresh_per_second must be > 0"
+        self._renderable = renderable
+        self.console = console if console is not None else get_console()
+        self._screen = screen
+        self._alt_screen = False
+
+        self._redirect_stdout = redirect_stdout
+        self._redirect_stderr = redirect_stderr
+        self._restore_stdout: Optional[IO[str]] = None
+        self._restore_stderr: Optional[IO[str]] = None
+
+        self._lock = RLock()
+        self.ipy_widget: Optional[Any] = None
+        self.auto_refresh = auto_refresh
+        self._started: bool = False
+        self.transient = True if screen else transient
+
+        self._refresh_thread: Optional[_RefreshThread] = None
+        self.refresh_per_second = refresh_per_second
+
+        self.vertical_overflow = vertical_overflow
+        self._get_renderable = get_renderable
+        self._live_render = LiveRender(
+            self.get_renderable(), vertical_overflow=vertical_overflow
+        )
+        self._nested = False
+
+    @property
+    def is_started(self) -> bool:
+        """Check if live display has been started."""
+        return self._started
+
+    def get_renderable(self) -> RenderableType:
+        renderable = (
+            self._get_renderable()
+            if self._get_renderable is not None
+            else self._renderable
+        )
+        return renderable or ""
+
+    def start(self, refresh: bool = False) -> None:
+        """Start live rendering display.
+
+        Args:
+            refresh (bool, optional): Also refresh. Defaults to False.
+        """
+        with self._lock:
+            if self._started:
+                return
+            self._started = True
+
+            if not self.console.set_live(self):
+                self._nested = True
+                return
+
+            if self._screen:
+                self._alt_screen = self.console.set_alt_screen(True)
+            self.console.show_cursor(False)
+            self._enable_redirect_io()
+            self.console.push_render_hook(self)
+            if refresh:
+                try:
+                    self.refresh()
+                except Exception:
+                    # If refresh fails, we want to stop the redirection of sys.stderr,
+                    # so the error stacktrace is properly displayed in the terminal.
+                    # (or, if the code that calls Rich captures the exception and wants to display something,
+                    # let this be displayed in the terminal).
+                    self.stop()
+                    raise
+            if self.auto_refresh:
+                self._refresh_thread = _RefreshThread(self, self.refresh_per_second)
+                self._refresh_thread.start()
+
+    def stop(self) -> None:
+        """Stop live rendering display."""
+        with self._lock:
+            if not self._started:
+                return
+            self._started = False
+            self.console.clear_live()
+            if self._nested:
+                if not self.transient:
+                    self.console.print(self.renderable)
+                return
+
+            if self.auto_refresh and self._refresh_thread is not None:
+                self._refresh_thread.stop()
+                self._refresh_thread = None
+            # allow it to fully render on the last even if overflow
+            self.vertical_overflow = "visible"
+            with self.console:
+                try:
+                    if not self._alt_screen and not self.console.is_jupyter:
+                        self.refresh()
+                finally:
+                    self._disable_redirect_io()
+                    self.console.pop_render_hook()
+                    if (
+                        not self._alt_screen
+                        and self.console.is_terminal
+                        and self._live_render.last_render_height
+                    ):
+                        self.console.line()
+                    self.console.show_cursor(True)
+                    if self._alt_screen:
+                        self.console.set_alt_screen(False)
+                    if self.transient and not self._alt_screen:
+                        self.console.control(self._live_render.restore_cursor())
+                    if self.ipy_widget is not None and self.transient:
+                        self.ipy_widget.close()  # pragma: no cover
+
+    def __enter__(self) -> Self:
+        self.start(refresh=self._renderable is not None)
+        return self
+
+    def __exit__(
+        self,
+        exc_type: Optional[Type[BaseException]],
+        exc_val: Optional[BaseException],
+        exc_tb: Optional[TracebackType],
+    ) -> None:
+        self.stop()
+
+    def _enable_redirect_io(self) -> None:
+        """Enable redirecting of stdout / stderr."""
+        if self.console.is_terminal or self.console.is_jupyter:
+            if self._redirect_stdout and not isinstance(sys.stdout, FileProxy):
+                self._restore_stdout = sys.stdout
+                sys.stdout = cast("TextIO", FileProxy(self.console, sys.stdout))
+            if self._redirect_stderr and not isinstance(sys.stderr, FileProxy):
+                self._restore_stderr = sys.stderr
+                sys.stderr = cast("TextIO", FileProxy(self.console, sys.stderr))
+
+    def _disable_redirect_io(self) -> None:
+        """Disable redirecting of stdout / stderr."""
+        if self._restore_stdout:
+            sys.stdout = cast("TextIO", self._restore_stdout)
+            self._restore_stdout = None
+        if self._restore_stderr:
+            sys.stderr = cast("TextIO", self._restore_stderr)
+            self._restore_stderr = None
+
+    @property
+    def renderable(self) -> RenderableType:
+        """Get the renderable that is being displayed
+
+        Returns:
+            RenderableType: Displayed renderable.
+        """
+        live_stack = self.console._live_stack
+        renderable: RenderableType
+        if live_stack and self is live_stack[0]:
+            # The first Live instance will render everything in the Live stack
+            renderable = Group(*[live.get_renderable() for live in live_stack])
+        else:
+            renderable = self.get_renderable()
+        return Screen(renderable) if self._alt_screen else renderable
+
+    def update(self, renderable: RenderableType, *, refresh: bool = False) -> None:
+        """Update the renderable that is being displayed
+
+        Args:
+            renderable (RenderableType): New renderable to use.
+            refresh (bool, optional): Refresh the display. Defaults to False.
+        """
+        if isinstance(renderable, str):
+            renderable = self.console.render_str(renderable)
+        with self._lock:
+            self._renderable = renderable
+            if refresh:
+                self.refresh()
+
+    def refresh(self) -> None:
+        """Update the display of the Live Render."""
+        with self._lock:
+            self._live_render.set_renderable(self.renderable)
+            if self._nested:
+                if self.console._live_stack:
+                    self.console._live_stack[0].refresh()
+                return
+
+            if self.console.is_jupyter:  # pragma: no cover
+                try:
+                    from IPython.display import display
+                    from ipywidgets import Output
+                except ImportError:
+                    import warnings
+
+                    warnings.warn('install "ipywidgets" for Jupyter support')
+                else:
+                    if self.ipy_widget is None:
+                        self.ipy_widget = Output()
+                        display(self.ipy_widget)
+
+                    with self.ipy_widget:
+                        self.ipy_widget.clear_output(wait=True)
+                        self.console.print(self._live_render.renderable)
+            elif self.console.is_terminal and not self.console.is_dumb_terminal:
+                with self.console:
+                    self.console.print(Control())
+            elif (
+                not self._started and not self.transient
+            ):  # if it is finished allow files or dumb-terminals to see final result
+                with self.console:
+                    self.console.print(Control())
+
+    def process_renderables(
+        self, renderables: List[ConsoleRenderable]
+    ) -> List[ConsoleRenderable]:
+        """Process renderables to restore cursor and display progress."""
+        self._live_render.vertical_overflow = self.vertical_overflow
+        if self.console.is_interactive:
+            # lock needs acquiring as user can modify live_render renderable at any time unlike in Progress.
+            with self._lock:
+                reset = (
+                    Control.home()
+                    if self._alt_screen
+                    else self._live_render.position_cursor()
+                )
+                renderables = [reset, *renderables, self._live_render]
+        elif (
+            not self._started and not self.transient
+        ):  # if it is finished render the final output for files or dumb_terminals
+            renderables = [*renderables, self._live_render]
+
+        return renderables
+
+
+if __name__ == "__main__":  # pragma: no cover
+    import random
+    import time
+    from itertools import cycle
+    from typing import Dict, List, Tuple
+
+    from .align import Align
+    from .console import Console
+    from .live import Live as Live
+    from .panel import Panel
+    from .rule import Rule
+    from .syntax import Syntax
+    from .table import Table
+
+    console = Console()
+
+    syntax = Syntax(
+        '''def loop_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
+    """Iterate and generate a tuple with a flag for last value."""
+    iter_values = iter(values)
+    try:
+        previous_value = next(iter_values)
+    except StopIteration:
+        return
+    for value in iter_values:
+        yield False, previous_value
+        previous_value = value
+    yield True, previous_value''',
+        "python",
+        line_numbers=True,
+    )
+
+    table = Table("foo", "bar", "baz")
+    table.add_row("1", "2", "3")
+
+    progress_renderables = [
+        "You can make the terminal shorter and taller to see the live table hide"
+        "Text may be printed while the progress bars are rendering.",
+        Panel("In fact, [i]any[/i] renderable will work"),
+        "Such as [magenta]tables[/]...",
+        table,
+        "Pretty printed structures...",
+        {"type": "example", "text": "Pretty printed"},
+        "Syntax...",
+        syntax,
+        Rule("Give it a try!"),
+    ]
+
+    examples = cycle(progress_renderables)
+
+    exchanges = [
+        "SGD",
+        "MYR",
+        "EUR",
+        "USD",
+        "AUD",
+        "JPY",
+        "CNH",
+        "HKD",
+        "CAD",
+        "INR",
+        "DKK",
+        "GBP",
+        "RUB",
+        "NZD",
+        "MXN",
+        "IDR",
+        "TWD",
+        "THB",
+        "VND",
+    ]
+    with Live(console=console) as live_table:
+        exchange_rate_dict: Dict[Tuple[str, str], float] = {}
+
+        for index in range(100):
+            select_exchange = exchanges[index % len(exchanges)]
+
+            for exchange in exchanges:
+                if exchange == select_exchange:
+                    continue
+                time.sleep(0.4)
+                if random.randint(0, 10) < 1:
+                    console.log(next(examples))
+                exchange_rate_dict[(select_exchange, exchange)] = 200 / (
+                    (random.random() * 320) + 1
+                )
+                if len(exchange_rate_dict) > len(exchanges) - 1:
+                    exchange_rate_dict.pop(list(exchange_rate_dict.keys())[0])
+                table = Table(title="Exchange Rates")
+
+                table.add_column("Source Currency")
+                table.add_column("Destination Currency")
+                table.add_column("Exchange Rate")
+
+                for (source, dest), exchange_rate in exchange_rate_dict.items():
+                    table.add_row(
+                        source,
+                        dest,
+                        Text(
+                            f"{exchange_rate:.4f}",
+                            style="red" if exchange_rate < 1.0 else "green",
+                        ),
+                    )
+
+                live_table.update(Align.center(table))
diff --git a/local_packages/rich/live_render.py b/local_packages/rich/live_render.py
new file mode 100644
index 0000000..e7ec970
--- /dev/null
+++ b/local_packages/rich/live_render.py
@@ -0,0 +1,116 @@
+from typing import Literal, Optional, Tuple
+
+from ._loop import loop_last
+from .console import Console, ConsoleOptions, RenderableType, RenderResult
+from .control import Control
+from .segment import ControlType, Segment
+from .style import StyleType
+from .text import Text
+
+VerticalOverflowMethod = Literal["crop", "ellipsis", "visible"]
+
+
+class LiveRender:
+    """Creates a renderable that may be updated.
+
+    Args:
+        renderable (RenderableType): Any renderable object.
+        style (StyleType, optional): An optional style to apply to the renderable. Defaults to "".
+    """
+
+    def __init__(
+        self,
+        renderable: RenderableType,
+        style: StyleType = "",
+        vertical_overflow: VerticalOverflowMethod = "ellipsis",
+    ) -> None:
+        self.renderable = renderable
+        self.style = style
+        self.vertical_overflow = vertical_overflow
+        self._shape: Optional[Tuple[int, int]] = None
+
+    @property
+    def last_render_height(self) -> int:
+        """The number of lines in the last render (may be 0 if nothing was rendered).
+
+        Returns:
+            Height in lines
+        """
+        if self._shape is None:
+            return 0
+        return self._shape[1]
+
+    def set_renderable(self, renderable: RenderableType) -> None:
+        """Set a new renderable.
+
+        Args:
+            renderable (RenderableType): Any renderable object, including str.
+        """
+        self.renderable = renderable
+
+    def position_cursor(self) -> Control:
+        """Get control codes to move cursor to beginning of live render.
+
+        Returns:
+            Control: A control instance that may be printed.
+        """
+        if self._shape is not None:
+            _, height = self._shape
+            return Control(
+                ControlType.CARRIAGE_RETURN,
+                (ControlType.ERASE_IN_LINE, 2),
+                *(
+                    (
+                        (ControlType.CURSOR_UP, 1),
+                        (ControlType.ERASE_IN_LINE, 2),
+                    )
+                    * (height - 1)
+                )
+            )
+        return Control()
+
+    def restore_cursor(self) -> Control:
+        """Get control codes to clear the render and restore the cursor to its previous position.
+
+        Returns:
+            Control: A Control instance that may be printed.
+        """
+        if self._shape is not None:
+            _, height = self._shape
+            return Control(
+                ControlType.CARRIAGE_RETURN,
+                *((ControlType.CURSOR_UP, 1), (ControlType.ERASE_IN_LINE, 2)) * height
+            )
+        return Control()
+
+    def __rich_console__(
+        self, console: Console, options: ConsoleOptions
+    ) -> RenderResult:
+        renderable = self.renderable
+        style = console.get_style(self.style)
+        lines = console.render_lines(renderable, options, style=style, pad=False)
+        shape = Segment.get_shape(lines)
+
+        _, height = shape
+        if height > options.size.height:
+            if self.vertical_overflow == "crop":
+                lines = lines[: options.size.height]
+                shape = Segment.get_shape(lines)
+            elif self.vertical_overflow == "ellipsis":
+                lines = lines[: (options.size.height - 1)]
+                overflow_text = Text(
+                    "...",
+                    overflow="crop",
+                    justify="center",
+                    end="",
+                    style="live.ellipsis",
+                )
+                lines.append(list(console.render(overflow_text)))
+                shape = Segment.get_shape(lines)
+        self._shape = shape
+
+        new_line = Segment.line()
+        for last, line in loop_last(lines):
+            yield from line
+            if not last:
+                yield new_line
diff --git a/local_packages/rich/logging.py b/local_packages/rich/logging.py
new file mode 100644
index 0000000..e572f07
--- /dev/null
+++ b/local_packages/rich/logging.py
@@ -0,0 +1,305 @@
+from __future__ import annotations
+
+import logging
+import os
+from datetime import datetime
+from logging import Handler, LogRecord
+from types import ModuleType
+from typing import TYPE_CHECKING, ClassVar, Iterable, List, Optional, Type, Union
+
+if TYPE_CHECKING:
+    from ._log_render import FormatTimeCallable
+    from .console import Console, ConsoleRenderable
+    from .highlighter import Highlighter
+    from .traceback import Traceback
+
+from rich._null_file import NullFile
+
+from . import get_console
+from ._log_render import LogRender
+from .highlighter import ReprHighlighter
+from .text import Text
+
+
+class RichHandler(Handler):
+    """A logging handler that renders output with Rich. The time / level / message and file are displayed in columns.
+    The level is color coded, and the message is syntax highlighted.
+
+    Note:
+        Be careful when enabling console markup in log messages if you have configured logging for libraries not
+        under your control. If a dependency writes messages containing square brackets, it may not produce the intended output.
+
+    Args:
+        level (Union[int, str], optional): Log level. Defaults to logging.NOTSET.
+        console (:class:`~rich.console.Console`, optional): Optional console instance to write logs.
+            Default will use a global console instance writing to stdout.
+        show_time (bool, optional): Show a column for the time. Defaults to True.
+        omit_repeated_times (bool, optional): Omit repetition of the same time. Defaults to True.
+        show_level (bool, optional): Show a column for the level. Defaults to True.
+        show_path (bool, optional): Show the path to the original log call. Defaults to True.
+        enable_link_path (bool, optional): Enable terminal link of path column to file. Defaults to True.
+        highlighter (Highlighter, optional): Highlighter to style log messages, or None to use ReprHighlighter. Defaults to None.
+        markup (bool, optional): Enable console markup in log messages. Defaults to False.
+        rich_tracebacks (bool, optional): Enable rich tracebacks with syntax highlighting and formatting. Defaults to False.
+        tracebacks_width (Optional[int], optional): Number of characters used to render tracebacks, or None for full width. Defaults to None.
+        tracebacks_code_width (int, optional): Number of code characters used to render tracebacks, or None for full width. Defaults to 88.
+        tracebacks_extra_lines (int, optional): Additional lines of code to render tracebacks, or None for full width. Defaults to None.
+        tracebacks_theme (str, optional): Override pygments theme used in traceback.
+        tracebacks_word_wrap (bool, optional): Enable word wrapping of long tracebacks lines. Defaults to True.
+        tracebacks_show_locals (bool, optional): Enable display of locals in tracebacks. Defaults to False.
+        tracebacks_suppress (Sequence[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback.
+        tracebacks_max_frames (int, optional): Optional maximum number of frames returned by traceback.
+        locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+            Defaults to 10.
+        locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80.
+        log_time_format (Union[str, TimeFormatterCallable], optional): If ``log_time`` is enabled, either string for strftime or callable that formats the time. Defaults to "[%x %X] ".
+        keywords (List[str], optional): List of words to highlight instead of ``RichHandler.KEYWORDS``.
+    """
+
+    KEYWORDS: ClassVar[Optional[List[str]]] = [
+        "GET",
+        "POST",
+        "HEAD",
+        "PUT",
+        "DELETE",
+        "OPTIONS",
+        "TRACE",
+        "PATCH",
+    ]
+    HIGHLIGHTER_CLASS: ClassVar[Type[Highlighter]] = ReprHighlighter
+
+    def __init__(
+        self,
+        level: Union[int, str] = logging.NOTSET,
+        console: Optional[Console] = None,
+        *,
+        show_time: bool = True,
+        omit_repeated_times: bool = True,
+        show_level: bool = True,
+        show_path: bool = True,
+        enable_link_path: bool = True,
+        highlighter: Optional[Highlighter] = None,
+        markup: bool = False,
+        rich_tracebacks: bool = False,
+        tracebacks_width: Optional[int] = None,
+        tracebacks_code_width: Optional[int] = 88,
+        tracebacks_extra_lines: int = 3,
+        tracebacks_theme: Optional[str] = None,
+        tracebacks_word_wrap: bool = True,
+        tracebacks_show_locals: bool = False,
+        tracebacks_suppress: Iterable[Union[str, ModuleType]] = (),
+        tracebacks_max_frames: int = 100,
+        locals_max_length: int = 10,
+        locals_max_string: int = 80,
+        log_time_format: Union[str, FormatTimeCallable] = "[%x %X]",
+        keywords: Optional[List[str]] = None,
+    ) -> None:
+        super().__init__(level=level)
+        self.console = console or get_console()
+        self.highlighter = highlighter or self.HIGHLIGHTER_CLASS()
+        self._log_render = LogRender(
+            show_time=show_time,
+            show_level=show_level,
+            show_path=show_path,
+            time_format=log_time_format,
+            omit_repeated_times=omit_repeated_times,
+            level_width=None,
+        )
+        self.enable_link_path = enable_link_path
+        self.markup = markup
+        self.rich_tracebacks = rich_tracebacks
+        self.tracebacks_width = tracebacks_width
+        self.tracebacks_extra_lines = tracebacks_extra_lines
+        self.tracebacks_theme = tracebacks_theme
+        self.tracebacks_word_wrap = tracebacks_word_wrap
+        self.tracebacks_show_locals = tracebacks_show_locals
+        self.tracebacks_suppress = tracebacks_suppress
+        self.tracebacks_max_frames = tracebacks_max_frames
+        self.tracebacks_code_width = tracebacks_code_width
+        self.locals_max_length = locals_max_length
+        self.locals_max_string = locals_max_string
+        self.keywords = keywords
+
+    def get_level_text(self, record: LogRecord) -> Text:
+        """Get the level name from the record.
+
+        Args:
+            record (LogRecord): LogRecord instance.
+
+        Returns:
+            Text: A tuple of the style and level name.
+        """
+        level_name = record.levelname
+        level_text = Text.styled(
+            level_name.ljust(8), f"logging.level.{level_name.lower()}"
+        )
+        return level_text
+
+    def emit(self, record: LogRecord) -> None:
+        """Invoked by logging."""
+        message = self.format(record)
+        traceback = None
+        if (
+            self.rich_tracebacks
+            and record.exc_info
+            and record.exc_info != (None, None, None)
+        ):
+            exc_type, exc_value, exc_traceback = record.exc_info
+            assert exc_type is not None
+            assert exc_value is not None
+            from .traceback import Traceback
+
+            traceback = Traceback.from_exception(
+                exc_type,
+                exc_value,
+                exc_traceback,
+                width=self.tracebacks_width,
+                code_width=self.tracebacks_code_width,
+                extra_lines=self.tracebacks_extra_lines,
+                theme=self.tracebacks_theme,
+                word_wrap=self.tracebacks_word_wrap,
+                show_locals=self.tracebacks_show_locals,
+                locals_max_length=self.locals_max_length,
+                locals_max_string=self.locals_max_string,
+                suppress=self.tracebacks_suppress,
+                max_frames=self.tracebacks_max_frames,
+            )
+            message = record.getMessage()
+            if self.formatter:
+                record.message = record.getMessage()
+                formatter = self.formatter
+                if hasattr(formatter, "usesTime") and formatter.usesTime():
+                    record.asctime = formatter.formatTime(record, formatter.datefmt)
+                message = formatter.formatMessage(record)
+
+        message_renderable = self.render_message(record, message)
+        log_renderable = self.render(
+            record=record, traceback=traceback, message_renderable=message_renderable
+        )
+        if isinstance(self.console.file, NullFile):
+            # Handles pythonw, where stdout/stderr are null, and we return NullFile
+            # instance from Console.file. In this case, we still want to make a log record
+            # even though we won't be writing anything to a file.
+            self.handleError(record)
+        else:
+            try:
+                self.console.print(log_renderable)
+            except Exception:
+                self.handleError(record)
+
+    def render_message(self, record: LogRecord, message: str) -> ConsoleRenderable:
+        """Render message text in to Text.
+
+        Args:
+            record (LogRecord): logging Record.
+            message (str): String containing log message.
+
+        Returns:
+            ConsoleRenderable: Renderable to display log message.
+        """
+        use_markup = getattr(record, "markup", self.markup)
+        message_text = Text.from_markup(message) if use_markup else Text(message)
+
+        highlighter = getattr(record, "highlighter", self.highlighter)
+        if highlighter:
+            message_text = highlighter(message_text)
+
+        if self.keywords is None:
+            self.keywords = self.KEYWORDS
+
+        if self.keywords:
+            message_text.highlight_words(self.keywords, "logging.keyword")
+
+        return message_text
+
+    def render(
+        self,
+        *,
+        record: LogRecord,
+        traceback: Optional[Traceback],
+        message_renderable: ConsoleRenderable,
+    ) -> ConsoleRenderable:
+        """Render log for display.
+
+        Args:
+            record (LogRecord): logging Record.
+            traceback (Optional[Traceback]): Traceback instance or None for no Traceback.
+            message_renderable (ConsoleRenderable): Renderable (typically Text) containing log message contents.
+
+        Returns:
+            ConsoleRenderable: Renderable to display log.
+        """
+        path = os.path.basename(record.pathname)
+        level = self.get_level_text(record)
+        time_format = None if self.formatter is None else self.formatter.datefmt
+        log_time = datetime.fromtimestamp(record.created)
+
+        log_renderable = self._log_render(
+            self.console,
+            [message_renderable] if not traceback else [message_renderable, traceback],
+            log_time=log_time,
+            time_format=time_format,
+            level=level,
+            path=path,
+            line_no=record.lineno,
+            link_path=record.pathname if self.enable_link_path else None,
+        )
+        return log_renderable
+
+
+if __name__ == "__main__":  # pragma: no cover
+    from time import sleep
+
+    FORMAT = "%(message)s"
+    # FORMAT = "%(asctime)-15s - %(levelname)s - %(message)s"
+    logging.basicConfig(
+        level="NOTSET",
+        format=FORMAT,
+        datefmt="[%X]",
+        handlers=[RichHandler(rich_tracebacks=True, tracebacks_show_locals=True)],
+    )
+    log = logging.getLogger("rich")
+
+    log.info("Server starting...")
+    log.info("Listening on http://127.0.0.1:8080")
+    sleep(1)
+
+    log.info("GET /index.html 200 1298")
+    log.info("GET /imgs/backgrounds/back1.jpg 200 54386")
+    log.info("GET /css/styles.css 200 54386")
+    log.warning("GET /favicon.ico 404 242")
+    sleep(1)
+
+    log.debug(
+        "JSONRPC request\n--> %r\n<-- %r",
+        {
+            "version": "1.1",
+            "method": "confirmFruitPurchase",
+            "params": [["apple", "orange", "mangoes", "pomelo"], 1.123],
+            "id": "194521489",
+        },
+        {"version": "1.1", "result": True, "error": None, "id": "194521489"},
+    )
+    log.debug(
+        "Loading configuration file /adasd/asdasd/qeqwe/qwrqwrqwr/sdgsdgsdg/werwerwer/dfgerert/ertertert/ertetert/werwerwer"
+    )
+    log.error("Unable to find 'pomelo' in database!")
+    log.info("POST /jsonrpc/ 200 65532")
+    log.info("POST /admin/ 401 42234")
+    log.warning("password was rejected for admin site.")
+
+    def divide() -> None:
+        number = 1
+        divisor = 0
+        foos = ["foo"] * 100
+        log.debug("in divide")
+        try:
+            number / divisor
+        except:
+            log.exception("An error of some kind occurred!")
+
+    divide()
+    sleep(1)
+    log.critical("Out of memory!")
+    log.info("Server exited with code=-1")
+    log.info("[bold]EXITING...[/bold]", extra=dict(markup=True))
diff --git a/local_packages/rich/markdown.py b/local_packages/rich/markdown.py
new file mode 100644
index 0000000..abbbb07
--- /dev/null
+++ b/local_packages/rich/markdown.py
@@ -0,0 +1,802 @@
+from __future__ import annotations
+
+import sys
+from dataclasses import dataclass
+from typing import ClassVar, Iterable, get_args
+
+from markdown_it import MarkdownIt
+from markdown_it.token import Token
+
+from rich.table import Table
+
+from . import box
+from ._loop import loop_first
+from ._stack import Stack
+from .console import Console, ConsoleOptions, JustifyMethod, RenderResult
+from .containers import Renderables
+from .jupyter import JupyterMixin
+from .rule import Rule
+from .segment import Segment
+from .style import Style, StyleStack
+from .syntax import Syntax
+from .text import Text, TextType
+
+
+class MarkdownElement:
+    new_line: ClassVar[bool] = True
+
+    @classmethod
+    def create(cls, markdown: Markdown, token: Token) -> MarkdownElement:
+        """Factory to create markdown element,
+
+        Args:
+            markdown (Markdown): The parent Markdown object.
+            token (Token): A node from markdown-it.
+
+        Returns:
+            MarkdownElement: A new markdown element
+        """
+        return cls()
+
+    def on_enter(self, context: MarkdownContext) -> None:
+        """Called when the node is entered.
+
+        Args:
+            context (MarkdownContext): The markdown context.
+        """
+
+    def on_text(self, context: MarkdownContext, text: TextType) -> None:
+        """Called when text is parsed.
+
+        Args:
+            context (MarkdownContext): The markdown context.
+        """
+
+    def on_leave(self, context: MarkdownContext) -> None:
+        """Called when the parser leaves the element.
+
+        Args:
+            context (MarkdownContext): [description]
+        """
+
+    def on_child_close(self, context: MarkdownContext, child: MarkdownElement) -> bool:
+        """Called when a child element is closed.
+
+        This method allows a parent element to take over rendering of its children.
+
+        Args:
+            context (MarkdownContext): The markdown context.
+            child (MarkdownElement): The child markdown element.
+
+        Returns:
+            bool: Return True to render the element, or False to not render the element.
+        """
+        return True
+
+    def __rich_console__(
+        self, console: Console, options: ConsoleOptions
+    ) -> RenderResult:
+        return ()
+
+
+class UnknownElement(MarkdownElement):
+    """An unknown element.
+
+    Hopefully there will be no unknown elements, and we will have a MarkdownElement for
+    everything in the document.
+
+    """
+
+
+class TextElement(MarkdownElement):
+    """Base class for elements that render text."""
+
+    style_name = "none"
+
+    def on_enter(self, context: MarkdownContext) -> None:
+        self.style = context.enter_style(self.style_name)
+        self.text = Text(justify="left")
+
+    def on_text(self, context: MarkdownContext, text: TextType) -> None:
+        self.text.append(text, context.current_style if isinstance(text, str) else None)
+
+    def on_leave(self, context: MarkdownContext) -> None:
+        context.leave_style()
+
+
+class Paragraph(TextElement):
+    """A Paragraph."""
+
+    style_name = "markdown.paragraph"
+    justify: JustifyMethod
+
+    @classmethod
+    def create(cls, markdown: Markdown, token: Token) -> Paragraph:
+        return cls(justify=markdown.justify or "left")
+
+    def __init__(self, justify: JustifyMethod) -> None:
+        self.justify = justify
+
+    def __rich_console__(
+        self, console: Console, options: ConsoleOptions
+    ) -> RenderResult:
+        self.text.justify = self.justify
+        yield self.text
+
+
+@dataclass
+class HeadingFormat:
+    justify: JustifyMethod = "left"
+    style: str = ""
+
+
+class Heading(TextElement):
+    """A heading."""
+
+    LEVEL_ALIGN: ClassVar[dict[str, JustifyMethod]] = {
+        "h1": "center",
+        "h2": "left",
+        "h3": "left",
+        "h4": "left",
+        "h5": "left",
+        "h6": "left",
+    }
+
+    @classmethod
+    def create(cls, markdown: Markdown, token: Token) -> Heading:
+        return cls(token.tag)
+
+    def on_enter(self, context: MarkdownContext) -> None:
+        self.text = Text()
+        context.enter_style(self.style_name)
+
+    def __init__(self, tag: str) -> None:
+        self.tag = tag
+        self.style_name = f"markdown.{tag}"
+        super().__init__()
+
+    def __rich_console__(
+        self, console: Console, options: ConsoleOptions
+    ) -> RenderResult:
+        text = self.text.copy()
+        heading_justify = self.LEVEL_ALIGN.get(self.tag, "left")
+        text.justify = heading_justify
+        yield text
+
+
+class CodeBlock(TextElement):
+    """A code block with syntax highlighting."""
+
+    style_name = "markdown.code_block"
+
+    @classmethod
+    def create(cls, markdown: Markdown, token: Token) -> CodeBlock:
+        node_info = token.info or ""
+        lexer_name = node_info.partition(" ")[0]
+        return cls(lexer_name or "text", markdown.code_theme)
+
+    def __init__(self, lexer_name: str, theme: str) -> None:
+        self.lexer_name = lexer_name
+        self.theme = theme
+
+    def __rich_console__(
+        self, console: Console, options: ConsoleOptions
+    ) -> RenderResult:
+        code = str(self.text).rstrip()
+        syntax = Syntax(
+            code, self.lexer_name, theme=self.theme, word_wrap=True, padding=1
+        )
+        yield syntax
+
+
+class BlockQuote(TextElement):
+    """A block quote."""
+
+    style_name = "markdown.block_quote"
+
+    def __init__(self) -> None:
+        self.elements: Renderables = Renderables()
+
+    def on_child_close(self, context: MarkdownContext, child: MarkdownElement) -> bool:
+        self.elements.append(child)
+        return False
+
+    def __rich_console__(
+        self, console: Console, options: ConsoleOptions
+    ) -> RenderResult:
+        render_options = options.update(width=options.max_width - 4)
+        lines = console.render_lines(self.elements, render_options, style=self.style)
+        style = self.style
+        new_line = Segment("\n")
+        padding = Segment("▌ ", style)
+        for line in lines:
+            yield padding
+            yield from line
+            yield new_line
+
+
+class HorizontalRule(MarkdownElement):
+    """A horizontal rule to divide sections."""
+
+    new_line = False
+
+    def __rich_console__(
+        self, console: Console, options: ConsoleOptions
+    ) -> RenderResult:
+        style = console.get_style("markdown.hr", default="none")
+        yield Rule(style=style, characters="-")
+        yield Text()
+
+
+class TableElement(MarkdownElement):
+    """MarkdownElement corresponding to `table_open`."""
+
+    def __init__(self) -> None:
+        self.header: TableHeaderElement | None = None
+        self.body: TableBodyElement | None = None
+
+    def on_child_close(self, context: MarkdownContext, child: MarkdownElement) -> bool:
+        if isinstance(child, TableHeaderElement):
+            self.header = child
+        elif isinstance(child, TableBodyElement):
+            self.body = child
+        else:
+            raise RuntimeError("Couldn't process markdown table.")
+        return False
+
+    def __rich_console__(
+        self, console: Console, options: ConsoleOptions
+    ) -> RenderResult:
+        table = Table(
+            box=box.SIMPLE,
+            pad_edge=False,
+            style="markdown.table.border",
+            show_edge=True,
+            collapse_padding=True,
+        )
+
+        if self.header is not None and self.header.row is not None:
+            for column in self.header.row.cells:
+                heading = column.content.copy()
+                heading.stylize("markdown.table.header")
+                table.add_column(heading)
+
+        if self.body is not None:
+            for row in self.body.rows:
+                row_content = [element.content for element in row.cells]
+                table.add_row(*row_content)
+
+        yield table
+
+
+class TableHeaderElement(MarkdownElement):
+    """MarkdownElement corresponding to `thead_open` and `thead_close`."""
+
+    def __init__(self) -> None:
+        self.row: TableRowElement | None = None
+
+    def on_child_close(self, context: MarkdownContext, child: MarkdownElement) -> bool:
+        assert isinstance(child, TableRowElement)
+        self.row = child
+        return False
+
+
+class TableBodyElement(MarkdownElement):
+    """MarkdownElement corresponding to `tbody_open` and `tbody_close`."""
+
+    def __init__(self) -> None:
+        self.rows: list[TableRowElement] = []
+
+    def on_child_close(self, context: MarkdownContext, child: MarkdownElement) -> bool:
+        assert isinstance(child, TableRowElement)
+        self.rows.append(child)
+        return False
+
+
+class TableRowElement(MarkdownElement):
+    """MarkdownElement corresponding to `tr_open` and `tr_close`."""
+
+    def __init__(self) -> None:
+        self.cells: list[TableDataElement] = []
+
+    def on_child_close(self, context: MarkdownContext, child: MarkdownElement) -> bool:
+        assert isinstance(child, TableDataElement)
+        self.cells.append(child)
+        return False
+
+
+class TableDataElement(MarkdownElement):
+    """MarkdownElement corresponding to `td_open` and `td_close`
+    and `th_open` and `th_close`."""
+
+    @classmethod
+    def create(cls, markdown: Markdown, token: Token) -> MarkdownElement:
+        style = str(token.attrs.get("style")) or ""
+
+        justify: JustifyMethod
+        if "text-align:right" in style:
+            justify = "right"
+        elif "text-align:center" in style:
+            justify = "center"
+        elif "text-align:left" in style:
+            justify = "left"
+        else:
+            justify = "default"
+
+        assert justify in get_args(JustifyMethod)
+        return cls(justify=justify)
+
+    def __init__(self, justify: JustifyMethod) -> None:
+        self.content: Text = Text("", justify=justify)
+        self.justify = justify
+
+    def on_text(self, context: MarkdownContext, text: TextType) -> None:
+        if isinstance(text, str):
+            self.content.append(text, context.current_style)
+        else:
+            self.content.append_text(text)
+
+
+class ListElement(MarkdownElement):
+    """A list element."""
+
+    @classmethod
+    def create(cls, markdown: Markdown, token: Token) -> ListElement:
+        return cls(token.type, int(token.attrs.get("start", 1)))
+
+    def __init__(self, list_type: str, list_start: int | None) -> None:
+        self.items: list[ListItem] = []
+        self.list_type = list_type
+        self.list_start = list_start
+
+    def on_child_close(self, context: MarkdownContext, child: MarkdownElement) -> bool:
+        assert isinstance(child, ListItem)
+        self.items.append(child)
+        return False
+
+    def __rich_console__(
+        self, console: Console, options: ConsoleOptions
+    ) -> RenderResult:
+        if self.list_type == "bullet_list_open":
+            for item in self.items:
+                yield from item.render_bullet(console, options)
+        else:
+            number = 1 if self.list_start is None else self.list_start
+            last_number = number + len(self.items)
+            for index, item in enumerate(self.items):
+                yield from item.render_number(
+                    console, options, number + index, last_number
+                )
+
+
+class ListItem(TextElement):
+    """An item in a list."""
+
+    style_name = "markdown.item"
+
+    def __init__(self) -> None:
+        self.elements: Renderables = Renderables()
+
+    def on_child_close(self, context: MarkdownContext, child: MarkdownElement) -> bool:
+        self.elements.append(child)
+        return False
+
+    def render_bullet(self, console: Console, options: ConsoleOptions) -> RenderResult:
+        render_options = options.update(width=options.max_width - 3)
+        lines = console.render_lines(self.elements, render_options, style=self.style)
+        bullet_style = console.get_style("markdown.item.bullet", default="none")
+
+        bullet = Segment(" • ", bullet_style)
+        padding = Segment(" " * 3, bullet_style)
+        new_line = Segment("\n")
+        for first, line in loop_first(lines):
+            yield bullet if first else padding
+            yield from line
+            yield new_line
+
+    def render_number(
+        self, console: Console, options: ConsoleOptions, number: int, last_number: int
+    ) -> RenderResult:
+        number_width = len(str(last_number)) + 2
+        render_options = options.update(width=options.max_width - number_width)
+        lines = console.render_lines(self.elements, render_options, style=self.style)
+        number_style = console.get_style("markdown.item.number", default="none")
+
+        new_line = Segment("\n")
+        padding = Segment(" " * number_width, number_style)
+        numeral = Segment(f"{number}".rjust(number_width - 1) + " ", number_style)
+        for first, line in loop_first(lines):
+            yield numeral if first else padding
+            yield from line
+            yield new_line
+
+
+class Link(TextElement):
+    @classmethod
+    def create(cls, markdown: Markdown, token: Token) -> MarkdownElement:
+        url = token.attrs.get("href", "#")
+        return cls(token.content, str(url))
+
+    def __init__(self, text: str, href: str):
+        self.text = Text(text)
+        self.href = href
+
+
+class ImageItem(TextElement):
+    """Renders a placeholder for an image."""
+
+    new_line = False
+
+    @classmethod
+    def create(cls, markdown: Markdown, token: Token) -> MarkdownElement:
+        """Factory to create markdown element,
+
+        Args:
+            markdown (Markdown): The parent Markdown object.
+            token (Any): A token from markdown-it.
+
+        Returns:
+            MarkdownElement: A new markdown element
+        """
+        return cls(str(token.attrs.get("src", "")), markdown.hyperlinks)
+
+    def __init__(self, destination: str, hyperlinks: bool) -> None:
+        self.destination = destination
+        self.hyperlinks = hyperlinks
+        self.link: str | None = None
+        super().__init__()
+
+    def on_enter(self, context: MarkdownContext) -> None:
+        self.link = context.current_style.link
+        self.text = Text(justify="left")
+        super().on_enter(context)
+
+    def __rich_console__(
+        self, console: Console, options: ConsoleOptions
+    ) -> RenderResult:
+        link_style = Style(link=self.link or self.destination or None)
+        title = self.text or Text(self.destination.strip("/").rsplit("/", 1)[-1])
+        if self.hyperlinks:
+            title.stylize(link_style)
+        text = Text.assemble("🌆 ", title, " ", end="")
+        yield text
+
+
+class MarkdownContext:
+    """Manages the console render state."""
+
+    def __init__(
+        self,
+        console: Console,
+        options: ConsoleOptions,
+        style: Style,
+        inline_code_lexer: str | None = None,
+        inline_code_theme: str = "monokai",
+    ) -> None:
+        self.console = console
+        self.options = options
+        self.style_stack: StyleStack = StyleStack(style)
+        self.stack: Stack[MarkdownElement] = Stack()
+
+        self._syntax: Syntax | None = None
+        if inline_code_lexer is not None:
+            self._syntax = Syntax("", inline_code_lexer, theme=inline_code_theme)
+
+    @property
+    def current_style(self) -> Style:
+        """Current style which is the product of all styles on the stack."""
+        return self.style_stack.current
+
+    def on_text(self, text: str, node_type: str) -> None:
+        """Called when the parser visits text."""
+        if node_type in {"fence", "code_inline"} and self._syntax is not None:
+            highlight_text = self._syntax.highlight(text)
+            highlight_text.rstrip()
+            self.stack.top.on_text(
+                self, Text.assemble(highlight_text, style=self.style_stack.current)
+            )
+        else:
+            self.stack.top.on_text(self, text)
+
+    def enter_style(self, style_name: str | Style) -> Style:
+        """Enter a style context."""
+        style = self.console.get_style(style_name, default="none")
+        self.style_stack.push(style)
+        return self.current_style
+
+    def leave_style(self) -> Style:
+        """Leave a style context."""
+        style = self.style_stack.pop()
+        return style
+
+
+class Markdown(JupyterMixin):
+    """A Markdown renderable.
+
+    Args:
+        markup (str): A string containing markdown.
+        code_theme (str, optional): Pygments theme for code blocks. Defaults to "monokai". See https://pygments.org/styles/ for code themes.
+        justify (JustifyMethod, optional): Justify value for paragraphs. Defaults to None.
+        style (Union[str, Style], optional): Optional style to apply to markdown.
+        hyperlinks (bool, optional): Enable hyperlinks. Defaults to ``True``.
+        inline_code_lexer: (str, optional): Lexer to use if inline code highlighting is
+            enabled. Defaults to None.
+        inline_code_theme: (Optional[str], optional): Pygments theme for inline code
+            highlighting, or None for no highlighting. Defaults to None.
+    """
+
+    elements: ClassVar[dict[str, type[MarkdownElement]]] = {
+        "paragraph_open": Paragraph,
+        "heading_open": Heading,
+        "fence": CodeBlock,
+        "code_block": CodeBlock,
+        "blockquote_open": BlockQuote,
+        "hr": HorizontalRule,
+        "bullet_list_open": ListElement,
+        "ordered_list_open": ListElement,
+        "list_item_open": ListItem,
+        "image": ImageItem,
+        "table_open": TableElement,
+        "tbody_open": TableBodyElement,
+        "thead_open": TableHeaderElement,
+        "tr_open": TableRowElement,
+        "td_open": TableDataElement,
+        "th_open": TableDataElement,
+    }
+
+    inlines = {"em", "strong", "code", "s"}
+
+    def __init__(
+        self,
+        markup: str,
+        code_theme: str = "monokai",
+        justify: JustifyMethod | None = None,
+        style: str | Style = "none",
+        hyperlinks: bool = True,
+        inline_code_lexer: str | None = None,
+        inline_code_theme: str | None = None,
+    ) -> None:
+        parser = MarkdownIt().enable("strikethrough").enable("table")
+        self.markup = markup
+        self.parsed = parser.parse(markup)
+        self.code_theme = code_theme
+        self.justify: JustifyMethod | None = justify
+        self.style = style
+        self.hyperlinks = hyperlinks
+        self.inline_code_lexer = inline_code_lexer
+        self.inline_code_theme = inline_code_theme or code_theme
+
+    def _flatten_tokens(self, tokens: Iterable[Token]) -> Iterable[Token]:
+        """Flattens the token stream."""
+        for token in tokens:
+            is_fence = token.type == "fence"
+            is_image = token.tag == "img"
+            if token.children and not (is_image or is_fence):
+                yield from self._flatten_tokens(token.children)
+            else:
+                yield token
+
+    def __rich_console__(
+        self, console: Console, options: ConsoleOptions
+    ) -> RenderResult:
+        """Render markdown to the console."""
+        style = console.get_style(self.style, default="none")
+        options = options.update(height=None)
+        context = MarkdownContext(
+            console,
+            options,
+            style,
+            inline_code_lexer=self.inline_code_lexer,
+            inline_code_theme=self.inline_code_theme,
+        )
+        tokens = self.parsed
+        inline_style_tags = self.inlines
+        new_line = False
+        _new_line_segment = Segment.line()
+
+        for token in self._flatten_tokens(tokens):
+            node_type = token.type
+            tag = token.tag
+
+            entering = token.nesting == 1
+            exiting = token.nesting == -1
+            self_closing = token.nesting == 0
+
+            if node_type == "text":
+                context.on_text(token.content, node_type)
+            elif node_type == "hardbreak":
+                context.on_text("\n", node_type)
+            elif node_type == "softbreak":
+                context.on_text(" ", node_type)
+            elif node_type == "link_open":
+                href = str(token.attrs.get("href", ""))
+                if self.hyperlinks:
+                    link_style = console.get_style("markdown.link_url", default="none")
+                    link_style += Style(link=href)
+                    context.enter_style(link_style)
+                else:
+                    context.stack.push(Link.create(self, token))
+            elif node_type == "html_inline":
+                if token.content == "":
+                    kbd_style = console.get_style("markdown.kbd", default="bold")
+                    context.enter_style(kbd_style)
+                elif token.content == "":
+                    context.leave_style()
+                else:
+                    continue
+            elif node_type == "link_close":
+                if self.hyperlinks:
+                    context.leave_style()
+                else:
+                    element = context.stack.pop()
+                    assert isinstance(element, Link)
+                    link_style = console.get_style("markdown.link", default="none")
+                    context.enter_style(link_style)
+                    context.on_text(element.text.plain, node_type)
+                    context.leave_style()
+                    context.on_text(" (", node_type)
+                    link_url_style = console.get_style(
+                        "markdown.link_url", default="none"
+                    )
+                    context.enter_style(link_url_style)
+                    context.on_text(element.href, node_type)
+                    context.leave_style()
+                    context.on_text(")", node_type)
+            elif (
+                tag in inline_style_tags
+                and node_type != "fence"
+                and node_type != "code_block"
+            ):
+                if entering:
+                    # If it's an opening inline token e.g. strong, em, etc.
+                    # Then we move into a style context i.e. push to stack.
+                    context.enter_style(f"markdown.{tag}")
+                elif exiting:
+                    # If it's a closing inline style, then we pop the style
+                    # off of the stack, to move out of the context of it...
+                    context.leave_style()
+                else:
+                    # If it's a self-closing inline style e.g. `code_inline`
+                    context.enter_style(f"markdown.{tag}")
+                    if token.content:
+                        context.on_text(token.content, node_type)
+                    context.leave_style()
+            else:
+                # Map the markdown tag -> MarkdownElement renderable
+                element_class = self.elements.get(token.type) or UnknownElement
+                element = element_class.create(self, token)
+
+                if entering or self_closing:
+                    context.stack.push(element)
+                    element.on_enter(context)
+
+                if exiting:  # CLOSING tag
+                    element = context.stack.pop()
+
+                    should_render = not context.stack or (
+                        context.stack
+                        and context.stack.top.on_child_close(context, element)
+                    )
+
+                    if should_render:
+                        if new_line:
+                            yield _new_line_segment
+
+                        yield from console.render(element, context.options)
+                elif self_closing:  # SELF-CLOSING tags (e.g. text, code, image)
+                    context.stack.pop()
+                    text = token.content
+                    if text is not None:
+                        element.on_text(context, text)
+
+                    should_render = (
+                        not context.stack
+                        or context.stack
+                        and context.stack.top.on_child_close(context, element)
+                    )
+                    if should_render:
+                        if new_line and node_type != "inline":
+                            yield _new_line_segment
+                        yield from console.render(element, context.options)
+
+                if exiting or self_closing:
+                    element.on_leave(context)
+                    new_line = element.new_line
+
+
+if __name__ == "__main__":  # pragma: no cover
+    import argparse
+    import sys
+
+    parser = argparse.ArgumentParser(
+        description="Render Markdown to the console with Rich"
+    )
+    parser.add_argument(
+        "path",
+        metavar="PATH",
+        help="path to markdown file, or - for stdin",
+    )
+    parser.add_argument(
+        "-c",
+        "--force-color",
+        dest="force_color",
+        action="store_true",
+        default=None,
+        help="force color for non-terminals",
+    )
+    parser.add_argument(
+        "-t",
+        "--code-theme",
+        dest="code_theme",
+        default="monokai",
+        help="pygments code theme",
+    )
+    parser.add_argument(
+        "-i",
+        "--inline-code-lexer",
+        dest="inline_code_lexer",
+        default=None,
+        help="inline_code_lexer",
+    )
+    parser.add_argument(
+        "-y",
+        "--hyperlinks",
+        dest="hyperlinks",
+        action="store_true",
+        help="enable hyperlinks",
+    )
+    parser.add_argument(
+        "-w",
+        "--width",
+        type=int,
+        dest="width",
+        default=None,
+        help="width of output (default will auto-detect)",
+    )
+    parser.add_argument(
+        "-j",
+        "--justify",
+        dest="justify",
+        action="store_true",
+        help="enable full text justify",
+    )
+    parser.add_argument(
+        "-p",
+        "--page",
+        dest="page",
+        action="store_true",
+        help="use pager to scroll output",
+    )
+    args = parser.parse_args()
+
+    from rich.console import Console
+
+    if args.path == "-":
+        markdown_body = sys.stdin.read()
+    else:
+        with open(args.path, encoding="utf-8") as markdown_file:
+            markdown_body = markdown_file.read()
+
+    markdown = Markdown(
+        markdown_body,
+        justify="full" if args.justify else "left",
+        code_theme=args.code_theme,
+        hyperlinks=args.hyperlinks,
+        inline_code_lexer=args.inline_code_lexer,
+    )
+    if args.page:
+        import io
+        import pydoc
+
+        fileio = io.StringIO()
+        console = Console(
+            file=fileio, force_terminal=args.force_color, width=args.width
+        )
+        console.print(markdown)
+        pydoc.pager(fileio.getvalue())
+
+    else:
+        console = Console(
+            force_terminal=args.force_color, width=args.width, record=True
+        )
+        console.print(markdown)
diff --git a/local_packages/rich/markup.py b/local_packages/rich/markup.py
new file mode 100644
index 0000000..bd9c05a
--- /dev/null
+++ b/local_packages/rich/markup.py
@@ -0,0 +1,251 @@
+import re
+from ast import literal_eval
+from operator import attrgetter
+from typing import Callable, Iterable, List, Match, NamedTuple, Optional, Tuple, Union
+
+from ._emoji_replace import _emoji_replace
+from .emoji import EmojiVariant
+from .errors import MarkupError
+from .style import Style
+from .text import Span, Text
+
+RE_TAGS = re.compile(
+    r"""((\\*)\[([a-z#/@][^[]*?)])""",
+    re.VERBOSE,
+)
+
+RE_HANDLER = re.compile(r"^([\w.]*?)(\(.*?\))?$")
+
+
+class Tag(NamedTuple):
+    """A tag in console markup."""
+
+    name: str
+    """The tag name. e.g. 'bold'."""
+    parameters: Optional[str]
+    """Any additional parameters after the name."""
+
+    def __str__(self) -> str:
+        return (
+            self.name if self.parameters is None else f"{self.name} {self.parameters}"
+        )
+
+    @property
+    def markup(self) -> str:
+        """Get the string representation of this tag."""
+        return (
+            f"[{self.name}]"
+            if self.parameters is None
+            else f"[{self.name}={self.parameters}]"
+        )
+
+
+_ReStringMatch = Match[str]  # regex match object
+_ReSubCallable = Callable[[_ReStringMatch], str]  # Callable invoked by re.sub
+_EscapeSubMethod = Callable[[_ReSubCallable, str], str]  # Sub method of a compiled re
+
+
+def escape(
+    markup: str,
+    _escape: _EscapeSubMethod = re.compile(r"(\\*)(\[[a-z#/@][^[]*?])").sub,
+) -> str:
+    """Escapes text so that it won't be interpreted as markup.
+
+    Args:
+        markup (str): Content to be inserted in to markup.
+
+    Returns:
+        str: Markup with square brackets escaped.
+    """
+
+    def escape_backslashes(match: Match[str]) -> str:
+        """Called by re.sub replace matches."""
+        backslashes, text = match.groups()
+        return f"{backslashes}{backslashes}\\{text}"
+
+    markup = _escape(escape_backslashes, markup)
+    if markup.endswith("\\") and not markup.endswith("\\\\"):
+        return markup + "\\"
+
+    return markup
+
+
+def _parse(markup: str) -> Iterable[Tuple[int, Optional[str], Optional[Tag]]]:
+    """Parse markup in to an iterable of tuples of (position, text, tag).
+
+    Args:
+        markup (str): A string containing console markup
+
+    """
+    position = 0
+    _divmod = divmod
+    _Tag = Tag
+    for match in RE_TAGS.finditer(markup):
+        full_text, escapes, tag_text = match.groups()
+        start, end = match.span()
+        if start > position:
+            yield start, markup[position:start], None
+        if escapes:
+            backslashes, escaped = _divmod(len(escapes), 2)
+            if backslashes:
+                # Literal backslashes
+                yield start, "\\" * backslashes, None
+                start += backslashes * 2
+            if escaped:
+                # Escape of tag
+                yield start, full_text[len(escapes) :], None
+                position = end
+                continue
+        text, equals, parameters = tag_text.partition("=")
+        yield start, None, _Tag(text, parameters if equals else None)
+        position = end
+    if position < len(markup):
+        yield position, markup[position:], None
+
+
+def render(
+    markup: str,
+    style: Union[str, Style] = "",
+    emoji: bool = True,
+    emoji_variant: Optional[EmojiVariant] = None,
+) -> Text:
+    """Render console markup in to a Text instance.
+
+    Args:
+        markup (str): A string containing console markup.
+        style: (Union[str, Style]): The style to use.
+        emoji (bool, optional): Also render emoji code. Defaults to True.
+        emoji_variant (str, optional): Optional emoji variant, either "text" or "emoji". Defaults to None.
+
+
+    Raises:
+        MarkupError: If there is a syntax error in the markup.
+
+    Returns:
+        Text: A test instance.
+    """
+    emoji_replace = _emoji_replace
+    if "[" not in markup:
+        return Text(
+            emoji_replace(markup, default_variant=emoji_variant) if emoji else markup,
+            style=style,
+        )
+    text = Text(style=style)
+    append = text.append
+    normalize = Style.normalize
+
+    style_stack: List[Tuple[int, Tag]] = []
+    pop = style_stack.pop
+
+    spans: List[Span] = []
+    append_span = spans.append
+
+    _Span = Span
+    _Tag = Tag
+
+    def pop_style(style_name: str) -> Tuple[int, Tag]:
+        """Pop tag matching given style name."""
+        for index, (_, tag) in enumerate(reversed(style_stack), 1):
+            if tag.name == style_name:
+                return pop(-index)
+        raise KeyError(style_name)
+
+    for position, plain_text, tag in _parse(markup):
+        if plain_text is not None:
+            # Handle open brace escapes, where the brace is not part of a tag.
+            plain_text = plain_text.replace("\\[", "[")
+            append(emoji_replace(plain_text) if emoji else plain_text)
+        elif tag is not None:
+            if tag.name.startswith("/"):  # Closing tag
+                style_name = tag.name[1:].strip()
+
+                if style_name:  # explicit close
+                    style_name = normalize(style_name)
+                    try:
+                        start, open_tag = pop_style(style_name)
+                    except KeyError:
+                        raise MarkupError(
+                            f"closing tag '{tag.markup}' at position {position} doesn't match any open tag"
+                        ) from None
+                else:  # implicit close
+                    try:
+                        start, open_tag = pop()
+                    except IndexError:
+                        raise MarkupError(
+                            f"closing tag '[/]' at position {position} has nothing to close"
+                        ) from None
+
+                if open_tag.name.startswith("@"):
+                    if open_tag.parameters:
+                        handler_name = ""
+                        parameters = open_tag.parameters.strip()
+                        handler_match = RE_HANDLER.match(parameters)
+                        if handler_match is not None:
+                            handler_name, match_parameters = handler_match.groups()
+                            parameters = (
+                                "()" if match_parameters is None else match_parameters
+                            )
+
+                        try:
+                            meta_params = literal_eval(parameters)
+                        except SyntaxError as error:
+                            raise MarkupError(
+                                f"error parsing {parameters!r} in {open_tag.parameters!r}; {error.msg}"
+                            )
+                        except Exception as error:
+                            raise MarkupError(
+                                f"error parsing {open_tag.parameters!r}; {error}"
+                            ) from None
+
+                        if handler_name:
+                            meta_params = (
+                                handler_name,
+                                meta_params
+                                if isinstance(meta_params, tuple)
+                                else (meta_params,),
+                            )
+
+                    else:
+                        meta_params = ()
+
+                    append_span(
+                        _Span(
+                            start, len(text), Style(meta={open_tag.name: meta_params})
+                        )
+                    )
+                else:
+                    append_span(_Span(start, len(text), str(open_tag)))
+
+            else:  # Opening tag
+                normalized_tag = _Tag(normalize(tag.name), tag.parameters)
+                style_stack.append((len(text), normalized_tag))
+
+    text_length = len(text)
+    while style_stack:
+        start, tag = style_stack.pop()
+        style = str(tag)
+        if style:
+            append_span(_Span(start, text_length, style))
+
+    text.spans = sorted(spans[::-1], key=attrgetter("start"))
+    return text
+
+
+if __name__ == "__main__":  # pragma: no cover
+    MARKUP = [
+        "[red]Hello World[/red]",
+        "[magenta]Hello [b]World[/b]",
+        "[bold]Bold[italic] bold and italic [/bold]italic[/italic]",
+        "Click [link=https://www.willmcgugan.com]here[/link] to visit my Blog",
+        ":warning-emoji: [bold red blink] DANGER![/]",
+    ]
+
+    from rich import print
+    from rich.table import Table
+
+    grid = Table("Markup", "Result", padding=(0, 1))
+
+    for markup in MARKUP:
+        grid.add_row(Text(markup), markup)
+
+    print(grid)
diff --git a/local_packages/rich/measure.py b/local_packages/rich/measure.py
new file mode 100644
index 0000000..a508ffa
--- /dev/null
+++ b/local_packages/rich/measure.py
@@ -0,0 +1,151 @@
+from operator import itemgetter
+from typing import TYPE_CHECKING, Callable, NamedTuple, Optional, Sequence
+
+from . import errors
+from .protocol import is_renderable, rich_cast
+
+if TYPE_CHECKING:
+    from .console import Console, ConsoleOptions, RenderableType
+
+
+class Measurement(NamedTuple):
+    """Stores the minimum and maximum widths (in characters) required to render an object."""
+
+    minimum: int
+    """Minimum number of cells required to render."""
+    maximum: int
+    """Maximum number of cells required to render."""
+
+    @property
+    def span(self) -> int:
+        """Get difference between maximum and minimum."""
+        return self.maximum - self.minimum
+
+    def normalize(self) -> "Measurement":
+        """Get measurement that ensures that minimum <= maximum and minimum >= 0
+
+        Returns:
+            Measurement: A normalized measurement.
+        """
+        minimum, maximum = self
+        minimum = min(max(0, minimum), maximum)
+        return Measurement(max(0, minimum), max(0, max(minimum, maximum)))
+
+    def with_maximum(self, width: int) -> "Measurement":
+        """Get a RenderableWith where the widths are <= width.
+
+        Args:
+            width (int): Maximum desired width.
+
+        Returns:
+            Measurement: New Measurement object.
+        """
+        minimum, maximum = self
+        return Measurement(min(minimum, width), min(maximum, width))
+
+    def with_minimum(self, width: int) -> "Measurement":
+        """Get a RenderableWith where the widths are >= width.
+
+        Args:
+            width (int): Minimum desired width.
+
+        Returns:
+            Measurement: New Measurement object.
+        """
+        minimum, maximum = self
+        width = max(0, width)
+        return Measurement(max(minimum, width), max(maximum, width))
+
+    def clamp(
+        self, min_width: Optional[int] = None, max_width: Optional[int] = None
+    ) -> "Measurement":
+        """Clamp a measurement within the specified range.
+
+        Args:
+            min_width (int): Minimum desired width, or ``None`` for no minimum. Defaults to None.
+            max_width (int): Maximum desired width, or ``None`` for no maximum. Defaults to None.
+
+        Returns:
+            Measurement: New Measurement object.
+        """
+        measurement = self
+        if min_width is not None:
+            measurement = measurement.with_minimum(min_width)
+        if max_width is not None:
+            measurement = measurement.with_maximum(max_width)
+        return measurement
+
+    @classmethod
+    def get(
+        cls, console: "Console", options: "ConsoleOptions", renderable: "RenderableType"
+    ) -> "Measurement":
+        """Get a measurement for a renderable.
+
+        Args:
+            console (~rich.console.Console): Console instance.
+            options (~rich.console.ConsoleOptions): Console options.
+            renderable (RenderableType): An object that may be rendered with Rich.
+
+        Raises:
+            errors.NotRenderableError: If the object is not renderable.
+
+        Returns:
+            Measurement: Measurement object containing range of character widths required to render the object.
+        """
+        _max_width = options.max_width
+        if _max_width < 1:
+            return Measurement(0, 0)
+        if isinstance(renderable, str):
+            renderable = console.render_str(
+                renderable, markup=options.markup, highlight=False
+            )
+        renderable = rich_cast(renderable)
+        if is_renderable(renderable):
+            get_console_width: Optional[
+                Callable[["Console", "ConsoleOptions"], "Measurement"]
+            ] = getattr(renderable, "__rich_measure__", None)
+            if get_console_width is not None:
+                render_width = (
+                    get_console_width(console, options)
+                    .normalize()
+                    .with_maximum(_max_width)
+                )
+                if render_width.maximum < 1:
+                    return Measurement(0, 0)
+                return render_width.normalize()
+            else:
+                return Measurement(0, _max_width)
+        else:
+            raise errors.NotRenderableError(
+                f"Unable to get render width for {renderable!r}; "
+                "a str, Segment, or object with __rich_console__ method is required"
+            )
+
+
+def measure_renderables(
+    console: "Console",
+    options: "ConsoleOptions",
+    renderables: Sequence["RenderableType"],
+) -> "Measurement":
+    """Get a measurement that would fit a number of renderables.
+
+    Args:
+        console (~rich.console.Console): Console instance.
+        options (~rich.console.ConsoleOptions): Console options.
+        renderables (Iterable[RenderableType]): One or more renderable objects.
+
+    Returns:
+        Measurement: Measurement object containing range of character widths required to
+            contain all given renderables.
+    """
+    if not renderables:
+        return Measurement(0, 0)
+    get_measurement = Measurement.get
+    measurements = [
+        get_measurement(console, options, renderable) for renderable in renderables
+    ]
+    measured_width = Measurement(
+        max(measurements, key=itemgetter(0)).minimum,
+        max(measurements, key=itemgetter(1)).maximum,
+    )
+    return measured_width
diff --git a/local_packages/rich/padding.py b/local_packages/rich/padding.py
new file mode 100644
index 0000000..d1aa01b
--- /dev/null
+++ b/local_packages/rich/padding.py
@@ -0,0 +1,141 @@
+from typing import TYPE_CHECKING, List, Optional, Tuple, Union
+
+if TYPE_CHECKING:
+    from .console import (
+        Console,
+        ConsoleOptions,
+        RenderableType,
+        RenderResult,
+    )
+
+from .jupyter import JupyterMixin
+from .measure import Measurement
+from .segment import Segment
+from .style import Style
+
+PaddingDimensions = Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int, int]]
+
+
+class Padding(JupyterMixin):
+    """Draw space around content.
+
+    Example:
+        >>> print(Padding("Hello", (2, 4), style="on blue"))
+
+    Args:
+        renderable (RenderableType): String or other renderable.
+        pad (Union[int, Tuple[int]]): Padding for top, right, bottom, and left borders.
+            May be specified with 1, 2, or 4 integers (CSS style).
+        style (Union[str, Style], optional): Style for padding characters. Defaults to "none".
+        expand (bool, optional): Expand padding to fit available width. Defaults to True.
+    """
+
+    def __init__(
+        self,
+        renderable: "RenderableType",
+        pad: "PaddingDimensions" = (0, 0, 0, 0),
+        *,
+        style: Union[str, Style] = "none",
+        expand: bool = True,
+    ):
+        self.renderable = renderable
+        self.top, self.right, self.bottom, self.left = self.unpack(pad)
+        self.style = style
+        self.expand = expand
+
+    @classmethod
+    def indent(cls, renderable: "RenderableType", level: int) -> "Padding":
+        """Make padding instance to render an indent.
+
+        Args:
+            renderable (RenderableType): String or other renderable.
+            level (int): Number of characters to indent.
+
+        Returns:
+            Padding: A Padding instance.
+        """
+
+        return Padding(renderable, pad=(0, 0, 0, level), expand=False)
+
+    @staticmethod
+    def unpack(pad: "PaddingDimensions") -> Tuple[int, int, int, int]:
+        """Unpack padding specified in CSS style."""
+        if isinstance(pad, int):
+            return (pad, pad, pad, pad)
+        if len(pad) == 1:
+            _pad = pad[0]
+            return (_pad, _pad, _pad, _pad)
+        if len(pad) == 2:
+            pad_top, pad_right = pad
+            return (pad_top, pad_right, pad_top, pad_right)
+        if len(pad) == 4:
+            top, right, bottom, left = pad
+            return (top, right, bottom, left)
+        raise ValueError(f"1, 2 or 4 integers required for padding; {len(pad)} given")
+
+    def __repr__(self) -> str:
+        return f"Padding({self.renderable!r}, ({self.top},{self.right},{self.bottom},{self.left}))"
+
+    def __rich_console__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> "RenderResult":
+        style = console.get_style(self.style)
+        if self.expand:
+            width = options.max_width
+        else:
+            width = min(
+                Measurement.get(console, options, self.renderable).maximum
+                + self.left
+                + self.right,
+                options.max_width,
+            )
+        render_options = options.update_width(width - self.left - self.right)
+        if render_options.height is not None:
+            render_options = render_options.update_height(
+                height=render_options.height - self.top - self.bottom
+            )
+        lines = console.render_lines(
+            self.renderable, render_options, style=style, pad=True
+        )
+        _Segment = Segment
+
+        left = _Segment(" " * self.left, style) if self.left else None
+        right = (
+            [_Segment(f'{" " * self.right}', style), _Segment.line()]
+            if self.right
+            else [_Segment.line()]
+        )
+        blank_line: Optional[List[Segment]] = None
+        if self.top:
+            blank_line = [_Segment(f'{" " * width}\n', style)]
+            yield from blank_line * self.top
+        if left:
+            for line in lines:
+                yield left
+                yield from line
+                yield from right
+        else:
+            for line in lines:
+                yield from line
+                yield from right
+        if self.bottom:
+            blank_line = blank_line or [_Segment(f'{" " * width}\n', style)]
+            yield from blank_line * self.bottom
+
+    def __rich_measure__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> "Measurement":
+        max_width = options.max_width
+        extra_width = self.left + self.right
+        if max_width - extra_width < 1:
+            return Measurement(max_width, max_width)
+        measure_min, measure_max = Measurement.get(console, options, self.renderable)
+        measurement = Measurement(measure_min + extra_width, measure_max + extra_width)
+        measurement = measurement.with_maximum(max_width)
+        return measurement
+
+
+if __name__ == "__main__":  #  pragma: no cover
+    from rich import print
+
+    print(Padding("Hello, World", (2, 4), style="on blue"))
diff --git a/local_packages/rich/pager.py b/local_packages/rich/pager.py
new file mode 100644
index 0000000..a3f7aa6
--- /dev/null
+++ b/local_packages/rich/pager.py
@@ -0,0 +1,34 @@
+from abc import ABC, abstractmethod
+from typing import Any
+
+
+class Pager(ABC):
+    """Base class for a pager."""
+
+    @abstractmethod
+    def show(self, content: str) -> None:
+        """Show content in pager.
+
+        Args:
+            content (str): Content to be displayed.
+        """
+
+
+class SystemPager(Pager):
+    """Uses the pager installed on the system."""
+
+    def _pager(self, content: str) -> Any:  #  pragma: no cover
+        return __import__("pydoc").pager(content)
+
+    def show(self, content: str) -> None:
+        """Use the same pager used by pydoc."""
+        self._pager(content)
+
+
+if __name__ == "__main__":  # pragma: no cover
+    from .__main__ import make_test_card
+    from .console import Console
+
+    console = Console()
+    with console.pager(styles=True):
+        console.print(make_test_card())
diff --git a/local_packages/rich/palette.py b/local_packages/rich/palette.py
new file mode 100644
index 0000000..f295879
--- /dev/null
+++ b/local_packages/rich/palette.py
@@ -0,0 +1,100 @@
+from math import sqrt
+from functools import lru_cache
+from typing import Sequence, Tuple, TYPE_CHECKING
+
+from .color_triplet import ColorTriplet
+
+if TYPE_CHECKING:
+    from rich.table import Table
+
+
+class Palette:
+    """A palette of available colors."""
+
+    def __init__(self, colors: Sequence[Tuple[int, int, int]]):
+        self._colors = colors
+
+    def __getitem__(self, number: int) -> ColorTriplet:
+        return ColorTriplet(*self._colors[number])
+
+    def __rich__(self) -> "Table":
+        from rich.color import Color
+        from rich.style import Style
+        from rich.text import Text
+        from rich.table import Table
+
+        table = Table(
+            "index",
+            "RGB",
+            "Color",
+            title="Palette",
+            caption=f"{len(self._colors)} colors",
+            highlight=True,
+            caption_justify="right",
+        )
+        for index, color in enumerate(self._colors):
+            table.add_row(
+                str(index),
+                repr(color),
+                Text(" " * 16, style=Style(bgcolor=Color.from_rgb(*color))),
+            )
+        return table
+
+    # This is somewhat inefficient and needs caching
+    @lru_cache(maxsize=1024)
+    def match(self, color: Tuple[int, int, int]) -> int:
+        """Find a color from a palette that most closely matches a given color.
+
+        Args:
+            color (Tuple[int, int, int]): RGB components in range 0 > 255.
+
+        Returns:
+            int: Index of closes matching color.
+        """
+        red1, green1, blue1 = color
+        _sqrt = sqrt
+        get_color = self._colors.__getitem__
+
+        def get_color_distance(index: int) -> float:
+            """Get the distance to a color."""
+            red2, green2, blue2 = get_color(index)
+            red_mean = (red1 + red2) // 2
+            red = red1 - red2
+            green = green1 - green2
+            blue = blue1 - blue2
+            return _sqrt(
+                (((512 + red_mean) * red * red) >> 8)
+                + 4 * green * green
+                + (((767 - red_mean) * blue * blue) >> 8)
+            )
+
+        min_index = min(range(len(self._colors)), key=get_color_distance)
+        return min_index
+
+
+if __name__ == "__main__":  # pragma: no cover
+    import colorsys
+    from typing import Iterable
+    from rich.color import Color
+    from rich.console import Console, ConsoleOptions
+    from rich.segment import Segment
+    from rich.style import Style
+
+    class ColorBox:
+        def __rich_console__(
+            self, console: Console, options: ConsoleOptions
+        ) -> Iterable[Segment]:
+            height = console.size.height - 3
+            for y in range(0, height):
+                for x in range(options.max_width):
+                    h = x / options.max_width
+                    l = y / (height + 1)
+                    r1, g1, b1 = colorsys.hls_to_rgb(h, l, 1.0)
+                    r2, g2, b2 = colorsys.hls_to_rgb(h, l + (1 / height / 2), 1.0)
+                    bgcolor = Color.from_rgb(r1 * 255, g1 * 255, b1 * 255)
+                    color = Color.from_rgb(r2 * 255, g2 * 255, b2 * 255)
+                    yield Segment("▄", Style(color=color, bgcolor=bgcolor))
+                yield Segment.line()
+
+    console = Console()
+    console.print(ColorBox())
diff --git a/local_packages/rich/panel.py b/local_packages/rich/panel.py
new file mode 100644
index 0000000..07587e3
--- /dev/null
+++ b/local_packages/rich/panel.py
@@ -0,0 +1,317 @@
+from typing import TYPE_CHECKING, Optional
+
+from .align import AlignMethod
+from .box import ROUNDED, Box
+from .cells import cell_len
+from .jupyter import JupyterMixin
+from .measure import Measurement, measure_renderables
+from .padding import Padding, PaddingDimensions
+from .segment import Segment
+from .style import Style, StyleType
+from .text import Text, TextType
+
+if TYPE_CHECKING:
+    from .console import Console, ConsoleOptions, RenderableType, RenderResult
+
+
+class Panel(JupyterMixin):
+    """A console renderable that draws a border around its contents.
+
+    Example:
+        >>> console.print(Panel("Hello, World!"))
+
+    Args:
+        renderable (RenderableType): A console renderable object.
+        box (Box): A Box instance that defines the look of the border (see :ref:`appendix_box`. Defaults to box.ROUNDED.
+        title (Optional[TextType], optional): Optional title displayed in panel header. Defaults to None.
+        title_align (AlignMethod, optional): Alignment of title. Defaults to "center".
+        subtitle (Optional[TextType], optional): Optional subtitle displayed in panel footer. Defaults to None.
+        subtitle_align (AlignMethod, optional): Alignment of subtitle. Defaults to "center".
+        safe_box (bool, optional): Disable box characters that don't display on windows legacy terminal with *raster* fonts. Defaults to True.
+        expand (bool, optional): If True the panel will stretch to fill the console width, otherwise it will be sized to fit the contents. Defaults to True.
+        style (str, optional): The style of the panel (border and contents). Defaults to "none".
+        border_style (str, optional): The style of the border. Defaults to "none".
+        width (Optional[int], optional): Optional width of panel. Defaults to None to auto-detect.
+        height (Optional[int], optional): Optional height of panel. Defaults to None to auto-detect.
+        padding (Optional[PaddingDimensions]): Optional padding around renderable. Defaults to 0.
+        highlight (bool, optional): Enable automatic highlighting of panel title (if str). Defaults to False.
+    """
+
+    def __init__(
+        self,
+        renderable: "RenderableType",
+        box: Box = ROUNDED,
+        *,
+        title: Optional[TextType] = None,
+        title_align: AlignMethod = "center",
+        subtitle: Optional[TextType] = None,
+        subtitle_align: AlignMethod = "center",
+        safe_box: Optional[bool] = None,
+        expand: bool = True,
+        style: StyleType = "none",
+        border_style: StyleType = "none",
+        width: Optional[int] = None,
+        height: Optional[int] = None,
+        padding: PaddingDimensions = (0, 1),
+        highlight: bool = False,
+    ) -> None:
+        self.renderable = renderable
+        self.box = box
+        self.title = title
+        self.title_align: AlignMethod = title_align
+        self.subtitle = subtitle
+        self.subtitle_align = subtitle_align
+        self.safe_box = safe_box
+        self.expand = expand
+        self.style = style
+        self.border_style = border_style
+        self.width = width
+        self.height = height
+        self.padding = padding
+        self.highlight = highlight
+
+    @classmethod
+    def fit(
+        cls,
+        renderable: "RenderableType",
+        box: Box = ROUNDED,
+        *,
+        title: Optional[TextType] = None,
+        title_align: AlignMethod = "center",
+        subtitle: Optional[TextType] = None,
+        subtitle_align: AlignMethod = "center",
+        safe_box: Optional[bool] = None,
+        style: StyleType = "none",
+        border_style: StyleType = "none",
+        width: Optional[int] = None,
+        height: Optional[int] = None,
+        padding: PaddingDimensions = (0, 1),
+        highlight: bool = False,
+    ) -> "Panel":
+        """An alternative constructor that sets expand=False."""
+        return cls(
+            renderable,
+            box,
+            title=title,
+            title_align=title_align,
+            subtitle=subtitle,
+            subtitle_align=subtitle_align,
+            safe_box=safe_box,
+            style=style,
+            border_style=border_style,
+            width=width,
+            height=height,
+            padding=padding,
+            highlight=highlight,
+            expand=False,
+        )
+
+    @property
+    def _title(self) -> Optional[Text]:
+        if self.title:
+            title_text = (
+                Text.from_markup(self.title)
+                if isinstance(self.title, str)
+                else self.title.copy()
+            )
+            title_text.end = ""
+            title_text.plain = title_text.plain.replace("\n", " ")
+            title_text.no_wrap = True
+            title_text.expand_tabs()
+            title_text.pad(1)
+            return title_text
+        return None
+
+    @property
+    def _subtitle(self) -> Optional[Text]:
+        if self.subtitle:
+            subtitle_text = (
+                Text.from_markup(self.subtitle)
+                if isinstance(self.subtitle, str)
+                else self.subtitle.copy()
+            )
+            subtitle_text.end = ""
+            subtitle_text.plain = subtitle_text.plain.replace("\n", " ")
+            subtitle_text.no_wrap = True
+            subtitle_text.expand_tabs()
+            subtitle_text.pad(1)
+            return subtitle_text
+        return None
+
+    def __rich_console__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> "RenderResult":
+        _padding = Padding.unpack(self.padding)
+        renderable = (
+            Padding(self.renderable, _padding) if any(_padding) else self.renderable
+        )
+        style = console.get_style(self.style)
+        border_style = style + console.get_style(self.border_style)
+        width = (
+            options.max_width
+            if self.width is None
+            else min(options.max_width, self.width)
+        )
+
+        safe_box: bool = console.safe_box if self.safe_box is None else self.safe_box
+        box = self.box.substitute(options, safe=safe_box)
+
+        def align_text(
+            text: Text, width: int, align: str, character: str, style: Style
+        ) -> Text:
+            """Gets new aligned text.
+
+            Args:
+                text (Text): Title or subtitle text.
+                width (int): Desired width.
+                align (str): Alignment.
+                character (str): Character for alignment.
+                style (Style): Border style
+
+            Returns:
+                Text: New text instance
+            """
+            text = text.copy()
+            text.truncate(width)
+            excess_space = width - cell_len(text.plain)
+            if text.style:
+                text.stylize(console.get_style(text.style))
+
+            if excess_space:
+                if align == "left":
+                    return Text.assemble(
+                        text,
+                        (character * excess_space, style),
+                        no_wrap=True,
+                        end="",
+                    )
+                elif align == "center":
+                    left = excess_space // 2
+                    return Text.assemble(
+                        (character * left, style),
+                        text,
+                        (character * (excess_space - left), style),
+                        no_wrap=True,
+                        end="",
+                    )
+                else:
+                    return Text.assemble(
+                        (character * excess_space, style),
+                        text,
+                        no_wrap=True,
+                        end="",
+                    )
+            return text
+
+        title_text = self._title
+        if title_text is not None:
+            title_text.stylize_before(border_style)
+
+        child_width = (
+            width - 2
+            if self.expand
+            else console.measure(
+                renderable, options=options.update_width(width - 2)
+            ).maximum
+        )
+        child_height = self.height or options.height or None
+        if child_height:
+            child_height -= 2
+        if title_text is not None:
+            child_width = min(
+                options.max_width - 2, max(child_width, title_text.cell_len + 2)
+            )
+
+        width = child_width + 2
+        child_options = options.update(
+            width=child_width, height=child_height, highlight=self.highlight
+        )
+        lines = console.render_lines(renderable, child_options, style=style)
+
+        line_start = Segment(box.mid_left, border_style)
+        line_end = Segment(f"{box.mid_right}", border_style)
+        new_line = Segment.line()
+        if title_text is None or width <= 4:
+            yield Segment(box.get_top([width - 2]), border_style)
+        else:
+            title_text = align_text(
+                title_text,
+                width - 4,
+                self.title_align,
+                box.top,
+                border_style,
+            )
+            yield Segment(box.top_left + box.top, border_style)
+            yield from console.render(title_text, child_options.update_width(width - 4))
+            yield Segment(box.top + box.top_right, border_style)
+
+        yield new_line
+        for line in lines:
+            yield line_start
+            yield from line
+            yield line_end
+            yield new_line
+
+        subtitle_text = self._subtitle
+        if subtitle_text is not None:
+            subtitle_text.stylize_before(border_style)
+
+        if subtitle_text is None or width <= 4:
+            yield Segment(box.get_bottom([width - 2]), border_style)
+        else:
+            subtitle_text = align_text(
+                subtitle_text,
+                width - 4,
+                self.subtitle_align,
+                box.bottom,
+                border_style,
+            )
+            yield Segment(box.bottom_left + box.bottom, border_style)
+            yield from console.render(
+                subtitle_text, child_options.update_width(width - 4)
+            )
+            yield Segment(box.bottom + box.bottom_right, border_style)
+
+        yield new_line
+
+    def __rich_measure__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> "Measurement":
+        _title = self._title
+        _, right, _, left = Padding.unpack(self.padding)
+        padding = left + right
+        renderables = [self.renderable, _title] if _title else [self.renderable]
+
+        if self.width is None:
+            width = (
+                measure_renderables(
+                    console,
+                    options.update_width(options.max_width - padding - 2),
+                    renderables,
+                ).maximum
+                + padding
+                + 2
+            )
+        else:
+            width = self.width
+        return Measurement(width, width)
+
+
+if __name__ == "__main__":  # pragma: no cover
+    from .console import Console
+
+    c = Console()
+
+    from .box import DOUBLE, ROUNDED
+    from .padding import Padding
+
+    p = Panel(
+        "Hello, World!",
+        title="rich.Panel",
+        style="white on blue",
+        box=DOUBLE,
+        padding=1,
+    )
+
+    c.print()
+    c.print(p)
diff --git a/local_packages/rich/pretty.py b/local_packages/rich/pretty.py
new file mode 100644
index 0000000..00abeca
--- /dev/null
+++ b/local_packages/rich/pretty.py
@@ -0,0 +1,1016 @@
+import builtins
+import collections
+import dataclasses
+import inspect
+import os
+import reprlib
+import sys
+from array import array
+from collections import Counter, UserDict, UserList, defaultdict, deque
+from dataclasses import dataclass, fields, is_dataclass
+from inspect import isclass
+from itertools import islice
+from types import MappingProxyType
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    DefaultDict,
+    Deque,
+    Dict,
+    Iterable,
+    List,
+    Optional,
+    Sequence,
+    Set,
+    Tuple,
+    Union,
+)
+
+from rich.repr import RichReprResult
+
+try:
+    import attr as _attr_module
+
+    _has_attrs = hasattr(_attr_module, "ib")
+except ImportError:  # pragma: no cover
+    _has_attrs = False
+
+from . import get_console
+from ._loop import loop_last
+from ._pick import pick_bool
+from .abc import RichRenderable
+from .cells import cell_len
+from .highlighter import ReprHighlighter
+from .jupyter import JupyterMixin, JupyterRenderable
+from .measure import Measurement
+from .text import Text
+
+if TYPE_CHECKING:
+    from .console import (
+        Console,
+        ConsoleOptions,
+        HighlighterType,
+        JustifyMethod,
+        OverflowMethod,
+        RenderResult,
+    )
+
+
+def _is_attr_object(obj: Any) -> bool:
+    """Check if an object was created with attrs module."""
+    return _has_attrs and _attr_module.has(type(obj))
+
+
+def _get_attr_fields(obj: Any) -> Sequence["_attr_module.Attribute[Any]"]:
+    """Get fields for an attrs object."""
+    return _attr_module.fields(type(obj)) if _has_attrs else []
+
+
+def _is_dataclass_repr(obj: object) -> bool:
+    """Check if an instance of a dataclass contains the default repr.
+
+    Args:
+        obj (object): A dataclass instance.
+
+    Returns:
+        bool: True if the default repr is used, False if there is a custom repr.
+    """
+    # Digging in to a lot of internals here
+    # Catching all exceptions in case something is missing on a non CPython implementation
+    try:
+        return obj.__repr__.__code__.co_filename in (
+            dataclasses.__file__,
+            reprlib.__file__,
+        )
+    except Exception:  # pragma: no coverage
+        return False
+
+
+_dummy_namedtuple = collections.namedtuple("_dummy_namedtuple", [])
+
+
+def _has_default_namedtuple_repr(obj: object) -> bool:
+    """Check if an instance of namedtuple contains the default repr
+
+    Args:
+        obj (object): A namedtuple
+
+    Returns:
+        bool: True if the default repr is used, False if there's a custom repr.
+    """
+    obj_file = None
+    try:
+        obj_file = inspect.getfile(obj.__repr__)
+    except (OSError, TypeError):
+        # OSError handles case where object is defined in __main__ scope, e.g. REPL - no filename available.
+        # TypeError trapped defensively, in case of object without filename slips through.
+        pass
+    default_repr_file = inspect.getfile(_dummy_namedtuple.__repr__)
+    return obj_file == default_repr_file
+
+
+def _ipy_display_hook(
+    value: Any,
+    console: Optional["Console"] = None,
+    overflow: "OverflowMethod" = "ignore",
+    crop: bool = False,
+    indent_guides: bool = False,
+    max_length: Optional[int] = None,
+    max_string: Optional[int] = None,
+    max_depth: Optional[int] = None,
+    expand_all: bool = False,
+) -> Union[str, None]:
+    # needed here to prevent circular import:
+    from .console import ConsoleRenderable
+
+    # always skip rich generated jupyter renderables or None values
+    if _safe_isinstance(value, JupyterRenderable) or value is None:
+        return None
+
+    console = console or get_console()
+
+    with console.capture() as capture:
+        # certain renderables should start on a new line
+        if _safe_isinstance(value, ConsoleRenderable):
+            console.line()
+        console.print(
+            (
+                value
+                if _safe_isinstance(value, RichRenderable)
+                else Pretty(
+                    value,
+                    overflow=overflow,
+                    indent_guides=indent_guides,
+                    max_length=max_length,
+                    max_string=max_string,
+                    max_depth=max_depth,
+                    expand_all=expand_all,
+                    margin=12,
+                )
+            ),
+            crop=crop,
+            new_line_start=True,
+            end="",
+        )
+    # strip trailing newline, not usually part of a text repr
+    # I'm not sure if this should be prevented at a lower level
+    return capture.get().rstrip("\n")
+
+
+def _safe_isinstance(
+    obj: object, class_or_tuple: Union[type, Tuple[type, ...]]
+) -> bool:
+    """isinstance can fail in rare cases, for example types with no __class__"""
+    try:
+        return isinstance(obj, class_or_tuple)
+    except Exception:
+        return False
+
+
+def install(
+    console: Optional["Console"] = None,
+    overflow: "OverflowMethod" = "ignore",
+    crop: bool = False,
+    indent_guides: bool = False,
+    max_length: Optional[int] = None,
+    max_string: Optional[int] = None,
+    max_depth: Optional[int] = None,
+    expand_all: bool = False,
+) -> None:
+    """Install automatic pretty printing in the Python REPL.
+
+    Args:
+        console (Console, optional): Console instance or ``None`` to use global console. Defaults to None.
+        overflow (Optional[OverflowMethod], optional): Overflow method. Defaults to "ignore".
+        crop (Optional[bool], optional): Enable cropping of long lines. Defaults to False.
+        indent_guides (bool, optional): Enable indentation guides. Defaults to False.
+        max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+            Defaults to None.
+        max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
+        max_depth (int, optional): Maximum depth of nested data structures, or None for no maximum. Defaults to None.
+        expand_all (bool, optional): Expand all containers. Defaults to False.
+        max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100.
+    """
+    from rich import get_console
+
+    console = console or get_console()
+    assert console is not None
+
+    def display_hook(value: Any) -> None:
+        """Replacement sys.displayhook which prettifies objects with Rich."""
+        if value is not None:
+            assert console is not None
+            builtins._ = None  # type: ignore[attr-defined]
+            console.print(
+                (
+                    value
+                    if _safe_isinstance(value, RichRenderable)
+                    else Pretty(
+                        value,
+                        overflow=overflow,
+                        indent_guides=indent_guides,
+                        max_length=max_length,
+                        max_string=max_string,
+                        max_depth=max_depth,
+                        expand_all=expand_all,
+                    )
+                ),
+                crop=crop,
+            )
+            builtins._ = value  # type: ignore[attr-defined]
+
+    try:
+        ip = get_ipython()  # type: ignore[name-defined]
+    except NameError:
+        sys.displayhook = display_hook
+    else:
+        from IPython.core.formatters import BaseFormatter
+
+        class RichFormatter(BaseFormatter):  # type: ignore[misc]
+            pprint: bool = True
+
+            def __call__(self, value: Any) -> Any:
+                if self.pprint:
+                    return _ipy_display_hook(
+                        value,
+                        console=console,
+                        overflow=overflow,
+                        indent_guides=indent_guides,
+                        max_length=max_length,
+                        max_string=max_string,
+                        max_depth=max_depth,
+                        expand_all=expand_all,
+                    )
+                else:
+                    return repr(value)
+
+        # replace plain text formatter with rich formatter
+        rich_formatter = RichFormatter()
+        ip.display_formatter.formatters["text/plain"] = rich_formatter
+
+
+class Pretty(JupyterMixin):
+    """A rich renderable that pretty prints an object.
+
+    Args:
+        _object (Any): An object to pretty print.
+        highlighter (HighlighterType, optional): Highlighter object to apply to result, or None for ReprHighlighter. Defaults to None.
+        indent_size (int, optional): Number of spaces in indent. Defaults to 4.
+        justify (JustifyMethod, optional): Justify method, or None for default. Defaults to None.
+        overflow (OverflowMethod, optional): Overflow method, or None for default. Defaults to None.
+        no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to False.
+        indent_guides (bool, optional): Enable indentation guides. Defaults to False.
+        max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+            Defaults to None.
+        max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
+        max_depth (int, optional): Maximum depth of nested data structures, or None for no maximum. Defaults to None.
+        expand_all (bool, optional): Expand all containers. Defaults to False.
+        margin (int, optional): Subtrace a margin from width to force containers to expand earlier. Defaults to 0.
+        insert_line (bool, optional): Insert a new line if the output has multiple new lines. Defaults to False.
+    """
+
+    def __init__(
+        self,
+        _object: Any,
+        highlighter: Optional["HighlighterType"] = None,
+        *,
+        indent_size: int = 4,
+        justify: Optional["JustifyMethod"] = None,
+        overflow: Optional["OverflowMethod"] = None,
+        no_wrap: Optional[bool] = False,
+        indent_guides: bool = False,
+        max_length: Optional[int] = None,
+        max_string: Optional[int] = None,
+        max_depth: Optional[int] = None,
+        expand_all: bool = False,
+        margin: int = 0,
+        insert_line: bool = False,
+    ) -> None:
+        self._object = _object
+        self.highlighter = highlighter or ReprHighlighter()
+        self.indent_size = indent_size
+        self.justify: Optional["JustifyMethod"] = justify
+        self.overflow: Optional["OverflowMethod"] = overflow
+        self.no_wrap = no_wrap
+        self.indent_guides = indent_guides
+        self.max_length = max_length
+        self.max_string = max_string
+        self.max_depth = max_depth
+        self.expand_all = expand_all
+        self.margin = margin
+        self.insert_line = insert_line
+
+    def __rich_console__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> "RenderResult":
+        pretty_str = pretty_repr(
+            self._object,
+            max_width=options.max_width - self.margin,
+            indent_size=self.indent_size,
+            max_length=self.max_length,
+            max_string=self.max_string,
+            max_depth=self.max_depth,
+            expand_all=self.expand_all,
+        )
+        pretty_text = Text.from_ansi(
+            pretty_str,
+            justify=self.justify or options.justify,
+            overflow=self.overflow or options.overflow,
+            no_wrap=pick_bool(self.no_wrap, options.no_wrap),
+            style="pretty",
+        )
+        pretty_text = (
+            self.highlighter(pretty_text)
+            if pretty_text
+            else Text(
+                f"{type(self._object)}.__repr__ returned empty string",
+                style="dim italic",
+            )
+        )
+        if self.indent_guides and not options.ascii_only:
+            pretty_text = pretty_text.with_indent_guides(
+                self.indent_size, style="repr.indent"
+            )
+        if self.insert_line and "\n" in pretty_text:
+            yield ""
+        yield pretty_text
+
+    def __rich_measure__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> "Measurement":
+        pretty_str = pretty_repr(
+            self._object,
+            max_width=options.max_width,
+            indent_size=self.indent_size,
+            max_length=self.max_length,
+            max_string=self.max_string,
+            max_depth=self.max_depth,
+            expand_all=self.expand_all,
+        )
+        text_width = (
+            max(cell_len(line) for line in pretty_str.splitlines()) if pretty_str else 0
+        )
+        return Measurement(text_width, text_width)
+
+
+def _get_braces_for_defaultdict(_object: DefaultDict[Any, Any]) -> Tuple[str, str, str]:
+    return (
+        f"defaultdict({_object.default_factory!r}, {{",
+        "})",
+        f"defaultdict({_object.default_factory!r}, {{}})",
+    )
+
+
+def _get_braces_for_deque(_object: Deque[Any]) -> Tuple[str, str, str]:
+    if _object.maxlen is None:
+        return ("deque([", "])", "deque()")
+    return (
+        "deque([",
+        f"], maxlen={_object.maxlen})",
+        f"deque(maxlen={_object.maxlen})",
+    )
+
+
+def _get_braces_for_array(_object: "array[Any]") -> Tuple[str, str, str]:
+    return (f"array({_object.typecode!r}, [", "])", f"array({_object.typecode!r})")
+
+
+_BRACES: Dict[type, Callable[[Any], Tuple[str, str, str]]] = {
+    os._Environ: lambda _object: ("environ({", "})", "environ({})"),
+    array: _get_braces_for_array,
+    defaultdict: _get_braces_for_defaultdict,
+    Counter: lambda _object: ("Counter({", "})", "Counter()"),
+    deque: _get_braces_for_deque,
+    dict: lambda _object: ("{", "}", "{}"),
+    UserDict: lambda _object: ("{", "}", "{}"),
+    frozenset: lambda _object: ("frozenset({", "})", "frozenset()"),
+    list: lambda _object: ("[", "]", "[]"),
+    UserList: lambda _object: ("[", "]", "[]"),
+    set: lambda _object: ("{", "}", "set()"),
+    tuple: lambda _object: ("(", ")", "()"),
+    MappingProxyType: lambda _object: ("mappingproxy({", "})", "mappingproxy({})"),
+}
+_CONTAINERS = tuple(_BRACES.keys())
+_MAPPING_CONTAINERS = (dict, os._Environ, MappingProxyType, UserDict)
+
+
+def is_expandable(obj: Any) -> bool:
+    """Check if an object may be expanded by pretty print."""
+    return (
+        _safe_isinstance(obj, _CONTAINERS)
+        or (is_dataclass(obj))
+        or (hasattr(obj, "__rich_repr__"))
+        or _is_attr_object(obj)
+    ) and not isclass(obj)
+
+
+@dataclass
+class Node:
+    """A node in a repr tree. May be atomic or a container."""
+
+    key_repr: str = ""
+    value_repr: str = ""
+    open_brace: str = ""
+    close_brace: str = ""
+    empty: str = ""
+    last: bool = False
+    is_tuple: bool = False
+    is_namedtuple: bool = False
+    children: Optional[List["Node"]] = None
+    key_separator: str = ": "
+    separator: str = ", "
+
+    def iter_tokens(self) -> Iterable[str]:
+        """Generate tokens for this node."""
+        if self.key_repr:
+            yield self.key_repr
+            yield self.key_separator
+        if self.value_repr:
+            yield self.value_repr
+        elif self.children is not None:
+            if self.children:
+                yield self.open_brace
+                if self.is_tuple and not self.is_namedtuple and len(self.children) == 1:
+                    yield from self.children[0].iter_tokens()
+                    yield ","
+                else:
+                    for child in self.children:
+                        yield from child.iter_tokens()
+                        if not child.last:
+                            yield self.separator
+                yield self.close_brace
+            else:
+                yield self.empty
+
+    def check_length(self, start_length: int, max_length: int) -> bool:
+        """Check the length fits within a limit.
+
+        Args:
+            start_length (int): Starting length of the line (indent, prefix, suffix).
+            max_length (int): Maximum length.
+
+        Returns:
+            bool: True if the node can be rendered within max length, otherwise False.
+        """
+        total_length = start_length
+        for token in self.iter_tokens():
+            total_length += cell_len(token)
+            if total_length > max_length:
+                return False
+        return True
+
+    def __str__(self) -> str:
+        repr_text = "".join(self.iter_tokens())
+        return repr_text
+
+    def render(
+        self, max_width: int = 80, indent_size: int = 4, expand_all: bool = False
+    ) -> str:
+        """Render the node to a pretty repr.
+
+        Args:
+            max_width (int, optional): Maximum width of the repr. Defaults to 80.
+            indent_size (int, optional): Size of indents. Defaults to 4.
+            expand_all (bool, optional): Expand all levels. Defaults to False.
+
+        Returns:
+            str: A repr string of the original object.
+        """
+        lines = [_Line(node=self, is_root=True)]
+        line_no = 0
+        while line_no < len(lines):
+            line = lines[line_no]
+            if line.expandable and not line.expanded:
+                if expand_all or not line.check_length(max_width):
+                    lines[line_no : line_no + 1] = line.expand(indent_size)
+            line_no += 1
+
+        repr_str = "\n".join(str(line) for line in lines)
+        return repr_str
+
+
+@dataclass
+class _Line:
+    """A line in repr output."""
+
+    parent: Optional["_Line"] = None
+    is_root: bool = False
+    node: Optional[Node] = None
+    text: str = ""
+    suffix: str = ""
+    whitespace: str = ""
+    expanded: bool = False
+    last: bool = False
+
+    @property
+    def expandable(self) -> bool:
+        """Check if the line may be expanded."""
+        return bool(self.node is not None and self.node.children)
+
+    def check_length(self, max_length: int) -> bool:
+        """Check this line fits within a given number of cells."""
+        start_length = (
+            len(self.whitespace) + cell_len(self.text) + cell_len(self.suffix)
+        )
+        assert self.node is not None
+        return self.node.check_length(start_length, max_length)
+
+    def expand(self, indent_size: int) -> Iterable["_Line"]:
+        """Expand this line by adding children on their own line."""
+        node = self.node
+        assert node is not None
+        whitespace = self.whitespace
+        assert node.children
+        if node.key_repr:
+            new_line = yield _Line(
+                text=f"{node.key_repr}{node.key_separator}{node.open_brace}",
+                whitespace=whitespace,
+            )
+        else:
+            new_line = yield _Line(text=node.open_brace, whitespace=whitespace)
+        child_whitespace = self.whitespace + " " * indent_size
+        tuple_of_one = node.is_tuple and len(node.children) == 1
+        for last, child in loop_last(node.children):
+            separator = "," if tuple_of_one else node.separator
+            line = _Line(
+                parent=new_line,
+                node=child,
+                whitespace=child_whitespace,
+                suffix=separator,
+                last=last and not tuple_of_one,
+            )
+            yield line
+
+        yield _Line(
+            text=node.close_brace,
+            whitespace=whitespace,
+            suffix=self.suffix,
+            last=self.last,
+        )
+
+    def __str__(self) -> str:
+        if self.last:
+            return f"{self.whitespace}{self.text}{self.node or ''}"
+        else:
+            return (
+                f"{self.whitespace}{self.text}{self.node or ''}{self.suffix.rstrip()}"
+            )
+
+
+def _is_namedtuple(obj: Any) -> bool:
+    """Checks if an object is most likely a namedtuple. It is possible
+    to craft an object that passes this check and isn't a namedtuple, but
+    there is only a minuscule chance of this happening unintentionally.
+
+    Args:
+        obj (Any): The object to test
+
+    Returns:
+        bool: True if the object is a namedtuple. False otherwise.
+    """
+    try:
+        fields = getattr(obj, "_fields", None)
+    except Exception:
+        # Being very defensive - if we cannot get the attr then its not a namedtuple
+        return False
+    return isinstance(obj, tuple) and isinstance(fields, tuple)
+
+
+def traverse(
+    _object: Any,
+    max_length: Optional[int] = None,
+    max_string: Optional[int] = None,
+    max_depth: Optional[int] = None,
+) -> Node:
+    """Traverse object and generate a tree.
+
+    Args:
+        _object (Any): Object to be traversed.
+        max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+            Defaults to None.
+        max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
+            Defaults to None.
+        max_depth (int, optional): Maximum depth of data structures, or None for no maximum.
+            Defaults to None.
+
+    Returns:
+        Node: The root of a tree structure which can be used to render a pretty repr.
+    """
+
+    def to_repr(obj: Any) -> str:
+        """Get repr string for an object, but catch errors."""
+        if (
+            max_string is not None
+            and _safe_isinstance(obj, (bytes, str))
+            and len(obj) > max_string
+        ):
+            truncated = len(obj) - max_string
+            obj_repr = f"{obj[:max_string]!r}+{truncated}"
+        else:
+            try:
+                obj_repr = repr(obj)
+            except Exception as error:
+                obj_repr = f""
+        return obj_repr
+
+    visited_ids: Set[int] = set()
+    push_visited = visited_ids.add
+    pop_visited = visited_ids.remove
+
+    def _traverse(obj: Any, root: bool = False, depth: int = 0) -> Node:
+        """Walk the object depth first."""
+
+        obj_id = id(obj)
+        if obj_id in visited_ids:
+            # Recursion detected
+            return Node(value_repr="...")
+
+        obj_type = type(obj)
+        children: List[Node]
+        reached_max_depth = max_depth is not None and depth >= max_depth
+
+        def iter_rich_args(rich_args: Any) -> Iterable[Union[Any, Tuple[str, Any]]]:
+            for arg in rich_args:
+                if _safe_isinstance(arg, tuple):
+                    if len(arg) == 3:
+                        key, child, default = arg
+                        if default == child:
+                            continue
+                        yield key, child
+                    elif len(arg) == 2:
+                        key, child = arg
+                        yield key, child
+                    elif len(arg) == 1:
+                        yield arg[0]
+                else:
+                    yield arg
+
+        try:
+            fake_attributes = hasattr(
+                obj, "awehoi234_wdfjwljet234_234wdfoijsdfmmnxpi492"
+            )
+        except Exception:
+            fake_attributes = False
+
+        rich_repr_result: Optional[RichReprResult] = None
+        if not fake_attributes:
+            try:
+                if hasattr(obj, "__rich_repr__") and not isclass(obj):
+                    rich_repr_result = obj.__rich_repr__()
+            except Exception:
+                pass
+
+        if rich_repr_result is not None:
+            push_visited(obj_id)
+            angular = getattr(obj.__rich_repr__, "angular", False)
+            args = list(iter_rich_args(rich_repr_result))
+            class_name = obj.__class__.__name__
+
+            if args:
+                children = []
+                append = children.append
+
+                if reached_max_depth:
+                    if angular:
+                        node = Node(value_repr=f"<{class_name}...>")
+                    else:
+                        node = Node(value_repr=f"{class_name}(...)")
+                else:
+                    if angular:
+                        node = Node(
+                            open_brace=f"<{class_name} ",
+                            close_brace=">",
+                            children=children,
+                            last=root,
+                            separator=" ",
+                        )
+                    else:
+                        node = Node(
+                            open_brace=f"{class_name}(",
+                            close_brace=")",
+                            children=children,
+                            last=root,
+                        )
+                    for last, arg in loop_last(args):
+                        if _safe_isinstance(arg, tuple):
+                            key, child = arg
+                            child_node = _traverse(child, depth=depth + 1)
+                            child_node.last = last
+                            child_node.key_repr = key
+                            child_node.key_separator = "="
+                            append(child_node)
+                        else:
+                            child_node = _traverse(arg, depth=depth + 1)
+                            child_node.last = last
+                            append(child_node)
+            else:
+                node = Node(
+                    value_repr=f"<{class_name}>" if angular else f"{class_name}()",
+                    children=[],
+                    last=root,
+                )
+            pop_visited(obj_id)
+        elif _is_attr_object(obj) and not fake_attributes:
+            push_visited(obj_id)
+            children = []
+            append = children.append
+
+            attr_fields = _get_attr_fields(obj)
+            if attr_fields:
+                if reached_max_depth:
+                    node = Node(value_repr=f"{obj.__class__.__name__}(...)")
+                else:
+                    node = Node(
+                        open_brace=f"{obj.__class__.__name__}(",
+                        close_brace=")",
+                        children=children,
+                        last=root,
+                    )
+
+                    def iter_attrs() -> (
+                        Iterable[Tuple[str, Any, Optional[Callable[[Any], str]]]]
+                    ):
+                        """Iterate over attr fields and values."""
+                        for attr in attr_fields:
+                            if attr.repr:
+                                try:
+                                    value = getattr(obj, attr.name)
+                                except Exception as error:
+                                    # Can happen, albeit rarely
+                                    yield (attr.name, error, None)
+                                else:
+                                    yield (
+                                        attr.name,
+                                        value,
+                                        attr.repr if callable(attr.repr) else None,
+                                    )
+
+                    for last, (name, value, repr_callable) in loop_last(iter_attrs()):
+                        if repr_callable:
+                            child_node = Node(value_repr=str(repr_callable(value)))
+                        else:
+                            child_node = _traverse(value, depth=depth + 1)
+                        child_node.last = last
+                        child_node.key_repr = name
+                        child_node.key_separator = "="
+                        append(child_node)
+            else:
+                node = Node(
+                    value_repr=f"{obj.__class__.__name__}()", children=[], last=root
+                )
+            pop_visited(obj_id)
+        elif (
+            is_dataclass(obj)
+            and not _safe_isinstance(obj, type)
+            and not fake_attributes
+            and _is_dataclass_repr(obj)
+        ):
+            push_visited(obj_id)
+            children = []
+            append = children.append
+            if reached_max_depth:
+                node = Node(value_repr=f"{obj.__class__.__name__}(...)")
+            else:
+                node = Node(
+                    open_brace=f"{obj.__class__.__name__}(",
+                    close_brace=")",
+                    children=children,
+                    last=root,
+                    empty=f"{obj.__class__.__name__}()",
+                )
+
+                for last, field in loop_last(
+                    field
+                    for field in fields(obj)
+                    if field.repr and hasattr(obj, field.name)
+                ):
+                    child_node = _traverse(getattr(obj, field.name), depth=depth + 1)
+                    child_node.key_repr = field.name
+                    child_node.last = last
+                    child_node.key_separator = "="
+                    append(child_node)
+
+            pop_visited(obj_id)
+        elif _is_namedtuple(obj) and _has_default_namedtuple_repr(obj):
+            push_visited(obj_id)
+            class_name = obj.__class__.__name__
+            if reached_max_depth:
+                # If we've reached the max depth, we still show the class name, but not its contents
+                node = Node(
+                    value_repr=f"{class_name}(...)",
+                )
+            else:
+                children = []
+                append = children.append
+                node = Node(
+                    open_brace=f"{class_name}(",
+                    close_brace=")",
+                    children=children,
+                    empty=f"{class_name}()",
+                )
+                for last, (key, value) in loop_last(obj._asdict().items()):
+                    child_node = _traverse(value, depth=depth + 1)
+                    child_node.key_repr = key
+                    child_node.last = last
+                    child_node.key_separator = "="
+                    append(child_node)
+            pop_visited(obj_id)
+        elif _safe_isinstance(obj, _CONTAINERS):
+            for container_type in _CONTAINERS:
+                if _safe_isinstance(obj, container_type):
+                    obj_type = container_type
+                    break
+
+            push_visited(obj_id)
+
+            open_brace, close_brace, empty = _BRACES[obj_type](obj)
+
+            if reached_max_depth:
+                node = Node(value_repr=f"{open_brace}...{close_brace}")
+            elif obj_type.__repr__ != type(obj).__repr__:
+                node = Node(value_repr=to_repr(obj), last=root)
+            elif obj:
+                children = []
+                node = Node(
+                    open_brace=open_brace,
+                    close_brace=close_brace,
+                    children=children,
+                    last=root,
+                )
+                append = children.append
+                num_items = len(obj)
+                last_item_index = num_items - 1
+
+                if _safe_isinstance(obj, _MAPPING_CONTAINERS):
+                    iter_items = iter(obj.items())
+                    if max_length is not None:
+                        iter_items = islice(iter_items, max_length)
+                    for index, (key, child) in enumerate(iter_items):
+                        child_node = _traverse(child, depth=depth + 1)
+                        child_node.key_repr = to_repr(key)
+                        child_node.last = index == last_item_index
+                        append(child_node)
+                else:
+                    iter_values = iter(obj)
+                    if max_length is not None:
+                        iter_values = islice(iter_values, max_length)
+                    for index, child in enumerate(iter_values):
+                        child_node = _traverse(child, depth=depth + 1)
+                        child_node.last = index == last_item_index
+                        append(child_node)
+                if max_length is not None and num_items > max_length:
+                    append(Node(value_repr=f"... +{num_items - max_length}", last=True))
+            else:
+                node = Node(empty=empty, children=[], last=root)
+
+            pop_visited(obj_id)
+        else:
+            node = Node(value_repr=to_repr(obj), last=root)
+        node.is_tuple = type(obj) == tuple
+        node.is_namedtuple = _is_namedtuple(obj)
+        return node
+
+    node = _traverse(_object, root=True)
+    return node
+
+
+def pretty_repr(
+    _object: Any,
+    *,
+    max_width: int = 80,
+    indent_size: int = 4,
+    max_length: Optional[int] = None,
+    max_string: Optional[int] = None,
+    max_depth: Optional[int] = None,
+    expand_all: bool = False,
+) -> str:
+    """Prettify repr string by expanding on to new lines to fit within a given width.
+
+    Args:
+        _object (Any): Object to repr.
+        max_width (int, optional): Desired maximum width of repr string. Defaults to 80.
+        indent_size (int, optional): Number of spaces to indent. Defaults to 4.
+        max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+            Defaults to None.
+        max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
+            Defaults to None.
+        max_depth (int, optional): Maximum depth of nested data structure, or None for no depth.
+            Defaults to None.
+        expand_all (bool, optional): Expand all containers regardless of available width. Defaults to False.
+
+    Returns:
+        str: A possibly multi-line representation of the object.
+    """
+
+    if _safe_isinstance(_object, Node):
+        node = _object
+    else:
+        node = traverse(
+            _object, max_length=max_length, max_string=max_string, max_depth=max_depth
+        )
+    repr_str: str = node.render(
+        max_width=max_width, indent_size=indent_size, expand_all=expand_all
+    )
+    return repr_str
+
+
+def pprint(
+    _object: Any,
+    *,
+    console: Optional["Console"] = None,
+    indent_guides: bool = True,
+    max_length: Optional[int] = None,
+    max_string: Optional[int] = None,
+    max_depth: Optional[int] = None,
+    expand_all: bool = False,
+) -> None:
+    """A convenience function for pretty printing.
+
+    Args:
+        _object (Any): Object to pretty print.
+        console (Console, optional): Console instance, or None to use default. Defaults to None.
+        max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+            Defaults to None.
+        max_string (int, optional): Maximum length of strings before truncating, or None to disable. Defaults to None.
+        max_depth (int, optional): Maximum depth for nested data structures, or None for unlimited depth. Defaults to None.
+        indent_guides (bool, optional): Enable indentation guides. Defaults to True.
+        expand_all (bool, optional): Expand all containers. Defaults to False.
+    """
+    _console = get_console() if console is None else console
+    _console.print(
+        Pretty(
+            _object,
+            max_length=max_length,
+            max_string=max_string,
+            max_depth=max_depth,
+            indent_guides=indent_guides,
+            expand_all=expand_all,
+            overflow="ignore",
+        ),
+        soft_wrap=True,
+    )
+
+
+if __name__ == "__main__":  # pragma: no cover
+
+    class BrokenRepr:
+        def __repr__(self) -> str:
+            1 / 0
+            return "this will fail"
+
+    from typing import NamedTuple
+
+    class StockKeepingUnit(NamedTuple):
+        name: str
+        description: str
+        price: float
+        category: str
+        reviews: List[str]
+
+    d = defaultdict(int)
+    d["foo"] = 5
+    data = {
+        "foo": [
+            1,
+            "Hello World!",
+            100.123,
+            323.232,
+            432324.0,
+            {5, 6, 7, (1, 2, 3, 4), 8},
+        ],
+        "bar": frozenset({1, 2, 3}),
+        "defaultdict": defaultdict(
+            list, {"crumble": ["apple", "rhubarb", "butter", "sugar", "flour"]}
+        ),
+        "counter": Counter(
+            [
+                "apple",
+                "orange",
+                "pear",
+                "kumquat",
+                "kumquat",
+                "durian" * 100,
+            ]
+        ),
+        "atomic": (False, True, None),
+        "namedtuple": StockKeepingUnit(
+            "Sparkling British Spring Water",
+            "Carbonated spring water",
+            0.9,
+            "water",
+            ["its amazing!", "its terrible!"],
+        ),
+        "Broken": BrokenRepr(),
+    }
+    data["foo"].append(data)  # type: ignore[attr-defined]
+
+    from rich import print
+
+    print(Pretty(data, indent_guides=True, max_string=20))
+
+    class Thing:
+        def __repr__(self) -> str:
+            return "Hello\x1b[38;5;239m World!"
+
+    print(Pretty(Thing()))
diff --git a/local_packages/rich/progress.py b/local_packages/rich/progress.py
new file mode 100644
index 0000000..c2de125
--- /dev/null
+++ b/local_packages/rich/progress.py
@@ -0,0 +1,1716 @@
+from __future__ import annotations
+
+import io
+import typing
+import warnings
+from abc import ABC, abstractmethod
+from collections import deque
+from dataclasses import dataclass, field
+from datetime import timedelta
+from io import RawIOBase, UnsupportedOperation
+from math import ceil
+from mmap import mmap
+from operator import length_hint
+from os import PathLike, stat
+from threading import Event, RLock, Thread
+from types import TracebackType
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    BinaryIO,
+    Callable,
+    ContextManager,
+    Deque,
+    Dict,
+    Generic,
+    Iterable,
+    List,
+    Literal,
+    NamedTuple,
+    NewType,
+    Optional,
+    TextIO,
+    Tuple,
+    Type,
+    TypeVar,
+    Union,
+)
+
+if TYPE_CHECKING:
+    # Can be replaced with `from typing import Self` in Python 3.11+
+    from typing_extensions import Self  # pragma: no cover
+
+from . import filesize, get_console
+from .console import Console, Group, JustifyMethod, RenderableType
+from .highlighter import Highlighter
+from .jupyter import JupyterMixin
+from .live import Live
+from .progress_bar import ProgressBar
+from .spinner import Spinner
+from .style import StyleType
+from .table import Column, Table
+from .text import Text, TextType
+
+TaskID = NewType("TaskID", int)
+
+ProgressType = TypeVar("ProgressType")
+
+GetTimeCallable = Callable[[], float]
+
+
+_I = typing.TypeVar("_I", TextIO, BinaryIO)
+
+
+class _TrackThread(Thread):
+    """A thread to periodically update progress."""
+
+    def __init__(self, progress: "Progress", task_id: "TaskID", update_period: float):
+        self.progress = progress
+        self.task_id = task_id
+        self.update_period = update_period
+        self.done = Event()
+
+        self.completed = 0
+        super().__init__(daemon=True)
+
+    def run(self) -> None:
+        task_id = self.task_id
+        advance = self.progress.advance
+        update_period = self.update_period
+        last_completed = 0
+        wait = self.done.wait
+        while not wait(update_period) and self.progress.live.is_started:
+            completed = self.completed
+            if last_completed != completed:
+                advance(task_id, completed - last_completed)
+                last_completed = completed
+
+        self.progress.update(self.task_id, completed=self.completed, refresh=True)
+
+    def __enter__(self) -> "_TrackThread":
+        self.start()
+        return self
+
+    def __exit__(
+        self,
+        exc_type: Optional[Type[BaseException]],
+        exc_val: Optional[BaseException],
+        exc_tb: Optional[TracebackType],
+    ) -> None:
+        self.done.set()
+        self.join()
+
+
+def track(
+    sequence: Iterable[ProgressType],
+    description: str = "Working...",
+    total: Optional[float] = None,
+    completed: int = 0,
+    auto_refresh: bool = True,
+    console: Optional[Console] = None,
+    transient: bool = False,
+    get_time: Optional[Callable[[], float]] = None,
+    refresh_per_second: float = 10,
+    style: StyleType = "bar.back",
+    complete_style: StyleType = "bar.complete",
+    finished_style: StyleType = "bar.finished",
+    pulse_style: StyleType = "bar.pulse",
+    update_period: float = 0.1,
+    disable: bool = False,
+    show_speed: bool = True,
+) -> Iterable[ProgressType]:
+    """Track progress by iterating over a sequence.
+
+    You can also track progress of an iterable, which might require that you additionally specify ``total``.
+
+    Args:
+        sequence (Iterable[ProgressType]): Values you wish to iterate over and track progress.
+        description (str, optional): Description of task show next to progress bar. Defaults to "Working".
+        total: (float, optional): Total number of steps. Default is len(sequence).
+        completed (int, optional): Number of steps completed so far. Defaults to 0.
+        auto_refresh (bool, optional): Automatic refresh, disable to force a refresh after each iteration. Default is True.
+        transient: (bool, optional): Clear the progress on exit. Defaults to False.
+        console (Console, optional): Console to write to. Default creates internal Console instance.
+        refresh_per_second (float): Number of times per second to refresh the progress information. Defaults to 10.
+        style (StyleType, optional): Style for the bar background. Defaults to "bar.back".
+        complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete".
+        finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished".
+        pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse".
+        update_period (float, optional): Minimum time (in seconds) between calls to update(). Defaults to 0.1.
+        disable (bool, optional): Disable display of progress.
+        show_speed (bool, optional): Show speed if total isn't known. Defaults to True.
+    Returns:
+        Iterable[ProgressType]: An iterable of the values in the sequence.
+
+    """
+
+    columns: List["ProgressColumn"] = (
+        [TextColumn("[progress.description]{task.description}")] if description else []
+    )
+    columns.extend(
+        (
+            BarColumn(
+                style=style,
+                complete_style=complete_style,
+                finished_style=finished_style,
+                pulse_style=pulse_style,
+            ),
+            TaskProgressColumn(show_speed=show_speed),
+            TimeRemainingColumn(elapsed_when_finished=True),
+        )
+    )
+    progress = Progress(
+        *columns,
+        auto_refresh=auto_refresh,
+        console=console,
+        transient=transient,
+        get_time=get_time,
+        refresh_per_second=refresh_per_second or 10,
+        disable=disable,
+    )
+
+    with progress:
+        yield from progress.track(
+            sequence,
+            total=total,
+            completed=completed,
+            description=description,
+            update_period=update_period,
+        )
+
+
+class _Reader(RawIOBase, BinaryIO):
+    """A reader that tracks progress while it's being read from."""
+
+    def __init__(
+        self,
+        handle: BinaryIO,
+        progress: "Progress",
+        task: TaskID,
+        close_handle: bool = True,
+    ) -> None:
+        self.handle = handle
+        self.progress = progress
+        self.task = task
+        self.close_handle = close_handle
+        self._closed = False
+
+    def __enter__(self) -> "_Reader":
+        self.handle.__enter__()
+        return self
+
+    def __exit__(
+        self,
+        exc_type: Optional[Type[BaseException]],
+        exc_val: Optional[BaseException],
+        exc_tb: Optional[TracebackType],
+    ) -> None:
+        self.close()
+
+    def __iter__(self) -> BinaryIO:
+        return self
+
+    def __next__(self) -> bytes:
+        line = next(self.handle)
+        self.progress.advance(self.task, advance=len(line))
+        return line
+
+    @property
+    def closed(self) -> bool:
+        return self._closed
+
+    def fileno(self) -> int:
+        return self.handle.fileno()
+
+    def isatty(self) -> bool:
+        return self.handle.isatty()
+
+    @property
+    def mode(self) -> str:
+        return self.handle.mode
+
+    @property
+    def name(self) -> str:
+        return self.handle.name
+
+    def readable(self) -> bool:
+        return self.handle.readable()
+
+    def seekable(self) -> bool:
+        return self.handle.seekable()
+
+    def writable(self) -> bool:
+        return False
+
+    def read(self, size: int = -1) -> bytes:
+        block = self.handle.read(size)
+        self.progress.advance(self.task, advance=len(block))
+        return block
+
+    def readinto(self, b: Union[bytearray, memoryview, mmap]):  # type: ignore[no-untyped-def, override]
+        n = self.handle.readinto(b)  # type: ignore[attr-defined]
+        self.progress.advance(self.task, advance=n)
+        return n
+
+    def readline(self, size: int = -1) -> bytes:  # type: ignore[override]
+        line = self.handle.readline(size)
+        self.progress.advance(self.task, advance=len(line))
+        return line
+
+    def readlines(self, hint: int = -1) -> List[bytes]:
+        lines = self.handle.readlines(hint)
+        self.progress.advance(self.task, advance=sum(map(len, lines)))
+        return lines
+
+    def close(self) -> None:
+        if self.close_handle:
+            self.handle.close()
+        self._closed = True
+
+    def seek(self, offset: int, whence: int = 0) -> int:
+        pos = self.handle.seek(offset, whence)
+        self.progress.update(self.task, completed=pos)
+        return pos
+
+    def tell(self) -> int:
+        return self.handle.tell()
+
+    def write(self, s: Any) -> int:
+        raise UnsupportedOperation("write")
+
+    def writelines(self, lines: Iterable[Any]) -> None:
+        raise UnsupportedOperation("writelines")
+
+
+class _ReadContext(ContextManager[_I], Generic[_I]):
+    """A utility class to handle a context for both a reader and a progress."""
+
+    def __init__(self, progress: "Progress", reader: _I) -> None:
+        self.progress = progress
+        self.reader: _I = reader
+
+    def __enter__(self) -> _I:
+        self.progress.start()
+        return self.reader.__enter__()
+
+    def __exit__(
+        self,
+        exc_type: Optional[Type[BaseException]],
+        exc_val: Optional[BaseException],
+        exc_tb: Optional[TracebackType],
+    ) -> None:
+        self.progress.stop()
+        self.reader.__exit__(exc_type, exc_val, exc_tb)
+
+
+def wrap_file(
+    file: BinaryIO,
+    total: int,
+    *,
+    description: str = "Reading...",
+    auto_refresh: bool = True,
+    console: Optional[Console] = None,
+    transient: bool = False,
+    get_time: Optional[Callable[[], float]] = None,
+    refresh_per_second: float = 10,
+    style: StyleType = "bar.back",
+    complete_style: StyleType = "bar.complete",
+    finished_style: StyleType = "bar.finished",
+    pulse_style: StyleType = "bar.pulse",
+    disable: bool = False,
+) -> ContextManager[BinaryIO]:
+    """Read bytes from a file while tracking progress.
+
+    Args:
+        file (Union[str, PathLike[str], BinaryIO]): The path to the file to read, or a file-like object in binary mode.
+        total (int): Total number of bytes to read.
+        description (str, optional): Description of task show next to progress bar. Defaults to "Reading".
+        auto_refresh (bool, optional): Automatic refresh, disable to force a refresh after each iteration. Default is True.
+        transient: (bool, optional): Clear the progress on exit. Defaults to False.
+        console (Console, optional): Console to write to. Default creates internal Console instance.
+        refresh_per_second (float): Number of times per second to refresh the progress information. Defaults to 10.
+        style (StyleType, optional): Style for the bar background. Defaults to "bar.back".
+        complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete".
+        finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished".
+        pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse".
+        disable (bool, optional): Disable display of progress.
+    Returns:
+        ContextManager[BinaryIO]: A context manager yielding a progress reader.
+
+    """
+
+    columns: List["ProgressColumn"] = (
+        [TextColumn("[progress.description]{task.description}")] if description else []
+    )
+    columns.extend(
+        (
+            BarColumn(
+                style=style,
+                complete_style=complete_style,
+                finished_style=finished_style,
+                pulse_style=pulse_style,
+            ),
+            DownloadColumn(),
+            TimeRemainingColumn(),
+        )
+    )
+    progress = Progress(
+        *columns,
+        auto_refresh=auto_refresh,
+        console=console,
+        transient=transient,
+        get_time=get_time,
+        refresh_per_second=refresh_per_second or 10,
+        disable=disable,
+    )
+
+    reader = progress.wrap_file(file, total=total, description=description)
+    return _ReadContext(progress, reader)
+
+
+@typing.overload
+def open(
+    file: Union[str, "PathLike[str]", bytes],
+    mode: Union[Literal["rt"], Literal["r"]],
+    buffering: int = -1,
+    encoding: Optional[str] = None,
+    errors: Optional[str] = None,
+    newline: Optional[str] = None,
+    *,
+    total: Optional[int] = None,
+    description: str = "Reading...",
+    auto_refresh: bool = True,
+    console: Optional[Console] = None,
+    transient: bool = False,
+    get_time: Optional[Callable[[], float]] = None,
+    refresh_per_second: float = 10,
+    style: StyleType = "bar.back",
+    complete_style: StyleType = "bar.complete",
+    finished_style: StyleType = "bar.finished",
+    pulse_style: StyleType = "bar.pulse",
+    disable: bool = False,
+) -> ContextManager[TextIO]:
+    pass
+
+
+@typing.overload
+def open(
+    file: Union[str, "PathLike[str]", bytes],
+    mode: Literal["rb"],
+    buffering: int = -1,
+    encoding: Optional[str] = None,
+    errors: Optional[str] = None,
+    newline: Optional[str] = None,
+    *,
+    total: Optional[int] = None,
+    description: str = "Reading...",
+    auto_refresh: bool = True,
+    console: Optional[Console] = None,
+    transient: bool = False,
+    get_time: Optional[Callable[[], float]] = None,
+    refresh_per_second: float = 10,
+    style: StyleType = "bar.back",
+    complete_style: StyleType = "bar.complete",
+    finished_style: StyleType = "bar.finished",
+    pulse_style: StyleType = "bar.pulse",
+    disable: bool = False,
+) -> ContextManager[BinaryIO]:
+    pass
+
+
+def open(
+    file: Union[str, "PathLike[str]", bytes],
+    mode: Union[Literal["rb"], Literal["rt"], Literal["r"]] = "r",
+    buffering: int = -1,
+    encoding: Optional[str] = None,
+    errors: Optional[str] = None,
+    newline: Optional[str] = None,
+    *,
+    total: Optional[int] = None,
+    description: str = "Reading...",
+    auto_refresh: bool = True,
+    console: Optional[Console] = None,
+    transient: bool = False,
+    get_time: Optional[Callable[[], float]] = None,
+    refresh_per_second: float = 10,
+    style: StyleType = "bar.back",
+    complete_style: StyleType = "bar.complete",
+    finished_style: StyleType = "bar.finished",
+    pulse_style: StyleType = "bar.pulse",
+    disable: bool = False,
+) -> Union[ContextManager[BinaryIO], ContextManager[TextIO]]:
+    """Read bytes from a file while tracking progress.
+
+    Args:
+        path (Union[str, PathLike[str], BinaryIO]): The path to the file to read, or a file-like object in binary mode.
+        mode (str): The mode to use to open the file. Only supports "r", "rb" or "rt".
+        buffering (int): The buffering strategy to use, see :func:`io.open`.
+        encoding (str, optional): The encoding to use when reading in text mode, see :func:`io.open`.
+        errors (str, optional): The error handling strategy for decoding errors, see :func:`io.open`.
+        newline (str, optional): The strategy for handling newlines in text mode, see :func:`io.open`
+        total: (int, optional): Total number of bytes to read. Must be provided if reading from a file handle. Default for a path is os.stat(file).st_size.
+        description (str, optional): Description of task show next to progress bar. Defaults to "Reading".
+        auto_refresh (bool, optional): Automatic refresh, disable to force a refresh after each iteration. Default is True.
+        transient: (bool, optional): Clear the progress on exit. Defaults to False.
+        console (Console, optional): Console to write to. Default creates internal Console instance.
+        refresh_per_second (float): Number of times per second to refresh the progress information. Defaults to 10.
+        style (StyleType, optional): Style for the bar background. Defaults to "bar.back".
+        complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete".
+        finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished".
+        pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse".
+        disable (bool, optional): Disable display of progress.
+        encoding (str, optional): The encoding to use when reading in text mode.
+
+    Returns:
+        ContextManager[BinaryIO]: A context manager yielding a progress reader.
+
+    """
+
+    columns: List["ProgressColumn"] = (
+        [TextColumn("[progress.description]{task.description}")] if description else []
+    )
+    columns.extend(
+        (
+            BarColumn(
+                style=style,
+                complete_style=complete_style,
+                finished_style=finished_style,
+                pulse_style=pulse_style,
+            ),
+            DownloadColumn(),
+            TimeRemainingColumn(),
+        )
+    )
+    progress = Progress(
+        *columns,
+        auto_refresh=auto_refresh,
+        console=console,
+        transient=transient,
+        get_time=get_time,
+        refresh_per_second=refresh_per_second or 10,
+        disable=disable,
+    )
+
+    reader = progress.open(
+        file,
+        mode=mode,
+        buffering=buffering,
+        encoding=encoding,
+        errors=errors,
+        newline=newline,
+        total=total,
+        description=description,
+    )
+    return _ReadContext(progress, reader)  # type: ignore[return-value, type-var]
+
+
+class ProgressColumn(ABC):
+    """Base class for a widget to use in progress display."""
+
+    max_refresh: Optional[float] = None
+
+    def __init__(self, table_column: Optional[Column] = None) -> None:
+        self._table_column = table_column
+        self._renderable_cache: Dict[TaskID, Tuple[float, RenderableType]] = {}
+        self._update_time: Optional[float] = None
+
+    def get_table_column(self) -> Column:
+        """Get a table column, used to build tasks table."""
+        return self._table_column or Column()
+
+    def __call__(self, task: "Task") -> RenderableType:
+        """Called by the Progress object to return a renderable for the given task.
+
+        Args:
+            task (Task): An object containing information regarding the task.
+
+        Returns:
+            RenderableType: Anything renderable (including str).
+        """
+        current_time = task.get_time()
+        if self.max_refresh is not None and not task.completed:
+            try:
+                timestamp, renderable = self._renderable_cache[task.id]
+            except KeyError:
+                pass
+            else:
+                if timestamp + self.max_refresh > current_time:
+                    return renderable
+
+        renderable = self.render(task)
+        self._renderable_cache[task.id] = (current_time, renderable)
+        return renderable
+
+    @abstractmethod
+    def render(self, task: "Task") -> RenderableType:
+        """Should return a renderable object."""
+
+
+class RenderableColumn(ProgressColumn):
+    """A column to insert an arbitrary column.
+
+    Args:
+        renderable (RenderableType, optional): Any renderable. Defaults to empty string.
+    """
+
+    def __init__(
+        self, renderable: RenderableType = "", *, table_column: Optional[Column] = None
+    ):
+        self.renderable = renderable
+        super().__init__(table_column=table_column)
+
+    def render(self, task: "Task") -> RenderableType:
+        return self.renderable
+
+
+class SpinnerColumn(ProgressColumn):
+    """A column with a 'spinner' animation.
+
+    Args:
+        spinner_name (str, optional): Name of spinner animation. Defaults to "dots".
+        style (StyleType, optional): Style of spinner. Defaults to "progress.spinner".
+        speed (float, optional): Speed factor of spinner. Defaults to 1.0.
+        finished_text (TextType, optional): Text used when task is finished. Defaults to " ".
+    """
+
+    def __init__(
+        self,
+        spinner_name: str = "dots",
+        style: Optional[StyleType] = "progress.spinner",
+        speed: float = 1.0,
+        finished_text: TextType = " ",
+        table_column: Optional[Column] = None,
+    ):
+        self.spinner = Spinner(spinner_name, style=style, speed=speed)
+        self.finished_text = (
+            Text.from_markup(finished_text)
+            if isinstance(finished_text, str)
+            else finished_text
+        )
+        super().__init__(table_column=table_column)
+
+    def set_spinner(
+        self,
+        spinner_name: str,
+        spinner_style: Optional[StyleType] = "progress.spinner",
+        speed: float = 1.0,
+    ) -> None:
+        """Set a new spinner.
+
+        Args:
+            spinner_name (str): Spinner name, see python -m rich.spinner.
+            spinner_style (Optional[StyleType], optional): Spinner style. Defaults to "progress.spinner".
+            speed (float, optional): Speed factor of spinner. Defaults to 1.0.
+        """
+        self.spinner = Spinner(spinner_name, style=spinner_style, speed=speed)
+
+    def render(self, task: "Task") -> RenderableType:
+        text = (
+            self.finished_text
+            if task.finished
+            else self.spinner.render(task.get_time())
+        )
+        return text
+
+
+class TextColumn(ProgressColumn):
+    """A column containing text."""
+
+    def __init__(
+        self,
+        text_format: str,
+        style: StyleType = "none",
+        justify: JustifyMethod = "left",
+        markup: bool = True,
+        highlighter: Optional[Highlighter] = None,
+        table_column: Optional[Column] = None,
+    ) -> None:
+        self.text_format = text_format
+        self.justify: JustifyMethod = justify
+        self.style = style
+        self.markup = markup
+        self.highlighter = highlighter
+        super().__init__(table_column=table_column or Column(no_wrap=True))
+
+    def render(self, task: "Task") -> Text:
+        _text = self.text_format.format(task=task)
+        if self.markup:
+            text = Text.from_markup(_text, style=self.style, justify=self.justify)
+        else:
+            text = Text(_text, style=self.style, justify=self.justify)
+        if self.highlighter:
+            self.highlighter.highlight(text)
+        return text
+
+
+class BarColumn(ProgressColumn):
+    """Renders a visual progress bar.
+
+    Args:
+        bar_width (Optional[int], optional): Width of bar or None for full width. Defaults to 40.
+        style (StyleType, optional): Style for the bar background. Defaults to "bar.back".
+        complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete".
+        finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished".
+        pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse".
+    """
+
+    def __init__(
+        self,
+        bar_width: Optional[int] = 40,
+        style: StyleType = "bar.back",
+        complete_style: StyleType = "bar.complete",
+        finished_style: StyleType = "bar.finished",
+        pulse_style: StyleType = "bar.pulse",
+        table_column: Optional[Column] = None,
+    ) -> None:
+        self.bar_width = bar_width
+        self.style = style
+        self.complete_style = complete_style
+        self.finished_style = finished_style
+        self.pulse_style = pulse_style
+        super().__init__(table_column=table_column)
+
+    def render(self, task: "Task") -> ProgressBar:
+        """Gets a progress bar widget for a task."""
+        return ProgressBar(
+            total=max(0, task.total) if task.total is not None else None,
+            completed=max(0, task.completed),
+            width=None if self.bar_width is None else max(1, self.bar_width),
+            pulse=not task.started,
+            animation_time=task.get_time(),
+            style=self.style,
+            complete_style=self.complete_style,
+            finished_style=self.finished_style,
+            pulse_style=self.pulse_style,
+        )
+
+
+class TimeElapsedColumn(ProgressColumn):
+    """Renders time elapsed."""
+
+    def render(self, task: "Task") -> Text:
+        """Show time elapsed."""
+        elapsed = task.finished_time if task.finished else task.elapsed
+        if elapsed is None:
+            return Text("-:--:--", style="progress.elapsed")
+        delta = timedelta(seconds=max(0, int(elapsed)))
+        return Text(str(delta), style="progress.elapsed")
+
+
+class TaskProgressColumn(TextColumn):
+    """Show task progress as a percentage.
+
+    Args:
+        text_format (str, optional): Format for percentage display. Defaults to "[progress.percentage]{task.percentage:>3.0f}%".
+        text_format_no_percentage (str, optional): Format if percentage is unknown. Defaults to "".
+        style (StyleType, optional): Style of output. Defaults to "none".
+        justify (JustifyMethod, optional): Text justification. Defaults to "left".
+        markup (bool, optional): Enable markup. Defaults to True.
+        highlighter (Optional[Highlighter], optional): Highlighter to apply to output. Defaults to None.
+        table_column (Optional[Column], optional): Table Column to use. Defaults to None.
+        show_speed (bool, optional): Show speed if total is unknown. Defaults to False.
+    """
+
+    def __init__(
+        self,
+        text_format: str = "[progress.percentage]{task.percentage:>3.0f}%",
+        text_format_no_percentage: str = "",
+        style: StyleType = "none",
+        justify: JustifyMethod = "left",
+        markup: bool = True,
+        highlighter: Optional[Highlighter] = None,
+        table_column: Optional[Column] = None,
+        show_speed: bool = False,
+    ) -> None:
+        self.text_format_no_percentage = text_format_no_percentage
+        self.show_speed = show_speed
+        super().__init__(
+            text_format=text_format,
+            style=style,
+            justify=justify,
+            markup=markup,
+            highlighter=highlighter,
+            table_column=table_column,
+        )
+
+    @classmethod
+    def render_speed(cls, speed: Optional[float]) -> Text:
+        """Render the speed in iterations per second.
+
+        Args:
+            task (Task): A Task object.
+
+        Returns:
+            Text: Text object containing the task speed.
+        """
+        if speed is None:
+            return Text("", style="progress.percentage")
+        unit, suffix = filesize.pick_unit_and_suffix(
+            int(speed),
+            ["", "×10³", "×10⁶", "×10⁹", "×10¹²"],
+            1000,
+        )
+        data_speed = speed / unit
+        return Text(f"{data_speed:.1f}{suffix} it/s", style="progress.percentage")
+
+    def render(self, task: "Task") -> Text:
+        if task.total is None and self.show_speed:
+            return self.render_speed(task.finished_speed or task.speed)
+        text_format = (
+            self.text_format_no_percentage if task.total is None else self.text_format
+        )
+        _text = text_format.format(task=task)
+        if self.markup:
+            text = Text.from_markup(_text, style=self.style, justify=self.justify)
+        else:
+            text = Text(_text, style=self.style, justify=self.justify)
+        if self.highlighter:
+            self.highlighter.highlight(text)
+        return text
+
+
+class TimeRemainingColumn(ProgressColumn):
+    """Renders estimated time remaining.
+
+    Args:
+        compact (bool, optional): Render MM:SS when time remaining is less than an hour. Defaults to False.
+        elapsed_when_finished (bool, optional): Render time elapsed when the task is finished. Defaults to False.
+    """
+
+    # Only refresh twice a second to prevent jitter
+    max_refresh = 0.5
+
+    def __init__(
+        self,
+        compact: bool = False,
+        elapsed_when_finished: bool = False,
+        table_column: Optional[Column] = None,
+    ):
+        self.compact = compact
+        self.elapsed_when_finished = elapsed_when_finished
+        super().__init__(table_column=table_column)
+
+    def render(self, task: "Task") -> Text:
+        """Show time remaining."""
+        if self.elapsed_when_finished and task.finished:
+            task_time = task.finished_time
+            style = "progress.elapsed"
+        else:
+            task_time = task.time_remaining
+            style = "progress.remaining"
+
+        if task.total is None:
+            return Text("", style=style)
+
+        if task_time is None:
+            return Text("--:--" if self.compact else "-:--:--", style=style)
+
+        # Based on https://github.com/tqdm/tqdm/blob/master/tqdm/std.py
+        minutes, seconds = divmod(int(task_time), 60)
+        hours, minutes = divmod(minutes, 60)
+
+        if self.compact and not hours:
+            formatted = f"{minutes:02d}:{seconds:02d}"
+        else:
+            formatted = f"{hours:d}:{minutes:02d}:{seconds:02d}"
+
+        return Text(formatted, style=style)
+
+
+class FileSizeColumn(ProgressColumn):
+    """Renders completed filesize."""
+
+    def render(self, task: "Task") -> Text:
+        """Show data completed."""
+        data_size = filesize.decimal(int(task.completed))
+        return Text(data_size, style="progress.filesize")
+
+
+class TotalFileSizeColumn(ProgressColumn):
+    """Renders total filesize."""
+
+    def render(self, task: "Task") -> Text:
+        """Show data completed."""
+        data_size = filesize.decimal(int(task.total)) if task.total is not None else ""
+        return Text(data_size, style="progress.filesize.total")
+
+
+class MofNCompleteColumn(ProgressColumn):
+    """Renders completed count/total, e.g. '  10/1000'.
+
+    Best for bounded tasks with int quantities.
+
+    Space pads the completed count so that progress length does not change as task progresses
+    past powers of 10.
+
+    Args:
+        separator (str, optional): Text to separate completed and total values. Defaults to "/".
+    """
+
+    def __init__(self, separator: str = "/", table_column: Optional[Column] = None):
+        self.separator = separator
+        super().__init__(table_column=table_column)
+
+    def render(self, task: "Task") -> Text:
+        """Show completed/total."""
+        completed = int(task.completed)
+        total = int(task.total) if task.total is not None else "?"
+        total_width = len(str(total))
+        return Text(
+            f"{completed:{total_width}d}{self.separator}{total}",
+            style="progress.download",
+        )
+
+
+class DownloadColumn(ProgressColumn):
+    """Renders file size downloaded and total, e.g. '0.5/2.3 GB'.
+
+    Args:
+        binary_units (bool, optional): Use binary units, KiB, MiB etc. Defaults to False.
+    """
+
+    def __init__(
+        self, binary_units: bool = False, table_column: Optional[Column] = None
+    ) -> None:
+        self.binary_units = binary_units
+        super().__init__(table_column=table_column)
+
+    def render(self, task: "Task") -> Text:
+        """Calculate common unit for completed and total."""
+        completed = int(task.completed)
+
+        unit_and_suffix_calculation_base = (
+            int(task.total) if task.total is not None else completed
+        )
+        if self.binary_units:
+            unit, suffix = filesize.pick_unit_and_suffix(
+                unit_and_suffix_calculation_base,
+                ["bytes", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"],
+                1024,
+            )
+        else:
+            unit, suffix = filesize.pick_unit_and_suffix(
+                unit_and_suffix_calculation_base,
+                ["bytes", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"],
+                1000,
+            )
+        precision = 0 if unit == 1 else 1
+
+        completed_ratio = completed / unit
+        completed_str = f"{completed_ratio:,.{precision}f}"
+
+        if task.total is not None:
+            total = int(task.total)
+            total_ratio = total / unit
+            total_str = f"{total_ratio:,.{precision}f}"
+        else:
+            total_str = "?"
+
+        download_status = f"{completed_str}/{total_str} {suffix}"
+        download_text = Text(download_status, style="progress.download")
+        return download_text
+
+
+class TransferSpeedColumn(ProgressColumn):
+    """Renders human readable transfer speed."""
+
+    def render(self, task: "Task") -> Text:
+        """Show data transfer speed."""
+        speed = task.finished_speed or task.speed
+        if speed is None:
+            return Text("?", style="progress.data.speed")
+        data_speed = filesize.decimal(int(speed))
+        return Text(f"{data_speed}/s", style="progress.data.speed")
+
+
+class ProgressSample(NamedTuple):
+    """Sample of progress for a given time."""
+
+    timestamp: float
+    """Timestamp of sample."""
+    completed: float
+    """Number of steps completed."""
+
+
+@dataclass
+class Task:
+    """Information regarding a progress task.
+
+    This object should be considered read-only outside of the :class:`~Progress` class.
+
+    """
+
+    id: TaskID
+    """Task ID associated with this task (used in Progress methods)."""
+
+    description: str
+    """str: Description of the task."""
+
+    total: Optional[float]
+    """Optional[float]: Total number of steps in this task."""
+
+    completed: float
+    """float: Number of steps completed"""
+
+    _get_time: GetTimeCallable
+    """Callable to get the current time."""
+
+    finished_time: Optional[float] = None
+    """float: Time task was finished."""
+
+    visible: bool = True
+    """bool: Indicates if this task is visible in the progress display."""
+
+    fields: Dict[str, Any] = field(default_factory=dict)
+    """dict: Arbitrary fields passed in via Progress.update."""
+
+    start_time: Optional[float] = field(default=None, init=False, repr=False)
+    """Optional[float]: Time this task was started, or None if not started."""
+
+    stop_time: Optional[float] = field(default=None, init=False, repr=False)
+    """Optional[float]: Time this task was stopped, or None if not stopped."""
+
+    finished_speed: Optional[float] = None
+    """Optional[float]: The last speed for a finished task."""
+
+    _progress: Deque[ProgressSample] = field(
+        default_factory=lambda: deque(maxlen=1000), init=False, repr=False
+    )
+
+    _lock: RLock = field(repr=False, default_factory=RLock)
+    """Thread lock."""
+
+    def get_time(self) -> float:
+        """float: Get the current time, in seconds."""
+        return self._get_time()
+
+    @property
+    def started(self) -> bool:
+        """bool: Check if the task as started."""
+        return self.start_time is not None
+
+    @property
+    def remaining(self) -> Optional[float]:
+        """Optional[float]: Get the number of steps remaining, if a non-None total was set."""
+        if self.total is None:
+            return None
+        return self.total - self.completed
+
+    @property
+    def elapsed(self) -> Optional[float]:
+        """Optional[float]: Time elapsed since task was started, or ``None`` if the task hasn't started."""
+        if self.start_time is None:
+            return None
+        if self.stop_time is not None:
+            return self.stop_time - self.start_time
+        return self.get_time() - self.start_time
+
+    @property
+    def finished(self) -> bool:
+        """Check if the task has finished."""
+        return self.finished_time is not None
+
+    @property
+    def percentage(self) -> float:
+        """float: Get progress of task as a percentage. If a None total was set, returns 0"""
+        if not self.total:
+            return 0.0
+        completed = (self.completed / self.total) * 100.0
+        completed = min(100.0, max(0.0, completed))
+        return completed
+
+    @property
+    def speed(self) -> Optional[float]:
+        """Optional[float]: Get the estimated speed in steps per second."""
+        if self.start_time is None:
+            return None
+        with self._lock:
+            progress = self._progress
+            if not progress:
+                return None
+            total_time = progress[-1].timestamp - progress[0].timestamp
+            if total_time == 0:
+                return None
+            iter_progress = iter(progress)
+            next(iter_progress)
+            total_completed = sum(sample.completed for sample in iter_progress)
+            speed = total_completed / total_time
+            return speed
+
+    @property
+    def time_remaining(self) -> Optional[float]:
+        """Optional[float]: Get estimated time to completion, or ``None`` if no data."""
+        if self.finished:
+            return 0.0
+        speed = self.speed
+        if not speed:
+            return None
+        remaining = self.remaining
+        if remaining is None:
+            return None
+        estimate = ceil(remaining / speed)
+        return estimate
+
+    def _reset(self) -> None:
+        """Reset progress."""
+        self._progress.clear()
+        self.finished_time = None
+        self.finished_speed = None
+
+
+class Progress(JupyterMixin):
+    """Renders an auto-updating progress bar(s).
+
+    Args:
+        console (Console, optional): Optional Console instance. Defaults to an internal Console instance writing to stdout.
+        auto_refresh (bool, optional): Enable auto refresh. If disabled, you will need to call `refresh()`.
+        refresh_per_second (float, optional): Number of times per second to refresh the progress information. Defaults to 10.
+        speed_estimate_period: (float, optional): Period (in seconds) used to calculate the speed estimate. Defaults to 30.
+        transient: (bool, optional): Clear the progress on exit. Defaults to False.
+        redirect_stdout: (bool, optional): Enable redirection of stdout, so ``print`` may be used. Defaults to True.
+        redirect_stderr: (bool, optional): Enable redirection of stderr. Defaults to True.
+        get_time: (Callable, optional): A callable that gets the current time, or None to use Console.get_time. Defaults to None.
+        disable (bool, optional): Disable progress display. Defaults to False
+        expand (bool, optional): Expand tasks table to fit width. Defaults to False.
+    """
+
+    def __init__(
+        self,
+        *columns: Union[str, ProgressColumn],
+        console: Optional[Console] = None,
+        auto_refresh: bool = True,
+        refresh_per_second: float = 10,
+        speed_estimate_period: float = 30.0,
+        transient: bool = False,
+        redirect_stdout: bool = True,
+        redirect_stderr: bool = True,
+        get_time: Optional[GetTimeCallable] = None,
+        disable: bool = False,
+        expand: bool = False,
+    ) -> None:
+        assert refresh_per_second > 0, "refresh_per_second must be > 0"
+        self._lock = RLock()
+        self.columns = columns or self.get_default_columns()
+        self.speed_estimate_period = speed_estimate_period
+
+        self.disable = disable
+        self.expand = expand
+        self._tasks: Dict[TaskID, Task] = {}
+        self._task_index: TaskID = TaskID(0)
+        self.live = Live(
+            console=console or get_console(),
+            auto_refresh=auto_refresh,
+            refresh_per_second=refresh_per_second,
+            transient=transient,
+            redirect_stdout=redirect_stdout,
+            redirect_stderr=redirect_stderr,
+            get_renderable=self.get_renderable,
+        )
+        self.get_time = get_time or self.console.get_time
+        self.print = self.console.print
+        self.log = self.console.log
+
+    @classmethod
+    def get_default_columns(cls) -> Tuple[ProgressColumn, ...]:
+        """Get the default columns used for a new Progress instance:
+           - a text column for the description (TextColumn)
+           - the bar itself (BarColumn)
+           - a text column showing completion percentage (TextColumn)
+           - an estimated-time-remaining column (TimeRemainingColumn)
+        If the Progress instance is created without passing a columns argument,
+        the default columns defined here will be used.
+
+        You can also create a Progress instance using custom columns before
+        and/or after the defaults, as in this example:
+
+            progress = Progress(
+                SpinnerColumn(),
+                *Progress.get_default_columns(),
+                "Elapsed:",
+                TimeElapsedColumn(),
+            )
+
+        This code shows the creation of a Progress display, containing
+        a spinner to the left, the default columns, and a labeled elapsed
+        time column.
+        """
+        return (
+            TextColumn("[progress.description]{task.description}"),
+            BarColumn(),
+            TaskProgressColumn(),
+            TimeRemainingColumn(),
+        )
+
+    @property
+    def console(self) -> Console:
+        return self.live.console
+
+    @property
+    def tasks(self) -> List[Task]:
+        """Get a list of Task instances."""
+        with self._lock:
+            return list(self._tasks.values())
+
+    @property
+    def task_ids(self) -> List[TaskID]:
+        """A list of task IDs."""
+        with self._lock:
+            return list(self._tasks.keys())
+
+    @property
+    def finished(self) -> bool:
+        """Check if all tasks have been completed."""
+        with self._lock:
+            if not self._tasks:
+                return True
+            return all(task.finished for task in self._tasks.values())
+
+    def start(self) -> None:
+        """Start the progress display."""
+        if not self.disable:
+            self.live.start(refresh=True)
+
+    def stop(self) -> None:
+        """Stop the progress display."""
+        if not self.disable:
+            self.live.stop()
+            if not self.console.is_interactive and not self.console.is_jupyter:
+                self.console.print()
+
+    def __enter__(self) -> Self:
+        self.start()
+        return self
+
+    def __exit__(
+        self,
+        exc_type: Optional[Type[BaseException]],
+        exc_val: Optional[BaseException],
+        exc_tb: Optional[TracebackType],
+    ) -> None:
+        self.stop()
+
+    def track(
+        self,
+        sequence: Iterable[ProgressType],
+        total: Optional[float] = None,
+        completed: int = 0,
+        task_id: Optional[TaskID] = None,
+        description: str = "Working...",
+        update_period: float = 0.1,
+    ) -> Iterable[ProgressType]:
+        """Track progress by iterating over a sequence.
+
+        You can also track progress of an iterable, which might require that you additionally specify ``total``.
+
+        Args:
+            sequence (Iterable[ProgressType]): Values you want to iterate over and track progress.
+            total: (float, optional): Total number of steps. Default is len(sequence).
+            completed (int, optional): Number of steps completed so far. Defaults to 0.
+            task_id: (TaskID): Task to track. Default is new task.
+            description: (str, optional): Description of task, if new task is created.
+            update_period (float, optional): Minimum time (in seconds) between calls to update(). Defaults to 0.1.
+
+        Returns:
+            Iterable[ProgressType]: An iterable of values taken from the provided sequence.
+        """
+        if total is None:
+            total = float(length_hint(sequence)) or None
+
+        if task_id is None:
+            task_id = self.add_task(description, total=total, completed=completed)
+        else:
+            self.update(task_id, total=total, completed=completed)
+
+        if self.live.auto_refresh:
+            with _TrackThread(self, task_id, update_period) as track_thread:
+                for value in sequence:
+                    yield value
+                    track_thread.completed += 1
+        else:
+            advance = self.advance
+            refresh = self.refresh
+            for value in sequence:
+                yield value
+                advance(task_id, 1)
+                refresh()
+
+    def wrap_file(
+        self,
+        file: BinaryIO,
+        total: Optional[int] = None,
+        *,
+        task_id: Optional[TaskID] = None,
+        description: str = "Reading...",
+    ) -> BinaryIO:
+        """Track progress file reading from a binary file.
+
+        Args:
+            file (BinaryIO): A file-like object opened in binary mode.
+            total (int, optional): Total number of bytes to read. This must be provided unless a task with a total is also given.
+            task_id (TaskID): Task to track. Default is new task.
+            description (str, optional): Description of task, if new task is created.
+
+        Returns:
+            BinaryIO: A readable file-like object in binary mode.
+
+        Raises:
+            ValueError: When no total value can be extracted from the arguments or the task.
+        """
+        # attempt to recover the total from the task
+        total_bytes: Optional[float] = None
+        if total is not None:
+            total_bytes = total
+        elif task_id is not None:
+            with self._lock:
+                total_bytes = self._tasks[task_id].total
+        if total_bytes is None:
+            raise ValueError(
+                f"unable to get the total number of bytes, please specify 'total'"
+            )
+
+        # update total of task or create new task
+        if task_id is None:
+            task_id = self.add_task(description, total=total_bytes)
+        else:
+            self.update(task_id, total=total_bytes)
+
+        return _Reader(file, self, task_id, close_handle=False)
+
+    @typing.overload
+    def open(
+        self,
+        file: Union[str, "PathLike[str]", bytes],
+        mode: Literal["rb"],
+        buffering: int = -1,
+        encoding: Optional[str] = None,
+        errors: Optional[str] = None,
+        newline: Optional[str] = None,
+        *,
+        total: Optional[int] = None,
+        task_id: Optional[TaskID] = None,
+        description: str = "Reading...",
+    ) -> BinaryIO:
+        pass
+
+    @typing.overload
+    def open(
+        self,
+        file: Union[str, "PathLike[str]", bytes],
+        mode: Union[Literal["r"], Literal["rt"]],
+        buffering: int = -1,
+        encoding: Optional[str] = None,
+        errors: Optional[str] = None,
+        newline: Optional[str] = None,
+        *,
+        total: Optional[int] = None,
+        task_id: Optional[TaskID] = None,
+        description: str = "Reading...",
+    ) -> TextIO:
+        pass
+
+    def open(
+        self,
+        file: Union[str, "PathLike[str]", bytes],
+        mode: Union[Literal["rb"], Literal["rt"], Literal["r"]] = "r",
+        buffering: int = -1,
+        encoding: Optional[str] = None,
+        errors: Optional[str] = None,
+        newline: Optional[str] = None,
+        *,
+        total: Optional[int] = None,
+        task_id: Optional[TaskID] = None,
+        description: str = "Reading...",
+    ) -> Union[BinaryIO, TextIO]:
+        """Track progress while reading from a binary file.
+
+        Args:
+            path (Union[str, PathLike[str]]): The path to the file to read.
+            mode (str): The mode to use to open the file. Only supports "r", "rb" or "rt".
+            buffering (int): The buffering strategy to use, see :func:`io.open`.
+            encoding (str, optional): The encoding to use when reading in text mode, see :func:`io.open`.
+            errors (str, optional): The error handling strategy for decoding errors, see :func:`io.open`.
+            newline (str, optional): The strategy for handling newlines in text mode, see :func:`io.open`.
+            total (int, optional): Total number of bytes to read. If none given, os.stat(path).st_size is used.
+            task_id (TaskID): Task to track. Default is new task.
+            description (str, optional): Description of task, if new task is created.
+
+        Returns:
+            BinaryIO: A readable file-like object in binary mode.
+
+        Raises:
+            ValueError: When an invalid mode is given.
+        """
+        # normalize the mode (always rb, rt)
+        _mode = "".join(sorted(mode, reverse=False))
+        if _mode not in ("br", "rt", "r"):
+            raise ValueError(f"invalid mode {mode!r}")
+
+        # patch buffering to provide the same behaviour as the builtin `open`
+        line_buffering = buffering == 1
+        if _mode == "br" and buffering == 1:
+            warnings.warn(
+                "line buffering (buffering=1) isn't supported in binary mode, the default buffer size will be used",
+                RuntimeWarning,
+            )
+            buffering = -1
+        elif _mode in ("rt", "r"):
+            if buffering == 0:
+                raise ValueError("can't have unbuffered text I/O")
+            elif buffering == 1:
+                buffering = -1
+
+        # attempt to get the total with `os.stat`
+        if total is None:
+            total = stat(file).st_size
+
+        # update total of task or create new task
+        if task_id is None:
+            task_id = self.add_task(description, total=total)
+        else:
+            self.update(task_id, total=total)
+
+        # open the file in binary mode,
+        handle = io.open(file, "rb", buffering=buffering)
+        reader = _Reader(handle, self, task_id, close_handle=True)
+
+        # wrap the reader in a `TextIOWrapper` if text mode
+        if mode in ("r", "rt"):
+            return io.TextIOWrapper(
+                reader,
+                encoding=encoding,
+                errors=errors,
+                newline=newline,
+                line_buffering=line_buffering,
+            )
+
+        return reader
+
+    def start_task(self, task_id: TaskID) -> None:
+        """Start a task.
+
+        Starts a task (used when calculating elapsed time). You may need to call this manually,
+        if you called ``add_task`` with ``start=False``.
+
+        Args:
+            task_id (TaskID): ID of task.
+        """
+        with self._lock:
+            task = self._tasks[task_id]
+            if task.start_time is None:
+                task.start_time = self.get_time()
+
+    def stop_task(self, task_id: TaskID) -> None:
+        """Stop a task.
+
+        This will freeze the elapsed time on the task.
+
+        Args:
+            task_id (TaskID): ID of task.
+        """
+        with self._lock:
+            task = self._tasks[task_id]
+            current_time = self.get_time()
+            if task.start_time is None:
+                task.start_time = current_time
+            task.stop_time = current_time
+
+    def update(
+        self,
+        task_id: TaskID,
+        *,
+        total: Optional[float] = None,
+        completed: Optional[float] = None,
+        advance: Optional[float] = None,
+        description: Optional[str] = None,
+        visible: Optional[bool] = None,
+        refresh: bool = False,
+        **fields: Any,
+    ) -> None:
+        """Update information associated with a task.
+
+        Args:
+            task_id (TaskID): Task id (returned by add_task).
+            total (float, optional): Updates task.total if not None.
+            completed (float, optional): Updates task.completed if not None.
+            advance (float, optional): Add a value to task.completed if not None.
+            description (str, optional): Change task description if not None.
+            visible (bool, optional): Set visible flag if not None.
+            refresh (bool): Force a refresh of progress information. Default is False.
+            **fields (Any): Additional data fields required for rendering.
+        """
+        with self._lock:
+            task = self._tasks[task_id]
+            completed_start = task.completed
+
+            if total is not None and total != task.total:
+                task.total = total
+                task._reset()
+            if advance is not None:
+                task.completed += advance
+            if completed is not None:
+                task.completed = completed
+            if description is not None:
+                task.description = description
+            if visible is not None:
+                task.visible = visible
+            task.fields.update(fields)
+            update_completed = task.completed - completed_start
+
+            current_time = self.get_time()
+            old_sample_time = current_time - self.speed_estimate_period
+            _progress = task._progress
+
+            popleft = _progress.popleft
+            while _progress and _progress[0].timestamp < old_sample_time:
+                popleft()
+            if update_completed > 0:
+                _progress.append(ProgressSample(current_time, update_completed))
+            if (
+                task.total is not None
+                and task.completed >= task.total
+                and task.finished_time is None
+            ):
+                task.finished_time = task.elapsed
+
+        if refresh:
+            self.refresh()
+
+    def reset(
+        self,
+        task_id: TaskID,
+        *,
+        start: bool = True,
+        total: Optional[float] = None,
+        completed: int = 0,
+        visible: Optional[bool] = None,
+        description: Optional[str] = None,
+        **fields: Any,
+    ) -> None:
+        """Reset a task so completed is 0 and the clock is reset.
+
+        Args:
+            task_id (TaskID): ID of task.
+            start (bool, optional): Start the task after reset. Defaults to True.
+            total (float, optional): New total steps in task, or None to use current total. Defaults to None.
+            completed (int, optional): Number of steps completed. Defaults to 0.
+            visible (bool, optional): Set visible flag if not None.
+            description (str, optional): Change task description if not None. Defaults to None.
+            **fields (str): Additional data fields required for rendering.
+        """
+        current_time = self.get_time()
+        with self._lock:
+            task = self._tasks[task_id]
+            task._reset()
+            task.start_time = current_time if start else None
+            if total is not None:
+                task.total = total
+            task.completed = completed
+            if visible is not None:
+                task.visible = visible
+            if fields:
+                task.fields = fields
+            if description is not None:
+                task.description = description
+            task.finished_time = None
+        self.refresh()
+
+    def advance(self, task_id: TaskID, advance: float = 1) -> None:
+        """Advance task by a number of steps.
+
+        Args:
+            task_id (TaskID): ID of task.
+            advance (float): Number of steps to advance. Default is 1.
+        """
+        current_time = self.get_time()
+        with self._lock:
+            task = self._tasks[task_id]
+            completed_start = task.completed
+            task.completed += advance
+            update_completed = task.completed - completed_start
+            old_sample_time = current_time - self.speed_estimate_period
+            _progress = task._progress
+
+            popleft = _progress.popleft
+            while _progress and _progress[0].timestamp < old_sample_time:
+                popleft()
+            while len(_progress) > 1000:
+                popleft()
+            _progress.append(ProgressSample(current_time, update_completed))
+            if (
+                task.total is not None
+                and task.completed >= task.total
+                and task.finished_time is None
+            ):
+                task.finished_time = task.elapsed
+                task.finished_speed = task.speed
+
+    def refresh(self) -> None:
+        """Refresh (render) the progress information."""
+        if not self.disable and self.live.is_started:
+            self.live.refresh()
+
+    def get_renderable(self) -> RenderableType:
+        """Get a renderable for the progress display."""
+        renderable = Group(*self.get_renderables())
+        return renderable
+
+    def get_renderables(self) -> Iterable[RenderableType]:
+        """Get a number of renderables for the progress display."""
+        table = self.make_tasks_table(self.tasks)
+        yield table
+
+    def make_tasks_table(self, tasks: Iterable[Task]) -> Table:
+        """Get a table to render the Progress display.
+
+        Args:
+            tasks (Iterable[Task]): An iterable of Task instances, one per row of the table.
+
+        Returns:
+            Table: A table instance.
+        """
+        table_columns = (
+            (
+                Column(no_wrap=True)
+                if isinstance(_column, str)
+                else _column.get_table_column().copy()
+            )
+            for _column in self.columns
+        )
+        table = Table.grid(*table_columns, padding=(0, 1), expand=self.expand)
+
+        for task in tasks:
+            if task.visible:
+                table.add_row(
+                    *(
+                        (
+                            column.format(task=task)
+                            if isinstance(column, str)
+                            else column(task)
+                        )
+                        for column in self.columns
+                    )
+                )
+        return table
+
+    def __rich__(self) -> RenderableType:
+        """Makes the Progress class itself renderable."""
+        with self._lock:
+            return self.get_renderable()
+
+    def add_task(
+        self,
+        description: str,
+        start: bool = True,
+        total: Optional[float] = 100.0,
+        completed: int = 0,
+        visible: bool = True,
+        **fields: Any,
+    ) -> TaskID:
+        """Add a new 'task' to the Progress display.
+
+        Args:
+            description (str): A description of the task.
+            start (bool, optional): Start the task immediately (to calculate elapsed time). If set to False,
+                you will need to call `start` manually. Defaults to True.
+            total (float, optional): Number of total steps in the progress if known.
+                Set to None to render a pulsing animation. Defaults to 100.
+            completed (int, optional): Number of steps completed so far. Defaults to 0.
+            visible (bool, optional): Enable display of the task. Defaults to True.
+            **fields (str): Additional data fields required for rendering.
+
+        Returns:
+            TaskID: An ID you can use when calling `update`.
+        """
+        with self._lock:
+            task = Task(
+                self._task_index,
+                description,
+                total,
+                completed,
+                visible=visible,
+                fields=fields,
+                _get_time=self.get_time,
+                _lock=self._lock,
+            )
+            self._tasks[self._task_index] = task
+            if start:
+                self.start_task(self._task_index)
+            new_task_index = self._task_index
+            self._task_index = TaskID(int(self._task_index) + 1)
+        self.refresh()
+        return new_task_index
+
+    def remove_task(self, task_id: TaskID) -> None:
+        """Delete a task if it exists.
+
+        Args:
+            task_id (TaskID): A task ID.
+
+        """
+        with self._lock:
+            del self._tasks[task_id]
+
+
+if __name__ == "__main__":  # pragma: no coverage
+    import random
+    import time
+
+    from .panel import Panel
+    from .rule import Rule
+    from .syntax import Syntax
+    from .table import Table
+
+    syntax = Syntax(
+        '''def loop_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
+    """Iterate and generate a tuple with a flag for last value."""
+    iter_values = iter(values)
+    try:
+        previous_value = next(iter_values)
+    except StopIteration:
+        return
+    for value in iter_values:
+        yield False, previous_value
+        previous_value = value
+    yield True, previous_value''',
+        "python",
+        line_numbers=True,
+    )
+
+    table = Table("foo", "bar", "baz")
+    table.add_row("1", "2", "3")
+
+    progress_renderables = [
+        "Text may be printed while the progress bars are rendering.",
+        Panel("In fact, [i]any[/i] renderable will work"),
+        "Such as [magenta]tables[/]...",
+        table,
+        "Pretty printed structures...",
+        {"type": "example", "text": "Pretty printed"},
+        "Syntax...",
+        syntax,
+        Rule("Give it a try!"),
+    ]
+
+    from itertools import cycle
+
+    examples = cycle(progress_renderables)
+
+    console = Console(record=True)
+
+    with Progress(
+        SpinnerColumn(),
+        *Progress.get_default_columns(),
+        TimeElapsedColumn(),
+        console=console,
+        transient=False,
+    ) as progress:
+        task1 = progress.add_task("[red]Downloading", total=1000)
+        task2 = progress.add_task("[green]Processing", total=1000)
+        task3 = progress.add_task("[yellow]Thinking", total=None)
+
+        while not progress.finished:
+            progress.update(task1, advance=0.5)
+            progress.update(task2, advance=0.3)
+            time.sleep(0.01)
+            if random.randint(0, 100) < 1:
+                progress.log(next(examples))
diff --git a/local_packages/rich/progress_bar.py b/local_packages/rich/progress_bar.py
new file mode 100644
index 0000000..41794f7
--- /dev/null
+++ b/local_packages/rich/progress_bar.py
@@ -0,0 +1,223 @@
+import math
+from functools import lru_cache
+from time import monotonic
+from typing import Iterable, List, Optional
+
+from .color import Color, blend_rgb
+from .color_triplet import ColorTriplet
+from .console import Console, ConsoleOptions, RenderResult
+from .jupyter import JupyterMixin
+from .measure import Measurement
+from .segment import Segment
+from .style import Style, StyleType
+
+# Number of characters before 'pulse' animation repeats
+PULSE_SIZE = 20
+
+
+class ProgressBar(JupyterMixin):
+    """Renders a (progress) bar. Used by rich.progress.
+
+    Args:
+        total (float, optional): Number of steps in the bar. Defaults to 100. Set to None to render a pulsing animation.
+        completed (float, optional): Number of steps completed. Defaults to 0.
+        width (int, optional): Width of the bar, or ``None`` for maximum width. Defaults to None.
+        pulse (bool, optional): Enable pulse effect. Defaults to False. Will pulse if a None total was passed.
+        style (StyleType, optional): Style for the bar background. Defaults to "bar.back".
+        complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete".
+        finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished".
+        pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse".
+        animation_time (Optional[float], optional): Time in seconds to use for animation, or None to use system time.
+    """
+
+    def __init__(
+        self,
+        total: Optional[float] = 100.0,
+        completed: float = 0,
+        width: Optional[int] = None,
+        pulse: bool = False,
+        style: StyleType = "bar.back",
+        complete_style: StyleType = "bar.complete",
+        finished_style: StyleType = "bar.finished",
+        pulse_style: StyleType = "bar.pulse",
+        animation_time: Optional[float] = None,
+    ):
+        self.total = total
+        self.completed = completed
+        self.width = width
+        self.pulse = pulse
+        self.style = style
+        self.complete_style = complete_style
+        self.finished_style = finished_style
+        self.pulse_style = pulse_style
+        self.animation_time = animation_time
+
+        self._pulse_segments: Optional[List[Segment]] = None
+
+    def __repr__(self) -> str:
+        return f""
+
+    @property
+    def percentage_completed(self) -> Optional[float]:
+        """Calculate percentage complete."""
+        if self.total is None:
+            return None
+        completed = (self.completed / self.total) * 100.0
+        completed = min(100, max(0.0, completed))
+        return completed
+
+    @lru_cache(maxsize=16)
+    def _get_pulse_segments(
+        self,
+        fore_style: Style,
+        back_style: Style,
+        color_system: str,
+        no_color: bool,
+        ascii: bool = False,
+    ) -> List[Segment]:
+        """Get a list of segments to render a pulse animation.
+
+        Returns:
+            List[Segment]: A list of segments, one segment per character.
+        """
+        bar = "-" if ascii else "━"
+        segments: List[Segment] = []
+        if color_system not in ("standard", "eight_bit", "truecolor") or no_color:
+            segments += [Segment(bar, fore_style)] * (PULSE_SIZE // 2)
+            segments += [Segment(" " if no_color else bar, back_style)] * (
+                PULSE_SIZE - (PULSE_SIZE // 2)
+            )
+            return segments
+
+        append = segments.append
+        fore_color = (
+            fore_style.color.get_truecolor()
+            if fore_style.color
+            else ColorTriplet(255, 0, 255)
+        )
+        back_color = (
+            back_style.color.get_truecolor()
+            if back_style.color
+            else ColorTriplet(0, 0, 0)
+        )
+        cos = math.cos
+        pi = math.pi
+        _Segment = Segment
+        _Style = Style
+        from_triplet = Color.from_triplet
+
+        for index in range(PULSE_SIZE):
+            position = index / PULSE_SIZE
+            fade = 0.5 + cos(position * pi * 2) / 2.0
+            color = blend_rgb(fore_color, back_color, cross_fade=fade)
+            append(_Segment(bar, _Style(color=from_triplet(color))))
+        return segments
+
+    def update(self, completed: float, total: Optional[float] = None) -> None:
+        """Update progress with new values.
+
+        Args:
+            completed (float): Number of steps completed.
+            total (float, optional): Total number of steps, or ``None`` to not change. Defaults to None.
+        """
+        self.completed = completed
+        self.total = total if total is not None else self.total
+
+    def _render_pulse(
+        self, console: Console, width: int, ascii: bool = False
+    ) -> Iterable[Segment]:
+        """Renders the pulse animation.
+
+        Args:
+            console (Console): Console instance.
+            width (int): Width in characters of pulse animation.
+
+        Returns:
+            RenderResult: [description]
+
+        Yields:
+            Iterator[Segment]: Segments to render pulse
+        """
+        fore_style = console.get_style(self.pulse_style, default="white")
+        back_style = console.get_style(self.style, default="black")
+
+        pulse_segments = self._get_pulse_segments(
+            fore_style, back_style, console.color_system, console.no_color, ascii=ascii
+        )
+        segment_count = len(pulse_segments)
+        current_time = (
+            monotonic() if self.animation_time is None else self.animation_time
+        )
+        segments = pulse_segments * (int(width / segment_count) + 2)
+        offset = int(-current_time * 15) % segment_count
+        segments = segments[offset : offset + width]
+        yield from segments
+
+    def __rich_console__(
+        self, console: Console, options: ConsoleOptions
+    ) -> RenderResult:
+        width = min(self.width or options.max_width, options.max_width)
+        ascii = options.legacy_windows or options.ascii_only
+        should_pulse = self.pulse or self.total is None
+        if should_pulse:
+            yield from self._render_pulse(console, width, ascii=ascii)
+            return
+
+        completed: Optional[float] = (
+            min(self.total, max(0, self.completed)) if self.total is not None else None
+        )
+
+        bar = "-" if ascii else "━"
+        half_bar_right = " " if ascii else "╸"
+        half_bar_left = " " if ascii else "╺"
+        complete_halves = (
+            int(width * 2 * completed / self.total)
+            if self.total and completed is not None
+            else width * 2
+        )
+        bar_count = complete_halves // 2
+        half_bar_count = complete_halves % 2
+        style = console.get_style(self.style)
+        is_finished = self.total is None or self.completed >= self.total
+        complete_style = console.get_style(
+            self.finished_style if is_finished else self.complete_style
+        )
+        _Segment = Segment
+        if bar_count:
+            yield _Segment(bar * bar_count, complete_style)
+        if half_bar_count:
+            yield _Segment(half_bar_right * half_bar_count, complete_style)
+
+        if not console.no_color:
+            remaining_bars = width - bar_count - half_bar_count
+            if remaining_bars and console.color_system is not None:
+                if not half_bar_count and bar_count:
+                    yield _Segment(half_bar_left, style)
+                    remaining_bars -= 1
+                if remaining_bars:
+                    yield _Segment(bar * remaining_bars, style)
+
+    def __rich_measure__(
+        self, console: Console, options: ConsoleOptions
+    ) -> Measurement:
+        return (
+            Measurement(self.width, self.width)
+            if self.width is not None
+            else Measurement(4, options.max_width)
+        )
+
+
+if __name__ == "__main__":  # pragma: no cover
+    console = Console()
+    bar = ProgressBar(width=50, total=100)
+
+    import time
+
+    console.show_cursor(False)
+    for n in range(0, 101, 1):
+        bar.update(n)
+        console.print(bar)
+        console.file.write("\r")
+        time.sleep(0.05)
+    console.show_cursor(True)
+    console.print()
diff --git a/local_packages/rich/prompt.py b/local_packages/rich/prompt.py
new file mode 100644
index 0000000..ae94d9b
--- /dev/null
+++ b/local_packages/rich/prompt.py
@@ -0,0 +1,400 @@
+from typing import Any, Generic, List, Optional, TextIO, TypeVar, Union, overload
+
+from . import get_console
+from .console import Console
+from .text import Text, TextType
+
+PromptType = TypeVar("PromptType")
+DefaultType = TypeVar("DefaultType")
+
+
+class PromptError(Exception):
+    """Exception base class for prompt related errors."""
+
+
+class InvalidResponse(PromptError):
+    """Exception to indicate a response was invalid. Raise this within process_response() to indicate an error
+    and provide an error message.
+
+    Args:
+        message (Union[str, Text]): Error message.
+    """
+
+    def __init__(self, message: TextType) -> None:
+        self.message = message
+
+    def __rich__(self) -> TextType:
+        return self.message
+
+
+class PromptBase(Generic[PromptType]):
+    """Ask the user for input until a valid response is received. This is the base class, see one of
+    the concrete classes for examples.
+
+    Args:
+        prompt (TextType, optional): Prompt text. Defaults to "".
+        console (Console, optional): A Console instance or None to use global console. Defaults to None.
+        password (bool, optional): Enable password input. Defaults to False.
+        choices (List[str], optional): A list of valid choices. Defaults to None.
+        case_sensitive (bool, optional): Matching of choices should be case-sensitive. Defaults to True.
+        show_default (bool, optional): Show default in prompt. Defaults to True.
+        show_choices (bool, optional): Show choices in prompt. Defaults to True.
+    """
+
+    response_type: type = str
+
+    validate_error_message = "[prompt.invalid]Please enter a valid value"
+    illegal_choice_message = (
+        "[prompt.invalid.choice]Please select one of the available options"
+    )
+    prompt_suffix = ": "
+
+    choices: Optional[List[str]] = None
+
+    def __init__(
+        self,
+        prompt: TextType = "",
+        *,
+        console: Optional[Console] = None,
+        password: bool = False,
+        choices: Optional[List[str]] = None,
+        case_sensitive: bool = True,
+        show_default: bool = True,
+        show_choices: bool = True,
+    ) -> None:
+        self.console = console or get_console()
+        self.prompt = (
+            Text.from_markup(prompt, style="prompt")
+            if isinstance(prompt, str)
+            else prompt
+        )
+        self.password = password
+        if choices is not None:
+            self.choices = choices
+        self.case_sensitive = case_sensitive
+        self.show_default = show_default
+        self.show_choices = show_choices
+
+    @classmethod
+    @overload
+    def ask(
+        cls,
+        prompt: TextType = "",
+        *,
+        console: Optional[Console] = None,
+        password: bool = False,
+        choices: Optional[List[str]] = None,
+        case_sensitive: bool = True,
+        show_default: bool = True,
+        show_choices: bool = True,
+        default: DefaultType,
+        stream: Optional[TextIO] = None,
+    ) -> Union[DefaultType, PromptType]:
+        ...
+
+    @classmethod
+    @overload
+    def ask(
+        cls,
+        prompt: TextType = "",
+        *,
+        console: Optional[Console] = None,
+        password: bool = False,
+        choices: Optional[List[str]] = None,
+        case_sensitive: bool = True,
+        show_default: bool = True,
+        show_choices: bool = True,
+        stream: Optional[TextIO] = None,
+    ) -> PromptType:
+        ...
+
+    @classmethod
+    def ask(
+        cls,
+        prompt: TextType = "",
+        *,
+        console: Optional[Console] = None,
+        password: bool = False,
+        choices: Optional[List[str]] = None,
+        case_sensitive: bool = True,
+        show_default: bool = True,
+        show_choices: bool = True,
+        default: Any = ...,
+        stream: Optional[TextIO] = None,
+    ) -> Any:
+        """Shortcut to construct and run a prompt loop and return the result.
+
+        Example:
+            >>> filename = Prompt.ask("Enter a filename")
+
+        Args:
+            prompt (TextType, optional): Prompt text. Defaults to "".
+            console (Console, optional): A Console instance or None to use global console. Defaults to None.
+            password (bool, optional): Enable password input. Defaults to False.
+            choices (List[str], optional): A list of valid choices. Defaults to None.
+            case_sensitive (bool, optional): Matching of choices should be case-sensitive. Defaults to True.
+            show_default (bool, optional): Show default in prompt. Defaults to True.
+            show_choices (bool, optional): Show choices in prompt. Defaults to True.
+            stream (TextIO, optional): Optional text file open for reading to get input. Defaults to None.
+        """
+        _prompt = cls(
+            prompt,
+            console=console,
+            password=password,
+            choices=choices,
+            case_sensitive=case_sensitive,
+            show_default=show_default,
+            show_choices=show_choices,
+        )
+        return _prompt(default=default, stream=stream)
+
+    def render_default(self, default: DefaultType) -> Text:
+        """Turn the supplied default in to a Text instance.
+
+        Args:
+            default (DefaultType): Default value.
+
+        Returns:
+            Text: Text containing rendering of default value.
+        """
+        return Text(f"({default})", "prompt.default")
+
+    def make_prompt(self, default: DefaultType) -> Text:
+        """Make prompt text.
+
+        Args:
+            default (DefaultType): Default value.
+
+        Returns:
+            Text: Text to display in prompt.
+        """
+        prompt = self.prompt.copy()
+        prompt.end = ""
+
+        if self.show_choices and self.choices:
+            _choices = "/".join(self.choices)
+            choices = f"[{_choices}]"
+            prompt.append(" ")
+            prompt.append(choices, "prompt.choices")
+
+        if (
+            default != ...
+            and self.show_default
+            and isinstance(default, (str, self.response_type))
+        ):
+            prompt.append(" ")
+            _default = self.render_default(default)
+            prompt.append(_default)
+
+        prompt.append(self.prompt_suffix)
+
+        return prompt
+
+    @classmethod
+    def get_input(
+        cls,
+        console: Console,
+        prompt: TextType,
+        password: bool,
+        stream: Optional[TextIO] = None,
+    ) -> str:
+        """Get input from user.
+
+        Args:
+            console (Console): Console instance.
+            prompt (TextType): Prompt text.
+            password (bool): Enable password entry.
+
+        Returns:
+            str: String from user.
+        """
+        return console.input(prompt, password=password, stream=stream)
+
+    def check_choice(self, value: str) -> bool:
+        """Check value is in the list of valid choices.
+
+        Args:
+            value (str): Value entered by user.
+
+        Returns:
+            bool: True if choice was valid, otherwise False.
+        """
+        assert self.choices is not None
+        if self.case_sensitive:
+            return value.strip() in self.choices
+        return value.strip().lower() in [choice.lower() for choice in self.choices]
+
+    def process_response(self, value: str) -> PromptType:
+        """Process response from user, convert to prompt type.
+
+        Args:
+            value (str): String typed by user.
+
+        Raises:
+            InvalidResponse: If ``value`` is invalid.
+
+        Returns:
+            PromptType: The value to be returned from ask method.
+        """
+        value = value.strip()
+        try:
+            return_value: PromptType = self.response_type(value)
+        except ValueError:
+            raise InvalidResponse(self.validate_error_message)
+
+        if self.choices is not None:
+            if not self.check_choice(value):
+                raise InvalidResponse(self.illegal_choice_message)
+
+            if not self.case_sensitive:
+                # return the original choice, not the lower case version
+                return_value = self.response_type(
+                    self.choices[
+                        [choice.lower() for choice in self.choices].index(value.lower())
+                    ]
+                )
+        return return_value
+
+    def on_validate_error(self, value: str, error: InvalidResponse) -> None:
+        """Called to handle validation error.
+
+        Args:
+            value (str): String entered by user.
+            error (InvalidResponse): Exception instance the initiated the error.
+        """
+        self.console.print(error, markup=True)
+
+    def pre_prompt(self) -> None:
+        """Hook to display something before the prompt."""
+
+    @overload
+    def __call__(self, *, stream: Optional[TextIO] = None) -> PromptType:
+        ...
+
+    @overload
+    def __call__(
+        self, *, default: DefaultType, stream: Optional[TextIO] = None
+    ) -> Union[PromptType, DefaultType]:
+        ...
+
+    def __call__(self, *, default: Any = ..., stream: Optional[TextIO] = None) -> Any:
+        """Run the prompt loop.
+
+        Args:
+            default (Any, optional): Optional default value.
+
+        Returns:
+            PromptType: Processed value.
+        """
+        while True:
+            self.pre_prompt()
+            prompt = self.make_prompt(default)
+            value = self.get_input(self.console, prompt, self.password, stream=stream)
+            if value == "" and default != ...:
+                return default
+            try:
+                return_value = self.process_response(value)
+            except InvalidResponse as error:
+                self.on_validate_error(value, error)
+                continue
+            else:
+                return return_value
+
+
+class Prompt(PromptBase[str]):
+    """A prompt that returns a str.
+
+    Example:
+        >>> name = Prompt.ask("Enter your name")
+
+
+    """
+
+    response_type = str
+
+
+class IntPrompt(PromptBase[int]):
+    """A prompt that returns an integer.
+
+    Example:
+        >>> burrito_count = IntPrompt.ask("How many burritos do you want to order")
+
+    """
+
+    response_type = int
+    validate_error_message = "[prompt.invalid]Please enter a valid integer number"
+
+
+class FloatPrompt(PromptBase[float]):
+    """A prompt that returns a float.
+
+    Example:
+        >>> temperature = FloatPrompt.ask("Enter desired temperature")
+
+    """
+
+    response_type = float
+    validate_error_message = "[prompt.invalid]Please enter a number"
+
+
+class Confirm(PromptBase[bool]):
+    """A yes / no confirmation prompt.
+
+    Example:
+        >>> if Confirm.ask("Continue"):
+                run_job()
+
+    """
+
+    response_type = bool
+    validate_error_message = "[prompt.invalid]Please enter Y or N"
+    choices: List[str] = ["y", "n"]
+
+    def render_default(self, default: DefaultType) -> Text:
+        """Render the default as (y) or (n) rather than True/False."""
+        yes, no = self.choices
+        return Text(f"({yes})" if default else f"({no})", style="prompt.default")
+
+    def process_response(self, value: str) -> bool:
+        """Convert choices to a bool."""
+        value = value.strip().lower()
+        if value not in self.choices:
+            raise InvalidResponse(self.validate_error_message)
+        return value == self.choices[0]
+
+
+if __name__ == "__main__":  # pragma: no cover
+    from rich import print
+
+    if Confirm.ask("Run [i]prompt[/i] tests?", default=True):
+        while True:
+            result = IntPrompt.ask(
+                ":rocket: Enter a number between [b]1[/b] and [b]10[/b]", default=5
+            )
+            if result >= 1 and result <= 10:
+                break
+            print(":pile_of_poo: [prompt.invalid]Number must be between 1 and 10")
+        print(f"number={result}")
+
+        while True:
+            password = Prompt.ask(
+                "Please enter a password [cyan](must be at least 5 characters)",
+                password=True,
+            )
+            if len(password) >= 5:
+                break
+            print("[prompt.invalid]password too short")
+        print(f"password={password!r}")
+
+        fruit = Prompt.ask("Enter a fruit", choices=["apple", "orange", "pear"])
+        print(f"fruit={fruit!r}")
+
+        doggie = Prompt.ask(
+            "What's the best Dog? (Case INSENSITIVE)",
+            choices=["Border Terrier", "Collie", "Labradoodle"],
+            case_sensitive=False,
+        )
+        print(f"doggie={doggie!r}")
+
+    else:
+        print("[b]OK :loudly_crying_face:")
diff --git a/local_packages/rich/protocol.py b/local_packages/rich/protocol.py
new file mode 100644
index 0000000..ae13856
--- /dev/null
+++ b/local_packages/rich/protocol.py
@@ -0,0 +1,41 @@
+from typing import Any, cast, Set, TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from rich.console import RenderableType
+
+_GIBBERISH = """aihwerij235234ljsdnp34ksodfipwoe234234jlskjdf"""
+
+
+def is_renderable(check_object: Any) -> bool:
+    """Check if an object may be rendered by Rich."""
+    return (
+        isinstance(check_object, str)
+        or hasattr(check_object, "__rich__")
+        or hasattr(check_object, "__rich_console__")
+    )
+
+
+def rich_cast(renderable: object) -> "RenderableType":
+    """Cast an object to a renderable by calling __rich__ if present.
+
+    Args:
+        renderable (object): A potentially renderable object
+
+    Returns:
+        object: The result of recursively calling __rich__.
+    """
+    from rich.console import RenderableType
+
+    rich_visited_set: Set[type] = set()  # Prevent potential infinite loop
+    while hasattr(renderable, "__rich__") and not isinstance(renderable, type):
+        # Detect object which claim to have all the attributes
+        if hasattr(renderable, _GIBBERISH):
+            return repr(renderable)
+        cast_method = getattr(renderable, "__rich__")
+        renderable = cast_method()
+        renderable_type = type(renderable)
+        if renderable_type in rich_visited_set:
+            break
+        rich_visited_set.add(renderable_type)
+
+    return cast(RenderableType, renderable)
diff --git a/local_packages/rich/py.typed b/local_packages/rich/py.typed
new file mode 100644
index 0000000..e69de29
diff --git a/local_packages/rich/region.py b/local_packages/rich/region.py
new file mode 100644
index 0000000..75b3631
--- /dev/null
+++ b/local_packages/rich/region.py
@@ -0,0 +1,10 @@
+from typing import NamedTuple
+
+
+class Region(NamedTuple):
+    """Defines a rectangular region of the screen."""
+
+    x: int
+    y: int
+    width: int
+    height: int
diff --git a/local_packages/rich/repr.py b/local_packages/rich/repr.py
new file mode 100644
index 0000000..0e9bc0e
--- /dev/null
+++ b/local_packages/rich/repr.py
@@ -0,0 +1,150 @@
+from functools import partial
+from typing import (
+    Any,
+    Callable,
+    Iterable,
+    List,
+    Optional,
+    Tuple,
+    Type,
+    TypeVar,
+    Union,
+    overload,
+)
+
+T = TypeVar("T")
+
+
+Result = Iterable[Union[Any, Tuple[Any], Tuple[str, Any], Tuple[str, Any, Any]]]
+RichReprResult = Result
+
+
+class ReprError(Exception):
+    """An error occurred when attempting to build a repr."""
+
+
+@overload
+def auto(cls: Optional[Type[T]]) -> Type[T]:
+    ...
+
+
+@overload
+def auto(*, angular: bool = False) -> Callable[[Type[T]], Type[T]]:
+    ...
+
+
+def auto(
+    cls: Optional[Type[T]] = None, *, angular: Optional[bool] = None
+) -> Union[Type[T], Callable[[Type[T]], Type[T]]]:
+    """Class decorator to create __repr__ from __rich_repr__"""
+
+    def do_replace(cls: Type[T], angular: Optional[bool] = None) -> Type[T]:
+        def auto_repr(self: T) -> str:
+            """Create repr string from __rich_repr__"""
+            repr_str: List[str] = []
+            append = repr_str.append
+
+            angular: bool = getattr(self.__rich_repr__, "angular", False)  # type: ignore[attr-defined]
+            for arg in self.__rich_repr__():  # type: ignore[attr-defined]
+                if isinstance(arg, tuple):
+                    if len(arg) == 1:
+                        append(repr(arg[0]))
+                    else:
+                        key, value, *default = arg
+                        if key is None:
+                            append(repr(value))
+                        else:
+                            if default and default[0] == value:
+                                continue
+                            append(f"{key}={value!r}")
+                else:
+                    append(repr(arg))
+            if angular:
+                return f"<{self.__class__.__name__} {' '.join(repr_str)}>"
+            else:
+                return f"{self.__class__.__name__}({', '.join(repr_str)})"
+
+        def auto_rich_repr(self: Type[T]) -> Result:
+            """Auto generate __rich_rep__ from signature of __init__"""
+            try:
+                import inspect
+
+                signature = inspect.signature(self.__init__)
+                for name, param in signature.parameters.items():
+                    if param.kind == param.POSITIONAL_ONLY:
+                        yield getattr(self, name)
+                    elif param.kind in (
+                        param.POSITIONAL_OR_KEYWORD,
+                        param.KEYWORD_ONLY,
+                    ):
+                        if param.default is param.empty:
+                            yield getattr(self, param.name)
+                        else:
+                            yield param.name, getattr(self, param.name), param.default
+            except Exception as error:
+                raise ReprError(
+                    f"Failed to auto generate __rich_repr__; {error}"
+                ) from None
+
+        if not hasattr(cls, "__rich_repr__"):
+            auto_rich_repr.__doc__ = "Build a rich repr"
+            cls.__rich_repr__ = auto_rich_repr  # type: ignore[attr-defined]
+
+        auto_repr.__doc__ = "Return repr(self)"
+        cls.__repr__ = auto_repr  # type: ignore[assignment]
+        if angular is not None:
+            cls.__rich_repr__.angular = angular  # type: ignore[attr-defined]
+        return cls
+
+    if cls is None:
+        return partial(do_replace, angular=angular)
+    else:
+        return do_replace(cls, angular=angular)
+
+
+@overload
+def rich_repr(cls: Optional[Type[T]]) -> Type[T]:
+    ...
+
+
+@overload
+def rich_repr(*, angular: bool = False) -> Callable[[Type[T]], Type[T]]:
+    ...
+
+
+def rich_repr(
+    cls: Optional[Type[T]] = None, *, angular: bool = False
+) -> Union[Type[T], Callable[[Type[T]], Type[T]]]:
+    if cls is None:
+        return auto(angular=angular)
+    else:
+        return auto(cls)
+
+
+if __name__ == "__main__":
+
+    @auto
+    class Foo:
+        def __rich_repr__(self) -> Result:
+            yield "foo"
+            yield "bar", {"shopping": ["eggs", "ham", "pineapple"]}
+            yield "buy", "hand sanitizer"
+
+    foo = Foo()
+    from rich.console import Console
+
+    console = Console()
+
+    console.rule("Standard repr")
+    console.print(foo)
+
+    console.print(foo, width=60)
+    console.print(foo, width=30)
+
+    console.rule("Angular repr")
+    Foo.__rich_repr__.angular = True  # type: ignore[attr-defined]
+
+    console.print(foo)
+
+    console.print(foo, width=60)
+    console.print(foo, width=30)
diff --git a/local_packages/rich/rule.py b/local_packages/rich/rule.py
new file mode 100644
index 0000000..fb3d432
--- /dev/null
+++ b/local_packages/rich/rule.py
@@ -0,0 +1,130 @@
+from typing import Union
+
+from .align import AlignMethod
+from .cells import cell_len, set_cell_size
+from .console import Console, ConsoleOptions, RenderResult
+from .jupyter import JupyterMixin
+from .measure import Measurement
+from .style import Style
+from .text import Text
+
+
+class Rule(JupyterMixin):
+    """A console renderable to draw a horizontal rule (line).
+
+    Args:
+        title (Union[str, Text], optional): Text to render in the rule. Defaults to "".
+        characters (str, optional): Character(s) used to draw the line. Defaults to "─".
+        style (StyleType, optional): Style of Rule. Defaults to "rule.line".
+        end (str, optional): Character at end of Rule. defaults to "\\\\n"
+        align (str, optional): How to align the title, one of "left", "center", or "right". Defaults to "center".
+    """
+
+    def __init__(
+        self,
+        title: Union[str, Text] = "",
+        *,
+        characters: str = "─",
+        style: Union[str, Style] = "rule.line",
+        end: str = "\n",
+        align: AlignMethod = "center",
+    ) -> None:
+        if cell_len(characters) < 1:
+            raise ValueError(
+                "'characters' argument must have a cell width of at least 1"
+            )
+        if align not in ("left", "center", "right"):
+            raise ValueError(
+                f'invalid value for align, expected "left", "center", "right" (not {align!r})'
+            )
+        self.title = title
+        self.characters = characters
+        self.style = style
+        self.end = end
+        self.align = align
+
+    def __repr__(self) -> str:
+        return f"Rule({self.title!r}, {self.characters!r})"
+
+    def __rich_console__(
+        self, console: Console, options: ConsoleOptions
+    ) -> RenderResult:
+        width = options.max_width
+
+        characters = (
+            "-"
+            if (options.ascii_only and not self.characters.isascii())
+            else self.characters
+        )
+
+        chars_len = cell_len(characters)
+        if not self.title:
+            yield self._rule_line(chars_len, width)
+            return
+
+        if isinstance(self.title, Text):
+            title_text = self.title
+        else:
+            title_text = console.render_str(self.title, style="rule.text")
+
+        title_text.plain = title_text.plain.replace("\n", " ")
+        title_text.expand_tabs()
+
+        required_space = 4 if self.align == "center" else 2
+        truncate_width = max(0, width - required_space)
+        if not truncate_width:
+            yield self._rule_line(chars_len, width)
+            return
+
+        rule_text = Text(end=self.end)
+        if self.align == "center":
+            title_text.truncate(truncate_width, overflow="ellipsis")
+            side_width = (width - cell_len(title_text.plain)) // 2
+            left = Text(characters * (side_width // chars_len + 1))
+            left.truncate(side_width - 1)
+            right_length = width - cell_len(left.plain) - cell_len(title_text.plain)
+            right = Text(characters * (side_width // chars_len + 1))
+            right.truncate(right_length)
+            rule_text.append(left.plain + " ", self.style)
+            rule_text.append(title_text)
+            rule_text.append(" " + right.plain, self.style)
+        elif self.align == "left":
+            title_text.truncate(truncate_width, overflow="ellipsis")
+            rule_text.append(title_text)
+            rule_text.append(" ")
+            rule_text.append(characters * (width - rule_text.cell_len), self.style)
+        elif self.align == "right":
+            title_text.truncate(truncate_width, overflow="ellipsis")
+            rule_text.append(characters * (width - title_text.cell_len - 1), self.style)
+            rule_text.append(" ")
+            rule_text.append(title_text)
+
+        rule_text.plain = set_cell_size(rule_text.plain, width)
+        yield rule_text
+
+    def _rule_line(self, chars_len: int, width: int) -> Text:
+        rule_text = Text(self.characters * ((width // chars_len) + 1), self.style)
+        rule_text.truncate(width)
+        rule_text.plain = set_cell_size(rule_text.plain, width)
+        return rule_text
+
+    def __rich_measure__(
+        self, console: Console, options: ConsoleOptions
+    ) -> Measurement:
+        return Measurement(1, 1)
+
+
+if __name__ == "__main__":  # pragma: no cover
+    import sys
+
+    from rich.console import Console
+
+    try:
+        text = sys.argv[1]
+    except IndexError:
+        text = "Hello, World"
+    console = Console()
+    console.print(Rule(title=text))
+
+    console = Console()
+    console.print(Rule("foo"), width=4)
diff --git a/local_packages/rich/scope.py b/local_packages/rich/scope.py
new file mode 100644
index 0000000..41d0299
--- /dev/null
+++ b/local_packages/rich/scope.py
@@ -0,0 +1,92 @@
+from collections.abc import Mapping
+from typing import TYPE_CHECKING, Any, Optional, Tuple
+
+from .highlighter import ReprHighlighter
+from .panel import Panel
+from .pretty import Pretty
+from .table import Table
+from .text import Text, TextType
+
+if TYPE_CHECKING:
+    from .console import ConsoleRenderable, OverflowMethod
+
+
+def render_scope(
+    scope: "Mapping[str, Any]",
+    *,
+    title: Optional[TextType] = None,
+    sort_keys: bool = True,
+    indent_guides: bool = False,
+    max_length: Optional[int] = None,
+    max_string: Optional[int] = None,
+    max_depth: Optional[int] = None,
+    overflow: Optional["OverflowMethod"] = None,
+) -> "ConsoleRenderable":
+    """Render python variables in a given scope.
+
+    Args:
+        scope (Mapping): A mapping containing variable names and values.
+        title (str, optional): Optional title. Defaults to None.
+        sort_keys (bool, optional): Enable sorting of items. Defaults to True.
+        indent_guides (bool, optional): Enable indentation guides. Defaults to False.
+        max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+            Defaults to None.
+        max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
+        max_depth (int, optional): Maximum depths of locals before truncating, or None to disable. Defaults to None.
+        overflow (OverflowMethod, optional): How to handle overflowing locals, or None to disable. Defaults to None.
+
+    Returns:
+        ConsoleRenderable: A renderable object.
+    """
+    highlighter = ReprHighlighter()
+    items_table = Table.grid(padding=(0, 1), expand=False)
+    items_table.add_column(justify="right")
+
+    def sort_items(item: Tuple[str, Any]) -> Tuple[bool, str]:
+        """Sort special variables first, then alphabetically."""
+        key, _ = item
+        return (not key.startswith("__"), key.lower())
+
+    items = sorted(scope.items(), key=sort_items) if sort_keys else scope.items()
+    for key, value in items:
+        key_text = Text.assemble(
+            (key, "scope.key.special" if key.startswith("__") else "scope.key"),
+            (" =", "scope.equals"),
+        )
+        items_table.add_row(
+            key_text,
+            Pretty(
+                value,
+                highlighter=highlighter,
+                indent_guides=indent_guides,
+                max_length=max_length,
+                max_string=max_string,
+                max_depth=max_depth,
+                overflow=overflow,
+            ),
+        )
+    return Panel.fit(
+        items_table,
+        title=title,
+        border_style="scope.border",
+        padding=(0, 1),
+    )
+
+
+if __name__ == "__main__":  # pragma: no cover
+    from rich import print
+
+    print()
+
+    def test(foo: float, bar: float) -> None:
+        list_of_things = [1, 2, 3, None, 4, True, False, "Hello World"]
+        dict_of_things = {
+            "version": "1.1",
+            "method": "confirmFruitPurchase",
+            "params": [["apple", "orange", "mangoes", "pomelo"], 1.123],
+            "id": "194521489",
+        }
+        print(render_scope(locals(), title="[i]locals", sort_keys=False))
+
+    test(20.3423, 3.1427)
+    print()
diff --git a/local_packages/rich/screen.py b/local_packages/rich/screen.py
new file mode 100644
index 0000000..b4f7fd1
--- /dev/null
+++ b/local_packages/rich/screen.py
@@ -0,0 +1,54 @@
+from typing import Optional, TYPE_CHECKING
+
+from .segment import Segment
+from .style import StyleType
+from ._loop import loop_last
+
+
+if TYPE_CHECKING:
+    from .console import (
+        Console,
+        ConsoleOptions,
+        RenderResult,
+        RenderableType,
+        Group,
+    )
+
+
+class Screen:
+    """A renderable that fills the terminal screen and crops excess.
+
+    Args:
+        renderable (RenderableType): Child renderable.
+        style (StyleType, optional): Optional background style. Defaults to None.
+    """
+
+    renderable: "RenderableType"
+
+    def __init__(
+        self,
+        *renderables: "RenderableType",
+        style: Optional[StyleType] = None,
+        application_mode: bool = False,
+    ) -> None:
+        from rich.console import Group
+
+        self.renderable = Group(*renderables)
+        self.style = style
+        self.application_mode = application_mode
+
+    def __rich_console__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> "RenderResult":
+        width, height = options.size
+        style = console.get_style(self.style) if self.style else None
+        render_options = options.update(width=width, height=height)
+        lines = console.render_lines(
+            self.renderable or "", render_options, style=style, pad=True
+        )
+        lines = Segment.set_shape(lines, width, height, style=style)
+        new_line = Segment("\n\r") if self.application_mode else Segment.line()
+        for last, line in loop_last(lines):
+            yield from line
+            if not last:
+                yield new_line
diff --git a/local_packages/rich/segment.py b/local_packages/rich/segment.py
new file mode 100644
index 0000000..dd1e8c2
--- /dev/null
+++ b/local_packages/rich/segment.py
@@ -0,0 +1,780 @@
+from enum import IntEnum
+from functools import lru_cache
+from itertools import filterfalse
+from operator import attrgetter
+from typing import (
+    TYPE_CHECKING,
+    Dict,
+    Iterable,
+    List,
+    NamedTuple,
+    Optional,
+    Sequence,
+    Tuple,
+    Type,
+    Union,
+)
+
+from .cells import (
+    _is_single_cell_widths,
+    cached_cell_len,
+    cell_len,
+    get_character_cell_size,
+    set_cell_size,
+)
+from .repr import Result, rich_repr
+from .style import Style
+
+if TYPE_CHECKING:
+    from .console import Console, ConsoleOptions, RenderResult
+
+
+class ControlType(IntEnum):
+    """Non-printable control codes which typically translate to ANSI codes."""
+
+    BELL = 1
+    CARRIAGE_RETURN = 2
+    HOME = 3
+    CLEAR = 4
+    SHOW_CURSOR = 5
+    HIDE_CURSOR = 6
+    ENABLE_ALT_SCREEN = 7
+    DISABLE_ALT_SCREEN = 8
+    CURSOR_UP = 9
+    CURSOR_DOWN = 10
+    CURSOR_FORWARD = 11
+    CURSOR_BACKWARD = 12
+    CURSOR_MOVE_TO_COLUMN = 13
+    CURSOR_MOVE_TO = 14
+    ERASE_IN_LINE = 15
+    SET_WINDOW_TITLE = 16
+
+
+ControlCode = Union[
+    Tuple[ControlType],
+    Tuple[ControlType, Union[int, str]],
+    Tuple[ControlType, int, int],
+]
+
+
+@rich_repr()
+class Segment(NamedTuple):
+    """A piece of text with associated style. Segments are produced by the Console render process and
+    are ultimately converted in to strings to be written to the terminal.
+
+    Args:
+        text (str): A piece of text.
+        style (:class:`~rich.style.Style`, optional): An optional style to apply to the text.
+        control (Tuple[ControlCode], optional): Optional sequence of control codes.
+
+    Attributes:
+        cell_length (int): The cell length of this Segment.
+    """
+
+    text: str
+    style: Optional[Style] = None
+    control: Optional[Sequence[ControlCode]] = None
+
+    @property
+    def cell_length(self) -> int:
+        """The number of terminal cells required to display self.text.
+
+        Returns:
+            int: A number of cells.
+        """
+        text, _style, control = self
+        return 0 if control else cell_len(text)
+
+    def __rich_repr__(self) -> Result:
+        yield self.text
+        if self.control is None:
+            if self.style is not None:
+                yield self.style
+        else:
+            yield self.style
+            yield self.control
+
+    def __bool__(self) -> bool:
+        """Check if the segment contains text."""
+        return bool(self.text)
+
+    @property
+    def is_control(self) -> bool:
+        """Check if the segment contains control codes."""
+        return self.control is not None
+
+    @classmethod
+    @lru_cache(1024 * 16)
+    def _split_cells(cls, segment: "Segment", cut: int) -> Tuple["Segment", "Segment"]:
+        """Split a segment in to two at a given cell position.
+
+        Note that splitting a double-width character, may result in that character turning
+        into two spaces.
+
+        Args:
+            segment (Segment): A segment to split.
+            cut (int): A cell position to cut on.
+
+        Returns:
+            A tuple of two segments.
+        """
+        text, style, control = segment
+        _Segment = Segment
+        cell_length = segment.cell_length
+        if cut >= cell_length:
+            return segment, _Segment("", style, control)
+
+        cell_size = get_character_cell_size
+
+        pos = int((cut / cell_length) * len(text))
+
+        while True:
+            before = text[:pos]
+            cell_pos = cell_len(before)
+            out_by = cell_pos - cut
+            if not out_by:
+                return (
+                    _Segment(before, style, control),
+                    _Segment(text[pos:], style, control),
+                )
+            if out_by == -1 and cell_size(text[pos]) == 2:
+                return (
+                    _Segment(text[:pos] + " ", style, control),
+                    _Segment(" " + text[pos + 1 :], style, control),
+                )
+            if out_by == +1 and cell_size(text[pos - 1]) == 2:
+                return (
+                    _Segment(text[: pos - 1] + " ", style, control),
+                    _Segment(" " + text[pos:], style, control),
+                )
+            if cell_pos < cut:
+                pos += 1
+            else:
+                pos -= 1
+
+    def split_cells(self, cut: int) -> Tuple["Segment", "Segment"]:
+        """Split segment in to two segments at the specified column.
+
+        If the cut point falls in the middle of a 2-cell wide character then it is replaced
+        by two spaces, to preserve the display width of the parent segment.
+
+        Args:
+            cut (int): Offset within the segment to cut.
+
+        Returns:
+            Tuple[Segment, Segment]: Two segments.
+        """
+        text, style, control = self
+        assert cut >= 0
+
+        if _is_single_cell_widths(text):
+            # Fast path with all 1 cell characters
+            if cut >= len(text):
+                return self, Segment("", style, control)
+            return (
+                Segment(text[:cut], style, control),
+                Segment(text[cut:], style, control),
+            )
+
+        return self._split_cells(self, cut)
+
+    @classmethod
+    def line(cls) -> "Segment":
+        """Make a new line segment."""
+        return cls("\n")
+
+    @classmethod
+    def apply_style(
+        cls,
+        segments: Iterable["Segment"],
+        style: Optional[Style] = None,
+        post_style: Optional[Style] = None,
+    ) -> Iterable["Segment"]:
+        """Apply style(s) to an iterable of segments.
+
+        Returns an iterable of segments where the style is replaced by ``style + segment.style + post_style``.
+
+        Args:
+            segments (Iterable[Segment]): Segments to process.
+            style (Style, optional): Base style. Defaults to None.
+            post_style (Style, optional): Style to apply on top of segment style. Defaults to None.
+
+        Returns:
+            Iterable[Segments]: A new iterable of segments (possibly the same iterable).
+        """
+        result_segments = segments
+        if style:
+            apply = style.__add__
+            result_segments = (
+                cls(text, None if control else apply(_style), control)
+                for text, _style, control in result_segments
+            )
+        if post_style:
+            result_segments = (
+                cls(
+                    text,
+                    (
+                        None
+                        if control
+                        else (_style + post_style if _style else post_style)
+                    ),
+                    control,
+                )
+                for text, _style, control in result_segments
+            )
+        return result_segments
+
+    @classmethod
+    def filter_control(
+        cls, segments: Iterable["Segment"], is_control: bool = False
+    ) -> Iterable["Segment"]:
+        """Filter segments by ``is_control`` attribute.
+
+        Args:
+            segments (Iterable[Segment]): An iterable of Segment instances.
+            is_control (bool, optional): is_control flag to match in search.
+
+        Returns:
+            Iterable[Segment]: And iterable of Segment instances.
+
+        """
+        if is_control:
+            return filter(attrgetter("control"), segments)
+        else:
+            return filterfalse(attrgetter("control"), segments)
+
+    @classmethod
+    def split_lines(cls, segments: Iterable["Segment"]) -> Iterable[List["Segment"]]:
+        """Split a sequence of segments in to a list of lines.
+
+        Args:
+            segments (Iterable[Segment]): Segments potentially containing line feeds.
+
+        Yields:
+            Iterable[List[Segment]]: Iterable of segment lists, one per line.
+        """
+        line: List[Segment] = []
+        append = line.append
+
+        for segment in segments:
+            if "\n" in segment.text and not segment.control:
+                text, style, _ = segment
+                while text:
+                    _text, new_line, text = text.partition("\n")
+                    if _text:
+                        append(cls(_text, style))
+                    if new_line:
+                        yield line
+                        line = []
+                        append = line.append
+            else:
+                append(segment)
+        if line:
+            yield line
+
+    @classmethod
+    def split_lines_terminator(
+        cls, segments: Iterable["Segment"]
+    ) -> Iterable[Tuple[List["Segment"], bool]]:
+        """Split a sequence of segments in to a list of lines and a boolean to indicate if there was a new line.
+
+        Args:
+            segments (Iterable[Segment]): Segments potentially containing line feeds.
+
+        Yields:
+            Iterable[List[Segment]]: Iterable of segment lists, one per line.
+        """
+        line: List[Segment] = []
+        append = line.append
+
+        for segment in segments:
+            if "\n" in segment.text and not segment.control:
+                text, style, _ = segment
+                while text:
+                    _text, new_line, text = text.partition("\n")
+                    if _text:
+                        append(cls(_text, style))
+                    if new_line:
+                        yield (line, True)
+                        line = []
+                        append = line.append
+            else:
+                append(segment)
+        if line:
+            yield (line, False)
+
+    @classmethod
+    def split_and_crop_lines(
+        cls,
+        segments: Iterable["Segment"],
+        length: int,
+        style: Optional[Style] = None,
+        pad: bool = True,
+        include_new_lines: bool = True,
+    ) -> Iterable[List["Segment"]]:
+        """Split segments in to lines, and crop lines greater than a given length.
+
+        Args:
+            segments (Iterable[Segment]): An iterable of segments, probably
+                generated from console.render.
+            length (int): Desired line length.
+            style (Style, optional): Style to use for any padding.
+            pad (bool): Enable padding of lines that are less than `length`.
+
+        Returns:
+            Iterable[List[Segment]]: An iterable of lines of segments.
+        """
+        line: List[Segment] = []
+        append = line.append
+
+        adjust_line_length = cls.adjust_line_length
+        new_line_segment = cls("\n")
+
+        for segment in segments:
+            if "\n" in segment.text and not segment.control:
+                text, segment_style, _ = segment
+                while text:
+                    _text, new_line, text = text.partition("\n")
+                    if _text:
+                        append(cls(_text, segment_style))
+                    if new_line:
+                        cropped_line = adjust_line_length(
+                            line, length, style=style, pad=pad
+                        )
+                        if include_new_lines:
+                            cropped_line.append(new_line_segment)
+                        yield cropped_line
+                        line.clear()
+            else:
+                append(segment)
+        if line:
+            yield adjust_line_length(line, length, style=style, pad=pad)
+
+    @classmethod
+    def adjust_line_length(
+        cls,
+        line: List["Segment"],
+        length: int,
+        style: Optional[Style] = None,
+        pad: bool = True,
+    ) -> List["Segment"]:
+        """Adjust a line to a given width (cropping or padding as required).
+
+        Args:
+            segments (Iterable[Segment]): A list of segments in a single line.
+            length (int): The desired width of the line.
+            style (Style, optional): The style of padding if used (space on the end). Defaults to None.
+            pad (bool, optional): Pad lines with spaces if they are shorter than `length`. Defaults to True.
+
+        Returns:
+            List[Segment]: A line of segments with the desired length.
+        """
+        line_length = sum(segment.cell_length for segment in line)
+        new_line: List[Segment]
+
+        if line_length < length:
+            if pad:
+                new_line = line + [cls(" " * (length - line_length), style)]
+            else:
+                new_line = line[:]
+        elif line_length > length:
+            new_line = []
+            append = new_line.append
+            line_length = 0
+            for segment in line:
+                segment_length = segment.cell_length
+                if line_length + segment_length < length or segment.control:
+                    append(segment)
+                    line_length += segment_length
+                else:
+                    text, segment_style, _ = segment
+                    text = set_cell_size(text, length - line_length)
+                    append(cls(text, segment_style))
+                    break
+        else:
+            new_line = line[:]
+        return new_line
+
+    @classmethod
+    def get_line_length(cls, line: List["Segment"]) -> int:
+        """Get the length of list of segments.
+
+        Args:
+            line (List[Segment]): A line encoded as a list of Segments (assumes no '\\\\n' characters),
+
+        Returns:
+            int: The length of the line.
+        """
+        _cell_len = cell_len
+        return sum(_cell_len(text) for text, style, control in line if not control)
+
+    @classmethod
+    def get_shape(cls, lines: List[List["Segment"]]) -> Tuple[int, int]:
+        """Get the shape (enclosing rectangle) of a list of lines.
+
+        Args:
+            lines (List[List[Segment]]): A list of lines (no '\\\\n' characters).
+
+        Returns:
+            Tuple[int, int]: Width and height in characters.
+        """
+        get_line_length = cls.get_line_length
+        max_width = max(get_line_length(line) for line in lines) if lines else 0
+        return (max_width, len(lines))
+
+    @classmethod
+    def set_shape(
+        cls,
+        lines: List[List["Segment"]],
+        width: int,
+        height: Optional[int] = None,
+        style: Optional[Style] = None,
+        new_lines: bool = False,
+    ) -> List[List["Segment"]]:
+        """Set the shape of a list of lines (enclosing rectangle).
+
+        Args:
+            lines (List[List[Segment]]): A list of lines.
+            width (int): Desired width.
+            height (int, optional): Desired height or None for no change.
+            style (Style, optional): Style of any padding added.
+            new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
+
+        Returns:
+            List[List[Segment]]: New list of lines.
+        """
+        _height = height or len(lines)
+
+        blank = (
+            [cls(" " * width + "\n", style)] if new_lines else [cls(" " * width, style)]
+        )
+
+        adjust_line_length = cls.adjust_line_length
+        shaped_lines = lines[:_height]
+        shaped_lines[:] = [
+            adjust_line_length(line, width, style=style) for line in lines
+        ]
+        if len(shaped_lines) < _height:
+            shaped_lines.extend([blank] * (_height - len(shaped_lines)))
+        return shaped_lines
+
+    @classmethod
+    def align_top(
+        cls: Type["Segment"],
+        lines: List[List["Segment"]],
+        width: int,
+        height: int,
+        style: Style,
+        new_lines: bool = False,
+    ) -> List[List["Segment"]]:
+        """Aligns lines to top (adds extra lines to bottom as required).
+
+        Args:
+            lines (List[List[Segment]]): A list of lines.
+            width (int): Desired width.
+            height (int, optional): Desired height or None for no change.
+            style (Style): Style of any padding added.
+            new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
+
+        Returns:
+            List[List[Segment]]: New list of lines.
+        """
+        extra_lines = height - len(lines)
+        if not extra_lines:
+            return lines[:]
+        lines = lines[:height]
+        blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style)
+        lines = lines + [[blank]] * extra_lines
+        return lines
+
+    @classmethod
+    def align_bottom(
+        cls: Type["Segment"],
+        lines: List[List["Segment"]],
+        width: int,
+        height: int,
+        style: Style,
+        new_lines: bool = False,
+    ) -> List[List["Segment"]]:
+        """Aligns render to bottom (adds extra lines above as required).
+
+        Args:
+            lines (List[List[Segment]]): A list of lines.
+            width (int): Desired width.
+            height (int, optional): Desired height or None for no change.
+            style (Style): Style of any padding added. Defaults to None.
+            new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
+
+        Returns:
+            List[List[Segment]]: New list of lines.
+        """
+        extra_lines = height - len(lines)
+        if not extra_lines:
+            return lines[:]
+        lines = lines[:height]
+        blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style)
+        lines = [[blank]] * extra_lines + lines
+        return lines
+
+    @classmethod
+    def align_middle(
+        cls: Type["Segment"],
+        lines: List[List["Segment"]],
+        width: int,
+        height: int,
+        style: Style,
+        new_lines: bool = False,
+    ) -> List[List["Segment"]]:
+        """Aligns lines to middle (adds extra lines to above and below as required).
+
+        Args:
+            lines (List[List[Segment]]): A list of lines.
+            width (int): Desired width.
+            height (int, optional): Desired height or None for no change.
+            style (Style): Style of any padding added.
+            new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
+
+        Returns:
+            List[List[Segment]]: New list of lines.
+        """
+        extra_lines = height - len(lines)
+        if not extra_lines:
+            return lines[:]
+        lines = lines[:height]
+        blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style)
+        top_lines = extra_lines // 2
+        bottom_lines = extra_lines - top_lines
+        lines = [[blank]] * top_lines + lines + [[blank]] * bottom_lines
+        return lines
+
+    @classmethod
+    def simplify(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
+        """Simplify an iterable of segments by combining contiguous segments with the same style.
+
+        Args:
+            segments (Iterable[Segment]): An iterable of segments.
+
+        Returns:
+            Iterable[Segment]: A possibly smaller iterable of segments that will render the same way.
+        """
+        iter_segments = iter(segments)
+        try:
+            last_segment = next(iter_segments)
+        except StopIteration:
+            return
+
+        _Segment = Segment
+        for segment in iter_segments:
+            if last_segment.style == segment.style and not segment.control:
+                last_segment = _Segment(
+                    last_segment.text + segment.text, last_segment.style
+                )
+            else:
+                yield last_segment
+                last_segment = segment
+        yield last_segment
+
+    @classmethod
+    def strip_links(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
+        """Remove all links from an iterable of styles.
+
+        Args:
+            segments (Iterable[Segment]): An iterable segments.
+
+        Yields:
+            Segment: Segments with link removed.
+        """
+        for segment in segments:
+            if segment.control or segment.style is None:
+                yield segment
+            else:
+                text, style, _control = segment
+                yield cls(text, style.update_link(None) if style else None)
+
+    @classmethod
+    def strip_styles(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
+        """Remove all styles from an iterable of segments.
+
+        Args:
+            segments (Iterable[Segment]): An iterable segments.
+
+        Yields:
+            Segment: Segments with styles replace with None
+        """
+        for text, _style, control in segments:
+            yield cls(text, None, control)
+
+    @classmethod
+    def remove_color(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
+        """Remove all color from an iterable of segments.
+
+        Args:
+            segments (Iterable[Segment]): An iterable segments.
+
+        Yields:
+            Segment: Segments with colorless style.
+        """
+
+        cache: Dict[Style, Style] = {}
+        for text, style, control in segments:
+            if style:
+                colorless_style = cache.get(style)
+                if colorless_style is None:
+                    colorless_style = style.without_color
+                    cache[style] = colorless_style
+                yield cls(text, colorless_style, control)
+            else:
+                yield cls(text, None, control)
+
+    @classmethod
+    def divide(
+        cls, segments: Iterable["Segment"], cuts: Iterable[int]
+    ) -> Iterable[List["Segment"]]:
+        """Divides an iterable of segments in to portions.
+
+        Args:
+            cuts (Iterable[int]): Cell positions where to divide.
+
+        Yields:
+            [Iterable[List[Segment]]]: An iterable of Segments in List.
+        """
+        split_segments: List["Segment"] = []
+        add_segment = split_segments.append
+
+        iter_cuts = iter(cuts)
+
+        while True:
+            cut = next(iter_cuts, -1)
+            if cut == -1:
+                return
+            if cut != 0:
+                break
+            yield []
+        pos = 0
+
+        segments_clear = split_segments.clear
+        segments_copy = split_segments.copy
+
+        _cell_len = cached_cell_len
+        for segment in segments:
+            text, _style, control = segment
+            while text:
+                end_pos = pos if control else pos + _cell_len(text)
+                if end_pos < cut:
+                    add_segment(segment)
+                    pos = end_pos
+                    break
+
+                if end_pos == cut:
+                    add_segment(segment)
+                    yield segments_copy()
+                    segments_clear()
+                    pos = end_pos
+
+                    cut = next(iter_cuts, -1)
+                    if cut == -1:
+                        if split_segments:
+                            yield segments_copy()
+                        return
+
+                    break
+
+                else:
+                    before, segment = segment.split_cells(cut - pos)
+                    text, _style, control = segment
+                    add_segment(before)
+                    yield segments_copy()
+                    segments_clear()
+                    pos = cut
+
+                cut = next(iter_cuts, -1)
+                if cut == -1:
+                    if split_segments:
+                        yield segments_copy()
+                    return
+
+        yield segments_copy()
+
+
+class Segments:
+    """A simple renderable to render an iterable of segments. This class may be useful if
+    you want to print segments outside of a __rich_console__ method.
+
+    Args:
+        segments (Iterable[Segment]): An iterable of segments.
+        new_lines (bool, optional): Add new lines between segments. Defaults to False.
+    """
+
+    def __init__(self, segments: Iterable[Segment], new_lines: bool = False) -> None:
+        self.segments = list(segments)
+        self.new_lines = new_lines
+
+    def __rich_console__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> "RenderResult":
+        if self.new_lines:
+            line = Segment.line()
+            for segment in self.segments:
+                yield segment
+                yield line
+        else:
+            yield from self.segments
+
+
+class SegmentLines:
+    def __init__(self, lines: Iterable[List[Segment]], new_lines: bool = False) -> None:
+        """A simple renderable containing a number of lines of segments. May be used as an intermediate
+        in rendering process.
+
+        Args:
+            lines (Iterable[List[Segment]]): Lists of segments forming lines.
+            new_lines (bool, optional): Insert new lines after each line. Defaults to False.
+        """
+        self.lines = list(lines)
+        self.new_lines = new_lines
+
+    def __rich_console__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> "RenderResult":
+        if self.new_lines:
+            new_line = Segment.line()
+            for line in self.lines:
+                yield from line
+                yield new_line
+        else:
+            for line in self.lines:
+                yield from line
+
+
+if __name__ == "__main__":  # pragma: no cover
+    from rich.console import Console
+    from rich.syntax import Syntax
+    from rich.text import Text
+
+    code = """from rich.console import Console
+console = Console()
+text = Text.from_markup("Hello, [bold magenta]World[/]!")
+console.print(text)"""
+
+    text = Text.from_markup("Hello, [bold magenta]World[/]!")
+
+    console = Console()
+
+    console.rule("rich.Segment")
+    console.print(
+        "A Segment is the last step in the Rich render process before generating text with ANSI codes."
+    )
+    console.print("\nConsider the following code:\n")
+    console.print(Syntax(code, "python", line_numbers=True))
+    console.print()
+    console.print(
+        "When you call [b]print()[/b], Rich [i]renders[/i] the object in to the following:\n"
+    )
+    fragments = list(console.render(text))
+    console.print(fragments)
+    console.print()
+    console.print("The Segments are then processed to produce the following output:\n")
+    console.print(text)
+    console.print(
+        "\nYou will only need to know this if you are implementing your own Rich renderables."
+    )
diff --git a/local_packages/rich/spinner.py b/local_packages/rich/spinner.py
new file mode 100644
index 0000000..a3a3caf
--- /dev/null
+++ b/local_packages/rich/spinner.py
@@ -0,0 +1,132 @@
+from typing import TYPE_CHECKING, List, Optional, Union, cast
+
+from ._spinners import SPINNERS
+from .measure import Measurement
+from .table import Table
+from .text import Text
+
+if TYPE_CHECKING:
+    from .console import Console, ConsoleOptions, RenderableType, RenderResult
+    from .style import StyleType
+
+
+class Spinner:
+    """A spinner animation.
+
+    Args:
+        name (str): Name of spinner (run python -m rich.spinner).
+        text (RenderableType, optional): A renderable to display at the right of the spinner (str or Text typically). Defaults to "".
+        style (StyleType, optional): Style for spinner animation. Defaults to None.
+        speed (float, optional): Speed factor for animation. Defaults to 1.0.
+
+    Raises:
+        KeyError: If name isn't one of the supported spinner animations.
+    """
+
+    def __init__(
+        self,
+        name: str,
+        text: "RenderableType" = "",
+        *,
+        style: Optional["StyleType"] = None,
+        speed: float = 1.0,
+    ) -> None:
+        try:
+            spinner = SPINNERS[name]
+        except KeyError:
+            raise KeyError(f"no spinner called {name!r}")
+        self.text: "Union[RenderableType, Text]" = (
+            Text.from_markup(text) if isinstance(text, str) else text
+        )
+        self.name = name
+        self.frames = cast(List[str], spinner["frames"])[:]
+        self.interval = cast(float, spinner["interval"])
+        self.start_time: Optional[float] = None
+        self.style = style
+        self.speed = speed
+        self.frame_no_offset: float = 0.0
+        self._update_speed = 0.0
+
+    def __rich_console__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> "RenderResult":
+        yield self.render(console.get_time())
+
+    def __rich_measure__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> Measurement:
+        text = self.render(0)
+        return Measurement.get(console, options, text)
+
+    def render(self, time: float) -> "RenderableType":
+        """Render the spinner for a given time.
+
+        Args:
+            time (float): Time in seconds.
+
+        Returns:
+            RenderableType: A renderable containing animation frame.
+        """
+        if self.start_time is None:
+            self.start_time = time
+
+        frame_no = ((time - self.start_time) * self.speed) / (
+            self.interval / 1000.0
+        ) + self.frame_no_offset
+        frame = Text(
+            self.frames[int(frame_no) % len(self.frames)], style=self.style or ""
+        )
+
+        if self._update_speed:
+            self.frame_no_offset = frame_no
+            self.start_time = time
+            self.speed = self._update_speed
+            self._update_speed = 0.0
+
+        if not self.text:
+            return frame
+        elif isinstance(self.text, (str, Text)):
+            return Text.assemble(frame, " ", self.text)
+        else:
+            table = Table.grid(padding=1)
+            table.add_row(frame, self.text)
+            return table
+
+    def update(
+        self,
+        *,
+        text: "RenderableType" = "",
+        style: Optional["StyleType"] = None,
+        speed: Optional[float] = None,
+    ) -> None:
+        """Updates attributes of a spinner after it has been started.
+
+        Args:
+            text (RenderableType, optional): A renderable to display at the right of the spinner (str or Text typically). Defaults to "".
+            style (StyleType, optional): Style for spinner animation. Defaults to None.
+            speed (float, optional): Speed factor for animation. Defaults to None.
+        """
+        if text:
+            self.text = Text.from_markup(text) if isinstance(text, str) else text
+        if style:
+            self.style = style
+        if speed:
+            self._update_speed = speed
+
+
+if __name__ == "__main__":  # pragma: no cover
+    from time import sleep
+
+    from .console import Group
+    from .live import Live
+
+    all_spinners = Group(
+        *[
+            Spinner(spinner_name, text=Text(repr(spinner_name), style="green"))
+            for spinner_name in sorted(SPINNERS.keys())
+        ]
+    )
+
+    with Live(all_spinners, refresh_per_second=20) as live:
+        while True:
+            sleep(0.1)
diff --git a/local_packages/rich/status.py b/local_packages/rich/status.py
new file mode 100644
index 0000000..6574483
--- /dev/null
+++ b/local_packages/rich/status.py
@@ -0,0 +1,131 @@
+from types import TracebackType
+from typing import Optional, Type
+
+from .console import Console, RenderableType
+from .jupyter import JupyterMixin
+from .live import Live
+from .spinner import Spinner
+from .style import StyleType
+
+
+class Status(JupyterMixin):
+    """Displays a status indicator with a 'spinner' animation.
+
+    Args:
+        status (RenderableType): A status renderable (str or Text typically).
+        console (Console, optional): Console instance to use, or None for global console. Defaults to None.
+        spinner (str, optional): Name of spinner animation (see python -m rich.spinner). Defaults to "dots".
+        spinner_style (StyleType, optional): Style of spinner. Defaults to "status.spinner".
+        speed (float, optional): Speed factor for spinner animation. Defaults to 1.0.
+        refresh_per_second (float, optional): Number of refreshes per second. Defaults to 12.5.
+    """
+
+    def __init__(
+        self,
+        status: RenderableType,
+        *,
+        console: Optional[Console] = None,
+        spinner: str = "dots",
+        spinner_style: StyleType = "status.spinner",
+        speed: float = 1.0,
+        refresh_per_second: float = 12.5,
+    ):
+        self.status = status
+        self.spinner_style = spinner_style
+        self.speed = speed
+        self._spinner = Spinner(spinner, text=status, style=spinner_style, speed=speed)
+        self._live = Live(
+            self.renderable,
+            console=console,
+            refresh_per_second=refresh_per_second,
+            transient=True,
+        )
+
+    @property
+    def renderable(self) -> Spinner:
+        return self._spinner
+
+    @property
+    def console(self) -> "Console":
+        """Get the Console used by the Status objects."""
+        return self._live.console
+
+    def update(
+        self,
+        status: Optional[RenderableType] = None,
+        *,
+        spinner: Optional[str] = None,
+        spinner_style: Optional[StyleType] = None,
+        speed: Optional[float] = None,
+    ) -> None:
+        """Update status.
+
+        Args:
+            status (Optional[RenderableType], optional): New status renderable or None for no change. Defaults to None.
+            spinner (Optional[str], optional): New spinner or None for no change. Defaults to None.
+            spinner_style (Optional[StyleType], optional): New spinner style or None for no change. Defaults to None.
+            speed (Optional[float], optional): Speed factor for spinner animation or None for no change. Defaults to None.
+        """
+        if status is not None:
+            self.status = status
+        if spinner_style is not None:
+            self.spinner_style = spinner_style
+        if speed is not None:
+            self.speed = speed
+        if spinner is not None:
+            self._spinner = Spinner(
+                spinner, text=self.status, style=self.spinner_style, speed=self.speed
+            )
+            self._live.update(self.renderable, refresh=True)
+        else:
+            self._spinner.update(
+                text=self.status, style=self.spinner_style, speed=self.speed
+            )
+
+    def start(self) -> None:
+        """Start the status animation."""
+        self._live.start()
+
+    def stop(self) -> None:
+        """Stop the spinner animation."""
+        self._live.stop()
+
+    def __rich__(self) -> RenderableType:
+        return self.renderable
+
+    def __enter__(self) -> "Status":
+        self.start()
+        return self
+
+    def __exit__(
+        self,
+        exc_type: Optional[Type[BaseException]],
+        exc_val: Optional[BaseException],
+        exc_tb: Optional[TracebackType],
+    ) -> None:
+        self.stop()
+
+
+if __name__ == "__main__":  # pragma: no cover
+    from time import sleep
+
+    from .console import Console
+
+    console = Console()
+    with console.status("[magenta]Covid detector booting up") as status:
+        sleep(3)
+        console.log("Importing advanced AI")
+        sleep(3)
+        console.log("Advanced Covid AI Ready")
+        sleep(3)
+        status.update(status="[bold blue] Scanning for Covid", spinner="earth")
+        sleep(3)
+        console.log("Found 10,000,000,000 copies of Covid32.exe")
+        sleep(3)
+        status.update(
+            status="[bold red]Moving Covid32.exe to Trash",
+            spinner="bouncingBall",
+            spinner_style="yellow",
+        )
+        sleep(5)
+    console.print("[bold green]Covid deleted successfully")
diff --git a/local_packages/rich/style.py b/local_packages/rich/style.py
new file mode 100644
index 0000000..3806a8c
--- /dev/null
+++ b/local_packages/rich/style.py
@@ -0,0 +1,796 @@
+import sys
+from functools import lru_cache
+from itertools import count
+from operator import attrgetter
+from pickle import dumps, loads
+from random import getrandbits
+from typing import Any, Dict, Iterable, List, Optional, Type, Union, cast
+
+from . import errors
+from .color import Color, ColorParseError, ColorSystem, blend_rgb
+from .repr import Result, rich_repr
+from .terminal_theme import DEFAULT_TERMINAL_THEME, TerminalTheme
+
+_hash_getter = attrgetter(
+    "_color", "_bgcolor", "_attributes", "_set_attributes", "_link", "_meta"
+)
+
+# Style instances and style definitions are often interchangeable
+StyleType = Union[str, "Style"]
+
+
+_id_generator = count(getrandbits(24))
+
+
+class _Bit:
+    """A descriptor to get/set a style attribute bit."""
+
+    __slots__ = ["bit"]
+
+    def __init__(self, bit_no: int) -> None:
+        self.bit = 1 << bit_no
+
+    def __get__(self, obj: "Style", objtype: Type["Style"]) -> Optional[bool]:
+        if obj._set_attributes & self.bit:
+            return obj._attributes & self.bit != 0
+        return None
+
+
+@rich_repr
+class Style:
+    """A terminal style.
+
+    A terminal style consists of a color (`color`), a background color (`bgcolor`), and a number of attributes, such
+    as bold, italic etc. The attributes have 3 states: they can either be on
+    (``True``), off (``False``), or not set (``None``).
+
+    Args:
+        color (Union[Color, str], optional): Color of terminal text. Defaults to None.
+        bgcolor (Union[Color, str], optional): Color of terminal background. Defaults to None.
+        bold (bool, optional): Enable bold text. Defaults to None.
+        dim (bool, optional): Enable dim text. Defaults to None.
+        italic (bool, optional): Enable italic text. Defaults to None.
+        underline (bool, optional): Enable underlined text. Defaults to None.
+        blink (bool, optional): Enabled blinking text. Defaults to None.
+        blink2 (bool, optional): Enable fast blinking text. Defaults to None.
+        reverse (bool, optional): Enabled reverse text. Defaults to None.
+        conceal (bool, optional): Enable concealed text. Defaults to None.
+        strike (bool, optional): Enable strikethrough text. Defaults to None.
+        underline2 (bool, optional): Enable doubly underlined text. Defaults to None.
+        frame (bool, optional): Enable framed text. Defaults to None.
+        encircle (bool, optional): Enable encircled text. Defaults to None.
+        overline (bool, optional): Enable overlined text. Defaults to None.
+        link (str, link): Link URL. Defaults to None.
+
+    """
+
+    _color: Optional[Color]
+    _bgcolor: Optional[Color]
+    _attributes: int
+    _set_attributes: int
+    _hash: Optional[int]
+    _null: bool
+    _meta: Optional[bytes]
+
+    __slots__ = [
+        "_color",
+        "_bgcolor",
+        "_attributes",
+        "_set_attributes",
+        "_link",
+        "_link_id",
+        "_ansi",
+        "_style_definition",
+        "_hash",
+        "_null",
+        "_meta",
+    ]
+
+    # maps bits on to SGR parameter
+    _style_map = {
+        0: "1",
+        1: "2",
+        2: "3",
+        3: "4",
+        4: "5",
+        5: "6",
+        6: "7",
+        7: "8",
+        8: "9",
+        9: "21",
+        10: "51",
+        11: "52",
+        12: "53",
+    }
+
+    STYLE_ATTRIBUTES = {
+        "dim": "dim",
+        "d": "dim",
+        "bold": "bold",
+        "b": "bold",
+        "italic": "italic",
+        "i": "italic",
+        "underline": "underline",
+        "u": "underline",
+        "blink": "blink",
+        "blink2": "blink2",
+        "reverse": "reverse",
+        "r": "reverse",
+        "conceal": "conceal",
+        "c": "conceal",
+        "strike": "strike",
+        "s": "strike",
+        "underline2": "underline2",
+        "uu": "underline2",
+        "frame": "frame",
+        "encircle": "encircle",
+        "overline": "overline",
+        "o": "overline",
+    }
+
+    def __init__(
+        self,
+        *,
+        color: Optional[Union[Color, str]] = None,
+        bgcolor: Optional[Union[Color, str]] = None,
+        bold: Optional[bool] = None,
+        dim: Optional[bool] = None,
+        italic: Optional[bool] = None,
+        underline: Optional[bool] = None,
+        blink: Optional[bool] = None,
+        blink2: Optional[bool] = None,
+        reverse: Optional[bool] = None,
+        conceal: Optional[bool] = None,
+        strike: Optional[bool] = None,
+        underline2: Optional[bool] = None,
+        frame: Optional[bool] = None,
+        encircle: Optional[bool] = None,
+        overline: Optional[bool] = None,
+        link: Optional[str] = None,
+        meta: Optional[Dict[str, Any]] = None,
+    ):
+        self._ansi: Optional[str] = None
+        self._style_definition: Optional[str] = None
+
+        def _make_color(color: Union[Color, str]) -> Color:
+            return color if isinstance(color, Color) else Color.parse(color)
+
+        self._color = None if color is None else _make_color(color)
+        self._bgcolor = None if bgcolor is None else _make_color(bgcolor)
+        self._set_attributes = sum(
+            (
+                bold is not None,
+                dim is not None and 2,
+                italic is not None and 4,
+                underline is not None and 8,
+                blink is not None and 16,
+                blink2 is not None and 32,
+                reverse is not None and 64,
+                conceal is not None and 128,
+                strike is not None and 256,
+                underline2 is not None and 512,
+                frame is not None and 1024,
+                encircle is not None and 2048,
+                overline is not None and 4096,
+            )
+        )
+        self._attributes = (
+            sum(
+                (
+                    bold and 1 or 0,
+                    dim and 2 or 0,
+                    italic and 4 or 0,
+                    underline and 8 or 0,
+                    blink and 16 or 0,
+                    blink2 and 32 or 0,
+                    reverse and 64 or 0,
+                    conceal and 128 or 0,
+                    strike and 256 or 0,
+                    underline2 and 512 or 0,
+                    frame and 1024 or 0,
+                    encircle and 2048 or 0,
+                    overline and 4096 or 0,
+                )
+            )
+            if self._set_attributes
+            else 0
+        )
+
+        self._link = link
+        self._meta = None if meta is None else dumps(meta)
+        self._link_id = (
+            f"{next(_id_generator)}{hash(self._meta)}" if (link or meta) else ""
+        )
+        self._hash: Optional[int] = None
+        self._null = not (self._set_attributes or color or bgcolor or link or meta)
+
+    @classmethod
+    def null(cls) -> "Style":
+        """Create an 'null' style, equivalent to Style(), but more performant."""
+        return NULL_STYLE
+
+    @classmethod
+    def from_color(
+        cls, color: Optional[Color] = None, bgcolor: Optional[Color] = None
+    ) -> "Style":
+        """Create a new style with colors and no attributes.
+
+        Returns:
+            color (Optional[Color]): A (foreground) color, or None for no color. Defaults to None.
+            bgcolor (Optional[Color]): A (background) color, or None for no color. Defaults to None.
+        """
+        style: Style = cls.__new__(Style)
+        style._ansi = None
+        style._style_definition = None
+        style._color = color
+        style._bgcolor = bgcolor
+        style._set_attributes = 0
+        style._attributes = 0
+        style._link = None
+        style._link_id = ""
+        style._meta = None
+        style._null = not (color or bgcolor)
+        style._hash = None
+        return style
+
+    @classmethod
+    def from_meta(cls, meta: Optional[Dict[str, Any]]) -> "Style":
+        """Create a new style with meta data.
+
+        Returns:
+            meta (Optional[Dict[str, Any]]): A dictionary of meta data. Defaults to None.
+        """
+        style: Style = cls.__new__(Style)
+        style._ansi = None
+        style._style_definition = None
+        style._color = None
+        style._bgcolor = None
+        style._set_attributes = 0
+        style._attributes = 0
+        style._link = None
+        style._meta = dumps(meta)
+        style._link_id = f"{next(_id_generator)}{hash(style._meta)}"
+        style._hash = None
+        style._null = not (meta)
+        return style
+
+    @classmethod
+    def on(cls, meta: Optional[Dict[str, Any]] = None, **handlers: Any) -> "Style":
+        """Create a blank style with meta information.
+
+        Example:
+            style = Style.on(click=self.on_click)
+
+        Args:
+            meta (Optional[Dict[str, Any]], optional): An optional dict of meta information.
+            **handlers (Any): Keyword arguments are translated in to handlers.
+
+        Returns:
+            Style: A Style with meta information attached.
+        """
+        meta = {} if meta is None else meta
+        meta.update({f"@{key}": value for key, value in handlers.items()})
+        return cls.from_meta(meta)
+
+    bold = _Bit(0)
+    dim = _Bit(1)
+    italic = _Bit(2)
+    underline = _Bit(3)
+    blink = _Bit(4)
+    blink2 = _Bit(5)
+    reverse = _Bit(6)
+    conceal = _Bit(7)
+    strike = _Bit(8)
+    underline2 = _Bit(9)
+    frame = _Bit(10)
+    encircle = _Bit(11)
+    overline = _Bit(12)
+
+    @property
+    def link_id(self) -> str:
+        """Get a link id, used in ansi code for links."""
+        return self._link_id
+
+    def __str__(self) -> str:
+        """Re-generate style definition from attributes."""
+        if self._style_definition is None:
+            attributes: List[str] = []
+            append = attributes.append
+            bits = self._set_attributes
+            if bits & 0b0000000001111:
+                if bits & 1:
+                    append("bold" if self.bold else "not bold")
+                if bits & (1 << 1):
+                    append("dim" if self.dim else "not dim")
+                if bits & (1 << 2):
+                    append("italic" if self.italic else "not italic")
+                if bits & (1 << 3):
+                    append("underline" if self.underline else "not underline")
+            if bits & 0b0000111110000:
+                if bits & (1 << 4):
+                    append("blink" if self.blink else "not blink")
+                if bits & (1 << 5):
+                    append("blink2" if self.blink2 else "not blink2")
+                if bits & (1 << 6):
+                    append("reverse" if self.reverse else "not reverse")
+                if bits & (1 << 7):
+                    append("conceal" if self.conceal else "not conceal")
+                if bits & (1 << 8):
+                    append("strike" if self.strike else "not strike")
+            if bits & 0b1111000000000:
+                if bits & (1 << 9):
+                    append("underline2" if self.underline2 else "not underline2")
+                if bits & (1 << 10):
+                    append("frame" if self.frame else "not frame")
+                if bits & (1 << 11):
+                    append("encircle" if self.encircle else "not encircle")
+                if bits & (1 << 12):
+                    append("overline" if self.overline else "not overline")
+            if self._color is not None:
+                append(self._color.name)
+            if self._bgcolor is not None:
+                append("on")
+                append(self._bgcolor.name)
+            if self._link:
+                append("link")
+                append(self._link)
+            self._style_definition = " ".join(attributes) or "none"
+        return self._style_definition
+
+    def __bool__(self) -> bool:
+        """A Style is false if it has no attributes, colors, or links."""
+        return not self._null
+
+    def _make_ansi_codes(self, color_system: ColorSystem) -> str:
+        """Generate ANSI codes for this style.
+
+        Args:
+            color_system (ColorSystem): Color system.
+
+        Returns:
+            str: String containing codes.
+        """
+
+        if self._ansi is None:
+            sgr: List[str] = []
+            append = sgr.append
+            _style_map = self._style_map
+            attributes = self._attributes & self._set_attributes
+            if attributes:
+                if attributes & 1:
+                    append(_style_map[0])
+                if attributes & 2:
+                    append(_style_map[1])
+                if attributes & 4:
+                    append(_style_map[2])
+                if attributes & 8:
+                    append(_style_map[3])
+                if attributes & 0b0000111110000:
+                    for bit in range(4, 9):
+                        if attributes & (1 << bit):
+                            append(_style_map[bit])
+                if attributes & 0b1111000000000:
+                    for bit in range(9, 13):
+                        if attributes & (1 << bit):
+                            append(_style_map[bit])
+            if self._color is not None:
+                sgr.extend(self._color.downgrade(color_system).get_ansi_codes())
+            if self._bgcolor is not None:
+                sgr.extend(
+                    self._bgcolor.downgrade(color_system).get_ansi_codes(
+                        foreground=False
+                    )
+                )
+            self._ansi = ";".join(sgr)
+        return self._ansi
+
+    @classmethod
+    @lru_cache(maxsize=1024)
+    def normalize(cls, style: str) -> str:
+        """Normalize a style definition so that styles with the same effect have the same string
+        representation.
+
+        Args:
+            style (str): A style definition.
+
+        Returns:
+            str: Normal form of style definition.
+        """
+        try:
+            return str(cls.parse(style))
+        except errors.StyleSyntaxError:
+            return style.strip().lower()
+
+    @classmethod
+    def pick_first(cls, *values: Optional[StyleType]) -> StyleType:
+        """Pick first non-None style."""
+        for value in values:
+            if value is not None:
+                return value
+        raise ValueError("expected at least one non-None style")
+
+    def __rich_repr__(self) -> Result:
+        yield "color", self.color, None
+        yield "bgcolor", self.bgcolor, None
+        yield "bold", self.bold, None,
+        yield "dim", self.dim, None,
+        yield "italic", self.italic, None
+        yield "underline", self.underline, None,
+        yield "blink", self.blink, None
+        yield "blink2", self.blink2, None
+        yield "reverse", self.reverse, None
+        yield "conceal", self.conceal, None
+        yield "strike", self.strike, None
+        yield "underline2", self.underline2, None
+        yield "frame", self.frame, None
+        yield "encircle", self.encircle, None
+        yield "link", self.link, None
+        if self._meta:
+            yield "meta", self.meta
+
+    def __eq__(self, other: Any) -> bool:
+        if not isinstance(other, Style):
+            return NotImplemented
+        return self.__hash__() == other.__hash__()
+
+    def __ne__(self, other: Any) -> bool:
+        if not isinstance(other, Style):
+            return NotImplemented
+        return self.__hash__() != other.__hash__()
+
+    def __hash__(self) -> int:
+        if self._hash is not None:
+            return self._hash
+        self._hash = hash(_hash_getter(self))
+        return self._hash
+
+    @property
+    def color(self) -> Optional[Color]:
+        """The foreground color or None if it is not set."""
+        return self._color
+
+    @property
+    def bgcolor(self) -> Optional[Color]:
+        """The background color or None if it is not set."""
+        return self._bgcolor
+
+    @property
+    def link(self) -> Optional[str]:
+        """Link text, if set."""
+        return self._link
+
+    @property
+    def transparent_background(self) -> bool:
+        """Check if the style specified a transparent background."""
+        return self.bgcolor is None or self.bgcolor.is_default
+
+    @property
+    def background_style(self) -> "Style":
+        """A Style with background only."""
+        return Style(bgcolor=self.bgcolor)
+
+    @property
+    def meta(self) -> Dict[str, Any]:
+        """Get meta information (can not be changed after construction)."""
+        return {} if self._meta is None else cast(Dict[str, Any], loads(self._meta))
+
+    @property
+    def without_color(self) -> "Style":
+        """Get a copy of the style with color removed."""
+        if self._null:
+            return NULL_STYLE
+        style: Style = self.__new__(Style)
+        style._ansi = None
+        style._style_definition = None
+        style._color = None
+        style._bgcolor = None
+        style._attributes = self._attributes
+        style._set_attributes = self._set_attributes
+        style._link = self._link
+        style._link_id = f"{next(_id_generator)}" if self._link else ""
+        style._null = False
+        style._meta = None
+        style._hash = None
+        return style
+
+    @classmethod
+    @lru_cache(maxsize=4096)
+    def parse(cls, style_definition: str) -> "Style":
+        """Parse a style definition.
+
+        Args:
+            style_definition (str): A string containing a style.
+
+        Raises:
+            errors.StyleSyntaxError: If the style definition syntax is invalid.
+
+        Returns:
+            `Style`: A Style instance.
+        """
+        if style_definition.strip() == "none" or not style_definition:
+            return cls.null()
+
+        STYLE_ATTRIBUTES = cls.STYLE_ATTRIBUTES
+        color: Optional[str] = None
+        bgcolor: Optional[str] = None
+        attributes: Dict[str, Optional[Any]] = {}
+        link: Optional[str] = None
+
+        words = iter(style_definition.split())
+        for original_word in words:
+            word = original_word.lower()
+            if word == "on":
+                word = next(words, "")
+                if not word:
+                    raise errors.StyleSyntaxError("color expected after 'on'")
+                try:
+                    Color.parse(word)
+                except ColorParseError as error:
+                    raise errors.StyleSyntaxError(
+                        f"unable to parse {word!r} as background color; {error}"
+                    ) from None
+                bgcolor = word
+
+            elif word == "not":
+                word = next(words, "")
+                attribute = STYLE_ATTRIBUTES.get(word)
+                if attribute is None:
+                    raise errors.StyleSyntaxError(
+                        f"expected style attribute after 'not', found {word!r}"
+                    )
+                attributes[attribute] = False
+
+            elif word == "link":
+                word = next(words, "")
+                if not word:
+                    raise errors.StyleSyntaxError("URL expected after 'link'")
+                link = word
+
+            elif word in STYLE_ATTRIBUTES:
+                attributes[STYLE_ATTRIBUTES[word]] = True
+
+            else:
+                try:
+                    Color.parse(word)
+                except ColorParseError as error:
+                    raise errors.StyleSyntaxError(
+                        f"unable to parse {word!r} as color; {error}"
+                    ) from None
+                color = word
+        style = Style(color=color, bgcolor=bgcolor, link=link, **attributes)
+        return style
+
+    @lru_cache(maxsize=1024)
+    def get_html_style(self, theme: Optional[TerminalTheme] = None) -> str:
+        """Get a CSS style rule."""
+        theme = theme or DEFAULT_TERMINAL_THEME
+        css: List[str] = []
+        append = css.append
+
+        color = self.color
+        bgcolor = self.bgcolor
+        if self.reverse:
+            color, bgcolor = bgcolor, color
+        if self.dim:
+            foreground_color = (
+                theme.foreground_color if color is None else color.get_truecolor(theme)
+            )
+            color = Color.from_triplet(
+                blend_rgb(foreground_color, theme.background_color, 0.5)
+            )
+        if color is not None:
+            theme_color = color.get_truecolor(theme)
+            append(f"color: {theme_color.hex}")
+            append(f"text-decoration-color: {theme_color.hex}")
+        if bgcolor is not None:
+            theme_color = bgcolor.get_truecolor(theme, foreground=False)
+            append(f"background-color: {theme_color.hex}")
+        if self.bold:
+            append("font-weight: bold")
+        if self.italic:
+            append("font-style: italic")
+        if self.underline:
+            append("text-decoration: underline")
+        if self.strike:
+            append("text-decoration: line-through")
+        if self.overline:
+            append("text-decoration: overline")
+        return "; ".join(css)
+
+    @classmethod
+    def combine(cls, styles: Iterable["Style"]) -> "Style":
+        """Combine styles and get result.
+
+        Args:
+            styles (Iterable[Style]): Styles to combine.
+
+        Returns:
+            Style: A new style instance.
+        """
+        iter_styles = iter(styles)
+        return sum(iter_styles, next(iter_styles))
+
+    @classmethod
+    def chain(cls, *styles: "Style") -> "Style":
+        """Combine styles from positional argument in to a single style.
+
+        Args:
+            *styles (Iterable[Style]): Styles to combine.
+
+        Returns:
+            Style: A new style instance.
+        """
+        iter_styles = iter(styles)
+        return sum(iter_styles, next(iter_styles))
+
+    def copy(self) -> "Style":
+        """Get a copy of this style.
+
+        Returns:
+            Style: A new Style instance with identical attributes.
+        """
+        if self._null:
+            return NULL_STYLE
+        style: Style = self.__new__(Style)
+        style._ansi = self._ansi
+        style._style_definition = self._style_definition
+        style._color = self._color
+        style._bgcolor = self._bgcolor
+        style._attributes = self._attributes
+        style._set_attributes = self._set_attributes
+        style._link = self._link
+        style._link_id = f"{next(_id_generator)}" if self._link else ""
+        style._hash = self._hash
+        style._null = False
+        style._meta = self._meta
+        return style
+
+    @lru_cache(maxsize=128)
+    def clear_meta_and_links(self) -> "Style":
+        """Get a copy of this style with link and meta information removed.
+
+        Returns:
+            Style: New style object.
+        """
+        if self._null:
+            return NULL_STYLE
+        style: Style = self.__new__(Style)
+        style._ansi = self._ansi
+        style._style_definition = self._style_definition
+        style._color = self._color
+        style._bgcolor = self._bgcolor
+        style._attributes = self._attributes
+        style._set_attributes = self._set_attributes
+        style._link = None
+        style._link_id = ""
+        style._hash = None
+        style._null = False
+        style._meta = None
+        return style
+
+    def update_link(self, link: Optional[str] = None) -> "Style":
+        """Get a copy with a different value for link.
+
+        Args:
+            link (str, optional): New value for link. Defaults to None.
+
+        Returns:
+            Style: A new Style instance.
+        """
+        style: Style = self.__new__(Style)
+        style._ansi = self._ansi
+        style._style_definition = self._style_definition
+        style._color = self._color
+        style._bgcolor = self._bgcolor
+        style._attributes = self._attributes
+        style._set_attributes = self._set_attributes
+        style._link = link
+        style._link_id = f"{next(_id_generator)}" if link else ""
+        style._hash = None
+        style._null = False
+        style._meta = self._meta
+        return style
+
+    def render(
+        self,
+        text: str = "",
+        *,
+        color_system: Optional[ColorSystem] = ColorSystem.TRUECOLOR,
+        legacy_windows: bool = False,
+    ) -> str:
+        """Render the ANSI codes for the style.
+
+        Args:
+            text (str, optional): A string to style. Defaults to "".
+            color_system (Optional[ColorSystem], optional): Color system to render to. Defaults to ColorSystem.TRUECOLOR.
+
+        Returns:
+            str: A string containing ANSI style codes.
+        """
+        if not text or color_system is None:
+            return text
+        attrs = self._ansi or self._make_ansi_codes(color_system)
+        rendered = f"\x1b[{attrs}m{text}\x1b[0m" if attrs else text
+        if self._link and not legacy_windows:
+            rendered = (
+                f"\x1b]8;id={self._link_id};{self._link}\x1b\\{rendered}\x1b]8;;\x1b\\"
+            )
+        return rendered
+
+    def test(self, text: Optional[str] = None) -> None:
+        """Write text with style directly to terminal.
+
+        This method is for testing purposes only.
+
+        Args:
+            text (Optional[str], optional): Text to style or None for style name.
+
+        """
+        text = text or str(self)
+        sys.stdout.write(f"{self.render(text)}\n")
+
+    @lru_cache(maxsize=1024)
+    def _add(self, style: Optional["Style"]) -> "Style":
+        if style is None or style._null:
+            return self
+        if self._null:
+            return style
+        new_style: Style = self.__new__(Style)
+        new_style._ansi = None
+        new_style._style_definition = None
+        new_style._color = style._color or self._color
+        new_style._bgcolor = style._bgcolor or self._bgcolor
+        new_style._attributes = (self._attributes & ~style._set_attributes) | (
+            style._attributes & style._set_attributes
+        )
+        new_style._set_attributes = self._set_attributes | style._set_attributes
+        new_style._link = style._link or self._link
+        new_style._link_id = style._link_id or self._link_id
+        new_style._null = style._null
+        if self._meta and style._meta:
+            new_style._meta = dumps({**self.meta, **style.meta})
+        else:
+            new_style._meta = self._meta or style._meta
+        new_style._hash = None
+        return new_style
+
+    def __add__(self, style: Optional["Style"]) -> "Style":
+        combined_style = self._add(style)
+        return combined_style.copy() if combined_style.link else combined_style
+
+
+NULL_STYLE = Style()
+
+
+class StyleStack:
+    """A stack of styles."""
+
+    __slots__ = ["_stack"]
+
+    def __init__(self, default_style: "Style") -> None:
+        self._stack: List[Style] = [default_style]
+
+    def __repr__(self) -> str:
+        return f""
+
+    @property
+    def current(self) -> Style:
+        """Get the Style at the top of the stack."""
+        return self._stack[-1]
+
+    def push(self, style: Style) -> None:
+        """Push a new style on to the stack.
+
+        Args:
+            style (Style): New style to combine with current style.
+        """
+        self._stack.append(self._stack[-1] + style)
+
+    def pop(self) -> Style:
+        """Pop last style and discard.
+
+        Returns:
+            Style: New current style (also available as stack.current)
+        """
+        self._stack.pop()
+        return self._stack[-1]
diff --git a/local_packages/rich/styled.py b/local_packages/rich/styled.py
new file mode 100644
index 0000000..27243be
--- /dev/null
+++ b/local_packages/rich/styled.py
@@ -0,0 +1,42 @@
+from typing import TYPE_CHECKING
+
+from .measure import Measurement
+from .segment import Segment
+from .style import StyleType
+
+if TYPE_CHECKING:
+    from .console import Console, ConsoleOptions, RenderResult, RenderableType
+
+
+class Styled:
+    """Apply a style to a renderable.
+
+    Args:
+        renderable (RenderableType): Any renderable.
+        style (StyleType): A style to apply across the entire renderable.
+    """
+
+    def __init__(self, renderable: "RenderableType", style: "StyleType") -> None:
+        self.renderable = renderable
+        self.style = style
+
+    def __rich_console__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> "RenderResult":
+        style = console.get_style(self.style)
+        rendered_segments = console.render(self.renderable, options)
+        segments = Segment.apply_style(rendered_segments, style)
+        return segments
+
+    def __rich_measure__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> Measurement:
+        return Measurement.get(console, options, self.renderable)
+
+
+if __name__ == "__main__":  # pragma: no cover
+    from rich import print
+    from rich.panel import Panel
+
+    panel = Styled(Panel("hello"), "on blue")
+    print(panel)
diff --git a/local_packages/rich/syntax.py b/local_packages/rich/syntax.py
new file mode 100644
index 0000000..8c8f831
--- /dev/null
+++ b/local_packages/rich/syntax.py
@@ -0,0 +1,988 @@
+from __future__ import annotations
+
+import os.path
+import re
+import sys
+import textwrap
+from abc import ABC, abstractmethod
+from pathlib import Path
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Dict,
+    Iterable,
+    List,
+    NamedTuple,
+    Optional,
+    Sequence,
+    Set,
+    Tuple,
+    Type,
+    Union,
+)
+
+from pygments.lexer import Lexer
+from pygments.lexers import get_lexer_by_name, guess_lexer_for_filename
+from pygments.style import Style as PygmentsStyle
+from pygments.styles import get_style_by_name
+from pygments.token import (
+    Comment,
+    Error,
+    Generic,
+    Keyword,
+    Name,
+    Number,
+    Operator,
+    String,
+    Token,
+    Whitespace,
+)
+from pygments.util import ClassNotFound
+
+if TYPE_CHECKING:
+    from .console import Console, ConsoleOptions, JustifyMethod, RenderResult
+
+from rich.containers import Lines
+from rich.padding import Padding, PaddingDimensions
+
+from ._loop import loop_first
+from .cells import cell_len
+from .color import Color, blend_rgb
+from .jupyter import JupyterMixin
+from .measure import Measurement
+from .segment import Segment, Segments
+from .style import Style, StyleType
+from .text import Text
+
+TokenType = Tuple[str, ...]
+
+WINDOWS = sys.platform == "win32"
+DEFAULT_THEME = "monokai"
+
+# The following styles are based on https://github.com/pygments/pygments/blob/master/pygments/formatters/terminal.py
+# A few modifications were made
+
+ANSI_LIGHT: Dict[TokenType, Style] = {
+    Token: Style(),
+    Whitespace: Style(color="white"),
+    Comment: Style(dim=True),
+    Comment.Preproc: Style(color="cyan"),
+    Keyword: Style(color="blue"),
+    Keyword.Type: Style(color="cyan"),
+    Operator.Word: Style(color="magenta"),
+    Name.Builtin: Style(color="cyan"),
+    Name.Function: Style(color="green"),
+    Name.Namespace: Style(color="cyan", underline=True),
+    Name.Class: Style(color="green", underline=True),
+    Name.Exception: Style(color="cyan"),
+    Name.Decorator: Style(color="magenta", bold=True),
+    Name.Variable: Style(color="red"),
+    Name.Constant: Style(color="red"),
+    Name.Attribute: Style(color="cyan"),
+    Name.Tag: Style(color="bright_blue"),
+    String: Style(color="yellow"),
+    Number: Style(color="blue"),
+    Generic.Deleted: Style(color="bright_red"),
+    Generic.Inserted: Style(color="green"),
+    Generic.Heading: Style(bold=True),
+    Generic.Subheading: Style(color="magenta", bold=True),
+    Generic.Prompt: Style(bold=True),
+    Generic.Error: Style(color="bright_red"),
+    Error: Style(color="red", underline=True),
+}
+
+ANSI_DARK: Dict[TokenType, Style] = {
+    Token: Style(),
+    Whitespace: Style(color="bright_black"),
+    Comment: Style(dim=True),
+    Comment.Preproc: Style(color="bright_cyan"),
+    Keyword: Style(color="bright_blue"),
+    Keyword.Type: Style(color="bright_cyan"),
+    Operator.Word: Style(color="bright_magenta"),
+    Name.Builtin: Style(color="bright_cyan"),
+    Name.Function: Style(color="bright_green"),
+    Name.Namespace: Style(color="bright_cyan", underline=True),
+    Name.Class: Style(color="bright_green", underline=True),
+    Name.Exception: Style(color="bright_cyan"),
+    Name.Decorator: Style(color="bright_magenta", bold=True),
+    Name.Variable: Style(color="bright_red"),
+    Name.Constant: Style(color="bright_red"),
+    Name.Attribute: Style(color="bright_cyan"),
+    Name.Tag: Style(color="bright_blue"),
+    String: Style(color="yellow"),
+    Number: Style(color="bright_blue"),
+    Generic.Deleted: Style(color="bright_red"),
+    Generic.Inserted: Style(color="bright_green"),
+    Generic.Heading: Style(bold=True),
+    Generic.Subheading: Style(color="bright_magenta", bold=True),
+    Generic.Prompt: Style(bold=True),
+    Generic.Error: Style(color="bright_red"),
+    Error: Style(color="red", underline=True),
+}
+
+RICH_SYNTAX_THEMES = {"ansi_light": ANSI_LIGHT, "ansi_dark": ANSI_DARK}
+NUMBERS_COLUMN_DEFAULT_PADDING = 2
+
+
+class SyntaxTheme(ABC):
+    """Base class for a syntax theme."""
+
+    @abstractmethod
+    def get_style_for_token(self, token_type: TokenType) -> Style:
+        """Get a style for a given Pygments token."""
+        raise NotImplementedError  # pragma: no cover
+
+    @abstractmethod
+    def get_background_style(self) -> Style:
+        """Get the background color."""
+        raise NotImplementedError  # pragma: no cover
+
+
+class PygmentsSyntaxTheme(SyntaxTheme):
+    """Syntax theme that delegates to Pygments theme."""
+
+    def __init__(self, theme: Union[str, Type[PygmentsStyle]]) -> None:
+        self._style_cache: Dict[TokenType, Style] = {}
+        if isinstance(theme, str):
+            try:
+                self._pygments_style_class = get_style_by_name(theme)
+            except ClassNotFound:
+                self._pygments_style_class = get_style_by_name("default")
+        else:
+            self._pygments_style_class = theme
+
+        self._background_color = self._pygments_style_class.background_color
+        self._background_style = Style(bgcolor=self._background_color)
+
+    def get_style_for_token(self, token_type: TokenType) -> Style:
+        """Get a style from a Pygments class."""
+        try:
+            return self._style_cache[token_type]
+        except KeyError:
+            try:
+                pygments_style = self._pygments_style_class.style_for_token(token_type)
+            except KeyError:
+                style = Style.null()
+            else:
+                color = pygments_style["color"]
+                bgcolor = pygments_style["bgcolor"]
+                style = Style(
+                    color="#" + color if color else "#000000",
+                    bgcolor="#" + bgcolor if bgcolor else self._background_color,
+                    bold=pygments_style["bold"],
+                    italic=pygments_style["italic"],
+                    underline=pygments_style["underline"],
+                )
+            self._style_cache[token_type] = style
+        return style
+
+    def get_background_style(self) -> Style:
+        return self._background_style
+
+
+class ANSISyntaxTheme(SyntaxTheme):
+    """Syntax theme to use standard colors."""
+
+    def __init__(self, style_map: Dict[TokenType, Style]) -> None:
+        self.style_map = style_map
+        self._missing_style = Style.null()
+        self._background_style = Style.null()
+        self._style_cache: Dict[TokenType, Style] = {}
+
+    def get_style_for_token(self, token_type: TokenType) -> Style:
+        """Look up style in the style map."""
+        try:
+            return self._style_cache[token_type]
+        except KeyError:
+            # Styles form a hierarchy
+            # We need to go from most to least specific
+            # e.g. ("foo", "bar", "baz") to ("foo", "bar")  to ("foo",)
+            get_style = self.style_map.get
+            token = tuple(token_type)
+            style = self._missing_style
+            while token:
+                _style = get_style(token)
+                if _style is not None:
+                    style = _style
+                    break
+                token = token[:-1]
+            self._style_cache[token_type] = style
+            return style
+
+    def get_background_style(self) -> Style:
+        return self._background_style
+
+
+SyntaxPosition = Tuple[int, int]
+
+
+class _SyntaxHighlightRange(NamedTuple):
+    """
+    A range to highlight in a Syntax object.
+    `start` and `end` are 2-integers tuples, where the first integer is the line number
+    (starting from 1) and the second integer is the column index (starting from 0).
+    """
+
+    style: StyleType
+    start: SyntaxPosition
+    end: SyntaxPosition
+    style_before: bool = False
+
+
+class PaddingProperty:
+    """Descriptor to get and set padding."""
+
+    def __get__(self, obj: Syntax, objtype: Type[Syntax]) -> Tuple[int, int, int, int]:
+        """Space around the Syntax."""
+        return obj._padding
+
+    def __set__(self, obj: Syntax, padding: PaddingDimensions) -> None:
+        obj._padding = Padding.unpack(padding)
+
+
+class Syntax(JupyterMixin):
+    """Construct a Syntax object to render syntax highlighted code.
+
+    Args:
+        code (str): Code to highlight.
+        lexer (Lexer | str): Lexer to use (see https://pygments.org/docs/lexers/)
+        theme (str, optional): Color theme, aka Pygments style (see https://pygments.org/docs/styles/#getting-a-list-of-available-styles). Defaults to "monokai".
+        dedent (bool, optional): Enable stripping of initial whitespace. Defaults to False.
+        line_numbers (bool, optional): Enable rendering of line numbers. Defaults to False.
+        start_line (int, optional): Starting number for line numbers. Defaults to 1.
+        line_range (Tuple[int | None, int | None], optional): If given should be a tuple of the start and end line to render.
+            A value of None in the tuple indicates the range is open in that direction.
+        highlight_lines (Set[int]): A set of line numbers to highlight.
+        code_width: Width of code to render (not including line numbers), or ``None`` to use all available width.
+        tab_size (int, optional): Size of tabs. Defaults to 4.
+        word_wrap (bool, optional): Enable word wrapping.
+        background_color (str, optional): Optional background color, or None to use theme color. Defaults to None.
+        indent_guides (bool, optional): Show indent guides. Defaults to False.
+        padding (PaddingDimensions): Padding to apply around the syntax. Defaults to 0 (no padding).
+    """
+
+    _pygments_style_class: Type[PygmentsStyle]
+    _theme: SyntaxTheme
+
+    @classmethod
+    def get_theme(cls, name: Union[str, SyntaxTheme]) -> SyntaxTheme:
+        """Get a syntax theme instance."""
+        if isinstance(name, SyntaxTheme):
+            return name
+        theme: SyntaxTheme
+        if name in RICH_SYNTAX_THEMES:
+            theme = ANSISyntaxTheme(RICH_SYNTAX_THEMES[name])
+        else:
+            theme = PygmentsSyntaxTheme(name)
+        return theme
+
+    def __init__(
+        self,
+        code: str,
+        lexer: Union[Lexer, str],
+        *,
+        theme: Union[str, SyntaxTheme] = DEFAULT_THEME,
+        dedent: bool = False,
+        line_numbers: bool = False,
+        start_line: int = 1,
+        line_range: Optional[Tuple[Optional[int], Optional[int]]] = None,
+        highlight_lines: Optional[Set[int]] = None,
+        code_width: Optional[int] = None,
+        tab_size: int = 4,
+        word_wrap: bool = False,
+        background_color: Optional[str] = None,
+        indent_guides: bool = False,
+        padding: PaddingDimensions = 0,
+    ) -> None:
+        self.code = code
+        self._lexer = lexer
+        self.dedent = dedent
+        self.line_numbers = line_numbers
+        self.start_line = start_line
+        self.line_range = line_range
+        self.highlight_lines = highlight_lines or set()
+        self.code_width = code_width
+        self.tab_size = tab_size
+        self.word_wrap = word_wrap
+        self.background_color = background_color
+        self.background_style = (
+            Style(bgcolor=background_color) if background_color else Style()
+        )
+        self.indent_guides = indent_guides
+        self._padding = Padding.unpack(padding)
+
+        self._theme = self.get_theme(theme)
+        self._stylized_ranges: List[_SyntaxHighlightRange] = []
+
+    padding = PaddingProperty()
+
+    @classmethod
+    def from_path(
+        cls,
+        path: str,
+        encoding: str = "utf-8",
+        lexer: Optional[Union[Lexer, str]] = None,
+        theme: Union[str, SyntaxTheme] = DEFAULT_THEME,
+        dedent: bool = False,
+        line_numbers: bool = False,
+        line_range: Optional[Tuple[int, int]] = None,
+        start_line: int = 1,
+        highlight_lines: Optional[Set[int]] = None,
+        code_width: Optional[int] = None,
+        tab_size: int = 4,
+        word_wrap: bool = False,
+        background_color: Optional[str] = None,
+        indent_guides: bool = False,
+        padding: PaddingDimensions = 0,
+    ) -> "Syntax":
+        """Construct a Syntax object from a file.
+
+        Args:
+            path (str): Path to file to highlight.
+            encoding (str): Encoding of file.
+            lexer (str | Lexer, optional): Lexer to use. If None, lexer will be auto-detected from path/file content.
+            theme (str, optional): Color theme, aka Pygments style (see https://pygments.org/docs/styles/#getting-a-list-of-available-styles). Defaults to "emacs".
+            dedent (bool, optional): Enable stripping of initial whitespace. Defaults to True.
+            line_numbers (bool, optional): Enable rendering of line numbers. Defaults to False.
+            start_line (int, optional): Starting number for line numbers. Defaults to 1.
+            line_range (Tuple[int, int], optional): If given should be a tuple of the start and end line to render.
+            highlight_lines (Set[int]): A set of line numbers to highlight.
+            code_width: Width of code to render (not including line numbers), or ``None`` to use all available width.
+            tab_size (int, optional): Size of tabs. Defaults to 4.
+            word_wrap (bool, optional): Enable word wrapping of code.
+            background_color (str, optional): Optional background color, or None to use theme color. Defaults to None.
+            indent_guides (bool, optional): Show indent guides. Defaults to False.
+            padding (PaddingDimensions): Padding to apply around the syntax. Defaults to 0 (no padding).
+
+        Returns:
+            [Syntax]: A Syntax object that may be printed to the console
+        """
+        code = Path(path).read_text(encoding=encoding)
+
+        if not lexer:
+            lexer = cls.guess_lexer(path, code=code)
+
+        return cls(
+            code,
+            lexer,
+            theme=theme,
+            dedent=dedent,
+            line_numbers=line_numbers,
+            line_range=line_range,
+            start_line=start_line,
+            highlight_lines=highlight_lines,
+            code_width=code_width,
+            tab_size=tab_size,
+            word_wrap=word_wrap,
+            background_color=background_color,
+            indent_guides=indent_guides,
+            padding=padding,
+        )
+
+    @classmethod
+    def guess_lexer(cls, path: str, code: Optional[str] = None) -> str:
+        """Guess the alias of the Pygments lexer to use based on a path and an optional string of code.
+        If code is supplied, it will use a combination of the code and the filename to determine the
+        best lexer to use. For example, if the file is ``index.html`` and the file contains Django
+        templating syntax, then "html+django" will be returned. If the file is ``index.html``, and no
+        templating language is used, the "html" lexer will be used. If no string of code
+        is supplied, the lexer will be chosen based on the file extension..
+
+        Args:
+            path (AnyStr): The path to the file containing the code you wish to know the lexer for.
+            code (str, optional): Optional string of code that will be used as a fallback if no lexer
+                is found for the supplied path.
+
+        Returns:
+            str: The name of the Pygments lexer that best matches the supplied path/code.
+        """
+        lexer: Optional[Lexer] = None
+        lexer_name = "default"
+        if code:
+            try:
+                lexer = guess_lexer_for_filename(path, code)
+            except ClassNotFound:
+                pass
+
+        if not lexer:
+            try:
+                _, ext = os.path.splitext(path)
+                if ext:
+                    extension = ext.lstrip(".").lower()
+                    lexer = get_lexer_by_name(extension)
+            except ClassNotFound:
+                pass
+
+        if lexer:
+            if lexer.aliases:
+                lexer_name = lexer.aliases[0]
+            else:
+                lexer_name = lexer.name
+
+        return lexer_name
+
+    def _get_base_style(self) -> Style:
+        """Get the base style."""
+        default_style = self._theme.get_background_style() + self.background_style
+        return default_style
+
+    def _get_token_color(self, token_type: TokenType) -> Optional[Color]:
+        """Get a color (if any) for the given token.
+
+        Args:
+            token_type (TokenType): A token type tuple from Pygments.
+
+        Returns:
+            Optional[Color]: Color from theme, or None for no color.
+        """
+        style = self._theme.get_style_for_token(token_type)
+        return style.color
+
+    @property
+    def lexer(self) -> Optional[Lexer]:
+        """The lexer for this syntax, or None if no lexer was found.
+
+        Tries to find the lexer by name if a string was passed to the constructor.
+        """
+
+        if isinstance(self._lexer, Lexer):
+            return self._lexer
+        try:
+            return get_lexer_by_name(
+                self._lexer,
+                stripnl=False,
+                ensurenl=True,
+                tabsize=self.tab_size,
+            )
+        except ClassNotFound:
+            return None
+
+    @property
+    def default_lexer(self) -> Lexer:
+        """A Pygments Lexer to use if one is not specified or invalid."""
+        return get_lexer_by_name(
+            "text",
+            stripnl=False,
+            ensurenl=True,
+            tabsize=self.tab_size,
+        )
+
+    def highlight(
+        self,
+        code: str,
+        line_range: Optional[Tuple[Optional[int], Optional[int]]] = None,
+    ) -> Text:
+        """Highlight code and return a Text instance.
+
+        Args:
+            code (str): Code to highlight.
+            line_range(Tuple[int, int], optional): Optional line range to highlight.
+
+        Returns:
+            Text: A text instance containing highlighted syntax.
+        """
+
+        base_style = self._get_base_style()
+        justify: JustifyMethod = (
+            "default" if base_style.transparent_background else "left"
+        )
+
+        text = Text(
+            justify=justify,
+            style=base_style,
+            tab_size=self.tab_size,
+            no_wrap=not self.word_wrap,
+        )
+        _get_theme_style = self._theme.get_style_for_token
+
+        lexer = self.lexer or self.default_lexer
+
+        if lexer is None:
+            text.append(code)
+        else:
+            if line_range:
+                # More complicated path to only stylize a portion of the code
+                # This speeds up further operations as there are less spans to process
+                line_start, line_end = line_range
+
+                def line_tokenize() -> Iterable[Tuple[Any, str]]:
+                    """Split tokens to one per line."""
+                    assert lexer  # required to make MyPy happy - we know lexer is not None at this point
+
+                    for token_type, token in lexer.get_tokens(code):
+                        while token:
+                            line_token, new_line, token = token.partition("\n")
+                            yield token_type, line_token + new_line
+
+                def tokens_to_spans() -> Iterable[Tuple[str, Optional[Style]]]:
+                    """Convert tokens to spans."""
+                    tokens = iter(line_tokenize())
+                    line_no = 0
+                    _line_start = line_start - 1 if line_start else 0
+
+                    # Skip over tokens until line start
+                    while line_no < _line_start:
+                        try:
+                            _token_type, token = next(tokens)
+                        except StopIteration:
+                            break
+                        yield (token, None)
+                        if token.endswith("\n"):
+                            line_no += 1
+                    # Generate spans until line end
+                    for token_type, token in tokens:
+                        yield (token, _get_theme_style(token_type))
+                        if token.endswith("\n"):
+                            line_no += 1
+                            if line_end and line_no >= line_end:
+                                break
+
+                text.append_tokens(tokens_to_spans())
+
+            else:
+                text.append_tokens(
+                    (token, _get_theme_style(token_type))
+                    for token_type, token in lexer.get_tokens(code)
+                )
+            if self.background_color is not None:
+                text.stylize(f"on {self.background_color}")
+
+        if self._stylized_ranges:
+            self._apply_stylized_ranges(text)
+
+        return text
+
+    def stylize_range(
+        self,
+        style: StyleType,
+        start: SyntaxPosition,
+        end: SyntaxPosition,
+        style_before: bool = False,
+    ) -> None:
+        """
+        Adds a custom style on a part of the code, that will be applied to the syntax display when it's rendered.
+        Line numbers are 1-based, while column indexes are 0-based.
+
+        Args:
+            style (StyleType): The style to apply.
+            start (Tuple[int, int]): The start of the range, in the form `[line number, column index]`.
+            end (Tuple[int, int]): The end of the range, in the form `[line number, column index]`.
+            style_before (bool): Apply the style before any existing styles.
+        """
+        self._stylized_ranges.append(
+            _SyntaxHighlightRange(style, start, end, style_before)
+        )
+
+    def _get_line_numbers_color(self, blend: float = 0.3) -> Color:
+        background_style = self._theme.get_background_style() + self.background_style
+        background_color = background_style.bgcolor
+        if background_color is None or background_color.is_system_defined:
+            return Color.default()
+        foreground_color = self._get_token_color(Token.Text)
+        if foreground_color is None or foreground_color.is_system_defined:
+            return foreground_color or Color.default()
+        new_color = blend_rgb(
+            background_color.get_truecolor(),
+            foreground_color.get_truecolor(),
+            cross_fade=blend,
+        )
+        return Color.from_triplet(new_color)
+
+    @property
+    def _numbers_column_width(self) -> int:
+        """Get the number of characters used to render the numbers column."""
+        column_width = 0
+        if self.line_numbers:
+            column_width = (
+                len(str(self.start_line + self.code.count("\n")))
+                + NUMBERS_COLUMN_DEFAULT_PADDING
+            )
+        return column_width
+
+    def _get_number_styles(self, console: Console) -> Tuple[Style, Style, Style]:
+        """Get background, number, and highlight styles for line numbers."""
+        background_style = self._get_base_style()
+        if background_style.transparent_background:
+            return Style.null(), Style(dim=True), Style.null()
+        if console.color_system in ("256", "truecolor"):
+            number_style = Style.chain(
+                background_style,
+                self._theme.get_style_for_token(Token.Text),
+                Style(color=self._get_line_numbers_color()),
+                self.background_style,
+            )
+            highlight_number_style = Style.chain(
+                background_style,
+                self._theme.get_style_for_token(Token.Text),
+                Style(bold=True, color=self._get_line_numbers_color(0.9)),
+                self.background_style,
+            )
+        else:
+            number_style = background_style + Style(dim=True)
+            highlight_number_style = background_style + Style(dim=False)
+        return background_style, number_style, highlight_number_style
+
+    def __rich_measure__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> "Measurement":
+        _, right, _, left = self.padding
+        padding = left + right
+        if self.code_width is not None:
+            width = self.code_width + self._numbers_column_width + padding + 1
+            return Measurement(self._numbers_column_width, width)
+        lines = self.code.splitlines()
+        width = (
+            self._numbers_column_width
+            + padding
+            + (max(cell_len(line) for line in lines) if lines else 0)
+        )
+        if self.line_numbers:
+            width += 1
+        return Measurement(self._numbers_column_width, width)
+
+    def __rich_console__(
+        self, console: Console, options: ConsoleOptions
+    ) -> RenderResult:
+        segments = Segments(self._get_syntax(console, options))
+        if any(self.padding):
+            yield Padding(segments, style=self._get_base_style(), pad=self.padding)
+        else:
+            yield segments
+
+    def _get_syntax(
+        self,
+        console: Console,
+        options: ConsoleOptions,
+    ) -> Iterable[Segment]:
+        """
+        Get the Segments for the Syntax object, excluding any vertical/horizontal padding
+        """
+        transparent_background = self._get_base_style().transparent_background
+        _pad_top, pad_right, _pad_bottom, pad_left = self.padding
+        horizontal_padding = pad_left + pad_right
+        code_width = (
+            (
+                (options.max_width - self._numbers_column_width - 1)
+                if self.line_numbers
+                else options.max_width
+            )
+            - horizontal_padding
+            if self.code_width is None
+            else self.code_width
+        )
+        code_width = max(0, code_width)
+
+        ends_on_nl, processed_code = self._process_code(self.code)
+        text = self.highlight(processed_code, self.line_range)
+
+        if not self.line_numbers and not self.word_wrap and not self.line_range:
+            if not ends_on_nl:
+                text.remove_suffix("\n")
+            # Simple case of just rendering text
+            style = (
+                self._get_base_style()
+                + self._theme.get_style_for_token(Comment)
+                + Style(dim=True)
+                + self.background_style
+            )
+            if self.indent_guides and not options.ascii_only:
+                text = text.with_indent_guides(self.tab_size, style=style)
+                text.overflow = "crop"
+            if style.transparent_background:
+                yield from console.render(
+                    text, options=options.update(width=code_width)
+                )
+            else:
+                syntax_lines = console.render_lines(
+                    text,
+                    options.update(width=code_width, height=None, justify="left"),
+                    style=self.background_style,
+                    pad=True,
+                    new_lines=True,
+                )
+                for syntax_line in syntax_lines:
+                    yield from syntax_line
+            return
+
+        start_line, end_line = self.line_range or (None, None)
+        line_offset = 0
+        if start_line:
+            line_offset = max(0, start_line - 1)
+        lines: Union[List[Text], Lines] = text.split("\n", allow_blank=ends_on_nl)
+        if self.line_range:
+            if line_offset > len(lines):
+                return
+            lines = lines[line_offset:end_line]
+
+        if self.indent_guides and not options.ascii_only:
+            style = (
+                self._get_base_style()
+                + self._theme.get_style_for_token(Comment)
+                + Style(dim=True)
+                + self.background_style
+            )
+            lines = (
+                Text("\n")
+                .join(lines)
+                .with_indent_guides(self.tab_size, style=style + Style(italic=False))
+                .split("\n", allow_blank=True)
+            )
+
+        numbers_column_width = self._numbers_column_width
+        render_options = options.update(width=code_width)
+
+        highlight_line = self.highlight_lines.__contains__
+        _Segment = Segment
+        new_line = _Segment("\n")
+
+        line_pointer = "> " if options.legacy_windows else "❱ "
+
+        (
+            background_style,
+            number_style,
+            highlight_number_style,
+        ) = self._get_number_styles(console)
+
+        for line_no, line in enumerate(lines, self.start_line + line_offset):
+            if self.word_wrap:
+                wrapped_lines = console.render_lines(
+                    line,
+                    render_options.update(height=None, justify="left"),
+                    style=background_style,
+                    pad=not transparent_background,
+                )
+            else:
+                segments = list(line.render(console, end=""))
+                if options.no_wrap:
+                    wrapped_lines = [segments]
+                else:
+                    wrapped_lines = [
+                        _Segment.adjust_line_length(
+                            segments,
+                            render_options.max_width,
+                            style=background_style,
+                            pad=not transparent_background,
+                        )
+                    ]
+
+            if self.line_numbers:
+                wrapped_line_left_pad = _Segment(
+                    " " * numbers_column_width + " ", background_style
+                )
+                for first, wrapped_line in loop_first(wrapped_lines):
+                    if first:
+                        line_column = str(line_no).rjust(numbers_column_width - 2) + " "
+                        if highlight_line(line_no):
+                            yield _Segment(line_pointer, Style(color="red"))
+                            yield _Segment(line_column, highlight_number_style)
+                        else:
+                            yield _Segment("  ", highlight_number_style)
+                            yield _Segment(line_column, number_style)
+                    else:
+                        yield wrapped_line_left_pad
+                    yield from wrapped_line
+                    yield new_line
+            else:
+                for wrapped_line in wrapped_lines:
+                    yield from wrapped_line
+                    yield new_line
+
+    def _apply_stylized_ranges(self, text: Text) -> None:
+        """
+        Apply stylized ranges to a text instance,
+        using the given code to determine the right portion to apply the style to.
+
+        Args:
+            text (Text): Text instance to apply the style to.
+        """
+        code = text.plain
+        newlines_offsets = [
+            # Let's add outer boundaries at each side of the list:
+            0,
+            # N.B. using "\n" here is much faster than using metacharacters such as "^" or "\Z":
+            *[
+                match.start() + 1
+                for match in re.finditer("\n", code, flags=re.MULTILINE)
+            ],
+            len(code) + 1,
+        ]
+
+        for stylized_range in self._stylized_ranges:
+            start = _get_code_index_for_syntax_position(
+                newlines_offsets, stylized_range.start
+            )
+            end = _get_code_index_for_syntax_position(
+                newlines_offsets, stylized_range.end
+            )
+            if start is not None and end is not None:
+                if stylized_range.style_before:
+                    text.stylize_before(stylized_range.style, start, end)
+                else:
+                    text.stylize(stylized_range.style, start, end)
+
+    def _process_code(self, code: str) -> Tuple[bool, str]:
+        """
+        Applies various processing to a raw code string
+        (normalises it so it always ends with a line return, dedents it if necessary, etc.)
+
+        Args:
+            code (str): The raw code string to process
+
+        Returns:
+            Tuple[bool, str]: the boolean indicates whether the raw code ends with a line return,
+                while the string is the processed code.
+        """
+        ends_on_nl = code.endswith("\n")
+        processed_code = code if ends_on_nl else code + "\n"
+        processed_code = (
+            textwrap.dedent(processed_code) if self.dedent else processed_code
+        )
+        processed_code = processed_code.expandtabs(self.tab_size)
+        return ends_on_nl, processed_code
+
+
+def _get_code_index_for_syntax_position(
+    newlines_offsets: Sequence[int], position: SyntaxPosition
+) -> Optional[int]:
+    """
+    Returns the index of the code string for the given positions.
+
+    Args:
+        newlines_offsets (Sequence[int]): The offset of each newline character found in the code snippet.
+        position (SyntaxPosition): The position to search for.
+
+    Returns:
+        Optional[int]: The index of the code string for this position, or `None`
+            if the given position's line number is out of range (if it's the column that is out of range
+            we silently clamp its value so that it reaches the end of the line)
+    """
+    lines_count = len(newlines_offsets)
+
+    line_number, column_index = position
+    if line_number > lines_count or len(newlines_offsets) < (line_number + 1):
+        return None  # `line_number` is out of range
+    line_index = line_number - 1
+    line_length = newlines_offsets[line_index + 1] - newlines_offsets[line_index] - 1
+    # If `column_index` is out of range: let's silently clamp it:
+    column_index = min(line_length, column_index)
+    return newlines_offsets[line_index] + column_index
+
+
+if __name__ == "__main__":  # pragma: no cover
+    import argparse
+    import sys
+
+    parser = argparse.ArgumentParser(
+        description="Render syntax to the console with Rich"
+    )
+    parser.add_argument(
+        "path",
+        metavar="PATH",
+        help="path to file, or - for stdin",
+    )
+    parser.add_argument(
+        "-c",
+        "--force-color",
+        dest="force_color",
+        action="store_true",
+        default=None,
+        help="force color for non-terminals",
+    )
+    parser.add_argument(
+        "-i",
+        "--indent-guides",
+        dest="indent_guides",
+        action="store_true",
+        default=False,
+        help="display indent guides",
+    )
+    parser.add_argument(
+        "-l",
+        "--line-numbers",
+        dest="line_numbers",
+        action="store_true",
+        help="render line numbers",
+    )
+    parser.add_argument(
+        "-w",
+        "--width",
+        type=int,
+        dest="width",
+        default=None,
+        help="width of output (default will auto-detect)",
+    )
+    parser.add_argument(
+        "-r",
+        "--wrap",
+        dest="word_wrap",
+        action="store_true",
+        default=False,
+        help="word wrap long lines",
+    )
+    parser.add_argument(
+        "-s",
+        "--soft-wrap",
+        action="store_true",
+        dest="soft_wrap",
+        default=False,
+        help="enable soft wrapping mode",
+    )
+    parser.add_argument(
+        "-t", "--theme", dest="theme", default="monokai", help="pygments theme"
+    )
+    parser.add_argument(
+        "-b",
+        "--background-color",
+        dest="background_color",
+        default=None,
+        help="Override background color",
+    )
+    parser.add_argument(
+        "-x",
+        "--lexer",
+        default=None,
+        dest="lexer_name",
+        help="Lexer name",
+    )
+    parser.add_argument(
+        "-p", "--padding", type=int, default=0, dest="padding", help="Padding"
+    )
+    parser.add_argument(
+        "--highlight-line",
+        type=int,
+        default=None,
+        dest="highlight_line",
+        help="The line number (not index!) to highlight",
+    )
+    args = parser.parse_args()
+
+    from rich.console import Console
+
+    console = Console(force_terminal=args.force_color, width=args.width)
+
+    if args.path == "-":
+        code = sys.stdin.read()
+        syntax = Syntax(
+            code=code,
+            lexer=args.lexer_name,
+            line_numbers=args.line_numbers,
+            word_wrap=args.word_wrap,
+            theme=args.theme,
+            background_color=args.background_color,
+            indent_guides=args.indent_guides,
+            padding=args.padding,
+            highlight_lines={args.highlight_line},
+        )
+    else:
+        syntax = Syntax.from_path(
+            args.path,
+            lexer=args.lexer_name,
+            line_numbers=args.line_numbers,
+            word_wrap=args.word_wrap,
+            theme=args.theme,
+            background_color=args.background_color,
+            indent_guides=args.indent_guides,
+            padding=args.padding,
+            highlight_lines={args.highlight_line},
+        )
+    console.print(syntax, soft_wrap=args.soft_wrap)
diff --git a/local_packages/rich/table.py b/local_packages/rich/table.py
new file mode 100644
index 0000000..1e2eb2b
--- /dev/null
+++ b/local_packages/rich/table.py
@@ -0,0 +1,1015 @@
+from dataclasses import dataclass, field, replace
+from typing import (
+    TYPE_CHECKING,
+    Dict,
+    Iterable,
+    List,
+    NamedTuple,
+    Optional,
+    Sequence,
+    Tuple,
+    Union,
+)
+
+from . import box, errors
+from ._loop import loop_first_last, loop_last
+from ._pick import pick_bool
+from ._ratio import ratio_distribute, ratio_reduce
+from .align import VerticalAlignMethod
+from .jupyter import JupyterMixin
+from .measure import Measurement
+from .padding import Padding, PaddingDimensions
+from .protocol import is_renderable
+from .segment import Segment
+from .style import Style, StyleType
+from .text import Text, TextType
+
+if TYPE_CHECKING:
+    from .console import (
+        Console,
+        ConsoleOptions,
+        JustifyMethod,
+        OverflowMethod,
+        RenderableType,
+        RenderResult,
+    )
+
+
+@dataclass
+class Column:
+    """Defines a column within a ~Table.
+
+    Args:
+        title (Union[str, Text], optional): The title of the table rendered at the top. Defaults to None.
+        caption (Union[str, Text], optional): The table caption rendered below. Defaults to None.
+        width (int, optional): The width in characters of the table, or ``None`` to automatically fit. Defaults to None.
+        min_width (Optional[int], optional): The minimum width of the table, or ``None`` for no minimum. Defaults to None.
+        box (box.Box, optional): One of the constants in box.py used to draw the edges (see :ref:`appendix_box`), or ``None`` for no box lines. Defaults to box.HEAVY_HEAD.
+        safe_box (Optional[bool], optional): Disable box characters that don't display on windows legacy terminal with *raster* fonts. Defaults to True.
+        padding (PaddingDimensions, optional): Padding for cells (top, right, bottom, left). Defaults to (0, 1).
+        collapse_padding (bool, optional): Enable collapsing of padding around cells. Defaults to False.
+        pad_edge (bool, optional): Enable padding of edge cells. Defaults to True.
+        show_header (bool, optional): Show a header row. Defaults to True.
+        show_footer (bool, optional): Show a footer row. Defaults to False.
+        show_edge (bool, optional): Draw a box around the outside of the table. Defaults to True.
+        show_lines (bool, optional): Draw lines between every row. Defaults to False.
+        leading (int, optional): Number of blank lines between rows (precludes ``show_lines``). Defaults to 0.
+        style (Union[str, Style], optional): Default style for the table. Defaults to "none".
+        row_styles (List[Union, str], optional): Optional list of row styles, if more than one style is given then the styles will alternate. Defaults to None.
+        header_style (Union[str, Style], optional): Style of the header. Defaults to "table.header".
+        footer_style (Union[str, Style], optional): Style of the footer. Defaults to "table.footer".
+        border_style (Union[str, Style], optional): Style of the border. Defaults to None.
+        title_style (Union[str, Style], optional): Style of the title. Defaults to None.
+        caption_style (Union[str, Style], optional): Style of the caption. Defaults to None.
+        title_justify (str, optional): Justify method for title. Defaults to "center".
+        caption_justify (str, optional): Justify method for caption. Defaults to "center".
+        highlight (bool, optional): Highlight cell contents (if str). Defaults to False.
+    """
+
+    header: "RenderableType" = ""
+    """RenderableType: Renderable for the header (typically a string)"""
+
+    footer: "RenderableType" = ""
+    """RenderableType: Renderable for the footer (typically a string)"""
+
+    header_style: StyleType = ""
+    """StyleType: The style of the header."""
+
+    footer_style: StyleType = ""
+    """StyleType: The style of the footer."""
+
+    style: StyleType = ""
+    """StyleType: The style of the column."""
+
+    justify: "JustifyMethod" = "left"
+    """str: How to justify text within the column ("left", "center", "right", or "full")"""
+
+    vertical: "VerticalAlignMethod" = "top"
+    """str: How to vertically align content ("top", "middle", or "bottom")"""
+
+    overflow: "OverflowMethod" = "ellipsis"
+    """str: Overflow method."""
+
+    width: Optional[int] = None
+    """Optional[int]: Width of the column, or ``None`` (default) to auto calculate width."""
+
+    min_width: Optional[int] = None
+    """Optional[int]: Minimum width of column, or ``None`` for no minimum. Defaults to None."""
+
+    max_width: Optional[int] = None
+    """Optional[int]: Maximum width of column, or ``None`` for no maximum. Defaults to None."""
+
+    ratio: Optional[int] = None
+    """Optional[int]: Ratio to use when calculating column width, or ``None`` (default) to adapt to column contents."""
+
+    no_wrap: bool = False
+    """bool: Prevent wrapping of text within the column. Defaults to ``False``."""
+
+    highlight: bool = False
+    """bool: Apply highlighter to column. Defaults to ``False``."""
+
+    _index: int = 0
+    """Index of column."""
+
+    _cells: List["RenderableType"] = field(default_factory=list)
+
+    def copy(self) -> "Column":
+        """Return a copy of this Column."""
+        return replace(self, _cells=[])
+
+    @property
+    def cells(self) -> Iterable["RenderableType"]:
+        """Get all cells in the column, not including header."""
+        yield from self._cells
+
+    @property
+    def flexible(self) -> bool:
+        """Check if this column is flexible."""
+        return self.ratio is not None
+
+
+@dataclass
+class Row:
+    """Information regarding a row."""
+
+    style: Optional[StyleType] = None
+    """Style to apply to row."""
+
+    end_section: bool = False
+    """Indicated end of section, which will force a line beneath the row."""
+
+
+class _Cell(NamedTuple):
+    """A single cell in a table."""
+
+    style: StyleType
+    """Style to apply to cell."""
+    renderable: "RenderableType"
+    """Cell renderable."""
+    vertical: VerticalAlignMethod
+    """Cell vertical alignment."""
+
+
+class Table(JupyterMixin):
+    """A console renderable to draw a table.
+
+    Args:
+        *headers (Union[Column, str]): Column headers, either as a string, or :class:`~rich.table.Column` instance.
+        title (Union[str, Text], optional): The title of the table rendered at the top. Defaults to None.
+        caption (Union[str, Text], optional): The table caption rendered below. Defaults to None.
+        width (int, optional): The width in characters of the table, or ``None`` to automatically fit. Defaults to None.
+        min_width (Optional[int], optional): The minimum width of the table, or ``None`` for no minimum. Defaults to None.
+        box (box.Box, optional): One of the constants in box.py used to draw the edges (see :ref:`appendix_box`), or ``None`` for no box lines. Defaults to box.HEAVY_HEAD.
+        safe_box (Optional[bool], optional): Disable box characters that don't display on windows legacy terminal with *raster* fonts. Defaults to True.
+        padding (PaddingDimensions, optional): Padding for cells (top, right, bottom, left). Defaults to (0, 1).
+        collapse_padding (bool, optional): Enable collapsing of padding around cells. Defaults to False.
+        pad_edge (bool, optional): Enable padding of edge cells. Defaults to True.
+        expand (bool, optional): Expand the table to fit the available space if ``True``, otherwise the table width will be auto-calculated. Defaults to False.
+        show_header (bool, optional): Show a header row. Defaults to True.
+        show_footer (bool, optional): Show a footer row. Defaults to False.
+        show_edge (bool, optional): Draw a box around the outside of the table. Defaults to True.
+        show_lines (bool, optional): Draw lines between every row. Defaults to False.
+        leading (int, optional): Number of blank lines between rows (precludes ``show_lines``). Defaults to 0.
+        style (Union[str, Style], optional): Default style for the table. Defaults to "none".
+        row_styles (List[Union, str], optional): Optional list of row styles, if more than one style is given then the styles will alternate. Defaults to None.
+        header_style (Union[str, Style], optional): Style of the header. Defaults to "table.header".
+        footer_style (Union[str, Style], optional): Style of the footer. Defaults to "table.footer".
+        border_style (Union[str, Style], optional): Style of the border. Defaults to None.
+        title_style (Union[str, Style], optional): Style of the title. Defaults to None.
+        caption_style (Union[str, Style], optional): Style of the caption. Defaults to None.
+        title_justify (str, optional): Justify method for title. Defaults to "center".
+        caption_justify (str, optional): Justify method for caption. Defaults to "center".
+        highlight (bool, optional): Highlight cell contents (if str). Defaults to False.
+    """
+
+    columns: List[Column]
+    rows: List[Row]
+
+    def __init__(
+        self,
+        *headers: Union[Column, str],
+        title: Optional[TextType] = None,
+        caption: Optional[TextType] = None,
+        width: Optional[int] = None,
+        min_width: Optional[int] = None,
+        box: Optional[box.Box] = box.HEAVY_HEAD,
+        safe_box: Optional[bool] = None,
+        padding: PaddingDimensions = (0, 1),
+        collapse_padding: bool = False,
+        pad_edge: bool = True,
+        expand: bool = False,
+        show_header: bool = True,
+        show_footer: bool = False,
+        show_edge: bool = True,
+        show_lines: bool = False,
+        leading: int = 0,
+        style: StyleType = "none",
+        row_styles: Optional[Iterable[StyleType]] = None,
+        header_style: Optional[StyleType] = "table.header",
+        footer_style: Optional[StyleType] = "table.footer",
+        border_style: Optional[StyleType] = None,
+        title_style: Optional[StyleType] = None,
+        caption_style: Optional[StyleType] = None,
+        title_justify: "JustifyMethod" = "center",
+        caption_justify: "JustifyMethod" = "center",
+        highlight: bool = False,
+    ) -> None:
+        self.columns: List[Column] = []
+        self.rows: List[Row] = []
+        self.title = title
+        self.caption = caption
+        self.width = width
+        self.min_width = min_width
+        self.box = box
+        self.safe_box = safe_box
+        self._padding = Padding.unpack(padding)
+        self.pad_edge = pad_edge
+        self._expand = expand
+        self.show_header = show_header
+        self.show_footer = show_footer
+        self.show_edge = show_edge
+        self.show_lines = show_lines
+        self.leading = leading
+        self.collapse_padding = collapse_padding
+        self.style = style
+        self.header_style = header_style or ""
+        self.footer_style = footer_style or ""
+        self.border_style = border_style
+        self.title_style = title_style
+        self.caption_style = caption_style
+        self.title_justify: "JustifyMethod" = title_justify
+        self.caption_justify: "JustifyMethod" = caption_justify
+        self.highlight = highlight
+        self.row_styles: Sequence[StyleType] = list(row_styles or [])
+        append_column = self.columns.append
+        for header in headers:
+            if isinstance(header, str):
+                self.add_column(header=header)
+            else:
+                header._index = len(self.columns)
+                append_column(header)
+
+    @classmethod
+    def grid(
+        cls,
+        *headers: Union[Column, str],
+        padding: PaddingDimensions = 0,
+        collapse_padding: bool = True,
+        pad_edge: bool = False,
+        expand: bool = False,
+    ) -> "Table":
+        """Get a table with no lines, headers, or footer.
+
+        Args:
+            *headers (Union[Column, str]): Column headers, either as a string, or :class:`~rich.table.Column` instance.
+            padding (PaddingDimensions, optional): Get padding around cells. Defaults to 0.
+            collapse_padding (bool, optional): Enable collapsing of padding around cells. Defaults to True.
+            pad_edge (bool, optional): Enable padding around edges of table. Defaults to False.
+            expand (bool, optional): Expand the table to fit the available space if ``True``, otherwise the table width will be auto-calculated. Defaults to False.
+
+        Returns:
+            Table: A table instance.
+        """
+        return cls(
+            *headers,
+            box=None,
+            padding=padding,
+            collapse_padding=collapse_padding,
+            show_header=False,
+            show_footer=False,
+            show_edge=False,
+            pad_edge=pad_edge,
+            expand=expand,
+        )
+
+    @property
+    def expand(self) -> bool:
+        """Setting a non-None self.width implies expand."""
+        return self._expand or self.width is not None
+
+    @expand.setter
+    def expand(self, expand: bool) -> None:
+        """Set expand."""
+        self._expand = expand
+
+    @property
+    def _extra_width(self) -> int:
+        """Get extra width to add to cell content."""
+        width = 0
+        if self.box and self.show_edge:
+            width += 2
+        if self.box:
+            width += len(self.columns) - 1
+        return width
+
+    @property
+    def row_count(self) -> int:
+        """Get the current number of rows."""
+        return len(self.rows)
+
+    def get_row_style(self, console: "Console", index: int) -> StyleType:
+        """Get the current row style."""
+        style = Style.null()
+        if self.row_styles:
+            style += console.get_style(self.row_styles[index % len(self.row_styles)])
+        row_style = self.rows[index].style
+        if row_style is not None:
+            style += console.get_style(row_style)
+        return style
+
+    def __rich_measure__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> Measurement:
+        max_width = options.max_width
+        if self.width is not None:
+            max_width = self.width
+        if max_width < 0:
+            return Measurement(0, 0)
+
+        extra_width = self._extra_width
+        max_width = sum(
+            self._calculate_column_widths(
+                console, options.update_width(max_width - extra_width)
+            )
+        )
+        _measure_column = self._measure_column
+
+        measurements = [
+            _measure_column(console, options.update_width(max_width), column)
+            for column in self.columns
+        ]
+        minimum_width = (
+            sum(measurement.minimum for measurement in measurements) + extra_width
+        )
+        maximum_width = (
+            sum(measurement.maximum for measurement in measurements) + extra_width
+            if (self.width is None)
+            else self.width
+        )
+        measurement = Measurement(minimum_width, maximum_width)
+        measurement = measurement.clamp(self.min_width)
+        return measurement
+
+    @property
+    def padding(self) -> Tuple[int, int, int, int]:
+        """Get cell padding."""
+        return self._padding
+
+    @padding.setter
+    def padding(self, padding: PaddingDimensions) -> "Table":
+        """Set cell padding."""
+        self._padding = Padding.unpack(padding)
+        return self
+
+    def add_column(
+        self,
+        header: "RenderableType" = "",
+        footer: "RenderableType" = "",
+        *,
+        header_style: Optional[StyleType] = None,
+        highlight: Optional[bool] = None,
+        footer_style: Optional[StyleType] = None,
+        style: Optional[StyleType] = None,
+        justify: "JustifyMethod" = "left",
+        vertical: "VerticalAlignMethod" = "top",
+        overflow: "OverflowMethod" = "ellipsis",
+        width: Optional[int] = None,
+        min_width: Optional[int] = None,
+        max_width: Optional[int] = None,
+        ratio: Optional[int] = None,
+        no_wrap: bool = False,
+    ) -> None:
+        """Add a column to the table.
+
+        Args:
+            header (RenderableType, optional): Text or renderable for the header.
+                Defaults to "".
+            footer (RenderableType, optional): Text or renderable for the footer.
+                Defaults to "".
+            header_style (Union[str, Style], optional): Style for the header, or None for default. Defaults to None.
+            highlight (bool, optional): Whether to highlight the text. The default of None uses the value of the table (self) object.
+            footer_style (Union[str, Style], optional): Style for the footer, or None for default. Defaults to None.
+            style (Union[str, Style], optional): Style for the column cells, or None for default. Defaults to None.
+            justify (JustifyMethod, optional): Alignment for cells. Defaults to "left".
+            vertical (VerticalAlignMethod, optional): Vertical alignment, one of "top", "middle", or "bottom". Defaults to "top".
+            overflow (OverflowMethod): Overflow method: "crop", "fold", "ellipsis". Defaults to "ellipsis".
+            width (int, optional): Desired width of column in characters, or None to fit to contents. Defaults to None.
+            min_width (Optional[int], optional): Minimum width of column, or ``None`` for no minimum. Defaults to None.
+            max_width (Optional[int], optional): Maximum width of column, or ``None`` for no maximum. Defaults to None.
+            ratio (int, optional): Flexible ratio for the column (requires ``Table.expand`` or ``Table.width``). Defaults to None.
+            no_wrap (bool, optional): Set to ``True`` to disable wrapping of this column.
+        """
+
+        column = Column(
+            _index=len(self.columns),
+            header=header,
+            footer=footer,
+            header_style=header_style or "",
+            highlight=highlight if highlight is not None else self.highlight,
+            footer_style=footer_style or "",
+            style=style or "",
+            justify=justify,
+            vertical=vertical,
+            overflow=overflow,
+            width=width,
+            min_width=min_width,
+            max_width=max_width,
+            ratio=ratio,
+            no_wrap=no_wrap,
+        )
+        self.columns.append(column)
+
+    def add_row(
+        self,
+        *renderables: Optional["RenderableType"],
+        style: Optional[StyleType] = None,
+        end_section: bool = False,
+    ) -> None:
+        """Add a row of renderables.
+
+        Args:
+            *renderables (None or renderable): Each cell in a row must be a renderable object (including str),
+                or ``None`` for a blank cell.
+            style (StyleType, optional): An optional style to apply to the entire row. Defaults to None.
+            end_section (bool, optional): End a section and draw a line. Defaults to False.
+
+        Raises:
+            errors.NotRenderableError: If you add something that can't be rendered.
+        """
+
+        def add_cell(column: Column, renderable: "RenderableType") -> None:
+            column._cells.append(renderable)
+
+        cell_renderables: List[Optional["RenderableType"]] = list(renderables)
+
+        columns = self.columns
+        if len(cell_renderables) < len(columns):
+            cell_renderables = [
+                *cell_renderables,
+                *[None] * (len(columns) - len(cell_renderables)),
+            ]
+        for index, renderable in enumerate(cell_renderables):
+            if index == len(columns):
+                column = Column(_index=index, highlight=self.highlight)
+                for _ in self.rows:
+                    add_cell(column, Text(""))
+                self.columns.append(column)
+            else:
+                column = columns[index]
+            if renderable is None:
+                add_cell(column, "")
+            elif is_renderable(renderable):
+                add_cell(column, renderable)
+            else:
+                raise errors.NotRenderableError(
+                    f"unable to render {type(renderable).__name__}; a string or other renderable object is required"
+                )
+        self.rows.append(Row(style=style, end_section=end_section))
+
+    def add_section(self) -> None:
+        """Add a new section (draw a line after current row)."""
+
+        if self.rows:
+            self.rows[-1].end_section = True
+
+    def __rich_console__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> "RenderResult":
+        if not self.columns:
+            yield Segment("\n")
+            return
+
+        max_width = options.max_width
+        if self.width is not None:
+            max_width = self.width
+
+        extra_width = self._extra_width
+
+        widths = self._calculate_column_widths(
+            console, options.update_width(max_width - extra_width)
+        )
+        table_width = sum(widths) + extra_width
+
+        render_options = options.update(
+            width=table_width, highlight=self.highlight, height=None
+        )
+
+        def render_annotation(
+            text: TextType, style: StyleType, justify: "JustifyMethod" = "center"
+        ) -> "RenderResult":
+            render_text = (
+                console.render_str(text, style=style, highlight=False)
+                if isinstance(text, str)
+                else text
+            )
+            return console.render(
+                render_text, options=render_options.update(justify=justify)
+            )
+
+        if self.title:
+            yield from render_annotation(
+                self.title,
+                style=Style.pick_first(self.title_style, "table.title"),
+                justify=self.title_justify,
+            )
+        yield from self._render(console, render_options, widths)
+        if self.caption:
+            yield from render_annotation(
+                self.caption,
+                style=Style.pick_first(self.caption_style, "table.caption"),
+                justify=self.caption_justify,
+            )
+
+    def _calculate_column_widths(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> List[int]:
+        """Calculate the widths of each column, including padding, not including borders."""
+        max_width = options.max_width
+        columns = self.columns
+        width_ranges = [
+            self._measure_column(console, options, column) for column in columns
+        ]
+        widths = [_range.maximum or 1 for _range in width_ranges]
+
+        get_padding_width = self._get_padding_width
+        extra_width = self._extra_width
+        if self.expand:
+            ratios = [col.ratio or 0 for col in columns if col.flexible]
+            if any(ratios):
+                fixed_widths = [
+                    0 if column.flexible else _range.maximum
+                    for _range, column in zip(width_ranges, columns)
+                ]
+                flex_minimum = [
+                    (column.width or 1) + get_padding_width(column._index)
+                    for column in columns
+                    if column.flexible
+                ]
+                flexible_width = max_width - sum(fixed_widths)
+                flex_widths = ratio_distribute(flexible_width, ratios, flex_minimum)
+                iter_flex_widths = iter(flex_widths)
+                for index, column in enumerate(columns):
+                    if column.flexible:
+                        widths[index] = fixed_widths[index] + next(iter_flex_widths)
+        table_width = sum(widths)
+
+        if table_width > max_width:
+            widths = self._collapse_widths(
+                widths,
+                [(column.width is None and not column.no_wrap) for column in columns],
+                max_width,
+            )
+            table_width = sum(widths)
+            # last resort, reduce columns evenly
+            if table_width > max_width:
+                excess_width = table_width - max_width
+                widths = ratio_reduce(excess_width, [1] * len(widths), widths, widths)
+                table_width = sum(widths)
+
+            width_ranges = [
+                self._measure_column(console, options.update_width(width), column)
+                for width, column in zip(widths, columns)
+            ]
+            widths = [_range.maximum or 0 for _range in width_ranges]
+
+        if (table_width < max_width and self.expand) or (
+            self.min_width is not None and table_width < (self.min_width - extra_width)
+        ):
+            _max_width = (
+                max_width
+                if self.min_width is None
+                else min(self.min_width - extra_width, max_width)
+            )
+            pad_widths = ratio_distribute(_max_width - table_width, widths)
+            widths = [_width + pad for _width, pad in zip(widths, pad_widths)]
+
+        return widths
+
+    @classmethod
+    def _collapse_widths(
+        cls, widths: List[int], wrapable: List[bool], max_width: int
+    ) -> List[int]:
+        """Reduce widths so that the total is under max_width.
+
+        Args:
+            widths (List[int]): List of widths.
+            wrapable (List[bool]): List of booleans that indicate if a column may shrink.
+            max_width (int): Maximum width to reduce to.
+
+        Returns:
+            List[int]: A new list of widths.
+        """
+        total_width = sum(widths)
+        excess_width = total_width - max_width
+        if any(wrapable):
+            while total_width and excess_width > 0:
+                max_column = max(
+                    width for width, allow_wrap in zip(widths, wrapable) if allow_wrap
+                )
+                second_max_column = max(
+                    width if allow_wrap and width != max_column else 0
+                    for width, allow_wrap in zip(widths, wrapable)
+                )
+                column_difference = max_column - second_max_column
+                ratios = [
+                    (1 if (width == max_column and allow_wrap) else 0)
+                    for width, allow_wrap in zip(widths, wrapable)
+                ]
+                if not any(ratios) or not column_difference:
+                    break
+                max_reduce = [min(excess_width, column_difference)] * len(widths)
+                widths = ratio_reduce(excess_width, ratios, max_reduce, widths)
+
+                total_width = sum(widths)
+                excess_width = total_width - max_width
+        return widths
+
+    def _get_cells(
+        self, console: "Console", column_index: int, column: Column
+    ) -> Iterable[_Cell]:
+        """Get all the cells with padding and optional header."""
+
+        collapse_padding = self.collapse_padding
+        pad_edge = self.pad_edge
+        padding = self.padding
+        any_padding = any(padding)
+
+        first_column = column_index == 0
+        last_column = column_index == len(self.columns) - 1
+
+        _padding_cache: Dict[Tuple[bool, bool], Tuple[int, int, int, int]] = {}
+
+        def get_padding(first_row: bool, last_row: bool) -> Tuple[int, int, int, int]:
+            cached = _padding_cache.get((first_row, last_row))
+            if cached:
+                return cached
+            top, right, bottom, left = padding
+
+            if collapse_padding:
+                if not first_column:
+                    left = max(0, left - right)
+                if not last_row:
+                    bottom = max(0, top - bottom)
+
+            if not pad_edge:
+                if first_column:
+                    left = 0
+                if last_column:
+                    right = 0
+                if first_row:
+                    top = 0
+                if last_row:
+                    bottom = 0
+            _padding = (top, right, bottom, left)
+            _padding_cache[(first_row, last_row)] = _padding
+            return _padding
+
+        raw_cells: List[Tuple[StyleType, "RenderableType"]] = []
+        _append = raw_cells.append
+        get_style = console.get_style
+        if self.show_header:
+            header_style = get_style(self.header_style or "") + get_style(
+                column.header_style
+            )
+            _append((header_style, column.header))
+        cell_style = get_style(column.style or "")
+        for cell in column.cells:
+            _append((cell_style, cell))
+        if self.show_footer:
+            footer_style = get_style(self.footer_style or "") + get_style(
+                column.footer_style
+            )
+            _append((footer_style, column.footer))
+
+        if any_padding:
+            _Padding = Padding
+            for first, last, (style, renderable) in loop_first_last(raw_cells):
+                yield _Cell(
+                    style,
+                    _Padding(renderable, get_padding(first, last)),
+                    getattr(renderable, "vertical", None) or column.vertical,
+                )
+        else:
+            for style, renderable in raw_cells:
+                yield _Cell(
+                    style,
+                    renderable,
+                    getattr(renderable, "vertical", None) or column.vertical,
+                )
+
+    def _get_padding_width(self, column_index: int) -> int:
+        """Get extra width from padding."""
+        _, pad_right, _, pad_left = self.padding
+
+        if self.collapse_padding:
+            pad_left = 0
+            pad_right = abs(pad_left - pad_right)
+
+        if not self.pad_edge:
+            if column_index == 0:
+                pad_left = 0
+            if column_index == len(self.columns) - 1:
+                pad_right = 0
+
+        return pad_left + pad_right
+
+    def _measure_column(
+        self,
+        console: "Console",
+        options: "ConsoleOptions",
+        column: Column,
+    ) -> Measurement:
+        """Get the minimum and maximum width of the column."""
+
+        max_width = options.max_width
+        if max_width < 1:
+            return Measurement(0, 0)
+
+        padding_width = self._get_padding_width(column._index)
+        if column.width is not None:
+            # Fixed width column
+            return Measurement(
+                column.width + padding_width, column.width + padding_width
+            ).with_maximum(max_width)
+        # Flexible column, we need to measure contents
+        min_widths: List[int] = []
+        max_widths: List[int] = []
+        append_min = min_widths.append
+        append_max = max_widths.append
+        get_render_width = Measurement.get
+        for cell in self._get_cells(console, column._index, column):
+            _min, _max = get_render_width(console, options, cell.renderable)
+            append_min(_min)
+            append_max(_max)
+
+        measurement = Measurement(
+            max(min_widths) if min_widths else 1,
+            max(max_widths) if max_widths else max_width,
+        ).with_maximum(max_width)
+        measurement = measurement.clamp(
+            None if column.min_width is None else column.min_width + padding_width,
+            None if column.max_width is None else column.max_width + padding_width,
+        )
+        return measurement
+
+    def _render(
+        self, console: "Console", options: "ConsoleOptions", widths: List[int]
+    ) -> "RenderResult":
+        table_style = console.get_style(self.style or "")
+
+        border_style = table_style + console.get_style(self.border_style or "")
+        _column_cells = (
+            self._get_cells(console, column_index, column)
+            for column_index, column in enumerate(self.columns)
+        )
+
+        row_cells: List[Tuple[_Cell, ...]] = list(zip(*_column_cells))
+        _box = (
+            self.box.substitute(
+                options, safe=pick_bool(self.safe_box, console.safe_box)
+            )
+            if self.box
+            else None
+        )
+        _box = _box.get_plain_headed_box() if _box and not self.show_header else _box
+
+        new_line = Segment.line()
+
+        columns = self.columns
+        show_header = self.show_header
+        show_footer = self.show_footer
+        show_edge = self.show_edge
+        show_lines = self.show_lines
+        leading = self.leading
+
+        _Segment = Segment
+        if _box:
+            box_segments = [
+                (
+                    _Segment(_box.head_left, border_style),
+                    _Segment(_box.head_right, border_style),
+                    _Segment(_box.head_vertical, border_style),
+                ),
+                (
+                    _Segment(_box.mid_left, border_style),
+                    _Segment(_box.mid_right, border_style),
+                    _Segment(_box.mid_vertical, border_style),
+                ),
+                (
+                    _Segment(_box.foot_left, border_style),
+                    _Segment(_box.foot_right, border_style),
+                    _Segment(_box.foot_vertical, border_style),
+                ),
+            ]
+            if show_edge:
+                yield _Segment(_box.get_top(widths), border_style)
+                yield new_line
+        else:
+            box_segments = []
+
+        get_row_style = self.get_row_style
+        get_style = console.get_style
+
+        for index, (first, last, row_cell) in enumerate(loop_first_last(row_cells)):
+            header_row = first and show_header
+            footer_row = last and show_footer
+            row = (
+                self.rows[index - show_header]
+                if (not header_row and not footer_row)
+                else None
+            )
+            max_height = 1
+            cells: List[List[List[Segment]]] = []
+            if header_row or footer_row:
+                row_style = Style.null()
+            else:
+                row_style = get_style(
+                    get_row_style(console, index - 1 if show_header else index)
+                )
+            for width, cell, column in zip(widths, row_cell, columns):
+                render_options = options.update(
+                    width=width,
+                    justify=column.justify,
+                    no_wrap=column.no_wrap,
+                    overflow=column.overflow,
+                    height=None,
+                    highlight=column.highlight,
+                )
+                lines = console.render_lines(
+                    cell.renderable,
+                    render_options,
+                    style=get_style(cell.style) + row_style,
+                )
+                max_height = max(max_height, len(lines))
+                cells.append(lines)
+
+            row_height = max(len(cell) for cell in cells)
+
+            def align_cell(
+                cell: List[List[Segment]],
+                vertical: "VerticalAlignMethod",
+                width: int,
+                style: Style,
+            ) -> List[List[Segment]]:
+                if header_row:
+                    vertical = "bottom"
+                elif footer_row:
+                    vertical = "top"
+
+                if vertical == "top":
+                    return _Segment.align_top(cell, width, row_height, style)
+                elif vertical == "middle":
+                    return _Segment.align_middle(cell, width, row_height, style)
+                return _Segment.align_bottom(cell, width, row_height, style)
+
+            cells[:] = [
+                _Segment.set_shape(
+                    align_cell(
+                        cell,
+                        _cell.vertical,
+                        width,
+                        get_style(_cell.style) + row_style,
+                    ),
+                    width,
+                    max_height,
+                )
+                for width, _cell, cell, column in zip(widths, row_cell, cells, columns)
+            ]
+
+            if _box:
+                if last and show_footer:
+                    yield _Segment(
+                        _box.get_row(widths, "foot", edge=show_edge), border_style
+                    )
+                    yield new_line
+                left, right, _divider = box_segments[0 if first else (2 if last else 1)]
+
+                # If the column divider is whitespace also style it with the row background
+                divider = (
+                    _divider
+                    if _divider.text.strip()
+                    else _Segment(
+                        _divider.text, row_style.background_style + _divider.style
+                    )
+                )
+                for line_no in range(max_height):
+                    if show_edge:
+                        yield left
+                    for last_cell, rendered_cell in loop_last(cells):
+                        yield from rendered_cell[line_no]
+                        if not last_cell:
+                            yield divider
+                    if show_edge:
+                        yield right
+                    yield new_line
+            else:
+                for line_no in range(max_height):
+                    for rendered_cell in cells:
+                        yield from rendered_cell[line_no]
+                    yield new_line
+            if _box and first and show_header:
+                yield _Segment(
+                    _box.get_row(widths, "head", edge=show_edge), border_style
+                )
+                yield new_line
+            end_section = row and row.end_section
+            if _box and (show_lines or leading or end_section):
+                if (
+                    not last
+                    and not (show_footer and index >= len(row_cells) - 2)
+                    and not (show_header and header_row)
+                ):
+                    if leading:
+                        yield _Segment(
+                            _box.get_row(widths, "mid", edge=show_edge) * leading,
+                            border_style,
+                        )
+                    else:
+                        yield _Segment(
+                            _box.get_row(widths, "row", edge=show_edge), border_style
+                        )
+                    yield new_line
+
+        if _box and show_edge:
+            yield _Segment(_box.get_bottom(widths), border_style)
+            yield new_line
+
+
+if __name__ == "__main__":  # pragma: no cover
+    from rich.console import Console
+    from rich.highlighter import ReprHighlighter
+
+    from ._timer import timer
+
+    with timer("Table render"):
+        table = Table(
+            title="Star Wars Movies",
+            caption="Rich example table",
+            caption_justify="right",
+        )
+
+        table.add_column(
+            "Released", header_style="bright_cyan", style="cyan", no_wrap=True
+        )
+        table.add_column("Title", style="magenta")
+        table.add_column("Box Office", justify="right", style="green")
+
+        table.add_row(
+            "Dec 20, 2019",
+            "Star Wars: The Rise of Skywalker",
+            "$952,110,690",
+        )
+        table.add_row("May 25, 2018", "Solo: A Star Wars Story", "$393,151,347")
+        table.add_row(
+            "Dec 15, 2017",
+            "Star Wars Ep. V111: The Last Jedi",
+            "$1,332,539,889",
+            style="on black",
+            end_section=True,
+        )
+        table.add_row(
+            "Dec 16, 2016",
+            "Rogue One: A Star Wars Story",
+            "$1,332,439,889",
+        )
+
+        def header(text: str) -> None:
+            console.print()
+            console.rule(highlight(text))
+            console.print()
+
+        console = Console()
+        highlight = ReprHighlighter()
+        header("Example Table")
+        console.print(table, justify="center")
+
+        table.expand = True
+        header("expand=True")
+        console.print(table)
+
+        table.width = 50
+        header("width=50")
+
+        console.print(table, justify="center")
+
+        table.width = None
+        table.expand = False
+        table.row_styles = ["dim", "none"]
+        header("row_styles=['dim', 'none']")
+
+        console.print(table, justify="center")
+
+        table.width = None
+        table.expand = False
+        table.row_styles = ["dim", "none"]
+        table.leading = 1
+        header("leading=1, row_styles=['dim', 'none']")
+        console.print(table, justify="center")
+
+        table.width = None
+        table.expand = False
+        table.row_styles = ["dim", "none"]
+        table.show_lines = True
+        table.leading = 0
+        header("show_lines=True, row_styles=['dim', 'none']")
+        console.print(table, justify="center")
diff --git a/local_packages/rich/terminal_theme.py b/local_packages/rich/terminal_theme.py
new file mode 100644
index 0000000..565e9d9
--- /dev/null
+++ b/local_packages/rich/terminal_theme.py
@@ -0,0 +1,153 @@
+from typing import List, Optional, Tuple
+
+from .color_triplet import ColorTriplet
+from .palette import Palette
+
+_ColorTuple = Tuple[int, int, int]
+
+
+class TerminalTheme:
+    """A color theme used when exporting console content.
+
+    Args:
+        background (Tuple[int, int, int]): The background color.
+        foreground (Tuple[int, int, int]): The foreground (text) color.
+        normal (List[Tuple[int, int, int]]): A list of 8 normal intensity colors.
+        bright (List[Tuple[int, int, int]], optional): A list of 8 bright colors, or None
+            to repeat normal intensity. Defaults to None.
+    """
+
+    def __init__(
+        self,
+        background: _ColorTuple,
+        foreground: _ColorTuple,
+        normal: List[_ColorTuple],
+        bright: Optional[List[_ColorTuple]] = None,
+    ) -> None:
+        self.background_color = ColorTriplet(*background)
+        self.foreground_color = ColorTriplet(*foreground)
+        self.ansi_colors = Palette(normal + (bright or normal))
+
+
+DEFAULT_TERMINAL_THEME = TerminalTheme(
+    (255, 255, 255),
+    (0, 0, 0),
+    [
+        (0, 0, 0),
+        (128, 0, 0),
+        (0, 128, 0),
+        (128, 128, 0),
+        (0, 0, 128),
+        (128, 0, 128),
+        (0, 128, 128),
+        (192, 192, 192),
+    ],
+    [
+        (128, 128, 128),
+        (255, 0, 0),
+        (0, 255, 0),
+        (255, 255, 0),
+        (0, 0, 255),
+        (255, 0, 255),
+        (0, 255, 255),
+        (255, 255, 255),
+    ],
+)
+
+MONOKAI = TerminalTheme(
+    (12, 12, 12),
+    (217, 217, 217),
+    [
+        (26, 26, 26),
+        (244, 0, 95),
+        (152, 224, 36),
+        (253, 151, 31),
+        (157, 101, 255),
+        (244, 0, 95),
+        (88, 209, 235),
+        (196, 197, 181),
+        (98, 94, 76),
+    ],
+    [
+        (244, 0, 95),
+        (152, 224, 36),
+        (224, 213, 97),
+        (157, 101, 255),
+        (244, 0, 95),
+        (88, 209, 235),
+        (246, 246, 239),
+    ],
+)
+DIMMED_MONOKAI = TerminalTheme(
+    (25, 25, 25),
+    (185, 188, 186),
+    [
+        (58, 61, 67),
+        (190, 63, 72),
+        (135, 154, 59),
+        (197, 166, 53),
+        (79, 118, 161),
+        (133, 92, 141),
+        (87, 143, 164),
+        (185, 188, 186),
+        (136, 137, 135),
+    ],
+    [
+        (251, 0, 31),
+        (15, 114, 47),
+        (196, 112, 51),
+        (24, 109, 227),
+        (251, 0, 103),
+        (46, 112, 109),
+        (253, 255, 185),
+    ],
+)
+NIGHT_OWLISH = TerminalTheme(
+    (255, 255, 255),
+    (64, 63, 83),
+    [
+        (1, 22, 39),
+        (211, 66, 62),
+        (42, 162, 152),
+        (218, 170, 1),
+        (72, 118, 214),
+        (64, 63, 83),
+        (8, 145, 106),
+        (122, 129, 129),
+        (122, 129, 129),
+    ],
+    [
+        (247, 110, 110),
+        (73, 208, 197),
+        (218, 194, 107),
+        (92, 167, 228),
+        (105, 112, 152),
+        (0, 201, 144),
+        (152, 159, 177),
+    ],
+)
+
+SVG_EXPORT_THEME = TerminalTheme(
+    (41, 41, 41),
+    (197, 200, 198),
+    [
+        (75, 78, 85),
+        (204, 85, 90),
+        (152, 168, 75),
+        (208, 179, 68),
+        (96, 138, 177),
+        (152, 114, 159),
+        (104, 160, 179),
+        (197, 200, 198),
+        (154, 155, 153),
+    ],
+    [
+        (255, 38, 39),
+        (0, 130, 61),
+        (208, 132, 66),
+        (25, 132, 233),
+        (255, 44, 122),
+        (57, 130, 128),
+        (253, 253, 197),
+    ],
+)
diff --git a/local_packages/rich/text.py b/local_packages/rich/text.py
new file mode 100644
index 0000000..7e087a4
--- /dev/null
+++ b/local_packages/rich/text.py
@@ -0,0 +1,1363 @@
+import re
+from functools import partial, reduce
+from math import gcd
+from operator import itemgetter
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Dict,
+    Iterable,
+    List,
+    NamedTuple,
+    Optional,
+    Pattern,
+    Tuple,
+    Union,
+)
+
+from ._loop import loop_last
+from ._pick import pick_bool
+from ._wrap import divide_line
+from .align import AlignMethod
+from .cells import cell_len, set_cell_size
+from .containers import Lines
+from .control import strip_control_codes
+from .emoji import EmojiVariant
+from .jupyter import JupyterMixin
+from .measure import Measurement
+from .segment import Segment
+from .style import Style, StyleType
+
+if TYPE_CHECKING:  # pragma: no cover
+    from .console import Console, ConsoleOptions, JustifyMethod, OverflowMethod
+
+DEFAULT_JUSTIFY: "JustifyMethod" = "default"
+DEFAULT_OVERFLOW: "OverflowMethod" = "fold"
+
+
+_re_whitespace = re.compile(r"\s+$")
+
+TextType = Union[str, "Text"]
+"""A plain string or a :class:`Text` instance."""
+
+GetStyleCallable = Callable[[str], Optional[StyleType]]
+
+
+class Span(NamedTuple):
+    """A marked up region in some text."""
+
+    start: int
+    """Span start index."""
+    end: int
+    """Span end index."""
+    style: Union[str, Style]
+    """Style associated with the span."""
+
+    def __repr__(self) -> str:
+        return f"Span({self.start}, {self.end}, {self.style!r})"
+
+    def __bool__(self) -> bool:
+        return self.end > self.start
+
+    def split(self, offset: int) -> Tuple["Span", Optional["Span"]]:
+        """Split a span in to 2 from a given offset."""
+
+        if offset < self.start:
+            return self, None
+        if offset >= self.end:
+            return self, None
+
+        start, end, style = self
+        span1 = Span(start, min(end, offset), style)
+        span2 = Span(span1.end, end, style)
+        return span1, span2
+
+    def move(self, offset: int) -> "Span":
+        """Move start and end by a given offset.
+
+        Args:
+            offset (int): Number of characters to add to start and end.
+
+        Returns:
+            TextSpan: A new TextSpan with adjusted position.
+        """
+        start, end, style = self
+        return Span(start + offset, end + offset, style)
+
+    def right_crop(self, offset: int) -> "Span":
+        """Crop the span at the given offset.
+
+        Args:
+            offset (int): A value between start and end.
+
+        Returns:
+            Span: A new (possibly smaller) span.
+        """
+        start, end, style = self
+        if offset >= end:
+            return self
+        return Span(start, min(offset, end), style)
+
+    def extend(self, cells: int) -> "Span":
+        """Extend the span by the given number of cells.
+
+        Args:
+            cells (int): Additional space to add to end of span.
+
+        Returns:
+            Span: A span.
+        """
+        if cells:
+            start, end, style = self
+            return Span(start, end + cells, style)
+        else:
+            return self
+
+
+class Text(JupyterMixin):
+    """Text with color / style.
+
+    Args:
+        text (str, optional): Default unstyled text. Defaults to "".
+        style (Union[str, Style], optional): Base style for text. Defaults to "".
+        justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
+        overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
+        no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None.
+        end (str, optional): Character to end text with. Defaults to "\\\\n".
+        tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to None.
+        spans (List[Span], optional). A list of predefined style spans. Defaults to None.
+    """
+
+    __slots__ = [
+        "_text",
+        "style",
+        "justify",
+        "overflow",
+        "no_wrap",
+        "end",
+        "tab_size",
+        "_spans",
+        "_length",
+    ]
+
+    def __init__(
+        self,
+        text: str = "",
+        style: Union[str, Style] = "",
+        *,
+        justify: Optional["JustifyMethod"] = None,
+        overflow: Optional["OverflowMethod"] = None,
+        no_wrap: Optional[bool] = None,
+        end: str = "\n",
+        tab_size: Optional[int] = None,
+        spans: Optional[List[Span]] = None,
+    ) -> None:
+        sanitized_text = strip_control_codes(text)
+        self._text = [sanitized_text]
+        self.style = style
+        self.justify: Optional["JustifyMethod"] = justify
+        self.overflow: Optional["OverflowMethod"] = overflow
+        self.no_wrap = no_wrap
+        self.end = end
+        self.tab_size = tab_size
+        self._spans: List[Span] = spans or []
+        self._length: int = len(sanitized_text)
+
+    def __len__(self) -> int:
+        return self._length
+
+    def __bool__(self) -> bool:
+        return bool(self._length)
+
+    def __str__(self) -> str:
+        return self.plain
+
+    def __repr__(self) -> str:
+        return f""
+
+    def __add__(self, other: Any) -> "Text":
+        if isinstance(other, (str, Text)):
+            result = self.copy()
+            result.append(other)
+            return result
+        return NotImplemented
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, Text):
+            return NotImplemented
+        return self.plain == other.plain and self._spans == other._spans
+
+    def __contains__(self, other: object) -> bool:
+        if isinstance(other, str):
+            return other in self.plain
+        elif isinstance(other, Text):
+            return other.plain in self.plain
+        return False
+
+    def __getitem__(self, slice: Union[int, slice]) -> "Text":
+        def get_text_at(offset: int) -> "Text":
+            _Span = Span
+            text = Text(
+                self.plain[offset],
+                spans=[
+                    _Span(0, 1, style)
+                    for start, end, style in self._spans
+                    if end > offset >= start
+                ],
+                end="",
+            )
+            return text
+
+        if isinstance(slice, int):
+            return get_text_at(slice)
+        else:
+            start, stop, step = slice.indices(len(self.plain))
+            if step == 1:
+                lines = self.divide([start, stop])
+                return lines[1]
+            else:
+                # This would be a bit of work to implement efficiently
+                # For now, its not required
+                raise TypeError("slices with step!=1 are not supported")
+
+    @property
+    def cell_len(self) -> int:
+        """Get the number of cells required to render this text."""
+        return cell_len(self.plain)
+
+    @property
+    def markup(self) -> str:
+        """Get console markup to render this Text.
+
+        Returns:
+            str: A string potentially creating markup tags.
+        """
+        from .markup import escape
+
+        output: List[str] = []
+
+        plain = self.plain
+        markup_spans = [
+            (0, False, self.style),
+            *((span.start, False, span.style) for span in self._spans),
+            *((span.end, True, span.style) for span in self._spans),
+            (len(plain), True, self.style),
+        ]
+        markup_spans.sort(key=itemgetter(0, 1))
+        position = 0
+        append = output.append
+        for offset, closing, style in markup_spans:
+            if offset > position:
+                append(escape(plain[position:offset]))
+                position = offset
+            if style:
+                append(f"[/{style}]" if closing else f"[{style}]")
+        markup = "".join(output)
+        return markup
+
+    @classmethod
+    def from_markup(
+        cls,
+        text: str,
+        *,
+        style: Union[str, Style] = "",
+        emoji: bool = True,
+        emoji_variant: Optional[EmojiVariant] = None,
+        justify: Optional["JustifyMethod"] = None,
+        overflow: Optional["OverflowMethod"] = None,
+        end: str = "\n",
+    ) -> "Text":
+        """Create Text instance from markup.
+
+        Args:
+            text (str): A string containing console markup.
+            style (Union[str, Style], optional): Base style for text. Defaults to "".
+            emoji (bool, optional): Also render emoji code. Defaults to True.
+            emoji_variant (str, optional): Optional emoji variant, either "text" or "emoji". Defaults to None.
+            justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
+            overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
+            end (str, optional): Character to end text with. Defaults to "\\\\n".
+
+        Returns:
+            Text: A Text instance with markup rendered.
+        """
+        from .markup import render
+
+        rendered_text = render(text, style, emoji=emoji, emoji_variant=emoji_variant)
+        rendered_text.justify = justify
+        rendered_text.overflow = overflow
+        rendered_text.end = end
+        return rendered_text
+
+    @classmethod
+    def from_ansi(
+        cls,
+        text: str,
+        *,
+        style: Union[str, Style] = "",
+        justify: Optional["JustifyMethod"] = None,
+        overflow: Optional["OverflowMethod"] = None,
+        no_wrap: Optional[bool] = None,
+        end: str = "\n",
+        tab_size: Optional[int] = 8,
+    ) -> "Text":
+        """Create a Text object from a string containing ANSI escape codes.
+
+        Args:
+            text (str): A string containing escape codes.
+            style (Union[str, Style], optional): Base style for text. Defaults to "".
+            justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
+            overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
+            no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None.
+            end (str, optional): Character to end text with. Defaults to "\\\\n".
+            tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to None.
+        """
+        from .ansi import AnsiDecoder
+
+        joiner = Text(
+            "\n",
+            justify=justify,
+            overflow=overflow,
+            no_wrap=no_wrap,
+            end=end,
+            tab_size=tab_size,
+            style=style,
+        )
+        decoder = AnsiDecoder()
+        result = joiner.join(line for line in decoder.decode(text))
+        return result
+
+    @classmethod
+    def styled(
+        cls,
+        text: str,
+        style: StyleType = "",
+        *,
+        justify: Optional["JustifyMethod"] = None,
+        overflow: Optional["OverflowMethod"] = None,
+    ) -> "Text":
+        """Construct a Text instance with a pre-applied styled. A style applied in this way won't be used
+        to pad the text when it is justified.
+
+        Args:
+            text (str): A string containing console markup.
+            style (Union[str, Style]): Style to apply to the text. Defaults to "".
+            justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
+            overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
+
+        Returns:
+            Text: A text instance with a style applied to the entire string.
+        """
+        styled_text = cls(text, justify=justify, overflow=overflow)
+        styled_text.stylize(style)
+        return styled_text
+
+    @classmethod
+    def assemble(
+        cls,
+        *parts: Union[str, "Text", Tuple[str, StyleType]],
+        style: Union[str, Style] = "",
+        justify: Optional["JustifyMethod"] = None,
+        overflow: Optional["OverflowMethod"] = None,
+        no_wrap: Optional[bool] = None,
+        end: str = "\n",
+        tab_size: int = 8,
+        meta: Optional[Dict[str, Any]] = None,
+    ) -> "Text":
+        """Construct a text instance by combining a sequence of strings with optional styles.
+        The positional arguments should be either strings, or a tuple of string + style.
+
+        Args:
+            style (Union[str, Style], optional): Base style for text. Defaults to "".
+            justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
+            overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
+            no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None.
+            end (str, optional): Character to end text with. Defaults to "\\\\n".
+            tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to None.
+            meta (Dict[str, Any], optional). Meta data to apply to text, or None for no meta data. Default to None
+
+        Returns:
+            Text: A new text instance.
+        """
+        text = cls(
+            style=style,
+            justify=justify,
+            overflow=overflow,
+            no_wrap=no_wrap,
+            end=end,
+            tab_size=tab_size,
+        )
+        append = text.append
+        _Text = Text
+        for part in parts:
+            if isinstance(part, (_Text, str)):
+                append(part)
+            else:
+                append(*part)
+        if meta:
+            text.apply_meta(meta)
+        return text
+
+    @property
+    def plain(self) -> str:
+        """Get the text as a single string."""
+        if len(self._text) != 1:
+            self._text[:] = ["".join(self._text)]
+        return self._text[0]
+
+    @plain.setter
+    def plain(self, new_text: str) -> None:
+        """Set the text to a new value."""
+        if new_text != self.plain:
+            sanitized_text = strip_control_codes(new_text)
+            self._text[:] = [sanitized_text]
+            old_length = self._length
+            self._length = len(sanitized_text)
+            if old_length > self._length:
+                self._trim_spans()
+
+    @property
+    def spans(self) -> List[Span]:
+        """Get a reference to the internal list of spans."""
+        return self._spans
+
+    @spans.setter
+    def spans(self, spans: List[Span]) -> None:
+        """Set spans."""
+        self._spans = spans[:]
+
+    def blank_copy(self, plain: str = "") -> "Text":
+        """Return a new Text instance with copied metadata (but not the string or spans)."""
+        copy_self = Text(
+            plain,
+            style=self.style,
+            justify=self.justify,
+            overflow=self.overflow,
+            no_wrap=self.no_wrap,
+            end=self.end,
+            tab_size=self.tab_size,
+        )
+        return copy_self
+
+    def copy(self) -> "Text":
+        """Return a copy of this instance."""
+        copy_self = Text(
+            self.plain,
+            style=self.style,
+            justify=self.justify,
+            overflow=self.overflow,
+            no_wrap=self.no_wrap,
+            end=self.end,
+            tab_size=self.tab_size,
+        )
+        copy_self._spans[:] = self._spans
+        return copy_self
+
+    def stylize(
+        self,
+        style: Union[str, Style],
+        start: int = 0,
+        end: Optional[int] = None,
+    ) -> None:
+        """Apply a style to the text, or a portion of the text.
+
+        Args:
+            style (Union[str, Style]): Style instance or style definition to apply.
+            start (int): Start offset (negative indexing is supported). Defaults to 0.
+            end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None.
+        """
+        if style:
+            length = len(self)
+            if start < 0:
+                start = length + start
+            if end is None:
+                end = length
+            if end < 0:
+                end = length + end
+            if start >= length or end <= start:
+                # Span not in text or not valid
+                return
+            self._spans.append(Span(start, min(length, end), style))
+
+    def stylize_before(
+        self,
+        style: Union[str, Style],
+        start: int = 0,
+        end: Optional[int] = None,
+    ) -> None:
+        """Apply a style to the text, or a portion of the text. Styles will be applied before other styles already present.
+
+        Args:
+            style (Union[str, Style]): Style instance or style definition to apply.
+            start (int): Start offset (negative indexing is supported). Defaults to 0.
+            end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None.
+        """
+        if style:
+            length = len(self)
+            if start < 0:
+                start = length + start
+            if end is None:
+                end = length
+            if end < 0:
+                end = length + end
+            if start >= length or end <= start:
+                # Span not in text or not valid
+                return
+            self._spans.insert(0, Span(start, min(length, end), style))
+
+    def apply_meta(
+        self, meta: Dict[str, Any], start: int = 0, end: Optional[int] = None
+    ) -> None:
+        """Apply metadata to the text, or a portion of the text.
+
+        Args:
+            meta (Dict[str, Any]): A dict of meta information.
+            start (int): Start offset (negative indexing is supported). Defaults to 0.
+            end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None.
+
+        """
+        style = Style.from_meta(meta)
+        self.stylize(style, start=start, end=end)
+
+    def on(self, meta: Optional[Dict[str, Any]] = None, **handlers: Any) -> "Text":
+        """Apply event handlers (used by Textual project).
+
+        Example:
+            >>> from rich.text import Text
+            >>> text = Text("hello world")
+            >>> text.on(click="view.toggle('world')")
+
+        Args:
+            meta (Dict[str, Any]): Mapping of meta information.
+            **handlers: Keyword args are prefixed with "@" to defined handlers.
+
+        Returns:
+            Text: Self is returned to method may be chained.
+        """
+        meta = {} if meta is None else meta
+        meta.update({f"@{key}": value for key, value in handlers.items()})
+        self.stylize(Style.from_meta(meta))
+        return self
+
+    def remove_suffix(self, suffix: str) -> None:
+        """Remove a suffix if it exists.
+
+        Args:
+            suffix (str): Suffix to remove.
+        """
+        if self.plain.endswith(suffix):
+            self.right_crop(len(suffix))
+
+    def get_style_at_offset(self, console: "Console", offset: int) -> Style:
+        """Get the style of a character at give offset.
+
+        Args:
+            console (~Console): Console where text will be rendered.
+            offset (int): Offset in to text (negative indexing supported)
+
+        Returns:
+            Style: A Style instance.
+        """
+        # TODO: This is a little inefficient, it is only used by full justify
+        if offset < 0:
+            offset = len(self) + offset
+        get_style = console.get_style
+        style = get_style(self.style).copy()
+        for start, end, span_style in self._spans:
+            if end > offset >= start:
+                style += get_style(span_style, default="")
+        return style
+
+    def extend_style(self, spaces: int) -> None:
+        """Extend the Text given number of spaces where the spaces have the same style as the last character.
+
+        Args:
+            spaces (int): Number of spaces to add to the Text.
+        """
+        if spaces <= 0:
+            return
+        spans = self.spans
+        new_spaces = " " * spaces
+        if spans:
+            end_offset = len(self)
+            self._spans[:] = [
+                span.extend(spaces) if span.end >= end_offset else span
+                for span in spans
+            ]
+            self._text.append(new_spaces)
+            self._length += spaces
+        else:
+            self.plain += new_spaces
+
+    def highlight_regex(
+        self,
+        re_highlight: Union[Pattern[str], str],
+        style: Optional[Union[GetStyleCallable, StyleType]] = None,
+        *,
+        style_prefix: str = "",
+    ) -> int:
+        """Highlight text with a regular expression, where group names are
+        translated to styles.
+
+        Args:
+            re_highlight (Union[re.Pattern, str]): A regular expression object or string.
+            style (Union[GetStyleCallable, StyleType]): Optional style to apply to whole match, or a callable
+                which accepts the matched text and returns a style. Defaults to None.
+            style_prefix (str, optional): Optional prefix to add to style group names.
+
+        Returns:
+            int: Number of regex matches
+        """
+        count = 0
+        append_span = self._spans.append
+        _Span = Span
+        plain = self.plain
+        if isinstance(re_highlight, str):
+            re_highlight = re.compile(re_highlight)
+        for match in re_highlight.finditer(plain):
+            get_span = match.span
+            if style:
+                start, end = get_span()
+                match_style = style(plain[start:end]) if callable(style) else style
+                if match_style is not None and end > start:
+                    append_span(_Span(start, end, match_style))
+
+            count += 1
+            for name in match.groupdict().keys():
+                start, end = get_span(name)
+                if start != -1 and end > start:
+                    append_span(_Span(start, end, f"{style_prefix}{name}"))
+        return count
+
+    def highlight_words(
+        self,
+        words: Iterable[str],
+        style: Union[str, Style],
+        *,
+        case_sensitive: bool = True,
+    ) -> int:
+        """Highlight words with a style.
+
+        Args:
+            words (Iterable[str]): Words to highlight.
+            style (Union[str, Style]): Style to apply.
+            case_sensitive (bool, optional): Enable case sensitive matching. Defaults to True.
+
+        Returns:
+            int: Number of words highlighted.
+        """
+        re_words = "|".join(re.escape(word) for word in words)
+        add_span = self._spans.append
+        count = 0
+        _Span = Span
+        for match in re.finditer(
+            re_words, self.plain, flags=0 if case_sensitive else re.IGNORECASE
+        ):
+            start, end = match.span(0)
+            add_span(_Span(start, end, style))
+            count += 1
+        return count
+
+    def rstrip(self) -> None:
+        """Strip whitespace from end of text."""
+        self.plain = self.plain.rstrip()
+
+    def rstrip_end(self, size: int) -> None:
+        """Remove whitespace beyond a certain width at the end of the text.
+
+        Args:
+            size (int): The desired size of the text.
+        """
+        text_length = len(self)
+        if text_length > size:
+            excess = text_length - size
+            whitespace_match = _re_whitespace.search(self.plain)
+            if whitespace_match is not None:
+                whitespace_count = len(whitespace_match.group(0))
+                self.right_crop(min(whitespace_count, excess))
+
+    def set_length(self, new_length: int) -> None:
+        """Set new length of the text, clipping or padding is required."""
+        length = len(self)
+        if length != new_length:
+            if length < new_length:
+                self.pad_right(new_length - length)
+            else:
+                self.right_crop(length - new_length)
+
+    def __rich_console__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> Iterable[Segment]:
+        tab_size: int = console.tab_size if self.tab_size is None else self.tab_size
+        justify = self.justify or options.justify or DEFAULT_JUSTIFY
+        overflow = self.overflow or options.overflow or DEFAULT_OVERFLOW
+
+        lines = self.wrap(
+            console,
+            options.max_width,
+            justify=justify,
+            overflow=overflow,
+            tab_size=tab_size or 8,
+            no_wrap=pick_bool(self.no_wrap, options.no_wrap, False),
+        )
+        all_lines = Text("\n").join(lines)
+        yield from all_lines.render(console, end=self.end)
+
+    def __rich_measure__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> Measurement:
+        text = self.plain
+        lines = text.splitlines()
+        max_text_width = max(cell_len(line) for line in lines) if lines else 0
+        words = text.split()
+        min_text_width = (
+            max(cell_len(word) for word in words) if words else max_text_width
+        )
+        return Measurement(min_text_width, max_text_width)
+
+    def render(self, console: "Console", end: str = "") -> Iterable["Segment"]:
+        """Render the text as Segments.
+
+        Args:
+            console (Console): Console instance.
+            end (Optional[str], optional): Optional end character.
+
+        Returns:
+            Iterable[Segment]: Result of render that may be written to the console.
+        """
+        _Segment = Segment
+        text = self.plain
+        if not self._spans:
+            yield Segment(text)
+            if end:
+                yield _Segment(end)
+            return
+        get_style = partial(console.get_style, default=Style.null())
+
+        enumerated_spans = list(enumerate(self._spans, 1))
+        style_map = {index: get_style(span.style) for index, span in enumerated_spans}
+        style_map[0] = get_style(self.style)
+
+        spans = [
+            (0, False, 0),
+            *((span.start, False, index) for index, span in enumerated_spans),
+            *((span.end, True, index) for index, span in enumerated_spans),
+            (len(text), True, 0),
+        ]
+        spans.sort(key=itemgetter(0, 1))
+
+        stack: List[int] = []
+        stack_append = stack.append
+        stack_pop = stack.remove
+
+        style_cache: Dict[Tuple[Style, ...], Style] = {}
+        style_cache_get = style_cache.get
+        combine = Style.combine
+
+        def get_current_style() -> Style:
+            """Construct current style from stack."""
+            styles = tuple(style_map[_style_id] for _style_id in sorted(stack))
+            cached_style = style_cache_get(styles)
+            if cached_style is not None:
+                return cached_style
+            current_style = combine(styles)
+            style_cache[styles] = current_style
+            return current_style
+
+        for (offset, leaving, style_id), (next_offset, _, _) in zip(spans, spans[1:]):
+            if leaving:
+                stack_pop(style_id)
+            else:
+                stack_append(style_id)
+            if next_offset > offset:
+                yield _Segment(text[offset:next_offset], get_current_style())
+        if end:
+            yield _Segment(end)
+
+    def join(self, lines: Iterable["Text"]) -> "Text":
+        """Join text together with this instance as the separator.
+
+        Args:
+            lines (Iterable[Text]): An iterable of Text instances to join.
+
+        Returns:
+            Text: A new text instance containing join text.
+        """
+
+        new_text = self.blank_copy()
+
+        def iter_text() -> Iterable["Text"]:
+            if self.plain:
+                for last, line in loop_last(lines):
+                    yield line
+                    if not last:
+                        yield self
+            else:
+                yield from lines
+
+        extend_text = new_text._text.extend
+        append_span = new_text._spans.append
+        extend_spans = new_text._spans.extend
+        offset = 0
+        _Span = Span
+
+        for text in iter_text():
+            extend_text(text._text)
+            if text.style:
+                append_span(_Span(offset, offset + len(text), text.style))
+            extend_spans(
+                _Span(offset + start, offset + end, style)
+                for start, end, style in text._spans
+            )
+            offset += len(text)
+        new_text._length = offset
+        return new_text
+
+    def expand_tabs(self, tab_size: Optional[int] = None) -> None:
+        """Converts tabs to spaces.
+
+        Args:
+            tab_size (int, optional): Size of tabs. Defaults to 8.
+
+        """
+        if "\t" not in self.plain:
+            return
+        if tab_size is None:
+            tab_size = self.tab_size
+        if tab_size is None:
+            tab_size = 8
+
+        new_text: List[Text] = []
+        append = new_text.append
+
+        for line in self.split("\n", include_separator=True):
+            if "\t" not in line.plain:
+                append(line)
+            else:
+                cell_position = 0
+                parts = line.split("\t", include_separator=True)
+                for part in parts:
+                    if part.plain.endswith("\t"):
+                        part._text[-1] = part._text[-1][:-1] + " "
+                        cell_position += part.cell_len
+                        tab_remainder = cell_position % tab_size
+                        if tab_remainder:
+                            spaces = tab_size - tab_remainder
+                            part.extend_style(spaces)
+                            cell_position += spaces
+                    else:
+                        cell_position += part.cell_len
+                    append(part)
+
+        result = Text("").join(new_text)
+
+        self._text = [result.plain]
+        self._length = len(self.plain)
+        self._spans[:] = result._spans
+
+    def truncate(
+        self,
+        max_width: int,
+        *,
+        overflow: Optional["OverflowMethod"] = None,
+        pad: bool = False,
+    ) -> None:
+        """Truncate text if it is longer that a given width.
+
+        Args:
+            max_width (int): Maximum number of characters in text.
+            overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to None, to use self.overflow.
+            pad (bool, optional): Pad with spaces if the length is less than max_width. Defaults to False.
+        """
+        _overflow = overflow or self.overflow or DEFAULT_OVERFLOW
+        if _overflow != "ignore":
+            length = cell_len(self.plain)
+            if length > max_width:
+                if _overflow == "ellipsis":
+                    self.plain = set_cell_size(self.plain, max_width - 1) + "…"
+                else:
+                    self.plain = set_cell_size(self.plain, max_width)
+            if pad and length < max_width:
+                spaces = max_width - length
+                self._text = [f"{self.plain}{' ' * spaces}"]
+                self._length = len(self.plain)
+
+    def _trim_spans(self) -> None:
+        """Remove or modify any spans that are over the end of the text."""
+        max_offset = len(self.plain)
+        _Span = Span
+        self._spans[:] = [
+            (
+                span
+                if span.end < max_offset
+                else _Span(span.start, min(max_offset, span.end), span.style)
+            )
+            for span in self._spans
+            if span.start < max_offset
+        ]
+
+    def pad(self, count: int, character: str = " ") -> None:
+        """Pad left and right with a given number of characters.
+
+        Args:
+            count (int): Width of padding.
+            character (str): The character to pad with. Must be a string of length 1.
+        """
+        assert len(character) == 1, "Character must be a string of length 1"
+        if count:
+            pad_characters = character * count
+            self.plain = f"{pad_characters}{self.plain}{pad_characters}"
+            _Span = Span
+            self._spans[:] = [
+                _Span(start + count, end + count, style)
+                for start, end, style in self._spans
+            ]
+
+    def pad_left(self, count: int, character: str = " ") -> None:
+        """Pad the left with a given character.
+
+        Args:
+            count (int): Number of characters to pad.
+            character (str, optional): Character to pad with. Defaults to " ".
+        """
+        assert len(character) == 1, "Character must be a string of length 1"
+        if count:
+            self.plain = f"{character * count}{self.plain}"
+            _Span = Span
+            self._spans[:] = [
+                _Span(start + count, end + count, style)
+                for start, end, style in self._spans
+            ]
+
+    def pad_right(self, count: int, character: str = " ") -> None:
+        """Pad the right with a given character.
+
+        Args:
+            count (int): Number of characters to pad.
+            character (str, optional): Character to pad with. Defaults to " ".
+        """
+        assert len(character) == 1, "Character must be a string of length 1"
+        if count:
+            self.plain = f"{self.plain}{character * count}"
+
+    def align(self, align: AlignMethod, width: int, character: str = " ") -> None:
+        """Align text to a given width.
+
+        Args:
+            align (AlignMethod): One of "left", "center", or "right".
+            width (int): Desired width.
+            character (str, optional): Character to pad with. Defaults to " ".
+        """
+        self.truncate(width)
+        excess_space = width - cell_len(self.plain)
+        if excess_space:
+            if align == "left":
+                self.pad_right(excess_space, character)
+            elif align == "center":
+                left = excess_space // 2
+                self.pad_left(left, character)
+                self.pad_right(excess_space - left, character)
+            else:
+                self.pad_left(excess_space, character)
+
+    def append(
+        self, text: Union["Text", str], style: Optional[Union[str, "Style"]] = None
+    ) -> "Text":
+        """Add text with an optional style.
+
+        Args:
+            text (Union[Text, str]): A str or Text to append.
+            style (str, optional): A style name. Defaults to None.
+
+        Returns:
+            Text: Returns self for chaining.
+        """
+
+        if not isinstance(text, (str, Text)):
+            raise TypeError("Only str or Text can be appended to Text")
+
+        if len(text):
+            if isinstance(text, str):
+                sanitized_text = strip_control_codes(text)
+                self._text.append(sanitized_text)
+                offset = len(self)
+                text_length = len(sanitized_text)
+                if style:
+                    self._spans.append(Span(offset, offset + text_length, style))
+                self._length += text_length
+            elif isinstance(text, Text):
+                _Span = Span
+                if style is not None:
+                    raise ValueError(
+                        "style must not be set when appending Text instance"
+                    )
+                text_length = self._length
+                if text.style:
+                    self._spans.append(
+                        _Span(text_length, text_length + len(text), text.style)
+                    )
+                self._text.append(text.plain)
+                self._spans.extend(
+                    _Span(start + text_length, end + text_length, style)
+                    for start, end, style in text._spans.copy()
+                )
+                self._length += len(text)
+        return self
+
+    def append_text(self, text: "Text") -> "Text":
+        """Append another Text instance. This method is more performant than Text.append, but
+        only works for Text.
+
+        Args:
+            text (Text): The Text instance to append to this instance.
+
+        Returns:
+            Text: Returns self for chaining.
+        """
+        _Span = Span
+        text_length = self._length
+        if text.style:
+            self._spans.append(_Span(text_length, text_length + len(text), text.style))
+        self._text.append(text.plain)
+        self._spans.extend(
+            _Span(start + text_length, end + text_length, style)
+            for start, end, style in text._spans.copy()
+        )
+        self._length += len(text)
+        return self
+
+    def append_tokens(
+        self, tokens: Iterable[Tuple[str, Optional[StyleType]]]
+    ) -> "Text":
+        """Append iterable of str and style. Style may be a Style instance or a str style definition.
+
+        Args:
+            tokens (Iterable[Tuple[str, Optional[StyleType]]]): An iterable of tuples containing str content and style.
+
+        Returns:
+            Text: Returns self for chaining.
+        """
+        append_text = self._text.append
+        append_span = self._spans.append
+        _Span = Span
+        offset = len(self)
+        for content, style in tokens:
+            content = strip_control_codes(content)
+            append_text(content)
+            if style:
+                append_span(_Span(offset, offset + len(content), style))
+            offset += len(content)
+        self._length = offset
+        return self
+
+    def copy_styles(self, text: "Text") -> None:
+        """Copy styles from another Text instance.
+
+        Args:
+            text (Text): A Text instance to copy styles from, must be the same length.
+        """
+        self._spans.extend(text._spans)
+
+    def split(
+        self,
+        separator: str = "\n",
+        *,
+        include_separator: bool = False,
+        allow_blank: bool = False,
+    ) -> Lines:
+        """Split rich text in to lines, preserving styles.
+
+        Args:
+            separator (str, optional): String to split on. Defaults to "\\\\n".
+            include_separator (bool, optional): Include the separator in the lines. Defaults to False.
+            allow_blank (bool, optional): Return a blank line if the text ends with a separator. Defaults to False.
+
+        Returns:
+            List[RichText]: A list of rich text, one per line of the original.
+        """
+        assert separator, "separator must not be empty"
+
+        text = self.plain
+        if separator not in text:
+            return Lines([self.copy()])
+
+        if include_separator:
+            lines = self.divide(
+                match.end() for match in re.finditer(re.escape(separator), text)
+            )
+        else:
+
+            def flatten_spans() -> Iterable[int]:
+                for match in re.finditer(re.escape(separator), text):
+                    start, end = match.span()
+                    yield start
+                    yield end
+
+            lines = Lines(
+                line for line in self.divide(flatten_spans()) if line.plain != separator
+            )
+
+        if not allow_blank and text.endswith(separator):
+            lines.pop()
+
+        return lines
+
+    def divide(self, offsets: Iterable[int]) -> Lines:
+        """Divide text into a number of lines at given offsets.
+
+        Args:
+            offsets (Iterable[int]): Offsets used to divide text.
+
+        Returns:
+            Lines: New RichText instances between offsets.
+        """
+        _offsets = list(offsets)
+
+        if not _offsets:
+            return Lines([self.copy()])
+
+        text = self.plain
+        text_length = len(text)
+        divide_offsets = [0, *_offsets, text_length]
+        line_ranges = list(zip(divide_offsets, divide_offsets[1:]))
+
+        style = self.style
+        justify = self.justify
+        overflow = self.overflow
+        _Text = Text
+        new_lines = Lines(
+            _Text(
+                text[start:end],
+                style=style,
+                justify=justify,
+                overflow=overflow,
+            )
+            for start, end in line_ranges
+        )
+        if not self._spans:
+            return new_lines
+
+        _line_appends = [line._spans.append for line in new_lines._lines]
+        line_count = len(line_ranges)
+        _Span = Span
+
+        for span_start, span_end, style in self._spans:
+            lower_bound = 0
+            upper_bound = line_count
+            start_line_no = (lower_bound + upper_bound) // 2
+
+            while True:
+                line_start, line_end = line_ranges[start_line_no]
+                if span_start < line_start:
+                    upper_bound = start_line_no - 1
+                elif span_start > line_end:
+                    lower_bound = start_line_no + 1
+                else:
+                    break
+                start_line_no = (lower_bound + upper_bound) // 2
+
+            if span_end < line_end:
+                end_line_no = start_line_no
+            else:
+                end_line_no = lower_bound = start_line_no
+                upper_bound = line_count
+
+                while True:
+                    line_start, line_end = line_ranges[end_line_no]
+                    if span_end < line_start:
+                        upper_bound = end_line_no - 1
+                    elif span_end > line_end:
+                        lower_bound = end_line_no + 1
+                    else:
+                        break
+                    end_line_no = (lower_bound + upper_bound) // 2
+
+            for line_no in range(start_line_no, end_line_no + 1):
+                line_start, line_end = line_ranges[line_no]
+                new_start = max(0, span_start - line_start)
+                new_end = min(span_end - line_start, line_end - line_start)
+                if new_end > new_start:
+                    _line_appends[line_no](_Span(new_start, new_end, style))
+
+        return new_lines
+
+    def right_crop(self, amount: int = 1) -> None:
+        """Remove a number of characters from the end of the text."""
+        max_offset = len(self.plain) - amount
+        _Span = Span
+        self._spans[:] = [
+            (
+                span
+                if span.end < max_offset
+                else _Span(span.start, min(max_offset, span.end), span.style)
+            )
+            for span in self._spans
+            if span.start < max_offset
+        ]
+        self._text = [self.plain[:-amount]]
+        self._length -= amount
+
+    def wrap(
+        self,
+        console: "Console",
+        width: int,
+        *,
+        justify: Optional["JustifyMethod"] = None,
+        overflow: Optional["OverflowMethod"] = None,
+        tab_size: int = 8,
+        no_wrap: Optional[bool] = None,
+    ) -> Lines:
+        """Word wrap the text.
+
+        Args:
+            console (Console): Console instance.
+            width (int): Number of cells available per line.
+            justify (str, optional): Justify method: "default", "left", "center", "full", "right". Defaults to "default".
+            overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to None.
+            tab_size (int, optional): Default tab size. Defaults to 8.
+            no_wrap (bool, optional): Disable wrapping, Defaults to False.
+
+        Returns:
+            Lines: Number of lines.
+        """
+        wrap_justify = justify or self.justify or DEFAULT_JUSTIFY
+        wrap_overflow = overflow or self.overflow or DEFAULT_OVERFLOW
+
+        no_wrap = pick_bool(no_wrap, self.no_wrap, False) or overflow == "ignore"
+
+        lines = Lines()
+        for line in self.split(allow_blank=True):
+            if "\t" in line:
+                line.expand_tabs(tab_size)
+            if no_wrap:
+                if overflow == "ignore":
+                    lines.append(line)
+                    continue
+                new_lines = Lines([line])
+            else:
+                offsets = divide_line(str(line), width, fold=wrap_overflow == "fold")
+                new_lines = line.divide(offsets)
+                for line in new_lines:
+                    line.rstrip_end(width)
+            if wrap_justify:
+                new_lines.justify(
+                    console, width, justify=wrap_justify, overflow=wrap_overflow
+                )
+            for line in new_lines:
+                line.truncate(width, overflow=wrap_overflow)
+            lines.extend(new_lines)
+        return lines
+
+    def fit(self, width: int) -> Lines:
+        """Fit the text in to given width by chopping in to lines.
+
+        Args:
+            width (int): Maximum characters in a line.
+
+        Returns:
+            Lines: Lines container.
+        """
+        lines: Lines = Lines()
+        append = lines.append
+        for line in self.split():
+            line.set_length(width)
+            append(line)
+        return lines
+
+    def detect_indentation(self) -> int:
+        """Auto-detect indentation of code.
+
+        Returns:
+            int: Number of spaces used to indent code.
+        """
+
+        _indentations = {
+            len(match.group(1))
+            for match in re.finditer(r"^( *)(.*)$", self.plain, flags=re.MULTILINE)
+        }
+
+        try:
+            indentation = (
+                reduce(gcd, [indent for indent in _indentations if not indent % 2]) or 1
+            )
+        except TypeError:
+            indentation = 1
+
+        return indentation
+
+    def with_indent_guides(
+        self,
+        indent_size: Optional[int] = None,
+        *,
+        character: str = "│",
+        style: StyleType = "dim green",
+    ) -> "Text":
+        """Adds indent guide lines to text.
+
+        Args:
+            indent_size (Optional[int]): Size of indentation, or None to auto detect. Defaults to None.
+            character (str, optional): Character to use for indentation. Defaults to "│".
+            style (Union[Style, str], optional): Style of indent guides.
+
+        Returns:
+            Text: New text with indentation guides.
+        """
+
+        _indent_size = self.detect_indentation() if indent_size is None else indent_size
+
+        text = self.copy()
+        text.expand_tabs()
+        indent_line = f"{character}{' ' * (_indent_size - 1)}"
+
+        re_indent = re.compile(r"^( *)(.*)$")
+        new_lines: List[Text] = []
+        add_line = new_lines.append
+        blank_lines = 0
+        for line in text.split(allow_blank=True):
+            match = re_indent.match(line.plain)
+            if not match or not match.group(2):
+                blank_lines += 1
+                continue
+            indent = match.group(1)
+            full_indents, remaining_space = divmod(len(indent), _indent_size)
+            new_indent = f"{indent_line * full_indents}{' ' * remaining_space}"
+            line.plain = new_indent + line.plain[len(new_indent) :]
+            line.stylize(style, 0, len(new_indent))
+            if blank_lines:
+                new_lines.extend([Text(new_indent, style=style)] * blank_lines)
+                blank_lines = 0
+            add_line(line)
+        if blank_lines:
+            new_lines.extend([Text("", style=style)] * blank_lines)
+
+        new_text = text.blank_copy("\n").join(new_lines)
+        return new_text
+
+
+if __name__ == "__main__":  # pragma: no cover
+    from rich.console import Console
+
+    text = Text(
+        """\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n"""
+    )
+    text.highlight_words(["Lorem"], "bold")
+    text.highlight_words(["ipsum"], "italic")
+
+    console = Console()
+
+    console.rule("justify='left'")
+    console.print(text, style="red")
+    console.print()
+
+    console.rule("justify='center'")
+    console.print(text, style="green", justify="center")
+    console.print()
+
+    console.rule("justify='right'")
+    console.print(text, style="blue", justify="right")
+    console.print()
+
+    console.rule("justify='full'")
+    console.print(text, style="magenta", justify="full")
+    console.print()
diff --git a/local_packages/rich/theme.py b/local_packages/rich/theme.py
new file mode 100644
index 0000000..6b3c8c0
--- /dev/null
+++ b/local_packages/rich/theme.py
@@ -0,0 +1,116 @@
+from typing import IO, Dict, List, Mapping, Optional
+
+from .default_styles import DEFAULT_STYLES
+from .style import Style, StyleType
+
+
+class Theme:
+    """A container for style information, used by :class:`~rich.console.Console`.
+
+    Args:
+        styles (Dict[str, Style], optional): A mapping of style names on to styles. Defaults to None for a theme with no styles.
+        inherit (bool, optional): Inherit default styles. Defaults to True.
+    """
+
+    styles: Dict[str, Style]
+
+    def __init__(
+        self, styles: Optional[Mapping[str, StyleType]] = None, inherit: bool = True
+    ):
+        self.styles = DEFAULT_STYLES.copy() if inherit else {}
+        if styles is not None:
+            self.styles.update(
+                {
+                    name: style if isinstance(style, Style) else Style.parse(style)
+                    for name, style in styles.items()
+                }
+            )
+
+    @property
+    def config(self) -> str:
+        """Get contents of a config file for this theme."""
+        config = "[styles]\n" + "\n".join(
+            f"{name} = {style}" for name, style in sorted(self.styles.items())
+        )
+        return config
+
+    @classmethod
+    def from_file(
+        cls, config_file: IO[str], source: Optional[str] = None, inherit: bool = True
+    ) -> "Theme":
+        """Load a theme from a text mode file.
+
+        Args:
+            config_file (IO[str]): An open conf file.
+            source (str, optional): The filename of the open file. Defaults to None.
+            inherit (bool, optional): Inherit default styles. Defaults to True.
+
+        Returns:
+            Theme: A New theme instance.
+        """
+        import configparser
+
+        config = configparser.ConfigParser()
+        config.read_file(config_file, source=source)
+        styles = {name: Style.parse(value) for name, value in config.items("styles")}
+        theme = Theme(styles, inherit=inherit)
+        return theme
+
+    @classmethod
+    def read(
+        cls, path: str, inherit: bool = True, encoding: Optional[str] = None
+    ) -> "Theme":
+        """Read a theme from a path.
+
+        Args:
+            path (str): Path to a config file readable by Python configparser module.
+            inherit (bool, optional): Inherit default styles. Defaults to True.
+            encoding (str, optional): Encoding of the config file. Defaults to None.
+
+        Returns:
+            Theme: A new theme instance.
+        """
+        with open(path, encoding=encoding) as config_file:
+            return cls.from_file(config_file, source=path, inherit=inherit)
+
+
+class ThemeStackError(Exception):
+    """Base exception for errors related to the theme stack."""
+
+
+class ThemeStack:
+    """A stack of themes.
+
+    Args:
+        theme (Theme): A theme instance
+    """
+
+    def __init__(self, theme: Theme) -> None:
+        self._entries: List[Dict[str, Style]] = [theme.styles]
+        self.get = self._entries[-1].get
+
+    def push_theme(self, theme: Theme, inherit: bool = True) -> None:
+        """Push a theme on the top of the stack.
+
+        Args:
+            theme (Theme): A Theme instance.
+            inherit (boolean, optional): Inherit styles from current top of stack.
+        """
+        styles: Dict[str, Style]
+        styles = (
+            {**self._entries[-1], **theme.styles} if inherit else theme.styles.copy()
+        )
+        self._entries.append(styles)
+        self.get = self._entries[-1].get
+
+    def pop_theme(self) -> None:
+        """Pop (and discard) the top-most theme."""
+        if len(self._entries) == 1:
+            raise ThemeStackError("Unable to pop base theme")
+        self._entries.pop()
+        self.get = self._entries[-1].get
+
+
+if __name__ == "__main__":  # pragma: no cover
+    theme = Theme()
+    print(theme.config)
diff --git a/local_packages/rich/themes.py b/local_packages/rich/themes.py
new file mode 100644
index 0000000..bf6db10
--- /dev/null
+++ b/local_packages/rich/themes.py
@@ -0,0 +1,5 @@
+from .default_styles import DEFAULT_STYLES
+from .theme import Theme
+
+
+DEFAULT = Theme(DEFAULT_STYLES)
diff --git a/local_packages/rich/traceback.py b/local_packages/rich/traceback.py
new file mode 100644
index 0000000..66eaeca
--- /dev/null
+++ b/local_packages/rich/traceback.py
@@ -0,0 +1,924 @@
+import inspect
+import linecache
+import os
+import sys
+from dataclasses import dataclass, field
+from itertools import islice
+from traceback import walk_tb
+from types import ModuleType, TracebackType
+from typing import (
+    Any,
+    Callable,
+    Dict,
+    Iterable,
+    List,
+    Optional,
+    Sequence,
+    Set,
+    Tuple,
+    Type,
+    Union,
+)
+
+from pygments.lexers import guess_lexer_for_filename
+from pygments.token import Comment, Keyword, Name, Number, Operator, String
+from pygments.token import Text as TextToken
+from pygments.token import Token
+from pygments.util import ClassNotFound
+
+from . import pretty
+from ._loop import loop_first_last, loop_last
+from .columns import Columns
+from .console import (
+    Console,
+    ConsoleOptions,
+    ConsoleRenderable,
+    OverflowMethod,
+    Group,
+    RenderResult,
+    group,
+)
+from .constrain import Constrain
+from .highlighter import RegexHighlighter, ReprHighlighter
+from .panel import Panel
+from .scope import render_scope
+from .style import Style
+from .syntax import Syntax, SyntaxPosition
+from .text import Text
+from .theme import Theme
+
+WINDOWS = sys.platform == "win32"
+
+LOCALS_MAX_LENGTH = 10
+LOCALS_MAX_STRING = 80
+
+
+def _iter_syntax_lines(
+    start: SyntaxPosition, end: SyntaxPosition
+) -> Iterable[Tuple[int, int, int]]:
+    """Yield start and end positions per line.
+
+    Args:
+        start: Start position.
+        end: End position.
+
+    Returns:
+        Iterable of (LINE, COLUMN1, COLUMN2).
+    """
+
+    line1, column1 = start
+    line2, column2 = end
+
+    if line1 == line2:
+        yield line1, column1, column2
+    else:
+        for first, last, line_no in loop_first_last(range(line1, line2 + 1)):
+            if first:
+                yield line_no, column1, -1
+            elif last:
+                yield line_no, 0, column2
+            else:
+                yield line_no, 0, -1
+
+
+def install(
+    *,
+    console: Optional[Console] = None,
+    width: Optional[int] = 100,
+    code_width: Optional[int] = 88,
+    extra_lines: int = 3,
+    theme: Optional[str] = None,
+    word_wrap: bool = False,
+    show_locals: bool = False,
+    locals_max_length: int = LOCALS_MAX_LENGTH,
+    locals_max_string: int = LOCALS_MAX_STRING,
+    locals_max_depth: Optional[int] = None,
+    locals_hide_dunder: bool = True,
+    locals_hide_sunder: Optional[bool] = None,
+    locals_overflow: Optional[OverflowMethod] = None,
+    indent_guides: bool = True,
+    suppress: Iterable[Union[str, ModuleType]] = (),
+    max_frames: int = 100,
+) -> Callable[[Type[BaseException], BaseException, Optional[TracebackType]], Any]:
+    """Install a rich traceback handler.
+
+    Once installed, any tracebacks will be printed with syntax highlighting and rich formatting.
+
+
+    Args:
+        console (Optional[Console], optional): Console to write exception to. Default uses internal Console instance.
+        width (Optional[int], optional): Width (in characters) of traceback. Defaults to 100.
+        code_width (Optional[int], optional): Code width (in characters) of traceback. Defaults to 88.
+        extra_lines (int, optional): Extra lines of code. Defaults to 3.
+        theme (Optional[str], optional): Pygments theme to use in traceback. Defaults to ``None`` which will pick
+            a theme appropriate for the platform.
+        word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False.
+        show_locals (bool, optional): Enable display of local variables. Defaults to False.
+        locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+            Defaults to 10.
+        locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80.
+        locals_max_depth (int, optional): Maximum depths of locals before truncating, or None to disable. Defaults to None.
+        locals_hide_dunder (bool, optional): Hide locals prefixed with double underscore. Defaults to True.
+        locals_hide_sunder (bool, optional): Hide locals prefixed with single underscore. Defaults to False.
+        locals_overflow (OverflowMethod, optional): How to handle overflowing locals, or None to disable. Defaults to None.
+        indent_guides (bool, optional): Enable indent guides in code and locals. Defaults to True.
+        suppress (Sequence[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback.
+
+    Returns:
+        Callable: The previous exception handler that was replaced.
+
+    """
+    traceback_console = Console(stderr=True) if console is None else console
+
+    locals_hide_sunder = (
+        True
+        if (traceback_console.is_jupyter and locals_hide_sunder is None)
+        else locals_hide_sunder
+    )
+
+    def excepthook(
+        type_: Type[BaseException],
+        value: BaseException,
+        traceback: Optional[TracebackType],
+    ) -> None:
+        exception_traceback = Traceback.from_exception(
+            type_,
+            value,
+            traceback,
+            width=width,
+            code_width=code_width,
+            extra_lines=extra_lines,
+            theme=theme,
+            word_wrap=word_wrap,
+            show_locals=show_locals,
+            locals_max_length=locals_max_length,
+            locals_max_string=locals_max_string,
+            locals_max_depth=locals_max_depth,
+            locals_hide_dunder=locals_hide_dunder,
+            locals_hide_sunder=bool(locals_hide_sunder),
+            locals_overflow=locals_overflow,
+            indent_guides=indent_guides,
+            suppress=suppress,
+            max_frames=max_frames,
+        )
+        traceback_console.print(exception_traceback)
+
+    def ipy_excepthook_closure(ip: Any) -> None:  # pragma: no cover
+        tb_data = {}  # store information about showtraceback call
+        default_showtraceback = ip.showtraceback  # keep reference of default traceback
+
+        def ipy_show_traceback(*args: Any, **kwargs: Any) -> None:
+            """wrap the default ip.showtraceback to store info for ip._showtraceback"""
+            nonlocal tb_data
+            tb_data = kwargs
+            default_showtraceback(*args, **kwargs)
+
+        def ipy_display_traceback(
+            *args: Any, is_syntax: bool = False, **kwargs: Any
+        ) -> None:
+            """Internally called traceback from ip._showtraceback"""
+            nonlocal tb_data
+            exc_tuple = ip._get_exc_info()
+
+            # do not display trace on syntax error
+            tb: Optional[TracebackType] = None if is_syntax else exc_tuple[2]
+
+            # determine correct tb_offset
+            compiled = tb_data.get("running_compiled_code", False)
+            tb_offset = tb_data.get("tb_offset")
+            if tb_offset is None:
+                tb_offset = 1 if compiled else 0
+            # remove ipython internal frames from trace with tb_offset
+            for _ in range(tb_offset):
+                if tb is None:
+                    break
+                tb = tb.tb_next
+
+            excepthook(exc_tuple[0], exc_tuple[1], tb)
+            tb_data = {}  # clear data upon usage
+
+        # replace _showtraceback instead of showtraceback to allow ipython features such as debugging to work
+        # this is also what the ipython docs recommends to modify when subclassing InteractiveShell
+        ip._showtraceback = ipy_display_traceback
+        # add wrapper to capture tb_data
+        ip.showtraceback = ipy_show_traceback
+        ip.showsyntaxerror = lambda *args, **kwargs: ipy_display_traceback(
+            *args, is_syntax=True, **kwargs
+        )
+
+    try:  # pragma: no cover
+        # if within ipython, use customized traceback
+        ip = get_ipython()  # type: ignore[name-defined]
+        ipy_excepthook_closure(ip)
+        return sys.excepthook
+    except Exception:
+        # otherwise use default system hook
+        old_excepthook = sys.excepthook
+        sys.excepthook = excepthook
+        return old_excepthook
+
+
+@dataclass
+class Frame:
+    filename: str
+    lineno: int
+    name: str
+    line: str = ""
+    locals: Optional[Dict[str, pretty.Node]] = None
+    last_instruction: Optional[Tuple[Tuple[int, int], Tuple[int, int]]] = None
+
+
+@dataclass
+class _SyntaxError:
+    offset: int
+    filename: str
+    line: str
+    lineno: int
+    msg: str
+    notes: List[str] = field(default_factory=list)
+
+
+@dataclass
+class Stack:
+    exc_type: str
+    exc_value: str
+    syntax_error: Optional[_SyntaxError] = None
+    is_cause: bool = False
+    frames: List[Frame] = field(default_factory=list)
+    notes: List[str] = field(default_factory=list)
+    is_group: bool = False
+    exceptions: List["Trace"] = field(default_factory=list)
+
+
+@dataclass
+class Trace:
+    stacks: List[Stack]
+
+
+class PathHighlighter(RegexHighlighter):
+    highlights = [r"(?P.*/)(?P.+)"]
+
+
+class Traceback:
+    """A Console renderable that renders a traceback.
+
+    Args:
+        trace (Trace, optional): A `Trace` object produced from `extract`. Defaults to None, which uses
+            the last exception.
+        width (Optional[int], optional): Number of characters used to traceback. Defaults to 100.
+        code_width (Optional[int], optional): Number of code characters used to traceback. Defaults to 88.
+        extra_lines (int, optional): Additional lines of code to render. Defaults to 3.
+        theme (str, optional): Override pygments theme used in traceback.
+        word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False.
+        show_locals (bool, optional): Enable display of local variables. Defaults to False.
+        indent_guides (bool, optional): Enable indent guides in code and locals. Defaults to True.
+        locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+            Defaults to 10.
+        locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80.
+        locals_max_depth (int, optional): Maximum depths of locals before truncating, or None to disable. Defaults to None.
+        locals_hide_dunder (bool, optional): Hide locals prefixed with double underscore. Defaults to True.
+        locals_hide_sunder (bool, optional): Hide locals prefixed with single underscore. Defaults to False.
+        locals_overflow (OverflowMethod, optional): How to handle overflowing locals, or None to disable. Defaults to None.
+        suppress (Sequence[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback.
+        max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100.
+
+    """
+
+    LEXERS = {
+        "": "text",
+        ".py": "python",
+        ".pxd": "cython",
+        ".pyx": "cython",
+        ".pxi": "pyrex",
+    }
+
+    def __init__(
+        self,
+        trace: Optional[Trace] = None,
+        *,
+        width: Optional[int] = 100,
+        code_width: Optional[int] = 88,
+        extra_lines: int = 3,
+        theme: Optional[str] = None,
+        word_wrap: bool = False,
+        show_locals: bool = False,
+        locals_max_length: int = LOCALS_MAX_LENGTH,
+        locals_max_string: int = LOCALS_MAX_STRING,
+        locals_max_depth: Optional[int] = None,
+        locals_hide_dunder: bool = True,
+        locals_hide_sunder: bool = False,
+        locals_overlow: Optional[OverflowMethod] = None,
+        indent_guides: bool = True,
+        suppress: Iterable[Union[str, ModuleType]] = (),
+        max_frames: int = 100,
+    ):
+        if trace is None:
+            exc_type, exc_value, traceback = sys.exc_info()
+            if exc_type is None or exc_value is None or traceback is None:
+                raise ValueError(
+                    "Value for 'trace' required if not called in except: block"
+                )
+            trace = self.extract(
+                exc_type, exc_value, traceback, show_locals=show_locals
+            )
+        self.trace = trace
+        self.width = width
+        self.code_width = code_width
+        self.extra_lines = extra_lines
+        self.theme = Syntax.get_theme(theme or "ansi_dark")
+        self.word_wrap = word_wrap
+        self.show_locals = show_locals
+        self.indent_guides = indent_guides
+        self.locals_max_length = locals_max_length
+        self.locals_max_string = locals_max_string
+        self.locals_max_depth = locals_max_depth
+        self.locals_hide_dunder = locals_hide_dunder
+        self.locals_hide_sunder = locals_hide_sunder
+        self.locals_overflow = locals_overlow
+
+        self.suppress: Sequence[str] = []
+        for suppress_entity in suppress:
+            if not isinstance(suppress_entity, str):
+                assert (
+                    suppress_entity.__file__ is not None
+                ), f"{suppress_entity!r} must be a module with '__file__' attribute"
+                path = os.path.dirname(suppress_entity.__file__)
+            else:
+                path = suppress_entity
+            path = os.path.normpath(os.path.abspath(path))
+            self.suppress.append(path)
+        self.max_frames = max(4, max_frames) if max_frames > 0 else 0
+
+    @classmethod
+    def from_exception(
+        cls,
+        exc_type: Type[Any],
+        exc_value: BaseException,
+        traceback: Optional[TracebackType],
+        *,
+        width: Optional[int] = 100,
+        code_width: Optional[int] = 88,
+        extra_lines: int = 3,
+        theme: Optional[str] = None,
+        word_wrap: bool = False,
+        show_locals: bool = False,
+        locals_max_length: int = LOCALS_MAX_LENGTH,
+        locals_max_string: int = LOCALS_MAX_STRING,
+        locals_max_depth: Optional[int] = None,
+        locals_hide_dunder: bool = True,
+        locals_hide_sunder: bool = False,
+        locals_overflow: Optional[OverflowMethod] = None,
+        indent_guides: bool = True,
+        suppress: Iterable[Union[str, ModuleType]] = (),
+        max_frames: int = 100,
+    ) -> "Traceback":
+        """Create a traceback from exception info
+
+        Args:
+            exc_type (Type[BaseException]): Exception type.
+            exc_value (BaseException): Exception value.
+            traceback (TracebackType): Python Traceback object.
+            width (Optional[int], optional): Number of characters used to traceback. Defaults to 100.
+            code_width (Optional[int], optional): Number of code characters used to traceback. Defaults to 88.
+            extra_lines (int, optional): Additional lines of code to render. Defaults to 3.
+            theme (str, optional): Override pygments theme used in traceback.
+            word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False.
+            show_locals (bool, optional): Enable display of local variables. Defaults to False.
+            indent_guides (bool, optional): Enable indent guides in code and locals. Defaults to True.
+            locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+                Defaults to 10.
+            locals_max_depth (int, optional): Maximum depths of locals before truncating, or None to disable. Defaults to None.
+            locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80.
+            locals_hide_dunder (bool, optional): Hide locals prefixed with double underscore. Defaults to True.
+            locals_hide_sunder (bool, optional): Hide locals prefixed with single underscore. Defaults to False.
+            locals_overflow (OverflowMethod, optional): How to handle overflowing locals, or None to disable. Defaults to None.
+            suppress (Iterable[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback.
+            max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100.
+
+        Returns:
+            Traceback: A Traceback instance that may be printed.
+        """
+        rich_traceback = cls.extract(
+            exc_type,
+            exc_value,
+            traceback,
+            show_locals=show_locals,
+            locals_max_length=locals_max_length,
+            locals_max_string=locals_max_string,
+            locals_max_depth=locals_max_depth,
+            locals_hide_dunder=locals_hide_dunder,
+            locals_hide_sunder=locals_hide_sunder,
+        )
+
+        return cls(
+            rich_traceback,
+            width=width,
+            code_width=code_width,
+            extra_lines=extra_lines,
+            theme=theme,
+            word_wrap=word_wrap,
+            show_locals=show_locals,
+            indent_guides=indent_guides,
+            locals_max_length=locals_max_length,
+            locals_max_string=locals_max_string,
+            locals_max_depth=locals_max_depth,
+            locals_hide_dunder=locals_hide_dunder,
+            locals_hide_sunder=locals_hide_sunder,
+            locals_overlow=locals_overflow,
+            suppress=suppress,
+            max_frames=max_frames,
+        )
+
+    @classmethod
+    def extract(
+        cls,
+        exc_type: Type[BaseException],
+        exc_value: BaseException,
+        traceback: Optional[TracebackType],
+        *,
+        show_locals: bool = False,
+        locals_max_length: int = LOCALS_MAX_LENGTH,
+        locals_max_string: int = LOCALS_MAX_STRING,
+        locals_max_depth: Optional[int] = None,
+        locals_hide_dunder: bool = True,
+        locals_hide_sunder: bool = False,
+        _visited_exceptions: Optional[Set[BaseException]] = None,
+    ) -> Trace:
+        """Extract traceback information.
+
+        Args:
+            exc_type (Type[BaseException]): Exception type.
+            exc_value (BaseException): Exception value.
+            traceback (TracebackType): Python Traceback object.
+            show_locals (bool, optional): Enable display of local variables. Defaults to False.
+            locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+                Defaults to 10.
+            locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80.
+            locals_max_depth (int, optional): Maximum depths of locals before truncating, or None to disable. Defaults to None.
+            locals_hide_dunder (bool, optional): Hide locals prefixed with double underscore. Defaults to True.
+            locals_hide_sunder (bool, optional): Hide locals prefixed with single underscore. Defaults to False.
+
+        Returns:
+            Trace: A Trace instance which you can use to construct a `Traceback`.
+        """
+
+        stacks: List[Stack] = []
+        is_cause = False
+
+        from rich import _IMPORT_CWD
+
+        notes: List[str] = getattr(exc_value, "__notes__", None) or []
+
+        grouped_exceptions: Set[BaseException] = (
+            set() if _visited_exceptions is None else _visited_exceptions
+        )
+
+        def safe_str(_object: Any) -> str:
+            """Don't allow exceptions from __str__ to propagate."""
+            try:
+                return str(_object)
+            except Exception:
+                return ""
+
+        while True:
+            stack = Stack(
+                exc_type=safe_str(exc_type.__name__),
+                exc_value=safe_str(exc_value),
+                is_cause=is_cause,
+                notes=notes,
+            )
+
+            if sys.version_info >= (3, 11):
+                if isinstance(exc_value, (BaseExceptionGroup, ExceptionGroup)):
+                    stack.is_group = True
+                    for exception in exc_value.exceptions:
+                        if exception in grouped_exceptions:
+                            continue
+                        grouped_exceptions.add(exception)
+                        stack.exceptions.append(
+                            Traceback.extract(
+                                type(exception),
+                                exception,
+                                exception.__traceback__,
+                                show_locals=show_locals,
+                                locals_max_length=locals_max_length,
+                                locals_hide_dunder=locals_hide_dunder,
+                                locals_hide_sunder=locals_hide_sunder,
+                                _visited_exceptions=grouped_exceptions,
+                            )
+                        )
+
+            if isinstance(exc_value, SyntaxError):
+                stack.syntax_error = _SyntaxError(
+                    offset=exc_value.offset or 0,
+                    filename=exc_value.filename or "?",
+                    lineno=exc_value.lineno or 0,
+                    line=exc_value.text or "",
+                    msg=exc_value.msg,
+                    notes=notes,
+                )
+
+            stacks.append(stack)
+            append = stack.frames.append
+
+            def get_locals(
+                iter_locals: Iterable[Tuple[str, object]],
+            ) -> Iterable[Tuple[str, object]]:
+                """Extract locals from an iterator of key pairs."""
+                if not (locals_hide_dunder or locals_hide_sunder):
+                    yield from iter_locals
+                    return
+                for key, value in iter_locals:
+                    if locals_hide_dunder and key.startswith("__"):
+                        continue
+                    if locals_hide_sunder and key.startswith("_"):
+                        continue
+                    yield key, value
+
+            for frame_summary, line_no in walk_tb(traceback):
+                filename = frame_summary.f_code.co_filename
+
+                last_instruction: Optional[Tuple[Tuple[int, int], Tuple[int, int]]]
+                last_instruction = None
+                if sys.version_info >= (3, 11):
+                    instruction_index = frame_summary.f_lasti // 2
+                    instruction_position = next(
+                        islice(
+                            frame_summary.f_code.co_positions(),
+                            instruction_index,
+                            instruction_index + 1,
+                        )
+                    )
+                    (
+                        start_line,
+                        end_line,
+                        start_column,
+                        end_column,
+                    ) = instruction_position
+                    if (
+                        start_line is not None
+                        and end_line is not None
+                        and start_column is not None
+                        and end_column is not None
+                    ):
+                        last_instruction = (
+                            (start_line, start_column),
+                            (end_line, end_column),
+                        )
+
+                if filename and not filename.startswith("<"):
+                    if not os.path.isabs(filename):
+                        filename = os.path.join(_IMPORT_CWD, filename)
+                if frame_summary.f_locals.get("_rich_traceback_omit", False):
+                    continue
+
+                frame = Frame(
+                    filename=filename or "?",
+                    lineno=line_no,
+                    name=frame_summary.f_code.co_name,
+                    locals=(
+                        {
+                            key: pretty.traverse(
+                                value,
+                                max_length=locals_max_length,
+                                max_string=locals_max_string,
+                                max_depth=locals_max_depth,
+                            )
+                            for key, value in get_locals(frame_summary.f_locals.items())
+                            if not (inspect.isfunction(value) or inspect.isclass(value))
+                        }
+                        if show_locals
+                        else None
+                    ),
+                    last_instruction=last_instruction,
+                )
+                append(frame)
+                if frame_summary.f_locals.get("_rich_traceback_guard", False):
+                    del stack.frames[:]
+
+            if not grouped_exceptions:
+                cause = getattr(exc_value, "__cause__", None)
+                if cause is not None and cause is not exc_value:
+                    exc_type = cause.__class__
+                    exc_value = cause
+                    # __traceback__ can be None, e.g. for exceptions raised by the
+                    # 'multiprocessing' module
+                    traceback = cause.__traceback__
+                    is_cause = True
+                    continue
+
+                cause = exc_value.__context__
+                if cause is not None and not getattr(
+                    exc_value, "__suppress_context__", False
+                ):
+                    exc_type = cause.__class__
+                    exc_value = cause
+                    traceback = cause.__traceback__
+                    is_cause = False
+                    continue
+            # No cover, code is reached but coverage doesn't recognize it.
+            break  # pragma: no cover
+
+        trace = Trace(stacks=stacks)
+
+        return trace
+
+    def __rich_console__(
+        self, console: Console, options: ConsoleOptions
+    ) -> RenderResult:
+        theme = self.theme
+        background_style = theme.get_background_style()
+        token_style = theme.get_style_for_token
+
+        traceback_theme = Theme(
+            {
+                "pretty": token_style(TextToken),
+                "pygments.text": token_style(Token),
+                "pygments.string": token_style(String),
+                "pygments.function": token_style(Name.Function),
+                "pygments.number": token_style(Number),
+                "repr.indent": token_style(Comment) + Style(dim=True),
+                "repr.str": token_style(String),
+                "repr.brace": token_style(TextToken) + Style(bold=True),
+                "repr.number": token_style(Number),
+                "repr.bool_true": token_style(Keyword.Constant),
+                "repr.bool_false": token_style(Keyword.Constant),
+                "repr.none": token_style(Keyword.Constant),
+                "scope.border": token_style(String.Delimiter),
+                "scope.equals": token_style(Operator),
+                "scope.key": token_style(Name),
+                "scope.key.special": token_style(Name.Constant) + Style(dim=True),
+            },
+            inherit=False,
+        )
+
+        highlighter = ReprHighlighter()
+
+        @group()
+        def render_stack(stack: Stack, last: bool) -> RenderResult:
+            if stack.frames:
+                stack_renderable: ConsoleRenderable = Panel(
+                    self._render_stack(stack),
+                    title="[traceback.title]Traceback [dim](most recent call last)",
+                    style=background_style,
+                    border_style="traceback.border",
+                    expand=True,
+                    padding=(0, 1),
+                )
+                stack_renderable = Constrain(stack_renderable, self.width)
+                with console.use_theme(traceback_theme):
+                    yield stack_renderable
+
+            if stack.syntax_error is not None:
+                with console.use_theme(traceback_theme):
+                    yield Constrain(
+                        Panel(
+                            self._render_syntax_error(stack.syntax_error),
+                            style=background_style,
+                            border_style="traceback.border.syntax_error",
+                            expand=True,
+                            padding=(0, 1),
+                            width=self.width,
+                        ),
+                        self.width,
+                    )
+                yield Text.assemble(
+                    (f"{stack.exc_type}: ", "traceback.exc_type"),
+                    highlighter(stack.syntax_error.msg),
+                )
+            elif stack.exc_value:
+                yield Text.assemble(
+                    (f"{stack.exc_type}: ", "traceback.exc_type"),
+                    highlighter(stack.exc_value),
+                )
+            else:
+                yield Text.assemble((f"{stack.exc_type}", "traceback.exc_type"))
+
+            for note in stack.notes:
+                yield Text.assemble(("[NOTE] ", "traceback.note"), highlighter(note))
+
+            if stack.is_group:
+                for group_no, group_exception in enumerate(stack.exceptions, 1):
+                    grouped_exceptions: List[Group] = []
+                    for group_last, group_stack in loop_last(group_exception.stacks):
+                        grouped_exceptions.append(render_stack(group_stack, group_last))
+                    yield ""
+                    yield Constrain(
+                        Panel(
+                            Group(*grouped_exceptions),
+                            title=f"Sub-exception #{group_no}",
+                            border_style="traceback.group.border",
+                        ),
+                        self.width,
+                    )
+
+            if not last:
+                if stack.is_cause:
+                    yield Text.from_markup(
+                        "\n[i]The above exception was the direct cause of the following exception:\n",
+                    )
+                else:
+                    yield Text.from_markup(
+                        "\n[i]During handling of the above exception, another exception occurred:\n",
+                    )
+
+        for last, stack in loop_last(reversed(self.trace.stacks)):
+            yield render_stack(stack, last)
+
+    @group()
+    def _render_syntax_error(self, syntax_error: _SyntaxError) -> RenderResult:
+        highlighter = ReprHighlighter()
+        path_highlighter = PathHighlighter()
+        if syntax_error.filename != "":
+            if os.path.exists(syntax_error.filename):
+                text = Text.assemble(
+                    (f" {syntax_error.filename}", "pygments.string"),
+                    (":", "pygments.text"),
+                    (str(syntax_error.lineno), "pygments.number"),
+                    style="pygments.text",
+                )
+                yield path_highlighter(text)
+        syntax_error_text = highlighter(syntax_error.line.rstrip())
+        syntax_error_text.no_wrap = True
+        offset = min(syntax_error.offset - 1, len(syntax_error_text))
+        syntax_error_text.stylize("bold underline", offset, offset)
+        syntax_error_text += Text.from_markup(
+            "\n" + " " * offset + "[traceback.offset]▲[/]",
+            style="pygments.text",
+        )
+        yield syntax_error_text
+
+    @classmethod
+    def _guess_lexer(cls, filename: str, code: str) -> str:
+        ext = os.path.splitext(filename)[-1]
+        if not ext:
+            # No extension, look at first line to see if it is a hashbang
+            # Note, this is an educated guess and not a guarantee
+            # If it fails, the only downside is that the code is highlighted strangely
+            new_line_index = code.index("\n")
+            first_line = code[:new_line_index] if new_line_index != -1 else code
+            if first_line.startswith("#!") and "python" in first_line.lower():
+                return "python"
+        try:
+            return cls.LEXERS.get(ext) or guess_lexer_for_filename(filename, code).name
+        except ClassNotFound:
+            return "text"
+
+    @group()
+    def _render_stack(self, stack: Stack) -> RenderResult:
+        path_highlighter = PathHighlighter()
+        theme = self.theme
+
+        def render_locals(frame: Frame) -> Iterable[ConsoleRenderable]:
+            if frame.locals:
+                yield render_scope(
+                    frame.locals,
+                    title="locals",
+                    indent_guides=self.indent_guides,
+                    max_length=self.locals_max_length,
+                    max_string=self.locals_max_string,
+                    max_depth=self.locals_max_depth,
+                    overflow=self.locals_overflow,
+                )
+
+        exclude_frames: Optional[range] = None
+        if self.max_frames != 0:
+            exclude_frames = range(
+                self.max_frames // 2,
+                len(stack.frames) - self.max_frames // 2,
+            )
+
+        excluded = False
+        for frame_index, frame in enumerate(stack.frames):
+            if exclude_frames and frame_index in exclude_frames:
+                excluded = True
+                continue
+
+            if excluded:
+                assert exclude_frames is not None
+                yield Text(
+                    f"\n... {len(exclude_frames)} frames hidden ...",
+                    justify="center",
+                    style="traceback.error",
+                )
+                excluded = False
+
+            first = frame_index == 0
+            frame_filename = frame.filename
+            suppressed = any(frame_filename.startswith(path) for path in self.suppress)
+
+            if os.path.exists(frame.filename):
+                text = Text.assemble(
+                    path_highlighter(Text(frame.filename, style="pygments.string")),
+                    (":", "pygments.text"),
+                    (str(frame.lineno), "pygments.number"),
+                    " in ",
+                    (frame.name, "pygments.function"),
+                    style="pygments.text",
+                )
+            else:
+                text = Text.assemble(
+                    "in ",
+                    (frame.name, "pygments.function"),
+                    (":", "pygments.text"),
+                    (str(frame.lineno), "pygments.number"),
+                    style="pygments.text",
+                )
+            if not frame.filename.startswith("<") and not first:
+                yield ""
+            yield text
+            if frame.filename.startswith("<"):
+                yield from render_locals(frame)
+                continue
+            if not suppressed:
+                try:
+                    code_lines = linecache.getlines(frame.filename)
+                    code = "".join(code_lines)
+                    if not code:
+                        # code may be an empty string if the file doesn't exist, OR
+                        # if the traceback filename is generated dynamically
+                        continue
+                    lexer_name = self._guess_lexer(frame.filename, code)
+                    syntax = Syntax(
+                        code,
+                        lexer_name,
+                        theme=theme,
+                        line_numbers=True,
+                        line_range=(
+                            frame.lineno - self.extra_lines,
+                            frame.lineno + self.extra_lines,
+                        ),
+                        highlight_lines={frame.lineno},
+                        word_wrap=self.word_wrap,
+                        code_width=self.code_width,
+                        indent_guides=self.indent_guides,
+                        dedent=False,
+                    )
+                    yield ""
+                except Exception as error:
+                    yield Text.assemble(
+                        (f"\n{error}", "traceback.error"),
+                    )
+                else:
+                    if frame.last_instruction is not None:
+                        start, end = frame.last_instruction
+
+                        # Stylize a line at a time
+                        # So that indentation isn't underlined (which looks bad)
+                        for line1, column1, column2 in _iter_syntax_lines(start, end):
+                            try:
+                                if column1 == 0:
+                                    line = code_lines[line1 - 1]
+                                    column1 = len(line) - len(line.lstrip())
+                                if column2 == -1:
+                                    column2 = len(code_lines[line1 - 1])
+                            except IndexError:
+                                # Being defensive here
+                                # If last_instruction reports a line out-of-bounds, we don't want to crash
+                                continue
+
+                            syntax.stylize_range(
+                                style="traceback.error_range",
+                                start=(line1, column1),
+                                end=(line1, column2),
+                            )
+                    yield (
+                        Columns(
+                            [
+                                syntax,
+                                *render_locals(frame),
+                            ],
+                            padding=1,
+                        )
+                        if frame.locals
+                        else syntax
+                    )
+
+
+if __name__ == "__main__":  # pragma: no cover
+    install(show_locals=True)
+    import sys
+
+    def bar(
+        a: Any,
+    ) -> None:  # 这是对亚洲语言支持的测试。面对模棱两可的想法,拒绝猜测的诱惑
+        one = 1
+        print(one / a)
+
+    def foo(a: Any) -> None:
+        _rich_traceback_guard = True
+        zed = {
+            "characters": {
+                "Paul Atreides",
+                "Vladimir Harkonnen",
+                "Thufir Hawat",
+                "Duncan Idaho",
+            },
+            "atomic_types": (None, False, True),
+        }
+        bar(a)
+
+    def error() -> None:
+        foo(0)
+
+    error()
diff --git a/local_packages/rich/tree.py b/local_packages/rich/tree.py
new file mode 100644
index 0000000..9a87d60
--- /dev/null
+++ b/local_packages/rich/tree.py
@@ -0,0 +1,257 @@
+from typing import Iterator, List, Optional, Tuple
+
+from ._loop import loop_first, loop_last
+from .console import Console, ConsoleOptions, RenderableType, RenderResult
+from .jupyter import JupyterMixin
+from .measure import Measurement
+from .segment import Segment
+from .style import Style, StyleStack, StyleType
+from .styled import Styled
+
+GuideType = Tuple[str, str, str, str]
+
+
+class Tree(JupyterMixin):
+    """A renderable for a tree structure.
+
+    Attributes:
+        ASCII_GUIDES (GuideType): Guide lines used when Console.ascii_only is True.
+        TREE_GUIDES (List[GuideType, GuideType, GuideType]): Default guide lines.
+
+    Args:
+        label (RenderableType): The renderable or str for the tree label.
+        style (StyleType, optional): Style of this tree. Defaults to "tree".
+        guide_style (StyleType, optional): Style of the guide lines. Defaults to "tree.line".
+        expanded (bool, optional): Also display children. Defaults to True.
+        highlight (bool, optional): Highlight renderable (if str). Defaults to False.
+        hide_root (bool, optional): Hide the root node. Defaults to False.
+    """
+
+    ASCII_GUIDES = ("    ", "|   ", "+-- ", "`-- ")
+    TREE_GUIDES = [
+        ("    ", "│   ", "├── ", "└── "),
+        ("    ", "┃   ", "┣━━ ", "┗━━ "),
+        ("    ", "║   ", "╠══ ", "╚══ "),
+    ]
+
+    def __init__(
+        self,
+        label: RenderableType,
+        *,
+        style: StyleType = "tree",
+        guide_style: StyleType = "tree.line",
+        expanded: bool = True,
+        highlight: bool = False,
+        hide_root: bool = False,
+    ) -> None:
+        self.label = label
+        self.style = style
+        self.guide_style = guide_style
+        self.children: List[Tree] = []
+        self.expanded = expanded
+        self.highlight = highlight
+        self.hide_root = hide_root
+
+    def add(
+        self,
+        label: RenderableType,
+        *,
+        style: Optional[StyleType] = None,
+        guide_style: Optional[StyleType] = None,
+        expanded: bool = True,
+        highlight: Optional[bool] = False,
+    ) -> "Tree":
+        """Add a child tree.
+
+        Args:
+            label (RenderableType): The renderable or str for the tree label.
+            style (StyleType, optional): Style of this tree. Defaults to "tree".
+            guide_style (StyleType, optional): Style of the guide lines. Defaults to "tree.line".
+            expanded (bool, optional): Also display children. Defaults to True.
+            highlight (Optional[bool], optional): Highlight renderable (if str). Defaults to False.
+
+        Returns:
+            Tree: A new child Tree, which may be further modified.
+        """
+        node = Tree(
+            label,
+            style=self.style if style is None else style,
+            guide_style=self.guide_style if guide_style is None else guide_style,
+            expanded=expanded,
+            highlight=self.highlight if highlight is None else highlight,
+        )
+        self.children.append(node)
+        return node
+
+    def __rich_console__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> "RenderResult":
+        stack: List[Iterator[Tuple[bool, Tree]]] = []
+        pop = stack.pop
+        push = stack.append
+        new_line = Segment.line()
+
+        get_style = console.get_style
+        null_style = Style.null()
+        guide_style = get_style(self.guide_style, default="") or null_style
+        SPACE, CONTINUE, FORK, END = range(4)
+
+        _Segment = Segment
+
+        def make_guide(index: int, style: Style) -> Segment:
+            """Make a Segment for a level of the guide lines."""
+            if options.ascii_only:
+                line = self.ASCII_GUIDES[index]
+            else:
+                guide = 1 if style.bold else (2 if style.underline2 else 0)
+                line = self.TREE_GUIDES[0 if options.legacy_windows else guide][index]
+            return _Segment(line, style)
+
+        levels: List[Segment] = [make_guide(CONTINUE, guide_style)]
+        push(iter(loop_last([self])))
+
+        guide_style_stack = StyleStack(get_style(self.guide_style))
+        style_stack = StyleStack(get_style(self.style))
+        remove_guide_styles = Style(bold=False, underline2=False)
+
+        depth = 0
+
+        while stack:
+            stack_node = pop()
+            try:
+                last, node = next(stack_node)
+            except StopIteration:
+                levels.pop()
+                if levels:
+                    guide_style = levels[-1].style or null_style
+                    levels[-1] = make_guide(FORK, guide_style)
+                    guide_style_stack.pop()
+                    style_stack.pop()
+                continue
+            push(stack_node)
+            if last:
+                levels[-1] = make_guide(END, levels[-1].style or null_style)
+
+            guide_style = guide_style_stack.current + get_style(node.guide_style)
+            style = style_stack.current + get_style(node.style)
+            prefix = levels[(2 if self.hide_root else 1) :]
+            renderable_lines = console.render_lines(
+                Styled(node.label, style),
+                options.update(
+                    width=options.max_width
+                    - sum(level.cell_length for level in prefix),
+                    highlight=self.highlight,
+                    height=None,
+                ),
+                pad=options.justify is not None,
+            )
+
+            if not (depth == 0 and self.hide_root):
+                for first, line in loop_first(renderable_lines):
+                    if prefix:
+                        yield from _Segment.apply_style(
+                            prefix,
+                            style.background_style,
+                            post_style=remove_guide_styles,
+                        )
+                    yield from line
+                    yield new_line
+                    if first and prefix:
+                        prefix[-1] = make_guide(
+                            SPACE if last else CONTINUE, prefix[-1].style or null_style
+                        )
+
+            if node.expanded and node.children:
+                levels[-1] = make_guide(
+                    SPACE if last else CONTINUE, levels[-1].style or null_style
+                )
+                levels.append(
+                    make_guide(END if len(node.children) == 1 else FORK, guide_style)
+                )
+                style_stack.push(get_style(node.style))
+                guide_style_stack.push(get_style(node.guide_style))
+                push(iter(loop_last(node.children)))
+                depth += 1
+
+    def __rich_measure__(
+        self, console: "Console", options: "ConsoleOptions"
+    ) -> "Measurement":
+        stack: List[Iterator[Tree]] = [iter([self])]
+        pop = stack.pop
+        push = stack.append
+        minimum = 0
+        maximum = 0
+        measure = Measurement.get
+        level = 0
+        while stack:
+            iter_tree = pop()
+            try:
+                tree = next(iter_tree)
+            except StopIteration:
+                level -= 1
+                continue
+            push(iter_tree)
+            min_measure, max_measure = measure(console, options, tree.label)
+            indent = level * 4
+            minimum = max(min_measure + indent, minimum)
+            maximum = max(max_measure + indent, maximum)
+            if tree.expanded and tree.children:
+                push(iter(tree.children))
+                level += 1
+        return Measurement(minimum, maximum)
+
+
+if __name__ == "__main__":  # pragma: no cover
+    from rich.console import Group
+    from rich.markdown import Markdown
+    from rich.panel import Panel
+    from rich.syntax import Syntax
+    from rich.table import Table
+
+    table = Table(row_styles=["", "dim"])
+
+    table.add_column("Released", style="cyan", no_wrap=True)
+    table.add_column("Title", style="magenta")
+    table.add_column("Box Office", justify="right", style="green")
+
+    table.add_row("Dec 20, 2019", "Star Wars: The Rise of Skywalker", "$952,110,690")
+    table.add_row("May 25, 2018", "Solo: A Star Wars Story", "$393,151,347")
+    table.add_row("Dec 15, 2017", "Star Wars Ep. V111: The Last Jedi", "$1,332,539,889")
+    table.add_row("Dec 16, 2016", "Rogue One: A Star Wars Story", "$1,332,439,889")
+
+    code = """\
+class Segment(NamedTuple):
+    text: str = ""
+    style: Optional[Style] = None
+    is_control: bool = False
+"""
+    syntax = Syntax(code, "python", theme="monokai", line_numbers=True)
+
+    markdown = Markdown(
+        """\
+### example.md
+> Hello, World!
+>
+> Markdown _all_ the things
+"""
+    )
+
+    root = Tree("🌲 [b green]Rich Tree", highlight=True, hide_root=True)
+
+    node = root.add(":file_folder: Renderables", guide_style="red")
+    simple_node = node.add(":file_folder: [bold yellow]Atomic", guide_style="uu green")
+    simple_node.add(Group("📄 Syntax", syntax))
+    simple_node.add(Group("📄 Markdown", Panel(markdown, border_style="green")))
+
+    containers_node = node.add(
+        ":file_folder: [bold magenta]Containers", guide_style="bold magenta"
+    )
+    containers_node.expanded = True
+    panel = Panel.fit("Just a panel", border_style="red")
+    containers_node.add(Group("📄 Panels", panel))
+
+    containers_node.add(Group("📄 [b magenta]Table", table))
+
+    console = Console()
+
+    console.print(root)
diff --git a/local_packages/share/spirv_cross_c/cmake/spirv_cross_cConfig-release.cmake b/local_packages/share/spirv_cross_c/cmake/spirv_cross_cConfig-release.cmake
new file mode 100644
index 0000000..fb86220
--- /dev/null
+++ b/local_packages/share/spirv_cross_c/cmake/spirv_cross_cConfig-release.cmake
@@ -0,0 +1,19 @@
+#----------------------------------------------------------------
+# Generated CMake target import file for configuration "Release".
+#----------------------------------------------------------------
+
+# Commands may need to know the format version.
+set(CMAKE_IMPORT_FILE_VERSION 1)
+
+# Import target "spirv-cross-c" for configuration "Release"
+set_property(TARGET spirv-cross-c APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
+set_target_properties(spirv-cross-c PROPERTIES
+  IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "CXX"
+  IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/spirv-cross-c.lib"
+  )
+
+list(APPEND _cmake_import_check_targets spirv-cross-c )
+list(APPEND _cmake_import_check_files_for_spirv-cross-c "${_IMPORT_PREFIX}/lib/spirv-cross-c.lib" )
+
+# Commands beyond this point should not need to know the version.
+set(CMAKE_IMPORT_FILE_VERSION)
diff --git a/local_packages/share/spirv_cross_c/cmake/spirv_cross_cConfig.cmake b/local_packages/share/spirv_cross_c/cmake/spirv_cross_cConfig.cmake
new file mode 100644
index 0000000..d975e1f
--- /dev/null
+++ b/local_packages/share/spirv_cross_c/cmake/spirv_cross_cConfig.cmake
@@ -0,0 +1,123 @@
+# Generated by CMake
+
+if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.8)
+   message(FATAL_ERROR "CMake >= 2.8.12 required")
+endif()
+if(CMAKE_VERSION VERSION_LESS "2.8.12")
+   message(FATAL_ERROR "CMake >= 2.8.12 required")
+endif()
+cmake_policy(PUSH)
+cmake_policy(VERSION 2.8.12...3.29)
+#----------------------------------------------------------------
+# Generated CMake target import file.
+#----------------------------------------------------------------
+
+# Commands may need to know the format version.
+set(CMAKE_IMPORT_FILE_VERSION 1)
+
+# Protect against multiple inclusion, which would fail when already imported targets are added once more.
+set(_cmake_targets_defined "")
+set(_cmake_targets_not_defined "")
+set(_cmake_expected_targets "")
+foreach(_cmake_expected_target IN ITEMS spirv-cross-c)
+  list(APPEND _cmake_expected_targets "${_cmake_expected_target}")
+  if(TARGET "${_cmake_expected_target}")
+    list(APPEND _cmake_targets_defined "${_cmake_expected_target}")
+  else()
+    list(APPEND _cmake_targets_not_defined "${_cmake_expected_target}")
+  endif()
+endforeach()
+unset(_cmake_expected_target)
+if(_cmake_targets_defined STREQUAL _cmake_expected_targets)
+  unset(_cmake_targets_defined)
+  unset(_cmake_targets_not_defined)
+  unset(_cmake_expected_targets)
+  unset(CMAKE_IMPORT_FILE_VERSION)
+  cmake_policy(POP)
+  return()
+endif()
+if(NOT _cmake_targets_defined STREQUAL "")
+  string(REPLACE ";" ", " _cmake_targets_defined_text "${_cmake_targets_defined}")
+  string(REPLACE ";" ", " _cmake_targets_not_defined_text "${_cmake_targets_not_defined}")
+  message(FATAL_ERROR "Some (but not all) targets in this export set were already defined.\nTargets Defined: ${_cmake_targets_defined_text}\nTargets not yet defined: ${_cmake_targets_not_defined_text}\n")
+endif()
+unset(_cmake_targets_defined)
+unset(_cmake_targets_not_defined)
+unset(_cmake_expected_targets)
+
+
+# Compute the installation prefix relative to this file.
+get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+if(_IMPORT_PREFIX STREQUAL "/")
+  set(_IMPORT_PREFIX "")
+endif()
+
+# Create imported target spirv-cross-c
+add_library(spirv-cross-c STATIC IMPORTED)
+
+set_target_properties(spirv-cross-c PROPERTIES
+  INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include/spirv_cross"
+  INTERFACE_LINK_LIBRARIES "\$;\$;\$;\$;\$"
+)
+
+# Load information for each installed configuration.
+file(GLOB _cmake_config_files "${CMAKE_CURRENT_LIST_DIR}/spirv_cross_cConfig-*.cmake")
+foreach(_cmake_config_file IN LISTS _cmake_config_files)
+  include("${_cmake_config_file}")
+endforeach()
+unset(_cmake_config_file)
+unset(_cmake_config_files)
+
+# Cleanup temporary variables.
+set(_IMPORT_PREFIX)
+
+# Loop over all imported files and verify that they actually exist
+foreach(_cmake_target IN LISTS _cmake_import_check_targets)
+  if(CMAKE_VERSION VERSION_LESS "3.28"
+      OR NOT DEFINED _cmake_import_check_xcframework_for_${_cmake_target}
+      OR NOT IS_DIRECTORY "${_cmake_import_check_xcframework_for_${_cmake_target}}")
+    foreach(_cmake_file IN LISTS "_cmake_import_check_files_for_${_cmake_target}")
+      if(NOT EXISTS "${_cmake_file}")
+        message(FATAL_ERROR "The imported target \"${_cmake_target}\" references the file
+   \"${_cmake_file}\"
+but this file does not exist.  Possible reasons include:
+* The file was deleted, renamed, or moved to another location.
+* An install or uninstall procedure did not complete successfully.
+* The installation package was faulty and contained
+   \"${CMAKE_CURRENT_LIST_FILE}\"
+but not all the files it references.
+")
+      endif()
+    endforeach()
+  endif()
+  unset(_cmake_file)
+  unset("_cmake_import_check_files_for_${_cmake_target}")
+endforeach()
+unset(_cmake_target)
+unset(_cmake_import_check_targets)
+
+# Make sure the targets which have been exported in some other
+# export set exist.
+unset(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets)
+foreach(_target "spirv-cross-glsl" "spirv-cross-hlsl" "spirv-cross-msl" "spirv-cross-cpp" "spirv-cross-reflect" )
+  if(NOT TARGET "${_target}" )
+    set(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets "${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets} ${_target}")
+  endif()
+endforeach()
+
+if(DEFINED ${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets)
+  if(CMAKE_FIND_PACKAGE_NAME)
+    set( ${CMAKE_FIND_PACKAGE_NAME}_FOUND FALSE)
+    set( ${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE "The following imported targets are referenced, but are missing: ${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets}")
+  else()
+    message(FATAL_ERROR "The following imported targets are referenced, but are missing: ${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets}")
+  endif()
+endif()
+unset(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets)
+
+# Commands beyond this point should not need to know the version.
+set(CMAKE_IMPORT_FILE_VERSION)
+cmake_policy(POP)
diff --git a/local_packages/share/spirv_cross_core/cmake/spirv_cross_coreConfig-release.cmake b/local_packages/share/spirv_cross_core/cmake/spirv_cross_coreConfig-release.cmake
new file mode 100644
index 0000000..c43df56
--- /dev/null
+++ b/local_packages/share/spirv_cross_core/cmake/spirv_cross_coreConfig-release.cmake
@@ -0,0 +1,19 @@
+#----------------------------------------------------------------
+# Generated CMake target import file for configuration "Release".
+#----------------------------------------------------------------
+
+# Commands may need to know the format version.
+set(CMAKE_IMPORT_FILE_VERSION 1)
+
+# Import target "spirv-cross-core" for configuration "Release"
+set_property(TARGET spirv-cross-core APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
+set_target_properties(spirv-cross-core PROPERTIES
+  IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "CXX"
+  IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/spirv-cross-core.lib"
+  )
+
+list(APPEND _cmake_import_check_targets spirv-cross-core )
+list(APPEND _cmake_import_check_files_for_spirv-cross-core "${_IMPORT_PREFIX}/lib/spirv-cross-core.lib" )
+
+# Commands beyond this point should not need to know the version.
+set(CMAKE_IMPORT_FILE_VERSION)
diff --git a/local_packages/share/spirv_cross_core/cmake/spirv_cross_coreConfig.cmake b/local_packages/share/spirv_cross_core/cmake/spirv_cross_coreConfig.cmake
new file mode 100644
index 0000000..4a531c2
--- /dev/null
+++ b/local_packages/share/spirv_cross_core/cmake/spirv_cross_coreConfig.cmake
@@ -0,0 +1,106 @@
+# Generated by CMake
+
+if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.8)
+   message(FATAL_ERROR "CMake >= 2.8.3 required")
+endif()
+if(CMAKE_VERSION VERSION_LESS "2.8.3")
+   message(FATAL_ERROR "CMake >= 2.8.3 required")
+endif()
+cmake_policy(PUSH)
+cmake_policy(VERSION 2.8.3...3.29)
+#----------------------------------------------------------------
+# Generated CMake target import file.
+#----------------------------------------------------------------
+
+# Commands may need to know the format version.
+set(CMAKE_IMPORT_FILE_VERSION 1)
+
+# Protect against multiple inclusion, which would fail when already imported targets are added once more.
+set(_cmake_targets_defined "")
+set(_cmake_targets_not_defined "")
+set(_cmake_expected_targets "")
+foreach(_cmake_expected_target IN ITEMS spirv-cross-core)
+  list(APPEND _cmake_expected_targets "${_cmake_expected_target}")
+  if(TARGET "${_cmake_expected_target}")
+    list(APPEND _cmake_targets_defined "${_cmake_expected_target}")
+  else()
+    list(APPEND _cmake_targets_not_defined "${_cmake_expected_target}")
+  endif()
+endforeach()
+unset(_cmake_expected_target)
+if(_cmake_targets_defined STREQUAL _cmake_expected_targets)
+  unset(_cmake_targets_defined)
+  unset(_cmake_targets_not_defined)
+  unset(_cmake_expected_targets)
+  unset(CMAKE_IMPORT_FILE_VERSION)
+  cmake_policy(POP)
+  return()
+endif()
+if(NOT _cmake_targets_defined STREQUAL "")
+  string(REPLACE ";" ", " _cmake_targets_defined_text "${_cmake_targets_defined}")
+  string(REPLACE ";" ", " _cmake_targets_not_defined_text "${_cmake_targets_not_defined}")
+  message(FATAL_ERROR "Some (but not all) targets in this export set were already defined.\nTargets Defined: ${_cmake_targets_defined_text}\nTargets not yet defined: ${_cmake_targets_not_defined_text}\n")
+endif()
+unset(_cmake_targets_defined)
+unset(_cmake_targets_not_defined)
+unset(_cmake_expected_targets)
+
+
+# Compute the installation prefix relative to this file.
+get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+if(_IMPORT_PREFIX STREQUAL "/")
+  set(_IMPORT_PREFIX "")
+endif()
+
+# Create imported target spirv-cross-core
+add_library(spirv-cross-core STATIC IMPORTED)
+
+set_target_properties(spirv-cross-core PROPERTIES
+  INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include/spirv_cross"
+)
+
+# Load information for each installed configuration.
+file(GLOB _cmake_config_files "${CMAKE_CURRENT_LIST_DIR}/spirv_cross_coreConfig-*.cmake")
+foreach(_cmake_config_file IN LISTS _cmake_config_files)
+  include("${_cmake_config_file}")
+endforeach()
+unset(_cmake_config_file)
+unset(_cmake_config_files)
+
+# Cleanup temporary variables.
+set(_IMPORT_PREFIX)
+
+# Loop over all imported files and verify that they actually exist
+foreach(_cmake_target IN LISTS _cmake_import_check_targets)
+  if(CMAKE_VERSION VERSION_LESS "3.28"
+      OR NOT DEFINED _cmake_import_check_xcframework_for_${_cmake_target}
+      OR NOT IS_DIRECTORY "${_cmake_import_check_xcframework_for_${_cmake_target}}")
+    foreach(_cmake_file IN LISTS "_cmake_import_check_files_for_${_cmake_target}")
+      if(NOT EXISTS "${_cmake_file}")
+        message(FATAL_ERROR "The imported target \"${_cmake_target}\" references the file
+   \"${_cmake_file}\"
+but this file does not exist.  Possible reasons include:
+* The file was deleted, renamed, or moved to another location.
+* An install or uninstall procedure did not complete successfully.
+* The installation package was faulty and contained
+   \"${CMAKE_CURRENT_LIST_FILE}\"
+but not all the files it references.
+")
+      endif()
+    endforeach()
+  endif()
+  unset(_cmake_file)
+  unset("_cmake_import_check_files_for_${_cmake_target}")
+endforeach()
+unset(_cmake_target)
+unset(_cmake_import_check_targets)
+
+# This file does not depend on other imported targets which have
+# been exported from the same project but in a separate export set.
+
+# Commands beyond this point should not need to know the version.
+set(CMAKE_IMPORT_FILE_VERSION)
+cmake_policy(POP)
diff --git a/local_packages/share/spirv_cross_cpp/cmake/spirv_cross_cppConfig-release.cmake b/local_packages/share/spirv_cross_cpp/cmake/spirv_cross_cppConfig-release.cmake
new file mode 100644
index 0000000..d920169
--- /dev/null
+++ b/local_packages/share/spirv_cross_cpp/cmake/spirv_cross_cppConfig-release.cmake
@@ -0,0 +1,19 @@
+#----------------------------------------------------------------
+# Generated CMake target import file for configuration "Release".
+#----------------------------------------------------------------
+
+# Commands may need to know the format version.
+set(CMAKE_IMPORT_FILE_VERSION 1)
+
+# Import target "spirv-cross-cpp" for configuration "Release"
+set_property(TARGET spirv-cross-cpp APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
+set_target_properties(spirv-cross-cpp PROPERTIES
+  IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "CXX"
+  IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/spirv-cross-cpp.lib"
+  )
+
+list(APPEND _cmake_import_check_targets spirv-cross-cpp )
+list(APPEND _cmake_import_check_files_for_spirv-cross-cpp "${_IMPORT_PREFIX}/lib/spirv-cross-cpp.lib" )
+
+# Commands beyond this point should not need to know the version.
+set(CMAKE_IMPORT_FILE_VERSION)
diff --git a/local_packages/share/spirv_cross_cpp/cmake/spirv_cross_cppConfig.cmake b/local_packages/share/spirv_cross_cpp/cmake/spirv_cross_cppConfig.cmake
new file mode 100644
index 0000000..497eacf
--- /dev/null
+++ b/local_packages/share/spirv_cross_cpp/cmake/spirv_cross_cppConfig.cmake
@@ -0,0 +1,123 @@
+# Generated by CMake
+
+if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.8)
+   message(FATAL_ERROR "CMake >= 2.8.12 required")
+endif()
+if(CMAKE_VERSION VERSION_LESS "2.8.12")
+   message(FATAL_ERROR "CMake >= 2.8.12 required")
+endif()
+cmake_policy(PUSH)
+cmake_policy(VERSION 2.8.12...3.29)
+#----------------------------------------------------------------
+# Generated CMake target import file.
+#----------------------------------------------------------------
+
+# Commands may need to know the format version.
+set(CMAKE_IMPORT_FILE_VERSION 1)
+
+# Protect against multiple inclusion, which would fail when already imported targets are added once more.
+set(_cmake_targets_defined "")
+set(_cmake_targets_not_defined "")
+set(_cmake_expected_targets "")
+foreach(_cmake_expected_target IN ITEMS spirv-cross-cpp)
+  list(APPEND _cmake_expected_targets "${_cmake_expected_target}")
+  if(TARGET "${_cmake_expected_target}")
+    list(APPEND _cmake_targets_defined "${_cmake_expected_target}")
+  else()
+    list(APPEND _cmake_targets_not_defined "${_cmake_expected_target}")
+  endif()
+endforeach()
+unset(_cmake_expected_target)
+if(_cmake_targets_defined STREQUAL _cmake_expected_targets)
+  unset(_cmake_targets_defined)
+  unset(_cmake_targets_not_defined)
+  unset(_cmake_expected_targets)
+  unset(CMAKE_IMPORT_FILE_VERSION)
+  cmake_policy(POP)
+  return()
+endif()
+if(NOT _cmake_targets_defined STREQUAL "")
+  string(REPLACE ";" ", " _cmake_targets_defined_text "${_cmake_targets_defined}")
+  string(REPLACE ";" ", " _cmake_targets_not_defined_text "${_cmake_targets_not_defined}")
+  message(FATAL_ERROR "Some (but not all) targets in this export set were already defined.\nTargets Defined: ${_cmake_targets_defined_text}\nTargets not yet defined: ${_cmake_targets_not_defined_text}\n")
+endif()
+unset(_cmake_targets_defined)
+unset(_cmake_targets_not_defined)
+unset(_cmake_expected_targets)
+
+
+# Compute the installation prefix relative to this file.
+get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+if(_IMPORT_PREFIX STREQUAL "/")
+  set(_IMPORT_PREFIX "")
+endif()
+
+# Create imported target spirv-cross-cpp
+add_library(spirv-cross-cpp STATIC IMPORTED)
+
+set_target_properties(spirv-cross-cpp PROPERTIES
+  INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include/spirv_cross"
+  INTERFACE_LINK_LIBRARIES "\$"
+)
+
+# Load information for each installed configuration.
+file(GLOB _cmake_config_files "${CMAKE_CURRENT_LIST_DIR}/spirv_cross_cppConfig-*.cmake")
+foreach(_cmake_config_file IN LISTS _cmake_config_files)
+  include("${_cmake_config_file}")
+endforeach()
+unset(_cmake_config_file)
+unset(_cmake_config_files)
+
+# Cleanup temporary variables.
+set(_IMPORT_PREFIX)
+
+# Loop over all imported files and verify that they actually exist
+foreach(_cmake_target IN LISTS _cmake_import_check_targets)
+  if(CMAKE_VERSION VERSION_LESS "3.28"
+      OR NOT DEFINED _cmake_import_check_xcframework_for_${_cmake_target}
+      OR NOT IS_DIRECTORY "${_cmake_import_check_xcframework_for_${_cmake_target}}")
+    foreach(_cmake_file IN LISTS "_cmake_import_check_files_for_${_cmake_target}")
+      if(NOT EXISTS "${_cmake_file}")
+        message(FATAL_ERROR "The imported target \"${_cmake_target}\" references the file
+   \"${_cmake_file}\"
+but this file does not exist.  Possible reasons include:
+* The file was deleted, renamed, or moved to another location.
+* An install or uninstall procedure did not complete successfully.
+* The installation package was faulty and contained
+   \"${CMAKE_CURRENT_LIST_FILE}\"
+but not all the files it references.
+")
+      endif()
+    endforeach()
+  endif()
+  unset(_cmake_file)
+  unset("_cmake_import_check_files_for_${_cmake_target}")
+endforeach()
+unset(_cmake_target)
+unset(_cmake_import_check_targets)
+
+# Make sure the targets which have been exported in some other
+# export set exist.
+unset(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets)
+foreach(_target "spirv-cross-glsl" )
+  if(NOT TARGET "${_target}" )
+    set(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets "${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets} ${_target}")
+  endif()
+endforeach()
+
+if(DEFINED ${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets)
+  if(CMAKE_FIND_PACKAGE_NAME)
+    set( ${CMAKE_FIND_PACKAGE_NAME}_FOUND FALSE)
+    set( ${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE "The following imported targets are referenced, but are missing: ${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets}")
+  else()
+    message(FATAL_ERROR "The following imported targets are referenced, but are missing: ${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets}")
+  endif()
+endif()
+unset(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets)
+
+# Commands beyond this point should not need to know the version.
+set(CMAKE_IMPORT_FILE_VERSION)
+cmake_policy(POP)
diff --git a/local_packages/share/spirv_cross_glsl/cmake/spirv_cross_glslConfig-release.cmake b/local_packages/share/spirv_cross_glsl/cmake/spirv_cross_glslConfig-release.cmake
new file mode 100644
index 0000000..c5183c9
--- /dev/null
+++ b/local_packages/share/spirv_cross_glsl/cmake/spirv_cross_glslConfig-release.cmake
@@ -0,0 +1,19 @@
+#----------------------------------------------------------------
+# Generated CMake target import file for configuration "Release".
+#----------------------------------------------------------------
+
+# Commands may need to know the format version.
+set(CMAKE_IMPORT_FILE_VERSION 1)
+
+# Import target "spirv-cross-glsl" for configuration "Release"
+set_property(TARGET spirv-cross-glsl APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
+set_target_properties(spirv-cross-glsl PROPERTIES
+  IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "CXX"
+  IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/spirv-cross-glsl.lib"
+  )
+
+list(APPEND _cmake_import_check_targets spirv-cross-glsl )
+list(APPEND _cmake_import_check_files_for_spirv-cross-glsl "${_IMPORT_PREFIX}/lib/spirv-cross-glsl.lib" )
+
+# Commands beyond this point should not need to know the version.
+set(CMAKE_IMPORT_FILE_VERSION)
diff --git a/local_packages/share/spirv_cross_glsl/cmake/spirv_cross_glslConfig.cmake b/local_packages/share/spirv_cross_glsl/cmake/spirv_cross_glslConfig.cmake
new file mode 100644
index 0000000..d7cddc5
--- /dev/null
+++ b/local_packages/share/spirv_cross_glsl/cmake/spirv_cross_glslConfig.cmake
@@ -0,0 +1,123 @@
+# Generated by CMake
+
+if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.8)
+   message(FATAL_ERROR "CMake >= 2.8.12 required")
+endif()
+if(CMAKE_VERSION VERSION_LESS "2.8.12")
+   message(FATAL_ERROR "CMake >= 2.8.12 required")
+endif()
+cmake_policy(PUSH)
+cmake_policy(VERSION 2.8.12...3.29)
+#----------------------------------------------------------------
+# Generated CMake target import file.
+#----------------------------------------------------------------
+
+# Commands may need to know the format version.
+set(CMAKE_IMPORT_FILE_VERSION 1)
+
+# Protect against multiple inclusion, which would fail when already imported targets are added once more.
+set(_cmake_targets_defined "")
+set(_cmake_targets_not_defined "")
+set(_cmake_expected_targets "")
+foreach(_cmake_expected_target IN ITEMS spirv-cross-glsl)
+  list(APPEND _cmake_expected_targets "${_cmake_expected_target}")
+  if(TARGET "${_cmake_expected_target}")
+    list(APPEND _cmake_targets_defined "${_cmake_expected_target}")
+  else()
+    list(APPEND _cmake_targets_not_defined "${_cmake_expected_target}")
+  endif()
+endforeach()
+unset(_cmake_expected_target)
+if(_cmake_targets_defined STREQUAL _cmake_expected_targets)
+  unset(_cmake_targets_defined)
+  unset(_cmake_targets_not_defined)
+  unset(_cmake_expected_targets)
+  unset(CMAKE_IMPORT_FILE_VERSION)
+  cmake_policy(POP)
+  return()
+endif()
+if(NOT _cmake_targets_defined STREQUAL "")
+  string(REPLACE ";" ", " _cmake_targets_defined_text "${_cmake_targets_defined}")
+  string(REPLACE ";" ", " _cmake_targets_not_defined_text "${_cmake_targets_not_defined}")
+  message(FATAL_ERROR "Some (but not all) targets in this export set were already defined.\nTargets Defined: ${_cmake_targets_defined_text}\nTargets not yet defined: ${_cmake_targets_not_defined_text}\n")
+endif()
+unset(_cmake_targets_defined)
+unset(_cmake_targets_not_defined)
+unset(_cmake_expected_targets)
+
+
+# Compute the installation prefix relative to this file.
+get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+if(_IMPORT_PREFIX STREQUAL "/")
+  set(_IMPORT_PREFIX "")
+endif()
+
+# Create imported target spirv-cross-glsl
+add_library(spirv-cross-glsl STATIC IMPORTED)
+
+set_target_properties(spirv-cross-glsl PROPERTIES
+  INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include/spirv_cross"
+  INTERFACE_LINK_LIBRARIES "\$"
+)
+
+# Load information for each installed configuration.
+file(GLOB _cmake_config_files "${CMAKE_CURRENT_LIST_DIR}/spirv_cross_glslConfig-*.cmake")
+foreach(_cmake_config_file IN LISTS _cmake_config_files)
+  include("${_cmake_config_file}")
+endforeach()
+unset(_cmake_config_file)
+unset(_cmake_config_files)
+
+# Cleanup temporary variables.
+set(_IMPORT_PREFIX)
+
+# Loop over all imported files and verify that they actually exist
+foreach(_cmake_target IN LISTS _cmake_import_check_targets)
+  if(CMAKE_VERSION VERSION_LESS "3.28"
+      OR NOT DEFINED _cmake_import_check_xcframework_for_${_cmake_target}
+      OR NOT IS_DIRECTORY "${_cmake_import_check_xcframework_for_${_cmake_target}}")
+    foreach(_cmake_file IN LISTS "_cmake_import_check_files_for_${_cmake_target}")
+      if(NOT EXISTS "${_cmake_file}")
+        message(FATAL_ERROR "The imported target \"${_cmake_target}\" references the file
+   \"${_cmake_file}\"
+but this file does not exist.  Possible reasons include:
+* The file was deleted, renamed, or moved to another location.
+* An install or uninstall procedure did not complete successfully.
+* The installation package was faulty and contained
+   \"${CMAKE_CURRENT_LIST_FILE}\"
+but not all the files it references.
+")
+      endif()
+    endforeach()
+  endif()
+  unset(_cmake_file)
+  unset("_cmake_import_check_files_for_${_cmake_target}")
+endforeach()
+unset(_cmake_target)
+unset(_cmake_import_check_targets)
+
+# Make sure the targets which have been exported in some other
+# export set exist.
+unset(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets)
+foreach(_target "spirv-cross-core" )
+  if(NOT TARGET "${_target}" )
+    set(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets "${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets} ${_target}")
+  endif()
+endforeach()
+
+if(DEFINED ${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets)
+  if(CMAKE_FIND_PACKAGE_NAME)
+    set( ${CMAKE_FIND_PACKAGE_NAME}_FOUND FALSE)
+    set( ${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE "The following imported targets are referenced, but are missing: ${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets}")
+  else()
+    message(FATAL_ERROR "The following imported targets are referenced, but are missing: ${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets}")
+  endif()
+endif()
+unset(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets)
+
+# Commands beyond this point should not need to know the version.
+set(CMAKE_IMPORT_FILE_VERSION)
+cmake_policy(POP)
diff --git a/local_packages/share/spirv_cross_hlsl/cmake/spirv_cross_hlslConfig-release.cmake b/local_packages/share/spirv_cross_hlsl/cmake/spirv_cross_hlslConfig-release.cmake
new file mode 100644
index 0000000..57edda6
--- /dev/null
+++ b/local_packages/share/spirv_cross_hlsl/cmake/spirv_cross_hlslConfig-release.cmake
@@ -0,0 +1,19 @@
+#----------------------------------------------------------------
+# Generated CMake target import file for configuration "Release".
+#----------------------------------------------------------------
+
+# Commands may need to know the format version.
+set(CMAKE_IMPORT_FILE_VERSION 1)
+
+# Import target "spirv-cross-hlsl" for configuration "Release"
+set_property(TARGET spirv-cross-hlsl APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
+set_target_properties(spirv-cross-hlsl PROPERTIES
+  IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "CXX"
+  IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/spirv-cross-hlsl.lib"
+  )
+
+list(APPEND _cmake_import_check_targets spirv-cross-hlsl )
+list(APPEND _cmake_import_check_files_for_spirv-cross-hlsl "${_IMPORT_PREFIX}/lib/spirv-cross-hlsl.lib" )
+
+# Commands beyond this point should not need to know the version.
+set(CMAKE_IMPORT_FILE_VERSION)
diff --git a/local_packages/share/spirv_cross_hlsl/cmake/spirv_cross_hlslConfig.cmake b/local_packages/share/spirv_cross_hlsl/cmake/spirv_cross_hlslConfig.cmake
new file mode 100644
index 0000000..d23236a
--- /dev/null
+++ b/local_packages/share/spirv_cross_hlsl/cmake/spirv_cross_hlslConfig.cmake
@@ -0,0 +1,123 @@
+# Generated by CMake
+
+if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.8)
+   message(FATAL_ERROR "CMake >= 2.8.12 required")
+endif()
+if(CMAKE_VERSION VERSION_LESS "2.8.12")
+   message(FATAL_ERROR "CMake >= 2.8.12 required")
+endif()
+cmake_policy(PUSH)
+cmake_policy(VERSION 2.8.12...3.29)
+#----------------------------------------------------------------
+# Generated CMake target import file.
+#----------------------------------------------------------------
+
+# Commands may need to know the format version.
+set(CMAKE_IMPORT_FILE_VERSION 1)
+
+# Protect against multiple inclusion, which would fail when already imported targets are added once more.
+set(_cmake_targets_defined "")
+set(_cmake_targets_not_defined "")
+set(_cmake_expected_targets "")
+foreach(_cmake_expected_target IN ITEMS spirv-cross-hlsl)
+  list(APPEND _cmake_expected_targets "${_cmake_expected_target}")
+  if(TARGET "${_cmake_expected_target}")
+    list(APPEND _cmake_targets_defined "${_cmake_expected_target}")
+  else()
+    list(APPEND _cmake_targets_not_defined "${_cmake_expected_target}")
+  endif()
+endforeach()
+unset(_cmake_expected_target)
+if(_cmake_targets_defined STREQUAL _cmake_expected_targets)
+  unset(_cmake_targets_defined)
+  unset(_cmake_targets_not_defined)
+  unset(_cmake_expected_targets)
+  unset(CMAKE_IMPORT_FILE_VERSION)
+  cmake_policy(POP)
+  return()
+endif()
+if(NOT _cmake_targets_defined STREQUAL "")
+  string(REPLACE ";" ", " _cmake_targets_defined_text "${_cmake_targets_defined}")
+  string(REPLACE ";" ", " _cmake_targets_not_defined_text "${_cmake_targets_not_defined}")
+  message(FATAL_ERROR "Some (but not all) targets in this export set were already defined.\nTargets Defined: ${_cmake_targets_defined_text}\nTargets not yet defined: ${_cmake_targets_not_defined_text}\n")
+endif()
+unset(_cmake_targets_defined)
+unset(_cmake_targets_not_defined)
+unset(_cmake_expected_targets)
+
+
+# Compute the installation prefix relative to this file.
+get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+if(_IMPORT_PREFIX STREQUAL "/")
+  set(_IMPORT_PREFIX "")
+endif()
+
+# Create imported target spirv-cross-hlsl
+add_library(spirv-cross-hlsl STATIC IMPORTED)
+
+set_target_properties(spirv-cross-hlsl PROPERTIES
+  INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include/spirv_cross"
+  INTERFACE_LINK_LIBRARIES "\$"
+)
+
+# Load information for each installed configuration.
+file(GLOB _cmake_config_files "${CMAKE_CURRENT_LIST_DIR}/spirv_cross_hlslConfig-*.cmake")
+foreach(_cmake_config_file IN LISTS _cmake_config_files)
+  include("${_cmake_config_file}")
+endforeach()
+unset(_cmake_config_file)
+unset(_cmake_config_files)
+
+# Cleanup temporary variables.
+set(_IMPORT_PREFIX)
+
+# Loop over all imported files and verify that they actually exist
+foreach(_cmake_target IN LISTS _cmake_import_check_targets)
+  if(CMAKE_VERSION VERSION_LESS "3.28"
+      OR NOT DEFINED _cmake_import_check_xcframework_for_${_cmake_target}
+      OR NOT IS_DIRECTORY "${_cmake_import_check_xcframework_for_${_cmake_target}}")
+    foreach(_cmake_file IN LISTS "_cmake_import_check_files_for_${_cmake_target}")
+      if(NOT EXISTS "${_cmake_file}")
+        message(FATAL_ERROR "The imported target \"${_cmake_target}\" references the file
+   \"${_cmake_file}\"
+but this file does not exist.  Possible reasons include:
+* The file was deleted, renamed, or moved to another location.
+* An install or uninstall procedure did not complete successfully.
+* The installation package was faulty and contained
+   \"${CMAKE_CURRENT_LIST_FILE}\"
+but not all the files it references.
+")
+      endif()
+    endforeach()
+  endif()
+  unset(_cmake_file)
+  unset("_cmake_import_check_files_for_${_cmake_target}")
+endforeach()
+unset(_cmake_target)
+unset(_cmake_import_check_targets)
+
+# Make sure the targets which have been exported in some other
+# export set exist.
+unset(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets)
+foreach(_target "spirv-cross-glsl" )
+  if(NOT TARGET "${_target}" )
+    set(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets "${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets} ${_target}")
+  endif()
+endforeach()
+
+if(DEFINED ${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets)
+  if(CMAKE_FIND_PACKAGE_NAME)
+    set( ${CMAKE_FIND_PACKAGE_NAME}_FOUND FALSE)
+    set( ${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE "The following imported targets are referenced, but are missing: ${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets}")
+  else()
+    message(FATAL_ERROR "The following imported targets are referenced, but are missing: ${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets}")
+  endif()
+endif()
+unset(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets)
+
+# Commands beyond this point should not need to know the version.
+set(CMAKE_IMPORT_FILE_VERSION)
+cmake_policy(POP)
diff --git a/local_packages/share/spirv_cross_msl/cmake/spirv_cross_mslConfig-release.cmake b/local_packages/share/spirv_cross_msl/cmake/spirv_cross_mslConfig-release.cmake
new file mode 100644
index 0000000..6eeeb76
--- /dev/null
+++ b/local_packages/share/spirv_cross_msl/cmake/spirv_cross_mslConfig-release.cmake
@@ -0,0 +1,19 @@
+#----------------------------------------------------------------
+# Generated CMake target import file for configuration "Release".
+#----------------------------------------------------------------
+
+# Commands may need to know the format version.
+set(CMAKE_IMPORT_FILE_VERSION 1)
+
+# Import target "spirv-cross-msl" for configuration "Release"
+set_property(TARGET spirv-cross-msl APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
+set_target_properties(spirv-cross-msl PROPERTIES
+  IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "CXX"
+  IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/spirv-cross-msl.lib"
+  )
+
+list(APPEND _cmake_import_check_targets spirv-cross-msl )
+list(APPEND _cmake_import_check_files_for_spirv-cross-msl "${_IMPORT_PREFIX}/lib/spirv-cross-msl.lib" )
+
+# Commands beyond this point should not need to know the version.
+set(CMAKE_IMPORT_FILE_VERSION)
diff --git a/local_packages/share/spirv_cross_msl/cmake/spirv_cross_mslConfig.cmake b/local_packages/share/spirv_cross_msl/cmake/spirv_cross_mslConfig.cmake
new file mode 100644
index 0000000..6e560c1
--- /dev/null
+++ b/local_packages/share/spirv_cross_msl/cmake/spirv_cross_mslConfig.cmake
@@ -0,0 +1,123 @@
+# Generated by CMake
+
+if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.8)
+   message(FATAL_ERROR "CMake >= 2.8.12 required")
+endif()
+if(CMAKE_VERSION VERSION_LESS "2.8.12")
+   message(FATAL_ERROR "CMake >= 2.8.12 required")
+endif()
+cmake_policy(PUSH)
+cmake_policy(VERSION 2.8.12...3.29)
+#----------------------------------------------------------------
+# Generated CMake target import file.
+#----------------------------------------------------------------
+
+# Commands may need to know the format version.
+set(CMAKE_IMPORT_FILE_VERSION 1)
+
+# Protect against multiple inclusion, which would fail when already imported targets are added once more.
+set(_cmake_targets_defined "")
+set(_cmake_targets_not_defined "")
+set(_cmake_expected_targets "")
+foreach(_cmake_expected_target IN ITEMS spirv-cross-msl)
+  list(APPEND _cmake_expected_targets "${_cmake_expected_target}")
+  if(TARGET "${_cmake_expected_target}")
+    list(APPEND _cmake_targets_defined "${_cmake_expected_target}")
+  else()
+    list(APPEND _cmake_targets_not_defined "${_cmake_expected_target}")
+  endif()
+endforeach()
+unset(_cmake_expected_target)
+if(_cmake_targets_defined STREQUAL _cmake_expected_targets)
+  unset(_cmake_targets_defined)
+  unset(_cmake_targets_not_defined)
+  unset(_cmake_expected_targets)
+  unset(CMAKE_IMPORT_FILE_VERSION)
+  cmake_policy(POP)
+  return()
+endif()
+if(NOT _cmake_targets_defined STREQUAL "")
+  string(REPLACE ";" ", " _cmake_targets_defined_text "${_cmake_targets_defined}")
+  string(REPLACE ";" ", " _cmake_targets_not_defined_text "${_cmake_targets_not_defined}")
+  message(FATAL_ERROR "Some (but not all) targets in this export set were already defined.\nTargets Defined: ${_cmake_targets_defined_text}\nTargets not yet defined: ${_cmake_targets_not_defined_text}\n")
+endif()
+unset(_cmake_targets_defined)
+unset(_cmake_targets_not_defined)
+unset(_cmake_expected_targets)
+
+
+# Compute the installation prefix relative to this file.
+get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+if(_IMPORT_PREFIX STREQUAL "/")
+  set(_IMPORT_PREFIX "")
+endif()
+
+# Create imported target spirv-cross-msl
+add_library(spirv-cross-msl STATIC IMPORTED)
+
+set_target_properties(spirv-cross-msl PROPERTIES
+  INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include/spirv_cross"
+  INTERFACE_LINK_LIBRARIES "\$"
+)
+
+# Load information for each installed configuration.
+file(GLOB _cmake_config_files "${CMAKE_CURRENT_LIST_DIR}/spirv_cross_mslConfig-*.cmake")
+foreach(_cmake_config_file IN LISTS _cmake_config_files)
+  include("${_cmake_config_file}")
+endforeach()
+unset(_cmake_config_file)
+unset(_cmake_config_files)
+
+# Cleanup temporary variables.
+set(_IMPORT_PREFIX)
+
+# Loop over all imported files and verify that they actually exist
+foreach(_cmake_target IN LISTS _cmake_import_check_targets)
+  if(CMAKE_VERSION VERSION_LESS "3.28"
+      OR NOT DEFINED _cmake_import_check_xcframework_for_${_cmake_target}
+      OR NOT IS_DIRECTORY "${_cmake_import_check_xcframework_for_${_cmake_target}}")
+    foreach(_cmake_file IN LISTS "_cmake_import_check_files_for_${_cmake_target}")
+      if(NOT EXISTS "${_cmake_file}")
+        message(FATAL_ERROR "The imported target \"${_cmake_target}\" references the file
+   \"${_cmake_file}\"
+but this file does not exist.  Possible reasons include:
+* The file was deleted, renamed, or moved to another location.
+* An install or uninstall procedure did not complete successfully.
+* The installation package was faulty and contained
+   \"${CMAKE_CURRENT_LIST_FILE}\"
+but not all the files it references.
+")
+      endif()
+    endforeach()
+  endif()
+  unset(_cmake_file)
+  unset("_cmake_import_check_files_for_${_cmake_target}")
+endforeach()
+unset(_cmake_target)
+unset(_cmake_import_check_targets)
+
+# Make sure the targets which have been exported in some other
+# export set exist.
+unset(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets)
+foreach(_target "spirv-cross-glsl" )
+  if(NOT TARGET "${_target}" )
+    set(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets "${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets} ${_target}")
+  endif()
+endforeach()
+
+if(DEFINED ${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets)
+  if(CMAKE_FIND_PACKAGE_NAME)
+    set( ${CMAKE_FIND_PACKAGE_NAME}_FOUND FALSE)
+    set( ${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE "The following imported targets are referenced, but are missing: ${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets}")
+  else()
+    message(FATAL_ERROR "The following imported targets are referenced, but are missing: ${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets}")
+  endif()
+endif()
+unset(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets)
+
+# Commands beyond this point should not need to know the version.
+set(CMAKE_IMPORT_FILE_VERSION)
+cmake_policy(POP)
diff --git a/local_packages/share/spirv_cross_reflect/cmake/spirv_cross_reflectConfig-release.cmake b/local_packages/share/spirv_cross_reflect/cmake/spirv_cross_reflectConfig-release.cmake
new file mode 100644
index 0000000..1bedc0f
--- /dev/null
+++ b/local_packages/share/spirv_cross_reflect/cmake/spirv_cross_reflectConfig-release.cmake
@@ -0,0 +1,19 @@
+#----------------------------------------------------------------
+# Generated CMake target import file for configuration "Release".
+#----------------------------------------------------------------
+
+# Commands may need to know the format version.
+set(CMAKE_IMPORT_FILE_VERSION 1)
+
+# Import target "spirv-cross-reflect" for configuration "Release"
+set_property(TARGET spirv-cross-reflect APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
+set_target_properties(spirv-cross-reflect PROPERTIES
+  IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "CXX"
+  IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/spirv-cross-reflect.lib"
+  )
+
+list(APPEND _cmake_import_check_targets spirv-cross-reflect )
+list(APPEND _cmake_import_check_files_for_spirv-cross-reflect "${_IMPORT_PREFIX}/lib/spirv-cross-reflect.lib" )
+
+# Commands beyond this point should not need to know the version.
+set(CMAKE_IMPORT_FILE_VERSION)
diff --git a/local_packages/share/spirv_cross_reflect/cmake/spirv_cross_reflectConfig.cmake b/local_packages/share/spirv_cross_reflect/cmake/spirv_cross_reflectConfig.cmake
new file mode 100644
index 0000000..874bee9
--- /dev/null
+++ b/local_packages/share/spirv_cross_reflect/cmake/spirv_cross_reflectConfig.cmake
@@ -0,0 +1,106 @@
+# Generated by CMake
+
+if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.8)
+   message(FATAL_ERROR "CMake >= 2.8.3 required")
+endif()
+if(CMAKE_VERSION VERSION_LESS "2.8.3")
+   message(FATAL_ERROR "CMake >= 2.8.3 required")
+endif()
+cmake_policy(PUSH)
+cmake_policy(VERSION 2.8.3...3.29)
+#----------------------------------------------------------------
+# Generated CMake target import file.
+#----------------------------------------------------------------
+
+# Commands may need to know the format version.
+set(CMAKE_IMPORT_FILE_VERSION 1)
+
+# Protect against multiple inclusion, which would fail when already imported targets are added once more.
+set(_cmake_targets_defined "")
+set(_cmake_targets_not_defined "")
+set(_cmake_expected_targets "")
+foreach(_cmake_expected_target IN ITEMS spirv-cross-reflect)
+  list(APPEND _cmake_expected_targets "${_cmake_expected_target}")
+  if(TARGET "${_cmake_expected_target}")
+    list(APPEND _cmake_targets_defined "${_cmake_expected_target}")
+  else()
+    list(APPEND _cmake_targets_not_defined "${_cmake_expected_target}")
+  endif()
+endforeach()
+unset(_cmake_expected_target)
+if(_cmake_targets_defined STREQUAL _cmake_expected_targets)
+  unset(_cmake_targets_defined)
+  unset(_cmake_targets_not_defined)
+  unset(_cmake_expected_targets)
+  unset(CMAKE_IMPORT_FILE_VERSION)
+  cmake_policy(POP)
+  return()
+endif()
+if(NOT _cmake_targets_defined STREQUAL "")
+  string(REPLACE ";" ", " _cmake_targets_defined_text "${_cmake_targets_defined}")
+  string(REPLACE ";" ", " _cmake_targets_not_defined_text "${_cmake_targets_not_defined}")
+  message(FATAL_ERROR "Some (but not all) targets in this export set were already defined.\nTargets Defined: ${_cmake_targets_defined_text}\nTargets not yet defined: ${_cmake_targets_not_defined_text}\n")
+endif()
+unset(_cmake_targets_defined)
+unset(_cmake_targets_not_defined)
+unset(_cmake_expected_targets)
+
+
+# Compute the installation prefix relative to this file.
+get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+if(_IMPORT_PREFIX STREQUAL "/")
+  set(_IMPORT_PREFIX "")
+endif()
+
+# Create imported target spirv-cross-reflect
+add_library(spirv-cross-reflect STATIC IMPORTED)
+
+set_target_properties(spirv-cross-reflect PROPERTIES
+  INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include/spirv_cross"
+)
+
+# Load information for each installed configuration.
+file(GLOB _cmake_config_files "${CMAKE_CURRENT_LIST_DIR}/spirv_cross_reflectConfig-*.cmake")
+foreach(_cmake_config_file IN LISTS _cmake_config_files)
+  include("${_cmake_config_file}")
+endforeach()
+unset(_cmake_config_file)
+unset(_cmake_config_files)
+
+# Cleanup temporary variables.
+set(_IMPORT_PREFIX)
+
+# Loop over all imported files and verify that they actually exist
+foreach(_cmake_target IN LISTS _cmake_import_check_targets)
+  if(CMAKE_VERSION VERSION_LESS "3.28"
+      OR NOT DEFINED _cmake_import_check_xcframework_for_${_cmake_target}
+      OR NOT IS_DIRECTORY "${_cmake_import_check_xcframework_for_${_cmake_target}}")
+    foreach(_cmake_file IN LISTS "_cmake_import_check_files_for_${_cmake_target}")
+      if(NOT EXISTS "${_cmake_file}")
+        message(FATAL_ERROR "The imported target \"${_cmake_target}\" references the file
+   \"${_cmake_file}\"
+but this file does not exist.  Possible reasons include:
+* The file was deleted, renamed, or moved to another location.
+* An install or uninstall procedure did not complete successfully.
+* The installation package was faulty and contained
+   \"${CMAKE_CURRENT_LIST_FILE}\"
+but not all the files it references.
+")
+      endif()
+    endforeach()
+  endif()
+  unset(_cmake_file)
+  unset("_cmake_import_check_files_for_${_cmake_target}")
+endforeach()
+unset(_cmake_target)
+unset(_cmake_import_check_targets)
+
+# This file does not depend on other imported targets which have
+# been exported from the same project but in a separate export set.
+
+# Commands beyond this point should not need to know the version.
+set(CMAKE_IMPORT_FILE_VERSION)
+cmake_policy(POP)
diff --git a/local_packages/share/spirv_cross_util/cmake/spirv_cross_utilConfig-release.cmake b/local_packages/share/spirv_cross_util/cmake/spirv_cross_utilConfig-release.cmake
new file mode 100644
index 0000000..ef73605
--- /dev/null
+++ b/local_packages/share/spirv_cross_util/cmake/spirv_cross_utilConfig-release.cmake
@@ -0,0 +1,19 @@
+#----------------------------------------------------------------
+# Generated CMake target import file for configuration "Release".
+#----------------------------------------------------------------
+
+# Commands may need to know the format version.
+set(CMAKE_IMPORT_FILE_VERSION 1)
+
+# Import target "spirv-cross-util" for configuration "Release"
+set_property(TARGET spirv-cross-util APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
+set_target_properties(spirv-cross-util PROPERTIES
+  IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "CXX"
+  IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/spirv-cross-util.lib"
+  )
+
+list(APPEND _cmake_import_check_targets spirv-cross-util )
+list(APPEND _cmake_import_check_files_for_spirv-cross-util "${_IMPORT_PREFIX}/lib/spirv-cross-util.lib" )
+
+# Commands beyond this point should not need to know the version.
+set(CMAKE_IMPORT_FILE_VERSION)
diff --git a/local_packages/share/spirv_cross_util/cmake/spirv_cross_utilConfig.cmake b/local_packages/share/spirv_cross_util/cmake/spirv_cross_utilConfig.cmake
new file mode 100644
index 0000000..7c22576
--- /dev/null
+++ b/local_packages/share/spirv_cross_util/cmake/spirv_cross_utilConfig.cmake
@@ -0,0 +1,123 @@
+# Generated by CMake
+
+if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.8)
+   message(FATAL_ERROR "CMake >= 2.8.12 required")
+endif()
+if(CMAKE_VERSION VERSION_LESS "2.8.12")
+   message(FATAL_ERROR "CMake >= 2.8.12 required")
+endif()
+cmake_policy(PUSH)
+cmake_policy(VERSION 2.8.12...3.29)
+#----------------------------------------------------------------
+# Generated CMake target import file.
+#----------------------------------------------------------------
+
+# Commands may need to know the format version.
+set(CMAKE_IMPORT_FILE_VERSION 1)
+
+# Protect against multiple inclusion, which would fail when already imported targets are added once more.
+set(_cmake_targets_defined "")
+set(_cmake_targets_not_defined "")
+set(_cmake_expected_targets "")
+foreach(_cmake_expected_target IN ITEMS spirv-cross-util)
+  list(APPEND _cmake_expected_targets "${_cmake_expected_target}")
+  if(TARGET "${_cmake_expected_target}")
+    list(APPEND _cmake_targets_defined "${_cmake_expected_target}")
+  else()
+    list(APPEND _cmake_targets_not_defined "${_cmake_expected_target}")
+  endif()
+endforeach()
+unset(_cmake_expected_target)
+if(_cmake_targets_defined STREQUAL _cmake_expected_targets)
+  unset(_cmake_targets_defined)
+  unset(_cmake_targets_not_defined)
+  unset(_cmake_expected_targets)
+  unset(CMAKE_IMPORT_FILE_VERSION)
+  cmake_policy(POP)
+  return()
+endif()
+if(NOT _cmake_targets_defined STREQUAL "")
+  string(REPLACE ";" ", " _cmake_targets_defined_text "${_cmake_targets_defined}")
+  string(REPLACE ";" ", " _cmake_targets_not_defined_text "${_cmake_targets_not_defined}")
+  message(FATAL_ERROR "Some (but not all) targets in this export set were already defined.\nTargets Defined: ${_cmake_targets_defined_text}\nTargets not yet defined: ${_cmake_targets_not_defined_text}\n")
+endif()
+unset(_cmake_targets_defined)
+unset(_cmake_targets_not_defined)
+unset(_cmake_expected_targets)
+
+
+# Compute the installation prefix relative to this file.
+get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+if(_IMPORT_PREFIX STREQUAL "/")
+  set(_IMPORT_PREFIX "")
+endif()
+
+# Create imported target spirv-cross-util
+add_library(spirv-cross-util STATIC IMPORTED)
+
+set_target_properties(spirv-cross-util PROPERTIES
+  INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include/spirv_cross"
+  INTERFACE_LINK_LIBRARIES "\$"
+)
+
+# Load information for each installed configuration.
+file(GLOB _cmake_config_files "${CMAKE_CURRENT_LIST_DIR}/spirv_cross_utilConfig-*.cmake")
+foreach(_cmake_config_file IN LISTS _cmake_config_files)
+  include("${_cmake_config_file}")
+endforeach()
+unset(_cmake_config_file)
+unset(_cmake_config_files)
+
+# Cleanup temporary variables.
+set(_IMPORT_PREFIX)
+
+# Loop over all imported files and verify that they actually exist
+foreach(_cmake_target IN LISTS _cmake_import_check_targets)
+  if(CMAKE_VERSION VERSION_LESS "3.28"
+      OR NOT DEFINED _cmake_import_check_xcframework_for_${_cmake_target}
+      OR NOT IS_DIRECTORY "${_cmake_import_check_xcframework_for_${_cmake_target}}")
+    foreach(_cmake_file IN LISTS "_cmake_import_check_files_for_${_cmake_target}")
+      if(NOT EXISTS "${_cmake_file}")
+        message(FATAL_ERROR "The imported target \"${_cmake_target}\" references the file
+   \"${_cmake_file}\"
+but this file does not exist.  Possible reasons include:
+* The file was deleted, renamed, or moved to another location.
+* An install or uninstall procedure did not complete successfully.
+* The installation package was faulty and contained
+   \"${CMAKE_CURRENT_LIST_FILE}\"
+but not all the files it references.
+")
+      endif()
+    endforeach()
+  endif()
+  unset(_cmake_file)
+  unset("_cmake_import_check_files_for_${_cmake_target}")
+endforeach()
+unset(_cmake_target)
+unset(_cmake_import_check_targets)
+
+# Make sure the targets which have been exported in some other
+# export set exist.
+unset(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets)
+foreach(_target "spirv-cross-core" )
+  if(NOT TARGET "${_target}" )
+    set(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets "${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets} ${_target}")
+  endif()
+endforeach()
+
+if(DEFINED ${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets)
+  if(CMAKE_FIND_PACKAGE_NAME)
+    set( ${CMAKE_FIND_PACKAGE_NAME}_FOUND FALSE)
+    set( ${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE "The following imported targets are referenced, but are missing: ${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets}")
+  else()
+    message(FATAL_ERROR "The following imported targets are referenced, but are missing: ${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets}")
+  endif()
+endif()
+unset(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets)
+
+# Commands beyond this point should not need to know the version.
+set(CMAKE_IMPORT_FILE_VERSION)
+cmake_policy(POP)
diff --git a/local_packages/taichi-1.7.4.dist-info/INSTALLER b/local_packages/taichi-1.7.4.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/local_packages/taichi-1.7.4.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/local_packages/taichi-1.7.4.dist-info/LICENSE b/local_packages/taichi-1.7.4.dist-info/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/local_packages/taichi-1.7.4.dist-info/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/local_packages/taichi-1.7.4.dist-info/METADATA b/local_packages/taichi-1.7.4.dist-info/METADATA
new file mode 100644
index 0000000..a446fb0
--- /dev/null
+++ b/local_packages/taichi-1.7.4.dist-info/METADATA
@@ -0,0 +1,236 @@
+Metadata-Version: 2.1
+Name: taichi
+Version: 1.7.4
+Summary: The Taichi Programming Language
+Home-page: https://github.com/taichi-dev/taichi
+Author: Taichi developers
+Author-email: yuanmhu@gmail.com
+License: Apache Software License (http://www.apache.org/licenses/LICENSE-2.0)
+Keywords: graphics,simulation
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Topic :: Software Development :: Compilers
+Classifier: Topic :: Multimedia :: Graphics
+Classifier: Topic :: Games/Entertainment :: Simulation
+Classifier: Intended Audience :: Science/Research
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Requires-Python: >=3.9,<4.0
+Description-Content-Type: text/markdown; charset=UTF-8
+License-File: LICENSE
+Requires-Dist: numpy
+Requires-Dist: colorama
+Requires-Dist: dill
+Requires-Dist: rich
+
+
+ +
+ +--- +[![Latest Release](https://img.shields.io/github/v/release/taichi-dev/taichi?color=blue&label=Latest%20Release)](https://github.com/taichi-dev/taichi/releases/latest) +[![downloads](https://pepy.tech/badge/taichi)](https://pepy.tech/project/taichi) +[![CI](https://github.com/taichi-dev/taichi/actions/workflows/testing.yml/badge.svg)](https://github.com/taichi-dev/taichi/actions/workflows/testing.yml) +[![Nightly Release](https://github.com/taichi-dev/taichi/actions/workflows/release.yml/badge.svg)](https://github.com/taichi-dev/taichi/actions/workflows/release.yml) +discord invitation link + +```shell +pip install taichi # Install Taichi Lang +ti gallery # Launch demo gallery +``` + +## What is Taichi Lang? + +Taichi Lang is an open-source, imperative, parallel programming language for high-performance numerical computation. It is embedded in Python and uses just-in-time (JIT) compiler frameworks, for example LLVM, to offload the compute-intensive Python code to the native GPU or CPU instructions. + + + +The language has broad applications spanning real-time physical simulation, numerical computation, augmented reality, artificial intelligence, vision and robotics, visual effects in films and games, general-purpose computing, and much more. + + + + + + + + + +[...More](#demos) + +## Why Taichi Lang? + +- Built around Python: Taichi Lang shares almost the same syntax with Python, allowing you to write algorithms with minimal language barrier. It is also well integrated into the Python ecosystem, including NumPy and PyTorch. +- Flexibility: Taichi Lang provides a set of generic data containers known as *SNode* (/ˈsnoʊd/), an effective mechanism for composing hierarchical, multi-dimensional fields. This can cover many use patterns in numerical simulation (e.g. [spatially sparse computing](https://docs.taichi-lang.org/docs/sparse)). +- Performance: With the `@ti.kernel` decorator, Taichi Lang's JIT compiler automatically compiles your Python functions into efficient GPU or CPU machine code for parallel execution. +- Portability: Write your code once and run it everywhere. Currently, Taichi Lang supports most mainstream GPU APIs, such as CUDA and Vulkan. +- ... and many more features! A cross-platform, Vulkan-based 3D visualizer, [differentiable programming](https://docs.taichi-lang.org/docs/differentiable_programming), [quantized computation](https://github.com/taichi-dev/quantaichi) (experimental), etc. + +## Getting Started + +### Installation + +
+ Prerequisites + + + +- Operating systems + - Windows + - Linux + - macOS +- Python: 3.6 ~ 3.10 (64-bit only) +- Compute backends + - x64/ARM CPUs + - CUDA + - Vulkan + - OpenGL (4.3+) + - Apple Metal + - WebAssembly (experiemental) +
+ +Use Python's package installer **pip** to install Taichi Lang: + +```bash +pip install --upgrade taichi +``` + +*We also provide a nightly package. Note that nightly packages may crash because they are not fully tested. We cannot guarantee their validity, and you are at your own risk trying out our latest, untested features. The nightly packages can be installed from our self-hosted PyPI (Using self-hosted PyPI allows us to provide more frequent releases over a longer period of time)* + +```bash +pip install -i https://pypi.taichi.graphics/simple/ taichi-nightly +``` + +### Run your "Hello, world!" + +Here is how you can program a 2D fractal in Taichi: + +```py +# python/taichi/examples/simulation/fractal.py + +import taichi as ti + +ti.init(arch=ti.gpu) + +n = 320 +pixels = ti.field(dtype=float, shape=(n * 2, n)) + + +@ti.func +def complex_sqr(z): + return ti.Vector([z[0]**2 - z[1]**2, z[1] * z[0] * 2]) + + +@ti.kernel +def paint(t: float): + for i, j in pixels: # Parallelized over all pixels + c = ti.Vector([-0.8, ti.cos(t) * 0.2]) + z = ti.Vector([i / n - 1, j / n - 0.5]) * 2 + iterations = 0 + while z.norm() < 20 and iterations < 50: + z = complex_sqr(z) + c + iterations += 1 + pixels[i, j] = 1 - iterations * 0.02 + + +gui = ti.GUI("Julia Set", res=(n * 2, n)) + +for i in range(1000000): + paint(i * 0.03) + gui.set_image(pixels) + gui.show() +``` + +*If Taichi Lang is properly installed, you should get the animation below 🎉:* + + + +See [Get started](https://docs.taichi-lang.org) for more information. + +### Build from source + +If you wish to try our experimental features or build Taichi Lang for your own environments, see [Developer installation](https://docs.taichi-lang.org/docs/dev_install). + +## Documentation + +- [Technical documents](https://docs.taichi-lang.org/) +- [API Reference](https://docs.taichi-lang.org/api/) +- [Blog](https://docs.taichi-lang.org/blog) + +## Community activity [![Time period](https://images.repography.com/32602247/taichi-dev/taichi/recent-activity/RlhQybvihwEjfE7ngXyQR9tudBDYAvl27v-NVNMxUrg_badge.svg)](https://repography.com) +[![Timeline graph](https://images.repography.com/32602247/taichi-dev/taichi/recent-activity/RlhQybvihwEjfE7ngXyQR9tudBDYAvl27v-NVNMxUrg_timeline.svg)](https://github.com/taichi-dev/taichi/commits) +[![Issue status graph](https://images.repography.com/32602247/taichi-dev/taichi/recent-activity/RlhQybvihwEjfE7ngXyQR9tudBDYAvl27v-NVNMxUrg_issues.svg)](https://github.com/taichi-dev/taichi/issues) +[![Pull request status graph](https://images.repography.com/32602247/taichi-dev/taichi/recent-activity/RlhQybvihwEjfE7ngXyQR9tudBDYAvl27v-NVNMxUrg_prs.svg)](https://github.com/taichi-dev/taichi/pulls) +[![Trending topics](https://images.repography.com/32602247/taichi-dev/taichi/recent-activity/RlhQybvihwEjfE7ngXyQR9tudBDYAvl27v-NVNMxUrg_words.svg)](https://github.com/taichi-dev/taichi/commits) + +## Contributing + +Kudos to all of our amazing contributors! Taichi Lang thrives through open-source. In that spirit, we welcome all kinds of contributions from the community. If you would like to participate, check out the [Contribution Guidelines](CONTRIBUTING.md) first. + + + +*Contributor avatars are randomly shuffled.* + +## License + +Taichi Lang is distributed under the terms of Apache License (Version 2.0). + +See [Apache License](https://github.com/taichi-dev/taichi/blob/master/LICENSE) for details. + +## Community + +For more information about the events or community, please refer to [this page](https://github.com/taichi-dev/community) + + +### Join our discussions + +- [Discord](https://discord.gg/f25GRdXRfg) +- [GitHub Discussions](https://github.com/taichi-dev/taichi/discussions) +- [太极编程语言中文论坛](https://forum.taichi.graphics/) + +### Report an issue + +- If you spot an technical or documentation issue, file an issue at [GitHub Issues](https://github.com/taichi-dev/taichi/issues) +- If you spot any security issue, mail directly to security@taichi.graphics. + +### Contact us + +- [Discord](https://discord.gg/f25GRdXRfg) +- [WeChat](https://forum.taichi-lang.cn/t/topic/2884) + +## Reference + +### Demos + +- [Nerf with Taichi](https://github.com/taichi-dev/taichi-nerfs) +- [Taichi Lang examples](https://github.com/taichi-dev/taichi/tree/master/python/taichi/examples) +- [Advanced Taichi Lang examples](https://github.com/taichi-dev/advanced_examples) +- [Awesome Taichi](https://github.com/taichi-dev/awesome-taichi) +- [DiffTaichi](https://github.com/taichi-dev/difftaichi) +- [Taichi elements](https://github.com/taichi-dev/taichi_elements) +- [Taichi Houdini](https://github.com/taichi-dev/taichi_houdini) +- [More...](misc/links.md) + + +### AOT deployment + +- [Taichi AOT demos & tutorial](https://github.com/taichi-dev/taichi-aot-demo/) + + +### Lectures & talks + +- SIGGRAPH 2020 course on Taichi basics: [YouTube](https://youtu.be/Y0-76n3aZFA), [Bilibili](https://www.bilibili.com/video/BV1kA411n7jk/), [slides (pdf)](https://yuanming.taichi.graphics/publication/2020-taichi-tutorial/taichi-tutorial.pdf). +- Chinagraph 2020 用太极编写物理引擎: [哔哩哔哩](https://www.bilibili.com/video/BV1gA411j7H5) +- GAMES 201 高级物理引擎实战指南 2020: [课件](https://github.com/taichi-dev/games201) +- 太极图形课第一季:[课件](https://github.com/taichiCourse01) +- [TaichiCon](https://github.com/taichi-dev/taichicon): Taichi Developer Conferences +- More to come... + +### Citations + +If you use Taichi Lang in your research, please cite the corresponding papers: + +- [**(SIGGRAPH Asia 2019) Taichi: High-Performance Computation on Sparse Data Structures**](https://yuanming.taichi.graphics/publication/2019-taichi/taichi-lang.pdf) [[Video]](https://youtu.be/wKw8LMF3Djo) [[BibTex]](https://raw.githubusercontent.com/taichi-dev/taichi/master/misc/taichi_bibtex.txt) [[Code]](https://github.com/taichi-dev/taichi) +- [**(ICLR 2020) DiffTaichi: Differentiable Programming for Physical Simulation**](https://arxiv.org/abs/1910.00935) [[Video]](https://www.youtube.com/watch?v=Z1xvAZve9aE) [[BibTex]](https://raw.githubusercontent.com/taichi-dev/taichi/master/misc/difftaichi_bibtex.txt) [[Code]](https://github.com/yuanming-hu/difftaichi) +- [**(SIGGRAPH 2021) QuanTaichi: A Compiler for Quantized Simulations**](https://yuanming.taichi.graphics/publication/2021-quantaichi/quantaichi.pdf) [[Video]](https://www.youtube.com/watch?v=0jdrAQOxJlY) [[BibTex]](https://raw.githubusercontent.com/taichi-dev/taichi/master/misc/quantaichi_bibtex.txt) [[Code]](https://github.com/taichi-dev/quantaichi) diff --git a/local_packages/taichi-1.7.4.dist-info/RECORD b/local_packages/taichi-1.7.4.dist-info/RECORD new file mode 100644 index 0000000..18b8c24 --- /dev/null +++ b/local_packages/taichi-1.7.4.dist-info/RECORD @@ -0,0 +1,524 @@ +../../SPIRV-Tools-diff/cmake/SPIRV-Tools-diffConfig.cmake,sha256=3S1hOLuDTsAwTH-F_Xn9iBdCjjOH6Kx3EjNhDXBVShw,280 +../../SPIRV-Tools-diff/cmake/SPIRV-Tools-diffTargets-release.cmake,sha256=6fnDD1oaU-DEP4JIq75wotq4OSPW63pe5jEcTVwEtyo,890 +../../SPIRV-Tools-diff/cmake/SPIRV-Tools-diffTargets.cmake,sha256=U1RmMclqSVR8vePS98vpT2RqItqnivv5tXbfM58i9V0,5068 +../../SPIRV-Tools-link/cmake/SPIRV-Tools-linkConfig.cmake,sha256=HgRoCSWZQhGnNEs938xBFDSdxK6crL6VtbrQ9-DqcMY,280 +../../SPIRV-Tools-link/cmake/SPIRV-Tools-linkTargets-release.cmake,sha256=cqDRYOLX02s-xo8EKyuGQwYX6uNa2Lxb363OP0tUB54,890 +../../SPIRV-Tools-link/cmake/SPIRV-Tools-linkTargets.cmake,sha256=nlQJ33ZSrKM5w-xi7lYlopolM5lwcX4Yyqrytlbvj5c,5028 +../../SPIRV-Tools-lint/cmake/SPIRV-Tools-lintConfig.cmake,sha256=E0o9wMS7mOV58CacqGpW5EOyG70WrdBKexx1AFlCyiQ,280 +../../SPIRV-Tools-lint/cmake/SPIRV-Tools-lintTargets-release.cmake,sha256=3QC3rN6BhZ4n5-WrR9pmu4AfH1QKFIGCCXZjPA3A8c0,890 +../../SPIRV-Tools-lint/cmake/SPIRV-Tools-lintTargets.cmake,sha256=X2bmbqKDvgHDUaYEjssse8Rgk_ZsETuMMS3EaX2gmo0,5068 +../../SPIRV-Tools-opt/cmake/SPIRV-Tools-optConfig.cmake,sha256=CYygoz_0kxDnsT7_NbByRCuviPMS0TwwK86Qwc6tnnw,275 +../../SPIRV-Tools-opt/cmake/SPIRV-Tools-optTargets-release.cmake,sha256=vSAY69UsznqGYYqEUdL255U2B3LUZl9PSexaQwmPECw,883 +../../SPIRV-Tools-opt/cmake/SPIRV-Tools-optTargets.cmake,sha256=5Erfqk7DZeEa2sveBBlRw8QwBrdBZ55GDdo14DdNxpI,5029 +../../SPIRV-Tools-reduce/cmake/SPIRV-Tools-reduceConfig.cmake,sha256=i2lpvjP36I_6iEh5mObgdPMz5UOHQ4wIIOcbP5NIjTk,290 +../../SPIRV-Tools-reduce/cmake/SPIRV-Tools-reduceTarget-release.cmake,sha256=D2c9eUqoh3H-ihr_1up2Ppo_2nSLoNyxmo9eWUV_pGE,904 +../../SPIRV-Tools-reduce/cmake/SPIRV-Tools-reduceTarget.cmake,sha256=_Miasn_g_L8dG2fTiigOC-rrDc3aKgHy3IfMi7DB2b4,5077 +../../SPIRV-Tools/cmake/SPIRV-ToolsConfig.cmake,sha256=jFprL3mwRRYJ0eh52yZqmUAWD9cA7xA7hhstBQU_scY,230 +../../SPIRV-Tools/cmake/SPIRV-ToolsTarget-release.cmake,sha256=X438aOEvZsAeo6Suk2LfStdal8R5m3UyfUPCg51DXGM,1477 +../../SPIRV-Tools/cmake/SPIRV-ToolsTarget.cmake,sha256=mriRCzvolwIXT55WDaD2ouu6AQvVHXQbRdJPmj0ZtSg,4468 +../../bin/SPIRV-Tools-shared.dll,sha256=k2fVp3-Mldqvtcq2jCU6Q0EYn0jf4rovqPps8rZgKLQ,2035712 +../../bin/ti.exe,sha256=fdxlRQuppoCYFHvNsSYZ3SHNoz7yO4LTvnmsx22ELHk,108384 +../../include/GLFW/glfw3.h,sha256=QIB4KnbXSK4tfZ99LqTfJdn-ObkYf5vp70ee9uBPzFQ,240786 +../../include/GLFW/glfw3native.h,sha256=3hGJGACURjjhav5xcOb9F1fW76uPjfAzoXZlK3UUxAc,19397 +../../include/spirv-tools/instrument.hpp,sha256=92ifeZiADg0tWjsnYsVqQoH5KbIxxnz3AK-S9r0ln2A,11917 +../../include/spirv-tools/libspirv.h,sha256=3rvQUbgMs9h3jxnC4OLRyPN-lWvqSjHgy7tS3ZkOYvI,42688 +../../include/spirv-tools/libspirv.hpp,sha256=TMJs7spPKjYx0FlnorbOSany4R1k2bk8B4wOi_vttic,15012 +../../include/spirv-tools/linker.hpp,sha256=SKevy72q3RRFOdqQotIb08h9NNMfAnNMKmSF7B20d0A,3691 +../../include/spirv-tools/optimizer.hpp,sha256=5MPYUrk-hRM6tbFkCr2IUKsukO8eAPjA9_DE-ib4Lnk,50060 +../../include/spirv_cross/GLSL.std.450.h,sha256=N-ApraJYklysVlU6mqcgoui6O4izYk6fhBgNXt3E77I,3187 +../../include/spirv_cross/spirv.h,sha256=fMfM5AFqmKSrgJzwzuur-zvKo1XACOD0FqJPgb2BCiY,134925 +../../include/spirv_cross/spirv.hpp,sha256=AVXt851z6atxQrCE6IInasE9VmDmvWEdJY7_biboia8,127741 +../../include/spirv_cross/spirv_cfg.hpp,sha256=u3w0mxv3mZRs_9qFddAQjNNzIWGns-OhunZJWoEgwEQ,3995 +../../include/spirv_cross/spirv_common.hpp,sha256=mm5do62VgPTBzS4k6opryOAvKqJO-6ouGVGc8Y1Qbx0,49732 +../../include/spirv_cross/spirv_cpp.hpp,sha256=dPRMD5Blvn13TntF_RVyQwhEf_9bR-zxfaeRFw12RkM,2730 +../../include/spirv_cross/spirv_cross.hpp,sha256=T9SN1c63ZA5S38qxQ_XDF_kOqyfdiNvpWJiBE12hync,52077 +../../include/spirv_cross/spirv_cross_c.h,sha256=U0DKDgiDFVVh19TVbpgR_l4__x5R8ZeKN_xOdxELmAc,52561 +../../include/spirv_cross/spirv_cross_containers.hpp,sha256=NVMq5bIB3Meyk3OQcJ9W4qNfCMr3RfGmt4TIUGvfdfE,18714 +../../include/spirv_cross/spirv_cross_error_handling.hpp,sha256=24aFl4Z2120x-cIXAUCX9N06T8hTzlNM0cjKOeCeQiw,2715 +../../include/spirv_cross/spirv_cross_parsed_ir.hpp,sha256=5q-j9FbQRizSzX58xumpXyeJbq9UtWUnuWCcLXDV9xw,9526 +../../include/spirv_cross/spirv_cross_util.hpp,sha256=328JGpBh9WkOIEV_KbKC94SJAU_FxAxROCaO4mhgc14,1439 +../../include/spirv_cross/spirv_glsl.hpp,sha256=J6RnJ-3I4M5HH0DSGk3eJeDPgXBb6kQhPsnKEsgFFtE,48619 +../../include/spirv_cross/spirv_hlsl.hpp,sha256=ICMgxSpsO_0-Ypye82o6iMF5PLfE66T9oEOgyv8Efmk,16789 +../../include/spirv_cross/spirv_msl.hpp,sha256=XxZhWCiVT_yCh6y5FcLe0wj6EldxR_IlJzu-lctYp0c,61477 +../../include/spirv_cross/spirv_parser.hpp,sha256=nx8ZMSo03J_Uo6-GuqPKQRFO5NQPQxsVa8HNBWuRZdY,2736 +../../include/spirv_cross/spirv_reflect.hpp,sha256=jGvuJ3v03dcFCZ792wV7uaInSz_xptTHNyGM5OsVoXY,2594 +../../share/spirv_cross_c/cmake/spirv_cross_cConfig-release.cmake,sha256=367KoaO0FOR9tg0z4q8yydNV5EUT3_79FeYt7GnQcj0,869 +../../share/spirv_cross_c/cmake/spirv_cross_cConfig.cmake,sha256=jsmLfo20Js6Lr8VQSfWQZTw1AzIJfQIcEWahY44vpBc,5307 +../../share/spirv_cross_core/cmake/spirv_cross_coreConfig-release.cmake,sha256=Vr46KADE5z9zfNejHjiKI7tkycy9zmz6_s_tzziFfUg,890 +../../share/spirv_cross_core/cmake/spirv_cross_coreConfig.cmake,sha256=zr2r3FFmJSwzAyfOpxfIqn1adUVglA8DjY_DqoIGcqY,4251 +../../share/spirv_cross_cpp/cmake/spirv_cross_cppConfig-release.cmake,sha256=7-cy6JUKittZDmzF-oHuANzpUZ9ndXF1GmkgsCd68PI,883 +../../share/spirv_cross_cpp/cmake/spirv_cross_cppConfig.cmake,sha256=cBeItic08VF69H9_74DeM9rx6RO-54LakZ1V7bYgMuc,5115 +../../share/spirv_cross_glsl/cmake/spirv_cross_glslConfig-release.cmake,sha256=Gq-M4tIDX1FNpMrBAGJRL54inkseUQFEbhnUXYN2dvk,890 +../../share/spirv_cross_glsl/cmake/spirv_cross_glslConfig.cmake,sha256=isH6qg36WSkQSbMMi76smARsuQBlyUz26zRerMbiytY,5120 +../../share/spirv_cross_hlsl/cmake/spirv_cross_hlslConfig-release.cmake,sha256=COhhjrFrZFkmydraLJSMhU6pih9lzy3rlS9SkbS7Gmk,890 +../../share/spirv_cross_hlsl/cmake/spirv_cross_hlslConfig.cmake,sha256=gbNh_B5wxNl74N355CHNgOpVjASkKwJ-AgjemFvCvs4,5120 +../../share/spirv_cross_msl/cmake/spirv_cross_mslConfig-release.cmake,sha256=ZMFRgqWTimwjE8H6Kmxux_NPwyhaEYxSTsQK0-Jmshc,883 +../../share/spirv_cross_msl/cmake/spirv_cross_mslConfig.cmake,sha256=HRnYqQQCZEM-1cHLvx72FAI_nCOaFwuQd9JQDDBSYzE,5115 +../../share/spirv_cross_reflect/cmake/spirv_cross_reflectConfig-release.cmake,sha256=uQ8nU_YP17ydl8wqZ31Go7kf6cqJfmY1qvUS_xUlGS8,911 +../../share/spirv_cross_reflect/cmake/spirv_cross_reflectConfig.cmake,sha256=ARFyln9-7iQz_Xf3Rh06mgt6tv3QaITQelJQgecYJAU,4266 +../../share/spirv_cross_util/cmake/spirv_cross_utilConfig-release.cmake,sha256=FEe8a1a1hfcWUjt01O_s39zbaS9rASdZCBpuuo9AUgo,890 +../../share/spirv_cross_util/cmake/spirv_cross_utilConfig.cmake,sha256=CAufGwJV0wLQzb5m3l4Yt9wmnydxAva-iaU68qVrlAI,5120 +../SPIRV-Tools-diff.lib,sha256=4GqtGgeNuy8VOsIsOlMLIi8yRsJ9zGr0nbh9GKWVLBA,4843666 +../SPIRV-Tools-link.lib,sha256=UXox28ELXAkqK1V155DsjzB6iQEYna8klfn_TWClXlQ,4145588 +../SPIRV-Tools-lint.lib,sha256=bNQl3uq7M67bltv4jn-kIxUr8EP55tVtbpJJ1d6B2dw,4859250 +../SPIRV-Tools-opt.lib,sha256=u_5gDdHt2biYYzaRW7S40PImKaWjZvpCI7E2IYvtaCU,157007784 +../SPIRV-Tools-reduce.lib,sha256=DZfOz5oezClIGj8VCKYZWx6vxYv3qEgnuFa_50B4zYM,15356038 +../SPIRV-Tools-shared.lib,sha256=TOR7Jm9vKCPfC4HYFVvyH_ifVSgX5St0uGnMtPzy1OU,16470 +../SPIRV-Tools.lib,sha256=goBwsrdsS4PeMa_qX5aIFogjg-9I18lgS4IwpD2b2mc,37543236 +../cmake/glfw3/glfw3Config.cmake,sha256=YZVL9F2A70B51dNW0W--Scv9ga3cbyZYjlsaDCs-zHk,118 +../cmake/glfw3/glfw3ConfigVersion.cmake,sha256=mttbBnVYyxo57F-pEE8g4-E62lcH8xYujvlmTQyD7ug,2827 +../cmake/glfw3/glfw3Targets-release.cmake,sha256=VlH9iN0TgA6MlsPimvnOgTG0gxzsaoGj-RmWEqd9QHA,806 +../cmake/glfw3/glfw3Targets.cmake,sha256=D3NWABsZAPn6vu3kAtXXUo3Lqql5NiGhSHVqtwg3UOw,4242 +../glfw3.lib,sha256=qmyW4IEGIBExMvskDLsjqNEaMILyXE-JaTkPtEG8q9A,700628 +../spirv-cross-c.lib,sha256=8wyzW631cg__6vuokQT5PfweS2fMrF0uhkHN_PMn0tI,2777756 +../spirv-cross-core.lib,sha256=MMwK0bnnheManTgWkKFg9rgs1saRQLbBNEyl66NmBLo,9954558 +../spirv-cross-cpp.lib,sha256=ULHUXNKk5o9ozE2reDMDMEuFAJNOb1qmntwCgNr_chY,1412740 +../spirv-cross-glsl.lib,sha256=VHAvT23yLdvVKexlojvazn1gauYCpAFZzi25WanLhcM,10305866 +../spirv-cross-hlsl.lib,sha256=myMwLg5l0jddSmEhV8O6QTWxoKhRp-4tz4E_qmUK2Eo,6544248 +../spirv-cross-msl.lib,sha256=CGFJaglfG8qb4T2yUvmRl_ixLYWimzSOrsUCnla9M4M,16755834 +../spirv-cross-reflect.lib,sha256=yCliWAmP56birwX0RzNf2m0ctiURr1diLdzioHIlunQ,1374674 +../spirv-cross-util.lib,sha256=PgKkHUPPP7mxL99vDV75CVJ6LgvxbPkgf4De8D8Zheo,158366 +taichi-1.7.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +taichi-1.7.4.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558 +taichi-1.7.4.dist-info/METADATA,sha256=kQtwH-s38BpcjvIHzz2sybDMZwtCIi5c17mJddl7Tag,12772 +taichi-1.7.4.dist-info/RECORD,, +taichi-1.7.4.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +taichi-1.7.4.dist-info/WHEEL,sha256=3cUY9TiuvxHsB0dWaW6wa9MO8pP_yfFeO2_t7ly9aNg,96 +taichi-1.7.4.dist-info/entry_points.txt,sha256=aCsfvbmeUJTvIaaS7AlWore9Kk9y972OriDvM8vj11w,41 +taichi-1.7.4.dist-info/top_level.txt,sha256=M4SUucyO8TtnY975SVXLwvzaXv4bMRa1VYX32gVzUvU,7 +taichi/CHANGELOG.md,sha256=V1doQiRw-1MtNqt7otpq4GNslcBe_t0e_FZ6zxjpMIo,76 +taichi/__init__.py,sha256=41vrCjoiXGSSSkPeZR92d6y7t2CjO6GisKDBUXfE0x8,1144 +taichi/__main__.py,sha256=Jrxoqfig-PJi7nkYspHT-AiRnTB00CcLAIqHp7T5pvw,35 +taichi/__pycache__/__init__.cpython-312.pyc,, +taichi/__pycache__/__main__.cpython-312.pyc,, +taichi/__pycache__/_funcs.cpython-312.pyc,, +taichi/__pycache__/_kernels.cpython-312.pyc,, +taichi/__pycache__/_logging.cpython-312.pyc,, +taichi/__pycache__/_main.cpython-312.pyc,, +taichi/__pycache__/_version_check.cpython-312.pyc,, +taichi/__pycache__/experimental.cpython-312.pyc,, +taichi/_funcs.py,sha256=x1DJXRXrUHFdkFOPiUyM1GLzGkXIV35AL19gCsRgOL0,23909 +taichi/_kernels.py,sha256=zsYLABmwz87XqqFmoLo-MWhABzc0mQ1Gt0PrO3hsW7I,14620 +taichi/_lib/__init__.py,sha256=MMxmWFKXjpcETWaSXcqIOUXFHkRC0KzYUIsL8cmb0WM,54 +taichi/_lib/__pycache__/__init__.cpython-312.pyc,, +taichi/_lib/__pycache__/utils.cpython-312.pyc,, +taichi/_lib/c_api/bin/taichi_c_api.dll,sha256=iysY6HHKNvCmyFlTzB8baTJDbXoChFzaMQa4YCgetYY,55419392 +taichi/_lib/c_api/include/taichi/cpp/taichi.hpp,sha256=-_ypTHvNb5_EwQuvAv_9JUBec3GsO6ConNsNL6GSAf4,41667 +taichi/_lib/c_api/include/taichi/taichi.h,sha256=CWKE41rZTZrfd4mIKL8MfY3D705JChQz9hZ5XrktYmk,515 +taichi/_lib/c_api/include/taichi/taichi_core.h,sha256=d_M6zX2YUWKvu4XzVBKQtdRg1lZ1tXTw37lq1AGmH-w,39906 +taichi/_lib/c_api/include/taichi/taichi_cpu.h,sha256=2nAPbUwnxlw0VulLnYAoKvirVuzOEKqtErbi9TKSfDI,776 +taichi/_lib/c_api/include/taichi/taichi_cuda.h,sha256=PqbTQ6HqfvVizP_rOjugddDrDfWecK5YoJQRrbijBIo,1029 +taichi/_lib/c_api/include/taichi/taichi_opengl.h,sha256=5No5Wjuwmwmt5M8U5l-GrJUD_9FRDGAXtPF9w7s01ZE,1950 +taichi/_lib/c_api/include/taichi/taichi_platform.h,sha256=k1TWqLHe9RGKLqUvv3Zx9YZq3I9wDg07opxbs2EA3NM,1351 +taichi/_lib/c_api/include/taichi/taichi_unity.h,sha256=kmTCD28keOLJxfWPU7nmJlTtRW5yeBJ0VviZKqTRiV4,2420 +taichi/_lib/c_api/include/taichi/taichi_vulkan.h,sha256=iCoRIOFkul0gGR3gSgxNQuF9sW9i2gVK3ZXRrq9kL-0,5464 +taichi/_lib/c_api/lib/taichi_c_api.lib,sha256=NXIG71gzykX8VDgy7X0YUgDQYd5TFQUUuKRvoZCXuoI,405408 +taichi/_lib/c_api/runtime/runtime_cuda.bc,sha256=-n1-p5Cn_xYem3lbsfQ0m02Cf3C5waGvidEVKRIzhGs,156796 +taichi/_lib/c_api/runtime/runtime_dx12.bc,sha256=fZZoQ-4zKZQAfgaw28rCyxCQ8fbo4sPRXbwzm2DM9DY,149236 +taichi/_lib/c_api/runtime/runtime_x64.bc,sha256=nMEuhswvtXtfxIH1KtFqOxjrvMK2gya31V3zODff654,141364 +taichi/_lib/c_api/runtime/slim_libdevice.10.bc,sha256=9dlDFXMV4uIAIKy-1yU7ppNV_oMdIHDR9YqOT_7sfaw,171892 +taichi/_lib/c_api/taichi/lib/cmake/taichi/TaichiConfig.cmake,sha256=EsFymp0dkoCf7PcF8UQLJZxhTBnBTxcLQTzagmRKYLI,1019 +taichi/_lib/c_api/taichi/lib/cmake/taichi/TaichiConfigVersion.cmake,sha256=eNIUcoCklApgQQDKz05GyNRme1KPjt3UtVU1ra9A65A,2827 +taichi/_lib/c_api/taichi/lib/cmake/taichi/TaichiTargets.cmake,sha256=IPEleeqobY76p1XsQ45tKquzOdL9Tb47Xr9mdQ6vXao,4946 +taichi/_lib/core/taichi_python.cp312-win_amd64.pyd,sha256=LgO5lVDNkfiAzYM3JCsDjlvphNC0SCd57OB3ZQWjV2c,89634304 +taichi/_lib/runtime/runtime_cuda.bc,sha256=-n1-p5Cn_xYem3lbsfQ0m02Cf3C5waGvidEVKRIzhGs,156796 +taichi/_lib/runtime/runtime_dx12.bc,sha256=fZZoQ-4zKZQAfgaw28rCyxCQ8fbo4sPRXbwzm2DM9DY,149236 +taichi/_lib/runtime/runtime_x64.bc,sha256=nMEuhswvtXtfxIH1KtFqOxjrvMK2gya31V3zODff654,141364 +taichi/_lib/runtime/slim_libdevice.10.bc,sha256=9dlDFXMV4uIAIKy-1yU7ppNV_oMdIHDR9YqOT_7sfaw,171892 +taichi/_lib/utils.py,sha256=2x9kG7xi7LRoxY-_1fNuYZGW4HR3k2V4Om_SJJUvVhU,8040 +taichi/_logging.py,sha256=JmkpnZvsaxq-j3SjKOBfOuH4wIs1BztE7P_qhPbD5tw,3793 +taichi/_main.py,sha256=qb0laTdjvXjt1PGl1xPhY9eYcrUykuH0PYQ5pSMMIG4,30789 +taichi/_snode/__init__.py,sha256=VoxerCYA5Ag7kmrRmG6358Sw2Zzntf_l-5eNBqKI2UA,87 +taichi/_snode/__pycache__/__init__.cpython-312.pyc,, +taichi/_snode/__pycache__/fields_builder.cpython-312.pyc,, +taichi/_snode/__pycache__/snode_tree.cpython-312.pyc,, +taichi/_snode/fields_builder.py,sha256=oYGBL1DwwR_2ZZxdckqlVTm-rfJYUdyQHcrJd2xFHYo,6967 +taichi/_snode/snode_tree.py,sha256=dWiPeoYqMKcxahSSknzXQj77qjgfvM9F5kY-1VZd58o,1201 +taichi/_ti_module/__init__.py,sha256=C4J_Nk7JhN25IIqQwhWHHbwN894bpyCgLGZrW0pXjPs,44 +taichi/_ti_module/__pycache__/__init__.cpython-312.pyc,, +taichi/_ti_module/__pycache__/cppgen.cpython-312.pyc,, +taichi/_ti_module/__pycache__/module.cpython-312.pyc,, +taichi/_ti_module/cppgen.py,sha256=brhncczvSQlN2ay1w5SaziEvtWmycb9Q23evMhZ00eY,9929 +taichi/_ti_module/module.py,sha256=Bl1rEpCBs9N43DPux92SJuz381VoX3so5H8tXPGcZxc,5418 +taichi/_version_check.py,sha256=aGgd7nk3bVMduM6vxk6diXWLc7tkNPywOHfc77bmfaQ,3730 +taichi/ad/__init__.py,sha256=nvt_7HZeYCTUkuEeJkS2LxX5RODrdQe0BXHhGGLDab8,29 +taichi/ad/__pycache__/__init__.cpython-312.pyc,, +taichi/ad/__pycache__/_ad.cpython-312.pyc,, +taichi/ad/_ad.py,sha256=3WYoKbyGiQPFhlp8fvcldjbRHfIU84xdI-iKXFh9Ep8,19669 +taichi/algorithms/__init__.py,sha256=KuvRlXNV1gbVRNTNe_aU9tohrAoQJU1JHY5L_ADHsIw,28 +taichi/algorithms/__pycache__/__init__.cpython-312.pyc,, +taichi/algorithms/__pycache__/_algorithms.cpython-312.pyc,, +taichi/algorithms/_algorithms.py,sha256=0FjbjTTVYnShXpINBXECJQ5BLBoHQvKnVmPSS2u7tQ8,3790 +taichi/aot/__init__.py,sha256=uT7KNl3bZ2TkDn1vzOfKoLWu94urP26bHHqW5ZAOI8I,350 +taichi/aot/__pycache__/__init__.cpython-312.pyc,, +taichi/aot/__pycache__/_export.cpython-312.pyc,, +taichi/aot/__pycache__/module.cpython-312.pyc,, +taichi/aot/__pycache__/utils.cpython-312.pyc,, +taichi/aot/_export.py,sha256=_9_1zBN2xBIQn7-7esgVSRvPBQvyTggWau-QdOCBIHA,685 +taichi/aot/conventions/__init__.py,sha256=jL4k8gPlkZPEmHQJTnHKaEgL017qj791ScallmP4Fw0,50 +taichi/aot/conventions/__pycache__/__init__.cpython-312.pyc,, +taichi/aot/conventions/gfxruntime140/__init__.py,sha256=71juvd0brF0Bw1CmbM0FdyCGDrbZ2uOz94hnI6rYDTg,1384 +taichi/aot/conventions/gfxruntime140/__pycache__/__init__.cpython-312.pyc,, +taichi/aot/conventions/gfxruntime140/__pycache__/dr.cpython-312.pyc,, +taichi/aot/conventions/gfxruntime140/__pycache__/sr.cpython-312.pyc,, +taichi/aot/conventions/gfxruntime140/dr.py,sha256=uM69FA3C7ns53mMd_Ue9XnK-TOmuHMpyDFahxHvRDlc,8416 +taichi/aot/conventions/gfxruntime140/sr.py,sha256=xwwXLp1y97A6yMrTl4mRaNdTHDwZnEmfbwNDeo4_WuY,18701 +taichi/aot/module.py,sha256=0Yj6_jAi6IOr6XRTnopwg6ZXGW6QbnqPUIOTVBBjtvk,8578 +taichi/aot/utils.py,sha256=iaRjKmSnIVLVh4_4aBaemHNcEsC7uH8GPnOxFikYQ8Q,6271 +taichi/assets/.git,sha256=_sepXJOH-4A9o4jtu-46jG2P6XHHbsvGW3642aKNQBc,43 +taichi/assets/Go-Regular.ttf,sha256=S7gpWTE2QWxqOezcRUguBS91rDdOZFm5r2jU-6J5OWw,134988 +taichi/assets/static/imgs/ti_gallery.png,sha256=rOeh8JlUXb-ZdkTaei3cVyJXZBWOqcJBLdndAORUdtU,263871 +taichi/examples/__pycache__/minimal.cpython-312.pyc,, +taichi/examples/__pycache__/patterns.cpython-312.pyc,, +taichi/examples/algorithm/__pycache__/laplace.cpython-312.pyc,, +taichi/examples/algorithm/__pycache__/marching_squares.cpython-312.pyc,, +taichi/examples/algorithm/__pycache__/mciso_advanced.cpython-312.pyc,, +taichi/examples/algorithm/__pycache__/mgpcg.cpython-312.pyc,, +taichi/examples/algorithm/__pycache__/mgpcg_advanced.cpython-312.pyc,, +taichi/examples/algorithm/__pycache__/poisson_disk_sampling.cpython-312.pyc,, +taichi/examples/algorithm/__pycache__/print_offset.cpython-312.pyc,, +taichi/examples/algorithm/circle-packing/__pycache__/circle_packing_image.cpython-312.pyc,, +taichi/examples/algorithm/circle-packing/circle_packing_image.py,sha256=XUvdZOJd5cESEbs4fPNEK4zSqACswfFnPltKFIZ6Q1M,4092 +taichi/examples/algorithm/circle-packing/taichi_logo.png,sha256=tNHZH_EfWKnJzFafYKKJScZEanyAviz8jiEP1XyWrgQ,6390 +taichi/examples/algorithm/laplace.py,sha256=pr1VS12_TNl5fIdNFp41xmKN3eDsGE8BOfBkZV1kQJo,579 +taichi/examples/algorithm/marching_squares.py,sha256=TE2YflNz0ZCt7q5SJHRutvjHvFBWXp_RgCVQIBqkuZQ,5354 +taichi/examples/algorithm/mciso_advanced.py,sha256=r8isqbefP7WUWNHC3rC1GqI2hkhZlLh_ub2Fsk8_PZw,27835 +taichi/examples/algorithm/mgpcg.py,sha256=ZucqtQeDC4NTCysW9F_YoVsOv-wPsejSHL9BqD71jIs,5793 +taichi/examples/algorithm/mgpcg_advanced.py,sha256=twF4yKHUz_tiHClh2KYYlLQK_EMjmT1hu8ERqmkbnyg,8914 +taichi/examples/algorithm/poisson_disk_sampling.py,sha256=3-8PtsQYCHKFvVNDlMPQk91HfcWmhdYW-Ue41_RXIGU,5091 +taichi/examples/algorithm/print_offset.py,sha256=4Wq7xCNKct8MBsE6ZSx9EqIi9ZmccDX6_6Ujo_G_GAk,1034 +taichi/examples/autodiff/__pycache__/jacobian.cpython-312.pyc,, +taichi/examples/autodiff/__pycache__/minimization.cpython-312.pyc,, +taichi/examples/autodiff/__pycache__/regression.cpython-312.pyc,, +taichi/examples/autodiff/__pycache__/simple_derivative.cpython-312.pyc,, +taichi/examples/autodiff/diff_sph/__pycache__/diff_sph.cpython-312.pyc,, +taichi/examples/autodiff/diff_sph/diff_sph.py,sha256=DCZ5DYfMerNUpoYUXDbV52CWYAlNnoCLJc-JofbrFhY,26160 +taichi/examples/autodiff/diff_sph/fc1_pretrained.pkl,sha256=OLV5z6JuDWuE-G-3uTSj_SXk4kMNcHK9rddgBow8meo,711 +taichi/examples/autodiff/diff_sph/fc2_pretrained.pkl,sha256=-nK5AbAuK4o-bqlo71tvL7LHsATP60DgEIMs5tM2M9A,2375 +taichi/examples/autodiff/jacobian.py,sha256=I8wY3aLwmB97ywcI9AahHL5fNkJBC_tGwuThUHt8Yxo,1383 +taichi/examples/autodiff/minimization.py,sha256=s5gw_e79InKgrzNxMgcz1OivW9dNepzSJqRllqr9l3M,893 +taichi/examples/autodiff/regression.py,sha256=QGzbgzCrZVvVKWpWWreWVnaOUbeUPnAIywq_iHdTU_Q,2274 +taichi/examples/autodiff/simple_derivative.py,sha256=GgCI_uHytiGcNbFIRjqJK092pJ_mjYMqqSZbLlQcWWk,1227 +taichi/examples/features/gui/__pycache__/fullscreen.cpython-312.pyc,, +taichi/examples/features/gui/__pycache__/gui_image_io.cpython-312.pyc,, +taichi/examples/features/gui/__pycache__/gui_widgets.cpython-312.pyc,, +taichi/examples/features/gui/__pycache__/keyboard.cpython-312.pyc,, +taichi/examples/features/gui/fullscreen.py,sha256=r10Hgs5f0bLMX608vvuQFwY5LwggZXEe3R9_h4425vg,449 +taichi/examples/features/gui/gui_image_io.py,sha256=KwjbeWkbqLUNsvcAGePBbeyEM6AWqEVqSN8LSLSsgzg,592 +taichi/examples/features/gui/gui_widgets.py,sha256=UMBRyWcy8EFbdMZujVtkWHFHhM4iXW5bXh7qvYFm-jo,706 +taichi/examples/features/gui/keyboard.py,sha256=gE1Wdbt0dyaYtWkknCWsSM4rKUiEW-wYuco5dOzDHuI,1155 +taichi/examples/features/io/__pycache__/export_mesh.cpython-312.pyc,, +taichi/examples/features/io/__pycache__/export_ply.cpython-312.pyc,, +taichi/examples/features/io/__pycache__/export_videos.cpython-312.pyc,, +taichi/examples/features/io/export_mesh.py,sha256=Vbp5nPlDoAcXOhNMRg4JtYXBFiytueVGPV75T05omPs,3580 +taichi/examples/features/io/export_ply.py,sha256=h9T5YBlwZQdxbnhu83XA78dcFskFiIsUXyyTKawztXk,1305 +taichi/examples/features/io/export_videos.py,sha256=q1bjwtrTyw3MG_fKn24fojbGPfhZ_cWpE2L5uL9AVLM,844 +taichi/examples/features/sparse/__pycache__/explicit_activation.cpython-312.pyc,, +taichi/examples/features/sparse/__pycache__/taichi_bitmasked.cpython-312.pyc,, +taichi/examples/features/sparse/__pycache__/taichi_dynamic.cpython-312.pyc,, +taichi/examples/features/sparse/__pycache__/taichi_sparse.cpython-312.pyc,, +taichi/examples/features/sparse/__pycache__/tutorial.cpython-312.pyc,, +taichi/examples/features/sparse/explicit_activation.py,sha256=mGAuEuGTXhPTlw_RgmHHDoDHZugKzpHKy-rBGGHDZS0,1682 +taichi/examples/features/sparse/taichi_bitmasked.py,sha256=Lzvpxinc_Qvyv57S7GJXziDfwAlWkZvh0E_KsHTZfKk,1486 +taichi/examples/features/sparse/taichi_dynamic.py,sha256=OjYlwEP5BFWmXA5ieYIyiEKP084cCxeJyVk8cBgplJQ,560 +taichi/examples/features/sparse/taichi_sparse.py,sha256=_C-wPegNc2muhLd6pTNUmukw9hjT6ErEBPmKZiZ7lus,1429 +taichi/examples/features/sparse/tutorial.py,sha256=pjPvAo1ovVHcVE0XYXYkNMEYZXHoAi7BDei7a5K-p3w,524 +taichi/examples/ggui_examples/__pycache__/fem128_ggui.cpython-312.pyc,, +taichi/examples/ggui_examples/__pycache__/fractal3d_ggui.cpython-312.pyc,, +taichi/examples/ggui_examples/__pycache__/mass_spring_3d_ggui.cpython-312.pyc,, +taichi/examples/ggui_examples/__pycache__/mass_spring_game_ggui.cpython-312.pyc,, +taichi/examples/ggui_examples/__pycache__/mpm128_ggui.cpython-312.pyc,, +taichi/examples/ggui_examples/__pycache__/mpm3d_ggui.cpython-312.pyc,, +taichi/examples/ggui_examples/__pycache__/stable_fluid_ggui.cpython-312.pyc,, +taichi/examples/ggui_examples/fem128_ggui.py,sha256=CldvFVsvB-GeImPzCbU947NMgzWjvStLBZTavxaEWNw,5586 +taichi/examples/ggui_examples/fractal3d_ggui.py,sha256=iHqF-edFd0HAT4zhkQNT92lNiCoemYC52sRxAm5ENyc,4081 +taichi/examples/ggui_examples/mass_spring_3d_ggui.py,sha256=5QXv5Q3B5nV4ERA8bjKlEwVmFgpzD5pcZuZptQ1t7dg,4699 +taichi/examples/ggui_examples/mass_spring_game_ggui.py,sha256=xNmqjM7ZJMAUQlOBAVH-5M6yrIS_YLMwaNMVYCr85Jo,5867 +taichi/examples/ggui_examples/mpm128_ggui.py,sha256=Wi2feV6uaWqkBB3aKyQzQB_ygnQwjsgQC0GZi0Z1tpU,7640 +taichi/examples/ggui_examples/mpm3d_ggui.py,sha256=seDVtv_DIZ4hh2ZAUXTkUclgcAUBz8Yz0-avDOecjKE,10414 +taichi/examples/ggui_examples/stable_fluid_ggui.py,sha256=ubymPseW7QWgH-iTJEP_Fdu3_KZje1OGVT2PUX1n4CM,8348 +taichi/examples/graph/__pycache__/mpm88_graph.cpython-312.pyc,, +taichi/examples/graph/__pycache__/stable_fluid_graph.cpython-312.pyc,, +taichi/examples/graph/__pycache__/texture_graph.cpython-312.pyc,, +taichi/examples/graph/mpm88_graph.py,sha256=rfffrT-YYY8M2vqvQJsP4tJJz7KwDR7cz-m4pW2ZnRw,6308 +taichi/examples/graph/stable_fluid_graph.py,sha256=061WXVTzdFf8ok8ozGJGmEXeGW7P1YfNzcnHQD5bys8,11269 +taichi/examples/graph/texture_graph.py,sha256=sch9xVPLSExaak77gjmBcX4wf9QoCafD98gBlwk6QQA,2420 +taichi/examples/machine_learning/__pycache__/differential_evolution.cpython-312.pyc,, +taichi/examples/machine_learning/differential_evolution.py,sha256=kbyhS22xqzadI26g8IMCGxwKSjM_hyMgkC2XvcgIlrk,7523 +taichi/examples/minimal.py,sha256=KjPQBCPY7oWmTLmA3B0K2KmhDLwmvHuxGBXcVCUKYrQ,120 +taichi/examples/patterns.py,sha256=XjwdZjmofNXCZK6kGwYU-P6dxLr9zwPFYnWlZhjnOPs,887 +taichi/examples/real_func/algorithm/__pycache__/marching_squares.cpython-312.pyc,, +taichi/examples/real_func/algorithm/__pycache__/poisson_disk_sampling.cpython-312.pyc,, +taichi/examples/real_func/algorithm/marching_squares.py,sha256=GFsaVnu13oT5OEfnSttE5otsc_ouJ4_I0aDrOB0cjCo,5527 +taichi/examples/real_func/algorithm/poisson_disk_sampling.py,sha256=UxF-SrxqlFdBplHllc3bv9IDC58wgz4bpgqRHo07Fxk,5236 +taichi/examples/real_func/graph/__pycache__/stable_fluid_graph.cpython-312.pyc,, +taichi/examples/real_func/graph/stable_fluid_graph.py,sha256=u1xiZ6nY880IKVuI4xEY669vp888BuC-5QgSGzX8a1k,12405 +taichi/examples/real_func/rendering/__pycache__/cornell_box.cpython-312.pyc,, +taichi/examples/real_func/rendering/__pycache__/taichi_ngp.cpython-312.pyc,, +taichi/examples/real_func/rendering/cornell_box.py,sha256=odooE589tYvanAXbr-gGlIX8oQwYElxT6FJgp6I5MA0,6815 +taichi/examples/real_func/rendering/taichi_ngp.py,sha256=0rwTDO8zonqkoKhOYM5Nt9tVGAMe33zz4vanVxCPFrg,40836 +taichi/examples/rendering/__pycache__/cornell_box.cpython-312.pyc,, +taichi/examples/rendering/__pycache__/oit_renderer.cpython-312.pyc,, +taichi/examples/rendering/__pycache__/rasterizer.cpython-312.pyc,, +taichi/examples/rendering/__pycache__/sdf_renderer.cpython-312.pyc,, +taichi/examples/rendering/__pycache__/simple_texture.cpython-312.pyc,, +taichi/examples/rendering/__pycache__/simple_uv.cpython-312.pyc,, +taichi/examples/rendering/__pycache__/taichi_logo.cpython-312.pyc,, +taichi/examples/rendering/__pycache__/taichi_ngp.cpython-312.pyc,, +taichi/examples/rendering/cornell_box.py,sha256=mDUGw-IbMWh8tv8n-sjtOj-Vk7SR07gP19nvHqeLAWs,6737 +taichi/examples/rendering/oit_renderer.py,sha256=RGjBOYLGfEFFW_W1pQn193b-YR2Gs-JwF_8_LPKWDiM,5012 +taichi/examples/rendering/rasterizer.py,sha256=FU6i2RZqhdHxdWkFvyWI1hpERMrVSVe2A2Wteha84ZM,5107 +taichi/examples/rendering/sdf_renderer.py,sha256=DFnEEU5O4x5Nam3chiw0sDqkd7hR8EZ1-eC6JIHjnec,4720 +taichi/examples/rendering/simple_texture.py,sha256=doVxLQ4KtOK7pIg1idJ4pBwM2kOkUCHiM7_-qiTT5fY,1307 +taichi/examples/rendering/simple_uv.py,sha256=tqHOrFLlyqI-E0Nh2XqIugKXPRDb38d79WzHOsnwvoM,370 +taichi/examples/rendering/taichi_logo.py,sha256=XFHPw5sV8r3T8ediitwmIto6-5Jq5H1Xwu7kmpR-BQg,518 +taichi/examples/rendering/taichi_ngp.py,sha256=OtCB1IU0OS2xINTGYBxecnkirsM9VxN-uzEg576uvYI,40597 +taichi/examples/simulation/__pycache__/ad_gravity.cpython-312.pyc,, +taichi/examples/simulation/__pycache__/comet.cpython-312.pyc,, +taichi/examples/simulation/__pycache__/euler.cpython-312.pyc,, +taichi/examples/simulation/__pycache__/eulerfluid2d.cpython-312.pyc,, +taichi/examples/simulation/__pycache__/fem128.cpython-312.pyc,, +taichi/examples/simulation/__pycache__/fem99.cpython-312.pyc,, +taichi/examples/simulation/__pycache__/fractal.cpython-312.pyc,, +taichi/examples/simulation/__pycache__/game_of_life.cpython-312.pyc,, +taichi/examples/simulation/__pycache__/implicit_fem.cpython-312.pyc,, +taichi/examples/simulation/__pycache__/implicit_mass_spring.cpython-312.pyc,, +taichi/examples/simulation/__pycache__/initial_value_problem.cpython-312.pyc,, +taichi/examples/simulation/__pycache__/karman_vortex_street.cpython-312.pyc,, +taichi/examples/simulation/__pycache__/laplace_equation.cpython-312.pyc,, +taichi/examples/simulation/__pycache__/mandelbrot_zoom.cpython-312.pyc,, +taichi/examples/simulation/__pycache__/mass_spring_game.cpython-312.pyc,, +taichi/examples/simulation/__pycache__/mpm128.cpython-312.pyc,, +taichi/examples/simulation/__pycache__/mpm3d.cpython-312.pyc,, +taichi/examples/simulation/__pycache__/mpm88.cpython-312.pyc,, +taichi/examples/simulation/__pycache__/mpm99.cpython-312.pyc,, +taichi/examples/simulation/__pycache__/mpm_lagrangian_forces.cpython-312.pyc,, +taichi/examples/simulation/__pycache__/nbody.cpython-312.pyc,, +taichi/examples/simulation/__pycache__/odop_solar.cpython-312.pyc,, +taichi/examples/simulation/__pycache__/pbf2d.cpython-312.pyc,, +taichi/examples/simulation/__pycache__/physarum.cpython-312.pyc,, +taichi/examples/simulation/__pycache__/snow_phaseField.cpython-312.pyc,, +taichi/examples/simulation/__pycache__/stable_fluid.cpython-312.pyc,, +taichi/examples/simulation/__pycache__/two_stream_instability.cpython-312.pyc,, +taichi/examples/simulation/__pycache__/vortex_rings.cpython-312.pyc,, +taichi/examples/simulation/__pycache__/waterwave.cpython-312.pyc,, +taichi/examples/simulation/ad_gravity.py,sha256=v6cGxDCbjxlXzUTWwVHmLOFgaFPvFZgPD0d-mk13ntA,1386 +taichi/examples/simulation/comet.py,sha256=1DSSJNbOjU3LC0kcedwaO-hQfkFa98yEMcyEwnasL9w,2421 +taichi/examples/simulation/euler.py,sha256=F5bVxAsjYoNqv7h0iPpgT65tIJdbgtaoBq0SzXfbB9M,15132 +taichi/examples/simulation/eulerfluid2d.py,sha256=_fF_7-Z8XZXmycEjFoWgysI1e7k2ym3cFymgJYcOm3U,9522 +taichi/examples/simulation/fem128.py,sha256=YFjQk1qY4My9YfZP8JBbk-v2fI0Lv_q7U8RNS9pFVFA,4564 +taichi/examples/simulation/fem99.py,sha256=Cjz9J-ponzI7lTZqfjRyB7NW5cYOq6_OhnNK_wrdj6A,3291 +taichi/examples/simulation/fractal.py,sha256=LS-wfU0ijmwXBtZwouXA9IffSvZGNcFM-oM0DPwMYyE,853 +taichi/examples/simulation/game_of_life.py,sha256=5s2uYZ5lGVSL_3KuvgTgpvAKn6Itn1lm9DQVX-t41aU,2413 +taichi/examples/simulation/implicit_fem.py,sha256=hPPU86dloxm1YxYNvCPp_cyguqdp7DVd93jS9WCE-1Q,11835 +taichi/examples/simulation/implicit_mass_spring.py,sha256=eKa23tKtgiBrKBSfKylAD3AUwEskOdLtXZpiVuxRH5A,10927 +taichi/examples/simulation/initial_value_problem.py,sha256=2gtU_4go3lAr05XMLV91OR8fGZ1RJ0xDEtp-rX-mPpU,1173 +taichi/examples/simulation/karman_vortex_street.py,sha256=Z9RYVQvfAbR7ZRxetC1rM4pfn80dg00zhwRLyxhZmqI,6799 +taichi/examples/simulation/laplace_equation.py,sha256=P3fZji3RHYu1osDSCiTQS5SuWYh4YhiDwk0_j3_vzdE,11759 +taichi/examples/simulation/mandelbrot_zoom.py,sha256=b5jM3GOsKq3NTLLVaMmhFMqjiCevFL4jd-Nwvf1iVvM,1376 +taichi/examples/simulation/mass_spring_game.py,sha256=09kZD0_5qVu-9dIQufbREX8ZVAR_bGhcBAn7Vmme4Rc,6042 +taichi/examples/simulation/mpm128.py,sha256=cM7P_fpOs_nSlue08rr2trBG0d4MpvfMRW-ZyjCfUH4,6783 +taichi/examples/simulation/mpm3d.py,sha256=WJhVGeFrRatkL5GCVMBX15W4m2CRDtIAmVEd22-Lndk,3854 +taichi/examples/simulation/mpm88.py,sha256=tjgpSxtNHFsjjIuj3_6Vf9YnZkNZOlMcVoHfQ4ly2go,2830 +taichi/examples/simulation/mpm99.py,sha256=7DpEpc7sJ_75ImlLYvW0Wb15llcyzbAflNSGvJGS4zk,5653 +taichi/examples/simulation/mpm_lagrangian_forces.py,sha256=O_fdXKFOdOh_pKiT-AbYKTWc46YzQ9DEoCcI14k6s-Y,6020 +taichi/examples/simulation/nbody.py,sha256=STmVVPonx2j07wNDrI3S4mhHCXHCQRP-Jn0Ha1DgLG4,2491 +taichi/examples/simulation/odop_solar.py,sha256=Uo6pmR33SCzas3bqGCeQS5f5AL7ftRw8mmD1DYR-i58,2250 +taichi/examples/simulation/pbf2d.py,sha256=LgMCQKbBXPLIatKPa3x5z_tDJxd1MPz4_GjHjOO9BQk,9071 +taichi/examples/simulation/physarum.py,sha256=_G5SonACVfIL5vuoTfJZDkwoXPazcgKtsW6L8eyf25o,2353 +taichi/examples/simulation/snow_phaseField.py,sha256=FQx1gDrPoaGL8T1nIAXuIDGUNDV_2MiGECcFhcWa7b8,9820 +taichi/examples/simulation/stable_fluid.py,sha256=mxZdFQfFUv8-uA60sVL_XP4YA9gb3lVZ97lassufOpI,11545 +taichi/examples/simulation/two_stream_instability.py,sha256=DpgQ3X3rgheA3VpPiu33o73OQd0i9xfScQ6oP333cJE,3189 +taichi/examples/simulation/vortex_rings.py,sha256=ouvdw5nDZimtu2MZvpInizPVtr9F_tnYFPquJtzvmzc,2148 +taichi/examples/simulation/waterwave.py,sha256=_RTGUA8ZLnTXGVbPUQO7nrFUm5aj-DfScN9PEZvqSh8,2963 +taichi/experimental.py,sha256=st8Ny0kWmSwoXb-aoFey2xGF2vgTA2JofVvn9JIOhnM,337 +taichi/graph/__init__.py,sha256=Pse9Sm7eSVybOy0TWljIkN6DY9DkAMmqGSA3vAhpkbY,23 +taichi/graph/__pycache__/__init__.cpython-312.pyc,, +taichi/graph/__pycache__/_graph.cpython-312.pyc,, +taichi/graph/_graph.py,sha256=3fXQ7bKnRDQy8epppclnMpzD2eXZZoywf7_tmkrqZ6Y,10241 +taichi/lang/__init__.py,sha256=TLRXp2hEfoQTSkbS5f_yhH196nuEJcva1CztC1Ne_V4,1251 +taichi/lang/__pycache__/__init__.cpython-312.pyc,, +taichi/lang/__pycache__/_ndarray.cpython-312.pyc,, +taichi/lang/__pycache__/_ndrange.cpython-312.pyc,, +taichi/lang/__pycache__/_texture.cpython-312.pyc,, +taichi/lang/__pycache__/_wrap_inspect.cpython-312.pyc,, +taichi/lang/__pycache__/any_array.cpython-312.pyc,, +taichi/lang/__pycache__/argpack.cpython-312.pyc,, +taichi/lang/__pycache__/common_ops.cpython-312.pyc,, +taichi/lang/__pycache__/enums.cpython-312.pyc,, +taichi/lang/__pycache__/exception.cpython-312.pyc,, +taichi/lang/__pycache__/expr.cpython-312.pyc,, +taichi/lang/__pycache__/field.cpython-312.pyc,, +taichi/lang/__pycache__/impl.cpython-312.pyc,, +taichi/lang/__pycache__/kernel_arguments.cpython-312.pyc,, +taichi/lang/__pycache__/kernel_impl.cpython-312.pyc,, +taichi/lang/__pycache__/matrix.cpython-312.pyc,, +taichi/lang/__pycache__/matrix_ops.cpython-312.pyc,, +taichi/lang/__pycache__/matrix_ops_utils.cpython-312.pyc,, +taichi/lang/__pycache__/mesh.cpython-312.pyc,, +taichi/lang/__pycache__/misc.cpython-312.pyc,, +taichi/lang/__pycache__/ops.cpython-312.pyc,, +taichi/lang/__pycache__/runtime_ops.cpython-312.pyc,, +taichi/lang/__pycache__/shell.cpython-312.pyc,, +taichi/lang/__pycache__/snode.cpython-312.pyc,, +taichi/lang/__pycache__/source_builder.cpython-312.pyc,, +taichi/lang/__pycache__/struct.cpython-312.pyc,, +taichi/lang/__pycache__/util.cpython-312.pyc,, +taichi/lang/_ndarray.py,sha256=cd9rTKulc2_xzIVJNDCbf9dFfn5dsRrAJFiTUOtWXX0,10764 +taichi/lang/_ndrange.py,sha256=sKRCg_zxkOYLqkCydQIeHvIe0iwHi1oAiVMWfgw6W0U,5881 +taichi/lang/_texture.py,sha256=ss6dZmqcrhBSx3TazJXGirHfuyCEfiSjNSdE8DaFnwE,6724 +taichi/lang/_wrap_inspect.py,sha256=1Iji8qBSq4v5SA8X9KtJoDINIfrc4nS0c21UnKt9bYw,7141 +taichi/lang/any_array.py,sha256=zI8uTCPg1Xo3qx58xYlNYH1XHkFs_6PJqux204dXoMQ,3340 +taichi/lang/argpack.py,sha256=LqTDEucXHve9H_yoBlCthHi4kP-8zmSR_Ex0rxOo6eI,17169 +taichi/lang/ast/__init__.py,sha256=EN2eGLMeT1ADP4BrFqugwh9eg3WJ5pAdWElHmgQ6zvI,192 +taichi/lang/ast/__pycache__/__init__.cpython-312.pyc,, +taichi/lang/ast/__pycache__/ast_transformer.cpython-312.pyc,, +taichi/lang/ast/__pycache__/ast_transformer_utils.cpython-312.pyc,, +taichi/lang/ast/__pycache__/checkers.cpython-312.pyc,, +taichi/lang/ast/__pycache__/symbol_resolver.cpython-312.pyc,, +taichi/lang/ast/__pycache__/transform.cpython-312.pyc,, +taichi/lang/ast/ast_transformer.py,sha256=K7rpq9iX-2Z5lorKvr02H-R9TrQv6WO8JQ1GAFS1Ofs,76596 +taichi/lang/ast/ast_transformer_utils.py,sha256=tZJ7bjDHOhPX6GTp11Kvc6TZqlJXnOo1bJh0wIdHmas,10616 +taichi/lang/ast/checkers.py,sha256=buG2Af2EZsrF-2zKu_Y0hiX17F2yDscA1TDZVtTM5vc,3876 +taichi/lang/ast/symbol_resolver.py,sha256=L4WT8yTm3d33SxdwBKo5brtesUkkg1_B_6tMShRoZ58,1811 +taichi/lang/ast/transform.py,sha256=Sts2v4_m-Wi8NiDtY6JS9b3iNJ3OZ9xwIEQ6Xf8hZ3Y,253 +taichi/lang/common_ops.py,sha256=EDL3cw4Jsb1LQBjqCWrc1FNc3dWkf_5SwxDfCLwAPvo,8720 +taichi/lang/enums.py,sha256=SFUJ5ZgcQjxIi6cP42pKsssCLxgskHFXOY9AaAzGArI,2079 +taichi/lang/exception.py,sha256=9-XHPNFTHw-RQSQvgOYCWJLzCSM7KrPJrUUyjLrGpDM,2083 +taichi/lang/expr.py,sha256=ZVc-QTRLGhMDAx2xrXFtbuvlmUwPhb6yR_HVEbnuKAw,6230 +taichi/lang/field.py,sha256=AnGoEeXzzebu_g1-QEKHpdlDRlwwdExuuRxkj_i1AQo,14754 +taichi/lang/impl.py,sha256=hu8uGyri_MP3OTYgDh6kru7hOzhEmKtRUZAx84OJNrE,41131 +taichi/lang/kernel_arguments.py,sha256=HnT8hP7gWcQAG9FMlZYUkpUmd_6HqbtK9jjNyAsJKf4,6514 +taichi/lang/kernel_impl.py,sha256=75YHJakce3JOMvbcmHFDawnvyUAfwWn31HUC3EJZnrg,54889 +taichi/lang/matrix.py,sha256=9sQvog-Yx-TC99O4F4iODkcGpvhk_qJ61i4Io2XJqho,65352 +taichi/lang/matrix_ops.py,sha256=xwO5CmTwU1wmHAuitQGdgoe23JfoL3djqHaGYpJmfzM,10369 +taichi/lang/matrix_ops_utils.py,sha256=5LG1BoeRhQoXxjr0AcaO8z-Q2OvXxn1ETDTGgu2oq8E,5407 +taichi/lang/mesh.py,sha256=drwcZ3cEYOm_62EslwA6vPDfrvqdarRVI5X6_i4eMwE,27693 +taichi/lang/misc.py,sha256=ZT6uY57y8_zU4l508py3md4UAWKLyEJTofQuBJyxmm4,24678 +taichi/lang/ops.py,sha256=Z9WTFIb2RP-bZhAlMS3_j6x3y1y6IUJxE9UAv7THZ50,45413 +taichi/lang/runtime_ops.py,sha256=VCDaHQvzYWwmkgPIacR-sAEjjlxkSd2yAUQi0T6O4Xw,215 +taichi/lang/shell.py,sha256=z7kFM9xCvk4uS1aE_lMshIkEnVpOHYrYi_Gn5RsH7qU,1071 +taichi/lang/simt/__init__.py,sha256=Ds2e-tapGg0O33EdtxFGy0lWyghntr_qMCMqtsmezTg,109 +taichi/lang/simt/__pycache__/__init__.cpython-312.pyc,, +taichi/lang/simt/__pycache__/block.cpython-312.pyc,, +taichi/lang/simt/__pycache__/grid.cpython-312.pyc,, +taichi/lang/simt/__pycache__/subgroup.cpython-312.pyc,, +taichi/lang/simt/__pycache__/warp.cpython-312.pyc,, +taichi/lang/simt/block.py,sha256=GpxKKhOEww9i-ag5-QgBMDX1tKUx981X1K4kZYj8ZsE,3753 +taichi/lang/simt/grid.py,sha256=jrqrTVxiZLWh9fkm8ej0AiCeqFPB2C0qSNyYxGkDROc,127 +taichi/lang/simt/subgroup.py,sha256=aUSxgYnR2uS5YmP909mV-Ftd2fpxDbn9be8aKz3PpOg,3952 +taichi/lang/simt/warp.py,sha256=ZQrx5czxX6T6N4vkQH6CBY6Etqs2JoTIzrMDyrdvafM,3163 +taichi/lang/snode.py,sha256=vRynVdViDAUUhYOQwcaFw4s0PuK4-K5IFNt-b0Jexjk,17131 +taichi/lang/source_builder.py,sha256=zhA784ZKkqvQJ662ZErvMnHFmv2GHO_7SnfZSrgTcKY,5913 +taichi/lang/struct.py,sha256=QOrXbpKl_DFvisWCkjp-j2HqxFu7uNf-3jAUVl0CJsE,31935 +taichi/lang/util.py,sha256=zgTmsUuAxx2z-LMDxBtEwgDE5qkku3ns1rjsc1TYckY,9425 +taichi/linalg/__init__.py,sha256=CzPFbKIgOZdMrtyswnlmoDNpFWiO9Zdp5cyxMe0wm78,247 +taichi/linalg/__pycache__/__init__.cpython-312.pyc,, +taichi/linalg/__pycache__/matrixfree_cg.cpython-312.pyc,, +taichi/linalg/__pycache__/sparse_cg.cpython-312.pyc,, +taichi/linalg/__pycache__/sparse_matrix.cpython-312.pyc,, +taichi/linalg/__pycache__/sparse_solver.cpython-312.pyc,, +taichi/linalg/matrixfree_cg.py,sha256=ncOu_IbIvnclUTzruWow0qyWv0MLKJhtLjJXbaDldSM,10536 +taichi/linalg/sparse_cg.py,sha256=_Lc6fnCVl--PBLCM5chS1LN5EMClueUYNg3bwe_8mI8,2575 +taichi/linalg/sparse_matrix.py,sha256=jfytBKahCvjXdm8CKcOWYr0wXoG8M5TX6--CJAlBK8E,11206 +taichi/linalg/sparse_solver.py,sha256=uhirSbHB4FO4gZ-zM-_7stpMgNRcw0xLqSaXwOFyfgM,4981 +taichi/math/__init__.py,sha256=Wv8MqjFxlxgA2iYFGVZQYCVGDkhIzU_9O_ZJtCTi0r4,194 +taichi/math/__pycache__/__init__.cpython-312.pyc,, +taichi/math/__pycache__/_complex.cpython-312.pyc,, +taichi/math/__pycache__/mathimpl.cpython-312.pyc,, +taichi/math/_complex.py,sha256=aa0ILPhqusdjwuLLNvGcn0r9RRWFPwBT1q05XkD6Kz0,5127 +taichi/math/mathimpl.py,sha256=mP1mwbJ21nz1gaxOVBAkeCY603tsjOhUvDNEwyE3MrQ,22449 +taichi/profiler/__init__.py,sha256=rq6xqtxKkEffDa0Da-9wbuO0ytM8JJrdapHw95mM8wU,187 +taichi/profiler/__pycache__/__init__.cpython-312.pyc,, +taichi/profiler/__pycache__/kernel_metrics.cpython-312.pyc,, +taichi/profiler/__pycache__/kernel_profiler.cpython-312.pyc,, +taichi/profiler/__pycache__/memory_profiler.cpython-312.pyc,, +taichi/profiler/__pycache__/scoped_profiler.cpython-312.pyc,, +taichi/profiler/kernel_metrics.py,sha256=b0svQZ8DWPHRt8dyHd0WJyc5Gh1xXI35BxgG6s05Rvc,7839 +taichi/profiler/kernel_profiler.py,sha256=9RRm8iZMOP_n0Y39NdFwahs3qA220YSmgsVStX1bMJ0,23068 +taichi/profiler/memory_profiler.py,sha256=ZSxWUNefoPzyhrGnZmuJR0LCs5tuvZ-uyD6hIjMKuVQ,338 +taichi/profiler/scoped_profiler.py,sha256=s5UUf2NbwBIbhZMB8R82B0ulzZkgQVV2w_ymdJE9ekU,985 +taichi/shaders/Circles_vk.frag,sha256=C-Gunizzb5yp263wFJo8sNXm0VFN5qDKWdcI2vtX4qY,542 +taichi/shaders/Circles_vk.vert,sha256=DHZQ31tJFwQsRlX5_9hOeVWmT_0ZgRdWVziQOt_Agwc,1118 +taichi/shaders/Circles_vk_frag.spv,sha256=qI6bTUsPGf08FaOTqHdfi2u5RdZEldvJma7fW7VdEX8,1332 +taichi/shaders/Circles_vk_vert.spv,sha256=0nDQgBjI75ANa7eNZoDojIyOvWhNQIs9mZpYDpME-tE,3484 +taichi/shaders/Lines_vk.frag,sha256=Hgu8YEa6lg13Z8QR-f4ytHhyQG76NP5wo31VOa5Fn4I,148 +taichi/shaders/Lines_vk.vert,sha256=VDa2kKAtrNliKqeVpfG27YwLHcJL-sagLFpf8TH7z74,316 +taichi/shaders/Lines_vk_frag.spv,sha256=bNybJN5kDtKaP-IAgqHI6rRFMP2cAVUpQdvkWQBvDDk,568 +taichi/shaders/Lines_vk_vert.spv,sha256=urES2qDkY91hs8F5lvJilL7mLZJngu6a4Bx66FFu4FY,1308 +taichi/shaders/Mesh_vk.frag,sha256=y0elUiH0ztClM8NFIChhQFcj0tohe-711QUCW0ycbBI,1550 +taichi/shaders/Mesh_vk.vert,sha256=qgTkAxsY-dPDyyXo8GT2n7ars09bUZCaW-05AO-me3A,1680 +taichi/shaders/Mesh_vk_frag.spv,sha256=APc6Sb1tz2uNRQSqvwwrWB6W_jk0s3EMRDsEIGKMBpM,3952 +taichi/shaders/Mesh_vk_vert.spv,sha256=P-2FSwJaPObW5HSvQXbrRCDLZurYxr3zbhsH1YUasG8,4396 +taichi/shaders/Particles_vk.frag,sha256=6OpkFgANuX4_93s0hCeUZY5xNPSaoRjcmoFRL-LunAU,2316 +taichi/shaders/Particles_vk.vert,sha256=cxKlHDPwX35oxrx8GPJp1dryUk5RjP3WBQ79g3WQ2d8,1849 +taichi/shaders/Particles_vk_frag.spv,sha256=soI-L_l6Amd0UlokxTPZ2JBaZdaeO--BA1AlDlgIRA8,6216 +taichi/shaders/Particles_vk_vert.spv,sha256=815FN2TpmimCbzxtQGHQNnUCLTBOTwo173xJEG_y6Tg,4624 +taichi/shaders/SceneLines2quad_vk_comp.spv,sha256=zSCZvr0bsW-KtznbYO6EwreiDP5Ua-NamvUkBTS18so,11344 +taichi/shaders/SceneLines_vk.frag,sha256=Hgu8YEa6lg13Z8QR-f4ytHhyQG76NP5wo31VOa5Fn4I,148 +taichi/shaders/SceneLines_vk.vert,sha256=8CfvgtOSiG9yu9GyRr_DxyW1p8s5vUQ9PwEwk7ZOO5Y,259 +taichi/shaders/SceneLines_vk_frag.spv,sha256=bNybJN5kDtKaP-IAgqHI6rRFMP2cAVUpQdvkWQBvDDk,568 +taichi/shaders/SceneLines_vk_vert.spv,sha256=8yg5hXGslcpvBQZvxG7seXCW4208nHcamgIk9W12xMw,1092 +taichi/shaders/SetImage_vk.frag,sha256=7AMlfh6JTVRQA5wMjk_T2hkb_WIDNtuS0wQgp_eRCxo,534 +taichi/shaders/SetImage_vk.vert,sha256=yc-Q_CUxujZ1UGTH5xaUv3sLoEKdeLcMgOyq8Kg8H1I,394 +taichi/shaders/SetImage_vk_frag.spv,sha256=IcFSSJPgFUXFbx07xJ8qI3d-FyCtT57g38iZ-TYZQkM,1712 +taichi/shaders/SetImage_vk_vert.spv,sha256=qWaRg8caSTrN11HqVgxx_J84zBN0haeHx-7xsw303qk,1244 +taichi/shaders/Triangles_vk.frag,sha256=zQrAXml1LJIdIEx28Wvs4Gymozw69ljqU5gldupIb5A,317 +taichi/shaders/Triangles_vk.vert,sha256=s8FhjV9oJvdxPtbFCNSUPC5mRLoNz_w-WyjlOexjUO4,688 +taichi/shaders/Triangles_vk_frag.spv,sha256=wDE2jVYHZfcVMwZlweFCHhxOxXTynBPL5YJ5z2eh9Dc,840 +taichi/shaders/Triangles_vk_vert.spv,sha256=5BY7jgsDc1_X_BwWWXu5nhpXySVOD16ziplFEVR4LxY,1976 +taichi/shaders/lines2quad_vk_comp.spv,sha256=p1LYegTuQZhD7dFmHoU0rc8lOg-TAka57J6uZZFrwrc,9192 +taichi/sparse/__init__.py,sha256=ErHDqk2SR7q3otSuS3fFLI09gS73OW_wEOXcAzTMSPo,29 +taichi/sparse/__pycache__/__init__.cpython-312.pyc,, +taichi/sparse/__pycache__/_sparse_grid.cpython-312.pyc,, +taichi/sparse/_sparse_grid.py,sha256=f3y7rrXrWyL-_pDlcrLk_aLuAQuZ-pIfB60fKyTZUHY,2477 +taichi/tools/__init__.py,sha256=mhFMBt_uCN2eF4c0DNLh2UpuDZGH89pj4vERMo36mdw,371 +taichi/tools/__pycache__/__init__.cpython-312.pyc,, +taichi/tools/__pycache__/diagnose.cpython-312.pyc,, +taichi/tools/__pycache__/image.cpython-312.pyc,, +taichi/tools/__pycache__/np2ply.cpython-312.pyc,, +taichi/tools/__pycache__/video.cpython-312.pyc,, +taichi/tools/__pycache__/vtk.cpython-312.pyc,, +taichi/tools/diagnose.py,sha256=KT2zM9npSCpUDGOuJ-xCArmoJQKnPcMHdMLbaOGgLYE,3803 +taichi/tools/image.py,sha256=U1CPRuIwtqUYRGYM34b3U8oa7OF6-Zi_yPukUhrS-Lo,4831 +taichi/tools/np2ply.py,sha256=Yo1319XywsLhVharLBtALEc5JoSfUolzFAEAnnlVlgE,15617 +taichi/tools/video.py,sha256=uYM9cRl9gPZZt1c24LFLaWGpYNLRUUikIUoa_uh0DgE,9555 +taichi/tools/vtk.py,sha256=oKy38LSavCi8pQPEa5wPRN2jZUgqDG3ySJY-sohUfR4,984 +taichi/types/__init__.py,sha256=Pr5TlyN617r_0aUkiypwPg6tjJsJEvDNXNIsUpeRKuY,581 +taichi/types/__pycache__/__init__.cpython-312.pyc,, +taichi/types/__pycache__/annotations.cpython-312.pyc,, +taichi/types/__pycache__/compound_types.cpython-312.pyc,, +taichi/types/__pycache__/ndarray_type.cpython-312.pyc,, +taichi/types/__pycache__/primitive_types.cpython-312.pyc,, +taichi/types/__pycache__/quant.cpython-312.pyc,, +taichi/types/__pycache__/texture_type.cpython-312.pyc,, +taichi/types/__pycache__/utils.cpython-312.pyc,, +taichi/types/annotations.py,sha256=pTZvxOUgRmy2CO09kq5HbHJY3NLHRKByHsXphfyUr7M,1005 +taichi/types/compound_types.py,sha256=Zyus9xcPlJcrrSf-v4qHLibYvb6dDsq0iVDncht42TI,2355 +taichi/types/ndarray_type.py,sha256=DfUhUuYvg2cTWXZyOkSGC8Cxj9xDiqJ-MF-Kivi0i2k,6751 +taichi/types/primitive_types.py,sha256=OLOlCvFvgwm0dvs1fEmI2ArX-JD1iybkn0uQB6el4Rw,4123 +taichi/types/quant.py,sha256=dbzgQhI143lp2rJpg1DG6YOITla7ZTSTVcAJ3yUmuZU,2853 +taichi/types/texture_type.py,sha256=feGL6Vc_--MOzNJ9eCqMRaX7aKTwWk20wB-PHjbI6FU,2413 +taichi/types/utils.py,sha256=JhOreH2HpaNDzDqeo3PJwvwRMrCVjWbFc8ysdVLjWgo,274 +taichi/ui/__init__.py,sha256=4zgDMmVQWwnn8E4CHfZRImpW60B-WzX7S6040104F9k,240 +taichi/ui/__pycache__/__init__.cpython-312.pyc,, +taichi/ui/__pycache__/camera.cpython-312.pyc,, +taichi/ui/__pycache__/canvas.cpython-312.pyc,, +taichi/ui/__pycache__/constants.cpython-312.pyc,, +taichi/ui/__pycache__/gui.cpython-312.pyc,, +taichi/ui/__pycache__/imgui.cpython-312.pyc,, +taichi/ui/__pycache__/scene.cpython-312.pyc,, +taichi/ui/__pycache__/staging_buffer.cpython-312.pyc,, +taichi/ui/__pycache__/ui.cpython-312.pyc,, +taichi/ui/__pycache__/utils.cpython-312.pyc,, +taichi/ui/__pycache__/window.cpython-312.pyc,, +taichi/ui/camera.py,sha256=Dfe_idi4UUBf4tveMnEngoyJwrgmEDsLK79US8-m1BQ,8217 +taichi/ui/canvas.py,sha256=Y8dXcXbNenV_2EIUQ8M8hd7Mt9F7aMazTpqiup4kdCA,11317 +taichi/ui/constants.py,sha256=eidiJ8DxJdPKY36S3WwjjPZPLRM8OWyQtSQ4SY4VZpo,357 +taichi/ui/gui.py,sha256=cr7G6ceLRAtUtI69eVZ-MoW5Dmy_kaA_IluPGL06nv0,38495 +taichi/ui/imgui.py,sha256=RouytYyf5NjoJi1NnkA8_ZdFciYuz_p6FA-J7P86Ul8,4446 +taichi/ui/scene.py,sha256=9XaSPJyihSRbtXkzxJHMJnxX6gh8i-EpFFask3qLGH0,34942 +taichi/ui/staging_buffer.py,sha256=IHZbWil-wvzrcx-DKhj0mTuJnXF_vz9mwiQLMQ7crrg,10091 +taichi/ui/ui.py,sha256=-UmzwaKOMbD4TudHdjKg6RIRHszNp2AtsAqKF0ByJ-8,722 +taichi/ui/utils.py,sha256=XHlCucJeTbqfI_KV8EMNFWe3TAyxeOsfNgbCDrrcjIg,2534 +taichi/ui/window.py,sha256=rm24glWDmhgjbmFyCMSlZdekK_eVrmb9NkBHSodneBA,7014 diff --git a/local_packages/taichi-1.7.4.dist-info/REQUESTED b/local_packages/taichi-1.7.4.dist-info/REQUESTED new file mode 100644 index 0000000..e69de29 diff --git a/local_packages/taichi-1.7.4.dist-info/WHEEL b/local_packages/taichi-1.7.4.dist-info/WHEEL new file mode 100644 index 0000000..e3427cf --- /dev/null +++ b/local_packages/taichi-1.7.4.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: skbuild 0.18.1 +Root-Is-Purelib: false +Tag: cp312-cp312-win_amd64 + diff --git a/local_packages/taichi-1.7.4.dist-info/entry_points.txt b/local_packages/taichi-1.7.4.dist-info/entry_points.txt new file mode 100644 index 0000000..55ed764 --- /dev/null +++ b/local_packages/taichi-1.7.4.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[console_scripts] +ti = taichi._main:main diff --git a/local_packages/taichi-1.7.4.dist-info/top_level.txt b/local_packages/taichi-1.7.4.dist-info/top_level.txt new file mode 100644 index 0000000..0ad51a5 --- /dev/null +++ b/local_packages/taichi-1.7.4.dist-info/top_level.txt @@ -0,0 +1 @@ +taichi diff --git a/local_packages/taichi/CHANGELOG.md b/local_packages/taichi/CHANGELOG.md new file mode 100644 index 0000000..62b9871 --- /dev/null +++ b/local_packages/taichi/CHANGELOG.md @@ -0,0 +1,4 @@ +Highlights: + +Full changelog: + - [misc] Bump to v1.7.4 (by **Proton**) diff --git a/local_packages/taichi/__init__.py b/local_packages/taichi/__init__.py new file mode 100644 index 0000000..034052e --- /dev/null +++ b/local_packages/taichi/__init__.py @@ -0,0 +1,35 @@ +from taichi._funcs import * +from taichi._lib import core as _ti_core +from taichi._lib.utils import warn_restricted_version +from taichi._logging import * +from taichi._snode import * +from taichi.lang import * # pylint: disable=W0622 # TODO(archibate): It's `taichi.lang.core` overriding `taichi.core` +from taichi.types.annotations import * + +# Provide a shortcut to types since they're commonly used. +from taichi.types.primitive_types import * + + +from taichi import ad, algorithms, experimental, graph, linalg, math, sparse, tools, types +from taichi.ui import GUI, hex_to_rgb, rgb_to_hex, ui + +# Issue#2223: Do not reorder, or we're busted with partially initialized module +from taichi import aot # isort:skip + + +def __getattr__(attr): + if attr == "cfg": + return None if lang.impl.get_runtime().prog is None else lang.impl.current_cfg() + raise AttributeError(f"module '{__name__}' has no attribute '{attr}'") + + +__version__ = ( + _ti_core.get_version_major(), + _ti_core.get_version_minor(), + _ti_core.get_version_patch(), +) + +del _ti_core + +warn_restricted_version() +del warn_restricted_version diff --git a/local_packages/taichi/__main__.py b/local_packages/taichi/__main__.py new file mode 100644 index 0000000..925fa91 --- /dev/null +++ b/local_packages/taichi/__main__.py @@ -0,0 +1,3 @@ +from ._main import main + +main() diff --git a/local_packages/taichi/_funcs.py b/local_packages/taichi/_funcs.py new file mode 100644 index 0000000..13b7d9d --- /dev/null +++ b/local_packages/taichi/_funcs.py @@ -0,0 +1,704 @@ +import math + +from taichi.lang import impl, ops +from taichi.lang.impl import get_runtime, grouped, static +from taichi.lang.kernel_impl import func +from taichi.lang.matrix import Matrix, Vector +from taichi.types import f32, f64 +from taichi.types.annotations import template + + +@func +def _randn(dt): + """ + Generate a random float sampled from univariate standard normal + (Gaussian) distribution of mean 0 and variance 1, using the + Box-Muller transformation. + """ + assert dt == f32 or dt == f64 + u1 = ops.cast(1.0, dt) - ops.random(dt) + u2 = ops.random(dt) + r = ops.sqrt(-2 * ops.log(u1)) + c = ops.cos(math.tau * u2) + return r * c + + +def randn(dt=None): + """Generate a random float sampled from univariate standard normal + (Gaussian) distribution of mean 0 and variance 1, using the + Box-Muller transformation. Must be called in Taichi scope. + + Args: + dt (DataType): Data type of the required random number. Default to `None`. + If set to `None` `dt` will be determined dynamically in runtime. + + Returns: + The generated random float. + + Example:: + + >>> @ti.kernel + >>> def main(): + >>> print(ti.randn()) + >>> + >>> main() + -0.463608 + """ + if dt is None: + dt = impl.get_runtime().default_fp + return _randn(dt) + + +@func +def _polar_decompose2d(A, dt): + """Perform polar decomposition (A=UP) for 2x2 matrix. + Mathematical concept refers to https://en.wikipedia.org/wiki/Polar_decomposition. + + Args: + A (ti.Matrix(2, 2)): input 2x2 matrix `A`. + dt (DataType): date type of elements in matrix `A`, typically accepts ti.f32 or ti.f64. + + Returns: + Decomposed 2x2 matrices `U` and `P`. `U` is a 2x2 orthogonal matrix + and `P` is a 2x2 positive or semi-positive definite matrix. + """ + U = Matrix.identity(dt, 2) + P = ops.cast(A, dt) + zero = ops.cast(0.0, dt) + # if A is a zero matrix we simply return the pair (I, A) + if A[0, 0] == zero and A[0, 1] == zero and A[1, 0] == zero and A[1, 1] == zero: + pass + else: + detA = A[0, 0] * A[1, 1] - A[1, 0] * A[0, 1] + adetA = abs(detA) + B = Matrix( + [ + [A[0, 0] + A[1, 1], A[0, 1] - A[1, 0]], + [A[1, 0] - A[0, 1], A[1, 1] + A[0, 0]], + ], + dt, + ) + + if detA < zero: + B = Matrix( + [ + [A[0, 0] - A[1, 1], A[0, 1] + A[1, 0]], + [A[1, 0] + A[0, 1], A[1, 1] - A[0, 0]], + ], + dt, + ) + # here det(B) != 0 if A is not the zero matrix + adetB = abs(B[0, 0] * B[1, 1] - B[1, 0] * B[0, 1]) + k = ops.cast(1.0, dt) / ops.sqrt(adetB) + U = B * k + P = (A.transpose() @ A + adetA * Matrix.identity(dt, 2)) * k + + return U, P + + +@func +def _polar_decompose3d(A, dt): + """Perform polar decomposition (A=UP) for 3x3 matrix. + + Mathematical concept refers to https://en.wikipedia.org/wiki/Polar_decomposition. + + Args: + A (ti.Matrix(3, 3)): input 3x3 matrix `A`. + dt (DataType): date type of elements in matrix `A`, typically accepts ti.f32 or ti.f64. + + Returns: + Decomposed 3x3 matrices `U` and `P`. + """ + U, sig, V = _svd3d(A, dt) + return U @ V.transpose(), V @ sig @ V.transpose() + + +# https://www.seas.upenn.edu/~cffjiang/research/svd/svd.pdf +@func +def _svd2d(A, dt): + """Perform singular value decomposition (A=USV^T) for 2x2 matrix. + + Mathematical concept refers to https://en.wikipedia.org/wiki/Singular_value_decomposition. + + Args: + A (ti.Matrix(2, 2)): input 2x2 matrix `A`. + dt (DataType): date type of elements in matrix `A`, typically accepts ti.f32 or ti.f64. + + Returns: + Decomposed 2x2 matrices `U`, 'S' and `V`. + """ + R, S = _polar_decompose2d(A, dt) + c, s = ops.cast(0.0, dt), ops.cast(0.0, dt) + s1, s2 = ops.cast(0.0, dt), ops.cast(0.0, dt) + if abs(S[0, 1]) < 1e-5: + c, s = 1, 0 + s1, s2 = S[0, 0], S[1, 1] + else: + tao = ops.cast(0.5, dt) * (S[0, 0] - S[1, 1]) + w = ops.sqrt(tao**2 + S[0, 1] ** 2) + t = ops.cast(0.0, dt) + if tao > 0: + t = S[0, 1] / (tao + w) + else: + t = S[0, 1] / (tao - w) + c = 1 / ops.sqrt(t**2 + 1) + s = -t * c + s1 = c**2 * S[0, 0] - 2 * c * s * S[0, 1] + s**2 * S[1, 1] + s2 = s**2 * S[0, 0] + 2 * c * s * S[0, 1] + c**2 * S[1, 1] + V = Matrix.zero(dt, 2, 2) + if s1 < s2: + tmp = s1 + s1 = s2 + s2 = tmp + V = Matrix([[-s, c], [-c, -s]], dt=dt) + else: + V = Matrix([[c, s], [-s, c]], dt=dt) + U = R @ V + return U, Matrix([[s1, ops.cast(0, dt)], [ops.cast(0, dt), s2]], dt=dt), V + + +def _svd3d(A, dt, iters=None): + """Perform singular value decomposition (A=USV^T) for 3x3 matrix. + + Mathematical concept refers to https://en.wikipedia.org/wiki/Singular_value_decomposition. + + Args: + A (ti.Matrix(3, 3)): input 3x3 matrix `A`. + dt (DataType): date type of elements in matrix `A`, typically accepts ti.f32 or ti.f64. + iters (int): iteration number to control algorithm precision. + + Returns: + Decomposed 3x3 matrices `U`, 'S' and `V`. + """ + assert A.n == 3 and A.m == 3 + assert dt in [f32, f64] + if iters is None: + if dt == f32: + iters = 5 + else: + iters = 8 + if dt == f32: + rets = get_runtime().compiling_callable.ast_builder().sifakis_svd_f32(A.ptr, iters) + else: + rets = get_runtime().compiling_callable.ast_builder().sifakis_svd_f64(A.ptr, iters) + assert len(rets) == 21 + U_entries = rets[:9] + V_entries = rets[9:18] + sig_entries = rets[18:] + + @func + def get_result(): + U = Matrix.zero(dt, 3, 3) + V = Matrix.zero(dt, 3, 3) + sigma = Matrix.zero(dt, 3, 3) + for i in static(range(3)): + for j in static(range(3)): + U[i, j] = U_entries[i * 3 + j] + V[i, j] = V_entries[i * 3 + j] + sigma[i, i] = sig_entries[i] + return U, sigma, V + + return get_result() + + +@func +def _eig2x2(A, dt): + """Compute the eigenvalues and right eigenvectors (Av=lambda v) of a 2x2 real matrix. + + Mathematical concept refers to https://en.wikipedia.org/wiki/Eigendecomposition_of_a_matrix. + + Args: + A (ti.Matrix(2, 2)): input 2x2 matrix `A`. + dt (DataType): date type of elements in matrix `A`, typically accepts ti.f32 or ti.f64. + + Returns: + eigenvalues (ti.Matrix(2, 2)): The eigenvalues in complex form. Each row stores one eigenvalue. The first number of the eigenvalue represents the real part and the second number represents the imaginary part. + eigenvectors: (ti.Matrix(4, 2)): The eigenvectors in complex form. Each column stores one eigenvector. Each eigenvector consists of 2 entries, each of which is represented by two numbers for its real part and imaginary part. + """ + tr = A.trace() + det = A.determinant() + gap = tr**2 - 4 * det + lambda1 = Vector.zero(dt, 2) + lambda2 = Vector.zero(dt, 2) + v1 = Vector.zero(dt, 4) + v2 = Vector.zero(dt, 4) + if gap > 0: + lambda1 = Vector([tr + ops.sqrt(gap), 0.0], dt=dt) * 0.5 + lambda2 = Vector([tr - ops.sqrt(gap), 0.0], dt=dt) * 0.5 + A1 = A - lambda1[0] * Matrix.identity(dt, 2) + A2 = A - lambda2[0] * Matrix.identity(dt, 2) + if all(A1 == Matrix.zero(dt, 2, 2)) and all(A1 == Matrix.zero(dt, 2, 2)): + v1 = Vector([0.0, 0.0, 1.0, 0.0]).cast(dt) + v2 = Vector([1.0, 0.0, 0.0, 0.0]).cast(dt) + else: + v1 = Vector([A2[0, 0], 0.0, A2[1, 0], 0.0], dt=dt).normalized() + v2 = Vector([A1[0, 0], 0.0, A1[1, 0], 0.0], dt=dt).normalized() + else: + lambda1 = Vector([tr, ops.sqrt(-gap)], dt=dt) * 0.5 + lambda2 = Vector([tr, -ops.sqrt(-gap)], dt=dt) * 0.5 + A1r = A - lambda1[0] * Matrix.identity(dt, 2) + A1i = -lambda1[1] * Matrix.identity(dt, 2) + A2r = A - lambda2[0] * Matrix.identity(dt, 2) + A2i = -lambda2[1] * Matrix.identity(dt, 2) + v1 = Vector([A2r[0, 0], A2i[0, 0], A2r[1, 0], A2i[1, 0]], dt=dt).normalized() + v2 = Vector([A1r[0, 0], A1i[0, 0], A1r[1, 0], A1i[1, 0]], dt=dt).normalized() + eigenvalues = Matrix.rows([lambda1, lambda2]) + eigenvectors = Matrix.cols([v1, v2]) + + return eigenvalues, eigenvectors + + +@func +def _sym_eig2x2(A, dt): + """Compute the eigenvalues and right eigenvectors (Av=lambda v) of a 2x2 real symmetric matrix. + + Mathematical concept refers to https://en.wikipedia.org/wiki/Eigendecomposition_of_a_matrix. + + Args: + A (ti.Matrix(2, 2)): input 2x2 symmetric matrix `A`. + dt (DataType): date type of elements in matrix `A`, typically accepts ti.f32 or ti.f64. + + Returns: + eigenvalues (ti.Vector(2)): The eigenvalues. Each entry store one eigen value. + eigenvectors (ti.Matrix(2, 2)): The eigenvectors. Each column stores one eigenvector. + """ + assert all(A == A.transpose()), "A needs to be symmetric" + tr = A.trace() + det = A.determinant() + gap = tr**2 - 4 * det + lambda1 = (tr + ops.sqrt(gap)) * 0.5 + lambda2 = (tr - ops.sqrt(gap)) * 0.5 + eigenvalues = Vector([lambda1, lambda2], dt=dt) + + A1 = A - lambda1 * Matrix.identity(dt, 2) + A2 = A - lambda2 * Matrix.identity(dt, 2) + v1 = Vector.zero(dt, 2) + v2 = Vector.zero(dt, 2) + if all(A1 == Matrix.zero(dt, 2, 2)) and all(A1 == Matrix.zero(dt, 2, 2)): + v1 = Vector([0.0, 1.0]).cast(dt) + v2 = Vector([1.0, 0.0]).cast(dt) + else: + v1 = Vector([A2[0, 0], A2[1, 0]], dt=dt).normalized() + v2 = Vector([A1[0, 0], A1[1, 0]], dt=dt).normalized() + eigenvectors = Matrix.cols([v1, v2]) + return eigenvalues, eigenvectors + + +@func +def dsytrd3(A, Q, dt): + Q[0, 0] = 1.0 + Q[1, 1] = 1.0 + Q[2, 2] = 1.0 + e = Vector([0.0, 0.0, 0.0], dt=dt) + u = Vector([0.0, 0.0, 0.0], dt=dt) + q = Vector([0.0, 0.0, 0.0], dt=dt) + d = Vector([0.0, 0.0, 0.0], dt=dt) + h = A[0, 1] ** 2 + A[0, 2] ** 2 + g = 0.0 + if A[0, 1] > 0: + g = -ops.sqrt(h) + else: + g = ops.sqrt(h) + e[0] = g + f = g * A[0, 1] + u[1] = A[0, 1] - g + u[2] = A[0, 2] + omega = h - f + if omega > 0.0: + omega = 1.0 / omega + K = 0.0 + f = A[1, 1] * u[1] + A[1, 2] * u[2] + q[1] = omega * f # p + K += u[1] * f # u* A u + + f = A[1, 2] * u[1] + A[2, 2] * u[2] + q[2] = omega * f # p + K += u[2] * f # u* A u + + K *= 0.5 * omega * omega + + q[1] = q[1] - K * u[1] + q[2] = q[2] - K * u[2] + + d[0] = A[0, 0] + d[1] = A[1, 1] - 2.0 * q[1] * u[1] + d[2] = A[2, 2] - 2.0 * q[2] * u[2] + + for j in range(1, 3): + f = omega * u[j] + for i in range(1, 3): + Q[i, j] = Q[i, j] - f * u[i] + + # Calculate updated A[1, 2] and store it in e[1] + e[1] = A[1, 2] - q[1] * u[2] - u[1] * q[2] + else: + d[0] = A[0, 0] + d[1] = A[1, 1] + d[2] = A[2, 2] + e[1] = A[1, 2] + return d, e, Q + + +@func +def dsyevq3(A, Q, w, dt): + w, e, Q = dsytrd3(A, Q, dt) + for l in range(0, 2): + nIter = 0 + while True: + # Check for convergence and exit iteration loop if off-diagonal + # element e(l) is zero + m = 0 + for i in range(l, 2): + m = i + g = ops.abs(w[m]) + ops.abs(w[m + 1]) + if ops.abs(e[m]) + g == g: + break + if m == l: + break + + nIter += 1 + assert nIter <= 30, "Timeout" + + # Calculate g = d_m - k + g = (w[l + 1] - w[l]) / (e[l] + e[l]) + r = ops.sqrt(g * g + 1.0) + if g > 0: + g = w[m] - w[l] + e[l] / (g + r) + else: + g = w[m] - w[l] + e[l] / (g - r) + + s = c = 1.0 + p = 0.0 + i = m - 1 + while i >= l: + f = s * e[i] + b = c * e[i] + if ops.abs(f) > ops.abs(g): + c = g / f + r = ops.sqrt(c * c + 1.0) + e[i + 1] = f * r + s = 1.0 / r + c *= s + else: + s = f / g + r = ops.sqrt(s * s + 1.0) + e[i + 1] = g * r + c = 1.0 / r + s *= c + + g = w[i + 1] - p + r = (w[i] - g) * s + 2.0 * c * b + p = s * r + w[i + 1] = g + p + g = c * r - b + + for k in range(0, 3): + t = Q[k, i + 1] + Q[k, i + 1] = s * Q[k, i] + c * t + Q[k, i] = c * Q[k, i] - s * t + + i -= 1 + w[l] -= p + e[l] = g + e[m] = 0.0 + return Q, w + + +@func +def _sym_eig3x3(A, dt): + """Compute the eigenvalues and right eigenvectors (Av=lambda v) of a 3x3 real symmetric matrix using Cardano's method. + + Mathematical concept refers to https://www.mpi-hd.mpg.de/personalhomes/globes/3x3/. + + Args: + A (ti.Matrix(3, 3)): input 3x3 symmetric matrix `A`. + dt (DataType): date type of elements in matrix `A`, typically accepts ti.f32 or ti.f64. + + Returns: + eigenvalues (ti.Vector(3)): The eigenvalues. Each entry store one eigen value. + eigenvectors (ti.Matrix(3, 3)): The eigenvectors. Each column stores one eigenvector. + """ + assert all(A == A.transpose()), "A needs to be symmetric" + M_SQRT3 = 1.73205080756887729352744634151 + DBL_EPSILON = 2.2204460492503131e-16 + m = A.trace() + dd = A[0, 1] * A[0, 1] + ee = A[1, 2] * A[1, 2] + ff = A[0, 2] * A[0, 2] + c1 = A[0, 0] * A[1, 1] + A[0, 0] * A[2, 2] + A[1, 1] * A[2, 2] - (dd + ee + ff) + c0 = A[2, 2] * dd + A[0, 0] * ee + A[1, 1] * ff - A[0, 0] * A[1, 1] * A[2, 2] - 2.0 * A[0, 2] * A[0, 1] * A[1, 2] + + p = m * m - 3.0 * c1 + q = m * (p - 1.5 * c1) - 13.5 * c0 + sqrt_p = ops.sqrt(ops.abs(p)) + phi = 27.0 * (0.25 * c1 * c1 * (p - c1) + c0 * (q + 6.75 * c0)) + phi = (1.0 / 3.0) * ops.atan2(ops.sqrt(ops.abs(phi)), q) + + c = sqrt_p * ops.cos(phi) + s = (1.0 / M_SQRT3) * sqrt_p * ops.sin(phi) + eigenvalues = Vector([0.0, 0.0, 0.0], dt=dt) + eigenvalues_final = Vector([0.0, 0.0, 0.0], dt=dt) + eigenvalues[1] = (1.0 / 3.0) * (m - c) + eigenvalues[2] = eigenvalues[1] + s + eigenvalues[0] = eigenvalues[1] + c + eigenvalues[1] = eigenvalues[1] - s + + t = ops.abs(eigenvalues[0]) + u = ops.abs(eigenvalues[1]) + if u > t: + t = u + u = ops.abs(eigenvalues[2]) + if u > t: + t = u + if t < 1.0: + u = t + else: + u = t * t + error = 256.0 * DBL_EPSILON * u * u + Q = Matrix.zero(dt, 3, 3) + Q_final = Matrix.zero(dt, 3, 3) + Q[0, 1] = A[0, 1] * A[1, 2] - A[0, 2] * A[1, 1] + Q[1, 1] = A[0, 2] * A[0, 1] - A[1, 2] * A[0, 0] + Q[2, 1] = A[0, 1] * A[0, 1] + + Q[0, 0] = Q[0, 1] + A[0, 2] * eigenvalues[0] + Q[1, 0] = Q[1, 1] + A[1, 2] * eigenvalues[0] + Q[2, 0] = (A[0, 0] - eigenvalues[0]) * (A[1, 1] - eigenvalues[0]) - Q[2, 1] + norm = Q[0, 0] * Q[0, 0] + Q[1, 0] * Q[1, 0] + Q[2, 0] * Q[2, 0] + early_ret = 0 + if norm <= error: + Q_final, eigenvalues_final = dsyevq3(A, Q, eigenvalues, dt) + early_ret = 1 + else: + norm = ops.sqrt(1.0 / norm) + Q[0, 0] *= norm + Q[1, 0] *= norm + Q[2, 0] *= norm + + if not early_ret: + Q[0, 1] = Q[0, 1] + A[0, 2] * eigenvalues[1] + Q[1, 1] = Q[1, 1] + A[1, 2] * eigenvalues[1] + Q[2, 1] = (A[0, 0] - eigenvalues[1]) * (A[1, 1] - eigenvalues[1]) - Q[2, 1] + norm = Q[0, 1] * Q[0, 1] + Q[1, 1] * Q[1, 1] + Q[2, 1] * Q[2, 1] + if norm <= error: + Q_final, eigenvalues_final = dsyevq3(A, Q, eigenvalues, dt) + early_ret = 1 + else: + norm = ops.sqrt(1.0 / norm) + Q[0, 1] *= norm + Q[1, 1] *= norm + Q[2, 1] *= norm + + Q[0, 2] = Q[1, 0] * Q[2, 1] - Q[2, 0] * Q[1, 1] + Q[1, 2] = Q[2, 0] * Q[0, 1] - Q[0, 0] * Q[2, 1] + Q[2, 2] = Q[0, 0] * Q[1, 1] - Q[1, 0] * Q[0, 1] + + if early_ret: + Q = Q_final + eigenvalues = eigenvalues_final + + if eigenvalues[1] < eigenvalues[0]: + tmp = eigenvalues[0] + eigenvalues[0] = eigenvalues[1] + eigenvalues[1] = tmp + tmp2 = Q[:, 0] + Q[:, 0] = Q[:, 1] + Q[:, 1] = tmp2 + + if eigenvalues[2] < eigenvalues[0]: + tmp = eigenvalues[0] + eigenvalues[0] = eigenvalues[2] + eigenvalues[2] = tmp + tmp2 = Q[:, 0] + Q[:, 0] = Q[:, 2] + Q[:, 2] = tmp2 + + if eigenvalues[2] < eigenvalues[1]: + tmp = eigenvalues[1] + eigenvalues[1] = eigenvalues[2] + eigenvalues[2] = tmp + tmp2 = Q[:, 1] + Q[:, 1] = Q[:, 2] + Q[:, 2] = tmp2 + + return eigenvalues, Q + + +def polar_decompose(A, dt=None): + """Perform polar decomposition (A=UP) for arbitrary size matrix. + + Mathematical concept refers to https://en.wikipedia.org/wiki/Polar_decomposition. + + Args: + A (ti.Matrix(n, n)): input nxn matrix `A`. + dt (DataType): date type of elements in matrix `A`, typically accepts ti.f32 or ti.f64. + + Returns: + Decomposed nxn matrices `U` and `P`. + """ + if dt is None: + dt = impl.get_runtime().default_fp + if A.n == 2: + return _polar_decompose2d(A, dt) + if A.n == 3: + return _polar_decompose3d(A, dt) + raise Exception("Polar decomposition only supports 2D and 3D matrices.") + + +def svd(A, dt=None): + """Perform singular value decomposition (A=USV^T) for arbitrary size matrix. + + Mathematical concept refers to https://en.wikipedia.org/wiki/Singular_value_decomposition. + + Args: + A (ti.Matrix(n, n)): input nxn matrix `A`. + dt (DataType): date type of elements in matrix `A`, typically accepts ti.f32 or ti.f64. + + Returns: + Decomposed nxn matrices `U`, 'S' and `V`. + """ + if dt is None: + dt = impl.get_runtime().default_fp + if A.n == 2: + return _svd2d(A, dt) + if A.n == 3: + return _svd3d(A, dt) + raise Exception("SVD only supports 2D and 3D matrices.") + + +def eig(A, dt=None): + """Compute the eigenvalues and right eigenvectors of a real matrix. + + Mathematical concept refers to https://en.wikipedia.org/wiki/Eigendecomposition_of_a_matrix. + + Args: + A (ti.Matrix(n, n)): 2D Matrix for which the eigenvalues and right eigenvectors will be computed. + dt (DataType): The datatype for the eigenvalues and right eigenvectors. + + Returns: + eigenvalues (ti.Matrix(n, 2)): The eigenvalues in complex form. Each row stores one eigenvalue. The first number of the eigenvalue represents the real part and the second number represents the imaginary part. + eigenvectors (ti.Matrix(n*2, n)): The eigenvectors in complex form. Each column stores one eigenvector. Each eigenvector consists of n entries, each of which is represented by two numbers for its real part and imaginary part. + """ + if dt is None: + dt = impl.get_runtime().default_fp + if A.n == 2: + return _eig2x2(A, dt) + raise Exception("Eigen solver only supports 2D matrices.") + + +def sym_eig(A, dt=None): + """Compute the eigenvalues and right eigenvectors of a real symmetric matrix. + + Mathematical concept refers to https://en.wikipedia.org/wiki/Eigendecomposition_of_a_matrix. + + Args: + A (ti.Matrix(n, n)): Symmetric Matrix for which the eigenvalues and right eigenvectors will be computed. + dt (DataType): The datatype for the eigenvalues and right eigenvectors. + + Returns: + eigenvalues (ti.Vector(n)): The eigenvalues. Each entry store one eigen value. + eigenvectors (ti.Matrix(n, n)): The eigenvectors. Each column stores one eigenvector. + """ + if dt is None: + dt = impl.get_runtime().default_fp + if A.n == 2: + return _sym_eig2x2(A, dt) + if A.n == 3: + return _sym_eig3x3(A, dt) + raise Exception("Symmetric eigen solver only supports 2D and 3D matrices.") + + +@func +def _gauss_elimination_2x2(Ab, dt): + if ops.abs(Ab[0, 0]) < ops.abs(Ab[1, 0]): + Ab[0, 0], Ab[1, 0] = Ab[1, 0], Ab[0, 0] + Ab[0, 1], Ab[1, 1] = Ab[1, 1], Ab[0, 1] + Ab[0, 2], Ab[1, 2] = Ab[1, 2], Ab[0, 2] + assert Ab[0, 0] != 0.0, "Matrix is singular in linear solve." + scale = Ab[1, 0] / Ab[0, 0] + Ab[1, 0] = 0.0 + for k in static(range(1, 3)): + Ab[1, k] -= Ab[0, k] * scale + x = Vector.zero(dt, 2) + # Back substitution + x[1] = Ab[1, 2] / Ab[1, 1] + x[0] = (Ab[0, 2] - Ab[0, 1] * x[1]) / Ab[0, 0] + return x + + +@func +def _gauss_elimination_3x3(Ab, dt): + for i in static(range(3)): + max_row = i + max_v = ops.abs(Ab[i, i]) + for j in static(range(i + 1, 3)): + if ops.abs(Ab[j, i]) > max_v: + max_row = j + max_v = ops.abs(Ab[j, i]) + assert max_v != 0.0, "Matrix is singular in linear solve." + if i != max_row: + if max_row == 1: + for col in static(range(4)): + Ab[i, col], Ab[1, col] = Ab[1, col], Ab[i, col] + else: + for col in static(range(4)): + Ab[i, col], Ab[2, col] = Ab[2, col], Ab[i, col] + assert Ab[i, i] != 0.0, "Matrix is singular in linear solve." + for j in static(range(i + 1, 3)): + scale = Ab[j, i] / Ab[i, i] + Ab[j, i] = 0.0 + for k in static(range(i + 1, 4)): + Ab[j, k] -= Ab[i, k] * scale + # Back substitution + x = Vector.zero(dt, 3) + for i in static(range(2, -1, -1)): + x[i] = Ab[i, 3] + for k in static(range(i + 1, 3)): + x[i] -= Ab[i, k] * x[k] + x[i] = x[i] / Ab[i, i] + return x + + +@func +def _combine(A, b, dt): + n = static(A.n) + Ab = Matrix.zero(dt, n, n + 1) + for i in static(range(n)): + for j in static(range(n)): + Ab[i, j] = A[i, j] + for i in static(range(n)): + Ab[i, n] = b[i] + return Ab + + +def solve(A, b, dt=None): + """Solve a matrix using Gauss elimination method. + + Args: + A (ti.Matrix(n, n)): input nxn matrix `A`. + b (ti.Vector(n, 1)): input nx1 vector `b`. + dt (DataType): The datatype for the `A` and `b`. + + Returns: + x (ti.Vector(n, 1)): the solution of Ax=b. + """ + assert A.n == A.m, "Only square matrix is supported" + assert A.n >= 2 and A.n <= 3, "Only 2D and 3D matrices are supported" + assert A.m == b.n, "Matrix and Vector dimension dismatch" + if dt is None: + dt = impl.get_runtime().default_fp + Ab = _combine(A, b, dt) + if A.n == 2: + return _gauss_elimination_2x2(Ab, dt) + if A.n == 3: + return _gauss_elimination_3x3(Ab, dt) + raise Exception("Solver only supports 2D and 3D matrices.") + + +@func +def field_fill_taichi_scope(F: template(), val: template()): + for I in grouped(F): + F[I] = val + + +__all__ = ["randn", "polar_decompose", "eig", "sym_eig", "svd", "solve"] diff --git a/local_packages/taichi/_kernels.py b/local_packages/taichi/_kernels.py new file mode 100644 index 0000000..7eddf53 --- /dev/null +++ b/local_packages/taichi/_kernels.py @@ -0,0 +1,419 @@ +from taichi._funcs import field_fill_taichi_scope +from taichi._lib.utils import get_os_name +from taichi.lang import ops +from taichi.lang._ndrange import ndrange +from taichi.lang.enums import Format +from taichi.lang.expr import Expr +from taichi.lang.field import ScalarField +from taichi.lang.impl import grouped, static, static_assert +from taichi.lang.kernel_impl import func, kernel +from taichi.lang.misc import loop_config +from taichi.lang.simt import block, warp +from taichi.lang.snode import deactivate +from taichi.types import ndarray_type, texture_type, vector +from taichi.types.annotations import template +from taichi.types.primitive_types import f16, f32, f64, i32, u8 + +from taichi.math import vec3 + + +# A set of helper (meta)functions +@kernel +def fill_field(field: template(), val: template()): + value = ops.cast(val, field.dtype) + for I in grouped(field): + field[I] = value + + +@kernel +def fill_ndarray(ndarray: ndarray_type.ndarray(), val: template()): + for I in grouped(ndarray): + ndarray[I] = val + + +@kernel +def fill_ndarray_matrix(ndarray: ndarray_type.ndarray(), val: template()): + for I in grouped(ndarray): + ndarray[I] = val + + +@kernel +def tensor_to_ext_arr(tensor: template(), arr: ndarray_type.ndarray()): + # default value of offset is [], replace it with [0] * len + offset = static(tensor.snode.ptr.offset if len(tensor.snode.ptr.offset) != 0 else [0] * len(tensor.shape)) + + for I in grouped(tensor): + arr[I - offset] = tensor[I] + + +@kernel +def ndarray_to_ext_arr(ndarray: ndarray_type.ndarray(), arr: ndarray_type.ndarray()): + for I in grouped(ndarray): + arr[I] = ndarray[I] + + +@kernel +def ndarray_matrix_to_ext_arr( + ndarray: ndarray_type.ndarray(), + arr: ndarray_type.ndarray(), + layout_is_aos: template(), + as_vector: template(), +): + for I in grouped(ndarray): + for p in static(range(ndarray[I].n)): + if static(as_vector): + if static(layout_is_aos): + arr[I, p] = ndarray[I][p] + else: + arr[p, I] = ndarray[I][p] + else: + for q in static(range(ndarray[I].m)): + if static(layout_is_aos): + arr[I, p, q] = ndarray[I][p, q] + else: + arr[p, q, I] = ndarray[I][p, q] + + +@kernel +def vector_to_fast_image(img: template(), out: ndarray_type.ndarray()): + static_assert(len(img.shape) == 2) + offset = static(img.snode.ptr.offset if len(img.snode.ptr.offset) != 0 else [0, 0]) + i_offset = static(offset[0]) + j_offset = static(offset[1]) + # FIXME: Why is ``for i, j in img:`` slower than: + for i, j in ndrange(*img.shape): + r, g, b = 0, 0, 0 + color = img[i + i_offset, (img.shape[1] + j_offset) - 1 - j] + if static(img.dtype in [f16, f32, f64]): + r, g, b = ops.min(255, ops.max(0, int(color * 255)))[:3] + else: + static_assert(img.dtype == u8) + r, g, b = color[:3] + + idx = j * img.shape[0] + i + # We use i32 for |out| since OpenGL and Metal doesn't support u8 types + if static(get_os_name() != "osx"): + out[idx] = (r << 16) + (g << 8) + b + else: + # What's -16777216? + # + # On Mac, we need to set the alpha channel to 0xff. Since Mac's GUI + # is big-endian, the color is stored in ABGR order, and we need to + # add 0xff000000, which is -16777216 in I32's legit range. (Albeit + # the clarity, adding 0xff000000 doesn't work.) + alpha = -16777216 + out[idx] = (b << 16) + (g << 8) + r + alpha + + +@kernel +def tensor_to_image(tensor: template(), arr: ndarray_type.ndarray()): + # default value of offset is [], replace it with [0] * len + offset = static(tensor.snode.ptr.offset if len(tensor.snode.ptr.offset) != 0 else [0] * len(tensor.shape)) + for I in grouped(tensor): + t = ops.cast(tensor[I], f32) + arr[I - offset, 0] = t + arr[I - offset, 1] = t + arr[I - offset, 2] = t + + +@kernel +def vector_to_image(mat: template(), arr: ndarray_type.ndarray()): + # default value of offset is [], replace it with [0] * len + offset = static(mat.snode.ptr.offset if len(mat.snode.ptr.offset) != 0 else [0] * len(mat.shape)) + for I in grouped(mat): + for p in static(range(mat.n)): + arr[I - offset, p] = ops.cast(mat[I][p], f32) + if static(mat.n <= 2): + arr[I - offset, 2] = 0 + + +@kernel +def tensor_to_tensor(tensor: template(), other: template()): + static_assert(tensor.shape == other.shape) + shape = static(tensor.shape) + tensor_offset = static(tensor.snode.ptr.offset if len(tensor.snode.ptr.offset) != 0 else [0] * len(shape)) + other_offset = static(other.snode.ptr.offset if len(other.snode.ptr.offset) != 0 else [0] * len(shape)) + + for I in grouped(ndrange(*shape)): + tensor[I + tensor_offset] = other[I + other_offset] + + +@kernel +def ext_arr_to_tensor(arr: ndarray_type.ndarray(), tensor: template()): + # default value of offset is [], replace it with [0] * len + offset = static(tensor.snode.ptr.offset if len(tensor.snode.ptr.offset) != 0 else [0] * len(tensor.shape)) + for I in grouped(tensor): + tensor[I] = arr[I - offset] + + +@kernel +def ndarray_to_ndarray(ndarray: ndarray_type.ndarray(), other: ndarray_type.ndarray()): + for I in grouped(ndarray): + ndarray[I] = other[I] + + +@kernel +def ext_arr_to_ndarray(arr: ndarray_type.ndarray(), ndarray: ndarray_type.ndarray()): + for I in grouped(ndarray): + ndarray[I] = arr[I] + + +@kernel +def ext_arr_to_ndarray_matrix( + arr: ndarray_type.ndarray(), + ndarray: ndarray_type.ndarray(), + layout_is_aos: template(), + as_vector: template(), +): + for I in grouped(ndarray): + for p in static(range(ndarray[I].n)): + if static(as_vector): + if static(layout_is_aos): + ndarray[I][p] = arr[I, p] + else: + ndarray[I][p] = arr[p, I] + else: + for q in static(range(ndarray[I].m)): + if static(layout_is_aos): + ndarray[I][p, q] = arr[I, p, q] + else: + ndarray[I][p, q] = arr[p, q, I] + + +@kernel +def matrix_to_ext_arr(mat: template(), arr: ndarray_type.ndarray(), as_vector: template()): + # default value of offset is [], replace it with [0] * len + offset = static(mat.snode.ptr.offset if len(mat.snode.ptr.offset) != 0 else [0] * len(mat.shape)) + + for I in grouped(mat): + for p in static(range(mat.n)): + for q in static(range(mat.m)): + if static(as_vector): + if static(getattr(mat, "ndim", 2) == 1): + arr[I - offset, p] = mat[I][p] + else: + arr[I - offset, p] = mat[I][p, q] + else: + if static(getattr(mat, "ndim", 2) == 1): + arr[I - offset, p, q] = mat[I][p] + else: + arr[I - offset, p, q] = mat[I][p, q] + + +@kernel +def ext_arr_to_matrix(arr: ndarray_type.ndarray(), mat: template(), as_vector: template()): + # default value of offset is [], replace it with [0] * len + offset = static(mat.snode.ptr.offset if len(mat.snode.ptr.offset) != 0 else [0] * len(mat.shape)) + + for I in grouped(mat): + for p in static(range(mat.n)): + for q in static(range(mat.m)): + if static(getattr(mat, "ndim", 2) == 1): + if static(as_vector): + mat[I][p] = arr[I - offset, p] + else: + mat[I][p] = arr[I - offset, p, q] + else: + if static(as_vector): + mat[I][p, q] = arr[I - offset, p] + else: + mat[I][p, q] = arr[I - offset, p, q] + + +# extract ndarray of raw vulkan memory layout to normal memory layout. +# the vulkan layout stored in ndarray : width-by-width stored along n- +# darray's shape[1] which is the height-axis(So use [size // h, size % +# h]). And the height-order of vulkan layout is flip up-down.(So take +# [size = (h - 1 - j) * w + i] to get the index) +@kernel +def arr_vulkan_layout_to_arr_normal_layout(vk_arr: ndarray_type.ndarray(), normal_arr: ndarray_type.ndarray()): + static_assert(len(normal_arr.shape) == 2) + w = normal_arr.shape[0] + h = normal_arr.shape[1] + for i, j in ndrange(w, h): + normal_arr[i, j] = vk_arr[(h - 1 - j) * w + i] + + +# extract ndarray of raw vulkan memory layout into a taichi-field data +# structure with normal memory layout. +@kernel +def arr_vulkan_layout_to_field_normal_layout(vk_arr: ndarray_type.ndarray(), normal_field: template()): + static_assert(len(normal_field.shape) == 2) + w = static(normal_field.shape[0]) + h = static(normal_field.shape[1]) + offset = static(normal_field.snode.ptr.offset if len(normal_field.snode.ptr.offset) != 0 else [0, 0]) + i_offset = static(offset[0]) + j_offset = static(offset[1]) + + for i, j in ndrange(w, h): + normal_field[i + i_offset, j + j_offset] = vk_arr[(h - 1 - j) * w + i] + + +@kernel +def clear_gradients(_vars: template()): + for I in grouped(ScalarField(Expr(_vars[0]))): + for s in static(_vars): + ScalarField(Expr(s))[I] = ops.cast(0, dtype=s.get_dt()) + + +@kernel +def field_fill_python_scope(F: template(), val: template()): + field_fill_taichi_scope(F, val) + + +@kernel +def snode_deactivate(b: template()): + for I in grouped(b): + deactivate(b, I) + + +@kernel +def snode_deactivate_dynamic(b: template()): + for I in grouped(b.parent()): + deactivate(b, I) + + +@kernel +def load_texture_from_numpy( + tex: texture_type.rw_texture(num_dimensions=2, fmt=Format.rgba8, lod=0), + img: ndarray_type.ndarray(dtype=vec3, ndim=2), +): + for i, j in img: + tex.store( + vector(2, i32)([i, j]), + vector(4, f32)([img[i, j][0], img[i, j][1], img[i, j][2], 0]) / 255.0, + ) + + +@kernel +def save_texture_to_numpy( + tex: texture_type.rw_texture(num_dimensions=2, fmt=Format.rgba8, lod=0), + img: ndarray_type.ndarray(dtype=vec3, ndim=2), +): + for i, j in img: + img[i, j] = ops.round(tex.load(vector(2, i32)([i, j])).rgb * 255) + + +# Odd-even merge sort +@kernel +def sort_stage( + keys: template(), + use_values: int, + values: template(), + N: int, + p: int, + k: int, + invocations: int, +): + keys_offset = static(keys.snode.ptr.offset if len(keys.snode.ptr.offset) != 0 else 0) + values_offset = static(values.snode.ptr.offset if len(values.snode.ptr.offset) != 0 else 0) + for inv in range(invocations): + j = k % p + inv * 2 * k + for i in range(0, ops.min(k, N - j - k)): + a = i + j + b = i + j + k + if int(a / (p * 2)) == int(b / (p * 2)): + key_a = keys[a + keys_offset] + key_b = keys[b + keys_offset] + if key_a > key_b: + keys[a + keys_offset] = key_b + keys[b + keys_offset] = key_a + if use_values != 0: + temp = values[a + values_offset] + values[a + values_offset] = values[b + values_offset] + values[b + values_offset] = temp + + +# Parallel Prefix Sum (Scan) +@func +def warp_shfl_up_i32(val: template()): + global_tid = block.global_thread_idx() + WARP_SZ = 32 + lane_id = global_tid % WARP_SZ + # Intra-warp scan, manually unrolled + offset_j = 1 + n = warp.shfl_up_i32(warp.active_mask(), val, offset_j) + if lane_id >= offset_j: + val += n + offset_j = 2 + n = warp.shfl_up_i32(warp.active_mask(), val, offset_j) + if lane_id >= offset_j: + val += n + offset_j = 4 + n = warp.shfl_up_i32(warp.active_mask(), val, offset_j) + if lane_id >= offset_j: + val += n + offset_j = 8 + n = warp.shfl_up_i32(warp.active_mask(), val, offset_j) + if lane_id >= offset_j: + val += n + offset_j = 16 + n = warp.shfl_up_i32(warp.active_mask(), val, offset_j) + if lane_id >= offset_j: + val += n + return val + + +@kernel +def scan_add_inclusive( + arr_in: template(), + in_beg: i32, + in_end: i32, + single_block: template(), + inclusive_add: template(), +): + WARP_SZ = 32 + BLOCK_SZ = 64 + loop_config(block_dim=64) + for i in range(in_beg, in_end): + val = arr_in[i] + + thread_id = i % BLOCK_SZ + block_id = int((i - in_beg) // BLOCK_SZ) + lane_id = thread_id % WARP_SZ + warp_id = thread_id // WARP_SZ + + pad_shared = block.SharedArray((65,), i32) + + val = inclusive_add(val) + block.sync() + + # Put warp scan results to smem + # TODO replace smem with real smem when available + if thread_id % WARP_SZ == WARP_SZ - 1: + pad_shared[warp_id] = val + block.sync() + + # Inter-warp scan, use the first thread in the first warp + if warp_id == 0 and lane_id == 0: + for k in range(1, BLOCK_SZ / WARP_SZ): + pad_shared[k] += pad_shared[k - 1] + block.sync() + + # Update data with warp sums + warp_sum = 0 + if warp_id > 0: + warp_sum = pad_shared[warp_id - 1] + val += warp_sum + arr_in[i] = val + + # Update partial sums except the final block + if not single_block and (thread_id == BLOCK_SZ - 1): + arr_in[in_end + block_id] = val + + +@kernel +def uniform_add(arr_in: template(), in_beg: i32, in_end: i32): + BLOCK_SZ = 64 + loop_config(block_dim=64) + for i in range(in_beg + BLOCK_SZ, in_end): + block_id = int((i - in_beg) // BLOCK_SZ) + arr_in[i] += arr_in[in_end + block_id - 1] + + +@kernel +def blit_from_field_to_field(dst: template(), src: template(), offset: i32, size: i32): + dst_offset = static(dst.snode.ptr.offset if len(dst.snode.ptr.offset) != 0 else 0) + src_offset = static(src.snode.ptr.offset if len(src.snode.ptr.offset) != 0 else 0) + for i in range(size): + dst[i + dst_offset + offset] = src[i + src_offset] diff --git a/local_packages/taichi/_lib/__init__.py b/local_packages/taichi/_lib/__init__.py new file mode 100644 index 0000000..ae8858c --- /dev/null +++ b/local_packages/taichi/_lib/__init__.py @@ -0,0 +1 @@ +from taichi._lib.utils import ti_python_core as core diff --git a/local_packages/taichi/_lib/c_api/bin/taichi_c_api.dll b/local_packages/taichi/_lib/c_api/bin/taichi_c_api.dll new file mode 100644 index 0000000..8d3a22c Binary files /dev/null and b/local_packages/taichi/_lib/c_api/bin/taichi_c_api.dll differ diff --git a/local_packages/taichi/_lib/c_api/include/taichi/cpp/taichi.hpp b/local_packages/taichi/_lib/c_api/include/taichi/cpp/taichi.hpp new file mode 100644 index 0000000..d8bd646 --- /dev/null +++ b/local_packages/taichi/_lib/c_api/include/taichi/cpp/taichi.hpp @@ -0,0 +1,1401 @@ +// C++ wrapper of Taichi C-API +#pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ti { + +struct Version { + uint32_t version; + + explicit Version(uint32_t version) : version(version) { + } + Version(uint32_t major, uint32_t minor, uint32_t patch) + : version((major * 1000 + minor) * 1000 + patch) { + } + Version(const Version &) = default; + Version(Version &&) = default; + Version &operator=(const Version &) = default; + Version &operator=(Version &&) = default; + + inline uint32_t major() const { + return version / 1000000; + } + inline uint32_t minor() const { + return (version / 1000) % 1000; + } + inline uint32_t patch() const { + return version % 1000; + } +}; +inline Version get_version() { + return Version(ti_get_version()); +} + +inline std::vector get_available_archs() { + uint32_t narch = 0; + ti_get_available_archs(&narch, nullptr); + std::vector archs(narch); + ti_get_available_archs(&narch, archs.data()); + return archs; +} +inline std::vector get_available_archs( + const std::vector &expect_archs) { + std::vector actual_archs = get_available_archs(); + std::vector out_archs; + for (TiArch arch : actual_archs) { + auto it = std::find(expect_archs.begin(), expect_archs.end(), arch); + if (it != expect_archs.end()) { + out_archs.emplace_back(arch); + } + } + return out_archs; +} +inline bool is_arch_available(TiArch arch) { + std::vector archs = get_available_archs(); + for (size_t i = 0; i < archs.size(); ++i) { + if (archs.at(i) == arch) { + return true; + } + } + return false; +} + +struct Error { + TiError error; + std::string message; + + inline operator TiError() const { // NOLINT + return error; + } +}; + +inline Error get_last_error() { + uint64_t message_size = 0; + ti_get_last_error(&message_size, nullptr); + std::string message(message_size, '\0'); + TiError error = ti_get_last_error(&message_size, (char *)message.data()); + message.resize(message.size() - 1); + return Error{error, message}; +} +inline void check_last_error() { + Error error = get_last_error(); + if (error != TI_ERROR_SUCCESS) { +#ifdef TI_WITH_EXCEPTIONS + throw std::runtime_error(error.message); +#else + assert(false); +#endif // TI_WITH_EXCEPTIONS + } +} +inline void set_last_error(TiError error) { + ti_set_last_error(error, nullptr); +} +inline void set_last_error(TiError error, const std::string &message) { + ti_set_last_error(error, message.c_str()); +} +inline void set_last_error(const Error &error) { + set_last_error(error.error, error.message); +} + +namespace detail { + +// Template type to data type enum. +template +struct templ2dtype {}; +template <> +struct templ2dtype { + static const TiDataType value = TI_DATA_TYPE_I8; +}; +template <> +struct templ2dtype { + static const TiDataType value = TI_DATA_TYPE_I16; +}; +template <> +struct templ2dtype { + static const TiDataType value = TI_DATA_TYPE_I32; +}; +template <> +struct templ2dtype { + static const TiDataType value = TI_DATA_TYPE_U8; +}; +template <> +struct templ2dtype { + static const TiDataType value = TI_DATA_TYPE_U16; +}; +template <> +struct templ2dtype { + static const TiDataType value = TI_DATA_TYPE_U32; +}; +template <> +struct templ2dtype { + static const TiDataType value = TI_DATA_TYPE_F32; +}; +template <> +struct templ2dtype { + static const TiDataType value = TI_DATA_TYPE_F64; +}; + +template +T exchange(T &storage, U &&value) { + T out = std::move(storage); + storage = (T)std::move(value); + return out; +} + +template +THandle move_handle(THandle &handle) { + THandle out = std::move(handle); + handle = TI_NULL_HANDLE; + return out; +} + +} // namespace detail + +class MemorySlice { + TiRuntime runtime_{TI_NULL_HANDLE}; + TiMemorySlice slice_{}; + + public: + MemorySlice() = default; + MemorySlice(TiRuntime runtime, const TiMemorySlice &slice) + : runtime_(runtime), slice_(slice) { + } + MemorySlice(const MemorySlice &) = default; + MemorySlice(MemorySlice &&) = default; + MemorySlice &operator=(const MemorySlice &) = default; + MemorySlice &operator=(MemorySlice &&) = default; + + inline void copy_to(const MemorySlice &dst) const { + if (runtime_ != dst.runtime_) { + ti_set_last_error( + TI_ERROR_INVALID_ARGUMENT, + "cannot copy device memory between different runtime instances"); + return; + } + if (slice_.size != dst.slice_.size) { + ti_set_last_error( + TI_ERROR_INVALID_ARGUMENT, + "copy source and destination slice must have the same size"); + return; + } + ti_copy_memory_device_to_device(runtime_, &dst.slice_, &slice_); + } + + inline TiMemory memory() const { + return slice_.memory; + } + inline uint64_t offset() const { + return slice_.offset; + } + inline uint64_t size() const { + return slice_.size; + } + inline const TiMemorySlice &slice() const { + return slice_; + } + inline operator const TiMemorySlice &() const { // NOLINT + return slice_; + } +}; + +class Memory { + protected: + TiRuntime runtime_{TI_NULL_HANDLE}; + TiMemory memory_{TI_NULL_HANDLE}; + size_t size_{0}; + bool should_destroy_{false}; + + public: + constexpr bool is_valid() const { + return runtime_ != nullptr; + } + inline void destroy() { + if (should_destroy_) { + ti_free_memory(runtime_, memory_); + memory_ = TI_NULL_HANDLE; + should_destroy_ = false; + } + } + + Memory() { + } + Memory(const Memory &) = delete; + Memory(Memory &&b) + : runtime_(detail::move_handle(b.runtime_)), + memory_(detail::move_handle(b.memory_)), + size_(detail::exchange(b.size_, 0)), + should_destroy_(detail::exchange(b.should_destroy_, false)) { + } + Memory(TiRuntime runtime, TiMemory memory, size_t size, bool should_destroy) + : runtime_(runtime), + memory_(memory), + size_(size), + should_destroy_(should_destroy) { + } + ~Memory() { + destroy(); + } + + Memory &operator=(const Memory &) = delete; + Memory &operator=(Memory &&b) { + destroy(); + runtime_ = detail::move_handle(b.runtime_); + memory_ = detail::move_handle(b.memory_); + size_ = detail::exchange(b.size_, 0); + should_destroy_ = detail::exchange(b.should_destroy_, false); + return *this; + } + + inline Memory borrow() const { + return Memory(runtime_, memory_, size_, false); + } + + void *map() const { + return ti_map_memory(runtime_, memory_); + } + void unmap() const { + ti_unmap_memory(runtime_, memory_); + } + + inline void read(void *dst, size_t size) const { + void *src = map(); + if (src != nullptr) { + std::memcpy(dst, src, size); + } + unmap(); + } + inline void write(const void *src, size_t size) const { + void *dst = map(); + if (dst != nullptr) { + std::memcpy(dst, src, size); + } + unmap(); + } + + inline void copy_to(const ti::Memory &dst) const { + slice().copy_to(dst.slice()); + } + + inline MemorySlice slice(size_t offset, size_t size) const { + if (offset + size > size_) { + ti_set_last_error(TI_ERROR_ARGUMENT_OUT_OF_RANGE, "size"); + return {}; + } + TiMemorySlice slice{}; + slice.memory = memory_; + slice.offset = offset; + slice.size = size; + return MemorySlice(runtime_, slice); + } + inline MemorySlice slice() const { + return slice(0, size_); + } + + constexpr size_t size() const { + return size_; + } + constexpr TiMemory memory() const { + return memory_; + } + constexpr operator TiMemory() const { // NOLINT + return memory_; + } +}; + +template +class NdArray { + protected: + Memory memory_{}; + TiNdArray ndarray_{}; + size_t elem_count_{}; + size_t scalar_count_{}; + + public: + constexpr bool is_valid() const { + return memory_.is_valid(); + } + inline void destroy() { + memory_.destroy(); + } + + NdArray() : elem_count_(1), scalar_count_(1) { + } + NdArray(const NdArray &) = delete; + NdArray(NdArray &&b) + : memory_(std::move(b.memory_)), + ndarray_(detail::exchange(b.ndarray_, TiNdArray{})), + elem_count_(detail::exchange(b.elem_count_, 1)), + scalar_count_(detail::exchange(b.scalar_count_, 1)) { + } + NdArray(Memory &&memory, const TiNdArray &ndarray) + : memory_(std::move(memory)), + ndarray_(ndarray), + elem_count_(1), + scalar_count_(1) { + if (ndarray.memory != memory_) { + ti_set_last_error(TI_ERROR_INVALID_ARGUMENT, "ndarray.memory != memory"); + } + for (uint32_t i = 0; i < ndarray_.shape.dim_count; ++i) { + elem_count_ *= ndarray_.shape.dims[i]; + } + scalar_count_ *= elem_count_; + for (uint32_t i = 0; i < ndarray_.elem_shape.dim_count; ++i) { + scalar_count_ *= ndarray_.elem_shape.dims[i]; + } + } + ~NdArray() { + destroy(); + } + + NdArray &operator=(const NdArray &) = delete; + NdArray &operator=(NdArray &&b) { + destroy(); + memory_ = std::move(b.memory_); + ndarray_ = detail::exchange(b.ndarray_, TiNdArray{}); + elem_count_ = detail::exchange(b.elem_count_, 1); + scalar_count_ = detail::exchange(b.scalar_count_, 1); + return *this; + } + + inline NdArray borrow() const { + return NdArray(memory_.borrow(), ndarray_); + } + + inline void *map() const { + return memory_.map(); + } + inline void unmap() const { + return memory_.unmap(); + } + + inline size_t scalar_count() const { + return scalar_count_; + } + inline size_t elem_count() const { + return elem_count_; + } + + inline void read(T *dst, size_t count) const { + if (count > scalar_count_) { + ti_set_last_error( + TI_ERROR_ARGUMENT_OUT_OF_RANGE, + "ndarray read ouf of range; please ensure you specified the correct " + "number of elements (rather than size-in-bytes) to be read"); + return; + } + memory_.read(dst, count * sizeof(T)); + } + inline void read(std::vector &dst) const { + read(dst.data(), dst.size()); + } + template + inline void read(std::vector &dst) const { + static_assert(sizeof(U) % sizeof(T) == 0, + "sizeof(U) must be a multiple of sizeof(T)"); + read((T *)dst.data(), dst.size() * (sizeof(U) / sizeof(T))); + } + inline void write(const T *src, size_t count) const { + if (count > scalar_count_) { + ti_set_last_error( + TI_ERROR_ARGUMENT_OUT_OF_RANGE, + "ndarray write ouf of range; please ensure you specified the correct " + "number of elements (rather than size-in-bytes) to be written"); + return; + } + memory_.write(src, count * sizeof(T)); + } + inline void write(const std::vector &src) const { + write(src.data(), src.size()); + } + template + inline void write(const std::vector &src) const { + static_assert(sizeof(U) % sizeof(T) == 0, + "sizeof(U) must be a multiple of sizeof(T)"); + write((const T *)src.data(), src.size() * (sizeof(U) / sizeof(T))); + } + + template + inline void copy_to(const ti::NdArray &dst) const { + memory().copy_to(dst.memory()); + } + + inline MemorySlice slice() const { + return memory_.slice(); + } + + constexpr TiDataType elem_type() const { + return ndarray_.elem_type; + } + constexpr const TiNdShape &shape() const { + return ndarray_.shape; + } + constexpr const TiNdShape &elem_shape() const { + return ndarray_.elem_shape; + } + constexpr const Memory &memory() const { + return memory_; + } + constexpr const TiNdArray &ndarray() const { + return ndarray_; + } + constexpr operator TiNdArray() const { // NOLINT + return ndarray_; + } +}; + +class ImageSlice { + TiRuntime runtime_{TI_NULL_HANDLE}; + TiImageSlice slice_{}; + + public: + ImageSlice() = default; + ImageSlice(TiRuntime runtime, const TiImageSlice &slice) + : runtime_(runtime), slice_(slice) { + } + ImageSlice(const ImageSlice &) = default; + ImageSlice(ImageSlice &&) = default; + ImageSlice &operator=(const ImageSlice &) = default; + ImageSlice &operator=(ImageSlice &&) = default; + + inline void copy_to(const ImageSlice &dst) const { + if (runtime_ != dst.runtime_) { + ti_set_last_error( + TI_ERROR_INVALID_ARGUMENT, + "cannot copy device memory between different runtime instances"); + return; + } + if (slice_.extent.width != dst.slice_.extent.width || + slice_.extent.height != dst.slice_.extent.height || + slice_.extent.depth != dst.slice_.extent.depth || + slice_.extent.array_layer_count != + dst.slice_.extent.array_layer_count) { + ti_set_last_error( + TI_ERROR_INVALID_ARGUMENT, + "copy source and destination slice must have the same size"); + return; + } + ti_copy_image_device_to_device(runtime_, &dst.slice_, &slice_); + } + + inline TiImage image() const { + return slice_.image; + } + inline const TiImageOffset &offset() const { + return slice_.offset; + } + inline const TiImageExtent &extent() const { + return slice_.extent; + } + inline const uint32_t &mip_level() const { + return slice_.mip_level; + } + inline const TiImageSlice &slice() const { + return slice_; + } + inline operator TiImageSlice() const { // NOLINT + return slice_; + } +}; + +class Image { + protected: + TiRuntime runtime_{TI_NULL_HANDLE}; + TiImage image_{TI_NULL_HANDLE}; + TiImageDimension dimension_{TI_IMAGE_DIMENSION_MAX_ENUM}; + TiImageExtent extent_{0, 0, 0}; + uint32_t mip_level_count_; + TiFormat format_{TI_FORMAT_UNKNOWN}; + bool should_destroy_{false}; + + public: + constexpr bool is_valid() const { + return image_ != nullptr; + } + inline void destroy() { + if (should_destroy_) { + ti_free_image(runtime_, image_); + image_ = TI_NULL_HANDLE; + should_destroy_ = false; + } + } + + Image() { + } + Image(const Image &b) = delete; + Image(Image &&b) + : runtime_(detail::move_handle(b.runtime_)), + image_(detail::move_handle(b.image_)), + dimension_(detail::exchange(b.dimension_, TI_IMAGE_DIMENSION_MAX_ENUM)), + extent_(detail::exchange(b.extent_, TiImageExtent{0, 0, 0})), + mip_level_count_(detail::exchange(b.mip_level_count_, 0)), + format_(detail::exchange(b.format_, TI_FORMAT_UNKNOWN)), + should_destroy_(detail::exchange(b.should_destroy_, false)) { + } + Image(TiRuntime runtime, + TiImage image, + TiImageDimension dimension, + const TiImageExtent &extent, + uint32_t mip_level_count, + TiFormat format, + bool should_destroy) + : runtime_(runtime), + image_(image), + dimension_(dimension), + extent_(extent), + mip_level_count_(mip_level_count), + format_(format), + should_destroy_(should_destroy) { + } + ~Image() { + destroy(); + } + + Image &operator=(const Image &) = delete; + Image &operator=(Image &&b) { + destroy(); + runtime_ = detail::move_handle(b.runtime_); + image_ = detail::move_handle(b.image_); + dimension_ = detail::exchange(b.dimension_, TI_IMAGE_DIMENSION_MAX_ENUM); + extent_ = detail::exchange(b.extent_, TiImageExtent{0, 0, 0}); + mip_level_count_ = detail::exchange(b.mip_level_count_, 0); + format_ = detail::exchange(b.format_, TI_FORMAT_UNKNOWN); + should_destroy_ = detail::exchange(b.should_destroy_, false); + return *this; + } + + inline Image borrow() const { + return Image(runtime_, image_, dimension_, extent_, mip_level_count_, + format_, false); + } + + inline void copy_to(const Image &dst) const { + slice().copy_to(dst.slice()); + } + + inline void transition_to(TiImageLayout layout) const { + ti_transition_image(runtime_, image_, layout); + } + + inline ImageSlice slice(const TiImageOffset &offset, + const TiImageExtent &extent, + uint32_t mip_level) const { + if (offset.x + extent.width > extent_.width || + offset.y + extent.height > extent_.height || + offset.z + extent.depth > extent_.depth || + offset.array_layer_offset + extent.array_layer_count > + extent_.array_layer_count) { + ti_set_last_error(TI_ERROR_ARGUMENT_OUT_OF_RANGE, "extent"); + return {}; + } + TiImageSlice slice{}; + slice.image = image_; + slice.extent = extent; + slice.offset = offset; + slice.mip_level = mip_level; + return ImageSlice(runtime_, slice); + } + inline ImageSlice slice() const { + return slice(TiImageOffset{}, extent_, 0); + } + + constexpr TiImageDimension dimension() const { + return dimension_; + } + constexpr const TiImageExtent &extent() const { + return extent_; + } + constexpr uint32_t mip_level_count() const { + return mip_level_count_; + } + constexpr TiFormat format() const { + return format_; + } + constexpr TiImage image() const { + return image_; + } + constexpr operator TiImage() const { // NOLINT + return image_; + } +}; + +class Texture { + protected: + Image image_{}; + TiTexture texture_{}; + + public: + constexpr bool is_valid() const { + return image_.is_valid(); + } + inline void destroy() { + image_.destroy(); + } + + Texture() { + } + Texture(const Texture &b) = delete; + Texture(Texture &&b) + : image_(std::move(b.image_)), texture_(std::move(b.texture_)) { + } + Texture(Image &&image, const TiTexture &texture) + : image_(std::move(image)), texture_(texture) { + if (texture.image != image_.image()) { + ti_set_last_error(TI_ERROR_INVALID_ARGUMENT, "texture.image != image"); + } + } + ~Texture() { + destroy(); + } + + Texture &operator=(const Texture &) = delete; + Texture &operator=(Texture &&b) { + destroy(); + image_ = std::move(b.image_); + texture_ = std::move(b.texture_); + return *this; + } + + inline Texture borrow() const { + return Texture(image_.borrow(), texture_); + } + + inline void copy_to(const Texture &dst) const { + slice().copy_to(dst.slice()); + } + + inline ImageSlice slice() const { + return image_.slice(); + } + + constexpr const Image &image() const { + return image_; + } + constexpr TiTexture texture() const { + return texture_; + } + constexpr operator TiTexture() const { // NOLINT + return texture_; + } +}; + +template +struct DataTypeToEnum { + static constexpr TiDataType value = TI_DATA_TYPE_UNKNOWN; +}; +#define DEFINE_DATA_TYPE_ENUM(type, enumv) \ + template <> \ + struct DataTypeToEnum { \ + static constexpr TiDataType value = TI_DATA_TYPE_##enumv; \ + }; + +DEFINE_DATA_TYPE_ENUM(int32_t, I32); +DEFINE_DATA_TYPE_ENUM(float, F32); +DEFINE_DATA_TYPE_ENUM(uint16_t, U16); +DEFINE_DATA_TYPE_ENUM(int16_t, I16); +DEFINE_DATA_TYPE_ENUM(uint8_t, U8); +DEFINE_DATA_TYPE_ENUM(int8_t, I8); +DEFINE_DATA_TYPE_ENUM(uint64_t, U64); +DEFINE_DATA_TYPE_ENUM(int64_t, I64); +#undef DEFINE_DATA_TYPE_ENUM + +class ArgumentEntry { + friend class ComputeGraph; + TiArgument *arg_; + + public: + ArgumentEntry() = delete; + ArgumentEntry(const ArgumentEntry &) = delete; + ArgumentEntry(ArgumentEntry &&b) : arg_(b.arg_) { + } + explicit ArgumentEntry(TiArgument *arg) : arg_(arg) { + } + + inline void set_f16(float value) { + arg_->type = TI_ARGUMENT_TYPE_SCALAR; + arg_->value.scalar.type = TI_DATA_TYPE_F16; + std::memcpy(&arg_->value.scalar.value.x32, &value, sizeof(value)); + } + inline void set_u16(uint16_t value) { + arg_->type = TI_ARGUMENT_TYPE_SCALAR; + arg_->value.scalar.type = TI_DATA_TYPE_U16; + std::memcpy(&arg_->value.scalar.value.x16, &value, sizeof(value)); + } + inline void set_i16(int16_t value) { + arg_->type = TI_ARGUMENT_TYPE_SCALAR; + arg_->value.scalar.type = TI_DATA_TYPE_I16; + std::memcpy(&arg_->value.scalar.value.x16, &value, sizeof(value)); + } + + inline ArgumentEntry &operator=(const TiArgument &b) { + *arg_ = b; + return *this; + } + inline ArgumentEntry &operator=(int32_t i32) { + arg_->type = TI_ARGUMENT_TYPE_I32; + arg_->value.i32 = i32; + return *this; + } + inline ArgumentEntry &operator=(float f32) { + arg_->type = TI_ARGUMENT_TYPE_F32; + arg_->value.f32 = f32; + return *this; + } + inline ArgumentEntry &operator=(uint16_t u16) { + this->set_u16(u16); + return *this; + } + inline ArgumentEntry &operator=(int16_t i16) { + this->set_i16(i16); + return *this; + } + inline ArgumentEntry &operator=(const TiNdArray &ndarray) { + arg_->type = TI_ARGUMENT_TYPE_NDARRAY; + arg_->value.ndarray = ndarray; + return *this; + } + inline ArgumentEntry &operator=(const TiTexture &texture) { + arg_->type = TI_ARGUMENT_TYPE_TEXTURE; + arg_->value.texture = texture; + return *this; + } + template + inline ArgumentEntry &operator=(const std::vector &matrix) { + arg_->type = TI_ARGUMENT_TYPE_TENSOR; + std::memcpy(arg_->value.tensor.contents.data.x8, matrix.data(), + matrix.size() * sizeof(T)); + arg_->value.tensor.contents.length = matrix.size(); + arg_->value.tensor.type = DataTypeToEnum::value; + return *this; + } + template + inline ArgumentEntry &operator=(const std::vector> &matrix) { + arg_->type = TI_ARGUMENT_TYPE_TENSOR; + uint32_t size = 0, bias = 0; + for (const auto &row : matrix) { + std::memcpy((arg_->value.tensor.contents.data.x8 + bias), row.data(), + row.size() * sizeof(T)); + size += row.size(); + bias += row.size() * sizeof(T); + } + arg_->value.tensor.contents.length = size; + arg_->value.tensor.type = DataTypeToEnum::value; + return *this; + } +}; + +class ComputeGraph { + protected: + TiRuntime runtime_{TI_NULL_HANDLE}; + TiComputeGraph compute_graph_{TI_NULL_HANDLE}; + std::list arg_names_{}; // For stable addresses. + std::vector args_{}; + + public: + constexpr bool is_valid() const { + return compute_graph_ != nullptr; + } + + ComputeGraph() { + } + ComputeGraph(const ComputeGraph &) = delete; + ComputeGraph(ComputeGraph &&b) + : runtime_(detail::move_handle(b.runtime_)), + compute_graph_(detail::move_handle(b.compute_graph_)), + arg_names_(std::move(b.arg_names_)), + args_(std::move(b.args_)) { + } + ComputeGraph(TiRuntime runtime, TiComputeGraph compute_graph) + : runtime_(runtime), compute_graph_(compute_graph) { + } + ~ComputeGraph() { + } + + ComputeGraph &operator=(const ComputeGraph &) = delete; + ComputeGraph &operator=(ComputeGraph &&b) { + runtime_ = detail::move_handle(b.runtime_); + compute_graph_ = detail::move_handle(b.compute_graph_); + arg_names_ = std::move(b.arg_names_); + args_ = std::move(b.args_); + return *this; + } + + inline ArgumentEntry at(const char *name) { + size_t i = 0; + auto it = arg_names_.begin(); + for (; it != arg_names_.end(); ++it) { + if (*it == name) { + break; + } + ++i; + } + + TiArgument *out; + if (it != arg_names_.end()) { + out = &args_.at(i).argument; + } else { + arg_names_.emplace_back(name); + args_.emplace_back(); + args_.back().name = arg_names_.back().c_str(); + out = &args_.back().argument; + } + + return ArgumentEntry(out); + }; + inline ArgumentEntry at(const std::string &name) { + return at(name.c_str()); + } + inline ArgumentEntry operator[](const char *name) { + return at(name); + } + inline ArgumentEntry operator[](const std::string &name) { + return at(name); + } + + void launch(uint32_t argument_count, const TiNamedArgument *arguments) const { + ti_launch_compute_graph(runtime_, compute_graph_, argument_count, + arguments); + } + void launch() const { + launch(args_.size(), args_.data()); + } + void launch(const std::vector &arguments) const { + launch(arguments.size(), arguments.data()); + } + + constexpr TiComputeGraph compute_graph() const { + return compute_graph_; + } + constexpr operator TiComputeGraph() const { // NOLINT + return compute_graph_; + } +}; + +class Kernel { + protected: + TiRuntime runtime_{TI_NULL_HANDLE}; + TiKernel kernel_{TI_NULL_HANDLE}; + std::vector args_{}; + + public: + constexpr bool is_valid() const { + return kernel_ != nullptr; + } + + Kernel() { + } + Kernel(const Kernel &) = delete; + Kernel(Kernel &&b) + : runtime_(detail::move_handle(b.runtime_)), + kernel_(detail::move_handle(b.kernel_)), + args_(std::move(b.args_)) { + } + Kernel(TiRuntime runtime, TiKernel kernel) + : runtime_(runtime), kernel_(kernel) { + } + + Kernel &operator=(const Kernel &) = delete; + Kernel &operator=(Kernel &&b) { + runtime_ = detail::move_handle(b.runtime_); + kernel_ = detail::move_handle(b.kernel_); + args_ = std::move(b.args_); + return *this; + } + + ArgumentEntry at(uint32_t i) { + if (i < args_.size()) { + return ArgumentEntry(&args_.at(i)); + } else { + args_.resize(i + 1); + return ArgumentEntry(&args_.at(i)); + } + } + ArgumentEntry operator[](uint32_t i) { + return at(i); + } + + template + void push_arg(const std::vector &v) { + int idx = args_.size(); + args_.resize(idx + 1); + args_[idx].type = TI_ARGUMENT_TYPE_TENSOR; + std::memcpy(args_[idx].value.tensor.contents.data.x32, v.data(), + v.size() * sizeof(T)); + args_[idx].value.tensor.contents.length = v.size(); + args_[idx].value.tensor.type = DataTypeToEnum::value; + } + + template + void push_arg(const T &arg) { + int idx = args_.size(); + args_.resize(idx + 1); + at(idx) = arg; + } + + void clear_args() { + args_.clear(); + } + + void launch(uint32_t argument_count, const TiArgument *arguments) const { + ti_launch_kernel(runtime_, kernel_, argument_count, arguments); + } + void launch() const { + launch(args_.size(), args_.data()); + } + void launch(const std::vector &arguments) const { + launch(arguments.size(), arguments.data()); + } + + constexpr TiKernel kernel() const { + return kernel_; + } + constexpr operator TiKernel() const { // NOLINT + return kernel_; + } +}; + +class AotModule { + protected: + TiRuntime runtime_{TI_NULL_HANDLE}; + TiAotModule aot_module_{TI_NULL_HANDLE}; + bool should_destroy_{false}; + + public: + constexpr bool is_valid() const { + return aot_module_ != nullptr; + } + inline void destroy() { + if (should_destroy_) { + ti_destroy_aot_module(aot_module_); + aot_module_ = TI_NULL_HANDLE; + should_destroy_ = false; + } + } + + AotModule() { + } + AotModule(const AotModule &) = delete; + AotModule(AotModule &&b) + : runtime_(detail::move_handle(b.runtime_)), + aot_module_(detail::move_handle(b.aot_module_)), + should_destroy_(detail::exchange(b.should_destroy_, false)) { + } + AotModule(TiRuntime runtime, TiAotModule aot_module, bool should_destroy) + : runtime_(runtime), + aot_module_(aot_module), + should_destroy_(should_destroy) { + } + ~AotModule() { + destroy(); + } + + AotModule &operator=(const AotModule &) = delete; + AotModule &operator=(AotModule &&b) { + runtime_ = detail::move_handle(b.runtime_); + aot_module_ = detail::move_handle(b.aot_module_); + should_destroy_ = detail::exchange(b.should_destroy_, false); + return *this; + } + + inline AotModule borrow() const { + return AotModule(runtime_, aot_module_, false); + } + + Kernel get_kernel(const char *name) const { + TiKernel kernel_ = ti_get_aot_module_kernel(aot_module_, name); + return Kernel(runtime_, kernel_); + } + ComputeGraph get_compute_graph(const char *name) const { + TiComputeGraph compute_graph_ = + ti_get_aot_module_compute_graph(aot_module_, name); + return ComputeGraph(runtime_, compute_graph_); + } + + constexpr TiAotModule aot_module() const { + return aot_module_; + } + constexpr operator TiAotModule() const { // NOLINT + return aot_module_; + } +}; + +class CapabilityLevelConfigBuilder; +class CapabilityLevelConfig { + public: + std::vector cap_level_infos; + + CapabilityLevelConfig() { + } + explicit CapabilityLevelConfig( + std::vector &&capabilities) + : cap_level_infos(std::move(capabilities)) { + } + + static CapabilityLevelConfigBuilder builder(); + + uint32_t get(TiCapability capability) const { + for (size_t i = 0; i < cap_level_infos.size(); ++i) { + const TiCapabilityLevelInfo &cap_level_info = cap_level_infos.at(i); + if (cap_level_info.capability == capability) { + return cap_level_info.level; + } + } + return 0; + } + + void set(TiCapability capability, uint32_t level) { + std::vector::iterator it = cap_level_infos.begin(); + for (; it != cap_level_infos.end(); ++it) { + if (it->capability == capability) { + it->level = level; + return; + } + } + TiCapabilityLevelInfo cap_level_info{}; + cap_level_info.capability = capability; + cap_level_info.level = level; + cap_level_infos.emplace_back(std::move(cap_level_info)); + } +}; + +class CapabilityLevelConfigBuilder { + typedef CapabilityLevelConfigBuilder Self; + std::map cap_level_infos_; + + public: + CapabilityLevelConfigBuilder() { + } + CapabilityLevelConfigBuilder(const Self &) = delete; + Self &operator=(const Self &) = delete; + + Self &spirv_version(uint32_t major, uint32_t minor) { + if (major == 1) { + if (minor == 3) { + cap_level_infos_[TI_CAPABILITY_SPIRV_VERSION] = 0x10300; + } else if (minor == 4) { + cap_level_infos_[TI_CAPABILITY_SPIRV_VERSION] = 0x10400; + } else if (minor == 5) { + cap_level_infos_[TI_CAPABILITY_SPIRV_VERSION] = 0x10500; + } else { + ti_set_last_error(TI_ERROR_ARGUMENT_OUT_OF_RANGE, "minor"); + } + } else { + ti_set_last_error(TI_ERROR_ARGUMENT_OUT_OF_RANGE, "major"); + } + return *this; + } + Self &spirv_has_int8(bool value = true) { + cap_level_infos_[TI_CAPABILITY_SPIRV_HAS_INT8] = value ? TI_TRUE : TI_FALSE; + return *this; + } + Self &spirv_has_int16(bool value = true) { + cap_level_infos_[TI_CAPABILITY_SPIRV_HAS_INT16] = + value ? TI_TRUE : TI_FALSE; + return *this; + } + Self &spirv_has_int64(bool value = true) { + cap_level_infos_[TI_CAPABILITY_SPIRV_HAS_INT64] = + value ? TI_TRUE : TI_FALSE; + return *this; + } + Self &spirv_has_float16(bool value = true) { + cap_level_infos_[TI_CAPABILITY_SPIRV_HAS_FLOAT16] = + value ? TI_TRUE : TI_FALSE; + return *this; + } + Self &spirv_has_float64(bool value = true) { + cap_level_infos_[TI_CAPABILITY_SPIRV_HAS_FLOAT64] = + value ? TI_TRUE : TI_FALSE; + return *this; + } + Self &spirv_has_atomic_int64(bool value = true) { + cap_level_infos_[TI_CAPABILITY_SPIRV_HAS_ATOMIC_INT64] = + value ? TI_TRUE : TI_FALSE; + return *this; + } + Self &spirv_has_atomic_float16(bool value = true) { + cap_level_infos_[TI_CAPABILITY_SPIRV_HAS_ATOMIC_FLOAT16] = + value ? TI_TRUE : TI_FALSE; + return *this; + } + Self &spirv_has_atomic_float16_add(bool value = true) { + cap_level_infos_[TI_CAPABILITY_SPIRV_HAS_ATOMIC_FLOAT16_ADD] = + value ? TI_TRUE : TI_FALSE; + return *this; + } + Self &spirv_has_atomic_float16_minmax(bool value = true) { + cap_level_infos_[TI_CAPABILITY_SPIRV_HAS_ATOMIC_FLOAT16_MINMAX] = + value ? TI_TRUE : TI_FALSE; + return *this; + } + Self &spirv_has_atomic_float64(bool value = true) { + cap_level_infos_[TI_CAPABILITY_SPIRV_HAS_ATOMIC_FLOAT64] = + value ? TI_TRUE : TI_FALSE; + return *this; + } + Self &spirv_has_atomic_float64_add(bool value = true) { + cap_level_infos_[TI_CAPABILITY_SPIRV_HAS_ATOMIC_FLOAT64_ADD] = + value ? TI_TRUE : TI_FALSE; + return *this; + } + Self &spirv_has_variable_ptr(bool value = true) { + cap_level_infos_[TI_CAPABILITY_SPIRV_HAS_VARIABLE_PTR] = + value ? TI_TRUE : TI_FALSE; + return *this; + } + Self &spirv_has_physical_storage_buffer(bool value = true) { + cap_level_infos_[TI_CAPABILITY_SPIRV_HAS_PHYSICAL_STORAGE_BUFFER] = + value ? TI_TRUE : TI_FALSE; + return *this; + } + Self &spirv_has_subgroup_basic(bool value = true) { + cap_level_infos_[TI_CAPABILITY_SPIRV_HAS_SUBGROUP_BASIC] = + value ? TI_TRUE : TI_FALSE; + return *this; + } + Self &spirv_has_subgroup_vote(bool value = true) { + cap_level_infos_[TI_CAPABILITY_SPIRV_HAS_SUBGROUP_VOTE] = + value ? TI_TRUE : TI_FALSE; + return *this; + } + Self &spirv_has_subgroup_arithmetic(bool value = true) { + cap_level_infos_[TI_CAPABILITY_SPIRV_HAS_SUBGROUP_ARITHMETIC] = + value ? TI_TRUE : TI_FALSE; + return *this; + } + Self &spirv_has_subgroup_ballot(bool value = true) { + cap_level_infos_[TI_CAPABILITY_SPIRV_HAS_SUBGROUP_BALLOT] = + value ? TI_TRUE : TI_FALSE; + return *this; + } + Self &spirv_has_non_semantic_info(bool value = true) { + cap_level_infos_[TI_CAPABILITY_SPIRV_HAS_NON_SEMANTIC_INFO] = + value ? TI_TRUE : TI_FALSE; + return *this; + } + Self &spirv_has_no_integer_wrap_decoration(bool value = true) { + cap_level_infos_[TI_CAPABILITY_SPIRV_HAS_NO_INTEGER_WRAP_DECORATION] = + value ? TI_TRUE : TI_FALSE; + return *this; + } + + CapabilityLevelConfig build() { + std::vector out{}; + for (const auto &pair : cap_level_infos_) { + TiCapabilityLevelInfo cap_level_info{}; + cap_level_info.capability = pair.first; + cap_level_info.level = pair.second; + out.emplace_back(std::move(cap_level_info)); + } + return CapabilityLevelConfig{std::move(out)}; + } +}; + +inline CapabilityLevelConfigBuilder CapabilityLevelConfig::builder() { + return {}; +} + +class Runtime { + protected: + TiArch arch_{TI_ARCH_MAX_ENUM}; + TiRuntime runtime_{TI_NULL_HANDLE}; + bool should_destroy_{false}; + + public: + constexpr bool is_valid() const { + return runtime_ != nullptr; + } + inline void destroy() { + if (should_destroy_) { + ti_destroy_runtime(runtime_); + runtime_ = TI_NULL_HANDLE; + should_destroy_ = false; + } + } + + Runtime() { + } + Runtime(const Runtime &) = delete; + Runtime(Runtime &&b) + : arch_(detail::exchange(b.arch_, TI_ARCH_MAX_ENUM)), + runtime_(detail::move_handle(b.runtime_)), + should_destroy_(detail::exchange(b.should_destroy_, false)) { + } + explicit Runtime(TiArch arch, uint32_t device_index = 0) + : arch_(arch), + runtime_(ti_create_runtime(arch, device_index)), + should_destroy_(true) { + } + Runtime(TiArch arch, TiRuntime runtime, bool should_destroy) + : arch_(arch), runtime_(runtime), should_destroy_(should_destroy) { + } + ~Runtime() { + destroy(); + } + + Runtime &operator=(const Runtime &) = delete; + Runtime &operator=(Runtime &&b) { + arch_ = detail::exchange(b.arch_, TI_ARCH_MAX_ENUM); + runtime_ = detail::move_handle(b.runtime_); + should_destroy_ = detail::exchange(b.should_destroy_, false); + return *this; + } + + inline Runtime borrow() const { + return Runtime(arch_, runtime_, false); + } + + void set_capabilities_ext( + const std::vector &capabilities) const { + ti_set_runtime_capabilities_ext(runtime_, (uint32_t)capabilities.size(), + capabilities.data()); + } + void set_capabilities_ext(const CapabilityLevelConfig &capabilities) const { + set_capabilities_ext(capabilities.cap_level_infos); + } + CapabilityLevelConfig get_capabilities() const { + uint32_t n = 0; + ti_get_runtime_capabilities(runtime_, &n, nullptr); + std::vector devcaps(n); + ti_get_runtime_capabilities(runtime_, &n, devcaps.data()); + return CapabilityLevelConfig{std::move(devcaps)}; + } + + Memory allocate_memory(const TiMemoryAllocateInfo &allocate_info) const { + TiMemory memory = ti_allocate_memory(runtime_, &allocate_info); + return Memory(runtime_, memory, allocate_info.size, true); + } + Memory allocate_memory(size_t size, bool host_access = false) const { + TiMemoryAllocateInfo allocate_info{}; + allocate_info.size = size; + allocate_info.host_read = host_access; + allocate_info.host_write = host_access; + allocate_info.usage = TI_MEMORY_USAGE_STORAGE_BIT; + return allocate_memory(allocate_info); + } + template + NdArray allocate_ndarray(const std::vector &shape = {}, + const std::vector &elem_shape = {}, + bool host_access = false) const { + auto dtype = detail::templ2dtype::value; + return allocate_ndarray(dtype, shape, elem_shape, host_access); + } + + template + NdArray allocate_ndarray(TiDataType dtype, + const std::vector &shape = {}, + const std::vector &elem_shape = {}, + bool host_access = false) const { + size_t size = sizeof(T); + TiNdArray ndarray{}; + for (size_t i = 0; i < shape.size(); ++i) { + uint32_t x = shape.at(i); + size *= x; + ndarray.shape.dims[i] = x; + } + ndarray.shape.dim_count = shape.size(); + for (size_t i = 0; i < elem_shape.size(); ++i) { + uint32_t x = elem_shape.at(i); + size *= x; + ndarray.elem_shape.dims[i] = x; + } + ndarray.elem_shape.dim_count = elem_shape.size(); + ndarray.elem_type = dtype; + + ti::Memory memory = allocate_memory(size, host_access); + ndarray.memory = memory.memory(); + return NdArray(std::move(memory), ndarray); + } + + Image allocate_image(const TiImageAllocateInfo &allocate_info) const { + TiImage image = ti_allocate_image(runtime_, &allocate_info); + return Image(runtime_, image, allocate_info.dimension, allocate_info.extent, + allocate_info.mip_level_count, allocate_info.format, true); + } + Texture allocate_texture2d(uint32_t width, + uint32_t height, + TiFormat format, + TiSampler sampler) const { + TiImageExtent extent{}; + extent.width = width; + extent.height = height; + extent.depth = 1; + extent.array_layer_count = 1; + + TiImageAllocateInfo allocate_info{}; + allocate_info.dimension = TI_IMAGE_DIMENSION_2D; + allocate_info.extent = extent; + allocate_info.mip_level_count = 1; + allocate_info.format = format; + allocate_info.usage = + TI_IMAGE_USAGE_STORAGE_BIT | TI_IMAGE_USAGE_SAMPLED_BIT; + + Image image = allocate_image(allocate_info); + TiTexture texture{}; + texture.image = image.image(); + texture.dimension = TI_IMAGE_DIMENSION_2D; + texture.extent = extent; + texture.format = format; + texture.sampler = sampler; + return Texture(std::move(image), texture); + } + + AotModule load_aot_module(const char *path) const { + TiAotModule aot_module_ = ti_load_aot_module(runtime_, path); + return AotModule(runtime_, aot_module_, true); + } + AotModule load_aot_module(const std::string &path) const { + return load_aot_module(path.c_str()); + } + + AotModule create_aot_module(const void *tcm, size_t size) const { + TiAotModule aot_module = ti_create_aot_module(runtime_, tcm, size); + return AotModule(runtime_, aot_module, true); + } + AotModule create_aot_module(const std::vector &tcm) const { + return create_aot_module(tcm.data(), tcm.size()); + } + + void copy_memory_device_to_device(const MemorySlice &dst_memory, + const MemorySlice &src_memory) const { + ti_copy_memory_device_to_device(runtime_, &dst_memory.slice(), + &src_memory.slice()); + } + void copy_image_device_to_device(const ImageSlice &dst_image, + const ImageSlice &src_image) const { + ti_copy_image_device_to_device(runtime_, &dst_image.slice(), + &src_image.slice()); + } + void transition_image(TiImage image, TiImageLayout layout) const { + ti_transition_image(runtime_, image, layout); + } + + void flush() const { + ti_flush(runtime_); + } + void wait() const { + ti_wait(runtime_); + } + + constexpr TiArch arch() const { + return arch_; + } + constexpr TiRuntime runtime() const { + return runtime_; + } + constexpr operator TiRuntime() const { // NOLINT + return runtime_; + } +}; + +} // namespace ti diff --git a/local_packages/taichi/_lib/c_api/include/taichi/taichi.h b/local_packages/taichi/_lib/c_api/include/taichi/taichi.h new file mode 100644 index 0000000..e7f1ae1 --- /dev/null +++ b/local_packages/taichi/_lib/c_api/include/taichi/taichi.h @@ -0,0 +1,29 @@ +#pragma once +#ifndef TAICHI_H +#define TAICHI_H + +#include "taichi_platform.h" + +#include "taichi_core.h" + +#ifdef TI_WITH_VULKAN +#include "taichi_vulkan.h" +#endif // TI_WITH_VULKAN + +#ifdef TI_WITH_OPENGL +#include "taichi_opengl.h" +#endif // TI_WITH_OPENGL + +#ifdef TI_WITH_CUDA +#include "taichi_cuda.h" +#endif // TI_WITH_CUDA + +#ifdef TI_WITH_CPU +#include "taichi_cpu.h" +#endif // TI_WITH_CPU + +#ifdef TI_WITH_METAL +#include "taichi_metal.h" +#endif // TI_WITH_METAL + +#endif // TAICHI_H diff --git a/local_packages/taichi/_lib/c_api/include/taichi/taichi_core.h b/local_packages/taichi/_lib/c_api/include/taichi/taichi_core.h new file mode 100644 index 0000000..f376482 --- /dev/null +++ b/local_packages/taichi/_lib/c_api/include/taichi/taichi_core.h @@ -0,0 +1,1111 @@ +// # Core Functionality +// +// Taichi Core exposes all necessary interfaces for offloading the AOT modules +// to Taichi. The following is a list of features that are available regardless +// of your backend. The corresponding APIs are still under development and +// subject to change. +// +// ## Availability +// +// Taichi C-API intends to support the following backends: +// +// |Backend |Offload Target |Maintenance Tier | Stabilized? | +// |------------|-----------------|-----------------|-------------| +// |Vulkan |GPU |Tier 1 | Yes | +// |Metal |GPU (macOS, iOS) |Tier 2 | No | +// |CUDA (LLVM) |GPU (NVIDIA) |Tier 2 | No | +// |CPU (LLVM) |CPU |Tier 2 | No | +// |OpenGL |GPU |Tier 2 | No | +// |OpenGL ES |GPU |Tier 2 | No | +// |DirectX 11 |GPU (Windows) |N/A | No | +// +// The backends with tier-1 support are being developed and tested more +// intensively. And most new features will be available on Vulkan first because +// it has the most outstanding cross-platform compatibility among all the tier-1 +// backends. For the backends with tier-2 support, you should expect a delay in +// the fixes to minor issues. +// +// For convenience, in the following text and other C-API documents, the term +// *host* refers to the user of the C-API; the term *device* refers to the +// logical (conceptual) compute device, to which Taichi's runtime offloads its +// compute tasks. A *device* may not be a physical discrete processor other than +// the CPU and the *host* may *not* be able to access the memory allocated on +// the *device*. +// +// Unless otherwise specified, **device**, **backend**, **offload target**, and +// **GPU** are interchangeable; **host**, **user code**, **user procedure**, and +// **CPU** are interchangeable. +// +// ## HowTo +// +// The following section provides a brief introduction to the Taichi C-API. +// +// ### Create and destroy a Runtime Instance +// +// You *must* create a runtime instance before working with Taichi, and *only* +// one runtime per thread. Currently, we do not officially claim that multiple +// runtime instances can coexist in a process, but please feel free to [file an +// issue with us](https://github.com/taichi-dev/taichi/issues) if you run into +// any problem with runtime instance coexistence. +// +// ```cpp +// // Create a Taichi Runtime on Vulkan device at index 0. +// TiRuntime runtime = ti_create_runtime(TI_ARCH_VULKAN, 0); +// ``` +// +// When your program runs to the end, ensure that: +// - You destroy the runtime instance, +// - All related resources are destroyed before the +// [`TiRuntime`](#handle-tiruntime) itself. +// +// ```cpp +// ti_destroy_runtime(runtime); +// ``` +// +// ### Allocate and free memory +// +// Allocate a piece of memory that is visible only to the device. On the GPU +// backends, it usually means that the memory is located in the graphics memory +// (GRAM). +// +// ```cpp +// TiMemoryAllocateInfo mai {}; +// mai.size = 1024; // Size in bytes. +// mai.usage = TI_MEMORY_USAGE_STORAGE_BIT; +// TiMemory memory = ti_allocate_memory(runtime, &mai); +// ``` +// +// Allocated memory is automatically freed when the related +// [`TiRuntime`](#handle-tiruntime) is destroyed. You can also manually free the +// allocated memory. +// +// ```cpp +// ti_free_memory(runtime, memory); +// ``` +// +// ### Allocate host-accessible memory +// +// By default, memory allocations are physically or conceptually local to the +// offload target for performance reasons. You can configure the +// [`TiMemoryAllocateInfo`](#structure-timemoryallocateinfo) to enable host +// access to memory allocations. But please note that host-accessible +// allocations *may* slow down computation on GPU because of the limited bus +// bandwidth between the host memory and the device. +// +// You *must* set `host_write` to [`TI_TRUE`](#definition-ti_true) to allow +// zero-copy data streaming to the memory. +// +// ```cpp +// TiMemoryAllocateInfo mai {}; +// mai.size = 1024; // Size in bytes. +// mai.host_write = TI_TRUE; +// mai.usage = TI_MEMORY_USAGE_STORAGE_BIT; +// TiMemory steaming_memory = ti_allocate_memory(runtime, &mai); +// +// // ... +// +// std::vector src = some_random_data_source(); +// +// void* dst = ti_map_memory(runtime, steaming_memory); +// std::memcpy(dst, src.data(), src.size()); +// ti_unmap_memory(runtime, streaming_memory); +// ``` +// +// To read data back to the host, `host_read` *must* be set to +// [`TI_TRUE`](#definition-ti_true). +// +// ```cpp +// TiMemoryAllocateInfo mai {}; +// mai.size = 1024; // Size in bytes. +// mai.host_read = TI_TRUE; +// mai.usage = TI_MEMORY_USAGE_STORAGE_BIT; +// TiMemory read_back_memory = ti_allocate_memory(runtime, &mai); +// +// // ... +// +// std::vector dst(1024); +// void* src = ti_map_memory(runtime, read_back_memory); +// std::memcpy(dst.data(), src, dst.size()); +// ti_unmap_memory(runtime, read_back_memory); +// +// ti_free_memory(runtime, read_back_memory); +// ``` +// +// > You can set `host_read` and `host_write` at the same time. +// +// ### Load and destroy a Taichi AOT module +// +// You can load a Taichi AOT module from the filesystem. +// +// ```cpp +// TiAotModule aot_module = ti_load_aot_module(runtime, "/path/to/aot/module"); +// ``` +// +// `/path/to/aot/module` should point to the directory that contains a +// `metadata.json`. +// +// You can destroy an unused AOT module, but please ensure that there is no +// kernel or compute graph related to it pending to +// [`ti_flush`](#function-ti_flush). +// +// ```cpp +// ti_destroy_aot_module(aot_module); +// ``` +// +// ### Launch kernels and compute graphs +// +// You can extract kernels and compute graphs from an AOT module. Kernel and +// compute graphs are a part of the module, so you don't have to destroy them. +// +// ```cpp +// TiKernel kernel = ti_get_aot_module_kernel(aot_module, "foo"); +// TiComputeGraph compute_graph = ti_get_aot_module_compute_graph(aot_module, +// "bar"); +// ``` +// +// You can launch a kernel with positional arguments. Please ensure the types, +// the sizes and the order matches the source code in Python. +// +// ```cpp +// TiNdArray ndarray{}; +// ndarray.memory = get_some_memory(); +// ndarray.shape.dim_count = 1; +// ndarray.shape.dims[0] = 16; +// ndarray.elem_shape.dim_count = 2; +// ndarray.elem_shape.dims[0] = 4; +// ndarray.elem_shape.dims[1] = 4; +// ndarray.elem_type = TI_DATA_TYPE_F32; +// +// std::array args{}; +// +// TiArgument& arg0 = args[0]; +// arg0.type = TI_ARGUMENT_TYPE_I32; +// arg0.value.i32 = 123; +// +// TiArgument& arg1 = args[1]; +// arg1.type = TI_ARGUMENT_TYPE_F32; +// arg1.value.f32 = 123.0f; +// +// TiArgument& arg2 = args[2]; +// arg2.type = TI_ARGUMENT_TYPE_NDARRAY; +// arg2.value.ndarray = ndarray; +// +// ti_launch_kernel(runtime, kernel, args.size(), args.data()); +// ``` +// +// You can launch a compute graph in a similar way. But additionally please +// ensure the argument names matches those in the Python source. +// +// ```cpp +// std::array named_args{}; +// TiNamedArgument& named_arg0 = named_args[0]; +// named_arg0.name = "foo"; +// named_arg0.argument = args[0]; +// TiNamedArgument& named_arg1 = named_args[1]; +// named_arg1.name = "bar"; +// named_arg1.argument = args[1]; +// TiNamedArgument& named_arg2 = named_args[2]; +// named_arg2.name = "baz"; +// named_arg2.argument = args[2]; +// +// ti_launch_compute_graph(runtime, compute_graph, named_args.size(), +// named_args.data()); +// ``` +// +// When you have launched all kernels and compute graphs for this batch, you +// should [`ti_flush`](#function-ti_flush) and [`ti_wait`](#function-ti_wait) +// for the execution to finish. +// +// ```cpp +// ti_flush(runtime); +// ti_wait(runtime); +// ``` +// +// **WARNING** This part is subject to change. We will introduce multi-queue in +// the future. +// +#pragma once + +#ifndef TI_C_API_VERSION +#define TI_C_API_VERSION 1007000 +#endif // TI_C_API_VERSION + +#ifndef TAICHI_H +#include "taichi.h" +#endif // TAICHI_H + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +// Alias `TiBool` (1.4.0) +// +// A boolean value. Can be either [`TI_TRUE`](#definition-ti_true) or +// [`TI_FALSE`](#definition-ti_false). Assignment with other values could lead +// to undefined behavior. +typedef uint32_t TiBool; + +// Definition `TI_FALSE` (1.4.0) +// +// A condition or a predicate is not satisfied; a statement is invalid. +#define TI_FALSE 0 + +// Definition `TI_TRUE` (1.4.0) +// +// A condition or a predicate is satisfied; a statement is valid. +#define TI_TRUE 1 + +// Alias `TiFlags` (1.4.0) +// +// A bit field that can be used to represent 32 orthogonal flags. Bits +// unspecified in the corresponding flag enum are ignored. +// +// > Enumerations and bit-field flags in the C-API have a `TI_XXX_MAX_ENUM` case +// to ensure the enum has a 32-bit range and in-memory size. It has no +// semantical impact and can be safely ignored. +typedef uint32_t TiFlags; + +// Definition `TI_NULL_HANDLE` (1.4.0) +// +// A sentinal invalid handle that will never be produced from a valid call to +// Taichi C-API. +#define TI_NULL_HANDLE 0 + +// Handle `TiRuntime` (1.4.0) +// +// Taichi runtime represents an instance of a logical backend and its internal +// dynamic state. The user is responsible to synchronize any use of +// [`TiRuntime`](#handle-tiruntime). The user *must not* manipulate multiple +// [`TiRuntime`](#handle-tiruntime)s in the same thread. +typedef struct TiRuntime_t *TiRuntime; + +// Handle `TiAotModule` (1.4.0) +// +// An ahead-of-time (AOT) compiled Taichi module, which contains a collection of +// kernels and compute graphs. +typedef struct TiAotModule_t *TiAotModule; + +// Handle `TiMemory` (1.4.0) +// +// A contiguous allocation of device memory. +typedef struct TiMemory_t *TiMemory; + +// Handle `TiImage` (1.4.0) +// +// A contiguous allocation of device image. +typedef struct TiImage_t *TiImage; + +// Handle `TiSampler` +// +// An image sampler. [`TI_NULL_HANDLE`](#definition-ti_null_handle) represents a +// default image sampler provided by the runtime implementation. The filter +// modes and address modes of default samplers depend on backend implementation. +typedef struct TiSampler_t *TiSampler; + +// Handle `TiKernel` (1.4.0) +// +// A Taichi kernel that can be launched on the offload target for execution. +typedef struct TiKernel_t *TiKernel; + +// Handle `TiComputeGraph` (1.4.0) +// +// A collection of Taichi kernels (a compute graph) to launch on the offload +// target in a predefined order. +typedef struct TiComputeGraph_t *TiComputeGraph; + +// Enumeration `TiError` (1.4.0) +// +// Errors reported by the Taichi C-API. +typedef enum TiError { + // The Taichi C-API invocation finished gracefully. + TI_ERROR_SUCCESS = 0, + // The invoked API, or the combination of parameters is not supported by the + // Taichi C-API. + TI_ERROR_NOT_SUPPORTED = -1, + // Provided data is corrupted. + TI_ERROR_CORRUPTED_DATA = -2, + // Provided name does not refer to any existing item. + TI_ERROR_NAME_NOT_FOUND = -3, + // One or more function arguments violate constraints specified in C-API + // documents, or kernel arguments mismatch the kernel argument list defined in + // the AOT module. + TI_ERROR_INVALID_ARGUMENT = -4, + // One or more by-reference (pointer) function arguments point to null. + TI_ERROR_ARGUMENT_NULL = -5, + // One or more function arguments are out of its acceptable range; or + // enumeration arguments have undefined value. + TI_ERROR_ARGUMENT_OUT_OF_RANGE = -6, + // One or more kernel arguments are missing. + TI_ERROR_ARGUMENT_NOT_FOUND = -7, + // The intended interoperation is not possible on the current arch. For + // example, attempts to export a Vulkan object from a CUDA runtime are not + // allowed. + TI_ERROR_INVALID_INTEROP = -8, + // The Taichi C-API enters an unrecoverable invalid state. Related Taichi + // objects are potentially corrupted. The users *should* release the + // contaminated resources for stability. Please feel free to file an issue if + // you encountered this error in a normal routine. + TI_ERROR_INVALID_STATE = -9, + // The AOT module is not compatible with the current runtime. + TI_ERROR_INCOMPATIBLE_MODULE = -10, + TI_ERROR_OUT_OF_MEMORY = -11, + TI_ERROR_MAX_ENUM = 0xffffffff, +} TiError; + +// Enumeration `TiArch` (1.4.0) +// +// Types of backend archs. +typedef enum TiArch { + TI_ARCH_RESERVED = 0, + // Vulkan GPU backend. + TI_ARCH_VULKAN = 1, + // Metal GPU backend. + TI_ARCH_METAL = 2, + // NVIDIA CUDA GPU backend. + TI_ARCH_CUDA = 3, + // x64 native CPU backend. + TI_ARCH_X64 = 4, + // Arm64 native CPU backend. + TI_ARCH_ARM64 = 5, + // OpenGL GPU backend. + TI_ARCH_OPENGL = 6, + // OpenGL ES GPU backend. + TI_ARCH_GLES = 7, + TI_ARCH_MAX_ENUM = 0xffffffff, +} TiArch; + +// Enumeration `TiCapability` (1.4.0) +// +// Device capabilities. +typedef enum TiCapability { + TI_CAPABILITY_RESERVED = 0, + TI_CAPABILITY_SPIRV_VERSION = 1, + TI_CAPABILITY_SPIRV_HAS_INT8 = 2, + TI_CAPABILITY_SPIRV_HAS_INT16 = 3, + TI_CAPABILITY_SPIRV_HAS_INT64 = 4, + TI_CAPABILITY_SPIRV_HAS_FLOAT16 = 5, + TI_CAPABILITY_SPIRV_HAS_FLOAT64 = 6, + TI_CAPABILITY_SPIRV_HAS_ATOMIC_INT64 = 7, + TI_CAPABILITY_SPIRV_HAS_ATOMIC_FLOAT16 = 8, + TI_CAPABILITY_SPIRV_HAS_ATOMIC_FLOAT16_ADD = 9, + TI_CAPABILITY_SPIRV_HAS_ATOMIC_FLOAT16_MINMAX = 10, + TI_CAPABILITY_SPIRV_HAS_ATOMIC_FLOAT = 11, + TI_CAPABILITY_SPIRV_HAS_ATOMIC_FLOAT_ADD = 12, + TI_CAPABILITY_SPIRV_HAS_ATOMIC_FLOAT_MINMAX = 13, + TI_CAPABILITY_SPIRV_HAS_ATOMIC_FLOAT64 = 14, + TI_CAPABILITY_SPIRV_HAS_ATOMIC_FLOAT64_ADD = 15, + TI_CAPABILITY_SPIRV_HAS_ATOMIC_FLOAT64_MINMAX = 16, + TI_CAPABILITY_SPIRV_HAS_VARIABLE_PTR = 17, + TI_CAPABILITY_SPIRV_HAS_PHYSICAL_STORAGE_BUFFER = 18, + TI_CAPABILITY_SPIRV_HAS_SUBGROUP_BASIC = 19, + TI_CAPABILITY_SPIRV_HAS_SUBGROUP_VOTE = 20, + TI_CAPABILITY_SPIRV_HAS_SUBGROUP_ARITHMETIC = 21, + TI_CAPABILITY_SPIRV_HAS_SUBGROUP_BALLOT = 22, + TI_CAPABILITY_SPIRV_HAS_NON_SEMANTIC_INFO = 23, + TI_CAPABILITY_SPIRV_HAS_NO_INTEGER_WRAP_DECORATION = 24, + TI_CAPABILITY_MAX_ENUM = 0xffffffff, +} TiCapability; + +// Structure `TiCapabilityLevelInfo` (1.4.0) +// +// An integral device capability level. It currently is not guaranteed that a +// higher level value is compatible with a lower level value. +typedef struct TiCapabilityLevelInfo { + TiCapability capability; + uint32_t level; +} TiCapabilityLevelInfo; + +// Enumeration `TiDataType` (1.4.0) +// +// Elementary (primitive) data types. There might be vendor-specific constraints +// on the available data types so it's recommended to use 32-bit data types if +// multi-platform distribution is desired. +typedef enum TiDataType { + // 16-bit IEEE 754 half-precision floating-point number. + TI_DATA_TYPE_F16 = 0, + // 32-bit IEEE 754 single-precision floating-point number. + TI_DATA_TYPE_F32 = 1, + // 64-bit IEEE 754 double-precision floating-point number. + TI_DATA_TYPE_F64 = 2, + // 8-bit one's complement signed integer. + TI_DATA_TYPE_I8 = 3, + // 16-bit one's complement signed integer. + TI_DATA_TYPE_I16 = 4, + // 32-bit one's complement signed integer. + TI_DATA_TYPE_I32 = 5, + // 64-bit one's complement signed integer. + TI_DATA_TYPE_I64 = 6, + TI_DATA_TYPE_U1 = 7, + // 8-bit unsigned integer. + TI_DATA_TYPE_U8 = 8, + // 16-bit unsigned integer. + TI_DATA_TYPE_U16 = 9, + // 32-bit unsigned integer. + TI_DATA_TYPE_U32 = 10, + // 64-bit unsigned integer. + TI_DATA_TYPE_U64 = 11, + TI_DATA_TYPE_GEN = 12, + TI_DATA_TYPE_UNKNOWN = 13, + TI_DATA_TYPE_MAX_ENUM = 0xffffffff, +} TiDataType; + +// Enumeration `TiArgumentType` (1.4.0) +// +// Types of kernel and compute graph argument. +typedef enum TiArgumentType { + // 32-bit one's complement signed integer. + TI_ARGUMENT_TYPE_I32 = 0, + // 32-bit IEEE 754 single-precision floating-point number. + TI_ARGUMENT_TYPE_F32 = 1, + // ND-array wrapped around a `handle.memory`. + TI_ARGUMENT_TYPE_NDARRAY = 2, + // Texture wrapped around a `handle.image`. + TI_ARGUMENT_TYPE_TEXTURE = 3, + // Typed scalar. + TI_ARGUMENT_TYPE_SCALAR = 4, + // Typed tensor. + TI_ARGUMENT_TYPE_TENSOR = 5, + TI_ARGUMENT_TYPE_MAX_ENUM = 0xffffffff, +} TiArgumentType; + +// BitField `TiMemoryUsageFlags` (1.4.0) +// +// Usages of a memory allocation. Taichi requires kernel argument memories to be +// allocated with `TI_MEMORY_USAGE_STORAGE_BIT`. +typedef enum TiMemoryUsageFlagBits { + // The memory can be read/write accessed by any kernel. + TI_MEMORY_USAGE_STORAGE_BIT = 1 << 0, + // The memory can be used as a uniform buffer in graphics pipelines. + TI_MEMORY_USAGE_UNIFORM_BIT = 1 << 1, + // The memory can be used as a vertex buffer in graphics pipelines. + TI_MEMORY_USAGE_VERTEX_BIT = 1 << 2, + // The memory can be used as an index buffer in graphics pipelines. + TI_MEMORY_USAGE_INDEX_BIT = 1 << 3, +} TiMemoryUsageFlagBits; +typedef TiFlags TiMemoryUsageFlags; + +// Structure `TiMemoryAllocateInfo` (1.4.0) +// +// Parameters of a newly allocated memory. +typedef struct TiMemoryAllocateInfo { + // Size of the allocation in bytes. + uint64_t size; + // True if the host needs to write to the allocated memory. + TiBool host_write; + // True if the host needs to read from the allocated memory. + TiBool host_read; + // True if the memory allocation needs to be exported to other backends (e.g., + // from Vulkan to CUDA). + TiBool export_sharing; + // All possible usage of this memory allocation. In most cases, + // `bit_field.memory_usage.storage` is enough. + TiMemoryUsageFlags usage; +} TiMemoryAllocateInfo; + +// Structure `TiMemorySlice` (1.4.0) +// +// A subsection of a memory allocation. The sum of `offset` and `size` cannot +// exceed the size of `memory`. +typedef struct TiMemorySlice { + // The subsectioned memory allocation. + TiMemory memory; + // Offset from the beginning of the allocation. + uint64_t offset; + // Size of the subsection. + uint64_t size; +} TiMemorySlice; + +// Structure `TiNdShape` (1.4.0) +// +// Multi-dimensional size of an ND-array. Dimension sizes after `dim_count` are +// ignored. +typedef struct TiNdShape { + // Number of dimensions. + uint32_t dim_count; + // Dimension sizes. + uint32_t dims[16]; +} TiNdShape; + +// Structure `TiNdArray` (1.4.0) +// +// Multi-dimensional array of dense primitive data. +typedef struct TiNdArray { + // Memory bound to the ND-array. + TiMemory memory; + // Shape of the ND-array. + TiNdShape shape; + // Shape of the ND-array elements. It *must not* be empty for vector or matrix + // ND-arrays. + TiNdShape elem_shape; + // Primitive data type of the ND-array elements. + TiDataType elem_type; +} TiNdArray; + +// BitField `TiImageUsageFlags` (1.4.0) +// +// Usages of an image allocation. Taichi requires kernel argument images to be +// allocated with `TI_IMAGE_USAGE_STORAGE_BIT` and `TI_IMAGE_USAGE_SAMPLED_BIT`. +typedef enum TiImageUsageFlagBits { + // The image can be read/write accessed by any kernel. + TI_IMAGE_USAGE_STORAGE_BIT = 1 << 0, + // The image can be read-only accessed by any kernel. + TI_IMAGE_USAGE_SAMPLED_BIT = 1 << 1, + // The image can be used as a color or depth-stencil attachment depending on + // its format. + TI_IMAGE_USAGE_ATTACHMENT_BIT = 1 << 2, +} TiImageUsageFlagBits; +typedef TiFlags TiImageUsageFlags; + +// Enumeration `TiImageDimension` (1.4.0) +// +// Dimensions of an image allocation. +typedef enum TiImageDimension { + // The image is 1-dimensional. + TI_IMAGE_DIMENSION_1D = 0, + // The image is 2-dimensional. + TI_IMAGE_DIMENSION_2D = 1, + // The image is 3-dimensional. + TI_IMAGE_DIMENSION_3D = 2, + // The image is 1-dimensional and it has one or more layers. + TI_IMAGE_DIMENSION_1D_ARRAY = 3, + // The image is 2-dimensional and it has one or more layers. + TI_IMAGE_DIMENSION_2D_ARRAY = 4, + // The image is 2-dimensional and it has 6 layers for the faces towards +X, + // -X, +Y, -Y, +Z, -Z in sequence. + TI_IMAGE_DIMENSION_CUBE = 5, + TI_IMAGE_DIMENSION_MAX_ENUM = 0xffffffff, +} TiImageDimension; + +// Enumeration `TiImageLayout` (1.4.0) +typedef enum TiImageLayout { + // Undefined layout. An image in this layout does not contain any semantical + // information. + TI_IMAGE_LAYOUT_UNDEFINED = 0, + // Optimal layout for read-only access, including sampling. + TI_IMAGE_LAYOUT_SHADER_READ = 1, + // Optimal layout for write-only access. + TI_IMAGE_LAYOUT_SHADER_WRITE = 2, + // Optimal layout for read/write access. + TI_IMAGE_LAYOUT_SHADER_READ_WRITE = 3, + // Optimal layout as a color attachment. + TI_IMAGE_LAYOUT_COLOR_ATTACHMENT = 4, + // Optimal layout as an input color attachment. + TI_IMAGE_LAYOUT_COLOR_ATTACHMENT_READ = 5, + // Optimal layout as a depth attachment. + TI_IMAGE_LAYOUT_DEPTH_ATTACHMENT = 6, + // Optimal layout as an input depth attachment. + TI_IMAGE_LAYOUT_DEPTH_ATTACHMENT_READ = 7, + // Optimal layout as a data copy destination. + TI_IMAGE_LAYOUT_TRANSFER_DST = 8, + // Optimal layout as a data copy source. + TI_IMAGE_LAYOUT_TRANSFER_SRC = 9, + // Optimal layout as a presentation source. + TI_IMAGE_LAYOUT_PRESENT_SRC = 10, + TI_IMAGE_LAYOUT_MAX_ENUM = 0xffffffff, +} TiImageLayout; + +// Enumeration `TiFormat` (1.4.0) +// +// Texture formats. The availability of texture formats depends on runtime +// support. +typedef enum TiFormat { + TI_FORMAT_UNKNOWN = 0, + TI_FORMAT_R8 = 1, + TI_FORMAT_RG8 = 2, + TI_FORMAT_RGBA8 = 3, + TI_FORMAT_RGBA8SRGB = 4, + TI_FORMAT_BGRA8 = 5, + TI_FORMAT_BGRA8SRGB = 6, + TI_FORMAT_R8U = 7, + TI_FORMAT_RG8U = 8, + TI_FORMAT_RGBA8U = 9, + TI_FORMAT_R8I = 10, + TI_FORMAT_RG8I = 11, + TI_FORMAT_RGBA8I = 12, + TI_FORMAT_R16 = 13, + TI_FORMAT_RG16 = 14, + TI_FORMAT_RGB16 = 15, + TI_FORMAT_RGBA16 = 16, + TI_FORMAT_R16U = 17, + TI_FORMAT_RG16U = 18, + TI_FORMAT_RGB16U = 19, + TI_FORMAT_RGBA16U = 20, + TI_FORMAT_R16I = 21, + TI_FORMAT_RG16I = 22, + TI_FORMAT_RGB16I = 23, + TI_FORMAT_RGBA16I = 24, + TI_FORMAT_R16F = 25, + TI_FORMAT_RG16F = 26, + TI_FORMAT_RGB16F = 27, + TI_FORMAT_RGBA16F = 28, + TI_FORMAT_R32U = 29, + TI_FORMAT_RG32U = 30, + TI_FORMAT_RGB32U = 31, + TI_FORMAT_RGBA32U = 32, + TI_FORMAT_R32I = 33, + TI_FORMAT_RG32I = 34, + TI_FORMAT_RGB32I = 35, + TI_FORMAT_RGBA32I = 36, + TI_FORMAT_R32F = 37, + TI_FORMAT_RG32F = 38, + TI_FORMAT_RGB32F = 39, + TI_FORMAT_RGBA32F = 40, + TI_FORMAT_DEPTH16 = 41, + TI_FORMAT_DEPTH24STENCIL8 = 42, + TI_FORMAT_DEPTH32F = 43, + TI_FORMAT_MAX_ENUM = 0xffffffff, +} TiFormat; + +// Structure `TiImageOffset` (1.4.0) +// +// Offsets of an image in X, Y, Z, and array layers. +typedef struct TiImageOffset { + // Image offset in the X direction. + uint32_t x; + // Image offset in the Y direction. *Must* be 0 if the image has a dimension + // of `enumeration.image_dimension.1d` or + // `enumeration.image_dimension.1d_array`. + uint32_t y; + // Image offset in the Z direction. *Must* be 0 if the image has a dimension + // of `enumeration.image_dimension.1d`, `enumeration.image_dimension.2d`, + // `enumeration.image_dimension.1d_array`, + // `enumeration.image_dimension.2d_array` or + // `enumeration.image_dimension.cube_array`. + uint32_t z; + // Image offset in array layers. *Must* be 0 if the image has a dimension of + // `enumeration.image_dimension.1d`, `enumeration.image_dimension.2d` or + // `enumeration.image_dimension.3d`. + uint32_t array_layer_offset; +} TiImageOffset; + +// Structure `TiImageExtent` (1.4.0) +// +// Extents of an image in X, Y, Z, and array layers. +typedef struct TiImageExtent { + // Image extent in the X direction. + uint32_t width; + // Image extent in the Y direction. *Must* be 1 if the image has a dimension + // of `enumeration.image_dimension.1d` or + // `enumeration.image_dimension.1d_array`. + uint32_t height; + // Image extent in the Z direction. *Must* be 1 if the image has a dimension + // of `enumeration.image_dimension.1d`, `enumeration.image_dimension.2d`, + // `enumeration.image_dimension.1d_array`, + // `enumeration.image_dimension.2d_array` or + // `enumeration.image_dimension.cube_array`. + uint32_t depth; + // Image extent in array layers. *Must* be 1 if the image has a dimension of + // `enumeration.image_dimension.1d`, `enumeration.image_dimension.2d` or + // `enumeration.image_dimension.3d`. *Must* be 6 if the image has a dimension + // of `enumeration.image_dimension.cube_array`. + uint32_t array_layer_count; +} TiImageExtent; + +// Structure `TiImageAllocateInfo` (1.4.0) +// +// Parameters of a newly allocated image. +typedef struct TiImageAllocateInfo { + // Image dimension. + TiImageDimension dimension; + // Image extent. + TiImageExtent extent; + // Number of mip-levels. + uint32_t mip_level_count; + // Image texel format. + TiFormat format; + // True if the memory allocation needs to be exported to other backends (e.g., + // from Vulkan to CUDA). + TiBool export_sharing; + // All possible usages of this image allocation. In most cases, + // `bit_field.image_usage.storage` and `bit_field.image_usage.sampled` enough. + TiImageUsageFlags usage; +} TiImageAllocateInfo; + +// Structure `TiImageSlice` (1.4.0) +// +// A subsection of a memory allocation. The sum of `offset` and `extent` in each +// dimension cannot exceed the size of `image`. +typedef struct TiImageSlice { + // The subsectioned image allocation. + TiImage image; + // Offset from the beginning of the allocation in each dimension. + TiImageOffset offset; + // Size of the subsection in each dimension. + TiImageExtent extent; + // The subsectioned mip-level. + uint32_t mip_level; +} TiImageSlice; + +// Enumeration `TiFilter` +typedef enum TiFilter { + TI_FILTER_NEAREST = 0, + TI_FILTER_LINEAR = 1, + TI_FILTER_MAX_ENUM = 0xffffffff, +} TiFilter; + +// Enumeration `TiAddressMode` +typedef enum TiAddressMode { + TI_ADDRESS_MODE_REPEAT = 0, + TI_ADDRESS_MODE_MIRRORED_REPEAT = 1, + TI_ADDRESS_MODE_CLAMP_TO_EDGE = 2, + TI_ADDRESS_MODE_MAX_ENUM = 0xffffffff, +} TiAddressMode; + +// Structure `TiSamplerCreateInfo` +typedef struct TiSamplerCreateInfo { + TiFilter mag_filter; + TiFilter min_filter; + TiAddressMode address_mode; + float max_anisotropy; +} TiSamplerCreateInfo; + +// Structure `TiTexture` (1.4.0) +// +// Image data bound to a sampler. +typedef struct TiTexture { + // Image bound to the texture. + TiImage image; + // The bound sampler that controls the sampling behavior of + // `structure.texture.image`. + TiSampler sampler; + // Image Dimension. + TiImageDimension dimension; + // Image extent. + TiImageExtent extent; + // Image texel format. + TiFormat format; +} TiTexture; + +// Union `TiScalarValue` (1.5.0) +// +// Scalar value represented by a power-of-two number of bits. +// +// **NOTE** The unsigned integer types merely hold the number of bits in memory +// and doesn't reflect any type of the underlying data. For example, a 32-bit +// floating-point scalar value is assigned by `*(float*)&scalar_value.x32 = +// 0.0f`; a 16-bit signed integer is assigned by `*(int16_t)&scalar_vaue.x16 = +// 1`. The actual type of the scalar is hinted via `type`. +typedef union TiScalarValue { + // Scalar value that fits into 8 bits. + uint8_t x8; + // Scalar value that fits into 16 bits. + uint16_t x16; + // Scalar value that fits into 32 bits. + uint32_t x32; + // Scalar value that fits into 64 bits. + uint64_t x64; +} TiScalarValue; + +// Structure `TiScalar` (1.5.0) +// +// A typed scalar value. +typedef struct TiScalar { + TiDataType type; + TiScalarValue value; +} TiScalar; + +// Union `TiTensorValue` +// +// Tensor value represented by a power-of-two number of bits. +typedef union TiTensorValue { + // Tensor value that fits into 8 bits. + uint8_t x8[128]; + // Tensor value that fits into 16 bits. + uint16_t x16[64]; + // Tensor value that fits into 32 bits. + uint32_t x32[32]; + // Tensor value that fits into 64 bits. + uint64_t x64[16]; +} TiTensorValue; + +// Structure `TiTensorValueWithLength` +// +// A tensor value with a length. +typedef struct TiTensorValueWithLength { + uint32_t length; + TiTensorValue data; +} TiTensorValueWithLength; + +// Structure `TiTensor` +// +// A typed tensor value. +typedef struct TiTensor { + TiDataType type; + TiTensorValueWithLength contents; +} TiTensor; + +// Union `TiArgumentValue` (1.4.0) +// +// A scalar or structured argument value. +typedef union TiArgumentValue { + // Value of a 32-bit one's complement signed integer. This is equivalent to + // `union.scalar_value.x32` with `enumeration.data_type.i32`. + int32_t i32; + // Value of a 32-bit IEEE 754 single-precision floating-poing number. This is + // equivalent to `union.scalar_value.x32` with `enumeration.data_type.f32`. + float f32; + // An ND-array to be bound. + TiNdArray ndarray; + // A texture to be bound. + TiTexture texture; + // An scalar to be bound. + TiScalar scalar; + // A tensor to be bound. + TiTensor tensor; +} TiArgumentValue; + +// Structure `TiArgument` (1.4.0) +// +// An argument value to feed kernels. +typedef struct TiArgument { + // Type of the argument. + TiArgumentType type; + // Value of the argument. + TiArgumentValue value; +} TiArgument; + +// Structure `TiNamedArgument` (1.4.0) +// +// A named argument value to feed compute graphs. +typedef struct TiNamedArgument { + // Name of the argument. + const char *name; + // Argument body. + TiArgument argument; +} TiNamedArgument; + +// Function `ti_get_version` (1.4.0) +// +// Get the current taichi version. It has the same value as `TI_C_API_VERSION` +// as defined in `taichi_core.h`. +TI_DLL_EXPORT uint32_t TI_API_CALL ti_get_version(); + +// Function `ti_get_available_archs` (1.4.0) +// +// Gets a list of available archs on the current platform. An arch is only +// available if: +// +// 1. The Runtime library is compiled with its support; +// 2. The current platform is installed with a capable hardware or an emulation +// software. +// +// An available arch has at least one device available, i.e., device index 0 is +// always available. If an arch is not available on the current platform, a call +// to [`ti_create_runtime`](#function-ti_create_runtime) with that arch is +// guaranteed failing. +// +// **WARNING** Please also note that the order or returned archs is *undefined*. +TI_DLL_EXPORT void TI_API_CALL ti_get_available_archs(uint32_t *arch_count, + TiArch *archs); + +// Function `ti_get_last_error` (1.4.0) +// +// Gets the last error raised by Taichi C-API invocations. Returns the +// semantical error code. +TI_DLL_EXPORT TiError TI_API_CALL ti_get_last_error( + // Size of textual error message in `function.get_last_error.message` + uint64_t *message_size, + // Text buffer for the textual error message. Ignored when `message_size` is + // 0. + char *message); + +// Function `ti_set_last_error` (1.4.0) +// +// Sets the provided error as the last error raised by Taichi C-API invocations. +// It can be useful in extended validation procedures in Taichi C-API wrappers +// and helper libraries. +TI_DLL_EXPORT void TI_API_CALL ti_set_last_error( + // Semantical error code. + TiError error, + // A null-terminated string of the textual error message or `nullptr` for + // empty error message. + const char *message); + +// Function `ti_create_runtime` (1.4.0) +// +// Creates a Taichi Runtime with the specified [`TiArch`](#enumeration-tiarch). +TI_DLL_EXPORT TiRuntime TI_API_CALL ti_create_runtime( + // Arch of Taichi Runtime. + TiArch arch, + // The index of device in `function.create_runtime.arch` to create Taichi + // Runtime on. + uint32_t device_index); + +// Function `ti_destroy_runtime` (1.4.0) +// +// Destroys a Taichi Runtime. +TI_DLL_EXPORT void TI_API_CALL ti_destroy_runtime(TiRuntime runtime); + +// Function `ti_set_runtime_capabilities_ext` (1.4.0) +// +// Force override the list of available capabilities in the runtime instance. +TI_DLL_EXPORT void TI_API_CALL +ti_set_runtime_capabilities_ext(TiRuntime runtime, + uint32_t capability_count, + const TiCapabilityLevelInfo *capabilities); + +// Function `ti_get_runtime_capabilities` (1.4.0) +// +// Gets all capabilities available on the runtime instance. +TI_DLL_EXPORT void TI_API_CALL +ti_get_runtime_capabilities(TiRuntime runtime, + // The total number of capabilities available. + uint32_t *capability_count, + // Returned capabilities. + TiCapabilityLevelInfo *capabilities); + +// Function `ti_allocate_memory` (1.4.0) +// +// Allocates a contiguous device memory with provided parameters. +TI_DLL_EXPORT TiMemory TI_API_CALL +ti_allocate_memory(TiRuntime runtime, + const TiMemoryAllocateInfo *allocate_info); + +// Function `ti_free_memory` (1.4.0) +// +// Frees a memory allocation. +TI_DLL_EXPORT void TI_API_CALL ti_free_memory(TiRuntime runtime, + TiMemory memory); + +// Function `ti_map_memory` (1.4.0) +// +// Maps a device memory to a host-addressable space. You *must* ensure that the +// device is not being used by any device command before the mapping. +TI_DLL_EXPORT void *TI_API_CALL ti_map_memory(TiRuntime runtime, + TiMemory memory); + +// Function `ti_unmap_memory` (1.4.0) +// +// Unmaps a device memory and makes any host-side changes about the memory +// visible to the device. You *must* ensure that there is no further access to +// the previously mapped host-addressable space. +TI_DLL_EXPORT void TI_API_CALL ti_unmap_memory(TiRuntime runtime, + TiMemory memory); + +// Function `ti_allocate_image` (1.4.0) +// +// Allocates a device image with provided parameters. +TI_DLL_EXPORT TiImage TI_API_CALL +ti_allocate_image(TiRuntime runtime, const TiImageAllocateInfo *allocate_info); + +// Function `ti_free_image` (1.4.0) +// +// Frees an image allocation. +TI_DLL_EXPORT void TI_API_CALL ti_free_image(TiRuntime runtime, TiImage image); + +// Function `ti_create_sampler` +TI_DLL_EXPORT TiSampler TI_API_CALL +ti_create_sampler(TiRuntime runtime, const TiSamplerCreateInfo *create_info); + +// Function `ti_destroy_sampler` +TI_DLL_EXPORT void TI_API_CALL ti_destroy_sampler(TiRuntime runtime, + TiSampler sampler); + +// Function `ti_copy_memory_device_to_device` (Device Command) (1.4.0) +// +// Copies the data in a contiguous subsection of the device memory to another +// subsection. The two subsections *must not* overlap. +TI_DLL_EXPORT void TI_API_CALL +ti_copy_memory_device_to_device(TiRuntime runtime, + const TiMemorySlice *dst_memory, + const TiMemorySlice *src_memory); + +// Function `ti_copy_image_device_to_device` (Device Command) +// +// Copies the image data in a contiguous subsection of the device image to +// another subsection. The two subsections *must not* overlap. +TI_DLL_EXPORT void TI_API_CALL +ti_copy_image_device_to_device(TiRuntime runtime, + const TiImageSlice *dst_image, + const TiImageSlice *src_image); + +// Function `ti_track_image_ext` (1.4.0) +// +// Tracks the device image with the provided image layout. Because Taichi tracks +// image layouts internally, it is *only* useful to inform Taichi that the image +// is transitioned to a new layout by external procedures. +TI_DLL_EXPORT void TI_API_CALL ti_track_image_ext(TiRuntime runtime, + TiImage image, + TiImageLayout layout); + +// Function `ti_transition_image` (Device Command) (1.4.0) +// +// Transitions the image to the provided image layout. Because Taichi tracks +// image layouts internally, it is *only* useful to enforce an image layout for +// external procedures to use. +TI_DLL_EXPORT void TI_API_CALL ti_transition_image(TiRuntime runtime, + TiImage image, + TiImageLayout layout); + +// Function `ti_launch_kernel` (Device Command) (1.4.0) +// +// Launches a Taichi kernel with the provided arguments. The arguments *must* +// have the same count and types in the same order as in the source code. +TI_DLL_EXPORT void TI_API_CALL ti_launch_kernel(TiRuntime runtime, + TiKernel kernel, + uint32_t arg_count, + const TiArgument *args); + +// Function `ti_launch_compute_graph` (Device Command) (1.4.0) +// +// Launches a Taichi compute graph with provided named arguments. The named +// arguments *must* have the same count, names, and types as in the source code. +TI_DLL_EXPORT void TI_API_CALL +ti_launch_compute_graph(TiRuntime runtime, + TiComputeGraph compute_graph, + uint32_t arg_count, + const TiNamedArgument *args); + +// Function `ti_flush` (1.4.0) +// +// Submits all previously invoked device commands to the offload device for +// execution. +TI_DLL_EXPORT void TI_API_CALL ti_flush(TiRuntime runtime); + +// Function `ti_wait` (1.4.0) +// +// Waits until all previously invoked device commands are executed. Any invoked +// command that has not been submitted is submitted first. +TI_DLL_EXPORT void TI_API_CALL ti_wait(TiRuntime runtime); + +// Function `ti_load_aot_module` (1.4.0) +// +// Loads a pre-compiled AOT module from the file system. +// Returns [`TI_NULL_HANDLE`](#definition-ti_null_handle) if the runtime fails +// to load the AOT module from the specified path. +TI_DLL_EXPORT TiAotModule TI_API_CALL +ti_load_aot_module(TiRuntime runtime, const char *module_path); + +// Function `ti_create_aot_module` (1.4.0) +// +// Creates a pre-compiled AOT module from TCM data. +// Returns [`TI_NULL_HANDLE`](#definition-ti_null_handle) if the runtime fails +// to create the AOT module from TCM data. +TI_DLL_EXPORT TiAotModule TI_API_CALL ti_create_aot_module(TiRuntime runtime, + const void *tcm, + uint64_t size); + +// Function `ti_destroy_aot_module` (1.4.0) +// +// Destroys a loaded AOT module and releases all related resources. +TI_DLL_EXPORT void TI_API_CALL ti_destroy_aot_module(TiAotModule aot_module); + +// Function `ti_get_aot_module_kernel` (1.4.0) +// +// Retrieves a pre-compiled Taichi kernel from the AOT module. +// Returns [`TI_NULL_HANDLE`](#definition-ti_null_handle) if the module does not +// have a kernel of the specified name. +TI_DLL_EXPORT TiKernel TI_API_CALL +ti_get_aot_module_kernel(TiAotModule aot_module, const char *name); + +// Function `ti_get_aot_module_compute_graph` (1.4.0) +// +// Retrieves a pre-compiled compute graph from the AOT module. +// Returns [`TI_NULL_HANDLE`](#definition-ti_null_handle) if the module does not +// have a compute graph of the specified name. +TI_DLL_EXPORT TiComputeGraph TI_API_CALL +ti_get_aot_module_compute_graph(TiAotModule aot_module, const char *name); + +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus diff --git a/local_packages/taichi/_lib/c_api/include/taichi/taichi_cpu.h b/local_packages/taichi/_lib/c_api/include/taichi/taichi_cpu.h new file mode 100644 index 0000000..e94ca65 --- /dev/null +++ b/local_packages/taichi/_lib/c_api/include/taichi/taichi_cpu.h @@ -0,0 +1,29 @@ +#pragma once + +#ifndef TAICHI_H +#include "taichi.h" +#endif // TAICHI_H + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +// Structure `TiCpuMemoryInteropInfo` +typedef struct TiCpuMemoryInteropInfo { + void *ptr; + uint64_t size; +} TiCpuMemoryInteropInfo; + +// Function `ti_export_cpu_memory` +TI_DLL_EXPORT void TI_API_CALL +ti_export_cpu_memory(TiRuntime runtime, + TiMemory memory, + TiCpuMemoryInteropInfo *interop_info); + +TI_DLL_EXPORT TiMemory TI_API_CALL ti_import_cpu_memory(TiRuntime runtime, + void *ptr, + size_t memory_size); + +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus diff --git a/local_packages/taichi/_lib/c_api/include/taichi/taichi_cuda.h b/local_packages/taichi/_lib/c_api/include/taichi/taichi_cuda.h new file mode 100644 index 0000000..a67b4f2 --- /dev/null +++ b/local_packages/taichi/_lib/c_api/include/taichi/taichi_cuda.h @@ -0,0 +1,36 @@ +#pragma once + +#ifndef TAICHI_H +#include "taichi.h" +#endif // TAICHI_H + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +// Structure `TiCudaMemoryInteropInfo` +typedef struct TiCudaMemoryInteropInfo { + void *ptr; + uint64_t size; +} TiCudaMemoryInteropInfo; + +// Function `ti_export_cuda_memory` +TI_DLL_EXPORT void TI_API_CALL +ti_export_cuda_memory(TiRuntime runtime, + TiMemory memory, + TiCudaMemoryInteropInfo *interop_info); + +// Function `ti_import_cuda_memory` +TI_DLL_EXPORT TiMemory TI_API_CALL ti_import_cuda_memory(TiRuntime runtime, + void *ptr, + size_t memory_size); + +// Function `ti_set_cuda_stream` +TI_DLL_EXPORT void TI_API_CALL ti_set_cuda_stream(void *stream); + +// Function `ti_get_cuda_stream` +TI_DLL_EXPORT void TI_API_CALL ti_get_cuda_stream(void **stream); + +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus diff --git a/local_packages/taichi/_lib/c_api/include/taichi/taichi_opengl.h b/local_packages/taichi/_lib/c_api/include/taichi/taichi_opengl.h new file mode 100644 index 0000000..c4d68ba --- /dev/null +++ b/local_packages/taichi/_lib/c_api/include/taichi/taichi_opengl.h @@ -0,0 +1,67 @@ +#pragma once + +#ifndef TAICHI_H +#include "taichi.h" +#endif // TAICHI_H + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +// Structure `TiOpenglRuntimeInteropInfo` +typedef struct TiOpenglRuntimeInteropInfo { + void *get_proc_addr; +} TiOpenglRuntimeInteropInfo; + +// Structure `TiOpenglMemoryInteropInfo` +typedef struct TiOpenglMemoryInteropInfo { + GLuint buffer; + GLsizeiptr size; +} TiOpenglMemoryInteropInfo; + +// Structure `TiOpenglImageInteropInfo` +typedef struct TiOpenglImageInteropInfo { + GLuint texture; + GLenum target; + GLsizei levels; + GLenum format; + GLsizei width; + GLsizei height; + GLsizei depth; +} TiOpenglImageInteropInfo; + +// Function `ti_import_opengl_runtime` +TI_DLL_EXPORT TiRuntime TI_API_CALL +ti_import_opengl_runtime(TiOpenglRuntimeInteropInfo *interop_info, + bool use_gles); + +// Function `ti_export_opengl_runtime` +TI_DLL_EXPORT void TI_API_CALL +ti_export_opengl_runtime(TiRuntime runtime, + TiOpenglRuntimeInteropInfo *interop_info); + +// Function `ti_import_opengl_memory` +TI_DLL_EXPORT TiMemory TI_API_CALL +ti_import_opengl_memory(TiRuntime runtime, + const TiOpenglMemoryInteropInfo *interop_info); + +// Function `ti_export_opengl_memory` +TI_DLL_EXPORT void TI_API_CALL +ti_export_opengl_memory(TiRuntime runtime, + TiMemory memory, + TiOpenglMemoryInteropInfo *interop_info); + +// Function `ti_import_opengl_image` +TI_DLL_EXPORT TiImage TI_API_CALL +ti_import_opengl_image(TiRuntime runtime, + const TiOpenglImageInteropInfo *interop_info); + +// Function `ti_export_opengl_image` +TI_DLL_EXPORT void TI_API_CALL +ti_export_opengl_image(TiRuntime runtime, + TiImage image, + TiOpenglImageInteropInfo *interop_info); + +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus diff --git a/local_packages/taichi/_lib/c_api/include/taichi/taichi_platform.h b/local_packages/taichi/_lib/c_api/include/taichi/taichi_platform.h new file mode 100644 index 0000000..5759e3f --- /dev/null +++ b/local_packages/taichi/_lib/c_api/include/taichi/taichi_platform.h @@ -0,0 +1,55 @@ +#pragma once + +// TO KEEP THE INCLUDE DEPENDENCY CLEAN, PLEASE DO NOT INCLUDE ANY OTHER +// TAICHI HEADERS INTO THIS ONE. +// +// TODO(#2196): Once we can slim down "taichi/common/core.h", consider moving +// the contents back to core.h and delete this file. +#ifndef _CRT_SECURE_NO_WARNINGS +#define _CRT_SECURE_NO_WARNINGS +#endif + +// https://gcc.gnu.org/wiki/Visibility +#if defined _WIN32 || defined _WIN64 || defined __CYGWIN__ +#ifdef __GNUC__ +#define TI_DLL_EXPORT __attribute__((dllexport)) +#define TI_API_CALL +#else +#define TI_DLL_EXPORT __declspec(dllexport) +#define TI_API_CALL __stdcall +#endif // __GNUC__ +#else +#define TI_DLL_EXPORT __attribute__((visibility("default"))) +#define TI_API_CALL +#endif // defined _WIN32 || defined _WIN64 || defined __CYGWIN__ + +// Windows +#if defined(_WIN64) +#define TI_PLATFORM_WINDOWS +#endif + +#if defined(_WIN32) && !defined(_WIN64) +static_assert(false, "32-bit Windows systems are not supported") +#endif + +// Linux +#if defined(__linux__) +#if defined(ANDROID) +#define TI_PLATFORM_ANDROID +#else +#define TI_PLATFORM_LINUX +#endif +#endif + +// OSX +#if defined(__APPLE__) +#define TI_PLATFORM_OSX +#endif + +#if (defined(TI_PLATFORM_LINUX) || defined(TI_PLATFORM_OSX) || \ + defined(__unix__)) +#define TI_PLATFORM_UNIX +#endif + +#include +#include diff --git a/local_packages/taichi/_lib/c_api/include/taichi/taichi_unity.h b/local_packages/taichi/_lib/c_api/include/taichi/taichi_unity.h new file mode 100644 index 0000000..d66ad0b --- /dev/null +++ b/local_packages/taichi/_lib/c_api/include/taichi/taichi_unity.h @@ -0,0 +1,64 @@ +#pragma once + +#ifndef TAICHI_H +#include "taichi.h" +#endif // TAICHI_H + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +// Handle `TixNativeBufferUnity` +typedef struct TixNativeBufferUnity_t *TixNativeBufferUnity; + +// Callback `TixAsyncTaskUnity` +typedef void(TI_API_CALL *TixAsyncTaskUnity)(void *user_data); + +// Function `tix_import_native_runtime_unity` +TI_DLL_EXPORT TiRuntime TI_API_CALL tix_import_native_runtime_unity(); + +// Function `tix_enqueue_task_async_unity` +TI_DLL_EXPORT void TI_API_CALL +tix_enqueue_task_async_unity(void *user_data, TixAsyncTaskUnity async_task); + +// Function `tix_launch_kernel_async_unity` +TI_DLL_EXPORT void TI_API_CALL +tix_launch_kernel_async_unity(TiRuntime runtime, + TiKernel kernel, + uint32_t arg_count, + const TiArgument *args); + +// Function `tix_launch_compute_graph_async_unity` +TI_DLL_EXPORT void TI_API_CALL +tix_launch_compute_graph_async_unity(TiRuntime runtime, + TiComputeGraph compute_graph, + uint32_t arg_count, + const TiNamedArgument *args); + +// Function `tix_copy_memory_to_native_buffer_async_unity` +TI_DLL_EXPORT void TI_API_CALL +tix_copy_memory_to_native_buffer_async_unity(TiRuntime runtime, + TixNativeBufferUnity dst, + uint64_t dst_offset, + const TiMemorySlice *src); + +// Function `tix_copy_memory_device_to_host_unity` +TI_DLL_EXPORT void TI_API_CALL +tix_copy_memory_device_to_host_unity(TiRuntime runtime, + void *dst, + uint64_t dst_offset, + const TiMemorySlice *src); + +// Function `tix_copy_memory_host_to_device_unity` +TI_DLL_EXPORT void TI_API_CALL +tix_copy_memory_host_to_device_unity(TiRuntime runtime, + const TiMemorySlice *dst, + const void *src, + uint64_t src_offset); + +// Function `tix_submit_async_unity` +TI_DLL_EXPORT void *TI_API_CALL tix_submit_async_unity(TiRuntime runtime); + +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus diff --git a/local_packages/taichi/_lib/c_api/include/taichi/taichi_vulkan.h b/local_packages/taichi/_lib/c_api/include/taichi/taichi_vulkan.h new file mode 100644 index 0000000..09bb31f --- /dev/null +++ b/local_packages/taichi/_lib/c_api/include/taichi/taichi_vulkan.h @@ -0,0 +1,151 @@ +// # Vulkan Backend Features +// +// Taichi's Vulkan API gives you further control over the Vulkan version and +// extension requirements and allows you to interop with external Vulkan +// applications with shared resources. +// +#pragma once + +#ifndef TAICHI_H +#include "taichi.h" +#endif // TAICHI_H + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +// Structure `TiVulkanRuntimeInteropInfo` (1.4.0) +// +// Necessary detail to share the same Vulkan runtime between Taichi and external +// procedures. +// +// +// **NOTE** `compute_queue` and `graphics_queue` can be the same if the queue +// family have `VK_QUEUE_COMPUTE_BIT` and `VK_QUEUE_GRAPHICS_BIT` set at the +// same tiem. +typedef struct TiVulkanRuntimeInteropInfo { + // Pointer to Vulkan loader function `vkGetInstanceProcAddr`. + PFN_vkGetInstanceProcAddr get_instance_proc_addr; + // Target Vulkan API version. + uint32_t api_version; + // Vulkan instance handle. + VkInstance instance; + // Vulkan physical device handle. + VkPhysicalDevice physical_device; + // Vulkan logical device handle. + VkDevice device; + // Vulkan queue handle created in the queue family at + // `structure.vulkan_runtime_interop_info.compute_queue_family_index`. + VkQueue compute_queue; + // Index of a Vulkan queue family with the `VK_QUEUE_COMPUTE_BIT` set. + uint32_t compute_queue_family_index; + // Vulkan queue handle created in the queue family at + // `structure.vulkan_runtime_interop_info.graphics_queue_family_index`. + VkQueue graphics_queue; + // Index of a Vulkan queue family with the `VK_QUEUE_GRAPHICS_BIT` set. + uint32_t graphics_queue_family_index; +} TiVulkanRuntimeInteropInfo; + +// Structure `TiVulkanMemoryInteropInfo` (1.4.0) +// +// Necessary detail to share the same piece of Vulkan buffer between Taichi and +// external procedures. +typedef struct TiVulkanMemoryInteropInfo { + // Vulkan buffer. + VkBuffer buffer; + // Size of the piece of memory in bytes. + uint64_t size; + // Vulkan buffer usage. In most of the cases, Taichi requires the + // `VK_BUFFER_USAGE_STORAGE_BUFFER_BIT`. + VkBufferUsageFlags usage; + // Device memory binded to the Vulkan buffer. + VkDeviceMemory memory; + // Offset in `VkDeviceMemory` object to the beginning of this allocation, in + // bytes. + uint64_t offset; +} TiVulkanMemoryInteropInfo; + +// Structure `TiVulkanImageInteropInfo` (1.4.0) +// +// Necessary detail to share the same piece of Vulkan image between Taichi and +// external procedures. +typedef struct TiVulkanImageInteropInfo { + // Vulkan image. + VkImage image; + // Vulkan image allocation type. + VkImageType image_type; + // Pixel format. + VkFormat format; + // Image extent. + VkExtent3D extent; + // Number of mip-levels of the image. + uint32_t mip_level_count; + // Number of array layers. + uint32_t array_layer_count; + // Number of samples per pixel. + VkSampleCountFlagBits sample_count; + // Image tiling. + VkImageTiling tiling; + // Vulkan image usage. In most cases, Taichi requires the + // `VK_IMAGE_USAGE_STORAGE_BIT` and the `VK_IMAGE_USAGE_SAMPLED_BIT`. + VkImageUsageFlags usage; +} TiVulkanImageInteropInfo; + +// Function `ti_create_vulkan_runtime_ext` (1.4.0) +// +// Creates a Vulkan Taichi runtime with user-controlled capability settings. +TI_DLL_EXPORT TiRuntime TI_API_CALL +ti_create_vulkan_runtime_ext(uint32_t api_version, + uint32_t instance_extension_count, + const char **instance_extensions, + uint32_t device_extension_count, + const char **device_extensions); + +// Function `ti_import_vulkan_runtime` (1.4.0) +// +// Imports the Vulkan runtime owned by Taichi to external procedures. +TI_DLL_EXPORT TiRuntime TI_API_CALL +ti_import_vulkan_runtime(const TiVulkanRuntimeInteropInfo *interop_info); + +// Function `ti_export_vulkan_runtime` (1.4.0) +// +// Exports a Vulkan runtime from external procedures to Taichi. +TI_DLL_EXPORT void TI_API_CALL +ti_export_vulkan_runtime(TiRuntime runtime, + TiVulkanRuntimeInteropInfo *interop_info); + +// Function `ti_import_vulkan_memory` (1.4.0) +// +// Imports the Vulkan buffer owned by Taichi to external procedures. +TI_DLL_EXPORT TiMemory TI_API_CALL +ti_import_vulkan_memory(TiRuntime runtime, + const TiVulkanMemoryInteropInfo *interop_info); + +// Function `ti_export_vulkan_memory` (1.4.0) +// +// Exports a Vulkan buffer from external procedures to Taichi. +TI_DLL_EXPORT void TI_API_CALL +ti_export_vulkan_memory(TiRuntime runtime, + TiMemory memory, + TiVulkanMemoryInteropInfo *interop_info); + +// Function `ti_import_vulkan_image` (1.4.0) +// +// Imports the Vulkan image owned by Taichi to external procedures. +TI_DLL_EXPORT TiImage TI_API_CALL +ti_import_vulkan_image(TiRuntime runtime, + const TiVulkanImageInteropInfo *interop_info, + VkImageViewType view_type, + VkImageLayout layout); + +// Function `ti_export_vulkan_image` (1.4.0) +// +// Exports a Vulkan image from external procedures to Taichi. +TI_DLL_EXPORT void TI_API_CALL +ti_export_vulkan_image(TiRuntime runtime, + TiImage image, + TiVulkanImageInteropInfo *interop_info); + +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus diff --git a/local_packages/taichi/_lib/c_api/lib/taichi_c_api.lib b/local_packages/taichi/_lib/c_api/lib/taichi_c_api.lib new file mode 100644 index 0000000..4d088eb Binary files /dev/null and b/local_packages/taichi/_lib/c_api/lib/taichi_c_api.lib differ diff --git a/local_packages/taichi/_lib/c_api/runtime/runtime_cuda.bc b/local_packages/taichi/_lib/c_api/runtime/runtime_cuda.bc new file mode 100644 index 0000000..10ef2d6 Binary files /dev/null and b/local_packages/taichi/_lib/c_api/runtime/runtime_cuda.bc differ diff --git a/local_packages/taichi/_lib/c_api/runtime/runtime_dx12.bc b/local_packages/taichi/_lib/c_api/runtime/runtime_dx12.bc new file mode 100644 index 0000000..b99b471 Binary files /dev/null and b/local_packages/taichi/_lib/c_api/runtime/runtime_dx12.bc differ diff --git a/local_packages/taichi/_lib/c_api/runtime/runtime_x64.bc b/local_packages/taichi/_lib/c_api/runtime/runtime_x64.bc new file mode 100644 index 0000000..2c70bd7 Binary files /dev/null and b/local_packages/taichi/_lib/c_api/runtime/runtime_x64.bc differ diff --git a/local_packages/taichi/_lib/c_api/runtime/slim_libdevice.10.bc b/local_packages/taichi/_lib/c_api/runtime/slim_libdevice.10.bc new file mode 100644 index 0000000..ba83492 Binary files /dev/null and b/local_packages/taichi/_lib/c_api/runtime/slim_libdevice.10.bc differ diff --git a/local_packages/taichi/_lib/c_api/taichi/lib/cmake/taichi/TaichiConfig.cmake b/local_packages/taichi/_lib/c_api/taichi/lib/cmake/taichi/TaichiConfig.cmake new file mode 100644 index 0000000..9fff2f1 --- /dev/null +++ b/local_packages/taichi/_lib/c_api/taichi/lib/cmake/taichi/TaichiConfig.cmake @@ -0,0 +1,29 @@ + +####### Expanded from @PACKAGE_INIT@ by configure_package_config_file() ####### +####### Any changes to this file will be overwritten by the next CMake run #### +####### The input file was TaichiConfig.cmake.in ######## + +get_filename_component(PACKAGE_PREFIX_DIR "${CMAKE_CURRENT_LIST_DIR}/../../../../../../../../" ABSOLUTE) + +macro(set_and_check _var _file) + set(${_var} "${_file}") + if(NOT EXISTS "${_file}") + message(FATAL_ERROR "File or directory ${_file} referenced by variable ${_var} does not exist !") + endif() +endmacro() + +macro(check_required_components _NAME) + foreach(comp ${${_NAME}_FIND_COMPONENTS}) + if(NOT ${_NAME}_${comp}_FOUND) + if(${_NAME}_FIND_REQUIRED_${comp}) + set(${_NAME}_FOUND FALSE) + endif() + endif() + endforeach() +endmacro() + +#################################################################################### + +include("${CMAKE_CURRENT_LIST_DIR}/TaichiTargets.cmake") + +check_required_components("Runtime") diff --git a/local_packages/taichi/_lib/c_api/taichi/lib/cmake/taichi/TaichiConfigVersion.cmake b/local_packages/taichi/_lib/c_api/taichi/lib/cmake/taichi/TaichiConfigVersion.cmake new file mode 100644 index 0000000..3b4f83f --- /dev/null +++ b/local_packages/taichi/_lib/c_api/taichi/lib/cmake/taichi/TaichiConfigVersion.cmake @@ -0,0 +1,65 @@ +# This is a basic version file for the Config-mode of find_package(). +# It is used by write_basic_package_version_file() as input file for configure_file() +# to create a version-file which can be installed along a config.cmake file. +# +# The created file sets PACKAGE_VERSION_EXACT if the current version string and +# the requested version string are exactly the same and it sets +# PACKAGE_VERSION_COMPATIBLE if the current version is >= requested version, +# but only if the requested major version is the same as the current one. +# The variable CVF_VERSION must be set before calling configure_file(). + + +set(PACKAGE_VERSION "1.7.4") + +if(PACKAGE_VERSION VERSION_LESS PACKAGE_FIND_VERSION) + set(PACKAGE_VERSION_COMPATIBLE FALSE) +else() + + if("1.7.4" MATCHES "^([0-9]+)\\.") + set(CVF_VERSION_MAJOR "${CMAKE_MATCH_1}") + if(NOT CVF_VERSION_MAJOR VERSION_EQUAL 0) + string(REGEX REPLACE "^0+" "" CVF_VERSION_MAJOR "${CVF_VERSION_MAJOR}") + endif() + else() + set(CVF_VERSION_MAJOR "1.7.4") + endif() + + if(PACKAGE_FIND_VERSION_RANGE) + # both endpoints of the range must have the expected major version + math (EXPR CVF_VERSION_MAJOR_NEXT "${CVF_VERSION_MAJOR} + 1") + if (NOT PACKAGE_FIND_VERSION_MIN_MAJOR STREQUAL CVF_VERSION_MAJOR + OR ((PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "INCLUDE" AND NOT PACKAGE_FIND_VERSION_MAX_MAJOR STREQUAL CVF_VERSION_MAJOR) + OR (PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "EXCLUDE" AND NOT PACKAGE_FIND_VERSION_MAX VERSION_LESS_EQUAL CVF_VERSION_MAJOR_NEXT))) + set(PACKAGE_VERSION_COMPATIBLE FALSE) + elseif(PACKAGE_FIND_VERSION_MIN_MAJOR STREQUAL CVF_VERSION_MAJOR + AND ((PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "INCLUDE" AND PACKAGE_VERSION VERSION_LESS_EQUAL PACKAGE_FIND_VERSION_MAX) + OR (PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "EXCLUDE" AND PACKAGE_VERSION VERSION_LESS PACKAGE_FIND_VERSION_MAX))) + set(PACKAGE_VERSION_COMPATIBLE TRUE) + else() + set(PACKAGE_VERSION_COMPATIBLE FALSE) + endif() + else() + if(PACKAGE_FIND_VERSION_MAJOR STREQUAL CVF_VERSION_MAJOR) + set(PACKAGE_VERSION_COMPATIBLE TRUE) + else() + set(PACKAGE_VERSION_COMPATIBLE FALSE) + endif() + + if(PACKAGE_FIND_VERSION STREQUAL PACKAGE_VERSION) + set(PACKAGE_VERSION_EXACT TRUE) + endif() + endif() +endif() + + +# if the installed or the using project don't have CMAKE_SIZEOF_VOID_P set, ignore it: +if("${CMAKE_SIZEOF_VOID_P}" STREQUAL "" OR "8" STREQUAL "") + return() +endif() + +# check that the installed version has the same 32/64bit-ness as the one which is currently searching: +if(NOT CMAKE_SIZEOF_VOID_P STREQUAL "8") + math(EXPR installedBits "8 * 8") + set(PACKAGE_VERSION "${PACKAGE_VERSION} (${installedBits}bit)") + set(PACKAGE_VERSION_UNSUITABLE TRUE) +endif() diff --git a/local_packages/taichi/_lib/c_api/taichi/lib/cmake/taichi/TaichiTargets.cmake b/local_packages/taichi/_lib/c_api/taichi/lib/cmake/taichi/TaichiTargets.cmake new file mode 100644 index 0000000..71f207a --- /dev/null +++ b/local_packages/taichi/_lib/c_api/taichi/lib/cmake/taichi/TaichiTargets.cmake @@ -0,0 +1,121 @@ +if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.8) + message(FATAL_ERROR "CMake >= 2.8.0 required") +endif() +if(CMAKE_VERSION VERSION_LESS "2.8.3") + message(FATAL_ERROR "CMake >= 2.8.3 required") +endif() +cmake_policy(PUSH) +cmake_policy(VERSION 2.8.3...3.23) + +# Commands may need to know the format version. +set(CMAKE_IMPORT_FILE_VERSION 1) + +# Protect against multiple inclusion, which would fail when already imported targets are added once more. +set(_cmake_targets_defined "") +set(_cmake_targets_not_defined "") +set(_cmake_expected_targets "") +foreach(_cmake_expected_target IN ITEMS taichi_c_api) + list(APPEND _cmake_expected_targets "${_cmake_expected_target}") + if(TARGET "${_cmake_expected_target}") + list(APPEND _cmake_targets_defined "${_cmake_expected_target}") + else() + list(APPEND _cmake_targets_not_defined "${_cmake_expected_target}") + endif() +endforeach() +unset(_cmake_expected_target) +if(_cmake_targets_defined STREQUAL _cmake_expected_targets) + unset(_cmake_targets_defined) + unset(_cmake_targets_not_defined) + unset(_cmake_expected_targets) + unset(CMAKE_IMPORT_FILE_VERSION) + cmake_policy(POP) + return() +endif() +if(NOT _cmake_targets_defined STREQUAL "") + string(REPLACE ";" ", " _cmake_targets_defined_text "${_cmake_targets_defined}") + string(REPLACE ";" ", " _cmake_targets_not_defined_text "${_cmake_targets_not_defined}") + message(FATAL_ERROR "Some (but not all) targets in this export set were already defined.\nTargets Defined: ${_cmake_targets_defined_text}\nTargets not yet defined: ${_cmake_targets_not_defined_text}\n") +endif() +unset(_cmake_targets_defined) +unset(_cmake_targets_not_defined) +unset(_cmake_expected_targets) + + +# Compute the installation prefix relative to this file. +get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +if(_IMPORT_PREFIX STREQUAL "/") + set(_IMPORT_PREFIX "") +endif() + +# - [taichi_c_api] ------------------------------------------------------------- + +add_library(taichi_c_api SHARED IMPORTED) + +list(APPEND _cmake_import_check_targets taichi_c_api ) + +if (APPLE) + set_target_properties(taichi_c_api PROPERTIES + IMPORTED_LOCATION "${_IMPORT_PREFIX}/lib/libtaichi_c_api.dylib" + IMPORTED_SONAME "@rpath/libtaichi_c_api.dylib" + INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include" + ) + list(APPEND _cmake_import_check_files_for_taichi_c_api "${_IMPORT_PREFIX}/lib/libtaichi_c_api.dylib" ) +elseif (WIN32) + set_target_properties(taichi_c_api PROPERTIES + IMPORTED_LOCATION "${_IMPORT_PREFIX}/bin/taichi_c_api.dll" + IMPORTED_IMPLIB "${_IMPORT_PREFIX}/lib/taichi_c_api.lib" + INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include" + ) + list(APPEND _cmake_import_check_files_for_taichi_c_api "${_IMPORT_PREFIX}/bin/taichi_c_api.dll" ) +elseif (UNIX) + # Check if the .so file exists in the lib directory + if(EXISTS "${_IMPORT_PREFIX}/lib/libtaichi_c_api.so") + set(TAICHI_SO_LOCATION "${_IMPORT_PREFIX}/lib/libtaichi_c_api.so") + else() + set(TAICHI_SO_LOCATION "${_IMPORT_PREFIX}/lib64/libtaichi_c_api.so") + endif() + + set_target_properties(taichi_c_api PROPERTIES + IMPORTED_LOCATION "${TAICHI_SO_LOCATION}" + IMPORTED_SONAME "libtaichi_c_api.so" + INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include" + ) + list(APPEND _cmake_import_check_files_for_taichi_c_api "${TAICHI_SO_LOCATION}" ) +endif() + +# ------------------------------------------------------------------------------ + +# Cleanup temporary variables. +set(_IMPORT_PREFIX) + +# Loop over all imported files and verify that they actually exist +foreach(_cmake_target IN LISTS _cmake_import_check_targets) + foreach(_cmake_file IN LISTS "_cmake_import_check_files_for_${_cmake_target}") + if(NOT EXISTS "${_cmake_file}") + message(FATAL_ERROR "The imported target \"${_cmake_target}\" references the file + \"${_cmake_file}\" +but this file does not exist. Possible reasons include: +* The file was deleted, renamed, or moved to another location. +* An install or uninstall procedure did not complete successfully. +* The installation package was faulty and contained + \"${CMAKE_CURRENT_LIST_FILE}\" +but not all the files it references. +") + endif() + endforeach() + unset(_cmake_file) + unset("_cmake_import_check_files_for_${_cmake_target}") +endforeach() +unset(_cmake_target) +unset(_cmake_import_check_targets) + +# This file does not depend on other imported targets which have +# been exported from the same project but in a separate export set. + +# Commands beyond this point should not need to know the version. +set(CMAKE_IMPORT_FILE_VERSION) +cmake_policy(POP) diff --git a/local_packages/taichi/_lib/core/taichi_python.cp312-win_amd64.pyd b/local_packages/taichi/_lib/core/taichi_python.cp312-win_amd64.pyd new file mode 100644 index 0000000..a29bba7 Binary files /dev/null and b/local_packages/taichi/_lib/core/taichi_python.cp312-win_amd64.pyd differ diff --git a/local_packages/taichi/_lib/runtime/runtime_cuda.bc b/local_packages/taichi/_lib/runtime/runtime_cuda.bc new file mode 100644 index 0000000..10ef2d6 Binary files /dev/null and b/local_packages/taichi/_lib/runtime/runtime_cuda.bc differ diff --git a/local_packages/taichi/_lib/runtime/runtime_dx12.bc b/local_packages/taichi/_lib/runtime/runtime_dx12.bc new file mode 100644 index 0000000..b99b471 Binary files /dev/null and b/local_packages/taichi/_lib/runtime/runtime_dx12.bc differ diff --git a/local_packages/taichi/_lib/runtime/runtime_x64.bc b/local_packages/taichi/_lib/runtime/runtime_x64.bc new file mode 100644 index 0000000..2c70bd7 Binary files /dev/null and b/local_packages/taichi/_lib/runtime/runtime_x64.bc differ diff --git a/local_packages/taichi/_lib/runtime/slim_libdevice.10.bc b/local_packages/taichi/_lib/runtime/slim_libdevice.10.bc new file mode 100644 index 0000000..ba83492 Binary files /dev/null and b/local_packages/taichi/_lib/runtime/slim_libdevice.10.bc differ diff --git a/local_packages/taichi/_lib/utils.py b/local_packages/taichi/_lib/utils.py new file mode 100644 index 0000000..b14dc05 --- /dev/null +++ b/local_packages/taichi/_lib/utils.py @@ -0,0 +1,247 @@ +import os +import platform +import re +import sys +import warnings + +from colorama import Fore, Style + +if sys.version_info[0] < 3 or sys.version_info[1] <= 5: + raise RuntimeError( + "\nPlease restart with Python 3.6+\n" + "Current Python version:", + sys.version_info, + ) + + +def in_docker(): + if os.environ.get("TI_IN_DOCKER", "") == "": + return False + return True + + +def get_os_name(): + name = platform.platform() + # in python 3.8, platform.platform() uses mac_ver() on macOS + # it will return 'macOS-XXXX' instead of 'Darwin-XXXX' + if name.lower().startswith("darwin") or name.lower().startswith("macos"): + return "osx" + if name.lower().startswith("windows"): + return "win" + if name.lower().startswith("linux"): + return "linux" + if "bsd" in name.lower(): + return "unix" + assert False, f"Unknown platform name {name}" + + +def import_ti_python_core(): + if get_os_name() != "win": + # pylint: disable=E1101 + old_flags = sys.getdlopenflags() + sys.setdlopenflags(2 | 8) # RTLD_NOW | RTLD_DEEPBIND + else: + pyddir = os.path.dirname(os.path.realpath(__file__)) + os.environ["PATH"] += os.pathsep + pyddir + try: + from taichi._lib.core import taichi_python as core # pylint: disable=C0415 + except Exception as e: + if isinstance(e, ImportError): + print( + Fore.YELLOW + "Share object taichi_python import failed, " + "check this page for possible solutions:\n" + "https://docs.taichi-lang.org/docs/install" + Fore.RESET + ) + if get_os_name() == "win": + # pylint: disable=E1101 + e.msg += "\nConsider installing Microsoft Visual C++ Redistributable: https://aka.ms/vs/16/release/vc_redist.x64.exe" + raise e from None + + if get_os_name() != "win": + sys.setdlopenflags(old_flags) # pylint: disable=E1101 + lib_dir = os.path.join(package_root, "_lib", "runtime") + core.set_lib_dir(locale_encode(lib_dir)) + return core + + +def locale_encode(path): + try: + import locale # pylint: disable=C0415 + + return path.encode(locale.getdefaultlocale()[1]) + except (UnicodeEncodeError, TypeError): + try: + return path.encode(sys.getfilesystemencoding()) + except UnicodeEncodeError: + try: + return path.encode() + except UnicodeEncodeError: + return path + + +def is_ci(): + return os.environ.get("TI_CI", "") == "1" + + +package_root = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) + + +def get_core_shared_object(): + directory = os.path.join(package_root, "_lib") + return os.path.join(directory, "libtaichi_python.so") + + +def print_red_bold(*args, **kwargs): + print(Fore.RED + Style.BRIGHT, end="") + print(*args, **kwargs) + print(Style.RESET_ALL, end="") + + +def print_yellow_bold(*args, **kwargs): + print(Fore.YELLOW + Style.BRIGHT, end="") + print(*args, **kwargs) + print(Style.RESET_ALL, end="") + + +def check_exists(src): + if not os.path.exists(src): + raise FileNotFoundError(f'File "{src}" not exist. Installation corrupted or build incomplete?') + + +ti_python_core = import_ti_python_core() + +ti_python_core.set_python_package_dir(package_root) + +log_level = os.environ.get("TI_LOG_LEVEL", "") +if log_level: + ti_python_core.set_logging_level(log_level) + + +def get_dll_name(name): + if get_os_name() == "linux": + return f"libtaichi_{name}.so" + if get_os_name() == "osx": + return f"libtaichi_{name}.dylib" + if get_os_name() == "win": + return f"taichi_{name}.dll" + raise Exception(f"Unknown OS: {get_os_name()}") + + +def at_startup(): + ti_python_core.set_core_state_python_imported(True) + + +at_startup() + + +def compare_version(latest, current): + latest_num = map(int, latest.split(".")) + current_num = map(int, current.split(".")) + return tuple(latest_num) > tuple(current_num) + + +def _print_taichi_header(): + header = "[Taichi] " + header += f"version {ti_python_core.get_version_string()}, " + + try: + timestamp_path = os.path.join(ti_python_core.get_repo_dir(), "timestamp") + if os.path.exists(timestamp_path): + latest_version = "" + with open(timestamp_path, "r") as f: + latest_version = f.readlines()[1].rstrip() + if compare_version(latest_version, ti_python_core.get_version_string()): + header += f"latest version {latest_version}, " + except: + pass + + llvm_target_support = ti_python_core.get_llvm_target_support() + header += f"llvm {llvm_target_support}, " + + commit_hash = ti_python_core.get_commit_hash() + commit_hash = commit_hash[:8] + header += f"commit {commit_hash}, " + + header += f"{get_os_name()}, " + + py_ver = ".".join(str(x) for x in sys.version_info[:3]) + header += f"python {py_ver}" + + print(header) + + +if os.getenv("ENABLE_TAICHI_HEADER_PRINT", "True").lower() not in ("false", "0", "f"): + _print_taichi_header() + + +def try_get_wheel_tag(module): + try: + from email.parser import Parser # pylint: disable=import-outside-toplevel + + wheel_path = f'{module.__path__[0]}-{".".join(map(str, module.__version__))}.dist-info/WHEEL' + with open(wheel_path, "r") as f: + meta = Parser().parse(f) + return meta.get("Tag") + except Exception: + return None + + +def try_get_loaded_libc_version(): + assert platform.system() == "Linux" + with open("/proc/self/maps") as f: + content = f.read() + + try: + libc_path = next(v for v in content.split() if "libc-" in v) + ver = re.findall(r"\d+\.\d+", libc_path) + if not ver: + return None + return tuple([int(v) for v in ver[0].split(".")]) + except StopIteration: + return None + + +def try_get_pip_version(): + try: + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + import pip # pylint: disable=import-outside-toplevel + return tuple([int(v) for v in pip.__version__.split(".")]) + except ImportError: + return None + + +def warn_restricted_version(): + if os.environ.get("TI_MANYLINUX2014_OK", ""): + return + + if get_os_name() == "linux": + try: + import taichi as ti # pylint: disable=import-outside-toplevel + + wheel_tag = try_get_wheel_tag(ti) + if wheel_tag and "manylinux2014" in wheel_tag: + print_yellow_bold( + "You have installed a restricted version of taichi, certain features (e.g. Vulkan & GGUI) will not work." + ) + libc_ver = try_get_loaded_libc_version() + if libc_ver and libc_ver < (2, 27): + print_yellow_bold( + "!! Taichi requires glibc >= 2.27 to run, please try upgrading your OS to a recent one (e.g. Ubuntu 18.04 or later) if possible." + ) + + pip_ver = try_get_pip_version() + if pip_ver and pip_ver < (20, 3, 0): + print_yellow_bold( + f"!! Your pip (version {'.'.join(map(str, pip_ver))}) is outdated (20.3.0 or later required), " + "try upgrading pip and install taichi again." + ) + print() + print_yellow_bold(" $ python3 -m pip install --upgrade pip") + print_yellow_bold(" $ python3 -m pip install --force-reinstall taichi") + print() + + print_yellow_bold( + "You can suppress this warning by setting the environment variable TI_MANYLINUX2014_OK=1." + ) + except Exception: + pass diff --git a/local_packages/taichi/_logging.py b/local_packages/taichi/_logging.py new file mode 100644 index 0000000..7e23534 --- /dev/null +++ b/local_packages/taichi/_logging.py @@ -0,0 +1,129 @@ +import inspect +import os + +from taichi._lib import core as ti_python_core + + +def _get_logging(name): + """Generates a decorator to decorate a specific logger function. + + Args: + name (str): The string represents logging level. + Effective levels include: 'trace', 'debug', 'info', 'warn', 'error', 'critical'. + + Returns: + Callabe: The decorated function. + """ + + def logger(msg, *args, **kwargs): + # Python inspection takes time (~0.1ms) so avoid it as much as possible + if ti_python_core.logging_effective(name): + msg_formatted = msg.format(*args, **kwargs) + func = getattr(ti_python_core, name) + frame = inspect.currentframe().f_back + file_name, lineno, func_name, _, _ = inspect.getframeinfo(frame) + file_name = os.path.basename(file_name) + msg = f"[{file_name}:{func_name}@{lineno}] {msg_formatted}" + func(msg) + + return logger + + +def set_logging_level(level): + """Setting the logging level to a specified value. + Available levels are: 'trace', 'debug', 'info', 'warn', 'error', 'critical'. + + Note that after calling this function, logging levels below the specified one will + also be effective. For example if `level` is set to 'warn', then the levels below + it, which are 'error' and 'critical' in this case, will also be effective. + + See also https://docs.taichi-lang.org/docs/developer_utilities#logging. + + Args: + level (str): Logging level. + + Example:: + + >>> set_logging_level('debug') + """ + ti_python_core.set_logging_level(level) + + +def is_logging_effective(level): + """Check if the specified logging level is effective. + All levels below current level will be effective. + The default level is 'info'. + + See also https://docs.taichi-lang.org/docs/developer_utilities#logging. + + Args: + level (str): The string represents logging level. \ + Effective levels include: 'trace', 'debug', 'info', 'warn', 'error', 'critical'. + + Returns: + Bool: Indicate whether the logging level is effective. + + Example:: + + >>> # assume current level is 'info' + >>> print(ti.is_logging_effective("trace")) # False + >>> print(ti.is_logging_effective("debug")) # False + >>> print(ti.is_logging_effective("info")) # True + >>> print(ti.is_logging_effective("warn")) # True + >>> print(ti.is_logging_effective("error")) # True + >>> print(ti.is_logging_effective("critical")) # True + """ + return ti_python_core.logging_effective(level) + + +# ------------------------ + +DEBUG = "debug" +"""The `str` 'debug', used for the `debug` logging level. +""" +# ------------------------ + +TRACE = "trace" +"""The `str` 'trace', used for the `debug` logging level. +""" +# ------------------------ + +INFO = "info" +"""The `str` 'info', used for the `info` logging level. +""" +# ------------------------ + +WARN = "warn" +"""The `str` 'warn', used for the `warn` logging level. +""" +# ------------------------ + +ERROR = "error" +"""The `str` 'error', used for the `error` logging level. +""" +# ------------------------ + +CRITICAL = "critical" +"""The `str` 'critical', used for the `critical` logging level. +""" +# ------------------------ + +supported_log_levels = [DEBUG, TRACE, INFO, WARN, ERROR, CRITICAL] + +debug = _get_logging(DEBUG) +trace = _get_logging(TRACE) +info = _get_logging(INFO) +warn = _get_logging(WARN) +error = _get_logging(ERROR) +critical = _get_logging(CRITICAL) + +__all__ = [ + "DEBUG", + "TRACE", + "INFO", + "WARN", + "ERROR", + "CRITICAL", + "set_logging_level", + "is_logging_effective", +] diff --git a/local_packages/taichi/_main.py b/local_packages/taichi/_main.py new file mode 100644 index 0000000..57676b1 --- /dev/null +++ b/local_packages/taichi/_main.py @@ -0,0 +1,901 @@ +import argparse +import glob +import math +import os +import runpy +import shutil +import subprocess +import sys +import timeit +from collections import defaultdict +from functools import wraps +from pathlib import Path + +import numpy as np +import rich +from colorama import Fore +from rich.console import Console +from rich.syntax import Syntax +from taichi._lib import core as _ti_core +from taichi._lib import utils +from taichi.lang import impl +from taichi.tools import diagnose, video + +import taichi as ti + + +def timer(func): + """Function decorator to benchmark a function running time.""" + + @wraps(func) + def wrapper(*args, **kwargs): + start = timeit.default_timer() + result = func(*args, **kwargs) + elapsed = timeit.default_timer() - start + print(f">>> Running time: {elapsed:.2f}s") + return result + + return wrapper + + +def registerableCLI(cls): + """Class decorator to register methods with @register into a set.""" + cls.registered_commands = set([]) + for name in dir(cls): + method = getattr(cls, name) + if hasattr(method, "registered"): + cls.registered_commands.add(name) + return cls + + +def register(func): + """Method decorator to register CLI commands.""" + func.registered = True + return func + + +@registerableCLI +class TaichiMain: + def __init__(self, test_mode: bool = False): + self.banner = f"\n{'*' * 43}\n** Taichi Programming Language **\n{'*' * 43}" + print(self.banner) + + print(self._get_friend_links()) + + parser = argparse.ArgumentParser(description="Taichi CLI", usage=self._usage()) + parser.add_argument("command", help="command from the above list to run") + + # Flag for unit testing + self.test_mode = test_mode + + self.main_parser = parser + + @timer + def __call__(self): + # Print help if no command provided + if len(sys.argv[1:2]) == 0: + self.main_parser.print_help() + return 1 + + # Parse the command + args = self.main_parser.parse_args(sys.argv[1:2]) + + if args.command not in self.registered_commands: # pylint: disable=E1101 + # TODO: do we really need this? + if args.command.endswith(".py"): + TaichiMain._exec_python_file(args.command) + else: + print(f"{args.command} is not a valid command!") + self.main_parser.print_help() + return 1 + + return getattr(self, args.command)(sys.argv[2:]) + + @staticmethod + def _get_friend_links(): + return ( + "\n" + "Docs: https://docs.taichi-lang.org/\n" + "GitHub: https://github.com/taichi-dev/taichi/\n" + "Forum: https://forum.taichi.graphics/\n" + ) + + def _usage(self) -> str: + """Compose deterministic usage message based on registered_commands.""" + # TODO: add some color to commands + msg = "\n" + space = 20 + for command in sorted(self.registered_commands): # pylint: disable=E1101 + msg += f" {command}{' ' * (space - len(command))}|-> {getattr(self, command).__doc__}\n" + return msg + + @staticmethod + def _exec_python_file(filename: str): + """Execute a Python file based on filename.""" + # TODO: do we really need this? + subprocess.call([sys.executable, filename] + sys.argv[1:]) + + @staticmethod + def _get_examples_dir() -> Path: + """Get the path to the examples directory.""" + + root_dir = utils.package_root + examples_dir = Path(root_dir) / "examples" + return examples_dir + + @staticmethod + def _get_available_examples() -> set: + """Get a set of all available example names.""" + examples_dir = TaichiMain._get_examples_dir() + all_examples = examples_dir.rglob("*.py") + all_example_names = {f.stem: f.parent for f in all_examples} + return all_example_names + + @staticmethod + def _example_choices_type(choices): + def support_choice_with_dot_py(choice): + if choice.endswith(".py") and choice.split(".")[0] in choices: + # try to find and remove python file extension + return choice.split(".")[0] + return choice + + return support_choice_with_dot_py + + @register + def gallery(self, arguments: list = sys.argv[2:]): + """Use mouse to select and run taichi examples in an interactive gui.""" + # set the spacing parameters in the gallery image + slide_bar = 14 + top_margin = 6 + left_margin = 7 + bottom_margin = 32 + row_spacing = 32 + col_spacing = 11 + tile_size = 128 + + # load the gallery image + image_source = utils.package_root + "/assets/**/ti_gallery.png" + gallery_image_path = glob.glob(image_source, recursive=True)[0] + gallery_image = ti.tools.imread(gallery_image_path) + gallery_image = gallery_image[:, :-slide_bar] + width, height = gallery_image.shape[:2] + + # create the gui, 2x4 tiles + gui = ti.GUI("Taichi Gallery", res=(width, height)) + ncols = 4 + + # side length of a tile + dx = tile_size / width + dy = tile_size / height + + examples = [ + "poisson_disk_sampling", + "mass_spring_3d_ggui", + "circle_packing_image", + "snow_phaseField", + "sdf_renderer", + "cornell_box", + "karman_vortex_street", + "euler", + "fractal", + "mpm128", + "pbf2d", + "mass_spring_game", + ] + + def valid_mouse_position(mou_x, mou_y): + xmin = left_margin / width + xmax = 1 - xmin + ymin = 0 + ymax = 1 - top_margin / height + return (xmin <= mou_x <= xmax) and (ymin <= mou_y <= ymax) + + def get_tile_from_mouse(mou_x, mou_y): + """Find the image tile that the mouse is hovering over.""" + x = int(mou_x * width) + y = int(mou_y * height) + rind = y // (row_spacing + tile_size) + cind = (x - left_margin) // (col_spacing + tile_size) + return rind, cind + + def draw_bounding_box(rind, cind): + x0 = cind * (col_spacing + tile_size) + left_margin + y0 = rind * (row_spacing + tile_size) + bottom_margin + x0 /= width + y0 /= height + pts = [(x0, y0), (x0 + dx, y0), (x0 + dx, y0 + dy), (x0, y0 + dy), (x0, y0)] + for i in range(4): + gui.line(pts[i], pts[i + 1], radius=2, color=0x0000FF) + + def on_mouse_click_callback(example_name): + examples_dir = TaichiMain._get_examples_dir() + script = list(examples_dir.rglob(f"{example_name}.py"))[0] + print("Demo source code:") + print() + content = Syntax.from_path(script, line_numbers=True) + console = rich.console.Console() + console.print(content) + self._exec_python_file(str(script)) + + index = None + while not gui.get_event(ti.GUI.ESCAPE, ti.GUI.EXIT): + gui.set_image(gallery_image) + mou_x, mou_y = gui.get_cursor_pos() + gui.get_event(ti.GUI.PRESS) + if valid_mouse_position(mou_x, mou_y): + rind, cind = get_tile_from_mouse(mou_x, mou_y) + draw_bounding_box(rind, cind) + if gui.is_pressed(ti.GUI.LMB): + gui.close() + index = cind + rind * ncols + break + + gui.show() + + if index is not None: + on_mouse_click_callback(examples[index]) + + @register + def example(self, arguments: list = sys.argv[2:]): + """Run an example by name (or name.py)""" + + def colormap(index, name): + from colorsys import hls_to_rgb # pylint: disable=C0415 + + x = (ord(name[0].upper()) - 64.0) / 26.0 + r, g, b = hls_to_rgb(x, 0.4, 1.0) + r = hex(int(r * 255) % 16)[2:] + g = hex(int(g * 255) % 16)[2:] + b = hex(int(b * 255) % 16)[2:] + return f"{index}: [#{r}{r}{g}{g}{b}{b}]{name}" + + console = Console() + table = rich.table.Table( + box=rich.box.HORIZONTALS, + show_header=False, + header_style="bold #2070b2", + title="[bold][#3fdda4]TAICHI[#f8e020] EXAMPLES", + ) + + ncols = 3 + choices = TaichiMain._get_available_examples() + nrows, rem = divmod(len(choices), ncols) + if rem > 0: + nrows += 1 + names = sorted(choices.keys()) + for k in range(nrows): + table.add_row(*[colormap(j, names[j]) for j in range(k, len(choices), nrows)]) + + parser = argparse.ArgumentParser(prog="ti example", description=f"{self.example.__doc__}") + parser.add_argument( + "name", + type=TaichiMain._example_choices_type(choices.keys()), + choices=sorted(choices.keys()), + help=console.print(table), + nargs="?", + default=None, + metavar="name", + ) + parser.add_argument( + "-p", + "--print", + required=False, + dest="print", + action="store_true", + help="Print example source code instead of running it", + ) + parser.add_argument( + "-P", + "--pretty-print", + required=False, + dest="pretty_print", + action="store_true", + help="Like --print, but print in a rich format with line numbers", + ) + parser.add_argument( + "-s", + "--save", + required=False, + dest="save", + action="store_true", + help="Save source code to current directory instead of running it", + ) + + # TODO: Pass the arguments to downstream correctly(#3216). + args = parser.parse_args(arguments) + + examples_dir = TaichiMain._get_examples_dir() + example_name = args.name + if example_name is None: + try: + index = input(f"Please input the example index (between 0 and {len(names)}): ") + while not index.isdigit() or int(index) >= len(names): + index = input(f"Example [{index}] does not exist. Please try again: ") + example_name = names[int(index)] + except KeyboardInterrupt as e: + print("\nCancelled by user, exiting...") + return 1 + + target = str((examples_dir / choices[example_name] / f"{example_name}.py").resolve()) + # path for examples needs to be modified for implicit relative imports + sys.path.append(str((examples_dir / choices[example_name]).resolve())) + + # Short circuit for testing + if self.test_mode: + return args + + if args.save: + print(f"Saving example {example_name} to current directory...") + shutil.copy(target, ".") + return 0 + + if args.pretty_print: + syntax = rich.syntax.Syntax.from_path(target, line_numbers=True) + console = Console() + console.print(syntax) + return 0 + + if args.print: + with open(target) as f: + print(f.read()) + return 0 + + print(f"Running example {example_name} ...") + + runpy.run_path(target, run_name="__main__") + + @staticmethod + @register + def changelog(arguments: list = sys.argv[2:]): + """Display changelog of current version""" + changelog_md = os.path.join(utils.package_root, "CHANGELOG.md") + with open(changelog_md) as f: + print(f.read()) + + @staticmethod + @register + def release(arguments: list = sys.argv[2:]): + """Make source code release""" + raise RuntimeError("TBD") + + @staticmethod + def _mp4_file(name: str) -> str: + if not name.endswith(".mp4"): + raise argparse.ArgumentTypeError("filename must be of type .mp4") + return name + + @register + def gif(self, arguments: list = sys.argv[2:]): + """Convert mp4 file to gif in the same directory""" + parser = argparse.ArgumentParser(prog="ti gif", description=f"{self.gif.__doc__}") + parser.add_argument( + "-i", + "--input", + required=True, + dest="input_file", + type=TaichiMain._mp4_file, + help="Path to input MP4 video file", + ) + parser.add_argument( + "-f", + "--framerate", + required=False, + default=24, + dest="framerate", + type=int, + help="Frame rate of the output GIF", + ) + args = parser.parse_args(arguments) + + args.output_file = str(Path(args.input_file).with_suffix(".gif")) + ti._logging.info(f"Converting {args.input_file} to {args.output_file}") + + # Short circuit for testing + if self.test_mode: + return args + video.mp4_to_gif(args.input_file, args.output_file, args.framerate) + + return None + + @register + def video_speed(self, arguments: list = sys.argv[2:]): + """Speed up video in the same directory""" + parser = argparse.ArgumentParser(prog="ti video_speed", description=f"{self.video_speed.__doc__}") + parser.add_argument( + "-i", + "--input", + required=True, + dest="input_file", + type=TaichiMain._mp4_file, + help="Path to input MP4 video file", + ) + parser.add_argument( + "-s", + "--speed", + required=True, + dest="speed", + type=float, + help="Speedup factor for the output MP4 based on input. (e.g. 2.0)", + ) + args = parser.parse_args(arguments) + + args.output_file = str( + Path(args.input_file).with_name(f"{Path(args.input_file).stem}-sped{Path(args.input_file).suffix}") + ) + + # Short circuit for testing + if self.test_mode: + return args + video.accelerate_video(args.input_file, args.output_file, args.speed) + + return None + + @register + def video_crop(self, arguments: list = sys.argv[2:]): + """Crop video in the same directory""" + parser = argparse.ArgumentParser(prog="ti video_crop", description=f"{self.video_crop.__doc__}") + parser.add_argument( + "-i", + "--input", + required=True, + dest="input_file", + type=TaichiMain._mp4_file, + help="Path to input MP4 video file", + ) + parser.add_argument( + "--x1", + required=True, + dest="x_begin", + type=float, + help="X coordinate of the beginning crop point", + ) + parser.add_argument( + "--x2", + required=True, + dest="x_end", + type=float, + help="X coordinate of the ending crop point", + ) + parser.add_argument( + "--y1", + required=True, + dest="y_begin", + type=float, + help="Y coordinate of the beginning crop point", + ) + parser.add_argument( + "--y2", + required=True, + dest="y_end", + type=float, + help="Y coordinate of the ending crop point", + ) + args = parser.parse_args(arguments) + + args.output_file = str( + Path(args.input_file).with_name(f"{Path(args.input_file).stem}-cropped{Path(args.input_file).suffix}") + ) + + # Short circuit for testing + if self.test_mode: + return args + video.crop_video( + args.input_file, + args.output_file, + args.x_begin, + args.x_end, + args.y_begin, + args.y_end, + ) + + return None + + @register + def video_scale(self, arguments: list = sys.argv[2:]): + """Scale video resolution in the same directory""" + parser = argparse.ArgumentParser(prog="ti video_scale", description=f"{self.video_scale.__doc__}") + parser.add_argument( + "-i", + "--input", + required=True, + dest="input_file", + type=TaichiMain._mp4_file, + help="Path to input MP4 video file", + ) + parser.add_argument( + "-w", + "--ratio-width", + required=True, + dest="ratio_width", + type=float, + help="The scaling ratio of the resolution on width", + ) + parser.add_argument( + "--ratio-height", + required=False, + default=None, + dest="ratio_height", + type=float, + help="The scaling ratio of the resolution on height [default: equal to ratio-width]", + ) + args = parser.parse_args(arguments) + + if not args.ratio_height: + args.ratio_height = args.ratio_width + args.output_file = str( + Path(args.input_file).with_name(f"{Path(args.input_file).stem}-scaled{Path(args.input_file).suffix}") + ) + + # Short circuit for testing + if self.test_mode: + return args + video.scale_video(args.input_file, args.output_file, args.ratio_width, args.ratio_height) + + return None + + @register + def video(self, arguments: list = sys.argv[2:]): + """Make a video using *.png files in the current directory""" + parser = argparse.ArgumentParser(prog="ti video", description=f"{self.video.__doc__}") + parser.add_argument("inputs", nargs="*", help="PNG file(s) as inputs") + parser.add_argument( + "-o", + "--output", + required=False, + default=Path("./video.mp4").resolve(), + dest="output_file", + type=lambda x: Path(x).resolve(), + help="Path to output MP4 video file", + ) + parser.add_argument( + "-f", + "--framerate", + required=False, + default=24, + dest="framerate", + type=int, + help="Frame rate of the output MP4 video", + ) + parser.add_argument( + "-c", + "--crf", + required=False, + default=20, + dest="crf", + type=int, + help="Constant rate factor (0-51, lower is higher quality)", + ) + args = parser.parse_args(arguments) + + if not args.inputs: + args.inputs = sorted(str(p.resolve()) for p in Path(".").glob("*.png")) + + assert ( + 1 <= args.crf <= 51 + ), "The range of the CRF scale is 1-51, where 1 is almost lossless, 20 is the default, and 51 is worst quality possible." + + ti._logging.info(f"Making video using {len(args.inputs)} png files...") + ti._logging.info(f"frame_rate = {args.framerate}") + + # Short circuit for testing + if self.test_mode: + return args + video.make_video( + args.inputs, + output_path=str(args.output_file), + crf=args.crf, + frame_rate=args.framerate, + ) + ti._logging.info(f"Done! Output video file = {args.output_file}") + + return None + + @staticmethod + @register + def doc(arguments: list = sys.argv[2:]): + """Build documentation""" + raise RuntimeError("TBD") + + @staticmethod + @register + def format(arguments: list = sys.argv[2:]): + """Reformat modified source files""" + raise RuntimeError("Please run `pre-commit run -a` instead") + + @staticmethod + @register + def format_all(arguments: list = sys.argv[2:]): + """Reformat all source files""" + raise RuntimeError("Please run `pre-commit run -a` instead") + + @staticmethod + def _display_benchmark_regression(xd, yd, args): + def parse_dat(file): + _dict = {} + with open(file) as f: + for line in f.readlines(): + try: + a, b = line.strip().split(":") + except: + continue + b = float(b) + if abs(b % 1.0) < 1e-5: # codegen_* + b = int(b) + _dict[a.strip()] = b + return _dict + + def parse_name(file): + if file[0:5] == "test_": + return file[5:-4].replace("__test_", "::", 1) + if file[0:10] == "benchmark_": + return "::".join(reversed(file[10:-4].split("__arch_"))) + raise Exception(f"bad benchmark file name {file}") + + def get_dats(directory): + _list = [] + for x in os.listdir(directory): + if x.endswith(".dat"): + _list.append(x) + _dict = {} + for x in _list: + name = parse_name(x) + path = os.path.join(directory, x) + _dict[name] = parse_dat(path) + return _dict + + def plot_in_gui(scatter): + gui = ti.GUI("Regression Test", (640, 480), 0x001122) + print("[Hint] press SPACE to go for next display") + for key, data in scatter.items(): + data = np.array([((i + 0.5) / len(data), x / 2) for i, x in enumerate(data)]) + while not gui.get_event((ti.GUI.PRESS, ti.GUI.SPACE)): + gui.core.title = key + gui.line((0, 0.5), (1, 0.5), 1.8, 0x66CCFF) + gui.circles(data, 0xFFCC66, 1.5) + gui.show() + + spec = args.files + single_line = spec and len(spec) == 1 + xs, ys = get_dats(xd), get_dats(yd) + scatter = defaultdict(list) + for name in reversed(sorted(set(xs.keys()).union(ys.keys()))): + file, func = name.split("::") + u, v = xs.get(name, {}), ys.get(name, {}) + ret = "" + for key in set(u.keys()).union(v.keys()): + if spec and key not in spec: + continue + a, b = u.get(key, 0), v.get(key, 0) + if a == 0: + if b == 0: + res = 1.0 + else: + res = math.inf + else: + res = b / a + scatter[key].append(res) + if res == 1: + continue + if not single_line: + ret += f"{key:<30}" + res -= 1 + color = Fore.RESET + if res > 0: + color = Fore.RED + elif res < 0: + color = Fore.GREEN + if isinstance(a, float): + a = f"{a:>7.2}" + else: + a = f"{a:>7}" + if isinstance(b, float): + b = f"{b:>7.2}" + else: + b = f"{b:>7}" + ret += f"{Fore.MAGENTA}{a}{Fore.RESET} -> " + ret += f"{Fore.CYAN}{b} {color}{res:>+9.1%}{Fore.RESET}\n" + if ret != "": + print(f'{file + "::" + func:_<58}', end="") + if not single_line: + print("") + print(ret, end="") + if not single_line: + print("") + + if args.gui: + plot_in_gui(scatter) + + @staticmethod + def _get_benchmark_baseline_dir(): + return os.path.join(_ti_core.get_repo_dir(), "benchmarks", "baseline") + + @staticmethod + def _get_benchmark_output_dir(): + return os.path.join(_ti_core.get_repo_dir(), "benchmarks", "output") + + @register + def regression(self, arguments: list = sys.argv[2:]): + """Display benchmark regression test result""" + parser = argparse.ArgumentParser(prog="ti regression", description=f"{self.regression.__doc__}") + parser.add_argument("files", nargs="*", help="Test file(s) to be run for benchmarking") + parser.add_argument( + "-g", + "--gui", + dest="gui", + action="store_true", + help="Display benchmark regression result in GUI", + ) + args = parser.parse_args(arguments) + + # Short circuit for testing + if self.test_mode: + return args + + baseline_dir = TaichiMain._get_benchmark_baseline_dir() + output_dir = TaichiMain._get_benchmark_output_dir() + TaichiMain._display_benchmark_regression(baseline_dir, output_dir, args) + + return None + + @register + def baseline(self, arguments: list = sys.argv[2:]): + """Archive current benchmark result as baseline""" + parser = argparse.ArgumentParser(prog="ti baseline", description=f"{self.baseline.__doc__}") + args = parser.parse_args(arguments) + + # Short circuit for testing + if self.test_mode: + return args + + baseline_dir = TaichiMain._get_benchmark_baseline_dir() + output_dir = TaichiMain._get_benchmark_output_dir() + shutil.rmtree(baseline_dir, True) + shutil.copytree(output_dir, baseline_dir) + print(f"[benchmark] baseline data saved to {baseline_dir}") + + return None + + @staticmethod + @register + def test(self, arguments: list = sys.argv[2:]): + raise RuntimeError("ti test is deprecated. Please run `python tests/run_tests.py` instead.") + + @register + def run(self, arguments: list = sys.argv[2:]): + """Run a single script""" + parser = argparse.ArgumentParser(prog="ti run", description=f"{self.run.__doc__}") + parser.add_argument( + "filename", + help="A single (Python) script to run with Taichi, e.g. render.py", + ) + args = parser.parse_args(arguments) + + # Short circuit for testing + if self.test_mode: + return args + + runpy.run_path(args.filename) + + return None + + @register + def debug(self, arguments: list = sys.argv[2:]): + """Debug a single script""" + parser = argparse.ArgumentParser(prog="ti debug", description=f"{self.debug.__doc__}") + parser.add_argument( + "filename", + help="A single (Python) script to run with debugger, e.g. render.py", + ) + args = parser.parse_args(arguments) + + # Short circuit for testing + if self.test_mode: + return args + + _ti_core.set_core_trigger_gdb_when_crash(True) + os.environ["TI_DEBUG"] = "1" + + runpy.run_path(args.filename, run_name="__main__") + + return None + + @staticmethod + @register + def diagnose(arguments: list = sys.argv[2:]): + """System diagnose information""" + diagnose.main() + + @staticmethod + @register + def repl(arguments: list = sys.argv[2:]): + """Start Taichi REPL / Python shell with 'import taichi as ti'""" + + def local_scope(): + try: + import IPython # pylint: disable=C0415 + + IPython.embed() + except ImportError: + import code # pylint: disable=C0415 + + __name__ = "__console__" # pylint: disable=W0622 + code.interact(local=locals()) + + local_scope() + + @staticmethod + @register + def lint(arguments: list = sys.argv[2:]): + """Run pylint checker for the Python codebase of Taichi""" + # TODO: support arguments for lint specific files + # parser = argparse.ArgumentParser(prog='ti lint', description=f"{self.lint.__doc__}") + # args = parser.parse_args(arguments) + + options = [os.path.dirname(__file__)] + + from multiprocessing import cpu_count # pylint: disable=C0415 + + threads = min(8, cpu_count()) + options += ["-j", str(threads)] + + # http://pylint.pycqa.org/en/latest/user_guide/run.html + # TODO: support redirect output to lint.log + import pylint # pylint: disable=C0415 + + pylint.lint.Run(options) + + @register + def module(self, arguments: list = sys.argv[2:]): + """Taichi module tools""" + from taichi import _ti_module # pylint: disable=C0415 + + _ti_module._main(arguments) + + @staticmethod + @register + def cache(arguments: list = sys.argv[2:]): + """Manage the offline cache files manually""" + + def clean(cmd_args, parser): + parser.add_argument( + "-p", + "--offline-cache-file-path", + dest="offline_cache_file_path", + default=impl.default_cfg().offline_cache_file_path, + ) + args = parser.parse_args(cmd_args) + path = os.path.abspath(args.offline_cache_file_path) + count = _ti_core.clean_offline_cache_files(path) + print(f"Deleted {count} offline cache files in {path}") + + # TODO(PGZXB): Provide more tools to manage the offline cache files + subcmds_map = {"clean": (clean, "Clean all offline cache files in given path")} + + def print_help(): + print("usage: ti cache []") + for name, value in subcmds_map.items(): + _, description = value + print(f"\t{name}\t|-> {description}") + + if not arguments: + print_help() + return + + subcmd = arguments[0] + if subcmd not in subcmds_map: + print(f"'ti cache {subcmd}' is not a valid command!") + print_help() + return + + func, description = subcmds_map[subcmd] + parser = argparse.ArgumentParser(prog=f"ti cache {subcmd}", description=description) + func(cmd_args=arguments[1:], parser=parser) + + +def main(): + cli = TaichiMain() + return cli() + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/local_packages/taichi/_snode/__init__.py b/local_packages/taichi/_snode/__init__.py new file mode 100644 index 0000000..39ab569 --- /dev/null +++ b/local_packages/taichi/_snode/__init__.py @@ -0,0 +1,3 @@ +from taichi._snode.fields_builder import FieldsBuilder + +__all__ = ["FieldsBuilder"] diff --git a/local_packages/taichi/_snode/fields_builder.py b/local_packages/taichi/_snode/fields_builder.py new file mode 100644 index 0000000..b0da56b --- /dev/null +++ b/local_packages/taichi/_snode/fields_builder.py @@ -0,0 +1,186 @@ +from typing import Any, Optional, Sequence, Union + +from taichi._lib import core as _ti_core +from taichi._snode.snode_tree import SNodeTree +from taichi.lang import impl, snode +from taichi.lang.exception import TaichiRuntimeError +from taichi.lang.util import warning + +_snode_registry = _ti_core.SNodeRegistry() + +_Axis = _ti_core.Axis + + +class FieldsBuilder: + """A builder that constructs a SNodeTree instance. + + Example:: + + x = ti.field(ti.i32) + y = ti.field(ti.f32) + fb = ti.FieldsBuilder() + fb.dense(ti.ij, 8).place(x) + fb.pointer(ti.ij, 8).dense(ti.ij, 4).place(y) + + # After this line, `x` and `y` are placed. No more fields can be placed + # into `fb`. + # + # The tree looks like the following: + # (implicit root) + # | + # +-- dense +-- place(x) + # | + # +-- pointer +-- dense +-- place(y) + fb.finalize() + """ + + def __init__(self): + self.ptr = _snode_registry.create_root(impl.get_runtime().prog) + self.root = snode.SNode(self.ptr) + self.finalized = False + self.empty = True + impl.get_runtime().initialize_fields_builder(self) + + # TODO: move this into SNodeTree + @classmethod + def _finalized_roots(cls): + """Gets all the roots of the finalized SNodeTree. + + Returns: + A list of the roots of the finalized SNodeTree. + """ + roots_ptr = [] + size = impl.get_runtime().prog.get_snode_tree_size() + for i in range(size): + res = impl.get_runtime().prog.get_snode_root(i) + roots_ptr.append(snode.SNode(res)) + return roots_ptr + + # TODO: move this to SNodeTree class. + def deactivate_all(self): + """Same as :func:`taichi.lang.snode.SNode.deactivate_all`""" + if self.finalized: + self.root.deactivate_all() + else: + warning("""'deactivate_all()' would do nothing if FieldsBuilder is not finalized""") + + def dense( + self, + indices: Union[Sequence[_Axis], _Axis], + dimensions: Union[Sequence[int], int], + ): + """Same as :func:`taichi.lang.snode.SNode.dense`""" + self._check_not_finalized() + self.empty = False + return self.root.dense(indices, dimensions) + + def pointer( + self, + indices: Union[Sequence[_Axis], _Axis], + dimensions: Union[Sequence[int], int], + ): + """Same as :func:`taichi.lang.snode.SNode.pointer`""" + if not _ti_core.is_extension_supported(impl.current_cfg().arch, _ti_core.Extension.sparse): + raise TaichiRuntimeError("Pointer SNode is not supported on this backend.") + self._check_not_finalized() + self.empty = False + return self.root.pointer(indices, dimensions) + + def _hash(self, indices, dimensions): + """Same as :func:`taichi.lang.snode.SNode.hash`""" + raise NotImplementedError() + + def dynamic( + self, + index: Union[Sequence[_Axis], _Axis], + dimension: Union[Sequence[int], int], + chunk_size: Optional[int] = None, + ): + """Same as :func:`taichi.lang.snode.SNode.dynamic`""" + if not _ti_core.is_extension_supported(impl.current_cfg().arch, _ti_core.Extension.sparse): + raise TaichiRuntimeError("Dynamic SNode is not supported on this backend.") + + if dimension >= 2**31: + raise TaichiRuntimeError( + f"The maximum dimension of a dynamic SNode cannot exceed the maximum value of a 32-bit signed integer: Got {dimension} > 2**31-1" + ) + if chunk_size is not None and chunk_size >= 2**31: + raise TaichiRuntimeError( + f"Chunk size cannot exceed the maximum value of a 32-bit signed integer: Got {chunk_size} > 2**31-1" + ) + + self._check_not_finalized() + self.empty = False + return self.root.dynamic(index, dimension, chunk_size) + + def bitmasked( + self, + indices: Union[Sequence[_Axis], _Axis], + dimensions: Union[Sequence[int], int], + ): + """Same as :func:`taichi.lang.snode.SNode.bitmasked`""" + if not _ti_core.is_extension_supported(impl.current_cfg().arch, _ti_core.Extension.sparse): + raise TaichiRuntimeError("Bitmasked SNode is not supported on this backend.") + self._check_not_finalized() + self.empty = False + return self.root.bitmasked(indices, dimensions) + + def quant_array( + self, + indices: Union[Sequence[_Axis], _Axis], + dimensions: Union[Sequence[int], int], + max_num_bits: int, + ): + """Same as :func:`taichi.lang.snode.SNode.quant_array`""" + self._check_not_finalized() + self.empty = False + return self.root.quant_array(indices, dimensions, max_num_bits) + + def place(self, *args: Any, offset: Optional[Union[Sequence[int], int]] = None): + """Same as :func:`taichi.lang.snode.SNode.place`""" + self._check_not_finalized() + self.empty = False + self.root.place(*args, offset=offset) + + def lazy_grad(self): + """Same as :func:`taichi.lang.snode.SNode.lazy_grad`""" + # TODO: This complicates the implementation. Figure out why we need this + self._check_not_finalized() + self.empty = False + self.root.lazy_grad() + + def _allocate_adjoint_checkbit(self): + """Same as :func:`taichi.lang.snode.SNode._allocate_adjoint_checkbit`""" + self._check_not_finalized() + self.empty = False + self.root._allocate_adjoint_checkbit() + + def lazy_dual(self): + """Same as :func:`taichi.lang.snode.SNode.lazy_dual`""" + # TODO: This complicates the implementation. Figure out why we need this + self._check_not_finalized() + self.empty = False + self.root.lazy_dual() + + def finalize(self, raise_warning=True): + """Constructs the SNodeTree and finalizes this builder. + + Args: + raise_warning (bool): Raise warning or not.""" + return self._finalize(raise_warning, compile_only=False) + + def _finalize_for_aot(self): + """Constructs the SNodeTree and compiles the type for AOT purpose.""" + return self._finalize(raise_warning=False, compile_only=True) + + def _finalize(self, raise_warning, compile_only): + self._check_not_finalized() + if self.empty and raise_warning: + warning("Finalizing an empty FieldsBuilder!") + self.finalized = True + impl.get_runtime().finalize_fields_builder(self) + return SNodeTree(_ti_core.finalize_snode_tree(_snode_registry, self.ptr, impl.get_runtime().prog, compile_only)) + + def _check_not_finalized(self): + if self.finalized: + raise TaichiRuntimeError("FieldsBuilder finalized") diff --git a/local_packages/taichi/_snode/snode_tree.py b/local_packages/taichi/_snode/snode_tree.py new file mode 100644 index 0000000..cc368ab --- /dev/null +++ b/local_packages/taichi/_snode/snode_tree.py @@ -0,0 +1,32 @@ +# The reason we import just the taichi.core.util module, instead of the ti_python_core +# object within it, is that ti_python_core is stateful. While in practice ti_python_core is +# loaded during the import procedure, it's probably still good to delay the +# access to it. + +from taichi.lang import impl +from taichi.lang.exception import TaichiRuntimeError + + +class SNodeTree: + def __init__(self, ptr): + self.prog = impl.get_runtime().prog + self.ptr = ptr + self.destroyed = False + + def destroy(self): + if self.destroyed: + raise TaichiRuntimeError("SNode tree has been destroyed") + if self.prog != impl.get_runtime().prog: + return + self.ptr.destroy_snode_tree(impl.get_runtime().prog) + + # FieldExpression holds a SNode* to the place-SNode associated with a SNodeTree + # Therefore, we have to recompile all the kernels after destroying a SNodeTree + impl.get_runtime().clear_compiled_functions() + self.destroyed = True + + @property + def id(self): + if self.destroyed: + raise TaichiRuntimeError("SNode tree has been destroyed") + return self.ptr.id() diff --git a/local_packages/taichi/_ti_module/__init__.py b/local_packages/taichi/_ti_module/__init__.py new file mode 100644 index 0000000..f3cd09d --- /dev/null +++ b/local_packages/taichi/_ti_module/__init__.py @@ -0,0 +1 @@ +from taichi._ti_module.module import _main diff --git a/local_packages/taichi/_ti_module/cppgen.py b/local_packages/taichi/_ti_module/cppgen.py new file mode 100644 index 0000000..3c1f5da --- /dev/null +++ b/local_packages/taichi/_ti_module/cppgen.py @@ -0,0 +1,307 @@ +from typing import Any, List, Optional, Set + +from taichi.aot.conventions.gfxruntime140 import GfxRuntime140, sr + +dtype2ctype = { + sr.DataType.f16: "half_t", + sr.DataType.f32: "float", + sr.DataType.f64: "double", + sr.DataType.i8: "int8_t", + sr.DataType.i16: "int16_t", + sr.DataType.i32: "int32_t", + sr.DataType.i64: "int64_t", + sr.DataType.u8: "uint8_t", + sr.DataType.u16: "uint16_t", + sr.DataType.u32: "uint32_t", + sr.DataType.u64: "uint64_t", +} + + +def check_arg(actual: str, expect: Any) -> List[str]: + out = [] + + expect = str(expect) + out += [ + f" if (value.{actual} != {expect}) {{", + f' ti_set_last_error(TI_ERROR_INVALID_ARGUMENT, "value.{actual} != {expect}");', + " return *this;", + " }", + ] + + return out + + +def get_arg_dst(i: int, is_named: bool) -> str: + if is_named: + return f"args_[{i}].argument" + return f"args_[{i}]" + + +def generate_scalar_assign(cls_name: str, i: int, arg_name: str, arg: sr.ArgumentScalar, is_named: bool) -> List[str]: + ctype = dtype2ctype[arg.dtype] + + out = [] + + out += [ + f" {cls_name} &set_{arg_name}({ctype} value) {{", + ] + + if is_named: + out += [ + f' args_[{i}].name = "{arg_name}";', + ] + if ctype == "float": + out += [ + f" {get_arg_dst(i, is_named)}.type = TI_ARGUMENT_TYPE_F32;", + f" {get_arg_dst(i, is_named)}.value.f32 = value;", + ] + elif ctype == "int32_t": + out += [ + f" {get_arg_dst(i, is_named)}.type = TI_ARGUMENT_TYPE_I32;", + f" {get_arg_dst(i, is_named)}.value.i32 = value;", + ] + else: + out += [ + f" {get_arg_dst(i, is_named)}.type = TI_ARGUMENT_TYPE_SCALAR;", + f" {get_arg_dst(i, is_named)}.value.scalar.type = TI_DATA_TYPE_{arg.dtype.name.upper()};", + f" *(({ctype}*)(&{get_arg_dst(i, is_named)}.value.scalar.value)) = value;", + ] + assert False, f"{ctype} is not a supported scalar type." + + out += [ + " return *this;", + " }", + ] + return out + + +def generate_ndarray_assign(cls_name: str, i: int, arg_name: str, arg: sr.ArgumentNdArray, is_named: bool) -> List[str]: + out = [] + + out += [ + f" {cls_name} &set_{arg_name}(const TiNdArray &value) {{", + ] + + out += check_arg("elem_type", f"TI_DATA_TYPE_{arg.dtype.name.upper()}") + out += check_arg("shape.dim_count", arg.ndim) + assert len(arg.element_shape) <= 16 + out += check_arg("elem_shape.dim_count", len(arg.element_shape)) + for j, dim in enumerate(arg.element_shape): + out += check_arg(f"elem_shape.dims[{j}]", dim) + + if is_named: + out += [ + f' args_[{i}].name = "{arg_name}";', + ] + out += [ + f" {get_arg_dst(i, is_named)}.type = TI_ARGUMENT_TYPE_NDARRAY;", + f" {get_arg_dst(i, is_named)}.value.ndarray = value;", + " return *this;", + " }", + ] + return out + + +def generate_texture_assign( + cls_name: str, + i: int, + arg_name: str, + arg: sr.ArgumentTexture | sr.ArgumentRwTexture, + is_named: bool, +) -> List[str]: + out = [] + + out += [ + f" {cls_name} &set_{arg_name}(const TiTexture &value) {{", + ] + + assert arg.ndim in [1, 2, 3] + out += check_arg("dimension", f"TI_IMAGE_DIMENSION_{arg.ndim}D") + if isinstance(arg, sr.ArgumentRwTexture): + out += check_arg("format", f"TI_FORMAT_{arg.fmt.name.upper()}") + + if is_named: + out += [ + f' args_[{i}].name = "{arg_name}";', + ] + out += [ + f" {get_arg_dst(i, is_named)}.type = TI_ARGUMENT_TYPE_TEXTURE;", + f" {get_arg_dst(i, is_named)}.value.texture = value;", + " return *this;", + " }", + ] + return out + + +def generate_kernel_args_builder(kernel: sr.Kernel) -> List[str]: + out = [] + + out += [ + f"struct Kernel_{kernel.name} : public ti::Kernel {{", + f" explicit Kernel_{kernel.name}(TiRuntime runtime, TiKernel kernel) :", + " ti::Kernel(runtime, kernel) {", + f" args_.resize({len(kernel.context.args)});", + " }", + "", + ] + + cls_name = f"Kernel_{kernel.name}" + for i, arg in enumerate(kernel.context.args): + arg_name = arg.name if arg.name else f"arg{i}" + if isinstance(arg, sr.ArgumentScalar): + out += generate_scalar_assign(cls_name, i, arg_name, arg, False) + elif isinstance(arg, sr.ArgumentNdArray): + out += generate_ndarray_assign(cls_name, i, arg_name, arg, False) + elif isinstance(arg, (sr.ArgumentTexture, sr.ArgumentRwTexture)): + out += generate_texture_assign(cls_name, i, arg_name, arg, False) + else: + assert False + out += [""] + + out += [ + "};", + "", + ] + return out + + +def generate_graph_args_builder(graph: sr.Graph) -> List[str]: + out = [] + + out += [ + f"struct ComputeGraph_{graph.name} : public ti::ComputeGraph {{", + f" explicit ComputeGraph_{graph.name}(TiRuntime runtime, TiComputeGraph graph) :", + " ti::ComputeGraph(runtime, graph) {", + f" args_.resize({len(graph.args)});", + " }", + "", + ] + + cls_name = f"ComputeGraph_{graph.name}" + for i, arg in enumerate(graph.args): + arg_name = arg.name + if isinstance(arg.arg, sr.ArgumentScalar): + out += generate_scalar_assign(cls_name, i, arg_name, arg.arg, True) + elif isinstance(arg.arg, sr.ArgumentNdArray): + out += generate_ndarray_assign(cls_name, i, arg_name, arg.arg, True) + elif isinstance(arg.arg, (sr.ArgumentTexture, sr.ArgumentRwTexture)): + out += generate_texture_assign(cls_name, i, arg_name, arg.arg, True) + else: + assert False + out += [""] + + out += [ + "};", + "", + ] + return out + + +def generate_module_content_repr(m: GfxRuntime140, module_name: str, cgraph_kernel_names: Set[str]) -> List[str]: + out = [] + + if module_name: + module_name = f"AotModule_{module_name}" + else: + module_name = "AotModule" + + out += [ + f"struct {module_name} : public ti::AotModule {{", + f" explicit {module_name}(TiRuntime runtime, TiAotModule aot_module, bool should_destroy = true) :", + " ti::AotModule(runtime, aot_module, should_destroy) {}", + "", + f" static {module_name} load(TiRuntime runtime, const char *path) {{", + " TiAotModule aot_module = ti_load_aot_module(runtime, path);", + f" return {module_name}(runtime, aot_module, true);", + " }", + f" static {module_name} load(TiRuntime runtime, const std::string &path) {{", + f" return {module_name}::load(runtime, path.c_str());", + " }", + f" static {module_name} create(TiRuntime runtime, const void *tcm, size_t size) {{", + " TiAotModule aot_module = ti_create_aot_module(runtime, tcm, size);", + f" return {module_name}(runtime, aot_module, true);", + " }", + f" static {module_name} create(TiRuntime runtime, const std::vector &tcm) {{", + f" return {module_name}::create(runtime, tcm.data(), tcm.size());", + " }", + "", + ] + for kernel in m.metadata.kernels.values(): + if kernel.name in cgraph_kernel_names: + continue + out += [ + f" Kernel_{kernel.name} get_kernel_{kernel.name}() const {{", + f' return Kernel_{kernel.name}(runtime_, ti_get_aot_module_kernel(aot_module(), "{kernel.name}"));', + " }", + ] + for graph in m.graphs: + out += [ + f" ComputeGraph_{graph.name} get_compute_graph_{graph.name}() const {{", + f' return ComputeGraph_{graph.name}(runtime_, ti_get_aot_module_compute_graph(aot_module(), "{graph.name}"));', + " }", + ] + out += [ + "};", + "", + ] + return out + + +def generate_module_content(m: GfxRuntime140, module_name: str) -> List[str]: + # This has all kernels including all the ones launched by compute graphs. + cgraph_kernel_names = set(dispatch.kernel.name for graph in m.graphs for dispatch in graph.dispatches) + + out = [] + for kernel in m.metadata.kernels.values(): + if kernel.name in cgraph_kernel_names: + continue + out += generate_kernel_args_builder(kernel) + + for graph in m.graphs: + out += generate_graph_args_builder(graph) + + out += generate_module_content_repr(m, module_name, cgraph_kernel_names) + + return out + + +def generate_header(m: GfxRuntime140, module_name: str, namespace: str, tcm: Optional[bytes]) -> List[str]: + out = [] + + out += [ + "// THIS IS A GENERATED HEADER; PLEASE DO NOT MODIFY.", + "#pragma once", + "#include ", + "#include ", + "#include ", + "", + ] + + if namespace: + out += [ + f"namespace {namespace} {{", + "", + ] + + if tcm is not None: + tcm_bytes = [x for x in tcm] + + out += [ + f"static const uint8_t {module_name}_tcm[{len(tcm_bytes)}] = {{", + ] + + out += [f" {', '.join(str(x) for x in tcm_bytes[i:i + 8])}," for i in range(0, len(tcm_bytes), 8)] + + out += [ + "};", + "", + f"static const size_t {module_name}_tcm_size = {len(tcm_bytes)};", + "", + ] + + out += generate_module_content(m, module_name) + + if namespace: + out += [f"}} // namespace {namespace}", ""] + + return out diff --git a/local_packages/taichi/_ti_module/module.py b/local_packages/taichi/_ti_module/module.py new file mode 100644 index 0000000..6976908 --- /dev/null +++ b/local_packages/taichi/_ti_module/module.py @@ -0,0 +1,144 @@ +import argparse +import runpy +from pathlib import Path +from typing import List + +from taichi._ti_module.cppgen import generate_header +from taichi.aot._export import _aot_kernels +from taichi.aot.conventions.gfxruntime140 import GfxRuntime140 +from taichi.aot.module import Module +from taichi.types.ndarray_type import NdarrayType +from taichi.types.primitive_types import integer_type_ids, real_type_ids +from taichi.types.texture_type import RWTextureType, TextureType + +import taichi + + +def module_cppgen(parser: argparse.ArgumentParser): + """Generate C++ headers for Taichi modules.""" + parser.add_argument("MODOLE", help="Path to the module directory.") + parser.add_argument("-n", "--namespace", type=str, help="C++ namespace if wanted.") + parser.add_argument( + "-m", + "--module-name", + type=str, + help="Module name to be a part of the module class. By default, it's the directory name.", + default=None, + ) + parser.add_argument("-o", "--output", type=str, help="Output C++ header path.", default="module.h") + parser.add_argument( + "--bin2c", + help="Save the entire TCM archive to an in-memory buffer. This flag is ignored if the module is not a TCM archive", + action="store_true", + ) + parser.set_defaults(func=module_cppgen_impl) + + +def module_cppgen_impl(a): + module_path = a.MODOLE + + print(f"Generating C++ header for Taichi module: {Path(module_path).absolute()}") + + tcm = None + if a.bin2c and module_path.endswith(".tcm"): + with open(module_path, "rb") as f: + tcm = f.read() + + if a.module_name: + module_name = a.module_name + else: + module_name = Path(module_path).name + if module_name.endswith(".tcm"): + module_name = module_name[:-4] + + m = GfxRuntime140.from_module(module_path) + + out = generate_header(m, module_name, a.namespace, tcm) + + with open(a.output, "w") as f: + f.write("\n".join(out)) + + print(f"Module header is saved to: {Path(a.output).absolute()}") + + +def module_build(parser: argparse.ArgumentParser): + """Build Taichi modules from python scripts.""" + parser.add_argument("SOURCE", help="Path to the Taichi program source (Python script).") + parser.add_argument("-o", "--output", type=str, help="Output module path.", default=None) + parser.set_defaults(func=module_build_impl) + + +def module_build_impl(a): + source_path = a.SOURCE + module_path = a.output + + source_path = Path(source_path) + assert source_path.name.endswith(".py"), "Source must be a Python script." + if module_path is None: + module_path = f"{source_path.name[:-3]}.tcm" + module_path = Path(module_path) + + print(f"Building Taichi module: {source_path}") + print() + + d = runpy.run_path(str(source_path), run_name="__main__") + print() + + required_caps = d["REQUIRED_CAPS"] if "REQUIRED_CAPS" in d else [] + assert isinstance(required_caps, list), "REQUIRED_CAPS must be a list." + + if required_caps: + print("Module requires the following capabilities:") + for cap in required_caps: + print(f" - {cap}") + print() + + m = Module(caps=required_caps) + for record in _aot_kernels: + print("Added kernel:", record.name) + template_args = None + if record.template_types: + print(" Template types:") + template_args = {} + for k, v in record.template_types.items(): + print(f" - {k}: {v}") + # TODO: (penguinliong) Remove this hack. It's not properly + # working with unusual numeric types like f16 or i64. + if isinstance(v, int) or id(v) in integer_type_ids: + value = 0 + elif isinstance(v, float) or id(v) in real_type_ids: + value = 0.0 + elif isinstance(v, NdarrayType): + if v.ndim is None or v.ndim <= 0: + raise ValueError("Ndarray template type must specify a non-zero dimension.") + value = taichi.ndarray(v.dtype, (1,) * v.ndim) + elif isinstance(v, TextureType): + value = taichi.Texture(taichi.Format.rgba8, (4,) * v.num_dimensions) + elif isinstance(v, RWTextureType): + value = taichi.Texture(v.fmt, (4,) * v.num_dimensions) + else: + raise ValueError(f"Unsupported template type: {type(v)}") + template_args[k] = value + m.add_kernel(record.kernel, template_args) + print() + + if module_path.name.endswith(".tcm"): + m.archive(str(module_path)) + else: + m.save(str(module_path)) + + print(f"Module is archive to: {module_path}") + print() + + +def _main(arguments: List[str]): + """Taichi module tools.""" + parser = argparse.ArgumentParser(prog="ti module", description=_main.__doc__) + subparsers = parser.add_subparsers(title="Taichi module manager commands", required=True) + + cppgen_parser = subparsers.add_parser("cppgen", help=module_cppgen.__doc__) + build_parser = subparsers.add_parser("build", help=module_build.__doc__) + module_cppgen(cppgen_parser) + module_build(build_parser) + args = parser.parse_args(arguments) + args.func(args) diff --git a/local_packages/taichi/_version_check.py b/local_packages/taichi/_version_check.py new file mode 100644 index 0000000..29191ef --- /dev/null +++ b/local_packages/taichi/_version_check.py @@ -0,0 +1,101 @@ +import datetime +import json +import os +import platform +import threading +import uuid +from urllib import request + +from taichi._lib import core as _ti_core + + +def check_version(cur_uuid): + # Check Taichi version for the user. + major = _ti_core.get_version_major() + minor = _ti_core.get_version_minor() + patch = _ti_core.get_version_patch() + version = f"{major}.{minor}.{patch}" + payload = {"version": version, "platform": "", "python": ""} + + system = platform.system() + u = platform.uname() + if (u.system, u.machine) == ("Linux", "x86_64"): + payload["platform"] = "manylinux_2_27_x86_64" + elif (u.system, u.machine) in (("Linux", "arm64"), ("Linux", "aarch64")): + payload["platform"] = "manylinux_2_27_aarch64" + elif system == "Windows": + payload["platform"] = "win_amd64" + elif system == "Darwin": + if platform.release() < "19.0.0": + payload["platform"] = "macosx_10_14_x86_64" + elif platform.machine() == "x86_64": + payload["platform"] = "macosx_10_15_x86_64" + else: + payload["platform"] = "macosx_11_0_arm64" + + python_version = platform.python_version().split(".") + payload["python"] = "cp" + python_version[0] + python_version[1] + + payload["uuid"] = cur_uuid + if os.getenv("TI_CI") == "1": + payload["type"] = "CI" + # We do not want request exceptions break users' usage of Taichi. + try: + payload = json.dumps(payload) + payload = payload.encode() + req = request.Request("https://metadata.taichi.graphics/check_version", method="POST") + req.add_header("Content-Type", "application/json") + with request.urlopen(req, data=payload, timeout=5) as response: + response = json.loads(response.read().decode("utf-8")) + return response + except: + return None + + +def write_version_info(response, cur_uuid, version_info_path, cur_date): + if response is None: + return + with open(version_info_path, "w") as f: + f.write((cur_date).strftime("%Y-%m-%d")) + f.write("\n") + if response["status"] == 1: + f.write(response["latest_version"]) + else: + f.write("0.0.0") + f.write("\n") + f.write(cur_uuid) + f.write("\n") + + +def try_check_version(): + try: + os.makedirs(_ti_core.get_repo_dir(), exist_ok=True) + version_info_path = os.path.join(_ti_core.get_repo_dir(), "version_info") + cur_date = datetime.date.today() + if os.path.exists(version_info_path): + with open(version_info_path, "r") as f: + version_info_file = f.readlines() + last_time = version_info_file[0].rstrip() + cur_uuid = version_info_file[2].rstrip() + if cur_date.strftime("%Y-%m-%d") > last_time: + response = check_version(cur_uuid) + write_version_info(response, cur_uuid, version_info_path, cur_date) + else: + cur_uuid = str(uuid.uuid4()) + write_version_info({"status": 0}, cur_uuid, version_info_path, cur_date) + response = check_version(cur_uuid) + write_version_info(response, cur_uuid, version_info_path, cur_date) + # Wildcard exception to catch potential file writing errors. + except: + pass + + +def start_version_check_thread(): + skip = os.environ.get("TI_SKIP_VERSION_CHECK") + if skip != "ON": + # We don't join this thread because we do not wish to block users. + check_version_thread = threading.Thread(target=try_check_version, daemon=True) + check_version_thread.start() + + +__all__ = [] diff --git a/local_packages/taichi/ad/__init__.py b/local_packages/taichi/ad/__init__.py new file mode 100644 index 0000000..09d38f9 --- /dev/null +++ b/local_packages/taichi/ad/__init__.py @@ -0,0 +1 @@ +from taichi.ad._ad import * diff --git a/local_packages/taichi/ad/_ad.py b/local_packages/taichi/ad/_ad.py new file mode 100644 index 0000000..b4f0989 --- /dev/null +++ b/local_packages/taichi/ad/_ad.py @@ -0,0 +1,528 @@ +"""Taichi automatic differentiation module. + +This module supplies two decorators for users to customize their +gradient computation task. +""" + +import warnings +from functools import reduce + +import numpy as np +import taichi.types.primitive_types as types +from taichi.lang import impl +from taichi.lang.enums import AutodiffMode, SNodeGradType +from taichi.lang.expr import Expr +from taichi.lang.field import Field, ScalarField +from taichi.lang._ndarray import Ndarray +from taichi.lang.kernel_impl import kernel +from taichi.lang.snode import SNode +from taichi.types import ndarray, template + +from taichi import _snode + + +class GradChecker: + def __init__(self, loss, to_check): + self.to_check = to_check + self.loss = loss + self.eps_range = 2.0 ** np.arange(-3, -30, -2).astype(np.float64) + self.result = [None] * len(to_check) + self.all_fields = get_all_fields() + self.backups = save_all_fields(self.all_fields) + + def add_calls(self, calls): + self.calls = calls + + def check_grad(self): + assert self.loss.dtype == types.f64, "Only f64 is supported when checking grad." + + @kernel + def x_pos(x: template(), tangent_np: ndarray(), eps: types.f64): + for I in impl.grouped(x): + x[I] += eps * tangent_np[I] + + @kernel + def x_neg(x: template(), tangent_np: ndarray(), eps: types.f64): + for I in impl.grouped(x): + x[I] -= eps * tangent_np[I] + + for i, x in enumerate(self.to_check): + if x is self.loss: + self.result[i] = True + continue + + check_pass = False + + re_range = [] + for eps in self.eps_range: + tangent_np = np.array(np.random.rand(*x.shape)).astype(np.float64) + + restore_all_fields(self.all_fields, self.backups) + x_pos(x, tangent_np, eps) + for func, args in self.calls: + func(*args) + loss_pos = self.loss.to_numpy() + + restore_all_fields(self.all_fields, self.backups) + x_neg(x, tangent_np, eps) + for func, args in self.calls: + func(*args) + loss_neg = self.loss.to_numpy() + + ip_numerical = (loss_pos - loss_neg) * 0.5 / eps + x_grad_np = x.grad.to_numpy() + extra_dim = x_grad_np.ndim - tangent_np.ndim + if extra_dim == 1: + tangent_np = np.expand_dims(tangent_np, axis=-1) + if extra_dim == 2: + tangent_np = np.expand_dims(tangent_np, axis=-1) + + ip_autodiff = np.sum(x_grad_np * tangent_np) + err = abs(ip_autodiff - ip_numerical) + if ip_numerical != 0: + re = err / abs(ip_autodiff) + else: + re = err / (abs(ip_autodiff) + 1e-20) + re_range.append(re) + + if err * 100 <= abs(ip_autodiff): + check_pass = True + break + + self.result[i] = check_pass + + if not check_pass: + print( + "variable", + i, + "has relative error", + min(re_range), + ", expected relative error 0.01", + ) + else: + print("variable", i, "passes grad check") + + assert all(self.result), "Grad check failed: Not all variables pass grad check" + + restore_all_fields(self.all_fields, self.backups) + for func, args in self.calls: + func(*args) + + +def get_all_fields(): + def visit(node, fields): + for _i in range(node.ptr.get_num_ch()): + ch = node.ptr.get_ch(_i) + if not ch.is_place(): + visit(SNode(ch), fields) + else: + if not ch.is_primal(): + continue + fields.append(ScalarField(Expr(ch.get_expr()))) + + fields = [] + for root_fb in _snode.FieldsBuilder._finalized_roots(): + visit(root_fb, fields) + return fields + + +def save_all_fields(all_fields): + return [x.to_numpy() for x in all_fields] + + +def restore_all_fields(all_fields, backups): + assert len(all_fields) == len(backups) + for f, x in zip(all_fields, backups): + f.from_numpy(x) + + +class Tape: + def __init__(self, loss=None, clear_gradients=True, validation=False, grad_check=None): + """A context manager for reverse mode autodiff :class:`~taichi.ad.Tape`. The + context manager would catching all of the callings of functions that + decorated by :func:`~taichi.lang.kernel_impl.kernel` or + :func:`~taichi.ad.grad_replaced` under `with` statement, and calculate + all the partial gradients of a given loss variable by calling all of the + gradient function of the callings caught in reverse order while `with` + statement ended. + + See also :func:`~taichi.lang.kernel_impl.kernel` and + :func:`~taichi.ad.grad_replaced` for gradient functions. + + Args: + loss(:class:`~taichi.lang.expr.Expr`): The loss field, which shape should be (). + clear_gradients(Bool): Before `with` body start, clear all gradients or not. + validation(Bool): Check whether the code inside the context manager is autodiff valid, e.g., agree with the global data access rule. + grad_check(List[Field]): List of fields that need to check gradients. + + Example:: + + >>> @ti.kernel + >>> def sum(a: ti.float32): + >>> for I in ti.grouped(x): + >>> y[None] += x[I] ** a + >>> + >>> with ti.ad.Tape(loss = y): + >>> sum(2) + """ + self.calls = [] + self.modes = [] + self.entered = False + self.gradient_evaluated = False + self.clear_gradients = clear_gradients + self.validation = validation + self.runtime = impl.get_runtime() + if not self.runtime.prog.config().debug and self.validation: + warnings.warn( + "Debug mode is disabled, autodiff valid check will not work. Please specify `ti.init(debug=True)` to enable the check.", + Warning, + ) + self.eval_on_exit = loss is not None + self.loss = loss + self.grad_checker = None + if grad_check: + assert isinstance(grad_check, list), "grad_check should be a list of fields that need to check gradients." + self.grad_checker = GradChecker(loss, grad_check) + + def __enter__(self): + assert not self.entered, "Tape can be entered only once." + self.entered = True + + if isinstance(self.loss, Field): + impl.get_runtime().materialize() + if len(self.loss.shape) != 0: + raise RuntimeError("The loss of `Tape` must be a 0-D field, i.e. scalar") + if not self.loss.snode.ptr.has_adjoint(): + raise RuntimeError( + "Gradients of loss are not allocated, please use ti.field(..., needs_grad=True)" + " for all fields that are required by autodiff." + ) + if self.clear_gradients: + clear_all_gradients() + if self.validation: + clear_all_gradients(gradient_type=SNodeGradType.ADJOINT_CHECKBIT) + + self.loss.fill(0.0) + elif isinstance(self.loss, Ndarray): + if self.loss._get_nelement() != 1: + raise RuntimeError("The loss of `Tape` must be an ndarray with only one element") + if self.loss.grad is None: + raise RuntimeError( + "Gradients of loss are not allocated, please set needs_grad=True for all ndarrays that are required by autodiff." + ) + self.loss.fill(0.0) + else: + import torch # pylint: disable=C0415 + + if self.loss.numel() != 1: + raise RuntimeError("The loss of `Tape` must be a tensor only contains one element") + if not self.loss.requires_grad: + raise RuntimeError( + "Gradients of loss are not allocated, please set requires_grad=True for all tensors that are required by autodiff." + ) + with torch.no_grad(): + self.loss.fill_(0.0) + + # Attach the context manager to runtime + self.runtime.target_tape = self + return self + + def __exit__(self, _type, value, tb): + self.runtime.target_tape = None + if self.eval_on_exit: + self.grad() + for calls, mode in zip(self.calls, self.modes): + calls[0].autodiff_mode = mode + + def insert(self, func, args): + # Kernels with mode `AutodiffMode.NONE` and `AutodiffMode.VALIDATION` are all forward kernels. + # The difference is there are `assert` for global data access rule check in VALIDATION kernels. + assert func.autodiff_mode in ( + AutodiffMode.NONE, + AutodiffMode.VALIDATION, + ), "Inserted funcs should be forward kernels." + self.modes.append(func.autodiff_mode) + if self.validation: + func.autodiff_mode = AutodiffMode.VALIDATION + self.calls.append((func, args)) + + def grad(self): + assert self.entered, "Before evaluating gradients tape must be entered." + assert not self.gradient_evaluated, "Gradients of grad can be evaluated only once." + + # Set grad for loss + if isinstance(self.loss, (Field, Ndarray)): + self.loss.grad.fill(1.0) + else: + import torch # pylint: disable=C0415 + + if self.loss.grad is None: + self.loss.grad = torch.ones_like(self.loss) + else: + with torch.no_grad(): + self.loss.grad.fill_(1.0) + + for func, args in reversed(self.calls): + # we need to check whether "func" has "grad" attribute + # since we insert write_int and write_float kernels to self.calls + # e.g. x[None] = 0.0, this func has no grad attribute + if hasattr(func, "grad"): + func.grad(*args) + + self.gradient_evaluated = True + if self.grad_checker: + self.grad_checker.add_calls(self.calls) + self.grad_checker.check_grad() + + +def clear_all_gradients(gradient_type=SNodeGradType.ADJOINT): + """Sets the gradients of all fields to zero.""" + impl.get_runtime().materialize() + + def visit(node): + places = [] + for _i in range(node.ptr.get_num_ch()): + ch = node.ptr.get_ch(_i) + if not ch.is_place(): + visit(SNode(ch)) + else: + if ch.get_snode_grad_type() == gradient_type: + places.append(ch.get_expr()) + + places = tuple(places) + if places: + from taichi._kernels import clear_gradients # pylint: disable=C0415 + + clear_gradients(places) + + for root_fb in _snode.FieldsBuilder._finalized_roots(): + visit(root_fb) + + +def grad_replaced(func): + """A decorator for python function to customize gradient with Taichi's autodiff + system, e.g. `ti.ad.Tape()` and `kernel.grad()`. + + This decorator forces Taichi's autodiff system to use a user-defined gradient + function for the decorated function. Its customized gradient must be decorated + by :func:`~taichi.ad.grad_for`. + + Args: + fn (Callable): The python function to be decorated. + + Returns: + Callable: The decorated function. + + Example:: + + >>> @ti.kernel + >>> def multiply(a: ti.float32): + >>> for I in ti.grouped(x): + >>> y[I] = x[I] * a + >>> + >>> @ti.kernel + >>> def multiply_grad(a: ti.float32): + >>> for I in ti.grouped(x): + >>> x.grad[I] = y.grad[I] / a + >>> + >>> @ti.ad.grad_replaced + >>> def foo(a): + >>> multiply(a) + >>> + >>> @ti.ad.grad_for(foo) + >>> def foo_grad(a): + >>> multiply_grad(a)""" + + def decorated(*args, **kwargs): + # TODO [#3025]: get rid of circular imports and move this to the top. + impl.get_runtime().grad_replaced = True + if impl.get_runtime().target_tape: + impl.get_runtime().target_tape.insert(decorated, args) + try: + func(*args, **kwargs) + finally: + impl.get_runtime().grad_replaced = False + + decorated.grad = None + decorated.autodiff_mode = AutodiffMode.NONE + return decorated + + +def grad_for(primal): + """Generates a decorator to decorate `primal`'s customized gradient function. + + See :func:`~taichi.lang.grad_replaced` for examples. + + Args: + primal (Callable): The primal function, must be decorated by :func:`~taichi.ad.grad_replaced`. + + Returns: + Callable: The decorator used to decorate customized gradient function.""" + + def decorator(func): + def decorated(*args, **kwargs): + func(*args, **kwargs) + + if not hasattr(primal, "grad"): + raise RuntimeError(f"Primal function `{primal.__name__}` must be decorated by ti.ad.grad_replaced") + if primal.grad is not None: + raise RuntimeError( + "Primal function must be a **python** function instead of a taichi kernel. Please wrap the taichi kernel in a @ti.ad.grad_replaced decorated python function instead." + ) + primal.grad = decorated + return decorated + + return decorator + + +def no_grad(func): + """A decorator for python function to skip gradient calculation within Taichi's + autodiff system, e.g. `ti.ad.Tape()` and `kernel.grad()`. + This decorator forces Taichi's autodiff system to use an empty gradient function + for the decorated function. + + Args: + fn (Callable): The python function to be decorated. + + Returns: + Callable: The decorated function. + + Example:: + + >>> @ti.kernel + >>> def multiply(a: ti.float32): + >>> for I in ti.grouped(x): + >>> y[I] = x[I] * a + >>> + >>> @ti.no_grad + >>> def foo(a): + >>> multiply(a)""" + + def decorated(*args, **kwargs): + impl.get_runtime().grad_replaced = True + if impl.get_runtime().target_tape: + impl.get_runtime().target_tape.insert(decorated, args) + try: + func(*args, **kwargs) + finally: + impl.get_runtime().grad_replaced = False + + def placeholder(*args, **kwargs): + return + + decorated.grad = placeholder + decorated.autodiff_mode = AutodiffMode.NONE + return decorated + + +class FwdMode: + def __init__(self, loss, param, seed=None, clear_gradients=True): + self.calls = [] + self.modes = [] + self.entered = False + self.kernels_recovered = False + self.runtime = impl.get_runtime() + self.loss = loss + self.param = param + self.seed = seed + self.clear_gradients = clear_gradients + + def __enter__(self): + assert not self.entered, "Forward mode manager can be entered only once." + self.entered = True + impl.get_runtime().materialize() + if not isinstance(self.loss, list): + self.loss = [self.loss] + for ls in self.loss: + assert isinstance(ls, ScalarField) + + # Currently we only support only one N-D field as a group of parameters, + # which is sufficient for computing Jacobian-vector product(Jvp). + # For cases with multiple groups of parameters, it requires to run the forward ad multiple times, + # which is out of scope of the current design for this interface. + + # TODO: support vector field and matrix field + assert isinstance(self.param, ScalarField) + + def shape_flatten(shape): + return reduce((lambda x, y: x * y), list(shape)) + + # Handle 0-D field + if len(self.param.shape) != 0: + parameters_shape_flatten = shape_flatten(self.param.shape) + else: + parameters_shape_flatten = 1 + + if not self.seed: + if parameters_shape_flatten == 1: + # Compute the derivative respect to the first variable by default + self.seed = [1.0] + else: + raise RuntimeError( + "`seed` is not set for non 0-D field, please specify." + " `seed` is a list to specify which parameters the computed derivatives respect to. The length of the `seed` should be same to that of the `parameters`" + " E.g. Given a loss `loss = ti.field(float, shape=3)`, parameter `x = ti.field(float, shape=3)`" + " seed = [0, 0, 1] indicates compute derivative respect to the third element of `x`." + " seed = [1, 1, 1] indicates compute the sum of derivatives respect to all three element of `x`, i.e., Jacobian-vector product(Jvp) for each element in `loss`" + ) + else: + assert parameters_shape_flatten == len(self.seed) + + # Clear gradients + if self.clear_gradients: + clear_all_gradients(gradient_type=SNodeGradType.DUAL) + + # Set seed for each variable + if len(self.seed) == 1: + if len(self.param.shape) == 0: + # e.g., x= ti.field(float, shape = ()) + self.param.dual[None] = 1.0 * self.seed[0] + else: + # e.g., ti.root.dense(ti.i, 1).place(x.dual) + self.param.dual[0] = 1.0 * self.seed[0] + else: + self.param.dual.from_numpy(np.array(self.seed, dtype=np.float32)) + + # Attach the context manager to the runtime + self.runtime.fwd_mode_manager = self + + def __exit__(self, _type, value, tb): + self.runtime.fwd_mode_manager = None + self.clear_seed() + self.recover_kernels() + + def insert(self, func): + assert ( + func.autodiff_mode == AutodiffMode.NONE or func.autodiff_mode == AutodiffMode.FORWARD + ), "Inserted funcs should be forward or grad kernels (forward mode)." + self.modes.append(func.autodiff_mode) + func.autodiff_mode = AutodiffMode.FORWARD + self.calls.append((func)) + + def recover_kernels(self): + assert self.entered, "Before recover the kernels, fwd mode manager must be entered." + for calls, mode in zip(self.calls, self.modes): + calls.autodiff_mode = mode + self.kernels_recovered = True + + def clear_seed(self): + # clear seed values + if len(self.seed) == 1: + if len(self.param.shape) == 0: + # e.g., x= ti.field(float, shape = ()) + self.param.dual[None] = 0.0 + else: + # e.g., ti.root.dense(ti.i, 1).place(x.dual) + self.param.dual[0] = 0.0 + else: + self.param.dual.fill(0) + + +__all__ = [ + "FwdMode", + "Tape", + "clear_all_gradients", + "grad_for", + "grad_replaced", + "no_grad", +] diff --git a/local_packages/taichi/algorithms/__init__.py b/local_packages/taichi/algorithms/__init__.py new file mode 100644 index 0000000..1a4e0f3 --- /dev/null +++ b/local_packages/taichi/algorithms/__init__.py @@ -0,0 +1 @@ +from ._algorithms import * diff --git a/local_packages/taichi/algorithms/_algorithms.py b/local_packages/taichi/algorithms/_algorithms.py new file mode 100644 index 0000000..398436c --- /dev/null +++ b/local_packages/taichi/algorithms/_algorithms.py @@ -0,0 +1,115 @@ +from taichi._kernels import ( + blit_from_field_to_field, + scan_add_inclusive, + sort_stage, + uniform_add, + warp_shfl_up_i32, +) +from taichi.lang.impl import current_cfg, field +from taichi.lang.kernel_impl import data_oriented +from taichi.lang.misc import cuda, vulkan +from taichi.lang.runtime_ops import sync +from taichi.lang.simt import subgroup +from taichi.types.primitive_types import i32 + + +def parallel_sort(keys, values=None): + """Odd-even merge sort + + References: + https://developer.nvidia.com/gpugems/gpugems2/part-vi-simulation-and-numerical-algorithms/chapter-46-improved-gpu-sorting + https://en.wikipedia.org/wiki/Batcher_odd%E2%80%93even_mergesort + """ + N = keys.shape[0] + + num_stages = 0 + p = 1 + while p < N: + k = p + while k >= 1: + invocations = int((N - k - k % p) / (2 * k)) + 1 + if values is None: + sort_stage(keys, 0, keys, N, p, k, invocations) + else: + sort_stage(keys, 1, values, N, p, k, invocations) + num_stages += 1 + sync() + k = int(k / 2) + p = int(p * 2) + + +@data_oriented +class PrefixSumExecutor: + """Parallel Prefix Sum (Scan) Helper + + Use this helper to perform an inclusive in-place's parallel prefix sum. + + References: + https://developer.download.nvidia.com/compute/cuda/1.1-Beta/x86_website/projects/scan/doc/scan.pdf + https://github.com/NVIDIA/cuda-samples/blob/master/Samples/2_Concepts_and_Techniques/shfl_scan/shfl_scan.cu + """ + + def __init__(self, length): + self.sorting_length = length + + BLOCK_SZ = 64 + GRID_SZ = int((length + BLOCK_SZ - 1) / BLOCK_SZ) + + # Buffer position and length + # This is a single buffer implementation for ease of aot usage + ele_num = length + self.ele_nums = [ele_num] + start_pos = 0 + self.ele_nums_pos = [start_pos] + + while ele_num > 1: + ele_num = int((ele_num + BLOCK_SZ - 1) / BLOCK_SZ) + self.ele_nums.append(ele_num) + start_pos += BLOCK_SZ * ele_num + self.ele_nums_pos.append(start_pos) + + self.large_arr = field(i32, shape=start_pos) + + def run(self, input_arr): + length = self.sorting_length + ele_nums = self.ele_nums + ele_nums_pos = self.ele_nums_pos + + if input_arr.dtype != i32: + raise RuntimeError("Only ti.i32 type is supported for prefix sum.") + + if current_cfg().arch == cuda: + inclusive_add = warp_shfl_up_i32 + elif current_cfg().arch == vulkan: + inclusive_add = subgroup.inclusive_add + else: + raise RuntimeError(f"{str(current_cfg().arch)} is not supported for prefix sum.") + + blit_from_field_to_field(self.large_arr, input_arr, 0, length) + + # Kogge-Stone construction + for i in range(len(ele_nums) - 1): + if i == len(ele_nums) - 2: + scan_add_inclusive( + self.large_arr, + ele_nums_pos[i], + ele_nums_pos[i + 1], + True, + inclusive_add, + ) + else: + scan_add_inclusive( + self.large_arr, + ele_nums_pos[i], + ele_nums_pos[i + 1], + False, + inclusive_add, + ) + + for i in range(len(ele_nums) - 3, -1, -1): + uniform_add(self.large_arr, ele_nums_pos[i], ele_nums_pos[i + 1]) + + blit_from_field_to_field(input_arr, self.large_arr, 0, length) + + +__all__ = ["parallel_sort", "PrefixSumExecutor"] diff --git a/local_packages/taichi/aot/__init__.py b/local_packages/taichi/aot/__init__.py new file mode 100644 index 0000000..e6cbe43 --- /dev/null +++ b/local_packages/taichi/aot/__init__.py @@ -0,0 +1,10 @@ +"""Taichi's AOT (ahead of time) module. + +Users can use Taichi as a GPU compute shader/kernel compiler by compiling their +Taichi kernels into an AOT module. +""" + +import taichi.aot.conventions +from taichi.aot._export import export, export_as +from taichi.aot.conventions.gfxruntime140 import GfxRuntime140 +from taichi.aot.module import Module diff --git a/local_packages/taichi/aot/_export.py b/local_packages/taichi/aot/_export.py new file mode 100644 index 0000000..37889a7 --- /dev/null +++ b/local_packages/taichi/aot/_export.py @@ -0,0 +1,26 @@ +from typing import Any, Dict, List, Optional + + +class AotExportKernel: + def __init__(self, f, name: str, template_types: Dict[str, Any]) -> None: + self.kernel = f + self.name = name + self.template_types = template_types + + +_aot_kernels: List[AotExportKernel] = [] + + +def export_as(name: str, *, template_types: Optional[Dict[str, Any]] = None): + def inner(f): + assert hasattr(f, "_is_wrapped_kernel"), "Only Taichi kernels can be exported" + + record = AotExportKernel(f, name, template_types or {}) + _aot_kernels.append(record) + return f + + return inner + + +def export(f): + return export_as(f.__name__)(f) diff --git a/local_packages/taichi/aot/conventions/__init__.py b/local_packages/taichi/aot/conventions/__init__.py new file mode 100644 index 0000000..a5c6f93 --- /dev/null +++ b/local_packages/taichi/aot/conventions/__init__.py @@ -0,0 +1 @@ +from taichi.aot.conventions import gfxruntime140 diff --git a/local_packages/taichi/aot/conventions/gfxruntime140/__init__.py b/local_packages/taichi/aot/conventions/gfxruntime140/__init__.py new file mode 100644 index 0000000..64699ca --- /dev/null +++ b/local_packages/taichi/aot/conventions/gfxruntime140/__init__.py @@ -0,0 +1,36 @@ +import json +import zipfile +from pathlib import Path +from typing import Any, List + +from taichi.aot.conventions.gfxruntime140 import dr, sr + + +class GfxRuntime140: + def __init__(self, metadata_json: Any, graphs_json: Any) -> None: + metadata = dr.from_json_metadata(metadata_json) + graphs = [dr.from_json_graph(x) for x in graphs_json] + self.metadata = sr.from_dr_metadata(metadata) + self.graphs = [sr.from_dr_graph(self.metadata, x) for x in graphs] + + @staticmethod + def from_module(module_path: str) -> "GfxRuntime140": + if Path(module_path).is_file(): + with zipfile.ZipFile(module_path) as z: + with z.open("metadata.json") as f: + metadata_json = json.load(f) + with z.open("graphs.json") as f: + graphs_json = json.load(f) + else: + with open(f"{module_path}/metadata.json") as f: + metadata_json = json.load(f) + with open(f"{module_path}/graphs.json") as f: + graphs_json = json.load(f) + + return GfxRuntime140(metadata_json, graphs_json) + + def to_metadata_json(self) -> Any: + return dr.to_json_metadata(sr.to_dr_metadata(self.metadata)) + + def to_graphs_json(self) -> List[Any]: + return [dr.to_json_graph(sr.to_dr_graph(x)) for x in self.graphs] diff --git a/local_packages/taichi/aot/conventions/gfxruntime140/dr.py b/local_packages/taichi/aot/conventions/gfxruntime140/dr.py new file mode 100644 index 0000000..306ddd4 --- /dev/null +++ b/local_packages/taichi/aot/conventions/gfxruntime140/dr.py @@ -0,0 +1,242 @@ +""" +Data representation of all JSON data structures following the GfxRuntime140 +convention. +""" + +from typing import Any, Dict, List, Optional + +from taichi.aot.utils import dump_json_data_model, json_data_model + + +@json_data_model +class FieldAttributes: + def __init__(self, j: Dict[str, Any]) -> None: + dtype = j["dtype"] + dtype_name = j["dtype_name"] + element_shape = j["element_shape"] + field_name = j["field_name"] + is_scalar = j["is_scalar"] + mem_offset_in_parent = j["mem_offset_in_parent"] + shape = j["shape"] + + self.dtype: int = int(dtype) + self.dtype_name: str = str(dtype_name) + self.element_shape: List[int] = [int(x) for x in element_shape] + self.field_name: str = str(field_name) + self.is_scalar: bool = bool(is_scalar) + self.mem_offset_in_parent: int = int(mem_offset_in_parent) + self.shape: List[int] = [int(x) for x in shape] + + +@json_data_model +class ArgumentAttributes: + def __init__(self, j: Dict[str, Any]) -> None: + index = j["key"][0] + dtype = j["value"]["dtype"] + element_shape = j["value"]["element_shape"] + field_dim = j["value"]["field_dim"] + fmt = j["value"]["format"] + is_array = j["value"]["is_array"] + offset_in_mem = j["value"]["offset_in_mem"] + stride = j["value"]["stride"] + # (penguinliong) Note that the name field is optional for kernels. + # Kernels are always launched by indexed arguments and this is for + # debugging and header generation only. + name = j["value"]["name"] if "name" in j["value"] and len(j["value"]["name"]) > 0 else None + ptype = j["value"]["ptype"] if "ptype" in j["value"] else None + + self.dtype: int = int(dtype) + self.element_shape: List[int] = [int(x) for x in element_shape] + self.field_dim: int = int(field_dim) + self.format: int = int(fmt) + self.index: int = int(index) + self.is_array: bool = bool(is_array) + self.offset_in_mem: int = int(offset_in_mem) + self.stride: int = int(stride) + self.name: Optional[str] = str(name) if name is not None else None + self.ptype: Optional[int] = int(ptype) if ptype is not None else None + + +@json_data_model +class ContextAttributes: + def __init__(self, j: Dict[str, Any]) -> None: + arg_attribs_vec_ = j["arg_attribs_vec_"] + args_bytes_ = j["args_bytes_"] + arr_access = j["arr_access"] + ret_attribs_vec_ = j["ret_attribs_vec_"] + rets_bytes_ = j["rets_bytes_"] + + self.arg_attribs_vec_: List[ArgumentAttributes] = [ArgumentAttributes(x) for x in arg_attribs_vec_] + self.arg_attribs_vec_.sort(key=lambda x: x.index) + self.args_bytes_: int = int(args_bytes_) + self.arr_access: List[int] = [int(x["value"]) for x in arr_access] + self.ret_attribs_vec_: List[ArgumentAttributes] = [ArgumentAttributes(x) for x in ret_attribs_vec_] + self.rets_bytes_: int = int(rets_bytes_) + + +@json_data_model +class Buffer: + def __init__(self, j: Dict[str, Any]) -> None: + root_id = j["root_id"][0] + ty = j["type"] + + self.root_id: int = int(root_id) + self.type: int = int(ty) + + +@json_data_model +class BufferBinding: + def __init__(self, j: Dict[str, Any]) -> None: + binding = j["binding"] + buffer = j["buffer"] + + self.binding: int = int(binding) + self.buffer: Buffer = Buffer(buffer) + + +@json_data_model +class TextureBinding: + def __init__(self, j: Dict[str, Any]) -> None: + arg_id = j["arg_id"] + binding = j["binding"] + is_storage = j["is_storage"] + + self.arg_id: int = int(arg_id) + self.binding: int = int(binding) + self.is_storage: bool = bool(is_storage) + + +@json_data_model +class RangeForAttributes: + def __init__(self, j: Dict[str, Any]) -> None: + begin = j["begin"] + const_begin = j["const_begin"] + const_end = j["const_end"] + end = j["end"] + + self.begin: int = int(begin) + self.const_begin: bool = bool(const_begin) + self.const_end: bool = bool(const_end) + self.end: int = int(end) + + +@json_data_model +class TaskAttributes: + def __init__(self, j: Dict[str, Any]) -> None: + advisory_num_threads_per_group = j["advisory_num_threads_per_group"] + advisory_total_num_threads = j["advisory_total_num_threads"] + buffer_binds = j["buffer_binds"] + name = j["name"] + range_for_attribs = j["range_for_attribs"] if "range_for_attribs" in j else None + task_type = j["task_type"] + texture_binds = j["texture_binds"] + + self.advisory_num_threads_per_group: int = int(advisory_num_threads_per_group) + self.advisory_total_num_threads: int = int(advisory_total_num_threads) + self.buffer_binds: List[BufferBinding] = [BufferBinding(x) for x in buffer_binds] + self.name: str = str(name) + self.range_for_attribs: Optional[RangeForAttributes] = ( + RangeForAttributes(range_for_attribs) if range_for_attribs is not None else None + ) + self.task_type: int = int(task_type) + self.texture_binds: List[TextureBinding] = [TextureBinding(x) for x in texture_binds] + + +@json_data_model +class DeviceCapabilityLevel: + def __init__(self, j: Dict[str, Any]) -> None: + key = j["key"] + value = j["value"] + + self.key: str = str(key) + self.value: int = int(value) + + +@json_data_model +class KernelAttributes: + def __init__(self, j: Dict[str, Any]) -> None: + ctx_attribs = j["ctx_attribs"] + is_jit_evaluator = j["is_jit_evaluator"] + name = j["name"] + tasks_attribs = j["tasks_attribs"] + + self.ctx_attribs: ContextAttributes = ContextAttributes(ctx_attribs) + self.is_jit_evaluator: bool = bool(is_jit_evaluator) + self.name: str = str(name) + self.tasks_attribs: List[TaskAttributes] = [TaskAttributes(x) for x in tasks_attribs] + + +@json_data_model +class Metadata: + def __init__(self, j: Dict[str, Any]) -> None: + fields = j["fields"] + kernels = j["kernels"] + required_caps = j["required_caps"] + root_buffer_size = j["root_buffer_size"] + + self.fields: List[FieldAttributes] = [FieldAttributes(x) for x in fields] + self.kernels: List[KernelAttributes] = [KernelAttributes(x) for x in kernels] + self.required_caps: List[DeviceCapabilityLevel] = [DeviceCapabilityLevel(x) for x in required_caps] + self.root_buffer_size: int = int(root_buffer_size) + + +def from_json_metadata(j: Dict[str, Any]) -> Metadata: + return Metadata(j) + + +def to_json_metadata(meta_data: Metadata) -> Dict[str, Any]: + return dump_json_data_model(meta_data) + + +@json_data_model +class SymbolicArgument: + def __init__(self, j: Dict[str, Any]) -> None: + dtype_id = j["dtype_id"] + element_shape = j["element_shape"] + field_dim = j["field_dim"] + name = j["name"] + num_channels = j["num_channels"] + tag = j["tag"] + + self.dtype_id: int = int(dtype_id) + self.element_shape: List[int] = [int(x) for x in element_shape] + self.field_dim: int = int(field_dim) + self.name: str = str(name) + self.num_channels: int = int(num_channels) + self.tag: int = int(tag) + + +@json_data_model +class Dispatch: + def __init__(self, j: Dict[str, Any]) -> None: + kernel_name = j["kernel_name"] + symbolic_args = j["symbolic_args"] + + self.kernel_name: str = str(kernel_name) + self.symbolic_args: List[SymbolicArgument] = [SymbolicArgument(x) for x in symbolic_args] + + +@json_data_model +class GraphData: + def __init__(self, j: Dict[str, Any]) -> None: + dispatches = j["dispatches"] + + self.dispatches = [Dispatch(x) for x in dispatches] + + +@json_data_model +class Graph: + def __init__(self, j: Dict[str, Any]) -> None: + key = j["key"] + value = j["value"] + + self.key = str(key) + self.value = GraphData(value) + + +def from_json_graph(j: Dict[str, Any]) -> Graph: + return Graph(j) + + +def to_json_graph(graph: Graph) -> Dict[str, Any]: + return dump_json_data_model(graph) diff --git a/local_packages/taichi/aot/conventions/gfxruntime140/sr.py b/local_packages/taichi/aot/conventions/gfxruntime140/sr.py new file mode 100644 index 0000000..9e0333d --- /dev/null +++ b/local_packages/taichi/aot/conventions/gfxruntime140/sr.py @@ -0,0 +1,612 @@ +""" +Structured representation of all JSON data structures following the +GfxRuntime140. +""" + +from abc import ABC +from enum import Enum +from typing import Any, Dict, List, Optional + +from taichi.aot.conventions.gfxruntime140 import dr + +import taichi as ti + + +class DataType(Enum): + f16 = 0 + f32 = 1 + f64 = 2 + i8 = 3 + i16 = 4 + i32 = 5 + i64 = 6 + u8 = 8 + u16 = 9 + u32 = 10 + u64 = 11 + + +def get_data_type_size(dtype: DataType) -> int: + if dtype in [DataType.f16, DataType.i16, DataType.u16]: + return 2 + if dtype in [DataType.f32, DataType.i32, DataType.u32]: + return 4 + if dtype in [DataType.f64, DataType.i64, DataType.u64]: + return 8 + assert False + + +class Argument(ABC): + def __init__(self, name: Optional[str]): + self.name = name + pass + + +class ArgumentScalar(Argument): + def __init__(self, name: Optional[str], dtype: DataType): + super().__init__(name) + self.dtype: DataType = dtype + + +class ParameterType(Enum): + Scalar = 0 + Ndarray = 1 + Texture = 2 + RwTexture = 3 + Unknown = 4 + + +class NdArrayAccess(Enum): + NoAccess = 0 + Read = 1 + Write = 2 + ReadWrite = 3 + + +class ArgumentNdArray(Argument): + def __init__( + self, + name: Optional[str], + dtype: DataType, + element_shape: List[int], + ndim: int, + access: NdArrayAccess, + ): + super().__init__(name) + self.dtype: DataType = dtype + self.element_shape: List[int] = element_shape + self.ndim: int = ndim + self.access: NdArrayAccess = access + + +class ArgumentTexture(Argument): + def __init__(self, name: Optional[str], ndim: int): + super().__init__(name) + self.ndim: int = ndim + + +class ArgumentRwTexture(Argument): + def __init__(self, name: Optional[str], fmt: ti.Format, ndim: int): + super().__init__(name) + self.fmt: ti.Format = fmt + self.ndim: int = ndim + + +class ReturnValue: + def __init__(self, dtype: DataType): + self.dtype: DataType = dtype + + +class Context: + def __init__(self, args: List[Argument], ret: Optional[ReturnValue]): + self.args: List[Argument] = args + self.ret: Optional[ReturnValue] = ret + + +class BufferBindingType(Enum): + Root = 0 + GlobalTmps = 1 + Args = 2 + Rets = 3 + ListGen = 4 + ExtArr = 5 + + +class BufferBinding: + def __init__(self, binding: int, iarg: int, buffer_bind_ty: BufferBindingType): + self.binding: int = binding + self.iarg: int = iarg + self.buffer_bind_ty: BufferBindingType = buffer_bind_ty + + +class TextureBindingType(Enum): + Texture = 0 + RwTexture = 1 + + +class TextureBinding: + def __init__(self, binding: int, iarg: int, texture_bind_ty: TextureBindingType): + self.binding: int = binding + self.iarg: int = iarg + self.texture_bind_ty: TextureBindingType = texture_bind_ty + + +class TaskType(Enum): + Serial = 0 + RangeFor = 1 + StructFor = 2 + MeshFor = 3 + ListGen = 4 + Gc = 5 + GcRc = 6 + + +class LaunchGrid: + def __init__(self, block_size: int, grid_size: int): + self.block_size: int = block_size + self.grid_size: int = grid_size + + +class Task: + def __init__( + self, + name: str, + task_ty: TaskType, + buffer_binds: List[BufferBinding], + texture_binds: List[TextureBinding], + launch_grid: LaunchGrid, + ): + self.name: str = name + self.task_ty: TaskType = task_ty + self.buffer_binds: List[BufferBinding] = buffer_binds + self.texture_binds: List[TextureBinding] = texture_binds + self.launch_grid: LaunchGrid = launch_grid + + +class Field: + def __init__( + self, + name: str, + dtype: DataType, + element_shape: List[int], + shape: List[int], + offset: int, + ): + self.name: str = name + self.dtype: DataType = dtype + self.element_shape: List[int] = element_shape + self.shape: List[int] = shape + self.offset: int = offset + + +class Kernel: + def __init__(self, name: str, context: Context, tasks: List[Task]): + self.name = name + self.context: Context = context + self.tasks: List[Task] = tasks + + +class Metadata: + def __init__( + self, + fields: List[Field], + kernels: List[Kernel], + required_caps: List[ti.DeviceCapability], + root_buffer_size: int, + ): + self.fields: Dict[str, Field] = {x.name: x for x in fields} + self.kernels: Dict[str, Kernel] = {x.name: x for x in kernels} + self.required_caps: List[ti.DeviceCapability] = required_caps + self.root_buffer_size: int = root_buffer_size + + +def from_dr_field(d: dr.FieldAttributes) -> Field: + return Field( + d.field_name, + DataType(d.dtype), + d.element_shape, + d.shape, + d.mem_offset_in_parent, + ) + + +def from_dr_kernel(d: dr.KernelAttributes) -> Kernel: + assert d.is_jit_evaluator is False + + name = d.name + + class OpaqueArgumentType(Enum): + NdArray = 0 + Texture = 1 + RwTexture = 2 + + tasks = [] + iarg2arg_ty: Dict[int, OpaqueArgumentType] = {} + for task in d.tasks_attribs: + # Collect buffer bindings. + buffer_binds = [] + for buffer_bind in task.buffer_binds: + binding = buffer_bind.binding + iarg = buffer_bind.buffer.root_id + buffer_ty = BufferBindingType(buffer_bind.buffer.type) + buffer_binds += [BufferBinding(binding, iarg, buffer_ty)] + if buffer_ty == BufferBindingType.ExtArr: + iarg2arg_ty[buffer_bind.buffer.root_id] = OpaqueArgumentType.NdArray + elif buffer_ty == BufferBindingType.Root: + pass + elif buffer_ty == BufferBindingType.Args: + pass + elif buffer_ty == BufferBindingType.ListGen: + pass + elif buffer_ty == BufferBindingType.Rets: + pass + elif buffer_ty == BufferBindingType.GlobalTmps: + pass + else: + assert False + + # Collect texture bindings. + texture_binds = [] + for texture_bind in task.texture_binds: + binding = texture_bind.binding + iarg = texture_bind.arg_id + if texture_bind.is_storage: + texture_binds += [TextureBinding(binding, iarg, TextureBindingType.RwTexture)] + iarg2arg_ty[iarg] = OpaqueArgumentType.RwTexture + else: + texture_binds += [TextureBinding(binding, iarg, TextureBindingType.Texture)] + iarg2arg_ty[iarg] = OpaqueArgumentType.Texture + + launch_grid = LaunchGrid(task.advisory_num_threads_per_group, task.advisory_total_num_threads) + + tasks += [ + Task( + task.name, + TaskType(task.task_type), + buffer_binds, + texture_binds, + launch_grid, + ) + ] + + args = [] + for i, arg in enumerate(d.ctx_attribs.arg_attribs_vec_): + assert i == arg.index + ptype = ParameterType(arg.ptype) + if ptype is not None: + if ptype == ParameterType.Scalar: + args += [ArgumentScalar(arg.name, DataType(arg.dtype))] + elif ptype == ParameterType.Ndarray: + args += [ + ArgumentNdArray( + arg.name, + DataType(arg.dtype), + arg.element_shape, + arg.field_dim, + NdArrayAccess(d.ctx_attribs.arr_access[i]), + ) + ] + elif ptype == ParameterType.Texture: + args += [ArgumentTexture(arg.name, arg.field_dim)] + elif ptype == ParameterType.RwTexture: + args += [ArgumentRwTexture(arg.name, ti.Format(arg.format), arg.field_dim)] + else: + assert False + else: + # TODO: Keeping this for BC but feel free to break it if necessary + if arg.is_array: + # Opaque binding types. + binding_ty = iarg2arg_ty[arg.index] + if binding_ty == OpaqueArgumentType.NdArray: + args += [ + ArgumentNdArray( + arg.name, + DataType(arg.dtype), + arg.element_shape, + arg.field_dim, + NdArrayAccess(d.ctx_attribs.arr_access[i]), + ) + ] + elif binding_ty == OpaqueArgumentType.Texture: + args += [ArgumentTexture(arg.name, arg.field_dim)] + elif binding_ty == OpaqueArgumentType.RwTexture: + args += [ArgumentRwTexture(arg.name, ti.Format(arg.format), arg.field_dim)] + else: + assert False + else: + args += [ArgumentScalar(arg.name, DataType(arg.dtype))] + + assert len(d.ctx_attribs.ret_attribs_vec_) <= 1 + if len(d.ctx_attribs.ret_attribs_vec_) != 0: + dtype = d.ctx_attribs.ret_attribs_vec_[0].dtype + rv = ReturnValue(DataType(dtype)) + else: + rv = None + + context = Context(args, rv) + + return Kernel(name, context, tasks) + + +def from_dr_metadata(d: dr.Metadata) -> Metadata: + fields = [from_dr_field(x) for x in d.fields] + kernels = [from_dr_kernel(x) for x in d.kernels] + required_caps = [] + for cap in d.required_caps: + if cap.value == 1: + required_caps += [cap.key] + else: + required_caps += [f"{cap.key}={cap.value}"] + root_buffer_size = d.root_buffer_size + + return Metadata(fields, kernels, required_caps, root_buffer_size) + + +def to_dr_field(f: Field) -> Dict[str, Any]: + raise NotImplementedError() + + +def to_dr_kernel(s: Kernel) -> Dict[str, Any]: + tasks = [] + for task in s.tasks: + buffer_binds = [] + for buffer_bind in task.buffer_binds: + j = { + "binding": buffer_bind.binding, + "buffer": { + "root_id": buffer_bind.iarg, + "type": buffer_bind.buffer_bind_ty.value, + }, + } + buffer_binds += [j] + + texture_binds = [] + for texture_bind in task.texture_binds: + j = { + "arg_id": texture_bind.iarg, + "binding": texture_bind.binding, + "is_storage": texture_bind.texture_bind_ty == TextureBindingType.RwTexture, + } + texture_binds += [j] + + if task.task_ty == TaskType.RangeFor: + range_for_attribs = { + "begin": 0, + "const_begin": True, + "const_end": True, + "end": task.launch_grid.grid_size, + } + else: + range_for_attribs = None + + j = { + "advisory_num_threads_per_group": task.launch_grid.block_size, + "advisory_total_num_threads": task.launch_grid.grid_size, + "buffer_binds": buffer_binds, + "name": task.name, + "range_for_attribs": range_for_attribs, + "task_type": task.task_ty.value, + "texture_binds": texture_binds, + } + tasks += [j] + + args = [] + arg_bytes = 0 + arr_access = [] + arg_offset = 0 + for i, arg in enumerate(s.context.args): + if isinstance(arg, ArgumentNdArray): + j = { + "dtype": arg.dtype.value, + "element_shape": arg.element_shape, + "field_dim": arg.ndim, + "format": ti.Format.unknown, + "index": i, + "is_array": True, + "offset_in_mem": arg_offset, + "stride": 4, + } + args += [j] + arr_access += [arg.access.value] + elif isinstance(arg, ArgumentTexture): + j = { + "dtype": 1, + "element_shape": [], + "field_dim": arg.ndim, + "format": ti.Format.unknown, + "index": i, + "is_array": True, + "offset_in_mem": arg_offset, + "stride": 4, + } + args += [j] + arr_access += [0] + elif isinstance(arg, ArgumentRwTexture): + j = { + "dtype": 1, + "element_shape": [], + "field_dim": arg.ndim, + "format": arg.fmt, + "index": i, + "is_array": True, + "offset_in_mem": arg_offset, + "stride": 4, + } + args += [j] + arr_access += [0] + elif isinstance(arg, ArgumentScalar): + j = { + "dtype": arg.dtype.value, + "element_shape": [], + "field_dim": 0, + "format": ti.Format.unknown, + "index": i, + "is_array": False, + "offset_in_mem": arg_offset, + "stride": get_data_type_size(arg.dtype), + } + args += [j] + arr_access += [0] + else: + assert False + arg_offset += j["stride"] + arg_bytes = max(arg_bytes, j["offset_in_mem"] + j["stride"]) + + rets = [] + ret_bytes = 0 + if s.context.ret is not None: + for i, ret in enumerate([s.context.ret]): + j = { + "dtype": ret.dtype.value, + "element_shape": [], + "field_dim": 0, + "format": ti.Format.unknown, + "index": i, + "is_array": False, + "offset_in_mem": 0, + "stride": get_data_type_size(ret.dtype), + } + rets += [j] + ret_bytes = max(ret_bytes, j["offset_in_mem"] + j["stride"]) + + ctx_attribs = { + "arg_attribs_vec_": args, + "args_bytes_": arg_bytes, + "arr_access": arr_access, + "extra_args_bytes_": 1536, + "ret_attribs_vec_": rets, + "rets_bytes_": ret_bytes, + } + + j = { + "is_jit_evaluator": False, + "ctx_attribs": ctx_attribs, + "name": s.name, + "tasks_attribs": tasks, + } + return j + + +def to_dr_metadata(s: Metadata) -> dr.Metadata: + fields = [to_dr_field(x) for x in s.fields.values()] + kernels = [to_dr_kernel(x) for x in s.kernels.values()] + required_caps = [] + for cap in s.required_caps: + cap = str(cap) + if "=" in cap: + k, v = cap.split("=", maxsplit=1) + j = { + "key": k, + "value": int(v), + } + required_caps += [j] + else: + j = { + "key": cap, + "value": 1, + } + required_caps += [j] + root_buffer_size = s.root_buffer_size + j = { + "fields": fields, + "kernels": kernels, + "required_caps": required_caps, + "root_buffer_size": root_buffer_size, + } + return dr.Metadata(j) + + +class NamedArgument: + def __init__(self, name: str, arg: Argument): + self.name = name + self.arg = arg + + +class Dispatch: + def __init__(self, kernel: Kernel, args: List[NamedArgument]): + self.kernel = kernel + self.args = args + + +class Graph: + def __init__(self, name: str, dispatches: List[Dispatch]): + self.name = name + self.dispatches = dispatches + args = {y.name: y.arg for x in dispatches for y in x.args} + self.args: List[NamedArgument] = [NamedArgument(k, v) for k, v in args.items()] + + +def from_dr_graph(meta: Metadata, j: dr.Graph) -> Graph: + dispatches = [] + for dispatch in j.value.dispatches: + kernel = meta.kernels[dispatch.kernel_name] + args = [] + for i, symbolic_arg in enumerate(dispatch.symbolic_args): + arg = kernel.context.args[i] + args += [NamedArgument(symbolic_arg.name, arg)] + dispatches += [Dispatch(kernel, args)] + return Graph(j.key, dispatches) + + +def to_dr_graph(s: Graph) -> dr.Graph: + dispatches = [] + for dispatch in s.dispatches: + kernel = dispatch.kernel + symbolic_args = [] + for arg in dispatch.args: + if isinstance(arg.arg, ArgumentScalar): + j = { + "dtype_id": arg.arg.dtype.value, + "element_shape": [], + "field_dim": 0, + "name": arg.name, + "num_channels": 0, + "tag": 0, + } + symbolic_args += [j] + elif isinstance(arg.arg, ArgumentNdArray): + j = { + "dtype_id": arg.arg.dtype.value, + "element_shape": arg.arg.element_shape, + "field_dim": arg.arg.ndim, + "name": arg.name, + "num_channels": 0, + "tag": 2, + } + symbolic_args += [j] + elif isinstance(arg.arg, ArgumentTexture): + j = { + "dtype_id": DataType.f32.value, + "element_shape": [], + "field_dim": 0, + "name": arg.name, + "num_channels": 0, + "tag": 3, + } + symbolic_args += [j] + elif isinstance(arg.arg, ArgumentRwTexture): + j = { + "dtype_id": DataType.f32.value, + "element_shape": [], + "field_dim": 0, + "name": arg.name, + "num_channels": 0, + "tag": 4, + } + symbolic_args += [j] + else: + assert False + + j = { + "kernel_name": kernel.name, + "symbolic_args": symbolic_args, + } + dispatches += [j] + + j = { + "key": s.name, + "value": { + "dispatches": dispatches, + }, + } + return dr.Graph(j) diff --git a/local_packages/taichi/aot/module.py b/local_packages/taichi/aot/module.py new file mode 100644 index 0000000..b942d6f --- /dev/null +++ b/local_packages/taichi/aot/module.py @@ -0,0 +1,252 @@ +import datetime +import os +import warnings +from contextlib import contextmanager +from glob import glob +from pathlib import Path, PurePosixPath +from shutil import rmtree +from tempfile import mkdtemp +from zipfile import ZipFile + +from taichi.aot.utils import produce_injected_args, produce_injected_args_from_template +from taichi.lang import impl, kernel_impl +from taichi.lang.field import ScalarField +from taichi.lang.matrix import MatrixField +from taichi.types.annotations import template + +import taichi + + +class KernelTemplate: + def __init__(self, kernel_fn, aot_module): + self._kernel_fn = kernel_fn + self._aot_module = aot_module + + @staticmethod + def keygen(v, key_p, fields): + if isinstance(v, (int, float, bool)): + key_p += "=" + str(v) + "," + return key_p + for ky, val in fields: + if val is v: + key_p += "=" + ky + "," + return key_p + raise RuntimeError( + "Arg type must be of type int/float/boolean" f" or taichi field. Type {str(type(v))}" " is not supported" + ) + + def instantiate(self, **kwargs): + name = self._kernel_fn.__name__ + kernel = self._kernel_fn._primal + assert isinstance(kernel, kernel_impl.Kernel) + injected_args = [] + key_p = "" + anno_index = 0 + template_args = {} + + for index, (key, value) in enumerate(kwargs.items()): + template_args[index] = (key, value) + + for arg in kernel.arguments: + if isinstance(arg.annotation, template): + (k, v) = template_args[anno_index] + key_p += k + key_p = self.keygen(v, key_p, self._aot_module._fields.items()) + injected_args.append(v) + anno_index += 1 + else: + injected_args.append(0) + kernel.ensure_compiled(*injected_args) + self._aot_module._aot_builder.add_kernel_template(name, key_p, kernel.kernel_cpp) + + # kernel AOT + self._aot_module._kernels.append(kernel) + + +class Module: + """An AOT module to save and load Taichi kernels. + + This module serializes the Taichi kernels for a specific arch. The + serialized module can later be loaded to run on that backend, without the + Python environment. + + Example: + Usage:: + + m = ti.aot.Module(ti.metal) + m.add_kernel(foo) + m.add_kernel(bar) + + m.save('/path/to/module') + + # Now the module file '/path/to/module' contains the Metal kernels + # for running ``foo`` and ``bar``. + """ + + def __init__(self, arch=None, caps=None): + """Creates a new AOT module instance + + Args: + arch: Target backend architecture. Default to the one initialized in :func:`~taichi.lang.init` if not specified. + caps (List[str]): Enabled device capabilities. + """ + if caps is None: + caps = [] + curr_arch = impl.current_cfg().arch + if arch is None: + arch = curr_arch + elif arch != curr_arch: + # TODO: we'll support this eventually but not yet... + warnings.warn( + f"AOT compilation to a different arch than the current one is not yet supported, switching to {curr_arch}" + ) + arch = curr_arch + + self._arch = arch + self._kernels = [] + self._fields = {} + rtm = impl.get_runtime() + rtm._finalize_root_fb_for_aot() + self._aot_builder = rtm.prog.make_aot_module_builder(arch, caps) + self._content = [] + + def add_field(self, name, field): + """Add a taichi field to the AOT module. + + Args: + name: name of taichi field + field: taichi field + + Example:: + + >>> a = ti.field(ti.f32, shape=(4,4)) + >>> b = ti.field("something") + >>> + >>> m.add_field(a) + >>> m.add_field(b) + >>> + >>> # Must add in sequence + """ + is_scalar = True + self._fields[name] = field + column_num = 1 + row_num = 1 + if isinstance(field, MatrixField): + is_scalar = False + row_num = field.m + column_num = field.n + else: + assert isinstance(field, ScalarField) + self._aot_builder.add_field( + name, + field.snode.ptr, + is_scalar, + field.dtype, + field.snode.shape, + row_num, + column_num, + ) + + def add_kernel(self, kernel_fn, template_args=None, name=None): + """Add a taichi kernel to the AOT module. + + Args: + kernel_fn (Function): the function decorated by taichi `kernel`. + template_args (Dict[str, Any]): a dict where key is the template + parameter name, and value is the instantiating arg. Note that this + works for both :class:`~taichi.types.template` and for + `:class:`~taichi.types.ndarray`. + name (str): Name to identify this kernel in the module. If not + provided, uses the built-in ``__name__`` attribute of `kernel_fn`. + + """ + kernel_name = name or kernel_fn.__name__ + kernel = kernel_fn._primal + assert isinstance(kernel, kernel_impl.Kernel) + if template_args is not None: + injected_args = produce_injected_args_from_template(kernel, template_args) + else: + injected_args = produce_injected_args(kernel) + kernel.ensure_compiled(*injected_args) + self._aot_builder.add(kernel_name, kernel.kernel_cpp) + + # kernel AOT + self._kernels.append(kernel) + + self._content += ["kernel:" + kernel_name] + + def add_graph(self, name, graph): + self._aot_builder.add_graph(name, graph._compiled_graph) + self._content += ["cgraph:" + name] + + @contextmanager + def add_kernel_template(self, kernel_fn): + """Add a taichi kernel (with template parameters) to the AOT module. + + Args: + kernel_fn (Function): the function decorated by taichi `kernel`. + + Example:: + + >>> @ti.kernel + >>> def bar_tmpl(a: ti.template()): + >>> x = a + >>> # or y = a + >>> # do something with `x` or `y` + >>> + >>> m = ti.aot.Module(arch) + >>> with m.add_kernel_template(bar_tmpl) as kt: + >>> kt.instantiate(a=x) + >>> kt.instantiate(a=y) + >>> + >>> @ti.kernel + >>> def bar_tmpl_multiple_args(a: ti.template(), b: ti.template()) + >>> x = a + >>> y = b + >>> # do something with `x` and `y` + >>> + >>> with m.add_kernel_template(bar_tmpl) as kt: + >>> kt.instantiate(a=x, b=y) + + TODO: + * Support external array + """ + kt = KernelTemplate(kernel_fn, self) + yield kt + + def save(self, filepath): + """ + Args: + filepath (str): path to a folder to store aot files. + """ + filepath = str(PurePosixPath(Path(filepath))) + self._aot_builder.dump(filepath, "") + with open(f"{filepath}/__content__", "w") as f: + f.write("\n".join(self._content)) + with open(f"{filepath}/__version__", "w") as f: + f.write(".".join(str(x) for x in taichi.__version__)) + + def archive(self, filepath: str): + """ + Args: + filepath (str): path to the stored archive of aot artifacts, MUST + end with `.tcm`. + """ + assert filepath.endswith(".tcm"), "AOT module artifact archive must ends with .tcm" + tcm_path = Path(filepath).absolute() + assert tcm_path.parent.exists(), "Output directory doesn't exist" + + temp_dir = mkdtemp(prefix="tcm_") + # Save first as usual. + self.save(temp_dir) + + fixed_time = datetime.datetime(2000, 12, 1).timestamp() + + # Package all artifacts into a zip archive and attach contend data. + with ZipFile(tcm_path, "w") as z: + for path in glob(f"{temp_dir}/*", recursive=True): + os.utime(path, (fixed_time, fixed_time)) + z.write(path, Path.relative_to(Path(path), temp_dir)) + + # Remove cached files + rmtree(temp_dir) diff --git a/local_packages/taichi/aot/utils.py b/local_packages/taichi/aot/utils.py new file mode 100644 index 0000000..8741bd2 --- /dev/null +++ b/local_packages/taichi/aot/utils.py @@ -0,0 +1,149 @@ +from typing import Any + +from taichi.lang._ndarray import ScalarNdarray +from taichi.lang._texture import Texture +from taichi.lang.enums import Format +from taichi.lang.exception import TaichiCompilationError +from taichi.lang.matrix import ( + Matrix, + MatrixNdarray, + MatrixType, + VectorNdarray, + VectorType, +) +from taichi.lang.util import cook_dtype +from taichi.types.annotations import template +from taichi.types.ndarray_type import NdarrayType +from taichi.types.texture_type import RWTextureType, TextureType + +template_types = (NdarrayType, TextureType, template) + + +def check_type_match(lhs, rhs): + if isinstance(lhs, MatrixType) and isinstance(rhs, MatrixType): + return lhs.n == rhs.n and lhs.m == rhs.m and (lhs.dtype == rhs.dtype or lhs.dtype is None or rhs.dtype is None) + if isinstance(lhs, MatrixType) or isinstance(rhs, MatrixType): + return False + + return cook_dtype(lhs) == cook_dtype(rhs) + + +def produce_injected_args_from_template(kernel, template_args): + injected_args = [] + num_template_args = len([arg.annotation for arg in kernel.arguments if isinstance(arg.annotation, template_types)]) + assert num_template_args == len( + template_args + ), f"Need {num_template_args} inputs to instantiate the template parameters, got {len(template_args)}" + for arg in kernel.arguments: + anno = arg.annotation + if isinstance(anno, template_types): + injected_args.append(template_args[arg.name]) + elif isinstance(anno, RWTextureType): + texture_shape = (2,) * anno.num_dimensions + fmt = anno.fmt + injected_args.append(Texture(fmt, texture_shape)) + else: + injected_args.append(0) + return injected_args + + +def produce_injected_args(kernel, symbolic_args=None): + injected_args = [] + for i, arg in enumerate(kernel.arguments): + anno = arg.annotation + if isinstance(anno, NdarrayType): + if symbolic_args is not None: + # TODO: reconstruct dtype to be TensorType from taichi_core instead of the Python ones + element_dim = len(symbolic_args[i].element_shape) + if element_dim == 0 or symbolic_args[i].element_shape == (1,): + dtype = symbolic_args[i].dtype() + elif element_dim == 1: + dtype = VectorType(symbolic_args[i].element_shape[0], symbolic_args[i].dtype()) + elif element_dim == 2: + dtype = MatrixType( + symbolic_args[i].element_shape[0], + symbolic_args[i].element_shape[1], + 2, + symbolic_args[i].dtype(), + ) + else: + raise TaichiCompilationError("Not supported") + ndim = symbolic_args[i].field_dim + else: + ndim = anno.ndim + dtype = anno.dtype + + if anno.ndim is not None and ndim != anno.ndim: + raise TaichiCompilationError( + f"{ndim} from Arg {arg.name} doesn't match kernel's annotated ndim={anno.ndim}" + ) + + if anno.dtype is not None and not check_type_match(dtype, anno.dtype): + raise TaichiCompilationError( + f" Arg {arg.name}'s dtype {dtype.to_string()} doesn't match kernel's annotated dtype={anno.dtype.to_string()}" + ) + + if isinstance(dtype, VectorType): + injected_args.append(VectorNdarray(dtype.n, dtype=dtype.dtype, shape=(2,) * ndim)) + elif isinstance(dtype, MatrixType): + injected_args.append(MatrixNdarray(dtype.n, dtype.m, dtype=dtype.dtype, shape=(2,) * ndim)) + else: + injected_args.append(ScalarNdarray(dtype, (2,) * ndim)) + elif isinstance(anno, RWTextureType): + texture_shape = (2,) * anno.num_dimensions + fmt = anno.fmt + injected_args.append(Texture(fmt, texture_shape)) + elif isinstance(anno, TextureType): + texture_shape = (2,) * anno.num_dimensions + injected_args.append(Texture(Format.rgba8, texture_shape)) + elif isinstance(anno, MatrixType): + if symbolic_args is not None: + symbolic_mat_n = symbolic_args[i].element_shape[0] + symbolic_mat_m = symbolic_args[i].element_shape[1] + + if symbolic_mat_m != anno.m or symbolic_mat_n != anno.n: + raise RuntimeError( + f"Matrix dimension mismatch, expected ({anno.n}, {anno.m}) " + f"but dispatched shape ({symbolic_mat_n}, {symbolic_mat_m})." + ) + injected_args.append(Matrix([0] * anno.n * anno.m, dt=anno.dtype)) + else: + if symbolic_args is not None: + dtype = symbolic_args[i].dtype() + else: + dtype = anno + + if not check_type_match(dtype, anno): + raise TaichiCompilationError( + f" Arg {arg.name}'s dtype {dtype.to_string()} doesn't match kernel's annotated dtype={anno.to_string()}" + ) + # For primitive types, we can just inject a dummy value. + injected_args.append(0) + return injected_args + + +def json_data_model(f): + """ + Decorates a JSON data model. A JSON data model MUST NOT have any member + functions and it MUST be constructible from a JSON object. + + This is merely a marker. + """ + f._is_json_data_model = True + return f + + +def is_json_data_model(cls) -> bool: + return hasattr(cls, "_is_json_data_model") + + +def dump_json_data_model(x: object) -> Any: + if isinstance(x, (int, float, str, bool, type(None))): + return x + if isinstance(x, (list, tuple)): + return [dump_json_data_model(e) for e in x] + if isinstance(x, dict): + return {k: dump_json_data_model(v) for k, v in x.items()} + if is_json_data_model(x): + return {k: dump_json_data_model(v) for k, v in x.__dict__.items() if k != "_is_json_data_model"} + return x diff --git a/local_packages/taichi/assets/Go-Regular.ttf b/local_packages/taichi/assets/Go-Regular.ttf new file mode 100644 index 0000000..8d59fab Binary files /dev/null and b/local_packages/taichi/assets/Go-Regular.ttf differ diff --git a/local_packages/taichi/assets/static/imgs/ti_gallery.png b/local_packages/taichi/assets/static/imgs/ti_gallery.png new file mode 100644 index 0000000..6d49ace Binary files /dev/null and b/local_packages/taichi/assets/static/imgs/ti_gallery.png differ diff --git a/local_packages/taichi/examples/algorithm/circle-packing/circle_packing_image.py b/local_packages/taichi/examples/algorithm/circle-packing/circle_packing_image.py new file mode 100644 index 0000000..e047641 --- /dev/null +++ b/local_packages/taichi/examples/algorithm/circle-packing/circle_packing_image.py @@ -0,0 +1,139 @@ +""" +Given an input image, redraw it with circle packings. +""" + +try: + import cairo + import cv2 +except: + raise ImportError( + "This example depends on opencv and cairo, please run 'pip install opencv-python pycairo' to install." + ) + +import os + +import matplotlib.pyplot as plt +import numpy as np + +import taichi as ti + +ti.init(arch=ti.cpu) + +_internal_scale = 5 # internally we need a large image to paint + + +@ti.dataclass +class Circle: + x: int + y: int + r: int + + +circles = Circle.field() # pylint: disable=no-member +ti.root.dynamic(ti.i, 100000, chunk_size=64).place(circles) + + +def load_image(imgfile): + image = cv2.imread(imgfile) + h, w = image.shape[:2] + image = cv2.resize( + image, + (int(_internal_scale * w), int(_internal_scale * h)), + interpolation=cv2.INTER_AREA, + ) + return cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + + +def get_dist_transform_image(image): + canny = cv2.Canny(image, 100, 200) + edges_inv = 255 - canny + dist_image = cv2.distanceTransform(edges_inv, cv2.DIST_L2, 0) + return dist_image + + +@ti.kernel +def add_new_circles( + filled: ti.types.ndarray(), + dist_image: ti.types.ndarray(), + min_radius: int, + max_radius: int, +) -> int: + H, W = dist_image.shape[0], dist_image.shape[1] + ti.loop_config(serialize=True) + for x in range(min_radius, W - min_radius): + for y in range(min_radius, H - min_radius): + valid = True + if dist_image[y, x] > min_radius: + r = int((dist_image[y, x] + 1) / 2) + r = ti.min(r, max_radius) + if not filled[y, x] and r <= x < W - r and r <= y < H - r: + for ii in range(x - r, x + r + 1): + for jj in range(y - r, y + r + 1): + if (ii - x) ** 2 + (jj - y) ** 2 < (r + 1) ** 2: + if filled[jj, ii]: + valid = False + break + if not valid: + break + + if valid: + circles.append(Circle(x, y, r)) + for ii in range(x - r, x + r + 1): + for jj in range(y - r, y + r + 1): + if (ii - x) ** 2 + (jj - y) ** 2 < (r + 1) ** 2: + filled[jj, ii] = 1 + + return circles.length() + + +def plot_cirlces(image, ctx, n): + for i in range(n): + c = circles[i] + fc = image[c.y, c.x] / 255 + if all(fc < 0.1): + ec = (0.5, 0.5, 0.5) + else: + ec = (0, 0, 0) + ctx.arc(c.x, c.y, c.r, 0, 2 * np.pi) + ctx.set_source_rgb(*fc) + ctx.fill_preserve() + ctx.set_source_rgba(*ec) + ctx.stroke() + + +def process(imgfile, scale): + image = load_image(imgfile) + dist_image = get_dist_transform_image(image) + image = cv2.GaussianBlur(image, (5, 5), 0) + H, W = image.shape[:2] + surface = cairo.ImageSurface(cairo.FORMAT_RGB24, W, H) + ctx = cairo.Context(surface) + ctx.set_source_rgb(0, 0, 0) + ctx.paint() + + filled = np.zeros([H, W], dtype=np.int32) + R = [150, 120, 100, 80, 50, 30, 25, 20, 15, 10, 7, 5, 3, 2] + for i in range(1, len(R)): + n = add_new_circles(filled, dist_image, R[i], R[i - 1]) + + ctx.set_line_width(1) + plot_cirlces(image, ctx, n) + data = surface.get_data() + result = np.frombuffer(data, dtype=np.uint8).reshape(H, W, 4) + w = int(W / _internal_scale * scale) + h = int(H / _internal_scale * scale) + result = cv2.resize(result, (w, h), interpolation=cv2.INTER_CUBIC) + plt.tight_layout() + plt.axis("off") + plt.imshow(result) + plt.show() + + +def main(imgfile=None, scale=2): + if imgfile is None: + imgfile = os.path.join(os.path.dirname(os.path.realpath(__file__)), "taichi_logo.png") + process(imgfile, scale) + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/algorithm/circle-packing/taichi_logo.png b/local_packages/taichi/examples/algorithm/circle-packing/taichi_logo.png new file mode 100644 index 0000000..ba98df7 Binary files /dev/null and b/local_packages/taichi/examples/algorithm/circle-packing/taichi_logo.png differ diff --git a/local_packages/taichi/examples/algorithm/laplace.py b/local_packages/taichi/examples/algorithm/laplace.py new file mode 100644 index 0000000..6b8bb9f --- /dev/null +++ b/local_packages/taichi/examples/algorithm/laplace.py @@ -0,0 +1,32 @@ +import taichi as ti + +ti.init(arch=ti.cpu) + +N = 16 + +x, y = ti.field(ti.f32), ti.field(ti.f32) + +ti.root.dense(ti.ij, N).place(x, y) + + +@ti.kernel +def laplace(): + for i, j in x: + if (i + j) % 3 == 0 and 1 <= i < N - 1 and 1 <= j < N - 1: + y[i, j] = 4.0 * x[i, j] - x[i - 1, j] - x[i + 1, j] - x[i, j - 1] - x[i, j + 1] + else: + y[i, j] = 0.0 + + +def main(): + for i in range(10): + x[i, i + 1] = 1.0 + + laplace() + + for i in range(10): + print(y[i, i + 1]) + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/algorithm/marching_squares.py b/local_packages/taichi/examples/algorithm/marching_squares.py new file mode 100644 index 0000000..52b2bd7 --- /dev/null +++ b/local_packages/taichi/examples/algorithm/marching_squares.py @@ -0,0 +1,183 @@ +""" +Marching squares algorithm in Taichi. +See "https://en.wikipedia.org/wiki/Marching_squares" +""" + +import time + +import numpy as np + +import taichi as ti +import taichi.math as tm + +ti.init(arch=ti.cpu) + +W, H = 800, 600 +resolution = (W, H) +grid_size = 8 +level = 0.15 # Draw contour of isofunc=level +pixels = ti.Vector.field(3, float, shape=resolution) + +# Cases 0-15 in the wikipedia page +_edges_np = np.array( + [ + [[-1, -1], [-1, -1]], + [[3, 0], [-1, -1]], + [[0, 1], [-1, -1]], + [[1, 3], [-1, -1]], + [[1, 2], [-1, -1]], + [[0, 1], [2, 3]], + [[0, 2], [-1, -1]], + [[2, 3], [-1, -1]], + [[2, 3], [-1, -1]], + [[0, 2], [-1, -1]], + [[0, 3], [1, 2]], + [[1, 2], [-1, -1]], + [[1, 3], [-1, -1]], + [[0, 1], [-1, -1]], + [[3, 0], [-1, -1]], + [[-1, -1], [-1, -1]], + ], + dtype=np.int32, +) +edge_table = ti.Matrix.field(2, 2, int, 16) +edge_table.from_numpy(_edges_np) + +Edge = ti.types.struct(p0=tm.vec2, p1=tm.vec2) +edges = Edge.field() +ti.root.dynamic(ti.i, 1024, chunk_size=32).place(edges) +iTime = ti.field(float, shape=()) + + +@ti.func +def hash22(p): + n = tm.sin(tm.dot(p, tm.vec2(41, 289))) + p = tm.fract(tm.vec2(262144, 32768) * n) + return tm.sin(p * 6.28 + iTime[None]) + + +@ti.func +def noise(p): + ip = tm.floor(p) + p -= ip + v = tm.vec4( + tm.dot(hash22(ip), p), + tm.dot(hash22(ip + tm.vec2(1, 0)), p - tm.vec2(1, 0)), + tm.dot(hash22(ip + tm.vec2(0, 1)), p - tm.vec2(0, 1)), + tm.dot(hash22(ip + tm.vec2(1, 1)), p - tm.vec2(1, 1)), + ) + p = p * p * p * (p * (p * 6 - 15) + 10) + return tm.mix(tm.mix(v.x, v.y, p.x), tm.mix(v.z, v.w, p.x), p.y) + + +@ti.func +def isofunc(p): + return noise(p / 4 + 1) + + +@ti.func +def interp(p1: tm.vec2, p2: tm.vec2, v1: float, v2: float, isovalue: float) -> tm.vec2: + return tm.mix(p1, p2, (isovalue - v1) / (v2 - v1)) + + +@ti.func +def get_vertex(vertex_id, values, isovalue): + v = tm.vec2(0) + square = [tm.vec2(0), tm.vec2(1, 0), tm.vec2(1, 1), tm.vec2(0, 1)] + if vertex_id == 0: + v = interp(square[0], square[1], values.x, values.y, isovalue) + elif vertex_id == 1: + v = interp(square[1], square[2], values.y, values.z, isovalue) + elif vertex_id == 2: + v = interp(square[2], square[3], values.z, values.w, isovalue) + else: + v = interp(square[3], square[0], values.w, values.x, isovalue) + return v + + +@ti.kernel +def march_squares() -> int: + edges.deactivate() + x_range = tm.ceil(grid_size * W / H, int) + y_range = grid_size + for i, j in ti.ndrange((-x_range, x_range + 1), (-y_range, y_range + 1)): + case_id = 0 + values = tm.vec4( + isofunc(tm.vec2(i, j)), + isofunc(tm.vec2(i + 1, j)), + isofunc(tm.vec2(i + 1, j + 1)), + isofunc(tm.vec2(i, j + 1)), + ) + if values.x > level: + case_id |= 1 + if values.y > level: + case_id |= 2 + if values.z > level: + case_id |= 4 + if values.w > level: + case_id |= 8 + + # Fix the ambiguity for case 5 and 10 + if case_id == 5 or case_id == 10: + center = isofunc(tm.vec2(i + 0.5, j + 0.5)) + if center < level: # A valley, switch the case_id + case_id = 15 - case_id + + for k in ti.static(range(2)): + if edge_table[case_id][k, 0] != -1: + ind1 = edge_table[case_id][k, 0] + ind2 = edge_table[case_id][k, 1] + p0 = tm.vec2(i, j) + get_vertex(ind1, values, level) + p1 = tm.vec2(i, j) + get_vertex(ind2, values, level) + edges.append(Edge(p0, p1)) + + return edges.length() + + +@ti.func +def dseg(p, a, b): + p -= a + b -= a + h = tm.clamp(tm.dot(p, b) / tm.dot(b, b), 0, 1) + return tm.length(p - h * b) + + +@ti.kernel +def render(): + for i, j in pixels: + p = (2 * tm.vec2(i, j) - tm.vec2(resolution)) / H + p *= grid_size + p.y += 1.2 + + q = tm.fract(p) - 0.5 + d2 = 0.5 - abs(q) + dgrid = ti.min(d2.x, d2.y) + dedge = 1e5 + dv = 1e5 + for k in range(edges.length()): + s, e = edges[k].p0, edges[k].p1 + dedge = ti.min(dedge, dseg(p, s, e)) + dv = ti.min(dv, tm.length(p - s), tm.length(p - e)) + + col = tm.vec3(0.3, 0.6, 0.8) + if isofunc(p) > level: + col = tm.vec3(1, 0.8, 0.3) + + # Draw background grid + col = tm.mix(col, tm.vec3(0), 1 - tm.smoothstep(0, 0.04, dgrid - 0.02)) + # Draw edges + col = tm.mix(col, tm.vec3(1, 0, 0), 1 - tm.smoothstep(0, 0.05, dedge - 0.02)) + # Draw small circles at the vertices + col = tm.mix(col, tm.vec3(0), 1 - tm.smoothstep(0, 0.05, dv - 0.1)) + col = tm.mix(col, tm.vec3(1), 1 - tm.smoothstep(0, 0.05, dv - 0.08)) + pixels[i, j] = tm.sqrt(tm.clamp(col, 0, 1)) + + +t0 = time.perf_counter() +gui = ti.GUI("2D Marching Squares", res=resolution, fast_gui=True) +while gui.running and not gui.get_event(gui.ESCAPE): + iTime[None] = time.perf_counter() - t0 + march_squares() + render() + gui.set_image(pixels) + gui.show() diff --git a/local_packages/taichi/examples/algorithm/mciso_advanced.py b/local_packages/taichi/examples/algorithm/mciso_advanced.py new file mode 100644 index 0000000..7180949 --- /dev/null +++ b/local_packages/taichi/examples/algorithm/mciso_advanced.py @@ -0,0 +1,486 @@ +import numpy as np + +import taichi as ti + + +@ti.data_oriented +class MCISO: + et2 = np.array( + [ + [[-1, -1], [-1, -1]], # + [[0, 1], [-1, -1]], # a + [[0, 2], [-1, -1]], # b + [[1, 2], [-1, -1]], # ab + [[1, 3], [-1, -1]], # c + [[0, 3], [-1, -1]], # ca + [[2, 3], [0, 1]], # cb + [[2, 3], [-1, -1]], # cab + [[2, 3], [-1, -1]], # d + [[2, 3], [0, 1]], # da + [[0, 3], [-1, -1]], # db + [[1, 3], [-1, -1]], # dab + [[1, 2], [-1, -1]], # dc + [[0, 2], [-1, -1]], # dca + [[0, 1], [-1, -1]], # dcb + [[-1, -1], [-1, -1]], # dcab + ], + np.int32, + ) + et3 = np.array( + [ # {{{ + # https://www.cnblogs.com/shushen/p/5542131.html + [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1], + [3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1], + [3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1], + [3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1], + [9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1], + [1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1], + [9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1], + [2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1], + [8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1], + [9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1], + [4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1], + [3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1], + [1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1], + [4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1], + [4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1], + [9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1], + [1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1], + [5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1], + [2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1], + [9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1], + [0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1], + [2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1], + [10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1], + [4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1], + [5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1], + [5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1], + [9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1], + [0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1], + [1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1], + [10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1], + [8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1], + [2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1], + [7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1], + [9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1], + [2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1], + [11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1], + [9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1], + [5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1], + [11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1], + [11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1], + [1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1], + [9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1], + [5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1], + [2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1], + [0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1], + [5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1], + [6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1], + [0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1], + [3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1], + [6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1], + [5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1], + [1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1], + [10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1], + [6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1], + [1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1], + [8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1], + [7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1], + [3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1], + [5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1], + [0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1], + [9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1], + [8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1], + [5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1], + [0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1], + [6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1], + [10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1], + [10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1], + [8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1], + [1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1], + [3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1], + [0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1], + [10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1], + [0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1], + [3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1], + [6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1], + [9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1], + [8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1], + [3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1], + [6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1], + [0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1], + [10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1], + [10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1], + [1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1], + [2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1], + [7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1], + [7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1], + [2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1], + [1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1], + [11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1], + [8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1], + [0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1], + [7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1], + [10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1], + [2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1], + [6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1], + [7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1], + [2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1], + [1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1], + [10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1], + [10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1], + [0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1], + [7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1], + [6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1], + [8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1], + [9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1], + [6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1], + [1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, -1, -1, -1, -1], + [4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1], + [10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1], + [8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1], + [0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1], + [1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1], + [8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1], + [10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1], + [4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1], + [10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1], + [5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1], + [11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1], + [9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1], + [6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1], + [7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1], + [3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1], + [7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1], + [9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1], + [3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1], + [6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1], + [9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1], + [1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1], + [4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1], + [7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1], + [6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1], + [3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1], + [0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1], + [6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1], + [1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1], + [0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1], + [11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1], + [6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1], + [5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, -1, -1, -1, -1], + [9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1], + [1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1], + [1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1], + [10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1], + [0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1], + [5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1], + [10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1], + [11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1], + [0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1], + [9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1], + [7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1], + [2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1], + [8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1], + [9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1], + [9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1], + [1, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1], + [9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1], + [9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1], + [5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1], + [0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1], + [10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1], + [2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1], + [0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1], + [0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1], + [9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1], + [5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1], + [3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1], + [5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1], + [8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1], + [0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1], + [9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1], + [0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1], + [1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1], + [3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1], + [4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1], + [9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1], + [11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1], + [11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1], + [2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1], + [9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1], + [3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1], + [1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1], + [4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1], + [4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1], + [0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1], + [3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1], + [3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, -1, -1, -1, -1], + [0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1], + [9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1], + [1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + ], + np.int32, + )[:, :15].reshape( + 256, 5, 3 + ) # }}} + + def __init__(self, N=64, dim=3, blk_size=None): + self.N = N + self.dim = dim + self.use_sparse = blk_size is not None + self.blk_size = blk_size + + self.m = ti.field(float) # field to sample + self.g = ti.Vector.field(self.dim, float) # normalized gradient + indices = [ti.ij, ti.ijk][dim - 2] + if self.use_sparse: + ti.root.pointer(indices, self.N // self.blk_size).dense(indices, self.blk_size).place(self.m) + ti.root.dense(indices, self.N).place(self.g) + else: + ti.root.dense(indices, self.N).place(self.m) + ti.root.dense(indices, self.N).place(self.g) + + self.r = ti.Vector.field(dim, float, (self.N**self.dim, self.dim)) # result buffer, TODO: optimize this + + et = [self.et2, self.et3][dim - 2] + self.et = ti.Vector.field(dim, int, et.shape[:2]) + self.et.from_numpy(et) + + @ti.kernel + def compute_grad(self): + for I in ti.grouped(self.g): + r = ti.Vector.zero(float, self.dim) + for i in ti.static(range(self.dim)): + d = ti.Vector.unit(self.dim, i, int) + r[i] = self.m[I + d] - self.m[I - d] + self.g[I] = r + + @ti.kernel + def march(self) -> int: + r_n = 0 + + for I in ti.grouped(ti.ndrange(*[[1, self.N - 1]] * self.dim)): + idx = 0 + if ti.static(self.dim == 2): + i, j = I + if self.m[i, j] > 1: + idx |= 1 + if self.m[i + 1, j] > 1: + idx |= 2 + if self.m[i, j + 1] > 1: + idx |= 4 + if self.m[i + 1, j + 1] > 1: + idx |= 8 + else: + i, j, k = I + if self.m[i, j, k] > 1: + idx |= 1 + if self.m[i + 1, j, k] > 1: + idx |= 2 + if self.m[i + 1, j + 1, k] > 1: + idx |= 4 + if self.m[i, j + 1, k] > 1: + idx |= 8 + if self.m[i, j, k + 1] > 1: + idx |= 16 + if self.m[i + 1, j, k + 1] > 1: + idx |= 32 + if self.m[i + 1, j + 1, k + 1] > 1: + idx |= 64 + if self.m[i, j + 1, k + 1] > 1: + idx |= 128 + + for m in range(self.et.shape[1]): + if self.et[idx, m][0] == -1: + break + + n = ti.atomic_add(r_n, 1) + for l in ti.static(range(self.dim)): + e = self.et[idx, m][l] + R = float(I) + + if ti.static(self.dim == 2): + i, j = I + # (.5, 0), (0, .5), (1, .5), (.5, 1) + if e == 0: + p = (1 - self.m[i, j]) / (self.m[i + 1, j] - self.m[i, j]) + R = [i + p, j] + elif e == 1: + p = (1 - self.m[i, j]) / (self.m[i, j + 1] - self.m[i, j]) + R = [i, j + p] + elif e == 2: + p = (1 - self.m[i + 1, j]) / (self.m[i + 1, j + 1] - self.m[i + 1, j]) + R = [i + 1, j + p] + elif e == 3: + p = (1 - self.m[i, j + 1]) / (self.m[i + 1, j + 1] - self.m[i, j + 1]) + R = [i + p, j + 1] + else: + i, j, k = I + # (.5, 0, 0), (1, .5, 0), (.5, 1, 0), (0, .5, 0) + # (.5, 0, 1), (1, .5, 1), (.5, 1, 1), (0, .5, 1) + # (0, 0, .5), (1, 0, .5), (1, 1, .5), (0, 1, .5) + if e == 0: + p = (1 - self.m[i, j, k]) / (self.m[i + 1, j, k] - self.m[i, j, k]) + R = [i + p, j, k] + elif e == 1: + p = (1 - self.m[i + 1, j, k]) / (self.m[i + 1, j + 1, k] - self.m[i + 1, j, k]) + R = [i + 1, j + p, k] + elif e == 2: + p = (1 - self.m[i, j + 1, k]) / (self.m[i + 1, j + 1, k] - self.m[i, j + 1, k]) + R = [i + p, j + 1, k] + elif e == 3: + p = (1 - self.m[i, j, k]) / (self.m[i, j + 1, k] - self.m[i, j, k]) + R = [i, j + p, k] + elif e == 4: + p = (1 - self.m[i, j, k + 1]) / (self.m[i + 1, j, k + 1] - self.m[i, j, k + 1]) + R = [i + p, j, k + 1] + elif e == 5: + p = (1 - self.m[i + 1, j, k + 1]) / (self.m[i + 1, j + 1, k + 1] - self.m[i + 1, j, k + 1]) + R = [i + 1, j + p, k + 1] + elif e == 6: + p = (1 - self.m[i, j + 1, k + 1]) / (self.m[i + 1, j + 1, k + 1] - self.m[i, j + 1, k + 1]) + R = [i + p, j + 1, k + 1] + elif e == 7: + p = (1 - self.m[i, j, k + 1]) / (self.m[i, j + 1, k + 1] - self.m[i, j, k + 1]) + R = [i, j + p, k + 1] + elif e == 8: + p = (1 - self.m[i, j, k]) / (self.m[i, j, k + 1] - self.m[i, j, k]) + R = [i, j, k + p] + elif e == 9: + p = (1 - self.m[i + 1, j, k]) / (self.m[i + 1, j, k + 1] - self.m[i + 1, j, k]) + R = [i + 1, j, k + p] + elif e == 10: + p = (1 - self.m[i + 1, j + 1, k]) / (self.m[i + 1, j + 1, k + 1] - self.m[i + 1, j + 1, k]) + R = [i + 1, j + 1, k + p] + elif e == 11: + p = (1 - self.m[i, j + 1, k]) / (self.m[i, j + 1, k + 1] - self.m[i, j + 1, k]) + R = [i, j + 1, k + p] + self.r[n, l] = R + + return r_n + + +class MCISO_Example(MCISO): + @staticmethod + @ti.func + def gauss(x): + return ti.exp(-6 * x**2) + + @ti.kernel + def touch(self, mx: float, my: float): + for o in ti.grouped(ti.ndrange(*[self.N] * self.dim)): + p = o / self.N + a = self.gauss((p - 0.5).norm() / 0.25) + p.x -= mx - 0.5 / self.N + p.y -= my - 0.5 / self.N + if ti.static(self.dim == 3): + p.z -= 0.5 + b = self.gauss(p.norm() / 0.25) + r = ti.max(a + b - 0.08, 0) + if r <= 0: + continue + self.m[o] = r * 3 + + def main(self): + gui = ti.GUI("Marching cube") + while gui.running and not gui.get_event(gui.ESCAPE): + if self.use_sparse: + ti.deactivate_all_snodes() + else: + self.m.fill(0) + self.touch(*gui.get_cursor_pos()) + ret_len = self.march() + ret = self.r.to_numpy()[:ret_len] / self.N + if self.dim == 2: + self.compute_grad() + gui.set_image(ti.tools.imresize(self.g, *gui.res) * 0.5 + 0.5) + gui.lines(ret[:, 0], ret[:, 1], color=0xFF66CC, radius=1.5) + else: + gui.triangles(ret[:, 0, 0:2], ret[:, 1, 0:2], ret[:, 2, 0:2], color=0xFFCC66) + gui.lines(ret[:, 0, 0:2], ret[:, 1, 0:2], color=0xFF66CC, radius=0.5) + gui.lines(ret[:, 1, 0:2], ret[:, 2, 0:2], color=0xFF66CC, radius=0.5) + gui.lines(ret[:, 2, 0:2], ret[:, 0, 0:2], color=0xFF66CC, radius=0.5) + gui.text(f"Press space to save mesh to PLY ({len(ret)} faces)", (0, 1)) + if gui.is_pressed(gui.SPACE): + num = ret.shape[0] + writer = ti.tools.PLYWriter(num_vertices=num * 3, num_faces=num) + vertices = ret.reshape(num * 3, 3) * 2 - 1 + writer.add_vertex_pos(vertices[:, 0], vertices[:, 1], vertices[:, 2]) + indices = np.arange(0, num * 3) + writer.add_faces(indices) + writer.export("mciso_output.ply") + print("Mesh saved to mciso_output.ply") + gui.show() + + +if __name__ == "__main__": + ti.init(arch=ti.cuda) + main = MCISO_Example(dim=3) + main.main() diff --git a/local_packages/taichi/examples/algorithm/mgpcg.py b/local_packages/taichi/examples/algorithm/mgpcg.py new file mode 100644 index 0000000..f39abb1 --- /dev/null +++ b/local_packages/taichi/examples/algorithm/mgpcg.py @@ -0,0 +1,231 @@ +# Solve Poisson's equation on an NxN grid using MGPCG +import numpy as np + +import taichi as ti + +real = ti.f32 +ti.init(default_fp=real, arch=ti.x64, kernel_profiler=True) + +# grid parameters +N = 128 +N_gui = 512 # gui resolution + +n_mg_levels = 4 +pre_and_post_smoothing = 2 +bottom_smoothing = 50 + +use_multigrid = True + +N_ext = N // 2 # number of ext cells set so that that total grid size is still power of 2 +N_tot = 2 * N + +# setup sparse simulation data arrays +r = [ti.field(dtype=real) for _ in range(n_mg_levels)] # residual +z = [ti.field(dtype=real) for _ in range(n_mg_levels)] # M^-1 r +x = ti.field(dtype=real) # solution +p = ti.field(dtype=real) # conjugate gradient +Ap = ti.field(dtype=real) # matrix-vector product +alpha = ti.field(dtype=real) # step size +beta = ti.field(dtype=real) # step size +sum_ = ti.field(dtype=real) # storage for reductions +pixels = ti.field(dtype=real, shape=(N_gui, N_gui)) # image buffer + +ti.root.pointer(ti.ijk, [N_tot // 4]).dense(ti.ijk, 4).place(x, p, Ap) + +for lvl in range(n_mg_levels): + ti.root.pointer(ti.ijk, [N_tot // (4 * 2**lvl)]).dense(ti.ijk, 4).place(r[lvl], z[lvl]) + +ti.root.place(alpha, beta, sum_) + + +@ti.kernel +def init(): + for i, j, k in ti.ndrange((N_ext, N_tot - N_ext), (N_ext, N_tot - N_ext), (N_ext, N_tot - N_ext)): + xl = (i - N_ext) * 2.0 / N_tot + yl = (j - N_ext) * 2.0 / N_tot + zl = (k - N_ext) * 2.0 / N_tot + # r[0] = b - Ax, where x = 0; therefore r[0] = b + r[0][i, j, k] = ti.sin(2.0 * np.pi * xl) * ti.sin(2.0 * np.pi * yl) * ti.sin(2.0 * np.pi * zl) + z[0][i, j, k] = 0.0 + Ap[i, j, k] = 0.0 + p[i, j, k] = 0.0 + x[i, j, k] = 0.0 + + +@ti.kernel +def compute_Ap(): + for i, j, k in Ap: + # A is implicitly expressed as a 3-D laplace operator + Ap[i, j, k] = ( + 6.0 * p[i, j, k] + - p[i + 1, j, k] + - p[i - 1, j, k] + - p[i, j + 1, k] + - p[i, j - 1, k] + - p[i, j, k + 1] + - p[i, j, k - 1] + ) + + +@ti.kernel +def reduce(p_: ti.template(), q_: ti.template()): + for I in ti.grouped(p_): + sum_[None] += p_[I] * q_[I] + + +@ti.kernel +def update_x(): + for I in ti.grouped(p): + x[I] += alpha[None] * p[I] + + +@ti.kernel +def update_r(): + for I in ti.grouped(p): + r[0][I] -= alpha[None] * Ap[I] + + +@ti.kernel +def update_p(): + for I in ti.grouped(p): + p[I] = z[0][I] + beta[None] * p[I] + + +@ti.kernel +def restrict(l: ti.template()): + for i, j, k in r[l]: + res = r[l][i, j, k] - ( + 6.0 * z[l][i, j, k] + - z[l][i + 1, j, k] + - z[l][i - 1, j, k] + - z[l][i, j + 1, k] + - z[l][i, j - 1, k] + - z[l][i, j, k + 1] + - z[l][i, j, k - 1] + ) + r[l + 1][i // 2, j // 2, k // 2] += res * 0.5 + + +@ti.kernel +def prolongate(l: ti.template()): + for I in ti.grouped(z[l]): + z[l][I] = z[l + 1][I // 2] + + +@ti.kernel +def smooth(l: ti.template(), phase: ti.template()): + # phase = red/black Gauss-Seidel phase + for i, j, k in r[l]: + if (i + j + k) & 1 == phase: + z[l][i, j, k] = ( + r[l][i, j, k] + + z[l][i + 1, j, k] + + z[l][i - 1, j, k] + + z[l][i, j + 1, k] + + z[l][i, j - 1, k] + + z[l][i, j, k + 1] + + z[l][i, j, k - 1] + ) / 6.0 + + +def apply_preconditioner(): + z[0].fill(0) + for l in range(n_mg_levels - 1): + for i in range(pre_and_post_smoothing << l): + smooth(l, 0) + smooth(l, 1) + z[l + 1].fill(0) + r[l + 1].fill(0) + restrict(l) + + for i in range(bottom_smoothing): + smooth(n_mg_levels - 1, 0) + smooth(n_mg_levels - 1, 1) + + for l in reversed(range(n_mg_levels - 1)): + prolongate(l) + for i in range(pre_and_post_smoothing << l): + smooth(l, 1) + smooth(l, 0) + + +@ti.kernel +def paint(): + kk = N_tot * 3 // 8 + for i, j in pixels: + ii = int(i * N / N_gui) + N_ext + jj = int(j * N / N_gui) + N_ext + pixels[i, j] = x[ii, jj, kk] / N_tot + + +def main(): + gui = ti.GUI("mgpcg", res=(N_gui, N_gui)) + + init() + + sum_[None] = 0.0 + reduce(r[0], r[0]) + initial_rTr = sum_[None] + + # r = b - Ax = b since x = 0 + # p = r = r + 0 p + if use_multigrid: + apply_preconditioner() + else: + z[0].copy_from(r[0]) + + update_p() + + sum_[None] = 0.0 + reduce(z[0], r[0]) + old_zTr = sum_[None] + + # CG + for i in range(400): + # alpha = rTr / pTAp + compute_Ap() + sum_[None] = 0.0 + reduce(p, Ap) + pAp = sum_[None] + alpha[None] = old_zTr / pAp + + # x = x + alpha p + update_x() + + # r = r - alpha Ap + update_r() + + # check for convergence + sum_[None] = 0.0 + reduce(r[0], r[0]) + rTr = sum_[None] + if rTr < initial_rTr * 1.0e-12: + break + + # z = M^-1 r + if use_multigrid: + apply_preconditioner() + else: + z[0].copy_from(r[0]) + + # beta = new_rTr / old_rTr + sum_[None] = 0.0 + reduce(z[0], r[0]) + new_zTr = sum_[None] + beta[None] = new_zTr / old_zTr + + # p = z + beta p + update_p() + old_zTr = new_zTr + + print(" ") + print(f"Iter = {i:4}, Residual = {rTr:e}") + paint() + gui.set_image(pixels) + gui.show() + + ti.profiler.print_kernel_profiler_info() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/algorithm/mgpcg_advanced.py b/local_packages/taichi/examples/algorithm/mgpcg_advanced.py new file mode 100644 index 0000000..2509a79 --- /dev/null +++ b/local_packages/taichi/examples/algorithm/mgpcg_advanced.py @@ -0,0 +1,272 @@ +import math +import time + +import taichi as ti + + +@ti.data_oriented +class MGPCG: + """ + Grid-based MGPCG solver for the possion equation. + + .. note:: + + This solver only runs on CPU and CUDA backends since it requires the + ``pointer`` SNode. + """ + + def __init__(self, dim=2, N=512, n_mg_levels=6, real=float): + """ + :parameter dim: Dimensionality of the fields. + :parameter N: Grid resolution. + :parameter n_mg_levels: Number of multigrid levels. + """ + + # grid parameters + self.use_multigrid = True + + self.N = N + self.n_mg_levels = n_mg_levels + self.pre_and_post_smoothing = 2 + self.bottom_smoothing = 50 + self.dim = dim + self.real = real + + self.N_ext = self.N // 2 # number of ext cells set so that that total grid size is still power of 2 + self.N_tot = 2 * self.N + + # setup sparse simulation data arrays + self.r = [ti.field(dtype=self.real) for _ in range(self.n_mg_levels)] # residual + self.z = [ti.field(dtype=self.real) for _ in range(self.n_mg_levels)] # M^-1 self.r + self.x = ti.field(dtype=self.real) # solution + self.p = ti.field(dtype=self.real) # conjugate gradient + self.Ap = ti.field(dtype=self.real) # matrix-vector product + self.alpha = ti.field(dtype=self.real) # step size + self.beta = ti.field(dtype=self.real) # step size + self.sum = ti.field(dtype=self.real) # storage for reductions + + indices = ti.ijk if self.dim == 3 else ti.ij + self.grid = ti.root.pointer(indices, [self.N_tot // 4]).dense(indices, 4).place(self.x, self.p, self.Ap) + + for l in range(self.n_mg_levels): + self.grid = ( + ti.root.pointer(indices, [self.N_tot // (4 * 2**l)]).dense(indices, 4).place(self.r[l], self.z[l]) + ) + + ti.root.place(self.alpha, self.beta, self.sum) + + @ti.func + def init_r(self, I, r_I): + I = I + self.N_ext + self.r[0][I] = r_I + self.z[0][I] = 0 + self.Ap[I] = 0 + self.p[I] = 0 + self.x[I] = 0 + + @ti.kernel + def init(self, r: ti.template(), k: ti.template()): + """ + Set up the solver for $\nabla^2 x = k r$, a scaled Poisson problem. + :parameter k: (scalar) A scaling factor of the right-hand side. + :parameter r: (ti.field) Unscaled right-hand side. + """ + for I in ti.grouped(ti.ndrange(*[self.N] * self.dim)): + self.init_r(I, r[I] * k) + + @ti.func + def get_x(self, I): + I = I + self.N_ext + return self.x[I] + + @ti.kernel + def get_result(self, x: ti.template()): + """ + Get the solution field. + + :parameter x: (ti.field) The field to store the solution + """ + for I in ti.grouped(ti.ndrange(*[self.N] * self.dim)): + x[I] = self.get_x(I) + + @ti.func + def neighbor_sum(self, x, I): + ret = ti.cast(0.0, self.real) + for i in ti.static(range(self.dim)): + offset = ti.Vector.unit(self.dim, i) + ret += x[I + offset] + x[I - offset] + return ret + + @ti.kernel + def compute_Ap(self): + for I in ti.grouped(self.Ap): + self.Ap[I] = 2 * self.dim * self.p[I] - self.neighbor_sum(self.p, I) + + @ti.kernel + def reduce(self, p: ti.template(), q: ti.template()): + self.sum[None] = 0 + for I in ti.grouped(p): + self.sum[None] += p[I] * q[I] + + @ti.kernel + def update_x(self): + for I in ti.grouped(self.p): + self.x[I] += self.alpha[None] * self.p[I] + + @ti.kernel + def update_r(self): + for I in ti.grouped(self.p): + self.r[0][I] -= self.alpha[None] * self.Ap[I] + + @ti.kernel + def update_p(self): + for I in ti.grouped(self.p): + self.p[I] = self.z[0][I] + self.beta[None] * self.p[I] + + @ti.kernel + def restrict(self, l: ti.template()): + for I in ti.grouped(self.r[l]): + res = self.r[l][I] - (2 * self.dim * self.z[l][I] - self.neighbor_sum(self.z[l], I)) + self.r[l + 1][I // 2] += res * 0.5 + + @ti.kernel + def prolongate(self, l: ti.template()): + for I in ti.grouped(self.z[l]): + self.z[l][I] = self.z[l + 1][I // 2] + + @ti.kernel + def smooth(self, l: ti.template(), phase: ti.template()): + # phase = red/black Gauss-Seidel phase + for I in ti.grouped(self.r[l]): + if (I.sum()) & 1 == phase: + self.z[l][I] = (self.r[l][I] + self.neighbor_sum(self.z[l], I)) / (2 * self.dim) + + def apply_preconditioner(self): + self.z[0].fill(0) + for l in range(self.n_mg_levels - 1): + for i in range(self.pre_and_post_smoothing << l): + self.smooth(l, 0) + self.smooth(l, 1) + self.z[l + 1].fill(0) + self.r[l + 1].fill(0) + self.restrict(l) + + for i in range(self.bottom_smoothing): + self.smooth(self.n_mg_levels - 1, 0) + self.smooth(self.n_mg_levels - 1, 1) + + for l in reversed(range(self.n_mg_levels - 1)): + self.prolongate(l) + for i in range(self.pre_and_post_smoothing << l): + self.smooth(l, 1) + self.smooth(l, 0) + + def solve(self, max_iters=-1, eps=1e-12, abs_tol=1e-12, rel_tol=1e-12, verbose=False): + """ + Solve a Poisson problem. + + :parameter max_iters: Specify the maximal iterations. -1 for no limit. + :parameter eps: Specify a non-zero value to prevent ZeroDivisionError. + :parameter abs_tol: Specify the absolute tolerance of loss. + :parameter rel_tol: Specify the tolerance of loss relative to initial loss. + """ + + self.reduce(self.r[0], self.r[0]) + initial_rTr = self.sum[None] + + tol = max(abs_tol, initial_rTr * rel_tol) + + # self.r = b - Ax = b since self.x = 0 + # self.p = self.r = self.r + 0 self.p + if self.use_multigrid: + self.apply_preconditioner() + else: + self.z[0].copy_from(self.r[0]) + + self.update_p() + + self.reduce(self.z[0], self.r[0]) + old_zTr = self.sum[None] + + # Conjugate gradients + it = 0 + while max_iters == -1 or it < max_iters: + # self.alpha = rTr / pTAp + self.compute_Ap() + self.reduce(self.p, self.Ap) + pAp = self.sum[None] + self.alpha[None] = old_zTr / (pAp + eps) + + # self.x = self.x + self.alpha self.p + self.update_x() + + # self.r = self.r - self.alpha self.Ap + self.update_r() + + # check for convergence + self.reduce(self.r[0], self.r[0]) + rTr = self.sum[None] + + if verbose: + print(f"iter {it}, |residual|_2={math.sqrt(rTr)}") + + if rTr < tol: + break + + # self.z = M^-1 self.r + if self.use_multigrid: + self.apply_preconditioner() + else: + self.z[0].copy_from(self.r[0]) + + # self.beta = new_rTr / old_rTr + self.reduce(self.z[0], self.r[0]) + new_zTr = self.sum[None] + self.beta[None] = new_zTr / (old_zTr + eps) + + # self.p = self.z + self.beta self.p + self.update_p() + old_zTr = new_zTr + + it += 1 + + +class MGPCG_Example(MGPCG): + def __init__(self): + super().__init__(dim=3, N=128, n_mg_levels=4) + + self.N_gui = 512 # gui resolution + + self.pixels = ti.field(dtype=float, shape=(self.N_gui, self.N_gui)) # image buffer + + @ti.kernel + def init(self): + for I in ti.grouped(ti.ndrange(*[self.N] * self.dim)): + r_I = 5.0 + for k in ti.static(range(self.dim)): + r_I *= ti.cos(5 * math.pi * I[k] / self.N) + self.init_r(I, r_I) + + @ti.kernel + def paint(self): + if ti.static(self.dim == 3): + kk = self.N_tot * 3 // 8 + for i, j in self.pixels: + ii = int(i * self.N / self.N_gui) + self.N_ext + jj = int(j * self.N / self.N_gui) + self.N_ext + self.pixels[i, j] = self.x[ii, jj, kk] / self.N_tot + + def run(self, verbose=False): + self.init() + self.solve(max_iters=400, verbose=verbose) + self.paint() + ti.tools.imshow(self.pixels) + ti.profiler.print_kernel_profiler_info() + + +if __name__ == "__main__": + ti.init(kernel_profiler=True) + solver = MGPCG_Example() + t = time.time() + solver.run(verbose=True) + print(f"Solver time: {time.time() - t:.3f} s") diff --git a/local_packages/taichi/examples/algorithm/poisson_disk_sampling.py b/local_packages/taichi/examples/algorithm/poisson_disk_sampling.py new file mode 100644 index 0000000..1307aaf --- /dev/null +++ b/local_packages/taichi/examples/algorithm/poisson_disk_sampling.py @@ -0,0 +1,167 @@ +""" +Poisson disk sampling in Taichi, a fancy version. +Based on Yuanming Hu's code: https://github.com/taichi-dev/poisson_disk_sampling + +User interface: + +1. Click on the window to restart the animation. +2. Press `p` to save screenshot. +""" + +import taichi as ti +import taichi.math as tm + +ti.init(arch=ti.gpu) + +grid_n = 20 +dx = 1 / grid_n +radius = dx * tm.sqrt(2) +desired_samples = 200 +grid = ti.field(dtype=int, shape=(grid_n, grid_n)) +samples = ti.Vector.field(2, dtype=float, shape=desired_samples) +window_size = 800 +dfield = ti.Vector.field(4, dtype=float, shape=(window_size, window_size)) +img = ti.Vector.field(3, dtype=float, shape=(window_size, window_size)) +iMouse = ti.Vector.field(2, dtype=float, shape=()) +iMouse[None] = [0.5, 0.5] +iResolution = tm.vec2(window_size) +head = ti.field(int, shape=()) +tail = ti.field(int, shape=()) +sample_count = ti.field(int, shape=()) + + +@ti.func +def coord_to_index(p): + return int(p * tm.vec2(grid_n)) + + +@ti.kernel +def refresh_scene(): + head[None] = 0 + tail[None] = 1 + sample_count[None] = 1 + + samples[0] = (p0 := iMouse[None]) + grid[coord_to_index(p0)] = 0 + + for i, j in grid: + grid[i, j] = -1 + + for i, j in dfield: + dfield[i, j] = tm.vec4(1e5) + img[i, j] = tm.vec3(1) + + +@ti.func +def find_nearest_point(p): + x, y = coord_to_index(p) + dmin = 1e5 + nearest = iMouse[None] + for i in range(ti.max(0, x - 2), ti.min(grid_n, x + 3)): + for j in range(ti.max(0, y - 2), ti.min(grid_n, y + 3)): + ind = grid[i, j] + if ind != -1: + q = samples[ind] + d = (q - p).norm() + if d < dmin: + dmin = d + nearest = q + return dmin, nearest + + +@ti.kernel +def poisson_disk_sample(num_samples: int) -> int: + while head[None] < tail[None] and head[None] < ti.min(num_samples, desired_samples): + source_x = samples[head[None]] + head[None] += 1 + + for _ in range(100): + theta = ti.random() * 2 * tm.pi + offset = tm.vec2(tm.cos(theta), tm.sin(theta)) * (1 + ti.random()) * radius + new_x = source_x + offset + new_index = coord_to_index(new_x) + + if 0 <= new_x[0] < 1 and 0 <= new_x[1] < 1: + collision = find_nearest_point(new_x)[0] < radius - 1e-6 + if not collision and tail[None] < desired_samples: + samples[tail[None]] = new_x + grid[new_index] = tail[None] + tail[None] += 1 + return tail[None] + + +@ti.func +def hash21(p): + return tm.fract(tm.sin(tm.dot(p, tm.vec2(127.619, 157.583))) * 43758.5453) + + +@ti.func +def sample_dist(uv): + uv = uv * iResolution + x, y = tm.clamp(0, iResolution - 1, uv).cast(int) + return dfield[x, y] + + +@ti.kernel +def compute_distance_field(): + for i, j in dfield: + uv = tm.vec2(i, j) / iResolution + d, p = find_nearest_point(uv) + d = (uv - p).norm() - radius / 2.0 + dfield[i, j] = tm.vec4(d, p.x, p.y, radius / 2.0) + + +@ti.kernel +def render(): + for i, j in img: + uv = tm.vec2(i, j) / iResolution.y + st = tm.fract(uv * grid_n) - 0.5 + dg = 0.5 - abs(st) + d1 = ti.min(dg.x, dg.y) + d1 = tm.smoothstep(0.05, 0.0, d1) + col = (1 - tm.vec3(d1)) * 0.7 + sf = 2 / iResolution.y + buf = sample_dist(uv) + bufSh = sample_dist(uv + tm.vec2(0.005, 0.015)) + cCol = tm.vec3(hash21(buf.yz + 0.3), hash21(buf.yz), hash21(buf.yz + 0.09)) + pat = (abs(tm.fract(-buf.x * 150) - 0.5) * 2) / 300 + col = tm.mix(col, tm.vec3(0), (1 - tm.smoothstep(0, 3 * sf, pat)) * 0.25) + ew, ew2 = 0.005, 0.008 + cCol2 = tm.mix(cCol, tm.vec3(1), 0.9) + col = tm.mix(col, tm.vec3(0), (1 - tm.smoothstep(0, sf * 2, bufSh.x)) * 0.4) + col = tm.mix(col, tm.vec3(0), 1 - tm.smoothstep(sf, 0, -buf.x)) + col = tm.mix(col, cCol2, 1 - tm.smoothstep(sf, 0, -buf.x - ew)) + col = tm.mix(col, tm.vec3(0), 1 - tm.smoothstep(sf, 0, -buf.x - ew2 - ew)) + col = tm.mix(col, cCol, 1 - tm.smoothstep(sf, 0.0, -buf.x - ew2 - ew * 2)) + col = tm.sqrt(ti.max(col, 0)) + img[i, j] = col + + +def main(): + refresh_scene() + gui = ti.ui.Window("Poisson Disk Sampling", res=(window_size, window_size)) + canvas = gui.get_canvas() + gui.fps_limit = 10 + while gui.running: + gui.get_event(ti.ui.PRESS) + if gui.is_pressed(ti.ui.ESCAPE): + gui.running = False + + if gui.is_pressed(ti.ui.LMB): + iMouse[None] = gui.get_cursor_pos() + refresh_scene() + + if gui.is_pressed("p"): + canvas.set_image(img) + gui.save_image("screenshot.png") + + poisson_disk_sample(sample_count[None]) + sample_count[None] += 1 + compute_distance_field() + render() + canvas.set_image(img) + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/algorithm/print_offset.py b/local_packages/taichi/examples/algorithm/print_offset.py new file mode 100644 index 0000000..a9f957d --- /dev/null +++ b/local_packages/taichi/examples/algorithm/print_offset.py @@ -0,0 +1,42 @@ +import taichi as ti + +ti.init(arch=ti.cpu, print_ir=True) + +n = 4 +m = 8 + +a = ti.field(dtype=ti.i32) +ti.root.dense(ti.ij, (1, 2)).dense(ti.ij, 2).dense(ti.ij, 2).place(a) + + +@ti.kernel +def fill(): + for i, j in a: + base = ti.get_addr(a.snode, [0, 0]) + a[i, j] = int(ti.get_addr(a.snode, [i, j]) - base) // 4 + + +def main(): + fill() + print(a.to_numpy()) + + gui = ti.GUI("layout", res=(256, 512), background_color=0xFFFFFF) + + while True: + for i in range(1, m): + gui.line(begin=(0, i / m), end=(1, i / m), radius=2, color=0x000000) + for i in range(1, n): + gui.line(begin=(i / n, 0), end=(i / n, 1), radius=2, color=0x000000) + for i in range(n): + for j in range(m): + gui.text( + f"{a[i, j]}", + ((i + 0.3) / n, (j + 0.75) / m), + font_size=30, + color=0x0, + ) + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/autodiff/diff_sph/diff_sph.py b/local_packages/taichi/examples/autodiff/diff_sph/diff_sph.py new file mode 100644 index 0000000..e7f3da2 --- /dev/null +++ b/local_packages/taichi/examples/autodiff/diff_sph/diff_sph.py @@ -0,0 +1,724 @@ +# Smoothed-particle hydrodynamics (SPH) is a computational method used for simulating the mechanics of continuum media, such as solid mechanics and fluid flows. +# Here we utilize SPH to simulate a fountain, who tries to hit a target given by the user. +# The SPH simulator here implemented using Taichi is differentiable. +# Therefore, it can be easily embedding into the training pipeline of a neural network modelled controller. + +import argparse +import os +import pickle as pkl + +import matplotlib.pyplot as plt +import numpy as np + +import taichi as ti + +parser = argparse.ArgumentParser() +parser.add_argument("--train", action="store_true", help="whether train model, default false") +parser.add_argument("place_holder", nargs="*") +args = parser.parse_args() + +TRAIN = args.train +TRAIN_OUTPUT_IMG = False +TRAIN_VISUAL = False +TRAIN_VISUAL_SHOW = False +INFER_OUTPUT_IMG = False +arch = ti.vulkan if ti._lib.core.with_vulkan() else ti.cuda +ti.init(arch=arch, device_memory_fraction=0.5, random_seed=5) +screen_res = (800, 800) + +dtype_f_np = np.float32 +real = ti.f32 +scalar = lambda: ti.field(dtype=real) + + +@ti.data_oriented +class SGD: + def __init__(self, params, lr): + self.params = params + self.lr = lr + + def step(self): + for w in self.params: + self._step(w) + + @ti.kernel + def _step(self, w: ti.template()): + for I in ti.grouped(w): + w[I] -= ti.min(ti.max(w.grad[I], -20.0), 20.0) * self.lr + + def zero_grad(self): + for w in self.params: + w.grad.fill(0.0) + + +@ti.data_oriented +class Linear: + def __init__( + self, + n_models, + batch_size, + n_steps, + n_input, + n_hidden, + n_output, + needs_grad=False, + activation=False, + ): + self.n_models = n_models + self.batch_size = batch_size + self.n_steps = n_steps + self.n_input = n_input + self.n_hidden = n_hidden + self.n_output = n_output + self.activation = activation + + self.hidden = scalar() + self.output = scalar() + + # array of structs + self.batch_node = ti.root.dense(ti.i, self.n_models) + self.n_hidden_node = self.batch_node.dense(ti.j, self.n_hidden) + self.weights1_node = self.n_hidden_node.dense(ti.k, self.n_input) + + self.batch_node.dense(ti.axes(1, 2, 3), (self.n_steps, self.batch_size, self.n_hidden)).place(self.hidden) + self.batch_node.dense(ti.axes(1, 2, 3), (self.n_steps, self.batch_size, self.n_output)).place(self.output) + + self.weights1 = scalar() + self.bias1 = scalar() + + self.weights1_node.place(self.weights1) + self.n_hidden_node.place(self.bias1) + + if needs_grad: + ti.root.lazy_grad() + + def parameters(self): + return [self.weights1, self.bias1] + + @ti.kernel + def weights_init(self): + q1 = ti.sqrt(6 / self.n_input) * 0.01 + for model_id, i, j in ti.ndrange(self.n_models, self.n_hidden, self.n_input): + self.weights1[model_id, i, j] = (ti.random() * 2 - 1) * q1 + + @ti.kernel + def _forward(self, t: ti.i32, nn_input: ti.template()): + for model_id, k, i, j in ti.ndrange(self.n_models, self.batch_size, self.n_hidden, self.n_input): + self.hidden[model_id, t, k, i] += self.weights1[model_id, i, j] * nn_input[model_id, t, k, j] + if ti.static(self.activation): + for model_id, k, i in ti.ndrange(self.n_models, self.batch_size, self.n_hidden): + self.output[model_id, t, k, i] = ti.tanh(self.hidden[model_id, t, k, i] + self.bias1[model_id, i]) + else: + for model_id, k, i in ti.ndrange(self.n_models, self.batch_size, self.n_hidden): + self.output[model_id, t, k, i] = self.hidden[model_id, t, k, i] + self.bias1[model_id, i] + + @ti.kernel + def clear(self): + for I in ti.grouped(self.hidden): + self.hidden[I] = 0.0 + for I in ti.grouped(self.output): + self.output[I] = 0.0 + + def forward(self, t, nn_input): + self._forward(t, nn_input) + + def dump_weights(self, name="save.pkl"): + w_val = [] + for w in self.parameters(): + w = w.to_numpy() + w_val.append(w[0]) + with open(name, "wb") as f: + pkl.dump(w_val, f) + + def load_weights(self, name="save.pkl", model_id=0): + with open(name, "rb") as f: + w_val = pkl.load(f) + self.load_weights_from_value(w_val, model_id) + + def load_weights_from_value(self, w_val, model_id=0): + for w, val in zip(self.parameters(), w_val): + if val.shape[0] == 1: + val = val[0] + self.copy_from_numpy(w, val, model_id) + + @staticmethod + @ti.kernel + def copy_from_numpy(dst: ti.template(), src: ti.types.ndarray(), model_id: ti.i32): + for I in ti.grouped(src): + dst[model_id, I] = src[I] + + +def init_nn_model(): + global BATCH_SIZE, steps, input_states, fc1, fc2 + global training_sample_num, training_data, loss + global optimizer + # NN model + model_num = 1 + steps = 128 + n_input = 3 + n_hidden = 32 + n_output = 16 + n_output_act = 3 + learning_rate = 1e-3 + loss = ti.field(float, shape=(), needs_grad=True) + + if TRAIN: + BATCH_SIZE = 16 + input_states = ti.field(float, shape=(model_num, steps, BATCH_SIZE, n_input), needs_grad=True) + fc1 = Linear( + n_models=model_num, + batch_size=BATCH_SIZE, + n_steps=steps, + n_input=n_input, + n_hidden=n_hidden, + n_output=n_output, + needs_grad=True, + activation=False, + ) + fc2 = Linear( + n_models=model_num, + batch_size=BATCH_SIZE, + n_steps=steps, + n_input=n_output, + n_hidden=n_hidden, + n_output=n_output_act, + needs_grad=True, + activation=True, + ) + fc1.weights_init() + fc2.weights_init() + NNs = [fc1, fc2] + parameters = [] + for layer in NNs: + parameters.extend(layer.parameters()) + optimizer = SGD(params=parameters, lr=learning_rate) + + # Training data generation + sample_num = BATCH_SIZE * 25 + x_range = (0.05, 0.45) + y_range = (0.4, 1.0) + z_range = (0.05, 0.45) + + def targets_generation(num, x_range_, y_range_, z_range_): + low = np.array([x_range_[0], y_range_[0], z_range_[0]]) + high = np.array([x_range_[1], y_range_[1], z_range_[1]]) + return np.array([np.random.uniform(low=low, high=high) for _ in range(num)]) + + np.random.seed(0) + all_data = targets_generation(sample_num, x_range, y_range, z_range) + training_sample_num = BATCH_SIZE * 4 + training_data = all_data[:training_sample_num, :] + test_data = all_data[training_sample_num:, :] + print("training data ", training_data.shape, "test data ", test_data.shape) + else: + BATCH_SIZE = 1 + input_states = ti.field(float, shape=(model_num, steps, BATCH_SIZE, n_input), needs_grad=False) + fc1 = Linear( + n_models=model_num, + batch_size=BATCH_SIZE, + n_steps=steps, + n_input=n_input, + n_hidden=n_hidden, + n_output=n_output, + needs_grad=False, + activation=False, + ) + fc2 = Linear( + n_models=model_num, + batch_size=BATCH_SIZE, + n_steps=steps, + n_input=n_output, + n_hidden=n_hidden, + n_output=n_output_act, + needs_grad=False, + activation=True, + ) + file_dir_path = os.path.dirname(os.path.realpath(__file__)) + fc1.load_weights(f"{file_dir_path}/fc1_pretrained.pkl", model_id=0) + fc2.load_weights(f"{file_dir_path}/fc2_pretrained.pkl", model_id=0) + print(f"Model at {file_dir_path} loaded. ") + + +init_nn_model() + +# Simulation configuration +boundary_box_np = np.array([[0, 0, 0], [0.5, 1.5, 0.5]], dtype=dtype_f_np) +spawn_box_np = np.array([[0.0, 0.0, 0.0], [0.5, 0.05, 0.5]], dtype=dtype_f_np) +target_box_np = np.array([[0.15, 0.90, 0.15], [0.2, 0.95, 0.2]], dtype=dtype_f_np) + +target_centers = ti.Vector.field(3, float, shape=BATCH_SIZE, needs_grad=True) +min_dist = ti.field(float, shape=BATCH_SIZE, needs_grad=True) +max_dist = ti.field(float, shape=BATCH_SIZE, needs_grad=True) +max_height = ti.field(float, shape=BATCH_SIZE, needs_grad=True) +max_left = ti.field(float, shape=BATCH_SIZE, needs_grad=True) +max_right = ti.field(float, shape=BATCH_SIZE, needs_grad=True) +jet_force_max = ti.Vector([9.81 * 3, 9.81 * 10, 9.81 * 3]) + +# Simulation parameters +particle_radius = 0.01 +particle_diameter = particle_radius * 2 +N_np = ((spawn_box_np[1] - spawn_box_np[0]) / particle_diameter + 1).astype(int) +N_target_np = ((target_box_np[1] - target_box_np[0]) / particle_diameter + 1).astype(int) + +H = 4.0 * particle_radius +fluid_particle_num = N_np[0] * N_np[1] * N_np[2] +target_particle_num = N_target_np[0] * N_target_np[1] * N_target_np[2] +particle_num = fluid_particle_num + target_particle_num +print(f"Particle num: {particle_num}") + +F_pos = ti.Vector.field(3, float) +F_vel = ti.Vector.field(3, float) +F_acc = ti.Vector.field(3, float) +F_jet_force = ti.Vector.field(3, float, shape=(steps, BATCH_SIZE), needs_grad=True) + +col = ti.Vector.field(3, float) +material = ti.field(int) +den = ti.field(float) +pre = ti.field(float) + +pos_vis_buffer = ti.Vector.field(3, float, shape=particle_num) +pos_output_buffer = ti.Vector.field(3, float, shape=(steps, particle_num)) +ti.root.dense(ti.ijk, (BATCH_SIZE, steps, int(particle_num))).place(F_pos, F_vel, F_acc, den, pre) +ti.root.dense(ti.i, int(particle_num)).place(material, col) +ti.root.lazy_grad() + +boundary_box = ti.Vector.field(3, float, shape=2) +spawn_box = ti.Vector.field(3, float, shape=2) +target_box = ti.Vector.field(3, float, shape=2) +N_fluid = ti.Vector([N_np[0], N_np[1], N_np[2]]) +N_target = ti.Vector([N_target_np[0], N_target_np[1], N_target_np[2]]) + +gravity = ti.Vector([0.0, -9.8, 0.0]) + +boundary_box.from_numpy(boundary_box_np) +spawn_box.from_numpy(spawn_box_np) +target_box.from_numpy(target_box_np) + +rest_density = 1000.0 +mass = rest_density * particle_diameter * particle_diameter * particle_diameter * 0.8 +pressure_scale = 10000.0 +viscosity_scale = 0.1 * 3 +tension_scale = 0.005 +gamma = 1.0 +substeps = 5 +dt = 0.016 / substeps +eps = 1e-6 +damping = 0.5 +pi = 3.1415926535 + + +@ti.func +def W_poly6(R, h): + r = R.norm(eps) + res = 0.0 + if r <= h: + h2 = h * h + h4 = h2 * h2 + h9 = h4 * h4 * h + h2_r2 = h2 - r * r + res = 315.0 / (64 * pi * h9) * h2_r2 * h2_r2 * h2_r2 + else: + res = 0.0 + return res + + +@ti.func +def W_spiky_gradient(R, h): + r = R.norm(eps) + res = ti.Vector([0.0, 0.0, 0.0]) + if r == 0.0: + res = ti.Vector([0.0, 0.0, 0.0]) + elif r <= h: + h3 = h * h * h + h6 = h3 * h3 + h_r = h - r + res = -45.0 / (pi * h6) * h_r * h_r * (R / r) + else: + res = ti.Vector([0.0, 0.0, 0.0]) + return res + + +W = W_poly6 +W_gradient = W_spiky_gradient + + +@ti.kernel +def initialize_fluid_particle(t: ti.int32, pos: ti.template(), N_fluid_: ti.template()): + # Allocate fluid + for bs, i in ti.ndrange(BATCH_SIZE, fluid_particle_num): + pos[bs, t, i] = ( + ti.Vector( + [ + int(i % N_fluid_[0]), + int(i / N_fluid_[0]) % N_fluid_[1], + int(i / N_fluid_[0] / N_fluid_[1] % N_fluid_[2]), + ] + ) + * particle_diameter + + spawn_box[0] + ) + F_vel[bs, t, i] = ti.Vector([0.0, 0.0, 0.0]) + material[i] = 0 + col[i] = ti.Vector([0.4, 0.7, 1.0]) + F_acc.grad[bs, t, i] = ti.Vector([0.0, 0.0, 0.0]) + pos.grad[bs, t, i] = ti.Vector([0.0, 0.0, 0.0]) + F_vel.grad[bs, t, i] = ti.Vector([0.0, 0.0, 0.0]) + + +@ti.kernel +def initialize_dists(): + for bs in range(BATCH_SIZE): + min_dist[bs] = 1000.0 + max_height[bs] = 0.0 + max_left[bs] = 0.0 + max_right[bs] = 0.0 + + +@ti.kernel +def initialize_target_particle(t: ti.int32, pos: ti.template(), N_target_: ti.template(), current_pos: ti.int32): + # Allocate target cube + for bs, i in ti.ndrange(BATCH_SIZE, (fluid_particle_num, fluid_particle_num + target_particle_num)): + pos[bs, t, i] = ( + ti.Vector( + [ + int(i % N_target_[0]), + int(i / N_target_[0]) % N_target_[1], + int(i / N_target_[0] / N_target_[1] % N_target_[2]), + ] + ) + * particle_diameter + + target_centers[current_pos] + ) + F_vel[bs, t, i] = ti.Vector([0.0, 0.0, 0.0]) + material[i] = 1 + col[i] = ti.Vector([1.0, 0.65, 0.0]) + F_acc.grad[bs, t, i] = ti.Vector([0.0, 0.0, 0.0]) + pos.grad[bs, t, i] = ti.Vector([0.0, 0.0, 0.0]) + F_vel.grad[bs, t, i] = ti.Vector([0.0, 0.0, 0.0]) + + +@ti.kernel +def initialize_density(t: ti.int32): + for bs, i in ti.ndrange(BATCH_SIZE, particle_num): + den[bs, t, i] = 0.0 + + +@ti.kernel +def update_density(t: ti.int32): + for bs, i in ti.ndrange(BATCH_SIZE, particle_num): + for j in range(particle_num): + R = F_pos[bs, t, i] - F_pos[bs, t, j] + den[bs, t, i] += mass * W(R, H) + + +@ti.kernel +def update_pressure(t: ti.int32): + for bs, i in ti.ndrange(BATCH_SIZE, particle_num): + pre[bs, t, i] = pressure_scale * ti.max(pow(den[bs, t, i] / rest_density, gamma) - 1, 0) + + +@ti.kernel +def controller_output(t: ti.int32): + for bs in range(BATCH_SIZE): + for j in ti.static(range(3)): + F_jet_force[t, bs][j] = fc2.output[0, t, bs, j] * jet_force_max[j] + + +@ti.kernel +def apply_force(t: ti.int32): + for bs, i in ti.ndrange(BATCH_SIZE, particle_num): + if material[i] == 1: + F_acc[bs, t, i] = ti.Vector([0.0, 0.0, 0.0]) + else: + if ( + F_pos[bs, t, i][0] > 0.2 + and F_pos[bs, t, i][0] < 0.3 + and F_pos[bs, t, i][1] < 0.2 + and F_pos[bs, t, i][2] > 0.2 + and F_pos[bs, t, i][2] < 0.3 + ): + indicator = (steps - t) // (steps // 2) + F_acc[bs, t, i] = F_jet_force[t, bs] + gravity + indicator * (-gravity) * 0.1 + else: + F_acc[bs, t, i] = gravity + + +@ti.kernel +def update_force(t: ti.int32): + for bs, i in ti.ndrange(BATCH_SIZE, particle_num): + for j in range(particle_num): + R = F_pos[bs, t, i] - F_pos[bs, t, j] + # Pressure forces + F_acc[bs, t, i] += ( + -mass + * (pre[bs, t, i] / (den[bs, t, i] * den[bs, t, i]) + pre[bs, t, j] / (den[bs, t, j] * den[bs, t, j])) + * W_gradient(R, H) + ) + + # Viscosity forces + F_acc[bs, t, i] += ( + viscosity_scale + * mass + * (F_vel[bs, t, i] - F_vel[bs, t, j]).dot(R) + / (R.norm(eps) + 0.01 * H * H) + / den[bs, t, j] + * W_gradient(R, H) + ) + + +@ti.kernel +def advance(t: ti.int32): + for bs, i in ti.ndrange(BATCH_SIZE, particle_num): + if material[i] == 0: + F_vel[bs, t, i] = F_vel[bs, t - 1, i] + F_acc[bs, t - 1, i] * dt + F_pos[bs, t, i] = F_pos[bs, t - 1, i] + F_vel[bs, t, i] * dt + + +@ti.kernel +def boundary_handle(t: ti.int32): + for bs, i in ti.ndrange(BATCH_SIZE, particle_num): + collision_normal = ti.Vector([0.0, 0.0, 0.0]) + for j in ti.static(range(3)): + if F_pos[bs, t, i][j] < boundary_box[0][j]: + F_pos[bs, t, i][j] = boundary_box[0][j] + collision_normal[j] += -1.0 + for j in ti.static(range(3)): + if F_pos[bs, t, i][j] > boundary_box[1][j]: + F_pos[bs, t, i][j] = boundary_box[1][j] + collision_normal[j] += 1.0 + collision_normal_length = collision_normal.norm() + if collision_normal_length > eps: + collision_normal /= collision_normal_length + F_vel[bs, t, i] -= (1.0 + damping) * collision_normal.dot(F_vel[bs, t, i]) * collision_normal + + +@ti.kernel +def compute_dist(t: ti.int32): + for bs, i in ti.ndrange(BATCH_SIZE, particle_num): + if material[i] == 0: + dist = 0.0 + for j in ti.static(range(3)): + dist += (F_pos[bs, t, i][j] - target_centers[bs][j]) ** 2 + dist_sqr = ti.sqrt(dist) + ti.atomic_min(min_dist[bs], dist_sqr) + ti.atomic_max(max_height[bs], F_pos[bs, t, i][1]) + ti.atomic_max(max_left[bs], F_pos[bs, t, i][0]) + ti.atomic_max(max_right[bs], F_pos[bs, t, i][2]) + + +@ti.kernel +def compute_loss(t: ti.int32): + for bs in range(BATCH_SIZE): + max_dist[bs] = ti.sqrt( + (max_left[bs] - target_centers[bs][0]) ** 2 + + (max_right[bs] - target_centers[bs][2]) ** 2 + + (max_height[bs] - target_centers[bs][1]) ** 2 + ) + loss[None] += (min_dist[bs] + 0.2 * max_dist[bs]) / BATCH_SIZE + + +@ti.kernel +def copy_back(t: ti.int32): + for bs, i in ti.ndrange(BATCH_SIZE, particle_num): + F_pos[bs, 0, i] = F_pos[bs, t, i] + F_vel[bs, 0, i] = F_vel[bs, t, i] + F_acc[bs, 0, i] = ti.Vector([0.0, 0.0, 0.0]) + + +@ti.kernel +def copy_to_vis(t: ti.int32, bs: ti.int32): + for i in range(particle_num): + for j in ti.static(range(3)): + pos_vis_buffer[i][j] = F_pos[bs, t, i][j] + + +@ti.kernel +def copy_to_output_buffer(t: ti.int32, bs: ti.int32): + for i in range(particle_num): + for j in ti.static(range(3)): + pos_output_buffer[t, i][j] = F_pos[bs, t, i][j] + + +@ti.kernel +def copy_from_output_to_vis(t: ti.int32): + for i in range(particle_num): + for j in ti.static(range(3)): + pos_vis_buffer[i][j] = pos_output_buffer[t, i][j] + + +@ti.kernel +def fill_target_centers(current_pos: ti.int32, data: ti.types.ndarray()): + for i in range(current_pos, current_pos + BATCH_SIZE): + for j in ti.static(range(3)): + target_centers[i][j] = data[i, j] + print("target_centers ", target_centers[current_pos]) + + +@ti.kernel +def fill_input_states(current_pos: ti.int32): + for t, bs in ti.ndrange(steps, (current_pos, current_pos + BATCH_SIZE)): + for j in ti.static(range(3)): + input_states[0, t, bs, j] = target_centers[bs][j] + + +def main(): + show_window = True + if TRAIN: + show_window = False + window = ti.ui.Window("Diff SPH", screen_res, show_window=show_window) + scene = window.get_scene() + camera = ti.ui.Camera() + camera.position(0.5, 1.0, 2.0) + camera.up(0.0, 1.0, 0.0) + camera.lookat(0.5, 0.5, 0.5) + camera.fov(70) + scene.set_camera(camera) + canvas = window.get_canvas() + gui = window.get_gui() + movement_speed = 0.02 + + if TRAIN: + losses = [] + losses_epoch_avg = [] + opt_iters = 7 + for opt_iter in range(opt_iters): + loss_epoch = 0.0 + cnt = 0 + for current_data_offset in range(0, training_sample_num, BATCH_SIZE): + fill_target_centers(current_data_offset, training_data) + fill_input_states(current_data_offset) + initialize_fluid_particle(0, F_pos, N_fluid) + initialize_dists() + initialize_target_particle(0, F_pos, N_target, current_data_offset) + fc1.clear() + fc2.clear() + with ti.ad.Tape(loss=loss): + for i in range(1, steps): + initialize_density(i - 1) + update_density(i - 1) + update_pressure(i - 1) + fc1.forward(i - 1, input_states) + fc2.forward(i - 1, fc1.output) + controller_output(i - 1) + apply_force(i - 1) + update_force(i - 1) + advance(i) + boundary_handle(i) + if i % substeps == 0: + copy_to_output_buffer(i, 0) + compute_dist(i) + compute_loss(steps - 1) + optimizer.step() + print( + f"current opt progress: {current_data_offset + BATCH_SIZE}/{training_sample_num}, loss: {loss[None]}" + ) + losses.append(loss[None]) + loss_epoch += loss[None] + cnt += 1 + print(f"opt iter {opt_iter} done. Average loss: {loss_epoch / cnt}") + losses_epoch_avg.append(loss_epoch / cnt) + + if TRAIN_VISUAL: + if opt_iter % 1 == 0: + os.makedirs(f"output_img/{opt_iter}", exist_ok=True) + for i in range(1, steps): + if i % substeps == 0: + copy_from_output_to_vis(i) + scene.set_camera(camera) + scene.point_light((2.0, 2.0, 2.0), color=(1.0, 1.0, 1.0)) + scene.particles(pos_vis_buffer, radius=particle_radius, per_vertex_color=col) + canvas.scene(scene) + if TRAIN_OUTPUT_IMG: + if i % substeps == 0: + window.save_image(f"output_img/{opt_iter}/{i:04}.png") + if TRAIN_VISUAL_SHOW: + window.show() + if opt_iter % 2 == 0: + os.makedirs(f"saved_models/{opt_iter}", exist_ok=True) + fc1.dump_weights(name=f"saved_models/{opt_iter}/fc1_{opt_iter:04}.pkl") + fc2.dump_weights(name=f"saved_models/{opt_iter}/fc2_{opt_iter:04}.pkl") + + plt.plot([i for i in range(len(losses))], losses, label="loss per iteration") + plt.plot( + [i * (training_sample_num // BATCH_SIZE) for i in range(len(losses_epoch_avg))], + losses_epoch_avg, + label="loss epoch avg.", + ) + plt.title("Training Loss") + plt.xlabel("Training Iterations") + plt.ylabel("Loss") + plt.legend() + plt.show() + else: + current_data_offset = 0 + initialize_fluid_particle(0, F_pos, N_fluid) + target_centers[current_data_offset][0] = 0.25 + target_centers[current_data_offset][1] = 0.50 + target_centers[current_data_offset][2] = 0.25 + + print("Start... ") + cnt = 0 + paused = ti.field(int, shape=()) + while window.running: + with gui.sub_window("Diff SPH", 0.05, 0.05, 0.2, 0.2) as w: + w.text("Space: pause") + w.text("Set target positions:") + + target_centers[current_data_offset][0] = w.slider_float( + "X", target_centers[current_data_offset][0], 0.05, 0.45 + ) + target_centers[current_data_offset][1] = w.slider_float( + "Y", target_centers[current_data_offset][1], 0.4, 1.0 + ) + target_centers[current_data_offset][2] = w.slider_float( + "Z", target_centers[current_data_offset][2], 0.05, 0.45 + ) + + if not paused[None]: + fill_input_states(current_data_offset) + initialize_target_particle(0, F_pos, N_target, current_data_offset) + fc1.clear() + fc2.clear() + for i in range(1, substeps): + initialize_density(i - 1) + update_density(i - 1) + update_pressure(i - 1) + fc1.forward(i - 1, input_states) + fc2.forward(i - 1, fc1.output) + controller_output(i - 1) + apply_force(i - 1) + update_force(i - 1) + advance(i) + boundary_handle(i) + copy_to_vis(substeps - 1, 0) + copy_back(substeps - 1) + cnt += 1 + # user controlling of camera + position_change = ti.Vector([0.0, 0.0, 0.0]) + up = ti.Vector([0.0, 1.0, 0.0]) + # move camera up and down + if window.is_pressed("e"): + position_change = up * movement_speed + if window.is_pressed("q"): + position_change = -up * movement_speed + + for e in window.get_events(ti.ui.PRESS): + if e.key == ti.ui.SPACE: + paused[None] = not paused[None] + camera.position(*(camera.curr_position + position_change)) + camera.lookat(*(camera.curr_lookat + position_change)) + camera.track_user_inputs(window, movement_speed=movement_speed, hold_key=ti.ui.RMB) + + scene.set_camera(camera) + scene.point_light((2.0, 2.0, 2.0), color=(1.0, 1.0, 1.0)) + scene.particles(pos_vis_buffer, radius=particle_radius, per_vertex_color=col) + canvas.scene(scene) + if INFER_OUTPUT_IMG: + if cnt % 2 == 0: + os.makedirs("demo_output_interactive/", exist_ok=True) + window.save_image(f"demo_output_interactive/{cnt:04}.png") + window.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/autodiff/diff_sph/fc1_pretrained.pkl b/local_packages/taichi/examples/autodiff/diff_sph/fc1_pretrained.pkl new file mode 100644 index 0000000..c6791e6 Binary files /dev/null and b/local_packages/taichi/examples/autodiff/diff_sph/fc1_pretrained.pkl differ diff --git a/local_packages/taichi/examples/autodiff/diff_sph/fc2_pretrained.pkl b/local_packages/taichi/examples/autodiff/diff_sph/fc2_pretrained.pkl new file mode 100644 index 0000000..6c73c5c Binary files /dev/null and b/local_packages/taichi/examples/autodiff/diff_sph/fc2_pretrained.pkl differ diff --git a/local_packages/taichi/examples/autodiff/jacobian.py b/local_packages/taichi/examples/autodiff/jacobian.py new file mode 100644 index 0000000..72fcb7b --- /dev/null +++ b/local_packages/taichi/examples/autodiff/jacobian.py @@ -0,0 +1,48 @@ +import matplotlib.pyplot as plt +import numpy as np + +import taichi as ti + +ti.init() + +N_param = 50 +N_loss = 100 +params = ti.field(float, shape=N_param, needs_dual=True, needs_grad=True) +losses = ti.field(float, shape=N_loss, needs_dual=True, needs_grad=True) + + +@ti.kernel +def func(): + for j in range(N_loss): + for i in range(N_param): + losses[j] += ti.sin(j / 16 * np.pi) * ti.sin(i / 16 * np.pi) * params[i] + + +# Compute Jacobian matrix via forward mode autodiff +jacobian_fwd = np.zeros((N_loss, N_param)) +seed = [0.0 for _ in range(N_param)] +for n_p in range(N_param): + # Compute Jacobian-vector product (Jvp) N_param times + seed[n_p] = 1.0 + with ti.ad.FwdMode(loss=losses, param=params, seed=seed): + func() + jacobian_fwd[:, n_p] = losses.dual.to_numpy() + seed[n_p] = 0.0 + +# Compute Jacobian matrix via reverse mode autodiff +jacobian_rev = np.zeros((N_loss, N_param)) +for n_l in range(N_loss): + # Compute vector-Jacobian product (vJp) N_loss times + losses.grad[n_l] = 1.0 + func.grad() + jacobian_rev[n_l, :] = params.grad.to_numpy() + params.grad.fill(0) + losses.grad[n_l] = 0.0 + +plt.subplot(121) +plt.imshow(jacobian_fwd) +plt.title("Jacobian Matrix \n (via forward mode)") +plt.subplot(122) +plt.imshow(jacobian_rev) +plt.title("Jacobian Matrix \n (via reverse mode)") +plt.show() diff --git a/local_packages/taichi/examples/autodiff/minimization.py b/local_packages/taichi/examples/autodiff/minimization.py new file mode 100644 index 0000000..05e87b0 --- /dev/null +++ b/local_packages/taichi/examples/autodiff/minimization.py @@ -0,0 +1,44 @@ +import random + +import taichi as ti + +ti.init(arch=ti.cpu) + +n = 8 +x = ti.field(dtype=ti.f32, shape=n, needs_grad=True) +y = ti.field(dtype=ti.f32, shape=n) +L = ti.field(dtype=ti.f32, shape=(), needs_grad=True) + + +@ti.kernel +def reduce(): + for i in range(n): + L[None] += 0.5 * (x[i] - y[i]) ** 2 + + +@ti.kernel +def gradient_descent(): + for i in x: + x[i] -= x.grad[i] * 0.1 + + +def main(): + # Initialize vectors + for i in range(n): + x[i] = random.random() + y[i] = random.random() + + # Optimize with 100 gradient descent iterations + for k in range(100): + with ti.ad.Tape(loss=L): + reduce() + print("Loss =", L[None]) + gradient_descent() + + for i in range(n): + # Now you should approximately have x[i] == y[i] + print(x[i], y[i]) + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/autodiff/regression.py b/local_packages/taichi/examples/autodiff/regression.py new file mode 100644 index 0000000..b4aa561 --- /dev/null +++ b/local_packages/taichi/examples/autodiff/regression.py @@ -0,0 +1,101 @@ +import random + +import matplotlib.pyplot as plt +import numpy as np + +import taichi as ti + +ti.init(arch=ti.cpu) + +number_coeffs = 4 +learning_rate = 1e-4 + +N = 32 +x, y = ti.field(ti.f32, shape=N, needs_grad=True), ti.field(ti.f32, shape=N, needs_grad=True) +coeffs = ti.field(ti.f32, shape=number_coeffs, needs_grad=True) +loss = ti.field(ti.f32, shape=(), needs_grad=True) + + +@ti.kernel +def regress(): + for i in x: + v = x[i] + est = 0.0 + for j in ti.static(range(number_coeffs)): + est += coeffs[j] * (v**j) + loss[None] += 0.5 * (y[i] - est) ** 2 + + +@ti.kernel +def update(): + for i in ti.static(range(number_coeffs)): + coeffs[i] -= learning_rate * coeffs.grad[i] + + +xs = [] +ys = [] + + +def initialize(): + for i in range(N): + v = random.random() * 5 - 2.5 + xs.append(v) + x[i] = v + y[i] = (v - 1) * (v - 2) * (v + 2) + random.random() - 0.5 + + regress() + + print("y") + for i in range(N): + y.grad[i] = 1 + ys.append(y[i]) + print() + + +def regress_raw(): + use_tape = True + + for i in range(1000): + if use_tape: + with ti.ad.Tape(loss=loss): + regress() + else: + ti.ad.clear_all_gradients() + loss[None] = 0 + loss.grad[None] = 1 + regress() + regress.grad() + print("Loss =", loss[None]) + update() + for j in range(number_coeffs): + print(coeffs[j], end=", ") + print() + + +def draw(): + curve_xs = np.arange(-2.5, 2.5, 0.01) + curve_ys = curve_xs * 0 + for i in range(number_coeffs): + curve_ys += coeffs[i] * np.power(curve_xs, i) + + plt.title("Nonlinear Regression with Gradient Descent (3rd order polynomial)") + ax = plt.gca() + ax.scatter(xs, ys, label="data", color="r") + ax.plot(curve_xs, curve_ys, label="fitted") + ax.legend() + ax.grid(True) + ax.spines["left"].set_position("zero") + ax.spines["right"].set_color("none") + ax.spines["bottom"].set_position("zero") + ax.spines["top"].set_color("none") + plt.show() + + +def main(): + initialize() + regress_raw() + draw() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/autodiff/simple_derivative.py b/local_packages/taichi/examples/autodiff/simple_derivative.py new file mode 100644 index 0000000..c538995 --- /dev/null +++ b/local_packages/taichi/examples/autodiff/simple_derivative.py @@ -0,0 +1,69 @@ +import matplotlib.pyplot as plt + +import taichi as ti + +ti.init(arch=ti.cpu) + +N = 2048 +x, y = ti.field(ti.f32), ti.field(ti.f32) + +ti.root.dense(ti.i, N).place(x, x.grad, y, y.grad) + + +@ti.kernel +def poly(): + for i in x: + v = x[i] + ret = 0.0 + guard = 0.2 + if v < -guard or v > guard: + ret = 4 / ti.max(v, 0.1) + else: + ret = 0 + y[i] = ret + + +xs = [] +ys = [] +grad_xs = [] + + +def initialize(): + for i in range(N): + v = ((i + 0.5) / N) * 2 - 1 + xs.append(v) + x[i] = v + + poly() + + for i in range(N): + y.grad[i] = 1 + ys.append(y[i]) + + poly.grad() + print("grad_x") + for i in range(N): + grad_xs.append(x.grad[i]) + + +def draw(): + plt.title("Auto Diff") + ax = plt.gca() + ax.plot(xs, ys, label="f(x)") + ax.plot(xs, grad_xs, label="f'(x)") + ax.legend() + ax.grid(True) + ax.spines["left"].set_position("zero") + ax.spines["right"].set_color("none") + ax.spines["bottom"].set_position("zero") + ax.spines["top"].set_color("none") + plt.show() + + +def main(): + initialize() + draw() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/features/gui/fullscreen.py b/local_packages/taichi/examples/features/gui/fullscreen.py new file mode 100644 index 0000000..40ed503 --- /dev/null +++ b/local_packages/taichi/examples/features/gui/fullscreen.py @@ -0,0 +1,20 @@ +import taichi as ti + +ti.init(ti.gpu) + +res = (1920, 1080) +img = ti.Vector.field(3, float, res) + + +@ti.kernel +def render(t: float): + for i, j in img: + a = ti.Vector([i / res[0], j / res[1] + 2, i / res[0] + 4]) + img[i, j] = ti.cos(a + t) * 0.5 + 0.5 + + +gui = ti.GUI("UV", res, fullscreen=True, fast_gui=True) +while not gui.get_event(ti.GUI.ESCAPE): + render(gui.frame * 0.04) + gui.set_image(img) + gui.show() diff --git a/local_packages/taichi/examples/features/gui/gui_image_io.py b/local_packages/taichi/examples/features/gui/gui_image_io.py new file mode 100644 index 0000000..be43fa9 --- /dev/null +++ b/local_packages/taichi/examples/features/gui/gui_image_io.py @@ -0,0 +1,27 @@ +import os + +import taichi as ti + +ti.init(arch=ti.cpu) + +pixel = ti.field(ti.u8, shape=(512, 512, 3)) + + +@ti.kernel +def paint(): + for I in ti.grouped(pixel): + pixel[I] = ti.u8(ti.random() * 255) + + +paint() +pixel = pixel.to_numpy() +ti.tools.imshow(pixel, "Random Generated") +for ext in ["bmp", "png", "jpg"]: + fn = "taichi-example-random-img." + ext + ti.tools.imwrite(pixel, fn) + pixel_r = ti.tools.imread(fn) + if ext != "jpg": + assert (pixel_r == pixel).all() + else: + ti.tools.imshow(pixel_r, "JPEG Read Result") + os.remove(fn) diff --git a/local_packages/taichi/examples/features/gui/gui_widgets.py b/local_packages/taichi/examples/features/gui/gui_widgets.py new file mode 100644 index 0000000..9015ff6 --- /dev/null +++ b/local_packages/taichi/examples/features/gui/gui_widgets.py @@ -0,0 +1,28 @@ +import taichi as ti + +gui = ti.GUI("GUI widgets") + +radius = gui.slider("Radius", 1, 50, step=1) +xcoor = gui.label("X-coordinate") +okay = gui.button("OK") + +xcoor.value = 0.5 +radius.value = 10 + +while gui.running: + for e in gui.get_events(gui.PRESS): + if e.key == gui.ESCAPE: + gui.running = False + elif e.key == "a": + xcoor.value -= 0.05 + elif e.key == "d": + xcoor.value += 0.05 + elif e.key == "s": + radius.value -= 1 + elif e.key == "w": + radius.value += 1 + elif e.key == okay: + print("OK clicked") + + gui.circle((xcoor.value, 0.5), radius=radius.value) + gui.show() diff --git a/local_packages/taichi/examples/features/gui/keyboard.py b/local_packages/taichi/examples/features/gui/keyboard.py new file mode 100644 index 0000000..261453d --- /dev/null +++ b/local_packages/taichi/examples/features/gui/keyboard.py @@ -0,0 +1,39 @@ +import taichi as ti + +x, y = 0.5, 0.5 +delta = 0.01 +radius = 8 + +gui = ti.GUI("Keyboard", res=(400, 400)) + +while gui.running: + while gui.get_event(ti.GUI.PRESS, ti.GUI.MOTION): + if gui.event.key == ti.GUI.ESCAPE: + gui.running = False + elif gui.event.key == ti.GUI.RMB: + x, y = gui.event.pos + elif gui.event.key == ti.GUI.WHEEL: + x, y = gui.event.pos + dt = gui.event.delta + # delta is 2-dim vector (dx, dy) + # dt[0] denotes the horizontal direction, and dt[1] denotes the vertical direction + if dt[1] > 0: + radius += 10 + elif dt[1] < 0: + radius = max(8, radius - 10) + + if gui.is_pressed(ti.GUI.LEFT, "a"): + x -= delta + if gui.is_pressed(ti.GUI.RIGHT, "d"): + x += delta + if gui.is_pressed(ti.GUI.UP, "w"): + y += delta + if gui.is_pressed(ti.GUI.DOWN, "s"): + y -= delta + if gui.is_pressed(ti.GUI.LMB): + x, y = gui.get_cursor_pos() + + gui.text(f"({x:.3}, {y:.3})", (x, y)) + + gui.circle((x, y), 0xFFFFFF, radius) + gui.show() diff --git a/local_packages/taichi/examples/features/io/export_mesh.py b/local_packages/taichi/examples/features/io/export_mesh.py new file mode 100644 index 0000000..2c3f59c --- /dev/null +++ b/local_packages/taichi/examples/features/io/export_mesh.py @@ -0,0 +1,104 @@ +import numpy as np + +import taichi as ti + +# A 2D grid with quad faces +# y +# | +# z---/ +# x +# 19---15---11---07---03 +# | | | | | +# 18---14---10---06---02 +# | | | | | +# 17---13---19---05---01 +# | | | | | +# 16---12---08---04---00 + +writer = ti.tools.PLYWriter(num_vertices=20, num_faces=12, face_type="quad") + +# For the vertices, the only required channel is the position, +# which can be added by passing 3 np.array x, y, z into the following function. + +x = np.zeros(20) +y = np.array(list(np.arange(0, 4)) * 5) +z = np.repeat(np.arange(5), 4) +writer.add_vertex_pos(x, y, z) + +# For faces (if any), the only required channel is the list of vertex indices that each face contains. +indices = np.array([0, 1, 5, 4] * 12) + np.repeat( + np.array(list(np.arange(0, 3)) * 4) + 4 * np.repeat(np.arange(4), 3), 4 +) +writer.add_faces(indices) + +# Add custom vertex channel, the input should include a key, a supported datatype and, the data np.array +vdata = np.random.rand(20) +writer.add_vertex_channel("vdata1", "double", vdata) + +# Add custom face channel +foo_data = np.zeros(12) +writer.add_face_channel("foo_key", "foo_data_type", foo_data) +# error! because "foo_data_type" is not a supported datatype. Supported ones are +# ['char', 'uchar', 'short', 'ushort', 'int', 'uint', 'float', 'double'] + +# PLYwriter already defines several useful helper functions for common channels +# Add vertex color, alpha, and rgba +# using float/double r g b alpha to reprent color, the range should be 0 to 1 +r = np.random.rand(20) +g = np.random.rand(20) +b = np.random.rand(20) +alpha = np.random.rand(20) +writer.add_vertex_color(r, g, b) +writer.add_vertex_alpha(alpha) +# equivilantly +# add_vertex_rgba(r, g, b, alpha) + +# vertex normal +writer.add_vertex_normal(np.ones(20), np.zeros(20), np.zeros(20)) + +# vertex index, and piece (group id) +writer.add_vertex_id() +writer.add_vertex_piece(np.ones(20)) + +# Add face index, and piece (group id) +# Indexing the existing faces in the writer and add this channel to face channels +writer.add_face_id() +# Set all the faces is in group 1 +writer.add_face_piece(np.ones(12)) + +series_prefix = "example.ply" +series_prefix_ascii = "example_ascii.ply" +# Export a single file +# use ascii so you can read the content +writer.export_ascii(series_prefix_ascii) + +# alternatively, use binary for a bit better performance +writer.export(series_prefix) + +# Export a sequence of files, ie in 10 frames +for frame in range(10): + # write each frame as i.e. "example_000000.ply" in your current running folder + writer.export_frame_ascii(frame, series_prefix_ascii) + # alternatively, use binary + writer.export_frame(frame, series_prefix) + + # update location/color + x = x + 0.1 * np.random.rand(20) + y = y + 0.1 * np.random.rand(20) + z = z + 0.1 * np.random.rand(20) + r = np.random.rand(20) + g = np.random.rand(20) + b = np.random.rand(20) + alpha = np.random.rand(20) + # re-fill + writer = ti.tools.PLYWriter(num_vertices=20, num_faces=12, face_type="quad") + writer.add_vertex_pos(x, y, z) + writer.add_faces(indices) + writer.add_vertex_channel("vdata1", "double", vdata) + writer.add_vertex_color(r, g, b) + writer.add_vertex_alpha(alpha) + writer.add_vertex_normal(np.ones(20), np.zeros(20), np.zeros(20)) + writer.add_vertex_id() + writer.add_vertex_piece(np.ones(20)) + writer.add_face_id() + writer.add_face_piece(np.ones(12)) diff --git a/local_packages/taichi/examples/features/io/export_ply.py b/local_packages/taichi/examples/features/io/export_ply.py new file mode 100644 index 0000000..a787345 --- /dev/null +++ b/local_packages/taichi/examples/features/io/export_ply.py @@ -0,0 +1,44 @@ +import numpy as np + +import taichi as ti + +ti.init(arch=ti.cpu) + +num_vertices = 1000 +pos = ti.Vector.field(3, dtype=ti.f32, shape=(10, 10, 10)) +rgba = ti.Vector.field(4, dtype=ti.f32, shape=(10, 10, 10)) + + +@ti.kernel +def place_pos(): + for i, j, k in pos: + pos[i, j, k] = 0.1 * ti.Vector([i, j, k]) + + +@ti.kernel +def move_particles(): + for i, j, k in pos: + pos[i, j, k] += ti.Vector([0.1, 0.1, 0.1]) + + +@ti.kernel +def fill_rgba(): + for i, j, k in rgba: + rgba[i, j, k] = ti.Vector([ti.random(), ti.random(), ti.random(), ti.random()]) + + +place_pos() +series_prefix = "example.ply" +for frame in range(10): + move_particles() + fill_rgba() + # now adding each channel only supports passing individual np.array + # so converting into np.ndarray, reshape + # remember to use a temp var to store so you dont have to convert back + np_pos = np.reshape(pos.to_numpy(), (num_vertices, 3)) + np_rgba = np.reshape(rgba.to_numpy(), (num_vertices, 4)) + # create a PLYWriter + writer = ti.tools.PLYWriter(num_vertices=num_vertices) + writer.add_vertex_pos(np_pos[:, 0], np_pos[:, 1], np_pos[:, 2]) + writer.add_vertex_rgba(np_rgba[:, 0], np_rgba[:, 1], np_rgba[:, 2], np_rgba[:, 3]) + writer.export_frame(frame, series_prefix) diff --git a/local_packages/taichi/examples/features/io/export_videos.py b/local_packages/taichi/examples/features/io/export_videos.py new file mode 100644 index 0000000..c2cdd03 --- /dev/null +++ b/local_packages/taichi/examples/features/io/export_videos.py @@ -0,0 +1,33 @@ +import taichi as ti + +ti.init() + +pixels = ti.field(ti.u8, shape=(512, 512, 3)) + + +@ti.kernel +def paint(): + for i, j, k in pixels: + pixels[i, j, k] = ti.random() * 255 + + +def main(): + result_dir = "./results" + video_manager = ti.tools.VideoManager(output_dir=result_dir, framerate=24, automatic_build=False) + + for i in range(50): + paint() + + pixels_img = pixels.to_numpy() + video_manager.write_frame(pixels_img) + print(f"\rFrame {i+1}/50 is recorded", end="") + + print() + print("Exporting .mp4 and .gif videos...") + video_manager.make_video(gif=True, mp4=True) + print(f'MP4 video is saved to {video_manager.get_output_filename(".mp4")}') + print(f'GIF video is saved to {video_manager.get_output_filename(".gif")}') + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/features/sparse/explicit_activation.py b/local_packages/taichi/examples/features/sparse/explicit_activation.py new file mode 100644 index 0000000..8cd581e --- /dev/null +++ b/local_packages/taichi/examples/features/sparse/explicit_activation.py @@ -0,0 +1,69 @@ +import taichi as ti + +ti.init() + +x = ti.field(dtype=ti.i32) +block1 = ti.root.pointer(ti.ij, (4, 4)) +block2 = block1.pointer(ti.ij, (2, 2)) +pixel = block2.dense(ti.ij, (2, 2)) +pixel.place(x) + + +@ti.kernel +def sparse_api_demo(): + ti.activate(block1, [0, 1]) + ti.activate(block2, [1, 2]) + + for i, j in x: + print(f"field x[{i}, {j}] = {x[i, j]}") + # outputs: + # field x[2, 4] = 0 + # field x[2, 5] = 0 + # field x[3, 4] = 0 + # field x[3, 5] = 0 + + for i, j in block2: + print(f"Active block2: [{i}, {j}]") + # output: Active block2: [1, 2] + + for i, j in block1: + print(f"Active block1: [{i}, {j}]") + # output: Active block1: [0, 1] + + for j in range(4): + print(f"Activity of block2[2, {j}] = {ti.is_active(block2, [1, j])}") + + ti.deactivate(block2, [1, 2]) + + for i, j in block2: + print(f"Active block2: [{i}, {j}]") + # output: nothing + + for i, j in block1: + print(f"Active block1: [{i}, {j}]") + # output: Active block1: [0, 1] + + print(ti.rescale_index(x, block1, ti.Vector([9, 17]))) + # output = [2, 4] + + # Note: ti.Vector is optional in ti.rescale_index. + print(ti.rescale_index(x, block1, [9, 17])) + # output = [2, 4] + + ti.activate(block2, [1, 2]) + + +sparse_api_demo() + + +@ti.kernel +def check_activity(snode: ti.template(), i: ti.i32, j: ti.i32): + print(ti.is_active(snode, [i, j])) + + +check_activity(block2, 1, 2) # output = 1 +block2.deactivate_all() +check_activity(block2, 1, 2) # output = 0 +check_activity(block1, 0, 1) # output = 1 +ti.deactivate_all_snodes() +check_activity(block1, 0, 1) # output = 0 diff --git a/local_packages/taichi/examples/features/sparse/taichi_bitmasked.py b/local_packages/taichi/examples/features/sparse/taichi_bitmasked.py new file mode 100644 index 0000000..6726c21 --- /dev/null +++ b/local_packages/taichi/examples/features/sparse/taichi_bitmasked.py @@ -0,0 +1,52 @@ +import math + +import taichi as ti + +ti.init(arch=ti.cuda) + +n = 256 +x = ti.field(ti.f32) +# `bitmasked` is a field that supports sparsity, in that each element can be +# activated individually. (It can be viewed as `dense`, with an extra bit for each +# element to mark its activation). Assigning to an element will activate it +# automatically. Use struct-for syntax to loop over the active elements only. +ti.root.bitmasked(ti.ij, (n, n)).place(x) + + +@ti.kernel +def activate(): + # All elements in bitmasked is initially deactivated + # Let's activate elements in the rectangle now! + for i, j in ti.ndrange((100, 125), (100, 125)): + x[i, j] = 233 # assign any value to activate the element at (i, j) + + +@ti.kernel +def paint_active_pixels(color: ti.f32): + # struct-for syntax: loop over active pixels, inactive pixels are skipped + for i, j in x: + x[i, j] = color + + +@ti.kernel +def paint_all_pixels(color: ti.f32): + # range-for syntax: loop over all pixels, no matter active or not + for i, j in ti.ndrange(n, n): + x[i, j] = color + + +def main(): + ti.deactivate_all_snodes() + activate() + + gui = ti.GUI("bitmasked", (n, n)) + for frame in range(10000): + color = math.sin(frame * 0.05) * 0.5 + 0.5 + paint_active_pixels(color) + # paint_all_pixels(color) # try this and compare the difference! + gui.set_image(x) + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/features/sparse/taichi_dynamic.py b/local_packages/taichi/examples/features/sparse/taichi_dynamic.py new file mode 100644 index 0000000..7a8d923 --- /dev/null +++ b/local_packages/taichi/examples/features/sparse/taichi_dynamic.py @@ -0,0 +1,31 @@ +import taichi as ti + +ti.init() + +x = ti.field(ti.i32) +l = ti.field(ti.i32) +n = 16 + +ti.root.dense(ti.i, n).dynamic(ti.j, n).place(x) +ti.root.dense(ti.i, n).place(l) + + +@ti.kernel +def make_lists(): + for i in range(n): + for j in range(i): + ti.append(x.parent(), i, j * j) + l[i] = ti.length(x.parent(), i) + + +def main(): + make_lists() + + for i in range(n): + assert l[i] == i + for j in range(n): + assert x[i, j] == (j * j if j < i else 0) + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/features/sparse/taichi_sparse.py b/local_packages/taichi/examples/features/sparse/taichi_sparse.py new file mode 100644 index 0000000..57df76d --- /dev/null +++ b/local_packages/taichi/examples/features/sparse/taichi_sparse.py @@ -0,0 +1,60 @@ +from taichi.examples.patterns import taichi_logo + +import taichi as ti + +ti.init(arch=ti.cuda) + +n = 512 +x = ti.field(dtype=ti.i32) +res = n + n // 4 + n // 16 + n // 64 +img = ti.field(dtype=ti.f32, shape=(res, res)) + +block1 = ti.root.pointer(ti.ij, n // 64) +block2 = block1.pointer(ti.ij, 4) +block3 = block2.pointer(ti.ij, 4) +block3.dense(ti.ij, 4).place(x) + + +@ti.kernel +def activate(t: ti.f32): + for i, j in ti.ndrange(n, n): + p = ti.Vector([i, j]) / n + p = ti.math.rotation2d(ti.sin(t)) @ (p - 0.5) + 0.5 + + if taichi_logo(p) == 0: + x[i, j] = 1 + + +@ti.func +def scatter(i): + return i + i // 4 + i // 16 + i // 64 + 2 + + +@ti.kernel +def paint(): + for i, j in ti.ndrange(n, n): + t = x[i, j] + block1_index = ti.rescale_index(x, block1, [i, j]) + block2_index = ti.rescale_index(x, block2, [i, j]) + block3_index = ti.rescale_index(x, block3, [i, j]) + t += ti.is_active(block1, block1_index) + t += ti.is_active(block2, block2_index) + t += ti.is_active(block3, block3_index) + img[scatter(i), scatter(j)] = 1 - t / 4 + + +def main(): + img.fill(0.05) + + gui = ti.GUI("Sparse Grids", (res, res)) + + for i in range(100000): + block1.deactivate_all() + activate(i * 0.05) + paint() + gui.set_image(img) + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/features/sparse/tutorial.py b/local_packages/taichi/examples/features/sparse/tutorial.py new file mode 100644 index 0000000..129de6c --- /dev/null +++ b/local_packages/taichi/examples/features/sparse/tutorial.py @@ -0,0 +1,29 @@ +import taichi as ti + +use_bitmask = False + +ti.init() + +x = ti.field(dtype=ti.i32) +block = ti.root.pointer(ti.ij, (4, 4)) +if use_bitmask: + pixel = block.bitmasked(ti.ij, (2, 2)) +else: + pixel = block.dense(ti.ij, (2, 2)) +pixel.place(x) + + +@ti.kernel +def sparse_struct_for(): + x[2, 3] = 2 + x[5, 6] = 3 + + for i, j in x: + print(f"field x[i, j]={x[i, j]}") + + for i, j in block: + print(f"Active block: [{i}, {j}]") + + +print(f"use_bitmask={use_bitmask}") +sparse_struct_for() diff --git a/local_packages/taichi/examples/ggui_examples/fem128_ggui.py b/local_packages/taichi/examples/ggui_examples/fem128_ggui.py new file mode 100644 index 0000000..a82362f --- /dev/null +++ b/local_packages/taichi/examples/ggui_examples/fem128_ggui.py @@ -0,0 +1,179 @@ +import taichi as ti + +arch = ti.vulkan if ti._lib.core.with_vulkan() else ti.cuda +ti.init(arch=arch) + +N = 12 +dt = 5e-5 +dx = 1 / N +rho = 4e1 +NF = 2 * N**2 # number of faces +NV = (N + 1) ** 2 # number of vertices +E, nu = 4e4, 0.2 # Young's modulus and Poisson's ratio +mu, lam = E / 2 / (1 + nu), E * nu / (1 + nu) / (1 - 2 * nu) # Lame parameters +ball_pos, ball_radius = ti.Vector([0.5, 0.0]), 0.31 +damping = 14.5 + +pos = ti.Vector.field(2, float, NV, needs_grad=True) +vel = ti.Vector.field(2, float, NV) +f2v = ti.Vector.field(3, int, NF) # ids of three vertices of each face +B = ti.Matrix.field(2, 2, float, NF) +F = ti.Matrix.field(2, 2, float, NF, needs_grad=True) +V = ti.field(float, NF) +phi = ti.field(float, NF) # potential energy of each face (Neo-Hookean) +U = ti.field(float, (), needs_grad=True) # total potential energy + +gravity = ti.Vector.field(2, float, ()) +attractor_pos = ti.Vector.field(2, float, ()) +attractor_strength = ti.field(float, ()) + + +@ti.kernel +def update_U(): + for i in range(NF): + ia, ib, ic = f2v[i] + a, b, c = pos[ia], pos[ib], pos[ic] + V[i] = abs((a - c).cross(b - c)) + D_i = ti.Matrix.cols([a - c, b - c]) + F[i] = D_i @ B[i] + for i in range(NF): + F_i = F[i] + log_J_i = ti.log(F_i.determinant()) + phi_i = mu / 2 * ((F_i.transpose() @ F_i).trace() - 2) + phi_i -= mu * log_J_i + phi_i += lam / 2 * log_J_i**2 + phi[i] = phi_i + U[None] += V[i] * phi_i + + +@ti.kernel +def advance(): + for i in range(NV): + acc = -pos.grad[i] / (rho * dx**2) + g = gravity[None] * 0.8 + attractor_strength[None] * (attractor_pos[None] - pos[i]).normalized(1e-5) + vel[i] += dt * (acc + g * 40) + vel[i] *= ti.exp(-dt * damping) + for i in range(NV): + # ball boundary condition: + disp = pos[i] - ball_pos + disp2 = disp.norm_sqr() + if disp2 <= ball_radius**2: + NoV = vel[i].dot(disp) + if NoV < 0: + vel[i] -= NoV * disp / disp2 + cond = (pos[i] < 0) & (vel[i] < 0) | (pos[i] > 1) & (vel[i] > 0) + # rect boundary condition: + for j in ti.static(range(pos.n)): + if cond[j]: + vel[i][j] = 0 + pos[i] += dt * vel[i] + + +@ti.kernel +def init_pos(): + for i, j in ti.ndrange(N + 1, N + 1): + k = i * (N + 1) + j + pos[k] = ti.Vector([i, j]) / N * 0.25 + ti.Vector([0.45, 0.45]) + vel[k] = ti.Vector([0, 0]) + for i in range(NF): + ia, ib, ic = f2v[i] + a, b, c = pos[ia], pos[ib], pos[ic] + B_i_inv = ti.Matrix.cols([a - c, b - c]) + B[i] = B_i_inv.inverse() + + +@ti.kernel +def init_mesh(): + for i, j in ti.ndrange(N, N): + k = (i * N + j) * 2 + a = i * (N + 1) + j + b = a + 1 + c = a + N + 2 + d = a + N + 1 + f2v[k + 0] = [a, b, c] + f2v[k + 1] = [c, d, a] + + +window = ti.ui.Window("FEM128", (512, 512)) +canvas = window.get_canvas() + +# rendering related fields +vertexColors = ti.Vector.field(3, float, NV) +vertexPositions = ti.Vector.field(2, float, NV) +triangleIndices = ti.field(int, NF * 3) +mouse_circle = ti.Vector.field(2, dtype=float, shape=(1,)) +ball_circle = ti.Vector.field(2, dtype=float, shape=(1,)) + + +@ti.kernel +def paint_triangles(): + for i in range(NF): + k = phi[i] * (10 / E) + gb = (1 - k) * 0.5 + color = ti.Vector([k + gb, gb, gb]) + + ia, ib, ic = f2v[i] + vertexColors[ia] = color + vertexColors[ib] = color + vertexColors[ic] = color + vertexPositions[ia] = pos[ia] + vertexPositions[ib] = pos[ib] + vertexPositions[ic] = pos[ic] + triangleIndices[i * 3 + 0] = ia + triangleIndices[i * 3 + 1] = ib + triangleIndices[i * 3 + 2] = ic + + +def paint_mouse_ball(): + mouse = window.get_cursor_pos() + mouse_circle[0] = ti.Vector([mouse[0], mouse[1]]) + ball_circle[0] = ball_pos + + +def render(): + paint_triangles() + paint_mouse_ball() + + canvas.circles(mouse_circle, color=(0.2, 0.4, 0.6), radius=0.02) + canvas.circles(ball_circle, color=(0.4, 0.4, 0.4), radius=ball_radius) + + canvas.triangles(vertexPositions, indices=triangleIndices, per_vertex_color=vertexColors) + canvas.circles(vertexPositions, radius=0.003, color=(1, 0.6, 0.2)) + + +def main(): + init_mesh() + init_pos() + gravity[None] = [0, -1] + + print( + "[Hint] Use WSAD/arrow keys to control gravity. Use left/right mouse buttons to attract/repel. Press R to reset." + ) + while window.running: + for e in window.get_events(ti.ui.PRESS): + if e.key == ti.ui.ESCAPE: + window.running = False + elif e.key == "r": + init_pos() + elif e.key in ("a", ti.ui.LEFT): + gravity[None] = [-1, 0] + elif e.key in ("d", ti.ui.RIGHT): + gravity[None] = [+1, 0] + elif e.key in ("s", ti.ui.DOWN): + gravity[None] = [0, -1] + elif e.key in ("w", ti.ui.UP): + gravity[None] = [0, +1] + + mouse_pos = window.get_cursor_pos() + attractor_pos[None] = mouse_pos + attractor_strength[None] = window.is_pressed(ti.ui.LMB) - window.is_pressed(ti.ui.RMB) + for i in range(50): + with ti.ad.Tape(loss=U): + update_U() + advance() + render() + window.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/ggui_examples/fractal3d_ggui.py b/local_packages/taichi/examples/ggui_examples/fractal3d_ggui.py new file mode 100644 index 0000000..b7f345e --- /dev/null +++ b/local_packages/taichi/examples/ggui_examples/fractal3d_ggui.py @@ -0,0 +1,165 @@ +import taichi as ti +import taichi.math as tm + +arch = ti.vulkan if ti._lib.core.with_vulkan() else ti.cuda +ti.init(arch=arch) + +vec3 = tm.vec3 +vec4 = tm.vec4 + + +@ti.func +def quat_mul(v1, v2): + return vec4( + v1.x * v2.x - tm.dot(v1.yzw, v2.yzw), + v1.x * v2.yzw + v2.x * v1.yzw + tm.cross(v1.yzw, v2.yzw), + ) + + +@ti.func +def quat_conj(q): + return vec4(q.x, -q.yzw) + + +iters = 10 +max_norm = 4 + + +@ti.func +def compute_sdf(z, c): + md2 = 1.0 + mz2 = tm.dot(z, z) + + for _ in range(iters): + md2 *= max_norm * mz2 + z = quat_mul(z, z) + c + + mz2 = z.dot(z) + if mz2 > max_norm: + break + + return 0.25 * ti.sqrt(mz2 / md2) * ti.log(mz2) + + +@ti.func +def compute_normal(z, c): + J0 = vec4(1, 0, 0, 0) + J1 = vec4(0, 1, 0, 0) + J2 = vec4(0, 0, 1, 0) + + z_curr = z + + iterations = 0 + while z_curr.norm() < max_norm and iterations < iters: + cz = quat_conj(z_curr) + + J0 = vec4( + tm.dot(J0, cz), + tm.dot(J0.xy, z_curr.yx), + tm.dot(J0.xz, z_curr.zx), + tm.dot(J0.xw, z_curr.wx), + ) + J1 = vec4( + tm.dot(J1, cz), + tm.dot(J1.xy, z_curr.yx), + tm.dot(J1.xz, z_curr.zx), + tm.dot(J1.xw, z_curr.wx), + ) + J2 = vec4( + tm.dot(J2, cz), + tm.dot(J2.xy, z_curr.yx), + tm.dot(J2.xz, z_curr.zx), + tm.dot(J2.xw, z_curr.wx), + ) + + z_curr = quat_mul(z_curr, z_curr) + c + iterations += 1 + + return tm.normalize(tm.vec3(tm.dot(z_curr, J0), tm.dot(z_curr, J1), tm.dot(z_curr, J2))) + + +image_res = (1280, 720) + + +@ti.data_oriented +class Julia: + def __init__(self): + self.image = ti.Vector.field(3, float, image_res) + + @ti.func + def shade(self, pos, surface_color, normal, light_pos): + _ = self # make pylint happy + light_color = vec3(1) + + light_dir = tm.normalize(light_pos - pos) + return light_color * surface_color * ti.max(0, tm.dot(light_dir, normal)) + + @ti.kernel + def march(self, time_arg: float): + time = time_arg * 0.15 + c = 0.45 * ti.cos(vec4(0.5, 3.9, 1.4, 1.1) + time * vec4(1.2, 1.7, 1.3, 2.5)) - vec4(0.3, 0, 0, 0) + + r = 1.8 + o3 = ( + tm.normalize( + vec3( + r * ti.cos(0.3 + 0.37 * time), + 0.3 + 0.8 * r * ti.cos(1.0 + 0.33 * time), + r * ti.cos(2.2 + 0.31 * time), + ) + ) + * r + ) + ta = vec3(0) + cr = 0.1 * ti.cos(0.1 * time) + + for x, y in self.image: + p = (-tm.vec2(image_res) + 2.0 * tm.vec2(x, y)) / (image_res[1] * 0.75) + + cw = tm.normalize(ta - o3) + cp = vec3(ti.sin(cr), ti.cos(cr), 0) + cu = tm.normalize(cw.cross(cp)) + cv = tm.normalize(cu.cross(cw)) + + d3 = tm.normalize(p.x * cu + p.y * cv + 2.0 * cw) + + o = vec4(o3, 0) + d = vec4(d3, 0) + + max_t = 10 + + t = 0.0 + for step in range(300): + h = compute_sdf(o + t * d, c) + t += h + if h < 0.0001 or t >= max_t: + break + if t < max_t: + normal = compute_normal(o + t * d, c) + color = abs((o + t * d).xyz) / 1.3 + pos = (o + t * d).xyz + self.image[x, y] = self.shade(pos, color, normal, o3) + else: + self.image[x, y] = (0, 0, 0) + + def get_image(self, time): + self.march(time) + return self.image + + +def main(): + julia = Julia() + + window = ti.ui.Window("Fractal 3D", image_res, vsync=True) + canvas = window.get_canvas() + + frame_id = 0 + + while window.running: + frame_id += 1 + canvas.set_image(julia.get_image(frame_id / 60)) + window.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/ggui_examples/mass_spring_3d_ggui.py b/local_packages/taichi/examples/ggui_examples/mass_spring_3d_ggui.py new file mode 100644 index 0000000..d398dc0 --- /dev/null +++ b/local_packages/taichi/examples/ggui_examples/mass_spring_3d_ggui.py @@ -0,0 +1,162 @@ +import taichi as ti + +arch = ti.vulkan if ti._lib.core.with_vulkan() else ti.cuda +ti.init(arch=arch) + +n = 128 +quad_size = 1.0 / n +dt = 4e-2 / n +substeps = int(1 / 60 // dt) + +gravity = ti.Vector([0, -9.8, 0]) +spring_Y = 3e4 +dashpot_damping = 1e4 +drag_damping = 1 + +ball_radius = 0.3 +ball_center = ti.Vector.field(3, dtype=float, shape=(1,)) +ball_center[0] = [0, 0, 0] + +x = ti.Vector.field(3, dtype=float, shape=(n, n)) +v = ti.Vector.field(3, dtype=float, shape=(n, n)) + +num_triangles = (n - 1) * (n - 1) * 2 +indices = ti.field(int, shape=num_triangles * 3) +vertices = ti.Vector.field(3, dtype=float, shape=n * n) +colors = ti.Vector.field(3, dtype=float, shape=n * n) + +bending_springs = False + + +@ti.kernel +def initialize_mass_points(): + random_offset = ti.Vector([ti.random() - 0.5, ti.random() - 0.5]) * 0.1 + + for i, j in x: + x[i, j] = [ + i * quad_size - 0.5 + random_offset[0], + 0.6, + j * quad_size - 0.5 + random_offset[1], + ] + v[i, j] = [0, 0, 0] + + +@ti.kernel +def initialize_mesh_indices(): + for i, j in ti.ndrange(n - 1, n - 1): + quad_id = (i * (n - 1)) + j + # 1st triangle of the square + indices[quad_id * 6 + 0] = i * n + j + indices[quad_id * 6 + 1] = (i + 1) * n + j + indices[quad_id * 6 + 2] = i * n + (j + 1) + # 2nd triangle of the square + indices[quad_id * 6 + 3] = (i + 1) * n + j + 1 + indices[quad_id * 6 + 4] = i * n + (j + 1) + indices[quad_id * 6 + 5] = (i + 1) * n + j + + for i, j in ti.ndrange(n, n): + if (i // 4 + j // 4) % 2 == 0: + colors[i * n + j] = (0.22, 0.72, 0.52) + else: + colors[i * n + j] = (1, 0.334, 0.52) + + +initialize_mesh_indices() + +spring_offsets = [] + + +def initialize_spring_offsets(): + if bending_springs: + for i in range(-1, 2): + for j in range(-1, 2): + if (i, j) != (0, 0): + spring_offsets.append(ti.Vector([i, j])) + + else: + for i in range(-2, 3): + for j in range(-2, 3): + if (i, j) != (0, 0) and abs(i) + abs(j) <= 2: + spring_offsets.append(ti.Vector([i, j])) + + +initialize_spring_offsets() + + +@ti.kernel +def substep(): + for i in ti.grouped(x): + v[i] += gravity * dt + + for i in ti.grouped(x): + force = ti.Vector([0.0, 0.0, 0.0]) + for spring_offset in ti.static(spring_offsets): + j = i + spring_offset + if 0 <= j[0] < n and 0 <= j[1] < n: + x_ij = x[i] - x[j] + v_ij = v[i] - v[j] + d = x_ij.normalized() + current_dist = x_ij.norm() + original_dist = quad_size * float(i - j).norm() # pylint: disable=no-member + # Spring force + force += -spring_Y * d * (current_dist / original_dist - 1) + # Dashpot damping + force += -v_ij.dot(d) * d * dashpot_damping * quad_size + + v[i] += force * dt + + for i in ti.grouped(x): + v[i] *= ti.exp(-drag_damping * dt) + offset_to_center = x[i] - ball_center[0] + if offset_to_center.norm() <= ball_radius: + # Velocity projection + normal = offset_to_center.normalized() + v[i] -= ti.min(v[i].dot(normal), 0) * normal + x[i] += dt * v[i] + + +@ti.kernel +def update_vertices(): + for i, j in ti.ndrange(n, n): + vertices[i * n + j] = x[i, j] + + +def main(): + window = ti.ui.Window("Taichi Cloth Simulation on GGUI", (768, 768), vsync=True) + canvas = window.get_canvas() + canvas.set_background_color((1, 1, 1)) + scene = window.get_scene() + camera = ti.ui.Camera() + + current_t = 0.0 + initialize_mass_points() + + while window.running: + if current_t > 1.5: + # Reset + initialize_mass_points() + current_t = 0 + + for i in range(substeps): + substep() + current_t += dt + update_vertices() + + camera.position(0.0, 0.0, 3) + camera.lookat(0.0, 0.0, 0) + scene.set_camera(camera) + + scene.point_light(pos=(0, 1, 2), color=(1, 1, 1)) + scene.ambient_light((0.5, 0.5, 0.5)) + scene.mesh(vertices, indices=indices, per_vertex_color=colors, two_sided=True) + + # Draw a smaller ball to avoid visual penetration + scene.particles(ball_center, radius=ball_radius * 0.95, color=(0.5, 0.42, 0.8)) + canvas.scene(scene) + window.show() + + # TODO: include self-collision handling + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/ggui_examples/mass_spring_game_ggui.py b/local_packages/taichi/examples/ggui_examples/mass_spring_game_ggui.py new file mode 100644 index 0000000..23515d4 --- /dev/null +++ b/local_packages/taichi/examples/ggui_examples/mass_spring_game_ggui.py @@ -0,0 +1,169 @@ +import taichi as ti + +# arch = ti.vulkan if ti._lib.core.with_vulkan() else ti.cuda +ti.init(arch=ti.metal) + +spring_Y = ti.field(dtype=ti.f32, shape=()) # Young's modulus +paused = ti.field(dtype=ti.i32, shape=()) +drag_damping = ti.field(dtype=ti.f32, shape=()) +dashpot_damping = ti.field(dtype=ti.f32, shape=()) + +max_num_particles = 1024 +particle_mass = 1.0 +dt = 1e-3 +substeps = 10 + +num_particles = ti.field(dtype=ti.i32, shape=()) +x = ti.Vector.field(2, dtype=ti.f32, shape=max_num_particles) +v = ti.Vector.field(2, dtype=ti.f32, shape=max_num_particles) +f = ti.Vector.field(2, dtype=ti.f32, shape=max_num_particles) +fixed = ti.field(dtype=ti.i32, shape=max_num_particles) + +indices = ti.field(dtype=ti.i32, shape=max_num_particles * max_num_particles * 2) +per_vertex_color = ti.Vector.field(3, ti.f32, shape=max_num_particles) + +# rest_length[i, j] == 0 means i and j are NOT connected +rest_length = ti.field(dtype=ti.f32, shape=(max_num_particles, max_num_particles)) + + +@ti.kernel +def substep(): + n = num_particles[None] + + # Compute force + for i in range(n): + # Gravity + f[i] = ti.Vector([0, -9.8]) * particle_mass + for j in range(n): + if rest_length[i, j] != 0: + x_ij = x[i] - x[j] + d = x_ij.normalized() + + # Spring force + f[i] += -spring_Y[None] * (x_ij.norm() / rest_length[i, j] - 1) * d + + # Dashpot damping + v_rel = (v[i] - v[j]).dot(d) + f[i] += -dashpot_damping[None] * v_rel * d + + # We use a semi-implicit Euler (aka symplectic Euler) time integrator + for i in range(n): + if not fixed[i]: + v[i] += dt * f[i] / particle_mass + v[i] *= ti.exp(-dt * drag_damping[None]) # Drag damping + + x[i] += v[i] * dt + else: + v[i] = ti.Vector([0, 0]) + + # Collide with four walls + for d in ti.static(range(2)): + # d = 0: treating X (horizontal) component + # d = 1: treating Y (vertical) component + + if x[i][d] < 0: # Bottom and left + x[i][d] = 0 # move particle inside + v[i][d] = 0 # stop it from moving further + + if x[i][d] > 1: # Top and right + x[i][d] = 1 # move particle inside + v[i][d] = 0 # stop it from moving further + + +@ti.kernel +def new_particle(pos_x: ti.f32, pos_y: ti.f32, fixed_: ti.i32): + # Taichi doesn't support using vectors as kernel arguments yet, so we pass scalars + new_particle_id = num_particles[None] + x[new_particle_id] = [pos_x, pos_y] + v[new_particle_id] = [0, 0] + fixed[new_particle_id] = fixed_ + num_particles[None] += 1 + + # Connect with existing particles + for i in range(new_particle_id): + dist = (x[new_particle_id] - x[i]).norm() + connection_radius = 0.15 + if dist < connection_radius: + # Connect the new particle with particle i + rest_length[i, new_particle_id] = 0.1 + rest_length[new_particle_id, i] = 0.1 + + +@ti.kernel +def attract(pos_x: ti.f32, pos_y: ti.f32): + for i in range(num_particles[None]): + p = ti.Vector([pos_x, pos_y]) + v[i] += -dt * substeps * (x[i] - p) * 100 + + +@ti.kernel +def render(): + for i in indices: + indices[i] = max_num_particles - 1 + n = num_particles[None] + for i in range(n + 1, max_num_particles): + x[i] = ti.Vector([-1, -1]) # hide them + for i in range(n): + if fixed[i]: + per_vertex_color[i] = ti.Vector([1, 0, 0]) + else: + per_vertex_color[i] = ti.Vector([0, 0, 0]) + for i in range(n): + for j in range(i + 1, n): + line_id = i * max_num_particles + j + if rest_length[i, j] != 0: + indices[line_id * 2] = i + indices[line_id * 2 + 1] = j + + +def main(): + window = ti.ui.Window("Explicit Mass Spring System", (768, 768), vsync=True) + canvas = window.get_canvas() + gui = window.get_gui() + + spring_Y[None] = 1000 + drag_damping[None] = 1 + dashpot_damping[None] = 100 + + new_particle(0.3, 0.3, False) + new_particle(0.3, 0.4, False) + new_particle(0.4, 0.4, False) + + while window.running: + for e in window.get_events(ti.ui.PRESS): + if e.key in [ti.ui.ESCAPE]: + exit() + elif e.key == ti.ui.SPACE: + paused[None] = not paused[None] + elif e.key == ti.ui.LMB: + pos = window.get_cursor_pos() + new_particle(pos[0], pos[1], int(window.is_pressed(ti.ui.SHIFT))) + elif e.key == "c": + num_particles[None] = 0 + rest_length.fill(0) + + if window.is_pressed(ti.ui.RMB): + cursor_pos = window.get_cursor_pos() + attract(cursor_pos[0], cursor_pos[1]) + + if not paused[None]: + for step in range(substeps): + substep() + + render() + canvas.set_background_color((1, 1, 1)) + canvas.lines(x, indices=indices, color=(0, 0, 0), width=0.01) + canvas.circles(x, per_vertex_color=per_vertex_color, radius=0.02) + + with gui.sub_window("mass spring", 0.05, 0.05, 0.9, 0.2) as w: + w.text("Left click: add mass point (with shift to fix); Right click: attract") + w.text("C: clear all; Space: pause") + spring_Y[None] = w.slider_float("Spring Young's modulus", spring_Y[None], 100, 10000) + drag_damping[None] = w.slider_float("Drag damping", drag_damping[None], 0.0, 10) + dashpot_damping[None] = w.slider_float("Dashpot damping", dashpot_damping[None], 10, 1000) + + window.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/ggui_examples/mpm128_ggui.py b/local_packages/taichi/examples/ggui_examples/mpm128_ggui.py new file mode 100644 index 0000000..34a6646 --- /dev/null +++ b/local_packages/taichi/examples/ggui_examples/mpm128_ggui.py @@ -0,0 +1,183 @@ +import taichi as ti + +arch = ti.vulkan if ti._lib.core.with_vulkan() else ti.cuda +ti.init(arch=arch) + +quality = 1 # Use a larger value for higher-res simulations +n_particles, n_grid = 9000 * quality**2, 128 * quality +dx, inv_dx = 1 / n_grid, float(n_grid) +dt = 1e-4 / quality +p_vol, p_rho = (dx * 0.5) ** 2, 1 +p_mass = p_vol * p_rho +E, nu = 5e3, 0.2 # Young's modulus and Poisson's ratio +mu_0, lambda_0 = E / (2 * (1 + nu)), E * nu / ((1 + nu) * (1 - 2 * nu)) # Lame parameters + +x = ti.Vector.field(2, dtype=float, shape=n_particles) # position +v = ti.Vector.field(2, dtype=float, shape=n_particles) # velocity +C = ti.Matrix.field(2, 2, dtype=float, shape=n_particles) # affine velocity field +F = ti.Matrix.field(2, 2, dtype=float, shape=n_particles) # deformation gradient +material = ti.field(dtype=int, shape=n_particles) # material id +Jp = ti.field(dtype=float, shape=n_particles) # plastic deformation +grid_v = ti.Vector.field(2, dtype=float, shape=(n_grid, n_grid)) # grid node momentum/velocity +grid_m = ti.field(dtype=float, shape=(n_grid, n_grid)) # grid node mass +gravity = ti.Vector.field(2, dtype=float, shape=()) +attractor_strength = ti.field(dtype=float, shape=()) +attractor_pos = ti.Vector.field(2, dtype=float, shape=()) + +group_size = n_particles // 3 +water = ti.Vector.field(2, dtype=float, shape=group_size) # position +jelly = ti.Vector.field(2, dtype=float, shape=group_size) # position +snow = ti.Vector.field(2, dtype=float, shape=group_size) # position +mouse_circle = ti.Vector.field(2, dtype=float, shape=(1,)) + + +@ti.kernel +def substep(): + for i, j in grid_m: + grid_v[i, j] = [0, 0] + grid_m[i, j] = 0 + for p in x: # Particle state update and scatter to grid (P2G) + base = (x[p] * inv_dx - 0.5).cast(int) + fx = x[p] * inv_dx - base.cast(float) + # Quadratic kernels [http://mpm.graphics Eqn. 123, with x=fx, fx-1,fx-2] + w = [0.5 * (1.5 - fx) ** 2, 0.75 - (fx - 1) ** 2, 0.5 * (fx - 0.5) ** 2] + # deformation gradient update + F[p] = (ti.Matrix.identity(float, 2) + dt * C[p]) @ F[p] + # Hardening coefficient: snow gets harder when compressed + h = ti.max(0.1, ti.min(5, ti.exp(10 * (1.0 - Jp[p])))) + if material[p] == 1: # jelly, make it softer + h = 0.3 + mu, la = mu_0 * h, lambda_0 * h + if material[p] == 0: # liquid + mu = 0.0 + U, sig, V = ti.svd(F[p]) + J = 1.0 + for d in ti.static(range(2)): + new_sig = sig[d, d] + if material[p] == 2: # Snow + new_sig = min(max(sig[d, d], 1 - 2.5e-2), 1 + 4.5e-3) # Plasticity + Jp[p] *= sig[d, d] / new_sig + sig[d, d] = new_sig + J *= new_sig + if material[p] == 0: + # Reset deformation gradient to avoid numerical instability + F[p] = ti.Matrix.identity(float, 2) * ti.sqrt(J) + elif material[p] == 2: + # Reconstruct elastic deformation gradient after plasticity + F[p] = U @ sig @ V.transpose() + stress = 2 * mu * (F[p] - U @ V.transpose()) @ F[p].transpose() + ti.Matrix.identity(float, 2) * la * J * ( + J - 1 + ) + stress = (-dt * p_vol * 4 * inv_dx * inv_dx) * stress + affine = stress + p_mass * C[p] + # Loop over 3x3 grid node neighborhood + for i, j in ti.static(ti.ndrange(3, 3)): + offset = ti.Vector([i, j]) + dpos = (offset.cast(float) - fx) * dx + weight = w[i][0] * w[j][1] + grid_v[base + offset] += weight * (p_mass * v[p] + affine @ dpos) + grid_m[base + offset] += weight * p_mass + for i, j in grid_m: + if grid_m[i, j] > 0: # No need for epsilon here + grid_v[i, j] = (1 / grid_m[i, j]) * grid_v[i, j] # Momentum to velocity + grid_v[i, j] += dt * gravity[None] * 30 # gravity + dist = attractor_pos[None] - dx * ti.Vector([i, j]) + grid_v[i, j] += dist / (0.01 + dist.norm()) * attractor_strength[None] * dt * 100 + if i < 3 and grid_v[i, j][0] < 0: + grid_v[i, j][0] = 0 # Boundary conditions + if i > n_grid - 3 and grid_v[i, j][0] > 0: + grid_v[i, j][0] = 0 + if j < 3 and grid_v[i, j][1] < 0: + grid_v[i, j][1] = 0 + if j > n_grid - 3 and grid_v[i, j][1] > 0: + grid_v[i, j][1] = 0 + for p in x: # grid to particle (G2P) + base = (x[p] * inv_dx - 0.5).cast(int) + fx = x[p] * inv_dx - base.cast(float) + w = [0.5 * (1.5 - fx) ** 2, 0.75 - (fx - 1.0) ** 2, 0.5 * (fx - 0.5) ** 2] + new_v = ti.Vector.zero(float, 2) + new_C = ti.Matrix.zero(float, 2, 2) + # loop over 3x3 grid node neighborhood + for i, j in ti.static(ti.ndrange(3, 3)): + dpos = ti.Vector([i, j]).cast(float) - fx + g_v = grid_v[base + ti.Vector([i, j])] + weight = w[i][0] * w[j][1] + new_v += weight * g_v + new_C += 4 * inv_dx * weight * g_v.outer_product(dpos) + v[p], C[p] = new_v, new_C + x[p] += dt * v[p] # advection + + +@ti.kernel +def reset(): + for i in range(n_particles): + x[i] = [ + ti.random() * 0.2 + 0.3 + 0.10 * (i // group_size), + ti.random() * 0.2 + 0.05 + 0.32 * (i // group_size), + ] + material[i] = i // group_size # 0: fluid 1: jelly 2: snow + v[i] = [0, 0] + F[i] = ti.Matrix([[1, 0], [0, 1]]) + Jp[i] = 1 + C[i] = ti.Matrix.zero(float, 2, 2) + + +@ti.kernel +def render(): + for i in range(group_size): + water[i] = x[i] + jelly[i] = x[i + group_size] + snow[i] = x[i + 2 * group_size] + + +def main(): + print( + "[Hint] Use WSAD/arrow keys to control gravity. Use left/right mouse buttons to attract/repel. Press R to reset." + ) + + res = (512, 512) + window = ti.ui.Window("Taichi MLS-MPM-128", res=res, vsync=True) + canvas = window.get_canvas() + radius = 0.003 + + reset() + gravity[None] = [0, -1] + + while window.running: + if window.get_event(ti.ui.PRESS): + if window.event.key == "r": + reset() + elif window.event.key in [ti.ui.ESCAPE]: + break + if window.event is not None: + gravity[None] = [0, 0] # if had any event + if window.is_pressed(ti.ui.LEFT, "a"): + gravity[None][0] = -1 + if window.is_pressed(ti.ui.RIGHT, "d"): + gravity[None][0] = 1 + if window.is_pressed(ti.ui.UP, "w"): + gravity[None][1] = 1 + if window.is_pressed(ti.ui.DOWN, "s"): + gravity[None][1] = -1 + mouse = window.get_cursor_pos() + mouse_circle[0] = ti.Vector([mouse[0], mouse[1]]) + canvas.circles(mouse_circle, color=(0.2, 0.4, 0.6), radius=0.05) + attractor_pos[None] = [mouse[0], mouse[1]] + attractor_strength[None] = 0 + if window.is_pressed(ti.ui.LMB): + attractor_strength[None] = 1 + if window.is_pressed(ti.ui.RMB): + attractor_strength[None] = -1 + + for s in range(int(2e-3 // dt)): + substep() + render() + canvas.set_background_color((0.067, 0.184, 0.255)) + canvas.circles(water, radius=radius, color=(0, 0.5, 0.5)) + canvas.circles(jelly, radius=radius, color=(0.93, 0.33, 0.23)) + canvas.circles(snow, radius=radius, color=(1, 1, 1)) + window.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/ggui_examples/mpm3d_ggui.py b/local_packages/taichi/examples/ggui_examples/mpm3d_ggui.py new file mode 100644 index 0000000..f850181 --- /dev/null +++ b/local_packages/taichi/examples/ggui_examples/mpm3d_ggui.py @@ -0,0 +1,322 @@ +import numpy as np + +import taichi as ti + +ti.init(arch=ti.gpu) + +# dim, n_grid, steps, dt = 2, 128, 20, 2e-4 +# dim, n_grid, steps, dt = 2, 256, 32, 1e-4 +# dim, n_grid, steps, dt = 3, 32, 25, 4e-4 +dim, n_grid, steps, dt = 3, 64, 25, 2e-4 +# dim, n_grid, steps, dt = 3, 128, 5, 1e-4 + +n_particles = n_grid**dim // 2 ** (dim - 1) + +print(n_particles) + +dx = 1 / n_grid + +p_rho = 1 +p_vol = (dx * 0.5) ** 2 +p_mass = p_vol * p_rho +GRAVITY = [0, -9.8, 0] +bound = 3 +E = 1000 # Young's modulus +nu = 0.2 # Poisson's ratio +mu_0, lambda_0 = E / (2 * (1 + nu)), E * nu / ((1 + nu) * (1 - 2 * nu)) # Lame parameters + +F_x = ti.Vector.field(dim, float, n_particles) +F_v = ti.Vector.field(dim, float, n_particles) +F_C = ti.Matrix.field(dim, dim, float, n_particles) +F_dg = ti.Matrix.field(3, 3, dtype=float, shape=n_particles) # deformation gradient +F_Jp = ti.field(float, n_particles) + +F_colors = ti.Vector.field(4, float, n_particles) +F_colors_random = ti.Vector.field(4, float, n_particles) +F_materials = ti.field(int, n_particles) +F_grid_v = ti.Vector.field(dim, float, (n_grid,) * dim) +F_grid_m = ti.field(float, (n_grid,) * dim) +F_used = ti.field(int, n_particles) + +neighbour = (3,) * dim + +WATER = 0 +JELLY = 1 +SNOW = 2 + + +@ti.kernel +def substep(g_x: float, g_y: float, g_z: float): + for I in ti.grouped(F_grid_m): + F_grid_v[I] = ti.zero(F_grid_v[I]) + F_grid_m[I] = 0 + ti.loop_config(block_dim=n_grid) + for p in F_x: + if F_used[p] == 0: + continue + Xp = F_x[p] / dx + base = int(Xp - 0.5) + fx = Xp - base + w = [0.5 * (1.5 - fx) ** 2, 0.75 - (fx - 1) ** 2, 0.5 * (fx - 0.5) ** 2] + + F_dg[p] = (ti.Matrix.identity(float, 3) + dt * F_C[p]) @ F_dg[p] # deformation gradient update + # Hardening coefficient: snow gets harder when compressed + h = ti.exp(10 * (1.0 - F_Jp[p])) + if F_materials[p] == JELLY: # jelly, make it softer + h = 0.3 + mu, la = mu_0 * h, lambda_0 * h + if F_materials[p] == WATER: # liquid + mu = 0.0 + + U, sig, V = ti.svd(F_dg[p]) + J = 1.0 + for d in ti.static(range(3)): + new_sig = sig[d, d] + if F_materials[p] == SNOW: # Snow + new_sig = ti.min(ti.max(sig[d, d], 1 - 2.5e-2), 1 + 4.5e-3) # Plasticity + F_Jp[p] *= sig[d, d] / new_sig + sig[d, d] = new_sig + J *= new_sig + if F_materials[p] == WATER: + # Reset deformation gradient to avoid numerical instability + new_F = ti.Matrix.identity(float, 3) + new_F[0, 0] = J + F_dg[p] = new_F + elif F_materials[p] == SNOW: + # Reconstruct elastic deformation gradient after plasticity + F_dg[p] = U @ sig @ V.transpose() + stress = 2 * mu * (F_dg[p] - U @ V.transpose()) @ F_dg[p].transpose() + ti.Matrix.identity( + float, 3 + ) * la * J * (J - 1) + stress = (-dt * p_vol * 4) * stress / dx**2 + affine = stress + p_mass * F_C[p] + + for offset in ti.static(ti.grouped(ti.ndrange(*neighbour))): + dpos = (offset - fx) * dx + weight = 1.0 + for i in ti.static(range(dim)): + weight *= w[offset[i]][i] + F_grid_v[base + offset] += weight * (p_mass * F_v[p] + affine @ dpos) + F_grid_m[base + offset] += weight * p_mass + for I in ti.grouped(F_grid_m): + if F_grid_m[I] > 0: + F_grid_v[I] /= F_grid_m[I] + F_grid_v[I] += dt * ti.Vector([g_x, g_y, g_z]) + cond = (I < bound) & (F_grid_v[I] < 0) | (I > n_grid - bound) & (F_grid_v[I] > 0) + F_grid_v[I] = ti.select(cond, 0, F_grid_v[I]) + ti.loop_config(block_dim=n_grid) + for p in F_x: + if F_used[p] == 0: + continue + Xp = F_x[p] / dx + base = int(Xp - 0.5) + fx = Xp - base + w = [0.5 * (1.5 - fx) ** 2, 0.75 - (fx - 1) ** 2, 0.5 * (fx - 0.5) ** 2] + new_v = ti.zero(F_v[p]) + new_C = ti.zero(F_C[p]) + for offset in ti.static(ti.grouped(ti.ndrange(*neighbour))): + dpos = (offset - fx) * dx + weight = 1.0 + for i in ti.static(range(dim)): + weight *= w[offset[i]][i] + g_v = F_grid_v[base + offset] + new_v += weight * g_v + new_C += 4 * weight * g_v.outer_product(dpos) / dx**2 + F_v[p] = new_v + F_x[p] += dt * F_v[p] + F_C[p] = new_C + + +class CubeVolume: + def __init__(self, minimum, size, material): + self.minimum = minimum + self.size = size + self.volume = self.size.x * self.size.y * self.size.z + self.material = material + + +@ti.kernel +def init_cube_vol( + first_par: int, + last_par: int, + x_begin: float, + y_begin: float, + z_begin: float, + x_size: float, + y_size: float, + z_size: float, + material: int, +): + for i in range(first_par, last_par): + F_x[i] = ti.Vector([ti.random() for i in range(dim)]) * ti.Vector([x_size, y_size, z_size]) + ti.Vector( + [x_begin, y_begin, z_begin] + ) + F_Jp[i] = 1 + F_dg[i] = ti.Matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + F_v[i] = ti.Vector([0.0, 0.0, 0.0]) + F_materials[i] = material + F_colors_random[i] = ti.Vector([ti.random(), ti.random(), ti.random(), ti.random()]) + F_used[i] = 1 + + +@ti.kernel +def set_all_unused(): + for p in F_used: + F_used[p] = 0 + # basically throw them away so they aren't rendered + F_x[p] = ti.Vector([533799.0, 533799.0, 533799.0]) + F_Jp[p] = 1 + F_dg[p] = ti.Matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + F_C[p] = ti.Matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]]) + F_v[p] = ti.Vector([0.0, 0.0, 0.0]) + + +def init_vols(vols): + set_all_unused() + total_vol = 0 + for v in vols: + total_vol += v.volume + + next_p = 0 + for i, v in enumerate(vols): + v = vols[i] + if isinstance(v, CubeVolume): + par_count = int(v.volume / total_vol * n_particles) + if i == len(vols) - 1: # this is the last volume, so use all remaining particles + par_count = n_particles - next_p + init_cube_vol(next_p, next_p + par_count, *v.minimum, *v.size, v.material) + next_p += par_count + else: + raise Exception("???") + + +@ti.kernel +def set_color_by_material(mat_color: ti.types.ndarray()): + for i in range(n_particles): + mat = F_materials[i] + F_colors[i] = ti.Vector([mat_color[mat, 0], mat_color[mat, 1], mat_color[mat, 2], 1.0]) + + +print("Loading presets...this might take a minute") + +presets = [ + [ + CubeVolume(ti.Vector([0.55, 0.05, 0.55]), ti.Vector([0.4, 0.4, 0.4]), WATER), + ], + [ + CubeVolume(ti.Vector([0.05, 0.05, 0.05]), ti.Vector([0.3, 0.4, 0.3]), WATER), + CubeVolume(ti.Vector([0.65, 0.05, 0.65]), ti.Vector([0.3, 0.4, 0.3]), WATER), + ], + [ + CubeVolume(ti.Vector([0.6, 0.05, 0.6]), ti.Vector([0.25, 0.25, 0.25]), WATER), + CubeVolume(ti.Vector([0.35, 0.35, 0.35]), ti.Vector([0.25, 0.25, 0.25]), SNOW), + CubeVolume(ti.Vector([0.05, 0.6, 0.05]), ti.Vector([0.25, 0.25, 0.25]), JELLY), + ], +] +preset_names = [ + "Single Dam Break", + "Double Dam Break", + "Water Snow Jelly", +] + +curr_preset_id = 0 + +paused = False + +use_random_colors = False +particles_radius = 0.02 + +material_colors = [(0.1, 0.6, 0.9), (0.93, 0.33, 0.23), (1.0, 1.0, 1.0)] + + +def init(): + global paused + init_vols(presets[curr_preset_id]) + + +init() + +res = (1080, 720) +window = ti.ui.Window("Real MPM 3D", res, vsync=True) + +canvas = window.get_canvas() +gui = window.get_gui() +scene = window.get_scene() +camera = ti.ui.Camera() +camera.position(0.5, 1.0, 1.95) +camera.lookat(0.5, 0.3, 0.5) +camera.fov(55) + + +def show_options(): + global use_random_colors + global paused + global particles_radius + global curr_preset_id + + with gui.sub_window("Presets", 0.05, 0.1, 0.2, 0.15) as w: + old_preset = curr_preset_id + for i in range(len(presets)): + if w.checkbox(preset_names[i], curr_preset_id == i): + curr_preset_id = i + if curr_preset_id != old_preset: + init() + paused = True + + with gui.sub_window("Gravity", 0.05, 0.3, 0.2, 0.1) as w: + GRAVITY[0] = w.slider_float("x", GRAVITY[0], -10, 10) + GRAVITY[1] = w.slider_float("y", GRAVITY[1], -10, 10) + GRAVITY[2] = w.slider_float("z", GRAVITY[2], -10, 10) + + with gui.sub_window("Options", 0.05, 0.45, 0.2, 0.4) as w: + use_random_colors = w.checkbox("use_random_colors", use_random_colors) + if not use_random_colors: + material_colors[WATER] = w.color_edit_3("water color", material_colors[WATER]) + material_colors[SNOW] = w.color_edit_3("snow color", material_colors[SNOW]) + material_colors[JELLY] = w.color_edit_3("jelly color", material_colors[JELLY]) + set_color_by_material(np.array(material_colors, dtype=np.float32)) + particles_radius = w.slider_float("particles radius ", particles_radius, 0, 0.1) + if w.button("restart"): + init() + if paused: + if w.button("Continue"): + paused = False + else: + if w.button("Pause"): + paused = True + + +def render(): + camera.track_user_inputs(window, movement_speed=0.03, hold_key=ti.ui.RMB) + scene.set_camera(camera) + + scene.ambient_light((0, 0, 0)) + + colors_used = F_colors_random if use_random_colors else F_colors + scene.particles(F_x, per_vertex_color=colors_used, radius=particles_radius) + + scene.point_light(pos=(0.5, 1.5, 0.5), color=(0.5, 0.5, 0.5)) + scene.point_light(pos=(0.5, 1.5, 1.5), color=(0.5, 0.5, 0.5)) + + canvas.scene(scene) + + +def main(): + frame_id = 0 + + while window.running: + # print("heyyy ",frame_id) + frame_id += 1 + frame_id = frame_id % 256 + + if not paused: + for _ in range(steps): + substep(*GRAVITY) + + render() + show_options() + window.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/ggui_examples/stable_fluid_ggui.py b/local_packages/taichi/examples/ggui_examples/stable_fluid_ggui.py new file mode 100644 index 0000000..f7f2b3a --- /dev/null +++ b/local_packages/taichi/examples/ggui_examples/stable_fluid_ggui.py @@ -0,0 +1,286 @@ +# References: +# http://developer.download.nvidia.com/books/HTML/gpugems/gpugems_ch38.html +# https://github.com/PavelDoGreat/WebGL-Fluid-Simulation +# https://www.bilibili.com/video/BV1ZK411H7Hc?p=4 +# https://github.com/ShaneFX/GAMES201/tree/master/HW01 + +import numpy as np + +import taichi as ti + +res = 512 +dt = 0.03 +p_jacobi_iters = 500 # 40 for a quicker but less accurate result +f_strength = 10000.0 +curl_strength = 0 +time_c = 2 +maxfps = 60 +dye_decay = 1 - 1 / (maxfps * time_c) +force_radius = res / 2.0 +gravity = True +debug = False + +arch = ti.vulkan if ti._lib.core.with_vulkan() else ti.cuda +ti.init(arch=arch) + +_velocities = ti.Vector.field(2, float, shape=(res, res)) +_new_velocities = ti.Vector.field(2, float, shape=(res, res)) +velocity_divs = ti.field(float, shape=(res, res)) +velocity_curls = ti.field(float, shape=(res, res)) +_pressures = ti.field(float, shape=(res, res)) +_new_pressures = ti.field(float, shape=(res, res)) +_dye_buffer = ti.Vector.field(3, float, shape=(res, res)) +_new_dye_buffer = ti.Vector.field(3, float, shape=(res, res)) + + +class TexPair: + def __init__(self, cur, nxt): + self.cur = cur + self.nxt = nxt + + def swap(self): + self.cur, self.nxt = self.nxt, self.cur + + +velocities_pair = TexPair(_velocities, _new_velocities) +pressures_pair = TexPair(_pressures, _new_pressures) +dyes_pair = TexPair(_dye_buffer, _new_dye_buffer) + + +@ti.func +def sample(qf, u, v): + I = ti.Vector([int(u), int(v)]) + I = ti.max(0, ti.min(res - 1, I)) + return qf[I] + + +@ti.func +def lerp(vl, vr, frac): + # frac: [0.0, 1.0] + return vl + frac * (vr - vl) + + +@ti.func +def bilerp(vf, p): + u, v = p + s, t = u - 0.5, v - 0.5 + # floor + iu, iv = ti.floor(s), ti.floor(t) + # fract + fu, fv = s - iu, t - iv + a = sample(vf, iu, iv) + b = sample(vf, iu + 1, iv) + c = sample(vf, iu, iv + 1) + d = sample(vf, iu + 1, iv + 1) + return lerp(lerp(a, b, fu), lerp(c, d, fu), fv) + + +# 3rd order Runge-Kutta +@ti.func +def backtrace(vf: ti.template(), p, dt_: ti.template()): + v1 = bilerp(vf, p) + p1 = p - 0.5 * dt_ * v1 + v2 = bilerp(vf, p1) + p2 = p - 0.75 * dt_ * v2 + v3 = bilerp(vf, p2) + p -= dt_ * ((2 / 9) * v1 + (1 / 3) * v2 + (4 / 9) * v3) + return p + + +@ti.kernel +def advect(vf: ti.template(), qf: ti.template(), new_qf: ti.template()): + for i, j in vf: + p = ti.Vector([i, j]) + 0.5 + p = backtrace(vf, p, dt) + new_qf[i, j] = bilerp(qf, p) * dye_decay + + +@ti.kernel +def apply_impulse(vf: ti.template(), dyef: ti.template(), imp_data: ti.types.ndarray()): + g_mag = 9.8 if gravity else 0.0 + g_dir = -ti.Vector([0, g_mag]) * 300 + for i, j in vf: + omx, omy = imp_data[2], imp_data[3] + mdir = ti.Vector([imp_data[0], imp_data[1]]) + dx, dy = (i + 0.5 - omx), (j + 0.5 - omy) + d2 = dx * dx + dy * dy + # dv = F * dt + factor = ti.exp(-d2 / force_radius) + + dc = dyef[i, j] + a = dc.norm() + + momentum = (mdir * f_strength * factor + g_dir * a / (1 + a)) * dt + + v = vf[i, j] + vf[i, j] = v + momentum + # add dye + if mdir.norm() > 0.5: + dc += ti.exp(-d2 * (4 / (res / 15) ** 2)) * ti.Vector([imp_data[4], imp_data[5], imp_data[6]]) + + dyef[i, j] = dc + + +@ti.kernel +def divergence(vf: ti.template()): + for i, j in vf: + vl = sample(vf, i - 1, j) + vr = sample(vf, i + 1, j) + vb = sample(vf, i, j - 1) + vt = sample(vf, i, j + 1) + vc = sample(vf, i, j) + if i == 0: + vl.x = -vc.x + if i == res - 1: + vr.x = -vc.x + if j == 0: + vb.y = -vc.y + if j == res - 1: + vt.y = -vc.y + velocity_divs[i, j] = (vr.x - vl.x + vt.y - vb.y) * 0.5 + + +@ti.kernel +def vorticity(vf: ti.template()): + for i, j in vf: + vl = sample(vf, i - 1, j) + vr = sample(vf, i + 1, j) + vb = sample(vf, i, j - 1) + vt = sample(vf, i, j + 1) + velocity_curls[i, j] = (vr.y - vl.y - vt.x + vb.x) * 0.5 + + +@ti.kernel +def pressure_jacobi(pf: ti.template(), new_pf: ti.template()): + for i, j in pf: + pl = sample(pf, i - 1, j) + pr = sample(pf, i + 1, j) + pb = sample(pf, i, j - 1) + pt = sample(pf, i, j + 1) + div = velocity_divs[i, j] + new_pf[i, j] = (pl + pr + pb + pt - div) * 0.25 + + +@ti.kernel +def subtract_gradient(vf: ti.template(), pf: ti.template()): + for i, j in vf: + pl = sample(pf, i - 1, j) + pr = sample(pf, i + 1, j) + pb = sample(pf, i, j - 1) + pt = sample(pf, i, j + 1) + vf[i, j] -= 0.5 * ti.Vector([pr - pl, pt - pb]) + + +@ti.kernel +def enhance_vorticity(vf: ti.template(), cf: ti.template()): + # anti-physics visual enhancement... + for i, j in vf: + cl = sample(cf, i - 1, j) + cr = sample(cf, i + 1, j) + cb = sample(cf, i, j - 1) + ct = sample(cf, i, j + 1) + cc = sample(cf, i, j) + force = ti.Vector([abs(ct) - abs(cb), abs(cl) - abs(cr)]).normalized(1e-3) + force *= curl_strength * cc + vf[i, j] = min(max(vf[i, j] + force * dt, -1e3), 1e3) + + +def step(mouse_data): + advect(velocities_pair.cur, velocities_pair.cur, velocities_pair.nxt) + advect(velocities_pair.cur, dyes_pair.cur, dyes_pair.nxt) + velocities_pair.swap() + dyes_pair.swap() + + apply_impulse(velocities_pair.cur, dyes_pair.cur, mouse_data) + + divergence(velocities_pair.cur) + + if curl_strength: + vorticity(velocities_pair.cur) + enhance_vorticity(velocities_pair.cur, velocity_curls) + + for _ in range(p_jacobi_iters): + pressure_jacobi(pressures_pair.cur, pressures_pair.nxt) + pressures_pair.swap() + + subtract_gradient(velocities_pair.cur, pressures_pair.cur) + + if debug: + divergence(velocities_pair.cur) + div_s = np.sum(velocity_divs.to_numpy()) + print(f"divergence={div_s}") + + +class MouseDataGen: + def __init__(self): + self.prev_mouse = None + self.prev_color = None + + def __call__(self, window): + # [0:2]: normalized delta direction + # [2:4]: current mouse xy + # [4:7]: color + mouse_data = np.zeros(8, dtype=np.float32) + if window.is_pressed(ti.ui.LMB): + mxy = np.array(window.get_cursor_pos(), dtype=np.float32) * res + if self.prev_mouse is None: + self.prev_mouse = mxy + # Set lower bound to 0.3 to prevent too dark colors + self.prev_color = (np.random.rand(3) * 0.7) + 0.3 + else: + mdir = mxy - self.prev_mouse + mdir = mdir / (np.linalg.norm(mdir) + 1e-5) + mouse_data[0], mouse_data[1] = mdir[0], mdir[1] + mouse_data[2], mouse_data[3] = mxy[0], mxy[1] + mouse_data[4:7] = self.prev_color + self.prev_mouse = mxy + else: + self.prev_mouse = None + self.prev_color = None + return mouse_data + + +def reset(): + velocities_pair.cur.fill(0) + pressures_pair.cur.fill(0) + dyes_pair.cur.fill(0) + + +def main(): + global curl_strength, debug + + paused = False + window = ti.ui.Window("Stable Fluid", (res, res), vsync=True) + canvas = window.get_canvas() + md_gen = MouseDataGen() + + while window.running: + if window.get_event(ti.ui.PRESS): + e = window.event + if e.key == ti.ui.ESCAPE: + break + elif e.key == "r": + paused = False + reset() + elif e.key == "s": + if curl_strength: + curl_strength = 0 + else: + curl_strength = 7 + elif e.key == "p": + paused = not paused + elif e.key == "d": + debug = not debug + + # Debug divergence: + # print(max((abs(velocity_divs.to_numpy().reshape(-1))))) + + if not paused: + mouse_data = md_gen(window) + step(mouse_data) + canvas.set_image(dyes_pair.cur) + window.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/graph/mpm88_graph.py b/local_packages/taichi/examples/graph/mpm88_graph.py new file mode 100644 index 0000000..55e7388 --- /dev/null +++ b/local_packages/taichi/examples/graph/mpm88_graph.py @@ -0,0 +1,189 @@ +import argparse + +import taichi as ti + +ti.init(arch=ti.vulkan) +n_particles = 8192 +n_grid = 128 +dx = 1 / n_grid +dt = 2e-4 + +p_rho = 1 +p_vol = (dx * 0.5) ** 2 +p_mass = p_vol * p_rho +gravity = 9.8 +bound = 3 +E = 400 +N_ITER = 500 # Use 500 to make speed diff more obvious + + +@ti.kernel +def substep_reset_grid(grid_v: ti.types.ndarray(ndim=2), grid_m: ti.types.ndarray(ndim=2)): + for i, j in grid_m: + grid_v[i, j] = [0, 0] + grid_m[i, j] = 0 + + +@ti.kernel +def substep_p2g( + x: ti.types.ndarray(ndim=1), + v: ti.types.ndarray(ndim=1), + C: ti.types.ndarray(ndim=1), + J: ti.types.ndarray(ndim=1), + grid_v: ti.types.ndarray(ndim=2), + grid_m: ti.types.ndarray(ndim=2), +): + for p in x: + Xp = x[p] / dx + base = int(Xp - 0.5) + fx = Xp - base + w = [0.5 * (1.5 - fx) ** 2, 0.75 - (fx - 1) ** 2, 0.5 * (fx - 0.5) ** 2] + stress = -dt * 4 * E * p_vol * (J[p] - 1) / dx**2 + affine = ti.Matrix([[stress, 0], [0, stress]]) + p_mass * C[p] + for i, j in ti.static(ti.ndrange(3, 3)): + offset = ti.Vector([i, j]) + dpos = (offset - fx) * dx + weight = w[i].x * w[j].y + grid_v[base + offset] += weight * (p_mass * v[p] + affine @ dpos) + grid_m[base + offset] += weight * p_mass + + +@ti.kernel +def substep_update_grid_v(grid_v: ti.types.ndarray(ndim=2), grid_m: ti.types.ndarray(ndim=2)): + for i, j in grid_m: + if grid_m[i, j] > 0: + grid_v[i, j] /= grid_m[i, j] + grid_v[i, j].y -= dt * gravity + if i < bound and grid_v[i, j].x < 0: + grid_v[i, j].x = 0 + if i > n_grid - bound and grid_v[i, j].x > 0: + grid_v[i, j].x = 0 + if j < bound and grid_v[i, j].y < 0: + grid_v[i, j].y = 0 + if j > n_grid - bound and grid_v[i, j].y > 0: + grid_v[i, j].y = 0 + + +@ti.kernel +def substep_g2p( + x: ti.types.ndarray(ndim=1), + v: ti.types.ndarray(ndim=1), + C: ti.types.ndarray(ndim=1), + J: ti.types.ndarray(ndim=1), + grid_v: ti.types.ndarray(ndim=2), +): + for p in x: + Xp = x[p] / dx + base = int(Xp - 0.5) + fx = Xp - base + w = [0.5 * (1.5 - fx) ** 2, 0.75 - (fx - 1) ** 2, 0.5 * (fx - 0.5) ** 2] + new_v = ti.Vector.zero(float, 2) + new_C = ti.Matrix.zero(float, 2, 2) + for i, j in ti.static(ti.ndrange(3, 3)): + offset = ti.Vector([i, j]) + dpos = (offset - fx) * dx + weight = w[i].x * w[j].y + g_v = grid_v[base + offset] + new_v += weight * g_v + new_C += 4 * weight * g_v.outer_product(dpos) / dx**2 + v[p] = new_v + x[p] += dt * v[p] + J[p] *= 1 + dt * new_C.trace() + C[p] = new_C + + +@ti.kernel +def init_particles( + x: ti.types.ndarray(ndim=1), + v: ti.types.ndarray(ndim=1), + J: ti.types.ndarray(ndim=1), +): + for i in range(n_particles): + x[i] = [ti.random() * 0.4 + 0.2, ti.random() * 0.4 + 0.2] + v[i] = [0, -1] + J[i] = 1 + + +F_x = ti.Vector.ndarray(2, ti.f32, shape=n_particles) +F_v = ti.Vector.ndarray(2, ti.f32, shape=n_particles) + +F_C = ti.Matrix.ndarray(2, 2, ti.f32, shape=n_particles) +F_J = ti.ndarray(ti.f32, shape=n_particles) +F_grid_v = ti.Vector.ndarray(2, ti.f32, shape=(n_grid, n_grid)) +F_grid_m = ti.ndarray(ti.f32, shape=(n_grid, n_grid)) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--baseline", action="store_true") + args, unknown = parser.parse_known_args() + + if not args.baseline: + print("running in graph mode") + # Build graph + sym_x = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, "x", dtype=ti.math.vec2, ndim=1) + sym_v = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, "v", dtype=ti.math.vec2, ndim=1) + sym_C = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, "C", dtype=ti.math.mat2, ndim=1) + sym_J = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, "J", ti.f32, ndim=1) + sym_grid_v = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, "grid_v", dtype=ti.math.vec2, ndim=2) + sym_grid_m = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, "grid_m", dtype=ti.f32, ndim=2) + g_init_builder = ti.graph.GraphBuilder() + g_init_builder.dispatch(init_particles, sym_x, sym_v, sym_J) + + g_update_builder = ti.graph.GraphBuilder() + substep = g_update_builder.create_sequential() + + substep.dispatch(substep_reset_grid, sym_grid_v, sym_grid_m) + substep.dispatch(substep_p2g, sym_x, sym_v, sym_C, sym_J, sym_grid_v, sym_grid_m) + substep.dispatch(substep_update_grid_v, sym_grid_v, sym_grid_m) + substep.dispatch(substep_g2p, sym_x, sym_v, sym_C, sym_J, sym_grid_v) + + for i in range(N_ITER): + g_update_builder.append(substep) + + # Compile + g_init = g_init_builder.compile() + g_update = g_update_builder.compile() + + # Run + g_init.run({"x": F_x, "v": F_v, "J": F_J}) + + gui = ti.GUI("MPM88") + while gui.running: + g_update.run( + { + "x": F_x, + "v": F_v, + "C": F_C, + "J": F_J, + "grid_v": F_grid_v, + "grid_m": F_grid_m, + } + ) + gui.clear(0x112F41) + gui.circles( + F_x.to_numpy(), # false+, pylint: disable=no-member + radius=1.5, + color=0x068587, + ) + gui.show() + else: + init_particles(F_x, F_v, F_J) + gui = ti.GUI("MPM88") + while gui.running and not gui.get_event(gui.ESCAPE): + for s in range(N_ITER): + substep_reset_grid(F_grid_v, F_grid_m) + substep_p2g(F_x, F_v, F_C, F_J, F_grid_v, F_grid_m) + substep_update_grid_v(F_grid_v, F_grid_m) + substep_g2p(F_x, F_v, F_C, F_J, F_grid_v) + gui.clear(0x112F41) + gui.circles( + F_x.to_numpy(), # false+, pylint: disable=no-member + radius=1.5, + color=0x068587, + ) + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/graph/stable_fluid_graph.py b/local_packages/taichi/examples/graph/stable_fluid_graph.py new file mode 100644 index 0000000..9f44c0c --- /dev/null +++ b/local_packages/taichi/examples/graph/stable_fluid_graph.py @@ -0,0 +1,324 @@ +# References: +# http://developer.download.nvidia.com/books/HTML/gpugems/gpugems_ch38.html +# https://github.com/PavelDoGreat/WebGL-Fluid-Simulation +# https://www.bilibili.com/video/BV1ZK411H7Hc?p=4 +# https://github.com/ShaneFX/GAMES201/tree/master/HW01 + +import argparse + +import numpy as np + +import taichi as ti + +ti.init(arch=ti.vulkan) + +res = 512 +dt = 0.03 +p_jacobi_iters = 500 # 40 for a quicker but less accurate result +f_strength = 10000.0 +curl_strength = 0 +time_c = 2 +maxfps = 60 +dye_decay = 1 - 1 / (maxfps * time_c) +force_radius = res / 2.0 + + +class TexPair: + def __init__(self, cur, nxt): + self.cur = cur + self.nxt = nxt + + def swap(self): + self.cur, self.nxt = self.nxt, self.cur + + +@ti.func +def sample(qf: ti.template(), u, v): + I = ti.Vector([int(u), int(v)]) + I = ti.max(0, ti.min(res - 1, I)) + return qf[I] + + +@ti.func +def lerp(vl, vr, frac): + # frac: [0.0, 1.0] + return vl + frac * (vr - vl) + + +@ti.func +def bilerp(vf: ti.template(), p): + u, v = p + s, t = u - 0.5, v - 0.5 + # floor + iu, iv = ti.floor(s), ti.floor(t) + # fract + fu, fv = s - iu, t - iv + a = sample(vf, iu, iv) + b = sample(vf, iu + 1, iv) + c = sample(vf, iu, iv + 1) + d = sample(vf, iu + 1, iv + 1) + return lerp(lerp(a, b, fu), lerp(c, d, fu), fv) + + +# 3rd order Runge-Kutta +@ti.func +def backtrace(vf: ti.template(), p, dt_: ti.template()): + v1 = bilerp(vf, p) + p1 = p - 0.5 * dt_ * v1 + v2 = bilerp(vf, p1) + p2 = p - 0.75 * dt_ * v2 + v3 = bilerp(vf, p2) + p -= dt_ * ((2 / 9) * v1 + (1 / 3) * v2 + (4 / 9) * v3) + return p + + +@ti.kernel +def advect( + vf: ti.types.ndarray(ndim=2), + qf: ti.types.ndarray(ndim=2), + new_qf: ti.types.ndarray(ndim=2), +): + for i, j in vf: + p = ti.Vector([i, j]) + 0.5 + p = backtrace(vf, p, dt) + new_qf[i, j] = bilerp(qf, p) * dye_decay + + +@ti.kernel +def apply_impulse( + vf: ti.types.ndarray(ndim=2), + dyef: ti.types.ndarray(ndim=2), + imp_data: ti.types.ndarray(ndim=1), +): + g_dir = -ti.Vector([0, 9.8]) * 300 + for i, j in vf: + omx, omy = imp_data[2], imp_data[3] + mdir = ti.Vector([imp_data[0], imp_data[1]]) + dx, dy = (i + 0.5 - omx), (j + 0.5 - omy) + d2 = dx * dx + dy * dy + # dv = F * dt + factor = ti.exp(-d2 / force_radius) + + dc = dyef[i, j] + a = dc.norm() + + momentum = (mdir * f_strength * factor + g_dir * a / (1 + a)) * dt + + v = vf[i, j] + vf[i, j] = v + momentum + # add dye + if mdir.norm() > 0.5: + dc += ti.exp(-d2 * (4 / (res / 15) ** 2)) * ti.Vector([imp_data[4], imp_data[5], imp_data[6]]) + + dyef[i, j] = dc + + +@ti.kernel +def divergence(vf: ti.types.ndarray(ndim=2), velocity_divs: ti.types.ndarray(ndim=2)): + for i, j in vf: + vl = sample(vf, i - 1, j) + vr = sample(vf, i + 1, j) + vb = sample(vf, i, j - 1) + vt = sample(vf, i, j + 1) + vc = sample(vf, i, j) + if i == 0: + vl.x = -vc.x + if i == res - 1: + vr.x = -vc.x + if j == 0: + vb.y = -vc.y + if j == res - 1: + vt.y = -vc.y + velocity_divs[i, j] = (vr.x - vl.x + vt.y - vb.y) * 0.5 + + +@ti.kernel +def pressure_jacobi( + pf: ti.types.ndarray(ndim=2), + new_pf: ti.types.ndarray(ndim=2), + velocity_divs: ti.types.ndarray(ndim=2), +): + for i, j in pf: + pl = sample(pf, i - 1, j) + pr = sample(pf, i + 1, j) + pb = sample(pf, i, j - 1) + pt = sample(pf, i, j + 1) + div = velocity_divs[i, j] + new_pf[i, j] = (pl + pr + pb + pt - div) * 0.25 + + +@ti.kernel +def subtract_gradient(vf: ti.types.ndarray(ndim=2), pf: ti.types.ndarray(ndim=2)): + for i, j in vf: + pl = sample(pf, i - 1, j) + pr = sample(pf, i + 1, j) + pb = sample(pf, i, j - 1) + pt = sample(pf, i, j + 1) + vf[i, j] -= 0.5 * ti.Vector([pr - pl, pt - pb]) + + +def solve_pressure_jacobi(): + for _ in range(p_jacobi_iters): + pressure_jacobi(pressures_pair.cur, pressures_pair.nxt, _velocity_divs) + pressures_pair.swap() + + +def step_orig(mouse_data): + advect(velocities_pair.cur, velocities_pair.cur, velocities_pair.nxt) + advect(velocities_pair.cur, dyes_pair.cur, dyes_pair.nxt) + velocities_pair.swap() + dyes_pair.swap() + + apply_impulse(velocities_pair.cur, dyes_pair.cur, mouse_data) + + divergence(velocities_pair.cur, _velocity_divs) + + solve_pressure_jacobi() + + subtract_gradient(velocities_pair.cur, pressures_pair.cur) + + +mouse_data_ti = ti.ndarray(ti.f32, shape=(8,)) + + +class MouseDataGen: + def __init__(self): + self.prev_mouse = None + self.prev_color = None + + def __call__(self, gui): + # [0:2]: normalized delta direction + # [2:4]: current mouse xy + # [4:7]: color + mouse_data = np.zeros(8, dtype=np.float32) + if gui.is_pressed(ti.GUI.LMB): + mxy = np.array(gui.get_cursor_pos(), dtype=np.float32) * res + if self.prev_mouse is None: + self.prev_mouse = mxy + # Set lower bound to 0.3 to prevent too dark colors + self.prev_color = (np.random.rand(3) * 0.7) + 0.3 + else: + mdir = mxy - self.prev_mouse + mdir = mdir / (np.linalg.norm(mdir) + 1e-5) + mouse_data[0], mouse_data[1] = mdir[0], mdir[1] + mouse_data[2], mouse_data[3] = mxy[0], mxy[1] + mouse_data[4:7] = self.prev_color + self.prev_mouse = mxy + else: + self.prev_mouse = None + self.prev_color = None + mouse_data_ti.from_numpy(mouse_data) # false+, pylint: disable=no-member + return mouse_data_ti + + +def reset(): + velocities_pair.cur.fill(0) + pressures_pair.cur.fill(0) + dyes_pair.cur.fill(0) + + +def main(): + global velocities_pair, pressures_pair, dyes_pair, curl_strength + + paused = False + + parser = argparse.ArgumentParser() + parser.add_argument("--baseline", action="store_true") + args, _ = parser.parse_known_args() + + gui = ti.GUI("Stable Fluid", (res, res)) + md_gen = MouseDataGen() + + _velocities = ti.Vector.ndarray(2, float, shape=(res, res)) + _new_velocities = ti.Vector.ndarray(2, float, shape=(res, res)) + _velocity_divs = ti.ndarray(float, shape=(res, res)) + _pressures = ti.ndarray(float, shape=(res, res)) + _new_pressures = ti.ndarray(float, shape=(res, res)) + _dye_buffer = ti.Vector.ndarray(3, float, shape=(res, res)) + _new_dye_buffer = ti.Vector.ndarray(3, float, shape=(res, res)) + + if args.baseline: + velocities_pair = TexPair(_velocities, _new_velocities) + pressures_pair = TexPair(_pressures, _new_pressures) + dyes_pair = TexPair(_dye_buffer, _new_dye_buffer) + else: + print("running in graph mode") + velocities_pair_cur = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, "velocities_pair_cur", dtype=ti.math.vec2, ndim=2) + velocities_pair_nxt = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, "velocities_pair_nxt", dtype=ti.math.vec2, ndim=2) + dyes_pair_cur = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, "dyes_pair_cur", dtype=ti.math.vec3, ndim=2) + dyes_pair_nxt = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, "dyes_pair_nxt", dtype=ti.math.vec3, ndim=2) + pressures_pair_cur = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, "pressures_pair_cur", dtype=ti.f32, ndim=2) + pressures_pair_nxt = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, "pressures_pair_nxt", dtype=ti.f32, ndim=2) + velocity_divs = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, "velocity_divs", dtype=ti.f32, ndim=2) + mouse_data = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, "mouse_data", dtype=ti.f32, ndim=1) + + g1_builder = ti.graph.GraphBuilder() + g1_builder.dispatch(advect, velocities_pair_cur, velocities_pair_cur, velocities_pair_nxt) + g1_builder.dispatch(advect, velocities_pair_cur, dyes_pair_cur, dyes_pair_nxt) + g1_builder.dispatch(apply_impulse, velocities_pair_nxt, dyes_pair_nxt, mouse_data) + g1_builder.dispatch(divergence, velocities_pair_nxt, velocity_divs) + # swap is unrolled in the loop so we only need p_jacobi_iters // 2 iterations. + for _ in range(p_jacobi_iters // 2): + g1_builder.dispatch(pressure_jacobi, pressures_pair_cur, pressures_pair_nxt, velocity_divs) + g1_builder.dispatch(pressure_jacobi, pressures_pair_nxt, pressures_pair_cur, velocity_divs) + g1_builder.dispatch(subtract_gradient, velocities_pair_nxt, pressures_pair_cur) + g1 = g1_builder.compile() + + g2_builder = ti.graph.GraphBuilder() + g2_builder.dispatch(advect, velocities_pair_nxt, velocities_pair_nxt, velocities_pair_cur) + g2_builder.dispatch(advect, velocities_pair_nxt, dyes_pair_nxt, dyes_pair_cur) + g2_builder.dispatch(apply_impulse, velocities_pair_cur, dyes_pair_cur, mouse_data) + g2_builder.dispatch(divergence, velocities_pair_cur, velocity_divs) + for _ in range(p_jacobi_iters // 2): + g2_builder.dispatch(pressure_jacobi, pressures_pair_cur, pressures_pair_nxt, velocity_divs) + g2_builder.dispatch(pressure_jacobi, pressures_pair_nxt, pressures_pair_cur, velocity_divs) + g2_builder.dispatch(subtract_gradient, velocities_pair_cur, pressures_pair_cur) + g2 = g2_builder.compile() + + swap = True + + while gui.running: + if gui.get_event(ti.GUI.PRESS): + e = gui.event + if e.key == ti.GUI.ESCAPE: + break + elif e.key == "r": + paused = False + reset() + elif e.key == "s": + if curl_strength: + curl_strength = 0 + else: + curl_strength = 7 + elif e.key == "p": + paused = not paused + + if not paused: + _mouse_data = md_gen(gui) + if args.baseline: + step_orig(_mouse_data) + gui.set_image(dyes_pair.cur.to_numpy()) + else: + invoke_args = { + "mouse_data": _mouse_data, + "velocities_pair_cur": _velocities, + "velocities_pair_nxt": _new_velocities, + "dyes_pair_cur": _dye_buffer, + "dyes_pair_nxt": _new_dye_buffer, + "pressures_pair_cur": _pressures, + "pressures_pair_nxt": _new_pressures, + "velocity_divs": _velocity_divs, + } + if swap: + g1.run(invoke_args) + gui.set_image(_dye_buffer.to_numpy()) # false+, pylint: disable=no-member + swap = False + else: + g2.run(invoke_args) + gui.set_image(_new_dye_buffer.to_numpy()) # false+, pylint: disable=no-member + swap = True + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/graph/texture_graph.py b/local_packages/taichi/examples/graph/texture_graph.py new file mode 100644 index 0000000..86296d1 --- /dev/null +++ b/local_packages/taichi/examples/graph/texture_graph.py @@ -0,0 +1,75 @@ +from taichi.examples.patterns import taichi_logo + +import taichi as ti + +ti.init(arch=ti.vulkan) + +res = (512, 512) +img = ti.Vector.field(4, dtype=float, shape=res) +pixels_arr = ti.Vector.ndarray(4, dtype=float, shape=res) + +texture = ti.Texture(ti.Format.r32f, (128, 128)) + + +@ti.kernel +def make_texture(tex: ti.types.rw_texture(num_dimensions=2, fmt=ti.Format.r32f, lod=0)): + for i, j in ti.ndrange(128, 128): + ret = ti.cast(taichi_logo(ti.Vector([i, j]) / 128), ti.f32) + tex.store(ti.Vector([i, j]), ti.Vector([ret, 0.0, 0.0, 0.0])) + + +@ti.kernel +def paint(t: ti.f32, pixels: ti.types.ndarray(ndim=2), tex: ti.types.texture(num_dimensions=2)): + for i, j in pixels: + uv = ti.Vector([i / res[0], j / res[1]]) + warp_uv = uv + ti.Vector([ti.cos(t + uv.x * 5.0), ti.sin(t + uv.y * 5.0)]) * 0.1 + c = ti.math.vec4(0.0) + if uv.x > 0.5: + c = tex.sample_lod(warp_uv, 0.0) + else: + c = tex.fetch(ti.cast(warp_uv * 128, ti.i32), 0) + pixels[i, j] = [c.r, c.r, c.r, 1.0] + + +@ti.kernel +def copy_to_field(pixels: ti.types.ndarray(ndim=2)): + for I in ti.grouped(pixels): + img[I] = pixels[I] + + +def main(): + _t = ti.graph.Arg(ti.graph.ArgKind.SCALAR, "t", ti.f32) + _pixels_arr = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, "pixels_arr", dtype=ti.math.vec4, ndim=2) + + _rw_tex = ti.graph.Arg(ti.graph.ArgKind.RWTEXTURE, "rw_tex", fmt=ti.Format.r32f, ndim=2) + g_init_builder = ti.graph.GraphBuilder() + g_init_builder.dispatch(make_texture, _rw_tex) + g_init = g_init_builder.compile() + + g_init.run({"rw_tex": texture}) + _tex = ti.graph.Arg(ti.graph.ArgKind.TEXTURE, "tex", ndim=2) + g_builder = ti.graph.GraphBuilder() + g_builder.dispatch(paint, _t, _pixels_arr, _tex) + g = g_builder.compile() + + aot = False + if aot: + tmpdir = "shaders" + mod = ti.aot.Module(ti.vulkan) + mod.add_graph("g", g) + mod.add_graph("g_init", g_init) + mod.save(tmpdir, "") + else: + t = 0.0 + window = ti.ui.Window("UV", res) + canvas = window.get_canvas() + while window.running: + g.run({"t": t, "pixels_arr": pixels_arr, "tex": texture}) + copy_to_field(pixels_arr) + canvas.set_image(img) + window.show() + t += 0.03 + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/machine_learning/differential_evolution.py b/local_packages/taichi/examples/machine_learning/differential_evolution.py new file mode 100644 index 0000000..02f877c --- /dev/null +++ b/local_packages/taichi/examples/machine_learning/differential_evolution.py @@ -0,0 +1,282 @@ +# Authored by Erqi Chen. +# This script shows the optimization process of differential evolution. +# The black points are the search agents, and they finally find the minimum solution. + + +import numpy as np +import taichi as ti +import matplotlib.pyplot as plt +from mpl_toolkits.mplot3d import Axes3D + +ti.init(arch=ti.cpu) + + +@ti.func +def clip(_pop: ti.template(), _lb: ti.template(), _ub: ti.template()): + _search_num, _dim = _pop.shape + for ii, j in ti.ndrange(_search_num, _dim): + if _pop[ii, j] > _ub[j]: + _pop[ii, j] = _ub[j] + elif _pop[ii, j] < _lb[j]: + _pop[ii, j] = _lb[j] + + +@ti.func +def clip_only(_trial: ti.template(), _lb: ti.template(), _ub: ti.template()): + _dim = _trial.shape[0] + for j in range(_dim): + if _trial[j] > _ub[j]: + _trial[j] = _ub[j] + elif _trial[j] < _lb[j]: + _trial[j] = _lb[j] + + +@ti.func +def f1(_fit: ti.template(), _pop: ti.template()): + _search_num, _dim = _pop.shape + for ii in range(_search_num): + cur = 0.0 + for j in range(_dim): + cur += ti.pow(_pop[ii, j], 2) + + _fit[ii] = cur + + +@ti.func +def f1_only(_trial: ti.template()) -> ti.float32: + _dim = _trial.shape[0] + _res = 0.0 + for j in range(_dim): + _res += ti.pow(_trial[j], 2) + + return _res + + +@ti.func +def find_min(_fit: ti.template()) -> ti.i32: + _search_num = _fit.shape[0] + min_fit = _fit[0] + min_pos = 0 + for _ in ti.ndrange(1): + for ii in ti.ndrange(_search_num): + if min_fit < _fit[ii]: + min_fit = _fit[ii] + min_pos = ii + return min_pos + + +@ti.func +def rand_int(low: ti.i32, high: ti.i32) -> ti.i32: + r = ti.random(float) + _res = r * (high - low) + low + + return ti.round(_res, dtype=ti.i32) + + +@ti.func +def copy_pop_to_field(_pop: ti.template(), _trial: ti.template(), ind: ti.i32): + _, _dim = _pop.shape + for j in range(_dim): + _trial[j] = _pop[ind, j] + + +@ti.func +def copy_field_to_pop(_pop: ti.template(), _trial: ti.template(), ind: ti.i32): + _, _dim = _pop.shape + for j in range(dim): + _pop[ind, j] = _trial[j] + + +@ti.func +def copy_2d_to_3d(a: ti.template(), b: ti.template(), _iter: ti.i32): + r, c = b.shape + for ii, j in ti.ndrange(r, c): + a[_iter, ii, j] = b[ii, j] + + +@ti.func +def copy_field_a_to_b(a: ti.template(), b: ti.template()): + _dim = a.shape[0] + for j in range(_dim): + b[j] = a[j] + + +@ti.func +def de_crossover(_pop: ti.template(), _trial: ti.template(), a: ti.i32, b: ti.i32, c: ti.i32): + _, _dim = _pop.shape + CR = 0.5 + para_F = 0.7 + for k in range(_dim): + r = ti.random(float) + if r < CR or k == _dim - 1: + _trial[k] = _pop[c, k] + para_F * (_pop[a, k] - pop[b, k]) + + +@ti.func +def de_loop( + _pop: ti.template(), + all_best: ti.float32, + _fit: ti.template(), + _trial: ti.template(), + _lb: ti.template(), + _ub: ti.template(), +) -> ti.float32: + _search_num, _ = _pop.shape + for ii in range(_search_num): + copy_pop_to_field(_pop=_pop, _trial=_trial, ind=ii) + + a = rand_int(low=0, high=_search_num) + while a == ii: + a = rand_int(low=0, high=_search_num) + + b = rand_int(low=0, high=_search_num) + while b == ii or a == b: + b = rand_int(low=0, high=_search_num) + + c = rand_int(low=0, high=_search_num) + while c == ii or c == a or c == b: + c = rand_int(low=0, high=_search_num) + + de_crossover(_pop=_pop, _trial=_trial, a=a, b=b, c=c) + clip_only(_trial=_trial, _lb=_lb, _ub=_ub) + next_fit = f1_only(_trial=_trial) + if next_fit < _fit[ii]: + copy_field_to_pop(_pop=_pop, _trial=_trial, ind=ii) + _fit[ii] = next_fit + if next_fit < all_best: + all_best = next_fit + copy_field_a_to_b(a=_trial, b=best_pop) + + return all_best + + +@ti.kernel +def DE( + _pop: ti.template(), + _max_iter: ti.i32, + _lb: ti.template(), + _ub: ti.template(), + _fit: ti.template(), + _best_fit: ti.template(), + _trial: ti.template(), +): + f1(_fit=_fit, _pop=_pop) + min_pos = find_min(_fit=_fit) + all_best = _fit[min_pos] + _best_fit[0] = all_best + copy_2d_to_3d(a=all_pop, b=_pop, _iter=0) + + for _ in range(1): + for cur_iter in range(1, _max_iter + 1): + all_best = de_loop(_pop=_pop, _fit=_fit, all_best=all_best, _trial=_trial, _lb=_lb, _ub=_ub) + _best_fit[cur_iter] = all_best + copy_2d_to_3d(a=all_pop, b=_pop, _iter=cur_iter) + + +search_num = 20 +dim = 2 +max_iter = 50 + +_lb = np.ones(dim).astype(np.int32) * (-100) +lb = ti.field(ti.i32, shape=dim) +lb.from_numpy(_lb) + +_ub = np.ones(dim).astype(np.int32) * 100 +ub = ti.field(ti.i32, shape=dim) +ub.from_numpy(_ub) + +pop = ti.field(ti.float32, shape=(search_num, dim)) +pop.from_numpy((np.random.random((search_num, dim)) * (_ub - _lb) + _lb).astype(np.float32)) + +fit = ti.field(ti.float32, shape=(search_num,)) +best_fit = ti.field(ti.float32, shape=(max_iter,)) +best_pop = ti.field(ti.float32, shape=(search_num,)) +all_pop = ti.field(ti.float32, shape=(max_iter, search_num, dim)) + +trial = ti.field(ti.float32, shape=(search_num,)) + +DE(_pop=pop, _max_iter=max_iter, _lb=lb, _ub=ub, _fit=fit, _best_fit=best_fit, _trial=trial) + +res = best_fit.to_numpy() + + +@ti.kernel +def draw_contour(): + for ii, j in ti.ndrange(201, 201): + z[ii, j] = x[ii] ** 2 + y[j] ** 2 + + +_x = np.arange(-100, 101, 1) +x = ti.field(ti.float32, shape=201) +x.from_numpy(_x) +_y = np.arange(-100, 101, 1) +y = ti.field(ti.float32, shape=201) +y.from_numpy(_y) +z = ti.field(ti.float32, shape=(201, 201)) + +draw_contour() + +_z = z.to_numpy() +_pop = all_pop.to_numpy() + +plt.ion() + +"""2d visualization""" +plt.contourf(_x, _y, _z) +plt.colorbar() + +for i in range(max_iter): + plt.cla() + plt.contourf(_x, _y, _z) + plt.scatter(_pop[i, :, 0], _pop[i, :, 1], color="black") + plt.title(f"cur_iter: {i}, best_fit: {best_fit[i]:.2f}") + # plt.savefig(f"./2dimg/iter-{i}.png") + plt.pause(0.5) + + +# import imageio.v2 as imageio +# import os +# +# png_ls = os.listdir("./img") +# f = [] +# for i in png_ls: +# f.append(imageio.imread("./img/" + i)) +# +# imageio.mimsave("res.gif", f, "GIF", duration=0.5) + + +"""3d visualization""" +mesh_x, mesh_y = np.meshgrid(_x, _y) + +fig = plt.figure() +ax = Axes3D(fig, auto_add_to_figure=False) +ax.view_init(elev=51, azim=-70) +fig.add_axes(ax) +ax.plot_surface(mesh_x, mesh_y, _z, cmap="viridis", alpha=0.7) + +for i in range(max_iter): + ax.cla() + ax.plot_surface(mesh_x, mesh_y, _z, cmap="viridis", alpha=0.7) + + row = [] + col = [] + val = [] + nr, _ = _pop[i, :, :].shape + for _i in range(nr - 1): + row.append(np.round(_pop[i, _i, 0]).astype(int)) + col.append(np.round(_pop[i, _i, 1]).astype(int)) + val.append(_z[np.round(_pop[i, _i, 0]).astype(int) + 100, np.round(_pop[i, _i, 1]).astype(int) + 100]) + + ax.scatter3D(row, col, val, color="black") + # plt.savefig(f"./3dimg/iter-{i}.png") + plt.pause(0.5) +# +# +# import imageio.v2 as imageio +# import os +# +# png_ls = os.listdir("./3dimg") +# f = [] +# for i in png_ls: +# f.append(imageio.imread("./3dimg/" + i)) +# imageio.mimsave("3dres.gif", f, "GIF", duration=0.5) diff --git a/local_packages/taichi/examples/minimal.py b/local_packages/taichi/examples/minimal.py new file mode 100644 index 0000000..ce7c87a --- /dev/null +++ b/local_packages/taichi/examples/minimal.py @@ -0,0 +1,12 @@ +import taichi as ti + +ti.init() + + +@ti.kernel +def p() -> ti.f32: + print(42) + return 40 + 2 + + +print(p()) diff --git a/local_packages/taichi/examples/patterns.py b/local_packages/taichi/examples/patterns.py new file mode 100644 index 0000000..ba1001f --- /dev/null +++ b/local_packages/taichi/examples/patterns.py @@ -0,0 +1,32 @@ +import taichi as ti + + +@ti.func +def taichi_logo(pos: ti.template(), scale: float = 1 / 1.11): + p = (pos - 0.5) / scale + 0.5 + ret = -1 + if not (p - 0.50).norm_sqr() <= 0.52**2: + if ret == -1: + ret = 0 + if not (p - 0.50).norm_sqr() <= 0.495**2: + if ret == -1: + ret = 1 + if (p - ti.Vector([0.50, 0.25])).norm_sqr() <= 0.08**2: + if ret == -1: + ret = 1 + if (p - ti.Vector([0.50, 0.75])).norm_sqr() <= 0.08**2: + if ret == -1: + ret = 0 + if (p - ti.Vector([0.50, 0.25])).norm_sqr() <= 0.25**2: + if ret == -1: + ret = 0 + if (p - ti.Vector([0.50, 0.75])).norm_sqr() <= 0.25**2: + if ret == -1: + ret = 1 + if p[0] < 0.5: + if ret == -1: + ret = 1 + else: + if ret == -1: + ret = 0 + return 1 - ret diff --git a/local_packages/taichi/examples/real_func/algorithm/marching_squares.py b/local_packages/taichi/examples/real_func/algorithm/marching_squares.py new file mode 100644 index 0000000..47c3fc2 --- /dev/null +++ b/local_packages/taichi/examples/real_func/algorithm/marching_squares.py @@ -0,0 +1,183 @@ +""" +Marching squares algorithm in Taichi. +See "https://en.wikipedia.org/wiki/Marching_squares" +""" + +import time + +import numpy as np + +import taichi as ti +import taichi.math as tm + +ti.init(arch=ti.cpu) + +W, H = 800, 600 +resolution = (W, H) +grid_size = 8 +level = 0.15 # Draw contour of isofunc=level +pixels = ti.Vector.field(3, float, shape=resolution) + +# Cases 0-15 in the wikipedia page +_edges_np = np.array( + [ + [[-1, -1], [-1, -1]], + [[3, 0], [-1, -1]], + [[0, 1], [-1, -1]], + [[1, 3], [-1, -1]], + [[1, 2], [-1, -1]], + [[0, 1], [2, 3]], + [[0, 2], [-1, -1]], + [[2, 3], [-1, -1]], + [[2, 3], [-1, -1]], + [[0, 2], [-1, -1]], + [[0, 3], [1, 2]], + [[1, 2], [-1, -1]], + [[1, 3], [-1, -1]], + [[0, 1], [-1, -1]], + [[3, 0], [-1, -1]], + [[-1, -1], [-1, -1]], + ], + dtype=np.int32, +) +edge_table = ti.Matrix.field(2, 2, int, 16) +edge_table.from_numpy(_edges_np) + +Edge = ti.types.struct(p0=tm.vec2, p1=tm.vec2) +edges = Edge.field() +ti.root.dynamic(ti.i, 1024, chunk_size=32).place(edges) +iTime = ti.field(float, shape=()) + + +@ti.real_func +def hash22(p_: tm.vec2) -> tm.vec2: + n = tm.sin(tm.dot(p_, tm.vec2(41, 289))) + p = tm.fract(tm.vec2(262144, 32768) * n) + return tm.sin(p * 6.28 + iTime[None]) + + +@ti.real_func +def noise(p_: tm.vec2) -> float: + ip = tm.floor(p_) + p = p_ - ip + v = tm.vec4( + tm.dot(hash22(ip), p), + tm.dot(hash22(ip + tm.vec2(1, 0)), p - tm.vec2(1, 0)), + tm.dot(hash22(ip + tm.vec2(0, 1)), p - tm.vec2(0, 1)), + tm.dot(hash22(ip + tm.vec2(1, 1)), p - tm.vec2(1, 1)), + ) + p = p * p * p * (p * (p * 6 - 15) + 10) + return tm.mix(tm.mix(v.x, v.y, p.x), tm.mix(v.z, v.w, p.x), p.y) + + +@ti.real_func +def isofunc(p: tm.vec2) -> float: + return noise(p / 4 + 1) + + +@ti.real_func +def interp(p1: tm.vec2, p2: tm.vec2, v1: float, v2: float, isovalue: float) -> tm.vec2: + return tm.mix(p1, p2, (isovalue - v1) / (v2 - v1)) + + +@ti.real_func +def get_vertex(vertex_id: int, values: tm.vec4, isovalue: float) -> tm.vec2: + v = tm.vec2(0) + square = [tm.vec2(0), tm.vec2(1, 0), tm.vec2(1, 1), tm.vec2(0, 1)] + if vertex_id == 0: + v = interp(square[0], square[1], values.x, values.y, isovalue) + elif vertex_id == 1: + v = interp(square[1], square[2], values.y, values.z, isovalue) + elif vertex_id == 2: + v = interp(square[2], square[3], values.z, values.w, isovalue) + else: + v = interp(square[3], square[0], values.w, values.x, isovalue) + return v + + +@ti.kernel +def march_squares() -> int: + edges.deactivate() + x_range = tm.ceil(grid_size * W / H, int) + y_range = grid_size + for i, j in ti.ndrange((-x_range, x_range + 1), (-y_range, y_range + 1)): + case_id = 0 + values = tm.vec4( + isofunc(tm.vec2(i, j)), + isofunc(tm.vec2(i + 1, j)), + isofunc(tm.vec2(i + 1, j + 1)), + isofunc(tm.vec2(i, j + 1)), + ) + if values.x > level: + case_id |= 1 + if values.y > level: + case_id |= 2 + if values.z > level: + case_id |= 4 + if values.w > level: + case_id |= 8 + + # Fix the ambiguity for case 5 and 10 + if case_id == 5 or case_id == 10: + center = isofunc(tm.vec2(i + 0.5, j + 0.5)) + if center < level: # A valley, switch the case_id + case_id = 15 - case_id + + for k in ti.static(range(2)): + if edge_table[case_id][k, 0] != -1: + ind1 = edge_table[case_id][k, 0] + ind2 = edge_table[case_id][k, 1] + p0 = tm.vec2(i, j) + get_vertex(ind1, values, level) + p1 = tm.vec2(i, j) + get_vertex(ind2, values, level) + edges.append(Edge(p0, p1)) + + return edges.length() + + +@ti.real_func +def dseg(p_: tm.vec2, a: tm.vec2, b_: tm.vec2) -> ti.f32: + p = p_ - a + b = b_ - a + h = tm.clamp(tm.dot(p, b) / tm.dot(b, b), 0, 1) + return tm.length(p - h * b) + + +@ti.kernel +def render(): + for i, j in pixels: + p = (2 * tm.vec2(i, j) - tm.vec2(resolution)) / H + p *= grid_size + p.y += 1.2 + + q = tm.fract(p) - 0.5 + d2 = 0.5 - abs(q) + dgrid = ti.min(d2.x, d2.y) + dedge = 1e5 + dv = 1e5 + for k in range(edges.length()): + s, e = edges[k].p0, edges[k].p1 + dedge = ti.min(dedge, dseg(p, s, e)) + dv = ti.min(dv, tm.length(p - s), tm.length(p - e)) + + col = tm.vec3(0.3, 0.6, 0.8) + if isofunc(p) > level: + col = tm.vec3(1, 0.8, 0.3) + + # Draw background grid + col = tm.mix(col, tm.vec3(0), 1 - tm.smoothstep(0, 0.04, dgrid - 0.02)) + # Draw edges + col = tm.mix(col, tm.vec3(1, 0, 0), 1 - tm.smoothstep(0, 0.05, dedge - 0.02)) + # Draw small circles at the vertices + col = tm.mix(col, tm.vec3(0), 1 - tm.smoothstep(0, 0.05, dv - 0.1)) + col = tm.mix(col, tm.vec3(1), 1 - tm.smoothstep(0, 0.05, dv - 0.08)) + pixels[i, j] = tm.sqrt(tm.clamp(col, 0, 1)) + + +t0 = time.perf_counter() +gui = ti.GUI("2D Marching Squares", res=resolution, fast_gui=True) +while gui.running and not gui.get_event(gui.ESCAPE): + iTime[None] = time.perf_counter() - t0 + march_squares() + render() + gui.set_image(pixels) + gui.show() diff --git a/local_packages/taichi/examples/real_func/algorithm/poisson_disk_sampling.py b/local_packages/taichi/examples/real_func/algorithm/poisson_disk_sampling.py new file mode 100644 index 0000000..86c2567 --- /dev/null +++ b/local_packages/taichi/examples/real_func/algorithm/poisson_disk_sampling.py @@ -0,0 +1,168 @@ +""" +Poisson disk sampling in Taichi, a fancy version. +Based on Yuanming Hu's code: https://github.com/taichi-dev/poisson_disk_sampling + +User interface: + +1. Click on the window to restart the animation. +2. Press `p` to save screenshot. +""" + +import taichi as ti +import taichi.math as tm +import typing + +ti.init(arch=ti.cuda) + +grid_n = 20 +dx = 1 / grid_n +radius = dx * tm.sqrt(2) +desired_samples = 200 +grid = ti.field(dtype=int, shape=(grid_n, grid_n)) +samples = ti.Vector.field(2, dtype=float, shape=desired_samples) +window_size = 800 +dfield = ti.Vector.field(4, dtype=float, shape=(window_size, window_size)) +img = ti.Vector.field(3, dtype=float, shape=(window_size, window_size)) +iMouse = ti.Vector.field(2, dtype=float, shape=()) +iMouse[None] = [0.5, 0.5] +iResolution = tm.vec2(window_size) +head = ti.field(int, shape=()) +tail = ti.field(int, shape=()) +sample_count = ti.field(int, shape=()) + + +@ti.real_func +def coord_to_index(p: tm.vec2) -> tm.ivec2: + return (p * tm.vec2(grid_n)).cast(int) + + +@ti.kernel +def refresh_scene(): + head[None] = 0 + tail[None] = 1 + sample_count[None] = 1 + + samples[0] = (p0 := iMouse[None]) + grid[coord_to_index(p0)] = 0 + + for i, j in grid: + grid[i, j] = -1 + + for i, j in dfield: + dfield[i, j] = tm.vec4(1e5) + img[i, j] = tm.vec3(1) + + +@ti.real_func +def find_nearest_point(p: tm.vec2) -> typing.Tuple[float, tm.vec2]: + x, y = coord_to_index(p) + dmin = 1e5 + nearest = iMouse[None] + for i in range(ti.max(0, x - 2), ti.min(grid_n, x + 3)): + for j in range(ti.max(0, y - 2), ti.min(grid_n, y + 3)): + ind = grid[i, j] + if ind != -1: + q = samples[ind] + d = (q - p).norm() + if d < dmin: + dmin = d + nearest = q + return dmin, nearest + + +@ti.kernel +def poisson_disk_sample(num_samples: int) -> int: + while head[None] < tail[None] and head[None] < ti.min(num_samples, desired_samples): + source_x = samples[head[None]] + head[None] += 1 + + for _ in range(100): + theta = ti.random() * 2 * tm.pi + offset = tm.vec2(tm.cos(theta), tm.sin(theta)) * (1 + ti.random()) * radius + new_x = source_x + offset + new_index = coord_to_index(new_x) + + if 0 <= new_x[0] < 1 and 0 <= new_x[1] < 1: + collision = find_nearest_point(new_x)[0] < radius - 1e-6 + if not collision and tail[None] < desired_samples: + samples[tail[None]] = new_x + grid[new_index] = tail[None] + tail[None] += 1 + return tail[None] + + +@ti.real_func +def hash21(p: tm.vec2) -> float: + return tm.fract(tm.sin(tm.dot(p, tm.vec2(127.619, 157.583))) * 43758.5453) + + +@ti.real_func +def sample_dist(uv_: tm.vec2) -> tm.vec4: + uv = uv_ * iResolution + x, y = tm.clamp(0, iResolution - 1, uv).cast(int) + return dfield[x, y] + + +@ti.kernel +def compute_distance_field(): + for i, j in dfield: + uv = tm.vec2(i, j) / iResolution + d, p = find_nearest_point(uv) + d = (uv - p).norm() - radius / 2.0 + dfield[i, j] = tm.vec4(d, p.x, p.y, radius / 2.0) + + +@ti.kernel +def render(): + for i, j in img: + uv = tm.vec2(i, j) / iResolution.y + st = tm.fract(uv * grid_n) - 0.5 + dg = 0.5 - abs(st) + d1 = ti.min(dg.x, dg.y) + d1 = tm.smoothstep(0.05, 0.0, d1) + col = (1 - tm.vec3(d1)) * 0.7 + sf = 2 / iResolution.y + buf = sample_dist(uv) + bufSh = sample_dist(uv + tm.vec2(0.005, 0.015)) + cCol = tm.vec3(hash21(buf.yz + 0.3), hash21(buf.yz), hash21(buf.yz + 0.09)) + pat = (abs(tm.fract(-buf.x * 150) - 0.5) * 2) / 300 + col = tm.mix(col, tm.vec3(0), (1 - tm.smoothstep(0, 3 * sf, pat)) * 0.25) + ew, ew2 = 0.005, 0.008 + cCol2 = tm.mix(cCol, tm.vec3(1), 0.9) + col = tm.mix(col, tm.vec3(0), (1 - tm.smoothstep(0, sf * 2, bufSh.x)) * 0.4) + col = tm.mix(col, tm.vec3(0), 1 - tm.smoothstep(sf, 0, -buf.x)) + col = tm.mix(col, cCol2, 1 - tm.smoothstep(sf, 0, -buf.x - ew)) + col = tm.mix(col, tm.vec3(0), 1 - tm.smoothstep(sf, 0, -buf.x - ew2 - ew)) + col = tm.mix(col, cCol, 1 - tm.smoothstep(sf, 0.0, -buf.x - ew2 - ew * 2)) + col = tm.sqrt(ti.max(col, 0)) + img[i, j] = col + + +def main(): + refresh_scene() + gui = ti.ui.Window("Poisson Disk Sampling", res=(window_size, window_size)) + canvas = gui.get_canvas() + gui.fps_limit = 10 + while gui.running: + gui.get_event(ti.ui.PRESS) + if gui.is_pressed(ti.ui.ESCAPE): + gui.running = False + + if gui.is_pressed(ti.ui.LMB): + iMouse[None] = gui.get_cursor_pos() + refresh_scene() + + if gui.is_pressed("p"): + canvas.set_image(img) + gui.save_image("screenshot.png") + + poisson_disk_sample(sample_count[None]) + sample_count[None] += 1 + compute_distance_field() + render() + canvas.set_image(img) + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/real_func/graph/stable_fluid_graph.py b/local_packages/taichi/examples/real_func/graph/stable_fluid_graph.py new file mode 100644 index 0000000..919afb4 --- /dev/null +++ b/local_packages/taichi/examples/real_func/graph/stable_fluid_graph.py @@ -0,0 +1,367 @@ +# References: +# http://developer.download.nvidia.com/books/HTML/gpugems/gpugems_ch38.html +# https://github.com/PavelDoGreat/WebGL-Fluid-Simulation +# https://www.bilibili.com/video/BV1ZK411H7Hc?p=4 +# https://github.com/ShaneFX/GAMES201/tree/master/HW01 + +import argparse + +import numpy as np + +import taichi as ti +from taichi.math import vec2, vec3 + +ti.init(arch=ti.cuda) + +res = 512 +dt = 0.03 +p_jacobi_iters = 500 # 40 for a quicker but less accurate result +f_strength = 10000.0 +curl_strength = 0 +time_c = 2 +maxfps = 60 +dye_decay = 1 - 1 / (maxfps * time_c) +force_radius = res / 2.0 + + +class TexPair: + def __init__(self, cur, nxt): + self.cur = cur + self.nxt = nxt + + def swap(self): + self.cur, self.nxt = self.nxt, self.cur + + +@ti.func +def sample_impl(qf: ti.template(), u: int, v: int): + I = ti.Vector([u, v]) + I = ti.max(0, ti.min(res - 1, I)) + return qf[I] + + +@ti.real_func +def sample2(qf: ti.types.ndarray(ndim=2), u: int, v: int) -> vec2: + return sample_impl(qf, u, v) + + +@ti.real_func +def sample3(qf: ti.types.ndarray(ndim=2), u: int, v: int) -> vec3: + return sample_impl(qf, u, v) + + +@ti.real_func +def sample0(qf: ti.types.ndarray(ndim=2), u: int, v: int) -> float: + return sample_impl(qf, u, v) + + +@ti.func +def sample(qf: ti.template(), u, v): + if ti.static(qf.element_shape() == [2]): + return sample2(qf, u, v) + if ti.static(qf.element_shape() == [3]): + return sample3(qf, u, v) + ti.static_assert(qf.element_shape() == []) + return sample0(qf, u, v) + + +@ti.func +def lerp(vl, vr, frac): + # frac: [0.0, 1.0] + return vl + frac * (vr - vl) + + +@ti.func +def bilerp_impl(vf: ti.template(), p): + u, v = p + s, t = u - 0.5, v - 0.5 + # floor + iu, iv = ti.floor(s), ti.floor(t) + # fract + fu, fv = s - iu, t - iv + a = sample(vf, iu, iv) + b = sample(vf, iu + 1, iv) + c = sample(vf, iu, iv + 1) + d = sample(vf, iu + 1, iv + 1) + return lerp(lerp(a, b, fu), lerp(c, d, fu), fv) + + +@ti.real_func +def bilerp2(vf: ti.types.ndarray(ndim=2), p: vec2) -> vec2: + return bilerp_impl(vf, p) + + +@ti.real_func +def bilerp3(vf: ti.types.ndarray(ndim=2), p: vec2) -> vec3: + return bilerp_impl(vf, p) + + +@ti.func +def bilerp(vf: ti.template(), p): + if ti.static(vf.element_shape() == [2]): + return bilerp2(vf, p) + ti.static_assert(vf.element_shape() == [3]) + return bilerp3(vf, p) + + +# 3rd order Runge-Kutta +@ti.real_func +def backtrace(vf: ti.types.ndarray(ndim=2), p: vec2, dt_: float) -> vec2: + v1 = bilerp(vf, p) + p1 = p - 0.5 * dt_ * v1 + v2 = bilerp(vf, p1) + p2 = p - 0.75 * dt_ * v2 + v3 = bilerp(vf, p2) + return p - dt_ * ((2 / 9) * v1 + (1 / 3) * v2 + (4 / 9) * v3) + + +@ti.kernel +def advect( + vf: ti.types.ndarray(ndim=2), + qf: ti.types.ndarray(ndim=2), + new_qf: ti.types.ndarray(ndim=2), +): + for i, j in vf: + p = ti.Vector([i, j]) + 0.5 + p = backtrace(vf, p, dt) + new_qf[i, j] = bilerp(qf, p) * dye_decay + + +@ti.kernel +def apply_impulse( + vf: ti.types.ndarray(ndim=2), + dyef: ti.types.ndarray(ndim=2), + imp_data: ti.types.ndarray(ndim=1), +): + g_dir = -ti.Vector([0, 9.8]) * 300 + for i, j in vf: + omx, omy = imp_data[2], imp_data[3] + mdir = ti.Vector([imp_data[0], imp_data[1]]) + dx, dy = (i + 0.5 - omx), (j + 0.5 - omy) + d2 = dx * dx + dy * dy + # dv = F * dt + factor = ti.exp(-d2 / force_radius) + + dc = dyef[i, j] + a = dc.norm() + + momentum = (mdir * f_strength * factor + g_dir * a / (1 + a)) * dt + + v = vf[i, j] + vf[i, j] = v + momentum + # add dye + if mdir.norm() > 0.5: + dc += ti.exp(-d2 * (4 / (res / 15) ** 2)) * ti.Vector([imp_data[4], imp_data[5], imp_data[6]]) + + dyef[i, j] = dc + + +@ti.kernel +def divergence(vf: ti.types.ndarray(ndim=2), velocity_divs: ti.types.ndarray(ndim=2)): + for i, j in vf: + vl = sample(vf, i - 1, j) + vr = sample(vf, i + 1, j) + vb = sample(vf, i, j - 1) + vt = sample(vf, i, j + 1) + vc = sample(vf, i, j) + if i == 0: + vl.x = -vc.x + if i == res - 1: + vr.x = -vc.x + if j == 0: + vb.y = -vc.y + if j == res - 1: + vt.y = -vc.y + velocity_divs[i, j] = (vr.x - vl.x + vt.y - vb.y) * 0.5 + + +@ti.kernel +def pressure_jacobi( + pf: ti.types.ndarray(ndim=2), + new_pf: ti.types.ndarray(ndim=2), + velocity_divs: ti.types.ndarray(ndim=2), +): + for i, j in pf: + pl = sample(pf, i - 1, j) + pr = sample(pf, i + 1, j) + pb = sample(pf, i, j - 1) + pt = sample(pf, i, j + 1) + div = velocity_divs[i, j] + new_pf[i, j] = (pl + pr + pb + pt - div) * 0.25 + + +@ti.kernel +def subtract_gradient(vf: ti.types.ndarray(ndim=2), pf: ti.types.ndarray(ndim=2)): + for i, j in vf: + pl = sample(pf, i - 1, j) + pr = sample(pf, i + 1, j) + pb = sample(pf, i, j - 1) + pt = sample(pf, i, j + 1) + vf[i, j] -= 0.5 * ti.Vector([pr - pl, pt - pb]) + + +def solve_pressure_jacobi(): + for _ in range(p_jacobi_iters): + pressure_jacobi(pressures_pair.cur, pressures_pair.nxt, _velocity_divs) + pressures_pair.swap() + + +def step_orig(mouse_data): + advect(velocities_pair.cur, velocities_pair.cur, velocities_pair.nxt) + advect(velocities_pair.cur, dyes_pair.cur, dyes_pair.nxt) + velocities_pair.swap() + dyes_pair.swap() + + apply_impulse(velocities_pair.cur, dyes_pair.cur, mouse_data) + + divergence(velocities_pair.cur, _velocity_divs) + + solve_pressure_jacobi() + + subtract_gradient(velocities_pair.cur, pressures_pair.cur) + + +mouse_data_ti = ti.ndarray(ti.f32, shape=(8,)) + + +class MouseDataGen: + def __init__(self): + self.prev_mouse = None + self.prev_color = None + + def __call__(self, gui): + # [0:2]: normalized delta direction + # [2:4]: current mouse xy + # [4:7]: color + mouse_data = np.zeros(8, dtype=np.float32) + if gui.is_pressed(ti.GUI.LMB): + mxy = np.array(gui.get_cursor_pos(), dtype=np.float32) * res + if self.prev_mouse is None: + self.prev_mouse = mxy + # Set lower bound to 0.3 to prevent too dark colors + self.prev_color = (np.random.rand(3) * 0.7) + 0.3 + else: + mdir = mxy - self.prev_mouse + mdir = mdir / (np.linalg.norm(mdir) + 1e-5) + mouse_data[0], mouse_data[1] = mdir[0], mdir[1] + mouse_data[2], mouse_data[3] = mxy[0], mxy[1] + mouse_data[4:7] = self.prev_color + self.prev_mouse = mxy + else: + self.prev_mouse = None + self.prev_color = None + mouse_data_ti.from_numpy(mouse_data) # false+, pylint: disable=no-member + return mouse_data_ti + + +def reset(): + velocities_pair.cur.fill(0) + pressures_pair.cur.fill(0) + dyes_pair.cur.fill(0) + + +def main(): + global velocities_pair, pressures_pair, dyes_pair, curl_strength + + paused = False + + parser = argparse.ArgumentParser() + parser.add_argument("--baseline", action="store_true") + args, _ = parser.parse_known_args() + + gui = ti.GUI("Stable Fluid", (res, res)) + md_gen = MouseDataGen() + + _velocities = ti.Vector.ndarray(2, float, shape=(res, res)) + _new_velocities = ti.Vector.ndarray(2, float, shape=(res, res)) + _velocity_divs = ti.ndarray(float, shape=(res, res)) + _pressures = ti.ndarray(float, shape=(res, res)) + _new_pressures = ti.ndarray(float, shape=(res, res)) + _dye_buffer = ti.Vector.ndarray(3, float, shape=(res, res)) + _new_dye_buffer = ti.Vector.ndarray(3, float, shape=(res, res)) + + if args.baseline: + velocities_pair = TexPair(_velocities, _new_velocities) + pressures_pair = TexPair(_pressures, _new_pressures) + dyes_pair = TexPair(_dye_buffer, _new_dye_buffer) + else: + print("running in graph mode") + velocities_pair_cur = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, "velocities_pair_cur", dtype=ti.math.vec2, ndim=2) + velocities_pair_nxt = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, "velocities_pair_nxt", dtype=ti.math.vec2, ndim=2) + dyes_pair_cur = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, "dyes_pair_cur", dtype=ti.math.vec3, ndim=2) + dyes_pair_nxt = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, "dyes_pair_nxt", dtype=ti.math.vec3, ndim=2) + pressures_pair_cur = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, "pressures_pair_cur", dtype=ti.f32, ndim=2) + pressures_pair_nxt = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, "pressures_pair_nxt", dtype=ti.f32, ndim=2) + velocity_divs = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, "velocity_divs", dtype=ti.f32, ndim=2) + mouse_data = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, "mouse_data", dtype=ti.f32, ndim=1) + + g1_builder = ti.graph.GraphBuilder() + g1_builder.dispatch(advect, velocities_pair_cur, velocities_pair_cur, velocities_pair_nxt) + g1_builder.dispatch(advect, velocities_pair_cur, dyes_pair_cur, dyes_pair_nxt) + g1_builder.dispatch(apply_impulse, velocities_pair_nxt, dyes_pair_nxt, mouse_data) + g1_builder.dispatch(divergence, velocities_pair_nxt, velocity_divs) + # swap is unrolled in the loop so we only need p_jacobi_iters // 2 iterations. + for _ in range(p_jacobi_iters // 2): + g1_builder.dispatch(pressure_jacobi, pressures_pair_cur, pressures_pair_nxt, velocity_divs) + g1_builder.dispatch(pressure_jacobi, pressures_pair_nxt, pressures_pair_cur, velocity_divs) + g1_builder.dispatch(subtract_gradient, velocities_pair_nxt, pressures_pair_cur) + g1 = g1_builder.compile() + + g2_builder = ti.graph.GraphBuilder() + g2_builder.dispatch(advect, velocities_pair_nxt, velocities_pair_nxt, velocities_pair_cur) + g2_builder.dispatch(advect, velocities_pair_nxt, dyes_pair_nxt, dyes_pair_cur) + g2_builder.dispatch(apply_impulse, velocities_pair_cur, dyes_pair_cur, mouse_data) + g2_builder.dispatch(divergence, velocities_pair_cur, velocity_divs) + for _ in range(p_jacobi_iters // 2): + g2_builder.dispatch(pressure_jacobi, pressures_pair_cur, pressures_pair_nxt, velocity_divs) + g2_builder.dispatch(pressure_jacobi, pressures_pair_nxt, pressures_pair_cur, velocity_divs) + g2_builder.dispatch(subtract_gradient, velocities_pair_cur, pressures_pair_cur) + g2 = g2_builder.compile() + + swap = True + + while gui.running: + if gui.get_event(ti.GUI.PRESS): + e = gui.event + if e.key == ti.GUI.ESCAPE: + break + elif e.key == "r": + paused = False + reset() + elif e.key == "s": + if curl_strength: + curl_strength = 0 + else: + curl_strength = 7 + elif e.key == "p": + paused = not paused + + if not paused: + _mouse_data = md_gen(gui) + if args.baseline: + step_orig(_mouse_data) + gui.set_image(dyes_pair.cur.to_numpy()) + else: + invoke_args = { + "mouse_data": _mouse_data, + "velocities_pair_cur": _velocities, + "velocities_pair_nxt": _new_velocities, + "dyes_pair_cur": _dye_buffer, + "dyes_pair_nxt": _new_dye_buffer, + "pressures_pair_cur": _pressures, + "pressures_pair_nxt": _new_pressures, + "velocity_divs": _velocity_divs, + } + if swap: + g1.run(invoke_args) + gui.set_image(_dye_buffer.to_numpy()) # false+, pylint: disable=no-member + swap = False + else: + g2.run(invoke_args) + gui.set_image(_new_dye_buffer.to_numpy()) # false+, pylint: disable=no-member + swap = True + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/real_func/rendering/cornell_box.py b/local_packages/taichi/examples/real_func/rendering/cornell_box.py new file mode 100644 index 0000000..fd4233f --- /dev/null +++ b/local_packages/taichi/examples/real_func/rendering/cornell_box.py @@ -0,0 +1,218 @@ +# pylint: disable=W0622,W0621,W0401 +import taichi as ti +from taichi.math import * + +ti.init(arch=ti.gpu, default_ip=ti.i32, default_fp=ti.f32) + +image_resolution = (512, 512) +image_buffer = ti.Vector.field(4, float, image_resolution) +image_pixels = ti.Vector.field(3, float, image_resolution) + +Ray = ti.types.struct(origin=vec3, direction=vec3, color=vec3) +Material = ti.types.struct(albedo=vec3, emission=vec3) +Transform = ti.types.struct(position=vec3, rotation=vec3, scale=vec3, matrix=mat3) +SDFObject = ti.types.struct(distance=float, transform=Transform, material=Material) + +objects = SDFObject.field(shape=8) +objects[0] = SDFObject( + transform=Transform(vec3(0, 0, -1), vec3(0, 0, 0), vec3(1, 1, 0.2)), + material=Material(vec3(1, 1, 1) * 0.4, vec3(1)), +) +objects[1] = SDFObject( + transform=Transform(vec3(0, 1, 0), vec3(90, 0, 0), vec3(1, 1, 0.2)), + material=Material(vec3(1, 1, 1) * 0.4, vec3(1)), +) +objects[2] = SDFObject( + transform=Transform(vec3(0, -1, 0), vec3(90, 0, 0), vec3(1, 1, 0.2)), + material=Material(vec3(1, 1, 1) * 0.4, vec3(1)), +) +objects[3] = SDFObject( + transform=Transform(vec3(-1, 0, 0), vec3(0, 90, 0), vec3(1, 1, 0.2)), + material=Material(vec3(1, 0, 0) * 0.5, vec3(1)), +) +objects[4] = SDFObject( + transform=Transform(vec3(1, 0, 0), vec3(0, 90, 0), vec3(1, 1, 0.2)), + material=Material(vec3(0, 1, 0) * 0.5, vec3(1)), +) +objects[5] = SDFObject( + transform=Transform(vec3(-0.275, -0.3, -0.2), vec3(0, 112, 0), vec3(0.25, 0.5, 0.25)), + material=Material(vec3(1, 1, 1) * 0.4, vec3(1)), +) +objects[6] = SDFObject( + transform=Transform(vec3(0.275, -0.55, 0.2), vec3(0, -197, 0), vec3(0.25, 0.25, 0.25)), + material=Material(vec3(1, 1, 1) * 0.4, vec3(1)), +) +objects[7] = SDFObject( + transform=Transform(vec3(0, 0.809, 0), vec3(90, 0, 0), vec3(0.2, 0.2, 0.01)), + material=Material(vec3(1, 1, 1) * 1, vec3(100)), +) + + +@ti.real_func +def rotate(a: vec3) -> mat3: + s, c = sin(a), cos(a) + return ( + mat3(c.z, s.z, 0, -s.z, c.z, 0, 0, 0, 1) + @ mat3(c.y, 0, -s.y, 0, 1, 0, s.y, 0, c.y) + @ mat3(1, 0, 0, 0, c.x, s.x, 0, -s.x, c.x) + ) + + +@ti.real_func +def signed_distance(obj: SDFObject, pos: vec3) -> float: + p = obj.transform.matrix @ (pos - obj.transform.position) + q = abs(p) - obj.transform.scale + return length(max(q, 0)) + min(max(q.x, max(q.y, q.z)), 0) + + +@ti.real_func +def nearest_object(p: vec3) -> (int, float): + index, min_dis = 0, 1e32 + for i in ti.static(range(8)): + dis = signed_distance(objects[i], p) + if dis < min_dis: + min_dis, index = dis, i + return index, min_dis + + +@ti.real_func +def calc_normal(obj: SDFObject, p: vec3) -> vec3: + e = vec2(1, -1) * 0.5773 * 0.005 + return normalize( + e.xyy * signed_distance(obj, p + e.xyy) + + e.yyx * signed_distance(obj, p + e.yyx) + + e.yxy * signed_distance(obj, p + e.yxy) + + e.xxx * signed_distance(obj, p + e.xxx) + ) + + +@ti.real_func +def raycast(ray: Ray) -> (SDFObject, vec3, bool): + w, s, d, cerr = 1.6, 0.0, 0.0, 1e32 + index, t, position, hit = 0, 0.005, vec3(0), False + for _ in range(64): + position = ray.origin + ray.direction * t + index, distance = nearest_object(position) + + ld, d = d, distance + if ld + d < s: + s -= w * s + t += s + w *= 0.5 + w += 0.5 + continue + err = d / t + if err < cerr: + cerr = err + + s = w * d + t += s + hit = err < 0.001 + if t > 5.0 or hit: + break + return objects[index], position, hit + + +@ti.real_func +def hemispheric_sampling(normal: vec3) -> vec3: + z = 2.0 * ti.random() - 1.0 + a = ti.random() * 2.0 * pi + xy = sqrt(1.0 - z * z) * vec2(sin(a), cos(a)) + return normalize(normal + vec3(xy, z)) + + +@ti.real_func +def raytrace(ray: Ray) -> Ray: + for _ in range(3): + object, position, hit = raycast(ray) + if not hit: + ray.color = vec3(0) + break + + normal = calc_normal(object, position) + ray.direction = hemispheric_sampling(normal) + ray.color *= object.material.albedo + ray.origin = position + + intensity = dot(ray.color, vec3(0.299, 0.587, 0.114)) + ray.color *= object.material.emission + visible = dot(ray.color, vec3(0.299, 0.587, 0.114)) + if intensity < visible or visible < 0.000001: + break + return ray + + +@ti.kernel +def build_scene(): + for i in objects: + rotation = radians(objects[i].transform.rotation) + objects[i].transform.matrix = rotate(rotation) + + +@ti.kernel +def render(camera_position: vec3, camera_lookat: vec3, camera_up: vec3): + for i, j in image_pixels: + z = normalize(camera_position - camera_lookat) + x = normalize(cross(camera_up, z)) + y = cross(z, x) + + half_width = half_height = tan(radians(35) * 0.5) + lower_left_corner = camera_position - half_width * x - half_height * y - z + horizontal = 2.0 * half_width * x + vertical = 2.0 * half_height * y + + uv = (vec2(i, j) + vec2(ti.random(), ti.random())) / vec2(image_resolution) + po = lower_left_corner + uv.x * horizontal + uv.y * vertical + rd = normalize(po - camera_position) + + ray = raytrace(Ray(camera_position, rd, vec3(1))) + buffer = image_buffer[i, j] + buffer += vec4(ray.color, 1.0) + image_buffer[i, j] = buffer + + color = buffer.rgb / buffer.a + color = pow(color, vec3(1.0 / 2.2)) + color = ( + mat3( + 0.597190, + 0.35458, + 0.04823, + 0.07600, + 0.90834, + 0.01566, + 0.02840, + 0.13383, + 0.83777, + ) + @ color + ) + color = (color * (color + 0.024578) - 0.0000905) / (color * (0.983729 * color + 0.4329510) + 0.238081) + color = ( + mat3( + 1.60475, + -0.531, + -0.0736, + -0.102, + 1.10813, + -0.00605, + -0.00327, + -0.07276, + 1.07602, + ) + @ color + ) + image_pixels[i, j] = clamp(color, 0, 1) + + +def main(): + window = ti.ui.Window("Cornell Box", image_resolution) + canvas = window.get_canvas() + build_scene() + while window.running: + render(vec3(0, 0, 3.5), vec3(0, 0, -1), vec3(0, 1, 0)) + canvas.set_image(image_pixels) + window.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/real_func/rendering/taichi_ngp.py b/local_packages/taichi/examples/real_func/rendering/taichi_ngp.py new file mode 100644 index 0000000..894b5e8 --- /dev/null +++ b/local_packages/taichi/examples/real_func/rendering/taichi_ngp.py @@ -0,0 +1,1049 @@ +# Instant-NGP renderer implemented using Taichi +# Author : Linyou (linyoutian.loyot@gmail.com) +# Original code at https://github.com/Linyou/taichi-ngp-renderer + +# GUI instruction: +# 1. Move the mouse with right click to rotate the camera +# 2. Press 'w', 'a', 's', 'd' key to move the camera like FPS game +# 3. Press 'q', 'e' key to move the camera up and down + +import argparse +import os +import platform +import time +from typing import Tuple + +import numpy as np +from matplotlib import pyplot as plt + +import taichi as ti +from taichi.math import uvec3, vec2, vec3 + +try: + import cv2 + import wget + from scipy.spatial.transform import Rotation as R +except: + raise ImportError("The Taichi NGP renderer requires cv2, wget and scipy to be installed.") + + +def depth2img(depth): + depth = (depth - depth.min()) / (depth.max() - depth.min()) + depth_img = cv2.applyColorMap((depth * 255).astype(np.uint8), cv2.COLORMAP_TURBO) + + return depth_img + + +arch = ti.cuda + +if platform.system() == "Darwin": + block_dim = 64 +else: + block_dim = 128 + +sigma_sm_preload = int(128 / block_dim) * 24 +rgb_sm_preload = int(128 / block_dim) * 50 +data_type = ti.f16 +np_type = np.float16 +tf_vec3 = ti.types.vector(3, dtype=data_type) +tf_vec8 = ti.types.vector(8, dtype=data_type) +tf_vec32 = ti.types.vector(32, dtype=data_type) +tf_vec1 = ti.types.vector(1, dtype=data_type) +tf_vec2 = ti.types.vector(2, dtype=data_type) +tf_mat1x3 = ti.types.matrix(1, 3, dtype=data_type) +tf_index_temp = ti.types.vector(8, dtype=ti.i32) + +MAX_SAMPLES = 1024 +NEAR_DISTANCE = 0.01 + +SQRT3 = 1.7320508075688772 +SQRT3_MAX_SAMPLES = SQRT3 / 1024 +SQRT3_2 = 1.7320508075688772 * 2 +PRETRAINED_MODEL_URL = "https://github.com/Linyou/taichi-ngp-renderer/releases/download/v0.1-models/{}.npy" + + +# <----------------- hash table util code -----------------> +@ti.real_func +def calc_dt(t: data_type, exp_step_factor: data_type, grid_size: int, scale: data_type) -> data_type: + return ti.math.clamp(t * exp_step_factor, SQRT3_MAX_SAMPLES, SQRT3_2 * scale / grid_size) + + +@ti.real_func +def __expand_bits(v_: uvec3) -> uvec3: + v = (v_ * ti.uint32(0x00010001)) & ti.uint32(0xFF0000FF) + v = (v * ti.uint32(0x00000101)) & ti.uint32(0x0F00F00F) + v = (v * ti.uint32(0x00000011)) & ti.uint32(0xC30C30C3) + v = (v * ti.uint32(0x00000005)) & ti.uint32(0x49249249) + return v + + +@ti.real_func +def __morton3D(xyz_: uvec3) -> ti.u32: + xyz = __expand_bits(xyz_) + return xyz[0] | (xyz[1] << 1) | (xyz[2] << 2) + + +@ti.real_func +def fast_hash(pos_grid_local: uvec3) -> ti.u32: + result = ti.uint32(0) + primes = uvec3(ti.uint32(1), ti.uint32(2654435761), ti.uint32(805459861)) + for i in ti.static(range(3)): + result ^= ti.uint32(pos_grid_local[i]) * primes[i] + return result + + +@ti.real_func +def under_hash(pos_grid_local: uvec3, resolution: ti.u32) -> ti.u32: + result = ti.uint32(0) + stride = ti.uint32(1) + for i in ti.static(range(3)): + result += ti.uint32(pos_grid_local[i] * stride) + stride *= resolution + return result + + +@ti.real_func +def grid_pos2hash_index(indicator: ti.i32, pos_grid_local: uvec3, resolution: ti.u32, map_size: ti.u32) -> ti.u32: + hash_result = ti.uint32(0) + if indicator == 1: + hash_result = under_hash(pos_grid_local, resolution) + else: + hash_result = fast_hash(pos_grid_local) + + return hash_result % map_size + + +# <----------------- hash table util code -----------------> + + +@ti.real_func +def random_in_unit_disk() -> tf_vec2: + theta = 2.0 * np.pi * ti.random() + return ti.Vector([ti.sin(theta), ti.cos(theta)]) + + +@ti.real_func +def random_normal() -> tf_vec2: + x = ti.random() * 2.0 - 1.0 + y = ti.random() * 2.0 - 1.0 + return tf_vec2(x, y) + + +@ti.real_func +def dir_encode_func(dir_: tf_vec3) -> tf_vec32: + out_feat = tf_vec32(0.0) + dir_n = dir_ / dir_.norm() + x = dir_n[0] + y = dir_n[1] + z = dir_n[2] + xy = x * y + xz = x * z + yz = y * z + x2 = x * x + y2 = y * y + z2 = z * z + + temp = 0.28209479177387814 + out_feat[0] = data_type(temp) + out_feat[1] = data_type(-0.48860251190291987 * y) + out_feat[2] = data_type(0.48860251190291987 * z) + out_feat[3] = data_type(-0.48860251190291987 * x) + out_feat[4] = data_type(1.0925484305920792 * xy) + out_feat[5] = data_type(-1.0925484305920792 * yz) + out_feat[6] = data_type(0.94617469575755997 * z2 - 0.31539156525251999) + out_feat[7] = data_type(-1.0925484305920792 * xz) + out_feat[8] = data_type(0.54627421529603959 * x2 - 0.54627421529603959 * y2) + out_feat[9] = data_type(0.59004358992664352 * y * (-3.0 * x2 + y2)) + out_feat[10] = data_type(2.8906114426405538 * xy * z) + out_feat[11] = data_type(0.45704579946446572 * y * (1.0 - 5.0 * z2)) + out_feat[12] = data_type(0.3731763325901154 * z * (5.0 * z2 - 3.0)) + out_feat[13] = data_type(0.45704579946446572 * x * (1.0 - 5.0 * z2)) + out_feat[14] = data_type(1.4453057213202769 * z * (x2 - y2)) + out_feat[15] = data_type(0.59004358992664352 * x * (-x2 + 3.0 * y2)) + + return out_feat + + +@ti.data_oriented +class NGP_fw: + def __init__(self, scale, cascades, grid_size, base_res, log2_T, res, level, exp_step_factor): + self.res = res + self.N_rays = res[0] * res[1] + self.grid_size = grid_size + self.exp_step_factor = exp_step_factor + self.scale = scale + + # rays intersection parameters + # t1, t2 need to be initialized to -1.0 + self.hits_t = ti.Vector.field(n=2, dtype=data_type, shape=self.N_rays) + self.hits_t.fill(-1.0) + self.center = tf_vec3(0.0, 0.0, 0.0) + self.xyz_min = -tf_vec3(scale, scale, scale) + self.xyz_max = tf_vec3(scale, scale, scale) + self.half_size = (self.xyz_max - self.xyz_min) / 2 + + self.noise_buffer = ti.Vector.field(2, dtype=data_type, shape=self.N_rays) + self.gen_noise_buffer() + + self.rays_o = ti.Vector.field(n=3, dtype=data_type, shape=self.N_rays) + self.rays_d = ti.Vector.field(n=3, dtype=data_type, shape=self.N_rays) + + # use the pre-compute direction and scene pose + self.directions = ti.Matrix.field(n=1, m=3, dtype=data_type, shape=(self.N_rays,)) + self.pose = ti.Matrix.field(n=3, m=4, dtype=data_type, shape=()) + + # density_bitfield is used for point sampling + self.density_bitfield = ti.field(ti.uint32, shape=cascades * grid_size**3 // 32) + + # count the number of rays that still alive + self.counter = ti.field(ti.i32, shape=()) + self.counter[None] = self.N_rays + # current alive buffer index + self.current_index = ti.field(ti.i32, shape=()) + self.current_index[None] = 0 + + # how many samples that need to run the model + self.model_launch = ti.field(ti.i32, shape=()) + + # buffer for the alive rays + self.alive_indices = ti.field(ti.i32, shape=(2 * self.N_rays,)) + + # padd the thread to the factor of block size (thread per block) + self.padd_block_network = ti.field(ti.i32, shape=()) + self.padd_block_composite = ti.field(ti.i32, shape=()) + + # hash table variables + self.min_samples = 1 if exp_step_factor == 0 else 4 + self.per_level_scales = 1.3195079565048218 # hard coded, otherwise it will be have lower percision + self.base_res = base_res + self.max_params = 2**log2_T + self.level = level + # hash table fields + self.offsets = ti.field(ti.i32, shape=(16,)) + self.hash_map_sizes = ti.field(ti.uint32, shape=(16,)) + self.hash_map_indicator = ti.field(ti.i32, shape=(16,)) + + # model parameters + layer1_base = 32 * 64 + layer2_base = layer1_base + 64 * 64 + self.hash_embedding = ti.field(dtype=data_type, shape=(11445040,)) + self.sigma_weights = ti.field(dtype=data_type, shape=(layer1_base + 64 * 16,)) + self.rgb_weights = ti.field(dtype=data_type, shape=(layer2_base + 64 * 8,)) + + # buffers that used for points sampling + self.max_samples_per_rays = 1 + self.max_samples_shape = self.N_rays * self.max_samples_per_rays + + self.xyzs = ti.Vector.field(3, dtype=data_type, shape=(self.max_samples_shape,)) + self.dirs = ti.Vector.field(3, dtype=data_type, shape=(self.max_samples_shape,)) + self.deltas = ti.field(data_type, shape=(self.max_samples_shape,)) + self.ts = ti.field(data_type, shape=(self.max_samples_shape,)) + + # buffers that store the info of sampled points + self.run_model_ind = ti.field(ti.int32, shape=(self.max_samples_shape,)) + self.N_eff_samples = ti.field(ti.int32, shape=(self.N_rays,)) + + # intermediate buffers for network + self.xyzs_embedding = ti.field(data_type, shape=(self.max_samples_shape, 32)) + self.final_embedding = ti.field(data_type, shape=(self.max_samples_shape, 16)) + self.out_3 = ti.field(data_type, shape=(self.max_samples_shape, 3)) + self.out_1 = ti.field(data_type, shape=(self.max_samples_shape,)) + self.temp_hit = ti.field(ti.i32, shape=(self.max_samples_shape,)) + + # results buffers + self.opacity = ti.field(ti.f32, shape=(self.N_rays,)) + self.depth = ti.field(ti.f32, shape=self.N_rays) + self.rgb = ti.Vector.field(3, dtype=ti.f32, shape=(self.N_rays,)) + + # GUI render buffer (data type must be float32) + self.render_buffer = ti.Vector.field( + 3, + dtype=ti.f32, + shape=( + res[0], + res[1], + ), + ) + # camera parameters + self.lookat = np.array([0.0, 0.0, -1.0]) + self.lookat_change = np.zeros((3,)) + self.lookup = np.array([0.0, -1.0, 0.0]) + + def hash_table_init(self): + print( + f"GridEncoding: base resolution: {self.base_res}, log scale per level:{self.per_level_scales:.5f} feature numbers per level: {2} maximum parameters per level: {self.max_params} level: {self.level}" + ) + offset = 0 + for i in range(self.level): + resolution = int(np.ceil(self.base_res * np.exp(i * np.log(self.per_level_scales)) - 1.0)) + 1 + params_in_level = resolution**3 + params_in_level = int(resolution**3) if params_in_level % 8 == 0 else int((params_in_level + 8 - 1) / 8) * 8 + params_in_level = min(self.max_params, params_in_level) + self.offsets[i] = offset + self.hash_map_sizes[i] = params_in_level + self.hash_map_indicator[i] = 1 if resolution**3 <= params_in_level else 0 + offset += params_in_level + + def get_direction(self, camera_angle_x): + w, h = int(self.res[1]), int(self.res[0]) + fx = 0.5 * w / np.tan(0.5 * camera_angle_x) + fy = 0.5 * h / np.tan(0.5 * camera_angle_x) + cx, cy = 0.5 * w, 0.5 * h + + x, y = np.meshgrid( + np.arange(w, dtype=np.float32) + 0.5, + np.arange(h, dtype=np.float32) + 0.5, + indexing="xy", + ) + + directions = np.stack([(x - cx) / fx, (y - cy) / fy, np.ones_like(x)], -1) + + return directions.reshape(-1, 3) + + def load_model(self, model_path): + print(f"Loading model from {model_path}") + model = np.load(model_path, allow_pickle=True).item() + # model = torch.load(model_path, map_location='cpu')['state_dict'] + self.hash_embedding.from_numpy(model["model.xyz_encoder.params"].astype(np_type)) + self.sigma_weights.from_numpy(model["model.xyz_sigmas.params"].astype(np_type)) + self.rgb_weights.from_numpy(model["model.rgb_net.params"].astype(np_type)) + + self.density_bitfield.from_numpy(model["model.density_bitfield"].view("uint32")) + + self.pose.from_numpy(model["poses"][20].astype(np_type)) + if self.res[0] != 800 or self.res[1] != 800: + directions = self.get_direction(model["camera_angle_x"])[:, None, :].astype(np_type) + else: + directions = model["directions"][:, None, :].astype(np_type) + + self.directions.from_numpy(directions) + + @staticmethod + def taichi_init(kernel_profiler): + ti.init( + arch=arch, + offline_cache=True, + kernel_profiler=kernel_profiler, + enable_fallback=False, + ) + + @staticmethod + def taichi_print_profiler(): + ti.profiler.print_kernel_profiler_info() + + @ti.kernel + def reset(self): + self.depth.fill(0.0) + self.opacity.fill(0.0) + self.counter[None] = self.N_rays + for i, j in ti.ndrange(self.N_rays, 2): + self.alive_indices[i * 2 + j] = i + + @ti.real_func + def _ray_aabb_intersec(self, ray_o: tf_vec3, ray_d: tf_vec3) -> tf_vec2: + inv_d = 1.0 / ray_d + + t_min = (self.center - self.half_size - ray_o) * inv_d + t_max = (self.center + self.half_size - ray_o) * inv_d + + _t1 = ti.min(t_min, t_max) + _t2 = ti.max(t_min, t_max) + t1 = _t1.max() + t2 = _t2.min() + + return tf_vec2(t1, t2) + + @ti.kernel + def gen_noise_buffer(self): + for i in range(self.N_rays): + self.noise_buffer[i] = random_normal() + # self.noise_buffer[i] = random_in_unit_disk() + + @ti.kernel + def ray_intersect_dof(self, dist_to_focus: float, len_dis: float): + ti.block_local(self.pose) + for i in self.directions: + c2w = self.pose[None] + dir_ori = self.directions[i] + offset = len_dis * self.noise_buffer[i] + offset_m = tf_mat1x3( + [ + [ + offset[0], + offset[1], + 0.0, + ] + ] + ) + c2w_dir = c2w[:, :3].transpose() + offset_w = offset_m @ c2w_dir + mat_result = (dir_ori * dist_to_focus) @ c2w_dir - offset_w + ray_d = tf_vec3(mat_result[0, 0], mat_result[0, 1], mat_result[0, 2]) + ray_o = c2w[:, 3] + tf_vec3(offset_w[0, 0], offset_w[0, 1], offset_w[0, 2]) + + t1t2 = self._ray_aabb_intersec(ray_o, ray_d) + + if t1t2[1] > 0.0: + self.hits_t[i][0] = data_type(ti.max(t1t2[0], NEAR_DISTANCE)) + self.hits_t[i][1] = t1t2[1] + + self.rays_o[i] = ray_o + self.rays_d[i] = ray_d + + @ti.kernel + def ray_intersect(self): + ti.block_local(self.pose) + for i in self.directions: + c2w = self.pose[None] + mat_result = self.directions[i] @ c2w[:, :3].transpose() + ray_d = tf_vec3(mat_result[0, 0], mat_result[0, 1], mat_result[0, 2]) + ray_o = c2w[:, 3] + + t1t2 = self._ray_aabb_intersec(ray_o, ray_d) + + if t1t2[1] > 0.0: + self.hits_t[i][0] = data_type(ti.max(t1t2[0], NEAR_DISTANCE)) + self.hits_t[i][1] = t1t2[1] + + self.rays_o[i] = ray_o + self.rays_d[i] = ray_d + + @ti.kernel + def raymarching_test_kernel(self, N_samples: int): + self.run_model_ind.fill(0) + for n in ti.ndrange(self.counter[None]): + c_index = self.current_index[None] + r = self.alive_indices[n * 2 + c_index] + grid_size3 = self.grid_size**3 + grid_size_inv = 1.0 / self.grid_size + + ray_o = self.rays_o[r] + ray_d = self.rays_d[r] + t1t2 = self.hits_t[r] + + d_inv = 1.0 / ray_d + + t = t1t2[0] + t2 = t1t2[1] + + s = 0 + + start_idx = n * N_samples + + while (0 <= t) & (t < t2) & (s < N_samples): + # xyz = ray_o + t*ray_d + xyz = ray_o + t * ray_d + dt = calc_dt(t, self.exp_step_factor, self.grid_size, self.scale) + # mip = ti.max(mip_from_pos(xyz, cascades), + # mip_from_dt(dt, grid_size, cascades)) + + mip_bound = 0.5 + mip_bound_inv = 1 / mip_bound + + nxyz = ti.math.clamp( + 0.5 * (xyz * mip_bound_inv + 1) * self.grid_size, + 0.0, + self.grid_size - 1.0, + ) + # nxyz = ti.ceil(nxyz) + + idx = __morton3D(ti.cast(nxyz, ti.u32)) + # occ = density_grid_taichi[idx] > 5.912066756501768 + occ = self.density_bitfield[ti.int32(idx // 32)] & (1 << ti.u32(idx % 32)) + + if occ: + sn = start_idx + s + for p in ti.static(range(3)): + self.xyzs[sn][p] = xyz[p] + self.dirs[sn][p] = ray_d[p] + self.run_model_ind[sn] = 1 + self.ts[sn] = t + self.deltas[sn] = dt + t += dt + self.hits_t[r][0] = t + s += 1 + + else: + txyz = ( + ((nxyz + 0.5 + 0.5 * ti.math.sign(ray_d)) * grid_size_inv * 2 - 1) * mip_bound - xyz + ) * d_inv + + t_target = t + ti.max(0, txyz.min()) + t += calc_dt(t, self.exp_step_factor, self.grid_size, self.scale) + while t < t_target: + t += calc_dt(t, self.exp_step_factor, self.grid_size, self.scale) + + self.N_eff_samples[n] = s + if s == 0: + self.alive_indices[n * 2 + c_index] = -1 + + @ti.kernel + def rearange_index(self, B: ti.i32): + self.model_launch[None] = 0 + + for i in ti.ndrange(B): + if self.run_model_ind[i]: + index = ti.atomic_add(self.model_launch[None], 1) + self.temp_hit[index] = i + + self.model_launch[None] += 1 + self.padd_block_network[None] = ((self.model_launch[None] + block_dim - 1) // block_dim) * block_dim + # self.padd_block_composite[None] = ((self.counter[None]+ 128 - 1)// 128) *128 + + @ti.kernel + def hash_encode(self): + # get hash table embedding + ti.loop_config(block_dim=16) + for sn, level in ti.ndrange(self.model_launch[None], 16): + # normalize to [0, 1], before is [-0.5, 0.5] + xyz = self.xyzs[self.temp_hit[sn]] + 0.5 + offset = self.offsets[level] * 2 + indicator = self.hash_map_indicator[level] + map_size = self.hash_map_sizes[level] + + init_val0 = tf_vec1(0.0) + init_val1 = tf_vec1(1.0) + local_feature_0 = init_val0[0] + local_feature_1 = init_val0[0] + + index_temp = tf_index_temp(0) + w_temp = tf_vec8(0.0) + hash_temp_1 = tf_vec8(0.0) + hash_temp_2 = tf_vec8(0.0) + + scale = self.base_res * ti.exp(level * ti.log(self.per_level_scales)) - 1.0 + resolution = ti.cast(ti.ceil(scale), ti.uint32) + 1 + + pos = xyz * scale + 0.5 + pos_grid_uint = ti.cast(ti.floor(pos), ti.uint32) + pos -= pos_grid_uint + # pos_grid_uint = ti.cast(pos_grid, ti.uint32) + + for idx in ti.static(range(8)): + # idx_uint = ti.cast(idx, ti.uint32) + w = init_val1[0] + pos_grid_local = uvec3(0) + + for d in ti.static(range(3)): + if (idx & (1 << d)) == 0: + pos_grid_local[d] = pos_grid_uint[d] + w *= data_type(1 - pos[d]) + else: + pos_grid_local[d] = pos_grid_uint[d] + 1 + w *= data_type(pos[d]) + + index = ti.int32(grid_pos2hash_index(indicator, pos_grid_local, resolution, map_size)) + index_temp[idx] = offset + index * 2 + w_temp[idx] = w + + for idx in ti.static(range(8)): + hash_temp_1[idx] = self.hash_embedding[index_temp[idx]] + hash_temp_2[idx] = self.hash_embedding[index_temp[idx] + 1] + + for idx in ti.static(range(8)): + local_feature_0 += data_type(w_temp[idx] * hash_temp_1[idx]) + local_feature_1 += data_type(w_temp[idx] * hash_temp_2[idx]) + + self.xyzs_embedding[sn, level * 2] = local_feature_0 + self.xyzs_embedding[sn, level * 2 + 1] = local_feature_1 + + @ti.kernel + def sigma_layer(self): + ti.loop_config(block_dim=block_dim) + for sn in ti.ndrange(self.padd_block_network[None]): + tid = sn % block_dim + did_launch_num = self.model_launch[None] + init_val = tf_vec1(0.0) + xyz_feat = ti.simt.block.SharedArray((32, block_dim), data_type) + weight = ti.simt.block.SharedArray((64 * 32 + 64 * 16,), data_type) + hid1 = ti.simt.block.SharedArray((64, block_dim), data_type) + hid2 = ti.simt.block.SharedArray((16, block_dim), data_type) + for i in ti.static(range(sigma_sm_preload)): + k = tid * sigma_sm_preload + i + weight[k] = self.sigma_weights[k] + ti.simt.block.sync() + + if sn < did_launch_num: + for i in ti.static(range(32)): + xyz_feat[i, tid] = self.xyzs_embedding[sn, i] + + for i in range(64): + temp = init_val[0] + for j in ti.static(range(32)): + temp += xyz_feat[j, tid] * weight[i * 32 + j] + + hid1[i, tid] = temp + ti.simt.block.sync() + + for i in range(16): + temp = init_val[0] + for j in ti.static(range(64)): + temp += data_type(ti.max(0.0, hid1[j, tid])) * weight[64 * 32 + i * 64 + j] + hid2[i, tid] = temp + ti.simt.block.sync() + + self.out_1[self.temp_hit[sn]] = data_type(ti.exp(hid2[0, tid])) + for i in ti.static(range(16)): + self.final_embedding[sn, i] = hid2[i, tid] + + ti.simt.block.sync() + + @ti.kernel + def rgb_layer(self): + ti.loop_config(block_dim=block_dim) + for sn in ti.ndrange(self.padd_block_network[None]): + ray_id = self.temp_hit[sn] + tid = sn % block_dim + did_launch_num = self.model_launch[None] + init_val = tf_vec1(0.0) + weight = ti.simt.block.SharedArray((64 * 32 + 64 * 64 + 64 * 4,), data_type) + hid1 = ti.simt.block.SharedArray((64, block_dim), data_type) + hid2 = ti.simt.block.SharedArray((64, block_dim), data_type) + for i in ti.static(range(rgb_sm_preload)): + k = tid * rgb_sm_preload + i + weight[k] = self.rgb_weights[k] + ti.simt.block.sync() + + if sn < did_launch_num: + dir_ = self.dirs[ray_id] + dir_feat = dir_encode_func(dir_) + + for i in ti.static(range(16)): + dir_feat[16 + i] = self.final_embedding[sn, i] + + for i in range(64): + temp = init_val[0] + for j in ti.static(range(32)): + temp += dir_feat[j] * weight[i * 32 + j] + + hid1[i, tid] = temp + ti.simt.block.sync() + + for i in range(64): + temp = init_val[0] + for j in ti.static(range(64)): + temp += data_type(ti.max(0.0, hid1[j, tid])) * weight[64 * 32 + i * 64 + j] + + hid2[i, tid] = temp + ti.simt.block.sync() + + for i in ti.static(range(3)): + temp = init_val[0] + for j in ti.static(range(64)): + temp += data_type(ti.max(0.0, hid2[j, tid])) * weight[64 * 32 + 64 * 64 + i * 64 + j] + + hid1[i, tid] = temp + ti.simt.block.sync() + + for i in ti.static(range(3)): + self.out_3[self.temp_hit[sn], i] = data_type(1 / (1 + ti.exp(-hid1[i, tid]))) + ti.simt.block.sync() + + @ti.kernel + def FullyFusedMLP(self): + ti.loop_config(block_dim=block_dim) + for sn in ti.ndrange(self.padd_block_network[None]): + ray_id = self.temp_hit[sn] + tid = sn % block_dim + did_launch_num = self.model_launch[None] + init_val = tf_vec1(0.0) + xyz_feat = tf_vec32(0.0) + weight = ti.simt.block.SharedArray((64 * 32 + 64 * 64 + 64 * 4,), data_type) + hid2_2 = ti.simt.block.SharedArray((32 * block_dim,), data_type) + hid2_1 = ti.simt.block.SharedArray((32 * block_dim,), data_type) + hid1 = ti.simt.block.SharedArray((64 * block_dim,), data_type) + for i in ti.static(range(rgb_sm_preload)): + k = tid * rgb_sm_preload + i + weight[k] = self.rgb_weights[k] + for i in ti.static(range(sigma_sm_preload)): + k = tid * sigma_sm_preload + i + hid2_1[k] = self.sigma_weights[k] + ti.simt.block.sync() + + if sn < did_launch_num: + dir_ = self.dirs[ray_id] + for i in ti.static(range(32)): + xyz_feat[i] = self.xyzs_embedding[sn, i] + dir_feat = dir_encode_func(dir_) + + for i in range(64): + temp = init_val[0] + for j in ti.static(range(32)): + temp += xyz_feat[j] * hid2_1[i * 32 + j] + hid1[i * block_dim + tid] = temp + ti.simt.block.sync() + + for i in range(16): + temp = init_val[0] + for j in ti.static(range(64)): + temp += data_type(ti.max(0.0, hid1[j * block_dim + tid])) * hid2_1[64 * 32 + i * 64 + j] + hid2_2[i * block_dim + tid] = temp + ti.simt.block.sync() + + out1 = data_type(ti.exp(hid2_2[tid])) + + for i in ti.static(range(16)): + dir_feat[16 + i] = hid2_2[i * block_dim + tid] + + for i in range(64): + temp = init_val[0] + for j in ti.static(range(32)): + temp += dir_feat[j] * weight[i * 32 + j] + hid1[i * block_dim + tid] = temp + ti.simt.block.sync() + + for i in range(32): + temp1 = init_val[0] + temp2 = init_val[0] + for j in ti.static(range(64)): + temp1 += data_type(ti.max(0.0, hid1[j * block_dim + tid])) * weight[64 * 32 + i * 64 + j] + temp2 += data_type(ti.max(0.0, hid1[j * block_dim + tid])) * weight[64 * 32 + (i + 32) * 64 + j] + hid2_1[i * block_dim + tid] = temp1 + hid2_2[i * block_dim + tid] = temp2 + ti.simt.block.sync() + + for i in ti.static(range(3)): + temp = init_val[0] + for j in ti.static(range(32)): + temp += ( + data_type(ti.max(0.0, hid2_1[j * block_dim + tid])) * weight[64 * 32 + 64 * 64 + i * 64 + j] + ) + # ti.simt.block.sync() + temp += ( + data_type(ti.max(0.0, hid2_2[j * block_dim + tid])) + * weight[64 * 32 + 64 * 64 + i * 64 + j + 32] + ) + hid1[i * block_dim + tid] = temp + ti.simt.block.sync() + + self.out_1[self.temp_hit[sn]] = out1 + for i in ti.static(range(3)): + self.out_3[self.temp_hit[sn], i] = data_type(1 / (1 + ti.exp(-hid1[i * block_dim + tid]))) + ti.simt.block.sync() + + @ti.kernel + def composite_test(self, max_samples: ti.i32, T_threshold: data_type): + for n in ti.ndrange(self.counter[None]): + N_samples = self.N_eff_samples[n] + if N_samples != 0: + c_index = self.current_index[None] + r = self.alive_indices[n * 2 + c_index] + + T = data_type(1.0 - self.opacity[r]) + + start_idx = n * max_samples + + rgb_temp = tf_vec3(0.0) + depth_temp = tf_vec1(0.0) + opacity_temp = tf_vec1(0.0) + out_3_temp = tf_vec3(0.0) + + for s in range(N_samples): + sn = start_idx + s + a = data_type(1.0 - ti.exp(-self.out_1[sn] * self.deltas[sn])) + w = a * T + + for i in ti.static(range(3)): + out_3_temp[i] = self.out_3[sn, i] + + rgb_temp += w * out_3_temp + depth_temp[0] += w * self.ts[sn] + opacity_temp[0] += w + + T *= data_type(1.0 - a) + + if T <= T_threshold: + self.alive_indices[n * 2 + c_index] = -1 + break + + self.rgb[r] += rgb_temp + self.depth[r] += depth_temp[0] + self.opacity[r] += opacity_temp[0] + + @ti.kernel + def re_order(self, B: ti.i32): + self.counter[None] = 0 + c_index = self.current_index[None] + n_index = (c_index + 1) % 2 + self.current_index[None] = n_index + + for i in ti.ndrange(B): + alive_temp = self.alive_indices[i * 2 + c_index] + if alive_temp >= 0: + index = ti.atomic_add(self.counter[None], 1) + self.alive_indices[index * 2 + n_index] = alive_temp + + def write_image(self): + rgb_np = self.rgb.to_numpy().reshape(self.res[0], self.res[1], 3) + depth_np = self.depth.to_numpy().reshape(self.res[0], self.res[1]) + plt.imsave("taichi_ngp.png", (rgb_np * 255).astype(np.uint8)) + plt.imsave("taichi_ngp_depth.png", depth2img(depth_np)) + + def render(self, max_samples, T_threshold, use_dof=False, dist_to_focus=0.8, len_dis=0.0) -> Tuple[float, int, int]: + samples = 0 + self.reset() + self.gen_noise_buffer() + if use_dof: + self.ray_intersect_dof(dist_to_focus, len_dis) + else: + self.ray_intersect() + + while samples < max_samples: + N_alive = self.counter[None] + if N_alive == 0: + break + + # how many more samples the number of samples add for each ray + N_samples = max(min(self.N_rays // N_alive, 64), self.min_samples) + samples += N_samples + launch_model_total = N_alive * N_samples + + self.raymarching_test_kernel(N_samples) + self.rearange_index(launch_model_total) + # self.dir_encode() + self.hash_encode() + self.sigma_layer() + self.rgb_layer() + # self.FullyFusedMLP() + self.composite_test(N_samples, T_threshold) + self.re_order(N_alive) + + return samples, N_alive, N_samples + + def render_frame(self, frame_id): + t = time.time() + samples, N_alive, N_samples = self.render(max_samples=100, T_threshold=1e-4) + self.write_image() + + print(f"samples: {samples}, N_alive: {N_alive}, N_samples: {N_samples}") + print(f"Render time: {1000*(time.time()-t):.2f} ms") + + @ti.kernel + def rgb_to_render_buffer(self, frame: ti.i32): + for i, j in self.render_buffer: + rgb = self.rgb[(self.res[0] - j) * self.res[1] + i] + self.render_buffer[i, j] = rgb / frame + + @ti.kernel + def depth_max(self) -> vec2: + max_v = self.depth[0] + min_v = self.depth[0] + for i in ti.ndrange(self.N_rays): + ti.atomic_max(max_v, self.depth[i]) + ti.atomic_min(min_v, self.depth[i]) + return vec2(max_v, min_v) + + @ti.kernel + def depth_to_render_buffer(self, max_min: vec2): + for i, j in self.render_buffer: + max_v = max_min[0] + min_v = max_min[1] + depth = self.depth[(self.res[0] - j) * self.res[1] + i] + pixel = (vec3(depth) - min_v) / (max_v - min_v) + self.render_buffer[i, j] = pixel + + def init_cam(self): + self.lookat = self.lookat @ self.pose.to_numpy()[:, :3].T + + def render_gui(self): + video_manager = None + + # check if the export file exists for snapshot and video + export_dir = "./export/" + if not os.path.exists(export_dir): + os.mkdir(export_dir) + + W, H = self.res + window = ti.ui.Window("Taichi NGP", (W, H)) + canvas = window.get_canvas() + gui = window.get_gui() + + last_mouse_x = None + last_mouse_y = None + rotate_speed = 50 + movement_speed = 0.03 + max_samples_for_rendering = 100 + render_time = 0 + # white_bg = False + recording = False + show_depth = False + use_dof = False + last_use_dof = False + frame = 0 + T_threshold = 1e-2 + dist_to_focus = 1.2 + len_dis = 0.04 + self.init_cam() + last_pose = self.pose.to_numpy() + total_frame = 0 + last_dist_to_focus = dist_to_focus + last_len_dis = len_dis + + while window.running: + # TODO: make it more efficient + pose = self.pose.to_numpy() + total_frame += 1 + if not window.is_pressed(ti.ui.RMB): + last_mouse_x = None + last_mouse_y = None + else: + curr_mouse_x, curr_mouse_y = window.get_cursor_pos() + if last_mouse_x is None or last_mouse_y is None: + last_mouse_x, last_mouse_y = curr_mouse_x, curr_mouse_y + else: + dx = curr_mouse_x - last_mouse_x + dy = curr_mouse_y - last_mouse_y + rotvec_x = pose[:, 1] * np.radians(rotate_speed * dx) + rotvec_y = pose[:, 0] * np.radians(rotate_speed * dy) + pose = R.from_rotvec(rotvec_x).as_matrix() @ R.from_rotvec(rotvec_y).as_matrix() @ pose + last_mouse_x, last_mouse_y = curr_mouse_x, curr_mouse_y + correct_dir = 1.0 if pose[2, 3] < 0.0 else -1.0 + self.lookat = np.array([0.0, 0.0, correct_dir]) @ pose[:, :3].T + + front = self.lookat - pose[:, 3] + front = front / np.linalg.norm(front) + up = self.lookup @ pose[:, :3].T + left = np.cross(up, front) + position_change = np.zeros(3) + if window.is_pressed("w"): + position_change = front * movement_speed + if window.is_pressed("s"): + position_change = -front * movement_speed + if window.is_pressed("a"): + position_change = left * movement_speed + if window.is_pressed("d"): + position_change = -left * movement_speed + if window.is_pressed("e"): + position_change = up * movement_speed + if window.is_pressed("q"): + position_change = -up * movement_speed + pose[:, 3] += position_change + self.lookat += position_change + if (last_pose - pose).sum(): + last_pose = pose + self.pose.from_numpy(pose.astype(np.float16)) + self.rgb.fill(0.0) + total_frame = 1 + + with gui.sub_window("Options", 0.05, 0.05, 0.68, 0.3) as w: + w.text("General") + T_threshold = w.slider_float("transparency threshold", T_threshold, 0.0, 1.0) + max_samples_for_rendering = w.slider_float("max samples", max_samples_for_rendering, 1, 100) + show_depth = w.checkbox("show depth", show_depth) + # white_bg = w.checkbox("white background", white_bg) + + w.text("Camera") + use_dof = w.checkbox("apply depth of field", use_dof) + dist_to_focus = w.slider_float("focus distance", dist_to_focus, 0.8, 3.0) + len_dis = w.slider_float("lens size", len_dis, 0.0, 0.1) + if last_dist_to_focus != dist_to_focus or last_len_dis != len_dis or last_use_dof != use_dof: + last_dist_to_focus = dist_to_focus + last_len_dis = len_dis + last_use_dof = use_dof + self.rgb.fill(0.0) + total_frame = 1 + + w.text(f"Render time: {render_time:.2f} ms") + + with gui.sub_window("Export", 0.75, 0.05, 0.2, 0.1) as w: + if gui.button("snapshot "): + ti.tools.imwrite(self.render_buffer.to_numpy(), export_dir + "snap_shot.png") + print("save snapshot in export folder") + if gui.button("recording"): + frame = 0 + if not recording: + video_manager = ti.tools.VideoManager( + output_dir=export_dir, framerate=24, automatic_build=False + ) + recording = True + else: + recording = False + video_manager.make_video(gif=True, mp4=True) + print("save video in export folder") + + if recording and video_manager: + w.text(f"recording frames: {frame}") + frame += 1 + pixels_img = self.render_buffer.to_numpy() + video_manager.write_frame(pixels_img) + + t = time.time() + _, _, _ = self.render( + max_samples=max_samples_for_rendering, + T_threshold=T_threshold, + use_dof=use_dof, + dist_to_focus=dist_to_focus, + len_dis=len_dis, + ) + + if not show_depth: + self.rgb_to_render_buffer(total_frame) + else: + self.depth_to_render_buffer(self.depth_max()) + + render_time = 1000 * (time.time() - t) + canvas.set_image(self.render_buffer) + window.show() + + +def main(args): + NGP_fw.taichi_init(args.print_profile) + res = args.res + scale = 0.5 + ngp = NGP_fw( + scale=scale, + cascades=max(1 + int(np.ceil(np.log2(2 * scale))), 1), + grid_size=128, + base_res=16, + log2_T=19, + res=[res, res], + level=16, + exp_step_factor=0, + ) + if args.model_path: + ngp.load_model(args.model_path) + else: + model_dir = "./npy_models/" + if not os.path.exists(model_dir): + os.mkdir(model_dir) + npy_file = os.path.join(model_dir, args.scene + ".npy") + if not os.path.exists(npy_file): + print(f"No {args.scene} model found, downloading ...") + url = PRETRAINED_MODEL_URL.format(args.scene) + wget.download(url, out=npy_file) + ngp.load_model(npy_file) + + ngp.hash_table_init() + + if not args.gui: + ngp.render_frame(0) + else: + ngp.render_gui() + + if args.print_profile: + NGP_fw.taichi_print_profiler() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--res", type=int, default=800) + parser.add_argument( + "--scene", + type=str, + default="lego", + choices=[ + "ship", + "mic", + "materials", + "lego", + "hotdog", + "ficus", + "drums", + "chair", + ], + ) + parser.add_argument("--model_path", type=str, default=None) + parser.add_argument("--gui", action="store_true", default=False) + parser.add_argument("--print_profile", action="store_true", default=False) + cmd_args, _ = parser.parse_known_args() + main(cmd_args) diff --git a/local_packages/taichi/examples/rendering/cornell_box.py b/local_packages/taichi/examples/rendering/cornell_box.py new file mode 100644 index 0000000..03fa28c --- /dev/null +++ b/local_packages/taichi/examples/rendering/cornell_box.py @@ -0,0 +1,218 @@ +# pylint: disable=W0622,W0621,W0401 +import taichi as ti +from taichi.math import * + +ti.init(arch=ti.gpu, default_ip=ti.i32, default_fp=ti.f32) + +image_resolution = (512, 512) +image_buffer = ti.Vector.field(4, float, image_resolution) +image_pixels = ti.Vector.field(3, float, image_resolution) + +Ray = ti.types.struct(origin=vec3, direction=vec3, color=vec3) +Material = ti.types.struct(albedo=vec3, emission=vec3) +Transform = ti.types.struct(position=vec3, rotation=vec3, scale=vec3, matrix=mat3) +SDFObject = ti.types.struct(distance=float, transform=Transform, material=Material) + +objects = SDFObject.field(shape=8) +objects[0] = SDFObject( + transform=Transform(vec3(0, 0, -1), vec3(0, 0, 0), vec3(1, 1, 0.2)), + material=Material(vec3(1, 1, 1) * 0.4, vec3(1)), +) +objects[1] = SDFObject( + transform=Transform(vec3(0, 1, 0), vec3(90, 0, 0), vec3(1, 1, 0.2)), + material=Material(vec3(1, 1, 1) * 0.4, vec3(1)), +) +objects[2] = SDFObject( + transform=Transform(vec3(0, -1, 0), vec3(90, 0, 0), vec3(1, 1, 0.2)), + material=Material(vec3(1, 1, 1) * 0.4, vec3(1)), +) +objects[3] = SDFObject( + transform=Transform(vec3(-1, 0, 0), vec3(0, 90, 0), vec3(1, 1, 0.2)), + material=Material(vec3(1, 0, 0) * 0.5, vec3(1)), +) +objects[4] = SDFObject( + transform=Transform(vec3(1, 0, 0), vec3(0, 90, 0), vec3(1, 1, 0.2)), + material=Material(vec3(0, 1, 0) * 0.5, vec3(1)), +) +objects[5] = SDFObject( + transform=Transform(vec3(-0.275, -0.3, -0.2), vec3(0, 112, 0), vec3(0.25, 0.5, 0.25)), + material=Material(vec3(1, 1, 1) * 0.4, vec3(1)), +) +objects[6] = SDFObject( + transform=Transform(vec3(0.275, -0.55, 0.2), vec3(0, -197, 0), vec3(0.25, 0.25, 0.25)), + material=Material(vec3(1, 1, 1) * 0.4, vec3(1)), +) +objects[7] = SDFObject( + transform=Transform(vec3(0, 0.809, 0), vec3(90, 0, 0), vec3(0.2, 0.2, 0.01)), + material=Material(vec3(1, 1, 1) * 1, vec3(100)), +) + + +@ti.func +def rotate(a: vec3) -> mat3: + s, c = sin(a), cos(a) + return ( + mat3(c.z, s.z, 0, -s.z, c.z, 0, 0, 0, 1) + @ mat3(c.y, 0, -s.y, 0, 1, 0, s.y, 0, c.y) + @ mat3(1, 0, 0, 0, c.x, s.x, 0, -s.x, c.x) + ) + + +@ti.func +def signed_distance(obj: SDFObject, pos: vec3) -> float: + p = obj.transform.matrix @ (pos - obj.transform.position) + q = abs(p) - obj.transform.scale + return length(max(q, 0)) + min(max(q.x, max(q.y, q.z)), 0) + + +@ti.func +def nearest_object(p: vec3): + index, min_dis = 0, 1e32 + for i in ti.static(range(8)): + dis = signed_distance(objects[i], p) + if dis < min_dis: + min_dis, index = dis, i + return index, min_dis + + +@ti.func +def calc_normal(obj: SDFObject, p: vec3) -> vec3: + e = vec2(1, -1) * 0.5773 * 0.005 + return normalize( + e.xyy * signed_distance(obj, p + e.xyy) + + e.yyx * signed_distance(obj, p + e.yyx) + + e.yxy * signed_distance(obj, p + e.yxy) + + e.xxx * signed_distance(obj, p + e.xxx) + ) + + +@ti.func +def raycast(ray: Ray): + w, s, d, cerr = 1.6, 0.0, 0.0, 1e32 + index, t, position, hit = 0, 0.005, vec3(0), False + for _ in range(64): + position = ray.origin + ray.direction * t + index, distance = nearest_object(position) + + ld, d = d, distance + if ld + d < s: + s -= w * s + t += s + w *= 0.5 + w += 0.5 + continue + err = d / t + if err < cerr: + cerr = err + + s = w * d + t += s + hit = err < 0.001 + if t > 5.0 or hit: + break + return objects[index], position, hit + + +@ti.func +def hemispheric_sampling(normal: vec3) -> vec3: + z = 2.0 * ti.random() - 1.0 + a = ti.random() * 2.0 * pi + xy = sqrt(1.0 - z * z) * vec2(sin(a), cos(a)) + return normalize(normal + vec3(xy, z)) + + +@ti.func +def raytrace(ray: Ray) -> Ray: + for _ in range(3): + object, position, hit = raycast(ray) + if not hit: + ray.color = vec3(0) + break + + normal = calc_normal(object, position) + ray.direction = hemispheric_sampling(normal) + ray.color *= object.material.albedo + ray.origin = position + + intensity = dot(ray.color, vec3(0.299, 0.587, 0.114)) + ray.color *= object.material.emission + visible = dot(ray.color, vec3(0.299, 0.587, 0.114)) + if intensity < visible or visible < 0.000001: + break + return ray + + +@ti.kernel +def build_scene(): + for i in objects: + rotation = radians(objects[i].transform.rotation) + objects[i].transform.matrix = rotate(rotation) + + +@ti.kernel +def render(camera_position: vec3, camera_lookat: vec3, camera_up: vec3): + for i, j in image_pixels: + z = normalize(camera_position - camera_lookat) + x = normalize(cross(camera_up, z)) + y = cross(z, x) + + half_width = half_height = tan(radians(35) * 0.5) + lower_left_corner = camera_position - half_width * x - half_height * y - z + horizontal = 2.0 * half_width * x + vertical = 2.0 * half_height * y + + uv = (vec2(i, j) + vec2(ti.random(), ti.random())) / vec2(image_resolution) + po = lower_left_corner + uv.x * horizontal + uv.y * vertical + rd = normalize(po - camera_position) + + ray = raytrace(Ray(camera_position, rd, vec3(1))) + buffer = image_buffer[i, j] + buffer += vec4(ray.color, 1.0) + image_buffer[i, j] = buffer + + color = buffer.rgb / buffer.a + color = pow(color, vec3(1.0 / 2.2)) + color = ( + mat3( + 0.597190, + 0.35458, + 0.04823, + 0.07600, + 0.90834, + 0.01566, + 0.02840, + 0.13383, + 0.83777, + ) + @ color + ) + color = (color * (color + 0.024578) - 0.0000905) / (color * (0.983729 * color + 0.4329510) + 0.238081) + color = ( + mat3( + 1.60475, + -0.531, + -0.0736, + -0.102, + 1.10813, + -0.00605, + -0.00327, + -0.07276, + 1.07602, + ) + @ color + ) + image_pixels[i, j] = clamp(color, 0, 1) + + +def main(): + window = ti.ui.Window("Cornell Box", image_resolution) + canvas = window.get_canvas() + build_scene() + while window.running: + render(vec3(0, 0, 3.5), vec3(0, 0, -1), vec3(0, 1, 0)) + canvas.set_image(image_pixels) + window.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/rendering/oit_renderer.py b/local_packages/taichi/examples/rendering/oit_renderer.py new file mode 100644 index 0000000..ffbda90 --- /dev/null +++ b/local_packages/taichi/examples/rendering/oit_renderer.py @@ -0,0 +1,175 @@ +# A demo of order-independent transparency using Taichi +# Reference: https://github.com/nvpro-samples/vk_order_independent_transparency +import taichi as ti +from taichi.math import clamp, mix, normalize, vec3, vec4 + +ti.init(arch=ti.cuda) +res = (1000, 1000) +color_buffer = ti.Vector.field(3, dtype=ti.f32, shape=res) + +eps = 1e-4 +inf = 1e10 +fov = 0.5 +aspect_ratio = res[0] / res[1] + +alpha_min = 0.2 +alpha_width = 0.3 +camera_pos = vec3(0, 0, 5) + +Line = ti.types.struct(pos=vec3, dir=vec3) + +background_color = vec4(0.2, 0.2, 0.2, 1) + +Sphere = ti.types.struct(center=vec3, radius=float, color=vec4) +spheres = Sphere.field() +ti.root.dynamic(ti.j, 1024, chunk_size=64).place(spheres) + +ColorWithDepth = ti.types.struct(color=vec4, depth=float) +colors_in_pixel = ColorWithDepth.field() +ti.root.dense(ti.ij, res).dynamic(ti.k, 2048, chunk_size=64).place(colors_in_pixel) + + +@ti.func +def gooch_lighting(normal: ti.template()): + light = normalize(vec3(-1, 2, 1)) + warmth = normal * light * 0.5 + 0.5 + return mix(vec3(0, 0.25, 0.75), vec3(1, 1, 1), warmth) + + +@ti.func +def shading(color: ti.template(), normal: ti.template()): + colorRGB = color.rgb * gooch_lighting(normal) + alpha = clamp(alpha_min + color.a * alpha_width, 0, 1) + return vec4(colorRGB, alpha) + + +@ti.func +def intersect_sphere(line: ti.template(), sphere: ti.template()): + color1 = vec4(0) + color2 = vec4(0) + dist1 = inf + dist2 = inf + l = sphere.center - line.pos + l2 = l.dot(l) + r2 = sphere.radius * sphere.radius + tp = l.dot(line.dir) + out_of_sphere = l2 > r2 + may_have_intersection = True + if -eps < l2 - r2 < eps: + if -eps < tp < eps: + may_have_intersection = False + out_of_sphere = tp < 0 + if tp < 0 and out_of_sphere: + may_have_intersection = False + if may_have_intersection: + d2 = l2 - tp * tp + if d2 <= r2: + tt = ti.sqrt(r2 - d2) + t1 = tp - tt + if t1 > 0: + hit_pos1 = line.pos + line.dir * t1 + dist1 = t1 + normal1 = normalize(hit_pos1 - sphere.center) + color1 = shading(sphere.color, normal1) + t2 = tp + tt + if t2 > 0: + hit_pos2 = line.pos + line.dir * t2 + dist2 = t2 + normal2 = normalize(hit_pos2 - sphere.center) + color2 = shading(sphere.color, normal2) + return ColorWithDepth(color=color1, depth=dist1), ColorWithDepth(color=color2, depth=dist2) + + +@ti.func +def get_line_of_vision(u, v): + ray_dir = vec3( + (2 * (u + 0.5) / res[0] - 1) * aspect_ratio, + (2 * (v + 0.5) / res[1] - 1), + -1.0 / fov, + ) + ray_dir = normalize(ray_dir) + return Line(pos=camera_pos, dir=ray_dir) + + +@ti.func +def get_intersections(u, v): + line = get_line_of_vision(u, v) + colors_in_pixel[u, v].deactivate() + for i in range(spheres.length()): + hit1, hit2 = intersect_sphere(line, spheres[i]) + if hit1.depth < inf: + colors_in_pixel[u, v].append(hit1) + if hit2.depth < inf: + colors_in_pixel[u, v].append(hit2) + + +@ti.func +def bubble_sort(u, v): + l = colors_in_pixel[u, v].length() + for i in range(l - 1): + for j in range(l - 1 - i): + if colors_in_pixel[u, v, j].depth > colors_in_pixel[u, v, j + 1].depth: + tmp = colors_in_pixel[u, v, j] + colors_in_pixel[u, v, j] = colors_in_pixel[u, v, j + 1] + colors_in_pixel[u, v, j + 1] = tmp + + +@ti.func +def blend(color: ti.template(), base_color: ti.template()): + color.rgb += (1 - color.a) * base_color.rgb * base_color.a + color.a += (1 - color.a) * base_color.a + + +@ti.func +def get_color(u, v): + color = vec4(0) + for i in range(colors_in_pixel[u, v].length()): + blend(color, colors_in_pixel[u, v, i].color) + blend(color, background_color) + gamma_corrected = ti.pow(color.rgb * color.a, 1 / 2.2) + color_buffer[u, v] = gamma_corrected + + +stratify_res = 5 +inv_stratify = 1.0 / stratify_res + + +@ti.kernel +def generate_sphere(n: ti.i32): + for i in range(n): + spheres.append( + Sphere( + vec3(ti.random() * 3 - 1.5, ti.random() * 3 - 1.5, ti.random() * 3 - 1.5), + ti.random() * 0.2 + 0.1, + (ti.random(), ti.random(), ti.random(), ti.random()), + ) + ) + + +@ti.kernel +def render(): + for u, v in color_buffer: + get_intersections(u, v) + bubble_sort(u, v) + get_color(u, v) + + +@ti.kernel +def init(): + spheres.deactivate() + for u, v in color_buffer: + colors_in_pixel[u, v].deactivate() + + +def main(): + gui = ti.GUI("OIT", res, fast_gui=True) + init() + generate_sphere(256) + render() + gui.set_image(color_buffer) + while gui.running: + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/rendering/rasterizer.py b/local_packages/taichi/examples/rendering/rasterizer.py new file mode 100644 index 0000000..dd5f292 --- /dev/null +++ b/local_packages/taichi/examples/rendering/rasterizer.py @@ -0,0 +1,138 @@ +import math + +import numpy as np + +import taichi as ti + +ti.init(arch=ti.gpu) + +tile_size = 8 # Size of a tile +width, height = 500, 500 # Size of framebuffer +num_triangles = 60 # Number of random colored triangles to be generated +num_samples_per_pixel = 4 # Number of samples per pixel + +num_spp_sqrt = int(math.sqrt(num_samples_per_pixel)) + +samples = ti.Vector.field(3, dtype=ti.f32, shape=(width, height, num_spp_sqrt, num_spp_sqrt)) +pixels = ti.Vector.field(3, dtype=ti.f32, shape=(width, height)) + + +@ti.data_oriented +class TriangleRasterizer: + def __init__(self, n): + self.n = n + self.A = ti.Vector.field(2, dtype=ti.f32) + self.B = ti.Vector.field(2, dtype=ti.f32) + self.C = ti.Vector.field(2, dtype=ti.f32) + self.c0 = ti.Vector.field(3, dtype=ti.f32) + self.c1 = ti.Vector.field(3, dtype=ti.f32) + self.c2 = ti.Vector.field(3, dtype=ti.f32) + + self.vertices = ti.root.dense(ti.i, n).place(self.A, self.B, self.C) + self.colors = ti.root.dense(ti.i, n).place(self.c0, self.c1, self.c2) + + # Tile-based culling + self.block_num_triangles = ti.field( + dtype=ti.i32, + shape=((width - 1) // tile_size + 1, (height - 1) // tile_size + 1), + ) + self.block_indicies = ti.field( + dtype=ti.i32, + shape=((width - 1) // tile_size + 1, (height - 1) // tile_size + 1, n), + ) + + def set_triangle(self, i, v0, v1, v2, c0, c1, c2): + self.A[i] = v0 + self.B[i] = v1 + self.C[i] = v2 + self.c0[i] = c0 + self.c1[i] = c1 + self.c2[i] = c2 + + @staticmethod + @ti.func + def point_in_triangle(P, A, B, C): + alpha = -(P.x - B.x) * (C.y - B.y) + (P.y - B.y) * (C.x - B.x) + alpha /= -(A.x - B.x) * (C.y - B.y) + (A.y - B.y) * (C.x - B.x) + beta = -(P.x - C.x) * (A.y - C.y) + (P.y - C.y) * (A.x - C.x) + beta /= -(B.x - C.x) * (A.y - C.y) + (B.y - C.y) * (A.x - C.x) + gamma = 1.0 - alpha - beta + result = alpha >= 0.0 and alpha <= 1.0 and beta >= 0.0 and beta <= 1.0 and gamma >= 0.0 + return result, alpha, beta, gamma + + @staticmethod + @ti.func + def bbox_intersect(A0, A1, B0, B1): + return B0.x < A1.x and B0.y < A1.y and B1.x > A0.x and B1.y > A0.y + + @ti.kernel + def tile_culling(self): + for i, j in self.block_num_triangles: + idx = 0 + tile_min = ti.Vector([i * tile_size, j * tile_size]) + tile_max = ti.Vector([(i + 1) * tile_size, (j + 1) * tile_size]) + for t in range(self.n): + A, B, C = self.A[t], self.B[t], self.C[t] + tri_min = ti.min(A, ti.min(B, C)) + tri_max = ti.max(A, ti.max(B, C)) + if self.bbox_intersect(tile_min, tile_max, tri_min, tri_max): + self.block_indicies[i, j, idx] = t + idx = idx + 1 + self.block_num_triangles[i, j] = idx + + @ti.kernel + def rasterize(self): + for i, j in pixels: + for k in range(self.block_num_triangles[i // tile_size, j // tile_size]): + idx = self.block_indicies[i // tile_size, j // tile_size, k] + A, B, C = self.A[idx], self.B[idx], self.C[idx] + c0, c1, c2 = self.c0[idx], self.c1[idx], self.c2[idx] + + for subi, subj in ti.ndrange(num_spp_sqrt, num_spp_sqrt): + P = ti.Vector( + [ + i + (subi + 0.5) / num_spp_sqrt, + j + (subj + 0.5) / num_spp_sqrt, + ] + ) + result, alpha, beta, gamma = self.point_in_triangle(P, A, B, C) + + if result: + interpolated_color = c0 * alpha + c1 * beta + c2 * gamma + samples[i, j, subi, subj] = interpolated_color + + samples_sum = ti.Vector([0.0, 0.0, 0.0]) + for subi, subj in ti.ndrange(num_spp_sqrt, num_spp_sqrt): + samples_sum += samples[i, j, subi, subj] + pixels[i, j] = samples_sum / num_samples_per_pixel + + +def main(): + gui = ti.GUI("Rasterizer", res=(width, height)) + + triangles = TriangleRasterizer(num_triangles) + + i = 0 + while gui.running: + # Set a triangle to a new random triangle + triangles.set_triangle( + i % num_triangles, + ti.Vector(np.random.rand(2) * [width, height]), + ti.Vector(np.random.rand(2) * [width, height]), + ti.Vector(np.random.rand(2) * [width, height]), + ti.Vector(np.random.rand(3)), + ti.Vector(np.random.rand(3)), + ti.Vector(np.random.rand(3)), + ) + i = i + 1 + + samples.fill(ti.Vector([1.0, 1.0, 1.0])) + pixels.fill(ti.Vector([1.0, 1.0, 1.0])) + triangles.tile_culling() + triangles.rasterize() + gui.set_image(pixels) + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/rendering/sdf_renderer.py b/local_packages/taichi/examples/rendering/sdf_renderer.py new file mode 100644 index 0000000..7601a7f --- /dev/null +++ b/local_packages/taichi/examples/rendering/sdf_renderer.py @@ -0,0 +1,169 @@ +import math +import time + +import numpy as np + +import taichi as ti + +ti.init(arch=ti.gpu) +res = 1280, 720 +color_buffer = ti.Vector.field(3, dtype=ti.f32, shape=res) +max_ray_depth = 6 +eps = 1e-4 +inf = 1e10 + +fov = 0.23 +dist_limit = 100 + +camera_pos = ti.Vector([0.0, 0.32, 3.7]) +light_pos = [-1.5, 0.6, 0.3] +light_normal = [1.0, 0.0, 0.0] +light_radius = 2.0 + + +@ti.func +def intersect_light(pos, d): + light_loc = ti.Vector(light_pos) + dot = -d.dot(ti.Vector(light_normal)) + dist = d.dot(light_loc - pos) + dist_to_light = inf + if dot > 0 and dist > 0: + D = dist / dot + dist_to_center = (light_loc - (pos + D * d)).norm_sqr() + if dist_to_center < light_radius**2: + dist_to_light = D + return dist_to_light + + +@ti.func +def out_dir(n): + u = ti.Vector([1.0, 0.0, 0.0]) + if abs(n[1]) < 1 - eps: + u = n.cross(ti.Vector([0.0, 1.0, 0.0])).normalized() + v = n.cross(u) + phi = 2 * math.pi * ti.random() + ay = ti.sqrt(ti.random()) + ax = ti.sqrt(1 - ay**2) + return ax * (ti.cos(phi) * u + ti.sin(phi) * v) + ay * n + + +@ti.func +def make_nested(f): + f = f * 40 + i = int(f) + if f < 0: + if i % 2 == 1: + f -= ti.floor(f) + else: + f = ti.floor(f) + 1 - f + f = (f - 0.2) / 40 + return f + + +# https://www.iquilezles.org/www/articles/distfunctions/distfunctions.htm +@ti.func +def sdf(o): + wall = ti.min(o[1] + 0.1, o[2] + 0.4) + sphere = (o - ti.Vector([0.0, 0.35, 0.0])).norm() - 0.36 + + q = ti.abs(o - ti.Vector([0.8, 0.3, 0])) - ti.Vector([0.3, 0.3, 0.3]) + box = ti.Vector([ti.max(0, q[0]), ti.max(0, q[1]), ti.max(0, q[2])]).norm() + ti.min(q.max(), 0) + + O = o - ti.Vector([-0.8, 0.3, 0]) + d = ti.Vector([ti.Vector([O[0], O[2]]).norm() - 0.3, abs(O[1]) - 0.3]) + cylinder = ti.min(d.max(), 0.0) + ti.Vector([ti.max(0, d[0]), ti.max(0, d[1])]).norm() + + geometry = make_nested(ti.min(sphere, box, cylinder)) + geometry = ti.max(geometry, -(0.32 - (o[1] * 0.6 + o[2] * 0.8))) + return ti.min(wall, geometry) + + +@ti.func +def ray_march(p, d): + j = 0 + dist = 0.0 + while j < 100 and sdf(p + dist * d) > 1e-6 and dist < inf: + dist += sdf(p + dist * d) + j += 1 + return ti.min(inf, dist) + + +@ti.func +def sdf_normal(p): + d = 1e-3 + n = ti.Vector([0.0, 0.0, 0.0]) + sdf_center = sdf(p) + for i in ti.static(range(3)): + inc = p + inc[i] += d + n[i] = (1 / d) * (sdf(inc) - sdf_center) + return n.normalized() + + +@ti.func +def next_hit(pos, d): + closest, normal, c = inf, ti.Vector.zero(ti.f32, 3), ti.Vector.zero(ti.f32, 3) + ray_march_dist = ray_march(pos, d) + if ray_march_dist < dist_limit and ray_march_dist < closest: + closest = ray_march_dist + normal = sdf_normal(pos + d * closest) + hit_pos = pos + d * closest + t = int((hit_pos[0] + 10) * 1.1 + 0.5) % 3 + c = ti.Vector([0.4 + 0.3 * (t == 0), 0.4 + 0.2 * (t == 1), 0.4 + 0.3 * (t == 2)]) + return closest, normal, c + + +@ti.kernel +def render(): + for u, v in color_buffer: + aspect_ratio = res[0] / res[1] + pos = camera_pos + d = ti.Vector( + [ + (2 * fov * (u + ti.random()) / res[1] - fov * aspect_ratio - 1e-5), + 2 * fov * (v + ti.random()) / res[1] - fov - 1e-5, + -1.0, + ] + ) + d = d.normalized() + + throughput = ti.Vector([1.0, 1.0, 1.0]) + + depth = 0 + hit_light = 0.00 + + while depth < max_ray_depth: + closest, normal, c = next_hit(pos, d) + depth += 1 + dist_to_light = intersect_light(pos, d) + if dist_to_light < closest: + hit_light = 1 + depth = max_ray_depth + else: + hit_pos = pos + closest * d + if normal.norm_sqr() != 0: + d = out_dir(normal) + pos = hit_pos + 1e-4 * d + throughput *= c + else: + depth = max_ray_depth + color_buffer[u, v] += throughput * hit_light + + +def main(): + gui = ti.GUI("SDF Path Tracer", res) + last_t = 0 + for i in range(50000): + render() + interval = 10 + if i % interval == 0 and i > 0: + print(f"{interval / (time.time() - last_t):.2f} samples/s") + last_t = time.time() + img = color_buffer.to_numpy() * (1 / (i + 1)) + img = img / img.mean() * 0.24 + gui.set_image(np.sqrt(img)) + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/rendering/simple_texture.py b/local_packages/taichi/examples/rendering/simple_texture.py new file mode 100644 index 0000000..f860443 --- /dev/null +++ b/local_packages/taichi/examples/rendering/simple_texture.py @@ -0,0 +1,48 @@ +from taichi.examples.patterns import taichi_logo + +import taichi as ti + +ti.init(arch=ti.vulkan) + +res = (512, 512) +pixels = ti.Vector.field(3, dtype=float, shape=res) + +k = 256 +texture = ti.Texture(ti.Format.r32f, (k, k)) + + +@ti.kernel +def make_texture(tex: ti.types.rw_texture(num_dimensions=2, fmt=ti.Format.r32f, lod=0), n: ti.i32): + for i, j in ti.ndrange(n, n): + ret = ti.cast(taichi_logo(ti.Vector([i, j]) / n), ti.f32) + tex.store(ti.Vector([i, j]), ti.Vector([ret, 0.0, 0.0, 0.0])) + + +@ti.kernel +def paint(t: ti.f32, tex: ti.types.texture(num_dimensions=2), n: ti.i32): + for i, j in pixels: + uv = ti.Vector([i / res[0], j / res[1]]) + warp_uv = uv + ti.Vector([ti.cos(t + uv.x * 5.0), ti.sin(t + uv.y * 5.0)]) * 0.1 + c = ti.math.vec4(0.0) + if uv.x > 0.5: + c = tex.sample_lod(warp_uv, 0.0) + else: + c = tex.fetch(ti.cast(warp_uv * n, ti.i32), 0) + pixels[i, j] = [c.r, c.r, c.r] + + +def main(): + window = ti.ui.Window("UV", res) + canvas = window.get_canvas() + + t = 0.0 + while window.running: + make_texture(texture, k) + paint(t, texture, k) + canvas.set_image(pixels) + window.show() + t += 0.03 + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/rendering/simple_uv.py b/local_packages/taichi/examples/rendering/simple_uv.py new file mode 100644 index 0000000..e9e61b8 --- /dev/null +++ b/local_packages/taichi/examples/rendering/simple_uv.py @@ -0,0 +1,21 @@ +import taichi as ti + +ti.init() + +res = (512, 512) +pixels = ti.Vector.field(3, dtype=float, shape=res) + + +@ti.kernel +def paint(): + for i, j in pixels: + u = i / res[0] + v = j / res[1] + pixels[i, j] = [u, v, 0] + + +gui = ti.GUI("UV", res) +while not gui.get_event(ti.GUI.ESCAPE): + paint() + gui.set_image(pixels) + gui.show() diff --git a/local_packages/taichi/examples/rendering/taichi_logo.py b/local_packages/taichi/examples/rendering/taichi_logo.py new file mode 100644 index 0000000..93710f9 --- /dev/null +++ b/local_packages/taichi/examples/rendering/taichi_logo.py @@ -0,0 +1,29 @@ +from taichi.examples.patterns import taichi_logo + +import taichi as ti + +ti.init() + +n = 512 +x = ti.field(dtype=ti.f32, shape=(n, n)) + + +@ti.kernel +def paint(): + for i, j in ti.ndrange(n * 4, n * 4): + # 4x4 super sampling: + ret = taichi_logo(ti.Vector([i, j]) / (n * 4)) + x[i // 4, j // 4] += ret / 16 + + +def main(): + paint() + + gui = ti.GUI("Logo", (n, n)) + while gui.running: + gui.set_image(x) + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/rendering/taichi_ngp.py b/local_packages/taichi/examples/rendering/taichi_ngp.py new file mode 100644 index 0000000..cb8fa3b --- /dev/null +++ b/local_packages/taichi/examples/rendering/taichi_ngp.py @@ -0,0 +1,1049 @@ +# Instant-NGP renderer implemented using Taichi +# Author : Linyou (linyoutian.loyot@gmail.com) +# Original code at https://github.com/Linyou/taichi-ngp-renderer + +# GUI instruction: +# 1. Move the mouse with right click to rotate the camera +# 2. Press 'w', 'a', 's', 'd' key to move the camera like FPS game +# 3. Press 'q', 'e' key to move the camera up and down + +import argparse +import os +import platform +import time +from typing import Tuple + +import numpy as np +from matplotlib import pyplot as plt + +import taichi as ti +from taichi.math import uvec3, vec2, vec3 + +try: + import cv2 + import wget + from scipy.spatial.transform import Rotation as R +except: + raise ImportError("The Taichi NGP renderer requires cv2, wget and scipy to be installed.") + + +def depth2img(depth): + depth = (depth - depth.min()) / (depth.max() - depth.min()) + depth_img = cv2.applyColorMap((depth * 255).astype(np.uint8), cv2.COLORMAP_TURBO) + + return depth_img + + +arch = ti.cuda if ti._lib.core.with_cuda() else ti.vulkan + +if platform.system() == "Darwin": + block_dim = 64 +else: + block_dim = 128 + +sigma_sm_preload = int(128 / block_dim) * 24 +rgb_sm_preload = int(128 / block_dim) * 50 +data_type = ti.f16 +np_type = np.float16 +tf_vec3 = ti.types.vector(3, dtype=data_type) +tf_vec8 = ti.types.vector(8, dtype=data_type) +tf_vec32 = ti.types.vector(32, dtype=data_type) +tf_vec1 = ti.types.vector(1, dtype=data_type) +tf_vec2 = ti.types.vector(2, dtype=data_type) +tf_mat1x3 = ti.types.matrix(1, 3, dtype=data_type) +tf_index_temp = ti.types.vector(8, dtype=ti.i32) + +MAX_SAMPLES = 1024 +NEAR_DISTANCE = 0.01 + +SQRT3 = 1.7320508075688772 +SQRT3_MAX_SAMPLES = SQRT3 / 1024 +SQRT3_2 = 1.7320508075688772 * 2 +PRETRAINED_MODEL_URL = "https://github.com/Linyou/taichi-ngp-renderer/releases/download/v0.1-models/{}.npy" + + +# <----------------- hash table util code -----------------> +@ti.func +def calc_dt(t, exp_step_factor, grid_size, scale): + return data_type(ti.math.clamp(t * exp_step_factor, SQRT3_MAX_SAMPLES, SQRT3_2 * scale / grid_size)) + + +@ti.func +def __expand_bits(v): + v = (v * ti.uint32(0x00010001)) & ti.uint32(0xFF0000FF) + v = (v * ti.uint32(0x00000101)) & ti.uint32(0x0F00F00F) + v = (v * ti.uint32(0x00000011)) & ti.uint32(0xC30C30C3) + v = (v * ti.uint32(0x00000005)) & ti.uint32(0x49249249) + return v + + +@ti.func +def __morton3D(xyz): + xyz = __expand_bits(xyz) + return xyz[0] | (xyz[1] << 1) | (xyz[2] << 2) + + +@ti.func +def fast_hash(pos_grid_local): + result = ti.uint32(0) + primes = uvec3(ti.uint32(1), ti.uint32(2654435761), ti.uint32(805459861)) + for i in ti.static(range(3)): + result ^= ti.uint32(pos_grid_local[i]) * primes[i] + return result + + +@ti.func +def under_hash(pos_grid_local, resolution): + result = ti.uint32(0) + stride = ti.uint32(1) + for i in ti.static(range(3)): + result += ti.uint32(pos_grid_local[i] * stride) + stride *= resolution + return result + + +@ti.func +def grid_pos2hash_index(indicator, pos_grid_local, resolution, map_size): + hash_result = ti.uint32(0) + if indicator == 1: + hash_result = under_hash(pos_grid_local, resolution) + else: + hash_result = fast_hash(pos_grid_local) + + return hash_result % map_size + + +# <----------------- hash table util code -----------------> + + +@ti.func +def random_in_unit_disk(): + theta = 2.0 * np.pi * ti.random() + return ti.Vector([ti.sin(theta), ti.cos(theta)]) + + +@ti.func +def random_normal(): + x = ti.random() * 2.0 - 1.0 + y = ti.random() * 2.0 - 1.0 + return tf_vec2(x, y) + + +@ti.func +def dir_encode_func(dir_): + out_feat = tf_vec32(0.0) + dir_n = dir_ / dir_.norm() + x = dir_n[0] + y = dir_n[1] + z = dir_n[2] + xy = x * y + xz = x * z + yz = y * z + x2 = x * x + y2 = y * y + z2 = z * z + + temp = 0.28209479177387814 + out_feat[0] = data_type(temp) + out_feat[1] = data_type(-0.48860251190291987 * y) + out_feat[2] = data_type(0.48860251190291987 * z) + out_feat[3] = data_type(-0.48860251190291987 * x) + out_feat[4] = data_type(1.0925484305920792 * xy) + out_feat[5] = data_type(-1.0925484305920792 * yz) + out_feat[6] = data_type(0.94617469575755997 * z2 - 0.31539156525251999) + out_feat[7] = data_type(-1.0925484305920792 * xz) + out_feat[8] = data_type(0.54627421529603959 * x2 - 0.54627421529603959 * y2) + out_feat[9] = data_type(0.59004358992664352 * y * (-3.0 * x2 + y2)) + out_feat[10] = data_type(2.8906114426405538 * xy * z) + out_feat[11] = data_type(0.45704579946446572 * y * (1.0 - 5.0 * z2)) + out_feat[12] = data_type(0.3731763325901154 * z * (5.0 * z2 - 3.0)) + out_feat[13] = data_type(0.45704579946446572 * x * (1.0 - 5.0 * z2)) + out_feat[14] = data_type(1.4453057213202769 * z * (x2 - y2)) + out_feat[15] = data_type(0.59004358992664352 * x * (-x2 + 3.0 * y2)) + + return out_feat + + +@ti.data_oriented +class NGP_fw: + def __init__(self, scale, cascades, grid_size, base_res, log2_T, res, level, exp_step_factor): + self.res = res + self.N_rays = res[0] * res[1] + self.grid_size = grid_size + self.exp_step_factor = exp_step_factor + self.scale = scale + + # rays intersection parameters + # t1, t2 need to be initialized to -1.0 + self.hits_t = ti.Vector.field(n=2, dtype=data_type, shape=self.N_rays) + self.hits_t.fill(-1.0) + self.center = tf_vec3(0.0, 0.0, 0.0) + self.xyz_min = -tf_vec3(scale, scale, scale) + self.xyz_max = tf_vec3(scale, scale, scale) + self.half_size = (self.xyz_max - self.xyz_min) / 2 + + self.noise_buffer = ti.Vector.field(2, dtype=data_type, shape=self.N_rays) + self.gen_noise_buffer() + + self.rays_o = ti.Vector.field(n=3, dtype=data_type, shape=self.N_rays) + self.rays_d = ti.Vector.field(n=3, dtype=data_type, shape=self.N_rays) + + # use the pre-compute direction and scene pose + self.directions = ti.Matrix.field(n=1, m=3, dtype=data_type, shape=(self.N_rays,)) + self.pose = ti.Matrix.field(n=3, m=4, dtype=data_type, shape=()) + + # density_bitfield is used for point sampling + self.density_bitfield = ti.field(ti.uint32, shape=cascades * grid_size**3 // 32) + + # count the number of rays that still alive + self.counter = ti.field(ti.i32, shape=()) + self.counter[None] = self.N_rays + # current alive buffer index + self.current_index = ti.field(ti.i32, shape=()) + self.current_index[None] = 0 + + # how many samples that need to run the model + self.model_launch = ti.field(ti.i32, shape=()) + + # buffer for the alive rays + self.alive_indices = ti.field(ti.i32, shape=(2 * self.N_rays,)) + + # padd the thread to the factor of block size (thread per block) + self.padd_block_network = ti.field(ti.i32, shape=()) + self.padd_block_composite = ti.field(ti.i32, shape=()) + + # hash table variables + self.min_samples = 1 if exp_step_factor == 0 else 4 + self.per_level_scales = 1.3195079565048218 # hard coded, otherwise it will be have lower percision + self.base_res = base_res + self.max_params = 2**log2_T + self.level = level + # hash table fields + self.offsets = ti.field(ti.i32, shape=(16,)) + self.hash_map_sizes = ti.field(ti.uint32, shape=(16,)) + self.hash_map_indicator = ti.field(ti.i32, shape=(16,)) + + # model parameters + layer1_base = 32 * 64 + layer2_base = layer1_base + 64 * 64 + self.hash_embedding = ti.field(dtype=data_type, shape=(11445040,)) + self.sigma_weights = ti.field(dtype=data_type, shape=(layer1_base + 64 * 16,)) + self.rgb_weights = ti.field(dtype=data_type, shape=(layer2_base + 64 * 8,)) + + # buffers that used for points sampling + self.max_samples_per_rays = 1 + self.max_samples_shape = self.N_rays * self.max_samples_per_rays + + self.xyzs = ti.Vector.field(3, dtype=data_type, shape=(self.max_samples_shape,)) + self.dirs = ti.Vector.field(3, dtype=data_type, shape=(self.max_samples_shape,)) + self.deltas = ti.field(data_type, shape=(self.max_samples_shape,)) + self.ts = ti.field(data_type, shape=(self.max_samples_shape,)) + + # buffers that store the info of sampled points + self.run_model_ind = ti.field(ti.int32, shape=(self.max_samples_shape,)) + self.N_eff_samples = ti.field(ti.int32, shape=(self.N_rays,)) + + # intermediate buffers for network + self.xyzs_embedding = ti.field(data_type, shape=(self.max_samples_shape, 32)) + self.final_embedding = ti.field(data_type, shape=(self.max_samples_shape, 16)) + self.out_3 = ti.field(data_type, shape=(self.max_samples_shape, 3)) + self.out_1 = ti.field(data_type, shape=(self.max_samples_shape,)) + self.temp_hit = ti.field(ti.i32, shape=(self.max_samples_shape,)) + + # results buffers + self.opacity = ti.field(ti.f32, shape=(self.N_rays,)) + self.depth = ti.field(ti.f32, shape=self.N_rays) + self.rgb = ti.Vector.field(3, dtype=ti.f32, shape=(self.N_rays,)) + + # GUI render buffer (data type must be float32) + self.render_buffer = ti.Vector.field( + 3, + dtype=ti.f32, + shape=( + res[0], + res[1], + ), + ) + # camera parameters + self.lookat = np.array([0.0, 0.0, -1.0]) + self.lookat_change = np.zeros((3,)) + self.lookup = np.array([0.0, -1.0, 0.0]) + + def hash_table_init(self): + print( + f"GridEncoding: base resolution: {self.base_res}, log scale per level:{self.per_level_scales:.5f} feature numbers per level: {2} maximum parameters per level: {self.max_params} level: {self.level}" + ) + offset = 0 + for i in range(self.level): + resolution = int(np.ceil(self.base_res * np.exp(i * np.log(self.per_level_scales)) - 1.0)) + 1 + params_in_level = resolution**3 + params_in_level = int(resolution**3) if params_in_level % 8 == 0 else int((params_in_level + 8 - 1) / 8) * 8 + params_in_level = min(self.max_params, params_in_level) + self.offsets[i] = offset + self.hash_map_sizes[i] = params_in_level + self.hash_map_indicator[i] = 1 if resolution**3 <= params_in_level else 0 + offset += params_in_level + + def get_direction(self, camera_angle_x): + w, h = int(self.res[1]), int(self.res[0]) + fx = 0.5 * w / np.tan(0.5 * camera_angle_x) + fy = 0.5 * h / np.tan(0.5 * camera_angle_x) + cx, cy = 0.5 * w, 0.5 * h + + x, y = np.meshgrid( + np.arange(w, dtype=np.float32) + 0.5, + np.arange(h, dtype=np.float32) + 0.5, + indexing="xy", + ) + + directions = np.stack([(x - cx) / fx, (y - cy) / fy, np.ones_like(x)], -1) + + return directions.reshape(-1, 3) + + def load_model(self, model_path): + print(f"Loading model from {model_path}") + model = np.load(model_path, allow_pickle=True).item() + # model = torch.load(model_path, map_location='cpu')['state_dict'] + self.hash_embedding.from_numpy(model["model.xyz_encoder.params"].astype(np_type)) + self.sigma_weights.from_numpy(model["model.xyz_sigmas.params"].astype(np_type)) + self.rgb_weights.from_numpy(model["model.rgb_net.params"].astype(np_type)) + + self.density_bitfield.from_numpy(model["model.density_bitfield"].view("uint32")) + + self.pose.from_numpy(model["poses"][20].astype(np_type)) + if self.res[0] != 800 or self.res[1] != 800: + directions = self.get_direction(model["camera_angle_x"])[:, None, :].astype(np_type) + else: + directions = model["directions"][:, None, :].astype(np_type) + + self.directions.from_numpy(directions) + + @staticmethod + def taichi_init(kernel_profiler): + ti.init( + arch=arch, + offline_cache=True, + kernel_profiler=kernel_profiler, + enable_fallback=False, + ) + + @staticmethod + def taichi_print_profiler(): + ti.profiler.print_kernel_profiler_info() + + @ti.kernel + def reset(self): + self.depth.fill(0.0) + self.opacity.fill(0.0) + self.counter[None] = self.N_rays + for i, j in ti.ndrange(self.N_rays, 2): + self.alive_indices[i * 2 + j] = i + + @ti.func + def _ray_aabb_intersec(self, ray_o, ray_d): + inv_d = 1.0 / ray_d + + t_min = (self.center - self.half_size - ray_o) * inv_d + t_max = (self.center + self.half_size - ray_o) * inv_d + + _t1 = ti.min(t_min, t_max) + _t2 = ti.max(t_min, t_max) + t1 = _t1.max() + t2 = _t2.min() + + return tf_vec2(t1, t2) + + @ti.kernel + def gen_noise_buffer(self): + for i in range(self.N_rays): + self.noise_buffer[i] = random_normal() + # self.noise_buffer[i] = random_in_unit_disk() + + @ti.kernel + def ray_intersect_dof(self, dist_to_focus: float, len_dis: float): + ti.block_local(self.pose) + for i in self.directions: + c2w = self.pose[None] + dir_ori = self.directions[i] + offset = len_dis * self.noise_buffer[i] + offset_m = tf_mat1x3( + [ + [ + offset[0], + offset[1], + 0.0, + ] + ] + ) + c2w_dir = c2w[:, :3].transpose() + offset_w = offset_m @ c2w_dir + mat_result = (dir_ori * dist_to_focus) @ c2w_dir - offset_w + ray_d = tf_vec3(mat_result[0, 0], mat_result[0, 1], mat_result[0, 2]) + ray_o = c2w[:, 3] + tf_vec3(offset_w[0, 0], offset_w[0, 1], offset_w[0, 2]) + + t1t2 = self._ray_aabb_intersec(ray_o, ray_d) + + if t1t2[1] > 0.0: + self.hits_t[i][0] = data_type(ti.max(t1t2[0], NEAR_DISTANCE)) + self.hits_t[i][1] = t1t2[1] + + self.rays_o[i] = ray_o + self.rays_d[i] = ray_d + + @ti.kernel + def ray_intersect(self): + ti.block_local(self.pose) + for i in self.directions: + c2w = self.pose[None] + mat_result = self.directions[i] @ c2w[:, :3].transpose() + ray_d = tf_vec3(mat_result[0, 0], mat_result[0, 1], mat_result[0, 2]) + ray_o = c2w[:, 3] + + t1t2 = self._ray_aabb_intersec(ray_o, ray_d) + + if t1t2[1] > 0.0: + self.hits_t[i][0] = data_type(ti.max(t1t2[0], NEAR_DISTANCE)) + self.hits_t[i][1] = t1t2[1] + + self.rays_o[i] = ray_o + self.rays_d[i] = ray_d + + @ti.kernel + def raymarching_test_kernel(self, N_samples: int): + self.run_model_ind.fill(0) + for n in ti.ndrange(self.counter[None]): + c_index = self.current_index[None] + r = self.alive_indices[n * 2 + c_index] + grid_size3 = self.grid_size**3 + grid_size_inv = 1.0 / self.grid_size + + ray_o = self.rays_o[r] + ray_d = self.rays_d[r] + t1t2 = self.hits_t[r] + + d_inv = 1.0 / ray_d + + t = t1t2[0] + t2 = t1t2[1] + + s = 0 + + start_idx = n * N_samples + + while (0 <= t) & (t < t2) & (s < N_samples): + # xyz = ray_o + t*ray_d + xyz = ray_o + t * ray_d + dt = calc_dt(t, self.exp_step_factor, self.grid_size, self.scale) + # mip = ti.max(mip_from_pos(xyz, cascades), + # mip_from_dt(dt, grid_size, cascades)) + + mip_bound = 0.5 + mip_bound_inv = 1 / mip_bound + + nxyz = ti.math.clamp( + 0.5 * (xyz * mip_bound_inv + 1) * self.grid_size, + 0.0, + self.grid_size - 1.0, + ) + # nxyz = ti.ceil(nxyz) + + idx = __morton3D(ti.cast(nxyz, ti.u32)) + # occ = density_grid_taichi[idx] > 5.912066756501768 + occ = self.density_bitfield[ti.int32(idx // 32)] & (1 << ti.u32(idx % 32)) + + if occ: + sn = start_idx + s + for p in ti.static(range(3)): + self.xyzs[sn][p] = xyz[p] + self.dirs[sn][p] = ray_d[p] + self.run_model_ind[sn] = 1 + self.ts[sn] = t + self.deltas[sn] = dt + t += dt + self.hits_t[r][0] = t + s += 1 + + else: + txyz = ( + ((nxyz + 0.5 + 0.5 * ti.math.sign(ray_d)) * grid_size_inv * 2 - 1) * mip_bound - xyz + ) * d_inv + + t_target = t + ti.max(0, txyz.min()) + t += calc_dt(t, self.exp_step_factor, self.grid_size, self.scale) + while t < t_target: + t += calc_dt(t, self.exp_step_factor, self.grid_size, self.scale) + + self.N_eff_samples[n] = s + if s == 0: + self.alive_indices[n * 2 + c_index] = -1 + + @ti.kernel + def rearange_index(self, B: ti.i32): + self.model_launch[None] = 0 + + for i in ti.ndrange(B): + if self.run_model_ind[i]: + index = ti.atomic_add(self.model_launch[None], 1) + self.temp_hit[index] = i + + self.model_launch[None] += 1 + self.padd_block_network[None] = ((self.model_launch[None] + block_dim - 1) // block_dim) * block_dim + # self.padd_block_composite[None] = ((self.counter[None]+ 128 - 1)// 128) *128 + + @ti.kernel + def hash_encode(self): + # get hash table embedding + ti.loop_config(block_dim=16) + for sn, level in ti.ndrange(self.model_launch[None], 16): + # normalize to [0, 1], before is [-0.5, 0.5] + xyz = self.xyzs[self.temp_hit[sn]] + 0.5 + offset = self.offsets[level] * 2 + indicator = self.hash_map_indicator[level] + map_size = self.hash_map_sizes[level] + + init_val0 = tf_vec1(0.0) + init_val1 = tf_vec1(1.0) + local_feature_0 = init_val0[0] + local_feature_1 = init_val0[0] + + index_temp = tf_index_temp(0) + w_temp = tf_vec8(0.0) + hash_temp_1 = tf_vec8(0.0) + hash_temp_2 = tf_vec8(0.0) + + scale = self.base_res * ti.exp(level * ti.log(self.per_level_scales)) - 1.0 + resolution = ti.cast(ti.ceil(scale), ti.uint32) + 1 + + pos = xyz * scale + 0.5 + pos_grid_uint = ti.cast(ti.floor(pos), ti.uint32) + pos -= pos_grid_uint + # pos_grid_uint = ti.cast(pos_grid, ti.uint32) + + for idx in ti.static(range(8)): + # idx_uint = ti.cast(idx, ti.uint32) + w = init_val1[0] + pos_grid_local = uvec3(0) + + for d in ti.static(range(3)): + if (idx & (1 << d)) == 0: + pos_grid_local[d] = pos_grid_uint[d] + w *= data_type(1 - pos[d]) + else: + pos_grid_local[d] = pos_grid_uint[d] + 1 + w *= data_type(pos[d]) + + index = ti.int32(grid_pos2hash_index(indicator, pos_grid_local, resolution, map_size)) + index_temp[idx] = offset + index * 2 + w_temp[idx] = w + + for idx in ti.static(range(8)): + hash_temp_1[idx] = self.hash_embedding[index_temp[idx]] + hash_temp_2[idx] = self.hash_embedding[index_temp[idx] + 1] + + for idx in ti.static(range(8)): + local_feature_0 += data_type(w_temp[idx] * hash_temp_1[idx]) + local_feature_1 += data_type(w_temp[idx] * hash_temp_2[idx]) + + self.xyzs_embedding[sn, level * 2] = local_feature_0 + self.xyzs_embedding[sn, level * 2 + 1] = local_feature_1 + + @ti.kernel + def sigma_layer(self): + ti.loop_config(block_dim=block_dim) + for sn in ti.ndrange(self.padd_block_network[None]): + tid = sn % block_dim + did_launch_num = self.model_launch[None] + init_val = tf_vec1(0.0) + xyz_feat = ti.simt.block.SharedArray((32, block_dim), data_type) + weight = ti.simt.block.SharedArray((64 * 32 + 64 * 16,), data_type) + hid1 = ti.simt.block.SharedArray((64, block_dim), data_type) + hid2 = ti.simt.block.SharedArray((16, block_dim), data_type) + for i in ti.static(range(sigma_sm_preload)): + k = tid * sigma_sm_preload + i + weight[k] = self.sigma_weights[k] + ti.simt.block.sync() + + if sn < did_launch_num: + for i in ti.static(range(32)): + xyz_feat[i, tid] = self.xyzs_embedding[sn, i] + + for i in range(64): + temp = init_val[0] + for j in ti.static(range(32)): + temp += xyz_feat[j, tid] * weight[i * 32 + j] + + hid1[i, tid] = temp + ti.simt.block.sync() + + for i in range(16): + temp = init_val[0] + for j in ti.static(range(64)): + temp += data_type(ti.max(0.0, hid1[j, tid])) * weight[64 * 32 + i * 64 + j] + hid2[i, tid] = temp + ti.simt.block.sync() + + self.out_1[self.temp_hit[sn]] = data_type(ti.exp(hid2[0, tid])) + for i in ti.static(range(16)): + self.final_embedding[sn, i] = hid2[i, tid] + + ti.simt.block.sync() + + @ti.kernel + def rgb_layer(self): + ti.loop_config(block_dim=block_dim) + for sn in ti.ndrange(self.padd_block_network[None]): + ray_id = self.temp_hit[sn] + tid = sn % block_dim + did_launch_num = self.model_launch[None] + init_val = tf_vec1(0.0) + weight = ti.simt.block.SharedArray((64 * 32 + 64 * 64 + 64 * 4,), data_type) + hid1 = ti.simt.block.SharedArray((64, block_dim), data_type) + hid2 = ti.simt.block.SharedArray((64, block_dim), data_type) + for i in ti.static(range(rgb_sm_preload)): + k = tid * rgb_sm_preload + i + weight[k] = self.rgb_weights[k] + ti.simt.block.sync() + + if sn < did_launch_num: + dir_ = self.dirs[ray_id] + dir_feat = dir_encode_func(dir_) + + for i in ti.static(range(16)): + dir_feat[16 + i] = self.final_embedding[sn, i] + + for i in range(64): + temp = init_val[0] + for j in ti.static(range(32)): + temp += dir_feat[j] * weight[i * 32 + j] + + hid1[i, tid] = temp + ti.simt.block.sync() + + for i in range(64): + temp = init_val[0] + for j in ti.static(range(64)): + temp += data_type(ti.max(0.0, hid1[j, tid])) * weight[64 * 32 + i * 64 + j] + + hid2[i, tid] = temp + ti.simt.block.sync() + + for i in ti.static(range(3)): + temp = init_val[0] + for j in ti.static(range(64)): + temp += data_type(ti.max(0.0, hid2[j, tid])) * weight[64 * 32 + 64 * 64 + i * 64 + j] + + hid1[i, tid] = temp + ti.simt.block.sync() + + for i in ti.static(range(3)): + self.out_3[self.temp_hit[sn], i] = data_type(1 / (1 + ti.exp(-hid1[i, tid]))) + ti.simt.block.sync() + + @ti.kernel + def FullyFusedMLP(self): + ti.loop_config(block_dim=block_dim) + for sn in ti.ndrange(self.padd_block_network[None]): + ray_id = self.temp_hit[sn] + tid = sn % block_dim + did_launch_num = self.model_launch[None] + init_val = tf_vec1(0.0) + xyz_feat = tf_vec32(0.0) + weight = ti.simt.block.SharedArray((64 * 32 + 64 * 64 + 64 * 4,), data_type) + hid2_2 = ti.simt.block.SharedArray((32 * block_dim,), data_type) + hid2_1 = ti.simt.block.SharedArray((32 * block_dim,), data_type) + hid1 = ti.simt.block.SharedArray((64 * block_dim,), data_type) + for i in ti.static(range(rgb_sm_preload)): + k = tid * rgb_sm_preload + i + weight[k] = self.rgb_weights[k] + for i in ti.static(range(sigma_sm_preload)): + k = tid * sigma_sm_preload + i + hid2_1[k] = self.sigma_weights[k] + ti.simt.block.sync() + + if sn < did_launch_num: + dir_ = self.dirs[ray_id] + for i in ti.static(range(32)): + xyz_feat[i] = self.xyzs_embedding[sn, i] + dir_feat = dir_encode_func(dir_) + + for i in range(64): + temp = init_val[0] + for j in ti.static(range(32)): + temp += xyz_feat[j] * hid2_1[i * 32 + j] + hid1[i * block_dim + tid] = temp + ti.simt.block.sync() + + for i in range(16): + temp = init_val[0] + for j in ti.static(range(64)): + temp += data_type(ti.max(0.0, hid1[j * block_dim + tid])) * hid2_1[64 * 32 + i * 64 + j] + hid2_2[i * block_dim + tid] = temp + ti.simt.block.sync() + + out1 = data_type(ti.exp(hid2_2[tid])) + + for i in ti.static(range(16)): + dir_feat[16 + i] = hid2_2[i * block_dim + tid] + + for i in range(64): + temp = init_val[0] + for j in ti.static(range(32)): + temp += dir_feat[j] * weight[i * 32 + j] + hid1[i * block_dim + tid] = temp + ti.simt.block.sync() + + for i in range(32): + temp1 = init_val[0] + temp2 = init_val[0] + for j in ti.static(range(64)): + temp1 += data_type(ti.max(0.0, hid1[j * block_dim + tid])) * weight[64 * 32 + i * 64 + j] + temp2 += data_type(ti.max(0.0, hid1[j * block_dim + tid])) * weight[64 * 32 + (i + 32) * 64 + j] + hid2_1[i * block_dim + tid] = temp1 + hid2_2[i * block_dim + tid] = temp2 + ti.simt.block.sync() + + for i in ti.static(range(3)): + temp = init_val[0] + for j in ti.static(range(32)): + temp += ( + data_type(ti.max(0.0, hid2_1[j * block_dim + tid])) * weight[64 * 32 + 64 * 64 + i * 64 + j] + ) + # ti.simt.block.sync() + temp += ( + data_type(ti.max(0.0, hid2_2[j * block_dim + tid])) + * weight[64 * 32 + 64 * 64 + i * 64 + j + 32] + ) + hid1[i * block_dim + tid] = temp + ti.simt.block.sync() + + self.out_1[self.temp_hit[sn]] = out1 + for i in ti.static(range(3)): + self.out_3[self.temp_hit[sn], i] = data_type(1 / (1 + ti.exp(-hid1[i * block_dim + tid]))) + ti.simt.block.sync() + + @ti.kernel + def composite_test(self, max_samples: ti.i32, T_threshold: data_type): + for n in ti.ndrange(self.counter[None]): + N_samples = self.N_eff_samples[n] + if N_samples != 0: + c_index = self.current_index[None] + r = self.alive_indices[n * 2 + c_index] + + T = data_type(1.0 - self.opacity[r]) + + start_idx = n * max_samples + + rgb_temp = tf_vec3(0.0) + depth_temp = tf_vec1(0.0) + opacity_temp = tf_vec1(0.0) + out_3_temp = tf_vec3(0.0) + + for s in range(N_samples): + sn = start_idx + s + a = data_type(1.0 - ti.exp(-self.out_1[sn] * self.deltas[sn])) + w = a * T + + for i in ti.static(range(3)): + out_3_temp[i] = self.out_3[sn, i] + + rgb_temp += w * out_3_temp + depth_temp[0] += w * self.ts[sn] + opacity_temp[0] += w + + T *= data_type(1.0 - a) + + if T <= T_threshold: + self.alive_indices[n * 2 + c_index] = -1 + break + + self.rgb[r] += rgb_temp + self.depth[r] += depth_temp[0] + self.opacity[r] += opacity_temp[0] + + @ti.kernel + def re_order(self, B: ti.i32): + self.counter[None] = 0 + c_index = self.current_index[None] + n_index = (c_index + 1) % 2 + self.current_index[None] = n_index + + for i in ti.ndrange(B): + alive_temp = self.alive_indices[i * 2 + c_index] + if alive_temp >= 0: + index = ti.atomic_add(self.counter[None], 1) + self.alive_indices[index * 2 + n_index] = alive_temp + + def write_image(self): + rgb_np = self.rgb.to_numpy().reshape(self.res[0], self.res[1], 3) + depth_np = self.depth.to_numpy().reshape(self.res[0], self.res[1]) + plt.imsave("taichi_ngp.png", (rgb_np * 255).astype(np.uint8)) + plt.imsave("taichi_ngp_depth.png", depth2img(depth_np)) + + def render(self, max_samples, T_threshold, use_dof=False, dist_to_focus=0.8, len_dis=0.0) -> Tuple[float, int, int]: + samples = 0 + self.reset() + self.gen_noise_buffer() + if use_dof: + self.ray_intersect_dof(dist_to_focus, len_dis) + else: + self.ray_intersect() + + while samples < max_samples: + N_alive = self.counter[None] + if N_alive == 0: + break + + # how many more samples the number of samples add for each ray + N_samples = max(min(self.N_rays // N_alive, 64), self.min_samples) + samples += N_samples + launch_model_total = N_alive * N_samples + + self.raymarching_test_kernel(N_samples) + self.rearange_index(launch_model_total) + # self.dir_encode() + self.hash_encode() + self.sigma_layer() + self.rgb_layer() + # self.FullyFusedMLP() + self.composite_test(N_samples, T_threshold) + self.re_order(N_alive) + + return samples, N_alive, N_samples + + def render_frame(self, frame_id): + t = time.time() + samples, N_alive, N_samples = self.render(max_samples=100, T_threshold=1e-4) + self.write_image() + + print(f"samples: {samples}, N_alive: {N_alive}, N_samples: {N_samples}") + print(f"Render time: {1000*(time.time()-t):.2f} ms") + + @ti.kernel + def rgb_to_render_buffer(self, frame: ti.i32): + for i, j in self.render_buffer: + rgb = self.rgb[(self.res[0] - j) * self.res[1] + i] + self.render_buffer[i, j] = rgb / frame + + @ti.kernel + def depth_max(self) -> vec2: + max_v = self.depth[0] + min_v = self.depth[0] + for i in ti.ndrange(self.N_rays): + ti.atomic_max(max_v, self.depth[i]) + ti.atomic_min(min_v, self.depth[i]) + return vec2(max_v, min_v) + + @ti.kernel + def depth_to_render_buffer(self, max_min: vec2): + for i, j in self.render_buffer: + max_v = max_min[0] + min_v = max_min[1] + depth = self.depth[(self.res[0] - j) * self.res[1] + i] + pixel = (vec3(depth) - min_v) / (max_v - min_v) + self.render_buffer[i, j] = pixel + + def init_cam(self): + self.lookat = self.lookat @ self.pose.to_numpy()[:, :3].T + + def render_gui(self): + video_manager = None + + # check if the export file exists for snapshot and video + export_dir = "./export/" + if not os.path.exists(export_dir): + os.mkdir(export_dir) + + W, H = self.res + window = ti.ui.Window("Taichi NGP", (W, H)) + canvas = window.get_canvas() + gui = window.get_gui() + + last_mouse_x = None + last_mouse_y = None + rotate_speed = 50 + movement_speed = 0.03 + max_samples_for_rendering = 100 + render_time = 0 + # white_bg = False + recording = False + show_depth = False + use_dof = False + last_use_dof = False + frame = 0 + T_threshold = 1e-2 + dist_to_focus = 1.2 + len_dis = 0.04 + self.init_cam() + last_pose = self.pose.to_numpy() + total_frame = 0 + last_dist_to_focus = dist_to_focus + last_len_dis = len_dis + + while window.running: + # TODO: make it more efficient + pose = self.pose.to_numpy() + total_frame += 1 + if not window.is_pressed(ti.ui.RMB): + last_mouse_x = None + last_mouse_y = None + else: + curr_mouse_x, curr_mouse_y = window.get_cursor_pos() + if last_mouse_x is None or last_mouse_y is None: + last_mouse_x, last_mouse_y = curr_mouse_x, curr_mouse_y + else: + dx = curr_mouse_x - last_mouse_x + dy = curr_mouse_y - last_mouse_y + rotvec_x = pose[:, 1] * np.radians(rotate_speed * dx) + rotvec_y = pose[:, 0] * np.radians(rotate_speed * dy) + pose = R.from_rotvec(rotvec_x).as_matrix() @ R.from_rotvec(rotvec_y).as_matrix() @ pose + last_mouse_x, last_mouse_y = curr_mouse_x, curr_mouse_y + correct_dir = 1.0 if pose[2, 3] < 0.0 else -1.0 + self.lookat = np.array([0.0, 0.0, correct_dir]) @ pose[:, :3].T + + front = self.lookat - pose[:, 3] + front = front / np.linalg.norm(front) + up = self.lookup @ pose[:, :3].T + left = np.cross(up, front) + position_change = np.zeros(3) + if window.is_pressed("w"): + position_change = front * movement_speed + if window.is_pressed("s"): + position_change = -front * movement_speed + if window.is_pressed("a"): + position_change = left * movement_speed + if window.is_pressed("d"): + position_change = -left * movement_speed + if window.is_pressed("e"): + position_change = up * movement_speed + if window.is_pressed("q"): + position_change = -up * movement_speed + pose[:, 3] += position_change + self.lookat += position_change + if (last_pose - pose).sum(): + last_pose = pose + self.pose.from_numpy(pose.astype(np.float16)) + self.rgb.fill(0.0) + total_frame = 1 + + with gui.sub_window("Options", 0.05, 0.05, 0.68, 0.3) as w: + w.text("General") + T_threshold = w.slider_float("transparency threshold", T_threshold, 0.0, 1.0) + max_samples_for_rendering = w.slider_float("max samples", max_samples_for_rendering, 1, 100) + show_depth = w.checkbox("show depth", show_depth) + # white_bg = w.checkbox("white background", white_bg) + + w.text("Camera") + use_dof = w.checkbox("apply depth of field", use_dof) + dist_to_focus = w.slider_float("focus distance", dist_to_focus, 0.8, 3.0) + len_dis = w.slider_float("lens size", len_dis, 0.0, 0.1) + if last_dist_to_focus != dist_to_focus or last_len_dis != len_dis or last_use_dof != use_dof: + last_dist_to_focus = dist_to_focus + last_len_dis = len_dis + last_use_dof = use_dof + self.rgb.fill(0.0) + total_frame = 1 + + w.text(f"Render time: {render_time:.2f} ms") + + with gui.sub_window("Export", 0.75, 0.05, 0.2, 0.1) as w: + if gui.button("snapshot "): + ti.tools.imwrite(self.render_buffer.to_numpy(), export_dir + "snap_shot.png") + print("save snapshot in export folder") + if gui.button("recording"): + frame = 0 + if not recording: + video_manager = ti.tools.VideoManager( + output_dir=export_dir, framerate=24, automatic_build=False + ) + recording = True + else: + recording = False + video_manager.make_video(gif=True, mp4=True) + print("save video in export folder") + + if recording and video_manager: + w.text(f"recording frames: {frame}") + frame += 1 + pixels_img = self.render_buffer.to_numpy() + video_manager.write_frame(pixels_img) + + t = time.time() + _, _, _ = self.render( + max_samples=max_samples_for_rendering, + T_threshold=T_threshold, + use_dof=use_dof, + dist_to_focus=dist_to_focus, + len_dis=len_dis, + ) + + if not show_depth: + self.rgb_to_render_buffer(total_frame) + else: + self.depth_to_render_buffer(self.depth_max()) + + render_time = 1000 * (time.time() - t) + canvas.set_image(self.render_buffer) + window.show() + + +def main(args): + NGP_fw.taichi_init(args.print_profile) + res = args.res + scale = 0.5 + ngp = NGP_fw( + scale=scale, + cascades=max(1 + int(np.ceil(np.log2(2 * scale))), 1), + grid_size=128, + base_res=16, + log2_T=19, + res=[res, res], + level=16, + exp_step_factor=0, + ) + if args.model_path: + ngp.load_model(args.model_path) + else: + model_dir = "./npy_models/" + if not os.path.exists(model_dir): + os.mkdir(model_dir) + npy_file = os.path.join(model_dir, args.scene + ".npy") + if not os.path.exists(npy_file): + print(f"No {args.scene} model found, downloading ...") + url = PRETRAINED_MODEL_URL.format(args.scene) + wget.download(url, out=npy_file) + ngp.load_model(npy_file) + + ngp.hash_table_init() + + if not args.gui: + ngp.render_frame(0) + else: + ngp.render_gui() + + if args.print_profile: + NGP_fw.taichi_print_profiler() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--res", type=int, default=800) + parser.add_argument( + "--scene", + type=str, + default="lego", + choices=[ + "ship", + "mic", + "materials", + "lego", + "hotdog", + "ficus", + "drums", + "chair", + ], + ) + parser.add_argument("--model_path", type=str, default=None) + parser.add_argument("--gui", action="store_true", default=False) + parser.add_argument("--print_profile", action="store_true", default=False) + cmd_args, _ = parser.parse_known_args() + main(cmd_args) diff --git a/local_packages/taichi/examples/simulation/ad_gravity.py b/local_packages/taichi/examples/simulation/ad_gravity.py new file mode 100644 index 0000000..1b22347 --- /dev/null +++ b/local_packages/taichi/examples/simulation/ad_gravity.py @@ -0,0 +1,55 @@ +import taichi as ti + +ti.init() + +N = 8 +dt = 1e-5 + +x = ti.Vector.field(2, dtype=ti.f32, shape=N, needs_grad=True) # particle positions +v = ti.Vector.field(2, dtype=ti.f32, shape=N) # particle velocities +U = ti.field(dtype=ti.f32, shape=(), needs_grad=True) # potential energy + + +@ti.kernel +def compute_U(): + for i, j in ti.ndrange(N, N): + r = x[i] - x[j] + # r.norm(1e-3) is equivalent to ti.sqrt(r.norm()**2 + 1e-3) + # This is to prevent 1/0 error which can cause wrong derivative + U[None] += -1 / r.norm(1e-3) # U += -1 / |r| + + +@ti.kernel +def advance(): + for i in x: + v[i] += dt * -x.grad[i] # dv/dt = -dU/dx + for i in x: + x[i] += dt * v[i] # dx/dt = v + + +def substep(): + with ti.ad.Tape(loss=U): + # Kernel invocations in this scope will later contribute to partial derivatives of + # U with respect to input variables such as x. + compute_U() # The tape will automatically compute dU/dx and save the results in x.grad + advance() + + +@ti.kernel +def init(): + for i in x: + x[i] = [ti.random(), ti.random()] + + +def main(): + init() + gui = ti.GUI("Autodiff gravity") + while gui.running: + for i in range(50): + substep() + gui.circles(x.to_numpy(), radius=3) + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/simulation/comet.py b/local_packages/taichi/examples/simulation/comet.py new file mode 100644 index 0000000..7897119 --- /dev/null +++ b/local_packages/taichi/examples/simulation/comet.py @@ -0,0 +1,105 @@ +import math + +import taichi as ti + +ti.init(arch=ti.cuda) + +dim = 3 +N = 1024 * 8 +dt = 2e-4 +steps = 7 +sun = ti.Vector([0.5, 0.5] if dim == 2 else [0.5, 0.5, 0.0]) +gravity = 0.5 +pressure = 0.3 +tail_paticle_scale = 0.4 +color_init = 0.3 +color_decay = 1.6 +vel_init = 0.07 +res = 640 + +inv_m = ti.field(ti.f32) +color = ti.field(ti.f32) +x = ti.Vector.field(dim, ti.f32) +v = ti.Vector.field(dim, ti.f32) +ti.root.bitmasked(ti.i, N).place(x, v, inv_m, color) +count = ti.field(ti.i32, ()) +img = ti.field(ti.f32, (res, res)) + + +@ti.func +def rand_unit_2d(): + a = ti.random() * 2 * math.pi + return ti.Vector([ti.cos(a), ti.sin(a)]) + + +@ti.func +def rand_unit_3d(): + u = rand_unit_2d() + s = ti.random() * 2 - 1 + c = ti.sqrt(1 - s**2) + return ti.Vector([c * u[0], c * u[1], s]) + + +@ti.kernel +def substep(): + ti.no_activate(x) + for i in x: + r = x[i] - sun + r_sq_inverse = r / r.norm(1e-3) ** 3 + acceleration = (pressure * inv_m[i] - gravity) * r_sq_inverse + v[i] += acceleration * dt + x[i] += v[i] * dt + color[i] *= ti.exp(-dt * color_decay) + + if not all(-0.1 <= x[i] <= 1.1): + ti.deactivate(x.snode.parent(), [i]) + + +@ti.kernel +def generate(): + r = x[0] - sun + n_tail_paticles = int(tail_paticle_scale / r.norm(1e-3) ** 2) + for _ in range(n_tail_paticles): + r = x[0] + if ti.static(dim == 3): + r = rand_unit_3d() + else: + r = rand_unit_2d() + xi = ti.atomic_add(count[None], 1) % (N - 1) + 1 + x[xi] = x[0] + v[xi] = r * vel_init + v[0] + inv_m[xi] = 0.5 + ti.random() + color[xi] = color_init + + +@ti.kernel +def render(): + for p in ti.grouped(img): + img[p] = 1e-6 / (p / res - ti.Vector([sun.x, sun.y])).norm(1e-4) ** 3 + for i in x: + p = int(ti.Vector([x[i].x, x[i].y]) * res) + if 0 <= p[0] < res and 0 <= p[1] < res: + img[p] += color[i] + + +def main(): + inv_m[0] = 0 + x[0].x = +0.5 + x[0].y = -0.01 + v[0].x = +0.6 + v[0].y = +0.4 + color[0] = 1 + + gui = ti.GUI("Comet", res) + while gui.running: + gui.running = not gui.get_event(gui.ESCAPE) + generate() + for s in range(steps): + substep() + render() + gui.set_image(img) + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/simulation/euler.py b/local_packages/taichi/examples/simulation/euler.py new file mode 100644 index 0000000..647293b --- /dev/null +++ b/local_packages/taichi/examples/simulation/euler.py @@ -0,0 +1,461 @@ +from matplotlib import cm + +import taichi as ti + +# A compressible euler equation solver using two methods +# 1: 2nd order muscl +# 2: thinc BVD, ref: "Limiter-free discontinuity-capturing scheme +# for compressible gas dynamics with reactive fronts" + +real = ti.f32 +ti.init(arch=ti.gpu, default_fp=real) + +N = 512 # grid resolution +CFL = 0.9 # keep below 1 +method = 1 # 0:muscl, 1:thinc +IC_type = 0 # 0:sod +BC_type = 0 # 0:walls + +img_field = 0 # 0:density, 1: schlieren, 2:vorticity, 3: velocity mag +res = 512 # gui resolution +cmap_name = "magma_r" # python colormap +use_fixed_caxis = 0 # 1: use fixed caxis limits, 0: automatic caxis limits +fixed_caxis = [0.0, 5.0] # fixed caxis limits + +Q = ti.Vector.field(4, dtype=real, shape=(N, N)) # [rho, rho*u, rho*v, rho*e] consv vars +Q_old = ti.Vector.field(4, dtype=real, shape=(N, N)) +W = ti.Vector.field(4, dtype=real, shape=(N, N)) # [rho, u, v, p] cell avg +W_xl = ti.Vector.field(4, dtype=real, shape=(N, N, 3)) # left side of x-face +W_xr = ti.Vector.field(4, dtype=real, shape=(N, N, 3)) # right side of x-face +W_yl = ti.Vector.field(4, dtype=real, shape=(N, N, 3)) # left side of y-face +W_yr = ti.Vector.field(4, dtype=real, shape=(N, N, 3)) # right side of y-face +F_x = ti.Vector.field(4, dtype=real, shape=(N, N)) # x-face flux +F_y = ti.Vector.field(4, dtype=real, shape=(N, N)) # y-face flux +dt = ti.field(dtype=real, shape=()) +img = ti.field(dtype=ti.f32, shape=(res, res)) + +beta_smooth = 1.2 +beta_sharp = 2.0 +gamma = 1.4 # ratio of specific heats +h = 1.0 / (N - 2) # cell size +vol = h * h # cell volume + + +@ti.func +def is_interior_cell(i, j): + return 0 < i < N - 1 and 0 < j < N - 1 + + +@ti.func +def is_interior_x_face(i, j): + return 1 < i < N - 1 and 0 < j < N - 1 + + +@ti.func +def is_boundary_x_face(i, j): + return (i == 1 or i == N - 1) and 0 < j < N - 1 + + +@ti.func +def is_interior_y_face(i, j): + return 0 < i < N - 1 and 1 < j < N - 1 + + +@ti.func +def is_boundary_y_face(i, j): + return 0 < i < N - 1 and (j == 1 or j == N - 1) + + +@ti.func +def get_cell_pos(i, j): + return ti.Vector([i * h - h / 2.0, j * h - h / 2.0]) + + +@ti.kernel +def compute_W(): + # conversion from conservative variables to primitive variables + for i, j in Q: + W[i, j] = q_to_w(Q[i, j]) + + +@ti.kernel +def copy_to_old(): + for i, j in Q: + Q_old[i, j] = Q[i, j] + + +@ti.kernel +def set_ic(): + for i, j in Q: + if IC_type == 0: + # primitive variable initial conditions + w_in = ti.Vector([10.0, 0.0, 0.0, 10.0]) + w_out = ti.Vector([0.125, 0.0, 0.0, 0.1]) + + pos = get_cell_pos(i, j) + center = ti.Vector([0.5, 0.5]) + + if (pos - center).norm() < 0.25: + Q[i, j] = w_to_q(w_in) + else: + Q[i, j] = w_to_q(w_out) + + # implement more ic's later + + +@ti.kernel +def set_bc(): + # enforce boundary conditions by setting ghost cells + for i, j in Q: + if not is_interior_cell(i, j): + if BC_type == 0: # walls + # enforce neumann=0 and zero normal velocity on face + if i == 0: + Q[i, j] = Q[i + 1, j] + Q[i, j][1] = -Q[i + 1, j][1] + if i == N - 1: + Q[i, j] = Q[i - 1, j] # neumann 0 bc + Q[i, j][1] = -Q[i - 1, j][1] # enforce 0 normal velocty at face + if j == 0: + Q[i, j] = Q[i, j + 1] + Q[i, j][2] = -Q[i, j + 1][2] + if j == N - 1: + Q[i, j] = Q[i, j - 1] + Q[i, j][2] = -Q[i, j - 1][2] + + # implement more bc's later + + +@ti.func +def mc_lim(r): + # MC flux limiter + return max(0.0, min(2.0 * r, min(0.5 * (r + 1.0), 2.0))) + + +@ti.func +def w_to_q(w): + # convert primitive variables to conserved variables + q = ti.Vector([0.0, 0.0, 0.0, 0.0]) + q[0] = w[0] # rho + q[1] = w[0] * w[1] # rho*u + q[2] = w[0] * w[2] # rho*v + q[3] = w[0] * (w[3] / ((gamma - 1) * w[0]) + 0.5 * (w[1] ** 2 + w[2] ** 2)) + # rho*e + return q + + +@ti.func +def q_to_w(q): + # convert conserved variables to primitive variables + w = ti.Vector([0.0, 0.0, 0.0, 0.0]) + w[0] = q[0] # rho + w[1] = q[1] / q[0] # u + w[2] = q[2] / q[0] # v + w[3] = (gamma - 1) * (q[3] - 0.5 * (q[1] ** 2 + q[2] ** 2) / q[0]) + # p + return w + + +@ti.func +def HLLC_flux(qL, qR, n): + # normal vector + nx = n[0] + ny = n[1] + + # Left state + rL = qL[0] # rho + uL = qL[1] / qL[0] # u + vL = qL[2] / qL[0] # v + pL = (gamma - 1.0) * (qL[3] - 0.5 * (qL[1] ** 2 + qL[2] ** 2) / qL[0]) + # p + vnL = uL * nx + vL * ny + vtL = -uL * ny + vL * nx + aL = ti.sqrt(gamma * pL / rL) + HL = (qL[3] + pL) / rL + + # Right state + rR = qR[0] # rho + uR = qR[1] / qR[0] # u + vR = qR[2] / qR[0] # v + pR = (gamma - 1.0) * (qR[3] - 0.5 * (qR[1] ** 2 + qR[2] ** 2) / qR[0]) + # p + vnR = uR * nx + vR * ny + vtR = -uR * ny + vR * nx + aR = ti.sqrt(gamma * pR / rR) + HR = (qR[3] + pR) / rR + + # Left and Right fluxes + fL = ti.Vector([rL * vnL, rL * vnL * uL + pL * nx, rL * vnL * vL + pL * ny, rL * vnL * HL]) + fR = ti.Vector([rR * vnR, rR * vnR * uR + pR * nx, rR * vnR * vR + pR * ny, rR * vnR * HR]) + + # Roe Averages + rt = ti.sqrt(rR / rL) + u = (uL + rt * uR) / (1.0 + rt) + v = (vL + rt * vR) / (1.0 + rt) + H = (HL + rt * HR) / (1.0 + rt) + a = ti.sqrt((gamma - 1.0) * (H - (u**2 + v**2) / 2.0)) + vn = u * nx + v * ny + + # wavespeeds + sL = min(vnL - aL, vn - a) + sR = max(vnR + aR, vn + a) + sM = (pL - pR + rR * vnR * (sR - vnR) - rL * vnL * (sL - vnL)) / (rR * (sR - vnR) - rL * (sL - vnL)) + + # HLLC flux. + HLLC = ti.Vector([0.0, 0.0, 0.0, 0.0]) + if 0 <= sL: + HLLC = fL + elif 0 <= sM: + qsL = ( + rL + * (sL - vnL) + / (sL - sM) + * ti.Vector( + [ + 1.0, + sM * nx - vtL * ny, + sM * ny + vtL * nx, + qL[3] / rL + (sM - vnL) * (sM + pL / (rL * (sL - vnL))), + ] + ) + ) + HLLC = fL + sL * (qsL - qL) + elif 0 <= sR: + qsR = ( + rR + * (sR - vnR) + / (sR - sM) + * ti.Vector( + [ + 1.0, + sM * nx - vtR * ny, + sM * ny + vtR * nx, + qR[3] / rR + (sM - vnR) * (sM + pR / (rR * (sR - vnR))), + ] + ) + ) + HLLC = fR + sR * (qsR - qR) + elif 0 >= sR: + HLLC = fR + + return HLLC + + +@ti.kernel +def compute_F_muscl(): + for i, j in Q: + if is_interior_x_face(i, j): + # muscl reconstrucion of left and right states with HLLC flux + wL = ti.Vector([0.0, 0.0, 0.0, 0.0]) + wR = ti.Vector([0.0, 0.0, 0.0, 0.0]) + for f in ti.static(range(4)): + ratio_l = (W[i, j][f] - W[i - 1, j][f]) / (W[i - 1, j][f] - W[i - 2, j][f]) + ratio_r = (W[i, j][f] - W[i - 1, j][f]) / (W[i + 1, j][f] - W[i, j][f]) + wL[f] = W[i - 1, j][f] + 0.5 * mc_lim(ratio_l) * (W[i - 1, j][f] - W[i - 2, j][f]) + wR[f] = W[i, j][f] - 0.5 * mc_lim(ratio_r) * (W[i + 1, j][f] - W[i, j][f]) + F_x[i, j] = HLLC_flux(w_to_q(wL), w_to_q(wR), ti.Vector([1.0, 0.0])) + + elif is_boundary_x_face(i, j): + F_x[i, j] = HLLC_flux(Q[i - 1, j], Q[i, j], ti.Vector([1.0, 0.0])) + + if is_interior_y_face(i, j): + # muscl reconstrucion of left and right states with HLLC flux + wL = ti.Vector([0.0, 0.0, 0.0, 0.0]) + wR = ti.Vector([0.0, 0.0, 0.0, 0.0]) + for f in ti.static(range(4)): + ratio_l = (W[i, j][f] - W[i, j - 1][f]) / (W[i, j - 1][f] - W[i, j - 2][f]) + ratio_r = (W[i, j][f] - W[i, j - 1][f]) / (W[i, j + 1][f] - W[i, j][f]) + wL[f] = W[i, j - 1][f] + 0.5 * mc_lim(ratio_l) * (W[i, j - 1][f] - W[i, j - 2][f]) + wR[f] = W[i, j][f] - 0.5 * mc_lim(ratio_r) * (W[i, j + 1][f] - W[i, j][f]) + F_y[i, j] = HLLC_flux(w_to_q(wL), w_to_q(wR), ti.Vector([0.0, 1.0])) + + elif is_boundary_y_face(i, j): + F_y[i, j] = HLLC_flux(Q[i, j - 1], Q[i, j], ti.Vector([0.0, 1.0])) + + +@ti.func +def sign(a): + sgn = 0.0 + if a > 0.0: + sgn = 1.0 + elif a < 0.0: + sgn = -1.0 + return sgn + + +@ti.func +def cosh(a): + return (ti.exp(a) + ti.exp(-a)) / 2.0 + + +@ti.func +def thinc(wl, wc, wr, beta): + w0 = wc + w1 = wc + if (wr - wc) * (wc - wl) > 0.0: + # use thinc reconstruction + eps = 1.0e-15 + wmin = ti.min(wr, wl) + wmax = ti.max(wr, wl) + wdelta = wmax - wmin + theta = sign(wr - wl) + C = (wc - wmin + eps) / (wdelta + eps) + B = ti.exp(theta * beta * (2 * C - 1)) + A = (B / cosh(beta) - 1) / ti.tanh(beta) + + # reconstructed value on right side of left face + w0 = wmin + wdelta / 2.0 * (1.0 + theta * A) + + # reconstructed value on left side of right face + w1 = wmin + wdelta / 2.0 * (1.0 + theta * (ti.tanh(beta) + A) / (1.0 + A * ti.tanh(beta))) + + return w0, w1 + + +@ti.kernel +def compute_F_thinc(): + # reconstruct primitive variables on interior faces of each cell using + # multiple candidate thinc reconstructions + for i, j in Q: + if is_interior_cell(i, j): + for f in ti.static(range(4)): + # smooth x-dir reconstruction + w0, w1 = thinc(W[i - 1, j][f], W[i, j][f], W[i + 1, j][f], beta_smooth) + W_xr[i, j, 0][f] = w0 + W_xl[i + 1, j, 0][f] = w1 + + # sharp x-dir reconstruction + w0, w1 = thinc(W[i - 1, j][f], W[i, j][f], W[i + 1, j][f], beta_sharp) + W_xr[i, j, 1][f] = w0 + W_xl[i + 1, j, 1][f] = w1 + + # smooth y-dir reconstruction + w0, w1 = thinc(W[i, j - 1][f], W[i, j][f], W[i, j + 1][f], beta_smooth) + W_yr[i, j, 0][f] = w0 + W_yl[i, j + 1, 0][f] = w1 + + # sharp y-dir reconstruction + w0, w1 = thinc(W[i, j - 1][f], W[i, j][f], W[i, j + 1][f], beta_sharp) + W_yr[i, j, 1][f] = w0 + W_yl[i, j + 1, 1][f] = w1 + + for i, j in Q: + # choose the final reconstruction for each cell using the BVD algorithm + if is_interior_cell(i, j): + for f in ti.static(range(4)): + # x-dir + TBV_smooth = abs(W_xl[i, j, 0][f] - W_xr[i, j, 0][f]) + abs(W_xl[i + 1, j, 0][f] - W_xr[i + 1, j, 0][f]) + TBV_sharp = abs(W_xl[i, j, 1][f] - W_xr[i, j, 1][f]) + abs(W_xl[i + 1, j, 1][f] - W_xr[i + 1, j, 1][f]) + + if TBV_smooth < TBV_sharp: + W_xr[i, j, 2][f] = W_xr[i, j, 0][f] + W_xl[i + 1, j, 2][f] = W_xl[i + 1, j, 0][f] + else: + W_xr[i, j, 2][f] = W_xr[i, j, 1][f] + W_xl[i + 1, j, 2][f] = W_xl[i + 1, j, 1][f] + + # y-dir + TBV_smooth = abs(W_yl[i, j, 0][f] - W_yr[i, j, 0][f]) + abs(W_yl[i, j + 1, 0][f] - W_yr[i, j + 1, 0][f]) + TBV_sharp = abs(W_yl[i, j, 1][f] - W_yr[i, j, 1][f]) + abs(W_yl[i, j + 1, 1][f] - W_yr[i, j + 1, 1][f]) + + if TBV_smooth < TBV_sharp: + W_yr[i, j, 2][f] = W_yr[i, j, 0][f] + W_yl[i, j + 1, 2][f] = W_yl[i, j + 1, 0][f] + else: + W_yr[i, j, 2][f] = W_yr[i, j, 1][f] + W_yl[i, j + 1, 2][f] = W_yl[i, j + 1, 1][f] + + for i, j in Q: + # compute numerical fluxes of with Riemann solver + if is_interior_x_face(i, j): + # muscl reconstrucion of left and right states with HLLC flux + F_x[i, j] = HLLC_flux(w_to_q(W_xl[i, j, 2]), w_to_q(W_xr[i, j, 2]), ti.Vector([1.0, 0.0])) + elif is_boundary_x_face(i, j): + F_x[i, j] = HLLC_flux(Q[i - 1, j], Q[i, j], ti.Vector([1.0, 0.0])) + + if is_interior_y_face(i, j): + F_y[i, j] = HLLC_flux(w_to_q(W_yl[i, j, 2]), w_to_q(W_yr[i, j, 2]), ti.Vector([0.0, 1.0])) + elif is_boundary_y_face(i, j): + F_y[i, j] = HLLC_flux(Q[i, j - 1], Q[i, j], ti.Vector([0.0, 1.0])) + + +@ti.kernel +def calc_dt(): + dt[None] = 1.0e5 + for i, j in Q: + w = q_to_w(Q[i, j]) + a = ti.sqrt(gamma * w[3] / w[0]) + vel = ti.sqrt(w[1] ** 2 + w[2] ** 2) + ws = a + vel + ti.atomic_min(dt[None], CFL * h / ws / 2.0) + + +@ti.kernel +def update_Q(rk_step: ti.template()): + for i, j in Q: + if is_interior_cell(i, j): + if ti.static(rk_step == 0): + Q[i, j] = Q[i, j] + dt[None] * (F_x[i, j] - F_x[i + 1, j] + F_y[i, j] - F_y[i, j + 1]) / h + if ti.static(rk_step == 1): + Q[i, j] = (Q[i, j] + Q_old[i, j]) / 2.0 + dt[None] * ( + F_x[i, j] - F_x[i + 1, j] + F_y[i, j] - F_y[i, j + 1] + ) / h + + +@ti.kernel +def paint(): + for i, j in img: + ii = min(max(1, i * N // res), N - 2) + jj = min(max(1, j * N // res), N - 2) + if img_field == 0: # density + img[i, j] = Q[ii, jj][0] + elif img_field == 1: # numerical schlieren + img[i, j] = ti.sqrt( + ((Q[ii + 1, jj][0] - Q[ii - 1, jj][0]) / h) ** 2 + ((Q[ii, jj + 1][0] - Q[ii, jj - 1][0]) / h) ** 2 + ) + elif img_field == 2: # vorticity + img[i, j] = (Q[ii + 1, jj][2] - Q[ii - 1, jj][2]) / h - (Q[ii, jj + 1][1] - Q[ii, jj - 1][1]) / h + elif img_field == 3: # velocity magnitude + img[i, j] = ti.sqrt(Q[ii, jj][1] ** 2 + Q[ii, jj][2] ** 2) + + max_ = -1.0e10 + min_ = 1.0e10 + for i, j in img: + ti.atomic_max(max_, img[i, j]) + ti.atomic_min(min_, img[i, j]) + + for i, j in img: + if use_fixed_caxis: + min_ = fixed_caxis[0] + max_ = fixed_caxis[1] + img[i, j] = (img[i, j] - min_) / (max_ - min_) + + +def main(): + gui = ti.GUI("Euler Equations", (res, res)) + cmap = cm.get_cmap(cmap_name) + set_ic() + set_bc() + + n = 0 + while gui.running: + calc_dt() + copy_to_old() + for rk_step in range(2): + compute_W() + if method == 0: + compute_F_muscl() + else: + compute_F_thinc() + update_Q(rk_step) + set_bc() + + if n % 10 == 0: + paint() + gui.set_image(cmap(img.to_numpy())) + gui.show() + n += 1 + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/simulation/eulerfluid2d.py b/local_packages/taichi/examples/simulation/eulerfluid2d.py new file mode 100644 index 0000000..cd3c9ee --- /dev/null +++ b/local_packages/taichi/examples/simulation/eulerfluid2d.py @@ -0,0 +1,310 @@ +# 2D Euler Fluid Simulation using Taichi, originally created by @Lee-abcde +from taichi.examples.patterns import taichi_logo + +import taichi as ti +import taichi.math as tm + +ti.init(arch=ti.gpu) + + +##################### +# Bilinear Interpolation function +##################### +@ti.func +def sample(vf, u, v, shape): + i, j = int(u), int(v) + # Nearest + i = ti.max(0, ti.min(shape[0] - 1, i)) + j = ti.max(0, ti.min(shape[1] - 1, j)) + return vf[i, j] + + +@ti.func +def lerp(vl, vr, frac): + # frac: [0.0, 1.0] + return (1 - frac) * vl + frac * vr + + +@ti.func +def bilerp(vf, u, v, shape): + # use -0.5 to decide where bilerp performs in cells + s, t = u - 0.5, v - 0.5 + iu, iv = int(s), int(t) + a = sample(vf, iu + 0.5, iv + 0.5, shape) + b = sample(vf, iu + 1.5, iv + 0.5, shape) + c = sample(vf, iu + 0.5, iv + 1.5, shape) + d = sample(vf, iu + 1.5, iv + 1.5, shape) + # fract + fu, fv = s - iu, t - iv + return lerp(lerp(a, b, fu), lerp(c, d, fu), fv) + + +##################### +# Simulation parameters +##################### +eulerSimParam = { + "shape": [512, 512], + "dt": 1 / 60.0, + "iteration_step": 20, + "mouse_radius": 0.01, # [0.0,1.0] float + "mouse_speed": 125.0, + "mouse_respondDistance": 0.5, # for every frame, only half the trace of the mouse will influence water + "curl_param": 15, +} + + +# Double Buffer +class TexPair: + def __init__(self, cur, nxt): + self.cur = cur + self.nxt = nxt + + def swap(self): + self.cur, self.nxt = self.nxt, self.cur + + +velocityField = ti.Vector.field(2, float, shape=eulerSimParam["shape"]) +_new_velocityField = ti.Vector.field(2, float, shape=eulerSimParam["shape"]) +colorField = ti.Vector.field(3, float, shape=eulerSimParam["shape"]) +_new_colorField = ti.Vector.field(3, float, shape=eulerSimParam["shape"]) + +curlField = ti.field(float, shape=eulerSimParam["shape"]) + +divField = ti.field(float, shape=eulerSimParam["shape"]) +pressField = ti.field(float, shape=eulerSimParam["shape"]) +_new_pressField = ti.field(float, shape=eulerSimParam["shape"]) + +velocities_pair = TexPair(velocityField, _new_velocityField) +pressure_pair = TexPair(pressField, _new_pressField) +color_pair = TexPair(colorField, _new_colorField) + + +@ti.kernel +def init_field(): + # init pressure and velocity fieldfield + pressField.fill(0) + velocityField.fill(0) + for i, j in ti.ndrange(eulerSimParam["shape"][0] * 4, eulerSimParam["shape"][1] * 4): + # 4x4 super sampling: + ret = taichi_logo(ti.Vector([i, j]) / (eulerSimParam["shape"][0] * 4)) + colorField[i // 4, j // 4] += ret / 16 + + +@ti.kernel +def advection(vf: ti.template(), qf: ti.template(), new_qf: ti.template()): + for i, j in vf: + coord_cur = ti.Vector([i, j]) + ti.Vector([0.5, 0.5]) + vel_cur = vf[i, j] + coord_prev = coord_cur - vel_cur * eulerSimParam["dt"] + q_prev = bilerp(qf, coord_prev[0], coord_prev[1], eulerSimParam["shape"]) + new_qf[i, j] = q_prev + + +@ti.kernel +def curl(vf: ti.template(), cf: ti.template()): + for i, j in vf: + cf[i, j] = 0.5 * ((vf[i + 1, j][1] - vf[i - 1, j][1]) - (vf[i, j + 1][0] - vf[i, j - 1][0])) + + +@ti.kernel +def vorticity_projection(cf: ti.template(), vf: ti.template(), vf_new: ti.template()): + for i, j in cf: + gradcurl = ti.Vector( + [ + 0.5 * (cf[i + 1, j] - cf[i - 1, j]), + 0.5 * (cf[i, j + 1] - cf[i, j - 1]), + 0, + ] + ) + GradCurlLength = tm.length(gradcurl) + if GradCurlLength > 1e-5: + force = eulerSimParam["curl_param"] * tm.cross(gradcurl / GradCurlLength, ti.Vector([0, 0, 1])) + vf_new[i, j] = vf[i, j] + eulerSimParam["dt"] * force[:2] + + +@ti.kernel +def divergence(vf: ti.template(), divf: ti.template()): + for i, j in vf: + divf[i, j] = 0.5 * (vf[i + 1, j][0] - vf[i - 1, j][0] + vf[i, j + 1][1] - vf[i, j - 1][1]) + + +@ti.kernel +def pressure_iteration(divf: ti.template(), pf: ti.template(), new_pf: ti.template()): + for i, j in pf: + new_pf[i, j] = (pf[i + 1, j] + pf[i - 1, j] + pf[i, j - 1] + pf[i, j + 1] - divf[i, j]) / 4 + + +def pressure_solve(presspair: TexPair, divf: ti.template()): + for i in range(eulerSimParam["iteration_step"]): + pressure_iteration(divf, presspair.cur, presspair.nxt) + presspair.swap() + apply_p_bc(presspair.cur) + + +@ti.kernel +def pressure_projection(pf: ti.template(), vf: ti.template(), vf_new: ti.template()): + for i, j in vf: + vf_new[i, j] = vf[i, j] - ti.Vector( + [ + ( + p_with_boundary(pf, i + 1, j, eulerSimParam["shape"]) + - p_with_boundary(pf, i - 1, j, eulerSimParam["shape"]) + ) + / 2.0, + ( + p_with_boundary(pf, i, j + 1, eulerSimParam["shape"]) + - p_with_boundary(pf, i, j - 1, eulerSimParam["shape"]) + ) + / 2.0, + ] + ) + + +##################### +# Boundry Condition +##################### +@ti.func +def vel_with_boundary(vf: ti.template(), i: int, j: int, shape) -> ti.f32: + if (i <= 0) or (i >= shape[0] - 1) or (j >= shape[1] - 1) or (j <= 0): + vf[i, j] = ti.Vector([0.0, 0.0]) + return vf[i, j] + + +@ti.func +def p_with_boundary(pf: ti.template(), i: int, j: int, shape) -> ti.f32: + if ( + (i == j == 0) + or (i == shape[0] - 1 and j == shape[1] - 1) + or (i == 0 and j == shape[1] - 1) + or (i == shape[0] - 1 and j == 0) + ): + pf[i, j] = 0.0 + elif i == 0: + pf[0, j] = pf[1, j] + elif j == 0: + pf[i, 0] = pf[i, 1] + elif i == shape[0] - 1: + pf[shape[0] - 1, j] = pf[shape[0] - 2, j] + elif j == shape[1] - 1: + pf[i, shape[1] - 1] = pf[i, shape[1] - 2] + return pf[i, j] + + +@ti.func +def c_with_boundary(cf: ti.template(), i: int, j: int, shape) -> ti.f32: + if (i <= 0) or (i >= shape[0] - 1) or (j >= shape[1] - 1) or (j <= 0): + cf[i, j] = 0.0 + return cf[i, j] + + +@ti.kernel +def apply_vel_bc(vf: ti.template()): + for i, j in vf: + vel_with_boundary(vf, i, j, eulerSimParam["shape"]) + + +@ti.kernel +def apply_p_bc(pf: ti.template()): + for i, j in pf: + p_with_boundary(pf, i, j, eulerSimParam["shape"]) + + +@ti.kernel +def apply_c_bc(cf: ti.template()): + for i, j in cf: + c_with_boundary(cf, i, j, eulerSimParam["shape"]) + + +def mouse_interaction(prev_posx: int, prev_posy: int): + mouse_x, mouse_y = window.get_cursor_pos() + mousePos_x = int(mouse_x * eulerSimParam["shape"][0]) + mousePos_y = int(mouse_y * eulerSimParam["shape"][1]) + if prev_posx == 0 and prev_posy == 0: + prev_posx = mousePos_x + prev_posy = mousePos_y + mouseRadius = eulerSimParam["mouse_radius"] * min(eulerSimParam["shape"][0], eulerSimParam["shape"][1]) + + mouse_addspeed( + mousePos_x, + mousePos_y, + prev_posx, + prev_posy, + mouseRadius, + velocities_pair.cur, + velocities_pair.nxt, + ) + velocities_pair.swap() + prev_posx = mousePos_x + prev_posy = mousePos_y + return prev_posx, prev_posy + + +@ti.kernel +def mouse_addspeed( + cur_posx: int, + cur_posy: int, + prev_posx: int, + prev_posy: int, + mouseRadius: float, + vf: ti.template(), + new_vf: ti.template(), +): + for i, j in vf: + vec1 = ti.Vector([cur_posx - prev_posx, cur_posy - prev_posy]) + vec2 = ti.Vector([i - prev_posx, j - prev_posy]) + dotans = tm.dot(vec1, vec2) + distance = abs(tm.cross(vec1, vec2)) / (tm.length(vec1) + 0.001) + if ( + dotans >= 0 + and dotans <= eulerSimParam["mouse_respondDistance"] * tm.length(vec1) + and distance <= mouseRadius + ): + new_vf[i, j] = vf[i, j] + vec1 * eulerSimParam["mouse_speed"] + else: + new_vf[i, j] = vf[i, j] + + +def advaction_step(): + advection(velocities_pair.cur, color_pair.cur, color_pair.nxt) + advection(velocities_pair.cur, velocities_pair.cur, velocities_pair.nxt) + color_pair.swap() + velocities_pair.swap() + apply_vel_bc(velocities_pair.cur) + + +def voricity_step(): + curl(velocities_pair.cur, curlField) + apply_c_bc(curlField) + vorticity_projection(curlField, velocities_pair.cur, velocities_pair.nxt) + velocities_pair.swap() + apply_vel_bc(velocities_pair.cur) + + +def pressure_step(): + divergence(velocities_pair.cur, divField) + pressure_solve(pressure_pair, divField) + pressure_projection(pressure_pair.cur, velocities_pair.cur, velocities_pair.nxt) + velocities_pair.swap() + apply_vel_bc(velocities_pair.cur) + + +##################### +# The Euler Simulation starts! +##################### +init_field() +apply_vel_bc(velocities_pair.cur) +window = ti.GUI("Euler 2D Simulation", res=(eulerSimParam["shape"][0], eulerSimParam["shape"][1])) + +mouse_prevposx, mouse_prevposy = 0, 0 +while window.running: + # advection + advaction_step() + # curl + voricity_step() + # mouse interact with fluid + mouse_prevposx, mouse_prevposy = mouse_interaction(mouse_prevposx, mouse_prevposy) + # pressure iteration and projection + pressure_step() + + window.set_image(colorField) + window.show() diff --git a/local_packages/taichi/examples/simulation/fem128.py b/local_packages/taichi/examples/simulation/fem128.py new file mode 100644 index 0000000..3f8d3d8 --- /dev/null +++ b/local_packages/taichi/examples/simulation/fem128.py @@ -0,0 +1,144 @@ +import taichi as ti + +ti.init(arch=ti.gpu) + +N = 12 +dt = 5e-5 +dx = 1 / N +rho = 4e1 +NF = 2 * N**2 # number of faces +NV = (N + 1) ** 2 # number of vertices +E, nu = 4e4, 0.2 # Young's modulus and Poisson's ratio +mu, lam = E / 2 / (1 + nu), E * nu / (1 + nu) / (1 - 2 * nu) # Lame parameters +ball_pos, ball_radius = ti.Vector([0.5, 0.0]), 0.31 +damping = 14.5 + +pos = ti.Vector.field(2, float, NV, needs_grad=True) +vel = ti.Vector.field(2, float, NV) +f2v = ti.Vector.field(3, int, NF) # ids of three vertices of each face +B = ti.Matrix.field(2, 2, float, NF) +F = ti.Matrix.field(2, 2, float, NF, needs_grad=True) +V = ti.field(float, NF) +phi = ti.field(float, NF) # potential energy of each face (Neo-Hookean) +U = ti.field(float, (), needs_grad=True) # total potential energy + +gravity = ti.Vector.field(2, float, ()) +attractor_pos = ti.Vector.field(2, float, ()) +attractor_strength = ti.field(float, ()) + + +@ti.kernel +def update_U(): + for i in range(NF): + ia, ib, ic = f2v[i] + a, b, c = pos[ia], pos[ib], pos[ic] + V[i] = abs((a - c).cross(b - c)) + D_i = ti.Matrix.cols([a - c, b - c]) + F[i] = D_i @ B[i] + for i in range(NF): + F_i = F[i] + log_J_i = ti.log(F_i.determinant()) + phi_i = mu / 2 * ((F_i.transpose() @ F_i).trace() - 2) + phi_i -= mu * log_J_i + phi_i += lam / 2 * log_J_i**2 + phi[i] = phi_i + U[None] += V[i] * phi_i + + +@ti.kernel +def advance(): + for i in range(NV): + acc = -pos.grad[i] / (rho * dx**2) + g = gravity[None] * 0.8 + attractor_strength[None] * (attractor_pos[None] - pos[i]).normalized(1e-5) + vel[i] += dt * (acc + g * 40) + vel[i] *= ti.exp(-dt * damping) + for i in range(NV): + # ball boundary condition: + disp = pos[i] - ball_pos + disp2 = disp.norm_sqr() + if disp2 <= ball_radius**2: + NoV = vel[i].dot(disp) + if NoV < 0: + vel[i] -= NoV * disp / disp2 + cond = (pos[i] < 0) & (vel[i] < 0) | (pos[i] > 1) & (vel[i] > 0) + # rect boundary condition: + for j in ti.static(range(pos.n)): + if cond[j]: + vel[i][j] = 0 + pos[i] += dt * vel[i] + + +@ti.kernel +def init_pos(): + for i, j in ti.ndrange(N + 1, N + 1): + k = i * (N + 1) + j + pos[k] = ti.Vector([i, j]) / N * 0.25 + ti.Vector([0.45, 0.45]) + vel[k] = ti.Vector([0, 0]) + for i in range(NF): + ia, ib, ic = f2v[i] + a, b, c = pos[ia], pos[ib], pos[ic] + B_i_inv = ti.Matrix.cols([a - c, b - c]) + B[i] = B_i_inv.inverse() + + +@ti.kernel +def init_mesh(): + for i, j in ti.ndrange(N, N): + k = (i * N + j) * 2 + a = i * (N + 1) + j + b = a + 1 + c = a + N + 2 + d = a + N + 1 + f2v[k + 0] = [a, b, c] + f2v[k + 1] = [c, d, a] + + +def paint_phi(gui): + pos_ = pos.to_numpy() + phi_ = phi.to_numpy() + f2v_ = f2v.to_numpy() + a, b, c = pos_[f2v_[:, 0]], pos_[f2v_[:, 1]], pos_[f2v_[:, 2]] + k = phi_ * (10 / E) + gb = (1 - k) * 0.5 + gui.triangles(a, b, c, color=ti.rgb_to_hex([k + gb, gb, gb])) + + +def main(): + init_mesh() + init_pos() + gravity[None] = [0, -1] + + gui = ti.GUI("FEM128") + print( + "[Hint] Use WSAD/arrow keys to control gravity. Use left/right mouse buttons to attract/repel. Press R to reset." + ) + while gui.running: + for e in gui.get_events(gui.PRESS): + if e.key == gui.ESCAPE: + gui.running = False + elif e.key == "r": + init_pos() + elif e.key in ("a", gui.LEFT): + gravity[None] = [-1, 0] + elif e.key in ("d", gui.RIGHT): + gravity[None] = [+1, 0] + elif e.key in ("s", gui.DOWN): + gravity[None] = [0, -1] + elif e.key in ("w", gui.UP): + gravity[None] = [0, +1] + mouse_pos = gui.get_cursor_pos() + attractor_pos[None] = mouse_pos + attractor_strength[None] = gui.is_pressed(gui.LMB) - gui.is_pressed(gui.RMB) + for i in range(50): + with ti.ad.Tape(loss=U): + update_U() + advance() + paint_phi(gui) + gui.circle(mouse_pos, radius=15, color=0x336699) + gui.circle(ball_pos, radius=ball_radius * 512, color=0x666666) + gui.circles(pos.to_numpy(), radius=2, color=0xFFAA33) + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/simulation/fem99.py b/local_packages/taichi/examples/simulation/fem99.py new file mode 100644 index 0000000..f53ff49 --- /dev/null +++ b/local_packages/taichi/examples/simulation/fem99.py @@ -0,0 +1,112 @@ +import taichi as ti + +ti.init(arch=ti.gpu) + +N = 32 +dt = 1e-4 +dx = 1 / N +rho = 4e1 +NF = 2 * N**2 # number of faces +NV = (N + 1) ** 2 # number of vertices +E, nu = 4e4, 0.2 # Young's modulus and Poisson's ratio +mu, lam = E / 2 / (1 + nu), E * nu / (1 + nu) / (1 - 2 * nu) # Lame parameters +ball_pos, ball_radius = ti.Vector([0.5, 0.0]), 0.32 +gravity = ti.Vector([0, -40]) +damping = 12.5 + +pos = ti.Vector.field(2, float, NV, needs_grad=True) +vel = ti.Vector.field(2, float, NV) +f2v = ti.Vector.field(3, int, NF) # ids of three vertices of each face +B = ti.Matrix.field(2, 2, float, NF) +F = ti.Matrix.field(2, 2, float, NF, needs_grad=True) +V = ti.field(float, NF) +phi = ti.field(float, NF) # potential energy of each face (Neo-Hookean) +U = ti.field(float, (), needs_grad=True) # total potential energy + + +@ti.kernel +def update_U(): + for i in range(NF): + ia, ib, ic = f2v[i] + a, b, c = pos[ia], pos[ib], pos[ic] + V[i] = abs((a - c).cross(b - c)) + D_i = ti.Matrix.cols([a - c, b - c]) + F[i] = D_i @ B[i] + for i in range(NF): + F_i = F[i] + log_J_i = ti.log(F_i.determinant()) + phi_i = mu / 2 * ((F_i.transpose() @ F_i).trace() - 2) + phi_i -= mu * log_J_i + phi_i += lam / 2 * log_J_i**2 + phi[i] = phi_i + U[None] += V[i] * phi_i + + +@ti.kernel +def advance(): + for i in range(NV): + acc = -pos.grad[i] / (rho * dx**2) + vel[i] += dt * (acc + gravity) + vel[i] *= ti.exp(-dt * damping) + for i in range(NV): + # ball boundary condition: + disp = pos[i] - ball_pos + disp2 = disp.norm_sqr() + if disp2 <= ball_radius**2: + NoV = vel[i].dot(disp) + if NoV < 0: + vel[i] -= NoV * disp / disp2 + # rect boundary condition: + cond = (pos[i] < 0) & (vel[i] < 0) | (pos[i] > 1) & (vel[i] > 0) + for j in ti.static(range(pos.n)): + if cond[j]: + vel[i][j] = 0 + pos[i] += dt * vel[i] + + +@ti.kernel +def init_pos(): + for i, j in ti.ndrange(N + 1, N + 1): + k = i * (N + 1) + j + pos[k] = ti.Vector([i, j]) / N * 0.25 + ti.Vector([0.45, 0.45]) + vel[k] = ti.Vector([0, 0]) + for i in range(NF): + ia, ib, ic = f2v[i] + a, b, c = pos[ia], pos[ib], pos[ic] + B_i_inv = ti.Matrix.cols([a - c, b - c]) + B[i] = B_i_inv.inverse() + + +@ti.kernel +def init_mesh(): + for i, j in ti.ndrange(N, N): + k = (i * N + j) * 2 + a = i * (N + 1) + j + b = a + 1 + c = a + N + 2 + d = a + N + 1 + f2v[k + 0] = [a, b, c] + f2v[k + 1] = [c, d, a] + + +def main(): + init_mesh() + init_pos() + gui = ti.GUI("FEM99") + while gui.running: + for e in gui.get_events(): + if e.key == gui.ESCAPE: + gui.running = False + elif e.key == "r": + init_pos() + for i in range(30): + with ti.ad.Tape(loss=U): + update_U() + advance() + gui.circles(pos.to_numpy(), radius=2, color=0xFFAA33) + gui.circle(ball_pos, radius=ball_radius * 512, color=0x666666) + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/simulation/fractal.py b/local_packages/taichi/examples/simulation/fractal.py new file mode 100644 index 0000000..dc63c7b --- /dev/null +++ b/local_packages/taichi/examples/simulation/fractal.py @@ -0,0 +1,37 @@ +import taichi as ti + +ti.init(arch=ti.gpu) + +n = 320 +pixels = ti.field(dtype=float, shape=(n * 2, n)) + + +@ti.func +def complex_sqr(z): + return ti.Vector([z[0] ** 2 - z[1] ** 2, z[1] * z[0] * 2]) + + +@ti.kernel +def paint(t: float): + for i, j in pixels: # Parallelized over all pixels + c = ti.Vector([-0.8, ti.cos(t) * 0.2]) + z = ti.Vector([i / n - 1, j / n - 0.5]) * 2 + iterations = 0 + while z.norm() < 20 and iterations < 50: + z = complex_sqr(z) + c + iterations += 1 + pixels[i, j] = 1 - iterations * 0.02 + + +def main(): + gui = ti.GUI("Julia Set", res=(n * 2, n)) + t = 0.0 + while not gui.get_event(ti.GUI.ESCAPE, ti.GUI.EXIT): + paint(t) + t += 0.03 + gui.set_image(pixels) + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/simulation/game_of_life.py b/local_packages/taichi/examples/simulation/game_of_life.py new file mode 100644 index 0000000..eeec3f6 --- /dev/null +++ b/local_packages/taichi/examples/simulation/game_of_life.py @@ -0,0 +1,105 @@ +# Game of Life written in 100 lines of Taichi +# In memory of John Horton Conway (1937 - 2020) + +import numpy as np + +import taichi as ti + +ti.init() + +n = 64 +cell_size = 8 +img_size = n * cell_size +alive = ti.field(int, shape=(n, n)) # alive = 1, dead = 0 +count = ti.field(int, shape=(n, n)) # count of neighbours + + +@ti.func +def get_alive(i, j): + return alive[i, j] if 0 <= i < n and 0 <= j < n else 0 + + +@ti.func +def get_count(i, j): + return ( + get_alive(i - 1, j) + + get_alive(i + 1, j) + + get_alive(i, j - 1) + + get_alive(i, j + 1) + + get_alive(i - 1, j - 1) + + get_alive(i + 1, j - 1) + + get_alive(i - 1, j + 1) + + get_alive(i + 1, j + 1) + ) + + +# See https://www.conwaylife.com/wiki/Cellular_automaton#Rules for more rules +B, S = [3], [2, 3] +# B, S = [2], [0] + + +@ti.func +def calc_rule(a, c): + if a == 0: + for t in ti.static(B): + if c == t: + a = 1 + elif a == 1: + a = 0 + for t in ti.static(S): + if c == t: + a = 1 + return a + + +@ti.kernel +def run(): + for i, j in alive: + count[i, j] = get_count(i, j) + + for i, j in alive: + alive[i, j] = calc_rule(alive[i, j], count[i, j]) + + +@ti.kernel +def init(): + for i, j in alive: + if ti.random() > 0.8: + alive[i, j] = 1 + else: + alive[i, j] = 0 + + +def main(): + gui = ti.GUI("Game of Life", (img_size, img_size)) + gui.fps_limit = 15 + + print("[Hint] Press `r` to reset") + print("[Hint] Press SPACE to pause") + print("[Hint] Click LMB, RMB and drag to add alive / dead cells") + + init() + paused = False + while gui.running: + for e in gui.get_events(gui.PRESS, gui.MOTION): + if e.key == gui.ESCAPE: + gui.running = False + elif e.key == gui.SPACE: + paused = not paused + elif e.key == "r": + alive.fill(0) + + if gui.is_pressed(gui.LMB, gui.RMB): + mx, my = gui.get_cursor_pos() + alive[int(mx * n), int(my * n)] = gui.is_pressed(gui.LMB) + paused = True + + if not paused: + run() + + gui.set_image(ti.tools.imresize(alive, img_size).astype(np.uint8) * 255) + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/simulation/implicit_fem.py b/local_packages/taichi/examples/simulation/implicit_fem.py new file mode 100644 index 0000000..c3ac69d --- /dev/null +++ b/local_packages/taichi/examples/simulation/implicit_fem.py @@ -0,0 +1,436 @@ +import argparse + +import numpy as np +from taichi._lib import core as _ti_core + +import taichi as ti + +parser = argparse.ArgumentParser() +parser.add_argument("--exp", choices=["implicit", "explicit"], default="implicit") +parser.add_argument("--dim", type=int, default=3) +parser.add_argument("--gui", choices=["auto", "ggui", "cpu"], default="auto") +parser.add_argument( + "-s", + "--use_sparse", + action="store_true", + help="Use sparse matrix and sparse solver", +) +parser.add_argument("place_holder", nargs="*") +args = parser.parse_args() + +ti.init(arch=ti.cuda) + +if args.gui == "auto": + if _ti_core.GGUI_AVAILABLE and ti.lang.impl.current_cfg().arch == ti.cuda: + args.gui = "ggui" + else: + args.gui = "cpu" + +E, nu = 5e4, 0.0 +mu, la = E / (2 * (1 + nu)), E * nu / ((1 + nu) * (1 - 2 * nu)) # lambda = 0 +density = 1000.0 +dt = 2e-4 + +if args.exp == "implicit": + dt = 1e-2 + +use_sparse = args.use_sparse + +n_cube = np.array([5] * 3) +n_verts = np.prod(n_cube) +n_cells = 5 * np.prod(n_cube - 1) +dx = 1 / (n_cube.max() - 1) + +F_vertices = ti.Vector.field(4, dtype=ti.i32, shape=n_cells) + +F_x = ti.Vector.field(args.dim, dtype=ti.f32, shape=n_verts) +F_ox = ti.Vector.field(args.dim, dtype=ti.f32, shape=n_verts) +F_v = ti.Vector.field(args.dim, dtype=ti.f32, shape=n_verts) +F_f = ti.Vector.field(args.dim, dtype=ti.f32, shape=n_verts) +F_mul_ans = ti.Vector.field(args.dim, dtype=ti.f32, shape=n_verts) +F_m = ti.field(dtype=ti.f32, shape=n_verts) + +n_cells = (n_cube - 1).prod() * 5 +F_B = ti.Matrix.field(args.dim, args.dim, dtype=ti.f32, shape=n_cells) +F_W = ti.field(dtype=ti.f32, shape=n_cells) + + +@ti.func +def i2p(I): + return (I.x * n_cube[1] + I.y) * n_cube[2] + I.z + + +@ti.func +def set_element(e, I, verts): + for i in ti.static(range(args.dim + 1)): + F_vertices[e][i] = i2p(I + (([verts[i] >> k for k in range(3)] ^ I) & 1)) + + +@ti.kernel +def get_vertices(): + """ + This kernel partitions the cube into tetrahedrons. + Each unit cube is divided into 5 tetrahedrons. + """ + for I in ti.grouped(ti.ndrange(*(n_cube - 1))): + e = ((I.x * (n_cube[1] - 1) + I.y) * (n_cube[2] - 1) + I.z) * 5 + for i, j in ti.static(enumerate([0, 3, 5, 6])): + set_element(e + i, I, (j, j ^ 1, j ^ 2, j ^ 4)) + set_element(e + 4, I, (1, 2, 4, 7)) + for I in ti.grouped(ti.ndrange(*(n_cube))): + F_ox[i2p(I)] = I * dx + + +@ti.func +def Ds(verts): + return ti.Matrix.cols([F_x[verts[i]] - F_x[verts[3]] for i in range(3)]) + + +@ti.func +def ssvd(F): + U, sig, V = ti.svd(F) + if U.determinant() < 0: + for i in ti.static(range(3)): + U[i, 2] *= -1 + sig[2, 2] = -sig[2, 2] + if V.determinant() < 0: + for i in ti.static(range(3)): + V[i, 2] *= -1 + sig[2, 2] = -sig[2, 2] + return U, sig, V + + +@ti.func +def get_force_func(c, verts): + F = Ds(verts) @ F_B[c] + P = ti.Matrix.zero(ti.f32, 3, 3) + U, sig, V = ssvd(F) + P = 2 * mu * (F - U @ V.transpose()) + H = -F_W[c] * P @ F_B[c].transpose() + for i in ti.static(range(3)): + force = ti.Vector([H[j, i] for j in range(3)]) + F_f[verts[i]] += force + F_f[verts[3]] -= force + + +@ti.kernel +def get_force(): + for c in F_vertices: + get_force_func(c, F_vertices[c]) + for u in F_f: + F_f[u].y -= 9.8 * F_m[u] + + +@ti.kernel +def matmul_cell(ret: ti.template(), vel: ti.template()): + for i in ret: + ret[i] = vel[i] * F_m[i] + for c in F_vertices: + verts = F_vertices[c] + W_c = F_W[c] + B_c = F_B[c] + for u in range(4): + for d in range(3): + dD = ti.Matrix.zero(ti.f32, 3, 3) + if u == 3: + for j in range(3): + dD[d, j] = -1 + else: + dD[d, u] = 1 + dF = dD @ B_c + dP = 2.0 * mu * dF + dH = -W_c * dP @ B_c.transpose() + for i in range(3): + for j in range(3): + tmp = vel[verts[i]][j] - vel[verts[3]][j] + ret[verts[u]][d] += -(dt**2) * dH[j, i] * tmp + + +@ti.kernel +def add(ans: ti.template(), a: ti.template(), k: ti.f32, b: ti.template()): + for i in ans: + ans[i] = a[i] + k * b[i] + + +@ti.kernel +def dot(a: ti.template(), b: ti.template()) -> ti.f32: + ans = 0.0 + for i in a: + ans += a[i].dot(b[i]) + return ans + + +F_b = ti.Vector.field(3, dtype=ti.f32, shape=n_verts) +F_r0 = ti.Vector.field(3, dtype=ti.f32, shape=n_verts) +F_p0 = ti.Vector.field(3, dtype=ti.f32, shape=n_verts) + +# ndarray version of F_b +F_b_ndarr = ti.ndarray(dtype=ti.f32, shape=3 * n_verts) +# stiffness matrix +A_builder = ti.linalg.SparseMatrixBuilder(3 * n_verts, 3 * n_verts, max_num_triplets=50000) +solver = ti.linalg.SparseSolver(ti.f32, "LLT") + + +@ti.kernel +def get_b(): + for i in F_b: + F_b[i] = F_m[i] * F_v[i] + dt * F_f[i] + + +def cg(): + def mul(x): + matmul_cell(F_mul_ans, x) + return F_mul_ans + + get_force() + get_b() + mul(F_v) + add(F_r0, F_b, -1, mul(F_v)) + + d = F_p0 + d.copy_from(F_r0) + r_2 = dot(F_r0, F_r0) + n_iter = 50 + epsilon = 1e-6 + r_2_init = r_2 + r_2_new = r_2 + for _ in range(n_iter): + q = mul(d) + alpha = r_2_new / dot(d, q) + add(F_v, F_v, alpha, d) + add(F_r0, F_r0, -alpha, q) + r_2 = r_2_new + r_2_new = dot(F_r0, F_r0) + if r_2_new <= r_2_init * epsilon**2: + break + beta = r_2_new / r_2 + add(d, F_r0, beta, d) + F_f.fill(0) + add(F_x, F_x, dt, F_v) + + +@ti.kernel +def compute_A(A: ti.types.sparse_matrix_builder()): + # A = M - dt * dt * K + for i in range(n_verts): + for j in range(3): + A[3 * i + j, 3 * i + j] += F_m[i] + for c in F_vertices: + verts = F_vertices[c] + W_c = F_W[c] + B_c = F_B[c] + for u in range(4): + for d in range(3): + dD = ti.Matrix.zero(ti.f32, 3, 3) + if u == 3: + for j in range(3): + dD[d, j] = -1 + else: + dD[d, u] = 1 + dF = dD @ B_c + dP = 2.0 * mu * dF + dH = -W_c * dP @ B_c.transpose() + for i in range(3): + for j in range(3): + A[3 * verts[u] + d, 3 * verts[i] + j] += -(dt**2) * dH[j, i] + for i in range(3): + A[3 * verts[u] + d, 3 * verts[3] + i] += -(dt**2) * (-dH[i, 0] - dH[i, 1] - dH[i, 2]) + + +@ti.kernel +def flatten(dest: ti.types.ndarray(), src: ti.template()): + for i in range(n_verts): + for j in range(3): + dest[3 * i + j] = src[i][j] + + +@ti.kernel +def aggragate(dest: ti.template(), src: ti.types.ndarray()): + for i in range(n_verts): + for j in range(3): + dest[i][j] = src[3 * i + j] + + +def direct(): + get_force() + get_b() + flatten(F_b_ndarr, F_b) + v = solver.solve(F_b_ndarr) + aggragate(F_v, v) + F_f.fill(0) + add(F_x, F_x, dt, F_v) + + +@ti.kernel +def advect(): + for p in F_x: + F_v[p] += dt * (F_f[p] / F_m[p]) + F_x[p] += dt * F_v[p] + F_f[p] = ti.Vector([0, 0, 0]) + + +@ti.kernel +def init(): + for u in F_x: + F_x[u] = F_ox[u] + F_v[u] = [0.0] * 3 + F_f[u] = [0.0] * 3 + F_m[u] = 0.0 + for c in F_vertices: + F = Ds(F_vertices[c]) + F_B[c] = F.inverse() + F_W[c] = ti.abs(F.determinant()) / 6 + for i in range(4): + F_m[F_vertices[c][i]] += F_W[c] / 4 * density + for u in F_x: + F_x[u].y += 1.0 + + +def init_A(): + compute_A(A_builder) + A = A_builder.build() + solver.analyze_pattern(A) + solver.factorize(A) + + +@ti.kernel +def floor_bound(): + for u in F_x: + if F_x[u].y < 0: + F_x[u].y = 0 + if F_v[u].y < 0: + F_v[u].y = 0 + + +@ti.func +def check(u): + ans = 0 + rest = u + for i in ti.static(range(3)): + k = rest % n_cube[2 - i] + rest = rest // n_cube[2 - i] + if k == 0: + ans |= 1 << (i * 2) + if k == n_cube[2 - i] - 1: + ans |= 1 << (i * 2 + 1) + return ans + + +def gen_indices(): + su = 0 + for i in range(3): + su += (n_cube[i] - 1) * (n_cube[(i + 1) % 3] - 1) + return ti.field(ti.i32, shape=2 * su * 2 * 3) + + +indices = gen_indices() + + +@ti.kernel +def get_indices(): + # calculate all the meshes on surface + cnt = 0 + for c in F_vertices: + if c % 5 != 4: + for i in ti.static([0, 2, 3]): + verts = [F_vertices[c][(i + j) % 4] for j in range(3)] + sum_ = check(verts[0]) & check(verts[1]) & check(verts[2]) + if sum_: + m = ti.atomic_add(cnt, 1) + det = ti.Matrix.rows([F_x[verts[i]] - [0.5, 1.5, 0.5] for i in range(3)]).determinant() + if det < 0: + tmp = verts[1] + verts[1] = verts[2] + verts[2] = tmp + indices[m * 3] = verts[0] + indices[m * 3 + 1] = verts[1] + indices[m * 3 + 2] = verts[2] + + +def substep(): + if args.exp == "explicit": + for i in range(10): + get_force() + advect() + else: + if use_sparse: + direct() + else: + cg() + floor_bound() + + +def main(): + get_vertices() + init() + get_indices() + + if use_sparse: + init_A() + + if args.gui == "ggui": + res = (800, 600) + window = ti.ui.Window("Implicit FEM", res, vsync=True) + + frame_id = 0 + canvas = window.get_canvas() + scene = window.get_scene() + camera = ti.ui.Camera() + camera.position(2.0, 2.0, 3.95) + camera.lookat(0.5, 0.5, 0.5) + camera.fov(55) + + def render(): + camera.track_user_inputs(window, movement_speed=0.03, hold_key=ti.ui.RMB) + scene.set_camera(camera) + + scene.ambient_light((0.1,) * 3) + + scene.point_light(pos=(0.5, 10.0, 0.5), color=(0.5, 0.5, 0.5)) + scene.point_light(pos=(10.0, 10.0, 10.0), color=(0.5, 0.5, 0.5)) + + scene.mesh(F_x, indices, color=(0.73, 0.33, 0.23)) + + canvas.scene(scene) + + while window.running: + frame_id += 1 + frame_id = frame_id % 256 + substep() + if window.is_pressed("r"): + init() + if window.is_pressed(ti.GUI.ESCAPE): + break + + render() + + window.show() + + else: + + def T(a): + phi, theta = np.radians(28), np.radians(32) + + a = a - 0.2 + x, y, z = a[:, 0], a[:, 1], a[:, 2] + c, s = np.cos(phi), np.sin(phi) + C, S = np.cos(theta), np.sin(theta) + x, z = x * c + z * s, z * c - x * s + u, v = x, y * C + z * S + return np.array([u, v]).swapaxes(0, 1) + 0.5 + + gui = ti.GUI("Implicit FEM") + while gui.running: + substep() + if gui.get_event(ti.GUI.PRESS): + if gui.event.key in [ti.GUI.ESCAPE, ti.GUI.EXIT]: + break + if gui.is_pressed("r"): + init() + gui.clear(0x000000) + gui.circles(T(F_x.to_numpy() / 3), radius=1.5, color=0xBA543A) + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/simulation/implicit_mass_spring.py b/local_packages/taichi/examples/simulation/implicit_mass_spring.py new file mode 100644 index 0000000..9f72132 --- /dev/null +++ b/local_packages/taichi/examples/simulation/implicit_mass_spring.py @@ -0,0 +1,293 @@ +# https://www.cs.cmu.edu/~baraff/papers/sig98.pdf +import argparse + +import numpy as np + +import taichi as ti + + +@ti.data_oriented +class Cloth: + def __init__(self, N): + self.N = N + self.NF = 2 * N**2 # number of faces + self.NV = (N + 1) ** 2 # number of vertices + self.NE = 2 * N * (N + 1) + 2 * N * N # numbser of edges + self.pos = ti.Vector.field(2, ti.f32, self.NV) + self.initPos = ti.Vector.field(2, ti.f32, self.NV) + self.vel = ti.Vector.field(2, ti.f32, self.NV) + self.force = ti.Vector.field(2, ti.f32, self.NV) + self.mass = ti.field(ti.f32, self.NV) + self.vel_1D = ti.ndarray(ti.f32, 2 * self.NV) + self.force_1D = ti.ndarray(ti.f32, 2 * self.NV) + self.b = ti.ndarray(ti.f32, 2 * self.NV) + + self.spring = ti.Vector.field(2, ti.i32, self.NE) + self.indices = ti.field(ti.i32, 2 * self.NE) + self.Jx = ti.Matrix.field(2, 2, ti.f32, self.NE) # Jacobian with respect to position + self.Jv = ti.Matrix.field(2, 2, ti.f32, self.NE) # Jacobian with respect to velocity + self.rest_len = ti.field(ti.f32, self.NE) + self.ks = 1000.0 # spring stiffness + self.kd = 0.5 # damping constant + self.kf = 1.0e5 # fix point stiffness + + self.gravity = ti.Vector([0.0, -2.0]) + self.init_pos() + self.init_edges() + self.MassBuilder = ti.linalg.SparseMatrixBuilder(2 * self.NV, 2 * self.NV, max_num_triplets=10000) + self.DBuilder = ti.linalg.SparseMatrixBuilder(2 * self.NV, 2 * self.NV, max_num_triplets=10000) + self.KBuilder = ti.linalg.SparseMatrixBuilder(2 * self.NV, 2 * self.NV, max_num_triplets=10000) + self.init_mass_sp(self.MassBuilder) + self.M = self.MassBuilder.build() + self.fix_vertex = [self.N, self.NV - 1] + self.Jf = ti.Matrix.field(2, 2, ti.f32, len(self.fix_vertex)) # fix constraint hessian + + @ti.kernel + def init_pos(self): + for i, j in ti.ndrange(self.N + 1, self.N + 1): + k = i * (self.N + 1) + j + self.pos[k] = ti.Vector([i, j]) / self.N * 0.5 + ti.Vector([0.25, 0.25]) + self.initPos[k] = self.pos[k] + self.vel[k] = ti.Vector([0, 0]) + self.mass[k] = 1.0 + + @ti.kernel + def init_edges(self): + pos, spring, N, rest_len = ti.static(self.pos, self.spring, self.N, self.rest_len) + for i, j in ti.ndrange(N + 1, N): + idx, idx1 = i * N + j, i * (N + 1) + j + spring[idx] = ti.Vector([idx1, idx1 + 1]) + rest_len[idx] = (pos[idx1] - pos[idx1 + 1]).norm() + start = N * (N + 1) + for i, j in ti.ndrange(N, N + 1): + idx, idx1, idx2 = ( + start + i + j * N, + i * (N + 1) + j, + i * (N + 1) + j + N + 1, + ) + spring[idx] = ti.Vector([idx1, idx2]) + rest_len[idx] = (pos[idx1] - pos[idx2]).norm() + start = 2 * N * (N + 1) + for i, j in ti.ndrange(N, N): + idx, idx1, idx2 = ( + start + i * N + j, + i * (N + 1) + j, + (i + 1) * (N + 1) + j + 1, + ) + spring[idx] = ti.Vector([idx1, idx2]) + rest_len[idx] = (pos[idx1] - pos[idx2]).norm() + start = 2 * N * (N + 1) + N * N + for i, j in ti.ndrange(N, N): + idx, idx1, idx2 = ( + start + i * N + j, + i * (N + 1) + j + 1, + (i + 1) * (N + 1) + j, + ) + spring[idx] = ti.Vector([idx1, idx2]) + rest_len[idx] = (pos[idx1] - pos[idx2]).norm() + + @ti.kernel + def init_mass_sp(self, M: ti.types.sparse_matrix_builder()): + for i in range(self.NV): + mass = self.mass[i] + M[2 * i + 0, 2 * i + 0] += mass + M[2 * i + 1, 2 * i + 1] += mass + + @ti.func + def clear_force(self): + for i in self.force: + self.force[i] = ti.Vector([0.0, 0.0]) + + @ti.kernel + def compute_force(self): + self.clear_force() + for i in self.force: + self.force[i] += self.gravity * self.mass[i] + + for i in self.spring: + idx1, idx2 = self.spring[i][0], self.spring[i][1] + pos1, pos2 = self.pos[idx1], self.pos[idx2] + dis = pos2 - pos1 + force = self.ks * (dis.norm() - self.rest_len[i]) * dis.normalized() + self.force[idx1] += force + self.force[idx2] -= force + # fix constraint gradient + self.force[self.N] += self.kf * (self.initPos[self.N] - self.pos[self.N]) + self.force[self.NV - 1] += self.kf * (self.initPos[self.NV - 1] - self.pos[self.NV - 1]) + + @ti.kernel + def compute_Jacobians(self): + for i in self.spring: + idx1, idx2 = self.spring[i][0], self.spring[i][1] + pos1, pos2 = self.pos[idx1], self.pos[idx2] + dx = pos1 - pos2 + I = ti.Matrix([[1.0, 0.0], [0.0, 1.0]]) + dxtdx = ti.Matrix([[dx[0] * dx[0], dx[0] * dx[1]], [dx[1] * dx[0], dx[1] * dx[1]]]) + l = dx.norm() + if l != 0.0: + l = 1.0 / l + self.Jx[i] = (I - self.rest_len[i] * l * (I - dxtdx * l**2)) * self.ks + self.Jv[i] = self.kd * I + + # fix point constraint hessian + self.Jf[0] = ti.Matrix([[-self.kf, 0], [0, -self.kf]]) + self.Jf[1] = ti.Matrix([[-self.kf, 0], [0, -self.kf]]) + + @ti.kernel + def assemble_K(self, K: ti.types.sparse_matrix_builder()): + for i in self.spring: + idx1, idx2 = self.spring[i][0], self.spring[i][1] + for m, n in ti.static(ti.ndrange(2, 2)): + K[2 * idx1 + m, 2 * idx1 + n] -= self.Jx[i][m, n] + K[2 * idx1 + m, 2 * idx2 + n] += self.Jx[i][m, n] + K[2 * idx2 + m, 2 * idx1 + n] += self.Jx[i][m, n] + K[2 * idx2 + m, 2 * idx2 + n] -= self.Jx[i][m, n] + for m, n in ti.static(ti.ndrange(2, 2)): + K[2 * self.N + m, 2 * self.N + n] += self.Jf[0][m, n] + K[2 * (self.NV - 1) + m, 2 * (self.NV - 1) + n] += self.Jf[1][m, n] + + @ti.kernel + def assemble_D(self, D: ti.types.sparse_matrix_builder()): + for i in self.spring: + idx1, idx2 = self.spring[i][0], self.spring[i][1] + for m, n in ti.static(ti.ndrange(2, 2)): + D[2 * idx1 + m, 2 * idx1 + n] -= self.Jv[i][m, n] + D[2 * idx1 + m, 2 * idx2 + n] += self.Jv[i][m, n] + D[2 * idx2 + m, 2 * idx1 + n] += self.Jv[i][m, n] + D[2 * idx2 + m, 2 * idx2 + n] -= self.Jv[i][m, n] + + @ti.kernel + def updatePosVel(self, h: ti.f32, dv: ti.types.ndarray()): + for i in self.pos: + self.vel[i] += ti.Vector([dv[2 * i], dv[2 * i + 1]]) + self.pos[i] += h * self.vel[i] + + @ti.kernel + def copy_to(self, des: ti.types.ndarray(), source: ti.template()): + for i in range(self.NV): + des[2 * i] = source[i][0] + des[2 * i + 1] = source[i][1] + + @ti.kernel + def compute_b( + self, + b: ti.types.ndarray(), + f: ti.types.ndarray(), + Kv: ti.types.ndarray(), + h: ti.f32, + ): + for i in range(2 * self.NV): + b[i] = (f[i] + Kv[i] * h) * h + + def update(self, h): + self.compute_force() + + self.compute_Jacobians() + # Assemble global system + + self.assemble_D(self.DBuilder) + D = self.DBuilder.build() + + self.assemble_K(self.KBuilder) + K = self.KBuilder.build() + + A = self.M - h * D - h**2 * K + + self.copy_to(self.vel_1D, self.vel) + self.copy_to(self.force_1D, self.force) + + # b = (force + h * K @ vel) * h + Kv = K @ self.vel_1D + self.compute_b(self.b, self.force_1D, Kv, h) + + # Sparse solver + solver = ti.linalg.SparseSolver(solver_type="LDLT") + solver.analyze_pattern(A) + solver.factorize(A) + # Solve the linear system + dv = solver.solve(self.b) + self.updatePosVel(h, dv) + + def display(self, gui, radius=5, color=0xFFFFFF): + lines = self.spring.to_numpy() + pos = self.pos.to_numpy() + edgeBegin = np.zeros(shape=(lines.shape[0], 2)) + edgeEnd = np.zeros(shape=(lines.shape[0], 2)) + for i in range(lines.shape[0]): + idx1, idx2 = lines[i][0], lines[i][1] + edgeBegin[i] = pos[idx1] + edgeEnd[i] = pos[idx2] + gui.lines(edgeBegin, edgeEnd, radius=2, color=0x0000FF) + gui.circles(self.pos.to_numpy(), radius, color) + + @ti.kernel + def spring2indices(self): + for i in self.spring: + self.indices[2 * i + 0] = self.spring[i][0] + self.indices[2 * i + 1] = self.spring[i][1] + + def displayGGUI(self, canvas, radius=0.01, color=(1.0, 1.0, 1.0)): + self.spring2indices() + canvas.lines(self.pos, width=0.005, indices=self.indices, color=(0.0, 0.0, 1.0)) + canvas.circles(self.pos, radius, color) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("-g", "--use-ggui", action="store_true", help="Display with GGUI") + parser.add_argument( + "-a", + "--arch", + required=False, + default="cpu", + dest="arch", + type=str, + help="The arch (backend) to run this example on", + ) + args, unknowns = parser.parse_known_args() + arch = args.arch + if arch in ["x64", "cpu", "arm64"]: + ti.init(arch=ti.cpu) + elif arch in ["cuda", "gpu"]: + ti.init(arch=ti.cuda) + else: + raise ValueError("Only CPU and CUDA backends are supported for now.") + + h = 0.01 + pause = False + cloth = Cloth(N=5) + + use_ggui = args.use_ggui + if not use_ggui: + gui = ti.GUI("Implicit Mass Spring System", res=(500, 500)) + while gui.running: + for e in gui.get_events(): + if e.key == gui.ESCAPE: + gui.running = False + elif e.key == gui.SPACE: + pause = not pause + + if not pause: + cloth.update(h) + + cloth.display(gui) + gui.show() + else: + window = ti.ui.Window("Implicit Mass Spring System", res=(500, 500)) + while window.running: + if window.get_event(ti.ui.PRESS): + if window.event.key == ti.ui.ESCAPE: + break + if window.is_pressed(ti.ui.SPACE): + pause = not pause + + if not pause: + cloth.update(h) + + canvas = window.get_canvas() + cloth.displayGGUI(canvas) + window.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/simulation/initial_value_problem.py b/local_packages/taichi/examples/simulation/initial_value_problem.py new file mode 100644 index 0000000..2361b6a --- /dev/null +++ b/local_packages/taichi/examples/simulation/initial_value_problem.py @@ -0,0 +1,50 @@ +import time + +import numpy as np + +import taichi as ti + + +def init(): + a = [] + for i in np.linspace(0, 1, n, False): + for j in np.linspace(0, 1, n, False): + a.append([i, j]) + return np.array(a).astype(np.float32) + + +ti.init(arch=ti.gpu) +n = 50 +dirs = ti.field(dtype=float, shape=(n * n, 2)) +locations_np = init() + +locations = ti.field(dtype=float, shape=(n * n, 2)) +locations.from_numpy(locations_np) + + +@ti.kernel +def paint(t: float): + (o, p) = locations_np.shape + for i in range(0, o): # Parallelized over all pixels + x = locations[i, 0] + y = locations[i, 1] + dirs[i, 0] = ti.sin((t * x - y)) + dirs[i, 1] = ti.cos(t * y - x) + l = (dirs[i, 0] ** 2 + dirs[i, 1] ** 2) ** 0.5 + dirs[i, 0] /= l * 40 + dirs[i, 1] /= l * 40 + + +def main(): + gui = ti.GUI("Vector Field", res=(500, 500)) + + beginning = time.time_ns() + for k in range(1000000): + paint((time.time_ns() - beginning) * 0.00000001) + dirs_np = dirs.to_numpy() + gui.arrows(locations_np, dirs_np, radius=1) + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/simulation/karman_vortex_street.py b/local_packages/taichi/examples/simulation/karman_vortex_street.py new file mode 100644 index 0000000..b378af3 --- /dev/null +++ b/local_packages/taichi/examples/simulation/karman_vortex_street.py @@ -0,0 +1,177 @@ +# Fluid solver based on lattice boltzmann method using taichi language +# Author : Wang (hietwll@gmail.com) +# Original code at https://github.com/hietwll/LBM_Taichi + +import matplotlib +import numpy as np +from matplotlib import cm + +import taichi as ti +import taichi.math as tm + +ti.init(arch=ti.gpu) + +vec9 = ti.types.vector(9, float) +imat9x2 = ti.types.matrix(9, 2, int) + + +@ti.data_oriented +class lbm_solver: + def __init__( + self, + nx, # domain size + ny, + niu, # viscosity of fluid + bc_type, # [left,top,right,bottom] boundary conditions: 0 -> Dirichlet ; 1 -> Neumann + bc_value, # if bc_type = 0, we need to specify the velocity in bc_value + cy=0, # whether to place a cylindrical obstacle + cy_para=[0.0, 0.0, 0.0], # location and radius of the cylinder + ): + self.nx = nx # by convention, dx = dy = dt = 1.0 (lattice units) + self.ny = ny + self.niu = niu + self.tau = 3.0 * niu + 0.5 + self.inv_tau = 1.0 / self.tau + self.rho = ti.field(float, shape=(nx, ny)) + self.vel = ti.Vector.field(2, float, shape=(nx, ny)) + self.mask = ti.field(float, shape=(nx, ny)) + self.f_old = ti.Vector.field(9, float, shape=(nx, ny)) + self.f_new = ti.Vector.field(9, float, shape=(nx, ny)) + self.w = vec9(4, 1, 1, 1, 1, 1 / 4, 1 / 4, 1 / 4, 1 / 4) / 9.0 + self.e = imat9x2([0, 0], [1, 0], [0, 1], [-1, 0], [0, -1], [1, 1], [-1, 1], [-1, -1], [1, -1]) + self.bc_type = ti.field(int, 4) + self.bc_type.from_numpy(np.array(bc_type, dtype=np.int32)) + self.bc_value = ti.Vector.field(2, float, shape=4) + self.bc_value.from_numpy(np.array(bc_value, dtype=np.float32)) + self.cy = cy + self.cy_para = tm.vec3(cy_para) + + @ti.func # compute equilibrium distribution function + def f_eq(self, i, j): + eu = self.e @ self.vel[i, j] + uv = tm.dot(self.vel[i, j], self.vel[i, j]) + return self.w * self.rho[i, j] * (1 + 3 * eu + 4.5 * eu * eu - 1.5 * uv) + + @ti.kernel + def init(self): + self.vel.fill(0) + self.rho.fill(1) + self.mask.fill(0) + for i, j in self.rho: + self.f_old[i, j] = self.f_new[i, j] = self.f_eq(i, j) + if self.cy == 1: + if (i - self.cy_para[0]) ** 2 + (j - self.cy_para[1]) ** 2 <= self.cy_para[2] ** 2: + self.mask[i, j] = 1.0 + + @ti.kernel + def collide_and_stream(self): # lbm core equation + for i, j in ti.ndrange((1, self.nx - 1), (1, self.ny - 1)): + for k in ti.static(range(9)): + ip = i - self.e[k, 0] + jp = j - self.e[k, 1] + feq = self.f_eq(ip, jp) + self.f_new[i, j][k] = (1 - self.inv_tau) * self.f_old[ip, jp][k] + feq[k] * self.inv_tau + + @ti.kernel + def update_macro_var(self): # compute rho u v + for i, j in ti.ndrange((1, self.nx - 1), (1, self.ny - 1)): + self.rho[i, j] = 0 + self.vel[i, j] = 0, 0 + for k in ti.static(range(9)): + self.f_old[i, j][k] = self.f_new[i, j][k] + self.rho[i, j] += self.f_new[i, j][k] + self.vel[i, j] += tm.vec2(self.e[k, 0], self.e[k, 1]) * self.f_new[i, j][k] + + self.vel[i, j] /= self.rho[i, j] + + @ti.kernel + def apply_bc(self): # impose boundary conditions + # left and right + for j in range(1, self.ny - 1): + # left: dr = 0; ibc = 0; jbc = j; inb = 1; jnb = j + self.apply_bc_core(1, 0, 0, j, 1, j) + + # right: dr = 2; ibc = nx-1; jbc = j; inb = nx-2; jnb = j + self.apply_bc_core(1, 2, self.nx - 1, j, self.nx - 2, j) + + # top and bottom + for i in range(self.nx): + # top: dr = 1; ibc = i; jbc = ny-1; inb = i; jnb = ny-2 + self.apply_bc_core(1, 1, i, self.ny - 1, i, self.ny - 2) + + # bottom: dr = 3; ibc = i; jbc = 0; inb = i; jnb = 1 + self.apply_bc_core(1, 3, i, 0, i, 1) + + # cylindrical obstacle + # Note: for cuda backend, putting 'if statement' inside loops can be much faster! + for i, j in ti.ndrange(self.nx, self.ny): + if self.cy == 1 and self.mask[i, j] == 1: + self.vel[i, j] = 0, 0 # velocity is zero at solid boundary + inb = 0 + jnb = 0 + if i >= self.cy_para[0]: + inb = i + 1 + else: + inb = i - 1 + if j >= self.cy_para[1]: + jnb = j + 1 + else: + jnb = j - 1 + self.apply_bc_core(0, 0, i, j, inb, jnb) + + @ti.func + def apply_bc_core(self, outer, dr, ibc, jbc, inb, jnb): + if outer == 1: # handle outer boundary + if self.bc_type[dr] == 0: + self.vel[ibc, jbc] = self.bc_value[dr] + + elif self.bc_type[dr] == 1: + self.vel[ibc, jbc] = self.vel[inb, jnb] + + self.rho[ibc, jbc] = self.rho[inb, jnb] + self.f_old[ibc, jbc] = self.f_eq(ibc, jbc) - self.f_eq(inb, jnb) + self.f_old[inb, jnb] + + def solve(self): + gui = ti.GUI("Karman Vortex Street", (self.nx, 2 * self.ny)) + self.init() + while not gui.get_event(ti.GUI.ESCAPE, ti.GUI.EXIT): + for _ in range(10): + self.collide_and_stream() + self.update_macro_var() + self.apply_bc() + + ## code fragment displaying vorticity is contributed by woclass + vel = self.vel.to_numpy() + ugrad = np.gradient(vel[:, :, 0]) + vgrad = np.gradient(vel[:, :, 1]) + vor = ugrad[1] - vgrad[0] + vel_mag = (vel[:, :, 0] ** 2.0 + vel[:, :, 1] ** 2.0) ** 0.5 + ## color map + colors = [ + (1, 1, 0), + (0.953, 0.490, 0.016), + (0, 0, 0), + (0.176, 0.976, 0.529), + (0, 1, 1), + ] + my_cmap = matplotlib.colors.LinearSegmentedColormap.from_list("my_cmap", colors) + vor_img = cm.ScalarMappable(norm=matplotlib.colors.Normalize(vmin=-0.02, vmax=0.02), cmap=my_cmap).to_rgba( + vor + ) + vel_img = cm.plasma(vel_mag / 0.15) + img = np.concatenate((vor_img, vel_img), axis=1) + gui.set_image(img) + gui.show() + + +if __name__ == "__main__": + lbm = lbm_solver( + 801, + 201, + 0.01, + [0, 0, 1, 0], + [[0.1, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], + 1, + [160.0, 100.0, 20.0], + ) + lbm.solve() diff --git a/local_packages/taichi/examples/simulation/laplace_equation.py b/local_packages/taichi/examples/simulation/laplace_equation.py new file mode 100644 index 0000000..16fd2f9 --- /dev/null +++ b/local_packages/taichi/examples/simulation/laplace_equation.py @@ -0,0 +1,287 @@ +import taichi as ti +import taichi.math as tm + +ti.init() +vec2 = tm.vec2 + +# Author: bismarckkk +# 2D-Simulation of fundamental solutions of Laplace equation +# Refer to "Aerodynamics" Chapter 3 (ISBN:9787030582287) + +# Pressing left mouse button with 's/v/d' key will create new 'source/vortex/dipole' on cursor position, +# and pressing right mouse button will create negative one. +# Pressing left and right mouse buttons will delete element around cursor position. +# Pressing left/right mouse button without other keys will increase/decrease the intensity of element around cursor. + +screen = ( + 30, + 20, +) # density of point generation positions on the boundary, also decide window size +arrowField = [int(it / 2) for it in screen] # number of arrow in window +meshSpace = 20 # screen * meshSpace = windowSize +maxElements = 20 # the max number of each element(source/sink, vortex, dipole) +refillFrame = 20 # frame interval for each refill points +refillVelThresh = ( + 0.2 # points will be refilled when the absolute value of the velocity on boundary is greater than this value +) +V = vec2(1.0, 0) # the velocity of uniform stream +dt = 0.002 +fadeMax = 10 # frames of fade in/out +fade = fadeMax # control arrows' fade in and fade out +maxPoints = screen[0] * screen[1] * 2 +gui_ = ti.GUI("demo", tuple(it * meshSpace for it in screen)) +guiHeight = meshSpace * screen[1] + +points = ti.Vector.field(2, float, maxPoints) +sources = ti.Struct.field({"pos": vec2, "q": ti.f32}, shape=maxElements) +vortexes = ti.Struct.field({"pos": vec2, "q": ti.f32}, shape=maxElements) +dipoles = ti.Struct.field({"pos": vec2, "m": ti.f32}, shape=maxElements) +arrows = ti.Struct.field({"base": vec2, "dir": vec2, "vel": ti.f32}, shape=arrowField) +points.fill(-100) + + +def initPoints(): + for x, y in ti.ndrange(*arrowField): + arrows[x, y].base = vec2( + (x + 1) * (1 - 1 / arrowField[0]) / arrowField[0], + (y + 1) * (1 - 1 / arrowField[1]) / arrowField[1], + ) + dipoles[0].pos = vec2(0.5, 0.5) + dipoles[0].m = 0.01 + vortexes[0].pos = vec2(0.5, 0.5) + vortexes[0].q = -0.5 + + +@ti.func +def getVel(pos): + vel = V # add uniform stream velocity + for i in range(maxElements): + # add source/sink velocity + uv = pos - sources[i].pos + uv[0] *= screen[1] / screen[0] + vel += uv * sources[i].q / (2 * tm.pi * (uv[0] ** 2 + uv[1] ** 2)) + # add vortex velocity + uv = pos - vortexes[i].pos + uv = vec2(-uv[1], uv[0]) + uv[0] *= screen[1] / screen[0] + vel += uv * vortexes[i].q / (2 * tm.pi * (uv[0] ** 2 + uv[1] ** 2)) + # add dipole velocity + uv = pos - dipoles[i].pos + uv[0] *= screen[1] / screen[0] + vel_t = vec2(uv[1] ** 2 - uv[0] ** 2, -2 * uv[0] * uv[1]) + vel += vel_t * dipoles[i].m / (uv[0] ** 2 + uv[1] ** 2) ** 2 + return vel + + +# @param.boundary: 0: left, 1: bottom, 2: right, 3: top +# @param.index: last access point index +@ti.func +def refillPointsOnOneBoundary(boundary, index): + pointsNumber = screen[0] + if boundary == 0 or boundary == 2: + pointsNumber = screen[1] + for i in range(pointsNumber): + found = False + for _ in range(maxPoints): + if points[index][0] == -100: + found = True + if boundary == 0: + points[index] = vec2(-dt * refillVelThresh, (i + 0.5) / screen[1]) + elif boundary == 1: + points[index] = vec2((i + 0.5) / screen[0], -dt * refillVelThresh) + elif boundary == 2: + points[index] = vec2(1 + dt * refillVelThresh, (i + 0.5) / screen[1]) + elif boundary == 3: + points[index] = vec2((i + 0.5) / screen[0], 1 + dt * refillVelThresh) + break + index += 1 + if index >= maxPoints: + index = 0 + if not found: + break + return index + + +@ti.kernel +def refillPoints(): + # traverse positions and refill points on the boundary. + # if the normal velocity is less than thresh, refill point will be deleted when update. + index = 0 + found = True + ti.loop_config(serialize=True) + for i in range(4): + _index = refillPointsOnOneBoundary(i, index) + if _index == index: + break + else: + index = _index + + +@ti.kernel +def updatePoints(): + for i in points: + if points[i][0] != -100: + vel = getVel(points[i]) + points[i] += vel * dt + if not (0 <= points[i][0] <= 1 and 0 <= points[i][1] <= 1): + points[i] = vec2(-100, -100) + for j in range(maxElements): + if sources[j].q < 0 and tm.length(points[i] - sources[j].pos) < 0.025: + points[i] = vec2(-100, -100) + if dipoles[j].m != 0 and tm.length(points[i] - dipoles[j].pos) < 0.05: + points[i] = vec2(-100, -100) + + +@ti.kernel +def updateArrows(): + for x, y in ti.ndrange(*arrowField): + vel = getVel(arrows[x, y].base) + arrows[x, y].vel = vel.norm() + arrows[x, y].dir = vel / vel.norm() / meshSpace / 1.5 + + +def drawArrows(_gui): + global fade + if fade < fadeMax: + fade += 1 + if fade == 0: + updateArrows() # after fade out, update arrow filed + else: + arr = arrows.to_numpy() + vel = arr["vel"].reshape(1, -1)[0] + vel = (vel / vel.max() * 0xDD + 0x11) * (abs(fade / fadeMax)) + mean = vel.mean() + if mean > 0x7F: + vel /= mean / 0x7F # make uniform stream more beautiful + vel = vel.astype(int) + vel *= 2**16 + 2**8 + 1 + _gui.arrows( + arr["base"].reshape(arr["base"].shape[0] * arr["base"].shape[1], 2), + arr["dir"].reshape(arr["dir"].shape[0] * arr["dir"].shape[1], 2), + radius=1.5, + color=vel, + ) + + +def drawMark(_gui, _frame): + triangleTrans = [ + vec2(0, 1) / guiHeight, + vec2(tm.cos(7.0 / 6.0 * tm.pi), tm.sin(7.0 / 6.0 * tm.pi)) / guiHeight, + vec2(tm.cos(-1.0 / 6.0 * tm.pi), tm.sin(-1.0 / 6.0 * tm.pi)) / guiHeight, + ] + rectTrans = [ + vec2(1 * screen[1] / screen[0], 1) / guiHeight, + vec2(-1 * screen[1] / screen[0], -1) / guiHeight, + ] + for i in range(maxElements): + if dipoles[i].m > 0: + _gui.circle(dipoles[i].pos, 0xFFFDC0, dipoles[i].m * 2000) + elif dipoles[i].m < 0: + _gui.circle(dipoles[i].pos, 0xD25565, dipoles[i].m * -2000) + if sources[i].q > 0: + _gui.rect( + sources[i].pos + rectTrans[0] * 2 * sources[i].q, + sources[i].pos + rectTrans[1] * 2 * sources[i].q, + 2 * sources[i].q + 1, + 0xFFFDC0, + ) + elif sources[i].q < 0: + _gui.rect( + sources[i].pos + rectTrans[0] * 2 * sources[i].q, + sources[i].pos + rectTrans[1] * 2 * sources[i].q, + -2 * sources[i].q + 1, + 0xD25565, + ) + if vortexes[i].q != 0: + theta = vortexes[i].q * dt * 40 * _frame + ct, st = ti.cos(theta), ti.sin(theta) + rotateMatrix = ti.Matrix([[ct, -st], [st, ct]]) + trans = [rotateMatrix @ it for it in triangleTrans] + for it in trans: + it[0] *= screen[1] / screen[0] + _gui.triangle( + vortexes[i].pos + trans[0] * 16, + vortexes[i].pos + trans[1] * 16, + vortexes[i].pos + trans[2] * 16, + 0xD25565, + ) + + +def processGuiEvent(_gui): + global fade + while _gui.get_event((ti.GUI.PRESS, ti.GUI.LMB), (ti.GUI.PRESS, ti.GUI.RMB)): + if _gui.is_pressed(ti.GUI.LMB) and _gui.is_pressed(ti.GUI.RMB): # process delete event + for i in range(maxElements): + if sources[i].q != 0 and (sources[i].pos - vec2(*_gui.get_cursor_pos())).norm() < 5 / guiHeight: + sources[i].q = 0 + if vortexes[i].q != 0 and (vortexes[i].pos - vec2(*_gui.get_cursor_pos())).norm() < 5 / guiHeight: + vortexes[i].q = 0 + if dipoles[i].m != 0 and (dipoles[i].pos - vec2(*_gui.get_cursor_pos())).norm() < 5 / guiHeight: + dipoles[i].m = 0 + else: + if _gui.is_pressed("s"): # process add source/sink event + for i in range(maxElements): + if sources[i].q == 0: + if _gui.is_pressed(ti.GUI.RMB): + sources[i].q -= 1 + else: + sources[i].q += 1 + sources[i].pos = vec2(*_gui.get_cursor_pos()) + break + elif _gui.is_pressed("v"): # process add vortex event + for i in range(maxElements): + if vortexes[i].q == 0: + if _gui.is_pressed(ti.GUI.RMB): + vortexes[i].q -= 0.5 + else: + vortexes[i].q += 0.5 + vortexes[i].pos = vec2(*_gui.get_cursor_pos()) + break + elif _gui.is_pressed("d"): # process add dipole event + for i in range(maxElements): + if dipoles[i].m == 0: + if _gui.is_pressed(ti.GUI.RMB): + dipoles[i].m -= 0.01 + else: + dipoles[i].m += 0.01 + dipoles[i].pos = vec2(*_gui.get_cursor_pos()) + break + else: + for i in range(maxElements): # process increase/decrease event + if sources[i].q != 0 and (sources[i].pos - vec2(*_gui.get_cursor_pos())).norm() < 5 / guiHeight: + if _gui.is_pressed(ti.GUI.RMB): + sources[i].q -= 0.5 * int((sources[i].q >= 0.0) - (sources[i].q <= 0.0)) + else: + sources[i].q += 0.5 * int((sources[i].q >= 0.0) - (sources[i].q <= 0.0)) + if vortexes[i].q != 0 and (vortexes[i].pos - vec2(*_gui.get_cursor_pos())).norm() < 5 / guiHeight: + if _gui.is_pressed(ti.GUI.RMB): + vortexes[i].q -= 0.1 * int((vortexes[i].q >= 0.0) - (vortexes[i].q <= 0.0)) + else: + vortexes[i].q += 0.1 * int((vortexes[i].q >= 0.0) - (vortexes[i].q <= 0.0)) + if dipoles[i].m != 0 and (dipoles[i].pos - vec2(*_gui.get_cursor_pos())).norm() < 5 / guiHeight: + if _gui.is_pressed(ti.GUI.RMB): + dipoles[i].m -= 0.001 * int((dipoles[i].m >= 0.0) - (dipoles[i].m <= 0.0)) + else: + dipoles[i].m += 0.001 * int((dipoles[i].m >= 0.0) - (dipoles[i].m <= 0.0)) + fade = -abs(fade) # fade out arrow field + + +if __name__ == "__main__": + initPoints() + updateArrows() + refillPoints() + refillCount = 0 + frame_count = 0 + while gui_.running: + processGuiEvent(gui_) + drawArrows(gui_) + updatePoints() + ps = points.to_numpy() + gui_.circles(ps, 3, color=0x2E94B9) + if refillCount > refillFrame: + refillCount = 0 + refillPoints() + drawMark(gui_, frame_count) + gui_.show() + refillCount += 1 + frame_count += 1 diff --git a/local_packages/taichi/examples/simulation/mandelbrot_zoom.py b/local_packages/taichi/examples/simulation/mandelbrot_zoom.py new file mode 100644 index 0000000..8943ea4 --- /dev/null +++ b/local_packages/taichi/examples/simulation/mandelbrot_zoom.py @@ -0,0 +1,55 @@ +import taichi as ti +from taichi.math import cmul, dot, log2, vec2, vec3 + +ti.init(arch=ti.gpu) + +MAXITERS = 100 +width, height = 800, 640 +pixels = ti.Vector.field(3, ti.f32, shape=(width, height)) + + +@ti.func +def setcolor(z, i): + v = log2(i + 1 - log2(log2(z.norm()))) / 5 + col = vec3(0.0) + if v < 1.0: + col = vec3(v**4, v**2.5, v) + else: + v = ti.max(0.0, 2 - v) + col = vec3(v, v**1.5, v**3) + return col + + +@ti.kernel +def render(time: ti.f32): + zoo = 0.64 + 0.36 * ti.cos(0.02 * time) + zoo = ti.pow(zoo, 8.0) + ca = ti.cos(0.15 * (1.0 - zoo) * time) + sa = ti.sin(0.15 * (1.0 - zoo) * time) + for i, j in pixels: + c = 2.0 * vec2(i, j) / height - vec2(1) + # c *= 1.16 + xy = vec2(c.x * ca - c.y * sa, c.x * sa + c.y * ca) + c = vec2(-0.745, 0.186) + xy * zoo + z = vec2(0.0) + count = 0.0 + while count < MAXITERS and dot(z, z) < 50: + z = cmul(z, z) + c + count += 1.0 + + if count == MAXITERS: + pixels[i, j] = [0, 0, 0] + else: + pixels[i, j] = setcolor(z, count) + + +def main(): + gui = ti.GUI("Mandelbrot set zoom", res=(width, height)) + for i in range(100000): + render(i * 0.03) + gui.set_image(pixels) + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/simulation/mass_spring_game.py b/local_packages/taichi/examples/simulation/mass_spring_game.py new file mode 100644 index 0000000..8da3c6b --- /dev/null +++ b/local_packages/taichi/examples/simulation/mass_spring_game.py @@ -0,0 +1,184 @@ +# Tutorials (Chinese): +# - https://www.bilibili.com/video/BV1UK4y177iH +# - https://www.bilibili.com/video/BV1DK411A771 + +import taichi as ti + +ti.init(arch=ti.cpu) + +spring_Y = ti.field(dtype=ti.f32, shape=()) # Young's modulus +paused = ti.field(dtype=ti.i32, shape=()) +drag_damping = ti.field(dtype=ti.f32, shape=()) +dashpot_damping = ti.field(dtype=ti.f32, shape=()) + +max_num_particles = 1024 +particle_mass = 1.0 +dt = 1e-3 +substeps = 10 + +num_particles = ti.field(dtype=ti.i32, shape=()) +x = ti.Vector.field(2, dtype=ti.f32, shape=max_num_particles) +v = ti.Vector.field(2, dtype=ti.f32, shape=max_num_particles) +f = ti.Vector.field(2, dtype=ti.f32, shape=max_num_particles) +fixed = ti.field(dtype=ti.i32, shape=max_num_particles) + +# rest_length[i, j] == 0 means i and j are NOT connected +rest_length = ti.field(dtype=ti.f32, shape=(max_num_particles, max_num_particles)) + + +@ti.kernel +def substep(): + n = num_particles[None] + + # Compute force + for i in range(n): + # Gravity + f[i] = ti.Vector([0, -9.8]) * particle_mass + for j in range(n): + if rest_length[i, j] != 0: + x_ij = x[i] - x[j] + d = x_ij.normalized() + + # Spring force + f[i] += -spring_Y[None] * (x_ij.norm() / rest_length[i, j] - 1) * d + + # Dashpot damping + v_rel = (v[i] - v[j]).dot(d) + f[i] += -dashpot_damping[None] * v_rel * d + + # We use a semi-implicit Euler (aka symplectic Euler) time integrator + for i in range(n): + if not fixed[i]: + v[i] += dt * f[i] / particle_mass + v[i] *= ti.exp(-dt * drag_damping[None]) # Drag damping + + x[i] += v[i] * dt + else: + v[i] = ti.Vector([0, 0]) + + # Collide with four walls + for d in ti.static(range(2)): + # d = 0: treating X (horizontal) component + # d = 1: treating Y (vertical) component + + if x[i][d] < 0: # Bottom and left + x[i][d] = 0 # move particle inside + v[i][d] = 0 # stop it from moving further + + if x[i][d] > 1: # Top and right + x[i][d] = 1 # move particle inside + v[i][d] = 0 # stop it from moving further + + +@ti.kernel +def new_particle(pos_x: ti.f32, pos_y: ti.f32, fixed_: ti.i32): + # Taichi doesn't support using vectors as kernel arguments yet, so we pass scalars + new_particle_id = num_particles[None] + x[new_particle_id] = [pos_x, pos_y] + v[new_particle_id] = [0, 0] + fixed[new_particle_id] = fixed_ + num_particles[None] += 1 + + # Connect with existing particles + for i in range(new_particle_id): + dist = (x[new_particle_id] - x[i]).norm() + connection_radius = 0.15 + if dist < connection_radius: + # Connect the new particle with particle i + rest_length[i, new_particle_id] = 0.1 + rest_length[new_particle_id, i] = 0.1 + + +@ti.kernel +def attract(pos_x: ti.f32, pos_y: ti.f32): + for i in range(num_particles[None]): + p = ti.Vector([pos_x, pos_y]) + v[i] += -dt * substeps * (x[i] - p) * 100 + + +def main(): + gui = ti.GUI("Explicit Mass Spring System", res=(512, 512), background_color=0xDDDDDD) + + spring_Y[None] = 1000 + drag_damping[None] = 1 + dashpot_damping[None] = 100 + + new_particle(0.3, 0.3, False) + new_particle(0.3, 0.4, False) + new_particle(0.4, 0.4, False) + + while True: + for e in gui.get_events(ti.GUI.PRESS): + if e.key in [ti.GUI.ESCAPE, ti.GUI.EXIT]: + exit() + elif e.key == gui.SPACE: + paused[None] = not paused[None] + elif e.key == ti.GUI.LMB: + new_particle(e.pos[0], e.pos[1], int(gui.is_pressed(ti.GUI.SHIFT))) + elif e.key == "c": + num_particles[None] = 0 + rest_length.fill(0) + elif e.key == "y": + if gui.is_pressed("Shift"): + spring_Y[None] /= 1.1 + else: + spring_Y[None] *= 1.1 + elif e.key == "d": + if gui.is_pressed("Shift"): + drag_damping[None] /= 1.1 + else: + drag_damping[None] *= 1.1 + elif e.key == "x": + if gui.is_pressed("Shift"): + dashpot_damping[None] /= 1.1 + else: + dashpot_damping[None] *= 1.1 + + if gui.is_pressed(ti.GUI.RMB): + cursor_pos = gui.get_cursor_pos() + attract(cursor_pos[0], cursor_pos[1]) + + if not paused[None]: + for step in range(substeps): + substep() + + X = x.to_numpy() + n = num_particles[None] + + # Draw the springs + for i in range(n): + for j in range(i + 1, n): + if rest_length[i, j] != 0: + gui.line(begin=X[i], end=X[j], radius=2, color=0x444444) + + # Draw the particles + for i in range(n): + c = 0xFF0000 if fixed[i] else 0x111111 + gui.circle(pos=X[i], color=c, radius=5) + + gui.text( + content="Left click: add mass point (with shift to fix); Right click: attract", + pos=(0, 0.99), + color=0x0, + ) + gui.text(content="C: clear all; Space: pause", pos=(0, 0.95), color=0x0) + gui.text( + content=f"Y: Spring Young's modulus {spring_Y[None]:.1f}", + pos=(0, 0.9), + color=0x0, + ) + gui.text( + content=f"D: Drag damping {drag_damping[None]:.2f}", + pos=(0, 0.85), + color=0x0, + ) + gui.text( + content=f"X: Dashpot damping {dashpot_damping[None]:.2f}", + pos=(0, 0.8), + color=0x0, + ) + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/simulation/mpm128.py b/local_packages/taichi/examples/simulation/mpm128.py new file mode 100644 index 0000000..83b013d --- /dev/null +++ b/local_packages/taichi/examples/simulation/mpm128.py @@ -0,0 +1,159 @@ +import taichi as ti + +ti.init(arch=ti.gpu) # Try to run on GPU + +quality = 1 # Use a larger value for higher-res simulations +n_particles, n_grid = 9000 * quality**2, 128 * quality +dx, inv_dx = 1 / n_grid, float(n_grid) +dt = 1e-4 / quality +p_vol, p_rho = (dx * 0.5) ** 2, 1 +p_mass = p_vol * p_rho +E, nu = 5e3, 0.2 # Young's modulus and Poisson's ratio +mu_0, lambda_0 = E / (2 * (1 + nu)), E * nu / ((1 + nu) * (1 - 2 * nu)) # Lame parameters + +x = ti.Vector.field(2, dtype=float, shape=n_particles) # position +v = ti.Vector.field(2, dtype=float, shape=n_particles) # velocity +C = ti.Matrix.field(2, 2, dtype=float, shape=n_particles) # affine velocity field +F = ti.Matrix.field(2, 2, dtype=float, shape=n_particles) # deformation gradient +material = ti.field(dtype=int, shape=n_particles) # material id +Jp = ti.field(dtype=float, shape=n_particles) # plastic deformation +grid_v = ti.Vector.field(2, dtype=float, shape=(n_grid, n_grid)) # grid node momentum/velocity +grid_m = ti.field(dtype=float, shape=(n_grid, n_grid)) # grid node mass +gravity = ti.Vector.field(2, dtype=float, shape=()) +attractor_strength = ti.field(dtype=float, shape=()) +attractor_pos = ti.Vector.field(2, dtype=float, shape=()) + + +@ti.kernel +def substep(): + for i, j in grid_m: + grid_v[i, j] = [0, 0] + grid_m[i, j] = 0 + for p in x: # Particle state update and scatter to grid (P2G) + base = (x[p] * inv_dx - 0.5).cast(int) + fx = x[p] * inv_dx - base.cast(float) + # Quadratic kernels [http://mpm.graphics Eqn. 123, with x=fx, fx-1,fx-2] + w = [0.5 * (1.5 - fx) ** 2, 0.75 - (fx - 1) ** 2, 0.5 * (fx - 0.5) ** 2] + # deformation gradient update + F[p] = (ti.Matrix.identity(float, 2) + dt * C[p]) @ F[p] + # Hardening coefficient: snow gets harder when compressed + h = ti.max(0.1, ti.min(5, ti.exp(10 * (1.0 - Jp[p])))) + if material[p] == 1: # jelly, make it softer + h = 0.3 + mu, la = mu_0 * h, lambda_0 * h + if material[p] == 0: # liquid + mu = 0.0 + U, sig, V = ti.svd(F[p]) + J = 1.0 + for d in ti.static(range(2)): + new_sig = sig[d, d] + if material[p] == 2: # Snow + new_sig = min(max(sig[d, d], 1 - 2.5e-2), 1 + 4.5e-3) # Plasticity + Jp[p] *= sig[d, d] / new_sig + sig[d, d] = new_sig + J *= new_sig + if material[p] == 0: + # Reset deformation gradient to avoid numerical instability + F[p] = ti.Matrix.identity(float, 2) * ti.sqrt(J) + elif material[p] == 2: + # Reconstruct elastic deformation gradient after plasticity + F[p] = U @ sig @ V.transpose() + stress = 2 * mu * (F[p] - U @ V.transpose()) @ F[p].transpose() + ti.Matrix.identity(float, 2) * la * J * ( + J - 1 + ) + stress = (-dt * p_vol * 4 * inv_dx * inv_dx) * stress + affine = stress + p_mass * C[p] + for i, j in ti.static(ti.ndrange(3, 3)): + # Loop over 3x3 grid node neighborhood + offset = ti.Vector([i, j]) + dpos = (offset.cast(float) - fx) * dx + weight = w[i][0] * w[j][1] + grid_v[base + offset] += weight * (p_mass * v[p] + affine @ dpos) + grid_m[base + offset] += weight * p_mass + for i, j in grid_m: + if grid_m[i, j] > 0: # No need for epsilon here + # Momentum to velocity + grid_v[i, j] = (1 / grid_m[i, j]) * grid_v[i, j] + grid_v[i, j] += dt * gravity[None] * 30 # gravity + dist = attractor_pos[None] - dx * ti.Vector([i, j]) + grid_v[i, j] += dist / (0.01 + dist.norm()) * attractor_strength[None] * dt * 100 + if i < 3 and grid_v[i, j][0] < 0: + grid_v[i, j][0] = 0 # Boundary conditions + if i > n_grid - 3 and grid_v[i, j][0] > 0: + grid_v[i, j][0] = 0 + if j < 3 and grid_v[i, j][1] < 0: + grid_v[i, j][1] = 0 + if j > n_grid - 3 and grid_v[i, j][1] > 0: + grid_v[i, j][1] = 0 + for p in x: # grid to particle (G2P) + base = (x[p] * inv_dx - 0.5).cast(int) + fx = x[p] * inv_dx - base.cast(float) + w = [0.5 * (1.5 - fx) ** 2, 0.75 - (fx - 1.0) ** 2, 0.5 * (fx - 0.5) ** 2] + new_v = ti.Vector.zero(float, 2) + new_C = ti.Matrix.zero(float, 2, 2) + for i, j in ti.static(ti.ndrange(3, 3)): + # loop over 3x3 grid node neighborhood + dpos = ti.Vector([i, j]).cast(float) - fx + g_v = grid_v[base + ti.Vector([i, j])] + weight = w[i][0] * w[j][1] + new_v += weight * g_v + new_C += 4 * inv_dx * weight * g_v.outer_product(dpos) + v[p], C[p] = new_v, new_C + x[p] += dt * v[p] # advection + + +@ti.kernel +def reset(): + group_size = n_particles // 3 + for i in range(n_particles): + x[i] = [ + ti.random() * 0.2 + 0.3 + 0.10 * (i // group_size), + ti.random() * 0.2 + 0.05 + 0.32 * (i // group_size), + ] + material[i] = i // group_size # 0: fluid 1: jelly 2: snow + v[i] = [0, 0] + F[i] = ti.Matrix([[1, 0], [0, 1]]) + Jp[i] = 1 + C[i] = ti.Matrix.zero(float, 2, 2) + + +print("[Hint] Use WSAD/arrow keys to control gravity. Use left/right mouse buttons to attract/repel. Press R to reset.") +gui = ti.GUI("Taichi MLS-MPM-128", res=512, background_color=0x112F41) +reset() +gravity[None] = [0, -1] + +for frame in range(20000): + if gui.get_event(ti.GUI.PRESS): + if gui.event.key == "r": + reset() + elif gui.event.key in [ti.GUI.ESCAPE, ti.GUI.EXIT]: + break + if gui.event is not None: + gravity[None] = [0, 0] # if had any event + if gui.is_pressed(ti.GUI.LEFT, "a"): + gravity[None][0] = -1 + if gui.is_pressed(ti.GUI.RIGHT, "d"): + gravity[None][0] = 1 + if gui.is_pressed(ti.GUI.UP, "w"): + gravity[None][1] = 1 + if gui.is_pressed(ti.GUI.DOWN, "s"): + gravity[None][1] = -1 + mouse = gui.get_cursor_pos() + gui.circle((mouse[0], mouse[1]), color=0x336699, radius=15) + attractor_pos[None] = [mouse[0], mouse[1]] + attractor_strength[None] = 0 + if gui.is_pressed(ti.GUI.LMB): + attractor_strength[None] = 1 + if gui.is_pressed(ti.GUI.RMB): + attractor_strength[None] = -1 + for s in range(int(2e-3 // dt)): + substep() + gui.circles( + x.to_numpy(), + radius=1.5, + palette=[0x068587, 0xED553B, 0xEEEEF0], + palette_indices=material, + ) + + # Change to gui.show(f'{frame:06d}.png') to write images to disk + gui.show() diff --git a/local_packages/taichi/examples/simulation/mpm3d.py b/local_packages/taichi/examples/simulation/mpm3d.py new file mode 100644 index 0000000..d1a1e2b --- /dev/null +++ b/local_packages/taichi/examples/simulation/mpm3d.py @@ -0,0 +1,122 @@ +export_file = "" # use '/tmp/mpm3d.ply' for exporting result to disk + +import numpy as np + +import taichi as ti + +ti.init(arch=ti.gpu) + +# dim, n_grid, steps, dt = 2, 128, 20, 2e-4 +# dim, n_grid, steps, dt = 2, 256, 32, 1e-4 +dim, n_grid, steps, dt = 3, 32, 25, 4e-4 +# dim, n_grid, steps, dt = 3, 64, 25, 2e-4 +# dim, n_grid, steps, dt = 3, 128, 25, 8e-5 + +n_particles = n_grid**dim // 2 ** (dim - 1) +dx = 1 / n_grid + +p_rho = 1 +p_vol = (dx * 0.5) ** 2 +p_mass = p_vol * p_rho +gravity = 9.8 +bound = 3 +E = 400 + +F_x = ti.Vector.field(dim, float, n_particles) +F_v = ti.Vector.field(dim, float, n_particles) +F_C = ti.Matrix.field(dim, dim, float, n_particles) +F_J = ti.field(float, n_particles) + +F_grid_v = ti.Vector.field(dim, float, (n_grid,) * dim) +F_grid_m = ti.field(float, (n_grid,) * dim) + +neighbour = (3,) * dim + + +@ti.kernel +def substep(): + for I in ti.grouped(F_grid_m): + F_grid_v[I] = ti.zero(F_grid_v[I]) + F_grid_m[I] = 0 + ti.loop_config(block_dim=n_grid) + for p in F_x: + Xp = F_x[p] / dx + base = int(Xp - 0.5) + fx = Xp - base + w = [0.5 * (1.5 - fx) ** 2, 0.75 - (fx - 1) ** 2, 0.5 * (fx - 0.5) ** 2] + stress = -dt * 4 * E * p_vol * (F_J[p] - 1) / dx**2 + affine = ti.Matrix.identity(float, dim) * stress + p_mass * F_C[p] + for offset in ti.static(ti.grouped(ti.ndrange(*neighbour))): + dpos = (offset - fx) * dx + weight = 1.0 + for i in ti.static(range(dim)): + weight *= w[offset[i]][i] + F_grid_v[base + offset] += weight * (p_mass * F_v[p] + affine @ dpos) + F_grid_m[base + offset] += weight * p_mass + for I in ti.grouped(F_grid_m): + if F_grid_m[I] > 0: + F_grid_v[I] /= F_grid_m[I] + F_grid_v[I][1] -= dt * gravity + cond = (I < bound) & (F_grid_v[I] < 0) | (I > n_grid - bound) & (F_grid_v[I] > 0) + F_grid_v[I] = ti.select(cond, 0, F_grid_v[I]) + ti.loop_config(block_dim=n_grid) + for p in F_x: + Xp = F_x[p] / dx + base = int(Xp - 0.5) + fx = Xp - base + w = [0.5 * (1.5 - fx) ** 2, 0.75 - (fx - 1) ** 2, 0.5 * (fx - 0.5) ** 2] + new_v = ti.zero(F_v[p]) + new_C = ti.zero(F_C[p]) + for offset in ti.static(ti.grouped(ti.ndrange(*neighbour))): + dpos = (offset - fx) * dx + weight = 1.0 + for i in ti.static(range(dim)): + weight *= w[offset[i]][i] + g_v = F_grid_v[base + offset] + new_v += weight * g_v + new_C += 4 * weight * g_v.outer_product(dpos) / dx**2 + F_v[p] = new_v + F_x[p] += dt * F_v[p] + F_J[p] *= 1 + dt * new_C.trace() + F_C[p] = new_C + + +@ti.kernel +def init(): + for i in range(n_particles): + F_x[i] = ti.Vector([ti.random() for i in range(dim)]) * 0.4 + 0.15 + F_J[i] = 1 + + +def T(a): + if dim == 2: + return a + + phi, theta = np.radians(28), np.radians(32) + + a = a - 0.5 + x, y, z = a[:, 0], a[:, 1], a[:, 2] + cp, sp = np.cos(phi), np.sin(phi) + ct, st = np.cos(theta), np.sin(theta) + x, z = x * cp + z * sp, z * cp - x * sp + u, v = x, y * ct + z * st + return np.array([u, v]).swapaxes(0, 1) + 0.5 + + +def main(): + init() + gui = ti.GUI("MPM3D", background_color=0x112F41) + while gui.running and not gui.get_event(gui.ESCAPE): + for s in range(steps): + substep() + pos = F_x.to_numpy() + if export_file: + writer = ti.tools.PLYWriter(num_vertices=n_particles) + writer.add_vertex_pos(pos[:, 0], pos[:, 1], pos[:, 2]) + writer.export_frame(gui.frame, export_file) + gui.circles(T(pos), radius=1.5, color=0x66CCFF) + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/simulation/mpm88.py b/local_packages/taichi/examples/simulation/mpm88.py new file mode 100644 index 0000000..875d538 --- /dev/null +++ b/local_packages/taichi/examples/simulation/mpm88.py @@ -0,0 +1,92 @@ +# MPM-MLS in 88 lines of Taichi code, originally created by @yuanming-hu +import taichi as ti + +ti.init(arch=ti.gpu) + +n_particles = 8192 +n_grid = 128 +dx = 1 / n_grid +dt = 2e-4 + +p_rho = 1 +p_vol = (dx * 0.5) ** 2 +p_mass = p_vol * p_rho +gravity = 9.8 +bound = 3 +E = 400 + +x = ti.Vector.field(2, float, n_particles) +v = ti.Vector.field(2, float, n_particles) +C = ti.Matrix.field(2, 2, float, n_particles) +J = ti.field(float, n_particles) + +grid_v = ti.Vector.field(2, float, (n_grid, n_grid)) +grid_m = ti.field(float, (n_grid, n_grid)) + + +@ti.kernel +def substep(): + for i, j in grid_m: + grid_v[i, j] = [0, 0] + grid_m[i, j] = 0 + for p in x: + Xp = x[p] / dx + base = int(Xp - 0.5) + fx = Xp - base + w = [0.5 * (1.5 - fx) ** 2, 0.75 - (fx - 1) ** 2, 0.5 * (fx - 0.5) ** 2] + stress = -dt * 4 * E * p_vol * (J[p] - 1) / dx**2 + affine = ti.Matrix([[stress, 0], [0, stress]]) + p_mass * C[p] + for i, j in ti.static(ti.ndrange(3, 3)): + offset = ti.Vector([i, j]) + dpos = (offset - fx) * dx + weight = w[i].x * w[j].y + grid_v[base + offset] += weight * (p_mass * v[p] + affine @ dpos) + grid_m[base + offset] += weight * p_mass + for i, j in grid_m: + if grid_m[i, j] > 0: + grid_v[i, j] /= grid_m[i, j] + grid_v[i, j].y -= dt * gravity + if i < bound and grid_v[i, j].x < 0: + grid_v[i, j].x = 0 + if i > n_grid - bound and grid_v[i, j].x > 0: + grid_v[i, j].x = 0 + if j < bound and grid_v[i, j].y < 0: + grid_v[i, j].y = 0 + if j > n_grid - bound and grid_v[i, j].y > 0: + grid_v[i, j].y = 0 + for p in x: + Xp = x[p] / dx + base = int(Xp - 0.5) + fx = Xp - base + w = [0.5 * (1.5 - fx) ** 2, 0.75 - (fx - 1) ** 2, 0.5 * (fx - 0.5) ** 2] + new_v = ti.Vector.zero(float, 2) + new_C = ti.Matrix.zero(float, 2, 2) + for i, j in ti.static(ti.ndrange(3, 3)): + offset = ti.Vector([i, j]) + dpos = (offset - fx) * dx + weight = w[i].x * w[j].y + g_v = grid_v[base + offset] + new_v += weight * g_v + new_C += 4 * weight * g_v.outer_product(dpos) / dx**2 + v[p] = new_v + x[p] += dt * v[p] + J[p] *= 1 + dt * new_C.trace() + C[p] = new_C + + +@ti.kernel +def init(): + for i in range(n_particles): + x[i] = [ti.random() * 0.4 + 0.2, ti.random() * 0.4 + 0.2] + v[i] = [0, -1] + J[i] = 1 + + +init() +gui = ti.GUI("MPM88") +while gui.running and not gui.get_event(gui.ESCAPE): + for s in range(50): + substep() + gui.clear(0x112F41) + gui.circles(x.to_numpy(), radius=1.5, color=0x068587) + gui.show() diff --git a/local_packages/taichi/examples/simulation/mpm99.py b/local_packages/taichi/examples/simulation/mpm99.py new file mode 100644 index 0000000..99b157c --- /dev/null +++ b/local_packages/taichi/examples/simulation/mpm99.py @@ -0,0 +1,133 @@ +import taichi as ti + +ti.init(arch=ti.gpu) # Try to run on GPU +quality = 1 # Use a larger value for higher-res simulations +n_particles, n_grid = 9000 * quality**2, 128 * quality +dx, inv_dx = 1 / n_grid, float(n_grid) +dt = 1e-4 / quality +p_vol, p_rho = (dx * 0.5) ** 2, 1 +p_mass = p_vol * p_rho +E, nu = 0.1e4, 0.2 # Young's modulus and Poisson's ratio +mu_0, lambda_0 = E / (2 * (1 + nu)), E * nu / ((1 + nu) * (1 - 2 * nu)) # Lame parameters +x = ti.Vector.field(2, dtype=float, shape=n_particles) # position +v = ti.Vector.field(2, dtype=float, shape=n_particles) # velocity +C = ti.Matrix.field(2, 2, dtype=float, shape=n_particles) # affine velocity field +F = ti.Matrix.field(2, 2, dtype=float, shape=n_particles) # deformation gradient +material = ti.field(dtype=int, shape=n_particles) # material id +Jp = ti.field(dtype=float, shape=n_particles) # plastic deformation +grid_v = ti.Vector.field(2, dtype=float, shape=(n_grid, n_grid)) # grid node momentum/velocity +grid_m = ti.field(dtype=float, shape=(n_grid, n_grid)) # grid node mass + + +@ti.kernel +def substep(): + for i, j in grid_m: + grid_v[i, j] = [0, 0] + grid_m[i, j] = 0 + for p in x: # Particle state update and scatter to grid (P2G) + base = (x[p] * inv_dx - 0.5).cast(int) + fx = x[p] * inv_dx - base.cast(float) + # Quadratic kernels [http://mpm.graphics Eqn. 123, with x=fx, fx-1,fx-2] + w = [0.5 * (1.5 - fx) ** 2, 0.75 - (fx - 1) ** 2, 0.5 * (fx - 0.5) ** 2] + # F[p]: deformation gradient update + F[p] = (ti.Matrix.identity(float, 2) + dt * C[p]) @ F[p] + # h: Hardening coefficient: snow gets harder when compressed + h = ti.exp(10 * (1.0 - Jp[p])) + if material[p] == 1: # jelly, make it softer + h = 0.3 + mu, la = mu_0 * h, lambda_0 * h + if material[p] == 0: # liquid + mu = 0.0 + U, sig, V = ti.svd(F[p]) + # Avoid zero eigenvalues because of numerical errors + for d in ti.static(range(2)): + sig[d, d] = ti.max(sig[d, d], 1e-6) + J = 1.0 + for d in ti.static(range(2)): + new_sig = sig[d, d] + if material[p] == 2: # Snow + new_sig = ti.min(ti.max(sig[d, d], 1 - 2.5e-2), 1 + 4.5e-3) # Plasticity + Jp[p] *= sig[d, d] / new_sig + sig[d, d] = new_sig + J *= new_sig + if material[p] == 0: + # Reset deformation gradient to avoid numerical instability + F[p] = ti.Matrix.identity(float, 2) * ti.sqrt(J) + elif material[p] == 2: + # Reconstruct elastic deformation gradient after plasticity + F[p] = U @ sig @ V.transpose() + stress = 2 * mu * (F[p] - U @ V.transpose()) @ F[p].transpose() + ti.Matrix.identity(float, 2) * la * J * ( + J - 1 + ) + stress = (-dt * p_vol * 4 * inv_dx * inv_dx) * stress + affine = stress + p_mass * C[p] + # Loop over 3x3 grid node neighborhood + for i, j in ti.static(ti.ndrange(3, 3)): + offset = ti.Vector([i, j]) + dpos = (offset.cast(float) - fx) * dx + weight = w[i][0] * w[j][1] + grid_v[base + offset] += weight * (p_mass * v[p] + affine @ dpos) + grid_m[base + offset] += weight * p_mass + for i, j in grid_m: + if grid_m[i, j] > 0: # No need for epsilon here + grid_v[i, j] = (1 / grid_m[i, j]) * grid_v[i, j] # Momentum to velocity + grid_v[i, j][1] -= dt * 50 # gravity + if i < 3 and grid_v[i, j][0] < 0: + grid_v[i, j][0] = 0 # Boundary conditions + if i > n_grid - 3 and grid_v[i, j][0] > 0: + grid_v[i, j][0] = 0 + if j < 3 and grid_v[i, j][1] < 0: + grid_v[i, j][1] = 0 + if j > n_grid - 3 and grid_v[i, j][1] > 0: + grid_v[i, j][1] = 0 + for p in x: # grid to particle (G2P) + base = (x[p] * inv_dx - 0.5).cast(int) + fx = x[p] * inv_dx - base.cast(float) + w = [0.5 * (1.5 - fx) ** 2, 0.75 - (fx - 1.0) ** 2, 0.5 * (fx - 0.5) ** 2] + new_v = ti.Vector.zero(float, 2) + new_C = ti.Matrix.zero(float, 2, 2) + for i, j in ti.static(ti.ndrange(3, 3)): + # loop over 3x3 grid node neighborhood + dpos = ti.Vector([i, j]).cast(float) - fx + g_v = grid_v[base + ti.Vector([i, j])] + weight = w[i][0] * w[j][1] + new_v += weight * g_v + new_C += 4 * inv_dx * weight * g_v.outer_product(dpos) + v[p], C[p] = new_v, new_C + x[p] += dt * v[p] # advection + + +group_size = n_particles // 3 + + +@ti.kernel +def initialize(): + for i in range(n_particles): + x[i] = [ + ti.random() * 0.2 + 0.3 + 0.10 * (i // group_size), + ti.random() * 0.2 + 0.05 + 0.32 * (i // group_size), + ] + material[i] = i // group_size # 0: fluid 1: jelly 2: snow + v[i] = ti.Matrix([0, 0]) + F[i] = ti.Matrix([[1, 0], [0, 1]]) + Jp[i] = 1 + + +def main(): + initialize() + gui = ti.GUI("Taichi MLS-MPM-99", res=512, background_color=0x112F41) + while not gui.get_event(ti.GUI.ESCAPE, ti.GUI.EXIT): + for s in range(int(2e-3 // dt)): + substep() + gui.circles( + x.to_numpy(), + radius=1.5, + palette=[0x068587, 0xED553B, 0xEEEEF0], + palette_indices=material, + ) + # Change to gui.show(f'{frame:06d}.png') to write images to disk + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/simulation/mpm_lagrangian_forces.py b/local_packages/taichi/examples/simulation/mpm_lagrangian_forces.py new file mode 100644 index 0000000..cbc002f --- /dev/null +++ b/local_packages/taichi/examples/simulation/mpm_lagrangian_forces.py @@ -0,0 +1,186 @@ +import numpy as np + +import taichi as ti + +ti.init(arch=ti.gpu) + +dim = 2 +quality = 1 # Use a larger integral number for higher quality +n_particle_x = 100 * quality +n_particle_y = 8 * quality +n_particles = n_particle_x * n_particle_y +n_elements = (n_particle_x - 1) * (n_particle_y - 1) * 2 +n_grid = 64 * quality +dx = 1 / n_grid +inv_dx = 1 / dx +dt = 1e-4 / quality +E = 25000 +p_mass = 1 +p_vol = 1 +mu = 1 +la = 1 + +x = ti.Vector.field(dim, dtype=float, shape=n_particles, needs_grad=True) +v = ti.Vector.field(dim, dtype=float, shape=n_particles) +C = ti.Matrix.field(dim, dim, dtype=float, shape=n_particles) +grid_v = ti.Vector.field(dim, dtype=float, shape=(n_grid, n_grid)) +grid_m = ti.field(dtype=float, shape=(n_grid, n_grid)) +restT = ti.Matrix.field(dim, dim, dtype=float, shape=n_elements) +total_energy = ti.field(dtype=float, shape=(), needs_grad=True) +vertices = ti.field(dtype=ti.i32, shape=(n_elements, 3)) + + +@ti.func +def mesh(i, j): + return i * n_particle_y + j + + +@ti.func +def compute_T(i): + a = vertices[i, 0] + b = vertices[i, 1] + c = vertices[i, 2] + ab = x[b] - x[a] + ac = x[c] - x[a] + return ti.Matrix([[ab[0], ac[0]], [ab[1], ac[1]]]) + + +@ti.kernel +def initialize(): + for i in range(n_particle_x): + for j in range(n_particle_y): + t = mesh(i, j) + x[t] = [0.1 + i * dx * 0.5, 0.7 + j * dx * 0.5] + v[t] = [0, -1] + + # build mesh + for i in range(n_particle_x - 1): + for j in range(n_particle_y - 1): + # element id + eid = (i * (n_particle_y - 1) + j) * 2 + vertices[eid, 0] = mesh(i, j) + vertices[eid, 1] = mesh(i + 1, j) + vertices[eid, 2] = mesh(i, j + 1) + + eid = (i * (n_particle_y - 1) + j) * 2 + 1 + vertices[eid, 0] = mesh(i, j + 1) + vertices[eid, 1] = mesh(i + 1, j + 1) + vertices[eid, 2] = mesh(i + 1, j) + + for i in range(n_elements): + restT[i] = compute_T(i) # Compute rest T + + +@ti.kernel +def compute_total_energy(): + for i in range(n_elements): + currentT = compute_T(i) + F = currentT @ restT[i].inverse() + # NeoHookean + I1 = (F @ F.transpose()).trace() + J = F.determinant() + element_energy = 0.5 * mu * (I1 - 2) - mu * ti.log(J) + 0.5 * la * ti.log(J) ** 2 + total_energy[None] += E * element_energy * dx * dx + + +@ti.kernel +def p2g(): + for p in x: + base = ti.cast(x[p] * inv_dx - 0.5, ti.i32) + fx = x[p] * inv_dx - ti.cast(base, float) + w = [0.5 * (1.5 - fx) ** 2, 0.75 - (fx - 1) ** 2, 0.5 * (fx - 0.5) ** 2] + affine = p_mass * C[p] + for i in ti.static(range(3)): + for j in ti.static(range(3)): + I = ti.Vector([i, j]) + dpos = (float(I) - fx) * dx + weight = w[i].x * w[j].y + grid_v[base + I] += weight * (p_mass * v[p] - dt * x.grad[p] + affine @ dpos) + grid_m[base + I] += weight * p_mass + + +bound = 3 + + +@ti.kernel +def grid_op(): + for i, j in grid_m: + if grid_m[i, j] > 0: + inv_m = 1 / grid_m[i, j] + grid_v[i, j] = inv_m * grid_v[i, j] + grid_v[i, j].y -= dt * 9.8 + + # center collision circle + dist = ti.Vector([i * dx - 0.5, j * dx - 0.5]) + if dist.norm_sqr() < 0.005: + dist = dist.normalized() + grid_v[i, j] -= dist * ti.min(0, grid_v[i, j].dot(dist)) + + # box + if i < bound and grid_v[i, j].x < 0: + grid_v[i, j].x = 0 + if i > n_grid - bound and grid_v[i, j].x > 0: + grid_v[i, j].x = 0 + if j < bound and grid_v[i, j].y < 0: + grid_v[i, j].y = 0 + if j > n_grid - bound and grid_v[i, j].y > 0: + grid_v[i, j].y = 0 + + +@ti.kernel +def g2p(): + for p in x: + base = ti.cast(x[p] * inv_dx - 0.5, ti.i32) + fx = x[p] * inv_dx - float(base) + w = [0.5 * (1.5 - fx) ** 2, 0.75 - (fx - 1.0) ** 2, 0.5 * (fx - 0.5) ** 2] + new_v = ti.Vector([0.0, 0.0]) + new_C = ti.Matrix([[0.0, 0.0], [0.0, 0.0]]) + + for i in ti.static(range(3)): + for j in ti.static(range(3)): + I = ti.Vector([i, j]) + dpos = float(I) - fx + g_v = grid_v[base + I] + weight = w[i].x * w[j].y + new_v += weight * g_v + new_C += 4 * weight * g_v.outer_product(dpos) * inv_dx + + v[p] = new_v + x[p] += dt * v[p] + C[p] = new_C + + +gui = ti.GUI("MPM", (640, 640), background_color=0x112F41) + + +def main(): + initialize() + + vertices_ = vertices.to_numpy() + + while gui.running and not gui.get_event(gui.ESCAPE): + for s in range(int(1e-2 // dt)): + grid_m.fill(0) + grid_v.fill(0) + # Note that we are now differentiating the total energy w.r.t. the particle position. + # Recall that F = - \partial (total_energy) / \partial x + with ti.ad.Tape(total_energy): + # Do the forward computation of total energy and backward propagation for x.grad, which is later used in p2g + compute_total_energy() + # It's OK not to use the computed total_energy at all, since we only need x.grad + p2g() + grid_op() + g2p() + + gui.circle((0.5, 0.5), radius=45, color=0x068587) + particle_pos = x.to_numpy() + a = vertices_.reshape(n_elements * 3) + b = np.roll(vertices_, shift=1, axis=1).reshape(n_elements * 3) + gui.lines(particle_pos[a], particle_pos[b], radius=1, color=0x4FB99F) + gui.circles(particle_pos, radius=1.5, color=0xF2B134) + gui.line((0.00, 0.03 / quality), (1.0, 0.03 / quality), color=0xFFFFFF, radius=3) + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/simulation/nbody.py b/local_packages/taichi/examples/simulation/nbody.py new file mode 100644 index 0000000..b43a704 --- /dev/null +++ b/local_packages/taichi/examples/simulation/nbody.py @@ -0,0 +1,105 @@ +# Authored by Tiantian Liu, Taichi Graphics. +import math + +import taichi as ti + +ti.init(arch=ti.cpu) + +# global control +paused = ti.field(ti.i32, ()) + +# gravitational constant 6.67408e-11, using 1 for simplicity +G = 1 + +# number of planets +N = 3000 +# unit mass +m = 1 +# galaxy size +galaxy_size = 0.4 +# planet radius (for rendering) +planet_radius = 2 +# init vel +init_vel = 120 + +# time-step size +h = 1e-4 +# substepping +substepping = 10 + +# center of the screen +center = ti.Vector.field(2, ti.f32, ()) + +# pos, vel and force of the planets +# Nx2 vectors +pos = ti.Vector.field(2, ti.f32, N) +vel = ti.Vector.field(2, ti.f32, N) +force = ti.Vector.field(2, ti.f32, N) + + +@ti.kernel +def initialize(): + center[None] = [0.5, 0.5] + for i in range(N): + theta = ti.random() * 2 * math.pi + r = (ti.sqrt(ti.random()) * 0.6 + 0.4) * galaxy_size + offset = r * ti.Vector([ti.cos(theta), ti.sin(theta)]) + pos[i] = center[None] + offset + vel[i] = [-offset.y, offset.x] + vel[i] *= init_vel + + +@ti.kernel +def compute_force(): + # clear force + for i in range(N): + force[i] = [0.0, 0.0] + + # compute gravitational force + for i in range(N): + p = pos[i] + for j in range(N): + if i != j: # double the computation for a better memory footprint and load balance + diff = p - pos[j] + r = diff.norm(1e-5) + + # gravitational force -(GMm / r^2) * (diff/r) for i + f = -G * m * m * (1.0 / r) ** 3 * diff + + # assign to each particle + force[i] += f + + +@ti.kernel +def update(): + dt = h / substepping + for i in range(N): + # symplectic euler + vel[i] += dt * force[i] / m + pos[i] += dt * vel[i] + + +def main(): + gui = ti.GUI("N-body problem", (800, 800)) + + initialize() + while gui.running: + for e in gui.get_events(ti.GUI.PRESS): + if e.key in [ti.GUI.ESCAPE, ti.GUI.EXIT]: + exit() + elif e.key == "r": + initialize() + elif e.key == ti.GUI.SPACE: + paused[None] = not paused[None] + + if not paused[None]: + for i in range(substepping): + compute_force() + update() + + gui.circles(pos.to_numpy(), color=0xFFFFFF, radius=planet_radius) + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/simulation/odop_solar.py b/local_packages/taichi/examples/simulation/odop_solar.py new file mode 100644 index 0000000..7c62247 --- /dev/null +++ b/local_packages/taichi/examples/simulation/odop_solar.py @@ -0,0 +1,71 @@ +import math + +import taichi as ti + +ti.init() + + +@ti.data_oriented +class SolarSystem: + def __init__(self, n, dt): # Initializer of the solar system simulator + self.n = n + self.dt = dt + self.x = ti.Vector.field(2, dtype=ti.f32, shape=n) + self.v = ti.Vector.field(2, dtype=ti.f32, shape=n) + self.center = ti.Vector.field(2, dtype=ti.f32, shape=()) + + @staticmethod + @ti.func + def random_vector(radius): # Create a random vector in circle + theta = ti.random() * 2 * math.pi + r = ti.random() * radius + return r * ti.Vector([ti.cos(theta), ti.sin(theta)]) + + @ti.kernel + def initialize_particles(self): + # (Re)initialize particle position/velocities + for i in range(self.n): + offset = self.random_vector(0.5) + self.x[i] = self.center[None] + offset # Offset from center + self.v[i] = [-offset.y, offset.x] # Perpendicular to offset + self.v[i] += self.random_vector(0.02) # Random velocity noise + self.v[i] *= 1 / offset.norm() ** 1.5 # Kepler's third law + + @ti.func + def gravity(self, pos): # Compute gravity at pos + offset = -(pos - self.center[None]) + return offset / offset.norm() ** 3 + + @ti.kernel + def integrate(self): # Semi-implicit Euler time integration + for i in range(self.n): + self.v[i] += self.dt * self.gravity(self.x[i]) + self.x[i] += self.dt * self.v[i] + + @staticmethod + def render(gui): # Render the scene on GUI + gui.circle([0.5, 0.5], radius=10, color=0xFFAA88) + gui.circles(solar.x.to_numpy(), radius=3, color=0xFFFFFF) + + +def main(): + global solar + + solar = SolarSystem(8, 0.0001) + solar.center[None] = [0.5, 0.5] + solar.initialize_particles() + + gui = ti.GUI("Solar System", background_color=0x0071A) + while gui.running: + if gui.get_event() and gui.is_pressed(gui.SPACE): + solar.initialize_particles() # reinitialize when space bar pressed. + + for _ in range(10): # Time integration + solar.integrate() + + solar.render(gui) + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/simulation/pbf2d.py b/local_packages/taichi/examples/simulation/pbf2d.py new file mode 100644 index 0000000..08b2378 --- /dev/null +++ b/local_packages/taichi/examples/simulation/pbf2d.py @@ -0,0 +1,306 @@ +# Macklin, M. and Müller, M., 2013. Position based fluids. ACM Transactions on Graphics (TOG), 32(4), p.104. +# Taichi implementation by Ye Kuang (k-ye) + +import math + +import numpy as np + +import taichi as ti + +ti.init(arch=ti.gpu) + +screen_res = (800, 400) +screen_to_world_ratio = 10.0 +boundary = ( + screen_res[0] / screen_to_world_ratio, + screen_res[1] / screen_to_world_ratio, +) +cell_size = 2.51 +cell_recpr = 1.0 / cell_size + + +def round_up(f, s): + return (math.floor(f * cell_recpr / s) + 1) * s + + +grid_size = (round_up(boundary[0], 1), round_up(boundary[1], 1)) + +dim = 2 +bg_color = 0x112F41 +particle_color = 0x068587 +boundary_color = 0xEBACA2 +num_particles_x = 60 +num_particles = num_particles_x * 20 +max_num_particles_per_cell = 100 +max_num_neighbors = 100 +time_delta = 1.0 / 20.0 +epsilon = 1e-5 +particle_radius = 3.0 +particle_radius_in_world = particle_radius / screen_to_world_ratio + +# PBF params +h_ = 1.1 +mass = 1.0 +rho0 = 1.0 +lambda_epsilon = 100.0 +pbf_num_iters = 5 +corr_deltaQ_coeff = 0.3 +corrK = 0.001 +# Need ti.pow() +# corrN = 4.0 +neighbor_radius = h_ * 1.05 + +poly6_factor = 315.0 / 64.0 / math.pi +spiky_grad_factor = -45.0 / math.pi + +old_positions = ti.Vector.field(dim, float) +positions = ti.Vector.field(dim, float) +velocities = ti.Vector.field(dim, float) +grid_num_particles = ti.field(int) +grid2particles = ti.field(int) +particle_num_neighbors = ti.field(int) +particle_neighbors = ti.field(int) +lambdas = ti.field(float) +position_deltas = ti.Vector.field(dim, float) +# 0: x-pos, 1: timestep in sin() +board_states = ti.Vector.field(2, float) + +ti.root.dense(ti.i, num_particles).place(old_positions, positions, velocities) +grid_snode = ti.root.dense(ti.ij, grid_size) +grid_snode.place(grid_num_particles) +grid_snode.dense(ti.k, max_num_particles_per_cell).place(grid2particles) +nb_node = ti.root.dense(ti.i, num_particles) +nb_node.place(particle_num_neighbors) +nb_node.dense(ti.j, max_num_neighbors).place(particle_neighbors) +ti.root.dense(ti.i, num_particles).place(lambdas, position_deltas) +ti.root.place(board_states) + + +@ti.func +def poly6_value(s, h): + result = 0.0 + if 0 < s and s < h: + x = (h * h - s * s) / (h * h * h) + result = poly6_factor * x * x * x + return result + + +@ti.func +def spiky_gradient(r, h): + result = ti.Vector([0.0, 0.0]) + r_len = r.norm() + if 0 < r_len and r_len < h: + x = (h - r_len) / (h * h * h) + g_factor = spiky_grad_factor * x * x + result = r * g_factor / r_len + return result + + +@ti.func +def compute_scorr(pos_ji): + # Eq (13) + x = poly6_value(pos_ji.norm(), h_) / poly6_value(corr_deltaQ_coeff * h_, h_) + # pow(x, 4) + x = x * x + x = x * x + return (-corrK) * x + + +@ti.func +def get_cell(pos): + return int(pos * cell_recpr) + + +@ti.func +def is_in_grid(c): + # @c: Vector(i32) + return 0 <= c[0] and c[0] < grid_size[0] and 0 <= c[1] and c[1] < grid_size[1] + + +@ti.func +def confine_position_to_boundary(p): + bmin = particle_radius_in_world + bmax = ti.Vector([board_states[None][0], boundary[1]]) - particle_radius_in_world + for i in ti.static(range(dim)): + # Use randomness to prevent particles from sticking into each other after clamping + if p[i] <= bmin: + p[i] = bmin + epsilon * ti.random() + elif bmax[i] <= p[i]: + p[i] = bmax[i] - epsilon * ti.random() + return p + + +@ti.kernel +def move_board(): + # probably more accurate to exert force on particles according to hooke's law. + b = board_states[None] + b[1] += 1.0 + period = 90 + vel_strength = 8.0 + if b[1] >= 2 * period: + b[1] = 0 + b[0] += -ti.sin(b[1] * np.pi / period) * vel_strength * time_delta + board_states[None] = b + + +@ti.kernel +def prologue(): + # save old positions + for i in positions: + old_positions[i] = positions[i] + # apply gravity within boundary + for i in positions: + g = ti.Vector([0.0, -9.8]) + pos, vel = positions[i], velocities[i] + vel += g * time_delta + pos += vel * time_delta + positions[i] = confine_position_to_boundary(pos) + + # clear neighbor lookup table + for I in ti.grouped(grid_num_particles): + grid_num_particles[I] = 0 + for I in ti.grouped(particle_neighbors): + particle_neighbors[I] = -1 + + # update grid + for p_i in positions: + cell = get_cell(positions[p_i]) + # ti.Vector doesn't seem to support unpacking yet + # but we can directly use int Vectors as indices + offs = ti.atomic_add(grid_num_particles[cell], 1) + grid2particles[cell, offs] = p_i + # find particle neighbors + for p_i in positions: + pos_i = positions[p_i] + cell = get_cell(pos_i) + nb_i = 0 + for offs in ti.static(ti.grouped(ti.ndrange((-1, 2), (-1, 2)))): + cell_to_check = cell + offs + if is_in_grid(cell_to_check): + for j in range(grid_num_particles[cell_to_check]): + p_j = grid2particles[cell_to_check, j] + if nb_i < max_num_neighbors and p_j != p_i and (pos_i - positions[p_j]).norm() < neighbor_radius: + particle_neighbors[p_i, nb_i] = p_j + nb_i += 1 + particle_num_neighbors[p_i] = nb_i + + +@ti.kernel +def substep(): + # compute lambdas + # Eq (8) ~ (11) + for p_i in positions: + pos_i = positions[p_i] + + grad_i = ti.Vector([0.0, 0.0]) + sum_gradient_sqr = 0.0 + density_constraint = 0.0 + + for j in range(particle_num_neighbors[p_i]): + p_j = particle_neighbors[p_i, j] + if p_j < 0: + break + pos_ji = pos_i - positions[p_j] + grad_j = spiky_gradient(pos_ji, h_) + grad_i += grad_j + sum_gradient_sqr += grad_j.dot(grad_j) + # Eq(2) + density_constraint += poly6_value(pos_ji.norm(), h_) + + # Eq(1) + density_constraint = (mass * density_constraint / rho0) - 1.0 + + sum_gradient_sqr += grad_i.dot(grad_i) + lambdas[p_i] = (-density_constraint) / (sum_gradient_sqr + lambda_epsilon) + # compute position deltas + # Eq(12), (14) + for p_i in positions: + pos_i = positions[p_i] + lambda_i = lambdas[p_i] + + pos_delta_i = ti.Vector([0.0, 0.0]) + for j in range(particle_num_neighbors[p_i]): + p_j = particle_neighbors[p_i, j] + if p_j < 0: + break + lambda_j = lambdas[p_j] + pos_ji = pos_i - positions[p_j] + scorr_ij = compute_scorr(pos_ji) + pos_delta_i += (lambda_i + lambda_j + scorr_ij) * spiky_gradient(pos_ji, h_) + + pos_delta_i /= rho0 + position_deltas[p_i] = pos_delta_i + # apply position deltas + for i in positions: + positions[i] += position_deltas[i] + + +@ti.kernel +def epilogue(): + # confine to boundary + for i in positions: + pos = positions[i] + positions[i] = confine_position_to_boundary(pos) + # update velocities + for i in positions: + velocities[i] = (positions[i] - old_positions[i]) / time_delta + # no vorticity/xsph because we cannot do cross product in 2D... + + +def run_pbf(): + prologue() + for _ in range(pbf_num_iters): + substep() + epilogue() + + +def render(gui): + gui.clear(bg_color) + pos_np = positions.to_numpy() + for j in range(dim): + pos_np[:, j] *= screen_to_world_ratio / screen_res[j] + gui.circles(pos_np, radius=particle_radius, color=particle_color) + gui.rect( + (0, 0), + (board_states[None][0] / boundary[0], 1), + radius=1.5, + color=boundary_color, + ) + gui.show() + + +@ti.kernel +def init_particles(): + for i in range(num_particles): + delta = h_ * 0.8 + offs = ti.Vector([(boundary[0] - delta * num_particles_x) * 0.5, boundary[1] * 0.02]) + positions[i] = ti.Vector([i % num_particles_x, i // num_particles_x]) * delta + offs + for c in ti.static(range(dim)): + velocities[i][c] = (ti.random() - 0.5) * 4 + board_states[None] = ti.Vector([boundary[0] - epsilon, -0.0]) + + +def print_stats(): + print("PBF stats:") + num = grid_num_particles.to_numpy() + avg, max_ = np.mean(num), np.max(num) + print(f" #particles per cell: avg={avg:.2f} max={max_}") + num = particle_num_neighbors.to_numpy() + avg, max_ = np.mean(num), np.max(num) + print(f" #neighbors per particle: avg={avg:.2f} max={max_}") + + +def main(): + init_particles() + print(f"boundary={boundary} grid={grid_size} cell_size={cell_size}") + gui = ti.GUI("PBF2D", screen_res) + while gui.running and not gui.get_event(gui.ESCAPE): + move_board() + run_pbf() + if gui.frame % 20 == 1: + print_stats() + render(gui) + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/simulation/physarum.py b/local_packages/taichi/examples/simulation/physarum.py new file mode 100644 index 0000000..701cf56 --- /dev/null +++ b/local_packages/taichi/examples/simulation/physarum.py @@ -0,0 +1,86 @@ +"""Physarum simulation example. + +See https://sagejenson.com/physarum for the details.""" + +import numpy as np + +import taichi as ti + +ti.init(arch=ti.gpu) + +PARTICLE_N = 1024 +GRID_SIZE = 512 +SENSE_ANGLE = 0.20 * np.pi +SENSE_DIST = 4.0 +EVAPORATION = 0.95 +MOVE_ANGLE = 0.1 * np.pi +MOVE_STEP = 2.0 + +grid = ti.field(dtype=ti.f32, shape=[2, GRID_SIZE, GRID_SIZE]) +position = ti.Vector.field(2, dtype=ti.f32, shape=[PARTICLE_N]) +heading = ti.field(dtype=ti.f32, shape=[PARTICLE_N]) + + +@ti.kernel +def init(): + for p in ti.grouped(grid): + grid[p] = 0.0 + for i in position: + position[i] = ti.Vector([ti.random(), ti.random()]) * GRID_SIZE + heading[i] = ti.random() * np.pi * 2.0 + + +@ti.func +def sense(phase, pos, ang): + p = pos + ti.Vector([ti.cos(ang), ti.sin(ang)]) * SENSE_DIST + return grid[phase, p.cast(int) % GRID_SIZE] + + +@ti.kernel +def step(phase: ti.i32): + # move + for i in position: + pos, ang = position[i], heading[i] + l = sense(phase, pos, ang - SENSE_ANGLE) + c = sense(phase, pos, ang) + r = sense(phase, pos, ang + SENSE_ANGLE) + if l < c < r: + ang += MOVE_ANGLE + elif l > c > r: + ang -= MOVE_ANGLE + elif c < l and c < r: + ang += MOVE_ANGLE * (2 * (ti.random() < 0.5) - 1) + pos += ti.Vector([ti.cos(ang), ti.sin(ang)]) * MOVE_STEP + position[i], heading[i] = pos, ang + + # deposit + for i in position: + ipos = position[i].cast(int) % GRID_SIZE + grid[phase, ipos] += 1.0 + + # diffuse + for i, j in ti.ndrange(GRID_SIZE, GRID_SIZE): + a = 0.0 + for di in ti.static(range(-1, 2)): + for dj in ti.static(range(-1, 2)): + a += grid[phase, (i + di) % GRID_SIZE, (j + dj) % GRID_SIZE] + a *= EVAPORATION / 9.0 + grid[1 - phase, i, j] = a + + +def main(): + print("[Hint] Use slider to change simulation speed.") + gui = ti.GUI("Physarum") + init() + i = 0 + step_per_frame = gui.slider("step_per_frame", 1, 100, 1) + while gui.running and not gui.get_event(gui.ESCAPE): + for _ in range(int(step_per_frame.value)): + step(i % 2) + i += 1 + gui.set_image(grid.to_numpy()[0]) + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/simulation/snow_phaseField.py b/local_packages/taichi/examples/simulation/snow_phaseField.py new file mode 100644 index 0000000..8603a88 --- /dev/null +++ b/local_packages/taichi/examples/simulation/snow_phaseField.py @@ -0,0 +1,236 @@ +"""phase field model for snow dendrite evolution, which simulates the snow growing from water vapor. +space discretization: finite difference method, time integration: Runge-Kutta method +repo's link: https://github.com/mo-hanxuan/Snow-PhaseField +more details about physical interpretation refer to [Physica D 63(3-4): 410-423]""" + +import numpy as np + +import taichi as ti + + +@ti.data_oriented +class Dendrite: + def __init__( + self, + dx=0.03, # grid space + dt=2.0e-4, # time step + n=512, # field shape + dtype=ti.f64, # data type + n_fold_symmetry=6, # dendrites number + angle0=0.0, # initial angle + ): + self.n = n # the size of the field + ### phase field indicate solid phase (phi=1) and water vapor phase (phi=0) + self.phi = ti.field(dtype=dtype, shape=(n, n)) + self.temperature = ti.field(dtype=dtype, shape=(n, n)) + self.phi_old = ti.field(dtype=dtype, shape=(n, n)) + self.temperature_old = ti.field(dtype=dtype, shape=(n, n)) + self.dEnergy_dGrad_term1 = ti.Vector.field(2, dtype, shape=(n, n)) + self.epsilons = ti.field(dtype=dtype, shape=(n, n)) # anisotropic gradient energy coefficient + self.phiRate = ti.Vector.field(4, dtype=dtype, shape=(n, n)) # rate of phi, with RK4 method + self.temperatureRate = ti.Vector.field(4, dtype=dtype, shape=(n, n)) + + self.dx = dx + self.dt = dt # maximum dt before going unstable is 2.9e-4 + self.mobility = 128.0e2 + + self.grad_energy_coef = 0.006 # magnitude of gradient energy coefficient + self.latent_heat_coef = 1.5 # latent heat coefficient + self.aniso_magnitude = 0.12 # the magnitude of anisotropy + self.n_fold_symmetry = n_fold_symmetry # n-fold rotational symmetry of the crystalline structure + self.angle0 = angle0 / 180.0 * np.pi # initial tilt angle of the crystal + + ### m(T) = (α / π) * arctan[γ(Te - T)], m determines the relative potential between water vapor and crystal + self.alpha = 0.9 / np.pi # α + self.gamma = 10.0 # γ + self.temperature_equi = 1.0 # temperature of equilibrium state + + ### parameters for RK4 method + self.dtRatio_rk4 = ti.field(ti.f64, shape=4) + self.dtRatio_rk4.from_numpy(np.array([0.0, 0.5, 0.5, 1.0])) + self.weights_rk4 = ti.field(ti.f64, shape=4) + self.weights_rk4.from_numpy(np.array([1.0 / 6.0, 1.0 / 3.0, 1.0 / 3.0, 1.0 / 6.0])) + + self.showFrameFrequency = int(4 * 1.0e-4 / self.dt) + + @ti.kernel + def initialize( + self, + ): + radius = 4.0 # 1. + center = ti.Vector([self.n // 2, self.n // 2]) + for I in ti.grouped(self.phi): + if ((ti.Vector([I[0], I[1]]) - center) ** 2).sum() < radius**2: + self.phi[I] = 1.0 + else: + self.phi[I] = 0.0 + self.phi_old[I] = self.phi[I] + + self.temperature[I] = 0.0 # temperature + self.temperature_old[I] = self.temperature[I] + + @ti.func + def neighbor_index(self, i, j): + """use periodic boundary condition to get neighbor index""" + im = i - 1 if i - 1 >= 0 else self.n - 1 + jm = j - 1 if j - 1 >= 0 else self.n - 1 + ip = i + 1 if i + 1 < self.n else 0 + jp = j + 1 if j + 1 < self.n else 0 + return im, jm, ip, jp + + @ti.kernel + def rk4_intermediate_update(self, rk_loop: int): + """update field variables at the intermediate step of RK4""" + phi, phi_old, temperature, temperature_old, dt = ti.static( + self.phi, self.phi_old, self.temperature, self.temperature_old, self.dt + ) + for I in ti.grouped(phi): + phi[I] = phi_old[I] + self.dtRatio_rk4[rk_loop] * dt * self.phiRate[I][rk_loop - 1] + temperature[I] = temperature_old[I] + self.dtRatio_rk4[rk_loop] * dt * self.temperatureRate[I][rk_loop - 1] + + @ti.kernel + def get_rate(self, rk_loop: int): + """get rate of phi and temperature""" + ( + phi, + temperature, + dx, + mobility, + aniso_magnitude, + epsilons, + n_fold_symmetry, + grad_energy_coef, + angle0, + ) = ti.static( + self.phi, + self.temperature, + self.dx, + self.mobility, + self.aniso_magnitude, + self.epsilons, + self.n_fold_symmetry, + self.grad_energy_coef, + self.angle0, + ) + ### first, get epsilons and dEnergy_dGrad_term1 + for i, j in phi: + im, jm, ip, jp = self.neighbor_index(i, j) + grad = ti.Vector( + [ + (phi[ip, j] - phi[im, j]) / (2.0 * dx), + (phi[i, jp] - phi[i, jm]) / (2.0 * dx), + ] + ) + gradNorm = (grad**2).sum() + if gradNorm < 1.0e-8: + self.dEnergy_dGrad_term1[i, j] = ti.Vector([0.0, 0.0]) + angle = ti.atan2(grad[1], grad[0]) + epsilons[i, j] = grad_energy_coef * (1.0 + aniso_magnitude * ti.cos(n_fold_symmetry * (angle - angle0))) + else: + angle = ti.atan2(grad[1], grad[0]) + epsilon = grad_energy_coef * (1.0 + aniso_magnitude * ti.cos(n_fold_symmetry * (angle - angle0))) + epsilons[i, j] = epsilon + dAngle_dGradX = -grad[1] / gradNorm + dAngle_dGradY = grad[0] / gradNorm + tmp = grad_energy_coef * aniso_magnitude * -ti.sin(n_fold_symmetry * (angle - angle0)) * n_fold_symmetry + depsilon_dGrad = tmp * ti.Vector([dAngle_dGradX, dAngle_dGradY]) + self.dEnergy_dGrad_term1[i, j] = epsilon * depsilon_dGrad * gradNorm + + ### then, get the phi rate and temperature rate + for i, j in phi: + im, jm, ip, jp = self.neighbor_index(i, j) + + lapla_phi = ( # laplacian of phi + 2 * (phi[im, j] + phi[i, jm] + phi[ip, j] + phi[i, jp]) + + (phi[im, jm] + phi[im, jp] + phi[ip, jm] + phi[ip, jp]) + - 12 * phi[i, j] + ) / (3.0 * dx * dx) + lapla_tp = ( # laplacian of temperature + 2 * (temperature[im, j] + temperature[i, jm] + temperature[ip, j] + temperature[i, jp]) + + (temperature[im, jm] + temperature[im, jp] + temperature[ip, jm] + temperature[ip, jp]) + - 12 * temperature[i, j] + ) / (3.0 * dx * dx) + + m_chem = self.alpha * ti.atan2(self.gamma * (self.temperature_equi - temperature[i, j]), 1.0) + chemicalForce = phi[i, j] * (1.0 - phi[i, j]) * (phi[i, j] - 0.5 + m_chem) + gradForce_term1 = self.divergence_dEnergy_dGrad_term1(i, j) + grad_epsilon2 = ti.Vector( + [ + (epsilons[ip, j] ** 2 - epsilons[im, j] ** 2) / (2.0 * dx), + (epsilons[i, jp] ** 2 - epsilons[i, jm] ** 2) / (2.0 * dx), + ] + ) + grad_phi = ti.Vector( + [ + (phi[ip, j] - phi[im, j]) / (2.0 * dx), + (phi[i, jp] - phi[i, jm]) / (2.0 * dx), + ] + ) + gradForce_term2 = ( + grad_epsilon2[0] * grad_phi[0] + grad_epsilon2[1] * grad_phi[1] + epsilons[i, j] ** 2 * lapla_phi + ) + + self.phiRate[i, j][rk_loop] = mobility * (chemicalForce + gradForce_term1 + gradForce_term2) + self.temperatureRate[i, j][rk_loop] = lapla_tp + self.latent_heat_coef * self.phiRate[i, j][rk_loop] + + @ti.kernel + def rk4_total_update( + self, + ): + """the final step in RK4 (Runge-Kutta) process""" + ( + dt, + phi, + phi_old, + temperature, + temperature_old, + phiRate, + temperatureRate, + ) = ti.static( + self.dt, + self.phi, + self.phi_old, + self.temperature, + self.temperature_old, + self.phiRate, + self.temperatureRate, + ) + for I in ti.grouped(phi): + for k in ti.static(range(4)): + phi_old[I] = phi_old[I] + self.weights_rk4[k] * dt * phiRate[I][k] + temperature_old[I] = temperature_old[I] + self.weights_rk4[k] * dt * temperatureRate[I][k] + for I in ti.grouped(phi): + phi[I] = phi_old[I] + temperature[I] = temperature_old[I] + + @ti.func + def divergence_dEnergy_dGrad_term1(self, i, j): + im, jm, ip, jp = self.neighbor_index(i, j) + return (self.dEnergy_dGrad_term1[ip, j][0] - self.dEnergy_dGrad_term1[im, j][0]) / (2.0 * self.dx) + ( + self.dEnergy_dGrad_term1[i, jp][1] - self.dEnergy_dGrad_term1[i, jm][1] + ) / (2.0 * self.dx) + + def advance( + self, + ): # advance a time step + self.get_rate(rk_loop=0) + for rk_loop in range(1, 4): + self.rk4_intermediate_update(rk_loop=rk_loop) + self.get_rate(rk_loop=rk_loop) + self.rk4_total_update() + + def getDendritic(self, steps=2048): + self.initialize() + gui = ti.GUI("phase field", res=(self.n, self.n)) + while not gui.get_event(ti.GUI.ESCAPE, ti.GUI.EXIT): + for _ in range(self.showFrameFrequency): + self.advance() + + gui.set_image(self.phi) + gui.show() + return self.phi + + +if __name__ == "__main__": + ti.init(arch=ti.cuda, default_fp=ti.f64) + Dendrite().getDendritic(steps=10000) diff --git a/local_packages/taichi/examples/simulation/stable_fluid.py b/local_packages/taichi/examples/simulation/stable_fluid.py new file mode 100644 index 0000000..316d332 --- /dev/null +++ b/local_packages/taichi/examples/simulation/stable_fluid.py @@ -0,0 +1,395 @@ +# References: +# http://developer.download.nvidia.com/books/HTML/gpugems/gpugems_ch38.html +# https://github.com/PavelDoGreat/WebGL-Fluid-Simulation +# https://www.bilibili.com/video/BV1ZK411H7Hc?p=4 +# https://github.com/ShaneFX/GAMES201/tree/master/HW01 + +import argparse + +import numpy as np + +import taichi as ti + +# How to run: +# `python stable_fluid.py`: use the jacobi iteration to solve the linear system. +# `python stable_fluid.py -S`: use a sparse matrix to do so. +parser = argparse.ArgumentParser() +parser.add_argument( + "-S", + "--use-sp-mat", + action="store_true", + help="Solve Poisson's equation by using a sparse matrix", +) +parser.add_argument( + "-a", + "--arch", + required=False, + default="cpu", + dest="arch", + type=str, + help="The arch (backend) to run this example on", +) +args, unknowns = parser.parse_known_args() + +res = 512 +dt = 0.03 +p_jacobi_iters = 500 # 40 for a quicker but less accurate result +f_strength = 10000.0 +curl_strength = 0 +time_c = 2 +maxfps = 60 +dye_decay = 1 - 1 / (maxfps * time_c) +force_radius = res / 2.0 +debug = False + +use_sparse_matrix = args.use_sp_mat +arch = args.arch +if arch in ["x64", "cpu", "arm64"]: + ti.init(arch=ti.cpu) +elif arch in ["cuda", "gpu"]: + ti.init(arch=ti.cuda) +else: + raise ValueError("Only CPU and CUDA backends are supported for now.") + +if use_sparse_matrix: + print("Using sparse matrix") +else: + print("Using jacobi iteration") + +_velocities = ti.Vector.field(2, float, shape=(res, res)) +_new_velocities = ti.Vector.field(2, float, shape=(res, res)) +velocity_divs = ti.field(float, shape=(res, res)) +velocity_curls = ti.field(float, shape=(res, res)) +_pressures = ti.field(float, shape=(res, res)) +_new_pressures = ti.field(float, shape=(res, res)) +_dye_buffer = ti.Vector.field(3, float, shape=(res, res)) +_new_dye_buffer = ti.Vector.field(3, float, shape=(res, res)) + + +class TexPair: + def __init__(self, cur, nxt): + self.cur = cur + self.nxt = nxt + + def swap(self): + self.cur, self.nxt = self.nxt, self.cur + + +velocities_pair = TexPair(_velocities, _new_velocities) +pressures_pair = TexPair(_pressures, _new_pressures) +dyes_pair = TexPair(_dye_buffer, _new_dye_buffer) + +if use_sparse_matrix: + # use a sparse matrix to solve Poisson's pressure equation. + @ti.kernel + def fill_laplacian_matrix(A: ti.types.sparse_matrix_builder()): + for i, j in ti.ndrange(res, res): + row = i * res + j + center = 0.0 + if j != 0: + A[row, row - 1] += -1.0 + center += 1.0 + if j != res - 1: + A[row, row + 1] += -1.0 + center += 1.0 + if i != 0: + A[row, row - res] += -1.0 + center += 1.0 + if i != res - 1: + A[row, row + res] += -1.0 + center += 1.0 + A[row, row] += center + + N = res * res + K = ti.linalg.SparseMatrixBuilder(N, N, max_num_triplets=N * 6) + F_b = ti.ndarray(ti.f32, shape=N) + + fill_laplacian_matrix(K) + L = K.build() + solver = ti.linalg.SparseSolver(solver_type="LLT") + solver.analyze_pattern(L) + solver.factorize(L) + + +@ti.func +def sample(qf, u, v): + I = ti.Vector([int(u), int(v)]) + I = ti.max(0, ti.min(res - 1, I)) + return qf[I] + + +@ti.func +def lerp(vl, vr, frac): + # frac: [0.0, 1.0] + return vl + frac * (vr - vl) + + +@ti.func +def bilerp(vf, p): + u, v = p + s, t = u - 0.5, v - 0.5 + # floor + iu, iv = ti.floor(s), ti.floor(t) + # fract + fu, fv = s - iu, t - iv + a = sample(vf, iu, iv) + b = sample(vf, iu + 1, iv) + c = sample(vf, iu, iv + 1) + d = sample(vf, iu + 1, iv + 1) + return lerp(lerp(a, b, fu), lerp(c, d, fu), fv) + + +# 3rd order Runge-Kutta +@ti.func +def backtrace(vf: ti.template(), p, dt_: ti.template()): + v1 = bilerp(vf, p) + p1 = p - 0.5 * dt_ * v1 + v2 = bilerp(vf, p1) + p2 = p - 0.75 * dt_ * v2 + v3 = bilerp(vf, p2) + p -= dt_ * ((2 / 9) * v1 + (1 / 3) * v2 + (4 / 9) * v3) + return p + + +@ti.kernel +def advect(vf: ti.template(), qf: ti.template(), new_qf: ti.template()): + for i, j in vf: + p = ti.Vector([i, j]) + 0.5 + p = backtrace(vf, p, dt) + new_qf[i, j] = bilerp(qf, p) * dye_decay + + +@ti.kernel +def apply_impulse(vf: ti.template(), dyef: ti.template(), imp_data: ti.types.ndarray()): + g_dir = -ti.Vector([0, 9.8]) * 300 + for i, j in vf: + omx, omy = imp_data[2], imp_data[3] + mdir = ti.Vector([imp_data[0], imp_data[1]]) + dx, dy = (i + 0.5 - omx), (j + 0.5 - omy) + d2 = dx * dx + dy * dy + # dv = F * dt + factor = ti.exp(-d2 / force_radius) + + dc = dyef[i, j] + a = dc.norm() + + momentum = (mdir * f_strength * factor + g_dir * a / (1 + a)) * dt + + v = vf[i, j] + vf[i, j] = v + momentum + # add dye + if mdir.norm() > 0.5: + dc += ti.exp(-d2 * (4 / (res / 15) ** 2)) * ti.Vector([imp_data[4], imp_data[5], imp_data[6]]) + + dyef[i, j] = dc + + +@ti.kernel +def divergence(vf: ti.template()): + for i, j in vf: + vl = sample(vf, i - 1, j) + vr = sample(vf, i + 1, j) + vb = sample(vf, i, j - 1) + vt = sample(vf, i, j + 1) + vc = sample(vf, i, j) + if i == 0: + vl.x = -vc.x + if i == res - 1: + vr.x = -vc.x + if j == 0: + vb.y = -vc.y + if j == res - 1: + vt.y = -vc.y + velocity_divs[i, j] = (vr.x - vl.x + vt.y - vb.y) * 0.5 + + +@ti.kernel +def vorticity(vf: ti.template()): + for i, j in vf: + vl = sample(vf, i - 1, j) + vr = sample(vf, i + 1, j) + vb = sample(vf, i, j - 1) + vt = sample(vf, i, j + 1) + velocity_curls[i, j] = (vr.y - vl.y - vt.x + vb.x) * 0.5 + + +@ti.kernel +def pressure_jacobi(pf: ti.template(), new_pf: ti.template()): + for i, j in pf: + pl = sample(pf, i - 1, j) + pr = sample(pf, i + 1, j) + pb = sample(pf, i, j - 1) + pt = sample(pf, i, j + 1) + div = velocity_divs[i, j] + new_pf[i, j] = (pl + pr + pb + pt - div) * 0.25 + + +@ti.kernel +def subtract_gradient(vf: ti.template(), pf: ti.template()): + for i, j in vf: + pl = sample(pf, i - 1, j) + pr = sample(pf, i + 1, j) + pb = sample(pf, i, j - 1) + pt = sample(pf, i, j + 1) + vf[i, j] -= 0.5 * ti.Vector([pr - pl, pt - pb]) + + +@ti.kernel +def enhance_vorticity(vf: ti.template(), cf: ti.template()): + # anti-physics visual enhancement... + for i, j in vf: + cl = sample(cf, i - 1, j) + cr = sample(cf, i + 1, j) + cb = sample(cf, i, j - 1) + ct = sample(cf, i, j + 1) + cc = sample(cf, i, j) + force = ti.Vector([abs(ct) - abs(cb), abs(cl) - abs(cr)]).normalized(1e-3) + force *= curl_strength * cc + vf[i, j] = ti.min(ti.max(vf[i, j] + force * dt, -1e3), 1e3) + + +@ti.kernel +def copy_divergence(div_in: ti.template(), div_out: ti.types.ndarray()): + for I in ti.grouped(div_in): + div_out[I[0] * res + I[1]] = -div_in[I] + + +@ti.kernel +def apply_pressure(p_in: ti.types.ndarray(), p_out: ti.template()): + for I in ti.grouped(p_out): + p_out[I] = p_in[I[0] * res + I[1]] + + +def solve_pressure_sp_mat(): + copy_divergence(velocity_divs, F_b) + x = solver.solve(F_b) + apply_pressure(x, pressures_pair.cur) + + +def solve_pressure_jacobi(): + for _ in range(p_jacobi_iters): + pressure_jacobi(pressures_pair.cur, pressures_pair.nxt) + pressures_pair.swap() + + +def step(mouse_data): + advect(velocities_pair.cur, velocities_pair.cur, velocities_pair.nxt) + advect(velocities_pair.cur, dyes_pair.cur, dyes_pair.nxt) + velocities_pair.swap() + dyes_pair.swap() + + apply_impulse(velocities_pair.cur, dyes_pair.cur, mouse_data) + + divergence(velocities_pair.cur) + + if curl_strength: + vorticity(velocities_pair.cur) + enhance_vorticity(velocities_pair.cur, velocity_curls) + + if use_sparse_matrix: + solve_pressure_sp_mat() + else: + solve_pressure_jacobi() + + subtract_gradient(velocities_pair.cur, pressures_pair.cur) + + if debug: + divergence(velocities_pair.cur) + div_s = np.sum(velocity_divs.to_numpy()) + print(f"divergence={div_s}") + + +class MouseDataGen: + def __init__(self): + self.prev_mouse = None + self.prev_color = None + + def __call__(self, gui): + # [0:2]: normalized delta direction + # [2:4]: current mouse xy + # [4:7]: color + mouse_data = np.zeros(8, dtype=np.float32) + if gui.is_pressed(ti.GUI.LMB): + mxy = np.array(gui.get_cursor_pos(), dtype=np.float32) * res + if self.prev_mouse is None: + self.prev_mouse = mxy + # Set lower bound to 0.3 to prevent too dark colors + self.prev_color = (np.random.rand(3) * 0.7) + 0.3 + else: + mdir = mxy - self.prev_mouse + mdir = mdir / (np.linalg.norm(mdir) + 1e-5) + mouse_data[0], mouse_data[1] = mdir[0], mdir[1] + mouse_data[2], mouse_data[3] = mxy[0], mxy[1] + mouse_data[4:7] = self.prev_color + self.prev_mouse = mxy + else: + self.prev_mouse = None + self.prev_color = None + return mouse_data + + +def reset(): + velocities_pair.cur.fill(0) + pressures_pair.cur.fill(0) + dyes_pair.cur.fill(0) + + +def main(): + global debug, curl_strength + visualize_d = True # visualize dye (default) + visualize_v = False # visualize velocity + visualize_c = False # visualize curl + + paused = False + + gui = ti.GUI("Stable Fluid", (res, res)) + md_gen = MouseDataGen() + + while gui.running: + if gui.get_event(ti.GUI.PRESS): + e = gui.event + if e.key == ti.GUI.ESCAPE: + break + elif e.key == "r": + paused = False + reset() + elif e.key == "s": + if curl_strength: + curl_strength = 0 + else: + curl_strength = 7 + elif e.key == "v": + visualize_v = True + visualize_c = False + visualize_d = False + elif e.key == "d": + visualize_d = True + visualize_v = False + visualize_c = False + elif e.key == "c": + visualize_c = True + visualize_d = False + visualize_v = False + elif e.key == "p": + paused = not paused + elif e.key == "d": + debug = not debug + + # Debug divergence: + # print(max((abs(velocity_divs.to_numpy().reshape(-1))))) + + if not paused: + mouse_data = md_gen(gui) + step(mouse_data) + if visualize_c: + vorticity(velocities_pair.cur) + gui.set_image(velocity_curls.to_numpy() * 0.03 + 0.5) + elif visualize_d: + gui.set_image(dyes_pair.cur) + elif visualize_v: + gui.set_image(velocities_pair.cur.to_numpy() * 0.01 + 0.5) + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/simulation/two_stream_instability.py b/local_packages/taichi/examples/simulation/two_stream_instability.py new file mode 100644 index 0000000..fdcc801 --- /dev/null +++ b/local_packages/taichi/examples/simulation/two_stream_instability.py @@ -0,0 +1,98 @@ +# Authored by Luhuai Jiao +# This is a 1D simulation of "two-stream instability" in Plasma Physicis. +# Some settings of the grids and particles are taken from "Introduction to Computational Plasma Physics"(ISBN: 9787030563675) + +import taichi as ti + +ti.init(arch=ti.gpu) # Try to run on GPU +PI = 3.141592653589793 +L = 8 * PI # simulation domain size +dt = 0.1 # time step +substepping = 8 +ng = 32 # number of grids +np = 16384 # numer of particles +vb = 1.0 # beam-velocity, one is vb, the other is -vb +vt = 0.3 # thermal velocity +wp = 1 # Plasma frequence +qm = -1 # charge-mass ratio +q = wp * wp / (qm * np / L) # charge of a particle +rho_back = -q * np / L # background charge density +dx = L / ng # grid spacing +inv_dx = 1.0 / dx +x = ti.Vector.field(1, ti.f32, np) # position +v = ti.Vector.field(1, ti.f32, np) # velocity +rho = ti.Vector.field(1, ti.f32, ng) # charge density +e = ti.Vector.field(1, ti.f32, ng) # electric fields +# to show x-vx on the screen +v_x_pos1 = ti.Vector.field(2, ti.f32, int(np / 2)) +v_x_pos2 = ti.Vector.field(2, ti.f32, int(np / 2)) + + +@ti.kernel +def initialize(): + for p in x: + x[p].x = (p + 1) * L / np + v[p].x = vt * ti.randn() + (-1) ** p * vb # two streams + + +@ti.kernel +def substep(): + for p in x: + x[p] += v[p] * dt + if x[p].x >= L: # periodic boundary condition + x[p] += -L + if x[p].x < 0: + x[p] += L + rho.fill(rho_back) # fill rho with background charge density + for p in x: # Particle state update and scatter to grid (P2G) + base = (x[p] * inv_dx - 0.5).cast(int) + fx = x[p] * inv_dx - 0.5 - base.cast(float) + rho[base] += (1.0 - fx) * q * inv_dx + if base[0] < ng - 1: + rho[base + 1] += fx * q * inv_dx + e.fill(0.0) + ti.loop_config(serialize=True) + for i in range(ng): # compute electric fields + if i == 0: + e[i] = rho[i] * dx * 0.5 + else: + e[i] = e[i - 1] + (rho[i - 1] + rho[i]) * dx * 0.5 + s = 0.0 + for i in e: + s += e[i].x + for i in e: + e[i] += -s / ng + for p in v: # G2P + base = (x[p] * inv_dx - 0.5).cast(int) + fx = x[p] * inv_dx - 0.5 - base.cast(float) + a = e[base] * (1.0 - fx) * qm + if base[0] < ng - 1: + a += e[base + 1] * fx * qm # compute electric force + v[p] += a * dt + + +@ti.kernel +def vx_pos(): # to show x-vx on the screen + for p in x: + if p % 2: + v_x_pos1[int((p - 1) / 2)].x = x[p].x / L + v_x_pos1[int((p - 1) / 2)].y = (v[p].x) / 10 + 0.5 + else: + v_x_pos2[int(p / 2)].x = x[p].x / L + v_x_pos2[int(p / 2)].y = (v[p].x) / 10 + 0.5 + + +def main(): + initialize() + gui = ti.GUI("Shortest PIC", (800, 800)) + while not gui.get_event(ti.GUI.ESCAPE, ti.GUI.EXIT): + for s in range(substepping): + substep() + vx_pos() + gui.circles(v_x_pos1.to_numpy(), color=0x0000FF, radius=2) + gui.circles(v_x_pos2.to_numpy(), color=0xFF0000, radius=2) + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/simulation/vortex_rings.py b/local_packages/taichi/examples/simulation/vortex_rings.py new file mode 100644 index 0000000..c87a343 --- /dev/null +++ b/local_packages/taichi/examples/simulation/vortex_rings.py @@ -0,0 +1,97 @@ +# C++ reference and tutorial (Chinese): https://zhuanlan.zhihu.com/p/26882619 +import math + +import numpy as np + +import taichi as ti + +ti.init(arch=ti.gpu) + +eps = 0.01 +dt = 0.1 + +n_vortex = 4 +n_tracer = 200000 + +pos = ti.Vector.field(2, ti.f32, shape=n_vortex) +new_pos = ti.Vector.field(2, ti.f32, shape=n_vortex) +vort = ti.field(ti.f32, shape=n_vortex) + +tracer = ti.Vector.field(2, ti.f32, shape=n_tracer) + + +@ti.func +def compute_u_single(p, i): + r2 = (p - pos[i]).norm() ** 2 + uv = ti.Vector([pos[i].y - p.y, p.x - pos[i].x]) + return vort[i] * uv / (r2 * math.pi) * 0.5 * (1.0 - ti.exp(-r2 / eps**2)) + + +@ti.func +def compute_u_full(p): + u = ti.Vector([0.0, 0.0]) + for i in range(n_vortex): + u += compute_u_single(p, i) + return u + + +@ti.kernel +def integrate_vortex(): + for i in range(n_vortex): + v = ti.Vector([0.0, 0.0]) + for j in range(n_vortex): + if i != j: + v += compute_u_single(pos[i], j) + new_pos[i] = pos[i] + dt * v + + for i in range(n_vortex): + pos[i] = new_pos[i] + + +@ti.kernel +def advect(): + for i in range(n_tracer): + # Ralston's third-order method + p = tracer[i] + v1 = compute_u_full(p) + v2 = compute_u_full(p + v1 * dt * 0.5) + v3 = compute_u_full(p + v2 * dt * 0.75) + tracer[i] += (2 / 9 * v1 + 1 / 3 * v2 + 4 / 9 * v3) * dt + + +pos[0] = [0, 1] +pos[1] = [0, -1] +pos[2] = [0, 0.3] +pos[3] = [0, -0.3] +vort[0] = 1 +vort[1] = -1 +vort[2] = 1 +vort[3] = -1 + + +@ti.kernel +def init_tracers(): + for i in range(n_tracer): + tracer[i] = [ti.random() - 0.5, ti.random() * 3 - 1.5] + + +def main(): + init_tracers() + gui = ti.GUI("Vortex Rings", (1024, 512), background_color=0xFFFFFF) + + while gui.running: + for i in range(4): # substeps + advect() + integrate_vortex() + + gui.circles( + tracer.to_numpy() * np.array([[0.05, 0.1]]) + np.array([[0.0, 0.5]]), + radius=0.5, + color=0x0, + ) + + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/examples/simulation/waterwave.py b/local_packages/taichi/examples/simulation/waterwave.py new file mode 100644 index 0000000..263f020 --- /dev/null +++ b/local_packages/taichi/examples/simulation/waterwave.py @@ -0,0 +1,98 @@ +# Water wave effect partially based on shallow water equations +# https://en.wikipedia.org/wiki/Shallow_water_equations#Non-conservative_form + +import taichi as ti + +ti.init(arch=ti.gpu) + +light_color = 1 +gravity = 2.0 # larger gravity makes wave propagates faster +damping = 0.2 # larger damping makes wave vanishes faster when propagating +dx = 0.02 +dt = 0.01 +shape = 512, 512 +pixels = ti.field(dtype=float, shape=shape) +background = ti.field(dtype=float, shape=shape) +height = ti.field(dtype=float, shape=shape) +velocity = ti.field(dtype=float, shape=shape) + + +@ti.kernel +def reset(): + for i, j in height: + t = i // 16 + j // 16 + if t % 2 == 0: + background[i, j] = 0.0 + else: + background[i, j] = 0.25 + height[i, j] = 0 + velocity[i, j] = 0 + + +@ti.func +def laplacian(i, j): + return (-4 * height[i, j] + height[i, j - 1] + height[i, j + 1] + height[i + 1, j] + height[i - 1, j]) / (4 * dx**2) + + +@ti.func +def gradient(i, j): + return ti.Vector( + [ + (height[i + 1, j] if i < shape[0] - 1 else 0) - (height[i - 1, j] if i > 1 else 0), + (height[i, j + 1] if j < shape[1] - 1 else 0) - (height[i, j - 1] if j > 1 else 0), + ] + ) * (0.5 / dx) + + +@ti.kernel +def create_wave(amplitude: ti.f32, x: ti.f32, y: ti.f32): + for i, j in ti.ndrange((1, shape[0] - 1), (1, shape[1] - 1)): + r2 = (i - x) ** 2 + (j - y) ** 2 + height[i, j] = height[i, j] + amplitude * ti.exp(-0.02 * r2) + + +@ti.kernel +def update(): + for i, j in ti.ndrange((1, shape[0] - 1), (1, shape[1] - 1)): + acceleration = gravity * laplacian(i, j) - damping * velocity[i, j] + velocity[i, j] = velocity[i, j] + acceleration * dt + + for i, j in ti.ndrange((1, shape[0] - 1), (1, shape[1] - 1)): + height[i, j] = height[i, j] + velocity[i, j] * dt + + +@ti.kernel +def visualize_wave(): + # visualizes the wave using a fresnel-like shading + # a brighter color indicates a steeper wave + # (closer to grazing angle when looked from above) + for i, j in pixels: + g = gradient(i, j) + cos_i = 1 / ti.sqrt(1 + g.norm_sqr()) + brightness = pow(1 - cos_i, 2) + color = background[i, j] + pixels[i, j] = (1 - brightness) * color + brightness * light_color + + +def main(): + print("[Hint] click on the window to create waves") + + reset() + gui = ti.GUI("Water Wave", shape) + while gui.running: + for e in gui.get_events(ti.GUI.PRESS): + if e.key in [ti.GUI.ESCAPE, ti.GUI.EXIT]: + gui.running = False + elif e.key == "r": + reset() + elif e.key == ti.GUI.LMB: + x, y = e.pos + create_wave(3, x * shape[0], y * shape[1]) + update() + visualize_wave() + gui.set_image(pixels) + gui.show() + + +if __name__ == "__main__": + main() diff --git a/local_packages/taichi/experimental.py b/local_packages/taichi/experimental.py new file mode 100644 index 0000000..37fe5ce --- /dev/null +++ b/local_packages/taichi/experimental.py @@ -0,0 +1,13 @@ +from taichi.lang.kernel_impl import real_func as _real_func +import warnings + + +def real_func(func): + warnings.warn( + "ti.experimental.real_func is deprecated because it is no longer experimental. " "Use ti.real_func instead.", + DeprecationWarning, + ) + return _real_func(func) + + +__all__ = ["real_func"] diff --git a/local_packages/taichi/graph/__init__.py b/local_packages/taichi/graph/__init__.py new file mode 100644 index 0000000..f0cf500 --- /dev/null +++ b/local_packages/taichi/graph/__init__.py @@ -0,0 +1 @@ +from ._graph import * diff --git a/local_packages/taichi/graph/_graph.py b/local_packages/taichi/graph/_graph.py new file mode 100644 index 0000000..76b497b --- /dev/null +++ b/local_packages/taichi/graph/_graph.py @@ -0,0 +1,289 @@ +import warnings +from typing import Any, Dict, List + +from taichi._lib import core as _ti_core +from taichi.aot.utils import produce_injected_args +from taichi.lang import enums, impl, kernel_impl +from taichi.lang._ndarray import Ndarray +from taichi.lang._texture import Texture +from taichi.lang.exception import TaichiRuntimeError +from taichi.lang.matrix import Matrix, MatrixType +from taichi.types.texture_type import FORMAT2TY_CH, TY_CH2FORMAT + +ArgKind = _ti_core.ArgKind + + +def gen_cpp_kernel(kernel_fn, args): + kernel = kernel_fn._primal + assert isinstance(kernel, kernel_impl.Kernel) + injected_args = produce_injected_args(kernel, symbolic_args=args) + key = kernel.ensure_compiled(*injected_args) + return kernel.compiled_kernels[key] + + +def flatten_args(args): + unzipped_args = [] + # Tuple for matrix args + # FIXME remove this when native Matrix type is ready + for arg in args: + if isinstance(arg, list): + for sublist in arg: + unzipped_args.extend(sublist) + else: + unzipped_args.append(arg) + return unzipped_args + + +class Sequential: + def __init__(self, seq): + self.seq_ = seq + + def dispatch(self, kernel_fn, *args): + kernel_cpp = gen_cpp_kernel(kernel_fn, args) + unzipped_args = flatten_args(args) + self.seq_.dispatch(kernel_cpp, unzipped_args) + + +class GraphBuilder: + def __init__(self): + self._graph_builder = _ti_core.GraphBuilder() + + def dispatch(self, kernel_fn, *args): + kernel_cpp = gen_cpp_kernel(kernel_fn, args) + unzipped_args = flatten_args(args) + self._graph_builder.dispatch(kernel_cpp, unzipped_args) + + def create_sequential(self): + return Sequential(self._graph_builder.create_sequential()) + + def append(self, node): + # TODO: support appending dispatch node as well. + assert isinstance(node, Sequential) + self._graph_builder.seq().append(node.seq_) + + def compile(self): + return Graph(self._graph_builder.compile()) + + +class Graph: + def __init__(self, compiled_graph) -> None: + self._compiled_graph = compiled_graph + + def run(self, args): + # Support native python numerical types (int, float), Ndarray. + # Taichi Matrix types are flattened into (int, float) arrays. + # TODO diminish the flatten behavior when Matrix becomes a Taichi native type. + flattened = {} + for k, v in args.items(): + if isinstance(v, Ndarray): + flattened[k] = v.arr + elif isinstance(v, Texture): + flattened[k] = v.tex + elif isinstance(v, Matrix): + flattened[k] = v.entries + elif isinstance(v, (int, float)): + flattened[k] = v + else: + raise TaichiRuntimeError( + f"Only python int, float, ti.Matrix and ti.Ndarray are supported as runtime arguments but got {type(v)}" + ) + self._compiled_graph.jit_run(impl.get_runtime().prog.config(), flattened) + + +def _deprecate_arg_args(kwargs: Dict[str, Any]): + if "field_dim" in kwargs: + warnings.warn( + "The field_dim argument for ndarray will be deprecated in v1.6.0, use ndim instead.", + DeprecationWarning, + ) + if "ndim" in kwargs: + raise TaichiRuntimeError( + "field_dim is deprecated, please do not specify field_dim and ndim at the same time." + ) + kwargs["ndim"] = kwargs["field_dim"] + del kwargs["field_dim"] + tag = kwargs["tag"] + + if tag == ArgKind.SCALAR: + if "element_shape" in kwargs: + raise TaichiRuntimeError( + "The element_shape argument for scalar is deprecated in v1.6.0, and is removed in v1.7.0. " + "Please remove them." + ) + + if tag == ArgKind.NDARRAY: + if "element_shape" not in kwargs: + if "dtype" in kwargs: + dtype = kwargs["dtype"] + if isinstance(dtype, MatrixType): + kwargs["dtype"] = dtype.dtype + kwargs["element_shape"] = dtype.get_shape() + else: + kwargs["element_shape"] = () + else: + raise TaichiRuntimeError( + "The element_shape argument for ndarray is deprecated in v1.6.0, and it is removed in v1.7.0. " + "Please use vector or matrix data type instead." + ) + + if tag == ArgKind.RWTEXTURE or tag == ArgKind.TEXTURE: + if "dtype" in kwargs: + warnings.warn( + "The dtype argument for texture will be deprecated in v1.6.0, use format instead.", + DeprecationWarning, + ) + del kwargs["dtype"] + + if "shape" in kwargs: + raise TaichiRuntimeError( + "The shape argument for texture is deprecated in v1.6.0, and it is removed in v1.7.0. " + "Please use ndim instead. (Note that you no longer need the exact texture size.)" + ) + + if "channel_format" in kwargs or "num_channels" in kwargs: + if "fmt" in kwargs: + raise TaichiRuntimeError( + "channel_format and num_channels are deprecated, please do not specify channel_format/num_channels and fmt at the same time." + ) + if tag == ArgKind.RWTEXTURE: + fmt = TY_CH2FORMAT[(kwargs["channel_format"], kwargs["num_channels"])] + kwargs["fmt"] = fmt + raise TaichiRuntimeError( + "The channel_format and num_channels arguments for texture are deprecated in v1.6.0, " + "and they are removed in v1.7.0. Please use fmt instead." + ) + else: + raise TaichiRuntimeError( + "The channel_format and num_channels arguments are no longer required for non-RW textures " + "since v1.6.0, and they are removed in v1.7.0. Please remove them." + ) + + +def _check_args(kwargs: Dict[str, Any], allowed_kwargs: List[str]): + for k, v in kwargs.items(): + if k not in allowed_kwargs: + raise TaichiRuntimeError( + f"Invalid argument: {k}, you can only create a graph argument with: {allowed_kwargs}" + ) + if k == "tag": + if not isinstance(v, ArgKind): + raise TaichiRuntimeError(f"tag must be a ArgKind variant, but found {type(v)}.") + if k == "name": + if not isinstance(v, str): + raise TaichiRuntimeError(f"name must be a string, but found {type(v)}.") + + +def _make_arg_scalar(kwargs: Dict[str, Any]): + allowed_kwargs = [ + "tag", + "name", + "dtype", + ] + _check_args(kwargs, allowed_kwargs) + name = kwargs["name"] + dtype = kwargs["dtype"] + if isinstance(dtype, MatrixType): + raise TaichiRuntimeError(f"Tag ArgKind.SCALAR must specify a scalar type, but found {type(dtype)}.") + return _ti_core.Arg(ArgKind.SCALAR, name, dtype, 0, []) + + +def _make_arg_ndarray(kwargs: Dict[str, Any]): + allowed_kwargs = [ + "tag", + "name", + "dtype", + "ndim", + "element_shape", + ] + _check_args(kwargs, allowed_kwargs) + name = kwargs["name"] + ndim = kwargs["ndim"] + dtype = kwargs["dtype"] + element_shape = kwargs["element_shape"] + if isinstance(dtype, MatrixType): + raise TaichiRuntimeError(f"Tag ArgKind.NDARRAY must specify a scalar type, but found {dtype}.") + return _ti_core.Arg(ArgKind.NDARRAY, name, dtype, ndim, element_shape) + + +def _make_arg_matrix(kwargs: Dict[str, Any]): + allowed_kwargs = [ + "tag", + "name", + "dtype", + ] + _check_args(kwargs, allowed_kwargs) + name = kwargs["name"] + dtype = kwargs["dtype"] + if not isinstance(dtype, MatrixType): + raise TaichiRuntimeError(f"Tag ArgKind.MATRIX must specify matrix type, but got {dtype}.") + return _ti_core.Arg(ArgKind.MATRIX, f"{name}", dtype.dtype, 0, [dtype.n, dtype.m]) + + +def _make_arg_texture(kwargs: Dict[str, Any]): + allowed_kwargs = [ + "tag", + "name", + "ndim", + ] + _check_args(kwargs, allowed_kwargs) + name = kwargs["name"] + ndim = kwargs["ndim"] + return _ti_core.Arg(ArgKind.TEXTURE, name, impl.f32, 4, [2] * ndim) + + +def _make_arg_rwtexture(kwargs: Dict[str, Any]): + allowed_kwargs = [ + "tag", + "name", + "ndim", + "fmt", + ] + _check_args(kwargs, allowed_kwargs) + name = kwargs["name"] + ndim = kwargs["ndim"] + fmt = kwargs["fmt"] + if fmt == enums.Format.unknown: + raise TaichiRuntimeError(f"Tag ArgKind.RWTEXTURE must specify a valid color format, but found {fmt}.") + channel_format, num_channels = FORMAT2TY_CH[fmt] + return _ti_core.Arg(ArgKind.RWTEXTURE, name, channel_format, num_channels, [2] * ndim) + + +def _make_arg(kwargs: Dict[str, Any]): + assert "tag" in kwargs + _deprecate_arg_args(kwargs) + proc = { + ArgKind.SCALAR: _make_arg_scalar, + ArgKind.NDARRAY: _make_arg_ndarray, + ArgKind.MATRIX: _make_arg_matrix, + ArgKind.TEXTURE: _make_arg_texture, + ArgKind.RWTEXTURE: _make_arg_rwtexture, + } + tag = kwargs["tag"] + return proc[tag](kwargs) + + +def _kwarg_rewriter(args, kwargs): + for i, arg in enumerate(args): + rewrite_map = { + 0: "tag", + 1: "name", + 2: "dtype", + 3: "ndim", + 4: "field_dim", + 5: "element_shape", + 6: "channel_format", + 7: "shape", + 8: "num_channels", + } + if i in rewrite_map: + kwargs[rewrite_map[i]] = arg + else: + raise TaichiRuntimeError(f"Unexpected {i}th positional argument") + + +def Arg(*args, **kwargs): + _kwarg_rewriter(args, kwargs) + return _make_arg(kwargs) + + +__all__ = ["GraphBuilder", "Graph", "Arg", "ArgKind"] diff --git a/local_packages/taichi/lang/__init__.py b/local_packages/taichi/lang/__init__.py new file mode 100644 index 0000000..8d459f7 --- /dev/null +++ b/local_packages/taichi/lang/__init__.py @@ -0,0 +1,48 @@ +from taichi.lang import impl, simt +from taichi.lang._ndarray import * +from taichi.lang._ndrange import ndrange +from taichi.lang._texture import Texture +from taichi.lang.enums import DeviceCapability, Format, Layout +from taichi.lang.exception import * +from taichi.lang.field import * +from taichi.lang.impl import * +from taichi.lang.kernel_impl import * +from taichi.lang.matrix import * +from taichi.lang.mesh import * +from taichi.lang.misc import * # pylint: disable=W0622 +from taichi.lang.ops import * # pylint: disable=W0622 +from taichi.lang.runtime_ops import * +from taichi.lang.snode import * +from taichi.lang.source_builder import * +from taichi.lang.struct import * +from taichi.lang.argpack import * + +__all__ = [ + s + for s in dir() + if not s.startswith("_") + and s + not in [ + "any_array", + "ast", + "common_ops", + "enums", + "exception", + "expr", + "impl", + "inspect", + "kernel_arguments", + "kernel_impl", + "matrix", + "mesh", + "misc", + "ops", + "platform", + "runtime_ops", + "shell", + "snode", + "source_builder", + "struct", + "util", + ] +] diff --git a/local_packages/taichi/lang/_ndarray.py b/local_packages/taichi/lang/_ndarray.py new file mode 100644 index 0000000..796366a --- /dev/null +++ b/local_packages/taichi/lang/_ndarray.py @@ -0,0 +1,340 @@ +import numpy as np +from taichi._lib import core as _ti_core +from taichi.lang import impl +from taichi.lang.enums import Layout +from taichi.lang.exception import TaichiIndexError +from taichi.lang.util import cook_dtype, get_traceback, python_scope, to_numpy_type +from taichi.types import primitive_types +from taichi.types.ndarray_type import NdarrayTypeMetadata +from taichi.types.utils import is_real, is_signed + + +class Ndarray: + """Taichi ndarray class. + + Args: + dtype (DataType): Data type of each value. + shape (Tuple[int]): Shape of the Ndarray. + """ + + def __init__(self): + self.host_accessor = None + self.shape = None + self.element_type = None + self.dtype = None + self.arr = None + self.layout = Layout.AOS + self.grad = None + + def get_type(self): + return NdarrayTypeMetadata(self.element_type, self.shape, self.grad is not None) + + @property + def element_shape(self): + """Gets ndarray element shape. + + Returns: + Tuple[Int]: Ndarray element shape. + """ + raise NotImplementedError() + + @python_scope + def __setitem__(self, key, value): + """Sets ndarray element in Python scope. + + Args: + key (Union[List[int], int, None]): Coordinates of the ndarray element. + value (element type): Value to set. + """ + raise NotImplementedError() + + @python_scope + def __getitem__(self, key): + """Gets ndarray element in Python scope. + + Args: + key (Union[List[int], int, None]): Coordinates of the ndarray element. + + Returns: + element type: Value retrieved. + """ + raise NotImplementedError() + + @python_scope + def fill(self, val): + """Fills ndarray with a specific scalar value. + + Args: + val (Union[int, float]): Value to fill. + """ + if impl.current_cfg().arch != _ti_core.Arch.cuda and impl.current_cfg().arch != _ti_core.Arch.x64: + self._fill_by_kernel(val) + elif _ti_core.is_tensor(self.element_type): + self._fill_by_kernel(val) + elif self.dtype == primitive_types.f32: + impl.get_runtime().prog.fill_float(self.arr, val) + elif self.dtype == primitive_types.i32: + impl.get_runtime().prog.fill_int(self.arr, val) + elif self.dtype == primitive_types.u32: + impl.get_runtime().prog.fill_uint(self.arr, val) + else: + self._fill_by_kernel(val) + + @python_scope + def _ndarray_to_numpy(self): + """Converts ndarray to a numpy array. + + Returns: + numpy.ndarray: The result numpy array. + """ + arr = np.zeros(shape=self.arr.total_shape(), dtype=to_numpy_type(self.dtype)) + from taichi._kernels import ndarray_to_ext_arr # pylint: disable=C0415 + + ndarray_to_ext_arr(self, arr) + impl.get_runtime().sync() + return arr + + @python_scope + def _ndarray_matrix_to_numpy(self, as_vector): + """Converts matrix ndarray to a numpy array. + + Returns: + numpy.ndarray: The result numpy array. + """ + arr = np.zeros(shape=self.arr.total_shape(), dtype=to_numpy_type(self.dtype)) + from taichi._kernels import ndarray_matrix_to_ext_arr # pylint: disable=C0415 + + layout_is_aos = 1 + ndarray_matrix_to_ext_arr(self, arr, layout_is_aos, as_vector) + impl.get_runtime().sync() + return arr + + @python_scope + def _ndarray_from_numpy(self, arr): + """Loads all values from a numpy array. + + Args: + arr (numpy.ndarray): The source numpy array. + """ + if not isinstance(arr, np.ndarray): + raise TypeError(f"{np.ndarray} expected, but {type(arr)} provided") + if tuple(self.arr.total_shape()) != tuple(arr.shape): + raise ValueError(f"Mismatch shape: {tuple(self.arr.shape)} expected, but {tuple(arr.shape)} provided") + if not arr.flags.c_contiguous: + arr = np.ascontiguousarray(arr) + + from taichi._kernels import ext_arr_to_ndarray # pylint: disable=C0415 + + ext_arr_to_ndarray(arr, self) + impl.get_runtime().sync() + + @python_scope + def _ndarray_matrix_from_numpy(self, arr, as_vector): + """Loads all values from a numpy array. + + Args: + arr (numpy.ndarray): The source numpy array. + """ + if not isinstance(arr, np.ndarray): + raise TypeError(f"{np.ndarray} expected, but {type(arr)} provided") + if tuple(self.arr.total_shape()) != tuple(arr.shape): + raise ValueError( + f"Mismatch shape: {tuple(self.arr.total_shape())} expected, but {tuple(arr.shape)} provided" + ) + if not arr.flags.c_contiguous: + arr = np.ascontiguousarray(arr) + + from taichi._kernels import ext_arr_to_ndarray_matrix # pylint: disable=C0415 + + layout_is_aos = 1 + ext_arr_to_ndarray_matrix(arr, self, layout_is_aos, as_vector) + impl.get_runtime().sync() + + @python_scope + def _get_element_size(self): + """Returns the size of one element in bytes. + + Returns: + Size in bytes. + """ + return self.arr.element_size() + + @python_scope + def _get_nelement(self): + """Returns the total number of elements. + + Returns: + Total number of elements. + """ + return self.arr.nelement() + + @python_scope + def copy_from(self, other): + """Copies all elements from another ndarray. + + The shape of the other ndarray needs to be the same as `self`. + + Args: + other (Ndarray): The source ndarray. + """ + assert isinstance(other, Ndarray) + assert tuple(self.arr.shape) == tuple(other.arr.shape) + from taichi._kernels import ndarray_to_ndarray # pylint: disable=C0415 + + ndarray_to_ndarray(self, other) + impl.get_runtime().sync() + + def _set_grad(self, grad): + """Sets the gradient ndarray. + + Args: + grad (Ndarray): The gradient ndarray. + """ + self.grad = grad + + def __deepcopy__(self, memo=None): + """Copies all elements to a new ndarray. + + Returns: + Ndarray: The result ndarray. + """ + raise NotImplementedError() + + def _fill_by_kernel(self, val): + """Fills ndarray with a specific scalar value using a ti.kernel. + + Args: + val (Union[int, float]): Value to fill. + """ + raise NotImplementedError() + + @python_scope + def _pad_key(self, key): + if key is None: + key = () + if not isinstance(key, (tuple, list)): + key = (key,) + if len(key) != len(self.arr.total_shape()): + raise TaichiIndexError(f"{len(self.arr.total_shape())}d ndarray indexed with {len(key)}d indices: {key}") + return key + + @python_scope + def _initialize_host_accessor(self): + if self.host_accessor: + return + impl.get_runtime().materialize() + self.host_accessor = NdarrayHostAccessor(self.arr) + + +class ScalarNdarray(Ndarray): + """Taichi ndarray with scalar elements. + + Args: + dtype (DataType): Data type of each value. + shape (Tuple[int]): Shape of the ndarray. + """ + + def __init__(self, dtype, arr_shape): + super().__init__() + self.dtype = cook_dtype(dtype) + self.arr = impl.get_runtime().prog.create_ndarray( + self.dtype, arr_shape, layout=Layout.NULL, zero_fill=True, dbg_info=_ti_core.DebugInfo(get_traceback()) + ) + self.shape = tuple(self.arr.shape) + self.element_type = dtype + + def __del__(self): + if impl is not None and impl.get_runtime() is not None and impl.get_runtime().prog is not None: + impl.get_runtime().prog.delete_ndarray(self.arr) + + @property + def element_shape(self): + return () + + @python_scope + def __setitem__(self, key, value): + self._initialize_host_accessor() + self.host_accessor.setter(value, *self._pad_key(key)) + + @python_scope + def __getitem__(self, key): + self._initialize_host_accessor() + return self.host_accessor.getter(*self._pad_key(key)) + + @python_scope + def to_numpy(self): + return self._ndarray_to_numpy() + + @python_scope + def from_numpy(self, arr): + self._ndarray_from_numpy(arr) + + def __deepcopy__(self, memo=None): + ret_arr = ScalarNdarray(self.dtype, self.shape) + ret_arr.copy_from(self) + return ret_arr + + def _fill_by_kernel(self, val): + from taichi._kernels import fill_ndarray # pylint: disable=C0415 + + fill_ndarray(self, val) + + def __repr__(self): + return "" + + +class NdarrayHostAccessor: + def __init__(self, ndarray): + dtype = ndarray.element_data_type() + if is_real(dtype): + + def getter(*key): + return ndarray.read_float(key) + + def setter(value, *key): + ndarray.write_float(key, value) + + else: + if is_signed(dtype): + + def getter(*key): + return ndarray.read_int(key) + + else: + + def getter(*key): + return ndarray.read_uint(key) + + def setter(value, *key): + ndarray.write_int(key, value) + + self.getter = getter + self.setter = setter + + +class NdarrayHostAccess: + """Class for accessing VectorNdarray/MatrixNdarray in Python scope. + Args: + arr (Union[VectorNdarray, MatrixNdarray]): See above. + indices_first (Tuple[Int]): Indices of first-level access (coordinates in the field). + indices_second (Tuple[Int]): Indices of second-level access (indices in the vector/matrix). + """ + + def __init__(self, arr, indices_first, indices_second): + self.ndarr = arr + self.arr = arr.arr + self.indices = indices_first + indices_second + + def getter(): + self.ndarr._initialize_host_accessor() + return self.ndarr.host_accessor.getter(*self.ndarr._pad_key(self.indices)) + + def setter(value): + self.ndarr._initialize_host_accessor() + self.ndarr.host_accessor.setter(value, *self.ndarr._pad_key(self.indices)) + + self.getter = getter + self.setter = setter + + +__all__ = ["Ndarray", "ScalarNdarray"] diff --git a/local_packages/taichi/lang/_ndrange.py b/local_packages/taichi/lang/_ndrange.py new file mode 100644 index 0000000..a729b21 --- /dev/null +++ b/local_packages/taichi/lang/_ndrange.py @@ -0,0 +1,149 @@ +import collections.abc +from typing import Iterable + +import numpy as np +from taichi.lang import ops +from taichi.lang.exception import TaichiSyntaxError, TaichiTypeError +from taichi.lang.expr import Expr +from taichi.lang.matrix import Matrix +from taichi.types.utils import is_integral + + +class _Ndrange: + def __init__(self, *args): + args = list(args) + for i, arg in enumerate(args): + if not isinstance(arg, collections.abc.Sequence): + args[i] = (0, arg) + if len(args[i]) != 2: + raise TaichiSyntaxError( + "Every argument of ndrange should be a scalar or a tuple/list like (begin, end)" + ) + args[i] = (args[i][0], ops.max(args[i][0], args[i][1])) + for arg in args: + for bound in arg: + if not isinstance(bound, (int, np.integer)) and not ( + isinstance(bound, Expr) and is_integral(bound.ptr.get_rvalue_type()) + ): + raise TaichiTypeError( + "Every argument of ndrange should be an integer scalar or a tuple/list of (int, int)" + ) + self.bounds = args + + self.dimensions = [None] * len(args) + for i, bound in enumerate(self.bounds): + self.dimensions[i] = bound[1] - bound[0] + + self.acc_dimensions = self.dimensions.copy() + for i in reversed(range(len(self.bounds) - 1)): + self.acc_dimensions[i] = self.acc_dimensions[i] * self.acc_dimensions[i + 1] + if len(self.acc_dimensions) == 0: # for the empty case, e.g. ti.ndrange() + self.acc_dimensions = [1] + + def __iter__(self): + def gen(d, prefix): + if d == len(self.bounds): + yield prefix + else: + for t in range(self.bounds[d][0], self.bounds[d][1]): + yield from gen(d + 1, prefix + (t,)) + + yield from gen(0, ()) + + def grouped(self): + return GroupedNDRange(self) + + +def ndrange(*args) -> Iterable: + """Return an immutable iterator object for looping over multi-dimensional indices. + + This returned set of multi-dimensional indices is the direct product (in the set-theory sense) + of n groups of integers, where n equals the number of arguments in the input list, and looks like + + range(x1, y1) x range(x2, y2) x ... x range(xn, yn) + + The k-th argument corresponds to the k-th `range()` factor in the above product, and each + argument must be an integer or a pair of two integers. An integer argument n will be interpreted + as `range(0, n)`, and a pair of two integers (start, end) will be interpreted as `range(start, end)`. + + You can loop over these multi-dimensonal indices in different ways, see the examples below. + + Args: + entries: (int, tuple): Must be either an integer, or a tuple/list of two integers. + + Returns: + An immutable iterator object. + + Example:: + + You can loop over 1-D integers in range [start, end), as in native Python + + >>> @ti.kernel + >>> def loop_1d(): + >>> start = 2 + >>> end = 5 + >>> for i in ti.ndrange((start, end)): + >>> print(i) # will print 2 3 4 + + Note the braces around `(start, end)` in the above code. If without them, + the parameter `2` will be interpreted as `range(0, 2)`, `5` will be + interpreted as `range(0, 5)`, and you will get a set of 2-D indices which + contains 2x5=10 elements, and need two indices i, j to loop over them: + + >>> @ti.kernel + >>> def loop_2d(): + >>> for i, j in ti.ndrange(2, 5): + >>> print(i, j) + 0 0 + ... + 0 4 + ... + 1 4 + + But you do can use a single index i to loop over these 2-D indices, in this case + the indices are returned as a 1-D array `(0, 1, ..., 9)`: + + >>> @ti.kernel + >>> def loop_2d_as_1d(): + >>> for i in ti.ndrange(2, 5): + >>> print(i) + will print 0 1 2 3 4 5 6 7 8 9 + + In general, you can use any `1 <= k <= n` iterators to loop over a set of n-D + indices. For `k=n` all the indices are n-dimensional, and they are returned in + lexical order, but for `k>> @ti.kernel + >>> def loop_3d_as_2d(): + >>> # use two iterators to loop over a set of 3-D indices + >>> # the last two dimensions for 4, 5 will collapse into + >>> # the array [0, 1, 2, ..., 19] + >>> for i, j in ti.ndrange(3, 4, 5): + >>> print(i, j) + will print 0 0, 0 1, ..., 0 19, ..., 2 19. + + A typical usage of `ndrange` is when you want to loop over a tensor and process + its entries in parallel. You should avoid writing nested `for` loops here since + only top level `for` loops are paralleled in taichi, instead you can use `ndrange` + to hold all entries in one top level loop: + + >>> @ti.kernel + >>> def loop_tensor(): + >>> for row, col, channel in ti.ndrange(image_height, image_width, channels): + >>> image[row, col, channel] = ... + """ + return _Ndrange(*args) + + +class GroupedNDRange: + def __init__(self, r): + self.r = r + + def __iter__(self): + for ind in self.r: + yield Matrix(list(ind)) + + +__all__ = ["ndrange"] diff --git a/local_packages/taichi/lang/_texture.py b/local_packages/taichi/lang/_texture.py new file mode 100644 index 0000000..2b8735a --- /dev/null +++ b/local_packages/taichi/lang/_texture.py @@ -0,0 +1,169 @@ +import numpy as np +from taichi._lib import core as _ti_core +from taichi.lang import impl +from taichi.lang.expr import Expr, make_expr_group +from taichi.lang.matrix import Matrix +from taichi.lang.util import taichi_scope +from taichi.types import vector +from taichi.types.primitive_types import f32 + + +def _get_entries(mat): + if isinstance(mat, Matrix): + return mat.entries + return [mat] + + +class TextureSampler: + def __init__(self, ptr_expr, num_dims) -> None: + self.ptr_expr = ptr_expr + self.num_dims = num_dims + + @taichi_scope + def sample_lod(self, uv, lod): + ast_builder = impl.get_runtime().compiling_callable.ast_builder() + dbg_info = _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()) + args_group = make_expr_group(*_get_entries(uv), lod) + v = ast_builder.make_texture_op_expr(_ti_core.TextureOpType.kSampleLod, self.ptr_expr, args_group, dbg_info) + r = impl.call_internal("composite_extract_0", v, with_runtime_context=False) + g = impl.call_internal("composite_extract_1", v, with_runtime_context=False) + b = impl.call_internal("composite_extract_2", v, with_runtime_context=False) + a = impl.call_internal("composite_extract_3", v, with_runtime_context=False) + return vector(4, f32)([r, g, b, a]) + + @taichi_scope + def fetch(self, index, lod): + ast_builder = impl.get_runtime().compiling_callable.ast_builder() + dbg_info = _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()) + args_group = make_expr_group(*_get_entries(index), lod) + v = ast_builder.make_texture_op_expr(_ti_core.TextureOpType.kFetchTexel, self.ptr_expr, args_group, dbg_info) + r = impl.call_internal("composite_extract_0", v, with_runtime_context=False) + g = impl.call_internal("composite_extract_1", v, with_runtime_context=False) + b = impl.call_internal("composite_extract_2", v, with_runtime_context=False) + a = impl.call_internal("composite_extract_3", v, with_runtime_context=False) + return vector(4, f32)([r, g, b, a]) + + +class RWTextureAccessor: + def __init__(self, ptr_expr, num_dims) -> None: + # taichi_python.TexturePtrExpression. + self.ptr_expr = ptr_expr + self.num_dims = num_dims + + @taichi_scope + def load(self, index): + ast_builder = impl.get_runtime().compiling_callable.ast_builder() + dbg_info = _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()) + args_group = make_expr_group(*_get_entries(index)) + v = ast_builder.make_texture_op_expr(_ti_core.TextureOpType.kLoad, self.ptr_expr, args_group, dbg_info) + r = impl.call_internal("composite_extract_0", v, with_runtime_context=False) + g = impl.call_internal("composite_extract_1", v, with_runtime_context=False) + b = impl.call_internal("composite_extract_2", v, with_runtime_context=False) + a = impl.call_internal("composite_extract_3", v, with_runtime_context=False) + return vector(4, f32)([r, g, b, a]) + + @taichi_scope + def store(self, index, value): + ast_builder = impl.get_runtime().compiling_callable.ast_builder() + dbg_info = _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()) + args_group = make_expr_group(*_get_entries(index), *_get_entries(value)) + impl.expr_init( + ast_builder.make_texture_op_expr(_ti_core.TextureOpType.kStore, self.ptr_expr, args_group, dbg_info) + ) + + @property + @taichi_scope + def shape(self): + """A list containing sizes for each dimension. Note that element shape will be excluded. + + Returns: + List[Int]: The result list. + """ + dim = _ti_core.get_external_tensor_dim(self.ptr_expr) + dbg_info = _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()) + ret = [Expr(_ti_core.get_external_tensor_shape_along_axis(self.ptr_expr, i, dbg_info)) for i in range(dim)] + return ret + + @taichi_scope + def _loop_range(self): + """Gets the corresponding taichi_python.Expr to serve as loop range. + + Returns: + taichi_python.Expr: See above. + """ + return self.ptr_expr + + +class Texture: + """Taichi Texture class. + + Args: + fmt (ti.Format): Color format of the texture. + shape (Tuple[int]): Shape of the Texture. + """ + + def __init__(self, fmt, arr_shape): + self.tex = impl.get_runtime().prog.create_texture(fmt, arr_shape) + self.fmt = fmt + self.num_dims = len(arr_shape) + self.shape = arr_shape + + def from_ndarray(self, ndarray): + """Loads an ndarray to texture. + + Args: + ndarray (ti.Ndarray): Source ndarray to load from. + """ + self.tex.from_ndarray(ndarray.arr) + + def from_field(self, field): + """Loads a field to texture. + + Args: + field (ti.Field): Source field to load from. + """ + self.tex.from_snode(field.snode.ptr) + + def _device_allocation_ptr(self): + return self.tex.device_allocation_ptr() + + def from_image(self, image): + """Loads a PIL image to texture. This method is only allowed a 2D texture with `ti.Format.rgba8`. + + Args: + image (PIL.Image.Image): Source PIL image to load from. + + """ + from PIL import Image # pylint: disable=import-outside-toplevel + + assert isinstance(image, Image.Image) + if image.mode != "RGB": + image = image.convert("RGB") + assert image.size == tuple(self.shape) + + assert self.num_dims == 2 + # Don't use transpose method since its enums are too new + image = image.rotate(90, expand=True) + arr = np.asarray(image) + from taichi._kernels import ( # pylint: disable=import-outside-toplevel + load_texture_from_numpy, + ) + + load_texture_from_numpy(self, arr) + + def to_image(self): + """Saves a texture to a PIL image in RGB mode. This method is only allowed a 2D texture with `ti.Format.rgba8`. + + Returns: + img (PIL.Image.Image): a PIL image in RGB mode, with the same size as source texture. + """ + assert self.num_dims == 2 + from PIL import Image # pylint: disable=import-outside-toplevel + + res = np.zeros(self.shape + (3,), np.uint8) + from taichi._kernels import ( # pylint: disable=import-outside-toplevel + save_texture_to_numpy, + ) + + save_texture_to_numpy(self, res) + return Image.fromarray(res).rotate(270, expand=True) diff --git a/local_packages/taichi/lang/_wrap_inspect.py b/local_packages/taichi/lang/_wrap_inspect.py new file mode 100644 index 0000000..436e8f9 --- /dev/null +++ b/local_packages/taichi/lang/_wrap_inspect.py @@ -0,0 +1,187 @@ +# Taichi's custom inspect module. +# This module is used by Taichi's ast transformer to parse the source code. +# Currently this module is aimed for working in the following modes: +# 1. Usual Python/IPython mode, e.g. python script.py +# In this case we mainly rely on the built-in `inspect` module, except +# we need some hacks when we are in IPython mode and there is a cell magic. +# 2. Blender's scripting mode, e.g. Users write Taichi code in the scripting +# window in Blender and press the run button. In this case we need to +# retrieve the source using Blender's `bpy.data.texts` and write it to a temp +# file so that the inspect module can parse. +# 3. The interactive shell mode, e.g. Users directly type their code in the +# interactive shell. In this case we use `dill` to get the source. +# +# NB: Running Taichi in other modes are likely not supported. + +import atexit +import inspect +import os +import tempfile + +import dill + +_builtin_getfile = inspect.getfile +_builtin_findsource = inspect.findsource + + +def _find_source_with_custom_getfile_func(func, obj): + """Use a custom function `func` to replace inspect's `getfile`, return the + source found by the new routine and restore the original `getfile` back. + """ + inspect.getfile = func # replace with our custom func + source = inspect.findsource(obj) + inspect.getfile = _builtin_getfile # restore + return source + + +def _blender_get_text_name(filename: str): + """Extract filename from path in the Blender mode.""" + # In Blender's scripting mode, unsaved files are named + # like `/Text`, `/Text.001`, `/test.py`, etc. + # We simply remove this path seperator. + if filename.startswith(os.path.sep) and filename.count(os.path.sep) == 1: + return filename[1:] # "/Text.001" --> "Text.001" + + # Saved text files are named like `some-path/xxx.blend/Text` or + # `some-path/xxx.blend/test.py` + # We drop the path and extract the filename with extension. + index = filename.rfind(".blend" + os.path.sep) + if index != -1: + return filename[index + 7 :] # "xxx.blend/test.py" --> "test.py" + + return None + + +def _blender_findsource(obj): + try: + import bpy # pylint: disable=import-outside-toplevel + except: + raise ImportError("Not in Blender environment!") + + # Inspect's built-in `getfile` returns the filename like + # `/Text`, `/Text.001`, `some-path/xxx.blend/test.py` + # This filename may not be a full valid path. + filename = _builtin_getfile(obj) + # Extract the text name without path + text_name = _blender_get_text_name(filename) + if text_name is None: + raise IOError("Object `{obj.__name__}` is not defined in a .blend file!") + # Get the lines of code via text_name + lines = bpy.data.texts[text_name].as_string() + # Now we have found the lines of code. + # We first check if they are already cached, to avoid file io in each query. + try: + filename = _blender_findsource._saved_inspect_cache[lines] # pylint: disable=no-member + except KeyError: + # Save the code to a valid path. + fd, filename = tempfile.mkstemp(prefix="_Blender_", suffix=f"_{text_name}.py") + os.close(fd) + + with open(filename, "w") as f: + f.write(lines) + + _blender_findsource._saved_inspect_cache[lines] = filename # pylint: disable=no-member + atexit.register(os.unlink, filename) # Remove file when program exits + + # Our custom getfile function + def wrapped_getfile(ob): + if id(ob) == id(obj): + return filename + + return _builtin_getfile(ob) + + return _find_source_with_custom_getfile_func(wrapped_getfile, obj) + + +_blender_findsource._saved_inspect_cache = {} + + +def _Python_IPython_findsource(obj): + try: + # In Python and IPython the builtin inspect would suffice in most cases + return _builtin_findsource(obj) + except IOError: + # Except that the cell has a magic command like %%time or %%timeit + # In this case the filename returned by the built-in's getfile is wrong, + # it becomes something like `` or ``. + filename = _builtin_getfile(obj) + if filename in {"", ""}: + try: + ip = get_ipython() + if ip is not None: + # So we are in IPython's cell magic + session_id = ip.history_manager.get_last_session_id() + fd, filename = tempfile.mkstemp(prefix="_IPython_", suffix=f"_{session_id}.py") + os.close(fd) + # The latest lines of code can be retrived from here + lines = ip.history_manager._i00 + + # `lines` is a string that also contains the cell magic + # command, we need to remove the magic command + # (and spaces/sep around it) to obtain a valid Python code + # snippet before saving it to a file + index = lines.find("%time") + lines_stripped = lines[index:] + lines_stripped = lines_stripped.split(maxsplit=1)[1] + + with open(filename, "w") as f: + f.write(lines_stripped) + + atexit.register(os.unlink, filename) # Remove the file after the program exits + func = lambda obj: filename + return _find_source_with_custom_getfile_func(func, obj) + + except ImportError: + pass + raise IOError( + f"Cannot find source code for Object: {obj}, it's likely \ +you are not running Taichi from command line or IPython." + ) + + +def _REPL_findsource(obj): + """Findsource in the interactive shell mode.""" + return dill.source.findsource(obj) + + +def _custom_findsource(obj): + try: + return _Python_IPython_findsource(obj) + except IOError: + try: + return _REPL_findsource(obj) + except: + try: + return _blender_findsource(obj) + except: + raise IOError( + f"Cannot find source code for Object: {obj}, this \ +is possibly because of you are running Taichi in an environment that Taichi's own \ +inspect module cannot find the source. Please report an issue to help us fix: \ +https://github.com/taichi-dev/taichi/issues" + ) + + +class _InspectContextManager: + def __enter__(self): + inspect.findsource = _custom_findsource + return self + + def __exit__(self, *_): + inspect.findsource = _builtin_findsource + + +def getsourcelines(obj): + with _InspectContextManager(): + return inspect.getsourcelines(obj) + + +def getsourcefile(obj): + with _InspectContextManager(): + ret = inspect.getsourcefile(obj) + if ret is None: + ret = inspect.getfile(obj) + return ret + + +__all__ = ["getsourcelines", "getsourcefile"] diff --git a/local_packages/taichi/lang/any_array.py b/local_packages/taichi/lang/any_array.py new file mode 100644 index 0000000..6edcda8 --- /dev/null +++ b/local_packages/taichi/lang/any_array.py @@ -0,0 +1,97 @@ +from taichi._lib import core as _ti_core +from taichi.lang import impl +from taichi.lang.enums import Layout +from taichi.lang.expr import Expr, make_expr_group +from taichi.lang.util import taichi_scope +from taichi.types.ndarray_type import NdarrayTypeMetadata + + +class AnyArray: + """Class for arbitrary arrays in Python AST. + + Args: + ptr (taichi_python.Expr): A taichi_python.Expr wrapping a taichi_python.ExternalTensorExpression. + element_shape (Tuple[Int]): () if scalar elements (default), (n) if vector elements, and (n, m) if matrix elements. + layout (Layout): Memory layout. + """ + + def __init__(self, ptr): + assert ptr.is_external_tensor_expr() + self.ptr = ptr + self.ptr.type_check(impl.get_runtime().prog.config()) + + def element_shape(self): + return _ti_core.get_external_tensor_element_shape(self.ptr) + + def layout(self): + # 0: scalar; 1: vector (SOA); 2: matrix (SOA); -1: vector + # (AOS); -2: matrix (AOS) + element_dim = _ti_core.get_external_tensor_element_dim(self.ptr) + if element_dim == 1 or element_dim == 2: + return Layout.SOA + return Layout.AOS + + def get_type(self): + return NdarrayTypeMetadata( + _ti_core.get_external_tensor_element_type(self.ptr), None, _ti_core.get_external_tensor_needs_grad(self.ptr) + ) # AnyArray can take any shape + + @property + @taichi_scope + def grad(self): + """Returns the gradient of this array.""" + return AnyArray(_ti_core.make_external_tensor_grad_expr(self.ptr)) + + @property + @taichi_scope + def shape(self): + """A list containing sizes for each dimension. Note that element shape will be excluded. + + Returns: + List[Int]: The result list. + """ + dim = _ti_core.get_external_tensor_dim(self.ptr) + dbg_info = _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()) + return [Expr(_ti_core.get_external_tensor_shape_along_axis(self.ptr, i, dbg_info)) for i in range(dim)] + + @taichi_scope + def _loop_range(self): + """Gets the corresponding taichi_python.Expr to serve as loop range. + + Returns: + taichi_python.Expr: See above. + """ + return self.ptr + + +class AnyArrayAccess: + """Class for first-level access to AnyArray with Vector/Matrix elements in Python AST. + + Args: + arr (AnyArray): See above. + indices_first (Tuple[Int]): Indices of first-level access. + """ + + def __init__(self, arr, indices_first): + self.arr = arr + self.indices_first = indices_first + + @taichi_scope + def subscript(self, i, j): + ast_builder = impl.get_runtime().compiling_callable.ast_builder() + + indices_second = (i,) if len(self.arr.element_shape()) == 1 else (i, j) + if self.arr.layout() == Layout.SOA: + indices = indices_second + self.indices_first + else: + indices = self.indices_first + indices_second + return Expr( + ast_builder.expr_subscript( + self.arr.ptr, + make_expr_group(*indices), + _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()), + ) + ) + + +__all__ = [] diff --git a/local_packages/taichi/lang/argpack.py b/local_packages/taichi/lang/argpack.py new file mode 100644 index 0000000..776c030 --- /dev/null +++ b/local_packages/taichi/lang/argpack.py @@ -0,0 +1,410 @@ +from taichi.lang.matrix import Matrix +from taichi.lang.util import in_python_scope, python_scope +import numpy as np +import taichi.lang +from taichi._lib import core as _ti_core +from taichi.lang import impl, ops +from taichi.lang.exception import ( + TaichiRuntimeTypeError, + TaichiSyntaxError, +) +from taichi.lang.matrix import MatrixType +from taichi.lang.struct import StructType, Struct +from taichi.lang.util import cook_dtype +from taichi.types import ( + ndarray_type, + primitive_types, + sparse_matrix_builder, + texture_type, +) +from taichi.types.compound_types import CompoundType +from taichi.types.utils import is_signed + + +class ArgPack: + """ The `ArgPack` Type Class. + + The `ArgPack` operates as a dictionary-like data pack, storing members as (key, value) pairs. Members stored can + range from scalars and matrices to other dictionary-like structures. Distinguished from structs, `ArgPack` can + accommodate buffer types such as `NdarrayType` and `TextureType` from Taichi. However, unlike `ti.Struct` which + serves as a data container, `ArgPack` functions as a reference container. It's important to note that `ArgPack` + cannot be nested within other types except for another `ArgPack`, and can only be utilized as kernel parameters. + + Args: + annotations (Dict[str, Union[Dict, Matrix, Struct]]): \ + The keys and types for `ArgPack` members. + dtype (ArgPackType): \ + The ArgPackType class of this ArgPack object. + entries (Dict[str, Union[Dict, Matrix, Struct]]): \ + The keys and corresponding values for `ArgPack` members. + + Returns: + An instance of this `ArgPack`. + + Example:: + + >>> vec3 = ti.types.vector(3, ti.f32) + >>> pack_type = ti.ArgPackType(v=vec3, t=ti.f32) + >>> a = pack_type(v=vec3([0, 0, 0]), t=1.0) + >>> print(a.items) + dict_items([('v', [0. 0. 0.]), ('t', 1.0)]) + """ + + _instance_count = 0 + + def __init__(self, annotations, dtype, *args, **kwargs): + # converts dicts to argument packs + if len(args) == 1 and kwargs == {} and isinstance(args[0], dict): + self.__entries = args[0] + elif len(args) == 0: + self.__entries = kwargs + else: + raise TaichiSyntaxError( + "Custom argument packs need to be initialized using either dictionary or keyword arguments" + ) + if annotations.keys() != self.__entries.keys(): + raise TaichiSyntaxError("ArgPack annotations keys not equals to entries keys.") + self.__annotations = annotations + for k, v in self.__entries.items(): + self.__entries[k] = v if in_python_scope() else impl.expr_init(v) + self._register_members() + self.__dtype = dtype + self.__argpack = impl.get_runtime().prog.create_argpack(self.__dtype) + for i, (k, v) in enumerate(self.__entries.items()): + self._write_to_device(self.__annotations[k], type(v), v, self._calc_element_true_index(i)) + + def __del__(self): + if impl is not None and impl.get_runtime() is not None and impl.get_runtime().prog is not None: + impl.get_runtime().prog.delete_argpack(self.__argpack) + + @property + def keys(self): + """Returns the list of member names in string format. + + Example:: + + >>> vec3 = ti.types.vector(3, ti.f32) + >>> sphere_pack = ti.ArgPackType(center=vec3, radius=ti.f32) + >>> sphere = sphere_pack(center=vec3([0, 0, 0]), radius=1.0) + >>> sphere.keys + ['center', 'radius'] + """ + return list(self.__entries.keys()) + + @property + def _members(self): + return list(self.__entries.values()) + + @property + def _annotations(self): + return list(self.__annotations.values()) + + @property + def items(self): + """Returns the items in this argument pack. + + Example:: + + >>> vec3 = ti.types.vector(3, ti.f32) + >>> sphere_pack = ti.ArgPackType(center=vec3, radius=ti.f32) + >>> sphere = sphere_pack(center=vec3([0, 0, 0]), radius=1.0) + >>> sphere.items + dict_items([('center', 2), ('radius', 1.0)]) + """ + return self.__entries.items() + + def __getitem__(self, key): + ret = self.__entries[key] + return ret + + def __setitem__(self, key, value): + self.__entries[key] = value + index = self._calc_element_true_index(list(self.__annotations).index(key)) + self._write_to_device(self.__annotations[key], type(value), value, index) + + def _set_entries(self, value): + if isinstance(value, dict): + value = ArgPack(self.__annotations, value) + for k in self.keys: + self[k] = value[k] + + @staticmethod + def _make_getter(key): + def getter(self): + """Get an entry from custom argument pack by name.""" + return self[key] + + return getter + + @staticmethod + def _make_setter(key): + @python_scope + def setter(self, value): + self[key] = value + + return setter + + def _register_members(self): + # https://stackoverflow.com/questions/48448074/adding-a-property-to-an-existing-object-instance + cls = self.__class__ + new_cls_name = cls.__name__ + str(cls._instance_count) + cls._instance_count += 1 + properties = {k: property(cls._make_getter(k), cls._make_setter(k)) for k in self.keys} + self.__class__ = type(new_cls_name, (cls,), properties) + + def __len__(self): + """Get the number of entries in a custom argument pack.""" + return len(self.__entries) + + def __iter__(self): + return self.__entries.values() + + def __str__(self): + """Python scope argument pack array print support.""" + if impl.inside_kernel(): + item_str = ", ".join([str(k) + "=" + str(v) for k, v in self.items]) + return f"" + return str(self.to_dict()) + + def __repr__(self): + return str(self.to_dict()) + + def to_dict(self): + """Converts the ArgPack to a dictionary. + + Returns: + Dict: The result dictionary. + """ + res_dict = { + k: v.to_dict() if isinstance(v, ArgPack) else v.to_list() if isinstance(v, Matrix) else v + for k, v in self.__entries.items() + } + return res_dict + + def _calc_element_true_index(self, old_index): + for i in range(old_index): + anno = list(self.__annotations.values())[i] + if ( + isinstance(anno, sparse_matrix_builder) + or isinstance(anno, ndarray_type.NdarrayType) + or isinstance(anno, texture_type.TextureType) + or isinstance(anno, texture_type.RWTextureType) + or isinstance(anno, ndarray_type.NdarrayType) + ): + old_index -= 1 + return old_index + + def _write_to_device(self, needed, provided, v, index): + if isinstance(needed, ArgPackType): + if not isinstance(v, ArgPack): + raise TaichiRuntimeTypeError.get(index, str(needed), str(provided)) + self.__argpack.set_arg_nested_argpack(index, v.__argpack) + else: + # Note: do not use sth like "needed == f32". That would be slow. + if id(needed) in primitive_types.real_type_ids: + if not isinstance(v, (float, int, np.floating, np.integer)): + raise TaichiRuntimeTypeError.get(index, needed.to_string(), provided) + self.__argpack.set_arg_float((index,), float(v)) + elif id(needed) in primitive_types.integer_type_ids: + if not isinstance(v, (int, np.integer)): + raise TaichiRuntimeTypeError.get(index, needed.to_string(), provided) + if is_signed(cook_dtype(needed)): + self.__argpack.set_arg_int((index,), int(v)) + else: + self.__argpack.set_arg_uint((index,), int(v)) + elif isinstance(needed, sparse_matrix_builder): + pass + elif isinstance(needed, ndarray_type.NdarrayType) and isinstance(v, taichi.lang._ndarray.Ndarray): + pass + elif isinstance(needed, texture_type.TextureType) and isinstance(v, taichi.lang._texture.Texture): + pass + elif isinstance(needed, texture_type.RWTextureType) and isinstance(v, taichi.lang._texture.Texture): + pass + elif isinstance(needed, ndarray_type.NdarrayType): + pass + elif isinstance(needed, MatrixType): + if needed.dtype in primitive_types.real_types: + + def cast_func(x): + if not isinstance(x, (int, float, np.integer, np.floating)): + raise TaichiRuntimeTypeError.get(index, needed.dtype.to_string(), type(x)) + return float(x) + + elif needed.dtype in primitive_types.integer_types: + + def cast_func(x): + if not isinstance(x, (int, np.integer)): + raise TaichiRuntimeTypeError.get(index, needed.dtype.to_string(), type(x)) + return int(x) + + else: + raise ValueError(f"Matrix dtype {needed.dtype} is not integer type or real type.") + + if needed.ndim == 2: + v = [cast_func(v[i, j]) for i in range(needed.n) for j in range(needed.m)] + else: + v = [cast_func(v[i]) for i in range(needed.n)] + v = needed(*v) + needed.set_argpack_struct_args(v, self.__argpack, (index,)) + elif isinstance(needed, StructType): + if not isinstance(v, needed): + raise TaichiRuntimeTypeError.get(index, str(needed), provided) + needed.set_argpack_struct_args(v, self.__argpack, (index,)) + else: + raise ValueError(f"Argument type mismatch. Expecting {needed}, got {type(v)}.") + + +class _IntermediateArgPack(ArgPack): + """Intermediate argument pack class for compiler internal use only. + + Args: + annotations (Dict[str, Union[Expr, Matrix, Struct]]): keys and types for struct members. + entries (Dict[str, Union[Expr, Matrix, Struct]]): keys and values for struct members. + """ + + def __init__(self, annotations, dtype, *args, **kwargs): + # converts dicts to argument packs + if len(args) == 1 and kwargs == {} and isinstance(args[0], dict): + self._ArgPack__entries = args[0] + elif len(args) == 0: + self._ArgPack__entries = kwargs + else: + raise TaichiSyntaxError( + "Custom argument packs need to be initialized using either dictionary or keyword arguments" + ) + if annotations.keys() != self._ArgPack__entries.keys(): + raise TaichiSyntaxError("ArgPack annotations keys not equals to entries keys.") + self._ArgPack__annotations = annotations + self._register_members() + self._ArgPack__dtype = dtype + self._ArgPack__argpack = impl.get_runtime().prog.create_argpack(dtype) + + def __del__(self): + pass + + +class ArgPackType(CompoundType): + def __init__(self, **kwargs): + self.members = {} + elements = [] + for k, dtype in kwargs.items(): + if isinstance(dtype, StructType): + self.members[k] = dtype + elements.append([dtype.dtype, k]) + elif isinstance(dtype, ArgPackType): + self.members[k] = dtype + elements.append( + [ + _ti_core.DataType( + _ti_core.get_type_factory_instance().get_struct_type_for_argpack_ptr(dtype.dtype) + ), + k, + ] + ) + elif isinstance(dtype, MatrixType): + # Convert MatrixType to StructType + if dtype.ndim == 1: + elements_ = [(dtype.dtype, f"{k}_{i}") for i in range(dtype.n)] + else: + elements_ = [(dtype.dtype, f"{k}_{i}_{j}") for i in range(dtype.n) for j in range(dtype.m)] + self.members[k] = dtype + elements.append([_ti_core.get_type_factory_instance().get_struct_type(elements_), k]) + elif isinstance(dtype, sparse_matrix_builder): + self.members[k] = dtype + elif isinstance(dtype, ndarray_type.NdarrayType): + self.members[k] = dtype + elif isinstance(dtype, texture_type.RWTextureType): + self.members[k] = dtype + elif isinstance(dtype, texture_type.TextureType): + self.members[k] = dtype + else: + dtype = cook_dtype(dtype) + self.members[k] = dtype + elements.append([dtype, k]) + if len(elements) == 0: + # Use i32 as a placeholder for empty argpacks + elements.append([primitive_types.i32, k]) + self.dtype = _ti_core.get_type_factory_instance().get_argpack_type(elements) + + def __call__(self, *args, **kwargs): + """Create an instance of this argument pack type.""" + d = {} + items = self.members.items() + # iterate over the members of this argument pack + for index, pair in enumerate(items): + name, dtype = pair # (member name, member type)) + if index < len(args): # set from args + data = args[index] + else: # set from kwargs + data = kwargs.get(name, None) + + # If dtype is CompoundType and data is a scalar, it cannot be + # casted in the self.cast call later. We need an initialization here. + if isinstance(dtype, CompoundType) and not isinstance(data, (dict, ArgPack, Struct)): + data = dtype(data) + + d[name] = data + + entries = ArgPack(self.members, self.dtype, d) + pack = self.cast(entries) + return pack + + def __instancecheck__(self, instance): + if not isinstance(instance, ArgPack): + return False + if list(self.members.keys()) != list(instance._ArgPack__entries.keys()): + return False + for k, v in self.members.items(): + if isinstance(v, ArgPackType): + if not isinstance(instance._ArgPack__entries[k], v): + return False + elif instance._ArgPack__annotations[k] != v: + return False + return True + + def cast(self, pack): + # sanity check members + if self.members.keys() != pack._ArgPack__entries.keys(): + raise TaichiSyntaxError("Incompatible arguments for custom argument pack members!") + entries = {} + for k, dtype in self.members.items(): + if isinstance(dtype, MatrixType): + entries[k] = dtype(pack._ArgPack__entries[k]) + elif isinstance(dtype, CompoundType): + entries[k] = dtype.cast(pack._ArgPack__entries[k]) + elif isinstance(dtype, ArgPackType): + entries[k] = dtype.cast(pack._ArgPack__entries[k]) + elif isinstance(dtype, ndarray_type.NdarrayType): + entries[k] = pack._ArgPack__entries[k] + elif isinstance(dtype, texture_type.RWTextureType): + entries[k] = pack._ArgPack__entries[k] + elif isinstance(dtype, texture_type.TextureType): + entries[k] = pack._ArgPack__entries[k] + elif isinstance(dtype, sparse_matrix_builder): + entries[k] = pack._ArgPack__entries[k] + else: + if in_python_scope(): + v = pack._ArgPack__entries[k] + entries[k] = int(v) if dtype in primitive_types.integer_types else float(v) + else: + entries[k] = ops.cast(pack._ArgPack__entries[k], dtype) + pack = ArgPack(self.members, self.dtype, entries) + return pack + + def from_taichi_object(self, arg_load_dict: dict): + d = {} + items = self.members.items() + for index, pair in enumerate(items): + name, dtype = pair + d[name] = arg_load_dict[name] + pack = _IntermediateArgPack(self.members, self.dtype, d) + pack._ArgPack__dtype = self.dtype + return pack + + def __str__(self): + """Python scope argpack type print support.""" + item_str = ", ".join([str(k) + "=" + str(v) for k, v in self.members.items()]) + return f"" + + +__all__ = ["ArgPack"] diff --git a/local_packages/taichi/lang/ast/__init__.py b/local_packages/taichi/lang/ast/__init__.py new file mode 100644 index 0000000..5787470 --- /dev/null +++ b/local_packages/taichi/lang/ast/__init__.py @@ -0,0 +1,3 @@ +from taichi.lang.ast.ast_transformer_utils import ASTTransformerContext +from taichi.lang.ast.checkers import KernelSimplicityASTChecker +from taichi.lang.ast.transform import transform_tree diff --git a/local_packages/taichi/lang/ast/ast_transformer.py b/local_packages/taichi/lang/ast/ast_transformer.py new file mode 100644 index 0000000..ba7890d --- /dev/null +++ b/local_packages/taichi/lang/ast/ast_transformer.py @@ -0,0 +1,1723 @@ +import ast +import collections.abc +import itertools +import operator +import re +import warnings +from collections import ChainMap +from sys import version_info +import inspect +import math + +import numpy as np +from taichi._lib import core as _ti_core +from taichi.lang import _ndarray, any_array, expr, impl, kernel_arguments, matrix, mesh +from taichi.lang import ops as ti_ops +from taichi.lang._ndrange import _Ndrange, ndrange +from taichi.lang.argpack import ArgPackType +from taichi.lang.ast.ast_transformer_utils import Builder, LoopStatus, ReturnStatus +from taichi.lang.ast.symbol_resolver import ASTResolver +from taichi.lang.exception import ( + TaichiIndexError, + TaichiSyntaxError, + TaichiTypeError, + handle_exception_from_cpp, +) +from taichi.lang.expr import Expr, make_expr_group +from taichi.lang.exception import TaichiRuntimeTypeError +from taichi.lang.field import Field +from taichi.lang.matrix import Matrix, MatrixType, Vector +from taichi.lang.snode import append, deactivate, length +from taichi.lang.struct import Struct, StructType +from taichi.lang.util import is_taichi_class, to_taichi_type +from taichi.types import annotations, ndarray_type, primitive_types, texture_type +from taichi.types.utils import is_integral + +if version_info < (3, 9): + from astunparse import unparse +else: + from ast import unparse + + +def reshape_list(flat_list, target_shape): + if len(target_shape) < 2: + return flat_list + + curr_list = [] + dim = target_shape[-1] + for i, elem in enumerate(flat_list): + if i % dim == 0: + curr_list.append([]) + curr_list[-1].append(elem) + + return reshape_list(curr_list, target_shape[:-1]) + + +def boundary_type_cast_warning(expression): + expr_dtype = expression.ptr.get_rvalue_type() + if not is_integral(expr_dtype) or expr_dtype in [ + primitive_types.i64, + primitive_types.u64, + primitive_types.u32, + ]: + warnings.warn( + f"Casting range_for boundary values from {expr_dtype} to i32, which may cause numerical issues", + Warning, + ) + + +class ASTTransformer(Builder): + @staticmethod + def build_Name(ctx, node): + node.ptr = ctx.get_var_by_name(node.id) + if isinstance(node, (ast.stmt, ast.expr)) and isinstance(node.ptr, Expr): + node.ptr.dbg_info = _ti_core.DebugInfo(ctx.get_pos_info(node)) + node.ptr.ptr.set_dbg_info(node.ptr.dbg_info) + return node.ptr + + @staticmethod + def build_AnnAssign(ctx, node): + build_stmt(ctx, node.value) + build_stmt(ctx, node.annotation) + + is_static_assign = isinstance(node.value, ast.Call) and node.value.func.ptr is impl.static + + node.ptr = ASTTransformer.build_assign_annotated( + ctx, node.target, node.value.ptr, is_static_assign, node.annotation.ptr + ) + return node.ptr + + @staticmethod + def build_assign_annotated(ctx, target, value, is_static_assign, annotation): + """Build an annotated assignment like this: target: annotation = value. + + Args: + ctx (ast_builder_utils.BuilderContext): The builder context. + target (ast.Name): A variable name. `target.id` holds the name as + a string. + annotation: A type we hope to assign to the target + value: A node representing the value. + is_static_assign: A boolean value indicating whether this is a static assignment + """ + is_local = isinstance(target, ast.Name) + if is_local and target.id in ctx.kernel_args: + raise TaichiSyntaxError( + f'Kernel argument "{target.id}" is immutable in the kernel. ' + f"If you want to change its value, please create a new variable." + ) + anno = impl.expr_init(annotation) + if is_static_assign: + raise TaichiSyntaxError("Static assign cannot be used on annotated assignment") + if is_local and not ctx.is_var_declared(target.id): + var = ti_ops.cast(value, anno) + var = impl.expr_init(var) + ctx.create_variable(target.id, var) + else: + var = build_stmt(ctx, target) + if var.ptr.get_rvalue_type() != anno: + raise TaichiSyntaxError("Static assign cannot have type overloading") + var._assign(value) + return var + + @staticmethod + def build_Assign(ctx, node): + build_stmt(ctx, node.value) + is_static_assign = isinstance(node.value, ast.Call) and node.value.func.ptr is impl.static + + # Keep all generated assign statements and compose single one at last. + # The variable is introduced to support chained assignments. + # Ref https://github.com/taichi-dev/taichi/issues/2659. + values = node.value.ptr if is_static_assign else impl.expr_init(node.value.ptr) + + for node_target in node.targets: + ASTTransformer.build_assign_unpack(ctx, node_target, values, is_static_assign) + return None + + @staticmethod + def build_assign_unpack(ctx, node_target, values, is_static_assign): + """Build the unpack assignments like this: (target1, target2) = (value1, value2). + The function should be called only if the node target is a tuple. + + Args: + ctx (ast_builder_utils.BuilderContext): The builder context. + node_target (ast.Tuple): A list or tuple object. `node_target.elts` holds a + list of nodes representing the elements. + values: A node/list representing the values. + is_static_assign: A boolean value indicating whether this is a static assignment + """ + if not isinstance(node_target, ast.Tuple): + return ASTTransformer.build_assign_basic(ctx, node_target, values, is_static_assign) + targets = node_target.elts + + if isinstance(values, matrix.Matrix): + if not values.m == 1: + raise ValueError("Matrices with more than one columns cannot be unpacked") + values = values.entries + + # Unpack: a, b, c = ti.Vector([1., 2., 3.]) + if isinstance(values, impl.Expr) and values.ptr.is_tensor(): + if len(values.get_shape()) > 1: + raise ValueError("Matrices with more than one columns cannot be unpacked") + + values = ctx.ast_builder.expand_exprs([values.ptr]) + if len(values) == 1: + values = values[0] + + if isinstance(values, impl.Expr) and values.ptr.is_struct(): + values = ctx.ast_builder.expand_exprs([values.ptr]) + if len(values) == 1: + values = values[0] + + if not isinstance(values, collections.abc.Sequence): + raise TaichiSyntaxError(f"Cannot unpack type: {type(values)}") + + if len(values) != len(targets): + raise TaichiSyntaxError("The number of targets is not equal to value length") + + for i, target in enumerate(targets): + ASTTransformer.build_assign_basic(ctx, target, values[i], is_static_assign) + + return None + + @staticmethod + def build_assign_basic(ctx, target, value, is_static_assign): + """Build basic assignment like this: target = value. + + Args: + ctx (ast_builder_utils.BuilderContext): The builder context. + target (ast.Name): A variable name. `target.id` holds the name as + a string. + value: A node representing the value. + is_static_assign: A boolean value indicating whether this is a static assignment + """ + is_local = isinstance(target, ast.Name) + if is_local and target.id in ctx.kernel_args: + raise TaichiSyntaxError( + f'Kernel argument "{target.id}" is immutable in the kernel. ' + f"If you want to change its value, please create a new variable." + ) + if is_static_assign: + if not is_local: + raise TaichiSyntaxError("Static assign cannot be used on elements in arrays") + ctx.create_variable(target.id, value) + var = value + elif is_local and not ctx.is_var_declared(target.id): + var = impl.expr_init(value) + ctx.create_variable(target.id, var) + else: + var = build_stmt(ctx, target) + try: + var._assign(value) + except AttributeError: + raise TaichiSyntaxError( + f"Variable '{unparse(target).strip()}' cannot be assigned. Maybe it is not a Taichi object?" + ) + return var + + @staticmethod + def build_NamedExpr(ctx, node): + build_stmt(ctx, node.value) + is_static_assign = isinstance(node.value, ast.Call) and node.value.func.ptr is impl.static + node.ptr = ASTTransformer.build_assign_basic(ctx, node.target, node.value.ptr, is_static_assign) + return node.ptr + + @staticmethod + def is_tuple(node): + if isinstance(node, ast.Tuple): + return True + if isinstance(node, ast.Index) and isinstance(node.value.ptr, tuple): + return True + if isinstance(node.ptr, tuple): + return True + return False + + @staticmethod + def build_Subscript(ctx, node): + build_stmt(ctx, node.value) + build_stmt(ctx, node.slice) + if not ASTTransformer.is_tuple(node.slice): + node.slice.ptr = [node.slice.ptr] + node.ptr = impl.subscript(ctx.ast_builder, node.value.ptr, *node.slice.ptr) + return node.ptr + + @staticmethod + def build_Slice(ctx, node): + if node.lower is not None: + build_stmt(ctx, node.lower) + if node.upper is not None: + build_stmt(ctx, node.upper) + if node.step is not None: + build_stmt(ctx, node.step) + + node.ptr = slice( + node.lower.ptr if node.lower else None, + node.upper.ptr if node.upper else None, + node.step.ptr if node.step else None, + ) + return node.ptr + + @staticmethod + def build_ExtSlice(ctx, node): + build_stmts(ctx, node.dims) + node.ptr = tuple(dim.ptr for dim in node.dims) + return node.ptr + + @staticmethod + def build_Tuple(ctx, node): + build_stmts(ctx, node.elts) + node.ptr = tuple(elt.ptr for elt in node.elts) + return node.ptr + + @staticmethod + def build_List(ctx, node): + build_stmts(ctx, node.elts) + node.ptr = [elt.ptr for elt in node.elts] + return node.ptr + + @staticmethod + def build_Dict(ctx, node): + dic = {} + for key, value in zip(node.keys, node.values): + if key is None: + dic.update(build_stmt(ctx, value)) + else: + dic[build_stmt(ctx, key)] = build_stmt(ctx, value) + node.ptr = dic + return node.ptr + + @staticmethod + def process_listcomp(ctx, node, result): + result.append(build_stmt(ctx, node.elt)) + + @staticmethod + def process_dictcomp(ctx, node, result): + key = build_stmt(ctx, node.key) + value = build_stmt(ctx, node.value) + result[key] = value + + @staticmethod + def process_generators(ctx, node, now_comp, func, result): + if now_comp >= len(node.generators): + return func(ctx, node, result) + with ctx.static_scope_guard(): + _iter = build_stmt(ctx, node.generators[now_comp].iter) + + if isinstance(_iter, impl.Expr) and _iter.ptr.is_tensor(): + shape = _iter.ptr.get_shape() + flattened = [Expr(x) for x in ctx.ast_builder.expand_exprs([_iter.ptr])] + _iter = reshape_list(flattened, shape) + + for value in _iter: + with ctx.variable_scope_guard(): + ASTTransformer.build_assign_unpack(ctx, node.generators[now_comp].target, value, True) + with ctx.static_scope_guard(): + build_stmts(ctx, node.generators[now_comp].ifs) + ASTTransformer.process_ifs(ctx, node, now_comp, 0, func, result) + return None + + @staticmethod + def process_ifs(ctx, node, now_comp, now_if, func, result): + if now_if >= len(node.generators[now_comp].ifs): + return ASTTransformer.process_generators(ctx, node, now_comp + 1, func, result) + cond = node.generators[now_comp].ifs[now_if].ptr + if cond: + ASTTransformer.process_ifs(ctx, node, now_comp, now_if + 1, func, result) + + return None + + @staticmethod + def build_ListComp(ctx, node): + result = [] + ASTTransformer.process_generators(ctx, node, 0, ASTTransformer.process_listcomp, result) + node.ptr = result + return node.ptr + + @staticmethod + def build_DictComp(ctx, node): + result = {} + ASTTransformer.process_generators(ctx, node, 0, ASTTransformer.process_dictcomp, result) + node.ptr = result + return node.ptr + + @staticmethod + def build_Index(ctx, node): + node.ptr = build_stmt(ctx, node.value) + return node.ptr + + @staticmethod + def build_Constant(ctx, node): + node.ptr = node.value + return node.ptr + + @staticmethod + def build_Num(ctx, node): + node.ptr = node.n + return node.ptr + + @staticmethod + def build_Str(ctx, node): + node.ptr = node.s + return node.ptr + + @staticmethod + def build_Bytes(ctx, node): + node.ptr = node.s + return node.ptr + + @staticmethod + def build_NameConstant(ctx, node): + node.ptr = node.value + return node.ptr + + @staticmethod + def build_keyword(ctx, node): + build_stmt(ctx, node.value) + if node.arg is None: + node.ptr = node.value.ptr + else: + node.ptr = {node.arg: node.value.ptr} + return node.ptr + + @staticmethod + def build_Starred(ctx, node): + node.ptr = build_stmt(ctx, node.value) + return node.ptr + + @staticmethod + def build_FormattedValue(ctx, node): + node.ptr = build_stmt(ctx, node.value) + if node.format_spec is None or len(node.format_spec.values) == 0: + return node.ptr + values = node.format_spec.values + assert len(values) == 1 + format_str = values[0].s + assert format_str is not None + # distinguished from normal list + return ["__ti_fmt_value__", node.ptr, format_str] + + @staticmethod + def build_JoinedStr(ctx, node): + str_spec = "" + args = [] + for sub_node in node.values: + if isinstance(sub_node, ast.FormattedValue): + str_spec += "{}" + args.append(build_stmt(ctx, sub_node)) + elif isinstance(sub_node, ast.Constant): + str_spec += sub_node.value + elif isinstance(sub_node, ast.Str): + str_spec += sub_node.s + else: + raise TaichiSyntaxError("Invalid value for fstring.") + + args.insert(0, str_spec) + node.ptr = impl.ti_format(*args) + return node.ptr + + @staticmethod + def build_call_if_is_builtin(ctx, node, args, keywords): + from taichi.lang import matrix_ops # pylint: disable=C0415 + + func = node.func.ptr + replace_func = { + id(print): impl.ti_print, + id(min): ti_ops.min, + id(max): ti_ops.max, + id(int): impl.ti_int, + id(bool): impl.ti_bool, + id(float): impl.ti_float, + id(any): matrix_ops.any, + id(all): matrix_ops.all, + id(abs): abs, + id(pow): pow, + id(operator.matmul): matrix_ops.matmul, + } + + # Builtin 'len' function on Matrix Expr + if id(func) == id(len) and len(args) == 1: + if isinstance(args[0], Expr) and args[0].ptr.is_tensor(): + node.ptr = args[0].get_shape()[0] + return True + + if id(func) in replace_func: + node.ptr = replace_func[id(func)](*args, **keywords) + return True + return False + + @staticmethod + def build_call_if_is_type(ctx, node, args, keywords): + func = node.func.ptr + if id(func) in primitive_types.type_ids: + if len(args) != 1 or keywords: + raise TaichiSyntaxError("A primitive type can only decorate a single expression.") + if is_taichi_class(args[0]): + raise TaichiSyntaxError("A primitive type cannot decorate an expression with a compound type.") + + if isinstance(args[0], expr.Expr): + if args[0].ptr.is_tensor(): + raise TaichiSyntaxError("A primitive type cannot decorate an expression with a compound type.") + node.ptr = ti_ops.cast(args[0], func) + else: + node.ptr = expr.Expr(args[0], dtype=func) + return True + return False + + @staticmethod + def is_external_func(ctx, func) -> bool: + if ctx.is_in_static_scope(): # allow external function in static scope + return False + if hasattr(func, "_is_taichi_function") or hasattr(func, "_is_wrapped_kernel"): # taichi func/kernel + return False + if hasattr(func, "__module__") and func.__module__ and func.__module__.startswith("taichi."): + return False + return True + + @staticmethod + def warn_if_is_external_func(ctx, node): + func = node.func.ptr + if not ASTTransformer.is_external_func(ctx, func): + return + name = unparse(node.func).strip() + warnings.warn_explicit( + f"\x1b[38;5;226m" # Yellow + f'Calling non-taichi function "{name}". ' + f"Scope inside the function is not processed by the Taichi AST transformer. " + f"The function may not work as expected. Proceed with caution! " + f"Maybe you can consider turning it into a @ti.func?" + f"\x1b[0m", # Reset + SyntaxWarning, + ctx.file, + node.lineno + ctx.lineno_offset, + module="taichi", + ) + + @staticmethod + # Parses a formatted string and extracts format specifiers from it, along with positional and keyword arguments. + # This function produces a canonicalized formatted string that includes solely empty replacement fields, e.g. 'qwerty {} {} {} {} {}'. + # Note that the arguments can be used multiple times in the string. + # e.g.: + # origin input: 'qwerty {1} {} {1:.3f} {k:.4f} {k:}'.format(1.0, 2.0, k=k) + # raw_string: 'qwerty {1} {} {1:.3f} {k:.4f} {k:}' + # raw_args: [1.0, 2.0] + # raw_keywords: {'k': } + # return value: ['qwerty {} {} {} {} {}', 2.0, 1.0, ['__ti_fmt_value__', 2.0, '.3f'], ['__ti_fmt_value__', , '.4f'], ] + def canonicalize_formatted_string(raw_string, *raw_args, **raw_keywords): + raw_brackets = re.findall(r"{(.*?)}", raw_string) + brackets = [] + unnamed = 0 + for bracket in raw_brackets: + item, spec = bracket.split(":") if ":" in bracket else (bracket, None) + if item.isdigit(): + item = int(item) + # handle unnamed positional args + if item == "": + item = unnamed + unnamed += 1 + # handle empty spec + if spec == "": + spec = None + brackets.append((item, spec)) + + # check for errors in the arguments + max_args_index = max([t[0] for t in brackets if isinstance(t[0], int)], default=-1) + if max_args_index + 1 != len(raw_args): + raise TaichiSyntaxError( + f"Expected {max_args_index + 1} positional argument(s), but received {len(raw_args)} instead." + ) + brackets_keywords = [t[0] for t in brackets if isinstance(t[0], str)] + for item in brackets_keywords: + if item not in raw_keywords: + raise TaichiSyntaxError(f"Keyword '{item}' not found.") + for item in raw_keywords: + if item not in brackets_keywords: + raise TaichiSyntaxError(f"Keyword '{item}' not used.") + + # reorganize the arguments based on their positions, keywords, and format specifiers + args = [] + for item, spec in brackets: + new_arg = raw_args[item] if isinstance(item, int) else raw_keywords[item] + if spec is not None: + args.append(["__ti_fmt_value__", new_arg, spec]) + else: + args.append(new_arg) + # put the formatted string as the first argument to make ti.format() happy + args.insert(0, re.sub(r"{.*?}", "{}", raw_string)) + return args + + @staticmethod + def build_Call(ctx, node): + if ASTTransformer.get_decorator(ctx, node) in ["static", "static_assert"]: + with ctx.static_scope_guard(): + build_stmt(ctx, node.func) + build_stmts(ctx, node.args) + build_stmts(ctx, node.keywords) + else: + build_stmt(ctx, node.func) + build_stmts(ctx, node.args) + build_stmts(ctx, node.keywords) + + args = [] + for arg in node.args: + if isinstance(arg, ast.Starred): + arg_list = arg.ptr + if isinstance(arg_list, Expr) and arg_list.is_tensor(): + # Expand Expr with Matrix-type return into list of Exprs + arg_list = [Expr(x) for x in ctx.ast_builder.expand_exprs([arg_list.ptr])] + + for i in arg_list: + args.append(i) + else: + args.append(arg.ptr) + keywords = dict(ChainMap(*[keyword.ptr for keyword in node.keywords])) + func = node.func.ptr + + if id(func) in [id(print), id(impl.ti_print)]: + ctx.func.has_print = True + + if isinstance(node.func, ast.Attribute) and isinstance(node.func.value.ptr, str) and node.func.attr == "format": + raw_string = node.func.value.ptr + args = ASTTransformer.canonicalize_formatted_string(raw_string, *args, **keywords) + node.ptr = impl.ti_format(*args) + return node.ptr + + if id(func) == id(Matrix) or id(func) == id(Vector): + node.ptr = matrix.make_matrix(*args, **keywords) + return node.ptr + + if ASTTransformer.build_call_if_is_builtin(ctx, node, args, keywords): + return node.ptr + + if ASTTransformer.build_call_if_is_type(ctx, node, args, keywords): + return node.ptr + + if hasattr(node.func, "caller"): + node.ptr = func(node.func.caller, *args, **keywords) + return node.ptr + ASTTransformer.warn_if_is_external_func(ctx, node) + try: + node.ptr = func(*args, **keywords) + except TypeError as e: + module = inspect.getmodule(func) + error_msg = re.sub(r"\bExpr\b", "Taichi Expression", str(e)) + msg = f"TypeError when calling `{func.__name__}`: {error_msg}." + if ASTTransformer.is_external_func(ctx, node.func.ptr): + args_has_expr = any([isinstance(arg, Expr) for arg in args]) + if args_has_expr and (module == math or module == np): + exec_str = f"from taichi import {func.__name__}" + try: + exec(exec_str, {}) + except: + pass + else: + msg += f"\nDid you mean to use `ti.{func.__name__}` instead of `{module.__name__}.{func.__name__}`?" + raise TaichiTypeError(msg) + + if getattr(func, "_is_taichi_function", False): + ctx.func.has_print |= func.func.has_print + + return node.ptr + + @staticmethod + def build_FunctionDef(ctx, node): + if ctx.visited_funcdef: + raise TaichiSyntaxError( + f"Function definition is not allowed in 'ti.{'kernel' if ctx.is_kernel else 'func'}'." + ) + ctx.visited_funcdef = True + + args = node.args + assert args.vararg is None + assert args.kwonlyargs == [] + assert args.kw_defaults == [] + assert args.kwarg is None + + def decl_and_create_variable(annotation, name, arg_features, invoke_later_dict, prefix_name, arg_depth): + full_name = prefix_name + "_" + name + if not isinstance(annotation, primitive_types.RefType): + ctx.kernel_args.append(name) + if isinstance(annotation, ArgPackType): + kernel_arguments.push_argpack_arg(name) + d = {} + items_to_put_in_dict = [] + for j, (_name, anno) in enumerate(annotation.members.items()): + result, obj = decl_and_create_variable( + anno, _name, arg_features[j], invoke_later_dict, full_name, arg_depth + 1 + ) + if not result: + d[_name] = None + items_to_put_in_dict.append((full_name + "_" + _name, _name, obj)) + else: + d[_name] = obj + argpack = kernel_arguments.decl_argpack_arg(annotation, d) + for item in items_to_put_in_dict: + invoke_later_dict[item[0]] = argpack, item[1], *item[2] + return True, argpack + if isinstance(annotation, annotations.template): + return True, ctx.global_vars[name] + if isinstance(annotation, annotations.sparse_matrix_builder): + return False, ( + kernel_arguments.decl_sparse_matrix, + ( + to_taichi_type(arg_features), + full_name, + ), + ) + if isinstance(annotation, ndarray_type.NdarrayType): + return False, ( + kernel_arguments.decl_ndarray_arg, + ( + to_taichi_type(arg_features[0]), + arg_features[1], + full_name, + arg_features[2], + arg_features[3], + ), + ) + if isinstance(annotation, texture_type.TextureType): + return False, (kernel_arguments.decl_texture_arg, (arg_features[0], full_name)) + if isinstance(annotation, texture_type.RWTextureType): + return False, ( + kernel_arguments.decl_rw_texture_arg, + (arg_features[0], arg_features[1], arg_features[2], full_name), + ) + if isinstance(annotation, MatrixType): + return True, kernel_arguments.decl_matrix_arg(annotation, name, arg_depth) + if isinstance(annotation, StructType): + return True, kernel_arguments.decl_struct_arg(annotation, name, arg_depth) + return True, kernel_arguments.decl_scalar_arg(annotation, name, arg_depth) + + def transform_as_kernel(): + # Treat return type + if node.returns is not None: + for return_type in ctx.func.return_type: + kernel_arguments.decl_ret(return_type) + impl.get_runtime().compiling_callable.finalize_rets() + + invoke_later_dict = dict() + create_variable_later = dict() + for i, arg in enumerate(args.args): + if isinstance(ctx.func.arguments[i].annotation, ArgPackType): + kernel_arguments.push_argpack_arg(ctx.func.arguments[i].name) + d = {} + items_to_put_in_dict = [] + for j, (name, anno) in enumerate(ctx.func.arguments[i].annotation.members.items()): + result, obj = decl_and_create_variable( + anno, name, ctx.arg_features[i][j], invoke_later_dict, "__argpack_" + name, 1 + ) + if not result: + d[name] = None + items_to_put_in_dict.append(("__argpack_" + name, name, obj)) + else: + d[name] = obj + argpack = kernel_arguments.decl_argpack_arg(ctx.func.arguments[i].annotation, d) + for item in items_to_put_in_dict: + invoke_later_dict[item[0]] = argpack, item[1], *item[2] + create_variable_later[arg.arg] = argpack + else: + result, obj = decl_and_create_variable( + ctx.func.arguments[i].annotation, + ctx.func.arguments[i].name, + ctx.arg_features[i] if ctx.arg_features is not None else None, + invoke_later_dict, + "", + 0, + ) + ctx.create_variable(arg.arg, obj if result else obj[0](*obj[1])) + for k, v in invoke_later_dict.items(): + argpack, name, func, params = v + argpack[name] = func(*params) + for k, v in create_variable_later.items(): + ctx.create_variable(k, v) + + impl.get_runtime().compiling_callable.finalize_params() + # remove original args + node.args.args = [] + + if ctx.is_kernel: # ti.kernel + transform_as_kernel() + + else: # ti.func + if ctx.is_real_function: + transform_as_kernel() + else: + assert len(args.args) == len(ctx.argument_data) + for i, (arg, data) in enumerate(zip(args.args, ctx.argument_data)): + # Template arguments are passed by reference. + if isinstance(ctx.func.arguments[i].annotation, annotations.template): + ctx.create_variable(ctx.func.arguments[i].name, data) + continue + + # Ndarray arguments are passed by reference. + if isinstance(ctx.func.arguments[i].annotation, (ndarray_type.NdarrayType)): + if not isinstance( + data, + ( + _ndarray.ScalarNdarray, + matrix.VectorNdarray, + matrix.MatrixNdarray, + any_array.AnyArray, + ), + ): + raise TaichiSyntaxError( + f"Argument {arg.arg} of type {ctx.func.arguments[i].annotation} is not recognized." + ) + ctx.func.arguments[i].annotation.check_matched(data.get_type(), ctx.func.arguments[i].name) + ctx.create_variable(ctx.func.arguments[i].name, data) + continue + + # Matrix arguments are passed by value. + if isinstance(ctx.func.arguments[i].annotation, (MatrixType)): + # "data" is expected to be an Expr here, + # so we simply call "impl.expr_init_func(data)" to perform: + # + # TensorType* t = alloca() + # assign(t, data) + # + # We created local variable "t" - a copy of the passed-in argument "data" + if not isinstance(data, expr.Expr) or not data.ptr.is_tensor(): + raise TaichiSyntaxError( + f"Argument {arg.arg} of type {ctx.func.arguments[i].annotation} is expected to be a Matrix, but got {type(data)}." + ) + + element_shape = data.ptr.get_rvalue_type().shape() + if len(element_shape) != ctx.func.arguments[i].annotation.ndim: + raise TaichiSyntaxError( + f"Argument {arg.arg} of type {ctx.func.arguments[i].annotation} is expected to be a Matrix with ndim {ctx.func.arguments[i].annotation.ndim}, but got {len(element_shape)}." + ) + + assert ctx.func.arguments[i].annotation.ndim > 0 + if element_shape[0] != ctx.func.arguments[i].annotation.n: + raise TaichiSyntaxError( + f"Argument {arg.arg} of type {ctx.func.arguments[i].annotation} is expected to be a Matrix with n {ctx.func.arguments[i].annotation.n}, but got {element_shape[0]}." + ) + + if ( + ctx.func.arguments[i].annotation.ndim == 2 + and element_shape[1] != ctx.func.arguments[i].annotation.m + ): + raise TaichiSyntaxError( + f"Argument {arg.arg} of type {ctx.func.arguments[i].annotation} is expected to be a Matrix with m {ctx.func.arguments[i].annotation.m}, but got {element_shape[0]}." + ) + + ctx.create_variable(arg.arg, impl.expr_init_func(data)) + continue + + if id(ctx.func.arguments[i].annotation) in primitive_types.type_ids: + ctx.create_variable( + arg.arg, impl.expr_init_func(ti_ops.cast(data, ctx.func.arguments[i].annotation)) + ) + continue + # Create a copy for non-template arguments, + # so that they are passed by value. + ctx.create_variable(arg.arg, impl.expr_init_func(data)) + + with ctx.variable_scope_guard(): + build_stmts(ctx, node.body) + + return None + + @staticmethod + def build_Return(ctx, node): + if not ctx.is_real_function: + if ctx.is_in_non_static_control_flow(): + raise TaichiSyntaxError("Return inside non-static if/for is not supported") + if node.value is not None: + build_stmt(ctx, node.value) + if node.value is None or node.value.ptr is None: + if not ctx.is_real_function: + ctx.returned = ReturnStatus.ReturnedVoid + return None + if ctx.is_kernel or ctx.is_real_function: + # TODO: check if it's at the end of a kernel, throw TaichiSyntaxError if not + if ctx.func.return_type is None: + raise TaichiSyntaxError( + f'A {"kernel" if ctx.is_kernel else "function"} ' + "with a return value must be annotated " + "with a return type, e.g. def func() -> ti.f32" + ) + return_exprs = [] + if len(ctx.func.return_type) == 1: + node.value.ptr = [node.value.ptr] + assert len(ctx.func.return_type) == len(node.value.ptr) + for return_type, ptr in zip(ctx.func.return_type, node.value.ptr): + if id(return_type) in primitive_types.type_ids: + if isinstance(ptr, Expr): + if ptr.is_tensor() or ptr.is_struct() or ptr.element_type() not in primitive_types.all_types: + raise TaichiRuntimeTypeError.get_ret(str(return_type), ptr) + elif not isinstance(ptr, (float, int, np.floating, np.integer)): + raise TaichiRuntimeTypeError.get_ret(str(return_type), ptr) + return_exprs += [ti_ops.cast(expr.Expr(ptr), return_type).ptr] + elif isinstance(return_type, MatrixType): + values = ptr + if isinstance(values, Matrix): + if values.ndim != ctx.func.return_type.ndim: + raise TaichiRuntimeTypeError( + f"Return matrix ndim mismatch, expecting={return_type.ndim}, got={values.ndim}." + ) + elif return_type.get_shape() != values.get_shape(): + raise TaichiRuntimeTypeError( + f"Return matrix shape mismatch, expecting={return_type.get_shape()}, got={values.get_shape()}." + ) + values = ( + itertools.chain.from_iterable(values.to_list()) + if values.ndim == 1 + else iter(values.to_list()) + ) + elif isinstance(values, Expr): + if not values.is_tensor(): + raise TaichiRuntimeTypeError.get_ret(return_type.to_string(), ptr) + elif ( + return_type.dtype in primitive_types.real_types + and not values.element_type() in primitive_types.all_types + ): + raise TaichiRuntimeTypeError.get_ret(return_type.dtype.to_string(), values.element_type()) + elif ( + return_type.dtype in primitive_types.integer_types + and not values.element_type() in primitive_types.integer_types + ): + raise TaichiRuntimeTypeError.get_ret(return_type.dtype.to_string(), values.element_type()) + elif len(values.get_shape()) != return_type.ndim: + raise TaichiRuntimeTypeError( + f"Return matrix ndim mismatch, expecting={return_type.ndim}, got={len(values.get_shape())}." + ) + elif return_type.get_shape() != values.get_shape(): + raise TaichiRuntimeTypeError( + f"Return matrix shape mismatch, expecting={return_type.get_shape()}, got={values.get_shape()}." + ) + values = [values] + else: + np_array = np.array(values) + dt, shape, ndim = np_array.dtype, np_array.shape, np_array.ndim + if return_type.dtype in primitive_types.real_types and dt not in ( + float, + int, + np.floating, + np.integer, + ): + raise TaichiRuntimeTypeError.get_ret(return_type.dtype.to_string(), dt) + elif return_type.dtype in primitive_types.integer_types and dt not in (int, np.integer): + raise TaichiRuntimeTypeError.get_ret(return_type.dtype.to_string(), dt) + elif ndim != return_type.ndim: + raise TaichiRuntimeTypeError( + f"Return matrix ndim mismatch, expecting={return_type.ndim}, got={ndim}." + ) + elif return_type.get_shape() != shape: + raise TaichiRuntimeTypeError( + f"Return matrix shape mismatch, expecting={return_type.get_shape()}, got={shape}." + ) + values = [values] + return_exprs += [ti_ops.cast(exp, return_type.dtype) for exp in values] + elif isinstance(return_type, StructType): + if not isinstance(ptr, Struct) or not isinstance(ptr, return_type): + raise TaichiRuntimeTypeError.get_ret(str(return_type), ptr) + values = ptr + assert isinstance(values, Struct) + return_exprs += expr._get_flattened_ptrs(values) + else: + raise TaichiSyntaxError("The return type is not supported now!") + ctx.ast_builder.create_kernel_exprgroup_return( + expr.make_expr_group(return_exprs), _ti_core.DebugInfo(ctx.get_pos_info(node)) + ) + else: + ctx.return_data = node.value.ptr + if ctx.func.return_type is not None: + if len(ctx.func.return_type) == 1: + ctx.return_data = [ctx.return_data] + for i, return_type in enumerate(ctx.func.return_type): + if id(return_type) in primitive_types.type_ids: + ctx.return_data[i] = ti_ops.cast(ctx.return_data[i], return_type) + if len(ctx.func.return_type) == 1: + ctx.return_data = ctx.return_data[0] + if not ctx.is_real_function: + ctx.returned = ReturnStatus.ReturnedValue + return None + + @staticmethod + def build_Module(ctx, node): + with ctx.variable_scope_guard(): + # Do NOT use |build_stmts| which inserts 'del' statements to the + # end and deletes parameters passed into the module + for stmt in node.body: + build_stmt(ctx, stmt) + return None + + @staticmethod + def build_attribute_if_is_dynamic_snode_method(ctx, node): + is_subscript = isinstance(node.value, ast.Subscript) + names = ("append", "deactivate", "length") + if node.attr not in names: + return False + if is_subscript: + x = node.value.value.ptr + indices = node.value.slice.ptr + else: + x = node.value.ptr + indices = [] + if not isinstance(x, Field): + return False + if not x.parent().ptr.type == _ti_core.SNodeType.dynamic: + return False + field_dim = x.snode.ptr.num_active_indices() + indices_expr_group = make_expr_group(*indices) + index_dim = indices_expr_group.size() + if field_dim != index_dim + 1: + return False + if node.attr == "append": + node.ptr = lambda val: append(x.parent(), indices, val) + elif node.attr == "deactivate": + node.ptr = lambda: deactivate(x.parent(), indices) + else: + node.ptr = lambda: length(x.parent(), indices) + return True + + @staticmethod + def build_Attribute(ctx, node): + # There are two valid cases for the methods of Dynamic SNode: + # + # 1. x[i, j].append (where the dimension of the field (3 in this case) is equal to one plus the number of the + # indices (2 in this case) ) + # + # 2. x.append (where the dimension of the field is one, equal to x[()].append) + # + # For the first case, the AST (simplified) is like node = Attribute(value=Subscript(value=x, slice=[i, j]), + # attr="append"), when we build_stmt(node.value)(build the expression of the Subscript i.e. x[i, j]), + # it should build the expression of node.value.value (i.e. x) and node.value.slice (i.e. [i, j]), and raise a + # TaichiIndexError because the dimension of the field is not equal to the number of the indices. Therefore, + # when we meet the error, we can detect whether it is a method of Dynamic SNode and build the expression if + # it is by calling build_attribute_if_is_dynamic_snode_method. If we find that it is not a method of Dynamic + # SNode, we raise the error again. + # + # For the second case, the AST (simplified) is like node = Attribute(value=x, attr="append"), and it does not + # raise error when we build_stmt(node.value). Therefore, when we do not meet the error, we can also detect + # whether it is a method of Dynamic SNode and build the expression if it is by calling + # build_attribute_if_is_dynamic_snode_method. If we find that it is not a method of Dynamic SNode, + # we continue to process it as a normal attribute node. + try: + build_stmt(ctx, node.value) + except Exception as e: + e = handle_exception_from_cpp(e) + if isinstance(e, TaichiIndexError): + node.value.ptr = None + if ASTTransformer.build_attribute_if_is_dynamic_snode_method(ctx, node): + return node.ptr + raise e + + if ASTTransformer.build_attribute_if_is_dynamic_snode_method(ctx, node): + return node.ptr + + if isinstance(node.value.ptr, Expr) and not hasattr(node.value.ptr, node.attr): + if node.attr in Matrix._swizzle_to_keygroup: + keygroup = Matrix._swizzle_to_keygroup[node.attr] + Matrix._keygroup_to_checker[keygroup](node.value.ptr, node.attr) + attr_len = len(node.attr) + if attr_len == 1: + node.ptr = Expr( + impl.get_runtime() + .compiling_callable.ast_builder() + .expr_subscript( + node.value.ptr.ptr, + make_expr_group(keygroup.index(node.attr)), + _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()), + ) + ) + else: + node.ptr = Expr( + _ti_core.subscript_with_multiple_indices( + node.value.ptr.ptr, + [make_expr_group(keygroup.index(ch)) for ch in node.attr], + (attr_len,), + _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()), + ) + ) + else: + from taichi.lang import ( # pylint: disable=C0415 + matrix_ops as tensor_ops, + ) + + node.ptr = getattr(tensor_ops, node.attr) + setattr(node, "caller", node.value.ptr) + else: + node.ptr = getattr(node.value.ptr, node.attr) + return node.ptr + + @staticmethod + def build_BinOp(ctx, node): + build_stmt(ctx, node.left) + build_stmt(ctx, node.right) + # pylint: disable-msg=C0415 + from taichi.lang.matrix_ops import matmul + + op = { + ast.Add: lambda l, r: l + r, + ast.Sub: lambda l, r: l - r, + ast.Mult: lambda l, r: l * r, + ast.Div: lambda l, r: l / r, + ast.FloorDiv: lambda l, r: l // r, + ast.Mod: lambda l, r: l % r, + ast.Pow: lambda l, r: l**r, + ast.LShift: lambda l, r: l << r, + ast.RShift: lambda l, r: l >> r, + ast.BitOr: lambda l, r: l | r, + ast.BitXor: lambda l, r: l ^ r, + ast.BitAnd: lambda l, r: l & r, + ast.MatMult: matmul, + }.get(type(node.op)) + try: + node.ptr = op(node.left.ptr, node.right.ptr) + except TypeError as e: + raise TaichiTypeError(str(e)) from None + return node.ptr + + @staticmethod + def build_AugAssign(ctx, node): + build_stmt(ctx, node.target) + build_stmt(ctx, node.value) + if isinstance(node.target, ast.Name) and node.target.id in ctx.kernel_args: + raise TaichiSyntaxError( + f'Kernel argument "{node.target.id}" is immutable in the kernel. ' + f"If you want to change its value, please create a new variable." + ) + node.ptr = node.target.ptr._augassign(node.value.ptr, type(node.op).__name__) + return node.ptr + + @staticmethod + def build_UnaryOp(ctx, node): + build_stmt(ctx, node.operand) + op = { + ast.UAdd: lambda l: l, + ast.USub: lambda l: -l, + ast.Not: ti_ops.logical_not, + ast.Invert: lambda l: ~l, + }.get(type(node.op)) + node.ptr = op(node.operand.ptr) + return node.ptr + + @staticmethod + def build_bool_op(op): + def inner(operands): + if len(operands) == 1: + return operands[0].ptr + return op(operands[0].ptr, inner(operands[1:])) + + return inner + + @staticmethod + def build_static_and(operands): + for operand in operands: + if not operand.ptr: + return operand.ptr + return operands[-1].ptr + + @staticmethod + def build_static_or(operands): + for operand in operands: + if operand.ptr: + return operand.ptr + return operands[-1].ptr + + @staticmethod + def build_BoolOp(ctx, node): + build_stmts(ctx, node.values) + if ctx.is_in_static_scope(): + ops = { + ast.And: ASTTransformer.build_static_and, + ast.Or: ASTTransformer.build_static_or, + } + elif impl.get_runtime().short_circuit_operators: + ops = { + ast.And: ASTTransformer.build_bool_op(ti_ops.logical_and), + ast.Or: ASTTransformer.build_bool_op(ti_ops.logical_or), + } + else: + ops = { + ast.And: ASTTransformer.build_bool_op(ti_ops.bit_and), + ast.Or: ASTTransformer.build_bool_op(ti_ops.bit_or), + } + op = ops.get(type(node.op)) + node.ptr = op(node.values) + return node.ptr + + @staticmethod + def build_Compare(ctx, node): + build_stmt(ctx, node.left) + build_stmts(ctx, node.comparators) + ops = { + ast.Eq: lambda l, r: l == r, + ast.NotEq: lambda l, r: l != r, + ast.Lt: lambda l, r: l < r, + ast.LtE: lambda l, r: l <= r, + ast.Gt: lambda l, r: l > r, + ast.GtE: lambda l, r: l >= r, + } + ops_static = { + ast.In: lambda l, r: l in r, + ast.NotIn: lambda l, r: l not in r, + } + if ctx.is_in_static_scope(): + ops = {**ops, **ops_static} + operands = [node.left.ptr] + [comparator.ptr for comparator in node.comparators] + val = True + for i, node_op in enumerate(node.ops): + if isinstance(node_op, (ast.Is, ast.IsNot)): + name = "is" if isinstance(node_op, ast.Is) else "is not" + raise TaichiSyntaxError(f'Operator "{name}" in Taichi scope is not supported.') + l = operands[i] + r = operands[i + 1] + op = ops.get(type(node_op)) + + if op is None: + if type(node_op) in ops_static: + raise TaichiSyntaxError(f'"{type(node_op).__name__}" is only supported inside `ti.static`.') + else: + raise TaichiSyntaxError(f'"{type(node_op).__name__}" is not supported in Taichi kernels.') + val = ti_ops.logical_and(val, op(l, r)) + if not isinstance(val, (bool, np.bool_)): + val = ti_ops.cast(val, primitive_types.u1) + node.ptr = val + return node.ptr + + @staticmethod + def get_decorator(ctx, node): + if not isinstance(node, ast.Call): + return "" + for wanted, name in [ + (impl.static, "static"), + (impl.static_assert, "static_assert"), + (impl.grouped, "grouped"), + (ndrange, "ndrange"), + ]: + if ASTResolver.resolve_to(node.func, wanted, ctx.global_vars): + return name + return "" + + @staticmethod + def get_for_loop_targets(node): + """ + Returns the list of indices of the for loop |node|. + See also: https://docs.python.org/3/library/ast.html#ast.For + """ + if isinstance(node.target, ast.Name): + return [node.target.id] + assert isinstance(node.target, ast.Tuple) + return [name.id for name in node.target.elts] + + @staticmethod + def build_static_for(ctx, node, is_grouped): + ti_unroll_limit = impl.get_runtime().unrolling_limit + if is_grouped: + assert len(node.iter.args[0].args) == 1 + ndrange_arg = build_stmt(ctx, node.iter.args[0].args[0]) + if not isinstance(ndrange_arg, _Ndrange): + raise TaichiSyntaxError("Only 'ti.ndrange' is allowed in 'ti.static(ti.grouped(...))'.") + targets = ASTTransformer.get_for_loop_targets(node) + if len(targets) != 1: + raise TaichiSyntaxError(f"Group for should have 1 loop target, found {len(targets)}") + target = targets[0] + iter_time = 0 + alert_already = False + + for value in impl.grouped(ndrange_arg): + iter_time += 1 + if not alert_already and ti_unroll_limit and iter_time > ti_unroll_limit: + alert_already = True + warnings.warn_explicit( + f"""You are unrolling more than + {ti_unroll_limit} iterations, so the compile time may be extremely long. + You can use a non-static for loop if you want to decrease the compile time. + You can disable this warning by setting ti.init(unrolling_limit=0).""", + SyntaxWarning, + ctx.file, + node.lineno + ctx.lineno_offset, + module="taichi", + ) + + with ctx.variable_scope_guard(): + ctx.create_variable(target, value) + build_stmts(ctx, node.body) + status = ctx.loop_status() + if status == LoopStatus.Break: + break + elif status == LoopStatus.Continue: + ctx.set_loop_status(LoopStatus.Normal) + else: + build_stmt(ctx, node.iter) + targets = ASTTransformer.get_for_loop_targets(node) + + iter_time = 0 + alert_already = False + for target_values in node.iter.ptr: + if not isinstance(target_values, collections.abc.Sequence) or len(targets) == 1: + target_values = [target_values] + + iter_time += 1 + if not alert_already and ti_unroll_limit and iter_time > ti_unroll_limit: + alert_already = True + warnings.warn_explicit( + f"""You are unrolling more than + {ti_unroll_limit} iterations, so the compile time may be extremely long. + You can use a non-static for loop if you want to decrease the compile time. + You can disable this warning by setting ti.init(unrolling_limit=0).""", + SyntaxWarning, + ctx.file, + node.lineno + ctx.lineno_offset, + module="taichi", + ) + + with ctx.variable_scope_guard(): + for target, target_value in zip(targets, target_values): + ctx.create_variable(target, target_value) + build_stmts(ctx, node.body) + status = ctx.loop_status() + if status == LoopStatus.Break: + break + elif status == LoopStatus.Continue: + ctx.set_loop_status(LoopStatus.Normal) + return None + + @staticmethod + def build_range_for(ctx, node): + with ctx.variable_scope_guard(): + loop_name = node.target.id + ctx.check_loop_var(loop_name) + loop_var = expr.Expr(ctx.ast_builder.make_id_expr("")) + ctx.create_variable(loop_name, loop_var) + if len(node.iter.args) not in [1, 2]: + raise TaichiSyntaxError(f"Range should have 1 or 2 arguments, found {len(node.iter.args)}") + if len(node.iter.args) == 2: + begin_expr = expr.Expr(build_stmt(ctx, node.iter.args[0])) + end_expr = expr.Expr(build_stmt(ctx, node.iter.args[1])) + + # Warning for implicit dtype conversion + boundary_type_cast_warning(begin_expr) + boundary_type_cast_warning(end_expr) + + begin = ti_ops.cast(begin_expr, primitive_types.i32) + end = ti_ops.cast(end_expr, primitive_types.i32) + + else: + end_expr = expr.Expr(build_stmt(ctx, node.iter.args[0])) + + # Warning for implicit dtype conversion + boundary_type_cast_warning(end_expr) + + begin = ti_ops.cast(expr.Expr(0), primitive_types.i32) + end = ti_ops.cast(end_expr, primitive_types.i32) + + for_di = _ti_core.DebugInfo(ctx.get_pos_info(node)) + ctx.ast_builder.begin_frontend_range_for(loop_var.ptr, begin.ptr, end.ptr, for_di) + build_stmts(ctx, node.body) + ctx.ast_builder.end_frontend_range_for() + return None + + @staticmethod + def build_ndrange_for(ctx, node): + with ctx.variable_scope_guard(): + ndrange_var = impl.expr_init(build_stmt(ctx, node.iter)) + ndrange_begin = ti_ops.cast(expr.Expr(0), primitive_types.i32) + ndrange_end = ti_ops.cast( + expr.Expr(impl.subscript(ctx.ast_builder, ndrange_var.acc_dimensions, 0)), + primitive_types.i32, + ) + ndrange_loop_var = expr.Expr(ctx.ast_builder.make_id_expr("")) + for_di = _ti_core.DebugInfo(ctx.get_pos_info(node)) + ctx.ast_builder.begin_frontend_range_for(ndrange_loop_var.ptr, ndrange_begin.ptr, ndrange_end.ptr, for_di) + I = impl.expr_init(ndrange_loop_var) + targets = ASTTransformer.get_for_loop_targets(node) + if len(targets) != len(ndrange_var.dimensions): + raise TaichiSyntaxError( + "Ndrange for loop with number of the loop variables not equal to " + "the dimension of the ndrange is not supported. " + "Please check if the number of arguments of ti.ndrange() is equal to " + "the number of the loop variables." + ) + for i, target in enumerate(targets): + if i + 1 < len(targets): + target_tmp = impl.expr_init(I // ndrange_var.acc_dimensions[i + 1]) + else: + target_tmp = impl.expr_init(I) + ctx.create_variable( + target, + impl.expr_init( + target_tmp + + impl.subscript( + ctx.ast_builder, + impl.subscript(ctx.ast_builder, ndrange_var.bounds, i), + 0, + ) + ), + ) + if i + 1 < len(targets): + I._assign(I - target_tmp * ndrange_var.acc_dimensions[i + 1]) + build_stmts(ctx, node.body) + ctx.ast_builder.end_frontend_range_for() + return None + + @staticmethod + def build_grouped_ndrange_for(ctx, node): + with ctx.variable_scope_guard(): + ndrange_var = impl.expr_init(build_stmt(ctx, node.iter.args[0])) + ndrange_begin = ti_ops.cast(expr.Expr(0), primitive_types.i32) + ndrange_end = ti_ops.cast( + expr.Expr(impl.subscript(ctx.ast_builder, ndrange_var.acc_dimensions, 0)), + primitive_types.i32, + ) + ndrange_loop_var = expr.Expr(ctx.ast_builder.make_id_expr("")) + for_di = _ti_core.DebugInfo(ctx.get_pos_info(node)) + ctx.ast_builder.begin_frontend_range_for(ndrange_loop_var.ptr, ndrange_begin.ptr, ndrange_end.ptr, for_di) + + targets = ASTTransformer.get_for_loop_targets(node) + if len(targets) != 1: + raise TaichiSyntaxError(f"Group for should have 1 loop target, found {len(targets)}") + target = targets[0] + mat = matrix.make_matrix([0] * len(ndrange_var.dimensions), dt=primitive_types.i32) + target_var = impl.expr_init(mat) + + ctx.create_variable(target, target_var) + I = impl.expr_init(ndrange_loop_var) + for i in range(len(ndrange_var.dimensions)): + if i + 1 < len(ndrange_var.dimensions): + target_tmp = I // ndrange_var.acc_dimensions[i + 1] + else: + target_tmp = I + impl.subscript(ctx.ast_builder, target_var, i)._assign(target_tmp + ndrange_var.bounds[i][0]) + if i + 1 < len(ndrange_var.dimensions): + I._assign(I - target_tmp * ndrange_var.acc_dimensions[i + 1]) + build_stmts(ctx, node.body) + ctx.ast_builder.end_frontend_range_for() + return None + + @staticmethod + def build_struct_for(ctx, node, is_grouped): + # for i, j in x + # for I in ti.grouped(x) + targets = ASTTransformer.get_for_loop_targets(node) + + for target in targets: + ctx.check_loop_var(target) + + with ctx.variable_scope_guard(): + if is_grouped: + if len(targets) != 1: + raise TaichiSyntaxError(f"Group for should have 1 loop target, found {len(targets)}") + target = targets[0] + loop_var = build_stmt(ctx, node.iter) + loop_indices = expr.make_var_list(size=len(loop_var.shape), ast_builder=ctx.ast_builder) + expr_group = expr.make_expr_group(loop_indices) + impl.begin_frontend_struct_for(ctx.ast_builder, expr_group, loop_var) + ctx.create_variable(target, matrix.make_matrix(loop_indices, dt=primitive_types.i32)) + build_stmts(ctx, node.body) + ctx.ast_builder.end_frontend_struct_for() + else: + _vars = [] + for name in targets: + var = expr.Expr(ctx.ast_builder.make_id_expr("")) + _vars.append(var) + ctx.create_variable(name, var) + loop_var = node.iter.ptr + expr_group = expr.make_expr_group(*_vars) + impl.begin_frontend_struct_for(ctx.ast_builder, expr_group, loop_var) + build_stmts(ctx, node.body) + ctx.ast_builder.end_frontend_struct_for() + return None + + @staticmethod + def build_mesh_for(ctx, node): + targets = ASTTransformer.get_for_loop_targets(node) + if len(targets) != 1: + raise TaichiSyntaxError("Mesh for should have 1 loop target, found {len(targets)}") + target = targets[0] + + with ctx.variable_scope_guard(): + var = expr.Expr(ctx.ast_builder.make_id_expr("")) + ctx.mesh = node.iter.ptr.mesh + assert isinstance(ctx.mesh, impl.MeshInstance) + mesh_idx = mesh.MeshElementFieldProxy(ctx.mesh, node.iter.ptr._type, var.ptr) + ctx.create_variable(target, mesh_idx) + ctx.ast_builder.begin_frontend_mesh_for( + mesh_idx.ptr, + ctx.mesh.mesh_ptr, + node.iter.ptr._type, + _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()), + ) + build_stmts(ctx, node.body) + ctx.mesh = None + ctx.ast_builder.end_frontend_mesh_for() + return None + + @staticmethod + def build_nested_mesh_for(ctx, node): + targets = ASTTransformer.get_for_loop_targets(node) + if len(targets) != 1: + raise TaichiSyntaxError("Nested-mesh for should have 1 loop target, found {len(targets)}") + target = targets[0] + + with ctx.variable_scope_guard(): + ctx.mesh = node.iter.ptr.mesh + assert isinstance(ctx.mesh, impl.MeshInstance) + loop_name = node.target.id + "_index__" + loop_var = expr.Expr(ctx.ast_builder.make_id_expr("")) + ctx.create_variable(loop_name, loop_var) + begin = expr.Expr(0) + end = ti_ops.cast(node.iter.ptr.size, primitive_types.i32) + for_di = _ti_core.DebugInfo(ctx.get_pos_info(node)) + ctx.ast_builder.begin_frontend_range_for(loop_var.ptr, begin.ptr, end.ptr, for_di) + entry_expr = _ti_core.get_relation_access( + ctx.mesh.mesh_ptr, + node.iter.ptr.from_index.ptr, + node.iter.ptr.to_element_type, + loop_var.ptr, + ) + entry_expr.type_check(impl.get_runtime().prog.config()) + mesh_idx = mesh.MeshElementFieldProxy(ctx.mesh, node.iter.ptr.to_element_type, entry_expr) + ctx.create_variable(target, mesh_idx) + build_stmts(ctx, node.body) + ctx.ast_builder.end_frontend_range_for() + + return None + + @staticmethod + def build_For(ctx, node): + if node.orelse: + raise TaichiSyntaxError("'else' clause for 'for' not supported in Taichi kernels") + decorator = ASTTransformer.get_decorator(ctx, node.iter) + double_decorator = "" + if decorator != "" and len(node.iter.args) == 1: + double_decorator = ASTTransformer.get_decorator(ctx, node.iter.args[0]) + + if decorator == "static": + if double_decorator == "static": + raise TaichiSyntaxError("'ti.static' cannot be nested") + with ctx.loop_scope_guard(is_static=True): + return ASTTransformer.build_static_for(ctx, node, double_decorator == "grouped") + with ctx.loop_scope_guard(): + if decorator == "ndrange": + if double_decorator != "": + raise TaichiSyntaxError("No decorator is allowed inside 'ti.ndrange") + return ASTTransformer.build_ndrange_for(ctx, node) + if decorator == "grouped": + if double_decorator == "static": + raise TaichiSyntaxError("'ti.static' is not allowed inside 'ti.grouped'") + elif double_decorator == "ndrange": + return ASTTransformer.build_grouped_ndrange_for(ctx, node) + elif double_decorator == "grouped": + raise TaichiSyntaxError("'ti.grouped' cannot be nested") + else: + return ASTTransformer.build_struct_for(ctx, node, is_grouped=True) + elif ( + isinstance(node.iter, ast.Call) + and isinstance(node.iter.func, ast.Name) + and node.iter.func.id == "range" + ): + return ASTTransformer.build_range_for(ctx, node) + else: + build_stmt(ctx, node.iter) + if isinstance(node.iter.ptr, mesh.MeshElementField): + if not _ti_core.is_extension_supported(impl.default_cfg().arch, _ti_core.Extension.mesh): + raise Exception( + "Backend " + str(impl.default_cfg().arch) + " doesn't support MeshTaichi extension" + ) + return ASTTransformer.build_mesh_for(ctx, node) + if isinstance(node.iter.ptr, mesh.MeshRelationAccessProxy): + return ASTTransformer.build_nested_mesh_for(ctx, node) + # Struct for + return ASTTransformer.build_struct_for(ctx, node, is_grouped=False) + + @staticmethod + def build_While(ctx, node): + if node.orelse: + raise TaichiSyntaxError("'else' clause for 'while' not supported in Taichi kernels") + + with ctx.loop_scope_guard(): + stmt_dbg_info = _ti_core.DebugInfo(ctx.get_pos_info(node)) + ctx.ast_builder.begin_frontend_while(expr.Expr(1, dtype=primitive_types.i32).ptr, stmt_dbg_info) + while_cond = build_stmt(ctx, node.test) + impl.begin_frontend_if(ctx.ast_builder, while_cond, stmt_dbg_info) + ctx.ast_builder.begin_frontend_if_true() + ctx.ast_builder.pop_scope() + ctx.ast_builder.begin_frontend_if_false() + ctx.ast_builder.insert_break_stmt(stmt_dbg_info) + ctx.ast_builder.pop_scope() + build_stmts(ctx, node.body) + ctx.ast_builder.pop_scope() + return None + + @staticmethod + def build_If(ctx, node): + build_stmt(ctx, node.test) + is_static_if = ASTTransformer.get_decorator(ctx, node.test) == "static" + + if is_static_if: + if node.test.ptr: + build_stmts(ctx, node.body) + else: + build_stmts(ctx, node.orelse) + return node + + with ctx.non_static_if_guard(node): + stmt_dbg_info = _ti_core.DebugInfo(ctx.get_pos_info(node)) + impl.begin_frontend_if(ctx.ast_builder, node.test.ptr, stmt_dbg_info) + ctx.ast_builder.begin_frontend_if_true() + build_stmts(ctx, node.body) + ctx.ast_builder.pop_scope() + ctx.ast_builder.begin_frontend_if_false() + build_stmts(ctx, node.orelse) + ctx.ast_builder.pop_scope() + return None + + @staticmethod + def build_Expr(ctx, node): + build_stmt(ctx, node.value) + return None + + @staticmethod + def build_IfExp(ctx, node): + build_stmt(ctx, node.test) + build_stmt(ctx, node.body) + build_stmt(ctx, node.orelse) + + has_tensor_type = False + if isinstance(node.test.ptr, expr.Expr) and node.test.ptr.is_tensor(): + has_tensor_type = True + if isinstance(node.body.ptr, expr.Expr) and node.body.ptr.is_tensor(): + has_tensor_type = True + if isinstance(node.orelse.ptr, expr.Expr) and node.orelse.ptr.is_tensor(): + has_tensor_type = True + + if has_tensor_type: + if isinstance(node.test.ptr, expr.Expr) and node.test.ptr.is_tensor(): + raise TaichiSyntaxError( + "Using conditional expression for element-wise select operation on " + "Taichi vectors/matrices is deprecated and removed starting from Taichi v1.5.0 " + 'Please use "ti.select" instead.' + ) + node.ptr = ti_ops.select(node.test.ptr, node.body.ptr, node.orelse.ptr) + return node.ptr + + is_static_if = ASTTransformer.get_decorator(ctx, node.test) == "static" + + if is_static_if: + if node.test.ptr: + node.ptr = build_stmt(ctx, node.body) + else: + node.ptr = build_stmt(ctx, node.orelse) + return node.ptr + + node.ptr = ti_ops.ifte(node.test.ptr, node.body.ptr, node.orelse.ptr) + return node.ptr + + @staticmethod + def _is_string_mod_args(msg): + # 1. str % (a, b, c, ...) + # 2. str % single_item + # Note that |msg.right| may not be a tuple. + if not isinstance(msg, ast.BinOp): + return False + if not isinstance(msg.op, ast.Mod): + return False + if isinstance(msg.left, ast.Str): + return True + if isinstance(msg.left, ast.Constant) and isinstance(msg.left.value, str): + return True + return False + + @staticmethod + def _handle_string_mod_args(ctx, node): + msg = build_stmt(ctx, node.left) + args = build_stmt(ctx, node.right) + if not isinstance(args, collections.abc.Sequence): + args = (args,) + args = [expr.Expr(x).ptr for x in args] + return msg, args + + @staticmethod + def ti_format_list_to_assert_msg(raw): + # TODO: ignore formats here for now + entries, _ = impl.ti_format_list_to_content_entries([raw]) + msg = "" + args = [] + for entry in entries: + if isinstance(entry, str): + msg += entry + elif isinstance(entry, _ti_core.Expr): + ty = entry.get_rvalue_type() + if ty in primitive_types.real_types: + msg += "%f" + elif ty in primitive_types.integer_types: + msg += "%d" + else: + raise TaichiSyntaxError(f"Unsupported data type: {type(ty)}") + args.append(entry) + else: + raise TaichiSyntaxError(f"Unsupported type: {type(entry)}") + return msg, args + + @staticmethod + def build_Assert(ctx, node): + extra_args = [] + if node.msg is not None: + if ASTTransformer._is_string_mod_args(node.msg): + msg, extra_args = ASTTransformer._handle_string_mod_args(ctx, node.msg) + else: + msg = build_stmt(ctx, node.msg) + if isinstance(node.msg, ast.Constant): + msg = str(msg) + elif isinstance(node.msg, ast.Str): + pass + elif isinstance(msg, collections.abc.Sequence) and len(msg) > 0 and msg[0] == "__ti_format__": + msg, extra_args = ASTTransformer.ti_format_list_to_assert_msg(msg) + else: + raise TaichiSyntaxError(f"assert info must be constant or formatted string, not {type(msg)}") + else: + msg = unparse(node.test) + test = build_stmt(ctx, node.test) + impl.ti_assert(test, msg.strip(), extra_args, _ti_core.DebugInfo(ctx.get_pos_info(node))) + return None + + @staticmethod + def build_Break(ctx, node): + if ctx.is_in_static_for(): + nearest_non_static_if: ast.If = ctx.current_loop_scope().nearest_non_static_if + if nearest_non_static_if: + msg = ctx.get_pos_info(nearest_non_static_if.test) + msg += ( + "You are trying to `break` a static `for` loop, " + "but the `break` statement is inside a non-static `if`. " + ) + raise TaichiSyntaxError(msg) + ctx.set_loop_status(LoopStatus.Break) + else: + ctx.ast_builder.insert_break_stmt(_ti_core.DebugInfo(ctx.get_pos_info(node))) + return None + + @staticmethod + def build_Continue(ctx, node): + if ctx.is_in_static_for(): + nearest_non_static_if: ast.If = ctx.current_loop_scope().nearest_non_static_if + if nearest_non_static_if: + msg = ctx.get_pos_info(nearest_non_static_if.test) + msg += ( + "You are trying to `continue` a static `for` loop, " + "but the `continue` statement is inside a non-static `if`. " + ) + raise TaichiSyntaxError(msg) + ctx.set_loop_status(LoopStatus.Continue) + else: + ctx.ast_builder.insert_continue_stmt(_ti_core.DebugInfo(ctx.get_pos_info(node))) + return None + + @staticmethod + def build_Pass(ctx, node): + return None + + +build_stmt = ASTTransformer() + + +def build_stmts(ctx, stmts): + with ctx.variable_scope_guard(): + for stmt in stmts: + if ctx.returned != ReturnStatus.NoReturn or ctx.loop_status() != LoopStatus.Normal: + break + else: + build_stmt(ctx, stmt) + return stmts diff --git a/local_packages/taichi/lang/ast/ast_transformer_utils.py b/local_packages/taichi/lang/ast/ast_transformer_utils.py new file mode 100644 index 0000000..4a10d5e --- /dev/null +++ b/local_packages/taichi/lang/ast/ast_transformer_utils.py @@ -0,0 +1,316 @@ +import ast +import builtins +import traceback +from enum import Enum +from sys import version_info +from textwrap import TextWrapper +from typing import List + +from taichi.lang import impl +from taichi.lang.exception import ( + TaichiCompilationError, + TaichiNameError, + TaichiSyntaxError, + handle_exception_from_cpp, +) + + +class Builder: + def __call__(self, ctx, node): + method = getattr(self, "build_" + node.__class__.__name__, None) + try: + if method is None: + error_msg = f'Unsupported node "{node.__class__.__name__}"' + raise TaichiSyntaxError(error_msg) + info = ctx.get_pos_info(node) if isinstance(node, (ast.stmt, ast.expr)) else "" + with impl.get_runtime().src_info_guard(info): + return method(ctx, node) + except Exception as e: + if impl.get_runtime().print_full_traceback: + raise e + if ctx.raised or not isinstance(node, (ast.stmt, ast.expr)): + raise e.with_traceback(None) + ctx.raised = True + e = handle_exception_from_cpp(e) + if not isinstance(e, TaichiCompilationError): + msg = ctx.get_pos_info(node) + traceback.format_exc() + raise TaichiCompilationError(msg) from None + msg = ctx.get_pos_info(node) + str(e) + raise type(e)(msg) from None + + +class VariableScopeGuard: + def __init__(self, scopes): + self.scopes = scopes + + def __enter__(self): + self.scopes.append({}) + + def __exit__(self, exc_type, exc_val, exc_tb): + self.scopes.pop() + + +class StaticScopeStatus: + def __init__(self): + self.is_in_static_scope = False + + +class StaticScopeGuard: + def __init__(self, status): + self.status = status + + def __enter__(self): + self.prev = self.status.is_in_static_scope + self.status.is_in_static_scope = True + + def __exit__(self, exc_type, exc_val, exc_tb): + self.status.is_in_static_scope = self.prev + + +class NonStaticControlFlowStatus: + def __init__(self): + self.is_in_non_static_control_flow = False + + +class NonStaticControlFlowGuard: + def __init__(self, status: NonStaticControlFlowStatus): + self.status = status + + def __enter__(self): + self.prev = self.status.is_in_non_static_control_flow + self.status.is_in_non_static_control_flow = True + + def __exit__(self, exc_type, exc_val, exc_tb): + self.status.is_in_non_static_control_flow = self.prev + + +class LoopStatus(Enum): + Normal = 0 + Break = 1 + Continue = 2 + + +class LoopScopeAttribute: + def __init__(self, is_static): + self.is_static = is_static + self.status = LoopStatus.Normal + self.nearest_non_static_if = None + + +class LoopScopeGuard: + def __init__(self, scopes, non_static_guard=None): + self.scopes = scopes + self.non_static_guard = non_static_guard + + def __enter__(self): + self.scopes.append(LoopScopeAttribute(self.non_static_guard is None)) + if self.non_static_guard: + self.non_static_guard.__enter__() + + def __exit__(self, exc_type, exc_val, exc_tb): + self.scopes.pop() + if self.non_static_guard: + self.non_static_guard.__exit__(exc_type, exc_val, exc_tb) + + +class NonStaticIfGuard: + def __init__( + self, + if_node: ast.If, + loop_attribute: LoopScopeAttribute, + non_static_status: NonStaticControlFlowStatus, + ): + self.loop_attribute = loop_attribute + self.if_node = if_node + self.non_static_guard = NonStaticControlFlowGuard(non_static_status) + + def __enter__(self): + if self.loop_attribute: + self.old_non_static_if = self.loop_attribute.nearest_non_static_if + self.loop_attribute.nearest_non_static_if = self.if_node + self.non_static_guard.__enter__() + + def __exit__(self, exc_type, exc_val, exc_tb): + if self.loop_attribute: + self.loop_attribute.nearest_non_static_if = self.old_non_static_if + self.non_static_guard.__exit__(exc_type, exc_val, exc_tb) + + +class ReturnStatus(Enum): + NoReturn = 0 + ReturnedVoid = 1 + ReturnedValue = 2 + + +class ASTTransformerContext: + def __init__( + self, + excluded_parameters=(), + is_kernel=True, + func=None, + arg_features=None, + global_vars=None, + argument_data=None, + file=None, + src=None, + start_lineno=None, + ast_builder=None, + is_real_function=False, + ): + self.func = func + self.local_scopes = [] + self.loop_scopes: List[LoopScopeAttribute] = [] + self.excluded_parameters = excluded_parameters + self.is_kernel = is_kernel + self.arg_features = arg_features + self.returns = None + self.global_vars = global_vars + self.argument_data = argument_data + self.return_data = None + self.file = file + self.src = src + self.indent = 0 + for c in self.src[0]: + if c == " ": + self.indent += 1 + else: + break + self.lineno_offset = start_lineno - 1 + self.raised = False + self.non_static_control_flow_status = NonStaticControlFlowStatus() + self.static_scope_status = StaticScopeStatus() + self.returned = ReturnStatus.NoReturn + self.ast_builder = ast_builder + self.visited_funcdef = False + self.is_real_function = is_real_function + self.kernel_args = [] + + # e.g.: FunctionDef, Module, Global + def variable_scope_guard(self): + return VariableScopeGuard(self.local_scopes) + + # e.g.: For, While + def loop_scope_guard(self, is_static=False): + if is_static: + return LoopScopeGuard(self.loop_scopes) + return LoopScopeGuard(self.loop_scopes, self.non_static_control_flow_guard()) + + def non_static_if_guard(self, if_node: ast.If): + return NonStaticIfGuard( + if_node, + self.current_loop_scope() if self.loop_scopes else None, + self.non_static_control_flow_status, + ) + + def non_static_control_flow_guard(self): + return NonStaticControlFlowGuard(self.non_static_control_flow_status) + + def static_scope_guard(self): + return StaticScopeGuard(self.static_scope_status) + + def current_scope(self): + return self.local_scopes[-1] + + def current_loop_scope(self): + return self.loop_scopes[-1] + + def loop_status(self): + if self.loop_scopes: + return self.loop_scopes[-1].status + return LoopStatus.Normal + + def set_loop_status(self, status): + self.loop_scopes[-1].status = status + + def is_in_static_for(self): + if self.loop_scopes: + return self.loop_scopes[-1].is_static + return False + + def is_in_non_static_control_flow(self): + return self.non_static_control_flow_status.is_in_non_static_control_flow + + def is_in_static_scope(self): + return self.static_scope_status.is_in_static_scope + + def is_var_declared(self, name): + for s in self.local_scopes: + if name in s: + return True + return False + + def create_variable(self, name, var): + if name in self.current_scope(): + raise TaichiSyntaxError("Recreating variables is not allowed") + self.current_scope()[name] = var + + def check_loop_var(self, loop_var): + if self.is_var_declared(loop_var): + raise TaichiSyntaxError( + f"Variable '{loop_var}' is already declared in the outer scope and cannot be used as loop variable" + ) + + def get_var_by_name(self, name): + for s in reversed(self.local_scopes): + if name in s: + return s[name] + if name in self.global_vars: + var = self.global_vars[name] + from taichi.lang.matrix import Matrix, make_matrix # pylint: disable-msg=C0415 + + if isinstance(var, Matrix): + return make_matrix(var.to_list()) + return var + try: + return getattr(builtins, name) + except AttributeError: + raise TaichiNameError(f'Name "{name}" is not defined') + + def get_pos_info(self, node): + msg = f'File "{self.file}", line {node.lineno + self.lineno_offset}, in {self.func.func.__name__}:\n' + if version_info < (3, 8): + msg += self.src[node.lineno - 1] + "\n" + return msg + col_offset = self.indent + node.col_offset + end_col_offset = self.indent + node.end_col_offset + + wrapper = TextWrapper(width=80) + + def gen_line(code, hint): + hint += " " * (len(code) - len(hint)) + code = wrapper.wrap(code) + hint = wrapper.wrap(hint) + if not len(code): + return "\n\n" + return "".join([c + "\n" + h + "\n" for c, h in zip(code, hint)]) + + if node.lineno == node.end_lineno: + hint = " " * col_offset + "^" * (end_col_offset - col_offset) + msg += gen_line(self.src[node.lineno - 1], hint) + else: + node_type = node.__class__.__name__ + + if node_type in ["For", "While", "FunctionDef", "If"]: + end_lineno = max(node.body[0].lineno - 1, node.lineno) + else: + end_lineno = node.end_lineno + + for i in range(node.lineno - 1, end_lineno): + last = len(self.src[i]) + while last > 0 and (self.src[i][last - 1].isspace() or not self.src[i][last - 1].isprintable()): + last -= 1 + first = 0 + while first < len(self.src[i]) and ( + self.src[i][first].isspace() or not self.src[i][first].isprintable() + ): + first += 1 + if i == node.lineno - 1: + hint = " " * col_offset + "^" * (last - col_offset) + elif i == node.end_lineno - 1: + hint = " " * first + "^" * (end_col_offset - first) + elif first < last: + hint = " " * first + "^" * (last - first) + else: + hint = "" + msg += gen_line(self.src[i], hint) + return msg diff --git a/local_packages/taichi/lang/ast/checkers.py b/local_packages/taichi/lang/ast/checkers.py new file mode 100644 index 0000000..bb95dd0 --- /dev/null +++ b/local_packages/taichi/lang/ast/checkers.py @@ -0,0 +1,104 @@ +import ast + +from taichi.lang._wrap_inspect import getsourcefile, getsourcelines +from taichi.lang.exception import TaichiSyntaxError + + +class KernelSimplicityASTChecker(ast.NodeVisitor): + class ScopeGuard: + def __init__(self, checker): + self.c = checker + self._allows_for_loop = True + self._allows_more_stmt = True + + @property + def allows_for_loop(self): + return self._allows_for_loop + + @property + def allows_more_stmt(self): + return self._allows_more_stmt + + def mark_no_more_for_loop(self): + self._allows_for_loop = False + + def mark_no_more_stmt(self): + self._allows_for_loop = False + self._allows_more_stmt = False + + def __enter__(self): + self.c._scope_guards.append(self) + + def __exit__(self, exc_type, exc_val, exc_tb): + self.c._scope_guards.pop() + + def __init__(self, func): + super().__init__() + self._func_file = getsourcefile(func) + self._func_lineno = getsourcelines(func)[1] + self._func_name = func.__name__ + self._scope_guards = [] + + def new_scope(self): + return KernelSimplicityASTChecker.ScopeGuard(self) + + @property + def current_scope(self): + return self._scope_guards[-1] + + @property + def top_level(self): + return len(self._scope_guards) == 0 + + def get_error_location(self, node): + # -1 because ast's lineno is 1-based. + lineno = self._func_lineno + node.lineno - 1 + return f"file={self._func_file} kernel={self._func_name} line={lineno}" + + @staticmethod + def should_check(node): + if not isinstance(node, ast.stmt): + return False + # TODO(#536): Frontend pass should help make sure |func| is a valid AST for + # Taichi. + ignored = [ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef] + return not any(map(lambda t: isinstance(node, t), ignored)) + + def generic_visit(self, node): + if not self.should_check(node): + super().generic_visit(node) + return + + if not (self.top_level or self.current_scope.allows_more_stmt): + raise TaichiSyntaxError(f"No more statements allowed, at {self.get_error_location(node)}") + old_top_level = self.top_level + if old_top_level: + self._scope_guards.append(self.new_scope()) + # Marking here before the visit has the effect of disallow for-loops in + # nested blocks. E.g. if |node| is a IfStmt, then the checker would disallow + # for-loops inside it. + self.current_scope.mark_no_more_for_loop() + super().generic_visit(node) + if old_top_level: + self._scope_guards.pop() + + @staticmethod + def visit_for(node): + # TODO: since autodiff is enhanced, AST checker rules should be relaxed. This part should be updated. + # original code is #def visit_For(self, node) without #@staticmethod before fix pylint R0201 + return + # is_static = (isinstance(node.iter, ast.Call) + # and isinstance(node.iter.func, ast.Attribute) + # and isinstance(node.iter.func.value, ast.Name) + # and node.iter.func.value.id == 'ti' + # and node.iter.func.attr == 'static') + # if not (self.top_level or self.current_scope.allows_for_loop + # or is_static): + # raise TaichiSyntaxError( + # f'No more for loops allowed, at {self.get_error_location(node)}' + # ) + # with self.new_scope(): + # super().generic_visit(node) + # + # if not (self.top_level or is_static): + # self.current_scope.mark_no_more_stmt() diff --git a/local_packages/taichi/lang/ast/symbol_resolver.py b/local_packages/taichi/lang/ast/symbol_resolver.py new file mode 100644 index 0000000..96f6aca --- /dev/null +++ b/local_packages/taichi/lang/ast/symbol_resolver.py @@ -0,0 +1,55 @@ +"""Provides helpers to resolve AST nodes.""" + +import ast + + +class ASTResolver: + """Provides helper methods to resolve AST nodes.""" + + @staticmethod + def resolve_to(node, wanted, scope): + """Check if symbol ``node`` resolves to ``wanted`` object. + + This is only intended to check if a given AST node resolves to a symbol + under some namespaces, e.g. the ``a.b.c.foo`` pattern, but not meant for + more complicated expressions like ``(a + b).foo``. + + Args: + node (Union[ast.Attribute, ast.Name]): an AST node to be resolved. + wanted (Any): The expected python object. + scope (Dict[str, Any]): Maps from symbol names to objects, for + example, globals() + + Returns: + bool: The checked result. + """ + if isinstance(node, ast.Name): + return scope.get(node.id) is wanted + + if not isinstance(node, ast.Attribute): + return False + + v = node.value + chain = [node.attr] + while isinstance(v, ast.Attribute): + chain.append(v.attr) + v = v.value + if not isinstance(v, ast.Name): + # Example cases that fall under this branch: + # + # x[i].attr: ast.Subscript + # (a + b).attr: ast.BinOp + # ... + return False + chain.append(v.id) + + for attr in reversed(chain): + try: + if isinstance(scope, dict): + scope = scope[attr] + else: + scope = getattr(scope, attr) + except (KeyError, AttributeError): + return False + # The name ``scope`` here could be a bit confusing + return scope is wanted diff --git a/local_packages/taichi/lang/ast/transform.py b/local_packages/taichi/lang/ast/transform.py new file mode 100644 index 0000000..f9ca13b --- /dev/null +++ b/local_packages/taichi/lang/ast/transform.py @@ -0,0 +1,7 @@ +from taichi.lang.ast.ast_transformer import ASTTransformer +from taichi.lang.ast.ast_transformer_utils import ASTTransformerContext + + +def transform_tree(tree, ctx: ASTTransformerContext): + ASTTransformer()(ctx, tree) + return ctx.return_data diff --git a/local_packages/taichi/lang/common_ops.py b/local_packages/taichi/lang/common_ops.py new file mode 100644 index 0000000..12d95f6 --- /dev/null +++ b/local_packages/taichi/lang/common_ops.py @@ -0,0 +1,307 @@ +from taichi.lang import ops +from taichi.lang.util import in_python_scope +from taichi.types import primitive_types +from typing import TYPE_CHECKING + + +class TaichiOperations: + """The base class of taichi operations of expressions. Subclasses: :class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`""" + + if TYPE_CHECKING: + # Make pylint happy + def __getattr__(self, item): + pass + + def __neg__(self): + return ops.neg(self) + + def __abs__(self): + return ops.abs(self) + + def __add__(self, other): + return ops.add(self, other) + + def __radd__(self, other): + return ops.add(other, self) + + def __sub__(self, other): + return ops.sub(self, other) + + def __rsub__(self, other): + return ops.sub(other, self) + + def __mul__(self, other): + return ops.mul(self, other) + + def __rmul__(self, other): + return ops.mul(other, self) + + def __truediv__(self, other): + return ops.truediv(self, other) + + def __rtruediv__(self, other): + return ops.truediv(other, self) + + def __floordiv__(self, other): + return ops.floordiv(self, other) + + def __rfloordiv__(self, other): + return ops.floordiv(other, self) + + def __mod__(self, other): + return ops.mod(self, other) + + def __rmod__(self, other): + return ops.mod(other, self) + + def __pow__(self, other, modulo=None): + return ops.pow(self, other) + + def __rpow__(self, other, modulo=None): + return ops.pow(other, self) + + def __le__(self, other): + return ops.cmp_le(self, other) + + def __lt__(self, other): + return ops.cmp_lt(self, other) + + def __ge__(self, other): + return ops.cmp_ge(self, other) + + def __gt__(self, other): + return ops.cmp_gt(self, other) + + def __eq__(self, other): + return ops.cmp_eq(self, other) + + def __ne__(self, other): + return ops.cmp_ne(self, other) + + def __and__(self, other): + return ops.bit_and(self, other) + + def __rand__(self, other): + return ops.bit_and(other, self) + + def __or__(self, other): + return ops.bit_or(self, other) + + def __ror__(self, other): + return ops.bit_or(other, self) + + def __xor__(self, other): + return ops.bit_xor(self, other) + + def __rxor__(self, other): + return ops.bit_xor(other, self) + + def __lshift__(self, other): + return ops.bit_shl(self, other) + + def __rlshift__(self, other): + return ops.bit_shl(other, self) + + def __rshift__(self, other): + return ops.bit_sar(self, other) + + def __rrshift__(self, other): + return ops.bit_sar(other, self) + + def __invert__(self): # ~a => a.__invert__() + return ops.bit_not(self) + + def __not__(self): # not a => a.__not__() + return ops.logical_not(self) + + def _atomic_add(self, other): + """Return the new expression of computing atomic add between self and a given operand. + + Args: + other (Any): Given operand. + + Returns: + :class:`~taichi.lang.expr.Expr`: The computing expression of atomic add.""" + return ops.atomic_add(self, other) + + def _atomic_mul(self, other): + """Return the new expression of computing atomic mul between self and a given operand. + + Args: + other (Any): Given operand. + + Returns: + :class:`~taichi.lang.expr.Expr`: The computing expression of atomic mul.""" + return ops.atomic_mul(self, other) + + def _atomic_sub(self, other): + """Return the new expression of computing atomic sub between self and a given operand. + + Args: + other (Any): Given operand. + + Returns: + :class:`~taichi.lang.expr.Expr`: The computing expression of atomic sub.""" + return ops.atomic_sub(self, other) + + def _atomic_and(self, other): + """Return the new expression of computing atomic and between self and a given operand. + + Args: + other (Any): Given operand. + + Returns: + :class:`~taichi.lang.expr.Expr`: The computing expression of atomic and.""" + return ops.atomic_and(self, other) + + def _atomic_xor(self, other): + """Return the new expression of computing atomic xor between self and a given operand. + + Args: + other (Any): Given operand. + + Returns: + :class:`~taichi.lang.expr.Expr`: The computing expression of atomic xor.""" + return ops.atomic_xor(self, other) + + def _atomic_or(self, other): + """Return the new expression of computing atomic or between self and a given operand. + + Args: + other (Any): Given operand. + + Returns: + :class:`~taichi.lang.expr.Expr`: The computing expression of atomic or.""" + return ops.atomic_or(self, other) + + # In-place operators in python scope returns NotImplemented to fall back to normal operators + def __iadd__(self, other): + if in_python_scope(): + return NotImplemented + self._atomic_add(other) + return self + + def __imul__(self, other): + if in_python_scope(): + return NotImplemented + self._atomic_mul(other) + return self + + def __isub__(self, other): + if in_python_scope(): + return NotImplemented + self._atomic_sub(other) + return self + + def __iand__(self, other): + if in_python_scope(): + return NotImplemented + self._atomic_and(other) + return self + + def __ixor__(self, other): + if in_python_scope(): + return NotImplemented + self._atomic_xor(other) + return self + + def __ior__(self, other): + if in_python_scope(): + return NotImplemented + self._atomic_or(other) + return self + + # we don't support atomic_mul/truediv/floordiv/mod yet: + def __imul__(self, other): + if in_python_scope(): + return NotImplemented + self._assign(ops.mul(self, other)) + return self + + def __itruediv__(self, other): + if in_python_scope(): + return NotImplemented + self._assign(ops.truediv(self, other)) + return self + + def __ifloordiv__(self, other): + if in_python_scope(): + return NotImplemented + self._assign(ops.floordiv(self, other)) + return self + + def __imod__(self, other): + if in_python_scope(): + return NotImplemented + self._assign(ops.mod(self, other)) + return self + + def __ilshift__(self, other): + if in_python_scope(): + return NotImplemented + self._assign(ops.bit_shl(self, other)) + return self + + def __irshift__(self, other): + if in_python_scope(): + return NotImplemented + self._assign(ops.bit_sar(self, other)) + return self + + def __ipow__(self, other): + if in_python_scope(): + return NotImplemented + self._assign(ops.pow(self, other)) + return self + + def _assign(self, other): + """Assign the expression of the given operand to self. + + Args: + other (Any): Given operand. + + Returns: + :class:`~taichi.lang.expr.Expr`: The expression after assigning.""" + return ops.assign(self, other) + + def _augassign(self, x, op): + """Generate the computing expression between self and the given operand of given operator and assigned to self. + + Args: + x (Any): Given operand. + op (str): The name of operator.""" + if op == "Add": + self += x + elif op == "Sub": + self -= x + elif op == "Mult": + self *= x + elif op == "Div": + self /= x + elif op == "FloorDiv": + self //= x + elif op == "Mod": + self %= x + elif op == "BitAnd": + self &= x + elif op == "BitOr": + self |= x + elif op == "BitXor": + self ^= x + elif op == "RShift": + self >>= x + elif op == "LShift": + self <<= x + elif op == "Pow": + self **= x + else: + assert False, op + + def __ti_int__(self): + return ops.cast(self, int) + + def __ti_bool__(self): + return ops.cast(self, primitive_types.u1) + + def __ti_float__(self): + return ops.cast(self, float) diff --git a/local_packages/taichi/lang/enums.py b/local_packages/taichi/lang/enums.py new file mode 100644 index 0000000..7c2e2ad --- /dev/null +++ b/local_packages/taichi/lang/enums.py @@ -0,0 +1,47 @@ +from taichi._lib import core as _ti_core + +Layout = _ti_core.Layout +AutodiffMode = _ti_core.AutodiffMode +SNodeGradType = _ti_core.SNodeGradType +Format = _ti_core.Format +BoundaryMode = _ti_core.BoundaryMode + + +def to_boundary_enum(boundary): + if boundary == "clamp": + return BoundaryMode.CLAMP + if boundary == "unsafe": + return BoundaryMode.UNSAFE + raise ValueError(f"Invalid boundary argument: {boundary}") + + +class DeviceCapability: + spirv_version_1_3 = "spirv_version=66304" + spirv_version_1_4 = "spirv_version=66560" + spirv_version_1_5 = "spirv_version=66816" + spirv_has_int8 = "spirv_has_int8" + spirv_has_int16 = "spirv_has_int16" + spirv_has_int64 = "spirv_has_int64" + spirv_has_float16 = "spirv_has_float16" + spirv_has_float64 = "spirv_has_float64" + spirv_has_atomic_int64 = "spirv_has_atomic_int64" + spirv_has_atomic_float16 = "spirv_has_atomic_float16" + spirv_has_atomic_float16_add = "spirv_has_atomic_float16_add" + spirv_has_atomic_float16_minmax = "spirv_has_atomic_float16_minmax" + spirv_has_atomic_float = "spirv_has_atomic_float" + spirv_has_atomic_float_add = "spirv_has_atomic_float_add" + spirv_has_atomic_float_minmax = "spirv_has_atomic_float_minmax" + spirv_has_atomic_float64 = "spirv_has_atomic_float64" + spirv_has_atomic_float64_add = "spirv_has_atomic_float64_add" + spirv_has_atomic_float64_minmax = "spirv_has_atomic_float64_minmax" + spirv_has_variable_ptr = "spirv_has_variable_ptr" + spirv_has_physical_storage_buffer = "spirv_has_physical_storage_buffer" + spirv_has_subgroup_basic = "spirv_has_subgroup_basic" + spirv_has_subgroup_vote = "spirv_has_subgroup_vote" + spirv_has_subgroup_arithmetic = "spirv_has_subgroup_arithmetic" + spirv_has_subgroup_ballot = "spirv_has_subgroup_ballot" + spirv_has_non_semantic_info = "spirv_has_non_semantic_info" + spirv_has_no_integer_wrap_decoration = "spirv_has_no_integer_wrap_decoration" + + +__all__ = ["Layout", "AutodiffMode", "SNodeGradType", "Format", "DeviceCapability"] diff --git a/local_packages/taichi/lang/exception.py b/local_packages/taichi/lang/exception.py new file mode 100644 index 0000000..8ebab90 --- /dev/null +++ b/local_packages/taichi/lang/exception.py @@ -0,0 +1,78 @@ +from taichi._lib import core + + +class TaichiCompilationError(Exception): + """Base class for all compilation exceptions.""" + + pass + + +class TaichiSyntaxError(TaichiCompilationError, SyntaxError): + """Thrown when a syntax error is found during compilation.""" + + pass + + +class TaichiNameError(TaichiCompilationError, NameError): + """Thrown when an undefine name is found during compilation.""" + + pass + + +class TaichiIndexError(TaichiCompilationError, IndexError): + """Thrown when an index error is found during compilation.""" + + pass + + +class TaichiTypeError(TaichiCompilationError, TypeError): + """Thrown when a type mismatch is found during compilation.""" + + pass + + +class TaichiRuntimeError(RuntimeError): + """Thrown when the compiled program cannot be executed due to unspecified reasons.""" + + pass + + +class TaichiAssertionError(TaichiRuntimeError, AssertionError): + """Thrown when assertion fails at runtime.""" + + pass + + +class TaichiRuntimeTypeError(TaichiRuntimeError, TypeError): + @staticmethod + def get(pos, needed, provided): + return TaichiRuntimeTypeError( + f"Argument {pos} (type={provided}) cannot be converted into required type {needed}" + ) + + @staticmethod + def get_ret(needed, provided): + return TaichiRuntimeTypeError(f"Return (type={provided}) cannot be converted into required type {needed}") + + +def handle_exception_from_cpp(exc): + if isinstance(exc, core.TaichiTypeError): + return TaichiTypeError(str(exc)) + if isinstance(exc, core.TaichiSyntaxError): + return TaichiSyntaxError(str(exc)) + if isinstance(exc, core.TaichiIndexError): + return TaichiIndexError(str(exc)) + if isinstance(exc, core.TaichiAssertionError): + return TaichiAssertionError(str(exc)) + return exc + + +__all__ = [ + "TaichiSyntaxError", + "TaichiTypeError", + "TaichiCompilationError", + "TaichiNameError", + "TaichiRuntimeError", + "TaichiRuntimeTypeError", + "TaichiAssertionError", +] diff --git a/local_packages/taichi/lang/expr.py b/local_packages/taichi/lang/expr.py new file mode 100644 index 0000000..7552d38 --- /dev/null +++ b/local_packages/taichi/lang/expr.py @@ -0,0 +1,170 @@ +import numpy as np +from taichi._lib import core as _ti_core +from taichi.lang import impl +from taichi.lang.common_ops import TaichiOperations +from taichi.lang.exception import TaichiCompilationError, TaichiTypeError +from taichi.lang.matrix import make_matrix +from taichi.lang.util import is_matrix_class, is_taichi_class, to_numpy_type +from taichi.types import primitive_types +from taichi.types.primitive_types import integer_types, real_types + + +# Scalar, basic data type +class Expr(TaichiOperations): + """A Python-side Expr wrapper, whose member variable `ptr` is an instance of C++ Expr class. A C++ Expr object contains member variable `expr` which holds an instance of C++ Expression class.""" + + def __init__(self, *args, dbg_info=None, dtype=None): + self.dbg_info = dbg_info + self.ptr_type_checked = False + if len(args) == 1: + if isinstance(args[0], _ti_core.Expr): + self.ptr = args[0] + elif isinstance(args[0], Expr): + self.ptr = args[0].ptr + self.ptr_type_checked = args[0].ptr_type_checked + self.dbg_info = args[0].dbg_info + elif is_matrix_class(args[0]): + self.ptr = make_matrix(args[0].to_list()).ptr + elif isinstance(args[0], (list, tuple)): + self.ptr = make_matrix(args[0]).ptr + else: + # assume to be constant + arg = args[0] + if isinstance(arg, np.ndarray): + if arg.shape: + raise TaichiTypeError( + "Only 0-dimensional numpy array can be used to initialize a scalar expression" + ) + arg = arg.dtype.type(arg) + self.ptr = make_constant_expr(arg, dtype).ptr + else: + assert False + if self.dbg_info: + self.ptr.set_dbg_info(self.dbg_info) + if not self.ptr_type_checked: + self.ptr.type_check(impl.get_runtime().prog.config()) + self.ptr_type_checked = True + + def is_tensor(self): + return self.ptr.is_tensor() + + def is_struct(self): + return self.ptr.is_struct() + + def element_type(self): + return self.ptr.get_rvalue_type().element_type() + + def get_shape(self): + if not self.is_tensor(): + raise TaichiCompilationError(f"Getting shape of non-tensor type: {self.ptr.get_rvalue_type()}") + return tuple(self.ptr.get_shape()) + + @property + def n(self): + shape = self.get_shape() + if len(shape) < 1: + raise TaichiCompilationError(f"Getting n of tensor type < 1D: {self.ptr.get_rvalue_type()}") + return shape[0] + + @property + def m(self): + shape = self.get_shape() + if len(shape) < 2: + raise TaichiCompilationError(f"Getting m of tensor type < 2D: {self.ptr.get_rvalue_type()}") + return shape[1] + + def __hash__(self): + return self.ptr.get_raw_address() + + def __str__(self): + return "" + + def __repr__(self): + return "" + + +def _check_in_range(npty, val): + iif = np.iinfo(npty) + return iif.min <= val <= iif.max + + +def _clamp_unsigned_to_range(npty, val): + # npty: np.int32 or np.int64 + iif = np.iinfo(npty) + if iif.min <= val <= iif.max: + return val + cap = 1 << iif.bits + assert 0 <= val < cap + new_val = val - cap + return new_val + + +def make_constant_expr(val, dtype): + if isinstance(val, (bool, np.bool_)): + constant_dtype = primitive_types.u1 + return Expr(_ti_core.make_const_expr_bool(constant_dtype, val)) + + if isinstance(val, (float, np.floating)): + constant_dtype = impl.get_runtime().default_fp if dtype is None else dtype + if constant_dtype not in real_types: + raise TaichiTypeError( + "Floating-point literals must be annotated with a floating-point type. For type casting, use `ti.cast`." + ) + return Expr(_ti_core.make_const_expr_fp(constant_dtype, val)) + + if isinstance(val, (int, np.integer)): + constant_dtype = impl.get_runtime().default_ip if dtype is None else dtype + if constant_dtype not in integer_types: + raise TaichiTypeError( + "Integer literals must be annotated with a integer type. For type casting, use `ti.cast`." + ) + if _check_in_range(to_numpy_type(constant_dtype), val): + return Expr(_ti_core.make_const_expr_int(constant_dtype, _clamp_unsigned_to_range(np.int64, val))) + if dtype is None: + raise TaichiTypeError( + f"Integer literal {val} exceeded the range of default_ip: {impl.get_runtime().default_ip}, please specify the dtype via e.g. `ti.u64({val})` or set a different `default_ip` in `ti.init()`" + ) + else: + raise TaichiTypeError(f"Integer literal {val} exceeded the range of specified dtype: {dtype}") + + raise TaichiTypeError(f"Invalid constant scalar data type: {type(val)}") + + +def make_var_list(size, ast_builder=None): + exprs = [] + for _ in range(size): + if ast_builder is None: + exprs.append(impl.get_runtime().prog.make_id_expr("")) + else: + exprs.append(ast_builder.make_id_expr("")) + return exprs + + +def make_expr_group(*exprs): + from taichi.lang.matrix import Matrix # pylint: disable=C0415 + + if len(exprs) == 1: + if isinstance(exprs[0], (list, tuple)): + exprs = exprs[0] + elif isinstance(exprs[0], Matrix): + mat = exprs[0] + assert mat.m == 1 + exprs = mat.entries + expr_group = _ti_core.ExprGroup() + for i in exprs: + flattened = _get_flattened_ptrs(i) + for item in flattened: + expr_group.push_back(item) + return expr_group + + +def _get_flattened_ptrs(val): + if is_taichi_class(val): + ptrs = [] + for item in val._members: + ptrs.extend(_get_flattened_ptrs(item)) + return ptrs + return [Expr(val).ptr] + + +__all__ = [] diff --git a/local_packages/taichi/lang/field.py b/local_packages/taichi/lang/field.py new file mode 100644 index 0000000..4c7c026 --- /dev/null +++ b/local_packages/taichi/lang/field.py @@ -0,0 +1,462 @@ +import taichi.lang +from taichi._lib import core as _ti_core +from taichi._logging import warn +from taichi.lang import impl +from taichi.lang.exception import TaichiSyntaxError +from taichi.lang.util import ( + in_python_scope, + python_scope, + to_numpy_type, + to_paddle_type, + to_pytorch_type, +) + + +class Field: + """Taichi field class. + + A field is constructed by a list of field members. + For example, a scalar field has 1 field member, while a 3x3 matrix field has 9 field members. + A field member is a Python Expr wrapping a C++ FieldExpression. + + Args: + vars (List[Expr]): Field members. + """ + + def __init__(self, _vars): + assert all(_vars) + self.vars = _vars + self.host_accessors = None + self.grad = None + self.dual = None + + @property + def snode(self): + """Gets representative SNode for info purposes. + + Returns: + SNode: Representative SNode (SNode of first field member). + """ + return self._snode + + @property + def _snode(self): + """Gets representative SNode for info purposes. + + Returns: + SNode: Representative SNode (SNode of first field member). + """ + return taichi.lang.snode.SNode(self.vars[0].ptr.snode()) + + @property + def shape(self): + """Gets field shape. + + Returns: + Tuple[Int]: Field shape. + """ + return self._snode.shape + + @property + def dtype(self): + """Gets data type of each individual value. + + Returns: + DataType: Data type of each individual value. + """ + return self._snode._dtype + + @property + def _name(self): + """Gets field name. + + Returns: + str: Field name. + """ + return self._snode._name + + def parent(self, n=1): + """Gets an ancestor of the representative SNode in the SNode tree. + + Args: + n (int): the number of levels going up from the representative SNode. + + Returns: + SNode: The n-th parent of the representative SNode. + """ + return self.snode.parent(n) + + def _get_field_members(self): + """Gets field members. + + Returns: + List[Expr]: Field members. + """ + return self.vars + + def _loop_range(self): + """Gets SNode of representative field member for loop range info. + + Returns: + taichi_python.SNode: SNode of representative (first) field member. + """ + return self.vars[0].ptr.snode() + + def _set_grad(self, grad): + """Sets corresponding grad field (reverse mode). + Args: + grad (Field): Corresponding grad field. + """ + self.grad = grad + + def _set_dual(self, dual): + """Sets corresponding dual field (forward mode). + + Args: + dual (Field): Corresponding dual field. + """ + self.dual = dual + + @python_scope + def fill(self, val): + """Fills `self` with a specific value. + + Args: + val (Union[int, float]): Value to fill. + """ + raise NotImplementedError() + + @python_scope + def to_numpy(self, dtype=None): + """Converts `self` to a numpy array. + + Args: + dtype (DataType, optional): The desired data type of returned numpy array. + + Returns: + numpy.ndarray: The result numpy array. + """ + raise NotImplementedError() + + @python_scope + def to_torch(self, device=None): + """Converts `self` to a torch tensor. + + Args: + device (torch.device, optional): The desired device of returned tensor. + + Returns: + torch.tensor: The result torch tensor. + """ + raise NotImplementedError() + + @python_scope + def to_paddle(self, place=None): + """Converts `self` to a paddle tensor. + + Args: + place (paddle.CPUPlace()/CUDAPlace(n), optional): The desired place of returned tensor. + + Returns: + paddle.Tensor: The result paddle tensor. + """ + raise NotImplementedError() + + @python_scope + def from_numpy(self, arr): + """Loads all elements from a numpy array. + + The shape of the numpy array needs to be the same as `self`. + + Args: + arr (numpy.ndarray): The source numpy array. + """ + raise NotImplementedError() + + @python_scope + def _from_external_arr(self, arr): + raise NotImplementedError() + + @python_scope + def from_torch(self, arr): + """Loads all elements from a torch tensor. + + The shape of the torch tensor needs to be the same as `self`. + + Args: + arr (torch.tensor): The source torch tensor. + """ + self._from_external_arr(arr.contiguous()) + + @python_scope + def from_paddle(self, arr): + """Loads all elements from a paddle tensor. + + The shape of the paddle tensor needs to be the same as `self`. + + Args: + arr (paddle.Tensor): The source paddle tensor. + """ + self.from_numpy(arr) + + @python_scope + def copy_from(self, other): + """Copies all elements from another field. + + The shape of the other field needs to be the same as `self`. + + Args: + other (Field): The source field. + """ + if not isinstance(other, Field): + raise TypeError("Cannot copy from a non-field object") + if self.shape != other.shape: + raise ValueError(f"ti.field shape {self.shape} does not match" f" the source field shape {other.shape}") + from taichi._kernels import tensor_to_tensor # pylint: disable=C0415 + + tensor_to_tensor(self, other) + + @python_scope + def __setitem__(self, key, value): + """Sets field element in Python scope. + + Args: + key (Union[List[int], int, None]): Coordinates of the field element. + value (element type): Value to set. + """ + raise NotImplementedError() + + @python_scope + def __getitem__(self, key): + """Gets field element in Python scope. + + Args: + key (Union[List[int], int, None]): Coordinates of the field element. + + Returns: + element type: Value retrieved. + """ + raise NotImplementedError() + + def __str__(self): + if taichi.lang.impl.inside_kernel(): + return self.__repr__() # make pybind11 happy, see Matrix.__str__ + if self._snode.ptr is None: + return "" + return str(self.to_numpy()) + + def _pad_key(self, key): + if key is None: + key = () + if not isinstance(key, (tuple, list)): + key = (key,) + + if len(key) != len(self.shape): + raise AssertionError("Slicing is not supported on ti.field") + + return key + ((0,) * (_ti_core.get_max_num_indices() - len(key))) + + def _initialize_host_accessors(self): + if self.host_accessors: + return + taichi.lang.impl.get_runtime().materialize() + self.host_accessors = [SNodeHostAccessor(e.ptr.snode()) for e in self.vars] + + def _host_access(self, key): + return [SNodeHostAccess(e, key) for e in self.host_accessors] + + def __iter__(self): + raise NotImplementedError("Struct for is only available in Taichi scope.") + + +class ScalarField(Field): + """Taichi scalar field with SNode implementation. + + Args: + var (Expr): Field member. + """ + + def __init__(self, var): + super().__init__([var]) + + def fill(self, val): + """Fills this scalar field with a specified value.""" + if in_python_scope(): + from taichi._kernels import fill_field # pylint: disable=C0415 + + fill_field(self, val) + else: + from taichi._funcs import field_fill_taichi_scope # pylint: disable=C0415 + + field_fill_taichi_scope(self, val) + + @python_scope + def to_numpy(self, dtype=None): + """Converts this field to a `numpy.ndarray`.""" + if self.parent()._snode.ptr.type == _ti_core.SNodeType.dynamic: + warn( + "You are trying to convert a dynamic snode to a numpy array, be aware that inactive items in the snode will be converted to zeros in the resulting array." + ) + if dtype is None: + dtype = to_numpy_type(self.dtype) + import numpy as np # pylint: disable=C0415 + + arr = np.zeros(shape=self.shape, dtype=dtype) + from taichi._kernels import tensor_to_ext_arr # pylint: disable=C0415 + + tensor_to_ext_arr(self, arr) + taichi.lang.runtime_ops.sync() + return arr + + @python_scope + def to_torch(self, device=None): + """Converts this field to a `torch.tensor`.""" + import torch # pylint: disable=C0415 + + # pylint: disable=E1101 + arr = torch.zeros(size=self.shape, dtype=to_pytorch_type(self.dtype), device=device) + from taichi._kernels import tensor_to_ext_arr # pylint: disable=C0415 + + tensor_to_ext_arr(self, arr) + taichi.lang.runtime_ops.sync() + return arr + + @python_scope + def to_paddle(self, place=None): + """Converts this field to a `paddle.Tensor`.""" + import paddle # pylint: disable=C0415 + + # pylint: disable=E1101 + # paddle.empty() doesn't support argument `place`` + arr = paddle.to_tensor(paddle.zeros(self.shape, to_paddle_type(self.dtype)), place=place) + from taichi._kernels import tensor_to_ext_arr # pylint: disable=C0415 + + tensor_to_ext_arr(self, arr) + taichi.lang.runtime_ops.sync() + return arr + + @python_scope + def _from_external_arr(self, arr): + if len(self.shape) != len(arr.shape): + raise ValueError(f"ti.field shape {self.shape} does not match" f" the numpy array shape {arr.shape}") + for i, _ in enumerate(self.shape): + if self.shape[i] != arr.shape[i]: + raise ValueError(f"ti.field shape {self.shape} does not match" f" the numpy array shape {arr.shape}") + from taichi._kernels import ext_arr_to_tensor # pylint: disable=C0415 + + ext_arr_to_tensor(arr, self) + taichi.lang.runtime_ops.sync() + + @python_scope + def from_numpy(self, arr): + """Copies the data from a `numpy.ndarray` into this field.""" + if not arr.flags.c_contiguous: + import numpy as np # pylint: disable=C0415 + + arr = np.ascontiguousarray(arr) + self._from_external_arr(arr) + + @python_scope + def __setitem__(self, key, value): + self._initialize_host_accessors() + self.host_accessors[0].setter(value, *self._pad_key(key)) + + @python_scope + def __getitem__(self, key): + self._initialize_host_accessors() + # Check for potential slicing behaviour + # for instance: x[0, :] + padded_key = self._pad_key(key) + import numpy as np # pylint: disable=C0415 + + for key in padded_key: + if not isinstance(key, (int, np.integer)): + raise TypeError( + f"Detected illegal element of type: {type(key)}. " + f"Please be aware that slicing a ti.field is not supported so far." + ) + return self.host_accessors[0].getter(*padded_key) + + def __repr__(self): + # make interactive shell happy, prevent materialization + return "" + + +class SNodeHostAccessor: + def __init__(self, snode): + if _ti_core.is_real(snode.data_type()): + write_func = snode.write_float + read_func = snode.read_float + else: + + def write_func(key, value): + if value >= 0: + snode.write_uint(key, value) + else: + snode.write_int(key, value) + + if _ti_core.is_signed(snode.data_type()): + read_func = snode.read_int + else: + read_func = snode.read_uint + + def getter(*key): + assert len(key) == _ti_core.get_max_num_indices() + return read_func(key) + + def setter(value, *key): + assert len(key) == _ti_core.get_max_num_indices() + write_func(key, value) + # same as above + if ( + impl.get_runtime().target_tape + and impl.get_runtime().target_tape.grad_checker + and not impl.get_runtime().grad_replaced + ): + for x in impl.get_runtime().target_tape.grad_checker.to_check: + assert snode != x.snode.ptr, "Overwritten is prohibitive when doing grad check." + impl.get_runtime().target_tape.insert(write_func, (key, value)) + + self.getter = getter + self.setter = setter + + +class SNodeHostAccess: + def __init__(self, accessor, key): + self.accessor = accessor + self.key = key + + +class BitpackedFields: + """Taichi bitpacked fields, where fields with quantized types are packed together. + + Args: + max_num_bits (int): Maximum number of bits all fields inside can occupy in total. Only 32 or 64 is allowed. + """ + + def __init__(self, max_num_bits): + self.fields = [] + self.bit_struct_type_builder = _ti_core.BitStructTypeBuilder(max_num_bits) + + def place(self, *args, shared_exponent=False): + """Places a list of fields with quantized types inside. + + Args: + *args (List[Field]): A list of fields with quantized types to place. + shared_exponent (bool): Whether the fields have a shared exponent. + """ + if shared_exponent: + self.bit_struct_type_builder.begin_placing_shared_exponent() + count = 0 + for arg in args: + assert isinstance(arg, Field) + for var in arg._get_field_members(): + self.fields.append((var.ptr, self.bit_struct_type_builder.add_member(var.ptr.get_dt()))) + count += 1 + if shared_exponent: + self.bit_struct_type_builder.end_placing_shared_exponent() + if count <= 1: + raise TaichiSyntaxError("At least 2 fields need to be placed when shared_exponent=True") + + +__all__ = ["BitpackedFields", "Field", "ScalarField"] diff --git a/local_packages/taichi/lang/impl.py b/local_packages/taichi/lang/impl.py new file mode 100644 index 0000000..7648cb0 --- /dev/null +++ b/local_packages/taichi/lang/impl.py @@ -0,0 +1,1208 @@ +import numbers +from types import FunctionType, MethodType +from typing import Any, Iterable, Sequence + +import numpy as np +from taichi._lib import core as _ti_core +from taichi._snode.fields_builder import FieldsBuilder +from taichi.lang._ndarray import ScalarNdarray +from taichi.lang._ndrange import GroupedNDRange, _Ndrange +from taichi.lang._texture import RWTextureAccessor +from taichi.lang.any_array import AnyArray +from taichi.lang.enums import SNodeGradType +from taichi.lang.exception import ( + TaichiCompilationError, + TaichiRuntimeError, + TaichiSyntaxError, + TaichiTypeError, +) +from taichi.lang.expr import Expr, make_expr_group +from taichi.lang.field import Field, ScalarField +from taichi.lang.kernel_arguments import SparseMatrixProxy +from taichi.lang.matrix import ( + Matrix, + MatrixField, + MatrixNdarray, + MatrixType, + Vector, + VectorNdarray, + make_matrix, +) +from taichi.lang.mesh import ( + ConvType, + MeshElementFieldProxy, + MeshInstance, + MeshRelationAccessProxy, + MeshReorderedMatrixFieldProxy, + MeshReorderedScalarFieldProxy, + element_type_name, +) +from taichi.lang.simt.block import SharedArray +from taichi.lang.snode import SNode +from taichi.lang.struct import Struct, StructField, _IntermediateStruct +from taichi.lang.util import ( + cook_dtype, + get_traceback, + is_taichi_class, + python_scope, + taichi_scope, + warning, +) +from taichi.types.primitive_types import ( + all_types, + f16, + f32, + f64, + i32, + i64, + u8, + u32, + u64, +) + + +@taichi_scope +def expr_init_shared_array(shape, element_type): + return ( + get_runtime() + .compiling_callable.ast_builder() + .expr_alloca_shared_array(shape, element_type, _ti_core.DebugInfo(get_runtime().get_current_src_info())) + ) + + +@taichi_scope +def expr_init(rhs): + if rhs is None: + return Expr( + get_runtime() + .compiling_callable.ast_builder() + .expr_alloca(_ti_core.DebugInfo(get_runtime().get_current_src_info())) + ) + if isinstance(rhs, Matrix) and (hasattr(rhs, "_DIM")): + return Matrix(*rhs.to_list(), ndim=rhs.ndim) + if isinstance(rhs, Matrix): + return make_matrix(rhs.to_list()) + if isinstance(rhs, SharedArray): + return rhs + if isinstance(rhs, Struct): + return Struct(rhs.to_dict(include_methods=True, include_ndim=True)) + if isinstance(rhs, list): + return [expr_init(e) for e in rhs] + if isinstance(rhs, tuple): + return tuple(expr_init(e) for e in rhs) + if isinstance(rhs, dict): + return dict((key, expr_init(val)) for key, val in rhs.items()) + if isinstance(rhs, _ti_core.DataType): + return rhs + if isinstance(rhs, _ti_core.Arch): + return rhs + if isinstance(rhs, _Ndrange): + return rhs + if isinstance(rhs, MeshElementFieldProxy): + return rhs + if isinstance(rhs, MeshRelationAccessProxy): + return rhs + if hasattr(rhs, "_data_oriented"): + return rhs + return Expr( + get_runtime() + .compiling_callable.ast_builder() + .expr_var(Expr(rhs).ptr, _ti_core.DebugInfo(get_runtime().get_current_src_info())) + ) + + +@taichi_scope +def expr_init_func(rhs): # temporary solution to allow passing in fields as arguments + if isinstance(rhs, Field): + return rhs + return expr_init(rhs) + + +def begin_frontend_struct_for(ast_builder, group, loop_range): + if not isinstance(loop_range, (AnyArray, Field, SNode, RWTextureAccessor, _Root)): + raise TypeError( + f"Cannot loop over the object {type(loop_range)} in Taichi scope. Only Taichi fields (via template) or dense arrays (via types.ndarray) are supported." + ) + if group.size() != len(loop_range.shape): + raise IndexError( + "Number of struct-for indices does not match loop variable dimensionality " + f"({group.size()} != {len(loop_range.shape)}). Maybe you wanted to " + 'use "for I in ti.grouped(x)" to group all indices into a single vector I?' + ) + dbg_info = _ti_core.DebugInfo(get_runtime().get_current_src_info()) + if isinstance(loop_range, (AnyArray, RWTextureAccessor)): + ast_builder.begin_frontend_struct_for_on_external_tensor(group, loop_range._loop_range(), dbg_info) + else: + ast_builder.begin_frontend_struct_for_on_snode(group, loop_range._loop_range(), dbg_info) + + +def begin_frontend_if(ast_builder, cond, stmt_dbg_info): + assert ast_builder is not None + if is_taichi_class(cond): + raise ValueError( + "The truth value of vectors/matrices is ambiguous.\n" + "Consider using `any` or `all` when comparing vectors/matrices:\n" + " if all(x == y):\n" + "or\n" + " if any(x != y):\n" + ) + ast_builder.begin_frontend_if(Expr(cond).ptr, stmt_dbg_info) + + +@taichi_scope +def _calc_slice(index, default_stop): + start, stop, step = index.start or 0, index.stop or default_stop, index.step or 1 + + def check_validity(x): + # TODO(mzmzm): support variable in slice + if isinstance(x, Expr): + raise TaichiCompilationError( + "Taichi does not support variables in slice now, please use constant instead of it." + ) + + check_validity(start), check_validity(stop), check_validity(step) + return [_ for _ in range(start, stop, step)] + + +def validate_subscript_index(value, index): + if isinstance(value, Field): + # field supports negative indices + return + + if isinstance(index, Expr): + return + + if isinstance(index, Iterable): + for ind in index: + validate_subscript_index(value, ind) + + if isinstance(index, slice): + validate_subscript_index(value, index.start) + validate_subscript_index(value, index.stop) + + if isinstance(index, numbers.Number) and index < 0: + raise TaichiSyntaxError("Negative indices are not supported in Taichi kernels.") + + +@taichi_scope +def subscript(ast_builder, value, *_indices, skip_reordered=False): + dbg_info = _ti_core.DebugInfo(get_runtime().get_current_src_info()) + ast_builder = get_runtime().compiling_callable.ast_builder() + # Directly evaluate in Python for non-Taichi types + if not isinstance( + value, + ( + Expr, + Field, + AnyArray, + SparseMatrixProxy, + MeshElementFieldProxy, + MeshRelationAccessProxy, + SharedArray, + ), + ): + if len(_indices) == 1: + _indices = _indices[0] + return value.__getitem__(_indices) + + has_slice = False + + flattened_indices = [] + for _index in _indices: + if isinstance(_index, Matrix): + ind = _index.to_list() + elif isinstance(_index, slice): + ind = [_index] + has_slice = True + else: + ind = [_index] + flattened_indices += ind + indices = tuple(flattened_indices) + validate_subscript_index(value, indices) + + if len(indices) == 1 and indices[0] is None: + indices = () + + if has_slice: + if not (isinstance(value, Expr) and value.is_tensor()): + raise TaichiSyntaxError(f"The type {type(value)} do not support index of slice type") + else: + indices_expr_group = make_expr_group(*indices) + + if isinstance(value, SharedArray): + return value.subscript(*indices) + if isinstance(value, MeshElementFieldProxy): + return value.subscript(*indices) + if isinstance(value, MeshRelationAccessProxy): + return value.subscript(*indices) + if isinstance(value, (MeshReorderedScalarFieldProxy, MeshReorderedMatrixFieldProxy)) and not skip_reordered: + reordered_index = tuple( + [ + Expr( + ast_builder.mesh_index_conversion( + value.mesh_ptr, value.element_type, Expr(indices[0]).ptr, ConvType.g2r, dbg_info + ) + ) + ] + ) + return subscript(ast_builder, value, *reordered_index, skip_reordered=True) + if isinstance(value, SparseMatrixProxy): + return value.subscript(*indices) + if isinstance(value, Field): + _var = value._get_field_members()[0].ptr + snode = _var.snode() + if snode is None: + if _var.is_primal(): + raise RuntimeError(f"{_var.get_expr_name()} has not been placed.") + else: + raise RuntimeError( + f"Gradient {_var.get_expr_name()} has not been placed, check whether `needs_grad=True`" + ) + + if isinstance(value, MatrixField): + return Expr(ast_builder.expr_subscript(value.ptr, indices_expr_group, dbg_info)) + if isinstance(value, StructField): + entries = {k: subscript(ast_builder, v, *indices) for k, v in value._items} + entries["__struct_methods"] = value.struct_methods + return _IntermediateStruct(entries) + return Expr(ast_builder.expr_subscript(_var, indices_expr_group, dbg_info)) + if isinstance(value, AnyArray): + return Expr(ast_builder.expr_subscript(value.ptr, indices_expr_group, dbg_info)) + assert isinstance(value, Expr) + # Index into TensorType + # value: IndexExpression with ret_type = TensorType + assert value.is_tensor() + + if has_slice: + shape = value.get_shape() + dim = len(shape) + assert dim == len(indices) + indices = [ + _calc_slice(index, shape[i]) if isinstance(index, slice) else index for i, index in enumerate(indices) + ] + if dim == 1: + assert isinstance(indices[0], list) + multiple_indices = [make_expr_group(i) for i in indices[0]] + return_shape = (len(indices[0]),) + else: + assert dim == 2 + if isinstance(indices[0], list) and isinstance(indices[1], list): + multiple_indices = [make_expr_group(i, j) for i in indices[0] for j in indices[1]] + return_shape = (len(indices[0]), len(indices[1])) + elif isinstance(indices[0], list): # indices[1] is not list + multiple_indices = [make_expr_group(i, indices[1]) for i in indices[0]] + return_shape = (len(indices[0]),) + else: # indices[0] is not list while indices[1] is list + multiple_indices = [make_expr_group(indices[0], j) for j in indices[1]] + return_shape = (len(indices[1]),) + return Expr( + _ti_core.subscript_with_multiple_indices( + value.ptr, + multiple_indices, + return_shape, + dbg_info, + ) + ) + return Expr(ast_builder.expr_subscript(value.ptr, indices_expr_group, dbg_info)) + + +class SrcInfoGuard: + def __init__(self, info_stack, info): + self.info_stack = info_stack + self.info = info + + def __enter__(self): + self.info_stack.append(self.info) + + def __exit__(self, exc_type, exc_val, exc_tb): + self.info_stack.pop() + + +class PyTaichi: + def __init__(self, kernels=None): + self.materialized = False + self.prog = None + self.src_info_stack = [] + self.inside_kernel = False + self.compiling_callable = None # pointer to instance of lang::Kernel/Function + self.current_kernel = None + self.global_vars = [] + self.grad_vars = [] + self.dual_vars = [] + self.matrix_fields = [] + self.default_fp = f32 + self.default_ip = i32 + self.default_up = u32 + self.target_tape = None + self.fwd_mode_manager = None + self.grad_replaced = False + self.kernels = kernels or [] + self._signal_handler_registry = None + self.unfinalized_fields_builder = {} + + def initialize_fields_builder(self, builder): + self.unfinalized_fields_builder[builder] = get_traceback(2) + + def clear_compiled_functions(self): + for k in self.kernels: + k.compiled_kernels.clear() + + def finalize_fields_builder(self, builder): + self.unfinalized_fields_builder.pop(builder) + + def validate_fields_builder(self): + for builder, tb in self.unfinalized_fields_builder.items(): + if builder == _root_fb: + continue + + raise TaichiRuntimeError( + f"Field builder {builder} is not finalized. " f"Please call finalize() on it. Traceback:\n{tb}" + ) + + def get_num_compiled_functions(self): + count = 0 + for k in self.kernels: + count += len(k.compiled_kernels) + return count + + def src_info_guard(self, info): + return SrcInfoGuard(self.src_info_stack, info) + + def get_current_src_info(self): + return self.src_info_stack[-1] + + def set_default_fp(self, fp): + assert fp in [f16, f32, f64] + self.default_fp = fp + default_cfg().default_fp = self.default_fp + + def set_default_ip(self, ip): + assert ip in [i32, i64] + self.default_ip = ip + self.default_up = u32 if ip == i32 else u64 + default_cfg().default_ip = self.default_ip + default_cfg().default_up = self.default_up + + def create_program(self): + if self.prog is None: + self.prog = _ti_core.Program() + + @staticmethod + def materialize_root_fb(is_first_call): + if root.finalized: + return + if not is_first_call and root.empty: + # We have to forcefully finalize when `is_first_call` is True (even + # if the root itself is empty), so that there is a valid struct + # llvm::Module, if no field has been declared before the first kernel + # invocation. Example case: + # https://github.com/taichi-dev/taichi/blob/27bb1dc3227d9273a79fcb318fdb06fd053068f5/tests/python/test_ad_basics.py#L260-L266 + return + + if get_runtime().prog.config().debug: + if not root.finalized: + root._allocate_adjoint_checkbit() + + root.finalize(raise_warning=not is_first_call) + global _root_fb + _root_fb = FieldsBuilder() + + @staticmethod + def _finalize_root_fb_for_aot(): + if _root_fb.finalized: + raise RuntimeError("AOT: can only finalize the root FieldsBuilder once") + _root_fb._finalize_for_aot() + + @staticmethod + def _get_tb(_var): + return getattr(_var, "declaration_tb", str(_var.ptr)) + + def _check_field_not_placed(self): + not_placed = [] + for _var in self.global_vars: + if _var.ptr.snode() is None: + not_placed.append(self._get_tb(_var)) + + if len(not_placed): + bar = "=" * 44 + "\n" + raise RuntimeError( + f"These field(s) are not placed:\n{bar}" + + f"{bar}".join(not_placed) + + f"{bar}Please consider specifying a shape for them. E.g.," + + "\n\n x = ti.field(float, shape=(2, 3))" + ) + + def _check_gradient_field_not_placed(self, gradient_type): + not_placed = set() + gradient_vars = [] + if gradient_type == "grad": + gradient_vars = self.grad_vars + elif gradient_type == "dual": + gradient_vars = self.dual_vars + for _var in gradient_vars: + if _var.ptr.snode() is None: + not_placed.add(self._get_tb(_var)) + + if len(not_placed): + bar = "=" * 44 + "\n" + raise RuntimeError( + f"These field(s) requrie `needs_{gradient_type}=True`, however their {gradient_type} field(s) are not placed:\n{bar}" + + f"{bar}".join(not_placed) + + f"{bar}Please consider place the {gradient_type} field(s). E.g.," + + "\n\n ti.root.dense(ti.i, 1).place(x.{gradient_type})" + + "\n\n Or specify a shape for the field(s). E.g.," + + "\n\n x = ti.field(float, shape=(2, 3), needs_{gradient_type}=True)" + ) + + def _check_matrix_field_member_shape(self): + for _field in self.matrix_fields: + shapes = [_field.get_scalar_field(i, j).shape for i in range(_field.n) for j in range(_field.m)] + if any(shape != shapes[0] for shape in shapes): + raise RuntimeError( + "Members of the following field have different shapes " + + f"{shapes}:\n{self._get_tb(_field._get_field_members()[0])}" + ) + + def _calc_matrix_field_dynamic_index_stride(self): + for _field in self.matrix_fields: + _field._calc_dynamic_index_stride() + + def materialize(self): + self.materialize_root_fb(not self.materialized) + self.materialized = True + + self.validate_fields_builder() + + self._check_field_not_placed() + self._check_gradient_field_not_placed("grad") + self._check_gradient_field_not_placed("dual") + self._check_matrix_field_member_shape() + self._calc_matrix_field_dynamic_index_stride() + self.global_vars = [] + self.grad_vars = [] + self.dual_vars = [] + self.matrix_fields = [] + + def _register_signal_handlers(self): + if self._signal_handler_registry is None: + self._signal_handler_registry = _ti_core.HackedSignalRegister() + + def clear(self): + if self.prog: + self.prog.finalize() + self.prog = None + self._signal_handler_registry = None + self.materialized = False + + def sync(self): + self.materialize() + self.prog.synchronize() + + +pytaichi = PyTaichi() + + +def get_runtime(): + return pytaichi + + +def reset(): + global pytaichi + old_kernels = pytaichi.kernels + pytaichi.clear() + pytaichi = PyTaichi(old_kernels) + for k in old_kernels: + k.reset() + _ti_core.reset_default_compile_config() + + +@taichi_scope +def static_print(*args, __p=print, **kwargs): + """The print function in Taichi scope. + + This function is called at compile time and has no runtime overhead. + """ + __p(*args, **kwargs) + + +# we don't add @taichi_scope decorator for @ti.pyfunc to work +def static_assert(cond, msg=None): + """Throw AssertionError when `cond` is False. + + This function is called at compile time and has no runtime overhead. + The bool value in `cond` must can be determined at compile time. + + Args: + cond (bool): an expression with a bool value. + msg (str): assertion message. + + Example:: + + >>> year = 2001 + >>> @ti.kernel + >>> def test(): + >>> ti.static_assert(year % 4 == 0, "the year must be a lunar year") + AssertionError: the year must be a lunar year + """ + if isinstance(cond, Expr): + raise TaichiTypeError("Static assert with non-static condition") + if msg is not None: + assert cond, msg + else: + assert cond + + +def inside_kernel(): + return pytaichi.inside_kernel + + +def index_nd(dim): + return axes(*range(dim)) + + +class _UninitializedRootFieldsBuilder: + def __getattr__(self, item): + if item == "__qualname__": + # For sphinx docstring extraction. + return "_UninitializedRootFieldsBuilder" + raise TaichiRuntimeError("Please call init() first") + + +# `root` initialization must be delayed until after the program is +# created. Unfortunately, `root` exists in both taichi.lang.impl module and +# the top-level taichi module at this point; so if `root` itself is written, we +# would have to make sure that `root` in all the modules get updated to the same +# instance. This is an error-prone process. +# +# To avoid this situation, we create `root` once during the import time, and +# never write to it. The core part, `_root_fb`, is the one whose initialization +# gets delayed. `_root_fb` will only exist in the taichi.lang.impl module, so +# writing to it is would result in less for maintenance cost. +# +# `_root_fb` will be overridden inside :func:`taichi.lang.init`. +_root_fb = _UninitializedRootFieldsBuilder() + + +def deactivate_all_snodes(): + """Recursively deactivate all SNodes.""" + for root_fb in FieldsBuilder._finalized_roots(): + root_fb.deactivate_all() + + +class _Root: + """Wrapper around the default root FieldsBuilder instance.""" + + @staticmethod + def parent(n=1): + """Same as :func:`taichi.SNode.parent`""" + return _root_fb.root.parent(n) + + @staticmethod + def _loop_range(): + """Same as :func:`taichi.SNode.loop_range`""" + return _root_fb.root._loop_range() + + @staticmethod + def _get_children(): + """Same as :func:`taichi.SNode.get_children`""" + return _root_fb.root._get_children() + + # TODO: Record all of the SNodeTrees that finalized under 'ti.root' + @staticmethod + def deactivate_all(): + warning("""'ti.root.deactivate_all()' would deactivate all finalized snodes.""") + deactivate_all_snodes() + + @property + def shape(self): + """Same as :func:`taichi.SNode.shape`""" + return _root_fb.root.shape + + @property + def _id(self): + return _root_fb.root._id + + def __getattr__(self, item): + return getattr(_root_fb, item) + + def __repr__(self): + return "ti.root" + + +root = _Root() +"""Root of the declared Taichi :func:`~taichi.lang.impl.field`s. + +See also https://docs.taichi-lang.org/docs/layout + +Example:: + + >>> x = ti.field(ti.f32) + >>> ti.root.pointer(ti.ij, 4).dense(ti.ij, 8).place(x) +""" + + +def _create_snode(axis_seq: Sequence[int], shape_seq: Sequence[numbers.Number], same_level: bool): + dim = len(axis_seq) + assert dim == len(shape_seq) + snode = root + if same_level: + snode = snode.dense(axes(*axis_seq), shape_seq) + else: + for i in range(dim): + snode = snode.dense(axes(axis_seq[i]), (shape_seq[i],)) + return snode + + +@python_scope +def create_field_member(dtype, name, needs_grad, needs_dual): + dtype = cook_dtype(dtype) + + # primal + prog = get_runtime().prog + if prog is None: + raise TaichiRuntimeError("Cannont create field, maybe you forgot to call `ti.init()` first?") + + x = Expr(prog.make_id_expr("")) + x.declaration_tb = get_traceback(stacklevel=4) + x.ptr = _ti_core.expr_field(x.ptr, dtype) + x.ptr.set_name(name) + x.ptr.set_grad_type(SNodeGradType.PRIMAL) + pytaichi.global_vars.append(x) + + x_grad = None + x_dual = None + # The x_grad_checkbit is used for global data access rule checker + x_grad_checkbit = None + if _ti_core.is_real(dtype): + # adjoint + x_grad = Expr(get_runtime().prog.make_id_expr("")) + x_grad.declaration_tb = get_traceback(stacklevel=4) + x_grad.ptr = _ti_core.expr_field(x_grad.ptr, dtype) + x_grad.ptr.set_name(name + ".grad") + x_grad.ptr.set_grad_type(SNodeGradType.ADJOINT) + x.ptr.set_adjoint(x_grad.ptr) + if needs_grad: + pytaichi.grad_vars.append(x_grad) + + if prog.config().debug: + # adjoint checkbit + x_grad_checkbit = Expr(get_runtime().prog.make_id_expr("")) + dtype = u8 + if prog.config().arch in (_ti_core.opengl, _ti_core.vulkan, _ti_core.gles): + dtype = i32 + x_grad_checkbit.ptr = _ti_core.expr_field(x_grad_checkbit.ptr, cook_dtype(dtype)) + x_grad_checkbit.ptr.set_name(name + ".grad_checkbit") + x_grad_checkbit.ptr.set_grad_type(SNodeGradType.ADJOINT_CHECKBIT) + x.ptr.set_adjoint_checkbit(x_grad_checkbit.ptr) + + # dual + x_dual = Expr(get_runtime().prog.make_id_expr("")) + x_dual.ptr = _ti_core.expr_field(x_dual.ptr, dtype) + x_dual.ptr.set_name(name + ".dual") + x_dual.ptr.set_grad_type(SNodeGradType.DUAL) + x.ptr.set_dual(x_dual.ptr) + if needs_dual: + pytaichi.dual_vars.append(x_dual) + elif needs_grad or needs_dual: + raise TaichiRuntimeError(f"{dtype} is not supported for field with `needs_grad=True` or `needs_dual=True`.") + + return x, x_grad, x_dual + + +@python_scope +def _field( + dtype, + shape=None, + order=None, + name="", + offset=None, + needs_grad=False, + needs_dual=False, +): + x, x_grad, x_dual = create_field_member(dtype, name, needs_grad, needs_dual) + x = ScalarField(x) + if x_grad: + x_grad = ScalarField(x_grad) + x._set_grad(x_grad) + if x_dual: + x_dual = ScalarField(x_dual) + x._set_dual(x_dual) + + if shape is None: + if offset is not None: + raise TaichiSyntaxError("shape cannot be None when offset is set") + if order is not None: + raise TaichiSyntaxError("shape cannot be None when order is set") + else: + if isinstance(shape, numbers.Number): + shape = (shape,) + if isinstance(offset, numbers.Number): + offset = (offset,) + dim = len(shape) + if offset is not None and dim != len(offset): + raise TaichiSyntaxError(f"The dimensionality of shape and offset must be the same ({dim} != {len(offset)})") + axis_seq = [] + shape_seq = [] + if order is not None: + if dim != len(order): + raise TaichiSyntaxError( + f"The dimensionality of shape and order must be the same ({dim} != {len(order)})" + ) + if dim != len(set(order)): + raise TaichiSyntaxError("The axes in order must be different") + for ch in order: + axis = ord(ch) - ord("i") + if axis < 0 or axis >= dim: + raise TaichiSyntaxError(f"Invalid axis {ch}") + axis_seq.append(axis) + shape_seq.append(shape[axis]) + else: + axis_seq = list(range(dim)) + shape_seq = list(shape) + same_level = order is None + _create_snode(axis_seq, shape_seq, same_level).place(x, offset=offset) + if needs_grad: + _create_snode(axis_seq, shape_seq, same_level).place(x_grad, offset=offset) + if needs_dual: + _create_snode(axis_seq, shape_seq, same_level).place(x_dual, offset=offset) + return x + + +@python_scope +def field(dtype, *args, **kwargs): + """Defines a Taichi field. + + A Taichi field can be viewed as an abstract N-dimensional array, hiding away + the complexity of how its underlying :class:`~taichi.lang.snode.SNode` are + actually defined. The data in a Taichi field can be directly accessed by + a Taichi :func:`~taichi.lang.kernel_impl.kernel`. + + See also https://docs.taichi-lang.org/docs/field + + Args: + dtype (DataType): data type of the field. Note it can be vector or matrix types as well. + shape (Union[int, tuple[int]], optional): shape of the field. + order (str, optional): order of the shape laid out in memory. + name (str, optional): name of the field. + offset (Union[int, tuple[int]], optional): offset of the field domain. + needs_grad (bool, optional): whether this field participates in autodiff (reverse mode) + and thus needs an adjoint field to store the gradients. + needs_dual (bool, optional): whether this field participates in autodiff (forward mode) + and thus needs an dual field to store the gradients. + + Example:: + + The code below shows how a Taichi field can be declared and defined:: + + >>> x1 = ti.field(ti.f32, shape=(16, 8)) + >>> # Equivalently + >>> x2 = ti.field(ti.f32) + >>> ti.root.dense(ti.ij, shape=(16, 8)).place(x2) + >>> + >>> x3 = ti.field(ti.f32, shape=(16, 8), order='ji') + >>> # Equivalently + >>> x4 = ti.field(ti.f32) + >>> ti.root.dense(ti.j, shape=8).dense(ti.i, shape=16).place(x4) + >>> + >>> x5 = ti.field(ti.math.vec3, shape=(16, 8)) + + """ + if isinstance(dtype, MatrixType): + if dtype.ndim == 1: + return Vector.field(dtype.n, dtype.dtype, *args, **kwargs) + return Matrix.field(dtype.n, dtype.m, dtype.dtype, *args, **kwargs) + return _field(dtype, *args, **kwargs) + + +@python_scope +def ndarray(dtype, shape, needs_grad=False): + """Defines a Taichi ndarray with scalar elements. + + Args: + dtype (Union[DataType, MatrixType]): Data type of each element. This can be either a scalar type like ti.f32 or a compound type like ti.types.vector(3, ti.i32). + shape (Union[int, tuple[int]]): Shape of the ndarray. + + Example: + The code below shows how a Taichi ndarray with scalar elements can be declared and defined:: + + >>> x = ti.ndarray(ti.f32, shape=(16, 8)) # ndarray of shape (16, 8), each element is ti.f32 scalar. + >>> vec3 = ti.types.vector(3, ti.i32) + >>> y = ti.ndarray(vec3, shape=(10, 2)) # ndarray of shape (10, 2), each element is a vector of 3 ti.i32 scalars. + >>> matrix_ty = ti.types.matrix(3, 4, float) + >>> z = ti.ndarray(matrix_ty, shape=(4, 5)) # ndarray of shape (4, 5), each element is a matrix of (3, 4) ti.float scalars. + """ + # primal + prog = get_runtime().prog + if prog is None: + raise TaichiRuntimeError("Cannont create ndarray, maybe you forgot to call `ti.init()` first?") + + if isinstance(shape, numbers.Number): + shape = (shape,) + if not all((isinstance(x, int) or isinstance(x, np.integer)) and x > 0 and x <= 2**31 - 1 for x in shape): + raise TaichiRuntimeError(f"{shape} is not a valid shape for ndarray") + if dtype in all_types: + dt = cook_dtype(dtype) + x = ScalarNdarray(dt, shape) + elif isinstance(dtype, MatrixType): + if dtype.ndim == 1: + x = VectorNdarray(dtype.n, dtype.dtype, shape) + else: + x = MatrixNdarray(dtype.n, dtype.m, dtype.dtype, shape) + dt = dtype.dtype + else: + raise TaichiRuntimeError(f"{dtype} is not supported as ndarray element type") + if needs_grad: + if not _ti_core.is_real(dt): + raise TaichiRuntimeError(f"{dt} is not supported for ndarray with `needs_grad=True` or `needs_dual=True`.") + x_grad = ndarray(dtype, shape, needs_grad=False) + x._set_grad(x_grad) + return x + + +@taichi_scope +def ti_format_list_to_content_entries(raw): + # return a pair of [content, format] + def entry2content(_var): + if isinstance(_var, str): + return [_var, None] + if isinstance(_var, list): + assert len(_var) == 2 and (isinstance(_var[1], str) or _var[1] is None) + _var[0] = Expr(_var[0]).ptr + return _var + return [Expr(_var).ptr, None] + + def list_ti_repr(_var): + yield "[" # distinguishing tuple & list will increase maintenance cost + for i, v in enumerate(_var): + if i: + yield ", " + yield v + yield "]" + + def vars2entries(_vars): + for _var in _vars: + # If the first element is '__ti_fmt_value__', this list is an Expr and its format. + if isinstance(_var, list) and len(_var) == 3 and isinstance(_var[0], str) and _var[0] == "__ti_fmt_value__": + # yield [Expr, format] as a whole and don't pass it to vars2entries() again + yield _var[1:] + continue + elif hasattr(_var, "__ti_repr__"): + res = _var.__ti_repr__() + elif isinstance(_var, (list, tuple)): + # If the first element is '__ti_format__', this list is the result of ti_format. + if len(_var) > 0 and isinstance(_var[0], str) and _var[0] == "__ti_format__": + res = _var[1:] + else: + res = list_ti_repr(_var) + else: + yield _var + continue + + for v in vars2entries(res): + yield v + + def fused_string(entries): + accumated = "" + for entry in entries: + if isinstance(entry, str): + accumated += entry + else: + if accumated: + yield accumated + accumated = "" + yield entry + if accumated: + yield accumated + + def extract_formats(entries): + contents, formats = zip(*entries) + return list(contents), list(formats) + + entries = vars2entries(raw) + entries = fused_string(entries) + entries = [entry2content(entry) for entry in entries] + return extract_formats(entries) + + +@taichi_scope +def ti_print(*_vars, sep=" ", end="\n"): + def add_separators(_vars): + for i, _var in enumerate(_vars): + if i: + yield sep + yield _var + yield end + + _vars = add_separators(_vars) + contents, formats = ti_format_list_to_content_entries(_vars) + get_runtime().compiling_callable.ast_builder().create_print( + contents, formats, _ti_core.DebugInfo(get_runtime().get_current_src_info()) + ) + + +@taichi_scope +def ti_format(*args): + content = args[0] + mixed = args[1:] + new_mixed = [] + args = [] + for x in mixed: + # x is a (formatted) Expr + if isinstance(x, Expr) or (isinstance(x, list) and len(x) == 3 and x[0] == "__ti_fmt_value__"): + new_mixed.append("{}") + args.append(x) + else: + new_mixed.append(x) + content = content.format(*new_mixed) + res = content.split("{}") + assert len(res) == len(args) + 1, "Number of args is different from number of positions provided in string" + + for i, arg in enumerate(args): + res.insert(i * 2 + 1, arg) + res.insert(0, "__ti_format__") + return res + + +@taichi_scope +def ti_assert(cond, msg, extra_args, dbg_info): + # Mostly a wrapper to help us convert from Expr (defined in Python) to + # _ti_core.Expr (defined in C++) + get_runtime().compiling_callable.ast_builder().create_assert_stmt(Expr(cond).ptr, msg, extra_args, dbg_info) + + +@taichi_scope +def ti_int(_var): + if hasattr(_var, "__ti_int__"): + return _var.__ti_int__() + return int(_var) + + +@taichi_scope +def ti_bool(_var): + if hasattr(_var, "__ti_bool__"): + return _var.__ti_bool__() + return bool(_var) + + +@taichi_scope +def ti_float(_var): + if hasattr(_var, "__ti_float__"): + return _var.__ti_float__() + return float(_var) + + +@taichi_scope +def zero(x): + # TODO: get dtype from Expr and Matrix: + """Returns an array of zeros with the same shape and type as the input. It's also a scalar + if the input is a scalar. + + Args: + x (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): The input. + + Returns: + A new copy of the input but filled with zeros. + + Example:: + + >>> x = ti.Vector([1, 1]) + >>> @ti.kernel + >>> def test(): + >>> y = ti.zero(x) + >>> print(y) + [0, 0] + """ + return x * 0 + + +@taichi_scope +def one(x): + """Returns an array of ones with the same shape and type as the input. It's also a scalar + if the input is a scalar. + + Args: + x (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): The input. + + Returns: + A new copy of the input but filled with ones. + + Example:: + + >>> x = ti.Vector([0, 0]) + >>> @ti.kernel + >>> def test(): + >>> y = ti.one(x) + >>> print(y) + [1, 1] + """ + return zero(x) + 1 + + +def axes(*x: Iterable[int]): + """Defines a list of axes to be used by a field. + + Args: + *x: A list of axes to be activated + + Note that Taichi has already provided a set of commonly used axes. For example, + `ti.ij` is just `axes(0, 1)` under the hood. + """ + return [_ti_core.Axis(i) for i in x] + + +Axis = _ti_core.Axis + + +def static(x, *xs) -> Any: + """Evaluates a Taichi-scope expression at compile time. + + `static()` is what enables the so-called metaprogramming in Taichi. It is + in many ways similar to ``constexpr`` in C++. + + See also https://docs.taichi-lang.org/docs/meta. + + Args: + x (Any): an expression to be evaluated + *xs (Any): for Python-ish swapping assignment + + Example: + The most common usage of `static()` is for compile-time evaluation:: + + >>> cond = False + >>> + >>> @ti.kernel + >>> def run(): + >>> if ti.static(cond): + >>> do_a() + >>> else: + >>> do_b() + + Depending on the value of ``cond``, ``run()`` will be directly compiled + into either ``do_a()`` or ``do_b()``. Thus there won't be a runtime + condition check. + + Another common usage is for compile-time loop unrolling:: + + >>> @ti.kernel + >>> def run(): + >>> for i in ti.static(range(3)): + >>> print(i) + >>> + >>> # The above will be unrolled to: + >>> @ti.kernel + >>> def run(): + >>> print(0) + >>> print(1) + >>> print(2) + """ + if len(xs): # for python-ish pointer assign: x, y = ti.static(y, x) + return [static(x)] + [static(x) for x in xs] + + if ( + isinstance( + x, + ( + bool, + int, + float, + range, + list, + tuple, + enumerate, + GroupedNDRange, + _Ndrange, + zip, + filter, + map, + ), + ) + or x is None + ): + return x + if isinstance(x, (np.bool_, np.integer, np.floating)): + return x + + if isinstance(x, AnyArray): + return x + if isinstance(x, Field): + return x + if isinstance(x, (FunctionType, MethodType)): + return x + raise ValueError(f"Input to ti.static must be compile-time constants or global pointers, instead of {type(x)}") + + +@taichi_scope +def grouped(x): + """Groups the indices in the iterator returned by `ndrange()` into a 1-D vector. + + This is often used when you want to iterate over all indices returned by `ndrange()` + in one `for` loop and a single index. + + Args: + x (:func:`~taichi.ndrange`): an iterator object returned by `ti.ndrange`. + + Example:: + >>> # without ti.grouped + >>> for I in ti.ndrange(2, 3): + >>> print(I) + prints 0, 1, 2, 3, 4, 5 + + >>> # with ti.grouped + >>> for I in ti.grouped(ti.ndrange(2, 3)): + >>> print(I) + prints [0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2] + """ + if isinstance(x, _Ndrange): + return x.grouped() + return x + + +def stop_grad(x): + """Stops computing gradients during back propagation. + + Args: + x (:class:`~taichi.Field`): A field. + """ + get_runtime().compiling_callable.ast_builder().stop_grad(x.snode.ptr) + + +def current_cfg(): + return get_runtime().prog.config() + + +def default_cfg(): + return _ti_core.default_compile_config() + + +def call_internal(name, *args, with_runtime_context=True): + return expr_init(_ti_core.insert_internal_func_call(getattr(_ti_core.InternalOp, name), make_expr_group(args))) + + +def get_cuda_compute_capability(): + return _ti_core.query_int64("cuda_compute_capability") + + +@taichi_scope +def mesh_relation_access(mesh, from_index, to_element_type): + # to support ti.mesh_local and access mesh attribute as field + if isinstance(from_index, MeshInstance): + return getattr(from_index, element_type_name(to_element_type)) + if isinstance(mesh, MeshInstance): + return MeshRelationAccessProxy(mesh, from_index, to_element_type) + raise RuntimeError("Relation access should be with a mesh instance!") + + +__all__ = [ + "axes", + "deactivate_all_snodes", + "field", + "grouped", + "ndarray", + "one", + "root", + "static", + "static_assert", + "static_print", + "stop_grad", + "zero", +] diff --git a/local_packages/taichi/lang/kernel_arguments.py b/local_packages/taichi/lang/kernel_arguments.py new file mode 100644 index 0000000..ccee06c --- /dev/null +++ b/local_packages/taichi/lang/kernel_arguments.py @@ -0,0 +1,155 @@ +import inspect + +import taichi.lang +from taichi._lib import core as _ti_core +from taichi.lang import impl, ops +from taichi.lang._texture import RWTextureAccessor, TextureSampler +from taichi.lang.any_array import AnyArray +from taichi.lang.expr import Expr +from taichi.lang.matrix import MatrixType +from taichi.lang.struct import StructType +from taichi.lang.util import cook_dtype +from taichi.types.primitive_types import RefType, u64 +from taichi.types.compound_types import CompoundType + + +class KernelArgument: + def __init__(self, _annotation, _name, _default=inspect.Parameter.empty): + self.annotation = _annotation + self.name = _name + self.default = _default + + +class SparseMatrixEntry: + def __init__(self, ptr, i, j, dtype): + self.ptr = ptr + self.i = i + self.j = j + self.dtype = dtype + + def _augassign(self, value, op): + call_func = f"insert_triplet_{self.dtype}" + if op == "Add": + taichi.lang.impl.call_internal(call_func, self.ptr, self.i, self.j, ops.cast(value, self.dtype)) + elif op == "Sub": + taichi.lang.impl.call_internal(call_func, self.ptr, self.i, self.j, -ops.cast(value, self.dtype)) + else: + assert False, "Only operations '+=' and '-=' are supported on sparse matrices." + + +class SparseMatrixProxy: + def __init__(self, ptr, dtype): + self.ptr = ptr + self.dtype = dtype + + def subscript(self, i, j): + return SparseMatrixEntry(self.ptr, i, j, self.dtype) + + +def decl_scalar_arg(dtype, name, arg_depth): + is_ref = False + if isinstance(dtype, RefType): + is_ref = True + dtype = dtype.tp + dtype = cook_dtype(dtype) + if is_ref: + arg_id = impl.get_runtime().compiling_callable.insert_pointer_param(dtype, name) + else: + arg_id = impl.get_runtime().compiling_callable.insert_scalar_param(dtype, name) + + argload_di = _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()) + return Expr( + _ti_core.make_arg_load_expr(arg_id, dtype, is_ref, create_load=True, arg_depth=arg_depth, dbg_info=argload_di) + ) + + +def get_type_for_kernel_args(dtype, name): + if isinstance(dtype, MatrixType): + # Compiling the matrix type to a struct type because the support for the matrix type is not ready yet on SPIR-V based backends. + if dtype.ndim == 1: + elements = [(dtype.dtype, f"{name}_{i}") for i in range(dtype.n)] + else: + elements = [(dtype.dtype, f"{name}_{i}_{j}") for i in range(dtype.n) for j in range(dtype.m)] + return _ti_core.get_type_factory_instance().get_struct_type(elements) + if isinstance(dtype, StructType): + elements = [] + for k, element_type in dtype.members.items(): + if isinstance(element_type, CompoundType): + new_dtype = get_type_for_kernel_args(element_type, k) + elements.append([new_dtype, k]) + else: + elements.append([element_type, k]) + return _ti_core.get_type_factory_instance().get_struct_type(elements) + # Assuming dtype is a primitive type + return dtype + + +def decl_matrix_arg(matrixtype, name, arg_depth): + arg_type = get_type_for_kernel_args(matrixtype, name) + arg_id = impl.get_runtime().compiling_callable.insert_scalar_param(arg_type, name) + argload_di = _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()) + arg_load = Expr( + _ti_core.make_arg_load_expr(arg_id, arg_type, create_load=False, arg_depth=arg_depth, dbg_info=argload_di) + ) + return matrixtype.from_taichi_object(arg_load) + + +def decl_struct_arg(structtype, name, arg_depth): + arg_type = get_type_for_kernel_args(structtype, name) + arg_id = impl.get_runtime().compiling_callable.insert_scalar_param(arg_type, name) + argload_di = _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()) + arg_load = Expr( + _ti_core.make_arg_load_expr(arg_id, arg_type, create_load=False, arg_depth=arg_depth, dbg_info=argload_di) + ) + return structtype.from_taichi_object(arg_load) + + +def push_argpack_arg(name): + impl.get_runtime().compiling_callable.insert_argpack_param_and_push(name) + + +def decl_argpack_arg(argpacktype, member_dict): + impl.get_runtime().compiling_callable.pop_argpack_stack() + return argpacktype.from_taichi_object(member_dict) + + +def decl_sparse_matrix(dtype, name): + value_type = cook_dtype(dtype) + ptr_type = cook_dtype(u64) + # Treat the sparse matrix argument as a scalar since we only need to pass in the base pointer + arg_id = impl.get_runtime().compiling_callable.insert_scalar_param(ptr_type, name) + argload_di = _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()) + return SparseMatrixProxy( + _ti_core.make_arg_load_expr(arg_id, ptr_type, is_ptr=False, dbg_info=argload_di), value_type + ) + + +def decl_ndarray_arg(element_type, ndim, name, needs_grad, boundary): + arg_id = impl.get_runtime().compiling_callable.insert_ndarray_param(element_type, ndim, name, needs_grad) + return AnyArray(_ti_core.make_external_tensor_expr(element_type, ndim, arg_id, needs_grad, 0, boundary)) + + +def decl_texture_arg(num_dimensions, name): + # FIXME: texture_arg doesn't have element_shape so better separate them + arg_id = impl.get_runtime().compiling_callable.insert_texture_param(num_dimensions, name) + dbg_info = _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()) + return TextureSampler(_ti_core.make_texture_ptr_expr(arg_id, num_dimensions, 0, dbg_info), num_dimensions) + + +def decl_rw_texture_arg(num_dimensions, buffer_format, lod, name): + # FIXME: texture_arg doesn't have element_shape so better separate them + arg_id = impl.get_runtime().compiling_callable.insert_rw_texture_param(num_dimensions, buffer_format, name) + dbg_info = _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()) + return RWTextureAccessor( + _ti_core.make_rw_texture_ptr_expr(arg_id, num_dimensions, 0, buffer_format, lod, dbg_info), num_dimensions + ) + + +def decl_ret(dtype): + if isinstance(dtype, StructType): + dtype = dtype.dtype + if isinstance(dtype, MatrixType): + dtype = _ti_core.get_type_factory_instance().get_tensor_type([dtype.n, dtype.m], dtype.dtype) + else: + dtype = cook_dtype(dtype) + impl.get_runtime().compiling_callable.insert_ret(dtype) diff --git a/local_packages/taichi/lang/kernel_impl.py b/local_packages/taichi/lang/kernel_impl.py new file mode 100644 index 0000000..dfd8e23 --- /dev/null +++ b/local_packages/taichi/lang/kernel_impl.py @@ -0,0 +1,1245 @@ +import ast +import functools +import inspect +import operator +import re +import sys +import textwrap +import typing +import types +import warnings +import weakref + +import numpy as np +import taichi.lang +from taichi._lib import core as _ti_core +from taichi.lang import impl, ops, runtime_ops +from taichi.lang.any_array import AnyArray +from taichi.lang._wrap_inspect import getsourcefile, getsourcelines +from taichi.lang.argpack import ArgPackType, ArgPack +from taichi.lang.ast import ( + ASTTransformerContext, + KernelSimplicityASTChecker, + transform_tree, +) +from taichi.lang.ast.ast_transformer_utils import ReturnStatus +from taichi.lang.enums import AutodiffMode, Layout +from taichi.lang.exception import ( + TaichiCompilationError, + TaichiRuntimeError, + TaichiRuntimeTypeError, + TaichiSyntaxError, + TaichiTypeError, + handle_exception_from_cpp, +) +from taichi.lang.expr import Expr +from taichi.lang.kernel_arguments import KernelArgument +from taichi.lang.matrix import MatrixType +from taichi.lang.shell import _shell_pop_print +from taichi.lang.struct import StructType +from taichi.lang.util import cook_dtype, has_paddle, has_pytorch, to_taichi_type +from taichi.types import ( + ndarray_type, + primitive_types, + sparse_matrix_builder, + template, + texture_type, +) +from taichi.types.compound_types import CompoundType +from taichi.types.utils import is_signed + +from taichi import _logging + + +def func(fn, is_real_function=False): + """Marks a function as callable in Taichi-scope. + + This decorator transforms a Python function into a Taichi one. Taichi + will JIT compile it into native instructions. + + Args: + fn (Callable): The Python function to be decorated + is_real_function (bool): Whether the function is a real function + + Returns: + Callable: The decorated function + + Example:: + + >>> @ti.func + >>> def foo(x): + >>> return x + 2 + >>> + >>> @ti.kernel + >>> def run(): + >>> print(foo(40)) # 42 + """ + is_classfunc = _inside_class(level_of_class_stackframe=3 + is_real_function) + + fun = Func(fn, _classfunc=is_classfunc, is_real_function=is_real_function) + + @functools.wraps(fn) + def decorated(*args, **kwargs): + return fun.__call__(*args, **kwargs) + + decorated._is_taichi_function = True + decorated._is_real_function = is_real_function + decorated.func = fun + return decorated + + +def real_func(fn): + return func(fn, is_real_function=True) + + +def pyfunc(fn): + """Marks a function as callable in both Taichi and Python scopes. + + When called inside the Taichi scope, Taichi will JIT compile it into + native instructions. Otherwise it will be invoked directly as a + Python function. + + See also :func:`~taichi.lang.kernel_impl.func`. + + Args: + fn (Callable): The Python function to be decorated + + Returns: + Callable: The decorated function + """ + is_classfunc = _inside_class(level_of_class_stackframe=3) + fun = Func(fn, _classfunc=is_classfunc, _pyfunc=True) + + @functools.wraps(fn) + def decorated(*args, **kwargs): + return fun.__call__(*args, **kwargs) + + decorated._is_taichi_function = True + decorated._is_real_function = False + decorated.func = fun + return decorated + + +def _get_tree_and_ctx( + self, + excluded_parameters=(), + is_kernel=True, + arg_features=None, + args=None, + ast_builder=None, + is_real_function=False, +): + file = getsourcefile(self.func) + src, start_lineno = getsourcelines(self.func) + src = [textwrap.fill(line, tabsize=4, width=9999) for line in src] + tree = ast.parse(textwrap.dedent("\n".join(src))) + + func_body = tree.body[0] + func_body.decorator_list = [] + + global_vars = _get_global_vars(self.func) + + if is_kernel or is_real_function: + # inject template parameters into globals + for i in self.template_slot_locations: + template_var_name = self.arguments[i].name + global_vars[template_var_name] = args[i] + + return tree, ASTTransformerContext( + excluded_parameters=excluded_parameters, + is_kernel=is_kernel, + func=self, + arg_features=arg_features, + global_vars=global_vars, + argument_data=args, + src=src, + start_lineno=start_lineno, + file=file, + ast_builder=ast_builder, + is_real_function=is_real_function, + ) + + +def _process_args(self, args, kwargs): + ret = [argument.default for argument in self.arguments] + len_args = len(args) + + if len_args > len(ret): + arg_str = ", ".join([str(arg) for arg in args]) + expected_str = ", ".join([f"{arg.name} : {arg.annotation}" for arg in self.arguments]) + msg = f"Too many arguments. Expected ({expected_str}), got ({arg_str})." + raise TaichiSyntaxError(msg) + + for i, arg in enumerate(args): + ret[i] = arg + + for key, value in kwargs.items(): + found = False + for i, arg in enumerate(self.arguments): + if key == arg.name: + if i < len_args: + raise TaichiSyntaxError(f"Multiple values for argument '{key}'.") + ret[i] = value + found = True + break + if not found: + raise TaichiSyntaxError(f"Unexpected argument '{key}'.") + + for i, arg in enumerate(ret): + if arg is inspect.Parameter.empty: + if self.arguments[i].annotation is inspect._empty: + raise TaichiSyntaxError(f"Parameter `{self.arguments[i].name}` missing.") + else: + raise TaichiSyntaxError( + f"Parameter `{self.arguments[i].name} : {self.arguments[i].annotation}` missing." + ) + + return ret + + +class Func: + function_counter = 0 + + def __init__(self, _func, _classfunc=False, _pyfunc=False, is_real_function=False): + self.func = _func + self.func_id = Func.function_counter + Func.function_counter += 1 + self.compiled = None + self.classfunc = _classfunc + self.pyfunc = _pyfunc + self.is_real_function = is_real_function + self.arguments = [] + self.return_type = None + self.extract_arguments() + self.template_slot_locations = [] + for i, arg in enumerate(self.arguments): + if isinstance(arg.annotation, template): + self.template_slot_locations.append(i) + self.mapper = TaichiCallableTemplateMapper(self.arguments, self.template_slot_locations) + self.taichi_functions = {} # The |Function| class in C++ + self.has_print = False + + def __call__(self, *args, **kwargs): + args = _process_args(self, args, kwargs) + + if not impl.inside_kernel(): + if not self.pyfunc: + raise TaichiSyntaxError("Taichi functions cannot be called from Python-scope.") + return self.func(*args) + + if self.is_real_function: + if impl.get_runtime().current_kernel.autodiff_mode != AutodiffMode.NONE: + raise TaichiSyntaxError("Real function in gradient kernels unsupported.") + instance_id, arg_features = self.mapper.lookup(args) + key = _ti_core.FunctionKey(self.func.__name__, self.func_id, instance_id) + if self.compiled is None: + self.compiled = {} + if key.instance_id not in self.compiled: + self.do_compile(key=key, args=args, arg_features=arg_features) + return self.func_call_rvalue(key=key, args=args) + tree, ctx = _get_tree_and_ctx( + self, + is_kernel=False, + args=args, + ast_builder=impl.get_runtime().current_kernel.ast_builder(), + is_real_function=self.is_real_function, + ) + ret = transform_tree(tree, ctx) + if not self.is_real_function: + if self.return_type and ctx.returned != ReturnStatus.ReturnedValue: + raise TaichiSyntaxError("Function has a return type but does not have a return statement") + return ret + + def func_call_rvalue(self, key, args): + # Skip the template args, e.g., |self| + assert self.is_real_function + non_template_args = [] + dbg_info = _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()) + for i, kernel_arg in enumerate(self.arguments): + anno = kernel_arg.annotation + if not isinstance(anno, template): + if id(anno) in primitive_types.type_ids: + non_template_args.append(ops.cast(args[i], anno)) + elif isinstance(anno, primitive_types.RefType): + non_template_args.append(_ti_core.make_reference(args[i].ptr, dbg_info)) + elif isinstance(anno, ndarray_type.NdarrayType): + if not isinstance(args[i], AnyArray): + raise TaichiTypeError( + f"Expected ndarray in the kernel argument for argument {kernel_arg.name}, got {args[i]}" + ) + non_template_args += _ti_core.get_external_tensor_real_func_args(args[i].ptr, dbg_info) + else: + non_template_args.append(args[i]) + non_template_args = impl.make_expr_group(non_template_args) + func_call = ( + impl.get_runtime() + .compiling_callable.ast_builder() + .insert_func_call(self.taichi_functions[key.instance_id], non_template_args, dbg_info) + ) + if self.return_type is None: + return None + func_call = Expr(func_call) + ret = [] + + for i, return_type in enumerate(self.return_type): + if id(return_type) in primitive_types.type_ids: + ret.append( + Expr( + _ti_core.make_get_element_expr( + func_call.ptr, (i,), _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()) + ) + ) + ) + elif isinstance(return_type, (StructType, MatrixType)): + ret.append(return_type.from_taichi_object(func_call, (i,))) + else: + raise TaichiTypeError(f"Unsupported return type for return value {i}: {return_type}") + if len(ret) == 1: + return ret[0] + return tuple(ret) + + def do_compile(self, key, args, arg_features): + tree, ctx = _get_tree_and_ctx( + self, is_kernel=False, args=args, arg_features=arg_features, is_real_function=self.is_real_function + ) + fn = impl.get_runtime().prog.create_function(key) + + def func_body(): + old_callable = impl.get_runtime().compiling_callable + impl.get_runtime().compiling_callable = fn + ctx.ast_builder = fn.ast_builder() + transform_tree(tree, ctx) + impl.get_runtime().compiling_callable = old_callable + + self.taichi_functions[key.instance_id] = fn + self.compiled[key.instance_id] = func_body + self.taichi_functions[key.instance_id].set_function_body(func_body) + + def extract_arguments(self): + sig = inspect.signature(self.func) + if sig.return_annotation not in (inspect.Signature.empty, None): + self.return_type = sig.return_annotation + if sys.version_info >= (3, 9): + if ( + isinstance(self.return_type, (types.GenericAlias, typing._GenericAlias)) + and self.return_type.__origin__ is tuple + ): + self.return_type = self.return_type.__args__ + else: + if isinstance(self.return_type, typing._GenericAlias) and self.return_type.__origin__ is tuple: + self.return_type = self.return_type.__args__ + if not isinstance(self.return_type, (list, tuple)): + self.return_type = (self.return_type,) + for i, return_type in enumerate(self.return_type): + if return_type is Ellipsis: + raise TaichiSyntaxError("Ellipsis is not supported in return type annotations") + params = sig.parameters + arg_names = params.keys() + for i, arg_name in enumerate(arg_names): + param = params[arg_name] + if param.kind == inspect.Parameter.VAR_KEYWORD: + raise TaichiSyntaxError("Taichi functions do not support variable keyword parameters (i.e., **kwargs)") + if param.kind == inspect.Parameter.VAR_POSITIONAL: + raise TaichiSyntaxError("Taichi functions do not support variable positional parameters (i.e., *args)") + if param.kind == inspect.Parameter.KEYWORD_ONLY: + raise TaichiSyntaxError("Taichi functions do not support keyword parameters") + if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD: + raise TaichiSyntaxError('Taichi functions only support "positional or keyword" parameters') + annotation = param.annotation + if annotation is inspect.Parameter.empty: + if i == 0 and self.classfunc: + annotation = template() + # TODO: pyfunc also need type annotation check when real function is enabled, + # but that has to happen at runtime when we know which scope it's called from. + elif not self.pyfunc and self.is_real_function: + raise TaichiSyntaxError( + f"Taichi function `{self.func.__name__}` parameter `{arg_name}` must be type annotated" + ) + else: + if isinstance(annotation, ndarray_type.NdarrayType): + pass + elif isinstance(annotation, MatrixType): + pass + elif isinstance(annotation, StructType): + pass + elif id(annotation) in primitive_types.type_ids: + pass + elif isinstance(annotation, template): + pass + elif isinstance(annotation, primitive_types.RefType): + pass + else: + raise TaichiSyntaxError(f"Invalid type annotation (argument {i}) of Taichi function: {annotation}") + self.arguments.append(KernelArgument(annotation, param.name, param.default)) + + +class TaichiCallableTemplateMapper: + def __init__(self, arguments, template_slot_locations): + self.arguments = arguments + self.num_args = len(arguments) + self.template_slot_locations = template_slot_locations + self.mapping = {} + + @staticmethod + def extract_arg(arg, anno, arg_name): + if isinstance(anno, template): + if isinstance(arg, taichi.lang.snode.SNode): + return arg.ptr + if isinstance(arg, taichi.lang.expr.Expr): + return arg.ptr.get_underlying_ptr_address() + if isinstance(arg, _ti_core.Expr): + return arg.get_underlying_ptr_address() + if isinstance(arg, tuple): + return tuple(TaichiCallableTemplateMapper.extract_arg(item, anno, arg_name) for item in arg) + if isinstance(arg, taichi.lang._ndarray.Ndarray): + raise TaichiRuntimeTypeError( + "Ndarray shouldn't be passed in via `ti.template()`, please annotate your kernel using `ti.types.ndarray(...)` instead" + ) + + if isinstance(arg, (list, tuple, dict, set)) or hasattr(arg, "_data_oriented"): + # [Composite arguments] Return weak reference to the object + # Taichi kernel will cache the extracted arguments, thus we can't simply return the original argument. + # Instead, a weak reference to the original value is returned to avoid memory leak. + + # TODO(zhanlue): replacing "tuple(args)" with "hash of argument values" + # This can resolve the following issues: + # 1. Invalid weak-ref will leave a dead(dangling) entry in both caches: "self.mapping" and "self.compiled_functions" + # 2. Different argument instances with same type and same value, will get templatized into seperate kernels. + return weakref.ref(arg) + + # [Primitive arguments] Return the value + return arg + if isinstance(anno, ArgPackType): + if not isinstance(arg, ArgPack): + raise TaichiRuntimeTypeError(f"Argument {arg_name} must be a argument pack, got {type(arg)}") + return tuple( + TaichiCallableTemplateMapper.extract_arg(arg[name], dtype, arg_name) + for index, (name, dtype) in enumerate(anno.members.items()) + ) + if isinstance(anno, texture_type.TextureType): + if not isinstance(arg, taichi.lang._texture.Texture): + raise TaichiRuntimeTypeError(f"Argument {arg_name} must be a texture, got {type(arg)}") + if arg.num_dims != anno.num_dimensions: + raise TaichiRuntimeTypeError( + f"TextureType dimension mismatch for argument {arg_name}: expected {anno.num_dimensions}, got {arg.num_dims}" + ) + return (arg.num_dims,) + if isinstance(anno, texture_type.RWTextureType): + if not isinstance(arg, taichi.lang._texture.Texture): + raise TaichiRuntimeTypeError(f"Argument {arg_name} must be a texture, got {type(arg)}") + if arg.num_dims != anno.num_dimensions: + raise TaichiRuntimeTypeError( + f"RWTextureType dimension mismatch for argument {arg_name}: expected {anno.num_dimensions}, got {arg.num_dims}" + ) + if arg.fmt != anno.fmt: + raise TaichiRuntimeTypeError( + f"RWTextureType format mismatch for argument {arg_name}: expected {anno.fmt}, got {arg.fmt}" + ) + # (penguinliong) '0' is the assumed LOD level. We currently don't + # support mip-mapping. + return arg.num_dims, arg.fmt, 0 + if isinstance(anno, ndarray_type.NdarrayType): + if isinstance(arg, taichi.lang._ndarray.Ndarray): + anno.check_matched(arg.get_type(), arg_name) + needs_grad = (arg.grad is not None) if anno.needs_grad is None else anno.needs_grad + return arg.element_type, len(arg.shape), needs_grad, anno.boundary + if isinstance(arg, AnyArray): + ty = arg.get_type() + anno.check_matched(arg.get_type(), arg_name) + return ty.element_type, len(arg.shape), ty.needs_grad, anno.boundary + # external arrays + shape = getattr(arg, "shape", None) + if shape is None: + raise TaichiRuntimeTypeError(f"Invalid type for argument {arg_name}, got {arg}") + shape = tuple(shape) + element_shape = () + dtype = to_taichi_type(arg.dtype) + if isinstance(anno.dtype, MatrixType): + if anno.ndim is not None: + if len(shape) != anno.dtype.ndim + anno.ndim: + raise ValueError( + f"Invalid value for argument {arg_name} - required array has ndim={anno.ndim} element_dim={anno.dtype.ndim}, " + f"array with {len(shape)} dimensions is provided" + ) + else: + if len(shape) < anno.dtype.ndim: + raise ValueError( + f"Invalid value for argument {arg_name} - required element_dim={anno.dtype.ndim}, " + f"array with {len(shape)} dimensions is provided" + ) + element_shape = shape[-anno.dtype.ndim :] + anno_element_shape = anno.dtype.get_shape() + if None not in anno_element_shape and element_shape != anno_element_shape: + raise ValueError( + f"Invalid value for argument {arg_name} - required element_shape={anno_element_shape}, " + f"array with element shape of {element_shape} is provided" + ) + elif anno.dtype is not None: + # User specified scalar dtype + if anno.dtype != dtype: + raise ValueError( + f"Invalid value for argument {arg_name} - required array has dtype={anno.dtype.to_string()}, " + f"array with dtype={dtype.to_string()} is provided" + ) + + if anno.ndim is not None and len(shape) != anno.ndim: + raise ValueError( + f"Invalid value for argument {arg_name} - required array has ndim={anno.ndim}, " + f"array with {len(shape)} dimensions is provided" + ) + needs_grad = getattr(arg, "requires_grad", False) if anno.needs_grad is None else anno.needs_grad + element_type = ( + _ti_core.get_type_factory_instance().get_tensor_type(element_shape, dtype) + if len(element_shape) != 0 + else arg.dtype + ) + return element_type, len(shape) - len(element_shape), needs_grad, anno.boundary + if isinstance(anno, sparse_matrix_builder): + return arg.dtype + # Use '#' as a placeholder because other kinds of arguments are not involved in template instantiation + return "#" + + def extract(self, args): + extracted = [] + for arg, kernel_arg in zip(args, self.arguments): + extracted.append(self.extract_arg(arg, kernel_arg.annotation, kernel_arg.name)) + return tuple(extracted) + + def lookup(self, args): + if len(args) != self.num_args: + raise TypeError(f"{self.num_args} argument(s) needed but {len(args)} provided.") + + key = self.extract(args) + if key not in self.mapping: + count = len(self.mapping) + self.mapping[key] = count + return self.mapping[key], key + + +def _get_global_vars(_func): + # Discussions: https://github.com/taichi-dev/taichi/issues/282 + global_vars = _func.__globals__.copy() + + freevar_names = _func.__code__.co_freevars + closure = _func.__closure__ + if closure: + freevar_values = list(map(lambda x: x.cell_contents, closure)) + for name, value in zip(freevar_names, freevar_values): + global_vars[name] = value + + return global_vars + + +class Kernel: + counter = 0 + + def __init__(self, _func, autodiff_mode, _classkernel=False): + self.func = _func + self.kernel_counter = Kernel.counter + Kernel.counter += 1 + assert autodiff_mode in ( + AutodiffMode.NONE, + AutodiffMode.VALIDATION, + AutodiffMode.FORWARD, + AutodiffMode.REVERSE, + ) + self.autodiff_mode = autodiff_mode + self.grad = None + self.arguments = [] + self.return_type = None + self.classkernel = _classkernel + self.extract_arguments() + self.template_slot_locations = [] + for i, arg in enumerate(self.arguments): + if isinstance(arg.annotation, template): + self.template_slot_locations.append(i) + self.mapper = TaichiCallableTemplateMapper(self.arguments, self.template_slot_locations) + impl.get_runtime().kernels.append(self) + self.reset() + self.kernel_cpp = None + self.compiled_kernels = {} + self.has_print = False + + def ast_builder(self): + assert self.kernel_cpp is not None + return self.kernel_cpp.ast_builder() + + def reset(self): + self.runtime = impl.get_runtime() + self.compiled_kernels = {} + + def extract_arguments(self): + sig = inspect.signature(self.func) + if sig.return_annotation not in (inspect._empty, None): + self.return_type = sig.return_annotation + if sys.version_info >= (3, 9): + if ( + isinstance(self.return_type, (types.GenericAlias, typing._GenericAlias)) + and self.return_type.__origin__ is tuple + ): + self.return_type = self.return_type.__args__ + else: + if isinstance(self.return_type, typing._GenericAlias) and self.return_type.__origin__ is tuple: + self.return_type = self.return_type.__args__ + if not isinstance(self.return_type, (list, tuple)): + self.return_type = (self.return_type,) + for return_type in self.return_type: + if return_type is Ellipsis: + raise TaichiSyntaxError("Ellipsis is not supported in return type annotations") + params = sig.parameters + arg_names = params.keys() + for i, arg_name in enumerate(arg_names): + param = params[arg_name] + if param.kind == inspect.Parameter.VAR_KEYWORD: + raise TaichiSyntaxError("Taichi kernels do not support variable keyword parameters (i.e., **kwargs)") + if param.kind == inspect.Parameter.VAR_POSITIONAL: + raise TaichiSyntaxError("Taichi kernels do not support variable positional parameters (i.e., *args)") + if param.default is not inspect.Parameter.empty: + raise TaichiSyntaxError("Taichi kernels do not support default values for arguments") + if param.kind == inspect.Parameter.KEYWORD_ONLY: + raise TaichiSyntaxError("Taichi kernels do not support keyword parameters") + if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD: + raise TaichiSyntaxError('Taichi kernels only support "positional or keyword" parameters') + annotation = param.annotation + if param.annotation is inspect.Parameter.empty: + if i == 0 and self.classkernel: # The |self| parameter + annotation = template() + else: + raise TaichiSyntaxError("Taichi kernels parameters must be type annotated") + else: + if isinstance( + annotation, + ( + template, + ndarray_type.NdarrayType, + texture_type.TextureType, + texture_type.RWTextureType, + ), + ): + pass + elif id(annotation) in primitive_types.type_ids: + pass + elif isinstance(annotation, sparse_matrix_builder): + pass + elif isinstance(annotation, MatrixType): + pass + elif isinstance(annotation, StructType): + pass + elif isinstance(annotation, ArgPackType): + pass + else: + raise TaichiSyntaxError(f"Invalid type annotation (argument {i}) of Taichi kernel: {annotation}") + self.arguments.append(KernelArgument(annotation, param.name, param.default)) + + def materialize(self, key=None, args=None, arg_features=None): + if key is None: + key = (self.func, 0, self.autodiff_mode) + self.runtime.materialize() + + if key in self.compiled_kernels: + return + + kernel_name = f"{self.func.__name__}_c{self.kernel_counter}_{key[1]}" + _logging.trace(f"Compiling kernel {kernel_name} in {self.autodiff_mode}...") + + tree, ctx = _get_tree_and_ctx( + self, + args=args, + excluded_parameters=self.template_slot_locations, + arg_features=arg_features, + ) + + if self.autodiff_mode != AutodiffMode.NONE: + KernelSimplicityASTChecker(self.func).visit(tree) + + # Do not change the name of 'taichi_ast_generator' + # The warning system needs this identifier to remove unnecessary messages + def taichi_ast_generator(kernel_cxx): + if self.runtime.inside_kernel: + raise TaichiSyntaxError( + "Kernels cannot call other kernels. I.e., nested kernels are not allowed. " + "Please check if you have direct/indirect invocation of kernels within kernels. " + "Note that some methods provided by the Taichi standard library may invoke kernels, " + "and please move their invocations to Python-scope." + ) + self.kernel_cpp = kernel_cxx + self.runtime.inside_kernel = True + self.runtime.current_kernel = self + assert self.runtime.compiling_callable is None + self.runtime.compiling_callable = kernel_cxx + try: + ctx.ast_builder = kernel_cxx.ast_builder() + transform_tree(tree, ctx) + if not ctx.is_real_function: + if self.return_type and ctx.returned != ReturnStatus.ReturnedValue: + raise TaichiSyntaxError("Kernel has a return type but does not have a return statement") + finally: + self.runtime.inside_kernel = False + self.runtime.current_kernel = None + self.runtime.compiling_callable = None + + taichi_kernel = impl.get_runtime().prog.create_kernel(taichi_ast_generator, kernel_name, self.autodiff_mode) + assert key not in self.compiled_kernels + self.compiled_kernels[key] = taichi_kernel + + def launch_kernel(self, t_kernel, *args): + assert len(args) == len(self.arguments), f"{len(self.arguments)} arguments needed but {len(args)} provided" + + tmps = [] + callbacks = [] + + actual_argument_slot = 0 + launch_ctx = t_kernel.make_launch_context() + max_arg_num = 64 + exceed_max_arg_num = False + + def set_arg_ndarray(indices, v): + v_primal = v.arr + v_grad = v.grad.arr if v.grad else None + if v_grad is None: + launch_ctx.set_arg_ndarray(indices, v_primal) + else: + launch_ctx.set_arg_ndarray_with_grad(indices, v_primal, v_grad) + + def set_arg_texture(indices, v): + launch_ctx.set_arg_texture(indices, v.tex) + + def set_arg_rw_texture(indices, v): + launch_ctx.set_arg_rw_texture(indices, v.tex) + + def set_arg_ext_array(indices, v, needed): + # Element shapes are already specialized in Taichi codegen. + # The shape information for element dims are no longer needed. + # Therefore we strip the element shapes from the shape vector, + # so that it only holds "real" array shapes. + is_soa = needed.layout == Layout.SOA + array_shape = v.shape + if functools.reduce(operator.mul, array_shape, 1) > np.iinfo(np.int32).max: + warnings.warn("Ndarray index might be out of int32 boundary but int64 indexing is not supported yet.") + if needed.dtype is None or id(needed.dtype) in primitive_types.type_ids: + element_dim = 0 + else: + element_dim = needed.dtype.ndim + array_shape = v.shape[element_dim:] if is_soa else v.shape[:-element_dim] + if isinstance(v, np.ndarray): + if v.flags.c_contiguous: + launch_ctx.set_arg_external_array_with_shape(indices, int(v.ctypes.data), v.nbytes, array_shape, 0) + elif v.flags.f_contiguous: + # TODO: A better way that avoids copying is saving strides info. + tmp = np.ascontiguousarray(v) + # Purpose: DO NOT GC |tmp|! + tmps.append(tmp) + + def callback(original, updated): + np.copyto(original, np.asfortranarray(updated)) + + callbacks.append(functools.partial(callback, v, tmp)) + launch_ctx.set_arg_external_array_with_shape( + indices, int(tmp.ctypes.data), tmp.nbytes, array_shape, 0 + ) + else: + raise ValueError( + "Non contiguous numpy arrays are not supported, please call np.ascontiguousarray(arr) " + "before passing it into taichi kernel." + ) + elif has_pytorch(): + import torch # pylint: disable=C0415 + + if isinstance(v, torch.Tensor): + if not v.is_contiguous(): + raise ValueError( + "Non contiguous tensors are not supported, please call tensor.contiguous() before " + "passing it into taichi kernel." + ) + taichi_arch = self.runtime.prog.config().arch + + def get_call_back(u, v): + def call_back(): + u.copy_(v) + + return call_back + + # FIXME: only allocate when launching grad kernel + if v.requires_grad and v.grad is None: + v.grad = torch.zeros_like(v) + + if v.requires_grad: + if not isinstance(v.grad, torch.Tensor): + raise ValueError( + f"Expecting torch.Tensor for gradient tensor, but getting {v.grad.__class__.__name__} instead" + ) + if not v.grad.is_contiguous(): + raise ValueError( + "Non contiguous gradient tensors are not supported, please call tensor.grad.contiguous() before passing it into taichi kernel." + ) + + tmp = v + if (str(v.device) != "cpu") and not ( + str(v.device).startswith("cuda") and taichi_arch == _ti_core.Arch.cuda + ): + # Getting a torch CUDA tensor on Taichi non-cuda arch: + # We just replace it with a CPU tensor and by the end of kernel execution we'll use the + # callback to copy the values back to the original CUDA tensor. + host_v = v.to(device="cpu", copy=True) + tmp = host_v + callbacks.append(get_call_back(v, host_v)) + + launch_ctx.set_arg_external_array_with_shape( + indices, + int(tmp.data_ptr()), + tmp.element_size() * tmp.nelement(), + array_shape, + int(v.grad.data_ptr()) if v.grad is not None else 0, + ) + else: + raise TaichiRuntimeTypeError( + f"Argument {needed.to_string()} cannot be converted into required type {v}" + ) + elif has_paddle(): + import paddle # pylint: disable=C0415 + + if isinstance(v, paddle.Tensor): + # For now, paddle.fluid.core.Tensor._ptr() is only available on develop branch + def get_call_back(u, v): + def call_back(): + u.copy_(v, False) + + return call_back + + tmp = v.value().get_tensor() + taichi_arch = self.runtime.prog.config().arch + if v.place.is_gpu_place(): + if taichi_arch != _ti_core.Arch.cuda: + # Paddle cuda tensor on Taichi non-cuda arch + host_v = v.cpu() + tmp = host_v.value().get_tensor() + callbacks.append(get_call_back(v, host_v)) + elif v.place.is_cpu_place(): + if taichi_arch == _ti_core.Arch.cuda: + # Paddle cpu tensor on Taichi cuda arch + gpu_v = v.cuda() + tmp = gpu_v.value().get_tensor() + callbacks.append(get_call_back(v, gpu_v)) + else: + # Paddle do support many other backends like XPU, NPU, MLU, IPU + raise TaichiRuntimeTypeError(f"Taichi do not support backend {v.place} that Paddle support") + launch_ctx.set_arg_external_array_with_shape( + indices, int(tmp._ptr()), v.element_size() * v.size, array_shape, 0 + ) + else: + raise TaichiRuntimeTypeError( + f"Argument {needed.to_string()} cannot be converted into required type {v}" + ) + else: + raise TaichiRuntimeTypeError( + f"Argument {needed.to_string()} cannot be converted into required type {v}" + ) + + def set_arg_matrix(indices, v, needed): + if needed.dtype in primitive_types.real_types: + + def cast_func(x): + if not isinstance(x, (int, float, np.integer, np.floating)): + raise TaichiRuntimeTypeError( + f"Argument {needed.dtype.to_string()} cannot be converted into required type {type(x)}" + ) + return float(x) + + elif needed.dtype in primitive_types.integer_types: + + def cast_func(x): + if not isinstance(x, (int, np.integer)): + raise TaichiRuntimeTypeError( + f"Argument {needed.dtype.to_string()} cannot be converted into required type {type(x)}" + ) + return int(x) + + else: + raise ValueError(f"Matrix dtype {needed.dtype} is not integer type or real type.") + + if needed.ndim == 2: + v = [cast_func(v[i, j]) for i in range(needed.n) for j in range(needed.m)] + else: + v = [cast_func(v[i]) for i in range(needed.n)] + v = needed(*v) + needed.set_kernel_struct_args(v, launch_ctx, indices) + + def set_arg_sparse_matrix_builder(indices, v): + # Pass only the base pointer of the ti.types.sparse_matrix_builder() argument + launch_ctx.set_arg_uint(indices, v._get_ndarray_addr()) + + set_later_list = [] + + def recursive_set_args(needed, provided, v, indices): + in_argpack = len(indices) > 1 + nonlocal actual_argument_slot, exceed_max_arg_num, set_later_list + if actual_argument_slot >= max_arg_num: + exceed_max_arg_num = True + return 0 + actual_argument_slot += 1 + if isinstance(needed, ArgPackType): + if not isinstance(v, ArgPack): + raise TaichiRuntimeTypeError.get(indices, str(needed), str(provided)) + idx_new = 0 + for j, (name, anno) in enumerate(needed.members.items()): + idx_new += recursive_set_args(anno, type(v[name]), v[name], indices + (idx_new,)) + launch_ctx.set_arg_argpack(indices, v._ArgPack__argpack) + return 1 + # Note: do not use sth like "needed == f32". That would be slow. + if id(needed) in primitive_types.real_type_ids: + if not isinstance(v, (float, int, np.floating, np.integer)): + raise TaichiRuntimeTypeError.get(indices, needed.to_string(), provided) + if in_argpack: + return 1 + launch_ctx.set_arg_float(indices, float(v)) + return 1 + if id(needed) in primitive_types.integer_type_ids: + if not isinstance(v, (int, np.integer)): + raise TaichiRuntimeTypeError.get(indices, needed.to_string(), provided) + if in_argpack: + return 1 + if is_signed(cook_dtype(needed)): + launch_ctx.set_arg_int(indices, int(v)) + else: + launch_ctx.set_arg_uint(indices, int(v)) + return 1 + if isinstance(needed, sparse_matrix_builder): + if in_argpack: + set_later_list.append((set_arg_sparse_matrix_builder, (v,))) + return 0 + set_arg_sparse_matrix_builder(indices, v) + return 1 + if isinstance(needed, ndarray_type.NdarrayType) and isinstance(v, taichi.lang._ndarray.Ndarray): + if in_argpack: + set_later_list.append((set_arg_ndarray, (v,))) + return 0 + set_arg_ndarray(indices, v) + return 1 + if isinstance(needed, texture_type.TextureType) and isinstance(v, taichi.lang._texture.Texture): + if in_argpack: + set_later_list.append((set_arg_texture, (v,))) + return 0 + set_arg_texture(indices, v) + return 1 + if isinstance(needed, texture_type.RWTextureType) and isinstance(v, taichi.lang._texture.Texture): + if in_argpack: + set_later_list.append((set_arg_rw_texture, (v,))) + return 0 + set_arg_rw_texture(indices, v) + return 1 + if isinstance(needed, ndarray_type.NdarrayType): + if in_argpack: + set_later_list.append((set_arg_ext_array, (v, needed))) + return 0 + set_arg_ext_array(indices, v, needed) + return 1 + if isinstance(needed, MatrixType): + if in_argpack: + return 1 + set_arg_matrix(indices, v, needed) + return 1 + if isinstance(needed, StructType): + if in_argpack: + return 1 + if not isinstance(v, needed): + raise TaichiRuntimeTypeError(f"Argument {provided} cannot be converted into required type {needed}") + needed.set_kernel_struct_args(v, launch_ctx, indices) + return 1 + raise ValueError(f"Argument type mismatch. Expecting {needed}, got {type(v)}.") + + template_num = 0 + for i, val in enumerate(args): + needed_ = self.arguments[i].annotation + if isinstance(needed_, template): + template_num += 1 + continue + recursive_set_args(needed_, type(val), val, (i - template_num,)) + + for i, (set_arg_func, params) in enumerate(set_later_list): + set_arg_func((len(args) - template_num + i,), *params) + + if exceed_max_arg_num: + raise TaichiRuntimeError( + f"The number of elements in kernel arguments is too big! Do not exceed {max_arg_num} on {_ti_core.arch_name(impl.current_cfg().arch)} backend." + ) + + try: + prog = impl.get_runtime().prog + # Compile kernel (& Online Cache & Offline Cache) + compiled_kernel_data = prog.compile_kernel(prog.config(), prog.get_device_caps(), t_kernel) + # Launch kernel + prog.launch_kernel(compiled_kernel_data, launch_ctx) + except Exception as e: + e = handle_exception_from_cpp(e) + if impl.get_runtime().print_full_traceback: + raise e + raise e from None + + ret = None + ret_dt = self.return_type + has_ret = ret_dt is not None + + if has_ret or self.has_print: + runtime_ops.sync() + + if has_ret: + ret = [] + for i, ret_type in enumerate(ret_dt): + ret.append(self.construct_kernel_ret(launch_ctx, ret_type, (i,))) + if len(ret_dt) == 1: + ret = ret[0] + if callbacks: + for c in callbacks: + c() + + return ret + + def construct_kernel_ret(self, launch_ctx, ret_type, index=()): + if isinstance(ret_type, CompoundType): + return ret_type.from_kernel_struct_ret(launch_ctx, index) + if ret_type in primitive_types.integer_types: + if is_signed(cook_dtype(ret_type)): + return launch_ctx.get_struct_ret_int(index) + return launch_ctx.get_struct_ret_uint(index) + if ret_type in primitive_types.real_types: + return launch_ctx.get_struct_ret_float(index) + raise TaichiRuntimeTypeError(f"Invalid return type on index={index}") + + def ensure_compiled(self, *args): + instance_id, arg_features = self.mapper.lookup(args) + key = (self.func, instance_id, self.autodiff_mode) + self.materialize(key=key, args=args, arg_features=arg_features) + return key + + # For small kernels (< 3us), the performance can be pretty sensitive to overhead in __call__ + # Thus this part needs to be fast. (i.e. < 3us on a 4 GHz x64 CPU) + @_shell_pop_print + def __call__(self, *args, **kwargs): + args = _process_args(self, args, kwargs) + + # Transform the primal kernel to forward mode grad kernel + # then recover to primal when exiting the forward mode manager + if self.runtime.fwd_mode_manager and not self.runtime.grad_replaced: + # TODO: if we would like to compute 2nd-order derivatives by forward-on-reverse in a nested context manager fashion, + # i.e., a `Tape` nested in the `FwdMode`, we can transform the kernels with `mode_original == AutodiffMode.REVERSE` only, + # to avoid duplicate computation for 1st-order derivatives + self.runtime.fwd_mode_manager.insert(self) + + # Both the class kernels and the plain-function kernels are unified now. + # In both cases, |self.grad| is another Kernel instance that computes the + # gradient. For class kernels, args[0] is always the kernel owner. + + # No need to capture grad kernels because they are already bound with their primal kernels + if ( + self.autodiff_mode in (AutodiffMode.NONE, AutodiffMode.VALIDATION) + and self.runtime.target_tape + and not self.runtime.grad_replaced + ): + self.runtime.target_tape.insert(self, args) + + if self.autodiff_mode != AutodiffMode.NONE and impl.current_cfg().opt_level == 0: + _logging.warn("""opt_level = 1 is enforced to enable gradient computation.""") + impl.current_cfg().opt_level = 1 + key = self.ensure_compiled(*args) + kernel_cpp = self.compiled_kernels[key] + return self.launch_kernel(kernel_cpp, *args) + + +# For a Taichi class definition like below: +# +# @ti.data_oriented +# class X: +# @ti.kernel +# def foo(self): +# ... +# +# When ti.kernel runs, the stackframe's |code_context| of Python 3.8(+) is +# different from that of Python 3.7 and below. In 3.8+, it is 'class X:', +# whereas in <=3.7, it is '@ti.data_oriented'. More interestingly, if the class +# inherits, i.e. class X(object):, then in both versions, |code_context| is +# 'class X(object):'... +_KERNEL_CLASS_STACKFRAME_STMT_RES = [ + re.compile(r"@(\w+\.)?data_oriented"), + re.compile(r"class "), +] + + +def _inside_class(level_of_class_stackframe): + try: + maybe_class_frame = sys._getframe(level_of_class_stackframe) + statement_list = inspect.getframeinfo(maybe_class_frame)[3] + first_statment = statement_list[0].strip() + for pat in _KERNEL_CLASS_STACKFRAME_STMT_RES: + if pat.match(first_statment): + return True + except: + pass + return False + + +def _kernel_impl(_func, level_of_class_stackframe, verbose=False): + # Can decorators determine if a function is being defined inside a class? + # https://stackoverflow.com/a/8793684/12003165 + is_classkernel = _inside_class(level_of_class_stackframe + 1) + + if verbose: + print(f"kernel={_func.__name__} is_classkernel={is_classkernel}") + primal = Kernel(_func, autodiff_mode=AutodiffMode.NONE, _classkernel=is_classkernel) + adjoint = Kernel(_func, autodiff_mode=AutodiffMode.REVERSE, _classkernel=is_classkernel) + # Having |primal| contains |grad| makes the tape work. + primal.grad = adjoint + + if is_classkernel: + # For class kernels, their primal/adjoint callables are constructed + # when the kernel is accessed via the instance inside + # _BoundedDifferentiableMethod. + # This is because we need to bind the kernel or |grad| to the instance + # owning the kernel, which is not known until the kernel is accessed. + # + # See also: _BoundedDifferentiableMethod, data_oriented. + @functools.wraps(_func) + def wrapped(*args, **kwargs): + # If we reach here (we should never), it means the class is not decorated + # with @ti.data_oriented, otherwise getattr would have intercepted the call. + clsobj = type(args[0]) + assert not hasattr(clsobj, "_data_oriented") + raise TaichiSyntaxError(f"Please decorate class {clsobj.__name__} with @ti.data_oriented") + + else: + + @functools.wraps(_func) + def wrapped(*args, **kwargs): + try: + return primal(*args, **kwargs) + except (TaichiCompilationError, TaichiRuntimeError) as e: + if impl.get_runtime().print_full_traceback: + raise e + raise type(e)("\n" + str(e)) from None + + wrapped.grad = adjoint + + wrapped._is_wrapped_kernel = True + wrapped._is_classkernel = is_classkernel + wrapped._primal = primal + wrapped._adjoint = adjoint + return wrapped + + +def kernel(fn): + """Marks a function as a Taichi kernel. + + A Taichi kernel is a function written in Python, and gets JIT compiled by + Taichi into native CPU/GPU instructions (e.g. a series of CUDA kernels). + The top-level ``for`` loops are automatically parallelized, and distributed + to either a CPU thread pool or massively parallel GPUs. + + Kernel's gradient kernel would be generated automatically by the AutoDiff system. + + See also https://docs.taichi-lang.org/docs/syntax#kernel. + + Args: + fn (Callable): the Python function to be decorated + + Returns: + Callable: The decorated function + + Example:: + + >>> x = ti.field(ti.i32, shape=(4, 8)) + >>> + >>> @ti.kernel + >>> def run(): + >>> # Assigns all the elements of `x` in parallel. + >>> for i in x: + >>> x[i] = i + """ + return _kernel_impl(fn, level_of_class_stackframe=3) + + +class _BoundedDifferentiableMethod: + def __init__(self, kernel_owner, wrapped_kernel_func): + clsobj = type(kernel_owner) + if not getattr(clsobj, "_data_oriented", False): + raise TaichiSyntaxError(f"Please decorate class {clsobj.__name__} with @ti.data_oriented") + self._kernel_owner = kernel_owner + self._primal = wrapped_kernel_func._primal + self._adjoint = wrapped_kernel_func._adjoint + self._is_staticmethod = wrapped_kernel_func._is_staticmethod + self.__name__ = None + + def __call__(self, *args, **kwargs): + try: + if self._is_staticmethod: + return self._primal(*args, **kwargs) + return self._primal(self._kernel_owner, *args, **kwargs) + except (TaichiCompilationError, TaichiRuntimeError) as e: + if impl.get_runtime().print_full_traceback: + raise e + raise type(e)("\n" + str(e)) from None + + def grad(self, *args, **kwargs): + return self._adjoint(self._kernel_owner, *args, **kwargs) + + +def data_oriented(cls): + """Marks a class as Taichi compatible. + + To allow for modularized code, Taichi provides this decorator so that + Taichi kernels can be defined inside a class. + + See also https://docs.taichi-lang.org/docs/odop + + Example:: + + >>> @ti.data_oriented + >>> class TiArray: + >>> def __init__(self, n): + >>> self.x = ti.field(ti.f32, shape=n) + >>> + >>> @ti.kernel + >>> def inc(self): + >>> for i in self.x: + >>> self.x[i] += 1.0 + >>> + >>> a = TiArray(32) + >>> a.inc() + + Args: + cls (Class): the class to be decorated + + Returns: + The decorated class. + """ + + def _getattr(self, item): + method = cls.__dict__.get(item, None) + is_property = method.__class__ == property + is_staticmethod = method.__class__ == staticmethod + if is_property: + x = method.fget + else: + x = super(cls, self).__getattribute__(item) + if hasattr(x, "_is_wrapped_kernel"): + if inspect.ismethod(x): + wrapped = x.__func__ + else: + wrapped = x + wrapped._is_staticmethod = is_staticmethod + assert inspect.isfunction(wrapped) + if wrapped._is_classkernel: + ret = _BoundedDifferentiableMethod(self, wrapped) + ret.__name__ = wrapped.__name__ + if is_property: + return ret() + return ret + if is_property: + return x(self) + return x + + cls.__getattribute__ = _getattr + cls._data_oriented = True + + return cls + + +__all__ = ["data_oriented", "func", "kernel", "pyfunc", "real_func"] diff --git a/local_packages/taichi/lang/matrix.py b/local_packages/taichi/lang/matrix.py new file mode 100644 index 0000000..49a818c --- /dev/null +++ b/local_packages/taichi/lang/matrix.py @@ -0,0 +1,1875 @@ +import functools +import numbers +from collections.abc import Iterable +from itertools import product + +import numpy as np +from taichi._lib import core as ti_python_core +from taichi.lang import expr, impl +from taichi.lang import ops as ops_mod +from taichi.lang import runtime_ops +from taichi.lang._ndarray import Ndarray, NdarrayHostAccess +from taichi.lang.common_ops import TaichiOperations +from taichi.lang.enums import Layout +from taichi.lang.exception import ( + TaichiRuntimeError, + TaichiRuntimeTypeError, + TaichiSyntaxError, + TaichiTypeError, +) +from taichi.lang.field import Field, ScalarField, SNodeHostAccess +from taichi.lang.util import ( + cook_dtype, + get_traceback, + in_python_scope, + python_scope, + taichi_scope, + to_numpy_type, + to_paddle_type, + to_pytorch_type, + warning, +) +from taichi.types import primitive_types +from taichi.types.compound_types import CompoundType +from taichi.types.utils import is_signed +from taichi._lib.utils import ti_python_core as _ti_python_core + +_type_factory = _ti_python_core.get_type_factory_instance() + + +def _generate_swizzle_patterns(key_group: str, required_length=4): + """Generate vector swizzle patterns from a given set of characters. + + Example: + + For `key_group=xyzw` and `required_length=4`, this function will return a + list consists of all possible strings (no repeats) in characters + `x`, `y`, `z`, `w` and of length<=4: + [`x`, `y`, `z`, `w`, `xx`, `xy`, `yx`, ..., `xxxx`, `xxxy`, `xyzw`, ...] + The length of the list will be 4 + 4x4 + 4x4x4 + 4x4x4x4 = 340. + """ + result = [] + for k in range(1, required_length + 1): + result.extend(product(key_group, repeat=k)) + result = ["".join(pat) for pat in result] + return result + + +def _gen_swizzles(cls): + # https://www.khronos.org/opengl/wiki/Data_Type_(GLSL)#Swizzling + KEYGROUP_SET = ["xyzw", "rgba", "stpq"] + cls._swizzle_to_keygroup = {} + cls._keygroup_to_checker = {} + + def make_valid_attribs_checker(key_group): + def check(instance, pattern): + valid_attribs = set(key_group[: instance.n]) + pattern_set = set(pattern) + diff = pattern_set - valid_attribs + if len(diff): + valid_attribs = tuple(sorted(valid_attribs)) + pattern = tuple(pattern) + raise TaichiSyntaxError(f"vec{instance.n} only has " f"attributes={valid_attribs}, got={pattern}") + + return check + + for key_group in KEYGROUP_SET: + cls._keygroup_to_checker[key_group] = make_valid_attribs_checker(key_group) + for index, attr in enumerate(key_group): + + def gen_property(attr, attr_idx, key_group): + checker = cls._keygroup_to_checker[key_group] + + def prop_getter(instance): + checker(instance, attr) + return instance[attr_idx] + + @python_scope + def prop_setter(instance, value): + checker(instance, attr) + instance[attr_idx] = value + + return property(prop_getter, prop_setter) + + prop = gen_property(attr, index, key_group) + setattr(cls, attr, prop) + cls._swizzle_to_keygroup[attr] = key_group + + for key_group in KEYGROUP_SET: + sw_patterns = _generate_swizzle_patterns(key_group, required_length=4) + # len=1 accessors are handled specially above + sw_patterns = filter(lambda p: len(p) > 1, sw_patterns) + for prop_key in sw_patterns: + # Create a function for value capturing + def gen_property(pattern, key_group): + checker = cls._keygroup_to_checker[key_group] + + def prop_getter(instance): + checker(instance, pattern) + res = [] + for ch in pattern: + res.append(instance[key_group.index(ch)]) + return Vector(res) + + @python_scope + def prop_setter(instance, value): + if len(pattern) != len(value): + raise TaichiRuntimeError(f"value len does not match the swizzle pattern={pattern}") + checker(instance, pattern) + for ch, val in zip(pattern, value): + instance[key_group.index(ch)] = val + + prop = property(prop_getter, prop_setter) + return prop + + prop = gen_property(prop_key, key_group) + setattr(cls, prop_key, prop) + cls._swizzle_to_keygroup[prop_key] = key_group + return cls + + +def _infer_entry_dt(entry): + if isinstance(entry, (int, np.integer)): + return impl.get_runtime().default_ip + if isinstance(entry, (float, np.floating)): + return impl.get_runtime().default_fp + if isinstance(entry, expr.Expr): + dt = entry.ptr.get_rvalue_type() + if dt == ti_python_core.DataType_unknown: + raise TaichiTypeError("Element type of the matrix cannot be inferred. Please set dt instead for now.") + return dt + raise TaichiTypeError("Element type of the matrix is invalid.") + + +def _infer_array_dt(arr): + assert len(arr) > 0 + return functools.reduce(ti_python_core.promoted_type, map(_infer_entry_dt, arr)) + + +def make_matrix_with_shape(arr, shape, dt): + return expr.Expr( + impl.get_runtime() + .compiling_callable.ast_builder() + .make_matrix_expr( + shape, + dt, + [expr.Expr(elt).ptr for elt in arr], + ti_python_core.DebugInfo(impl.get_runtime().get_current_src_info()), + ) + ) + + +def make_matrix(arr, dt=None): + if len(arr) == 0: + # the only usage of an empty vector is to serve as field indices + shape = [0] + dt = primitive_types.i32 + else: + if isinstance(arr[0], Iterable): # matrix + shape = [len(arr), len(arr[0])] + arr = [elt for row in arr for elt in row] + else: # vector + shape = [len(arr)] + if dt is None: + dt = _infer_array_dt(arr) + else: + dt = cook_dtype(dt) + return expr.Expr( + impl.get_runtime() + .compiling_callable.ast_builder() + .make_matrix_expr( + shape, + dt, + [expr.Expr(elt).ptr for elt in arr], + ti_python_core.DebugInfo(impl.get_runtime().get_current_src_info()), + ) + ) + + +def _read_host_access(x): + if isinstance(x, SNodeHostAccess): + return x.accessor.getter(*x.key) + assert isinstance(x, NdarrayHostAccess) + return x.getter() + + +def _write_host_access(x, value): + if isinstance(x, SNodeHostAccess): + x.accessor.setter(value, *x.key) + else: + assert isinstance(x, NdarrayHostAccess) + x.setter(value) + + +@_gen_swizzles +class Matrix(TaichiOperations): + """The matrix class. + + A matrix is a 2-D rectangular array with scalar entries, it's row-majored, and is + aligned continuously. We recommend only use matrix with no more than 32 elements for + efficiency considerations. + + Note: in taichi a matrix is strictly two-dimensional and only stores scalars. + + Args: + arr (Union[list, tuple, np.ndarray]): the initial values of a matrix. + dt (:mod:`~taichi.types.primitive_types`): the element data type. + ndim (int optional): the number of dimensions of the matrix; forced reshape if given. + + Example:: + + use a 2d list to initialize a matrix + + >>> @ti.kernel + >>> def test(): + >>> n = 5 + >>> M = ti.Matrix([[0] * n for _ in range(n)], ti.i32) + >>> print(M) # a 5x5 matrix with integer elements + + get the number of rows and columns via the `n`, `m` property: + + >>> M = ti.Matrix([[0, 1], [2, 3], [4, 5]], ti.i32) + >>> M.n # number of rows + 3 + >>> M.m # number of cols + >>> 2 + + you can even initialize a matrix with an empty list: + + >>> M = ti.Matrix([[], []], ti.i32) + >>> M.n + 2 + >>> M.m + 0 + """ + + _is_taichi_class = True + _is_matrix_class = True + __array_priority__ = 1000 + + def __init__(self, arr, dt=None): + if not isinstance(arr, (list, tuple, np.ndarray)): + raise TaichiTypeError("An Matrix/Vector can only be initialized with an array-like object") + if len(arr) == 0: + self.ndim = 0 + self.n, self.m = 0, 0 + self.entries = np.array([]) + self.is_host_access = False + elif isinstance(arr[0], Matrix): + raise Exception("cols/rows required when using list of vectors") + elif isinstance(arr[0], Iterable): # matrix + self.ndim = 2 + self.n, self.m = len(arr), len(arr[0]) + if isinstance(arr[0][0], (SNodeHostAccess, NdarrayHostAccess)): + self.entries = arr + self.is_host_access = True + else: + self.entries = np.array(arr, None if dt is None else to_numpy_type(dt)) + self.is_host_access = False + else: # vector + self.ndim = 1 + self.n, self.m = len(arr), 1 + if isinstance(arr[0], (SNodeHostAccess, NdarrayHostAccess)): + self.entries = arr + self.is_host_access = True + else: + self.entries = np.array(arr, None if dt is None else to_numpy_type(dt)) + self.is_host_access = False + + if self.n * self.m > 32: + warning( + f"Taichi matrices/vectors with {self.n}x{self.m} > 32 entries are not suggested." + " Matrices/vectors will be automatically unrolled at compile-time for performance." + " So the compilation time could be extremely long if the matrix size is too big." + " You may use a field to store a large matrix like this, e.g.:\n" + f" x = ti.field(ti.f32, ({self.n}, {self.m})).\n" + " See https://docs.taichi-lang.org/docs/field#matrix-size" + " for more details.", + UserWarning, + stacklevel=2, + ) + + def get_shape(self): + if self.ndim == 1: + return (self.n,) + if self.ndim == 2: + return (self.n, self.m) + return None + + def __matmul__(self, other): + """Matrix-matrix or matrix-vector multiply. + + Args: + other (Union[Matrix, Vector]): a matrix or a vector. + + Returns: + The matrix-matrix product or matrix-vector product. + + """ + from taichi.lang import matrix_ops # pylint: disable=C0415 + + return matrix_ops.matmul(self, other) + + # host access & python scope operation + def __len__(self): + """Get the length of each row of a matrix""" + # TODO: When this is a vector, should return its dimension? + return self.n + + def __iter__(self): + if self.ndim == 1: + return (self[i] for i in range(self.n)) + return ([self[i, j] for j in range(self.m)] for i in range(self.n)) + + def __getitem__(self, indices): + """Access to the element at the given indices in a matrix. + + Args: + indices (Sequence[Expr]): the indices of the element. + + Returns: + The value of the element at a specific position of a matrix. + + """ + entry = self._get_entry(indices) + if self.is_host_access: + return _read_host_access(entry) + return entry + + @python_scope + def __setitem__(self, indices, item): + """Set the element value at the given indices in a matrix. + + Args: + indices (Sequence[Expr]): the indices of a element. + + """ + if self.is_host_access: + entry = self._get_entry(indices) + _write_host_access(entry, item) + else: + if not isinstance(indices, (list, tuple)): + indices = [indices] + assert len(indices) in [1, 2] + assert len(indices) == self.ndim, f"Expected {self.ndim} indices, got {len(indices)}" + if self.ndim == 1: + self.entries[indices[0]] = item + else: + self.entries[indices[0]][indices[1]] = item + + def _get_entry(self, indices): + if not isinstance(indices, (list, tuple)): + indices = [indices] + assert len(indices) in [1, 2] + assert len(indices) == self.ndim, f"Expected {self.ndim} indices, got {len(indices)}" + if self.ndim == 1: + return self.entries[indices[0]] + return self.entries[indices[0]][indices[1]] + + def _get_slice(self, a, b): + if isinstance(a, slice): + a = range(a.start or 0, a.stop or self.n, a.step or 1) + if isinstance(b, slice): + b = range(b.start or 0, b.stop or self.m, b.step or 1) + if isinstance(a, range) and isinstance(b, range): + return Matrix([[self._get_entry(i, j) for j in b] for i in a]) + if isinstance(a, range): # b is not range + return Vector([self._get_entry(i, b) for i in a]) + # a is not range while b is range + return Vector([self._get_entry(a, j) for j in b]) + + @python_scope + def _set_entries(self, value): + if isinstance(value, Matrix): + value = value.to_list() + if self.is_host_access: + if self.ndim == 1: + for i in range(self.n): + _write_host_access(self.entries[i], value[i]) + else: + for i in range(self.n): + for j in range(self.m): + _write_host_access(self.entries[i][j], value[i][j]) + else: + if self.ndim == 1: + for i in range(self.n): + self.entries[i] = value[i] + else: + for i in range(self.n): + for j in range(self.m): + self.entries[i][j] = value[i][j] + + @property + def _members(self): + return self.entries + + def to_list(self): + """Return this matrix as a 1D `list`. + + This is similar to `numpy.ndarray`'s `flatten` and `ravel` methods, + the difference is that this function always returns a new list. + """ + if self.is_host_access: + if self.ndim == 1: + return [_read_host_access(self.entries[i]) for i in range(self.n)] + assert self.ndim == 2 + return [[_read_host_access(self.entries[i][j]) for j in range(self.m)] for i in range(self.n)] + return self.entries.tolist() + + @taichi_scope + def cast(self, dtype): + """Cast the matrix elements to a specified data type. + + Args: + dtype (:mod:`~taichi.types.primitive_types`): data type of the + returned matrix. + + Returns: + :class:`taichi.Matrix`: A new matrix with the specified data dtype. + + Example:: + + >>> A = ti.Matrix([0, 1, 2], ti.i32) + >>> B = A.cast(ti.f32) + >>> B + [0.0, 1.0, 2.0] + """ + if self.ndim == 1: + return Vector([ops_mod.cast(self[i], dtype) for i in range(self.n)]) + return Matrix([[ops_mod.cast(self[i, j], dtype) for j in range(self.m)] for i in range(self.n)]) + + def trace(self): + """The sum of a matrix diagonal elements. + + To call this method the matrix must be square-like. + + Returns: + The sum of a matrix diagonal elements. + + Example:: + + >>> m = ti.Matrix([[1, 2], [3, 4]]) + >>> m.trace() + 5 + """ + # pylint: disable-msg=C0415 + from taichi.lang import matrix_ops + + return matrix_ops.trace(self) + + def inverse(self): + """Returns the inverse of this matrix. + + Note: + The matrix dimension should be less than or equal to 4. + + Returns: + :class:`~taichi.Matrix`: The inverse of a matrix. + + Raises: + Exception: Inversions of matrices with sizes >= 5 are not supported. + """ + from taichi.lang import matrix_ops # pylint: disable=C0415 + + return matrix_ops.inverse(self) + + def normalized(self, eps=0): + """Normalize a vector, i.e. matrices with the second dimension being + equal to one. + + The normalization of a vector `v` is a vector of length 1 + and has the same direction with `v`. It's equal to `v/|v|`. + + Args: + eps (float): a safe-guard value for sqrt, usually 0. + + Example:: + + >>> a = ti.Vector([3, 4], ti.f32) + >>> a.normalized() + [0.6, 0.8] + """ + # pylint: disable-msg=C0415 + from taichi.lang import matrix_ops + + return matrix_ops.normalized(self, eps) + + def transpose(self): + """Returns the transpose of a matrix. + + Returns: + :class:`~taichi.Matrix`: The transpose of this matrix. + + Example:: + + >>> A = ti.Matrix([[0, 1], [2, 3]]) + >>> A.transpose() + [[0, 2], [1, 3]] + """ + # pylint: disable=C0415 + from taichi.lang import matrix_ops + + return matrix_ops.transpose(self) + + @taichi_scope + def determinant(a): + """Returns the determinant of this matrix. + + Note: + The matrix dimension should be less than or equal to 4. + + Returns: + dtype: The determinant of this matrix. + + Raises: + Exception: Determinants of matrices with sizes >= 5 are not supported. + """ + # pylint: disable=C0415 + from taichi.lang import matrix_ops + + return matrix_ops.determinant(a) + + @staticmethod + def diag(dim, val): + """Returns a diagonal square matrix with the diagonals filled + with `val`. + + Args: + dim (int): the dimension of the wanted square matrix. + val (TypeVar): value for the diagonal elements. + + Returns: + :class:`~taichi.Matrix`: The wanted diagonal matrix. + + Example:: + + >>> m = ti.Matrix.diag(3, 1) + [[1, 0, 0], + [0, 1, 0], + [0, 0, 1]] + """ + # pylint: disable=C0415 + from taichi.lang import matrix_ops + + return matrix_ops.diag(dim, val) + + def sum(self): + """Return the sum of all elements. + + Example:: + + >>> m = ti.Matrix([[1, 2], [3, 4]]) + >>> m.sum() + 10 + """ + # pylint: disable=C0415 + from taichi.lang import matrix_ops + + return matrix_ops.sum(self) + + def norm(self, eps=0): + """Returns the square root of the sum of the absolute squares + of its elements. + + Args: + eps (Number): a safe-guard value for sqrt, usually 0. + + Example:: + + >>> a = ti.Vector([3, 4]) + >>> a.norm() + 5 + + Returns: + The square root of the sum of the absolute squares of its elements. + """ + # pylint: disable=C0415 + from taichi.lang import matrix_ops + + return matrix_ops.norm(self, eps=eps) + + def norm_inv(self, eps=0): + """The inverse of the matrix :func:`~taichi.lang.matrix.Matrix.norm`. + + Args: + eps (float): a safe-guard value for sqrt, usually 0. + + Returns: + The inverse of the matrix/vector `norm`. + """ + # pylint: disable=C0415 + from taichi.lang import matrix_ops + + return matrix_ops.norm_inv(self, eps=eps) + + def norm_sqr(self): + """Returns the sum of the absolute squares of its elements.""" + # pylint: disable=C0415 + from taichi.lang import matrix_ops + + return matrix_ops.norm_sqr(self) + + def max(self): + """Returns the maximum element value.""" + # pylint: disable=C0415 + from taichi.lang import matrix_ops + + return matrix_ops.max(self) + + def min(self): + """Returns the minimum element value.""" + # pylint: disable=C0415 + from taichi.lang import matrix_ops + + return matrix_ops.min(self) + + def any(self): + """Test whether any element not equal zero. + + Returns: + bool: `True` if any element is not equal zero, `False` otherwise. + + Example:: + + >>> v = ti.Vector([0, 0, 1]) + >>> v.any() + True + """ + # pylint: disable=C0415 + from taichi.lang import matrix_ops + + return matrix_ops.any(self) + + def all(self): + """Test whether all element not equal zero. + + Returns: + bool: `True` if all elements are not equal zero, `False` otherwise. + + Example:: + + >>> v = ti.Vector([0, 0, 1]) + >>> v.all() + False + """ + # pylint: disable=C0415 + from taichi.lang import matrix_ops + + return matrix_ops.all(self) + + def fill(self, val): + """Fills the matrix with a specified value. + + Args: + val (Union[int, float]): Value to fill. + + Example:: + + >>> A = ti.Matrix([0, 1, 2, 3]) + >>> A.fill(-1) + >>> A + [-1, -1, -1, -1] + """ + # pylint: disable=C0415 + from taichi.lang import matrix_ops + + return matrix_ops.fill(self, val) + + def to_numpy(self): + """Converts this matrix to a numpy array. + + Returns: + numpy.ndarray: The result numpy array. + + Example:: + + >>> A = ti.Matrix([[0], [1], [2], [3]]) + >>> A.to_numpy() + >>> A + array([[0], [1], [2], [3]]) + """ + if self.is_host_access: + return np.array(self.to_list()) + return self.entries + + @taichi_scope + def __ti_repr__(self): + yield "[" + for i in range(self.n): + if i: + yield ", " + if self.m != 1: + yield "[" + for j in range(self.m): + if j: + yield ", " + yield self(i, j) + if self.m != 1: + yield "]" + yield "]" + + def __str__(self): + """Python scope matrix print support.""" + if impl.inside_kernel(): + """ + It seems that when pybind11 got an type mismatch, it will try + to invoke `repr` to show the object... e.g.: + + TypeError: make_const_expr_f32(): incompatible function arguments. The following argument types are supported: + 1. (arg0: float) -> taichi_python.Expr + + Invoked with: + + So we have to make it happy with a dummy string... + """ + return f"<{self.n}x{self.m} ti.Matrix>" + return str(self.to_numpy()) + + def __repr__(self): + return str(self.to_numpy()) + + @staticmethod + @taichi_scope + def zero(dt, n, m=None): + """Constructs a Matrix filled with zeros. + + Args: + dt (DataType): The desired data type. + n (int): The first dimension (row) of the matrix. + m (int, optional): The second dimension (column) of the matrix. + + Returns: + :class:`~taichi.lang.matrix.Matrix`: A :class:`~taichi.lang.matrix.Matrix` instance filled with zeros. + + """ + from taichi.lang import matrix_ops # pylint: disable=C0415 + + if m is None: + return matrix_ops._filled_vector(n, dt, 0) + return matrix_ops._filled_matrix(n, m, dt, 0) + + @staticmethod + @taichi_scope + def one(dt, n, m=None): + """Constructs a Matrix filled with ones. + + Args: + dt (DataType): The desired data type. + n (int): The first dimension (row) of the matrix. + m (int, optional): The second dimension (column) of the matrix. + + Returns: + :class:`~taichi.lang.matrix.Matrix`: A :class:`~taichi.lang.matrix.Matrix` instance filled with ones. + + """ + from taichi.lang import matrix_ops # pylint: disable=C0415 + + if m is None: + return matrix_ops._filled_vector(n, dt, 1) + return matrix_ops._filled_matrix(n, m, dt, 1) + + @staticmethod + @taichi_scope + def unit(n, i, dt=None): + """Constructs a n-D vector with the `i`-th entry being equal to one and + the remaining entries are all zeros. + + Args: + n (int): The length of the vector. + i (int): The index of the entry that will be filled with one. + dt (:mod:`~taichi.types.primitive_types`, optional): The desired data type. + + Returns: + :class:`~taichi.Matrix`: The returned vector. + + Example:: + + >>> A = ti.Matrix.unit(3, 1) + >>> A + [0, 1, 0] + """ + from taichi.lang import matrix_ops # pylint: disable=C0415 + + if dt is None: + dt = int + assert 0 <= i < n + return matrix_ops._unit_vector(n, i, dt) + + @staticmethod + @taichi_scope + def identity(dt, n): + """Constructs an identity Matrix with shape (n, n). + + Args: + dt (DataType): The desired data type. + n (int): The number of rows/columns. + + Returns: + :class:`~taichi.Matrix`: An `n x n` identity matrix. + """ + from taichi.lang import matrix_ops # pylint: disable=C0415 + + return matrix_ops._identity_matrix(n, dt) + + @classmethod + @python_scope + def field( + cls, + n, + m, + dtype, + shape=None, + order=None, + name="", + offset=None, + needs_grad=False, + needs_dual=False, + layout=Layout.AOS, + ndim=None, + ): + """Construct a data container to hold all elements of the Matrix. + + Args: + n (int): The desired number of rows of the Matrix. + m (int): The desired number of columns of the Matrix. + dtype (DataType, optional): The desired data type of the Matrix. + shape (Union[int, tuple of int], optional): The desired shape of the Matrix. + order (str, optional): order of the shape laid out in memory. + name (string, optional): The custom name of the field. + offset (Union[int, tuple of int], optional): The coordinate offset + of all elements in a field. + needs_grad (bool, optional): Whether the Matrix need grad field (reverse mode autodiff). + needs_dual (bool, optional): Whether the Matrix need dual field (forward mode autodiff). + layout (Layout, optional): The field layout, either Array Of + Structure (AOS) or Structure Of Array (SOA). + + Returns: + :class:`~taichi.Matrix`: A matrix. + """ + entries = [] + element_dim = ndim if ndim is not None else 2 + if isinstance(dtype, (list, tuple, np.ndarray)): + # set different dtype for each element in Matrix + # see #2135 + if m == 1: + assert ( + len(np.shape(dtype)) == 1 and len(dtype) == n + ), f"Please set correct dtype list for Vector. The shape of dtype list should be ({n}, ) instead of {np.shape(dtype)}" + for i in range(n): + entries.append( + impl.create_field_member( + dtype[i], + name=name, + needs_grad=needs_grad, + needs_dual=needs_dual, + ) + ) + else: + assert ( + len(np.shape(dtype)) == 2 and len(dtype) == n and len(dtype[0]) == m + ), f"Please set correct dtype list for Matrix. The shape of dtype list should be ({n}, {m}) instead of {np.shape(dtype)}" + for i in range(n): + for j in range(m): + entries.append( + impl.create_field_member( + dtype[i][j], + name=name, + needs_grad=needs_grad, + needs_dual=needs_dual, + ) + ) + else: + for _ in range(n * m): + entries.append(impl.create_field_member(dtype, name=name, needs_grad=needs_grad, needs_dual=needs_dual)) + entries, entries_grad, entries_dual = zip(*entries) + + entries = MatrixField(entries, n, m, element_dim) + if all(entries_grad): + entries_grad = MatrixField(entries_grad, n, m, element_dim) + entries._set_grad(entries_grad) + if all(entries_dual): + entries_dual = MatrixField(entries_dual, n, m, element_dim) + entries._set_dual(entries_dual) + + impl.get_runtime().matrix_fields.append(entries) + + if shape is None: + if offset is not None: + raise TaichiSyntaxError("shape cannot be None when offset is set") + if order is not None: + raise TaichiSyntaxError("shape cannot be None when order is set") + else: + if isinstance(shape, numbers.Number): + shape = (shape,) + if isinstance(offset, numbers.Number): + offset = (offset,) + dim = len(shape) + if offset is not None and dim != len(offset): + raise TaichiSyntaxError( + f"The dimensionality of shape and offset must be the same ({dim} != {len(offset)})" + ) + axis_seq = [] + shape_seq = [] + if order is not None: + if dim != len(order): + raise TaichiSyntaxError( + f"The dimensionality of shape and order must be the same ({dim} != {len(order)})" + ) + if dim != len(set(order)): + raise TaichiSyntaxError("The axes in order must be different") + for ch in order: + axis = ord(ch) - ord("i") + if axis < 0 or axis >= dim: + raise TaichiSyntaxError(f"Invalid axis {ch}") + axis_seq.append(axis) + shape_seq.append(shape[axis]) + else: + axis_seq = list(range(dim)) + shape_seq = list(shape) + same_level = order is None + if layout == Layout.SOA: + for e in entries._get_field_members(): + impl._create_snode(axis_seq, shape_seq, same_level).place(ScalarField(e), offset=offset) + if needs_grad: + for e in entries_grad._get_field_members(): + impl._create_snode(axis_seq, shape_seq, same_level).place(ScalarField(e), offset=offset) + if needs_dual: + for e in entries_dual._get_field_members(): + impl._create_snode(axis_seq, shape_seq, same_level).place(ScalarField(e), offset=offset) + else: + impl._create_snode(axis_seq, shape_seq, same_level).place(entries, offset=offset) + if needs_grad: + impl._create_snode(axis_seq, shape_seq, same_level).place(entries_grad, offset=offset) + if needs_dual: + impl._create_snode(axis_seq, shape_seq, same_level).place(entries_dual, offset=offset) + return entries + + @classmethod + @python_scope + def ndarray(cls, n, m, dtype, shape): + """Defines a Taichi ndarray with matrix elements. + This function must be called in Python scope, and after `ti.init` is called. + + Args: + n (int): Number of rows of the matrix. + m (int): Number of columns of the matrix. + dtype (DataType): Data type of each value. + shape (Union[int, tuple[int]]): Shape of the ndarray. + + Example:: + + The code below shows how a Taichi ndarray with matrix elements \ + can be declared and defined:: + + >>> x = ti.Matrix.ndarray(4, 5, ti.f32, shape=(16, 8)) + """ + if isinstance(shape, numbers.Number): + shape = (shape,) + return MatrixNdarray(n, m, dtype, shape) + + @staticmethod + def rows(rows): + """Constructs a matrix by concatenating a list of + vectors/lists row by row. Must be called in Taichi scope. + + Args: + rows (List): A list of Vector (1-D Matrix) or a list of list. + + Returns: + :class:`~taichi.Matrix`: A matrix. + + Example:: + + >>> @ti.kernel + >>> def test(): + >>> v1 = ti.Vector([1, 2, 3]) + >>> v2 = ti.Vector([4, 5, 6]) + >>> m = ti.Matrix.rows([v1, v2]) + >>> print(m) + >>> + >>> test() + [[1, 2, 3], [4, 5, 6]] + """ + from taichi.lang import matrix_ops # pylint: disable=C0415 + + return matrix_ops.rows(rows) + + @staticmethod + def cols(cols): + """Constructs a Matrix instance by concatenating Vectors/lists column by column. + + Args: + cols (List): A list of Vector (1-D Matrix) or a list of list. + + Returns: + :class:`~taichi.Matrix`: A matrix. + + Example:: + + >>> @ti.kernel + >>> def test(): + >>> v1 = ti.Vector([1, 2, 3]) + >>> v2 = ti.Vector([4, 5, 6]) + >>> m = ti.Matrix.cols([v1, v2]) + >>> print(m) + >>> + >>> test() + [[1, 4], [2, 5], [3, 6]] + """ + from taichi.lang import matrix_ops # pylint: disable=C0415 + + return matrix_ops.cols(cols) + + def __hash__(self): + # TODO: refactor KernelTemplateMapper + # If not, we get `unhashable type: Matrix` when + # using matrices as template arguments. + return id(self) + + def dot(self, other): + """Performs the dot product of two vectors. + + To call this method, both multiplicatives must be vectors. + + Args: + other (:class:`~taichi.Matrix`): The input Vector. + + Returns: + DataType: The dot product result (scalar) of the two Vectors. + + Example:: + + >>> v1 = ti.Vector([1, 2, 3]) + >>> v2 = ti.Vector([3, 4, 5]) + >>> v1.dot(v2) + 26 + """ + from taichi.lang import matrix_ops # pylint: disable=C0415 + + return matrix_ops.dot(self, other) + + def cross(self, other): + """Performs the cross product with the input vector (1-D Matrix). + + Both two vectors must have the same dimension <= 3. + + For two 2d vectors (x1, y1) and (x2, y2), the return value is the + scalar `x1*y2 - x2*y1`. + + For two 3d vectors `v` and `w`, the return value is the 3d vector + `v x w`. + + Args: + other (:class:`~taichi.Matrix`): The input Vector. + + Returns: + :class:`~taichi.Matrix`: The cross product of the two Vectors. + """ + from taichi.lang import matrix_ops # pylint: disable=C0415 + + return matrix_ops.cross(self, other) + + def outer_product(self, other): + """Performs the outer product with the input Vector (1-D Matrix). + + The outer_product of two vectors `v = (x1, x2, ..., xn)`, + `w = (y1, y2, ..., yn)` is a `n` times `n` square matrix, and its `(i, j)` + entry is equal to `xi*yj`. + + Args: + other (:class:`~taichi.Matrix`): The input Vector. + + Returns: + :class:`~taichi.Matrix`: The outer product of the two Vectors. + """ + from taichi.lang import matrix_ops # pylint: disable=C0415 + + return matrix_ops.outer_product(self, other) + + +class Vector(Matrix): + def __init__(self, arr, dt=None, **kwargs): + """Constructs a vector from given array. + + A vector is an instance of a 2-D matrix with the second dimension being equal to 1. + + Args: + arr (Union[list, tuple, np.ndarray]): The initial values of the Vector. + dt (:mod:`~taichi.types.primitive_types`): data type of the vector. + + Returns: + :class:`~taichi.Matrix`: A vector instance. + Example:: + >>> u = ti.Vector([1, 2]) + >>> print(u.m, u.n) # verify a vector is a matrix of shape (n, 1) + 2 1 + >>> v = ti.Vector([3, 4]) + >>> u + v + [4 6] + """ + super().__init__(arr, dt=dt, **kwargs) + + def get_shape(self): + return (self.n,) + + @classmethod + def field(cls, n, dtype, *args, **kwargs): + """ti.Vector.field""" + ndim = kwargs.get("ndim", 1) + assert ndim == 1 + kwargs["ndim"] = 1 + return super().field(n, 1, dtype, *args, **kwargs) + + @classmethod + @python_scope + def ndarray(cls, n, dtype, shape): + """Defines a Taichi ndarray with vector elements. + + Args: + n (int): Size of the vector. + dtype (DataType): Data type of each value. + shape (Union[int, tuple[int]]): Shape of the ndarray. + + Example: + The code below shows how a Taichi ndarray with vector elements can be declared and defined:: + + >>> x = ti.Vector.ndarray(3, ti.f32, shape=(16, 8)) + """ + if isinstance(shape, numbers.Number): + shape = (shape,) + return VectorNdarray(n, dtype, shape) + + +class MatrixField(Field): + """Taichi matrix field with SNode implementation. + + Args: + vars (List[Expr]): Field members. + n (Int): Number of rows. + m (Int): Number of columns. + ndim (Int): Number of dimensions; forced reshape if given. + """ + + def __init__(self, _vars, n, m, ndim=2): + assert len(_vars) == n * m + assert ndim in (0, 1, 2) + super().__init__(_vars) + self.n = n + self.m = m + self.ndim = ndim + self.ptr = ti_python_core.expr_matrix_field([var.ptr for var in self.vars], [n, m][:ndim]) + + def get_scalar_field(self, *indices): + """Creates a ScalarField using a specific field member. + + Args: + indices (Tuple[Int]): Specified indices of the field member. + + Returns: + ScalarField: The result ScalarField. + """ + assert len(indices) in [1, 2] + i = indices[0] + j = 0 if len(indices) == 1 else indices[1] + return ScalarField(self.vars[i * self.m + j]) + + def _get_dynamic_index_stride(self): + if self.ptr.get_dynamic_indexable(): + return self.ptr.get_dynamic_index_stride() + return None + + def _calc_dynamic_index_stride(self): + # Algorithm: https://github.com/taichi-dev/taichi/issues/3810 + paths = [ScalarField(var).snode._path_from_root() for var in self.vars] + num_members = len(paths) + if num_members == 1: + self.ptr.set_dynamic_index_stride(0) + return + length = len(paths[0]) + if any(len(path) != length or ti_python_core.is_quant(path[length - 1]._dtype) for path in paths): + return + for i in range(length): + if any(path[i] != paths[0][i] for path in paths): + depth_below_lca = i + break + for i in range(depth_below_lca, length - 1): + if any( + path[i].ptr.type != ti_python_core.SNodeType.dense + or path[i]._cell_size_bytes != paths[0][i]._cell_size_bytes + or path[i + 1]._offset_bytes_in_parent_cell != paths[0][i + 1]._offset_bytes_in_parent_cell + for path in paths + ): + return + stride = ( + paths[1][depth_below_lca]._offset_bytes_in_parent_cell + - paths[0][depth_below_lca]._offset_bytes_in_parent_cell + ) + for i in range(2, num_members): + if ( + stride + != paths[i][depth_below_lca]._offset_bytes_in_parent_cell + - paths[i - 1][depth_below_lca]._offset_bytes_in_parent_cell + ): + return + self.ptr.set_dynamic_index_stride(stride) + + def fill(self, val): + """Fills this matrix field with specified values. + + Args: + val (Union[Number, Expr, List, Tuple, Matrix]): Values to fill, + should have consistent dimension consistent with `self`. + """ + if isinstance(val, numbers.Number) or (isinstance(val, expr.Expr) and not val.is_tensor()): + if self.ndim == 2: + val = tuple(tuple(val for _ in range(self.m)) for _ in range(self.n)) + else: + assert self.ndim == 1 + val = tuple(val for _ in range(self.n)) + elif isinstance(val, expr.Expr) and val.is_tensor(): + assert val.n == self.n + if self.ndim != 1: + assert val.m == self.m + else: + if isinstance(val, Matrix): + val = val.to_list() + assert isinstance(val, (list, tuple)) + val = tuple(tuple(x) if isinstance(x, list) else x for x in val) + assert len(val) == self.n + if self.ndim != 1: + assert len(val[0]) == self.m + if in_python_scope(): + from taichi._kernels import field_fill_python_scope # pylint: disable=C0415 + + field_fill_python_scope(self, val) + else: + from taichi._funcs import field_fill_taichi_scope # pylint: disable=C0415 + + field_fill_taichi_scope(self, val) + + @python_scope + def to_numpy(self, keep_dims=False, dtype=None): + """Converts the field instance to a NumPy array. + + Args: + keep_dims (bool, optional): Whether to keep the dimension after conversion. + When keep_dims=True, on an n-D matrix field, the numpy array always has n+2 dims, even for 1x1, 1xn, nx1 matrix fields. + When keep_dims=False, the resulting numpy array should skip the matrix dims with size 1. + For example, a 4x1 or 1x4 matrix field with 5x6x7 elements results in an array of shape 5x6x7x4. + dtype (DataType, optional): The desired data type of returned numpy array. + + Returns: + numpy.ndarray: The result NumPy array. + """ + if dtype is None: + dtype = to_numpy_type(self.dtype) + as_vector = self.m == 1 and not keep_dims + shape_ext = (self.n,) if as_vector else (self.n, self.m) + arr = np.zeros(self.shape + shape_ext, dtype=dtype) + from taichi._kernels import matrix_to_ext_arr # pylint: disable=C0415 + + matrix_to_ext_arr(self, arr, as_vector) + runtime_ops.sync() + return arr + + def to_torch(self, device=None, keep_dims=False): + """Converts the field instance to a PyTorch tensor. + + Args: + device (torch.device, optional): The desired device of returned tensor. + keep_dims (bool, optional): Whether to keep the dimension after conversion. + See :meth:`~taichi.lang.field.MatrixField.to_numpy` for more detailed explanation. + + Returns: + torch.tensor: The result torch tensor. + """ + import torch # pylint: disable=C0415 + + as_vector = self.m == 1 and not keep_dims + shape_ext = (self.n,) if as_vector else (self.n, self.m) + # pylint: disable=E1101 + arr = torch.empty(self.shape + shape_ext, dtype=to_pytorch_type(self.dtype), device=device) + from taichi._kernels import matrix_to_ext_arr # pylint: disable=C0415 + + matrix_to_ext_arr(self, arr, as_vector) + runtime_ops.sync() + return arr + + def to_paddle(self, place=None, keep_dims=False): + """Converts the field instance to a Paddle tensor. + + Args: + place (paddle.CPUPlace()/CUDAPlace(n), optional): The desired place of returned tensor. + keep_dims (bool, optional): Whether to keep the dimension after conversion. + See :meth:`~taichi.lang.field.MatrixField.to_numpy` for more detailed explanation. + + Returns: + paddle.Tensor: The result paddle tensor. + """ + import paddle # pylint: disable=C0415 + + as_vector = self.m == 1 and not keep_dims and self.ndim == 1 + shape_ext = (self.n,) if as_vector else (self.n, self.m) + # pylint: disable=E1101 + # paddle.empty() doesn't support argument `place`` + arr = paddle.to_tensor( + paddle.empty(self.shape + shape_ext, to_paddle_type(self.dtype)), + place=place, + ) + from taichi._kernels import matrix_to_ext_arr # pylint: disable=C0415 + + matrix_to_ext_arr(self, arr, as_vector) + runtime_ops.sync() + return arr + + @python_scope + def _from_external_arr(self, arr): + if len(arr.shape) == len(self.shape) + 1: + as_vector = True + assert self.m == 1, "This is not a vector field" + else: + as_vector = False + assert len(arr.shape) == len(self.shape) + 2 + dim_ext = 1 if as_vector else 2 + assert len(arr.shape) == len(self.shape) + dim_ext + from taichi._kernels import ext_arr_to_matrix # pylint: disable=C0415 + + ext_arr_to_matrix(arr, self, as_vector) + runtime_ops.sync() + + @python_scope + def from_numpy(self, arr): + """Copies an `numpy.ndarray` into this field. + + Example:: + + >>> m = ti.Matrix.field(2, 2, ti.f32, shape=(3, 3)) + >>> arr = numpy.ones((3, 3, 2, 2)) + >>> m.from_numpy(arr) + """ + + if not arr.flags.c_contiguous: + arr = np.ascontiguousarray(arr) + self._from_external_arr(arr) + + @python_scope + def __setitem__(self, key, value): + self._initialize_host_accessors() + self[key]._set_entries(value) + + @python_scope + def __getitem__(self, key): + self._initialize_host_accessors() + key = self._pad_key(key) + _host_access = self._host_access(key) + if self.ndim == 1: + return Vector([_host_access[i] for i in range(self.n)]) + return Matrix([[_host_access[i * self.m + j] for j in range(self.m)] for i in range(self.n)]) + + def __repr__(self): + # make interactive shell happy, prevent materialization + return f"<{self.n}x{self.m} ti.Matrix.field>" + + +class MatrixType(CompoundType): + def __init__(self, n, m, ndim, dtype): + self.n = n + self.m = m + self.ndim = ndim + # FIXME(haidong): dtypes should not be left empty for ndarray. + # Remove the None dtype when we are ready to break legacy code. + if dtype is not None: + self.dtype = cook_dtype(dtype) + shape = (n, m) if ndim == 2 else (n,) + self.tensor_type = _type_factory.get_tensor_type(shape, self.dtype) + else: + self.dtype = None + self.tensor_type = None + + def __call__(self, *args): + """Return a matrix matching the shape and dtype. + + This function will try to convert the input to a `n x m` matrix, with n, m being + the number of rows/cols of this matrix type. + + Example:: + + >>> mat4x3 = MatrixType(4, 3, float) + >>> mat2x6 = MatrixType(2, 6, float) + + Create from n x m scalars, of a 1d list of n x m scalars: + + >>> m = mat4x3([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]) + >>> m = mat4x3(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12) + + Create from n vectors/lists, with each one of dimension m: + + >>> m = mat4x3([1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]) + + Create from a single scalar + + >>> m = mat4x3(1) + + Create from another 2d list/matrix, as long as they have the same number of entries + + >>> m = mat4x3([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]) + >>> m = mat4x3(m) + >>> k = mat2x6(m) + + """ + if len(args) == 0: + raise TaichiSyntaxError("Custom type instances need to be created with an initial value.") + if len(args) == 1: + # Init from a real Matrix + if isinstance(args[0], expr.Expr) and args[0].ptr.is_tensor(): + arg = args[0] + shape = arg.ptr.get_rvalue_type().shape() + assert self.ndim == len(shape) + assert self.n == shape[0] + if self.ndim > 1: + assert self.m == shape[1] + return expr.Expr(arg.ptr) + + # initialize by a single scalar, e.g. matnxm(1) + if isinstance(args[0], (numbers.Number, expr.Expr)): + entries = [args[0] for _ in range(self.m) for _ in range(self.n)] + return self._instantiate(entries) + args = args[0] + # collect all input entries to a 1d list and then reshape + # this is mostly for glsl style like vec4(v.xyz, 1.) + entries = [] + for x in args: + if isinstance(x, (list, tuple)): + entries += x + elif isinstance(x, np.ndarray): + entries += list(x.ravel()) + elif isinstance(x, Matrix): + entries += x.to_list() + else: + entries.append(x) + + return self._instantiate(entries) + + def from_taichi_object(self, func_ret, ret_index=()): + return self( + [ + expr.Expr( + ti_python_core.make_get_element_expr( + func_ret.ptr, + ret_index + (i,), + _ti_python_core.DebugInfo(impl.get_runtime().get_current_src_info()), + ) + ) + for i in range(self.m * self.n) + ] + ) + + def from_kernel_struct_ret(self, launch_ctx, ret_index=()): + if self.dtype in primitive_types.integer_types: + if is_signed(cook_dtype(self.dtype)): + get_ret_func = launch_ctx.get_struct_ret_int + else: + get_ret_func = launch_ctx.get_struct_ret_uint + elif self.dtype in primitive_types.real_types: + get_ret_func = launch_ctx.get_struct_ret_float + else: + raise TaichiRuntimeTypeError(f"Invalid return type on index={ret_index}") + return self([get_ret_func(ret_index + (i,)) for i in range(self.m * self.n)]) + + def set_kernel_struct_args(self, mat, launch_ctx, ret_index=()): + if self.dtype in primitive_types.integer_types: + if is_signed(cook_dtype(self.dtype)): + set_arg_func = launch_ctx.set_struct_arg_int + else: + set_arg_func = launch_ctx.set_struct_arg_uint + elif self.dtype in primitive_types.real_types: + set_arg_func = launch_ctx.set_struct_arg_float + else: + raise TaichiRuntimeTypeError(f"Invalid return type on index={ret_index}") + if self.ndim == 1: + for i in range(self.n): + set_arg_func(ret_index + (i,), mat[i]) + else: + for i in range(self.n): + for j in range(self.m): + set_arg_func(ret_index + (i * self.m + j,), mat[i, j]) + + def set_argpack_struct_args(self, mat, argpack, ret_index=()): + if self.dtype in primitive_types.integer_types: + if is_signed(cook_dtype(self.dtype)): + set_arg_func = argpack.set_arg_int + else: + set_arg_func = argpack.set_arg_uint + elif self.dtype in primitive_types.real_types: + set_arg_func = argpack.set_arg_float + else: + raise TaichiRuntimeTypeError(f"Invalid return type on index={ret_index}") + if self.ndim == 1: + for i in range(self.n): + set_arg_func(ret_index + (i,), mat[i]) + else: + for i in range(self.n): + for j in range(self.m): + set_arg_func(ret_index + (i * self.m + j,), mat[i, j]) + + def _instantiate_in_python_scope(self, entries): + entries = [[entries[k * self.m + i] for i in range(self.m)] for k in range(self.n)] + return Matrix( + [ + [ + int(entries[i][j]) if self.dtype in primitive_types.integer_types else float(entries[i][j]) + for j in range(self.m) + ] + for i in range(self.n) + ], + dt=self.dtype, + ) + + def _instantiate(self, entries): + if in_python_scope(): + return self._instantiate_in_python_scope(entries) + + return make_matrix_with_shape(entries, [self.n, self.m], self.dtype) + + def field(self, **kwargs): + assert kwargs.get("ndim", self.ndim) == self.ndim + kwargs.update({"ndim": self.ndim}) + return Matrix.field(self.n, self.m, dtype=self.dtype, **kwargs) + + def ndarray(self, **kwargs): + assert kwargs.get("ndim", self.ndim) == self.ndim + kwargs.update({"ndim": self.ndim}) + return Matrix.ndarray(self.n, self.m, dtype=self.dtype, **kwargs) + + def get_shape(self): + if self.ndim == 1: + return (self.n,) + return (self.n, self.m) + + def to_string(self): + dtype_str = self.dtype.to_string() if self.dtype is not None else "" + return f"MatrixType[{self.n},{self.m}, {dtype_str}]" + + def check_matched(self, other): + if self.ndim != len(other.shape()): + return False + if self.dtype is not None and self.dtype != other.element_type(): + return False + shape = self.get_shape() + for i in range(self.ndim): + if shape[i] is not None and shape[i] != other.shape()[i]: + return False + return True + + +class VectorType(MatrixType): + def __init__(self, n, dtype): + super().__init__(n, 1, 1, dtype) + + def __call__(self, *args): + """Return a vector matching the shape and dtype. + + This function will try to convert the input to a `n`-component vector. + + Example:: + + >>> vec3 = VectorType(3, float) + + Create from n scalars: + + >>> v = vec3(1, 2, 3) + + Create from a list/tuple of n scalars: + + >>> v = vec3([1, 2, 3]) + + Create from a single scalar + + >>> v = vec3(1) + + """ + if len(args) == 0: + raise TaichiSyntaxError("Custom type instances need to be created with an initial value.") + if len(args) == 1: + # Init from a real Matrix + if isinstance(args[0], expr.Expr) and args[0].ptr.is_tensor(): + arg = args[0] + shape = arg.ptr.get_rvalue_type().shape() + assert len(shape) == 1 + assert self.n == shape[0] + return expr.Expr(arg.ptr) + + # initialize by a single scalar, e.g. matnxm(1) + if isinstance(args[0], (numbers.Number, expr.Expr)): + entries = [args[0] for _ in range(self.n)] + return self._instantiate(entries) + args = args[0] + # collect all input entries to a 1d list and then reshape + # this is mostly for glsl style like vec4(v.xyz, 1.) + entries = [] + for x in args: + if isinstance(x, (list, tuple)): + entries += x + elif isinstance(x, np.ndarray): + entries += list(x.ravel()) + elif isinstance(x, Matrix): + entries += x.to_list() + else: + entries.append(x) + + # type cast + return self._instantiate(entries) + + def _instantiate_in_python_scope(self, entries): + return Vector( + [ + int(entries[i]) if self.dtype in primitive_types.integer_types else float(entries[i]) + for i in range(self.n) + ], + dt=self.dtype, + ) + + def _instantiate(self, entries): + if in_python_scope(): + return self._instantiate_in_python_scope(entries) + + return make_matrix_with_shape(entries, [self.n], self.dtype) + + def field(self, **kwargs): + return Vector.field(self.n, dtype=self.dtype, **kwargs) + + def ndarray(self, **kwargs): + return Vector.ndarray(self.n, dtype=self.dtype, **kwargs) + + def to_string(self): + dtype_str = self.dtype.to_string() if self.dtype is not None else "" + return f"VectorType[{self.n}, {dtype_str}]" + + +class MatrixNdarray(Ndarray): + """Taichi ndarray with matrix elements. + + Args: + n (int): Number of rows of the matrix. + m (int): Number of columns of the matrix. + dtype (DataType): Data type of each value. + shape (Union[int, tuple[int]]): Shape of the ndarray. + + Example:: + + >>> arr = ti.MatrixNdarray(2, 2, ti.f32, shape=(3, 3)) + """ + + def __init__(self, n, m, dtype, shape): + self.n = n + self.m = m + super().__init__() + # TODO(zhanlue): remove self.dtype and migrate its usages to element_type + self.dtype = cook_dtype(dtype) + + self.layout = Layout.AOS + self.shape = tuple(shape) + self.element_type = _type_factory.get_tensor_type((self.n, self.m), self.dtype) + # TODO: we should pass in element_type, shape, layout instead. + self.arr = impl.get_runtime().prog.create_ndarray( + cook_dtype(self.element_type), + shape, + Layout.AOS, + zero_fill=True, + dbg_info=ti_python_core.DebugInfo(get_traceback()), + ) + + @property + def element_shape(self): + """Returns the shape of each element (a 2D matrix) in this ndarray. + + Example:: + + >>> arr = ti.MatrixNdarray(2, 2, ti.f32, shape=(3, 3)) + >>> arr.element_shape + (2, 2) + """ + return tuple(self.arr.element_shape()) + + @python_scope + def __setitem__(self, key, value): + if not isinstance(value, (list, tuple)): + value = list(value) + if not isinstance(value[0], (list, tuple)): + value = [[i] for i in value] + for i in range(self.n): + for j in range(self.m): + self[key][i, j] = value[i][j] + + @python_scope + def __getitem__(self, key): + key = () if key is None else (key,) if isinstance(key, numbers.Number) else tuple(key) + return Matrix([[NdarrayHostAccess(self, key, (i, j)) for j in range(self.m)] for i in range(self.n)]) + + @python_scope + def to_numpy(self): + """Converts this ndarray to a `numpy.ndarray`. + + Example:: + + >>> arr = ti.MatrixNdarray(2, 2, ti.f32, shape=(2, 1)) + >>> arr.to_numpy() + [[[[0. 0.] + [0. 0.]]] + + [[[0. 0.] + [0. 0.]]]] + """ + return self._ndarray_matrix_to_numpy(as_vector=0) + + @python_scope + def from_numpy(self, arr): + """Copies the data of a `numpy.ndarray` into this array. + + Example:: + + >>> m = ti.MatrixNdarray(2, 2, ti.f32, shape=(2, 1), layout=0) + >>> arr = np.ones((2, 1, 2, 2)) + >>> m.from_numpy(arr) + """ + self._ndarray_matrix_from_numpy(arr, as_vector=0) + + @python_scope + def __deepcopy__(self, memo=None): + ret_arr = MatrixNdarray(self.n, self.m, self.dtype, self.shape) + ret_arr.copy_from(self) + return ret_arr + + @python_scope + def _fill_by_kernel(self, val): + from taichi._kernels import fill_ndarray_matrix # pylint: disable=C0415 + + shape = self.element_type.shape() + n = shape[0] + m = 1 + if len(shape) > 1: + m = shape[1] + + prim_dtype = self.element_type.element_type() + matrix_type = MatrixType(n, m, len(shape), prim_dtype) + if isinstance(val, Matrix): + value = val + else: + value = matrix_type(val) + fill_ndarray_matrix(self, value) + + @python_scope + def __repr__(self): + return f"<{self.n}x{self.m} {Layout.AOS} ti.Matrix.ndarray>" + + +class VectorNdarray(Ndarray): + """Taichi ndarray with vector elements. + + Args: + n (int): Size of the vector. + dtype (DataType): Data type of each value. + shape (Tuple[int]): Shape of the ndarray. + + Example:: + + >>> a = ti.VectorNdarray(3, ti.f32, (3, 3)) + """ + + def __init__(self, n, dtype, shape): + self.n = n + super().__init__() + # TODO(zhanlue): remove self.dtype and migrate its usages to element_type + self.dtype = cook_dtype(dtype) + + self.layout = Layout.AOS + self.shape = tuple(shape) + self.element_type = _type_factory.get_tensor_type((n,), self.dtype) + self.arr = impl.get_runtime().prog.create_ndarray( + cook_dtype(self.element_type), + shape, + Layout.AOS, + zero_fill=True, + dbg_info=ti_python_core.DebugInfo(get_traceback()), + ) + + @property + def element_shape(self): + """Gets the dimension of the vector of this ndarray. + + Example:: + + >>> a = ti.VectorNdarray(3, ti.f32, (3, 3)) + >>> a.element_shape + (3,) + """ + return tuple(self.arr.element_shape()) + + @python_scope + def __setitem__(self, key, value): + if not isinstance(value, (list, tuple)): + value = list(value) + for i in range(self.n): + self[key][i] = value[i] + + @python_scope + def __getitem__(self, key): + key = () if key is None else (key,) if isinstance(key, numbers.Number) else tuple(key) + return Vector([NdarrayHostAccess(self, key, (i,)) for i in range(self.n)]) + + @python_scope + def to_numpy(self): + """Converts this vector ndarray to a `numpy.ndarray`. + + Example:: + + >>> a = ti.VectorNdarray(3, ti.f32, (2, 2)) + >>> a.to_numpy() + array([[[0., 0., 0.], + [0., 0., 0.]], + + [[0., 0., 0.], + [0., 0., 0.]]], dtype=float32) + """ + return self._ndarray_matrix_to_numpy(as_vector=1) + + @python_scope + def from_numpy(self, arr): + """Copies the data from a `numpy.ndarray` into this ndarray. + + The shape and data type of `arr` must match this ndarray. + + Example:: + + >>> import numpy as np + >>> a = ti.VectorNdarray(3, ti.f32, (2, 2), 0) + >>> b = np.ones((2, 2, 3), dtype=np.float32) + >>> a.from_numpy(b) + """ + self._ndarray_matrix_from_numpy(arr, as_vector=1) + + @python_scope + def __deepcopy__(self, memo=None): + ret_arr = VectorNdarray(self.n, self.dtype, self.shape) + ret_arr.copy_from(self) + return ret_arr + + @python_scope + def _fill_by_kernel(self, val): + from taichi._kernels import fill_ndarray_matrix # pylint: disable=C0415 + + shape = self.element_type.shape() + prim_dtype = self.element_type.element_type() + vector_type = VectorType(shape[0], prim_dtype) + if isinstance(val, Vector): + value = val + else: + value = vector_type(val) + fill_ndarray_matrix(self, value) + + @python_scope + def __repr__(self): + return f"<{self.n} {Layout.AOS} ti.Vector.ndarray>" + + +__all__ = ["Matrix", "Vector", "MatrixField", "MatrixNdarray", "VectorNdarray"] diff --git a/local_packages/taichi/lang/matrix_ops.py b/local_packages/taichi/lang/matrix_ops.py new file mode 100644 index 0000000..0ac3d96 --- /dev/null +++ b/local_packages/taichi/lang/matrix_ops.py @@ -0,0 +1,339 @@ +import taichi.lang.ops as ops_mod +from taichi.lang.impl import static +from taichi.lang.kernel_impl import func, pyfunc +from taichi.lang.matrix import Matrix, Vector +from taichi.lang.matrix_ops_utils import ( + arg_at, + arg_foreach_check, + assert_list, + assert_tensor, + assert_vector, + check_matmul, + check_transpose, + dim_lt, + is_int_const, + preconditions, + same_shapes, + square_matrix, +) +from taichi.types.annotations import template + + +@preconditions(arg_at(0, assert_tensor)) +@pyfunc +def _reduce(mat, fun: template()): + shape = static(mat.get_shape()) + if static(len(shape) == 1): + result = mat[0] + for i in static(range(1, shape[0])): + result = fun(result, mat[i]) + return result + result = mat[0, 0] + for i in static(range(shape[0])): + for j in static(range(shape[1])): + if static(i != 0 or j != 0): + result = fun(result, mat[i, j]) + return result + + +@pyfunc +def _filled_vector(n: template(), dtype: template(), val: template()): + return Vector([val for _ in static(range(n))], dtype) + + +@pyfunc +def _filled_matrix(n: template(), m: template(), dtype: template(), val: template()): + return Matrix([[val for _ in static(range(m))] for _ in static(range(n))], dtype) + + +@pyfunc +def _unit_vector(n: template(), i: template(), dtype: template()): + return Vector([i == j for j in static(range(n))], dtype) + + +@pyfunc +def _identity_matrix(n: template(), dtype: template()): + return Matrix([[i == j for j in static(range(n))] for i in static(range(n))], dtype) + + +@preconditions( + arg_at(0, lambda xs: same_shapes(*xs)), + arg_foreach_check( + 0, + fns=[assert_vector(), assert_list], + logic="or", + msg="Cols/rows must be a list of lists, or a list of vectors", + ), +) +@pyfunc +def rows(rows): # pylint: disable=W0621 + return Matrix([[x for x in row] for row in rows]) + + +@pyfunc +def cols(cols): # pylint: disable=W0621 + return rows(cols).transpose() + + +@pyfunc +def E(mat: template(), x: template(), y: template(), n: template()): + return mat[x % n, y % n] + + +@preconditions(square_matrix, dim_lt(0, 5)) +@pyfunc +def determinant(mat): + shape = static(mat.get_shape()) + if static(shape[0] == 1): + return mat[0, 0] + if static(shape[0] == 2): + return mat[0, 0] * mat[1, 1] - mat[0, 1] * mat[1, 0] + if static(shape[0] == 3): + return ( + mat[0, 0] * (mat[1, 1] * mat[2, 2] - mat[2, 1] * mat[1, 2]) + - mat[1, 0] * (mat[0, 1] * mat[2, 2] - mat[2, 1] * mat[0, 2]) + + mat[2, 0] * (mat[0, 1] * mat[1, 2] - mat[1, 1] * mat[0, 2]) + ) + if static(shape[0] == 4): + det = mat[0, 0] * 0 # keep type + for i in static(range(4)): + det = det + (-1) ** i * ( + mat[i, 0] + * ( + E(mat, i + 1, 1, 4) + * (E(mat, i + 2, 2, 4) * E(mat, i + 3, 3, 4) - E(mat, i + 3, 2, 4) * E(mat, i + 2, 3, 4)) + - E(mat, i + 2, 1, 4) + * (E(mat, i + 1, 2, 4) * E(mat, i + 3, 3, 4) - E(mat, i + 3, 2, 4) * E(mat, i + 1, 3, 4)) + + E(mat, i + 3, 1, 4) + * (E(mat, i + 1, 2, 4) * E(mat, i + 2, 3, 4) - E(mat, i + 2, 2, 4) * E(mat, i + 1, 3, 4)) + ) + ) + return det + # unreachable + return None + + +@preconditions(square_matrix, dim_lt(0, 5)) +@pyfunc +def inverse(mat): + shape = static(mat.get_shape()) + if static(shape[0] == 1): + return Matrix([[1.0 / mat[0, 0]]]) + inv_determinant = 1.0 / determinant(mat) + if static(shape[0] == 2): + return inv_determinant * Matrix([[mat[1, 1], -mat[0, 1]], [-mat[1, 0], mat[0, 0]]]) + if static(shape[0] == 3): + return inv_determinant * Matrix( + [ + [ + E(mat, i + 1, j + 1, 3) * E(mat, i + 2, j + 2, 3) + - E(mat, i + 2, j + 1, 3) * E(mat, i + 1, j + 2, 3) + for i in static(range(3)) + ] + for j in static(range(3)) + ] + ) + if static(shape[0] == 4): + return inv_determinant * Matrix( + [ + [ + (-1) ** (i + j) + * ( + ( + E(mat, i + 1, j + 1, 4) + * ( + E(mat, i + 2, j + 2, 4) * E(mat, i + 3, j + 3, 4) + - E(mat, i + 3, j + 2, 4) * E(mat, i + 2, j + 3, 4) + ) + - E(mat, i + 2, j + 1, 4) + * ( + E(mat, i + 1, j + 2, 4) * E(mat, i + 3, j + 3, 4) + - E(mat, i + 3, j + 2, 4) * E(mat, i + 1, j + 3, 4) + ) + + E(mat, i + 3, j + 1, 4) + * ( + E(mat, i + 1, j + 2, 4) * E(mat, i + 2, j + 3, 4) + - E(mat, i + 2, j + 2, 4) * E(mat, i + 1, j + 3, 4) + ) + ) + ) + for i in static(range(4)) + ] + for j in static(range(4)) + ] + ) + # unreachable + return None + + +@preconditions(check_transpose) +@pyfunc +def transpose(mat): + shape = static(mat.get_shape()) + return Matrix([[mat[i, j] for i in static(range(shape[0]))] for j in static(range(shape[1]))]) + + +@preconditions(arg_at(0, is_int_const)) +@pyfunc +def diag(dim: template(), val: template()): + return Matrix([[val if i == j else 0 for j in static(range(dim))] for i in static(range(dim))]) + + +@preconditions(assert_tensor) +@pyfunc +def sum(mat): # pylint: disable=W0622 + return _reduce(mat, ops_mod.add) + + +@preconditions(assert_tensor) +@pyfunc +def norm_sqr(mat): + return sum(mat * mat) + + +@preconditions(arg_at(0, assert_tensor)) +@pyfunc +def norm(mat, eps=0.0): + return ops_mod.sqrt(norm_sqr(mat) + eps) + + +@preconditions(arg_at(0, assert_tensor)) +@pyfunc +def norm_inv(mat, eps=0.0): + return ops_mod.rsqrt(norm_sqr(mat) + eps) + + +@preconditions(arg_at(0, assert_vector())) +@pyfunc +def normalized(vec, eps=0.0): + invlen = 1 / (norm(vec) + eps) + return invlen * vec + + +@preconditions(assert_tensor) +@pyfunc +def any(mat): # pylint: disable=W0622 + return _reduce(mat != 0, ops_mod.logical_or) and True + + +@preconditions(assert_tensor) +@pyfunc +def all(mat): # pylint: disable=W0622 + return _reduce(mat != 0, ops_mod.logical_and) and True + + +@preconditions(assert_tensor) +@pyfunc +def max(mat): # pylint: disable=W0622 + return _reduce(mat, ops_mod.max_impl) + + +@preconditions(assert_tensor) +@pyfunc +def min(mat): # pylint: disable=W0622 + return _reduce(mat, ops_mod.min_impl) + + +@preconditions(square_matrix) +@pyfunc +def trace(mat): + shape = static(mat.get_shape()) + result = mat[0, 0] + # TODO: get rid of static when + # CHI IR Tensor repr is ready stable + for i in static(range(1, shape[0])): + result = result + mat[i, i] + return result + + +@preconditions(arg_at(0, assert_tensor)) +@pyfunc +def fill(mat: template(), val): + shape = static(mat.get_shape()) + if static(len(shape) == 1): + for i in static(range(shape[0])): + mat[i] = val + else: + for i in static(range(shape[0])): + for j in static(range(shape[1])): + mat[i, j] = val + + +@preconditions(check_matmul) +@pyfunc +def _matmul_helper(mat_x, mat_y): + shape_x = static(mat_x.get_shape()) + shape_y = static(mat_y.get_shape()) + if static(len(shape_x) == 1 and len(shape_y) == 1): + return dot(mat_x, mat_y) + if static(len(shape_y) == 1): + zero_elem = mat_x[0, 0] * mat_y[0] * 0 # for correct return type + vec_z = _filled_vector(shape_x[0], None, zero_elem) + for i in static(range(shape_x[0])): + for j in static(range(shape_x[1])): + vec_z[i] = vec_z[i] + mat_x[i, j] * mat_y[j] + return vec_z + zero_elem = mat_x[0, 0] * mat_y[0, 0] * 0 # for correct return type + mat_z = _filled_matrix(shape_x[0], shape_y[1], None, zero_elem) + for i in static(range(shape_x[0])): + for j in static(range(shape_y[1])): + for k in static(range(shape_x[1])): + mat_z[i, j] = mat_z[i, j] + mat_x[i, k] * mat_y[k, j] + return mat_z + + +@pyfunc +def matmul(mat_x, mat_y): + shape_x = static(mat_x.get_shape()) + shape_y = static(mat_y.get_shape()) + if static(len(shape_x) == 1 and len(shape_y) == 2): + return _matmul_helper(transpose(mat_y), mat_x) + return _matmul_helper(mat_x, mat_y) + + +@preconditions( + arg_at(0, assert_vector("lhs for dot is not a vector")), + arg_at(1, assert_vector("rhs for dot is not a vector")), +) +@pyfunc +def dot(vec_x, vec_y): + return sum(vec_x * vec_y) + + +@preconditions( + arg_at(0, assert_vector("lhs for cross is not a vector")), + arg_at(1, assert_vector("rhs for cross is not a vector")), + same_shapes, + arg_at(0, dim_lt(0, 4)), +) +@pyfunc +def cross(vec_x, vec_y): + shape = static(vec_x.get_shape()) + if static(shape[0] == 2): + return vec_x[0] * vec_y[1] - vec_x[1] * vec_y[0] + if static(shape[0] == 3): + return Vector( + [ + vec_x[1] * vec_y[2] - vec_x[2] * vec_y[1], + vec_x[2] * vec_y[0] - vec_x[0] * vec_y[2], + vec_x[0] * vec_y[1] - vec_x[1] * vec_y[0], + ] + ) + return None + + +@preconditions( + arg_at(0, assert_vector("lhs for outer_product is not a vector")), + arg_at(1, assert_vector("rhs for outer_product is not a vector")), +) +@pyfunc +def outer_product(vec_x, vec_y): + shape_x = static(vec_x.get_shape()) + shape_y = static(vec_y.get_shape()) + return Matrix([[vec_x[i] * vec_y[j] for j in static(range(shape_y[0]))] for i in static(range(shape_x[0]))]) + + +@preconditions(assert_tensor) +@func +def cast(mat, dtype: template()): + return ops_mod.cast(mat, dtype) diff --git a/local_packages/taichi/lang/matrix_ops_utils.py b/local_packages/taichi/lang/matrix_ops_utils.py new file mode 100644 index 0000000..5871903 --- /dev/null +++ b/local_packages/taichi/lang/matrix_ops_utils.py @@ -0,0 +1,188 @@ +import functools + +from taichi.lang.exception import TaichiCompilationError +from taichi.lang.expr import Expr +from taichi.lang.matrix import Matrix + + +def do_check(checker_fns, *args, **kwargs): + for f in checker_fns: + ok, msg = f(*args, **kwargs) + if not ok: + return False, msg + return True, None + + +def preconditions(*checker_funcs): + def decorator(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + ok, msg = do_check(checker_funcs, *args, **kwargs) + if not ok: + raise TaichiCompilationError(msg) + return func(*args, **kwargs) + + return wrapper + + return decorator + + +def arg_at(indices, *fns): + def check(*args, **kwargs): + nonlocal indices + if isinstance(indices, int): + indices = [indices] + for i in indices: + if i in kwargs: + arg = kwargs[i] + else: + arg = args[i] + ok, msg = do_check(fns, arg) + if not ok: + return False, msg + return True, None + + return check + + +def assert_tensor(m, msg="not tensor type: {}"): + if isinstance(m, Matrix): + return True, None + if isinstance(m, Expr) and m.is_tensor(): + return True, None + return False, msg.format(type(m)) + + +def assert_vector(msg="expected a vector, got {}"): + def check(v): + if (isinstance(v, Expr) or isinstance(v, Matrix)) and len(v.get_shape()) == 1: + return True, None + return False, msg.format(type(v)) + + return check + + +def assert_list(x, msg="not a list: {}"): + if isinstance(x, list): + return True, None + return False, msg.format(type(x)) + + +def arg_foreach_check(*arg_indices, fns=[], logic="or", msg=None): + def check(*args, **kwargs): + for i in arg_indices: + if i in kwargs: + arg = kwargs[i] + else: + arg = args[i] + if logic == "or": + for a in arg: + passed = False + for fn in fns: + ok, _ = do_check([fn], a) + if ok: + passed = True + break + if not passed: + return False, msg + elif logic == "and": + for a in arg: + ok, _ = do_check(fns, a) + if not ok: + return False, msg + else: + raise ValueError(f"Unknown logic: {logic}") + return True, None + + return check + + +def get_list_shape(x): + outer_shape = [len(x)] + inner_shape = None + for element in x: + if isinstance(element, list): + cur_shape = get_list_shape(element) + else: + cur_shape = [] + + if inner_shape: + assert curr_shape == inner_shape + else: + inner_shape = cur_shape + + return outer_shape + inner_shape + + +def same_shapes(*xs): + shapes = [] + for x in xs: + if isinstance(x, Matrix): + shapes.append(x.get_shape()) + elif isinstance(x, list): + shapes.append(tuple(get_list_shape(x))) + elif isinstance(x, Expr): + shapes.append(tuple(x.ptr.get_rvalue_type().shape())) + else: + return False, f"same_shapes() received an unexpected argument of type: {x}" + + if len(set(shapes)) != 1: + return False, f"required shapes to be the same, got shapes {shapes}" + return True, None + + +def square_matrix(x): + assert_tensor(x) + shape = x.get_shape() + if len(shape) != 2 or shape[0] != shape[1]: + return False, f"expected a square matrix, got shape {shape}" + return True, None + + +def dim_lt(dim, limit): + def check(x): + assert_tensor(x) + shape = x.get_shape() + return shape[dim] < limit, (f"only dimension < {limit} is supported, got shape {shape}") + + return check + + +def is_int_const(x): + if isinstance(x, int): + return True, None + if isinstance(x, Expr) and x.val_int() is not None: + return True, None + return False, f"not an integer: {x} of type {type(x).__name__}" + + +def check_matmul(x, y): + assert_tensor(x, f"left hand side is not a matrix: {type(x)}") + assert_tensor(y, f"right hand side is not a matrix: {type(y)}") + x_shape = x.get_shape() + y_shape = y.get_shape() + if len(x_shape) == 1: + if len(y_shape) == 1: + return True, None + if x_shape[0] != y_shape[0]: + return ( + False, + f"dimension mismatch between {x_shape} and {y_shape} for left multiplication", + ) + else: + if x_shape[1] != y_shape[0]: + return ( + False, + f"dimension mismatch between {x_shape} and {y_shape} for matrix multiplication", + ) + return True, None + + +def check_transpose(x): + ok, msg = assert_tensor(x) + if ok and len(x.get_shape()) == 1: + return ( + False, + "`transpose()` cannot apply to a vector. If you want something like `a @ b.transpose()`, write `a.outer_product(b)` instead.", + ) + return ok, msg diff --git a/local_packages/taichi/lang/mesh.py b/local_packages/taichi/lang/mesh.py new file mode 100644 index 0000000..1ba4cea --- /dev/null +++ b/local_packages/taichi/lang/mesh.py @@ -0,0 +1,685 @@ +import json + +import numpy as np +from taichi._lib import core as _ti_core +from taichi.lang import impl +from taichi.lang.enums import Layout +from taichi.lang.exception import TaichiSyntaxError +from taichi.lang.field import Field, ScalarField +from taichi.lang.matrix import Matrix, MatrixField +from taichi.lang.struct import StructField +from taichi.lang.util import python_scope +from taichi.types import u16, u32 +from taichi.types.compound_types import CompoundType + +from taichi import lang + +MeshTopology = _ti_core.MeshTopology +MeshElementType = _ti_core.MeshElementType +MeshRelationType = _ti_core.MeshRelationType +ConvType = _ti_core.ConvType +element_order = _ti_core.element_order +from_end_element_order = _ti_core.from_end_element_order +to_end_element_order = _ti_core.to_end_element_order +relation_by_orders = _ti_core.relation_by_orders +inverse_relation = _ti_core.inverse_relation +element_type_name = _ti_core.element_type_name + + +class MeshAttrType: + def __init__(self, name, dtype, reorder, needs_grad): + self.name = name + self.dtype = dtype + self.reorder = reorder + self.needs_grad = needs_grad + + +class MeshReorderedScalarFieldProxy(ScalarField): + def __init__( + self, + field: ScalarField, + mesh_ptr: _ti_core.MeshPtr, + element_type: MeshElementType, + g2r_field: ScalarField, + ): + self.vars = field.vars + self.host_accessors = field.host_accessors + self.grad = field.grad + + self.mesh_ptr = mesh_ptr + self.element_type = element_type + self.g2r_field = g2r_field + + @python_scope + def __setitem__(self, key, value): + self._initialize_host_accessors() + key = self.g2r_field[key] + self.host_accessors[0].setter(value, *self._pad_key(key)) + + @python_scope + def __getitem__(self, key): + self._initialize_host_accessors() + key = self.g2r_field[key] + return self.host_accessors[0].getter(*self._pad_key(key)) + + +class MeshReorderedMatrixFieldProxy(MatrixField): + def __init__( + self, + field: MatrixField, + mesh_ptr: _ti_core.MeshPtr, + element_type: MeshElementType, + g2r_field: ScalarField, + ): + self.vars = field.vars + self.host_accessors = field.host_accessors + self.grad = field.grad + self.n = field.n + self.m = field.m + self.ndim = field.ndim + self.ptr = field.ptr + + self.mesh_ptr = mesh_ptr + self.element_type = element_type + self.g2r_field = g2r_field + + @python_scope + def __setitem__(self, key, value): + self._initialize_host_accessors() + self[key]._set_entries(value) + + @python_scope + def __getitem__(self, key): + self._initialize_host_accessors() + key = self.g2r_field[key] + key = self._pad_key(key) + return Matrix(self._host_access(key)) + + +class MeshElementField: + def __init__(self, mesh_instance, _type, attr_dict, field_dict, g2r_field): + self.mesh = mesh_instance + self._type = _type + self.attr_dict = attr_dict + self.field_dict = field_dict + self.g2r_field = g2r_field + + self._register_fields() + + def place(self, members, reorder=False, needs_grad=False, layout=Layout.SOA): + """Declares mesh attributes for the mesh element in current mesh. + + Args: + members (Dict[str, Union[PrimitiveType, MatrixType]]): \ + names and types for element attributes. + reorder: True if reorders the internal memory for coalesced data access within mesh-for loop. + needs_grad: True if needs to record grad. + layout: ti.Layout.AoS/ti.Layout.SoA + + Example:: + >>> import meshtaichi_patcher as Patcher + >>> vec3 = ti.types.vector(3, ti.f32) + >>> mesh = Patcher.load_mesh("bunny.obj", relations=['FV']) + >>> mesh.faces.place({'area' : ti.f32}) # declares a mesh attribute `area` for each face element. + >>> mesh.verts.place({'pos' : vec3}, reorder=True) # declares a mesh attribute `pos` for each vertex element, and reorder it in memory. + """ + + for key, dtype in members.items(): + if key in {"verts", "edges", "faces", "cells"}: + raise TaichiSyntaxError( + f"'{key}' cannot use as attribute name. It has been reserved as MeshTaichi's keyword." + ) + if key in self.attr_dict: + raise TaichiSyntaxError(f"'{key}' has already use as attribute name.") + + # init attr type + self.attr_dict[key] = MeshAttrType(key, dtype, reorder, needs_grad) + + # init field + if isinstance(dtype, CompoundType): + self.field_dict[key] = dtype.field(shape=None, needs_grad=needs_grad) + else: + self.field_dict[key] = impl.field(dtype, shape=None, needs_grad=needs_grad) + + size = _ti_core.get_num_elements(self.mesh.mesh_ptr, self._type) + if layout == Layout.SOA: + for key in members.keys(): + impl.root.dense(impl.axes(0), size).place(self.field_dict[key]) + if self.attr_dict[key].needs_grad: + impl.root.dense(impl.axes(0), size).place(self.field_dict[key].grad) + elif len(members) > 0: + _member_fields = {} + for key in members.keys(): + _member_fields[key] = self.field_dict[key] + impl.root.dense(impl.axes(0), size).place(*tuple(_member_fields.values())) + grads = [] + for key in members.keys(): + if self.attr_dict[key].needs_grad: + grads.append(self.field_dict[key].grad) + if len(grads) > 0: + impl.root.dense(impl.axes(0), size).place(*grads) + + for key, dtype in members.items(): + # expose interface + setattr(MeshElementField, key, property(fget=MeshElementField._make_getter(key))) + + @property + def keys(self): + return list(self.field_dict.keys()) + + @property + def _members(self): + return list(self.field_dict.values()) + + @property + def _items(self): + return self.field_dict.items() + + @staticmethod + def _make_getter(key): + def getter(self): + if key not in self.getter_dict: + if self.attr_dict[key].reorder: + if isinstance(self.field_dict[key], ScalarField): + self.getter_dict[key] = MeshReorderedScalarFieldProxy( + self.field_dict[key], + self.mesh.mesh_ptr, + self._type, + self.g2r_field, + ) + elif isinstance(self.field_dict[key], MatrixField): + self.getter_dict[key] = MeshReorderedMatrixFieldProxy( + self.field_dict[key], + self.mesh.mesh_ptr, + self._type, + self.g2r_field, + ) + else: + self.getter_dict[key] = self.field_dict[key] + """Get an entry from custom struct by name.""" + return self.getter_dict[key] + + return getter + + def _register_fields(self): + self.getter_dict = {} + for k in self.keys: + setattr(MeshElementField, k, property(fget=MeshElementField._make_getter(k))) + + def _get_field_members(self): + field_members = [] + for m in self._members: + assert isinstance(m, Field) + field_members += m._get_field_members() + return field_members + + def _initialize_host_accessors(self): + for v in self._members: + v._initialize_host_accessors() + + def get_member_field(self, key): + return self.field_dict[key] + + @python_scope + def __len__(self): + return _ti_core.get_num_elements(self.mesh.mesh_ptr, self._type) + + +class MeshElement: + def __init__(self, _type, builder): + self.builder = builder + self._type = _type + self.layout = Layout.SOA + self.attr_dict = {} + + def _SOA(self, soa=True): # AOS/SOA + self.layout = Layout.SOA if soa else Layout.AOS + + def _AOS(self, aos=True): + self.layout = Layout.AOS if aos else Layout.SOA + + SOA = property(fset=_SOA) + """(Deprecated) Set `True` for SOA (structure of arrays) layout. + """ + AOS = property(fset=_AOS) + """(Deprecated) Set `True` for AOS (array of structures) layout. + """ + + def place( + self, + members, + reorder=False, + needs_grad=False, + ): + """(Deprecated) Declares mesh attributes for the mesh element in current mesh builder. + + Args: + members (Dict[str, Union[PrimitiveType, MatrixType]]): \ + names and types for element attributes. + reorder: True if reorders the internal memory for coalesced data access within mesh-for loop. + needs_grad: True if needs to record grad. + + Example:: + >>> vec3 = ti.types.vector(3, ti.f32) + >>> mesh = ti.TriMesh() + >>> mesh.faces.place({'area' : ti.f32}) # declares a mesh attribute `area` for each face element. + >>> mesh.verts.place({'pos' : vec3}, reorder=True) # declares a mesh attribute `pos` for each vertex element, and reorder it in memory. + """ + for key, dtype in members.items(): + if key in {"verts", "edges", "faces", "cells"}: + raise TaichiSyntaxError( + f"'{key}' cannot use as attribute name. It has been reserved as MeshTaichi's keyword." + ) + self.attr_dict[key] = MeshAttrType(key, dtype, reorder, needs_grad) + + def build(self, mesh_instance, size, g2r_field): + field_dict = {} + + for key, attr in self.attr_dict.items(): + if isinstance(attr.dtype, CompoundType): + field_dict[key] = attr.dtype.field(shape=None, needs_grad=attr.needs_grad) + else: + field_dict[key] = impl.field(attr.dtype, shape=None, needs_grad=attr.needs_grad) + + if self.layout == Layout.SOA: + for key, field in field_dict.items(): + impl.root.dense(impl.axes(0), size).place(field) + if self.attr_dict[key].needs_grad: + impl.root.dense(impl.axes(0), size).place(field.grad) + elif len(field_dict) > 0: + impl.root.dense(impl.axes(0), size).place(*tuple(field_dict.values())) + grads = [] + for key, field in field_dict.items(): + if self.attr_dict[key].needs_grad: + grads.append(field.grad) + if len(grads) > 0: + impl.root.dense(impl.axes(0), size).place(*grads) + + return MeshElementField(mesh_instance, self._type, self.attr_dict, field_dict, g2r_field) + + +# Define the instance of the Mesh Type, stores the field (type and data) info +class MeshInstance: + def __init__(self): + self.mesh_ptr = _ti_core.create_mesh() + self.relation_set = set() + self.verts = MeshElementField(self, MeshElementType.Vertex, {}, {}, {}) + self.edges = MeshElementField(self, MeshElementType.Edge, {}, {}, {}) + self.faces = MeshElementField(self, MeshElementType.Face, {}, {}, {}) + self.cells = MeshElementField(self, MeshElementType.Cell, {}, {}, {}) + + def get_position_as_numpy(self): + """Get the vertex position of current mesh to numpy array. + + Returns: + 3d numpy array: [x, y, z] with float-format. + """ + if hasattr(self, "_vert_position"): + return self._vert_position + raise TaichiSyntaxError("Position info is not in the file.") + + def set_owned_offset(self, element_type: MeshElementType, owned_offset: ScalarField): + _ti_core.set_owned_offset(self.mesh_ptr, element_type, owned_offset.vars[0].ptr.snode()) + + def set_total_offset(self, element_type: MeshElementType, total_offset: ScalarField): + _ti_core.set_total_offset(self.mesh_ptr, element_type, total_offset.vars[0].ptr.snode()) + + def set_index_mapping(self, element_type: MeshElementType, conv_type: ConvType, mapping: ScalarField): + _ti_core.set_index_mapping(self.mesh_ptr, element_type, conv_type, mapping.vars[0].ptr.snode()) + + def set_num_patches(self, num_patches: int): + _ti_core.set_num_patches(self.mesh_ptr, num_patches) + + def set_patch_max_element_num(self, element_type: MeshElementType, max_element_num: int): + _ti_core.set_patch_max_element_num(self.mesh_ptr, element_type, max_element_num) + + def set_relation_fixed(self, rel_type: MeshRelationType, value: ScalarField): + self.relation_set.add(rel_type) + _ti_core.set_relation_fixed(self.mesh_ptr, rel_type, value.vars[0].ptr.snode()) + + def set_relation_dynamic( + self, + rel_type: MeshRelationType, + value: ScalarField, + patch_offset: ScalarField, + offset: ScalarField, + ): + self.relation_set.add(rel_type) + _ti_core.set_relation_dynamic( + self.mesh_ptr, + rel_type, + value.vars[0].ptr.snode(), + patch_offset.vars[0].ptr.snode(), + offset.vars[0].ptr.snode(), + ) + + def add_mesh_attribute(self, element_type, snode, reorder_type): + _ti_core.add_mesh_attribute(self.mesh_ptr, element_type, snode, reorder_type) + + def get_relation_size(self, from_index, to_element_type): + return _ti_core.get_relation_size( + self.mesh_ptr, + from_index.ptr, + to_element_type, + _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()), + ) + + def get_relation_access(self, from_index, to_element_type, neighbor_idx_ptr): + return _ti_core.get_relation_access( + self.mesh_ptr, + from_index.ptr, + to_element_type, + neighbor_idx_ptr, + _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()), + ) + + +class MeshMetadata: + def __init__(self, data): + self.num_patches = data["num_patches"] + + self.element_fields = {} + self.relation_fields = {} + self.num_elements = {} + self.max_num_per_patch = {} + + for element in data["elements"]: + element_type = MeshElementType(element["order"]) + self.num_elements[element_type] = element["num"] + self.max_num_per_patch[element_type] = element["max_num_per_patch"] + + element["l2g_mapping"] = np.array(element["l2g_mapping"]) + element["l2r_mapping"] = np.array(element["l2r_mapping"]) + element["g2r_mapping"] = np.array(element["g2r_mapping"]) + self.element_fields[element_type] = {} + self.element_fields[element_type]["owned"] = impl.field(dtype=u32, shape=self.num_patches + 1) + self.element_fields[element_type]["total"] = impl.field(dtype=u32, shape=self.num_patches + 1) + self.element_fields[element_type]["l2g"] = impl.field(dtype=u32, shape=element["l2g_mapping"].shape[0]) + self.element_fields[element_type]["l2r"] = impl.field(dtype=u32, shape=element["l2r_mapping"].shape[0]) + self.element_fields[element_type]["g2r"] = impl.field(dtype=u32, shape=element["g2r_mapping"].shape[0]) + + for relation in data["relations"]: + from_order = relation["from_order"] + to_order = relation["to_order"] + rel_type = MeshRelationType(relation_by_orders(from_order, to_order)) + self.relation_fields[rel_type] = {} + self.relation_fields[rel_type]["value"] = impl.field(dtype=u16, shape=len(relation["value"])) + if from_order <= to_order: + self.relation_fields[rel_type]["offset"] = impl.field(dtype=u16, shape=len(relation["offset"])) + self.relation_fields[rel_type]["patch_offset"] = impl.field( + dtype=u32, shape=len(relation["patch_offset"]) + ) + self.relation_fields[rel_type]["from_order"] = from_order + self.relation_fields[rel_type]["to_order"] = to_order + + for element in data["elements"]: + element_type = MeshElementType(element["order"]) + self.element_fields[element_type]["owned"].from_numpy(np.array(element["owned_offsets"])) + self.element_fields[element_type]["total"].from_numpy(np.array(element["total_offsets"])) + self.element_fields[element_type]["l2g"].from_numpy(element["l2g_mapping"]) + self.element_fields[element_type]["l2r"].from_numpy(element["l2r_mapping"]) + self.element_fields[element_type]["g2r"].from_numpy(element["g2r_mapping"]) + + for relation in data["relations"]: + from_order = relation["from_order"] + to_order = relation["to_order"] + rel_type = MeshRelationType(relation_by_orders(from_order, to_order)) + self.relation_fields[rel_type]["value"].from_numpy(np.array(relation["value"])) + if from_order <= to_order: + self.relation_fields[rel_type]["patch_offset"].from_numpy(np.array(relation["patch_offset"])) + self.relation_fields[rel_type]["offset"].from_numpy(np.array(relation["offset"])) + + self.attrs = {} + self.attrs["x"] = np.array(data["attrs"]["x"]).reshape(-1, 3) + if "patcher" in data: + self.patcher = data["patcher"] + else: + self.patcher = None + + +# Define the Mesh Type, stores the field type info +class MeshBuilder: + def __init__(self): + if not lang.misc.is_extension_supported(impl.current_cfg().arch, lang.extension.mesh): + raise Exception("Backend " + str(impl.current_cfg().arch) + " doesn't support MeshTaichi extension") + + self.verts = MeshElement(MeshElementType.Vertex, self) + self.edges = MeshElement(MeshElementType.Edge, self) + self.faces = MeshElement(MeshElementType.Face, self) + self.cells = MeshElement(MeshElementType.Cell, self) + + def build(self, metadata: MeshMetadata) -> MeshInstance: + instance = MeshInstance() + instance.fields = {} + + instance.set_num_patches(metadata.num_patches) + + for element in metadata.element_fields: + _ti_core.set_num_elements(instance.mesh_ptr, element, metadata.num_elements[element]) + instance.set_patch_max_element_num(element, metadata.max_num_per_patch[element]) + + element_name = element_type_name(element) + setattr( + instance, + element_name, + getattr(self, element_name).build( + instance, + metadata.num_elements[element], + metadata.element_fields[element]["g2r"], + ), + ) + instance.fields[element] = getattr(instance, element_name) + + instance.set_owned_offset(element, metadata.element_fields[element]["owned"]) + instance.set_total_offset(element, metadata.element_fields[element]["total"]) + instance.set_index_mapping(element, ConvType.l2g, metadata.element_fields[element]["l2g"]) + instance.set_index_mapping(element, ConvType.l2r, metadata.element_fields[element]["l2r"]) + instance.set_index_mapping(element, ConvType.g2r, metadata.element_fields[element]["g2r"]) + + for rel_type in metadata.relation_fields: + from_order = metadata.relation_fields[rel_type]["from_order"] + to_order = metadata.relation_fields[rel_type]["to_order"] + if from_order <= to_order: + instance.set_relation_dynamic( + rel_type, + metadata.relation_fields[rel_type]["value"], + metadata.relation_fields[rel_type]["patch_offset"], + metadata.relation_fields[rel_type]["offset"], + ) + else: + instance.set_relation_fixed(rel_type, metadata.relation_fields[rel_type]["value"]) + + instance._vert_position = metadata.attrs["x"] + instance.patcher = metadata.patcher + + return instance + + +# Mesh First Class +class Mesh: + """The Mesh type class. + + MeshTaichi offers first-class support for triangular/tetrahedral meshes + and allows efficient computation on these irregular data structures, + only available for backends supporting `ti.extension.mesh`. + + See more details in https://github.com/taichi-dev/meshtaichi + """ + + def __init__(self): + pass + + @staticmethod + def _create_instance(metadata: MeshMetadata) -> MeshInstance: + instance = MeshInstance() + instance.fields = {} + + instance.set_num_patches(metadata.num_patches) + + for element in metadata.element_fields: + _ti_core.set_num_elements(instance.mesh_ptr, element, metadata.num_elements[element]) + instance.set_patch_max_element_num(element, metadata.max_num_per_patch[element]) + + element_name = element_type_name(element) + setattr( + instance, + element_name, + MeshElementField(instance, element, {}, {}, metadata.element_fields[element]["g2r"]), + ) + instance.fields[element] = getattr(instance, element_name) + + instance.set_owned_offset(element, metadata.element_fields[element]["owned"]) + instance.set_total_offset(element, metadata.element_fields[element]["total"]) + instance.set_index_mapping(element, ConvType.l2g, metadata.element_fields[element]["l2g"]) + instance.set_index_mapping(element, ConvType.l2r, metadata.element_fields[element]["l2r"]) + instance.set_index_mapping(element, ConvType.g2r, metadata.element_fields[element]["g2r"]) + + for rel_type in metadata.relation_fields: + from_order = metadata.relation_fields[rel_type]["from_order"] + to_order = metadata.relation_fields[rel_type]["to_order"] + if from_order <= to_order: + instance.set_relation_dynamic( + rel_type, + metadata.relation_fields[rel_type]["value"], + metadata.relation_fields[rel_type]["patch_offset"], + metadata.relation_fields[rel_type]["offset"], + ) + else: + instance.set_relation_fixed(rel_type, metadata.relation_fields[rel_type]["value"]) + + instance._vert_position = metadata.attrs["x"] + instance.patcher = metadata.patcher + + return instance + + @staticmethod + def load_meta(filename): + with open(filename, "r") as fi: + data = json.loads(fi.read()) + return MeshMetadata(data) + + @staticmethod + def generate_meta(data): + return MeshMetadata(data) + + +def _TriMesh(): + """(Deprecated) Create a triangle mesh (a set of vert/edge/face elements, attributes, and connectivity) builder. + + Returns: + An instance of mesh builder. + """ + return MeshBuilder() + + +def _TetMesh(): + """(Deprecated) Create a tetrahedron mesh (a set of vert/edge/face/cell elements, attributes, and connectivity) builder. + + Returns: + An instance of mesh builder. + """ + return MeshBuilder() + + +class MeshElementFieldProxy: + def __init__(self, mesh: MeshInstance, element_type: MeshElementType, entry_expr: impl.Expr): + ast_builder = impl.get_runtime().compiling_callable.ast_builder() + + self.mesh = mesh + self.element_type = element_type + self.entry_expr = entry_expr + + element_field = self.mesh.fields[self.element_type] + for key, attr in element_field.field_dict.items(): + global_entry_expr = impl.Expr( + ast_builder.mesh_index_conversion( + self.mesh.mesh_ptr, + element_type, + entry_expr, + ConvType.l2r if element_field.attr_dict[key].reorder else ConvType.l2g, + _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()), + ) + ) # transform index space + global_entry_expr_group = impl.make_expr_group(*tuple([global_entry_expr])) + if isinstance(attr, MatrixField): + setattr( + self, + key, + impl.Expr( + ast_builder.expr_subscript( + attr.ptr, + global_entry_expr_group, + _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()), + ) + ), + ) + elif isinstance(attr, StructField): + raise RuntimeError("MeshTaichi has not support StructField yet") + else: # isinstance(attr, Field) + var = attr._get_field_members()[0].ptr + setattr( + self, + key, + impl.Expr( + ast_builder.expr_subscript( + var, + global_entry_expr_group, + _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()), + ) + ), + ) + + for element_type in { + MeshElementType.Vertex, + MeshElementType.Edge, + MeshElementType.Face, + MeshElementType.Cell, + }: + setattr( + self, + element_type_name(element_type), + impl.mesh_relation_access(self.mesh, self, element_type), + ) + + @property + def ptr(self): + return self.entry_expr + + @property + def id(self): # return the global non-reordered index + ast_builder = impl.get_runtime().compiling_callable.ast_builder() + l2g_expr = impl.Expr( + ast_builder.mesh_index_conversion( + self.mesh.mesh_ptr, + self.element_type, + self.entry_expr, + ConvType.l2g, + _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()), + ) + ) + return l2g_expr + + +class MeshRelationAccessProxy: + def __init__( + self, + mesh: MeshInstance, + from_index: impl.Expr, + to_element_type: MeshElementType, + ): + self.mesh = mesh + self.from_index = from_index + self.to_element_type = to_element_type + + @property + def size(self): + return impl.Expr(self.mesh.get_relation_size(self.from_index, self.to_element_type)) + + def subscript(self, *indices): + assert len(indices) == 1 + entry_expr = self.mesh.get_relation_access(self.from_index, self.to_element_type, impl.Expr(indices[0]).ptr) + entry_expr.type_check(impl.get_runtime().prog.config()) + return MeshElementFieldProxy(self.mesh, self.to_element_type, entry_expr) + + +__all__ = ["Mesh", "MeshInstance"] diff --git a/local_packages/taichi/lang/misc.py b/local_packages/taichi/lang/misc.py new file mode 100644 index 0000000..272ccf4 --- /dev/null +++ b/local_packages/taichi/lang/misc.py @@ -0,0 +1,806 @@ +import atexit +import functools +import os +import shutil +import tempfile +import warnings +from copy import deepcopy as _deepcopy + +from taichi._lib import core as _ti_core +from taichi.lang import impl +from taichi.lang.expr import Expr +from taichi.lang.impl import axes, get_runtime +from taichi.profiler.kernel_profiler import get_default_kernel_profiler +from taichi.types.primitive_types import f32, f64, i32, i64 + +from taichi import _logging, _snode, _version_check + +warnings.filterwarnings("once", category=DeprecationWarning, module="taichi") + +# ---------------------- +i = axes(0) +"""Axis 0. For multi-dimensional arrays it's the direction downward the rows. +For a 1d array it's the direction along this array. +""" +# ---------------------- + +j = axes(1) +"""Axis 1. For multi-dimensional arrays it's the direction across the columns. +""" +# ---------------------- + +k = axes(2) +"""Axis 2. For arrays of dimension `d` >= 3, view each cell as an array of +lower dimension d-2, it's the first axis of this cell. +""" +# ---------------------- + +l = axes(3) +"""Axis 3. For arrays of dimension `d` >= 4, view each cell as an array of +lower dimension d-2, it's the second axis of this cell. +""" +# ---------------------- + +ij = axes(0, 1) +"""Axes (0, 1). +""" +# ---------------------- + +ik = axes(0, 2) +"""Axes (0, 2). +""" +# ---------------------- + +il = axes(0, 3) +"""Axes (0, 3). +""" +# ---------------------- + +jk = axes(1, 2) +"""Axes (1, 2). +""" +# ---------------------- + +jl = axes(1, 3) +"""Axes (1, 3). +""" +# ---------------------- + +kl = axes(2, 3) +"""Axes (2, 3). +""" +# ---------------------- + +ijk = axes(0, 1, 2) +"""Axes (0, 1, 2). +""" +# ---------------------- + +ijl = axes(0, 1, 3) +"""Axes (0, 1, 3). +""" +# ---------------------- + +ikl = axes(0, 2, 3) +"""Axes (0, 2, 3). +""" +# ---------------------- + +jkl = axes(1, 2, 3) +"""Axes (1, 2, 3). +""" +# ---------------------- + +ijkl = axes(0, 1, 2, 3) +"""Axes (0, 1, 2, 3). +""" +# ---------------------- + +# ---------------------- + +x86_64 = _ti_core.x64 +"""The x64 CPU backend. +""" +# ---------------------- + +x64 = _ti_core.x64 +"""The X64 CPU backend. +""" +# ---------------------- + +arm64 = _ti_core.arm64 +"""The ARM CPU backend. +""" +# ---------------------- + +cuda = _ti_core.cuda +"""The CUDA backend. +""" +# ---------------------- + +amdgpu = _ti_core.amdgpu +"""The AMDGPU backend. +""" +# ---------------------- + +metal = _ti_core.metal +"""The Apple Metal backend. +""" +# ---------------------- + +opengl = _ti_core.opengl +"""The OpenGL backend. OpenGL 4.3 required. +""" +# ---------------------- + +gles = _ti_core.gles +"""The OpenGL ES backend. OpenGL ES 3.1 required. +""" +# ---------------------- + +vulkan = _ti_core.vulkan +"""The Vulkan backend. +""" +# ---------------------- + +dx11 = _ti_core.dx11 +"""The DX11 backend. +""" +# ---------------------- + +dx12 = _ti_core.dx12 +"""The DX11 backend. +""" +# ---------------------- + +gpu = [cuda, metal, vulkan, opengl, dx11, dx12, gles, amdgpu] +"""A list of GPU backends supported on the current system. +Currently contains 'cuda', 'metal', 'opengl', 'vulkan', 'dx11', 'dx12', 'gles', 'amdgpu'. + +When this is used, Taichi automatically picks the matching GPU backend. If no +GPU is detected, Taichi falls back to the CPU backend. +""" +# ---------------------- + +cpu = _ti_core.host_arch() +"""A list of CPU backends supported on the current system. +Currently contains 'x64', 'x86_64', 'arm64'. + +When this is used, Taichi automatically picks the matching CPU backend. +""" +# ---------------------- + + +def timeline_clear(): + return impl.get_runtime().prog.timeline_clear() + + +def timeline_save(fn): + return impl.get_runtime().prog.timeline_save(fn) + + +extension = _ti_core.Extension +"""An instance of Taichi extension. + +The list of currently available extensions is ['sparse', 'quant', \ + 'mesh', 'quant_basic', 'data64', 'adstack', 'bls', 'assertion', \ + 'extfunc']. +""" + + +def is_extension_supported(arch, ext): + """Checks whether an extension is supported on an arch. + + Args: + arch (taichi_python.Arch): Specified arch. + ext (taichi_python.Extension): Specified extension. + + Returns: + bool: Whether `ext` is supported on `arch`. + """ + return _ti_core.is_extension_supported(arch, ext) + + +def reset(): + """Resets Taichi to its initial state. + This will destroy all the allocated fields and kernels, and restore + the runtime to its default configuration. + + Example:: + + >>> a = ti.field(ti.i32, shape=()) + >>> a[None] = 1 + >>> print("before reset: ", a) + before rest: 1 + >>> + >>> ti.reset() + >>> print("after reset: ", a) + # will raise error because a is unavailable after reset. + """ + impl.reset() + global runtime + runtime = impl.get_runtime() + + +class _EnvironmentConfigurator: + def __init__(self, kwargs, _cfg): + self.cfg = _cfg + self.kwargs = kwargs + self.keys = [] + + def add(self, key, _cast=None): + _cast = _cast or self.bool_int + + self.keys.append(key) + + # TI_OFFLINE_CACHE= : no effect + # TI_OFFLINE_CACHE=0 : False + # TI_OFFLINE_CACHE=1 : True + name = "TI_" + key.upper() + value = os.environ.get(name, "") + if key in self.kwargs: + self[key] = self.kwargs[key] + if value: + _ti_core.warn(f'Environment variable {name}={value} overridden by ti.init argument "{key}"') + del self.kwargs[key] # mark as recognized + elif value: + self[key] = _cast(value) + + def __getitem__(self, key): + return getattr(self.cfg, key) + + def __setitem__(self, key, value): + setattr(self.cfg, key, value) + + @staticmethod + def bool_int(x): + return bool(int(x)) + + +class _SpecialConfig: + # like CompileConfig in C++, this is the configurations that belong to other submodules + def __init__(self): + self.log_level = "info" + self.gdb_trigger = False + self.short_circuit_operators = True + self.print_full_traceback = False + self.unrolling_limit = 32 + + +def prepare_sandbox(): + """ + Returns a temporary directory, which will be automatically deleted on exit. + It may contain the taichi_python shared object or some misc. files. + """ + tmp_dir = tempfile.mkdtemp(prefix="taichi-") + atexit.register(shutil.rmtree, tmp_dir) + print(f"[Taichi] preparing sandbox at {tmp_dir}") + os.mkdir(os.path.join(tmp_dir, "runtime/")) + return tmp_dir + + +def check_require_version(require_version): + """ + Check if installed version meets the requirements. + Allow to specify .... + . is optional. If not match, raise an exception. + """ + # Extract version number part (i.e. toss any revision / hash parts). + version_number_str = require_version + for c_idx, c in enumerate(require_version): + if not (c.isdigit() or c == "."): + version_number_str = require_version[:c_idx] + break + # Get required version. + try: + version_number_tuple = tuple([int(n) for n in version_number_str.split(".")]) + major = version_number_tuple[0] + minor = version_number_tuple[1] + patch = 0 + if len(version_number_tuple) > 2: + patch = version_number_tuple[2] + except: + raise Exception( + "The require_version should be formatted following PEP 440, " + "and inlucdes major, minor, and patch number, " + "e.g., major.minor.patch." + ) from None + # Get installed version + versions = [ + int(_ti_core.get_version_major()), + int(_ti_core.get_version_minor()), + int(_ti_core.get_version_patch()), + ] + # Match installed version and required version. + match = major == versions[0] and (minor < versions[1] or minor == versions[1] and patch <= versions[2]) + + if not match: + raise Exception( + f"Taichi version mismatch. Required version >= {major}.{minor}.{patch}, installed version = {_ti_core.get_version_string()}." + ) + + +def init( + arch=None, + default_fp=None, + default_ip=None, + _test_mode=False, + enable_fallback=True, + require_version=None, + **kwargs, +): + """Initializes the Taichi runtime. + + This should always be the entry point of your Taichi program. Most + importantly, it sets the backend used throughout the program. + + Args: + arch: Backend to use. This is usually :const:`~taichi.lang.cpu` or :const:`~taichi.lang.gpu`. + default_fp (Optional[type]): Default floating-point type. + default_ip (Optional[type]): Default integral type. + require_version (Optional[string]): A version string. + **kwargs: Taichi provides highly customizable compilation through + ``kwargs``, which allows for fine grained control of Taichi compiler + behavior. Below we list some of the most frequently used ones. For a + complete list, please check out + https://github.com/taichi-dev/taichi/blob/master/taichi/program/compile_config.h. + + * ``cpu_max_num_threads`` (int): Sets the number of threads used by the CPU thread pool. + * ``debug`` (bool): Enables the debug mode, under which Taichi does a few more things like boundary checks. + * ``print_ir`` (bool): Prints the CHI IR of the Taichi kernels. + *``offline_cache`` (bool): Enables offline cache of the compiled kernels. Default to True. When this is enabled Taichi will cache compiled kernel on your local disk to accelerate future calls. + *``random_seed`` (int): Sets the seed of the random generator. The default is 0. + """ + # Check version for users every 7 days if not disabled by users. + _version_check.start_version_check_thread() + + # FIXME(https://github.com/taichi-dev/taichi/issues/4811): save the current working directory since it may be + # changed by the Vulkan backend initialization on OS X. + current_dir = os.getcwd() + + # Check if installed version meets the requirements. + if require_version is not None: + check_require_version(require_version) + + if "default_up" in kwargs: + raise KeyError("'default_up' is always the unsigned type of 'default_ip'. Please set 'default_ip' instead.") + # Make a deepcopy in case these args reference to items from ti.cfg, which are + # actually references. If no copy is made and the args are indeed references, + # ti.reset() could override the args to their default values. + default_fp = _deepcopy(default_fp) + default_ip = _deepcopy(default_ip) + kwargs = _deepcopy(kwargs) + reset() + + cfg = impl.default_cfg() + cfg.offline_cache = True # Enable offline cache in frontend instead of C++ side + + spec_cfg = _SpecialConfig() + env_comp = _EnvironmentConfigurator(kwargs, cfg) + env_spec = _EnvironmentConfigurator(kwargs, spec_cfg) + + # configure default_fp/ip: + # TODO: move these stuff to _SpecialConfig too: + env_default_fp = os.environ.get("TI_DEFAULT_FP") + if env_default_fp: + if default_fp is not None: + _ti_core.warn( + f'Environment variable TI_DEFAULT_FP={env_default_fp} overridden by ti.init argument "default_fp"' + ) + elif env_default_fp == "32": + default_fp = f32 + elif env_default_fp == "64": + default_fp = f64 + elif env_default_fp is not None: + raise ValueError(f"Invalid TI_DEFAULT_FP={env_default_fp}, should be 32 or 64") + + env_default_ip = os.environ.get("TI_DEFAULT_IP") + if env_default_ip: + if default_ip is not None: + _ti_core.warn( + f'Environment variable TI_DEFAULT_IP={env_default_ip} overridden by ti.init argument "default_ip"' + ) + elif env_default_ip == "32": + default_ip = i32 + elif env_default_ip == "64": + default_ip = i64 + elif env_default_ip is not None: + raise ValueError(f"Invalid TI_DEFAULT_IP={env_default_ip}, should be 32 or 64") + + if default_fp is not None: + impl.get_runtime().set_default_fp(default_fp) + if default_ip is not None: + impl.get_runtime().set_default_ip(default_ip) + + # submodule configurations (spec_cfg): + env_spec.add("log_level", str) + env_spec.add("gdb_trigger") + env_spec.add("short_circuit_operators") + env_spec.add("print_full_traceback") + env_spec.add("unrolling_limit") + + # compiler configurations (ti.cfg): + for key in dir(cfg): + if key in ["arch", "default_fp", "default_ip"]: + continue + _cast = type(getattr(cfg, key)) + if _cast is bool: + _cast = None + env_comp.add(key, _cast) + + unexpected_keys = kwargs.keys() + + if len(unexpected_keys): + raise KeyError(f'Unrecognized keyword argument(s) for ti.init: {", ".join(unexpected_keys)}') + + # dispatch configurations that are not in ti.cfg: + if not _test_mode: + _ti_core.set_core_trigger_gdb_when_crash(spec_cfg.gdb_trigger) + impl.get_runtime().short_circuit_operators = spec_cfg.short_circuit_operators + impl.get_runtime().print_full_traceback = spec_cfg.print_full_traceback + impl.get_runtime().unrolling_limit = spec_cfg.unrolling_limit + _logging.set_logging_level(spec_cfg.log_level.lower()) + + # select arch (backend): + env_arch = os.environ.get("TI_ARCH") + if env_arch is not None: + _logging.info(f"Following TI_ARCH setting up for arch={env_arch}") + arch = _ti_core.arch_from_name(env_arch) + cfg.arch = adaptive_arch_select(arch, enable_fallback) + print(f"[Taichi] Starting on arch={_ti_core.arch_name(cfg.arch)}") + + if _test_mode: + return spec_cfg + + get_default_kernel_profiler().set_kernel_profiler_mode(cfg.kernel_profiler) + + # create a new program: + impl.get_runtime().create_program() + + _logging.trace("Materializing runtime...") + impl.get_runtime().prog.materialize_runtime() + + impl._root_fb = _snode.FieldsBuilder() + + if cfg.debug: + impl.get_runtime()._register_signal_handlers() + + # Recover the current working directory (https://github.com/taichi-dev/taichi/issues/4811) + os.chdir(current_dir) + return None + + +def no_activate(*args): + """Deactivates a SNode pointer.""" + assert isinstance(get_runtime().compiling_callable, _ti_core.Kernel) + for v in args: + get_runtime().compiling_callable.no_activate(v._snode.ptr) + + +def block_local(*args): + """Hints Taichi to cache the fields and to enable the BLS optimization. + + Please visit https://docs.taichi-lang.org/docs/performance + for how BLS is used. + + Args: + *args (List[Field]): A list of sparse Taichi fields. + """ + if impl.current_cfg().opt_level == 0: + _logging.warn("""opt_level = 1 is enforced to enable bls analysis.""") + impl.current_cfg().opt_level = 1 + for a in args: + for v in a._get_field_members(): + get_runtime().compiling_callable.ast_builder().insert_snode_access_flag( + _ti_core.SNodeAccessFlag.block_local, v.ptr + ) + + +def mesh_local(*args): + """Hints the compiler to cache the mesh attributes + and to enable the mesh BLS optimization, + only available for backends supporting `ti.extension.mesh` and to use with mesh-for loop. + + Related to https://github.com/taichi-dev/taichi/issues/3608 + + Args: + *args (List[Attribute]): A list of mesh attributes or fields accessed as attributes. + + Examples:: + + # instantiate model + mesh_builder = ti.Mesh.tri() + mesh_builder.verts.place({ + 'x' : ti.f32, + 'y' : ti.f32 + }) + model = mesh_builder.build(meta) + + @ti.kernel + def foo(): + # hint the compiler to cache mesh vertex attribute `x` and `y`. + ti.mesh_local(model.verts.x, model.verts.y) + for v0 in model.verts: # mesh-for loop + for v1 in v0.verts: + v0.x += v1.y + """ + for a in args: + for v in a._get_field_members(): + get_runtime().compiling_callable.ast_builder().insert_snode_access_flag( + _ti_core.SNodeAccessFlag.mesh_local, v.ptr + ) + + +def cache_read_only(*args): + for a in args: + for v in a._get_field_members(): + get_runtime().compiling_callable.ast_builder().insert_snode_access_flag( + _ti_core.SNodeAccessFlag.read_only, v.ptr + ) + + +def assume_in_range(val, base, low, high): + """Hints the compiler that a value is between a specified range, + for the compiler to perform scatchpad optimization, and return the + value untouched. + + The assumed range is `[base + low, base + high)`. + + Args: + + val (Number): The input value. + base (Number): The base point for the range interval. + low (Number): The lower offset relative to `base` (included). + high (Number): The higher offset relative to `base` (excluded). + + Returns: + Return the input `value` untouched. + + Example:: + + >>> # hint the compiler that x is in range [8, 12). + >>> x = ti.assume_in_range(x, 10, -2, 2) + >>> x + 10 + """ + return _ti_core.expr_assume_in_range( + Expr(val).ptr, Expr(base).ptr, low, high, _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()) + ) + + +def loop_unique(val, covers=None): + if covers is None: + covers = [] + if not isinstance(covers, (list, tuple)): + covers = [covers] + covers = [x.snode.ptr if isinstance(x, Expr) else x.ptr for x in covers] + return _ti_core.expr_loop_unique( + Expr(val).ptr, covers, _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()) + ) + + +def _parallelize(v): + """Sets the number of threads to use on CPU.""" + get_runtime().compiling_callable.ast_builder().parallelize(v) + if v == 1: + get_runtime().compiling_callable.ast_builder().strictly_serialize() + + +def _serialize(): + """Sets the number of threads to 1.""" + _parallelize(1) + + +def _block_dim(dim): + """Set the number of threads in a block to `dim`.""" + get_runtime().compiling_callable.ast_builder().block_dim(dim) + + +def _block_dim_adaptive(block_dim_adaptive): + """Enable/Disable backends set block_dim adaptively.""" + if get_runtime().prog.config().arch != cpu: + _logging.warn("Adaptive block_dim is supported on CPU backend only") + else: + get_runtime().prog.config().cpu_block_dim_adaptive = block_dim_adaptive + + +def _bit_vectorize(): + """Enable bit vectorization of struct fors on quant_arrays.""" + get_runtime().compiling_callable.ast_builder().bit_vectorize() + + +def loop_config( + *, + block_dim=None, + serialize=False, + parallelize=None, + block_dim_adaptive=True, + bit_vectorize=False, +): + """Sets directives for the next loop + + Args: + block_dim (int): The number of threads in a block on GPU + serialize (bool): Whether to let the for loop execute serially, `serialize=True` equals to `parallelize=1` + parallelize (int): The number of threads to use on CPU + block_dim_adaptive (bool): Whether to allow backends set block_dim adaptively, enabled by default + bit_vectorize (bool): Whether to enable bit vectorization of struct fors on quant_arrays. + + Examples:: + + @ti.kernel + def break_in_serial_for() -> ti.i32: + a = 0 + ti.loop_config(serialize=True) + for i in range(100): # This loop runs serially + a += i + if i == 10: + break + return a + + break_in_serial_for() # returns 55 + + n = 128 + val = ti.field(ti.i32, shape=n) + @ti.kernel + def fill(): + ti.loop_config(parallelize=8, block_dim=16) + # If the kernel is run on the CPU backend, 8 threads will be used to run it + # If the kernel is run on the CUDA backend, each block will have 16 threads. + for i in range(n): + val[i] = i + + u1 = ti.types.quant.int(bits=1, signed=False) + x = ti.field(dtype=u1) + y = ti.field(dtype=u1) + cell = ti.root.dense(ti.ij, (128, 4)) + cell.quant_array(ti.j, 32).place(x) + cell.quant_array(ti.j, 32).place(y) + @ti.kernel + def copy(): + ti.loop_config(bit_vectorize=True) + # 32 bits, instead of 1 bit, will be copied at a time + for i, j in x: + y[i, j] = x[i, j] + """ + if block_dim is not None: + _block_dim(block_dim) + + if serialize: + _parallelize(1) + elif parallelize is not None: + _parallelize(parallelize) + + if not block_dim_adaptive: + _block_dim_adaptive(block_dim_adaptive) + + if bit_vectorize: + _bit_vectorize() + + +def global_thread_idx(): + """Returns the global thread id of this running thread, + only available for cpu and cuda backends. + + For cpu backends this is equal to the cpu thread id, + For cuda backends this is equal to `block_id * block_dim + thread_id`. + + Example:: + + >>> f = ti.field(ti.f32, shape=(16, 16)) + >>> @ti.kernel + >>> def test(): + >>> for i in ti.grouped(f): + >>> print(ti.global_thread_idx()) + >>> + test() + """ + return impl.get_runtime().compiling_callable.ast_builder().insert_thread_idx_expr() + + +def mesh_patch_idx(): + """Returns the internal mesh patch id of this running thread, + only available for backends supporting `ti.extension.mesh` and to use within mesh-for loop. + + Related to https://github.com/taichi-dev/taichi/issues/3608 + """ + return ( + impl.get_runtime() + .compiling_callable.ast_builder() + .insert_patch_idx_expr(_ti_core.DebugInfo(impl.get_runtime().get_current_src_info())) + ) + + +def is_arch_supported(arch): + """Checks whether an arch is supported on the machine. + + Args: + arch (taichi_python.Arch): Specified arch. + + Returns: + bool: Whether `arch` is supported on the machine. + """ + + arch_table = { + cuda: _ti_core.with_cuda, + amdgpu: _ti_core.with_amdgpu, + metal: _ti_core.with_metal, + opengl: functools.partial(_ti_core.with_opengl, False), + gles: functools.partial(_ti_core.with_opengl, True), + vulkan: _ti_core.with_vulkan, + dx11: _ti_core.with_dx11, + dx12: _ti_core.with_dx12, + cpu: lambda: True, + } + with_arch = arch_table.get(arch, lambda: False) + try: + return with_arch() + except Exception as e: + arch = _ti_core.arch_name(arch) + _ti_core.warn( + f"{e.__class__.__name__}: '{e}' occurred when detecting " + f"{arch}, consider adding `TI_ENABLE_{arch.upper()}=0` " + f" to environment variables to suppress this warning message." + ) + return False + + +def adaptive_arch_select(arch, enable_fallback): + if arch is None: + return cpu + if not isinstance(arch, (list, tuple)): + arch = [arch] + for a in arch: + if is_arch_supported(a): + return a + if not enable_fallback: + raise RuntimeError(f"Arch={arch} is not supported") + _logging.warn(f"Arch={arch} is not supported, falling back to CPU") + return cpu + + +def get_host_arch_list(): + return [_ti_core.host_arch()] + + +__all__ = [ + "i", + "ij", + "ijk", + "ijkl", + "ijl", + "ik", + "ikl", + "il", + "j", + "jk", + "jkl", + "jl", + "k", + "kl", + "l", + "x86_64", + "x64", + "dx11", + "dx12", + "arm64", + "cpu", + "cuda", + "amdgpu", + "gles", + "gpu", + "metal", + "opengl", + "vulkan", + "extension", + "loop_config", + "global_thread_idx", + "assume_in_range", + "block_local", + "cache_read_only", + "init", + "mesh_local", + "no_activate", + "reset", + "mesh_patch_idx", +] diff --git a/local_packages/taichi/lang/ops.py b/local_packages/taichi/lang/ops.py new file mode 100644 index 0000000..e259886 --- /dev/null +++ b/local_packages/taichi/lang/ops.py @@ -0,0 +1,1486 @@ +import builtins +import functools +import operator as _bt_ops_mod # bt for builtin +from typing import Union + +import numpy as np +from taichi._lib import core as _ti_core +from taichi.lang import expr, impl +from taichi.lang.exception import TaichiSyntaxError +from taichi.lang.field import Field +from taichi.lang.util import cook_dtype, is_matrix_class, is_taichi_class, taichi_scope + + +def stack_info(): + return impl.get_runtime().get_current_src_info() + + +def is_taichi_expr(a): + return isinstance(a, expr.Expr) + + +def wrap_if_not_expr(a): + return ( + expr.Expr(a, dbg_info=_ti_core.DebugInfo(impl.get_runtime().get_current_src_info())) + if not is_taichi_expr(a) + else a + ) + + +def _read_matrix_or_scalar(x): + if is_matrix_class(x): + return x.to_numpy() + return x + + +def writeback_binary(foo): + @functools.wraps(foo) + def wrapped(a, b): + if isinstance(a, Field) or isinstance(b, Field): + return NotImplemented + if not (is_taichi_expr(a) and a.ptr.is_lvalue()): + raise TaichiSyntaxError(f"cannot use a non-writable target as the first operand of '{foo.__name__}'") + return foo(a, wrap_if_not_expr(b)) + + return wrapped + + +def cast(obj, dtype): + """Copy and cast a scalar or a matrix to a specified data type. + Must be called in Taichi scope. + + Args: + obj (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + Input scalar or matrix. + + dtype (:mod:`~taichi.types.primitive_types`): A primitive type defined in :mod:`~taichi.types.primitive_types`. + + Returns: + A copy of `obj`, casted to the specified data type `dtype`. + + Example:: + + >>> @ti.kernel + >>> def test(): + >>> x = ti.Matrix([0, 1, 2], ti.i32) + >>> y = ti.cast(x, ti.f32) + >>> print(y) + >>> + >>> test() + [0.0, 1.0, 2.0] + """ + dtype = cook_dtype(dtype) + if is_taichi_class(obj): + # TODO: unify with element_wise_unary + return obj.cast(dtype) + return expr.Expr(_ti_core.value_cast(expr.Expr(obj).ptr, dtype)) + + +def bit_cast(obj, dtype): + """Copy and cast a scalar to a specified data type with its underlying + bits preserved. Must be called in taichi scope. + + This function is equivalent to `reinterpret_cast` in C++. + + Args: + obj (:mod:`~taichi.types.primitive_types`): Input scalar. + + dtype (:mod:`~taichi.types.primitive_types`): Target data type, must have \ + the same precision bits as the input (hence `f32` -> `f64` is not allowed). + + Returns: + A copy of `obj`, casted to the specified data type `dtype`. + + Example:: + + >>> @ti.kernel + >>> def test(): + >>> x = 3.14 + >>> y = ti.bit_cast(x, ti.i32) + >>> print(y) # 1078523331 + >>> + >>> z = ti.bit_cast(y, ti.f32) + >>> print(z) # 3.14 + """ + dtype = cook_dtype(dtype) + if is_taichi_class(obj): + raise ValueError("Cannot apply bit_cast on Taichi classes") + else: + return expr.Expr(_ti_core.bits_cast(expr.Expr(obj).ptr, dtype)) + + +def _unary_operation(taichi_op, python_op, a): + if isinstance(a, Field): + return NotImplemented + if is_taichi_expr(a): + return expr.Expr(taichi_op(a.ptr), dbg_info=_ti_core.DebugInfo(stack_info())) + from taichi.lang.matrix import Matrix # pylint: disable-msg=C0415 + + if isinstance(a, Matrix): + return Matrix(python_op(a.to_numpy())) + return python_op(a) + + +def _binary_operation(taichi_op, python_op, a, b): + if isinstance(a, Field) or isinstance(b, Field): + return NotImplemented + if is_taichi_expr(a) or is_taichi_expr(b): + a, b = wrap_if_not_expr(a), wrap_if_not_expr(b) + return expr.Expr(taichi_op(a.ptr, b.ptr), dbg_info=_ti_core.DebugInfo(stack_info())) + from taichi.lang.matrix import Matrix # pylint: disable-msg=C0415 + + if isinstance(a, Matrix) or isinstance(b, Matrix): + return Matrix(python_op(_read_matrix_or_scalar(a), _read_matrix_or_scalar(b))) + return python_op(a, b) + + +def _ternary_operation(taichi_op, python_op, a, b, c): + if isinstance(a, Field) or isinstance(b, Field) or isinstance(c, Field): + return NotImplemented + if is_taichi_expr(a) or is_taichi_expr(b) or is_taichi_expr(c): + a, b, c = wrap_if_not_expr(a), wrap_if_not_expr(b), wrap_if_not_expr(c) + return expr.Expr(taichi_op(a.ptr, b.ptr, c.ptr), dbg_info=_ti_core.DebugInfo(stack_info())) + from taichi.lang.matrix import Matrix # pylint: disable-msg=C0415 + + if isinstance(a, Matrix) or isinstance(b, Matrix) or isinstance(c, Matrix): + return Matrix( + python_op( + _read_matrix_or_scalar(a), + _read_matrix_or_scalar(b), + _read_matrix_or_scalar(c), + ) + ) + return python_op(a, b, c) + + +def neg(x): + """Numerical negative, element-wise. + + Args: + x (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + Input scalar or matrix. + + Returns: + Matrix or scalar `y`, so that `y = -x`. `y` has the same type as `x`. + + Example:: + >>> x = ti.Matrix([1, -1]) + >>> y = ti.neg(a) + >>> y + [-1, 1] + """ + return _unary_operation(_ti_core.expr_neg, _bt_ops_mod.neg, x) + + +def sin(x): + """Trigonometric sine, element-wise. + + Args: + x (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + Angle, in radians. + + Returns: + The sine of each element of `x`. + + Example:: + + >>> from math import pi + >>> x = ti.Matrix([-pi/2., 0, pi/2.]) + >>> ti.sin(x) + [-1., 0., 1.] + """ + return _unary_operation(_ti_core.expr_sin, np.sin, x) + + +def cos(x): + """Trigonometric cosine, element-wise. + + Args: + x (Union[:mod:`~taichi.type.primitive_types`, :class:`~taichi.Matrix`]): \ + Angle, in radians. + + Returns: + The cosine of each element of `x`. + + Example:: + + >>> from math import pi + >>> x = ti.Matrix([-pi, 0, pi/2.]) + >>> ti.cos(x) + [-1., 1., 0.] + """ + return _unary_operation(_ti_core.expr_cos, np.cos, x) + + +def asin(x): + """Trigonometric inverse sine, element-wise. + + The inverse of `sin` so that, if `y = sin(x)`, then `x = asin(y)`. + + For input `x` not in the domain `[-1, 1]`, this function returns `nan` if \ + it's called in taichi scope, or raises exception if it's called in python scope. + + Args: + x (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + A scalar or a matrix with elements in [-1, 1]. + + Returns: + The inverse sine of each element in `x`, in radians and in the closed \ + interval `[-pi/2, pi/2]`. + + Example:: + + >>> from math import pi + >>> ti.asin(ti.Matrix([-1.0, 0.0, 1.0])) * 180 / pi + [-90., 0., 90.] + """ + return _unary_operation(_ti_core.expr_asin, np.arcsin, x) + + +def acos(x): + """Trigonometric inverse cosine, element-wise. + + The inverse of `cos` so that, if `y = cos(x)`, then `x = acos(y)`. + + For input `x` not in the domain `[-1, 1]`, this function returns `nan` if \ + it's called in taichi scope, or raises exception if it's called in python scope. + + Args: + x (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + A scalar or a matrix with elements in [-1, 1]. + + Returns: + The inverse cosine of each element in `x`, in radians and in the closed \ + interval `[0, pi]`. This is a scalar if `x` is a scalar. + + Example:: + + >>> from math import pi + >>> ti.acos(ti.Matrix([-1.0, 0.0, 1.0])) * 180 / pi + [180., 90., 0.] + """ + return _unary_operation(_ti_core.expr_acos, np.arccos, x) + + +def sqrt(x): + """Return the non-negative square-root of a scalar or a matrix, + element wise. If `x < 0` an exception is raised. + + Args: + x (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + The scalar or matrix whose square-roots are required. + + Returns: + The square-root `y` so that `y >= 0` and `y^2 = x`. `y` has the same type as `x`. + + Example:: + + >>> x = ti.Matrix([1., 4., 9.]) + >>> y = ti.sqrt(x) + >>> y + [1.0, 2.0, 3.0] + """ + return _unary_operation(_ti_core.expr_sqrt, np.sqrt, x) + + +def rsqrt(x): + """The reciprocal of the square root function. + + Args: + x (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + A scalar or a matrix. + + Returns: + The reciprocal of `sqrt(x)`. + """ + + def _rsqrt(x): + return 1 / np.sqrt(x) + + return _unary_operation(_ti_core.expr_rsqrt, _rsqrt, x) + + +def _round(x): + return _unary_operation(_ti_core.expr_round, np.round, x) + + +def round(x, dtype=None): # pylint: disable=redefined-builtin + """Round to the nearest integer, element-wise. + + Args: + x (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + A scalar or a matrix. + + dtype: (:mod:`~taichi.types.primitive_types`): the returned type, default to `None`. If \ + set to `None` the retuned value will have the same type with `x`. + + Returns: + The nearest integer of `x`, with return value type `dtype`. + + Example:: + + >>> @ti.kernel + >>> def test(): + >>> x = ti.Vector([-1.5, 1.2, 2.7]) + >>> print(ti.round(x)) + [-2., 1., 3.] + """ + result = _round(x) + if dtype is not None: + result = cast(result, dtype) + return result + + +def _floor(x): + return _unary_operation(_ti_core.expr_floor, np.floor, x) + + +def floor(x, dtype=None): + """Return the floor of the input, element-wise. + The floor of the scalar `x` is the largest integer `k`, such that `k <= x`. + + Args: + x (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + Input scalar or matrix. + + dtype: (:mod:`~taichi.types.primitive_types`): the returned type, default to `None`. If \ + set to `None` the retuned value will have the same type with `x`. + + Returns: + The floor of each element in `x`, with return value type `dtype`. + + Example:: + >>> @ti.kernel + >>> def test(): + >>> x = ti.Matrix([-1.1, 2.2, 3.]) + >>> y = ti.floor(x, ti.f64) + >>> print(y) # [-2.000000000000, 2.000000000000, 3.000000000000] + """ + result = _floor(x) + if dtype is not None: + result = cast(result, dtype) + return result + + +def _ceil(x): + return _unary_operation(_ti_core.expr_ceil, np.ceil, x) + + +def ceil(x, dtype=None): + """Return the ceiling of the input, element-wise. + + The ceil of the scalar `x` is the smallest integer `k`, such that `k >= x`. + + Args: + x (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + Input scalar or matrix. + + dtype: (:mod:`~taichi.types.primitive_types`): the returned type, default to `None`. If \ + set to `None` the retuned value will have the same type with `x`. + + Returns: + The ceiling of each element in `x`, with return value type `dtype`. + + Example:: + + >>> @ti.kernel + >>> def test(): + >>> x = ti.Matrix([3.14, -1.5]) + >>> y = ti.ceil(x) + >>> print(y) # [4.0, -1.0] + """ + result = _ceil(x) + if dtype is not None: + result = cast(result, dtype) + return result + + +def frexp(x): + return _unary_operation(_ti_core.expr_frexp, np.frexp, x) + + +def tan(x): + """Trigonometric tangent function, element-wise. + + Equivalent to `ti.sin(x)/ti.cos(x)` element-wise. + + Args: + x (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + Input scalar or matrix. + + Returns: + The tangent values of `x`. + + Example:: + + >>> from math import pi + >>> @ti.kernel + >>> def test(): + >>> x = ti.Matrix([-pi, pi/2, pi]) + >>> y = ti.tan(x) + >>> print(y) + >>> + >>> test() + [-0.0, -22877334.0, 0.0] + """ + return _unary_operation(_ti_core.expr_tan, np.tan, x) + + +def tanh(x): + """Compute the hyperbolic tangent of `x`, element-wise. + + Args: + x (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + Input scalar or matrix. + + Returns: + The corresponding hyperbolic tangent values. + + Example:: + + >>> @ti.kernel + >>> def test(): + >>> x = ti.Matrix([-1.0, 0.0, 1.0]) + >>> y = ti.tanh(x) + >>> print(y) + >>> + >>> test() + [-0.761594, 0.000000, 0.761594] + """ + return _unary_operation(_ti_core.expr_tanh, np.tanh, x) + + +def exp(x): + """Compute the exponential of all elements in `x`, element-wise. + + Args: + x (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + Input scalar or matrix. + + Returns: + Element-wise exponential of `x`. + + Example:: + + >>> @ti.kernel + >>> def test(): + >>> x = ti.Matrix([-1.0, 0.0, 1.0]) + >>> y = ti.exp(x) + >>> print(y) + >>> + >>> test() + [0.367879, 1.000000, 2.718282] + """ + return _unary_operation(_ti_core.expr_exp, np.exp, x) + + +def log(x): + """Compute the natural logarithm, element-wise. + + The natural logarithm `log` is the inverse of the exponential function, + so that `log(exp(x)) = x`. The natural logarithm is logarithm in base `e`. + + Args: + x (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + Input scalar or matrix. + + Returns: + The natural logarithm of `x`, element-wise. + + Example:: + + >>> @ti.kernel + >>> def test(): + >>> x = ti.Vector([-1.0, 0.0, 1.0]) + >>> y = ti.log(x) + >>> print(y) + >>> + >>> test() + [-nan, -inf, 0.000000] + """ + return _unary_operation(_ti_core.expr_log, np.log, x) + + +def abs(x): # pylint: disable=W0622 + """Compute the absolute value :math:`|x|` of `x`, element-wise. + + Args: + x (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + Input scalar or matrix. + + Returns: + The absolute value of each element in `x`. + + Example:: + + >>> @ti.kernel + >>> def test(): + >>> x = ti.Vector([-1.0, 0.0, 1.0]) + >>> y = ti.abs(x) + >>> print(y) + >>> + >>> test() + [1.0, 0.0, 1.0] + """ + return _unary_operation(_ti_core.expr_abs, builtins.abs, x) + + +def bit_not(a): + """The bit not function. + + Args: + a (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): A number or a matrix. + + Returns: + Bitwise not of `a`. + """ + return _unary_operation(_ti_core.expr_bit_not, _bt_ops_mod.invert, a) + + +def popcnt(a): + def _popcnt(x): + return bin(x).count("1") + + return _unary_operation(_ti_core.expr_popcnt, _popcnt, a) + + +def logical_not(a): + """The logical not function. + + Args: + a (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): A number or a matrix. + + Returns: + `1` iff `a=0`, otherwise `0`. + """ + return _unary_operation(_ti_core.expr_logic_not, np.logical_not, a) + + +def random(dtype=float) -> Union[float, int]: + """Return a single random float/integer according to the specified data type. + Must be called in taichi scope. + + If the required `dtype` is float type, this function returns a random number + sampled from the uniform distribution in the half-open interval [0, 1). + + For integer types this function returns a random integer in the + half-open interval [0, 2^32) if a 32-bit integer is required, + or a random integer in the half-open interval [0, 2^64) if a + 64-bit integer is required. + + Args: + dtype (:mod:`~taichi.types.primitive_types`): Type of the required random value. + + Returns: + A random value with type `dtype`. + + Example:: + + >>> @ti.kernel + >>> def test(): + >>> x = ti.random(float) + >>> print(x) # 0.090257 + >>> + >>> y = ti.random(ti.f64) + >>> print(y) # 0.716101627301 + >>> + >>> i = ti.random(ti.i32) + >>> print(i) # -963722261 + >>> + >>> j = ti.random(ti.i64) + >>> print(j) # 73412986184350777 + """ + dtype = cook_dtype(dtype) + x = expr.Expr(_ti_core.make_rand_expr(dtype, _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()))) + return impl.expr_init(x) + + +# NEXT: add matpow(self, power) + + +def add(a, b): + """The add function. + + Args: + a (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): A number or a matrix. + b (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): A number or a matrix. + + Returns: + sum of `a` and `b`. + """ + return _binary_operation(_ti_core.expr_add, _bt_ops_mod.add, a, b) + + +def sub(a, b): + """The sub function. + + Args: + a (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): A number or a matrix. + b (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): A number or a matrix. + + Returns: + `a` subtract `b`. + """ + return _binary_operation(_ti_core.expr_sub, _bt_ops_mod.sub, a, b) + + +def mul(a, b): + """The multiply function. + + Args: + a (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): A number or a matrix. + b (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): A number or a matrix. + + Returns: + `a` multiplied by `b`. + """ + return _binary_operation(_ti_core.expr_mul, _bt_ops_mod.mul, a, b) + + +def mod(x1, x2): + """Returns the element-wise remainder of division. + + This is equivalent to the Python modulus operator `x1 % x2` and + has the same sign as the divisor x2. + + Args: + x1 (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + Dividend scalar or matrix. + + x2 (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + Divisor scalar or matrix. When both `x1` and `x2` are matrices they must have the same shape. + + Returns: + The element-wise remainder of the quotient `floordiv(x1, x2)`. This is a scalar \ + if both `x1` and `x2` are scalars. + + Example:: + + >>> @ti.kernel + >>> def test(): + >>> x = ti.Matrix([3.0, 4.0, 5.0]) + >>> y = 3 + >>> z = ti.mod(y, x) + >>> print(z) + >>> + >>> test() + [1.0, 0.0, 4.0] + """ + + def expr_python_mod(a, b): + # a % b = a - (a // b) * b + quotient = expr.Expr(_ti_core.expr_floordiv(a, b)) + multiply = expr.Expr(_ti_core.expr_mul(b, quotient.ptr)) + return _ti_core.expr_sub(a, multiply.ptr) + + return _binary_operation(expr_python_mod, _bt_ops_mod.mod, x1, x2) + + +def pow(base, exponent): # pylint: disable=W0622 + """First array elements raised to second array elements :math:`{base}^{exponent}`, element-wise. + + The result type of two scalar operands is determined as follows: + - If the exponent is an integral value, then the result type takes the type of the base. + - Otherwise, the result type follows + [Implicit type casting in binary operations](https://docs.taichi-lang.org/docs/type#implicit-type-casting-in-binary-operations). + + With the above rules, an integral value raised to a negative integral value cannot have a + feasible type. Therefore, an exception will be raised if debug mode or optimization passes + are on; otherwise 1 will be returned. + + In the following situations, the result is undefined: + - A negative value raised to a non-integral value. + - A zero value raised to a non-positive value. + + Args: + base (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + The bases. + exponent (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + The exponents. + + Returns: + `base` raised to `exponent`. This is a scalar if both `base` and `exponent` are scalars. + + Example:: + + >>> @ti.kernel + >>> def test(): + >>> x = ti.Matrix([-2.0, 2.0]) + >>> y = -3 + >>> z = ti.pow(x, y) + >>> print(z) + >>> + >>> test() + [-0.125000, 0.125000] + """ + return _binary_operation(_ti_core.expr_pow, _bt_ops_mod.pow, base, exponent) + + +def floordiv(a, b): + """The floor division function. + + Args: + a (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): A number or a matrix. + b (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): A number or a matrix with elements not equal to zero. + + Returns: + The floor function of `a` divided by `b`. + """ + return _binary_operation(_ti_core.expr_floordiv, _bt_ops_mod.floordiv, a, b) + + +def truediv(a, b): + """True division function. + + Args: + a (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): A number or a matrix. + b (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): A number or a matrix with elements not equal to zero. + + Returns: + The true value of `a` divided by `b`. + """ + return _binary_operation(_ti_core.expr_truediv, _bt_ops_mod.truediv, a, b) + + +def max_impl(a, b): + """The maxnimum function. + + Args: + a (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): A number or a matrix. + b (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): A number or a matrix. + + Returns: + The maxnimum of `a` and `b`. + """ + return _binary_operation(_ti_core.expr_max, np.maximum, a, b) + + +def min_impl(a, b): + """The minimum function. + + Args: + a (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): A number or a matrix. + b (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): A number or a matrix. + + Returns: + The minimum of `a` and `b`. + """ + return _binary_operation(_ti_core.expr_min, np.minimum, a, b) + + +def atan2(x1, x2): + """Element-wise arc tangent of `x1/x2`. + + Args: + x1 (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + y-coordinates. + x2 (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + x-coordinates. + + Returns: + Angles in radians, in the range `[-pi, pi]`. + This is a scalar if both `x1` and `x2` are scalars. + + Example:: + + >>> from math import pi + >>> @ti.kernel + >>> def test(): + >>> x = ti.Matrix([-1.0, 1.0, -1.0, 1.0]) + >>> y = ti.Matrix([-1.0, -1.0, 1.0, 1.0]) + >>> z = ti.atan2(y, x) * 180 / pi + >>> print(z) + >>> + >>> test() + [-135.0, -45.0, 135.0, 45.0] + """ + return _binary_operation(_ti_core.expr_atan2, np.arctan2, x1, x2) + + +def raw_div(x1, x2): + """Return `x1 // x2` if both `x1`, `x2` are integers, otherwise return `x1/x2`. + + Args: + x1 (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): Dividend. + x2 (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): Divisor. + + Returns: + Return `x1 // x2` if both `x1`, `x2` are integers, otherwise return `x1/x2`. + + Example:: + + >>> @ti.kernel + >>> def main(): + >>> x = 5 + >>> y = 3 + >>> print(raw_div(x, y)) # 1 + >>> z = 4.0 + >>> print(raw_div(x, z)) # 1.25 + """ + + def c_div(a, b): + if isinstance(a, int) and isinstance(b, int): + return a // b + return a / b + + return _binary_operation(_ti_core.expr_div, c_div, x1, x2) + + +def raw_mod(x1, x2): + """Return the remainder of `x1/x2`, element-wise. + This is the C-style `mod` function. + + Args: + x1 (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + The dividend. + x2 (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + The divisor. + + Returns: + The remainder of `x1` divided by `x2`. + + Example:: + + >>> @ti.kernel + >>> def main(): + >>> print(ti.mod(-4, 3)) # 2 + >>> print(ti.raw_mod(-4, 3)) # -1 + """ + + def c_mod(x, y): + return x - y * int(float(x) / y) + + return _binary_operation(_ti_core.expr_mod, c_mod, x1, x2) + + +def cmp_lt(a, b): + """Compare two values (less than) + + Args: + a (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): value LHS + b (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): value RHS + + Returns: + Union[:class:`~taichi.lang.expr.Expr`, bool]: True if LHS is strictly smaller than RHS, False otherwise + + """ + return _binary_operation(_ti_core.expr_cmp_lt, _bt_ops_mod.lt, a, b) + + +def cmp_le(a, b): + """Compare two values (less than or equal to) + + Args: + a (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): value LHS + b (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): value RHS + + Returns: + Union[:class:`~taichi.lang.expr.Expr`, bool]: True if LHS is smaller than or equal to RHS, False otherwise + + """ + return _binary_operation(_ti_core.expr_cmp_le, _bt_ops_mod.le, a, b) + + +def cmp_gt(a, b): + """Compare two values (greater than) + + Args: + a (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): value LHS + b (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): value RHS + + Returns: + Union[:class:`~taichi.lang.expr.Expr`, bool]: True if LHS is strictly larger than RHS, False otherwise + + """ + return _binary_operation(_ti_core.expr_cmp_gt, _bt_ops_mod.gt, a, b) + + +def cmp_ge(a, b): + """Compare two values (greater than or equal to) + + Args: + a (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): value LHS + b (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): value RHS + + Returns: + bool: True if LHS is greater than or equal to RHS, False otherwise + + """ + return _binary_operation(_ti_core.expr_cmp_ge, _bt_ops_mod.ge, a, b) + + +def cmp_eq(a, b): + """Compare two values (equal to) + + Args: + a (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): value LHS + b (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): value RHS + + Returns: + Union[:class:`~taichi.lang.expr.Expr`, bool]: True if LHS is equal to RHS, False otherwise. + + """ + return _binary_operation(_ti_core.expr_cmp_eq, _bt_ops_mod.eq, a, b) + + +def cmp_ne(a, b): + """Compare two values (not equal to) + + Args: + a (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): value LHS + b (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): value RHS + + Returns: + Union[:class:`~taichi.lang.expr.Expr`, bool]: True if LHS is not equal to RHS, False otherwise + + """ + return _binary_operation(_ti_core.expr_cmp_ne, _bt_ops_mod.ne, a, b) + + +def bit_or(a, b): + """Computes bitwise-or + + Args: + a (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): value LHS + b (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): value RHS + + Returns: + Union[:class:`~taichi.lang.expr.Expr`, bool]: LHS bitwise-or with RHS + + """ + return _binary_operation(_ti_core.expr_bit_or, _bt_ops_mod.or_, a, b) + + +def bit_and(a, b): + """Compute bitwise-and + + Args: + a (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): value LHS + b (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): value RHS + + Returns: + Union[:class:`~taichi.lang.expr.Expr`, bool]: LHS bitwise-and with RHS + + """ + return _binary_operation(_ti_core.expr_bit_and, _bt_ops_mod.and_, a, b) + + +def bit_xor(a, b): + """Compute bitwise-xor + + Args: + a (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): value LHS + b (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): value RHS + + Returns: + Union[:class:`~taichi.lang.expr.Expr`, bool]: LHS bitwise-xor with RHS + + """ + return _binary_operation(_ti_core.expr_bit_xor, _bt_ops_mod.xor, a, b) + + +def bit_shl(a, b): + """Compute bitwise shift left + + Args: + a (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): value LHS + b (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): value RHS + + Returns: + Union[:class:`~taichi.lang.expr.Expr`, int]: LHS << RHS + + """ + return _binary_operation(_ti_core.expr_bit_shl, _bt_ops_mod.lshift, a, b) + + +def bit_sar(a, b): + """Compute bitwise shift right + + Args: + a (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): value LHS + b (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): value RHS + + Returns: + Union[:class:`~taichi.lang.expr.Expr`, int]: LHS >> RHS + + """ + return _binary_operation(_ti_core.expr_bit_sar, _bt_ops_mod.rshift, a, b) + + +@taichi_scope +def bit_shr(x1, x2): + """Elements in `x1` shifted to the right by number of bits in `x2`. + Both `x1`, `x2` must have integer type. + + Args: + x1 (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + Input data. + x2 (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + Number of bits to remove at the right of `x1`. + + Returns: + Return `x1` with bits shifted `x2` times to the right. + This is a scalar if both `x1` and `x2` are scalars. + + Example:: + >>> @ti.kernel + >>> def main(): + >>> x = ti.Matrix([7, 8]) + >>> y = ti.Matrix([1, 2]) + >>> print(ti.bit_shr(x, y)) + >>> + >>> main() + [3, 2] + """ + return _binary_operation(_ti_core.expr_bit_shr, _bt_ops_mod.rshift, x1, x2) + + +def logical_and(a, b): + """Compute logical_and + + Args: + a (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): value LHS + b (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): value RHS + + Returns: + Union[:class:`~taichi.lang.expr.Expr`, bool]: LHS logical-and RHS (with short-circuit semantics) + + """ + return _binary_operation(_ti_core.expr_logical_and, lambda a, b: a and b, a, b) + + +def logical_or(a, b): + """Compute logical_or + + Args: + a (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): value LHS + b (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): value RHS + + Returns: + Union[:class:`~taichi.lang.expr.Expr`, bool]: LHS logical-or RHS (with short-circuit semantics) + + """ + return _binary_operation(_ti_core.expr_logical_or, lambda a, b: a or b, a, b) + + +def select(cond, x1, x2): + """Return an array drawn from elements in `x1` or `x2`, + depending on the conditions in `cond`. + + Args: + cond (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + The array of conditions. + x1, x2 (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + The arrays where the output elements are taken from. + + Returns: + The output at position `k` is the k-th element of `x1` if the k-th element + in `cond` is `True`, otherwise it's the k-th element of `x2`. + + Example:: + + >>> @ti.kernel + >>> def main(): + >>> cond = ti.Matrix([0, 1, 0, 1]) + >>> x = ti.Matrix([1, 2, 3, 4]) + >>> y = ti.Matrix([-1, -2, -3, -4]) + >>> print(ti.select(cond, x, y)) + >>> + >>> main() + [-1, 2, -3, 4] + """ + # TODO: systematically resolve `-1 = True` problem by introducing u1: + cond = logical_not(logical_not(cond)) + + def py_select(cond, x1, x2): + return x1 * cond + x2 * (1 - cond) + + return _ternary_operation(_ti_core.expr_select, py_select, cond, x1, x2) + + +def ifte(cond, x1, x2): + """Evaluate and return `x1` if `cond` is true; otherwise evaluate and return `x2`. This operator guarantees + short-circuit semantics: exactly one of `x1` or `x2` will be evaluated. + + Args: + cond (:mod:`~taichi.types.primitive_types`): \ + The condition. + x1, x2 (:mod:`~taichi.types.primitive_types`): \ + The outputs. + + Returns: + `x1` if `cond` is true and `x2` otherwise. + """ + # TODO: systematically resolve `-1 = True` problem by introducing u1: + cond = logical_not(logical_not(cond)) + + def py_ifte(cond, x1, x2): + return x1 if cond else x2 + + return _ternary_operation(_ti_core.expr_ifte, py_ifte, cond, x1, x2) + + +def clz(a): + """Count the number of leading zeros for a 32bit integer""" + + def _clz(x): + for i in range(32): + if 2**i > x: + return 32 - i + return 0 + + return _unary_operation(_ti_core.expr_clz, _clz, a) + + +@writeback_binary +def atomic_add(x, y): + """Atomically compute `x + y`, store the result in `x`, + and return the old value of `x`. + + `x` must be a writable target, constant expressions or scalars + are not allowed. + + Args: + x, y (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + The input. + + Returns: + The old value of `x`. + + Example:: + + >>> @ti.kernel + >>> def test(): + >>> x = ti.Vector([0, 0, 0]) + >>> y = ti.Vector([1, 2, 3]) + >>> z = ti.atomic_add(x, y) + >>> print(x) # [1, 2, 3] the new value of x + >>> print(z) # [0, 0, 0], the old value of x + >>> + >>> ti.atomic_add(1, x) # will raise TaichiSyntaxError + """ + return impl.expr_init(expr.Expr(_ti_core.expr_atomic_add(x.ptr, y.ptr), dbg_info=_ti_core.DebugInfo(stack_info()))) + + +@writeback_binary +def atomic_mul(x, y): + """Atomically compute `x * y`, store the result in `x`, + and return the old value of `x`. + + `x` must be a writable target, constant expressions or scalars + are not allowed. + + Args: + x, y (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + The input. + + Returns: + The old value of `x`. + + Example:: + + >>> @ti.kernel + >>> def test(): + >>> x = ti.Vector([1, 2, 3]) + >>> y = ti.Vector([4, 5, 6]) + >>> z = ti.atomic_mul(x, y) + >>> print(x) # [1, 2, 3] the new value of x + >>> print(z) # [4, 10, 18], the old value of x + >>> + >>> ti.atomic_mul(1, x) # will raise TaichiSyntaxError + """ + return impl.expr_init(expr.Expr(_ti_core.expr_atomic_mul(x.ptr, y.ptr), dbg_info=_ti_core.DebugInfo(stack_info()))) + + +@writeback_binary +def atomic_sub(x, y): + """Atomically subtract `x` by `y`, store the result in `x`, + and return the old value of `x`. + + `x` must be a writable target, constant expressions or scalars + are not allowed. + + Args: + x, y (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + The input. + + Returns: + The old value of `x`. + + Example:: + + >>> @ti.kernel + >>> def test(): + >>> x = ti.Vector([0, 0, 0]) + >>> y = ti.Vector([1, 2, 3]) + >>> z = ti.atomic_sub(x, y) + >>> print(x) # [-1, -2, -3] the new value of x + >>> print(z) # [0, 0, 0], the old value of x + >>> + >>> ti.atomic_sub(1, x) # will raise TaichiSyntaxError + """ + return impl.expr_init(expr.Expr(_ti_core.expr_atomic_sub(x.ptr, y.ptr), dbg_info=_ti_core.DebugInfo(stack_info()))) + + +@writeback_binary +def atomic_min(x, y): + """Atomically compute the minimum of `x` and `y`, element-wise. + Store the result in `x`, and return the old value of `x`. + + `x` must be a writable target, constant expressions or scalars + are not allowed. + + Args: + x, y (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + The input. + + Returns: + The old value of `x`. + + Example:: + + >>> @ti.kernel + >>> def test(): + >>> x = 2 + >>> y = 1 + >>> z = ti.atomic_min(x, y) + >>> print(x) # 1 the new value of x + >>> print(z) # 2, the old value of x + >>> + >>> ti.atomic_min(1, x) # will raise TaichiSyntaxError + """ + return impl.expr_init(expr.Expr(_ti_core.expr_atomic_min(x.ptr, y.ptr), dbg_info=_ti_core.DebugInfo(stack_info()))) + + +@writeback_binary +def atomic_max(x, y): + """Atomically compute the maximum of `x` and `y`, element-wise. + Store the result in `x`, and return the old value of `x`. + + `x` must be a writable target, constant expressions or scalars + are not allowed. + + Args: + x, y (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + The input. + + Returns: + The old value of `x`. + + Example:: + + >>> @ti.kernel + >>> def test(): + >>> x = 1 + >>> y = 2 + >>> z = ti.atomic_max(x, y) + >>> print(x) # 2 the new value of x + >>> print(z) # 1, the old value of x + >>> + >>> ti.atomic_max(1, x) # will raise TaichiSyntaxError + """ + return impl.expr_init(expr.Expr(_ti_core.expr_atomic_max(x.ptr, y.ptr), dbg_info=_ti_core.DebugInfo(stack_info()))) + + +@writeback_binary +def atomic_and(x, y): + """Atomically compute the bit-wise AND of `x` and `y`, element-wise. + Store the result in `x`, and return the old value of `x`. + + `x` must be a writable target, constant expressions or scalars + are not allowed. + + Args: + x, y (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + The input. When both are matrices they must have the same shape. + + Returns: + The old value of `x`. + + Example:: + + >>> @ti.kernel + >>> def test(): + >>> x = ti.Vector([-1, 0, 1]) + >>> y = ti.Vector([1, 2, 3]) + >>> z = ti.atomic_and(x, y) + >>> print(x) # [1, 0, 1] the new value of x + >>> print(z) # [-1, 0, 1], the old value of x + >>> + >>> ti.atomic_and(1, x) # will raise TaichiSyntaxError + """ + return impl.expr_init( + expr.Expr(_ti_core.expr_atomic_bit_and(x.ptr, y.ptr), dbg_info=_ti_core.DebugInfo(stack_info())) + ) + + +@writeback_binary +def atomic_or(x, y): + """Atomically compute the bit-wise OR of `x` and `y`, element-wise. + Store the result in `x`, and return the old value of `x`. + + `x` must be a writable target, constant expressions or scalars + are not allowed. + + Args: + x, y (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + The input. When both are matrices they must have the same shape. + + Returns: + The old value of `x`. + + Example:: + + >>> @ti.kernel + >>> def test(): + >>> x = ti.Vector([-1, 0, 1]) + >>> y = ti.Vector([1, 2, 3]) + >>> z = ti.atomic_or(x, y) + >>> print(x) # [-1, 2, 3] the new value of x + >>> print(z) # [-1, 0, 1], the old value of x + >>> + >>> ti.atomic_or(1, x) # will raise TaichiSyntaxError + """ + return impl.expr_init( + expr.Expr(_ti_core.expr_atomic_bit_or(x.ptr, y.ptr), dbg_info=_ti_core.DebugInfo(stack_info())) + ) + + +@writeback_binary +def atomic_xor(x, y): + """Atomically compute the bit-wise XOR of `x` and `y`, element-wise. + Store the result in `x`, and return the old value of `x`. + + `x` must be a writable target, constant expressions or scalars + are not allowed. + + Args: + x, y (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + The input. When both are matrices they must have the same shape. + + Returns: + The old value of `x`. + + Example:: + + >>> @ti.kernel + >>> def test(): + >>> x = ti.Vector([-1, 0, 1]) + >>> y = ti.Vector([1, 2, 3]) + >>> z = ti.atomic_xor(x, y) + >>> print(x) # [-2, 2, 2] the new value of x + >>> print(z) # [-1, 0, 1], the old value of x + >>> + >>> ti.atomic_xor(1, x) # will raise TaichiSyntaxError + """ + return impl.expr_init( + expr.Expr(_ti_core.expr_atomic_bit_xor(x.ptr, y.ptr), dbg_info=_ti_core.DebugInfo(stack_info())) + ) + + +@writeback_binary +def assign(a, b): + impl.get_runtime().compiling_callable.ast_builder().expr_assign(a.ptr, b.ptr, _ti_core.DebugInfo(stack_info())) + return a + + +def max(*args): # pylint: disable=W0622 + """Compute the maximum of the arguments, element-wise. + + This function takes no effect on a single argument, even it's array-like. + When there are both scalar and matrix arguments in `args`, the matrices + must have the same shape, and scalars will be broadcasted to the same shape as the matrix. + + Args: + args: (List[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + The input. + + Returns: + Maximum of the inputs. + + Example:: + + >>> @ti.kernel + >>> def foo(): + >>> x = ti.Vector([0, 1, 2]) + >>> y = ti.Vector([3, 4, 5]) + >>> z = ti.max(x, y, 4) + >>> print(z) # [4, 4, 5] + """ + num_args = len(args) + assert num_args >= 1 + if num_args == 1: + return args[0] + if num_args == 2: + return max_impl(args[0], args[1]) + return max_impl(args[0], max(*args[1:])) + + +def min(*args): # pylint: disable=W0622 + """Compute the minimum of the arguments, element-wise. + + This function takes no effect on a single argument, even it's array-like. + When there are both scalar and matrix arguments in `args`, the matrices + must have the same shape, and scalars will be broadcasted to the same shape as the matrix. + + Args: + args: (List[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ + The input. + + Returns: + Minimum of the inputs. + + Example:: + + >>> @ti.kernel + >>> def foo(): + >>> x = ti.Vector([0, 1, 2]) + >>> y = ti.Vector([3, 4, 5]) + >>> z = ti.min(x, y, 1) + >>> print(z) # [0, 1, 1] + """ + num_args = len(args) + assert num_args >= 1 + if num_args == 1: + return args[0] + if num_args == 2: + return min_impl(args[0], args[1]) + return min_impl(args[0], min(*args[1:])) + + +__all__ = [ + "acos", + "asin", + "atan2", + "atomic_and", + "atomic_or", + "atomic_xor", + "atomic_max", + "atomic_sub", + "atomic_min", + "atomic_add", + "atomic_mul", + "bit_cast", + "bit_shr", + "cast", + "ceil", + "cos", + "exp", + "floor", + "frexp", + "log", + "random", + "raw_mod", + "raw_div", + "round", + "rsqrt", + "sin", + "sqrt", + "tan", + "tanh", + "max", + "min", + "select", + "abs", + "pow", +] diff --git a/local_packages/taichi/lang/runtime_ops.py b/local_packages/taichi/lang/runtime_ops.py new file mode 100644 index 0000000..a21b85f --- /dev/null +++ b/local_packages/taichi/lang/runtime_ops.py @@ -0,0 +1,11 @@ +from taichi.lang import impl + + +def sync(): + """Blocks the calling thread until all the previously + launched Taichi kernels have completed. + """ + impl.get_runtime().sync() + + +__all__ = ["sync"] diff --git a/local_packages/taichi/lang/shell.py b/local_packages/taichi/lang/shell.py new file mode 100644 index 0000000..0786178 --- /dev/null +++ b/local_packages/taichi/lang/shell.py @@ -0,0 +1,33 @@ +import functools +import os +import sys + +from taichi._lib import core as _ti_core +from taichi._logging import info + +pybuf_enabled = False +_env_enable_pybuf = os.environ.get("TI_ENABLE_PYBUF", "1") +if not _env_enable_pybuf or int(_env_enable_pybuf): + # When using in Jupyter / IDLE, the sys.stdout will be their wrapped ones. + # While sys.__stdout__ should always be the raw console stdout. + pybuf_enabled = sys.stdout is not sys.__stdout__ + +_ti_core.toggle_python_print_buffer(pybuf_enabled) + + +def _shell_pop_print(old_call): + if not pybuf_enabled: + # zero-overhead! + return old_call + + info("Graphical python shell detected, using wrapped sys.stdout") + + @functools.wraps(old_call) + def new_call(*args, **kwargs): + ret = old_call(*args, **kwargs) + # print's in kernel won't take effect until ti.sync(), discussion: + # https://github.com/taichi-dev/taichi/pull/1303#discussion_r444897102 + print(_ti_core.pop_python_print_buffer(), end="") + return ret + + return new_call diff --git a/local_packages/taichi/lang/simt/__init__.py b/local_packages/taichi/lang/simt/__init__.py new file mode 100644 index 0000000..0ebf1c4 --- /dev/null +++ b/local_packages/taichi/lang/simt/__init__.py @@ -0,0 +1,3 @@ +from taichi.lang.simt import block, grid, subgroup, warp + +__all__ = ["warp", "subgroup", "block", "grid"] diff --git a/local_packages/taichi/lang/simt/block.py b/local_packages/taichi/lang/simt/block.py new file mode 100644 index 0000000..6b657f2 --- /dev/null +++ b/local_packages/taichi/lang/simt/block.py @@ -0,0 +1,92 @@ +from taichi._lib import core as _ti_core +from taichi.lang import impl +from taichi.lang.expr import make_expr_group +from taichi.lang.util import taichi_scope + + +def arch_uses_spv(arch): + return arch == _ti_core.vulkan or arch == _ti_core.metal or arch == _ti_core.opengl or arch == _ti_core.dx11 + + +def sync(): + arch = impl.get_runtime().prog.config().arch + if arch == _ti_core.cuda or arch == _ti_core.amdgpu: + return impl.call_internal("block_barrier", with_runtime_context=False) + if arch_uses_spv(arch): + return impl.call_internal("workgroupBarrier", with_runtime_context=False) + raise ValueError(f"ti.block.shared_array is not supported for arch {arch}") + + +def sync_all_nonzero(predicate): + arch = impl.get_runtime().prog.config().arch + if arch == _ti_core.cuda: + return impl.call_internal("block_barrier_and_i32", predicate, with_runtime_context=False) + raise ValueError(f"ti.block.sync_all_nonzero is not supported for arch {arch}") + + +def sync_any_nonzero(predicate): + arch = impl.get_runtime().prog.config().arch + if arch == _ti_core.cuda: + return impl.call_internal("block_barrier_or_i32", predicate, with_runtime_context=False) + raise ValueError(f"ti.block.sync_any_nonzero is not supported for arch {arch}") + + +def sync_count_nonzero(predicate): + arch = impl.get_runtime().prog.config().arch + if arch == _ti_core.cuda: + return impl.call_internal("block_barrier_count_i32", predicate, with_runtime_context=False) + raise ValueError(f"ti.block.sync_count_nonzero is not supported for arch {arch}") + + +def mem_sync(): + arch = impl.get_runtime().prog.config().arch + if arch == _ti_core.cuda: + return impl.call_internal("block_barrier", with_runtime_context=False) + if arch_uses_spv(arch): + return impl.call_internal("workgroupMemoryBarrier", with_runtime_context=False) + raise ValueError(f"ti.block.mem_sync is not supported for arch {arch}") + + +def thread_idx(): + arch = impl.get_runtime().prog.config().arch + if arch_uses_spv(arch): + return impl.call_internal("localInvocationId", with_runtime_context=False) + raise ValueError(f"ti.block.thread_idx is not supported for arch {arch}") + + +def global_thread_idx(): + arch = impl.get_runtime().prog.config().arch + if arch == _ti_core.cuda or _ti_core.amdgpu: + return impl.get_runtime().compiling_callable.ast_builder().insert_thread_idx_expr() + if arch_uses_spv(arch): + return impl.call_internal("globalInvocationId", with_runtime_context=False) + raise ValueError(f"ti.block.global_thread_idx is not supported for arch {arch}") + + +class SharedArray: + _is_taichi_class = True + + def __init__(self, shape, dtype): + if isinstance(shape, int): + self.shape = (shape,) + elif (isinstance(shape, tuple) or isinstance(shape, list)) and all(isinstance(s, int) for s in shape): + self.shape = shape + else: + raise ValueError( + f"ti.simt.block.shared_array shape must be an integer or a tuple of integers, but got {shape}" + ) + if isinstance(dtype, impl.MatrixType): + dtype = dtype.tensor_type + self.dtype = dtype + self.shared_array_proxy = impl.expr_init_shared_array(self.shape, dtype) + + @taichi_scope + def subscript(self, *indices): + ast_builder = impl.get_runtime().compiling_callable.ast_builder() + return impl.Expr( + ast_builder.expr_subscript( + self.shared_array_proxy, + make_expr_group(*indices), + _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()), + ) + ) diff --git a/local_packages/taichi/lang/simt/grid.py b/local_packages/taichi/lang/simt/grid.py new file mode 100644 index 0000000..62bf5e3 --- /dev/null +++ b/local_packages/taichi/lang/simt/grid.py @@ -0,0 +1,5 @@ +from taichi.lang import impl + + +def memfence(): + return impl.call_internal("grid_memfence", with_runtime_context=False) diff --git a/local_packages/taichi/lang/simt/subgroup.py b/local_packages/taichi/lang/simt/subgroup.py new file mode 100644 index 0000000..07c8eaa --- /dev/null +++ b/local_packages/taichi/lang/simt/subgroup.py @@ -0,0 +1,189 @@ +from taichi.lang import impl + + +def barrier(): + return impl.call_internal("subgroupBarrier", with_runtime_context=False) + + +def memory_barrier(): + return impl.call_internal("subgroupMemoryBarrier", with_runtime_context=False) + + +def elect(): + return impl.call_internal("subgroupElect", with_runtime_context=False) + + +def all_true(cond): + # TODO + pass + + +def any_true(cond): + # TODO + pass + + +def all_equal(value): + # TODO + pass + + +def broadcast_first(value): + # TODO + pass + + +def broadcast(value, index): + return impl.call_internal("subgroupBroadcast", value, index, with_runtime_context=False) + + +def group_size(): + return impl.call_internal("subgroupSize", with_runtime_context=False) + + +def invocation_id(): + return impl.call_internal("subgroupInvocationId", with_runtime_context=False) + + +def reduce_add(value): + return impl.call_internal("subgroupAdd", value, with_runtime_context=False) + + +def reduce_mul(value): + return impl.call_internal("subgroupMul", value, with_runtime_context=False) + + +def reduce_min(value): + return impl.call_internal("subgroupMin", value, with_runtime_context=False) + + +def reduce_max(value): + return impl.call_internal("subgroupMax", value, with_runtime_context=False) + + +def reduce_and(value): + return impl.call_internal("subgroupAnd", value, with_runtime_context=False) + + +def reduce_or(value): + return impl.call_internal("subgroupOr", value, with_runtime_context=False) + + +def reduce_xor(value): + return impl.call_internal("subgroupXor", value, with_runtime_context=False) + + +def inclusive_add(value): + return impl.call_internal("subgroupInclusiveAdd", value, with_runtime_context=False) + + +def inclusive_mul(value): + return impl.call_internal("subgroupInclusiveMul", value, with_runtime_context=False) + + +def inclusive_min(value): + return impl.call_internal("subgroupInclusiveMin", value, with_runtime_context=False) + + +def inclusive_max(value): + return impl.call_internal("subgroupInclusiveMax", value, with_runtime_context=False) + + +def inclusive_and(value): + return impl.call_internal("subgroupInclusiveAnd", value, with_runtime_context=False) + + +def inclusive_or(value): + return impl.call_internal("subgroupInclusiveOr", value, with_runtime_context=False) + + +def inclusive_xor(value): + return impl.call_internal("subgroupInclusiveXor", value, with_runtime_context=False) + + +def exclusive_add(value): + # TODO + pass + + +def exclusive_mul(value): + # TODO + pass + + +def exclusive_min(value): + # TODO + pass + + +def exclusive_max(value): + # TODO + pass + + +def exclusive_and(value): + # TODO + pass + + +def exclusive_or(value): + # TODO + pass + + +def exclusive_xor(value): + # TODO + pass + + +def shuffle(value, index): + return impl.call_internal("subgroupShuffle", value, index, with_runtime_context=False) + + +def shuffle_xor(value, mask): + # TODO + pass + + +def shuffle_up(value, offset): + return impl.call_internal("subgroupShuffleUp", value, offset, with_runtime_context=False) + + +def shuffle_down(value, offset): + return impl.call_internal("subgroupShuffleDown", value, offset, with_runtime_context=False) + + +__all__ = [ + "barrier", + "memory_barrier", + "elect", + "all_true", + "any_true", + "all_equal", + "broadcast_first", + "reduce_add", + "reduce_mul", + "reduce_min", + "reduce_max", + "reduce_and", + "reduce_or", + "reduce_xor", + "inclusive_add", + "inclusive_mul", + "inclusive_min", + "inclusive_max", + "inclusive_and", + "inclusive_or", + "inclusive_xor", + "exclusive_add", + "exclusive_mul", + "exclusive_min", + "exclusive_max", + "exclusive_and", + "exclusive_or", + "exclusive_xor", + "shuffle", + "shuffle_xor", + "shuffle_up", + "shuffle_down", +] diff --git a/local_packages/taichi/lang/simt/warp.py b/local_packages/taichi/lang/simt/warp.py new file mode 100644 index 0000000..9822cb5 --- /dev/null +++ b/local_packages/taichi/lang/simt/warp.py @@ -0,0 +1,94 @@ +from taichi.lang import impl + + +def all_nonzero(mask, predicate): + return impl.call_internal("cuda_all_sync_i32", mask, predicate, with_runtime_context=False) + + +def any_nonzero(mask, predicate): + return impl.call_internal("cuda_any_sync_i32", mask, predicate, with_runtime_context=False) + + +def unique(mask, predicate): + return impl.call_internal("cuda_uni_sync_i32", mask, predicate, with_runtime_context=False) + + +def ballot(predicate): + return impl.call_internal("cuda_ballot_i32", predicate, with_runtime_context=False) + + +def shfl_sync_i32(mask, val, offset): + # lane offset is 31 for warp size 32 + return impl.call_internal("cuda_shfl_sync_i32", mask, val, offset, 31, with_runtime_context=False) + + +def shfl_sync_f32(mask, val, offset): + # lane offset is 31 for warp size 32 + return impl.call_internal("cuda_shfl_sync_f32", mask, val, offset, 31, with_runtime_context=False) + + +def shfl_up_i32(mask, val, offset): + # lane offset is 0 for warp size 32 + return impl.call_internal("cuda_shfl_up_sync_i32", mask, val, offset, 0, with_runtime_context=False) + + +def shfl_up_f32(mask, val, offset): + # lane offset is 0 for warp size 32 + return impl.call_internal("cuda_shfl_up_sync_f32", mask, val, offset, 0, with_runtime_context=False) + + +def shfl_down_i32(mask, val, offset): + # lane offset is 31 for warp size 32 + return impl.call_internal("cuda_shfl_down_sync_i32", mask, val, offset, 31, with_runtime_context=False) + + +def shfl_down_f32(mask, val, offset): + # lane offset is 31 for warp size 32 + return impl.call_internal("cuda_shfl_down_sync_f32", mask, val, offset, 31, with_runtime_context=False) + + +def shfl_xor_i32(mask, val, offset): + return impl.call_internal("cuda_shfl_xor_sync_i32", mask, val, offset, 31, with_runtime_context=False) + + +def match_any(mask, value): + # These intrinsics are only available on compute_70 or higher + # https://docs.nvidia.com/cuda/pdf/NVVM_IR_Specification.pdf + if impl.get_cuda_compute_capability() < 70: + raise AssertionError("match_any intrinsic only available on compute_70 or higher") + return impl.call_internal("cuda_match_any_sync_i32", mask, value, with_runtime_context=False) + + +def match_all(mask, val): + # These intrinsics are only available on compute_70 or higher + # https://docs.nvidia.com/cuda/pdf/NVVM_IR_Specification.pdf + if impl.get_cuda_compute_capability() < 70: + raise AssertionError("match_all intrinsic only available on compute_70 or higher") + return impl.call_internal("cuda_match_all_sync_i32", mask, val, with_runtime_context=False) + + +def active_mask(): + return impl.call_internal("cuda_active_mask", with_runtime_context=False) + + +def sync(mask): + return impl.call_internal("warp_barrier", mask, with_runtime_context=False) + + +__all__ = [ + "all_nonzero", + "any_nonzero", + "unique", + "ballot", + "shfl_sync_i32", + "shfl_sync_f32", + "shfl_up_i32", + "shfl_up_f32", + "shfl_down_i32", + "shfl_down_f32", + "shfl_xor_i32", + "match_any", + "match_all", + "active_mask", + "sync", +] diff --git a/local_packages/taichi/lang/snode.py b/local_packages/taichi/lang/snode.py new file mode 100644 index 0000000..152c918 --- /dev/null +++ b/local_packages/taichi/lang/snode.py @@ -0,0 +1,483 @@ +import numbers + +from taichi._lib import core as _ti_core +from taichi.lang import expr, impl, matrix +from taichi.lang.exception import TaichiRuntimeError +from taichi.lang.field import BitpackedFields, Field +from taichi.lang.util import get_traceback + + +class SNode: + """A Python-side SNode wrapper. + + For more information on Taichi's SNode system, please check out + these references: + + * https://docs.taichi-lang.org/docs/sparse + * https://yuanming.taichi.graphics/publication/2019-taichi/taichi-lang.pdf + + Arg: + ptr (pointer): The C++ side SNode pointer. + """ + + def __init__(self, ptr): + self.ptr = ptr + + def dense(self, axes, dimensions): + """Adds a dense SNode as a child component of `self`. + + Args: + axes (List[Axis]): Axes to activate. + dimensions (Union[List[int], int]): Shape of each axis. + + Returns: + The added :class:`~taichi.lang.SNode` instance. + """ + if isinstance(dimensions, numbers.Number): + dimensions = [dimensions] * len(axes) + return SNode(self.ptr.dense(axes, dimensions, _ti_core.DebugInfo(get_traceback()))) + + def pointer(self, axes, dimensions): + """Adds a pointer SNode as a child component of `self`. + + Args: + axes (List[Axis]): Axes to activate. + dimensions (Union[List[int], int]): Shape of each axis. + + Returns: + The added :class:`~taichi.lang.SNode` instance. + """ + if not _ti_core.is_extension_supported(impl.current_cfg().arch, _ti_core.Extension.sparse): + raise TaichiRuntimeError("Pointer SNode is not supported on this backend.") + if isinstance(dimensions, numbers.Number): + dimensions = [dimensions] * len(axes) + return SNode(self.ptr.pointer(axes, dimensions, _ti_core.DebugInfo(get_traceback()))) + + @staticmethod + def _hash(axes, dimensions): + # original code is #def hash(self,axes, dimensions) without #@staticmethod before fix pylint R0201 + """Not supported.""" + raise RuntimeError("hash not yet supported") + # if isinstance(dimensions, int): + # dimensions = [dimensions] * len(axes) + # return SNode(self.ptr.hash(axes, dimensions)) + + def dynamic(self, axis, dimension, chunk_size=None): + """Adds a dynamic SNode as a child component of `self`. + + Args: + axis (List[Axis]): Axis to activate, must be 1. + dimension (int): Shape of the axis. + chunk_size (int): Chunk size. + + Returns: + The added :class:`~taichi.lang.SNode` instance. + """ + if not _ti_core.is_extension_supported(impl.current_cfg().arch, _ti_core.Extension.sparse): + raise TaichiRuntimeError("Dynamic SNode is not supported on this backend.") + assert len(axis) == 1 + if chunk_size is None: + chunk_size = dimension + return SNode(self.ptr.dynamic(axis[0], dimension, chunk_size, _ti_core.DebugInfo(get_traceback()))) + + def bitmasked(self, axes, dimensions): + """Adds a bitmasked SNode as a child component of `self`. + + Args: + axes (List[Axis]): Axes to activate. + dimensions (Union[List[int], int]): Shape of each axis. + + Returns: + The added :class:`~taichi.lang.SNode` instance. + """ + if not _ti_core.is_extension_supported(impl.current_cfg().arch, _ti_core.Extension.sparse): + raise TaichiRuntimeError("Bitmasked SNode is not supported on this backend.") + if isinstance(dimensions, numbers.Number): + dimensions = [dimensions] * len(axes) + return SNode(self.ptr.bitmasked(axes, dimensions, _ti_core.DebugInfo(get_traceback()))) + + def quant_array(self, axes, dimensions, max_num_bits): + """Adds a quant_array SNode as a child component of `self`. + + Args: + axes (List[Axis]): Axes to activate. + dimensions (Union[List[int], int]): Shape of each axis. + max_num_bits (int): Maximum number of bits it can hold. + + Returns: + The added :class:`~taichi.lang.SNode` instance. + """ + if isinstance(dimensions, numbers.Number): + dimensions = [dimensions] * len(axes) + return SNode(self.ptr.quant_array(axes, dimensions, max_num_bits, _ti_core.DebugInfo(get_traceback()))) + + def place(self, *args, offset=None): + """Places a list of Taichi fields under the `self` container. + + Args: + *args (List[ti.field]): A list of Taichi fields to place. + offset (Union[Number, tuple[Number]]): Offset of the field domain. + + Returns: + The `self` container. + """ + if offset is None: + offset = () + if isinstance(offset, numbers.Number): + offset = (offset,) + + for arg in args: + if isinstance(arg, BitpackedFields): + bit_struct_type = arg.bit_struct_type_builder.build() + bit_struct_snode = self.ptr.bit_struct(bit_struct_type, _ti_core.DebugInfo(get_traceback())) + for field, id_in_bit_struct in arg.fields: + bit_struct_snode.place(field, offset, id_in_bit_struct) + elif isinstance(arg, Field): + for var in arg._get_field_members(): + self.ptr.place(var.ptr, offset, -1) + elif isinstance(arg, list): + for x in arg: + self.place(x, offset=offset) + else: + raise ValueError(f"{arg} cannot be placed") + return self + + def lazy_grad(self): + """Automatically place the adjoint fields following the layout of their primal fields. + + Users don't need to specify ``needs_grad`` when they define scalar/vector/matrix fields (primal fields) using autodiff. + When all the primal fields are defined, using ``taichi.root.lazy_grad()`` could automatically generate + their corresponding adjoint fields (gradient field). + + To know more details about primal, adjoint fields and ``lazy_grad()``, + please see Page 4 and Page 13-14 of DiffTaichi Paper: https://arxiv.org/pdf/1910.00935.pdf + """ + self.ptr.lazy_grad() + + def lazy_dual(self): + """Automatically place the dual fields following the layout of their primal fields.""" + self.ptr.lazy_dual() + + def _allocate_adjoint_checkbit(self): + """Automatically place the adjoint flag fields following the layout of their primal fields for global data access rule checker""" + self.ptr.allocate_adjoint_checkbit() + + def parent(self, n=1): + """Gets an ancestor of `self` in the SNode tree. + + Args: + n (int): the number of levels going up from `self`. + + Returns: + Union[None, _Root, SNode]: The n-th parent of `self`. + """ + p = self.ptr + while p and n > 0: + p = p.parent + n -= 1 + if p is None: + return None + + if p.type == _ti_core.SNodeType.root: + return impl.root + + return SNode(p) + + def _path_from_root(self): + """Gets the path from root to `self` in the SNode tree. + + Returns: + List[Union[_Root, SNode]]: The list of SNodes on the path from root to `self`. + """ + p = self + res = [p] + while p != impl.root: + p = p.parent() + res.append(p) + res.reverse() + return res + + @property + def _dtype(self): + """Gets the data type of `self`. + + Returns: + DataType: The data type of `self`. + """ + return self.ptr.data_type() + + @property + def _id(self): + """Gets the id of `self`. + + Returns: + int: The id of `self`. + """ + return self.ptr.id + + @property + def _snode_tree_id(self): + return self.ptr.get_snode_tree_id() + + @property + def shape(self): + """Gets the number of elements from root in each axis of `self`. + + Returns: + Tuple[int]: The number of elements from root in each axis of `self`. + """ + dim = self.ptr.num_active_indices() + ret = tuple(self.ptr.get_shape_along_axis(i) for i in range(dim)) + + return ret + + def _loop_range(self): + """Gets the taichi_python.SNode to serve as loop range. + + Returns: + taichi_python.SNode: See above. + """ + return self.ptr + + @property + def _name(self): + """Gets the name of `self`. + + Returns: + str: The name of `self`. + """ + return self.ptr.name() + + @property + def _snode(self): + """Gets `self`. + Returns: + SNode: `self`. + """ + return self + + def _get_children(self): + """Gets all children components of `self`. + + Returns: + List[SNode]: All children components of `self`. + """ + children = [] + for i in range(self.ptr.get_num_ch()): + children.append(SNode(self.ptr.get_ch(i))) + return children + + @property + def _num_dynamically_allocated(self): + runtime = impl.get_runtime() + runtime.materialize_root_fb(False) + return runtime.prog.get_snode_num_dynamically_allocated(self.ptr) + + @property + def _cell_size_bytes(self): + impl.get_runtime().materialize_root_fb(False) + return self.ptr.cell_size_bytes + + @property + def _offset_bytes_in_parent_cell(self): + impl.get_runtime().materialize_root_fb(False) + return self.ptr.offset_bytes_in_parent_cell + + def deactivate_all(self): + """Recursively deactivate all children components of `self`.""" + ch = self._get_children() + for c in ch: + c.deactivate_all() + SNodeType = _ti_core.SNodeType + if self.ptr.type == SNodeType.pointer or self.ptr.type == SNodeType.bitmasked: + from taichi._kernels import snode_deactivate # pylint: disable=C0415 + + snode_deactivate(self) + if self.ptr.type == SNodeType.dynamic: + # Note that dynamic nodes are different from other sparse nodes: + # instead of deactivating each element, we only need to deactivate + # its parent, whose linked list of chunks of elements will be deleted. + from taichi._kernels import ( # pylint: disable=C0415 + snode_deactivate_dynamic, + ) + + snode_deactivate_dynamic(self) + + def __repr__(self): + type_ = str(self.ptr.type)[len("SNodeType.") :] + return f"" + + def __str__(self): + # ti.root.dense(ti.i, 3).dense(ti.jk, (4, 5)).place(x) + # ti.root => dense [3] => dense [3, 4, 5] => place [3, 4, 5] + type_ = str(self.ptr.type)[len("SNodeType.") :] + shape = str(list(self.shape)) + parent = str(self.parent()) + return f"{parent} => {type_} {shape}" + + def __eq__(self, other): + return self.ptr == other.ptr + + def _physical_index_position(self): + """Gets mappings from virtual axes to physical axes. + + Returns: + Dict[int, int]: Mappings from virtual axes to physical axes. + """ + ret = {} + for virtual, physical in enumerate(self.ptr.get_physical_index_position()): + if physical != -1: + ret[virtual] = physical + return ret + + +def rescale_index(a, b, I): + """Rescales the index 'I' of field (or SNode) 'a' to match the shape of SNode 'b'. + + Args: + + a, b (Union[:class:`~taichi.Field`, :class:`~taichi.MatrixField`): Input taichi fields or snodes. + I (Union[list, :class:`~taichi.Vector`]): grouped loop index. + + Returns: + Ib (:class:`~taichi.Vector`): rescaled grouped loop index + """ + + assert isinstance(a, (Field, SNode)), "The first argument must be a field or an SNode" + assert isinstance(b, (Field, SNode)), "The second argument must be a field or an SNode" + if isinstance(I, list): + n = len(I) + else: + assert isinstance( + I, (expr.Expr, matrix.Matrix) + ), "The third argument must be an index (list, ti.Vector, or Expr with TensorType)" + n = I.n + + from taichi.lang.kernel_impl import pyfunc # pylint: disable=C0415 + + @pyfunc + def _rescale_index(): + result = matrix.Vector([I[i] for i in range(n)]) + for i in impl.static(range(min(n, min(len(a.shape), len(b.shape))))): + if a.shape[i] > b.shape[i]: + result[i] = I[i] // (a.shape[i] // b.shape[i]) + if a.shape[i] < b.shape[i]: + result[i] = I[i] * (b.shape[i] // a.shape[i]) + return result + + return _rescale_index() + + +def append(node, indices, val): + """Append a value `val` to a SNode `node` at index `indices`. + + Args: + node (:class:`~taichi.SNode`): Input SNode. + indices (Union[int, :class:`~taichi.Vector`]): the indices to visit. + val (Union[:mod:`~taichi.types.primitive_types`, :mod:`~taichi.types.compound_types`]): the data to be appended. + """ + ptrs = expr._get_flattened_ptrs(val) + append_expr = expr.Expr( + impl.get_runtime() + .compiling_callable.ast_builder() + .expr_snode_append(node._snode.ptr, expr.make_expr_group(indices), ptrs), + dbg_info=_ti_core.DebugInfo(impl.get_runtime().get_current_src_info()), + ) + a = impl.expr_init(append_expr) + return a + + +def is_active(node, indices): + """Explicitly query whether a cell in a SNode `node` at location + `indices` is active or not. + + Args: + node (:class:`~taichi.SNode`): Must be a pointer, hash or bitmasked node. + indices (Union[int, list, :class:`~taichi.Vector`]): the indices to visit. + + Returns: + bool: the cell `node[indices]` is active or not. + """ + return expr.Expr( + impl.get_runtime() + .compiling_callable.ast_builder() + .expr_snode_is_active(node._snode.ptr, expr.make_expr_group(indices)), + dbg_info=_ti_core.DebugInfo(impl.get_runtime().get_current_src_info()), + ) + + +def activate(node, indices): + """Explicitly activate a cell of `node` at location `indices`. + + Args: + node (:class:`~taichi.SNode`): Must be a pointer, hash or bitmasked node. + indices (Union[int, :class:`~taichi.Vector`]): the indices to activate. + """ + impl.get_runtime().compiling_callable.ast_builder().insert_activate( + node._snode.ptr, expr.make_expr_group(indices), _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()) + ) + + +def deactivate(node, indices): + """Explicitly deactivate a cell of `node` at location `indices`. + + After deactivation, the Taichi runtime automatically recycles and zero-fills + the memory of the deactivated cell. + + Args: + node (:class:`~taichi.SNode`): Must be a pointer, hash or bitmasked node. + indices (Union[int, :class:`~taichi.Vector`]): the indices to deactivate. + """ + impl.get_runtime().compiling_callable.ast_builder().insert_deactivate( + node._snode.ptr, expr.make_expr_group(indices), _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()) + ) + + +def length(node, indices): + """Return the length of the dynamic SNode `node` at index `indices`. + + Args: + node (:class:`~taichi.SNode`): a dynamic SNode. + indices (Union[int, :class:`~taichi.Vector`]): the indices to query. + + Returns: + int: the length of cell `node[indices]`. + """ + return expr.Expr( + impl.get_runtime() + .compiling_callable.ast_builder() + .expr_snode_length(node._snode.ptr, expr.make_expr_group(indices)), + dbg_info=_ti_core.DebugInfo(impl.get_runtime().get_current_src_info()), + ) + + +def get_addr(f, indices): + """Query the memory address (on CUDA/x64) of field `f` at index `indices`. + + Currently, this function can only be called inside a taichi kernel. + + Args: + f (Union[:class:`~taichi.Field`, :class:`~taichi.MatrixField`]): Input taichi field for memory address query. + indices (Union[int, :class:`~taichi.Vector`]): The specified field indices of the query. + + Returns: + ti.u64: The memory address of `f[indices]`. + """ + return expr.Expr( + impl.get_runtime() + .compiling_callable.ast_builder() + .expr_snode_get_addr(f._snode.ptr, expr.make_expr_group(indices)), + dbg_info=_ti_core.DebugInfo(impl.get_runtime().get_current_src_info()), + ) + + +__all__ = [ + "activate", + "append", + "deactivate", + "get_addr", + "is_active", + "length", + "rescale_index", + "SNode", +] diff --git a/local_packages/taichi/lang/source_builder.py b/local_packages/taichi/lang/source_builder.py new file mode 100644 index 0000000..6398a02 --- /dev/null +++ b/local_packages/taichi/lang/source_builder.py @@ -0,0 +1,148 @@ +import atexit +import ctypes +import os +import shutil +import subprocess +import tempfile + +from taichi._lib import core as _ti_core +from taichi.lang import impl +from taichi.lang.exception import TaichiSyntaxError +from taichi.lang.expr import make_expr_group +from taichi.lang.util import get_clangpp + + +class SourceBuilder: + def __init__(self): + self.bc = None + self.so = None + self.mode = None + self.td = None + + def cleanup(): + if self.td is not None: + shutil.rmtree(self.td) + + atexit.register(cleanup) + + @classmethod + def from_file(cls, filename, compile_fn=None, _temp_dir=None): + self = cls() + self.td = _temp_dir + if self.td is None: + self.td = tempfile.mkdtemp() + + if filename.endswith((".cpp", ".c", ".cc")): + if impl.current_cfg().arch not in [_ti_core.Arch.x64, _ti_core.Arch.cuda]: + raise TaichiSyntaxError("Unsupported arch for external function call") + if compile_fn is None: + + def compile_fn_impl(filename): + if impl.current_cfg().arch == _ti_core.Arch.x64: + subprocess.call( + get_clangpp() + " -flto -c " + filename + " -o " + os.path.join(self.td, "source.bc"), + shell=True, + ) + else: + subprocess.call( + get_clangpp() + + " -flto -c " + + filename + + " -o " + + os.path.join(self.td, "source.bc") + + " -target nvptx64-nvidia-cuda", + shell=True, + ) + return os.path.join(self.td, "source.bc") + + compile_fn = compile_fn_impl + self.bc = compile_fn(filename) + self.mode = "bc" + elif filename.endswith(".cu"): + if impl.current_cfg().arch not in [_ti_core.Arch.cuda]: + raise TaichiSyntaxError("Unsupported arch for external function call") + if compile_fn is None: + shutil.copy(filename, os.path.join(self.td, "source.cu")) + + def compile_fn_impl(filename): + # Cannot use -o to specify multiple output files + subprocess.call( + get_clangpp() + + " " + + os.path.join(self.td, "source.cu") + + " -c -emit-llvm -std=c++17 --cuda-gpu-arch=sm_50 -nocudalib", + cwd=self.td, + shell=True, + ) + return os.path.join(self.td, "source-cuda-nvptx64-nvidia-cuda-sm_50.bc") + + compile_fn = compile_fn_impl + self.bc = compile_fn(filename) + self.mode = "bc" + elif filename.endswith((".so", ".dylib", ".dll")): + if impl.current_cfg().arch not in [_ti_core.Arch.x64]: + raise TaichiSyntaxError("Unsupported arch for external function call") + self.so = ctypes.CDLL(filename) + self.mode = "so" + elif filename.endswith(".ll"): + if impl.current_cfg().arch not in [_ti_core.Arch.x64, _ti_core.Arch.cuda]: + raise TaichiSyntaxError("Unsupported arch for external function call") + subprocess.call( + "llvm-as " + filename + " -o " + os.path.join(self.td, "source.bc"), + shell=True, + ) + self.bc = os.path.join(self.td, "source.bc") + self.mode = "bc" + elif filename.endswith(".bc"): + if impl.current_cfg().arch not in [_ti_core.Arch.x64, _ti_core.Arch.cuda]: + raise TaichiSyntaxError("Unsupported arch for external function call") + self.bc = filename + self.mode = "bc" + else: + raise TaichiSyntaxError("Unsupported file type for external function call.") + return self + + @classmethod + def from_source(cls, source_code, compile_fn=None): + if impl.current_cfg().arch not in [_ti_core.Arch.x64, _ti_core.Arch.cuda]: + raise TaichiSyntaxError("Unsupported arch for external function call") + _temp_dir = tempfile.mkdtemp() + _temp_source = os.path.join(_temp_dir, "_temp_source.cpp") + with open(_temp_source, "w") as f: + f.write(source_code) + return SourceBuilder.from_file(_temp_source, compile_fn, _temp_dir) + + def __getattr__(self, item): + def bitcode_func_call_wrapper(*args): + impl.get_runtime().compiling_callable.ast_builder().insert_external_func_call( + 0, + "", + self.bc, + item, + make_expr_group(args), + make_expr_group([]), + _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()), + ) + + if self.mode == "bc": + return bitcode_func_call_wrapper + + def external_func_call_wrapper(args=[], outputs=[]): + func_addr = ctypes.cast(self.so.__getattr__(item), ctypes.c_void_p).value + impl.get_runtime().compiling_callable.ast_builder().insert_external_func_call( + func_addr, + "", + "", + "", + make_expr_group(args), + make_expr_group(outputs), + _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()), + ) + + if self.mode == "so": + return external_func_call_wrapper + + raise TaichiSyntaxError("Error occurs when calling external function.") + + +__all__ = [] diff --git a/local_packages/taichi/lang/struct.py b/local_packages/taichi/lang/struct.py new file mode 100644 index 0000000..6d1f6f2 --- /dev/null +++ b/local_packages/taichi/lang/struct.py @@ -0,0 +1,852 @@ +import numbers +from types import MethodType + +import numpy as np +from taichi._lib import core as _ti_core +from taichi.lang import expr, impl, ops +from taichi.lang.enums import Layout +from taichi.lang.exception import ( + TaichiRuntimeTypeError, + TaichiSyntaxError, + TaichiTypeError, +) +from taichi.lang.expr import Expr +from taichi.lang.field import Field, ScalarField, SNodeHostAccess +from taichi.lang.matrix import Matrix, MatrixType +from taichi.lang.util import cook_dtype, in_python_scope, python_scope, taichi_scope +from taichi.types import primitive_types +from taichi.types.compound_types import CompoundType +from taichi.types.utils import is_signed + + +class Struct: + """The Struct type class. + + A struct is a dictionary-like data structure that stores members as + (key, value) pairs. Valid data members of a struct can be scalars, + matrices or other dictionary-like structures. + + Args: + entries (Dict[str, Union[Dict, Expr, Matrix, Struct]]): \ + keys and values for struct members. Entries can optionally + include a dictionary of functions with the key '__struct_methods' + which will be attached to the struct for executing on the struct data. + + Returns: + An instance of this struct. + + Example:: +_ + >>> vec3 = ti.types.vector(3, ti.f32) + >>> a = ti.Struct(v=vec3([0, 0, 0]), t=1.0) + >>> print(a.items) + dict_items([('v', [0. 0. 0.]), ('t', 1.0)]) + >>> + >>> B = ti.Struct(v=vec3([0., 0., 0.]), t=1.0, A=a) + >>> print(B.items) + dict_items([('v', [0. 0. 0.]), ('t', 1.0), ('A', {'v': [[0.], [0.], [0.]], 't': 1.0})]) + """ + + _is_taichi_class = True + _instance_count = 0 + + def __init__(self, *args, **kwargs): + # converts lists to matrices and dicts to structs + if len(args) == 1 and kwargs == {} and isinstance(args[0], dict): + self.__entries = args[0] + elif len(args) == 0: + self.__entries = kwargs + else: + raise TaichiSyntaxError( + "Custom structs need to be initialized using either dictionary or keyword arguments" + ) + self.__methods = self.__entries.pop("__struct_methods", {}) + matrix_ndim = self.__entries.pop("__matrix_ndim", {}) + self._register_methods() + + for k, v in self.__entries.items(): + if isinstance(v, (list, tuple)): + v = Matrix(v) + if isinstance(v, dict): + v = Struct(v) + self.__entries[k] = v if in_python_scope() else impl.expr_init(v) + self._register_members() + self.__dtype = None + + @property + def keys(self): + """Returns the list of member names in string format. + + Example:: + + >>> vec3 = ti.types.vector(3, ti.f32) + >>> sphere = ti.Struct(center=vec3([0, 0, 0]), radius=1.0) + >>> a.keys + ['center', 'radius'] + """ + return list(self.__entries.keys()) + + @property + def _members(self): + return list(self.__entries.values()) + + @property + def entries(self): + return self.__entries + + @property + def methods(self): + return self.__methods + + @property + def items(self): + """Returns the items in this struct. + + Example:: + + >>> vec3 = ti.types.vector(3, ti.f32) + >>> sphere = ti.Struct(center=vec3([0, 0, 0]), radius=1.0) + >>> sphere.items + dict_items([('center', 2), ('radius', 1.0)]) + """ + return self.__entries.items() + + def _register_members(self): + # https://stackoverflow.com/questions/48448074/adding-a-property-to-an-existing-object-instance + cls = self.__class__ + new_cls_name = cls.__name__ + str(cls._instance_count) + cls._instance_count += 1 + properties = {k: property(cls._make_getter(k), cls._make_setter(k)) for k in self.keys} + self.__class__ = type(new_cls_name, (cls,), properties) + + def _register_methods(self): + for name, method in self.__methods.items(): + # use MethodType to pass self (this object) to the method + setattr(self, name, MethodType(method, self)) + + def __getitem__(self, key): + ret = self.__entries[key] + if isinstance(ret, SNodeHostAccess): + ret = ret.accessor.getter(*ret.key) + return ret + + def __setitem__(self, key, value): + if isinstance(self.__entries[key], SNodeHostAccess): + self.__entries[key].accessor.setter(value, *self.__entries[key].key) + else: + if in_python_scope(): + if isinstance(self.__entries[key], Struct) or isinstance(self.__entries[key], Matrix): + self.__entries[key]._set_entries(value) + else: + if isinstance(value, numbers.Number): + self.__entries[key] = value + else: + raise TypeError("A number is expected when assigning struct members") + else: + self.__entries[key] = value + + def _set_entries(self, value): + if isinstance(value, dict): + value = Struct(value) + for k in self.keys: + self[k] = value[k] + self.__dtype = value.__dtype + + @staticmethod + def _make_getter(key): + def getter(self): + """Get an entry from custom struct by name.""" + return self[key] + + return getter + + @staticmethod + def _make_setter(key): + @python_scope + def setter(self, value): + self[key] = value + + return setter + + @taichi_scope + def _assign(self, other): + if not isinstance(other, (dict, Struct)): + raise TaichiTypeError("Only dict or Struct can be assigned to a Struct") + if isinstance(other, dict): + other = Struct(other) + if self.__entries.keys() != other.__entries.keys(): + raise TaichiTypeError(f"Member mismatch between structs {self.keys}, {other.keys}") + for k, v in self.items: + v._assign(other.__entries[k]) + self.__dtype = other.__dtype + return self + + def __len__(self): + """Get the number of entries in a custom struct""" + return len(self.__entries) + + def __iter__(self): + return self.__entries.values() + + def __str__(self): + """Python scope struct array print support.""" + if impl.inside_kernel(): + item_str = ", ".join([str(k) + "=" + str(v) for k, v in self.items]) + item_str += f", struct_methods={self.__methods}" + return f"" + return str(self.to_dict()) + + def __repr__(self): + return str(self.to_dict()) + + def to_dict(self, include_methods=False, include_ndim=False): + """Converts the Struct to a dictionary. + + Args: + include_methods (bool): Whether any struct methods should be included + in the result dictionary under the key '__struct_methods'. + + Returns: + Dict: The result dictionary. + """ + res_dict = { + k: ( + v.to_dict(include_methods=include_methods, include_ndim=include_ndim) + if isinstance(v, Struct) + else v.to_list() if isinstance(v, Matrix) else v + ) + for k, v in self.__entries.items() + } + if include_methods: + res_dict["__struct_methods"] = self.__methods + if include_ndim: + res_dict["__matrix_ndim"] = dict() + for k, v in self.__entries.items(): + if isinstance(v, Matrix): + res_dict["__matrix_ndim"][k] = v.ndim + return res_dict + + @classmethod + @python_scope + def field( + cls, + members, + methods={}, + shape=None, + name="", + offset=None, + needs_grad=False, + needs_dual=False, + layout=Layout.AOS, + ): + """Creates a :class:`~taichi.StructField` with each element + has this struct as its type. + + Args: + members (dict): a dict, each item is like `name: type`. + methods (dict): a dict of methods that should be included with + the field. Each struct item of the field will have the + methods as instance functions. + shape (Tuple[int]): width and height of the field. + offset (Tuple[int]): offset of the indices of the created field. + For example if `offset=(-10, -10)` the indices of the field + will start at `(-10, -10)`, not `(0, 0)`. + needs_grad (bool): enabling grad field (reverse mode autodiff) or not. + needs_dual (bool): enabling dual field (forward mode autodiff) or not. + layout: AOS or SOA. + + Example: + + >>> vec3 = ti.types.vector(3, ti.f32) + >>> sphere = {"center": vec3, "radius": float} + >>> F = ti.Struct.field(sphere, shape=(3, 3)) + >>> F + {'center': array([[[0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.]], + + [[0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.]], + + [[0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.]]], dtype=float32), 'radius': array([[0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.]], dtype=float32)} + """ + + if shape is None and offset is not None: + raise TaichiSyntaxError("shape cannot be None when offset is being set") + + field_dict = {} + + for key, dtype in members.items(): + field_name = name + "." + key + if isinstance(dtype, CompoundType): + if isinstance(dtype, StructType): + field_dict[key] = dtype.field( + shape=None, + name=field_name, + offset=offset, + needs_grad=needs_grad, + needs_dual=needs_dual, + ) + else: + field_dict[key] = dtype.field( + shape=None, + name=field_name, + offset=offset, + needs_grad=needs_grad, + needs_dual=needs_dual, + ndim=getattr(dtype, "ndim", 2), + ) + else: + field_dict[key] = impl.field( + dtype, + shape=None, + name=field_name, + offset=offset, + needs_grad=needs_grad, + needs_dual=needs_dual, + ) + + if shape is not None: + if isinstance(shape, numbers.Number): + shape = (shape,) + if isinstance(offset, numbers.Number): + offset = (offset,) + + if offset is not None and len(shape) != len(offset): + raise TaichiSyntaxError( + f"The dimensionality of shape and offset must be the same ({len(shape)} != {len(offset)})" + ) + dim = len(shape) + if layout == Layout.SOA: + for e in field_dict.values(): + impl.root.dense(impl.index_nd(dim), shape).place(e, offset=offset) + if needs_grad: + for e in field_dict.values(): + impl.root.dense(impl.index_nd(dim), shape).place(e.grad, offset=offset) + if needs_dual: + for e in field_dict.values(): + impl.root.dense(impl.index_nd(dim), shape).place(e.dual, offset=offset) + else: + impl.root.dense(impl.index_nd(dim), shape).place(*tuple(field_dict.values()), offset=offset) + if needs_grad: + grads = tuple(e.grad for e in field_dict.values()) + impl.root.dense(impl.index_nd(dim), shape).place(*grads, offset=offset) + + if needs_dual: + duals = tuple(e.dual for e in field_dict.values()) + impl.root.dense(impl.index_nd(dim), shape).place(*duals, offset=offset) + + return StructField(field_dict, methods, name=name) + + +class _IntermediateStruct(Struct): + """Intermediate struct class for compiler internal use only. + + Args: + entries (Dict[str, Union[Expr, Matrix, Struct]]): keys and values for struct members. + Any methods included under the key '__struct_methods' will be applied to each + struct instance. + """ + + def __init__(self, entries): + assert isinstance(entries, dict) + self._Struct__methods = entries.pop("__struct_methods", {}) + self._register_methods() + self._Struct__entries = entries + self._register_members() + + +class StructField(Field): + """Taichi struct field with SNode implementation. + + Instead of directly constraining Expr entries, the StructField object + directly hosts members as `Field` instances to support nested structs. + + Args: + field_dict (Dict[str, Field]): Struct field members. + struct_methods (Dict[str, callable]): Dictionary of functions to apply + to each struct instance in the field. + name (string, optional): The custom name of the field. + """ + + def __init__(self, field_dict, struct_methods, name=None, is_primal=True): + # will not call Field initializer + self.field_dict = field_dict + self.struct_methods = struct_methods + self.name = name + self.grad = None + self.dual = None + if is_primal: + grad_field_dict = {} + for k, v in self.field_dict.items(): + grad_field_dict[k] = v.grad + self.grad = StructField(grad_field_dict, struct_methods, name + ".grad", is_primal=False) + + dual_field_dict = {} + for k, v in self.field_dict.items(): + dual_field_dict[k] = v.dual + self.dual = StructField(dual_field_dict, struct_methods, name + ".dual", is_primal=False) + self._register_fields() + + @property + def keys(self): + """Returns the list of names of the field members. + + Example:: + + >>> f1 = ti.Vector.field(3, ti.f32, shape=(3, 3)) + >>> f2 = ti.field(ti.f32, shape=(3, 3)) + >>> F = ti.StructField({"center": f1, "radius": f2}) + >>> F.keys + ['center', 'radius'] + """ + return list(self.field_dict.keys()) + + @property + def _members(self): + return list(self.field_dict.values()) + + @property + def _items(self): + return self.field_dict.items() + + @staticmethod + def _make_getter(key): + def getter(self): + """Get an entry from custom struct by name.""" + return self.field_dict[key] + + return getter + + @staticmethod + def _make_setter(key): + @python_scope + def setter(self, value): + self.field_dict[key] = value + + return setter + + def _register_fields(self): + for k in self.keys: + setattr(self, k, self.field_dict[k]) + + def _get_field_members(self): + """Gets A flattened list of all struct elements. + + Returns: + A list of struct elements. + """ + field_members = [] + for m in self._members: + assert isinstance(m, Field) + field_members += m._get_field_members() + return field_members + + @property + def _snode(self): + """Gets representative SNode for info purposes. + + Returns: + SNode: Representative SNode (SNode of first field member). + """ + return self._members[0]._snode + + def _loop_range(self): + """Gets SNode of representative field member for loop range info. + + Returns: + taichi_python.SNode: SNode of representative (first) field member. + """ + return self._members[0]._loop_range() + + @python_scope + def copy_from(self, other): + """Copies all elements from another field. + + The shape of the other field needs to be the same as `self`. + + Args: + other (Field): The source field. + """ + assert isinstance(other, Field) + assert set(self.keys) == set(other.keys) + for k in self.keys: + self.field_dict[k].copy_from(other.get_member_field(k)) + + @python_scope + def fill(self, val): + """Fills this struct field with a specified value. + + Args: + val (Union[int, float]): Value to fill. + """ + for v in self._members: + v.fill(val) + + def _initialize_host_accessors(self): + for v in self._members: + v._initialize_host_accessors() + + def get_member_field(self, key): + """Creates a ScalarField using a specific field member. + + Args: + key (str): Specified key of the field member. + + Returns: + ScalarField: The result ScalarField. + """ + return self.field_dict[key] + + @python_scope + def from_numpy(self, array_dict): + """Copies the data from a set of `numpy.array` into this field. + + The argument `array_dict` must be a dictionay-like object, it + contains all the keys in this field and the copying process + between corresponding items can be performed. + """ + for k, v in self._items: + v.from_numpy(array_dict[k]) + + @python_scope + def from_torch(self, array_dict): + """Copies the data from a set of `torch.tensor` into this field. + + The argument `array_dict` must be a dictionay-like object, it + contains all the keys in this field and the copying process + between corresponding items can be performed. + """ + for k, v in self._items: + v.from_torch(array_dict[k]) + + @python_scope + def from_paddle(self, array_dict): + """Copies the data from a set of `paddle.Tensor` into this field. + + The argument `array_dict` must be a dictionay-like object, it + contains all the keys in this field and the copying process + between corresponding items can be performed. + """ + for k, v in self._items: + v.from_paddle(array_dict[k]) + + @python_scope + def to_numpy(self): + """Converts the Struct field instance to a dictionary of NumPy arrays. + + The dictionary may be nested when converting nested structs. + + Returns: + Dict[str, Union[numpy.ndarray, Dict]]: The result NumPy array. + """ + return {k: v.to_numpy() for k, v in self._items} + + @python_scope + def to_torch(self, device=None): + """Converts the Struct field instance to a dictionary of PyTorch tensors. + + The dictionary may be nested when converting nested structs. + + Args: + device (torch.device, optional): The + desired device of returned tensor. + + Returns: + Dict[str, Union[torch.Tensor, Dict]]: The result + PyTorch tensor. + """ + return {k: v.to_torch(device=device) for k, v in self._items} + + @python_scope + def to_paddle(self, place=None): + """Converts the Struct field instance to a dictionary of Paddle tensors. + + The dictionary may be nested when converting nested structs. + + Args: + place (paddle.CPUPlace()/CUDAPlace(n), optional): The + desired place of returned tensor. + + Returns: + Dict[str, Union[paddle.Tensor, Dict]]: The result + Paddle tensor. + """ + return {k: v.to_paddle(place=place) for k, v in self._items} + + @python_scope + def __setitem__(self, indices, element): + self._initialize_host_accessors() + self[indices]._set_entries(element) + + @python_scope + def __getitem__(self, indices): + self._initialize_host_accessors() + # scalar fields does not instantiate SNodeHostAccess by default + entries = { + k: v._host_access(self._pad_key(indices))[0] if isinstance(v, ScalarField) else v[indices] + for k, v in self._items + } + entries["__struct_methods"] = self.struct_methods + return Struct(entries) + + +class StructType(CompoundType): + def __init__(self, **kwargs): + self.members = {} + self.methods = {} + elements = [] + for k, dtype in kwargs.items(): + if k == "__struct_methods": + self.methods = dtype + elif isinstance(dtype, StructType): + self.members[k] = dtype + elements.append([dtype.dtype, k]) + elif isinstance(dtype, MatrixType): + self.members[k] = dtype + elements.append([dtype.tensor_type, k]) + else: + dtype = cook_dtype(dtype) + self.members[k] = dtype + elements.append([dtype, k]) + self.dtype = _ti_core.get_type_factory_instance().get_struct_type(elements) + + def __call__(self, *args, **kwargs): + """Create an instance of this struct type.""" + d = {} + items = self.members.items() + # iterate over the members of this struct + for index, pair in enumerate(items): + name, dtype = pair # (member name, member type) + if index < len(args): # set from args + data = args[index] + else: # set from kwargs + data = kwargs.get(name, 0) + + # If dtype is CompoundType and data is a scalar, it cannot be + # casted in the self.cast call later. We need an initialization here. + if isinstance(dtype, CompoundType) and not isinstance(data, (dict, Struct)): + data = dtype(data) + + d[name] = data + + entries = Struct(d) + entries._Struct__dtype = self.dtype + struct = self.cast(entries) + struct._Struct__dtype = self.dtype + return struct + + def __instancecheck__(self, instance): + if not isinstance(instance, Struct): + return False + if list(self.members.keys()) != list(instance._Struct__entries.keys()): + return False + if ( + hasattr(instance, "_Struct__dtype") + and instance._Struct__dtype is not None + and instance._Struct__dtype != self.dtype + ): + return False + for index, (name, dtype) in enumerate(self.members.items()): + val = instance._members[index] + if isinstance(dtype, StructType): + if not isinstance(val, dtype): + return False + elif isinstance(dtype, MatrixType): + if isinstance(val, Expr): + if not val.is_tensor(): + return False + if val.get_shape() != dtype.get_shape(): + return False + elif dtype in primitive_types.integer_types: + if isinstance(val, Expr): + if val.is_tensor() or val.is_struct() or val.element_type() not in primitive_types.integer_types: + return False + elif not isinstance(val, (int, np.integer)): + return False + elif dtype in primitive_types.real_types: + if isinstance(val, Expr): + if val.is_tensor() or val.is_struct() or val.element_type() not in primitive_types.real_types: + return False + elif not isinstance(val, (float, np.floating)): + return False + return True + + def from_taichi_object(self, func_ret, ret_index=()): + d = {} + items = self.members.items() + for index, pair in enumerate(items): + name, dtype = pair + if isinstance(dtype, CompoundType): + d[name] = dtype.from_taichi_object(func_ret, ret_index + (index,)) + else: + d[name] = expr.Expr( + _ti_core.make_get_element_expr( + func_ret.ptr, + ret_index + (index,), + _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()), + ) + ) + d["__struct_methods"] = self.methods + + struct = Struct(d) + struct._Struct__dtype = self.dtype + return struct + + def from_kernel_struct_ret(self, launch_ctx, ret_index=()): + d = {} + items = self.members.items() + for index, pair in enumerate(items): + name, dtype = pair + if isinstance(dtype, CompoundType): + d[name] = dtype.from_kernel_struct_ret(launch_ctx, ret_index + (index,)) + else: + if dtype in primitive_types.integer_types: + if is_signed(cook_dtype(dtype)): + d[name] = launch_ctx.get_struct_ret_int(ret_index + (index,)) + else: + d[name] = launch_ctx.get_struct_ret_uint(ret_index + (index,)) + elif dtype in primitive_types.real_types: + d[name] = launch_ctx.get_struct_ret_float(ret_index + (index,)) + else: + raise TaichiRuntimeTypeError(f"Invalid return type on index={ret_index + (index, )}") + d["__struct_methods"] = self.methods + + struct = Struct(d) + struct._Struct__dtype = self.dtype + return struct + + def set_kernel_struct_args(self, struct, launch_ctx, ret_index=()): + # TODO: move this to class Struct after we add dtype to Struct + items = self.members.items() + for index, pair in enumerate(items): + name, dtype = pair + if isinstance(dtype, CompoundType): + dtype.set_kernel_struct_args(struct[name], launch_ctx, ret_index + (index,)) + else: + if dtype in primitive_types.integer_types: + if is_signed(cook_dtype(dtype)): + launch_ctx.set_struct_arg_int(ret_index + (index,), struct[name]) + else: + launch_ctx.set_struct_arg_uint(ret_index + (index,), struct[name]) + elif dtype in primitive_types.real_types: + launch_ctx.set_struct_arg_float(ret_index + (index,), struct[name]) + else: + raise TaichiRuntimeTypeError(f"Invalid argument type on index={ret_index + (index, )}") + + def set_argpack_struct_args(self, struct, argpack, ret_index=()): + # TODO: move this to class Struct after we add dtype to Struct + items = self.members.items() + for index, pair in enumerate(items): + name, dtype = pair + if isinstance(dtype, CompoundType): + dtype.set_kernel_struct_args(struct[name], argpack, ret_index + (index,)) + else: + if dtype in primitive_types.integer_types: + if is_signed(cook_dtype(dtype)): + argpack.set_arg_int(ret_index + (index,), struct[name]) + else: + argpack.set_arg_uint(ret_index + (index,), struct[name]) + elif dtype in primitive_types.real_types: + argpack.set_arg_float(ret_index + (index,), struct[name]) + else: + raise TaichiRuntimeTypeError(f"Invalid argument type on index={ret_index + (index, )}") + + def cast(self, struct): + # sanity check members + if self.members.keys() != struct._Struct__entries.keys(): + raise TaichiSyntaxError("Incompatible arguments for custom struct members!") + entries = {} + for k, dtype in self.members.items(): + if isinstance(dtype, MatrixType): + entries[k] = dtype(struct._Struct__entries[k]) + elif isinstance(dtype, CompoundType): + entries[k] = dtype.cast(struct._Struct__entries[k]) + else: + if in_python_scope(): + v = struct._Struct__entries[k] + entries[k] = int(v) if dtype in primitive_types.integer_types else float(v) + else: + entries[k] = ops.cast(struct._Struct__entries[k], dtype) + entries["__struct_methods"] = self.methods + struct = Struct(entries) + struct._Struct__dtype = self.dtype + return struct + + def filled_with_scalar(self, value): + entries = {} + for k, dtype in self.members.items(): + if isinstance(dtype, MatrixType): + entries[k] = dtype(value) + elif isinstance(dtype, CompoundType): + entries[k] = dtype.filled_with_scalar(value) + else: + entries[k] = value + entries["__struct_methods"] = self.methods + struct = Struct(entries) + struct._Struct__dtype = self.dtype + return struct + + def field(self, **kwargs): + return Struct.field(self.members, self.methods, **kwargs) + + def __str__(self): + """Python scope struct type print support.""" + item_str = ", ".join([str(k) + "=" + str(v) for k, v in self.members.items()]) + item_str += f", struct_methods={self.methods}" + return f"" + + +def dataclass(cls): + """Converts a class with field annotations and methods into a taichi struct type. + + This will return a normal custom struct type, with the functions added to it. + Struct fields can be generated in the normal way from the struct type. + Functions in the class can be run on the struct instance. + + This class decorator inspects the class for annotations and methods and + 1. Sets the annotations as fields for the struct + 2. Attaches the methods to the struct type + + Example:: + + >>> @ti.dataclass + >>> class Sphere: + >>> center: vec3 + >>> radius: ti.f32 + >>> + >>> @ti.func + >>> def area(self): + >>> return 4 * 3.14 * self.radius * self.radius + >>> + >>> my_spheres = Sphere.field(shape=(n, )) + >>> my_sphere[2].area() + + Args: + cls (Class): the class with annotations and methods to convert to a struct + + Returns: + A taichi struct with the annotations as fields + and methods from the class attached. + """ + # save the annotation fields for the struct + fields = getattr(cls, "__annotations__", {}) + # raise error if there are default values + for k in fields.keys(): + if hasattr(cls, k): + raise TaichiSyntaxError("Default value in @dataclass is not supported.") + # get the class methods to be attached to the struct types + fields["__struct_methods"] = { + attribute: getattr(cls, attribute) + for attribute in dir(cls) + if callable(getattr(cls, attribute)) and not attribute.startswith("__") + } + return StructType(**fields) + + +__all__ = ["Struct", "StructField", "dataclass"] diff --git a/local_packages/taichi/lang/util.py b/local_packages/taichi/lang/util.py new file mode 100644 index 0000000..071209f --- /dev/null +++ b/local_packages/taichi/lang/util.py @@ -0,0 +1,378 @@ +import functools +import os +import traceback +import warnings + +import numpy as np +from colorama import Fore, Style +from taichi._lib import core as _ti_core +from taichi._logging import is_logging_effective +from taichi.lang import impl +from taichi.types.primitive_types import ( + f16, + f32, + f64, + i8, + i16, + i32, + i64, + u1, + u8, + u16, + u32, + u64, +) + + +def has_pytorch(): + """Whether has pytorch in the current Python environment. + + Returns: + bool: True if has pytorch else False. + + """ + _has_pytorch = False + _env_torch = os.environ.get("TI_ENABLE_TORCH", "1") + if not _env_torch or int(_env_torch): + try: + import torch # pylint: disable=C0415 + + _has_pytorch = True + except: + pass + return _has_pytorch + + +def has_paddle(): + """Whether has paddle in the current Python environment. + + Returns: + bool: True if has paddle else False. + """ + _has_paddle = False + _env_paddle = os.environ.get("TI_ENABLE_PADDLE", "1") + if not _env_paddle or int(_env_paddle): + try: + import paddle # pylint: disable=C0415 + + _has_paddle = True + except: + pass + return _has_paddle + + +def get_clangpp(): + from distutils.spawn import find_executable # pylint: disable=C0415 + + # Taichi itself uses llvm-10.0.0 to compile. + # There will be some issues compiling CUDA with other clang++ version. + _clangpp_candidates = ["clang++-10"] + for c in _clangpp_candidates: + if find_executable(c) is not None: + _clangpp_presence = find_executable(c) + return _clangpp_presence + return None + + +def has_clangpp(): + return get_clangpp() is not None + + +def is_matrix_class(rhs): + matrix_class = False + try: + if rhs._is_matrix_class: + matrix_class = True + except: + pass + return matrix_class + + +def is_taichi_class(rhs): + taichi_class = False + try: + if rhs._is_taichi_class: + taichi_class = True + except: + pass + return taichi_class + + +def to_numpy_type(dt): + """Convert taichi data type to its counterpart in numpy. + + Args: + dt (DataType): The desired data type to convert. + + Returns: + DataType: The counterpart data type in numpy. + + """ + if dt == f32: + return np.float32 + if dt == f64: + return np.float64 + if dt == i32: + return np.int32 + if dt == i64: + return np.int64 + if dt == i8: + return np.int8 + if dt == i16: + return np.int16 + if dt == u1: + return np.bool_ + if dt == u8: + return np.uint8 + if dt == u16: + return np.uint16 + if dt == u32: + return np.uint32 + if dt == u64: + return np.uint64 + if dt == f16: + return np.half + assert False + + +def to_pytorch_type(dt): + """Convert taichi data type to its counterpart in torch. + + Args: + dt (DataType): The desired data type to convert. + + Returns: + DataType: The counterpart data type in torch. + + """ + import torch # pylint: disable=C0415 + + # pylint: disable=E1101 + if dt == f32: + return torch.float32 + if dt == f64: + return torch.float64 + if dt == i32: + return torch.int32 + if dt == i64: + return torch.int64 + if dt == i8: + return torch.int8 + if dt == i16: + return torch.int16 + if dt == u1: + return torch.bool + if dt == u8: + return torch.uint8 + if dt == f16: + return torch.float16 + + if dt in (u16, u32, u64): + if hasattr(torch, "uint16"): + if dt == u16: + return torch.uint16 + if dt == u32: + return torch.uint32 + if dt == u64: + return torch.uint64 + raise RuntimeError(f"PyTorch doesn't support {dt.to_string()} data type before version 2.3.0.") + + raise RuntimeError(f"PyTorch doesn't support {dt.to_string()} data type.") + + +def to_paddle_type(dt): + """Convert taichi data type to its counterpart in paddle. + + Args: + dt (DataType): The desired data type to convert. + + Returns: + DataType: The counterpart data type in paddle. + + """ + import paddle # pylint: disable=C0415 + + if dt == f32: + return paddle.float32 + if dt == f64: + return paddle.float64 + if dt == i32: + return paddle.int32 + if dt == i64: + return paddle.int64 + if dt == i8: + return paddle.int8 + if dt == i16: + return paddle.int16 + if dt == u1: + return paddle.bool + if dt == u8: + return paddle.uint8 + if dt == f16: + return paddle.float16 + if dt in (u16, u32, u64): + raise RuntimeError(f"Paddle doesn't support {dt.to_string()} data type.") + assert False + + +def to_taichi_type(dt): + """Convert numpy or torch or paddle data type to its counterpart in taichi. + + Args: + dt (DataType): The desired data type to convert. + + Returns: + DataType: The counterpart data type in taichi. + + """ + if type(dt) == _ti_core.DataType: + return dt + + if dt == np.float32: + return f32 + if dt == np.float64: + return f64 + if dt == np.int32: + return i32 + if dt == np.int64: + return i64 + if dt == np.int8: + return i8 + if dt == np.int16: + return i16 + if dt == np.bool_: + return u1 + if dt == np.uint8: + return u8 + if dt == np.uint16: + return u16 + if dt == np.uint32: + return u32 + if dt == np.uint64: + return u64 + if dt == np.half: + return f16 + + if has_pytorch(): + import torch # pylint: disable=C0415 + + # pylint: disable=E1101 + if dt == torch.float32: + return f32 + if dt == torch.float64: + return f64 + if dt == torch.int32: + return i32 + if dt == torch.int64: + return i64 + if dt == torch.int8: + return i8 + if dt == torch.int16: + return i16 + if dt == torch.bool: + return u1 + if dt == torch.uint8: + return u8 + if dt == torch.float16: + return f16 + + if hasattr(torch, "uint16"): + if dt == torch.uint16: + return u16 + if dt == torch.uint32: + return u32 + if dt == torch.uint64: + return u64 + + raise RuntimeError(f"PyTorch doesn't support {dt.to_string()} data type before version 2.3.0.") + + if has_paddle(): + import paddle # pylint: disable=C0415 + + if dt == paddle.float32: + return f32 + if dt == paddle.float64: + return f64 + if dt == paddle.int32: + return i32 + if dt == paddle.int64: + return i64 + if dt == paddle.int8: + return i8 + if dt == paddle.int16: + return i16 + if dt == paddle.bool: + return u1 + if dt == paddle.uint8: + return u8 + if dt == paddle.float16: + return f16 + if dt in (u16, u32, u64): + raise RuntimeError(f"Paddle doesn't support {dt.to_string()} data type.") + + raise AssertionError(f"Unknown type {dt}") + + +def cook_dtype(dtype): + if isinstance(dtype, _ti_core.DataType): + return dtype + if isinstance(dtype, _ti_core.Type): + return _ti_core.DataType(dtype) + if dtype is float: + return impl.get_runtime().default_fp + if dtype is int: + return impl.get_runtime().default_ip + if dtype is bool: + return u1 + raise ValueError(f"Invalid data type {dtype}") + + +def in_taichi_scope(): + return impl.inside_kernel() + + +def in_python_scope(): + return not in_taichi_scope() + + +def taichi_scope(func): + @functools.wraps(func) + def wrapped(*args, **kwargs): + assert in_taichi_scope(), f"{func.__name__} cannot be called in Python-scope" + return func(*args, **kwargs) + + return wrapped + + +def python_scope(func): + @functools.wraps(func) + def wrapped(*args, **kwargs): + assert in_python_scope(), f"{func.__name__} cannot be called in Taichi-scope" + return func(*args, **kwargs) + + return wrapped + + +def warning(msg, warning_type=UserWarning, stacklevel=1, print_stack=True): + """Print a warning message. Note that the builtin `warnings` module is + unreliable since it may be suppressed by other packages such as IPython. + + Args: + msg (str): message to print. + warning_type (Type[Warning]): type of warning. + stacklevel (int): warning stack level from the caller. + print_stack (bool): whether to print the stack + """ + if not is_logging_effective("warn"): + return + if print_stack: + msg += f"\n{get_traceback(stacklevel)}" + warnings.warn(Fore.YELLOW + Style.BRIGHT + msg + Style.RESET_ALL, warning_type) + + +def get_traceback(stacklevel=1): + s = traceback.extract_stack()[: -1 - stacklevel] + return "".join(traceback.format_list(s)) + + +__all__ = [] diff --git a/local_packages/taichi/linalg/__init__.py b/local_packages/taichi/linalg/__init__.py new file mode 100644 index 0000000..723c24a --- /dev/null +++ b/local_packages/taichi/linalg/__init__.py @@ -0,0 +1,6 @@ +"""Taichi support module for sparse matrix operations.""" + +from taichi.linalg.sparse_cg import SparseCG +from taichi.linalg.sparse_matrix import * +from taichi.linalg.sparse_solver import SparseSolver +from taichi.linalg.matrixfree_cg import * diff --git a/local_packages/taichi/linalg/matrixfree_cg.py b/local_packages/taichi/linalg/matrixfree_cg.py new file mode 100644 index 0000000..3daabf3 --- /dev/null +++ b/local_packages/taichi/linalg/matrixfree_cg.py @@ -0,0 +1,306 @@ +from math import sqrt + +from taichi.lang.exception import TaichiRuntimeError, TaichiTypeError + +import taichi as ti + + +@ti.data_oriented +class LinearOperator: + def __init__(self, matvec_kernel): + self._matvec = matvec_kernel + + def matvec(self, x, Ax): + if x.shape != Ax.shape: + raise TaichiRuntimeError(f"Dimension mismatch x.shape{x.shape} != Ax.shape{Ax.shape}.") + self._matvec(x, Ax) + + +def MatrixFreeCG(A, b, x, tol=1e-6, maxiter=5000, quiet=True): + """Matrix-free conjugate-gradient solver. + + Use conjugate-gradient method to solve the linear system Ax = b, where A is implicitly + represented as a LinearOperator. + + Args: + A (LinearOperator): The coefficient matrix A of the linear system. + b (Field): The right-hand side of the linear system. + x (Field): The initial guess for the solution. + maxiter (int): Maximum number of iterations. + atol: Tolerance(absolute) for convergence. + quiet (bool): Switch to turn on/off iteration log. + """ + + if b.dtype != x.dtype: + raise TaichiTypeError(f"Dtype mismatch b.dtype({b.dtype}) != x.dtype({x.dtype}).") + if str(b.dtype) == "f32": + solver_dtype = ti.f32 + elif str(b.dtype) == "f64": + solver_dtype = ti.f64 + else: + raise TaichiTypeError(f"Not supported dtype: {b.dtype}") + if b.shape != x.shape: + raise TaichiRuntimeError(f"Dimension mismatch b.shape{b.shape} != x.shape{x.shape}.") + + size = b.shape + vector_fields_builder = ti.FieldsBuilder() + p = ti.field(dtype=solver_dtype) + r = ti.field(dtype=solver_dtype) + Ap = ti.field(dtype=solver_dtype) + Ax = ti.field(dtype=solver_dtype) + if len(size) == 1: + axes = ti.i + elif len(size) == 2: + axes = ti.ij + elif len(size) == 3: + axes = ti.ijk + else: + raise TaichiRuntimeError(f"MatrixFreeCG only support 1D, 2D, 3D inputs; your inputs is {len(size)}-D.") + vector_fields_builder.dense(axes, size).place(p, r, Ap, Ax) + vector_fields_snode_tree = vector_fields_builder.finalize() + + scalar_builder = ti.FieldsBuilder() + alpha = ti.field(dtype=solver_dtype) + beta = ti.field(dtype=solver_dtype) + scalar_builder.place(alpha, beta) + scalar_snode_tree = scalar_builder.finalize() + + @ti.kernel + def init(): + for I in ti.grouped(x): + r[I] = b[I] - Ax[I] + p[I] = 0.0 + Ap[I] = 0.0 + + @ti.kernel + def reduce(p: ti.template(), q: ti.template()) -> solver_dtype: + result = solver_dtype(0.0) + for I in ti.grouped(p): + result += p[I] * q[I] + return result + + @ti.kernel + def update_x(): + for I in ti.grouped(x): + x[I] += alpha[None] * p[I] + + @ti.kernel + def update_r(): + for I in ti.grouped(r): + r[I] -= alpha[None] * Ap[I] + + @ti.kernel + def update_p(): + for I in ti.grouped(p): + p[I] = r[I] + beta[None] * p[I] + + def solve(): + succeeded = True + A._matvec(x, Ax) + init() + initial_rTr = reduce(r, r) + if not quiet: + print(f">>> Initial residual = {initial_rTr:e}") + old_rTr = initial_rTr + new_rTr = initial_rTr + update_p() + if sqrt(initial_rTr) >= tol: # Do nothing if the initial residual is small enough + # -- Main loop -- + for i in range(maxiter): + A._matvec(p, Ap) # compute Ap = A x p + pAp = reduce(p, Ap) + alpha[None] = old_rTr / pAp + update_x() + update_r() + new_rTr = reduce(r, r) + if sqrt(new_rTr) < tol: + if not quiet: + print(">>> Conjugate Gradient method converged.") + print(f">>> #iterations {i}") + break + beta[None] = new_rTr / old_rTr + update_p() + old_rTr = new_rTr + if not quiet: + print(f">>> Iter = {i+1:4}, Residual = {sqrt(new_rTr):e}") + if new_rTr >= tol: + if not quiet: + print( + f">>> Conjugate Gradient method failed to converge in {maxiter} iterations: Residual = {sqrt(new_rTr):e}" + ) + succeeded = False + return succeeded + + succeeded = solve() + vector_fields_snode_tree.destroy() + scalar_snode_tree.destroy() + return succeeded + + +def MatrixFreeBICGSTAB(A, b, x, tol=1e-6, maxiter=5000, quiet=True): + """Matrix-free biconjugate-gradient stabilized solver (BiCGSTAB). + + Use BiCGSTAB method to solve the linear system Ax = b, where A is implicitly + represented as a LinearOperator. + + Args: + A (LinearOperator): The coefficient matrix A of the linear system. + b (Field): The right-hand side of the linear system. + x (Field): The initial guess for the solution. + maxiter (int): Maximum number of iterations. + atol: Tolerance(absolute) for convergence. + quiet (bool): Switch to turn on/off iteration log. + """ + + if b.dtype != x.dtype: + raise TaichiTypeError(f"Dtype mismatch b.dtype({b.dtype}) != x.dtype({x.dtype}).") + if str(b.dtype) == "f32": + solver_dtype = ti.f32 + elif str(b.dtype) == "f64": + solver_dtype = ti.f64 + else: + raise TaichiTypeError(f"Not supported dtype: {b.dtype}") + if b.shape != x.shape: + raise TaichiRuntimeError(f"Dimension mismatch b.shape{b.shape} != x.shape{x.shape}.") + + size = b.shape + vector_fields_builder = ti.FieldsBuilder() + p = ti.field(dtype=solver_dtype) + p_hat = ti.field(dtype=solver_dtype) + r = ti.field(dtype=solver_dtype) + r_tld = ti.field(dtype=solver_dtype) + s = ti.field(dtype=solver_dtype) + s_hat = ti.field(dtype=solver_dtype) + t = ti.field(dtype=solver_dtype) + Ap = ti.field(dtype=solver_dtype) + Ax = ti.field(dtype=solver_dtype) + Ashat = ti.field(dtype=solver_dtype) + if len(size) == 1: + axes = ti.i + elif len(size) == 2: + axes = ti.ij + elif len(size) == 3: + axes = ti.ijk + else: + raise TaichiRuntimeError(f"MatrixFreeBICGSTAB only support 1D, 2D, 3D inputs; your inputs is {len(size)}-D.") + vector_fields_builder.dense(axes, size).place(p, p_hat, r, r_tld, s, s_hat, t, Ap, Ax, Ashat) + vector_fields_snode_tree = vector_fields_builder.finalize() + + scalar_builder = ti.FieldsBuilder() + alpha = ti.field(dtype=solver_dtype) + beta = ti.field(dtype=solver_dtype) + omega = ti.field(dtype=solver_dtype) + rho = ti.field(dtype=solver_dtype) + rho_1 = ti.field(dtype=solver_dtype) + scalar_builder.place(alpha, beta, omega, rho, rho_1) + scalar_snode_tree = scalar_builder.finalize() + succeeded = True + + @ti.kernel + def init(): + for I in ti.grouped(x): + r[I] = b[I] - Ax[I] + r_tld[I] = b[I] + p[I] = 0.0 + Ap[I] = 0.0 + Ashat[I] = 0.0 + rho[None] = 0.0 + rho_1[None] = 1.0 + alpha[None] = 1.0 + beta[None] = 1.0 + omega[None] = 1.0 + + @ti.kernel + def reduce(p: ti.template(), q: ti.template()) -> solver_dtype: + result = solver_dtype(0.0) + for I in ti.grouped(p): + result += p[I] * q[I] + return result + + @ti.kernel + def copy(orig: ti.template(), dest: ti.template()): + for I in ti.grouped(orig): + dest[I] = orig[I] + + @ti.kernel + def update_p(): + for I in ti.grouped(p): + p[I] = r[I] + beta[None] * (p[I] - omega[None] * Ap[I]) + + @ti.kernel + def update_phat(): + for I in ti.grouped(p_hat): + p_hat[I] = p[I] + + @ti.kernel + def update_s(): + for I in ti.grouped(s): + s[I] = r[I] - alpha[None] * Ap[I] + + @ti.kernel + def update_shat(): + for I in ti.grouped(s_hat): + s_hat[I] = s[I] + + @ti.kernel + def update_x(): + for I in ti.grouped(x): + x[I] += alpha[None] * p_hat[I] + omega[None] * s_hat[I] + + @ti.kernel + def update_r(): + for I in ti.grouped(r): + r[I] = s[I] - omega[None] * t[I] + + def solve(): + succeeded = True + A._matvec(x, Ax) + init() + initial_rTr = reduce(r, r) + rTr = initial_rTr + if not quiet: + print(f">>> Initial residual = {initial_rTr:e}") + if sqrt(initial_rTr) >= tol: # Do nothing if the initial residual is small enough + for i in range(maxiter): + rho[None] = reduce(r, r_tld) + if rho[None] == 0.0: + if not quiet: + print(">>> BICGSTAB failed because r@r_tld = 0.") + succeeded = False + break + if i == 0: + copy(orig=r, dest=p) + else: + beta[None] = (rho[None] / rho_1[None]) * (alpha[None] / omega[None]) + update_p() + update_phat() + A._matvec(p, Ap) + alpha_lower = reduce(r_tld, Ap) + alpha[None] = rho[None] / alpha_lower + update_s() + update_shat() + A._matvec(s_hat, Ashat) + copy(orig=Ashat, dest=t) + omega_upper = reduce(t, s) + omega_lower = reduce(t, t) + omega[None] = omega_upper / (omega_lower + 1e-16) if omega_lower == 0.0 else omega_upper / omega_lower + update_x() + update_r() + rTr = reduce(r, r) + if not quiet: + print(f">>> Iter = {i+1:4}, Residual = {sqrt(rTr):e}") + if sqrt(rTr) < tol: + if not quiet: + print(f">>> BICGSTAB method converged at #iterations {i}") + break + rho_1[None] = rho[None] + if rTr >= tol: + if not quiet: + print(f">>> BICGSTAB failed to converge in {maxiter} iterations: Residual = {sqrt(rTr):e}") + succeeded = False + return succeeded + + succeeded = solve() + vector_fields_snode_tree.destroy() + scalar_snode_tree.destroy() + return succeeded diff --git a/local_packages/taichi/linalg/sparse_cg.py b/local_packages/taichi/linalg/sparse_cg.py new file mode 100644 index 0000000..e0aba81 --- /dev/null +++ b/local_packages/taichi/linalg/sparse_cg.py @@ -0,0 +1,56 @@ +import numpy as np +from taichi._lib import core as _ti_core +from taichi.lang._ndarray import Ndarray, ScalarNdarray +from taichi.lang.exception import TaichiRuntimeError +from taichi.lang.impl import get_runtime +from taichi.types import f32, f64 + + +class SparseCG: + """Conjugate-gradient solver built for SparseMatrix. + + Use conjugate-gradient method to solve the linear system Ax = b, where A is SparseMatrix. + + Args: + A (SparseMatrix): The coefficient matrix A of the linear system. + b (numpy ndarray, taichi Ndarray): The right-hand side of the linear system. + x0 (numpy ndarray, taichi Ndarray): The initial guess for the solution. + max_iter (int): Maximum number of iterations. + atol: Tolerance(absolute) for convergence. + """ + + def __init__(self, A, b, x0=None, max_iter=50, atol=1e-6): + self.dtype = A.dtype + self.ti_arch = get_runtime().prog.config().arch + self.matrix = A + self.b = b + if self.ti_arch == _ti_core.Arch.cuda: + self.cg_solver = _ti_core.make_cucg_solver(A.matrix, max_iter, atol, True) + elif self.ti_arch == _ti_core.Arch.x64 or self.ti_arch == _ti_core.Arch.arm64: + if self.dtype == f32: + self.cg_solver = _ti_core.make_float_cg_solver(A.matrix, max_iter, atol, True) + elif self.dtype == f64: + self.cg_solver = _ti_core.make_double_cg_solver(A.matrix, max_iter, atol, True) + else: + raise TaichiRuntimeError(f"Unsupported CG dtype: {self.dtype}") + if isinstance(b, Ndarray): + self.cg_solver.set_b_ndarray(get_runtime().prog, b.arr) + elif isinstance(b, np.ndarray): + self.cg_solver.set_b(b) + if isinstance(x0, Ndarray): + self.cg_solver.set_x_ndarray(get_runtime().prog, x0.arr) + elif isinstance(x0, np.ndarray): + self.cg_solver.set_x(x0) + else: + raise TaichiRuntimeError(f"Unsupported CG arch: {self.ti_arch}") + + def solve(self): + if self.ti_arch == _ti_core.Arch.cuda: + if isinstance(self.b, Ndarray): + x = ScalarNdarray(self.b.dtype, [self.matrix.m]) + self.cg_solver.solve(get_runtime().prog, x.arr, self.b.arr) + return x, True + raise TaichiRuntimeError(f"Unsupported CG RHS type: {type(self.b)}") + else: + self.cg_solver.solve() + return self.cg_solver.get_x(), self.cg_solver.is_success() diff --git a/local_packages/taichi/linalg/sparse_matrix.py b/local_packages/taichi/linalg/sparse_matrix.py new file mode 100644 index 0000000..61f1e19 --- /dev/null +++ b/local_packages/taichi/linalg/sparse_matrix.py @@ -0,0 +1,300 @@ +from functools import reduce + +import numpy as np +from taichi._lib import core as _ti_core +from taichi.lang._ndarray import Ndarray, ScalarNdarray +from taichi.lang.exception import TaichiRuntimeError +from taichi.lang.field import Field +from taichi.lang.impl import get_runtime +from taichi.types import f32 + + +class SparseMatrix: + """Taichi's Sparse Matrix class + + A sparse matrix allows the programmer to solve a large linear system. + + Args: + n (int): the first dimension of a sparse matrix. + m (int): the second dimension of a sparse matrix. + sm (SparseMatrix): another sparse matrix that will be built from. + """ + + def __init__(self, n=None, m=None, sm=None, dtype=f32, storage_format="col_major"): + self.dtype = dtype + if sm is None: + self.n = n + self.m = m if m else n + self.matrix = get_runtime().prog.create_sparse_matrix(n, m, dtype, storage_format) + else: + self.n = sm.num_rows() + self.m = sm.num_cols() + self.matrix = sm + + def __iadd__(self, other): + """Addition operation for sparse matrix. + + Returns: + The result sparse matrix of the addition. + """ + assert ( + self.n == other.n and self.m == other.m + ), f"Dimension mismatch between sparse matrices ({self.n}, {self.m}) and ({other.n}, {other.m})" + self.matrix += other.matrix + return self + + def __add__(self, other): + """Addition operation for sparse matrix. + + Returns: + The result sparse matrix of the addition. + """ + assert ( + self.n == other.n and self.m == other.m + ), f"Dimension mismatch between sparse matrices ({self.n}, {self.m}) and ({other.n}, {other.m})" + sm = self.matrix + other.matrix + return SparseMatrix(sm=sm) + + def __isub__(self, other): + """Subtraction operation for sparse matrix. + + Returns: + The result sparse matrix of the subtraction. + """ + assert ( + self.n == other.n and self.m == other.m + ), f"Dimension mismatch between sparse matrices ({self.n}, {self.m}) and ({other.n}, {other.m})" + self.matrix -= other.matrix + return self + + def __sub__(self, other): + """Subtraction operation for sparse matrix. + + Returns: + The result sparse matrix of the subtraction. + """ + assert ( + self.n == other.n and self.m == other.m + ), f"Dimension mismatch between sparse matrices ({self.n}, {self.m}) and ({other.n}, {other.m})" + sm = self.matrix - other.matrix + return SparseMatrix(sm=sm) + + def __mul__(self, other): + """Sparse matrix's multiplication against real numbers or the hadamard product against another matrix + + Args: + other (float or SparseMatrix): the other operand of multiplication. + Returns: + The result of multiplication. + """ + if isinstance(other, float): + sm = other * self.matrix + return SparseMatrix(sm=sm) + if isinstance(other, SparseMatrix): + assert ( + self.n == other.n and self.m == other.m + ), f"Dimension mismatch between sparse matrices ({self.n}, {self.m}) and ({other.n}, {other.m})" + sm = self.matrix * other.matrix + return SparseMatrix(sm=sm) + + return None + + def __rmul__(self, other): + """Right scalar multiplication for sparse matrix. + + Args: + other (float): the other operand of scalar multiplication. + Returns: + The result of multiplication. + """ + if isinstance(other, float): + sm = self.matrix * other + return SparseMatrix(sm=sm) + + return None + + def transpose(self): + """Sparse Matrix transpose. + + Returns: + The transposed sparse mastrix. + """ + sm = self.matrix.transpose() + return SparseMatrix(sm=sm) + + def __matmul__(self, other): + """Matrix multiplication. + + Args: + other (SparseMatrix, Field, or numpy.array): the other sparse matrix of the multiplication. + Returns: + The result of matrix multiplication. + """ + if isinstance(other, SparseMatrix): + assert ( + self.m == other.n + ), f"Dimension mismatch between sparse matrices ({self.n}, {self.m}) and ({other.n}, {other.m})" + sm = self.matrix.matmul(other.matrix) + return SparseMatrix(sm=sm) + if isinstance(other, Field): + assert ( + self.m == other.shape[0] + ), f"Dimension mismatch between sparse matrix ({self.n}, {self.m}) and vector ({other.shape})" + return self.matrix.mat_vec_mul(other.to_numpy()) + if isinstance(other, np.ndarray): + assert ( + self.m == other.shape[0] + ), f"Dimension mismatch between sparse matrix ({self.n}, {self.m}) and vector ({other.shape})" + return self.matrix.mat_vec_mul(other) + if isinstance(other, Ndarray): + if self.m != other.shape[0]: + raise TaichiRuntimeError( + f"Dimension mismatch between sparse matrix ({self.n}, {self.m}) and vector ({other.shape})" + ) + res = ScalarNdarray(dtype=other.dtype, arr_shape=(self.n,)) + self.matrix.spmv(get_runtime().prog, other.arr, res.arr) + return res + raise TaichiRuntimeError( + f"Sparse matrix-matrix/vector multiplication does not support {type(other)} for now. Supported types are SparseMatrix, ti.field, and numpy ndarray." + ) + + def __getitem__(self, indices): + return self.matrix.get_element(indices[0], indices[1]) + + def __setitem__(self, indices, value): + self.matrix.set_element(indices[0], indices[1], value) + + def __str__(self): + """Python scope matrix print support.""" + return self.matrix.to_string() + + def __repr__(self): + return self.matrix.to_string() + + @property + def shape(self): + """The shape of the sparse matrix.""" + return (self.n, self.m) + + def build_from_ndarray(self, ndarray): + """Build the sparse matrix from a ndarray. + + Args: + ndarray (Union[ti.ndarray, ti.Vector.ndarray, ti.Matrix.ndarray]): the ndarray to build the sparse matrix from. + + Raises: + TaichiRuntimeError: If the input is not a ndarray or the length is not divisible by 3. + + Example:: + >>> N = 5 + >>> triplets = ti.Vector.ndarray(n=3, dtype=ti.f32, shape=10, layout=ti.Layout.AOS) + >>> @ti.kernel + >>> def fill(triplets: ti.types.ndarray()): + >>> for i in range(N): + >>> triplets[i] = ti.Vector([i, (i + 1) % N, i+1], dt=ti.f32) + >>> fill(triplets) + >>> A = ti.linalg.SparseMatrix(n=N, m=N, dtype=ti.f32) + >>> A.build_from_ndarray(triplets) + >>> print(A) + [0, 1, 0, 0, 0] + [0, 0, 2, 0, 0] + [0, 0, 0, 3, 0] + [0, 0, 0, 0, 4] + [5, 0, 0, 0, 0] + """ + if isinstance(ndarray, Ndarray): + num_scalars = reduce(lambda x, y: x * y, ndarray.shape + ndarray.element_shape) + if num_scalars % 3 != 0: + raise TaichiRuntimeError("The number of ndarray elements must have a length that is divisible by 3.") + get_runtime().prog.make_sparse_matrix_from_ndarray(self.matrix, ndarray.arr) + else: + raise TaichiRuntimeError( + "Sparse matrix only supports building from [ti.ndarray, ti.Vector.ndarray, ti.Matrix.ndarray]" + ) + + def mmwrite(self, filename): + """Writes the sparse matrix to Matrix Market file-like target. + + Args: + filename (str): the file name to write the sparse matrix to. + """ + self.matrix.mmwrite(filename) + + +class SparseMatrixBuilder: + """A python wrap around sparse matrix builder. + + Use this builder to fill the sparse matrix. + + Args: + num_rows (int): the first dimension of a sparse matrix. + num_cols (int): the second dimension of a sparse matrix. + max_num_triplets (int): the maximum number of triplets. + dtype (ti.dtype): the data type of the sparse matrix. + storage_format (str): the storage format of the sparse matrix. + """ + + def __init__( + self, + num_rows=None, + num_cols=None, + max_num_triplets=0, + dtype=f32, + storage_format="col_major", + ): + self.num_rows = num_rows + self.num_cols = num_cols if num_cols else num_rows + self.dtype = dtype + if num_rows is not None: + taichi_arch = get_runtime().prog.config().arch + if taichi_arch in [ + _ti_core.Arch.x64, + _ti_core.Arch.arm64, + _ti_core.Arch.cuda, + ]: + self.ptr = _ti_core.SparseMatrixBuilder( + num_rows, + num_cols, + max_num_triplets, + dtype, + storage_format, + ) + self.ptr.create_ndarray(get_runtime().prog) + else: + raise TaichiRuntimeError("SparseMatrix only supports CPU and CUDA for now.") + + def _get_addr(self): + """Get the address of the sparse matrix""" + return self.ptr.get_addr() + + def _get_ndarray_addr(self): + """Get the address of the ndarray""" + return self.ptr.get_ndarray_data_ptr() + + def print_triplets(self): + """Print the triplets stored in the builder""" + taichi_arch = get_runtime().prog.config().arch + if taichi_arch in [_ti_core.Arch.x64, _ti_core.Arch.arm64]: + self.ptr.print_triplets_eigen() + elif taichi_arch == _ti_core.Arch.cuda: + self.ptr.print_triplets_cuda() + + def build(self, dtype=f32, _format="CSR"): + """Create a sparse matrix using the triplets""" + taichi_arch = get_runtime().prog.config().arch + if taichi_arch in [_ti_core.Arch.x64, _ti_core.Arch.arm64]: + sm = self.ptr.build() + return SparseMatrix(sm=sm, dtype=self.dtype) + if taichi_arch == _ti_core.Arch.cuda: + if self.dtype != f32: + raise TaichiRuntimeError("CUDA sparse matrix only supports f32.") + sm = self.ptr.build_cuda() + return SparseMatrix(sm=sm, dtype=self.dtype) + raise TaichiRuntimeError("Sparse matrix only supports CPU and CUDA backends.") + + def __del__(self): + if get_runtime() is not None and get_runtime().prog is not None: + self.ptr.delete_ndarray(get_runtime().prog) + + +__all__ = ["SparseMatrix", "SparseMatrixBuilder"] diff --git a/local_packages/taichi/linalg/sparse_solver.py b/local_packages/taichi/linalg/sparse_solver.py new file mode 100644 index 0000000..3850cbe --- /dev/null +++ b/local_packages/taichi/linalg/sparse_solver.py @@ -0,0 +1,120 @@ +import numpy as np +import taichi.lang +from taichi._lib import core as _ti_core +from taichi.lang._ndarray import Ndarray, ScalarNdarray +from taichi.lang.exception import TaichiRuntimeError +from taichi.lang.field import Field +from taichi.lang.impl import get_runtime +from taichi.linalg.sparse_matrix import SparseMatrix +from taichi.types.primitive_types import f32 + + +class SparseSolver: + """Sparse linear system solver + + Use this class to solve linear systems represented by sparse matrices. + + Args: + solver_type (str): The factorization type. + ordering (str): The method for matrices re-ordering. + """ + + def __init__(self, dtype=f32, solver_type="LLT", ordering="AMD"): + self.matrix = None + self.dtype = dtype + solver_type_list = ["LLT", "LDLT", "LU"] + solver_ordering = ["AMD", "COLAMD"] + if solver_type in solver_type_list and ordering in solver_ordering: + taichi_arch = taichi.lang.impl.get_runtime().prog.config().arch + assert ( + taichi_arch == _ti_core.Arch.x64 + or taichi_arch == _ti_core.Arch.arm64 + or taichi_arch == _ti_core.Arch.cuda + ), "SparseSolver only supports CPU and CUDA for now." + if taichi_arch == _ti_core.Arch.cuda: + self.solver = _ti_core.make_cusparse_solver(dtype, solver_type, ordering) + else: + self.solver = _ti_core.make_sparse_solver(dtype, solver_type, ordering) + else: + raise TaichiRuntimeError( + f"The solver type {solver_type} with {ordering} is not supported for now. Only {solver_type_list} with {solver_ordering} are supported." + ) + + @staticmethod + def _type_assert(sparse_matrix): + raise TaichiRuntimeError( + f"The parameter type: {type(sparse_matrix)} is not supported in linear solvers for now." + ) + + def compute(self, sparse_matrix): + """This method is equivalent to calling both `analyze_pattern` and then `factorize`. + + Args: + sparse_matrix (SparseMatrix): The sparse matrix to be computed. + """ + if isinstance(sparse_matrix, SparseMatrix): + self.matrix = sparse_matrix + taichi_arch = taichi.lang.impl.get_runtime().prog.config().arch + if taichi_arch == _ti_core.Arch.x64 or taichi_arch == _ti_core.Arch.arm64: + self.solver.compute(sparse_matrix.matrix) + elif taichi_arch == _ti_core.Arch.cuda: + self.analyze_pattern(self.matrix) + self.factorize(self.matrix) + else: + self._type_assert(sparse_matrix) + + def analyze_pattern(self, sparse_matrix): + """Reorder the nonzero elements of the matrix, such that the factorization step creates less fill-in. + + Args: + sparse_matrix (SparseMatrix): The sparse matrix to be analyzed. + """ + if isinstance(sparse_matrix, SparseMatrix): + self.matrix = sparse_matrix + if self.matrix.dtype != self.dtype: + raise TaichiRuntimeError( + f"The SparseSolver's dtype {self.dtype} is not consistent with the SparseMatrix's dtype {self.matrix.dtype}." + ) + self.solver.analyze_pattern(sparse_matrix.matrix) + else: + self._type_assert(sparse_matrix) + + def factorize(self, sparse_matrix): + """Do the factorization step + + Args: + sparse_matrix (SparseMatrix): The sparse matrix to be factorized. + """ + if isinstance(sparse_matrix, SparseMatrix): + self.matrix = sparse_matrix + self.solver.factorize(sparse_matrix.matrix) + else: + self._type_assert(sparse_matrix) + + def solve(self, b): # pylint: disable=R1710 + """Computes the solution of the linear systems. + Args: + b (numpy.array or Field): The right-hand side of the linear systems. + + Returns: + numpy.array: The solution of linear systems. + """ + if self.matrix is None: + raise TaichiRuntimeError("Please call compute() before calling solve().") + if isinstance(b, Field): + return self.solver.solve(b.to_numpy()) + if isinstance(b, np.ndarray): + return self.solver.solve(b) + if isinstance(b, Ndarray): + x = ScalarNdarray(b.dtype, [self.matrix.m]) + self.solver.solve_rf(get_runtime().prog, self.matrix.matrix, b.arr, x.arr) + return x + raise TaichiRuntimeError(f"The parameter type: {type(b)} is not supported in linear solvers for now.") + + def info(self): + """Check if the linear systems are solved successfully. + + Returns: + bool: True if the solving process succeeded, False otherwise. + """ + return self.solver.info() diff --git a/local_packages/taichi/math/__init__.py b/local_packages/taichi/math/__init__.py new file mode 100644 index 0000000..c924fc6 --- /dev/null +++ b/local_packages/taichi/math/__init__.py @@ -0,0 +1,9 @@ +"""Taichi math module. + +The math module supports glsl-style vectors, matrices and functions. +""" + +from ._complex import * +from .mathimpl import * # pylint: disable=W0622 + +del mathimpl diff --git a/local_packages/taichi/math/_complex.py b/local_packages/taichi/math/_complex.py new file mode 100644 index 0000000..2af467c --- /dev/null +++ b/local_packages/taichi/math/_complex.py @@ -0,0 +1,203 @@ +from taichi.lang import ops +from taichi.lang.kernel_impl import func + +from .mathimpl import dot, vec2 + + +@func +def cmul(z1, z2): + """Performs complex multiplication between two 2d vectors. + + This is equivalent to the multiplication in the complex number field + when `z1` and `z2` are treated as complex numbers. + + Args: + z1 (:class:`~taichi.math.vec2`): The first input. + z2 (:class:`~taichi.math.vec2`): The second input. + + Example:: + + >>> @ti.kernel + >>> def test(): + >>> z1 = ti.math.vec2(1, 1) + >>> z2 = ti.math.vec2(0, 1) + >>> ti.math.cmul(z1, z2) # [-1, 1] + + Returns: + :class:`~taichi.math.vec2`: the complex multiplication `z1 * z2`. + """ + x1, y1 = z1[0], z1[1] + x2, y2 = z2[0], z2[1] + return vec2(x1 * x2 - y1 * y2, x1 * y2 + x2 * y1) + + +@func +def cconj(z): + """Returns the complex conjugate of a 2d vector. + + If `z=(x, y)` then the conjugate of `z` is `(x, -y)`. + + Args: + z (:class:`~taichi.math.vec2`): The input. + + Returns: + :class:`~taichi.math.vec2`: The complex conjugate of `z`. + """ + return vec2(z[0], -z[1]) + + +@func +def cdiv(z1, z2): + """Performs complex division between two 2d vectors. + + This is equivalent to the division in the complex number field + when `z1` and `z2` are treated as complex numbers. + + Args: + z1 (:class:`~taichi.math.vec2`): The first input. + z2 (:class:`~taichi.math.vec2`): The second input. + + Example:: + + >>> @ti.kernel + >>> def test(): + >>> z1 = ti.math.vec2(1, 1) + >>> z2 = ti.math.vec2(0, 1) + >>> ti.math.cdiv(z1, z2) # [1, -1] + + Returns: + :class:`~taichi.math.vec2`: the complex division of `z1 / z2`. + """ + x1, y1 = z1[0], z1[1] + x2, y2 = z2[0], z2[1] + return vec2(x1 * x2 + y1 * y2, -x1 * y2 + x2 * y1) / dot(z2, z2) + + +@func +def csqrt(z): + """Returns the complex square root of a 2d vector `z`, so that + if `w^2=z`, then `w = csqrt(z)`. + + Among the two square roots of `z`, if their real parts are non-zero, + the one with positive real part is returned. If both their real parts + are zero, the one with non-negative imaginary part is returned. + + Args: + z (:class:`~taichi.math.vec2`): The input. + + Example:: + + >>> @ti.kernel + >>> def test(): + >>> z = ti.math.vec2(-1, 0) + >>> w = ti.math.csqrt(z) # [0, 1] + + Returns: + :class:`~taichi.math.vec2`: The complex square root. + """ + result = vec2(0.0) + if any(z): + r = ops.sqrt(z.norm()) + a = ops.atan2(z[1], z[0]) + result = r * vec2(ops.cos(a / 2.0), ops.sin(a / 2.0)) + + return result + + +@func +def cinv(z): + """Computes the reciprocal of a complex `z`. + + Args: + z (:class:`~taichi.math.vec2`): The input. + + Example:: + + >>> @ti.kernel + >>> def test(): + >>> z = ti.math.vec2(1, 1) + >>> w = ti.math.cinv(z) # [0.5, -0.5] + + Returns: + :class:`~taichi.math.vec2`: The reciprocal of `z`. + """ + return cconj(z) / dot(z, z) + + +@func +def cpow(z, n): + """Computes the power of a complex `z`: :math:`z^a`. + + Args: + z (:class:`~taichi.math.vec2`): The base. + a (float): The exponent. + + Example:: + + >>> @ti.kernel + >>> def test(): + >>> z = ti.math.vec2(1, 1) + >>> w = ti.math.cpow(z) # [-2, 2] + + Returns: + :class:`~taichi.math.vec2`: The power :math:`z^a`. + """ + result = vec2(0.0) + if any(z): + r2 = dot(z, z) + a = ops.atan2(z[1], z[0]) * n + result = ops.pow(r2, n / 2.0) * vec2(ops.cos(a), ops.sin(a)) + + return result + + +@func +def cexp(z): + """Returns the complex exponential :math:`e^z`. + + `z` is a 2d vector treated as a complex number. + + Args: + z (:class:`~taichi.math.vec2`): The exponent. + + Example:: + + >>> @ti.kernel + >>> def test(): + >>> z = ti.math.vec2(1, 1) + >>> w = ti.math.cexp(z) # [1.468694, 2.287355] + + Returns: + :class:`~taichi.math.vec2`: The power :math:`exp(z)` + """ + r = ops.exp(z[0]) + return vec2(r * ops.cos(z[1]), r * ops.sin(z[1])) + + +@func +def clog(z): + """Returns the complex logarithm of `z`, so that if :math:`e^w = z`, + then :math:`log(z) = w`. + + `z` is a 2d vector treated as a complex number. The argument of :math:`w` + lies in the range (-pi, pi]. + + Args: + z (:class:`~taichi.math.vec2`): The input. + + Example:: + + >>> @ti.kernel + >>> def test(): + >>> z = ti.math.vec2(1, 1) + >>> w = ti.math.clog(z) # [0.346574, 0.785398] + + Returns: + :class:`~taichi.math.vec2`: The logarithm of `z`. + """ + ang = ops.atan2(z[1], z[0]) + r2 = dot(z, z) + return vec2(ops.log(r2) / 2.0, ang) + + +__all__ = ["cconj", "cdiv", "cexp", "cinv", "clog", "cmul", "cpow", "csqrt"] diff --git a/local_packages/taichi/math/mathimpl.py b/local_packages/taichi/math/mathimpl.py new file mode 100644 index 0000000..85fb456 --- /dev/null +++ b/local_packages/taichi/math/mathimpl.py @@ -0,0 +1,884 @@ +# pylint: disable=W0622 +""" +Math functions for glsl-like functions and other stuff. +""" +import math + +from taichi.lang import impl, ops +from taichi.lang.impl import static, zero +from taichi.lang.kernel_impl import func +from taichi.lang.matrix import Matrix +from taichi.lang.ops import ( + acos, + asin, + atan2, + ceil, + cos, + exp, + floor, + log, + max, + min, + pow, + round, + sin, + sqrt, + tan, + tanh, +) +from taichi.types import matrix, template, vector +from taichi.types.primitive_types import f64, u32, u64 + +cfg = impl.default_cfg + +e = math.e +"""The mathematical constant e = 2.718281…. +Directly imported from the Python standard library `math`. +""" + +pi = math.pi +"""The mathematical constant π = 3.141592…. +Directly imported from the Python standard library `math`. +""" + +inf = math.inf +"""A floating-point positive infinity. (For negative infinity, use `-inf`). +Directly imported from the Python standard library `math`. +""" + +nan = math.nan +"""A floating-point "not a number" (NaN) value. +Directly imported from the Python standard library `math` +""" + +vec2 = vector(2, cfg().default_fp) +"""2D floating vector type. +""" + +vec3 = vector(3, cfg().default_fp) +"""3D floating vector type. +""" + +vec4 = vector(4, cfg().default_fp) +"""4D floating vector type. +""" + +ivec2 = vector(2, cfg().default_ip) +"""2D signed int vector type. +""" + +ivec3 = vector(3, cfg().default_ip) +"""3D signed int vector type. +""" + +ivec4 = vector(4, cfg().default_ip) +"""3D signed int vector type. +""" + +uvec2 = vector(2, cfg().default_up) +"""2D unsigned int vector type. +""" + +uvec3 = vector(3, cfg().default_up) +"""3D unsigned int vector type. +""" + +uvec4 = vector(4, cfg().default_up) +"""4D unsigned int vector type. +""" + +mat2 = matrix(2, 2, cfg().default_fp) +"""2x2 floating matrix type. +""" + +mat3 = matrix(3, 3, cfg().default_fp) +"""3x3 floating matrix type. +""" + +mat4 = matrix(4, 4, cfg().default_fp) +"""4x4 floating matrix type. +""" + + +@func +def mix(x, y, a): + """Performs a linear interpolation between `x` and `y` using + `a` to weight between them. The return value is computed as + `x * (1 - a) + a * y`. + + The arguments can be scalars or :class:`~taichi.Matrix`, + as long as the operation can be performed. + + This function is similar to the `mix` function in GLSL. + + Args: + x (:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`): Specify + the start of the range in which to interpolate. + y (:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`): Specify + the end of the range in which to interpolate. + a (:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`): Specify + the weight to use to interpolate between x and y. + + Returns: + (:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`): The linear + interpolation of `x` and `y` by weight `a`. + + Example:: + + >>> x = ti.Vector([1, 1, 1]) + >>> y = ti.Vector([2, 2, 2]) + >>> a = ti.Vector([1, 0, 0]) + >>> ti.math.mix(x, y, a) + [2.000000, 1.000000, 1.000000] + >>> x = ti.Matrix([[1, 2], [2, 3]], ti.f32) + >>> y = ti.Matrix([[3, 5], [4, 5]], ti.f32) + >>> a = 0.5 + >>> ti.math.mix(x, y, a) + [[2.000000, 3.500000], [3.000000, 4.000000]] + """ + return x * (1.0 - a) + y * a + + +@func +def clamp(x, xmin, xmax): + """Constrain a value to lie between two further values, element-wise. + The returned value is computed as `min(max(x, xmin), xmax)`. + + The arguments can be scalars or :class:`~taichi.Matrix`, + as long as they can be broadcasted to a common shape. + + Args: + x (:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`): Specify + the value to constrain. + y (:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`): Specify + the lower end of the range into which to constrain `x`. + a (:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`): Specify + the upper end of the range into which to constrain `x`. + + Returns: + The value of `x` constrained to the range `xmin` to `xmax`. + + Example:: + + >>> v = ti.Vector([0, 0.5, 1.0, 1.5]) + >>> ti.math.clamp(v, 0.5, 1.0) + [0.500000, 0.500000, 1.000000, 1.000000] + >>> x = ti.Matrix([[0, 1], [-2, 2]], ti.f32) + >>> y = ti.Matrix([[1, 2], [1, 2]], ti.f32) + >>> ti.math.clamp(x, 0.5, y) + [[0.500000, 1.000000], [0.500000, 2.000000]] + """ + return ops.min(xmax, ops.max(xmin, x)) + + +@func +def step(edge, x): + """Generate a step function by comparing two values, element-wise. + + `step` generates a step function by comparing `x` to edge. + For element i of the return value, 0.0 is returned if x[i] < edge[i], + and 1.0 is returned otherwise. + + The two arguments can be scalars or :class:`~taichi.Matrix`, + as long as they can be broadcasted to a common shape. + + Args: + edge (:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`): Specify + the location of the edge of the step function. + x (:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`): Specify + the value to be used to generate the step function. + + Returns: + The return value is computed as `x >= edge`, with type promoted. + + Example:: + + >>> x = ti.Matrix([[0, 1], [2, 3]], ti.f32) + >>> y = 1 + >>> ti.math.step(x, y) + [[1.000000, 1.000000], [0.000000, 0.000000]] + """ + return ops.cast(x >= edge, float) + + +@func +def fract(x): + """Compute the fractional part of the argument, element-wise. + It's equivalent to `x - ti.floor(x)`. + + Args: + x (:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`): The + input value. + + Returns: + The fractional part of `x`. + + Example:: + + >>> x = ti.Vector([-1.2, -0.7, 0.3, 1.2]) + >>> ti.math.fract(x) + [0.800000, 0.300000, 0.300000, 0.200000] + """ + return x - ops.floor(x) + + +@func +def smoothstep(edge0, edge1, x): + """Performs smooth Hermite interpolation between 0 and 1 when + `edge0 < x < edge1`, element-wise. + + The arguments can be scalars or :class:`~taichi.Matrix`, + as long as they can be broadcasted to a common shape. + + This function is equivalent to the `smoothstep` in GLSL. + + Args: + edge0 (:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`): Specifies + the value of the lower edge of the Hermite function. + edge1 (:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`): Specifies + the value of the upper edge of the Hermite function. + x (:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`): Specifies + the source value for interpolation. + + Returns: + The smoothly interpolated value. + + Example:: + + >>> edge0 = ti.Vector([0, 1, 2]) + >>> edge1 = 1 + >>> x = ti.Vector([0.5, 1.5, 2.5]) + >>> ti.math.smoothstep(edge0, edge1, x) + [0.500000, 1.000000, 0.000000] + """ + t = clamp((x - edge0) / (edge1 - edge0), 0.0, 1.0) + return t * t * (3.0 - 2.0 * t) + + +@func +def sign(x): + """Extract the sign of the parameter, element-wise. + + Args: + x (:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`): The + input value. + + Returns: + -1.0 if `x` is less than 0.0, 0.0 if `x` is equal to 0.0, + and +1.0 if `x` is greater than 0.0. + + Example:: + + >>> x = ti.Vector([-1.0, 0.0, 1.0]) + >>> ti.math.sign(x) + [-1.000000, 0.000000, 1.000000] + """ + return ops.cast((x >= 0.0), float) - ops.cast((x <= 0.0), float) + + +@func +def normalize(v): + """Calculates the unit vector in the same direction as the + original vector `v`. + + It's equivalent to the `normalize` function is GLSL. + + Args: + x (:class:`~taichi.Matrix`): The vector to normalize. + + Returns: + The normalized vector :math:`v/|v|`. + + Example:: + + >>> v = ti.Vector([1, 2, 3]) + >>> ti.math.normalize(v) + [0.267261, 0.534522, 0.801784] + """ + return v / v.norm() + + +@func +def log2(x): + """Return the base 2 logarithm of `x`, so that if :math:`2^y=x`, + then :math:`y=\\log2(x)`. + + This is equivalent to the `log2` function is GLSL. + + Args: + x (:class:`~taichi.Matrix`): The input value. + + Returns: + The base 2 logarithm of `x`. + + Example:: + + >>> x = ti.Vector([1., 2., 3.]) + >>> ti.math.log2(x) + [0.000000, 1.000000, 1.584962] + """ + return ops.log(x) / static(ops.log(2.0)) + + +@func +def reflect(x, n): + """Calculate the reflection direction for an incident vector. + + For a given incident vector `x` and surface normal `n` this + function returns the reflection direction calculated as + :math:`x - 2.0 * dot(x, n) * n`. + + This is equivalent to the `reflect` function is GLSL. + + `n` should be normalized in order to achieve the desired result. + + Args: + x (:class:`~taichi.Matrix`): The incident vector. + n (:class:`~taichi.Matrix`): The normal vector. + + Returns: + The reflected vector. + + Example:: + + >>> x = ti.Vector([1., 2., 3.]) + >>> n = ti.Vector([0., 1., 0.]) + >>> ti.math.reflect(x, n) + [1.000000, -2.000000, 3.000000] + """ + k = x.dot(n) + return x - 2.0 * k * n + + +@func +def degrees(x): + """Convert `x` in radians to degrees, element-wise. + + Args: + x (:class:`~taichi.Matrix`): The input angle in radians. + + Returns: + angle in degrees. + + Example:: + + >>> x = ti.Vector([-pi/2, pi/2]) + >>> ti.math.degrees(x) + [-90.000000, 90.000000] + """ + return x * static(180.0 / pi) + + +@func +def radians(x): + """Convert `x` in degrees to radians, element-wise. + + Args: + x (:class:`~taichi.Matrix`): The input angle in degrees. + + Returns: + angle in radians. + + Example:: + + >>> x = ti.Vector([-90., 45., 90.]) + >>> ti.math.radians(x) / pi + [-0.500000, 0.250000, 0.500000] + """ + return x * static(pi / 180.0) + + +@func +def distance(x, y): + """Calculate the distance between two points. + + This function is equivalent to the `distance` function is GLSL. + + Args: + x (:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`): The first input point. + y (:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`): The second input point. + + Returns: + The distance between the two points. + + Example:: + + >>> x = ti.Vector([0, 0, 0]) + >>> y = ti.Vector([1, 1, 1]) + >>> ti.math.distance(x, y) + 1.732051 + """ + return (x - y).norm() + + +@func +def refract(x, n, eta): + """Calculate the refraction direction for an incident vector. + + This function is equivalent to the `refract` function in GLSL. + + Args: + x (:class:`~taichi.Matrix`): The incident vector. + n (:class:`~taichi.Matrix`): The normal vector. + eta (float): The ratio of indices of refraction. + + Returns: + :class:`~taichi.Matrix`: The refraction direction vector. + + Example:: + + >>> x = ti.Vector([1., 1., 1.]) + >>> y = ti.Vector([0, 1., 0]) + >>> ti.math.refract(x, y, 2.0) + [2.000000, -1.000000, 2.000000] + """ + dxn = x.dot(n) + result = zero(x) + k = 1.0 - eta * eta * (1.0 - dxn * dxn) + if k >= 0.0: + result = eta * x - (eta * dxn + ops.sqrt(k)) * n + return result + + +@func +def dot(x, y): + """Calculate the dot product of two vectors. + + Args: + x (:class:`~taichi.Matrix`): The first input vector. + y (:class:`~taichi.Matrix`): The second input vector. + + Returns: + The dot product of two vectors. + + Example:: + + >>> x = ti.Vector([1., 1., 0.]) + >>> y = ti.Vector([0., 1., 1.]) + >>> ti.math.dot(x, y) + 1.000000 + """ + return x.dot(y) + + +@func +def cross(x, y): + """Calculate the cross product of two vectors. + + The two input vectors must have the same dimension :math:`d <= 3`. + + This function calls the `cross` method of :class:`~taichi.Vector`. + + Args: + x (:class:`~taichi.Matrix`): The first input vector. + y (:class:`~taichi.Matrix`): The second input vector. + + Returns: + The cross product of two vectors. + + Example:: + + >>> x = ti.Vector([1., 0., 0.]) + >>> y = ti.Vector([0., 1., 0.]) + >>> ti.math.cross(x, y) + [0.000000, 0.000000, 1.000000] + """ + return x.cross(y) + + +@func +def mod(x, y): + """Compute value of one parameter modulo another, element-wise. + + Args: + x (:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`): The first input. + y (:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`): The second input. + + Returns: + the value of `x` modulo `y`. This is computed as `x - y * floor(x/y)`. + + Example:: + + >>> x = ti.Vector([-0.5, 0.5, 1.]) + >>> y = 1.0 + >>> ti.math.mod(x, y) + [0.500000, 0.500000, 0.000000] + """ + return x - y * ops.floor(x / y) + + +@func +def translate(dx, dy, dz): + """Constructs a translation Matrix with shape (4, 4). + + Args: + dx (float): delta x. + dy (float): delta y. + dz (float): delta z. + + Returns: + :class:`~taichi.math.mat4`: translation matrix. + + Example:: + + >>> ti.math.translate(1, 2, 3) + [[ 1. 0. 0. 1.] + [ 0. 1. 0. 2.] + [ 0. 0. 1. 3.] + [ 0. 0. 0. 1.]] + """ + return mat4( + [ + [1.0, 0.0, 0.0, dx], + [0.0, 1.0, 0.0, dy], + [0.0, 0.0, 1.0, dz], + [0.0, 0.0, 0.0, 1.0], + ] + ) + + +@func +def scale(sx, sy, sz): + """Constructs a scale Matrix with shape (4, 4). + + Args: + sx (float): scale x. + sy (float): scale y. + sz (float): scale z. + + Returns: + :class:`~taichi.math.mat4`: scale matrix. + + Example:: + + >>> ti.math.scale(1, 2, 3) + [[ 1. 0. 0. 0.] + [ 0. 2. 0. 0.] + [ 0. 0. 3. 0.] + [ 0. 0. 0. 1.]] + """ + return mat4( + [ + [sx, 0.0, 0.0, 0.0], + [0.0, sy, 0.0, 0.0], + [0.0, 0.0, sz, 0.0], + [0.0, 0.0, 0.0, 1.0], + ] + ) + + +@func +def rot_by_axis(axis, ang): + """Returns the 4x4 matrix representation of a 3d rotation with given axis `axis` and angle `ang`. + + Args: + axis (vec3): rotation axis + ang (float): angle in radians unit + + Returns: + :class:`~taichi.math.mat4`: rotation matrix + """ + c = ops.cos(ang) + s = ops.sin(ang) + + axis = normalize(axis) + temp = (1 - c) * axis + return mat4( + [ + [ + c + temp[0] * axis[0], + temp[0] * axis[1] + s * axis[2], + temp[0] * axis[2] - s * axis[1], + 0.0, + ], + [ + temp[1] * axis[0] - s * axis[2], + c + temp[1] * axis[1], + temp[1] * axis[2] + s * axis[0], + 0.0, + ], + [ + temp[2] * axis[0] + s * axis[1], + temp[2] * axis[1] - s * axis[0], + c + temp[2] * axis[2], + 0.0, + ], + [0.0, 0.0, 0.0, 1.0], + ] + ) + + +@func +def rot_yaw_pitch_roll(yaw, pitch, roll): + """Returns a 4x4 homogeneous rotation matrix representing the 3d rotation with Euler angles (rotate with Y axis first, X axis second, Z axis third). + + Args: + yaw (float): yaw angle in radians unit + pitch (float): pitch angle in radians unit + roll (float): roll angle in radians unit + + Returns: + :class:`~taichi.math.mat4`: rotation matrix + """ + ch = ops.cos(yaw) + sh = ops.sin(yaw) + cp = ops.cos(pitch) + sp = ops.sin(pitch) + cb = ops.cos(roll) + sb = ops.sin(roll) + + return mat4( + [ + [ch * cb + sh * sp * sb, sb * cp, -sh * cb + ch * sp * sb, 0.0], + [-ch * sb + sh * sp * cb, cb * cp, sb * sh + ch * sp * cb, 0.0], + [sh * cp, -sp, ch * cp, 0.0], + [0.0, 0.0, 0.0, 1.0], + ] + ) + + +@func +def rotation2d(ang): + """Returns the matrix representation of a 2d counter-clockwise rotation, + given the angle of rotation. + + Args: + ang (float): Angle of rotation in radians. + + Returns: + :class:`~taichi.math.mat2`: 2x2 rotation matrix. + + Example:: + + >>>ti.math.rotation2d(ti.math.radians(30)) + [[0.866025, -0.500000], [0.500000, 0.866025]] + """ + ca, sa = ops.cos(ang), ops.sin(ang) + return mat2([[ca, -sa], [sa, ca]]) + + +@func +def rotation3d(ang_x, ang_y, ang_z): + """Returns a 4x4 homogeneous rotation matrix representing the 3d rotation with Euler angles (rotate with Y axis first, X axis second, Z axis third). + + Args: + ang_x (float): angle in radians unit around X axis + ang_y (float): angle in radians unit around Y axis + ang_z (float): angle in radians unit around Z axis + Returns: + :class:`~taichi.math.mat4`: rotation matrix + Example: + + >>> ti.math.rotation3d(0.52, -0.785, 1.046) + [[ 0.05048351 -0.61339645 -0.78816002 0. ] + [ 0.65833154 0.61388511 -0.4355969 0. ] + [ 0.75103329 -0.49688014 0.4348093 0. ] + [ 0. 0. 0. 1. ]] + """ + return rot_yaw_pitch_roll(ang_z, ang_x, ang_y) + + +@func +def eye(n: template()): + """Returns the nxn identity matrix. + + Alias for :func:`~taichi.Matrix.identity`. + """ + return Matrix.identity(float, n) + + +@func +def length(x): + """Calculate the length of a vector. + + This function is equivalent to the `length` function in GLSL. + Args: + x (:class:`~taichi.Matrix`): The vector of which to calculate the length. + + Returns: + The Euclidean norm of the vector. + + Example:: + + >>> x = ti.Vector([1, 1, 1]) + >>> ti.math.length(x) + 1.732051 + """ + return x.norm() + + +@func +def determinant(m): + """Alias for :func:`taichi.Matrix.determinant`.""" + return m.determinant() + + +@func +def inverse(mat): # pylint: disable=R1710 + """Calculate the inverse of a matrix. + + This function is equivalent to the `inverse` function in GLSL. + + Args: + mat (:class:`taichi.Matrix`): The matrix of which to take the inverse. \ + Supports only 2x2, 3x3 and 4x4 matrices. + + Returns: + Inverse of the input matrix. + + Example:: + + >>> m = ti.math.mat3([(1, 1, 0), (0, 1, 1), (0, 0, 1)]) + >>> ti.math.inverse(m) + [[1.000000, -1.000000, 1.000000], + [0.000000, 1.000000, -1.000000], + [0.000000, 0.000000, 1.000000]] + """ + return mat.inverse() + + +@func +def isinf(x): + """Determines whether the parameter is positive or negative infinity, element-wise. + + Args: + x (:mod:`~taichi.types.primitive_types`, :class:`taichi.Matrix`): The input. + + Example: + + >>> x = ti.math.vec4(inf, -inf, nan, 1) + >>> ti.math.isinf(x) + [1, 1, 0, 0] + + Returns: + For each element i of the result, returns 1 if x[i] is posititve or negative floating point infinity and 0 otherwise. + """ + ftype = impl.get_runtime().default_fp + fx = ops.cast(x, ftype) + if static(ftype == f64): + y = ops.bit_cast(fx, u64) + return (ops.cast(y >> 32, u32) & 0x7FFFFFFF) == 0x7FF00000 and (ops.cast(y, u32) == 0) + + y = ops.bit_cast(fx, u32) + return (y & 0x7FFFFFFF) == 0x7F800000 + + +@func +def isnan(x): + """Determines whether the parameter is a number, element-wise. + + Args: + x (:mod:`~taichi.types.primitive_types`, :class:`taichi.Matrix`): The input. + + Example: + + >>> x = ti.math.vec4(nan, -nan, inf, 1) + >>> ti.math.isnan(x) + [1, 1, 0, 0] + + Returns: + For each element i of the result, returns 1 if x[i] is posititve or negative floating point NaN (Not a Number) and 0 otherwise. + """ + ftype = impl.get_runtime().default_fp + fx = ops.cast(x, ftype) + if static(ftype == f64): + y = ops.bit_cast(fx, u64) + return (ops.cast(y >> 32, u32) & 0x7FFFFFFF) + (ops.cast(y, u32) != 0) > 0x7FF00000 + + y = ops.bit_cast(fx, u32) + return (y & 0x7FFFFFFF) > 0x7F800000 + + +@func +def vdir(ang): + """Returns the 2d unit vector with argument equals `ang`. + + x (:mod:`~taichi.types.primitive_types`): The input angle in radians. + + Example: + + >>> x = pi / 2 + >>> ti.math.vdir(x) + [0, 1] + + Returns: + a 2d vector with argument equals `ang`. + """ + return vec2(cos(ang), sin(ang)) + + +@func +def popcnt(x): + return ops.popcnt(x) + + +@func +def clz(x): + return ops.clz(x) + + +__all__ = [ + "acos", + "asin", + "atan2", + "ceil", + "clamp", + "clz", + "cos", + "cross", + "degrees", + "determinant", + "distance", + "dot", + "e", + "exp", + "eye", + "floor", + "fract", + "inf", + "inverse", + "isinf", + "isnan", + "ivec2", + "ivec3", + "ivec4", + "length", + "log", + "log2", + "mat2", + "mat3", + "mat4", + "max", + "min", + "mix", + "mod", + "translate", + "scale", + "nan", + "normalize", + "pi", + "pow", + "popcnt", + "radians", + "reflect", + "refract", + "rot_by_axis", + "rot_yaw_pitch_roll", + "rotation2d", + "rotation3d", + "round", + "sign", + "sin", + "smoothstep", + "sqrt", + "step", + "tan", + "tanh", + "uvec2", + "uvec3", + "uvec4", + "vdir", + "vec2", + "vec3", + "vec4", +] diff --git a/local_packages/taichi/profiler/__init__.py b/local_packages/taichi/profiler/__init__.py new file mode 100644 index 0000000..780b2a9 --- /dev/null +++ b/local_packages/taichi/profiler/__init__.py @@ -0,0 +1,4 @@ +from taichi.profiler.kernel_metrics import * +from taichi.profiler.kernel_profiler import * +from taichi.profiler.memory_profiler import * +from taichi.profiler.scoped_profiler import * diff --git a/local_packages/taichi/profiler/kernel_metrics.py b/local_packages/taichi/profiler/kernel_metrics.py new file mode 100644 index 0000000..a30ee1c --- /dev/null +++ b/local_packages/taichi/profiler/kernel_metrics.py @@ -0,0 +1,258 @@ +from taichi._lib import core as _ti_core + + +class CuptiMetric: + """A class to add CUPTI metric for :class:`~taichi.profiler.kernel_profiler.KernelProfiler`. + + This class is designed to add user selected CUPTI metrics. + Only available for the CUDA backend now, i.e. you need ``ti.init(kernel_profiler=True, arch=ti.cuda)``. + For usage of this class, see examples in func :func:`~taichi.profiler.set_kernel_profiler_metrics` and :func:`~taichi.profiler.collect_kernel_profiler_metrics`. + + Args: + name (str): name of metric that collected by CUPTI toolkit. used by :func:`~taichi.profiler.set_kernel_profiler_metrics` and :func:`~taichi.profiler.collect_kernel_profiler_metrics`. + header (str): column header of this metric, used by :func:`~taichi.profiler.print_kernel_profiler_info`. + val_format (str): format for print metric value (and unit of this value), used by :func:`~taichi.profiler.print_kernel_profiler_info`. + scale (float): scale of metric value, used by :func:`~taichi.profiler.print_kernel_profiler_info`. + + Example:: + + >>> import taichi as ti + + >>> ti.init(kernel_profiler=True, arch=ti.cuda) + >>> num_elements = 128*1024*1024 + + >>> x = ti.field(ti.f32, shape=num_elements) + >>> y = ti.field(ti.f32, shape=()) + >>> y[None] = 0 + + >>> @ti.kernel + >>> def reduction(): + >>> for i in x: + >>> y[None] += x[i] + + >>> global_op_atom = ti.profiler.CuptiMetric( + >>> name='l1tex__t_set_accesses_pipe_lsu_mem_global_op_atom.sum', + >>> header=' global.atom ', + >>> val_format=' {:8.0f} ') + + >>> # add and set user defined metrics + >>> profiling_metrics = ti.profiler.get_predefined_cupti_metrics('global_access') + [global_op_atom] + >>> ti.profiler.set_kernel_profile_metrics(profiling_metrics) + + >>> for i in range(16): + >>> reduction() + >>> ti.profiler.print_kernel_profiler_info('trace') + + Note: + For details about using CUPTI in Taichi, please visit https://docs.taichi-lang.org/docs/profiler#advanced-mode. + """ + + def __init__(self, name="", header="unnamed_header", val_format=" {:8.0f} ", scale=1.0): + self.name = name + self.header = header + self.val_format = val_format + self.scale = scale + + +# Global Memory Metrics +dram_utilization = CuptiMetric( + name="dram__throughput.avg.pct_of_peak_sustained_elapsed", + header=" global.uti ", + val_format=" {:6.2f} % ", +) + +dram_bytes_sum = CuptiMetric( + name="dram__bytes.sum", + header=" global.R&W ", + val_format="{:9.3f} MB ", + scale=1.0 / 1024 / 1024, +) + +dram_bytes_throughput = CuptiMetric( + name="dram__bytes.sum.per_second", + header=" global.R&W/s ", + val_format="{:8.3f} GB/s ", + scale=1.0 / 1024 / 1024 / 1024, +) + +dram_bytes_read = CuptiMetric( + name="dram__bytes_read.sum", + header=" global.R ", + val_format="{:8.3f} MB ", + scale=1.0 / 1024 / 1024, +) + +dram_read_throughput = CuptiMetric( + name="dram__bytes_read.sum.per_second", + header=" global.R/s ", + val_format="{:8.3f} GB/s ", + scale=1.0 / 1024 / 1024 / 1024, +) + +dram_bytes_write = CuptiMetric( + name="dram__bytes_write.sum", + header=" global.W ", + val_format="{:8.3f} MB ", + scale=1.0 / 1024 / 1024, +) + +dram_write_throughput = CuptiMetric( + name="dram__bytes_write.sum.per_second", + header=" global.W/s ", + val_format="{:8.3f} GB/s ", + scale=1.0 / 1024 / 1024 / 1024, +) + +# Shared Memory Metrics +shared_utilization = CuptiMetric( + name="l1tex__data_pipe_lsu_wavefronts_mem_shared.avg.pct_of_peak_sustained_elapsed", + header=" uti.shared ", + val_format=" {:6.2f} % ", +) + +shared_transactions_load = CuptiMetric( + name="l1tex__data_pipe_lsu_wavefronts_mem_shared_op_ld.sum", + header=" shared.trans.W ", + val_format=" {:10.0f} ", +) + +shared_transactions_store = CuptiMetric( + name="l1tex__data_pipe_lsu_wavefronts_mem_shared_op_st.sum", + header=" shared.trans.R ", + val_format=" {:10.0f} ", +) + +shared_bank_conflicts_store = CuptiMetric( + name="l1tex__data_bank_conflicts_pipe_lsu_mem_shared_op_st.sum", + header=" bank.conflict.W ", + val_format=" {:10.0f} ", +) + +shared_bank_conflicts_load = CuptiMetric( + name="l1tex__data_bank_conflicts_pipe_lsu_mem_shared_op_ld.sum", + header=" bank.conflict.R ", + val_format=" {:10.0f} ", +) + +# Atomic Metrics +global_op_atom = CuptiMetric( + name="l1tex__t_set_accesses_pipe_lsu_mem_global_op_atom.sum", + header=" global.atom ", + val_format=" {:8.0f} ", +) + +global_op_reduction = CuptiMetric( + name="l1tex__t_set_accesses_pipe_lsu_mem_global_op_red.sum", + header=" global.red ", + val_format=" {:8.0f} ", +) + +# Hardware Utilization Metrics +sm_throughput = CuptiMetric( + name="smsp__cycles_active.avg.pct_of_peak_sustained_elapsed", + header=" core.uti ", + val_format=" {:6.2f} % ", +) + +dram_throughput = CuptiMetric( + name="gpu__dram_throughput.avg.pct_of_peak_sustained_elapsed", + header=" mem.uti ", + val_format=" {:6.2f} % ", +) + +l1tex_throughput = CuptiMetric( + name="l1tex__throughput.avg.pct_of_peak_sustained_elapsed", + header=" L1.uti ", + val_format=" {:6.2f} % ", +) + +l2_throughput = CuptiMetric( + name="lts__throughput.avg.pct_of_peak_sustained_elapsed", + header=" L2.uti ", + val_format=" {:6.2f} % ", +) + +# Misc Metrics +l1_hit_rate = CuptiMetric(name="l1tex__t_sector_hit_rate.pct", header=" L1.hit ", val_format=" {:6.2f} % ") + +l2_hit_rate = CuptiMetric(name="lts__t_sector_hit_rate.pct", header=" L2.hit ", val_format=" {:6.2f} % ") + +achieved_occupancy = CuptiMetric( + name="sm__warps_active.avg.pct_of_peak_sustained_active", + header=" occupancy", + val_format=" {:6.0f} ", +) + +# metric suite: global load & store +global_access = [ + dram_bytes_sum, + dram_bytes_throughput, + dram_bytes_read, + dram_read_throughput, + dram_bytes_write, + dram_write_throughput, +] + +# metric suite: shared load & store +shared_access = [ + shared_transactions_load, + shared_transactions_store, + shared_bank_conflicts_store, + shared_bank_conflicts_load, +] + +# metric suite: atomic access +atomic_access = [ + global_op_atom, + global_op_reduction, +] + +# metric suite: cache hit rate +cache_hit_rate = [ + l1_hit_rate, + l2_hit_rate, +] + +# metric suite: device throughput +device_utilization = [ + sm_throughput, + dram_throughput, + shared_utilization, + l1tex_throughput, + l2_throughput, +] + +# Predefined metrics suites +predefined_cupti_metrics = { + "global_access": global_access, + "shared_access": shared_access, + "atomic_access": atomic_access, + "cache_hit_rate": cache_hit_rate, + "device_utilization": device_utilization, +} + + +def get_predefined_cupti_metrics(name=""): + """Returns the specified cupti metric. + + Accepted arguments are 'global_access', 'shared_access', 'atomic_access', + 'cache_hit_rate', 'device_utilization'. + + Args: + name (str): cupti metri name. + """ + if name not in predefined_cupti_metrics: + _ti_core.warn("Valid Taichi predefined metrics list (str):") + for key in predefined_cupti_metrics: + _ti_core.warn(f" '{key}'") + return None + return predefined_cupti_metrics[name] + + +# Default metrics list +default_cupti_metrics = [dram_bytes_sum] +"""The metrics list, each is an instance of the :class:`~taichi.profiler.CuptiMetric`. +Default to `dram_bytes_sum`. +""" + +__all__ = ["CuptiMetric", "get_predefined_cupti_metrics"] diff --git a/local_packages/taichi/profiler/kernel_profiler.py b/local_packages/taichi/profiler/kernel_profiler.py new file mode 100644 index 0000000..3464d37 --- /dev/null +++ b/local_packages/taichi/profiler/kernel_profiler.py @@ -0,0 +1,590 @@ +from contextlib import contextmanager + +from taichi._lib import core as _ti_core +from taichi.lang import impl +from taichi.profiler.kernel_metrics import default_cupti_metrics + + +class StatisticalResult: + """Statistical result of records. + + Profiling records with the same kernel name will be counted in a ``StatisticalResult`` instance via function ``insert_record(time)``. + Currently, only the kernel elapsed time is counted, other statistics related to the kernel will be added in the feature. + """ + + def __init__(self, name): + self.name = name + self.counter = 0 + self.min_time = 0.0 + self.max_time = 0.0 + self.total_time = 0.0 + + def __lt__(self, other): + # For sorted() + return self.total_time < other.total_time + + def insert_record(self, time): + """Insert records with the same kernel name. + + Currently, only the kernel elapsed time is counted. + """ + if self.counter == 0: + self.min_time = time + self.max_time = time + self.counter += 1 + self.total_time += time + self.min_time = min(self.min_time, time) + self.max_time = max(self.max_time, time) + + +class KernelProfiler: + """Kernel profiler of Taichi. + + Kernel profiler acquires kernel profiling records from backend, counts records in Python scope, + and prints the results to the console by :func:`~taichi.profiler.kernel_profiler.KernelProfiler.print_info`. + + ``KernelProfiler`` now support detailed low-level performance metrics (such as memory bandwidth consumption) in its advanced mode. + This mode is only available for the CUDA backend with CUPTI toolkit, i.e. you need ``ti.init(kernel_profiler=True, arch=ti.cuda)``. + + Note: + For details about using CUPTI in Taichi, please visit https://docs.taichi-lang.org/docs/profiler#advanced-mode. + """ + + def __init__(self): + self._profiling_mode = False + self._profiling_toolkit = "default" + self._metric_list = [default_cupti_metrics] + self._total_time_ms = 0.0 + self._traced_records = [] + self._statistical_results = {} + + # public methods + + def set_kernel_profiler_mode(self, mode=False): + """Turn on or off :class:`~taichi.profiler.kernel_profiler.KernelProfiler`.""" + if type(mode) is bool: + self._profiling_mode = mode + else: + raise TypeError(f"Arg `mode` must be of type boolean. Type {type(mode)} is not supported.") + + def get_kernel_profiler_mode(self): + """Get status of :class:`~taichi.profiler.kernel_profiler.KernelProfiler`.""" + return self._profiling_mode + + def set_toolkit(self, toolkit_name="default"): + if self._check_not_turned_on_with_warning_message(): + return False + status = impl.get_runtime().prog.set_kernel_profiler_toolkit(toolkit_name) + if status is True: + self._profiling_toolkit = toolkit_name + else: + _ti_core.warn( + f"Failed to set kernel profiler toolkit ({toolkit_name}) , keep using ({self._profiling_toolkit})." + ) + return status + + def get_total_time(self): + """Get elapsed time of all kernels recorded in KernelProfiler. + + Returns: + time (float): total time in second. + """ + if self._check_not_turned_on_with_warning_message(): + return 0.0 + self._update_records() # kernel records + self._count_statistics() # _total_time_ms is counted here + return self._total_time_ms / 1000 # ms to s + + def clear_info(self): + """Clear all records both in front-end :class:`~taichi.profiler.kernel_profiler.KernelProfiler` and back-end instance ``KernelProfilerBase``. + + Note: + The values of ``self._profiling_mode`` and ``self._metric_list`` will not be cleared. + """ + if self._check_not_turned_on_with_warning_message(): + return None + # sync first + impl.get_runtime().prog.sync_kernel_profiler() + # then clear backend & frontend info + impl.get_runtime().prog.clear_kernel_profiler() + self._clear_frontend() + + return None + + def query_info(self, name): + """For docstring of this function, see :func:`~taichi.profiler.query_kernel_profiler_info`.""" + if self._check_not_turned_on_with_warning_message(): + return None + self._update_records() # kernel records + self._count_statistics() # statistics results + # TODO : query self.StatisticalResult in python scope + return impl.get_runtime().prog.query_kernel_profile_info(name) + + def set_metrics(self, metric_list=default_cupti_metrics): + """For docstring of this function, see :func:`~taichi.profiler.set_kernel_profiler_metrics`.""" + if self._check_not_turned_on_with_warning_message(): + return None + self._metric_list = metric_list + metric_name_list = [metric.name for metric in metric_list] + self.clear_info() + impl.get_runtime().prog.reinit_kernel_profiler_with_metrics(metric_name_list) + + return None + + @contextmanager + def collect_metrics_in_context(self, metric_list=default_cupti_metrics): + """This function is not exposed to user now. + + For usage of this function, see :func:`~taichi.profiler.collect_kernel_profiler_metrics`. + """ + if self._check_not_turned_on_with_warning_message(): + return None + self.set_metrics(metric_list) + yield self + self.set_metrics() # back to default metric list + + return None + + # mode of print_info + COUNT = "count" # print the statistical results (min,max,avg time) of Taichi kernels. + TRACE = "trace" # print the records of launched Taichi kernels with specific profiling metrics (time, memory load/store and core utilization etc.) + + def print_info(self, mode=COUNT): + """Print the profiling results of Taichi kernels. + + For usage of this function, see :func:`~taichi.profiler.print_kernel_profiler_info`. + + Args: + mode (str): the way to print profiling results. + """ + if self._check_not_turned_on_with_warning_message(): + return None + self._update_records() # kernel records + self._count_statistics() # statistics results + + # COUNT mode (default) : print statistics of all kernel + if mode == self.COUNT: + self._print_statistics_info() + # TRACE mode : print records of launched kernel + elif mode == self.TRACE: + self._print_kernel_info() + else: + raise ValueError("Arg `mode` must be of type 'str', and has the value 'count' or 'trace'.") + + return None + + # private methods + def _check_not_turned_on_with_warning_message(self): + if self._profiling_mode is False: + _ti_core.warn("use 'ti.init(kernel_profiler = True)' to turn on KernelProfiler.") + return True + return False + + def _clear_frontend(self): + """Clear member variables in :class:`~taichi.profiler.kernel_profiler.KernelProfiler`. + + Note: + The values of ``self._profiling_mode`` and ``self._metric_list`` will not be cleared. + """ + self._total_time_ms = 0.0 + self._traced_records.clear() + self._statistical_results.clear() + + def _update_records(self): + """Acquires kernel records from a backend.""" + impl.get_runtime().prog.sync_kernel_profiler() + impl.get_runtime().prog.update_kernel_profiler() + self._clear_frontend() + self._traced_records = impl.get_runtime().prog.get_kernel_profiler_records() + + def _count_statistics(self): + """Counts the statistics of launched kernels during the profiling period. + + The profiling records with the same kernel name are counted as a profiling result. + """ + for record in self._traced_records: + if self._statistical_results.get(record.name) is None: + self._statistical_results[record.name] = StatisticalResult(record.name) + self._statistical_results[record.name].insert_record(record.kernel_time) + self._total_time_ms += record.kernel_time + self._statistical_results = { + k: v + for k, v in sorted( + self._statistical_results.items(), + key=lambda item: item[1], + reverse=True, + ) + } + + def _make_table_header(self, mode): + header_str = f"Kernel Profiler({mode}, {self._profiling_toolkit})" + arch_name = f" @ {_ti_core.arch_name(impl.current_cfg().arch).upper()}" + device_name = impl.get_runtime().prog.get_kernel_profiler_device_name() + if len(device_name) > 1: # default device_name = ' ' + device_name = " on " + device_name + return header_str + arch_name + device_name + + def _print_statistics_info(self): + """Print statistics of launched kernels during the profiling period.""" + + # headers + table_header = table_header = self._make_table_header("count") + column_header = "[ % total count | min avg max ] Kernel name" + # partition line + line_length = max(len(column_header), len(table_header)) + outer_partition_line = "=" * line_length + inner_partition_line = "-" * line_length + + # message in one line + string_list = [] + values_list = [] + for key in self._statistical_results: + result = self._statistical_results[key] + fraction = result.total_time / self._total_time_ms * 100.0 + string_list.append("[{:6.2f}% {:7.3f} s {:6d}x |{:9.3f} {:9.3f} {:9.3f} ms] {}") + values_list.append( + [ + fraction, + result.total_time / 1000.0, + result.counter, + result.min_time, + result.total_time / result.counter, # avg_time + result.max_time, + result.name, + ] + ) + + # summary + summary_line = "[100.00%] Total execution time: " + summary_line += f"{self._total_time_ms/1000:7.3f} s " + summary_line += f"number of results: {len(self._statistical_results)}" + + # print + print(outer_partition_line) + print(table_header) + print(outer_partition_line) + print(column_header) + print(inner_partition_line) + result_num = len(self._statistical_results) + for idx in range(result_num): + print(string_list[idx].format(*values_list[idx])) + print(inner_partition_line) + print(summary_line) + print(outer_partition_line) + + def _print_kernel_info(self): + """Print a list of launched kernels during the profiling period.""" + metric_list = self._metric_list + values_num = len(self._traced_records[0].metric_values) + + # We currently get kernel attributes through CUDA Driver API, + # there is no corresponding implementation in other backends yet. + # Profiler dose not print invalid kernel attributes info for now. + kernel_attribute_state = self._traced_records[0].register_per_thread > 0 + + # headers + table_header = self._make_table_header("trace") + column_header = "[ start.time | kernel.time |" # default + if kernel_attribute_state: + column_header += " regs | shared mem | grid size | block size | occupancy |" # kernel_attributes + for idx in range(values_num): + column_header += metric_list[idx].header + "|" + column_header = (column_header + "] Kernel name").replace("|]", "]") + + # partition line + line_length = max(len(column_header), len(table_header)) + outer_partition_line = "=" * line_length + inner_partition_line = "-" * line_length + + # message in one line: formatted_str.format(*values) + fake_timestamp = 0.0 + string_list = [] + values_list = [] + for record in self._traced_records: + formatted_str = "[{:9.3f} ms |{:9.3f} ms |" # default + values = [fake_timestamp, record.kernel_time] # default + if kernel_attribute_state: + formatted_str += " {:4d} | {:6d} bytes | {:6d} | {:6d} | {:2d} blocks |" + values += [ + record.register_per_thread, + record.shared_mem_per_block, + record.grid_size, + record.block_size, + record.active_blocks_per_multiprocessor, + ] + for idx in range(values_num): + formatted_str += metric_list[idx].val_format + "|" + values += [record.metric_values[idx] * metric_list[idx].scale] + formatted_str = formatted_str + "] " + record.name + string_list.append(formatted_str.replace("|]", "]")) + values_list.append(values) + fake_timestamp += record.kernel_time + + # print + print(outer_partition_line) + print(table_header) + print(outer_partition_line) + print(column_header) + print(inner_partition_line) + record_num = len(self._traced_records) + for idx in range(record_num): + print(string_list[idx].format(*values_list[idx])) + print(inner_partition_line) + print(f"Number of records: {len(self._traced_records)}") + print(outer_partition_line) + + +_ti_kernel_profiler = KernelProfiler() + + +def get_default_kernel_profiler(): + """We have only one :class:`~taichi.profiler.kernelprofiler.KernelProfiler` instance(i.e. ``_ti_kernel_profiler``) now. + + For ``KernelProfiler`` using ``CuptiToolkit``, GPU devices can only work in a certain configuration. + Profiling mode and metrics are configured by the host(CPU) via CUPTI APIs, and device(GPU) will use + its counter registers to collect specific metrics. + So if there are multiple instances of ``KernelProfiler``, the device will work in the latest configuration, + the profiling configuration of other instances will be changed as a result. + For data retention purposes, multiple instances will be considered in the future. + """ + return _ti_kernel_profiler + + +def print_kernel_profiler_info(mode="count"): + """Print the profiling results of Taichi kernels. + + To enable this profiler, set ``kernel_profiler=True`` in ``ti.init()``. + ``'count'`` mode: print the statistics (min,max,avg time) of launched kernels, + ``'trace'`` mode: print the records of launched kernels with specific profiling metrics (time, memory load/store and core utilization etc.), + and defaults to ``'count'``. + + Args: + mode (str): the way to print profiling results. + + Example:: + + >>> import taichi as ti + + >>> ti.init(ti.cpu, kernel_profiler=True) + >>> var = ti.field(ti.f32, shape=1) + + >>> @ti.kernel + >>> def compute(): + >>> var[0] = 1.0 + + >>> compute() + >>> ti.profiler.print_kernel_profiler_info() + >>> # equivalent calls : + >>> # ti.profiler.print_kernel_profiler_info('count') + + >>> ti.profiler.print_kernel_profiler_info('trace') + + Note: + Currently the result of `KernelProfiler` could be incorrect on OpenGL + backend due to its lack of support for `ti.sync()`. + + For advanced mode of `KernelProfiler`, please visit https://docs.taichi-lang.org/docs/profiler#advanced-mode. + """ + get_default_kernel_profiler().print_info(mode) + + +def query_kernel_profiler_info(name): + """Query kernel elapsed time(min,avg,max) on devices using the kernel name. + + To enable this profiler, set `kernel_profiler=True` in `ti.init`. + + Args: + name (str): kernel name. + + Returns: + KernelProfilerQueryResult (class): with member variables(counter, min, max, avg) + + Example:: + + >>> import taichi as ti + + >>> ti.init(ti.cpu, kernel_profiler=True) + >>> n = 1024*1024 + >>> var = ti.field(ti.f32, shape=n) + + >>> @ti.kernel + >>> def fill(): + >>> for i in range(n): + >>> var[i] = 0.1 + + >>> fill() + >>> ti.profiler.clear_kernel_profiler_info() #[1] + >>> for i in range(100): + >>> fill() + >>> query_result = ti.profiler.query_kernel_profiler_info(fill.__name__) #[2] + >>> print("kernel executed times =",query_result.counter) + >>> print("kernel elapsed time(min_in_ms) =",query_result.min) + >>> print("kernel elapsed time(max_in_ms) =",query_result.max) + >>> print("kernel elapsed time(avg_in_ms) =",query_result.avg) + + Note: + [1] To get the correct result, query_kernel_profiler_info() must be used in conjunction with + clear_kernel_profiler_info(). + + [2] Currently the result of `KernelProfiler` could be incorrect on OpenGL + backend due to its lack of support for `ti.sync()`. + """ + return get_default_kernel_profiler().query_info(name) + + +def clear_kernel_profiler_info(): + """Clear all KernelProfiler records.""" + get_default_kernel_profiler().clear_info() + + +def get_kernel_profiler_total_time(): + """Get elapsed time of all kernels recorded in KernelProfiler. + + Returns: + time (float): total time in second. + """ + return get_default_kernel_profiler().get_total_time() + + +def set_kernel_profiler_toolkit(toolkit_name="default"): + """Set the toolkit used by KernelProfiler. + + Currently, we only support toolkits: ``'default'`` and ``'cupti'``. + + Args: + toolkit_name (str): string of toolkit name. + + Returns: + status (bool): whether the setting is successful or not. + + Example:: + + >>> import taichi as ti + + >>> ti.init(arch=ti.cuda, kernel_profiler=True) + >>> x = ti.field(ti.f32, shape=1024*1024) + + >>> @ti.kernel + >>> def fill(): + >>> for i in x: + >>> x[i] = i + + >>> ti.profiler.set_kernel_profiler_toolkit('cupti') + >>> for i in range(100): + >>> fill() + >>> ti.profiler.print_kernel_profiler_info() + + >>> ti.profiler.set_kernel_profiler_toolkit('default') + >>> for i in range(100): + >>> fill() + >>> ti.profiler.print_kernel_profiler_info() + """ + return get_default_kernel_profiler().set_toolkit(toolkit_name) + + +def set_kernel_profiler_metrics(metric_list=default_cupti_metrics): + """Set metrics that will be collected by the CUPTI toolkit. + + Args: + metric_list (list): a list of :class:`~taichi.profiler.CuptiMetric()` instances, default value: :data:`~taichi.profiler.kernel_metrics.default_cupti_metrics`. + + Example:: + + >>> import taichi as ti + + >>> ti.init(kernel_profiler=True, arch=ti.cuda) + >>> ti.profiler.set_kernel_profiler_toolkit('cupti') + >>> num_elements = 128*1024*1024 + + >>> x = ti.field(ti.f32, shape=num_elements) + >>> y = ti.field(ti.f32, shape=()) + >>> y[None] = 0 + + >>> @ti.kernel + >>> def reduction(): + >>> for i in x: + >>> y[None] += x[i] + + >>> # In the case of not parameter, Taichi will print its pre-defined metrics list + >>> ti.profiler.get_predefined_cupti_metrics() + >>> # get Taichi pre-defined metrics + >>> profiling_metrics = ti.profiler.get_predefined_cupti_metrics('shared_access') + + >>> global_op_atom = ti.profiler.CuptiMetric( + >>> name='l1tex__t_set_accesses_pipe_lsu_mem_global_op_atom.sum', + >>> header=' global.atom ', + >>> format=' {:8.0f} ') + >>> # add user defined metrics + >>> profiling_metrics += [global_op_atom] + + >>> # metrics setting will be retained until the next configuration + >>> ti.profiler.set_kernel_profiler_metrics(profiling_metrics) + >>> for i in range(16): + >>> reduction() + >>> ti.profiler.print_kernel_profiler_info('trace') + + Note: + Metrics setting will be retained until the next configuration. + """ + get_default_kernel_profiler().set_metrics(metric_list) + + +@contextmanager +def collect_kernel_profiler_metrics(metric_list=default_cupti_metrics): + """Set temporary metrics that will be collected by the CUPTI toolkit within this context. + + Args: + metric_list (list): a list of :class:`~taichi.profiler.CuptiMetric()` instances, default value: :data:`~taichi.profiler.kernel_metrics.default_cupti_metrics`. + + Example:: + + >>> import taichi as ti + + >>> ti.init(kernel_profiler=True, arch=ti.cuda) + >>> ti.profiler.set_kernel_profiler_toolkit('cupti') + >>> num_elements = 128*1024*1024 + + >>> x = ti.field(ti.f32, shape=num_elements) + >>> y = ti.field(ti.f32, shape=()) + >>> y[None] = 0 + + >>> @ti.kernel + >>> def reduction(): + >>> for i in x: + >>> y[None] += x[i] + + >>> # In the case of not parameter, Taichi will print its pre-defined metrics list + >>> ti.profiler.get_predefined_cupti_metrics() + >>> # get Taichi pre-defined metrics + >>> profiling_metrics = ti.profiler.get_predefined_cupti_metrics('device_utilization') + + >>> global_op_atom = ti.profiler.CuptiMetric( + >>> name='l1tex__t_set_accesses_pipe_lsu_mem_global_op_atom.sum', + >>> header=' global.atom ', + >>> format=' {:8.0f} ') + >>> # add user defined metrics + >>> profiling_metrics += [global_op_atom] + + >>> # metrics setting is temporary, and will be clear when exit from this context. + >>> with ti.profiler.collect_kernel_profiler_metrics(profiling_metrics): + >>> for i in range(16): + >>> reduction() + >>> ti.profiler.print_kernel_profiler_info('trace') + + Note: + The configuration of the ``metric_list`` will be clear when exit from this context. + """ + get_default_kernel_profiler().set_metrics(metric_list) + yield get_default_kernel_profiler() + get_default_kernel_profiler().set_metrics() + + +__all__ = [ + "clear_kernel_profiler_info", + "collect_kernel_profiler_metrics", + "get_kernel_profiler_total_time", + "print_kernel_profiler_info", + "query_kernel_profiler_info", + "set_kernel_profiler_metrics", + "set_kernel_profiler_toolkit", +] diff --git a/local_packages/taichi/profiler/memory_profiler.py b/local_packages/taichi/profiler/memory_profiler.py new file mode 100644 index 0000000..5a1d6f0 --- /dev/null +++ b/local_packages/taichi/profiler/memory_profiler.py @@ -0,0 +1,13 @@ +from taichi.lang.impl import get_runtime + + +def print_memory_profiler_info(): + """Memory profiling tool for LLVM backends with full sparse support. + + This profiler is automatically on. + """ + get_runtime().materialize() + get_runtime().prog.print_memory_profiler_info() + + +__all__ = ["print_memory_profiler_info"] diff --git a/local_packages/taichi/profiler/scoped_profiler.py b/local_packages/taichi/profiler/scoped_profiler.py new file mode 100644 index 0000000..0948248 --- /dev/null +++ b/local_packages/taichi/profiler/scoped_profiler.py @@ -0,0 +1,34 @@ +from taichi._lib import core as _ti_core + + +def print_scoped_profiler_info(): + """Print time elapsed on the host tasks in a hierarchical format. + + This profiler is automatically on. + + Call function imports from C++ : _ti_core.print_profile_info() + + Example:: + + >>> import taichi as ti + >>> ti.init(arch=ti.cpu) + >>> var = ti.field(ti.f32, shape=1) + >>> @ti.kernel + >>> def compute(): + >>> var[0] = 1.0 + >>> print("Setting var[0] =", var[0]) + >>> compute() + >>> ti.profiler.print_scoped_profiler_info() + """ + _ti_core.print_profile_info() + + +def clear_scoped_profiler_info(): + """Clear profiler's records about time elapsed on the host tasks. + + Call function imports from C++ : _ti_core.clear_profile_info() + """ + _ti_core.clear_profile_info() + + +__all__ = ["print_scoped_profiler_info", "clear_scoped_profiler_info"] diff --git a/local_packages/taichi/shaders/Circles_vk.frag b/local_packages/taichi/shaders/Circles_vk.frag new file mode 100644 index 0000000..9b03a98 --- /dev/null +++ b/local_packages/taichi/shaders/Circles_vk.frag @@ -0,0 +1,29 @@ +#version 450 + +layout(binding = 0) uniform UBO { + vec3 color; + int use_per_vertex_color; + int use_per_vertex_radius; + float radius; + float window_width; + float window_height; +} +ubo; + +layout(location = 1) in vec3 selected_color; +layout(location = 2) in vec2 pos_2d; + +layout(location = 0) out vec4 out_color; + +void main() { + float dist_sq = dot(pos_2d, pos_2d); + float alpha = 1.0 - step(1.0, dist_sq); + + /* + if (length(pos_2d) >= 1.0) { + discard; + } + */ + + out_color = vec4(selected_color, alpha); +} diff --git a/local_packages/taichi/shaders/Circles_vk.vert b/local_packages/taichi/shaders/Circles_vk.vert new file mode 100644 index 0000000..04fe2fa --- /dev/null +++ b/local_packages/taichi/shaders/Circles_vk.vert @@ -0,0 +1,45 @@ +#version 450 + +layout(location = 0) in vec3 in_position; +layout(location = 1) in vec3 in_normal; +layout(location = 2) in vec2 in_texcoord; +layout(location = 3) in vec4 in_color; + +layout(binding = 0) uniform UBO { + vec3 color; + int use_per_vertex_color; + int use_per_vertex_radius; + float radius; + float window_width; + float window_height; +} +ubo; + +layout(location = 1) out vec3 selected_color; +layout(location = 2) out vec2 pos_2d; + +const vec2 offsets[6] = { + vec2(-1.0f, 1.0f), + vec2(1.0f, -1.0f), + vec2(-1.0f, -1.0f), + vec2(-1.0f, 1.0f), + vec2(1.0f, 1.0f), + vec2(1.0f, -1.0f), +}; + +void main() { + float x = in_position.x * 2.0 - 1.0; + float y = -(in_position.y * 2.0 - 1.0); + + pos_2d = offsets[gl_VertexIndex % 6]; + float radius_1d = ubo.use_per_vertex_radius == 0 ? ubo.radius : in_normal.x; + + gl_Position = vec4(x, y, 0.0, 1.0); + gl_Position.xy += pos_2d * vec2(radius_1d / ubo.window_width * ubo.window_height, radius_1d) * 2.0; + + if (ubo.use_per_vertex_color == 0) { + selected_color = ubo.color; + } else { + selected_color = in_color.rgb; + } +} diff --git a/local_packages/taichi/shaders/Circles_vk_frag.spv b/local_packages/taichi/shaders/Circles_vk_frag.spv new file mode 100644 index 0000000..d62b7e6 Binary files /dev/null and b/local_packages/taichi/shaders/Circles_vk_frag.spv differ diff --git a/local_packages/taichi/shaders/Circles_vk_vert.spv b/local_packages/taichi/shaders/Circles_vk_vert.spv new file mode 100644 index 0000000..21dbc53 Binary files /dev/null and b/local_packages/taichi/shaders/Circles_vk_vert.spv differ diff --git a/local_packages/taichi/shaders/Lines_vk.frag b/local_packages/taichi/shaders/Lines_vk.frag new file mode 100644 index 0000000..c221ed6 --- /dev/null +++ b/local_packages/taichi/shaders/Lines_vk.frag @@ -0,0 +1,9 @@ +#version 450 + +layout(location = 0) in vec3 color; + +layout(location = 0) out vec4 out_color; + +void main() { + out_color = vec4(color, 1); +} diff --git a/local_packages/taichi/shaders/Lines_vk.vert b/local_packages/taichi/shaders/Lines_vk.vert new file mode 100644 index 0000000..a2f7098 --- /dev/null +++ b/local_packages/taichi/shaders/Lines_vk.vert @@ -0,0 +1,11 @@ +#version 450 + +layout(location = 0) in vec2 in_position; +layout(location = 1) in uint in_color_encoded; + +layout(location = 0) out vec3 frag_color; + +void main() { + gl_Position = vec4(in_position.x * 2.0 - 1.0, -(in_position.y * 2.0 - 1.0), 0.0, 1.0); + frag_color = unpackUnorm4x8(in_color_encoded).rgb; +} diff --git a/local_packages/taichi/shaders/Lines_vk_frag.spv b/local_packages/taichi/shaders/Lines_vk_frag.spv new file mode 100644 index 0000000..c886d71 Binary files /dev/null and b/local_packages/taichi/shaders/Lines_vk_frag.spv differ diff --git a/local_packages/taichi/shaders/Lines_vk_vert.spv b/local_packages/taichi/shaders/Lines_vk_vert.spv new file mode 100644 index 0000000..1223137 Binary files /dev/null and b/local_packages/taichi/shaders/Lines_vk_vert.spv differ diff --git a/local_packages/taichi/shaders/Mesh_vk.frag b/local_packages/taichi/shaders/Mesh_vk.frag new file mode 100644 index 0000000..dadfa11 --- /dev/null +++ b/local_packages/taichi/shaders/Mesh_vk.frag @@ -0,0 +1,71 @@ +#version 450 + +layout(location = 0) in vec3 frag_pos; +layout(location = 1) in vec3 frag_normal; +layout(location = 2) in vec2 frag_texcoord; + +layout(location = 0) out vec4 out_color; + +struct SceneUBO { + vec3 camera_pos; + mat4 view; + mat4 projection; + vec3 ambient_light; + int point_light_count; +}; + +layout(binding = 0) uniform UBORenderable { + vec3 color; + int use_per_vertex_color; + int two_sided; + float has_attribute; +} +ubo_renderable; + +layout(binding = 1) uniform UBOScene { + SceneUBO scene; + float window_width; + float window_height; + float tan_half_fov; + float aspect_ratio; +} +ubo_scene; + +struct PointLight { + vec3 pos; + vec3 color; +}; + +layout(binding = 2, std430) buffer SSBO { + PointLight point_lights[]; +} +ssbo; + +layout(location = 3) in vec4 selected_color; + +vec3 lambertian() { + vec3 ambient = ubo_scene.scene.ambient_light * selected_color.rgb; + vec3 result = ambient; + + for (int i = 0; i < ubo_scene.scene.point_light_count; ++i) { + vec3 light_color = ssbo.point_lights[i].color; + + vec3 light_dir = normalize(ssbo.point_lights[i].pos - frag_pos); + vec3 normal = normalize(frag_normal); + float factor = 0.0; + if(ubo_renderable.two_sided != 0){ + factor = abs(dot(light_dir, normal)); + } + else{ + factor = max(dot(light_dir, normal), 0); + } + vec3 diffuse = factor * selected_color.rgb * light_color; + result += diffuse; + } + + return result; +} + +void main() { + out_color = vec4(lambertian(), selected_color.a); +} diff --git a/local_packages/taichi/shaders/Mesh_vk.vert b/local_packages/taichi/shaders/Mesh_vk.vert new file mode 100644 index 0000000..1885b8a --- /dev/null +++ b/local_packages/taichi/shaders/Mesh_vk.vert @@ -0,0 +1,68 @@ +#version 450 + +layout(location = 0) in vec3 in_position; +layout(location = 1) in vec3 in_normal; +layout(location = 2) in vec2 in_texcoord; +layout(location = 3) in vec4 in_color; + +layout(location = 0) out vec3 frag_pos; +layout(location = 1) out vec3 frag_normal; +layout(location = 2) out vec2 frag_texcoord; +layout(location = 3) out vec4 selected_color; + +struct SceneUBO { + vec3 camera_pos; + mat4 view; + mat4 projection; + vec3 ambient_light; + int point_light_count; +}; + +struct PointLight { + vec3 pos; + vec3 color; +}; + +struct MeshAttribute { + mat4 model; +}; + +layout(binding = 0) uniform UBORenderable { + vec3 color; + int use_per_vertex_color; + int two_sided; + float has_attribute; +} +ubo_renderable; + +layout(binding = 1) uniform UBOScene { + SceneUBO scene; + float window_width; + float window_height; + float tan_half_fov; + float aspect_ratio; +} +ubo_scene; + +layout(binding = 3, std430) buffer MeshAttributeBuffer { + MeshAttribute mesh_attr[]; +} +mesh_attr_buffer; + +void main() { + mat4 model_tmp = transpose(mesh_attr_buffer.mesh_attr[gl_InstanceIndex].model); + // if mesh attributes not given, then use Identity as model matrix + mat4 model = mat4(1.0) * (1.0 - ubo_renderable.has_attribute) + model_tmp * ubo_renderable.has_attribute; + + gl_Position = ubo_scene.scene.projection * ubo_scene.scene.view * model * vec4(in_position, 1.0); + gl_Position.y *= -1.0; + frag_texcoord = in_texcoord; + frag_pos = in_position; + frag_normal = in_normal; + + if (ubo_renderable.use_per_vertex_color == 0) { + selected_color = vec4(ubo_renderable.color, 1.0); + } else { + selected_color = in_color; + } +} diff --git a/local_packages/taichi/shaders/Mesh_vk_frag.spv b/local_packages/taichi/shaders/Mesh_vk_frag.spv new file mode 100644 index 0000000..b6d8cfc Binary files /dev/null and b/local_packages/taichi/shaders/Mesh_vk_frag.spv differ diff --git a/local_packages/taichi/shaders/Mesh_vk_vert.spv b/local_packages/taichi/shaders/Mesh_vk_vert.spv new file mode 100644 index 0000000..b83db25 Binary files /dev/null and b/local_packages/taichi/shaders/Mesh_vk_vert.spv differ diff --git a/local_packages/taichi/shaders/SceneLines2quad_vk_comp.spv b/local_packages/taichi/shaders/SceneLines2quad_vk_comp.spv new file mode 100644 index 0000000..ec9dcba Binary files /dev/null and b/local_packages/taichi/shaders/SceneLines2quad_vk_comp.spv differ diff --git a/local_packages/taichi/shaders/SceneLines_vk.frag b/local_packages/taichi/shaders/SceneLines_vk.frag new file mode 100644 index 0000000..c221ed6 --- /dev/null +++ b/local_packages/taichi/shaders/SceneLines_vk.frag @@ -0,0 +1,9 @@ +#version 450 + +layout(location = 0) in vec3 color; + +layout(location = 0) out vec4 out_color; + +void main() { + out_color = vec4(color, 1); +} diff --git a/local_packages/taichi/shaders/SceneLines_vk.vert b/local_packages/taichi/shaders/SceneLines_vk.vert new file mode 100644 index 0000000..d1a868e --- /dev/null +++ b/local_packages/taichi/shaders/SceneLines_vk.vert @@ -0,0 +1,12 @@ +#version 450 + +layout(location = 0) in vec4 in_position; +layout(location = 1) in vec4 in_color; + +layout(location = 0) out vec3 frag_color; + +void main() { + gl_Position = in_position; + gl_Position.y = -gl_Position.y; + frag_color = in_color.rgb; +} diff --git a/local_packages/taichi/shaders/SceneLines_vk_frag.spv b/local_packages/taichi/shaders/SceneLines_vk_frag.spv new file mode 100644 index 0000000..c886d71 Binary files /dev/null and b/local_packages/taichi/shaders/SceneLines_vk_frag.spv differ diff --git a/local_packages/taichi/shaders/SceneLines_vk_vert.spv b/local_packages/taichi/shaders/SceneLines_vk_vert.spv new file mode 100644 index 0000000..e508cb1 Binary files /dev/null and b/local_packages/taichi/shaders/SceneLines_vk_vert.spv differ diff --git a/local_packages/taichi/shaders/SetImage_vk.frag b/local_packages/taichi/shaders/SetImage_vk.frag new file mode 100644 index 0000000..eb1e635 --- /dev/null +++ b/local_packages/taichi/shaders/SetImage_vk.frag @@ -0,0 +1,21 @@ +#version 450 + +layout(binding = 0) uniform sampler2D texSampler; + +layout(location = 0) in vec2 frag_texcoord; + +layout(location = 0) out vec4 out_color; + +layout(binding = 1) uniform UBO { + vec2 lower_bound; + vec2 upper_bound; + float x_factor; + float y_factor; + int is_transposed; +} ubo; + +void main() { + vec2 coord = frag_texcoord * vec2(ubo.x_factor,ubo.y_factor); + coord = clamp(coord, ubo.lower_bound, ubo.upper_bound); + out_color = textureLod(texSampler, ubo.is_transposed != 0 ? coord.yx : coord, 0); +} diff --git a/local_packages/taichi/shaders/SetImage_vk.vert b/local_packages/taichi/shaders/SetImage_vk.vert new file mode 100644 index 0000000..b8610c5 --- /dev/null +++ b/local_packages/taichi/shaders/SetImage_vk.vert @@ -0,0 +1,15 @@ +#version 450 + +// layout(binding = 0) uniform UniformBufferObject {} ubo; + +layout(location = 0) in vec3 in_position; +layout(location = 1) in vec3 in_normal; +layout(location = 2) in vec2 in_texcoord; +layout(location = 3) in vec4 in_color; + +layout(location = 0) out vec2 frag_texcoord; + +void main() { + gl_Position = vec4(in_position.xy, 0.0, 1.0); + frag_texcoord = in_texcoord; +} diff --git a/local_packages/taichi/shaders/SetImage_vk_frag.spv b/local_packages/taichi/shaders/SetImage_vk_frag.spv new file mode 100644 index 0000000..54da8e9 Binary files /dev/null and b/local_packages/taichi/shaders/SetImage_vk_frag.spv differ diff --git a/local_packages/taichi/shaders/SetImage_vk_vert.spv b/local_packages/taichi/shaders/SetImage_vk_vert.spv new file mode 100644 index 0000000..0c41bc9 Binary files /dev/null and b/local_packages/taichi/shaders/SetImage_vk_vert.spv differ diff --git a/local_packages/taichi/shaders/Triangles_vk.frag b/local_packages/taichi/shaders/Triangles_vk.frag new file mode 100644 index 0000000..e4850b6 --- /dev/null +++ b/local_packages/taichi/shaders/Triangles_vk.frag @@ -0,0 +1,16 @@ +#version 450 + +layout(location = 0) in vec2 frag_texcoord; +layout(location = 1) in vec3 selected_color; + +layout(location = 0) out vec4 out_color; + +layout(binding = 0) uniform UniformBufferObject { + vec3 color; + int use_per_vertex_color; +} +ubo; + +void main() { + out_color = vec4(selected_color, 1); +} diff --git a/local_packages/taichi/shaders/Triangles_vk.vert b/local_packages/taichi/shaders/Triangles_vk.vert new file mode 100644 index 0000000..914396b --- /dev/null +++ b/local_packages/taichi/shaders/Triangles_vk.vert @@ -0,0 +1,29 @@ +#version 450 + +layout(location = 0) in vec3 in_position; +layout(location = 1) in vec3 in_normal; +layout(location = 2) in vec2 in_texcoord; +layout(location = 3) in vec4 in_color; + +layout(location = 0) out vec2 frag_texcoord; +layout(location = 1) out vec3 selected_color; + +layout(binding = 0) uniform UniformBufferObject { + vec3 color; + int use_per_vertex_color; +} +ubo; + +void main() { + float x = in_position.x * 2.0 - 1.0; + float y = -(in_position.y * 2.0 - 1.0); + + gl_Position = vec4(x, y, 0.0, 1.0); + frag_texcoord = in_texcoord; + + if (ubo.use_per_vertex_color == 0) { + selected_color = ubo.color; + } else { + selected_color = in_color.rgb; + } +} diff --git a/local_packages/taichi/shaders/Triangles_vk_frag.spv b/local_packages/taichi/shaders/Triangles_vk_frag.spv new file mode 100644 index 0000000..4e0e396 Binary files /dev/null and b/local_packages/taichi/shaders/Triangles_vk_frag.spv differ diff --git a/local_packages/taichi/shaders/Triangles_vk_vert.spv b/local_packages/taichi/shaders/Triangles_vk_vert.spv new file mode 100644 index 0000000..179cd6d Binary files /dev/null and b/local_packages/taichi/shaders/Triangles_vk_vert.spv differ diff --git a/local_packages/taichi/shaders/lines2quad_vk_comp.spv b/local_packages/taichi/shaders/lines2quad_vk_comp.spv new file mode 100644 index 0000000..5b7aed0 Binary files /dev/null and b/local_packages/taichi/shaders/lines2quad_vk_comp.spv differ diff --git a/local_packages/taichi/sparse/__init__.py b/local_packages/taichi/sparse/__init__.py new file mode 100644 index 0000000..00b4096 --- /dev/null +++ b/local_packages/taichi/sparse/__init__.py @@ -0,0 +1 @@ +from ._sparse_grid import * diff --git a/local_packages/taichi/sparse/_sparse_grid.py b/local_packages/taichi/sparse/_sparse_grid.py new file mode 100644 index 0000000..e581990 --- /dev/null +++ b/local_packages/taichi/sparse/_sparse_grid.py @@ -0,0 +1,75 @@ +from taichi.lang.impl import grouped, root, static +from taichi.lang.kernel_impl import kernel +from taichi.lang.misc import ij, ijk +from taichi.lang.snode import is_active +from taichi.lang.struct import Struct +from taichi.types.annotations import template +from taichi.types.primitive_types import f32 + + +def grid(field_dict, shape): + """Creates a 2D/3D sparse grid with each element is a struct. The struct is placed on a bitmasked snode. + + Args: + field_dict (dict): a dict, each item is like `name: type`. + shape (Tuple[int]): shape of the field. + Returns: + x: the created sparse grid, which is a bitmasked `ti.Struct.field`. + + Examples:: + # create a 2D sparse grid + >>> grid = ti.sparse.grid({'pos': ti.math.vec2, 'mass': ti.f32, 'grid2particles': ti.types.vector(20, ti.i32)}, shape=(10, 10)) + + # access + >>> grid[0, 0].pos = ti.math.vec2(1.0, 2.0) + >>> grid[0, 0].mass = 1.0 + >>> grid[0, 0].grid2particles[2] = 123 + + # print the usage of the sparse grid, which is in [0,1] + >>> print(ti.sparse.usage(grid)) + # 0.009999999776482582 + """ + x = Struct.field(field_dict) + if len(shape) == 2: + snode = root.bitmasked(ij, shape) + snode.place(x) + elif len(shape) == 3: + snode = root.bitmasked(ijk, shape) + snode.place(x) + else: + raise Exception("Only 2D and 3D sparse grids are supported") + return x + + +@kernel +def usage(x: template()) -> f32: + """ + Get the usage of the sparse grid, which is in [0,1] + + Args: + x(struct field): the sparse grid to be checked. + Returns: + usage(f32): the usage of the sparse grid, which is in [0,1] + + Examples:: + >>> grid = ti.sparse.grid({'pos': ti.math.vec2, 'mass': ti.f32, 'grid2particles': ti.types.vector(20, ti.i32)}, shape=(10, 10)) + >>> grid[0, 0].mass = 1.0 + >>> print(ti.sparse.usage(grid)) + # 0.009999999776482582 + """ + cnt = 0 + for I in grouped(x.parent()): + if is_active(x.parent(), I): + cnt += 1 + total = 1.0 + if static(len(x.shape) == 2): + total = x.shape[0] * x.shape[1] + elif static(len(x.shape) == 3): + total = x.shape[0] * x.shape[1] * x.shape[2] + else: + raise ValueError("The dimension of the sparse grid should be 2 or 3") + res = cnt / total + return res + + +__all__ = ["grid", "usage"] diff --git a/local_packages/taichi/tools/__init__.py b/local_packages/taichi/tools/__init__.py new file mode 100644 index 0000000..a66c175 --- /dev/null +++ b/local_packages/taichi/tools/__init__.py @@ -0,0 +1,12 @@ +"""Taichi utility module. + +- `image` submodule for image io. +- `video` submodule for exporting results to video files. +- `diagnose` submodule for printing system environment information. +""" + +from taichi.tools.diagnose import * +from taichi.tools.image import * +from taichi.tools.np2ply import * +from taichi.tools.video import * +from taichi.tools.vtk import * diff --git a/local_packages/taichi/tools/diagnose.py b/local_packages/taichi/tools/diagnose.py new file mode 100644 index 0000000..db1b263 --- /dev/null +++ b/local_packages/taichi/tools/diagnose.py @@ -0,0 +1,122 @@ +import locale +import os +import platform +import subprocess +import sys + + +def main(): + print("Taichi system diagnose:") + print("") + executable = sys.executable + + print(f"python: {sys.version}") + print(f"system: {sys.platform}") + print(f"executable: {executable}") + print(f"platform: {platform.platform()}") + print(f'architecture: {" ".join(platform.architecture())}') + print(f"uname: {platform.uname()}") + + print(f'locale: {".".join(locale.getdefaultlocale())}') + print(f'PATH: {os.environ.get("PATH")}') + print(f"PYTHONPATH: {sys.path}") + print("") + + try: + lsb_release = subprocess.check_output(["lsb_release", "-a"]) + except Exception as e: + print(f"`lsb_release` not available: {e}") + else: + print(f"{lsb_release.decode()}") + + for k, v in os.environ.items(): + if k.startswith("TI_"): + print(f"{k}={v}") + print("") + + def try_print(tag, expr): + try: + cmd = f'import taichi as ti; print("===="); print({expr}, end="")' + ret = subprocess.check_output([executable, "-c", cmd]).decode() + ret = ret.split("====" + os.linesep, maxsplit=1)[1] + print(f"{tag}: {ret}") + except Exception as e: + print(f"{tag}: ERROR {e}") + + print("") + try_print("import", "ti") + print("") + for arch in ["cpu", "metal", "opengl", "cuda", "vulkan"]: + try_print(arch, f"ti.lang.misc.is_arch_supported(ti.{arch})") + print("") + + try: + glewinfo = subprocess.check_output(["glewinfo"]) + except Exception as e: + print(f"`glewinfo` not available: {e}") + else: + for line in glewinfo.decode().splitlines(): + if line.startswith("OpenGL version"): + print(line) + continue + + exts = [ + "GL_ARB_compute_shader", + "GL_ARB_gpu_shader_int64", + "GL_NV_shader_atomic_float", + "GL_NV_shader_atomic_float64", + "GL_NV_shader_atomic_int64", + ] + if line.split(":")[0] in exts: + print(line) + + print("") + try: + nvidia_smi = subprocess.check_output(["nvidia-smi"]) + except Exception as e: + print(f"`nvidia-smi` not available: {e}") + else: + print(f"{nvidia_smi.decode()}") + + try: + ti_header = subprocess.check_output([executable, "-c", "import taichi"]) + except Exception as e: + print(f"`import taichi` failed: {e}") + else: + print(f"{ti_header.decode()}") + + try: + ti_init_test = subprocess.check_output([executable, "-c", "import taichi as ti; ti.init()"]) + except Exception as e: + print(f"`ti.init()` failed: {e}") + else: + print(f"{ti_init_test.decode()}") + + try: + ti_opengl_test = subprocess.check_output([executable, "-c", "import taichi as ti; ti.init(arch=ti.opengl)"]) + except Exception as e: + print(f"Taichi OpenGL test failed: {e}") + else: + print(f"{ti_opengl_test.decode()}") + + try: + ti_cuda_test = subprocess.check_output([executable, "-c", "import taichi as ti; ti.init(arch=ti.cuda)"]) + except Exception as e: + print(f"Taichi CUDA test failed: {e}") + else: + print(f"{ti_cuda_test.decode()}") + + try: + ti_laplace = subprocess.check_output([executable, "-m", "taichi", "example", "minimal"]) + except Exception as e: + print(f"`python/taichi/examples/algorithm/laplace.py` failed: {e}") + else: + print(f"{ti_laplace.decode()}") + + print("Consider attaching this log when maintainers ask about system information.") + + +if __name__ == "__main__": + main() + +__all__ = [] diff --git a/local_packages/taichi/tools/image.py b/local_packages/taichi/tools/image.py new file mode 100644 index 0000000..0ba0adf --- /dev/null +++ b/local_packages/taichi/tools/image.py @@ -0,0 +1,127 @@ +import numpy as np +from taichi._lib import core as _ti_core + +import taichi as ti + +import ctypes + + +def cook_image_to_bytes(img): + """ + Takes a NumPy array or Taichi field of any type. + Returns a NumPy array of uint8. + This is used by ti.tools.imwrite. + """ + if not isinstance(img, np.ndarray): + img = img.to_numpy() + + if img.dtype in [np.uint16, np.uint32, np.uint64]: + img = (img // (np.iinfo(img.dtype).max // 256)).astype(np.uint8) + elif img.dtype in [np.float32, np.float64]: + img = (np.clip(img, 0, 1) * 255.0).astype(np.uint8) + elif img.dtype != np.uint8: + raise ValueError(f"Data type {img.dtype} not supported in ti.tools.imwrite") + + assert len(img.shape) in [2, 3], "Image must be either RGB/RGBA or greyscale" + + if len(img.shape) == 2: + img = img.reshape(*img.shape, 1) + + assert img.shape[2] in [1, 3, 4], "Image must be either RGB/RGBA or greyscale" + + return img.swapaxes(0, 1)[::-1, :] + + +def imresize(img, w, h=None): + """Resize an image to a specific size. + + Args: + img (Union[ti.field, np.ndarray]): A field of of array with shape `(width, height, ...)` + w (int): The output width after resize. + h (int, optional): The output height after resize, will be the same as width if not set. Default to `None`. + + Returns: + np.ndarray: An output image after resize input. + """ + if not isinstance(img, np.ndarray): + img = img.to_numpy() + if h is None: + h = w + if (w, h) == img.shape[:2]: + return img + assert isinstance(w, int) and isinstance(h, int) and w > 1 and h > 1 + u, v = (img.shape[0]) / (w), (img.shape[1]) / (h) + x = np.clip(np.arange(w) * u, 0, img.shape[0] - 1).astype(np.int32) + y = np.clip(np.arange(h) * v, 0, img.shape[1] - 1).astype(np.int32) + return img[tuple(np.meshgrid(x, y))].swapaxes(0, 1) + + +def imwrite(img, filename): + """Save a field to a a specific file. + + Args: + img (Union[ti.field, np.ndarray]): A field of shape `(height, width)` or `(height, width, 3)` or `(height, width, 4)`, \ + if dtype is float-type (`ti.f16`, `ti.f32`, `np.float32` etc), **the value of each pixel should be float between \[0.0, 1.0\]**. Otherwise `ti.tools.imwrite` will first clip them into \[0.0, 1.0\]\ + if dtype is int-type (`ti.u8`, `ti.u16`, `np.uint8` etc), , **the value of each pixel can be any valid integer in its own bounds**. These integers in this field will be scaled to \[0, 255\] by being divided over the upper bound of its basic type accordingly. + filename (str): The filename to save to. + """ + img = cook_image_to_bytes(img) + img = np.ascontiguousarray(img) + ptr = img.ctypes.data + resy, resx, comp = img.shape + _ti_core.imwrite(filename, ptr, resx, resy, comp) + + +def imread(filename, channels=0): + """Load image from a specific file. + + Args: + filename (str): An image filename to load from. + channels (int, optional): The channels hint of input image, Default to 0. + + Returns: + np.ndarray : An output image loaded from given filename. + """ + ptr, resx, resy, comp = _ti_core.imread(filename, channels) + img = np.copy(np.ctypeslib.as_array((ctypes.c_uint8 * resx * resy * comp).from_address(ptr))).reshape( + resy, resx, comp + ) + _ti_core.imfree(ptr) + return img.swapaxes(0, 1)[:, ::-1, :] + + +def imshow(img, title="imshow"): + """Display a taichi.field or a numpy.ndarray in a Taichi GUI window or an interactive Ipython notebook. + For an interactive Ipython environment, the image will be shown in the notebook. + + Args: + img (Union[ti.field, np.ndarray]): A field of of array with shape `(width, height)` or `(height, width, 3)` or `(height, width, 4)`. + title (str, optional): The title of GUI window. Default to `imshow`. + """ + try: # check if we are in Ipython environment + get_ipython() + except: + if not isinstance(img, np.ndarray): + img = img.to_numpy() + assert len(img.shape) in [ + 2, + 3, + ], "Image must be either RGB/RGBA or greyscale" + + with ti.GUI(title, res=img.shape[:2]) as gui: + img = gui.cook_image(img) + while gui.running: + if gui.get_event(ti.GUI.ESCAPE): + gui.running = False + + gui.set_image(img) + gui.show() + else: + import IPython.display # pylint: disable=C0415 + import PIL.Image # pylint: disable=C0415 + + img = cook_image_to_bytes(img) + IPython.display.display(PIL.Image.fromarray(img)) + + +__all__ = ["imread", "imresize", "imshow", "imwrite"] diff --git a/local_packages/taichi/tools/np2ply.py b/local_packages/taichi/tools/np2ply.py new file mode 100644 index 0000000..a124c52 --- /dev/null +++ b/local_packages/taichi/tools/np2ply.py @@ -0,0 +1,362 @@ +# convert numpy array to ply files +import sys + +import numpy as np + + +class PLYWriter: + """Writes `numpy.array` data to `ply` files. + + Args: + num_vertices (int): number of vertices. + num_faces (int, optional): number of faces. + face_type (str): `tri` or `quad`. + comment (str): comment message. + """ + + def __init__( + self, + num_vertices: int, + num_faces=0, + face_type="tri", + comment="created by PLYWriter", + ): + assert num_vertices > 0, "num_vertices should be greater than 0" + assert num_faces >= 0, "num_faces shouldn't be less than 0" + assert face_type == "tri" or face_type == "quad", "Only tri and quad faces are supported for now" + + self.ply_supported_types = [ + "char", + "uchar", + "short", + "ushort", + "int", + "uint", + "float", + "double", + ] + self.corresponding_numpy_types = [ + np.int8, + np.uint8, + np.int16, + np.uint16, + np.int32, + np.uint32, + np.float32, + np.float64, + ] + self.type_map = {} + for i, ply_type in enumerate(self.ply_supported_types): + self.type_map[ply_type] = self.corresponding_numpy_types[i] + + self.num_vertices = num_vertices + self.num_vertex_channels = 0 + self.vertex_channels = [] + self.vertex_data_type = [] + self.vertex_data = [] + self.num_faces = num_faces + self.num_face_channels = 0 + self.face_channels = [] + self.face_data_type = [] + self.face_data = [] + self.face_type = face_type + if face_type == "tri": + self.face_indices = -np.ones((self.num_faces, 3), dtype=np.int32) + elif face_type == "quad": + self.face_indices = -np.ones((self.num_faces, 4), dtype=np.int32) + self.comment = comment + + def add_vertex_channel(self, key: str, data_type: str, data: np.array): + if data_type not in self.ply_supported_types: + print("Unknown type " + data_type + " detected, skipping this channel") + return + if data.ndim == 1: + assert data.size == self.num_vertices, "The dimension of the vertex channel is not correct" + self.num_vertex_channels += 1 + if key in self.vertex_channels: + print("WARNING: duplicate key " + key + " detected") + self.vertex_channels.append(key) + self.vertex_data_type.append(data_type) + self.vertex_data.append(self.type_map[data_type](data)) + else: + num_col = data.size // self.num_vertices + assert ( + data.ndim == 2 and data.size == num_col * self.num_vertices + ), "The dimension of the vertex channel is not correct" + data.shape = (self.num_vertices, num_col) + self.num_vertex_channels += num_col + for i in range(num_col): + item_key = key + "_" + str(i + 1) + if item_key in self.vertex_channels: + print("WARNING: duplicate key " + item_key + " detected") + self.vertex_channels.append(item_key) + self.vertex_data_type.append(data_type) + self.vertex_data.append(self.type_map[data_type](data[:, i])) + + def add_vertex_pos(self, x: np.array, y: np.array, z: np.array): + """Set the (x, y, z) coordinates of the vertices. + + Args: + x (`numpy.array(float)`): x-coordinates of the vertices. + y (`numpy.array(float)`): y-coordinates of the vertices. + z (`numpy.array(float)`): z-coordinates of the vertices. + """ + self.add_vertex_channel("x", "float", x) + self.add_vertex_channel("y", "float", y) + self.add_vertex_channel("z", "float", z) + + # TODO active and refactor later if user feedback indicates the necessity for a compact the input list + # pass ti vector/matrix field directly + # def add_vertex_pos(self, pos): + # assert isinstance(pos, (np.ndarray, ti.Matrix)) + # if not isinstance(pos, np.ndarray): + # pos = pos.to_numpy() + # dim = pos.shape[pos.ndim-1] + # assert dim == 2 or dim == 3, "Only 2D and 3D positions are supported" + # n = pos.size // dim + # assert n == self.num_vertices, "Size of the input is not correct" + # pos = np.reshape(pos, (n, dim)) + # self.add_vertex_channel("x", "float", pos[:, 0]) + # self.add_vertex_channel("y", "float", pos[:, 1]) + # if(dim == 3): + # self.add_vertex_channel("z", "float", pos[:, 2]) + # if(dim == 2): + # self.add_vertex_channel("z", "float", np.zeros(n)) + + def add_vertex_normal(self, nx: np.array, ny: np.array, nz: np.array): + """Add normal vectors at the vertices. + + The three arguments are all numpy arrays of float type and have + the same length. + + Args: + nx (`numpy.array(float)`): x-coordinates of the normal vectors. + ny (`numpy.array(float)`): y-coordinates of the normal vectors. + nz (`numpy.array(float)`): z-coordinates of the normal vectors. + """ + self.add_vertex_channel("nx", "float", nx) + self.add_vertex_channel("ny", "float", ny) + self.add_vertex_channel("nz", "float", nz) + + # TODO active and refactor later if user feedback indicates the necessity for a compact the input list + # pass ti vector/matrix field directly + # def add_vertex_normal(self, normal): + # assert isinstance(normal, (np.ndarray, ti.Matrix)) + # if not isinstance(normal, np.ndarray): + # normal = normal.to_numpy() + # dim = normal.shape[normal.ndim-1] + # assert dim == 3, "Only 3D normal is supported" + # n = normal.size // dim + # assert n == self.num_vertices, "Size of the input is not correct" + # normal = np.reshape(normal, (n, dim)) + # self.add_vertex_channel("nx", "float", normal[:, 0]) + # self.add_vertex_channel("ny", "float", normal[:, 1]) + # self.add_vertex_channel("nz", "float", normal[:, 2]) + + def add_vertex_vel(self, vx: np.array, vy: np.array, vz: np.array): + """Add velocity vectors at the vertices. + + Args: + vx (`numpy.array(float)`): x-coordinates of the velocity vectors. + vy (`numpy.array(float)`): y-coordinates of the velocity vectors. + vz (`numpy.array(float)`): z-coordinates of the velocity vectors. + """ + self.add_vertex_channel("vx", "float", vx) + self.add_vertex_channel("vy", "float", vy) + self.add_vertex_channel("vz", "float", vz) + + def add_vertex_color(self, r: np.array, g: np.array, b: np.array): + """Sets the (r, g, b) channels of the colors at the vertices. + + The three arguments are all numpy arrays of float type and have + the same length. + + Args: + r (`numpy.array(float)`): the r-channel (red) of the colors. + g (`numpy.array(float)`): the g-channel (green) of the color. + b (`numpy.array(float)`): the b-channel (blue) of the colors. + """ + self.add_vertex_channel("red", "float", r) + self.add_vertex_channel("green", "float", g) + self.add_vertex_channel("blue", "float", b) + + def add_vertex_alpha(self, alpha: np.array): + """Sets the alpha-channel (transparent) of the vertex colors. + + Args: + alpha (`numpy.array(float)`): the alpha-channel (transparent) of the colors. + """ + self.add_vertex_channel("Alpha", "float", alpha) + + def add_vertex_rgba(self, r: np.array, g: np.array, b: np.array, a: np.array): + """Sets the (r, g, b, a) channels of the colors at the vertices. + + Args: + r (`numpy.array(float)`): the r-channel (red) of the colors. + g (`numpy.array(float)`): the g-channel (green) of the color. + b (`numpy.array(float)`): the b-channel (blue) of the colors. + a (`numpy.array(float)`): the a-channel (alpha) of the colors. + """ + self.add_vertex_channel("red", "float", r) + self.add_vertex_channel("green", "float", g) + self.add_vertex_channel("blue", "float", b) + self.add_vertex_channel("Alpha", "float", a) + + # TODO active and refactor later if user feedback indicates the necessity for a compact the input list + # pass ti vector/matrix field directly + # def add_vertex_color(self, color): + # assert isinstance(color, (np.ndarray, ti.Matrix)) + # if not isinstance(color, np.ndarray): + # color = color.to_numpy() + # channels = color.shape[color.ndim-1] + # assert channels == 3 or channels == 4, "The dimension for color should be either be 3 (rgb) or 4 (rgba)" + # n = color.size // channels + # assert n == self.num_vertices, "Size of the input is not correct" + # color = np.reshape(color, (n, channels)) + # self.add_vertex_channel("red", "float", color[:, 0]) + # self.add_vertex_channel("green", "float", color[:, 1]) + # self.add_vertex_channel("blue", "float", color[:, 2]) + # if channels == 4: + # self.add_vertex_channel("Alpha", "float", color[:, 3]) + + def add_vertex_id(self): + """Sets the ids of the vertices. + + The id of a vertex is equal to its index in the vertex array. + """ + self.add_vertex_channel("id", "int", np.arange(self.num_vertices)) + + def add_vertex_piece(self, piece: np.array): + self.add_vertex_channel("piece", "int", piece) + + def add_faces(self, indices: np.array): + if self.face_type == "tri": + vert_per_face = 3 + else: + vert_per_face = 4 + assert vert_per_face * self.num_faces == indices.size, "The dimension of the face vertices is not correct" + self.face_indices = np.reshape(indices, (self.num_faces, vert_per_face)) + self.face_indices = self.face_indices.astype(np.int32) + + def add_face_channel(self, key: str, data_type: str, data: np.array): + if data_type not in self.ply_supported_types: + print("Unknown type " + data_type + " detected, skipping this channel") + return + if data.ndim == 1: + assert data.size == self.num_faces, "The dimension of the face channel is not correct" + self.num_face_channels += 1 + if key in self.face_channels: + print("WARNING: duplicate key " + key + " detected") + self.face_channels.append(key) + self.face_data_type.append(data_type) + self.face_data.append(self.type_map[data_type](data)) + else: + num_col = data.size // self.num_faces + assert ( + data.ndim == 2 and data.size == num_col * self.num_faces + ), "The dimension of the face channel is not correct" + data.shape = (self.num_faces, num_col) + self.num_face_channels += num_col + for i in range(num_col): + item_key = key + "_" + str(i + 1) + if item_key in self.face_channels: + print("WARNING: duplicate key " + item_key + " detected") + self.face_channels.append(item_key) + self.face_data_type.append(data_type) + self.face_data.append(self.type_map[data_type](data[:, i])) + + def add_face_id(self): + self.add_face_channel("id", "int", np.arange(self.num_faces)) + + def add_face_piece(self, piece: np.array): + self.add_face_channel("piece", "int", piece) + + def sanity_check(self): + assert "x" in self.vertex_channels, "The vertex pos channel is missing" + assert "y" in self.vertex_channels, "The vertex pos channel is missing" + assert "z" in self.vertex_channels, "The vertex pos channel is missing" + if self.num_faces > 0: + for idx in self.face_indices.flatten(): + assert idx >= 0 and idx < self.num_vertices, "The face indices are invalid" + + def print_header(self, path: str, _format: str): + with open(path, "w") as f: + f.writelines( + [ + "ply\n", + "format " + _format + " 1.0\n", + "comment " + self.comment + "\n", + ] + ) + f.write("element vertex " + str(self.num_vertices) + "\n") + for i in range(self.num_vertex_channels): + f.write("property " + self.vertex_data_type[i] + " " + self.vertex_channels[i] + "\n") + if self.num_faces != 0: + f.write("element face " + str(self.num_faces) + "\n") + f.write("property list uchar int vertex_indices\n") + for i in range(self.num_face_channels): + f.write("property " + self.face_data_type[i] + " " + self.face_channels[i] + "\n") + f.write("end_header\n") + + def export(self, path): + self.sanity_check() + self.print_header(path, "binary_" + sys.byteorder + "_endian") + with open(path, "ab") as f: + for i in range(self.num_vertices): + for j in range(self.num_vertex_channels): + f.write(self.vertex_data[j][i]) + if self.face_type == "tri": + vert_per_face = np.uint8(3) + else: + vert_per_face = np.uint8(4) + for i in range(self.num_faces): + f.write(vert_per_face) + for j in range(vert_per_face): + f.write(self.face_indices[i, j]) + for j in range(self.num_face_channels): + f.write(self.face_data[j][i]) + + def export_ascii(self, path): + self.sanity_check() + self.print_header(path, "ascii") + with open(path, "a") as f: + for i in range(self.num_vertices): + for j in range(self.num_vertex_channels): + f.write(str(self.vertex_data[j][i]) + " ") + f.write("\n") + if self.face_type == "tri": + vert_per_face = 3 + else: + vert_per_face = 4 + for i in range(self.num_faces): + f.writelines( + [ + str(vert_per_face) + " ", + " ".join(map(str, self.face_indices[i, :])), + " ", + ] + ) + for j in range(self.num_face_channels): + f.write(str(self.face_data[j][i]) + " ") + f.write("\n") + + def export_frame_ascii(self, series_num: int, path: str): + # if path has ply ending + last_4_char = path[-4:] + if last_4_char == ".ply": + path = path[:-4] + + real_path = path + "_" + f"{series_num:0=6d}" + ".ply" + self.export_ascii(real_path) + + def export_frame(self, series_num: int, path: str): + # if path has ply ending + last_4_char = path[-4:] + if last_4_char == ".ply": + path = path[:-4] + + real_path = path + "_" + f"{series_num:0=6d}" + ".ply" + self.export(real_path) + + +__all__ = ["PLYWriter"] diff --git a/local_packages/taichi/tools/video.py b/local_packages/taichi/tools/video.py new file mode 100644 index 0000000..70f734e --- /dev/null +++ b/local_packages/taichi/tools/video.py @@ -0,0 +1,259 @@ +import os +import shutil + +from taichi._lib.utils import get_os_name +from taichi.tools.image import imwrite + +FRAME_FN_TEMPLATE = "%06d.png" +FRAME_DIR = "frames" + +# Write the frames to the disk and then make videos (mp4 or gif) if necessary + + +def scale_video(input_fn, output_fn, ratiow, ratioh): + os.system(f'ffmpeg -i {input_fn} -vf "scale=iw*{ratiow:.4f}:ih*{ratioh:.4f}" {output_fn}') + + +def crop_video(input_fn, output_fn, x_begin, x_end, y_begin, y_end): + os.system( + f'ffmpeg -i {input_fn} -filter:v "crop=iw*{x_end - x_begin:.4f}:ih*{y_end - y_begin:.4f}:iw*{x_begin:0.4f}:ih*{1 - y_end:0.4f}" {output_fn}' + ) + + +def accelerate_video(input_fn, output_fn, speed): + os.system(f'ffmpeg -i {input_fn} -filter:v "setpts={1 / speed:.4f}*PTS" {output_fn}') + + +def get_ffmpeg_path(): + return "ffmpeg" + + +def mp4_to_gif(input_fn, output_fn, framerate): + # Generate the palette + palette_name = "palette.png" + if get_os_name() == "win": + command = get_ffmpeg_path() + f" -loglevel panic -i {input_fn} -vf 'palettegen' -y {palette_name}" + else: + command = ( + get_ffmpeg_path() + f" -loglevel panic -i {input_fn} -vf 'fps={framerate}," + f"scale=320:640:flags=lanczos,palettegen' -y {palette_name}" + ) + # print command + os.system(command) + + # Generate the GIF + command = get_ffmpeg_path() + f" -loglevel panic -i {input_fn} -i {palette_name} -lavfi paletteuse -y {output_fn}" + # print command + os.system(command) + os.remove(palette_name) + + +class VideoManager: + """Utility class for exporting results to `mp4` and `gif` formats. + This class relies on `ffmpeg`. + + Args: + output_dir (str): directory to save the frames. + video_filename (str): filename for the video. default filename is video.mp4. + width, height (int): resolution of the video. + post_processor (object): any object that implements the `process(img)` + method, which accepts an image as a `numpy.ndarray` and returns + the process image. + framerate (int): frame rate of the video. + automatic_build (bool): automatically generate the resulting video or not. + + Example:: + + >>> video_manager = ti.tools.VideoManager(output_dir="./output", framerate=24, automatic_build=False) + >>> for i in range(50): + >>> render() + >>> img = pixels.to_numpy() + >>> video_manager.write_frame(img) + >>> + >>> video_manager.make_video(gif=True, mp4=True) + + Returns: + An instance of :class:`taichi.tools.VideoManager` class. + """ + + def __init__( + self, + output_dir, + video_filename=None, + width=None, + height=None, + post_processor=None, + framerate=24, + automatic_build=True, + ): + assert (width is None) == (height is None) + self.width = width + self.height = height + self.directory = output_dir + self.frame_directory = os.path.join(self.directory, FRAME_DIR) + try: + os.makedirs(self.frame_directory) + except: + pass + self.video_filename = video_filename + self.next_video_checkpoint = 4 + self.framerate = framerate + self.post_processor = post_processor + self.frame_counter = 0 + self.frame_fns = [] + self.automatic_build = automatic_build + + def get_output_filename(self, suffix): + if not self.video_filename: + return os.path.join(self.directory, "video" + suffix) + filename, extension = os.path.splitext(self.video_filename) + if extension is not None: + print(f"Warning: file extension {extension} will be disregarded!") + return os.path.join(self.directory, filename + suffix) + + def write_frame(self, img): + """Write an `numpy.ndarray` `img` to an image file. + + The filename will be automatically determined by this manager + and the frame counter. + """ + if img.shape[0] % 2 != 0: + print("Warning: height is not divisible by 2! Dropping last row") + img = img[:-1] + if img.shape[1] % 2 != 0: + print("Warning: width is not divisible by 2! Dropping last column") + img = img[:, :-1] + if self.post_processor: + img = self.post_processor.process(img) + if self.width is None: + self.width = img.shape[0] + self.height = img.shape[1] + assert os.path.exists(self.directory) + fn = FRAME_FN_TEMPLATE % self.frame_counter + self.frame_fns.append(fn) + imwrite(img, os.path.join(self.frame_directory, fn)) + self.frame_counter += 1 + if self.frame_counter % self.next_video_checkpoint == 0: + if self.automatic_build: + self.make_video() + self.next_video_checkpoint *= 2 + + def get_frame_directory(self): + """Returns path to the directory where the image files are located in.""" + return self.frame_directory + + def write_frames(self, images): + """Write a list of `numpy.ndarray` `images` to image files.""" + for img in images: + self.write_frame(img) + + def clean_frames(self): + """Delete all previous image files in the saved directory.""" + for fn in os.listdir(self.frame_directory): + if fn.endswith(".png") and fn in self.frame_fns: + os.remove(fn) + + def make_video(self, mp4=True, gif=True): + """Convert the image files to a `mp4` or `gif` animation.""" + fn = self.get_output_filename(".mp4") + command = ( + (get_ffmpeg_path() + f" -loglevel panic -framerate {self.framerate} -i ") + + os.path.join(self.frame_directory, FRAME_FN_TEMPLATE) + + " -s:v " + + str(self.width) + + "x" + + str(self.height) + + " -c:v libx264 -profile:v high -crf 1 -pix_fmt yuv420p -y " + + fn + ) + + os.system(command) + + if gif: + mp4_to_gif( + self.get_output_filename(".mp4"), + self.get_output_filename(".gif"), + self.framerate, + ) + + if not mp4: + os.remove(fn) + + +def interpolate_frames(frame_dir, mul=4): + # TODO: remove dependency on cv2 here + import cv2 # pylint: disable=C0415 + + files = os.listdir(frame_dir) + images = [] + images_interpolated = [] + for f in sorted(files): + if f.endswith("png"): + images.append(cv2.imread(f) / 255.0) # pylint: disable=E1101 + + for i in range(len(images) - 1): + images_interpolated.append(images[i]) + for j in range(mul - 1): + alpha = 1 - j / mul + images_interpolated.append(images[i] * alpha + images[i + 1] * (1 - alpha)) + + images_interpolated.append(images[-1]) + + os.makedirs("interpolated", exist_ok=True) + for i, img in enumerate(images_interpolated): + cv2.imwrite(f"interpolated/{i:05d}.png", img * 255.0) # pylint: disable=E1101 + + +def ffmpeg_common_args(frame_rate, input_fn, width, height, crf, output_path): + return ( + f"{get_ffmpeg_path()} -y -loglevel panic -framerate {frame_rate} -i {input_fn} -s:v {width}x{height} " + + f"-c:v libx264 -profile:v high -crf {crf} -pix_fmt yuv420p {output_path}" + ) + + +def make_video(input_files, width=0, height=0, frame_rate=24, crf=20, output_path="video.mp4"): + """Convert a list of image files to a `gif` or `mp4` animation. + + Args: + input_files (list[str]): the list of image file names. + width (int): output video width. + height (int): output video height. + frame_rate (int): framerate of the output video. + crf (int): quality of the output video, the lower the better quality, + but also larger file size. + output_path (str): path to the output video. + """ + if isinstance(input_files, list): + from PIL import Image # pylint: disable=C0415 + + with Image.open(input_files[0]) as img: + width, height = img.size + tmp_dir = "tmp_ffmpeg_dir" + os.mkdir(tmp_dir) + if width % 2 != 0: + print(f"Width ({width}) not divisible by 2") + width -= 1 + if height % 2 != 0: + print(f"Height ({width}) not divisible by 2") + height -= 1 + for i, inp in enumerate(input_files): + shutil.copy(inp, os.path.join(tmp_dir, f"{i:06d}.png")) + inputs = f"{tmp_dir}/%06d.png" + command = ffmpeg_common_args(frame_rate, inputs, width, height, crf, output_path) + ret = os.system(command) + assert ret == 0, "ffmpeg failed to generate video file." + for i in range(len(input_files)): + os.remove(os.path.join(tmp_dir, f"{i:06d}.png")) + os.rmdir(tmp_dir) + elif isinstance(input_files, str): + assert width != 0 and height != 0 + command = ffmpeg_common_args(frame_rate, input_files, width, height, crf, output_path) + ret = os.system(command) + assert ret == 0, "ffmpeg failed to generate video file." + else: + assert ( + False + ), f'input_files should be list (of files) or str (of file template, e.g., "%04d.png") instead of {type(input_files)}' + + +__all__ = ["VideoManager"] diff --git a/local_packages/taichi/tools/vtk.py b/local_packages/taichi/tools/vtk.py new file mode 100644 index 0000000..596ec3c --- /dev/null +++ b/local_packages/taichi/tools/vtk.py @@ -0,0 +1,34 @@ +import numpy as np + + +def write_vtk(scalar_field, filename): + try: + from pyevtk.hl import gridToVTK # pylint: disable=import-outside-toplevel + except ImportError: + raise RuntimeError( + "Failed to import pyevtk. Please install it via /\ + `pip install pyevtk` first. " + ) + + scalar_field_np = scalar_field.to_numpy() + field_shape = scalar_field_np.shape + dimensions = len(field_shape) + + if dimensions not in (2, 3): + raise ValueError("The input field must be a 2D or 3D scalar field.") + + if dimensions == 2: + scalar_field_np = scalar_field_np[np.newaxis, :, :] + zcoords = np.array([0, 1]) + elif dimensions == 3: + zcoords = np.arange(0, field_shape[2]) + gridToVTK( + filename, + x=np.arange(0, field_shape[0]), + y=np.arange(0, field_shape[1]), + z=zcoords, + cellData={filename: scalar_field_np}, + ) + + +__all__ = ["write_vtk"] diff --git a/local_packages/taichi/types/__init__.py b/local_packages/taichi/types/__init__.py new file mode 100644 index 0000000..4a11219 --- /dev/null +++ b/local_packages/taichi/types/__init__.py @@ -0,0 +1,17 @@ +""" +This module defines data types in Taichi: + +- primitive: int, float, etc. +- compound: matrix, vector, struct. +- template: for reference types. +- ndarray: for arbitrary arrays. +- quant: for quantized types, see "https://yuanming.taichi.graphics/publication/2021-quantaichi/quantaichi.pdf" +""" + +from taichi.types import quant +from taichi.types.annotations import * +from taichi.types.compound_types import * +from taichi.types.ndarray_type import * +from taichi.types.primitive_types import * +from taichi.types.texture_type import * +from taichi.types.utils import * diff --git a/local_packages/taichi/types/annotations.py b/local_packages/taichi/types/annotations.py new file mode 100644 index 0000000..ade7bed --- /dev/null +++ b/local_packages/taichi/types/annotations.py @@ -0,0 +1,44 @@ +class Template: + """Type annotation for template kernel parameter. + Useful for passing parameters to kernels by reference. + + See also https://docs.taichi-lang.org/docs/meta. + + Args: + tensor (Any): unused + dim (Any): unused + + Example:: + + >>> a = 1 + >>> + >>> @ti.kernel + >>> def test(): + >>> print(a) + >>> + >>> @ti.kernel + >>> def test_template(a: ti.template()): + >>> print(a) + >>> + >>> test(a) # will print 1 + >>> test_template(a) # will also print 1 + >>> a = 2 + >>> test(a) # will still print 1 + >>> test_template(a) # will print 2 + """ + + def __init__(self, tensor=None, dim=None): + self.tensor = tensor + self.dim = dim + + +template = Template +"""Alias for :class:`~taichi.types.annotations.Template`. +""" + + +class sparse_matrix_builder: + pass + + +__all__ = ["template", "sparse_matrix_builder"] diff --git a/local_packages/taichi/types/compound_types.py b/local_packages/taichi/types/compound_types.py new file mode 100644 index 0000000..a4f9aef --- /dev/null +++ b/local_packages/taichi/types/compound_types.py @@ -0,0 +1,88 @@ +from taichi._lib.utils import ti_python_core as _ti_python_core + +import taichi + +_type_factory = _ti_python_core.get_type_factory_instance() + + +class CompoundType: + pass + + +# TODO: maybe move MatrixType, StructType here to avoid the circular import? +def matrix(n=None, m=None, dtype=None): + """Creates a matrix type with given shape and data type. + + Args: + n (int): number of rows of the matrix. + m (int): number of columns of the matrix. + dtype (:mod:`~taichi.types.primitive_types`): matrix data type. + + Returns: + A matrix type. + + Example:: + + >>> mat2x2 = ti.types.matrix(2, 2, ti.f32) # 2x2 matrix type + >>> M = mat2x2([[1., 2.], [3., 4.]]) # an instance of this type + """ + return taichi.lang.matrix.MatrixType(n, m, 2, dtype) + + +def vector(n=None, dtype=None): + """Creates a vector type with given shape and data type. + + Args: + n (int): dimension of the vector. + dtype (:mod:`~taichi.types.primitive_types`): vector data type. + + Returns: + A vector type. + + Example:: + + >>> vec3 = ti.types.vector(3, ti.f32) # 3d vector type + >>> v = vec3([1., 2., 3.]) # an instance of this type + """ + return taichi.lang.matrix.VectorType(n, dtype) + + +def struct(**kwargs): + """Creates a struct type with given members. + + Args: + kwargs (dict): a dictionary contains the names and types of the + struct members. + + Returns: + A struct type. + + Example:: + + >>> vec3 = ti.types.vector(3, ti.f32) + >>> sphere = ti.types.struct(center=vec3, radius=float) + >>> s = sphere(center=vec3([0., 0., 0.]), radius=1.0) + """ + return taichi.lang.struct.StructType(**kwargs) + + +def argpack(**kwargs): + """Creates an argument pack type with given members. + + Args: + kwargs (dict): a dictionary contains the names and types of the + argument pack members. + + Returns: + A argument pack type. + + Example:: + + >>> vec3 = ti.types.vector(3, ti.f32) + >>> sphere = ti.types.argpack(center=vec3, radius=float) + >>> s = sphere(center=vec3([0., 0., 0.]), radius=1.0) + """ + return taichi.lang.argpack.ArgPackType(**kwargs) + + +__all__ = ["matrix", "vector", "struct", "argpack"] diff --git a/local_packages/taichi/types/ndarray_type.py b/local_packages/taichi/types/ndarray_type.py new file mode 100644 index 0000000..f107f9a --- /dev/null +++ b/local_packages/taichi/types/ndarray_type.py @@ -0,0 +1,139 @@ +from taichi.lang.enums import Layout, to_boundary_enum +from taichi.types.compound_types import CompoundType, matrix, vector +from taichi.lang import util + + +class NdarrayTypeMetadata: + def __init__(self, element_type, shape=None, needs_grad=False): + self.element_type = element_type + self.shape = shape + self.layout = Layout.AOS + self.needs_grad = needs_grad + + +# TODO(Haidong): This is a helper function that creates a MatrixType +# with respect to element_dim and element_shape. +# Remove this function when the two args are totally deprecated. +def _make_matrix_dtype_from_element_shape(element_dim, element_shape, primitive_dtype): + if isinstance(primitive_dtype, CompoundType): + raise TypeError(f'Cannot specifiy matrix dtype "{primitive_dtype}" and element shape or dim at the same time.') + + # Scalars + if element_dim == 0 or (element_shape is not None and len(element_shape) == 0): + return primitive_dtype + + # Cook element dim and shape into matrix type. + mat_dtype = None + if element_dim is not None: + # TODO: expand use case with arbitary tensor dims! + if element_dim < 0 or element_dim > 2: + raise ValueError("Only scalars, vectors, and matrices are allowed as elements of ti.types.ndarray()") + # Check dim consistency. The matrix dtype will be cooked later. + if element_shape is not None and len(element_shape) != element_dim: + raise ValueError( + f"Both element_shape and element_dim are specified, but shape doesn't match specified dim: {len(element_shape)}!={element_dim}" + ) + mat_dtype = vector(None, primitive_dtype) if element_dim == 1 else matrix(None, None, primitive_dtype) + elif element_shape is not None: + if len(element_shape) > 2: + raise ValueError("Only scalars, vectors, and matrices are allowed as elements of ti.types.ndarray()") + mat_dtype = ( + vector(element_shape[0], primitive_dtype) + if len(element_shape) == 1 + else matrix(element_shape[0], element_shape[1], primitive_dtype) + ) + return mat_dtype + + +class NdarrayType: + """Type annotation for arbitrary arrays, including external arrays (numpy ndarrays and torch tensors) and Taichi ndarrays. + + For external arrays, we treat it as a Taichi data container with Scalar, Vector or Matrix elements. + For Taichi vector/matrix ndarrays, we will automatically identify element dimension and their corresponding axis by the dimension of datatype, say scalars, matrices or vectors. + For example, given type annotation `ti.types.ndarray(dtype=ti.math.vec3)`, a numpy array `np.zeros(10, 10, 3)` will be recognized as a 10x10 matrix composed of vec3 elements. + + Args: + dtype (Union[PrimitiveType, VectorType, MatrixType, NoneType], optional): None if not speicified. + ndim (Union[Int, NoneType]): None if not specified, number of field dimensions. This argument is ignored for external arrays for now. + element_dim (Union[Int, NoneType], optional): None if not specified (will be treated as 0 for external arrays), 0 if scalar elements, 1 if vector elements, and 2 if matrix elements. + element_shape (Union[Tuple[Int], NoneType]): None if not specified, shapes of each element. For example, element_shape must be 1d for vector and 2d tuple for matrix. This argument is ignored for external arrays for now. + """ + + def __init__( + self, + dtype=None, + ndim=None, + element_dim=None, + element_shape=None, + field_dim=None, + needs_grad=None, + boundary="unsafe", + ): + if field_dim is not None: + raise ValueError("The field_dim argument for ndarray type is already deprecated. Please use ndim instead.") + if element_dim is not None or element_shape is not None: + self.dtype = _make_matrix_dtype_from_element_shape(element_dim, element_shape, dtype) + else: + self.dtype = dtype + + self.ndim = ndim + self.layout = Layout.AOS + self.needs_grad = needs_grad + self.boundary = to_boundary_enum(boundary) + + def check_matched(self, ndarray_type: NdarrayTypeMetadata, arg_name: str): + # FIXME(Haidong) Cannot use Vector/MatrixType due to circular import + # Use the CompuoundType instead to determine the specific typs. + # TODO Replace CompoundType with MatrixType and VectorType + + # Check dtype match + if isinstance(self.dtype, CompoundType): + if not self.dtype.check_matched(ndarray_type.element_type): + raise ValueError( + f"Invalid value for argument {arg_name} - required element type: {self.dtype.to_string()}, but {ndarray_type.element_type.to_string()} is provided" + ) + else: + if self.dtype is not None: + # Check dtype match for scalar. + if not util.cook_dtype(self.dtype) == ndarray_type.element_type: + raise TypeError( + f"Expect element type {self.dtype} for argument {arg_name}, but get {ndarray_type.element_type}" + ) + + # Check ndim match + if self.ndim is not None and ndarray_type.shape is not None and self.ndim != len(ndarray_type.shape): + raise ValueError( + f"Invalid value for argument {arg_name} - required ndim={self.ndim}, but {len(ndarray_type.shape)}d ndarray with shape {ndarray_type.shape} is provided" + ) + + # Check needs_grad + if self.needs_grad is not None and self.needs_grad > ndarray_type.needs_grad: + # It's okay to pass a needs_grad=True ndarray at runtime to a need_grad=False arg but not vice versa. + raise ValueError( + f"Invalid value for argument {arg_name} - required needs_grad={self.needs_grad}, but {ndarray_type.needs_grad} is provided" + ) + + def __repr__(self): + return f"NdarrayType(dtype={self.dtype}, ndim={self.ndim}, layout={self.layout}, needs_grad={self.needs_grad})" + + def __str__(self): + return self.__repr__() + + +ndarray = NdarrayType +"""Alias for :class:`~taichi.types.ndarray_type.NdarrayType`. + +Example:: + + >>> @ti.kernel + >>> def to_numpy(x: ti.types.ndarray(), y: ti.types.ndarray()): + >>> for i in range(n): + >>> x[i] = y[i] + >>> + >>> y = ti.ndarray(ti.f64, shape=n) + >>> ... # calculate y + >>> x = numpy.zeros(n) + >>> to_numpy(x, y) # `x` will be filled with `y`'s data. +""" + +__all__ = ["ndarray"] diff --git a/local_packages/taichi/types/primitive_types.py b/local_packages/taichi/types/primitive_types.py new file mode 100644 index 0000000..aad85d6 --- /dev/null +++ b/local_packages/taichi/types/primitive_types.py @@ -0,0 +1,201 @@ +from taichi._lib import core as ti_python_core + +# ======================================== +# real types + +# ---------------------------------------- + +float16 = ti_python_core.DataType_f16 +"""16-bit precision floating point data type. +""" + +# ---------------------------------------- + +f16 = float16 +"""Alias for :const:`~taichi.types.primitive_types.float16` +""" + +# ---------------------------------------- + +float32 = ti_python_core.DataType_f32 +"""32-bit single precision floating point data type. +""" + +# ---------------------------------------- + +f32 = float32 +"""Alias for :const:`~taichi.types.primitive_types.float32` +""" + +# ---------------------------------------- + +float64 = ti_python_core.DataType_f64 +"""64-bit double precision floating point data type. +""" + +# ---------------------------------------- + +f64 = float64 +"""Alias for :const:`~taichi.types.primitive_types.float64` +""" +# ---------------------------------------- + +# ======================================== +# Integer types + +# ---------------------------------------- + +int8 = ti_python_core.DataType_i8 +"""8-bit signed integer data type. +""" + +# ---------------------------------------- + +i8 = int8 +"""Alias for :const:`~taichi.types.primitive_types.int8` +""" + +# ---------------------------------------- + +int16 = ti_python_core.DataType_i16 +"""16-bit signed integer data type. +""" + +# ---------------------------------------- + +i16 = int16 +"""Alias for :const:`~taichi.types.primitive_types.int16` +""" + +# ---------------------------------------- + +int32 = ti_python_core.DataType_i32 +"""32-bit signed integer data type. +""" + +# ---------------------------------------- + +i32 = int32 +"""Alias for :const:`~taichi.types.primitive_types.int32` +""" + +# ---------------------------------------- + +int64 = ti_python_core.DataType_i64 +"""64-bit signed integer data type. +""" + +# ---------------------------------------- + +i64 = int64 +"""Alias for :const:`~taichi.types.primitive_types.int64` +""" + +# ---------------------------------------- + +uint8 = ti_python_core.DataType_u8 +"""8-bit unsigned integer data type. +""" + +# ---------------------------------------- + +uint1 = ti_python_core.DataType_u1 +"""1-bit unsigned integer data type. Same as booleans. +""" + +# ---------------------------------------- + +u1 = uint1 +"""Alias for :const:`~taichi.types.primitive_types.uint1` +""" + +# ---------------------------------------- + +u8 = uint8 +"""Alias for :const:`~taichi.types.primitive_types.uint8` +""" + +# ---------------------------------------- + +uint16 = ti_python_core.DataType_u16 +"""16-bit unsigned integer data type. +""" + +# ---------------------------------------- + +u16 = uint16 +"""Alias for :const:`~taichi.types.primitive_types.uint16` +""" + +# ---------------------------------------- + +uint32 = ti_python_core.DataType_u32 +"""32-bit unsigned integer data type. +""" + +# ---------------------------------------- + +u32 = uint32 +"""Alias for :const:`~taichi.types.primitive_types.uint32` +""" + +# ---------------------------------------- + +uint64 = ti_python_core.DataType_u64 +"""64-bit unsigned integer data type. +""" + +# ---------------------------------------- + +u64 = uint64 +"""Alias for :const:`~taichi.types.primitive_types.uint64` +""" + +# ---------------------------------------- + + +class RefType: + def __init__(self, tp): + self.tp = tp + + +def ref(tp): + return RefType(tp) + + +real_types = [f16, f32, f64, float] +real_type_ids = [id(t) for t in real_types] + +integer_types = [i8, i16, i32, i64, u1, u8, u16, u32, u64, int, bool] +integer_type_ids = [id(t) for t in integer_types] + +all_types = real_types + integer_types +type_ids = [id(t) for t in all_types] + +__all__ = [ + "float32", + "f32", + "float64", + "f64", + "float16", + "f16", + "int8", + "i8", + "int16", + "i16", + "int32", + "i32", + "int64", + "i64", + "uint1", + "u1", + "uint8", + "u8", + "uint16", + "u16", + "uint32", + "u32", + "uint64", + "u64", + "ref", +] diff --git a/local_packages/taichi/types/quant.py b/local_packages/taichi/types/quant.py new file mode 100644 index 0000000..02c7a59 --- /dev/null +++ b/local_packages/taichi/types/quant.py @@ -0,0 +1,81 @@ +""" +This module defines generators of quantized types. +For more details, read https://yuanming.taichi.graphics/publication/2021-quantaichi/quantaichi.pdf. +""" + +from taichi._lib.utils import ti_python_core as _ti_python_core +from taichi.lang import impl +from taichi.types.primitive_types import i32 + +_type_factory = _ti_python_core.get_type_factory_instance() + + +def int(bits, signed=True, compute=None): # pylint: disable=W0622 + """Generates a quantized type for integers. + + Args: + bits (int): Number of bits. + signed (bool): Signed or unsigned. + compute (DataType): Type for computation. + + Returns: + DataType: The specified type. + """ + if compute is None: + compute = impl.get_runtime().default_ip if signed else impl.get_runtime().default_up + if isinstance(compute, _ti_python_core.DataType): + compute = compute.get_ptr() + return _type_factory.get_quant_int_type(bits, signed, compute) + + +def fixed(bits, signed=True, max_value=1.0, compute=None, scale=None): + """Generates a quantized type for fixed-point real numbers. + + Args: + bits (int): Number of bits. + signed (bool): Signed or unsigned. + max_value (float): Maximum value of the number. + compute (DataType): Type for computation. + scale (float): Scaling factor. The argument is prioritized over range. + + Returns: + DataType: The specified type. + """ + if compute is None: + compute = impl.get_runtime().default_fp + if isinstance(compute, _ti_python_core.DataType): + compute = compute.get_ptr() + # TODO: handle cases with bits > 32 + underlying_type = int(bits=bits, signed=signed, compute=i32) + if scale is None: + if signed: + scale = max_value / 2 ** (bits - 1) + else: + scale = max_value / 2**bits + return _type_factory.get_quant_fixed_type(underlying_type, compute, scale) + + +def float(exp, frac, signed=True, compute=None): # pylint: disable=W0622 + """Generates a quantized type for floating-point real numbers. + + Args: + exp (int): Number of exponent bits. + frac (int): Number of fraction bits. + signed (bool): Signed or unsigned. + compute (DataType): Type for computation. + + Returns: + DataType: The specified type. + """ + if compute is None: + compute = impl.get_runtime().default_fp + if isinstance(compute, _ti_python_core.DataType): + compute = compute.get_ptr() + # Exponent is always unsigned + exp_type = int(bits=exp, signed=False, compute=i32) + # TODO: handle cases with frac > 32 + frac_type = int(bits=frac, signed=signed, compute=i32) + return _type_factory.get_quant_float_type(frac_type, exp_type, compute) + + +__all__ = ["int", "fixed", "float"] diff --git a/local_packages/taichi/types/texture_type.py b/local_packages/taichi/types/texture_type.py new file mode 100644 index 0000000..2cab885 --- /dev/null +++ b/local_packages/taichi/types/texture_type.py @@ -0,0 +1,83 @@ +from taichi.lang.enums import Format +from taichi.lang.exception import TaichiCompilationError +from taichi.types.primitive_types import f16, f32, i8, i16, i32, u8, u16, u32 + +FORMAT2TY_CH = { + Format.r8: (u8, 1), + Format.r8u: (u8, 1), + Format.r8i: (i8, 1), + Format.rg8: (u8, 2), + Format.rg8u: (u8, 2), + Format.rg8i: (i8, 2), + Format.rgba8: (u8, 4), + Format.rgba8u: (u8, 4), + Format.rgba8i: (i8, 4), + Format.r16: (u16, 1), + Format.r16u: (u16, 1), + Format.r16i: (i16, 1), + Format.r16f: (f16, 1), + Format.rg16: (u16, 2), + Format.rg16u: (u16, 2), + Format.rg16i: (i16, 2), + Format.rg16f: (f16, 2), + Format.rgb16: (u16, 3), + Format.rgb16u: (u16, 3), + Format.rgb16i: (i16, 3), + Format.rgb16f: (f16, 3), + Format.rgba16: (u16, 4), + Format.rgba16u: (u16, 4), + Format.rgba16i: (i16, 4), + Format.rgba16f: (f16, 4), + Format.r32u: (u32, 1), + Format.r32i: (i32, 1), + Format.r32f: (f32, 1), + Format.rg32u: (u32, 2), + Format.rg32i: (i32, 2), + Format.rg32f: (f32, 2), + Format.rgb32u: (u32, 3), + Format.rgb32i: (i32, 3), + Format.rgb32f: (f32, 3), + Format.rgba32u: (u32, 4), + Format.rgba32i: (i32, 4), + Format.rgba32f: (f32, 4), +} + +# Reverse lookup by (channel_format, num_channels) +TY_CH2FORMAT = {v: k for k, v in FORMAT2TY_CH.items()} + + +class TextureType: + """Type annotation for Textures. + + Args: + num_dimensions (int): Number of dimensions. For examples for a 2D texture this should be `2`. + """ + + def __init__(self, num_dimensions): + self.num_dimensions = num_dimensions + + +class RWTextureType: + """Type annotation for RW Textures (image load store). + + Args: + num_dimensions (int): Number of dimensions. For examples for a 2D texture this should be `2`. + lod (float): Specifies the explicit level-of-detail. + fmt (ti.Format): Color format of texture + """ + + def __init__(self, num_dimensions, lod=0, fmt=None): + self.num_dimensions = num_dimensions + if fmt is None: + raise TaichiCompilationError("fmt is required for rw_texture type") + else: + self.fmt = fmt + self.lod = lod + + +texture = TextureType +rw_texture = RWTextureType +"""Alias for :class:`~taichi.types.ndarray_type.TextureType`. +""" + +__all__ = ["texture", "rw_texture"] diff --git a/local_packages/taichi/types/utils.py b/local_packages/taichi/types/utils.py new file mode 100644 index 0000000..71cc6ce --- /dev/null +++ b/local_packages/taichi/types/utils.py @@ -0,0 +1,11 @@ +from taichi._lib import core as ti_python_core + +is_signed = ti_python_core.is_signed + +is_integral = ti_python_core.is_integral + +is_real = ti_python_core.is_real + +is_tensor = ti_python_core.is_tensor + +__all__ = ["is_signed", "is_integral", "is_real", "is_tensor"] diff --git a/local_packages/taichi/ui/__init__.py b/local_packages/taichi/ui/__init__.py new file mode 100644 index 0000000..66bbd40 --- /dev/null +++ b/local_packages/taichi/ui/__init__.py @@ -0,0 +1,9 @@ +""" +Taichi gui module for visualization. + +This module contains a cpu based GUI system, a vulkan based GGUI system, +and other helper utilities like adding widgets and exporting video files. +""" + +from .gui import * +from .ui import * diff --git a/local_packages/taichi/ui/camera.py b/local_packages/taichi/ui/camera.py new file mode 100644 index 0000000..88cac69 --- /dev/null +++ b/local_packages/taichi/ui/camera.py @@ -0,0 +1,267 @@ +import time +from math import pi + +from taichi._lib import core as _ti_core +from taichi.lang.matrix import Vector + +from .utils import check_ggui_availability, euler_to_vec, vec_to_euler + + +class Camera: + """The Camera class. + + You should also manually set the camera parameters like `camera.position`, + `camera.lookat`, `camera.up`, etc. The default settings may not work for + your scene. + + Example:: + + >>> scene = window.get_scene() # assumes you have a window + >>> + >>> camera = ti.ui.Camera() + >>> camera.position(1, 1, 1) # set camera position + >>> camera.lookat(0, 0, 0) # set camera lookat + >>> camera.up(0, 1, 0) # set camera up vector + >>> scene.set_camera(camera) + >>> + >>> # you can also control camera movement in a window + >>> window = ti.ui.Window("GGUI Camera", res=(640, 480), vsync=True) + >>> camera.track_user_inputs(window, movement_speed=0.03, hold_key=ti.ui.RMB) + """ + + def __init__(self): + check_ggui_availability() + self.ptr = _ti_core.PyCamera() + + self.position(0.0, 0.0, 0.0) + self.lookat(0.0, 0.0, 1.0) + self.up(0.0, 1.0, 0.0) + + # used for tracking user inputs + self.last_mouse_x = None + self.last_mouse_y = None + self.last_time = None + + def position(self, x, y, z): + """Set the camera position. + + Args: + args (:mod:`taichi.types.primitive_types`): 3D coordinates. + + Example:: + + >>> camera.position(1, 1, 1) + """ + self.curr_position = Vector([x, y, z]) + self.ptr.position(x, y, z) + + def lookat(self, x, y, z): + """Set the camera lookat. + + Args: + args (:mod:`taichi.types.primitive_types`): 3D coordinates. + + Example:: + + >>> camera.lookat(0, 0, 0) + """ + self.curr_lookat = Vector([x, y, z]) + self.ptr.lookat(x, y, z) + + def up(self, x, y, z): + """Set the camera up vector. + + Args: + args (:mod:`taichi.types.primitive_types`): 3D coordinates. + + Example:: + + >>> camera.up(0, 1, 0) + """ + self.curr_up = Vector([x, y, z]) + self.ptr.up(x, y, z) + + def projection_mode(self, mode): + """Camera projection mode, 0 for perspective and 1 for orthogonal.""" + self.ptr.projection_mode(mode) + + def fov(self, fov): + """Set the camera fov angle (field of view) in degrees. + + Args: + fov (:mod:`taichi.types.primitive_types`): Angle in range (0, 180). + + Example:: + + >>> camera.fov(45) + """ + self.ptr.fov(fov) + + def left(self, left): + """Set the offset of the left clipping plane in camera frustum. + + Args: + left (:mod:`taichi.types.primitive_types`): \ + offset of the left clipping plane. + + Example:: + + >>> camera.left(-1.0) + """ + self.ptr.left(left) + + def right(self, right): + """Set the offset of the right clipping plane in camera frustum. + + Args: + right (:mod:`taichi.types.primitive_types`): \ + offset of the right clipping plane. + + Example:: + + >>> camera.right(1.0) + """ + self.ptr.right(right) + + def top(self, top): + """Set the offset of the top clipping plane in camera frustum. + + Args: + top (:mod:`taichi.types.primitive_types`): \ + offset of the top clipping plane. + + Example:: + + >>> camera.top(-1.0) + """ + self.ptr.top(top) + + def bottom(self, bottom): + """Set the offset of the bottom clipping plane in camera frustum. + + Args: + bottom (:mod:`taichi.types.primitive_types`): \ + offset of the bottom clipping plane. + + Example:: + + >>> camera.bottom(1.0) + """ + self.ptr.bottom(bottom) + + def z_near(self, z_near): + """Set the offset of the near clipping plane in camera frustum. + + Args: + near (:mod:`taichi.types.primitive_types`): \ + offset of the near clipping plane. + + Example:: + + >>> camera.near(0.1) + """ + self.ptr.z_near(z_near) + + def z_far(self, z_far): + """Set the offset of the far clipping plane in camera frustum. + + Args: + far (:mod:`taichi.types.primitive_types`): \ + offset of the far clipping plane. + + Example:: + + >>> camera.left(1000.0) + """ + self.ptr.z_far(z_far) + + def get_view_matrix(self): + """Get the view matrix(in row major) of the camera. + + Example:: + + >>> camera.get_view_matrix() + """ + return self.ptr.get_view_matrix() + + def get_projection_matrix(self, aspect): + """Get the projection matrix(in row major) of the camera. + + Args: + aspect (:mod:`taichi.types.primitive_types`): \ + aspect ratio of the camera + + Example:: + + >>> camera.get_projection_matrix(1080/720) + """ + return self.ptr.get_projection_matrix(aspect) + + def track_user_inputs( + self, + window, + movement_speed: float = 1.0, + yaw_speed: float = 2.0, + pitch_speed: float = 2.0, + hold_key=None, + ): + """Move the camera according to user inputs. + Press `w`, `s`, `a`, `d`, `e`, `q` to move the camera + `formard`, `back`, `left`, `right`, `head up`, `head down`, accordingly. + + Args: + + window (:class:`~taichi.ui.Window`): a windown instance. + movement_speed (:mod:`~taichi.types.primitive_types`): camera movement speed. + yaw_speed (:mod:`~taichi.types.primitive_types`): speed of changes in yaw angle. + pitch_speed (:mod:`~taichi.types.primitive_types`): speed of changes in pitch angle. + hold_key (:mod:`~taichi.ui`): User defined key for holding the camera movement. + """ + front = (self.curr_lookat - self.curr_position).normalized() + position_change = Vector([0.0, 0.0, 0.0]) + left = self.curr_up.cross(front) + up = self.curr_up + + if self.last_time is None: + self.last_time = time.perf_counter_ns() + time_elapsed = (time.perf_counter_ns() - self.last_time) * 1e-9 + self.last_time = time.perf_counter_ns() + + movement_speed *= time_elapsed * 60.0 + if window.is_pressed("w"): + position_change += front * movement_speed + if window.is_pressed("s"): + position_change -= front * movement_speed + if window.is_pressed("a"): + position_change += left * movement_speed + if window.is_pressed("d"): + position_change -= left * movement_speed + if window.is_pressed("e"): + position_change += up * movement_speed + if window.is_pressed("q"): + position_change -= up * movement_speed + self.position(*(self.curr_position + position_change)) + + curr_mouse_x, curr_mouse_y = window.get_cursor_pos() + + if (hold_key is None) or window.is_pressed(hold_key): + if (self.last_mouse_x is None) or (self.last_mouse_y is None): + self.last_mouse_x, self.last_mouse_y = curr_mouse_x, curr_mouse_y + dx = curr_mouse_x - self.last_mouse_x + dy = curr_mouse_y - self.last_mouse_y + + yaw, pitch = vec_to_euler(front) + + yaw -= dx * yaw_speed * time_elapsed * 60.0 + pitch += dy * pitch_speed * time_elapsed * 60.0 + + pitch_limit = pi / 2 * 0.99 + if pitch > pitch_limit: + pitch = pitch_limit + elif pitch < -pitch_limit: + pitch = -pitch_limit + + front = euler_to_vec(yaw, pitch) + + self.lookat(*(self.curr_position + front)) + self.last_mouse_x, self.last_mouse_y = curr_mouse_x, curr_mouse_y diff --git a/local_packages/taichi/ui/canvas.py b/local_packages/taichi/ui/canvas.py new file mode 100644 index 0000000..4b265ac --- /dev/null +++ b/local_packages/taichi/ui/canvas.py @@ -0,0 +1,253 @@ +from taichi._lib import core as _ti_core +from taichi.lang import impl +from taichi.lang._texture import Texture +from .scene import SceneV2 + +from .staging_buffer import ( + copy_all_to_vbo, + copy_all_to_vbo_particle, + get_indices_field_v2, + get_vbo_field_v2, + to_rgba8, +) +from .utils import get_field_info + + +class Canvas: + """The Canvas class. + + This is the context manager for managing drawing commands on a window. + You should not instantiate this class directly via `__init__`, instead + please call the `get_canvas()` method of :class:`~taichi.ui.Window`. + """ + + def __init__(self, canvas) -> None: + self.canvas = canvas # reference to a PyCanvas + + def set_background_color(self, color): + """Set the background color of this canvas. + + Args: + color (tuple(float)): RGB triple in the range [0, 1]. + """ + self.canvas.set_background_color(color) + + def set_image(self, img): + """Set the content of this canvas to an `img`. + + Args: + img (numpy.ndarray, :class:`~taichi.MatrixField`, :class:`~taichi.Field`, :class:`~taichi.Texture`): \ + the image to be shown. + """ + is_texture = isinstance(img, Texture) + prog_is_vk = impl.pytaichi.prog.config().arch == _ti_core.Arch.vulkan + # FIXME: Remove this hack. Maybe add a query function for whether the texture can be presented + if is_texture and prog_is_vk: + self.canvas.set_image_texture(img.tex) + else: + staging_img = to_rgba8(img) + info = get_field_info(staging_img) + self.canvas.set_image(info) + + def contour(self, scalar_field, cmap_name="plasma", normalize=False): + """Plot a contour view of a scalar field. + + The input scalar_field will be converted to a Numpy array first, and then plotted + using Matplotlib's colormap. Users can specify the color map through the cmap_name + argument. + + Args: + scalar_field (ti.field): The scalar field being plotted. Must be 2D. + cmap_name (str, Optional): The name of the color map in Matplotlib. + normalize (bool, Optional): Display the normalized scalar field if set to True. + Default is False. + """ + try: + import numpy as np # pylint: disable=import-outside-toplevel + from matplotlib import cm # pylint: disable=import-outside-toplevel + except ImportError: + raise RuntimeError( + "Failed to import Numpy and Matplotlib. /\ + Please install Numpy and Matplotlib before using contour()." + ) + + scalar_field_np = scalar_field.to_numpy() + field_shape = scalar_field_np.shape + ndim = len(field_shape) + if ndim != 2: + raise ValueError("contour() can only be used on a 2D scalar field.") + + if normalize: + scalar_max = np.max(scalar_field_np) + scalar_min = np.min(scalar_field_np) + scalar_field_np = (scalar_field_np - scalar_min) / (scalar_max - scalar_min + 1e-16) + + cmap = cm.get_cmap(cmap_name) + output_rgba = cmap(scalar_field_np) + output_rgb = output_rgba.astype(np.float32)[:, :, :3] + output = np.ascontiguousarray(output_rgb) + self.set_image(output) + + def triangles(self, vertices, color=(0.5, 0.5, 0.5), indices=None, per_vertex_color=None): + """Draw a set of 2D triangles on this canvas. + + Args: + vertices: a taichi 2D Vector field, where each element indicate \ + the 3D location of a vertex. + indices: a taichi int field of shape (3 * #triangles), which \ + indicate the vertex indices of the triangles. If this is None, \ + then it is assumed that the vertices are already arranged in \ + triangles order. + color: a global color for the triangles as 3 floats representing \ + RGB values. If `per_vertex_color` is provided, this is ignored. + per_vertex_color (Tuple[float]): a taichi 3D vector field, \ + where each element indicate the RGB color of a vertex. + """ + vbo = get_vbo_field_v2(vertices) + has_per_vertex_color = per_vertex_color is not None + copy_all_to_vbo(vbo, vertices, 0, 0, per_vertex_color if has_per_vertex_color else 0) + vbo_info = get_field_info(vbo) + indices_ndarray = None + if indices: + indices_ndarray = get_indices_field_v2(indices) + indices_info = get_field_info(indices_ndarray) + self.canvas.triangles(vbo_info, indices_info, has_per_vertex_color, color) + + def lines( + self, + vertices, + width, + indices=None, + color=(0.5, 0.5, 0.5), + per_vertex_color=None, + ): + """Draw a set of 2D lines on this canvas. + + Args: + vertices: a taichi 2D Vector field, where each element indicate the \ + 3D location of a vertex. + width (float): width of the lines, relative to the height of the screen. + indices: a taichi int field of shape (2 * #lines), which indicate \ + the vertex indices of the lines. If this is None, then it is \ + assumed that the vertices are already arranged in lines order. + color: a global color for the triangles as 3 floats representing \ + RGB values. If `per_vertex_color` is provided, this is ignored. + per_vertex_color (tuple[float]): a taichi 3D vector field, where \ + each element indicate the RGB color of a vertex. + """ + vbo = get_vbo_field_v2(vertices) + has_per_vertex_color = per_vertex_color is not None + copy_all_to_vbo(vbo, vertices, 0, 0, per_vertex_color if has_per_vertex_color else 0) + vbo_info = get_field_info(vbo) + indices_ndarray = None + if indices: + indices_ndarray = get_indices_field_v2(indices) + indices_info = get_field_info(indices_ndarray) + self.canvas.lines(vbo_info, indices_info, has_per_vertex_color, color, width) + + def circles(self, centers, radius, color=(0.5, 0.5, 0.5), per_vertex_color=None, per_vertex_radius=None): + """Draw a set of 2D circles on this canvas. + + Args: + centers: a taichi 2D Vector field, where each element indicate the \ + 2D location of the center of a vertex. + radius: radius of the circles in pixels. + color: a global color for the triangles as 3 floats representing \ + RGB values. If `per_vertex_color` is provided, this is ignored. + per_vertex_color: a taichi 3D vector field, where \ + each element indicates the RGB color of a circle. + per_vertex_radius: a taichi float field, where \ + each element indicates the radius of a circle. + """ + vbo = get_vbo_field_v2(centers) + has_per_vertex_color = per_vertex_color is not None + has_per_vertex_radius = per_vertex_radius is not None + copy_all_to_vbo_particle( + vbo, + centers, + per_vertex_radius if has_per_vertex_radius else 0, + per_vertex_color if has_per_vertex_color else 0, + ) + vbo_info = get_field_info(vbo) + self.canvas.circles(vbo_info, has_per_vertex_color, has_per_vertex_radius, color, radius) + + def vector_field(self, vector_field, arrow_spacing=5, scale=0.1, width=0.002, color=(0, 0, 0)): + """Draw a vector field on this canvas. + + Args: + vector_field: The vector field to be plotted on the canvas. + arrow_spacing (int): Spacing used when sample on the vector field. + scale (float): Maximum vector length proportional to the canvas. + width (float): Line width when drawing the arrow. + color (tuple[float]): The RGB color of arrows. + """ + try: + import numpy as np # pylint: disable=import-outside-toplevel + + import taichi as ti # pylint: disable=import-outside-toplevel + except ImportError: + raise RuntimeError("Can't import taichi and/or numpy.") + v_np = vector_field.to_numpy() + v_norm = np.linalg.norm(v_np, axis=-1) + v_max = np.max(v_norm) + nx, ny = vector_field.shape + N = 6 * nx * ny + points = ti.Vector.field(3, dtype=ti.f32) # End points for arrows + fb = ti.FieldsBuilder() # Prepare to destroy the points in memory + fb.dense(ti.i, N).place(points) + fb_snode_tree = fb.finalize() + + @ti.kernel + def cal_lines_points(): + for i, j in ti.ndrange(nx // arrow_spacing + 1, ny // arrow_spacing + 1): + i = i * arrow_spacing if i < nx // arrow_spacing else nx - 1 + j = j * arrow_spacing if j < ny // arrow_spacing else ny - 1 + linear_id = (i + j * nx) * 6 # 6 because an arrow needs 6 end points + # Begin point of the arrow + x = i / (nx - 1) + y = j / (ny - 1) + points[linear_id] = ti.Vector([x, y, 0]) + # End point of the arrow + scale_factor = scale / v_max + dx, dy = scale_factor * vector_field[i, j] + points[linear_id + 1] = ti.Vector([x + dx, y + dy, 0]) + # The tip segments + line_angle = ti.atan2(dy, dx) + tip_angle_radians = 30 / 180 * 3.14 + line_length = ti.sqrt(dx**2 + dy**2) + tip_length = line_length * 0.2 + angle1 = line_angle + 3.14 - tip_angle_radians + angle2 = line_angle - 3.14 + tip_angle_radians + points[linear_id + 2] = ti.Vector([x + dx, y + dy, 0]) + points[linear_id + 3] = ti.Vector( + [ + x + dx + tip_length * ti.cos(angle1), + y + dy + tip_length * ti.sin(angle1), + 0, + ] + ) + points[linear_id + 4] = ti.Vector([x + dx, y + dy, 0]) + points[linear_id + 5] = ti.Vector( + [ + x + dx + tip_length * ti.cos(angle2), + y + dy + tip_length * ti.sin(angle2), + 0, + ] + ) + + cal_lines_points() + self.lines(points, color=color, width=width) + fb_snode_tree.destroy() + + def scene(self, scene): + """Draw a 3D scene on the canvas + + Args: + scene (:class:`~taichi.ui.Scene`): an instance of :class:`~taichi.ui.Scene`. + """ + # FIXME: (penguinliong) Add a point light to ensure the allocation of light source SSBO. + scene.point_light((0.0, 0.0, 0.0), (0.0, 0.0, 0.0)) + if isinstance(scene, SceneV2): + self.canvas.scene_v2(scene.scene) # pass in PySceneV2 + else: + self.canvas.scene(scene.scene) # pass in PyScene diff --git a/local_packages/taichi/ui/constants.py b/local_packages/taichi/ui/constants.py new file mode 100644 index 0000000..f610e23 --- /dev/null +++ b/local_packages/taichi/ui/constants.py @@ -0,0 +1,22 @@ +"""Key constants for :mod:`~taichi.ui`""" + +SHIFT = "Shift" +ALT = "Alt" +CTRL = "Control" +ESCAPE = "Escape" +RETURN = "Return" +TAB = "Tab" +BACKSPACE = "BackSpace" +SPACE = " " +UP = "Up" +DOWN = "Down" +LEFT = "Left" +RIGHT = "Right" +CAPSLOCK = "CapsLock" +LMB = "LMB" +MMB = "MMB" +RMB = "RMB" + +# Event types +PRESS = "Press" +RELEASE = "Release" diff --git a/local_packages/taichi/ui/gui.py b/local_packages/taichi/ui/gui.py new file mode 100644 index 0000000..1f47496 --- /dev/null +++ b/local_packages/taichi/ui/gui.py @@ -0,0 +1,1016 @@ +import math +import numbers +import os + +import numpy as np +import taichi.lang +from taichi._kernels import tensor_to_image, vector_to_fast_image, vector_to_image +from taichi._lib import core as _ti_core +from taichi.lang.field import Field, ScalarField + +import taichi as ti + + +# For window creation and drawing in the original ti.GUI system. +class GUI: + """Taichi Graphical User Interface class. + + Args: + name (str, optional): The name of the GUI to be constructed. + Default is 'Taichi'. + res (Union[int, List[int]], optional): The resolution of created + GUI. Default is 512*512. If `res` is scalar, then width will be equal to height. + background_color (int, optional): The background color of created GUI. + Default is 0x000000. + show_gui (bool, optional): Specify whether to render the GUI. Default is True. + fullscreen (bool, optional): Specify whether to render the GUI in + fullscreen mode. Default is False. + fast_gui (bool, optional): Specify whether to use fast gui mode of + Taichi. Default is False. + + Returns: + :class:`~taichi.misc.gui.GUI` :The created taichi GUI object. + + """ + + class Event: + """Class for holding a gui event. + + An event is represented by: + + + type (PRESS, MOTION, RELEASE) + + modifier (modifier keys like ctrl, shift, etc) + + pos (mouse position) + + key (event key) + + delta (for holding mouse wheel) + """ + + def __init__(self): + self.type = None + self.modifier = None + self.pos = None + self.key = None + self.delta = None + + # Event keys + SHIFT = "Shift" + ALT = "Alt" + CTRL = "Control" + ESCAPE = "Escape" + RETURN = "Return" + TAB = "Tab" + BACKSPACE = "BackSpace" + SPACE = " " + UP = "Up" + DOWN = "Down" + LEFT = "Left" + RIGHT = "Right" + CAPSLOCK = "Caps_Lock" + LMB = "LMB" + MMB = "MMB" + RMB = "RMB" + EXIT = "WMClose" + WHEEL = "Wheel" + MOVE = "Motion" + + # Event types + MOTION = _ti_core.KeyEvent.EType.Move + PRESS = _ti_core.KeyEvent.EType.Press + RELEASE = _ti_core.KeyEvent.EType.Release + + def __init__( + self, + name="Taichi", + res=512, + background_color=0x0, + show_gui=True, + fullscreen=False, + fast_gui=False, + ): + show_gui = self.get_bool_environ("TI_GUI_SHOW", show_gui) + fullscreen = self.get_bool_environ("TI_GUI_FULLSCREEN", fullscreen) + fast_gui = self.get_bool_environ("TI_GUI_FAST", fast_gui) + + self.name = name + if isinstance(res, numbers.Number): + res = (res, res) + self.res = res + self.fast_gui = fast_gui + if fast_gui: + self.img = np.ascontiguousarray(np.zeros(self.res[0] * self.res[1], dtype=np.uint32)) + fast_buf = self.img.ctypes.data + else: + # The GUI canvas uses RGBA for storage, therefore we need NxMx4 for an image. + self.img = np.ascontiguousarray(np.zeros(self.res + (4,), np.float32)) + fast_buf = 0 + self.core = _ti_core.GUI(name, core_veci(*res), show_gui, fullscreen, fast_gui, fast_buf) + self.canvas = self.core.get_canvas() + self.background_color = background_color + self.key_pressed = set() + self.event = None + self.frame = 0 + self.clear() + + def __enter__(self): + return self + + def __exit__(self, e_type, val, tb): + self.close() + + def __del__(self): + self.close() + + def close(self): + """Close this GUI. + + Example:: + + >>> while gui.running: + >>> if gui.get_event(gui.PRESS, ti.GUI.ESCAPE): + >>> gui.close() + >>> gui.show() + """ + self.core = None # dereference to call GUI::~GUI() + + # Widget system + + class WidgetValue: + """Class for maintaining id of gui widgets.""" + + def __init__(self, gui, wid): + self.gui = gui + self.wid = wid + + @property + def value(self): + return self.gui.core.get_widget_value(self.wid) + + @value.setter + def value(self, value): + self.gui.core.set_widget_value(self.wid, value) + + @staticmethod + def get_bool_environ(key, default): + """Get an environment variable and cast it to `bool`. + + Args: + key (str): The environment variable key. + default (bool): The default value. + Return: + The environment variable value cast to bool. \ + If the value is not found, directly return argument 'default'. + """ + if key not in os.environ: + return default + return bool(int(os.environ[key])) + + def slider(self, text, minimum, maximum, step=1): + """Creates a slider object on canvas to be manipulated with. + + Args: + text (str): The title of slider. + minimum (int, float): The minimum value of slider. + maximum (int, float): The maximum value of slider. + step (int, float): The changing step of slider. Optional and default to 1. + + Return: + :class:`~taichi.misc.gui.GUI.WidgetValue` :The created slider object. + """ + wid = self.core.make_slider(text, minimum, minimum, maximum, step) + return GUI.WidgetValue(self, wid) + + def label(self, text): + """Creates a label object on canvas. + + Args: + text (str): The title of label. + + Return: + :class:`~taichi.misc.gui.GUI.WidgetValue` :The created label object. + + """ + wid = self.core.make_label(text, 0) + return GUI.WidgetValue(self, wid) + + def button(self, text, event_name=None): + """Create a button object on canvas to be manipulated with. + + Args: + text (str): The title of button. + event_name (str, optional): The event name associated with button. + Default is WidgetButton_{text} + + Return: + The event name associated with created button. + + """ + event_name = event_name or f"WidgetButton_{text}" + self.core.make_button(text, event_name) + return event_name + + # Drawing system + + def clear(self, color=None): + """Clears the canvas with the color provided. + + Args: + color (int, optional): Specify the color to clear the canvas. Default + is the background color of GUI. + + """ + if color is None: + color = self.background_color + self.canvas.clear(color) + + def cook_image(self, img): + """Converts an img to range [0, 1] for display. + + The input image is stored in a `numpy.ndarray`, if it's dtype + is `int` it will be rescaled and mapped into range [0, 1]. If + the dtype is `float` it will be directly casted to 32-bit float type. + """ + if img.dtype in [np.uint8, np.uint16, np.uint32, np.uint64]: + img = img.astype(np.float32) * (1 / np.iinfo(img.dtype).max) + elif img.dtype in [np.float16, np.float32, np.float64]: + img = img.astype(np.float32) + else: + raise ValueError(f"Data type {img.dtype} not supported in GUI.set_image") + + if len(img.shape) == 2: + img = img[..., None] + + if img.shape[2] == 1: + img = img + np.zeros((1, 1, 4), np.float32) + if img.shape[2] == 3: + zeros = np.zeros((img.shape[0], img.shape[1], 1), np.float32) + img = np.concatenate([img, zeros], axis=2) + if img.shape[2] == 2: + zeros = np.zeros((img.shape[0], img.shape[1], 2), np.float32) + img = np.concatenate([img, zeros], axis=2) + + assert img.shape[2] == 4, "Image must be grayscale, RG, RGB or RGBA" + + res = img.shape[:2] + assert res == self.res, "Image resolution does not match GUI resolution" + return np.ascontiguousarray(img) + + def get_image(self): + """Return the window content as an `numpy.ndarray`. + + Returns: + :class:`numpy.array` :The image data in numpy contiguous array type. + """ + self.img = np.ascontiguousarray(self.img) + self.core.get_img(self.img.ctypes.data) + return self.img + + def set_image(self, img): + """Sets an image to display on the window. + + The image pixels are set from the values of `img[i, j]`, where `i` indicates + the horizontal coordinates (from left to right) and `j` the vertical coordinates + (from bottom to top). + + If the window size is `(x, y)`, then `img` must be one of: + - `ti.field(shape=(x, y))`, a gray-scale image + - `ti.field(shape=(x, y, 3))`, where `3` is for `(r, g, b)` channels + - `ti.field(shape=(x, y, 2))`, where `2` is for `(r, g)` channels + - `ti.Vector.field(3, shape=(x, y))` `(r, g, b)` channels on each component + - `ti.Vector.field(2, shape=(x, y))` `(r, g)` channels on each component + - `np.ndarray(shape=(x, y))` + - `np.ndarray(shape=(x, y, 3))` + - `np.ndarray(shape=(x, y, 2))` + + The data type of `img` must be one of: + - `uint8`, range `[0, 255]` + - `uint16`, range `[0, 65535]` + - `uint32`, range `[0, 4294967295]` + - `float32`, range `[0, 1]` + - `float64`, range `[0, 1]` + + Args: + img (Union[:class:`taichi.field`, `numpy.array`]): The color array \ + representing the image to be drawn. Support greyscale, RG, RGB, \ + and RGBA color representations. Its shape must match GUI resolution. + """ + + if self.fast_gui: + assert isinstance( + img, taichi.lang.matrix.MatrixField + ), "Only ti.Vector.field is supported in GUI.set_image when fast_gui=True" + assert img.shape == self.res, "Image resolution does not match GUI resolution" + assert img.n in [3, 4] and img.m == 1, "Only RGB images are supported in GUI.set_image when fast_gui=True" + assert img.dtype in [ + ti.f32, + ti.f64, + ti.u8, + ], "Only f32, f64, u8 are supported in GUI.set_image when fast_gui=True" + + vector_to_fast_image(img, self.img) + return + + if isinstance(img, ScalarField): + if _ti_core.is_integral(img.dtype) or len(img.shape) != 2: + # Images of uint is not optimized by xxx_to_image + self.img = self.cook_image(img.to_numpy()) + else: + # Type matched! We can use an optimized copy kernel. + assert img.shape == self.res, "Image resolution does not match GUI resolution" + tensor_to_image(img, self.img) + ti.sync() + + elif isinstance(img, taichi.lang.matrix.MatrixField): + if _ti_core.is_integral(img.dtype): + self.img = self.cook_image(img.to_numpy()) + else: + # Type matched! We can use an optimized copy kernel. + assert img.shape == self.res, "Image resolution does not match GUI resolution" + assert ( + img.n in [2, 3, 4] and img.m == 1 + ), "Only greyscale, RG, RGB or RGBA images are supported in GUI.set_image" + + vector_to_image(img, self.img) + ti.sync() + + elif isinstance(img, np.ndarray): + self.img = self.cook_image(img) + + else: + raise ValueError(f"GUI.set_image only takes a Taichi field or NumPy array, not {type(img)}") + + self.core.set_img(self.img.ctypes.data) + + def contour(self, scalar_field, normalize=False): + """Plot a contour view of a scalar field. + + The input scalar_field will be converted to a Numpy array first, and then plotted + by the Matplotlib colormap 'Plasma'. Notice this method will automatically perform + a bilinear interpolation on the field if the size of the field does not match with + the GUI window size. + + Args: + scalar_field (ti.field): The scalar field being plotted. + normalize (bool, Optional): Display the normalized scalar field if set to True. + Default is False. + """ + try: + from matplotlib import cm # pylint: disable=import-outside-toplevel + except ImportError: + raise RuntimeError( + "Failed to import Matplotlib. Please install it via /\ + `pip install matplotlib` first. " + ) + scalar_field_np = scalar_field.to_numpy() + if self.res != scalar_field_np.shape: + x, y = np.meshgrid(np.linspace(0, 1, self.res[1]), np.linspace(0, 1, self.res[0])) + x_idx = x * (scalar_field_np.shape[1] - 1) + y_idx = y * (scalar_field_np.shape[0] - 1) + x1 = x_idx.astype(int) + x2 = np.minimum(x1 + 1, scalar_field_np.shape[1] - 1) + y1 = y_idx.astype(int) + y2 = np.minimum(y1 + 1, scalar_field_np.shape[0] - 1) + array_y1 = scalar_field_np[y1, x1] * (1 - (x_idx - x1)) * (1 - (y_idx - y1)) + scalar_field_np[y1, x2] * ( + x_idx - x1 + ) * (1 - (y_idx - y1)) + array_y2 = scalar_field_np[y2, x1] * (1 - (x_idx - x1)) * (y_idx - y1) + scalar_field_np[y2, x2] * ( + x_idx - x1 + ) * (y_idx - y1) + output = array_y1 + array_y2 + else: + output = scalar_field_np + if normalize: + scalar_max, scalar_min = np.max(output), np.min(output) + output = (output - scalar_min) / (scalar_max - scalar_min) + self.set_image(cm.plasma(output)) + + def circle(self, pos, color=0xFFFFFF, radius=1): + """Draws a circle on canvas. + + Args: + pos (Union[List[int], numpy.array]): The position of the circle. + color (int, Optional): The color of the circle. Default is 0xFFFFFF. + radius (Number, Optional): The radius of the circle in pixel. Default is 1. + """ + self.canvas.circle_single(pos[0], pos[1], color, radius) + + def circles(self, pos, radius=1, color=0xFFFFFF, palette=None, palette_indices=None): + """Draws a list of circles on canvas. + + Args: + pos (numpy.array): The positions of the circles. + radius (Union[Number, numpy.array], optional): The radius of the circles in pixel. \ + Can be either a number, which will be applied to all circles, or a 1D NumPy array of the same length as `pos`. \ + The default is 1. + color (int, optional): The color of the circles. Default is 0xFFFFFF. + palette (list[int], optional): The List of colors from which to + choose to draw. Default is None. + palette_indices (Union[list[int], ti.field, numpy.array], optional): + The List of indices that choose color from palette for each + circle. Shape must match pos. Default is None. + + """ + n = pos.shape[0] + if len(pos.shape) == 3: + assert pos.shape[2] == 1 + pos = pos[:, :, 0] + + assert pos.shape == (n, 2) + pos = np.ascontiguousarray(pos.astype(np.float32)) + # Note: do not use "pos = int(pos.ctypes.data)" here + # Otherwise pos will get garbage collected by Python + # and the pointer to its data becomes invalid + pos_ptr = int(pos.ctypes.data) + + if isinstance(color, np.ndarray): + assert color.shape == (n,) + color = np.ascontiguousarray(color.astype(np.uint32)) + color_array = int(color.ctypes.data) + color_single = 0 + elif isinstance(color, int): + color_array = 0 + color_single = color + else: + raise ValueError("Color must be an ndarray or int (e.g., 0x956333)") + + if palette is not None: + assert palette_indices is not None, "palette must be used together with palette_indices" + + if isinstance(palette_indices, Field): + ind_int = palette_indices.to_numpy().astype(np.uint32) + elif isinstance(palette_indices, list) or isinstance(palette_indices, np.ndarray): + ind_int = np.array(palette_indices).astype(np.uint32) + else: + try: + ind_int = np.array(palette_indices) + except: + raise TypeError("palette_indices must be a type that can be converted to numpy.ndarray") + + assert issubclass(ind_int.dtype.type, np.integer), "palette_indices must be an integer array" + assert ind_int.shape == (n,), "palette_indices must be in 1-d shape with shape (num_particles, )" + assert min(ind_int) >= 0, "the min of palette_indices must not be less than zero" + assert max(ind_int) < len(palette), "the max of palette_indices must not exceed the length of palette" + color_array = np.array(palette, dtype=np.uint32)[ind_int] + color_array = np.ascontiguousarray(color_array) + color_array = color_array.ctypes.data + + if isinstance(radius, np.ndarray): + assert radius.shape == (n,) + radius = np.ascontiguousarray(radius.astype(np.float32)) + radius_array = int(radius.ctypes.data) + radius_single = 0 + elif isinstance(radius, numbers.Number): + radius_array = 0 + radius_single = radius + else: + raise ValueError("Radius must be an ndarray or float (e.g., 0.4)") + + self.canvas.circles_batched(n, pos_ptr, color_single, color_array, radius_single, radius_array) + + def triangles(self, a, b, c, color=0xFFFFFF): + """Draws a list of triangles on canvas. + + Args: + a (numpy.array): The positions of the first points of triangles. + b (numpy.array): The positions of the second points of triangles. + c (numpy.array): The positions of the third points of triangles. + color (Union[int, numpy.array], optional): The color or colors of triangles. + Can be either a single color or a list of colors whose shape matches + the shape of a & b & c. Default is 0xFFFFFF. + + """ + assert a.shape == b.shape + assert a.shape == c.shape + n = a.shape[0] + if len(a.shape) == 3: + assert a.shape[2] == 1 + a = a[:, :, 0] + b = b[:, :, 0] + c = c[:, :, 0] + + assert a.shape == (n, 2) + a = np.ascontiguousarray(a.astype(np.float32)) + b = np.ascontiguousarray(b.astype(np.float32)) + c = np.ascontiguousarray(c.astype(np.float32)) + # Note: do not use "a = int(a.ctypes.data)" here + # Otherwise a will get garbage collected by Python + # and the pointer to its data becomes invalid + a_ptr = int(a.ctypes.data) + b_ptr = int(b.ctypes.data) + c_ptr = int(c.ctypes.data) + + if isinstance(color, np.ndarray): + assert color.shape == (n,) + color = np.ascontiguousarray(color.astype(np.uint32)) + color_array = int(color.ctypes.data) + color_single = 0 + elif isinstance(color, int): + color_array = 0 + color_single = color + else: + raise ValueError('"color" must be an ndarray or int (e.g., 0x956333)') + + self.canvas.triangles_batched(n, a_ptr, b_ptr, c_ptr, color_single, color_array) + + def triangle(self, a, b, c, color=0xFFFFFF): + """Draws a single triangle on canvas. + + Args: + a (List[Number]): The position of the first point of triangle. Shape must be 2. + b (List[Number]): The position of the second point of triangle. Shape must be 2. + c (List[Number]): The position of the third point of triangle. Shape must be 2. + color (int, optional): The color of the triangle. Default is 0xFFFFFF. + """ + self.canvas.triangle_single(a[0], a[1], b[0], b[1], c[0], c[1], color) + + def lines(self, begin, end, radius=1, color=0xFFFFFF): + """Draw a list of lines on canvas. + + Args: + begin (numpy.array): The positions of one end of lines. + end (numpy.array): The positions of the other end of lines. + radius (Union[Number, numpy.array], optional): The width of lines. + Can be either a single width or a list of width whose shape matches + the shape of begin & end. Default is 1. + color (Union[int, numpy.array], optional): The color or colors of lines. + Can be either a single color or a list of colors whose shape matches + the shape of begin & end. Default is 0xFFFFFF. + + """ + assert begin.shape == end.shape + n = begin.shape[0] + if len(begin.shape) == 3: + assert begin.shape[2] == 1 + begin = begin[:, :, 0] + end = end[:, :, 0] + + assert begin.shape == (n, 2) + begin = np.ascontiguousarray(begin.astype(np.float32)) + end = np.ascontiguousarray(end.astype(np.float32)) + # Note: do not use "begin = int(begin.ctypes.data)" here + # Otherwise begin will get garbage collected by Python + # and the pointer to its data becomes invalid + begin_ptr = int(begin.ctypes.data) + end_ptr = int(end.ctypes.data) + + if isinstance(color, np.ndarray): + assert color.shape == (n,) + color = np.ascontiguousarray(color.astype(np.uint32)) + color_array = int(color.ctypes.data) + color_single = 0 + elif isinstance(color, int): + color_array = 0 + color_single = color + else: + raise ValueError("Color must be an ndarray or int (e.g., 0x956333)") + + if isinstance(radius, np.ndarray): + assert radius.shape == (n,) + radius = np.ascontiguousarray(radius.astype(np.float32)) + radius_array = int(radius.ctypes.data) + radius_single = 0 + elif isinstance(radius, numbers.Number): + radius_array = 0 + radius_single = radius + else: + raise ValueError("Radius must be an ndarray or float (e.g., 0.4)") + + self.canvas.paths_batched( + n, + begin_ptr, + end_ptr, + color_single, + color_array, + radius_single, + radius_array, + ) + + def line(self, begin, end, radius=1, color=0xFFFFFF): + """Draws a single line on canvas. + + Args: + begin (List[Number]): The position of one end of line. Shape must be 2. + end (List[Number]): The position of the other end of line. Shape must be 2. + radius (Number, optional): The width of line. Default is 1. + color (int, optional): The color of line. Default is 0xFFFFFF. + + """ + self.canvas.path_single(begin[0], begin[1], end[0], end[1], color, radius) + + @staticmethod + def _arrow_to_lines(orig, major, tip_scale=0.2, angle=45): + angle = math.radians(180 - angle) + c, s = math.cos(angle), math.sin(angle) + minor1 = np.array([major[:, 0] * c - major[:, 1] * s, major[:, 0] * s + major[:, 1] * c]).swapaxes(0, 1) + minor2 = np.array([major[:, 0] * c + major[:, 1] * s, -major[:, 0] * s + major[:, 1] * c]).swapaxes(0, 1) + end = orig + major + return [ + (orig, end), + (end, end + minor1 * tip_scale), + (end, end + minor2 * tip_scale), + ] + + def arrows(self, orig, direction, radius=1, color=0xFFFFFF, **kwargs): + """Draw a list arrows on canvas. + + Args: + orig (numpy.array): The positions where arrows start. + direction (numpy.array): The directions where arrows point to. + radius (Union[Number, np.array], optional): The width of arrows. Default is 1. + color (Union[int, np.array], optional): The color or colors of arrows. Default is 0xffffff. + + """ + for begin, end in self._arrow_to_lines(orig, direction, **kwargs): + self.lines(begin, end, radius, color) + + def arrow(self, orig, direction, radius=1, color=0xFFFFFF, **kwargs): + """Draws a single arrow on canvas. + + Args: + orig (List[Number]): The position where arrow starts. Shape must be 2. + direction (List[Number]): The direction where arrow points to. Shape must be 2. + radius (Number, optional): The width of arrow. Default is 1. + color (int, optional): The color of arrow. Default is 0xFFFFFF. + + """ + orig = np.array([orig]) + direction = np.array([direction]) + for begin, end in self._arrow_to_lines(orig, direction, **kwargs): + self.line(begin[0], end[0], radius, color) + + def rect(self, topleft, bottomright, radius=1, color=0xFFFFFF): + """Draws a single rectangle on canvas. + + Args: + topleft (List[Number]): The position of the topleft corner of rectangle. + Shape must be 2. + bottomright (List[Number]): The position of the bottomright corner + of rectangle. Shape must be 2. + radius (Number, optional): The width of rectangle's sides. Default is 1. + color (int, optional): The color of rectangle. Default is 0xFFFFFF. + + """ + a = topleft[0], topleft[1] + b = bottomright[0], topleft[1] + c = bottomright[0], bottomright[1] + d = topleft[0], bottomright[1] + self.line(a, b, radius, color) + self.line(b, c, radius, color) + self.line(c, d, radius, color) + self.line(d, a, radius, color) + + def text(self, content, pos, font_size=15, color=0xFFFFFF): + """Draws texts on canvas. + + Args: + content (str): The text to be drawn on canvas. + pos (List[Number]): The position where the text is to be put. + font_size (Number, optional): The font size of the text. + color (int, optional): The color of the text. Default is 0xFFFFFF. + + """ + + # TODO: refactor Canvas::text + font_size = float(font_size) + pos = core_vec(*pos) + r, g, b = hex_to_rgb(color) + color = core_vec(r, g, b, 1) + self.canvas.text(content, pos, font_size, color) + + @staticmethod + def _make_field_base(w, h, bound): + x = np.linspace(bound / w, 1 - bound / w, w) + y = np.linspace(bound / h, 1 - bound / h, h) + base = np.array(np.meshgrid(x, y)) + base = base.swapaxes(0, 1).swapaxes(1, 2).swapaxes(0, 1) + return base.reshape(w * h, 2) + + def point_field(self, radius, color=0xFFFFFF, bound=0.5): + """Draws a field of points on canvas. + + Args: + radius (np.array): The pattern and radius of the field of points. + color (Union[int, np.array], optional): The color or colors of points. + Default is 0xFFFFFF. + bound (Number, optional): The boundary of the field. Default is 0.5. + + """ + assert len(radius.shape) == 2 + base = self._make_field_base(radius.shape[0], radius.shape[1], bound) + radius = radius.reshape(radius.shape[0] * radius.shape[1]) + self.circles(base, radius=radius, color=color) + + def arrow_field(self, direction, radius=1, color=0xFFFFFF, bound=0.5, **kwargs): + """Draw a field of arrows on canvas. + + Args: + direction (np.array): The pattern and direction of the field of arrows. + color (Union[int, np.array], optional): The color or colors of arrows. + Default is 0xFFFFFF. + bound (Number, optional): The boundary of the field. Default is 0.5. + + """ + assert len(direction.shape) == 3 + assert direction.shape[2] == 2 + base = self._make_field_base(direction.shape[0], direction.shape[1], bound) + direction = direction.reshape(direction.shape[0] * direction.shape[1], 2) + self.arrows(base, direction, radius=radius, color=color, **kwargs) + + def vector_field(self, vector_field, arrow_spacing=5, color=0xFFFFFF): + """Display a vector field on canvas. + + Args: + vector_field (ti.Vector.field): The vector field being displayed. + arrow_spacing (int, optional): The spacing between vectors. + color (Union[int, np.array], optional): The color of vectors. + + """ + v_np = vector_field.to_numpy() + v_norm = np.linalg.norm(v_np, axis=-1) + nx, ny, ndim = v_np.shape + max_magnitude = np.max(v_norm) # Find the largest vector magnitude + + # The largest vector should occupy 10% of the window + scale_factor = 0.1 / (max_magnitude + 1e-16) + + x = np.arange(0, 1, arrow_spacing / nx) + y = np.arange(0, 1, arrow_spacing / ny) + X, Y = np.meshgrid(x, y) + begin = np.dstack((X, Y)).reshape(-1, 2, order="F") + incre = (v_np[::arrow_spacing, ::arrow_spacing] * scale_factor).reshape(-1, 2, order="C") + self.arrows(orig=begin, direction=incre, radius=1, color=color) + + def show(self, file=None): + """Shows the frame content in the gui window, or save the content to an + image file. + + Args: + file (str, optional): output filename. The default is `None`, and + the frame content is displayed in the gui window. If it's a valid + image filename the frame will be saved as the specified image. + """ + self.core.update() + if file: + self.core.screenshot(file) + self.frame += 1 + self.clear() + + # Event system + + class EventFilter: + """A set to store detected user events.""" + + def __init__(self, *e_filter): + self.filter = set() + for ent in e_filter: + if isinstance(ent, (list, tuple)): + e_type, key = ent + ent = (e_type, key) + self.filter.add(ent) + + def match(self, e): + """Check if a specified event `e` is among the detected events.""" + if (e.type, e.key) in self.filter: + return True + if e.type in self.filter: + return True + if e.key in self.filter: + return True + return False + + def has_key_event(self): + """Check if there is any key event registered. + + Returns: + bool: whether or not there is any key event registered. + """ + return self.core.has_key_event() + + def get_event(self, *e_filter): + """Checks if the specified event is triggered. + + Args: + *e_filter (ti.GUI.EVENT): The specific event to be checked. + + Returns: + bool: whether or not the specified event is triggered. + """ + for e in self.get_events(*e_filter): + self.event = e + return True + else: + return False + + def get_events(self, *e_filter): + """Gets a list of events that are triggered. + + Args: + *e_filter (List[ti.GUI.EVENT]): The type of events to be filtered. + + Returns: + :class:`~taichi.misc.gui.GUI.EVENT`: A list of events that are triggered. + """ + e_filter = e_filter and GUI.EventFilter(*e_filter) or None + + while True: + if not self.has_key_event(): + break + e = self.get_key_event() + if e_filter is None or e_filter.match(e): # pylint: disable=E1101 + yield e + + def get_key_event(self): + """Gets keyboard triggered event. + + Returns: + :class:`~taichi.misc.gui.GUI.EVENT`: The keyboard triggered event. + """ + self.core.wait_key_event() + + e = GUI.Event() + event = self.core.get_key_event_head() + + e.type = event.type + e.key = event.key + e.pos = self.core.canvas_untransform(event.pos) + e.pos = (e.pos[0], e.pos[1]) + e.modifier = [] + + if e.key == GUI.WHEEL: + e.delta = event.delta + else: + e.delta = (0, 0) + + for mod in ["Shift", "Alt", "Control"]: + if self.is_pressed(mod): + e.modifier.append(mod) + + if e.type == GUI.PRESS: + self.key_pressed.add(e.key) + else: + self.key_pressed.discard(e.key) + + self.core.pop_key_event_head() + return e + + def is_pressed(self, *keys): + """Checks if any key among a set of specified keys is pressed. + + Args: + *keys (Union[str, List[str]]): The keys to be listened to. + + Returns: + bool: whether or not any key among the specified keys is pressed. + """ + for key in keys: + if key in ["Shift", "Alt", "Control"]: + if key + "_L" in self.key_pressed or key + "_R" in self.key_pressed: + return True + if key in self.key_pressed: + return True + else: + return False + + def get_cursor_pos(self): + """Returns the current position of mouse as a pair of floats + in the range `[0, 1] x [0, 1]`. + + The origin of the coordinates system is located at the lower left + corner, with `+x` direction points to the right, and `+y` direcntion + points upward. + + Returns: + The current position of mouse. + """ + pos = self.core.get_cursor_pos() + return pos[0], pos[1] + + @property + def running(self): + """Returns whether this gui is running or not. + + Returns: + bool: whether this gui is running or not. + """ + return not self.core.should_close + + @running.setter + def running(self, value): + """Sets the running status of this gui. `True` for running + and `False` for stop. + + Args: + value (bool): `True/False` for running/stop. + """ + if value: + self.core.should_close = 0 + elif not self.core.should_close: + self.core.should_close = 1 + + @property + def fps_limit(self): + """Gets the maximum fps of this gui. + + Returns: + int: the maximum fps. + """ + if self.core.frame_delta_limit == 0: + return None + return 1 / self.core.frame_delta_limit + + @fps_limit.setter + def fps_limit(self, value): + """Set the maximum fps for the gui. This is the maximum number of + frames that the gui can show in one second. + + Args: + value (int): maximum fps. + """ + if value is None: + self.core.frame_delta_limit = 0 + else: + self.core.frame_delta_limit = 1 / value + + +def rgb_to_hex(c): + """Converts rgb color format to hex color format. + + Args: + c (List[int]): The rgb representation of color. + + Returns: + The hex representation of color. + """ + + def to255(x): + return np.clip(np.int32(x * 255), 0, 255) + + return (to255(c[0]) << 16) + (to255(c[1]) << 8) + to255(c[2]) + + +def hex_to_rgb(color): + """Converts hex color format to rgb color format. + + Args: + color (int): The hex representation of color. + + Returns: + The rgb representation of color. + """ + r, g, b = (color >> 16) & 0xFF, (color >> 8) & 0xFF, color & 0xFF + return r / 255, g / 255, b / 255 + + +def core_veci(*args): + if isinstance(args[0], _ti_core.Vector2i): + return args[0] + if isinstance(args[0], _ti_core.Vector3i): + return args[0] + if isinstance(args[0], tuple): + args = tuple(*args) + if len(args) == 2: + return _ti_core.Vector2i(int(args[0]), int(args[1])) + if len(args) == 3: + return _ti_core.Vector3i(int(args[0]), int(args[1]), int(args[2])) + if len(args) == 4: + return _ti_core.Vector4i(int(args[0]), int(args[1]), int(args[2]), int(args[3])) + assert False, type(args[0]) + + +def core_vec(*args): + if isinstance(args[0], _ti_core.Vector2f): + return args[0] + if isinstance(args[0], _ti_core.Vector3f): + return args[0] + if isinstance(args[0], _ti_core.Vector4f): + return args[0] + if isinstance(args[0], _ti_core.Vector2d): + return args[0] + if isinstance(args[0], _ti_core.Vector3d): + return args[0] + if isinstance(args[0], _ti_core.Vector4d): + return args[0] + if isinstance(args[0], tuple): + args = tuple(*args) + if _ti_core.get_default_float_size() == 4: + if len(args) == 2: + return _ti_core.Vector2f(float(args[0]), float(args[1])) + if len(args) == 3: + return _ti_core.Vector3f(float(args[0]), float(args[1]), float(args[2])) + if len(args) == 4: + return _ti_core.Vector4f(float(args[0]), float(args[1]), float(args[2]), float(args[3])) + assert False, type(args[0]) + else: + if len(args) == 2: + return _ti_core.Vector2d(float(args[0]), float(args[1])) + if len(args) == 3: + return _ti_core.Vector3d(float(args[0]), float(args[1]), float(args[2])) + if len(args) == 4: + return _ti_core.Vector4d(float(args[0]), float(args[1]), float(args[2]), float(args[3])) + assert False, type(args[0]) + + +__all__ = [ + "GUI", + "rgb_to_hex", + "hex_to_rgb", +] diff --git a/local_packages/taichi/ui/imgui.py b/local_packages/taichi/ui/imgui.py new file mode 100644 index 0000000..570a815 --- /dev/null +++ b/local_packages/taichi/ui/imgui.py @@ -0,0 +1,118 @@ +from contextlib import contextmanager + + +class Gui: + """For declaring IMGUI components in a :class:`taichi.ui.Window` + created by the GGUI system. + + Args: + gui: reference to a `PyGui`. + """ + + def __init__(self, gui) -> None: + self.gui = gui + + @contextmanager + def sub_window(self, name, x, y, width, height): + """Creating a context manager for subwindow. + + Note: + All args of this method should align with `begin`. + + Args: + x (float): The x-coordinate (between 0 and 1) of the top-left \ + corner of the subwindow, relative to the full window. + y (float): The y-coordinate (between 0 and 1) of the top-left \ + corner of the subwindow, relative to the full window. + width (float): The width of the subwindow relative to the full window. + height (float): The height of the subwindow relative to the full window. + + Example:: + + >>> with gui.sub_window(name, x, y, width, height) as g: + >>> g.text("Hello, World!") + """ + self.begin(name, x, y, width, height) + try: + yield self + finally: + self.end() + + def begin(self, name, x, y, width, height): + """Creates a subwindow that holds imgui widgets. + + All widget function calls (e.g. `text`, `button`) after the `begin` + and before the next `end` will describe the widgets within this subwindow. + + Args: + x (float): The x-coordinate (between 0 and 1) of the top-left \ + corner of the subwindow, relative to the full window. + y (float): The y-coordinate (between 0 and 1) of the top-left \ + corner of the subwindow, relative to the full window. + width (float): The width of the subwindow relative to the full window. + height (float): The height of the subwindow relative to the full window. + """ + self.gui.begin(name, x, y, width, height) + + def end(self): + """End the description of the current subwindow.""" + self.gui.end() + + def text(self, text, color=None): + """Declares a line of text.""" + if color is None: + self.gui.text(text) + else: + self.gui.text_colored(text, color) + + def checkbox(self, text, old_value): + """Declares a checkbox, and returns whether or not it has been checked. + + Args: + text (str): a line of text to be shown next to the checkbox. + old_value (bool): whether the checkbox is currently checked. + """ + return self.gui.checkbox(text, old_value) + + def slider_int(self, text, old_value, minimum, maximum): + """Declares a slider, and returns its newest value. + + Args: + text (str): a line of text to be shown next to the slider + old_value (int) : the current value of the slider. + minimum (int): the minimum value of the slider. + maximum (int): the maximum value of the slider. + + Returns: + int: the updated value of the slider. + """ + return self.gui.slider_int(text, old_value, minimum, maximum) + + def slider_float(self, text, old_value, minimum, maximum): + """Declares a slider, and returns its newest value. + + Args: + text (str): a line of text to be shown next to the slider + old_value (float): the current value of the slider. + minimum (float): the minimum value of the slider. + maximum (float): the maximum value of the slider. + """ + return self.gui.slider_float(text, old_value, minimum, maximum) + + def color_edit_3(self, text, old_value): + """Declares a color edit palate. + + Args: + text (str): a line of text to be shown next to the palate. + old_value (Tuple[float]): the current value of the color, this \ + should be a tuple of floats in [0,1] that indicates RGB values. + """ + return self.gui.color_edit_3(text, old_value) + + def button(self, text): + """Declares a button, and returns whether or not it had just been clicked. + + Args: + text (str): a line of text to be shown next to the button. + """ + return self.gui.button(text) diff --git a/local_packages/taichi/ui/scene.py b/local_packages/taichi/ui/scene.py new file mode 100644 index 0000000..35bced6 --- /dev/null +++ b/local_packages/taichi/ui/scene.py @@ -0,0 +1,802 @@ +import warnings +import taichi +from taichi._lib import core as _ti_core +from taichi.lang.impl import field +from taichi.lang.kernel_impl import kernel +from taichi.lang.matrix import Vector +from taichi.lang.ops import atomic_add +from taichi.types.annotations import template +from taichi.types.primitive_types import f32 + +from .staging_buffer import ( + copy_to_vbo_scalar, + copy_to_vbo_vector, + copy_all_to_vbo, + copy_all_to_vbo_particle, + get_indices_field, + get_indices_field_v2, + get_transforms_field, + get_transforms_field_v2, + get_vbo_field, + get_vbo_field_v2, +) +from .utils import check_ggui_availability, get_field_info + +normals_field_cache = {} + + +def get_normals_field(vertices): + if vertices not in normals_field_cache: + N = vertices.shape[0] + normals = Vector.field(3, f32, shape=(N,)) + normal_weights = field(f32, shape=(N,)) + normals_field_cache[vertices] = (normals, normal_weights) + return (normals, normal_weights) + return normals_field_cache[vertices] + + +@kernel +def gen_normals_kernel(vertices: template(), normals: template()): + N = vertices.shape[0] + for i in range(N / 3): + a = vertices[i * 3] + b = vertices[i * 3 + 1] + c = vertices[i * 3 + 2] + n = (a - b).cross(a - c).normalized() + normals[i * 3] = n + normals[i * 3 + 1] = n + normals[i * 3 + 2] = n + + +@kernel +def gen_normals_kernel_indexed(vertices: template(), indices: template(), normals: template(), weights: template()): + num_triangles = indices.shape[0] // 3 + num_vertices = vertices.shape[0] + for i in range(num_vertices): + normals[i] = Vector([0.0, 0.0, 0.0]) + weights[i] = 0.0 + for i in range(num_triangles): + i_a = indices[i * 3] + i_b = indices[i * 3 + 1] + i_c = indices[i * 3 + 2] + a = vertices[i_a] + b = vertices[i_b] + c = vertices[i_c] + n = (a - b).cross(a - c).normalized() + atomic_add(normals[i_a], n) + atomic_add(normals[i_b], n) + atomic_add(normals[i_c], n) + atomic_add(weights[i_a], 1.0) + atomic_add(weights[i_b], 1.0) + atomic_add(weights[i_c], 1.0) + for i in range(num_vertices): + if weights[i] > 0.0: + normals[i] = normals[i] / weights[i] + + +def gen_normals(vertices, indices): + normals, weights = get_normals_field(vertices) + if indices is None: + gen_normals_kernel(vertices, normals) + else: + gen_normals_kernel_indexed(vertices, indices, normals, weights) + return normals + + +class SceneV2: + """The new 3D scene class, which can contain meshes, lines and particles, + and can be rendered on a canvas. + An instance of this class can be retrieved using the + get_scene() function of a taichi.ui.Window. + """ + + def __init__(self, scene) -> None: + check_ggui_availability() + self.scene = scene + + def set_camera(self, camera): + """Set the camera for this scene. + + Args: + camera (:class:`~taichi.ui.Camera`): A camera instance. + """ + self.scene.set_camera(camera.ptr) + + def lines( + self, + vertices, + width, + indices=None, + color=(0.5, 0.5, 0.5), + per_vertex_color=None, + vertex_offset: int = 0, + vertex_count: int = None, + index_offset: int = 0, + index_count: int = None, + ): + """Declare multi-lines inside the scene. + + Note that under current situation, for example, there you have 4 vertices, + vertices.shape[0] is 4. So there will be 2 lines, the first line's two points + are vertices[0] and vertices[1], and the second line's two points are + vertices[2] and vertices[3]. + + Args: + vertices: a taichi 3D Vector field, where each element indicate the + 3D location of points of lines. + width: the line width (maybe different on different systems). + indices: a taichi int field of shape (2 * #points), which indicate + the points indices of the lines. If this is None, then it is + assumed that the points are already arranged in lines order. + color: a global color of the mesh as 3 floats representing RGB values. + If `per_vertex_color` is provided, this is ignored. + per_vertex_color (Tuple[float]): a taichi 3D vector field, where each + element indicate the RGB color of the line. + vertex_offset (int, optional): + if 'indices' is provided, this refers to the value added to the vertex + index before indexing into the vertex buffer, else this refers to the + index of the first vertex to draw. + vertex_count (int, optional): + only available when `indices` is not provided, which is the number + of vertices to draw. There are 2 cases that we will change your + `vertex_count`. [1] If the `vertex_count` is an odd number, then we + will change it to `vertex_count` - 1. [2] If `vertex_offset` plus + `vertex_count` greater than vertices.shape[0], then we will reduce + `vertex_count` to no more than vertices.shape[0]. + index_offset (int, optional): + Only available when `indices` is provided, which is the base index + within the index buffer. + index_count (int, optional): + Only available when `indices` is provided, which is the number + of vertices to draw. + """ + if vertex_count is None: + vertex_count = vertices.shape[0] + if index_count is None: + if indices is None: + index_count = vertex_count + else: + index_count = indices.shape[0] + if vertex_count % 2: + print("Warning! Odd drawing count will be cut to neast even number") + vertex_count -= 1 + if vertex_count + vertex_offset > vertices.shape[0]: + print("Warning! Drawing count greater than shape will be cut") + vertex_count = vertices.shape[0] - vertex_offset + vertex_count -= vertex_count % 2 + has_per_vertex_color = per_vertex_color is not None + vbo = get_vbo_field_v2(vertices) + if isinstance(vertices, taichi.Field): + copy_all_to_vbo(vbo, vertices, 0, 0, per_vertex_color if has_per_vertex_color else 0) + else: + copy_to_vbo_vector(vbo, vertices, 0, 3, taichi.Vector([0, 0, 0])) + if has_per_vertex_color: + copy_to_vbo_vector(vbo, per_vertex_color, 8, 4, taichi.Vector([1, 1, 1, 1])) + vbo_info = get_field_info(vbo) + indices_ndarray = get_indices_field_v2(indices) if indices is not None else None + indices_info = get_field_info(indices_ndarray) + self.scene.lines( + vbo_info, + indices_info, + has_per_vertex_color, + color, + width, + index_count, + index_offset, + vertex_count, + vertex_offset, + ) + + def mesh( + self, + vertices, + indices=None, + normals=None, + color=(0.5, 0.5, 0.5), + per_vertex_color=None, + two_sided=False, + vertex_offset: int = 0, + vertex_count: int = None, + index_offset: int = 0, + index_count: int = None, + show_wireframe: bool = False, + ): + """Declare a mesh inside the scene. + + if you indicate the index_offset and index_count, the normals will also + be sliced by the args, and the shading resultes will not be affected. + (It is equal to make a part of the mesh visible) + + Args: + vertices: a taichi 3D Vector field, where each element indicate the + 3D location of a vertex. + indices: a taichi int field of shape (3 * #triangles), which indicate + the vertex indices of the triangles. If this is None, then it is + assumed that the vertices are already arranged in triangles order. + normals: a taichi 3D Vector field, where each element indicate the + normal of a vertex. If this is none, normals will be automatically + inferred from vertex positions. + color: a global color of the mesh as 3 floats representing RGB values. + If `per_vertex_color` is provided, this is ignored. + per_vertex_color (Tuple[float]): a taichi 3D vector field, where each + element indicate the RGB color of a vertex. + two_sided (bool): whether or not the triangles should be able to be + seen from both sides. + vertex_offset (int, optional): + if 'indices' is provided, this refers to the value added to the vertex + index before indexing into the vertex buffer, else this refers to the + index of the first vertex to draw. + vertex_count (int, optional): + only available when `indices` is not provided, which is the number + of vertices to draw. + index_offset (int, optional): + only available when `indices` is provided, which is the base index + within the index buffer. + index_count (int, optional): + only available when `indices` is provided, which is the the number + of vertices to draw. + show_wireframe (bool, optional): + turn on/off WareFrame mode. + """ + has_per_vertex_color = per_vertex_color is not None + if normals is None: + normals = gen_normals(vertices, indices) + if vertex_count is None: + vertex_count = vertices.shape[0] + if index_count is None: + if indices is None: + index_count = vertex_count # FIXME : Need to confirm + else: + index_count = indices.shape[0] + vbo = get_vbo_field_v2(vertices) + copy_all_to_vbo(vbo, vertices, normals, 0, per_vertex_color if has_per_vertex_color else 0) + vbo_info = get_field_info(vbo) + indices_ndarray = get_indices_field_v2(indices) if indices is not None else None + indices_info = get_field_info(indices_ndarray) + + self.scene.mesh( + vbo_info, + has_per_vertex_color, + indices_info, + color, + two_sided, + index_count, + index_offset, + vertex_count, + vertex_offset, + show_wireframe, + ) + + def mesh_instance( + self, + vertices, + indices=None, + normals=None, + color=(0.5, 0.5, 0.5), + per_vertex_color=None, + two_sided=False, + transforms=None, + instance_offset: int = 0, + instance_count: int = None, + vertex_offset: int = 0, + vertex_count: int = None, + index_offset: int = 0, + index_count: int = None, + show_wireframe: bool = False, + ): + """Declare mesh instances inside the scene. + + If transforms is given, then according to the shape of transforms, it will + draw mesh instances based on the transforms, and you can indicate which instance + to draw first. If you indicate the index_offset and index_count, the normals will also + be sliced by the args, and the shading resultes will not be affected. + (It is equal to make a part of the mesh visible) + + Args: + vertices: a taichi 3D Vector field, where each element indicate the + 3D location of a vertex. + indices: a taichi int field of shape (3 * #triangles), which indicate + the vertex indices of the triangles. If this is None, then it is + assumed that the vertices are already arranged in triangles order. + normals: a taichi 3D Vector field, where each element indicate the + normal of a vertex. If this is none, normals will be automatically + inferred from vertex positions. + color: a global color of the mesh as 3 floats representing RGB values. + If `per_vertex_color` is provided, this is ignored. + per_vertex_color (Tuple[float]): a taichi 3D vector field, where each + element indicate the RGB color of a vertex. + two_sided (bool): whether or not the triangles should be able to be + seen from both sides. + transforms (ti.Matrix.field, optional): + The Matrix must be 4x4 size with N instances, and data type should + be ti.f32, ti.i32, ti.u32. If None, then it behaves like raw mesh (no copy). + instance_offset (int, optional): + Default value is 0 which means no offset to show mesh instances. Otherwise, + the mesh instances will show from the `instance_offset`. + instance_count (int, optional): + The default value is None. If this parameter is not provided, instance_count = transforms.shape[0] - instance_offset. + vertex_offset (int, optional): + if 'indices' is provided, this refers to the value added to the vertex + index before indexing into the vertex buffer, else this refers to the + index of the first vertex to draw. + vertex_count (int, optional): + only available when `indices` is not provided, which is the number + of vertices to draw. + index_offset (int, optional): + only available when `indices` is provided, which is the base index + within the index buffer. + index_count (int, optional): + only available when `indices` is provided, which is the the number + of indices to draw. + show_wireframe (bool, optional): + turn on/off WareFrame mode. + """ + has_per_vertex_color = per_vertex_color is not None + if normals is None: + normals = gen_normals(vertices, indices) + if vertex_count is None: + vertex_count = vertices.shape[0] + if index_count is None: + if indices is None: + index_count = vertex_count + else: + index_count = indices.shape[0] + if transforms: + if transforms.m != 4 or transforms.n != 4: + raise Exception("Error! Transform matrix must be 4x4 shape") + if instance_count is None: + instance_count = transforms.shape[0] + else: + instance_count = 1 + + vbo = get_vbo_field_v2(vertices) + copy_all_to_vbo(vbo, vertices, normals, 0, per_vertex_color if has_per_vertex_color else 0) + vbo_info = get_field_info(vbo) + indices_ndarray = get_indices_field_v2(indices) if indices else None + indices_info = get_field_info(indices_ndarray) + transforms_ndarray = get_transforms_field_v2(transforms) if transforms else None + transform_info = get_field_info(transforms_ndarray) + self.scene.mesh_instance( + vbo_info, + has_per_vertex_color, + indices_info, + color, + two_sided, + transform_info, + instance_count, + instance_offset, + index_count, + index_offset, + vertex_count, + vertex_offset, + show_wireframe, + ) + + def particles( + self, + centers, + radius, + color=(0.5, 0.5, 0.5), + per_vertex_color=None, + per_vertex_radius=None, + index_offset: int = 0, + index_count: int = None, + ): + """Declare a set of particles within the scene. + + Args: + centers: a taichi 3D Vector field, where each element indicate the + 3D location of the center of a particle. + color: a global color for the particles as 3 floats representing RGB + values. If `per_vertex_color` is provided, this is ignored. + radius: a global radius for the particles as a float. + If 'per_vertex_radius' is provided, this is ignored. + per_vertex_color: a taichi 3D vector field, where each + element indicates the RGB color of a particle. + per_vertex_radius: a taichi float field, where each + element indicates the radius of a particle. + index_offset (int, optional): + the index of the first vertex to draw. + index_count (int, optional): + the number of vertices to draw. + """ + has_per_vertex_color = per_vertex_color is not None + has_per_vertex_radius = per_vertex_radius is not None + if index_count is None: + index_count = centers.shape[0] + # per_vertex_radius_vec3[i] = Vector([per_vertex_radius[i], 0., 0.]) + vbo = get_vbo_field_v2(centers) + + if isinstance(centers, taichi.Field): + copy_all_to_vbo_particle( + vbo, + centers, + per_vertex_radius if has_per_vertex_radius else 0, + per_vertex_color if has_per_vertex_color else 0, + ) + else: + copy_to_vbo_vector(vbo, centers, 0, 3, taichi.Vector([0, 0, 0])) + if has_per_vertex_radius: + copy_to_vbo_scalar(vbo, per_vertex_radius, 3) + if has_per_vertex_color: + copy_to_vbo_vector(vbo, per_vertex_color, 8, 4, taichi.Vector([1, 1, 1, 1])) + + vbo_info = get_field_info(vbo) + self.scene.particles( + vbo_info, has_per_vertex_color, has_per_vertex_radius, color, radius, index_count, index_offset + ) + + def point_light(self, pos, color): # pylint: disable=W0235 + """Set a point light in this scene. + + Args: + pos (list, tuple, :class:`~taichi.types.vector(3, float)`): + 3D vector for light position. + color (list, tuple, :class:`~taichi.types.vector(3, float)`): + (r, g, b) triple for the color of the light, in the range [0, 1]. + """ + self.scene.point_light(tuple(pos), tuple(color)) + + def ambient_light(self, color): + """Set the ambient color of this scene. + + Example:: + >>> scene.ambient_light([0.2, 0.2, 0.2]) + """ + self.scene.ambient_light(tuple(color)) + + +class Scene: + """The old 3D scene class, which can contain meshes, lines and particles, + and can be rendered on a canvas. + This scene class is to be instantiated directly and is deprecated. + Use the new SceneV2 class instead, which can be retrieved using the + get_scene() function of a taichi.ui.Window. + """ + + def __init__(self): + check_ggui_availability() + self.scene = _ti_core.PyScene() + warnings.warn( + "Instantiating ti.ui.Scene directly is deprecated, use the get_scene() function from a taichi.ui.Window object instead.", + DeprecationWarning, + ) + + def set_camera(self, camera): + """Set the camera for this scene. + + Args: + camera (:class:`~taichi.ui.Camera`): A camera instance. + """ + self.scene.set_camera(camera.ptr) + + def lines( + self, + vertices, + width, + indices=None, + color=(0.5, 0.5, 0.5), + per_vertex_color=None, + vertex_offset: int = 0, + vertex_count: int = None, + index_offset: int = 0, + index_count: int = None, + ): + """Declare multi-lines inside the scene. + + Note that under current situation, for example, there you have 4 vertices, + vertices.shape[0] is 4. So there will be 2 lines, the first line's two points + are vertices[0] and vertices[1], and the second line's two points are + vertices[2] and vertices[3]. + + Args: + vertices: a taichi 3D Vector field, where each element indicate the + 3D location of points of lines. + width: the line width (maybe different on different systems). + indices: a taichi int field of shape (2 * #points), which indicate + the points indices of the lines. If this is None, then it is + assumed that the points are already arranged in lines order. + color: a global color of the mesh as 3 floats representing RGB values. + If `per_vertex_color` is provided, this is ignored. + per_vertex_color (Tuple[float]): a taichi 3D vector field, where each + element indicate the RGB color of the line. + vertex_offset (int, optional): + if 'indices' is provided, this refers to the value added to the vertex + index before indexing into the vertex buffer, else this refers to the + index of the first vertex to draw. + vertex_count (int, optional): + only available when `indices` is not provided, which is the number + of vertices to draw. There are 2 cases that we will change your + `vertex_count`. [1] If the `vertex_count` is an odd number, then we + will change it to `vertex_count` - 1. [2] If `vertex_offset` plus + `vertex_count` greater than vertices.shape[0], then we will reduce + `vertex_count` to no more than vertices.shape[0]. + index_offset (int, optional): + Only available when `indices` is provided, which is the base index + within the index buffer. + index_count (int, optional): + Only available when `indices` is provided, which is the number + of vertices to draw. + """ + if vertex_count is None: + vertex_count = vertices.shape[0] + if index_count is None: + if indices is None: + index_count = vertex_count + else: + index_count = indices.shape[0] + if vertex_count % 2: + print("Warning! Odd drawing count will be cut to neast even number") + vertex_count -= 1 + if vertex_count + vertex_offset > vertices.shape[0]: + print("Warning! Drawing count greater than shape will be cut") + vertex_count = vertices.shape[0] - vertex_offset + vertex_count -= vertex_count % 2 + has_per_vertex_color = per_vertex_color is not None + vbo = get_vbo_field(vertices) + copy_all_to_vbo(vbo, vertices, 0, 0, per_vertex_color if has_per_vertex_color else 0) + vbo_info = get_field_info(vbo) + indices_ndarray = get_indices_field(indices) if indices is not None else None + indices_info = get_field_info(indices_ndarray) + self.scene.lines( + vbo_info, + indices_info, + has_per_vertex_color, + color, + width, + index_count, + index_offset, + vertex_count, + vertex_offset, + ) + + def mesh( + self, + vertices, + indices=None, + normals=None, + color=(0.5, 0.5, 0.5), + per_vertex_color=None, + two_sided=False, + vertex_offset: int = 0, + vertex_count: int = None, + index_offset: int = 0, + index_count: int = None, + show_wireframe: bool = False, + ): + """Declare a mesh inside the scene. + + if you indicate the index_offset and index_count, the normals will also + be sliced by the args, and the shading resultes will not be affected. + (It is equal to make a part of the mesh visible) + + Args: + vertices: a taichi 3D Vector field, where each element indicate the + 3D location of a vertex. + indices: a taichi int field of shape (3 * #triangles), which indicate + the vertex indices of the triangles. If this is None, then it is + assumed that the vertices are already arranged in triangles order. + normals: a taichi 3D Vector field, where each element indicate the + normal of a vertex. If this is none, normals will be automatically + inferred from vertex positions. + color: a global color of the mesh as 3 floats representing RGB values. + If `per_vertex_color` is provided, this is ignored. + per_vertex_color (Tuple[float]): a taichi 3D vector field, where each + element indicate the RGB color of a vertex. + two_sided (bool): whether or not the triangles should be able to be + seen from both sides. + vertex_offset (int, optional): + if 'indices' is provided, this refers to the value added to the vertex + index before indexing into the vertex buffer, else this refers to the + index of the first vertex to draw. + vertex_count (int, optional): + only available when `indices` is not provided, which is the number + of vertices to draw. + index_offset (int, optional): + only available when `indices` is provided, which is the base index + within the index buffer. + index_count (int, optional): + only available when `indices` is provided, which is the the number + of vertices to draw. + show_wireframe (bool, optional): + turn on/off WareFrame mode. + """ + has_per_vertex_color = per_vertex_color is not None + if normals is None: + normals = gen_normals(vertices, indices) + if vertex_count is None: + vertex_count = vertices.shape[0] + if index_count is None: + if indices is None: + index_count = vertex_count # FIXME : Need to confirm + else: + index_count = indices.shape[0] + vbo = get_vbo_field(vertices) + copy_all_to_vbo(vbo, vertices, normals, 0, per_vertex_color if has_per_vertex_color else 0) + vbo_info = get_field_info(vbo) + indices_ndarray = get_indices_field(indices) if indices is not None else None + indices_info = get_field_info(indices_ndarray) + + self.scene.mesh( + vbo_info, + has_per_vertex_color, + indices_info, + color, + two_sided, + index_count, + index_offset, + vertex_count, + vertex_offset, + show_wireframe, + ) + + def mesh_instance( + self, + vertices, + indices=None, + normals=None, + color=(0.5, 0.5, 0.5), + per_vertex_color=None, + two_sided=False, + transforms=None, + instance_offset: int = 0, + instance_count: int = None, + vertex_offset: int = 0, + vertex_count: int = None, + index_offset: int = 0, + index_count: int = None, + show_wireframe: bool = False, + ): + """Declare mesh instances inside the scene. + + If transforms is given, then according to the shape of transforms, it will + draw mesh instances based on the transforms, and you can indicate which instance + to draw first. If you indicate the index_offset and index_count, the normals will also + be sliced by the args, and the shading resultes will not be affected. + (It is equal to make a part of the mesh visible) + + Args: + vertices: a taichi 3D Vector field, where each element indicate the + 3D location of a vertex. + indices: a taichi int field of shape (3 * #triangles), which indicate + the vertex indices of the triangles. If this is None, then it is + assumed that the vertices are already arranged in triangles order. + normals: a taichi 3D Vector field, where each element indicate the + normal of a vertex. If this is none, normals will be automatically + inferred from vertex positions. + color: a global color of the mesh as 3 floats representing RGB values. + If `per_vertex_color` is provided, this is ignored. + per_vertex_color (Tuple[float]): a taichi 3D vector field, where each + element indicate the RGB color of a vertex. + two_sided (bool): whether or not the triangles should be able to be + seen from both sides. + transforms (ti.Matrix.field, optional): + The Matrix must be 4x4 size with N instances, and data type should + be ti.f32, ti.i32, ti.u32. If None, then it behaves like raw mesh (no copy). + instance_offset (int, optional): + Default value is 0 which means no offset to show mesh instances. Otherwise, + the mesh instances will show from the `instance_offset`. + instance_count (int, optional): + The default value is None. If this parameter is not provided, instance_count = transforms.shape[0] - instance_offset. + vertex_offset (int, optional): + if 'indices' is provided, this refers to the value added to the vertex + index before indexing into the vertex buffer, else this refers to the + index of the first vertex to draw. + vertex_count (int, optional): + only available when `indices` is not provided, which is the number + of vertices to draw. + index_offset (int, optional): + only available when `indices` is provided, which is the base index + within the index buffer. + index_count (int, optional): + only available when `indices` is provided, which is the the number + of indices to draw. + show_wireframe (bool, optional): + turn on/off WareFrame mode. + """ + has_per_vertex_color = per_vertex_color is not None + if normals is None: + normals = gen_normals(vertices, indices) + if vertex_count is None: + vertex_count = vertices.shape[0] + if index_count is None: + if indices is None: + index_count = vertex_count + else: + index_count = indices.shape[0] + if transforms: + if transforms.m != 4 or transforms.n != 4: + raise Exception("Error! Transform matrix must be 4x4 shape") + if instance_count is None: + instance_count = transforms.shape[0] + else: + instance_count = 1 + + vbo = get_vbo_field(vertices) + copy_all_to_vbo(vbo, vertices, normals, 0, per_vertex_color if has_per_vertex_color else 0) + vbo_info = get_field_info(vbo) + indices_ndarray = get_indices_field(indices) if indices else None + indices_info = get_field_info(indices_ndarray) + transforms_ndarray = get_transforms_field(transforms) if transforms else None + transform_info = get_field_info(transforms_ndarray) + self.scene.mesh_instance( + vbo_info, + has_per_vertex_color, + indices_info, + color, + two_sided, + transform_info, + instance_count, + instance_offset, + index_count, + index_offset, + vertex_count, + vertex_offset, + show_wireframe, + ) + + def particles( + self, + centers, + radius, + color=(0.5, 0.5, 0.5), + per_vertex_color=None, + per_vertex_radius=None, + index_offset: int = 0, + index_count: int = None, + ): + """Declare a set of particles within the scene. + + Args: + centers: a taichi 3D Vector field, where each element indicate the + 3D location of the center of a particle. + color: a global color for the particles as 3 floats representing RGB + values. If `per_vertex_color` is provided, this is ignored. + radius: a global radius for the particles as a float. + If 'per_vertex_radius' is provided, this is ignored. + per_vertex_color: a taichi 3D vector field, where each + element indicates the RGB color of a particle. + per_vertex_radius: a taichi float field, where each + element indicates the radius of a particle. + index_offset (int, optional): + the index of the first vertex to draw. + index_count (int, optional): + the number of vertices to draw. + """ + has_per_vertex_color = per_vertex_color is not None + has_per_vertex_radius = per_vertex_radius is not None + if index_count is None: + index_count = centers.shape[0] + # per_vertex_radius_vec3[i] = Vector([per_vertex_radius[i], 0., 0.]) + vbo = get_vbo_field(centers) + copy_all_to_vbo_particle( + vbo, + centers, + per_vertex_radius if has_per_vertex_radius else 0, + per_vertex_color if has_per_vertex_color else 0, + ) + vbo_info = get_field_info(vbo) + self.scene.particles( + vbo_info, has_per_vertex_color, has_per_vertex_radius, color, radius, index_count, index_offset + ) + + def point_light(self, pos, color): # pylint: disable=W0235 + """Set a point light in this scene. + + Args: + pos (list, tuple, :class:`~taichi.types.vector(3, float)`): + 3D vector for light position. + color (list, tuple, :class:`~taichi.types.vector(3, float)`): + (r, g, b) triple for the color of the light, in the range [0, 1]. + """ + self.scene.point_light(tuple(pos), tuple(color)) + + def ambient_light(self, color): + """Set the ambient color of this scene. + + Example:: + + >>> scene = ti.ui.Scene() + >>> scene.ambient_light([0.2, 0.2, 0.2]) + """ + self.scene.ambient_light(tuple(color)) diff --git a/local_packages/taichi/ui/staging_buffer.py b/local_packages/taichi/ui/staging_buffer.py new file mode 100644 index 0000000..6aabc1f --- /dev/null +++ b/local_packages/taichi/ui/staging_buffer.py @@ -0,0 +1,321 @@ +import numpy as np +from taichi.types import ndarray as ndarray_type +from taichi.lang import ops +from taichi.lang._texture import Texture +from taichi.lang.impl import ndarray +from taichi.lang.kernel_impl import kernel +from taichi.types.annotations import template +from taichi.types.primitive_types import f32, u8, u32 + +import taichi as ti + +vbo_field_cache = {} +depth_ndarray_cache = {} +indices_ndarray_cache = {} +transforms_ndarray_cache = {} + + +def get_depth_ndarray(window): + if window not in depth_ndarray_cache: + w, h = window.get_window_shape() + depth_arr = ndarray(dtype=ti.f32, shape=w * h) + depth_ndarray_cache[window] = depth_arr + return depth_ndarray_cache[window] + + +def get_vbo_field(vertices): + if vertices not in vbo_field_cache: + N = vertices.shape[0] + pos = 3 + normal = 3 + tex_coord = 2 + color = 4 + vertex_stride = pos + normal + tex_coord + color + vbo = np.ndarray((N, vertex_stride), dtype=np.float32) + vbo_field_cache[vertices] = vbo + return vbo + return vbo_field_cache[vertices] + + +def get_vbo_field_v2(vertices): + N = vertices.shape[0] + pos = 3 + normal = 3 + tex_coord = 2 + color = 4 + vertex_stride = pos + normal + tex_coord + color + vbo = np.ndarray((N, vertex_stride), dtype=np.float32) + return vbo + + +def get_indices_field(indices): + if isinstance(indices, np.ndarray): + return indices + indices_arr = indices.to_numpy() + indices_ndarray_cache[indices] = indices_arr + return indices_arr + + +def get_indices_field_v2(indices): + if isinstance(indices, np.ndarray): + return indices + indices_arr = indices.to_numpy() + return indices_arr + + +def get_transforms_field(transforms): + if isinstance(transforms, np.ndarray): + return transforms + transforms_arr = transforms.to_numpy() + transforms_ndarray_cache[transforms] = transforms_arr + return transforms_arr + + +def get_transforms_field_v2(transforms): + if isinstance(transforms, np.ndarray): + return transforms + transforms_arr = transforms.to_numpy() + return transforms_arr + + +@kernel +def copy_to_vbo_vector( + vbo: ndarray_type(element_dim=1), + attribute: ndarray_type(element_dim=1), + offset: template(), + size: template(), + default: template(), +): + for i in attribute: + vbo[i][offset : offset + size] = default + vbo[i][offset : offset + attribute.element_shape()[0]] = attribute[i] + + +@kernel +def copy_to_vbo_scalar( + vbo: ndarray_type(element_dim=1), + attribute: ndarray_type(element_dim=0), + offset: template(), +): + for i in attribute: + vbo[i][offset] = attribute[i] + + +@kernel +def copy_all_to_vbo( + vbo: ndarray_type(element_dim=1), + vertex: template(), + normal: template(), + texcoords: template(), + color: template(), +): + for i in vertex: + if ti.static(vertex.n == 3): + vbo[i][0:3] = vertex[i] + else: + vbo[i][0:2] = vertex[i] + vbo[i][3] = 0.0 + if ti.static(normal != 0): + vbo[i][3:6] = normal[i] + if ti.static(texcoords != 0): + vbo[i][6:8] = texcoords[i] + if ti.static(color != 0): + if ti.static(color.n == 3): + vbo[i][8:11] = color[i] + vbo[i][11] = 1.0 + else: + vbo[i][8:12] = color[i] + + +@kernel +def copy_all_to_vbo_particle( # The vbo copy function used for vertices of particles and circles + vbo: ndarray_type(element_dim=1), + vertex: template(), + radius: template(), + color: template(), +): + for i in vertex: + if ti.static(vertex.n == 3): + vbo[i][0:3] = vertex[i] + else: + vbo[i][0:2] = vertex[i] + vbo[i][3] = 0.0 + if ti.static(radius != 0): + vbo[i][3] = radius[i] + if ti.static(color != 0): + if ti.static(color.n == 3): + vbo[i][8:11] = color[i] + vbo[i][11] = 1.0 + else: + vbo[i][8:12] = color[i] + + +@ti.kernel +def copy_texture_to_rgba8( + src: ti.types.texture(num_dimensions=2), + dst: ti.types.ndarray(), + w: ti.i32, + h: ti.i32, +): + for i, j in ti.ndrange(w, h): + c = src.fetch(ti.Vector([i, j]), 0) + c = ops.max(0.0, ops.min(1.0, c)) + c = c * 255 + px = ti.cast(c, u32) + dst[i, j] = px[0] << 0 | px[1] << 8 | px[2] << 16 | px[3] << 24 + + +@ti.kernel +def copy_image_f32_to_rgba8( + src: ti.template(), + dst: ti.types.ndarray(), + num_components: ti.template(), + gray_scale: ti.template(), +): + for i, j in ti.ndrange(src.shape[0], src.shape[1]): + px = ti.Vector([0, 0, 0, 0xFF], dt=u32) + if ti.static(gray_scale): + c = 0.0 + c = src[i, j] + c = ops.max(0.0, ops.min(1.0, c)) + c = c * 255 + px[0] = px[1] = px[2] = ti.cast(c, u32) + else: + for k in ti.static(range(num_components)): + c = 0.0 + if ti.static(len(src.shape) == 3): + # 3D field source image + c = src[i, j, k] + else: + # 2D vector field source image + c = src[i, j][k] + c = ops.max(0.0, ops.min(1.0, c)) + c = c * 255 + px[k] = ti.cast(c, u32) + pack = px[0] << 0 | px[1] << 8 | px[2] << 16 | px[3] << 24 + dst[i, j] = pack + + +@ti.kernel +def copy_image_f32_to_rgba8_np( + src: ti.types.ndarray(), + dst: ti.types.ndarray(), + num_components: ti.template(), + gray_scale: ti.template(), +): + for I in ti.grouped(src): + i, j = I[0], I[1] + px = ti.Vector([0, 0, 0, 0xFF], dt=u32) + if ti.static(gray_scale): + c = 0.0 + c = src[i, j] + c = ops.max(0.0, ops.min(1.0, c)) + c = c * 255 + px[0] = px[1] = px[2] = ti.cast(c, u32) + else: + for k in ti.static(range(num_components)): + c = src[i, j, k] + c = ops.max(0.0, ops.min(1.0, c)) + c = c * 255 + px[k] = ti.cast(c, u32) + pack = px[0] << 0 | px[1] << 8 | px[2] << 16 | px[3] << 24 + dst[i, j] = pack + + +@ti.kernel +def copy_image_u8_to_rgba8( + src: ti.template(), + dst: ti.types.ndarray(), + num_components: ti.template(), + gray_scale: ti.template(), +): + for i, j in ti.ndrange(src.shape[0], src.shape[1]): + px = ti.Vector([0, 0, 0, 0xFF], dt=u32) + if ti.static(gray_scale): + px[0] = px[1] = px[2] = ti.cast(src[i, j], u32) + else: + for k in ti.static(range(num_components)): + if ti.static(len(src.shape) == 3): + # 3D field source image + px[k] = ti.cast(src[i, j, k], u32) + else: + # 2D vector field source image + px[k] = ti.cast(src[i, j][k], u32) + pack = px[0] << 0 | px[1] << 8 | px[2] << 16 | px[3] << 24 + dst[i, j] = pack + + +@ti.kernel +def copy_image_u8_to_rgba8_np( + src: ti.types.ndarray(), + dst: ti.types.ndarray(), + num_components: ti.template(), + gray_scale: ti.template(), +): + for I in ti.grouped(src): + i, j = I[0], I[1] + px = ti.Vector([0, 0, 0, 0xFF], dt=u32) + if ti.static(gray_scale): + px[0] = px[1] = px[2] = ti.cast(src[i, j], u32) + else: + for k in ti.static(range(num_components)): + px[k] = ti.cast(src[i, j, k], u32) + pack = px[0] << 0 | px[1] << 8 | px[2] << 16 | px[3] << 24 + dst[i, j] = pack + + +# ggui renderer always assumes the input image to be u8 RGBA +# if the user input is not in this format, a staging ti field is needed +image_field_cache = {} + + +def to_rgba8(image): + is_texture = isinstance(image, Texture) + is_grayscale = not hasattr(image, "n") and len(image.shape) == 2 + is_numpy = isinstance(image, np.ndarray) + is_non_grayscale_field = (hasattr(image, "n") and image.m == 1) or len(image.shape) == 3 + + if not is_texture and not is_grayscale and not is_numpy and not is_non_grayscale_field: + raise Exception( + "the input image needs to be either:\n" + "a Vector field (matrix with 1 column)\n" + "a 2D(grayscale)/3D field\n" + "a 2D(grayscale)/3D numpy ndarray\n" + "a texture" + ) + channels = 3 + + if not is_grayscale: + if len(image.shape) == 2: + channels = image.n + elif len(image.shape) == 3: + channels = image.shape[2] + else: + raise Exception("the shape of the image must be of the form (width,height) or (width,height,channels)") + + staging_key = image.shape[0:2] if is_numpy else image + + if staging_key not in image_field_cache: + staging_img = np.ndarray(image.shape[0:2], dtype=np.uint32) + image_field_cache[staging_key] = staging_img + else: + staging_img = image_field_cache[staging_key] + + if is_texture: + copy_texture_to_rgba8(image, staging_img, *image.shape[0:2]) + elif is_numpy: + if image.dtype == np.uint8: + copy_image_u8_to_rgba8_np(image, staging_img, channels, is_grayscale) + elif image.dtype == np.float32: + copy_image_f32_to_rgba8_np(image, staging_img, channels, is_grayscale) + else: + raise Exception("dtype of input image must either be u8 or f32") + else: + if image.dtype == u8: + copy_image_u8_to_rgba8(image, staging_img, channels, is_grayscale) + elif image.dtype == f32: + copy_image_f32_to_rgba8(image, staging_img, channels, is_grayscale) + else: + raise Exception("dtype of input image must either be u8 or f32") + + return staging_img diff --git a/local_packages/taichi/ui/ui.py b/local_packages/taichi/ui/ui.py new file mode 100644 index 0000000..1f5c4f1 --- /dev/null +++ b/local_packages/taichi/ui/ui.py @@ -0,0 +1,19 @@ +from taichi._lib import core as _ti_core + +from .camera import Camera # pylint: disable=unused-import +from .canvas import Canvas # pylint: disable=unused-import +from .constants import * # pylint: disable=unused-import,wildcard-import +from .imgui import Gui # pylint: disable=unused-import +from .scene import Scene # pylint: disable=unused-import +from .utils import check_ggui_availability # pylint: disable=unused-import +from .window import Window # pylint: disable=unused-import + + +# ---------------------- +ProjectionMode = _ti_core.ProjectionMode if _ti_core.GGUI_AVAILABLE else None +"""Camera projection mode, 0 for perspective and 1 for orthogonal. +""" + + +def make_camera(): + return Camera() diff --git a/local_packages/taichi/ui/utils.py b/local_packages/taichi/ui/utils.py new file mode 100644 index 0000000..aeea270 --- /dev/null +++ b/local_packages/taichi/ui/utils.py @@ -0,0 +1,85 @@ +import platform +from math import asin, cos, sin + +import numpy as np +from taichi._lib import core as _ti_core +from taichi._lib.utils import try_get_wheel_tag +from taichi.lang._ndarray import Ndarray +from taichi.lang.matrix import Vector +from taichi.lang.util import to_taichi_type + + +def get_field_info(field): + info = _ti_core.FieldInfo() + if field is None: + info.valid = False + return info + info.valid = True + # NDArray & numpy.ndarray + if isinstance(field, np.ndarray): + info.field_source = _ti_core.FieldSource.HostMappedPtr + info.dev_alloc = _ti_core.DeviceAllocation(0, field.ctypes.data) + info.dtype = to_taichi_type(field.dtype) + info.num_elements = np.prod(field.shape) + info.shape = field.shape + return info + if isinstance(field, Ndarray): + info.field_source = _ti_core.FieldSource.TaichiNDarray + info.dev_alloc = field.arr.get_device_allocation() + info.dtype = field.dtype + info.num_elements = np.prod(field.shape) * np.prod(field.element_shape) + info.shape = field.shape + field.element_shape + return info + # SNode + raise Exception("unsupported data source") + + +def euler_to_vec(yaw, pitch): + v = Vector([0.0, 0.0, 0.0]) + v[0] = sin(yaw) * cos(pitch) + v[1] = sin(pitch) + v[2] = cos(yaw) * cos(pitch) + return v + + +def vec_to_euler(v, eps: float = 1e-6): + v = v.normalized() + pitch = asin(v[1]) + + cos_pitch = np.sqrt(1 - v[1] * v[1]) + + sin_yaw = v[0] / cos_pitch + cos_yaw = v[2] / cos_pitch + + yaw = np.arctan2(sin_yaw, cos_yaw) + + return yaw, pitch + + +class GGUINotAvailableException(Exception): + pass + + +def check_ggui_availability(): + """Checks if the `GGUI` environment is available.""" + if _ti_core.GGUI_AVAILABLE: + return + + try: + # Try identifying the reason + import taichi # pylint: disable=import-outside-toplevel + + wheel_tag = try_get_wheel_tag(taichi) + if platform.system() == "Linux" and wheel_tag and "manylinux2014" in wheel_tag: + raise GGUINotAvailableException( + "GGUI is not available since you have installed a restricted version of taichi. " + "Please see yellow warning messages printed during startup for details." + ) + + except GGUINotAvailableException: + raise + + except Exception: + pass + + raise GGUINotAvailableException("GGUI is not available.") diff --git a/local_packages/taichi/ui/window.py b/local_packages/taichi/ui/window.py new file mode 100644 index 0000000..e549cdf --- /dev/null +++ b/local_packages/taichi/ui/window.py @@ -0,0 +1,199 @@ +import pathlib + +import numpy +from taichi._kernels import ( + arr_vulkan_layout_to_arr_normal_layout, + arr_vulkan_layout_to_field_normal_layout, +) +from taichi._lib import core as _ti_core +from taichi.lang._ndarray import Ndarray +from taichi.lang.impl import Field, default_cfg, get_runtime +from taichi.ui.staging_buffer import get_depth_ndarray + +from taichi import f32 + +from .canvas import Canvas +from .scene import SceneV2 +from .constants import PRESS, RELEASE +from .imgui import Gui +from .utils import check_ggui_availability + + +class Window: + """The window class. + + Args: + name (str): Window title. + res (tuple[int]): resolution (width, height) of the window, in pixels. + vsync (bool): whether or not vertical sync should be enabled. + show_window (bool): where or not display the window after initialization. + pos (tuple[int]): position (left to right, up to bottom) of the window which origins from the left-top of your main screen, in pixels. + """ + + def __init__(self, name, res, vsync=False, show_window=True, fps_limit=1000, pos=(100, 100)): + check_ggui_availability() + package_path = str(pathlib.Path(__file__).parent.parent) + ti_arch = default_cfg().arch + self.window = _ti_core.PyWindow( + get_runtime().prog, + name, + res, + pos, + vsync, + show_window, + fps_limit, + package_path, + ti_arch, + ) + + @property + def running(self): + """Check whether this window is running or not.""" + return self.window.is_running() + + @running.setter + def running(self, value): + """Set the running status of this window. + + Example:: + + >>> window.running = False + """ + self.window.set_is_running(value) + + @property + def event(self): + """Get the current unprocessed event.""" + return self.window.get_current_event() + + @event.setter + def event(self, value): + """Set the current unprocessed event.""" + self.window.set_current_event(value) + + def get_events(self, tag=None): + """Get the current list of unprocessed events. + + Args: + tag (str): A tag used for filtering events. \ + If it is None, then all events are returned. + """ + if tag is None: + return self.window.get_events(_ti_core.EventType.Any) + if tag is PRESS: + return self.window.get_events(_ti_core.EventType.Press) + if tag is RELEASE: + return self.window.get_events(_ti_core.EventType.Release) + raise Exception("unrecognized event tag") + + def get_event(self, tag=None): + """Returns whether or not a event that matches tag has occurred. + + If tag is None, then no filters are applied. If this function + returns `True`, the `event` property of the window will be set + to the corresponding event. + """ + if tag is None: + return self.window.get_event(_ti_core.EventType.Any) + if tag is PRESS: + return self.window.get_event(_ti_core.EventType.Press) + if tag is RELEASE: + return self.window.get_event(_ti_core.EventType.Release) + raise Exception("unrecognized event tag") + + def is_pressed(self, *keys): + """Checks if any of a set of specified keys is pressed. + + Args: + keys (list[:mod:`~taichi.ui.constants`]): The keys to be matched. + + Returns: + bool: `True` if any key among `keys` is pressed, else `False`. + """ + for k in keys: + if self.window.is_pressed(k): + return True + return False + + def get_canvas(self): + """Returns a canvas handle. See :class`~taichi.ui.canvas.Canvas`""" + return Canvas(self.window.get_canvas()) + + def get_scene(self): + """Returns a scene handle. See :class`~taichi.ui.scene.SceneV2`""" + return SceneV2(self.window.get_scene()) + + @property + def GUI(self): + """Returns a IMGUI handle. See :class`~taichi.ui.ui.Gui` This is an + deprecated interface, please use `~taichi.ui.Window.get_gui` instead. + """ + return self.get_gui() + + def get_gui(self): + """Returns a IMGUI handle. See :class`~taichi.ui.ui.Gui`""" + return Gui(self.window.GUI()) + + def get_cursor_pos(self): + """Get current cursor position, in the range `[0, 1] x [0, 1]`.""" + return self.window.get_cursor_pos() + + def show(self): + """Display this window.""" + return self.window.show() + + def get_window_shape(self): + """Return the shape of window. + Return: + tuple : (width, height) + """ + return self.window.get_window_shape() + + def save_image(self, filename): + """Save the window content to an image file. + + Args: + filename (str): output filename. + """ + return self.window.write_image(filename) + + def get_depth_buffer(self, depth): + """fetch the depth information of current scene to ti.ndarray/ti.field + (support copy from vulkan to cuda/cpu which is a faster version) + Args: + depth(ti.ndarray/ti.field): [window_width, window_height] carries depth information. + """ + if not (len(depth.shape) == 2 and depth.dtype == f32): + raise Exception("Only Support 2d-shape and ti.f32 data format.") + if not isinstance(depth, (Ndarray, Field)): + raise Exception("Only Support Ndarray and Field data type.") + tmp_depth = get_depth_ndarray(self.window) + self.window.copy_depth_buffer_to_ndarray(tmp_depth.arr) + if isinstance(depth, Ndarray): + arr_vulkan_layout_to_arr_normal_layout(tmp_depth, depth) + else: + arr_vulkan_layout_to_field_normal_layout(tmp_depth, depth) + + def get_depth_buffer_as_numpy(self): + """Get the depth information of current scene to numpy array. + + Returns: + 2d numpy array: [width, height] with (0.0~1.0) float-format. + """ + tmp_depth = get_depth_ndarray(self.window) + self.window.copy_depth_buffer_to_ndarray(tmp_depth.arr) + depth_numpy_arr = numpy.zeros(self.get_window_shape(), dtype=numpy.float32) + arr_vulkan_layout_to_arr_normal_layout(tmp_depth, depth_numpy_arr) + return depth_numpy_arr + + def get_image_buffer_as_numpy(self): + """Get the window content to numpy array. + + Returns: + 3d numpy array: [width, height, channels] with (0.0~1.0) float-format color. + """ + return self.window.get_image_buffer_as_numpy() + + def destroy(self): + """Destroy this window. The window will be unavailable then.""" + return self.window.destroy()

' : '\U0001d4ab', + '\\' : '\U0001d4ac', + '\\' : '\U0000211b', + '\\' : '\U0001d4ae', + '\\' : '\U0001d4af', + '\\' : '\U0001d4b0', + '\\' : '\U0001d4b1', + '\\' : '\U0001d4b2', + '\\' : '\U0001d4b3', + '\\' : '\U0001d4b4', + '\\' : '\U0001d4b5', + '\\' : '\U0001d5ba', + '\\' : '\U0001d5bb', + '\\' : '\U0001d5bc', + '\\' : '\U0001d5bd', + '\\' : '\U0001d5be', + '\\' : '\U0001d5bf', + '\\' : '\U0001d5c0', + '\\' : '\U0001d5c1', + '\\' : '\U0001d5c2', + '\\' : '\U0001d5c3', + '\\' : '\U0001d5c4', + '\\' : '\U0001d5c5', + '\\' : '\U0001d5c6', + '\\' : '\U0001d5c7', + '\\' : '\U0001d5c8', + '\\